text stringlengths 11 4.05M |
|---|
package rabbitmq
import (
_ "fmt"
_ "github.com/tidwall/gjson"
_ "io/ioutil"
)
// Conn holds the connection settings for a RabbitMQ endpoint.
type Conn struct {
	// Url is the broker address read from JSON configuration.
	// NOTE(review): presumably an AMQP URL (amqp://user:pass@host:port/) — confirm with callers.
	Url string `json:"url"`
}
|
package main
import "fmt"
// main demonstrates two slice basics: copy (bounded by the destination
// length) and slicing an array with a half-open range.
func main() {
	src := []int{1, 2, 3}
	// copy transfers at most len(dst) elements, so dst keeps only the
	// first two values of src.
	dst := make([]int, 2)
	copy(dst, src)
	fmt.Println(src, dst)

	letters := [6]string{"a", "b", "c", "d", "e", "f"}
	// letters[2:5] is the half-open range: elements 2, 3 and 4.
	fmt.Println(letters[2:5])
}
|
package main
import "fmt"
// main prints the card returned by newCard.
func main() {
	fmt.Println(newCard())
}
// newCard returns the name of a playing card; it demonstrates a function
// with an explicit string return type.
func newCard() string {
	const card = "Ace of Diamond"
	return card
}
|
package v1alpha1_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/experimental/workflows/pkg/apis/workflows/v1alpha1"
"github.com/tektoncd/experimental/workflows/test/parse"
"knative.dev/pkg/apis"
)
// TestValidateFilters checks that a gitRef trigger filter validates for
// "push" and "pull_request" events and is rejected for any other event type.
func TestValidateFilters(t *testing.T) {
	tcs := []struct {
		name    string
		wf      *v1alpha1.Workflow
		wantErr *apis.FieldError // nil means validation is expected to succeed
	}{{
		name: "gitref filter with push event",
		wf: parse.MustParseWorkflow(t, "trigger-workflow", "some-namespace", `
spec:
triggers:
- name: on-push
event:
type: "push"
filters:
gitRef:
regex: "^main$"
`),
	}, {
		name: "gitref filter with pull_request event",
		wf: parse.MustParseWorkflow(t, "trigger-workflow", "some-namespace", `
spec:
triggers:
- name: on-pr
event:
type: "pull_request"
filters:
gitRef:
regex: "^main$"
`),
	}, {
		name: "gitref filter with other event type",
		wf: parse.MustParseWorkflow(t, "trigger-workflow", "some-namespace", `
spec:
triggers:
- name: on-event
event:
type: "some-other-event-type"
filters:
gitRef:
regex: "^main$"
`),
		wantErr: &apis.FieldError{Message: "gitRef filter can be used only with 'push' and 'pull_request' events but got event some-other-event-type"},
	}}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.wf.Validate(context.Background())
			// NOTE(review): this calls Error() on wantErr and err even when
			// they are nil — relies on (*apis.FieldError).Error() being
			// nil-safe; confirm against knative.dev/pkg/apis.
			if d := cmp.Diff(tc.wantErr.Error(), err.Error()); d != "" {
				t.Errorf("wrong error: %s", d)
			}
		})
	}
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// MedicationStatement is documented here http://hl7.org/fhir/StructureDefinition/MedicationStatement
// MedicationStatement is the generated Go mapping of the FHIR
// MedicationStatement resource; fields mirror the resource elements and
// are serialized to both BSON and JSON under their FHIR names.
type MedicationStatement struct {
	Id                *string      `bson:"id,omitempty" json:"id,omitempty"`
	Meta              *Meta        `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules     *string      `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language          *string      `bson:"language,omitempty" json:"language,omitempty"`
	Text              *Narrative   `bson:"text,omitempty" json:"text,omitempty"`
	Extension         []Extension  `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension  `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Identifier        []Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
	BasedOn           []Reference  `bson:"basedOn,omitempty" json:"basedOn,omitempty"`
	PartOf            []Reference  `bson:"partOf,omitempty" json:"partOf,omitempty"`
	// Status is required (no omitempty).
	Status       string            `bson:"status" json:"status"`
	StatusReason []CodeableConcept `bson:"statusReason,omitempty" json:"statusReason,omitempty"`
	Category     *CodeableConcept  `bson:"category,omitempty" json:"category,omitempty"`
	// NOTE(review): these two look like the FHIR medication[x] choice —
	// presumably exactly one is populated, but both serialize unconditionally
	// here (no omitempty); confirm against the generator.
	MedicationCodeableConcept CodeableConcept   `bson:"medicationCodeableConcept" json:"medicationCodeableConcept"`
	MedicationReference       Reference         `bson:"medicationReference" json:"medicationReference"`
	Subject                   Reference         `bson:"subject" json:"subject"`
	Context                   *Reference        `bson:"context,omitempty" json:"context,omitempty"`
	EffectiveDateTime         *string           `bson:"effectiveDateTime,omitempty" json:"effectiveDateTime,omitempty"`
	EffectivePeriod           *Period           `bson:"effectivePeriod,omitempty" json:"effectivePeriod,omitempty"`
	DateAsserted              *string           `bson:"dateAsserted,omitempty" json:"dateAsserted,omitempty"`
	InformationSource         *Reference        `bson:"informationSource,omitempty" json:"informationSource,omitempty"`
	DerivedFrom               []Reference       `bson:"derivedFrom,omitempty" json:"derivedFrom,omitempty"`
	ReasonCode                []CodeableConcept `bson:"reasonCode,omitempty" json:"reasonCode,omitempty"`
	ReasonReference           []Reference       `bson:"reasonReference,omitempty" json:"reasonReference,omitempty"`
	Note                      []Annotation      `bson:"note,omitempty" json:"note,omitempty"`
	Dosage                    []Dosage          `bson:"dosage,omitempty" json:"dosage,omitempty"`
}
// OtherMedicationStatement has the same fields as MedicationStatement but
// none of its methods; marshalling through it avoids infinite recursion
// into MarshalJSON below.
type OtherMedicationStatement MedicationStatement

// MarshalJSON marshals the given MedicationStatement as JSON into a byte
// slice, injecting the fixed "resourceType" discriminator FHIR requires.
func (r MedicationStatement) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		OtherMedicationStatement
		ResourceType string `json:"resourceType"`
	}{
		OtherMedicationStatement: OtherMedicationStatement(r),
		ResourceType:             "MedicationStatement",
	})
}
// UnmarshalMedicationStatement decodes b into a MedicationStatement. On a
// decode error the zero-value resource is returned alongside the error.
func UnmarshalMedicationStatement(b []byte) (MedicationStatement, error) {
	var ms MedicationStatement
	err := json.Unmarshal(b, &ms)
	return ms, err
}
|
package main
import (
"advent-2015/utils"
"fmt"
"regexp"
"strconv"
)
// main solves AoC 2015 day 7: evaluate wire "a", then feed that signal
// back into wire "b" and evaluate "a" again.
func main() {
	input := utils.ReadLines("./day07/input.txt")
	circuit := NewCircuit(input)

	fmt.Println("------- Part 1 -------")
	signal := circuit.GetValue("a")
	fmt.Printf("After running the circuit, wire a has signal value %d\n\n", signal)

	fmt.Println("------- Part 2 -------")
	// Clear every cached signal, then pin wire b to part 1's answer.
	circuit.Reset()
	circuit.Override("b", signal)
	signal = circuit.GetValue("a")
	fmt.Printf("After overriding 'b' and re-running the circuit, wire a has signal value %d\n\n", signal)
}
// operation enumerates the gate types a wire can be driven by.
type operation int

const (
	set operation = iota // direct assignment from a wire or literal
	and
	or
	lshift
	rshift
	not
)

// Circuit maps wire names to their driving gates.
type Circuit map[string]*Wire

// Wire is one gate: its operand names (wires or numeric literals), its
// operation, and a memoized output filled in by GetValue.
type Wire struct {
	Inputs    []string
	Operation operation
	resolved  bool   // true once output holds the computed signal
	output    uint16 // cached signal; valid only when resolved
}
// GetValue resolves the signal on the named wire, recursively evaluating
// its inputs and memoizing the result so each wire is computed at most
// once per run (see Reset). The argument may itself be a decimal literal,
// which is returned directly.
func (c Circuit) GetValue(wire string) uint16 {
	// Operands may be numeric literals rather than wire names.
	if n, err := strconv.Atoi(wire); err == nil {
		return uint16(n)
	}
	w := c[wire]
	if w.resolved {
		return w.output
	}
	in := func(i int) uint16 { return c.GetValue(w.Inputs[i]) }
	switch w.Operation {
	case set:
		w.output = in(0)
	case and:
		w.output = in(0) & in(1)
	case or:
		w.output = in(0) | in(1)
	case lshift:
		w.output = in(0) << in(1)
	case rshift:
		w.output = in(0) >> in(1)
	case not:
		w.output = ^in(0)
	}
	w.resolved = true
	return w.output
}
// Reset clears every wire's cached signal so the circuit can be re-run.
func (c Circuit) Reset() {
	for name := range c {
		c[name].resolved = false
		c[name].output = 0
	}
}
// Override pins the named wire to a fixed signal, bypassing its gate.
func (c Circuit) Override(wire string, value uint16) {
	w := c[wire]
	w.output = value
	w.resolved = true
}
// NewCircuit parses puzzle instructions into a Circuit. Each instruction
// connects a gate's output to a destination wire; an unparseable line panics.
func NewCircuit(instructions []string) Circuit {
	// In every pattern the final capture group is the destination wire and
	// the preceding groups are the operands, which lets one handler serve
	// all six instruction forms.
	rules := []struct {
		re *regexp.Regexp
		op operation
	}{
		{regexp.MustCompile(`^(\w+) -> (\w+)$`), set},
		{regexp.MustCompile(`^(\w+) AND (\w+) -> (\w+)$`), and},
		{regexp.MustCompile(`^(\w+) OR (\w+) -> (\w+)$`), or},
		{regexp.MustCompile(`^(\w+) LSHIFT (\d+) -> (\w+)$`), lshift},
		{regexp.MustCompile(`^(\w+) RSHIFT (\d+) -> (\w+)$`), rshift},
		{regexp.MustCompile(`^NOT (\w+) -> (\w+)$`), not},
	}
	c := make(Circuit)
	for _, line := range instructions {
		parsed := false
		for _, rule := range rules {
			m := rule.re.FindStringSubmatch(line)
			if len(m) == 0 {
				continue
			}
			c[m[len(m)-1]] = &Wire{Operation: rule.op, Inputs: m[1 : len(m)-1]}
			parsed = true
			break
		}
		if !parsed {
			panic(fmt.Sprintf("Nothing matched '%s'", line))
		}
	}
	return c
}
|
// Copyright 2017 The go-interpreter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wast
import (
"fmt"
"unicode"
)
// Pos is a 1-based line/column position in the scanned source.
type Pos struct {
	Line, Column int
}

// Token is one lexical unit: its kind, where it starts, the raw source
// text, and (for literals) the parsed value.
type Token struct {
	Kind  TokenKind
	Pos   Pos
	Text  string
	Value interface{}
}
// String renders the token for diagnostics: EOF specially, every other
// kind as "<KIND "text">".
func (t *Token) String() string {
	if t.Kind == EOF {
		return "<EOF>"
	}
	return fmt.Sprintf("<%v %q>", t.Kind, t.Text)
}
// TokenKind identifies a lexical token. Single punctuation tokens are the
// rune itself; the named kinds below start at unicode.MaxRune so they sit
// outside (almost) all real source runes.
type TokenKind rune

const (
	// NOTE(review): INVALID == unicode.MaxRune, which is itself a valid
	// rune value (U+10FFFF) — confirm the lexer can never emit that rune
	// as a plain token, otherwise it would alias INVALID.
	INVALID = iota + unicode.MaxRune
	ALIGN
	ASSERT_EXHAUSTION
	ASSERT_INVALID
	ASSERT_MALFORMED
	ASSERT_RETURN
	ASSERT_TRAP
	ASSERT_UNLINKABLE
	BINARY
	BLOCK
	BR
	BR_IF
	BR_TABLE
	CALL
	CALL_INDIRECT
	COMPARE
	CONST
	CONVERT
	DATA
	DROP
	ELEM
	ELSE
	END
	EOF
	ERROR
	EXPORT
	// f32 value type and instructions.
	F32
	F32_ABS
	F32_ADD
	F32_CEIL
	F32_CONST
	F32_CONVERT_I32_S
	F32_CONVERT_I32_U
	F32_CONVERT_I64_S
	F32_CONVERT_I64_U
	F32_COPYSIGN
	F32_DEMOTE_F64
	F32_DIV
	F32_EQ
	F32_FLOOR
	F32_GE
	F32_GT
	F32_LE
	F32_LOAD
	F32_LT
	F32_MAX
	F32_MIN
	F32_MUL
	F32_NE
	F32_NEAREST
	F32_NEG
	F32_REINTERPRET_I32
	F32_SQRT
	F32_STORE
	F32_SUB
	F32_TRUNC
	// f64 value type and instructions.
	F64
	F64_ABS
	F64_ADD
	F64_CEIL
	F64_CONST
	F64_CONVERT_I32_S
	F64_CONVERT_I32_U
	F64_CONVERT_I64_S
	F64_CONVERT_I64_U
	F64_COPYSIGN
	F64_DIV
	F64_EQ
	F64_FLOOR
	F64_GE
	F64_GT
	F64_LE
	F64_LOAD
	F64_LT
	F64_MAX
	F64_MIN
	F64_MUL
	F64_NE
	F64_NEAREST
	F64_NEG
	F64_PROMOTE_F32
	F64_REINTERPRET_I64
	F64_SQRT
	F64_STORE
	F64_SUB
	F64_TRUNC
	FLOAT
	FUNC
	FUNCREF
	GET
	GLOBAL
	GLOBAL_GET
	GLOBAL_SET
	// i32 value type and instructions.
	I32
	I32_ADD
	I32_AND
	I32_CLZ
	I32_CONST
	I32_CTZ
	I32_DIV_S
	I32_DIV_U
	I32_EQ
	I32_EQZ
	I32_EXTEND16_S
	I32_EXTEND8_S
	I32_GE_S
	I32_GE_U
	I32_GT_S
	I32_GT_U
	I32_LE_S
	I32_LE_U
	I32_LOAD
	I32_LOAD16_S
	I32_LOAD16_U
	I32_LOAD8_S
	I32_LOAD8_U
	I32_LT_S
	I32_LT_U
	I32_MUL
	I32_NE
	I32_OR
	I32_POPCNT
	I32_REINTERPRET_F32
	I32_REM_S
	I32_REM_U
	I32_ROTL
	I32_ROTR
	I32_SHL
	I32_SHR_S
	I32_SHR_U
	I32_STORE
	I32_STORE16
	I32_STORE8
	I32_SUB
	I32_TRUNC_F32_S
	I32_TRUNC_F32_U
	I32_TRUNC_F64_S
	I32_TRUNC_F64_U
	I32_TRUNC_SAT_F32_S
	I32_TRUNC_SAT_F32_U
	I32_TRUNC_SAT_F64_S
	I32_TRUNC_SAT_F64_U
	I32_WRAP_I64
	I32_XOR
	// i64 value type and instructions.
	I64
	I64_ADD
	I64_AND
	I64_CLZ
	I64_CONST
	I64_CTZ
	I64_DIV_S
	I64_DIV_U
	I64_EQ
	I64_EQZ
	I64_EXTEND16_S
	I64_EXTEND32_S
	I64_EXTEND8_S
	I64_EXTEND_I32_S
	I64_EXTEND_I32_U
	I64_GE_S
	I64_GE_U
	I64_GT_S
	I64_GT_U
	I64_LE_S
	I64_LE_U
	I64_LOAD
	I64_LOAD16_S
	I64_LOAD16_U
	I64_LOAD32_S
	I64_LOAD32_U
	I64_LOAD8_S
	I64_LOAD8_U
	I64_LT_S
	I64_LT_U
	I64_MUL
	I64_NE
	I64_OR
	I64_POPCNT
	I64_REINTERPRET_F64
	I64_REM_S
	I64_REM_U
	I64_ROTL
	I64_ROTR
	I64_SHL
	I64_SHR_S
	I64_SHR_U
	I64_STORE
	I64_STORE16
	I64_STORE32
	I64_STORE8
	I64_SUB
	I64_TRUNC_F32_S
	I64_TRUNC_F32_U
	I64_TRUNC_F64_S
	I64_TRUNC_F64_U
	I64_TRUNC_SAT_F32_S
	I64_TRUNC_SAT_F32_U
	I64_TRUNC_SAT_F64_S
	I64_TRUNC_SAT_F64_U
	I64_XOR
	IF
	IMPORT
	INPUT
	INT
	INVOKE
	LOCAL
	LOCAL_GET
	LOCAL_SET
	LOCAL_TEE
	LOOP
	MEMORY
	MEMORY_GROW
	MEMORY_SIZE
	MODULE
	MUT
	NAN_ARITHMETIC
	NAN_CANONICAL
	NAT
	NOP
	OFFSET
	OUTPUT
	PARAM
	QUOTE
	REGISTER
	RESULT
	RETURN
	SCRIPT
	SELECT
	START
	STRING
	TABLE
	TEST
	THEN
	TYPE
	UNARY
	UNREACHABLE
	VALUE_TYPE
	VAR
)
// tokenKindOf maps a source-text keyword to its TokenKind. Only kinds
// with a concrete spelling appear here; synthetic kinds (EOF, ERROR, the
// literal classes, etc.) are produced directly by the scanner.
var tokenKindOf = map[string]TokenKind{
	"align": ALIGN, "assert_exhaustion": ASSERT_EXHAUSTION,
	"assert_invalid": ASSERT_INVALID, "assert_malformed": ASSERT_MALFORMED,
	"assert_return": ASSERT_RETURN, "assert_trap": ASSERT_TRAP,
	"assert_unlinkable": ASSERT_UNLINKABLE, "binary": BINARY, "block": BLOCK,
	"br": BR, "br_if": BR_IF, "br_table": BR_TABLE, "call": CALL,
	"call_indirect": CALL_INDIRECT, "data": DATA, "drop": DROP, "elem": ELEM,
	"else": ELSE, "end": END, "export": EXPORT,
	"f32": F32, "f32.abs": F32_ABS, "f32.add": F32_ADD, "f32.ceil": F32_CEIL,
	"f32.const": F32_CONST, "f32.convert_i32_s": F32_CONVERT_I32_S,
	"f32.convert_i32_u": F32_CONVERT_I32_U, "f32.convert_i64_s": F32_CONVERT_I64_S,
	"f32.convert_i64_u": F32_CONVERT_I64_U, "f32.copysign": F32_COPYSIGN,
	"f32.demote_f64": F32_DEMOTE_F64, "f32.div": F32_DIV, "f32.eq": F32_EQ,
	"f32.floor": F32_FLOOR, "f32.ge": F32_GE, "f32.gt": F32_GT, "f32.le": F32_LE,
	"f32.load": F32_LOAD, "f32.lt": F32_LT, "f32.max": F32_MAX, "f32.min": F32_MIN,
	"f32.mul": F32_MUL, "f32.ne": F32_NE, "f32.nearest": F32_NEAREST,
	"f32.neg": F32_NEG, "f32.reinterpret_i32": F32_REINTERPRET_I32,
	"f32.sqrt": F32_SQRT, "f32.store": F32_STORE, "f32.sub": F32_SUB,
	"f32.trunc": F32_TRUNC,
	"f64": F64, "f64.abs": F64_ABS, "f64.add": F64_ADD, "f64.ceil": F64_CEIL,
	"f64.const": F64_CONST, "f64.convert_i32_s": F64_CONVERT_I32_S,
	"f64.convert_i32_u": F64_CONVERT_I32_U, "f64.convert_i64_s": F64_CONVERT_I64_S,
	"f64.convert_i64_u": F64_CONVERT_I64_U, "f64.copysign": F64_COPYSIGN,
	"f64.div": F64_DIV, "f64.eq": F64_EQ, "f64.floor": F64_FLOOR,
	"f64.ge": F64_GE, "f64.gt": F64_GT, "f64.le": F64_LE, "f64.load": F64_LOAD,
	"f64.lt": F64_LT, "f64.max": F64_MAX, "f64.min": F64_MIN, "f64.mul": F64_MUL,
	"f64.ne": F64_NE, "f64.nearest": F64_NEAREST, "f64.neg": F64_NEG,
	"f64.promote_f32": F64_PROMOTE_F32, "f64.reinterpret_i64": F64_REINTERPRET_I64,
	"f64.sqrt": F64_SQRT, "f64.store": F64_STORE, "f64.sub": F64_SUB,
	"f64.trunc": F64_TRUNC,
	"func": FUNC, "funcref": FUNCREF, "get": GET, "global": GLOBAL,
	"global.get": GLOBAL_GET, "global.set": GLOBAL_SET,
	"i32": I32, "i32.add": I32_ADD, "i32.and": I32_AND, "i32.clz": I32_CLZ,
	"i32.const": I32_CONST, "i32.ctz": I32_CTZ, "i32.div_s": I32_DIV_S,
	"i32.div_u": I32_DIV_U, "i32.eq": I32_EQ, "i32.eqz": I32_EQZ,
	"i32.extend16_s": I32_EXTEND16_S, "i32.extend8_s": I32_EXTEND8_S,
	"i32.ge_s": I32_GE_S, "i32.ge_u": I32_GE_U, "i32.gt_s": I32_GT_S,
	"i32.gt_u": I32_GT_U, "i32.le_s": I32_LE_S, "i32.le_u": I32_LE_U,
	"i32.load": I32_LOAD, "i32.load16_s": I32_LOAD16_S, "i32.load16_u": I32_LOAD16_U,
	"i32.load8_s": I32_LOAD8_S, "i32.load8_u": I32_LOAD8_U,
	"i32.lt_s": I32_LT_S, "i32.lt_u": I32_LT_U, "i32.mul": I32_MUL,
	"i32.ne": I32_NE, "i32.or": I32_OR, "i32.popcnt": I32_POPCNT,
	"i32.reinterpret_f32": I32_REINTERPRET_F32, "i32.rem_s": I32_REM_S,
	"i32.rem_u": I32_REM_U, "i32.rotl": I32_ROTL, "i32.rotr": I32_ROTR,
	"i32.shl": I32_SHL, "i32.shr_s": I32_SHR_S, "i32.shr_u": I32_SHR_U,
	"i32.store": I32_STORE, "i32.store16": I32_STORE16, "i32.store8": I32_STORE8,
	"i32.sub": I32_SUB, "i32.trunc_f32_s": I32_TRUNC_F32_S,
	"i32.trunc_f32_u": I32_TRUNC_F32_U, "i32.trunc_f64_s": I32_TRUNC_F64_S,
	"i32.trunc_f64_u": I32_TRUNC_F64_U, "i32.trunc_sat_f32_s": I32_TRUNC_SAT_F32_S,
	"i32.trunc_sat_f32_u": I32_TRUNC_SAT_F32_U, "i32.trunc_sat_f64_s": I32_TRUNC_SAT_F64_S,
	"i32.trunc_sat_f64_u": I32_TRUNC_SAT_F64_U, "i32.wrap_i64": I32_WRAP_I64,
	"i32.xor": I32_XOR,
	"i64": I64, "i64.add": I64_ADD, "i64.and": I64_AND, "i64.clz": I64_CLZ,
	"i64.const": I64_CONST, "i64.ctz": I64_CTZ, "i64.div_s": I64_DIV_S,
	"i64.div_u": I64_DIV_U, "i64.eq": I64_EQ, "i64.eqz": I64_EQZ,
	"i64.extend16_s": I64_EXTEND16_S, "i64.extend32_s": I64_EXTEND32_S,
	"i64.extend8_s": I64_EXTEND8_S, "i64.extend_i32_s": I64_EXTEND_I32_S,
	"i64.extend_i32_u": I64_EXTEND_I32_U, "i64.ge_s": I64_GE_S,
	"i64.ge_u": I64_GE_U, "i64.gt_s": I64_GT_S, "i64.gt_u": I64_GT_U,
	"i64.le_s": I64_LE_S, "i64.le_u": I64_LE_U, "i64.load": I64_LOAD,
	"i64.load16_s": I64_LOAD16_S, "i64.load16_u": I64_LOAD16_U,
	"i64.load32_s": I64_LOAD32_S, "i64.load32_u": I64_LOAD32_U,
	"i64.load8_s": I64_LOAD8_S, "i64.load8_u": I64_LOAD8_U,
	"i64.lt_s": I64_LT_S, "i64.lt_u": I64_LT_U, "i64.mul": I64_MUL,
	"i64.ne": I64_NE, "i64.or": I64_OR, "i64.popcnt": I64_POPCNT,
	"i64.reinterpret_f64": I64_REINTERPRET_F64, "i64.rem_s": I64_REM_S,
	"i64.rem_u": I64_REM_U, "i64.rotl": I64_ROTL, "i64.rotr": I64_ROTR,
	"i64.shl": I64_SHL, "i64.shr_s": I64_SHR_S, "i64.shr_u": I64_SHR_U,
	"i64.store": I64_STORE, "i64.store16": I64_STORE16, "i64.store32": I64_STORE32,
	"i64.store8": I64_STORE8, "i64.sub": I64_SUB,
	"i64.trunc_f32_s": I64_TRUNC_F32_S, "i64.trunc_f32_u": I64_TRUNC_F32_U,
	"i64.trunc_f64_s": I64_TRUNC_F64_S, "i64.trunc_f64_u": I64_TRUNC_F64_U,
	"i64.trunc_sat_f32_s": I64_TRUNC_SAT_F32_S, "i64.trunc_sat_f32_u": I64_TRUNC_SAT_F32_U,
	"i64.trunc_sat_f64_s": I64_TRUNC_SAT_F64_S, "i64.trunc_sat_f64_u": I64_TRUNC_SAT_F64_U,
	"i64.xor": I64_XOR,
	"if": IF, "import": IMPORT, "input": INPUT, "invoke": INVOKE,
	"local": LOCAL, "local.get": LOCAL_GET, "local.set": LOCAL_SET,
	"local.tee": LOCAL_TEE, "loop": LOOP, "memory": MEMORY,
	"memory.grow": MEMORY_GROW, "memory.size": MEMORY_SIZE, "module": MODULE,
	"mut": MUT, "nan:arithmetic": NAN_ARITHMETIC, "nan:canonical": NAN_CANONICAL,
	"nop": NOP, "offset": OFFSET, "output": OUTPUT, "param": PARAM,
	"quote": QUOTE, "register": REGISTER, "result": RESULT, "return": RETURN,
	"script": SCRIPT, "select": SELECT, "start": START, "table": TABLE,
	"then": THEN, "type": TYPE, "unreachable": UNREACHABLE,
}
// tokenKindNames maps every named TokenKind to its identifier, replacing
// the former ~480-line switch with a data table that is easier to keep in
// sync with the const block above.
var tokenKindNames = map[TokenKind]string{
	INVALID: "INVALID", ALIGN: "ALIGN", ASSERT_EXHAUSTION: "ASSERT_EXHAUSTION",
	ASSERT_INVALID: "ASSERT_INVALID", ASSERT_MALFORMED: "ASSERT_MALFORMED",
	ASSERT_RETURN: "ASSERT_RETURN", ASSERT_TRAP: "ASSERT_TRAP",
	ASSERT_UNLINKABLE: "ASSERT_UNLINKABLE", BINARY: "BINARY", BLOCK: "BLOCK",
	BR: "BR", BR_IF: "BR_IF", BR_TABLE: "BR_TABLE", CALL: "CALL",
	CALL_INDIRECT: "CALL_INDIRECT", COMPARE: "COMPARE", CONST: "CONST",
	CONVERT: "CONVERT", DATA: "DATA", DROP: "DROP", ELEM: "ELEM", ELSE: "ELSE",
	END: "END", EOF: "EOF", ERROR: "ERROR", EXPORT: "EXPORT",
	F32: "F32", F32_ABS: "F32_ABS", F32_ADD: "F32_ADD", F32_CEIL: "F32_CEIL",
	F32_CONST: "F32_CONST", F32_CONVERT_I32_S: "F32_CONVERT_I32_S",
	F32_CONVERT_I32_U: "F32_CONVERT_I32_U", F32_CONVERT_I64_S: "F32_CONVERT_I64_S",
	F32_CONVERT_I64_U: "F32_CONVERT_I64_U", F32_COPYSIGN: "F32_COPYSIGN",
	F32_DEMOTE_F64: "F32_DEMOTE_F64", F32_DIV: "F32_DIV", F32_EQ: "F32_EQ",
	F32_FLOOR: "F32_FLOOR", F32_GE: "F32_GE", F32_GT: "F32_GT", F32_LE: "F32_LE",
	F32_LOAD: "F32_LOAD", F32_LT: "F32_LT", F32_MAX: "F32_MAX", F32_MIN: "F32_MIN",
	F32_MUL: "F32_MUL", F32_NE: "F32_NE", F32_NEAREST: "F32_NEAREST",
	F32_NEG: "F32_NEG", F32_REINTERPRET_I32: "F32_REINTERPRET_I32",
	F32_SQRT: "F32_SQRT", F32_STORE: "F32_STORE", F32_SUB: "F32_SUB",
	F32_TRUNC: "F32_TRUNC",
	F64: "F64", F64_ABS: "F64_ABS", F64_ADD: "F64_ADD", F64_CEIL: "F64_CEIL",
	F64_CONST: "F64_CONST", F64_CONVERT_I32_S: "F64_CONVERT_I32_S",
	F64_CONVERT_I32_U: "F64_CONVERT_I32_U", F64_CONVERT_I64_S: "F64_CONVERT_I64_S",
	F64_CONVERT_I64_U: "F64_CONVERT_I64_U", F64_COPYSIGN: "F64_COPYSIGN",
	F64_DIV: "F64_DIV", F64_EQ: "F64_EQ", F64_FLOOR: "F64_FLOOR", F64_GE: "F64_GE",
	F64_GT: "F64_GT", F64_LE: "F64_LE", F64_LOAD: "F64_LOAD", F64_LT: "F64_LT",
	F64_MAX: "F64_MAX", F64_MIN: "F64_MIN", F64_MUL: "F64_MUL", F64_NE: "F64_NE",
	F64_NEAREST: "F64_NEAREST", F64_NEG: "F64_NEG",
	F64_PROMOTE_F32: "F64_PROMOTE_F32", F64_REINTERPRET_I64: "F64_REINTERPRET_I64",
	F64_SQRT: "F64_SQRT", F64_STORE: "F64_STORE", F64_SUB: "F64_SUB",
	F64_TRUNC: "F64_TRUNC",
	FLOAT: "FLOAT", FUNC: "FUNC", FUNCREF: "FUNCREF", GET: "GET",
	GLOBAL: "GLOBAL", GLOBAL_GET: "GLOBAL_GET", GLOBAL_SET: "GLOBAL_SET",
	I32: "I32", I32_ADD: "I32_ADD", I32_AND: "I32_AND", I32_CLZ: "I32_CLZ",
	I32_CONST: "I32_CONST", I32_CTZ: "I32_CTZ", I32_DIV_S: "I32_DIV_S",
	I32_DIV_U: "I32_DIV_U", I32_EQ: "I32_EQ", I32_EQZ: "I32_EQZ",
	I32_EXTEND16_S: "I32_EXTEND16_S", I32_EXTEND8_S: "I32_EXTEND8_S",
	I32_GE_S: "I32_GE_S", I32_GE_U: "I32_GE_U", I32_GT_S: "I32_GT_S",
	I32_GT_U: "I32_GT_U", I32_LE_S: "I32_LE_S", I32_LE_U: "I32_LE_U",
	I32_LOAD: "I32_LOAD", I32_LOAD16_S: "I32_LOAD16_S", I32_LOAD16_U: "I32_LOAD16_U",
	I32_LOAD8_S: "I32_LOAD8_S", I32_LOAD8_U: "I32_LOAD8_U", I32_LT_S: "I32_LT_S",
	I32_LT_U: "I32_LT_U", I32_MUL: "I32_MUL", I32_NE: "I32_NE", I32_OR: "I32_OR",
	I32_POPCNT: "I32_POPCNT", I32_REINTERPRET_F32: "I32_REINTERPRET_F32",
	I32_REM_S: "I32_REM_S", I32_REM_U: "I32_REM_U", I32_ROTL: "I32_ROTL",
	I32_ROTR: "I32_ROTR", I32_SHL: "I32_SHL", I32_SHR_S: "I32_SHR_S",
	I32_SHR_U: "I32_SHR_U", I32_STORE: "I32_STORE", I32_STORE16: "I32_STORE16",
	I32_STORE8: "I32_STORE8", I32_SUB: "I32_SUB",
	I32_TRUNC_F32_S: "I32_TRUNC_F32_S", I32_TRUNC_F32_U: "I32_TRUNC_F32_U",
	I32_TRUNC_F64_S: "I32_TRUNC_F64_S", I32_TRUNC_F64_U: "I32_TRUNC_F64_U",
	I32_TRUNC_SAT_F32_S: "I32_TRUNC_SAT_F32_S", I32_TRUNC_SAT_F32_U: "I32_TRUNC_SAT_F32_U",
	I32_TRUNC_SAT_F64_S: "I32_TRUNC_SAT_F64_S", I32_TRUNC_SAT_F64_U: "I32_TRUNC_SAT_F64_U",
	I32_WRAP_I64: "I32_WRAP_I64", I32_XOR: "I32_XOR",
	I64: "I64", I64_ADD: "I64_ADD", I64_AND: "I64_AND", I64_CLZ: "I64_CLZ",
	I64_CONST: "I64_CONST", I64_CTZ: "I64_CTZ", I64_DIV_S: "I64_DIV_S",
	I64_DIV_U: "I64_DIV_U", I64_EQ: "I64_EQ", I64_EQZ: "I64_EQZ",
	I64_EXTEND16_S: "I64_EXTEND16_S", I64_EXTEND32_S: "I64_EXTEND32_S",
	I64_EXTEND8_S: "I64_EXTEND8_S", I64_EXTEND_I32_S: "I64_EXTEND_I32_S",
	I64_EXTEND_I32_U: "I64_EXTEND_I32_U", I64_GE_S: "I64_GE_S",
	I64_GE_U: "I64_GE_U", I64_GT_S: "I64_GT_S", I64_GT_U: "I64_GT_U",
	I64_LE_S: "I64_LE_S", I64_LE_U: "I64_LE_U", I64_LOAD: "I64_LOAD",
	I64_LOAD16_S: "I64_LOAD16_S", I64_LOAD16_U: "I64_LOAD16_U",
	I64_LOAD32_S: "I64_LOAD32_S", I64_LOAD32_U: "I64_LOAD32_U",
	I64_LOAD8_S: "I64_LOAD8_S", I64_LOAD8_U: "I64_LOAD8_U",
	I64_LT_S: "I64_LT_S", I64_LT_U: "I64_LT_U", I64_MUL: "I64_MUL",
	I64_NE: "I64_NE", I64_OR: "I64_OR", I64_POPCNT: "I64_POPCNT",
	I64_REINTERPRET_F64: "I64_REINTERPRET_F64", I64_REM_S: "I64_REM_S",
	I64_REM_U: "I64_REM_U", I64_ROTL: "I64_ROTL", I64_ROTR: "I64_ROTR",
	I64_SHL: "I64_SHL", I64_SHR_S: "I64_SHR_S", I64_SHR_U: "I64_SHR_U",
	I64_STORE: "I64_STORE", I64_STORE16: "I64_STORE16", I64_STORE32: "I64_STORE32",
	I64_STORE8: "I64_STORE8", I64_SUB: "I64_SUB",
	I64_TRUNC_F32_S: "I64_TRUNC_F32_S", I64_TRUNC_F32_U: "I64_TRUNC_F32_U",
	I64_TRUNC_F64_S: "I64_TRUNC_F64_S", I64_TRUNC_F64_U: "I64_TRUNC_F64_U",
	I64_TRUNC_SAT_F32_S: "I64_TRUNC_SAT_F32_S", I64_TRUNC_SAT_F32_U: "I64_TRUNC_SAT_F32_U",
	I64_TRUNC_SAT_F64_S: "I64_TRUNC_SAT_F64_S", I64_TRUNC_SAT_F64_U: "I64_TRUNC_SAT_F64_U",
	I64_XOR: "I64_XOR",
	IF: "IF", IMPORT: "IMPORT", INPUT: "INPUT", INT: "INT", INVOKE: "INVOKE",
	LOCAL: "LOCAL", LOCAL_GET: "LOCAL_GET", LOCAL_SET: "LOCAL_SET",
	LOCAL_TEE: "LOCAL_TEE", LOOP: "LOOP", MEMORY: "MEMORY",
	MEMORY_GROW: "MEMORY_GROW", MEMORY_SIZE: "MEMORY_SIZE", MODULE: "MODULE",
	MUT: "MUT", NAN_ARITHMETIC: "NAN_ARITHMETIC", NAN_CANONICAL: "NAN_CANONICAL",
	NAT: "NAT", NOP: "NOP", OFFSET: "OFFSET", OUTPUT: "OUTPUT", PARAM: "PARAM",
	QUOTE: "QUOTE", REGISTER: "REGISTER", RESULT: "RESULT", RETURN: "RETURN",
	SCRIPT: "SCRIPT", SELECT: "SELECT", START: "START", STRING: "STRING",
	TABLE: "TABLE", TEST: "TEST", THEN: "THEN", TYPE: "TYPE", UNARY: "UNARY",
	UNREACHABLE: "UNREACHABLE", VALUE_TYPE: "VALUE_TYPE", VAR: "VAR",
}

// String returns the identifier of a named token kind; a kind below the
// named range is a plain rune and is rendered as that rune, exactly as
// the previous switch's default case did.
func (t TokenKind) String() string {
	if name, ok := tokenKindNames[t]; ok {
		return name
	}
	return string(rune(t))
}
|
package dgraph
import (
"context"
"encoding/json"
"fmt"
"github.com/dgraph-io/dgo"
"github.com/dgraph-io/dgo/protos/api"
"google.golang.org/grpc"
)
// Client is the shared dgraph client; Connection is its underlying gRPC
// connection (see Open/Close).
var Client *dgo.Dgraph
var Connection *grpc.ClientConn

// init eagerly connects to a local dgraph instance and installs the schema.
// Errors are logged, not fatal, so importing the package never crashes.
// NOTE(review): doing network I/O in init hides failures and hurts
// testability — consider requiring an explicit Open from the caller.
func init() {
	// The original ignored Open's error; a failed dial left Client nil and
	// CreateSchema then dereferenced it.
	if err := Open("127.0.0.1:9080"); err != nil {
		fmt.Println("Error while connecting to dgraph ", err)
		return
	}
	if err := CreateSchema(); err != nil {
		fmt.Println("Error while creating schema ", err)
	}
}
// Open dials the dgraph gRPC endpoint at url and initializes the
// package-level Connection and Client. The dial is plaintext
// (grpc.WithInsecure — no TLS).
func Open(url string) error {
	conn, err := grpc.Dial(url, grpc.WithInsecure())
	if err != nil {
		return err
	}
	Connection = conn
	dc := api.NewDgraphClient(Connection)
	Client = dgo.NewDgraphClient(dc)
	return nil
}
// Close tears down the gRPC connection created by Open. The Close error is
// discarded; calling this before a successful Open would dereference a nil
// Connection.
func Close() {
	Connection.Close()
}
// CreateSchema alters the dgraph instance to register the predicates used
// by this package, each with a term index. Alter is idempotent for an
// unchanged schema, so running this on every start is safe in practice.
// NOTE(review): isService/isPod are declared as string — confirm bool was
// not intended.
func CreateSchema() error {
	op := &api.Operation{}
	op.Schema = `
name: string @index(term) .
xid: string @index(term) .
isService: string @index(term) .
isPod: string @index(term) .
`
	ctx := context.Background()
	err := Client.Alter(ctx, op)
	return err
}
// GetUId looks up the dgraph uid of the node whose xid equals id and which
// has the nodeType predicate. Returns an error when the query fails or no
// node matches.
//
// nodeType is spliced into the query text because predicate names cannot
// be passed as GraphQL+- variables; only call this with trusted,
// internally-defined type names (string concatenation into a query is an
// injection vector otherwise).
func GetUId(dg *dgo.Dgraph, id string, nodeType string) (string, error) {
	query := `query Me($id:string, $nodeType:string) {
getUid(func: eq(xid, $id)) @filter(has(` + nodeType + `)) {
uid
}
}`
	ctx := context.Background()
	variables := make(map[string]string)
	// NOTE(review): $nodeType is declared and bound but never referenced in
	// the query body (the filter uses the concatenated literal above).
	variables["$nodeType"] = nodeType
	variables["$id"] = id
	resp, err := dg.NewReadOnlyTxn().QueryWithVars(ctx, query, variables)
	if err != nil {
		return "", err
	}
	// Root mirrors the JSON shape of the getUid query block.
	type Root struct {
		IDs []ID `json:"getUid"`
	}
	var r Root
	err = json.Unmarshal(resp.Json, &r)
	if err != nil {
		return "", err
	}
	if len(r.IDs) == 0 {
		return "", fmt.Errorf("id %s is not in dgraph", id)
	}
	return r.IDs[0].UID, nil
}
// MutateNode applies n (a JSON node document) as a set mutation in its own
// transaction, committing immediately.
func MutateNode(dg *dgo.Dgraph, n []byte) error {
	mu := &api.Mutation{
		CommitNow: true,
		SetJson:   n,
	}
	_, err := dg.NewTxn().Mutate(context.Background(), mu)
	return err
}
|
package main
import (
"flag"
"fmt"
"github.com/drjerry/nnetlab/core"
"log"
"os"
)
// Args collects the command-line configuration for a run (see Parse).
type Args struct {
	dataStream    *os.File // example stream; stdin when -d is absent
	initialConfig string   // -i: initial network JSON (required)
	finalConfig   string   // -f: where to save the network on exit
	// training-related arguments
	testMode     bool    // -test: evaluate instead of train
	lossFunction string  // -loss
	learnRate    float32 // -rate (parsed as float64, narrowed in Parse)
	batchSize    int     // -batch
	quiet        bool    // -quiet: suppress per-batch logging
}

// Shared pipeline state, wired up once in main and used by
// trainingRun/testRun.
var (
	parser   Parser
	labelMap LabelMap
	network  core.Network
)
// main loads the network config, wires up the shared globals, then either
// trains or evaluates, optionally saving the final config.
func main() {
	log.SetFlags(log.Lmicroseconds)
	var args Args
	args.Parse()
	cfg, err := LoadConfig(args.initialConfig)
	if err != nil {
		log.Fatal(err)
	}
	// Plain '=' (not ':=') on purpose: this must populate the package-level
	// network used by trainingRun/testRun, not a shadowing local.
	network, err = core.NewNetwork(cfg.Dimensions, cfg.Weights, cfg.Activations)
	if err != nil {
		log.Fatal(err)
	}
	labelMap = NewLabelMap(cfg.Labels)
	// The parser writes each example directly into the input layer.
	parser = NewParser(args.dataStream, network.X[0])
	if !args.testMode {
		trainingRun(&args)
	} else {
		testRun(&args)
	}
	if args.finalConfig != "" {
		// NOTE(review): cfg is saved as loaded — confirm the trained weights
		// are reflected in cfg (e.g. shared slices with network) and not lost.
		if err := cfg.SaveConfig(args.finalConfig); err != nil {
			log.Fatal(err)
		}
	}
}
// Parse populates a from the command line. -i is mandatory; a missing -d
// falls back to stdin. Exits the process on a missing -i or an unopenable
// data file.
func (a *Args) Parse() {
	flag.StringVar(&a.initialConfig, "i", "", "initial JSON file (required)")
	flag.StringVar(&a.finalConfig, "f", "", "final JSON on exit")
	flag.StringVar(&a.lossFunction, "loss", "sigmoid", "loss function")
	flag.IntVar(&a.batchSize, "batch", 50, "mini-batch size")
	flag.BoolVar(&a.testMode, "test", false, "run in test mode")
	flag.BoolVar(&a.quiet, "quiet", false, "reduce logging")
	filename := flag.String("d", "", "data file [default stdin]")
	rate := flag.Float64("rate", 0.01, "learning rate")
	flag.Parse()

	if a.initialConfig == "" {
		flag.PrintDefaults()
		fmt.Println("missing -i argument")
		os.Exit(1)
	}

	a.dataStream = os.Stdin
	if *filename != "" {
		f, err := os.Open(*filename)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		a.dataStream = f
	}
	// flag only offers float64; the network API works in float32.
	a.learnRate = float32(*rate)
}
// trainingRun streams examples through the network, backpropagating every
// example and applying accumulated updates once per mini-batch. Tracks and
// logs a running mean of the loss.
func trainingRun(args *Args) {
	var exmplCt int
	var loss, meanLoss float32
	lossFtn := core.NewLossFunction(args.lossFunction)
	L := len(network.X) - 1 // index of the output layer
	oneHot := make([]float32, len(network.X[L]))
	dLoss := make([]float32, len(network.X[L]))
	for parser.Next() {
		// parser.Next has already written the example into network.X[0].
		network.Forward()
		labelMap.GetVector(parser.Label, oneHot)
		lossFtn.Loss(network.X[L], oneHot, &loss, dLoss)
		network.BackProp(dLoss)
		exmplCt++
		// Incremental running mean over all examples seen so far.
		meanLoss += (loss - meanLoss) / float32(exmplCt)
		if exmplCt%args.batchSize == 0 {
			network.Update(args.learnRate)
			if !args.quiet {
				log.Printf("examples %d loss %f\n", exmplCt, meanLoss)
			}
		}
	}
	if parser.Error != nil {
		log.Fatal(parser.Error)
	}
	// Flush any partial final batch.
	// NOTE(review): when exmplCt is an exact multiple of batchSize this
	// calls Update twice in a row — confirm core.Network.Update is a no-op
	// with nothing accumulated.
	network.Update(args.learnRate)
	log.Printf("examples %d loss %f\n", exmplCt, meanLoss)
}
// testRun evaluates the network over every example, printing the predicted
// label alongside the ground-truth label, one pair per line.
func testRun(args *Args) {
	out := len(network.X) - 1
	for parser.Next() {
		network.Forward()
		predicted, err := labelMap.GetLabel(network.X[out])
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s %s\n", predicted, parser.Label)
	}
	if parser.Error != nil {
		log.Fatal(parser.Error)
	}
}
|
package controllers
import (
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/itang/gotang"
"github.com/itang/yunshang/main/app/models"
"github.com/itang/yunshang/main/app/models/entity"
"github.com/itang/yunshang/main/app/routes"
"github.com/itang/yunshang/main/app/utils"
"github.com/lunny/xorm"
"github.com/revel/revel"
)
// Admin hosts the admin-side actions. The embedded AdminController
// provides the helpers used below (userApi, orderApi, setChannel,
// renderDTJson, ...).
type Admin struct {
	AdminController
}
// Index renders the admin dashboard, gathering the counters shown on the
// landing page.
func (c Admin) Index() revel.Result {
	userTotal := c.userApi().Total()
	orderTotal := c.orderApi().TotalNewOrders()
	// Approved vs. unapproved users and products.
	users := c.userApi().TotalAUsers()
	uusers := c.userApi().TotalUAUsers()
	products := c.productApi().TotalAProducts()
	uproducts := c.productApi().TotalUAProducts()
	stock_warning_products := c.productApi().TotalStockWarningProducts()
	// Order counts per lifecycle status.
	submited_orders := c.orderApi().TotalOrders(entity.OS_SUBMIT)
	payed_orders := c.orderApi().TotalOrders(entity.OS_PAY)
	ship_orders := c.orderApi().TotalOrders(entity.OS_VERIFY)
	recv_orders := c.orderApi().TotalOrders(entity.OS_FINISH)
	// Inquiries, product/news comments (total and awaiting moderation),
	// and feedback.
	ins := c.orderApi().TotalInquires()
	in_unreplies := c.orderApi().TotalUnreplyInquires()
	pcomments := c.productApi().TotalProductComments()
	pcomments_unconfirms := c.productApi().TotalProductCommentsUnconfirm()
	ncomments := c.newsApi().TotalNewsComments()
	ncomments_unconfirms := c.newsApi().TotalNewsCommentsUnconfirm()
	feedbacks := c.appApi().TotalFeedbacks()
	c.setChannel("/")
	return c.Render(userTotal, orderTotal, products, uproducts,
		ins, in_unreplies, submited_orders, payed_orders, ship_orders, recv_orders,
		pcomments, pcomments_unconfirms,
		ncomments, ncomments_unconfirms, stock_warning_products,
		users, uusers, feedbacks)
}
// Users renders the user-list page; the row data is loaded separately via
// UsersData.
func (c Admin) Users() revel.Result {
	c.setChannel("users/users")
	return c.Render()
}
// UsersData returns the paged user-list data, optionally filtered by
// enabled and certified status. Each filter applies only when its value is
// the literal "true" or "false"; anything else leaves the filter off.
func (c Admin) UsersData(filter_status string, filter_certified string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		if filter_status == "true" || filter_status == "false" {
			session.And("enabled=?", filter_status == "true")
		}
		if filter_certified == "true" || filter_certified == "false" {
			session.And("certified=?", filter_certified == "true")
		}
	})
	return c.renderDTJson(c.userApi().FindAllUsersForPage(ps))
}
// ResetUserPassword generates a random 6-character password for the user,
// stores it, and emails it to the user's registered address (30s timeout).
func (c Admin) ResetUserPassword(id int64) revel.Result {
	user, ok := c.userApi().GetUserById(id)
	if !ok {
		return c.RenderJson(Error("用户不存在", nil))
	}
	// The admin account is managed elsewhere.
	if user.IsAdminUser() {
		return c.RenderJson(Error("admin用户的状态不能通过此入口修改", nil))
	}
	newPassword := utils.RandomString(6)
	if err := c.userApi().DoChangePassword(&user, newPassword); err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	}
	err := gotang.DoIOWithTimeout(func() error {
		return models.SendHtmlMail("重置密码邮件",
			utils.RenderTemplate("Passport/ResetPasswordResultTemplate.html",
				struct {
					NewPassword string
				}{newPassword}),
			user.Email)
	}, time.Second*30)
	if err != nil {
		// The original panicked here. At this point the password has already
		// been changed, so crashing the request would lose the new password;
		// report the failure (with the password) to the admin instead.
		return c.RenderJson(Error("重置用户密码成功,但发送邮件失败: "+err.Error(), newPassword))
	}
	return c.RenderJson(Success("重置用户密码成功并新密码已经通过告知邮件用户", newPassword))
}
// ToggleUserEnabled activates/deactivates a user account; admin accounts
// are excluded from this entry point.
func (c Admin) ToggleUserEnabled(id int64) revel.Result {
	user, ok := c.userApi().GetUserById(id)
	if !ok {
		return c.RenderJson(Error("用户不存在", nil))
	}
	if user.IsAdminUser() {
		return c.RenderJson(Error("admin用户的状态不能通过此入口修改", nil))
	}
	err := c.userApi().ToggleUserEnabled(&user)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		// user.Enabled is read after the toggle — presumably updated in
		// place by ToggleUserEnabled; verify against the API implementation.
		if user.Enabled {
			return c.RenderJson(Success("激活用户成功!", nil))
		}
		return c.RenderJson(Success("禁用用户成功!", nil))
	}
}

// ToggleCommentEnabled approves or rejects a comment in moderation.
func (c Admin) ToggleCommentEnabled(id int64) revel.Result {
	comment, ok := c.userApi().GetCommentById(id)
	if !ok {
		return c.RenderJson(Error("评论不存在", nil))
	}
	err := c.userApi().ToggleCommentEnabled(&comment)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		if comment.Enabled {
			return c.RenderJson(Success("审核评论通过!", nil))
		}
		return c.RenderJson(Success("审核评论不通过!", nil))
	}
}

// ToggleUserCertified grants or revokes the "certified" flag; admin
// accounts are excluded.
func (c Admin) ToggleUserCertified(id int64) revel.Result {
	user, ok := c.userApi().GetUserById(id)
	if !ok {
		return c.RenderJson(Error("用户不存在", nil))
	}
	if user.IsAdminUser() {
		return c.RenderJson(Error("admin用户的状态不能通过此入口修改", nil))
	}
	err := c.userApi().ToggleUserCertified(&user)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		if user.Certified {
			return c.RenderJson(Success("设置用户认证成功!", nil))
		}
		return c.RenderJson(Success("取消用户认证成功!", nil))
	}
}

// ShowUserLoginLogs renders a user's login history.
func (c Admin) ShowUserLoginLogs(id int64) revel.Result {
	loginLogs := c.userApi().FindUserLoginLogs(id)
	return c.Render(loginLogs)
}

// ShowUserInfos renders a user's profile, detail record and delivery
// addresses. Lookup failures are ignored; missing rows render zero values.
func (c Admin) ShowUserInfos(id int64) revel.Result {
	user, _ := c.userApi().GetUserById(id)
	userDetail, _ := c.userApi().GetUserDetailByUserId(user.Id)
	userDas := c.userApi().FindUserDeliveryAddresses(user.Id)
	return c.Render(user, userDetail, userDas)
}
///////////////////////////////////////////////////////////////
// Products

// Products renders the product-list admin page.
func (c Admin) Products() revel.Result {
	c.setChannel("products/products")
	return c.Render()
}

// ProductsData returns the paginated product list as DataTables JSON.
// filter_status is "true"/"false"/other (no filter); filter_tag does a
// substring match on the tags column.
func (c Admin) ProductsData(filter_status string, filter_tag string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		switch filter_status {
		case "true":
			session.And("enabled=?", true)
		case "false":
			session.And("enabled=?", false)
		}
		if len(filter_tag) > 0 {
			session.And("tags like ?", "%"+filter_tag+"%")
		}
	})
	page := c.productApi().FindAllProductsForPage(ps)
	return c.renderDTJson(page)
}

// NewProduct renders the create/edit form. id == 0 means "new"; otherwise
// the product plus its detail HTML, stock logs and price splits are loaded.
func (c Admin) NewProduct(id int64) revel.Result {
	var (
		p         entity.Product
		detail    = ""
		stockLogs []entity.ProductStockLog
		splits    = ""
	)
	if id == 0 { // new
		p.MinNumberOfOrders = 1
	} else { // edit
		p, _ = c.productApi().GetProductById(id)
		detail, _ = c.productApi().GetProductDetail(p.Id)
		stockLogs = c.productApi().FindAllProductStockLogs(p.Id)
		splits = c.productApi().GetProductPricesSplits(p.Id)
	}
	revel.INFO.Println("splits", splits)
	return c.Render(p, detail, stockLogs, splits)
}

// DoNewProduct validates and persists a product, then redirects back to
// the edit form with a flash message.
func (c Admin) DoNewProduct(p entity.Product) revel.Result {
	c.Validation.Required(p.Name).Message("请填写名称").Key("name")
	c.Validation.Required(p.MinNumberOfOrders >= 1).Message("起订最小数量应该大于0").Key("min_number_of_orders")
	if ret := c.doValidate(routes.Admin.NewProduct(p.Id)); ret != nil {
		return ret
	}
	id, err := c.productApi().SaveProduct(p)
	if err != nil {
		c.Flash.Error("保存产品失败,请重试!" + err.Error())
	} else {
		c.Flash.Success("保存产品成功!")
	}
	return c.Redirect(routes.Admin.NewProduct(id))
}

// ToggleProductEnabled flips a product's shelf status (on/off sale).
func (c Admin) ToggleProductEnabled(id int64) revel.Result {
	api := c.productApi()
	p, ok := api.GetProductById(id)
	if !ok {
		return c.RenderJson(Error("产品不存在", nil))
	}
	err := api.ToggleProductEnabled(&p)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		if p.Enabled {
			return c.RenderJson(Success("上架成功!", nil))
		}
		return c.RenderJson(Success("下架成功!", nil))
	}
}
// UploadProductImage stores uploaded product images of type t
// (entity.PTScheDiag → schematic thumbnails under data/products/sd/,
// entity.PTPics → gallery pictures under data/products/pics/). For each
// file it inserts a ProductParams row, then writes the resized JPEG.
func (c Admin) UploadProductImage(id int64, t int) revel.Result {
	var (
		dir, ct string
		count   int
	)
	if t == entity.PTScheDiag {
		dir = "data/products/sd/"
		ct = "thumbnail"
	} else if t == entity.PTPics {
		dir = "data/products/pics/"
		ct = "fit"
	} else {
		return c.RenderJson(Error("上传失败! 类型不对", nil))
	}
	for _, fileHeaders := range c.Params.Files {
		for _, fileHeader := range fileHeaders {
			p := entity.ProductParams{Type: t, Name: fileHeader.Filename, ProductId: id}
			e, err := c.db.Insert(&p)
			gotang.Assert(e == 1, "New")
			gotang.AssertNoError(err, `Insert`)
			to := utils.Uuid() + ".jpg"
			p.Value = to
			c.db.Id(p.Id).Cols("value").Update(&p)
			// Fix: the Open error used to be silently discarded and the
			// file handle was never closed (leaked on every upload).
			from, err := fileHeader.Open()
			gotang.AssertNoError(err, `fileHeader.Open()`)
			err = utils.MakeAndSaveFromReader(from, dir+to, ct, 200, 200)
			from.Close()
			gotang.AssertNoError(err, "生成图片出错!")
			count++
		}
	}
	if count == 0 {
		return c.RenderJson(Error("请选择要上传的图片", nil))
	}
	return c.RenderJson(Success("上传成功!", nil))
}
// UploadProductImageForUEditor handles image uploads from the UEditor
// rich-text widget. Each file is stored as a product picture
// (entity.PTPics) and the response uses the JSON shape UEditor expects
// (original/url/title/state); only the last uploaded file is reported.
func (c Admin) UploadProductImageForUEditor(id int64) revel.Result {
	dir := "data/products/pics/"
	t := entity.PTPics
	var Original = ""
	var Url = ""
	var Title = ""
	var State = ""
	for _, fileHeaders := range c.Params.Files {
		for _, fileHeader := range fileHeaders {
			p := entity.ProductParams{Type: t, Name: fileHeader.Filename, ProductId: id}
			e, err := c.db.Insert(&p)
			gotang.Assert(e == 1, "New")
			gotang.AssertNoError(err, `Insert`)
			to := utils.Uuid() + ".jpg"
			p.Value = to
			c.db.Id(p.Id).Cols("value").Update(&p)
			// Fix: the Open error used to be discarded and the upload
			// stream was never closed (handle leak per image).
			from, err := fileHeader.Open()
			gotang.AssertNoError(err, `fileHeader.Open()`)
			err = utils.MakeAndSaveFromReader(from, dir+to, "fit", 600, 600)
			from.Close()
			gotang.AssertNoError(err, "生成图片出错!")
			Original = fileHeader.Filename
			Title = Original
			State = "SUCCESS"
			Url = "?file=" + to
		}
	}
	ret := struct {
		Original string `json:"original"`
		Url      string `json:"url"`
		Title    string `json:"title"`
		State    string `json:"state"`
	}{Original, Url, Title, State}
	return c.RenderJson(ret)
}
// SaveProductDetail stores the rich-text detail content for a product.
func (c Admin) SaveProductDetail(id int64, content string) revel.Result {
	err := c.productApi().SaveProductDetail(id, content)
	if err != nil {
		return c.RenderJson(Error("保存信息出错,"+err.Error(), nil))
	}
	return c.RenderJson(Success("保存信息成功!", nil))
}
// UploadProductMaterial stores uploaded material files verbatim under
// data/products/m/, naming each one "<paramId>-<originalName>" and
// recording it as an entity.PTMaterial ProductParams row.
func (c Admin) UploadProductMaterial(id int64) revel.Result {
	count := 0
	for _, fileHeaders := range c.Params.Files {
		for _, fileHeader := range fileHeaders {
			to := ""
			p := entity.ProductParams{Type: entity.PTMaterial, Name: fileHeader.Filename, Value: to, ProductId: id}
			e, err := c.db.Insert(&p)
			gotang.Assert(e == 1, "New")
			gotang.AssertNoError(err, `Insert`)
			// Insert first to obtain p.Id, then derive the on-disk name.
			to = fmt.Sprintf("%d-%s", p.Id, fileHeader.Filename)
			p.Value = to
			c.db.Id(p.Id).Cols("value").Update(&p)
			out, err := os.Create("data/products/m/" + to)
			gotang.AssertNoError(err, `os.Create`)
			in, err := fileHeader.Open()
			gotang.AssertNoError(err, `fileHeader.Open()`)
			// Fix: the io.Copy error was previously ignored, so a partial
			// write (disk full, aborted upload) was reported as success.
			// The Close error on the writer is checked too, since buffered
			// data may only fail at close time.
			_, err = io.Copy(out, in)
			in.Close()
			cerr := out.Close()
			gotang.AssertNoError(err, `io.Copy`)
			gotang.AssertNoError(cerr, `out.Close`)
			count++
		}
	}
	if count == 0 {
		return c.RenderJson(Error("请选择要上传的文件", nil))
	}
	return c.RenderJson(Success("上传成功!", nil))
}
// deleteProductParams removes one ProductParams row and reports the
// outcome as a JSON result.
func (c Admin) deleteProductParams(id int64) revel.Result {
	err := c.productApi().DeleteProductParams(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("删除成功!", ""))
}
// DeleteSdImage removes a schematic-diagram image record.
func (c Admin) DeleteSdImage(id int64) revel.Result {
	return c.deleteProductParams(id)
}

// DeleteImagePic removes a gallery-picture record.
func (c Admin) DeleteImagePic(id int64) revel.Result {
	return c.deleteProductParams(id)
}

// DeleteMFile removes a product-material file record.
func (c Admin) DeleteMFile(id int64) revel.Result {
	return c.deleteProductParams(id)
}

// DoSaveProductSpec inserts (id == 0) or updates a product spec row.
func (c Admin) DoSaveProductSpec(productId int64, id int64, name string, value string) revel.Result {
	pp := entity.ProductParams{ProductId: productId, Id: id, Name: name, Value: value, Type: entity.PTSpec}
	if id == 0 { // new
		c.db.Insert(&pp)
	} else { // update
		c.db.Id(id).Update(&pp)
	}
	return c.RenderJson(Success("操作完成!", ""))
}

// DeleteSpec removes a product spec row.
func (c Admin) DeleteSpec(id int64) revel.Result {
	return c.deleteProductParams(id)
}

// DeletePrice removes one price row.
// NOTE(review): both the Get and Delete errors are ignored — deleting a
// nonexistent id silently reports success; confirm that is intended.
func (c Admin) DeletePrice(id int64) revel.Result {
	var price entity.ProductPrices
	_, _ = c.db.Where("id=?", id).Get(&price)
	c.db.Delete(&price)
	return c.RenderJson(Success("", ""))
}

// ProductStockLogs returns all stock-change logs for a product as JSON.
func (c Admin) ProductStockLogs(id int64) revel.Result {
	logs := c.productApi().FindAllProductStockLogs(id)
	return c.RenderJson(Success("", logs))
}

// AddProductStock records a nonzero stock adjustment and returns the new
// stock level.
func (c Admin) AddProductStock(productId int64, stock int, message string) revel.Result {
	c.Validation.Required(stock != 0)
	if c.Validation.HasErrors() {
		return c.RenderJson(Error("请填入合法的入库数", nil))
	}
	newStock, err := c.productApi().AddProductStock(productId, stock, message)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("操作成功!", newStock))
}

// DoSaveSplitProductPrice rebuilds the quantity-tiered price splits from
// the submitted start-quantity string.
func (c Admin) DoSaveSplitProductPrice(productId int64, start_quantitys string) revel.Result {
	err := c.productApi().SplitProductPrices(productId, start_quantitys)
	if err != nil {
		return c.RenderJson(Error(err.Error(), "start_quantitys"))
	}
	return c.RenderJson(Success("操作成功!", ""))
}
// DoSaveProductPrice updates the price of one tiered-price row and then
// refreshes the denormalized price on the product itself.
func (c Admin) DoSaveProductPrice(productId int64, id int64, price float64) revel.Result {
	if price < 0 {
		return c.RenderJson(Error("请输入合法的价格(>=0)", "price"))
	}
	var p entity.ProductPrices
	_, err := c.db.Where("id=?", id).Get(&p)
	if err != nil {
		return c.RenderJson(Error("操作失败!", err.Error()))
	}
	p.Price = price
	_, err = c.db.Id(p.Id).Cols("price").Update(&p)
	// Fix: this Update error was previously overwritten by the next call
	// without ever being checked, so a failed price write reported success.
	if err != nil {
		return c.RenderJson(Error("操作失败!", err.Error()))
	}
	// Refresh the redundant (denormalized) price stored on the product.
	err = c.productApi().UpdateProductPrice(productId)
	gotang.AssertNoError(err, "")
	return c.RenderJson(Success("操作成功!", ""))
}
/////////////////////////////////////////////////////
// Product categories

// Categories renders the category-list admin page.
func (c Admin) Categories() revel.Result {
	c.setChannel("products/categories")
	return c.Render()
}

// CategoriesData returns the paginated category list as DataTables JSON.
func (c Admin) CategoriesData(filter_status, filter_tags string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		switch filter_status {
		case "true":
			session.And("enabled=?", true)
		case "false":
			session.And("enabled=?", false)
		}
		if len(filter_tags) > 0 {
			session.And("tags=?", filter_tags)
		}
	})
	page := c.productApi().FindAllCategoriesForPage(ps)
	return c.renderDTJson(page)
}

// NewCategory renders the create/edit form (id == 0 means "new").
func (c Admin) NewCategory(id int64) revel.Result {
	var p entity.ProductCategory
	if id == 0 { // new: render the zero value
		//p = entity.Provider{}
	} else { // edit
		p, _ = c.productApi().GetCategoryById(id)
	}
	return c.Render(p)
}

// DoNewCategory validates and persists a category, then redirects back
// to the edit form with a flash message.
func (c Admin) DoNewCategory(p entity.ProductCategory) revel.Result {
	c.Validation.Required(p.Name).Message("请填写名称")
	if ret := c.doValidate(routes.Admin.NewCategory(p.Id)); ret != nil {
		return ret
	}
	id, err := c.productApi().SaveCategory(p)
	if err != nil {
		c.Flash.Error("保存分类失败,请重试!" + err.Error())
	} else {
		c.Flash.Success("保存分类成功!")
	}
	return c.Redirect(routes.Admin.NewCategory(id))
}

// ToggleCategoryEnabled flips a category's enabled flag.
func (c Admin) ToggleCategoryEnabled(id int64) revel.Result {
	api := c.productApi()
	p, ok := api.GetCategoryById(id)
	if !ok {
		return c.RenderJson(Error("分类不存在", nil))
	}
	err := api.ToggleCategoryEnabled(&p)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		if p.Enabled {
			return c.RenderJson(Success("激活成功!", nil))
		}
		return c.RenderJson(Success("禁用成功!", nil))
	}
}
//////////////////////////////////////////////////////////////////
// Providers

// Providers renders the provider (manufacturer) list page.
func (c Admin) Providers() revel.Result {
	c.setChannel("providers/providers")
	return c.Render()
}

// ProvidersData returns the paginated provider list as DataTables JSON.
func (c Admin) ProvidersData(filter_status string, filter_tags string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		switch filter_status {
		case "true":
			session.And("enabled=?", true)
		case "false":
			session.And("enabled=?", false)
		}
		if len(filter_tags) > 0 {
			session.And("tags=?", filter_tags)
		}
	})
	page := c.productApi().FindAllProvidersForPage(ps)
	return c.renderDTJson(page)
}

// NewProvider renders the create/edit form (id == 0 means "new").
func (c Admin) NewProvider(id int64) revel.Result {
	var p entity.Provider
	if id == 0 { // new: render the zero value
		//p = entity.Provider{}
	} else { // edit
		p, _ = c.productApi().GetProviderById(id)
	}
	return c.Render(p)
}

// DoNewProvider validates and persists a provider, then redirects back
// to the edit form with a flash message.
func (c Admin) DoNewProvider(p entity.Provider) revel.Result {
	c.Validation.Required(p.Name).Message("请填写名称")
	if ret := c.doValidate(routes.Admin.NewProvider(p.Id)); ret != nil {
		return ret
	}
	id, err := c.productApi().SaveProvider(p)
	if err != nil {
		c.Flash.Error("保存制造商失败,请重试!" + err.Error())
	} else {
		c.Flash.Success("保存制造商成功!")
	}
	return c.Redirect(routes.Admin.NewProvider(id))
}

// ToggleProviderEnabled flips a provider's enabled flag.
func (c Admin) ToggleProviderEnabled(id int64) revel.Result {
	api := c.productApi()
	p, ok := api.GetProviderById(id)
	if !ok {
		return c.RenderJson(Error("制造商不存在", nil))
	}
	err := api.ToggleProviderEnabled(&p)
	if err != nil {
		return c.RenderJson(Error(err.Error(), nil))
	} else {
		if p.Enabled {
			return c.RenderJson(Success("激活成功!", nil))
		}
		return c.RenderJson(Success("禁用成功!", nil))
	}
}

// DeleteProvider removes a provider.
// NOTE(review): the delete error is deliberately discarded and success is
// always reported — confirm best-effort semantics are intended.
func (c Admin) DeleteProvider(id int64) revel.Result {
	_ = c.productApi().DeleteProvider(id)
	return c.RenderJson(Success("删除成功!", nil))
}
// UploadProviderImage saves the uploaded provider logo as a 150x150
// "fit" JPEG named <id>.jpg under the configured providers directory.
func (c Admin) UploadProviderImage(id int64, image *os.File) revel.Result {
	c.Validation.Required(image != nil)
	if c.Validation.HasErrors() {
		return c.RenderJson(Error("请选择图片", nil))
	}
	p, exists := c.productApi().GetProviderById(id)
	if !exists {
		return c.RenderJson(Error("操作失败,制造商不存在", nil))
	}
	to := filepath.Join(revel.Config.StringDefault("dir.data.providers", "data/providers"), fmt.Sprintf("%d.jpg", p.Id))
	// NOTE(review): arguments here are (reader, mode, path) while
	// MakeAndSaveFromReader below takes (reader, path, mode) — they are
	// different helpers, but verify the order against utils.
	err := utils.MakeAndSaveFromReaderMaxWithMode(image, "fit", to, 150, 150)
	if ret := c.checkUploadError(err, "保存上传图片报错!"); ret != nil {
		return ret
	}
	return c.RenderJson(Success("上传成功", nil))
}

// UploadProductLogo saves the uploaded product logo as a 200x200
// thumbnail JPEG named <id>.jpg under the configured logo directory.
func (c Admin) UploadProductLogo(id int64, image *os.File) revel.Result {
	c.Validation.Required(image != nil)
	if c.Validation.HasErrors() {
		return c.RenderJson(Error("请选择图片", nil))
	}
	p, exists := c.productApi().GetProductById(id)
	if !exists {
		return c.RenderJson(Error("操作失败,产品不存在", nil))
	}
	to := filepath.Join(revel.Config.StringDefault("dir.data.products/logo", "data/products/logo"), fmt.Sprintf("%d.jpg", p.Id))
	err := utils.MakeAndSaveFromReader(image, to, "thumbnail", 200, 200)
	if ret := c.checkUploadError(err, "保存上传图片报错!"); ret != nil {
		return ret
	}
	return c.RenderJson(Success("上传成功", nil))
}

// ProviderRecommends renders the recommended-providers page.
func (c Admin) ProviderRecommends() revel.Result {
	c.setChannel("providers/recommends")
	return c.Render()
}

// ProductHots renders the hot-products page.
func (c Admin) ProductHots() revel.Result {
	c.setChannel("products/hots")
	return c.Render()
}
// checkUploadError converts an upload error into a JSON error result,
// logging a warning as a side effect. It returns nil when err is nil so
// callers can use it as a guard.
func (c Admin) checkUploadError(err error, msg string) revel.Result {
	if err == nil {
		return nil
	}
	detail := err.Error()
	revel.WARN.Printf("上传头像操作失败,%s, msg:%s", msg, detail)
	return c.RenderJson(Error("操作失败,"+msg+", "+detail, nil))
}
// AdImages renders the advertisement-images admin page.
func (c Admin) AdImages() revel.Result {
	c.setChannel("system/adimages")
	return c.Render()
}
// UploadAdImage stores each uploaded advertisement image as a 698x220
// "fit" JPEG under data/adimages/, recording an entity.ATAd AppParams row
// per file.
func (c Admin) UploadAdImage() revel.Result {
	var (
		dir   = "data/adimages/"
		t     = entity.ATAd
		count = 0
	)
	for _, fileHeaders := range c.Params.Files {
		for _, fileHeader := range fileHeaders {
			to := utils.Uuid() + ".jpg"
			p := entity.AppParams{Type: t, Name: fileHeader.Filename, Value: to}
			e, err := c.db.Insert(&p)
			gotang.Assert(e == 1, "New")
			gotang.AssertNoError(err, `Insert`)
			// Fix: the Open error used to be discarded and the upload
			// stream was never closed (handle leak per image).
			from, err := fileHeader.Open()
			gotang.AssertNoError(err, `fileHeader.Open()`)
			err = utils.MakeAndSaveFromReader(from, dir+to, "fit", 698, 220)
			from.Close()
			gotang.AssertNoError(err, "生成图片出错!")
			count++
		}
	}
	if count == 0 {
		return c.RenderJson(Error("请选择要上传的图片", nil))
	}
	return c.RenderJson(Success("上传成功!", nil))
}
// DeleteAdImage removes an advertisement image record.
func (c Admin) DeleteAdImage(id int64) revel.Result {
	c.appApi().DeleteAdImage(id)
	return c.RenderJson(Success("", ""))
}

// SetFirstAdImageUrl moves an ad image to the front; the error is
// deliberately discarded (best-effort).
func (c Admin) SetFirstAdImageUrl(id int64) revel.Result {
	_ = c.appApi().SetFirstAdImage(id)
	return c.RenderJson(Success("", ""))
}

// SetAdImageLink sets the click-through link of an ad image (best-effort).
func (c Admin) SetAdImageLink(id int64, link string) revel.Result {
	_ = c.appApi().SetAdImageLink(id, link)
	return c.RenderJson(Success("", ""))
}

// HotKeywords renders the hot-search-keywords admin page.
func (c Admin) HotKeywords() revel.Result {
	c.setChannel("system/hotkeywords")
	return c.Render()
}

// DeleteHotKeyword removes a hot keyword.
func (c Admin) DeleteHotKeyword(id int64) revel.Result {
	c.appApi().DeleteHotKeyword(id)
	return c.RenderJson(Success("", ""))
}

// SetFirstHotKeyword moves a keyword to the front (best-effort).
func (c Admin) SetFirstHotKeyword(id int64) revel.Result {
	_ = c.appApi().SetFirstHotKeyword(id)
	return c.RenderJson(Success("", ""))
}

// DoSaveHotKeyword inserts (id == 0) or updates a hot-keyword row.
func (c Admin) DoSaveHotKeyword(id int64, value string) revel.Result {
	pp := entity.AppParams{Id: id, Name: "", Value: value, Type: entity.ATHk}
	if id == 0 { // new
		c.db.Insert(&pp)
	} else { // update
		c.db.Id(id).Update(&pp)
	}
	return c.RenderJson(Success("操作完成!", ""))
}

// Slogan renders the site-slogan editor.
func (c Admin) Slogan() revel.Result {
	c.setChannel("system/slogan")
	p, _ := c.appApi().GetSlogan()
	return c.Render(p)
}

// SaveSlogan persists the slogan and returns to the editor.
func (c Admin) SaveSlogan(p entity.AppParams) revel.Result {
	c.appApi().SaveSlogan(p)
	return c.Redirect(Admin.Slogan)
}
// Orders renders the order-list page; the order-status, payment-method
// and shipping maps are serialized to JSON for client-side rendering.
func (c Admin) Orders() revel.Result {
	osJSON := utils.ToJSON(entity.OSMap)
	pmJSON := utils.ToJSON(entity.PMMap)
	spJSON := utils.ToJSON(entity.SPMap)
	c.setChannel("orders/index")
	return c.Render(osJSON, pmJSON, spJSON)
}

// OrdersData returns paginated submitted orders; filter_status == 0
// means "all statuses".
func (c Admin) OrdersData(filter_status int) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		if filter_status != 0 {
			session.And("status=?", filter_status)
		}
	})
	orders := c.orderApi().FindSubmitedOrdersForPage(ps)
	return c.renderDTJson(orders)
}

// ShowOrder renders one order, looked up by owner id + order code.
func (c Admin) ShowOrder(userId int64, code string) revel.Result {
	order, exists := c.orderApi().GetOrder(userId, code)
	if !exists {
		return c.NotFound("订单不存在!")
	}
	orderBy := c.userApi().GetUserDesc(order.UserId)
	return c.Render(order, orderBy)
}

// ToggleOrderLock locks or unlocks an order.
func (c Admin) ToggleOrderLock(id int64) revel.Result {
	err := c.orderApi().ToggleOrderLock(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}

// ChangeOrderPayed marks an order as paid.
func (c Admin) ChangeOrderPayed(id int64) revel.Result {
	err := c.orderApi().ChangeOrderPayed(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}

// ChangePayAmount overrides the payable amount of an order.
func (c Admin) ChangePayAmount(id int64, payAmount float64) revel.Result {
	err := c.orderApi().ChangePayAmount(id, payAmount)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}

// ChangeOrderVerify marks an order as verified.
func (c Admin) ChangeOrderVerify(id int64) revel.Result {
	err := c.orderApi().ChangeOrderVerify(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}

// ChangeOrderShiped marks an order as shipped.
func (c Admin) ChangeOrderShiped(id int64) revel.Result {
	err := c.orderApi().ChangeOrderShiped(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}
// ProductComments renders the product-comments moderation page.
func (c Admin) ProductComments() revel.Result {
	c.setChannel("products/comments")
	return c.Render()
}

// ProductCommentsData returns paginated product comments, optionally
// filtered by moderation status; the target type is fixed to products.
func (c Admin) ProductCommentsData(filter_status string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		switch filter_status {
		case "true":
			session.And("enabled=?", true)
		case "false":
			session.And("enabled=?", false)
		}
		session.And("target_type=?", entity.CT_PRODUCT)
	})
	page := c.userApi().CommentsForPage(ps)
	return c.renderDTJson(page)
}

// Prices renders the inquiries (price-request) admin page.
func (c Admin) Prices() revel.Result {
	c.setChannel("prices/index")
	return c.Render()
}

// PricesData returns paginated inquiries; "1" = unanswered (no replies),
// "2" = answered, anything else = all.
func (c Admin) PricesData(filter_status string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
		if filter_status == "1" {
			session.And("replies = 0")
		} else if filter_status == "2" {
			session.And("replies > 0")
		}
	})
	page := c.orderApi().FindAllInquiresForPage(ps)
	return c.renderDTJson(page)
}

// Feedbacks renders the user-feedback admin page.
func (c Admin) Feedbacks() revel.Result {
	c.setChannel("prices/feedback")
	return c.Render()
}

// FeedbacksData returns paginated feedback entries.
// NOTE(review): filter_status is accepted but never applied — the search
// callback is empty. Confirm whether a status filter was intended here.
func (c Admin) FeedbacksData(filter_status string) revel.Result {
	ps := c.pageSearcherWithCalls(func(session *xorm.Session) {
	})
	page := c.appApi().FindAllFeedbacksForPage(ps)
	return c.renderDTJson(page)
}
// NewInquiryReply renders one inquiry together with its reply thread.
func (c Admin) NewInquiryReply(id int64) revel.Result {
	in, exists := c.orderApi().GetInquiryById(id)
	if !exists {
		return c.NotFound("此询价不存在!")
	}
	replies := c.orderApi().GetInquiryReplies(id)
	return c.Render(in, replies)
}

// DoNewInquiryReply stores a reply authored by the logged-in admin and
// redirects back to the thread with a flash message either way.
func (c Admin) DoNewInquiryReply(reply entity.InquiryReply) revel.Result {
	reply.UserId = c.forceSessionUserId()
	err := c.orderApi().SaveInquiryReply(reply)
	if err != nil {
		// Keep the submitted form values in the flash so the user can retry.
		c.FlashParams()
		c.Flash.Error("回复出错,请重试!")
		return c.Redirect(routes.Admin.NewInquiryReply(reply.InquiryId))
	}
	c.Flash.Success("回复成功!")
	return c.Redirect(routes.Admin.NewInquiryReply(reply.InquiryId))
}

// DeleteInquiryReply removes one reply.
func (c Admin) DeleteInquiryReply(id int64) revel.Result {
	err := c.orderApi().DeleteInquiryReply(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("操作完成", ""))
}
// Site renders the basic site-settings form ("site.basic" section).
func (c Admin) Site() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.basic")
	c.setChannel("system/site")
	return c.Render(ps)
}

// SaveSiteBasic persists each submitted key/value pair, then re-runs
// initOAuth — presumably because OAuth setup reads these settings; verify.
func (c Admin) SaveSiteBasic(p []entity.StringKV) revel.Result {
	c.Flash.Success("保存成功!")
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	initOAuth()
	return c.Redirect(Admin.Site)
}

// SiteComment renders the comment-settings form ("site.comment" section).
func (c Admin) SiteComment() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.comment")
	c.setChannel("system/system_comment")
	return c.Render(ps)
}

// SaveSiteComment persists the comment settings.
func (c Admin) SaveSiteComment(p []entity.StringKV) revel.Result {
	c.Flash.Success("保存成功!")
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	return c.Redirect(Admin.SiteComment)
}

// Contact renders the contact-info form ("site.contact" section).
func (c Admin) Contact() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.contact")
	c.setChannel("system/contact")
	return c.Render(ps)
}

// SaveSiteContact persists the contact settings.
func (c Admin) SaveSiteContact(p []entity.StringKV) revel.Result {
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	c.Flash.Success("保存成功!")
	return c.Redirect(Admin.Contact)
}

// Mail renders the outgoing-mail settings form ("site.mail" section).
func (c Admin) Mail() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.mail")
	c.setChannel("system/mail")
	return c.Render(ps)
}

// SaveMail persists the mail settings.
func (c Admin) SaveMail(p []entity.StringKV) revel.Result {
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	c.Flash.Success("保存成功!")
	return c.Redirect(Admin.Mail)
}
// Shippings renders the shipping-methods settings form.
func (c Admin) Shippings() revel.Result {
	ps := c.orderApi().FindAllShippings()
	c.setChannel("system/shippings")
	return c.Render(ps)
}

// SaveShippings persists the shipping methods and reports via flash.
func (c Admin) SaveShippings(p []entity.Shipping) revel.Result {
	err := c.orderApi().SaveShippings(p)
	if err != nil {
		c.Flash.Error("保存失败!" + err.Error())
	} else {
		c.Flash.Success("保存成功!")
	}
	return c.Redirect(Admin.Shippings)
}

// Payments renders the payment-methods settings form.
func (c Admin) Payments() revel.Result {
	ps := c.orderApi().FindAllPayments()
	c.setChannel("system/payments")
	return c.Render(ps)
}

// SavePayments persists the payment methods and reports via flash.
func (c Admin) SavePayments(p []entity.Payment) revel.Result {
	err := c.orderApi().SavePayments(p)
	if err != nil {
		c.Flash.Error("保存失败!" + err.Error())
	} else {
		c.Flash.Success("保存成功!")
	}
	return c.Redirect(Admin.Payments)
}

// Banks renders the bank-accounts settings form.
func (c Admin) Banks() revel.Result {
	ps := c.orderApi().FindAllBanks()
	c.setChannel("system/banks")
	return c.Render(ps)
}

// SaveBanks persists the bank accounts and reports via flash.
func (c Admin) SaveBanks(p []entity.Bank) revel.Result {
	err := c.orderApi().SaveBanks(p)
	if err != nil {
		c.Flash.Error("保存失败!" + err.Error())
	} else {
		c.Flash.Success("保存成功!")
	}
	return c.Redirect(Admin.Banks)
}

// Alipay renders the Alipay settings form ("site.alipay" section).
func (c Admin) Alipay() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.alipay")
	c.setChannel("system/alipay")
	return c.Render(ps)
}

// SaveAlipay persists the Alipay settings.
func (c Admin) SaveAlipay(p []entity.StringKV) revel.Result {
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	c.Flash.Success("保存成功!")
	return c.Redirect(Admin.Alipay)
}
// SetOrderBack reverts/rolls back an order's state via the order API.
func (c Admin) SetOrderBack(id int64) revel.Result {
	err := c.orderApi().SetOrderBack(id)
	if ret := c.checkErrorAsJsonResult(err); ret != nil {
		return ret
	}
	return c.RenderJson(Success("", ""))
}

// SetUserRole renders the role editor for a user; gotang.Assert fires
// when the user does not exist.
func (c Admin) SetUserRole(id int64) revel.Result {
	user, exists := c.userApi().GetUserById(id)
	gotang.Assert(exists, "")
	return c.Render(user)
}

// DoSaveUserRole persists the role string and redirects to the editor
// with a flash message.
func (c Admin) DoSaveUserRole(id int64, roles string) revel.Result {
	err := c.userApi().SaveUserRole(id, roles)
	if err != nil {
		c.Flash.Error("保存失败!" + err.Error())
	} else {
		c.Flash.Success("保存成功!")
	}
	return c.Redirect(routes.Admin.SetUserRole(id))
}
// TestMail sends a test message to the given address (with a 30s I/O
// timeout) and reports the outcome as JSON.
func (c Admin) TestMail(email string) revel.Result {
	send := func() error {
		return models.SendHtmlMail("测试邮件", "这是一封测试邮件", email)
	}
	if err := gotang.DoIOWithTimeout(send, 30*time.Second); err != nil {
		return c.RenderJson(Error("发送失败,"+err.Error(), ""))
	}
	return c.RenderJson(Success("发送完成, 请登录邮箱查收", ""))
}
// Opens renders the open-platform (OAuth) settings form ("site.open").
func (c Admin) Opens() revel.Result {
	ps := c.appConfigApi().FindConfigsBySection("site.open")
	c.setChannel("system/open")
	return c.Render(ps)
}

// SaveOpens persists each key/value pair and re-initializes OAuth so the
// new settings take effect.
func (c Admin) SaveOpens(p []entity.StringKV) revel.Result {
	c.Flash.Success("保存成功!")
	for _, v := range p {
		c.appConfigApi().SaveOrUpdateConfig(v.Key, v.Value, "")
	}
	// try reset OAuth set
	initOAuth()
	return c.Redirect(Admin.Opens)
}
|
package buildInfo
import (
"fmt"
"github.com/mitchellh/cli"
)
// CommandT is a Command implementation that returns version information
type CommandT struct {
	bi *BuildInfo // version/build metadata to display
	ui cli.Ui     // destination for command output
}

// Command builds and returns a CommandT bound to this BuildInfo.
// The error return is always nil; it exists for callers that expect a
// fallible constructor.
func (bi *BuildInfo) Command(ui cli.Ui) (*CommandT, error) {
	return &CommandT{
		bi: bi,
		ui: ui,
	}, nil
}

// Run executes the command and returns the exit code; args are ignored.
func (c *CommandT) Run(args []string) int {
	// show output
	return c.show()
}

// Synopsis shows the command summary.
func (c *CommandT) Synopsis() string {
	return "Display application version information"
}

// Help shows the detailed command options.
func (c *CommandT) Help() string {
	return fmt.Sprintf(`Usage: %s version [options]
Display application version and dependency information.
`, c.bi.Name)
}

// show prints name, version, build, branch and date to the UI and
// returns the process exit code (always 0).
func (c *CommandT) show() int {
	// print standard version
	c.ui.Output(fmt.Sprintf("==>\t%s v%s\nBuild:\t%s\nBranch:\t%s\nDate:\t%s",
		c.bi.Name,
		c.bi.Version,
		c.bi.Build,
		c.bi.Branch,
		c.bi.Date))
	// all good
	return 0
}
|
package main
import "fmt"
// main demonstrates Go map basics: literal construction, insertion,
// overwriting an existing key, and using a map as a function dispatch
// table. Output is "5" then "6".
func main() {
	attrs := map[string]string{
		"name": "james",
		"age":  "35",
	}
	attrs["key1"] = "v1"
	attrs["key2"] = "v2"
	attrs["key3"] = "v3"
	attrs["key3"] = "v0" // overwrites the previous value for "key3"

	// A map whose values are functions acts as a tiny dispatch table.
	ops := make(map[string]func(a, b int) int)
	ops["add"] = func(a, b int) int {
		return a + b
	}
	ops["multi"] = func(a, b int) int {
		return a * b
	}
	fmt.Println(ops["add"](3, 2))
	fmt.Println(ops["multi"](3, 2))
}
|
package main
import (
"nes"
"os"
"log"
)
// main loads the nestest ROM, boots a machine with CPU debug tracing
// enabled, forces the PC to $C000, and steps 9000 instructions.
func main() {
	file, err := os.Open("assets/nestest.nes")
	if err != nil {
		log.Fatal(err)
	}

	rom, err := nes.ReadROM(file)
	if err != nil {
		log.Fatal(err)
	}

	machine := nes.NewMachine()
	machine.Insert(rom)
	machine.CPU.Debug = true
	machine.CPU.Reset()
	machine.CPU.PC = 0xc000
	for i := 0; i < 9000; i++ {
		machine.CPU.Step()
	}
}
|
// Package nats provides sample codes for NATS/STAN client/server.
package nats
// RunClient runs STAN client
func RunClient() {
var err error
DispMsg(TypeReq, "run STAN client", err)
}
|
package bike
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestBattleMsg checks that canonical GBF raid-recruitment tweets
// (carrying the "参戦ID" marker line) are recognized as battle messages.
func TestBattleMsg(t *testing.T) {
	ast := assert.New(t)
	msg := `91A14694 :参戦ID
参加者募集!
Lv10 ユグドラシル・マグナ`
	ast.True(IsGBFBattle(msg))
	msg = `2F1E6FF1 :参戦ID
参加者募集!
黄龍・黒麒麟HL`
	ast.True(IsGBFBattle(msg))
}

// TestBattleMsgFail checks that a tweet without the exact marker format
// is rejected.
func TestBattleMsgFail(t *testing.T) {
	ast := assert.New(t)
	msg := `参加者募集!参戦I:37F7B348
ALv60 リヴァイアサン・マグナ
https://t.co/RbqZBFIUBz`
	ast.False(IsGBFBattle(msg))
}

// TestConvertBattleMsg checks field extraction (room id, level, mob
// name); the level comes back empty when the mob line has no "Lv" prefix.
func TestConvertBattleMsg(t *testing.T) {
	ast := assert.New(t)
	msg := `91A14694 :参戦ID
参加者募集!
Lv100 ユグドラシル・マグナ`
	result, err := ConvertGBFBattleInfo(msg)
	if err != nil {
		ast.Fail(err.Error())
		return
	}
	ast.Equal("91A14694", result.RoomId)
	ast.Equal("100", result.Level)
	ast.Equal("ユグドラシル・マグナ", result.MobName)
	msg = `2F1E6FF1 :参戦ID
参加者募集!
黄龍・黒麒麟HL`
	result, err = ConvertGBFBattleInfo(msg)
	if err != nil {
		ast.Fail(err.Error())
		return
	}
	ast.Equal("2F1E6FF1", result.RoomId)
	ast.Equal("", result.Level)
	ast.Equal("黄龍・黒麒麟HL", result.MobName)
}
|
package brackets
import "testing"
// testCase is one balanced-brackets scenario.
type testCase struct {
	name  string
	input string
	want  bool
}

// testCases covers balanced nesting, interleaving errors, and input that
// begins with a closing bracket.
var testCases = []testCase{
	{"0", "{[()]}", true},
	{"1", "{[(])}", false},
	{"2", "{{[[(())]]}}", true},
	{"3", "}{}{}", false},
	{"4", "[](){}", true},
}

// TestBalanced runs balanced against each table entry as a subtest.
func TestBalanced(t *testing.T) {
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := balanced(tc.input)
			if got != tc.want {
				t.Errorf("%q returned %t; expected %t", tc.input, got, tc.want)
			}
		})
	}
}
|
package main
import(
"errors"
"fmt"
)
// main exercises the Pilha (stack) type: it pushes values of mixed
// types, pops until empty, and finally shows the error returned when
// popping from an empty stack.
func main() {
	// Create a stack instance and bind the returned object to 'pilha'.
	pilha := Pilha{}
	fmt.Println("Pilha criada com tamanho ", pilha.Tamanho())
	fmt.Println("Vazia? ", pilha.Vazia())
	pilha.Empilhar("Go")
	pilha.Empilhar(2009)
	pilha.Empilhar(3.14)
	pilha.Empilhar("Fim")
	for !pilha.Vazia() {
		v, _ := pilha.Desempilhar()
		fmt.Println("Desempilhando ", v)
		fmt.Println("Tamanho: ", pilha.Tamanho())
		fmt.Println("Vazia? ", pilha.Vazia())
	}
	// Popping from the now-empty stack yields an error.
	_, err := pilha.Desempilhar()
	if err != nil {
		fmt.Println(err)
	}
}
// Pilha is a LIFO stack able to hold values of any type (the empty
// interface accepts everything). The backing slice is unexported so
// other packages cannot mutate it directly.
type Pilha struct {
	valores []interface{}
}

// Tamanho returns the number of stacked values.
func (pilha Pilha) Tamanho() int {
	return len(pilha.valores)
}

// Vazia reports whether the stack holds no values.
func (pilha Pilha) Vazia() bool {
	return pilha.Tamanho() == 0
}

// Empilhar pushes valor onto the top of the stack. Go passes receivers
// by copy, so a pointer receiver is required to mutate the slice.
func (pilha *Pilha) Empilhar(valor interface{}) {
	pilha.valores = append(pilha.valores, valor)
}

// Desempilhar pops and returns the most recently pushed value. When the
// stack is empty it returns a nil value and a non-nil error.
func (pilha *Pilha) Desempilhar() (interface{}, error) {
	if pilha.Vazia() {
		return nil, errors.New("Pilha vazia!")
	}
	topo := pilha.Tamanho() - 1
	valor := pilha.valores[topo]
	pilha.valores = pilha.valores[:topo]
	return valor, nil
}
package pandaTvAPI
import (
"testing"
)
// c is the shared client used by this package's tests; it is initialized
// by Test_newClient below, which therefore must run first.
var c *client

// Test_newClient builds a client from a captured panda.tv session cookie
// string. The cookie literal is the test fixture and must stay intact.
func Test_newClient(t *testing.T) {
	var err error
	c, err = newClient("__guid=96554777.1566464319758375400.1481558348791.0151; R=r%3D22412424%26u%3DCnaqnGi22412424%26n%3D%25R8%25OS%2599%25R4%25O8%258Q%25R6%2598%25NS%25R6%2588%2591%25R7%259N%2584%25R7%259O%258N%25R8%25OR%25OR%26le%3DZwtkZmNkZmZ1BFH0ZUSkYzAioD%3D%3D%26m%3DZGH3ZQNkAwx3ZmR%3D%26im%3DnUE0pPHmDFHlEvHlEzx2YaOxnJ0hM3ZyZxMvMQHmZQplMwp0AGRjA2RlMwSwLzHjAwNkMJH1MzLmBP5jozp%3D; M=t%3D1481558528%26v%3D1.0%26mt%3D1481558528%26s%3D1c896fb10b6f39e0df59926c207cbf53; aliyungf_tc=AQAAAIFx/kUk+AgAPhq2Jzimx4eUqQKY; monitor_count=16; Hm_lvt_204071a8b1d0b2a04c782c44b88eb996=1481558349,1481561678; Hm_lpvt_204071a8b1d0b2a04c782c44b88eb996=1481561687; smid=04046eaf-4327-4798-af31-8078f672deb2")
	if err != nil {
		t.Error(err)
	}
}
|
package retry
import (
"runtime"
"testing"
"time"
)
// TestRetry covers the Retry loop: exhausting constant backoffs, an
// immediate success, and early termination via StopWithErr / StopWithNil.
//
// Fixes: four assert calls had the (actual, expect) arguments swapped,
// which inverted the "expect X, got Y" failure messages; elapsed-time
// measurement now uses the idiomatic time.Since.
func TestRetry(t *testing.T) {
	backoff := 20 * time.Millisecond
	{
		// Always continuing exhausts all 5 backoffs: the callback runs 6
		// times (initial attempt + 5 retries) and the elapsed time falls
		// between 5x and 6x the constant backoff.
		cnt := 0
		now := time.Now()
		err := Retry(ConstantBackoffs(5, backoff), func() (State, error) {
			cnt++
			return Continue, ErrNeedRetry
		})
		assert(t, err != nil, true)
		assert(t, cnt, 6)
		elapsed := time.Since(now)
		assert(t, elapsed > backoff*5, true)
		assert(t, elapsed < backoff*6, true)
	}
	{
		// Success on the first attempt: no backoff is consumed.
		cnt := 0
		now := time.Now()
		err := Retry(ExponentialBackoffs(5, backoff), func() (State, error) {
			cnt++
			return Continue, nil
		})
		must(t, err)
		assert(t, cnt, 1)
		assert(t, time.Since(now) < backoff, true)
	}
	{
		// StopWithErr aborts retrying and surfaces the callback's error.
		cnt := 0
		err := Retry(ZeroBackoffs(5), func() (State, error) {
			cnt++
			if cnt == 2 {
				return StopWithErr, ErrNeedRetry
			}
			return Continue, ErrNeedRetry
		})
		assert(t, cnt, 2)
		assert(t, err == ErrNeedRetry, true)
	}
	{
		// StopWithNil aborts retrying and swallows the callback's error.
		cnt := 0
		err := Retry(ZeroBackoffs(5), func() (State, error) {
			cnt++
			if cnt == 2 {
				return StopWithNil, ErrNeedRetry
			}
			return Continue, ErrNeedRetry
		})
		assert(t, cnt, 2)
		assert(t, err == nil, true)
	}
}
// TestBackoffFactory verifies the length and per-step delays produced
// by each backoff factory.
func TestBackoffFactory(t *testing.T) {
	{
		// ZeroBackoffs yields n zero-length delays.
		zeros := ZeroBackoffs(3)
		assert(t, len(zeros), 3)
		for i := range zeros {
			assert(t, zeros[i], time.Duration(0))
		}
	}
	{
		// ConstantBackoffs yields n identical delays.
		d := 100 * time.Millisecond
		constants := ConstantBackoffs(5, d)
		assert(t, len(constants), 5)
		for i := range constants {
			assert(t, constants[i], d)
		}
	}
	{
		// ExponentialBackoffs doubles each step: d, 2d, 4d, ...
		d := 10 * time.Millisecond
		exponentials := ExponentialBackoffs(10, d)
		assert(t, len(exponentials), 10)
		for i := range exponentials {
			assert(t, exponentials[i], d*(1<<uint(i)))
		}
	}
}
// assert fails the test unless actual equals expect, reporting the
// caller's file and line for easier diagnosis.
func assert(t *testing.T, actual interface{}, expect interface{}) {
	if actual == expect {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	t.Fatalf("expect %v, got %v at (%v:%v)\n", expect, actual, file, line)
}
// must fails the test when err is non-nil, reporting the caller's
// file and line.
func must(t *testing.T, err error) {
	if err == nil {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	t.Fatalf("expect nil, got %v at (%v:%v)\n", err, file, line)
}
|
package main
import (
"fmt"
"sync"
)
// main prints each value of a slice from its own goroutine and waits
// for all of them to finish.
func main() {
	values := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, v := range values {
		wg.Add(1)
		// Pass v as an argument: before Go 1.22 the range variable is
		// shared across iterations, so capturing it in the closure can
		// make every goroutine print the same (last) value.
		go func(s string) {
			defer wg.Done()
			fmt.Println(s)
		}(v)
	}
	wg.Wait()
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package check
import (
"encoding/gob"
"io"
"reflect"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/asmdecl"
"golang.org/x/tools/go/analysis/passes/assign"
"golang.org/x/tools/go/analysis/passes/atomic"
"golang.org/x/tools/go/analysis/passes/bools"
"golang.org/x/tools/go/analysis/passes/buildtag"
"golang.org/x/tools/go/analysis/passes/cgocall"
"golang.org/x/tools/go/analysis/passes/composite"
"golang.org/x/tools/go/analysis/passes/copylock"
"golang.org/x/tools/go/analysis/passes/errorsas"
"golang.org/x/tools/go/analysis/passes/httpresponse"
"golang.org/x/tools/go/analysis/passes/loopclosure"
"golang.org/x/tools/go/analysis/passes/lostcancel"
"golang.org/x/tools/go/analysis/passes/nilfunc"
"golang.org/x/tools/go/analysis/passes/nilness"
"golang.org/x/tools/go/analysis/passes/printf"
"golang.org/x/tools/go/analysis/passes/shadow"
"golang.org/x/tools/go/analysis/passes/shift"
"golang.org/x/tools/go/analysis/passes/stdmethods"
"golang.org/x/tools/go/analysis/passes/stringintconv"
"golang.org/x/tools/go/analysis/passes/structtag"
"golang.org/x/tools/go/analysis/passes/tests"
"golang.org/x/tools/go/analysis/passes/unmarshal"
"golang.org/x/tools/go/analysis/passes/unreachable"
"golang.org/x/tools/go/analysis/passes/unsafeptr"
"golang.org/x/tools/go/analysis/passes/unusedresult"
"honnef.co/go/tools/staticcheck"
"honnef.co/go/tools/stylecheck"
"gvisor.dev/gvisor/tools/checkaligned"
"gvisor.dev/gvisor/tools/checkconst"
"gvisor.dev/gvisor/tools/checkescape"
"gvisor.dev/gvisor/tools/checklinkname"
"gvisor.dev/gvisor/tools/checklocks"
"gvisor.dev/gvisor/tools/checkunsafe"
)
// binaryAnalyzer is a special class of analyzer which supports an additional
// operation to run an analyzer with the object binary data.
type binaryAnalyzer interface {
	// Run runs the analyzer with the given binary data.
	Run(*analysis.Pass, io.Reader) (any, error)
}
// analyzer is a simple analysis.Analyzer interface.
//
// This is implemented by plainAnalyzer, and is used to allow calls to
// non-standard analyzers (e.g. checkescape, which requires the objdump output
// in addition to the existing pass information).
type analyzer interface {
	// Legacy returns the wrapped *analysis.Analyzer.
	Legacy() *analysis.Analyzer
}
// plainAnalyzer implements analyzer by embedding a standard
// *analysis.Analyzer with no extra capabilities.
type plainAnalyzer struct {
	*analysis.Analyzer
}
// Legacy implements analyzer.Legacy.
func (pa *plainAnalyzer) Legacy() *analysis.Analyzer {
	return pa.Analyzer
}
var (
	// allAnalyzers is a list of all available analyzers.
	//
	// This is guaranteed to be complete closure around the dependency
	// graph of all analyzers (via the "Requires" attribute, below).
	// Therefore, to map an *analysis.Analyzer to a runner, you may safely
	// use "findAnalyzer".
	allAnalyzers = make(map[*analysis.Analyzer]analyzer)
	// allFactTypes is a list of all fact types, useful as a filter.
	// Keyed by reflect.Type so each fact is gob-registered exactly once.
	allFactTypes = make(map[reflect.Type]bool)
)
// findAnalyzer maps orig to an analyzer instance.
//
// This is guaranteed to work provided allAnalyzers is made into a transitive
// closure of all known analyzers (see init).
func findAnalyzer(orig *analysis.Analyzer) analyzer {
return allAnalyzers[orig]
}
// registerFactType registers an analysis.Fact.
func registerFactType(f analysis.Fact) {
// Already registered?
t := reflect.TypeOf(f)
if _, ok := allFactTypes[t]; ok {
return
}
// Register the type.
gob.Register(f)
allFactTypes[t] = true
}
// register recursively registers an analyzer.
func register(a analyzer) {
// Already registered?
if _, ok := allAnalyzers[a.Legacy()]; ok {
return
}
// Register all fact types.
for _, f := range a.Legacy().FactTypes {
registerFactType(f)
}
// Register dependencies.
for _, orig := range a.Legacy().Requires {
if findAnalyzer(orig) == nil {
register(&plainAnalyzer{orig})
}
}
// Save the analyzer.
allAnalyzers[a.Legacy()] = a
}
// init registers every analyzer this tool can run: the standard
// golang.org/x/tools passes, gVisor's internal checkers, and the full
// staticcheck/stylecheck suites. Registration builds the transitive
// closure consumed by findAnalyzer.
func init() {
	// Standard & internal analyzers.
	register(&plainAnalyzer{asmdecl.Analyzer})
	register(&plainAnalyzer{assign.Analyzer})
	register(&plainAnalyzer{atomic.Analyzer})
	register(&plainAnalyzer{bools.Analyzer})
	register(&plainAnalyzer{buildtag.Analyzer})
	register(&plainAnalyzer{cgocall.Analyzer})
	register(&plainAnalyzer{composite.Analyzer})
	register(&plainAnalyzer{copylock.Analyzer})
	register(&plainAnalyzer{errorsas.Analyzer})
	register(&plainAnalyzer{httpresponse.Analyzer})
	register(&plainAnalyzer{loopclosure.Analyzer})
	register(&plainAnalyzer{lostcancel.Analyzer})
	register(&plainAnalyzer{nilfunc.Analyzer})
	register(&plainAnalyzer{nilness.Analyzer})
	register(&plainAnalyzer{printf.Analyzer})
	register(&plainAnalyzer{shift.Analyzer})
	register(&plainAnalyzer{stdmethods.Analyzer})
	register(&plainAnalyzer{stringintconv.Analyzer})
	register(&plainAnalyzer{shadow.Analyzer})
	register(&plainAnalyzer{structtag.Analyzer})
	register(&plainAnalyzer{tests.Analyzer})
	register(&plainAnalyzer{unmarshal.Analyzer})
	register(&plainAnalyzer{unreachable.Analyzer})
	register(&plainAnalyzer{unsafeptr.Analyzer})
	register(&plainAnalyzer{unusedresult.Analyzer})
	// checkescape is the one analyzer that also implements
	// binaryAnalyzer, so it is registered unwrapped.
	register(checkescape.Analyzer)
	register(&plainAnalyzer{checkconst.Analyzer})
	register(&plainAnalyzer{checkunsafe.Analyzer})
	register(&plainAnalyzer{checklinkname.Analyzer})
	register(&plainAnalyzer{checklocks.Analyzer})
	register(&plainAnalyzer{checkaligned.Analyzer})
	// Add all staticcheck analyzers.
	for _, a := range staticcheck.Analyzers {
		register(&plainAnalyzer{a.Analyzer})
	}
	// Add all stylecheck analyzers.
	for _, a := range stylecheck.Analyzers {
		register(&plainAnalyzer{a.Analyzer})
	}
}
|
package storage
import (
"encoding/binary"
"errors"
"io"
"os"
sm "github.com/lni/dragonboat/v3/statemachine"
"github.com/tecbot/gorocksdb"
)
// indexKeyCf is the column family holding the applied-index record.
var indexKeyCf = "__index_default_cf__"
// indexKeyPrefix prefixes the key under which the last applied raft
// index is persisted; cluster and node ids are appended per instance.
var indexKeyPrefix = []byte("disk_kv_applied_index")
// RocksDBStateMachine adapts a rocksdb-backed Store to dragonboat's
// on-disk state machine interface.
type RocksDBStateMachine struct {
	ClusterID uint64
	NodeID uint64
	store *Store
	// indexKeyPrefix is the fully-qualified applied-index key for this
	// (cluster, node) pair, built by newRocksDBStateMachine.
	indexKeyPrefix []byte
}
// newRocksDBStateMachine builds a state machine whose applied-index
// key is the shared prefix followed by the big-endian cluster id and
// node id (8 bytes each).
func newRocksDBStateMachine(clusterId uint64, nodeId uint64, s *Store) (*RocksDBStateMachine, error) {
	key := make([]byte, len(indexKeyPrefix)+16)
	n := copy(key, indexKeyPrefix)
	binary.BigEndian.PutUint64(key[n:], clusterId)
	binary.BigEndian.PutUint64(key[n+8:], nodeId)
	return &RocksDBStateMachine{
		ClusterID:      clusterId,
		NodeID:         nodeId,
		indexKeyPrefix: key,
		store:          s,
	}, nil
}
func (r *RocksDBStateMachine) Open(stopChan <-chan struct{}) (uint64, error) {
select {
case <-stopChan:
return 0, sm.ErrOpenStopped
default:
data, err := r.store.GetUint64(indexKeyCf, r.indexKeyPrefix)
return data, err
}
}
func (r *RocksDBStateMachine) Update(entries []sm.Entry) ([]sm.Entry, error) {
resultEntry := []sm.Entry{}
//将raft的日志转换为rocksdb要执行的命令
for _, e := range entries {
r, err := r.processEntry(e)
if err != nil {
return nil, err
}
resultEntry = append(resultEntry, r)
}
idx := entries[len(entries)-1].Index
idxByte := make([]byte, 8)
binary.BigEndian.PutUint64(idxByte, idx)
batch := gorocksdb.NewWriteBatch()
defer batch.Destroy()
batch.Put(r.indexKeyPrefix, idxByte)
if err := r.store.Write(batch); err != nil {
return nil, err
}
return resultEntry, nil
}
// processEntry decodes one raft entry into a command, executes it
// against the local store, and attaches the command's response (if
// any) to the entry's Result.
func (r *RocksDBStateMachine) processEntry(e sm.Entry) (sm.Entry, error) {
	cmd := DecodeCmd(e.Cmd)
	if cmd == nil {
		// The payload did not decode into a known command.
		return sm.Entry{}, errors.New("error command!")
	}
	if err := cmd.LocalInvoke(r.store); err != nil {
		return e, err
	}
	// Value 1 marks a successful execution with response data.
	resp := cmd.GetResp()
	if len(resp) > 0 {
		e.Result = sm.Result{Value: 1, Data: resp}
	}
	return e, nil
}
// Lookup executes a read-only command encoded in query against the
// local store and returns its response bytes.
func (r *RocksDBStateMachine) Lookup(query interface{}) (interface{}, error) {
	command := DecodeCmd(query.([]byte))
	if command == nil {
		return nil, errors.New("error command!")
	}
	if err := command.LocalInvoke(r.store); err != nil {
		return nil, err
	}
	return command.GetResp(), nil
}
// Sync is a no-op: Update goes through a rocksdb WriteBatch, so there
// is no extra in-memory state to flush here.
func (r *RocksDBStateMachine) Sync() error {
	return nil
}
// PrepareSnapshot creates a snapshot directory in the store and
// returns its path for SaveSnapshot to stream out.
func (r *RocksDBStateMachine) PrepareSnapshot() (interface{}, error) {
	return r.store.NewSnapshotDir()
}
// SaveSnapshot streams the snapshot directory produced by
// PrepareSnapshot into writer, removing the directory afterwards.
func (r *RocksDBStateMachine) SaveSnapshot(snapshot interface{}, writer io.Writer, stopChan <-chan struct{}) error {
	path := snapshot.(string)
	// Always clean up the on-disk snapshot directory, even on error.
	defer func() {
		_ = os.RemoveAll(path)
	}()
	return r.store.SaveSnapShotToWriter(path, writer, stopChan)
}
// RecoverFromSnapshot restores the store's contents from a snapshot
// stream produced by SaveSnapshot.
func (r *RocksDBStateMachine) RecoverFromSnapshot(reader io.Reader, stopChan <-chan struct{}) error {
	return r.store.LoadSnapShotFromReader(reader, stopChan)
}
// Close releases the underlying rocksdb store.
func (r *RocksDBStateMachine) Close() error {
	return r.store.Close()
}
|
package cluster
import "github.com/cohesity/management-sdk-go/models"
import "github.com/cohesity/management-sdk-go/configuration"
/*
* Interface for the CLUSTER_IMPL
*/
// CLUSTER exposes Cohesity cluster management operations; CLUSTER_IMPL
// is the concrete implementation returned by NewCLUSTER.
type CLUSTER interface {
	UpdateCluster (*models.UpdateCluster) (*models.CohesityCluster, error)
	GetCluster (*bool) (*models.CohesityCluster, error)
	GetBasicClusterInfo () (*models.BasicCohesityClusterInformation, error)
}
/*
 * Factory for the CLUSTER interface, returning a CLUSTER_IMPL
 */
// NewCLUSTER constructs a CLUSTER_IMPL wired with the given
// configuration.
func NewCLUSTER(config configuration.CONFIGURATION) *CLUSTER_IMPL {
	impl := &CLUSTER_IMPL{}
	impl.config = config
	return impl
}
|
package beachfront
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/prebid/openrtb/v19/adcom1"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
)
// Seat is the bidder seat name reported for beachfront bids.
const Seat = "beachfront"
// BidCapacity pre-sizes the bidder response's bid slice.
const BidCapacity = 5
const defaultVideoEndpoint = "https://reachms.bfmio.com/bid.json?exchange_id"
// nurlVideoEndpointSuffix marks a video request as expecting a nurl
// (win-notice URL) style response.
const nurlVideoEndpointSuffix = "&prebidserver"
const beachfrontAdapterName = "BF_PREBID_S2S"
const beachfrontAdapterVersion = "1.0.0"
// minBidFloor is the smallest bid floor forwarded; anything at or
// below it is zeroed out in setBidFloor.
const minBidFloor = 0.01
// Default video dimensions used when the imp specifies neither.
const defaultVideoWidth = 300
const defaultVideoHeight = 250
// fakeIP is substituted when an ADM video request carries no device IP.
const fakeIP = "255.255.255.255"
// BeachfrontAdapter holds the configured endpoints for the bidder.
type BeachfrontAdapter struct {
	bannerEndpoint string
	extraInfo ExtraInfo
}
// ExtraInfo is the adapter's extra-config JSON shape (see getExtraInfo).
type ExtraInfo struct {
	VideoEndpoint string `json:"video_endpoint,omitempty"`
}
// beachfrontRequests groups the outgoing requests produced by
// preprocess, split by media type and video response style.
type beachfrontRequests struct {
	Banner beachfrontBannerRequest
	NurlVideo []beachfrontVideoRequest
	ADMVideo []beachfrontVideoRequest
}
// ---------------------------------------------------
// Video
// ---------------------------------------------------
// beachfrontVideoRequest wraps a per-imp OpenRTB request sent to the
// video endpoint.
type beachfrontVideoRequest struct {
	AppId string `json:"appId"`
	VideoResponseType string `json:"videoResponseType"`
	Request openrtb2.BidRequest `json:"request"`
}
// ---------------------------------------------------
//
// Banner
//
// ---------------------------------------------------
// beachfrontBannerRequest is the proprietary (non-OpenRTB) payload the
// banner endpoint expects; one request carries all banner slots.
type beachfrontBannerRequest struct {
	Slots []beachfrontSlot `json:"slots"`
	Domain string `json:"domain"`
	Page string `json:"page"`
	Referrer string `json:"referrer"`
	Search string `json:"search"`
	Secure int8 `json:"secure"`
	DeviceOs string `json:"deviceOs"`
	DeviceModel string `json:"deviceModel"`
	IsMobile int8 `json:"isMobile"`
	UA string `json:"ua"`
	Dnt int8 `json:"dnt"`
	User openrtb2.User `json:"user"`
	AdapterName string `json:"adapterName"`
	AdapterVersion string `json:"adapterVersion"`
	IP string `json:"ip"`
	RequestID string `json:"requestId"`
	Real204 bool `json:"real204"`
	SChain openrtb2.SupplyChain `json:"schain,omitempty"`
}
// beachfrontSlot is one banner placement (imp) within a banner request.
type beachfrontSlot struct {
	Slot string `json:"slot"`
	Id string `json:"id"`
	Bidfloor float64 `json:"bidfloor"`
	Sizes []beachfrontSize `json:"sizes"`
}
// beachfrontSize is a single width/height pair for a slot.
type beachfrontSize struct {
	W uint64 `json:"w"`
	H uint64 `json:"h"`
}
// ---------------------------------------------------
// Banner response
// ---------------------------------------------------
// beachfrontResponseSlot is one entry of the banner endpoint's
// proprietary response array.
type beachfrontResponseSlot struct {
	CrID string `json:"crid"`
	Price float64 `json:"price"`
	W uint64 `json:"w"`
	H uint64 `json:"h"`
	Slot string `json:"slot"`
	Adm string `json:"adm"`
}
// beachfrontVideoBidExtension carries the creative duration returned
// in a video bid's ext.
type beachfrontVideoBidExtension struct {
	Duration int `json:"duration"`
}
// MakeRequests splits the incoming OpenRTB request into Beachfront
// requests (at most one banner request plus one request per video imp)
// and serializes each into an adapters.RequestData.
func (a *BeachfrontAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	beachfrontRequests, errs := preprocess(request, reqInfo)
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	// Forward selected device attributes as HTTP headers.
	if request.Device != nil {
		if request.Device.UA != "" {
			headers.Add("User-Agent", request.Device.UA)
		}
		if request.Device.Language != "" {
			headers.Add("Accept-Language", request.Device.Language)
		}
		if request.Device.DNT != nil {
			headers.Add("DNT", strconv.Itoa(int(*request.Device.DNT)))
		}
	}
	// The reqs slice is preallocated; nurlBump/admBump track where the
	// video requests start once the (optional) banner request fills
	// slot 0.
	// NOTE(review): if a json.Marshal below fails, its preallocated
	// slot stays nil in the returned slice — confirm callers tolerate
	// nil entries.
	var reqCount = len(beachfrontRequests.ADMVideo) + len(beachfrontRequests.NurlVideo)
	if len(beachfrontRequests.Banner.Slots) > 0 {
		reqCount++
	}
	var reqs = make([]*adapters.RequestData, reqCount)
	var nurlBump = 0
	var admBump = 0
	if len(beachfrontRequests.Banner.Slots) > 0 {
		bytes, err := json.Marshal(beachfrontRequests.Banner)
		if err == nil {
			reqs[0] = &adapters.RequestData{
				Method: "POST",
				Uri: a.bannerEndpoint,
				Body: bytes,
				Headers: headers,
			}
			nurlBump++
			admBump++
		} else {
			errs = append(errs, err)
		}
	}
	// The headers map is shared by every RequestData built here, so
	// this cookie applies to all of them (including the banner request
	// created above).
	if request.User != nil && request.User.BuyerUID != "" && reqCount > 0 {
		headers.Add("Cookie", "__io_cid="+request.User.BuyerUID)
	}
	// One request per ADM video imp; the appId is appended to the
	// video endpoint's exchange_id query parameter.
	for j := 0; j < len(beachfrontRequests.ADMVideo); j++ {
		bytes, err := json.Marshal(beachfrontRequests.ADMVideo[j].Request)
		if err == nil {
			reqs[j+nurlBump] = &adapters.RequestData{
				Method: "POST",
				Uri: a.extraInfo.VideoEndpoint + "=" + beachfrontRequests.ADMVideo[j].AppId,
				Body: bytes,
				Headers: headers,
			}
			admBump++
		} else {
			errs = append(errs, err)
		}
	}
	// One request per nurl video imp; an isPrebid marker is spliced
	// into the serialized JSON and the nurl suffix added to the URI.
	for j := 0; j < len(beachfrontRequests.NurlVideo); j++ {
		bytes, err := json.Marshal(beachfrontRequests.NurlVideo[j].Request)
		if err == nil {
			bytes = append([]byte(`{"isPrebid":true,`), bytes[1:]...)
			reqs[j+admBump] = &adapters.RequestData{
				Method: "POST",
				Uri: a.extraInfo.VideoEndpoint + "=" + beachfrontRequests.NurlVideo[j].AppId + nurlVideoEndpointSuffix,
				Body: bytes,
				Headers: headers,
			}
		} else {
			errs = append(errs, err)
		}
	}
	return reqs, errs
}
// preprocess partitions the request's imps into banner and video
// groups and builds the corresponding Beachfront payloads. Video
// requests are further split by their response type ("nurl" vs "adm").
// Note that request.Imp is mutated to the filtered imp lists.
func preprocess(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) (beachfrontReqs beachfrontRequests, errs []error) {
	var videoImps = make([]openrtb2.Imp, 0)
	var bannerImps = make([]openrtb2.Imp, 0)
	for i := 0; i < len(request.Imp); i++ {
		// A banner imp must carry at least one Format entry with a
		// non-zero size. Checking len() (rather than only != nil, as
		// before) also guards against a non-nil but empty Format
		// slice, which would have panicked on the [0] index.
		if request.Imp[i].Banner != nil && len(request.Imp[i].Banner.Format) > 0 &&
			request.Imp[i].Banner.Format[0].H != 0 && request.Imp[i].Banner.Format[0].W != 0 {
			bannerImps = append(bannerImps, request.Imp[i])
		}
		if request.Imp[i].Video != nil {
			videoImps = append(videoImps, request.Imp[i])
		}
	}
	if len(bannerImps)+len(videoImps) == 0 {
		errs = append(errs, errors.New("no valid impressions were found in the request"))
		return
	}
	if len(bannerImps) > 0 {
		request.Imp = bannerImps
		beachfrontReqs.Banner, errs = getBannerRequest(request, reqInfo)
	}
	if len(videoImps) > 0 {
		var videoErrs []error
		var videoList []beachfrontVideoRequest
		request.Imp = videoImps
		request.Ext = nil
		videoList, videoErrs = getVideoRequests(request, reqInfo)
		errs = append(errs, videoErrs...)
		// Split the per-imp video requests by expected response style.
		for i := 0; i < len(videoList); i++ {
			if videoList[i].VideoResponseType == "nurl" {
				beachfrontReqs.NurlVideo = append(beachfrontReqs.NurlVideo, videoList[i])
			}
			if videoList[i].VideoResponseType == "adm" {
				beachfrontReqs.ADMVideo = append(beachfrontReqs.ADMVideo, videoList[i])
			}
		}
	}
	return
}
// getAppId resolves the Beachfront app id for the requested media
// type. An explicit ext.AppId wins; otherwise the media-type-specific
// entry in ext.AppIds is used. An error is returned when neither is
// set.
func getAppId(ext openrtb_ext.ExtImpBeachfront, media openrtb_ext.BidType) (string, error) {
	var appid string
	// Renamed from `error`, which shadowed the predeclared error type.
	var err error
	if ext.AppId != "" {
		appid = ext.AppId
	} else if media == openrtb_ext.BidTypeVideo && ext.AppIds.Video != "" {
		appid = ext.AppIds.Video
	} else if media == openrtb_ext.BidTypeBanner && ext.AppIds.Banner != "" {
		appid = ext.AppIds.Banner
	} else {
		err = errors.New("unable to determine the appId(s) from the supplied extension")
	}
	return appid, err
}
// getSchain decodes the supply chain object from request.Source.Ext.
// Callers must ensure request.Source and its Ext are non-nil (see the
// guard in getBannerRequest).
func getSchain(request *openrtb2.BidRequest) (openrtb_ext.ExtRequestPrebidSChain, error) {
	var schain openrtb_ext.ExtRequestPrebidSChain
	return schain, json.Unmarshal(request.Source.Ext, &schain)
}
// getBannerRequest flattens all banner imps into one proprietary
// Beachfront banner payload, resolving per-imp app ids and bid floors
// and copying device/site/app/user attributes onto the request.
func getBannerRequest(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) (beachfrontBannerRequest, []error) {
	var bfr beachfrontBannerRequest
	var errs = make([]error, 0, len(request.Imp))
	for i := 0; i < len(request.Imp); i++ {
		beachfrontExt, err := getBeachfrontExtension(request.Imp[i])
		if err != nil {
			// Failure to extract the beachfront imp params; skip this imp.
			errs = append(errs, err)
			continue
		}
		appid, err := getAppId(beachfrontExt, openrtb_ext.BidTypeBanner)
		if err != nil {
			// Failure to get an appid, so this request is junk.
			errs = append(errs, err)
			continue
		}
		// Non-fatal floor errors are collected but the imp is kept.
		if fatal, err := setBidFloor(&beachfrontExt, &request.Imp[i], reqInfo); err != nil {
			errs = append(errs, err)
			if fatal {
				continue
			}
		}
		slot := beachfrontSlot{
			Id: appid,
			Slot: request.Imp[i].ID,
			Bidfloor: request.Imp[i].BidFloor,
		}
		for j := 0; j < len(request.Imp[i].Banner.Format); j++ {
			slot.Sizes = append(slot.Sizes, beachfrontSize{
				H: uint64(request.Imp[i].Banner.Format[j].H),
				W: uint64(request.Imp[i].Banner.Format[j].W),
			})
		}
		bfr.Slots = append(bfr.Slots, slot)
	}
	// No usable slots: return early with the collected errors.
	if len(bfr.Slots) == 0 {
		return bfr, errs
	}
	if request.Device != nil {
		bfr.IP = request.Device.IP
		bfr.DeviceModel = request.Device.Model
		bfr.DeviceOs = request.Device.OS
		if request.Device.DNT != nil {
			bfr.Dnt = *request.Device.DNT
		}
		if request.Device.UA != "" {
			bfr.UA = request.Device.UA
		}
	}
	var t = fallBackDeviceType(request)
	if t == adcom1.DeviceMobile {
		bfr.Page = request.App.Bundle
		// NOTE(review): when App.Domain is empty this passes the empty
		// Domain into getDomain (always yielding ""); passing
		// request.App.Bundle looks like the intent — confirm upstream.
		if request.App.Domain == "" {
			bfr.Domain = getDomain(request.App.Domain)
		} else {
			bfr.Domain = request.App.Domain
		}
		bfr.IsMobile = 1
	} else if t == adcom1.DevicePC {
		bfr.Page = request.Site.Page
		if request.Site.Domain == "" {
			bfr.Domain = getDomain(request.Site.Page)
		} else {
			bfr.Domain = request.Site.Domain
		}
		bfr.IsMobile = 0
	}
	bfr.Secure = isSecure(bfr.Page)
	if request.User != nil && request.User.ID != "" {
		if bfr.User.ID == "" {
			bfr.User.ID = request.User.ID
		}
	}
	if request.User != nil && request.User.BuyerUID != "" {
		if bfr.User.BuyerUID == "" {
			bfr.User.BuyerUID = request.User.BuyerUID
		}
	}
	bfr.RequestID = request.ID
	bfr.AdapterName = beachfrontAdapterName
	bfr.AdapterVersion = beachfrontAdapterVersion
	// An explicit imp-level secure flag overrides the page-derived one.
	if request.Imp[0].Secure != nil {
		bfr.Secure = *request.Imp[0].Secure
	}
	bfr.Real204 = true
	if request.Source != nil && request.Source.Ext != nil {
		// Best effort: a failed schain decode is silently dropped.
		schain, err := getSchain(request)
		if err == nil {
			bfr.SChain = schain.SChain
		}
	}
	return bfr, errs
}
// fallBackDeviceType assumes a PC device for site requests and a
// mobile device for everything else (app requests).
func fallBackDeviceType(request *openrtb2.BidRequest) adcom1.DeviceType {
	if request.Site == nil {
		return adcom1.DeviceMobile
	}
	return adcom1.DevicePC
}
// getVideoRequests builds one beachfrontVideoRequest per video imp,
// cloning the incoming request and normalizing device, site/app and
// currency fields. Imps that fail validation are recorded and removed
// from the result.
func getVideoRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]beachfrontVideoRequest, []error) {
	var bfReqs = make([]beachfrontVideoRequest, len(request.Imp))
	var errs = make([]error, 0, len(request.Imp))
	var failedRequestIndicies = make([]int, 0)
	for i := 0; i < len(request.Imp); i++ {
		beachfrontExt, err := getBeachfrontExtension(request.Imp[i])
		if err != nil {
			// Failure to extract the beachfront imp params; drop this imp.
			failedRequestIndicies = append(failedRequestIndicies, i)
			errs = append(errs, err)
			continue
		}
		appid, err := getAppId(beachfrontExt, openrtb_ext.BidTypeVideo)
		bfReqs[i].AppId = appid
		if err != nil {
			failedRequestIndicies = append(failedRequestIndicies, i)
			errs = append(errs, err)
			continue
		}
		// Each imp gets its own shallow copy of the full request.
		bfReqs[i].Request = *request
		var secure int8
		var deviceCopy openrtb2.Device
		if bfReqs[i].Request.Device == nil {
			deviceCopy = openrtb2.Device{}
		} else {
			deviceCopy = *bfReqs[i].Request.Device
		}
		if beachfrontExt.VideoResponseType == "nurl" {
			bfReqs[i].VideoResponseType = "nurl"
		} else {
			// ADM responses require an IP; substitute a placeholder.
			bfReqs[i].VideoResponseType = "adm"
			if deviceCopy.IP == "" {
				deviceCopy.IP = fakeIP
			}
		}
		// Derive a site domain from the page URL when missing.
		if bfReqs[i].Request.Site != nil && bfReqs[i].Request.Site.Domain == "" && bfReqs[i].Request.Site.Page != "" {
			siteCopy := *bfReqs[i].Request.Site
			siteCopy.Domain = getDomain(bfReqs[i].Request.Site.Page)
			bfReqs[i].Request.Site = &siteCopy
			secure = isSecure(bfReqs[i].Request.Site.Page)
		}
		// Derive an app domain from the bundle id (e.g. "com.example.app"
		// becomes "example.com"). chunks[1] is the second segment; the
		// original spelled it as chunks[len(chunks)-(len(chunks)-1)].
		if bfReqs[i].Request.App != nil && bfReqs[i].Request.App.Domain == "" && bfReqs[i].Request.App.Bundle != "" {
			var chunks = strings.Split(strings.Trim(bfReqs[i].Request.App.Bundle, "_"), ".")
			if len(chunks) > 1 {
				appCopy := *bfReqs[i].Request.App
				appCopy.Domain = fmt.Sprintf("%s.%s", chunks[1], chunks[0])
				bfReqs[i].Request.App = &appCopy
			}
		}
		if deviceCopy.DeviceType == 0 {
			// Per the spec this should be non-zero; guess from site/app.
			deviceCopy.DeviceType = fallBackDeviceType(request)
		}
		bfReqs[i].Request.Device = &deviceCopy
		// Reduce the cloned request to this single, sanitized video imp.
		imp := request.Imp[i]
		imp.Banner = nil
		imp.Ext = nil
		imp.Secure = &secure
		if fatal, err := setBidFloor(&beachfrontExt, &imp, reqInfo); err != nil {
			errs = append(errs, err)
			if fatal {
				failedRequestIndicies = append(failedRequestIndicies, i)
				continue
			}
		}
		if imp.Video.H == 0 && imp.Video.W == 0 {
			imp.Video.W = defaultVideoWidth
			imp.Video.H = defaultVideoHeight
		}
		if len(bfReqs[i].Request.Cur) == 0 {
			bfReqs[i].Request.Cur = make([]string, 1)
			bfReqs[i].Request.Cur[0] = "USD"
		}
		bfReqs[i].Request.Imp = make([]openrtb2.Imp, 1)
		bfReqs[i].Request.Imp[0] = imp
	}
	// Remove failed requests. Indices were collected in ascending
	// order, so delete from the end: removing ascending indices from a
	// shrinking slice (as the original did) shifts later indices and
	// removes the wrong elements once more than one imp has failed.
	for i := len(failedRequestIndicies) - 1; i >= 0; i-- {
		bfReqs = removeVideoElement(bfReqs, failedRequestIndicies[i])
	}
	return bfReqs, errs
}
// MakeBids maps a Beachfront HTTP response (either proprietary banner
// JSON or OpenRTB video JSON) into typed prebid bids. Status handling:
// 204 means no bids, 5xx is a server error, 4xx a bad request, and
// anything else but 200 is unexpected.
func (a *BeachfrontAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if response.StatusCode == http.StatusNoContent {
		return nil, nil
	}
	if response.StatusCode >= http.StatusInternalServerError {
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("server error status code %d from %s. Run with request.debug = 1 for more info", response.StatusCode, externalRequest.Uri),
		}}
	}
	if response.StatusCode >= http.StatusBadRequest {
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("request error status code %d from %s. Run with request.debug = 1 for more info", response.StatusCode, externalRequest.Uri),
		}}
	}
	if response.StatusCode != http.StatusOK {
		return nil, []error{fmt.Errorf("unexpected status code %d from %s. Run with request.debug = 1 for more info", response.StatusCode, externalRequest.Uri)}
	}
	var bids []openrtb2.Bid
	var errs = make([]error, 0)
	// The originally-sent request body is re-parsed so postprocess can
	// correlate bids with the imps that produced them.
	var xtrnal openrtb2.BidRequest
	if err := json.Unmarshal(externalRequest.Body, &xtrnal); err != nil {
		errs = append(errs, err)
	} else {
		bids, errs = postprocess(response, xtrnal, externalRequest.Uri, internalRequest.ID)
	}
	if len(errs) != 0 {
		return nil, errs
	}
	var dur beachfrontVideoBidExtension
	bidResponse := adapters.NewBidderResponseWithBidsCapacity(BidCapacity)
	for i := 0; i < len(bids); i++ {
		// A parsable ext with a positive duration marks a video bid
		// carrying creative-duration metadata.
		if err := json.Unmarshal(bids[i].Ext, &dur); err == nil && dur.Duration > 0 {
			impVideo := openrtb_ext.ExtBidPrebidVideo{
				Duration: int(dur.Duration),
			}
			if len(bids[i].Cat) > 0 {
				impVideo.PrimaryCategory = bids[i].Cat[0]
			}
			bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
				Bid: &bids[i],
				BidType: a.getBidType(externalRequest),
				BidVideo: &impVideo,
			})
		} else {
			bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
				Bid: &bids[i],
				BidType: a.getBidType(externalRequest),
			})
		}
	}
	return bidResponse, errs
}
// setBidFloor normalizes imp.BidFloor to USD, falling back to the
// imp.ext.beachfront.bidfloor when currency conversion fails. The
// returned bool reports whether the error is fatal for this imp (no
// usable floor at all). Floors at or below minBidFloor are cleared.
// NOTE(review): "recieved" in the two messages below is misspelled,
// but these are user-visible strings; fixing them is a behavior change
// to coordinate with any tests that assert on the text.
func setBidFloor(ext *openrtb_ext.ExtImpBeachfront, imp *openrtb2.Imp, reqInfo *adapters.ExtraRequestInfo) (bool, error) {
	var initialImpBidfloor float64 = imp.BidFloor
	var err error
	if imp.BidFloorCur != "" && strings.ToUpper(imp.BidFloorCur) != "USD" && imp.BidFloor > 0 {
		imp.BidFloor, err = reqInfo.ConvertCurrency(imp.BidFloor, imp.BidFloorCur, "USD")
		var convertedFromCurrency = imp.BidFloorCur
		imp.BidFloorCur = "USD"
		if err != nil {
			// Conversion failed: use the extension floor if it is
			// meaningful (warning), otherwise skip the imp (fatal).
			if ext.BidFloor > minBidFloor {
				imp.BidFloor = ext.BidFloor
				return false, &errortypes.Warning{
					Message: fmt.Sprintf("The following error was recieved from the currency converter while attempting to convert the imp.bidfloor value of %.2f from %s to USD:\n%s\nThe provided value of imp.ext.beachfront.bidfloor, %.2f USD is being used as a fallback.",
						initialImpBidfloor,
						convertedFromCurrency,
						err,
						ext.BidFloor,
					),
				}
			} else {
				return true, &errortypes.BadInput{
					Message: fmt.Sprintf("The following error was recieved from the currency converter while attempting to convert the imp.bidfloor value of %.2f from %s to USD:\n%s\nA value of imp.ext.beachfront.bidfloor was not provided. The bid is being skipped.",
						initialImpBidfloor,
						convertedFromCurrency,
						err,
					),
				}
			}
		}
	}
	// The extension floor acts as a minimum over the imp floor.
	if imp.BidFloor < ext.BidFloor {
		imp.BidFloor = ext.BidFloor
	}
	// Floors at or below the minimum are treated as "no floor".
	if imp.BidFloor > minBidFloor {
		imp.BidFloorCur = "USD"
	} else {
		imp.BidFloor = 0
		imp.BidFloorCur = ""
	}
	return false, nil
}
// getBidType reports video when the request was sent to the configured
// video endpoint (the URI up to the first "="), banner otherwise.
func (a *BeachfrontAdapter) getBidType(externalRequest *adapters.RequestData) openrtb_ext.BidType {
	base := strings.Split(externalRequest.Uri, "=")[0]
	if base != a.extraInfo.VideoEndpoint {
		return openrtb_ext.BidTypeBanner
	}
	return openrtb_ext.BidTypeVideo
}
// postprocess dispatches a response body to the right converter: a
// valid OpenRTB response with seat bids goes through postprocessVideo,
// anything else is retried as the proprietary banner slot array.
func postprocess(response *adapters.ResponseData, xtrnal openrtb2.BidRequest, uri string, id string) ([]openrtb2.Bid, []error) {
	var beachfrontResp []beachfrontResponseSlot
	var openrtbResp openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &openrtbResp); err != nil || len(openrtbResp.SeatBid) == 0 {
		// Not OpenRTB (or empty): try the banner slot-array shape.
		if err := json.Unmarshal(response.Body, &beachfrontResp); err != nil {
			return nil, []error{&errortypes.BadServerResponse{
				Message: "server response failed to unmarshal as valid rtb. Run with request.debug = 1 for more info",
			}}
		} else {
			return postprocessBanner(beachfrontResp, id)
		}
	}
	return postprocessVideo(openrtbResp.SeatBid[0].Bid, xtrnal, uri, id)
}
// postprocessBanner converts proprietary banner slots into OpenRTB
// bids; each bid ID is the slot name suffixed with "Banner".
func postprocessBanner(beachfrontResp []beachfrontResponseSlot, id string) ([]openrtb2.Bid, []error) {
	bids := make([]openrtb2.Bid, len(beachfrontResp))
	for i, slot := range beachfrontResp {
		bids[i] = openrtb2.Bid{
			CrID:  slot.CrID,
			ImpID: slot.Slot,
			Price: slot.Price,
			ID:    fmt.Sprintf("%sBanner", slot.Slot),
			AdM:   slot.Adm,
			H:     int64(slot.H),
			W:     int64(slot.W),
		}
	}
	return bids, make([]error, 0)
}
// postprocessVideo fills in video bid fields. Nurl responses (detected
// by the URI suffix) get their crid, dimensions and imp id from the
// originating request; adm responses only need an ID suffix.
// NOTE(review): the nurl branch indexes xtrnal.Imp[i] by bid position,
// assuming the server returns exactly one bid per imp, in order — a
// mismatched count would panic; confirm this contract upstream.
func postprocessVideo(bids []openrtb2.Bid, xtrnal openrtb2.BidRequest, uri string, id string) ([]openrtb2.Bid, []error) {
	var errs = make([]error, 0)
	if uri[len(uri)-len(nurlVideoEndpointSuffix):] == nurlVideoEndpointSuffix {
		for i := 0; i < len(bids); i++ {
			crid := extractNurlVideoCrid(bids[i].NURL)
			bids[i].CrID = crid
			bids[i].ImpID = xtrnal.Imp[i].ID
			bids[i].H = xtrnal.Imp[i].Video.H
			bids[i].W = xtrnal.Imp[i].Video.W
			bids[i].ID = fmt.Sprintf("%sNurlVideo", xtrnal.Imp[i].ID)
		}
	} else {
		for i := 0; i < len(bids); i++ {
			bids[i].ID = fmt.Sprintf("%sAdmVideo", bids[i].ImpID)
		}
	}
	return bids, errs
}
// extractNurlVideoCrid pulls the creative id out of a colon-delimited
// nurl: the crid is the third colon-separated segment.
func extractNurlVideoCrid(nurl string) string {
	chunky := strings.SplitAfter(nurl, ":")
	// The previous guard (len > 1) allowed an index-out-of-range panic
	// on chunky[2] when the nurl contained exactly one colon; require
	// at least three segments before indexing.
	if len(chunky) > 2 {
		return strings.TrimSuffix(chunky[2], ":")
	}
	return ""
}
// getBeachfrontExtension decodes the two-level imp extension: first
// the generic prebid bidder wrapper, then the beachfront-specific
// params inside it. Either failure yields a BadInput error naming the
// imp.
func getBeachfrontExtension(imp openrtb2.Imp) (openrtb_ext.ExtImpBeachfront, error) {
	var err error
	var bidderExt adapters.ExtImpBidder
	var beachfrontExt openrtb_ext.ExtImpBeachfront
	if err = json.Unmarshal(imp.Ext, &bidderExt); err != nil {
		return beachfrontExt, &errortypes.BadInput{
			Message: fmt.Sprintf("ignoring imp id=%s, error while decoding extImpBidder, err: %s", imp.ID, err),
		}
	}
	if err = json.Unmarshal(bidderExt.Bidder, &beachfrontExt); err != nil {
		return beachfrontExt, &errortypes.BadInput{
			Message: fmt.Sprintf("ignoring imp id=%s, error while decoding extImpBeachfront, err: %s", imp.ID, err),
		}
	}
	return beachfrontExt, err
}
// getDomain derives the host portion of a page URL: drop the protocol
// (everything up to the first "//"), then keep everything before the
// first "/".
func getDomain(page string) string {
	withoutProto := page
	if idx := strings.Index(page, "//"); idx != -1 {
		withoutProto = page[idx+2:]
	}
	return strings.Split(withoutProto, "/")[0]
}
// isSecure reports 1 when the page URL explicitly uses the https
// scheme, 0 otherwise.
func isSecure(page string) int8 {
	if strings.HasPrefix(page, "https://") {
		return 1
	}
	return 0
}
// removeVideoElement drops the element at index s in place, or returns
// an empty slice when s is out of range.
func removeVideoElement(slice []beachfrontVideoRequest, s int) []beachfrontVideoRequest {
	if s+1 > len(slice) {
		return []beachfrontVideoRequest{}
	}
	return append(slice[:s], slice[s+1:]...)
}
// Builder builds a new instance of the Beachfront adapter for the given bidder with the given config.
// The banner endpoint comes from config.Endpoint and the video
// endpoint from the extra-info JSON (with a default); the server
// parameter is unused but required by the builder signature.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	extraInfo, err := getExtraInfo(config.ExtraAdapterInfo)
	if err != nil {
		return nil, err
	}
	bidder := &BeachfrontAdapter{
		bannerEndpoint: config.Endpoint,
		extraInfo: extraInfo,
	}
	return bidder, nil
}
// getExtraInfo parses the adapter's extra-info JSON, applying defaults
// for an empty payload or a missing video endpoint.
func getExtraInfo(v string) (ExtraInfo, error) {
	if v == "" {
		return getDefaultExtraInfo(), nil
	}
	var extraInfo ExtraInfo
	if err := json.Unmarshal([]byte(v), &extraInfo); err != nil {
		return extraInfo, fmt.Errorf("invalid extra info: %v", err)
	}
	if extraInfo.VideoEndpoint == "" {
		extraInfo.VideoEndpoint = defaultVideoEndpoint
	}
	return extraInfo, nil
}
// getDefaultExtraInfo returns the adapter's built-in extra-info
// configuration, pointing at the default video endpoint.
func getDefaultExtraInfo() ExtraInfo {
	return ExtraInfo{
		VideoEndpoint: defaultVideoEndpoint,
	}
}
|
package task
import (
"fmt"
"github.com/mizuki1412/go-core-kit/library/commonkit"
"github.com/mizuki1412/go-core-kit/service/cronkit"
"github.com/mizuki1412/go-core-kit/service/influxkit"
"github.com/spf13/cast"
"jd-mining-server/service"
"jd-mining-server/service/config"
"jd-mining-server/service/model"
"jd-mining-server/service/wechat"
"time"
)
// UpdateRouterList schedules a job that refreshes the cached router
// list for the given account pin every 10 minutes.
func UpdateRouterList(pin, tgt string) {
	// Refresh the router list.
	cronkit.AddFunc("@every 10m", func() {
		model.RouterMap.Set(pin, service.ListRouter(pin, tgt))
	})
}
// CollectTask schedules a job that, every minute, polls PCDN status
// for each non-offline router and writes a line-protocol point to the
// default influx database.
func CollectTask(pin, tgt string) {
	// Collect router metrics.
	cronkit.AddFunc("@every 1m", func() {
		for _, v := range model.RouterMap.Read(pin) {
			if v.Status != model.RouterStatusOffline {
				// Wrapped so a panic for one router does not kill the loop.
				commonkit.RecoverFuncWrapper(func() {
					s := service.GetPCDNStatus(v.FeedId, pin, tgt)
					// Influx line protocol: measurement is the router MAC.
					sql := fmt.Sprintf("%s ip=%s,online=%s,cpu=%s,mem=%s,upload=%s,download=%s,rom=%s %d", v.Mac, influxkit.Decorate(s.Ip), s.OnlineTime, s.Cpu, s.Mem, s.Upload, s.Download, influxkit.Decorate(s.Rom), time.Now().UnixNano())
					influxkit.WriteDefaultDB(sql)
					//log.Println(sql)
				})
			}
		}
	})
}
// RebootTask schedules a daily 06:45 job that reboots any router whose
// income for the day fell below the configured threshold, notifying
// the user via WeChat.
func RebootTask(pin, tgt, user string, waitFree bool) {
	// Auto-reboot when income drops below the configured threshold.
	cronkit.AddFunc("0 45 6 */1 * ?", func() {
		// tgt is identical to wsKey.
		service.GetPointsDetail(pin, tgt, waitFree)
		for _, v := range model.PointsDetailMap.Read(pin) {
			commonkit.RecoverFuncWrapper(func() {
				if threshold := config.Conf[pin].Reboot; v.TodayIncome < threshold {
					feedId := model.RouterMap.MacConvertFeedId(pin, v.Mac)
					service.RebootRouter(feedId, pin, tgt)
					content := fmt.Sprintf("********京东云矿机********\n\n【%s】收益低于%s,已重启", v.Name, cast.ToString(threshold))
					wechat.Push2Wechat(user, content)
				}
			})
		}
	})
}
// PushPointTask schedules a daily 07:30 job that gathers router points
// totals and per-device details, formats a summary, and pushes it to
// WeChat Work.
func PushPointTask(pin, tgt, user string, waitFree bool) {
	// Fetch router points info and push it to WeChat Work.
	cronkit.AddFunc("0 30 7 */1 * ?", func() {
		// tgt is identical to wsKey.
		service.GetPointsDetail(pin, tgt, waitFree)
		var content string
		totalPoints := model.TotalPointsMap.Read(pin)
		content += "********京东云矿机********\n\n"
		content += fmt.Sprintf("今日总收:%s\n总收:%s\n总剩:%s\n\n", cast.ToString(totalPoints.TotalToday), cast.ToString(totalPoints.TotalIncome), cast.ToString(totalPoints.TotalRemain))
		l := model.PointsDetailMap.Len(pin)
		for i, v := range model.PointsDetailMap.Read(pin) {
			if waitFree {
				if l != i+1 {
					content += fmt.Sprintf("设备名:%s\n坐享其成:%s\n单台今收:%s\n单台总收:%s\n单台剩余:%s\n\n", v.Name, cast.ToString(v.WaitFreeDay), cast.ToString(v.TodayIncome), cast.ToString(v.AllIncome), cast.ToString(v.RemainIncome))
					continue
				}
				// Last device: single trailing newline only.
				content += fmt.Sprintf("设备名:%s\n坐享其成:%s\n单台今收:%s\n单台总收:%s\n单台剩余:%s\n", v.Name, cast.ToString(v.WaitFreeDay), cast.ToString(v.TodayIncome), cast.ToString(v.AllIncome), cast.ToString(v.RemainIncome))
			} else {
				if l != i+1 {
					content += fmt.Sprintf("设备名:%s\n单台今收:%s\n单台总收:%s\n单台剩余:%s\n\n", v.Name, cast.ToString(v.TodayIncome), cast.ToString(v.AllIncome), cast.ToString(v.RemainIncome))
					continue
				}
				// Last device: single trailing newline only.
				content += fmt.Sprintf("设备名:%s\n单台今收:%s\n单台总收:%s\n单台剩余:%s\n", v.Name, cast.ToString(v.TodayIncome), cast.ToString(v.AllIncome), cast.ToString(v.RemainIncome))
			}
		}
		wechat.Push2Wechat(user, content)
	})
}
|
package command
import (
"fmt"
"strings"
"github.com/lets-cli/lets/util"
)
// Names of the yaml keys recognized inside a single command mapping.
var (
	CMD = "cmd"
	DESCRIPTION = "description"
	ENV = "env"
	EvalEnv = "eval_env"
	OPTIONS = "options"
	DEPENDS = "depends"
	CHECKSUM = "checksum"
	PersistChecksum = "persist_checksum"
	AFTER = "after"
)
// validFields lists every yaml key a command mapping may contain; anything
// else is rejected by validateCommandFields.
var validFields = []string{
	CMD,
	DESCRIPTION,
	ENV,
	EvalEnv,
	OPTIONS,
	DEPENDS,
	CHECKSUM,
	PersistChecksum,
	AFTER,
}
// Command is the fully parsed representation of one command from the yaml
// config, including runtime state such as CLI overrides and checksums.
type Command struct {
	Name string
	// script to run
	Cmd string
	// script to run after cmd finished (cleanup, etc)
	After string
	// map of named scripts to run in parallel
	CmdMap map[string]string
	Description string
	// env from command
	Env map[string]string
	// env from -E flag
	OverrideEnv map[string]string
	// raw yaml text of the options section
	RawOptions string
	// parsed option values
	Options map[string]string
	// option values supplied on the command line
	CliOptions map[string]string
	// names of commands this one depends on
	Depends []string
	Checksum string
	ChecksumMap map[string]string
	PersistChecksum bool
	// prepared args - started from command name
	Args []string
	// run only specified commands from cmd map
	Only []string
	// run all but excluded commands from cmd map
	Exclude []string
	// if command has declared checksum
	hasChecksum bool
	// map of checksum name -> list of source patterns
	checksumSource map[string][]string
	// store loaded persisted checksums here
	persistedChecksums map[string]string
}
// ParseCommandError describes a failure to parse one command, recording the
// command name and the yaml path (leaf field and full dotted path) at which
// parsing failed.
type ParseCommandError struct {
	Name string
	Path struct {
		Full string
		Field string
	}
	Err error
}
// Error implements the error interface, combining the command name with the
// underlying cause.
func (e *ParseCommandError) Error() string {
	return fmt.Sprintf("failed to parse '%s' command: %s", e.Name, e.Err)
}
// newParseCommandError builds a ParseCommandError for command name,
// recording both the leaf field and the full dotted path (field plus an
// optional meta segment) at which parsing failed.
func newParseCommandError(msg string, name string, field string, meta string) error {
	parts := make([]string, 0, 2)
	parts = append(parts, field)
	if meta != "" {
		parts = append(parts, meta)
	}
	fullPath := strings.Join(parts, ".")
	return &ParseCommandError{
		Name: name,
		Path: struct {
			Full  string
			Field string
		}{
			Full:  fullPath,
			Field: field,
		},
		Err: fmt.Errorf("field %s: %s", fullPath, msg),
	}
}
// NewCommand returns a Command with the given name and an initialized
// (empty) Env map.
func NewCommand(name string) Command {
	c := Command{Name: name}
	c.Env = map[string]string{}
	return c
}
// ChecksumCalculator computes the command's checksum from its declared
// checksum sources; it is a no-op when no sources were declared.
func (cmd *Command) ChecksumCalculator(workDir string) error {
	if len(cmd.checksumSource) > 0 {
		return calculateChecksumFromSource(workDir, cmd)
	}
	return nil
}
// GetPersistedChecksums returns the checksums previously loaded from disk.
func (cmd *Command) GetPersistedChecksums() map[string]string {
	return cmd.persistedChecksums
}
// ParseAndValidateCommand parses and validates an unmarshaled yaml command
// mapping into newCmd. Fields are processed in a fixed order and the first
// parser error aborts the whole command.
func ParseAndValidateCommand(newCmd *Command, rawCommand map[interface{}]interface{}) error {
	if err := validateCommandFields(rawCommand, validFields); err != nil {
		return err
	}
	// Field parsers, applied in declaration order when the field is present.
	parsers := []struct {
		field string
		parse func(interface{}, *Command) error
	}{
		{CMD, parseAndValidateCmd},
		{AFTER, parseAndValidateAfter},
		{DESCRIPTION, parseAndValidateDescription},
		{ENV, parseAndValidateEnv},
		{EvalEnv, parseAndValidateEvalEnv},
		{OPTIONS, parseAndValidateOptions},
		{DEPENDS, parseAndValidateDepends},
		{CHECKSUM, parseAndValidateChecksum},
		{PersistChecksum, parseAndValidatePersistChecksum},
	}
	for _, p := range parsers {
		raw, ok := rawCommand[p.field]
		if !ok {
			continue
		}
		if err := p.parse(raw, newCmd); err != nil {
			return err
		}
	}
	return nil
}
// validateCommandFields checks that every key of the raw command mapping is
// a string naming one of the allowed fields.
func validateCommandFields(rawKeyValue map[interface{}]interface{}, validFields []string) error {
	for key := range rawKeyValue {
		// Bug fix: yaml mapping keys are not guaranteed to be strings; the
		// previous unchecked key.(string) assertion panicked on e.g. numeric
		// keys. Reject them with an error instead.
		name, ok := key.(string)
		if !ok {
			return fmt.Errorf("unknown command field '%v'", key)
		}
		if !util.IsStringInList(name, validFields) {
			return fmt.Errorf("unknown command field '%s'", name)
		}
	}
	return nil
}
|
package protocol
import (
"fmt"
mintcommon "mint-server/common"
"mint-server/config"
"net"
"github.com/golang/protobuf/proto"
)
// functionType names the logical operation decoded from a frame's numeric
// function code.
type functionType string

// Logical function names, mapped from the numeric frame codes below.
const (
	WELCOME functionType = "Welcome"
	SIGNIN functionType = "SignIn"
	SIGNUP functionType = "SignUp"
	UNKNOWN functionType = "Unknown"
	SETPROGRESS functionType = "SetProgress"
	GETPROGRESS functionType = "GetProgress"
)
// Numeric frame type codes carried in bytes [2:4] of each message frame.
const (
	HANDSHAKE = 0
	REQUEST_SIGNIN = 1
	REQUEST_SIGNUP = 2
	RESPONSE_SIGNIN = 3
	RESPONSE_SIGNUP = 4
	REQUEST_SET_PROGR = 5
	RESPONSE_PROGRESS = 6
	REQUEST_GET_PROGR = 7
)
// MainHandler dispatches one or more framed client messages contained in
// data. Frame layout: bytes [0:2] protobuf length, [2:4] function type,
// [4:5] payload length, [5:5+len] protobuf payload. cnt counts the bytes
// still to be consumed; *CID receives the player id after a successful
// sign-in or sign-up.
func MainHandler(conn *net.TCPConn, CID *int, msgChan chan []byte, data []byte, cnt int) error {
	for cnt > 0 {
		fmt.Println(data) // debug: raw inbound bytes
		if len(data) <= 4 {
			// Not enough bytes for a frame header: report unknown function.
			if err := serverResponse(conn, msgChan, nil, UNKNOWN, ServerReturnCode_UNKNOWN_FUNC); err != nil {
				return err
			}
			return nil
		}
		proto_len := mintcommon.BytesToUint16(data[0:2])
		fn_type := mintcommon.BytesToUint16(data[2:4])
		suflen := mintcommon.BytesToUint8(data[4:5])
		// Robustness fix: refuse frames whose declared payload length exceeds
		// the bytes actually received, instead of panicking on the slices below.
		if len(data) < 5+int(suflen) {
			if err := serverResponse(conn, msgChan, nil, UNKNOWN, ServerReturnCode_UNKNOWN_FUNC); err != nil {
				return err
			}
			return nil
		}
		cnt = cnt - 5 - int(suflen)
		fmt.Println(fn_type, proto_len) // debug: frame header
		rl := &ReqLogin{}
		rr := &ReqRegister{}
		// Map the numeric frame code to a logical function name.
		var fn functionType
		switch fn_type {
		case HANDSHAKE:
			fn = WELCOME
		case REQUEST_SIGNIN:
			fn = SIGNIN
		case REQUEST_SIGNUP:
			fn = SIGNUP
		case REQUEST_SET_PROGR:
			fn = SETPROGRESS
		case REQUEST_GET_PROGR:
			fn = GETPROGRESS
		default:
			fn = UNKNOWN
		}
		au := &PlayerInfo{}
		switch fn {
		case WELCOME:
			if err := writeShakehandResponse(conn, msgChan); err != nil {
				return err
			}
		case SIGNIN:
			if err := proto.Unmarshal(data[5:5+suflen], rl); err != nil {
				return err
			}
			au.Account = rl.GetAccount()
			au.Password = rl.GetPassword()
			mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
				config.GlobalConfiguration.LogToConsole,
				config.GlobalConfiguration.LogPath,
				rl.String())
			result, __id := signIn(au)
			*CID = __id
			if err2 := serverResponse(conn, msgChan, au, SIGNIN, result); err2 != nil {
				return err2
			}
		case SIGNUP:
			if err := proto.Unmarshal(data[5:5+suflen], rr); err != nil {
				return err
			}
			au = rr.GetPlayerInfo()
			mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
				config.GlobalConfiguration.LogToConsole,
				config.GlobalConfiguration.LogPath,
				rr.String())
			result, __id := signUp(au)
			*CID = __id
			if err2 := serverResponse(conn, msgChan, au, SIGNUP, result); err2 != nil {
				return err2
			}
		case GETPROGRESS:
			mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
				config.GlobalConfiguration.LogToConsole,
				config.GlobalConfiguration.LogPath,
				fmt.Sprintf("[info] user get progress, uid %d", *CID))
			rp, err := getProgress(*CID)
			if err != nil && rp == nil {
				// No stored progress: signal "none" with -1/-1.
				rp = &RetProgress{Chapter: -1, Section: -1}
			}
			if err3 := writeProgressResponse(conn, msgChan, *CID, rp, false); err3 != nil {
				return err3
			}
		case SETPROGRESS:
			rp := &ReqProgress{}
			if err := proto.Unmarshal(data[5:5+suflen], rp); err != nil {
				return err
			}
			// Bug fix: log the progress request (rp), not the unrelated and
			// always-empty register request (rr).
			mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
				config.GlobalConfiguration.LogToConsole,
				config.GlobalConfiguration.LogPath,
				rp.String())
			rspp, err := setProgress(*CID, rp)
			if err != nil {
				return err
			}
			if err2 := writeProgressResponse(conn, msgChan, *CID, rspp, true); err2 != nil {
				return err2
			}
		case UNKNOWN:
			if err := serverResponse(conn, msgChan, au, fn, ServerReturnCode_UNKNOWN_FUNC); err != nil {
				return err
			}
		default:
			if err := serverResponse(conn, msgChan, au, fn, ServerReturnCode_UNKNOWN_FUNC); err != nil {
				return err
			}
		}
		// Advance to the next frame in the buffer.
		data = data[5+suflen:]
	}
	return nil
}
// serverResponse reports the outcome of a sign-in/sign-up request back to
// the client: every recognized return code is logged and forwarded
// unchanged; unrecognized codes and unknown functions are logged (or
// silently ignored, matching the original behavior) without a reply.
func serverResponse(conn *net.TCPConn, msgChan chan []byte, au *PlayerInfo, ft functionType, result ServerReturnCode) error {
	// logResult writes one debug line; only OK counts as success.
	logResult := func(action string) {
		outcome := "failed"
		if result == ServerReturnCode_OK {
			outcome = "succeeded"
		}
		mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
			config.GlobalConfiguration.LogToConsole,
			config.GlobalConfiguration.LogPath,
			fmt.Sprintf("[info] %s %s, account: %s", action, outcome, au.Account))
	}
	switch ft {
	case SIGNIN:
		switch result {
		case ServerReturnCode_OK,
			ServerReturnCode_ACC_PSW_NO_MATCH,
			ServerReturnCode_ACC_INVALID,
			ServerReturnCode_ACC_TOO_SHORT,
			ServerReturnCode_ACC_TOO_LONG,
			ServerReturnCode_PSW_INVALID,
			ServerReturnCode_PSW_TOO_SHORT,
			ServerReturnCode_PSW_TOO_LONG,
			ServerReturnCode_DBFAIL:
			logResult("sign in")
			return writeSignInResponse(conn, msgChan, result)
		}
	case SIGNUP:
		switch result {
		case ServerReturnCode_OK,
			ServerReturnCode_ACC_INVALID,
			ServerReturnCode_ACC_TOO_SHORT,
			ServerReturnCode_ACC_TOO_LONG,
			ServerReturnCode_PSW_INVALID,
			ServerReturnCode_PSW_TOO_SHORT,
			ServerReturnCode_PSW_TOO_LONG,
			ServerReturnCode_ACC_EXISTED,
			ServerReturnCode_DBFAIL:
			// Bug fix: several sign-up branches previously logged
			// "sign in failed" due to copy/paste.
			logResult("sign up")
			return writeSignUpResponse(conn, msgChan, result)
		}
	default:
		mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
			config.GlobalConfiguration.LogToConsole,
			config.GlobalConfiguration.LogPath,
			"[info] Unknown function in ServerResponse")
		return nil
	}
	// Unrecognized return code for a known function: no reply (as before).
	return nil
}
// writeSignInResponse frames a RetLogin protobuf carrying the given return
// code and queues it on msgChan for delivery to the client.
func writeSignInResponse(conn *net.TCPConn, msgChan chan []byte, isError ServerReturnCode) error {
	payload, err := proto.Marshal(&RetLogin{Code: isError})
	if err != nil {
		return err
	}
	frame := mintcommon.BytesConcatenate(
		mintcommon.Uint16ToBytes(uint16(len(payload)+1)),
		mintcommon.Uint16ToBytes(RESPONSE_SIGNIN),
		mintcommon.Uint8ToBytes(uint8(len(payload))),
		payload)
	msgChan <- frame
	return nil
}
// writeSignUpResponse frames a RetRegister protobuf carrying the given
// return code and queues it on msgChan for delivery to the client.
func writeSignUpResponse(conn *net.TCPConn, msgChan chan []byte, isError ServerReturnCode) error {
	payload, err := proto.Marshal(&RetRegister{Code: isError})
	if err != nil {
		return err
	}
	frame := mintcommon.BytesConcatenate(
		mintcommon.Uint16ToBytes(uint16(len(payload)+1)),
		mintcommon.Uint16ToBytes(RESPONSE_SIGNUP),
		mintcommon.Uint8ToBytes(uint8(len(payload))),
		payload)
	msgChan <- frame
	return nil
}
// writeProgressResponse frames a RetProgress protobuf and queues it on
// msgChan. When output is set, the set-progress outcome is logged first;
// a chapter/section of -1 marks a failed update.
func writeProgressResponse(conn *net.TCPConn, msgChan chan []byte, id int, rsp *RetProgress, output bool) error {
	chapter, section := rsp.GetChapter(), rsp.GetSection()
	if output {
		msg := fmt.Sprintf("[error] set progress for, id: %d, %d, %d failed",
			id, chapter, section)
		if chapter != -1 && section != -1 {
			msg = fmt.Sprintf("[info] set progress for, id: %d, chapter: %d, section: %d succeeded",
				id, chapter, section)
		}
		mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
			config.GlobalConfiguration.LogToConsole,
			config.GlobalConfiguration.LogPath,
			msg)
	}
	mintcommon.DebugPrint(config.GlobalConfiguration.EnableLog,
		config.GlobalConfiguration.LogToConsole,
		config.GlobalConfiguration.LogPath,
		fmt.Sprintf("[info] return progress to, id: %d, chapter: %d, section: %d",
			id, chapter, section))
	payload, err := proto.Marshal(rsp)
	if err != nil {
		return err
	}
	frame := mintcommon.BytesConcatenate(
		mintcommon.Uint16ToBytes(uint16(len(payload)+1)),
		mintcommon.Uint16ToBytes(RESPONSE_PROGRESS),
		mintcommon.Uint8ToBytes(uint8(len(payload))),
		payload)
	msgChan <- frame
	return nil
}
// writeShakehandResponse is the internal implementation of serverResponse
// it sends the proto message to client use net.TCPConn.Write
func writeShakehandResponse(conn *net.TCPConn, msgChan chan []byte) error {
srvrsp := &Handshake{Token: "Welcome"}
data, err := proto.Marshal(srvrsp)
if err != nil {
return err
}
resp_type := mintcommon.Uint16ToBytes(HANDSHAKE)
suflen := mintcommon.Uint8ToBytes(uint8(len(data)))
buf_len := mintcommon.Uint16ToBytes(uint16(len(data) + 1))
binMsg := mintcommon.BytesConcatenate(buf_len, resp_type, suflen, data)
msgChan <- binMsg
return nil
}
// WrapSignUpRequest wraps a PlayerInfo message into a ReqRegister request.
func WrapSignUpRequest(user *PlayerInfo) *ReqRegister {
	return &ReqRegister{PlayerInfo: user}
}
// WrapSignInRequest copies a PlayerInfo's credentials into a ReqLogin request.
func WrapSignInRequest(user *PlayerInfo) *ReqLogin {
	return &ReqLogin{
		Account:  user.GetAccount(),
		Password: user.GetPassword(),
	}
}
// WrapSetProgressRequest builds a RetProgress message from a chapter and
// section pair.
func WrapSetProgressRequest(chap int, sec int) *RetProgress {
	return &RetProgress{
		Chapter: int32(chap),
		Section: int32(sec),
	}
}
|
/*
* Databricks
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 0.0.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package models
// ClustersClusterState is the lifecycle state of a Databricks cluster.
type ClustersClusterState string

// List of ClustersClusterState
const (
	PENDING ClustersClusterState = "PENDING"
	RUNNING ClustersClusterState = "RUNNING"
	RESTARTING ClustersClusterState = "RESTARTING"
	RESIZING ClustersClusterState = "RESIZING"
	TERMINATING ClustersClusterState = "TERMINATING"
	TERMINATED ClustersClusterState = "TERMINATED"
	// Trailing underscore keeps the generated Go identifier distinct from the
	// raw "ERROR" wire value.
	ERROR_ ClustersClusterState = "ERROR"
	UNKNOWN ClustersClusterState = "UNKNOWN"
)
|
package subset
import "testing"
// testCase is one nonDivisibleSubset fixture: the divisor k, the input set,
// and the expected maximum subset size.
type testCase struct {
	name string
	k int32
	set []int32
	ans int32
}
// testCases holds the sample inputs exercised by TestNonDivisibleSubset.
var testCases = []testCase{
	{"0", 4, []int32{19, 10, 12, 10, 24, 25, 22}, 3},
	{"1", 3, []int32{1, 7, 2, 4}, 3},
	{"2", 5, []int32{6, 7, 8, 9, 10, 11, 12}, 5},
}
// TestNonDivisibleSubset runs nonDivisibleSubset against each fixture as a
// named subtest.
func TestNonDivisibleSubset(t *testing.T) {
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := nonDivisibleSubset(tc.k, tc.set); got != tc.ans {
				t.Errorf("Found size %d set, expected %d", got, tc.ans)
			}
		})
	}
}
|
package routers
import (
"eff/controllers"
"github.com/astaxie/beego"
)
// init registers the application's HTTP routes with beego: the index page
// plus list, delete-by-id, and status endpoints for the Dah and Dih
// controllers.
func init() {
	beego.Router("/", &controllers.MainController{})
	// Dah endpoints.
	beego.Router("/dah", &controllers.Dah{})
	beego.Router("/dah/delete/:id([0-9]+)", &controllers.Dah{}, "get:Delete")
	beego.Router("/dah/status", &controllers.Dah{}, "get:Status")
	// Dih endpoints.
	beego.Router("/dih", &controllers.Dih{})
	beego.Router("/dih/delete/:id([0-9]+)", &controllers.Dih{}, "get:Delete")
	beego.Router("/dih/status", &controllers.Dih{}, "get:Status")
}
|
package localregistry
import (
"context"
"time"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
appsapplyv1 "k8s.io/client-go/applyconfigurations/apps/v1"
)
// ensureStatefulset makes sure the local registry StatefulSet exists and is
// up to date: it removes a leftover Deployment (from a previously
// non-persistent registry), creates or fetches the StatefulSet with a retry
// loop, then server-side-applies the desired spec.
func (r *LocalRegistry) ensureStatefulset(ctx devspacecontext.Context) (*appsv1.StatefulSet, error) {
	// Switching from an unpersistent registry, delete the deployment.
	_, err := ctx.KubeClient().KubeClient().AppsV1().Deployments(r.Namespace).Get(ctx.Context(), r.Name, metav1.GetOptions{})
	if err == nil {
		err := ctx.KubeClient().KubeClient().AppsV1().Deployments(r.Namespace).Delete(ctx.Context(), r.Name, metav1.DeleteOptions{})
		// Bug fix: the condition was inverted (`err != nil && IsNotFound`),
		// which returned the harmless not-found error and swallowed every
		// real deletion failure. Ignore not-found; surface anything else.
		if err != nil && !kerrors.IsNotFound(err) {
			return nil, err
		}
	}
	var existing *appsv1.StatefulSet
	desired := r.getStatefulSet()
	kubeClient := ctx.KubeClient()
	// Get-or-create with retry: a concurrent creator yields AlreadyExists,
	// in which case we poll again until the object is readable.
	err = wait.PollImmediateWithContext(ctx.Context(), time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
		var err error
		existing, err = kubeClient.KubeClient().AppsV1().StatefulSets(r.Namespace).Get(ctx, r.Name, metav1.GetOptions{})
		if err == nil {
			return true, nil
		}
		if kerrors.IsNotFound(err) {
			existing, err = kubeClient.KubeClient().AppsV1().StatefulSets(r.Namespace).Create(ctx, desired, metav1.CreateOptions{})
			if err == nil {
				return true, nil
			}
			if kerrors.IsAlreadyExists(err) {
				return false, nil
			}
			return false, err
		}
		return false, err
	})
	if err != nil {
		return nil, err
	}
	// Use server side apply if it does exist
	applyConfiguration, err := appsapplyv1.ExtractStatefulSet(existing, ApplyFieldManager)
	if err != nil {
		return nil, err
	}
	return ctx.KubeClient().KubeClient().AppsV1().StatefulSets(r.Namespace).Apply(
		ctx.Context(),
		applyConfiguration,
		metav1.ApplyOptions{
			FieldManager: ApplyFieldManager,
			Force:        true,
		},
	)
}
// getStatefulSet builds the desired registry StatefulSet: one pod (selected
// by app=<name>) running the registry and buildkit containers, plus a PVC
// template for registry storage.
func (r *LocalRegistry) getStatefulSet() *appsv1.StatefulSet {
	// Leave the storage class nil unless configured, so the cluster default
	// storage class applies.
	var storageClassName *string
	if r.StorageClassName != "" {
		storageClassName = &r.StorageClassName
	}
	return &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: r.Name,
		},
		Spec: appsv1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": r.Name,
				},
			},
			// Persistent storage for the registry data.
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: r.Name,
					},
					Spec: corev1.PersistentVolumeClaimSpec{
						AccessModes: []corev1.PersistentVolumeAccessMode{
							corev1.ReadWriteOnce,
						},
						Resources: corev1.ResourceRequirements{
							Requests: corev1.ResourceList{
								corev1.ResourceStorage: resource.MustParse(r.StorageSize),
							},
						},
						StorageClassName: storageClassName,
					},
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": r.Name,
					},
					// buildkitd needs an unconfined apparmor profile to run.
					Annotations: map[string]string{
						"container.apparmor.security.beta.kubernetes.io/buildkitd": "unconfined",
					},
				},
				Spec: corev1.PodSpec{
					// new(bool) is false: disable service env var injection.
					EnableServiceLinks: new(bool),
					Containers: getContainers(r.RegistryImage, r.BuildKitImage, r.Name, int32(r.Port), r.LocalBuild),
					// Scratch space shared with buildkitd.
					Volumes: []corev1.Volume{
						{
							VolumeSource: corev1.VolumeSource{
								EmptyDir: &corev1.EmptyDirVolumeSource{},
							},
							Name: "buildkitd",
						},
					},
				},
			},
		},
	}
}
|
//nolint:dupl
package mongodb
import (
"context"
"errors"
"github.com/joshprzybyszewski/cribbage/model"
"github.com/joshprzybyszewski/cribbage/server/interaction"
"github.com/joshprzybyszewski/cribbage/server/persistence"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsoncodec"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
const (
	// needs to match interaction.PlayerMeans.PlayerID
	interactionCollectionIndex string = `playerID`
)

// Compile-time check that interactionService satisfies the interface.
var _ persistence.InteractionService = (*interactionService)(nil)

// interactionService is the mongo-backed persistence.InteractionService.
type interactionService struct {
	// NOTE(review): storing a context in a struct is unusual in Go; it is
	// carried here for session-scoped operations — matches the file's pattern.
	ctx context.Context
	session mongo.Session
	col *mongo.Collection
}
// getInteractionService builds the mongo-backed InteractionService, making
// sure the playerID index exists on the interactions collection first.
func getInteractionService(
	ctx context.Context,
	session mongo.Session,
	mdb *mongo.Database,
	r *bsoncodec.Registry,
) (persistence.InteractionService, error) {
	col := mdb.Collection(interactionsCollectionName, &options.CollectionOptions{Registry: r})
	idxs := col.Indexes()
	hasIdx, err := hasInteractionCollectionIndex(ctx, idxs)
	if err != nil {
		return nil, err
	}
	if !hasIdx {
		if err := createInteractionCollectionIndex(ctx, idxs); err != nil {
			return nil, err
		}
	}
	svc := &interactionService{
		ctx:     ctx,
		session: session,
		col:     col,
	}
	return svc, nil
}
// hasInteractionCollectionIndex reports whether the playerID index already
// exists on the interactions collection.
func hasInteractionCollectionIndex(ctx context.Context, idxs mongo.IndexView) (bool, error) {
	return hasCollectionIndex(ctx, idxs, interactionCollectionIndex)
}
// createInteractionCollectionIndex creates the playerID index on the
// interactions collection.
func createInteractionCollectionIndex(ctx context.Context, idxs mongo.IndexView) error {
	return createCollectionIndex(ctx, idxs, interactionCollectionIndex)
}
// bsonInteractionFilter builds the bson filter matching the PlayerMeans
// document for the given player id.
func bsonInteractionFilter(id model.PlayerID) interface{} {
	// interaction.PlayerMeans{PlayerID: id}
	return bson.M{`playerID`: id}
}
// Get fetches the PlayerMeans stored for id, returning
// persistence.ErrInteractionNotFound when no document matches.
func (s *interactionService) Get(id model.PlayerID) (interaction.PlayerMeans, error) {
	result := interaction.PlayerMeans{}
	filter := bsonInteractionFilter(id)
	err := mongo.WithSession(s.ctx, s.session, func(sc mongo.SessionContext) error {
		if err := s.col.FindOne(sc, filter).Decode(&result); err != nil {
			// errors.Is instead of == so wrapped driver errors still match.
			if errors.Is(err, mongo.ErrNoDocuments) {
				return persistence.ErrInteractionNotFound
			}
			return err
		}
		return nil
	})
	if err != nil {
		return interaction.PlayerMeans{}, err
	}
	return result, nil
}
// Create inserts pm as a new interaction document.
//
// NOTE(review): when a document for pm.PlayerID already exists, Get returns
// a nil error and this method still proceeds to insert a second document —
// presumably callers guarantee uniqueness; verify whether an
// "already exists" error was intended here instead.
func (s *interactionService) Create(pm interaction.PlayerMeans) error {
	_, err := s.Get(pm.PlayerID)
	// Only a real lookup failure aborts; "not found" is the expected case.
	if err != nil && err != persistence.ErrInteractionNotFound {
		return err
	}
	return mongo.WithSession(s.ctx, s.session, func(sc mongo.SessionContext) error {
		ior, err := s.col.InsertOne(sc, pm)
		if err != nil {
			return err
		}
		if ior.InsertedID == nil {
			// :shrug: not sure if this is the right thing to check
			return errors.New(`interaction not created`)
		}
		return nil
	})
}
// Update stores pm: it inserts a new document when none exists for the
// player, and replaces the existing one otherwise.
func (s *interactionService) Update(pm interaction.PlayerMeans) error {
	if _, err := s.Get(pm.PlayerID); err == persistence.ErrInteractionNotFound {
		return mongo.WithSession(s.ctx, s.session, func(sc mongo.SessionContext) error {
			ior, err := s.col.InsertOne(sc, pm)
			if err != nil {
				return err
			}
			if ior.InsertedID == nil {
				// :shrug: not sure if this is the right thing to check
				return errors.New(`interaction not updated`)
			}
			return nil
		})
	}
	opt := &options.ReplaceOptions{}
	opt.SetUpsert(true)
	filter := bsonInteractionFilter(pm.PlayerID)
	return mongo.WithSession(s.ctx, s.session, func(sc mongo.SessionContext) error {
		// Bug fix: ReplaceOne takes (ctx, filter, replacement, opts...); the
		// previous call `ReplaceOne(sc, pm, opt)` passed pm as the *filter*
		// and the options struct as the *replacement document*.
		ur, err := s.col.ReplaceOne(sc, filter, pm, opt)
		if err != nil {
			return err
		}
		switch {
		case ur.ModifiedCount > 1:
			return errors.New(`modified too many interactions`)
		case ur.MatchedCount > 1:
			return errors.New(`matched more than one interaction`)
		case ur.UpsertedCount > 1:
			return errors.New(`upserted more than one interaction`)
		}
		return nil
	})
}
|
package main
/*
Developed by "https://github.com/vitorfmc"
=======================================================
Overview:
=======================================================
This Lambda Function is example of integration with DynamoDB.
The idea is make a insert into DB.
Obs.: Remember to give DynamoDB policies to your Lambda Function
*/
import (
"context"
"fmt"
"os"
"strconv"
"time"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
// SaleHistory is the DynamoDB record for one sale: identity, timestamp,
// total amount, and the sold items.
type SaleHistory struct {
	Id string `json:"id"`
	SaleId string `json:"sale_id"`
	SaleDate time.Time `json:"sale_date"`
	Amount float64 `json:"amount"`
	Items *[]ItemHistory `json:"items"`
}
// ItemHistory is one line item of a sale. Note the Code field serializes
// under the json key "ean".
type ItemHistory struct {
	SaleId string `json:"sale_id"`
	Id string `json:"id"`
	Code string `json:"ean"`
	Quantity int64 `json:"quantity"`
	Value float64 `json:"value"`
}
// main starts the Lambda runtime with handleRequest as the handler.
func main() {
	lambda.Start(handleRequest)
}
// handleRequest writes a hard-coded example SaleHistory record into the
// DynamoDB table named by the ITEM_NAME environment variable. Errors are
// logged (the handler returns nothing to the Lambda runtime).
func handleRequest(ctx context.Context, e events.DynamoDBEvent) {
	sess, err := session.NewSession(&aws.Config{})
	if err != nil {
		fmt.Println("[ERROR]: ", err)
		return
	}
	db := dynamodb.New(sess, aws.NewConfig().WithRegion("us-east-1"))
	// Sale id: current timestamp in yyyymmddhhmmss form.
	currentTime := time.Now().Format("20060102150405")
	saleItens := make([]ItemHistory, 0, 2)
	saleItens = append(saleItens, ItemHistory{Code: "19283129382193822", Quantity: int64(10), Value: float64(10.20)})
	saleItens = append(saleItens, ItemHistory{Code: "19283129382193829", Quantity: int64(10), Value: float64(5.21)})
	// Record id: epoch milliseconds.
	currentId := time.Now().UnixNano() / int64(time.Millisecond)
	history := &SaleHistory{
		Id:       strconv.FormatInt(currentId, 10),
		SaleId:   currentTime,
		SaleDate: time.Now(),
		Amount:   float64(15.41),
		Items:    &saleItens,
	}
	itemMap, err := dynamodbattribute.MarshalMap(history)
	if err != nil {
		fmt.Println("[ERROR]: ", err)
		return
	}
	// Bug fix: the PutItem error was previously assigned and silently dropped.
	if _, err = db.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String(os.Getenv("ITEM_NAME")),
		Item:      itemMap,
	}); err != nil {
		fmt.Println("[ERROR]: ", err)
	}
}
|
// Update view - updates already installed dotfiles
// =================================================
package views
import (
"net/http"
"text/template"
)
// UpdateData carries the values rendered into the bash update-script
// template (tmplUpdate).
type UpdateData struct {
	ClientSecret string
	// body of the bash `case` statement listing the repo packages
	RepoOpts string
	BaseURL string
	URLMask string
}
// ServeUpdate renders the bash update script (tmplUpdate) for already
// installed dotfiles and writes it to the response.
func ServeUpdate(w http.ResponseWriter, r *http.Request, baseurl string, client_secret string, directory string, foldersMap map[string]string, urlMask string) {
	// generate body of bash case with repo packages
	repoPackages := repoPackagesCasePrint(foldersMap, true, directory, baseurl)
	// build data for template
	data := UpdateData{client_secret, repoPackages, baseurl, urlMask}
	// Robustness fix: the handler previously panicked on template errors;
	// answer 500 instead so one bad request cannot tear down the connection.
	tmpl, err := template.New("update").Parse(tmplUpdate)
	if err != nil {
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	if err := tmpl.Execute(w, data); err != nil {
		http.Error(w, "internal server error", http.StatusInternalServerError)
	}
}
// tmplUpdate is the bash script template served by ServeUpdate: it refuses
// to run when no dotfiles are managed, optionally re-clones the dotfiles
// repo, then updates every package listed in ~/.dotman/managed.
var tmplUpdate = gitCloneTmpl + `
SECRET="{{ .ClientSecret }}"
selectPackage() {
case "$1" in
{{ .RepoOpts }}
esac
}
if [ ! -f "$HOME/.dotman/managed" ]; then
echo " It appears, you don't manage any dotfiles using dotman. Exiting."
exit 1
fi
if [ -d "$HOME/.dotman/dotfiles" ]; then
gitCloneIfPresent "$SECRET"
fi
for NAME in $(cat "$HOME/.dotman/managed"); do
selectPackage $NAME
done
`
|
package gore
import (
"os"
"testing"
)
// init enables the redis-backed tests only when TEST_REDIS_CLIENT is set,
// so the suite can run without a redis server.
func init() {
	if os.Getenv("TEST_REDIS_CLIENT") != "" {
		shouldTest = true
	}
}
// TestPool hammers the pool with 10000 concurrent SETs, then 10000
// concurrent GETs verifying the stored values, and finally flushes redis.
//
// Bug fix: the goroutines previously called t.Fatal, but FailNow must only
// be called from the goroutine running the test function; spawned
// goroutines now use t.Error and return.
func TestPool(t *testing.T) {
	if !shouldTest {
		return
	}
	conn, err := Dial("localhost:6379")
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	pool := &Pool{
		InitialConn: 5,
		MaximumConn: 5,
	}
	err = pool.Dial("localhost:6379")
	if err != nil {
		t.Fatal(err)
	}
	c := make(chan bool, 20)
	for i := 0; i < 10000; i++ {
		go func(pool *Pool, c chan bool, x int64) {
			defer func() {
				c <- true
			}()
			conn, err := pool.Acquire()
			if err != nil || conn == nil {
				t.Error(err, "nil")
				return
			}
			defer pool.Release(conn)
			rep, err := NewCommand("SET", x, x).Run(conn)
			if err != nil || !rep.IsOk() {
				t.Error(err, "not ok")
			}
		}(pool, c, int64(i))
	}
	for i := 0; i < 10000; i++ {
		<-c
	}
	for i := 0; i < 10000; i++ {
		go func(pool *Pool, c chan bool, x int64) {
			defer func() {
				c <- true
			}()
			conn, err := pool.Acquire()
			if err != nil || conn == nil {
				t.Error(err, "nil")
				return
			}
			defer pool.Release(conn)
			rep, err := NewCommand("GET", x).Run(conn)
			if err != nil {
				t.Error(err)
				return
			}
			y, err := rep.Int()
			if err != nil || y != x {
				t.Error(err, x, y)
			}
		}(pool, c, int64(i))
	}
	for i := 0; i < 10000; i++ {
		<-c
	}
	// Cleanup runs on the test goroutine, so Fatal is safe here.
	rep, err := NewCommand("FLUSHALL").Run(conn)
	if err != nil || !rep.IsOk() {
		t.Fatal(err, "not ok")
	}
}
// TestPoolClose starts 1000 acquirers against a 20-connection pool, waits
// until 20 have acquired, closes the pool, and drains the rest.
//
// Bug fix: the spawned goroutines previously called t.Fatal; FailNow must
// only run on the test goroutine, so they now use t.Error and return.
func TestPoolClose(t *testing.T) {
	if !shouldTest {
		return
	}
	pool := &Pool{
		InitialConn: 20,
		MaximumConn: 20,
	}
	err := pool.Dial("localhost:6379")
	if err != nil {
		t.Fatal(err)
	}
	c := make(chan bool, 20)
	ready := make(chan bool, 10)
	for i := 0; i < 1000; i++ {
		go func() {
			defer func() {
				c <- true
			}()
			conn, err := pool.Acquire()
			if err != nil {
				t.Error(err)
				return
			}
			if conn != nil {
				ready <- true
			}
		}()
	}
	for i := 0; i < 20; i++ {
		<-ready
	}
	pool.Close()
	for i := 0; i < 1000; i++ {
		<-c
	}
}
|
package piscine
import "github.com/01-edu/z01"
// PrintNbrInOrder prints the decimal digits of n sorted in ascending
// order. Negative numbers print nothing; zero prints a single '0'.
func PrintNbrInOrder(n int) {
	if n < 0 {
		return
	}
	if n == 0 {
		z01.PrintRune('0')
	}
	// counts[d] is how many times digit d appears in n.
	var counts [10]int
	for rest := n; rest != 0; rest /= 10 {
		counts[rest%10]++
	}
	for digit, cnt := range counts {
		for ; cnt > 0; cnt-- {
			z01.PrintRune(rune(digit) + '0')
		}
	}
}
|
package spacelift
import (
"fmt"
"sort"
"strings"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
e "github.com/cloudposse/atmos/internal/exec"
cfg "github.com/cloudposse/atmos/pkg/config"
"github.com/cloudposse/atmos/pkg/schema"
s "github.com/cloudposse/atmos/pkg/stack"
u "github.com/cloudposse/atmos/pkg/utils"
)
// CreateSpaceliftStacks takes a list of paths to YAML config files, processes and deep-merges all imports,
// and returns a map of Spacelift stack configs.
//
// When filePaths is non-empty the stack configs are read from those files
// (with an empty stack name pattern); otherwise all paths and the name
// pattern are resolved from the CLI configuration.
func CreateSpaceliftStacks(
	stacksBasePath string,
	terraformComponentsBasePath string,
	helmfileComponentsBasePath string,
	filePaths []string,
	processStackDeps bool,
	processComponentDeps bool,
	processImports bool,
	stackConfigPathTemplate string,
) (map[string]any, error) {
	if len(filePaths) > 0 {
		_, stacks, rawStackConfigs, err := s.ProcessYAMLConfigFiles(
			stacksBasePath,
			terraformComponentsBasePath,
			helmfileComponentsBasePath,
			filePaths,
			processStackDeps,
			processComponentDeps,
			false,
		)
		if err != nil {
			u.LogError(err)
			return nil, err
		}
		return TransformStackConfigToSpaceliftStacks(stacks, stackConfigPathTemplate, "", processImports, rawStackConfigs)
	}

	// No explicit file paths given: resolve everything from the CLI config.
	cliConfig, err := cfg.InitCliConfig(schema.ConfigAndStacksInfo{}, true)
	if err != nil {
		u.LogError(err)
		return nil, err
	}

	_, stacks, rawStackConfigs, err := s.ProcessYAMLConfigFiles(
		cliConfig.StacksBaseAbsolutePath,
		cliConfig.TerraformDirAbsolutePath,
		cliConfig.HelmfileDirAbsolutePath,
		cliConfig.StackConfigFilesAbsolutePaths,
		processStackDeps,
		processComponentDeps,
		false,
	)
	if err != nil {
		u.LogError(err)
		return nil, err
	}

	return TransformStackConfigToSpaceliftStacks(
		stacks,
		stackConfigPathTemplate,
		cliConfig.Stacks.NamePattern,
		processImports,
		rawStackConfigs,
	)
}
// TransformStackConfigToSpaceliftStacks takes a map of stack configs and transforms it to a map of Spacelift stacks.
//
// For every terraform component with `settings.spacelift.workspace_enabled: true`
// it assembles a Spacelift stack config (vars, env, backend, deps, labels,
// workspace, metadata) keyed by the Spacelift stack name (with '/' replaced
// by '-'). Duplicate stack names produce an error.
//
// NOTE(review): the unchecked type assertions below (e.g. on "imports",
// "components", "settings") will panic on unexpected config shapes —
// presumably the upstream YAML processing guarantees them; verify.
func TransformStackConfigToSpaceliftStacks(
	stacks map[string]any,
	stackConfigPathTemplate string,
	stackNamePattern string,
	processImports bool,
	rawStackConfigs map[string]map[string]any,
) (map[string]any, error) {
	var err error
	res := map[string]any{}

	allStackNames, err := e.BuildSpaceliftStackNames(stacks, stackNamePattern)
	if err != nil {
		return nil, err
	}

	for stackName, stackConfig := range stacks {
		config := stackConfig.(map[any]any)
		var imports []string

		if processImports {
			if i, ok := config["imports"]; ok {
				imports = i.([]string)
			}
		}

		if i, ok := config["components"]; ok {
			componentsSection := i.(map[string]any)

			if terraformComponents, ok := componentsSection["terraform"]; ok {
				terraformComponentsMap := terraformComponents.(map[string]any)

				for component, v := range terraformComponentsMap {
					componentMap := v.(map[string]any)

					componentSettings := map[any]any{}
					if i, ok2 := componentMap["settings"]; ok2 {
						componentSettings = i.(map[any]any)
					}

					spaceliftSettings := map[any]any{}
					spaceliftWorkspaceEnabled := false

					if i, ok2 := componentSettings["spacelift"]; ok2 {
						spaceliftSettings = i.(map[any]any)

						if i3, ok3 := spaceliftSettings["workspace_enabled"]; ok3 {
							spaceliftWorkspaceEnabled = i3.(bool)
						}
					}

					// If Spacelift workspace is disabled, don't include it, continue to the next component
					if !spaceliftWorkspaceEnabled {
						continue
					}

					spaceliftExplicitLabels := []any{}
					if i, ok2 := spaceliftSettings["labels"]; ok2 {
						spaceliftExplicitLabels = i.([]any)
					}

					spaceliftConfig := map[string]any{}
					spaceliftConfig["enabled"] = spaceliftWorkspaceEnabled

					// Gather the component's config sections (all optional).
					componentVars := map[any]any{}
					if i, ok2 := componentMap["vars"]; ok2 {
						componentVars = i.(map[any]any)
					}

					componentEnv := map[any]any{}
					if i, ok2 := componentMap["env"]; ok2 {
						componentEnv = i.(map[any]any)
					}

					componentStacks := []string{}
					if i, ok2 := componentMap["stacks"]; ok2 {
						componentStacks = i.([]string)
					}

					componentInheritance := []string{}
					if i, ok2 := componentMap["inheritance"]; ok2 {
						componentInheritance = i.([]string)
					}

					// Process component metadata and find a base component (if any) and whether the component is real or abstract
					componentMetadata, baseComponentName, componentIsAbstract := e.ProcessComponentMetadata(component, componentMap)

					if componentIsAbstract {
						continue
					}

					context := cfg.GetContextFromVars(componentVars)
					context.Component = component
					context.BaseComponent = baseComponentName

					var contextPrefix string

					if stackNamePattern != "" {
						contextPrefix, err = cfg.GetContextPrefix(stackName, context, stackNamePattern, stackName)
						if err != nil {
							u.LogError(err)
							return nil, err
						}
					} else {
						contextPrefix = strings.Replace(stackName, "/", "-", -1)
					}

					spaceliftConfig["component"] = component
					spaceliftConfig["stack"] = contextPrefix
					spaceliftConfig["imports"] = imports
					spaceliftConfig["vars"] = componentVars
					spaceliftConfig["settings"] = componentSettings
					spaceliftConfig["env"] = componentEnv
					spaceliftConfig["stacks"] = componentStacks
					spaceliftConfig["inheritance"] = componentInheritance
					spaceliftConfig["base_component"] = baseComponentName
					spaceliftConfig["metadata"] = componentMetadata

					// backend
					backendTypeName := ""
					if backendType, backendTypeExist := componentMap["backend_type"]; backendTypeExist {
						backendTypeName = backendType.(string)
					}

					spaceliftConfig["backend_type"] = backendTypeName

					componentBackend := map[any]any{}
					if i, ok2 := componentMap["backend"]; ok2 {
						componentBackend = i.(map[any]any)
					}

					spaceliftConfig["backend"] = componentBackend

					// Component dependencies
					configAndStacksInfo := schema.ConfigAndStacksInfo{
						ComponentFromArg:          component,
						ComponentType:             "terraform",
						StackFile:                 stackName,
						ComponentVarsSection:      componentVars,
						ComponentEnvSection:       componentEnv,
						ComponentSettingsSection:  componentSettings,
						ComponentBackendSection:   componentBackend,
						ComponentBackendType:      backendTypeName,
						ComponentInheritanceChain: componentInheritance,
					}

					sources, err := e.ProcessConfigSources(configAndStacksInfo, rawStackConfigs)
					if err != nil {
						return nil, err
					}

					componentDeps, componentDepsAll, err := e.FindComponentDependencies(stackName, sources)
					if err != nil {
						return nil, err
					}

					spaceliftConfig["deps"] = componentDeps
					spaceliftConfig["deps_all"] = componentDepsAll

					// Terraform workspace
					workspace, err := e.BuildTerraformWorkspace(
						stackName,
						stackNamePattern,
						componentMetadata,
						context,
					)
					if err != nil {
						u.LogError(err)
						return nil, err
					}

					spaceliftConfig["workspace"] = workspace

					// labels
					labels := []string{}
					for _, v := range imports {
						labels = append(labels, fmt.Sprintf("import:"+stackConfigPathTemplate, v))
					}
					for _, v := range componentStacks {
						labels = append(labels, fmt.Sprintf("stack:"+stackConfigPathTemplate, v))
					}
					for _, v := range componentDeps {
						labels = append(labels, fmt.Sprintf("deps:"+stackConfigPathTemplate, v))
					}
					for _, v := range spaceliftExplicitLabels {
						labels = append(labels, v.(string))
					}

					var terraformComponentNamesInCurrentStack []string

					for v := range terraformComponentsMap {
						terraformComponentNamesInCurrentStack = append(terraformComponentNamesInCurrentStack, strings.Replace(v, "/", "-", -1))
					}

					// Legacy/deprecated `settings.spacelift.depends_on`
					spaceliftDependsOn := []any{}
					if i, ok2 := spaceliftSettings["depends_on"]; ok2 {
						spaceliftDependsOn = i.([]any)
					}

					var spaceliftStackNameDependsOnLabels1 []string

					for _, dep := range spaceliftDependsOn {
						spaceliftStackNameDependsOn, err := e.BuildDependentStackNameFromDependsOn(
							dep.(string),
							allStackNames,
							contextPrefix,
							terraformComponentNamesInCurrentStack,
							component)
						if err != nil {
							u.LogError(err)
							return nil, err
						}
						spaceliftStackNameDependsOnLabels1 = append(spaceliftStackNameDependsOnLabels1, fmt.Sprintf("depends-on:%s", spaceliftStackNameDependsOn))
					}

					// Sorted for deterministic label output.
					sort.Strings(spaceliftStackNameDependsOnLabels1)
					labels = append(labels, spaceliftStackNameDependsOnLabels1...)

					// Recommended `settings.depends_on`
					var stackComponentSettingsDependsOn schema.Settings
					err = mapstructure.Decode(componentSettings, &stackComponentSettingsDependsOn)
					if err != nil {
						return nil, err
					}

					var spaceliftStackNameDependsOnLabels2 []string

					for _, stackComponentSettingsDependsOnContext := range stackComponentSettingsDependsOn.DependsOn {
						if stackComponentSettingsDependsOnContext.Component == "" {
							continue
						}

						// Fill in any context fields the dependency omitted
						// from the current component's context.
						if stackComponentSettingsDependsOnContext.Namespace == "" {
							stackComponentSettingsDependsOnContext.Namespace = context.Namespace
						}
						if stackComponentSettingsDependsOnContext.Tenant == "" {
							stackComponentSettingsDependsOnContext.Tenant = context.Tenant
						}
						if stackComponentSettingsDependsOnContext.Environment == "" {
							stackComponentSettingsDependsOnContext.Environment = context.Environment
						}
						if stackComponentSettingsDependsOnContext.Stage == "" {
							stackComponentSettingsDependsOnContext.Stage = context.Stage
						}

						var contextPrefixDependsOn string

						if stackNamePattern != "" {
							contextPrefixDependsOn, err = cfg.GetContextPrefix(
								stackName,
								stackComponentSettingsDependsOnContext,
								stackNamePattern,
								stackName,
							)
							if err != nil {
								return nil, err
							}
						} else {
							contextPrefixDependsOn = strings.Replace(stackName, "/", "-", -1)
						}

						spaceliftStackNameDependsOn, err := e.BuildDependentStackNameFromDependsOn(
							stackComponentSettingsDependsOnContext.Component,
							allStackNames,
							contextPrefixDependsOn,
							terraformComponentNamesInCurrentStack,
							component)
						if err != nil {
							u.LogError(err)
							return nil, err
						}
						spaceliftStackNameDependsOnLabels2 = append(spaceliftStackNameDependsOnLabels2, fmt.Sprintf("depends-on:%s", spaceliftStackNameDependsOn))
					}

					sort.Strings(spaceliftStackNameDependsOnLabels2)
					labels = append(labels, spaceliftStackNameDependsOnLabels2...)

					// Add `component` and `folder` labels
					labels = append(labels, fmt.Sprintf("folder:component/%s", component))
					labels = append(labels, fmt.Sprintf("folder:%s", strings.Replace(contextPrefix, "-", "/", -1)))

					spaceliftConfig["labels"] = u.UniqueStrings(labels)

					// Spacelift stack name
					spaceliftStackName, spaceliftStackNamePattern := e.BuildSpaceliftStackName(spaceliftSettings, context, contextPrefix)

					// Add Spacelift stack config to the final map
					spaceliftStackNameKey := strings.Replace(spaceliftStackName, "/", "-", -1)

					if !u.MapKeyExists(res, spaceliftStackNameKey) {
						res[spaceliftStackNameKey] = spaceliftConfig
					} else {
						errorMessage := fmt.Sprintf("\nDuplicate Spacelift stack name '%s' for component '%s' in the stack '%s'."+
							"\nCheck if the component name is correct and the Spacelift stack name pattern 'stack_name_pattern=%s' is specific enough."+
							"\nDid you specify the correct context tokens {namespace}, {tenant}, {environment}, {stage}, {component}?",
							spaceliftStackName,
							component,
							stackName,
							spaceliftStackNamePattern,
						)
						er := errors.New(errorMessage)
						u.LogError(er)
						return nil, er
					}
				}
			}
		}
	}

	return res, nil
}
|
// Package frames implements HTTP/2 frames exchanged by peers as defined in
// RFC 7540 Section 6.
package frames
import (
"errors"
"fmt"
"github.com/jamescun/http2/settings"
)
var (
	// ErrFrameTooBig is returned when attempting to marshal a Frame but its
	// configured length exceeds a uint24 (the wire format's maximum).
	ErrFrameTooBig = errors.New("frames: too big")

	// ErrShortHeader is returned when attempting to unmarshal a Header but
	// not enough bytes are available.
	ErrShortHeader = errors.New("frames: short header")

	// ErrShortFrame is returned when attempting to unmarshal a Frame but not
	// enough bytes are available (or the payload length is malformed).
	ErrShortFrame = errors.New("frames: too short")
)
// Frame is implemented by all HTTP/2 Frame definitions, as defined in RFC 7540
// Section 6.
type Frame interface {
	// MarshalFrame converts a Frame into its wire format and sets the Frame
	// specific fields on the given Header.
	MarshalFrame(*Header) ([]byte, error)

	// UnmarshalFrame converts a Frame from its wire format. Header is the
	// Frame header that preceded the Frame being unmarshalled.
	UnmarshalFrame(*Header, []byte) error
}
// Type is the unique identifier given to each Frame. FrameTypes greater than
// 0x09 are considered extensions and MUST be ignored if not understood.
// RFC 7540 Section 4.1
type Type uint8

// Frame types defined by RFC 7540 Sections 6.1 through 6.10.
const (
	TypeData         Type = 0x0 // RFC 7540 Section 6.1
	TypeHeaders      Type = 0x1 // RFC 7540 Section 6.2
	TypePriority     Type = 0x2 // RFC 7540 Section 6.3
	TypeResetStream  Type = 0x3 // RFC 7540 Section 6.4
	TypeSettings     Type = 0x4 // RFC 7540 Section 6.5
	TypePushPromise  Type = 0x5 // RFC 7540 Section 6.6
	TypePing         Type = 0x6 // RFC 7540 Section 6.7
	TypeGoAway       Type = 0x7 // RFC 7540 Section 6.8
	TypeWindowUpdate Type = 0x8 // RFC 7540 Section 6.9
	TypeContinuation Type = 0x9 // RFC 7540 Section 6.10
)
// Flags are Frame specific options set on the FrameHeader.
// RFC 7540 Section 4.1
type Flags uint8

// Set sets Flags v on Flags f.
//
// Fix: Set previously used a value receiver, so the assignment mutated a
// copy and the call was a no-op for every caller. A pointer receiver makes
// the update visible; call sites on addressable values (e.g. hdr.Flags.Set)
// are unchanged.
func (f *Flags) Set(v Flags) {
	*f |= v
}

// Has returns true if Flags f contains Flags v.
func (f Flags) Has(v Flags) bool {
	return f&v != 0
}
// HeaderLength is the fixed length of a Header Frame in bytes.
// RFC 7540 Section 4.1
const HeaderLength = 9

// Header prefixes all HTTP/2 payloads identifying Frame type, length,
// optional flags and its associated Stream.
// RFC 7540 Section 4.1
type Header struct {
	Length   uint32 // payload length in bytes; encoded on the wire as a uint24
	Type     Type   // frame type; see the Type* constants
	Flags    Flags  // frame-type-specific flag bits
	StreamID uint32 // 31-bit stream identifier (the high bit is reserved)
}
// MarshalFrameHeader marshals Header to the wire format.
//
// NOTE(jc): Header stores the length in a uint32 but the protocol demands
// a uint24, which Go lacks; anything wider yields ErrFrameTooBig.
func (h *Header) MarshalFrameHeader() ([]byte, error) {
	if h.Length > 1<<24-1 {
		return nil, ErrFrameTooBig
	}

	buf := make([]byte, HeaderLength)
	putUint24(buf, h.Length)
	buf[3] = byte(h.Type)
	buf[4] = byte(h.Flags)
	putUint31(buf[5:], h.StreamID)

	return buf, nil
}
// UnmarshalFrameHeader unmarshals a Header from the wire format, requiring
// at least HeaderLength bytes in b.
func (h *Header) UnmarshalFrameHeader(b []byte) error {
	if len(b) < HeaderLength {
		return ErrShortHeader
	}

	h.Length = uint24(b)
	h.Type, h.Flags = Type(b[3]), Flags(b[4])
	h.StreamID = uint31(b[5:])

	return nil
}
const (
	// FlagSettingsAck indicates a Settings frame is an acknowledgement of a
	// previously sent Settings frame.
	// RFC 7540 Section 6.5
	FlagSettingsAck = Flags(0x1)
)
// Settings conveys and acknowledges configuration values between peers; it
// is not used for negotiation.
// RFC 7540 Section 6.5
type Settings struct {
	Header

	// Ack acknowledges a previously sent Settings frame and MUST NOT contain
	// any Settings itself.
	Ack bool

	Settings []settings.Setting
}
// MarshalFrame marshals Settings into the wire format. Each setting
// occupies six bytes on the wire. Settings frames are connection-scoped,
// so the stream ID is forced to zero.
func (s *Settings) MarshalFrame(hdr *Header) ([]byte, error) {
	payloadLen := 6 * len(s.Settings)

	hdr.Length = uint32(payloadLen)
	hdr.Type = TypeSettings
	hdr.StreamID = 0
	if s.Ack {
		hdr.Flags = FlagSettingsAck
	}

	buf := make([]byte, 0, payloadLen)
	for _, st := range s.Settings {
		buf = settings.AppendSetting(buf, st)
	}

	return buf, nil
}
// UnmarshalFrame unmarshals Settings from the wire format. Settings with
// unknown identifiers are skipped.
//
// Fixes: the Header (and the Ack flag) are now recorded even for an empty
// payload — previously an ACK Settings frame returned before s.Header was
// assigned and s.Ack was never derived from the flags, inconsistent with
// how Headers and Data unmarshal their headers/flags.
func (s *Settings) UnmarshalFrame(hdr *Header, b []byte) error {
	s.Header = *hdr
	if hdr.Flags.Has(FlagSettingsAck) {
		s.Ack = true
	}

	if len(b) == 0 {
		return nil
	}

	// NOTE(jc): settings identifiers and values are always a multiple of six.
	if len(b)%6 != 0 {
		return ErrShortFrame
	}

	s.Settings = make([]settings.Setting, 0, len(b)/6)

	for len(b) > 0 {
		setting, err := settings.ParseSetting(b)
		b = b[6:]
		if err == settings.ErrUnknown {
			continue
		} else if err != nil {
			return err
		}

		s.Settings = append(s.Settings, setting)
	}

	return nil
}
var (
	// FlagHeadersEndStream indicates a Headers frame is to also terminate its
	// Stream (excluding any Continuation frames).
	// RFC 7540 Section 6.2
	FlagHeadersEndStream = Flags(0x01)

	// FlagHeadersEndHeaders indicates a Headers frame is the last of the
	// Headers sent by a peer.
	// RFC 7540 Section 6.2
	FlagHeadersEndHeaders = Flags(0x04)

	// FlagHeadersPadded indicates a Headers frame contains trailing padding.
	// RFC 7540 Section 6.2
	FlagHeadersPadded = Flags(0x08)

	// FlagHeadersPriority indicates a Headers frame contains priority
	// information similar to a Priority frame.
	// RFC 7540 Section 6.2
	FlagHeadersPriority = Flags(0x20)
)
// Headers is used to initialize a Stream and contains zero or more HPACK
// header block fragments.
//
// NOTE(jc): Padding and Priority are not currently implemented.
//
// RFC 7540 Section 6.2
type Headers struct {
	Header

	// EndStream indicates this Header frame (and possible Continuation frames)
	// are the last in this Stream.
	EndStream bool

	// EndHeaders indicates this Headers frame is the last of this set and no
	// other Headers frame or Continuation frame will be sent.
	EndHeaders bool

	// Block contains an HPACK header block fragment, described in RFC 7541.
	Block []byte
}
// MarshalFrame marshals Headers into the wire format, setting the frame
// type, length, stream ID and flag bits on hdr and returning a copy of the
// HPACK block as the payload.
func (h *Headers) MarshalFrame(hdr *Header) ([]byte, error) {
	// TODO(jc): implement security padding and stream prioritization.
	if h.Header.Flags.Has(FlagHeadersPadded) {
		return nil, fmt.Errorf("headers: padding not implemented")
	} else if h.Header.Flags.Has(FlagHeadersPriority) {
		return nil, fmt.Errorf("headers: priority not implemented")
	}

	// Fix: OR the flag bits in directly. The previous hdr.Flags.Set(...)
	// calls went through a value-receiver method, mutated a copy, and
	// silently dropped the END_STREAM/END_HEADERS flags.
	if h.EndStream {
		hdr.Flags |= FlagHeadersEndStream
	}
	if h.EndHeaders {
		hdr.Flags |= FlagHeadersEndHeaders
	}

	hdr.Type = TypeHeaders
	hdr.Length = uint32(len(h.Block))
	hdr.StreamID = h.StreamID

	b := make([]byte, len(h.Block))
	copy(b, h.Block)

	return b, nil
}
// UnmarshalFrame unmarshals Headers from the wire format, copying the
// payload into Block and lifting the END_STREAM/END_HEADERS flags into
// their boolean fields.
func (h *Headers) UnmarshalFrame(hdr *Header, b []byte) error {
	// TODO(jc): implement security padding and stream prioritization from
	// initial Headers frame.
	switch {
	case hdr.Flags.Has(FlagHeadersPadded):
		return fmt.Errorf("headers: padding not implemented")
	case hdr.Flags.Has(FlagHeadersPriority):
		return fmt.Errorf("headers: priority not implemented")
	}

	if hdr.Flags.Has(FlagHeadersEndStream) {
		h.EndStream = true
	}
	if hdr.Flags.Has(FlagHeadersEndHeaders) {
		h.EndHeaders = true
	}

	h.Header = *hdr

	block := make([]byte, len(b))
	copy(block, b)
	h.Block = block

	return nil
}
const (
	// FlagDataEndStream indicates a Data frame also terminates its Stream.
	// RFC 7540 Section 6.1
	FlagDataEndStream = Flags(0x01)

	// FlagDataPadded indicates a Data frame contains trailing padding.
	// RFC 7540 Section 6.1
	FlagDataPadded = Flags(0x08)
)
// Data is used to carry request or response data between peers.
//
// NOTE(jc): Padding is not currently implemented.
//
// RFC 7540 Section 6.1
type Data struct {
	Header

	// EndStream indicates this Data frame terminates the Stream.
	EndStream bool

	// Data is the application data from the peer.
	Data []byte
}
// MarshalFrame marshals Data into the wire format, setting the frame type,
// length, stream ID and flags on hdr and returning a copy of the payload.
func (d *Data) MarshalFrame(hdr *Header) ([]byte, error) {
	// TODO(jc): implement security padding.
	if d.Header.Flags.Has(FlagDataPadded) {
		return nil, fmt.Errorf("data: padding not implemented")
	}

	// Fix: OR the flag bit in directly. The previous hdr.Flags.Set(...)
	// call went through a value-receiver method, mutated a copy, and
	// silently dropped the END_STREAM flag.
	if d.EndStream {
		hdr.Flags |= FlagDataEndStream
	}

	hdr.Type = TypeData
	hdr.Length = uint32(len(d.Data))
	hdr.StreamID = d.Header.StreamID

	b := make([]byte, len(d.Data))
	copy(b, d.Data)

	return b, nil
}
// UnmarshalFrame unmarshals Data from the wire format, copying the payload
// and lifting the END_STREAM flag into its boolean field.
func (d *Data) UnmarshalFrame(hdr *Header, b []byte) error {
	// TODO(jc): implement security padding.
	if hdr.Flags.Has(FlagDataPadded) {
		return fmt.Errorf("data: padding not implemented")
	}

	if hdr.Flags.Has(FlagDataEndStream) {
		d.EndStream = true
	}

	d.Header = *hdr

	payload := make([]byte, len(b))
	copy(payload, b)
	d.Data = payload

	return nil
}
const (
	// FlagPingAck indicates a Ping Frame is an acknowledgement of a received
	// Ping Frame.
	// RFC 7540 Section 6.7
	FlagPingAck = Flags(0x1)
)
// uint24 decodes the first three bytes of b as a big-endian unsigned
// 24-bit integer.
func uint24(b []byte) uint32 {
	_ = b[2] // bounds check hint to compiler; see golang.org/issue/14808
	v := uint32(b[2])
	v |= uint32(b[1]) << 8
	v |= uint32(b[0]) << 16
	return v
}
// putUint24 encodes v into the first three bytes of b, big-endian. Bits
// above the low 24 are discarded.
func putUint24(b []byte, v uint32) {
	_ = b[2] // bounds check hint to compiler; see golang.org/issue/14808
	b[2] = byte(v)
	b[1] = byte(v >> 8)
	b[0] = byte(v >> 16)
}
// uint31 decodes the first four bytes of b as a big-endian unsigned 31-bit
// integer, discarding the most significant (reserved) bit per RFC 7540.
func uint31(b []byte) uint32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	v := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	return v & (1<<31 - 1)
}
// putUint31 encodes v into the first four bytes of b, big-endian, clearing
// the most significant (reserved) bit.
//
// Fix: the reserved bit is now masked off before writing. RFC 7540
// Section 4.1 requires the reserved bit to remain unset when sending, and
// the matching reader uint31 already masks it on decode.
func putUint31(b []byte, v uint32) {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	v &= 1<<31 - 1
	b[0] = byte(v >> 24)
	b[1] = byte(v >> 16)
	b[2] = byte(v >> 8)
	b[3] = byte(v)
}
|
package smartcontractdatastore
import (
"fmt"
"github.com/multivactech/MultiVAC/model/chaincfg/multivacaddress"
"github.com/multivactech/MultiVAC/model/merkle"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/model/wire"
"github.com/multivactech/MultiVAC/processor/shared/state"
)
// SContractDataStore used to store smart-contract data for a single shard.
type SContractDataStore struct {
	// shardIdx is the shard this store serves; incoming infos/actions for
	// other shards are rejected.
	shardIdx shard.Index

	// dataStore maps the public key hash of a contract address to that
	// contract's SmartContractInfo.
	dataStore map[multivacaddress.PublicKeyHash]*wire.SmartContractInfo
}
// AddSmartContractInfo verifies sCInfo against the given merkle root and,
// on success, indexes it in the store by the public-key hash of its
// contract address. An error is returned if verification fails, if the
// info belongs to a different shard, or if the address hash cannot be
// derived.
func (sCDS *SContractDataStore) AddSmartContractInfo(sCInfo *wire.SmartContractInfo, root *merkle.MerkleHash) error {
	if err := sCInfo.Verify(root); err != nil {
		return err
	}

	if sCInfo.ShardIdx != sCDS.shardIdx {
		return fmt.Errorf("the shard index of smartContractInfo is wrong, want: %v, get: %v",
			sCDS.shardIdx, sCInfo.ShardIdx)
	}

	pKHash, err := sCInfo.SmartContract.ContractAddr.GetPublicKeyHash(multivacaddress.SmartContractAddress)
	if err != nil {
		return err
	}

	sCDS.dataStore[*pKHash] = sCInfo
	return nil
}
// GetSmartContractInfo returns the SmartContractInfo stored for the given
// contract-address public-key hash, or nil if none is stored.
func (sCDS *SContractDataStore) GetSmartContractInfo(pKHash multivacaddress.PublicKeyHash) *wire.SmartContractInfo {
	return sCDS.dataStore[pKHash]
}
// RefreshDataStore updates the smartContractDataStore according to the given StateUpdate and Actions.
//
// With no actions, every stored contract has its code/shard-init proofs
// refreshed; entries whose proofs cannot be updated are dropped. With
// actions, each matching stored contract gets its shard-init out and both
// proofs refreshed.
//
// Fix: when an action referenced a contract that is not present in the
// store, the original code fell through with a nil sCInfo and panicked on
// the *sCInfo.CodeOutProof dereference below; such actions are now skipped.
func (sCDS *SContractDataStore) RefreshDataStore(update *state.Update, actions []*wire.UpdateAction) {
	// Update proof without update action
	if len(actions) == 0 {
		for addrHash, sCInfo := range sCDS.dataStore {
			proofs := []merkle.MerklePath{*sCInfo.CodeOutProof, *sCInfo.ShardInitOutProof}
			newProofs, err := update.UpdateProofs(proofs)
			if err != nil {
				log.Errorf("failed to update the proof of codeOut and shardInitOut, err: %v", err)
				delete(sCDS.dataStore, addrHash)
				continue
			}
			sCInfo.CodeOutProof = &newProofs[0]
			sCInfo.ShardInitOutProof = &newProofs[1]
			sCDS.dataStore[addrHash] = sCInfo
		}
		return
	}

	// Update proof with update action
	for _, action := range actions {
		pKHash, err := action.OriginOut.ContractAddress.GetPublicKeyHash(multivacaddress.SmartContractAddress)
		if err != nil {
			log.Errorf("smartContractDataStore failed to GetPublicKeyHash, err: %v", err)
			continue
		}
		if action.OriginOut.Shard != sCDS.shardIdx {
			log.Errorf("the shardIdx of smartContractDataStore and action.OriginOut.Shard are not equal")
			continue
		}
		if !action.OriginOut.IsSmartContractShardInitOut() {
			log.Errorf("the action.OriginOut is not smartContractShardInitOut")
			continue
		}
		sCInfo, ok := sCDS.dataStore[*pKHash]
		if !ok {
			// Not a contract tracked by this store; skip instead of
			// dereferencing a nil SmartContractInfo.
			continue
		}
		sCInfo.ShardInitOut = action.NewOut
		newCodeProof, err := update.UpdateProofs([]merkle.MerklePath{*sCInfo.CodeOutProof})
		if err != nil {
			log.Errorf("fail to update proofs of code out, err: %v", err)
			delete(sCDS.dataStore, *pKHash)
			continue
		}
		sCInfo.CodeOutProof = &newCodeProof[0]
		newShardOutProof, err := update.GetShardDataOutProof(action.OriginOut, action.NewOut)
		if err != nil {
			log.Errorf("fail to update proofs of shard out, err: %v", err)
			delete(sCDS.dataStore, *pKHash)
			continue
		}
		sCInfo.ShardInitOutProof = newShardOutProof
		sCDS.dataStore[*pKHash] = sCInfo
	}
}
// Reset discards every cached SmartContractInfo, leaving the store empty
// and ready for reuse.
func (sCDS *SContractDataStore) Reset() {
	sCDS.dataStore = map[multivacaddress.PublicKeyHash]*wire.SmartContractInfo{}
}
|
package strategies
import (
"github.com/matang28/reshape/reshape"
"github.com/matang28/reshape/reshape/sinks"
"github.com/stretchr/testify/assert"
"testing"
)
// TestDirectStrategy_Solve_HappyCase pushes batches of ints through a
// direct-strategy pipeline (two +1 transforms, then dropEvens, then an
// array sink) and checks the sink's accumulated contents after each batch.
// tick() gives the pipeline goroutine time to drain the source channel
// before asserting.
func TestDirectStrategy_Solve_HappyCase(t *testing.T) {
	strg := NewDirectStrategy()
	src := make(chan interface{})
	sink := sinks.NewArraySink()
	go strg.Solve(src, nil, []interface{}{plusOneTrans, plusOneTrans, dropEvens, sink})

	// -1 becomes 1 after the two +1 transforms and survives the filter.
	predefinedSource(src, -1)
	tick()
	assert.EqualValues(t, []interface{}{1}, sink.Get())

	// 1..5 become 3..7; the even results are dropped.
	predefinedSource(src, 1, 2, 3, 4, 5)
	tick()
	assert.EqualValues(t, []interface{}{1, 3, 5, 7}, sink.Get())

	// 5,11,10 become 7,13,12; 12 is dropped.
	predefinedSource(src, 5, 11, 10)
	tick()
	assert.EqualValues(t, []interface{}{1, 3, 5, 7, 7, 13}, sink.Get())

	close(src)
}
// TestDirectStrategy_Solve_BadTransformation feeds four elements through a
// pipeline ending in a failing transformation and verifies that nothing
// reaches the sink and that the reported errors are TransformationErrors.
func TestDirectStrategy_Solve_BadTransformation(t *testing.T) {
	strategy := NewDirectStrategy()
	source := make(chan interface{})
	sink := sinks.NewArraySink()
	errCh := make(chan error)
	go strategy.Solve(source, errCh, []interface{}{plusOneTrans, plusOneTrans, badTrans, sink})

	predefinedSource(source, 1, 2, 3, 4)
	tick()
	assert.Nil(t, sink.Get())

	// Drain one error per input element; the last one is inspected below.
	var err error
	for i := 0; i < 4; i++ {
		err = <-errCh
	}
	_, ok := err.(*reshape.TransformationError)
	assert.True(t, ok)
}
// TestDirectStrategy_Solve_BadSink feeds four elements into a pipeline
// whose sink always fails and verifies the reported errors are SinkErrors.
func TestDirectStrategy_Solve_BadSink(t *testing.T) {
	strategy := NewDirectStrategy()
	source := make(chan interface{})
	sink := &badSink{}
	errCh := make(chan error)
	go strategy.Solve(source, errCh, []interface{}{plusOneTrans, plusOneTrans, sink})

	predefinedSource(source, 1, 2, 3, 4)
	tick()

	// Drain one error per input element; the last one is inspected below.
	var err error
	for i := 0; i < 4; i++ {
		err = <-errCh
	}
	_, ok := err.(*reshape.SinkError)
	assert.True(t, ok)
}
// TestDirectStrategy_Solve_UnrecognizedHandler places a non-handler value
// (the int 10) in the pipeline and verifies the strategy reports
// UnrecognizedHandlerErrors for the processed elements.
func TestDirectStrategy_Solve_UnrecognizedHandler(t *testing.T) {
	strategy := NewDirectStrategy()
	source := make(chan interface{})
	errCh := make(chan error)
	go strategy.Solve(source, errCh, []interface{}{plusOneTrans, plusOneTrans, 10})

	predefinedSource(source, 1, 2, 3, 4)
	tick()

	for i := 0; i < 3; i++ {
		_, ok := (<-errCh).(*reshape.UnrecognizedHandlerError)
		assert.True(t, ok)
	}
}
|
package tests
import (
"testing"
)
/**
* [64] Minimum Path Sum
*
* Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.
*
* Note: You can only move either down or right at any point in time.
*
* Example:
*
*
* Input:
* [
* [1,3,1],
* [1,5,1],
* [4,2,1]
* ]
* Output: 7
* Explanation: Because the path 1→3→1→1→1 minimizes the sum.
*
*
*/
// TestMinimumPathSum checks minPathSum against two known grids.
func TestMinimumPathSum(t *testing.T) {
	testCases := []struct {
		grid [][]int
		want int
	}{
		{grid: [][]int{{1, 3, 1}, {1, 5, 1}, {4, 2, 1}}, want: 7},
		{grid: [][]int{{1, 2, 5}, {3, 2, 1}}, want: 6},
	}
	for _, tc := range testCases {
		if got := minPathSum(tc.grid); got != tc.want {
			t.Fail()
		}
	}
}
// submission codes start here
// minPathSum returns the minimum sum of values along any top-left to
// bottom-right path in grid, moving only down or right. It uses a single
// rolling DP row of size len(grid[0]).
func minPathSum(grid [][]int) int {
	rows := len(grid)
	if rows == 0 {
		return 0
	}
	cols := len(grid[0])
	if cols == 0 {
		return 0
	}

	// dp[j] holds the best cost to reach column j of the current row.
	dp := make([]int, cols)
	dp[0] = grid[0][0]
	for j := 1; j < cols; j++ {
		dp[j] = dp[j-1] + grid[0][j]
	}

	for i := 1; i < rows; i++ {
		dp[0] += grid[i][0]
		for j := 1; j < cols; j++ {
			best := dp[j] // from above
			if dp[j-1] < best {
				best = dp[j-1] // from the left
			}
			dp[j] = best + grid[i][j]
		}
	}

	return dp[cols-1]
}
// submission codes end
|
package consumer
import (
"fmt"
"hash/crc32"
"os"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
"github.com/couchbase/eventing/common"
"github.com/couchbase/eventing/dcp"
mcd "github.com/couchbase/eventing/dcp/transport"
"github.com/couchbase/eventing/dcp/transport/client"
"github.com/couchbase/eventing/logging"
"github.com/couchbase/eventing/suptree"
"github.com/couchbase/eventing/timer_transfer"
"github.com/couchbase/eventing/util"
"github.com/couchbase/plasma"
)
// NewConsumer called by producer to create consumer handle.
//
// It only assembles the Consumer struct (channels, maps, tickers, config
// knobs); no network connections are made here — the bucket handle, DCP
// feeds and the worker process are all established later in Serve.
func NewConsumer(streamBoundary common.DcpStreamBoundary, cleanupTimers, enableRecursiveMutation bool,
	executionTimeout, index, lcbInstCapacity, skipTimerThreshold, sockWriteBatchSize, timerProcessingPoolSize int,
	cppWorkerThrCount, vbOwnershipGiveUpRoutineCount, vbOwnershipTakeoverRoutineCount int,
	bucket, eventingAdminPort, eventingDir, logLevel, tcpPort, uuid string,
	eventingNodeUUIDs []string, vbnos []uint16, app *common.AppConfig,
	p common.EventingProducer, s common.EventingSuperSup, vbPlasmaStore *plasma.Plasma,
	socketTimeout time.Duration) *Consumer {

	// b stays nil here; the live bucket handle is established in Serve via
	// util.Retry(..., commonConnectBucketOpCallback, c, &c.cbBucket).
	var b *couchbase.Bucket

	consumer := &Consumer{
		app:                                app,
		aggDCPFeed:                         make(chan *memcached.DcpEvent, dcpGenChanSize),
		bucket:                             bucket,
		cbBucket:                           b,
		checkpointInterval:                 checkpointInterval,
		cleanupTimers:                      cleanupTimers,
		clusterStateChangeNotifCh:          make(chan struct{}, ClusterChangeNotifChBufSize),
		cppThrPartitionMap:                 make(map[int][]uint16),
		cppWorkerThrCount:                  cppWorkerThrCount,
		crcTable:                           crc32.MakeTable(crc32.Castagnoli),
		dcpFeedCancelChs:                   make([]chan struct{}, 0),
		dcpFeedVbMap:                       make(map[*couchbase.DcpFeed][]uint16),
		dcpStreamBoundary:                  streamBoundary,
		debuggerStarted:                    false,
		docTimerEntryCh:                    make(chan *byTimerEntry, timerChanSize),
		enableRecursiveMutation:            enableRecursiveMutation,
		eventingAdminPort:                  eventingAdminPort,
		eventingDir:                        eventingDir,
		eventingNodeUUIDs:                  eventingNodeUUIDs,
		executionTimeout:                   executionTimeout,
		gracefulShutdownChan:               make(chan struct{}, 1),
		hostDcpFeedRWMutex:                 &sync.RWMutex{},
		kvHostDcpFeedMap:                   make(map[string]*couchbase.DcpFeed),
		lcbInstCapacity:                    lcbInstCapacity,
		logLevel:                           logLevel,
		nonDocTimerEntryCh:                 make(chan string, timerChanSize),
		nonDocTimerStopCh:                  make(chan struct{}, 1),
		opsTimestamp:                       time.Now(),
		persistAllTicker:                   time.NewTicker(persistAllTickInterval),
		plasmaReaderRWMutex:                &sync.RWMutex{},
		plasmaStoreRWMutex:                 &sync.RWMutex{},
		producer:                           p,
		restartVbDcpStreamTicker:           time.NewTicker(restartVbDcpStreamTickInterval),
		sendMsgCounter:                     0,
		sendMsgToDebugger:                  false,
		signalBootstrapFinishCh:            make(chan struct{}, 1),
		signalConnectedCh:                  make(chan struct{}, 1),
		signalDebugBlobDebugStopCh:         make(chan struct{}, 1),
		signalInstBlobCasOpFinishCh:        make(chan struct{}, 1),
		signalSettingsChangeCh:             make(chan struct{}, 1),
		signalPlasmaClosedCh:               make(chan uint16, numVbuckets),
		signalPlasmaTransferFinishCh:       make(chan *plasmaStoreMsg, numVbuckets),
		signalProcessTimerPlasmaCloseAckCh: make(chan uint16, numVbuckets),
		signalStartDebuggerCh:              make(chan struct{}, 1),
		signalStopDebuggerCh:               make(chan struct{}, 1),
		signalStopDebuggerRoutineCh:        make(chan struct{}, 1),
		signalStoreTimerPlasmaCloseAckCh:   make(chan uint16, numVbuckets),
		signalStoreTimerPlasmaCloseCh:      make(chan uint16, numVbuckets),
		signalUpdateDebuggerInstBlobCh:     make(chan struct{}, 1),
		skipTimerThreshold:                 skipTimerThreshold,
		socketTimeout:                      socketTimeout,
		socketWriteBatchSize:               sockWriteBatchSize,
		statsTicker:                        time.NewTicker(statsTickInterval),
		stopControlRoutineCh:               make(chan struct{}),
		stopPlasmaPersistCh:                make(chan struct{}, 1),
		stopVbOwnerGiveupCh:                make(chan struct{}, 1),
		stopVbOwnerTakeoverCh:              make(chan struct{}, 1),
		superSup:                           s,
		tcpPort:                            tcpPort,
		timerRWMutex:                       &sync.RWMutex{},
		timerProcessingTickInterval:        timerProcessingTickInterval,
		timerProcessingWorkerCount:         timerProcessingPoolSize,
		timerProcessingVbsWorkerMap:        make(map[uint16]*timerProcessingWorker),
		timerProcessingRunningWorkers:      make([]*timerProcessingWorker, 0),
		timerProcessingWorkerSignalCh:      make(map[*timerProcessingWorker]chan struct{}),
		uuid:                               uuid,
		vbDcpFeedMap:                       make(map[uint16]*couchbase.DcpFeed),
		vbFlogChan:                         make(chan *vbFlogEntry),
		vbnos:                              vbnos,
		vbOwnershipGiveUpRoutineCount:      vbOwnershipGiveUpRoutineCount,
		vbOwnershipTakeoverRoutineCount:    vbOwnershipTakeoverRoutineCount,
		vbPlasmaStore:                      vbPlasmaStore,
		vbPlasmaReader:                     make(map[uint16]*plasma.Writer),
		vbPlasmaWriter:                     make(map[uint16]*plasma.Writer),
		vbProcessingStats:                  newVbProcessingStats(app.AppName),
		vbsRemainingToGiveUp:               make([]uint16, 0),
		vbsRemainingToOwn:                  make([]uint16, 0),
		vbsRemainingToRestream:             make([]uint16, 0),
		workerName:                         fmt.Sprintf("worker_%s_%d", app.AppName, index),
		writeBatchSeqnoMap:                 make(map[uint16]uint64),
	}

	return consumer
}
// Serve acts as init routine for consumer handle. It sets up per-run
// channels/stat maps and supervision, connects bucket handles and DCP feeds,
// starts the timer/debugger helper goroutines, then blocks in
// controlRoutine() until shutdown.
func (c *Consumer) Serve() {
	// Fresh control channels and stats maps for this run.
	c.stopConsumerCh = make(chan struct{}, 1)
	c.stopCheckpointingCh = make(chan struct{}, 1)
	c.dcpMessagesProcessed = make(map[mcd.CommandCode]uint64)
	c.v8WorkerMessagesProcessed = make(map[string]uint64)

	// Supervisor tree owning the timer-transfer handle and the CPP client.
	c.consumerSup = suptree.NewSimple(c.workerName)
	go c.consumerSup.ServeBackground()

	c.timerTransferHandle = timer.NewTimerTransfer(c, c.app.AppName, c.eventingDir,
		c.HostPortAddr(), c.workerName)
	c.timerTransferSupToken = c.consumerSup.Add(c.timerTransferHandle)

	c.cppWorkerThrPartitionMap()

	// Bucket handles are connected with fixed backoff, retrying until success.
	util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), commonConnectBucketOpCallback, c, &c.cbBucket)
	util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), gocbConnectBucketCallback, c)
	util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), gocbConnectMetaBucketCallback, c)

	var flogs couchbase.FailoverLog
	util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), getFailoverLogOpCallback, c, &flogs, dcpConfig)

	sort.Sort(util.Uint16Slice(c.vbnos))
	logging.Infof("V8CR[%s:%s:%s:%d] vbnos len: %d",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid(), len(c.vbnos))

	util.Retry(util.NewFixedBackoff(clusterOpRetryInterval), getEventingNodeAddrOpCallback, c)
	logging.Infof("V8CR[%s:%s:%s:%d] Spawning worker corresponding to producer, node addr: %v",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid(), c.HostPortAddr())

	// One DCP feed per KV node; each feed is drained into the aggregate
	// channel, with a cancel channel used to stop the drain on teardown.
	var feedName couchbase.DcpFeedName
	kvHostPorts := c.producer.KvHostPorts()
	for _, kvHostPort := range kvHostPorts {
		feedName = couchbase.DcpFeedName("eventing:" + c.HostPortAddr() + "_" + kvHostPort + "_" + c.workerName)
		c.hostDcpFeedRWMutex.Lock()
		util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), startDCPFeedOpCallback, c, feedName, dcpConfig, kvHostPort)
		cancelCh := make(chan struct{}, 1)
		c.dcpFeedCancelChs = append(c.dcpFeedCancelChs, cancelCh)
		c.addToAggChan(c.kvHostDcpFeedMap[kvHostPort], cancelCh)
		c.hostDcpFeedRWMutex.Unlock()
	}

	c.client = newClient(c, c.app.AppName, c.tcpPort, c.workerName, c.eventingAdminPort)
	c.clientSupToken = c.consumerSup.Add(c.client)

	c.startDcp(dcpConfig, flogs)

	// Initialises timer processing worker instances
	c.vbTimerProcessingWorkerAssign(true)

	// go c.plasmaPersistAll()

	// doc_id timer events
	for _, r := range c.timerProcessingRunningWorkers {
		go r.processTimerEvents()
	}

	// non doc_id timer events
	go c.processNonDocTimerEvents()

	// V8 Debugger polling routine
	go c.pollForDebuggerStart()

	// Signal the spawner that bootstrap finished, then block handling
	// control messages until shutdown.
	c.signalBootstrapFinishCh <- struct{}{}

	c.controlRoutine()

	logging.Debugf("V8CR[%s:%s:%s:%d] Exiting consumer init routine",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid())
}
// HandleV8Worker sets up CPP V8 worker post it's bootstrap: once the worker
// connects back, it pushes log level, thread partition map and the init/load
// payloads, then starts the checkpointing and event-processing goroutines.
func (c *Consumer) HandleV8Worker() {
	// Block until the CPP worker has connected back over TCP.
	<-c.signalConnectedCh

	logging.SetLogLevel(util.GetLogLevel(c.logLevel))
	c.sendLogLevel(c.logLevel, false)
	c.sendWorkerThrMap(nil, false)
	c.sendWorkerThrCount(0, false)

	util.Retry(util.NewFixedBackoff(clusterOpRetryInterval), getEventingNodeAddrOpCallback, c)

	// Derive this node's host from HostPortAddr; fall back to loopback when
	// the address is not yet known.
	var currHost string
	h := c.HostPortAddr()
	if h != "" {
		currHost = strings.Split(h, ":")[0]
	} else {
		currHost = "127.0.0.1"
	}

	// Fetch memcached credentials for the first KV node, retrying on failure.
	var user, password string
	util.Retry(util.NewFixedBackoff(time.Second), getMemcachedServiceAuth, c.producer.KvHostPorts()[0], &user, &password)

	payload := makeV8InitPayload(c.app.AppName, currHost, c.eventingDir, c.eventingAdminPort,
		c.producer.KvHostPorts()[0], c.producer.CfgData(), user, password, c.lcbInstCapacity,
		c.executionTimeout, c.enableRecursiveMutation)
	logging.Debugf("V8CR[%s:%s:%s:%d] V8 worker init enable_recursive_mutation flag: %v",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid(), c.enableRecursiveMutation)

	c.sendInitV8Worker(payload, false)
	c.sendLoadV8Worker(c.app.AppCode, false)
	c.sendGetSourceMap(false)
	c.sendGetHandlerCode(false)

	go c.doLastSeqNoCheckpoint()
	go c.processEvents()
}
// Stop acts terminate routine for consumer handle: it closes bucket handles,
// stops supervised children and tickers, shuts down sockets and signals every
// helper goroutine started by Serve to exit.
func (c *Consumer) Stop() {
	// recover() guards the teardown — sends on channels or closes of handles
	// below could otherwise panic the whole process (exact trigger is not
	// visible from this function alone).
	defer func() {
		if r := recover(); r != nil {
			trace := debug.Stack()
			logging.Errorf("V8CR[%s:%s:%s:%d] Consumer stop routine, panic and recover, %v stack trace: %v",
				c.app.AppName, c.workerName, c.tcpPort, c.Pid(), r, string(trace))
		}
	}()

	logging.Infof("V8CR[%s:%s:%s:%d] Gracefully shutting down consumer routine",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid())

	c.cbBucket.Close()
	c.gocbBucket.Close()
	c.gocbMetaBucket.Close()

	c.producer.CleanupDeadConsumer(c)

	// Remove supervised children before stopping the supervisor itself.
	c.consumerSup.Remove(c.timerTransferSupToken)
	c.consumerSup.Remove(c.clientSupToken)
	c.consumerSup.Stop()

	c.checkpointTicker.Stop()
	c.restartVbDcpStreamTicker.Stop()
	c.statsTicker.Stop()
	c.persistAllTicker.Stop()

	c.conn.Close()
	if c.debugClient != nil {
		c.debugConn.Close()
		c.debugListener.Close()
	}

	// Signal every timer-processing worker, then the remaining helper routines.
	for k := range c.timerProcessingWorkerSignalCh {
		k.stopCh <- struct{}{}
	}

	c.nonDocTimerStopCh <- struct{}{}
	c.stopControlRoutineCh <- struct{}{}
	c.stopPlasmaPersistCh <- struct{}{}
	c.signalStopDebuggerRoutineCh <- struct{}{}

	// Stop the per-KV-node feed drains, then the feeds themselves.
	for _, cancelCh := range c.dcpFeedCancelChs {
		cancelCh <- struct{}{}
	}
	for _, dcpFeed := range c.kvHostDcpFeedMap {
		dcpFeed.Close()
	}

	close(c.aggDCPFeed)
}
// String implements fmt.Stringer so a crashed C++ V8 worker can be reported
// with per-consumer identity and message counters for debugging.
func (c *Consumer) String() string {
	dcpCounts, _, _ := util.SprintDCPCounts(c.dcpMessagesProcessed)
	v8Counts := util.SprintV8Counts(c.v8WorkerMessagesProcessed)
	return fmt.Sprintf("consumer => app: %s name: %v tcpPort: %s ospid: %d"+
		" dcpEventProcessed: %s v8EventProcessed: %s",
		c.app.AppName, c.ConsumerName(), c.tcpPort, c.Pid(), dcpCounts, v8Counts)
}
// Pid returns the process id of the CPP V8 worker, or 0 while it is unknown.
func (c *Consumer) Pid() int {
	if pid, ok := c.osPid.Load().(int); ok {
		return pid
	}
	return 0
}
// NotifyClusterChange is called by producer handle to signify each
// consumer instance about StartTopologyChange rpc call from cbauth service.Manager
func (c *Consumer) NotifyClusterChange() {
	logging.Infof("V8CR[%s:%s:%s:%d] Got notification about cluster state change",
		c.app.AppName, c.ConsumerName(), c.tcpPort, c.Pid())

	// While a rebalance is already in flight only log the event; do not
	// queue another notification for the control routine.
	if c.isRebalanceOngoing {
		logging.Infof("V8CR[%s:%s:%s:%d] Skipping cluster state change notification to control routine because another rebalance is in ongoing",
			c.app.AppName, c.ConsumerName(), c.tcpPort, c.Pid())
		return
	}
	c.clusterStateChangeNotifCh <- struct{}{}
}
// NotifyRebalanceStop is called by producer to signal stopping of
// rebalance operation
func (c *Consumer) NotifyRebalanceStop() {
	logging.Infof("V8CR[%s:%s:%s:%d] Got notification about rebalance stop",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid())

	// Wake every vbucket give-up and takeover routine so they can bail out.
	for routine := 0; routine < c.vbOwnershipGiveUpRoutineCount; routine++ {
		c.stopVbOwnerGiveupCh <- struct{}{}
	}
	for routine := 0; routine < c.vbOwnershipTakeoverRoutineCount; routine++ {
		c.stopVbOwnerTakeoverCh <- struct{}{}
	}
}
// NotifySettingsChange signals consumer instance of settings update
func (c *Consumer) NotifySettingsChange() {
	logging.Infof("V8CR[%s:%s:%s:%d] Got notification about application settings update",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid())

	// Nudge the settings-change channel (buffered with capacity 1).
	var signal struct{}
	c.signalSettingsChangeCh <- signal
}
// SignalPlasmaClosed is used by producer instance to signal message from SuperSupervisor
// to under consumer about Closed plasma store instance
func (c *Consumer) SignalPlasmaClosed(vb uint16) {
	logging.Infof("V8CR[%s:%s:%s:%d] vb: %v got signal from parent producer about plasma store instance close",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid(), vb)

	// Forward the vbucket id to the close-handling channel.
	c.signalPlasmaClosedCh <- vb
}
// SignalPlasmaTransferFinish is called by parent producer instance to signal consumer
// about timer data transfer completion during rebalance
func (c *Consumer) SignalPlasmaTransferFinish(vb uint16, store *plasma.Plasma) {
	// recover() guards the channel send below — presumably against sends
	// racing consumer teardown; the exact trigger is not visible here.
	defer func() {
		if r := recover(); r != nil {
			trace := debug.Stack()
			logging.Errorf("V8CR[%s:%s:%s:%d] vb: %v SignalPlasmaTransferFinish: panic and recover, %v, stack trace: %v",
				c.app.AppName, c.workerName, c.tcpPort, c.Pid(), vb, r, string(trace))
		}
	}()

	logging.Infof("V8CR[%s:%s:%s:%d] vb: %v got signal from parent producer about plasma timer data transfer finish",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid(), vb)

	msg := &plasmaStoreMsg{vb, store}
	c.signalPlasmaTransferFinishCh <- msg
}
// SignalStopDebugger signal C++ V8 consumer to stop Debugger Agent, resets
// the shared debugger-instance blob and removes the local frontend.url file.
func (c *Consumer) SignalStopDebugger() {
	logging.Infof("V8CR[%s:%s:%s:%d] Got signal to stop V8 Debugger Agent",
		c.app.AppName, c.workerName, c.tcpPort, c.Pid())
	c.signalStopDebuggerCh <- struct{}{}

	c.stopDebuggerServer()

	// Reset the debugger instance blob
	dInstAddrKey := fmt.Sprintf("%s::%s", c.app.AppName, debuggerInstanceAddr)
	dInstAddrBlob := &common.DebuggerInstanceAddrBlob{}
	util.Retry(util.NewFixedBackoff(bucketOpRetryInterval), setOpCallback, c, dInstAddrKey, dInstAddrBlob)

	// Best-effort cleanup of the frontend URL file in the current working
	// directory; failures are only logged, never fatal.
	cwd, err := os.Getwd()
	if err != nil {
		logging.Infof("V8CR[%s:%s:%s:%d] Failed to get current working dir, err: %v",
			c.app.AppName, c.workerName, c.tcpPort, c.Pid(), err)
		return
	}

	frontendURLFilePath := fmt.Sprintf("%s/%s_frontend.url", cwd, c.app.AppName)
	err = os.Remove(frontendURLFilePath)
	if err != nil {
		logging.Infof("V8CR[%s:%s:%s:%d] Failed to remove frontend.url file, err: %v",
			c.app.AppName, c.workerName, c.tcpPort, c.Pid(), err)
	}
}
|
package requestid
import (
"context"
"net/http"
httputil "github.com/ahmedalhulaibi/httpfw"
"github.com/google/uuid"
)
// ContextKey is the request-context key under which the request ID is stored.
// NOTE(review): a plain string context key can collide with other packages;
// the idiomatic fix is an unexported typed key, but that would break any
// external readers of this key — confirm before changing.
const ContextKey = "request_id"

// requestIDExtractor produces a request ID for an incoming request.
// An empty string means "no ID available".
type requestIDExtractor interface {
	GetRequestID(r *http.Request) string
}

// RequestIDMiddleware wraps a handler, attaching a request ID to the request
// context before delegating.
type RequestIDMiddleware struct {
	h     http.Handler
	ridex requestIDExtractor
}
// New returns a middleware constructor that wraps any handler with
// request-ID extraction using the supplied extractor.
func New(ridex requestIDExtractor) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		mw := RequestIDMiddleware{h: next, ridex: ridex}
		return mw
	}
}
// ServeHTTP stores the extracted request ID (if any) in the request context
// under ContextKey, then delegates to the wrapped handler.
func (ridmw RequestIDMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	requestID := ridmw.ridex.GetRequestID(r)
	if requestID != "" {
		ctx := context.WithValue(r.Context(), ContextKey, requestID)
		r = r.WithContext(ctx)
	}
	ridmw.h.ServeHTTP(w, r)
}
// RequestIDGenerator yields a brand-new UUID for every request.
type RequestIDGenerator struct{}

// NewRequestIDGenerator constructs a RequestIDGenerator.
func NewRequestIDGenerator() RequestIDGenerator {
	var g RequestIDGenerator
	return g
}

// GetRequestID ignores the request and returns a freshly generated UUID.
func (r RequestIDGenerator) GetRequestID(_ *http.Request) string {
	return uuid.NewString()
}
// RequestIDExtractor reads the request ID from a configured HTTP header.
type RequestIDExtractor struct {
	header string
}

// NewRequestIDExtractor builds an extractor bound to the given header name.
func NewRequestIDExtractor(header string) RequestIDExtractor {
	ridex := RequestIDExtractor{header: header}
	return ridex
}

// GetRequestID returns the configured header's value, or "" when absent.
func (ridex RequestIDExtractor) GetRequestID(r *http.Request) string {
	return httputil.GetStringFromHeader(r, ridex.header)
}
// RequestIDChain consults a sequence of extractors and yields the first
// non-empty request ID it finds.
type RequestIDChain struct {
	extractors []requestIDExtractor
}

// NewRequestIDChain builds a chain from the given extractors, tried in order.
func NewRequestIDChain(extractors ...requestIDExtractor) RequestIDChain {
	return RequestIDChain{extractors: extractors}
}

// GetRequestID walks the chain and returns the first non-empty ID, or "".
func (ridex RequestIDChain) GetRequestID(r *http.Request) string {
	for i := range ridex.extractors {
		reqID := ridex.extractors[i].GetRequestID(r)
		if reqID != "" {
			return reqID
		}
	}
	return ""
}
|
package goserver
import (
"fmt"
"github.com/jmoiron/sqlx"
)
// UserRepoSqlite3 fulfills UserRepo using a Sqlite3 database
// All statements are prepared once at construction (NewUserRepoSqlite3).
type UserRepoSqlite3 struct {
	db                *sqlx.DB
	insertStmt        *sqlx.NamedStmt // prepared userInsert
	updatePasswdStmt  *sqlx.NamedStmt // prepared userUpdatePasswd
	getByIDStmt       *sqlx.NamedStmt // prepared userGetByID
	getByUsernameStmt *sqlx.NamedStmt // prepared userGetByUsername
}
const (
userInsert = `
INSERT INTO users (username, password, salt)
VALUES (:username, :password, :salt)
`
userUpdatePasswd = `
UPDATE users SET
password=:password,
salt=:salt
WHERE id=:id
`
userSelectBase = `SELECT * FROM users`
userGetByID = userSelectBase + ` WHERE id=:id LIMIT 1`
userGetByUsername = userSelectBase + ` WHERE username=:username LIMIT 1`
)
// NewUserRepoSqlite3 prepares a repo given a sqlite db handle.
//
// All named statements are prepared eagerly so malformed SQL surfaces at
// construction time. On failure the partially-initialized repo is returned
// alongside the error, preserving the original contract.
func NewUserRepoSqlite3(db *sqlx.DB) (*UserRepoSqlite3, error) {
	repo := &UserRepoSqlite3{db: db}

	// Drive every statement through one loop instead of four copy-pasted
	// prepare/error-check blocks.
	for _, p := range []struct {
		dst   **sqlx.NamedStmt
		query string
	}{
		{&repo.insertStmt, userInsert},
		{&repo.updatePasswdStmt, userUpdatePasswd},
		{&repo.getByIDStmt, userGetByID},
		{&repo.getByUsernameStmt, userGetByUsername},
	} {
		stmt, err := db.PrepareNamed(p.query)
		if err != nil {
			return repo, fmt.Errorf("Failed to prepare statement `%s`: %v", p.query, err)
		}
		*p.dst = stmt
	}
	return repo, nil
}
// CreateUser tries to insert a user and fills in the ID of the provided user.
func (repo *UserRepoSqlite3) CreateUser(s *User) error {
	res, err := repo.insertStmt.Exec(s)
	if err != nil {
		return err
	}
	id, err := res.LastInsertId()
	if err != nil {
		return err
	}
	s.ID = int(id)
	return nil
}
// UpdateUserPasswd attempts to update the given user's corresponding password
// and salt.
func (repo *UserRepoSqlite3) UpdateUserPasswd(s *User) error {
	_, err := repo.updatePasswdStmt.Exec(s)
	return err
}
// GetUserByID fills the passed User struct's fields using the ID field, which
// must be filled in.
func (repo *UserRepoSqlite3) GetUserByID(s *User) error {
	return repo.getByIDStmt.Get(s, s)
}
// GetUserByUsername fills the passed User struct's fields using the username
// field, which must be filled in.
func (repo *UserRepoSqlite3) GetUserByUsername(s *User) error {
	return repo.getByUsernameStmt.Get(s, s)
}
|
package main
// Leetcode 292. (easy)
// canWinNim reports whether the first player can force a win in Nim with n
// stones: the losing positions are exactly the multiples of four.
func canWinNim(n int) bool {
	if n%4 == 0 {
		return false
	}
	return true
}
|
package handlers
import (
	"fmt"
	"net/http"
	"path"
	"plugin"

	"github.com/layer5io/meshery/models"
)
var (
	// extendedEndpoints maps URL paths to routers registered by loaded
	// extension packages (see LoadExtensionFromPackage).
	extendedEndpoints = make(map[string]*models.Router)
)

// ExtensionsEndpointHandler dispatches a request to the extension router
// registered for its URL path, if any.
// NOTE(review): an unknown path answers 500; http.StatusNotFound seems more
// appropriate — confirm client expectations before changing.
func (h *Handler) ExtensionsEndpointHandler(w http.ResponseWriter, req *http.Request, prefObj *models.Preference, user *models.User, provider models.Provider) {
	if val, ok := extendedEndpoints[req.URL.Path]; ok {
		val.HTTPHandler.ServeHTTP(w, req)
		return
	}
	http.Error(w, "Invalid endpoint", http.StatusInternalServerError)
}
// LoadExtensionFromPackage opens the provider's GraphQL extension plugin,
// invokes its Run entry point and registers any HTTP router the extension
// exposes into extendedEndpoints.
func (h *Handler) LoadExtensionFromPackage(w http.ResponseWriter, req *http.Request, provider models.Provider) error {
	packagePath := ""
	if len(provider.GetProviderProperties().Extensions.GraphQL) > 0 {
		packagePath = provider.GetProviderProperties().Extensions.GraphQL[0].Path
	}
	plug, err := plugin.Open(path.Join(provider.PackageLocation(), packagePath))
	if err != nil {
		return ErrPluginOpen(err)
	}

	// Resolve the plugin's Run entry point.
	symRun, err := plug.Lookup("Run")
	if err != nil {
		return ErrPluginLookup(err)
	}
	// Guard the type assertion: a plugin built against a different models
	// version would otherwise panic the whole server instead of returning
	// an error.
	runFunction, ok := symRun.(func(*models.ExtensionInput) (*models.ExtensionOutput, error))
	if !ok {
		return ErrPluginLookup(fmt.Errorf("unexpected type %T for plugin symbol Run", symRun))
	}

	output, err := runFunction(&models.ExtensionInput{
		DBHandler:       provider.GetGenericPersister(),
		MeshSyncChannel: h.meshsyncChannel,
		BrokerConn:      h.brokerConn,
		Logger:          h.log,
	})
	if err != nil {
		return ErrPluginRun(err)
	}

	// Add http endpoint to serve, if the extension provided one.
	if output.Router != nil {
		extendedEndpoints[output.Router.Path] = output.Router
	}
	return nil
}
|
package model
import (
"reflect"
"testing"
)
// CompareTest is a single EqualsTo case: A and B are the operands wrapped as
// reflect.Values, E is the expected comparison result.
type CompareTest struct {
	A reflect.Value
	B reflect.Value
	E bool
}
// TestConstant_EqualsTo checks Constant.EqualsTo across matching and
// mismatching value kinds, including the nil/nil case.
func TestConstant_EqualsTo(t *testing.T) {
	tl := []*CompareTest{
		// Same kind, same value: equal.
		{A: reflect.ValueOf(12), B: reflect.ValueOf(12), E: true},
		{A: reflect.ValueOf(12.12), B: reflect.ValueOf(12.12), E: true},
		{A: reflect.ValueOf("12"), B: reflect.ValueOf("12"), E: true},
		{A: reflect.ValueOf(false), B: reflect.ValueOf(false), E: true},
		{A: reflect.ValueOf(byte(12)), B: reflect.ValueOf(byte(12)), E: true},
		{A: reflect.ValueOf(uint(12)), B: reflect.ValueOf(uint(12)), E: true},
		// Different kinds must never compare equal, even for the "same" number.
		{A: reflect.ValueOf(12), B: reflect.ValueOf(12.0), E: false},
		{A: reflect.ValueOf(12), B: reflect.ValueOf("12"), E: false},
		{A: reflect.ValueOf(12), B: reflect.ValueOf(uint(12)), E: false},
		{A: reflect.ValueOf(nil), B: reflect.ValueOf(uint(12)), E: false},
		{A: reflect.ValueOf(nil), B: reflect.ValueOf(nil), E: true},
	}
	for i, tls := range tl {
		a := &Constant{ConstantValue: tls.A}
		b := &Constant{ConstantValue: tls.B}
		if a.EqualsTo(b) != tls.E {
			// t.Errorf already marks the test failed; the former extra
			// t.Fail() call was redundant.
			t.Errorf("#%d failed.", i)
		}
	}
}
|
package router
import (
"github.com/1071496910/simple-http-router/lib/dispatcher"
"net/http"
"path/filepath"
"sync"
)
// Route is a simple HTTP router: per-method handler tables plus a per-method
// dispatcher that resolves a request URL to a registered location.
type Route struct {
	route   map[string]map[string]http.Handler
	dps     map[string]dispatcher.Dispatcher
	mtx     sync.Mutex // guards registration into route/dps
	filters []filterFunc
}

// filterFunc inspects a request before routing; returning false aborts
// handling (the filter is expected to have written a response).
type filterFunc func(rw http.ResponseWriter, r *http.Request) bool
// New builds a Route with an empty handler table and a dispatcher for each
// supported HTTP method.
func New() *Route {
	supported := []string{"GET", "POST", "PUT", "DELETE", "HEAD"}
	routeMap := make(map[string]map[string]http.Handler, len(supported))
	dps := make(map[string]dispatcher.Dispatcher, len(supported))
	for _, method := range supported {
		routeMap[method] = make(map[string]http.Handler)
		dps[method] = dispatcher.NewDispatcher()
	}
	return &Route{
		route:   routeMap,
		dps:     dps,
		filters: make([]filterFunc, 0),
	}
}
// ServeHTTP runs the registered filters, then resolves the request path via
// the per-method dispatcher and invokes the matching handler.
func (rt *Route) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	for _, filter := range rt.filters {
		if !filter(rw, r) {
			// Filter rejected the request (and wrote its own response).
			return
		}
	}

	// Bug fix: methods that were never registered (e.g. PATCH) used to index
	// rt.dps with a missing key, yielding a nil Dispatcher and a panic.
	dp, ok := rt.dps[r.Method]
	if !ok {
		http.NotFound(rw, r)
		return
	}

	// Bug fix: dispatch on the path only. r.URL.String() could include the
	// query string, which would never match a registered route.
	url := filepath.Join(r.URL.Path)
	location, err := dp.Dispatch(url)
	if err != nil {
		if err == dispatcher.ErrNoRoute {
			http.NotFound(rw, r)
			return
		}
		http.Error(rw, "unknown error", http.StatusServiceUnavailable)
		return
	}
	rt.route[r.Method][location].ServeHTTP(rw, r)
}
// Filter appends fn to the chain consulted before routing each request;
// a filter returning false aborts handling.
func (r *Route) Filter(fn filterFunc) {
	r.filters = append(r.filters, fn)
}
// register records handlerFunc for path under the given HTTP method, updating
// both the dispatcher and the handler table. It centralizes the locking and
// path normalization previously duplicated in every method-specific function.
func (r *Route) register(method, path string, handlerFunc http.HandlerFunc) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.dps[method].Register(path)
	r.route[method][filepath.Join(path)] = handlerFunc
}

// Head registers handlerFunc for HEAD requests on path.
func (r *Route) Head(path string, handlerFunc http.HandlerFunc) {
	r.register("HEAD", path, handlerFunc)
}

//Handle for all method
func (r *Route) Handle(path string, handlerFunc http.HandlerFunc) {
	r.Get(path, handlerFunc)
	r.Post(path, handlerFunc)
	r.Put(path, handlerFunc)
	r.Delete(path, handlerFunc)
	r.Head(path, handlerFunc)
}

// Get registers handlerFunc for GET requests on path.
func (r *Route) Get(path string, handlerFunc http.HandlerFunc) {
	r.register("GET", path, handlerFunc)
}

// Put registers handlerFunc for PUT requests on path.
func (r *Route) Put(path string, handlerFunc http.HandlerFunc) {
	r.register("PUT", path, handlerFunc)
}

// Post registers handlerFunc for POST requests on path.
func (r *Route) Post(path string, handlerFunc http.HandlerFunc) {
	r.register("POST", path, handlerFunc)
}

// Delete registers handlerFunc for DELETE requests on path.
func (r *Route) Delete(path string, handlerFunc http.HandlerFunc) {
	r.register("DELETE", path, handlerFunc)
}
|
package example1
import "errors"
// Opener is the "open" transition of the connection state pattern.
type Opener interface {
	Open(c *Connection) error
}

// Closer is the "close" transition of the connection state pattern.
type Closer interface {
	Close(c *Connection) error
}

// StateManager is the full state interface: every connection state must
// support both transitions.
type StateManager interface {
	Opener
	Closer
}
// Connection delegates Open/Close to its current state, which decides whether
// the transition is legal and installs the next state.
type Connection struct {
	state StateManager
}

// Open asks the current state to open the connection.
func (c *Connection) Open() error {
	return c.state.Open(c)
}

// Close asks the current state to close the connection.
func (c *Connection) Close() error {
	return c.state.Close(c)
}

// setState installs the next state; called by state implementations.
func (c *Connection) setState(state StateManager) {
	c.state = state
}
// OpenState represents an opened connection.
type OpenState struct {}

// CloseState represents a closed connection.
type CloseState struct {}

// Open on an already-open connection is an error.
func (o OpenState) Open(c *Connection) error {
	return errors.New("connection is opened")
}

// Close transitions an open connection to the closed state.
func (o OpenState) Close(c *Connection) error {
	c.setState(CloseState{})
	return nil
}

// Open transitions a closed connection to the open state.
func (o CloseState) Open(c *Connection) error {
	c.setState(OpenState{})
	return nil
}
// Close on an already-closed connection is an error.
func (o CloseState) Close(c *Connection) error {
	// Bug fix: the message previously read "connection is opened", copied
	// from OpenState.Open; a closed connection cannot be closed again.
	return errors.New("connection is closed")
}
|
package db
import (
"database/sql"
"fmt"
"os"
"github.com/joho/godotenv"
_ "github.com/lib/pq"
)
// loadDotEnv loads .env into the process environment and returns the value of
// key (empty string if unset). It panics when the .env file cannot be read,
// since configuration is required to proceed.
func loadDotEnv(key string) string {
	if err := godotenv.Load(".env"); err != nil {
		// err is already an error; the former error(err) conversion was a no-op.
		panic(err)
	}
	return os.Getenv(key)
}
// ConnSQL is a function to connect with database. It builds the Postgres DSN
// from .env settings and returns the opened handle, panicking on failure.
//
// NOTE(review): sql.Open only validates its arguments — it does not dial the
// server. Add db.Ping() if callers need an immediate connectivity check.
func ConnSQL() *sql.DB {
	host := loadDotEnv("HOST_PG")
	port := loadDotEnv("PORT_PG")
	user := loadDotEnv("USER_PG")
	password := loadDotEnv("PASSWORD_PG")
	dbname := loadDotEnv("DBNAME_PG")

	psqlInfo := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", host, port, user, password, dbname)
	db, err := sql.Open("postgres", psqlInfo)
	if err != nil {
		// err is already an error; the former error(err) conversion was a no-op.
		panic(err)
	}
	return db
}
|
package scan
import (
"database/sql"
"fmt"
"reflect"
)
// ErrOneRow is returned by Row scan when the query returns more than one row
var ErrOneRow = fmt.Errorf("sql/scan: expect exactly one row in result set")

// Readable provides a scannable interface
type Readable interface {
	Scan(interface{}) error
}

// Scanner is the interface that wraps the
// three sql.Rows methods used for scanning.
// *sql.Rows satisfies it directly.
type Scanner interface {
	Next() bool
	Scan(...interface{}) error
	Columns() ([]string, error)
}
// Row scans one row to the given value. It fails if the rows holds more than 1 row.
// sql.ErrNoRows is returned for an empty result set and ErrOneRow when a
// second row exists; src is filled via an allocator built from its type and
// the result columns.
func Row(scanner Scanner, src interface{}) error {
	value, err := valueOf(src)
	if err != nil {
		return err
	}
	columns, err := scanner.Columns()
	if err != nil {
		return fmt.Errorf("sql/scan: failed getting column names: %v", err)
	}
	// Mirror database/sql semantics: an empty result is a distinct sentinel.
	if !scanner.Next() {
		return sql.ErrNoRows
	}
	allocator, err := NewAllocator(value.Type(), columns)
	if err != nil {
		return err
	}
	// More result columns than allocatable targets is a mismatch.
	if expected, actual := len(columns), len(allocator.types); expected > actual {
		return fmt.Errorf("sql/scan: columns do not match (%d > %d)", expected, actual)
	}
	values := allocator.Allocate()
	if err := scanner.Scan(values...); err != nil {
		return fmt.Errorf("sql/scan: failed scanning rows: %v", err)
	}
	next := allocator.Create(values)
	allocator.Set(value, next, columns)
	// A second row violates the exactly-one-row contract.
	if scanner.Next() {
		return ErrOneRow
	}
	return nil
}
// Rows scans the given ColumnScanner (basically, sql.Row or sql.Rows) into the given slice.
// Elements up to src's current length are overwritten in place; additional
// result rows are appended.
func Rows(scanner Scanner, src interface{}) error {
	value, err := valueOf(src)
	if err != nil {
		return err
	}
	columns, err := scanner.Columns()
	if err != nil {
		return fmt.Errorf("sql/scan: failed getting column names: %v", err)
	}
	// Only a slice can receive multiple rows.
	if kind := value.Kind(); kind != reflect.Slice {
		return fmt.Errorf("sql/scan: invalid type %s. expected slice as an argument", kind)
	}
	allocator, err := NewAllocator(value.Type().Elem(), columns)
	if err != nil {
		return err
	}
	// More result columns than allocatable targets is a mismatch.
	if expected, actual := len(columns), len(allocator.types); expected > actual {
		return fmt.Errorf("sql/scan: columns do not match (%d > %d)", expected, actual)
	}
	var (
		count = value.Len()
		index = 0
	)
	for scanner.Next() {
		values := allocator.Allocate()
		if err := scanner.Scan(values...); err != nil {
			return fmt.Errorf("sql/scan: failed scanning rows: %v", err)
		}
		switch {
		case index < count:
			// Reuse the existing element slot.
			allocator.Set(value.Index(index), allocator.Create(values), columns)
		default:
			// Grow the slice for rows beyond the original length.
			value.Set(reflect.Append(value, allocator.Create(values)))
		}
		index++
	}
	return nil
}
|
package db
import (
"fmt"
"time"
"gin-use/configs"
"gorm.io/driver/postgres"
"github.com/pkg/errors"
"gorm.io/gorm"
)
// Compile-time check that dbRepo satisfies Repo.
var _ Repo = (*dbRepo)(nil)

// Repo exposes separate read and write gorm handles plus their teardown.
type Repo interface {
	// i is an unexported marker method restricting implementations to this package.
	i()
	GetDbR() *gorm.DB
	GetDbW() *gorm.DB
	DbRClose() error
	DbWClose() error
}

// dbRepo holds the read (DbR) and write (DbW) database handles.
type dbRepo struct {
	DbR *gorm.DB
	DbW *gorm.DB
}
// New connects the read and write databases from configuration and returns
// them wrapped as a Repo.
func New() (Repo, error) {
	cfg := configs.Get().Pg

	read, err := dbConnect(cfg.Read.User, cfg.Read.Pass, cfg.Read.Host, cfg.Read.Port, cfg.Read.Name)
	if err != nil {
		return nil, err
	}

	write, err := dbConnect(cfg.Write.User, cfg.Write.Pass, cfg.Write.Host, cfg.Write.Port, cfg.Write.Name)
	if err != nil {
		return nil, err
	}

	return &dbRepo{DbR: read, DbW: write}, nil
}
// i marks dbRepo as the in-package implementation of Repo.
func (d *dbRepo) i() {}

// GetDbR returns the read database handle.
func (d *dbRepo) GetDbR() *gorm.DB {
	return d.DbR
}

// GetDbW returns the write database handle.
func (d *dbRepo) GetDbW() *gorm.DB {
	return d.DbW
}

// DbRClose closes the underlying sql.DB of the read handle.
func (d *dbRepo) DbRClose() error {
	sqlDB, err := d.DbR.DB()
	if err != nil {
		return err
	}
	return sqlDB.Close()
}

// DbWClose closes the underlying sql.DB of the write handle.
func (d *dbRepo) DbWClose() error {
	sqlDB, err := d.DbW.DB()
	if err != nil {
		return err
	}
	return sqlDB.Close()
}
// dbConnect opens a gorm Postgres connection for the given credentials and
// applies the shared connection-pool settings from configuration.
func dbConnect(user, pass, host, port, dbName string) (*gorm.DB, error) {
	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable TimeZone=Asia/Shanghai",
		host,
		port,
		user,
		pass,
		dbName)
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("[db connection failed] Database name: %s", dbName))
	}

	cfg := configs.Get().Pg.Base
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}

	// Cap open connections (0 means unlimited); a bound avoids
	// "too many connections" errors under high concurrency.
	sqlDB.SetMaxOpenConns(cfg.MaxOpenConn)
	// Keep released connections idle in the pool for reuse.
	sqlDB.SetMaxIdleConns(cfg.MaxIdleConn)
	// Maximum lifetime of a pooled connection.
	sqlDB.SetConnMaxLifetime(time.Minute * cfg.ConnMaxLifeTime)

	// Plugins (e.g. tracing) could be registered here:
	// db.Use(&TracePlugin{})
	return db, nil
}
|
/*
* openapi-ipify
*
* OpenAPI client for ipify, a simple public IP address API
*
* API version: 3.3.1-pre.0
* Contact: blah@cliffano.com
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
import (
"net/http"
"github.com/gin-gonic/gin"
)
// GetIp - Get your public IP address
// Generated stub: answers 200 with an empty JSON object; the actual lookup
// is not implemented here.
func GetIp(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}
|
/*
* Copyright (c) zrcoder 2019-2020. All rights reserved.
*/
package longest_increasing_path_in_a_matrix
import "math"
/*
给定一个整数矩阵,找出最长递增路径的长度。
对于每个单元格,你可以往上,下,左,右四个方向移动。 你不能在对角线方向上移动或移动到边界外(即不允许环绕)。
示例 1:
输入: nums =
[
[9,9,4],
[6,6,8],
[2,1,1]
]
输出: 4
解释: 最长递增路径为 [1, 2, 6, 9]。
示例 2:
输入: nums =
[
[3,4,5],
[3,2,6],
[2,2,1]
]
输出: 4
解释: 最长递增路径是 [3, 4, 5, 6]。注意不允许在对角线方向上移动。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
/*
朴素dfs
时间复杂度 :O(2^(m+n))。对每个有效递增路径均进行搜索。在最坏情况下,会有2^(m+n)次调用。例如:
1 2 3 . . . n
2 3 . . . n+1
3 . . . n+2
. .
. .
. .
m m+1 . . . n+m-1
空间复杂度 : O(mn)。 对于每次深度优先搜索,系统栈需要 O(h)空间,其中 h 为递归的最深深度。最坏情况下, O(h) = O(mn)。
作者:LeetCode
链接:https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix/solution/ju-zhen-zhong-de-zui-chang-di-zeng-lu-jing-by-leet/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
*/
// longestIncreasingPath1 is the naive DFS solution: from every cell, explore
// every strictly increasing path in the four axis directions. Exponential
// time in the worst case, O(m*n) recursion depth; kept for comparison with
// the memoized variant below.
func longestIncreasingPath1(matrix [][]int) int {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return 0
	}
	rows, cols := len(matrix), len(matrix[0])
	dirs := [][]int{{0, 1}, {1, 0}, {0, -1}, {-1, 0}}

	var dfs func(r, c int) int
	dfs = func(r, c int) int {
		best := 0
		for _, d := range dirs {
			x, y := r+d[0], c+d[1]
			if x < 0 || x >= rows || y < 0 || y >= cols {
				continue
			}
			if matrix[x][y] <= matrix[r][c] {
				continue
			}
			if v := dfs(x, y); v > best {
				best = v
			}
		}
		return best + 1 // the cell itself contributes length 1
	}

	longest := 0
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if v := dfs(r, c); v > longest {
				longest = v
			}
		}
	}
	return longest
}
/*
可以用一个备忘录存储dfs函数里已经计算的结果,减少重复计算
时间复杂度降为O(mn);空间复杂度依然是O(mn)
*/
// longestIncreasingPath memoizes the DFS: memo[r][c] caches the longest
// strictly increasing path starting at (r,c), with 0 meaning "not computed
// yet". Time and space are both O(m*n).
func longestIncreasingPath(matrix [][]int) int {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return 0
	}
	rows, cols := len(matrix), len(matrix[0])
	dirs := [][]int{{0, 1}, {1, 0}, {0, -1}, {-1, 0}}

	memo := make([][]int, rows)
	for i := range memo {
		memo[i] = make([]int, cols)
	}

	var dfs func(r, c int) int
	dfs = func(r, c int) int {
		if memo[r][c] != 0 {
			return memo[r][c]
		}
		for _, d := range dirs {
			x, y := r+d[0], c+d[1]
			if x < 0 || x >= rows || y < 0 || y >= cols {
				continue
			}
			if matrix[x][y] <= matrix[r][c] {
				continue
			}
			if v := dfs(x, y); v > memo[r][c] {
				memo[r][c] = v
			}
		}
		memo[r][c]++ // count the cell itself
		return memo[r][c]
	}

	longest := 0
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if v := dfs(r, c); v > longest {
				longest = v
			}
		}
	}
	return longest
}
// max returns the larger of a and b.
// NOTE(review): routing the comparison through math.Max converts to float64,
// which loses precision for |values| above 2^53. A plain if/else (or the
// Go 1.21 builtin max) would be exact; kept as-is since dropping math here
// would orphan this file's math import.
func max(a, b int) int {
	return int(math.Max(float64(a), float64(b)))
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package graphics
import (
"context"
"os"
"path/filepath"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/upstart"
"chromiumos/tast/shutil"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// overlay defines the parameters for a HW overlay (a.k.a. DRM plane).
type overlay struct {
format string
size string
}
// overlaysTestParam defines the overlays structure for a test case.
type overlaysTestParam struct {
primaryFormats []string
overlay overlay
}
func init() {
testing.AddTest(&testing.Test{
Func: PlatformOverlays,
LacrosStatus: testing.LacrosVariantUnknown,
Desc: "Checks that certain configurations of primary and overlay planes are indeed supported",
Contacts: []string{
"mcasas@chromium.org",
"chromeos-gfx-compositor@google.com",
},
Attr: []string{"group:graphics", "graphics_perbuild"},
SoftwareDeps: []string{"video_overlays", "no_qemu"},
HardwareDeps: hwdep.D(hwdep.InternalDisplay()),
Timeout: time.Minute,
Params: []testing.Param{{
Name: "24bpp",
Val: overlaysTestParam{
primaryFormats: []string{"XR24", "XB24", "AR24", "AB24"},
},
}, {
Name: "30bpp",
Val: overlaysTestParam{
primaryFormats: []string{"AR30", "AB30", "XR30", "XB30"},
},
ExtraHardwareDeps: hwdep.D(hwdep.Supports30bppFramebuffer()),
}, {
Name: "24bpp_nv12_overlay",
Val: overlaysTestParam{
primaryFormats: []string{"XR24", "XB24", "AR24", "AB24"},
overlay: overlay{"NV12", "640x360"},
},
// rk3399 ("bob"/"gru" and "scarlet") technically support NV12 overlays
// but has only 1 DRM plane all in all, so cannot pass this test.
ExtraHardwareDeps: hwdep.D(hwdep.SupportsNV12Overlays(), hwdep.SkipOnPlatform("bob", "gru", "kevin")),
}, {
Name: "30bpp_nv12_overlay",
Val: overlaysTestParam{
primaryFormats: []string{"AR30", "AB30", "XR30", "XB30"},
overlay: overlay{"NV12", "640x360"},
},
ExtraHardwareDeps: hwdep.D(hwdep.Supports30bppFramebuffer(), hwdep.SupportsNV12Overlays()),
}, {
Name: "24bpp_p010_overlay",
Val: overlaysTestParam{
primaryFormats: []string{"XR24", "XB24", "AR24", "AB24"},
overlay: overlay{"P010", "640x360"},
},
ExtraHardwareDeps: hwdep.D(hwdep.Supports30bppFramebuffer(), hwdep.SupportsP010Overlays()),
}, {
Name: "30bpp_p010_overlay",
Val: overlaysTestParam{
primaryFormats: []string{"AR30", "AB30", "XR30", "XB30"},
overlay: overlay{"P010", "640x360"},
},
ExtraHardwareDeps: hwdep.D(hwdep.Supports30bppFramebuffer(), hwdep.SupportsP010Overlays()),
}},
Fixture: "gpuWatchHangs",
})
}
// PlatformOverlays runs plane_test binary test for a given format.
func PlatformOverlays(ctx context.Context, s *testing.State) {
if err := upstart.StopJob(ctx, "ui"); err != nil {
s.Fatal("Failed to stop ui job: ", err)
}
defer upstart.EnsureJobRunning(ctx, "ui")
const testCommand string = "plane_test"
f, err := os.Create(filepath.Join(s.OutDir(), filepath.Base(testCommand)+".txt"))
if err != nil {
s.Fatal("Failed to create a log file: ", err)
}
defer f.Close()
const formatFlag string = "--format"
primaryFormats := s.Param().(overlaysTestParam).primaryFormats
overlayFormat := s.Param().(overlaysTestParam).overlay.format
overlaySize := s.Param().(overlaysTestParam).overlay.size
invocationError := make(map[string]error)
for _, primaryFormat := range primaryFormats {
params := []string{formatFlag, primaryFormat}
if overlayFormat != "" {
params = append(params, "--plane", formatFlag, overlayFormat, "--size", overlaySize)
}
invocationCommand := shutil.EscapeSlice(append([]string{testCommand}, params...))
s.Log("Running ", invocationCommand)
cmd := testexec.CommandContext(ctx, testCommand, params...)
cmd.Stdout = f
cmd.Stderr = f
if err := cmd.Run(); err != nil {
invocationError[invocationCommand] = err
} else {
// TODO(b/217970618): Parse the DRM response or debugfs to verify that the
// actual plane combination is what was intended.
s.Logf("Run succeeded for %s primary format", primaryFormat)
// This test verifies if any of the primaryFormats is supported. This
// emulates Chrome's behaviour, where we cannot know in advance if a
// given format would work ahead of time (due to e.g. internal/external
// display, format, dimensions etc).
return
}
}
s.Errorf("%s failed for all formats (Chrome must support at least one)", testCommand)
for command, err := range invocationError {
exitCode, ok := testexec.ExitCode(err)
if !ok {
s.Errorf("Failed to run %s: %v", command, err)
} else {
s.Errorf("Command %s exited with status %v", command, exitCode)
}
}
}
|
package ch13
import "container/heap"
// RouteHeap is a min-heap of Routes ordered by Price, for use with
// container/heap in findCheapestPrice.
type RouteHeap []Route

func (h RouteHeap) Len() int           { return len(h) }
func (h RouteHeap) Less(i, j int) bool { return h[i].Price < h[j].Price }
func (h RouteHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends x; invoked via heap.Push.
func (h *RouteHeap) Push(x interface{}) {
	*h = append(*h, x.(Route))
}

// Pop removes and returns the last element; heap.Pop has already swapped the
// minimum into that position.
func (h *RouteHeap) Pop() interface{} {
	value := (*h)[len(*h)-1]
	*h = (*h)[:len(*h)-1]
	return value
}
// Price is an outgoing edge: a flight to vertex V at cost W.
type Price struct {
	V int
	W int
}

// Route is a search state: total Price paid so far, number of Stops used,
// and the current Arrival city.
type Route struct {
	Price   int
	Stop    int
	Arrival int
}
// findCheapestPrice solves LeetCode 787 (cheapest flight within K stops) with
// a best-first search: states are popped cheapest-first, so the first state
// reaching dst carries the minimal price. Stop starts at -1 so that a route
// with s intermediate stops has Stop == s.
func findCheapestPrice(n int, flights [][]int, src int, dst int, K int) int {
	// Adjacency list: graph[u] lists the (v, w) edges out of u.
	graph := map[int][]Price{}
	for _, flight := range flights {
		u, v, w := flight[0], flight[1], flight[2]
		graph[u] = append(graph[u], Price{v, w})
	}
	q := &RouteHeap{Route{0, -1, src}}
	for q.Len() > 0 {
		route := heap.Pop(q).(Route)
		if route.Stop > K {
			// Too many stops; discard this state.
			continue
		}
		if route.Arrival == dst {
			return route.Price
		}
		for _, p := range graph[route.Arrival] {
			heap.Push(q, Route{p.W + route.Price, route.Stop + 1, p.V})
		}
	}
	// No route reaches dst within K stops.
	return -1
}
|
package gotification_test
import (
"github.com/mikegw/gotification/pkg/notification"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// SQSPersistor behavior: persisting a model must forward its payload to
// the SQS message sender unchanged.
var _ = Describe("SQS", func() {
	Describe("SQSPersistor", func() {
		It("sends the persisted model to SQS", func() {
			sender := notification.MockMessageSender{}
			subject := notification.SQSPersistorImpl{&sender}
			payload := "{\"message\":\"hi\"}"
			subject.Persist(notification.Model{Payload: payload})
			Expect(sender.InputBody()).To(Equal(payload))
		})
	})
})
|
package main
import "sort"
// p12933 returns the largest number that can be formed by rearranging
// the decimal digits of n in descending order (n assumed non-negative;
// 0 yields 0).
func p12933(n int64) int64 {
	digits := make([]int, 0, 19)
	for ; n > 0; n /= 10 {
		digits = append(digits, int(n%10))
	}
	// Sort descending, then fold the digits back into a single integer.
	sort.Sort(sort.Reverse(sort.IntSlice(digits)))
	var result int64
	for _, d := range digits {
		result = result*10 + int64(d)
	}
	return result
}
|
package main
import (
	"fmt"
	"io"
	"net"
	"os"
	"strconv"
	"sync/atomic"
)
// main starts a TCP echo-style server on localhost:9999 and serves each
// client on its own goroutine.
func main() {
	address := "localhost:9999"
	tcpAddr, err := net.ResolveTCPAddr("tcp4", address)
	if err != nil {
		// Was only logged before; listening with a nil address would not
		// bind the intended endpoint, so bail out instead.
		fmt.Fprintf(os.Stderr, "err in resolve: %s\n", err)
		return
	}
	fmt.Println(tcpAddr)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "err in listening: %s\n", err)
		return
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %s", err)
			continue
		}
		// Fixed typo: handleCient -> handleClient.
		go handleClient(conn, address)
	}
}

// msgCounter numbers replies across all client connections. It is
// updated atomically because handleClient runs in many goroutines
// concurrently (the original bare `i++` was a data race).
var msgCounter int64

// handleClient reads chunks from conn until the client disconnects,
// answering each with a numbered greeting. addr is kept for signature
// compatibility and is currently unused.
func handleClient(conn net.Conn, addr string) {
	// Ensure the connection is released when the client goes away
	// (the original leaked it).
	defer conn.Close()
	fmt.Println("new client:", conn.RemoteAddr())
	buf := make([]byte, 1024)
	for {
		length, err := conn.Read(buf)
		if err != nil {
			if err != io.EOF {
				fmt.Println("Error reading:", err.Error())
			}
			return
		}
		fmt.Println("Receive data from client:", string(buf[:length]))
		// AddInt64 returns the post-increment value, so the first reply
		// is "hello world1", matching the original numbering.
		n := atomic.AddInt64(&msgCounter, 1)
		if _, err = conn.Write([]byte("hello world" + strconv.FormatInt(n, 10))); err != nil {
			fmt.Println("Write data error: ", err.Error())
		}
	}
}
|
package constants
const (
	// TCP port assignments for this project's services.
	// (Roles inferred from the constant names — confirm at call sites.)
	OpenIDCTestStatusPort = 6922
	CertmanagerPortNumber = 6940
	AcmeProxyPortNumber   = 6941
	// AcmePath is the well-known HTTP path for ACME (RFC 8555) challenges.
	AcmePath = "/.well-known/acme-challenge"
	// AcmeProxy API paths: bulk-clear recorded responses / record one response.
	AcmeProxyCleanupResponses = "/api/responses/cleanup"
	AcmeProxyRecordResponse   = "/api/responses/recordOne"
	// OpenIDCConfigurationDocumentPath is the standard OpenID Connect
	// discovery document location.
	OpenIDCConfigurationDocumentPath = "/.well-known/openid-configuration"
	// Copied from github.com/Cloud-Foundations/Dominator/constants
	AssignedOIDBase        = "1.3.6.1.4.1.9586.100.7"
	PermittedMethodListOID = AssignedOIDBase + ".1"
	GroupListOID           = AssignedOIDBase + ".2"
)
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package camera
import (
"context"
"time"
"chromiumos/tast/common/media/caps"
"chromiumos/tast/local/bundles/cros/camera/hal3"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
)
// init registers the HAL3Perf test with the Tast framework. The fields
// below are declarative test metadata: owners, scheduling attributes,
// software dependencies (ARC, camera HAL3, Chrome, a built-in camera),
// a precondition that reuses a logged-in Chrome session, and a timeout.
func init() {
	testing.AddTest(&testing.Test{
		Func:         HAL3Perf,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Measures camera HAL3 performance",
		Contacts:     []string{"hywu@chromium.org", "shik@chromium.org", "chromeos-camera-eng@google.com"},
		Attr:         []string{"group:crosbolt", "crosbolt_perbuild"},
		SoftwareDeps: []string{"arc", "arc_camera3", "chrome", caps.BuiltinCamera},
		Pre:          chrome.LoggedIn(),
		Timeout:      4 * time.Minute,
	})
}
// HAL3Perf runs the camera HAL3 performance test suite and reports any
// failure to the testing state.
func HAL3Perf(ctx context.Context, s *testing.State) {
	err := hal3.RunTest(ctx, hal3.PerfTestConfig())
	if err != nil {
		s.Error("Test failed: ", err)
	}
}
|
package main
import (
"github.com/sfreiberg/gotwilio"
"log"
"strconv"
"strings"
)
// send delivers body to the given phone number via Twilio. Bodies of
// 1500 or more characters are first split into paragraph-aligned chunks.
// When Twilio reports an exception, a follow-up SMS carrying the error
// code is sent to the same recipient.
func send(cl *gotwilio.Twilio, from string, to string, body string) {
	messages := []string{body}
	if len(body) >= 1500 {
		messages = splitLongBody(body)
	}
	for _, message := range messages {
		res, exception, err := cl.SendSMS(from, to, message, "", "")
		log.Printf("res: %+v, exeption: %+v, err: %+v", res, exception, err)
		if exception == nil {
			continue
		}
		errorMessage := "oops! we have an error with code " +
			strconv.Itoa(exception.Code) +
			". If you'd like help, please share this code with ouidevelop@gmail.com"
		res, exception, err = cl.SendSMS(from, to, errorMessage, "", "")
		log.Printf("res: %+v, exeption: %+v, err: %+v", res, exception, err)
	}
}
// splitLongBody splits str into chunks of roughly at most 1500
// characters, breaking only on paragraph boundaries ("\n\n"). Each
// paragraph keeps a trailing "\n\n" in the output, matching the
// original chunk formatting. A single paragraph longer than the limit
// still becomes its own (oversized) chunk, since paragraphs are never
// split internally.
//
// Fixes two content-loss bugs in the previous version: the paragraph
// that triggered a flush was silently dropped, and the final
// accumulated chunk was never appended to the result.
func splitLongBody(str string) []string {
	var messages []string
	message := ""
	for _, segment := range strings.Split(str, "\n\n") {
		piece := segment + "\n\n"
		// Start a new chunk when adding this paragraph would push the
		// current one past the SMS-friendly limit.
		if message != "" && len(message)+len(piece) > 1500 {
			messages = append(messages, message)
			message = ""
		}
		message += piece
	}
	// Flush whatever remains so no text is lost.
	if message != "" {
		messages = append(messages, message)
	}
	return messages
}
|
package k8sstatus
import (
"context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
binddnsv1 "github.com/bind-dns/binddns-operator/pkg/apis/binddns/v1"
"github.com/bind-dns/binddns-operator/pkg/kube"
"github.com/bind-dns/binddns-operator/pkg/utils"
)
// UpdateDomainStatus fetches the DnsDomain for zone, records the given
// phase plus this pod's per-instance status with fresh timestamps, and
// writes the result back via the status subresource.
func UpdateDomainStatus(zone string, status binddnsv1.DomainStatus) error {
	domains := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsDomains()
	domain, err := domains.Get(context.Background(), zone, v1.GetOptions{})
	if err != nil {
		return err
	}
	// First update stamps the creation time; every update refreshes UpdateTime.
	if domain.Status.CreateTime == "" {
		domain.Status.CreateTime = utils.TimeNow()
	}
	domain.Status.UpdateTime = utils.TimeNow()
	if domain.Status.InstanceStatuses == nil {
		domain.Status.InstanceStatuses = make(map[string]binddnsv1.InstanceStatus)
	}
	pod := utils.GetPodName()
	domain.Status.Phase = status
	domain.Status.InstanceStatuses[pod] = binddnsv1.InstanceStatus{
		Status:    status,
		Name:      pod,
		UpdatedAt: utils.TimeNow(),
	}
	_, err = domains.UpdateStatus(context.Background(), domain, v1.UpdateOptions{})
	return err
}
// UpdateRuleStatus refreshes the timestamps on the named DnsRule's
// status and then marks the rule's parent zone as progressing.
func UpdateRuleStatus(name string) error {
	rules := kube.GetKubeClient().GetDnsClientSet().BinddnsV1().DnsRules()
	rule, err := rules.Get(context.Background(), name, v1.GetOptions{})
	if err != nil {
		return err
	}
	if rule.Status.CreateTime == "" {
		rule.Status.CreateTime = utils.TimeNow()
	}
	rule.Status.UpdateTime = utils.TimeNow()
	if _, err := rules.UpdateStatus(context.Background(), rule, v1.UpdateOptions{}); err != nil {
		return err
	}
	return UpdateDomainStatus(rule.Spec.Zone, binddnsv1.DomainProgressing)
}
|
/*
* @lc app=leetcode id=42 lang=golang
*
* [42] Trapping Rain Water
*
* https://leetcode.com/problems/trapping-rain-water/description/
*
* algorithms
* Hard (48.22%)
* Likes: 7426
* Dislikes: 128
* Total Accepted: 530.5K
* Total Submissions: 1.1M
* Testcase Example: '[0,1,0,2,1,0,1,3,2,1,2,1]'
*
* Given n non-negative integers representing an elevation map where the width
* of each bar is 1, compute how much water it is able to trap after raining.
*
*
* The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1].
* In this case, 6 units of rain water (blue section) are being trapped. Thanks
* Marcos for contributing this image!
*
* Example:
*
*
* Input: [0,1,0,2,1,0,1,3,2,1,2,1]
* Output: 6
*
*/
// @lc code=start
// [4,2,3]
// trap returns the total units of rain water trapped by the elevation
// map height; it delegates to the two-pointer implementation trap3.
func trap(height []int) int {
	return trap3(height)
}
// trap3 computes trapped rain water with two converging pointers. It
// tracks the running maximum wall seen from each side and always
// advances the side with the lower current bar: that side's maximum is
// then guaranteed to bound the water column above the bar.
// Time complexity: O(n), space complexity: O(1).
func trap3(height []int) int {
	lo, hi := 0, len(height)-1
	loMax, hiMax, water := 0, 0, 0
	for lo < hi {
		if height[lo] < height[hi] {
			// Left side is the limiting wall.
			if height[lo] > loMax {
				loMax = height[lo]
			} else {
				water += loMax - height[lo]
			}
			lo++
		} else {
			// Right side is the limiting wall.
			if height[hi] > hiMax {
				hiMax = height[hi]
			} else {
				water += hiMax - height[hi]
			}
			hi--
		}
	}
	return water
}
// trap2 precomputes, for every index, the tallest bar to its left and to
// its right (inclusive); the water held above index i is then
// min(leftMax, rightMax) - height[i].
// Time complexity: O(n), space complexity: O(n).
func trap2(height []int) int {
	n := len(height)
	if n <= 1 {
		return 0
	}
	// Prefix maxima scanning left-to-right.
	leftMax := make([]int, n)
	best := 0
	for i := 0; i < n; i++ {
		if height[i] > best {
			best = height[i]
		}
		leftMax[i] = best
	}
	// Suffix maxima scanning right-to-left.
	rightMax := make([]int, n)
	best = 0
	for i := n - 1; i >= 0; i-- {
		if height[i] > best {
			best = height[i]
		}
		rightMax[i] = best
	}
	total := 0
	for i := 1; i < n-1; i++ {
		bound := leftMax[i]
		if rightMax[i] < bound {
			bound = rightMax[i]
		}
		total += bound - height[i]
	}
	return total
}
// trap1 is the brute-force solution: for each interior bar it scans both
// directions for the tallest wall and adds min(left, right) - height[i].
// The first and last bars can never hold water, so they are skipped.
// Time complexity: O(n*n), space complexity: O(1).
func trap1(height []int) int {
	total := 0
	for i := 1; i < len(height)-1; i++ {
		highestLeft, highestRight := 0, 0
		// Tallest bar at or left of i.
		for j := i; j >= 0; j-- {
			if height[j] > highestLeft {
				highestLeft = height[j]
			}
		}
		// Tallest bar at or right of i.
		for j := i; j < len(height); j++ {
			if height[j] > highestRight {
				highestRight = height[j]
			}
		}
		water := highestLeft
		if highestRight < water {
			water = highestRight
		}
		total += water - height[i]
	}
	return total
}
// max returns the larger of val1 and val2.
func max(val1, val2 int) int {
	if val1 < val2 {
		return val2
	}
	return val1
}
// min returns the smaller of val1 and val2.
func min(val1, val2 int) int {
	if val1 < val2 {
		return val1
	}
	return val2
}
// @lc code=end |
package insights
import (
"context"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/dolittle/platform-api/pkg/platform"
platformK8s "github.com/dolittle/platform-api/pkg/platform/k8s"
"github.com/dolittle/platform-api/pkg/platform/mongo"
"github.com/dolittle/platform-api/pkg/utils"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
)
// NewService constructs the insights service with its logger, Kubernetes
// platform repository, and the Loki host used by the log proxy endpoint.
// It returns the unexported service value; handler methods are defined
// on *service elsewhere in this package.
func NewService(logContext logrus.FieldLogger, k8sDolittleRepo platformK8s.K8sPlatformRepo, lokiHost string) service {
	return service{
		logContext:      logContext,
		k8sDolittleRepo: k8sDolittleRepo,
		lokiHost:        lokiHost,
	}
}
// GetRuntimeV1 responds with event-store insights (latest event, latest
// event per event type, event log count, and runtime states) for every
// event store database of the given application/environment, as JSON.
// Authorization is checked first; when denied, the repo has already
// written the HTTP error response. A database that fails any query is
// logged at info level and skipped rather than failing the request.
func (s *service) GetRuntimeV1(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	applicationID := vars["applicationID"]
	environment := strings.ToLower(vars["environment"])
	userID := r.Header.Get("User-ID")
	customerID := r.Header.Get("Tenant-ID")

	allowed := s.k8sDolittleRepo.CanModifyApplicationWithResponse(w, customerID, applicationID, userID)
	if !allowed {
		// CanModifyApplicationWithResponse already wrote the response.
		return
	}

	logContext := s.logContext.WithFields(logrus.Fields{
		"application_id": applicationID,
		"customer_id":    customerID,
		"user_id":        userID,
	})

	// Bound the whole mongo round trip.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	mongoURI := mongo.GetMongoURI(applicationID, environment)
	client, err := mongo.SetupMongo(ctx, mongoURI)
	if err != nil {
		logContext.WithFields(logrus.Fields{
			"mongoURI": mongoURI,
			"error":    err,
		}).Error("Connecting to mongo")
		utils.RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}

	dbs := mongo.GetEventStoreDatabases(ctx, client)
	// Note: the redundant explicit size 0 in the original make calls has
	// been dropped (staticcheck S1019).
	latestEvents := make(map[string]platform.RuntimeLatestEvent)
	latestEventsPerEventType := make(map[string][]platform.RuntimeLatestEvent)
	eventLogCounts := make(map[string]int64)
	runtimeStates := make(map[string][]platform.RuntimeState)

	for _, db := range dbs {
		// NOTE(review): db appears to be a plain database name; Sprintf
		// is kept in case it is a Stringer — confirm and simplify to db.
		key := fmt.Sprintf("%s", db)

		_latestEvents, err := mongo.GetLatestEvent(ctx, client, db)
		if err != nil {
			logContext.WithFields(logrus.Fields{
				"database": db,
				"error":    err,
				"method":   "mongo.GetLatestEvent",
			}).Info("Skipping db: Failed to get latest event, skipping db")
			continue
		}
		latestEvents[key] = _latestEvents

		_latestEventsPerEventType, err := mongo.GetLatestEventPerEventType(ctx, client, db)
		if err != nil {
			logContext.WithFields(logrus.Fields{
				"database": db,
				"error":    err,
				"method":   "mongo.GetLatestEventPerEventType",
			}).Info("Skipping db: Failed to get latest event per event type")
			continue
		}
		latestEventsPerEventType[key] = _latestEventsPerEventType

		eventLogCount, err := mongo.GetEventLogCount(ctx, client, db)
		if err != nil {
			logContext.WithFields(logrus.Fields{
				"database": db,
				"error":    err,
				"method":   "mongo.GetEventLogCount",
			}).Info("Skipping db: Failed to get event log count")
			continue
		}
		eventLogCounts[key] = eventLogCount

		_runtimeStates, err := mongo.GetRuntimeStates(ctx, client, db)
		if err != nil {
			logContext.WithFields(logrus.Fields{
				"database": db,
				"error":    err,
				"method":   "mongo.GetRuntimeStates",
			}).Info("Skipping db: Failed to get runtime states")
			continue
		}
		runtimeStates[key] = _runtimeStates
	}

	utils.RespondWithJSON(w, http.StatusOK, map[string]interface{}{
		"applicationID":            applicationID,
		"environment":              environment,
		"latestEvents":             latestEvents,
		"latestEventsPerEventType": latestEventsPerEventType,
		"eventLogCounts":           eventLogCounts,
		"runtimeStates":            runtimeStates,
	})
}
// ProxyLoki forwards the incoming request to the configured Loki host,
// scoping it to the caller's customer group and stripping platform
// headers before the request leaves the cluster.
func (s *service) ProxyLoki(w http.ResponseWriter, r *http.Request) {
	customerID := r.Header.Get("Tenant-ID")

	// Remove internal headers; Loki must not see them.
	r.Header.Del("Tenant-ID")
	r.Header.Del("User-ID")
	r.Header.Del("x-shared-secret")
	// Loki multi-tenancy: scope queries to this customer's group.
	r.Header.Set("X-Scope-OrgId", platform.GetCustomerGroup(customerID))

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	r = r.WithContext(ctx)

	// Drop everything before the first "/loki" so the proxied path starts
	// at "/loki". When "/loki" is absent the path becomes empty, which
	// mirrors the original Split/TrimPrefix behavior exactly.
	if idx := strings.Index(r.URL.Path, "/loki"); idx >= 0 {
		r.URL.Path = r.URL.Path[idx:]
	} else {
		r.URL.Path = ""
	}

	serveReverseProxy(s.lokiHost, w, r)
}
// serveReverseProxy proxies req to host over plain HTTP, forwarding the
// original Host header via X-Forwarded-Host. (Local renamed from url to
// target so it no longer shadows the net/url package.)
func serveReverseProxy(host string, res http.ResponseWriter, req *http.Request) {
	// Only Host and Scheme matter; the parsed "/" path is discarded.
	target, _ := url.Parse("/")
	target.Host = host
	// Hard coding to http for now.
	target.Scheme = "http"

	proxy := httputil.NewSingleHostReverseProxy(target)

	// Re-point the request at the proxy target.
	req.Host = target.Host
	req.URL.Host = target.Host
	req.URL.Scheme = target.Scheme
	req.Header.Set("X-Forwarded-Host", req.Header.Get("Host"))

	proxy.ServeHTTP(res, req)
}
|
package undertone
import (
"encoding/json"
"github.com/prebid/prebid-server/openrtb_ext"
"testing"
)
// TestValidParams verifies the undertone bidder schema accepts every
// known-good parameter payload.
func TestValidParams(t *testing.T) {
	validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params")
	if err != nil {
		t.Fatalf("Failed to fetch the json schema. %v", err)
	}
	for _, params := range validParams {
		if err := validator.Validate(openrtb_ext.BidderUndertone, json.RawMessage(params)); err != nil {
			t.Errorf("Schema rejected valid params: %s", params)
		}
	}
}
// TestInvalidParams verifies the undertone bidder schema rejects every
// known-bad parameter payload.
func TestInvalidParams(t *testing.T) {
	validator, err := openrtb_ext.NewBidderParamsValidator("../../static/bidder-params")
	if err != nil {
		t.Fatalf("Failed to fetch the json schema. %v", err)
	}
	for _, params := range invalidParams {
		if err := validator.Validate(openrtb_ext.BidderUndertone, json.RawMessage(params)); err == nil {
			t.Errorf("Schema allowed invalid params: %s", params)
		}
	}
}
// validParams are payloads the undertone schema must accept: both
// placementId and publisherId present as positive integers.
var validParams = []string{
	`{"placementId": 12345, "publisherId": 1234}`,
	`{"placementId": 1, "publisherId": 1}`,
}

// invalidParams are payloads the schema must reject: wrong types,
// zero values, or missing fields.
var invalidParams = []string{
	`{"placementId": "1234some-string"}`,
	`{"placementId": 1234, "publisherId": 0}`,
	`{"placementId": 0, "publisherId": 1}`,
	`{"placementId": "1non-numeric", "publisherId": "non-numeric"}`,
}
|
package factory
import (
"encoding/json"
"fmt"
"github.com/mitchellh/cli"
"seeder/constants"
"seeder/models"
"seeder/services"
"seeder/tools"
"seeder/utils"
"time"
)
// Destroy is the CLI factory for the destroy subcommand.
func Destroy() (cli.Command, error) {
	return &destroyCommandCLI{}, nil
}
// destroyCommandCLI implements cli.Command for the destroy subcommand.
type destroyCommandCLI struct {
	// Args holds the raw CLI arguments passed to Run.
	Args []string
}
// Run executes the destroy command: it loads the planned deployments
// from the plan file, then repeatedly snapshots remote state, destroys
// deployments, and polls until every planned deployment is reported
// unchanged, finally persisting the resulting state and plan.
//
// NOTE(review): saveRemoteState, savePlan and getPlan are defined
// elsewhere in this package. Also, destroy is invoked with the
// "no changes" list rather than the full planned list — confirm that
// is intentional.
func (c *destroyCommandCLI) Run(args []string) int {
	c.Args = args
	fmt.Println(fmt.Sprintf("Destroying deployments found in file %s", constants.DEPLOYMENT_PLAN))
	yamlConfig := models.NewYamlConfig().GetYamlConfig()
	// Load the deployments recorded in the local plan file.
	plannedDeployments := make([]*models.ServerDeployment, 0)
	err := json.Unmarshal(utils.ReadFile(constants.DEPLOYMENT_PLAN), &plannedDeployments)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}
	//enter check loop
	for {
		saveRemoteState()
		noChanges := getNoChanges()
		destroy(yamlConfig, noChanges)
		savePlan()
		// Stop once every planned deployment shows up as unchanged.
		if len(noChanges) == len(plannedDeployments) {
			break
		}
		fmt.Println("Waiting ...")
		time.Sleep(10 * time.Second)
	}
	//save state
	saveRemoteState()
	//save plan
	plan := getPlan()
	jsonPlan, err := json.Marshal(plan)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}
	utils.WriteFile(constants.DEPLOYMENT_PLAN, jsonPlan)
	return 0
}
// destroy issues a delete for every planned deployment, routing through
// the discovery service when one is configured and falling back to the
// deployer service otherwise.
func destroy(yamlConfig models.YamlConfig, plannedDeployments []*models.ServerDeployment) {
	for _, d := range plannedDeployments {
		if d.Discovery == constants.NA {
			deployer := services.NewDeployerService(d.Deployer, yamlConfig.GetAccessToken())
			deployer.DeleteDeploymentId(d)
			continue
		}
		discovery := services.NewDiscoveryService(d.Discovery, yamlConfig.GetAccessToken())
		discovery.DeleteDeploymentId(d)
	}
}
// getNoChanges loads the remote state file and returns the deployments
// the plan creator reports as unchanged. Unmarshal failures are logged
// and treated as an empty remote state.
func getNoChanges() []*models.ServerDeployment {
	remote := make([]*models.ServerDeployment, 0)
	if err := json.Unmarshal(utils.ReadFile(constants.DEPLOYMENT_STATE), &remote); err != nil {
		fmt.Println(err.Error())
	}
	return tools.NewDeploymentPlanCreator(remote).GetNoChanges()
}
// Synopsis returns the one-line usage summary shown in command listings.
func (c *destroyCommandCLI) Synopsis() string { return "Usage: seeder destroy" }
// Help returns the multi-line usage text for the destroy command.
func (c *destroyCommandCLI) Help() string {
	return `
Usage: seeder destroy
Destroys the remote state and empties your local plan.
Scenarios:
Call it without arguments and it will erase all your remote state.
Call it with args and destroy only the objects specified by 'id'.
After all deployments are destroyed:
- it will save a new plan where the deployments are marked for deployment.
- it will save a new remote state with remaining remote deployments.
`
}
|
package main
import (
"fmt"
"isshe/algo"
)
// main demonstrates the algo.Stack API: push one entry, then inspect and
// drain the stack while printing each intermediate result.
func main() {
	stack := algo.NewStack()
	// Push takes (123, "isshe") here — presumably a value plus a label;
	// confirm the exact signature against the algo package.
	stack.Push(123, "isshe")
	fmt.Println(stack.Top())
	fmt.Println(stack.Pop())
	fmt.Println(stack.Top())
	fmt.Println(stack.Size())
	fmt.Println(stack.IsEmpty())
	// Popping the now-empty stack shows the empty-case behavior.
	fmt.Println(stack.Pop())
	fmt.Println(stack.IsEmpty())
}
|
/*
git2sqlite - converts git repositories to sqlite databases.
When ran against a git repository, will output an sqlite database
with the following tables:
refs :: <path, hash>
blobs :: <hash, content>
trees :: <hash, content>
commits :: <hash, content>
This project is a work in progress.
*/
package main
import (
"log"
"os"
"path/filepath"
)
// main converts the git repository at the path given as the sole CLI
// argument (default: the directory containing the executable) into an
// sqlite database named after the repository directory.
func main() {
	// Resolve the repository location; errors were silently ignored
	// before, which would silently proceed with an empty path.
	var location string
	var err error
	if len(os.Args) == 2 {
		location, err = filepath.Abs(os.Args[1])
	} else {
		location, err = filepath.Abs(filepath.Dir(os.Args[0]))
	}
	if err != nil {
		log.Fatalf("resolving repository path: %v", err)
	}

	// Go locals use mixedCaps, not ALL_CAPS.
	repoPath := filepath.Join(location, ".git")
	dbName := filepath.Base(location) + ".db"

	repo := GitRepository{repoPath}
	db := SQLiteDatabase{dbName}
	db.Create()
	db.WriteRepository(repo)
	log.Println("Finished.")
}
|
package voronoi
// Number returns the package's fixed answer value, 42.
func Number() int {
	const answer = 42
	return answer
}
|
package builder
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/go-logr/logr"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/match"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types"
"k8s.io/klog/v2"
)
// ImageBuilder use an OCI workspace to add layers and change configuration to images.
type ImageBuilder struct {
	// NameOpts are applied when parsing image references.
	NameOpts []name.Option
	// RemoteOpts are applied to remote registry operations.
	RemoteOpts []remote.Option
	// Logger defaults to a klog-backed logger when left zero (see init).
	Logger klog.Logger
}
// ErrInvalidReference is returned when the target reference is a digest
// reference; Run requires a tag reference because it rewrites content.
type ErrInvalidReference struct {
	image string
}

// Error implements the error interface.
func (e ErrInvalidReference) Error() string {
	return fmt.Sprintf("target reference %q must have a tag reference", e.image)
}
// NewImageBuilder creates a new instance of an ImageBuilder with the
// given reference-parsing and registry options, ensuring its logger is
// initialized.
func NewImageBuilder(nameOpts []name.Option, remoteOpts []remote.Option) *ImageBuilder {
	builder := &ImageBuilder{
		NameOpts:   nameOpts,
		RemoteOpts: remoteOpts,
	}
	builder.init()
	return builder
}
// init ensures the builder has a usable logger, defaulting to klog when
// the Logger field was left at its zero value.
func (b *ImageBuilder) init() {
	var zero logr.Logger
	if b.Logger == zero {
		b.Logger = klog.NewKlogr()
	}
}
/*
configUpdateFunc allows callers of ImageBuilder.Run to modify the *v1.ConfigFile argument as appropriate for
the circumstances. A nil value means the image configuration is left untouched.
*/
type configUpdateFunc func(*v1.ConfigFile)
/*
Run modifies and pushes the catalog image existing in an OCI layout. The image configuration will be updated
with the required labels and any provided layers will be appended.

# Arguments

• ctx: a cancellation context

• targetRef: a docker image reference; must be a tag reference, not a digest reference

• layoutPath: an OCI image layout path

• update: an optional function that allows callers to modify the *v1.ConfigFile if necessary

• layers: zero or more layers to add to the images discovered during processing

# Returns

error: non-nil on error, nil otherwise
*/
func (b *ImageBuilder) Run(ctx context.Context, targetRef string, layoutPath layout.Path, update configUpdateFunc, layers ...v1.Layer) error {
	b.init()
	var v2format bool
	// Target can't have a digest since we are
	// adding layers and possibly updating the
	// configuration. This will result in a failure
	// due to computed hash differences.
	targetIdx := strings.Index(targetRef, "@")
	if targetIdx != -1 {
		return &ErrInvalidReference{targetRef}
	}
	tag, err := name.NewTag(targetRef, b.NameOpts...)
	if err != nil {
		return err
	}
	idx, err := layoutPath.ImageIndex()
	if err != nil {
		return err
	}
	// make a copy of the original manifest for later
	originalIdxManifest, err := idx.IndexManifest()
	if err != nil {
		return err
	}
	originalIdxManifest = originalIdxManifest.DeepCopy()
	// process the image index for updates to images discovered along the way
	resultIdx, err := b.processImageIndex(ctx, idx, &v2format, update, targetRef, layers...)
	if err != nil {
		return err
	}
	// Ensure the index media type is a docker manifest list
	// if child manifests are docker V2 schema
	if v2format {
		resultIdx = mutate.IndexMediaType(resultIdx, types.DockerManifestList)
	}
	// get the hashes from the original manifest since we need to remove them
	originalHashes := []v1.Hash{}
	for _, desc := range originalIdxManifest.Manifests {
		originalHashes = append(originalHashes, desc.Digest)
	}
	// write out the index, replacing the old value
	err = layoutPath.ReplaceIndex(resultIdx, match.Digests(originalHashes...))
	if err != nil {
		return err
	}
	// "Pull" the updated index
	idx, err = layoutPath.ImageIndex()
	if err != nil {
		return err
	}
	// while it's entirely valid to have nested "manifest list" (i.e. an ImageIndex) within an OCI layout,
	// this does NOT work for remote registries. So if we have those, then we need to get the nested
	// ImageIndex and push that to the remote registry. In theory there could be any number of nested
	// ImageIndexes, but in practice, there's only one level deep, and its a "singleton".
	topLevelIndexManifest, err := idx.IndexManifest()
	if err != nil {
		return err
	}
	var imageIndexToPush v1.ImageIndex
	for _, descriptor := range topLevelIndexManifest.Manifests {
		if descriptor.MediaType.IsImage() {
			// if we find an image, then this top level index can be used to push to remote registry
			imageIndexToPush = idx
			// no need to look any further
			break
		} else if descriptor.MediaType.IsIndex() {
			// if we find an image index, we can push that to the remote registry
			imageIndexToPush, err = idx.ImageIndex(descriptor.Digest)
			if err != nil {
				return err
			}
			// we're not going to look any deeper or look for other indexes at this level
			break
		}
	}
	// push to the remote
	return remote.WriteIndex(tag, imageIndexToPush, b.RemoteOpts...)
}
/*
processImageIndex is a recursive helper function that allows for traversal of the hierarchy of
parent/child indexes that can exist for a multi arch image. There's always
at least one index at the root since this is an OCI layout that we're dealing with.
In theory there can be "infinite levels" of "index indirection" for multi arch images, but typically
its only two levels deep (i.e. index.json itself which is level one, and the manifest list
defined in the blobs directory, which is level two).

Each image that is encountered is updated using the update function (if provided) and whatever layers are provided.

# Arguments

• ctx: a cancellation context

• idx: the "current" image index for this stage of recursion

• v2format: a boolean used to keep track of the type of image we're dealing with. false means OCI media types
should be used and true means docker v2s2 media types should be used

• update: an optional function that allows callers to modify the *v1.ConfigFile if necessary

• targetRef: the docker image reference, which is only used for error reporting in this function

• layers: zero or more layers to add to the images discovered during processing

# Returns

• v1.ImageIndex: The resulting image index after processing has completed. Will be nil if an error occurs, otherwise non-nil.

• error: non-nil if an error occurs, nil otherwise
*/
func (b *ImageBuilder) processImageIndex(ctx context.Context, idx v1.ImageIndex, v2format *bool, update configUpdateFunc, targetRef string, layers ...v1.Layer) (v1.ImageIndex, error) {
	var resultIdx v1.ImageIndex
	resultIdx = idx
	idxManifest, err := idx.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, manifest := range idxManifest.Manifests {
		currentHash := *manifest.Digest.DeepCopy()
		switch manifest.MediaType {
		case types.DockerManifestList, types.OCIImageIndex:
			innerIdx, err := idx.ImageIndex(currentHash)
			if err != nil {
				return nil, err
			}
			// recursive call
			processedIdx, err := b.processImageIndex(ctx, innerIdx, v2format, update, targetRef, layers...)
			if err != nil {
				return nil, err
			}
			resultIdx = processedIdx
			// making an assumption here that at any given point in the parent/child
			// hierarchy, there's only a single image index entry
			return resultIdx, nil
		case types.DockerManifestSchema2:
			*v2format = true
		case types.OCIManifestSchema1:
			*v2format = false
		default:
			return nil, fmt.Errorf("image %q: unsupported manifest format %q", targetRef, manifest.MediaType)
		}
		img, err := idx.Image(currentHash)
		if err != nil {
			return nil, err
		}
		// Add new layers to image.
		// Ensure they have the right media type.
		var mt types.MediaType
		if *v2format {
			mt = types.DockerLayer
		} else {
			mt = types.OCILayer
		}
		additions := make([]mutate.Addendum, 0, len(layers))
		for _, layer := range layers {
			additions = append(additions, mutate.Addendum{Layer: layer, MediaType: mt})
		}
		img, err = mutate.Append(img, additions...)
		if err != nil {
			return nil, err
		}
		if update != nil {
			// Update image config
			cfg, err := img.ConfigFile()
			if err != nil {
				return nil, err
			}
			update(cfg)
			img, err = mutate.Config(img, cfg.Config)
			if err != nil {
				return nil, err
			}
		}
		desc, err := partial.Descriptor(img)
		if err != nil {
			return nil, err
		}
		// if the platform is not set, we need to attempt to do something about that
		if desc.Platform == nil {
			if manifest.Platform != nil {
				// use the value from the manifest
				desc.Platform = manifest.Platform
			} else {
				if config, err := img.ConfigFile(); err != nil {
					// we can't get the config file so fall back to linux/amd64
					desc.Platform = &v1.Platform{Architecture: "amd64", OS: "linux"}
				} else {
					// if one of the required values is missing, fall back to linux/amd64
					if config.Architecture == "" || config.OS == "" {
						desc.Platform = &v1.Platform{Architecture: "amd64", OS: "linux"}
					} else {
						// use the value provided by the image config
						desc.Platform = &v1.Platform{Architecture: config.Architecture, OS: config.OS}
					}
				}
			}
		}
		// replace this image's old manifest entry with the updated image
		add := mutate.IndexAddendum{
			Add:        img,
			Descriptor: *desc,
		}
		modifiedIndex := mutate.AppendManifests(mutate.RemoveManifests(resultIdx, match.Digests(currentHash)), add)
		resultIdx = modifiedIndex
	}
	return resultIdx, nil
}
/*
CreateLayout will create an OCI image layout from an image or return
a layout path from an existing OCI layout.

# Arguments

• srcRef: if empty string, the dir argument is used for the layout.Path, otherwise
this value is used to pull an image into dir.

• dir: a pre-populated OCI layout directory if srcRef is empty string, otherwise
this directory will be created

# Returns

• layout.Path: a OCI layout path if successful or an empty string if an error occurs

• error: non-nil if an error occurs, nil otherwise
*/
func (b *ImageBuilder) CreateLayout(srcRef, dir string) (layout.Path, error) {
	b.init()

	// No source reference: treat dir as an existing OCI layout.
	if srcRef == "" {
		b.Logger.V(1).Info("Using existing OCI layout to " + dir)
		return layout.FromPath(dir)
	}

	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return "", err
	}

	// Pull the source reference's index and write it into dir.
	srcReference, err := name.ParseReference(srcRef, b.NameOpts...)
	if err != nil {
		return "", err
	}
	srcIndex, err := remote.Index(srcReference, b.RemoteOpts...)
	if err != nil {
		return "", err
	}
	b.Logger.V(1).Info("Writing OCI layout to " + dir)
	return layout.Write(dir, srcIndex)
}
// LayerFromPath will write the contents of the path(s) to the target
// directory and build a v1.Layer, leaving file ownership untouched.
func LayerFromPath(targetPath, path string) (v1.Layer, error) {
	return LayerFromPathWithUidGid(targetPath, path, -1, -1)
}
// LayerFromPathWithUidGid will write the contents of the path(s) to the
// target directory, overriding file ownership with the given UID/GID,
// and build a v1.Layer from the resulting in-memory tar stream.
// Use gid = -1 , uid = -1 if you don't want to override.
// Only regular files and directories are supported; any other file type
// (symlinks, devices, ...) results in an error.
func LayerFromPathWithUidGid(targetPath, path string, uid int, gid int) (v1.Layer, error) {
	var b bytes.Buffer
	tw := tar.NewWriter(&b)
	pathInfo, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	// processPaths writes one tar header (and, for regular files, the
	// file body) into the tar stream.
	processPaths := func(hdr *tar.Header, info os.FileInfo, fp string) error {
		if !info.IsDir() {
			hdr.Size = info.Size()
		}
		if info.Mode().IsDir() {
			hdr.Typeflag = tar.TypeDir
		} else if info.Mode().IsRegular() {
			hdr.Typeflag = tar.TypeReg
		} else {
			return fmt.Errorf("not implemented archiving file type %s (%s)", info.Mode(), info.Name())
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return fmt.Errorf("failed to write tar header: %w", err)
		}
		if !info.IsDir() {
			f, err := os.Open(filepath.Clean(fp))
			if err != nil {
				return err
			}
			if _, err := io.Copy(tw, f); err != nil {
				return fmt.Errorf("failed to read file into the tar: %w", err)
			}
			err = f.Close()
			if err != nil {
				return err
			}
		}
		return nil
	}
	if pathInfo.IsDir() {
		// Archive every entry under targetPath, keyed by its path
		// relative to the source directory.
		err := filepath.Walk(path, func(fp string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			rel, err := filepath.Rel(path, fp)
			if err != nil {
				return fmt.Errorf("failed to calculate relative path: %w", err)
			}
			hdr := &tar.Header{
				Name: filepath.Join(targetPath, filepath.ToSlash(rel)),
				Mode: int64(info.Mode()),
			}
			if uid != -1 {
				hdr.Uid = uid
			}
			if gid != -1 {
				hdr.Gid = gid
			}
			if err := processPaths(hdr, info, fp); err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return nil, fmt.Errorf("failed to scan files: %w", err)
		}
	} else {
		// Single file: archive it directly under targetPath.
		base := filepath.Base(path)
		hdr := &tar.Header{
			Name: filepath.Join(targetPath, filepath.ToSlash(base)),
			Mode: int64(pathInfo.Mode()),
		}
		if uid != -1 { // uid was specified in the input param
			hdr.Uid = uid
		}
		if gid != -1 { // gid was specified in the input param
			hdr.Gid = gid
		}
		if err := processPaths(hdr, pathInfo, path); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, fmt.Errorf("failed to finish tar: %w", err)
	}
	return tarball.LayerFromReader(&b)
}
|
package main
//go:generate go run scripts/includedict.go
import (
"flag"
"fmt"
"log"
"math/rand"
"strings"
"time"
)
// Command-line flags controlling passphrase generation.
var numWords = flag.Int("num-words", 4, "number of words to use")
var number = flag.Bool("number", false, "replace a random character with a number")
var capitalize = flag.Bool("capitalize", false, "capitalize the first letter of every word")
var separator = flag.String("separator", "", "character to use between words")
// replacement pairs a letter with the look-alike digit that may stand in
// for it when the -number flag is set.
type replacement struct {
	letter string
	number string
}

// replacements lists letter-to-digit substitutions, tried in order.
var replacements = []replacement{
	{"o", "0"},
	{"i", "1"},
	{"l", "1"},
	{"s", "5"},
	{"a", "4"},
	{"e", "3"},
}
// init parses flags and validates num-words before main runs.
// NOTE(review): calling flag.Parse in init is unconventional; idiomatic
// Go parses at the top of main. Kept as-is to preserve behavior.
func init() {
	flag.Parse()
	if *numWords < 1 {
		log.Fatal("num-words must be > 0")
	}
}
// main assembles a random passphrase from dictionary words, optionally
// capitalizing each word and substituting one digit, then shuffles the
// words and prints them joined by the configured separator.
func main() {
	rand.Seed(time.Now().UnixNano())
	words := []string{}
	for i := 0; i < *numWords; i++ {
		words = append(words, dict[rand.Intn(len(dict))])
	}
	if *capitalize {
		for i := range words {
			words[i] = strings.Title(words[i])
		}
	}
	if *number {
		// Substitute a digit for a look-alike letter in the first word
		// containing one; if none matches, append a random digit instead.
		replaced := false
		for i, word := range words {
			for _, r := range replacements {
				if strings.Contains(word, r.letter) {
					words[i] = strings.Replace(word, r.letter, r.number, 1)
					replaced = true
					break
				}
			}
			if replaced {
				break
			}
		}
		if !replaced {
			words[0] = fmt.Sprintf("%s%d", words[0], rand.Intn(10))
		}
	}
	rand.Shuffle(len(words), func(i, j int) {
		words[i], words[j] = words[j], words[i]
	})
	// Bug fix: use Print, not Printf — the joined words are data, not a
	// format string, and a '%' in a word would corrupt the output
	// (go vet flags non-constant format strings).
	fmt.Print(strings.Join(words, *separator))
}
|
package pb
import (
"time"
"github.com/fanaticscripter/EggContractor/util"
)
// GetDurationUntilProductionDeadline converts the contract's
// seconds-until-production-deadline field into a time.Duration.
func (c *SoloContract) GetDurationUntilProductionDeadline() time.Duration {
	return util.DoubleToDuration(c.SecondsUntilProductionDeadline)
}

// GetDurationUntilCollectionDeadline converts the contract's
// seconds-until-collection-deadline field into a time.Duration.
func (c *SoloContract) GetDurationUntilCollectionDeadline() time.Duration {
	return util.DoubleToDuration(c.SecondsUntilCollectionDeadline)
}

// GetServerRefreshTime converts the server refresh timestamp (a float64
// field) into a time.Time.
func (c *SoloContract) GetServerRefreshTime() time.Time {
	return util.DoubleToTime(c.ServerRefreshTimestamp)
}
|
package i18n
import (
"strings"
"github.com/windrivder/gopkg/errorx"
)
// errUnmarshalNilLocate is returned when UnmarshalText is called on a nil *Locale.
var errUnmarshalNilLocate = errorx.New("can't unmarshal a nil *Locate")
// Locale enumerates the supported UI languages.
type Locale int
const (
	// LocaleEN is English (the zero value, hence the default).
	LocaleEN Locale = iota
	// LocaleZH is Chinese.
	LocaleZH
)
// locates maps each Locale to its lowercase language code.
var (
	locates = [...]string{
		LocaleEN: "en",
		LocaleZH: "zh",
	}
)
// Int returns the locale's underlying integer value.
func (l Locale) Int() int {
	return int(l)
}
// String returns the locale's language code ("en", "zh"), or the empty
// string for an out-of-range value.
func (l Locale) String() string {
	if i := l.Int(); i >= 0 && i < len(locates) {
		return locates[i]
	}
	return ""
}
// CapitalString returns the upper-case language code, e.g. "EN".
func (l Locale) CapitalString() string {
	return strings.ToUpper(l.String())
}
// MarshalText implements encoding.TextMarshaler using the language code.
func (l Locale) MarshalText() ([]byte, error) {
	return []byte(l.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler, accepting either the
// lower- or upper-case language code.
func (l *Locale) UnmarshalText(text []byte) error {
	if l == nil {
		return errUnmarshalNilLocate
	}
	switch string(text) {
	case LocaleEN.String(), LocaleEN.CapitalString():
		*l = LocaleEN
	case LocaleZH.String(), LocaleZH.CapitalString():
		*l = LocaleZH
	default:
		return errorx.New("unrecognized locate: %q", text)
	}
	return nil
}
// Set sets the locale from a string, satisfying the flag.Value interface.
func (l *Locale) Set(s string) error {
	return l.UnmarshalText([]byte(s))
}
// Get returns the locale value, satisfying the flag.Getter interface.
func (l *Locale) Get() interface{} {
	return *l
}
|
package controllers
import (
	"net/http"
	"net/url"

	"github.com/gin-gonic/gin"

	"go_simpleweibo/config"
	"go_simpleweibo/routes/named"
)
// Redirect sends an HTTP redirect to redirectPath; when withRoot is true
// the configured application root URL is prepended first.
func Redirect(c *gin.Context, redirectPath string, withRoot bool) {
	target := redirectPath
	if withRoot {
		target = config.AppConfig.URL + redirectPath
	}
	redirect(c, target)
}
// RedirectRouter redirects using a named route, resolving the route name
// (plus args) to a concrete path.
func RedirectRouter(c *gin.Context, routerName string, args ...interface{}) {
	redirect(c, named.G(routerName, args...))
}
// RedirectToLoginPage sends the client to the login page. For non-POST
// requests the current path is attached as a "back" query parameter so the
// user can be returned after logging in; POSTs redirect without it.
func RedirectToLoginPage(c *gin.Context) {
	loginPath := named.G("login.create")
	if c.Request.Method == http.MethodPost {
		redirect(c, loginPath)
		return
	}
	// BUG FIX: the path is now query-escaped; previously a path containing
	// '&', '?' or '#' corrupted the back parameter.
	redirect(c, loginPath+"?back="+url.QueryEscape(c.Request.URL.Path))
}
// ------------------------ private
// redirect performs the actual HTTP redirect shared by the exported helpers.
func redirect(c *gin.Context, redirectPath string) {
	// NOTE: must NOT use 301 (permanent redirect) here — browsers would
	// cache it and break dynamic routing. (Translated from Chinese.)
	c.Redirect(http.StatusFound, redirectPath)
}
|
// Copyright 2019 TriggerMesh, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package buildtemplate
import (
"errors"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/triggermesh/tm/pkg/client"
)
// TestList is an integration test: it builds a client from the default
// config path and verifies that listing build templates in the namespace
// (overridable via $NAMESPACE) succeeds against a live cluster.
func TestList(t *testing.T) {
	namespace := "test-namespace"
	if ns, ok := os.LookupEnv("NAMESPACE"); ok {
		namespace = ns
	}
	buildTemplateClient, err := client.NewClient(client.ConfigPath(""))
	assert.NoError(t, err)
	buildtemplate := &Buildtemplate{Namespace: namespace}
	_, err = buildtemplate.List(&buildTemplateClient)
	assert.NoError(t, err)
}
// TestBuildTemplate is a table-driven integration test: each fixture file is
// deployed against a live cluster and either the expected deployment error
// or a successful get/re-deploy/delete cycle is asserted.
func TestBuildTemplate(t *testing.T) {
	namespace := "test-namespace"
	if ns, ok := os.LookupEnv("NAMESPACE"); ok {
		namespace = ns
	}
	buildTemplateClient, err := client.NewClient(client.ConfigPath(""))
	assert.NoError(t, err)
	testCases := []struct {
		Name string
		File string
		ErrMSG error
	}{
		{"foo", "", errors.New("Buildtemplate \"\" not found")},
		//{"foo", "https://github.com/triggermesh/tm/blob/master/testfiles/broken-buildtemplate.yaml", "", errors.New("error converting YAML to JSON: yaml: line 526: mapping values are not allowed in this context")},
		{"foo", "../../../testfiles/buildtemplate-err1-test.yaml", errors.New("Build template \"IMAGE\" parameter is missing")},
		{"foo", "../../../testfiles/buildtemplate-err2-test.yaml", errors.New("Can't create object, only BuildTemplate is allowed")},
		{"foo", "../../../testfiles/buildtemplate-test.yaml", nil},
		// Same template listed twice: the second pass re-deploys a template
		// that was just deleted, exercising a fresh deploy again.
		{"foo", "../../../testfiles/buildtemplate-test.yaml", nil},
	}
	for _, tt := range testCases {
		buildtemplate := &Buildtemplate{
			Name: tt.Name,
			File: tt.File,
			Namespace: namespace,
		}
		_, err := buildtemplate.Deploy(&buildTemplateClient)
		if err != nil {
			// Expected-failure fixture: assert the message and move on.
			assert.Equal(t, tt.ErrMSG, err)
			continue
		}
		bt, err := buildtemplate.Get(&buildTemplateClient)
		assert.NoError(t, err)
		assert.Equal(t, tt.Name, bt.Name)
		// Deploy a second time over the existing template.
		_, err = buildtemplate.Deploy(&buildTemplateClient)
		if err != nil {
			assert.Equal(t, tt.ErrMSG, err)
		}
		err = buildtemplate.Delete(&buildTemplateClient)
		assert.NoError(t, err)
	}
}
|
package main
import (
"mall/app/api/web/system/conf"
"mall/app/api/web/system/server/http/server"
)
// main loads the application configuration and starts the HTTP server.
// A configuration failure is fatal at startup.
func main() {
	if err := conf.Init(); err != nil {
		panic(err)
	}
	server.Init(conf.Conf)
}
|
/*
* Copyright 2019-2020 VMware, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package service
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetPods lists the pods in namespace that match LabelSelector.
//
// BUG FIX: a clientset construction failure was previously only printed,
// after which the nil clientset was dereferenced and panicked; the error
// is now returned to the caller instead.
func GetPods(namespace, LabelSelector string) (*v1.PodList, error) {
	clientset, err := getClientset()
	if err != nil {
		return nil, err
	}
	return clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: LabelSelector})
}
// GetClusterPodStatus returns a pod-name -> phase map for all pods carrying
// the label name=<name> in the given namespace.
func GetClusterPodStatus(name, namespace string) (map[string]string, error) {
	selector := fmt.Sprintf("name=%s", name)
	list, err := GetPods(namespace, selector)
	if err != nil {
		return nil, err
	}
	return GetPodStatus(list), nil
}
// GetPodStatus maps each pod's name to its phase string; any phase other
// than Running/Succeeded/Pending/Failed is reported as "Unknown".
func GetPodStatus(pods *v1.PodList) map[string]string {
	status := make(map[string]string)
	for i := range pods.Items {
		pod := &pods.Items[i]
		phase := string(pod.Status.Phase)
		switch phase {
		case "Running", "Succeeded", "Pending", "Failed":
			status[pod.Name] = phase
		default:
			status[pod.Name] = "Unknown"
		}
	}
	return status
}
// CheckClusterStatus reports whether every pod in ClusterStatus is in the
// "Running" phase. An empty (or nil) map is treated as not healthy.
func CheckClusterStatus(ClusterStatus map[string]string) bool {
	if len(ClusterStatus) == 0 {
		return false
	}
	for _, phase := range ClusterStatus {
		if phase != "Running" {
			return false
		}
	}
	return true
}
// GetPodList returns the names of all pods labelled name=<name> in the
// given namespace (nil slice when there are none).
func GetPodList(name, namespace string) ([]string, error) {
	list, err := GetPods(namespace, fmt.Sprintf("name=%s", name))
	if err != nil {
		return nil, err
	}
	var names []string
	for i := range list.Items {
		names = append(names, list.Items[i].GetName())
	}
	return names, nil
}
|
//go:generate mockery -name=Purchaser -output=./internal/mocks
package purchasepersister
import (
"context"
"encoding/json"
"errors"
"net/http"
"github.com/diegoholiveira/bookstore-sample/pkg/http/render"
"github.com/diegoholiveira/bookstore-sample/purchases"
)
type (
	// Purchaser persists a purchase on behalf of the HTTP handler.
	Purchaser interface {
		MakePurchase(context.Context, purchases.Purchase) error
	}
	// PurchaseHandler is the HTTP adapter in front of a Purchaser.
	PurchaseHandler struct {
		purchaser Purchaser
	}
)
// NewPurchaseHandler wraps p in an HTTP handler.
func NewPurchaseHandler(p Purchaser) PurchaseHandler {
	return PurchaseHandler{
		purchaser: p,
	}
}
// ServeHTTP decodes a single purchase object from the request body and
// hands it to the purchaser, translating failures into JSON error payloads:
// 400 for malformed/extra input or invalid purchases, 500 otherwise, and
// 201 with an empty body on success.
func (h PurchaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	dec := json.NewDecoder(r.Body)
	dec.DisallowUnknownFields()
	var purchase purchases.Purchase
	if err := dec.Decode(&purchase); err != nil {
		render.JSON(w, http.StatusBadRequest, map[string]string{
			"error": "Error while decoding the JSON payload",
		})
		return
	}
	if dec.More() {
		render.JSON(w, http.StatusBadRequest, map[string]string{
			"error": "Request body must only contain a single JSON object",
		})
		return
	}
	err := h.purchaser.MakePurchase(r.Context(), purchase)
	if err == nil {
		w.WriteHeader(http.StatusCreated)
		return
	}
	// Invalid purchases are a client error; everything else is a 500.
	status := http.StatusInternalServerError
	var errPurchase ErrPurchaseInvalid
	if errors.As(err, &errPurchase) {
		status = http.StatusBadRequest
	}
	render.JSON(w, status, map[string]string{
		"error": err.Error(),
	})
}
|
package main
import (
"fmt"
"os"
gcp_memberlist "github.com/stefanhans/cloud-function-play/memberlist"
)
var (
	// gcpMemberList holds the process-wide memberlist instance.
	// NOTE(review): not referenced in this file — confirm it is used elsewhere.
	gcpMemberList *gcp_memberlist.Memberlist
)
// CreateMemberlist creates a memberlist backed by GCP Cloud Functions with
// Firestore. It fails when the GCP_SERVICE_URL environment variable is unset.
func CreateMemberlist(name, ip string) (*gcp_memberlist.Memberlist, error) {
	// The service URL only needs to exist; its value is consumed elsewhere.
	if os.Getenv("GCP_SERVICE_URL") == "" {
		return nil, fmt.Errorf("GCP_SERVICE_URL environment variable unset or missing")
	}
	member := &gcp_memberlist.IpAddress{
		Name:     name,
		Ip:       ip,
		Port:     "",
		Protocol: "tcp",
	}
	return gcp_memberlist.Create(member)
}
|
package main
import (
"testing"
)
// TestCode table-tests organizingContainers against a known organizable and
// a known non-organizable matrix.
func TestCode(t *testing.T) {
	var tests = []struct {
		input  [][]int
		output bool
	}{
		{
			input: [][]int{
				{0, 2, 1},
				{1, 1, 1},
				{2, 0, 0},
			},
			output: true,
		},
		{
			input: [][]int{
				{1, 3, 1},
				{2, 1, 2},
				{3, 3, 3},
			},
			output: false,
		},
	}
	for _, test := range tests {
		if got := organizingContainers(test.input); got != test.output {
			// BUG FIX: failure message had a typo ("Organzing") and was
			// missing the space between "Got" and its value.
			t.Errorf(
				"Organizing Containers for Input=%v; Got %v; Expected %v",
				test.input, got, test.output,
			)
		}
	}
}
|
package httpbot
import (
"fmt"
"log"
"os"
"util"
"golang.org/x/net/websocket"
)
// VideoClient fans video frames from the robot out to one websocket viewer.
type VideoClient struct {
	r *Robot
	msgChan chan []byte // outbound frames, buffered up to maxBuffer
	debug bool
	logger *log.Logger
}
// NewVideoClient builds a client for robot r with an outbound buffer of
// maxBuffer frames; debug enables verbose logging to stderr.
func NewVideoClient(r *Robot, maxBuffer int, debug bool) *VideoClient {
	return &VideoClient{
		r: r,
		msgChan: make(chan []byte, maxBuffer),
		debug: debug,
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
}
// Run drives one video websocket session: it registers the client's
// outbound channel with the robot, starts reader/writer goroutines, and
// tears everything down once either goroutine reports an error.
func (c *VideoClient) Run(ws *websocket.Conn) {
	// Buffered so a handler can report an error while Run is busy elsewhere.
	errChan := make(chan error, 1)
	go c.messageInHandler(ws, errChan)
	go c.messageOutHandler(ws, errChan)
	c.logf("Registering.\n")
	c.r.videoLock.Lock()
	c.r.videoClients[ws] = c.msgChan
	c.r.videoLock.Unlock()
	// Block until the first handler fails (read or send error).
	err := <-errChan
	c.logf("Handler error: %v\n", err)
	c.logf("Closing socket.\n")
	ws.Close()
	c.logf("Deregistering.\n")
	c.r.videoLock.Lock()
	delete(c.r.videoClients, ws)
	c.r.videoLock.Unlock()
	c.logf("Shutting down byte channel.\n")
	// Closing msgChan makes messageOutHandler finish and send a final nil.
	close(c.msgChan)
	c.logf("Waiting for handler.\n")
	// Drain until the out-handler's terminating nil arrives; errChan itself
	// is never closed, so the nil value is the loop's only exit.
	for err := range errChan {
		if err == nil {
			break
		} else {
			c.logf("Handler err: %v\n", err)
		}
	}
	c.logf("Client finished!\n")
}
// messageInHandler drains inbound websocket frames (discarding their
// contents) until a read error occurs, which is forwarded on errChan.
func (c *VideoClient) messageInHandler(ws *websocket.Conn, errChan chan error) {
	// BUG FIX: log messages previously said "messageInHanlder" (typo).
	c.logf("messageInHandler started.\n")
	defer c.logf("messageInHandler ended.\n")
	for {
		msg, err := c.ReadMessage(ws, 1)
		if err != nil {
			errChan <- err
			return
		}
		c.logf("Read %v bytes\n", len(msg))
	}
}
// messageOutHandler forwards buffers from c.msgChan to the websocket until
// the channel is closed, then signals completion with a nil error. After
// the first send failure it keeps draining the channel (without sending)
// so the producer never blocks.
func (c *VideoClient) messageOutHandler(ws *websocket.Conn, errChan chan error) {
	// BUG FIX: log messages previously said "messageOutHanlder" (typo).
	// Also simplified the single-case select loop to a range over msgChan.
	c.logf("messageOutHandler started.\n")
	defer c.logf("messageOutHandler ended.\n")
	hadErr := false
	for buf := range c.msgChan {
		if hadErr {
			continue
		}
		if err := websocket.Message.Send(ws, buf); err != nil {
			errChan <- err
			hadErr = true
		}
	}
	errChan <- nil
}
// ReadMessage reads a single websocket frame, capped at 1024 bytes.
// NOTE(review): the max parameter is ignored — the limit is hard-coded to
// 1024; confirm whether callers (which pass 1) expect their value honored.
func (c *VideoClient) ReadMessage(ws *websocket.Conn, max uint32) ([]byte, error) {
	return util.ReadMessage(ws, 1024)
}
// logf writes a debug log line tagged with the robot/client pointers; it is
// a no-op unless debug mode is enabled and a logger is configured.
func (c *VideoClient) logf(format string, v ...interface{}) {
	if c.debug && c.logger != nil {
		l := fmt.Sprintf(format, v...)
		c.logger.Printf("V[%p:%p]: %v", c.r, c, l)
	}
}
|
// carbon-relay-ng
// route traffic to anything that speaks the Graphite Carbon protocol,
// such as Graphite's carbon-cache.py, influxdb, ...
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/Dieterbe/statsd-go"
"github.com/graphite-ng/carbon-relay-ng/admin"
"github.com/graphite-ng/carbon-relay-ng/routing"
"github.com/rcrowley/goagain"
"html/template"
"io"
"log"
"net"
"net/http"
"os"
"regexp"
"runtime/pprof"
)
// StatsdConfig holds the statsd reporting settings from the TOML config.
type StatsdConfig struct {
	Enabled bool
	Instance string
	Host string
	Port int
}
// Config mirrors the TOML configuration file.
type Config struct {
	Listen_addr string
	Admin_addr string
	Http_addr string
	Spool_dir string
	First_only bool
	Routes map[string]*routing.Route
	Statsd StatsdConfig
}
// Package-level state shared by the listeners and the router goroutine.
var (
	config_file string
	config Config
	to_dispatch = make(chan []byte) // metric lines awaiting routing
	routes *routing.Routes
	statsdClient statsd.Client
	cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
)
// init configures log output with time, microseconds, and file:line info.
func init() {
	log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
}
// accept loops on the TCP listener, spawning one handle goroutine per
// incoming connection; it returns when Accept fails (e.g. listener closed).
func accept(l *net.TCPListener, config Config) {
	for {
		conn, err := l.AcceptTCP()
		if err != nil {
			log.Println(err)
			return
		}
		go handle(conn, config)
	}
}
// handle reads newline-terminated metric lines from a connection, counts
// each in statsd, and forwards a private copy to the dispatcher channel.
func handle(c *net.TCPConn, config Config) {
	defer c.Close()
	// TODO c.SetTimeout(60e9)
	reader := bufio.NewReaderSize(c, 4096)
	for {
		line, isPrefix, err := reader.ReadLine()
		if err != nil {
			if err != io.EOF {
				log.Println(err)
			}
			return
		}
		if isPrefix { // TODO Recover from partial reads.
			log.Println("isPrefix: true")
			return
		}
		line = append(line, '\n')
		// Copy the line: ReadLine's buffer is reused on the next iteration.
		cp := append([]byte(nil), line...)
		statsdClient.Increment("target_type=count.unit=Metric.direction=in")
		to_dispatch <- cp
	}
}
// Router consumes metric lines from to_dispatch and hands each to the
// routing table, logging any line no route accepts.
func Router() {
	for buf := range to_dispatch {
		if !routes.Dispatch(buf, config.First_only) {
			log.Printf("unrouteable: %s\n", buf)
		}
	}
}
// tcpListHandler implements "route list": it prints all routes in columns
// sized to the longest key/pattern/address.
func tcpListHandler(req admin.Req) (err error) {
	if len(req.Command) != 2 {
		return errors.New("extraneous arguments")
	}
	// Minimum column widths; grown below to fit the widest entry.
	longest_key := 9
	longest_patt := 9
	longest_addr := 9
	list := routes.List()
	for key, route := range list {
		if len(key) > longest_key {
			longest_key = len(key)
		}
		if len(route.Patt) > longest_patt {
			longest_patt = len(route.Patt)
		}
		if len(route.Addr) > longest_addr {
			longest_addr = len(route.Addr)
		}
	}
	// Build a row format such as "%10s %10s %10s %8v\n" from the widths.
	fmt_str := fmt.Sprintf("%%%ds %%%ds %%%ds %%8v\n", longest_key+1, longest_patt+1, longest_addr+1)
	(*req.Conn).Write([]byte(fmt.Sprintf(fmt_str, "key", "pattern", "addr", "spool")))
	for key, route := range list {
		(*req.Conn).Write([]byte(fmt.Sprintf(fmt_str, key, route.Patt, route.Addr, route.Spool)))
	}
	(*req.Conn).Write([]byte("--\n"))
	return
}
// tcpAddHandler implements "route add <key> [pattern] <addr> <spool>";
// an empty pattern allows everything, spool must be "1" to enable spooling.
//
// BUG FIX: req.Command[2] was indexed before the argument count was
// validated, so a bare "route add" panicked with index-out-of-range; the
// length check now happens first.
func tcpAddHandler(req admin.Req) (err error) {
	var patt, addr, spool_str string
	switch len(req.Command) {
	case 5:
		patt = ""
		addr = req.Command[3]
		spool_str = req.Command[4]
	case 6:
		patt = req.Command[3]
		addr = req.Command[4]
		spool_str = req.Command[5]
	default:
		return errors.New("bad number of arguments")
	}
	key := req.Command[2]
	spool := spool_str == "1"
	err = routes.Add(key, patt, addr, spool, &statsdClient)
	if err != nil {
		return err
	}
	(*req.Conn).Write([]byte("added\n"))
	return
}
// tcpDelHandler implements "route del <key>": it removes the named route
// and acknowledges on the admin connection.
func tcpDelHandler(req admin.Req) (err error) {
	if len(req.Command) != 3 {
		return errors.New("bad number of arguments")
	}
	if err = routes.Del(req.Command[2]); err != nil {
		return err
	}
	(*req.Conn).Write([]byte("deleted\n"))
	return
}
// tcpPattHandler implements "route patt <key> [pattern]"; omitting the
// pattern clears it (allowing everything).
//
// BUG FIX: req.Command[2] was indexed before the argument count was
// validated, so a bare "route patt" panicked; the length check now runs
// first.
func tcpPattHandler(req admin.Req) (err error) {
	var patt string
	switch len(req.Command) {
	case 4:
		patt = req.Command[3]
	case 3:
		patt = ""
	default:
		return errors.New("bad number of arguments")
	}
	key := req.Command[2]
	err = routes.Update(key, nil, &patt)
	if err != nil {
		return err
	}
	(*req.Conn).Write([]byte("updated\n"))
	return
}
// tcpHelpHandler implements "help".
func tcpHelpHandler(req admin.Req) (err error) {
	writeHelp(*req.Conn, []byte(""))
	return
}
// tcpDefaultHandler answers any unrecognized command with the help text.
func tcpDefaultHandler(req admin.Req) (err error) {
	writeHelp(*req.Conn, []byte("unknown command\n"))
	return
}
// writeHelp writes an optional prefix followed by the command reference.
func writeHelp(conn net.Conn, write_first []byte) { // bytes.Buffer
	//write_first.WriteTo(conn)
	conn.Write(write_first)
	help := `
commands:
    help                             show this menu
    route list                       list routes
    route add <key> [pattern] <addr> <spool>       add the route. (empty pattern allows all). (spool has to be 1 or 0)
    route del <key>                  delete the matching route
    route patt <key> [pattern]       update pattern for given route key.  (empty pattern allows all)
`
	conn.Write([]byte(help))
}
// adminListener registers the telnet-style admin commands and blocks
// serving the admin TCP endpoint; a listen failure exits the process.
func adminListener() {
	admin.HandleFunc("route list", tcpListHandler)
	admin.HandleFunc("route add", tcpAddHandler)
	admin.HandleFunc("route del", tcpDelHandler)
	admin.HandleFunc("route patt", tcpPattHandler)
	admin.HandleFunc("help", tcpHelpHandler)
	admin.HandleFunc("", tcpDefaultHandler)
	log.Printf("admin TCP listener starting on %v", config.Admin_addr)
	err := admin.ListenAndServe(config.Admin_addr)
	if err != nil {
		fmt.Println("Error listening:", err.Error())
		os.Exit(1)
	}
}
// homeHandler renders the index page listing all routes.
func homeHandler(w http.ResponseWriter, r *http.Request, title string) {
	tc := make(map[string]interface{})
	tc["Title"] = title
	tc["routes"] = routes.Map
	templates := template.Must(loadTemplates("templates/base.html", "templates/index.html"))
	if err := templates.Execute(w, tc); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// editHandler renders the edit form for the route named in the URL path.
// NOTE(review): a key missing from routes.Map yields a zero/nil entry and
// the field accesses below could panic — confirm and consider a guard.
func editHandler(w http.ResponseWriter, r *http.Request, title string) {
	key := r.URL.Path[len("/edit/"):]
	route := routes.Map[key]
	fmt.Printf("Editting %s with %s - %s \n", route.Key, route.Patt, route.Addr)
	tc := make(map[string]interface{})
	tc["Title"] = title
	tc["Key"] = route.Key
	tc["Addr"] = route.Addr
	tc["Patt"] = route.Patt
	templates := template.Must(loadTemplates("templates/base.html", "templates/edit.html"))
	if err := templates.Execute(w, tc); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// saveHandler creates a new route (spooling disabled) from the submitted
// form values, then returns to the index page.
func saveHandler(w http.ResponseWriter, r *http.Request, title string) {
	key, patt, addr := r.FormValue("key"), r.FormValue("patt"), r.FormValue("addr")
	if err := routes.Add(key, patt, addr, false, &statsdClient); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, "/", http.StatusFound)
}
// updateHandler updates an existing route's address and pattern from the
// submitted form values, then returns to the index page.
func updateHandler(w http.ResponseWriter, r *http.Request, title string) {
	key, patt, addr := r.FormValue("key"), r.FormValue("patt"), r.FormValue("addr")
	if err := routes.Update(key, &addr, &patt); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, "/", http.StatusFound)
}
// deleteHandler removes the route named in the URL path, then returns to
// the index page.
func deleteHandler(w http.ResponseWriter, r *http.Request, title string) {
	key := r.URL.Path[len("/delete/"):]
	if err := routes.Del(key); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, "/", http.StatusFound)
}
func makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
validPath := regexp.MustCompile("^/(edit|save|delete|update)?(.*)$")
m := validPath.FindStringSubmatch(r.URL.Path)
if m == nil {
http.NotFound(w, r)
return
}
fn(w, r, m[2])
}
}
// httpListener wires up the web admin UI routes and blocks serving them;
// a listen failure exits the process.
func httpListener() {
	// TODO treat errors like 'not found' etc differently, don't just return http.StatusInternalServerError in all cases
	http.HandleFunc("/edit/", makeHandler(editHandler))
	http.HandleFunc("/save/", makeHandler(saveHandler))
	http.HandleFunc("/update/", makeHandler(updateHandler))
	http.HandleFunc("/delete/", makeHandler(deleteHandler))
	http.HandleFunc("/", makeHandler(homeHandler))
	log.Printf("admin HTTP listener starting on %v", config.Http_addr)
	err := http.ListenAndServe(config.Http_addr, nil)
	if err != nil {
		fmt.Println("Error listening:", err.Error())
		os.Exit(1)
	}
}
// usage prints command-line help to stderr.
func usage() {
	fmt.Fprintln(
		os.Stderr,
		"Usage: carbon-relay-ng <path-to-config>",
	)
	flag.PrintDefaults()
}
// main loads the TOML config, starts the router plus admin/HTTP listeners,
// and participates in the goagain zero-downtime restart protocol.
func main() {
	flag.Usage = usage
	flag.Parse()
	// Default config path, overridden by the first positional argument.
	config_file = "/etc/carbon-relay-ng.ini"
	if 1 == flag.NArg() {
		config_file = flag.Arg(0)
	}
	if _, err := toml.DecodeFile(config_file, &config); err != nil {
		fmt.Printf("Cannot use config file '%s':\n", config_file)
		fmt.Println(err)
		return
	}
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	log.Println("initializing routes...")
	var err error
	// NOTE(review): routes receives &statsdClient before statsdClient is
	// assigned below; this works only because the pointer is shared —
	// confirm no metrics are emitted before that assignment.
	routes, err = routing.NewRoutes(config.Routes, config.Spool_dir, &statsdClient)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
	err = routes.Run()
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
	statsdPrefix := fmt.Sprintf("service=carbon-relay-ng.instance=%s.", config.Statsd.Instance)
	statsdClient = *statsd.NewClient(config.Statsd.Enabled, config.Statsd.Host, config.Statsd.Port, statsdPrefix)
	// Follow the goagain protocol, <https://github.com/rcrowley/goagain>.
	l, ppid, err := goagain.GetEnvs()
	if nil != err {
		// Fresh start: no inherited listener, so create our own.
		laddr, err := net.ResolveTCPAddr("tcp", config.Listen_addr)
		if nil != err {
			log.Println(err)
			os.Exit(1)
		}
		l, err = net.ListenTCP("tcp", laddr)
		if nil != err {
			log.Println(err)
			os.Exit(1)
		}
		log.Printf("listening on %v", laddr)
		go accept(l.(*net.TCPListener), config)
	} else {
		// Restarted child: reuse the inherited listener, then kill parent.
		log.Printf("resuming listening on %v", l.Addr())
		go accept(l.(*net.TCPListener), config)
		if err := goagain.KillParent(ppid); nil != err {
			log.Println(err)
			os.Exit(1)
		}
	}
	if config.Admin_addr != "" {
		go adminListener()
	}
	if config.Http_addr != "" {
		go httpListener()
	}
	go Router()
	// Block until signalled to exit or restart.
	if err := goagain.AwaitSignals(l); nil != err {
		log.Println(err)
		os.Exit(1)
	}
}
|
package extract
import (
"context"
"errors"
"io"
)
// copyCancel copies src into dst, aborting between reads once ctx is
// cancelled; it returns the byte count and first error from io.Copy.
func copyCancel(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	reader := newCancelableReader(ctx, src)
	return io.Copy(dst, reader)
}
// cancelableReader wraps src so each Read first checks ctx for cancellation.
type cancelableReader struct {
	ctx context.Context
	src io.Reader
}
// Read fails once the context is done, otherwise delegates to the wrapped
// reader. Cancellation is only observed between reads; a blocked underlying
// Read is not interrupted.
// NOTE(review): returning r.ctx.Err() instead of a fresh "interrupted"
// error would preserve the cancellation cause — confirm no caller matches
// on the message before changing it.
func (r *cancelableReader) Read(p []byte) (int, error) {
	select {
	case <-r.ctx.Done():
		return 0, errors.New("interrupted")
	default:
		return r.src.Read(p)
	}
}
// newCancelableReader wraps src with ctx cancellation checks.
func newCancelableReader(ctx context.Context, src io.Reader) *cancelableReader {
	return &cancelableReader{
		ctx: ctx,
		src: src,
	}
}
|
package scanner
import (
"github.com/morlay/gin-swagger/program"
"go/ast"
"go/types"
"regexp"
"strings"
)
// isGinMethod reports whether method is one of the HTTP methods gin routes
// support. Implemented as a switch so no map is allocated on every call
// (the original rebuilt the lookup map per invocation).
func isGinMethod(method string) bool {
	switch method {
	case "GET", "POST", "PUT", "PATCH", "HEAD", "DELETE", "OPTIONS":
		return true
	}
	return false
}
// getJSONNameAndFlags splits a struct-tag value such as "name,omitempty"
// into the field name and its option flags.
func getJSONNameAndFlags(tagValue string) (string, []string) {
	parts := strings.Split(tagValue, ",")
	return parts[0], parts[1:]
}
// parseCommentToSummaryDesc splits a comment into its first line (summary)
// and the whitespace-trimmed remainder (description).
func parseCommentToSummaryDesc(str string) (string, string) {
	parts := strings.SplitN(str, "\n", 2)
	if len(parts) == 1 {
		return parts[0], ""
	}
	return parts[0], strings.TrimSpace(parts[1])
}
// getExportedNameOfPackage returns the segment after the last '.' in path,
// or the whole path when it contains no dot.
func getExportedNameOfPackage(path string) string {
	if i := strings.LastIndex(path, "."); i >= 0 {
		return path[i+1:]
	}
	return path
}
// getRouterPathByCallExpr extracts the first argument of a route
// registration call, assuming it is a basic string literal.
func getRouterPathByCallExpr(callExpr *ast.CallExpr) string {
	return program.GetBasicLitValue(callExpr.Args[0].(*ast.BasicLit)).(string)
}
func indirect(t types.Type) types.Type {
switch t.(type) {
case *types.Pointer:
return indirect(t.(*types.Pointer).Elem())
case *types.Named:
return indirect(t.(*types.Named).Underlying())
default:
return t
}
}
// ginParamPattern matches a gin path parameter segment (":name").
// Compiled once at package init — previously it was recompiled per call.
var ginParamPattern = regexp.MustCompile("/:([^/]+)")

// convertGinPathToSwaggerPath rewrites gin-style ":param" segments into
// swagger-style "{param}" segments, e.g. "/users/:id" -> "/users/{id}".
func convertGinPathToSwaggerPath(str string) string {
	return ginParamPattern.ReplaceAllString(str, "/{$1}")
}
|
package main
import "fmt"
// main demonstrates Go variable declaration forms: typed var, var with an
// initializer, short declaration, multi-assignment, and constants.
func main() {
	// Go is statically typed: a variable's type is fixed at declaration.
	var helloStr string
	helloStr = "Hello World!"
	fmt.Println(helloStr)
	// var <name> <type> = <expression>
	var name string = "Numan Ibn Mazid"
	fmt.Println(name)
	// Short form combining declaration and assignment: <name> := <expr>
	message := "Learning GoLang"
	fmt.Println(message)
	// Several variables may be declared (and assigned) at once.
	var a, b int = 10, 20
	fmt.Println(a, b)
	var c, d string
	c, d = "Apple", "Banana"
	fmt.Println(c, d)
	// Constants cannot be reassigned after declaration.
	const gender string = "Male"
	fmt.Println(gender)
}
|
package accessibility
import (
"fmt"
"strconv"
"github.com/perthgophers/govhack/db"
)
// CongestionResult holds the 1-10 traffic rank computed by the query below.
type CongestionResult struct {
	MeanValue float64 `db:"trafficrank"`
}
// Congestion returns a 1-10 traffic-congestion rank for the point at the
// given coordinates, derived from mean congestion of nearby road polylines
// (0 with a nil error when the query returns no rows).
func Congestion(longitude, latitude float64) (int, error) {
	score := []CongestionResult{}
	// Format coordinates with 6 decimal places for the WKT POINT literal.
	longStr := strconv.FormatFloat(longitude, 'f', 6, 64)
	latStr := strconv.FormatFloat(latitude, 'f', 6, 64)
	/*
		queryStr := `
	SELECT has_congestion_mean
	FROM traffic_congestion_rank
	WHERE
	poly_line not like '%)' AND
	ST_DWithin(ST_GeomFromText(poly_line), ST_GeomFromText($1,4326), 1000)
	`
	*/
	// Bucket the average congestion near the point into a 1-10 rank using
	// precomputed decile thresholds.
	queryStr := `
	SELECT
	case
	when avg(has_congestion_mean) < 0.06321347 then 1
	when avg(has_congestion_mean) < 0.103310502 then 2
	when avg(has_congestion_mean) < 0.1293379 then 3
	when avg(has_congestion_mean) < 0.146261416 then 4
	when avg(has_congestion_mean) < 0.173401827 then 5
	when avg(has_congestion_mean) < 0.206478311 then 6
	when avg(has_congestion_mean) < 0.242037671 then 7
	when avg(has_congestion_mean) < 0.292751142 then 8
	when avg(has_congestion_mean) < 0.363527397 then 9
	else 10
	end AS trafficrank
	FROM traffic_congestion_rank
	WHERE
	poly_line like '%)' AND NOT poly_line LIKE 'MULTI%' AND ST_DWithin(ST_GeomFromText(poly_line, 4326), ST_GeomFromText($1, 4326), 0.5);
	`
	dbclient := db.Client()
	err := dbclient.Select(
		&score,
		queryStr,
		fmt.Sprintf("POINT(%s %s)", longStr, latStr),
	)
	//fmt.Printf("phor %+v\n", score)
	/*
		hashmap := make(map[float64]float64){
			0.063213: 1,
			0.103311: 2,
			0.129338: 3,
			0.146261: 4,
			0.173402: 5,
			0.206478: 6,
			0.242038: 7,
			0.292751: 8,
			0.363527: 9,
		}
	*/
	if err != nil {
		fmt.Println("err", err)
		return 0, err
	}
	// NOTE(review): this loop looks like leftover debug output.
	for k, i := range score {
		fmt.Println(k, i)
	}
	if len(score) >= 1 {
		return int(score[0].MeanValue), nil
	}
	return 0, nil
}
|
package main
//编译成LINUX下面的软件
//set GOOS=linux
//set GOARCH=amd64
//set CGO_ENABLED=0
//go install
//go build //1.8
//version
//bee v1.6.2
//beego v1.7.2
//go v1.6.2
import (
"openvpn/models"
_ "openvpn/routers"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/session"
)
// globalSessions would hold a manually-configured session manager; the
// manual setup is currently disabled in favor of beego's built-in sessions.
var globalSessions *session.Manager
// init registers the database models before the ORM is used.
func init() {
	models.RegisterDB()
	// globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid", "enableSetCookie,omitempty": true, "gclifetime":3600, "maxLifetime": 3600, "secure": false, "sessionIDHashFunc": "sha1", "sessionIDHashKey": "", "cookieLifeTime": 3600, "providerConfig": ""}`)
	// go globalSessions.GC()
}
// main syncs the DB schema, enables sessions, and starts the beego server.
func main() {
	// NOTE(review): ORM debug logging is enabled unconditionally — confirm
	// this is intended outside of development.
	orm.Debug = true
	orm.RunSyncdb("default", false, true)
	beego.BConfig.WebConfig.Session.SessionOn = true
	beego.Run()
}
|
package host
// Enable re-activates a profile by uncommenting all of its hosts lines so
// routing works again. An empty profile name enables every profile except
// "default". The result is written back to dst.
func Enable(dst, profile string) error {
	h, err := getHostData(dst, profile)
	if err != nil {
		return err
	}
	switch profile {
	case "":
		for name := range h.profiles {
			if name == "default" {
				continue
			}
			enableProfile(h, name)
		}
	default:
		enableProfile(h, profile)
	}
	return writeHostData(dst, h)
}
// enableProfile uncomments every disabled line belonging to profile,
// mutating the profile's line slice in place.
func enableProfile(h *hostFile, profile string) {
	lines := h.profiles[profile]
	for i := range lines {
		if IsDisabled(lines[i]) {
			lines[i] = EnableLine(lines[i])
		}
	}
}
|
package user
import (
"fmt"
"github.com/gin-gonic/gin"
"lhc.go.game.center/model"
"net/http"
)
// GetList returns a paginated user list as JSON. Errors are reported with
// code 400 inside the JSON body while the HTTP status stays 200 (this
// package's convention).
func GetList(c *gin.Context) {
	page := model.NewPage()
	if err:=c.ShouldBind(&page);err!=nil {
		c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
		return
	}
	// NOTE(review): the fmt.Println/Printf calls below look like leftover
	// debug output — consider removing them (fmt would then become an
	// unused import in this file).
	fmt.Println(1)
	fmt.Printf("%#v\n",page)
	fmt.Println(2)
	// Default page size when none was supplied.
	if page.Length == 0 {
		page.Length = 10
	}
	user := model.NewUser()
	if err:=c.ShouldBind(&user);err!=nil {
		c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
		return
	}
	data, total, err := user.GetList(page)
	if err!=nil {
		c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
		return
	}
	fmt.Printf("%#v\n",data)
	c.JSON(http.StatusOK,gin.H{"code":200,"data":data,"total":total})
}
func UpdateData(c *gin.Context){
params := model.NewUser()
if err:=c.ShouldBind(¶ms);err!=nil {
c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
return
}
if err:=params.UpdateData();err!=nil {
c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
return
}
c.JSON(http.StatusOK,gin.H{"code":200,"msg":"操作成功","url":"index"})
}
func Get(c *gin.Context){
params := model.NewUser()
if err:=c.ShouldBind(¶ms);err!=nil {
c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error()})
return
}
if err:=params.GetOne();err!=nil {
c.JSON(http.StatusOK,gin.H{"code":400,"msg":err.Error(),"data":nil})
return
}
c.JSON(http.StatusOK,gin.H{"code":200,"msg":"查询成功","data":params})
}
|
package launcher_test
import (
"context"
"math/rand"
"sync"
"testing"
"github.com/go-rod/rod"
"github.com/go-rod/rod/lib/cdp"
"github.com/go-rod/rod/lib/launcher"
"github.com/go-rod/rod/lib/utils"
"github.com/ysmood/got"
)
// BenchmarkManager stress-tests the rod launcher manager: it launches num
// browsers against a local test page, at most concurrent at a time, and
// randomly drops 10% of the websocket connections without a graceful close.
// It requires the rod manager container to be running (see comment below).
func BenchmarkManager(b *testing.B) {
	const concurrent = 30 // how many browsers will run at the same time
	const num = 300 // how many browsers we will launch
	limiter := make(chan int, concurrent)
	s := got.New(b).Serve()
	// docker run --rm -p 7317:7317 ghcr.io/go-rod/rod
	s.HostURL.Host = "host.docker.internal"
	s.Route("/", ".html", `<html><body>
		ok
	</body><script>
		function wait() {
			return new Promise(r => setTimeout(r, 1000 * Math.random()))
		}
	</script></html>`)
	wg := &sync.WaitGroup{}
	wg.Add(num)
	for i := 0; i < num; i++ {
		// Block here once `concurrent` goroutines are in flight.
		limiter <- 0
		go func() {
			utils.Sleep(rand.Float64())
			ctx, cancel := context.WithCancel(context.Background())
			defer func() {
				// Delay cancellation so a graceful close can finish first.
				go func() {
					utils.Sleep(2)
					cancel()
				}()
			}()
			l := launcher.MustNewManaged("")
			u, h := l.ClientHeader()
			browser := rod.New().Client(cdp.MustStartWithURL(ctx, u, h)).MustConnect()
			page := browser.MustPage()
			wait := page.MustWaitNavigation()
			page.MustNavigate(s.URL())
			wait()
			page.MustEval(`wait()`)
			if rand.Int()%10 == 0 {
				// 10% we will drop the websocket connection without call the api to gracefully close the browser
				cancel()
			} else {
				browser.MustClose()
			}
			wg.Done()
			<-limiter
		}()
	}
	wg.Wait()
}
|
package dbModel
//
// Define your database models here.
// Reference: https://github.com/oceanho/gw/wiki/Scaffold-Guides#3-dbmodelxxgo
//
|
package generator
// TODO refactor to reuse common code
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/brainicorn/skelp/skelplate"
"github.com/brainicorn/skelp/skelputil"
)
// Fixtures shared by the repo-based generation tests: file-name formats and
// the contents expected from the skelp-test-template repository.
var (
	readmeFmtRepo = "README.md"
	projectFmtRepo = "%s.md"
	packageFmtRepo = "%s/%s.go"
	projectNameRepo = "repogen"
	newProjectNameRepo = "newrepogen"
	packageNameRepo = "repopack"
	readmeExpectedRepo = "## " + projectNameRepo + " by brainicorn"
	newReadmeExpectedRepo = "## " + newProjectNameRepo + " by brainicorn"
	projectExpectedRepo = projectNameRepo + " contains package " + packageNameRepo
	packageExpectedRepo = "package " + packageNameRepo
)
// optionsForRepoTests returns SkelpOptions pointing at fresh temp dirs for
// both the output and skelp-home locations; pair with cleanOptions.
func optionsForRepoTests() SkelpOptions {
	tmpOut, _ := ioutil.TempDir("", "skelp-repogen-test")
	tmpHome, _ := ioutil.TempDir("", "skelp-custom-home")
	opts := DefaultOptions()
	opts.HomeDirOverride = tmpHome
	opts.OutputDir = tmpOut
	return opts
}
// cleanOptions removes the temp directories created by optionsForRepoTests.
func cleanOptions(opts SkelpOptions) {
	os.RemoveAll(opts.HomeDirOverride)
	os.RemoveAll(opts.OutputDir)
}
// TestRepoGenSimple generates a project from the test-template git repo into
// an explicit output dir and checks each generated file's exact contents.
// Requires network access to github.com.
func TestRepoGenSimple(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	tmpDir := opts.OutputDir
	gen := New(opts)
	defData := map[string]interface{}{"projectName": projectNameRepo, "packageName": packageNameRepo}
	dp := skelplate.NewDataProvider(defData)
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template.git", dp.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	readmePath := filepath.Join(tmpDir, readmeFmtRepo)
	projectPath := filepath.Join(tmpDir, fmt.Sprintf(projectFmtRepo, projectNameRepo))
	packagePath := filepath.Join(tmpDir, fmt.Sprintf(packageFmtRepo, packageNameRepo, packageNameRepo))
	readme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	prjfile, err := ioutil.ReadFile(projectPath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", projectPath, err)
	}
	pkgfile, err := ioutil.ReadFile(packagePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", packagePath, err)
	}
	if string(readme) != readmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(readme), readmeExpectedRepo)
	}
	if string(prjfile) != projectExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(prjfile), projectExpectedRepo)
	}
	if string(pkgfile) != packageExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(pkgfile), packageExpectedRepo)
	}
}
// TestRepoGenCWD generates the test template with no OutputDir override,
// verifying that output lands in the process's current working directory.
// The test chdirs into a temp dir for the duration and restores the
// original cwd afterwards.
// NOTE(review): requires network access to github.com — TODO confirm CI allows it.
func TestRepoGenCWD(t *testing.T) {
	origCWD, _ := os.Getwd()
	tmpDir, _ := ioutil.TempDir("", "skelp-repogen-test")
	defer os.RemoveAll(tmpDir)
	homeDir, _ := ioutil.TempDir("", "skelp-custom-home")
	defer os.RemoveAll(homeDir)
	// Generation should target the cwd, so move into the temp dir first.
	os.Chdir(tmpDir)
	defer os.Chdir(origCWD)
	opts := DefaultOptions()
	opts.HomeDirOverride = homeDir
	gen := New(opts)
	defData := map[string]interface{}{"projectName": projectNameRepo, "packageName": packageNameRepo}
	dp := skelplate.NewDataProvider(defData)
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template", dp.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	// The rendered files are expected inside tmpDir because that is the cwd.
	readmePath := filepath.Join(tmpDir, readmeFmtRepo)
	projectPath := filepath.Join(tmpDir, fmt.Sprintf(projectFmtRepo, projectNameRepo))
	packagePath := filepath.Join(tmpDir, fmt.Sprintf(packageFmtRepo, packageNameRepo, packageNameRepo))
	readme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	prjfile, err := ioutil.ReadFile(projectPath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", projectPath, err)
	}
	pkgfile, err := ioutil.ReadFile(packagePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", packagePath, err)
	}
	if string(readme) != readmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(readme), readmeExpectedRepo)
	}
	if string(prjfile) != projectExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(prjfile), projectExpectedRepo)
	}
	if string(pkgfile) != packageExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(pkgfile), packageExpectedRepo)
	}
}
// TestRepoNotFound expects generation against a non-existent git repo to
// fail with a "repository not found" error.
func TestRepoNotFound(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	gen := New(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	err := gen.Generate("git@github.com:brainicorn/does-not-exist", provider.DataProviderFunc)
	want := "repository not found"
	if err == nil || !strings.HasPrefix(err.Error(), want) {
		t.Errorf("wrong error: have (%s), want (%s)", err, want)
	}
}
// TestRepoTemplatesFolderNotFound expects an error when the source repo
// has no skelp templates directory.
func TestRepoTemplatesFolderNotFound(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	gen := New(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	err := gen.Generate("../testdata/generator/notmplfolder", provider.DataProviderFunc)
	want := "Skelp templates dir not found"
	if err == nil || !strings.HasPrefix(err.Error(), want) {
		t.Errorf("wrong error: have (%s), want (%s)", err, want)
	}
}
// TestRepoGenBadTmpl expects generation to fail when a template does not
// parse.
func TestRepoGenBadTmpl(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	gen := New(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	if err := gen.Generate("../testdata/generator/badtmpl", provider.DataProviderFunc); err == nil {
		t.Error("expected error but was nil")
	}
}
// TestRepoMissingDescriptor expects an error when the source repo lacks a
// skelp.json descriptor.
func TestRepoMissingDescriptor(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	gen := New(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	err := gen.Generate("../testdata/generator/nodescriptor", provider.DataProviderFunc)
	want := "skelp.json not found:"
	if err == nil || !strings.HasPrefix(err.Error(), want) {
		t.Errorf("wrong error: have (%s), want (%s)", err, want)
	}
}
// TestRepoNoOverwrite generates once, then regenerates with different data
// and verifies the existing README is NOT overwritten (the default
// overwrite policy leaves existing files alone), so the first run's
// content must survive the second run.
func TestRepoNoOverwrite(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	tmpDir := opts.OutputDir
	gen := New(opts)
	defData := map[string]interface{}{"projectName": projectNameRepo, "packageName": packageNameRepo}
	dp := skelplate.NewDataProvider(defData)
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template", dp.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	readmePath := filepath.Join(tmpDir, readmeFmtRepo)
	readme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	if string(readme) != readmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(readme), readmeExpectedRepo)
	}
	// run again with different data
	newData := map[string]interface{}{"projectName": newProjectNameRepo, "packageName": packageNameRepo}
	newDP := skelplate.NewDataProvider(newData)
	err = gen.Generate("https://github.com/brainicorn/skelp-test-template", newDP.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	newReadme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	// Still the ORIGINAL expected content: the second run must not have
	// overwritten the file.
	if string(newReadme) != readmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(newReadme), readmeExpectedRepo)
	}
}
// TestRepoOverwrite generates once, then installs an always-true
// OverwriteProvider and regenerates with different data, verifying the
// README now contains the NEW project name (i.e. it was overwritten).
func TestRepoOverwrite(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	tmpDir := opts.OutputDir
	gen := New(opts)
	defData := map[string]interface{}{"projectName": projectNameRepo, "packageName": packageNameRepo}
	dp := skelplate.NewDataProvider(defData)
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template", dp.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	readmePath := filepath.Join(tmpDir, readmeFmtRepo)
	readme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	if string(readme) != readmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(readme), readmeExpectedRepo)
	}
	// run again with different data
	newData := map[string]interface{}{"projectName": newProjectNameRepo, "packageName": packageNameRepo}
	newDP := skelplate.NewDataProvider(newData)
	// Force overwrites for every file, then push the modified options back
	// into the generator.
	opts.OverwriteProvider = func(rootDir, relFile string) bool { return true }
	gen.skelpOptions = opts
	err = gen.Generate("https://github.com/brainicorn/skelp-test-template", newDP.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	newReadme, err := ioutil.ReadFile(readmePath)
	if err != nil {
		t.Errorf("can't open out file (%s): %s", readmePath, err)
	}
	// This time the file must hold the regenerated (new-name) content.
	if string(newReadme) != newReadmeExpectedRepo {
		t.Errorf("contents don't match, have (%s), want (%s)", string(newReadme), newReadmeExpectedRepo)
	}
}
// TestRepoNoDownloadNoCache disables downloads with an empty cache and
// expects generation to fail with the cached-template-not-found error.
func TestRepoNoDownloadNoCache(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	opts.Download = false
	gen := New(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template", provider.DataProviderFunc)
	want := "Cached template not found and downloads are turned off:"
	if err == nil || !strings.HasPrefix(err.Error(), want) {
		t.Errorf("wrong error: have (%s), want (%s)", err, want)
	}
}
// TestRepoNoDownload generates once to warm the template cache, then
// verifies a second generator with downloads disabled succeeds purely
// from the cache.
func TestRepoNoDownload(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	provider := skelplate.NewDataProvider(map[string]interface{}{
		"projectName": projectNameRepo,
		"packageName": packageNameRepo,
	})
	if err := New(opts).Generate("https://github.com/brainicorn/skelp-test-template", provider.DataProviderFunc); err != nil {
		t.Errorf("generation error: %s", err)
	}
	// Second pass: downloads off, cache already populated.
	opts.Download = false
	if err := New(opts).Generate("https://github.com/brainicorn/skelp-test-template", provider.DataProviderFunc); err != nil {
		t.Errorf("cached generation error: %s", err)
	}
}
// TestReoSkelpDirOverride verifies that a custom SkelpDirOverride is used
// for the git cache location after generation.
// NOTE(review): the function name looks like a typo for
// TestRepoSkelpDirOverride — confirm and rename in a follow-up.
func TestReoSkelpDirOverride(t *testing.T) {
	opts := optionsForRepoTests()
	defer cleanOptions(opts)
	skelpDir, _ := ioutil.TempDir("", "custom-skelp-dir")
	defer os.RemoveAll(skelpDir)
	opts.SkelpDirOverride = skelpDir
	gen := New(opts)
	defData := map[string]interface{}{"projectName": projectNameRepo, "packageName": packageNameRepo}
	dp := skelplate.NewDataProvider(defData)
	err := gen.Generate("https://github.com/brainicorn/skelp-test-template", dp.DataProviderFunc)
	if err != nil {
		t.Errorf("generation error: %s", err)
	}
	// NOTE(review): skelpDir is an absolute temp path being joined under
	// HomeDirOverride — presumably filepath.Join flattens this to the
	// intended cache layout; verify against how SkelpDirOverride is
	// resolved in the generator.
	readmeCachePath := filepath.Join(opts.HomeDirOverride, skelpDir, "gitcache", "github.com", "brainicorn", "skelp-test-template", readmeFmtRepo)
	if !skelputil.PathExists(readmeCachePath) {
		t.Errorf("cached readme not found: %s", readmeCachePath)
	}
}
|
package utils
import (
"bytes"
"net/url"
"sort"
)
// KeySet returns the keys of dict as a slice, in unspecified order.
func KeySet(dict map[string]string) []string {
	keys := make([]string, 0, len(dict))
	// Idiomatic key-only iteration (the blank value identifier is
	// unnecessary and flagged by gofmt/vet style checks).
	for k := range dict {
		keys = append(keys, k)
	}
	return keys
}
// BuildQuery converts dict into url.Values suitable for building a URL
// query string.
func BuildQuery(dict map[string]string) url.Values {
	query := url.Values{}
	for key, value := range dict {
		query.Set(key, value)
	}
	return query
}
// PutAll copies every key/value pair from src into dest, overwriting any
// keys dest already holds.
func PutAll(dest, src map[string]string) {
	for key, value := range src {
		dest[key] = value
	}
}
// PrepareContent builds the request payload string from dict: keys are
// sorted lexicographically and joined as "k=v" pairs separated by '&'.
// Entries whose VALUE is empty are skipped (the original comment said
// "empty keys", but the code filters on values).
func PrepareContent(dict map[string]string) string {
	keys := make([]string, 0, len(dict))
	for k := range dict {
		keys = append(keys, k)
	}
	// Sorted keys give deterministic output, required for signed payloads.
	sort.Strings(keys)
	var buf bytes.Buffer
	for _, k := range keys {
		v := dict[k]
		// Skip entries with empty values.
		if v == "" {
			continue
		}
		if buf.Len() > 0 {
			buf.WriteByte('&')
		}
		buf.WriteString(k)
		buf.WriteByte('=')
		buf.WriteString(v)
	}
	return buf.String()
}
|
package album
import (
"encoding/json"
"io"
"io/ioutil"
"log"
"net/http"
"strings"
"gopkg.in/matryer/respond.v1"
"github.com/gorilla/mux"
"github.com/dgrijalva/jwt-go"
// "github.com/gorilla/context"
)
// Controller wires the album HTTP handlers to a Repository implementation
// that performs the actual persistence.
type Controller struct {
	Repository Repository
}
// Index GET /
// Index answers the root route with a static welcome message.
func (c *Controller) Index(w http.ResponseWriter, r *http.Request) {
	const welcome = "welcome to the era of golang"
	respond.With(w, r, http.StatusOK, welcome)
}
//GET albums
// GetAlbums lists every album from the repository. It expects auth
// middleware to have stored JWT claims under the "authUser" context key.
func (c *Controller) GetAlbums(w http.ResponseWriter, r *http.Request) {
	// NOTE(review): these type assertions panic if the middleware did not
	// populate the context or the claim shapes differ — confirm the
	// middleware guarantees this, or use the comma-ok form.
	authUser := r.Context().Value("authUser")
	user := authUser.(jwt.MapClaims)["user"]
	business := authUser.(jwt.MapClaims)["business"]
	log.Println("user_name===>",user.(map[string]interface{})["DisplayName"])
	log.Println("business_name===>",business.(map[string]interface{})["business_name"])
	albums := c.Repository.GetAlbums() // list of all albums
	respond.With(w, r, http.StatusOK, albums)
}
//Get Album
// GetAlbum fetches a single album identified by the {id} route variable.
func (c *Controller) GetAlbum(w http.ResponseWriter, r *http.Request) {
	albumID := mux.Vars(r)["id"]
	album := c.Repository.GetAlbum(albumID)
	log.Println(album)
	respond.With(w, r, http.StatusOK, album)
}
// AddAlbum POST /
// Decodes an Album from the request body (capped at 1 MiB), stores it via
// the repository, and echoes the created album back with 201.
func (c *Controller) AddAlbum(w http.ResponseWriter, r *http.Request) {
	var album Album
	// Cap the body read at 1 MiB to avoid unbounded memory use.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
	if err != nil {
		// log.Fatalln would kill the entire server on one bad request;
		// log and answer the client, then stop (the original fell through
		// and kept processing a nil body).
		log.Println("Error AddAlbum", err)
		respond.With(w, r, http.StatusInternalServerError, err)
		return
	}
	if err := r.Body.Close(); err != nil {
		log.Println("Error AddAlbum", err)
	}
	if err := json.Unmarshal(body, &album); err != nil {
		// 422: body was readable but is not a valid Album payload.
		// Respond once and return — the original additionally re-encoded
		// the error onto the already-written response.
		respond.With(w, r, 422, err)
		return
	}
	// Persist the album; the repository reports success as a bool.
	if success := c.Repository.AddAlbum(album); !success {
		respond.With(w, r, http.StatusInternalServerError, err)
		return
	}
	respond.With(w, r, http.StatusCreated, album)
}
// UpdateAlbum PUT /
// Decodes an Album from the request body (capped at 1 MiB), updates the
// stored record via the repository, and echoes the album back with 200.
func (c *Controller) UpdateAlbum(w http.ResponseWriter, r *http.Request) {
	var album Album
	// Cap the body read at 1 MiB to avoid unbounded memory use.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
	if err != nil {
		// log.Fatalln would kill the entire server on one bad request;
		// log and answer the client, then stop.
		log.Println("Error UpdateAlbum", err)
		respond.With(w, r, http.StatusInternalServerError, err)
		return
	}
	if err := r.Body.Close(); err != nil {
		log.Println("Error UpdateAlbum", err)
	}
	if err := json.Unmarshal(body, &album); err != nil {
		// 422: the body was readable but is not a valid Album payload.
		// Respond once and return — the original additionally re-encoded
		// the error onto the already-written response.
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		respond.With(w, r, 422, err) // unprocessable entity
		return
	}
	// Apply the update; the repository reports success as a bool.
	if success := c.Repository.UpdateAlbum(album); !success {
		respond.With(w, r, http.StatusInternalServerError, err)
		return
	}
	respond.With(w, r, http.StatusOK, album)
}
// DeleteAlbum DELETE /
// Removes the album identified by the {id} route variable. The repository
// signals failure with a non-empty error string that embeds an HTTP code.
func (c *Controller) DeleteAlbum(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	if errMsg := c.Repository.DeleteAlbum(id); errMsg != "" { // delete an album by id
		// Map the stringly-typed error onto a status code and RETURN —
		// the original fell through and wrote a second 200 response after
		// the error response.
		if strings.Contains(errMsg, "404") {
			respond.With(w, r, http.StatusNotFound, errMsg)
			return
		}
		if strings.Contains(errMsg, "500") {
			respond.With(w, r, http.StatusInternalServerError, errMsg)
			return
		}
		// Unrecognized error strings fall through to 200, matching the
		// original behavior for that case.
	}
	respond.With(w, r, http.StatusOK, "")
}
|
package database
import (
"ambassador/src/models"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// DB is the package-wide gorm connection handle, populated by Connect.
var DB *gorm.DB

// Connect opens the MySQL connection used by the application and stores it
// in the package-level DB handle, panicking (and thus aborting startup)
// when the database is unreachable.
func Connect() {
	var err error
	dsn := "root:root@tcp(db:3306)/ambassador?charset=utf8mb4&parseTime=True&loc=Local"
	// Assign with '=' (not ':=') so the package-level DB is populated
	// rather than a shadowed local.
	DB, err = gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		// Include the driver error so the startup failure is diagnosable;
		// the original panic message discarded it.
		panic("could not connect to the database: " + err.Error())
	}
}
// AutoMigrate syncs the database schema for the registered models.
// NOTE(review): the error returned by gorm's AutoMigrate is discarded, so
// a failed migration is silent — consider checking it.
func AutoMigrate() {
	DB.AutoMigrate(models.User{})
}
|
package shell
import (
"encoding/json"
"errors"
"sync"
"time"
)
// WindowEventCallback - function signature for window callbacks. The byte
// slice carries the raw JSON message from the window content.
type WindowEventCallback func([]byte)
// WindowOnCloseCallback - function called when window is closed.
type WindowOnCloseCallback func()
// Window handle to an electron window.
// The embedded RWMutex guards the listeners map (see Listen and
// windowListenCallback).
type Window struct {
	WindowID int
	electron *Electron
	listeners map[string]WindowEventCallback
	closedCallback WindowOnCloseCallback
	sync.RWMutex
}
// WindowOptions - creation options for creating a window with various properties.
// Serialized to JSON and sent to the remote Electron shell; field names
// mirror Electron's BrowserWindow options. Bool fields deliberately lack
// omitempty so explicit false values are transmitted.
type WindowOptions struct {
	Width int `json:"width,omitempty"`
	Height int `json:"height,omitempty"`
	X int `json:"x,omitempty"`
	Y int `json:"y,omitempty"`
	UseContentSize bool `json:"useContentSize"`
	Center bool `json:"center"`
	MinWidth int `json:"minWidth,omitempty"`
	MinHeight int `json:"minHeight,omitempty"`
	MaxWidth int `json:"maxWidth,omitempty"`
	MaxHeight int `json:"maxHeight,omitempty"`
	Resizable bool `json:"resizable"`
	AlwaysOnTop bool `json:"alwaysOnTop"`
	Fullscreen bool `json:"fullscreen"`
	SkipTaskbar bool `json:"skipTaskbar"`
	Kiosk bool `json:"kiosk"`
	Title string `json:"title,omitempty"`
	Show bool `json:"show"`
	Frame bool `json:"frame"`
	AcceptFirstMouse bool `json:"acceptFirstMouse"`
	BackgroundColor string `json:"backgroundColor,omitempty"`
}
// windowIDCommand - used for the many messages that involve only a WindowID
// (close, dev-tools toggles, and the shell's window_closed notification).
type windowIDCommand struct {
	WindowID int
}
// CreateWindow - creates a window on the remote shell. Takes a WindowOptions which
// loosely maps to Electron's window options. It blocks until the shell
// acknowledges the creation or a five second timeout elapses.
func (e *Electron) CreateWindow(options WindowOptions) (*Window, error) {
	// convert WindowOptions to JSON and send the command to the remote shell.
	jsonData, err := json.Marshal(options)
	if nil != err {
		return nil, err
	}
	e.Command("window_create", jsonData)
	// wait for a response from the remote shell for 5 seconds or timeout.
	var response []byte
	select {
	case r := <-windowCreationResponses:
		response = r
	case <-time.After(time.Second * 5):
		// Error strings are lowercase without punctuation (Go convention);
		// the original read "WindowCreate timed out.".
		return nil, errors.New("window create timed out")
	}
	// turn the JSON data into a WindowID.
	window := Window{electron: e, listeners: make(map[string]WindowEventCallback)}
	err = json.Unmarshal(response, &window)
	if nil != err {
		return nil, err
	}
	// lock electron so that the active window map can be written to.
	e.Lock()
	defer e.Unlock()
	// store the window.
	e.activeWindows[window.WindowID] = &window
	return &window, nil
}
// Close - used to shutdown a window. It deregisters the window locally and
// tells the remote shell to close it.
func (w *Window) Close() {
	// Mutating the shared activeWindows map requires the electron lock.
	w.electron.Lock()
	defer w.electron.Unlock()
	delete(w.electron.activeWindows, w.WindowID)
	payload, _ := json.Marshal(windowIDCommand{WindowID: w.WindowID})
	w.electron.Command("window_close", payload)
}
// Here are the channels for synchronous window ops.
// windowChannelsInitialized guards one-time channel creation inside
// InitializeWindowCallbacks.
var windowChannelsInitialized bool
var windowCreationResponses chan []byte
var windowLoadResponses chan []byte
// Here are the callbacks that pipe to channels for synchronous window ops.
// windowCreationCallback forwards the shell's creation response to the
// channel that CreateWindow blocks on.
func windowCreationCallback(data []byte) {
	windowCreationResponses <- data
}
// windowLoadCompletionCallback forwards the shell's load-complete event to
// the channel that LoadURL blocks on.
func windowLoadCompletionCallback(data []byte) {
	windowLoadResponses <- data
}
// windowListenCallbackPartial is the envelope of a message coming from a
// window's content: the Message payload is left as raw JSON so it can be
// handed to the subscriber's callback undecoded.
type windowListenCallbackPartial struct {
	WindowID int
	MessageID string
	Message json.RawMessage
}
// windowListenCallback routes a message from a window's content to the
// callback registered via Window.Listen. Lock order is electron read-lock
// first, then the target window's read-lock; callbacks run on their own
// goroutine so a slow subscriber cannot hold these locks.
func (e *Electron) windowListenCallback(data []byte) {
	e.RLock()
	defer e.RUnlock()
	// pull the partial message out of data
	partialData := windowListenCallbackPartial{}
	err := json.Unmarshal(data, &partialData)
	if nil != err {
		// Malformed payloads are silently dropped.
		return
	}
	// pass the data on to the callback.
	if key, ok := e.activeWindows[partialData.WindowID]; ok && nil != key {
		key.RLock()
		defer key.RUnlock()
		if key1, ok1 := key.listeners[partialData.MessageID]; ok1 && nil != key1 {
			go key1(partialData.Message)
		}
	}
}
// windowClosedCallback handles the shell's notification that a window was
// closed (by Electron or the user): it fires the window's registered
// close callback on its own goroutine and drops the window from the
// active map. Holds the electron write lock throughout.
func (e *Electron) windowClosedCallback(data []byte) {
	e.Lock()
	defer e.Unlock()
	wID := windowIDCommand{}
	err := json.Unmarshal(data, &wID)
	if nil != err {
		// Malformed payloads are silently dropped.
		return
	}
	if key, ok := e.activeWindows[wID.WindowID]; ok && nil != key {
		if nil != key.closedCallback {
			go key.closedCallback()
		}
		delete(e.activeWindows, wID.WindowID)
	}
}
// InitializeWindowCallbacks - sets up callbacks and channels for synchronous window operations.
// Must be called before CreateWindow/LoadURL are used.
// NOTE(review): the windowChannelsInitialized guard is a plain bool, so
// concurrent first calls could race; presumably this is only invoked once
// during startup — confirm, or switch to sync.Once.
func InitializeWindowCallbacks(electron *Electron) {
	if !windowChannelsInitialized {
		windowChannelsInitialized = true
		windowCreationResponses = make(chan []byte)
		windowLoadResponses = make(chan []byte)
	}
	// Wire the shell-side events to their local handlers.
	electron.Listen("window_create_response", windowCreationCallback)
	electron.Listen("window_load_complete", windowLoadCompletionCallback)
	electron.Listen("window_get_subscribed_message", electron.windowListenCallback)
	electron.Listen("window_closed", electron.windowClosedCallback)
}
// loadURLCommand is the JSON payload for the window_load_url command.
type loadURLCommand struct {
	WindowID int
	URL string
}
// LoadURL - Commands the window to load a URL. Blocks until the shell
// reports the load finished or a 30 second timeout elapses.
func (w *Window) LoadURL(location string) error {
	// create the JSON for the loadURL command.
	c := loadURLCommand{WindowID: w.WindowID, URL: location}
	jsonData, err := json.Marshal(c)
	if nil != err {
		return err
	}
	w.electron.Command("window_load_url", jsonData)
	select {
	case <-windowLoadResponses:
		return nil
	case <-time.After(time.Second * 30):
		// Error strings are lowercase without punctuation (Go convention);
		// the original read "LoadURL timed out.".
		return errors.New("load url timed out")
	}
}
// listenToMessageCommand - struct that describes the JSON that is sent to
// subscribe to messages from a window's content.
type listenToMessageCommand struct {
	WindowID int
	MessageID string
}
// Listen registers a callback for messages published under messageID by
// the WINDOW CONTENT (the webpage displayed by Electron).
func (w *Window) Listen(messageID string, callback WindowEventCallback) error {
	// The listeners map is shared with windowListenCallback, so take the
	// window's write lock while registering.
	w.Lock()
	defer w.Unlock()
	payload, err := json.Marshal(listenToMessageCommand{w.WindowID, messageID})
	if err != nil {
		return err
	}
	w.listeners[messageID] = callback
	// Tell the shell to start forwarding this message ID.
	w.electron.Command("window_subscribe_message", payload)
	return nil
}
// sendMessageCommand - filled to send a message to the WINDOW CONTENT.
// The Message payload is carried as a string inside the JSON envelope.
type sendMessageCommand struct {
	WindowID int
	MessageID string
	Message string
}
// Message sends a payload to the WINDOW CONTENT (webpage displayed by
// Electron) under the given messageID.
func (w *Window) Message(messageID string, message []byte) error {
	cmd := sendMessageCommand{
		WindowID:  w.WindowID,
		MessageID: messageID,
		Message:   string(message),
	}
	payload, err := json.Marshal(cmd)
	if err != nil {
		return err
	}
	w.electron.Command("window_send_message", payload)
	return nil
}
// OpenDevTools asks the shell to open a developer console on this window.
func (w *Window) OpenDevTools() {
	payload, _ := json.Marshal(windowIDCommand{WindowID: w.WindowID})
	w.electron.Command("window_open_dev_tools", payload)
}
// CloseDevTools asks the shell to close developer tools on this window.
func (w *Window) CloseDevTools() {
	payload, _ := json.Marshal(windowIDCommand{WindowID: w.WindowID})
	w.electron.Command("window_close_dev_tools", payload)
}
// OnClosed - registers a callback invoked when the window is closed,
// either by ELECTRON or the user.
func (w *Window) OnClosed(callback WindowOnCloseCallback) {
	// Take the window's write lock for consistency with Listen; the
	// original assigned the field with no synchronization.
	w.Lock()
	defer w.Unlock()
	w.closedCallback = callback
}
|
package db
import (
"gopkg.in/mgo.v2"
"log"
"os"
"github.com/antholord/poe-ML-indexer/api"
)
// dbString is read from the "db" environment variable; when empty a
// hard-coded fallback connection string is used (see Connect).
var dbString = os.Getenv("db")
// DB bundles the mgo session with the collection used for item inserts.
type DB struct {
	Session *mgo.Session
	ScTempColl *mgo.Collection
}
// Connect dials MongoDB using the "db" env var (falling back to a
// hard-coded connection string) and returns a DB wired to the Legacy
// collection. It panics if the dial fails.
func Connect() *DB {
	if dbString == "" {
		// dbString = "mongodb://test:test@ds123371.mlab.com:23371/heroku_lnc7sl64"
		// SECURITY(review): credentials are hard-coded in source; move
		// them into configuration/secrets management.
		dbString = "mongodb://woned:w0n3dp455 @34.209.73.153:27017/test"
	}
	session, err := mgo.Dial(dbString)
	if err != nil {
		panic(err)
	}
	coll := session.DB("heroku_lnc7sl64").C("Legacy")
	return &DB{Session: session, ScTempColl: coll}
}
// BulkInsert writes itemSlice to the collection as a single unordered
// bulk operation. It is a no-op for an empty slice.
func (db *DB) BulkInsert(itemSlice []*api.Item) {
	// Receiver renamed from "DB" — it shadowed the type name.
	if len(itemSlice) == 0 {
		return
	}
	bulk := db.ScTempColl.Bulk()
	bulk.Unordered()
	for _, item := range itemSlice {
		bulk.Insert(item)
	}
	if _, err := bulk.Run(); err != nil {
		// NOTE(review): log.Fatal exits the whole process on an insert
		// error; consider returning the error to the caller instead.
		log.Fatal(err)
	}
}
|
package db
import (
"github.com/RainerGevers/tasker/db/migrations"
"github.com/RainerGevers/tasker/models"
"gorm.io/gorm"
"log"
)
// RunMigrations auto-migrates the bookkeeping tables, reads the versions
// already recorded in the database, and applies any known migration whose
// version stamp is not yet recorded.
func RunMigrations(db *gorm.DB) {
	// Ensure the Version and User tables exist; the error is deliberately
	// ignored here (AutoMigrate is idempotent).
	_ = db.AutoMigrate(&models.Version{}, &models.User{})
	var versions []models.Version
	dbVersions := db.Select("version").Find(&versions)
	if dbVersions.Error != nil {
		log.Fatal(dbVersions.Error)
	}
	// TODO: Pluck https://v2.gorm.io/docs/advanced_query.html
	var versionNumbers []string
	for _, version := range versions {
		versionNumbers = append(versionNumbers, version.Version)
	}
	// Pending = known migrations minus already-applied ones.
	versionsToMigrate := difference(availableMigrations(), versionNumbers)
	if len(versionsToMigrate) == 0 {
		return
	}
	// Dispatch each pending version stamp to its migration function.
	// NOTE(review): nothing visible here records the applied version back
	// into the versions table — presumably the migration functions do;
	// confirm.
	for _, versionToMigrate := range versionsToMigrate {
		switch versionToMigrate {
		case "13082020175800":
			migrations.CreateSessionsTable13082020175800(db)
		case "13082020205200":
			migrations.AddRefreshToSessionsTable13082020205200(db)
		}
	}
}
// availableMigrations lists every known migration version stamp in the
// order they should be applied.
func availableMigrations() []string {
	versions := []string{
		"13082020175800",
		"13082020205200",
	}
	return versions
}
// difference returns the elements of slice1 that are not present in
// slice2, preserving slice1's order and duplicates.
func difference(slice1 []string, slice2 []string) []string {
	// Build a set over slice2 for O(1) membership tests instead of the
	// original O(n*m) nested scan.
	exclude := make(map[string]struct{}, len(slice2))
	for _, s := range slice2 {
		exclude[s] = struct{}{}
	}
	var diff []string
	for _, s := range slice1 {
		if _, found := exclude[s]; !found {
			diff = append(diff, s)
		}
	}
	return diff
}
|
package main
import (
"bytes"
"context"
"errors"
"fmt"
blocks "github.com/ipfs/go-block-format"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipld/go-ipld-prime"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
"github.com/ipld/go-ipld-prime/multicodec"
"github.com/ipld/go-ipld-prime/node/basicnode"
"github.com/kenlabs/pando/pkg/util/log"
"io"
"time"
)
// decodeIPLDNode decodes an ipld.Node from bytes read from an io.Reader,
// looking the codec up in the multicodec registry and building the node
// into the supplied prototype.
func decodeIPLDNode(codec uint64, r io.Reader, prototype ipld.NodePrototype) (ipld.Node, error) {
	// NOTE: Considering using the schema prototypes. This was failing, using
	// a map gives flexibility. Maybe is worth revisiting this again in the
	// future.
	decoder, err := multicodec.LookupDecoder(codec)
	if err != nil {
		return nil, err
	}
	builder := prototype.NewBuilder()
	if err = decoder(builder, r); err != nil {
		return nil, err
	}
	return builder.Build(), nil
}
// isMetadata reports whether n carries the three fields that identify a
// metadata envelope: Signature, Provider and Payload.
func isMetadata(n ipld.Node) bool {
	for _, field := range []string{"Signature", "Provider", "Payload"} {
		// Lookup errors are ignored; a missing field yields a nil node.
		node, _ := n.LookupByString(field)
		if node == nil {
			return false
		}
	}
	return true
}
// MkLinkSystem builds an ipld.LinkSystem backed by the given blockstore.
// Reads come straight from the blockstore; writes buffer the block,
// decode it to inspect its type (forwarding FinishedTask statuses on ch
// when non-nil), and then persist it to the blockstore.
func MkLinkSystem(bs blockstore.Blockstore, ch chan Status) ipld.LinkSystem {
	logger := log.NewSubsystemLogger()
	lsys := cidlink.DefaultLinkSystem()
	lsys.TrustedStorage = true
	lsys.StorageReadOpener = func(lnkCtx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) {
		asCidLink, ok := lnk.(cidlink.Link)
		if !ok {
			return nil, fmt.Errorf("unsupported link types")
		}
		block, err := bs.Get(lnkCtx.Ctx, asCidLink.Cid)
		if err != nil {
			return nil, err
		}
		return bytes.NewBuffer(block.RawData()), nil
	}
	lsys.StorageWriteOpener = func(lctx ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) {
		buf := bytes.NewBuffer(nil)
		return buf, func(lnk ipld.Link) error {
			c := lnk.(cidlink.Link).Cid
			codec := lnk.(cidlink.Link).Prefix().Codec
			// Snapshot the raw bytes BEFORE decoding: decodeIPLDNode
			// consumes buf, so origBuf is what actually gets stored.
			origBuf := buf.Bytes()
			log := logger.With("cid", c)
			// Decode the node to check its type.
			n, err := decodeIPLDNode(codec, buf, basicnode.Prototype.Any)
			if err != nil {
				log.Errorw("Error decoding IPLD node in linksystem", "err", err)
				return errors.New("bad ipld data")
			}
			//fmt.Println("Reveiving ipld node:")
			t, err := UnwrapFinishedTask(n)
			if err == nil {
				//dagjson.Encode(n, os.Stdout)
				//fmt.Println(t.Status)
				// Forward the task status without blocking the committer;
				// give the receiver at most 3 seconds.
				if ch != nil {
					go func() {
						ctx, cncl := context.WithTimeout(context.Background(), time.Second*3)
						defer cncl()
						select {
						case _ = <-ctx.Done():
							log.Errorf("time out for send status info")
							return
						case ch <- t.Status:
						}
					}()
				}
			} else {
				log.Debugf("not FinishedTask, ignore...")
			}
			//dagjson.Encode(n, os.Stdout)
			if isMetadata(n) {
				log.Infow("Received metadata")
				// todo: how to deal different signature version
				//_, peerid, err := verifyMetadata(n)
				//if err != nil {
				//	return err
				//}
				block, err := blocks.NewBlockWithCid(origBuf, c)
				if err != nil {
					return err
				}
				return bs.Put(lctx.Ctx, block)
			}
			// Non-metadata nodes are persisted too, just with a debug note.
			block, err := blocks.NewBlockWithCid(origBuf, c)
			if err != nil {
				return err
			}
			log.Debugf("Received unexpected IPLD node, cid: %s", c.String())
			return bs.Put(lctx.Ctx, block)
		}, nil
	}
	return lsys
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.