text stringlengths 11 4.05M |
|---|
package main
import (
"github.com/garyburd/redigo/redis"
)
var (
	// RedisAddr is the address the Redis server listens on.
	RedisAddr = ":6379"
	// keyspacePrefix is the pub/sub channel prefix for keyspace
	// notifications on database 0.
	keyspacePrefix = "__keyspace@0__:"
	// workerKeyPrefix namespaces the keys handled by this worker.
	workerKeyPrefix = "test/"
)
// NewConn opens a new TCP connection to the Redis server at RedisAddr.
func NewConn() (redis.Conn, error) {
	conn, err := redis.Dial("tcp", RedisAddr)
	if err != nil {
		return nil, err
	}
	return conn, nil
}
|
package main
import(
"fmt"
)
// search finds target in a sorted array that has been rotated at an unknown
// pivot (no duplicates assumed) and returns its index, or -1 if absent.
// Runs in O(log n): at every step at least one half of [left, right] is
// sorted, which tells us which half can contain target.
func search(nums []int, target int) int {
	left, right := 0, len(nums)-1
	for left <= right {
		// Overflow-safe midpoint (idiomatic lower mid; the original used an
		// unconventional upper mid and a C-style `for ;cond;` header).
		mid := left + (right-left)/2
		switch {
		case nums[mid] == target:
			return mid
		case nums[left] <= nums[mid]:
			// Left half is sorted.
			if nums[left] <= target && target < nums[mid] {
				right = mid - 1
			} else {
				left = mid + 1
			}
		default:
			// Right half is sorted.
			if nums[mid] < target && target <= nums[right] {
				left = mid + 1
			} else {
				right = mid - 1
			}
		}
	}
	return -1
}
// main demonstrates search on a sample rotated array.
func main() {
	data := []int{4, 5, 6, 7, 0, 1, 2}
	idx := search(data, 7)
	fmt.Println(idx)
}
|
package main
import (
"./models"
"database/sql"
"flag"
"fmt"
_ "github.com/go-sql-driver/mysql"
"log"
)
var (
	schemaFilePath string // path to the schema xml file (-schema)
	driver         string // database driver name (-driver)
	dbUser         string // database user (-user)
	dbPwd          string // database password (-pwd)
	dbHost         string // database host (-host)
	dbPort         int    // database port (-port)
	drop           = true // always drop tables before re-creating; not flag-configurable
	operation      string // operation to run: show, populate, devsql or gostruct (-op)
)
// init registers and parses the command line flags before main runs.
//
// NOTE(review): calling flag.Parse inside init is an anti-pattern — flags
// registered by other packages after this init runs would not be parsed.
// Moving Parse to main would change initialization order; confirm before doing so.
func init() {
	flag.StringVar(&schemaFilePath, "schema", "schema.xml", "The schema xml file path")
	flag.StringVar(&driver, "driver", "mysql", "The driver of the database")
	flag.StringVar(&dbUser, "user", "", "Database user name")
	flag.StringVar(&dbPwd, "pwd", "", "The password of the database user")
	flag.StringVar(&dbHost, "host", "127.0.0.1", "The database host name")
	flag.IntVar(&dbPort, "port", 3306, "The port for the database")
	flag.StringVar(&operation, "op", "show", "The operation to do, can be: show, populate, devsql, gostruct")
	flag.Parse()
}
// main parses the schema file and dispatches to the handler selected by -op.
func main() {
	dbSchema, err := models.ParseSchema(schemaFilePath)
	if err != nil {
		log.Println("Failed to parse schema", err)
		return
	}
	// Dispatch table: operation name -> handler.
	handlers := map[string]func(models.DBSchema){
		"populate": PopulateTables,
		"show":     ShowTableInfo,
		"devsql":   ShowDevSql,
		"gostruct": ShowGoStruct,
	}
	handler, ok := handlers[operation]
	if !ok {
		log.Println("Unknown operation:", operation)
		return
	}
	handler(dbSchema)
}
// ShowDevSql prints the generated select, insert and update SQL for every
// table in the schema, each highlighted in red, with tables separated by a
// blank line.
func ShowDevSql(dbSchema models.DBSchema) {
	for index, table := range dbSchema.Tables {
		if index > 0 {
			fmt.Println()
		}
		fmt.Println("========== " + table.Name + " ==========")

		// The three statement kinds share identical print/error handling;
		// deduplicated into printDevSQL.
		sql, err := table.GetSelectSQL()
		printDevSQL("Select", "select", sql, err)

		sql, err = table.GetInsertSQL()
		printDevSQL("Insert", "insert", sql, err)

		sql, err = table.GetUpdateSQL()
		printDevSQL("Update", "update", sql, err)
	}
}

// printDevSQL prints one generated SQL statement highlighted in red, or logs
// the generation error. title is the capitalized label used in the output
// ("Select"); name is the lowercase form used in the error message ("select").
func printDevSQL(title, name, sql string, err error) {
	if err != nil {
		log.Println("Failed to get "+name+" sql.", err)
		return
	}
	fmt.Printf("%s SQL:\n\x1b[31;1m%s\x1b[0m\n", title, sql)
}
// ShowGoStruct prints a generated Go model struct for every table in the
// schema, highlighted in red, with tables separated by a blank line.
func ShowGoStruct(dbSchema models.DBSchema) {
	for i, table := range dbSchema.Tables {
		if i > 0 {
			fmt.Println()
		}
		fmt.Println("// model for table " + table.Name)
		goStruct, err := table.GetGoStruct()
		if err != nil {
			log.Println("Failed to get go struct.", err)
			continue
		}
		fmt.Printf("\x1b[31;1m%s\x1b[0m\n", goStruct)
	}
}
// ShowTableInfo prints the drop and create SQL for every table in the
// schema, highlighted in red. Tables whose SQL cannot be generated are
// logged and skipped.
func ShowTableInfo(dbSchema models.DBSchema) {
	for _, table := range dbSchema.Tables {
		dropSQL, err := table.GetDropSQL()
		if err != nil {
			log.Println("Failed to generate drop sql.", err)
			continue
		}
		fmt.Printf("Drop sql:\n\x1b[31;1m%s\x1b[0m\n\n", dropSQL)

		createSQL, err := table.GetCreateSQL()
		if err != nil {
			log.Println("Generate create sql failed.", err)
			continue
		}
		fmt.Printf("Create sql:\n\x1b[31;1m%s\x1b[0m\n\n", createSQL)
	}
}
// PopulateTables connects to the database described by the command-line
// flags and creates every table in the schema, dropping existing tables
// first when drop is set. It stops at the first failed create.
func PopulateTables(dbSchema models.DBSchema) {
	if dbSchema.Name == "" || dbUser == "" || dbHost == "" || dbPwd == "" {
		log.Println("The db name, user, host or password is not specified.")
		return
	}
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", dbUser, dbPwd, dbHost, dbPort, dbSchema.Name)
	log.Println("The data source name is", dsn)
	// Note: sql.Open only validates the DSN; the first real connection is
	// established lazily when a statement executes.
	db, err := sql.Open(driver, dsn)
	if err != nil {
		// Fix: include the actual error (it was silently dropped before).
		log.Println("Connect database failed.", err)
		return
	}
	defer db.Close()
	for _, table := range dbSchema.Tables {
		if drop {
			// Renamed from `sql` to `stmt` to stop shadowing the
			// database/sql package inside this scope.
			stmt, err := table.GetDropSQL()
			if err != nil {
				log.Println("Failed to generate drop sql.", err)
				continue
			}
			log.Println("Execute sql:\n" + stmt)
			if _, err = db.Exec(stmt); err != nil {
				// A failed drop (e.g. table absent) is non-fatal.
				log.Println("Drop table failed.", err)
				log.Println("We will try to create table.")
			}
		}
		stmt, err := table.GetCreateSQL()
		if err != nil {
			log.Println("Generate create sql failed.", err)
			continue
		}
		log.Println("Execute sql:\n" + stmt)
		if _, err = db.Exec(stmt); err != nil {
			log.Println("Execute sql "+stmt+" Failed", err)
			return
		}
	}
}
|
package main
import (
"fmt"
"strconv"
)
// main prints a sample formatted temperature report.
func main() {
	report := generate(12, "気温", 22.4)
	fmt.Println(report)
}
// generate formats a report string of the form "<x>時の<y>は<z>", where z is
// rendered with exactly one digit after the decimal point.
func generate(x int, y string, z float64) string {
	return fmt.Sprintf("%d時の%sは%s", x, y, strconv.FormatFloat(z, 'f', 1, 64))
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build linux
package osidm
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"syscall"
"github.com/avfs/avfs"
)
// To avoid flaky tests when executing commands or making system calls as root,
// the current goroutine is locked to the operating system thread just before calling the function.
// For details see https://github.com/golang/go/issues/1435
// GroupAdd adds a new group by shelling out to groupadd(8).
// Requires root privileges; returns avfs.ErrPermDenied otherwise.
func (idm *OsIdm) GroupAdd(name string) (avfs.GroupReader, error) {
	if !idm.initUser.IsRoot() || !idm.CurrentUser().IsRoot() {
		return nil, avfs.ErrPermDenied
	}

	// Pin the goroutine to its OS thread while running the command as root.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var stderr bytes.Buffer
	cmd := exec.Command("groupadd", name)
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		// Map groupadd's stderr text onto typed errors.
		msg := strings.TrimSpace(stderr.String())
		if msg == "groupadd: group '"+name+"' already exists" {
			return nil, avfs.AlreadyExistsGroupError(name)
		}
		return nil, avfs.UnknownError(err.Error() + msg)
	}

	return idm.LookupGroup(name)
}
// GroupDel deletes an existing group by shelling out to groupdel(8).
// Requires root privileges; returns avfs.ErrPermDenied otherwise.
func (idm *OsIdm) GroupDel(name string) error {
	if !idm.initUser.IsRoot() || !idm.CurrentUser().IsRoot() {
		return avfs.ErrPermDenied
	}

	// Pin the goroutine to its OS thread while running the command as root.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var stderr bytes.Buffer
	cmd := exec.Command("groupdel", name)
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		// Map groupdel's stderr text onto typed errors.
		msg := strings.TrimSpace(stderr.String())
		if msg == "groupdel: group '"+name+"' does not exist" {
			return avfs.UnknownGroupError(name)
		}
		return avfs.UnknownError(err.Error() + msg)
	}

	return nil
}
// LookupGroup looks up a group by name. If the group cannot be found, the
// returned error is of type UnknownGroupError.
func (idm *OsIdm) LookupGroup(name string) (avfs.GroupReader, error) {
	// Thread-pinned to keep system calls on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	g, err := lookupGroup(name)
	return g, err
}
// LookupGroupId looks up a group by groupid. If the group cannot be found,
// the returned error is of type UnknownGroupIdError.
func (idm *OsIdm) LookupGroupId(gid int) (avfs.GroupReader, error) {
	// Thread-pinned to keep system calls on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	g, err := lookupGroupId(gid)
	return g, err
}
// LookupUser looks up a user by username. If the user cannot be found, the
// returned error is of type UnknownUserError.
func (idm *OsIdm) LookupUser(name string) (avfs.UserReader, error) {
	// Thread-pinned to keep system calls on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	u, err := lookupUser(name)
	return u, err
}
// LookupUserId looks up a user by userid. If the user cannot be found, the
// returned error is of type UnknownUserIdError.
func (idm *OsIdm) LookupUserId(uid int) (avfs.UserReader, error) {
	// Thread-pinned to keep system calls on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	u, err := lookupUserId(uid)
	return u, err
}
// User sets the current user of the file system to the user named name and
// returns it. If the initial user does not have root privileges,
// avfs.ErrPermDenied is returned.
func (idm *OsIdm) User(name string) (avfs.UserReader, error) {
	const op = "user"
	// Only an initially-root process can switch identities at all.
	if !idm.initUser.IsRoot() {
		return nil, avfs.ErrPermDenied
	}
	u, err := lookupUser(name)
	if err != nil {
		return nil, err
	}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// If the current user is the target user there is nothing to do.
	curUid := syscall.Geteuid()
	if curUid == u.uid {
		return u, nil
	}
	// NOTE(review): the additional LockOSThread calls below have no matching
	// UnlockOSThread. LockOSThread nests, so after the single deferred unlock
	// the goroutine stays permanently wired to this OS thread — presumably
	// intentional because the thread's uid/gid no longer match the process.
	// Confirm against the package's threading model.
	runtime.LockOSThread()
	curGid := syscall.Getegid()
	// If the current user is not root, root privileges must be restored
	// before setting the new uid and gid.
	if curGid != 0 {
		runtime.LockOSThread()
		if err := syscall.Setresgid(0, 0, 0); err != nil {
			return nil, avfs.UnknownError(fmt.Sprintf("%s : can't change gid to %d : %v", op, 0, err))
		}
	}
	if curUid != 0 {
		runtime.LockOSThread()
		if err := syscall.Setresuid(0, 0, 0); err != nil {
			return nil, avfs.UnknownError(fmt.Sprintf("%s : can't change uid to %d : %v", op, 0, err))
		}
	}
	// Target is root itself: privileges were just restored, we are done.
	if u.uid == 0 {
		return u, nil
	}
	// Drop to the target identity: gid first (must still be uid 0 to change
	// it), keeping 0 as the saved id so the identity can be restored later.
	runtime.LockOSThread()
	if err := syscall.Setresgid(u.gid, u.gid, 0); err != nil {
		return nil, avfs.UnknownError(fmt.Sprintf("%s : can't change gid to %d : %v", op, u.gid, err))
	}
	runtime.LockOSThread()
	if err := syscall.Setresuid(u.uid, u.uid, 0); err != nil {
		return nil, avfs.UnknownError(fmt.Sprintf("%s : can't change uid to %d : %v", op, u.uid, err))
	}
	return u, nil
}
// UserAdd adds a new user to primary group groupName by shelling out to
// useradd(8) (with -M: no home directory is created).
// Requires root privileges; returns avfs.ErrPermDenied otherwise.
func (idm *OsIdm) UserAdd(name, groupName string) (avfs.UserReader, error) {
	if !idm.initUser.IsRoot() || !idm.CurrentUser().IsRoot() {
		return nil, avfs.ErrPermDenied
	}

	// Pin the goroutine to its OS thread while running the command as root.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var stderr bytes.Buffer
	cmd := exec.Command("useradd", "-M", "-g", groupName, name)
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		// Map useradd's stderr text onto typed errors.
		msg := strings.TrimSpace(stderr.String())
		switch msg {
		case "useradd: user '" + name + "' already exists":
			return nil, avfs.AlreadyExistsUserError(name)
		case "useradd: group '" + groupName + "' does not exist":
			return nil, avfs.UnknownGroupError(groupName)
		}
		return nil, avfs.UnknownError(err.Error() + msg)
	}

	return lookupUser(name)
}
// UserDel deletes an existing user by shelling out to userdel(8).
// Requires root privileges; returns avfs.ErrPermDenied otherwise.
func (idm *OsIdm) UserDel(name string) error {
	if !idm.initUser.IsRoot() || !idm.CurrentUser().IsRoot() {
		return avfs.ErrPermDenied
	}

	// Pin the goroutine to its OS thread while running the command as root.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var stderr bytes.Buffer
	cmd := exec.Command("userdel", name)
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		// Map userdel's stderr text onto typed errors.
		msg := strings.TrimSpace(stderr.String())
		if msg == "userdel: user '"+name+"' does not exist" {
			return avfs.UnknownUserError(name)
		}
		return avfs.UnknownError(err.Error() + msg)
	}

	return nil
}
const (
	groupFile = "/etc/group"  // standard Linux group database
	userFile  = "/etc/passwd" // standard Linux user database
)

var colon = []byte{':'} //nolint:gochecknoglobals // Used in matchGroupIndexValue and matchUserIndexValue.

// lineFunc returns a value, an error, or (nil, nil) to skip the row.
type lineFunc func(line []byte) (v interface{}, err error)
// readColonFile parses r as an /etc/group or /etc/passwd style file, running
// fn for each row. readColonFile returns a value, an error, or (nil, nil) if
// the end of the file is reached without a match.
func readColonFile(r io.Reader, fn lineFunc) (interface{}, error) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		// There's no spec for /etc/passwd or /etc/group, but we try to follow
		// the same rules as the glibc parser, which allows comments and blank
		// space at the beginning of a line.
		line := bytes.TrimSpace(scanner.Bytes())
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		v, err := fn(line)
		if v != nil || err != nil {
			return v, err
		}
	}
	return nil, scanner.Err()
}
// matchGroupIndexValue returns a lineFunc that matches an /etc/group row
// whose field at position idx equals value, parsed into a *Group.
// Rows it cannot parse are skipped (nil, nil), not treated as errors.
func matchGroupIndexValue(value string, idx int) lineFunc {
	// Any field other than the first is preceded by a colon in the raw line.
	var leadColon string
	if idx > 0 {
		leadColon = ":"
	}
	substr := []byte(leadColon + value + ":")
	return func(line []byte) (v interface{}, err error) {
		// Cheap pre-filter before paying for the split below.
		if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 3 {
			return
		}
		// wheel:*:0:root
		parts := strings.SplitN(string(line), ":", 4)
		if len(parts) < 4 || parts[0] == "" || parts[idx] != value ||
			// If the file contains +foo and you search for "foo", glibc
			// returns an "invalid argument" error. Similarly, if you search
			// for a gid for a row where the group name starts with "+" or "-",
			// glibc fails to find the record.
			parts[0][0] == '+' || parts[0][0] == '-' {
			return
		}
		// An unparsable gid skips the row rather than aborting the scan.
		gid, err := strconv.Atoi(parts[2])
		if err != nil {
			return nil, nil
		}
		return &Group{name: parts[0], gid: gid}, nil
	}
}
// findGroupId scans r (an /etc/group style stream) for the group with the
// given gid; it returns UnknownGroupIdError when no row matches.
func findGroupId(gid int, r io.Reader) (*Group, error) {
	v, err := readColonFile(r, matchGroupIndexValue(strconv.Itoa(gid), 2))
	if err != nil {
		return nil, err
	}
	if g, ok := v.(*Group); ok {
		return g, nil
	}
	return nil, avfs.UnknownGroupIdError(gid)
}
// findGroupName scans r (an /etc/group style stream) for the group with the
// given name; it returns UnknownGroupError when no row matches.
func findGroupName(name string, r io.Reader) (*Group, error) {
	v, err := readColonFile(r, matchGroupIndexValue(name, 0))
	if err != nil {
		return nil, err
	}
	if g, ok := v.(*Group); ok {
		return g, nil
	}
	return nil, avfs.UnknownGroupError(name)
}
// matchUserIndexValue returns a lineFunc producing a *User for a row whose
// field at the given index equals value. Rows it cannot parse are skipped
// (nil, nil), not treated as errors.
func matchUserIndexValue(value string, idx int) lineFunc {
	// Any field other than the first is preceded by a colon in the raw line.
	var leadColon string
	if idx > 0 {
		leadColon = ":"
	}
	substr := []byte(leadColon + value + ":")
	return func(line []byte) (v interface{}, err error) {
		// Cheap pre-filter before paying for the split below.
		if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 6 {
			return
		}
		// kevin:x:1005:1006::/home/kevin:/usr/bin/zsh
		parts := strings.SplitN(string(line), ":", 7)
		if len(parts) < 6 || parts[idx] != value || parts[0] == "" ||
			// Rows whose name starts with '+' or '-' are NIS inclusions;
			// glibc refuses to match them — we follow suit.
			parts[0][0] == '+' || parts[0][0] == '-' {
			return
		}
		uid, err := strconv.Atoi(parts[2])
		if err != nil {
			return nil, nil
		}
		gid, err := strconv.Atoi(parts[3])
		if err != nil {
			return nil, nil
		}
		u := &User{
			name: parts[0],
			uid:  uid,
			gid:  gid,
		}
		// The pw_gecos field isn't quite standardized. Some docs
		// say: "It is expected to be a comma separated list of
		// personal data where the first item is the full name of the
		// user."
		// NOTE(review): the comment above describes pw_gecos (field 5), but
		// this code trims the login name (field 1) at a comma — likely a
		// leftover from the stdlib code it mirrors; confirm intent.
		if i := strings.Index(u.name, ","); i >= 0 {
			u.name = u.name[:i]
		}
		return u, nil
	}
}
// findUserId scans r (an /etc/passwd style stream) for the user with the
// given uid; it returns UnknownUserIdError when no row matches.
func findUserId(uid int, r io.Reader) (*User, error) {
	v, err := readColonFile(r, matchUserIndexValue(strconv.Itoa(uid), 2))
	if err != nil {
		return nil, err
	}
	if u, ok := v.(*User); ok {
		return u, nil
	}
	return nil, avfs.UnknownUserIdError(uid)
}
// findUsername scans r (an /etc/passwd style stream) for the user with the
// given login name; it returns UnknownUserError when no row matches.
func findUsername(name string, r io.Reader) (*User, error) {
	v, err := readColonFile(r, matchUserIndexValue(name, 0))
	if err != nil {
		return nil, err
	}
	if u, ok := v.(*User); ok {
		return u, nil
	}
	return nil, avfs.UnknownUserError(name)
}
// lookupGroup resolves a group by name against /etc/group.
func lookupGroup(groupname string) (*Group, error) {
	file, err := os.Open(groupFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return findGroupName(groupname, file)
}
// lookupGroupId resolves a group by gid against /etc/group.
func lookupGroupId(gid int) (*Group, error) {
	file, err := os.Open(groupFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return findGroupId(gid, file)
}
// lookupUser resolves a user by login name against /etc/passwd.
func lookupUser(username string) (*User, error) {
	file, err := os.Open(userFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return findUsername(username, file)
}
// lookupUserId resolves a user by uid against /etc/passwd.
func lookupUserId(uid int) (*User, error) {
	file, err := os.Open(userFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return findUserId(uid, file)
}
// currentUser returns the User matching the effective uid of the process,
// or nil if the lookup fails.
func currentUser() *User {
	// Thread-pinned so Geteuid and the lookup run on the same OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	u, err := lookupUserId(syscall.Geteuid())
	if err != nil {
		return nil
	}
	return u
}
|
package parser
import "fmt"
// SourceFile represents a source file.
type SourceFile struct {
	Name string // display name of the file, used in cursor stringification
	Src  []rune // full file contents, decoded to runes
}
// Cursor represents a source-code location.
type Cursor struct {
	Index  uint        // rune offset into File.Src (0-based)
	Column uint        // column number (1-based)
	Line   uint        // line number (1-based)
	File   *SourceFile // file the cursor points into; may be nil
}
// NewCursor creates a new cursor location based on the given source file,
// positioned at the start of the file (line 1, column 1, index 0).
func NewCursor(file *SourceFile) Cursor {
	c := Cursor{File: file}
	c.Line = 1
	c.Column = 1
	return c
}
// String stringifies the cursor as "file:line:column", using "<unknown>"
// when no file is attached.
func (c Cursor) String() string {
	name := "<unknown>"
	if c.File != nil {
		name = c.File.Name
	}
	return fmt.Sprintf("%s:%d:%d", name, c.Line, c.Column)
}
|
package iotdatahandler
import (
"github.com/gravitational/trace"
"github.com/jinzhu/gorm"
)
// IotDataHandlerDB is the main struct wrapping the gorm connection used to
// persist metrics.
type IotDataHandlerDB struct {
	dbconn    *gorm.DB // open gorm database handle
	tableName string   // table metrics are stored in ("metrics")
}
// GetNewIotDataHandlerDB returns a new IotDataHandlerDB backed by db,
// auto-migrating the Metric schema first.
//
// NOTE(review): the AutoMigrate result is discarded, so a failed migration
// goes unnoticed — confirm best-effort migration is acceptable here.
func GetNewIotDataHandlerDB(db *gorm.DB) *IotDataHandlerDB {
	db.AutoMigrate(&Metric{})
	return &IotDataHandlerDB{
		dbconn:    db,
		tableName: "metrics",
	}
}
// SaveInDB upserts a metric: it inserts the metric when no row exists for
// the (AccountID, UserID) pair, and updates the existing row otherwise.
func (db *IotDataHandlerDB) SaveInDB(m *Metric) error {
	_, err := db.findMetric(m)
	switch {
	case trace.IsNotFound(err):
		// No existing row — insert a fresh one.
		err = db.dbconn.Create(&m).Error
	case err == nil:
		// Row already present — update it instead of creating a duplicate.
		err = db.updateMetric(m)
	}
	// Any other lookup error (e.g. connection failure) falls through here.
	if err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// updateMetric overwrites the stored metric row matching m's
// (AccountID, UserID) pair with m's values.
func (db *IotDataHandlerDB) updateMetric(m *Metric) error {
	result := db.dbconn.Table(db.tableName).Where(&Metric{
		AccountID: m.AccountID,
		UserID:    m.UserID,
	}).Updates(m)
	if result.Error != nil {
		return trace.Wrap(result.Error)
	}
	return nil
}
// findMetric fetches the stored metric matching m's (AccountID, UserID)
// pair, returning trace.NotFound when no row exists.
func (db *IotDataHandlerDB) findMetric(m *Metric) (*Metric, error) {
	found := &Metric{}
	result := db.dbconn.Table(db.tableName).Where(&Metric{
		AccountID: m.AccountID,
		UserID:    m.UserID,
	}).Find(&found)
	if result.RecordNotFound() {
		return nil, trace.NotFound("metric not found in db")
	}
	// Any other error (e.g. a broken connection) is wrapped and surfaced.
	if result.Error != nil {
		return nil, trace.Wrap(result.Error)
	}
	return found, nil
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
// Allows to execute several search template operations in one request.
package msearchtemplate
import (
gobytes "bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/elastic/elastic-transport-go/v8/elastictransport"
"github.com/elastic/go-elasticsearch/v8/typedapi/types"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype"
)
const (
	// indexMask is set on paramSet when the index path parameter is provided.
	indexMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
// MsearchTemplate is a builder for the multi search template API request.
type MsearchTemplate struct {
	transport elastictransport.Interface // transport used to perform the request

	headers http.Header // extra request headers
	values  url.Values  // query-string parameters
	path    url.URL     // request path, assembled in HttpRequest

	buf *gobytes.Buffer // serialized NDJSON request body

	req      []types.RequestItem                    // typed request payload
	deferred []func(request []types.RequestItem) error // mutations applied just before serialization
	raw      io.Reader                              // raw body; takes precedence over req

	paramSet int // bitmask of set path parameters (see indexMask)

	index string // optional index path parameter
}
// NewMsearchTemplate type alias for index.
type NewMsearchTemplate func() *MsearchTemplate

// NewMsearchTemplateFunc returns a new instance of MsearchTemplate with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewMsearchTemplateFunc(tp elastictransport.Interface) NewMsearchTemplate {
	return func() *MsearchTemplate {
		n := New(tp)

		return n
	}
}
// New returns a MsearchTemplate builder, which allows to execute several
// search template operations in one request.
//
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html
func New(tp elastictransport.Interface) *MsearchTemplate {
	r := &MsearchTemplate{
		transport: tp,
		values:    make(url.Values),
		headers:   make(http.Header),
		buf:       gobytes.NewBuffer(nil),
	}

	return r
}
// Raw takes a json payload as input which is then passed to the http.Request.
// If specified Raw takes precedence on Request method.
func (r *MsearchTemplate) Raw(raw io.Reader) *MsearchTemplate {
	r.raw = raw
	return r
}

// Request allows to set the request property with the appropriate payload.
func (r *MsearchTemplate) Request(req []types.RequestItem) *MsearchTemplate {
	r.req = req
	return r
}
// HttpRequest returns the http.Request object built from the
// given parameters: it runs deferred request mutations, serialises the body
// (raw payload wins over typed request items), assembles the path from the
// optional index parameter and sets default content negotiation headers.
func (r *MsearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) {
	var path strings.Builder
	var method string
	var req *http.Request

	var err error

	if len(r.deferred) > 0 {
		for _, f := range r.deferred {
			deferredErr := f(r.req)
			if deferredErr != nil {
				return nil, deferredErr
			}
		}
	}

	if r.raw != nil {
		r.buf.ReadFrom(r.raw)
	} else if r.req != nil {
		// NDJSON body: one JSON document per line. The error is handled
		// inside the loop; the original checked a stale outer `err` after
		// the loop, which was dead code.
		for _, elem := range r.req {
			data, marshalErr := json.Marshal(elem)
			if marshalErr != nil {
				return nil, fmt.Errorf("could not serialise request for MsearchTemplate: %w", marshalErr)
			}
			r.buf.Write(data)
			r.buf.Write([]byte("\n"))
		}
	}

	r.path.Scheme = "http"

	switch {
	case r.paramSet == 0:
		path.WriteString("/")
		path.WriteString("_msearch")
		path.WriteString("/")
		path.WriteString("template")

		method = http.MethodPost
	case r.paramSet == indexMask:
		path.WriteString("/")

		path.WriteString(r.index)
		path.WriteString("/")
		path.WriteString("_msearch")
		path.WriteString("/")
		path.WriteString("template")

		method = http.MethodPost
	}

	r.path.Path = path.String()
	r.path.RawQuery = r.values.Encode()

	if r.path.Path == "" {
		return nil, ErrBuildPath
	}

	if ctx != nil {
		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf)
	} else {
		req, err = http.NewRequest(method, r.path.String(), r.buf)
	}
	// Bug fix: the error must be checked BEFORE touching req — on failure
	// req is nil and the header assignments below would panic.
	if err != nil {
		return nil, fmt.Errorf("could not build http.Request: %w", err)
	}

	req.Header = r.headers.Clone()

	if req.Header.Get("Content-Type") == "" {
		if r.buf.Len() > 0 {
			req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8")
		}
	}

	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8")
	}

	return req, nil
}
// Perform runs the http.Request through the provided transport and returns an http.Response.
func (r MsearchTemplate) Perform(ctx context.Context) (*http.Response, error) {
	req, err := r.HttpRequest(ctx)
	if err != nil {
		return nil, err
	}

	res, err := r.transport.Perform(req)
	if err != nil {
		return nil, fmt.Errorf("an error happened during the MsearchTemplate query execution: %w", err)
	}

	return res, nil
}
// Do runs the request through the transport, handle the response and returns a msearchtemplate.Response
func (r MsearchTemplate) Do(ctx context.Context) (*Response, error) {
	response := NewResponse()

	// Force typed keys so aggregations/suggesters decode into typed fields.
	r.TypedKeys(true)

	res, err := r.Perform(ctx)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// NOTE(review): the generated guard uses < 299, so a 299 status falls
	// through to the error path — emitted as-is by the code generator.
	if res.StatusCode < 299 {
		err = json.NewDecoder(res.Body).Decode(response)
		if err != nil {
			return nil, err
		}

		return response, nil
	}

	errorResponse := types.NewElasticsearchError()
	err = json.NewDecoder(res.Body).Decode(errorResponse)
	if err != nil {
		return nil, err
	}

	// Fall back to the transport status when the error payload carries none.
	if errorResponse.Status == 0 {
		errorResponse.Status = res.StatusCode
	}

	return nil, errorResponse
}
// Header set a key, value pair in the MsearchTemplate headers map.
func (r *MsearchTemplate) Header(key, value string) *MsearchTemplate {
	r.headers.Set(key, value)

	return r
}

// Index A comma-separated list of index names to use as default
// API Name: index
func (r *MsearchTemplate) Index(index string) *MsearchTemplate {
	// Record that the index path parameter is present (see indexMask).
	r.paramSet |= indexMask
	r.index = index

	return r
}
// CcsMinimizeRoundtrips Indicates whether network round-trips should be minimized as part of
// cross-cluster search requests execution
// API name: ccs_minimize_roundtrips
func (r *MsearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *MsearchTemplate {
	r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips))

	return r
}

// MaxConcurrentSearches Controls the maximum number of concurrent searches the multi search api will
// execute
// API name: max_concurrent_searches
func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *MsearchTemplate {
	r.values.Set("max_concurrent_searches", maxconcurrentsearches)

	return r
}

// SearchType Search operation type
// API name: search_type
func (r *MsearchTemplate) SearchType(searchtype searchtype.SearchType) *MsearchTemplate {
	r.values.Set("search_type", searchtype.String())

	return r
}

// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in
// the rest search response
// API name: rest_total_hits_as_int
func (r *MsearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *MsearchTemplate {
	r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint))

	return r
}

// TypedKeys Specify whether aggregation and suggester names should be prefixed by their
// respective types in the response
// API name: typed_keys
func (r *MsearchTemplate) TypedKeys(typedkeys bool) *MsearchTemplate {
	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))

	return r
}
|
package sort
import (
"fmt"
"testing"
)
func TestInsertionSort(t *testing.T) {
arr := []int{5, 7, 2, 5, 6, 8, 4, 13, 5, 6, 7}
InsertionSort(&arr)
fmt.Println(arr)
} |
package textutils
import (
"bufio"
"io"
)
type NgramIterator struct {
s *bufio.Scanner
minN int
maxN int
currGrams [][]byte
currMaxN int
currN int
filter func([]byte) bool
}
func NewNgramIterator(r io.Reader, minN, maxN int) *NgramIterator {
if minN <= 0 {
minN = 1
}
if minN > maxN {
maxN = minN
}
iter := &NgramIterator{
s: bufio.NewScanner(r),
minN: minN,
maxN: maxN,
}
iter.s.Split(bufio.ScanWords)
return iter
}
func (iter *NgramIterator) SetFilter(f func([]byte) bool) {
iter.filter = f
}
func (iter *NgramIterator) refill() bool {
var n int
switch {
case iter.currGrams == nil:
iter.currGrams = make([][]byte, iter.maxN)
n = 0
iter.currMaxN = 1
case iter.currMaxN < iter.maxN:
n = iter.currMaxN
iter.currMaxN++
default:
n = copy(iter.currGrams, iter.currGrams[1:])
}
var gram []byte // = nil
for gram == nil {
if !iter.s.Scan() {
// fmt.Println("empty")
return false
}
gram = iter.s.Bytes()
if len(gram) == 0 {
gram = nil
continue
}
if iter.filter != nil && !iter.filter(gram) {
gram = nil
}
}
iter.currGrams[n] = iter.s.Bytes()
// fmt.Printf("currGrams: %+v\n", iter.currGrams)
iter.currN = n + 1
return true
}
func (iter *NgramIterator) Next() ([][]byte, error) {
// initial conditions
for iter.currMaxN < iter.minN {
if !iter.refill() {
return nil, io.EOF
}
}
// actual next logic
if iter.currN == 0 {
// fmt.Println("refilling")
if !iter.refill() {
return nil, io.EOF
}
}
// fmt.Printf("currMaxN=%v, currN=%v ", iter.currMaxN, iter.currN)
out := iter.currGrams[iter.currMaxN-iter.currN : iter.currMaxN]
// fmt.Printf("gram=%+v\n", out)
iter.currN--
if iter.currN < iter.minN {
iter.currN = 0
}
return out, nil
}
|
// -----------------------------------------------------------------------------
// Coordinator package used for defining queue listener and event aggregator.
// -----------------------------------------------------------------------------
package coordinator
import (
"bytes"
"encoding/gob"
"godistributed-rabbitmq/common"
"godistributed-rabbitmq/common/dto"
"log"
"github.com/streadway/amqp"
)
// WebappConsumer forwards sensor sources and readings from the internal
// event bus to the webapp's RabbitMQ fanout exchanges.
type WebappConsumer struct {
	eventRaiser EventRaiser      // internal event bus used to receive readings
	connection  *amqp.Connection // open AMQP connection
	channel     *amqp.Channel    // channel used for all declares/publishes
	sources     []string         // sensor sources seen so far (deduplicated)
}
// NewWebappConsumer wires a consumer to RabbitMQ: it opens a channel,
// declares the persistence queue and both webapp fanout exchanges, starts a
// goroutine serving discovery requests, and subscribes to sensor-discovery
// events so newly discovered sources are forwarded to the webapp.
func NewWebappConsumer(eventRaiser EventRaiser) *WebappConsumer {
	consumer := WebappConsumer{
		eventRaiser: eventRaiser,
	}
	consumer.connection, consumer.channel = common.GetChannel(common.URL_GUEST)
	common.GetQueue(common.PERSISTENCE_QUEUE, consumer.channel, false)
	// Runs for the lifetime of the process; replays known sources on request.
	go consumer.ListenForDiscoveryRequests()
	consumer.eventRaiser.Subscribe(common.SENSOR_DISCOVER_EVENT, func(eventData Any) {
		consumer.Subscribe(eventData.(string))
	})
	// NOTE(review): ExchangeDeclare errors are ignored here — a failed
	// declaration only surfaces later when publishing; confirm best-effort
	// setup is intended.
	consumer.channel.ExchangeDeclare(
		common.WEBAPP_SOURCE_EXCHANGE, common.FANOUT, false, false, false, false, nil)
	consumer.channel.ExchangeDeclare(
		common.WEBAPP_READINGS_EXCHANGE, common.FANOUT, false, false, false, false, nil)
	return &consumer
}
// ListenForDiscoveryRequests blocks consuming the webapp discovery queue and
// replays every known source to the webapp whenever a request arrives.
// Intended to run in its own goroutine for the process lifetime.
func (consumer *WebappConsumer) ListenForDiscoveryRequests() {
	queue := common.GetQueue(common.WEBAPP_DISCOVERY_QUEUE, consumer.channel, false)
	msgs, err := consumer.channel.Consume(queue.Name, "", true, false, false, false, nil)
	// Bug fix: the Consume error was previously discarded; on failure msgs
	// is nil and ranging over a nil channel blocks this goroutine forever.
	if err != nil {
		log.Printf("Web Consumer: failed to consume %s: %v", queue.Name, err)
		return
	}
	log.Print("Web Consumer: ListenForDiscoveryRequests")
	for range msgs {
		for _, src := range consumer.sources {
			consumer.SendMessageSource(src)
		}
	}
}
// SendMessageSource publishes a source name to the webapp source exchange.
// Publish failures are logged (the error was previously discarded silently).
func (consumer *WebappConsumer) SendMessageSource(src string) {
	log.Printf("Web Consumer: Sending message: %s", src)
	err := consumer.channel.Publish(
		common.WEBAPP_SOURCE_EXCHANGE, "", false, false, amqp.Publishing{Body: []byte(src)})
	if err != nil {
		log.Printf("Web Consumer: failed to publish %s: %v", src, err)
	}
}
// Subscribe registers interest in a sensor event source: the source name is
// broadcast to the webapp, and every reading subsequently received for it is
// re-published gob-encoded on the readings exchange. Duplicates are ignored.
func (consumer *WebappConsumer) Subscribe(eventName string) {
	// Already subscribed: nothing to do.
	for _, v := range consumer.sources {
		if v == eventName {
			return
		}
	}
	consumer.sources = append(consumer.sources, eventName)
	consumer.SendMessageSource(eventName)
	toEvent := common.NewEvent(common.MESSAGE_RECEIVED_EVENT, eventName)
	consumer.eventRaiser.Subscribe(toEvent, func(eventData Any) {
		data := eventData.(dto.EventData)
		readout := dto.Readout{
			Name:      data.Name,
			Value:     data.Value,
			Timestamp: data.Timestamp,
		}
		buffer := new(bytes.Buffer)
		encoder := gob.NewEncoder(buffer)
		// NOTE(review): Encode and Publish errors are silently dropped; a
		// failed encode publishes an empty body — confirm best-effort is OK.
		encoder.Encode(readout)
		message := amqp.Publishing{
			Body: buffer.Bytes(),
		}
		log.Printf("Web Consumer: Sending readout from: %s", data.Name)
		consumer.channel.Publish(common.WEBAPP_READINGS_EXCHANGE, "", false, false, message)
	})
}
|
package registry
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewRepoConfig verifies parsing of a feature_store.yaml whose registry
// is given as a plain string path.
func TestNewRepoConfig(t *testing.T) {
	dir, err := os.MkdirTemp("", "feature_repo_*")
	assert.Nil(t, err)
	defer func() {
		assert.Nil(t, os.RemoveAll(dir))
	}()
	filePath := filepath.Join(dir, "feature_store.yaml")
	data := []byte(`
project: feature_repo
registry: "data/registry.db"
provider: local
online_store:
type: redis
connection_string: "localhost:6379"
`)
	err = os.WriteFile(filePath, data, 0666)
	assert.Nil(t, err)
	config, err := NewRepoConfigFromFile(dir)
	assert.Nil(t, err)
	assert.Equal(t, "feature_repo", config.Project)
	assert.Equal(t, dir, config.RepoPath)
	assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path)
	assert.Equal(t, "local", config.Provider)
	assert.Equal(t, map[string]interface{}{
		"type":              "redis",
		"connection_string": "localhost:6379",
	}, config.OnlineStore)
	// Sections absent from the yaml must come back empty.
	assert.Empty(t, config.OfflineStore)
	assert.Empty(t, config.FeatureServer)
	assert.Empty(t, config.Flags)
}
// TestNewRepoConfigRegistryMap verifies parsing when the registry is given
// as a mapping with an explicit path key rather than a plain string.
func TestNewRepoConfigRegistryMap(t *testing.T) {
	dir, err := os.MkdirTemp("", "feature_repo_*")
	assert.Nil(t, err)
	defer func() {
		assert.Nil(t, os.RemoveAll(dir))
	}()
	filePath := filepath.Join(dir, "feature_store.yaml")
	data := []byte(`
registry:
path: data/registry.db
project: feature_repo
provider: local
online_store:
type: redis
connection_string: "localhost:6379"
`)
	err = os.WriteFile(filePath, data, 0666)
	assert.Nil(t, err)
	config, err := NewRepoConfigFromFile(dir)
	assert.Nil(t, err)
	assert.Equal(t, "feature_repo", config.Project)
	assert.Equal(t, dir, config.RepoPath)
	assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path)
	assert.Equal(t, "local", config.Provider)
	assert.Equal(t, map[string]interface{}{
		"type":              "redis",
		"connection_string": "localhost:6379",
	}, config.OnlineStore)
	// Sections absent from the yaml must come back empty.
	assert.Empty(t, config.OfflineStore)
	assert.Empty(t, config.FeatureServer)
	assert.Empty(t, config.Flags)
}
// TestNewRepoConfigRegistryConfig checks that GetRegistryConfig resolves the
// registry path from the mapping form of the registry section.
// NOTE(review): largely duplicates TestNewRepoConfigRegistryMap with fewer
// assertions — consider consolidating.
func TestNewRepoConfigRegistryConfig(t *testing.T) {
	dir, err := os.MkdirTemp("", "feature_repo_*")
	assert.Nil(t, err)
	defer func() {
		assert.Nil(t, os.RemoveAll(dir))
	}()
	filePath := filepath.Join(dir, "feature_store.yaml")
	data := []byte(`
registry:
path: data/registry.db
project: feature_repo
provider: local
online_store:
type: redis
connection_string: "localhost:6379"
`)
	err = os.WriteFile(filePath, data, 0666)
	assert.Nil(t, err)
	config, err := NewRepoConfigFromFile(dir)
	assert.Nil(t, err)
	assert.Equal(t, dir, config.RepoPath)
	assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path)
}
|
package core
import (
"time"
"github.com/cbergoon/merkletree"
)
// Block is a single block in the chain: an index, bookkeeping timestamps,
// and a Merkle tree of the block's transactions.
type Block struct {
Index int64 // position of the block in the chain
CreationTime time.Time // when the block was created
CommitTime time.Time // when the block was committed
Transactions merkletree.MerkleTree // Merkle tree over this block's transactions
}
// NewBlock returns a new, zero-valued Block.
func NewBlock() *Block {
	return &Block{}
}
// GenerateGenesis builds the chain's genesis block.
// NOTE(review): not yet implemented — the body is empty.
func (b *Block) GenerateGenesis() {
}
// Generate builds a regular (non-genesis) block.
// NOTE(review): not yet implemented — the body is empty.
func (b *Block) Generate() {
}
// Validate reports whether the block is valid.
// NOTE(review): stub — currently always returns true.
func (b *Block) Validate() bool {
return true
}
package config
// Parameter is a marker interface for configuration parameters. It declares
// no methods yet, so any value satisfies it.
type Parameter interface {
}
|
package main
import (
"strings"
"github.com/corymurphy/adventofcode/shared"
)
// Commands is a list of parsed Command values.
type Commands []Command
// NewCommands parses each non-empty input line into a Command and returns
// a pointer to the collected list; empty lines are skipped.
func NewCommands(input []string) *Commands {
	result := make(Commands, 0, len(input))
	for _, line := range input {
		if line == "" {
			continue
		}
		result = append(result, *NewCommand(line))
	}
	return &result
}
// Command describes one move instruction: move Move items from stack From
// to stack To.
type Command struct {
Move int // number of items to move
From int // source stack identifier
To int // destination stack identifier
}
// NewCommand parses a command line (the numeric tokens are expected at
// space-separated fields 1, 3, and 5, e.g. "move 1 from 2 to 3") into a
// Command. Assumes the input has at least six fields — shorter input panics.
func NewCommand(input string) *Command {
	fields := strings.Split(input, " ")
	cmd := Command{
		Move: shared.ToInt(fields[1]),
		From: shared.ToInt(fields[3]),
		To:   shared.ToInt(fields[5]),
	}
	return &cmd
}
|
/*
Shared memory allocator. Currently we're just allocating memory on a fixed
"heap", no free.
*/
package main
import (
"github.com/apache/arrow/go/arrow/memory"
)
const (
memAlign = 64
)
var (
// Make sure ShmAllocator implements memory.Allocator
_ memory.Allocator = &ShmAllocator{}
)
// ShmAllocator is a shared memory allocator: a simple bump allocator over a
// fixed-size shared memory segment. Memory is never reclaimed (Free is a
// no-op).
type ShmAllocator struct {
shm *SharedMemory // backing shared memory segment
offset int // bump pointer: index of the next free byte in the segment
}
// NewShmAlloactor returns a new shared memory allocator backed by a segment
// named id, sized to maxSize rounded up to a multiple of memAlign.
// NOTE(review): "Alloactor" is a typo for "Allocator", but renaming the
// exported function would break existing callers.
func NewShmAlloactor(id string, maxSize int) (*ShmAllocator, error) {
size := align(maxSize, memAlign)
shm, err := NewSharedMemory(id, size)
if err != nil {
return nil, err
}
return &ShmAllocator{shm: shm}, nil
}
// Allocate carves a slice of size bytes (rounded up to memAlign) out of the
// shared segment and advances the bump pointer; it panics when the segment
// is exhausted.
func (a *ShmAllocator) Allocate(size int) []byte {
	rounded := align(size, memAlign)
	start := a.offset
	end := start + rounded
	buf := a.shm.Data()
	if end > cap(buf) {
		panic("out of memory")
	}
	a.offset = end
	return buf[start:end]
}
// Reallocate returns a slice of size bytes containing b's contents
// (truncated when shrinking). A fresh region is allocated unless the size
// is unchanged, in which case b is returned as-is.
func (a *ShmAllocator) Reallocate(size int, b []byte) []byte {
	if len(b) == size {
		return b
	}
	fresh := a.Allocate(size)
	copy(fresh, b)
	return fresh
}
// Free is a no-op: this bump allocator never reclaims memory.
func (a *ShmAllocator) Free(b []byte) {
// TODO: Keep free list?
}
// Close releases the underlying shared memory segment; when del is true the
// segment is also deleted. Subsequent calls are no-ops returning nil.
func (a *ShmAllocator) Close(del bool) error {
	if a.shm == nil {
		return nil
	}
	seg := a.shm
	a.shm = nil
	return seg.Close(del)
}
// align rounds num up to the next multiple of size.
func align(num, size int) int {
	fullBlocks := (num + size - 1) / size
	return fullBlocks * size
}
|
package main
import (
"fmt"
)
// a: declared at package level, assigned in main.
var a string
//---------------
// c and d: multiple variables declared in one var block.
var (
c string
d int
)
//---------------
// e: declared and initialized in one statement.
var e string = "3. 變數同時宣告並賦值"
//-----------------
// main demonstrates the different ways to declare and assign variables and
// constants in Go.
func main() {
a = "1. 宣告一個變數並賦值"
fmt.Println(a)
var b int // variables may be declared outside or inside a func
b = 1
fmt.Println(b)
//---------------
c = "2. 一次宣告多筆變數並賦值 "
d = 0
fmt.Println(c)
fmt.Println(d)
//---------------
fmt.Println(e)
var f int = 101
fmt.Println(f)
//---------------
// short declaration: declare and assign in one step
g := "變數、同時宣告並賦值 簡寫"
fmt.Println(g)
//---------------
// iota: monday starts at 1 and the following constants auto-increment
const(
monday = iota + 1
tuesday
wednsday
)
fmt.Println(monday)
fmt.Println(tuesday)
fmt.Println(wednsday)
// const values are constants and cannot be modified
}
//Note: variables declared outside a func are "global variables"; to avoid variables interfering with one another, prefer declaring them inside a func. |
package email
import (
"gopkg.in/gomail.v2"
)
// SendEmail sends an HTML email with the given content to the given address
// via the 163.com SMTP server, returning any dial/send error.
// SECURITY(review): the SMTP account and authorization code are hard-coded
// below; they should be loaded from configuration or the environment rather
// than committed to source control.
func SendEmail(content,email string)error{
d := gomail.NewDialer("smtp.163.com",25,"y484742285@163.com","YMZDBQFXSWIRXSQR")
//YMZDBQFXSWIRXSQR
m := gomail.NewMessage()
m.SetAddressHeader("From","y484742285@163.com","yinqingping")
m.SetHeader("To",email)
m.SetHeader("Subject", "通知")
m.SetBody("text/html",content)
if err := d.DialAndSend(m);err != nil {
return err
}
return nil
}
|
package v1
import (
"context"
v2beta2 "k8s.io/api/autoscaling/v2beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// HorizontalPodAutoScalersGetter yields a namespaced HPA interface.
// NOTE(review): the method name Deployment does not match what it returns
// (an HPA client) — presumably copied from a deployments getter; renaming
// it would be a breaking change for implementers, so it is only flagged here.
type HorizontalPodAutoScalersGetter interface {
Deployment(namespace string) HorizontalPodAutoScalersInterface
}
// HorizontalPodAutoScalersInterface exposes Create/Get/Update operations on
// autoscaling/v2beta2 HorizontalPodAutoscaler resources.
type HorizontalPodAutoScalersInterface interface {
Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error)
Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
}
// horizontalPodAutoScaler implements HorizontalPodAutoScalersInterface
// against a concrete clientset, scoped to a single namespace.
type horizontalPodAutoScaler struct {
client *kubernetes.Clientset // underlying Kubernetes clientset
ns string // namespace all operations are scoped to
}
// newHorizontalPodAutoScaler builds an HPA client bound to the given
// clientset and namespace.
func newHorizontalPodAutoScaler(c *kubernetes.Clientset, namespace string) *horizontalPodAutoScaler {
	h := new(horizontalPodAutoScaler)
	h.client = c
	h.ns = namespace
	return h
}
// Get fetches the named v2beta2 HorizontalPodAutoscaler in the client's
// namespace.
func (h *horizontalPodAutoScaler) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) {
return h.client.AutoscalingV2beta2().
HorizontalPodAutoscalers(h.ns).
Get(ctx, name, opts)
}
// Update replaces the given v2beta2 HorizontalPodAutoscaler in the client's
// namespace.
func (h *horizontalPodAutoScaler) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) {
return h.client.AutoscalingV2beta2().
HorizontalPodAutoscalers(h.ns).
Update(ctx, horizontalPodAutoscaler, opts)
}
// Create creates the given v2beta2 HorizontalPodAutoscaler in the client's
// namespace.
func (h *horizontalPodAutoScaler) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) {
return h.client.AutoscalingV2beta2().
HorizontalPodAutoscalers(h.ns).
Create(ctx, horizontalPodAutoscaler, opts)
}
|
// Copyright 2020-present Kuei-chun Chen. All rights reserved.
package keyhole
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"github.com/simagix/gox"
"github.com/simagix/keyhole/mdb"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"golang.org/x/text/language"
"golang.org/x/text/message"
)
// Comparison holds the source and target cluster stats being compared, plus
// logging and output options.
type Comparison struct {
Logger *gox.Logger `bson:"keyhole"`
SourceStats *mdb.ClusterStats `bson:"source"`
TargetStats *mdb.ClusterStats `bson:"target"`
nocolor bool // when true, mark mismatches with " ≠" instead of ANSI color codes
verbose bool // set via SetVerbose; not used within this file
}
// NewComparison returns a *Comparison whose logger and source/target
// cluster stats are initialized from the given keyhole version string.
func NewComparison(keyholeVersion string) *Comparison {
	return &Comparison{
		Logger:      gox.GetLogger(keyholeVersion),
		SourceStats: mdb.NewClusterStats(keyholeVersion),
		TargetStats: mdb.NewClusterStats(keyholeVersion),
	}
}
// SetNoColor sets the nocolor flag (plain-text mismatch markers instead of
// ANSI color codes).
func (p *Comparison) SetNoColor(nocolor bool) {
p.nocolor = nocolor
}
// SetVerbose sets the verbose flag.
func (p *Comparison) SetVerbose(verbose bool) {
p.verbose = verbose
}
// Compare executes the compare command. When both source and target end in
// "-stats.bson.gz" the two stats files are loaded and compared offline;
// otherwise both arguments are treated as MongoDB connection strings and
// cluster stats are collected from both clusters concurrently before
// comparing.
func (p *Comparison) Compare(source string, target string) error {
	var err error
	if strings.HasSuffix(source, "-stats.bson.gz") && strings.HasSuffix(target, "-stats.bson.gz") { // compare files
		var data []byte
		var fd *bufio.Reader
		if fd, err = gox.NewFileReader(source); err != nil {
			return err
		}
		if data, err = ioutil.ReadAll(fd); err != nil {
			return err
		}
		if err = bson.Unmarshal(data, p.SourceStats); err != nil {
			return err
		}
		if fd, err = gox.NewFileReader(target); err != nil {
			return err
		}
		if data, err = ioutil.ReadAll(fd); err != nil {
			return err
		}
		if err = bson.Unmarshal(data, p.TargetStats); err != nil {
			return err
		}
		return p.compare()
	}
	var sourceClient *mongo.Client
	var targetClient *mongo.Client
	var sourceConnString connstring.ConnString
	var targetConnString connstring.ConnString
	// connection string is required from here forward
	if sourceConnString, err = mdb.ParseURI(source); err != nil {
		return err
	}
	if sourceClient, err = mdb.NewMongoClient(sourceConnString.String()); err != nil {
		return err
	}
	if targetConnString, err = mdb.ParseURI(target); err != nil {
		return err
	}
	if targetClient, err = mdb.NewMongoClient(targetConnString.String()); err != nil {
		return err
	}
	var wg = gox.NewWaitGroup(1)
	// getStats collects stats for one cluster. It keeps its error local:
	// the original closures both assigned to the enclosing err variable
	// from two goroutines concurrently, which is a data race.
	getStats := func(stats *mdb.ClusterStats, client *mongo.Client, connString connstring.ConnString) {
		defer wg.Done()
		stats.SetVerbose(false)
		if gerr := stats.GetClusterStats(client, connString); gerr != nil {
			// NOTE(review): log.Fatal exits the whole process from library
			// code; returning the error to the caller would be friendlier.
			result := `Roles 'clusterMonitor' and 'readAnyDatabase' are required`
			log.Fatal(result)
		}
	}
	wg.Add(1)
	go getStats(p.SourceStats, sourceClient, sourceConnString)
	wg.Add(1)
	go getStats(p.TargetStats, targetClient, targetConnString)
	wg.Wait()
	return p.compare()
}
// compare prints a side-by-side (source vs. target) summary of database
// counts, index and object counts, and data sizes, marking mismatched
// values via getColor; per-collection index counts are listed last.
func (p *Comparison) compare() error {
var err error
// build target stats map
dbMap := map[string]mdb.Database{}
for i, db := range *p.TargetStats.Databases {
dbMap[db.Name] = (*p.TargetStats.Databases)[i]
}
// compare a few key metrics
codeDefault := mdb.CodeDefault
if p.nocolor {
codeDefault = ""
}
// locale-aware printer (thousands separators for %d)
printer := message.NewPrinter(language.English)
p.Logger.Info("=== Comparison Results (source vs. target) ===")
p.Logger.Info(printer.Sprintf("Number of Databases: \t%12d%v\t%12d%v",
len(*p.SourceStats.Databases), p.getColor(int64(len(*p.SourceStats.Databases)), int64(len(*p.TargetStats.Databases))), len(*p.TargetStats.Databases), codeDefault))
for _, db := range *p.SourceStats.Databases {
// index target collections by namespace for O(1) lookup below
collMap := map[string]mdb.Collection{}
for i, coll := range dbMap[db.Name].Collections {
collMap[coll.NS] = dbMap[db.Name].Collections[i]
}
p.Logger.Info(fmt.Sprintf("Database %v", db.Name))
nColl := len(dbMap[db.Name].Collections)
p.Logger.Info(printer.Sprintf(" ├─Number of Collections:\t%12d%v\t%12d%v",
len(db.Collections), p.getColor(int64(len(db.Collections)), int64(nColl)), nColl, codeDefault))
p.Logger.Info(printer.Sprintf(" ├─Number of Indexes: \t%12d%v\t%12d%v (all shards)",
db.Stats.Indexes, p.getColor(db.Stats.Indexes, dbMap[db.Name].Stats.Indexes), dbMap[db.Name].Stats.Indexes, codeDefault))
p.Logger.Info(printer.Sprintf(" ├─Number of Objects: \t%12d%v\t%12d%v",
db.Stats.Objects, p.getColor(db.Stats.Objects, dbMap[db.Name].Stats.Objects), dbMap[db.Name].Stats.Objects, codeDefault))
p.Logger.Info(printer.Sprintf(" ├─Total Data Size: \t%12s%v\t%12s%v",
gox.GetStorageSize(db.Stats.DataSize), p.getColor(db.Stats.DataSize, dbMap[db.Name].Stats.DataSize), gox.GetStorageSize(dbMap[db.Name].Stats.DataSize), codeDefault))
p.Logger.Info(printer.Sprintf(" ├─Average Data Size: \t%12s%v\t%12s%v",
gox.GetStorageSize(db.Stats.AvgObjSize), p.getColor(db.Stats.AvgObjSize, dbMap[db.Name].Stats.AvgObjSize), gox.GetStorageSize(dbMap[db.Name].Stats.AvgObjSize), codeDefault))
p.Logger.Info(" └─Number of indexes:")
for _, coll := range db.Collections {
// a source collection missing from the target reports 0 indexes
length := 0
if val, ok := collMap[coll.NS]; ok {
length = len(val.Indexes)
}
p.Logger.Info(fmt.Sprintf(" ├─%v: \t%12d\t%12d", coll.NS, len(coll.Indexes), length))
}
}
return err
}
// getColor returns the marker emitted after the source value: when a and b
// differ it is an ANSI red escape (or the literal " ≠" in nocolor mode);
// when they match it is the ANSI reset code (or "" in nocolor mode).
func (p *Comparison) getColor(a int64, b int64) string {
	differ := a != b
	if p.nocolor {
		if differ {
			return " ≠"
		}
		return ""
	}
	if differ {
		return mdb.CodeRed
	}
	return mdb.CodeDefault
}
// OutputBSON writes the comparison result as gzipped BSON into outdir,
// deriving the file name from the target hostname and suffixing a counter
// to avoid overwriting existing files. It returns an error when the target
// stats were never collected (empty hostname).
func (p *Comparison) OutputBSON() error {
if p.TargetStats.HostInfo.System.Hostname == "" {
result := `roles 'clusterMonitor' and 'readAnyDatabase' are required`
return errors.New(result)
}
var err error
var data []byte
if data, err = bson.Marshal(p); err != nil {
return err
}
// NOTE(review): Mkdir's error is ignored — presumably the directory may
// already exist; other failures would surface via OutputGzipped below.
os.Mkdir(outdir, 0755)
basename := p.TargetStats.HostInfo.System.Hostname
// colons (e.g. host:port) are not filesystem-safe on all platforms
basename = strings.ReplaceAll(basename, ":", "_")
ofile := fmt.Sprintf(`%v/%v-compare.bson.gz`, outdir, basename)
i := 1
// bump the numeric suffix until the file name is unused
for mdb.DoesFileExist(ofile) {
ofile = fmt.Sprintf(`%v/%v.%d-compare.bson.gz`, outdir, basename, i)
i++
}
if err = gox.OutputGzipped(data, ofile); err != nil {
return err
}
p.Logger.Info(fmt.Sprintf(`bson data written to %v`, ofile))
return err
}
|
package pkg3
import (
"fmt"
)
// Package-level variable initializers: these run before the init() funcs,
// demonstrating Go's initialization order. Results are discarded.
var (
_ = constInitCheck()
_ = variableInit("v1")
_ = variableInit("v2")
)
// Constants referenced by constInitCheck.
const (
c1 = "c1"
c2 = "c2"
)
// constInitCheck prints confirmation that constants c1 and c2 are already
// initialized by the time package-level variable initializers run; it
// always returns "".
func constInitCheck() string {
if c1 != "" {
fmt.Println("pkg3: const c1 has been initialized")
}
if c2 != "" {
fmt.Println("pkg3: const c2 has been initialized")
}
return ""
}
// variableInit prints that the named package variable is being initialized
// and echoes the name back as the variable's value.
func variableInit(name string) string {
	fmt.Println("pkg3: var " + name + " has been initialized")
	return name
}
// init funcs run after all package-level variables are initialized, in
// source order; this one runs first.
func init() {
fmt.Println("pkg3: first init func")
}
// A package may have multiple init funcs; this one runs after the first.
func init() {
fmt.Println("pkg3: second init func")
}
// main runs last, after all package-level initializers and init funcs.
func main() {
fmt.Println("main func for pkg3 package")
}
|
package main
import "fmt"
// isRepetitive reports whether every element of slice equals the first
// element (vacuously true for empty and single-element slices).
func isRepetitive(slice []int) bool {
	if len(slice) == 0 {
		return true
	}
	first := slice[0]
	for _, v := range slice[1:] {
		if v != first {
			return false
		}
	}
	return true
}
// msBits maps sliceElement to a bucket index in [0, len(slice)) by linearly
// scaling its position within the slice's [min, max] value range.
func msBits(slice []int, sliceElement int) int {
	lo, hi := slice[0], slice[0]
	for _, v := range slice {
		if v > hi {
			hi = v
		}
		if v < lo {
			lo = v
		}
	}
	// hi-lo+1 is always >= 1, so the division is safe
	scaled := float64(sliceElement-lo) * float64(len(slice)) / float64(hi-lo+1)
	return int(scaled)
}
// bucketSort sorts slice in place (and returns it) using recursive bucket
// sort: elements are distributed into len(slice) buckets by the supplied
// msBits mapping, buckets holding more than one distinct value are sorted
// recursively, and the buckets are concatenated back into slice.
func bucketSort(slice []int, msBits func(slice []int, sliceElement int) int) []int {
	// One (initially nil) bucket per element. The original code allocated
	// each bucket with make and then immediately overwrote it with nil in a
	// loop; a nil slice is a valid append target, so that was dead code.
	buckets := make([][]int, len(slice))
	for _, v := range slice {
		idx := msBits(slice, v)
		buckets[idx] = append(buckets[idx], v)
	}
	// Recurse only on buckets that contain more than one distinct value;
	// the isRepetitive guard prevents infinite recursion on duplicates.
	for i := range buckets {
		if len(buckets[i]) > 1 && !isRepetitive(buckets[i]) {
			buckets[i] = bucketSort(buckets[i], msBits)
		}
	}
	// Concatenate the buckets back into slice.
	k := 0
	for _, bucket := range buckets {
		for _, v := range bucket {
			slice[k] = v
			k++
		}
	}
	return slice
}
// main demonstrates bucketSort on a small slice containing negatives and
// duplicates.
func main() {
test := []int{1,5,6,7,3,-5,-1,4,-5}
fmt.Print(bucketSort(test, msBits))
}
|
/*
create a func with the identifier foo that returns an int
create a func with the identifier bar that returns an int and a string
call both funcs
print out their results
*/
package main
import "fmt"
// foo returns the constant 2.
func foo() int {
	const result = 2
	return result
}
// bar returns the canonical answer and its description.
// (The "everthing" typo is in the returned string and is preserved so
// callers observe identical output.)
func bar() (int, string) {
	answer, description := 42, "The answer for everthing"
	return answer, description
}
// main calls both funcs and prints their results.
func main() {
fmt.Println(foo())
fmt.Println(bar())
}
|
package main
// Generated code
var creditsB64 = "20kdc/CCUpdaterUI & Compliance
the program itself
~!~

MIT License

Copyright (c) 2019 CCDirectLink

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Compliance:

It is assumed that this text being reasonably accessible to the user within the program is sufficient to count as being "provided with the distribution".

CCUpdaterUI will try to store the location you specify.
This is to prevent repetition.
If you activate Developer Mode, CCUpdaterUI will store this.
CCUpdaterUI may, when asked to install a package, install other packages necessary for that package to operate.
It is assumed that asking to install the package grants permission to do this to prevent failures.

~!~
CCDirectLink/CCUpdaterCLI
backend to the program
~!~

MIT License

Copyright (c) 2019 CCDirectLink

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

~!~
Masterminds/semver
semantic versioning
~!~

Copyright (C) 2014-2019, Matt Butcher and Matt Farina

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

~!~
The Go Standard Library
the basis of everything here
~!~

Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

~!~
veandco/go-sdl2
wrapping sdl2 so we didn't have to
~!~

Copyright (c) 2013, Go-SDL2 Authors
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

	* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
	* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
	* Neither the name of Go-SDL2 nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

~!~
SDL2
underlying go-sdl2 is of course sdl2 itself
~!~

Please distribute this file with the SDL runtime environment:

The Simple DirectMedia Layer (SDL for short) is a cross-platform library
designed to make it easy to write multi-media software, such as games
and emulators.

The Simple DirectMedia Layer library source code is available from:
https://www.libsdl.org/

This library is distributed under the terms of the zlib license:
http://www.zlib.net/zlib_license.html

----

This software is provided 'as-is', without any express or implied
warranty.  In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

~!~
golang/freetype
every character of text here is drawn by this
~!~

This situation is a tad complicated.
Officially, according to the license, the following block appears to suffice:

    Portions of this software are copyright © 2010 The FreeType
    Project (www.freetype.org).  All rights reserved.

However, it's reasonable to assume that in the conversion to Go some original work was performed.
As such, it should be acknowledged that not only is it the work of the FreeType authors but also those working on golang/freetype.

~!~
golang.org/x/image/font/gofont
the shapes of the letters you see come from here
~!~

These fonts were created by the Bigelow & Holmes foundry specifically for the
Go project. See https://blog.golang.org/go-fonts for details.

They are licensed under the same open source license as the rest of the Go
project's software:

Copyright (c) 2016 Bigelow & Holmes Inc.. All rights reserved.

Distribution of this font is governed by the following license. If you do not
agree to this license, including the disclaimer, do not distribute or modify
this font.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

	* Redistributions of source code must retain the above copyright notice,
	  this list of conditions and the following disclaimer.

	* Redistributions in binary form must reproduce the above copyright notice,
	  this list of conditions and the following disclaimer in the documentation
	  and/or other materials provided with the distribution.

	* Neither the name of Google Inc. nor the names of its contributors may be
	  used to endorse or promote products derived from this software without
	  specific prior written permission.

DISCLAIMER: THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"
|
package scrape
import (
"encoding/json"
"fmt"
"log"
)
// PPrint pretty-prints the scraped events as tab-indented JSON on stdout;
// it exits the process via log.Fatal when marshalling fails.
func (s *Scrape) PPrint() {
	pretty, err := json.MarshalIndent(s.Events, "", "\t")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", pretty)
}
|
package svc
import (
"bookstore/rpc/add/internal/config"
"bookstore/rpc/model"
"github.com/tal-tech/go-zero/core/stores/sqlx"
)
// ServiceContext carries the service configuration and shared dependencies
// (the book model) used by request handlers.
type ServiceContext struct {
c config.Config
Model *model.BookModel
}
// NewServiceContext builds a ServiceContext, wiring the book model to the
// MySQL data source, cache, and table named in the configuration.
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
c: c,
Model: model.NewBookModel(sqlx.NewMysql(c.DataSource), c.Cache, c.Table),
}
}
package main
import (
"fmt"
"crypto/hmac"
"crypto/sha256"
"io"
)
// main demonstrates that HMAC-SHA256 with the same key yields different
// MACs for different messages.
func main() {
a:=getcode("example")
fmt.Println(a)
b:=getcode("example1")
fmt.Println(b)
}
// getcode returns the hex-encoded HMAC-SHA256 of str computed with the
// fixed key "passkey".
func getcode(str string) string {
	mac := hmac.New(sha256.New, []byte("passkey"))
	mac.Write([]byte(str)) // hash.Hash writes never return an error
	return fmt.Sprintf("%x", mac.Sum(nil))
}
|
package main
import (
"fmt"
"regexp"
"io/ioutil"
"io"
"os"
)
// check panics when e is a non-nil error and does nothing otherwise.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// read returns the output file name (os.Args[2]) and the full contents of
// the input file (os.Args[1]); it panics via check on read failure.
// NOTE(review): assumes both command-line arguments are present — an index
// out-of-range panic occurs otherwise.
func read() (string, []byte) {
// input and output file names come from the command-line arguments
inputFile := os.Args[1]
outputFile := os.Args[2]
fmt.Println("\nReading input file...")
// read the entire input file into memory
data, err := ioutil.ReadFile(inputFile)
check(err)
return outputFile, data
}
// parse removes every run of decimal digits from input and returns the
// remaining text as a string. (The original comments claimed it returned a
// []string of matches; it actually returns the digit-stripped text.)
func parse(input []byte) string {
	// NOTE: compiled per call; hoist to a package var if this becomes hot
	digits := regexp.MustCompile("[0-9]+")
	return digits.ReplaceAllString(string(input), "")
}
// write creates (or truncates) fileName and writes data to it, panicking
// via check on create/write failure; a final Sync flushes the write to
// stable storage.
func write(data string, fileName string) {
//create file message
fmt.Printf("\nCreating output file with name '%s'...\n\n", fileName)
//create file using given filename
file, createErr := os.Create(fileName)
check(createErr)
defer file.Close()
//write data to newly created file
_, writeErr:= io.WriteString(file, data)
check(writeErr)
// flush writes to stable storage (NOTE(review): Sync's error is ignored)
file.Sync()
}
// main reads the input file named in os.Args[1], strips all digit runs
// from its contents, and writes the result to the file named in os.Args[2].
func main() {
//call read(), put file name and file contents into variables
fileName, readData := read()
//strip all digit runs from the file contents
parsedData := parse(readData)
//write the filtered contents to the named output file
write(parsedData, fileName)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testdata
// Test fixtures: base64-encoded PEM keys and certificates for TLS tests.
// NOTE(review): the multi-line raw-string literals below contain an embedded
// newline mid-base64 — presumably intentional fixture data; confirm before
// reformatting, since changing a single byte invalidates the blobs.
var (
	// Unused CA private key, kept alongside caCrt for reference.
	_ = `LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcHRMN21JQmdaaDVEd1FYYkJXaHRSeURGK01qaytQeStNU1Vyck5pc1EyeXF4cnphCjhEeWw5a0pRWW5oMnVYVFV1RnBzbDRhM1J5dEJaVkdrMDNQT0RXOWJIblR2QUNPZHJjMnR2WmR5ZXRXU1ZtQ2cKOHhuc3Y3WHVQS0VGb0VwakVaMDdCWjY2blFIRDg2MHFMeGFGRWtMNHk2MzU5SThWVlRBYk5RejVPQ3dmM29mUQpMN0JPL2RVNUJtRTNXTDhhVHF3SXRSa0hJeE5pWCs4OWU2Z3dCY3RHdUZLR3ZacFhGaW1VeXA1Y0crVWI2RzkyCi9KUTZJWm45dGFIZ3NFYWIvWUNwZ2U1Rkp5WVR1dzVlakhRajRYNVh3ZVRKU0tsN0UwUmZhMjl5VnM5aXdhNDQKcmVNSzVXR2hVUFl6T1o0MURGZnU1MmJnMjVPODF6QWJFSFpLUndJREFRQUJBb0lCQUNUVUJ2OFB1RGhURGhvYQp0Tk5vemxjWmdSci9IcTFvL29QUzlPVmZvQWZ5Z1hFR1dEOFk1SHFOQVRuNzVobmpGT0x0ODNNd0psM3J5ckFYCmFnL1VUUFRpVkhkUTBVSnltbWk0TTFiYmpFWlp4OGlSNUhaR2p1Rnp4SGhXQSt2ekFCUHZaZ3hEa21iKzhNZG0KdngxT0YycUVwbkF3cERHOU5MUnR2bFBqM1ZEczhVODU2c2hWeDdBdFE3RGJUWkQwdEpsQ0pzTzR5TitjL1oxOApiRzJKNDB2RWFLalVGTE9HNitScE43NEZLeGtvOFJJejZxeERQMk5VMUg1ajVVVi9tZXdRdDBsRTNqbEc5MmcvCnVwTngyK0xnYUkrMWhCR3AzV2prQlRWcWloZWxrUk5XZkNLczdXOHJtYk83V3MvK2cwcVNidnAvUjBWQWpQd0MKdGt4SENFRUNnWUVBM2s3K0hOVkNZY0YxN2k2ZTJnNTJDVHV0cDJBYzkvSUNMdVJzNGFRZlB4MkxFc2VDalJnNgovaHNsOGpLbmRDS1JQdTBJbkoxckF4NzVrZXBKZWpWcTBIbkFEN2VtcVhuMDN0UjJmb3hvbkxBOEtQMzdSSnJqClhlZ0k5NiswWUU3QUY5dWZqQVhPeXpFU3RQVkNSVDlJOFRMSlEwRFhraW56bDhVUm5aZ1RjdmtDZ1lFQXdCdFYKLzNnbFR5Z0syNTFpMS9FakdrK3I3THF5NzdCY29LVzZHTm91K0FiQ3gxalhZVE1URDNTRXVyMzBueHB6VWNkdgpIbEI1NkI2Q1JmRkdXN0o1U0tkeXI5WmhQUUtITUQ1TkZhbm00S1F4NmZmVFhubExRdnhhT2c2TFRnTDRSdjFyCjVaeUdEbDhBKzRRckpNVk1OOTZOVEY1VDB0TXRUaHlIVnpLbHR6OENnWUJ3Q3BQYjZFZUtpVHhzakthVzg4N2QKbkd4Sy9RL2NqdVkyeC8xd1E0MVQvQW5KcnkvRytMMVNzRkFSbnlIeVVER3Y2enI1NUFTNUQvVnNhdzRaUDY3VAozMmpEQXlaR0tDY1gzekRSV3VhbWdkUHdQUUZVZEZPL1VtQ2lwTFZlREpLWDg2S1hxWjJ0bnMvMHo5OVVreTZxCkVaU0tCclllL25HOHZoL0FzNUtwMFFLQmdRQzFxT1BncWFkMk8rSlFuSHE4d3UwejAwVTduYXpabFlkeDdtV1YKWExUdm04MFNuME5FU2Z6ckwzN1g3QXJuYlNiQm5YckpTc2FNcGxVQWVORFVvMmVuT1pqdENDZDVmdXVCeGxnMApkUzY3SE9tS1d1ekl1S0JmM3F3Zm5HTkV5UEFvaVRvL3JZempDQm13dmVIaWFxUFJiU1Ztb3doWEk1VUMrVjFPCktybWtGd0tCZ1FEVERDWlg1WWQ5ZUdXZG1OM3pUU2Z6YkR
rRkxqZkYyYTVBK2lDL281TmoyVmpHRG4xTjRvVUwKajF0dVZLb0xoVjhVZzd0Lzc4V0V0UkRnK1p3QVZhSW84bE1zU244dDVQNFFrY2pkSDI4bHpFaTQwWHpxQkF0Lwpoalppb1pNN2ZHUmJWK29yakZSQ2tZWnNaMUdua2FrbG5Mdk4vYVRuM25HV2tEZjFaZGM0YVE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==` //ca.key
	// caCrt is the CA certificate matching the key above.
	caCrt = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDVENDQWZHZ0F3SUJBZ0lVR0JyQzVnODhaamxOSVlmbzVHdnFSbUhFNFY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0pNVEkzTGpBdU1DNHhNQjRYRFRJeU1Ea3lOekE0TkRBd05sb1hEVEl6TURreApPREE0TkRBd05sb3dGREVTTUJBR0ExVUVBd3dKTVRJM0xqQXVNQzR4TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBUThBTUlJQkNnS0NBUUVBdWJyNFNXQVJneDM4dzcxanVVNFdJZ0NLZVpYKzRDR1A1WHBSVVQ0SFg2VmsKaENadGlrZmJPc0ErZUpmdWlHZVNhUDlPL0lFRUlRZXhya2lrY3F2SzNseVJ0YzNuWnhPK1NzakY3WTVsM1ROZwpGSWh3L25GMWkxdVJuMUxwc0Y2b1hWdUtGb0QvTVNCdU1rWU82Z2VRaEZnTmZNNXdEU3NsVHFRR2pIRUlPZWkyCmRYeTh6UHFNMVowREFQOUxRZHdXN1BKeG9NRWVkNTN6Y2hhYlJWNlZXTE45WkxjWURJLzhpVG5CTWlPNDdnRVQKeXo5Q1B0N0htWjM4N0JsWUQwN3REMWlkLzFieUhCSGt5Mng2YU9OOUo3dEpOU1Zna0VKRmozUVBFQzFlVUp0NgpWbFRUS1c3cGRUVTBybWFDNDBhbmgyVExnYlYwR0pKdnI4ZDVkeUhvMlFJREFRQUJvMU13VVRBZEJnTlZIUTRFCkZnUVVhKzJHeWswdE9oaFpTanNjaGQ0bnV1K1IrUEl3SHdZRFZSMGpCQmd3Rm9BVWErMkd5azB0T2hoWlNqc2MKaGQ0bnV1K1IrUEl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBWlpHeApsSlhGUDJZVDlCaDdaYnFkWTllcEViRTNJWENzZEsvS3E2Rmx4T0RKTUJqTUIrUVM0R2QzeUMwVEU3NFZGY3lpCk5tSUxPQ2g4amJUR01QNVRuYUR0Z0pQaXpGaEZySzQwYjhnVXVqVXVZUDdJWTFlUkE1Sno5V3lSSjNFeG5RYmcKaGFkekJWQUJVdkl5RS9BUHZIeGNEKzgzcEhWd1VlS3JIcVZHU1Qxc2hmdWVDeHVnQ1pvMUg3OFVET1NNM0tsdAplQkFTbUhZc1Rtb3VTa2RVR2JwbXpvUEc0YlJyYzk3M3JycDZLcSsxZmMwQmF5NVZna2YzUXJGdFFQSE5WSVlNCnVWaTBKaHAraVpSN0MyTXVHbUFKc2U4dHRWNHBpUTY2RlBrNjdPNmZwa0NGbnp2VlYyNko2MTMwNjAwMURtOUkKTHFPajFFUVhOK29RMWJpRFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=`
	// serverKey is the server's RSA private key fixture.
	serverKey = `LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTNOaUF5Um91aVQ3ZkpaTmhIZWt1TjgwSnh2ZVpDMStwYVVYZk00UVBoNUE1R0MyCkdLN2psMXlpUC9wQ0U4NHoyWmdSQldnalh3UWVqa0UweU1QdDdHUkxEZzVmYmhsSGNVUUZ3UDRyd3BFRjRBSDkKYVRYeVNONUxqUDFzbyswMU80bHJ1L0FPRXlibUxKYjdYSkJOc1dlZ2lqM1dYWlR3NWpYeWFjWWxwT2ZKVE1ObQpQQ0thOHpRNlVMNk1QUmZFaERGNHB4SXhnUTVuaFMxcUcxWUtqUkUycTdkRU95R0o3cFUrT3Vjdm83dzRTSlNpCkdRUDFteVRiVGV0YkxXZ3h0N2tqVUlxRVZFNERHU0h1TUszeks3V2t2bzVrcDRBVytCYjZkRk96Ulg5OVFWN08KM0pZV1U3eGVWSm5zZHFpVXh6d0Z2WjA2ZEtWeCtlOEswK0RGUFFJREFRQUJBb0lCQURvMVgwYzRoQ2pobVJLYwpIZUEzd3ZnQm45RlJMeU1PbVpoWWhzMmpzNW1HallJZG9nNVNLS3gxQUpFN1prOStKYzI1RWZnSzJZa0UzM1F2ClBYUG1tN2hmZ0lzUjNZSno2U0o4REFsZEpEdWNDeVgvbURDV3B2RUh6cEF1bGhEbGRUVlN2QmkxTjdtc3g2Y3IKRDAwMEpsd2pvTStzMmlkZk85dFEvMERuWGdNeHlvYUJSVjd0OGF6enJVaFYxM01xenF5eHcvbjI5dDFXcld0ZQpjaTNkN3g4RWx3Yk5YNFJpOUk2dk9lZktOSDN4bEUrNWpjdlJSWmR4ajhXaHUzMlhhcTA1SG0za0h4SGw2ODM4Cm12aDlyeU9Ec3NTTm5RS0R3VVRYRjdsYXZFMDJyaW9scmoyRUJNcUErVE1jQ2ZvTnJ4elNGajVtVUxDamxGNWsKQnZxRVVpVUNnWUVBNndyYTVGR3U2MUZsTjlqY0p2dVNiNmlTUHN3RTczWE5pOWx1anhxSmFTeHVkSDFnMk9GTgp4WUNIZFR2WFhsdTc3TThadHVmMGFUMUM3UWQxcU8vVngyVFJTSVpOTG95RzJpRHBEL0RXdlBwKzBCTWN3K0orCitKTEpsWFlnbnZYQjlXRmk5eFljWnZDblR2eDJwS0ZFV1M4bTkvbDBQQUU4eUhDdVUxd1VPM2NDZ1lFQXpDb3YKdFIrVkp0dis2STRQV3o0bGQ1VTE0UWwrdk1hV3pXNDhHWnNsSm8zdjducnFPS2piU2xjakgrc3NNVldjYVprVQp0TFRRbFljcWFOdUJONkJydEtXQTVlSi9mV0VyVlF4azRpa0o4c3BLL2VCSTBGcnRPSnAxSm41amc0TTIvYmdvCjdBYUFHTDUvbjFNOTBscmNpZHNRYkg2TlhNN29JOTZjU2ZKQkNlc0NnWUVBdkNrdmZOS0xkcVR0bzl5K3VaSngKODJOKzJEalp4cDJIRkdyWFlFWjlOSzQyS3Bsb201Y2FmSDdkY2hPYTRWWU14cEl3NHNVa1c4K0lNVnJrYlg2NgpwR3BvUkdnSGg3bEdCMytMTkpDNFNBYzgxL1JFOWVmdmY2MTdKV1N3enJDdE9uUmhGcThqdzZEcVA0aEtycGJQCnNablcxM05qQXRwMnYzdTlnc3hYQWhjQ2dZQXRnR0Z0am9KaFRMcDgvZHd5UzZGeUMxRWN2RThBcDRuSWN2NzEKL2Z2RG9mS05SZHVaa1JoK2N2a2pEZmlsYmgwVDg4Z0hsaHkrbG9jL0kxeWpGeCtwL1JEREt6MmFwZU5RYXhpNAp4c2l1MGFMdy9lRjhmaWRNYkRBYnlpTkhsaURWWHd2UHZvc2grS0xjMFdKLzFUdzloUk1kK3Y1cVpycVo4KzBGCkZmYWt6UUtCZ1FDWk5DT3huWHlYQ2JYZ2RCY0t
xbE84L3BoWXBFOVVDMS81WktBMm1icng3ejBFdVdyR2pLNjUKU09QNDVQcithUDBTa3hTMll2QyttM3dCU0dJZU93QWEvb1dMY0dmbmdUbUpoMHNiYVJ1OXR5Y1QrR2hZemlGawp3ajV4TENBTzdFLzFKT3VkaGgramtwdGVMMVJiSUh2eXRwU3ZiN0VtUUVIbytOQWxBMHFPQkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
	// serverCrt is the server certificate signed by caCrt.
	serverCrt = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKakNDQWc2Z0F3SUJBZ0lVRHd3UklLMnRLVlVVMHJlZXF3U3htZVhEQWVNd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0pNVEkzTGpBdU1DNHhNQjRYRFRJeU1Ea3lOekE0TkRJeU5Wb1hEVEl6TURreQpOekE0TkRJeU5Wb3dLekVSTUE4R0ExVUVDZ3dJUzNWaVpWWmxiR0V4RmpBVUJnTlZCQU1NRFNvdWEzVmlaWFpsCmJHRXVhVzh3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQzdjMklESkdpNkpQdDgKbGsyRWQ2UzQzelFuRzk1a0xYNmxwUmQ4emhBK0hrRGtZTFlZcnVPWFhLSS8ra0lUempQWm1CRUZhQ05mQkI2TwpRVFRJdyszc1pFc09EbDl1R1VkeFJBWEEvaXZDa1FYZ0FmMXBOZkpJM2t1TS9XeWo3VFU3aVd1NzhBNFRKdVlzCmx2dGNrRTJ4WjZDS1BkWmRsUERtTmZKcHhpV2s1OGxNdzJZOElwcnpORHBRdm93OUY4U0VNWGluRWpHQkRtZUYKTFdvYlZncU5FVGFydDBRN0lZbnVsVDQ2NXkranZEaElsS0laQS9XYkpOdE42MXN0YURHM3VTTlFpb1JVVGdNWgpJZTR3cmZNcnRhUytqbVNuZ0JiNEZ2cDBVN05GZjMxQlhzN2NsaFpUdkY1VW1leDJxSlRIUEFXOW5UcDBwWEg1Cjd3clQ0TVU5QWdNQkFBR2pXVEJYTUI4R0ExVWRJd1FZTUJhQUZHdnRoc3BOTFRvWVdVbzdISVhlSjdydmtmankKTUFrR0ExVWRFd1FDTUFBd0N3WURWUjBQQkFRREFnVHdNQndHQTFVZEVRUVZNQk9DQzJ0MVltVjJaV3hoTG1sdgpod1IvQUFBQk1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ1Y2S3VjTmNKdGVMelpNQW1acE9CR0Y0NWZJVGVrCnRDeUZPMGRXWVRabDgzRXlSbWtCd1hSQzJoekZSQ3RaMU5jY2hGNE5SaVhMbWIwM0FnTGhRRGxUR09PN3hlNFcKVUF4MjRtdlNteW05a3ljZGIyUUhqZ2xzRHJVS040QWJuRWpoYzladWxTeVNnVG15YTl2bEh5SHBoZ3V0YUhSQgpKNkVTWHJlYzUwZUxtSC9sSjkyTEwwV01JNW4rd05lajdyTkltbktQeWZ5dUhhaXdmUlZzeVJ1ZXZETWJhSXdpCmhVNmhFOXZGTEkwa0hKaEdYa05ZOEFWbTFoYXhyWGp5Q2xwaWlsQ04xcnV1U3QzYUplN2NpUkVCL08xR2JEeGwKaFQxMUtnMTZzMEJWZHZjL2lzaVZ5SVZTQmozdVRDRCtQWStaMjUzWUc1cnd3U2xxWUlWa2QzdFkKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=`
	// clientKey is the client's RSA private key fixture.
	// NOTE(review): the literal appears byte-identical to serverKey —
	// presumably the fixture reuses one key pair; confirm if that matters.
	clientKey = `LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTNOaUF5Um91aVQ3ZkpaTmhIZWt1TjgwSnh2ZVpDMStwYVVYZk00UVBoNUE1R0MyCkdLN2psMXlpUC9wQ0U4NHoyWmdSQldnalh3UWVqa0UweU1QdDdHUkxEZzVmYmhsSGNVUUZ3UDRyd3BFRjRBSDkKYVRYeVNONUxqUDFzbyswMU80bHJ1L0FPRXlibUxKYjdYSkJOc1dlZ2lqM1dYWlR3NWpYeWFjWWxwT2ZKVE1ObQpQQ0thOHpRNlVMNk1QUmZFaERGNHB4SXhnUTVuaFMxcUcxWUtqUkUycTdkRU95R0o3cFUrT3Vjdm83dzRTSlNpCkdRUDFteVRiVGV0YkxXZ3h0N2tqVUlxRVZFNERHU0h1TUszeks3V2t2bzVrcDRBVytCYjZkRk96Ulg5OVFWN08KM0pZV1U3eGVWSm5zZHFpVXh6d0Z2WjA2ZEtWeCtlOEswK0RGUFFJREFRQUJBb0lCQURvMVgwYzRoQ2pobVJLYwpIZUEzd3ZnQm45RlJMeU1PbVpoWWhzMmpzNW1HallJZG9nNVNLS3gxQUpFN1prOStKYzI1RWZnSzJZa0UzM1F2ClBYUG1tN2hmZ0lzUjNZSno2U0o4REFsZEpEdWNDeVgvbURDV3B2RUh6cEF1bGhEbGRUVlN2QmkxTjdtc3g2Y3IKRDAwMEpsd2pvTStzMmlkZk85dFEvMERuWGdNeHlvYUJSVjd0OGF6enJVaFYxM01xenF5eHcvbjI5dDFXcld0ZQpjaTNkN3g4RWx3Yk5YNFJpOUk2dk9lZktOSDN4bEUrNWpjdlJSWmR4ajhXaHUzMlhhcTA1SG0za0h4SGw2ODM4Cm12aDlyeU9Ec3NTTm5RS0R3VVRYRjdsYXZFMDJyaW9scmoyRUJNcUErVE1jQ2ZvTnJ4elNGajVtVUxDamxGNWsKQnZxRVVpVUNnWUVBNndyYTVGR3U2MUZsTjlqY0p2dVNiNmlTUHN3RTczWE5pOWx1anhxSmFTeHVkSDFnMk9GTgp4WUNIZFR2WFhsdTc3TThadHVmMGFUMUM3UWQxcU8vVngyVFJTSVpOTG95RzJpRHBEL0RXdlBwKzBCTWN3K0orCitKTEpsWFlnbnZYQjlXRmk5eFljWnZDblR2eDJwS0ZFV1M4bTkvbDBQQUU4eUhDdVUxd1VPM2NDZ1lFQXpDb3YKdFIrVkp0dis2STRQV3o0bGQ1VTE0UWwrdk1hV3pXNDhHWnNsSm8zdjducnFPS2piU2xjakgrc3NNVldjYVprVQp0TFRRbFljcWFOdUJONkJydEtXQTVlSi9mV0VyVlF4azRpa0o4c3BLL2VCSTBGcnRPSnAxSm41amc0TTIvYmdvCjdBYUFHTDUvbjFNOTBscmNpZHNRYkg2TlhNN29JOTZjU2ZKQkNlc0NnWUVBdkNrdmZOS0xkcVR0bzl5K3VaSngKODJOKzJEalp4cDJIRkdyWFlFWjlOSzQyS3Bsb201Y2FmSDdkY2hPYTRWWU14cEl3NHNVa1c4K0lNVnJrYlg2NgpwR3BvUkdnSGg3bEdCMytMTkpDNFNBYzgxL1JFOWVmdmY2MTdKV1N3enJDdE9uUmhGcThqdzZEcVA0aEtycGJQCnNablcxM05qQXRwMnYzdTlnc3hYQWhjQ2dZQXRnR0Z0am9KaFRMcDgvZHd5UzZGeUMxRWN2RThBcDRuSWN2NzEKL2Z2RG9mS05SZHVaa1JoK2N2a2pEZmlsYmgwVDg4Z0hsaHkrbG9jL0kxeWpGeCtwL1JEREt6MmFwZU5RYXhpNAp4c2l1MGFMdy9lRjhmaWRNYkRBYnlpTkhsaURWWHd2UHZvc2grS0xjMFdKLzFUdzloUk1kK3Y1cVpycVo4KzBGCkZmYWt6UUtCZ1FDWk5DT3huWHlYQ2JYZ2RCY0t
xbE84L3BoWXBFOVVDMS81WktBMm1icng3ejBFdVdyR2pLNjUKU09QNDVQcithUDBTa3hTMll2QyttM3dCU0dJZU93QWEvb1dMY0dmbmdUbUpoMHNiYVJ1OXR5Y1QrR2hZemlGawp3ajV4TENBTzdFLzFKT3VkaGgramtwdGVMMVJiSUh2eXRwU3ZiN0VtUUVIbytOQWxBMHFPQkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=`
	// clientCrt is the client certificate.
	// NOTE(review): appears byte-identical to serverCrt; confirm intent.
	clientCrt = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKakNDQWc2Z0F3SUJBZ0lVRHd3UklLMnRLVlVVMHJlZXF3U3htZVhEQWVNd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0pNVEkzTGpBdU1DNHhNQjRYRFRJeU1Ea3lOekE0TkRJeU5Wb1hEVEl6TURreQpOekE0TkRJeU5Wb3dLekVSTUE4R0ExVUVDZ3dJUzNWaVpWWmxiR0V4RmpBVUJnTlZCQU1NRFNvdWEzVmlaWFpsCmJHRXVhVzh3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQzdjMklESkdpNkpQdDgKbGsyRWQ2UzQzelFuRzk1a0xYNmxwUmQ4emhBK0hrRGtZTFlZcnVPWFhLSS8ra0lUempQWm1CRUZhQ05mQkI2TwpRVFRJdyszc1pFc09EbDl1R1VkeFJBWEEvaXZDa1FYZ0FmMXBOZkpJM2t1TS9XeWo3VFU3aVd1NzhBNFRKdVlzCmx2dGNrRTJ4WjZDS1BkWmRsUERtTmZKcHhpV2s1OGxNdzJZOElwcnpORHBRdm93OUY4U0VNWGluRWpHQkRtZUYKTFdvYlZncU5FVGFydDBRN0lZbnVsVDQ2NXkranZEaElsS0laQS9XYkpOdE42MXN0YURHM3VTTlFpb1JVVGdNWgpJZTR3cmZNcnRhUytqbVNuZ0JiNEZ2cDBVN05GZjMxQlhzN2NsaFpUdkY1VW1leDJxSlRIUEFXOW5UcDBwWEg1Cjd3clQ0TVU5QWdNQkFBR2pXVEJYTUI4R0ExVWRJd1FZTUJhQUZHdnRoc3BOTFRvWVdVbzdISVhlSjdydmtmankKTUFrR0ExVWRFd1FDTUFBd0N3WURWUjBQQkFRREFnVHdNQndHQTFVZEVRUVZNQk9DQzJ0MVltVjJaV3hoTG1sdgpod1IvQUFBQk1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ1Y2S3VjTmNKdGVMelpNQW1acE9CR0Y0NWZJVGVrCnRDeUZPMGRXWVRabDgzRXlSbWtCd1hSQzJoekZSQ3RaMU5jY2hGNE5SaVhMbWIwM0FnTGhRRGxUR09PN3hlNFcKVUF4MjRtdlNteW05a3ljZGIyUUhqZ2xzRHJVS040QWJuRWpoYzladWxTeVNnVG15YTl2bEh5SHBoZ3V0YUhSQgpKNkVTWHJlYzUwZUxtSC9sSjkyTEwwV01JNW4rd05lajdyTkltbktQeWZ5dUhhaXdmUlZzeVJ1ZXZETWJhSXdpCmhVNmhFOXZGTEkwa0hKaEdYa05ZOEFWbTFoYXhyWGp5Q2xwaWlsQ04xcnV1U3QzYUplN2NpUkVCL08xR2JEeGwKaFQxMUtnMTZzMEJWZHZjL2lzaVZ5SVZTQmozdVRDRCtQWStaMjUzWUc1cnd3U2xxWUlWa2QzdFkKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=`
	// MockCerts bundles the fixtures above for convenient access in tests.
	MockCerts = struct {
		Ca        string
		ServerKey string
		ServerCrt string
		ClientCrt string
		ClientKey string
	}{
		Ca:        caCrt,
		ServerCrt: serverCrt,
		ServerKey: serverKey,
		ClientKey: clientKey,
		ClientCrt: clientCrt,
	}
)
|
package iot
// Cloud is the interface of an IoT cloud backend.
type Cloud interface {
	// Push publishes a single sampled value to the cloud backend,
	// returning a non-nil error when the upload fails.
	Push(v *Value) error
}

// Value is one sample reported by a device.
type Value struct {
	Device string      // identifier of the reporting device
	Value  interface{} // sampled payload; concrete type depends on the device
}
// NewCloud builds the Cloud backend matching the concrete type of config:
// *WsnConfig selects the WSN backend and *OneNetConfig selects OneNET.
// It returns nil for any other config type, so callers must nil-check.
func NewCloud(config interface{}) Cloud {
	// Bind the asserted value directly in the type switch instead of
	// re-asserting inside each case (the previous version did both).
	switch cfg := config.(type) {
	case *WsnConfig:
		return NewWsnClound(cfg)
	case *OneNetConfig:
		return NewOneNetCloud(cfg)
	default:
		return nil
	}
}
|
package setup
import (
"log"
"strings"
"io/ioutil"
"os"
"fmt"
"path/filepath"
"strconv"
)
// reportNeedOfChanges walks the consumer's directory tree and prints every
// line still marked with ***fixme*** (via the needChange walk callback).
func reportNeedOfChanges(name string) {
	fmt.Println("\n\nStart searching for change needs!")
	rootPath := fmt.Sprintf("%s/", getConsumerName(name))
	// BUG FIX: the error from filepath.Walk was silently dropped, so a
	// missing root directory looked identical to "no findings".
	if err := filepath.Walk(rootPath, needChange); err != nil {
		log.Println("walking", rootPath, "failed:", err)
	}
	fmt.Println("\n\nFinished searching for change needs!")
}
func needChange(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
report := ""
input, err := ioutil.ReadFile(path)
if err != nil {
log.Fatalln(err)
}
lines := strings.Split(string(input), "\n")
for i, line := range lines {
if strings.Contains(line, "***fixme***") {
report += fmt.Sprintf("%s. %s\n", strconv.Itoa(i+1), line)
}
}
if len(report) > 0 {
fmt.Printf("\n\nSearching file %s\n", path)
fmt.Printf(report)
}
return nil
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package feedback
import (
"encoding/hex"
"fmt"
"log"
"github.com/golang/dep/gps"
)
// Labels used when formatting constraint/lock feedback for the user.
const (
	// ConsTypeConstraint represents a constraint
	ConsTypeConstraint = "constraint"
	// ConsTypeHint represents a constraint type hint
	ConsTypeHint = "hint"
	// DepTypeDirect represents a direct dependency
	DepTypeDirect = "direct dep"
	// DepTypeTransitive represents a transitive dependency,
	// or a dependency of a dependency
	DepTypeTransitive = "transitive dep"
	// DepTypeImported represents a dependency imported by an external tool
	DepTypeImported = "imported dep"
)
// ConstraintFeedback holds project constraint feedback data.
// All fields are plain strings, ready to be interpolated into the
// "Using ..." / "Locking in ..." user-facing messages.
type ConstraintFeedback struct {
	Constraint, LockedVersion, Revision, ConstraintType, DependencyType, ProjectPath string
}
// NewConstraintFeedback builds a feedback entry for a constraint in the manifest.
func NewConstraintFeedback(pc gps.ProjectConstraint, depType string) *ConstraintFeedback {
	// A bare revision acts as a hint; anything else is a real constraint.
	consType := ConsTypeConstraint
	if _, isRevision := pc.Constraint.(gps.Revision); isRevision {
		consType = ConsTypeHint
	}
	return &ConstraintFeedback{
		Constraint:     pc.Constraint.String(),
		ProjectPath:    string(pc.Ident.ProjectRoot),
		DependencyType: depType,
		ConstraintType: consType,
	}
}
// NewLockedProjectFeedback builds a feedback entry for a project in the lock.
func NewLockedProjectFeedback(lp gps.LockedProject, depType string) *ConstraintFeedback {
	fb := &ConstraintFeedback{
		ProjectPath:    string(lp.Ident().ProjectRoot),
		DependencyType: depType,
	}
	switch version := lp.Version().(type) {
	case gps.PairedVersion:
		fb.LockedVersion = version.String()
		fb.Revision = version.Revision().String()
	case gps.UnpairedVersion:
		// Logically this should never occur, but handle for completeness sake.
		fb.LockedVersion = version.String()
	case gps.Revision:
		fb.Revision = version.String()
	}
	return fb
}
// LogFeedback logs feedback on changes made to the manifest or lock.
func (cf ConstraintFeedback) LogFeedback(logger *log.Logger) {
	if cf.Constraint != "" {
		msg := GetUsingFeedback(cf.Constraint, cf.ConstraintType, cf.DependencyType, cf.ProjectPath)
		logger.Printf("  %v", msg)
	}
	if cf.Revision != "" {
		msg := GetLockingFeedback(cf.LockedVersion, cf.Revision, cf.DependencyType, cf.ProjectPath)
		logger.Printf("  %v", msg)
	}
}
// brokenImport is one lock-diff entry (a modified or removed project) that
// could not be preserved verbatim when an imported lock was re-solved.
type brokenImport interface {
	String() string
}
// modifiedImport records how a single imported project changed after
// solving; a nil field means that attribute is unchanged.
type modifiedImport struct {
	source, branch, revision, version *gps.StringDiff
	projectPath string
}
// String renders the change as
// "VERSION/BRANCH (REV) for PROJECT(SOURCE). Locking in VERSION/BRANCH (REV) for PROJECT(SOURCE)".
func (mi modifiedImport) String() string {
	var prevVersion, prevRev, curVersion, curRev, curPath string
	prevPath := mi.projectPath
	if mi.revision != nil {
		prevRev = fmt.Sprintf("(%s)", trimSHA(mi.revision.Previous))
		curRev = fmt.Sprintf("(%s)", trimSHA(mi.revision.Current))
	}
	switch {
	case mi.version != nil:
		prevVersion, curVersion = mi.version.Previous, mi.version.Current
	case mi.branch != nil:
		prevVersion, curVersion = mi.branch.Previous, mi.branch.Current
	}
	if mi.source != nil {
		prevPath = fmt.Sprintf("%s(%s)", mi.projectPath, mi.source.Previous)
		curPath = fmt.Sprintf(" for %s(%s)", mi.projectPath, mi.source.Current)
	}
	return fmt.Sprintf("%v %s for %s. Locking in %v %s%s", prevVersion, prevRev, prevPath, curVersion, curRev, curPath)
}
// removedImport records a project that was dropped from the lock after
// solving; a nil field means that attribute was not set in the import.
type removedImport struct {
	source, branch, revision, version *gps.StringDiff
	projectPath string
}
// String renders the removal as
// "VERSION/BRANCH (REV) for PROJECT(SOURCE). The project was removed from the lock because it is not used.".
func (ri removedImport) String() string {
	var prevVersion, prevRev string
	prevPath := ri.projectPath
	if ri.revision != nil {
		prevRev = fmt.Sprintf("(%s)", trimSHA(ri.revision.Previous))
	}
	switch {
	case ri.version != nil:
		prevVersion = ri.version.Previous
	case ri.branch != nil:
		prevVersion = ri.branch.Previous
	}
	if ri.source != nil {
		prevPath = fmt.Sprintf("%s(%s)", ri.projectPath, ri.source.Previous)
	}
	return fmt.Sprintf("%v %s for %s. The project was removed from the lock because it is not used.", prevVersion, prevRev, prevPath)
}
// BrokenImportFeedback holds information on changes to locks pre- and post- solving.
// Each entry describes one imported project that could not be preserved.
type BrokenImportFeedback struct {
	brokenImports []brokenImport
}
// NewBrokenImportFeedback builds a feedback entry that compares an initially
// imported, unsolved lock to the same lock after it has been solved.
func NewBrokenImportFeedback(ld *gps.LockDiff) *BrokenImportFeedback {
	fb := &BrokenImportFeedback{}
	for _, diff := range ld.Modify {
		// Ignore diffs where it's just a modified package set.
		if diff.Branch == nil && diff.Revision == nil && diff.Source == nil && diff.Version == nil {
			continue
		}
		fb.brokenImports = append(fb.brokenImports, modifiedImport{
			projectPath: string(diff.Name),
			source:      diff.Source,
			branch:      diff.Branch,
			revision:    diff.Revision,
			version:     diff.Version,
		})
	}
	for _, diff := range ld.Remove {
		fb.brokenImports = append(fb.brokenImports, removedImport{
			projectPath: string(diff.Name),
			source:      diff.Source,
			branch:      diff.Branch,
			revision:    diff.Revision,
			version:     diff.Version,
		})
	}
	return fb
}
// LogFeedback logs a warning for all changes between the initially imported and post- solve locks.
func (b BrokenImportFeedback) LogFeedback(logger *log.Logger) {
	for _, imp := range b.brokenImports {
		logger.Printf("Warning: Unable to preserve imported lock %v\n", imp)
	}
}
// GetUsingFeedback returns a dependency "using" feedback message. For example:
//
//	Using ^1.0.0 as constraint for direct dep github.com/foo/bar
//	Using 1b8edb3 as hint for direct dep github.com/bar/baz
func GetUsingFeedback(version, consType, depType, projectPath string) string {
	format := "Using %s as %s for %s %s"
	if depType == DepTypeImported {
		format = "Using %s as initial %s for %s %s"
	}
	return fmt.Sprintf(format, version, consType, depType, projectPath)
}
// GetLockingFeedback returns a dependency "locking" feedback message. For
// example:
//
//	Locking in v1.1.4 (bc29b4f) for direct dep github.com/foo/bar
//	Locking in master (436f39d) for transitive dep github.com/baz/qux
func GetLockingFeedback(version, revision, depType, projectPath string) string {
	revision = trimSHA(revision)
	if depType != DepTypeImported {
		return fmt.Sprintf("Locking in %s (%s) for %s %s", version, revision, depType, projectPath)
	}
	// Imported deps get a tentative "Trying" message; "*" stands in for a
	// missing version.
	if version == "" {
		version = "*"
	}
	return fmt.Sprintf("Trying %s (%s) as initial lock for %s %s", version, revision, depType, projectPath)
}
// trimSHA checks if revision is a valid SHA1 digest and trims to 7 characters.
// Anything that is not exactly 40 hex characters is returned unchanged.
func trimSHA(revision string) string {
	const sha1HexLen = 40
	if len(revision) != sha1HexLen {
		return revision
	}
	if _, err := hex.DecodeString(revision); err != nil {
		return revision // right length, but not hex
	}
	return revision[:7]
}
|
package transformer
import (
"github.com/confluentinc/confluent-kafka-go/kafka"
)
// passThrough is a stateless Transformer that forwards messages unchanged.
type passThrough struct{}
// Transform a kafka Message: the payload, key and headers are copied into a
// new message addressed to "<original topic>-passthrough".
func (p passThrough) Transform(src *kafka.Message) []*kafka.Message {
	destTopic := *src.TopicPartition.Topic + "-passthrough"
	out := kafka.Message{
		TopicPartition: kafka.TopicPartition{
			Topic:     &destTopic,
			Partition: kafka.PartitionAny,
		},
		Value:   src.Value,
		Key:     src.Key,
		Headers: src.Headers,
	}
	return []*kafka.Message{&out}
}
// PassThrough returns a transformer which does nothing,
// just transferring the message in a new topic
// (Transform appends the "-passthrough" suffix to the source topic).
func PassThrough() Transformer {
	return passThrough{}
}
|
package main
// Min returns the smaller of two ints.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// Max returns the larger of two ints.
func Max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// maxArea solves the "container with most water" problem with the classic
// two-pointer scan: an area is bounded by the shorter wall, so the pointer
// at the shorter wall is the only one worth moving inward.
func maxArea(height []int) int {
	best := 0
	left, right := 0, len(height)-1
	for left < right {
		// The helpers Min/Max are inlined here to keep the scan local.
		shorter := height[left]
		if height[right] < shorter {
			shorter = height[right]
		}
		if area := shorter * (right - left); area > best {
			best = area
		}
		if height[left] < height[right] {
			left++
		} else {
			right--
		}
	}
	return best
}
|
package main
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"strings"
"unicode"
"github.com/sirupsen/logrus"
"github.com/viert/go-lame"
)
const (
	// audioBitRate is the default MP3 bit rate in kbps.
	// NOTE(review): 123 is an unusual value — presumably 128 was intended;
	// confirm before changing, callers may pass their own bitrate anyway.
	audioBitRate = 123
	// streamApiUrl is the get_video_info endpoint; the video id is appended.
	streamApiUrl = "http://youtube.com/get_video_info?video_id="
)
// stream holds one decoded stream variant; keys used by decodeStream are
// "quality", "type", "url", "sig", "title" and "author".
type stream map[string]string
// RawVideoStream aggregates everything fetched and decoded for one video.
type RawVideoStream struct {
	VideoId                string   // bare youtube video id
	VideoInfo              string   // full get_video_info URL for this id
	Title                  string   `json:"title"`
	Author                 string   `json:"author"`
	URLEncodedFmtStreamMap []stream `json:"url_encoded_fmt_stream_map"` // decoded stream variants
	Status                 string   `json:"status"`
}
// removeWhiteSpace returns str with every Unicode whitespace rune removed,
// producing a single-token string suitable for a filename.
func removeWhiteSpace(str string) string {
	dropSpace := func(r rune) rune {
		if unicode.IsSpace(r) {
			return -1 // strings.Map drops runes mapped to a negative value
		}
		return r
	}
	return strings.Map(dropSpace, str)
}
// fixExtension maps a requested format string to the on-disk extension:
// anything mentioning "mp3" becomes ".mp3", everything else ".flv".
func fixExtension(str string) string {
	if strings.Contains(str, "mp3") {
		return ".mp3"
	}
	return ".flv"
}
// encodeAudioStream downloads the stream at surl and encodes it as MP3 into
// ~/Downloads/<path>/<file> at the given bit rate (kbps).
//
// Fixes over the previous version: the scratch file was opened with only
// os.O_CREATE (which implies read-only access, so the Write always failed),
// its open error was discarded, it was never rewound before re-reading,
// never removed afterwards, and MkdirAll/WriteTo failures were swallowed.
func encodeAudioStream(file, path, surl string, bitrate uint) error {
	data, err := downloadVideoStream(surl)
	if err != nil {
		log.Printf("Http.Get\nerror: %s\nURL: %s\n", err, surl)
		return err
	}
	// Spool the raw download to a scratch file, then rewind it for encoding.
	tmp, err := os.OpenFile("_temp_", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0755)
	if err != nil {
		logrus.Errorf("Unable to create scratch file: %v", err)
		return err
	}
	defer os.Remove("_temp_")
	defer tmp.Close()
	if _, err := tmp.Write(data); err != nil {
		logrus.Errorf("Failed to spool response body: %v", err)
		return err
	}
	if _, err := tmp.Seek(0, 0); err != nil {
		logrus.Errorf("Unable to rewind scratch file: %v", err)
		return err
	}
	// Resolve ~/Downloads/<path>/<file> and make sure the directory exists.
	currentUser, err := user.Current()
	if err != nil {
		logrus.Errorf("Error getting current user directory: %v", err)
		return err
	}
	outputFile := filepath.Join(currentUser.HomeDir, "Downloads", path, file)
	if err := os.MkdirAll(filepath.Dir(outputFile), 0775); err != nil {
		logrus.Errorf("Unable to create output directory: %v", err)
		return err
	}
	fp, err := os.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		logrus.Errorf("Unable to create output file: %v", err)
		return err
	}
	defer fp.Close()
	// Stream the spooled audio through the LAME encoder into the output file.
	writer := lame.NewEncoder(fp)
	defer writer.Close()
	writer.SetBrate(int(bitrate))
	writer.SetQuality(1)
	if _, err := bufio.NewReader(tmp).WriteTo(writer); err != nil {
		logrus.Errorf("Unable to encode audio stream: %v", err)
		return err
	}
	return nil
}
// encodeVideoStream downloads the stream at surl and saves it verbatim to
// ~/Downloads/<path>/<file> (no re-encoding; the extension is already .flv).
//
// Fixes over the previous version: the output file was opened with only
// os.O_CREATE, which implies a read-only descriptor, so fp.Write always
// failed; and a MkdirAll failure was logged but not returned.
func encodeVideoStream(file, path, surl string) error {
	data, err := downloadVideoStream(surl)
	if err != nil {
		log.Printf("Http.Get\nerror: %s\nURL: %s\n", err, surl)
		return err
	}
	// Resolve ~/Downloads/<path>/<file> and make sure the directory exists.
	currentUser, err := user.Current()
	if err != nil {
		logrus.Errorf("Error getting current user directory: %v", err)
		return err
	}
	outputFile := filepath.Join(currentUser.HomeDir, "Downloads", path, file)
	if err := os.MkdirAll(filepath.Dir(outputFile), 0775); err != nil {
		logrus.Errorf("Unable to create output directory: %v", err)
		return err
	}
	fp, err := os.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		logrus.Errorf("Unable to create output file: %v", err)
		return err
	}
	defer fp.Close()
	// Persist the downloaded bytes.
	if _, err = fp.Write(data); err != nil {
		logrus.Errorf("Unable to encode video stream: %s `->` %v", surl, err)
		return err
	}
	return nil
}
// downloadVideoStream fetches url and returns the response body bytes.
// Transport failures, non-200 statuses and body-read failures all yield a
// non-nil error.
func downloadVideoStream(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		logrus.Errorf("Unable to fetch Data stream from URL(%s)\n: %v", url, err)
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		logrus.Errorf("Video Download error with status: '%v'", resp.StatusCode)
		return nil, errors.New("Non 200 status code received")
	}
	// BUG FIX: the ReadAll error was previously discarded, which could
	// silently hand back a truncated body.
	output, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logrus.Errorf("Unable to read response body: %v", err)
		return nil, err
	}
	return output, nil
}
// getVideoId extracts the video id from a youtube watch URL.
// Inputs shorter than 15 characters are assumed to already be bare video
// ids and are returned unchanged.
func getVideoId(url string) (string, error) {
	if len(url) < 15 {
		return url, nil
	}
	if !strings.Contains(url, "youtube.com") {
		return "", errors.New("Invalid Youtube URL")
	}
	// BUG FIX: the old strings.Split(url, "?v=")[1] panicked with an index
	// out of range for youtube URLs without a "?v=" query (e.g.
	// "https://youtube.com/watch"). SplitN + a length check handles both
	// the missing-query and empty-id cases.
	parts := strings.SplitN(url, "?v=", 2)
	if len(parts) < 2 || parts[1] == "" {
		return "", errors.New("Empty string")
	}
	return parts[1], nil
}
// decodeStream populates streams (author, title and the decoded per-quality
// stream maps) from the parsed get_video_info values. rawstream seeds the
// decoded list.
//
// NOTE(review): rawstream is received by value, so appends made here are NOT
// visible through the caller's own slice; callers must read the result from
// streams.URLEncodedFmtStreamMap.
func decodeStream(values url.Values, streams *RawVideoStream, rawstream []stream) error {
	streams.Author = values.Get("author")
	streams.Title = values.Get("title")
	// url.Values.Get already returns a string; the old string(...) wrapper
	// was a no-op conversion.
	streamMap := values.Get("url_encoded_fmt_stream_map")
	for streamPos, streamRaw := range strings.Split(streamMap, ",") {
		streamQry, err := url.ParseQuery(streamRaw)
		if err != nil {
			logrus.Infof("Error occured during stream decoding %d: %s\n", streamPos, err)
			continue
		}
		rawstream = append(rawstream, stream{
			"quality": streamQry.Get("quality"),
			"type":    streamQry.Get("type"),
			"url":     streamQry.Get("url"),
			// The separate `var sig string` declaration was folded away.
			"sig":    streamQry.Get("sig"),
			"title":  values.Get("title"),
			"author": values.Get("author"),
		})
		logrus.Infof("Stream found: quality '%s', format '%s'", streamQry.Get("quality"), streamQry.Get("type"))
	}
	streams.URLEncodedFmtStreamMap = rawstream
	return nil
}
// decodeVideoStream processes downloaded video stream and
// decodeVideoStream calls helper functions and writes the
// output in the required format
func decodeVideoStream(videoId, path, format string, bitrate uint) error {
var decStreams []stream //decoded video streams
rawVideo := new(RawVideoStream) // raw video stream
// Get video data
rawVideo.VideoId = videoId
rawVideo.VideoInfo = streamApiUrl + videoId
data, err := downloadVideoStream(rawVideo.VideoInfo)
if err != nil {
logrus.Errorf("Unable to get video stream: %v", err)
return err
}
parsedResp, err := url.ParseQuery(string(data))
if err != nil {
logrus.Errorf("Error parsing video byte stream: %v", err)
return err
}
status, ok := parsedResp["status"]
if !ok {
return errors.New("No response from server")
}
reason, _ := parsedResp["reason"]
if status[0] == "fail" {
return errors.New(fmt.Sprintf("'fail' response with reason: %s", reason))
} else if status[0] != "ok" {
return errors.New(fmt.Sprintf("'non-success' response with reason: %s", reason))
}
if err := decodeStream(parsedResp, rawVideo, decStreams); err != nil {
return errors.New("Unable to decode raw video streams")
}
file := removeWhiteSpace(rawVideo.Title) + fixExtension(format)
surl := decStreams[0]["url"] + "&signature" + decStreams[0]["sig"]
logrus.Infof("Downloading data to file: %s", file)
if strings.Contains(file, "mp3") {
if err := encodeAudioStream(file, path, surl, bitrate); err != nil {
logrus.Errorf("Unable to encode %s: %v", format, err)
}
} else {
if err := encodeVideoStream(file, path, surl); err != nil {
logrus.Errorf("Unable to encode %s: %v", format, err)
}
}
return nil
}
|
package main
import "testing"
// TestCalcFuel checks CalcFuel and CalcFuelRecur against known example
// values (the classic Advent of Code 2019 day 1 cases).
// NOTE(review): the second argument to CalcFuelRecur is presumably a
// starting accumulator — confirm against its implementation.
func TestCalcFuel(t *testing.T) {
	s := CalcFuel(14)
	if s != 2 {
		t.Errorf("CalcFuel(14) should be 2, got %v: ", s)
	}
	// For 14 the recursive variant adds no extra fuel.
	s = CalcFuelRecur(14, 0)
	if s != 2 {
		t.Errorf("CalcFuelRecur(14) should be 2, got %v: ", s)
	}
	s = CalcFuel(1969)
	if s != 966 {
		t.Errorf("CalcFuel(1969) should be 966, got %v: ", s)
	}
	s = CalcFuelRecur(1969, 0)
	if s != 966 {
		t.Errorf("CalcFuelRecur(1969) should be 966, got %v: ", s)
	}
	s = CalcFuel(100756)
	if s != 50346 {
		t.Errorf("CalcFuel(100756) should be 50346, got %v: ", s)
	}
	s = CalcFuelRecur(100756, 0)
	if s != 50346 {
		t.Errorf("CalcFuelRecur(100756) should be 50346, got %v: ", s)
	}
}
|
package api
import (
"GinPractice/users"
"fmt"
"github.com/gin-gonic/gin"
)
// Login handles the login operation: it reads the submitted
// username/password form fields, validates them via users.CheckVaild, and
// answers with a JSON envelope.
// Status codes: "0" = success, "00001" = wrong credentials, "10002" = DB error.
func Login(c *gin.Context) {
	username := c.PostForm("username")
	password := c.PostForm("password")
	if username == "" {
		// Backend trace for a login attempt with no username.
		// NOTE(review): there is no early return here — the credential
		// check below still runs with an empty username; confirm intent.
		fmt.Print("后台消息“不存在的用户要登陆")
	}
	// Debug traces of the raw credentials.
	// NOTE(review): printing a plaintext password is a security hazard.
	fmt.Print(username)
	fmt.Print(password)
	success, err := users.CheckVaild(username, password)
	if err != nil {
		// Database error: ask the user to contact an administrator.
		c.JSON(200, gin.H{
			"status":  "10002",
			"message": "数据库错误,请联系管理人员",
		})
	} else {
		if success {
			c.JSON(200, gin.H{
				// "0" means no error.
				"status":  "0",
				"message": "信息正确,成功登录",
			})
		} else {
			// Credentials did not match.
			c.JSON(200, gin.H{
				"status":  "00001",
				"message": "用户名或密码不正确",
			})
		}
	}
}
|
package _606_Construct_String_from_Binary_Tree
import (
"testing"
)
// testCase pairs a binary-tree input with the expected tree2str rendering.
type testCase struct {
	input  *TreeNode // root of the tree handed to tree2str
	output string    // expected serialized form, e.g. "1(2(4))(3)"
}
func TestTree2str(t *testing.T) {
cases := []testCase{
{
input: &TreeNode{Val: 1, Left: &TreeNode{Val: 2, Left: &TreeNode{Val: 4}}, Right: &TreeNode{Val: 3}},
output: "1(2(4))(3)",
},
}
for _, c := range cases {
if x := tree2str(c.input); x != c.output {
t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
}
}
}
|
package models
type IPLocation struct {
// The right side is the name of the JSON variable
Ip string `json:"ip,omitempty"`
CountryCode string `json:"country_code,omitempty"`
CountryName string `json:"country_name,omitempty"`
RegionCode string `json:"region_code,omitempty"`
RegionName string `json:"region_name,omitempty"`
City string `json:"city,omitempty"`
Zipcode string `json:"zipcode,omitempty"`
Lat float32 `json:"latitude,omitempty"`
Lon float32 `json:"longitude,omitempty"`
MetroCode int `json:"metro_code,omitempty"`
AreaCode int `json:"area_code,omitempty"`
}
|
package gradients
import (
"image/color"
"math"
"math/rand"
"github.com/devinmcgloin/clr/clr"
"github.com/devinmcgloin/sail/pkg/slog"
"github.com/fogleman/gg"
)
// Skyspace defines the type of the sketch. It carries no state of its own;
// Dimensions and Draw do all the work.
type Skyspace struct {
}
// Dimensions determines how large it should be (width, height in pixels).
func (ss Skyspace) Dimensions() (int, int) {
	const width, height = 2000, 1400
	return width, height
}
// Draw actually renders the sketch onto the canvas: it picks a random base
// hue plus a nearby "radial" hue and fills the whole canvas with a
// square-distance gradient between the two colors.
func (ss Skyspace) Draw(context *gg.Context, rand *rand.Rand) {
	hueVariance := rand.Intn(85)
	hue := rand.Intn(365)
	// BUG FIX: rand.Intn panics when its argument is <= 0, and hueVariance
	// can legitimately be 0 (roughly one run in 85). Only sample the offset
	// when the variance is positive.
	radialHue := hue
	if hueVariance > 0 {
		radialHue = hue + (rand.Intn(hueVariance) - hueVariance/2)
	}
	slog.InfoValues("hueVariance", hueVariance, "hue", hue, "radialHue", radialHue)
	center := clr.HSV{H: hue, S: 100, V: 100}
	surroundingColor := clr.HSV{H: radialHue, S: 85, V: 20}
	slog.InfoValues("center", center, "surroundingcolor", surroundingColor)
	rectGradient := SquareDistanceGradient{
		from: center, to: surroundingColor,
		x1: 300, y1: 300, x2: 1700, y2: 1100, maxDistance: 2000,
	}
	slog.InfoValues("dist", rectGradient.Distance(0, 0))
	slog.InfoValues("dist", rectGradient.Distance(500, 500))
	// Render background. (A block of dead, commented-out linear-gradient
	// experiments and a second Fill() on an empty path were removed.)
	context.SetFillStyle(rectGradient)
	context.DrawRectangle(0, 0, 2000, 1400)
	context.Fill()
}
// SquareDistanceGradient blends between two HSV colors based on a point's
// distance from the axis-aligned rectangle (x1,y1)-(x2,y2): points inside
// the rectangle get `from`, and the blend moves toward `to` as the distance
// approaches maxDistance.
type SquareDistanceGradient struct {
	from clr.HSV // color inside / near the rectangle
	to   clr.HSV // color approached at maxDistance
	x1, y1, x2, y2 int // rectangle corners
	maxDistance float64 // normalization divisor for the blend factor
}
// ColorAt returns the gradient color for pixel (x, y); the blend factor is
// the rectangle distance normalized by maxDistance.
func (sdg SquareDistanceGradient) ColorAt(x, y int) color.Color {
	return lerp(sdg.from, sdg.to, sdg.Distance(x, y)/sdg.maxDistance)
}
// Distance returns the Euclidean distance from (x, y) to the nearest point
// of the rectangle (x1,y1)-(x2,y2); it is 0 for points inside the rectangle.
// (Receiver renamed sgd -> sdg for consistency with ColorAt.)
func (sdg SquareDistanceGradient) Distance(x, y int) float64 {
	// max(...) clamps each axis delta at 0 when the point is within the
	// rectangle's extent on that axis.
	dx := max(sdg.x1-x, 0, x-sdg.x2)
	dy := max(sdg.y1-y, 0, y-sdg.y2)
	return math.Sqrt(dx*dx + dy*dy)
}
// lerp linearly interpolates between HSV colors a and b at fraction t.
// Saturation and value blend linearly; hue is blended on the unit circle,
// wrapping across the 0/1 seam when the gap between the hues exceeds half
// the wheel.
func lerp(a, b clr.HSV, t float64) clr.HSV {
	// Normalize each channel to [0, 1].
	h1, s1, v1 := float64(a.H)/365, float64(a.S)/100, float64(a.V)/100
	h2, s2, v2 := float64(b.H)/365, float64(b.S)/100, float64(b.V)/100
	// Order the hues so the delta is non-negative, mirroring t to keep the
	// interpolation direction intact.
	d := h2 - h1
	if h1 > h2 {
		h1, h2 = h2, h1
		d = -d
		t = 1 - t
	}
	var h float64
	if d > 0.5 {
		// Wrap around 1.0: interpolate across the seam.
		h1 = h1 + 1
		h = math.Mod(h1+t*(h2-h1), 1.0)
	} else {
		// The previous `else if d <= 0.5` was redundant — this branch is the
		// exact complement of `d > 0.5`, so a plain else is equivalent.
		h = h1 + t*d
	}
	s := s1 + t*(s2-s1)
	v := v1 + t*(v2-v1)
	return clr.HSV{
		H: int(h * 365),
		S: int(s * 100),
		V: int(v * 100),
	}
}
// max returns the largest argument converted to float64, never less than
// zero: with no arguments, or all-negative arguments, it returns 0.
// (It intentionally shadows the built-in; callers rely on the zero floor
// and the int -> float64 conversion.)
func max(args ...int) float64 {
	largest := 0.0
	for _, v := range args {
		if f := float64(v); f > largest {
			largest = f
		}
	}
	return largest
}
|
package bitset
// uint64Size is the number of bits in one word (uint64) of a bit set.
const uint64Size = uint(64)

// log2Uint64Size is lg(uint64Size); shifting a bit index right by it
// yields the word index.
const log2Uint64Size = uint(6)
// BitSet is an efficient, fixed-length set of bits backed by uint64 words.
type BitSet struct {
	length uint     // number of addressable bits
	set    []uint64 // backing words, longsNeeded(length) of them
}
// New creates a BitSet able to hold length bits, all initially zero.
func New(length uint) *BitSet {
	words := make([]uint64, longsNeeded(length))
	return &BitSet{length: length, set: words}
}
// Len returns the number of bits the BitSet was created to hold.
// (The previous comment claimed the length was in uint64 words; the code
// returns the bit length passed to New.)
func (b *BitSet) Len() uint {
	return b.length
}
// Test reports whether bit i is set; out-of-range indexes are false.
func (b *BitSet) Test(i uint) bool {
	if i >= b.length {
		return false
	}
	word := i >> log2Uint64Size
	mask := uint64(1) << (i & (uint64Size - 1))
	return b.set[word]&mask != 0
}
// Set sets bit i to 1 and returns the receiver for chaining.
// NOTE(review): unlike Test/Clear/Flip there is no bounds check here, so
// an i beyond the allocated words panics with an index-out-of-range —
// confirm whether callers guarantee i < Len().
func (b *BitSet) Set(i uint) *BitSet {
	b.set[i>>log2Uint64Size] |= 1 << (i & (uint64Size - 1))
	return b
}
// Clear sets bit i to 0 and returns the receiver; out-of-range indexes
// are a no-op.
func (b *BitSet) Clear(i uint) *BitSet {
	if i >= b.length {
		return b
	}
	word := i >> log2Uint64Size
	b.set[word] &^= 1 << (i & (uint64Size - 1))
	return b
}
// Flip inverts bit i and returns the receiver. Out-of-range indexes are
// delegated to Set. NOTE(review): Set performs no bounds check, so for i
// beyond the allocated words this path panics rather than growing the
// set — confirm the intended out-of-range semantics.
func (b *BitSet) Flip(i uint) *BitSet {
	if i >= b.length {
		return b.Set(i)
	}
	b.set[i>>log2Uint64Size] ^= 1 << (i & (uint64Size - 1))
	return b
}
// longsNeeded calculates the number of longs(uint64) needed for bits
func longsNeeded(bits uint) int {
if bits > ((^uint(0)) - uint64Size + 1) {
return int((^uint(0)) >> log2Uint64Size)
}
return int((bits + (uint64Size - 1)) >> log2Uint64Size)
}
|
/*
A distributed block-chain transactional key-value service
Assignment 7 of UBC CS 416 2016 W2
http://www.cs.ubc.ca/~bestchai/teaching/cs416_2016w2/assign7/index.html
Created by Harlan Sim and Sean Blair, April 2017
This package specifies the application's interface to the key-value
service library.
*/
package kvservice
import (
"errors"
"fmt"
"log"
"math"
"math/rand"
"net/rpc"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
)
var (
	// kvnodes ordered by the numeric weight of their IPv4 address, each
	// with a liveness flag that RPC failures flip to false.
	sortedKvnodeIpPortStatuses []NodeIpPortStatus
	// The single in-flight transaction this library instance manages.
	currentTransaction Transaction
	// Snapshot of the service's key-value store taken at NewTX time.
	originalKeyValueStore map[Key]Value
	abortedMessage        string = "This transaction is aborted!!"
	// Guards the response maps filled concurrently by RPC fan-out.
	mutex *sync.Mutex
)
// Transaction is the client-side record of one logical transaction.
type Transaction struct {
	ID int
	// For storing this transaction's Puts before it commits.
	// On commit, they will be added to the keyValueStore
	PutSet map[Key]Value
	// Every key this transaction has read or written; presumably used by
	// the kvnodes for conflict detection at commit time — confirm server
	// side.
	KeySet      map[Key]bool
	IsAborted   bool
	IsCommitted bool
	// Set once the transaction commits successfully.
	CommitID int
}
// NodeIpPortStatus pairs a kvnode's "ip:port" address with whether this
// client still believes the node is reachable.
type NodeIpPortStatus struct {
	IpPort  string
	IsAlive bool
}
// Key represents a key in the system.
type Key string

// Value represents a value in the system.
type Value string
// An interface representing a connection to the key-value store. To
// create a new connection use the NewConnection() method.
type connection interface {
	// The 'constructor' for a new logical transaction object. This is the
	// only way to create a new transaction. The returned transaction must
	// correspond to a specific, reachable, node in the k-v service. If
	// none of the nodes are reachable then tx must be nil and error must
	// be set (non-nil).
	NewTX() (newTX tx, err error)

	// Used by a client to ask a node for information about the
	// block-chain. Node is an IP:port string of one of the nodes that
	// was used to create the connection. parentHash is either an
	// empty string to indicate that the client wants to retrieve the
	// SHA 256 hash of the genesis block. Or, parentHash is a string
	// identifying the hexadecimal SHA 256 hash of one of the blocks
	// in the block-chain. In this case the return value should be the
	// string representations of SHA 256 hash values of all of the
	// children blocks that have the block identified by parentHash as
	// their prev-hash value.
	GetChildren(node string, parentHash string) (children []string)

	// Close the connection.
	Close()
}
// An interface representing a client's transaction. To create a new
// transaction use the connection.NewTX() method.
type tx interface {
	// Retrieves a value v associated with a key k as part of this
	// transaction. If success is true then v contains the value
	// associated with k and err is nil. If success is false then the
	// tx has aborted, v is an empty string, and err is non-nil. If
	// success is false, then all future calls on this transaction
	// must immediately return success = false (indicating an earlier
	// abort).
	Get(k Key) (success bool, v Value, err error)

	// Associates a value v with a key k as part of this
	// transaction. If success is true, then put was recorded
	// successfully, otherwise the transaction has aborted (see
	// above).
	Put(k Key, v Value) (success bool, err error)

	// Commits this transaction. If success is true then commit
	// succeeded, otherwise the transaction has aborted (see above).
	// The validateNum argument indicates the number of blocks that
	// must follow this transaction's block in the block-chain along
	// the longest path before the commit returns with a success.
	// txID represents the transactions's global sequence number
	// (which determines this transaction's position in the serialized
	// sequence of all the other transactions executed by the
	// service).
	Commit(validateNum int) (success bool, txID int, err error)

	// Aborts this transaction. This call always succeeds.
	Abort()
}
// Concrete implementation of the connection interface; it carries no
// state of its own (shared state lives in the package-level variables).
type myconn int

// Concrete implementation of the tx interface.
type mytx struct {
	ID int // the transaction id negotiated with the kvnodes
}
// RPC structs /////////////////////////////

// NewTransactionResp is KVServer.NewTransaction's reply: the assigned
// transaction id plus a snapshot of the node's key-value store.
type NewTransactionResp struct {
	TxID          int
	KeyValueStore map[Key]Value
}

// CommitRequest carries everything a kvnode needs to validate and commit
// the transaction.
type CommitRequest struct {
	Transaction       Transaction
	RequiredKeyValues map[Key]Value
	ValidateNum       int
}

// CommitResponse reports the outcome of a commit attempt on one node.
type CommitResponse struct {
	Success  bool
	CommitID int
	Err      string // empty when the node reported no error
}

// GetChildrenRequest asks for the children of the block with ParentHash
// ("" means the genesis block).
type GetChildrenRequest struct {
	ParentHash string
}

// GetChildrenResponse lists the child block hashes.
type GetChildrenResponse struct {
	Children []string
}

/////////////////////////////////////////////
// NewConnection is the only constructor for a logical connection object.
// It takes the k-v service nodes as ip:port strings and records them,
// sorted by address weight, in the package-level node list.
func NewConnection(nodes []string) connection {
	setSortedIpPorts(nodes)
	return new(myconn)
}
// setSortedIpPorts populates sortedKvnodeIpPortStatuses with the given
// nodes ordered by the numeric weight of their IPv4 address, all marked
// alive. The process exits if two nodes map to the same weight — the
// weight ignores the port, so two nodes on one IP also trip this check.
func setSortedIpPorts(nodes []string) {
	nodeTotalKey := make(map[int]string)
	var totalList []int
	for _, node := range nodes {
		sum := getWeightedSum(node)
		totalList = append(totalList, sum)
		// check if sum already in nodeTotalKey map
		_, ok := nodeTotalKey[sum]
		var err error
		if ok {
			err = errors.New("NewConnection was called with non-unique ip addresses")
		}
		// exit=true: duplicate addresses are fatal.
		checkError("Error in setSortedIpPorts():", err, true)
		nodeTotalKey[sum] = node
	}
	// they are all unique
	sort.Ints(totalList)
	for _, sum := range totalList {
		nodeIpPortStatus := NodeIpPortStatus{nodeTotalKey[sum], true}
		sortedKvnodeIpPortStatuses = append(sortedKvnodeIpPortStatuses, nodeIpPortStatus)
	}
}
// getWeightedSum maps an "a.b.c.d:port" string to a number unique per
// IPv4 address: a*256^4 + b*256^3 + c*256^2 + d*256^1 (the port is
// ignored).
func getWeightedSum(ipPort string) (sum int) {
	hostPart := strings.Split(ipPort, ":")[0]
	for i, octetStr := range strings.Split(hostPart, ".") {
		octet, err := strconv.Atoi(octetStr)
		checkError("Error in getSum(), strconv.Atoi()", err, true)
		// 256^(4-i) expressed as an integer shift; exact for octet
		// values, matching the float computation it replaces.
		sum += octet << (8 * (4 - i))
	}
	return
}
// NewTX initializes a Transaction. Per the connection interface contract,
// when no kvnode is reachable the returned tx is nil and the error is set
// (the previous version returned a non-nil tx alongside the error).
func (c *myconn) NewTX() (tx, error) {
	mutex = &sync.Mutex{}
	newTx := new(mytx)
	newTx.ID = getNewTransactionIDFromAll()
	if newTx.ID == -1 {
		currentTransaction.IsAborted = true
		return nil, errors.New("The kvnode system appears to be down...")
	}
	return newTx, nil
}
// getNewTransactionIDFromAll queries every kvnode for a new transaction
// id, waits for all responses, and picks an id between the first live
// node's answer and the largest answer. It also initializes
// currentTransaction and the originalKeyValueStore snapshot.
// If no node is alive (impossible according to specs), returns -1.
func getNewTransactionIDFromAll() (txid int) {
	newTxResponses := make(map[string]NewTransactionResp)
	txChannel := make(chan NewTransactionResp)
	nodeChannel := make(chan string)
	for i, nodeIpPortStatus := range sortedKvnodeIpPortStatuses {
		go func(nodeIP string, index int) {
			txChannel <- getNewTransactionID(nodeIP, index)
			nodeChannel <- nodeIP
		}(nodeIpPortStatus.IpPort, i)
	}
	// Wait for every node to respond.
	for range sortedKvnodeIpPortStatuses {
		newTx := <-txChannel
		node := <-nodeChannel
		mutex.Lock()
		newTxResponses[node] = newTx
		mutex.Unlock()
	}
	fmt.Println("Received all responses and they are:", newTxResponses)
	max := -1
	min := -1
	var kvs map[Key]Value
	for nodeIpP := range newTxResponses {
		resp := newTxResponses[nodeIpP]
		if resp.TxID != -1 {
			// min is the first non-error TxID seen.
			if min == -1 {
				min = resp.TxID
			}
			// Track the largest TxID and take its store arbitrarily.
			if resp.TxID > max {
				max = resp.TxID
				kvs = resp.KeyValueStore
			}
		}
	}
	// Default to the largest id.
	txid = max
	// If the nodes disagree, pick an id in [min, max) at random.
	if min != max {
		rand.Seed(time.Now().Unix())
		// rand.Intn panics for arguments <= 0; the previous
		// rand.Intn(max-min-2) panicked whenever max-min < 3.
		if innerRange := max - min; innerRange > 0 {
			txid = rand.Intn(innerRange) + min
		}
	}
	currentTransaction = Transaction{txid, make(map[Key]Value), make(map[Key]bool), false, false, 0}
	originalKeyValueStore = kvs
	return txid
}
// getNewTransactionID calls the KVServer.NewTransaction RPC on one node
// and returns its response. Any RPC failure (dial, call, or close) marks
// the node dead and yields a TxID of -1.
func getNewTransactionID(ipPort string, index int) (resp NewTransactionResp) {
	client, err := rpc.Dial("tcp", ipPort)
	if err != nil {
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return NewTransactionResp{-1, nil}
	}
	err = client.Call("KVServer.NewTransaction", true, &resp)
	if err != nil {
		// Close the dialed connection before bailing; the previous
		// version leaked it on this path. Best-effort: the node is
		// already being marked dead.
		client.Close()
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return NewTransactionResp{-1, nil}
	}
	if err = client.Close(); err != nil {
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return NewTransactionResp{-1, nil}
	}
	return
}
// GetChildren returns the hashes of all blocks whose prev-hash is
// parentHash, by querying the given kvnode over RPC. Any RPC failure
// yields a single-element "dead node" message instead of children.
func (c *myconn) GetChildren(node string, parentHash string) (children []string) {
	deadMsg := []string{"The provided node is dead!!!"}
	req := GetChildrenRequest{parentHash}
	var resp GetChildrenResponse
	client, err := rpc.Dial("tcp", node)
	if err != nil {
		return deadMsg
	}
	if err = client.Call("KVServer.GetChildren", req, &resp); err != nil {
		// Close before bailing; the previous version leaked the
		// connection on a failed call.
		client.Close()
		return deadMsg
	}
	if err = client.Close(); err != nil {
		return deadMsg
	}
	return resp.Children
}
// Close is a stub: this client holds no per-connection resources to
// release (RPC clients are dialed and closed per call).
func (c *myconn) Close() {
}
// Get returns the value for k within this transaction: the transaction's
// own pending Put wins over the snapshot taken at NewTX time. The key is
// recorded in KeySet, and aborted transactions always fail.
func (t *mytx) Get(k Key) (success bool, v Value, err error) {
	if currentTransaction.IsAborted {
		return false, "", errors.New(abortedMessage)
	}
	value, pending := currentTransaction.PutSet[k]
	if !pending {
		value = originalKeyValueStore[k]
	}
	currentTransaction.KeySet[k] = true
	printState()
	return true, value, nil
}
// Put stages value v for key k in this transaction's PutSet (applied to
// the store only on commit) and records the key in KeySet. Fails once the
// transaction is aborted.
func (t *mytx) Put(k Key, v Value) (bool, error) {
	if currentTransaction.IsAborted {
		return false, errors.New(abortedMessage)
	}
	currentTransaction.PutSet[k] = v
	currentTransaction.KeySet[k] = true
	printState()
	return true, nil
}
// Commit attempts to commit this transaction across all kvnodes, waiting
// until validateNum blocks follow it in the chain. On success the
// transaction records its commit id.
func (t *mytx) Commit(validateNum int) (success bool, commitID int, err error) {
	mutex = &sync.Mutex{}
	if currentTransaction.IsAborted {
		return false, 0, errors.New(abortedMessage)
	}
	success, commitID, err = commitAll(validateNum)
	if success {
		currentTransaction.IsCommitted = true
		currentTransaction.CommitID = commitID
	}
	printState()
	return
}
// commitAll fans the commit request out to every kvnode, waits for all
// responses, and returns the first successful one. If no node reports
// success the transaction is considered aborted.
func commitAll(validateNum int) (success bool, commitID int, err error) {
	commitResponses := make(map[string]CommitResponse)
	commitChan := make(chan CommitResponse)
	nodeChan := make(chan string)
	for i, nodeIpPortStatus := range sortedKvnodeIpPortStatuses {
		go func(node string, index int) {
			commitChan <- commit(node, validateNum, index)
			nodeChan <- node
		}(nodeIpPortStatus.IpPort, i)
	}
	// Wait for every node to respond.
	for i := 0; i < len(sortedKvnodeIpPortStatuses); i++ {
		commitResp := <-commitChan
		node := <-nodeChan
		mutex.Lock()
		commitResponses[node] = commitResp
		mutex.Unlock()
	}
	fmt.Println("Received all responses and they are:", commitResponses)
	for nodeIpP := range commitResponses {
		resp := commitResponses[nodeIpP]
		if resp.Success {
			fmt.Println("Returning commit response:", resp, " from node:", nodeIpP)
			// The previous version wrapped resp.Err unconditionally, so
			// a successful commit still carried a non-nil error; only
			// surface an error when the node actually reported one.
			if resp.Err != "" {
				return true, resp.CommitID, errors.New(resp.Err)
			}
			return true, resp.CommitID, nil
		}
	}
	return false, -1, errors.New(abortedMessage)
}
// commit calls the KVServer.Commit RPC on one node to start committing the
// current transaction. Any RPC failure marks the node dead and comes back
// as an unsuccessful CommitResponse.
func commit(nodeIpPort string, validateNum int, index int) (resp CommitResponse) {
	deadNodeMessage := "The queried kvnode is dead!!!"
	requiredKeyValues := getRequiredKeyValues()
	req := CommitRequest{currentTransaction, requiredKeyValues, validateNum}
	client, err := rpc.Dial("tcp", nodeIpPort)
	if err != nil {
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return CommitResponse{false, -1, deadNodeMessage}
	}
	if err = client.Call("KVServer.Commit", req, &resp); err != nil {
		// Close before bailing; the previous version leaked the
		// connection on this path.
		client.Close()
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return CommitResponse{false, -1, deadNodeMessage}
	}
	if err = client.Close(); err != nil {
		sortedKvnodeIpPortStatuses[index].IsAlive = false
		return CommitResponse{false, -1, deadNodeMessage}
	}
	return
}
// getRequiredKeyValues snapshots, for every key this transaction touched,
// the value that key had in originalKeyValueStore (the map zero value ""
// when the key did not exist).
func getRequiredKeyValues() map[Key]Value {
	snapshot := make(map[Key]Value, len(currentTransaction.KeySet))
	for key := range currentTransaction.KeySet {
		snapshot[key] = originalKeyValueStore[key]
	}
	return snapshot
}
// Abort marks the current transaction aborted. This call always succeeds
// and is purely local — no kvnode is contacted.
func (t *mytx) Abort() {
	currentTransaction.IsAborted = true
	printState()
}
// printState visualizes kvservice's originalKeyValueStore map and the
// currentTransaction. The body is deliberately commented out — it is
// debug tracing left in place so callers keep their hooks; uncomment to
// re-enable.
func printState() {
	// fmt.Println("\nKVSERVICE STATE:")
	// fmt.Println("-originalKeyValueStore:")
	// for k := range originalKeyValueStore {
	// fmt.Println(" Key:", k, "Value:", originalKeyValueStore[k])
	// }
	// fmt.Println("-currentTransaction:")
	// tx := currentTransaction
	// fmt.Println(" --Transaction ID:", tx.ID, "IsAborted:", tx.IsAborted, "IsCommitted:", tx.IsCommitted, "CommitId:", tx.CommitID)
	// fmt.Println(" PutSet:")
	// for k := range tx.PutSet {
	// fmt.Println(" Key:", k, "Value:", tx.PutSet[k])
	// }
	// fmt.Println(" KeySet:")
	// for k := range tx.KeySet {
	// fmt.Println(" Key:", k)
	// }
}
// checkError logs msg and err to the console when err is non-nil,
// terminating the process when exit is true. A nil err is a no-op.
func checkError(msg string, err error, exit bool) {
	if err == nil {
		return
	}
	log.Println(msg, err)
	if exit {
		os.Exit(-1)
	}
}
|
package defaults
import "github.com/openshift/installer/pkg/types/external"
// SetPlatformDefaults sets the defaults for the platform, leaving any
// value already supplied by the user untouched.
func SetPlatformDefaults(p *external.Platform) {
	// Only fill in the name when unset; a defaulting pass should not
	// clobber explicit configuration.
	if p.PlatformName == "" {
		p.PlatformName = "Unknown"
	}
}
|
package light
import (
"testing"
"github.com/calbim/ray-tracer/src/color"
"github.com/calbim/ray-tracer/src/tuple"
)
// TestPointLight checks that PointLight stores its position and intensity
// unchanged.
func TestPointLight(t *testing.T) {
	intensity := color.New(1, 1, 1)
	position := tuple.Point(0, 0, 0)
	got := PointLight(position, intensity)
	if got.Intensity != intensity || got.Position != position {
		want := Light{Intensity: intensity, Position: position}
		t.Errorf("wanted point light to be %v, got %v", want, got)
	}
}
|
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows
import (
"context"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// This suite verifies HostProcess containers: a pod flagged as HostProcess
// must run directly on the Windows host, which is checked by comparing the
// container's %COMPUTERNAME% with the scheduled node's name.
var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] HostProcess containers", func() {
	ginkgo.BeforeEach(func() {
		// HostProcess containers are Windows-only and feature-gated.
		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
		SkipUnlessWindowsHostProcessContainersEnabled()
	})
	f := framework.NewDefaultFramework("host-process-test-windows")
	ginkgo.It("should run as a process on the host/node", func() {
		ginkgo.By("selecting a Windows node")
		targetNode, err := findWindowsNode(f)
		framework.ExpectNoError(err, "Error finding Windows node")
		framework.Logf("Using node: %v", targetNode.Name)
		ginkgo.By("scheduling a pod with a container that verifies %COMPUTERNAME% matches selected node name")
		image := imageutils.GetConfig(imageutils.BusyBox)
		trueVar := true
		podName := "host-process-test-pod"
		// HostProcess pods run as one of the host's built-in accounts.
		user := "NT AUTHORITY\\Local service"
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
			},
			Spec: v1.PodSpec{
				SecurityContext: &v1.PodSecurityContext{
					WindowsOptions: &v1.WindowsSecurityContextOptions{
						HostProcess:   &trueVar,
						RunAsUserName: &user,
					},
				},
				HostNetwork: true,
				Containers: []v1.Container{
					{
						Image: image.GetE2EImage(),
						Name:  "computer-name-test",
						// Exit non-zero when %COMPUTERNAME% differs from
						// the target node's name.
						Command: []string{"cmd.exe", "/K", "IF", "NOT", "%COMPUTERNAME%", "==", targetNode.Name, "(", "exit", "-1", ")"},
					},
				},
				RestartPolicy: v1.RestartPolicyNever,
				// Pin to the chosen node so the name comparison is valid.
				NodeName: targetNode.Name,
			},
		}
		f.PodClient().Create(pod)
		ginkgo.By("Waiting for pod to run")
		f.PodClient().WaitForFinish(podName, 3*time.Minute)
		ginkgo.By("Then ensuring pod finished running successfully")
		p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(
			context.TODO(),
			podName,
			metav1.GetOptions{})
		framework.ExpectNoError(err, "Error retrieving pod")
		framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
	})
})
// SkipUnlessWindowsHostProcessContainersEnabled skips the current test
// unless the WindowsHostProcessContainers feature gate is enabled.
func SkipUnlessWindowsHostProcessContainersEnabled() {
	enabled := framework.TestContext.FeatureGates[string(features.WindowsHostProcessContainers)]
	if enabled {
		return
	}
	e2eskipper.Skipf("Skipping test because feature 'WindowsHostProcessContainers' is not enabled")
}
|
/*
I want to format a JSON string into human-readable form. A string like this:
'{"foo":"hello","bar":"world","c":[1,55,"bye"]}'
would be formatted as:
{
"foo": "hello",
"bar": "world",
"c": [
1,
55,
"bye"
]
}
Rules:
For objects and arrays, properties and items should each start on a new line with an indent of +2, as shown in the example above.
Space after colon is mandatory
This is code-golf. Shortest code in bytes wins!
please be aware that json string should be part of the answer (but we will omit that from number of bytes, example:
echo '{"foo":"hello","bar":"world","c":[1,55,"bye"]}'|npx json
will be counted as 14 bytes => echo |npx json after omitting the string
*/
package main
import (
"bytes"
"encoding/json"
"fmt"
)
// main demonstrates jsonfmt on the challenge's sample input.
func main() {
	const sample = `{"foo":"hello","bar":"world","c":[1,55,"bye"]}`
	fmt.Println(jsonfmt(sample))
}
// jsonfmt re-indents the JSON document s with two-space indentation, as
// the challenge statement requires ("indent of +2"); the previous version
// indented with a tab, which does not match the specified output.
// Returns an error when s is not valid JSON.
func jsonfmt(s string) (string, error) {
	var buf bytes.Buffer
	if err := json.Indent(&buf, []byte(s), "", "  "); err != nil {
		return "", err
	}
	return buf.String(), nil
}
|
package localcache
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/andywow/golang-lessons/lesson-calendar/internal/calendar/repository"
"github.com/andywow/golang-lessons/lesson-calendar/pkg/eventapi"
)
// createTestEvent builds a minimal test event starting at date. The
// returned event's StartTime points at this call's local copy of date.
func createTestEvent(t *testing.T, date time.Time) eventapi.Event {
	t.Helper()
	return eventapi.Event{
		StartTime:   &date,
		Duration:    60,
		Header:      "test",
		Description: "test",
		Username:    "test",
	}
}
// TestCreateEvent verifies events can be created and that creating a
// second event with the same start time fails with ErrDateBusy.
func TestCreateEvent(t *testing.T) {
	s := NewEventLocalStorage()
	now := time.Now()
	event := createTestEvent(t, now)
	err := s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	// a second event on a different day succeeds
	event2 := createTestEvent(t, now.Add(time.Hour*24))
	err = s.CreateEvent(context.Background(), &event2)
	require.NoError(t, err)
	// an event at an already-occupied start time must be rejected
	eventSameTime := createTestEvent(t, now)
	err = s.CreateEvent(context.Background(), &eventSameTime)
	require.Error(t, err)
	assert.Equal(t, repository.ErrDateBusy, err)
}
// TestDeleteEvent verifies deletion succeeds once and then fails with
// ErrEventNotFound for the already-deleted event.
func TestDeleteEvent(t *testing.T) {
	s := NewEventLocalStorage()
	event := createTestEvent(t, time.Now())
	// create event
	err := s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	// delete event
	err = s.DeleteEvent(context.Background(), event.Uuid)
	require.NoError(t, err)
	// deleting the same event again must report not-found
	err = s.DeleteEvent(context.Background(), event.Uuid)
	require.Error(t, err)
	assert.Equal(t, repository.ErrEventNotFound, err)
}
// TestGetEventsForDate verifies that only the two events on today's date
// are returned, not the one two days out.
// NOTE(review): Truncate(24*time.Hour) rounds on absolute (UTC-based)
// time, so near midnight or in non-UTC zones the second event may land on
// a different calendar day — this test looks time-dependent; confirm.
func TestGetEventsForDate(t *testing.T) {
	s := NewEventLocalStorage()
	event := createTestEvent(t, time.Now())
	event2 := createTestEvent(t, time.Now().Truncate(24*time.Hour).Add(time.Hour*4))
	event3 := createTestEvent(t, time.Now().Add(time.Hour*48))
	// create events
	err := s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event2)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event3)
	require.NoError(t, err)
	// only the two same-day events should be returned
	events, err := s.GetEventsForDate(context.Background(), time.Now())
	require.NoError(t, err)
	assert.Equal(t, 2, len(events))
}
// TestGetEventsForWeek verifies that events within the week of startTime
// (day 0 and day +3) are returned while the day +8 event is excluded.
func TestGetEventsForWeek(t *testing.T) {
	s := NewEventLocalStorage()
	// Fixed UTC instant keeps the week boundaries deterministic.
	startTime := time.Date(2020, time.March, 2, 12, 12, 12, 12, time.UTC)
	event := createTestEvent(t, startTime)
	event2 := createTestEvent(t, startTime.Truncate(24*time.Hour).Add(time.Hour*3*24))
	event3 := createTestEvent(t, startTime.Add(time.Hour*8*24))
	// create events
	err := s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event2)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event3)
	require.NoError(t, err)
	// only the two in-week events should be returned
	events, err := s.GetEventsForWeek(context.Background(), startTime)
	require.NoError(t, err)
	assert.Equal(t, 2, len(events))
}
// TestGetEventsForMonth verifies that events within the month of
// startTime (day 0 and day +7) are returned while the day +29 event
// (which falls in March 2020, February having 29 days) is excluded.
func TestGetEventsForMonth(t *testing.T) {
	s := NewEventLocalStorage()
	// Fixed UTC instant keeps the month boundaries deterministic.
	startTime := time.Date(2020, time.February, 1, 12, 12, 12, 12, time.UTC)
	event := createTestEvent(t, startTime)
	event2 := createTestEvent(t, startTime.Truncate(24*time.Hour).Add(time.Hour*7*24))
	event3 := createTestEvent(t, startTime.Add(time.Hour*29*24))
	// create events
	err := s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event2)
	require.NoError(t, err)
	err = s.CreateEvent(context.Background(), &event3)
	require.NoError(t, err)
	// only the two in-month events should be returned
	events, err := s.GetEventsForMonth(context.Background(), startTime)
	require.NoError(t, err)
	assert.Equal(t, 2, len(events))
}
// TestUpdateEvent verifies updating a missing event fails with
// ErrEventNotFound and that updating an existing event succeeds.
func TestUpdateEvent(t *testing.T) {
	s := NewEventLocalStorage()
	event := createTestEvent(t, time.Now())
	// updating before creation must report not-found
	err := s.UpdateEvent(context.Background(), &event)
	require.Error(t, err)
	assert.Equal(t, err, repository.ErrEventNotFound)
	// create event
	err = s.CreateEvent(context.Background(), &event)
	require.NoError(t, err)
	// move the event one day later and update
	newTime := event.StartTime.Add(time.Duration(60*24) * time.Minute)
	event.StartTime = &newTime
	err = s.UpdateEvent(context.Background(), &event)
	require.NoError(t, err)
}
|
package main
import "fmt"
// func keywork | [receiver] | <func name> | [return] | ([params]) | { //code }
func main() {
	const greeting = "Hello World!"
	fmt.Println(greeting)
}
// main is the entry point to your program
|
package main
import (
"fmt"
"github.com/ClarityServices/skynet2"
"github.com/ClarityServices/skynet2/daemon"
"github.com/ClarityServices/skynet2/log"
"github.com/kballard/go-shellquote"
"os"
"sync"
"text/template"
)
var startTemplate = template.Must(template.New("").Parse(
	`Started service with UUID {{.UUID}}.
`))

// Start launches the named binary (args[0], with the remaining args
// shell-quoted) on every host matching criteria, in parallel, and prints
// each started service's UUID.
func Start(criteria *skynet.Criteria, args []string) {
	if len(args) < 1 {
		fmt.Println("Please provide a service name 'sky start binaryName'")
		return
	}
	hosts, err := skynet.GetServiceManager().ListHosts(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, host := range hosts {
		wait.Add(1)
		go func(host string) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Starting on host: " + host)
			d := daemon.GetDaemonForHost(host)
			in := daemon.StartSubServiceRequest{
				BinaryName: args[0],
				Args:       shellquote.Join(args[1:]...),
				// TODO: maybe an optional flag to change this?
				Registered: true,
			}
			out, err := d.StartSubService(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			startTemplate.Execute(os.Stdout, out)
		}(host)
	}
	wait.Wait()
}
var stopTemplate = template.Must(template.New("").Parse(
	`{{if .Ok}}Stopped service with UUID {{.UUID}}.
{{else}}Service with UUID {{.UUID}} is already stopped.
{{end}}`))

// Stop halts every non-daemon instance matching criteria, in parallel.
func Stop(criteria *skynet.Criteria) {
	instances, err := skynet.GetServiceManager().ListInstances(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, instance := range filterDaemon(instances) {
		wait.Add(1)
		go func(instance skynet.ServiceInfo) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Stopping: " + instance.UUID)
			d := daemon.GetDaemonForService(&instance)
			in := daemon.StopSubServiceRequest{
				UUID: instance.UUID,
			}
			out, err := d.StopSubService(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			stopTemplate.Execute(os.Stdout, out)
		}(instance)
	}
	wait.Wait()
}
var restartTemplate = template.Must(template.New("").Parse(
	`{{if .Ok}}Restarted service with UUID {{.UUID}}.
{{else}}Service with UUID {{.UUID}} is not running.
{{end}}`))

// Restart restarts every non-daemon instance matching criteria, in
// parallel.
func Restart(criteria *skynet.Criteria) {
	instances, err := skynet.GetServiceManager().ListInstances(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, instance := range filterDaemon(instances) {
		wait.Add(1)
		go func(instance skynet.ServiceInfo) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Restarting: " + instance.UUID)
			d := daemon.GetDaemonForService(&instance)
			in := daemon.RestartSubServiceRequest{
				UUID: instance.UUID,
			}
			out, err := d.RestartSubService(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			restartTemplate.Execute(os.Stdout, out)
		}(instance)
	}
	wait.Wait()
}
var registerTemplate = template.Must(template.New("").Parse(
	`Registered service with UUID {{.UUID}}.
`))

// Register registers every non-daemon instance matching criteria, in
// parallel.
func Register(criteria *skynet.Criteria) {
	instances, err := skynet.GetServiceManager().ListInstances(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, instance := range filterDaemon(instances) {
		wait.Add(1)
		go func(instance skynet.ServiceInfo) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Registering: " + instance.UUID)
			d := daemon.GetDaemonForService(&instance)
			in := daemon.RegisterSubServiceRequest{
				UUID: instance.UUID,
			}
			out, err := d.RegisterSubService(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			registerTemplate.Execute(os.Stdout, out)
		}(instance)
	}
	wait.Wait()
}
var unregisterTemplate = template.Must(template.New("").Parse(
	`Unregistered service with UUID {{.UUID}}.
`))

// Unregister unregisters every non-daemon instance matching criteria, in
// parallel.
func Unregister(criteria *skynet.Criteria) {
	instances, err := skynet.GetServiceManager().ListInstances(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, instance := range filterDaemon(instances) {
		wait.Add(1)
		go func(instance skynet.ServiceInfo) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Unregistering: " + instance.UUID)
			d := daemon.GetDaemonForService(&instance)
			in := daemon.UnregisterSubServiceRequest{
				UUID: instance.UUID,
			}
			out, err := d.UnregisterSubService(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			unregisterTemplate.Execute(os.Stdout, out)
		}(instance)
	}
	wait.Wait()
}
var logLevelTemplate = template.Must(template.New("").Parse(
	`Set LogLevel to {{.Level}} for UUID {{.UUID}}.
`))

// SetLogLevel changes the log level of every non-daemon instance matching
// criteria, in parallel.
func SetLogLevel(criteria *skynet.Criteria, level string) {
	instances, err := skynet.GetServiceManager().ListInstances(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, instance := range filterDaemon(instances) {
		wait.Add(1)
		go func(instance skynet.ServiceInfo) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			fmt.Println("Setting LogLevel to " + level + " for: " + instance.UUID)
			d := daemon.GetDaemonForService(&instance)
			in := daemon.SubServiceLogLevelRequest{
				UUID:  instance.UUID,
				Level: level,
			}
			out, err := d.SubServiceLogLevel(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			logLevelTemplate.Execute(os.Stdout, out)
		}(instance)
	}
	wait.Wait()
}
// SetDaemonLogLevel changes the daemon's own log level on every host
// matching criteria, in parallel.
func SetDaemonLogLevel(criteria *skynet.Criteria, level string) {
	hosts, err := skynet.GetServiceManager().ListHosts(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, host := range hosts {
		wait.Add(1)
		go func(host string) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			d := daemon.GetDaemonForHost(host)
			in := daemon.LogLevelRequest{
				Level: level,
			}
			out, err := d.LogLevel(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			if out.Ok {
				fmt.Printf("Set daemon log level to %v on host: %v\n", level, host)
			} else {
				fmt.Printf("Failed to set daemon log level to %v on host: %v\n", level, host)
			}
		}(host)
	}
	wait.Wait()
}
// StopDaemon stops the skynet daemon itself on every host matching
// criteria, in parallel.
func StopDaemon(criteria *skynet.Criteria) {
	hosts, err := skynet.GetServiceManager().ListHosts(criteria)
	if err != nil {
		log.Fatal(err)
	}
	var wait sync.WaitGroup
	for _, host := range hosts {
		wait.Add(1)
		go func(host string) {
			// defer releases the WaitGroup on every path; the previous
			// version repeated wait.Done() by hand before each return.
			defer wait.Done()
			d := daemon.GetDaemonForHost(host)
			in := daemon.StopRequest{}
			out, err := d.Stop(in)
			if err != nil {
				fmt.Println("Returned Error: " + err.Error())
				return
			}
			if out.Ok {
				fmt.Printf("Daemon stopped on host: %v\n", host)
			} else {
				fmt.Printf("Failed to stop daemon on host: %v\n", host)
			}
		}(host)
	}
	wait.Wait()
}
// filterDaemon returns the instances with the SkynetDaemon service itself
// removed, so daemon-wide commands never act on the daemon.
func filterDaemon(instances []skynet.ServiceInfo) []skynet.ServiceInfo {
	filtered := make([]skynet.ServiceInfo, 0, len(instances))
	for _, inst := range instances {
		if inst.Name == "SkynetDaemon" {
			continue
		}
		filtered = append(filtered, inst)
	}
	return filtered
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestFloat64Set verifies Float64Set deduplicates repeated inserts and
// answers membership queries correctly.
func TestFloat64Set(t *testing.T) {
	assert := assert.New(t)
	set := NewFloat64Set()
	vals := []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0}
	// Insert each value several times; the set must keep exactly one copy.
	for _, v := range vals {
		for n := 0; n < 5; n++ {
			set.Insert(v)
		}
	}
	assert.Equal(len(vals), set.Count())
	assert.Equal(len(vals), len(set))
	for _, v := range vals {
		assert.True(set.Exist(v))
	}
	assert.False(set.Exist(3))
}
|
// Package concurrency provides common concurrency patterns and utilities.
package concurrency
import "github.com/pkg/errors"
// Runnable describes something which can start and stop.
type Runnable interface {
	// Start begins the work; it returns an error when startup fails.
	Start() error
	// Stop requests the work to halt.
	Stop()
}
// AsyncRunnable is a Runnable which can run asynchronously.
type AsyncRunnable interface {
	Runnable
	// StopWait stops the runnable and blocks until it has finished.
	StopWait()
}
// Worker is anything that can work.
type Worker interface {
	Work() error
}

// WorkFunc is the worker function.
type WorkFunc func() error

// Work adapts WorkFunc to the Worker interface.
func (f WorkFunc) Work() error {
	return f()
}
//go:generate mockery -name Worker -case underscore

// Retryable is an interface which describes whether something is retryable.
type Retryable interface {
	Retryable() bool
}

// Stopped is a special error value that signals the runnable is stopped.
// NOTE(review): Go convention would name this ErrStopped; renaming is an
// API break for existing callers, so it is only flagged here.
var Stopped = errors.New("stopped")
|
package testutil
import (
"database/sql"
"testing"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
)
// OpenDBForTest opens the local MySQL test database and returns it
// wrapped in sqlx; the underlying connection is closed automatically via
// t.Cleanup when the test finishes.
func OpenDBForTest(t *testing.T) *sqlx.DB {
	t.Helper()
	const dsn = "todo:todo@tcp(127.0.0.1:33306)/todo?parseTime=true"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		_ = db.Close()
	})
	return sqlx.NewDb(db, "mysql")
}
|
package ca
import (
"regexp"
"strings"
"github.com/rightscale/rsc/ca/cac"
"github.com/rightscale/rsc/cmd"
"github.com/rightscale/rsc/metadata"
"github.com/rightscale/rsc/rsapi"
)
// GenMetadata is synthesized from all CA APIs' metadata; set up once at
// package initialization.
var GenMetadata = setupMetadata()

// API is the CA 1.0 common client to all cloud analytics APIs.
type API struct {
	*rsapi.API
}
// FromCommandLine builds a CA client from the parsed command line, pointing
// it at the analytics host derived from the login host.
func FromCommandLine(cmdLine *cmd.CommandLine) (*API, error) {
	base, err := rsapi.FromCommandLine(cmdLine)
	if err != nil {
		return nil, err
	}
	base.Host = apiHostFromLogin(cmdLine.Host)
	base.Metadata = GenMetadata
	return &API{API: base}, nil
}
// New returns a CA API client for host h authenticated by a.
func New(h string, a rsapi.Authenticator) *API {
	client := rsapi.New(h, a)
	client.Metadata = GenMetadata
	return &API{API: client}
}
// Host-class patterns, compiled once at package init. The original called
// regexp.MatchString per invocation and silently discarded its error return.
var (
	integrationHostRE = regexp.MustCompile("^cobalt")
	stagingHostRE     = regexp.MustCompile("^moo")
)

// apiHostFromLogin derives the analytics API host from the login host by
// replacing its first DNS label: integration ("cobalt...") and staging
// ("moo...") hosts map to their dedicated analytics front ends, everything
// else to the production "analytics" prefix.
func apiHostFromLogin(host string) string {
	prefix := "analytics"
	switch {
	case integrationHostRE.MatchString(host):
		prefix = "ca1-analytics-499"
	case stagingHostRE.MatchString(host):
		prefix = "moo-analytics"
	}
	urlElems := strings.Split(host, ".")
	urlElems[0] = prefix
	return strings.Join(urlElems, ".")
}
// setupMetadata initializes GenMetadata by merging each CA API's generated
// metadata into one resource map.
func setupMetadata() map[string]*metadata.Resource {
	merged := make(map[string]*metadata.Resource, len(cac.GenMetadata))
	for name, resource := range cac.GenMetadata {
		merged[name] = resource
	}
	return merged
}
|
package orm
import (
//"GoldenTimes-web/models"
//"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
)
// Insert persists i through a fresh beego ORM instance, returning the
// inserted row id (or affected count) and any error.
func Insert(i interface{}) (int64, error) {
	return orm.NewOrm().Insert(i)
}
|
package test_driver
import (
"github.com/netapp/netappdvp/storage_drivers"
log "github.com/Sirupsen/logrus"
)
// FakeStorageDriverConfig mirrors the fields of a real ONTAP-style driver
// config so tests can exercise the same JSON surface without hardware.
type FakeStorageDriverConfig struct {
	storage_drivers.CommonStorageDriverConfig // embedded types replicate all fields
	ManagementLIF string `json:"managementLIF"`
	DataLIF string `json:"dataLIF"`
	IgroupName string `json:"igroupName"`
	SVM string `json:"svm"`
	Username string `json:"username"`
	Password string `json:"password"`
	Aggregate string `json:"aggregate"`
}

// FakeStorageDriver is a no-op storage driver used for testing; every
// operation just logs and succeeds.
type FakeStorageDriver struct {
	Initialized bool
	Config FakeStorageDriverConfig
}

// FakeStorageDriverName is the name under which this driver registers.
const FakeStorageDriverName = "fake"
// Name returns the registered name of the fake driver.
func (d *FakeStorageDriver) Name() string {
	return FakeStorageDriverName
}
// Initialize resets the driver with an empty config and marks it initialized.
// NOTE(review): configJSON is currently ignored — the TODO below is still open.
func (d *FakeStorageDriver) Initialize(configJSON string) error {
	//TODO: Figure out how to fill in config from configJSON
	config := &FakeStorageDriverConfig{}
	d.Config = *config
	d.Initialized = true
	//TODO:Call Validate d.Validate()
	return nil
}
// Validate is a no-op stub; it logs the call and always succeeds.
func (d *FakeStorageDriver) Validate() error {
	//TODO: Validate test data?
	log.Debugf("FakeStorageDriver.Validate()")
	return nil
}
// Create is a no-op stub; it logs the requested name and options and succeeds.
func (d *FakeStorageDriver) Create(name string, opts map[string]string) error {
	//TODO: Add logic once theres a need
	log.Debugf("FakeStorageDriver.Create()- name: %v, opts: %v", name, opts)
	return nil
}
// CreateClone is a no-op stub; it logs the clone parameters and succeeds.
func (d *FakeStorageDriver) CreateClone(name, source, snapshot, newSnapshotPrefix string) error {
	log.Debugf("FakeStorageDriver.CreateClone()- \n\tname: %v, \n\tsource: %v, \n\tsnapshot: %v, \n\tnewSnapshotPrefix: %v",
		name, source, snapshot, newSnapshotPrefix)
	//TODO: Add logic once theres a need
	return nil
}
// Destroy is a no-op stub; it logs the volume name and succeeds.
func (d *FakeStorageDriver) Destroy(name string) error {
	log.Debugf("FakeStorageDriver.Destroy()- \n\tname: %v", name)
	//TODO: Add logic once theres a need
	return nil
}
// Attach is a no-op stub; it logs the attach request and succeeds.
func (d *FakeStorageDriver) Attach(name, mountpoint string, opts map[string]string) error {
	log.Debugf("FakeStorageDriver.Attach()- name: %v, mountpoint: %v, opts: %v", name, mountpoint, opts)
	//TODO: Add logic once theres a need
	return nil
}
// Detach is a no-op stub; it logs the detach request and succeeds.
func (d *FakeStorageDriver) Detach(name, mountpoint string) error {
	log.Debugf("FakeStorageDriver.Detach()- name: %v, mountpoint: %v", name, mountpoint)
	//TODO: Add logic once theres a need
	return nil
}
// DefaultStoragePrefix returns the prefix prepended to volume names.
func (d *FakeStorageDriver) DefaultStoragePrefix() string {
	log.Debugf("FakeStorageDriver.DefaultStoragePrefix()")
	return "fake_"
}
// DefaultSnapshotPrefix returns the prefix prepended to snapshot names.
func (d *FakeStorageDriver) DefaultSnapshotPrefix() string {
	log.Debugf("FakeStorageDriver.DefaultSnapshotPrefix()")
	return "fake_"
}
// SnapshotList is a stub; it logs the request and returns a nil snapshot
// slice with no error.
func (d *FakeStorageDriver) SnapshotList(name string) ([]storage_drivers.CommonSnapshot, error) {
	log.Debugf("FakeStorageDriver.SnapshotList()- name: %v", name)
	var snapshots []storage_drivers.CommonSnapshot
	//TODO: Add necessary stuff here
	return snapshots, nil
}
|
package cmd
import (
"MyCart/cmd/Services"
"github.com/spf13/cobra"
)
// addProductsToCatlogCmd adds a product (category, subcategory, name,
// specification, price) to the catalog.
// The original Short/Long text was copy-pasted from a "view categories"
// command and described the wrong behavior to the user.
var addProductsToCatlogCmd = &cobra.Command{
	Use:   "addProductsToCatlog",
	Short: "Add a product to the catalog",
	Long:  `Add a product with its category, subcategory, name, specification and price to the catalog`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return Services.AddProductsToCatlog(category, subcategory, productname, specification, price)
	},
}
// viewCartDetailsCmd shows the contents of the current user's cart.
// The original Short/Long text was copy-pasted from a "view categories"
// command and described the wrong behavior to the user.
var viewCartDetailsCmd = &cobra.Command{
	Use:   "viewCartDetails",
	Short: "View the items in your cart",
	Long:  `View all the items currently in your shopping cart`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return Services.ViewCartDetails(userName)
	},
}
// viewBillDetailsCmd shows the bill for the current user.
// The original Short/Long text was copy-pasted from a "view categories"
// command and described the wrong behavior to the user.
var viewBillDetailsCmd = &cobra.Command{
	Use:   "viewBillDetails",
	Short: "View your bill details",
	Long:  `View the bill details for your purchases`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return Services.ViewBillDetails(userName)
	},
}
// init registers the catalog, cart and bill subcommands on the root command.
func init() {
	rootCmd.AddCommand(addProductsToCatlogCmd)
	rootCmd.AddCommand(viewCartDetailsCmd)
	rootCmd.AddCommand(viewBillDetailsCmd)
}
|
package payment
import (
"fmt"
)
// Cash is a payment method representing physical currency.
type Cash struct {
}

// CreateCashAccount constructs a new Cash payment method.
func CreateCashAccount() *Cash {
	account := Cash{}
	return &account
}

// ProcessPayment handles a cash transaction of the given amount and reports
// whether it succeeded (always true for cash).
func (c Cash) ProcessPayment(amount float32) bool {
	fmt.Println("Processing a cash transaction...")
	return true
}
|
package http
import (
prox "github.com/davepgreene/slackmac/proxy"
log "github.com/sirupsen/logrus"
"github.com/vulcand/oxy/stream"
"net/http"
)
// proxy wraps the slackmac reverse proxy for url in an oxy streaming
// middleware and returns it as an http.Handler.
// NOTE(review): log.Fatal on a stream setup error terminates the process;
// this looks intentional for startup wiring — confirm if used elsewhere.
func proxy(url string) http.Handler {
	proxy := prox.New(url)
	s, err := stream.New(proxy)
	if err != nil {
		log.Fatal(err)
	}
	return s
}
|
/*
Copyright 2020 SUSE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mits_test
import (
. "github.com/onsi/ginkgo"
"github.com/cloudfoundry-incubator/cf-test-helpers/generator"
"github.com/SUSE/minibroker-integration-tests/mits"
)
// MariaDB integration suite: deploys the sample app against a MariaDB
// service instance, once with explicit provisioning parameters and once
// relying on the broker's overrideParams.
var _ = Describe("MariaDB", func() {
	BeforeEach(func() {
		if !mitsConfig.Tests.MariaDB.Enabled {
			Skip("All MariaDB tests are disabled")
		}
	})
	Context("Without overrideParams set", func() {
		BeforeEach(func() {
			if mitsConfig.Minibroker.Provisioning.OverrideParams.Enabled {
				Skip("overrideParams are set")
			}
		})
		It("should deploy and connect WITH extra provisioning parameters", func() {
			// Random db/user names avoid collisions between test runs.
			mits.SimpleAppAndService(
				testSetup,
				mitsConfig.Tests.MariaDB,
				mitsConfig.Timeouts,
				serviceBrokerName,
				"assets/mysqlapp",
				map[string]interface{}{
					"db": map[string]interface{}{
						"name": generator.PrefixedRandomName(mitsConfig.Tests.MariaDB.Class, "db"),
						"user": generator.PrefixedRandomName(mitsConfig.Tests.MariaDB.Class, "user"),
					},
					"replication": map[string]interface{}{
						"enabled": false,
					},
				},
			)
		})
	})
	Context("With overrideParams set", func() {
		BeforeEach(func() {
			if !mitsConfig.Minibroker.Provisioning.OverrideParams.Enabled {
				Skip("overrideParams are not set")
			}
		})
		It("should deploy and connect WITHOUT extra provisioning parameters", func() {
			// nil params: the broker-side overrideParams supply provisioning config.
			mits.SimpleAppAndService(
				testSetup,
				mitsConfig.Tests.MariaDB,
				mitsConfig.Timeouts,
				serviceBrokerName,
				"assets/mysqlapp",
				nil,
			)
		})
	})
})
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"simpleDateParser"
"syscall"
)
// main wires the simpleDateParser service to an HTTP listener and blocks
// until either the server fails or the process receives SIGINT/SIGTERM.
func main() {
	var (
		httpAddr = flag.String("http", ":8080", "http port to listen on")
	)
	flag.Parse()
	ctx := context.Background()
	// our simpleDateParser service
	srv := simpleDateParser.NewService()
	// errChan carries the first fatal condition: a signal or a server error.
	errChan := make(chan error)
	// This goroutine is to stop the server when a user presses CTRL + C
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errChan <- fmt.Errorf("%s", <-c)
	}()
	// mapping endpoints
	endpoints := simpleDateParser.Endpoints{
		GetEndpoint:      simpleDateParser.MakeGetEndpoint(srv),
		StatusEndpoint:   simpleDateParser.MakeStatusEndpoint(srv),
		ValidateEndpoint: simpleDateParser.MakeValidateEndpoint(srv),
	}
	// HTTP transport
	go func() {
		log.Println("simpleDateParser is listening on port: ", *httpAddr)
		handler := simpleDateParser.NewHTTPServer(ctx, endpoints)
		errChan <- http.ListenAndServe(*httpAddr, handler)
	}()
	// Block until the first error, then exit with it.
	log.Fatalln(<-errChan)
}
|
package main
import (
"fmt"
"strconv"
"time"
)
// main fans 25 jobs out to five workers sharing one unbuffered channel.
// NOTE(review): the channel is never closed and main returns right after the
// last send, so workers (and their in-flight prints) are cut off when the
// process exits — confirm whether a close + WaitGroup was intended.
func main() {
	channel := make(chan int)
	for i := 0; i < 5; i++ {
		canal := "Canal-" + strconv.Itoa(i)
		go worker(channel, canal)
	}
	for i := 0; i < 25; i++ {
		channel <- i
	}
}
// worker consumes values from channel until it is closed, printing each
// value tagged with canal and pausing two seconds between items.
func worker(channel chan int, canal string) {
	for value := range channel {
		fmt.Println(canal, value)
		time.Sleep(2 * time.Second)
	}
}
|
package XMLParsers
import (
"encoding/xml"
"WorkingPromo/Utils"
)
//OffersXML is the top-level XML response we receive for the offers of a
//section; some response fields may have been intentionally left out.
type OffersXML struct {
	XMLName xml.Name `xml:"digiseller.response"`
	Retval string `xml:"retval"`
	RetDesc string `xml:"retdesc"`
	IdSection string `xml:"id_section"`
	NameSection string `xml:"name_section"`
	Page string `xml:"page"`
	Order string `xml:"order"`
	CountGoods string `xml:"cnt_good"`
	Pages string `xml:"pages"`
	Rows OfferRows `xml:"rows"`
}

//OfferRows is the array of offer rows; the cnt attribute carries how many
//goods the response contains.
type OfferRows struct {
	RowsCount string `xml:"cnt,attr"`
	OfferRows []OfferRowXML `xml:"row"`
}

//OfferStatistic holds seller statistics delivered together with an offer.
type OfferStatistic struct {
	CountSell string `xml:"cnt_sell"`
	CountReturn string `xml:"cnt_return"`
	CountGoodResponses string `xml:"cnt_goodresponses"`
	CountBadResponses string `xml:"cnt_badresponses"`
}

//OfferRowXML is a row for a particular offer — all the information there is.
type OfferRowXML struct {
	OfferRowID string `xml:"id,attr"`
	OfferID string `xml:"id_goods"`
	OffersName string `xml:"name_goods"`
	Price string `xml:"price"`
	Currency string `xml:"currency"`
	Discount string `xml:"discount"`
	Gift string `xml:"gift"`
	Reward string `xml:"reward"`
	IdSeller string `xml:"id_seller"`
	NameSeller string `xml:"name_seller"`
	Rating string `xml:"rating"`
	Summpay string `xml:"summpay"`
	Statistics OfferStatistic `xml:"statistics"`
}

//GetOffersXML is the request we send to the partner API for the offer list
//of a section. Important: the rows-per-page count and the currency must be
//set, along with (of course) the agent id.
type GetOffersXML struct {
	XMLName xml.Name `xml:"digiseller.request"`
	AgentId string `xml:"guid_agent"`
	IDSection string `xml:"id_section"`
	Lang string `xml:"lang"`
	Encoding string `xml:"encoding"`
	Page string `xml:"page"`
	Rows string `xml:"rows"`
	Currency string `xml:"currency"`
	Order string `xml:"order"`
}
//GetOffersInSectionXML builds the request XML bytes for fetching the offers
//of a section, using the package-wide agent id and language.
func GetOffersInSectionXML(section string, rowsNumber string, currency string) ([]byte) {
	v := &GetOffersXML{AgentId: Utils.AgentID, IDSection: section,
		Lang: Utils.Lang, Encoding: "utf-8", Rows: rowsNumber, Currency: currency}
	getOffersInSectionXML, err := xml.MarshalIndent(&v, "", " ")
	Utils.CheckError(err)
	return getOffersInSectionXML
}
//Переводим байты в структуры, как и в секциях вообщем, ничего интересного
func GetOffersViaStructXML(data []byte) (*OffersXML) {
var sections = &OffersXML{}
err := xml.Unmarshal(data, §ions)
Utils.CheckError(err)
return sections
}
|
package Utils
import (
"math/rand"
)
// Delete removes the element at index from array and returns the shortened
// slice. The backing array of the argument is reused (and mutated).
func Delete(array []int, index int) []int {
	return append(array[:index], array[index+1:]...)
}
// GenArray returns a pseudo-random permutation of the integers 1..n.
// It returns an empty slice when n <= 0 (the original indexed out of range
// and panicked in that case). The parameter was also renamed from `len`,
// which shadowed the builtin.
func GenArray(n int) []int {
	if n <= 0 {
		return []int{}
	}
	sorted := make([]int, n)
	for i := 0; i < n; i++ {
		sorted[i] = i + 1
	}
	unsorted := make([]int, 0, n)
	for len(sorted) > 1 {
		// Intn over the full remaining length removes the original's bias:
		// rand.Intn(len-i-1) could never pick the last remaining element.
		j := rand.Intn(len(sorted))
		unsorted = append(unsorted, sorted[j])
		sorted = append(sorted[:j], sorted[j+1:]...)
	}
	return append(unsorted, sorted[0])
}
package ctx
import (
"net/http"
)
// ctxKeyUserID is the private context key under which the user id is stored.
type ctxKeyUserID struct{}

// GetUserID reads the user id from the request context; it returns ""
// when no id has been stored.
func GetUserID(r *http.Request) string {
	id, ok := getStr(ctxKeyUserID{}, r)
	if !ok {
		return ""
	}
	return id
}
// SetUserID stores userID in context and returns the derived request
// that carries it.
func SetUserID(id string, r *http.Request) *http.Request {
	return set(ctxKeyUserID{}, id, r)
}
|
package suite_init
import (
"fmt"
"os"
"path/filepath"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
. "github.com/onsi/gomega"
"github.com/werf/werf/integration/pkg/utils"
)
// TmpDirData holds the per-test temporary directory and the path tests
// should treat as their working directory (currently the same dir).
type TmpDirData struct {
	TmpDir string
	TestDirPath string
}

// NewTmpDirData wires BeforeEach/AfterEach hooks that create and remove a
// fresh temp dir around every spec, and returns the holder they populate.
func NewTmpDirData() *TmpDirData {
	data := &TmpDirData{}
	SetupTmpDir(&data.TmpDir, &data.TestDirPath)
	return data
}
// SetupTmpDir registers ginkgo hooks: BeforeEach allocates a temp dir into
// *tmpDir (and mirrors it into *testDirPath), AfterEach removes it.
// The bool return only exists so the call can seed a package-level var.
func SetupTmpDir(tmpDir, testDirPath *string) bool {
	ginkgo.BeforeEach(func() {
		*tmpDir = utils.GetTempDir()
		*testDirPath = *tmpDir
	})
	ginkgo.AfterEach(func() {
		err := os.RemoveAll(*tmpDir)
		Ω(err).ShouldNot(HaveOccurred())
	})
	return true
}
// GetProjectWorktree returns the path of the project's git worktree
// directory inside the test dir.
func (data *TmpDirData) GetProjectWorktree(projectName string) string {
	worktree := fmt.Sprintf("%s.worktree", projectName)
	return filepath.Join(data.TestDirPath, worktree)
}
// CommitProjectWorktree replaces the project's worktree with the fixture
// directory contents and commits the result into the project's repo.
func (data *TmpDirData) CommitProjectWorktree(projectName, worktreeFixtureDir, commitMessage string) {
	worktreeDir := data.GetProjectWorktree(projectName)
	repoDir := filepath.Join(data.TestDirPath, fmt.Sprintf("%s.repo", projectName))
	// Start from a clean worktree so fixtures never mix between specs.
	gomega.Expect(os.RemoveAll(worktreeDir)).To(Succeed())
	utils.CopyIn(worktreeFixtureDir, worktreeDir)
	gomega.Expect(utils.SetGitRepoState(worktreeDir, repoDir, commitMessage)).To(gomega.Succeed())
}
|
package manager
import (
"github.com/golang/glog"
"sub_account_service/finance/db"
)
// InitAutoTB logs when automatic table migration is enabled.
// NOTE(review): this currently only logs; if tables are meant to be
// auto-migrated here, confirm against the finance/db package.
func InitAutoTB() {
	// `if x` rather than the redundant, non-idiomatic `if x == true`.
	if db.AutoMigrate {
		glog.Infoln("init AutoMigrate mysql db tables")
	}
}
|
package xml
import (
"bufio"
"fmt"
"sync"
)
// endPool recycles EndElement values to cut allocations during parsing.
var endPool = sync.Pool{
	New: func() interface{} {
		return new(EndElement)
	},
}

// releaseEnd returns an EndElement to the pool.
// NOTE(review): the Reset call is commented out, so pooled elements retain
// their previous name bytes until SetName/parse overwrites them — confirm
// every reuse path re-initializes the name.
func releaseEnd(end *EndElement) {
	//end.Reset()
	endPool.Put(end)
}
// EndElement represents a XML end element such as "</name>".
type EndElement struct {
	// name holds the element name bytes; reused across Set/Reset calls.
	name []byte
}

// NewEnd creates a new EndElement with the given name.
func NewEnd(name string) *EndElement {
	e := EndElement{name: []byte(name)}
	return &e
}

// String returns the textual form of the end element, e.g. "</name>".
func (e *EndElement) String() string {
	return fmt.Sprintf("</%s>", e.name)
}

// SetName sets the element name, reusing the existing buffer.
func (e *EndElement) SetName(name string) {
	e.SetNameBytes([]byte(name))
}

// SetNameBytes sets the element name from a byte slice, reusing the
// existing buffer.
func (e *EndElement) SetNameBytes(name []byte) {
	e.name = append(e.name[:0], name...)
}

// Reset clears the name while keeping its capacity for reuse.
func (e *EndElement) Reset() {
	e.name = e.name[:0]
}

// Name returns the name of the XML node.
func (e *EndElement) Name() string {
	return string(e.name)
}

// NameBytes returns the name of the XML node in bytes.
func (e *EndElement) NameBytes() []byte {
	return e.name
}
// NameUnsafe returns a string holding the name parameter.
//
// This function differs from Name() on using unsafe methods: the returned
// string aliases the internal buffer, so it is invalidated by any later
// SetName/Reset/parse call.
func (e *EndElement) NameUnsafe() string {
	return b2s(e.name)
}
// parse reads an end-element name from r, which is assumed to be positioned
// just after "</". Leading whitespace is skipped, interior spaces are
// dropped, and reading stops at '>' or the first read error, which is
// returned (nil on a clean '>').
func (e *EndElement) parse(r *bufio.Reader) error {
	e.Reset()
	c, err := skipWS(r)
	if err != nil {
		return err
	}
	e.name = append(e.name, c)
	for {
		c, err = r.ReadByte()
		if err != nil {
			break
		}
		if c == '>' {
			break
		}
		if c == ' ' {
			continue
		}
		e.name = append(e.name, c)
	}
	return err
}
|
package platform
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"image"
"io"
"io/ioutil"
"log"
"os"
"time"
"github.com/jcorbin/anansi"
"github.com/jcorbin/anansi/ansi"
)
var (
	// errReplayDone signals that the replay ran out of frames.
	errReplayDone = errors.New("replay done")
	// errReplayStop signals that the replay was stopped early.
	errReplayStop = errors.New("replay stop")
)
// replay holds the state needed to play back a recorded input session.
type replay struct {
	cereal []byte              // serialized platform state captured at record time
	input anansi.InputReplay   // full recorded frame list
	cur anansi.InputReplay     // frames not yet consumed
	frame anansi.InputFrame    // frame currently being replayed
	pause time.Time            // non-zero while playback is paused
	size image.Point           // terminal size announced by resize messages
	mouse struct {
		Mouse
		decay int // frames left before the cursor overlay fades
	}
}
// update advances the replay by one frame: it handles the Ctrl-C/space/'.'
//控制 keys, consumes control messages (resize), loads the frame's input
// into the context, and finally runs the client against it.
func (rep *replay) update(ctx *Context) {
	if len(rep.cur) == 0 {
		ctx.Err = errReplayDone
		return
	}
	// Ctrl-C stops replay
	if ctx.Input.HasTerminal('\x03') {
		log.Printf("stopping replay")
		ctx.Platform.replay = nil
		return
	}
	// Draw the cursor/status overlay after the client has rendered.
	defer func() {
		if ctx.Err == nil {
			rep.drawOverlay(ctx)
		}
	}()
	// <Space> (un)pauses replay
	if ctx.Input.CountRune(' ')%2 == 1 {
		if rep.pause.IsZero() {
			rep.pause = rep.frame.T
		} else {
			rep.pause = time.Time{}
		}
	}
	switch {
	// When paused '.' steps
	case !rep.pause.IsZero() && ctx.Input.CountRune('.')%2 == 1:
		rep.pause = rep.frame.T
		fallthrough
	case rep.pause.IsZero():
		// next non-message frame
		for rep.next() {
			switch m := rep.frame.M; {
			case bytes.HasPrefix(m, []byte("resize:")):
				if sz, err := parseSize(m[7:]); err != nil {
					log.Printf("invalid resize message %q in replay", m)
				} else {
					rep.size = sz
				}
			case len(m) > 0:
				log.Printf("unrecognized replay message %q", m)
			}
			// Message-only frames carry a zero time; keep scanning.
			if !rep.frame.T.IsZero() {
				break
			}
		}
		if rep.frame.T.IsZero() {
			ctx.Err = errReplayDone
			return
		}
		// load replay frame input
		ctx.events.Clear()
		ctx.events.DecodeBytes(rep.frame.B)
		// update mouse cursor state
		if m, have := ctx.events.LastMouse(false); have {
			rep.mouse.Mouse = m
			rep.mouse.decay = 15
		} else if rep.mouse.decay > 0 {
			if rep.mouse.decay--; rep.mouse.decay == 0 {
				rep.mouse.State = ansi.MouseNoButton
			}
		}
	}
	ctx.Err = rep.runClient(ctx)
}
// runClient replays the current frame against the client: the context time
// and output size are forced to the recorded values before running.
func (rep *replay) runClient(ctx *Context) error {
	ctx.Time = rep.frame.T
	ctx.Output.Resize(rep.size) // TODO better afford size difference
	err := rep.frame.E // TODO wrap so that it can be non-fatal to looper?
	return errOr(err, ctx.runClient())
}
// buttonAttrs maps a mouse ButtonID to the SGR attributes used when drawing
// the replayed cursor; the index order matches ButtonID values 0..6.
var buttonAttrs = []ansi.SGRAttr{
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.BG() | ansi.SGRWhite.FG(), // none
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRRed.BG(), // left
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRGreen.BG(), // middle
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRBlue.BG(), // right
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRMagenta.BG(), // wheel up
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRCyan.BG(), // wheel down
	ansi.SGRAttrClear | ansi.SGRAttrBold | ansi.SGRWhite.FG() | ansi.SGRYellow.BG(), // inconceivable
}
// drawOverlay paints the replayed mouse cursor on top of the client output,
// saving and restoring the terminal cursor around the edit.
func (rep *replay) drawOverlay(ctx *Context) {
	// TODO better status, maybe integrate with more general debug overlay
	ctx.Output.WriteESC(ansi.DECSC)
	if rep.mouse.Mouse != ZM {
		// TODO better mouse cursor drawing
		if i, ok := ctx.Output.CellOffset(rep.mouse.Point); ok {
			ctx.Output.Grid.Rune[i] = 'X'
			ctx.Output.Grid.Attr[i] = buttonAttrs[rep.mouse.State.ButtonID()]
		}
	}
	// TODO OSD for keyboard events?
	ctx.Output.WriteESC(ansi.DECRC)
}
// next pops the next recorded frame into rep.frame, reporting whether one
// was available.
func (rep *replay) next() bool {
	if len(rep.cur) == 0 {
		return false
	}
	rep.frame, rep.cur = rep.cur[0], rep.cur[1:]
	return true
}
// setRecording switches input recording to the file f (or stops recording
// when f is nil). Any prior recording is closed first. On a new file the
// current platform state and terminal size are written as a header; if any
// of that fails the file is removed and recording is aborted.
// The err parameter lets callers pass os.Create's result straight through.
func (p *Platform) setRecording(f *os.File, err error) {
	p.term.Input.SetRecording(nil)
	if p.recording != nil {
		if err := p.recording.Close(); err != nil {
			log.Printf("failed to close record file %q: %v", p.recording.Name(), err)
		}
		p.recording = nil
	}
	if f != nil {
		sw := sizedWriter{ws: f}
		if err == nil {
			// Header: size-prefixed platform state, then the current size message.
			err = p.writeState(&sw)
			if err == nil {
				err = sw.Finish()
				p.recording = f
				if err == nil {
					err = p.recordSize()
				}
			}
		}
		if err != nil {
			p.recording = nil
			_ = os.Remove(f.Name())
			_ = f.Close()
			log.Printf("failed to encode platform state (aborting recording): %v", err)
			return
		}
		p.term.Input.SetRecording(f)
		log.Printf("recording input to %q", f.Name())
	}
}
// loadReplay stops any in-progress recording, reads a recorded session from
// f, and installs it as the active replay.
func (p *Platform) loadReplay(f *os.File) error {
	p.setRecording(nil, nil)
	rep, err := readReplay(f)
	if err != nil {
		return err
	}
	p.replay = rep
	log.Printf("replaying %v frames over %v from %q",
		len(p.replay.input), p.replay.input.Duration(), readerName(f))
	return nil
}
// toggleRecRep flips between recording and replaying: with no active
// recording it starts one into "auto.rec"; otherwise it finalizes the
// recording and immediately replays it (errReplayDone resets the loop).
func (p *Platform) toggleRecRep() error {
	if p.recording == nil {
		// TODO better filename selection
		p.setRecording(os.Create("auto.rec"))
		return nil
	}
	name := p.recording.Name()
	p.setRecording(nil, nil)
	f, err := os.Open(name)
	if err == nil {
		err = p.loadReplay(f)
		err = errOr(err, f.Close())
	}
	return errOr(err, errReplayDone)
}
// recordSize writes the current screen size into the recording as an
// APC "resize:W,H" control message, so replay can restore the geometry.
func (p *Platform) recordSize() error {
	// APC "resize:" width "," height ST
	if p.recording != nil {
		sz := p.screen.Bounds().Size()
		if _, err := fmt.Fprintf(p.recording, "\x1b_resize:%d,%d\x1b\\", sz.X, sz.Y); err != nil {
			return fmt.Errorf("failed to record size: %v", err)
		}
	}
	return nil
}
// parseSize decodes a "W,H" byte payload (the body of a resize message)
// into an image.Point, returning image.ZP and an error on malformed input.
func parseSize(b []byte) (pt image.Point, err error) {
	i := bytes.IndexByte(b, ',')
	if i < 0 {
		return image.ZP, errors.New("no ',' separator")
	}
	pt.X, _, err = ansi.DecodeNumber(b[:i])
	if err != nil {
		return image.ZP, err
	}
	pt.Y, _, err = ansi.DecodeNumber(b[i+1:])
	if err != nil {
		return image.ZP, err
	}
	return pt, err
}
// readReplay reads a recorded session from f: first the size-prefixed
// serialized platform state, then the input frame stream.
func readReplay(f *os.File) (_ *replay, err error) {
	var rep replay
	if rep.cereal, err = readSized(f); err != nil {
		return nil, err
	}
	if rep.input, err = anansi.ReadInputReplay(f); err != nil {
		return nil, err
	}
	return &rep, nil
}
func readSized(r io.Reader) ([]byte, error) {
var tmp [8]byte
if _, err := r.Read(tmp[:]); err != nil {
return nil, err
}
size := binary.BigEndian.Uint64(tmp[:])
return ioutil.ReadAll(io.LimitReader(r, int64(size)))
}
// readerName reports the Name() of the reader when it has one (e.g. an
// *os.File), or "<unknown>" otherwise.
func readerName(r io.Reader) string {
	type named interface{ Name() string }
	nd, ok := r.(named)
	if !ok {
		return "<unknown>"
	}
	return nd.Name()
}
// sizedWriter writes a payload into a WriteSeeker, reserving 8 bytes up
// front where Finish later back-patches the big-endian payload size.
type sizedWriter struct {
	started bool // true once the 8-byte hole has been reserved
	off int64    // offset of the reserved size header
	size uint64  // bytes written since the header
	ws io.WriteSeeker
}
// Write appends p after the reserved header, lazily seeking past the
// 8-byte size hole on the first call and accumulating the written size.
func (sw *sizedWriter) Write(p []byte) (n int, err error) {
	if !sw.started {
		off, err := sw.ws.Seek(0, io.SeekCurrent)
		if err == nil {
			_, err = sw.ws.Seek(8, io.SeekCurrent)
		}
		if err != nil {
			return 0, err
		}
		sw.off = off
		sw.started = true
	}
	n, err = sw.ws.Write(p)
	sw.size += uint64(n)
	return n, err
}
// Finish back-patches the accumulated payload size into the reserved
// header, restores the write position to the end of the payload, and
// resets the writer for a possible next section. A Finish with no prior
// Write is a no-op.
func (sw *sizedWriter) Finish() error {
	if !sw.started {
		return nil
	}
	// Remember the end-of-payload position so we can return to it.
	off, err := sw.ws.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if _, err := sw.ws.Seek(sw.off, io.SeekStart); err != nil {
		return err
	}
	var tmp [8]byte
	binary.BigEndian.PutUint64(tmp[:], sw.size)
	if _, err = sw.ws.Write(tmp[:]); err != nil {
		return err
	}
	if _, err := sw.ws.Seek(off, io.SeekStart); err != nil {
		return err
	}
	sw.off += int64(sw.size)
	sw.size = 0
	sw.started = false
	return nil
}
|
package neatly
import (
"bytes"
"compress/gzip"
"crypto/md5"
"encoding/json"
"fmt"
"github.com/gomarkdown/markdown"
"github.com/klauspost/pgzip"
"github.com/viant/toolbox"
"github.com/viant/toolbox/data"
"github.com/viant/toolbox/data/udf"
"github.com/viant/toolbox/storage"
"github.com/viant/toolbox/url"
"io"
"io/ioutil"
"os"
"path"
"strings"
"time"
)
//Md5 computes source md5
func Md5(source interface{}, state data.Map) (interface{}, error) {
hash := md5.New()
_, err := io.WriteString(hash, toolbox.AsString(source))
if err != nil {
return nil, err
}
var result = fmt.Sprintf("%x", hash.Sum(nil))
return result, nil
}
//GetOwnerDirectory returns owner neatly document directory
func GetOwnerDirectory(state data.Map) (string, error) {
if !state.Has(OwnerURL) {
return "", fmt.Errorf("OwnerURL was empty")
}
var resource = url.NewResource(state.GetString(OwnerURL))
return resource.DirectoryPath(), nil
}
//HasResource check if patg/url to external resource exists
func HasResource(source interface{}, state data.Map) (interface{}, error) {
filename := toolbox.AsString(source)
if !strings.HasPrefix(filename, "/") {
var parentDirectory = ""
if state.Has(OwnerURL) {
parentDirectory, _ = GetOwnerDirectory(state)
}
candidate := path.Join(parentDirectory, toolbox.AsString(source))
if toolbox.FileExists(candidate) {
return true, nil
}
}
var result = url.NewResource(filename).ParsedURL.Path
return toolbox.FileExists(result), nil
}
//LoadNeatly loads neatly document as data structure, source represents path to nearly document
func LoadNeatly(source interface{}, state data.Map) (interface{}, error) {
var filename = toolbox.AsString(source)
var parentDirectory = ""
if !strings.HasPrefix(filename, "/") {
if state.Has(OwnerURL) {
parentDirectory, _ = GetOwnerDirectory(state)
}
filename = path.Join(parentDirectory, filename)
}
if !toolbox.FileExists(filename) {
return nil, fmt.Errorf("File %v does not exists", filename)
}
var documentResource = url.NewResource(filename)
var dao, ok = state.Get(NeatlyDao).(*Dao)
if !ok {
return nil, fmt.Errorf("failed to get neatly loader %T", state.Get(NeatlyDao))
}
var aMap = make(map[string]interface{})
newState := data.NewMap()
newState.Put(OwnerURL, state.Get(OwnerURL))
newState.Put(NeatlyDao, state.Get(NeatlyDao))
for k, v := range state {
if toolbox.IsFunc(v) {
newState.Put(k, v)
}
}
err := dao.Load(newState, documentResource, &aMap)
return aMap, err
}
//WorkingDirectory return joined path with current directory, ../ is supported as subpath
func WorkingDirectory(source interface{}, state data.Map) (interface{}, error) {
currentDirectory, err := os.Getwd()
if err != nil {
return nil, err
}
var subPath = toolbox.AsString(source)
for strings.HasSuffix(subPath, "../") {
currentDirectory, _ = path.Split(currentDirectory)
if len(subPath) == 3 {
subPath = ""
} else {
subPath = string(subPath[3:])
}
}
if subPath == "" {
return currentDirectory, nil
}
return path.Join(currentDirectory, subPath), nil
}
//Unzip uncompress supplied []byte or error
func Unzip(source interface{}, state data.Map) (interface{}, error) {
payload, ok := source.([]byte)
if !ok {
return nil, fmt.Errorf("invalid Unzip input, expected %T, but had %T", []byte{}, source)
}
reader, err := pgzip.NewReader(bytes.NewReader(payload))
if err != nil {
return nil, fmt.Errorf("failed to create gzip reader %v", err)
}
payload, err = ioutil.ReadAll(reader)
if err != nil {
return nil, fmt.Errorf("failed to read gzip reader %v", err)
}
return payload, err
}
//UnzipText uncompress supplied []byte into text or error
func UnzipText(source interface{}, state data.Map) (interface{}, error) {
payload, err := Unzip(source, state)
if err != nil {
return nil, err
}
return toolbox.AsString(payload), nil
}
//Zip compresses supplied []byte or test or error
func Zip(source interface{}, state data.Map) (interface{}, error) {
payload, ok := source.([]byte)
if !ok {
if text, ok := source.(string); ok {
payload = []byte(text)
} else {
return nil, fmt.Errorf("invalid Zip input, expected %T, but had %T", []byte{}, source)
}
}
buffer := new(bytes.Buffer)
writer, err := pgzip.NewWriterLevel(buffer, gzip.BestSpeed)
if err != nil {
return nil, err
}
_, err = writer.Write(payload)
if err != nil {
return nil, fmt.Errorf("error in Zip, failed to write %v", err)
}
_ = writer.Flush()
err = writer.Close()
return buffer.Bytes(), err
}
//Markdown returns html fot supplied markdown
func Markdown(source interface{}, state data.Map) (interface{}, error) {
var input = toolbox.AsString(source)
response, err := Cat(input, state)
if err == nil && response != nil {
input = toolbox.AsString(response)
}
result := markdown.ToHTML([]byte(input), nil, nil)
return string(result), nil
}
//Cat returns content of supplied file name
func Cat(source interface{}, state data.Map) (interface{}, error) {
content, err := LoadBinary(source, state)
if err != nil {
return nil, err
}
return toolbox.AsString(content), err
}
//LoadBinary returns []byte content of supplied file name
func LoadBinary(source interface{}, state data.Map) (interface{}, error) {
filename := toolbox.AsString(source)
candidate := url.NewResource(filename)
if candidate != nil || candidate.ParsedURL != nil {
filename = candidate.ParsedURL.Path
}
if !toolbox.FileExists(filename) {
var parentDirectory = ""
if state.Has(OwnerURL) {
parentDirectory, _ = GetOwnerDirectory(state)
}
filename = path.Join(parentDirectory, toolbox.AsString(source))
}
if !toolbox.FileExists(filename) {
filename := toolbox.AsString(source)
var resource = url.NewResource(state.GetString(OwnerURL))
parentURL, _ := toolbox.URLSplit(resource.URL)
var URL = toolbox.URLPathJoin(parentURL, filename)
service, err := storage.NewServiceForURL(URL, "")
if err == nil {
if exists, _ := service.Exists(URL); exists {
resource = url.NewResource(URL)
if text, err := resource.DownloadText(); err == nil {
return text, nil
}
}
}
return nil, fmt.Errorf("no such file or directory %v", filename)
}
file, err := toolbox.OpenFile(filename)
if err != nil {
return nil, err
}
defer file.Close()
content, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
return content, nil
}
//AssetsToMap loads assets into map[string]string, it takes url, with optional list of extension as filter
func AssetsToMap(source interface{}, state data.Map) (interface{}, error) {
if source == nil {
return nil, nil
}
var result = make(map[string]string)
updator := func(key string, data []byte) {
result[key] = string(data)
}
return assetToMap(source, state, updator, result)
}
//BinaryAssetsToMap loads binary assets into map[string]string, it takes url, with optional list of extension as filter
func BinaryAssetsToMap(source interface{}, state data.Map) (interface{}, error) {
if source == nil {
return nil, nil
}
var result = make(map[string][]byte)
updator := func(key string, data []byte) {
result[key] = data
}
return assetToMap(source, state, updator, result)
}
// assetToMap dispatches on the source form (URL string, url.Resource-like
// struct/map, or a slice of url.NewResource params), lists the assets and
// feeds each one through updator into result.
func assetToMap(source interface{}, state data.Map, updator func(key string, data []byte), result interface{}) (interface{}, error) {
	URL, ok := source.(string) //URL param case
	if ok {
		return result, loadAssetToMap(url.NewResource(URL), updator)
	}
	//url.Resource param case
	resource := &url.Resource{}
	if toolbox.IsStruct(source) || toolbox.IsMap(source) {
		if err := toolbox.DefaultConverter.AssignConverted(&resource, source); err == nil {
			return result, loadAssetToMap(resource, updator)
		}
	}
	if toolbox.IsSlice(source) { //URL, credentials params case
		params := toolbox.AsSlice(source)
		return result, loadAssetToMap(url.NewResource(params...), updator)
	}
	return nil, fmt.Errorf("unsupported source %T", source)
}
// loadAssetToMap downloads every non-folder object under resource.URL and
// passes each object's file name and raw content to updator.
func loadAssetToMap(resource *url.Resource, updator func(key string, data []byte)) error {
	storageService, err := storage.NewServiceForURL(resource.URL, resource.Credentials)
	if err != nil {
		return err
	}
	objects, err := storageService.List(resource.URL)
	if err != nil {
		return err
	}
	for _, object := range objects {
		if object.IsFolder() {
			continue
		}
		reader, err := storageService.Download(object)
		if err != nil {
			return err
		}
		content, readErr := ioutil.ReadAll(reader)
		// Close per iteration: the original used defer inside the loop, which
		// kept every download handle open until the function returned.
		_ = reader.Close()
		if readErr != nil {
			return readErr
		}
		info := object.FileInfo()
		updator(info.Name(), content)
	}
	return nil
}
// Validate if JSON file is a well-formed JSON
// Returns true if file content is valid JSON
// IsJSON reports whether the named file contains well-formed JSON,
// returning (false, err) when the file cannot be read or parsed.
func IsJSON(fileName interface{}, state data.Map) (interface{}, error) {
	content, err := Cat(fileName, state)
	if err != nil {
		return false, err
	}
	var raw json.RawMessage
	if err = json.Unmarshal([]byte(toolbox.AsString(content)), &raw); err != nil {
		return false, err
	}
	return true, nil
}
// No parameters
// Returns the numeric current hour [0,23]
// CurrentHour ignores its parameters and returns the current local hour,
// in the range [0,23].
func CurrentHour(none interface{}, state data.Map) (interface{}, error) {
	now := time.Now()
	return now.Hour(), nil
}
// Argument-map keys used by MatchAnyRow.
const pathKey = "path"
const valueKey = "value"
// valueAndPath expects a map with keys "path" being the path to the file which contains the rows to match and "value" containing the value to match
// Return value is a boolean. True if the compare value matches any row in the file. False if there is an error or no match is found
func MatchAnyRow(valueAndPath interface{}, state data.Map) (interface{}, error) {
	if toolbox.IsMap(valueAndPath) { //URL, credentials params case
		argumentsMap := toolbox.AsMap(valueAndPath)
		if validateMatchAnyRowKeys(argumentsMap) {
			value := toolbox.AsString(argumentsMap[valueKey])
			path := toolbox.AsString(argumentsMap[pathKey])
			binaryRows, err := LoadBinary(path, state)
			if err != nil {
				// NOTE(review): load failures are deliberately swallowed and
				// reported as "no match" (false, nil) per the doc above.
				return false, nil
			}
			// Normalize CRLF so Windows-authored files match line by line.
			rows := strings.Split(strings.ReplaceAll(string(binaryRows.([]byte)), "\r\n", "\n"), "\n")
			for _, row := range rows {
				if row == value {
					return true, nil
				}
			}
			return false, nil
		}
	}
	return false, fmt.Errorf("unsupported filename and value %T", valueAndPath)
}
// validateMatchAnyRowKeys reports whether argumentsMap carries both the
// "value" and "path" keys that MatchAnyRow requires.
func validateMatchAnyRowKeys(argumentsMap map[string]interface{}) bool {
	_, hasValue := argumentsMap[valueKey]
	_, hasPath := argumentsMap[pathKey]
	return hasValue && hasPath
}
// AddStandardUdf registers the built-in UDFs into the supplied map so they
// can be referenced by name from expressions.
func AddStandardUdf(aMap data.Map) {
	udf.Register(aMap)
	aMap.Put("IsJSON", IsJSON)
	aMap.Put("WorkingDirectory", WorkingDirectory)
	aMap.Put("Pwd", WorkingDirectory) // alias for WorkingDirectory
	aMap.Put("HasResource", HasResource)
	aMap.Put("Md5", Md5)
	aMap.Put("LoadNeatly", LoadNeatly)
	aMap.Put("Zip", Zip)
	aMap.Put("Unzip", Unzip)
	aMap.Put("UnzipText", UnzipText)
	aMap.Put("Markdown", Markdown)
	aMap.Put("Cat", Cat)
	aMap.Put("LoadBinary", LoadBinary)
	aMap.Put("AssetsToMap", AssetsToMap)
	aMap.Put("BinaryAssetsToMap", BinaryAssetsToMap)
	aMap.Put("CurrentHour", CurrentHour)
	aMap.Put("MatchAnyRow", MatchAnyRow)
}
|
package event
import (
v1 "k8s.io/api/core/v1"
kscheme "k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/scheme"
)
// component is the value reported as the source component on emitted events.
const component string = "operator-lifecycle-manager"

// s is the shared runtime scheme used by recorders to resolve object kinds.
var s = scheme.Scheme

func init() {
	// Merge the core Kubernetes types into the scheme; without them the
	// recorder cannot resolve references to built-in objects.
	if err := kscheme.AddToScheme(s); err != nil {
		panic(err)
	}
}
// NewRecorder returns an EventRecorder that can be used to post Events to
// different object's lifecycles. Events are logged via klog and delivered
// to the supplied event sink. The error result is always nil.
func NewRecorder(event typedcorev1.EventInterface) (record.EventRecorder, error) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: event})
	return broadcaster.NewRecorder(s, v1.EventSource{Component: component}), nil
}
|
package socks
import "strings"
// UNKNOWN is the label reported for values with no string representation.
const UNKNOWN = "unknown"

type version byte

// SOCKS versions.
const (
	SOCKS4 = version(0x04)
	SOCKS5 = version(0x05)
)

// String returns the human-readable protocol name, or "" when unknown.
func (v version) String() string {
	switch v {
	case SOCKS5:
		return "SOCKS5"
	case SOCKS4:
		return "SOCKS4/4a"
	default:
		return ""
	}
}

// LabelValue returns a metrics-safe label for the version
// (slashes replaced by underscores, UNKNOWN for unrecognized values).
func (v version) LabelValue() string {
	if name := v.String(); name != "" {
		return strings.ReplaceAll(name, "/", "_")
	}
	return UNKNOWN
}
type commandType byte

// SOCKS commands.
const (
	CmdConnect = commandType(0x01)
	CmdBind    = commandType(0x02)
	CmdUDP     = commandType(0x03)
)

// String returns the command's descriptive name, or "" when unknown.
func (c commandType) String() string {
	names := map[commandType]string{
		CmdConnect: "connect",
		CmdBind:    "bind",
		CmdUDP:     "UDP associate",
	}
	return names[c]
}
type addressType byte

// SOCKS address types.
const (
	AddrIPv4   = addressType(0x01)
	AddrDomain = addressType(0x03)
	AddrIPv6   = addressType(0x04)
)

// String returns the descriptive name of the address type, or "" when unknown.
func (at addressType) String() string {
	names := map[addressType]string{
		AddrIPv4:   "IPv4",
		AddrDomain: "Domain name",
		AddrIPv6:   "IPv6",
	}
	return names[at]
}

// LabelValue returns a metrics-safe label for the address type
// (spaces replaced by underscores, UNKNOWN for unrecognized values).
func (at addressType) LabelValue() string {
	if name := at.String(); name != "" {
		return strings.ReplaceAll(name, " ", "_")
	}
	return UNKNOWN
}
type authType byte

// SOCKS authentication types.
const (
	AuthNo     = authType(0x00)
	AuthGSSAPI = authType(0x01)
	AuthBasic  = authType(0x02)
)

// String returns the descriptive name of the auth method, or "" when unknown.
func (a authType) String() string {
	names := map[authType]string{
		AuthNo:     "no auth",
		AuthGSSAPI: "GSSAPI",
		AuthBasic:  "basic",
	}
	return names[a]
}

// LabelValue returns a metrics-safe label for the auth method
// (spaces replaced by underscores, UNKNOWN for unrecognized values).
func (a authType) LabelValue() string {
	if name := a.String(); name != "" {
		return strings.ReplaceAll(name, " ", "_")
	}
	return UNKNOWN
}
type socks4ResponseStatus byte

// SOCKS4 response status codes.
const (
	Status4Granted     = socks4ResponseStatus(0x5a)
	Status4Rejected    = socks4ResponseStatus(0x5b)
	Status4NoIdentd    = socks4ResponseStatus(0x5c)
	Status4InvalidUser = socks4ResponseStatus(0x5d)
)

// String returns the descriptive name of the status code, or "" when unknown.
func (s socks4ResponseStatus) String() string {
	names := map[socks4ResponseStatus]string{
		Status4Granted:     "granted",
		Status4Rejected:    "rejected",
		Status4NoIdentd:    "no identd",
		Status4InvalidUser: "invalid user",
	}
	return names[s]
}

// LabelValue returns a metrics-safe label for the status code
// (spaces replaced by underscores, UNKNOWN for unrecognized values).
func (s socks4ResponseStatus) LabelValue() string {
	if name := s.String(); name != "" {
		return strings.ReplaceAll(name, " ", "_")
	}
	return UNKNOWN
}
type socks5ResponseStatus byte

// SOCKS5 response status codes.
const (
	Status5Granted             = socks5ResponseStatus(0x00)
	Status5Failure             = socks5ResponseStatus(0x01)
	Status5DeniedByRuleset     = socks5ResponseStatus(0x02)
	Status5NetworkUnreachable  = socks5ResponseStatus(0x03)
	Status5HostUnreachable     = socks5ResponseStatus(0x04)
	Status5ConnectionRefused   = socks5ResponseStatus(0x05)
	Status5TTLExpired          = socks5ResponseStatus(0x06)
	Status5CommandNotSupported = socks5ResponseStatus(0x07)
	Status5AddressNotSupported = socks5ResponseStatus(0x08)
)

// String returns the descriptive name of the status code, or "" when unknown.
func (s socks5ResponseStatus) String() string {
	names := map[socks5ResponseStatus]string{
		Status5Granted:             "granted",
		Status5Failure:             "failure",
		Status5DeniedByRuleset:     "not allowed",
		Status5NetworkUnreachable:  "network unreachable",
		Status5HostUnreachable:     "host unreachable",
		Status5ConnectionRefused:   "connection refused",
		Status5TTLExpired:          "TTL expired",
		Status5CommandNotSupported: "command not supported",
		Status5AddressNotSupported: "address type not supported",
	}
	return names[s]
}

// LabelValue returns a metrics-safe label for the status code
// (spaces replaced by underscores, UNKNOWN for unrecognized values).
func (s socks5ResponseStatus) LabelValue() string {
	if name := s.String(); name != "" {
		return strings.ReplaceAll(name, " ", "_")
	}
	return UNKNOWN
}
const (
	// logFieldType tags log entries emitted by this package.
	// NOTE(review): the name suggests a field *name* but the value reads
	// like a field *value* ("access") — confirm against its usage site.
	logFieldType = "access"
)
|
package 链表
// deleteNode removes every node whose Val equals val and returns the new head.
// A dummy node in front of head makes removing the original head uniform
// with removing any other node.
func deleteNode(head *ListNode, val int) *ListNode {
	dummy := &ListNode{Next: head}
	for node := dummy; node.Next != nil; {
		if node.Next.Val == val {
			// Unlink the matching node; stay put in case the next one matches too.
			node.Next = node.Next.Next
		} else {
			node = node.Next
		}
	}
	return dummy.Next
}
/*
题目链接: https://leetcode-cn.com/problems/shan-chu-lian-biao-de-jie-dian-lcof/
总结:
1. 上面的代码可以删除链表中多个值为val的节点。
2. 这题和 _203. 移除链表元素_ 差不多。
*/
|
package intercom
// TestHTTPClient is a no-op HTTP client stub for tests: every method
// succeeds and returns an empty (nil) body.
type TestHTTPClient struct{}

// Get returns an empty successful response.
func (c TestHTTPClient) Get(uri string, queryParams interface{}) ([]byte, error) {
	return nil, nil
}

// Post returns an empty successful response.
func (c TestHTTPClient) Post(uri string, body interface{}) ([]byte, error) {
	return nil, nil
}

// Patch returns an empty successful response.
func (c TestHTTPClient) Patch(uri string, body interface{}) ([]byte, error) {
	return nil, nil
}

// Delete returns an empty successful response.
func (c TestHTTPClient) Delete(uri string, body interface{}) ([]byte, error) {
	return nil, nil
}
|
package main
import (
"fmt"
"net/http"
)
// main issues a HEAD request against the site and prints every response
// header. Note: http.Head uses http.DefaultClient, which has no timeout.
func main() {
	resp, err := http.Head("https://xueyuanjun.com")
	if err != nil {
		fmt.Println("fail", err.Error())
		return
	}
	// Close the body so the underlying connection can be reused/released.
	defer resp.Body.Close()
	for key, value := range resp.Header {
		fmt.Println(key, ":", value)
	}
}
package object
import (
"time"
)
// ArticleThumbsUpMapping is a gorm model linking a user to an article they
// thumbed up (one row per user/article pair).
type ArticleThumbsUpMapping struct {
	Id        int `gorm:"primary_key"` // surrogate primary key
	CreatedAt time.Time // creation timestamp (gorm-conventional field name)
	UserId    int `form:"userid"`    // id of the user who thumbed up
	ArticleId int `form:"articleid"` // id of the target article
}
|
package callback
import (
"context"
"fmt"
"strings"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/cos"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/local"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/onedrive"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/s3"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/gin-gonic/gin"
)
// CallbackProcessService is implemented by every upload-callback request
// type; GetBody extracts the normalized callback payload for a session.
type CallbackProcessService interface {
	GetBody(*serializer.UploadSession) serializer.UploadCallback
}

// RemoteUploadCallbackService is the callback request for remote-storage uploads.
type RemoteUploadCallbackService struct {
	Data serializer.UploadCallback `json:"data" binding:"required"`
}

// GetBody returns the callback payload carried verbatim in the request.
func (service RemoteUploadCallbackService) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	return service.Data
}

// UploadCallbackService is the callback request for OSS/Qiniu storage uploads.
type UploadCallbackService struct {
	Name       string `json:"name"`
	SourceName string `json:"source_name"`
	PicInfo    string `json:"pic_info"`
	Size       uint64 `json:"size"`
}

// UpyunCallbackService is the callback request for Upyun uploads.
type UpyunCallbackService struct {
	Code       int    `form:"code" binding:"required"`
	Message    string `form:"message" binding:"required"`
	SourceName string `form:"url" binding:"required"`
	Width      string `form:"image-width"`  // empty when the upload is not an image
	Height     string `form:"image-height"`
	Size       uint64 `form:"file_size"`
}

// OneDriveCallback is the OneDrive client callback body.
type OneDriveCallback struct {
	ID   string `json:"id" binding:"required"`
	Meta *onedrive.FileInfo // filled in by PreProcess after querying OneDrive
}

// COSCallback is the COS client callback body.
type COSCallback struct {
	Bucket string `form:"bucket"`
	Etag   string `form:"etag"`
}

// S3Callback is the S3 client callback body.
type S3Callback struct {
	Bucket string `form:"bucket"`
	Etag   string `form:"etag"`
	Key    string `form:"key"`
}

// UfileCallback is the UFile client callback body.
type UfileCallback struct {
	Name       string `form:"name"`
	SourceName string `form:"source_name"`
	PicInfo    string `form:"pic_info"`
	Size       uint64 `form:"size"`
}
// GetBody builds the normalized callback payload from an Upyun callback;
// picture info is only set when the callback reports an image width.
func (service UpyunCallbackService) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	result := serializer.UploadCallback{
		Name:       session.Name,
		SourceName: service.SourceName,
		Size:       service.Size,
	}
	if service.Width != "" {
		result.PicInfo = service.Width + "," + service.Height
	}
	return result
}

// GetBody builds the normalized callback payload from an OSS/Qiniu callback.
func (service UploadCallbackService) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	result := serializer.UploadCallback{
		Name:       service.Name,
		SourceName: service.SourceName,
		PicInfo:    service.PicInfo,
		Size:       service.Size,
	}
	return result
}

// GetBody builds the normalized callback payload from a OneDrive callback,
// deriving picture info from the file metadata fetched during PreProcess.
func (service OneDriveCallback) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	picInfo := "0,0"
	if service.Meta.Image.Width != 0 {
		picInfo = fmt.Sprintf("%d,%d", service.Meta.Image.Width, service.Meta.Image.Height)
	}
	result := serializer.UploadCallback{
		Name:       session.Name,
		SourceName: session.SavePath,
		PicInfo:    picInfo,
		Size:       session.Size,
	}
	return result
}

// GetBody builds the normalized callback payload from a COS callback,
// taking all values from the upload session.
func (service COSCallback) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	result := serializer.UploadCallback{
		Name:       session.Name,
		SourceName: session.SavePath,
		PicInfo:    "",
		Size:       session.Size,
	}
	return result
}

// GetBody builds the normalized callback payload from an S3 callback,
// taking all values from the upload session.
func (service S3Callback) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	result := serializer.UploadCallback{
		Name:       session.Name,
		SourceName: session.SavePath,
		PicInfo:    "",
		Size:       session.Size,
	}
	return result
}

// GetBody builds the normalized callback payload from a UFile callback,
// taking all values from the upload session.
func (service UfileCallback) GetBody(session *serializer.UploadSession) serializer.UploadCallback {
	result := serializer.UploadCallback{
		Name:       session.Name,
		SourceName: session.SavePath,
		PicInfo:    "",
		Size:       session.Size,
	}
	return result
}
// ProcessCallback handles an upload result callback: it resolves the upload
// session, ensures the target directory exists, registers the file in the
// database, and (for pictures) updates the picture info.
func ProcessCallback(service CallbackProcessService, c *gin.Context) serializer.Response {
	// Build a file system instance from the callback request.
	fs, err := filesystem.NewFileSystemFromCallback(c)
	if err != nil {
		return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
	}
	defer fs.Recycle()
	// Fetch the callback session placed in the context by middleware.
	// NOTE(review): the type assertion panics if the middleware did not set
	// "callbackSession" — confirm the upstream guarantee.
	callbackSessionRaw, _ := c.Get("callbackSession")
	callbackSession := callbackSessionRaw.(*serializer.UploadSession)
	callbackBody := service.GetBody(callbackSession)
	// Resolve the parent directory, creating it if it does not yet exist.
	exist, parentFolder := fs.IsPathExist(callbackSession.VirtualPath)
	if !exist {
		newFolder, err := fs.CreateDirectory(context.Background(), callbackSession.VirtualPath)
		if err != nil {
			return serializer.Err(serializer.CodeParamErr, "指定目录不存在", err)
		}
		parentFolder = newFolder
	}
	// Build the file header describing the uploaded file.
	fileHeader := local.FileStream{
		Size:        callbackBody.Size,
		VirtualPath: callbackSession.VirtualPath,
		Name:        callbackSession.Name,
	}
	// Build the context carrying the header and save path for the hooks below.
	ctx := context.WithValue(context.Background(), fsctx.FileHeaderCtx, fileHeader)
	ctx = context.WithValue(ctx, fsctx.SavePathCtx, callbackBody.SourceName)
	// Register validation and cleanup hooks (order matters for each event).
	fs.Use("BeforeAddFile", filesystem.HookValidateFile)
	fs.Use("BeforeAddFile", filesystem.HookValidateCapacity)
	fs.Use("AfterValidateFailed", filesystem.HookGiveBackCapacity)
	fs.Use("AfterValidateFailed", filesystem.HookDeleteTempFile)
	fs.Use("BeforeAddFileFailed", filesystem.HookDeleteTempFile)
	// Persist the file record in the database.
	file, err := fs.AddFile(ctx, parentFolder)
	if err != nil {
		return serializer.Err(serializer.CodeUploadFailed, err.Error(), err)
	}
	// If the upload is a picture, update its picture info (best effort:
	// a failure here is only logged, not returned).
	if callbackBody.PicInfo != "" {
		if err := file.UpdatePicInfo(callbackBody.PicInfo); err != nil {
			util.Log().Debug("无法更新回调文件的图片信息:%s", err)
		}
	}
	return serializer.Response{
		Code: 0,
	}
}
// PreProcess validates a OneDrive client callback: it fetches the file's
// metadata from OneDrive, checks it against the upload session, and on
// success stores the metadata and delegates to ProcessCallback.
func (service *OneDriveCallback) PreProcess(c *gin.Context) serializer.Response {
	// Build a file system instance from the callback request.
	fs, err := filesystem.NewFileSystemFromCallback(c)
	if err != nil {
		return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
	}
	defer fs.Recycle()
	// Fetch the callback session placed in the context by middleware.
	callbackSessionRaw, _ := c.Get("callbackSession")
	callbackSession := callbackSessionRaw.(*serializer.UploadSession)
	// Query the file metadata from OneDrive by item ID.
	info, err := fs.Handler.(onedrive.Driver).Client.Meta(context.Background(), service.ID, "")
	if err != nil {
		return serializer.Err(serializer.CodeUploadFailed, "文件元信息查询失败", err)
	}
	// Verify size and path against the callback session; on mismatch the
	// remote file is removed (best effort: the Delete error is ignored).
	// NOTE(review): err is nil on this branch, so serializer.Err receives a
	// nil error here — confirm intended.
	actualPath := strings.TrimPrefix(callbackSession.SavePath, "/")
	if callbackSession.Size != info.Size || info.GetSourcePath() != actualPath {
		fs.Handler.(onedrive.Driver).Client.Delete(context.Background(), []string{info.GetSourcePath()})
		return serializer.Err(serializer.CodeUploadFailed, "文件信息不一致", err)
	}
	service.Meta = info
	return ProcessCallback(service, c)
}

// PreProcess validates a COS client callback: it queries the object's
// metadata and compares it with the upload session before delegating to
// ProcessCallback.
func (service *COSCallback) PreProcess(c *gin.Context) serializer.Response {
	// Build a file system instance from the callback request.
	fs, err := filesystem.NewFileSystemFromCallback(c)
	if err != nil {
		return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
	}
	defer fs.Recycle()
	// Fetch the callback session placed in the context by middleware.
	callbackSessionRaw, _ := c.Get("callbackSession")
	callbackSession := callbackSessionRaw.(*serializer.UploadSession)
	// Query the object metadata from COS.
	info, err := fs.Handler.(cos.Driver).Meta(context.Background(), callbackSession.SavePath)
	if err != nil {
		return serializer.Err(serializer.CodeUploadFailed, "文件信息不一致", err)
	}
	// Verify size and callback key against the session.
	if callbackSession.Size != info.Size || callbackSession.Key != info.CallbackKey {
		return serializer.Err(serializer.CodeUploadFailed, "文件信息不一致", err)
	}
	return ProcessCallback(service, c)
}

// PreProcess validates an S3 client callback: it queries the object's
// metadata and compares size and ETag with the upload session before
// delegating to ProcessCallback.
func (service *S3Callback) PreProcess(c *gin.Context) serializer.Response {
	// Build a file system instance from the callback request.
	fs, err := filesystem.NewFileSystemFromCallback(c)
	if err != nil {
		return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
	}
	defer fs.Recycle()
	// Fetch the callback session placed in the context by middleware.
	callbackSessionRaw, _ := c.Get("callbackSession")
	callbackSession := callbackSessionRaw.(*serializer.UploadSession)
	// Query the object metadata from S3.
	info, err := fs.Handler.(s3.Driver).Meta(context.Background(), callbackSession.SavePath)
	if err != nil {
		return serializer.Err(serializer.CodeUploadFailed, "文件信息不一致", err)
	}
	// Verify size and ETag against the session.
	if callbackSession.Size != info.Size || service.Etag != info.Etag {
		return serializer.Err(serializer.CodeUploadFailed, "文件信息不一致", err)
	}
	return ProcessCallback(service, c)
}
|
package day7
import (
"strconv"
"strings"
"github.com/littleajax/adventofcode/helpers"
)
//Build a distributed graph
//Going the other direction now, not parents, but children, with multiplicative counts
// ShinyGoldBagChildren returns the total number of bags contained inside the
// given bag, counted transitively with multiplicities: each child contributes
// its own count plus count * (bags inside it).
//
// Fix: the former `if child.bag.visited == true { /* continue */ }` branch was
// a no-op (its continue was commented out), so it has been removed — behavior
// is unchanged. The visited write is kept in case other code consults it.
func ShinyGoldBagChildren(shinyGold *BagWithChildren) (totalChildren int) {
	for _, child := range shinyGold.children {
		childCount := child.count
		// NOTE(review): visited is set but never acted on here; kept only for
		// compatibility with possible readers elsewhere — confirm.
		child.bag.visited = true
		subCount := ShinyGoldBagChildren(child.bag)
		if subCount != 0 {
			// count copies of this child each contain subCount bags.
			subCount = childCount * subCount
		}
		childCount += subCount
		totalChildren += childCount
	}
	return
}
// ProcessWithChildrenInputs parses the day-7 input file into a graph of bags
// with weighted child edges and returns the "shiny gold" node.
func ProcessWithChildrenInputs() *BagWithChildren {
	inputs := helpers.FetchInputs("./inputs/day7.txt")
	bags := make(map[string]*BagWithChildren)
	// First pass: create one node per rule. A bag's key is its two-word
	// colour with the space removed, e.g. "shinygold".
	for _, rule := range inputs {
		var bag BagWithChildren
		splits := strings.Split(rule, " ")
		name := splits[0] + splits[1]
		bag.name = name
		bags[name] = &bag
	}
	// Second pass: wire up the child edges.
	for _, rule := range inputs {
		var bagGroup BagGroup
		splits := strings.Split(rule, " ")
		parentName := splits[0] + splits[1]
		if splits[4] == "no" {
			continue //this bag is no ones container ("no other bags")
		}
		// Each contained bag occupies four tokens:
		// "<count> <adjective> <colour> bag(s),".
		for i := 4; i <= len(splits)-1; i += 4 {
			bagName := splits[i+1] + splits[i+2]
			bagGroup.bag = bags[bagName]
			bagGroup.count, _ = strconv.Atoi(splits[i]) //bad me, ignoring err
			bags[parentName].children = append(bags[parentName].children, bagGroup)
		}
	}
	return bags["shinygold"]
}
// BagWithChildren is a node in the containment graph: a bag plus the bag
// groups it directly contains.
type BagWithChildren struct {
	name     string
	children []BagGroup
	visited  bool //So we don't enter infinite bag loops
}

// BagGroup is a weighted edge: count copies of bag inside the parent.
type BagGroup struct {
	count int
	bag   *BagWithChildren
}
|
package main
import "github.com/codegangsta/cli"
// Commands lists every top-level CLI command exposed by the tool.
var Commands = []cli.Command{
	commandVolume,
	commandImage,
	commandNetwork,
}
// commandVolume removes orphaned volumes from the host.
// Fix: help text said "Removed orphaned volumes" — corrected to "Removes"
// to match the sibling commands' phrasing.
var commandVolume = cli.Command{
	Name:      "volume",
	ShortName: "v",
	Usage:     "Removes orphaned volumes from the host",
	Action:    doVolumes,
	Flags: []cli.Flag{
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "Force orphaned volumes to be removed",
		},
	},
}
// commandImage removes orphaned images from the host.
// Fixes: the force-flag help text was copy-pasted from the volume command
// ("volumes" -> "images"), and the age-flag sentence was garbled.
var commandImage = cli.Command{
	Name:      "image",
	ShortName: "i",
	Usage:     "Removes orphaned images from the host",
	Action:    doImages,
	Flags: []cli.Flag{
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "Force orphaned images to be removed",
		},
		cli.StringFlag{
			Name:  "name",
			Usage: "Delete image specified by name",
		},
		cli.IntFlag{
			Name:  "age, a",
			Usage: "Delete images whose Created time is older than the specified age in seconds",
		},
	},
}
// commandNetwork removes empty networks from the host.
var commandNetwork = cli.Command{
	Name:      "network",
	ShortName: "n",
	Usage:     "Removes empty networks from the host",
	Action:    doNetworks,
	Flags: []cli.Flag{
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "Force empty networks to be removed",
		},
	},
}
package main
import (
"context"
"flag"
"github.com/samkreter/go-core/example/services/frontend"
"github.com/samkreter/go-core/log"
"github.com/samkreter/go-core/trace"
"github.com/sirupsen/logrus"
)
// Service endpoints and identity.
const (
	frontendAddr = ":8081"           // address the frontend server listens on
	customerAddr = "customers:8082"  // downstream customer service address
	serviceName  = "frontend"        // name used for logging and tracing
)
// main configures structured logging and tracing, then runs the frontend
// HTTP server. Fatal log calls terminate the process on any setup failure.
// Fix: the -log-level help text was missing the opening quote before
// "debug" (`"trace", debug", ...`).
func main() {
	logLevel := flag.String("log-level", "info", `set the log level, e.g. "trace", "debug", "info", "warn", "error"`)
	flag.Parse()
	level, err := log.ParseLevel(*logLevel)
	if err != nil {
		log.G(context.TODO()).WithError(err).Fatal("Failed to parse log level")
	}
	logrus.SetLevel(level)
	log.L = logrus.WithField("service", serviceName)
	err = trace.SetupTracing(serviceName, "jaeger")
	if err != nil {
		log.G(context.TODO()).WithError(err).Fatal("Failed to initialize tracing")
	}
	// Start the frontend service (blocks until the server stops).
	f, err := frontend.NewServer(frontendAddr, customerAddr)
	if err != nil {
		log.G(context.TODO()).WithError(err).Fatal("failed to create frontend server")
	}
	f.Run()
}
|
package ipfs
import (
"fmt"
// "os"
"testing"
// shell "github.com/ipfs/go-ipfs-api"
)
// Test exercises the IPFS wrapper end-to-end: it initializes the client and
// adds a file, printing the result.
// NOTE(review): this is an integration test — it requires a reachable IPFS
// API at 127.0.0.1:5001 and the file /root/mount.sh, and makes no assertions.
func Test(t *testing.T) {
	fmt.Println("")
	Initialize("127.0.0.1:5001", "192.168.189.141")
	fmt.Println(AddFile("/root/mount.sh"))
}
|
package binance
import (
"context"
"net/http"
)
// CreateMarginOrderService places a new margin order
// (POST /sapi/v1/margin/order). Optional parameters are pointers so that
// unset values are omitted from the request.
type CreateMarginOrderService struct {
	c                *Client
	symbol           string
	side             SideType
	orderType        OrderType
	quantity         *string
	quoteOrderQty    *string
	price            *string
	stopPrice        *string
	newClientOrderID *string
	icebergQuantity  *string
	newOrderRespType *NewOrderRespType
	sideEffectType   *SideEffectType
	timeInForce      *TimeInForceType
	isIsolated       *bool
}

// Symbol sets the trading symbol.
func (s *CreateMarginOrderService) Symbol(symbol string) *CreateMarginOrderService {
	s.symbol = symbol
	return s
}

// IsIsolated sets the order to isolated margin.
func (s *CreateMarginOrderService) IsIsolated(isIsolated bool) *CreateMarginOrderService {
	s.isIsolated = &isIsolated
	return s
}

// Side sets the order side (buy/sell).
func (s *CreateMarginOrderService) Side(side SideType) *CreateMarginOrderService {
	s.side = side
	return s
}

// Type sets the order type.
func (s *CreateMarginOrderService) Type(orderType OrderType) *CreateMarginOrderService {
	s.orderType = orderType
	return s
}

// TimeInForce sets the timeInForce parameter.
func (s *CreateMarginOrderService) TimeInForce(timeInForce TimeInForceType) *CreateMarginOrderService {
	s.timeInForce = &timeInForce
	return s
}

// Quantity sets the base-asset quantity.
func (s *CreateMarginOrderService) Quantity(quantity string) *CreateMarginOrderService {
	s.quantity = &quantity
	return s
}
// QuoteOrderQty set quoteOrderQty
func (s *CreateMarginOrderService) QuoteOrderQty(quoteOrderQty string) *CreateMarginOrderService {
s.quoteOrderQty = "eOrderQty
return s
}
// Price sets the limit price.
func (s *CreateMarginOrderService) Price(price string) *CreateMarginOrderService {
	s.price = &price
	return s
}

// NewClientOrderID sets a client-chosen order id.
func (s *CreateMarginOrderService) NewClientOrderID(newClientOrderID string) *CreateMarginOrderService {
	s.newClientOrderID = &newClientOrderID
	return s
}

// StopPrice sets the stop price.
func (s *CreateMarginOrderService) StopPrice(stopPrice string) *CreateMarginOrderService {
	s.stopPrice = &stopPrice
	return s
}

// IcebergQuantity sets the iceberg quantity.
func (s *CreateMarginOrderService) IcebergQuantity(icebergQuantity string) *CreateMarginOrderService {
	s.icebergQuantity = &icebergQuantity
	return s
}

// NewOrderRespType sets the desired response type for the new order.
// (Previous comment wrongly said "set icebergQuantity".)
func (s *CreateMarginOrderService) NewOrderRespType(newOrderRespType NewOrderRespType) *CreateMarginOrderService {
	s.newOrderRespType = &newOrderRespType
	return s
}

// SideEffectType sets the side-effect type.
func (s *CreateMarginOrderService) SideEffectType(sideEffectType SideEffectType) *CreateMarginOrderService {
	s.sideEffectType = &sideEffectType
	return s
}

// Do sends the signed create-order request and decodes the response.
// Only parameters that were explicitly set are included in the form body.
func (s *CreateMarginOrderService) Do(ctx context.Context, opts ...RequestOption) (res *CreateOrderResponse, err error) {
	r := &request{
		method:   http.MethodPost,
		endpoint: "/sapi/v1/margin/order",
		secType:  secTypeSigned,
	}
	m := params{
		"symbol": s.symbol,
		"side":   s.side,
		"type":   s.orderType,
	}
	if s.quantity != nil {
		m["quantity"] = *s.quantity
	}
	if s.quoteOrderQty != nil {
		m["quoteOrderQty"] = *s.quoteOrderQty
	}
	if s.isIsolated != nil {
		if *s.isIsolated {
			m["isIsolated"] = "TRUE"
		} else {
			m["isIsolated"] = "FALSE"
		}
	}
	if s.timeInForce != nil {
		m["timeInForce"] = *s.timeInForce
	}
	if s.price != nil {
		m["price"] = *s.price
	}
	if s.newClientOrderID != nil {
		m["newClientOrderId"] = *s.newClientOrderID
	}
	if s.stopPrice != nil {
		m["stopPrice"] = *s.stopPrice
	}
	if s.icebergQuantity != nil {
		m["icebergQty"] = *s.icebergQuantity
	}
	if s.newOrderRespType != nil {
		m["newOrderRespType"] = *s.newOrderRespType
	}
	if s.sideEffectType != nil {
		m["sideEffectType"] = *s.sideEffectType
	}
	r.setFormParams(m)
	res = new(CreateOrderResponse)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	// NOTE(review): this file does not import encoding/json, so `json` is
	// presumably a package-level codec declared elsewhere — confirm.
	err = json.Unmarshal(data, res)
	if err != nil {
		return nil, err
	}
	return res, nil
}
// CancelMarginOrderService cancels an active margin order
// (DELETE /sapi/v1/margin/order).
type CancelMarginOrderService struct {
	c                 *Client
	symbol            string
	orderID           *int64
	origClientOrderID *string
	newClientOrderID  *string
	isIsolated        *bool
}

// Symbol sets the trading symbol.
func (s *CancelMarginOrderService) Symbol(symbol string) *CancelMarginOrderService {
	s.symbol = symbol
	return s
}

// IsIsolated marks the order as belonging to an isolated-margin account.
func (s *CancelMarginOrderService) IsIsolated(isIsolated bool) *CancelMarginOrderService {
	s.isIsolated = &isIsolated
	return s
}

// OrderID sets the exchange-assigned id of the order to cancel.
func (s *CancelMarginOrderService) OrderID(orderID int64) *CancelMarginOrderService {
	s.orderID = &orderID
	return s
}

// OrigClientOrderID sets the client order id of the order to cancel.
func (s *CancelMarginOrderService) OrigClientOrderID(origClientOrderID string) *CancelMarginOrderService {
	s.origClientOrderID = &origClientOrderID
	return s
}

// NewClientOrderID sets a client-chosen id for the cancel request itself.
func (s *CancelMarginOrderService) NewClientOrderID(newClientOrderID string) *CancelMarginOrderService {
	s.newClientOrderID = &newClientOrderID
	return s
}

// Do sends the signed cancel request and decodes the response.
func (s *CancelMarginOrderService) Do(ctx context.Context, opts ...RequestOption) (res *CancelMarginOrderResponse, err error) {
	r := &request{
		method:   http.MethodDelete,
		endpoint: "/sapi/v1/margin/order",
		secType:  secTypeSigned,
	}
	r.setFormParam("symbol", s.symbol)
	if s.orderID != nil {
		r.setFormParam("orderId", *s.orderID)
	}
	if s.origClientOrderID != nil {
		r.setFormParam("origClientOrderId", *s.origClientOrderID)
	}
	if s.newClientOrderID != nil {
		r.setFormParam("newClientOrderId", *s.newClientOrderID)
	}
	if s.isIsolated != nil {
		if *s.isIsolated {
			r.setFormParam("isIsolated", "TRUE")
		} else {
			r.setFormParam("isIsolated", "FALSE")
		}
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	res = new(CancelMarginOrderResponse)
	err = json.Unmarshal(data, res)
	if err != nil {
		return nil, err
	}
	return res, nil
}
// GetMarginOrderService queries a single margin order
// (GET /sapi/v1/margin/order). Note: unlike the create/cancel services,
// isIsolated here is a plain bool and is only sent when true.
type GetMarginOrderService struct {
	c                 *Client
	symbol            string
	orderID           *int64
	origClientOrderID *string
	isIsolated        bool
}

// IsIsolated marks the query as targeting an isolated-margin account.
func (s *GetMarginOrderService) IsIsolated(isIsolated bool) *GetMarginOrderService {
	s.isIsolated = isIsolated
	return s
}

// Symbol sets the trading symbol.
func (s *GetMarginOrderService) Symbol(symbol string) *GetMarginOrderService {
	s.symbol = symbol
	return s
}

// OrderID sets the exchange-assigned order id to look up.
func (s *GetMarginOrderService) OrderID(orderID int64) *GetMarginOrderService {
	s.orderID = &orderID
	return s
}

// OrigClientOrderID sets the client order id to look up.
func (s *GetMarginOrderService) OrigClientOrderID(origClientOrderID string) *GetMarginOrderService {
	s.origClientOrderID = &origClientOrderID
	return s
}

// Do sends the signed query and decodes the order.
func (s *GetMarginOrderService) Do(ctx context.Context, opts ...RequestOption) (res *Order, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/margin/order",
		secType:  secTypeSigned,
	}
	r.setParam("symbol", s.symbol)
	if s.orderID != nil {
		r.setParam("orderId", *s.orderID)
	}
	if s.origClientOrderID != nil {
		r.setParam("origClientOrderId", *s.origClientOrderID)
	}
	if s.isIsolated {
		r.setParam("isIsolated", "TRUE")
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	res = new(Order)
	err = json.Unmarshal(data, res)
	if err != nil {
		return nil, err
	}
	return res, nil
}
// ListMarginOpenOrdersService lists open margin orders
// (GET /sapi/v1/margin/openOrders). When symbol is empty, orders for all
// symbols are requested.
type ListMarginOpenOrdersService struct {
	c          *Client
	symbol     string
	isIsolated bool
}

// Symbol restricts the listing to one trading symbol.
func (s *ListMarginOpenOrdersService) Symbol(symbol string) *ListMarginOpenOrdersService {
	s.symbol = symbol
	return s
}

// IsIsolated marks the query as targeting an isolated-margin account.
func (s *ListMarginOpenOrdersService) IsIsolated(isIsolated bool) *ListMarginOpenOrdersService {
	s.isIsolated = isIsolated
	return s
}

// Do sends the signed query and decodes the list of open orders.
// On error it returns an empty (non-nil) slice.
func (s *ListMarginOpenOrdersService) Do(ctx context.Context, opts ...RequestOption) (res []*Order, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/margin/openOrders",
		secType:  secTypeSigned,
	}
	if s.symbol != "" {
		r.setParam("symbol", s.symbol)
	}
	if s.isIsolated {
		r.setParam("isIsolated", "TRUE")
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []*Order{}, err
	}
	res = make([]*Order, 0)
	err = json.Unmarshal(data, &res)
	if err != nil {
		return []*Order{}, err
	}
	return res, nil
}
// ListMarginOrdersService lists all account margin orders — active, canceled,
// or filled (GET /sapi/v1/margin/allOrders).
type ListMarginOrdersService struct {
	c          *Client
	symbol     string
	orderID    *int64
	startTime  *int64
	endTime    *int64
	limit      *int
	isIsolated bool
}

// Symbol sets the trading symbol.
func (s *ListMarginOrdersService) Symbol(symbol string) *ListMarginOrdersService {
	s.symbol = symbol
	return s
}

// IsIsolated marks the query as targeting an isolated-margin account.
func (s *ListMarginOrdersService) IsIsolated(isIsolated bool) *ListMarginOrdersService {
	s.isIsolated = isIsolated
	return s
}

// OrderID sets the order id to start listing from.
func (s *ListMarginOrdersService) OrderID(orderID int64) *ListMarginOrdersService {
	s.orderID = &orderID
	return s
}

// StartTime sets the lower time bound (milliseconds since epoch, per API).
func (s *ListMarginOrdersService) StartTime(startTime int64) *ListMarginOrdersService {
	s.startTime = &startTime
	return s
}

// EndTime sets the upper time bound (milliseconds since epoch, per API).
func (s *ListMarginOrdersService) EndTime(endTime int64) *ListMarginOrdersService {
	s.endTime = &endTime
	return s
}

// Limit sets the maximum number of orders to return.
func (s *ListMarginOrdersService) Limit(limit int) *ListMarginOrdersService {
	s.limit = &limit
	return s
}

// Do sends the signed query and decodes the order list.
// On error it returns an empty (non-nil) slice.
func (s *ListMarginOrdersService) Do(ctx context.Context, opts ...RequestOption) (res []*Order, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/margin/allOrders",
		secType:  secTypeSigned,
	}
	r.setParam("symbol", s.symbol)
	if s.orderID != nil {
		r.setParam("orderId", *s.orderID)
	}
	if s.startTime != nil {
		r.setParam("startTime", *s.startTime)
	}
	if s.endTime != nil {
		r.setParam("endTime", *s.endTime)
	}
	if s.limit != nil {
		r.setParam("limit", *s.limit)
	}
	if s.isIsolated {
		r.setParam("isIsolated", "TRUE")
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []*Order{}, err
	}
	res = make([]*Order, 0)
	err = json.Unmarshal(data, &res)
	if err != nil {
		return []*Order{}, err
	}
	return res, nil
}
// CancelMarginOrderResponse is the decoded response of a margin order
// cancellation. Quantity/price fields are kept as strings, as returned by
// the API.
type CancelMarginOrderResponse struct {
	Symbol                   string          `json:"symbol"`
	OrigClientOrderID        string          `json:"origClientOrderId"`
	OrderID                  string          `json:"orderId"`
	ClientOrderID            string          `json:"clientOrderId"`
	TransactTime             int64           `json:"transactTime"`
	Price                    string          `json:"price"`
	OrigQuantity             string          `json:"origQty"`
	ExecutedQuantity         string          `json:"executedQty"`
	CummulativeQuoteQuantity string          `json:"cummulativeQuoteQty"`
	Status                   OrderStatusType `json:"status"`
	TimeInForce              TimeInForceType `json:"timeInForce"`
	Type                     OrderType       `json:"type"`
	Side                     SideType        `json:"side"`
}
// CreateMarginOCOService places a new OCO (one-cancels-the-other) order for
// a margin account (POST /sapi/v1/margin/order/oco). Optional parameters are
// pointers so that unset values are omitted from the request.
type CreateMarginOCOService struct {
	c                    *Client
	symbol               string
	isIsolated           *bool
	listClientOrderID    *string
	side                 SideType
	quantity             *string
	limitClientOrderID   *string
	price                *string
	limitIcebergQty      *string
	stopClientOrderID    *string
	stopPrice            *string
	stopLimitPrice       *string
	stopIcebergQty       *string
	stopLimitTimeInForce *TimeInForceType
	newOrderRespType     *NewOrderRespType
	sideEffectType       *SideEffectType
}

// Symbol sets the trading symbol.
func (s *CreateMarginOCOService) Symbol(symbol string) *CreateMarginOCOService {
	s.symbol = symbol
	return s
}

// IsIsolated sets the order to isolated margin.
func (s *CreateMarginOCOService) IsIsolated(isIsolated bool) *CreateMarginOCOService {
	s.isIsolated = &isIsolated
	return s
}

// Side sets the order side (buy/sell).
func (s *CreateMarginOCOService) Side(side SideType) *CreateMarginOCOService {
	s.side = side
	return s
}

// Quantity sets the base-asset quantity.
func (s *CreateMarginOCOService) Quantity(quantity string) *CreateMarginOCOService {
	s.quantity = &quantity
	return s
}

// ListClientOrderID sets the client id for the whole order list.
func (s *CreateMarginOCOService) ListClientOrderID(listClientOrderID string) *CreateMarginOCOService {
	s.listClientOrderID = &listClientOrderID
	return s
}

// LimitClientOrderID sets the client id for the limit leg.
func (s *CreateMarginOCOService) LimitClientOrderID(limitClientOrderID string) *CreateMarginOCOService {
	s.limitClientOrderID = &limitClientOrderID
	return s
}

// Price sets the limit leg's price.
func (s *CreateMarginOCOService) Price(price string) *CreateMarginOCOService {
	s.price = &price
	return s
}

// LimitIcebergQuantity sets the limit leg's iceberg quantity.
func (s *CreateMarginOCOService) LimitIcebergQuantity(limitIcebergQty string) *CreateMarginOCOService {
	s.limitIcebergQty = &limitIcebergQty
	return s
}

// StopClientOrderID sets the client id for the stop leg.
func (s *CreateMarginOCOService) StopClientOrderID(stopClientOrderID string) *CreateMarginOCOService {
	s.stopClientOrderID = &stopClientOrderID
	return s
}

// StopPrice sets the stop leg's trigger price.
func (s *CreateMarginOCOService) StopPrice(stopPrice string) *CreateMarginOCOService {
	s.stopPrice = &stopPrice
	return s
}

// StopLimitPrice sets the stop leg's limit price.
func (s *CreateMarginOCOService) StopLimitPrice(stopLimitPrice string) *CreateMarginOCOService {
	s.stopLimitPrice = &stopLimitPrice
	return s
}

// StopIcebergQty sets the stop leg's iceberg quantity.
// (Previous comment wrongly said "set stop limit price".)
func (s *CreateMarginOCOService) StopIcebergQty(stopIcebergQty string) *CreateMarginOCOService {
	s.stopIcebergQty = &stopIcebergQty
	return s
}

// StopLimitTimeInForce sets the stop leg's timeInForce.
func (s *CreateMarginOCOService) StopLimitTimeInForce(stopLimitTimeInForce TimeInForceType) *CreateMarginOCOService {
	s.stopLimitTimeInForce = &stopLimitTimeInForce
	return s
}

// NewOrderRespType sets the desired response type for the new order.
// (Previous comment wrongly said "set icebergQuantity".)
func (s *CreateMarginOCOService) NewOrderRespType(newOrderRespType NewOrderRespType) *CreateMarginOCOService {
	s.newOrderRespType = &newOrderRespType
	return s
}

// SideEffectType sets the side-effect type.
func (s *CreateMarginOCOService) SideEffectType(sideEffectType SideEffectType) *CreateMarginOCOService {
	s.sideEffectType = &sideEffectType
	return s
}
// createOrder builds and posts the signed margin OCO request to
// /sapi/v1/margin/order/oco and returns the raw response body.
// NOTE(review): s.quantity, s.price and s.stopPrice are dereferenced without
// nil checks — calling Do without setting these required fields panics;
// confirm that is the intended contract for required parameters.
func (s *CreateMarginOCOService) createOrder(ctx context.Context, opts ...RequestOption) (data []byte, err error) {
	r := &request{
		method:   http.MethodPost,
		endpoint: "/sapi/v1/margin/order/oco",
		secType:  secTypeSigned,
	}
	// Required parameters.
	m := params{
		"symbol":    s.symbol,
		"side":      s.side,
		"quantity":  *s.quantity,
		"price":     *s.price,
		"stopPrice": *s.stopPrice,
	}
	// Optional parameters are added only when the caller set them.
	if s.isIsolated != nil {
		if *s.isIsolated {
			m["isIsolated"] = "TRUE"
		} else {
			m["isIsolated"] = "FALSE"
		}
	}
	if s.listClientOrderID != nil {
		m["listClientOrderId"] = *s.listClientOrderID
	}
	if s.limitClientOrderID != nil {
		m["limitClientOrderId"] = *s.limitClientOrderID
	}
	if s.limitIcebergQty != nil {
		m["limitIcebergQty"] = *s.limitIcebergQty
	}
	if s.stopClientOrderID != nil {
		m["stopClientOrderId"] = *s.stopClientOrderID
	}
	if s.stopLimitPrice != nil {
		m["stopLimitPrice"] = *s.stopLimitPrice
	}
	if s.stopIcebergQty != nil {
		m["stopIcebergQty"] = *s.stopIcebergQty
	}
	if s.stopLimitTimeInForce != nil {
		m["stopLimitTimeInForce"] = *s.stopLimitTimeInForce
	}
	if s.newOrderRespType != nil {
		m["newOrderRespType"] = *s.newOrderRespType
	}
	if s.sideEffectType != nil {
		m["sideEffectType"] = *s.sideEffectType
	}
	r.setFormParams(m)
	data, err = s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []byte{}, err
	}
	return data, nil
}
// Do submits the create-OCO request and decodes the JSON reply.
func (s *CreateMarginOCOService) Do(ctx context.Context, opts ...RequestOption) (*CreateMarginOCOResponse, error) {
	data, err := s.createOrder(ctx, opts...)
	if err != nil {
		return nil, err
	}
	res := new(CreateMarginOCOResponse)
	if err := json.Unmarshal(data, res); err != nil {
		return nil, err
	}
	return res, nil
}
// CreateMarginOCOResponse define create order response
type CreateMarginOCOResponse struct {
	OrderListID       int64  `json:"orderListId"`
	ContingencyType   string `json:"contingencyType"`
	ListStatusType    string `json:"listStatusType"`
	ListOrderStatus   string `json:"listOrderStatus"`
	ListClientOrderID string `json:"listClientOrderId"`
	TransactionTime   int64  `json:"transactionTime"`
	Symbol            string `json:"symbol"`
	// Margin-specific fields: amount/asset borrowed to fill the order.
	MarginBuyBorrowAmount string `json:"marginBuyBorrowAmount"`
	MarginBuyBorrowAsset  string `json:"marginBuyBorrowAsset"`
	IsIsolated            bool   `json:"isIsolated"`
	// Orders lists the legs of the OCO; OrderReports carries their details.
	Orders       []*MarginOCOOrder       `json:"orders"`
	OrderReports []*MarginOCOOrderReport `json:"orderReports"`
}

// MarginOCOOrder may be returned in an array of MarginOCOOrder in a CreateMarginOCOResponse
type MarginOCOOrder struct {
	Symbol        string `json:"symbol"`
	OrderID       int64  `json:"orderId"`
	ClientOrderID string `json:"clientOrderId"`
}

// MarginOCOOrderReport may be returned in an array of MarginOCOOrderReport in a CreateMarginOCOResponse
type MarginOCOOrderReport struct {
	Symbol                   string          `json:"symbol"`
	OrderID                  int64           `json:"orderId"`
	OrderListID              int64           `json:"orderListId"`
	ClientOrderID            string          `json:"clientOrderId"`
	TransactionTime          int64           `json:"transactionTime"`
	Price                    string          `json:"price"`
	OrigQuantity             string          `json:"origQty"`
	ExecutedQuantity         string          `json:"executedQty"`
	CummulativeQuoteQuantity string          `json:"cummulativeQuoteQty"`
	Status                   OrderStatusType `json:"status"`
	TimeInForce              TimeInForceType `json:"timeInForce"`
	Type                     OrderType       `json:"type"`
	Side                     SideType        `json:"side"`
	StopPrice                string          `json:"stopPrice"`
}
// CancelMarginOCOService cancel an entire Order List for a margin account.
// Exactly one of listClientOrderID / orderListID identifies the list to cancel.
type CancelMarginOCOService struct {
	c                 *Client
	symbol            string
	isIsolated        *bool // nil = parameter omitted from the request
	listClientOrderID string
	orderListID       int64
	newClientOrderID  string
}
// Symbol sets the trading pair symbol.
func (s *CancelMarginOCOService) Symbol(v string) *CancelMarginOCOService {
	s.symbol = v
	return s
}

// IsIsolated sets the isIsolated request parameter.
func (s *CancelMarginOCOService) IsIsolated(v bool) *CancelMarginOCOService {
	s.isIsolated = &v
	return s
}

// ListClientOrderID sets the listClientOrderId request parameter.
func (s *CancelMarginOCOService) ListClientOrderID(v string) *CancelMarginOCOService {
	s.listClientOrderID = v
	return s
}

// OrderListID sets the orderListId request parameter.
func (s *CancelMarginOCOService) OrderListID(v int64) *CancelMarginOCOService {
	s.orderListID = v
	return s
}

// NewClientOrderID sets the newClientOrderId request parameter.
func (s *CancelMarginOCOService) NewClientOrderID(v string) *CancelMarginOCOService {
	s.newClientOrderID = v
	return s
}
// Do submits the signed DELETE for the order list and decodes the reply.
func (s *CancelMarginOCOService) Do(ctx context.Context, opts ...RequestOption) (*CancelMarginOCOResponse, error) {
	r := &request{
		method:   http.MethodDelete,
		endpoint: "/sapi/v1/margin/orderList",
		secType:  secTypeSigned,
	}
	r.setFormParam("symbol", s.symbol)
	// Zero values mean "not set"; only populated identifiers are sent.
	if s.listClientOrderID != "" {
		r.setFormParam("listClientOrderId", s.listClientOrderID)
	}
	if s.isIsolated != nil {
		r.setFormParam("isIsolated", *s.isIsolated)
	}
	if s.orderListID != 0 {
		r.setFormParam("orderListId", s.orderListID)
	}
	if s.newClientOrderID != "" {
		r.setFormParam("newClientOrderId", s.newClientOrderID)
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	res := new(CancelMarginOCOResponse)
	if err := json.Unmarshal(data, res); err != nil {
		return nil, err
	}
	return res, nil
}
// CancelMarginOCOResponse define create cancelled oco response.
type CancelMarginOCOResponse struct {
	OrderListID       int64  `json:"orderListId"`
	ContingencyType   string `json:"contingencyType"`
	ListStatusType    string `json:"listStatusType"`
	ListOrderStatus   string `json:"listOrderStatus"`
	ListClientOrderID string `json:"listClientOrderId"`
	TransactionTime   int64  `json:"transactionTime"`
	Symbol            string `json:"symbol"`
	IsIsolated        bool   `json:"isIsolated"`
	// Orders lists the cancelled legs; OrderReports carries their details.
	Orders       []*MarginOCOOrder       `json:"orders"`
	OrderReports []*MarginOCOOrderReport `json:"orderReports"`
}
|
package migrations
import "gorm.io/gorm"
// IMigrations is the contract implemented by every schema migration:
// Up applies the migration, Down reverts it.
type IMigrations interface {
	Up() error
	Down() error
}
func _addColumnsToTable(db *gorm.DB, dst interface{}, column string) error {
if !db.Migrator().HasColumn(dst, column) {
if err := db.Migrator().AddColumn(dst, column); err != nil {
return err
}
}
return nil
} |
package database
import (
"context"
"database/sql"
"strings"
"sync"
"time"
logger "github.com/panlibin/vglog"
"github.com/panlibin/virgo"
// mysql driver
_ "github.com/go-sql-driver/mysql"
)
// defaultQueryChannelSize is the buffer size of each worker's queue.
const defaultQueryChannelSize = 1024

// Operation kinds carried by mysqlQueryContext.
const (
	queryTypeQuery int32 = iota
	queryTypeQueryRow
	queryTypeExec
)

// mysqlQueryContext is one queued database operation together with how its
// result is delivered: synchronously over callbackChan, or asynchronously
// through cb.
type mysqlQueryContext struct {
	query        string
	args         []interface{}
	queryType    int32
	callbackChan chan []interface{} // result channel for synchronous calls
	async        bool               // true when cb should be scheduled instead
	cb           func([]interface{})
	ctx          interface{} // opaque caller context forwarded to cb
}
// mysqlInstance is a single worker that serializes database operations onto
// a shared *sql.DB through its own channel-fed goroutine (see run).
type mysqlInstance struct {
	p         virgo.IProcedure // used to schedule async result callbacks
	db        *sql.DB
	queryChan chan *mysqlQueryContext
	wg        *sync.WaitGroup
}
// open binds the instance to a shared *sql.DB, registers the worker on the
// wait group, and starts the goroutine that drains queryChan.
func (m *mysqlInstance) open(db *sql.DB, wg *sync.WaitGroup) {
	m.db = db
	m.wg = wg
	m.queryChan = make(chan *mysqlQueryContext, defaultQueryChannelSize)
	m.wg.Add(1)
	go m.run()
}
// close asks the worker goroutine to stop by queueing a nil sentinel; see run.
func (m *mysqlInstance) close() {
	if m.queryChan == nil {
		return
	}
	m.queryChan <- nil
}
// run is the worker loop: it executes queued operations one at a time and
// delivers results either synchronously (callbackChan) or asynchronously via
// the procedure's SyncTask. A nil item is the stop sentinel; only this
// goroutine closes queryChan, after the sentinel is seen.
func (m *mysqlInstance) run() {
	defer m.wg.Done()
	for queryCtx := range m.queryChan {
		if queryCtx == nil {
			break
		}
		var ret interface{}
		var err error
		switch queryCtx.queryType {
		case queryTypeQuery:
			ret, err = m.db.Query(queryCtx.query, queryCtx.args...)
		case queryTypeQueryRow:
			// QueryRow defers errors to Scan, so no err here.
			ret = m.db.QueryRow(queryCtx.query, queryCtx.args...)
		case queryTypeExec:
			ret, err = m.db.Exec(queryCtx.query, queryCtx.args...)
		default:
			continue
		}
		if err != nil {
			logger.Errorf("%v", err)
			// Pass the SQL text as an argument, not as the format string:
			// a '%' inside the query must not be interpreted as a verb.
			logger.Errorf("%s; "+strings.Repeat("%v\t", len(queryCtx.args)),
				append([]interface{}{queryCtx.query}, queryCtx.args...)...)
		}
		if queryCtx.async {
			if queryCtx.cb != nil {
				m.p.SyncTask(queryCtx.cb, queryCtx.ctx, ret, err)
			}
		} else {
			if queryCtx.callbackChan != nil {
				queryCtx.callbackChan <- []interface{}{ret, err}
			}
		}
	}
	close(m.queryChan)
}
// addQuery enqueues one operation for this instance's worker goroutine;
// blocks when the (buffered) queue is full.
func (m *mysqlInstance) addQuery(queryCtx *mysqlQueryContext) {
	m.queryChan <- queryCtx
}
// Mysql manages a set of mysqlInstance workers that share one connection
// pool, plus a background keep-alive pinger (see keepAlive).
type Mysql struct {
	arrDb           []*mysqlInstance
	p               virgo.IProcedure
	db              *sql.DB
	aliveTicker     *time.Ticker
	wg              *sync.WaitGroup
	cancelAliveCtx  context.Context
	cancelAliveFunc context.CancelFunc
}
// NewMysql creates a Mysql manager bound to the given procedure; call Open
// before issuing queries.
func NewMysql(p virgo.IProcedure) *Mysql {
	m := new(Mysql)
	m.p = p
	m.wg = new(sync.WaitGroup)
	return m
}
// Open connects to the database described by dsn and starts instNum worker
// instances that share a single connection pool sized to instNum.
func (m *Mysql) Open(dsn string, instNum int32) error {
	m.arrDb = make([]*mysqlInstance, instNum)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return err
	}
	// sql.Open does not dial; verify connectivity now, and release the pool
	// on failure so it is not leaked.
	if err = db.Ping(); err != nil {
		db.Close()
		return err
	}
	db.SetMaxOpenConns(int(instNum))
	db.SetMaxIdleConns(int(instNum))
	m.db = db
	go m.keepAlive()
	for i := int32(0); i < instNum; i++ {
		pDbInst := new(mysqlInstance)
		pDbInst.p = m.p
		pDbInst.open(db, m.wg)
		m.arrDb[i] = pDbInst
	}
	return nil
}
// Close shuts the manager down in order: stop the keep-alive ticker, cancel
// the keep-alive goroutine, send the stop sentinel to every worker, wait for
// all workers to drain, then close the shared connection pool.
func (m *Mysql) Close() {
	if m.aliveTicker != nil {
		m.aliveTicker.Stop()
		m.aliveTicker = nil
	}
	if m.cancelAliveFunc != nil {
		m.cancelAliveFunc()
		m.cancelAliveFunc = nil
	}
	if m.arrDb != nil {
		for _, pDb := range m.arrDb {
			if pDb != nil {
				pDb.close()
			}
		}
	}
	// Wait for every worker started in mysqlInstance.open to finish.
	m.wg.Wait()
	if m.db != nil {
		m.db.Close()
	}
}
// Query runs a multi-row query on worker dbIdx and blocks until it completes.
func (m *Mysql) Query(dbIdx uint32, query string, args ...interface{}) (*sql.Rows, error) {
	ch := m.pushOperator(dbIdx, query, args, queryTypeQuery, false, nil, nil)
	result := <-ch
	close(ch)
	var rows *sql.Rows
	var err error
	if result[0] != nil {
		rows = result[0].(*sql.Rows)
	}
	if result[1] != nil {
		err = result[1].(error)
	}
	return rows, err
}
// QueryRow runs a single-row query on worker dbIdx and blocks for the result.
func (m *Mysql) QueryRow(dbIdx uint32, query string, args ...interface{}) *sql.Row {
	ch := m.pushOperator(dbIdx, query, args, queryTypeQueryRow, false, nil, nil)
	result := <-ch
	close(ch)
	return result[0].(*sql.Row)
}
// Exec runs a statement on worker dbIdx and blocks until it completes.
func (m *Mysql) Exec(dbIdx uint32, query string, args ...interface{}) (sql.Result, error) {
	ch := m.pushOperator(dbIdx, query, args, queryTypeExec, false, nil, nil)
	result := <-ch
	close(ch)
	var res sql.Result
	var err error
	if result[0] != nil {
		res = result[0].(sql.Result)
	}
	if result[1] != nil {
		err = result[1].(error)
	}
	return res, err
}
// AsyncQuery runs a multi-row query on worker dbIdx without blocking; the
// result is delivered to cb (presumably as (ctx, result, error) via the
// procedure's SyncTask — confirm against virgo.IProcedure).
func (m *Mysql) AsyncQuery(ctx interface{}, cb func([]interface{}), dbIdx uint32, query string, args ...interface{}) {
	m.pushOperator(dbIdx, query, args, queryTypeQuery, true, ctx, cb)
}

// AsyncQueryRow runs a single-row query asynchronously; see AsyncQuery.
func (m *Mysql) AsyncQueryRow(ctx interface{}, cb func([]interface{}), dbIdx uint32, query string, args ...interface{}) {
	m.pushOperator(dbIdx, query, args, queryTypeQueryRow, true, ctx, cb)
}

// AsyncExec executes a statement asynchronously; see AsyncQuery.
func (m *Mysql) AsyncExec(ctx interface{}, cb func([]interface{}), dbIdx uint32, query string, args ...interface{}) {
	m.pushOperator(dbIdx, query, args, queryTypeExec, true, ctx, cb)
}
// pushOperator wraps one operation in a mysqlQueryContext and enqueues it on
// worker dbIdx (wrapped modulo the worker count). For synchronous calls it
// returns a one-shot result channel; for async calls it returns nil.
func (m *Mysql) pushOperator(dbIdx uint32, query string, args []interface{}, queryType int32, async bool, ctx interface{}, cb func([]interface{})) chan []interface{} {
	count := uint32(len(m.arrDb))
	if dbIdx >= count {
		dbIdx %= count
	}
	qc := &mysqlQueryContext{
		query:     query,
		args:      args,
		queryType: queryType,
		async:     async,
	}
	var cbChan chan []interface{}
	if async {
		qc.ctx = ctx
		qc.cb = cb
	} else {
		cbChan = make(chan []interface{}, 1)
		qc.callbackChan = cbChan
	}
	m.arrDb[dbIdx].addQuery(qc)
	return cbChan
}
// keepAlive pings the database every 10 minutes so idle connections are not
// dropped by the server; it exits when the cancel context fires (see Close).
// NOTE(review): cancelAliveCtx/cancelAliveFunc/aliveTicker are assigned here,
// inside the goroutine, while Close reads them from another goroutine — if
// Close runs before this goroutine has started, they are still nil and the
// loop is never signalled to stop. Confirm the intended Open/Close ordering
// (creating the context and ticker in Open before `go keepAlive` would
// remove the race).
func (m *Mysql) keepAlive() {
	m.cancelAliveCtx, m.cancelAliveFunc = context.WithCancel(context.Background())
	m.aliveTicker = time.NewTicker(time.Minute * 10)
	bQuit := false
	for !bQuit {
		select {
		case <-m.aliveTicker.C:
			// Ping error is intentionally ignored; the next tick retries.
			m.db.Ping()
		case <-m.cancelAliveCtx.Done():
			bQuit = true
		}
	}
}
|
package main
import (
"os"
"github.com/jinmukeji/jiujiantang-services/api-jinmuid/config"
"github.com/jinmukeji/jiujiantang-services/api-jinmuid/rest"
"github.com/micro/cli/v2"
"github.com/micro/go-micro/v2/web"
)
var (
apiBase string
jwtSignInKey string
debug bool
)
// main configures and runs the go-micro web service: service identity and
// heartbeat registration, CLI/env flags (see webOptions), the REST handler,
// and finally the HTTP server loop.
func main() {
	service := web.NewService(
		// Service Basic Info
		web.Name(config.FullServiceName()),
		web.Version(config.ProductVersion),
		// Fault Tolerance - Heartbeating
		web.RegisterTTL(config.DefaultRegisterTTL),
		web.RegisterInterval(config.DefaultRegisterInterval),
		webOptions(),
	)
	// Init Micro service
	err := service.Init(
		web.Action(func(c *cli.Context) {
			// Setup handler — by now the flag destinations (apiBase,
			// jwtSignInKey, debug) have been populated from CLI/env.
			app := rest.NewApp(apiBase, jwtSignInKey, debug)
			service.Handle("/", app)
			if c.Bool("version") {
				config.PrintFullVersionInfo()
				os.Exit(0)
			}
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Infoln("Service Name:", config.FullServiceName())
	log.Infoln("Version:", config.ProductVersion)
	log.Infof("API Base: /%s", apiBase)
	// Run server
	if err := service.Run(); err != nil {
		log.Fatal(err)
	}
}
// webOptions declares the service's CLI/env flags; each flag writes directly
// into the corresponding package-level variable via Destination.
func webOptions() web.Option {
	return web.Flags(
		&cli.StringFlag{
			Name:        "x_api_base",
			Value:       "",
			Usage:       "API Base URL",
			EnvVars:     []string{"X_API_BASE"},
			Destination: &apiBase,
		},
		&cli.StringFlag{
			Name:        "x_jwt_sign_in_key",
			Usage:       "JWT Sign-in key",
			EnvVars:     []string{"X_JWT_SIGN_IN_KEY"},
			Destination: &jwtSignInKey,
		},
		&cli.BoolFlag{
			Name:  "version",
			Usage: "Show version information",
		},
		&cli.BoolFlag{
			Name:        "x_enable_debug",
			Usage:       "Enable debug",
			EnvVars:     []string{"X_ENABLE_DEBUG"},
			Destination: &debug,
		},
	)
}
|
// minCostToMoveChips returns the minimum cost to move all chips to one
// position: moving by 2 is free and moving by 1 costs 1, so the answer is
// the smaller of the counts of chips on even and odd positions.
func minCostToMoveChips(position []int) int {
	even, odd := 0, 0
	for _, p := range position {
		if p&1 == 1 {
			odd++
		} else {
			even++
		}
	}
	if odd < even {
		return odd
	}
	return even
}
package main
import (
"fmt"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// INFORMATRIONALL mirrors one row of the daily source table
// information_all_d7178819_<yyyymmdd> read in main. (Name and field casing
// keep the original spelling used elsewhere.)
type INFORMATRIONALL struct {
	ID               int    `gorm:"column:id"`
	Url              string `gorm:"column:url"`
	Title            string `gorm:"column:title"`
	Author           string `gorm:"column:author"`
	Source           string `gorm:"column:source"`
	Release_datetime string `gorm:"column:release_datetime"`
	Content          string `gorm:"column:content"`
	Media_type       string `gorm:"column:media_type"`
	Original_title   string `gorm:"column:original_title"`
	Editor           string `gorm:"column:editor"`
	Reporter         string `gorm:"column:reporter"`
	Contents         string `gorm:"column:contents"`
	Reading_times    string `gorm:"column:reading_times"`
	Abstract_data    string `gorm:"column:abstract_data"`
	Media            string `gorm:"column:media"`
	Media_channel    string `gorm:"column:media_channel"`
	Location         string `gorm:"column:location"`
	Location_path    string `gorm:"column:location_path"`
	Collection_tool  string `gorm:"column:collection_tool"`
	User             string `gorm:"column:user"`
	Site_url         string `gorm:"column:site_url"`
	Leaf_id          string `gorm:"column:leaf_id"`
	Task_id          string `gorm:"column:task_id"`
	Task_name        string `gorm:"column:task_name"`
	Get_time         string `gorm:"column:get_time"`
	Keyword          string `gorm:"column:keyword"`
	Pub_time         string `gorm:"column:pub_time"`
	Project_id       string `gorm:"column:project_id"`
	Error_msg        string `gorm:"column:error_msg"`
}
// BISHOUZHAN_FENGNIAO mirrors one row of the destination table
// bishouzhan_fengniao written in main. Only a subset of the fields is
// populated by the copy loop; the rest keep their zero values.
type BISHOUZHAN_FENGNIAO struct {
	ID                string `gorm:"column:id"`
	MEDIA_TYPE        string `gorm:"column:media_type"`
	URL               string `gorm:"column:url"`
	TITLE             string `gorm:"column:title"`
	ORIGINAL_TITLE    string `gorm:"column:original_title"`
	ABSTRACT_DATA     string `gorm:"column:abstract_data"`
	CONTENT           string `gorm:"column:content"`
	MEDIA             string `gorm:"column:media"`
	MEDIA_CHANNEL     string `gorm:"column:media_channel"`
	MEDIA_CHANNEL_SND string `gorm:"column:media_channel_snd"`
	MEDIA_CHANNEL_TRD string `gorm:"column:media_channel_trd"`
	LOCATION          string `gorm:"column:location"`
	LOCATIONPATH      string `gorm:"column:locationPath"`
	SOURCE            string `gorm:"column:source"`
	RELEASE_DATETIME  string `gorm:"column:release_datetime"`
	CONTAINS_PICTURES string `gorm:"column:contains_pictures"`
	CONTAINS_VIDEOS   string `gorm:"column:contains_videos"`
	EDITOR            string `gorm:"column:editor"`
	AUTHOR            string `gorm:"column:author"`
	REPORTER          string `gorm:"column:reporter"`
	CONTENTS          string `gorm:"column:contents"`
	READING_TIMES     string `gorm:"column:reading_times"`
	ANALYSIS_KEYWORD  string `gorm:"column:analysis_keyword"`
	SITE_URL          string `gorm:"column:site_url"`
	GET_TIME          string `gorm:"column:get_time"`
	HOTSPOT           string `gorm:"column:hotspot"`
	REPOSTS_COUNT     string `gorm:"column:reposts_count"`
	PRAISE_COUNT      string `gorm:"column:praise_count"`
	TERMINAL          string `gorm:"column:terminal"`
	USERS_TYPE        string `gorm:"column:users_type"`
	AGE               int    `gorm:"column:age"`
	GENDER            string `gorm:"column:gender"`
	REGION            string `gorm:"column:region"`
	ATTENTION_COUNT   string `gorm:"column:attention_count"`
	FANS_COUNT        string `gorm:"column:fans_count"`
	STATUSES_COUNT    string `gorm:"column:statuses_count"`
	TEXT_TYPE         string `gorm:"column:text_type"`
	ARTICLE_COUNT     string `gorm:"column:article_count"`
	AVGREADING_TIMES  string `gorm:"column:avgreading_times"`
	PUBLIC_FUNCTION   string `gorm:"column:public_function"`
	PUBLIC_SUBJECT    string `gorm:"column:public_subject"`
	TIEBA_FOLLOWERS   string `gorm:"column:tieba_followers"`
	COLLECTION_TOOL   string `gorm:"column:collection_tool"`
	USER              string `gorm:"column:user"`
	KEYWORD           string `gorm:"column:keyword"`
	ATTITUDE          string `gorm:"column:attitude"`
	COMPANY_NAME      string `gorm:"column:company_name"`
	CLASSIFICATION    string `gorm:"column:classification"`
	MEDIA_REGION      string `gorm:"column:media_region"`
	CENTRAL_MEDIA     string `gorm:"column:central_media"`
	PRODUCT           string `gorm:"column:product"`
	PRODUCT_ISNEW     string `gorm:"column:product_isnew"`
	TECHNOLOGY        string `gorm:"column:technology"`
	TECHNOLOGY_ISNEW  string `gorm:"column:technology_isnew"`
	HEAT              string `gorm:"column:heat"`
	REPRODUCED_COUNT  string `gorm:"column:reproduced_count"`
	SIMILARITY        string `gorm:"column:similarity"`
	RELATION          string `gorm:"column:relation"`
	NEWS_AUDIENCES    string `gorm:"column:news_audiences"`
	PUBLICITY_VALUES  string `gorm:"column:publicity_values"`
	MEDIA_RANK        string `gorm:"column:media_rank"`
	WORD_COUNT        string `gorm:"column:word_count"`
	LEAFID            string `gorm:"column:leafid"`
	SOURCECODEID      string `gorm:"column:sourcecodeid"`
	TASKID            string `gorm:"column:taskid"`
	TASKNAME          string `gorm:"column:taskname"`
	SYNSTATUS         string `gorm:"column:synstatus"`
	ISREPEAT          string `gorm:"column:isRepeat"`
	MARK              int    `gorm:"column:mark"`
	FLAG              int    `gorm:"column:flag"`
}
var (
	db186 *gorm.DB // source database handle (192.168.95.186, see main)
	db168 *gorm.DB // destination database handle (192.168.182.168, see main)
	err   error
)

const (
	//TimeFormart = "2006-01-02 15:04:05"
	// TimeFormart is the Go reference-time layout (yyyymmdd) used to build
	// the daily source table suffix. (Name keeps the original spelling.)
	TimeFormart = "20060102"
)
// main copies yesterday's rows (pub_time set) from the daily source table on
// 192.168.95.186 into bishouzhan_fengniao on 192.168.182.168.
func main() {
	// NOTE(review): credentials are hard-coded; consider flags or env vars.
	db_user_S := "funbird"
	db_pass_S := "funbird2017"
	db_host_S := "192.168.95.186"
	db_port_S := 3306
	//db_name_S := "funbird"
	db_name_S := "funbird_storage"
	dbConnectBaseStr := "%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local"
	dbConnect_S := fmt.Sprintf(dbConnectBaseStr, db_user_S, db_pass_S, db_host_S, db_port_S, db_name_S)
	fmt.Print(dbConnect_S, "\n")
	db186, err = gorm.Open("mysql", dbConnect_S)
	if err != nil {
		// Abort: continuing with a nil handle would panic on the deferred
		// Close and on every query below.
		fmt.Println(err)
		return
	}
	defer db186.Close()
	db186.LogMode(true)
	db_user_D := "iscloud"
	db_pass_D := "iscloud"
	db_host_D := "192.168.182.168"
	db_port_D := 3306
	db_name_D := "test1"
	dbConnect_D := fmt.Sprintf(dbConnectBaseStr, db_user_D, db_pass_D, db_host_D, db_port_D, db_name_D)
	fmt.Print(dbConnect_D, "\n")
	db168, err = gorm.Open("mysql", dbConnect_D)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db168.Close()
	db168.LogMode(true)
	// Build yesterday's table suffix (yyyymmdd).
	now := time.Now()
	diff, err := time.ParseDuration("-24h")
	if err != nil {
		fmt.Println(err)
	}
	yd := now.Add(diff).Format(TimeFormart)
	inforall := []*INFORMATRIONALL{}
	db186.Table(fmt.Sprintf("%s_%s", "information_all_d7178819", yd)).Where("pub_time != ''").Find(&inforall)
	for _, v := range inforall {
		// Map source columns onto the destination row.
		data := BISHOUZHAN_FENGNIAO{}
		data.ID = v.Leaf_id
		data.URL = v.Url
		data.TITLE = v.Title
		data.AUTHOR = v.Author
		data.SOURCE = v.Source
		data.RELEASE_DATETIME = v.Pub_time
		data.CONTENT = v.Content
		data.MEDIA_TYPE = v.Media_type // overwritten below; kept for parity
		data.ORIGINAL_TITLE = v.Original_title
		data.EDITOR = v.Editor
		data.REPORTER = v.Reporter
		data.CONTENTS = v.Contents
		data.READING_TIMES = v.Reading_times
		data.ABSTRACT_DATA = v.Abstract_data
		data.MEDIA = v.Media
		data.MEDIA_CHANNEL = v.Media_channel
		data.LOCATION = v.Location
		data.LOCATIONPATH = v.Location_path
		data.COLLECTION_TOOL = v.Collection_tool
		data.USER = v.User
		data.SITE_URL = v.Site_url
		data.LEAFID = v.Leaf_id
		data.TASKID = v.Task_id
		data.TASKNAME = v.Task_name
		data.GET_TIME = v.Get_time
		data.KEYWORD = v.Keyword
		//data.TAG = "政策"
		data.MEDIA_TYPE = "新闻"
		db168.Table("bishouzhan_fengniao").Create(&data)
	}
}
|
package puzzle
import (
"fmt"
"math/rand"
"sync"
"time"
)
// initPopulation builds `size` random candidate puzzles of order n (under
// constraint matrix cm) together with their fitness scores.
func initPopulation(n int, size int, cm []int) ([][]int, []int) {
	pop := make([][]int, size)
	fit := make([]int, size)
	for i := 0; i < size; i++ {
		candidate := RandomPuzzle(n, cm)
		score, _ := Evaluate(n, candidate)
		pop[i] = candidate
		fit[i] = score
	}
	return pop, fit
}
// pickSurvivors selects survRate*len(population) survivors. The first
// `elitism` slots come from a partial selection sort that moves the fittest
// individuals to the front of population (copying them into survived); the
// remaining slots are filled concurrently by fitness-proportional
// (roulette-wheel) sampling.
// NOTE(review): assumes elitism <= numSurvived and a positive fitness sum —
// confirm with the caller's parameters (GeneticPuzzle uses elitism=70).
func pickSurvivors(population [][]int, populationFitness []int, elitism int, survRate float32) [][]int {
	numSurvived := int(float32(len(population)) * survRate)
	survived := make([][]int, numSurvived)
	// Partial selection sort: after iteration i, population[i] is the i-th
	// fittest individual; both slices are swapped in lock-step.
	for i := 0; i < elitism; i++ {
		maxValue := populationFitness[i]
		for j := i + 1; j < len(population); j++ {
			if populationFitness[j] > maxValue {
				maxValue = populationFitness[j]
				population[i], population[j] = population[j], population[i]
				populationFitness[i], populationFitness[j] = populationFitness[j], populationFitness[i]
			}
		}
		// Copy so later mutation of population cannot alias the elite.
		survived[i] = append([]int{}, population[i]...)
	}
	sum := 0
	for _, val := range populationFitness {
		sum += val
	}
	var wg sync.WaitGroup
	// Roulette-wheel selection; goroutines only read population/fitness and
	// each writes a distinct survived[indf], so no locking is needed.
	for i := elitism; i < len(survived); i++ {
		indf := i // capture per-iteration index (pre-Go 1.22 semantics)
		wg.Add(1)
		go func() {
			defer wg.Done()
			random := rand.Intn(sum) + 1
			sumCount := 0
			for j := 0; j < len(population); j++ {
				sumCount += populationFitness[j]
				if random <= sumCount {
					survived[indf] = append([]int{}, population[j]...)
					break
				}
			}
		}()
	}
	wg.Wait()
	return survived
}
// crossover performs single-point crossover at a random cut located in the
// middle half of the genome (25%-75%), returning the two children.
func crossover(parentA []int, parentB []int) ([]int, []int) {
	n := len(parentA)
	cut := int(float32(n) * (0.25 + 0.5*rand.Float32()))
	childA := make([]int, n)
	childB := make([]int, n)
	copy(childA[:cut], parentA[:cut])
	copy(childB[:cut], parentB[:cut])
	copy(childA[cut:], parentB[cut:])
	copy(childB[cut:], parentA[cut:])
	return childA, childB
}
// mutate perturbs random genes of child in place — each pass fires with
// probability mutRate, which is divided by 3 after every mutation — keeping
// each gene within its constraint bound cm[i]. Returns the new fitness.
func mutate(n int, child []int, cm []int, mutRate float32) int {
	for rand.Float32() < mutRate {
		idx := rand.Intn(len(child))
		v := child[idx] + rand.Intn(cm[idx]-1) + 1
		if v > cm[idx] {
			v -= cm[idx] // wrap back into [1, cm[idx]]
		}
		child[idx] = v
		mutRate /= 3
	}
	fitness, _ := Evaluate(n, child)
	return fitness
}
// GeneticPuzzle evolves a puzzle of order n for `gens` generations with
// elitist roulette selection, single-point crossover and decaying mutation,
// and returns the best puzzle found, its decoded form, its fitness, and the
// rendered solution.
// NOTE(review): the breeding loop writes population[index] and
// population[index+1], so (sizePop - elitism) must be even — it is with the
// constants below (2400-70 is even... actually 2330 is even), but confirm if
// they are ever changed.
func GeneticPuzzle(n int, gens int, survRate float32, mutRate float32) ([]int, []int, int, []string) {
	start := time.Now()
	sizePop := 2400
	mutRate *= float32(n * n)
	elitism := 70
	cm := ConstraintMatrix(n)
	population, populationFitness := initPopulation(n, sizePop, cm)
	bestPuzzle := population[0]
	bestFitness := populationFitness[0]
	for i := 0; i < gens; i++ {
		if i%5000 == 0 {
			fmt.Println("GEN", i, "FITNESS", bestFitness-n*n)
		}
		// pickSurvivors also moves the fittest individuals to the front of
		// population, so population[0] is the current best afterwards.
		survivors := pickSurvivors(population, populationFitness, elitism, survRate)
		if populationFitness[0] > bestFitness {
			bestPuzzle = population[0]
			bestFitness = populationFitness[0]
			fmt.Println("GEN", i, "FITNESS", bestFitness-n*n)
		}
		var wg sync.WaitGroup
		// Breed replacements for every non-elite slot, two children at a time.
		for j := elitism; j < len(population); j += 2 {
			index := j // capture per-iteration index (pre-Go 1.22 semantics)
			wg.Add(1)
			go func() {
				defer wg.Done()
				numSurvived := len(survivors)
				parentAIndex := rand.Intn(numSurvived)
				// Pick a distinct second parent by wrapped offset.
				parentBIndex := parentAIndex + rand.Intn(numSurvived-1) + 1
				if parentBIndex >= numSurvived {
					parentBIndex -= numSurvived
				}
				childA, childB := crossover(survivors[parentAIndex], survivors[parentBIndex])
				populationFitness[index] = mutate(n, childA, cm, mutRate)
				populationFitness[index+1] = mutate(n, childB, cm, mutRate)
				population[index] = childA
				population[index+1] = childB
			}()
		}
		wg.Wait()
	}
	fit, dbfs := Evaluate(n, bestPuzzle)
	elapsed := time.Since(start)
	fmt.Printf("Fitness: %d, ", bestFitness-n*n)
	fmt.Printf("Duration: %s\n", elapsed)
	return bestPuzzle, dbfs, fit, Solution(n, dbfs)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/6/30 9:12 上午
# @File : jz_11_二进制中1的个数.go
# @Description :
# @Attention :
*/
package offer
// NumberOf1 returns the number of set bits in the two's-complement
// representation of n, using Kernighan's trick: n & (n-1) clears the lowest
// set bit, so the loop runs once per set bit.
// Bug fix: the loop condition was `n > 0`, which returned 0 for every
// negative input; `n != 0` terminates correctly (each iteration removes one
// set bit, including the sign bit) and counts negatives properly.
func NumberOf1(n int) int {
	count := 0
	for n != 0 {
		count++
		n = n & (n - 1)
	}
	return count
}
|
package toolkit
import (
"testing"
)
// NOTE(review): these are log-only smoke tests — they print results but
// assert nothing, so they can only fail by panicking.

// TestGenerateSectionIntSliceOfOrderly logs an ordered slice over [1,20] with step 3.
func TestGenerateSectionIntSliceOfOrderly(t *testing.T) {
	t.Logf("Generate orderly slice:%+v\n", GenerateSectionIntSliceOfOrderly(1, 20, 3))
}

// TestGenerateSectionIntSliceOfDisorderly logs a shuffled slice over [1,20].
func TestGenerateSectionIntSliceOfDisorderly(t *testing.T) {
	t.Logf("Generate disorderly slice:%+v\n", GenerateSectionIntSliceOfDisorderly(1, 20))
}

// TestJoinItemOfStringSlice logs the items joined with an empty separator.
func TestJoinItemOfStringSlice(t *testing.T) {
	t.Logf("joined string: %s\n", JoinItemOfStringSlice("", "中", "华", "人", "民", "共", "和", "国"))
}

// TestGenerateSectionFibSlice logs the Fibonacci slice for 20.
func TestGenerateSectionFibSlice(t *testing.T) {
	t.Logf("generate fib: %+v\n", GenerateSectionFibSlice(20))
}

// TestFactorial logs the factorial of 10.
func TestFactorial(t *testing.T) {
	t.Logf("%d is factorial value %+v\n", 10, Factorial(10))
}

// TestFactorialSliceOfUint64 logs the factorial slice for 10.
func TestFactorialSliceOfUint64(t *testing.T) {
	t.Logf("generate factorial: %+v\n", FactorialSliceOfUint64(10))
}
|
package middleware
import (
"fmt"
"net/http"
"os"
"time"
"github.com/agungdwiprasetyo/reverse-proxy/helper"
)
// responseWriter wraps http.ResponseWriter to capture the status code the
// wrapped handler writes, for access logging.
type responseWriter struct {
	http.ResponseWriter
	statusCode int
}

// WriteHeader implement http.ResponseWriter
func (rw *responseWriter) WriteHeader(code int) {
	// Record the code before delegating so Logger can report it.
	rw.statusCode = code
	rw.ResponseWriter.WriteHeader(code)
}
// Logger wraps an http.HandlerFunc and writes one access-log line per request
// to stdout: timestamp, colored status, latency, remote address, colored
// method, and request URI.
func Logger(wrap http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		// Default to 200 OK; WriteHeader overwrites it when called.
		rw := &responseWriter{w, http.StatusOK}
		wrap.ServeHTTP(rw, r)
		end := time.Now()
		statusColor := helper.ColorForStatus(rw.statusCode)
		methodColor := helper.ColorForMethod(r.Method)
		fmt.Fprintf(os.Stdout, "%s[GATEWAY]%s : %v | %s %3d %s | %13v | %15s | %s %-7s %s %s\n",
			helper.White, helper.Reset,
			// Reuse the captured end time instead of a third time.Now() call
			// so the printed timestamp matches the measured interval.
			end.Format("2006/01/02 - 15:04:05"),
			statusColor, rw.statusCode, helper.Reset,
			end.Sub(start),
			r.RemoteAddr,
			methodColor, r.Method, helper.Reset,
			r.RequestURI,
		)
	}
}
|
package eth
import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"go.uber.org/zap"
"github.com/hiromaily/go-crypto-wallet/pkg/account"
models "github.com/hiromaily/go-crypto-wallet/pkg/models/rdb"
)
// RawTx is raw transaction
type RawTx struct {
	UUID  string  `json:"uuid"` // tracking id; unsigned tx hex is not unique
	From  string  `json:"from"`
	To    string  `json:"to"`
	Value big.Int `json:"value"` // amount in wei
	Nonce uint64  `json:"nonce"`
	TxHex string  `json:"txhex"` // RLP-encoded transaction hex
	Hash  string  `json:"hash"`
}
// TODO: WIP: logic is not fixed, it looks same
// when creating multiple transaction from same address, nonce should be increased
// getNonce returns the pending-tag transaction count for fromAddr, offset by
// additionalNonce so several transactions can be built from the same address
// before any is mined.
func (e *Ethereum) getNonce(fromAddr string, additionalNonce int) (uint64, error) {
	// by calling GetTransactionCount()
	nonce, err := e.GetTransactionCount(fromAddr, QuantityTagPending)
	if err != nil {
		return 0, errors.Wrap(err, "fail to call eth.GetTransactionCount()")
	}
	// result is same
	//nonce2, err := e.ethClient.PendingNonceAt(e.ctx, common.HexToAddress(fromAddr))
	//if err != nil {
	//	return 0, errors.Wrap(err, "fail to call ethClient.PendingNonceAt()")
	//}
	if additionalNonce != 0 {
		nonce = nonce.Add(nonce, new(big.Int).SetUint64(uint64(additionalNonce)))
	}
	e.logger.Debug("nonce",
		zap.Uint64("GetTransactionCount(fromAddr, QuantityTagPending)", nonce.Uint64()),
		// zap.Uint64("ethClient.PendingNonceAt(e.ctx, common.HexToAddress(fromAddr))", nonce2),
	)
	return nonce.Uint64(), nil
}
// calculateFee computes the value to send, the transaction fee, and the
// estimated gas for a transfer from fromAddr to toAddr.
// When value is 0 the receiver pays the fee (the whole balance minus fee is
// sent — deposit / transfer-all); otherwise the sender pays the fee on top
// of value and must hold value+fee.
// How to calculate transaction fee?
// https://ethereum.stackexchange.com/questions/19665/how-to-calculate-transaction-fee
func (e *Ethereum) calculateFee(fromAddr, toAddr common.Address, balance, gasPrice, value *big.Int) (*big.Int, *big.Int, *big.Int, error) {
	msg := &ethereum.CallMsg{
		From:     fromAddr,
		To:       &toAddr,
		Gas:      0,
		GasPrice: gasPrice,
		Value:    nil,
		Data:     nil,
	}
	// gasLimit
	estimatedGas, err := e.EstimateGas(msg)
	if err != nil {
		return nil, nil, nil, errors.Wrap(err, "fail to call EstimateGas()")
	}
	// txFee := gasPrice * estimatedGas
	txFee := new(big.Int).Mul(gasPrice, estimatedGas)
	// newValue := value - txFee
	newValue := new(big.Int)
	if value.Uint64() == 0 {
		// receiver pays fee (deposit, transfer(pays all) action)
		// Guard against a balance that cannot even cover the fee; without
		// this check newValue would go negative and the tx would be invalid.
		if balance.Cmp(txFee) <= 0 {
			return nil, nil, nil, errors.Errorf("balance`%d` is insufficient to cover tx fee `%d`", balance.Uint64(), txFee.Uint64())
		}
		newValue = newValue.Sub(balance, txFee)
	} else {
		// sender pays fee (payment, transfer(pays partially)
		newValue = value
		// newValue = newValue.Sub(value, txFee)
		// if balance.Cmp(value) == -1 {
		if balance.Cmp(new(big.Int).Add(value, txFee)) == -1 {
			// -1 if x < y
			//  0 if x == y
			// +1 if x > y
			return nil, nil, nil, errors.Errorf("balance`%d` is insufficient to send `%d`", balance.Uint64(), newValue.Uint64())
		}
	}
	return newValue, txFee, estimatedGas, nil
}
// CreateRawTransaction creates raw transaction for watch only wallet
// TODO: which QuantityTag should be used?
// - Creating offline/raw transactions with Go-Ethereum
//   https://medium.com/@akshay_111meher/creating-offline-raw-transactions-with-go-ethereum-8d6cc8174c5d
// Note: sender acocunt takes fee
//  - if sender sends 5ETH, receiver receives 5ETH
//  - sender has to pay 5ETH + fee
// It returns the unsigned RawTx plus the matching eth_detail_tx insert row.
func (e *Ethereum) CreateRawTransaction(fromAddr, toAddr string, amount uint64, additionalNonce int) (*RawTx, *models.EthDetailTX, error) {
	// validation check
	if e.ValidationAddr(fromAddr) != nil || e.ValidationAddr(toAddr) != nil {
		return nil, nil, errors.New("address validation error")
	}
	e.logger.Debug("eth.CreateRawTransaction()",
		zap.String("fromAddr", fromAddr),
		zap.String("toAddr", toAddr),
		zap.Uint64("amount", amount),
	)
	// TODO: pending status should be included in target balance??
	// TODO: if block is still syncing, proper balance is not returned
	balance, err := e.GetBalance(fromAddr, QuantityTagPending)
	if err != nil {
		return nil, nil, errors.Wrap(err, "fail to call eth.GetBalance()")
	}
	e.logger.Info("balance", zap.Int64("balance", balance.Int64()))
	if balance.Uint64() == 0 {
		return nil, nil, errors.New("balance is needed to send eth")
	}
	// nonce
	nonce, err := e.getNonce(fromAddr, additionalNonce)
	if err != nil {
		return nil, nil, errors.Wrap(err, "fail to call eth.GetTransactionCount()")
	}
	// gasPrice
	// e.ethClient.SuggestGasPrice()
	gasPrice, err := e.GasPrice()
	if err != nil {
		return nil, nil, errors.Wrap(err, "fail to call eth.GasPrice()")
	}
	e.logger.Info("gas_price", zap.Int64("gas_price", gasPrice.Int64()))
	// fromAddr, toAddr common.Address, gasPrice, value *big.Int
	newValue, txFee, estimatedGas, err := e.calculateFee(
		common.HexToAddress(fromAddr),
		common.HexToAddress(toAddr),
		balance,
		gasPrice,
		new(big.Int).SetUint64(amount),
	)
	if err != nil {
		return nil, nil, errors.Wrap(err, "fail to call eth.calculateFee()")
	}
	// TODO: which value should be used for args of types.NewTransaction()
	e.logger.Debug("comparison",
		zap.Uint64("GasLimit", GasLimit),
		zap.Uint64("estimatedGas", estimatedGas.Uint64()),
		zap.Uint64("txFee", txFee.Uint64()))
	// create transaction
	// data is required when contract transaction
	// NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction
	// Note: tx may NOT be unique because fromAddr is not included and parameter is limited
	tx := types.NewTransaction(nonce, common.HexToAddress(toAddr), newValue, GasLimit, gasPrice, nil)
	txHash := tx.Hash().Hex()
	rawTxHex, err := encodeTx(tx)
	if err != nil {
		return nil, nil, errors.Wrap(err, "fail to call encodeTx()")
	}
	// generate UUID to trace transaction because unsignedTx is not unique
	uid := uuid.NewV4().String()
	// create insert data for eth_detail_tx
	txDetailItem := &models.EthDetailTX{
		UUID:            uid,
		SenderAccount:   "",
		SenderAddress:   fromAddr,
		ReceiverAccount: "",
		ReceiverAddress: toAddr,
		Amount:          newValue.Uint64(),
		Fee:             txFee.Uint64(),
		GasLimit:        uint32(estimatedGas.Uint64()),
		Nonce:           nonce,
		UnsignedHexTX:   *rawTxHex,
	}
	// RawTx
	rawtx := &RawTx{
		UUID:  uid,
		From:  fromAddr,
		To:    toAddr,
		Value: *newValue,
		Nonce: nonce,
		TxHex: *rawTxHex,
		Hash:  txHash,
	}
	return rawtx, txDetailItem, nil
}
// SignOnRawTransaction signs on raw transaction
// - https://ethereum.stackexchange.com/questions/16472/signing-a-raw-transaction-in-go
// - Note: this requires private key on this machine, if node is working remotely, it would not work.
// It decodes rawTx.TxHex, signs it with the sender's private key using the
// EIP-155 signer for e.netID, and returns a new RawTx for the signed form.
func (e *Ethereum) SignOnRawTransaction(rawTx *RawTx, passphrase string, senderAccount account.AccountType) (*RawTx, error) {
	txHex := rawTx.TxHex
	fromAddr := rawTx.From
	tx, err := decodeTx(txHex)
	if err != nil {
		return nil, errors.Wrap(err, "fail to call decodeTx(txHex)")
	}
	// get private key
	key, err := e.GetPrivKey(fromAddr, passphrase, senderAccount)
	if err != nil {
		return nil, errors.Wrap(err, "fail to call e.GetPrivKey()")
	}
	// chain id
	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md
	chainID := big.NewInt(int64(e.netID))
	if chainID.Uint64() == 0 {
		return nil, errors.Errorf("chainID can't get from netID: %d", e.netID)
	}
	e.logger.Debug("call types.SignTx",
		zap.Any("tx", tx),
		zap.Uint64("chainID", chainID.Uint64()),
		zap.Any("key.PrivateKey", key.PrivateKey),
	)
	// sign
	signedTX, err := types.SignTx(tx, types.NewEIP155Signer(chainID), key.PrivateKey)
	if err != nil {
		return nil, errors.Wrap(err, "fail to call types.SignTx()")
	}
	// Recover sender/receiver/value from the signed tx for the result.
	msg, err := signedTX.AsMessage(types.NewEIP155Signer(chainID))
	if err != nil {
		// Fixed typo in the wrapped message ("cll" -> "call").
		return nil, errors.Wrap(err, "fail to call signedTX.AsMessage()")
	}
	encodedTx, err := encodeTx(signedTX)
	if err != nil {
		return nil, errors.Wrap(err, "fail to call encodeTx()")
	}
	resTx := &RawTx{
		UUID:  rawTx.UUID,
		From:  msg.From().Hex(),
		To:    msg.To().Hex(),
		Value: *msg.Value(),
		Nonce: msg.Nonce(),
		TxHex: *encodedTx,
		Hash:  signedTX.Hash().Hex(),
	}
	return resTx, nil
}
// SendSignedRawTransaction sends signed raw transaction
//   - SendRawTransaction in rpc_eth_tx.go
//   - SendRawTx in client.go
//
// signedTxHex is the 0x-prefixed RLP hex of an already-signed transaction;
// the returned string is the transaction hash reported by the node.
func (e *Ethereum) SendSignedRawTransaction(signedTxHex string) (string, error) {
	decodedTx, err := decodeTx(signedTxHex)
	if err != nil {
		return "", errors.Wrap(err, "fail to call decodeTx(signedTxHex)")
	}
	txHash, err := e.SendRawTransactionWithTypesTx(decodedTx)
	if err != nil {
		return "", errors.Wrap(err, "fail to call SendRawTransactionWithTypesTx()")
	}
	// err is guaranteed nil at this point; return an explicit nil instead of
	// the stale variable
	return txHash, nil
}
// GetConfirmation returns the confirmation number: how many blocks the chain
// has advanced since the block that contains the given transaction.
// It returns an error while the transaction is still pending (BlockNumber == 0).
func (e *Ethereum) GetConfirmation(hashTx string) (uint64, error) {
	txInfo, err := e.GetTransactionByHash(hashTx)
	if err != nil {
		return 0, err
	}
	// BlockNumber == 0 is used by GetTransactionByHash for a not-yet-mined tx
	if txInfo.BlockNumber == 0 {
		return 0, errors.New("block number can't be retrieved")
	}
	currentBlockNum, err := e.BlockNumber()
	if err != nil {
		return 0, err
	}
	confirmation := currentBlockNum.Int64() - txInfo.BlockNumber
	// guard against a negative delta (e.g. a reorg between the two RPC calls);
	// the previous uint64 cast would wrap it into a huge bogus value
	if confirmation < 0 {
		return 0, nil
	}
	return uint64(confirmation), nil
}
// encodeTx serializes the transaction with RLP and returns it as a
// 0x-prefixed hex string.
func encodeTx(tx *types.Transaction) (*string, error) {
	encoded, err := rlp.EncodeToBytes(tx)
	if err != nil {
		return nil, err
	}
	hexStr := hexutil.Encode(encoded)
	return &hexStr, nil
}
// decodeTx parses a 0x-prefixed hex string back into a transaction by
// reversing the RLP encoding produced by encodeTx.
func decodeTx(txHex string) (*types.Transaction, error) {
	raw, err := hexutil.Decode(txHex)
	if err != nil {
		return nil, err
	}
	tx := new(types.Transaction)
	if err := rlp.Decode(bytes.NewReader(raw), tx); err != nil {
		return nil, err
	}
	return tx, nil
}
|
/*
* Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
*
 * This program and the accompanying materials are made available under
 * the terms of the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eureka_test
import (
"bytes"
"errors"
"fmt"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/serviceutil/serviceutilfakes"
"io/ioutil"
"net/http"
"code.cloudfoundry.org/cli/plugin/models"
"code.cloudfoundry.org/cli/plugin/pluginfakes"
"github.com/fatih/color"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/eureka"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/format"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/httpclient"
"github.com/pivotal-cf/spring-cloud-services-cli-plugin/httpclient/httpclientfakes"
)
// Test suite for eureka.OperateOnApplication: it injects fakes for the CLI
// connection, the authenticated HTTP client, and the service-instance
// resolver, and records every call made to the supplied instance operation.
var _ = Describe("OperateOnApplication", func() {
	const (
		testAccessToken         = "someaccesstoken"
		testServiceInstanceName = "some-service-registry"
	)
	// operationArg captures the arguments of a single fakeOperation invocation.
	type operationArg struct {
		accessToken   string
		eurekaUrl     string
		eurekaAppName string
		instanceId    string
	}
	var (
		fakeCliConnection  *pluginfakes.FakeCliConnection
		fakeAuthClient     *httpclientfakes.FakeAuthenticatedClient
		fakeResolver       *serviceutilfakes.FakeServiceInstanceResolver
		progressWriter     *bytes.Buffer
		output             string
		fakeOperation      eureka.InstanceOperation
		operationCallCount int
		operationArgs      []operationArg
		operationReturn    error
		err                error
		instanceIndex      *int
	)
	// Common setup: fresh fakes plus a recording fakeOperation whose return
	// value is controlled per-context via operationReturn.
	BeforeEach(func() {
		color.NoColor = false // ensure predictable colour behaviour independent of test environment
		fakeCliConnection = &pluginfakes.FakeCliConnection{}
		fakeAuthClient = &httpclientfakes.FakeAuthenticatedClient{}
		fakeResolver = &serviceutilfakes.FakeServiceInstanceResolver{}
		fakeAuthClient.DoAuthenticatedGetReturns(ioutil.NopCloser(bytes.NewBufferString("https://fake.com")), 200, nil)
		fakeResolver.GetServiceInstanceUrlReturns("https://eureka-dashboard-url/", nil)
		progressWriter = new(bytes.Buffer)
		operationCallCount = 0
		operationArgs = []operationArg{}
		operationReturn = nil
		fakeOperation = func(authClient httpclient.AuthenticatedClient, eurekaUrl string, eurekaAppName string, instanceId string, accessToken string) error {
			operationCallCount++
			operationArgs = append(operationArgs, operationArg{
				accessToken:   accessToken,
				eurekaUrl:     eurekaUrl,
				eurekaAppName: eurekaAppName,
				instanceId:    instanceId,
			})
			return operationReturn
		}
	})
	// The call under test runs after each context's BeforeEach has tweaked the fakes.
	JustBeforeEach(func() {
		output, err = eureka.OperateOnApplication(fakeCliConnection, testServiceInstanceName, "some-cf-app", fakeAuthClient, instanceIndex, progressWriter, fakeResolver, fakeOperation)
	})
	It("should attempt to obtain an access token", func() {
		Expect(fakeCliConnection.AccessTokenCallCount()).To(Equal(1))
	})
	Context("when the access token is not available", func() {
		BeforeEach(func() {
			fakeCliConnection.AccessTokenReturns("", errors.New("some access token error"))
		})
		It("should return a suitable error", func() {
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError("Access token not available: some access token error"))
		})
	})
	Context("when the access token is available", func() {
		BeforeEach(func() {
			fakeCliConnection.AccessTokenReturns("bearer "+testAccessToken, nil)
		})
		Context("but the eureka URL cannot be resolved", func() {
			BeforeEach(func() {
				fakeResolver.GetServiceInstanceUrlReturns("", errors.New("resolution error"))
			})
			It("should return a suitable error", func() {
				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError("Error obtaining service registry URL: resolution error"))
			})
		})
		Context("when the eureka URL can be resolved", func() {
			var testErr error
			// Happy-path setup: one CF app whose guid matches the single
			// eureka instance returned by the fake registry response below.
			BeforeEach(func() {
				testErr = errors.New("failed")
				fakeResolver.GetServiceInstanceUrlReturns("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/", nil)
				fakeCliConnection.GetAppsStub = func() ([]plugin_models.GetAppsModel, error) {
					apps := []plugin_models.GetAppsModel{}
					app1 := plugin_models.GetAppsModel{
						Name: "some-cf-app",
						Guid: "062bd505-8b19-44ca-4451-4a932932143a",
					}
					return append(apps, app1), nil
				}
				fakeAuthClient.DoAuthenticatedGetReturns(ioutil.NopCloser(bytes.NewBufferString(`
{
"applications":{
"application":[
{
"instance":[
{
"app":"APP-1",
"instanceId":"instance-1",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"2"
}
}
]
}
]
}
}`)), 200, nil)
			})
			It("should resolve the service instance url", func() {
				Expect(fakeResolver.GetServiceInstanceUrlCallCount()).To(Equal(1))
				serviceInstanceName, accessToken := fakeResolver.GetServiceInstanceUrlArgsForCall(0)
				Expect(serviceInstanceName).To(Equal(testServiceInstanceName))
				Expect(accessToken).To(Equal(testAccessToken))
			})
			It("should pass the access token to the GET request", func() {
				url, tok := fakeAuthClient.DoAuthenticatedGetArgsForCall(0)
				Expect(url).To(Equal("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/eureka/apps"))
				Expect(tok).To(Equal(testAccessToken))
			})
			It("should successfully operate on the service", func() {
				Expect(operationCallCount).To(Equal(1))
				args := operationArgs[0]
				Expect(args.accessToken).To(Equal(testAccessToken))
				Expect(args.eurekaUrl).To(Equal("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/"))
				Expect(args.eurekaAppName).To(Equal("APP-1"))
				Expect(args.instanceId).To(Equal("instance-1"))
			})
			It("should log progress", func() {
				Expect(progressWriter.String()).To(Equal(fmt.Sprintf("Processing service instance %s with index %s\n", format.Bold(format.Cyan("APP-1")), format.Bold(format.Cyan("2")))))
			})
			Context("when the operation fails", func() {
				BeforeEach(func() {
					operationReturn = testErr
				})
				It("should return an error", func() {
					Expect(err.Error()).To(ContainSubstring("Operation failed"))
				})
			})
			Context("when obtaining the application instances from the service registry fails", func() {
				BeforeEach(func() {
					testErr = errors.New("failed")
					fakeAuthClient.DoAuthenticatedGetReturns(nil, 0, testErr)
				})
				It("should return the error", func() {
					Expect(err.Error()).To(ContainSubstring("Service registry error: failed"))
				})
			})
			Context("when obtaining the application instances from the service registry returns a bad status code", func() {
				BeforeEach(func() {
					fakeAuthClient.DoAuthenticatedGetReturns(nil, http.StatusNotFound, nil)
				})
				It("should return the error", func() {
					Expect(err.Error()).To(ContainSubstring("Service registry failed: 404"))
				})
			})
			// APP-2 below has no cfAppGuid in its metadata, so only APP-1 and
			// APP-3 can be matched back to the CF app.
			Context("but only two out of three eureka instance names can be resolved", func() {
				BeforeEach(func() {
					fakeAuthClient.DoAuthenticatedGetReturns(ioutil.NopCloser(bytes.NewBufferString(`
{
"applications":{
"application":[
{
"instance":[
{
"app":"APP-1",
"instanceId":"instance-1",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"1"
}
},
{
"app":"APP-2",
"instanceId":"instance-1",
"status":"UNKNOWN",
"metadata":{
"zone":"zone-a",
"cfInstanceIndex":"2"
}
},
{
"app":"APP-3",
"instanceId":"instance-1",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"3"
}
}
]
}
]
}
}`)), 200, nil)
				})
				It("should operate on the instances with guids", func() {
					Expect(err).ToNot(HaveOccurred())
					Expect(operationCallCount).To(Equal(2))
					args := operationArgs[0]
					Expect(args.accessToken).To(Equal(testAccessToken))
					Expect(args.eurekaUrl).To(Equal("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/"))
					Expect(args.eurekaAppName).To(Equal("APP-1"))
					Expect(args.instanceId).To(Equal("instance-1"))
					args = operationArgs[1]
					Expect(args.accessToken).To(Equal(testAccessToken))
					Expect(args.eurekaUrl).To(Equal("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/"))
					Expect(args.eurekaAppName).To(Equal("APP-3"))
					Expect(args.instanceId).To(Equal("instance-1"))
				})
				It("should inform the user that 2 instances are being processed", func() {
					template := "Processing service instance %s with index %s\n"
					line1 := fmt.Sprintf(template, format.Bold(format.Cyan("APP-1")), format.Bold(format.Cyan("1")))
					line2 := fmt.Sprintf(template, format.Bold(format.Cyan("APP-3")), format.Bold(format.Cyan("3")))
					Expect(output).To(BeEmpty()) // only output is progress indication
					Expect(progressWriter.String()).To(ContainSubstring(line1 + line2))
				})
			})
			Context("but the cf app name cannot be found", func() {
				BeforeEach(func() {
					fakeCliConnection.GetAppsStub = func() ([]plugin_models.GetAppsModel, error) {
						apps := []plugin_models.GetAppsModel{}
						app1 := plugin_models.GetAppsModel{
							Name: "unknown-app",
							Guid: "062bd505-8b19-44ca-4451-4a932932143a",
						}
						return append(apps, app1), nil
					}
				})
				It("should return a suitable error", func() {
					Expect(err).To(HaveOccurred())
					Expect(err).To(MatchError("cf app name some-cf-app not found"))
				})
			})
			// Three instances of the same app; instanceIndex selects exactly one.
			Context("when an instance index is specified", func() {
				BeforeEach(func() {
					fakeCliConnection.GetAppsStub = func() ([]plugin_models.GetAppsModel, error) {
						apps := []plugin_models.GetAppsModel{}
						app1 := plugin_models.GetAppsModel{
							Name: "some-cf-app",
							Guid: "062bd505-8b19-44ca-4451-4a932932143a",
						}
						return append(apps, app1), nil
					}
					fakeAuthClient.DoAuthenticatedGetReturns(ioutil.NopCloser(bytes.NewBufferString(`
{
"applications":{
"application":[
{
"instance":[
{
"app":"APP-1",
"instanceId":"instance-1",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"0"
}
},
{
"app":"APP-1",
"instanceId":"instance-2",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"1"
}
},
{
"app":"APP-1",
"instanceId":"instance-3",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"2"
}
}
]
}
]
}
}`)), 200, nil)
					//Set the instance index argument
					var idx = 1
					instanceIndex = &idx
				})
				It("should not raise an error", func() {
					Expect(err).ToNot(HaveOccurred())
				})
				It("should process just the required instance", func() {
					Expect(operationCallCount).To(Equal(1))
					args := operationArgs[0]
					Expect(args.accessToken).To(Equal(testAccessToken))
					Expect(args.eurekaUrl).To(Equal("https://spring-cloud-broker.some.host.name/x/y/z/some-guid/"))
					Expect(args.eurekaAppName).To(Equal("APP-1"))
					Expect(args.instanceId).To(Equal("instance-2"))
				})
				It("should inform the user about the instance deregistration", func() {
					template := "Processing service instance %s with index %s\n"
					line1 := fmt.Sprintf(template, format.Bold(format.Cyan("APP-1")), format.Bold(format.Cyan("1")))
					Expect(output).To(BeEmpty()) // only output is progress indication
					Expect(progressWriter.String()).To(ContainSubstring(line1))
				})
				Context("when the operation fails", func() {
					BeforeEach(func() {
						operationReturn = testErr
					})
					It("should return a suitable error", func() {
						Expect(err).To(MatchError("Operation failed"))
					})
				})
				Context("when an incorrect instance index is specified", func() {
					BeforeEach(func() {
						var idx = 99
						instanceIndex = &idx
					})
					It("should return a suitable error", func() {
						Expect(err).To(HaveOccurred())
						Expect(err).To(MatchError("No instance found with index 99"))
					})
				})
			})
			// cfInstanceIndex that fails strconv parsing should surface the
			// parse error to the caller.
			Context("when an invalid instance index is returned in the metadata", func() {
				BeforeEach(func() {
					fakeCliConnection.GetAppsStub = func() ([]plugin_models.GetAppsModel, error) {
						apps := []plugin_models.GetAppsModel{}
						app1 := plugin_models.GetAppsModel{
							Name: "some-cf-app",
							Guid: "062bd505-8b19-44ca-4451-4a932932143a",
						}
						return append(apps, app1), nil
					}
					fakeAuthClient.DoAuthenticatedGetReturns(ioutil.NopCloser(bytes.NewBufferString(`
{
"applications":{
"application":[
{
"instance":[
{
"app":"APP-1",
"instanceId":"instance-1",
"status":"UP",
"metadata":{
"zone":"zone-a",
"cfAppGuid":"062bd505-8b19-44ca-4451-4a932932143a",
"cfInstanceIndex":"bad-integer"
}
}
]
}
]
}
}`)), 200, nil)
					//Set the instance index argument
					var idx = 1
					instanceIndex = &idx
				})
				It("should raise a suitable error", func() {
					Expect(err.Error()).To(ContainSubstring(`parsing "bad-integer": invalid syntax`))
				})
			})
		})
	})
})
|
package main
import (
"errors"
"fmt"
"os"
"github.com/spf13/cobra"
"path/filepath"
)
// desc is the long help text shown for `helm values --help`.
const desc = `
Merge one or more YAML files of values.
$ helm values mychart -f path/to/merging/file
To write to a file, instead of stdout, use '-o':
$ helm values mychart -f path/to/merging/file -o path/to/output/dir/
`

// main wires up the cobra command, validates its arguments, and delegates the
// actual merge work to valuesCmd.run().
func main() {
	valuesCmd := valuesCmd{}
	cmd := &cobra.Command{
		Use: "helm values [flags] CHART",
		// plain string literal: fmt.Sprintf with no format args was a no-op
		// (flagged by go vet / gosimple S1039)
		Short: "merge one or more YAML files of values",
		Long:  desc,
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("chart is required")
			}
			// verify chart path exists and normalize it to an absolute path
			if _, err := os.Stat(args[0]); err == nil {
				if valuesCmd.chartPath, err = filepath.Abs(args[0]); err != nil {
					return err
				}
			} else {
				return err
			}
			// verify that output-dir exists if provided
			if valuesCmd.outputDir != "" {
				if _, err := os.Stat(valuesCmd.outputDir); os.IsNotExist(err) {
					return fmt.Errorf("output-dir '%s' does not exist", valuesCmd.outputDir)
				}
			}
			return valuesCmd.run()
		},
	}
	f := cmd.Flags()
	f.VarP(&valuesCmd.values, "values", "f", "specify values in a YAML file (can specify multiple)")
	f.StringVarP(&valuesCmd.outputDir, "output-dir", "o", "", "writes the merged values to files in output-dir instead of stdout")
	f.StringVarP(&valuesCmd.backupSuffix, "backup-suffix", "", ".bak", "suffix append to values.yaml if values.yaml already exist in output-dir")
	// cobra prints the error itself; just report failure via the exit code
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates the three shapes of Go's for loop: condition-only
// (while-style), classic three-clause, and the bare infinite loop.
func main() {
	i := 0
	for i < 5 { // Go has no "while"; a condition-only for loop fills that role
		fmt.Println(i)
		i++
	}
	// three-clause form: prints the even numbers 0..20
	for j := 0; j <= 20; j += 2 {
		fmt.Println(j)
	}
	for { // infinite loop: prints once per second and never returns
		fmt.Println("Loop infinito")
		time.Sleep(time.Second)
	}
}
|
package node
import (
"context"
"crypto/tls"
"fmt"
"time"
"github.com/drand/drand/cmd/relay-gossip/lp2p"
"github.com/drand/drand/log"
"github.com/drand/drand/protobuf/drand"
"github.com/gogo/protobuf/proto"
bds "github.com/ipfs/go-ds-badger2"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
pubsub "github.com/libp2p/go-libp2p-pubsub"
ma "github.com/multiformats/go-multiaddr"
"golang.org/x/xerrors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// GossipRelayConfig configures a gossip relay node.
type GossipRelayConfig struct {
	// ChainHash is a hash that uniquely identifies the drand chain.
	ChainHash string
	// PeerWith lists multiaddrs of peers to bootstrap with (parsed by
	// ParseMultiaddrSlice).
	PeerWith []string
	// Addr is the listen address passed to lp2p.ConstructHost.
	Addr string
	// DataDir is the directory backing the badger datastore.
	DataDir string
	// IdentityPath is the file the libp2p private key is loaded from or
	// created at.
	IdentityPath string
	// CertPath, when non-empty, points to a TLS cert file used for the gRPC
	// connection; takes precedence over Insecure.
	CertPath string
	// Insecure dials gRPC without transport security (only when CertPath is
	// empty).
	Insecure bool
	// DrandPublicGRPC is the address of the drand public gRPC endpoint the
	// relay streams randomness from.
	DrandPublicGRPC string
}
// GossipRelayNode is a gossip relay runtime.
type GossipRelayNode struct {
	l         log.Logger      // logger for progress and error reporting
	bootstrap []ma.Multiaddr  // parsed bootstrap peer addresses
	ds        *bds.Datastore  // badger datastore backing the libp2p host
	priv      crypto.PrivKey  // libp2p identity key
	h         host.Host       // the libp2p host
	ps        *pubsub.PubSub  // pubsub instance the topic is joined on
	t         *pubsub.Topic   // topic randomness is published to
	opts      []grpc.DialOption // dial options for the drand gRPC connection
	addrs     []ma.Multiaddr  // interface listen addresses of the host
	done      chan struct{}   // closed by Shutdown to stop the relay loop
}
// NewGossipRelayNode starts a new gossip relay node.
// It builds the libp2p host and pubsub topic from cfg, prepares gRPC dial
// options (TLS cert file > insecure > system TLS, in that order), and spawns
// the relay loop in a background goroutine before returning.
// NOTE(review): on the later error paths the already-opened datastore/host
// are not closed — confirm whether cleanup is handled by the caller.
func NewGossipRelayNode(l log.Logger, cfg *GossipRelayConfig) (*GossipRelayNode, error) {
	bootstrap, err := ParseMultiaddrSlice(cfg.PeerWith)
	if err != nil {
		return nil, xerrors.Errorf("parsing peer-with: %w", err)
	}
	ds, err := bds.NewDatastore(cfg.DataDir, nil)
	if err != nil {
		return nil, xerrors.Errorf("opening datastore: %w", err)
	}
	priv, err := lp2p.LoadOrCreatePrivKey(cfg.IdentityPath)
	if err != nil {
		return nil, xerrors.Errorf("loading p2p key: %w", err)
	}
	h, ps, err := lp2p.ConstructHost(ds, priv, cfg.Addr, bootstrap)
	if err != nil {
		return nil, xerrors.Errorf("constructing host: %w", err)
	}
	addrs, err := h.Network().InterfaceListenAddresses()
	if err != nil {
		return nil, xerrors.Errorf("getting InterfaceListenAddresses: %w", err)
	}
	// log the dialable addresses of this relay so peers can connect
	for _, a := range addrs {
		l.Info(fmt.Sprintf("%s/p2p/%s\n", a, h.ID()))
	}
	t, err := ps.Join(lp2p.PubSubTopic(cfg.ChainHash))
	if err != nil {
		return nil, xerrors.Errorf("joining topic: %w", err)
	}
	// choose gRPC transport security: explicit cert file wins, then the
	// insecure flag, then default system TLS
	opts := []grpc.DialOption{}
	if cfg.CertPath != "" {
		creds, err := credentials.NewClientTLSFromFile(cfg.CertPath, "")
		if err != nil {
			return nil, xerrors.Errorf("loading cert file: %w", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else if cfg.Insecure {
		opts = append(opts, grpc.WithInsecure())
	} else {
		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
	}
	g := &GossipRelayNode{
		l:         l,
		bootstrap: bootstrap,
		ds:        ds,
		priv:      priv,
		h:         h,
		ps:        ps,
		t:         t,
		opts:      opts,
		addrs:     addrs,
		done:      make(chan struct{}),
	}
	// run the relay loop until Shutdown closes g.done
	go g.start(cfg.DrandPublicGRPC)
	return g, nil
}
// Multiaddrs returns the gossipsub multiaddresses of this relay node, i.e.
// each host address with the node's peer ID appended as a /p2p component.
// It panics if an address cannot be built, which would indicate a programmer
// error since the inputs come from the host itself.
func (g *GossipRelayNode) Multiaddrs() []ma.Multiaddr {
	hostAddrs := g.h.Addrs()
	full := make([]ma.Multiaddr, len(hostAddrs))
	for idx, addr := range hostAddrs {
		withPeer, err := ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", addr, g.h.ID()))
		if err != nil {
			panic(err)
		}
		full[idx] = withPeer
	}
	return full
}
// Shutdown stops the relay node by closing the done channel watched by the
// relay loop.
// NOTE(review): closing g.done a second time would panic; callers appear
// expected to invoke Shutdown at most once — confirm with call sites.
func (g *GossipRelayNode) Shutdown() {
	close(g.done)
}
// ParseMultiaddrSlice parses each peer string into a multiaddr, returning the
// parsed slice or an error identifying the first invalid entry.
func ParseMultiaddrSlice(peers []string) ([]ma.Multiaddr, error) {
	out := make([]ma.Multiaddr, len(peers))
	for i, peer := range peers {
		m, err := ma.NewMultiaddr(peer)
		if err != nil {
			// %q quotes the offending address; the original format string was
			// missing the space ("parsing multiaddr"%s"")
			return nil, xerrors.Errorf("parsing multiaddr %q: %w", peer, err)
		}
		out[i] = m
	}
	return out, nil
}
// start is the relay loop: it repeatedly dials the drand public gRPC
// endpoint and runs workRelay, retrying after 5s on any failure, until
// Shutdown closes g.done.
func (g *GossipRelayNode) start(drandPublicGRPC string) {
	for {
		// non-blocking shutdown check before each (re)connect attempt
		select {
		case <-g.done:
			return
		default:
		}
		conn, err := grpc.Dial(drandPublicGRPC, g.opts...)
		if err != nil {
			g.l.Warn(fmt.Sprintf("error connecting to grpc: %+v", err))
			time.Sleep(5 * time.Second)
			continue
		}
		client := drand.NewPublicClient(conn)
		// workRelay only returns on error (or shutdown), so the connection is
		// closed and the loop retries after a back-off
		err = g.workRelay(client)
		if err != nil {
			g.l.Warn(fmt.Sprintf("error relaying: %+v", err))
			err = conn.Close()
			if err != nil {
				g.l.Warn(fmt.Sprintf("error while closing connection: %+v", err))
			}
			time.Sleep(5 * time.Second)
		}
	}
}
// workRelay fetches the latest round from the drand client, then streams
// every subsequent round and republishes it on the pubsub topic.
// It only returns on error or when Shutdown has been requested.
func (g *GossipRelayNode) workRelay(client drand.PublicClient) error {
	// bounded timeout for the initial "what is the latest round" request
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	curr, err := client.PublicRand(ctx, &drand.PublicRandRequest{Round: 0})
	if err != nil {
		return xerrors.Errorf("getting initial round failed: %w", err)
	}
	g.l.Info(fmt.Sprintf("got latest rand: %d", curr.Round))
	// context.Background() on purpose as this applies to whole, long lived stream
	stream, err := client.PublicRandStream(context.Background(), &drand.PublicRandRequest{Round: curr.Round})
	if err != nil {
		return xerrors.Errorf("getting rand stream: %w", err)
	}
	for {
		// non-blocking shutdown check between stream messages
		select {
		case <-g.done:
			return xerrors.Errorf("relay shutdown")
		default:
		}
		rand, err := stream.Recv()
		if err != nil {
			// typo fix: was "receving"
			return xerrors.Errorf("receiving on stream: %w", err)
		}
		randB, err := proto.Marshal(rand)
		if err != nil {
			return xerrors.Errorf("marshaling: %w", err)
		}
		err = g.t.Publish(context.TODO(), randB)
		if err != nil {
			return xerrors.Errorf("publishing on pubsub: %w", err)
		}
		g.l.Info(fmt.Sprintf("Published randomness on pubsub, round: %d", rand.Round))
	}
}
|
package gotest
import (
"github.com/mumoshu/gosh"
"github.com/mumoshu/gosh/context"
)
// New builds a gosh shell that exports a single "hello" command, which writes
// "hello <target>" followed by a newline to the context's stdout.
func New() *gosh.Shell {
	sh := &gosh.Shell{}
	sh.Export("hello", func(ctx context.Context, target string) {
		context.Stdout(ctx).Write([]byte("hello " + target + "\n"))
	})
	return sh
}
// MustExec runs the shell built by New against the given os.Args-style
// arguments; per gosh convention the Must prefix indicates it panics on error.
func MustExec(osArgs []string) {
	New().MustExec(osArgs)
}
|
package ffuf
const (
	// VERSION holds the current version number.
	VERSION = "0.12git"
)
|
package main
import (
"fmt"
"testing"
c "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
)
// TestCidConv round-trips a known CIDv0 through toCidV1 and back through
// toCidV0, checking the string form at each step.
func TestCidConv(t *testing.T) {
	cidv0 := "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn"
	cidv1 := "zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi"
	cid, err := c.Decode(cidv0)
	if err != nil {
		t.Fatal(err)
	}
	// v0 -> v1 must yield the expected base58btc->v1 representation
	cid, err = toCidV1(cid)
	if err != nil {
		t.Fatal(err)
	}
	if cid.String() != cidv1 {
		t.Fatal("conversion failure")
	}
	// and converting back must recover the original v0 string
	cid, err = toCidV0(cid)
	if err != nil {
		t.Fatal(err)
	}
	cidStr := cid.String()
	if cidStr != cidv0 {
		t.Error(fmt.Sprintf("conversion failure, expected: %s; but got: %s", cidv0, cidStr))
	}
}
// TestBadCidConv checks that a CID whose codec cannot be represented as v0
// makes toCidV0 return an error.
func TestBadCidConv(t *testing.T) {
	// this cid is a raw leaf and should not be able to convert to cidv0
	cidv1 := "zb2rhhzX7uSKrtQ2ZZXFAabKiKFYZrJqKY2KE1cJ8yre2GSWZ"
	cid, err := c.Decode(cidv1)
	if err != nil {
		t.Fatal(err)
	}
	// only the error matters here; the previous version assigned the result
	// back to cid without ever using it (ineffectual assignment)
	if _, err = toCidV0(cid); err == nil {
		t.Fatal("expected failure")
	}
}
|
package app
// GetNewestRecord returns a pointer to a copy of the record with the latest
// Start time, or nil when no records exist. The lock is held for the whole
// scan so the snapshot is consistent.
func (a *App) GetNewestRecord() *Record {
	a.Lock()
	defer a.Unlock()
	if len(a.records) == 0 {
		return nil
	}
	newest := a.records[0]
	for _, candidate := range a.records[1:] {
		if newest.Start.Before(candidate.Start) {
			newest = candidate
		}
	}
	// newest is a copy, so the caller cannot mutate stored state through it
	return &newest
}
// AddRecord assigns the next sequential ID to rec, stores it, and emits an
// EventRecordAdded. It currently always returns nil.
func (a *App) AddRecord(rec Record) error {
	a.Lock()
	defer a.Unlock()
	// TODO Validate fields and conflicts, return error.
	a.lastID++
	rec.ID = a.lastID
	a.records = append(a.records, rec)
	// emitted while the lock is still held (rec is a value copy)
	a.emitRecordAdded(EventRecordAdded{rec})
	return nil
}
|
package modules
import (
"bytes"
"encoding/json"
"fmt"
"github.com/logrusorgru/aurora"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
)
const (
	// BackendURL is the base address of the backend service.
	BackendURL = "http://127.0.0.1:8080"
	// QueryURL is the endpoint used by QueryData/JsonRequest.
	QueryURL = BackendURL + "/query"
	// LoginURL is the endpoint used by Login to obtain a bearer token.
	LoginURL = BackendURL + "/login"
)
// Backend is the CLI module that talks to the backend HTTP service.
type Backend struct {
	Name  string // module display name
	token string // bearer token obtained by Login, sent by LoggedRequest
}
// fctPtr maps an instruction name to the Backend method that implements it.
// Method expressions like (*Backend).Login already have the exact type
// func(*Backend, []string) error, so the wrapper closures were redundant.
var fctPtr = map[string]func(*Backend, []string) error{
	"login": (*Backend).Login,
	"query": (*Backend).QueryData,
}
// NewBackendModule returns a Backend module as the generic Executor interface.
func NewBackendModule() Executor {
	return &Backend{Name: "Backend"}
}
// String implements fmt.Stringer, returning the module's display name.
func (b *Backend) String() string {
	return b.Name
}
// Login authenticates against LoginURL with cmd = [login, password], storing
// the returned bearer token on the Backend for later LoggedRequest calls.
// NOTE(review): credentials travel as URL query parameters over plain HTTP —
// fine for a local dev backend, but worth confirming for anything else.
func (b *Backend) Login(cmd []string) error {
	if len(cmd) != 2 {
		return fmt.Errorf("wrong number of arguments : {%s}", strings.Join(cmd, " "))
	}
	u, err := url.Parse(LoginURL)
	if err != nil {
		// previously ignored; LoginURL is a constant but fail loudly anyway
		return err
	}
	values := u.Query()
	values.Set("login", cmd[0])
	values.Set("pass", cmd[1])
	u.RawQuery = values.Encode()
	resp, err := http.Post(u.String(), "text/plain", nil)
	if err != nil {
		return err
	}
	// the response body was previously never closed (connection leak)
	defer resp.Body.Close()
	// renamed from `bytes`, which shadowed the imported bytes package
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	b.token = string(body)
	fmt.Println(aurora.Green(cmd[0] + " logged in !"))
	return nil
}
// QueryData forwards a query to the backend:
//   - no arguments: an empty request,
//   - "exporter NAME [DATA]": query a specific exporter endpoint,
//   - "DATA": a plain query against QueryURL (at most one argument).
func (b *Backend) QueryData(cmd []string) error {
	switch {
	case len(cmd) == 0:
		return b.JsonRequest("", "")
	case cmd[0] == "exporter":
		payload := ""
		if len(cmd) == 3 {
			payload = cmd[2]
		}
		return b.JsonRequest(QueryURL+"/"+cmd[1], payload)
	case len(cmd) > 2:
		return fmt.Errorf("wrong number of arguments : {%s}", strings.Join(cmd, " "))
	default:
		return b.JsonRequest(QueryURL, cmd[0])
	}
}
// Execute dispatches cmd[0] through fctPtr, passing the remaining arguments
// to the matching Backend method.
func (b *Backend) Execute(cmd []string) error {
	fmt.Printf("Executing module : %v\n", b)
	// guard: cmd[0] below would panic on an empty slice
	if len(cmd) == 0 {
		return fmt.Errorf("no instruction given for module '%s'", b)
	}
	fct, ok := fctPtr[cmd[0]]
	if !ok {
		return fmt.Errorf("instruction '%s' for module '%s' unknown", cmd[0], b)
	}
	return fct(b, cmd[1:])
}
// LoggedRequest performs a GET on url with the stored bearer token attached.
// The caller is responsible for closing the response body.
func (b *Backend) LoggedRequest(url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// previously ignored; a malformed URL would have caused a nil-deref
		return nil, err
	}
	req.Header.Add("Authorization", "Bearer "+b.token)
	return http.DefaultClient.Do(req)
}
// JsonRequest performs an authenticated GET on route with dataValue passed as
// the "datas" query parameter, then pretty-prints the JSON response to stdout.
func (b *Backend) JsonRequest(route, dataValue string) error {
	u, err := url.Parse(route)
	if err != nil {
		// previously ignored; a nil *url.URL would have panicked below
		return err
	}
	values := u.Query()
	values.Set("datas", dataValue)
	u.RawQuery = values.Encode()
	resp, err := b.LoggedRequest(u.String())
	if err != nil {
		return err
	}
	// the response body was previously never closed (connection leak)
	defer resp.Body.Close()
	datas, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	var out bytes.Buffer
	if err := json.Indent(&out, datas, "", " "); err != nil {
		return err
	}
	_, err = out.WriteTo(os.Stdout)
	fmt.Println()
	return err
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.