text stringlengths 11 4.05M |
|---|
package entity
import (
"bytes"
"crypto/md5"
"fmt"
)
// StatsRow is one row of collected stats for a process/packet pair.
// Fixed the garbled "//easyeasyjson:json" generator directive so the
// easyjson code generator recognizes this type again.
//easyjson:json
type StatsRow struct {
	// hash memoizes Hash(); unexported, so it is excluded from JSON.
	hash     string
	Hostname string
	Process  *Process
	Packet   *Packet
}
// Hash returns a hex-encoded MD5 digest identifying this row, computed once
// from the Process and Packet fields and memoized in s.hash. MD5 is used as
// a cheap identity hash here, not for security.
//
// Dereferences s.Process and s.Packet, so both must be non-nil.
// NOTE(review): Process.Path is written twice (once via Sprintf, once
// directly) — confirm the first write was not meant to be another field.
func (s *StatsRow) Hash() string {
	if s.hash == "" {
		var buff bytes.Buffer
		// Concatenate fields in a fixed order; changing the order would
		// change every computed hash.
		buff.WriteString(fmt.Sprintf("%v", s.Process.Path))
		buff.WriteString(fmt.Sprintf("%v", s.Process.CommunicationWithKnownNode))
		buff.WriteString(fmt.Sprintf("%v", s.Process.Id))
		buff.WriteString(s.Process.Name)
		buff.WriteString(s.Process.Path)
		buff.WriteString(fmt.Sprintf("%v", s.Process.Sender))
		buff.WriteString(s.Packet.SourceIp)
		buff.WriteString(fmt.Sprintf("%v", s.Packet.SourcePort))
		buff.WriteString(s.Packet.TargetIp)
		buff.WriteString(fmt.Sprintf("%v", s.Packet.TargetPort))
		s.hash = fmt.Sprintf("%x", md5.Sum(buff.Bytes()))
	}
	return s.hash
}
|
package util
import "fmt"
// LogErr invokes the given functions in reverse order and prints any error
// each one returns. Most often used with deferred cleanup functions whose
// errors are of no real interest beyond logging.
func LogErr(f ...func() error) {
	for i := range f {
		fn := f[len(f)-1-i]
		if err := fn(); err != nil {
			fmt.Println("Encountered error:", err)
		}
	}
}
|
package rt
import "testing"
// TestCreateJob verifies that NewJob stores the raw payload and job type
// as given and leaves the result unset.
func TestCreateJob(t *testing.T) {
	job := NewJob("test", []byte("{\"some\": 1}"))
	// data is held as interface{}; assert back to []byte for comparison.
	if string(job.data.([]byte)) != "{\"some\": 1}" {
		t.Error("job data incorrect, expected '{\"some\": 1}', got", job.data)
	}
	if job.jobType != "test" {
		t.Error("job type incorrect, expected 'test', got", job.jobType)
	}
	if job.result != nil {
		t.Error("job's result should be empty, is not")
	}
}
|
package db
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
// IConnector is the connector interface: any struct implementing it can be
// passed wherever a database connector is required, one per concrete driver.
type IConnector interface {
	SetConn(config map[string]string) // configure the connection from a settings map
	GetConn() *sql.DB                 // return the underlying database handle
	Table(tableName string) IBuilder  // builder bound to a table, for row CRUD
	Schema() ISchemaBuilder           // empty builder for schema (table DDL) operations
}
// Connector is the base connector; its methods are stubs meant to be
// overridden by concrete driver implementations.
type Connector struct {
	db *sql.DB
}

// SetConn configures the connection from a settings map (stub).
func (c *Connector) SetConn(config map[string]string) {
}

// GetConn returns the underlying database handle (stub, always nil).
func (c *Connector) GetConn() *sql.DB {
	return nil
}

// Table returns a query builder bound to tableName (stub, always nil).
func (c *Connector) Table(tableName string) IBuilder {
	return nil
}

// Schema returns a schema builder (stub, always nil).
// Fixed to return ISchemaBuilder: IConnector declares Schema() ISchemaBuilder,
// so the previous IBuilder return type left Connector unable to satisfy it.
func (c *Connector) Schema() ISchemaBuilder {
	return nil
}
|
package main
import (
"fmt"
"mylog/test"
)
// main loads the configuration, emits a few demo lines through mainLog, and
// then exercises logging from a sub-package.
func main() {
	cfg := LoadConfig()
	fmt.Println("config:", cfg)
	for i := 0; i < 10; i++ {
		mainLog.Infof("this is my mainlog %v", i)
	}
	// The test package stands in for a sub-package of a real project.
	test.Test()
}
|
package main
import "fmt"
// pessoa ("person") describes a person by name and age.
type pessoa struct {
	nome  string // name
	idade int    // age
}
// apresentacao prints a short self-introduction for the pessoa.
// (Equivalent to an instance method of the type, as in a class.)
func (p pessoa) apresentacao() {
	// Fixed typo in the printed output: "médoto" -> "método" ("method").
	fmt.Println("Eu sou um método de Pessoa!")
	fmt.Print("Meu nome é ", p.nome, " e eu tenho ", p.idade, " anos.")
}
// main builds a sample pessoa and has it introduce itself.
func main() {
	jose := pessoa{nome: "Jose", idade: 42}
	jose.apresentacao()
}
|
/*
* Amazon FreeRTOS Echo Server V1.1.1
* Copyright (C) 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://aws.amazon.com/freertos
* http://www.FreeRTOS.org
*/
package main
import (
"io"
"log"
"net"
"time"
)
//************** CONFIG SECTION *************//

// Port the plain-text (unsecured) echo server listens on.
const sUnSecureEchoPort string = "9001"

// Idle read timeout in seconds, re-armed before every read.
const xReadTimeoutSeconds = 300
// main opens the unsecured echo listener on all interfaces and serves it
// until the process is killed.
func main() {
	// listen on all interfaces
	xUnsecureEchoServer, xStatus := net.Listen("tcp", ":"+sUnSecureEchoPort)
	if xStatus != nil {
		// Was log.Printf + fall-through, which handed a nil listener to
		// startEchoServer and panicked on Accept; fail fast instead.
		log.Fatalf("Error %s while trying to listen", xStatus)
	}
	log.Print("UnSecure server Listening to port " + sUnSecureEchoPort)
	startEchoServer(xUnsecureEchoServer)
}
// startEchoServer accepts connections forever, spawning one goroutine per
// client; accept errors are logged and the loop continues.
func startEchoServer(xEchoServer net.Listener) {
	defer xEchoServer.Close()
	for {
		xConnection, xStatus := xEchoServer.Accept()
		if xStatus != nil {
			log.Printf("Error %s while trying to connect", xStatus)
		} else {
			go echoServerThread(xConnection)
		}
	}
}
// echoServerThread serves one echo connection: every chunk read is written
// straight back. It returns (closing the connection) when the client
// disconnects, a read times out, or a write fails.
func echoServerThread(xConnection net.Conn) {
	defer xConnection.Close()
	xDataBuffer := make([]byte, 4096)
	for {
		// Re-arm the idle deadline before each read.
		xConnection.SetReadDeadline(time.Now().Add(xReadTimeoutSeconds * time.Second))
		xNbBytes, xStatus := xConnection.Read(xDataBuffer)
		if xStatus != nil {
			// EOF means normal end of connection; anything else is an error
			// worth logging (the original left this branch empty).
			if xStatus != io.EOF {
				log.Printf("Error %s while receiving data", xStatus)
			}
			break
		}
		xNbBytes, xStatus = xConnection.Write(xDataBuffer[:xNbBytes])
		if xStatus != nil {
			log.Printf("Error %s while sending data", xStatus)
			break
		}
	}
}
|
package crd
import (
"context"
v1 "github.com/rancher/rancher-operator/pkg/apis/rancher.cattle.io/v1"
"github.com/rancher/wrangler/pkg/crd"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
)
// List returns the CRDs this operator manages, each customized with its
// printer columns or (for RoleTemplate) cluster scoping.
func List() []crd.CRD {
	return []crd.CRD{
		newCRD(&v1.Cluster{}, func(c crd.CRD) crd.CRD {
			return c.
				WithColumn("Ready", ".status.ready").
				WithColumn("Kubeconfig", ".status.clientSecretName")
		}),
		newCRD(&v1.Project{}, func(c crd.CRD) crd.CRD {
			return c.
				WithColumn("Selector", ".spec.clusterSelector")
		}),
		newCRD(&v1.RoleTemplate{}, func(c crd.CRD) crd.CRD {
			// Cluster-scoped rather than namespaced.
			c.NonNamespace = true
			return c
		}),
		newCRD(&v1.RoleTemplateBinding{}, func(c crd.CRD) crd.CRD {
			return c.
				WithColumn("Role", ".spec.roleTemplateName")
		}),
	}
}
// newCRD builds a rancher.cattle.io/v1 CRD (with status subresource) for
// obj, optionally letting customize amend the definition before it is
// returned.
func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD {
	// Local renamed from "crd" so it no longer shadows the imported crd
	// package inside this function.
	c := crd.CRD{
		GVK: schema.GroupVersionKind{
			Group:   "rancher.cattle.io",
			Version: "v1",
		},
		Status:       true,
		SchemaObject: obj,
	}
	if customize != nil {
		c = customize(c)
	}
	return c
}
// WriteFile renders all managed CRDs to filename.
func WriteFile(filename string) error {
	return crd.WriteFile(filename, List())
}
// Create applies all managed CRDs to the cluster reachable via cfg.
func Create(ctx context.Context, cfg *rest.Config) error {
	return crd.Create(ctx, cfg, List())
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
"github.com/atomicjolt/string_utils"
)
// CreateExternalFeedGroups Create a new external feed for the course or group.
// https://canvas.instructure.com/doc/api/announcement_external_feeds.html
//
// Path Parameters:
// # Path.GroupID (Required) ID
//
// Form Parameters:
// # Form.Url (Required) The url to the external rss or atom feed
// # Form.HeaderMatch (Optional) If given, only feed entries that contain this string in their title will be imported
// # Form.Verbosity (Optional) . Must be one of full, truncate, link_onlyDefaults to "full"
//
// The struct carries both json and url tags: json for GetJSON bodies, url
// for the form-encoded body built in GetBody.
type CreateExternalFeedGroups struct {
	Path struct {
		GroupID string `json:"group_id" url:"group_id,omitempty"` // (Required)
	} `json:"path"`

	Form struct {
		Url         string `json:"url" url:"url,omitempty"`                   // (Required)
		HeaderMatch bool   `json:"header_match" url:"header_match,omitempty"` // (Optional)
		Verbosity   string `json:"verbosity" url:"verbosity,omitempty"`       // (Optional) . Must be one of full, truncate, link_only
	} `json:"form"`
}
// GetMethod returns the HTTP method used by this request.
func (t *CreateExternalFeedGroups) GetMethod() string {
	return "POST"
}
// GetURLPath returns the endpoint path with the group id substituted in.
func (t *CreateExternalFeedGroups) GetURLPath() string {
	// GroupID is already a string; the previous fmt.Sprintf("%v", ...) was a
	// redundant identity conversion.
	path := "groups/{group_id}/external_feeds"
	return strings.ReplaceAll(path, "{group_id}", t.Path.GroupID)
}
// GetQuery returns the URL query string; this endpoint takes no query
// parameters.
func (t *CreateExternalFeedGroups) GetQuery() (string, error) {
	return "", nil
}
// GetBody encodes the form parameters as url.Values for a form-encoded body.
func (t *CreateExternalFeedGroups) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}
// GetJSON marshals the form parameters as a JSON body.
func (t *CreateExternalFeedGroups) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		// Fixed: the original returned (nil, nil), silently swallowing the
		// marshal error and handing callers an empty body.
		return nil, err
	}
	return j, nil
}
// HasErrors validates required and enumerated parameters, returning a single
// error listing every violation, or nil when the request is valid.
func (t *CreateExternalFeedGroups) HasErrors() error {
	var errs []string
	if t.Path.GroupID == "" {
		errs = append(errs, "'Path.GroupID' is required")
	}
	if t.Form.Url == "" {
		errs = append(errs, "'Form.Url' is required")
	}
	if t.Form.Verbosity != "" && !string_utils.Include([]string{"full", "truncate", "link_only"}, t.Form.Verbosity) {
		errs = append(errs, "Verbosity must be one of full, truncate, link_only")
	}
	if len(errs) > 0 {
		// Use a constant format verb: passing the joined message directly as
		// the format string would misinterpret any '%' it contained.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends the request through c and decodes the response body into an
// ExternalFeed model.
func (t *CreateExternalFeedGroups) Do(c *canvasapi.Canvas) (*models.ExternalFeed, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(response.Body)
	// Close immediately after the read; nothing below touches the body.
	response.Body.Close()
	if err != nil {
		return nil, err
	}
	ret := models.ExternalFeed{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, err
	}
	return &ret, nil
}
|
package opencc
// #cgo LDFLAGS: -lopencc
/*
#include<opencc/opencc.h>
*/
import "C"
// Config wraps an OpenCC conversion handle opened via NewConfig.
type Config struct {
	cfFile C.opencc_t
}
// ConfigType selects which bundled OpenCC conversion profile to open; the
// values map to the s2t/s2tw/s2twp/tw2s/tw2sp JSON configs in NewConfig.
type ConfigType int

const (
	ConfigTypeS2T   ConfigType = 1 // s2t.json
	ConfigTypeS2TW  ConfigType = 2 // s2tw.json
	ConfigTypeS2TWP ConfigType = 3 // s2twp.json
	ConfigTypeTW2S  ConfigType = 4 // tw2s.json
	ConfigTypeTW2SP ConfigType = 5 // tw2sp.json
)
// DefaultConfig is an optional process-wide Config; nothing in this file
// initializes it, so callers must set it before use.
var DefaultConfig *Config
// NewConfig opens the OpenCC dictionary matching t and returns a Config
// wrapping it. For an unknown t the returned Config has a zero cfFile.
//
// NOTE(review): the C.CString result is never freed, leaking one small C
// allocation per call, and the opencc_open result is not checked for
// failure — confirm against the opencc C API.
func NewConfig(t ConfigType) *Config {
	cf := Config{}
	// Table the type -> config-file mapping instead of an if/else chain.
	var name string
	switch t {
	case ConfigTypeS2T:
		name = "s2t.json"
	case ConfigTypeS2TW:
		name = "s2tw.json"
	case ConfigTypeS2TWP:
		name = "s2twp.json"
	case ConfigTypeTW2S:
		name = "tw2s.json"
	case ConfigTypeTW2SP:
		name = "tw2sp.json"
	}
	if name != "" {
		cf.cfFile = C.opencc_open(C.CString(name))
	}
	return &cf
}
// Close releases the underlying OpenCC handle if one was opened.
func (c *Config) Close() {
	if c.cfFile != nil {
		C.opencc_close(c.cfFile)
	}
}
// Tr converts str using config's OpenCC handle, returning the converted
// string or "" when the conversion fails.
//
// NOTE(review): the C.CString allocation for str is never freed — one C
// allocation leaks per call.
func Tr(str string, config *Config) string {
	ccResult := C.opencc_convert_utf8(config.cfFile, C.CString(str), C.size_t(len(str)))
	if ccResult == nil {
		return ""
	}
	result := C.GoString(ccResult)
	// The converted C buffer is owned by us and must be freed.
	C.opencc_convert_utf8_free(ccResult)
	return result
}
|
package rpcevents
import (
"fmt"
"context"
bcm "github.com/hyperledger/burrow/blockchain"
"github.com/hyperledger/burrow/consensus/tendermint"
"github.com/hyperledger/burrow/event"
"github.com/hyperledger/burrow/event/query"
"github.com/hyperledger/burrow/execution/events"
"github.com/hyperledger/burrow/execution/events/pbevents"
"github.com/tendermint/tendermint/libs/pubsub"
)
// executionEventsServer serves execution events over gRPC, combining a
// historical events provider with a live block-event emitter and the chain
// tip used to bound requested ranges.
type executionEventsServer struct {
	eventsProvider events.Provider
	emitter        event.Emitter
	tip            bcm.TipInfo
}
// NewExecutionEventsServer wires an events provider, emitter, and chain tip
// into an ExecutionEventsServer implementation.
func NewExecutionEventsServer(eventsProvider events.Provider, emitter event.Emitter,
	tip bcm.TipInfo) pbevents.ExecutionEventsServer {
	return &executionEventsServer{
		eventsProvider: eventsProvider,
		emitter:        emitter,
		tip:            tip,
	}
}
// GetEvents streams execution events in the requested block range to the
// client. A bounded range is served in a single pass; an unbounded
// (streaming) range follows new blocks as they arrive, emitting each
// block's events in turn.
func (ees *executionEventsServer) GetEvents(request *pbevents.GetEventsRequest,
	stream pbevents.ExecutionEvents_GetEventsServer) error {
	blockRange := request.GetBlockRange()
	start, end, streaming := blockRange.Bounds(ees.tip.LastBlockHeight())
	qry, err := query.NewBuilder(request.Query).Query()
	if err != nil {
		return fmt.Errorf("could not parse event query: %v", err)
	}
	if !streaming {
		return ees.steamEvents(stream, start, end, 1, qry)
	}
	// Streaming: follow new blocks and emit the events of each as it lands.
	// (A dead re-check of err, already handled above, was removed here.)
	out, err := tendermint.SubscribeNewBlock(context.Background(), ees.emitter)
	if err != nil {
		return err
	}
	for newBlock := range out {
		if newBlock == nil {
			return fmt.Errorf("received non-new-block event when subscribed with query")
		}
		if newBlock.Block == nil {
			return fmt.Errorf("new block contains no block info: %v", newBlock)
		}
		height := uint64(newBlock.Block.Height)
		start = end
		end = events.NewKey(height, 0)
		if err := ees.steamEvents(stream, start, end, 1, qry); err != nil {
			return err
		}
	}
	return nil
}
// steamEvents sends every event in [start, end) matching qry to stream,
// flushing the response buffer whenever a new block boundary completes a
// batch of batchSize blocks, and once more at the end for any remainder.
// (The name is a long-standing typo for "streamEvents"; kept because
// GetEvents calls it by this name.)
func (ees *executionEventsServer) steamEvents(stream pbevents.ExecutionEvents_GetEventsServer, start, end events.Key,
	batchSize uint64, qry pubsub.Query) error {
	// streamErr carries a send failure out of the provider callback, which
	// can only signal "stop" via its boolean return.
	var streamErr error
	buf := new(pbevents.GetEventsResponse)
	batchStart := start.Height()
	_, err := ees.eventsProvider.GetEvents(start, end, func(ev *events.Event) (stop bool) {
		if qry.Matches(ev) {
			// Start a new batch, flush the last lot
			if ev.Header.Index == 0 && (ev.Header.Height-batchStart)%batchSize == 0 {
				streamErr = flush(stream, buf)
				if streamErr != nil {
					return true
				}
				batchStart = ev.Header.Height
				buf = new(pbevents.GetEventsResponse)
			}
			buf.Events = append(buf.Events, pbevents.GetExecutionEvent(ev))
		}
		return false
	})
	if err != nil {
		return err
	}
	if streamErr != nil {
		return streamErr
	}
	// Flush any remaining events not filling batchSize many blocks
	return flush(stream, buf)
}
// flush sends buf on stream when it holds any events; an empty buffer is a
// no-op, so callers may flush unconditionally.
func flush(stream pbevents.ExecutionEvents_GetEventsServer, buf *pbevents.GetEventsResponse) error {
	if len(buf.Events) == 0 {
		return nil
	}
	return stream.Send(buf)
}
|
package dao
import (
"fmt"
"strconv"
log "github.com/cihub/seelog"
"github.com/gocql/gocql"
"github.com/juliendsv/go-cassadmin/domain"
)
// Connection defaults for the target Cassandra instance.
const (
	clusterNodes = "127.0.0.1"
	port         = 19043
	consistency  = gocql.Quorum
	defaultLimit = 100
)
// CassandraStore holds a cluster config and an open session.
// NOTE(review): the gocql.Session is stored by value and the store is passed
// by value throughout — confirm this copying is intended.
type CassandraStore struct {
	cluster gocql.ClusterConfig
	Session gocql.Session
}
// NewCassandraStore connects to the configured cluster at the package-level
// consistency and returns the store as a domain.NOSQLStore, or the
// session-creation error.
func NewCassandraStore() (domain.NOSQLStore, error) {
	c := gocql.NewCluster(clusterNodes)
	c.Consistency = consistency
	c.Port = port
	session, err := c.CreateSession()
	if err != nil {
		return nil, err
	}
	return CassandraStore{
		cluster: *c,
		Session: *session,
	}, nil
}
// CreateKeyspace creates keyspace_name with SimpleStrategy and a replication
// factor of 1.
// NOTE(review): keyspace_name is interpolated into the CQL unescaped — do
// not pass untrusted input (CQL injection).
func (c CassandraStore) CreateKeyspace(keyspace_name string) error {
	err := c.exec(fmt.Sprintf(`CREATE KEYSPACE %s
		WITH replication = {
			'class' : 'SimpleStrategy',
			'replication_factor' : %d
		}`, keyspace_name, 1))
	return err
}
// DropKeyspace drops keyspace_name.
// NOTE(review): keyspace_name is interpolated into the CQL unescaped — do
// not pass untrusted input (CQL injection).
func (c CassandraStore) DropKeyspace(keyspace_name string) error {
	err := c.exec(fmt.Sprintf(`DROP KEYSPACE %s`, keyspace_name))
	return err
}
// exec runs query at the package consistency level, wrapping any error with
// the failing statement text.
func (c CassandraStore) exec(query string) error {
	if err := c.Session.Query(query).Consistency(consistency).Exec(); err != nil {
		return fmt.Errorf("error executing query %s: %v", query, err)
	}
	return nil
}
// ShowKeyspaces lists user keyspaces (the system keyspaces are excluded) with
// their column families.
// NOTE(review): reads the pre-3.0 system.schema_* tables — verify against
// the target Cassandra version.
func (c CassandraStore) ShowKeyspaces() ([]domain.Keyspace, error) {
	ks, err_ks := c.Session.Query("SELECT keyspace_name FROM system.schema_keyspaces;").Iter().SliceMap()
	if err_ks != nil {
		return nil, err_ks
	}
	// keyspace name -> its column family names.
	map_ks := make(map[string][]string, 0)
	for _, r := range ks {
		if r["keyspace_name"] != "system" && r["keyspace_name"] != "system_traces" {
			map_ks[r["keyspace_name"].(string)] = []string{}
		}
	}
	// TODO split this in two function kss ans cfs
	cfs, err_cf := c.Session.Query("SELECT keyspace_name, columnfamily_name FROM system.schema_columnfamilies;").Iter().SliceMap()
	if err_cf != nil {
		return nil, err_cf
	}
	for _, r := range cfs {
		if r["keyspace_name"] != "system" && r["keyspace_name"] != "system_traces" {
			map_ks[r["keyspace_name"].(string)] = append(map_ks[r["keyspace_name"].(string)], r["columnfamily_name"].(string))
		}
	}
	keyspaces := make([]domain.Keyspace, 0)
	for ks, cf := range map_ks {
		keyspaces = append(keyspaces, domain.Keyspace{
			Name:           ks,
			Columnfamilies: makeColumnfamilies(cf),
		})
	}
	return keyspaces, nil
}
// ShowColumnFamily returns every row of keyspace ks / column family cf with
// each recognized column value rendered as a string.
// NOTE(review): ks and cf are concatenated into the CQL unescaped — do not
// pass untrusted input (CQL injection).
func (c CassandraStore) ShowColumnFamily(ks, cf string) ([]map[string]string, error) {
	rows, err := c.Session.Query("SELECT * FROM " + ks + "." + cf + ";").Iter().SliceMap()
	if err != nil {
		return nil, err
	}
	res := make([]map[string]string, len(rows))
	for i, r := range rows {
		// Fixed: allocate a fresh map per row. The original reused a single
		// map for every iteration, so all entries of res aliased the same
		// (accumulated) map.
		map_cf := make(map[string]string)
		for k, result := range r {
			// TODO manage all types, save it
			// and maybe we should save the value as bytes[] instead of string
			switch v := result.(type) {
			case int:
				map_cf[k] = strconv.Itoa(v)
			case string:
				map_cf[k] = v
			}
		}
		res[i] = map_cf
	}
	log.Infof("res: %v", res)
	return res, nil
}
// makeColumnfamilies wraps each name in a domain.Columnfamily value.
func makeColumnfamilies(names []string) []domain.Columnfamily {
	out := make([]domain.Columnfamily, 0, len(names))
	for _, n := range names {
		out = append(out, domain.Columnfamily{Name: n})
	}
	return out
}
|
package network
import (
"bytes"
"./messages"
"../config"
"../tools"
"../types"
)
func Process(connection *types.Connection) {
for {
msg := <-connection.Incoming
switch {
case msg.Type == 0x01:
if config.Password == "" {
connection.Outgoing<- messages.ConnectionApproved(connection.ConnectionList.Len() - 1)
} else {
connection.Outgoing<- messages.RequestPassword()
}
case msg.Type == 0x06:
connection.Outgoing<- messages.WorldInfo(connection.World)
case msg.Type == 0x08:
connection.Outgoing<- messages.StatusText("Receiving tile data");
connection.Outgoing<- messages.Spawn()
case msg.Type == 0x26:
len := tools.GetStringLen(msg.Payload[0:1])
if bytes.Equal(msg.Payload[1:len + 1], []byte(config.Password)) {
connection.Outgoing<- messages.ConnectionApproved(connection.ConnectionList.Len() - 1)
} else {
connection.Outgoing<- messages.Disconnect("Wrong password!")
}
}
}
} |
// Attribute mapper manager for dependency injection
package FlatFS
import (
"log"
)
// AttrMapperManager is a simple registry of AttrMapper implementations
// keyed by id, used for dependency injection.
type AttrMapperManager struct {
	attrMappers map[string]AttrMapper
}
// NewAttrMapperManager returns an empty manager ready for Set registrations.
func NewAttrMapperManager() *AttrMapperManager {
	return &AttrMapperManager{
		// make without the redundant 0 size hint.
		attrMappers: make(map[string]AttrMapper),
	}
}
// Map exposes the underlying registry map (not a copy).
func (attrMapperManager *AttrMapperManager) Map() map[string]AttrMapper {
	return attrMapperManager.attrMappers
}
// Has reports whether an AttrMapper is registered under id.
func (m *AttrMapperManager) Has(id string) bool {
	_, found := m.attrMappers[id]
	return found
}
// Get returns the AttrMapper registered under id.
// NOTE(review): a missing id terminates the process via log.Fatalf — harsh
// for library code; confirm callers rely on this.
func (attrMapperManager *AttrMapperManager) Get(id string) AttrMapper {
	if attrMapper, ok := attrMapperManager.attrMappers[id]; ok {
		return attrMapper
	}
	log.Fatalf("Implementation %v not found in %v \n", id, attrMapperManager.attrMappers)
	return nil
}
// Set registers attrMapper under id and returns the stored value.
func (m *AttrMapperManager) Set(id string, attrMapper AttrMapper) AttrMapper {
	m.attrMappers[id] = attrMapper
	return attrMapper
}
|
package helper
// StrArrContains reports whether e occurs in arr.
func StrArrContains(arr []string, e string) bool {
	for i := range arr {
		if arr[i] == e {
			return true
		}
	}
	return false
}
// IntArrContains reports whether e occurs in arr.
func IntArrContains(arr []int, e int) bool {
	for i := range arr {
		if arr[i] == e {
			return true
		}
	}
	return false
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package orphan_deployments_tests
import (
"os"
"testing"
"github.com/pborman/uuid"
"github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/service_helpers"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
bosh "github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/bosh_helpers"
cf "github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/cf_helpers"
"github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/siapi_helpers"
)
// Shared suite state, populated in BeforeSuite and consumed by the specs
// and AfterSuite.
var (
	siapiConfig siapi_helpers.SIAPIConfig
	appName     string
	brokerInfo  bosh.BrokerInfo
)
// BeforeSuite pushes the service-instances API app to CF, deploys a Redis
// broker configured to query it, and records its endpoint/credentials in
// siapiConfig for the specs.
var _ = BeforeSuite(func() {
	// Unique suffix keeps parallel runs from colliding on app/deployment names.
	uniqueID := uuid.New()[:6]
	appName = "si-api-" + uniqueID
	siAPIURL := "https://" + appName + "." + os.Getenv("BROKER_SYSTEM_DOMAIN") + "/service_instances"
	siAPIUsername := "siapi"
	siAPIPassword := "siapipass"
	cf.Cf("push",
		"-p", os.Getenv("SI_API_PATH"),
		"-f", os.Getenv("SI_API_PATH")+"/manifest.yml",
		"--var", "app_name="+appName,
		"--var", "username="+siAPIUsername,
		"--var", "password="+siAPIPassword,
	)
	brokerInfo = bosh.DeployBroker(
		"-orphan-deployment-with-siapi-"+uniqueID,
		bosh.BrokerDeploymentOptions{},
		service_helpers.Redis,
		[]string{"update_service_catalog.yml", "add_si_api.yml"},
		"--var", "service_instances_api_url="+siAPIURL,
		"--var", "service_instances_api_username="+siAPIUsername,
		"--var", "service_instances_api_password="+siAPIPassword,
	)
	siapiConfig = siapi_helpers.SIAPIConfig{
		URL:      siAPIURL,
		Username: siAPIUsername,
		Password: siAPIPassword,
	}
})
// AfterSuite tears down the CF app and broker deployment created in
// BeforeSuite.
var _ = AfterSuite(func() {
	cf.Cf("delete", "-f", appName)
	bosh.DeleteDeployment(brokerInfo.DeploymentName)
})
// TestOrphanDeploymentsTests is the Ginkgo entry point for this suite.
func TestOrphanDeploymentsTests(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Orphan Deployments Errand With SIAPI Test Suite")
}
|
package main
import (
"encoding/json"
"net/http"
"time"
"christine.website/cmd/site/internal"
"within.website/ln"
"within.website/ln/opname"
)
// bootTime pins the ETag base to process start, so caches invalidate on
// each deploy/restart.
var bootTime = time.Now()

// etag is the process-lifetime entity tag.
// NOTE(review): the handlers below recompute the same hash into a local
// fetag — confirm this package-level value is still used elsewhere.
var etag = internal.Hash(bootTime.String(), IncrediblySecureSalt)

// IncrediblySecureSalt *******
const IncrediblySecureSalt = "hunter2"
// createFeed serves the RSS feed with a boot-time weak ETag, answering 304
// on a matching If-None-Match.
func (s *Site) createFeed(w http.ResponseWriter, r *http.Request) {
	ctx := opname.With(r.Context(), "rss-feed")
	fetag := "W/" + internal.Hash(bootTime.String(), IncrediblySecureSalt)
	w.Header().Set("ETag", fetag)
	if r.Header.Get("If-None-Match") == fetag {
		http.Error(w, "Cached data OK", http.StatusNotModified)
		ln.Log(ctx, ln.Info("cache hit"))
		return
	}
	w.Header().Set("Content-Type", "application/rss+xml")
	err := s.rssFeed.WriteRss(w)
	if err != nil {
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		// Fixed: log with the opname-tagged ctx (was r.Context(), which
		// dropped the "rss-feed" tag; createAtom already used ctx).
		ln.Error(ctx, err, ln.F{
			"remote_addr": r.RemoteAddr,
			"action":      "generating_rss",
			"uri":         r.RequestURI,
			"host":        r.Host,
		})
	}
}
// createAtom serves the Atom feed with a boot-time weak ETag, answering 304
// on a matching If-None-Match.
func (s *Site) createAtom(w http.ResponseWriter, r *http.Request) {
	ctx := opname.With(r.Context(), "atom-feed")
	fetag := "W/" + internal.Hash(bootTime.String(), IncrediblySecureSalt)
	w.Header().Set("ETag", fetag)
	if r.Header.Get("If-None-Match") == fetag {
		// NOTE(review): http.Error writes a body with the 304; 304 responses
		// are defined as body-less — confirm intended.
		http.Error(w, "Cached data OK", http.StatusNotModified)
		ln.Log(ctx, ln.Info("cache hit"))
		return
	}
	w.Header().Set("Content-Type", "application/atom+xml")
	err := s.rssFeed.WriteAtom(w)
	if err != nil {
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		ln.Error(ctx, err, ln.F{
			"remote_addr": r.RemoteAddr,
			"action":      "generating_atom",
			"uri":         r.RequestURI,
			"host":        r.Host,
		})
	}
}
// createJSONFeed serves the JSON feed with a boot-time weak ETag, answering
// 304 on a matching If-None-Match.
func (s *Site) createJSONFeed(w http.ResponseWriter, r *http.Request) {
	// Fixed copy-paste: this handler was tagged "atom-feed".
	ctx := opname.With(r.Context(), "json-feed")
	fetag := "W/" + internal.Hash(bootTime.String(), IncrediblySecureSalt)
	w.Header().Set("ETag", fetag)
	if r.Header.Get("If-None-Match") == fetag {
		http.Error(w, "Cached data OK", http.StatusNotModified)
		ln.Log(ctx, ln.Info("cache hit"))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	e := json.NewEncoder(w)
	e.SetIndent("", "\t")
	err := e.Encode(s.jsonFeed)
	if err != nil {
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		ln.Error(ctx, err, ln.F{
			"remote_addr": r.RemoteAddr,
			"action":      "generating_jsonfeed",
			"uri":         r.RequestURI,
			"host":        r.Host,
		})
	}
}
|
package main
import (
"github.com/vlad-doru/microhiro/gateway/app"
"os"
)
// main builds the gateway application and runs it with the process args.
func main() {
	// Get and start the application; the local is named so it does not
	// shadow the imported app package.
	gateway := app.NewGateway()
	gateway.Run(os.Args)
}
|
package serve
// NamespaceConfigration config for namespace object.
// NOTE(review): "Configration" is a typo for "Configuration" in all three
// type names; the exported names are kept for compatibility.
type NamespaceConfigration struct{}

// ApplicationConfigration config for application object
type ApplicationConfigration struct {
	Modules []string            // module names enabled for the application
	Roles   map[string][]string // role name -> its granted entries
}

// ModuleConfigration config for module object
type ModuleConfigration struct {
	Permissions map[string][]string // permission name -> its granted entries
}
|
package openshiftadmission
import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/kubernetes/plugin/pkg/admission/gc"
"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
buildsecretinjector "github.com/openshift/origin/pkg/build/apiserver/admission/secretinjector"
buildstrategyrestrictions "github.com/openshift/origin/pkg/build/apiserver/admission/strategyrestrictions"
imagepolicyapi "github.com/openshift/origin/pkg/image/apiserver/admission/apis/imagepolicy"
"github.com/openshift/origin/pkg/image/apiserver/admission/imagepolicy"
imageadmission "github.com/openshift/origin/pkg/image/apiserver/admission/limitrange"
projectrequestlimit "github.com/openshift/origin/pkg/project/apiserver/admission/requestlimit"
quotaclusterresourcequota "github.com/openshift/origin/pkg/quota/apiserver/admission/clusterresourcequota"
schedulerpodnodeconstraints "github.com/openshift/origin/pkg/scheduler/admission/podnodeconstraints"
)
// OriginAdmissionPlugins is the process-wide admission plugin registry,
// populated at package load by init below.
// TODO register this per apiserver or at least per process
var OriginAdmissionPlugins = admission.NewPlugins()

func init() {
	RegisterAllAdmissionPlugins(OriginAdmissionPlugins)
}
// RegisterAllAdmissionPlugins registers all admission plugins: the GC
// protection and quota plugins, the generic apiserver set, and the
// OpenShift-specific set.
func RegisterAllAdmissionPlugins(plugins *admission.Plugins) {
	// register gc protection plugin
	gc.Register(plugins)
	resourcequota.Register(plugins)
	genericapiserver.RegisterAllAdmissionPlugins(plugins)
	RegisterOpenshiftAdmissionPlugins(plugins)
}
// RegisterOpenshiftAdmissionPlugins registers the OpenShift-specific
// admission plugins with the given registry.
func RegisterOpenshiftAdmissionPlugins(plugins *admission.Plugins) {
	projectrequestlimit.Register(plugins)
	buildsecretinjector.Register(plugins)
	buildstrategyrestrictions.Register(plugins)
	imageadmission.Register(plugins)
	schedulerpodnodeconstraints.Register(plugins)
	imagepolicy.Register(plugins)
	quotaclusterresourcequota.Register(plugins)
}
var (
	// OpenShiftAdmissionPlugins gives the in-order default admission chain for openshift resources.
	OpenShiftAdmissionPlugins = []string{
		lifecycle.PluginName,
		"OwnerReferencesPermissionEnforcement",
		"project.openshift.io/ProjectRequestLimit",
		"build.openshift.io/BuildConfigSecretInjector",
		"build.openshift.io/BuildByStrategy",
		imageadmission.PluginName,
		"scheduling.openshift.io/PodNodeConstraints",
		imagepolicyapi.PluginName,
		"quota.openshift.io/ClusterResourceQuota",
		mutatingwebhook.PluginName,
		validatingwebhook.PluginName,
		"ResourceQuota",
	}

	// DefaultOnPlugins is the set of plugins enabled by default.
	// NOTE(review): this set lists "PodNodeConstraints" while the chain above
	// uses "scheduling.openshift.io/PodNodeConstraints" — confirm which name
	// the plugin registers under; a mismatch would leave it default-off.
	DefaultOnPlugins = sets.NewString(
		lifecycle.PluginName,
		"build.openshift.io/BuildConfigSecretInjector",
		"build.openshift.io/BuildByStrategy",
		imageadmission.PluginName,
		"OwnerReferencesPermissionEnforcement",
		imagepolicyapi.PluginName,
		mutatingwebhook.PluginName,
		validatingwebhook.PluginName,
		"ResourceQuota",
		"quota.openshift.io/ClusterResourceQuota",
		"project.openshift.io/ProjectRequestLimit",
		"PodNodeConstraints",
	)
)
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64
// +build amd64
package pagetables
import (
"testing"
"gvisor.dev/gvisor/pkg/hostarch"
)
// Test2MAnd4K maps one 4K page and one 2M huge page and verifies both
// mappings are reported back unchanged.
func Test2MAnd4K(t *testing.T) {
	pt := New(NewRuntimeAllocator())

	// Map a small page and a huge page.
	pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
	pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*47)

	checkMappings(t, pt, []mapping{
		{0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
		{0x00007f0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read}},
	})
}
// Test1GAnd4K maps one 4K page and one 1G super page and verifies both
// mappings are reported back unchanged.
func Test1GAnd4K(t *testing.T) {
	pt := New(NewRuntimeAllocator())

	// Map a small page and a super page.
	pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
	pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*47)

	checkMappings(t, pt, []mapping{
		{0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
		{0x00007f0000000000, pudSize, pudSize * 47, MapOpts{AccessType: hostarch.Read}},
	})
}
// TestSplit1GPage maps a 1G super page, unmaps everything but its first and
// last 4K pages, and verifies only those two small mappings remain.
func TestSplit1GPage(t *testing.T) {
	pt := New(NewRuntimeAllocator())

	// Map a super page and knock out the middle.
	pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*42)
	pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pudSize-(2*pteSize))

	checkMappings(t, pt, []mapping{
		{0x00007f0000000000, pteSize, pudSize * 42, MapOpts{AccessType: hostarch.Read}},
		{0x00007f0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: hostarch.Read}},
	})
}
// TestSplit2MPage maps a 2M huge page, unmaps everything but its first and
// last 4K pages, and verifies only those two small mappings remain.
func TestSplit2MPage(t *testing.T) {
	pt := New(NewRuntimeAllocator())

	// Map a huge page and knock out the middle.
	pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*42)
	pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pmdSize-(2*pteSize))

	checkMappings(t, pt, []mapping{
		{0x00007f0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: hostarch.Read}},
		{0x00007f0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: hostarch.Read}},
	})
}
|
package plug
import (
"mqtts/core"
"mqtts/utils"
)
// MQTTUnauthCheck reports whether the MQTT target described by opts accepts
// an unauthenticated connection. On success it stores the connected client
// token and records the finding via core.SetResult.
func MQTTUnauthCheck(opts *core.TargetOptions) bool {
	client := core.GetMQTTClient(opts)
	// Guard clause instead of else-after-return: a connect failure means no
	// unauthenticated access.
	if connectError := client.Connect(); connectError != nil {
		utils.OutputNotVulnMessage(opts.Host, opts.Port, "Unauthorized access vulnerability not Exists")
		return false
	}
	core.SetClientToken(opts.Host, opts.Port, *client)
	utils.OutputVulnMessage(opts.Host, opts.Port, "Unauthorized access vulnerability exists")
	core.SetResult(opts, "Unauthorized access vulnerability exists")
	return true
}
|
package handler
import (
"encoding/json"
"net/http"
"strconv"
"go-crud/app/model"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
)
// GetAllProducts responds with every product in the category named by the
// route, or a JSON error (404 for a missing category, 500 on query failure).
func GetAllProducts(db *gorm.DB, w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	categoryName := vars["name"]
	category := getCategoryOr404(db, categoryName, w, r)
	if category == nil {
		// getCategoryOr404 already wrote the error response.
		return
	}
	products := []model.Product{}
	if err := db.Model(&category).Related(&products).Error; err != nil {
		respondError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondJSON(w, http.StatusOK, products)
}
// CreateProduct decodes a product from the request body, attaches it to the
// category named by the route, saves it, and responds 201 with the result.
func CreateProduct(db *gorm.DB, w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	categoryName := vars["name"]
	category := getCategoryOr404(db, categoryName, w, r)
	if category == nil {
		return
	}
	// Pre-bind the new product to the resolved category.
	product := model.Product{CategoryID: category.ID}
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&product); err != nil {
		respondError(w, http.StatusBadRequest, err.Error())
		return
	}
	defer r.Body.Close()
	if err := db.Save(&product).Error; err != nil {
		respondError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondJSON(w, http.StatusCreated, product)
}
// GetProduct responds with the product identified by the route's category
// name and id, or a 404 JSON error.
// NOTE(review): the Atoi error is ignored, so a non-numeric id is looked up
// as 0; the category is used only as an existence check.
func GetProduct(db *gorm.DB, w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	categoryName := vars["name"]
	category := getCategoryOr404(db, categoryName, w, r)
	if category == nil {
		return
	}
	id, _ := strconv.Atoi(vars["id"])
	product := getProductOr404(db, id, w, r)
	if product == nil {
		return
	}
	respondJSON(w, http.StatusOK, product)
}
// UpdateProduct decodes the request body over the product identified by the
// route's category name and id, saves it, and responds with the result.
// NOTE(review): the Atoi error is ignored (non-numeric ids become 0); the
// category is used only as an existence check.
func UpdateProduct(db *gorm.DB, w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	categoryName := vars["name"]
	category := getCategoryOr404(db, categoryName, w, r)
	if category == nil {
		return
	}
	id, _ := strconv.Atoi(vars["id"])
	product := getProductOr404(db, id, w, r)
	if product == nil {
		return
	}
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&product); err != nil {
		respondError(w, http.StatusBadRequest, err.Error())
		return
	}
	defer r.Body.Close()
	if err := db.Save(&product).Error; err != nil {
		respondError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondJSON(w, http.StatusOK, product)
}
// DeleteProduct removes the product identified by the route's category name
// and id, responding 204 on success.
func DeleteProduct(db *gorm.DB, w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	categoryName := vars["name"]
	category := getCategoryOr404(db, categoryName, w, r)
	if category == nil {
		return
	}
	id, _ := strconv.Atoi(vars["id"])
	product := getProductOr404(db, id, w, r)
	if product == nil {
		return
	}
	// Fixed: the original passed &category here, deleting the entire
	// category instead of the requested product.
	if err := db.Delete(&product).Error; err != nil {
		respondError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondJSON(w, http.StatusNoContent, nil)
}
// getProductOr404 fetches the product with the given primary key. When it
// does not exist it writes a 404 response and returns nil.
func getProductOr404(db *gorm.DB, id int, w http.ResponseWriter, r *http.Request) *model.Product {
	var found model.Product
	if err := db.First(&found, id).Error; err != nil {
		respondError(w, http.StatusNotFound, err.Error())
		return nil
	}
	return &found
}
// getImageProductOr404 fetches the first product whose Name matches. When
// none exists it writes a 404 response and returns nil.
func getImageProductOr404(db *gorm.DB, name string, w http.ResponseWriter, r *http.Request) *model.Product {
	var found model.Product
	if err := db.First(&found, model.Product{Name: name}).Error; err != nil {
		respondError(w, http.StatusNotFound, err.Error())
		return nil
	}
	return &found
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tscache
import (
"context"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
)
// sklImpl implements the Cache interface. It maintains a collection of
// skiplists containing keys or key ranges and the timestamps at which
// they were most recently read or written. If a timestamp was read or
// written by a transaction, the txn ID is stored with the timestamp to
// avoid advancing timestamps on successive requests from the same
// transaction.
type sklImpl struct {
	cache   *intervalSkl // interval skiplist holding cached key/timestamp entries
	clock   *hlc.Clock   // hybrid logical clock handed to the underlying intervalSkl
	metrics Metrics      // metrics recorded by this cache implementation
}

// Compile-time assertion that sklImpl satisfies the Cache interface.
var _ Cache = &sklImpl{}
// newSklImpl returns a new sklImpl with the supplied hybrid clock.
// The cache starts empty with its low-water mark set to the current
// clock reading.
func newSklImpl(clock *hlc.Clock) *sklImpl {
	impl := sklImpl{clock: clock, metrics: makeMetrics()}
	impl.clear(clock.Now())
	return &impl
}
// clear clears the cache and resets the low-water mark.
// Replacing the intervalSkl wholesale drops every cached entry; the
// floor timestamp then serves as the value returned for any key that is
// no longer present.
func (tc *sklImpl) clear(lowWater hlc.Timestamp) {
	tc.cache = newIntervalSkl(tc.clock, MinRetentionWindow, tc.metrics.Skl)
	tc.cache.floorTS = lowWater
}
// Add implements the Cache interface. It records ts (and the owning
// txn ID) for the single key start, or for the range [start, end) when
// end is non-empty. Keys are first truncated by boundKeyLengths so they
// fit in an intervalSkl page.
func (tc *sklImpl) Add(start, end roachpb.Key, ts hlc.Timestamp, txnID uuid.UUID) {
	start, end = tc.boundKeyLengths(start, end)
	entry := cacheValue{ts: ts, txnID: txnID}
	if len(end) > 0 {
		tc.cache.AddRange(nonNil(start), end, excludeTo, entry)
	} else {
		tc.cache.Add(nonNil(start), entry)
	}
}
// SetLowWater implements the Cache interface. It installs ts over
// [start, end) without a transaction ID, which raises the effective
// low-water mark for that span.
func (tc *sklImpl) SetLowWater(start, end roachpb.Key, ts hlc.Timestamp) {
	tc.Add(start, end, ts, noTxnID)
}
// getLowWater implements the Cache interface. It returns the floor
// timestamp of the backing intervalSkl — the minimum answer GetMax can
// give for any key.
func (tc *sklImpl) getLowWater() hlc.Timestamp {
	return tc.cache.FloorTS()
}
// GetMax implements the Cache interface. It looks up the maximum cached
// timestamp (and owning txn ID) for the key start, or for the range
// [start, end) when end is non-empty.
func (tc *sklImpl) GetMax(start, end roachpb.Key) (hlc.Timestamp, uuid.UUID) {
	var entry cacheValue
	if len(end) > 0 {
		entry = tc.cache.LookupTimestampRange(nonNil(start), end, excludeTo)
	} else {
		entry = tc.cache.LookupTimestamp(nonNil(start))
	}
	return entry.ts, entry.txnID
}
// boundKeyLengths makes sure that the key lengths provided are well below the
// size of each sklPage, otherwise we'll never be successful in adding it to
// an intervalSkl. It returns the (possibly truncated) start and end keys.
func (tc *sklImpl) boundKeyLengths(start, end roachpb.Key) (roachpb.Key, roachpb.Key) {
	// We bound keys to 1/32 of the page size. These could be slightly larger
	// and still not trigger the "key range too large" panic in intervalSkl,
	// but anything larger could require multiple page rotations before it's
	// able to fit in if other ranges are being added concurrently.
	maxKeySize := int(maximumSklPageSize / 32)
	// If either key is too long, truncate its length, making sure to always
	// grow the [start,end) range instead of shrinking it. This will reduce the
	// precision of the entry in the cache, which could allow independent
	// requests to interfere, but will never permit consistency anomalies.
	if l := len(start); l > maxKeySize {
		start = start[:maxKeySize]
		log.Warningf(context.TODO(), "start key with length %d exceeds maximum key length of %d; "+
			"losing precision in timestamp cache", l, maxKeySize)
	}
	if l := len(end); l > maxKeySize {
		end = end[:maxKeySize].PrefixEnd() // PrefixEnd to grow range
		log.Warningf(context.TODO(), "end key with length %d exceeds maximum key length of %d; "+
			"losing precision in timestamp cache", l, maxKeySize)
	}
	return start, end
}
// Metrics implements the Cache interface, returning the metrics struct
// created for this cache in newSklImpl.
func (tc *sklImpl) Metrics() Metrics {
	return tc.metrics
}
// intervalSkl does not treat nil keys the same way as empty keys, while
// Cockroach's KV API layer makes no distinction between the two.
var emptyStartKey = []byte("")

// nonNil substitutes a non-nil empty key for a nil one so that either
// form can be handed to the underlying intervalSkl.
func nonNil(b []byte) []byte {
	if b != nil {
		return b
	}
	return emptyStartKey
}
|
package gymydb
import (
"fmt"
"database/sql"
"strconv"
"gylib/common"
"strings"
"crypto/md5"
"encoding/hex"
"time"
)
// mysqldb is the master connection pool; Slavedb holds one pool per
// configured read replica.
var mysqldb *sql.DB
var Slavedb []*sql.DB

// Db_conn carries the connection settings read from conf/app.ini.
// ("perfix" spelling is kept as-is for compatibility with existing callers.)
type Db_conn struct {
	Db_host     string
	Db_port     string
	Db_name     string
	Db_password string
	Db_perfix   string // table-name prefix
}

var Db_perfix string       // copy of the configured table prefix
var Db_Struct Db_conn      // parsed connection settings
var Is_db_init bool = false // guards the one-time schema scan in Init_redis_table_struct

// Schema caches populated at startup:
var G_dbtables map[string]interface{} // full table name -> column metadata rows
var G_fd_list map[string]interface{}  // short table name -> dict fields with list_tb_name <> '0'
var G_tb_dict map[string]interface{}  // short table name -> db_tb_dict row
var G_fd_dict map[string]interface{}  // short table name -> db_fd_dict rows
// init reads the [database] section from conf/app.ini, opens the master
// pool plus one pool per comma-separated slave host, and then scans the
// schema into the package-level caches.
func init() {
	G_dbtables = make(map[string]interface{})
	G_fd_list = make(map[string]interface{})
	G_tb_dict = make(map[string]interface{})
	G_fd_dict = make(map[string]interface{})
	//DataTable=make(map[string]string)
	data := common.Getini("conf/app.ini", "database", map[string]string{"db_user": "root", "db_password": "",
		"db_host": "127.0.0.1", "db_port": "3306", "db_name": "", "db_maxpool": "20", "db_minpool": "5", "db_perfix": "", "slavedb": ""})
	con := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8", data["db_user"],
		data["db_password"], data["db_host"],
		data["db_port"], data["db_name"])
	// NOTE(review): errors from sql.Open and Ping are discarded here; a
	// bad DSN or unreachable server surfaces only on first query.
	mysqldb, _ = sql.Open("mysql", con)
	maxpool, _ := strconv.Atoi(data["db_maxpool"])
	minpool, _ := strconv.Atoi(data["db_minpool"])
	mysqldb.SetMaxOpenConns(maxpool)
	mysqldb.SetMaxIdleConns(minpool)
	mysqldb.SetConnMaxLifetime(time.Minute * 5)
	mysqldb.Ping()
	Slavedb = make([]*sql.DB, 0)
	// One pool per slave host; user/password/port/name are shared with the master.
	iplist := strings.Split(data["slavedb"], ",")
	for _, v := range iplist {
		con1 := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8", data["db_user"],
			data["db_password"], v,
			data["db_port"], data["db_name"])
		sqldb1, _ := sql.Open("mysql", con1)
		maxpool, _ := strconv.Atoi(data["db_maxpool"])
		minpool, _ := strconv.Atoi(data["db_minpool"])
		sqldb1.SetMaxOpenConns(maxpool)
		sqldb1.SetMaxIdleConns(minpool)
		sqldb1.SetConnMaxLifetime(time.Minute * 5)
		sqldb1.Ping()
		Slavedb = append(Slavedb, sqldb1)
	}
	// Keep package-level copies of the configuration for later lookups.
	Db_Struct.Db_perfix = data["db_perfix"]
	Db_Struct.Db_name = data["db_name"]
	Db_Struct.Db_host = data["db_host"]
	Db_Struct.Db_port = data["db_port"]
	Db_Struct.Db_password = data["db_password"]
	Db_perfix = data["db_perfix"]
	Init_redis_table_struct()
}
// Init_redis_table_struct scans every table in the database once
// (guarded by Is_db_init), caching each table's column metadata in
// G_dbtables and loading its dictionary rows via Get_mysql_dict.
func Init_redis_table_struct() {
	qb := new(Mysqlcon)
	if (Is_db_init == false) {
		Is_db_init = true
		data := qb.Query("show TABLES", nil)
		for _, v := range data {
			qb.Dbinit()
			// MySQL names the single column of SHOW TABLES "Tables_in_<dbname>".
			tbname := v["Tables_in_"+Db_Struct.Db_name]
			list := qb.Query("SHOW full COLUMNS FROM "+tbname, nil)
			if (list != nil) {
				// Normalise column-metadata keys to lowercase before caching.
				data_list := make([]map[string]string, 0)
				for _, val := range list {
					col := make(map[string]string)
					for key, _ := range val {
						col[common.Tolow_map_name(key)] = val[key]
					}
					data_list = append(data_list, col)
				}
				G_dbtables[tbname] = data_list
				// Dictionary caches are keyed by the table name without the prefix.
				tbname = strings.Replace(tbname, Db_perfix, "", -1)
				Get_mysql_dict(tbname)
			}
		}
	}
}
// GetMd5String returns the lowercase hex-encoded MD5 digest of s.
func GetMd5String(s string) string {
	digest := md5.Sum([]byte(s))
	return hex.EncodeToString(digest[:])
}
// Get_mysql_dict loads the dictionary metadata for one table (named
// without its prefix) into the package caches: the db_tb_dict row into
// G_tb_dict, its db_fd_dict rows into G_fd_dict, and the subset of
// fields that reference list tables (list_tb_name <> '0') into G_fd_list.
// Tables without a db_tb_dict entry are skipped silently.
func Get_mysql_dict(tbname string) {
	db := NewGymysqldb()
	data := db.Tbname("db_tb_dict").Where(fmt.Sprintf("name='%v'", Db_perfix+tbname)).Find();
	if (data == nil) {
		return
	}
	db.Dbinit()
	fd_data := db.Tbname("db_fd_dict").Where(fmt.Sprintf("t_id=%v", data["id"])).Select()
	list_data := db.Tbname("db_fd_dict").Where(fmt.Sprintf("t_id=%v and list_tb_name<>'0'", data["id"])).Select()
	G_tb_dict[tbname] = data
	if (fd_data != nil) {
		G_fd_dict[tbname] = fd_data
	}
	if (list_data != nil) {
		G_fd_list[tbname] = list_data
	}
}
// NewGymysqldb builds a fresh Mysqlcon with no open transaction and its
// connection state initialised via Dbinit.
func NewGymysqldb() *Mysqlcon {
	con := new(Mysqlcon)
	con.SqlTx = nil
	con.Dbinit()
	return con
}
|
package gw
import (
"fmt"
"github.com/oceanho/gw/conf"
)
// DefaultAuthManagerImpl is the built-in auth manager backed by an
// in-memory passport -> user map.
type DefaultAuthManagerImpl struct {
	store IStore                  // backing store taken from the server state
	cnf   *conf.ApplicationConfig // application configuration snapshot
	users map[string]*defaultUser // passport -> user; nil until populated elsewhere (safe to read)
}
// DefaultAuthManager builds a DefaultAuthManagerImpl wired to the given
// server state's application configuration and store.
func DefaultAuthManager(state *ServerState) *DefaultAuthManagerImpl {
	return &DefaultAuthManagerImpl{
		cnf:   state.ApplicationConfig(),
		store: state.Store(),
	}
}
// Login authenticates param against the in-memory user table. It
// succeeds only when the passport exists and the stored secret matches
// the supplied password; otherwise it returns EmptyUser and an error.
func (d *DefaultAuthManagerImpl) Login(param AuthParameter) (User, error) {
	user, ok := d.users[param.Passport]
	if ok && user.secret == param.Password {
		return user.User, nil
	}
	// Fixed "serect" -> "secret" typo in the user-facing error message.
	return EmptyUser, fmt.Errorf("user:%s not found or secret not match", param.Passport)
}
// Logout implements the auth-manager interface. The default manager keeps
// no session state, so there is nothing to tear down and it always
// reports success.
func (d *DefaultAuthManagerImpl) Logout(user User) bool {
	// Nothing to do.
	return true
}
|
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs ,,const.go
package syscall
// Platform ABI constants generated by cgo -godefs; judging by their
// names these are termios flags, baud rates and serial-line ioctl
// request/modem-bit numbers. Values must not be edited by hand.
const (
	BRKINT = 0x2
	IGNPAR = 0x4
	ICRNL = 0x100
	INPCK = 0x10
	ISTRIP = 0x20
	IXON = 0x400
	OPOST = 0x1
	ECHO = 0x8
	ICANON = 0x2
	IEXTEN = 0x8000
	ISIG = 0x1
	VTIME = 0x5
	VMIN = 0x6
	CLOCAL = 0x800
	CREAD = 0x80
	CBAUD = 0x100f
	B0 = 0x0
	B50 = 0x1
	B75 = 0x2
	B110 = 0x3
	B134 = 0x4
	B150 = 0x5
	B200 = 0x6
	B300 = 0x7
	B600 = 0x8
	B1200 = 0x9
	B1800 = 0xa
	B2400 = 0xb
	B4800 = 0xc
	B9600 = 0xd
	B19200 = 0xe
	B38400 = 0xf
	B57600 = 0x1001
	B115200 = 0x1002
	B230400 = 0x1003
	B460800 = 0x1004
	B500000 = 0x1005
	B576000 = 0x1006
	B921600 = 0x1007
	B1000000 = 0x1008
	B1152000 = 0x1009
	B1500000 = 0x100a
	B2000000 = 0x100b
	B2500000 = 0x100c
	B3000000 = 0x100d
	B3500000 = 0x100e
	B4000000 = 0x100f
	CSIZE = 0x30
	CS5 = 0x0
	CS6 = 0x10
	CS7 = 0x20
	CS8 = 0x30
	PARENB = 0x100
	PARODD = 0x200
	CRTSCTS = 0x80000000
	CSTOPB = 0x40
	TCGETS = 0x5401
	TCSETS = 0x5402
	TCSETSW = 0x5403
	TCSETSF = 0x5404
	TIOCMBIC = 0x5417
	TIOCMBIS = 0x5416
	TIOCMSET = 0x5418
	TIOCMGET = 0x5415
	TIOCM_LE = 0x1
	TIOCM_DTR = 0x2
	TIOCM_RTS = 0x4
	TIOCM_ST = 0x8
	TIOCM_SR = 0x10
	TIOCM_CTS = 0x20
	TIOCM_CAR = 0x40
	TIOCM_RNG = 0x80
	TIOCM_DSR = 0x100
)
|
package core
import (
"MCS_Server/global"
"fmt"
zaprotatelogs "github.com/lestrrat-go/file-rotatelogs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
"path"
"time"
)
// level is the minimum log level used by every core built in this package.
var level zapcore.Level

// Zap builds the application logger: it ensures the configured log
// directory exists, wires a core from getEncoderCore, and attaches
// caller information when ShowLine is enabled.
func Zap() (logger *zap.Logger) {
	// In the original check, `err != nil || os.IsNotExist(err)` was
	// equivalent to `err != nil` (IsNotExist implies a non-nil error).
	// MkdirAll additionally creates missing parent directories, where
	// plain Mkdir would fail.
	if _, err := os.Stat(global.MCS_Config.Zap.Dir); err != nil {
		_ = os.MkdirAll(global.MCS_Config.Zap.Dir, os.ModePerm)
	}
	level = zap.InfoLevel
	logger = zap.New(getEncoderCore())
	if global.MCS_Config.Zap.ShowLine {
		logger = logger.WithOptions(zap.AddCaller())
	}
	return logger
}
// getEncoderCore builds the zapcore.Core used by Zap: log files rotate
// daily, are kept for 7 days and are linked as "latest_log"; output is
// duplicated to stdout.
func getEncoderCore() (core zapcore.Core) {
	fileWriter, err := zaprotatelogs.New(
		path.Join(global.MCS_Config.Zap.Dir, "%Y-%m-%d.log"),
		zaprotatelogs.WithLinkName("latest_log"),
		zaprotatelogs.WithMaxAge(7*24*time.Hour),
		zaprotatelogs.WithRotationTime(24*time.Hour),
	)
	if err != nil {
		// NOTE(review): fileWriter is nil on error but is still passed to
		// AddSync below — confirm rotatelogs.New can only fail on a bad
		// pattern, or guard this path.
		fmt.Printf("Get Write Syncer Failed err:%v", err.Error())
	}
	writer := zapcore.NewMultiWriteSyncer(zapcore.AddSync(os.Stdout), zapcore.AddSync(fileWriter))
	return zapcore.NewCore(getEncoder(), writer, level)
}
// getEncoder picks the log encoder from configuration: JSON when the
// configured format is "json", the human-readable console encoder
// otherwise.
func getEncoder() zapcore.Encoder {
	cfg := getEncoderConfig()
	switch global.MCS_Config.Zap.Format {
	case "json":
		return zapcore.NewJSONEncoder(cfg)
	default:
		return zapcore.NewConsoleEncoder(cfg)
	}
}
// getEncoderConfig returns zap's production encoder configuration
// adjusted to this project's conventions: custom timestamp layout,
// lowercase level names, seconds-based durations and full caller paths.
// (The original declared a named return `config` that was never used.)
func getEncoderConfig() zapcore.EncoderConfig {
	encoderConfig := zap.NewProductionEncoderConfig()
	encoderConfig.EncodeTime = CustomTimeEncoder
	encoderConfig.EncodeLevel = zapcore.LowercaseLevelEncoder
	encoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder
	encoderConfig.EncodeCaller = zapcore.FullCallerEncoder
	return encoderConfig
}
// CustomTimeEncoder renders log timestamps as the configured header
// string immediately followed by a "2006/01/02 - 15:04:05.000"
// formatted time.
func CustomTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(t.Format(global.MCS_Config.Zap.Header + "2006/01/02 - 15:04:05.000"))
}
|
package main
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"strings"
chartrender "github.com/go-echarts/go-echarts/v2/render"
)
// Code from https://blog.cubieserver.de/2020/how-to-render-standalone-html-snippets-with-go-echarts/
// adapted from
// https://github.com/go-echarts/go-echarts/blob/master/templates/base.go
// https://github.com/go-echarts/go-echarts/blob/master/templates/header.go
// Get assets removed from baseTpl to not get it for each chart
// {{- range .JSAssets.Values }}
// <script src="{{ . }}"></script>
// {{- end }}
// baseTpl renders one chart as a standalone HTML snippet: a sized div
// plus a script that initialises echarts on it, applies the chart's
// JSON option and then runs any extra JS functions. ".ChartID",
// ".Initialization.Height", ".Theme", ".JSON" and ".JSFunctions.Fns"
// come from the go-echarts chart object; "safeJS" is registered in
// Render below.
var baseTpl = `
<div class="container">
    <div class="item" id="{{ .ChartID }}" style="width:100%;height:{{ .Initialization.Height }};"></div>
</div>
<script type="text/javascript">
    "use strict";
    let goecharts_{{ .ChartID | safeJS }} = echarts.init(document.getElementById('{{ .ChartID | safeJS }}'), "{{ .Theme }}");
    let option_{{ .ChartID | safeJS }} = {{ .JSON }};
    goecharts_{{ .ChartID | safeJS }}.setOption(option_{{ .ChartID | safeJS }});
    {{- range .JSFunctions.Fns }}
    {{ . | safeJS }}
    {{- end }}
</script>
`
// snippetRenderer renders a go-echarts chart as an embeddable HTML
// snippet instead of a full page.
type snippetRenderer struct {
	c      interface{}   // the chart object handed to the template
	before []func()      // hooks run before each render (e.g. option validation)
}
// newSnippetRenderer wraps chart c in a snippetRenderer that runs the
// given hooks before every render.
func newSnippetRenderer(c interface{}, before ...func()) chartrender.Renderer {
	r := &snippetRenderer{
		c:      c,
		before: before,
	}
	return r
}
// Render executes the registered before-hooks and then renders the
// chart snippet template for r.c into w. The "safeJS" helper marks a
// value as trusted JavaScript so html/template does not escape it.
func (r *snippetRenderer) Render(w io.Writer) error {
	const tplName = "chart"
	for _, hook := range r.before {
		hook()
	}
	funcs := template.FuncMap{
		"safeJS": func(v interface{}) template.JS {
			return template.JS(fmt.Sprint(v))
		},
	}
	tpl := template.Must(template.New(tplName).Funcs(funcs).Parse(baseTpl))
	return tpl.ExecuteTemplate(w, tplName, r.c)
}
// renderToHTML renders chart c into an HTML snippet, stripping the
// `"__f__` / `__f__"` JS-function markers go-echarts leaves around
// callbacks (they break the emitted style otherwise). On a render
// failure it logs and returns an empty snippet.
func renderToHTML(c interface{}) template.HTML {
	renderer := c.(chartrender.Renderer)
	var buf bytes.Buffer
	if err := renderer.Render(&buf); err != nil {
		log.Printf("Failed to render chart: %s", err)
		return ""
	}
	cleaned := strings.NewReplacer("\"__f__", "", "__f__\"", "").Replace(buf.String())
	return template.HTML(cleaned)
}
|
package main
import (
"log"
"net/http"
"time"
logrus "github.com/sirupsen/logrus"
)
// main wires the router and serves HTTP on :8080, refreshing the
// package-level CurrentTime every 10 seconds in a background goroutine.
func main() {
	logrus.Info("dean") // never remove this line
	router := NewRouter()
	// NOTE(review): time.Tick's ticker can never be stopped; acceptable
	// here because the goroutine lives for the whole process.
	t := time.Tick(10 * time.Second)
	go func() {
		for {
			<-t
			CurrentTime = time.Now().UTC()
		}
	}()
	log.Fatal(http.ListenAndServe(":8080", router))
}
|
package main
import (
"fmt"
"log"
"net"
"net/http"
"net/http/fcgi"
)
// The fcgi package implements the FastCGI protocol (currently only the
// responder role). Protocol spec:
// http://www.fastcgi.com/drupal/node/6?q=node/22
//
// main listens on 127.0.0.1:8081 and serves FastCGI requests with
// FastCGIHandler.
func main() {
	// Listen for FastCGI connections from the front-end web server.
	listener, err := net.Listen("tcp", "127.0.0.1:8081")
	if err != nil {
		log.Fatal(err)
	}
	// fcgi.Serve accepts connections from the listener and spawns one
	// goroutine per connection that reads requests and invokes the
	// handler. A nil listener means os.Stdin; a nil handler means
	// http.DefaultServeMux.
	handler := new(FastCGIHandler)
	if err := fcgi.Serve(listener, handler); err != nil {
		log.Fatal(err)
	}
}
// FastCGIHandler answers every request with a static HTML greeting.
type FastCGIHandler struct{}

// ServeHTTP prints a marker to stdout and writes the greeting page.
func (s FastCGIHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	fmt.Println(123)
	body := []byte("<h1>Hello, World!</h1>\n<p>Hello Gopher.</p>")
	resp.Write(body)
}
/*
需要nginx配置fastcgi
server {
listen 80;
server_name go.dev;
root /root/go/src/godev;
index index.html;
#gzip off;
#proxy_buffering off;
location / {
try_files $uri $uri/;
}
location ~ /.* {
include fastcgi.conf;
fastcgi_pass 127.0.0.1:8081;
}
try_files $uri $uri.html =404;
}
*/ |
package odoo
import (
"fmt"
)
// AccountReconcileModel represents account.reconcile.model model.
// Every xmlrpc tag previously read "omptempty" — a typo the tag parser
// ignores, so the intended omit-empty behavior never applied; the tags
// below are corrected to "omitempty".
type AccountReconcileModel struct {
	LastUpdate              *Time      `xmlrpc:"__last_update,omitempty"`
	AccountId               *Many2One  `xmlrpc:"account_id,omitempty"`
	Amount                  *Float     `xmlrpc:"amount,omitempty"`
	AmountType              *Selection `xmlrpc:"amount_type,omitempty"`
	AnalyticAccountId       *Many2One  `xmlrpc:"analytic_account_id,omitempty"`
	CompanyId               *Many2One  `xmlrpc:"company_id,omitempty"`
	CreateDate              *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid               *Many2One  `xmlrpc:"create_uid,omitempty"`
	DisplayName             *String    `xmlrpc:"display_name,omitempty"`
	HasSecondLine           *Bool      `xmlrpc:"has_second_line,omitempty"`
	Id                      *Int       `xmlrpc:"id,omitempty"`
	JournalId               *Many2One  `xmlrpc:"journal_id,omitempty"`
	Label                   *String    `xmlrpc:"label,omitempty"`
	Name                    *String    `xmlrpc:"name,omitempty"`
	SecondAccountId         *Many2One  `xmlrpc:"second_account_id,omitempty"`
	SecondAmount            *Float     `xmlrpc:"second_amount,omitempty"`
	SecondAmountType        *Selection `xmlrpc:"second_amount_type,omitempty"`
	SecondAnalyticAccountId *Many2One  `xmlrpc:"second_analytic_account_id,omitempty"`
	SecondJournalId         *Many2One  `xmlrpc:"second_journal_id,omitempty"`
	SecondLabel             *String    `xmlrpc:"second_label,omitempty"`
	SecondTaxId             *Many2One  `xmlrpc:"second_tax_id,omitempty"`
	Sequence                *Int       `xmlrpc:"sequence,omitempty"`
	TaxId                   *Many2One  `xmlrpc:"tax_id,omitempty"`
	WriteDate               *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid                *Many2One  `xmlrpc:"write_uid,omitempty"`
}
// AccountReconcileModels represents array of account.reconcile.model model.
type AccountReconcileModels []AccountReconcileModel

// AccountReconcileModelModel is the odoo model name used in every RPC call below.
const AccountReconcileModelModel = "account.reconcile.model"
// Many2One convert AccountReconcileModel to *Many2One, a reference built
// from the record id with an empty display name.
func (arm *AccountReconcileModel) Many2One() *Many2One {
	return NewMany2One(arm.Id.Get(), "")
}
// CreateAccountReconcileModel creates a new account.reconcile.model model and returns its id.
// It delegates to the batch variant with a one-element slice; when the
// server returns no ids (and no error), -1 is reported with a nil error.
func (c *Client) CreateAccountReconcileModel(arm *AccountReconcileModel) (int64, error) {
	ids, err := c.CreateAccountReconcileModels([]*AccountReconcileModel{arm})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateAccountReconcileModels creates new account.reconcile.model models
// and returns their ids. (Comment previously named the singular function.)
func (c *Client) CreateAccountReconcileModels(arms []*AccountReconcileModel) ([]int64, error) {
	// Repack into []interface{} as required by the generic Create call.
	var vv []interface{}
	for _, v := range arms {
		vv = append(vv, v)
	}
	return c.Create(AccountReconcileModelModel, vv)
}
// UpdateAccountReconcileModel updates an existing account.reconcile.model record.
// It is a convenience wrapper around UpdateAccountReconcileModels for a single id.
func (c *Client) UpdateAccountReconcileModel(arm *AccountReconcileModel) error {
	return c.UpdateAccountReconcileModels([]int64{arm.Id.Get()}, arm)
}
// UpdateAccountReconcileModels updates existing account.reconcile.model records.
// All records (represented by ids) will be updated by arm values.
func (c *Client) UpdateAccountReconcileModels(ids []int64, arm *AccountReconcileModel) error {
	return c.Update(AccountReconcileModelModel, ids, arm)
}
// DeleteAccountReconcileModel deletes an existing account.reconcile.model record.
// It is a convenience wrapper around DeleteAccountReconcileModels for a single id.
func (c *Client) DeleteAccountReconcileModel(id int64) error {
	return c.DeleteAccountReconcileModels([]int64{id})
}
// DeleteAccountReconcileModels deletes existing account.reconcile.model records
// identified by ids.
func (c *Client) DeleteAccountReconcileModels(ids []int64) error {
	return c.Delete(AccountReconcileModelModel, ids)
}
// GetAccountReconcileModel gets an existing account.reconcile.model
// record by id, or an error when it does not exist.
func (c *Client) GetAccountReconcileModel(id int64) (*AccountReconcileModel, error) {
	recs, err := c.GetAccountReconcileModels([]int64{id})
	if err != nil {
		return nil, err
	}
	if recs != nil && len(*recs) > 0 {
		first := &(*recs)[0]
		return first, nil
	}
	return nil, fmt.Errorf("id %v of account.reconcile.model not found", id)
}
// GetAccountReconcileModels gets existing account.reconcile.model
// records by their ids.
func (c *Client) GetAccountReconcileModels(ids []int64) (*AccountReconcileModels, error) {
	result := &AccountReconcileModels{}
	err := c.Read(AccountReconcileModelModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindAccountReconcileModel finds the first account.reconcile.model
// record matching criteria (search is limited to one result).
func (c *Client) FindAccountReconcileModel(criteria *Criteria) (*AccountReconcileModel, error) {
	result := &AccountReconcileModels{}
	if err := c.SearchRead(AccountReconcileModelModel, criteria, NewOptions().Limit(1), result); err != nil {
		return nil, err
	}
	if result != nil && len(*result) > 0 {
		return &((*result)[0]), nil
	}
	return nil, fmt.Errorf("account.reconcile.model was not found with criteria %v", criteria)
}
// FindAccountReconcileModels finds account.reconcile.model records by
// querying with criteria and filtering with options.
func (c *Client) FindAccountReconcileModels(criteria *Criteria, options *Options) (*AccountReconcileModels, error) {
	result := &AccountReconcileModels{}
	err := c.SearchRead(AccountReconcileModelModel, criteria, options, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindAccountReconcileModelIds finds record ids matching criteria,
// filtered by options. On error an empty (non-nil) slice is returned.
func (c *Client) FindAccountReconcileModelIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountReconcileModelModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindAccountReconcileModelId finds the id of the first record matching
// criteria and options; -1 with an error when nothing matches.
func (c *Client) FindAccountReconcileModelId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountReconcileModelModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, fmt.Errorf("account.reconcile.model was not found with criteria %v and options %v", criteria, options)
	}
}
|
package models
import (
"time"
)
// LogEntry is one service log record as stored/serialized.
// (The previous "album" comment was a copy-paste leftover.)
type LogEntry struct {
	ID          int       `json:"id"`
	ServiceId   int       `json:"service_id"`
	Description string    `json:"description"`
	CreatedAt   time.Time `json:"created_at"`
}
// Logs wraps a list of LogEntry records for JSON transport.
type Logs struct {
	Logs []LogEntry `json:"logs"`
}

// LogEntryOutput is the outward-facing shape of a log entry.
type LogEntryOutput struct {
	URL         string    `json:"url"`
	Description string    `json:"description"`
	Time        time.Time `json:"time"`
}

// LogsOutput wraps a list of LogEntryOutput records for JSON transport.
type LogsOutput struct {
	LogsOutput []LogEntryOutput `json:"logsOutput"`
}
package strategy_pattern
import (
"fmt"
)
// Duck composes its quacking and flying behavior from pluggable
// strategies (strategy pattern).
type Duck struct {
	quacker QuackStrategy // how this duck quacks
	flyer   FlyStrategy   // how this duck flies
	name    string        // used in strategy output messages
}
/* Passing down calls to duck's quacker and flyer */

// Quack delegates to the duck's configured QuackStrategy.
func (d Duck) Quack() string {
	return d.quacker.Quack(d)
}

// Fly delegates to the duck's configured FlyStrategy.
func (d Duck) Fly() string {
	return d.flyer.Fly(d)
}
/* Allows us to define quacking and flying strategies */

// QuackStrategy produces the quack message for a given duck.
type QuackStrategy interface {
	Quack(Duck) string
}

// FlyStrategy produces the fly message for a given duck.
type FlyStrategy interface {
	Fly(Duck) string
}
/* Quack strategies */

// CityQuacker is a QuackStrategy for city ducks.
type CityQuacker struct{}

// Quack reports d quacking like a city duck.
func (q CityQuacker) Quack(d Duck) string {
	const format = "%s quacks like a city duck!\n"
	return fmt.Sprintf(format, d.name)
}
// PondQuacker is a QuackStrategy for pond ducks.
type PondQuacker struct{}

// Quack reports d quacking like a pond duck.
func (q PondQuacker) Quack(d Duck) string {
	const format = "%s quacks like a pond duck!\n"
	return fmt.Sprintf(format, d.name)
}
/* Fly strategies */

// CityFlyer is a FlyStrategy for city ducks.
type CityFlyer struct{}

// Fly reports d flying like a city duck.
func (q CityFlyer) Fly(d Duck) string {
	const format = "%s flies like a city duck!\n"
	return fmt.Sprintf(format, d.name)
}
// PondFlyer is a FlyStrategy for pond ducks.
type PondFlyer struct{}

// Fly reports d flying like a pond duck.
// BUG FIX: the original declared this method as Quack even though its
// message describes flying, so PondFlyer did not implement FlyStrategy.
func (q PondFlyer) Fly(d Duck) string {
	return fmt.Sprintf("%s flies like a pond duck!\n", d.name)
}
|
package command
import (
"fmt"
"github.com/urfave/cli"
"github.com/wincentrtz/gobase/gobase/command/app"
"github.com/wincentrtz/gobase/gobase/command/db"
"github.com/wincentrtz/gobase/gobase/command/generate"
instance "github.com/wincentrtz/gobase/gobase/infrastructures/db"
)
// Command dispatches the CLI: the first argument selects the command
// group (generate | db | serve) and the second selects the sub-command.
// Unknown commands print a message; the function always returns nil.
func Command(c *cli.Context) error {
	firstArgs := c.Args().Get(0)
	secondArgs := c.Args().Get(1)
	switch firstArgs {
	case "generate":
		// Go switch cases do not fall through, so the explicit `break`
		// statements in the original were redundant and are removed.
		switch secondArgs {
		case "domain":
			generate.Domain(c)
		case "response":
			generate.Response(c)
		case "request":
			generate.Request(c)
		default:
			fmt.Println("Command Does Not Exist")
		}
	case "db":
		postgres := instance.Postgres()
		switch secondArgs {
		case "fresh":
			db.Fresh(postgres)
		case "clear":
			db.Drop(postgres)
		case "migrate":
			db.Migrate(postgres)
		default:
			fmt.Println("Command Does Not Exist")
		}
	case "serve":
		app.Serve()
	default:
		fmt.Println("Command Does Not Exist")
	}
	return nil
}
|
package main
import (
"log"
"time"
)
// PayApp is one payment request plus its asynchronous result.
type PayApp struct {
	Command     string       // payment command forwarded to the app
	Timestamp   int64        // unix time the request was created; used for the 30s timeout in GetPay
	Payer       string       // who is paying
	ResultState bool         // set true by SetPay once the app has answered
	AccountType string       // "alipay" / "wechat" / "qq", mapped from numeric codes in GetPayApp
	Account     string       // receiving account
	Amount      float64      // payment amount
	Data        PayAppResult // result posted back via SetPay
}
// PayAppResult is the payload the app posts back for a payment request.
type PayAppResult struct {
	Account string                 // account the result refers to
	Url     string                 // payment URL returned by the app
	Code    int                    // status code
	Message string                 // human-readable status
	Data    map[string]interface{} // extra, schema-free fields
}
// GetPayApp builds a PayApp for the given command, mapping the numeric
// account-type code to its provider name ("1" alipay, "2" wechat,
// "3" qq; anything else leaves AccountType empty) and stamping the
// current unix time.
func GetPayApp(cmd string, account string, accountType string, payer string, amount float64) *PayApp {
	pa := &PayApp{
		Command:   cmd,
		Account:   account,
		Amount:    amount,
		Payer:     payer,
		Timestamp: time.Now().Unix(),
	}
	switch accountType {
	case "1":
		pa.AccountType = "alipay"
	case "2":
		pa.AccountType = "wechat"
	case "3":
		pa.AccountType = "qq"
	}
	return pa
}
// GetPay fires the payment request via GetAppPay and then polls every
// 5ms until SetPay flips ResultState or 30 seconds have elapsed since
// the request was created.
func (pa *PayApp) GetPay() {
	if err := GetAppPay(pa); err != nil {
		log.Println("GetPay Error", err)
		return
	}
	// The original re-tested pa.ResultState inside the body immediately
	// after the loop condition had already checked it; that dead branch
	// is removed.
	for !pa.ResultState {
		if time.Now().Unix()-pa.Timestamp > 30 {
			log.Println("Request App timeout")
			break
		}
		time.Sleep(5 * time.Millisecond)
	}
}
// SetPay stores the app's result and marks the request finished, which
// releases the polling loop in GetPay.
func (pa *PayApp) SetPay(input PayAppResult) {
	pa.Data = input
	pa.ResultState = true
}
|
// Copyright 2018-present The Yumcoder Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Author: yumcoder (omid.jn@gmail.com)
//
package datatype
import (
"crypto/rand"
"encoding/base64"
"github.com/gogo/protobuf/proto"
"testing"
"yumcoder.com/yumd/server/core/test2"
)
// Test_Size logs how a 500-byte random payload grows through protobuf
// marshalling and base64 encoding. It makes no assertions and never
// fails; it exists to eyeball message-size overhead.
func Test_Size(t *testing.T) {
	b := make([]byte, 500)
	rand.Read(b)
	m := &test2.PingRequest{
		Message: string(b),
	}
	t.Log(m)
	buf, _ := proto.Marshal(m)
	t.Log(len(buf))
	t.Log(len(base64.StdEncoding.EncodeToString(buf)))
}
package main
import (
"encoding/json"
"errors"
"fmt"
"github.com/cactus/go-statsd-client/statsd"
"io/ioutil"
"log"
"net/http"
"time"
)
// NginxResponse mirrors the parts of the NGINX Plus /status JSON this
// exporter reads: connection counters plus upstream and server-zone maps.
type NginxResponse struct {
	Connections struct {
		Accepted int64 `json:"accepted"`
		Active   int64 `json:"active"`
		Dropped  int64 `json:"dropped"`
		Idle     int64 `json:"idle"`
	} `json:"connections"`
	Upstreams   map[string][]Backend   `json:"upstreams"`
	ServerZones map[string]interface{} `json:"server_zones"`
}
// Backend is one upstream peer in the NGINX Plus status payload.
type Backend struct {
	ID     int    `json:"id"`
	Server string `json:"server"`
}
// NginxStatus fetches and decodes http://demo.nginx.com/status.
// It returns an error on transport failure, non-200 status or a body
// that does not parse as NginxResponse.
func NginxStatus() (*NginxResponse, error) {
	var nginxStatusServer string = "demo.nginx.com"
	resp, err := http.Get(fmt.Sprintf("http://%s/status", nginxStatusServer))
	if err != nil {
		return nil, err
	}
	// Close the body on every path; the original registered the Close
	// after the status check, leaking the connection on non-200 replies.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New("Non 200 OK")
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var er NginxResponse
	if err := json.Unmarshal(data, &er); err != nil {
		return nil, err
	}
	return &er, nil
}
// SendStatsD increments the statsd counter
// "nginx.status.demo_nginx_com.<gs>.<gt>" by gv against the local agent.
func SendStatsD(gs string, gt string, gv int64) {
	// connect to statsd
	var sd string = "127.0.0.1:8125"
	var gk string = "nginx"
	client, err := statsd.NewClient(sd, gk)
	if err != nil {
		// Bail out here: client is nil on error, so the original code's
		// deferred Close and the Inc below would panic.
		log.Println(err)
		return
	}
	defer client.Close()
	// send metrics to statsd
	var fi float32 = 10.0
	var mt string = "status.demo_nginx_com"
	client.Inc(mt+"."+gs+"."+gt, gv, fi)
}
// main polls the NGINX Plus status endpoint forever: two samples one
// second apart give per-second deltas for accepted/dropped connections,
// which are shipped to statsd together with the active/idle gauges.
func main() {
	for {
		// grab nginx plus status json (delayed)
		nt, err := NginxStatus()
		if err != nil {
			// The original carried on with a nil nt/nr and crashed on
			// the first field access; skip this cycle instead.
			log.Println(err)
			time.Sleep(time.Millisecond * 1000)
			continue
		}
		// sleep x seconds - time.Millisecond * 1000 = 1sec
		time.Sleep(time.Millisecond * 1000)
		// grab nginx plus status json
		nr, err := NginxStatus()
		if err != nil {
			log.Println(err)
			continue
		}
		// send nginx plus connection metrics
		SendStatsD("connections", "accepted_", nr.Connections.Accepted-nt.Connections.Accepted)
		SendStatsD("connections", "dropped", nr.Connections.Dropped-nt.Connections.Dropped)
		SendStatsD("connections", "active", nr.Connections.Active)
		SendStatsD("connections", "idle", nr.Connections.Idle)
		// testing loop of server zones
		for _, zone := range nr.ServerZones {
			fmt.Println(zone)
		}
		// loop through upstream
		for _, backend := range nr.Upstreams {
			fmt.Println(backend)
			for _, server := range backend {
				fmt.Println(server.Server)
			}
		}
	}
}
|
//Clinical Text Spell Checker
//What the tool does: Uses a large clinical dictionary to check for spelling mistakes in a large csv file
package main
import (
"bufio"
"flag"
"io"
"log"
"encoding/csv"
"fmt"
"os"
"github.com/VertisPro/fasthealth-tools/pkg/textutils"
)
/*
"Database support is provided through the GO SQL (https://github.com/go-sql-driver/mysql) and mattn(https://github.com/mattn/go-sqlite) sqlite library and provides supports for MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) and sqlite.\r\n"+
"https://github.com/lib/pq.\r\n"+
*/
// printusage writes the tool description and flag defaults to stderr and
// exits; installed as flag.Usage in main.
func printusage() {
	fmt.Fprintf(os.Stderr, "\r\nClinical Text Spell Checker\r\nThis tool carries out spelling checks on blobs of clinical text. The input is a csv file along with information on which column contains the text blob. An output file is created containing the original blob and Spelling suggestions for easy correlation. . The tools requires a dictionary file and a large dictionary of (US/AU) medical and (US) english words has been provided. This tool uses the ENCHANT library (2.2.1) and is provided as part of the FastHealth open source tools.\r\n")
	flag.PrintDefaults()
	os.Exit(0)
}
// main drives the spell checker: validate flags and input paths, build
// the checker from the dictionary, then stream the input CSV row by
// row, writing spelling suggestions for the configured text column to
// the output CSV.
func main() {
	//Set up the command line usage
	flag.Usage = printusage
	infile := flag.String("i", "", "(Required) Input CSV file")
	outfile := flag.String("o", "", "(Required) Output CSV file, overwritten if exists")
	dicfile := flag.String("d", "ctscwords.dic", "(Optional) External dictionary file")
	textcolumn := flag.Int("c", 1, "(optional) Column where the text is placed (defualt is 1 which means first column) ")
	flag.Parse()
	// Check if dictionary file exists
	if _, err := os.Stat(*dicfile); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "The dictionary file does not exist or the path is incorrect\r\n")
		os.Exit(1)
	}
	// Check if input file exists
	if _, err := os.Stat(*infile); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "The input file does not exist or the path is incorrect\r\n")
		os.Exit(1)
	}
	if *outfile == "" {
		fmt.Fprintf(os.Stderr, "The outfile has not been specified\r\n")
		os.Exit(1)
	}
	// log.Fatal exits the process itself; the original's os.Exit calls
	// after it were unreachable and have been removed throughout.
	spellchecker, err := textutils.NewSpellChecker(*dicfile)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Open input file; the original discarded this error and would have
	// failed later with a nil-reader panic on a vanished file.
	csvFile, err := os.Open(*infile)
	if err != nil {
		log.Fatal(err)
	}
	defer csvFile.Close()
	reader := csv.NewReader(bufio.NewReader(csvFile))
	//setup writer for outout
	csvOut, err := os.Create(*outfile)
	if err != nil {
		log.Fatal("Unable to open output")
	}
	defer csvOut.Close()
	w := csv.NewWriter(csvOut)
	for {
		line, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): -c defaults to 1, which indexes the SECOND CSV
		// field; the flag help says "first column" — confirm intent.
		sentence := line[*textcolumn]
		suggestions, err := spellchecker.CheckSentence(sentence)
		if err != nil {
			log.Fatal(err.Error())
		}
		//strings.Join(suggestions, " ")
		if err = w.Write(suggestions); err != nil {
			log.Fatal(err)
		}
	}
	// Flush the csv.Writer's buffer; without this the original could
	// silently drop the tail of the output file.
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
}
|
package models
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/bnagy/gapstone"
"strings"
)
// Disas disassembles mem (loaded at addr) with a capstone engine for the
// given architecture and returns one formatted line per instruction:
// "0x<addr>: <hex bytes> <mnemonic> <operands>". The hex column is
// left-padded so all lines align; an optional pad argument sets a
// minimum byte width for that column. Empty input yields "".
func Disas(mem []byte, addr uint64, arch *Arch, pad ...int) (string, error) {
	if len(mem) == 0 {
		return "", nil
	}
	engine, err := gapstone.New(arch.CS_ARCH, arch.CS_MODE)
	if err != nil {
		return "", err
	}
	defer engine.Close()
	// 0 = disassemble all instructions in mem.
	asm, err := engine.Disasm(mem, addr, 0)
	if err != nil {
		return "", err
	}
	// Column width is the widest instruction (in bytes), at least pad[0].
	var width uint
	if len(pad) > 0 {
		width = uint(pad[0])
	}
	for _, insn := range asm {
		if insn.Size > width {
			width = insn.Size
		}
	}
	var out []string
	for _, insn := range asm {
		// Two hex characters per missing byte keeps the mnemonics aligned.
		pad := strings.Repeat(" ", int(width-insn.Size)*2)
		data := pad + hex.EncodeToString(insn.Bytes)
		out = append(out, fmt.Sprintf("0x%x: %s %s %s", insn.Address, data, insn.Mnemonic, insn.OpStr))
	}
	return strings.Join(out, "\n"), nil
}
// HexDump formats mem as hex-dump lines of the form
// "0x<addr>: <hex blocks> [<ascii>]", where addresses start at base and
// each block holds one machine word of bits/8 bytes. The number of
// blocks per line is chosen so a line fits in roughly 80 columns; a
// short final line is zero-padded to a full block.
func HexDump(base uint64, mem []byte, bits int) []string {
	// clean maps non-printable bytes to '.' for the ASCII gutter.
	var clean = func(p []byte) string {
		o := make([]byte, len(p))
		for i, c := range p {
			if c >= 0x20 && c <= 0x7e {
				o[i] = c
			} else {
				o[i] = '.'
			}
		}
		return string(o)
	}
	bsz := bits / 8 // bytes per block (word size)
	hexFmt := fmt.Sprintf("0x%%0%dx:", bsz*2)
	padBlock := strings.Repeat(" ", bsz*2) // filler for a missing hex block
	padTail := strings.Repeat(" ", bsz)    // filler for a missing ASCII block
	width := 80
	addrSize := bsz*2 + 4
	// Blocks per line: hex takes 2 chars/byte plus separators, the ASCII
	// gutter 1 char/byte — hence the 3/4 and (bsz+1)*2 factors.
	blockCount := ((width - addrSize) * 3 / 4) / ((bsz + 1) * 2)
	lineSize := blockCount * bsz
	var out []string
	blocks := make([]string, blockCount)
	tail := make([]string, blockCount)
	for i := 0; i < len(mem); i += lineSize {
		memLine := mem[i:]
		for j := 0; j < blockCount; j++ {
			if j*bsz < len(memLine) {
				end := (j + 1) * bsz
				var block []byte
				if end > len(memLine) {
					// Final partial block: zero-fill to a full word.
					pad := bytes.Repeat([]byte{0}, end-len(memLine))
					block = append(memLine[j*bsz:], pad...)
				} else {
					block = memLine[j*bsz : end]
				}
				blocks[j] = hex.EncodeToString(block)
				tail[j] = clean(block)
			} else {
				blocks[j] = padBlock
				tail[j] = padTail
			}
		}
		line := []string{fmt.Sprintf(hexFmt, base+uint64(i))}
		line = append(line, strings.Join(blocks, " "))
		line = append(line, fmt.Sprintf("[%s]", strings.Join(tail, " ")))
		out = append(out, strings.Join(line, " "))
	}
	return out
}
|
package manifest
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// makeTestFile creates a temporary test file and writes data into it.
// It returns the full path to the newly created file, or an error if
// the file cannot be created or written. The caller removes the file.
func makeTestFile(data []byte) (string, error) {
	// create temp file for testing
	f, err := ioutil.TempFile("", "test")
	if err != nil {
		return "", err
	}
	// Always release the descriptor; the original leaked it.
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		// Don't leave a half-written file behind on failure.
		os.Remove(f.Name())
		return "", err
	}
	return f.Name(), nil
}
// TestParse covers three Parse paths: a missing file, a syntactically
// invalid manifest, and a valid manifest whose init hosts round-trip.
func TestParse(t *testing.T) {
	// non-existent path: Parse must fail and return a nil manifest
	m, err := Parse("foobar.yaml")
	assert.Error(t, err)
	assert.Nil(t, m)
	// invalid config: a mapping where a host list is expected
	invalid := `
hosts:
  init:
    foo: bar
`
	invPath, err := makeTestFile([]byte(invalid))
	// NOTE(review): defer is registered before the error check; if
	// makeTestFile failed, invPath is "" and os.Remove("") is a no-op error.
	defer os.Remove(invPath)
	assert.NoError(t, err)
	m, err = Parse(invPath)
	assert.Error(t, err)
	assert.Nil(t, m)
	// valid config: both hosts and mounts sections present
	valid := `
hosts:
  init:
    - one
    - two
mounts:
  - path: vPath
    type: pki
`
	vPath, err := makeTestFile([]byte(valid))
	defer os.Remove(vPath)
	assert.NoError(t, err)
	m, err = Parse(vPath)
	assert.NoError(t, err)
	assert.NotNil(t, m)
	// parsed init hosts must preserve order and content
	assert.EqualValues(t, m.Hosts.Init, []string{"one", "two"})
}
// TestGetHosts verifies that GetHosts returns the host list for the
// "init" and "unseal" actions, and errors for an unknown action.
func TestGetHosts(t *testing.T) {
	vaultHosts := []string{
		"http://192.168.1.101:8200",
		"http://192.168.1.102:8200",
	}
	// init hosts raw data
	data := `hosts:
  init:
    - ` + vaultHosts[0] + `
    - ` + vaultHosts[1]
	initPath, err := makeTestFile([]byte(data))
	defer os.Remove(initPath)
	assert.NoError(t, err)
	m, err := Parse(initPath)
	assert.NoError(t, err)
	assert.NotNil(t, m)
	hosts, err := m.GetHosts("init")
	assert.NoError(t, err)
	// element-wise comparison against the source list
	for i := 0; i < len(hosts); i++ {
		assert.Equal(t, hosts[i], vaultHosts[i])
	}
	// unseal hosts raw data
	data = `hosts:
  unseal:
    - ` + vaultHosts[0] + `
    - ` + vaultHosts[1]
	unsealPath, err := makeTestFile([]byte(data))
	defer os.Remove(unsealPath)
	assert.NoError(t, err)
	m, err = Parse(unsealPath)
	assert.NoError(t, err)
	assert.NotNil(t, m)
	hosts, err = m.GetHosts("unseal")
	assert.NoError(t, err)
	for i := 0; i < len(hosts); i++ {
		assert.Equal(t, hosts[i], vaultHosts[i])
	}
	// unsupported action causes error
	_, err = m.GetHosts("foobar")
	assert.Error(t, err)
}
|
package main
// decodeUint16 reads a big-endian uint16 from data starting at offset.
func decodeUint16(data []byte, offset int) uint16 {
	// Each byte must be widened to uint16 BEFORE shifting: a byte shifted
	// left by 8 is always 0, so the original silently dropped the high byte.
	return uint16(data[offset])<<8 | uint16(data[offset+1])
}
// encodeUint16 writes value into data at offset in big-endian order and
// returns data for chaining.
func encodeUint16(value uint16, data []byte, offset int) []byte {
	for i := 0; i < 2; i++ {
		data[offset+i] = byte(value >> uint(8*(1-i)))
	}
	return data
}
// decodeUint32 reads a big-endian uint32 from data starting at offset.
func decodeUint32(data []byte, offset int) uint32 {
	// Widen each byte to uint32 BEFORE shifting: shifting a byte by 8 or
	// more always yields 0, so the original returned only the last byte.
	return uint32(data[offset])<<24 |
		uint32(data[offset+1])<<16 |
		uint32(data[offset+2])<<8 |
		uint32(data[offset+3])
}
// encodeUint32 writes value into data at offset in big-endian order and
// returns data for chaining.
func encodeUint32(value uint32, data []byte, offset int) []byte {
	for i := 0; i < 4; i++ {
		data[offset+i] = byte(value >> uint(8*(3-i)))
	}
	return data
}
// decodeUint64 reads a big-endian uint64 from data starting at offset.
func decodeUint64(data []byte, offset int) uint64 {
	// Widen each byte to uint64 BEFORE shifting: shifting a byte by 8 or
	// more always yields 0, so the original returned only the last byte.
	return uint64(data[offset])<<56 |
		uint64(data[offset+1])<<48 |
		uint64(data[offset+2])<<40 |
		uint64(data[offset+3])<<32 |
		uint64(data[offset+4])<<24 |
		uint64(data[offset+5])<<16 |
		uint64(data[offset+6])<<8 |
		uint64(data[offset+7])
}
// encodeUint64 writes value into data at offset in big-endian order and
// returns data for chaining.
func encodeUint64(value uint64, data []byte, offset int) []byte {
	for i := 0; i < 8; i++ {
		data[offset+i] = byte(value >> uint(8*(7-i)))
	}
	return data
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package wevideo implements WeVideo operations.
package wevideo
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/cuj"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/pointer"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
const (
	dragTime       = 2 * time.Second  // Duration of each mouse drag gesture.
	shortUITimeout = 3 * time.Second  // Used for situations where UI response might be faster.
	longUITimeout  = 30 * time.Second // Used for situations where UI response might be slow.
	retryTimes     = 3                // retryTimes is the maximum number of times the action will be retried.
	editorTabClass = "MuiListItem-button" // CSS class shared by the editor's left-hand tool tabs.
)

// weVideoWebArea matches the root web area of the WeVideo editor page.
var weVideoWebArea = nodewith.Name("WeVideo").Role(role.RootWebArea)

// beginningOfTheClip and endingOfTheClip match the trim handles of the
// currently selected clip on the timeline.
var beginningOfTheClip = nodewith.NameContaining("the beginning of the clip").ClassName("trim-btn")
var endingOfTheClip = nodewith.NameContaining("the ending of the clip").ClassName("trim-btn")
// Clip defines the struct related to WeVideo's clip.
type Clip struct {
	name       string       // display name of the clip
	startPoint coords.Point // center of the clip's leading trim handle on screen
	endPoint   coords.Point // center of the clip's trailing trim handle on screen
}
// WeVideo defines the struct related to WeVideo web.
type WeVideo struct {
	conn       *chrome.Conn                // connection to the WeVideo tab; set by Open
	tconn      *chrome.TestConn            // test API connection
	ui         *uiauto.Context             // UI automation helper bound to tconn
	uiHandler  cuj.UIActionHandler         // clamshell/tablet-aware UI action handler
	kb         *input.KeyboardEventWriter  // keyboard used for typing and accelerators
	clips      map[string]Clip             // clips added to the timeline, keyed by clip name
	tabletMode bool                        // whether the device is in tablet mode
	br         *browser.Browser            // browser used to open the WeVideo tab
}
// NewWeVideo creates an instance of WeVideo.
func NewWeVideo(tconn *chrome.TestConn, kb *input.KeyboardEventWriter, uiHandler cuj.UIActionHandler, tabletMode bool, br *browser.Browser) *WeVideo {
	w := &WeVideo{
		tconn:      tconn,
		kb:         kb,
		uiHandler:  uiHandler,
		tabletMode: tabletMode,
		br:         br,
	}
	// Derived members: a UI context bound to the test connection and an
	// empty clip registry to be filled by AddStockVideo.
	w.ui = uiauto.New(tconn)
	w.clips = make(map[string]Clip)
	return w
}
// Open opens a WeVideo webpage on chrome browser.
func (w *WeVideo) Open() action.Action {
	return func(ctx context.Context) error {
		var err error
		// Open WeVideo in a new tab and remember the connection for Close.
		if w.conn, err = w.uiHandler.NewChromeTab(ctx, w.br, cuj.WeVideoURL, true); err != nil {
			return errors.Wrap(err, "failed to connect to chrome")
		}
		return nil
	}
}
// Login logs in to WeVideo with the given Google account. If the WeVideo
// root web area already exists the login flow is skipped entirely.
func (w *WeVideo) Login(account string) action.Action {
	ui := w.ui
	// loginRequired returns an error when already logged in, which makes the
	// surrounding IfSuccessThen skip the whole login flow.
	loginRequired := func(ctx context.Context) error {
		if err := ui.Exists(weVideoWebArea)(ctx); err == nil {
			// Error string fixed: was "It has been loged in" (typo, and Go
			// error strings should be lowercase).
			return errors.New("already logged in")
		}
		return nil
	}
	loginButton := nodewith.Name("Log in").Role(role.Button)
	loginReg := regexp.MustCompile(`(Login|Log in) to your account`)
	loginWebArea := nodewith.NameRegex(loginReg).Role(role.RootWebArea)
	googleLink := nodewith.Name("Log in with Google").Role(role.Link).Ancestor(loginWebArea)
	targetAccount := nodewith.Name(account).Role(role.StaticText)
	loginWithGoogle := uiauto.NamedCombine("login with Google",
		ui.DoDefaultUntil(googleLink, ui.WithTimeout(shortUITimeout).WaitUntilExists(targetAccount)),
		ui.LeftClickUntil(targetAccount, ui.WithTimeout(shortUITimeout).WaitUntilGone(targetAccount)),
	)
	// Sign up process (only shown to fresh accounts).
	signUpWebArea := nodewith.Name("WeVideo Checkout").Role(role.RootWebArea)
	answer1 := nodewith.Name("Business / Marketing").Role(role.Button)
	answer2 := nodewith.Name("Product demos").Role(role.Button)
	createButton := nodewith.Name("Start creating!").Role(role.Button)
	signUp := uiauto.NamedCombine("sign up WeVideo",
		ui.LeftClick(answer1),
		ui.LeftClick(answer2),
		ui.LeftClick(createButton),
	)
	return uiauto.IfSuccessThen(loginRequired,
		// There is a bug in Wevideo login process, sometimes it needs to login twice with google account.
		// So add retry login here.
		uiauto.Retry(retryTimes, uiauto.NamedCombine("log in WeVideo",
			uiauto.IfSuccessThen(ui.Exists(loginButton), ui.DoDefault(loginButton)),
			ui.WaitUntilExists(loginWebArea),
			loginWithGoogle,
			// Sign up if there is a sign up page.
			uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(signUpWebArea), signUp),
			// A second login page may reappear; log in again if so.
			uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(loginWebArea), loginWithGoogle),
			ui.WaitUntilExists(weVideoWebArea),
		)))
}
// Create creates the new video editing.
// It dismisses the optional promo dialog, clicks "create new", picks the
// "Video" project type, and waits for the untitled project ("my video")
// title to confirm the editor opened.
func (w *WeVideo) Create() action.Action {
	promptWindow := nodewith.ClassName("Modal medium")
	closeButton := nodewith.Name("CLOSE").Role(role.Button).Ancestor(promptWindow)
	createNewRe := regexp.MustCompile("(?i)create new")
	createNewButton := nodewith.NameRegex(createNewRe).Ancestor(weVideoWebArea).First()
	videoText := nodewith.Name("Video").Role(role.StaticText).Ancestor(weVideoWebArea)
	titleRe := regexp.MustCompile("(?i)my video")
	titleText := nodewith.NameRegex(titleRe).Role(role.StaticText).Ancestor(weVideoWebArea)
	// The prompt window appears at an unpredictable time, so retry the whole
	// sequence to make sure it gets closed before clicking "create new".
	return uiauto.Retry(retryTimes, uiauto.NamedCombine("create the new video editing",
		// Close the pop-up prompt window.
		uiauto.IfSuccessThen(w.ui.WithTimeout(shortUITimeout).WaitUntilExists(closeButton), w.ui.LeftClick(closeButton)),
		w.ui.DoDefault(createNewButton),
		w.ui.DoDefault(videoText),
		// Editor loading can be slow; wait with the long timeout.
		w.ui.WithTimeout(longUITimeout).WaitUntilExists(titleText),
	))
}
// AddStockVideo adds stock video to expected track.
// It opens the stock media panel, searches for clipName, drags the clip
// whose duration label contains clipTime onto the timeline (either right
// after previousClipName, or at the playhead on expectedTrack when
// previousClipName is empty), and records the new clip's trim-handle
// locations in w.clips for later steps.
func (w *WeVideo) AddStockVideo(clipName, previousClipName, clipTime, expectedTrack string) action.Action {
	ui := w.ui
	searchVideo := nodewith.Name("Search videos").Role(role.TextField)
	videosTab := nodewith.Name("Videos").Role(role.Tab).HasClass(editorTabClass)
	tryItNowButton := nodewith.Name("TRY IT NOW").Role(role.Button)
	// Open the Videos tab only when the search box is not already visible.
	openStockMedia := uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilGone(searchVideo),
		// The prompt window appears at an unpredictable time, so retry to
		// make sure it gets closed before interacting with the tab.
		uiauto.Retry(retryTimes, uiauto.NamedCombine("open stock media",
			w.closePromptWindow(),
			ui.LeftClick(videosTab),
			uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(tryItNowButton), ui.LeftClick(tryItNowButton)),
			ui.WaitUntilExists(searchVideo),
		)))
	// findVideo types the clip name into the search box and submits it.
	findVideo := uiauto.NamedCombine("find video",
		ui.LeftClick(searchVideo),
		ui.WaitUntilExists(searchVideo.Focused()),
		w.kb.AccelAction("Ctrl+A"), // replace any previous query
		w.kb.TypeAction(clipName),
		w.kb.AccelAction("Enter"),
	)
	// dragVideoToTrack drags the found clip from the media panel onto the
	// timeline and confirms the drop via the "Insert and push" option.
	dragVideoToTrack := func(ctx context.Context) error {
		clipButton := nodewith.NameContaining(clipTime).Role(role.StaticText)
		// Finding video from WeVideo videos may take a long time to load.
		clipLocation, err := ui.WithTimeout(longUITimeout).Location(ctx, clipButton)
		if err != nil {
			return err
		}
		dragUpStart := clipLocation.CenterPoint()
		var dragUpEnd coords.Point
		if previousClipName != "" {
			// Drop right after the previously added clip.
			dragUpEnd = w.clips[previousClipName].endPoint
		} else {
			// No previous clip: drop at the playhead on the requested track.
			expectedTrack := nodewith.Name(expectedTrack).Role(role.StaticText)
			trackLocation, err := ui.Location(ctx, expectedTrack)
			if err != nil {
				return err
			}
			playHead := nodewith.NameContaining("Playhead").Role(role.GenericContainer)
			playHeadLocation, err := ui.Location(ctx, playHead)
			if err != nil {
				return err
			}
			dragUpEnd = coords.NewPoint(playHeadLocation.Right(), trackLocation.CenterY())
		}
		insertAndPush := nodewith.NameContaining("Insert and push").Role(role.StaticText)
		pc := pointer.NewMouse(w.tconn)
		defer pc.Close()
		testing.ContextLogf(ctx, "Drag video to track from %v to %v", dragUpStart, dragUpEnd)
		// Sometimes it fails to drag the video, so add a retry here.
		return uiauto.Retry(retryTimes, uiauto.Combine("drag video to track",
			pc.Drag(dragUpStart, pc.DragTo(dragUpEnd, dragTime)),
			uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(insertAndPush), ui.LeftClick(insertAndPush)),
			// The trim handle appearing confirms the clip landed on the track.
			ui.WithTimeout(shortUITimeout).WaitUntilExists(beginningOfTheClip),
		))(ctx)
	}
	// addClip records the dropped clip's trim-handle positions so later
	// steps (AddText, AddTransition, PlayVideo) can target this clip.
	addClip := func(ctx context.Context) error {
		startLocation, err := ui.Location(ctx, beginningOfTheClip)
		if err != nil {
			return err
		}
		endLocation, err := ui.Location(ctx, endingOfTheClip)
		if err != nil {
			return err
		}
		w.clips[clipName] = Clip{
			name:       clipName,
			startPoint: startLocation.CenterPoint(),
			endPoint:   endLocation.CenterPoint(),
		}
		return nil
	}
	return uiauto.NamedCombine(fmt.Sprintf("add stock video \"%s\"", clipName),
		openStockMedia,
		findVideo,
		dragVideoToTrack,
		addClip,
		// Deselect by clicking the trailing trim handle.
		ui.LeftClick(endingOfTheClip),
	)
}
// AddText adds static text to the expected track.
// It drags the "Basic text" element to the start of the clip named clipName
// on expectedTrack, then opens the text editor, replaces the sample text
// with the given text, and saves.
func (w *WeVideo) AddText(clipName, expectedTrack, text string) action.Action {
	ui := w.ui
	textTab := nodewith.Name("Text").Role(role.Tab).HasClass(editorTabClass)
	// It removes the text info, so it can only capture "Basic text" node by classname.
	// The first one is "Basic text".
	basicText := nodewith.ClassName("ui-draggable-handle").Role(role.GenericContainer).First()
	// dragTextToTrack drags the text element onto the target track at the
	// recorded start point of the named clip.
	dragTextToTrack := func(ctx context.Context) error {
		textLocation, err := ui.Location(ctx, basicText)
		if err != nil {
			return err
		}
		expectedTrack := nodewith.Name(expectedTrack).Role(role.StaticText)
		trackLocation, err := ui.Location(ctx, expectedTrack)
		if err != nil {
			return err
		}
		dragUpStart := textLocation.CenterPoint()
		dragUpEnd := coords.NewPoint(w.clips[clipName].startPoint.X, trackLocation.CenterY())
		pc := pointer.NewMouse(w.tconn)
		defer pc.Close()
		testing.ContextLogf(ctx, "Drag text to track from %v to %v", dragUpStart, dragUpEnd)
		// Dragging occasionally fails; retry until the trim handle confirms
		// the text element landed on the track.
		return uiauto.Retry(retryTimes, uiauto.Combine("drag text to track",
			pc.Drag(dragUpStart, pc.DragTo(dragUpEnd, dragTime)),
			ui.WithTimeout(shortUITimeout).WaitUntilExists(beginningOfTheClip),
		))(ctx)
	}
	sampleText := nodewith.Name("Sample text").Role(role.StaticText)
	saveButtonRe := regexp.MustCompile("(?i)save changes")
	saveButton := nodewith.NameRegex(saveButtonRe).Role(role.StaticText).Ancestor(weVideoWebArea)
	// editTextProperties replaces the placeholder text and saves the change.
	editTextProperties := uiauto.NamedCombine("edit text",
		w.closePromptWindow(),
		w.kb.TypeAction("e"), // Type e to edit text.
		ui.LeftClick(sampleText),
		w.kb.AccelAction("Ctrl+A"), // select the sample text before replacing it
		w.kb.TypeAction(text),
		ui.DoDefaultUntil(saveButton, ui.WithTimeout(shortUITimeout).WaitUntilGone(saveButton)),
	)
	return uiauto.NamedCombine(fmt.Sprintf("add text to clip \"%s\"", clipName),
		w.closePromptWindow(),
		ui.LeftClick(textTab),
		dragTextToTrack,
		editTextProperties,
	)
}
// AddTransition adds transition to the expected clip.
// It opens the Transitions tab and drags the "Cross fade" transition onto
// the start of the clip named clipName.
func (w *WeVideo) AddTransition(clipName string) action.Action {
	transitionTab := nodewith.Name("Transitions").Role(role.Tab).HasClass(editorTabClass)
	transitionClip := nodewith.HasClass("clip-transition").Role(role.GenericContainer)
	// dragTransitionToClip drags "Cross fade" to the recorded start point of
	// the clip and waits for the transition marker to appear.
	dragTransitionToClip := func(ctx context.Context) error {
		crossFade := nodewith.Name("Cross fade").Role(role.StaticText)
		crossFadeLocation, err := w.ui.Location(ctx, crossFade)
		if err != nil {
			return err
		}
		dragUpStart := crossFadeLocation.CenterPoint()
		dragUpEnd := w.clips[clipName].startPoint
		pc := pointer.NewMouse(w.tconn)
		defer pc.Close()
		testing.ContextLogf(ctx, "Drag transition to clip from %v to %v", dragUpStart, dragUpEnd)
		// Dragging occasionally fails; retry until the marker confirms it.
		return uiauto.Retry(retryTimes, uiauto.Combine("drag transition to clip",
			pc.Drag(dragUpStart, pc.DragTo(dragUpEnd, dragTime)),
			// Check the transition is added.
			w.ui.WithTimeout(shortUITimeout).WaitUntilExists(transitionClip),
		))(ctx)
	}
	return uiauto.NamedCombine(fmt.Sprintf("add transition \"Cross fade\" to clip \"%s\"", clipName),
		w.closePromptWindow(),
		w.ui.LeftClick(transitionTab),
		dragTransitionToClip,
	)
}
// PlayVideo plays the edited video from the beginning of expected clip.
// It places the playhead by clicking the clip's recorded start point,
// starts playback with the space key, then waits for playback to begin and
// finally for it to stop (end of video).
func (w *WeVideo) PlayVideo(clipName string) action.Action {
	return uiauto.NamedCombine("play the edited video from the beginning to the end",
		w.ui.MouseClickAtLocation(0, w.clips[clipName].startPoint),
		w.kb.AccelAction("Space"), // Press space to play video.
		w.waitUntilPlaying(shortUITimeout),
		w.waitUntilPaused(longUITimeout),
	)
}
// waitUntilPlaying waits until the edited video is playing, i.e. until the
// reported play time changes from the reading taken at the start. While
// polling it also dismisses the "Clear All" popup and waits out the
// preview-preparation dialog, both of which can delay playback.
func (w *WeVideo) waitUntilPlaying(timeout time.Duration) action.Action {
	const preparingText = "We're preparing your preview"
	var err error
	var startTime time.Time
	var startPlayTime, currentPlayTime string
	// setStartPlayTime records the reference wall-clock and play time.
	setStartPlayTime := func(ctx context.Context) error {
		startTime = time.Now()
		startPlayTime, err = w.currentTime(ctx)
		return err
	}
	setCurrentPlayTime := func(ctx context.Context) error {
		currentPlayTime, err = w.currentTime(ctx)
		return err
	}
	// checkIsPlaying succeeds once the play time has advanced.
	checkIsPlaying := func(ctx context.Context) error {
		clearAllButton := nodewith.Name("Clear All").Role(role.Button).Ancestor(weVideoWebArea)
		preparingDialog := nodewith.Name(preparingText).Role(role.StaticText).Ancestor(weVideoWebArea)
		if err := uiauto.Combine("clear popup",
			uiauto.IfSuccessThen(w.ui.Exists(clearAllButton), w.ui.LeftClick(clearAllButton)),
			w.ui.WaitUntilGone(preparingDialog),
		)(ctx); err != nil {
			return err
		}
		if currentPlayTime != startPlayTime {
			// time.Since is the idiomatic form of time.Now().Sub(startTime).
			duration := time.Since(startTime)
			testing.ContextLogf(ctx, "Wait for %v to play video", duration)
			return nil
		}
		return errors.New("video is not playing")
	}
	return uiauto.NamedCombine("wait until playing",
		setStartPlayTime,
		// Allow extra time on top of timeout for the preparing dialog.
		w.ui.WithTimeout(timeout+longUITimeout).RetryUntil(setCurrentPlayTime, checkIsPlaying),
	)
}
// waitUntilPaused waits until the edited video is paused: it samples the
// play time twice, one second apart, and succeeds when the two readings
// are identical.
func (w *WeVideo) waitUntilPaused(timeout time.Duration) action.Action {
	var startTime time.Time
	var startPlayTime, currentPlayTime string
	getStartTime := func(ctx context.Context) error {
		startTime = time.Now()
		return nil
	}
	// getPlayTime takes two play-time samples one second apart.
	getPlayTime := func(ctx context.Context) (err error) {
		startPlayTime, err = w.currentTime(ctx)
		if err != nil {
			return err
		}
		// Sleep for a second to check whether the video is paused.
		if err := uiauto.Sleep(time.Second)(ctx); err != nil {
			return err
		}
		currentPlayTime, err = w.currentTime(ctx)
		return err
	}
	checkIsPaused := func(ctx context.Context) error {
		if currentPlayTime == startPlayTime {
			// time.Since is the idiomatic form of time.Now().Sub(startTime).
			duration := time.Since(startTime)
			testing.ContextLogf(ctx, "Wait for %v to pause video", duration)
			return nil
		}
		return errors.New("video is not paused")
	}
	return uiauto.NamedCombine("wait until paused",
		getStartTime,
		w.ui.WithTimeout(timeout).RetryUntil(getPlayTime, checkIsPaused),
	)
}
// currentTime gets the current playing time (in string) of the edited video.
// It prefers the preview toolbar's "elapsed / total" label and falls back to
// the time label attached to the playhead.
func (w *WeVideo) currentTime(ctx context.Context) (string, error) {
	fullTimeRe := regexp.MustCompile(`^(\d+):(\d+):(\d+) / (\d+):(\d+):(\d+)$`)
	fullTimeNode := nodewith.NameRegex(fullTimeRe).Role(role.StaticText).Ancestor(weVideoWebArea)
	if w.ui.Exists(fullTimeNode)(ctx) == nil {
		nodeInfo, err := w.ui.Info(ctx, fullTimeNode)
		if err != nil {
			return "", err
		}
		// The elapsed time is the part before " / ".
		return strings.Split(nodeInfo.Name, " ")[0], nil
	}
	// Fall back to the playhead's own time label.
	playHead := nodewith.Name("Playhead").Role(role.GenericContainer).Ancestor(weVideoWebArea)
	headTimeNode := nodewith.NameRegex(regexp.MustCompile(`^(\d+):(\d+):(\d+)$`)).Role(role.StaticText).Ancestor(playHead)
	nodeInfo, err := w.ui.Info(ctx, headTimeNode)
	if err != nil {
		return "", err
	}
	return nodeInfo.Name, nil
}
// closePromptWindow closes the pop-up prompt window.
// It is a no-op when the dialog's close button never appears.
func (w *WeVideo) closePromptWindow() action.Action {
	dialog := nodewith.NameRegex(regexp.MustCompile("(?i)intercom live chat tour")).Role(role.Dialog)
	dismiss := nodewith.Name("Close").Role(role.Button).Ancestor(dialog).First()
	return uiauto.IfSuccessThen(
		w.ui.WithTimeout(shortUITimeout).WaitUntilExists(dismiss),
		uiauto.NamedAction("close prompt window",
			w.ui.LeftClickUntil(dismiss, w.ui.WithTimeout(shortUITimeout).WaitUntilGone(dismiss))),
	)
}
// Close closes the WeVideo page. It is safe to call when Open never
// succeeded (conn is nil).
func (w *WeVideo) Close(ctx context.Context) {
	if w.conn != nil {
		w.conn.CloseTarget(ctx)
		w.conn.Close()
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filemanager
import (
"context"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/drivefs"
"chromiumos/tast/local/network"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework, in a default variant and
// a chrome-networking fixture variant.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DrivefsBlobDownloadOffline,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verify that a download while offline fails gracefully",
		Contacts: []string{
			"travislane@google.com",
			"chromeos-files-syd@google.com",
		},
		SoftwareDeps: []string{
			"chrome",
			"chrome_internal",
			"drivefs",
		},
		Attr: []string{
			"group:drivefs-cq",
		},
		Data: []string{
			"test_1KB.txt", // local fixture uploaded to Drive during the test
		},
		Timeout: 5 * time.Minute,
		Params: []testing.Param{{
			Fixture: "driveFsStarted",
		}, {
			Name:    "chrome_networking",
			Fixture: "driveFsStartedWithChromeNetworking",
		}},
	})
}
// DrivefsBlobDownloadOffline uploads a small file to Drive, waits for it to
// appear in DriveFS, verifies that reading it while offline fails in a
// timely manner, and finally checks that the download succeeds (with a
// matching MD5) once the network is restored.
func DrivefsBlobDownloadOffline(ctx context.Context, s *testing.State) {
	const (
		retryAttempts = 20
		retryInterval = 5 * time.Second
	)
	fixt := s.FixtValue().(*drivefs.FixtureData)
	apiClient := fixt.APIClient
	driveFsClient := fixt.DriveFs
	// Give the Drive API enough time to remove the file.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	defer driveFsClient.SaveLogsOnError(cleanupCtx, s.HasError)
	// Create the test file with the Drive API
	testFileName := drivefs.GenerateTestFileName(s.TestName()) + ".txt"
	driveFile, err := apiClient.CreateFileFromLocalFile(ctx,
		testFileName, "root", s.DataPath("test_1KB.txt"))
	if err != nil {
		s.Fatal("Could not create test file: ", err)
	}
	s.Logf("Created %s with ID: %s", testFileName, driveFile.Id)
	// Cleanup: Remove the file on the cloud
	defer apiClient.RemoveFileByID(cleanupCtx, driveFile.Id)
	// Wait for file to be available locally
	testFilePath := driveFsClient.MyDrivePath(testFileName)
	testFile, err := driveFsClient.NewFile(testFilePath)
	if err != nil {
		s.Fatal("Could not build DriveFS file: ", err)
	}
	err = action.RetrySilently(retryAttempts, testFile.ExistsAction(), retryInterval)(ctx)
	if err != nil {
		s.Fatal("File not available locally: ", err)
	}
	// Try to checksum the file while offline. This should fail in a timely
	// manner. The read must be on a different goroutine to avoid blocking
	// the test from re-enabling the network.
	failMD5Sum := func(ctx context.Context) error {
		errChan := make(chan error, 1)
		startTime := time.Now()
		go func() {
			_, err := drivefs.MD5SumFile(testFilePath)
			errChan <- err
		}()
		select {
		case err := <-errChan:
			if err == nil {
				s.Error("Expected to fail to checksum file")
			}
			// time.Since is the idiomatic form of time.Now().Sub(startTime).
			s.Logf("Read finished after: %+v with: %+v",
				time.Since(startTime), err)
		case <-time.After(2 * time.Minute):
			s.Error("Expected read to fail before timeout")
		case <-ctx.Done():
			s.Error("Expected read to fail before context timeout")
		}
		return nil
	}
	if err = network.ExecFuncOnChromeOffline(ctx, failMD5Sum); err != nil {
		s.Error("Failed to run checksum offline: ", err)
	}
	// Once online again, we should be able to download.
	md5Sum, err := drivefs.MD5SumFile(testFilePath)
	if err != nil {
		s.Fatal("Failed to checksum file: ", err)
	}
	// Drive reports the checksum in lowercase hex; compare case-insensitively.
	if !strings.EqualFold(md5Sum, driveFile.Md5Checksum) {
		s.Errorf("Checksum mismatch! Got: %v Expected: %v", md5Sum, driveFile.Md5Checksum)
	}
}
|
// Copyright (c) 2017 Kuguar <licenses@kuguar.io> Author: Adrian P.K. <apk@kuguar.io>
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package api
import (
"encoding/json"
"github.com/gorilla/mux"
"github.com/adrianpk/fundacja/app"
"github.com/adrianpk/fundacja/logger"
"github.com/adrianpk/fundacja/models"
"net/http"
"net/url"
"path"
_ "github.com/lib/pq" // Import pq without side effects
"github.com/adrianpk/fundacja/repo"
)
// GetPermissions - Returns a collection containing all permissions.
// Handler for HTTP Get - "/organizations/{organization}/permissions"
func GetPermissions(w http.ResponseWriter, r *http.Request) {
	// Check permissions
	// defer func() {
	//	recover()
	//	app.ShowError(w, app.ErrUnauthorized, app.ErrUnauthorized, http.StatusUnauthorized)
	// }()
	// services.IsAllowed("f254cfe5", loggedInUserID(r))
	// Get ID
	vars := mux.Vars(r)
	orgid := vars["organization"]
	// Get repo
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityNotFound, err, http.StatusInternalServerError)
		return
	}
	// Select
	permissions, err := permissionRepo.GetAll(orgid)
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(PermissionsResource{Data: permissions})
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond. Headers must be set BEFORE WriteHeader: the original called
	// WriteHeader first, so the Content-Type header was silently dropped.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(j)
}
// CreatePermission - Creates a new Permission.
// Handler for HTTP Post - "/organizations/{organization}/permissions/create"
func CreatePermission(w http.ResponseWriter, r *http.Request) {
	// Decode
	var res PermissionResource
	err := json.NewDecoder(r.Body).Decode(&res)
	if err != nil {
		app.ShowError(w, app.ErrRequestParsing, err, http.StatusInternalServerError)
		return
	}
	permission := &res.Data
	// Get repo
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityCreate, err, http.StatusInternalServerError)
		return
	}
	// Persist. The original discarded Create's error and re-checked the
	// stale (nil) err from MakePermissionRepository, so persistence
	// failures were silently reported as success.
	err = permissionRepo.Create(permission)
	if err != nil {
		app.ShowError(w, app.ErrEntityCreate, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(PermissionResource{Data: *permission})
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusNoContent)
		return
	}
	// Respond
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	w.Write(j)
}
// GetPermission - Returns a single Permission by its id or permissionname.
// Handler for HTTP Get - "/organizations/{organization}/permissions/{permission}"
func GetPermission(w http.ResponseWriter, r *http.Request) {
	// A 36-character key is assumed to be a UUID; anything else is a name.
	if key := mux.Vars(r)["permission"]; len(key) == 36 {
		GetPermissionByID(w, r)
		return
	}
	GetPermissionByName(w, r)
}
// GetPermissionByID - Returns a single Permission by its id.
// Handler for HTTP Get - "/organizations/{organization}/permissions/{permission}"
func GetPermissionByID(w http.ResponseWriter, r *http.Request) {
	// Extract the permission id from the route.
	id := mux.Vars(r)["permission"]
	// Build the repository.
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Look the permission up by id.
	permission, err := permissionRepo.Get(id)
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Serialize the entity.
	body, err := json.Marshal(permission)
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(body)
}
// GetPermissionByName - Returns a single Permission by its permissionname.
// Handler for HTTP Get - "/organizations/{organization}/permissions/{permission}"
func GetPermissionByName(w http.ResponseWriter, r *http.Request) {
	// Extract the permission name from the route.
	permissionname := mux.Vars(r)["permission"]
	// Build the repository.
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Look the permission up by name.
	permission, err := permissionRepo.GetByName(permissionname)
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Serialize the entity.
	body, err := json.Marshal(permission)
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(body)
}
// UpdatePermission - Update an existing Permission.
// Handler for HTTP Put - "/organizations/{organization}/permissions/{permission}"
func UpdatePermission(w http.ResponseWriter, r *http.Request) {
	// Get ID
	vars := mux.Vars(r)
	id := vars["permission"]
	// Decode
	var res PermissionResource
	err := json.NewDecoder(r.Body).Decode(&res)
	if err != nil {
		app.ShowError(w, app.ErrRequestParsing, err, http.StatusInternalServerError)
		return
	}
	permission := &res.Data
	// The URL is authoritative for the id, regardless of the request body.
	permission.ID = models.ToNullsString(id)
	// Get repo
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusInternalServerError)
		return
	}
	// Check against current permission
	currentPermission, err := permissionRepo.Get(id)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusUnauthorized)
		return
	}
	// Avoid ID spoofing
	err = verifyID(permission.IdentifiableModel, currentPermission.IdentifiableModel)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusUnauthorized)
		return
	}
	// Update
	err = permissionRepo.Update(permission)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(PermissionResource{Data: *permission})
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusNoContent)
		return
	}
	// Respond
	// NOTE(review): a 204 No Content response is written together with a
	// JSON body; most clients will ignore the body. Consider 200 OK if the
	// body is intended to be consumed — confirm against API consumers.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusNoContent)
	w.Write(j)
}
// DeletePermission - Deletes an existing Permission
// Handler for HTTP Delete - "/organizations/{organization}/permissions/{id}"
func DeletePermission(w http.ResponseWriter, r *http.Request) {
	// Extract the permission id from the route.
	id := mux.Vars(r)["permission"]
	// Build the repository.
	permissionRepo, err := repo.MakePermissionRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Remove the entity.
	if err = permissionRepo.Delete(id); err != nil {
		app.ShowError(w, app.ErrEntityDelete, err, http.StatusInternalServerError)
		return
	}
	// Respond with no body.
	w.WriteHeader(http.StatusNoContent)
}
// permissionIDfromURL extracts the permission id from the second-to-last
// segment of the request URL path.
func permissionIDfromURL(r *http.Request) string {
	parsed, _ := url.Parse(r.URL.Path)
	id := path.Base(path.Dir(parsed.Path))
	logger.Debugf("Permission id in url is %s", id)
	return id
}
// permissionnameFromURL extracts the permission name from the
// second-to-last segment of the request URL path.
func permissionnameFromURL(r *http.Request) string {
	parsed, _ := url.Parse(r.URL.Path)
	permissionname := path.Base(path.Dir(parsed.Path))
	logger.Debugf("Permissionname in url is %s", permissionname)
	return permissionname
}
|
package layout
import (
"fmt"
"github.com/cstockton/go-conv"
"strings"
)
// CheckBoxWidget is a multi-select (checkbox) form widget. Value holds the
// selected options; numeric entries are normalized to float64 by SetValue.
type CheckBoxWidget struct {
	Choice
	Value  []interface{} `json:"value"`  // selected values
	Layout string        `json:"layout"` // NOTE(review): rendering hint; semantics defined by the consumer — confirm
}
// GetValue returns the current selection, lazily replacing a nil slice
// with an empty one so callers never observe a nil Value.
func (w *CheckBoxWidget) GetValue() interface{} {
	if w.Value == nil {
		w.Value = make([]interface{}, 0)
	}
	return w.Value
}
// SetValue replaces the selection. val must be a []interface{}; every
// element that parses as a number is stored as float64, all other elements
// are stored unchanged.
func (w *CheckBoxWidget) SetValue(val interface{}) error {
	items, ok := val.([]interface{})
	if !ok {
		return fmt.Errorf("字段: %s 的值不是有效的 Slice 格式", w.Field.GetLabel())
	}
	w.Value = make([]interface{}, 0)
	for _, item := range items {
		if number, err := conv.Float64(item); err == nil {
			w.Value = append(w.Value, number)
		} else {
			w.Value = append(w.Value, item)
		}
	}
	return nil
}
// Diff compares this widget's selection with another CheckBoxWidget and
// reports a single "change" record when the selections differ in length or
// in any element. The boolean result reports whether a difference exists.
func (this *CheckBoxWidget) Diff(widget Widget) (Diff, bool) {
	// NOTE(review): GetValue implementations may lazily allocate an empty
	// slice, in which case this nil/nil short-circuit can never trigger —
	// confirm against the Widget contract.
	if this.Value == nil && widget.GetValue() == nil {
		return Diff{}, false
	}
	other := widget.(*CheckBoxWidget)
	// changeRecord builds the single diff entry emitted for any change;
	// the original duplicated this literal in both difference branches.
	changeRecord := func() []DiffItem {
		return []DiffItem{{
			Name:      this.Field.GetLabel(),
			FieldName: this.Field.GetName(),
			Type:      "change",
			Original:  this.value2string(),
			Last:      other.value2string(),
		}}
	}
	if len(this.Value) != len(other.Value) {
		return changeRecord(), true
	}
	for i := 0; i < len(this.Value); i++ {
		if this.Value[i] != other.Value[i] {
			return changeRecord(), true
		}
	}
	// No difference: empty (non-nil) record list, matching the original.
	return make([]DiffItem, 0), false
}
// value2string renders the selected values as a comma-separated string.
func (this *CheckBoxWidget) value2string() string {
	if len(this.Value) == 0 {
		return ""
	}
	// Build with a slice + strings.Join rather than += in a loop: linear
	// instead of quadratic, and unlike the old TrimRight(str, ",") it
	// cannot strip legitimate trailing commas from the final value itself.
	parts := make([]string, len(this.Value))
	for i, v := range this.Value {
		parts[i] = fmt.Sprintf("%v", v)
	}
	return strings.Join(parts, ",")
}
// String implements fmt.Stringer by delegating to value2string.
func (this *CheckBoxWidget) String() string {
	return this.value2string()
}
|
package keepassrpc
import (
"crypto/sha256"
"fmt"
"math/big"
"os"
"testing"
)
// mult is the expected SRP multiplier k in big-endian bytes (20 bytes).
var mult = []byte("" +
	"\xb7\x86\x7f\x12\x99\xda\x8c\xc2\x4a\xb9" +
	"\x3e\x08\x98\x6e\xbc\x4d\x6a\x47\x8a\xd0" +
	"")

// k is the reference multiplier value tests compare against.
var k = new(big.Int).SetBytes(mult)

// Fixed SRP test inputs.
var username = "username"
var salt = "salt"
var password = "password"

// snp is the expected private key x in hex (checked by TestPrivateKey).
var snp = "13601bda4ea78e55a07b98866d2be6be0744e3866f13c00c811cab608a28f322"
var pkey = new(big.Int)
var value = big.NewInt(1)

// A is the client public value g^value mod N.
var A = new(big.Int).Exp(Generator, value, Prime)
var server = big.NewInt(2)

// pubs is the expected scrambling parameter u in hex
// (checked by TestScramblingParameter).
var pubs = "785f3ec7eb32f30b90cd0fcf3657d388b5ff4297f2f9716ff66e9b69c05ddd09"

// testClient is the shared SRP context under test
// (presumably initialized in TestMain — confirm, not visible here).
var testClient *SRPContext
// TestMultiplier checks the SRP multiplier k against the reference bytes.
func TestMultiplier(t *testing.T) {
	p := testClient.Multiplier()
	if k.Cmp(p) != 0 {
		// Fixed typo in the failure message ("Multipler" -> "Multiplier").
		t.Error("Multiplier() did not return standard result")
	}
}
// TestSetServer verifies that a valid server public value is accepted and
// stored.
func TestSetServer(t *testing.T) {
	if err := testClient.SetServer(server); err != nil {
		t.Error("SetServer() failed")
	}
	if testClient.Server.Cmp(server) != 0 {
		t.Error("SetServer() set wrong value")
	}
}

// TestSetServerPrime verifies that a server value equal to the group prime
// (B ≡ 0 mod N) is rejected.
func TestSetServerPrime(t *testing.T) {
	ctx := &SRPContext{
		Private: new(big.Int).Set(value),
		Public:  new(big.Int).Exp(Generator, value, Prime),
	}
	if ctx.SetServer(new(big.Int).Set(Prime)) == nil {
		t.Error("SetServer() didn't catch prime match")
	}
}

// TestPrivateKey checks the derived private key against the known digest.
func TestPrivateKey(t *testing.T) {
	if got := testClient.privateKey().Text(16); got != snp {
		t.Error("privateKey() didn't match input")
	}
}

// TestScramblingParameter checks u against its precomputed hex form.
func TestScramblingParameter(t *testing.T) {
	if got := testClient.scramblingParameter().Text(16); got != pubs {
		t.Error("scramblingParameter() didn't match input")
	}
}
// TestPremasterSecret recomputes S = (B - k*g^x)^(a + u*x) mod N by hand
// and compares it with the client's premaster secret.
func TestPremasterSecret(t *testing.T) {
	x, _ := new(big.Int).SetString(snp, 16)
	u, _ := new(big.Int).SetString(pubs, 16)
	gxN := new(big.Int).Exp(Generator, x, Prime)
	// The original computed an Exp whose result was immediately overwritten
	// by Mul; k*g^x is all that is needed here.
	kgxN := new(big.Int).Mul(k, gxN)
	ux := new(big.Int).Mul(u, x)
	aux := new(big.Int).Add(value, ux)
	tmp := new(big.Int).Sub(server, kgxN)
	S := new(big.Int).Exp(tmp, aux, Prime)
	if S.Cmp(testClient.premasterSecret()) != 0 {
		t.Error("premasterSecret() didn't match input")
	}
}
// TestVerifier checks v = g^x mod N against the context's verifier.
func TestVerifier(t *testing.T) {
	want := new(big.Int).Exp(Generator, pkey, Prime)
	got, err := testClient.Verifier()
	if err != nil {
		t.Error("Verifier() returned an error")
	}
	if want.Cmp(got) != 0 {
		t.Error("Verifier() didn't match input")
	}
}

// TestVerifierFailure checks that a context without server state cannot
// produce a verifier.
func TestVerifierFailure(t *testing.T) {
	ctx := &SRPContext{
		Private: new(big.Int).Set(value),
		Public:  new(big.Int).Exp(Generator, value, Prime),
	}
	if _, err := ctx.Verifier(); err == nil {
		t.Error("Verifier() failed to catch premature generation")
	}
}
// TestEvidence recomputes M = H(A || B || S) and compares it with the
// client's evidence message.
func TestEvidence(t *testing.T) {
	secret := testClient.premasterSecret()
	digest := sha256.New()
	fmt.Fprintf(digest, "%X%X%X", A, server, secret)
	want := new(big.Int).SetBytes(digest.Sum(nil))
	got, err := testClient.Evidence()
	if err != nil {
		t.Error("Evidence() returned an error")
	}
	if want.Cmp(got) != 0 {
		t.Error("Evidence() didn't match input")
	}
}

// TestEvidenceFailure checks that evidence cannot be produced before the
// exchange is complete.
func TestEvidenceFailure(t *testing.T) {
	ctx := &SRPContext{
		Private: new(big.Int).Set(value),
		Public:  new(big.Int).Exp(Generator, value, Prime),
	}
	if _, err := ctx.Evidence(); err == nil {
		t.Error("Evidence() failed to catch premature generation")
	}
}
// TestServerEvidence recomputes M1 = H(A || M || S) and compares it with
// the context's server evidence. Note the middle verb is %x (lowercase),
// mirroring the implementation's formatting of M.
func TestServerEvidence(t *testing.T) {
	secret := testClient.premasterSecret()
	M, err := testClient.Evidence()
	if err != nil {
		t.Error("Evidence() returned an error")
	}
	digest := sha256.New()
	fmt.Fprintf(digest, "%X%x%X", A, M, secret)
	want := new(big.Int).SetBytes(digest.Sum(nil))
	got, err := testClient.ServerEvidence()
	if err != nil {
		t.Error("ServerEvidence() returned an error")
	}
	if want.Cmp(got) != 0 {
		t.Error("ServerEvidence() didn't match input")
	}
}

// TestServerEvidenceFailure checks that server evidence cannot be produced
// before the exchange is complete.
func TestServerEvidenceFailure(t *testing.T) {
	ctx := &SRPContext{
		Private: new(big.Int).Set(value),
		Public:  new(big.Int).Exp(Generator, value, Prime),
	}
	if _, err := ctx.ServerEvidence(); err == nil {
		t.Error("ServerEvidence() failed to catch premature generation")
	}
}
// TestMain assembles the shared SRP client fixture used by the whole suite
// and then runs it.
func TestMain(m *testing.M) {
	testClient = &SRPContext{
		Private:  new(big.Int).Set(value),
		Public:   new(big.Int).Exp(Generator, value, Prime),
		Salt:     salt,
		Password: password,
	}
	// The original ignored this error; a failed SetServer would make every
	// dependent test fail in confusing ways later.
	if err := testClient.SetServer(server); err != nil {
		panic(err)
	}
	var ok bool
	if pkey, ok = new(big.Int).SetString(snp, 16); !ok {
		panic("Conversion of snp failed")
	}
	os.Exit(m.Run())
}
|
package main
import (
"strconv"
"github.com/codegangsta/cli"
)
// ipOps holds the "ip" CLI command tree; it is registered in init and
// presumably merged into the application's command list elsewhere.
var ipOps []cli.Command
func init() {
	// Shared --id/-i flag used by all per-IP subcommands.
	ipdIdFlag := cli.StringFlag{
		Name:  "id, i",
		Usage: "ID of the IP address",
	}
	ipOps = []cli.Command{
		{
			Name:        "ip",
			Description: "1&1 public IP operations",
			Usage:       "Public IP operations.",
			Subcommands: []cli.Command{
				{
					// ip create --type --dns --datacenterid
					Name:  "create",
					Usage: "Allocates new public IP.",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name:  "type, t",
							Usage: "IP address type, IPV4 or IPV6. Currently, only IPV4 is allowed.",
						},
						cli.StringFlag{
							Name:  "dns",
							Usage: "Reverse DNS name.",
						},
						cli.StringFlag{
							Name:  "datacenterid",
							Usage: "Data center ID of the IP address.",
						},
					},
					Action: createPublicIP,
				},
				{
					// ip info --id
					Name:   "info",
					Usage:  "Shows information about public IP.",
					Flags:  []cli.Flag{ipdIdFlag},
					Action: showIP,
				},
				{
					// ip list (queryFlags: shared paging/filter flags)
					Name:   "list",
					Usage:  "Lists all available IPs.",
					Flags:  queryFlags,
					Action: listIPs,
				},
				{
					// ip rm --id
					Name:   "rm",
					Usage:  "Deletes public IP.",
					Flags:  []cli.Flag{ipdIdFlag},
					Action: deleteIP,
				},
				{
					// ip update --id --dns
					Name:  "update",
					Usage: "Updates reverse DNS of IP.",
					Flags: []cli.Flag{
						ipdIdFlag,
						cli.StringFlag{
							Name:  "dns",
							Usage: "New reverse DNS name.",
						},
					},
					Action: updateIP,
				},
			},
		},
	}
}
// createPublicIP allocates a new public IP from the type, dns and
// datacenterid flags and prints the API result.
func createPublicIP(ctx *cli.Context) {
	_, ip, err := api.CreatePublicIp(ctx.String("type"), ctx.String("dns"), ctx.String("datacenterid"))
	exitOnError(err)
	output(ctx, ip, okWaitMessage, false, nil, nil)
}

// listIPs prints a table of all public IPs visible to the account.
func listIPs(ctx *cli.Context) {
	ips, err := api.ListPublicIps(getQueryParams(ctx))
	exitOnError(err)
	rows := make([][]string, len(ips))
	for i, ip := range ips {
		dhcp := ""
		if ip.IsDhcp != nil {
			dhcp = strconv.FormatBool(*ip.IsDhcp)
		}
		rows[i] = []string{
			ip.Id,
			ip.IpAddress,
			dhcp,
			ip.ReverseDns,
			ip.State,
			getDatacenter(ip.Datacenter),
		}
	}
	header := []string{"ID", "IP Address", "DHCP", "Reverse DNS", "State", "Data Center"}
	output(ctx, ips, "", false, &header, &rows)
}
// showIP prints details for the public IP identified by --id.
func showIP(ctx *cli.Context) {
	ipID := getRequiredOption(ctx, "id")
	ip, err := api.GetPublicIp(ipID)
	exitOnError(err)
	output(ctx, ip, "", true, nil, nil)
}

// deleteIP releases the public IP identified by --id.
func deleteIP(ctx *cli.Context) {
	ipID := getRequiredOption(ctx, "id")
	ip, err := api.DeletePublicIp(ipID)
	exitOnError(err)
	output(ctx, ip, okWaitMessage, false, nil, nil)
}

// updateIP sets a new reverse DNS name (--dns) on the IP given by --id.
func updateIP(ctx *cli.Context) {
	ipID := getRequiredOption(ctx, "id")
	ip, err := api.UpdatePublicIp(ipID, ctx.String("dns"))
	exitOnError(err)
	output(ctx, ip, "", false, nil, nil)
}
|
package models
import "github.com/go-swagger/go-swagger/strfmt"
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
/*User User user
swagger:model User
*/
// User is the swagger-generated user model; all fields are optional
// pointers so absent JSON keys are distinguishable from zero values.
// NOTE: this file is generated — regeneration will overwrite edits.
type User struct {
	/* Email email
	 */
	Email *string `json:"email,omitempty"`
	/* FirstName first name
	 */
	FirstName *string `json:"firstName,omitempty"`
	/* ID id
	 */
	ID *int64 `json:"id,omitempty"`
	/* LastName last name
	 */
	LastName *string `json:"lastName,omitempty"`
	/* Password password
	 */
	Password *string `json:"password,omitempty"`
	/* Phone phone
	 */
	Phone *string `json:"phone,omitempty"`
	/* User Status
	 */
	UserStatus *int32 `json:"userStatus,omitempty"`
	/* Username username
	 */
	Username *string `json:"username,omitempty"`
}
// Validate validates this user. The generated model has no constraints, so
// it always succeeds; formats is accepted for interface compatibility.
func (m *User) Validate(formats strfmt.Registry) error {
	return nil
}
|
package spotifyclient
import (
"github.com/ankjevel/spotify"
)
// MockSpotifyClient is a canned-response test double: each method returns
// the corresponding pre-configured field, so tests set the fields and then
// assert on behavior.
type MockSpotifyClient struct {
	// PlayerStateResponse holds the pair returned by PlayerState.
	PlayerStateResponse struct {
		p *spotify.PlayerState
		e error
	}
	ShuffleResponse error
	PlayResponse error
	PauseResponse error
	NextResponse error
	PreviousResponse error
}
// PlayerState returns the configured state/error pair.
func (c *MockSpotifyClient) PlayerState() (*spotify.PlayerState, error) {
	return c.PlayerStateResponse.p, c.PlayerStateResponse.e
}
// Shuffle ignores its argument and returns the configured error.
func (c *MockSpotifyClient) Shuffle(bool) error {
	return c.ShuffleResponse
}
// Play returns the configured error.
func (c *MockSpotifyClient) Play() error {
	return c.PlayResponse
}
// Pause returns the configured error.
func (c *MockSpotifyClient) Pause() error {
	return c.PauseResponse
}
// Next returns the configured error.
func (c *MockSpotifyClient) Next() error {
	return c.NextResponse
}
// Previous returns the configured error.
func (c *MockSpotifyClient) Previous() error {
	return c.PreviousResponse
}
|
package ecs
// Graph is an auto-relation: one where both the A-side and B-side are the
// same Core system.
type Graph struct {
	Relation
}
// NewGraph creates a new graph relation for the given Core system.
func NewGraph(core *Core, flags RelationFlags) *Graph {
	G := &Graph{}
	G.Init(core, flags)
	return G
}
// Init initializes the graph relation; useful for embedding. The same core
// and flags are passed as both sides of the underlying Relation, which is
// what makes this an auto-relation.
func (G *Graph) Init(core *Core, flags RelationFlags) {
	G.Relation.Init(core, flags, core, flags)
}
// Roots returns the Entities that never appear on the B-side of a matching
// relation — i.e. those with no in-relation.
func (G *Graph) Roots(
	tcl TypeClause,
	where func(ent, a, b Entity, r ComponentType) bool,
) []Entity {
	ids, count := G.roots(tcl, where)
	ents := make([]Entity, 0, count)
	for id, isRoot := range ids {
		if !isRoot {
			continue
		}
		ents = append(ents, G.aCore.Ref(id))
	}
	return ents
}
// roots scans every relation matched by tcl (and the optional where
// filter) and classifies entity ids: an id maps to true while it has only
// been seen on the A-side (candidate root) and is permanently demoted to
// false once it appears on the B-side (it has an in-edge). n counts the
// ids currently true so callers can presize results.
func (G *Graph) roots(
	tcl TypeClause,
	where func(ent, a, b Entity, r ComponentType) bool,
) (map[EntityID]bool, int) {
	// TODO: leverage index if available
	it := G.Iter(tcl)
	triset := make(map[EntityID]bool, it.Count())
	n := 0
	for it.Next() {
		// Relation ids are 1-based; i indexes the parallel aids/bids slices.
		i := it.ID() - 1
		if where != nil && !where(
			it.Entity(),
			G.aCore.Ref(G.aids[i]),
			G.aCore.Ref(G.bids[i]),
			// NOTE(review): leaves() passes G.types[i] here instead of
			// it.Type() — confirm the two are interchangeable.
			it.Type(),
		) {
			continue
		}
		aid, bid := G.aids[i], G.bids[i]
		// First sighting of aid (on the A-side) marks it a candidate.
		if _, def := triset[aid]; !def {
			triset[aid] = true
			n++
		}
		// A B-side sighting disqualifies bid; decrement only if it was
		// still counted as a candidate.
		if in := triset[bid]; in {
			n--
		}
		triset[bid] = false
	}
	return triset, n
}
// Leaves returns the Entities that never appear on the A-side of a
// matching relation — i.e. those with no out-relation.
func (G *Graph) Leaves(
	tcl TypeClause,
	where func(ent, a, b Entity, r ComponentType) bool,
) []Entity {
	ids, count := G.leaves(tcl, where)
	ents := make([]Entity, 0, count)
	for id, isLeaf := range ids {
		if !isLeaf {
			continue
		}
		ents = append(ents, G.aCore.Ref(id))
	}
	return ents
}
// leaves mirrors roots with the sides swapped: an id maps to true while it
// has only been seen on the B-side (candidate leaf) and is permanently
// demoted to false once it appears on the A-side (it has an out-edge).
// n counts the ids currently true.
func (G *Graph) leaves(
	tcl TypeClause,
	where func(ent, a, b Entity, r ComponentType) bool,
) (map[EntityID]bool, int) {
	// TODO: leverage index if available
	it := G.Iter(tcl)
	triset := make(map[EntityID]bool, it.Count())
	n := 0
	for it.Next() {
		// Relation ids are 1-based; i indexes the parallel slices.
		i := it.ID() - 1
		if where != nil && !where(
			it.Entity(),
			G.aCore.Ref(G.aids[i]),
			G.aCore.Ref(G.bids[i]),
			// NOTE(review): roots() passes it.Type() here — confirm the
			// two are interchangeable.
			G.types[i],
		) {
			continue
		}
		aid, bid := G.aids[i], G.bids[i]
		// First sighting of bid (on the B-side) marks it a candidate.
		if _, def := triset[bid]; !def {
			triset[bid] = true
			n++
		}
		// An A-side sighting disqualifies aid.
		if in := triset[aid]; in {
			n--
		}
		triset[aid] = false
	}
	return triset, n
}
|
package main
import (
	"context"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"path"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// NOTE(review): this file calls httprouter.New and uses httprouter.Params
// but never imports the router package — the missing import (e.g.
// github.com/julienschmidt/httprouter) must be added for it to compile.
// User is the document shape stored in the collection.
// NOTE(review): despite its name, the fields (Title/Body/Tags) look like a
// post rather than a user — confirm the intended schema.
type User struct {
	ID primitive.ObjectID `json:"_id,omitempty" bson:"_id,omitempty"`
	Title string `json:"title" bson:"title,omitempty"`
	Body string `json:"body" bson:"body,omitempty"`
	Tags string `json:"tags" bson:"tags,omitempty"`
}
// collection is the shared MongoDB collection handle, initialized once at
// package load via ConnecttoDB.
var collection = ConnecttoDB()
// main wires the HTTP routes and starts the server on :8081.
func main() {
	// Init router.
	router := httprouter.New()
	// Route registrations.
	// NOTE(review): the original registered two POST handlers on "/user"
	// (httprouter panics on a duplicate method+path registration) and used
	// a pattern containing a query string ("/user/search?q=title"), which
	// is matched literally and can never be hit. Both are fixed below.
	// showHome, getUserPosts and searchUser are not defined in this file —
	// confirm they exist elsewhere in the package.
	router.GET("/", showHome)
	router.GET("/User", getUserPosts)
	router.GET("/User/:id", getUser)
	router.GET("/user/search", searchUser) // handler reads the "q" query parameter
	router.POST("/user", createUser)
	router.POST("/post", createPost) // was a duplicate POST /user
	// Listen on port 8081.
	log.Fatal(http.ListenAndServe(":8081", router))
}
// ConnecttoDB : function to connect to mongoDB locally
// Returns the "InstaPosts" collection of the "Appointy" database; the
// process exits if the driver cannot be configured.
// SECURITY(review): the connection string embeds credentials in source —
// move them to configuration/environment.
func ConnecttoDB() *mongo.Collection {
	// Set client options
	//change the URI according to your database
	clientOptions := options.Client().ApplyURI("mongodb+srv://abhitcr1:RandomPassword@cluster0.yg0tb.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
	// Connect to MongoDB
	// NOTE(review): Connect does not verify reachability; a Ping would be
	// needed to confirm the "Connected" message is truthful.
	client, err := mongo.Connect(context.TODO(), clientOptions)
	//Error Handling
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Connected to MongoDB!")
	//DB collection address which we are going to use
	//available to functions of all scope
	collection := client.Database("Appointy").Collection("InstaPosts")
	return collection
}
// findUser looks up the user whose userId is the last path segment of the
// request URI and writes the document as JSON.
func findUser(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	// The user id is the final path segment.
	key := path.Base(r.URL.RequestURI())
	var result bson.M
	err := collection.FindOne(context.TODO(), bson.M{"userId": key}).Decode(&result)
	if err != nil {
		// A missing document must not kill the whole server (the original
		// called log.Fatal here); report it to the client instead.
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	json.NewEncoder(w).Encode(result)
}
// getUsers writes every user document in the collection as a JSON array.
// NOTE(review): this function was originally also named getUser, colliding
// with the by-id handler later in the file (a compile error); it has been
// renamed. No route in main referenced the list handler by this name.
func getUsers(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	var users []User
	// Empty filter: match every document.
	cur, err := collection.Find(context.TODO(), bson.M{})
	if err != nil {
		log.Fatal(err)
	}
	// Close the cursor once finished.
	defer cur.Close(context.TODO())
	// Decode each document and collect it.
	for cur.Next(context.TODO()) {
		var u User // was "var User User", which shadowed the type name
		if err := cur.Decode(&u); err != nil {
			log.Fatal(err)
		}
		users = append(users, u)
	}
	if err := cur.Err(); err != nil {
		log.Fatal(err)
	}
	json.NewEncoder(w).Encode(users)
}
//Function to create a new user in Database
func createUser(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
//Checking if the method is the post method or not
if r.Method == "POST"{
//Parsing the Values
r.ParseForm()
//Hashing the Passowrd
pass := md5.Sum([]byte(r.Form.Get("email")));
//Pushing the data into DB
result, err := collection.InsertOne(context.TODO(),bson.D{
{Key: "name", Value: r.Form.Get("name")},
{Key: "passowrd", Value: string(pass[:])},
{Key : "email" , Value : r.Form.Get("email")},
{Key : "userId" , Value : r.Form.Get("id")},
})
//Function to create a POST
func createPost(w http.ResponseWriter, r *http.Request){
w.Header().Set("Content-Type", "application/json")
//Checking the Method is POST
if r.Method == "POST"{
//Parsing the Form
r.ParseForm()
//Adding the timestamp
dt := time.Now();
dt.Format("01-02-2006 15:04:05")
//Pushing the Data into DB
result, err := postCollection.InsertOne(context.TODO(),bson.D{
{Key: "id", Value: r.Form.Get("pid")},
{Key: "caption", Value: r.Form.Get("caption")},
{Key : "url" , Value : r.Form.Get("url")},
{Key : "time" , Value : dt.String()},
{Key : "userId" , Value : r.Form.Get("userId")},
})
//Function to search User by ID
// getUser fetches one document by its Mongo ObjectID (the :id route
// parameter) and writes it as JSON.
// NOTE(review): "var User User" shadows the type name — rename the local.
// ObjectIDFromHex's error is silently discarded, so a malformed id becomes
// a zero-ID lookup; and log.Fatal on a miss kills the whole server —
// return a 404 instead.
func getUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	var User User
	// string to primitive.ObjectID (typeCasting)
	id, _ := primitive.ObjectIDFromHex(ps.ByName("id"))
	// creating filter of unordered map with ID as input
	filter := bson.M{"_id": id}
	//Searching in DB with given ID as keyword
	err := collection.FindOne(context.TODO(), filter).Decode(&User)
	//Error Handling
	if err != nil {
		log.Fatal(err)
	}
	json.NewEncoder(w).Encode(User)
}
func userPost(w http.ResponseWriter, r *http.Request){
w.Header().Set("Content-Type", "application/json")
//Extracting the UserId
url := r.URL.RequestURI()
key := path.Base(url)
//Finding the posts made by that user
filterCursor, err := postCollection.Find(context.TODO(), bson.M{"userId": key})
if err != nil {
log.Fatal(err)
}
//Appending it to the array and sending it back
var postFilter []bson.M
if err = filterCursor.All(context.TODO(), &postFilter); err != nil {
log.Fatal(err)
}
json.NewEncoder(w).Encode(postFilter)
}
fmt.Fprintf(w, `Hello world`)
// searchUserUsingID finds the first document whose title equals the "q"
// query parameter and writes it as JSON.
// NOTE(review): the type InstaPosts is not defined in this file — confirm
// it exists elsewhere; "var InstaPosts InstaPosts" also shadows that type
// name. log.Fatal on a miss kills the whole server — return 404 instead.
func searchUserUsingID(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	var InstaPosts InstaPosts
	//recovers the argument of search query present in URL after "q"
	title := string(r.URL.Query().Get("q"))
	//makes an unordered map filter of title
	filter := bson.M{"title": title}
	//Searching in DB with given title as keyword
	err := collection.FindOne(context.TODO(), filter).Decode(&InstaPosts)
	//Error Handling
	if err != nil {
		log.Fatal(err)
	}
	json.NewEncoder(w).Encode(InstaPosts)
}
|
package internal
import (
"testing"
"path/filepath"
"log"
"io/ioutil"
"os"
)
// createIndex writes content to $TMPDIR/INDEX and opens an Index over the
// temp directory, aborting the process on any error. It returns the index
// and the path of the file for later inspection/cleanup.
func createIndex(content string) (*Index, string) {
	indexPath := filepath.Join(os.TempDir(), "INDEX")
	// 0666 matches the mode os.Create would have used.
	if err := ioutil.WriteFile(indexPath, []byte(content), 0666); err != nil {
		log.Fatal(err)
	}
	idx, err := OpenIndex(os.TempDir())
	if err != nil {
		log.Fatal(err)
	}
	return idx, indexPath
}
func readindex(path string) string {
fd, err := os.Open(path)
if err != nil {
log.Fatal(err)
}
defer fd.Close()
r, err := ioutil.ReadAll(fd)
if err != nil {
log.Fatal(err)
}
return string(r)
}
// TestIndex updates an empty index with one entry and verifies both the
// on-disk representation and the in-memory lookup.
func TestIndex(t *testing.T) {
	idx, indexPath := createIndex("")
	defer os.Remove(indexPath)
	idx.Update("foo")
	idx.Close()
	if content := readindex(indexPath); content != "foo\n" {
		t.Errorf("Got %q != %q", content, "foo\n")
	}
	if got := idx.Get("foo"); got != "foo" {
		t.Errorf("Got %q != %q", got, "foo")
	}
}
|
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"crypto/rand"
"fmt"
"io"
"net"
"os"
"testing"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)
type m map[string]interface{}
// newSimpleServer creates a nebula instance with many assumptions: a /24
// VPN network derived from udpIp, a 5-minute cert signed by the given CA,
// an allow-all firewall, and listening on udpIp:4242. overrides, if
// non-nil, is merged over the generated config (override values win).
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
	l := NewTestLogger()
	// Derive the VPN address from the UDP address.
	// NOTE(review): bumping byte [1] by 128 offsets the second byte of the
	// (16-byte) IP — presumably to keep VPN and underlay networks disjoint;
	// confirm intent.
	vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
	copy(vpnIpNet.IP, udpIp)
	vpnIpNet.IP[1] += 128
	udpAddr := net.UDPAddr{
		IP:   udpIp,
		Port: 4242,
	}
	// Issue a short-lived node certificate from the CA.
	_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
	caB, err := caCrt.MarshalToPEM()
	if err != nil {
		panic(err)
	}
	// Baseline config: PKI material, allow-all firewall, fixed listener,
	// per-test-name log timestamps, short timers for fast tests.
	mc := m{
		"pki": m{
			"ca":   string(caB),
			"cert": string(myPEM),
			"key":  string(myPrivKey),
		},
		//"tun": m{"disabled": true},
		"firewall": m{
			"outbound": []m{{
				"proto": "any",
				"port":  "any",
				"host":  "any",
			}},
			"inbound": []m{{
				"proto": "any",
				"port":  "any",
				"host":  "any",
			}},
		},
		//"handshakes": m{
		//	"try_interval": "1s",
		//},
		"listen": m{
			"host": udpAddr.IP.String(),
			"port": udpAddr.Port,
		},
		"logging": m{
			"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
			"level":            l.Level.String(),
		},
		"timers": m{
			"pending_deletion_interval": 2,
			"connection_alive_interval": 2,
		},
	}
	// Merge the baseline under the caller's overrides (mergo fills only
	// keys missing from overrides, so override values take precedence).
	if overrides != nil {
		err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
		if err != nil {
			panic(err)
		}
		mc = overrides
	}
	cb, err := yaml.Marshal(mc)
	if err != nil {
		panic(err)
	}
	c := config.NewC(l)
	c.LoadString(string(cb))
	control, err := nebula.Main(c, false, "e2e-test", l, nil)
	if err != nil {
		panic(err)
	}
	return control, vpnIpNet, &udpAddr, c
}
// newTestCaCert will generate a CA cert constrained to the given ips,
// subnets and groups (each optional). Zero before/after times default to
// now ∓60s. Returns the cert, its public key, private key, and PEM form.
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		// The original silently discarded this error; a failed key
		// generation would otherwise surface later as an opaque signing
		// failure.
		panic(err)
	}
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}
	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}
	nc := &cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "test ca",
			NotBefore:      time.Unix(before.Unix(), 0),
			NotAfter:       time.Unix(after.Unix(), 0),
			PublicKey:      pub,
			IsCA:           true,
			InvertedGroups: make(map[string]struct{}),
		},
	}
	if len(ips) > 0 {
		nc.Details.Ips = ips
	}
	if len(subnets) > 0 {
		nc.Details.Subnets = subnets
	}
	if len(groups) > 0 {
		nc.Details.Groups = groups
	}
	err = nc.Sign(cert.Curve_CURVE25519, priv)
	if err != nil {
		panic(err)
	}
	pem, err := nc.MarshalToPEM()
	if err != nil {
		panic(err)
	}
	return nc, pub, priv, pem
}
// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in.
// Returns the cert, the X25519 public key, the marshaled private key, and
// the PEM encoding.
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
	// Issuer is the SHA-256 fingerprint of the signing CA.
	issuer, err := ca.Sha256Sum()
	if err != nil {
		panic(err)
	}
	// Zero times default to now ∓60s, rounded to whole seconds.
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}
	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}
	pub, rawPriv := x25519Keypair()
	nc := &cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           name,
			Ips:            []*net.IPNet{ip},
			Subnets:        subnets,
			Groups:         groups,
			NotBefore:      time.Unix(before.Unix(), 0),
			NotAfter:       time.Unix(after.Unix(), 0),
			PublicKey:      pub,
			IsCA:           false,
			Issuer:         issuer,
			InvertedGroups: make(map[string]struct{}),
		},
	}
	// Sign with the CA's key using the CA's own curve.
	err = nc.Sign(ca.Details.Curve, key)
	if err != nil {
		panic(err)
	}
	pem, err := nc.MarshalToPEM()
	if err != nil {
		panic(err)
	}
	return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
// x25519Keypair generates a Curve25519 key pair and returns (public,
// private), panicking on entropy or curve failures.
func x25519Keypair() ([]byte, []byte) {
	priv := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, priv); err != nil {
		panic(err)
	}
	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	return pub, priv
}
// doneCb signals that the guarded test body has completed.
type doneCb func()

// deadline fails the test if the returned callback is not invoked within
// the given number of seconds.
func deadline(t *testing.T, seconds time.Duration) doneCb {
	timeout := time.After(seconds * time.Second)
	// Buffered so the returned callback never blocks, even when the
	// watchdog has already fired and stopped receiving (the original
	// unbuffered channel would deadlock the caller in that case).
	done := make(chan bool, 1)
	go func() {
		select {
		case <-timeout:
			// t.Fatal/FailNow must only be called from the goroutine
			// running the test; use t.Error from this watchdog goroutine.
			t.Error("Test did not finish in time")
		case <-done:
		}
	}()
	return func() {
		done <- true
	}
}
// assertTunnel round-trips a UDP payload in both directions across the
// tunnel and asserts that each packet arrives intact.
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
	// B -> A
	controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
	fromB := r.RouteForAllUntilTxTun(controlA)
	assertUdpPacket(t, []byte("Hi from B"), fromB, vpnIpB, vpnIpA, 90, 80)
	// A -> B
	controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
	fromA := r.RouteForAllUntilTxTun(controlB)
	assertUdpPacket(t, []byte("Hello from A"), fromA, vpnIpA, vpnIpB, 90, 80)
}
// assertHostInfoPair verifies that two nebula controls each hold a host
// info entry for the other side, with matching VPN addresses, underlay
// addresses/ports, and cross-referenced local/remote indexes.
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
	// Get both host infos
	hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
	assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")
	hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
	assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")
	// Check that both vpn and real addr are correct
	assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
	assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")
	// To16 normalizes both sides so IPv4/IPv6 representations compare equal.
	assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
	assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")
	assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
	assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
	// Check that our indexes match
	assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
	assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")
	//TODO: Would be nice to assert this memory
	//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
	//	hBbyIndex := hmA.Indexes[hBinA.localIndexId]
	//	assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
	//	assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
	//
	//	//TODO: remote indexes are susceptible to collision
	//	hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
	//	assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
	//	assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
	//}
	//
	//// Check hostmap indexes too
	//checkIndexes("hmA", hmA, hBinA)
	//checkIndexes("hmB", hmB, hAinB)
}
// assertUdpPacket decodes b as IPv4/UDP and asserts addresses, ports and
// payload match the expected values.
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
	packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
	// Use comma-ok type assertions: the original asserted NotNil *after* an
	// unchecked assertion, which panics on a missing layer before the
	// assert can report anything.
	v4, ok := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
	if !assert.True(t, ok, "No ipv4 data found") {
		return
	}
	assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
	assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
	udp, ok := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
	if !assert.True(t, ok, "No udp data found") {
		return
	}
	assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
	assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")
	data := packet.ApplicationLayer()
	// Guard against a nil payload before dereferencing it.
	if !assert.NotNil(t, data) {
		return
	}
	assert.Equal(t, expected, data.Payload(), "Data was incorrect")
}
// NewTestLogger builds a logrus logger whose verbosity is driven by the
// TEST_LOGS environment variable: unset = fully silent, "2" = debug,
// "3" = trace, anything else = info.
func NewTestLogger() *logrus.Logger {
	logger := logrus.New()
	verbosity := os.Getenv("TEST_LOGS")
	if verbosity == "" {
		logger.SetOutput(io.Discard)
		logger.SetLevel(logrus.PanicLevel)
		return logger
	}
	switch verbosity {
	case "2":
		logger.SetLevel(logrus.DebugLevel)
	case "3":
		logger.SetLevel(logrus.TraceLevel)
	default:
		logger.SetLevel(logrus.InfoLevel)
	}
	return logger
}
|
package tokenizer
import (
"bytes"
"crypto/aes"
"crypto/sha256"
"encoding/base64"
"testing"
"time"
)
// newTokenizer builds a tokenizer with a random AES-block-sized cipher key,
// a SHA-256-block-sized HMAC key, and no extra options.
func newTokenizer() (*T, error) {
	return New(
		NewKey(aes.BlockSize),
		NewKey(sha256.BlockSize),
		nil,
	)
}
// TestTokenizer exercises constructor validation: short keys are rejected,
// properly sized keys are accepted.
func TestTokenizer(t *testing.T) {
	if _, err := New(NewKey(8), NewKey(8), nil); err == nil {
		t.Fatal("short key is not supposed to work")
	}
	if _, err := newTokenizer(); err != nil {
		t.Fatal(err)
	}
}

// TestEncode checks that encoding a nil payload succeeds.
func TestEncode(t *testing.T) {
	tok, err := newTokenizer()
	if err != nil {
		t.Fatal(err)
	}
	if _, err = tok.Encode(nil); err != nil {
		t.Fatal(err)
	}
}

// TestDecodeErrors feeds Decode progressively better-formed garbage and
// checks each rejection.
func TestDecodeErrors(t *testing.T) {
	tok, err := newTokenizer()
	if err != nil {
		t.Fatal(err)
	}
	// Not even valid base64.
	if _, _, err = tok.Decode([]byte("not base64")); err == nil {
		t.Fatal("unexpected decode with invalid base64")
	}
	// Valid base64, but the payload is too short to be a token.
	short := base64.RawURLEncoding.EncodeToString([]byte("fail-me"))
	if _, _, err = tok.Decode([]byte(short)); err == nil {
		t.Fatal("unexpected decode with invalid payload")
	}
	// Correct length, zeroed content: must fail signature verification.
	size := aes.BlockSize*2 + tok.hmac().Size()
	zeroed := base64.RawURLEncoding.EncodeToString(make([]byte, size))
	if _, _, err = tok.Decode([]byte(zeroed)); err != ErrInvalidTokenSignature {
		t.Fatalf("unexpected error: %v", err)
	}
}
// TestTokenEncoding round-trips a 1000-byte payload and checks content
// fidelity and creation-time freshness.
func TestTokenEncoding(t *testing.T) {
	tok, err := newTokenizer()
	if err != nil {
		t.Fatal(err)
	}
	want := NewKey(1e3)
	token, err := tok.Encode(want)
	if err != nil {
		t.Fatal(err)
	}
	have, created, err := tok.Decode(token)
	if err != nil {
		t.Fatal(err)
	}
	if len(have) != len(want) {
		t.Fatalf("unexpected length: want %d, have %d",
			len(want), len(have))
	}
	if !bytes.Equal(want, have) {
		t.Fatalf("unexpected text: want %q, have %q",
			want[:20], have[:20])
	}
	if age := time.Since(created); age > 2*time.Second {
		t.Fatalf("token is too old: %s", age)
	}
}
func BenchmarkTokenEncode1block(b *testing.B) {
tok, err := newTokenizer()
data := make([]byte, aes.BlockSize)
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, err = tok.Encode(data); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenDecode1block(b *testing.B) {
tok, _ := newTokenizer()
data := make([]byte, aes.BlockSize)
token, err := tok.Encode(data)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, _, err = tok.Decode(token); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenEncode10blocks(b *testing.B) {
tok, err := newTokenizer()
data := make([]byte, aes.BlockSize*10)
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, err = tok.Encode(data); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenDecode10blocks(b *testing.B) {
tok, _ := newTokenizer()
data := make([]byte, aes.BlockSize*10)
token, err := tok.Encode(data)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, _, err = tok.Decode(token); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenEncode100blocks(b *testing.B) {
tok, err := newTokenizer()
data := make([]byte, aes.BlockSize*100)
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, err = tok.Encode(data); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenDecode100blocks(b *testing.B) {
tok, _ := newTokenizer()
data := make([]byte, aes.BlockSize*100)
token, err := tok.Encode(data)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, _, err = tok.Decode(token); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenEncode1000blocks(b *testing.B) {
tok, err := newTokenizer()
data := make([]byte, aes.BlockSize*1000)
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, err = tok.Encode(data); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkTokenDecode1000blocks(b *testing.B) {
tok, _ := newTokenizer()
data := make([]byte, aes.BlockSize*1000)
token, err := tok.Encode(data)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
if _, _, err = tok.Decode(token); err != nil {
b.Fatal(err)
}
}
}
|
package main
import (
"fmt"
"log"
"os/exec"
)
// DockerBuild builds the current directory's Dockerfile (pulling base
// images) and tags the result with the env tag.
func DockerBuild() error {
	args := []string{"build", "--pull", "-t", ImageTags["env"], "."}
	if out, err := exec.Command("docker", args...).CombinedOutput(); err != nil {
		return fmt.Errorf("docker build: %s: %s", err, string(out))
	}
	fmt.Println()
	log.Printf("docker build done: %s\n", ImageTags["env"])
	fmt.Println()
	return nil
}
// dockerTag runs `docker tag src dst`, returning a descriptive error on
// failure. Extracted from DockerSetTag, which repeated this block thrice.
func dockerTag(src, dst string) error {
	if out, err := exec.Command("docker", "tag", src, dst).CombinedOutput(); err != nil {
		return fmt.Errorf("docker tag: %s: %s", err, string(out))
	}
	return nil
}

// DockerSetTag applies the required tags:
// Env=stg : stg, commitHash
// Env=prod: prod, commitHash, latest
func DockerSetTag() error {
	// If a prebuilt image was supplied, alias it to the env tag first.
	if AlreadyBuildImage != "" {
		if err := dockerTag(AlreadyBuildImage, ImageTags["env"]); err != nil {
			return err
		}
		// NOTE(review): this message reports ImageTags["commitHash"] even
		// though the tag just applied was ImageTags["env"] — preserved
		// as-is; confirm which was intended.
		log.Printf("docker SetTag from %s done: %s\n", AlreadyBuildImage, ImageTags["commitHash"])
	}
	// Always tag the env image with the commit hash.
	if err := dockerTag(ImageTags["env"], ImageTags["commitHash"]); err != nil {
		return err
	}
	log.Printf("docker SetTag from %s done: %s\n", ImageTags["env"], ImageTags["commitHash"])
	// For prod, tagging with latest
	if _, ok := ImageTags["latest"]; ok {
		if err := dockerTag(ImageTags["env"], ImageTags["latest"]); err != nil {
			return err
		}
		log.Printf("docker SetTag done: %s\n", ImageTags["latest"])
	}
	fmt.Println()
	return nil
}
// DockerRun executes the docker pipeline: build (unless a prebuilt image
// was supplied) and then tag.
func DockerRun() error {
	if AlreadyBuildImage == "" {
		if err := DockerBuild(); err != nil {
			return err
		}
	}
	return DockerSetTag()
}
|
package main
import (
"fmt"
"strings"
)
// t11 demonstrates removing the element at index 2 from an int slice via
// the append-splice idiom, printing intermediate views.
func t11() {
	nums := []int{1, 2, 3, 5}
	fmt.Println(nums[0])
	fmt.Println(nums[:2])
	nums = append(nums[:2], nums[3:]...)
	fmt.Println(nums)
}
// Expected output of t11:
//[1 2]
//[1 2 5]
// T is a tiny struct used by t12 to demonstrate removing a specific
// pointer element from a slice.
type T struct {
	t int
}
// t12 builds a slice of *T, prints it, removes the element matching a
// retained pointer via append-splice, and prints the result.
func t12() {
	t3 := &T{3}
	list := []*T{{1}, {2}}
	list = append(list, t3, &T{5})
	for _, item := range list {
		fmt.Println(item)
	}
	for i, item := range list {
		if item == t3 {
			list = append(list[:i], list[i+1:]...)
			break
		}
	}
	for _, item := range list {
		fmt.Println(item)
	}
}
// t13 splits a two-word string on spaces and prints the first field.
func t13() {
	a := "sadf dfsdf"
	b := strings.Split(a, " ")
	// Short variable declaration replaces the original's separate
	// var-then-assign pair.
	c := b[0]
	fmt.Println(c)
}
// t14 prints a full-length subslice view of a two-element slice.
func t14() {
	pair := []int{0, 1}
	view := pair[0:2]
	fmt.Println(view)
}
// main runs only the t14 experiment; t11–t13 are kept for manual use.
func main() {
	t14()
}
|
package main
import (
"os"
"path/filepath"
"flag"
"log"
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/apimachinery/pkg/watch"
)
// main watches pods in the namespace given by -n (filtered by the -l label
// and -f field selectors) and logs add/delete events until the watch
// channel closes.
func main() {
	var (
		namespace     string
		labelSelector string
		fieldSelector string
	)
	// Kubeconfig is expected at $HOME/.kube/config.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	// Load kubernetes config.
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}
	// Create a client set to use k8s apis.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	api := clientset.CoreV1()
	// Read namespace and selectors from the command line.
	flag.StringVar(&namespace, "n", "default", " namespace")
	flag.StringVar(&labelSelector, "l", "", "Label selector ")
	flag.StringVar(&fieldSelector, "f", "", "Field selector ")
	flag.Parse()
	opts := metav1.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fieldSelector,
	}
	// Create a watcher on pods.
	fmt.Printf("Starting a Pod watcher in namespace [%s]\n", namespace)
	podWatcher, err := api.Pods(namespace).Watch(opts)
	if err != nil {
		log.Fatal(err)
	}
	// Watch loop.
	for event := range podWatcher.ResultChan() {
		pod, ok := event.Object.(*v1.Pod)
		if !ok {
			// The original logged `err` here, which is always nil at this
			// point; report the actual problem instead.
			log.Fatalf("unexpected object type %T in pod watch event", event.Object)
		}
		switch event.Type {
		case watch.Added:
			log.Printf(" Pod %s added \n", pod.Name)
		case watch.Deleted:
			log.Printf(" Pod %s deleted \n", pod.Name)
		}
	}
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks
import (
"context"
"fmt"
"strings"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/util/sets"
"yunion.io/x/pkg/utils"
"yunion.io/x/sqlchemy"
"yunion.io/x/onecloud/pkg/apis/notify"
"yunion.io/x/onecloud/pkg/cloudcommon/db"
"yunion.io/x/onecloud/pkg/cloudcommon/db/lockman"
"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
"yunion.io/x/onecloud/pkg/notify/models"
"yunion.io/x/onecloud/pkg/util/logclient"
)
// RepullSuncontactTask re-pulls receivers' subcontacts after a notify
// config change. NOTE(review): "Suncontact" looks like a typo for
// "Subcontact", but the struct name is the registered task type, so
// renaming it would break task dispatch.
type RepullSuncontactTask struct {
	taskman.STask
}
// init registers the task with the task manager so it can be dispatched
// by its type name.
func init() {
	taskman.RegisterTask(RepullSuncontactTask{})
}
// taskFailed records the failure in the action log and marks the task
// stage as failed with the given reason.
func (self *RepullSuncontactTask) taskFailed(ctx context.Context, config *models.SConfig, reason string) {
	logclient.AddActionLogWithContext(ctx, config, logclient.ACT_PULL_SUBCONTACT, reason, self.UserCred, false)
	self.SetStageFailed(ctx, jsonutils.NewString(reason))
}
// repullFailedReason records why re-pulling subcontacts failed for one receiver.
type repullFailedReason struct {
	ReceiverId string
	Reason     string
}

// String renders the failure as `receiver "<id>": <reason>`.
func (s repullFailedReason) String() string {
	msg := fmt.Sprintf("receiver %q: %s", s.ReceiverId, s.Reason)
	return msg
}
// OnInit re-pulls the subcontacts of every receiver affected by the given
// config change.
//
// Flow:
//  1. Skip configs whose type is not a pull-contact type.
//  2. Select affected receivers: a domain-scoped config affects receivers
//     of that domain; a system-scoped config affects receivers whose
//     domain has no domain-level config of its own.
//  3. For each receiver (under its object lock): drop the config type
//     from its verified contact types and start a subcontact pull task.
//
// Per-receiver failures are collected and reported together at the end.
func (self *RepullSuncontactTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) {
	config := obj.(*models.SConfig)
	if !utils.IsInStringArray(config.Type, PullContactType) {
		// Nothing to re-pull for this config type.
		self.SetStageComplete(ctx, nil)
		return
	}
	// Receivers that actually have a subcontact of this type.
	subq := models.SubContactManager.Query("receiver_id").Equals("type", config.Type).SubQuery()
	q := models.ReceiverManager.Query()
	if config.Attribution == notify.CONFIG_ATTRIBUTION_DOMAIN {
		q = q.Equals("domain_id", config.DomainId)
	} else {
		// The system-level config update should not affect the receiver under the domain with config
		configq := models.ConfigManager.Query("domain_id").Equals("attribution", notify.CONFIG_ATTRIBUTION_DOMAIN).SubQuery()
		q = q.Join(configq, sqlchemy.NotEquals(q.Field("domain_id"), configq.Field("domain_id")))
	}
	// NOTE(review): unlike the joins above, the result of q.Join is not
	// reassigned to q — presumably sqlchemy records the join on the query
	// in place; verify, otherwise this join is silently dropped.
	q.Join(subq, sqlchemy.Equals(q.Field("id"), subq.Field("receiver_id")))
	rs := make([]models.SReceiver, 0)
	err := db.FetchModelObjects(models.ReceiverManager, q, &rs)
	if err != nil {
		self.taskFailed(ctx, config, fmt.Sprintf("unable to FetchModelObjects: %v", err))
		return
	}
	var reasons []string
	for i := range rs {
		r := &rs[i]
		// Closure so the deferred lock release fires per receiver, not at
		// function return.
		func() {
			lockman.LockObject(ctx, r)
			defer lockman.ReleaseObject(ctx, r)
			// unverify
			cts, err := r.GetVerifiedContactTypes()
			if err != nil {
				reasons = append(reasons, repullFailedReason{
					ReceiverId: r.Id,
					Reason:     fmt.Sprintf("unable to GetVerifiedContactTypes: %v", err),
				}.String())
				return
			}
			ctSets := sets.NewString(cts...)
			if !ctSets.Has(config.Type) {
				// This receiver never verified this contact type; skip.
				return
			}
			ctSets.Delete(config.Type)
			err = r.SetVerifiedContactTypes(ctSets.UnsortedList())
			if err != nil {
				reasons = append(reasons, repullFailedReason{
					ReceiverId: r.Id,
					Reason:     fmt.Sprintf("unable to SetVerifiedContactTypes: %v", err),
				}.String())
				return
			}
			// pull
			params := jsonutils.NewDict()
			params.Set("contact_types", jsonutils.NewArray(jsonutils.NewString(config.Type)))
			err = r.StartSubcontactPullTask(ctx, self.UserCred, params, self.Id)
			if err != nil {
				reasons = append(reasons, repullFailedReason{
					ReceiverId: r.Id,
					Reason:     fmt.Sprintf("unable to StartSubcontactPullTask: %v", err),
				}.String())
			}
		}()
	}
	if len(reasons) > 0 {
		self.taskFailed(ctx, config, strings.Join(reasons, "; "))
		return
	}
	self.SetStageComplete(ctx, nil)
}
|
package subcmd
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/go-git/go-git/v5"
"github.com/vim-volt/volt/config"
"github.com/vim-volt/volt/fileutil"
"github.com/vim-volt/volt/gitutil"
"github.com/vim-volt/volt/lockjson"
"github.com/vim-volt/volt/logger"
"github.com/vim-volt/volt/pathutil"
"github.com/vim-volt/volt/plugconf"
"github.com/vim-volt/volt/subcmd/builder"
"github.com/vim-volt/volt/transaction"
multierror "github.com/hashicorp/go-multierror"
)
// init registers the "get" subcommand in the global command table.
func init() {
	cmdMap["get"] = &getCmd{}
}
// getCmd implements "volt get": install or upgrade plugin repositories.
type getCmd struct {
	helped   bool // set by the usage func once help text was printed
	lockJSON bool // -l: operate on all plugins in the current profile
	upgrade  bool // -u: upgrade existing repositories
}

// ProhibitRootExecution reports that "volt get" must never run as root.
func (cmd *getCmd) ProhibitRootExecution(args []string) bool { return true }
// FlagSet builds the flag set for "volt get" (-l, -u) with a usage
// function that prints the long help text and records, via cmd.helped,
// that help was shown.
func (cmd *getCmd) FlagSet() *flag.FlagSet {
	fs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	fs.SetOutput(os.Stdout)
	fs.Usage = func() {
		fmt.Println(`
Usage
volt get [-help] [-l] [-u] [{repository} ...]
Quick example
$ volt get tyru/caw.vim # will install tyru/caw.vim plugin
$ volt get -u tyru/caw.vim # will upgrade tyru/caw.vim plugin
$ volt get -l -u # will upgrade all plugins in current profile
$ VOLT_DEBUG=1 volt get tyru/caw.vim # will output more verbosely
$ mkdir -p ~/volt/repos/localhost/local/hello/plugin
$ echo 'command! Hello echom "hello"' >~/volt/repos/localhost/local/hello/plugin/hello.vim
$ volt get localhost/local/hello # will add the local repository as a plugin
$ vim -c Hello # will output "hello"
Description
Install or upgrade given {repository} list, or add local {repository} list as plugins.
And fetch skeleton plugconf from:
https://github.com/vim-volt/plugconf-templates
and install it to:
$VOLTPATH/plugconf/{repository}.vim
Repository List
{repository} list (=target to perform installing, upgrading, and so on) is determined as followings:
* If -l option is specified, all plugins in current profile are used
* If one or more {repository} arguments are specified, the arguments are used
Action
The action (install, upgrade, or add only) is determined as follows:
1. If -u option is specified (upgrade):
* Upgrade git repositories in {repository} list (static repositories are ignored).
* Add {repository} list to lock.json (if not found)
2. Or (install):
* Fetch {repository} list from remotes
* Add {repository} list to lock.json (if not found)
Static repository
Volt can manage a local directory as a repository. It's called "static repository".
When you have unpublished plugins, or you want to manage ~/.vim/* files as one repository
(this is useful when you use profile feature, see "volt help profile" for more details),
static repository is useful.
All you need is to create a directory in "$VOLTPATH/repos/<repos>".
When -u was not specified (install) and given repositories exist, volt does not make a request to clone the repositories.
Therefore, "volt get" tries to fetch repositories but skip it because the directory exists.
then it adds repositories to lock.json if not found.
$ mkdir -p ~/volt/repos/localhost/local/hello/plugin
$ echo 'command! Hello echom "hello"' >~/volt/repos/localhost/local/hello/plugin/hello.vim
$ volt get localhost/local/hello # will add the local repository as a plugin
$ vim -c Hello # will output "hello"
Repository path
{repository}'s format is one of the followings:
1. {user}/{name}
This is same as "github.com/{user}/{name}"
2. {site}/{user}/{name}
3. https://{site}/{user}/{name}
4. http://{site}/{user}/{name}
Options`)
		fs.PrintDefaults()
		fmt.Println()
		cmd.helped = true
	}
	fs.BoolVar(&cmd.lockJSON, "l", false, "use all plugins in current profile as targets")
	fs.BoolVar(&cmd.upgrade, "u", false, "upgrade plugins")
	return fs
}
// Run is the entry point of "volt get". It parses flags, resolves the
// target repository list from args or the current profile, and performs
// the install/upgrade. A non-nil *Error carries an exit code:
// 10-13 for argument/lock.json problems, 20 for install failures.
func (cmd *getCmd) Run(args []string) *Error {
	// Parse args
	args, err := cmd.parseArgs(args)
	if err == ErrShowedHelp {
		// Help was printed; not an error.
		return nil
	}
	if err != nil {
		return &Error{Code: 10, Msg: "Failed to parse args: " + err.Error()}
	}
	// Read lock.json
	lockJSON, err := lockjson.Read()
	if err != nil {
		return &Error{Code: 11, Msg: "Could not read lock.json: " + err.Error()}
	}
	reposPathList, err := cmd.getReposPathList(args, lockJSON)
	if err != nil {
		return &Error{Code: 12, Msg: "Could not get repos list: " + err.Error()}
	}
	if len(reposPathList) == 0 {
		return &Error{Code: 13, Msg: "No repositories are specified"}
	}
	err = cmd.doGet(reposPathList, lockJSON)
	if err != nil {
		return &Error{Code: 20, Msg: err.Error()}
	}
	return nil
}
// parseArgs parses command-line flags for "volt get" and returns the
// remaining positional arguments.
//
// It returns ErrShowedHelp when the usage text was printed, a parse
// error when flag parsing failed, and an error when no repository was
// given and -l was not specified.
func (cmd *getCmd) parseArgs(args []string) ([]string, error) {
	fs := cmd.FlagSet()
	// BUG FIX: the Parse error was previously ignored even though the
	// FlagSet was created with flag.ContinueOnError, so invalid flags
	// were silently accepted. Check cmd.helped first to keep the
	// original -help behavior (Usage sets it before Parse returns).
	err := fs.Parse(args)
	if cmd.helped {
		return nil, ErrShowedHelp
	}
	if err != nil {
		return nil, err
	}
	if !cmd.lockJSON && len(fs.Args()) == 0 {
		fs.Usage()
		return nil, errors.New("repository was not given")
	}
	return fs.Args(), nil
}
// getReposPathList resolves the target repository paths: all plugins of
// the current profile when -l was given, otherwise the normalized
// command-line arguments. Arguments that already exist in lock.json are
// canonicalized to the stored path (case-insensitive match).
func (cmd *getCmd) getReposPathList(args []string, lockJSON *lockjson.LockJSON) ([]pathutil.ReposPath, error) {
	var reposPathList []pathutil.ReposPath
	if cmd.lockJSON {
		reposList, err := lockJSON.GetCurrentReposList()
		if err != nil {
			return nil, err
		}
		reposPathList = make([]pathutil.ReposPath, 0, len(reposList))
		for i := range reposList {
			reposPathList = append(reposPathList, reposList[i].Path)
		}
	} else {
		reposPathList = make([]pathutil.ReposPath, 0, len(args))
		for _, arg := range args {
			reposPath, err := pathutil.NormalizeRepos(arg)
			if err != nil {
				return nil, err
			}
			// Get the existing entries if already have it
			// (e.g. github.com/tyru/CaW.vim -> github.com/tyru/caw.vim)
			if r := lockJSON.Repos.FindByPath(reposPath); r != nil {
				reposPath = r.Path
			}
			reposPathList = append(reposPathList, reposPath)
		}
	}
	return reposPathList, nil
}
// doGet installs or upgrades every repository in reposPathList in
// parallel (one goroutine each) inside a single transaction, updates
// lock.json, rebuilds the ~/.vim/pack/volt directory, and prints one
// sorted status line per repository.
//
// The named return err is deliberately assignable from the deferred
// trx.Done() so transaction completion errors are not lost.
func (cmd *getCmd) doGet(reposPathList []pathutil.ReposPath, lockJSON *lockjson.LockJSON) (err error) {
	// Find matching profile
	profile, err := lockJSON.Profiles.FindByName(lockJSON.CurrentProfileName)
	if err != nil {
		// this must not be occurred because lockjson.Read()
		// validates if the matching profile exists
		return
	}
	// Begin transaction
	trx, err := transaction.Start()
	if err != nil {
		return
	}
	defer func() {
		if e := trx.Done(); e != nil {
			err = e
		}
	}()
	// Read config.toml
	cfg, err := config.Read()
	if err != nil {
		err = errors.Wrap(err, "could not read config.toml")
		return
	}
	// Buffered so worker goroutines never block on send.
	done := make(chan getParallelResult, len(reposPathList))
	getCount := 0
	// Invoke installing / upgrading tasks
	for _, reposPath := range reposPathList {
		repos := lockJSON.Repos.FindByPath(reposPath)
		// Only unknown (new) or git-type repositories are processed;
		// static repositories already in lock.json are skipped.
		if repos == nil || repos.Type == lockjson.ReposGitType {
			go cmd.getParallel(reposPath, repos, cfg, done)
			getCount++
		}
	}
	// Wait results
	failed := false
	statusList := make([]string, 0, getCount)
	var updatedLockJSON bool
	for i := 0; i < getCount; i++ {
		r := <-done
		status := cmd.formatStatus(&r)
		// Update repos[]/version
		if strings.HasPrefix(status, statusPrefixFailed) {
			failed = true
		} else {
			added := cmd.updateReposVersion(lockJSON, r.reposPath, r.reposType, r.hash, profile)
			if added && strings.Contains(status, "already exists") {
				status = fmt.Sprintf(fmtAddedRepos, r.reposPath)
			}
			updatedLockJSON = true
		}
		statusList = append(statusList, status)
	}
	// Sort by status
	sort.Strings(statusList)
	if updatedLockJSON {
		// Write to lock.json
		err = lockJSON.Write()
		if err != nil {
			err = errors.Wrap(err, "could not write to lock.json")
			return
		}
	}
	// Build ~/.vim/pack/volt dir
	err = builder.Build(false)
	if err != nil {
		err = errors.Wrap(err, "could not build "+pathutil.VimVoltDir())
		return
	}
	// Show results
	for i := range statusList {
		fmt.Println(statusList[i])
	}
	if failed {
		err = errors.New("failed to install some plugins")
		return
	}
	return
}
// formatStatus renders a getParallelResult as a status line; when the
// result carries an error, each underlying error (unwrapping a
// *multierror.Error into its parts) is appended on its own
// "\n * "-prefixed line.
func (*getCmd) formatStatus(r *getParallelResult) string {
	if r.err == nil {
		return r.status
	}
	var errs []error
	if merr, ok := r.err.(*multierror.Error); ok {
		errs = merr.Errors
	} else {
		errs = []error{r.err}
	}
	var b strings.Builder
	b.WriteString(r.status)
	for _, e := range errs {
		b.WriteString("\n * ")
		b.WriteString(e.Error())
	}
	return b.String()
}
// getParallelResult carries the outcome of one plugin's install/upgrade
// goroutine back to doGet.
type getParallelResult struct {
	reposPath pathutil.ReposPath
	status    string // formatted status line for the summary output
	hash      string // HEAD commit hash after the operation (git repos only)
	reposType lockjson.ReposType
	err       error
}
// Status line formats. The first character encodes the outcome and is
// what statusList is sorted/matched on: "!" failure, "#" no change,
// "+" installed/added, "*" upgraded.
const (
	statusPrefixFailed = "!"
	// Failed
	fmtInstallFailed = "! %s > install failed"
	fmtUpgradeFailed = "! %s > upgrade failed"
	// No change
	fmtNoChange      = "# %s > no change"
	fmtAlreadyExists = "# %s > already exists"
	// Installed
	fmtAddedRepos = "+ %s > added repository to current profile"
	fmtInstalled  = "+ %s > installed"
	// Upgraded
	fmtRevUpdate = "* %s > updated lock.json revision (%s..%s)"
	fmtUpgraded  = "* %s > upgraded (%s..%s)"
	fmtFetched   = "* %s > fetched objects (worktree is not updated)"
)
// getParallel handles one repository end-to-end and sends exactly one
// getParallelResult on done.
// This function is executed in goroutine of each plugin.
// 1. install plugin if it does not exist
// 2. install plugconf if it does not exist and createPlugconf=true
func (cmd *getCmd) getParallel(reposPath pathutil.ReposPath, repos *lockjson.Repos, cfg *config.Config, done chan<- getParallelResult) {
	pluginDone := make(chan getParallelResult)
	go cmd.installPlugin(reposPath, repos, cfg, pluginDone)
	pluginResult := <-pluginDone
	// Stop at the plugin result on failure, or when skeleton plugconf
	// creation is disabled in config.toml.
	if pluginResult.err != nil || !*cfg.Get.CreateSkeletonPlugconf {
		done <- pluginResult
		return
	}
	plugconfDone := make(chan getParallelResult)
	go cmd.installPlugconf(reposPath, &pluginResult, plugconfDone)
	done <- (<-plugconfDone)
}
// installPlugin installs or upgrades a single repository and sends
// exactly one getParallelResult on done.
//
// Action selection: install when the repository directory does not
// exist; upgrade when it exists and -u was given; otherwise report
// "already exists" and only check whether lock.json's recorded revision
// matches HEAD. A failed install is rolled back by removing the
// partially-cloned directory.
func (cmd *getCmd) installPlugin(reposPath pathutil.ReposPath, repos *lockjson.Repos, cfg *config.Config, done chan<- getParallelResult) {
	// true:upgrade, false:install
	fullReposPath := reposPath.FullPath()
	doInstall := !pathutil.Exists(fullReposPath)
	doUpgrade := cmd.upgrade && !doInstall
	var fromHash string
	var err error
	if doUpgrade {
		// Get HEAD hash string (the "from" side of the upgrade range).
		fromHash, err = gitutil.GetHEAD(reposPath)
		if err != nil {
			result := errors.Wrap(err, "failed to get HEAD commit hash")
			done <- getParallelResult{
				reposPath: reposPath,
				status:    fmt.Sprintf(fmtInstallFailed, reposPath),
				err:       result,
			}
			return
		}
	}
	var status string
	var upgraded bool
	var checkRevision bool
	if doUpgrade {
		// when cmd.upgrade is true, repos must not be nil.
		if repos == nil {
			done <- getParallelResult{
				reposPath: reposPath,
				status:    fmt.Sprintf(fmtUpgradeFailed, reposPath),
				err:       errors.New("failed to upgrade plugin: -u was specified but repos == nil"),
			}
			return
		}
		// Upgrade plugin
		logger.Debug("Upgrading " + reposPath + " ...")
		err := cmd.upgradePlugin(reposPath, cfg)
		if err != git.NoErrAlreadyUpToDate && err != nil {
			result := errors.Wrap(err, "failed to upgrade plugin")
			done <- getParallelResult{
				reposPath: reposPath,
				status:    fmt.Sprintf(fmtUpgradeFailed, reposPath),
				err:       result,
			}
			return
		}
		if err == git.NoErrAlreadyUpToDate {
			status = fmt.Sprintf(fmtNoChange, reposPath)
		} else {
			upgraded = true
		}
	} else if doInstall {
		// Install plugin
		logger.Debug("Installing " + reposPath + " ...")
		err := cmd.clonePlugin(reposPath, cfg)
		if err != nil {
			result := errors.Wrap(err, "failed to install plugin")
			// Roll back the partial clone so a retry starts clean.
			logger.Debug("Rollbacking " + fullReposPath + " ...")
			err = cmd.removeDir(fullReposPath)
			if err != nil {
				result = multierror.Append(result, err)
			}
			done <- getParallelResult{
				reposPath: reposPath,
				status:    fmt.Sprintf(fmtInstallFailed, reposPath),
				err:       result,
			}
			return
		}
		status = fmt.Sprintf(fmtInstalled, reposPath)
	} else {
		status = fmt.Sprintf(fmtAlreadyExists, reposPath)
		checkRevision = true
	}
	var toHash string
	reposType, err := cmd.detectReposType(fullReposPath)
	if err == nil && reposType == lockjson.ReposGitType {
		// Get HEAD hash string (the "to" side; also recorded in lock.json).
		toHash, err = gitutil.GetHEAD(reposPath)
		if err != nil {
			result := errors.Wrap(err, "failed to get HEAD commit hash")
			if doInstall {
				logger.Debug("Rollbacking " + fullReposPath + " ...")
				err = cmd.removeDir(fullReposPath)
				if err != nil {
					result = multierror.Append(result, err)
				}
			}
			done <- getParallelResult{
				reposPath: reposPath,
				status:    fmt.Sprintf(fmtInstallFailed, reposPath),
				err:       result,
			}
			return
		}
	}
	if upgraded {
		// Distinguish a real worktree update from a fetch-only change.
		if fromHash != toHash {
			status = fmt.Sprintf(fmtUpgraded, reposPath, fromHash, toHash)
		} else {
			status = fmt.Sprintf(fmtFetched, reposPath)
		}
	}
	if checkRevision && repos != nil && repos.Version != toHash {
		status = fmt.Sprintf(fmtRevUpdate, reposPath, repos.Version, toHash)
	}
	done <- getParallelResult{
		reposPath: reposPath,
		status:    status,
		reposType: reposType,
		hash:      toHash,
	}
}
// installPlugconf downloads/creates the skeleton plugconf for the
// repository; on success it forwards pluginResult unchanged, on failure
// it sends a fresh failure result.
func (cmd *getCmd) installPlugconf(reposPath pathutil.ReposPath, pluginResult *getParallelResult, done chan<- getParallelResult) {
	// Install plugconf
	logger.Debug("Installing plugconf " + reposPath + " ...")
	err := cmd.downloadPlugconf(reposPath)
	if err != nil {
		result := errors.Wrap(err, "failed to install plugconf")
		// TODO: Call cmd.removeDir() only when the repos *did not* exist previously
		// and was installed newly.
		// fullReposPath := reposPath.FullPath()
		// logger.Debug("Rollbacking " + fullReposPath + " ...")
		// err = cmd.removeDir(fullReposPath)
		// if err != nil {
		// result = multierror.Append(result, err)
		// }
		done <- getParallelResult{
			reposPath: reposPath,
			status:    fmt.Sprintf(fmtInstallFailed, reposPath),
			err:       result,
		}
		return
	}
	done <- *pluginResult
}
// detectReposType reports whether fullpath is a git repository
// (contains an openable .git) or a static plain-directory repository.
func (*getCmd) detectReposType(fullpath string) (lockjson.ReposType, error) {
	// No .git directory means a static repository.
	if !pathutil.Exists(filepath.Join(fullpath, ".git")) {
		return lockjson.ReposStaticType, nil
	}
	if _, err := git.PlainOpen(fullpath); err != nil {
		return "", err
	}
	return lockjson.ReposGitType, nil
}
// removeDir removes fullReposPath (and then its now-empty parent
// directories). Used to roll back a partially-completed install.
func (*getCmd) removeDir(fullReposPath string) error {
	if pathutil.Exists(fullReposPath) {
		if err := os.RemoveAll(fullReposPath); err != nil {
			// BUG FIX: keep the underlying cause in the chain instead of
			// discarding it as the original errors.Errorf did.
			return errors.Wrapf(err, "rollback failed: cannot remove '%s'", fullReposPath)
		}
		// Remove parent directories (best effort; no error to report).
		fileutil.RemoveDirs(filepath.Dir(fullReposPath))
	}
	return nil
}
// upgradePlugin updates an existing git repository via the upstream
// remote: fetch for bare repositories, pull otherwise. It may return
// git.NoErrAlreadyUpToDate, which callers treat as "no change".
func (cmd *getCmd) upgradePlugin(reposPath pathutil.ReposPath, cfg *config.Config) error {
	fullpath := reposPath.FullPath()
	repos, err := git.PlainOpen(fullpath)
	if err != nil {
		return err
	}
	reposCfg, err := repos.Config()
	if err != nil {
		return err
	}
	remote, err := gitutil.GetUpstreamRemote(repos)
	if err != nil {
		return err
	}
	// Bare repositories have no worktree to pull into.
	if reposCfg.Core.IsBare {
		return cmd.gitFetch(repos, fullpath, remote, cfg)
	}
	return cmd.gitPull(repos, fullpath, remote, cfg)
}
// errRepoExists signals that the clone target directory already exists.
var errRepoExists = errors.New("repository exists")

// clonePlugin clones the repository identified by reposPath into
// $VOLTPATH, creating parent directories as needed. It returns
// errRepoExists when the target directory is already present.
func (cmd *getCmd) clonePlugin(reposPath pathutil.ReposPath, cfg *config.Config) error {
	fullpath := reposPath.FullPath()
	if pathutil.Exists(fullpath) {
		return errRepoExists
	}
	err := os.MkdirAll(filepath.Dir(fullpath), 0755)
	if err != nil {
		return err
	}
	// Clone repository to $VOLTPATH/repos/{site}/{user}/{name}
	return cmd.gitClone(reposPath.CloneURL(), fullpath, cfg)
}
// downloadPlugconf writes the plugconf file for reposPath, fetching the
// template from the plugconf-templates repository and falling back to
// an empty skeleton when fetching fails. Existing plugconf files are
// left untouched.
func (cmd *getCmd) downloadPlugconf(reposPath pathutil.ReposPath) error {
	path := reposPath.Plugconf()
	if pathutil.Exists(path) {
		logger.Debugf("plugconf '%s' exists... skip", path)
		return nil
	}
	// If non-nil error returned from FetchPlugconfTemplate(),
	// create skeleton plugconf file
	tmpl, err := plugconf.FetchPlugconfTemplate(reposPath)
	if err != nil {
		logger.Debug(err.Error())
		// empty tmpl is returned when err != nil
	}
	content, merr := tmpl.Generate(path)
	if merr.ErrorOrNil() != nil {
		return errors.Errorf("parse error in fetched plugconf %s: %s", reposPath, merr.Error())
	}
	// BUG FIX: the MkdirAll error was previously ignored; the following
	// WriteFile would then fail with a less useful error.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	err = ioutil.WriteFile(path, content, 0644)
	if err != nil {
		return err
	}
	return nil
}
// updateReposVersion records the operation's result in lock.json:
// * Add repos to 'repos' if not found
// * Add repos to 'profiles[]/repos_path' if not found
// It returns true when anything was newly added (as opposed to only the
// version of an existing entry being bumped).
func (*getCmd) updateReposVersion(lockJSON *lockjson.LockJSON, reposPath pathutil.ReposPath, reposType lockjson.ReposType, version string, profile *lockjson.Profile) bool {
	repos := lockJSON.Repos.FindByPath(reposPath)
	added := false
	if repos == nil {
		// repos is not found in lock.json
		// -> previous operation is install
		repos = &lockjson.Repos{
			Type:    reposType,
			Path:    reposPath,
			Version: version,
		}
		// Add repos to 'repos'
		lockJSON.Repos = append(lockJSON.Repos, *repos)
		added = true
	} else {
		// repos is found in lock.json
		// -> previous operation is upgrade
		repos.Version = version
	}
	if !profile.ReposPath.Contains(reposPath) {
		// Add repos to 'profiles[]/repos_path'
		profile.ReposPath = append(profile.ReposPath, reposPath)
		added = true
	}
	return added
}
// gitFetch fetches from the given remote via go-git; when that fails
// and fallback_git_cmd is enabled (and git is installed), it shells out
// to "git fetch" instead. Returns git.NoErrAlreadyUpToDate when HEAD
// did not move.
func (cmd *getCmd) gitFetch(r *git.Repository, workDir string, remote string, cfg *config.Config) error {
	err := r.Fetch(&git.FetchOptions{
		RemoteName: remote,
	})
	if err == nil || err == git.NoErrAlreadyUpToDate {
		return err
	}
	// When fallback_git_cmd is true and git command is installed,
	// try to invoke git-fetch command
	if !*cfg.Get.FallbackGitCmd || !cmd.hasGitCmd() {
		return err
	}
	logger.Warnf("failed to fetch, try to execute \"git fetch %s\" instead...: %s", remote, err.Error())
	before, err := gitutil.GetHEADRepository(r)
	// BUG FIX: this error was previously overwritten by fetch.Run()
	// without ever being checked, so a bad "before" hash went unnoticed.
	if err != nil {
		return err
	}
	fetch := exec.Command("git", "fetch", remote)
	fetch.Dir = workDir
	err = fetch.Run()
	if err != nil {
		return err
	}
	if changed, err := cmd.getWorktreeChanges(r, before); err != nil {
		return err
	} else if !changed {
		return git.NoErrAlreadyUpToDate
	}
	return nil
}
// gitPull pulls the worktree from the given remote via go-git; when
// that fails and fallback_git_cmd is enabled (and git is installed), it
// shells out to "git pull" instead. Returns git.NoErrAlreadyUpToDate
// when HEAD did not move.
func (cmd *getCmd) gitPull(r *git.Repository, workDir string, remote string, cfg *config.Config) error {
	wt, err := r.Worktree()
	if err != nil {
		return err
	}
	err = wt.Pull(&git.PullOptions{
		RemoteName: remote,
		// TODO: Temporarily recursive clone is disabled, because go-git does
		// not support relative submodule url in .gitmodules and it causes an
		// error
		RecurseSubmodules: 0,
	})
	if err == nil || err == git.NoErrAlreadyUpToDate {
		return err
	}
	// When fallback_git_cmd is true and git command is installed,
	// try to invoke git-pull command
	if !*cfg.Get.FallbackGitCmd || !cmd.hasGitCmd() {
		return err
	}
	logger.Warnf("failed to pull, try to execute \"git pull\" instead...: %s", err.Error())
	before, err := gitutil.GetHEADRepository(r)
	// BUG FIX: this error was previously overwritten by pull.Run()
	// without ever being checked, so a bad "before" hash went unnoticed.
	if err != nil {
		return err
	}
	pull := exec.Command("git", "pull")
	pull.Dir = workDir
	err = pull.Run()
	if err != nil {
		return err
	}
	if changed, err := cmd.getWorktreeChanges(r, before); err != nil {
		return err
	} else if !changed {
		return git.NoErrAlreadyUpToDate
	}
	return nil
}
// getWorktreeChanges reports whether the repository's HEAD has moved
// away from the given "before" commit hash.
func (cmd *getCmd) getWorktreeChanges(r *git.Repository, before string) (bool, error) {
	after, err := gitutil.GetHEADRepository(r)
	if err != nil {
		return false, err
	}
	changed := after != before
	return changed, nil
}
// gitClone clones cloneURL into dstDir via go-git; when that fails and
// fallback_git_cmd is enabled (and git is installed), it removes the
// partial clone and shells out to "git clone --recursive" instead.
// Finally it records "origin" as the upstream remote.
func (cmd *getCmd) gitClone(cloneURL, dstDir string, cfg *config.Config) error {
	isBare := false
	r, err := git.PlainClone(dstDir, isBare, &git.CloneOptions{
		URL: cloneURL,
		// TODO: Temporarily recursive clone is disabled, because go-git does
		// not support relative submodule url in .gitmodules and it causes an
		// error
		RecurseSubmodules: 0,
	})
	if err != nil {
		// When fallback_git_cmd is true and git command is installed,
		// try to invoke git-clone command
		if !*cfg.Get.FallbackGitCmd || !cmd.hasGitCmd() {
			return err
		}
		logger.Warnf("failed to clone, try to execute \"git clone --recursive %s %s\" instead...: %s", cloneURL, dstDir, err.Error())
		err = os.RemoveAll(dstDir)
		if err != nil {
			return err
		}
		out, err := exec.Command("git", "clone", "--recursive", cloneURL, dstDir).CombinedOutput()
		if err != nil {
			return errors.Errorf("\"git clone --recursive %s %s\" failed, out=%s: %s", cloneURL, dstDir, string(out), err.Error())
		}
		// BUG FIX: r is nil here (PlainClone failed), so the original
		// passed a nil repository to SetUpstreamRemote. Re-open the
		// repository produced by the git command instead.
		r, err = git.PlainOpen(dstDir)
		if err != nil {
			return err
		}
	}
	return gitutil.SetUpstreamRemote(r, "origin")
}
// hasGitCmd reports whether the git executable can be found in PATH.
func (cmd *getCmd) hasGitCmd() bool {
	name := "git"
	if runtime.GOOS == "windows" {
		name += ".exe"
	}
	if _, err := exec.LookPath(name); err != nil {
		return false
	}
	return true
}
|
package main
// main exercises Fib(20) as a tiny single-core benchmark workload.
func main() {
	Fib(20)
}
// Fib computes the n-th Fibonacci number using naive exponential-time
// recursion. The deliberately slow implementation serves as a
// single-core benchmark: run it on an old machine and compare timings
// with a modern one to see how little single-threaded performance has
// improved.
func Fib(n int64) int64 {
	if n == 0 {
		return 0
	}
	if n == 1 {
		return 1
	}
	return Fib(n-1) + Fib(n-2)
}
|
package msgraph
import (
"fmt"
"testing"
)
// GetTestGroup returns the pre-provisioned test group (looked up by the
// configured display name) from the live graphClient; it fails the test
// when the group cannot be listed or found.
func GetTestGroup(t *testing.T) Group {
	t.Helper()
	groups, err := graphClient.ListGroups()
	if err != nil {
		t.Fatalf("Cannot GraphClient.ListGroups(): %v", err)
	}
	groupTest, err := groups.GetByDisplayName(msGraphExistingGroupDisplayName)
	if err != nil {
		t.Fatalf("Cannot groups.GetByDisplayName(%v): %v", msGraphExistingGroupDisplayName, err)
	}
	return groupTest
}
// TestGroup_ListMembers verifies that ListMembers returns the expected
// known member for a client-backed group and errors for a Group that
// was not created through a GraphClient.
func TestGroup_ListMembers(t *testing.T) {
	groupTest := GetTestGroup(t)
	tests := []struct {
		name    string
		g       Group
		want    Users
		wantErr bool
	}{
		{
			name:    "GraphClient created Group",
			g:       groupTest,
			want:    Users{User{UserPrincipalName: msGraphExistingUserPrincipalInGroup}},
			wantErr: false,
		}, {
			name:    "Not GraphClient created Group",
			g:       Group{DisplayName: "Test"},
			want:    Users{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.g.ListMembers()
			if (err != nil) != tt.wantErr {
				t.Errorf("Group.ListMembers() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Membership check by principal name: every wanted user must
			// appear somewhere in the result.
			var found bool
			for _, searchObj := range tt.want {
				for _, checkObj := range got {
					found = found || searchObj.UserPrincipalName == checkObj.UserPrincipalName
				}
			}
			if !found && len(tt.want) > 0 {
				t.Errorf("GraphClient.ListGroups() = %v, searching for one of %v", got, tt.want)
			}
		})
	}
}
// TestGroup_String verifies that Group.String renders all fields in the
// documented format by rebuilding the expected string from the same
// field values.
func TestGroup_String(t *testing.T) {
	testGroup := GetTestGroup(t)
	tests := []struct {
		name string
		g    Group
		want string
	}{
		{
			name: "Test All Groups",
			g:    testGroup,
			want: fmt.Sprintf("Group(ID: \"%v\", Description: \"%v\" DisplayName: \"%v\", CreatedDateTime: \"%v\", GroupTypes: \"%v\", Mail: \"%v\", MailEnabled: \"%v\", MailNickname: \"%v\", OnPremisesLastSyncDateTime: \"%v\", OnPremisesSecurityIdentifier: \"%v\", OnPremisesSyncEnabled: \"%v\", ProxyAddresses: \"%v\", SecurityEnabled \"%v\", Visibility: \"%v\", DirectAPIConnection: %v)",
				testGroup.ID, testGroup.Description, testGroup.DisplayName, testGroup.CreatedDateTime, testGroup.GroupTypes, testGroup.Mail, testGroup.MailEnabled, testGroup.MailNickname, testGroup.OnPremisesLastSyncDateTime, testGroup.OnPremisesSecurityIdentifier, testGroup.OnPremisesSyncEnabled, testGroup.ProxyAddresses, testGroup.SecurityEnabled, testGroup.Visibility, testGroup.graphClient != nil),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.g.String(); got != tt.want {
				t.Errorf("Group.String() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGroup_ListTransitiveMembers verifies that a client-backed group
// returns at least one transitive member and that a Group not created
// through a GraphClient errors.
func TestGroup_ListTransitiveMembers(t *testing.T) {
	testGroup := GetTestGroup(t)
	tests := []struct {
		name    string
		g       Group
		wantErr bool
	}{
		{
			name:    "GraphClient created Group",
			g:       testGroup,
			wantErr: false,
		}, {
			name:    "Not GraphClient created Group",
			g:       Group{DisplayName: "Test not GraphClient sourced"},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.g.ListTransitiveMembers()
			if (err != nil) != tt.wantErr {
				t.Errorf("Group.ListTransitiveMembers() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr && len(got) == 0 {
				t.Errorf("Group.ListTransitiveMembers() = %v, len(%d), want at least one member of that group", got, len(got))
			}
		})
	}
}
// TestGroup_GetMemberGroupsAsStrings verifies that a client-backed
// group resolves to at least one membership group ID and that an
// uninitialized Group errors.
func TestGroup_GetMemberGroupsAsStrings(t *testing.T) {
	testGroup := GetTestGroup(t)
	tests := []struct {
		name    string
		g       Group
		opts    []GetQueryOption
		wantErr bool
	}{
		{
			name:    "Test group func GetMembershipGroupsAsStrings",
			g:       testGroup,
			wantErr: false,
		}, {
			// NOTE(review): "Enabeled" in the name below looks like a typo,
			// but it is a runtime string (test name), left as-is here.
			name:    "Test group func GetMembershipGroupsAsStrings - no securityGroupsEnabeledF",
			g:       testGroup,
			wantErr: false,
		},
		{
			name:    "Group not initialized by GraphClient",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.g.GetMemberGroupsAsStrings(tt.opts...)
			if (err != nil) != tt.wantErr {
				t.Errorf("Group.GetMemberGroupsAsStrings() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr && len(got) == 0 {
				t.Errorf("Group.GetMemberGroupsAsStrings() = %v, len(%d), want at least one value", got, len(got))
			}
		})
	}
}
|
package ravendb
import (
"bytes"
"io"
"net/http"
)
var (
	// Compile-time check that PutAttachmentOperation implements IOperation.
	_ IOperation = &PutAttachmentOperation{}
)

// PutAttachmentOperation describes storing an attachment stream on a
// document, optionally guarded by a change vector for optimistic
// concurrency.
type PutAttachmentOperation struct {
	Command *PutAttachmentCommand

	_documentID   string
	_name         string
	_stream       io.Reader
	_contentType  string
	_changeVector *string
}
// NewPutAttachmentOperation creates an operation that stores the given
// attachment stream under (documentID, name).
func NewPutAttachmentOperation(documentID string, name string, stream io.Reader, contentType string, changeVector *string) *PutAttachmentOperation {
	op := &PutAttachmentOperation{}
	op._documentID = documentID
	op._name = name
	op._stream = stream
	op._contentType = contentType
	op._changeVector = changeVector
	return op
}
// GetCommand builds the PutAttachmentCommand for this operation,
// remembers it in o.Command, and returns it.
func (o *PutAttachmentOperation) GetCommand(store *DocumentStore, conventions *DocumentConventions, cache *httpCache) (RavenCommand, error) {
	var err error
	o.Command, err = NewPutAttachmentCommand(o._documentID, o._name, o._stream, o._contentType, o._changeVector)
	return o.Command, err
}
// Compile-time check that PutAttachmentCommand implements RavenCommand.
var _ RavenCommand = &PutAttachmentCommand{}

// PutAttachmentCommand is the HTTP command that PUTs the attachment
// stream to a server node.
type PutAttachmentCommand struct {
	RavenCommandBase

	_documentID   string
	_name         string
	_stream       io.Reader
	_contentType  string
	_changeVector *string

	Result *AttachmentDetails // attachment metadata decoded from the response
}
// NewPutAttachmentCommand validates its arguments and builds the
// command; documentID and name must not be blank.
// TODO: should stream be io.ReadCloser? Who owns closing the attachment
func NewPutAttachmentCommand(documentID string, name string, stream io.Reader, contentType string, changeVector *string) (*PutAttachmentCommand, error) {
	switch {
	case stringIsBlank(documentID):
		return nil, newIllegalArgumentError("documentId cannot be null")
	case stringIsBlank(name):
		return nil, newIllegalArgumentError("name cannot be null")
	}
	return &PutAttachmentCommand{
		RavenCommandBase: NewRavenCommandBase(),
		_documentID:      documentID,
		_name:            name,
		_stream:          stream,
		_contentType:     contentType,
		_changeVector:    changeVector,
	}, nil
}
// noReader selects the request-building strategy: when true the stream
// is buffered fully into memory and sent as a []byte body instead of a
// streaming reader.
var noReader = true

// CreateRequest builds the PUT .../attachments request for the given
// node, URL-escaping the document id, name and content type, and
// attaching the change-vector header when present.
func (c *PutAttachmentCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	url := node.URL + "/databases/" + node.Database + "/attachments?id=" + urlUtilsEscapeDataString(c._documentID) + "&name=" + urlUtilsEscapeDataString(c._name)
	if stringIsNotEmpty(c._contentType) {
		url += "&contentType=" + urlUtilsEscapeDataString(c._contentType)
	}
	if noReader {
		// Buffer the whole stream and send it as bytes.
		var buf bytes.Buffer
		_, err := io.Copy(&buf, c._stream)
		if err != nil {
			return nil, err
		}
		req, err := newHttpPut(url, buf.Bytes())
		if err != nil {
			return nil, err
		}
		// The content type is carried in the query string, not the header.
		req.Header.Del("Content-Type")
		addChangeVectorIfNotNull(c._changeVector, req)
		return req, nil
	}
	// Streaming variant: hand the reader straight to the request.
	req, err := newHttpPutReader(url, c._stream)
	if err != nil {
		return nil, err
	}
	addChangeVectorIfNotNull(c._changeVector, req)
	return req, nil
}
// SetResponse decodes the server's JSON reply into c.Result.
func (c *PutAttachmentCommand) SetResponse(response []byte, fromCache bool) error {
	return jsonUnmarshal(response, &c.Result)
}
|
package services
import (
"github.com/gobuffalo/validate/v3"
"github.com/gofrs/uuid"
movetaskorderops "github.com/transcom/mymove/pkg/gen/primeapi/primeoperations/move_task_order"
"github.com/transcom/mymove/pkg/models"
)
// HiddenMove stores the MTO ID and the reason that the move is being hidden.
type HiddenMove struct {
	MTOID  uuid.UUID
	Reason string
}

// HiddenMoves is the slice of HiddenMove to return in the handler call.
type HiddenMoves []HiddenMove

// MoveTaskOrderHider is the service object interface for Hide.
//go:generate mockery -name MoveTaskOrderHider
type MoveTaskOrderHider interface {
	Hide() (HiddenMoves, error)
}

// MoveTaskOrderCreator is the service object interface for CreateMoveTaskOrder.
//go:generate mockery -name MoveTaskOrderCreator
type MoveTaskOrderCreator interface {
	CreateMoveTaskOrder(moveTaskOrder *models.Move) (*models.Move, *validate.Errors, error)
}

// MoveTaskOrderFetcher is the service object interface for FetchMoveTaskOrder.
//go:generate mockery -name MoveTaskOrderFetcher
type MoveTaskOrderFetcher interface {
	FetchMoveTaskOrder(moveTaskOrderID uuid.UUID, searchParams *FetchMoveTaskOrderParams) (*models.Move, error)
	ListMoveTaskOrders(orderID uuid.UUID, searchParams *ListMoveTaskOrderParams) ([]models.Move, error)
	ListAllMoveTaskOrders(searchParams *ListMoveTaskOrderParams) (models.Moves, error)
}

// MoveTaskOrderUpdater is the service object interface for updating fields of a MoveTaskOrder.
//go:generate mockery -name MoveTaskOrderUpdater
type MoveTaskOrderUpdater interface {
	MakeAvailableToPrime(moveTaskOrderID uuid.UUID, eTag string, includeServiceCodeMS bool, includeServiceCodeCS bool) (*models.Move, error)
	UpdatePostCounselingInfo(moveTaskOrderID uuid.UUID, body movetaskorderops.UpdateMTOPostCounselingInformationBody, eTag string) (*models.Move, error)
	ShowHide(moveTaskOrderID uuid.UUID, show *bool) (*models.Move, error)
}

// MoveTaskOrderChecker is the service object interface for checking if a MoveTaskOrder is in a certain state.
//go:generate mockery -name MoveTaskOrderChecker
type MoveTaskOrderChecker interface {
	MTOAvailableToPrime(moveTaskOrderID uuid.UUID) (bool, error)
}

// ListMoveTaskOrderParams is a public struct that's used to pass filter arguments to the ListMoveTaskOrders and ListAllMoveTaskOrders queries.
type ListMoveTaskOrderParams struct {
	IsAvailableToPrime bool   // indicates if all MTOs returned must be Prime-available (only used in ListAllMoveTaskOrders)
	IncludeHidden      bool   // indicates if hidden/disabled MTOs should be included in the output
	Since              *int64 // if filled, only MTOs that have been updated after this timestamp will be returned (only used in ListAllMoveTaskOrders)
}

// FetchMoveTaskOrderParams is a public struct that's used to pass filter arguments to the FetchMoveTaskOrder query.
type FetchMoveTaskOrderParams struct {
	IncludeHidden bool // indicates if hidden/disabled MTO should be included in the output
}
|
package validate_binary_search_tree
import (
"algorithm-trials/questions/utils"
"math"
)
// isValidBST reports whether the tree rooted at root is a strict binary
// search tree. An in-order traversal of a valid BST visits values in
// strictly increasing order, so we track the previously visited value.
func isValidBST(root *utils.TreeNode) bool {
	prev := math.MinInt64
	var inorder func(n *utils.TreeNode) bool
	inorder = func(n *utils.TreeNode) bool {
		switch {
		case n == nil:
			return true
		case !inorder(n.Left):
			return false
		case n.Val <= prev:
			// Duplicate or decreasing value breaks the BST property.
			return false
		}
		prev = n.Val
		return inorder(n.Right)
	}
	return inorder(root)
}
// isValidBSTByRreOrder validates the BST property with an iterative
// in-order traversal using an explicit stack: a valid BST must yield a
// strictly increasing value sequence.
func isValidBSTByRreOrder(root *utils.TreeNode) bool {
	prev := math.MinInt64
	var stack []*utils.TreeNode
	node := root
	for node != nil || len(stack) > 0 {
		// Descend as far left as possible, stacking ancestors.
		for ; node != nil; node = node.Left {
			stack = append(stack, node)
		}
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if top.Val <= prev {
			return false
		}
		prev = top.Val
		node = top.Right
	}
	return true
}
|
package typedkey
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/iotaledger/hive.go/ds/types"
"github.com/iotaledger/hive.go/kvstore/mapdb"
)
// Test persists a Commitment through a typed key and verifies that a
// fresh accessor over the same storage and key reads back an equal value.
func Test(t *testing.T) {
	storage := mapdb.NewMapDB()
	expected := Commitment{
		Index:            1,
		PrevID:           types.Identifier{1, 2, 3},
		RootsID:          types.Identifier{4, 5, 6},
		CumulativeWeight: 789,
	}
	// Write through one typed-key instance.
	NewGenericType[Commitment](storage, 1).Set(expected)
	// A second instance over the same storage and key must see the value.
	reloaded := NewGenericType[Commitment](storage, 1)
	require.Equal(t, expected, reloaded.Get())
}
// Commitment is a somewhat complex type used to test the storable Type.
type Commitment struct {
	// Index of the commitment (presumably a slot/epoch index — confirm with caller).
	Index int64
	// PrevID identifies the previous commitment in the chain.
	PrevID types.Identifier
	// RootsID identifies the roots associated with this commitment.
	RootsID types.Identifier
	// CumulativeWeight accumulated up to this commitment.
	CumulativeWeight int64
}
|
package play
import (
"errors"
"log"
"github.com/joshprzybyszewski/cribbage/logic/pegging"
"github.com/joshprzybyszewski/cribbage/model"
"github.com/joshprzybyszewski/cribbage/server/interaction"
)
// Compile-time check that peggingHandler satisfies PhaseHandler.
var _ PhaseHandler = (*peggingHandler)(nil)

// peggingHandler drives the pegging phase of a cribbage game.
type peggingHandler struct{}
// Start begins the pegging phase: it clears the pegged-card history and
// blocks on the player after the dealer, who pegs first.
func (*peggingHandler) Start(g *model.Game, pAPIs map[model.PlayerID]interaction.Player) error {
	// Reset the pegged cards while keeping the slice's capacity.
	g.PeggedCards = g.PeggedCards[:0]
	// The player immediately after the dealer is the first to peg.
	firstPegger := playersToDealTo(g)[0]
	addPlayerToBlocker(g, firstPegger, model.PegCard, pAPIs, `please peg a card`)
	return nil
}
// HandleAction processes a single pegging action: validate it, clear the
// blocker, apply the peg or "go", then advance the game state.
func (*peggingHandler) HandleAction(g *model.Game,
	action model.PlayerAction,
	pAPIs map[model.PlayerID]interaction.Player,
) error {
	// VALIDATE: generic action checks first, then peg-specific ones.
	if err := validateAction(g, action, model.PegCard); err != nil {
		return err
	}
	pegAction, ok := action.Action.(model.PegAction)
	if !ok {
		return errors.New(`tried pegging with a different action`)
	}
	if err := validatePegAction(g, action.ID, pegAction); err != nil {
		// An invalid peg keeps the player blocking; tell them why.
		addPlayerToBlocker(g, action.ID, model.PegCard, pAPIs, err.Error())
		return nil
	}
	// CLEAN: exactly one player should have been blocking on this peg.
	if len(g.BlockingPlayers) != 1 {
		log.Printf("Expected one blocker for pegging, but had: %+v\n", g.BlockingPlayers)
	}
	removePlayerFromBlockers(g, action)
	// ACT: either record the "go" or peg the chosen card.
	if pegAction.SayGo {
		doSayGo(g, action, pAPIs)
	} else if err := doPeg(g, action, pegAction, pAPIs); err != nil {
		return err
	}
	// PROGRESS: set up points/blockers for the next play.
	progressAfterPeg(g, action, pAPIs)
	return nil
}
// validatePegAction checks the legality of a peg or a "say go" for player
// pID. It returns nil when the action is allowed.
func validatePegAction(g *model.Game, pID model.PlayerID, pa model.PegAction) error {
	if pa.SayGo {
		// A "go" is only legal when every remaining card would bust the count.
		if g.CurrentPeg()+minUnpeggedValue(g.Hands[pID], g.PeggedCards) <= model.MaxPeggingValue {
			return errors.New(`Cannot say go when has unpegged playable card`)
		}
		return nil
	}
	switch {
	case !handContains(g.Hands[pID], pa.Card):
		return errors.New(`Cannot peg card you don't have`)
	case hasBeenPegged(g.PeggedCards, pa.Card):
		return errors.New(`Cannot peg same card twice`)
	case g.CurrentPeg()+pa.Card.PegValue() > model.MaxPeggingValue:
		return errors.New(`Cannot peg card with this value`)
	}
	return nil
}
// doPeg scores the played card against the current pegging sequence,
// awards the points, and records the play.
func doPeg(
	g *model.Game,
	action model.PlayerAction,
	pa model.PegAction,
	pAPIs map[model.PlayerID]interaction.Player,
) error {
	points, err := pegging.PointsForCard(g.PeggedCards, pa.Card)
	if err != nil {
		return err
	}
	addPoints(g, action.ID, points, pAPIs, `pegging`)
	// Record the play so later scoring and "go" logic can see it.
	played := model.PeggedCard{
		Card:     pa.Card,
		PlayerID: action.ID,
		Action:   g.NumActions() + 1,
	}
	g.PeggedCards = append(g.PeggedCards, played)
	return nil
}
// doSayGo handles a "go". If the most recent pegger is the one saying go,
// every other player has already said go, so that player takes the point.
func doSayGo(g *model.Game,
	action model.PlayerAction,
	pAPIs map[model.PlayerID]interaction.Player,
) {
	if len(g.PeggedCards) == 0 {
		// Nobody has pegged yet; a "go" scores nothing.
		return
	}
	if last := g.PeggedCards[len(g.PeggedCards)-1]; last.PlayerID == action.ID {
		// The go's went all the way around. Take a point.
		addPoints(g, action.ID, 1, pAPIs, `the go`)
	}
}
// progressAfterPeg moves the game forward after a peg or a go: it awards
// the last-card point when pegging is exhausted, or otherwise makes the
// next player the blocking pegger.
func progressAfterPeg(
	g *model.Game,
	action model.PlayerAction,
	pAPIs map[model.PlayerID]interaction.Player,
) {
	if g.IsOver() {
		// we shouldn't do anything if the game is over
		return
	}
	if len(g.PeggedCards) == 4*len(g.Players) {
		// This was the last card: give one point to this player.
		addPoints(g, action.ID, 1, pAPIs, `last card`)
		return
	}
	// Set the next player to peg as the blocker.
	// If we don't require everyone to say go, then we'll need to change the
	// logic in game.CurrentPeg.
	nextPlayerIndex := -1
	for i, p := range g.Players {
		if p.ID == action.ID {
			nextPlayerIndex = (i + 1) % len(g.Players)
			break
		}
	}
	// BUG fix: previously, if the acting player was somehow absent from
	// g.Players, nextPlayerIndex stayed -1 and g.Players[-1] panicked.
	if nextPlayerIndex == -1 {
		log.Printf("progressAfterPeg: acting player %v not found in game players\n", action.ID)
		return
	}
	bp := g.Players[nextPlayerIndex]
	addPlayerToBlocker(g, bp.ID, model.PegCard, pAPIs, ``)
}
|
package model
type Cart struct {
ID int `json:"id" gorm:"column:id;primaryKey`
UserID int `json:"user_id" gorm:"column:user_id"`
ProductID int `json:"product_id" gorm:"column:product_id"`
}
// TableName tells gorm the database table backing the Cart model.
func (Cart) TableName() string {
	return "carts"
}
|
package provider
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/filecoin-project/go-legs"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/go-resty/resty/v2"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-log/v2"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/datamodel"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
"github.com/kenlabs/pando-store/pkg/config"
"github.com/kenlabs/pando-store/pkg/store"
store2 "github.com/kenlabs/pando-store/pkg/types/store"
"github.com/kenlabs/pando/pkg/types/schema"
"net/http"
"net/url"
datastoreSync "github.com/ipfs/go-datastore/sync"
leveldb "github.com/ipfs/go-ds-leveldb"
blockstore "github.com/ipfs/go-ipfs-blockstore"
link "github.com/kenlabs/pando/pkg/legs"
"github.com/kenlabs/pando/sdk/pkg"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
"time"
)
// core bundles the local storage stack used by the provider: a
// mutex-wrapped datastore, a blockstore over it, and the IPLD link system.
type core struct {
	MutexDatastore *datastoreSync.MutexDatastore
	Blockstore blockstore.Blockstore
	LinkSys ipld.LinkSystem
}

// MetaProvider publishes metadata to Pando over legs and queries the
// Pando HTTP API for metadata inclusion state.
type MetaProvider struct {
	Host host.Host
	PrivateKey crypto.PrivKey
	LegsPublisher legs.Publisher
	Core *core
	HttpClient *resty.Client
	// ConnectTimeout bounds ConnectPando; PushTimeout bounds Push.
	ConnectTimeout time.Duration
	PushTimeout time.Duration
}
// topic is the pubsub topic legs publishes on.
const topic = "/pando/v0.0.1"

// latestMetadataKey is the datastore key under which the most recently
// pushed metadata CID is stored. (Fixes the "Medata" identifier typo.)
const latestMetadataKey = "/sync/metadata"

var dsLatestMetadataKey = datastore.NewKey(latestMetadataKey)

var logger = log.Logger("sdk-provider-DAG")
// NewMetaProvider builds a provider from a base64-encoded private key and
// the Pando HTTP API base URL. connectTimeout bounds ConnectPando calls;
// pushTimeout bounds Push calls.
func NewMetaProvider(privateKeyStr string, pandoAPI string, connectTimeout time.Duration, pushTimeout time.Duration) (*MetaProvider, error) {
	privateKeyBytes, err := base64.StdEncoding.DecodeString(privateKeyStr)
	if err != nil {
		return nil, err
	}
	privateKey, err := crypto.UnmarshalPrivateKey(privateKeyBytes)
	if err != nil {
		return nil, err
	}
	providerHost, err := libp2p.New(libp2p.Identity(privateKey))
	if err != nil {
		return nil, err
	}
	storageCore := &core{}
	// An empty path gives leveldb an in-memory/temporary datastore.
	ds, err := leveldb.NewDatastore("", nil)
	if err != nil {
		// BUG fix: this error was previously ignored.
		return nil, err
	}
	storageCore.MutexDatastore = datastoreSync.MutexWrap(ds)
	storageCore.Blockstore = blockstore.NewBlockstore(storageCore.MutexDatastore)
	ps, err := store.NewStoreFromDatastore(context.Background(), storageCore.MutexDatastore, &config.StoreConfig{
		SnapShotInterval: "9999h",
		CacheSize:        config.DefaultCacheSize,
	})
	if err != nil {
		return nil, err
	}
	storageCore.LinkSys = link.MkLinkSystem(ps, nil, nil)
	legsPublisher, err := dtsync.NewPublisher(providerHost, storageCore.MutexDatastore, storageCore.LinkSys, topic)
	if err != nil {
		// BUG fix: this error was previously ignored (err was silently
		// overwritten by the url.Parse call below).
		return nil, err
	}
	_, err = url.Parse(pandoAPI)
	if err != nil {
		return nil, err
	}
	httpClient := resty.New().SetBaseURL(pandoAPI).SetTimeout(connectTimeout).SetDebug(false)
	// NOTE(review): this sleep appears to wait for the publisher to come
	// up; confirm whether it is still needed or can be event-driven.
	time.Sleep(2 * time.Second)
	return &MetaProvider{
		Host:           providerHost,
		PrivateKey:     privateKey,
		LegsPublisher:  legsPublisher,
		HttpClient:     httpClient,
		Core:           storageCore,
		ConnectTimeout: connectTimeout,
		PushTimeout:    pushTimeout,
	}, nil
}
// ConnectPando dials the given Pando peer, bounded by p.ConnectTimeout.
func (p *MetaProvider) ConnectPando(peerAddress string, peerID string) error {
	peerInfo, err := pkg.NewPandoPeerInfo(peerAddress, peerID)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), p.ConnectTimeout)
	defer cancel()
	return p.Host.Connect(ctx, *peerInfo)
}
// Close shuts down the legs publisher.
func (p *MetaProvider) Close() error {
	return p.LegsPublisher.Close()
}

// NewMetadata wraps payload into a signed Metadata for this provider.
func (p *MetaProvider) NewMetadata(payload []byte) (*schema.Metadata, error) {
	return schema.NewMetaWithBytesPayload(payload, p.Host.ID(), p.PrivateKey)
}

// NewMetadataWithLink wraps payload into a signed Metadata chained to the
// given previous link.
func (p *MetaProvider) NewMetadataWithLink(payload []byte, link datamodel.Link) (*schema.Metadata, error) {
	return schema.NewMetadataWithLink(payload, p.Host.ID(), p.PrivateKey, link)
}
// Push stores metadata locally and then announces its CID on the legs
// pubsub channel. The whole operation is bounded by p.PushTimeout.
func (p *MetaProvider) Push(metadata schema.Meta) (cid.Cid, error) {
	ctx, cancel := context.WithTimeout(context.Background(), p.PushTimeout)
	defer cancel()
	// Store the metadata locally.
	c, err := p.PushLocal(ctx, metadata)
	if err != nil {
		return cid.Undef, fmt.Errorf("failed to publish metadata locally: %s", err)
	}
	logger.Infow("Publishing metadata in pubsub channel", "cid", c)
	// Publish the metadata.
	if err = p.LegsPublisher.UpdateRoot(ctx, c); err != nil {
		return cid.Undef, err
	}
	return c, nil
}
// PushLocal writes metadata into the local link system and records its
// CID as the latest pushed metadata. It returns the metadata CID.
func (p *MetaProvider) PushLocal(ctx context.Context, metadata schema.Meta) (cid.Cid, error) {
	metadataLink, err := schema.MetadataLink(p.Core.LinkSys, metadata)
	if err != nil {
		return cid.Undef, fmt.Errorf("cannot generate metadata link: %s", err)
	}
	metaCid := metadataLink.(cidlink.Link).Cid
	logger.Infow("Storing metadata locally", "cid", metaCid.String())
	if err = p.putLatestMetadata(ctx, metaCid.Bytes()); err != nil {
		return cid.Undef, fmt.Errorf("cannot store latest metadata in blockstore: %s", err)
	}
	return metaCid, nil
}
// putLatestMetadata stores metadataID (a CID's bytes) under the
// well-known "latest metadata" datastore key.
func (p *MetaProvider) putLatestMetadata(ctx context.Context, metadataID []byte) error {
	return p.Core.MutexDatastore.Put(ctx, dsLatestMetadataKey, metadataID)
}

// responseJson mirrors the Pando API envelope returned by the
// /metadata/inclusion endpoint.
type responseJson struct {
	Code int `json:"code"`
	Message string `json:"message"`
	Data struct{ Inclusion store2.MetaInclusion } `json:"Data"`
}
// CheckMetaState queries the Pando API for the inclusion state of the
// metadata identified by c.
// NOTE: ctx is currently unused; the resty client's own timeout applies.
func (p *MetaProvider) CheckMetaState(ctx context.Context, c cid.Cid) (*store2.MetaInclusion, error) {
	res, err := pkg.HandleResError(p.HttpClient.R().Get("/metadata/inclusion?cid=" + c.String()))
	if err != nil {
		return nil, err
	}
	var parsed responseJson
	if err := json.Unmarshal(res.Body(), &parsed); err != nil {
		return nil, err
	}
	if parsed.Code != http.StatusOK {
		return nil, fmt.Errorf("error msg: %s", parsed.Message)
	}
	return &parsed.Data.Inclusion, nil
}
|
package nats
import (
"time"
nats "github.com/nats-io/go-nats"
)
// Broker is a thin wrapper over a NATS connection.
type Broker struct {
	conn *nats.Conn
}

// NewBroker wraps an existing NATS connection.
func NewBroker(conn *nats.Conn) *Broker {
	return &Broker{conn: conn}
}

// Publish sends message on topic (fire-and-forget).
func (nb *Broker) Publish(topic, message string) error {
	return nb.conn.Publish(topic, []byte(message))
}
// Request sends message on topic and waits up to one second for a reply.
// NOTE(review): the 1s timeout is hard-coded — confirm whether callers
// need it configurable.
// Receiver renamed from `n` to `nb` for consistency with Publish.
func (nb *Broker) Request(topic, message string) (interface{}, error) {
	return nb.conn.Request(topic, []byte(message), 1*time.Second)
}
|
package xsql
import (
"github.com/emqx/kuiper/plugins"
"strings"
)
// FunctionValuer evaluates scalar function calls, dispatching built-ins
// to their category implementations and unknown names to the plugin
// registry.
// ONLY use NewFunctionValuer function to initialize.
type FunctionValuer struct {
	funcPlugins *funcPlugins
}
// NewFunctionValuer creates the single FunctionValuer for an operation.
// Should only be called by stream to make sure a single instance per
// operation.
func NewFunctionValuer(p *funcPlugins) *FunctionValuer {
	return &FunctionValuer{funcPlugins: p}
}
// Value implements identifier lookup; a FunctionValuer resolves no plain
// identifiers, so it always reports "not found".
func (*FunctionValuer) Value(_ string) (interface{}, bool) {
	return nil, false
}

// Meta implements metadata lookup; always "not found" for the same reason.
func (*FunctionValuer) Meta(_ string) (interface{}, bool) {
	return nil, false
}
// The maps below are used as name sets (values are ignored).

// aggFuncMap lists built-in aggregate functions. isAggFunc also caches
// aggregate plugin names here at runtime.
var aggFuncMap = map[string]string{"avg": "",
	"count": "",
	"max": "", "min": "",
	"sum": "",
	"collect": "",
	"deduplicate": "",
}

// funcWithAsteriskSupportMap lists functions that accept a `*` argument.
var funcWithAsteriskSupportMap = map[string]string{
	"collect": "",
	"count": "",
}

// mathFuncMap lists built-in math functions.
var mathFuncMap = map[string]string{"abs": "", "acos": "", "asin": "", "atan": "", "atan2": "",
	"bitand": "", "bitor": "", "bitxor": "", "bitnot": "",
	"ceil": "", "cos": "", "cosh": "",
	"exp": "",
	"ln": "", "log": "",
	"mod": "",
	"power": "",
	"rand": "", "round": "",
	"sign": "", "sin": "", "sinh": "", "sqrt": "",
	"tan": "", "tanh": "",
}

// strFuncMap lists built-in string functions.
var strFuncMap = map[string]string{"concat": "",
	"endswith": "",
	"format_time": "",
	"indexof": "",
	"length": "", "lower": "", "lpad": "", "ltrim": "",
	"numbytes": "",
	"regexp_matches": "", "regexp_replace": "", "regexp_substr": "", "rpad": "", "rtrim": "",
	"substring": "", "startswith": "", "split_value": "",
	"trim": "",
	"upper": "",
}

// convFuncMap lists built-in conversion functions.
var convFuncMap = map[string]string{"concat": "", "cast": "", "chr": "",
	"encode": "",
	"trunc": "",
}

// hashFuncMap lists built-in hash functions.
var hashFuncMap = map[string]string{"md5": "",
	"sha1": "", "sha256": "", "sha384": "", "sha512": "",
}

// jsonFuncMap lists built-in JSON-path functions.
var jsonFuncMap = map[string]string{
	"json_path_query": "", "json_path_query_first": "", "json_path_exists": "",
}

// otherFuncMap lists miscellaneous built-in functions.
var otherFuncMap = map[string]string{"isnull": "",
	"newuuid": "", "tstamp": "", "mqtt": "", "meta": "",
}
// Call dispatches a function call by (case-insensitive) name. Built-ins
// go to their category implementation; aggregates return (nil, false)
// because they are evaluated elsewhere; unknown names fall through to
// the plugin registry.
func (fv *FunctionValuer) Call(name string, args []interface{}) (interface{}, bool) {
	lowerName := strings.ToLower(name)
	if _, ok := mathFuncMap[lowerName]; ok {
		// NOTE(review): math functions receive the original-cased name
		// while every other category receives lowerName — confirm that
		// mathCall normalizes case itself.
		return mathCall(name, args)
	} else if _, ok := strFuncMap[lowerName]; ok {
		return strCall(lowerName, args)
	} else if _, ok := convFuncMap[lowerName]; ok {
		return convCall(lowerName, args)
	} else if _, ok := hashFuncMap[lowerName]; ok {
		return hashCall(lowerName, args)
	} else if _, ok := jsonFuncMap[lowerName]; ok {
		return jsonCall(lowerName, args)
	} else if _, ok := otherFuncMap[lowerName]; ok {
		return otherCall(lowerName, args)
	} else if _, ok := aggFuncMap[lowerName]; ok {
		// Aggregates are not evaluated here.
		return nil, false
	} else {
		nf, fctx, err := fv.funcPlugins.GetFuncFromPlugin(name)
		if err != nil {
			// NOTE(review): the error value itself is returned as the
			// result with ok=false — callers apparently inspect it.
			return err, false
		}
		if nf.IsAggregate() {
			return nil, false
		}
		logger := fctx.GetLogger()
		logger.Debugf("run func %s", name)
		result, ok := nf.Exec(args, fctx)
		logger.Debugf("run custom function %s, get result %v", name, result)
		return result, ok
	}
}
// IsAggStatement reports whether the parse tree contains an aggregate
// function call or a non-empty GROUP BY dimension list.
func IsAggStatement(node Node) bool {
	found := false
	WalkFunc(node, func(n Node) {
		switch v := n.(type) {
		case *Call:
			if isAggFunc(v) {
				found = true
			}
		case Dimensions:
			if groups := v.GetGroups(); len(groups) > 0 {
				found = true
			}
		}
	})
	return found
}
// isAggFunc reports whether f names an aggregate function. Known
// built-ins are decided from the static sets; unknown names are resolved
// through the plugin registry, and aggregate plugins are cached into
// aggFuncMap for subsequent calls.
func isAggFunc(f *Call) bool {
	fn := strings.ToLower(f.Name)
	if _, ok := aggFuncMap[fn]; ok {
		return true
	}
	// Any known non-aggregate built-in is definitely not an aggregate.
	for _, set := range []map[string]string{strFuncMap, convFuncMap, hashFuncMap, otherFuncMap, mathFuncMap} {
		if _, ok := set[fn]; ok {
			return false
		}
	}
	// Unknown name: ask the plugin registry and cache aggregates.
	if nf, err := plugins.GetFunction(f.Name); err == nil && nf.IsAggregate() {
		aggFuncMap[fn] = ""
		return true
	}
	return false
}
// HasAggFuncs reports whether the tree contains at least one aggregate
// function call.
func HasAggFuncs(node Node) bool {
	if node == nil {
		return false
	}
	found := false
	WalkFunc(node, func(n Node) {
		if c, ok := n.(*Call); ok && isAggFunc(c) {
			found = true
		}
	})
	return found
}
// HasNoAggFuncs reports whether the tree contains at least one
// NON-aggregate function call (note: not the negation of HasAggFuncs).
func HasNoAggFuncs(node Node) bool {
	if node == nil {
		return false
	}
	found := false
	WalkFunc(node, func(n Node) {
		if c, ok := n.(*Call); ok && !isAggFunc(c) {
			found = true
		}
	})
	return found
}
|
package internal
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// Test_Module_Run exercises the module command end to end: a valid name
// succeeds (and its output directory is cleaned up), an invalid one fails.
func Test_Module_Run(t *testing.T) {
	check := assert.New(t)
	t.Run("success", func(t *testing.T) {
		// Remove the generated directory once the subtest is done.
		defer func() {
			check.Nil(os.RemoveAll("testcase"))
		}()
		output, err := runCobraCmd(ModuleCmd, "testcase")
		check.Nil(err)
		check.Contains(output, "Done")
	})
	t.Run("invalid module name", func(t *testing.T) {
		output, err := runCobraCmd(ModuleCmd, ".")
		check.NotNil(err)
		check.Contains(output, ".")
	})
}
// Test_Module_CreateModule verifies that createModule rejects an invalid
// module name.
func Test_Module_CreateModule(t *testing.T) {
	t.Parallel()
	check := assert.New(t)
	tmpDir, err := ioutil.TempDir("", "test_create_module")
	check.Nil(err)
	defer func() { _ = os.RemoveAll(tmpDir) }()
	target := fmt.Sprintf("%s%cmodule", tmpDir, os.PathSeparator)
	// A trailing slash is not a valid module name.
	check.NotNil(createModule(target, "invalid-name/"))
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package model
import (
log "github.com/sirupsen/logrus"
)
// Status values reported by the database migration lifecycle.
const (
	// DatabaseMigrationStatusSetupIP indicates that database migration setup is still running.
	DatabaseMigrationStatusSetupIP = "setup-in-progress"
	// DatabaseMigrationStatusSetupComplete indicates that database migration setup is completed.
	DatabaseMigrationStatusSetupComplete = "setup-complete"
	// DatabaseMigrationStatusTeardownIP indicates that database migration teardown is still running.
	DatabaseMigrationStatusTeardownIP = "teardown-in-progress"
	// DatabaseMigrationStatusTeardownComplete indicates that database migration teardown is completed.
	DatabaseMigrationStatusTeardownComplete = "teardown-complete"
	// DatabaseMigrationStatusReplicationIP indicates that database migration replication process is still running.
	DatabaseMigrationStatusReplicationIP = "replication-in-progress"
	// DatabaseMigrationStatusReplicationComplete indicates that database migration process is completed.
	DatabaseMigrationStatusReplicationComplete = "replication-complete"
)

// CIMigrationDatabase is the interface for managing Mattermost databases migration process.
// Each method returns one of the status strings above.
type CIMigrationDatabase interface {
	Setup(logger log.FieldLogger) (string, error)
	Teardown(logger log.FieldLogger) (string, error)
	Replicate(logger log.FieldLogger) (string, error)
}
|
package main
//872. 叶子相似的树
//请考虑一棵二叉树上所有的叶子,这些叶子的值按从左到右的顺序排列形成一个 叶值序列
//举个例子,如上图所示,给定一棵叶值序列为(6, 7, 4, 9, 8)的树。
//
//如果有两棵二叉树的叶值序列是相同,那么我们就认为它们是叶相似的。
//
//如果给定的两个根结点分别为root1 和root2的树是叶相似的,则返回true;否则返回 false 。
//输入:root1 = [1,2,3], root2 = [1,3,2]
//输出:false
//
//
//提示:
//
//给定的两棵树可能会有1到 200个结点。
//给定的两棵树上的值介于 0 到 200 之间。
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// leafSimilar reports whether the two trees have identical left-to-right
// leaf value sequences.
func leafSimilar(root1 *TreeNode, root2 *TreeNode) bool {
	var leaves []int
	// collect appends the leaf values of the subtree, left to right.
	var collect func(n *TreeNode)
	collect = func(n *TreeNode) {
		if n == nil {
			return
		}
		if n.Left == nil && n.Right == nil {
			leaves = append(leaves, n.Val)
			return
		}
		collect(n.Left)
		collect(n.Right)
	}
	collect(root1)
	seq1 := leaves
	leaves = nil
	collect(root2)
	seq2 := leaves
	if len(seq1) != len(seq2) {
		return false
	}
	for i := range seq1 {
		if seq1[i] != seq2[i] {
			return false
		}
	}
	return true
}
// main is a smoke-test entry point; two nil trees are trivially similar.
func main() {
	leafSimilar(nil, nil)
}
|
package session
import (
"time"
uuid "github.com/satori/go.uuid"
"encoding/json"
"fmt"
"github.com/go-redis/redis"
)
// RedisStore represents a session.Store backed by redis.
type RedisStore struct {
	//Redis client used to talk to redis server.
	Client *redis.Client
	//Used for key expiry time on redis.
	SessionDuration time.Duration
}
// NewRedisStore constructs a new RedisStore. It panics when no client is
// supplied, since the store is unusable without one.
func NewRedisStore(client *redis.Client, sessionDuration time.Duration) *RedisStore {
	if client == nil {
		panic("No client provided!")
	}
	return &RedisStore{
		Client:          client,
		SessionDuration: sessionDuration,
	}
}
// Store implementation
// Save saves the provided `sessionState` and associated SessionID to the store.
//
// The `sessionState` parameter is typically a pointer to a struct containing all the data you want to be
// associated with the given SessionID. The state key expires after
// rs.SessionDuration; the uuid->sid reference is stored without expiry.
func (rs *RedisStore) Save(sid SessionID, sessionUuid uuid.UUID, sessionState interface{}) error {
	sesJson, err := json.Marshal(sessionState)
	if err != nil {
		return fmt.Errorf("error marshaling sessionState into json:\n%s", err.Error())
	}
	err = rs.Client.Set(getRedisKey(sid), sesJson, rs.SessionDuration).Err()
	if err != nil {
		return fmt.Errorf("error setting session state:\n%s", err.Error())
	}
	// BUG fix: error message previously misspelled "refference".
	// NOTE(review): the uuid reference never expires (0) — confirm intended.
	err = rs.Client.Set(getRedisUuidKey(sessionUuid), sid.String(), 0).Err()
	if err != nil {
		return fmt.Errorf("error setting session id reference:\n%s", err.Error())
	}
	return nil
}
// Get populates `sessionState` with the data previously saved for the given SessionID,
// sliding the key's expiry forward by rs.SessionDuration in the same pipeline.
func (rs *RedisStore) Get(sid SessionID, sessionState interface{}) error {
	key := getRedisKey(sid)
	pipe := rs.Client.Pipeline()
	getCmd := pipe.Get(key)
	expErr := pipe.Expire(key, rs.SessionDuration).Err()
	_, pipeErr := pipe.Exec()
	// Check the GET result first: a missing key maps to ErrStateNotFound.
	if getCmd.Err() != nil {
		return ErrStateNotFound
	}
	if expErr != nil {
		return fmt.Errorf("error changing expiration of session <%s>:\n%s", sid, expErr.Error())
	}
	if pipeErr != nil {
		return fmt.Errorf("error getting sid <%s>:\n%v", string(sid), pipeErr.Error())
	}
	if err := json.Unmarshal([]byte(getCmd.Val()), sessionState); err != nil {
		return fmt.Errorf("error unmarshaling sessionState: %s", err.Error())
	}
	return nil
}
// GetSessionId resolves a session UUID to its SessionID via the
// "suuid:" reference key written by Save.
// NOTE(review): any redis error (including a missing key) is collapsed
// into ErrUnexpected — confirm callers do not need to distinguish.
func (rs *RedisStore) GetSessionId(sessionUuid uuid.UUID) (SessionID, error) {
	res := rs.Client.Get(getRedisUuidKey(sessionUuid))
	if res.Err() != nil {
		return InvalidSessionID, ErrUnexpected
	}
	return SessionID(res.Val()), nil
}
// Exists determines if the session id is in the session store.
func (rs *RedisStore) Exists(sid SessionID) (bool, error) {
	res := rs.Client.Exists(getRedisKey(sid))
	if err := res.Err(); err != nil {
		return false, err
	}
	// EXISTS returns the number of matching keys; 1 means present.
	return res.Val() == 1, nil
}
// Delete deletes all state data associated with the SessionID from the store.
func (rs *RedisStore) Delete(sid SessionID) error {
	if err := rs.Client.Del(getRedisKey(sid)).Err(); err != nil {
		return fmt.Errorf("error deleting the session <%s>:\n%s", sid, err.Error())
	}
	return nil
}
// getRedisKey() returns the redis key to use for the SessionID.
func getRedisKey(sid SessionID) string {
	// convert the SessionID to a string and add the prefix "sid:" to keep
	// SessionID keys separate from other keys that might end up in this
	// redis instance.
	return "sid:" + sid.String()
}

// getRedisUuidKey() returns the redis key to use for the session UUID.
func getRedisUuidKey(suuid uuid.UUID) string {
	// convert the UUID to a string and add the prefix "suuid:" to keep
	// UUID reference keys separate from other keys that might end up in
	// this redis instance. (Comment fixed: it previously claimed "sid:".)
	return "suuid:" + suuid.String()
}
|
package main
import (
"fmt"
)
// main reads a lower and upper bound from stdin and prints the even
// numbers strictly inside the range.
func main() {
	var min, max int
	fmt.Println("entrez la borne basse")
	fmt.Scanln(&min)
	fmt.Println("entrez la borne haute")
	fmt.Scanln(&max)
	fmt.Println("///////")
	// When min is odd, shift every printed value down by one so only
	// even numbers are emitted.
	offset := 0
	if min%2 != 0 {
		offset = 1
	}
	for i := min + 2; i < max; i += 2 {
		fmt.Println(i - offset)
	}
}
|
/*
* Copyright(C) 2020 EASTCOM-BUPT Inc.
* Author: wangpeng_1@ebupt.com
* Date: 2020-08-21 11:16:48
* LastEditTime: 2020-08-21 17:53:20
* LastEditors: wangpeng_1@ebupt.com
* Description:
*/
package kunxunReproduction
import (
"io/ioutil"
"gopkg.in/yaml.v2"
"fmt"
)
// conf mirrors conf.yaml: a disk signature, a version, and the
// destination endpoint to talk to.
type conf struct {
	Disksign string `yaml:"diskSign"`
	Version string `yaml:"version"`
	// Destination is the remote endpoint (IP plus TCP and POST ports).
	Destination struct {
		Ip string `yaml:"ip"`
		Tcpport string `yaml:"tcpPort"`
		Postport string `yaml:"postPort"`
	}
}
type communicationModel struct{
var c conf
conf := c.getConf()
ip = conf.Destination.Ip
tcpPort = conf.Destination.Tcpport
postPort = conf.Destination.Postport
}
// getConf loads conf.yaml from the working directory into c and returns
// c. Read or parse failures are only logged, leaving the affected fields
// at their zero values (matching the original best-effort behavior).
func (c *conf) getConf() *conf {
	yamlFile, err := ioutil.ReadFile("conf.yaml")
	if err != nil {
		fmt.Println(err.Error())
	}
	// BUG fix: the original called yaml.Unmarsshal (typo), which does not
	// compile; the correct function is yaml.Unmarshal.
	err = yaml.Unmarshal(yamlFile, c)
	if err != nil {
		fmt.Println(err.Error())
	}
	return c
}
func (httpcode *communicationModel) {
} |
package arrays
import "fmt"
// Return the concepts of the Chapter 4, about arrays.
func Call() {
fmt.Println("Arrays")
/*
Array declaration sintax with values:
Check that arrays have a max length predetermined, if the values at
those positions are not informed, they will be evalued with the default
value of the array type.
*/
array := [5]int{10, 20, 30}
fmt.Println("Array declared:", array)
/*
If we inform values beyound the max length, Go compiler will not accept
We can also use "go vet" command to check this concept out
*/
// array2 := [5]int{10, 20, 30, 40, 50, 60}
// fmt.Println("Array declared:", array2)
// We can also declared a array, calculating it's max length
array3 := [...]int{10, 20, 30, 40, 50, 60, 70, 80, 90}
fmt.Println("Array3 declared with calculated length:", array3)
// Declaring a array with specific elements
array4 := [5]int{1: 10, 2: 20}
fmt.Println("Array4 declared with specific elements:", array4)
// Accessing array elements
array5 := [5]int{10, 20, 30, 40, 50}
fmt.Println("Array5 declared:", array5)
// Changing value of array5 at index 2
array5[2] = 35
fmt.Println("Array5 received value 35 at index 2:", array5)
// We can also have a array of pointers, the new bult-in function allocate
// memory to create the pointer
array6 := [5]*int{0: new(int), 1: new(int)}
fmt.Println("Array6, array of int pointers:", array6)
// Atribuing values to the pointers
*array6[0] = 10
*array6[1] = 20
fmt.Println("Array6, received 10 at index 0 and 20 at index 1:\n", array6)
/*
This is only to be clear what a *(pointer) represent
and a &(ampersand) represent
*/
fmt.Println("*array6[1], represent memory value:", *array6[1])
fmt.Println("&array6[1], represent memory adress:", &array6[1])
/*
In Go, arrays are a value, with that in mind we can use them in
operations of atribuation, only arrays of same type can be atribuated
one to another
*/
var array7 [5]string
array8 := [5]string{"Red", "Blue", "Green", "Yellow", "Pink"}
array7 = array8
// We have made a complete copy of array8, check the memory adresses
fmt.Println("array7 index 1 memory adress:", &array7[1])
fmt.Println("array8 index 1 memory adress:", &array8[1])
/*
Error of atribuiting arrays with different types, Go undestand
that [4]string and [5]string are different types
*/
// var array10 [4]string
// array9 := [5]string{"Red", "Blue", "Green", "Yellow", "Pink"}
// array10 = array9
// Obs: Copying a array of pointers, copy the pointers values
var array11 [3]*string
array12 := [3]*string{new(string), new(string), new(string)}
*array12[0] = "Red"
*array12[1] = "Green"
*array12[2] = "Blue"
array11 = array12
// Two arrays pointing to the same value but different adresses
fmt.Println(&array12[0], &array11[0])
// Declare bidimensional arrays (matrix) 4x2
var array13 [4][2]int
fmt.Println(array13)
// Use a literal array to declare and initialize a bidimensional array
array14 := [4][2]int{{10, 11}, {20, 21}, {30, 31}, {40, 41}}
fmt.Println(array14)
// Declare and initialize the values of the external array
array15 := [4][2]int{1: {20, 21}, 2: {40, 41}}
fmt.Println(array15)
// Declare and initialize the values of the internal array
array16 := [4][2]int{1: {0: 21}, 3: {1: 41}}
fmt.Println(array16)
// Accessing a bidimensional array value
fmt.Println(array16[1][0])
// Copying bidimensional arrays 2x2
var matrix1 [2][2]int
var matrix2 [2][2]int
matrix2[0][0] = 10
matrix2[0][1] = 20
matrix2[1][0] = 30
matrix2[1][1] = 40
matrix1 = matrix2
fmt.Printf("matrix1: %v\nmatrix2: %v\n", matrix1, matrix2)
// Copy a index to another array of the same type
var matrix3 [2]int = matrix1[1]
fmt.Println(matrix3)
/*
Passing a array between functions can be a operation who use a lot of
resources, cause functions pass variables by value. If your variable are
a array, that mean your array idenpendent of it size, will be copy and
passed to the function.
*/
// array with a million elements
var array1e6 [1e6]int
// This will pass that big array and create a copy in the function 8MB
foo(array1e6)
// This will pass a pointer of that big array to the function 8bytes
fooPointingIsBetter(&array1e6)
/*
Slices deal with those problems in a inherint way
*/
}
// Warning this can be very dangerous ;)
// foo receives the array by value, so the full 8MB backing data is
// copied on every call.
func foo(array [1e6]int) {
	fmt.Println("Passed a 8MB array and created a copy, results in 16MB")
}
// Pointing is a better way, but beware that this variable is shared
// and that can be tricky
func fooPointingIsBetter(array *[1e6]int) {
	fmt.Println("Passed a 8bytes pointer of a 8MB array")
}
|
package main
import (
	_ "fmt"
	"log"

	"go_code/execrise/redisexec/redisexec03/utils"
)
func main() {
conn := utils.Pool.Get()
defer conn.Close()
conn.Do("set", "k1", "v1")
conn.Do("flushdb")
} |
package main
import (
"fmt"
"sync"
)
// wg waits for the producer and both consumer goroutines.
var wg sync.WaitGroup
// once ensures ch2 is closed at most once across the two consumers.
var once sync.Once
// senttoch is the producer: ch1 is a send-only channel here.
func senttoch(ch1 chan<- int) {
	for i := 0; i < 100; i++ {
		ch1 <- i
	}
	// After writing all values, close the channel; receivers can still
	// drain it but no further sends are allowed.
	close(ch1)
	wg.Done()
}
// getfromch is a consumer: ch1 is receive-only, ch2 is send-only.
// Two instances run concurrently (see main).
func getfromch(ch1 <-chan int, ch2 chan<- int) {
	fmt.Println("start get")
	// Drain ch1 with an explicit receive loop.
	for {
		tmp, ok := <-ch1
		// ok is false once ch1 is closed and empty.
		if !ok {
			break
		}
		ch2 <- tmp * tmp
		fmt.Println("tmp=", tmp)
	}
	wg.Done()
	// NOTE(review): closing ch2 when the FIRST consumer finishes is racy —
	// the other consumer may be between its receive and its send, and a
	// send on a closed channel panics. Consider closing ch2 only after all
	// consumers are done (e.g. a dedicated WaitGroup + closer goroutine).
	once.Do(func() {
		close(ch2)
		fmt.Println("ch2 is closed")
	})
}
func main() {
	ch1 := make(chan int, 100)
	ch2 := make(chan int, 100)
	wg.Add(3)
	// one producer
	go senttoch(ch1)
	// two consumers
	go getfromch(ch1, ch2)
	go getfromch(ch1, ch2)
	// Drain ch2 with for-range; the loop ends when ch2 is closed.
	for v := range ch2 {
		fmt.Println(v)
	}
	wg.Wait()
}
|
package controllers
import (
"encoding/json"
"net/http"
"peribahasa/app/models"
"peribahasa/app/utils"
"strconv"
"github.com/gorilla/mux"
)
// CreateAsal controller: decodes an Asal from the request body and
// persists it, responding with the stored record.
var CreateAsal = func(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	record := &models.Asal{}
	if err := json.NewDecoder(r.Body).Decode(record); err != nil {
		utils.Respond(w, utils.Message(false, "Invalid request payload"))
		return
	}
	if err := record.Create(); err != nil {
		utils.Respond(w, utils.Message(false, err.Error()))
		return
	}
	resp := utils.Message(true, "Success")
	resp["data"] = record
	utils.Respond(w, resp)
}
// GetAsal Controller: fetches the Asal identified by the {id} route var.
var GetAsal = func(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.Atoi(mux.Vars(r)["id"])
	if err != nil {
		utils.Respond(w, utils.Message(false, err.Error()))
		return
	}
	record := &models.Asal{}
	if err := record.Get(id); err != nil {
		utils.Respond(w, utils.Message(false, err.Error()))
		return
	}
	resp := utils.Message(true, "Success")
	resp["data"] = record
	utils.Respond(w, resp)
}
// UpdateAsal Controller
var UpdateAsal = func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
asal := &models.Asal{}
defer r.Body.Close()
err := json.NewDecoder(r.Body).Decode(asal)
if err != nil {
utils.Respond(w, utils.Message(false, "Invalid request payload"))
return
}
id, err := strconv.Atoi(vars["id"])
if err != nil {
utils.Respond(w, utils.Message(false, err.Error()))
return
}
if err := asal.Update(id); err != nil {
utils.Respond(w, utils.Message(false, err.Error()))
return
}
asal.ID = uint(id)
resp := utils.Message(true, "Success")
resp["data"] = asal
utils.Respond(w, resp)
}
// DeleteAsal Controller: removes the Asal identified by the {id} route var.
var DeleteAsal = func(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.Atoi(mux.Vars(r)["id"])
	if err != nil {
		utils.Respond(w, utils.Message(false, err.Error()))
		return
	}
	record := &models.Asal{}
	if err := record.Delete(id); err != nil {
		utils.Respond(w, utils.Message(false, err.Error()))
		return
	}
	utils.Respond(w, utils.Message(true, "Deleted"))
}
|
package interpreter
import "errors"
import actor "github.com/filecoin-project/specs/systems/filecoin_vm/actor"
import addr "github.com/filecoin-project/specs/systems/filecoin_vm/actor/address"
import market "github.com/filecoin-project/specs/systems/filecoin_markets/storage_market"
import spc "github.com/filecoin-project/specs/systems/filecoin_blockchain/storage_power_consensus"
import sysactors "github.com/filecoin-project/specs/systems/filecoin_vm/sysactors"
import vmr "github.com/filecoin-project/specs/systems/filecoin_vm/runtime"
var (
ErrActorNotFound = errors.New("Actor Not Found")
)
// CodeCIDs for system actors
var (
InitActorCodeCID = actor.CodeCID("filecoin/1.0/InitActor")
CronActorCodeCID = actor.CodeCID("filecoin/1.0/CronActor")
AccountActorCodeCID = actor.CodeCID("filecoin/1.0/AccountActor")
StoragePowerActorCodeCID = actor.CodeCID("filecoin/1.0/StoragePowerActor")
StorageMinerActorCodeCID = actor.CodeCID("filecoin/1.0/StorageMinerActor")
StorageMarketActorCodeCID = actor.CodeCID("filecoin/1.0/StorageMarketActor")
PaymentChannelActorCodeCID = actor.CodeCID("filecoin/1.0/PaymentChannelActor")
)
var staticActorCodeRegistry = &actorCodeRegistry{}
// actorCodeRegistry maps actor code CIDs to their in-process
// (pure Go) implementations.
type actorCodeRegistry struct {
	code map[actor.CodeCID]vmr.ActorCode
}
// _registerActor maps a code CID to its actor implementation.
// The map is created lazily so a zero-value actorCodeRegistry (such as
// the package-level staticActorCodeRegistry, which is constructed with
// no map) does not panic with "assignment to entry in nil map" on the
// first registration.
func (r *actorCodeRegistry) _registerActor(cid actor.CodeCID, a vmr.ActorCode) {
	if r.code == nil {
		r.code = make(map[actor.CodeCID]vmr.ActorCode)
	}
	r.code[cid] = a
}
// _loadActor returns the actor implementation registered for cid,
// or ErrActorNotFound when no implementation is known.
func (r *actorCodeRegistry) _loadActor(cid actor.CodeCID) (vmr.ActorCode, error) {
	if impl, found := r.code[cid]; found {
		return impl, nil
	}
	return nil, ErrActorNotFound
}
// RegisterActor adds an actor implementation to the global static registry.
func RegisterActor(cid actor.CodeCID, actor vmr.ActorCode) {
	staticActorCodeRegistry._registerActor(cid, actor)
}
// LoadActor looks up an actor implementation in the global static registry,
// returning ErrActorNotFound when the CID is unregistered.
func LoadActor(cid actor.CodeCID) (vmr.ActorCode, error) {
	return staticActorCodeRegistry._loadActor(cid)
}
// init is called in Go during initialization of a program.
// this is an idiomatic way to do this. Implementations should approach this
// however they wish. The point is to initialize a static registry with
// built in pure types that have the code for each actor. Once we have
// a way to load code from the StateTree, use that instead.
func init() {
	_registerBuiltinActors()
}
// _registerBuiltinActors populates the static registry with the built-in
// system actors and wires the cron actor's target list.
func _registerBuiltinActors() {
	// TODO
	cron := &sysactors.CronActorCode_I{}
	RegisterActor(InitActorCodeCID, &sysactors.InitActorCode_I{})
	RegisterActor(CronActorCodeCID, cron)
	RegisterActor(AccountActorCodeCID, &sysactors.AccountActorCode_I{})
	RegisterActor(StoragePowerActorCodeCID, &spc.StoragePowerActorCode_I{})
	RegisterActor(StorageMarketActorCodeCID, &market.StorageMarketActorCode_I{})
	// wire in CRON actions.
	// TODO: there's probably a better place to put this, but for now, do it here.
	cron.Actors_ = append(cron.Actors_, addr.StoragePowerActorAddr)
	cron.Actors_ = append(cron.Actors_, addr.StorageMarketActorAddr)
}
|
package watcher
import (
"os"
)
// fileEventLog is an EventLog implementation that appends serialized
// events to an open file.
type fileEventLog struct {
	file *os.File
}
// Emit writes the YAML-serialized event to the underlying file,
// followed by a newline separator. Previously the newline was written
// even after a failed event write and its own error was silently
// dropped; both errors are now reported.
func (f fileEventLog) Emit(event Event) error {
	if _, err := f.file.Write(event.MarshalYAML()); err != nil {
		return err
	}
	_, err := f.file.WriteString("\n")
	return err
}
// Close implements EventLog. It is a no-op: the *os.File was supplied
// by the caller (see NewFileEventLog), who remains responsible for
// closing it.
func (f fileEventLog) Close() error {
	return nil
}
// NewFileEventLog returns an EventLog that appends events to file.
// The caller keeps ownership of file and must close it.
func NewFileEventLog(file *os.File) (EventLog, error) {
	return &fileEventLog{file: file}, nil
}
|
package main
import (
"fmt"
"time"
)
// Task represents one unit of work, abstracted as a niladic function
// returning an error.
type Task struct {
	f func() error // the function bound to this task
}
// NewTask wraps fx in a Task.
func NewTask(fx func() error) *Task {
	return &Task{f: fx}
}
// Execute runs the task by invoking its bound function.
// Any error returned by the function is discarded.
func (t *Task) Execute() {
	t.f() // invoke the function bound to this task
}
// Pool is a fixed-size goroutine (worker) pool.
type Pool struct {
	// InternalInter is the internal queue drained by the workers.
	InternalInter chan *Task
	// ExternalInter is the public entry point for submitting tasks.
	ExternalInter chan *Task
	// WorkNum is the number of worker goroutines started by Run.
	WorkNum int
}
// NewPool builds a Pool that will run cap concurrent workers.
// Both channels are unbuffered.
func NewPool(cap int) *Pool {
	return &Pool{
		InternalInter: make(chan *Task),
		ExternalInter: make(chan *Task),
		WorkNum:       cap,
	}
}
// worker is the loop run by each pool goroutine: it drains tasks from
// the internal queue and executes them until the channel is closed.
func (p *Pool) worker(workId int) {
	// Keep pulling tasks from the internal queue.
	for task := range p.InternalInter {
		// Run the task we just received.
		task.Execute()
		fmt.Println("workid:", workId, " 执行任务完毕")
	}
}
// Run starts the pool: it launches WorkNum worker goroutines, then
// forwards every task submitted on ExternalInter to the internal queue.
// It blocks until ExternalInter is closed.
func (p *Pool) Run() {
	// 1. Start the fixed number of workers.
	for i := 0; i < p.WorkNum; i++ {
		go p.worker(i)
	}
	// 2. Forward tasks arriving on the external entry channel.
	for task := range p.ExternalInter {
		p.InternalInter <- task
	}
	// 3. TODO: ExternalInter should be closed when submission is done.
	// 4. TODO: InternalInter should be closed so workers can exit.
}
// printMyName prints a fixed greeting and always reports success.
func printMyName() error {
	fmt.Println("my name is xhaoge")
	return nil
}
// CountAAndB prints a fixed arithmetic line and always reports success.
func CountAAndB() error {
	fmt.Println("1 + 2 = 3")
	return nil
}
// main demonstrates the pool: it submits tasks from several goroutines
// and then blocks forever in p.Run() (nothing ever closes the channels).
func main() {
	fmt.Println("this is pool test")
	// Create a task that prints the current time then sleeps a second.
	t := NewTask(func() error {
		fmt.Println(time.Now())
		time.Sleep(time.Second)
		return nil
	})
	printTask := NewTask(printMyName)
	// Create a pool with at most four workers.
	p := NewPool(4)
	go func() {
		p.ExternalInter <- printTask
	}()
	// Feed the time-printing task into the pool forever.
	go func() {
		for {
			p.ExternalInter <- t
		}
	}()
	go func() {
		t := NewTask(CountAAndB)
		p.ExternalInter <- t
	}()
	// Start the pool (blocks: ExternalInter is never closed).
	p.Run()
}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package leaderelect
import (
"time"
"github.com/MatrixAINetwork/go-matrix/ca"
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/params/manversion"
)
// handleMsg dispatches an incoming message to the handler matching its
// concrete type. nil payloads and unknown types are logged and dropped.
func (self *controller) handleMsg(data interface{}) {
	if nil == data {
		log.Warn(self.logInfo, "消息处理", "收到nil消息")
		return
	}
	// A single type switch both asserts and binds the message, removing
	// the redundant second type assertion the original performed in
	// every case.
	switch msg := data.(type) {
	case *startControllerMsg:
		self.handleStartMsg(msg)
	case *mc.BlockPOSFinishedNotify:
		self.handleBlockPOSFinishedNotify(msg)
	case *mc.HD_ReelectInquiryReqMsg:
		self.handleInquiryReq(msg)
	case *mc.HD_ReelectInquiryRspMsg:
		self.handleInquiryRsp(msg)
	case *mc.HD_ReelectLeaderReqMsg:
		self.handleRLReq(msg)
	case *mc.HD_ConsensusVote:
		self.handleRLVote(msg)
	case *mc.HD_ReelectBroadcastMsg:
		self.handleBroadcastMsg(msg)
	case *mc.HD_ReelectBroadcastRspMsg:
		self.handleBroadcastRsp(msg)
	default:
		log.Warn(self.logInfo, "消息处理", "未知消息类型")
	}
}
// SetSelfAddress records this node's deposit address (addr) and sign
// address (nodeAddr) on both the shared dc state and the local cache.
func (self *controller) SetSelfAddress(addr common.Address, nodeAddr common.Address) {
	self.dc.selfAddr = addr
	self.dc.selfNodeAddr = nodeAddr
	self.selfCache.selfAddr = addr
	self.selfCache.selfNodeAddr = nodeAddr
}
// handleStartMsg boots the leader-election controller for a new height:
// it loads this node's addresses, analyses the parent block state, and
// either idles (broadcast blocks, non-validator role) or arms the
// consensus-turn timer and publishes the current leader.
func (self *controller) handleStartMsg(msg *startControllerMsg) {
	if nil == msg || nil == msg.parentHeader {
		log.Warn(self.logInfo, "开始消息处理", ErrParamsIsNil)
		return
	}
	// NOTE(review): headers at or above version Gamma are ignored here —
	// presumably handled by a newer controller implementation; confirm.
	if manversion.VersionCmp(string(msg.parentHeader.Version), manversion.VersionGamma) >= 0 {
		log.Trace(self.logInfo, "开始消息处理", "版本号不匹配, 不处理消息", "header version", string(msg.parentHeader.Version))
		return
	}
	a0Address := ca.GetDepositAddress()
	nodeAddress := ca.GetSignAddress()
	self.SetSelfAddress(a0Address, nodeAddress)
	log.Debug(self.logInfo, "开始消息处理", "start", "高度", self.dc.number, "preLeader", msg.parentHeader.Leader.Hex(), "header time", msg.parentHeader.Time.Int64())
	if err := self.dc.AnalysisState(msg.parentHeader, msg.parentStateDB); err != nil {
		log.Error(self.logInfo, "开始消息处理", "分析状态树信息错误", "err", err)
		return
	}
	// Only validators participate in leader election.
	if self.dc.role != common.RoleValidator {
		log.Debug(self.logInfo, "开始消息处理", "身份错误, 不是验证者", "高度", self.dc.number)
		self.mp.SaveParentHeader(msg.parentHeader)
		return
	}
	// Broadcast blocks do not run the election timer.
	if self.dc.bcInterval.IsBroadcastNumber(self.dc.number) {
		log.Debug(self.logInfo, "开始消息处理", "区块为广播区块,不开启定时器")
		self.dc.state = stIdle
		self.publishLeaderMsg()
		self.mp.SaveParentHeader(msg.parentHeader)
		self.dc.state = stWaiting
		return
	}
	if self.dc.turnTime.SetBeginTime(mc.ConsensusTurnInfo{}, msg.parentHeader.Time.Int64()) {
		self.mp.SaveParentHeader(msg.parentHeader)
		// Only the first consensus turn (re)computes state and arms the timer.
		if isFirstConsensusTurn(self.ConsensusTurn()) {
			curTime := time.Now().Unix()
			st, remainTime, reelectTurn := self.dc.turnTime.CalState(mc.ConsensusTurnInfo{}, curTime)
			log.Debug(self.logInfo, "开始消息处理", "完成", "状态计算结果", st.String(), "剩余时间", remainTime, "重选轮次", reelectTurn)
			self.dc.state = st
			self.dc.curReelectTurn = 0
			self.setTimer(remainTime, self.timer)
			if st == stPos {
				self.processPOSState()
			} else if st == stReelect {
				self.startReelect(reelectTurn)
			}
		}
	}
	// Publish the leader identity.
	self.publishLeaderMsg()
}
// handleBlockPOSFinishedNotify caches a POS-finished notification and
// re-runs the POS state check, which may advance the controller to the
// mining-wait state.
func (self *controller) handleBlockPOSFinishedNotify(msg *mc.BlockPOSFinishedNotify) {
	if nil == msg || nil == msg.Header {
		log.Warn(self.logInfo, "POS完成通知消息处理", ErrParamsIsNil)
		return
	}
	// Cache failures are silently ignored; only success is logged.
	if err := self.mp.SavePOSNotifyMsg(msg); err == nil {
		log.Debug(self.logInfo, "POS完成通知消息处理", "缓存成功", "高度", msg.Number, "leader", msg.Header.Leader, "leader轮次", msg.ConsensusTurn.String())
	}
	self.processPOSState()
}
// timeOutHandle fires when the turn timer expires: it recomputes the
// turn state from the current time, logs the timeout by current state,
// re-arms the timer, and kicks off leader re-election.
func (self *controller) timeOutHandle() {
	curTime := time.Now().Unix()
	st, remainTime, reelectTurn := self.dc.turnTime.CalState(self.dc.curConsensusTurn, curTime)
	switch self.State() {
	case stPos:
		log.Warn(self.logInfo, "超时事件", "POS未完成", "轮次", self.curTurnInfo(), "高度", self.Number(),
			"状态计算结果", st.String(), "下次超时时间", remainTime, "计算的重选轮次", reelectTurn,
			"轮次开始时间", self.dc.turnTime.GetBeginTime(*self.ConsensusTurn()), "leader", self.dc.GetConsensusLeader().Hex())
	case stReelect:
		log.Warn(self.logInfo, "超时事件", "重选未完成", "轮次", self.curTurnInfo(), "高度", self.Number(),
			"状态计算结果", st.String(), "下次超时时间", remainTime, "计算的重选轮次", reelectTurn,
			"轮次开始时间", self.dc.turnTime.GetBeginTime(*self.ConsensusTurn()), "master", self.dc.GetReelectMaster().Hex())
	default:
		// Timeouts are only meaningful in POS or re-elect states.
		log.Error(self.logInfo, "超时事件", "当前状态错误", "state", self.State().String(), "轮次", self.curTurnInfo(), "高度", self.Number(),
			"轮次开始时间", self.dc.turnTime.GetBeginTime(*self.ConsensusTurn()), "当前时间", curTime)
		return
	}
	self.setTimer(remainTime, self.timer)
	self.dc.state = st
	self.startReelect(reelectTurn)
}
// processPOSState advances the controller from the POS phase to the
// mining-wait phase once a POS-finished notification for the current
// leader and turn has been cached. It is a no-op in any other state.
func (self *controller) processPOSState() {
	if self.State() != stPos {
		log.Debug(self.logInfo, "执行检查POS状态", "状态不正常,不执行", "当前状态", self.State().String())
		return
	}
	if _, err := self.mp.GetPOSNotifyMsg(self.dc.GetConsensusLeader(), self.dc.curConsensusTurn); err != nil {
		log.Debug(self.logInfo, "执行检查POS状态", "获取POS完成消息失败", "err", err)
		return
	}
	log.Debug(self.logInfo, "POS完成", "状态切换为<挖矿结果等待阶段>")
	// Cancel the pending turn timer and enter the mining-wait state.
	self.setTimer(0, self.timer)
	self.dc.state = stMining
}
|
package main
import (
"github.com/BurntSushi/toml"
"log"
"path"
)
// OptionalLimits holds per-interval snapshot retention limits as
// pointers so TOML decoding can distinguish "not set" (nil) from an
// explicit zero.
type OptionalLimits struct {
	Hourly *int
	Daily *int
	Weekly *int
	Monthly *int
}
// Config mirrors the TOML configuration file layout: global defaults
// plus a list of snapshot definitions, each with optional remotes.
type Config struct {
	// Defaults apply to every snapshot unless overridden locally.
	Defaults struct {
		Limits OptionalLimits
		Remote struct {
			Limits OptionalLimits
		}
	}
	// Snapshot lists the subvolumes to snapshot and where to keep them.
	Snapshot []struct {
		Directory string
		Destination string
		Limits OptionalLimits
		// Remote describes ssh targets that receive copies of snapshots.
		Remote []struct {
			Host string
			Port string
			User string
			Exec string
			Directory string
			Limits OptionalLimits
		}
	}
}
// parseFile decodes the TOML file at configFile into a Config.
func parseFile(configFile string) (config Config, err error) {
	_, err = toml.DecodeFile(configFile, &config)
	return config, err
}
// parseConfig turns a decoded Config into concrete Subvolume values,
// merging the default limits into each snapshot and remote, filling in
// default port (22) and remote executable ("incrbtrfs"), and aborting
// via log.Fatalln when a remote lacks a directory.
func parseConfig(config Config) (subvolumes []Subvolume) {
	var localDefaults Limits
	localDefaults = localDefaults.Merge(config.Defaults.Limits)
	remoteDefaults := localDefaults.Merge(config.Defaults.Remote.Limits)
	for _, snapshot := range config.Snapshot {
		var subvolume Subvolume
		var destination string
		subvolume.Directory = snapshot.Directory
		// Default destination: <directory>/<subDir> when none is given.
		if snapshot.Destination == "" {
			destination = path.Join(subvolume.Directory, subDir)
		} else {
			destination = snapshot.Destination
		}
		subvolume.SnapshotsLoc = SnapshotsLoc{
			Directory: destination,
			Limits: localDefaults.Merge(snapshot.Limits)}
		for _, remote := range snapshot.Remote {
			var remoteSnapshotsLoc RemoteSnapshotsLoc
			remoteSnapshotsLoc.User = remote.User
			remoteSnapshotsLoc.Host = remote.Host
			remoteSnapshotsLoc.Port = remote.Port
			if remoteSnapshotsLoc.Port == "" {
				remoteSnapshotsLoc.Port = "22"
			}
			remoteSnapshotsLoc.Exec = remote.Exec
			if remoteSnapshotsLoc.Exec == "" {
				remoteSnapshotsLoc.Exec = "incrbtrfs"
			}
			if remote.Directory == "" {
				log.Fatalln("No remote directory specified for snapshot '" + subvolume.Directory + "'")
			}
			// Remote limits layer on top of the remote defaults, then the
			// snapshot-level limits, then the remote's own limits.
			remoteSnapshotsLoc.SnapshotsLoc = SnapshotsLoc{
				Directory: remote.Directory,
				Limits: remoteDefaults.Merge(snapshot.Limits, remote.Limits)}
			subvolume.Remotes = append(subvolume.Remotes, remoteSnapshotsLoc)
		}
		subvolumes = append(subvolumes, subvolume)
	}
	return subvolumes
}
|
package html5_test
import (
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// End-to-end rendering tests for AsciiDoc cross references: each case
// renders a small document and compares against the exact expected
// HTML5 output.
var _ = Describe("cross references", func() {
	// Shorthand syntax: [[id]] / [#id] anchors and <<id>> / <<id,label>> links.
	Context("using shorthand syntax", func() {
		It("with custom id to section above with rich title", func() {
			source := `[[thetitle]]
== a *title*
with some content linked to <<thetitle>>!`
			expected := `<div class="sect1">
<h2 id="thetitle">a <strong>title</strong></h2>
<div class="sectionbody">
<div class="paragraph">
<p>with some content linked to <a href="#thetitle">a <strong>title</strong></a>!</p>
</div>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("with custom id to section afterwards", func() {
			source := `see <<thetitle>>
[#thetitle]
== a *title*
`
			expected := `<div class="paragraph">
<p>see <a href="#thetitle">a <strong>title</strong></a></p>
</div>
<div class="sect1">
<h2 id="thetitle">a <strong>title</strong></h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("with custom id and label", func() {
			source := `[[thetitle]]
== a title
with some content linked to <<thetitle,a label to the title>>!`
			expected := `<div class="sect1">
<h2 id="thetitle">a title</h2>
<div class="sectionbody">
<div class="paragraph">
<p>with some content linked to <a href="#thetitle">a label to the title</a>!</p>
</div>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to paragraph defined later in the document", func() {
			source := `a reference to <<a-paragraph>>
[#a-paragraph]
.another paragraph
some content`
			expected := `<div class="paragraph">
<p>a reference to <a href="#a-paragraph">another paragraph</a></p>
</div>
<div id="a-paragraph" class="paragraph">
<div class="title">another paragraph</div>
<p>some content</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to section defined later in the document", func() {
			source := `a reference to <<section>>
[#section]
== A section with a link to https://example.com
some content`
			expected := `<div class="paragraph">
<p>a reference to <a href="#section">A section with a link to https://example.com</a></p>
</div>
<div class="sect1">
<h2 id="section">A section with a link to <a href="https://example.com" class="bare">https://example.com</a></h2>
<div class="sectionbody">
<div class="paragraph">
<p>some content</p>
</div>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to term in labeled list", func() {
			source := `[[a_term]]term::
// a comment
Here's a reference to the definition of <<a_term>>.`
			expected := `<div class="dlist">
<dl>
<dt class="hdlist1"><a id="a_term"></a>term</dt>
</dl>
</div>
<div class="paragraph">
<p>Here&#8217;s a reference to the definition of <a href="#a_term">term</a>.</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("invalid section reference", func() {
			source := `[[thetitle]]
== a title
with some content linked to <<thewrongtitle>>!`
			expected := `<div class="sect1">
<h2 id="thetitle">a title</h2>
<div class="sectionbody">
<div class="paragraph">
<p>with some content linked to <a href="#thewrongtitle">[thewrongtitle]</a>!</p>
</div>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("natural ref to section with plaintext title with trailing spaces", func() {
			source := `see <<Section 1>>.
== Section 1 ` // trailing spaces in the title
			expected := `<div class="paragraph">
<p>see <a href="#_section_1">Section 1</a>.</p>
</div>
<div class="sect1">
<h2 id="_section_1">Section 1</h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("natural ref to section with rich title", func() {
			source := `see <<Section *1*>>.
== Section *1*`
			expected := `<div class="paragraph">
<p>see <a href="#_section_1">Section <strong>1</strong></a>.</p>
</div>
<div class="sect1">
<h2 id="_section_1">Section <strong>1</strong></h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("natural ref to section from within list element", func() {
			source := `== Somewhere Else
term:: see <<Somewhere Else>>.
`
			expected := `<div class="sect1">
<h2 id="_somewhere_else">Somewhere Else</h2>
<div class="sectionbody">
<div class="dlist">
<dl>
<dt class="hdlist1">term</dt>
<dd>
<p>see <a href="#_somewhere_else">Somewhere Else</a>.</p>
</dd>
</dl>
</div>
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("natural ref to section afterwards from within list element", func() {
			source := `term:: see <<Somewhere Else>>.
== Somewhere Else`
			expected := `<div class="dlist">
<dl>
<dt class="hdlist1">term</dt>
<dd>
<p>see <a href="#_somewhere_else">Somewhere Else</a>.</p>
</dd>
</dl>
</div>
<div class="sect1">
<h2 id="_somewhere_else">Somewhere Else</h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to image with title", func() {
			source := `see <<thecookie>>
[#thecookie]
.A cookie
image::cookie.jpg[]`
			expected := `<div class="paragraph">
<p>see <a href="#thecookie">A cookie</a></p>
</div>
<div id="thecookie" class="imageblock">
<div class="content">
<img src="cookie.jpg" alt="cookie">
</div>
<div class="title">Figure 1. A cookie</div>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to image in table cell", func() {
			source := `a reference to <<cookie>>
|===
a|
[#cookie]
.A cookie
image::cookie.png[Cookie]
|===`
			expected := `<div class="paragraph">
<p>a reference to <a href="#cookie">A cookie</a></p>
</div>
<table class="tableblock frame-all grid-all stretch">
<colgroup>
<col style="width: 100%;">
</colgroup>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><div class="content"><div id="cookie" class="imageblock">
<div class="content">
<img src="cookie.png" alt="Cookie">
</div>
<div class="title">Figure 1. A cookie</div>
</div></div></td>
</tr>
</tbody>
</table>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
	})
	// Macro syntax: xref:location[label].
	Context("using macro syntax", func() {
		It("to other doc with plain text location and rich label", func() {
			source := `some content linked to xref:another-doc.adoc[*another doc*]!`
			expected := `<div class="paragraph">
<p>some content linked to <a href="another-doc.html"><strong>another doc</strong></a>!</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to other doc with document attribute in location and label", func() {
			source := `:foo: foo-doc
some content linked to xref:{foo}.adoc[another_doc()]!`
			expected := `<div class="paragraph">
<p>some content linked to <a href="foo-doc.html">another_doc()</a>!</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to other section with empty label", func() {
			source := `some content linked to xref:section_a[]!`
			expected := `<div class="paragraph">
<p>some content linked to <a href="#section_a">[section_a]</a>!</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
		It("to other doc with empty label", func() {
			source := `some content linked to xref:foo.adoc[]!`
			expected := `<div class="paragraph">
<p>some content linked to <a href="foo.html">foo.html</a>!</p>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
	})
})
|
package main
import (
"database/sql"
"log"
"net/http"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/ksbeasle/GoLang/pkg/models"
"github.com/ksbeasle/GoLang/pkg/models/mysql"
)
/*Run the command in this comment to run app
***************IMPORTANT******************
******************************************
go run $(ls -1 *.go | grep -v _test.go)
******************************************
******************************************
*/
//Dependencies for use across the entire application
//We changed vgmodel to an interface in order to use mocks for testing
//As long as those methods are satisfied everything should run fine
type application struct {
	// infoLog receives informational messages; errorLog receives errors.
	infoLog  *log.Logger
	errorLog *log.Logger
	// vgmodel abstracts the video-game data store so tests can inject mocks.
	vgmodel  interface {
		Insert(title string, genre string, rating int, platform string, releaseDate string) (int, error)
		All() ([]*models.Game, error)
		Get(id int) (*models.Game, error)
	}
}
// main wires the loggers, database, and model into the application
// struct and serves HTTP on :8080 until the server exits.
func main() {
	//logs
	infoLog := log.New(os.Stdout, "info: ", log.Ltime|log.Lshortfile)
	errorLog := log.New(os.Stdout, "error: ", log.Ltime|log.Lshortfile)
	//Initialize Database
	db, err := startDB()
	if err != nil {
		errorLog.Fatal(err)
	}
	defer db.Close()
	//Dependencies
	app := &application{
		infoLog:  infoLog,
		errorLog: errorLog,
		vgmodel:  &mysql.VGModel{DB: db},
	}
	//Created this struct for a cleaner look
	serve := &http.Server{
		ErrorLog: errorLog,
		Addr:     ":8080",
		Handler:  app.routes(),
	}
	//Start server
	infoLog.Println("STARTING SERVER ON PORT 8080: ...")
	err = serve.ListenAndServe()
	if err != nil {
		errorLog.Fatal(err)
	}
}
//Database - Connect to DB
// startDB opens the MySQL connection pool for the videogames database
// and verifies connectivity with a ping. On a failed ping the pool is
// now closed before returning, so the *sql.DB is not leaked.
// NOTE(review): the DSN credentials are hard-coded; consider moving
// them to configuration or an environment variable.
func startDB() (*sql.DB, error) {
	db, err := sql.Open("mysql", "web1:pass@tcp(localhost:3306)/videogames")
	if err != nil {
		return nil, err
	}
	if err = db.Ping(); err != nil {
		// Release the pool we just opened; callers only receive the error.
		db.Close()
		return nil, err
	}
	log.Println("DATABASE SUCCESSFULLY CONNECTED")
	return db, nil
}
|
// Copyright © 2020 Weald Technology Trading
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bitcask
import (
"encoding/gob"
"path/filepath"
"github.com/prologic/bitcask"
"github.com/wealdtech/walletd/core"
lua "github.com/yuin/gopher-lua"
)
// init registers the Lua value types with gob so they can round-trip
// through the byte-oriented store.
// NOTE(review): lua.LTable is registered by value here while gopher-lua
// generally passes tables as *lua.LTable — confirm stored table values
// gob-encode as intended.
func init() {
	gob.Register(lua.LNumber(0))
	gob.Register(lua.LString(""))
	gob.Register(lua.LBool(false))
	gob.Register(lua.LTable{})
}
// Store holds key/value pairs in a bitcask database.
type Store struct {
	// db is the open bitcask handle backing this store.
	db *bitcask.Bitcask
}
// New creates a new bitcask storage rooted at <base>/bitcask, allowing
// keys up to 2 KiB and values up to 1 MiB.
func New(base string) (*Store, error) {
	dbPath := filepath.Join(base, "bitcask")
	db, err := bitcask.Open(dbPath, bitcask.WithMaxKeySize(2048), bitcask.WithMaxValueSize(1048576))
	if err != nil {
		return nil, err
	}
	return &Store{db: db}, nil
}
// Fetch fetches the value for a given key, translating bitcask's
// key-not-found error into core.ErrNotFound.
func (s *Store) Fetch(key []byte) ([]byte, error) {
	value, err := s.db.Get(key)
	switch {
	case err == nil:
		return value, nil
	case err == bitcask.ErrKeyNotFound:
		return nil, core.ErrNotFound
	default:
		return nil, err
	}
}
// Store stores the value for a given key, then syncs the database so
// the write is flushed to stable storage.
func (s *Store) Store(key []byte, value []byte) error {
	if err := s.db.Put(key, value); err != nil {
		return err
	}
	return s.db.Sync()
}
|
package BLC
// PHBBlockData is a network payload pairing a sender address with a
// serialized block. The encoding of PHBBlock is not visible here —
// TODO confirm against the serializer used by the sender.
type PHBBlockData struct {
	// PHBAddrFrom is the address of the sending node.
	PHBAddrFrom string
	// PHBBlock holds the serialized block bytes.
	PHBBlock []byte
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package macaroon
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"storj.io/common/testcontext"
)
// TestSerializeParseRestrictAndCheck verifies that an API key survives
// a serialize/parse round trip, that restricting it changes the tail
// but not the head, and that caveat path restrictions are enforced.
func TestSerializeParseRestrictAndCheck(t *testing.T) {
	ctx := context.Background()
	secret, err := NewSecret()
	require.NoError(t, err)
	key, err := NewAPIKey(secret)
	require.NoError(t, err)
	// Round trip: parsing the serialized key preserves head and tail.
	serialized := key.Serialize()
	parsedKey, err := ParseAPIKey(serialized)
	require.NoError(t, err)
	require.True(t, bytes.Equal(key.Head(), parsedKey.Head()))
	require.True(t, bytes.Equal(key.Tail(), parsedKey.Tail()))
	restricted, err := key.Restrict(WithNonce(Caveat{
		AllowedPaths: []*Caveat_Path{{
			Bucket:              []byte("a-test-bucket"),
			EncryptedPathPrefix: []byte("a-test-path"),
		}},
	}))
	require.NoError(t, err)
	// Restricting keeps the head but derives a new tail.
	serialized = restricted.Serialize()
	parsedKey, err = ParseAPIKey(serialized)
	require.NoError(t, err)
	require.True(t, bytes.Equal(key.Head(), parsedKey.Head()))
	require.False(t, bytes.Equal(key.Tail(), parsedKey.Tail()))
	now := time.Now()
	action1 := Action{
		Op:            ActionRead,
		Time:          now,
		Bucket:        []byte("a-test-bucket"),
		EncryptedPath: []byte("a-test-path"),
	}
	action2 := Action{
		Op:            ActionRead,
		Time:          now,
		Bucket:        []byte("another-test-bucket"),
		EncryptedPath: []byte("another-test-path"),
	}
	// The unrestricted key allows both actions; the restricted key only
	// allows the path it was caveated to.
	require.NoError(t, key.Check(ctx, secret, action1, nil))
	require.NoError(t, key.Check(ctx, secret, action2, nil))
	require.NoError(t, parsedKey.Check(ctx, secret, action1, nil))
	err = parsedKey.Check(ctx, secret, action2, nil)
	require.True(t, ErrUnauthorized.Has(err), err)
}
// TestRevocation verifies how APIKey.Check reacts to the revoker:
// nil and non-revoking revokers pass, while both a positive revocation
// and a revoker error surface as ErrRevoked.
func TestRevocation(t *testing.T) {
	ctx := testcontext.New(t)
	secret, err := NewSecret()
	require.NoError(t, err)
	key, err := NewAPIKey(secret)
	require.NoError(t, err)
	now := time.Now()
	action := Action{
		Op:   ActionWrite,
		Time: now,
	}
	// Nil revoker should not revoke anything
	require.NoError(t, key.Check(ctx, secret, action, nil))
	// No error returned when nothing is revoked
	nothingRevoked := testRevoker{}
	require.NoError(t, key.Check(ctx, secret, action, nothingRevoked))
	// Check that revoked results in a ErrRevoked error
	revoked := testRevoker{revoked: true}
	require.True(t, ErrRevoked.Has(key.Check(ctx, secret, action, revoked)))
	// Check that an error while checking revocations results in an ErrRevoked err
	revokeErr := testRevoker{revoked: false, err: errs.New("some revoke err")}
	require.True(t, ErrRevoked.Has(key.Check(ctx, secret, action, revokeErr)))
}
// TestExpiration table-tests NotBefore/NotAfter caveats against a
// zero timestamp (always an Error), the present, and timestamps on
// either side of the caveat windows.
func TestExpiration(t *testing.T) {
	ctx := context.Background()
	secret, err := NewSecret()
	require.NoError(t, err)
	key, err := NewAPIKey(secret)
	require.NoError(t, err)
	now := time.Now()
	minuteAgo := now.Add(-time.Minute)
	minuteFromNow := now.Add(time.Minute)
	twoMinutesAgo := now.Add(-2 * time.Minute)
	twoMinutesFromNow := now.Add(2 * time.Minute)
	notBeforeMinuteFromNow, err := key.Restrict(WithNonce(Caveat{
		NotBefore: &minuteFromNow,
	}))
	require.NoError(t, err)
	notAfterMinuteAgo, err := key.Restrict(WithNonce(Caveat{
		NotAfter: &minuteAgo,
	}))
	require.NoError(t, err)
	for i, test := range []struct {
		keyToTest       *APIKey
		timestampToTest time.Time
		errClass        *errs.Class
	}{
		{key, time.Time{}, &Error},
		{notBeforeMinuteFromNow, time.Time{}, &Error},
		{notAfterMinuteAgo, time.Time{}, &Error},
		{key, now, nil},
		{notBeforeMinuteFromNow, now, &ErrUnauthorized},
		{notAfterMinuteAgo, now, &ErrUnauthorized},
		{key, twoMinutesAgo, nil},
		{notBeforeMinuteFromNow, twoMinutesAgo, &ErrUnauthorized},
		{notAfterMinuteAgo, twoMinutesAgo, nil},
		{key, twoMinutesFromNow, nil},
		{notBeforeMinuteFromNow, twoMinutesFromNow, nil},
		{notAfterMinuteAgo, twoMinutesFromNow, &ErrUnauthorized},
	} {
		err := test.keyToTest.Check(ctx, secret, Action{
			Op:   ActionRead,
			Time: test.timestampToTest,
		}, nil)
		if test.errClass == nil {
			require.NoError(t, err, fmt.Sprintf("test #%d", i+1))
		} else {
			// Was require.False(t, !test.errClass.Has(err), ...): the
			// double negative obscured the intent and inverted the
			// reported expectation; assert the positive condition.
			require.True(t, test.errClass.Has(err), fmt.Sprintf("test #%d", i+1))
		}
	}
}
// TestGetAllowedBuckets verifies that stacked AllowedPaths caveats
// intersect (only "test1" survives both restrictions) and that adding
// a DisallowWrites caveat does not change the allowed-bucket set for a
// read action.
func TestGetAllowedBuckets(t *testing.T) {
	ctx := context.Background()
	secret, err := NewSecret()
	require.NoError(t, err)
	key, err := NewAPIKey(secret)
	require.NoError(t, err)
	restricted, err := key.Restrict(WithNonce(Caveat{
		AllowedPaths: []*Caveat_Path{{Bucket: []byte("test1")}, {Bucket: []byte("test2")}},
	}))
	require.NoError(t, err)
	restricted, err = restricted.Restrict(WithNonce(Caveat{
		AllowedPaths: []*Caveat_Path{{Bucket: []byte("test1")}, {Bucket: []byte("test3")}},
	}))
	require.NoError(t, err)
	now := time.Now()
	action := Action{
		Op:   ActionRead,
		Time: now,
	}
	allowed, err := restricted.GetAllowedBuckets(ctx, action)
	require.NoError(t, err)
	require.Equal(t, allowed, AllowedBuckets{
		All:     false,
		Buckets: map[string]struct{}{"test1": {}},
	})
	restricted, err = restricted.Restrict(WithNonce(Caveat{DisallowWrites: true}))
	require.NoError(t, err)
	allowed, err = restricted.GetAllowedBuckets(ctx, action)
	require.NoError(t, err)
	require.Equal(t, allowed, AllowedBuckets{
		All:     false,
		Buckets: map[string]struct{}{"test1": {}},
	})
}
// TestNonce verifies that restricting two equal keys with the same
// caveat is deterministic without a nonce (identical tails) and
// diverges when WithNonce injects randomness (different tails).
func TestNonce(t *testing.T) {
	secret, err := NewSecret()
	require.NoError(t, err)
	key1, err := NewAPIKey(secret)
	require.NoError(t, err)
	key2, err := ParseAPIKey(key1.Serialize())
	require.NoError(t, err)
	// Key 1 and 2 should be exactly equal.
	require.True(t, bytes.Equal(key1.Head(), key2.Head()))
	require.True(t, bytes.Equal(key1.Tail(), key2.Tail()))
	caveat := Caveat{
		DisallowReads: true,
	}
	t.Run("WithoutNonce", func(t *testing.T) {
		key1r, err := key1.Restrict(caveat)
		require.NoError(t, err)
		key2r, err := key2.Restrict(caveat)
		require.NoError(t, err)
		// Key 1 and 2 should be exactly equal when the caveat does not have a
		// nonce.
		require.True(t, bytes.Equal(key1r.Head(), key2r.Head()))
		require.True(t, bytes.Equal(key1r.Tail(), key2r.Tail()))
	})
	t.Run("WithNonce", func(t *testing.T) {
		key1r, err := key1.Restrict(WithNonce(caveat))
		require.NoError(t, err)
		key2r, err := key2.Restrict(WithNonce(caveat))
		require.NoError(t, err)
		// Key 1 and 2 should share the same head, but have different
		// tails when the caveats have a nonce.
		require.True(t, bytes.Equal(key1r.Head(), key2r.Head()))
		require.False(t, bytes.Equal(key1r.Tail(), key2r.Tail()))
	})
}
// BenchmarkAPIKey_Check measures Check on a key carrying two stacked
// caveats (a NotBefore window with allowed paths, then a NotAfter
// window) against a matching read action.
func BenchmarkAPIKey_Check(b *testing.B) {
	ctx := context.Background()
	secret, err := NewSecret()
	require.NoError(b, err)
	key, err := NewAPIKey(secret)
	require.NoError(b, err)
	now := time.Now()
	denyBefore := now.Add(-10 * time.Hour)
	denyAfter := now.Add(10 * time.Hour)
	key2, err := key.Restrict(WithNonce(Caveat{
		NotBefore: &denyBefore,
		AllowedPaths: []*Caveat_Path{
			{Bucket: []byte("test1")},
			{Bucket: []byte("test3")},
		},
	}))
	require.NoError(b, err)
	key3, err := key2.Restrict(WithNonce(Caveat{
		NotAfter: &denyAfter,
	}))
	require.NoError(b, err)
	b.ResetTimer()
	revoker := &testRevoker{}
	for i := 0; i < b.N; i++ {
		err := key3.Check(ctx, secret, Action{Bucket: []byte("test1"), Op: ActionRead, Time: now}, revoker)
		if err != nil {
			b.Fatal(err)
		}
	}
}
// testRevoker is a canned revocation checker used by the tests: it
// always returns the configured (revoked, err) pair.
type testRevoker struct {
	revoked bool
	err     error
}
// Check implements the revocation-checking interface consumed by
// APIKey.Check; it ignores its inputs and returns the canned values.
func (tr testRevoker) Check(ctx context.Context, tails [][]byte) (bool, error) {
	return tr.revoked, tr.err
}
|
// 网页服务器1
package main
import (
"GoNet/goWeb"
"io"
"net/http"
)
// 一个文本框和一个提交按钮
const form2 = `<html><body><form action="#" method="post" name="bar">
<input type="text" name="in"/>
<input type="submit" value="Submit"/>
</form></html></body>`
// SimpleServer replies to every request with a fixed "hello, world"
// heading; write errors are routed through the shared error handler.
func SimpleServer(w http.ResponseWriter, req *http.Request) {
	_, err := io.WriteString(w, "<h1>hello, world</h1>")
	goWeb.ErrorHandle(err,"SimpleServer :")
}
// FromServer serves a small HTML form on GET and echoes the submitted
// "in" field back on POST. Other methods get an empty 200 response.
func FromServer(w http.ResponseWriter, req *http.Request){
	// Set the header's content-type to text/html before writing the response;
	// this lets the browser treat the payload as HTML rather than relying on
	// http.DetectContentType sniffing.
	w.Header().Set("Content-Type","text/html")
	switch req.Method {
	case "GET":
		_, err := io.WriteString(w, form2)
		goWeb.ErrorHandle(err,"FromServer GET:")
	case "POST":
		// Read the text box via its name attribute "in" with
		// request.FormValue and write it back to the browser.
		_, err := io.WriteString(w, req.FormValue("in"))
		goWeb.ErrorHandle(err,"FromServer POST:")
	}
}
// main registers both handlers (wrapped in LogPanics for panic
// recovery) and serves HTTP on localhost:8080.
func main() {
	// Wrap handlers with LogPanics so panics are logged instead of crashing.
	http.HandleFunc("/simple", LogPanics(SimpleServer))
	http.HandleFunc("/from", LogPanics(FromServer))
	err := http.ListenAndServe("localhost:8080", nil)
	goWeb.ErrorHandle(err,"ListenAndServe :")
}
package response_test
import (
"testing"
"github.com/shandysiswandi/echo-service/internal/infrastructure/app/response"
"github.com/stretchr/testify/assert"
)
// TestSuccess verifies the fields of a freshly built success response.
func TestSuccess(t *testing.T) {
	result := response.Success("message", nil)
	body := result.(response.SuccessBody)
	assert.Equal(t, false, body.Error)
	assert.Equal(t, "message", body.Message)
	assert.Equal(t, nil, body.Data)
}
// TestSuccessForTest covers both a valid JSON body and a malformed one.
func TestSuccessForTest(t *testing.T) {
	// Valid JSON decodes into a populated SuccessBody.
	got, err := response.SuccessForTest(`{"error":false,"message":"message","data":null}`)
	assert.NoError(t, err)
	assert.Equal(t, false, got.Error)
	assert.Equal(t, "message", got.Message)
	assert.Equal(t, nil, got.Data)
	// Non-JSON input yields an error and an empty body.
	got, err = response.SuccessForTest("error")
	assert.Error(t, err)
	assert.Empty(t, got)
}
|
package commands
import (
"fmt"
"github.com/jessevdk/go-flags"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/devpod"
"github.com/loft-sh/devspace/pkg/devspace/pipeline/types"
"github.com/loft-sh/devspace/pkg/util/stringutil"
"github.com/pkg/errors"
"strings"
)
// StartDevOptions describe how deployments should get deployed
type StartDevOptions struct {
	devpod.Options

	// Set/SetString/From/FromFile are forwarded to applySetValues for
	// each selected dev configuration.
	Set       []string `long:"set" description:"Set configuration"`
	SetString []string `long:"set-string" description:"Set configuration as string"`
	From      []string `long:"from" description:"Reuse an existing configuration"`
	FromFile  []string `long:"from-file" description:"Reuse an existing configuration from a file"`

	// All selects every dev configuration; Except filters some out.
	All    bool     `long:"all" description:"Start all dev configurations"`
	Except []string `long:"except" description:"If used with --all, will exclude the following dev configs"`
}
// StartDev implements the `start_dev` pipeline command: it parses the
// command flags, resolves which dev configurations to start (all of them
// via --all minus --except, or the ones named in args), applies any
// value overrides per config, and hands the list to the dev pod manager.
func StartDev(ctx devspacecontext.Context, pipeline types.Pipeline, args []string) error {
	ctx.Log().Debugf("start_dev %s", strings.Join(args, " "))
	err := pipeline.Exclude(ctx)
	if err != nil {
		return err
	}
	if ctx.KubeClient() == nil {
		// errors.New instead of Errorf: ErrMsg is a plain message, not a
		// format string, and Errorf would misinterpret any '%' inside it.
		return errors.New(ErrMsg)
	}
	options := &StartDevOptions{
		Options: pipeline.Options().DevOptions,
	}
	args, err = flags.ParseArgs(options, args)
	if err != nil {
		return errors.Wrap(err, "parse args")
	}
	if options.All {
		// Start every dev config except the explicitly excluded ones.
		args = []string{}
		for devConfig := range ctx.Config().Config().Dev {
			if stringutil.Contains(options.Except, devConfig) {
				continue
			}
			args = append(args, devConfig)
			ctx, err = applySetValues(ctx, "dev", devConfig, options.Set, options.SetString, options.From, options.FromFile)
			if err != nil {
				return err
			}
		}
		// Nothing left after exclusion: nothing to start, not an error.
		if len(args) == 0 {
			return nil
		}
	} else if len(args) > 0 {
		for _, devConfig := range args {
			ctx, err = applySetValues(ctx, "dev", devConfig, options.Set, options.SetString, options.From, options.FromFile)
			if err != nil {
				return err
			}
			// Validate after applySetValues, since overrides may add configs.
			if ctx.Config().Config().Dev == nil || ctx.Config().Config().Dev[devConfig] == nil {
				return fmt.Errorf("couldn't find dev %v", devConfig)
			}
		}
	} else {
		return fmt.Errorf("either specify 'start_dev --all' or 'dev devConfig1 devConfig2'")
	}
	return pipeline.DevPodManager().StartMultiple(ctx, args, options.Options)
}
|
package main
import "fmt"
// main demonstrates a higher-order function: retornaUmaFuncao returns a
// closure, which is applied to 3 and the result (30) printed.
func main() {
	multiplicador := retornaUmaFuncao()
	fmt.Println(multiplicador(3))
}
// retornaUmaFuncao returns a function that multiplies its argument by 10.
func retornaUmaFuncao() func(int) int {
	decuplicar := func(n int) int {
		return n * 10
	}
	return decuplicar
}
|
// Copyright 2022 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
package exiftool
import (
"strings"
"unicode"
"github.com/saferwall/saferwall/internal/utils"
)
const (
// Command to invoke exiftool scanner
cmd = "exiftool"
)
// Scan runs the exiftool command line tool against filePath, reads its
// stdout, and returns the output parsed into a tag -> value map.
func Scan(filePath string) (map[string]string, error) {
	output, err := utils.ExecCommand(cmd, filePath)
	// exiftool returns exit status 1 for unknown files.
	if err != nil {
		return nil, err
	}
	return ParseOutput(output), nil
}
// ParseOutput converts exiftool textual output into a map of tag name to
// value. Tag names are normalized to CamelCase and a few filesystem tags
// are dropped. Returns nil when exiftool reported "File not found".
func ParseOutput(exifout string) map[string]string {
	var ignoreTags = []string{
		"Directory",
		"File Name",
		"File Permissions",
	}
	lines := strings.Split(exifout, "\n")
	if utils.StringInSlice("File not found", lines) {
		return nil
	}
	datas := make(map[string]string, len(lines))
	for _, line := range lines {
		// Split on the first ':' only: many exiftool values contain ':'
		// themselves (timestamps like "2020:01:01 10:00:00", URLs, ratios)
		// and were previously dropped by the exact two-field split.
		keyvalue := strings.SplitN(line, ":", 2)
		if len(keyvalue) != 2 {
			continue
		}
		if !utils.StringInSlice(strings.TrimSpace(keyvalue[0]), ignoreTags) {
			datas[strings.TrimSpace(camelCase(keyvalue[0]))] =
				strings.TrimSpace(keyvalue[1])
		}
	}
	return datas
}
// camelCase normalizes s to CamelCase: delimiter runes (whitespace, '-',
// '_') are removed, the rune after each delimiter (and the first rune) is
// upper-cased, and a rune following an upper-case rune is lower-cased.
func camelCase(s string) string {
	s = strings.TrimSpace(s)
	out := make([]rune, 0, len(s))
	stringIter(s, func(prev, curr, _ rune) {
		if isDelimiter(curr) {
			return
		}
		switch {
		case isDelimiter(prev) || prev == 0:
			out = append(out, unicode.ToUpper(curr))
		case unicode.IsLower(prev):
			out = append(out, curr)
		default:
			out = append(out, unicode.ToLower(curr))
		}
	})
	return string(out)
}
// isDelimiter reports whether ch separates words: '-', '_' or any kind of
// Unicode whitespace.
func isDelimiter(ch rune) bool {
	switch ch {
	case '-', '_':
		return true
	}
	return unicode.IsSpace(ch)
}
// stringIter walks s rune by rune, invoking callback once per rune with its
// neighbours. prev is 0 for the first rune and next is 0 for the last rune;
// an empty string produces no calls.
func stringIter(s string, callback func(prev, curr, next rune)) {
	runes := []rune(s)
	for i, curr := range runes {
		var prev, next rune
		if i > 0 {
			prev = runes[i-1]
		}
		if i < len(runes)-1 {
			next = runes[i+1]
		}
		callback(prev, curr, next)
	}
}
|
package utils
import (
"regexp"
)
// Contains reports whether target occurs in the given slice of strings.
func Contains(slices []string, target string) bool {
	for _, candidate := range slices {
		if candidate == target {
			return true
		}
	}
	return false
}
// emailRe is compiled once at package load; compiling the pattern on
// every IsEmail call was needless repeated work.
var emailRe = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")

// IsEmail reports whether email is a syntactically valid email address
// according to the HTML5-style address pattern above.
func IsEmail(email string) bool {
	return emailRe.MatchString(email)
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// TokenFilterMinHash token filter that hashes each token of the token stream
// and divides the resulting hashes into buckets, keeping the lowest-valued hashes
// per bucket. It then returns these hashes as tokens.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/analysis-minhash-tokenfilter.html
// for details.
type TokenFilterMinHash struct {
	TokenFilter
	name string
	// fields specific to min hash token filter; nil means "unset, use the
	// Elasticsearch default" (see the corresponding setter methods).
	hashCount *int
	bucketCount *int
	hashSetSize *int
	withRotation *bool
}
// NewTokenFilterMinHash creates a min_hash token filter definition with
// the given name; all options are left unset so the Elasticsearch
// defaults apply.
func NewTokenFilterMinHash(name string) *TokenFilterMinHash {
	filter := new(TokenFilterMinHash)
	filter.name = name
	return filter
}
// Name returns the field key under which this token filter is serialized.
func (h *TokenFilterMinHash) Name() string {
	return h.name
}
// HashCount sets the number of hashes to hash the token stream with.
// Defaults to 1.
func (h *TokenFilterMinHash) HashCount(hashCount int) *TokenFilterMinHash {
	h.hashCount = &hashCount
	return h
}
// BucketCount sets the number of buckets to divide the minhashes into.
// Defaults to 512.
func (h *TokenFilterMinHash) BucketCount(bucketCount int) *TokenFilterMinHash {
	h.bucketCount = &bucketCount
	return h
}
// HashSetSize sets the number of minhashes to keep per bucket.
// Defaults to 1.
func (h *TokenFilterMinHash) HashSetSize(hashSetSize int) *TokenFilterMinHash {
	h.hashSetSize = &hashSetSize
	return h
}
// WithRotation sets whether or not to fill empty buckets with the value of the
// first non-empty bucket to its circular right. Only takes effect if `hash_set_size`
// is equal to one.
// Defaults to true if `bucket_count` is greater than one, else false.
func (h *TokenFilterMinHash) WithRotation(withRotation bool) *TokenFilterMinHash {
	h.withRotation = &withRotation
	return h
}
// Validate validates TokenFilterMinHash. Only the name is required, and
// only when includeName is true.
func (h *TokenFilterMinHash) Validate(includeName bool) error {
	var missing []string
	if includeName && h.name == "" {
		missing = append(missing, "Name")
	}
	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields or invalid values: %v", missing)
}
// Source returns the serializable JSON for the source builder; only
// explicitly-set options are emitted. Example:
//
//	{
//	  "test": {
//	    "type": "min_hash",
//	    "hash_count": 1,
//	    "bucket_count": 512,
//	    "hash_set_size": 1,
//	    "with_rotation": true
//	  }
//	}
func (h *TokenFilterMinHash) Source(includeName bool) (interface{}, error) {
	options := map[string]interface{}{
		"type": "min_hash",
	}
	if h.hashCount != nil {
		options["hash_count"] = h.hashCount
	}
	if h.bucketCount != nil {
		options["bucket_count"] = h.bucketCount
	}
	if h.hashSetSize != nil {
		options["hash_set_size"] = h.hashSetSize
	}
	if h.withRotation != nil {
		options["with_rotation"] = h.withRotation
	}
	if includeName {
		// Wrap the options under the filter's name.
		return map[string]interface{}{h.name: options}, nil
	}
	return options, nil
}
|
package rt
// Option is a functional option: it receives the current workerOpts value
// and returns a (possibly modified) copy.
type Option func(workerOpts) workerOpts
// PoolSize returns an Option that sets the number of workers in the pool.
func PoolSize(size int) Option {
	return func(o workerOpts) workerOpts {
		o.poolSize = size
		return o
	}
}
// TimeoutSeconds returns an Option that sets how many seconds a job may
// run before timing out.
func TimeoutSeconds(timeout int) Option {
	return func(o workerOpts) workerOpts {
		o.jobTimeoutSeconds = timeout
		return o
	}
}
// RetrySeconds returns an Option that sets the delay, in seconds, between
// worker retries.
func RetrySeconds(secs int) Option {
	return func(o workerOpts) workerOpts {
		o.retrySecs = secs
		return o
	}
}
// MaxRetries returns an Option that sets the maximum number of retries a
// worker will attempt.
func MaxRetries(count int) Option {
	return func(o workerOpts) workerOpts {
		o.numRetries = count
		return o
	}
}
// PreWarm returns an Option that makes the worker pre-warm itself to
// minimize cold start time. Without it, the worker "warms up" only when
// it receives its first job.
func PreWarm() Option {
	return func(o workerOpts) workerOpts {
		o.preWarm = true
		return o
	}
}
|
package gorm2
import (
"context"
"errors"
"fmt"
"net/url"
"github.com/google/uuid"
"github.com/traPtitech/trap-collection-server/src/domain"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/repository"
"github.com/traPtitech/trap-collection-server/src/repository/gorm2/migrate"
"gorm.io/gorm"
)
// GameURL is the gorm-backed repository for game URL records.
type GameURL struct {
	db *DB
}
// NewGameURL creates a GameURL repository on top of the given DB handle.
func NewGameURL(db *DB) *GameURL {
	return &GameURL{
		db: db,
	}
}
// SaveGameURL persists gameURL as a new row associated with gameVersionID.
func (gu *GameURL) SaveGameURL(ctx context.Context, gameVersionID values.GameVersionID, gameURL *domain.GameURL) error {
	db, err := gu.db.getDB(ctx)
	if err != nil {
		return fmt.Errorf("failed to get db: %w", err)
	}
	// Map the domain value object onto the table row representation.
	record := migrate.GameURLTable{
		ID:            uuid.UUID(gameURL.GetID()),
		GameVersionID: uuid.UUID(gameVersionID),
		URL:           (*url.URL)(gameURL.GetLink()).String(),
		CreatedAt:     gameURL.GetCreatedAt(),
	}
	if err := db.Create(&record).Error; err != nil {
		return fmt.Errorf("failed to create game url: %w", err)
	}
	return nil
}
// GetGameURL fetches the URL row for gameVersionID and rebuilds it as a
// domain.GameURL. Returns repository.ErrRecordNotFound when no row exists.
func (gu *GameURL) GetGameURL(ctx context.Context, gameVersionID values.GameVersionID) (*domain.GameURL, error) {
	db, err := gu.db.getDB(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get db: %w", err)
	}
	// Take fetches a single row for this game version.
	var gameURLTable migrate.GameURLTable
	err = db.
		Where("game_version_id = ?", uuid.UUID(gameVersionID)).
		Take(&gameURLTable).Error
	// Translate gorm's not-found into the repository-level sentinel error.
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, repository.ErrRecordNotFound
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get game url: %w", err)
	}
	// The URL is stored as text; parse it back into a *url.URL for the domain.
	urlGameLink, err := url.Parse(gameURLTable.URL)
	if err != nil {
		return nil, fmt.Errorf("failed to parse game url: %w", err)
	}
	return domain.NewGameURL(
		values.NewGameURLIDFromUUID(gameURLTable.ID),
		values.NewGameURLLink(urlGameLink),
		gameURLTable.CreatedAt,
	), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.