text stringlengths 11 4.05M |
|---|
package cmd
import (
"bufio"
"bytes"
"fmt"
"os"
"time"
pub "github.com/go-ap/activitypub"
pubcl "github.com/go-ap/client"
"github.com/go-ap/errors"
"github.com/go-ap/fedbox/app"
"github.com/go-ap/fedbox/internal/config"
"github.com/go-ap/fedbox/internal/env"
"github.com/go-ap/processing"
"github.com/go-ap/storage"
"github.com/openshift/osin"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
"gopkg.in/urfave/cli.v2"
)
// Control bundles the configuration and storage backends shared by the CLI
// commands in this package.
type Control struct {
	Conf        config.Options          // loaded application configuration
	AuthStorage osin.Storage            // OAuth2/auth storage backend
	Storage     storage.Store           // ActivityPub object storage
	Saver       processing.C2SProcessor // client-to-server activity processor
}
// New wires up a Control from the given auth storage, object storage and
// configuration, building the C2S processing pipeline with an ActivityPub
// client that logs through the package-level logrus logger.
func New(authDB osin.Storage, actorDb storage.Store, conf config.Options) *Control {
	baseIRI := pub.IRI(conf.BaseURL)
	// Adapter funcs so the ActivityPub client logs through our logrus logger.
	clientErrLogger := func(...pubcl.Ctx) pubcl.LogFn {
		return logger.Errorf
	}
	clientInfoLogger := func(...pubcl.Ctx) pubcl.LogFn {
		return logger.Infof
	}
	// NOTE(review): the second and third results of processing.New are
	// discarded here — confirm neither carries an error that should be handled.
	p, _, _ := processing.New(
		processing.SetIRI(baseIRI),
		processing.SetStorage(actorDb),
		processing.SetIDGenerator(app.GenerateID(baseIRI)),
		processing.SetClient(pubcl.New(
			pubcl.SetInfoLogger(clientInfoLogger),
			pubcl.SetErrorLogger(clientErrLogger),
			// TLS certificate validation is skipped outside production.
			pubcl.SkipTLSValidation(!conf.Env.IsProd()),
		)),
	)
	return &Control{
		Conf:        conf,
		AuthStorage: authDB,
		Storage:     actorDb,
		Saver:       p,
	}
}
// ctl holds the Control built by Before for use by the CLI commands.
var ctl Control

// logger is the package-wide logger; its level is adjusted in Before once the
// configuration has been loaded.
var logger = logrus.New()
// Before is the urfave/cli hook that runs ahead of every command: it builds a
// Control via setup, stores it in the package-level ctl, and sets the log
// level from the loaded configuration.
func Before(c *cli.Context) error {
	// Default to warnings until the configured level is known.
	logger.Level = logrus.WarnLevel
	fields := logrus.Fields{}
	if c.Command != nil {
		fields["cli"] = c.Command.Name
	}
	ct, err := setup(c, logger.WithFields(fields))
	if err != nil {
		// Ensure we don't print the default help message, which is not useful here
		c.App.CustomAppHelpTemplate = "Failed"
		logger.WithError(err).Error("Error")
		return err
	}
	ctl = *ct
	// the level enums have same values
	logger.Level = logrus.Level(ct.Conf.LogLevel)
	return nil
}
// setup loads configuration from the environment and applies CLI flag
// overrides (--path, --env, --type, and the postgres connection flags), then
// opens the storage backends and returns a fully wired Control.
func setup(c *cli.Context, l logrus.FieldLogger) (*Control, error) {
	path := c.String("path")
	environ := env.Type(c.String("env"))
	conf, err := config.LoadFromEnv(environ, time.Second)
	if err != nil {
		// NOTE(review): the load error is logged but not returned — setup
		// continues with whatever partial config was produced. Confirm this
		// fallback is intentional.
		l.Errorf("Unable to load config files for environment %s: %s", environ, err)
	}
	if path != "." {
		conf.StoragePath = path
	}
	typ := c.String("type")
	if typ != "" {
		conf.Storage = config.StorageType(typ)
	}
	if conf.Storage == config.StoragePostgres {
		host := c.String("host")
		if host == "" {
			host = "localhost"
		}
		port := c.Int64("port")
		if port == 0 {
			// NOTE(review): with no port given, the host is replaced by the
			// --path value — presumably a unix-socket connection; verify this
			// is intended and not a typo for a default port assignment.
			host = path
		}
		user := c.String("user")
		if user == "" {
			user = "fedbox"
		}
		// Interactively read (and confirm) the database password.
		pw, err := loadPwFromStdin(true, "%s@%s's", user, host)
		if err != nil {
			return nil, err
		}
		// NOTE(review): this BackendConfig is built and immediately discarded,
		// so it has no effect. Confirm whether it should be assigned into conf.
		_ = config.BackendConfig{
			Enabled: false,
			Host:    host,
			Port:    port,
			User:    user,
			Pw:      string(pw),
			Name:    user,
		}
	}
	db, aDb, err := app.Storage(conf, l)
	if err != nil {
		return nil, err
	}
	return New(aDb, db, conf), nil
}
// loadPwFromStdin prompts on the terminal (echo disabled) for a password and
// returns it. When confirm is true the password must be entered twice and both
// entries must match. The prompt text is built from format string s and params.
//
// Fix: errors from terminal.ReadPassword were previously discarded, which
// could silently accept an empty password when stdin is not a terminal.
func loadPwFromStdin(confirm bool, s string, params ...interface{}) ([]byte, error) {
	fmt.Printf(s+" pw: ", params...)
	pw1, err := terminal.ReadPassword(0)
	fmt.Println()
	if err != nil {
		return nil, err
	}
	if confirm {
		fmt.Printf("pw again: ")
		pw2, err := terminal.ReadPassword(0)
		fmt.Println()
		if err != nil {
			return nil, err
		}
		if !bytes.Equal(pw1, pw2) {
			return nil, errors.Errorf("Passwords do not match")
		}
	}
	return pw1, nil
}
// loadFromStdin prompts with the given format string and reads one line from
// stdin, returning it without the trailing newline.
//
// Fix: the original sliced input[:len(input)-1] unconditionally, which panics
// on empty input (immediate EOF) and chops a real character when the final
// line has no trailing newline; it also ignored the read error.
func loadFromStdin(s string, params ...interface{}) ([]byte, error) {
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf(s+": ", params...)
	input, err := reader.ReadBytes('\n')
	// ReadBytes returns the data read so far alongside the error (e.g. EOF on
	// a final unterminated line); only fail when nothing was read at all.
	if err != nil && len(input) == 0 {
		return nil, err
	}
	fmt.Println()
	return bytes.TrimSuffix(input, []byte{'\n'}), nil
}
// Errf prints a printf-style message, followed by a newline, to stderr.
func Errf(s string, par ...interface{}) {
	msg := fmt.Sprintf(s+"\n", par...)
	fmt.Fprint(os.Stderr, msg)
}
|
package main
import (
"log"
"net/http"
"os"
"github.com/connorjcantrell/toolint/postgres"
"github.com/connorjcantrell/toolint/web"
)
// main wires the postgres store, the session manager and the HTTP handler,
// then serves on port 3000.
//
// Fix: the http.ListenAndServe error was discarded, so bind failures (port in
// use, permission denied) exited silently with status 0; an empty CSRF key is
// now rejected instead of silently weakening CSRF protection.
func main() {
	dsn := os.Getenv("TOOLINT_DB")
	store, err := postgres.NewStore(dsn)
	if err != nil {
		log.Fatal(err)
	}
	sessions, err := web.NewSessionManager(dsn)
	if err != nil {
		log.Fatal(err)
	}
	key := os.Getenv("CSRF_KEY")
	if key == "" {
		log.Fatal("CSRF_KEY environment variable must be set")
	}
	csrfKey := []byte(key)
	h := web.NewHandler(store, sessions, csrfKey)
	if err := http.ListenAndServe(":3000", h); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package aws
import (
"context"
"github.com/aws/aws-sdk-go-v2/service/ec2"
)
// EC2API represents the series of calls we require from the AWS SDK v2 EC2 Client
// EC2API represents the series of calls we require from the AWS SDK v2 EC2
// Client. It exists so the AWS client can be faked in tests.
//
// Fix: parameter naming was inconsistent (some methods named ctx/input/optFns,
// others were unnamed or used params); all signatures now follow the same
// ctx/input/optFns convention. Parameter names are documentation only in Go
// interfaces, so implementers and callers are unaffected.
type EC2API interface {
	CreateTags(ctx context.Context, input *ec2.CreateTagsInput, optFns ...func(*ec2.Options)) (*ec2.CreateTagsOutput, error)
	DeleteTags(ctx context.Context, input *ec2.DeleteTagsInput, optFns ...func(*ec2.Options)) (*ec2.DeleteTagsOutput, error)
	DescribeImages(ctx context.Context, input *ec2.DescribeImagesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeImagesOutput, error)
	DescribeVpcs(ctx context.Context, input *ec2.DescribeVpcsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeVpcsOutput, error)
	DescribeSubnets(ctx context.Context, input *ec2.DescribeSubnetsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSubnetsOutput, error)
	DescribeSecurityGroups(ctx context.Context, input *ec2.DescribeSecurityGroupsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSecurityGroupsOutput, error)
	AuthorizeSecurityGroupIngress(ctx context.Context, input *ec2.AuthorizeSecurityGroupIngressInput, optFns ...func(*ec2.Options)) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
	RevokeSecurityGroupIngress(ctx context.Context, input *ec2.RevokeSecurityGroupIngressInput, optFns ...func(*ec2.Options)) (*ec2.RevokeSecurityGroupIngressOutput, error)
	DescribeAvailabilityZones(ctx context.Context, input *ec2.DescribeAvailabilityZonesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeAvailabilityZonesOutput, error)
	CreateLaunchTemplate(ctx context.Context, input *ec2.CreateLaunchTemplateInput, optFns ...func(*ec2.Options)) (*ec2.CreateLaunchTemplateOutput, error)
	CreateLaunchTemplateVersion(ctx context.Context, input *ec2.CreateLaunchTemplateVersionInput, optFns ...func(*ec2.Options)) (*ec2.CreateLaunchTemplateVersionOutput, error)
	DescribeLaunchTemplates(ctx context.Context, input *ec2.DescribeLaunchTemplatesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeLaunchTemplatesOutput, error)
	DeleteLaunchTemplate(ctx context.Context, input *ec2.DeleteLaunchTemplateInput, optFns ...func(*ec2.Options)) (*ec2.DeleteLaunchTemplateOutput, error)
}
|
package purchaseorderapi
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/dolittle/platform-api/pkg/platform"
"github.com/dolittle/platform-api/pkg/platform/microservice/k8s"
"github.com/dolittle/platform-api/pkg/platform/microservice/parser"
"github.com/dolittle/platform-api/pkg/platform/microservice/rawdatalog"
"github.com/dolittle/platform-api/pkg/platform/storage"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
)
type Error struct {
StatusCode int
Err error
}
func newInternalError(err error) *Error {
return &Error{http.StatusInternalServerError, err}
}
func newBadRequest(err error) *Error {
return &Error{http.StatusBadRequest, err}
}
func newForbidden(err error) *Error {
return &Error{http.StatusForbidden, err}
}
func newConflict(err error) *Error {
return &Error{http.StatusConflict, err}
}
func (e *Error) Error() string {
return fmt.Sprintf("status %d: Err %v", e.StatusCode, e.Err)
}
// Handler orchestrates creation, update and deletion of Purchase Order API
// microservices and their companion Raw Data Log ingestors.
type Handler struct {
	parser         parser.Parser                     // parses raw request bodies into microservice definitions
	repo           Repo                              // Purchase Order API deployment repo
	gitRepo        storage.Repo                      // git-backed persistent storage
	rawdatalogRepo rawdatalog.RawDataLogIngestorRepo // manages companion Raw Data Log ingestors
	logContext     logrus.FieldLogger                // base logger for all handler methods
}

// NewHandler constructs a Handler from its collaborators.
func NewHandler(parser parser.Parser, repo Repo, gitRepo storage.Repo, rawDataLogIngestorRepo rawdatalog.RawDataLogIngestorRepo, logContext logrus.FieldLogger) *Handler {
	return &Handler{parser, repo, gitRepo, rawDataLogIngestorRepo, logContext}
}
// Create creates a new PurchaseOrderAPI microservice and creates a RawDataLog microservice too if it didn't already exist
func (s *Handler) Create(inputBytes []byte, applicationInfo platform.Application, customerTenants []platform.CustomerTenantInfo) (platform.HttpInputPurchaseOrderInfo, *Error) {
// Function assumes access check has taken place
var ms platform.HttpInputPurchaseOrderInfo
logger := s.logContext.WithFields(logrus.Fields{
"handler": "PurchaseOrderAPI",
"method": "Create",
})
msK8sInfo, parserError := s.parser.Parse(inputBytes, &ms, applicationInfo)
if parserError != nil {
logger.WithError(parserError).Error("Failed to parse input")
return ms, newBadRequest(fmt.Errorf("failed to parse input: %w", parserError))
}
logger = logger.WithFields(logrus.Fields{
"customer_id": applicationInfo.Customer.ID,
"application_id": applicationInfo.ID,
"environment": ms.Environment,
})
logger.Debug("Starting to create a PurchaseOrderAPI microservice")
exists, statusErr := s.purchaseOrderApiExists(msK8sInfo, ms, logger)
if statusErr != nil {
logger.WithError(statusErr).Error("Failed to check whether Purchase Order API exists")
return ms, newInternalError(fmt.Errorf("failed to whether Purchase Order API exists: %w", statusErr))
}
if exists {
logger.WithField("microserviceID", ms.Dolittle.MicroserviceID).Warn("A Purchase Order API Microservice with the same name already exists in kubernetes or git storage")
return ms, newConflict(fmt.Errorf("a Purchase Order API Microservice with the same name already exists in kubernetes or git storage"))
}
if statusErr := s.ensureRawDataLogExists(msK8sInfo, ms, customerTenants, logger); statusErr != nil {
return ms, statusErr
}
return ms, s.createPurchaseOrderAPI(msK8sInfo, ms, customerTenants, logger)
}
// Update updates an existing PurchaseOrderAPI microservice and creates a RawDataLog microservice too if it didn't already exist
// UpdateWebhooks updates an existing PurchaseOrderAPI microservice's webhook
// configuration and creates a RawDataLog microservice too if it didn't
// already exist. It returns a 400 on unparsable input, a 409 if the
// microservice does not exist, and a 500 on infrastructure failures.
func (s *Handler) UpdateWebhooks(inputBytes []byte, applicationInfo platform.Application, customerTenants []platform.CustomerTenantInfo) (platform.HttpInputPurchaseOrderInfo, *Error) {
	// Function assumes access check has taken place
	var ms platform.HttpInputPurchaseOrderInfo
	logger := s.logContext.WithFields(logrus.Fields{
		"handler": "PurchaseOrderAPI",
		"method":  "UpdateWebhooks",
	})
	msK8sInfo, parserError := s.parser.Parse(inputBytes, &ms, applicationInfo)
	if parserError != nil {
		logger.WithError(parserError).Error("Failed to parse input")
		return ms, newBadRequest(fmt.Errorf("failed to parse input: %w", parserError))
	}
	logger = logger.WithFields(logrus.Fields{
		"customer_id":    applicationInfo.Customer.ID,
		"application_id": applicationInfo.ID,
		"environment":    ms.Environment,
	})
	logger.Debug("Starting to update PurchaseOrderAPI microservice")
	exists, statusErr := s.purchaseOrderApiExists(msK8sInfo, ms, logger)
	if statusErr != nil {
		logger.WithError(statusErr).Error("Failed to check whether Purchase Order API exists")
		// Fixed error message: was "failed to whether ..." (missing "check").
		return ms, newInternalError(fmt.Errorf("failed to check whether Purchase Order API exists: %w", statusErr))
	}
	if !exists {
		logger.WithField("microserviceID", ms.Dolittle.MicroserviceID).Warn("A Purchase Order API Microservice does not exist in kubernetes or git storage")
		return ms, newConflict(fmt.Errorf("a Purchase Order API Microservice does not exist in kubernetes or git storage"))
	}
	if statusErr := s.ensureRawDataLogExists(msK8sInfo, ms, customerTenants, logger); statusErr != nil {
		return ms, statusErr
	}
	return ms, s.updatePurchaseOrderAPIWebhooks(msK8sInfo, ms.Extra.Webhooks, ms.Environment, ms.Dolittle.MicroserviceID, logger)
}
// Delete removes the Purchase Order API microservice from the deployment repo.
func (s *Handler) Delete(applicationID, environment, microserviceID string) error {
	err := s.repo.Delete(applicationID, environment, microserviceID)
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to delete Purchase Order API: %w", err)
}
// GetDataStatus queries the Purchase Order API microservice's data-status
// endpoint at the given DNS name and returns the decoded status.
//
// Fix: the response body was only closed after the 2xx check, leaking the
// body (and the underlying connection) on non-2xx responses; the local
// variable `error` also shadowed the predeclared identifier.
func (s *Handler) GetDataStatus(dns, customerID, applicationID, environment, microserviceID string) (platform.PurchaseOrderStatus, *Error) {
	logger := s.logContext.WithFields(logrus.Fields{
		"handler":         "PurchaseOrderAPI",
		"method":          "GetDataStatus",
		"customer_id":     customerID,
		"application_id":  applicationID,
		"environment":     environment,
		"microservice_id": microserviceID,
		"dns":             dns,
	})
	var status platform.PurchaseOrderStatus
	url := fmt.Sprintf("http://%s%s", dns, "/api/purchaseorders/datastatus")
	resp, err := http.Get(url)
	if err != nil {
		logger.WithError(err).Error("Failed to request Purchase Order API microservices status")
		return status, newInternalError(fmt.Errorf("failed to get Purchase Order API microservices data status"))
	}
	// Close the body on every path, including the non-2xx early return below,
	// so the transport can reuse the connection.
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		err := errors.New(resp.Status)
		statusErr := &Error{StatusCode: resp.StatusCode, Err: err}
		logger.WithError(err).Error("Purchase Order API microservice data status didn't return 2** status")
		return status, statusErr
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logger.WithError(err).Error("Failed to read response for Purchase Order API microservices data status")
		return status, newInternalError(fmt.Errorf("failed to read response for Purchase Order API microservices data status"))
	}
	err = json.Unmarshal(body, &status)
	if err != nil {
		logger.WithError(err).Error("Failed to parse response for Purchase Order API microservices status")
		return status, newInternalError(fmt.Errorf("failed to parse response for Purchase Order API microservices status"))
	}
	return status, nil
}
// purchaseOrderApiExists reports whether a Purchase Order API microservice
// already exists for the given environment, checking git storage first and
// then the cluster.
func (s *Handler) purchaseOrderApiExists(msK8sInfo k8s.MicroserviceK8sInfo, ms platform.HttpInputPurchaseOrderInfo, logger *logrus.Entry) (bool, *Error) {
	microservices, err := s.gitRepo.GetMicroservices(msK8sInfo.Customer.ID, msK8sInfo.Application.ID)
	if err != nil {
		logger.WithError(err).Error("Failed to get microservices from GitRepo")
		return false, newInternalError(fmt.Errorf("failed to get microservices from GitRepo: %w", err))
	}
	for _, microservice := range microservices {
		if microservice.Kind == platform.MicroserviceKindPurchaseOrderAPI && strings.EqualFold(microservice.Environment, ms.Environment) {
			return true, nil
		}
	}
	exists, err := s.repo.EnvironmentHasPurchaseOrderAPI(msK8sInfo.Namespace, ms)
	if err != nil {
		logger.WithError(err).Error("Failed to check if environment has Purchase Order API with K8sRepo")
		return false, newInternalError(fmt.Errorf("failed to check if environment has Purchase Order API with K8sRepo: %w", err))
	}
	// Idiom fix: collapse `if exists { return true, nil }; return false, nil`.
	return exists, nil
}
// createPurchaseOrderAPI deploys the Purchase Order API microservice to the
// cluster and persists it in the git repo.
func (s *Handler) createPurchaseOrderAPI(msK8sInfo k8s.MicroserviceK8sInfo, ms platform.HttpInputPurchaseOrderInfo, customerTenants []platform.CustomerTenantInfo, logger *logrus.Entry) *Error {
	if err := s.repo.Create(msK8sInfo.Namespace, msK8sInfo.Customer, msK8sInfo.Application, customerTenants, ms); err != nil {
		logger.WithError(err).Error("Failed to create Purchase Order API")
		return newInternalError(fmt.Errorf("failed to create Purchase Order API: %w", err))
	}
	if err := s.gitRepo.SaveMicroservice(ms.Dolittle.CustomerID, ms.Dolittle.ApplicationID, ms.Environment, ms.Dolittle.MicroserviceID, ms); err != nil {
		// TODO change
		logger.WithError(err).Error("Failed to save Purchase Order API in GitRepo")
		// Fix: wrap the underlying error with %w (it was dropped), consistent
		// with every other error path in this file.
		return newInternalError(fmt.Errorf("failed to save Purchase Order API in GitRepo: %w", err))
	}
	return nil
}
// ensureRawDataLogExists makes sure a Raw Data Log ingestor exists for the
// microservice's environment: it creates one when absent and otherwise syncs
// its webhook configuration.
func (s *Handler) ensureRawDataLogExists(msK8sInfo k8s.MicroserviceK8sInfo, ms platform.HttpInputPurchaseOrderInfo, customerTenants []platform.CustomerTenantInfo, logger *logrus.Entry) *Error {
	rawDataLogExists, microserviceID, err := s.rawdatalogRepo.Exists(msK8sInfo.Namespace, ms.Environment)
	if err != nil {
		logger.WithError(err).Error("Failed to check if Raw Data Log exists")
		return newInternalError(fmt.Errorf("failed to check if Raw Data Log exists: %w", err))
	}
	if !rawDataLogExists {
		logger.Debug("Raw Data Log does not exist, creating a new one")
		return s.createRawDataLog(msK8sInfo, ms, customerTenants, logger)
	}
	// Idiom fix: dropped the redundant `else` after the terminating return.
	return s.updateRawDataLogWebhooks(msK8sInfo, ms.Extra.Webhooks, ms.Environment, microserviceID, logger)
}
// updatePurchaseOrderAPIWebhooks replaces the stored Purchase Order API
// microservice's webhook configuration and persists it in the git repo.
//
// Fix: the json.Unmarshal error was silently discarded, so corrupt stored
// data would be overwritten with a mostly zero-valued microservice.
func (s *Handler) updatePurchaseOrderAPIWebhooks(msK8sInfo k8s.MicroserviceK8sInfo, webhooks []platform.RawDataLogIngestorWebhookConfig, environment, microserviceID string, logger *logrus.Entry) *Error {
	var storedMicroservice platform.HttpInputPurchaseOrderInfo
	raw, err := s.gitRepo.GetMicroservice(msK8sInfo.Customer.ID, msK8sInfo.Application.ID, environment, microserviceID)
	if err != nil {
		logger.WithError(err).Error("Failed to get Purchase Order API microservice from GitRepo")
		return newInternalError(fmt.Errorf("failed to get Purchase Order API microservice from GitRepo: %w", err))
	}
	if err := json.Unmarshal(raw, &storedMicroservice); err != nil {
		logger.WithError(err).Error("Failed to parse Purchase Order API microservice from GitRepo")
		return newInternalError(fmt.Errorf("failed to parse Purchase Order API microservice from GitRepo: %w", err))
	}
	storedMicroservice.Extra.Webhooks = webhooks
	if err := s.gitRepo.SaveMicroservice(storedMicroservice.Dolittle.CustomerID, storedMicroservice.Dolittle.ApplicationID, storedMicroservice.Environment, storedMicroservice.Dolittle.MicroserviceID, storedMicroservice); err != nil {
		logger.WithError(err).Error("Failed to save Purchase Order API in GitRepo")
		return newInternalError(fmt.Errorf("failed to save Purchase Order API in GitRepo: %w", err))
	}
	return nil
}
// createRawDataLog provisions a new Raw Data Log ingestor derived from the
// given Purchase Order API microservice and persists it in the git repo.
func (s *Handler) createRawDataLog(msK8sInfo k8s.MicroserviceK8sInfo, ms platform.HttpInputPurchaseOrderInfo, customerTenants []platform.CustomerTenantInfo, logger *logrus.Entry) *Error {
	rawDataLog := s.extractRawDataLogInfo(ms)
	err := s.rawdatalogRepo.Create(msK8sInfo.Namespace, msK8sInfo.Customer, msK8sInfo.Application, customerTenants, rawDataLog)
	if err != nil {
		logger.WithError(err).Error("Failed to create Raw Data Log")
		return newInternalError(fmt.Errorf("failed to create Raw Data Log: %w", err))
	}
	err = s.gitRepo.SaveMicroservice(rawDataLog.Dolittle.CustomerID, rawDataLog.Dolittle.ApplicationID, rawDataLog.Environment, rawDataLog.Dolittle.MicroserviceID, rawDataLog)
	if err != nil {
		logger.WithError(err).Error("Failed to save Raw Data Log in GitRepo")
		return newInternalError(fmt.Errorf("failed to save Raw Data Log in GitRepo: %w", err))
	}
	return nil
}
// updateRawDataLogWebhooks replaces the stored Raw Data Log ingestor's
// webhook configuration, applies it to the cluster, and persists the updated
// microservice in the git repo.
//
// Fix: the json.Unmarshal error was silently discarded, so corrupt stored
// data would propagate as a zero-valued microservice.
func (s *Handler) updateRawDataLogWebhooks(msK8sInfo k8s.MicroserviceK8sInfo, webhooks []platform.RawDataLogIngestorWebhookConfig, environment, microserviceID string, logger *logrus.Entry) *Error {
	var storedMicroservice platform.HttpInputRawDataLogIngestorInfo
	raw, err := s.gitRepo.GetMicroservice(msK8sInfo.Customer.ID, msK8sInfo.Application.ID, environment, microserviceID)
	if err != nil {
		logger.WithError(err).Error("Failed to get Raw Data Log microservice from GitRepo")
		return newInternalError(fmt.Errorf("failed to get Raw Data Log microservice from GitRepo: %w", err))
	}
	if err := json.Unmarshal(raw, &storedMicroservice); err != nil {
		logger.WithError(err).Error("Failed to parse Raw Data Log microservice from GitRepo")
		return newInternalError(fmt.Errorf("failed to parse Raw Data Log microservice from GitRepo: %w", err))
	}
	storedMicroservice.Extra.Webhooks = webhooks
	if err := s.rawdatalogRepo.Update(msK8sInfo.Namespace, msK8sInfo.Customer, msK8sInfo.Application, storedMicroservice); err != nil {
		logger.WithError(err).Error("Failed to update Raw Data Log")
		return newInternalError(fmt.Errorf("failed to update Raw Data Log: %w", err))
	}
	if err := s.gitRepo.SaveMicroservice(storedMicroservice.Dolittle.CustomerID, storedMicroservice.Dolittle.ApplicationID, storedMicroservice.Environment, storedMicroservice.Dolittle.MicroserviceID, storedMicroservice); err != nil {
		logger.WithError(err).Error("Failed to save Raw Data Log in GitRepo")
		return newInternalError(fmt.Errorf("failed to save Raw Data Log in GitRepo: %w", err))
	}
	return nil
}
// extractRawDataLogInfo derives a Raw Data Log ingestor definition from a
// Purchase Order API microservice: same application, customer, environment,
// ingress and webhooks, but with a freshly generated microservice ID.
func (s *Handler) extractRawDataLogInfo(ms platform.HttpInputPurchaseOrderInfo) platform.HttpInputRawDataLogIngestorInfo {
	return platform.HttpInputRawDataLogIngestorInfo{
		MicroserviceBase: platform.MicroserviceBase{
			Name:        ms.Extra.RawDataLogName,
			Kind:        platform.MicroserviceKindRawDataLogIngestor,
			Environment: ms.Environment,
			Dolittle: platform.HttpInputDolittle{
				ApplicationID: ms.Dolittle.ApplicationID,
				CustomerID:    ms.Dolittle.CustomerID,
				// Each derived Raw Data Log gets a new random UUID.
				MicroserviceID: uuid.New().String(),
			},
		},
		Extra: platform.HttpInputRawDataLogIngestorExtra{
			// TODO these images won't evolve automatically
			Headimage:    "dolittle/platform-api:latest",
			Runtimeimage: "dolittle/runtime:6.1.0",
			Ingress:      ms.Extra.Ingress,
			WriteTo:      "nats",
			Webhooks:     ms.Extra.Webhooks,
		},
	}
}
|
package main
import (
"flag"
"net"
log "github.com/sirupsen/logrus"
"github.com/shimmerglass/rgbx/device"
"github.com/shimmerglass/rgbx/render"
"github.com/shimmerglass/rgbx/rgbx"
"github.com/shimmerglass/rgbx/server"
"google.golang.org/grpc"
)
// main starts the rgbx gRPC server on the configured listen address, driving
// the Razer keyboard through the render compositors.
//
// Fix: the grpcServer.Serve error was discarded, so serve failures exited
// silently with status 0.
func main() {
	listen := flag.String("listen", "127.0.0.1:1342", "Listen addr")
	flag.Parse()
	// log.SetLevel(log.DebugLevel)
	compositors := render.NewCompositors(device.NewDriverPool(
		device.NewRazerKeyboardDriver(),
	))
	lis, err := net.Listen("tcp", *listen)
	if err != nil {
		log.Fatal(err)
	}
	grpcServer := grpc.NewServer()
	rgbx.RegisterRGBizerServer(grpcServer, server.New(compositors))
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package planners
import (
"testing"
"parsers"
"parsers/sqlparser"
"github.com/stretchr/testify/assert"
)
// TestShowTablesPlan builds the plan for a SHOW TABLES query with a WHERE and
// LIMIT clause, walks it, and compares its string form against the expected
// plan tree (a SelectPlan over the system.tables scan with selection and sink
// sub-plans).
func TestShowTablesPlan(t *testing.T) {
	query := "show tables where names like 'xx' limit 2"
	statement, err := parsers.Parse(query)
	assert.Nil(t, err)
	plan := NewShowTablesPlan(statement.(*sqlparser.Show))
	err = plan.Build()
	assert.Nil(t, err)
	// Walk every node; returning true continues traversal.
	err = plan.Walk(func(plan IPlan) (bool, error) {
		return true, nil
	})
	assert.Nil(t, err)
	expect := `{
"Name": "ShowTablesPlan",
"SubPlan": {
"Name": "SelectPlan",
"SubPlan": {
"Name": "MapPlan",
"SubPlans": [
{
"Name": "ScanPlan",
"Table": "tables",
"Schema": "system"
},
{
"Name": "SelectionPlan",
"Projects": {
"Name": "MapPlan"
},
"GroupBys": {
"Name": "MapPlan"
},
"SelectionMode": "NormalSelection"
},
{
"Name": "SinkPlan"
}
]
}
}
}`
	actual := plan.String()
	assert.Equal(t, expect, actual)
}
|
package keybindings
import (
"errors"
"../printout"
"../layout"
"../ds"
"../file"
"strings"
"github.com/jroimartin/gocui"
)
// prompt_Entry_Date opens the "entryDate" prompt view and shows usage hints
// in the status line.
func prompt_Entry_Date(g *gocui.Gui, v *gocui.View) error {
	if err := layout.Create_Prompt_View(g, "entryDate", "[Entry Date ]", true); err != nil {
		return err
	}
	printout.Status(g, "Update entry Date and press [Enter] to save. Press [Esc] to abort.")
	return nil
}
// confirm_Prompt_Date reads the edited date from the "entryDate" prompt view,
// applies it to the active entry, rewrites the affected log files, and closes
// the prompt.
func confirm_Prompt_Date(g *gocui.Gui, v *gocui.View) error {
	var year, month, day string
	var date string
	var index int
	date = strings.TrimSuffix(v.ViewBuffer(), "\n")
	//date = date[:len(date)-1]
	//year = date[6:len(date)-1]
	//month = date[0:2]
	//day = date[3:5]
	index = convert_Active_Entry_To_AllEntries_Index(g)
	// Capture the entry's previous date before overwriting it, so the old
	// (date-keyed) log file can be rewritten below.
	year = ds.AllEntries[index].Year
	month = ds.AllEntries[index].Month
	day = ds.AllEntries[index].Day
	ds.Set_Date_Entry(index, date)
	printout.Overview(g)
	printout.Content(g)
	//Write Old log
	file.Write_Log(year, month, day)
	//Write out File!
	write_Active_Entry(g)
	// NOTE(review): the error returned by abort_Prompt_Date is discarded —
	// confirm whether view-teardown failures should propagate to the caller.
	abort_Prompt_Date(g, v)
	return nil
}
// abort_Prompt_Date clears the status line, deletes the "entryDate" prompt
// view, and returns focus to the "overview" view.
//
// Fix: both error messages were copy-pasted from another handler and referred
// to "editNoteTitle"/"toc" while this function operates on the "entryDate"
// and "overview" views — the messages now describe what actually failed.
func abort_Prompt_Date(g *gocui.Gui, v *gocui.View) error {
	var err error
	printout.Status(g, "")
	if err = g.DeleteView("entryDate"); err != nil {
		return errors.New("(abort_Prompt_Date) error deleting entryDate view " + err.Error())
	}
	if _, err = g.SetCurrentView("overview"); err != nil {
		return errors.New("(abort_Prompt_Date) error setting current view to overview " + err.Error())
	}
	return nil
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package serviceadapter
import (
"log"
"strings"
"encoding/json"
sdk "github.com/pivotal-cf/on-demand-services-sdk/serviceadapter"
"gopkg.in/yaml.v2"
)
// manifest is the minimal subset of a generated BOSH manifest needed for
// validation: the deployment name plus the release and stemcell versions.
type manifest struct {
	Name     string
	Releases []struct {
		Version string
	}
	Stemcells []struct {
		Version string
	}
}

// manifestValidator validates a generated manifest against the expected
// deployment name.
type manifestValidator struct {
	deploymentName string
}
// GenerateManifest invokes the external service adapter's "generate-manifest"
// command with the serialised deployment, plan and request parameters, and
// returns the generated (and validated) manifest.
//
// Two adapter protocols are supported: when c.UsingStdin is set, all
// parameters are passed as a single JSON document on stdin and the adapter
// replies with JSON on stdout; otherwise the older positional-argument
// contract is used and stdout is treated as the raw manifest text.
func (c *Client) GenerateManifest(
	serviceDeployment sdk.ServiceDeployment,
	plan sdk.Plan,
	requestParams map[string]interface{},
	previousManifest []byte,
	previousPlan *sdk.Plan,
	previousSecrets map[string]string,
	previousConfigs map[string]string,
	uaaClient map[string]string,
	logger *log.Logger,
) (sdk.MarshalledGenerateManifest, error) {
	serialisedServiceDeployment, err := json.Marshal(serviceDeployment)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	// Plan properties are sanitised so they serialise cleanly to JSON.
	plan.Properties = SanitiseForJSON(plan.Properties)
	serialisedPlan, err := json.Marshal(plan)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	serialisedRequestParams, err := json.Marshal(requestParams)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	if previousPlan != nil {
		previousPlan.Properties = SanitiseForJSON(previousPlan.Properties)
	}
	serialisedPreviousPlan, err := json.Marshal(previousPlan)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	serialisedPreviousSecrets, err := json.Marshal(previousSecrets)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	serialisedPreviousConfigs, err := json.Marshal(previousConfigs)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	serialisedUAAClient, err := json.Marshal(uaaClient)
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	var stdout, stderr []byte
	var output sdk.MarshalledGenerateManifest
	var exitCode *int
	var jsonErr error
	if c.UsingStdin {
		// Newer protocol: everything goes to the adapter as JSON on stdin.
		inputParams := sdk.InputParams{
			GenerateManifest: sdk.GenerateManifestJSONParams{
				ServiceDeployment: string(serialisedServiceDeployment),
				Plan:              string(serialisedPlan),
				RequestParameters: string(serialisedRequestParams),
				PreviousPlan:      string(serialisedPreviousPlan),
				PreviousManifest:  string(previousManifest),
				PreviousSecrets:   string(serialisedPreviousSecrets),
				PreviousConfigs:   string(serialisedPreviousConfigs),
				ServiceInstanceUAAClient: string(serialisedUAAClient),
			},
		}
		stdout, stderr, exitCode, err = c.CommandRunner.RunWithInputParams(
			inputParams,
			c.ExternalBinPath, "generate-manifest",
		)
	} else {
		// Older protocol: parameters are positional CLI arguments (secrets,
		// configs and the UAA client are not passed in this mode).
		stdout, stderr, exitCode, err = c.CommandRunner.Run(
			c.ExternalBinPath, "generate-manifest",
			string(serialisedServiceDeployment),
			string(serialisedPlan), string(serialisedRequestParams),
			string(previousManifest), string(serialisedPreviousPlan),
		)
	}
	if err != nil {
		return sdk.MarshalledGenerateManifest{}, adapterError(c.ExternalBinPath, stdout, stderr, err)
	}
	// NOTE(review): exitCode is dereferenced without a nil check; this relies
	// on CommandRunner always returning a non-nil exit code when err is nil —
	// confirm that contract.
	if err := ErrorForExitCode(*exitCode, string(stdout)); err != nil {
		logger.Printf(adapterFailedMessage(*exitCode, c.ExternalBinPath, stdout, stderr))
		return sdk.MarshalledGenerateManifest{}, err
	}
	if c.UsingStdin {
		jsonErr = json.Unmarshal(stdout, &output)
		if jsonErr != nil {
			return sdk.MarshalledGenerateManifest{}, adapterError(c.ExternalBinPath, stdout, stderr, jsonErr)
		}
	} else { // old adapter format
		output = sdk.MarshalledGenerateManifest{Manifest: string(stdout)}
	}
	logger.Printf("service adapter ran generate-manifest successfully, stderr logs: %s", string(stderr))
	validator := manifestValidator{deploymentName: serviceDeployment.DeploymentName}
	if err := validator.validateManifest(c.ExternalBinPath, output.Manifest, stderr); err != nil {
		return sdk.MarshalledGenerateManifest{}, err
	}
	return output, nil
}
// validateManifest checks the generated manifest: it must parse as YAML,
// carry the expected deployment name, and pin exact release and stemcell
// versions (anything ending in "latest" is rejected).
func (v manifestValidator) validateManifest(adapterPath string, genManifest string, stderr []byte) error {
	var m manifest
	if err := yaml.Unmarshal([]byte(genManifest), &m); err != nil {
		return invalidYAMLError(adapterPath, stderr)
	}
	if m.Name != v.deploymentName {
		return incorrectDeploymentNameError(adapterPath, stderr, v.deploymentName, m.Name)
	}
	for _, rel := range m.Releases {
		if strings.HasSuffix(rel.Version, "latest") {
			return invalidVersionError(adapterPath, stderr, rel.Version)
		}
	}
	for _, sc := range m.Stemcells {
		if strings.HasSuffix(sc.Version, "latest") {
			return invalidVersionError(adapterPath, stderr, sc.Version)
		}
	}
	return nil
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package pcap
import (
"context"
"fmt"
"os"
"path/filepath"
"chromiumos/tast/common/network/tcpdump"
"chromiumos/tast/common/wificell/router"
"chromiumos/tast/errors"
remotetcpdump "chromiumos/tast/remote/network/tcpdump"
"chromiumos/tast/remote/wificell/fileutil"
"chromiumos/tast/ssh"
"chromiumos/tast/testing"
)
// Option is the type of options to start Capturer object.
type Option func(*Capturer)

// Snaplen returns an option which sets a Capturer's snapshot length.
func Snaplen(s uint64) Option {
	return func(c *Capturer) {
		c.snaplen = s
	}
}

// Capturer controls a tcpdump process to capture packets on an interface.
type Capturer struct {
	host       *ssh.Conn // remote host the capture runs on
	name       string    // identifier used in all generated filenames
	iface      string    // network interface to capture on
	workDir    string    // remote directory for the temporary pcap file
	snaplen    uint64    // snapshot length; 0 means the runner's default
	stdoutFile *os.File  // local log file for tcpdump stdout
	stderrFile *os.File  // local log file for tcpdump stderr
	downloaded bool      // set once the pcap has been fetched to OutDir
	runner     *tcpdump.Runner
}
// StartCapturer creates and starts a Capturer.
// After getting a Capturer instance, c, the caller should call c.Close() at the end, and use the
// shortened ctx (provided by c.ReserveForClose()) before c.Close() to reserve time for it to run.
func StartCapturer(ctx context.Context, host *ssh.Conn, name, iface, workDir string, opts ...Option) (*Capturer, error) {
	capturer := &Capturer{
		host:    host,
		name:    name,
		iface:   iface,
		workDir: workDir,
	}
	// Apply the functional options (e.g. Snaplen) before starting tcpdump.
	for _, applyOpt := range opts {
		applyOpt(capturer)
	}
	if err := capturer.start(ctx); err != nil {
		return nil, err
	}
	return capturer, nil
}
// start opens the stdout/stderr log files in OutDir and launches tcpdump on
// the remote host, writing captured packets to a temporary file in c.workDir.
func (c *Capturer) start(ctx context.Context) (err error) {
	c.stdoutFile, err = fileutil.PrepareOutDirFile(ctx, c.filename("stdout"))
	if err != nil {
		return errors.Wrap(err, "failed to open stdout log of tcpdump")
	}
	c.stderrFile, err = fileutil.PrepareOutDirFile(ctx, c.filename("stderr"))
	if err != nil {
		return errors.Wrap(err, "failed to open stderr log of tcpdump")
	}
	c.runner = remotetcpdump.NewRemoteRunner(c.host)
	// A zero snaplen means "use the runner's default"; only override when the
	// Snaplen option was supplied.
	if c.snaplen != 0 {
		c.runner.SetSnaplen(c.snaplen)
	}
	err = c.runner.StartTcpdump(ctx, c.iface, c.packetPathOnRemote(), c.stdoutFile, c.stderrFile)
	if err != nil {
		return errors.Wrap(err, "failed to start tcpdump")
	}
	return nil
}
// ReserveForClose returns a shortened ctx with cancel function.
// The shortened ctx is used for running things before c.Close() to reserve time for it to run.
func (c *Capturer) ReserveForClose(ctx context.Context) (context.Context, context.CancelFunc) {
	return c.runner.ReserveForClose(ctx)
}

// Close terminates the capturer and downloads the pcap file from host to OutDir.
// The download is only attempted when the tcpdump command actually ran on the
// host; otherwise there is no capture file to fetch.
func (c *Capturer) Close(ctx context.Context) error {
	cmdExists := c.runner.CmdExists()
	c.runner.Close(ctx)
	if cmdExists {
		return c.downloadPacket(ctx)
	}
	return nil
}
// Interface returns the interface the capturer runs on.
func (c *Capturer) Interface() string {
	return c.iface
}

// filename returns a filename for c to store different type of information.
// suffix can be the type of stored information. e.g. conf, stdout, stderr ...
func (c *Capturer) filename(suffix string) string {
	return "pcap-" + c.name + "." + suffix
}

// packetPathOnRemote returns the temporary path on c.host for tcpdump to write parsed packets.
func (c *Capturer) packetPathOnRemote() string {
	return filepath.Join(c.workDir, c.filename("pcap.tmp"))
}

// packetFilename returns the path under OutDir that capturer save the pcap file on Close call.
func (c *Capturer) packetFilename() string {
	return c.filename("pcap")
}
// packetPath returns the path of the result pcap file.
func (c *Capturer) packetPath(ctx context.Context) (string, error) {
	outDir, ok := testing.ContextOutDir(ctx)
	if !ok {
		return "", errors.New("failed to get OutDir")
	}
	return filepath.Join(outDir, c.packetFilename()), nil
}

// PacketPath returns the path of the result pcap file so that the tests can
// verify the content of captured packets. This function should be called
// after Close (i.e. packet downloaded), otherwise it will return error.
func (c *Capturer) PacketPath(ctx context.Context) (string, error) {
	if c.downloaded {
		return c.packetPath(ctx)
	}
	return "", errors.New("pcap not yet downloaded")
}
// downloadPacket downloads result pcap file from host to OutDir.
// It refuses to download twice, and removes the remote temporary file once
// the download has succeeded.
func (c *Capturer) downloadPacket(ctx context.Context) error {
	dst, err := c.packetPath(ctx)
	if err != nil {
		return err
	}
	src := c.packetPathOnRemote()
	if c.downloaded {
		return errors.Errorf("packet already downloaded from %s to %s", src, dst)
	}
	if err := router.GetSingleFile(ctx, c.host, src, dst); err != nil {
		return errors.Wrapf(err, "unable to download packet from %s to %s", src, dst)
	}
	c.downloaded = true
	// Clean up the remote temporary file; a failure here leaves the local
	// copy intact but is still reported to the caller.
	if err := c.host.CommandContext(ctx, "rm", src).Run(ssh.DumpLogOnError); err != nil {
		return errors.Wrapf(err, "failed to clean up remote file %s", src)
	}
	return nil
}
|
/*
 * Storage backend interfaces for the cache.
 */
package cachex
import "time"
// Storage is the interface implemented by cache storage backends.
type Storage interface {
	// Get retrieves the cached data for key into value, which must be a
	// non-nil pointer. It returns NotFound when the key is missing; when the
	// entry has expired it returns the stale data together with NotFound.
	Get(key, value interface{}) error
	// Set caches value under key.
	Set(key, value interface{}) error
}
// DeletableStorage is implemented by storage backends that support deleting
// individual entries.
type DeletableStorage interface {
	Storage
	// Del removes the entry stored under key.
	Del(key interface{}) error
}
// ClearableStorage is implemented by storage backends that support clearing
// all entries at once.
type ClearableStorage interface {
	Storage
	// Clear removes every entry from the backend.
	Clear() error
}
// SetWithTTLableStorage is implemented by storage backends that support a
// custom per-entry time-to-live.
type SetWithTTLableStorage interface {
	Storage
	// SetWithTTL caches value under key, expiring it after ttl.
	// (Parameter renamed TTL -> ttl for Go naming conventions; interface
	// method parameter names are documentary only, so this is compatible.)
	SetWithTTL(key, value interface{}, ttl time.Duration) error
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
)
// main wires two route groups onto a default gin engine — v1 uses GET, v2
// uses POST for the same handlers — and serves on :8888.
func main() {
	r := gin.Default()
	v1 := r.Group("v1")
	{
		v1.GET("/login", login)
		v1.GET("/submit", submit)
	}
	v2 := r.Group("v2")
	{
		v2.POST("/login", login)
		v2.POST("/submit", submit)
	}
	// Fix: the error from Run was previously discarded, so a failed port
	// bind exited silently. Fail loudly instead.
	if err := r.Run(":8888"); err != nil {
		panic(err)
	}
}
// login echoes the "name" query parameter back, defaulting to "mylogin".
func login(c *gin.Context) {
	msg := fmt.Sprintf("name=%s\n", c.DefaultQuery("name", "mylogin"))
	c.String(200, msg)
}
// submit echoes the "name" query parameter back, defaulting to "mysubmit".
func submit(c *gin.Context) {
	who := c.DefaultQuery("name", "mysubmit")
	body := fmt.Sprintf("name=%s\n", who)
	c.String(200, body)
}
|
package microservicesconfiguration
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"sync"
"time"
)
// springCloudConfig mirrors the JSON document returned by a Spring Cloud
// Config server for one application/profile/label combination.
type springCloudConfig struct {
	Name            string           `json:"name"`
	Profiles        []string         `json:"profiles"`
	Label           string           `json:"label"`
	Version         string           `json:"version"`
	PropertySources []propertySource `json:"propertySources"`
}
// propertySource is one named property file within a Spring Cloud Config
// response, with its key/value pairs.
type propertySource struct {
	Name   string                 `json:"name"`
	Source map[string]interface{} `json:"source"`
}
// ConfigServerLoader fetches configuration from a Spring Cloud Config server
// for the cross product of Apps x Profiles x Labels.
type ConfigServerLoader struct {
	// ConfigServerURL is the server base URL; URLs are built by direct
	// concatenation, so it is expected to end with "/".
	ConfigServerURL string `json:"configServerUrl"`
	Apps            []string
	Profiles        []string
	Labels          []string `json:"labels"`
	// Chans receives parsed configuration batches from
	// LoadConfigurationFromBranch / parseConfiguration.
	Chans chan map[string]*Configuration
}
// Load fetches configuration from the config server for every non-empty
// (app, profile, label) combination in parallel and merges the parsed
// documents into one map keyed "<name>_<profile>_<label>".
//
// Fixes: a single shared http.Client is used for all requests (the previous
// per-goroutine transport defeated connection pooling), and the response
// body is now always closed — including on non-2xx responses, which
// previously leaked connections.
func (loader ConfigServerLoader) Load() map[string]*Configuration {
	configMaplist := make(map[string]*Configuration)
	var wg sync.WaitGroup
	// Buffers sized so no goroutine can block on send even if we stop
	// receiving after the timeout below.
	configChannel := make(chan map[string]*Configuration, len(loader.Apps)*len(loader.Profiles)*len(loader.Labels))
	errorChannel := make(chan error, 1000)
	// One client shared by all fetches so TCP/TLS connections are reused.
	// InsecureSkipVerify is preserved from the original code; the config
	// server is presumably on a trusted internal network — TODO confirm.
	tr := &http.Transport{
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 32,
	}
	client := &http.Client{Transport: tr}
	for _, appName := range loader.Apps {
		for _, profile := range loader.Profiles {
			if profile == "" {
				continue
			}
			for _, label := range loader.Labels {
				if label == "" {
					continue
				}
				wg.Add(1)
				go func(configserverURL string, appName string, profile string, branch string) {
					defer wg.Done()
					url := fmt.Sprintf("%s%s/%s/%s", configserverURL, appName, profile, branch)
					resp, err := client.Get(url)
					if err != nil {
						log.Println("Couldn't load configuration, cannot start. Terminating. Error: " + err.Error())
						errorChannel <- err
						return
					}
					// Always release the connection, even on error statuses.
					defer resp.Body.Close()
					if resp.StatusCode > 299 || resp.StatusCode < 200 {
						log.Println("Non-200 rcode of ", resp.StatusCode)
						return
					}
					body, err := ioutil.ReadAll(resp.Body)
					if err != nil {
						log.Println("Couldn't load configuration, cannot start. Terminating. Error: " + err.Error())
						errorChannel <- err
						return
					}
					var cloudConfig springCloudConfig
					if err = json.Unmarshal(body, &cloudConfig); err != nil {
						log.Println("Couldn't load configuration, cannot start. Terminating. Error: " + err.Error())
						errorChannel <- err
						return
					}
					log.Println("starting process config")
					configs := make(map[string]*Configuration)
					for _, ps := range cloudConfig.PropertySources {
						// Classify the property source: shared "application.*"
						// files are global, "<app>-<profile>.*" files are
						// profile-specific, anything else is the app's basic
						// config.
						name, prof := appName, "basic"
						switch {
						case strings.Contains(ps.Name, "application."):
							name = "global"
						case strings.Contains(ps.Name, fmt.Sprintf("%s-%s.", appName, profile)):
							prof = profile
						}
						key := fmt.Sprintf("%s_%s_%s", name, prof, branch)
						configs[key] = &Configuration{
							Name:     name,
							Profile:  prof,
							Label:    branch,
							FileName: ps.Name,
							Source:   ps.Source,
						}
					}
					configChannel <- configs
				}(loader.ConfigServerURL, appName, profile, label)
			}
		}
	}
	if waitTimeout(&wg, 30*time.Second) {
		fmt.Println("Timed out waiting for wait group")
	} else {
		fmt.Println("Wait group finished")
	}
	// NOTE(review): if the wait timed out, straggler goroutines may still
	// send on these channels after close, which would panic; this matches
	// the original behavior and deserves a follow-up fix.
	close(configChannel)
	close(errorChannel)
	for config := range configChannel {
		for k, v := range config {
			configMaplist[k] = v
		}
	}
	return configMaplist
}
// IsExistAPPConfig reports whether the config server returns a 2xx response
// for the given application and profile.
//
// Fix: the response body is now closed (it previously leaked the connection
// on every call).
func IsExistAPPConfig(configServerUrl, appName, profile string) bool {
	url := fmt.Sprintf("%s%s/%s", configServerUrl, appName, profile)
	resp, err := http.Get(url)
	if err != nil {
		log.Println("Couldn't load configuration.Error: " + err.Error())
		return false
	}
	defer resp.Body.Close()
	if resp.StatusCode > 299 || resp.StatusCode < 200 {
		log.Println("Non-200 rcode of ", resp.StatusCode, url)
		return false
	}
	return true
}
// LoadConfigurationFromBranch fetches a single (app, profile, branch)
// configuration document from the config server and forwards the parsed
// result to loader.Chans via parseConfiguration.
//
// Fix: the response body is now closed (it was previously leaked).
func (loader ConfigServerLoader) LoadConfigurationFromBranch(appName string, profile string, branch string, wg *sync.WaitGroup) {
	defer wg.Done()
	url := fmt.Sprintf("%s%s/%s/%s", loader.ConfigServerURL, appName, profile, branch)
	fmt.Printf("Loading config from %s\n", url)
	resp, err := http.Get(url)
	if err != nil {
		log.Println("Couldn't load configuration, cannot start. Terminating. Error: " + err.Error())
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode > 299 || resp.StatusCode < 200 {
		log.Println("Non-200 rcode of ", resp.StatusCode)
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("Couldn't load configuration, cannot start. Terminating. Error: " + err.Error())
		return
	}
	loader.parseConfiguration(body, appName, profile, branch)
}
// parseConfiguration unmarshals a Spring Cloud Config response body into a
// batch of Configuration records keyed "<name>_<profile>_<branch>" and sends
// the batch on loader.Chans. Unparseable JSON panics, matching the original
// fail-fast behavior.
func (loader ConfigServerLoader) parseConfiguration(body []byte, appName string, profile string, branch string) {
	var cloudConfig springCloudConfig
	if err := json.Unmarshal(body, &cloudConfig); err != nil {
		panic("Cannot parse configuration, message: " + err.Error())
	}
	configs := make(map[string]*Configuration)
	for _, ps := range cloudConfig.PropertySources {
		// Shared "application.*" files are global, "<app>-<profile>.*" files
		// are profile-specific, everything else is the app's basic config.
		name, prof := appName, "basic"
		switch {
		case strings.Contains(ps.Name, "application."):
			name = "global"
		case strings.Contains(ps.Name, fmt.Sprintf("%s-%s.", appName, profile)):
			prof = profile
		}
		key := fmt.Sprintf("%s_%s_%s", name, prof, branch)
		configs[key] = &Configuration{
			Name:     name,
			Profile:  prof,
			Label:    branch,
			FileName: ps.Name,
			Source:   ps.Source,
		}
	}
	log.Println("start send config to chans ", configs)
	loader.Chans <- configs
}
// waitTimeout waits for the waitgroup for the specified max timeout.
// Returns true if waiting timed out.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false // completed normally
case <-time.After(timeout):
return true // timed out
}
}
|
package main
import (
"fmt"
"log"
)
func printError(err error, format string, v ...interface{}) {
log.Printf("ERROR: %s error: %v", fmt.Sprintf(format, v...), err)
}
func printInfo(format string, v ...interface{}) {
log.Printf("INFO: %s", fmt.Sprintf(format, v...))
}
|
package main
import (
"bytes"
"context"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"github.com/sfomuseum/go-flags/multi"
"github.com/whosonfirst/go-whosonfirst-feature/properties"
"github.com/whosonfirst/go-whosonfirst-travel/traveler"
"io"
"log"
"os"
"sort"
"strconv"
"strings"
"sync"
)
// please move all the BelongsToResult and BelongsToResultSet stuff
// in to a proper re-usable package (20180814/thisisaaronland)

// BelongsToResultSet is an appendable collection of BelongsToResult records
// whose writes are guarded by a mutex.
type BelongsToResultSet struct {
	results []*BelongsToResult
	mu      *sync.RWMutex // guards results during AddResult
}
// NewBelongsToResultSet returns an empty, ready-to-use result set.
func NewBelongsToResultSet() (*BelongsToResultSet, error) {
	rs := &BelongsToResultSet{
		results: make([]*BelongsToResult, 0),
		mu:      new(sync.RWMutex),
	}
	return rs, nil
}
// AddResult appends r to the set under the write lock.
func (rs *BelongsToResultSet) AddResult(r *BelongsToResult) error {
	rs.mu.Lock()
	rs.results = append(rs.results, r)
	rs.mu.Unlock()
	return nil
}
// Results returns the underlying slice of results.
// NOTE(review): unlike AddResult this read is not guarded by mu — confirm
// callers only read after all writers have finished.
func (rs *BelongsToResultSet) Results() []*BelongsToResult {
	return rs.results
}
// Sort orders the results by placetype, breaking ties by label.
func (rs *BelongsToResultSet) Sort() {
	sort.Slice(rs.results, func(i, j int) bool {
		a, b := rs.results[i], rs.results[j]
		if a.Placetype != b.Placetype {
			return a.Placetype < b.Placetype
		}
		return a.Label < b.Label
	})
}
// AsJSON serializes the result set to wr as a JSON array.
func (rs *BelongsToResultSet) AsJSON(wr io.Writer) error {
	encoded, err := json.Marshal(rs.Results())
	if err != nil {
		return err
	}
	if _, err = io.Copy(wr, bytes.NewReader(encoded)); err != nil {
		return err
	}
	return nil
}
// AsMarkdown writes the result set to wr as a Markdown table, emitting the
// header row and "---" separator before the first record. Write errors are
// ignored, matching the best-effort behavior of the other writers here.
func (rs *BelongsToResultSet) AsMarkdown(wr io.Writer) error {
	for i, r := range rs.Results() {
		if i == 0 {
			// Header row + separator, derived from the CSV header columns.
			head, _ := r.ToCSVHeader()
			for _, col := range head {
				wr.Write([]byte(fmt.Sprintf("| %s ", col)))
			}
			wr.Write([]byte("|\n"))
			for range head {
				wr.Write([]byte("| --- "))
			}
			wr.Write([]byte("|\n"))
		}
		out, _ := r.ToCSVRow()
		for _, col := range out {
			wr.Write([]byte(fmt.Sprintf("| %s ", col)))
		}
		wr.Write([]byte("|\n"))
	}
	return nil
}
// AsCSV writes the result set to wr as CSV; when header is true the column
// header row is emitted first.
//
// Fix: csv.Writer.Flush reports buffered write errors only via Error(),
// which was previously discarded — it is now returned to the caller.
func (rs *BelongsToResultSet) AsCSV(wr io.Writer, header bool) error {
	csv_wr := csv.NewWriter(wr)
	for i, r := range rs.Results() {
		if i == 0 && header {
			head, _ := r.ToCSVHeader()
			if err := csv_wr.Write(head); err != nil {
				return err
			}
		}
		out, _ := r.ToCSVRow()
		if err := csv_wr.Write(out); err != nil {
			return err
		}
	}
	csv_wr.Flush()
	return csv_wr.Error()
}
// BelongsToResult is one "belongs to" relation: the record identified by Id
// belongs to the ancestor identified by BelongsToId.
type BelongsToResult struct {
	BelongsToId int64  `json:"belongs_to"`
	Id          int64  `json:"id"`
	ParentId    int64  `json:"parent_id"`
	Placetype   string `json:"placetype"`
	Label       string `json:"label"`
}

// ToCSVHeader returns the CSV column names, in the same order as ToCSVRow.
func (r *BelongsToResult) ToCSVHeader() ([]string, error) {
	return []string{"belongs_to", "id", "parent_id", "placetype", "label"}, nil
}

// ToCSVRow serializes r into a CSV row matching ToCSVHeader's column order.
func (r *BelongsToResult) ToCSVRow() ([]string, error) {
	row := []string{
		strconv.FormatInt(r.BelongsToId, 10),
		strconv.FormatInt(r.Id, 10),
		strconv.FormatInt(r.ParentId, 10),
		r.Placetype,
		r.Label,
	}
	return row, nil
}
// main resolves which Who's On First records "belong to" a set of IDs and
// prints the results as raw IDs, JSON, Markdown or CSV depending on flags.
func main() {

	var belongs_to multi.MultiInt64
	flag.Var(&belongs_to, "belongs-to", "...")

	var include_placetype multi.MultiString
	flag.Var(&include_placetype, "include-placetype", "...")

	var exclude_placetype multi.MultiString
	flag.Var(&exclude_placetype, "exclude-placetype", "...")

	mode := flag.String("mode", "repo", "...")

	as_json := flag.Bool("json", false, "...")
	as_markdown := flag.Bool("markdown", false, "...")
	as_ids := flag.Bool("ids", false, "...")
	csv_header := flag.Bool("csv-header", false, "...")
	sort_rs := flag.Bool("sort", false, "...")

	flag.Parse()

	ctx := context.Background()

	rs, err := NewBelongsToResultSet()
	if err != nil {
		log.Fatal(err)
	}

	// cb accumulates every matching record into the result set.
	cb := func(r *BelongsToResult) error {
		return rs.AddResult(r)
	}

	// we should make this a canned TravelFunc once we figure out
	// what the method signature looks like... (20180314/thisisaaronland)
	filter_cb := func(ctx context.Context, f []byte, belongsto_id int64) error {

		pt, err := properties.Placetype(f)
		if err != nil {
			return fmt.Errorf("Failed to derive placetype, %w", err)
		}

		// Placetype allow/deny filtering.
		if len(include_placetype) > 0 {
			if !include_placetype.Contains(pt) {
				return nil
			}
		}
		if len(exclude_placetype) > 0 {
			if exclude_placetype.Contains(pt) {
				return nil
			}
		}

		id, err := properties.Id(f)
		if err != nil {
			return fmt.Errorf("Failed to derive ID, %w", err)
		}

		name, err := properties.Name(f)
		if err != nil {
			return fmt.Errorf("Failed to derive name, %w", err)
		}

		parent_id, err := properties.ParentId(f)
		if err != nil {
			return fmt.Errorf("Failed to derive parent ID, %w", err)
		}

		r := BelongsToResult{
			BelongsToId: belongsto_id,
			Id:          id,
			ParentId:    parent_id,
			Placetype:   pt,
			Label:       name, // whosonfirst.LabelOrDerived(f),
		}

		return cb(&r)
	}

	t, err := traveler.NewDefaultBelongsToTraveler()

	// Fix: this error was previously ignored; a constructor failure would
	// have caused a nil-pointer panic on the assignments below.
	if err != nil {
		log.Fatal(err)
	}

	t.IteratorURI = *mode
	t.BelongsTo = belongs_to
	t.Callback = filter_cb

	paths := flag.Args()

	err = t.Travel(ctx, paths...)
	if err != nil {
		log.Fatal(err)
	}

	if *sort_rs {
		rs.Sort()
	}

	// Output selection: -ids wins, then -json, then -markdown, else CSV.
	if *as_ids {
		for _, r := range rs.Results() {
			fmt.Println(r.Id)
		}
	} else if *as_json {
		err := rs.AsJSON(os.Stdout)
		if err != nil {
			log.Fatal(err)
		}
	} else if *as_markdown {
		err := rs.AsMarkdown(os.Stdout)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		err := rs.AsCSV(os.Stdout, *csv_header)
		if err != nil {
			log.Fatal(err)
		}
	}
}
|
package sender
import (
cutils "github.com/open-falcon/falcon-plus/common/utils"
"github.com/open-falcon/falcon-plus/modules/transfer/g"
rings "github.com/toolkits/consistent/rings"
)
// initNodeRings builds the package-level consistent-hash rings used to route
// items to judge and graph cluster nodes, sized by the configured replica
// counts and keyed by the cluster member names.
func initNodeRings() {
	cfg := g.Config()
	JudgeNodeRing = rings.NewConsistentHashNodesRing(int32(cfg.Judge.Replicas), cutils.KeysOfMap(cfg.Judge.Cluster))
	GraphNodeRing = rings.NewConsistentHashNodesRing(int32(cfg.Graph.Replicas), cutils.KeysOfMap(cfg.Graph.Cluster))
}
|
package service
import (
"github.com/16francs/examin_go/domain/model"
"github.com/16francs/examin_go/domain/repository"
)
// TProblemService - operations on the problem model for teachers.
type TProblemService interface {
	// CreateProblem persists a new problem and returns the stored record.
	CreateProblem(problem *model.Problem) (*model.Problem, error)
}
// tProblemService is the default TProblemService implementation; it
// delegates persistence to a TProblemRepository.
type tProblemService struct {
	repository repository.TProblemRepository
}
// NewTProblemService builds a TProblemService backed by the given repository.
func NewTProblemService(r repository.TProblemRepository) TProblemService {
	return &tProblemService{repository: r}
}
// CreateProblem persists problem through the repository and returns the
// stored record, or nil plus the repository error.
func (s *tProblemService) CreateProblem(problem *model.Problem) (*model.Problem, error) {
	created, err := s.repository.CreateProblem(problem)
	if err != nil {
		return nil, err
	}
	return created, nil
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"net/url"
"os"
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// makeUnmigratedTestSQLStore opens a SQLStore against the database named by
// the CLOUD_DATABASE environment variable without running migrations.
func makeUnmigratedTestSQLStore(tb testing.TB, logger log.FieldLogger) *SQLStore {
	// TODO: fix this dirty workaround
	// https://github.com/golang/go/issues/33633
	dsn := os.Getenv("CLOUD_DATABASE")
	require.NotEmpty(tb, dsn, "CLOUD_DATABASE must be set")
	dsnURL, err := url.Parse(dsn)
	require.NoError(tb, err)
	switch dsnURL.Scheme {
	case "postgres", "postgresql":
		// Request session-scoped temporary tables so test runs don't pollute
		// the shared database.
		q := dsnURL.Query()
		q.Add("pg_temp", "true")
		dsnURL.RawQuery = q.Encode()
		dsn = dsnURL.String()
	}
	sqlStore, err := New(dsn, logger)
	require.NoError(tb, err)
	// For testing with mode=memory and pg_temp above, restrict to a single connection,
	// otherwise multiple goroutines may not see consistent views / have consistent access.
	sqlStore.db.SetMaxOpenConns(1)
	return sqlStore
}
// MakeTestSQLStore creates a SQLStore for use with unit tests, with all
// migrations applied before it is returned.
func MakeTestSQLStore(tb testing.TB, logger log.FieldLogger) *SQLStore {
	store := makeUnmigratedTestSQLStore(tb, logger)
	require.NoError(tb, store.Migrate())
	return store
}
// CloseConnection closes the store's underlying database connection,
// reporting any close error through tb as a non-fatal assertion.
func CloseConnection(tb testing.TB, sqlStore *SQLStore) {
	assert.NoError(tb, sqlStore.db.Close())
}
|
package slack
import (
"fmt"
"io/ioutil"
"net/http"
"github.com/labstack/echo/v4"
"github.com/slack-go/slack/slackevents"
)
// SlackEvents handles Slack Events API callbacks: it reads and parses the
// event payload, verifies the request, and routes callback events to the
// slack router. HTTP status codes are written directly on error paths.
func (h handlerImpl) SlackEvents(c echo.Context) error {
	ctx := c.Request().Context()
	r := c.Request()
	w := c.Response()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return err
	}
	// Token verification is deliberately skipped here; eventVerify below
	// performs the request verification instead.
	eventsAPIEvent, err := slackevents.ParseEvent(body, slackevents.OptionNoVerifyToken())
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	// Authentication / request verification.
	err = h.eventVerify(r.Header, body, w, eventsAPIEvent.Type)
	if err != nil {
		fmt.Print(err)
		w.WriteHeader(http.StatusBadRequest)
		return err
	}
	if eventsAPIEvent.Type == slackevents.CallbackEvent {
		innerEvent := eventsAPIEvent.InnerEvent
		err := h.slackRouter.EventsRoute(ctx, innerEvent)
		if err != nil {
			fmt.Print(err)
			w.WriteHeader(http.StatusInternalServerError)
			return err
		}
	}
	return nil
}
|
package main
import (
"TPForum/internal/app/server"
"TPForum/internal/pkg/config"
)
func main() {
server.RunServer(config.Get().Main.Port)
}
|
package process
import (
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"github.com/jasontconnell/crawl/data"
)
// hrefRegex matches href="..." and src="..." attribute values.
var hrefRegex *regexp.Regexp = regexp.MustCompile(`(href|src)="(.*?)"`)

// parse extracts href/src targets from content, keeps same-site (or
// relative) non-fragment, non-".js" links, resolves relative links against
// the site root, records each new URL in gatheredUrls, and returns the new
// links tagged with referrer.
//
// Fix: removed a redundant `if add` check inside the !contains branch (add
// was provably true there, having already caused a `continue` otherwise).
func parse(site *data.Site, referrer, content string, gatheredUrls *sync.Map) []data.Link {
	urls := []data.Link{}
	matches := hrefRegex.FindAllStringSubmatch(content, -1)
	for _, m := range matches {
		u, err := url.Parse(m[2])
		if err != nil {
			continue
		}
		// Keep only same-host or relative links, skipping fragments and
		// anything containing ".js".
		add := (!u.IsAbs() || u.Hostname() == site.RootUrl.Hostname()) && !strings.HasPrefix(u.String(), "#") && !strings.Contains(u.String(), ".js")
		if !add {
			continue
		}
		if !u.IsAbs() {
			u = site.RootUrl.ResolveReference(u)
		}
		if _, contains := gatheredUrls.Load(u.String()); contains {
			continue
		}
		// NOTE(review): leftover debug output; kept so logging behavior is
		// unchanged, but it should probably be removed.
		if strings.Contains(u.String(), "statistics") {
			fmt.Println(u.String())
		}
		gatheredUrls.Store(u.String(), u.String())
		urls = append(urls, data.Link{Referrer: referrer, Url: u.String()})
	}
	return urls
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/kardianos/service"
"github.com/sirupsen/logrus"
)
// HookLogger routes the logrus logs through the service logger so that they end up in the Windows Event Viewer.
// Direct logrus output is discarded afterwards.
// NOTE(review): relies on the package-level `logger` service.Logger being
// initialized before this is called — confirm against the caller.
func HookLogger(l *logrus.Logger) {
	l.AddHook(newLogHook(logger))
	l.SetOutput(ioutil.Discard)
}
// logHook is a logrus hook that forwards log entries to a service.Logger.
type logHook struct {
	sl service.Logger
}

// newLogHook wraps sl in a logHook.
func newLogHook(sl service.Logger) *logHook {
	return &logHook{sl: sl}
}
// Fire forwards a logrus entry to the service logger, mapping
// panic/fatal/error to Error, warn to Warning, and info/debug to Info.
// Other levels are silently dropped.
func (h *logHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
		return err
	}
	switch entry.Level {
	case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
		return h.sl.Error(line)
	case logrus.WarnLevel:
		return h.sl.Warning(line)
	case logrus.InfoLevel, logrus.DebugLevel:
		return h.sl.Info(line)
	default:
		return nil
	}
}
// Levels reports that this hook wants to receive entries for all log levels.
func (h *logHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
|
package sdp
import (
"bufio"
"io"
"strconv"
"time"
)
// reader abstracts line-by-line input so the decoder can consume either a
// buffered stream or an in-memory string.
type reader interface {
	ReadLine() (string, error)
}
// A Decoder reads and decodes SDP description from an input stream or buffer.
type Decoder struct {
	r   reader   // line source
	p   []string // scratch field buffer, reused by split between calls
	err error    // sticky error recorded by split for required fields
}
// NewDecoder returns a new decoder that reads from r through an internal
// buffered line reader.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: &bufferedReader{buf: bufio.NewReader(r)}}
}

// NewDecoderString returns a new decoder that reads from the in-memory
// description v.
func NewDecoderString(v string) *Decoder {
	return &Decoder{r: &stringReader{buf: v}}
}
// Decode reads and decodes a SDP description from its input.
// It reads the input line by line, dispatching each "k=v" pair to
// decodeLine, then post-processes well-known session and media attributes
// into their dedicated struct fields.
func (dec *Decoder) Decode() (*Description, error) {
	desc := &Description{}
	for {
		v, err := dec.r.ReadLine()
		if err != nil {
			if err == io.EOF {
				break // end of input: fall through to attribute post-processing
			}
			return nil, err
		}
		// Every SDP line has the form "<type>=<value>" with a one-byte type.
		if !dec.split(v, '=', 2, true) {
			return nil, dec.err
		}
		k := dec.p[0]
		if len(k) != 1 {
			return nil, decodeError(v)
		}
		if err = dec.decodeLine(desc, k[0], dec.p[1]); err != nil {
			return nil, err
		}
	}
	// Lift recognized attributes (mode, group, setup, ...) out of the raw
	// attribute lists into typed fields.
	if err := dec.decodeSessionAttributes(desc); err != nil {
		return nil, err
	}
	for _, m := range desc.Media {
		if err := dec.decodeMediaAttributes(m); err != nil {
			return nil, err
		}
	}
	return desc, nil
}
// decodeSessionAttributes consumes well-known session-level attributes
// (mode, group, setup) into dedicated Description fields and compacts the
// remaining generic attributes in place, preserving their order.
func (dec *Decoder) decodeSessionAttributes(desc *Description) error {
	n := 0 // write index for attributes that stay generic
	for _, it := range desc.Attributes {
		switch it.Name {
		case ModeSendRecv, ModeRecvOnly, ModeSendOnly, ModeInactive:
			desc.Mode = it.Name
		case "group":
			// "group:<semantics> <mid> <mid> ..."
			dec.split(it.Value, ' ', 255, false)
			desc.Groups = append(desc.Groups, &Group{
				Semantics: dec.p[0],
				Media:     dec.p[1:],
			})
		case "setup":
			desc.Setup = it.Value
		default:
			// Unrecognized attribute: keep it, shifted over consumed ones.
			desc.Attributes[n] = it
			n++
		}
	}
	desc.Attributes = desc.Attributes[:n]
	return nil
}
// decodeMediaAttributes consumes well-known media-level attributes (mode,
// mid, setup, rtpmap, rtcp, rtcp-fb, rtcp-mux, fmtp) into typed Media fields
// and compacts the remaining generic attributes in place.
func (dec *Decoder) decodeMediaAttributes(m *Media) (err error) {
	n := 0 // write index for attributes that stay generic
	for _, it := range m.Attributes {
		switch it.Name {
		case ModeSendRecv, ModeRecvOnly, ModeSendOnly, ModeInactive:
			m.Mode = it.Name
		case "mid":
			m.ID = it.Value
		case "setup":
			m.Setup = it.Value
		case "rtpmap":
			err = dec.decodeMediaMap(m, it.Value)
		case "rtcp":
			err = dec.decodeControl(m, it.Value)
		case "rtcp-fb":
			err = dec.decodeControlFeedback(m, it.Value)
		case "rtcp-mux":
			// Only creates Control when no "rtcp" attribute did so already.
			if m.Control == nil {
				m.Control = &Control{Muxed: true}
			}
		case "fmtp":
			err = dec.decodeMediaParams(m, it.Value)
		default:
			m.Attributes[n] = it
			n++
		}
		if err != nil {
			return err
		}
	}
	m.Attributes = m.Attributes[:n]
	return nil
}
// decodeControl parses an "rtcp" attribute value:
// "<port> <nettype> <addrtype> <address>".
func (dec *Decoder) decodeControl(m *Media, v string) (err error) {
	if m.Control == nil {
		m.Control = &Control{}
	}
	if !dec.split(v, ' ', 4, true) {
		return dec.err
	}
	c := m.Control
	if c.Port, err = strconv.Atoi(dec.p[0]); err != nil {
		return
	}
	c.Network = dec.p[1]
	c.Type = dec.p[2]
	c.Address = dec.p[3]
	return nil
}
// decodeControlFeedback parses an "rtcp-fb" attribute value:
// "<payload> <feedback>", appending the feedback to the matching format.
func (dec *Decoder) decodeControlFeedback(m *Media, v string) error {
	if !dec.split(v, ' ', 2, true) {
		return dec.err
	}
	payload, err := strconv.Atoi(dec.p[0])
	if err != nil {
		return err
	}
	format := dec.touchMediaFormat(m, payload)
	format.Feedback = append(format.Feedback, dec.p[1])
	return nil
}
// decodeMediaMap parses an "rtpmap" attribute value:
// "<payload> <codec>/<clock>[/<channels>]".
func (dec *Decoder) decodeMediaMap(m *Media, v string) error {
	if !dec.split(v, ' ', 2, true) {
		return dec.err
	}
	p, err := strconv.Atoi(dec.p[0])
	if err != nil {
		return err
	}
	// Split "<codec>/<clock>[/<channels>]"; afterwards dec.p[0] is the codec
	// and dec.p[1] the remainder "<clock>[/<channels>]".
	if !dec.split(dec.p[1], '/', 2, true) {
		return dec.err
	}
	f := dec.touchMediaFormat(m, p)
	f.Codec = dec.p[0]
	// Optional channel count: on success dec.p becomes [clock, channels];
	// on failure the non-required split still leaves [clock] in dec.p.
	if dec.split(dec.p[1], '/', 2, false) {
		if f.Channels, err = strconv.Atoi(dec.p[1]); err != nil {
			return err
		}
	}
	if f.Clock, err = strconv.Atoi(dec.p[0]); err != nil {
		return err
	}
	return nil
}
// decodeMediaParams parses an "fmtp" attribute value:
// "<payload> <params>", appending the params to the matching format.
func (dec *Decoder) decodeMediaParams(m *Media, v string) error {
	if !dec.split(v, ' ', 2, true) {
		return dec.err
	}
	payload, err := strconv.Atoi(dec.p[0])
	if err != nil {
		return err
	}
	format := dec.touchMediaFormat(m, payload)
	format.Params = append(format.Params, dec.p[1])
	return nil
}
// touchMediaFormat returns the Format registered for payload type p on m,
// creating (and registering) it on first use.
func (dec *Decoder) touchMediaFormat(m *Media, p int) *Format {
	if m.Formats == nil {
		m.Formats = map[int]*Format{}
	}
	if f, ok := m.Formats[p]; ok {
		return f
	}
	f := &Format{Payload: p}
	m.Formats[p] = f
	return f
}
// split tokenizes v on sep into at most n fields, storing them in dec.p
// (reusing its backing array across calls). Once n-1 separators have been
// consumed the remainder of v is kept whole in the final field and split
// reports true. If fewer than n fields exist: with required set, dec.err is
// populated and dec.p is left untouched; otherwise the fields found
// (including the trailing remainder) are stored. Either way it reports false.
func (dec *Decoder) split(v string, sep rune, n int, required bool) bool {
	p := dec.p[:0]
	off := 0
	for i, it := range v {
		if it == sep {
			p = append(p, v[off:i])
			off = i + 1
		}
		// One field short of the limit: swallow the rest into the last slot.
		if len(p)+1 == n {
			dec.p = append(p, v[off:])
			return true
		}
	}
	if required {
		dec.err = decodeError(v)
	} else {
		dec.p = append(p, v[off:])
	}
	return false
}
// parseInt parses v as a base-10 64-bit integer.
func (dec *Decoder) parseInt(v string) (int64, error) {
	return strconv.ParseInt(v, 10, 64)
}
// parseTime converts an SDP (NTP) timestamp string to time.Time. The
// literal "0" maps to the zero time, meaning "unbounded".
func (dec *Decoder) parseTime(v string) (time.Time, error) {
	if v == "0" {
		return time.Time{}, nil
	}
	ts, err := dec.parseInt(v)
	if err != nil {
		return time.Time{}, err
	}
	return ntpEpoch.Add(time.Second * time.Duration(ts)), nil
}
// parseDuration parses an SDP "typed time": an integer optionally suffixed
// with d (days), h (hours), m (minutes) or s (seconds).
func (dec *Decoder) parseDuration(v string) (time.Duration, error) {
	mul := int64(1)
	if n := len(v) - 1; n >= 0 {
		// Strip a recognized unit suffix and remember its multiplier.
		switch v[n] {
		case 'd':
			mul, v = 86400, v[:n]
		case 'h':
			mul, v = 3600, v[:n]
		case 'm':
			mul, v = 60, v[:n]
		case 's':
			v = v[:n]
		}
	}
	sec, err := dec.parseInt(v)
	if err != nil {
		return 0, err
	}
	return time.Duration(sec*mul) * time.Second, nil
}
// decodeLine routes a "k=v" line to the most recent media section when one
// exists (every line after an "m=" except another "m="), otherwise to the
// session-level decoder.
func (dec *Decoder) decodeLine(desc *Description, k byte, v string) (err error) {
	if n := len(desc.Media); n > 0 && k != 'm' {
		return dec.decodeMediaDesc(desc.Media[n-1], k, v)
	}
	return dec.decodeSessionDesc(desc, k, v)
}
// decodeSessionDesc dispatches one session-level "k=v" line to the matching
// Description field, following the SDP line types.
func (dec *Decoder) decodeSessionDesc(desc *Description, k byte, v string) (err error) {
	switch k {
	case 'v': // protocol version
		desc.Version, err = strconv.Atoi(v)
	case 'o': // origin
		desc.Origin, err = dec.decodeOrigin(v)
	case 's': // session name
		desc.Session = v
	case 'i': // session information
		desc.Information = v
	case 'u': // URI
		desc.URI = v
	case 'e': // email address (repeatable)
		desc.Email = append(desc.Email, v)
	case 'p': // phone number (repeatable)
		desc.Phone = append(desc.Phone, v)
	case 'c': // connection data
		desc.Connection, err = dec.decodeConn(v)
	case 'b': // bandwidth "<type>:<value>"
		if desc.Bandwidth == nil {
			desc.Bandwidth = make(map[string]int)
		}
		err = dec.decodeBandwidth(desc.Bandwidth, v)
	case 't': // timing
		desc.Timing, err = dec.decodeTiming(v)
	case 'r': // repeat times; only meaningful after a "t=" line
		if desc.Timing != nil {
			desc.Timing.Repeat, err = dec.decodeRepeats(v)
		}
	case 'z': // time zone adjustments
		err = dec.decodeTimezones(desc, v)
	case 'k': // encryption key
		desc.Key, err = dec.decodeKey(v)
	case 'a': // attribute
		var attr *Attr
		if attr, err = dec.decodeAttr(v); err == nil {
			desc.Attributes = append(desc.Attributes, attr)
		}
	case 'm': // start of a media section
		var m *Media
		if m, err = dec.decodeMedia(v); err == nil {
			desc.Media = append(desc.Media, m)
		}
	default:
		err = decodeError(k)
	}
	return
}
// decodeMediaDesc dispatches one media-level "k=v" line to the matching
// field of the current Media section. Only the subset of line types valid
// inside a media section is accepted.
func (dec *Decoder) decodeMediaDesc(m *Media, k byte, v string) (err error) {
	switch k {
	case 'i': // media title
		m.Information = v
	case 'c': // connection data
		m.Connection, err = dec.decodeConn(v)
	case 'b': // bandwidth "<type>:<value>"
		if m.Bandwidth == nil {
			m.Bandwidth = make(map[string]int)
		}
		err = dec.decodeBandwidth(m.Bandwidth, v)
	case 'k': // encryption key
		m.Key, err = dec.decodeKey(v)
	case 'a': // attribute
		var attr *Attr
		if attr, err = dec.decodeAttr(v); err == nil {
			m.Attributes = append(m.Attributes, attr)
		}
	default:
		err = decodeError(k)
	}
	return
}
// decodeAttr parses an "a=" value: either "name:value" or a bare flag "name".
func (dec *Decoder) decodeAttr(v string) (*Attr, error) {
	if !dec.split(v, ':', 2, false) {
		return &Attr{Name: dec.p[0]}, nil
	}
	return &Attr{Name: dec.p[0], Value: dec.p[1]}, nil
}
// decodeKey parses a "k=" value: either "method:key" or a bare "method".
func (dec *Decoder) decodeKey(v string) (*Key, error) {
	if !dec.split(v, ':', 2, false) {
		return &Key{Type: dec.p[0]}, nil
	}
	return &Key{Type: dec.p[0], Value: dec.p[1]}, nil
}
// decodeOrigin parses an "o=" line:
// "<username> <sess-id> <sess-version> <nettype> <addrtype> <address>".
func (dec *Decoder) decodeOrigin(v string) (o *Origin, err error) {
	if !dec.split(v, ' ', 6, true) {
		return nil, dec.err
	}
	o = &Origin{
		Username: dec.p[0],
		Network:  dec.p[3],
		Type:     dec.p[4],
		Address:  dec.p[5],
	}
	if o.SessionID, err = dec.parseInt(dec.p[1]); err != nil {
		return nil, err
	}
	if o.SessionVersion, err = dec.parseInt(dec.p[2]); err != nil {
		return nil, err
	}
	return
}
// decodeConn parses a "c=" line: "<nettype> <addrtype> <address-spec>" where
// address-spec is "<address>[/<ttl>[/<number-of-addresses>]]".
func (dec *Decoder) decodeConn(v string) (c *Connection, err error) {
	if !dec.split(v, ' ', 3, true) {
		return nil, dec.err
	}
	c = &Connection{
		Network: dec.p[0],
		Type:    dec.p[1],
	}
	// Re-split the address spec; dec.p is reused, so dec.p[0] now holds the
	// bare address with optional TTL / address-count fields following it.
	dec.split(dec.p[2], '/', 3, false)
	c.Address = dec.p[0]
	if len(dec.p) > 1 {
		if c.TTL, err = strconv.Atoi(dec.p[1]); err != nil {
			return
		}
		if len(dec.p) > 2 {
			c.AddressNum, err = strconv.Atoi(dec.p[2])
		}
	}
	return
}
// decodeBandwidth parses a "b=" value "<modifier>:<bandwidth>" into the map b.
func (dec *Decoder) decodeBandwidth(b map[string]int, v string) (err error) {
	if !dec.split(v, ':', 2, true) {
		return dec.err
	}
	b[dec.p[0]], err = strconv.Atoi(dec.p[1])
	return
}
// decodeTiming parses a "t=" line "<start> <stop>", both NTP timestamps.
// On a timestamp parse error the partially-filled Timing is returned
// together with the error, matching the original behavior.
func (dec *Decoder) decodeTiming(v string) (*Timing, error) {
	if !dec.split(v, ' ', 2, true) {
		return nil, dec.err
	}
	t := &Timing{}
	var err error
	if t.Start, err = dec.parseTime(dec.p[0]); err != nil {
		return t, err
	}
	t.Stop, err = dec.parseTime(dec.p[1])
	return t, err
}
// decodeRepeats parses an "r=" line:
// "<repeat interval> <active duration> <offset> [<offset> ...]".
func (dec *Decoder) decodeRepeats(v string) (r *Repeat, err error) {
	if !dec.split(v, ' ', 3, true) {
		return nil, dec.err
	}
	r = &Repeat{}
	r.Interval, err = dec.parseDuration(dec.p[0])
	if err == nil {
		r.Duration, err = dec.parseDuration(dec.p[1])
	}
	if err == nil {
		var d time.Duration
		// Re-split the remainder into individual offsets; dec.p is reused,
		// so interval/duration had to be consumed before this point.
		dec.split(dec.p[2], ' ', 255, false)
		for _, it := range dec.p {
			if d, err = dec.parseDuration(it); err != nil {
				return
			}
			r.Offsets = append(r.Offsets, d)
		}
	}
	return
}
// decodeTimezones parses a "z=" line: repeated pairs of
// "<adjustment time> <offset>". A trailing unpaired token is ignored.
func (dec *Decoder) decodeTimezones(desc *Description, v string) (err error) {
	dec.split(v, ' ', 255, false)
	for i, n := 0, len(dec.p)-1; i < n; i += 2 {
		z := &TimeZone{}
		if z.Time, err = dec.parseTime(dec.p[i]); err != nil {
			return
		}
		if z.Offset, err = dec.parseDuration(dec.p[i+1]); err != nil {
			return
		}
		desc.TimeZones = append(desc.TimeZones, z)
	}
	return
}
// decodeMedia parses an "m=" line:
// "<type> <port>[/<number-of-ports>] <proto> <fmt> [<fmt> ...]".
func (dec *Decoder) decodeMedia(v string) (m *Media, err error) {
	if !dec.split(v, ' ', 4, true) {
		return nil, dec.err
	}
	m = &Media{
		Type:  dec.p[0],
		Proto: dec.p[2],
	}
	// Save the format list before dec.p is clobbered by further splits.
	var formats = dec.p[3]
	if dec.split(dec.p[1], '/', 2, false) {
		m.PortNum, err = strconv.Atoi(dec.p[1])
	}
	if err == nil {
		m.Port, err = strconv.Atoi(dec.p[0])
	}
	if err == nil {
		// Register every listed payload type so later attributes (rtpmap,
		// fmtp, ...) can attach codec details to it.
		dec.split(formats, ' ', 255, false)
		for _, it := range dec.p {
			p, err := strconv.Atoi(it)
			if err != nil {
				return nil, err
			}
			dec.touchMediaFormat(m, p)
		}
	}
	return
}
// bufferedReader adapts a bufio.Reader to the decoder's line-oriented
// reader interface.
type bufferedReader struct {
	buf *bufio.Reader
}

// ReadLine returns the next line from the underlying reader. Lines longer
// than the bufio buffer are rejected with a decode error.
func (r *bufferedReader) ReadLine() (string, error) {
	line, isPrefix, err := r.buf.ReadLine()
	if isPrefix {
		return "", decodeError("line is too large")
	}
	if err != nil {
		return "", err
	}
	return string(line), nil
}
type stringReader struct {
buf string
}
func (r *stringReader) ReadLine() (string, error) {
n := len(r.buf)
if n == 0 {
return "", io.EOF
}
i, j := 0, 0
for j < n {
if c := r.buf[j]; c == '\n' {
break
} else if c == '\r' {
j++
} else {
j++
i = j
}
}
v := r.buf[:i]
if n > j {
r.buf = r.buf[j+1:]
} else {
r.buf = ""
}
return v, nil
}
// decodeError is an error type carrying the offending input fragment.
type decodeError string

// Error implements the error interface.
func (d decodeError) Error() string {
	return "sdp: decode error '" + string(d) + "'"
}
var ntpEpoch = time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)
|
package main
import (
"fmt"
"phrasefinder"
// "github.com/mtrenkmann/phrasefinder-client-go/src/phrasefinder"
)
// main demonstrates a PhraseFinder query: it searches for completions of
// "I like ???" and prints each scored phrase plus the remaining quota.
func main() {
	// Limit the response to the ten best matches.
	options := phrasefinder.DefaultOptions()
	options.Topk = 10

	result, err := phrasefinder.Search("I like ???", options)
	if err != nil {
		fmt.Println("Some error happened: ", err)
		return
	}

	// One line per phrase: score followed by "text_tag" tokens.
	for _, phrase := range result.Phrases {
		fmt.Printf("%6f", phrase.Score)
		for _, tk := range phrase.Tokens {
			fmt.Printf(" %v_%v", tk.Text, tk.Tag)
		}
		fmt.Println()
	}
	fmt.Println("Remaining quota: ", result.Quota)
}
|
package oddtype
// Hha is the odds record for the "hha" odd type as delivered by the
// upstream JSON feed; the `odd` tags mark the odds fields and the ID field.
// NOTE(review): field semantics are inferred from the JSON keys only —
// confirm against the feed's documentation.
type Hha struct {
	A          string `json:"A" odd:"type=hha"`
	H          string `json:"H" odd:"type=hha"`
	D          string `json:"D" odd:"type=hha"`
	ID         string `json:"ID" odd:"id"`
	POOLSTATUS string `json:"POOLSTATUS"`
	INPLAY     string `json:"INPLAY"`
	ALLUP      string `json:"ALLUP"`
	HG         string `json:"HG"`
	AG         string `json:"AG"`
	Cur        string `json:"Cur"`
}
|
package images
import (
"errors"
"github.com/joonazan/imagick/imagick"
"path/filepath"
"testing"
)
// TestResize1 verifies that resizing toresize.jpg matches resized.jpg.
func TestResize1(t *testing.T) {
	doTest(t, "toresize.jpg", "resized.jpg", true)
}

// TestResize2 verifies that resizing toresize2.jpg matches resized2.jpg.
func TestResize2(t *testing.T) {
	doTest(t, "toresize2.jpg", "resized2.jpg", true)
}

// TestResize3 verifies that a deliberately mismatched reference image is
// detected as exceeding the distortion tolerance.
func TestResize3(t *testing.T) {
	doTest(t, "toresize.jpg", "resized_bad.jpg", false)
}

// TestResizeWebp1 verifies resizing of a WebP input image.
func TestResizeWebp1(t *testing.T) {
	doTest(t, "toresizeWebp1.webp", "resizedWebp1.webp", true)
}
// doTest resizes to_resize_fn, measures its distortion against resized_fn
// and fails the test when the outcome contradicts should_pass: a "good"
// pair must stay within tolerance, a "bad" pair must exceed it.
func doTest(t *testing.T, to_resize_fn, resized_fn string, should_pass bool) {
	const tolerance = 0.002
	distortion, err := GetDistortion(to_resize_fn, resized_fn)
	if err != nil {
		t.Fatal(err)
	}
	withinTolerance := distortion <= tolerance
	if withinTolerance != should_pass {
		t.Fatal("Resize failed. Distortion:", distortion, "Tolerance:", tolerance, "Should pass?:", should_pass)
	}
}
// GetDistortion resizes filename to 100x100, compares the result against
// filename_cmp using mean-squared error, writes the resized image into the
// test-results folder, and returns the measured distortion.
//
// Fix: the error from ReadImageBlob was previously ignored; a failed blob
// read would have produced a bogus comparison below.
func GetDistortion(filename, filename_cmp string) (distortion float64, err error) {
	const image_folder = "../testimages/resize/"
	imagick.Initialize()
	defer imagick.Terminate()
	mw_cmp := imagick.NewMagickWand()
	defer mw_cmp.Destroy()
	err = mw_cmp.ReadImage(filepath.FromSlash(image_folder + filename_cmp))
	if err != nil {
		err = errors.New("Could not load reference image:" + err.Error())
		return
	}
	mw := imagick.NewMagickWand()
	defer mw.Destroy()
	image, err := LoadImage(filepath.FromSlash(image_folder + filename))
	if err != nil {
		err = errors.New("LoadImage failed:" + err.Error())
		return
	}
	resized, err := image.Resized(100, 100)
	if err != nil {
		err = errors.New("Resize failed:" + err.Error())
		return
	}
	if err = mw.ReadImageBlob(resized.ToBlob()); err != nil {
		err = errors.New("ReadImageBlob failed:" + err.Error())
		return
	}
	trash, distortion := mw.CompareImages(mw_cmp, imagick.METRIC_MEAN_SQUARED_ERROR)
	trash.Destroy()
	// The resized image is saved for manual inspection; a write failure is
	// reported even though the distortion was already measured.
	err = mw.WriteImage(filepath.FromSlash("../testresults/resize/" + filename))
	return
}
|
package odoo
import (
"fmt"
)
// AccountReconcileModelTemplate represents account.reconcile.model.template model.
// NOTE(review): every xmlrpc tag spells the option "omptempty" rather than
// "omitempty"; if the xmlrpc marshaller matches on "omitempty", zero-valued
// fields are not actually being omitted — confirm against the xmlrpc library.
// Left unchanged here because the whole generated file uses the same spelling.
type AccountReconcileModelTemplate struct {
	LastUpdate       *Time      `xmlrpc:"__last_update,omptempty"`
	AccountId        *Many2One  `xmlrpc:"account_id,omptempty"`
	Amount           *Float     `xmlrpc:"amount,omptempty"`
	AmountType       *Selection `xmlrpc:"amount_type,omptempty"`
	ChartTemplateId  *Many2One  `xmlrpc:"chart_template_id,omptempty"`
	CreateDate       *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid        *Many2One  `xmlrpc:"create_uid,omptempty"`
	DisplayName      *String    `xmlrpc:"display_name,omptempty"`
	HasSecondLine    *Bool      `xmlrpc:"has_second_line,omptempty"`
	Id               *Int       `xmlrpc:"id,omptempty"`
	Label            *String    `xmlrpc:"label,omptempty"`
	Name             *String    `xmlrpc:"name,omptempty"`
	SecondAccountId  *Many2One  `xmlrpc:"second_account_id,omptempty"`
	SecondAmount     *Float     `xmlrpc:"second_amount,omptempty"`
	SecondAmountType *Selection `xmlrpc:"second_amount_type,omptempty"`
	SecondLabel      *String    `xmlrpc:"second_label,omptempty"`
	SecondTaxId      *Many2One  `xmlrpc:"second_tax_id,omptempty"`
	Sequence         *Int       `xmlrpc:"sequence,omptempty"`
	TaxId            *Many2One  `xmlrpc:"tax_id,omptempty"`
	WriteDate        *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid         *Many2One  `xmlrpc:"write_uid,omptempty"`
}

// AccountReconcileModelTemplates represents array of account.reconcile.model.template model.
type AccountReconcileModelTemplates []AccountReconcileModelTemplate

// AccountReconcileModelTemplateModel is the odoo model name.
const AccountReconcileModelTemplateModel = "account.reconcile.model.template"
// Many2One convert AccountReconcileModelTemplate to *Many2One.
// NOTE(review): dereferences armt.Id via Get(); behavior when Id is nil
// depends on Int.Get()'s nil-receiver handling — not visible from here.
func (armt *AccountReconcileModelTemplate) Many2One() *Many2One {
	return NewMany2One(armt.Id.Get(), "")
}
// CreateAccountReconcileModelTemplate creates a new account.reconcile.model.template model and returns its id.
func (c *Client) CreateAccountReconcileModelTemplate(armt *AccountReconcileModelTemplate) (int64, error) {
	ids, err := c.CreateAccountReconcileModelTemplates([]*AccountReconcileModelTemplate{armt})
	if err != nil {
		return -1, err
	}
	// No error but also no id returned by the server.
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateAccountReconcileModelTemplates creates new account.reconcile.model.template models and returns their ids.
// (Fixed comment: it previously repeated the singular function's name.)
func (c *Client) CreateAccountReconcileModelTemplates(armts []*AccountReconcileModelTemplate) ([]int64, error) {
	var vv []interface{}
	for _, v := range armts {
		vv = append(vv, v)
	}
	return c.Create(AccountReconcileModelTemplateModel, vv)
}

// UpdateAccountReconcileModelTemplate updates an existing account.reconcile.model.template record.
func (c *Client) UpdateAccountReconcileModelTemplate(armt *AccountReconcileModelTemplate) error {
	return c.UpdateAccountReconcileModelTemplates([]int64{armt.Id.Get()}, armt)
}

// UpdateAccountReconcileModelTemplates updates existing account.reconcile.model.template records.
// All records (represented by ids) will be updated by armt values.
func (c *Client) UpdateAccountReconcileModelTemplates(ids []int64, armt *AccountReconcileModelTemplate) error {
	return c.Update(AccountReconcileModelTemplateModel, ids, armt)
}

// DeleteAccountReconcileModelTemplate deletes an existing account.reconcile.model.template record.
func (c *Client) DeleteAccountReconcileModelTemplate(id int64) error {
	return c.DeleteAccountReconcileModelTemplates([]int64{id})
}

// DeleteAccountReconcileModelTemplates deletes existing account.reconcile.model.template records.
func (c *Client) DeleteAccountReconcileModelTemplates(ids []int64) error {
	return c.Delete(AccountReconcileModelTemplateModel, ids)
}
// GetAccountReconcileModelTemplate gets account.reconcile.model.template existing record.
func (c *Client) GetAccountReconcileModelTemplate(id int64) (*AccountReconcileModelTemplate, error) {
	armts, err := c.GetAccountReconcileModelTemplates([]int64{id})
	if err != nil {
		return nil, err
	}
	// NOTE(review): armts is the address of a local composite and can never be
	// nil here; the nil check is redundant but harmless (generated code).
	if armts != nil && len(*armts) > 0 {
		return &((*armts)[0]), nil
	}
	return nil, fmt.Errorf("id %v of account.reconcile.model.template not found", id)
}

// GetAccountReconcileModelTemplates gets account.reconcile.model.template existing records.
func (c *Client) GetAccountReconcileModelTemplates(ids []int64) (*AccountReconcileModelTemplates, error) {
	armts := &AccountReconcileModelTemplates{}
	if err := c.Read(AccountReconcileModelTemplateModel, ids, nil, armts); err != nil {
		return nil, err
	}
	return armts, nil
}

// FindAccountReconcileModelTemplate finds account.reconcile.model.template record by querying it with criteria.
func (c *Client) FindAccountReconcileModelTemplate(criteria *Criteria) (*AccountReconcileModelTemplate, error) {
	armts := &AccountReconcileModelTemplates{}
	if err := c.SearchRead(AccountReconcileModelTemplateModel, criteria, NewOptions().Limit(1), armts); err != nil {
		return nil, err
	}
	// Same redundant-but-harmless nil check as above.
	if armts != nil && len(*armts) > 0 {
		return &((*armts)[0]), nil
	}
	return nil, fmt.Errorf("account.reconcile.model.template was not found with criteria %v", criteria)
}

// FindAccountReconcileModelTemplates finds account.reconcile.model.template records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountReconcileModelTemplates(criteria *Criteria, options *Options) (*AccountReconcileModelTemplates, error) {
	armts := &AccountReconcileModelTemplates{}
	if err := c.SearchRead(AccountReconcileModelTemplateModel, criteria, options, armts); err != nil {
		return nil, err
	}
	return armts, nil
}
// FindAccountReconcileModelTemplateIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountReconcileModelTemplateIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountReconcileModelTemplateModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}

// FindAccountReconcileModelTemplateId finds record id by querying it with criteria.
// Returns -1 and a descriptive error when no record matches.
func (c *Client) FindAccountReconcileModelTemplateId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountReconcileModelTemplateModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("account.reconcile.model.template was not found with criteria %v and options %v", criteria, options)
}
|
package cloudformation
// AWSECSTaskDefinition_HealthCheck AWS CloudFormation Resource (AWS::ECS::TaskDefinition.HealthCheck)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html
//
// NOTE(review): the integer fields carry json "omitempty", so an explicit
// zero (e.g. Timeout: 0) is dropped from the serialized template — confirm
// that is intended before relying on explicit zeros.
type AWSECSTaskDefinition_HealthCheck struct {
	// Command AWS CloudFormation Property
	// Required: true
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html#cfn-ecs-taskdefinition-healthcheck-command
	Command []string `json:"Command,omitempty"`
	// Interval AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html#cfn-ecs-taskdefinition-healthcheck-interval
	Interval int `json:"Interval,omitempty"`
	// Retries AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html#cfn-ecs-taskdefinition-healthcheck-retries
	Retries int `json:"Retries,omitempty"`
	// StartPeriod AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html#cfn-ecs-taskdefinition-healthcheck-startperiod
	StartPeriod int `json:"StartPeriod,omitempty"`
	// Timeout AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-healthcheck.html#cfn-ecs-taskdefinition-healthcheck-timeout
	Timeout int `json:"Timeout,omitempty"`
}

// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *AWSECSTaskDefinition_HealthCheck) AWSCloudFormationType() string {
	const resourceType = "AWS::ECS::TaskDefinition.HealthCheck"
	return resourceType
}
|
package blockchain
import (
"github.com/hyperledger/burrow/crypto"
)
// ValidatorsWindow is a fixed-size ring of validator-power buckets.
// Buckets[head] is the accumulator for pending power changes; Total tracks
// the running sum over the buckets currently inside the window.
type ValidatorsWindow struct {
	Buckets []Validators
	Total   Validators
	head    int // index of the current accumulator bucket in the ring
}
// Provides a sliding window over the last size buckets of validator power changes
func NewValidatorsWindow(size int) ValidatorsWindow {
	// Clamp to a minimum of one bucket.
	if size < 1 {
		size = 1
	}
	window := ValidatorsWindow{
		Buckets: make([]Validators, size),
		Total:   NewValidators(),
	}
	// head starts at zero; seed the initial accumulator bucket.
	window.Buckets[window.head] = NewValidators()
	return window
}
// Updates the current head bucket (accumulator).
// The recorded change is folded into the window totals by the next CommitInto.
func (vw *ValidatorsWindow) AlterPower(publicKey crypto.PublicKey, power uint64) error {
	return vw.Buckets[vw.head].AlterPower(publicKey, power)
}
// CommitInto applies the head bucket (the accumulator of pending power
// changes) to validatorsToUpdate and to the running Total, then advances the
// ring and subtracts the new tail bucket from Total so Total always reflects
// the buckets currently inside the window. Returns the first error hit.
func (vw *ValidatorsWindow) CommitInto(validatorsToUpdate *Validators) error {
	var err error
	if vw.Buckets[vw.head].Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
		// Update the sink validators
		err = validatorsToUpdate.AlterPower(publicKey, power)
		if err != nil {
			return true
		}
		// Add to total power
		err = vw.Total.AddPower(publicKey, power)
		if err != nil {
			return true
		}
		return false
	}) {
		// If iteration stopped there was an error
		return err
	}
	// move the ring buffer on
	vw.head = (vw.head + 1) % len(vw.Buckets)
	// Subtract the tail bucket (if any) from the total.
	// BUG FIX: the stop condition here was inverted — iteration stopped after
	// the first *successful* subtraction (then returned a nil err) and kept
	// going on failure, leaving Total inconsistent. Stop on error, matching
	// the loop above.
	if vw.Buckets[vw.head].Iterate(func(publicKey crypto.PublicKey, power uint64) (stop bool) {
		err = vw.Total.SubtractPower(publicKey, power)
		if err != nil {
			return true
		}
		return false
	}) {
		return err
	}
	// Clear new head bucket (and possibly previous tail)
	vw.Buckets[vw.head] = NewValidators()
	return nil
}
|
package models
// Plan is a JSON-serializable plan record; Lob presumably identifies the
// line of business — confirm with callers.
type Plan struct {
	PlanId   string `json:"plan_id"`
	PlanName string `json:"plan_name"`
	Lob      string `json:"lob"`
}

// Level is a JSON-serializable level record with its allowed values.
type Level struct {
	LevelId     string       `json:"level_id"`
	LevelName   string       `json:"level_name"`
	LevelType   string       `json:"level_type"`
	Lob         string       `json:"lob"`
	LevelValues []LevelValue `json:"level_values"`
}

// LevelValue is one allowed value for a Level, with its basis.
type LevelValue struct {
	Value      string `json:"value"`
	ValueBasis string `json:"value_basis"`
}
|
package nodefindergo
import (
"fmt"
"os"
"path"
"reflect"
"testing"
"github.com/zxjsdp/nodefinder-go/nodefindergo"
)
// Shared fixtures for the nodefinder tests.
var (
	rawTree     string = "((a, ((b, c), (ddd,\t e))), (f, g));\n\n" // raw Newick input with whitespace
	cleanTree   string = "((a,((b,c),(ddd,e))),(f,g));"            // rawTree after cleaning
	nameA       string = "ddd"
	nameB       string = "b"
	caliInfo    string = ">0.1<0.2" // calibration annotation inserted at the MRCA
	branchLabel string = "@0.3"     // label appended directly after a leaf name
)
// Test_getCleanTreeStr verifies whitespace/newline stripping of the raw tree.
func Test_getCleanTreeStr(t *testing.T) {
	if nodefindergo.GetCleanTreeStr(rawTree) != cleanTree {
		t.Error("GetCleanTreeStr (nodefindergo.go): Clean tree string failed")
	}
}

// Test_GetRightIndexOfName checks the index just past nameA in cleanTree.
func Test_GetRightIndexOfName(t *testing.T) {
	expectedRightIndexOfName := 15
	result := nodefindergo.GetRightIndexOfName(cleanTree, nameA)
	if result != expectedRightIndexOfName {
		t.Error("GetRightIndexOfName (nodefindergo.go):\n" +
			fmt.Sprintf("Get right index of name failed (result: %d)", result))
	}
}

// Test_GetInsertionList checks the candidate insertion positions for nameA.
func Test_GetInsertionList(t *testing.T) {
	expectedInsertionList := []int{18, 19, 20, 27}
	result := nodefindergo.GetInsertionList(cleanTree, nameA)
	if !reflect.DeepEqual(result, expectedInsertionList) {
		t.Error("GetInsertionList (nodefindergo.go):\n" +
			fmt.Sprintf("Failed to get insertion list (result: %v)", result))
	}
}

// Test_GetIndexOfTMRCA checks the most-recent-common-ancestor position of
// nameA and nameB.
func Test_GetIndexOfTMRCA(t *testing.T) {
	expectedIndexOfTMRCA := 19
	result := nodefindergo.GetIndexOfTMRCA(cleanTree, nameA, nameB)
	if result != expectedIndexOfTMRCA {
		t.Error("GetIndexOfTMRCA (nodefindergo.go):\n" +
			fmt.Sprintf("result: %v, expect: %v", result, expectedIndexOfTMRCA))
	}
}

// Test_SingleCalibration checks insertion of one calibration at the MRCA.
func Test_SingleCalibration(t *testing.T) {
	expectedTreeWithCali := "((a,((b,c),(ddd,e))>0.1<0.2),(f,g));"
	result := nodefindergo.SingleCalibration(cleanTree, nameA, nameB, caliInfo)
	if result != expectedTreeWithCali {
		t.Error("SingleCalibration (nodefindergo.go):\n" +
			fmt.Sprintf("result: %v, expected: %v", result, expectedTreeWithCali))
	}
}

// Test_AddSingleBranchLabel checks appending a label to a single leaf.
func Test_AddSingleBranchLabel(t *testing.T) {
	expectedTreeWithBranchLabel := "((a,((b,c),(ddd@0.3,e))),(f,g));"
	result := nodefindergo.AddSingleBranchLabel(cleanTree, nameA, branchLabel)
	if !reflect.DeepEqual(result, expectedTreeWithBranchLabel) {
		t.Error("AddSingleBranchLabel (nodefindergo.go):\n" +
			fmt.Sprintf("result: %v, expected: %v", result, expectedTreeWithBranchLabel))
	}
}
// Test_MultipleCalibration applies two calibrations and one branch label in a
// single pass and checks the combined annotated tree (note: the expected
// string here retains the raw tree's spacing).
func Test_MultipleCalibration(t *testing.T) {
	expectedTreeStrWithMultiCalis := "((a, ((b, c)>0.03<0.05, (ddd, e))>0.1<0.2), (f#3, g));"
	calibrations := []nodefindergo.Calibration{
		{
			ID:          0,
			CaliType:    nodefindergo.CALI_OR_CLADE_LABEL_TYPE,
			NameA:       "b",
			NameB:       "ddd",
			CaliInfo:    ">0.1<0.2",
			Description: "First calibration"},
		{
			ID:          1,
			CaliType:    nodefindergo.CALI_OR_CLADE_LABEL_TYPE,
			NameA:       "b",
			NameB:       "c",
			CaliInfo:    ">0.03<0.05",
			Description: "Second calibration"},
		{
			ID:          2,
			CaliType:    nodefindergo.BRANCH_LABEL_TYPE,
			NameA:       "f",
			NameB:       "",
			CaliInfo:    "#3",
			Description: "First branchLabel"},
	}
	result := nodefindergo.MultipleCalibration(cleanTree, calibrations)
	if !(reflect.DeepEqual(result, expectedTreeStrWithMultiCalis)) {
		t.Error("MultipleCalibration (nodefindergo.go):\n" +
			fmt.Sprintf("result: %v, expected: %v", result, expectedTreeStrWithMultiCalis))
	}
}
// Test_ParseConfig parses the calibration.txt fixture next to the working
// directory and compares the decoded calibrations against expectations.
func Test_ParseConfig(t *testing.T) {
	workingDir, err := os.Getwd()
	if err != nil {
		// BUG FIX: was t.Error, which continued with an empty workingDir and
		// produced a confusing secondary failure; abort immediately instead.
		t.Fatal("Get working dir failed")
	}
	caliFilePath := path.Join(workingDir, "calibration.txt")
	expectedCalibrations := []nodefindergo.Calibration{
		{
			ID:          0,
			CaliType:    nodefindergo.CALI_OR_CLADE_LABEL_TYPE,
			NameA:       "b",
			NameB:       "ddd",
			CaliInfo:    ">0.1<0.2",
			Description: "Normal calibration or clade label."},
		{
			ID:          1,
			CaliType:    nodefindergo.CALI_OR_CLADE_LABEL_TYPE,
			NameA:       "b",
			NameB:       "c",
			CaliInfo:    "$5",
			Description: "Normal calibration or clade label."},
		{
			ID:          2,
			CaliType:    nodefindergo.BRANCH_LABEL_TYPE,
			NameA:       "f",
			NameB:       "",
			CaliInfo:    "#3",
			Description: "Branch label description"},
	}
	result := nodefindergo.ParseConfig(caliFilePath)
	if !reflect.DeepEqual(result, expectedCalibrations) {
		t.Error("ParseConfig (nodefinder.go):\n" +
			fmt.Sprintf("result: %v,\nexpected: %v", result, expectedCalibrations))
	}
}
|
package cli
import (
"bufio"
"fmt"
"os"
"strings"
)
// Prompt is a simple implementation of keepassrpc.Passworder.
// It prompts on stdout, reads one line from stdin and returns it with
// surrounding whitespace trimmed.
func Prompt() (string, error) {
	fmt.Print("Enter the code provided by KeePass: ")
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(line), nil
}
|
// Copyright 2019 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"bufio"
"fmt"
"os"
"path"
"path/filepath"
"strings"
)
const (
	// cgroupTasks is the per-cgroup file listing member task (thread) IDs.
	cgroupTasks = "tasks"
	// CpusetCgroupDir is the cpuset hierarchy mount point (cgroup v1 layout).
	CpusetCgroupDir = "/sys/fs/cgroup/cpuset"
)
// GetContainerCgroupDir brute-force searches for a container directory under parentDir.
func GetContainerCgroupDir(parentDir, containerID string) string {
var containerDir string
filepath.Walk(parentDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if !info.IsDir() {
return nil
}
if containerDir != "" {
return filepath.SkipDir
}
// Assume any directory that contains containerID is the one we look for.
if strings.Contains(filepath.Base(path), containerID) {
containerDir = path
}
return nil
})
return containerDir
}
// isProcess finds out whether the task is a process (thread-group leader) or
// a thread, by comparing the Tgid and Pid lines of /proc/<id>/status.
// Defaults to true when the status file cannot be read or parsed.
func isProcess(processID string) bool {
	f, openErr := os.Open("/proc/" + processID + "/status")
	if openErr != nil {
		return true
	}
	defer f.Close()
	var tgid, pid string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		switch {
		case strings.HasPrefix(line, "Tgid:"):
			tgid = strings.TrimSpace(line[len("Tgid:"):])
		case strings.HasPrefix(line, "Pid:"):
			pid = strings.TrimSpace(line[len("Pid:"):])
		default:
			continue
		}
		// A process is its own thread-group leader: Tgid == Pid.
		if tgid != "" && pid != "" {
			return tgid == pid
		}
	}
	return true
}
// getTasksInContainer returns the task IDs listed in the container's cpuset
// cgroup tasks file. It first probes the known per-runtime directory layouts
// (containerd, cri-o, docker, plain, kata) under cgroupParentDir, then falls
// back to a brute-force search of the whole cpuset hierarchy. When
// onlyProcesses is true, thread IDs are filtered out via isProcess.
func getTasksInContainer(cgroupParentDir, podID, containerID string, onlyProcesses bool) ([]string, error) {
	var entries []string
	// Find Cpuset sub-cgroup directory of this container
	containerDir := ""
	// Probe known per-container directories
	if cgroupParentDir != "" {
		dirs := []string{
			filepath.Join(CpusetCgroupDir, cgroupParentDir, "cri-containerd-"+containerID+".scope"),
			filepath.Join(CpusetCgroupDir, cgroupParentDir, "crio-"+containerID+".scope"),
			filepath.Join(CpusetCgroupDir, cgroupParentDir, "docker-"+containerID+".scope"),
			filepath.Join(CpusetCgroupDir, cgroupParentDir, containerID),
			filepath.Join(CpusetCgroupDir, cgroupParentDir, "kata_"+podID),
		}
		for _, d := range dirs {
			info, err := os.Stat(d)
			if err == nil && info.IsDir() {
				containerDir = d
				break
			}
		}
	}
	// Try generic way to search container directory under one cgroups subsytem directory
	if containerDir == "" {
		containerDir = GetContainerCgroupDir(CpusetCgroupDir, containerID)
		if containerDir == "" {
			return nil, fmt.Errorf("failed to find corresponding cgroups directory for container %s", containerID)
		}
	}
	// Find all processes listed in cgroup tasks file and apply to RDT CLOS
	cgroupTasksFileName := path.Join(containerDir, cgroupTasks)
	file, err := os.Open(cgroupTasksFileName)
	if err != nil {
		// FIX: include the underlying cause instead of discarding it.
		return nil, fmt.Errorf("failed to open file %s: %w", cgroupTasksFileName, err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		entry := scanner.Text()
		if !onlyProcesses || isProcess(entry) {
			entries = append(entries, entry)
		}
	}
	// FIX: a scan error previously went unnoticed and could silently truncate
	// the task list.
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to read file %s: %w", cgroupTasksFileName, err)
	}
	return entries, nil
}
// GetProcessesInContainer gets the IDs of all processes in the container
// (threads are filtered out).
func GetProcessesInContainer(cgroupParentDir, podID, containerID string) ([]string, error) {
	return getTasksInContainer(cgroupParentDir, podID, containerID, true)
}

// GetTasksInContainer gets the IDs of all tasks in the container, including
// individual threads.
func GetTasksInContainer(cgroupParentDir, podID, containerID string) ([]string, error) {
	return getTasksInContainer(cgroupParentDir, podID, containerID, false)
}
|
package paymentrequest
import (
"database/sql"
"fmt"
"time"
"github.com/benbjohnson/clock"
"github.com/gobuffalo/pop/v5"
"go.uber.org/zap"
"github.com/transcom/mymove/pkg/services/invoice"
"github.com/transcom/mymove/pkg/db/sequence"
ediinvoice "github.com/transcom/mymove/pkg/edi/invoice"
"github.com/transcom/mymove/pkg/models"
paymentrequesthelper "github.com/transcom/mymove/pkg/payment_request"
"github.com/transcom/mymove/pkg/services"
)
// paymentRequestReviewedProcessor fetches reviewed payment requests,
// generates EDI 858 invoices for them and sends the result to Syncada via
// GEX or SFTP.
type paymentRequestReviewedProcessor struct {
	db                            *pop.Connection
	logger                        Logger
	reviewedPaymentRequestFetcher services.PaymentRequestReviewedFetcher
	ediGenerator                  services.GHCPaymentRequestInvoiceGenerator
	runSendToSyncada              bool // if false, do not send to Syncada, e.g. UT shouldn't send to Syncada
	gexSender                     services.GexSender
	sftpSender                    services.SyncadaSFTPSender
}
// NewPaymentRequestReviewedProcessor returns a new payment request reviewed processor
func NewPaymentRequestReviewedProcessor(db *pop.Connection,
	logger Logger,
	fetcher services.PaymentRequestReviewedFetcher,
	generator services.GHCPaymentRequestInvoiceGenerator,
	runSendToSyncada bool,
	gexSender services.GexSender,
	sftpSender services.SyncadaSFTPSender) services.PaymentRequestReviewedProcessor {
	processor := paymentRequestReviewedProcessor{
		db:                            db,
		logger:                        logger,
		reviewedPaymentRequestFetcher: fetcher,
		ediGenerator:                  generator,
		runSendToSyncada:              runSendToSyncada,
		gexSender:                     gexSender,
		sftpSender:                    sftpSender,
	}
	return &processor
}
// InitNewPaymentRequestReviewedProcessor initialize NewPaymentRequestReviewedProcessor for production use.
// It wires up the reviewed-payment-request fetcher, the GHC invoice generator
// and a new Syncada SFTP session; the GEX sender is intentionally left nil in
// this configuration.
func InitNewPaymentRequestReviewedProcessor(db *pop.Connection, logger Logger, sendToSyncada bool, icnSequencer sequence.Sequencer) (services.PaymentRequestReviewedProcessor, error) {
	reviewedPaymentRequestFetcher := NewPaymentRequestReviewedFetcher(db)
	generator := invoice.NewGHCPaymentRequestInvoiceGenerator(db, icnSequencer, clock.New())
	sftpSession, err := invoice.InitNewSyncadaSFTPSession()
	if err != nil {
		// FIX: the old comment claimed the error was "just logged" with a nil
		// session, but the function has always returned the error; the comment
		// now matches the code.
		logger.Error(fmt.Errorf("configuration of SyncadaSFTPSession failed: %w", err).Error())
		return nil, err
	}
	// No GEX sender here; declared for clarity (zero value of an interface is nil).
	var gexSender services.GexSender
	return NewPaymentRequestReviewedProcessor(
		db,
		logger,
		reviewedPaymentRequestFetcher,
		generator,
		sendToSyncada,
		gexSender,
		sftpSession), nil
}
// ProcessAndLockReviewedPR processes one reviewed payment request inside a
// single DB transaction: it row-locks the request (SKIP LOCKED, so concurrent
// workers silently pass over rows already being processed), generates the EDI
// 858, sends it to Syncada, and marks the request SENT_TO_GEX.
func (p *paymentRequestReviewedProcessor) ProcessAndLockReviewedPR(pr models.PaymentRequest) error {
	return p.db.Transaction(func(tx *pop.Connection) error {
		var lockedPR models.PaymentRequest
		query := `
		SELECT * FROM payment_requests
		WHERE id = $1 FOR UPDATE SKIP LOCKED;
	`
		// BUG FIX: the lock query and the status update were issued via p.db
		// (a different connection) instead of tx, so FOR UPDATE held no lock
		// within this transaction; use tx throughout.
		err := tx.RawQuery(query, pr.ID).First(&lockedPR)
		if err != nil {
			if err == sql.ErrNoRows {
				// Another worker holds the row lock; nothing to do here.
				return nil
			}
			return fmt.Errorf("failure retrieving payment request with ID: %s. Err: %w", pr.ID, err)
		}
		// generate EDI file
		edi858c, err := p.ediGenerator.Generate(lockedPR, false)
		if err != nil {
			return fmt.Errorf("function ProcessReviewedPaymentRequest failed call to generator.Generate: %w", err)
		}
		edi858cString, err := edi858c.EDIString(p.logger)
		if err != nil {
			return fmt.Errorf("function ProcessReviewedPaymentRequest failed call to edi858c.EDIString: %w", err)
		}
		p.logger.Info("858 Processor calling SendToSyncada...",
			zap.Int64("858 ICN", edi858c.ISA.InterchangeControlNumber),
			zap.String("ShipmentIdentificationNumber/PaymentRequestNumber", edi858c.Header.ShipmentInformation.ShipmentIdentificationNumber),
			zap.String("ReferenceIdentification/PaymentRequestNumber", edi858c.Header.PaymentRequestNumber.ReferenceIdentification),
			zap.String("Date", edi858c.ISA.InterchangeDate),
			zap.String("Time", edi858c.ISA.InterchangeTime),
		)
		// Send EDI string to Syncada
		// If sent successfully to GEX, update payment request status to SENT_TO_GEX.
		err = paymentrequesthelper.SendToSyncada(edi858cString, p.gexSender, p.sftpSender, p.runSendToSyncada, p.logger)
		if err != nil {
			return fmt.Errorf("error sending the following EDI (PaymentRequest.ID: %s, error string) to Syncada: %s", lockedPR.ID, err)
		}
		sentToGexAt := time.Now()
		lockedPR.SentToGexAt = &sentToGexAt
		lockedPR.Status = models.PaymentRequestStatusSentToGex
		if err := tx.Update(&lockedPR); err != nil {
			return fmt.Errorf("failure updating payment request status: %w", err)
		}
		return nil
	})
}
// ProcessReviewedPaymentRequest fetches every reviewed payment request and
// sends each one to Syncada, recording an EDIProcessing metrics row on exit
// regardless of outcome.
func (p *paymentRequestReviewedProcessor) ProcessReviewedPaymentRequest() error {
	// Store/log metrics about EDI processing upon exiting this method.
	numProcessed := 0
	start := time.Now()
	defer func() {
		ediProcessing := models.EDIProcessing{
			EDIType:          models.EDIType858,
			ProcessStartedAt: start,
			ProcessEndedAt:   time.Now(),
			NumEDIsProcessed: numProcessed,
		}
		p.logger.Info("EDIs processed", zap.Object("EDIs processed", &ediProcessing))
		verrs, err := p.db.ValidateAndCreate(&ediProcessing)
		if err != nil {
			p.logger.Error("failed to create EDIProcessing record", zap.Error(err))
		}
		// BUG FIX: this branch used to log zap.Error(err), which is nil when
		// only validation errors occurred; log the validation errors instead,
		// and guard against a nil verrs.
		if verrs != nil && verrs.HasAny() {
			p.logger.Error("failed to validate EDIProcessing record", zap.Error(verrs))
		}
	}()
	// Fetch all payment request that have been reviewed
	reviewedPaymentRequests, err := p.reviewedPaymentRequestFetcher.FetchReviewedPaymentRequest()
	if err != nil {
		return fmt.Errorf("function ProcessReviewedPaymentRequest failed call to FetchReviewedPaymentRequest: %w", err)
	}
	if len(reviewedPaymentRequests) == 0 {
		// No reviewed payment requests to process
		return nil
	}
	// Send all reviewed payment request to Syncada
	for _, pr := range reviewedPaymentRequests {
		err := p.ProcessAndLockReviewedPR(pr)
		if err != nil {
			return err
		}
		numProcessed++
	}
	return nil
}
|
package main
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestAlphasCase0 checks leet-alphabet conversion of a mixed-case sentence.
func TestAlphasCase0(t *testing.T) {
	in := "All your base are belong to us."
	exp := "@11 `/0|_||Z 8@$3 @|Z3 8310[]\\[]6 ']['0 |_|$."
	assert.Equal(t, exp, ConvAlphabet(in), "FeelsUnluckyMan")
}

// TestAlphasCase1 covers punctuation (apostrophes, comma, question mark).
func TestAlphasCase1(t *testing.T) {
	in := "What's the Frequency, Kenneth?"
	exp := "\\/\\/[-]@'][''$ ']['[-]3 #|Z3(,)|_|3[]\\[](`/, |<3[]\\[][]\\[]3']['[-]?"
	assert.Equal(t, exp, ConvAlphabet(in), "FeelsUnluckyMan")
}

// TestAlphasCase2 covers an exclamation mark passing through unchanged.
func TestAlphasCase2(t *testing.T) {
	in := "A new alphabet!"
	exp := "@ []\\[]3\\/\\/ @1|D[-]@83']['!"
	assert.Equal(t, exp, ConvAlphabet(in), "FeelsUnluckyMan")
}

// TestAlphasCase3 is the classic smoke test.
func TestAlphasCase3(t *testing.T) {
	in := "Hello World!"
	exp := "[-]3110 \\/\\/0|Z1|)!"
	assert.Equal(t, exp, ConvAlphabet(in), "FeelsUnluckyMan")
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package messagebus
import (
"container/list"
"fmt"
"reflect"
"strconv"
"sync"
"golang.org/x/crypto/sha3"
)
// for select the default queue size
const Default = -1

// internal constants
const (
	defaultQueueSize = 1000 // if specific size is not specified
)

// Message - message to put into a queue
type Message struct {
	Command    string   // type of packed data
	Parameters [][]byte // array of parameters
}

// Queue - a 1:1 queue
type Queue struct {
	c    chan Message
	size int  // buffer capacity, retained so Release can recreate the channel
	used bool // set once Chan() has handed out the receive side
}

// to cache messages
// signature is a SHA3-256 digest of command + parameters, used by
// BroadcastQueue to suppress duplicate broadcasts.
type signature [32]byte

// BroadcastQueue - a 1:M queue
// out is synchronous, so messages to routines not waiting are dropped
type BroadcastQueue struct {
	sync.RWMutex
	out         []chan Message
	defaultSize int
	cache       map[signature]struct{} // recently-broadcast signatures
	index       list.List              // FIFO of cached signatures for eviction
}

// the exported message queues and their sizes
// any item with a size option will be allocated that size
// absent then default size is used
type busses struct {
	Broadcast  *BroadcastQueue `size:"1000"` // to broadcast to other nodes
	Connector  *Queue          `size:"50"`   // to control connector
	Announce   *Queue          `size:"50"`   // to control the announcer
	Blockstore *Queue          `size:"50"`   // to sequentially store blocks
	TestQueue  *Queue          `size:"50"`   // for testing use
}

// Bus - all available message queues
var Bus busses
// initialise all queues with preset size.
// Reflects over the busses struct, reading each field's "size" tag (falling
// back to defaultQueueSize) and installing a freshly allocated queue of the
// matching type; panics on an invalid tag or unknown field type.
func init() {
	// this will be a struct type
	busType := reflect.TypeOf(Bus)
	// get write access by using pointer + Elem()
	busValue := reflect.ValueOf(&Bus).Elem()
	// scan each field
	for i := 0; i < busType.NumField(); i++ { // idiom: i++ rather than i += 1
		fieldInfo := busType.Field(i)
		sizeTag := fieldInfo.Tag.Get("size")
		queueSize := defaultQueueSize
		// if size specified and valid positive integer override default
		if len(sizeTag) > 0 {
			s, err := strconv.Atoi(sizeTag)
			if err == nil && s > 0 {
				queueSize = s
			} else {
				m := fmt.Sprintf("queue: %v has invalid size: %q", fieldInfo, sizeTag)
				panic(m)
			}
		}
		switch qt := busValue.Field(i).Type(); qt {
		case reflect.TypeOf((*BroadcastQueue)(nil)):
			q := &BroadcastQueue{
				out:         make([]chan Message, 0, 10),
				defaultSize: queueSize,
				cache:       make(map[signature]struct{}),
			}
			newQueue := reflect.ValueOf(q)
			busValue.Field(i).Set(newQueue)
		case reflect.TypeOf((*Queue)(nil)):
			q := &Queue{
				c:    make(chan Message, queueSize),
				size: queueSize,
				used: false,
			}
			newQueue := reflect.ValueOf(q)
			busValue.Field(i).Set(newQueue)
		default:
			panic(fmt.Sprintf("queue type: %q is not handled", qt))
		}
	}
}
// Send - send a message to a 1:1 queue
// but only if listener is connected
// NOTE(review): the implementation is an unconditional channel send — it
// blocks when the buffer is full and performs no listener check; the comment
// above appears stale relative to the code — confirm intended semantics.
func (queue *Queue) Send(command string, parameters ...[]byte) {
	queue.c <- Message{
		Command:    command,
		Parameters: parameters,
	}
}
// Chan - channel to read from 1:1 queue
// can only be called once; a second call panics until Release is invoked.
func (queue *Queue) Chan() <-chan Message {
	if !queue.used {
		queue.used = true
		return queue.c
	}
	panic("cannot get a second receive channel from a 1:1 queue")
}
// Release - give the channel back
// Closes the current channel (unblocking any receiver) and installs a fresh
// one of the original size so the queue can be reused.
// NOTE(review): not synchronized against a concurrent Send — a send racing
// with Release could hit a closed channel and panic; confirm callers
// serialize these.
func (queue *Queue) Release() {
	queue.used = false
	close(queue.c)
	queue.c = make(chan Message, queue.size)
}
// Send - send a message to a 1:M queue.
// Duplicate messages (same command + parameters, by SHA3-256 digest) seen
// within the last 100 broadcasts are suppressed. Delivery to each subscriber
// is best-effort: a subscriber whose buffer is nearly full is skipped, except
// that "block" messages may also use the last free slot (and are dropped
// rather than blocking if even that is full).
func (queue *BroadcastQueue) Send(command string, parameters ...[]byte) {
	m := Message{
		Command:    command,
		Parameters: parameters,
	}
	// Digest command + parameters to identify duplicates.
	h := sha3.New256()
	h.Write([]byte(command))
	for _, p := range parameters {
		h.Write(p)
	}
	var sum signature
	copy(sum[:], h.Sum(nil)) // idiom: h.Sum(nil) instead of h.Sum([]byte{})
	queue.Lock()
	if _, ok := queue.cache[sum]; ok {
		queue.Unlock()
		return
	}
	queue.cache[sum] = struct{}{}
	queue.index.PushBack(sum)
	// Evict the oldest signature once the dedup window exceeds 100 entries.
	if queue.index.Len() > 100 {
		s := queue.index.Remove(queue.index.Front()).(signature)
		delete(queue.cache, s)
	}
	queue.Unlock()
	for _, out := range queue.out {
		// check for more than one free entry
		if len(out) < cap(out)-1 {
			out <- m
		} else if command == "block" {
			// allow block messages to use the last free entry
			select {
			case out <- m:
			default:
			}
		}
	}
}
// Chan - get a new channel to read from a 1:M queue
// each call gets a distinct channel
// A negative size (e.g. the Default constant) selects the queue's default size.
// NOTE(review): appends to queue.out without taking the embedded lock, while
// Send iterates it unlocked — confirm subscribers register before traffic starts.
func (queue *BroadcastQueue) Chan(size int) <-chan Message {
	if size < 0 {
		size = queue.defaultSize
	}
	c := make(chan Message, size)
	queue.out = append(queue.out, c)
	return c
}
// Release - release the incoming and outgoing queue
// Closes every subscriber channel and resets the subscriber list.
// NOTE(review): like Chan, this mutates queue.out without holding the lock —
// confirm it is only called after senders have stopped.
func (queue *BroadcastQueue) Release() {
	for _, o := range queue.out {
		close(o)
	}
	// empty the list
	queue.out = make([]chan Message, 0, 10)
}
|
package main
import (
"fmt"
"gopkg.in/go-playground/validator.v9"
)
// v is the shared validator instance; the validator caches struct metadata,
// so a single package-level instance is reused.
var v = validator.New()

// validate runs struct-tag validation on item and returns a friendly error
// describing the first violated rule, or nil when the struct is valid.
func validate(item interface{}) error {
	err := v.Struct(item)
	if err == nil {
		return nil
	}
	if _, ok := err.(*validator.InvalidValidationError); ok {
		return err
	}
	// FIX: the original ranged over the errors but returned unconditionally on
	// the first iteration; index the first violation directly instead.
	errs := err.(validator.ValidationErrors)
	if len(errs) > 0 {
		return fmt.Errorf("The %v field is %v", errs[0].Field(), errs[0].Tag())
	}
	return nil
}
|
package admin
import (
"myapp/models"
"time"
"fmt"
"myapp/util"
)
// RoleController serves the admin role list page and role create/update
// endpoints, inheriting request helpers from baseController.
type RoleController struct {
	baseController
}
// Index renders a paginated list of roles ordered by creation time.
func (this *RoleController) Index() {
	var (
		page      int
		pagesize  int = 8
		offset    int
		role_list []*models.Role
	)
	// Page numbers are 1-based; anything invalid falls back to page 1.
	if page, _ = this.GetInt("page"); page < 1 {
		page = 1
	}
	offset = (page - 1) * pagesize
	query := this.o.QueryTable(new(models.Role).TableName())
	// Count error deliberately ignored: a zero count just yields an empty page.
	count, _ := query.Count()
	query.OrderBy("created").Limit(pagesize, offset).All(&role_list)
	this.Data["role_list"] = role_list
	// NOTE(review): fmt.Sprintf with no arguments (S1039) — kept because it is
	// the file's only fmt use and removing it would orphan the import.
	this.Data["pagebar"] = util.NewPager(page, int(count), pagesize, fmt.Sprintf("/role/index.html"), true).ToString()
	this.Layout = "admin/layouts/starter.html"
	this.TplName = "admin/role/index.html"
}
// Save creates a new role (id == 0) or updates an existing one, replying with
// a JSON result (200 on success, 201 on failure) in every case.
func (this *RoleController) Save() {
	name := this.GetString("name")
	desc := this.GetString("desc")
	id, _ := this.GetInt("id")
	role := models.Role{}
	role.Name = name
	role.Desc = desc
	if id == 0 {
		// Creation path: stamp the creation time.
		role.Created = time.Now()
		if _, err := this.o.Insert(&role); err == nil {
			this.jsonResult(200, "添加成功", nil)
		} else {
			this.jsonResult(201, "添加失败", nil)
		}
		return
	}
	// Update path: only Name/Desc are written; Created is left untouched.
	role.Id = id
	if _, err := this.o.Update(&role); err == nil {
		this.jsonResult(200, "修改成功", nil)
	} else {
		this.jsonResult(201, "修改失败", nil)
	}
	// BUG FIX: the original fell through to an extra this.jsonResult(200, name, nil)
	// after every branch had already responded, emitting a duplicate response
	// unless jsonResult aborts the request internally — confirm jsonResult's
	// semantics if that trailing call was intentional.
}
|
package command
import (
	"fmt"
	"path/filepath"
	"sort"

	"github.com/codegangsta/cli"
	"github.com/dnaeon/gru/module"
	"github.com/gosuri/uitable"
)
// NewModuleCommand creates a new sub-command for
// displaying the list of available modules
func NewModuleCommand() cli.Command {
	siteRepoFlag := cli.StringFlag{
		Name:   "siterepo",
		Value:  "",
		Usage:  "path/url to the site repo",
		EnvVar: "GRU_SITEREPO",
	}
	return cli.Command{
		Name:   "module",
		Usage:  "display available modules",
		Action: execModuleCommand,
		Flags:  []cli.Flag{siteRepoFlag},
	}
}
// Executes the "module" command: discovers modules under <siterepo>/modules
// and prints a MODULE/PATH table.
func execModuleCommand(c *cli.Context) {
	modulePath := filepath.Join(c.String("siterepo"), "modules")
	registry, err := module.Discover(modulePath)
	if err != nil {
		displayError(err, 1)
	}
	// FIX: sort module names so the table renders in a deterministic order
	// (Go map iteration order is randomized).
	names := make([]string, 0, len(registry))
	for m := range registry {
		names = append(names, m)
	}
	sort.Strings(names)
	table := uitable.New()
	table.AddRow("MODULE", "PATH")
	for _, m := range names {
		table.AddRow(m, registry[m])
	}
	fmt.Println(table)
}
|
package core
import (
"fmt"
"math/rand"
"os"
"os/exec"
)
// Cell life-cycle states stored in Map.cells.
const DeadCell = 0
const LivingCell = 1
// GrowingCell and DyingCell are transient states written by Evolve and
// resolved to LivingCell/DeadCell by Update.
const GrowingCell = 2
const DyingCell = 3

// Map is a square cellular-automaton grid; each cell holds one of the
// state constants above.
type Map struct {
	cells [][]int
}
// Initialize allocates a size x size grid of dead cells.
func (myMap *Map) Initialize(size int) {
	myMap.cells = make([][]int, size)
	for i := range myMap.cells {
		myMap.cells[i] = make([]int, size)
	}
}
// Display clears the terminal (via the external "clear" command) and
// draws the grid: "-" for dead cells, "*" for everything else.
func (myMap Map) Display() {
	clearScreen := exec.Command("clear")
	clearScreen.Stdout = os.Stdout
	clearScreen.Run()
	for _, row := range myMap.cells {
		for _, cell := range row {
			if cell != 0 {
				fmt.Print("* ")
			} else {
				fmt.Print("- ")
			}
		}
		fmt.Println()
	}
	fmt.Println()
}
// Evolve applies one generation of the life rules, marking transitions
// with the transient GrowingCell/DyingCell states (resolved by Update).
func (myMap *Map) Evolve() {
	for i := range myMap.cells {
		for j := range myMap.cells[i] {
			switch score := myMap.ScoreCell(i, j); {
			case score == 3:
				myMap.cells[i][j] = GrowingCell
			case score != 2 && myMap.cells[i][j] == LivingCell:
				myMap.cells[i][j] = DyingCell
			}
		}
	}
}
// ScoreCell counts the live neighbours of cell (i, j). Cells currently
// dying still count as alive for scoring, since the transition has not
// been committed yet.
func (myMap Map) ScoreCell(i, j int) int {
	score := 0
	// The eight neighbours of (i, j).
	neighbors := [8][2]int{
		{i - 1, j - 1}, {i, j - 1}, {i + 1, j - 1},
		{i - 1, j}, {i + 1, j},
		{i - 1, j + 1}, {i, j + 1}, {i + 1, j + 1},
	}
	for _, n := range neighbors {
		r, c := n[0], n[1]
		if r < 0 || r >= len(myMap.cells) {
			continue
		}
		// Bound the column by the actual row length so non-square grids
		// are handled too (the original used the outer dimension for
		// both axes, which only works for square maps).
		if c < 0 || c >= len(myMap.cells[r]) {
			continue
		}
		if myMap.cells[r][c] == LivingCell || myMap.cells[r][c] == DyingCell {
			score += 1
		}
	}
	return score
}
// Update commits the transient states: growing cells become living and
// dying cells become dead.
func (myMap *Map) Update() {
	for i := range myMap.cells {
		row := myMap.cells[i]
		for j := range row {
			switch row[j] {
			case GrowingCell:
				row[j] = LivingCell
			case DyingCell:
				row[j] = DeadCell
			}
		}
	}
}
// Randomize seeds the grid, turning each cell living with a 19% chance
// (rand.Intn(100) > 80).
func (myMap *Map) Randomize() {
	for i := range myMap.cells {
		for j := range myMap.cells[i] {
			if rand.Intn(100) > 80 {
				myMap.cells[i][j] = LivingCell
			}
		}
	}
}
|
package main
import (
"flag"
"is105gruppe20/is105-ica03/bfrequence"
)
// main parses the -f flag and hands the named file to bfrequence for
// inspection.
func main() {
	filnavn := flag.String("f", "", "Name of file to be inspected")
	flag.Parse()
	bfrequence.BuffFileInfo(*filnavn)
}
|
package dbtool
import (
"fmt"
"os"
"time"
)
type (
	// Config config for postgres tool; host/port plus the credentials
	// and database name used to build a connection string.
	Config struct {
		DBName string
		DBUser string
		DBPass string
		Host   string
		Port   string
	}
)
// CreateMigrationFile createa migration file
func CreateMigrationFile(migrationSrc, name string) {
epoch := time.Now().Unix()
upScript := fmt.Sprintf("%s/%d_%s.up.sql", migrationSrc, epoch, name)
downScript := fmt.Sprintf("%s/%d_%s.down.sql", migrationSrc, epoch, name)
if _, err := os.Create(upScript); err == nil {
fmt.Println(upScript)
}
if _, err := os.Create(downScript); err == nil {
fmt.Println(downScript)
}
}
|
/*
* @lc app=leetcode.cn id=312 lang=golang
*
* [312] 戳气球
*/
// @lc code=start
// maxCoins solves LeetCode 312 (burst balloons): the maximum coins
// obtainable by bursting all balloons, where bursting balloon k earns
// nums[left]*nums[k]*nums[right] for its current neighbours.
func maxCoins(nums []int) int {
	size := len(nums)
	// Pad both ends with virtual 1-valued balloons so boundary products
	// are well defined.
	points := make([]int, size+2)
	points[0] = 1
	points[size+1] = 1
	for i := 1; i < size+1; i++ {
		points[i] = nums[i-1]
	}
	// dp[i][j]: best score for bursting every balloon strictly inside
	// the open interval (i, j).
	dp := make([][]int, size+2)
	for i := 0; i < size+2; i++ {
		dp[i] = make([]int, size+2)
	}
	// i must move downwards (the original incremented i, looping forever).
	for i := size; i >= 0; i-- {
		for j := i + 1; j < size+2; j++ {
			// k is the last balloon burst inside (i, j).
			for k := i + 1; k < j; k++ {
				dp[i][j] = max(dp[i][j],
					dp[i][k]+dp[k][j]+points[i]*points[j]*points[k])
			}
		}
	}
	// Answer spans the full padded range (0, size+1); the original
	// returned dp[0][n+1] == dp[0][size+2], an out-of-range index.
	return dp[0][size+1]
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// @lc code=end
|
package middlewares
import (
"time"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
)
// Ginrus returns gin middleware that logs one structured logrus entry
// per request: path, status, method, latency, request id and client IP.
// Requests that accumulated gin errors are logged at error level.
func Ginrus(logger *logrus.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		started := time.Now()
		// Capture the path up front: some evil middlewares modify these
		// values during handling.
		requestPath := c.Request.URL.Path
		c.Next()
		finished := time.Now()
		entry := logger.WithFields(logrus.Fields{
			"path":       requestPath,
			"status":     c.Writer.Status(),
			"method":     c.Request.Method,
			"latency":    finished.Sub(started),
			"request_id": c.Request.Header.Get("X-Request-Id"),
			"ip":         c.ClientIP(),
		})
		if len(c.Errors) == 0 {
			entry.Info()
			return
		}
		// Erroneous request: include the accumulated error text.
		entry.Error(c.Errors.String())
	}
}
|
package problems
import (
"modelos/table"
)
// Iterate pivots the primal table until all z-values are non-negative
// (maximization problems only), concatenating each intermediate tableau
// rendering into the returned string.
func (p *P) Iterate() string {
	s := ""
	if p.Minmax != Max {
		return s
	}
	for {
		done, j := AllPositives(p.T.Zks, p.T.N+p.T.M)
		if done {
			return s
		}
		s += "\n\n" + i(p.T, j)
	}
}
// IterateD pivots the dual table until all z-values are non-positive
// (maximization problems only), concatenating each intermediate tableau
// rendering into the returned string.
func (p *P) IterateD() string {
	s := ""
	if p.Minmax != Max {
		return s
	}
	for {
		done, j := AllNegatives(p.D.Zks, p.D.N+p.D.M)
		if done {
			return s
		}
		s += "\n\n" + i(p.D, j)
	}
}
// IterateDCambio changes the objective coefficient of column col in the
// dual table to value, refreshes the z-row and re-optimizes, returning
// the rendered table followed by the iteration log.
func (p *P) IterateDCambio(col int, value float64) string {
	p.D.Cks[col] = value
	// Recompute the z-entry for the changed column.
	var zcol float64
	for i := 0; i < p.D.M; i++ {
		zcol += p.D.Cols[col].Values[i] * p.D.Cks[p.D.Xks[i]-1]
	}
	zcol -= value
	// NOTE(review): zcol is computed and then discarded — updateZs
	// recomputes every z anyway. Either zcol was meant to be stored in
	// p.D.Zks[col] or this loop is dead code; confirm before removing.
	updateZs(p.D)
	return "\n" + p.D.S() + p.IterateD()
}
// i performs one simplex pivot on table t using column cpivot: it picks
// the pivot row by a minimum-ratio-style test, scales that row so the
// pivot element is 1, eliminates the pivot column from all other rows,
// records the entering basic variable and refreshes the z-values.
// Returns the table rendered as a string.
func i(t *table.T, cpivot int) string {
	// Sentinel "infinity" for the ratio test; assumes candidate ratios
	// stay below 1000 — TODO confirm this bound is safe.
	var min float64
	min = 1000
	fpivot := 0
	for i := 0; i < t.M; i++ {
		if t.Cols[cpivot].Values[i] != 0 {
			// NOTE(review): the textbook ratio test divides Bks[i] by
			// the column value; this multiplies instead. Confirm it is
			// intentional for table.T's representation before changing.
			div := t.Bks[i] * t.Cols[cpivot].Values[i]
			if div > 0 && div < min {
				min = div
				fpivot = i
			}
		}
	}
	// Normalize the pivot row so the pivot element becomes 1.
	celpivot := t.Cols[cpivot].Values[fpivot]
	multi := 1 / celpivot
	for i := 0; i < t.N+t.M; i++ {
		t.Cols[i].Values[fpivot] *= multi
	}
	t.Bks[fpivot] *= multi
	// Eliminate the pivot column from every other row.
	for i := 0; i < t.M; i++ {
		if i != fpivot {
			k := t.Cols[cpivot].Values[i]
			for j := 0; j < t.N+t.M; j++ {
				t.Cols[j].Values[i] -= k * t.Cols[j].Values[fpivot]
			}
			t.Bks[i] -= k * t.Bks[fpivot]
		}
	}
	// Basic-variable indices are stored 1-based.
	t.Xks[fpivot] = cpivot + 1
	updateZs(t)
	return t.S()
}
// AllNegatives reports whether the first s entries of xs are all
// non-positive. On the first positive entry it returns false together
// with that entry's index; otherwise it returns (true, 0).
func AllNegatives(xs []float64, s int) (bool, int) {
	for i := 0; i < s; i++ {
		if xs[i] > 0 {
			return false, i
		}
	}
	return true, 0
}
// AllPositives reports whether the first s entries of xs are all
// non-negative. On the first negative entry it returns false together
// with that entry's index; otherwise it returns (true, 0).
func AllPositives(xs []float64, s int) (bool, int) {
	for i := 0; i < s; i++ {
		if xs[i] < 0 {
			return false, i
		}
	}
	return true, 0
}
// updateZs recomputes the objective value t.Z and the reduced-cost row
// t.Zks from the current basis (t.Xks, 1-based indices into t.Cks).
func updateZs(t *table.T) {
	// Objective value: sum of basic-variable costs times their B values.
	t.Z = 0
	for i := 0; i < t.M; i++ {
		t.Z += t.Cks[t.Xks[i]-1] * t.Bks[i]
	}
	// Reduced costs: z_j - c_j for every column.
	for i := 0; i < t.N+t.M; i++ {
		t.Zks[i] = 0
		for j := 0; j < t.M; j++ {
			t.Zks[i] += t.Cols[i].Values[j] * t.Cks[t.Xks[j]-1]
		}
		t.Zks[i] -= t.Cks[i]
	}
}
// GetInversa extracts the M slack-variable columns of the primal table
// (columns N..N+M-1), negating the column when the corresponding
// constraint is marked ">=" in p.T.May. Per simplex theory these
// columns carry the basis inverse — TODO confirm the sign convention.
func (p *P) GetInversa() []table.C {
	cols := make([]table.C, p.T.M)
	for i := p.T.N; i < p.T.N+p.T.M; i++ {
		c := table.NewCol(p.T.M)
		for j := 0; j < p.T.M; j++ {
			if p.T.May[i-p.T.N] {
				c.Values[j] = -p.T.Cols[i].Values[j]
			} else {
				c.Values[j] = p.T.Cols[i].Values[j]
			}
		}
		cols[i-p.T.N] = c
	}
	return cols
}
// GetInversaD extracts the M slack-variable columns of the dual table,
// negating a column depending on the primal basis entry.
func (p *P) GetInversaD() []table.C {
	cols := make([]table.C, p.D.M)
	for i := p.D.N; i < p.D.N+p.D.M; i++ {
		c := table.NewCol(p.D.M)
		for j := 0; j < p.D.M; j++ {
			// NOTE(review): this reads p.T.Xks while every other access
			// in the function uses p.D — confirm the cross-reference to
			// the primal table is intentional and not a typo.
			if p.T.Xks[i-p.D.N] > 0 {
				c.Values[j] = -p.D.Cols[i].Values[j]
			} else {
				c.Values[j] = p.D.Cols[i].Values[j]
			}
		}
		cols[i-p.D.N] = c
	}
	return cols
}
// Optimo reports whether the primal table is optimal: maximization
// requires all z-values non-negative ("positivos"), minimization all
// non-positive ("negativos"). The string names the criterion used.
func (p *P) Optimo() (bool, string) {
	if p.Minmax != Max {
		o, _ := AllNegatives(p.T.Zks, p.T.N+p.T.M)
		return o, "negativos"
	}
	o, _ := AllPositives(p.T.Zks, p.T.N+p.T.M)
	return o, "positivos"
}
// OptimoD reports whether the dual table is optimal, using the same
// criterion naming as Optimo.
func (p *P) OptimoD() (bool, string) {
	if p.Minmax != Max {
		o, _ := AllNegatives(p.D.Zks, p.D.N+p.D.M)
		return o, "negativos"
	}
	o, _ := AllPositives(p.D.Zks, p.D.N+p.D.M)
	return o, "positivos"
}
|
package main
import (
"context"
"flag"
"fmt"
"time"
"github.com/testlinkerd/pkg/world"
"google.golang.org/grpc"
)
var (
	// target is the gRPC server address (host:port) to dial.
	target = flag.String("target", ":50040", "specify target")
)
// main dials the target gRPC server and calls SayHello once per second
// forever, printing each response or error.
func main() {
	flag.Parse()
	// NOTE(review): grpc.WithInsecure is deprecated in newer grpc
	// releases in favor of insecure transport credentials — confirm the
	// pinned grpc version before changing.
	cc, err := grpc.Dial(
		*target,
		grpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
	worldClient := world.NewWorldClient(cc)
	for {
		resp, err := worldClient.SayHello(context.Background(), &world.HelloReq{})
		if err != nil {
			fmt.Println("err:", err)
		}
		// resp can be non-nil even when err is set; print whatever came back.
		if resp != nil {
			fmt.Println("resp:", resp)
		}
		time.Sleep(1 * time.Second)
	}
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package helm
import (
"os/exec"
"strings"
"github.com/pkg/errors"
)
// RunGenericCommand runs any given helm command, discarding its output
// and wrapping any failure.
func (c *Cmd) RunGenericCommand(arg ...string) error {
	if _, _, err := c.run(arg...); err != nil {
		return errors.Wrap(err, "failed to invoke helm command")
	}
	return nil
}
// RunCommandRaw runs any given helm command returning raw output.
func (c *Cmd) RunCommandRaw(arg ...string) ([]byte, error) {
	return exec.Command(c.helmPath, arg...).Output()
}
// Version invokes helm version and returns the trimmed output; on
// failure the partial output is returned alongside the wrapped error.
func (c *Cmd) Version() (string, error) {
	out, _, runErr := c.run("version")
	version := strings.TrimSuffix(string(out), "\n")
	if runErr == nil {
		return version, nil
	}
	return version, errors.Wrap(runErr, "failed to invoke helm version")
}
|
package config
import (
"fmt"
"github.com/spf13/viper"
)
type Config struct {
Database struct {
MongoDB struct {
URL string `json:"url"`
Name string `json:"name"`
} `json:'mongodb"`
} `json:"database"`
}
// Loader reads ./config.json through viper and copies the mongodb
// settings into a Config value; it panics when the file cannot be read.
func Loader() *Config {
	viper.SetConfigName("config") // name of config file (without extension)
	viper.SetConfigType("json")   // REQUIRED if the config file does not have the extension in the name
	viper.AddConfigPath(".")      // optionally look for config in the working directory
	if err := viper.ReadInConfig(); err != nil {
		panic(fmt.Errorf("Fatal error config file: %s \n", err))
	}
	conf := &Config{}
	conf.Database.MongoDB.URL = viper.GetString("database.mongodb.url")
	conf.Database.MongoDB.Name = viper.GetString("database.mongodb.name")
	return conf
}
|
package handlers
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/CassioRoos/poc-lazy-loading/models"
"github.com/CassioRoos/poc-lazy-loading/services"
)
// Withdraw is an HTTP handler that forwards withdraw requests to the
// underlying WithdrawService.
type Withdraw struct {
	service services.WithdrawService
}
// NewWithdraw wires a WithdrawService into a new Withdraw handler.
func NewWithdraw(service services.WithdrawService) *Withdraw {
	h := &Withdraw{service: service}
	return h
}
// ServeHTTP decodes a WithdrawRequest from the request body and hands
// it to the service: 400 on unreadable/invalid JSON, 500 on service
// failure, 200 on success.
func (h *Withdraw) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		rw.WriteHeader(http.StatusBadRequest)
		return
	}
	var request models.WithdrawRequest
	if err = json.Unmarshal(body, &request); err != nil {
		// The original discarded this error, passing a zero-valued
		// request on malformed JSON.
		rw.WriteHeader(http.StatusBadRequest)
		return
	}
	if err := h.service.Withdraw(r.Context(), request); err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		// Return so we don't call WriteHeader twice ("superfluous
		// response.WriteHeader" in the original).
		return
	}
	rw.WriteHeader(http.StatusOK)
}
// Method reports the HTTP verb this handler accepts.
func (h *Withdraw) Method() string {
	return http.MethodPost
}
func (h *Withdraw) Pattern() string{
return "/withdrawlazy"
} |
package rbac
import (
"fmt"
//"errors"
//"time"
. "cms_admin/admin/src/lib"
m "cms_admin/admin/src/models"
)
// UserController handles the admin-user (RBAC) CRUD endpoints; it
// inherits request/response helpers from CommonController.
type UserController struct {
	CommonController
}
// List renders the paged admin-user list. "sort"/"order" control the
// ordering: "desc" prefixes the sort key with "-", and "-Id" is the
// default when no order is supplied.
func (this *UserController) List() {
	page, _ := this.GetInt64("page")
	pageSize, _ := this.GetInt64("page_size")
	sort := this.GetString("sort")
	order := this.GetString("order")
	switch {
	case len(order) == 0:
		sort = "-Id"
	case order == "desc":
		sort = "-" + sort
	}
	useres, count := m.GetUserList(page, pageSize, sort)
	this.Data["useres"] = &useres
	this.Data["count"] = &count
	this.Data["userinfo"] = this.GetSession("userinfo")
	this.TplName = this.GetTemplatetype() + "/rbac/user.tpl"
}
// 添加频道
// Add creates an admin user from the posted form: validates the name,
// the repeated password and the minimum password length, hashes the
// password and stores the record.
func (this *UserController) Add() {
	user := m.User{}
	user.Username = this.GetString("name")
	password := this.GetString("password")
	repassword := this.GetString("repassword")
	user.Status, _ = this.GetInt("status")
	if len(user.Username) == 0 {
		this.Rsp(false, "name is empty")
		return
	}
	if password != repassword {
		this.Rsp(false, "两次密码不同")
		return
	}
	if len(password) < 6 {
		this.Rsp(false, "密码不能少于6位")
		return
	}
	user.Nickname = user.Username
	user.Password = PwdHash(password)
	// NOTE: the original fmt.Println of the password hash was removed —
	// credential material must not be written to stdout/logs.
	id, err := m.AddUser(&user)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if id <= 0 {
		// err can be nil while no row was created; the original called
		// err.Error() on this path and would panic on the nil error.
		this.Rsp(false, "add user failed")
		return
	}
	this.Rsp(true, "Success")
}
// 编辑管理员
// Edit looks up an admin user by id and returns it, or an error
// response when the user does not exist.
func (this *UserController) Edit() {
	userId, _ := this.GetInt64("id")
	fmt.Println("id:", userId)
	u := m.GetUserById(userId)
	if u.Id == 0 {
		this.Rsp(false, "用户不存在")
		return
	}
	this.Rsp(true, u)
}
// 更新管理员
// Update resets an admin user's password and status by id; the new
// password must be at least 6 characters.
func (this *UserController) Update() {
	user := m.User{}
	password := this.GetString("password")
	if len(password) < 6 {
		this.Rsp(false, "密码不能少于6位")
		return
	}
	user.Password = PwdHash(password)
	user.Status, _ = this.GetInt("status")
	user.Id, _ = this.GetInt64("id")
	Id, err := m.UpdateUser(&user)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if Id <= 0 {
		// err can be nil while no row changed; the original called
		// err.Error() here and would panic on the nil error.
		this.Rsp(false, "update user failed")
		return
	}
	this.Rsp(true, "Success")
}
// 删除频道
// Delete removes an admin user by id and reports the outcome.
func (this *UserController) Delete() {
	Id, _ := this.GetInt64("id")
	fmt.Println("id:", Id)
	res, err := m.DeleteUserById(Id)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if res <= 0 {
		// err can be nil while nothing was deleted; the original called
		// err.Error() here and would panic on the nil error.
		this.Rsp(false, "delete user failed")
		return
	}
	this.Rsp(true, "Success")
}
|
package service
import (
. "imageserver/models"
"github.com/emicklei/go-restful"
"strconv"
"encoding/json"
"io/ioutil"
"net/http"
)
// routNeuralReply serves replies: with a parsable numeric "id" path
// parameter it returns that single reply, otherwise the full list.
// Failures are reported as {"Error": ...} entities.
func (self *App) routNeuralReply(req *restful.Request, resp *restful.Response) {
	id, parseErr := strconv.ParseInt(req.PathParameter("id"), 10, 64)
	if parseErr != nil {
		// No usable id: fall back to listing everything.
		all, err := self.Model.Session.GetReplys()
		if err != nil {
			resp.WriteEntity(map[string]interface{}{"Error": err})
			return
		}
		resp.WriteEntity(all)
		return
	}
	one, err := self.Model.Session.GetReply(id)
	if err != nil {
		resp.WriteEntity(map[string]interface{}{"Error": err})
		return
	}
	resp.WriteEntity(one)
}
// createNeuralReply creates a reply from a JSON body of the form
// {"title": ..., "catId": ...}: 400 on read/decode failure, otherwise
// the stored entity or an {"Error": ...} response.
func (self *App) createNeuralReply(req *restful.Request, resp *restful.Response) {
	var form struct {
		Title string `json:"title"`
		CatId int64  `json:"catId"`
	}
	raw, err := ioutil.ReadAll(req.Request.Body)
	if err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	if err = json.Unmarshal(raw, &form); err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	saved, err := self.Model.Session.SaveReply(form.CatId, form.Title)
	if err != nil {
		resp.WriteEntity(map[string]interface{}{"Error": err})
		return
	}
	resp.WriteEntity(saved)
}
// updateNeuralReply updates a reply from a NeuralRandomReply JSON body:
// 400 on read/decode failure, otherwise the updated entity or an
// {"Error": ...} response.
func (self *App) updateNeuralReply(req *restful.Request, resp *restful.Response) {
	var form NeuralRandomReply
	raw, err := ioutil.ReadAll(req.Request.Body)
	if err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	if err = json.Unmarshal(raw, &form); err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	updated, err := self.Model.Session.UpdateReply(form)
	if err != nil {
		resp.WriteEntity(map[string]interface{}{"Error": err})
		return
	}
	resp.WriteEntity(updated)
}
// delNeuralReply deletes the reply described by the NeuralRandomReply
// JSON body and reports the outcome: 400 on read/decode failure.
func (self *App) delNeuralReply(req *restful.Request, resp *restful.Response) {
	formData := NeuralRandomReply{}
	body, err := ioutil.ReadAll(req.Request.Body)
	if err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	err = json.Unmarshal(body, &formData)
	if err != nil {
		resp.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}
	// NOTE(review): "succes" is misspelled, but it is part of the JSON
	// API surface — renaming it would break existing clients, so
	// coordinate before fixing.
	resp.WriteEntity(map[string]interface{}{"succes": self.Model.Session.DeleteReply(formData)})
}
|
package main
import (
"errors"
"fmt"
"go/ast"
"log"
)
// mapGenNKeys emits the "nKeys" template for type n, reporting count
// keys, into the generator's buffer.
func (g *Gen) mapGenNKeys(n string, count int) error {
	data := struct {
		NKeys      int
		StructName string
	}{NKeys: count, StructName: n}
	return mapUnmarshalTpl["nKeys"].tpl.Execute(g.b, data)
}
// mapGenUnmarshalObj emits the unmarshal function for a map type named
// n: header template, a value-type-specific body (plain or pointer
// ident), then the closing boilerplate.
func (g *Gen) mapGenUnmarshalObj(n string, s *ast.MapType) error {
	if err := mapUnmarshalTpl["def"].tpl.Execute(g.b, struct {
		TypeName string
	}{TypeName: n}); err != nil {
		return err
	}
	// Dispatch on the map's value type.
	switch t := s.Value.(type) {
	case *ast.Ident:
		if err := g.mapGenUnmarshalIdent(t, false); err != nil {
			return err
		}
	case *ast.StarExpr:
		ident, ok := t.X.(*ast.Ident)
		if !ok {
			return fmt.Errorf("Unknown type %s", n)
		}
		if err := g.mapGenUnmarshalIdent(ident, true); err != nil {
			return err
		}
	}
	// Close the generated function body.
	if _, err := g.b.Write(structUnmarshalClose); err != nil {
		return err
	}
	return nil
}
// mapGenUnmarshalIdent emits the unmarshal snippet for a map value of
// identifier type i: primitives dispatch directly to the matching
// emitter, known generated types and locally-declared type specs go
// through mapUnmarshalNonPrim, anything else is an error. ptr indicates
// the value is stored behind a pointer.
func (g *Gen) mapGenUnmarshalIdent(i *ast.Ident, ptr bool) error {
	switch i.String() {
	case "string":
		g.mapUnmarshalString(ptr)
	case "bool":
		g.mapUnmarshalBool(ptr)
	case "int":
		g.mapUnmarshalInt("", ptr)
	case "int64":
		g.mapUnmarshalInt("64", ptr)
	case "int32":
		g.mapUnmarshalInt("32", ptr)
	case "int16":
		g.mapUnmarshalInt("16", ptr)
	case "int8":
		g.mapUnmarshalInt("8", ptr)
	case "uint64":
		g.mapUnmarshalUint("64", ptr)
	case "uint32":
		g.mapUnmarshalUint("32", ptr)
	case "uint16":
		g.mapUnmarshalUint("16", ptr)
	case "uint8":
		g.mapUnmarshalUint("8", ptr)
	case "float64":
		g.mapUnmarshalFloat("64", ptr)
	case "float32":
		g.mapUnmarshalFloat("32", ptr)
	default:
		// if ident is already in our spec list
		if sp, ok := g.genTypes[i.Name]; ok {
			err := g.mapUnmarshalNonPrim(sp, ptr)
			if err != nil {
				return err
			}
		} else if i.Obj != nil {
			// else check the obj infos
			switch t := i.Obj.Decl.(type) {
			case *ast.TypeSpec:
				err := g.mapUnmarshalNonPrim(t, ptr)
				if err != nil {
					return err
				}
			default:
				return errors.New("could not determine what to do with type " + i.String())
			}
		} else {
			return fmt.Errorf("Unknown type %s", i.Name)
		}
	}
	return nil
}
// mapUnmarshalNonPrim emits the snippet for a non-primitive map value:
// struct and array type specs are supported, anything else errors.
func (g *Gen) mapUnmarshalNonPrim(sp *ast.TypeSpec, ptr bool) error {
	if _, isStruct := sp.Type.(*ast.StructType); isStruct {
		g.mapUnmarshalStruct(sp, ptr)
		return nil
	}
	if _, isArr := sp.Type.(*ast.ArrayType); isArr {
		g.mapUnmarshalArr(sp, ptr)
		return nil
	}
	return errors.New("Unknown type")
}
// mapUnmarshalString emits the "string" template; ptr selects whether
// the destination is addressed with "&".
func (g *Gen) mapUnmarshalString(ptr bool) {
	// The original duplicated the whole call in each branch; only the
	// pointer prefix differs.
	prefix := ""
	if ptr {
		prefix = "&"
	}
	err := mapUnmarshalTpl["string"].tpl.Execute(g.b, struct {
		Ptr string
	}{prefix})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalBool emits the "bool" template; ptr selects whether the
// destination is addressed with "&".
func (g *Gen) mapUnmarshalBool(ptr bool) {
	// Collapsed the two duplicated branches; only the prefix differed.
	prefix := ""
	if ptr {
		prefix = "&"
	}
	err := mapUnmarshalTpl["bool"].tpl.Execute(g.b, struct {
		Ptr string
	}{prefix})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalInt emits the "int" template for the given bit width
// ("", "8", "16", "32", "64"); ptr selects the "&" destination prefix.
func (g *Gen) mapUnmarshalInt(intLen string, ptr bool) {
	// Collapsed the two duplicated branches; only the prefix differed.
	prefix := ""
	if ptr {
		prefix = "&"
	}
	err := mapUnmarshalTpl["int"].tpl.Execute(g.b, struct {
		IntLen string
		Ptr    string
	}{intLen, prefix})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalUint emits the "uint" template for the given bit width;
// ptr selects the "&" destination prefix.
func (g *Gen) mapUnmarshalUint(intLen string, ptr bool) {
	// Collapsed the two duplicated branches; only the prefix differed.
	prefix := ""
	if ptr {
		prefix = "&"
	}
	err := mapUnmarshalTpl["uint"].tpl.Execute(g.b, struct {
		IntLen string
		Ptr    string
	}{intLen, prefix})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalFloat emits the "float" template for the given bit width
// ("32"/"64"); ptr selects the "&" destination prefix.
func (g *Gen) mapUnmarshalFloat(intLen string, ptr bool) {
	// Collapsed the two duplicated branches; only the prefix differed.
	prefix := ""
	if ptr {
		prefix = "&"
	}
	err := mapUnmarshalTpl["float"].tpl.Execute(g.b, struct {
		IntLen string
		Ptr    string
	}{intLen, prefix})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalStruct emits the struct template for type spec st; ptr
// selects the pointer variant of the template.
func (g *Gen) mapUnmarshalStruct(st *ast.TypeSpec, ptr bool) {
	// The two branches only differed in the template key.
	key := "struct"
	if ptr {
		key = "structPtr"
	}
	err := mapUnmarshalTpl[key].tpl.Execute(g.b, struct {
		StructName string
	}{st.Name.String()})
	if err != nil {
		log.Fatal(err)
	}
}
// mapUnmarshalArr emits the "arr" template for type spec st. The ptr
// argument is accepted for signature parity but not used.
func (g *Gen) mapUnmarshalArr(st *ast.TypeSpec, ptr bool) {
	data := struct {
		TypeName string
	}{st.Name.String()}
	if err := mapUnmarshalTpl["arr"].tpl.Execute(g.b, data); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package http
import (
"fmt"
"io/ioutil"
"net/http"
"config"
"base/errors"
"base/xlog"
)
// HTTPHandler serves SQL queries over HTTP; it owns the underlying
// http.Server and acts as its request handler.
type HTTPHandler struct {
	httpServer *http.Server
	log        *xlog.Log
	conf       *config.Config
}
// NewHTTPHandler builds an HTTPHandler bound to the configured
// host/HTTP port and installs itself as the server's request handler.
func NewHTTPHandler(log *xlog.Log, conf *config.Config) *HTTPHandler {
	addr := fmt.Sprintf("%v:%v", conf.Server.ListenHost, conf.Server.HTTPPort)
	h := &HTTPHandler{
		log:        log,
		conf:       conf,
		httpServer: &http.Server{Addr: addr},
	}
	h.httpServer.Handler = h
	return h
}
// Start launches the HTTP server in a background goroutine; the
// goroutine terminates the process (log.Fatal) if ListenAndServe
// returns.
func (s *HTTPHandler) Start() {
	log := s.log
	go func() {
		log.Fatal("%v", s.httpServer.ListenAndServe())
	}()
}
// Stop is a no-op placeholder; the underlying http.Server is never
// shut down explicitly.
func (s *HTTPHandler) Stop() {
}
// Address returns the listen address in ":port" form.
func (s *HTTPHandler) Address() string {
	port := s.conf.Server.HTTPPort
	return fmt.Sprintf(":%v", port)
}
// ServeHTTP answers a query request. The SQL comes from the "query"
// URL parameter or, for a POST without one, from the request body; the
// result (or the error text) is written to rw.
func (s *HTTPHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	log := s.log
	// Catch panics, and close the connection in any case.
	defer func() {
		if x := recover(); x != nil {
			log.Error("%+v", errors.Errorf("%+v", x))
		}
	}()
	query := req.URL.Query().Get("query")
	if query == "" && req.Method == http.MethodPost {
		defer req.Body.Close()
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			fmt.Fprintf(rw, "%v", err.Error())
			return
		}
		query = string(body)
	}
	if err := s.processQuery(query, rw); err != nil {
		fmt.Fprintf(rw, "%v", err.Error())
	}
}
|
package main
import (
"crypto/rand"
"crypto/tls"
"crypto/x509"
"log"
"net"
"net/rpc"
)
// main starts a mutually-authenticated TLS listener and serves the
// MyServer RPC methods, spawning one goroutine per accepted client.
func main() {
	cert, err := tls.LoadX509KeyPair("server1.crt", "server.key")
	if err != nil {
		log.Fatalf("Error: %s when load server keys", err)
	}
	if len(cert.Certificate) != 2 {
		log.Fatal("server1.crt should have 2 concatenated certificates: server + CA")
	}
	// The second certificate in the chain is the CA used to verify clients.
	ca, err := x509.ParseCertificate(cert.Certificate[1])
	if err != nil {
		log.Fatal(err)
	}
	certPool := x509.NewCertPool()
	certPool.AddCert(ca)
	config := tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ClientCAs:    certPool,
	}
	config.Rand = rand.Reader
	service := "127.0.0.1:2012"
	listener, err := tls.Listen("tcp", service, &config)
	if err != nil {
		log.Fatalf("Error: %s when listening", err)
	}
	if err := rpc.Register(new(MyServer)); err != nil {
		log.Fatal("Failed to register RPC method")
	}
	log.Print("Listening ")
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Printf("Error: %s when acceptiong connection", err)
			break
		}
		// No defer conn.Close() here: a defer inside this loop would
		// only run at main's exit, piling up one entry per connection
		// and double-closing; handleClient closes the connection itself.
		log.Printf("Accept connection from : %s", conn.RemoteAddr())
		go handleClient(conn)
	}
}
// handleClient serves RPC requests on conn until the client disconnects
// or errors, then closes the connection and logs the teardown.
func handleClient(conn net.Conn) {
	defer conn.Close()
	rpc.ServeConn(conn)
	log.Println("Connection closed")
}
// ArgsSum carries the two operands of the Sum RPC.
type ArgsSum struct {
	Item1, Item2 int
}

// MyServer exposes the RPC methods served over the TLS listener.
type MyServer struct{}

// Sum stores Item1+Item2 into reply; it never fails.
func (srv *MyServer) Sum(args *ArgsSum, reply *int) error {
	*reply = args.Item1 + args.Item2
	return nil
}
|
/*
* Copyright 2018 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
"github.com/stretchr/testify/require"
)
// Test_AddCluster table-tests the "add cluster" command: required name,
// happy path, duplicate names, adding by public key, rejecting an
// account key as a cluster key, and expiry validation.
func Test_AddCluster(t *testing.T) {
	ts := NewTestStore(t, "add_cluster")
	defer ts.Done(t)
	// bar is a valid cluster key; badBar is an account key and must be
	// rejected when supplied as a cluster key.
	_, bar, _ := CreateClusterKey(t)
	_, badBar, _ := CreateAccountKey(t)
	tests := CmdTests{
		{createAddClusterCmd(), []string{"add", "cluster"}, nil, []string{"cluster name is required"}, true},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "foo"}, nil, []string{"Generated cluster key", "added cluster"}, false},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "foo"}, nil, []string{"the cluster \"foo\" already exists"}, true},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "foo"}, nil, []string{"the cluster \"foo\" already exists"}, true},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "bar", "--public-key", bar}, nil, nil, false},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "badbar", "--public-key", badBar}, nil, []string{"invalid cluster key"}, true},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "badexp", "--expiry", "2018-01-01"}, nil, []string{"expiry \"2018-01-01\" is in the past"}, true},
		{createAddClusterCmd(), []string{"add", "cluster", "--name", "badexp", "--expiry", "30d"}, nil, nil, false},
	}
	tests.Run(t, "root", "add")
}
// Test_AddClusterNoStore verifies that adding a cluster without any
// configured store fails with a clear error.
func Test_AddClusterNoStore(t *testing.T) {
	// reset the store
	ngsStore = nil
	ForceStoreRoot(t, "")
	_, _, err := ExecuteCmd(createAddClusterCmd())
	require.Equal(t, "no stores available", err.Error())
}
// Test_AddClusterOutput adds a cluster with explicit start/expiry dates
// and validates the resulting claims via validateClusterClaims.
func Test_AddClusterOutput(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)
	_, _, err := ExecuteCmd(createAddClusterCmd(), "--name", "a", "--start", "2018-01-01", "--expiry", "2050-01-01")
	require.NoError(t, err)
	validateClusterClaims(t, ts)
}
// Test_AddClusterFailsOnManagedStores verifies that clusters cannot be
// created when the store is operator-managed.
func Test_AddClusterFailsOnManagedStores(t *testing.T) {
	ts := NewTestStoreWithOperator(t, "test", nil)
	defer ts.Done(t)
	_, _, err := ExecuteCmd(createAddClusterCmd(), "--name", "a")
	require.Error(t, err)
	require.Equal(t, "clusters cannot be created on managed configurations", err.Error())
}
// Test_AddClusterInteractive drives the interactive prompts (name,
// generate-key confirmation, start and expiry dates) and validates the
// resulting claims.
func Test_AddClusterInteractive(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)
	inputs := []interface{}{"a", true, "2018-01-01", "2050-01-01"}
	cmd := createAddClusterCmd()
	HoistRootFlags(cmd)
	_, _, err := ExecuteInteractiveCmd(cmd, inputs)
	require.NoError(t, err)
	validateClusterClaims(t, ts)
}
// validateClusterClaims asserts that cluster "a" was stored with a seed
// key, that the claim subject matches the cluster's public key, that
// the operator "test" signed the claim, and that the start/expiry dates
// match the values used by the callers.
func validateClusterClaims(t *testing.T, ts *TestStore) {
	kp, err := ts.KeyStore.GetClusterKey("a")
	// The original overwrote this err without checking it; a missing
	// key would have surfaced as a confusing nil-pointer panic below.
	require.NoError(t, err, "getting cluster key")
	_, err = kp.Seed()
	require.NoError(t, err, "stored key should be a seed")
	ac, err := ts.Store.ReadClusterClaim("a")
	require.NoError(t, err, "reading cluster claim")
	pub, err := kp.PublicKey()
	require.NoError(t, err, "getting cluster public key") // was unchecked
	require.Equal(t, ac.Subject, pub, "public key is subject")
	okp, err := ts.KeyStore.GetOperatorKey("test")
	require.NoError(t, err)
	oppub, err := okp.PublicKey()
	require.NoError(t, err, "getting public key for operator")
	require.Equal(t, ac.Issuer, oppub, "operator signed it")
	start, err := ParseExpiry("2018-01-01")
	require.NoError(t, err)
	require.Equal(t, start, ac.NotBefore)
	expire, err := ParseExpiry("2050-01-01")
	require.NoError(t, err)
	require.Equal(t, expire, ac.Expires)
}
|
package models
// Keyboard is the top-level VK bot keyboard payload: a button grid plus
// display flags.
type Keyboard struct {
	OneTime bool       `json:"one_time"` // hide the keyboard after one use
	Inline  bool       `json:"inline"`   // render inline with the message
	Buttons [][]Button `json:"buttons"`  // rows of buttons
}

// Button is a single keyboard button with an optional color.
type Button struct {
	Action `json:"action"`
	Color  string `json:"color,omitempty"`
}

// Action describes what a button does; Link and Payload are optional
// depending on Type.
type Action struct {
	Type    string `json:"type"`
	Label   string `json:"label"`
	Link    string `json:"link,omitempty"`
	Payload `json:"payload,omitempty"`
}

// Payload is the opaque data echoed back when a button is pressed.
type Payload struct {
	Button string `json:"button"`
}
|
package Week_02
// preorderTraversal returns the preorder (root, left, right) traversal
// of the binary tree rooted at root.
func preorderTraversal(root *TreeNode) []int {
	// The original allocated a slice and immediately overwrote it with
	// the helper's result; return it directly.
	// NOTE(review): helper is not defined in this file — confirm it
	// exists elsewhere in the package (helper2 below looks equivalent).
	return helper(root)
}
// helper2 returns the preorder traversal (root, left, right) of the
// tree rooted at root as a slice of values.
func helper2(root *TreeNode) []int {
	r := make([]int, 0)
	if root != nil {
		r = append(r, root.Val)
		// Recurse into helper2 itself; the original called helper,
		// which is not defined in this file (most likely a rename typo
		// that would either fail to compile or call a stale copy).
		if root.Left != nil {
			r = append(r, helper2(root.Left)...)
		}
		if root.Right != nil {
			r = append(r, helper2(root.Right)...)
		}
	}
	return r
}
|
package main
import (
"context"
"encoding/json"
"os"
//"encoding/json"
"flag"
"fmt"
"log"
"time"
"github.com/BurntSushi/toml"
"github.com/dgraph-io/dgo"
"github.com/dgraph-io/dgo/protos/api"
"github.com/jmoiron/sqlx"
"github.com/jmoiron/sqlx/types"
_ "github.com/lib/pq"
"google.golang.org/grpc"
)
// database holds the Postgres connection settings read from the TOML
// config file.
type database struct {
	Server   string
	Port     int
	Database string
	User     string
	Password string
}

// Config is the full TOML configuration: source database plus target
// dgraph endpoint.
type Config struct {
	Database database `toml:"database"`
	DGraph   dgraph   `toml:"dgraph"`
}

// dgraph holds the dgraph endpoint settings.
type dgraph struct {
	Url string
}
// ********** database json column structs:
// NOTE: this is *not* an independent resource, should it be?
// Keyword is a tagged vocabulary term attached to a person.
type Keyword struct {
	// not sure an 'id' makes sense - they are like #mesh, LOC etc...
	Uri   string
	Label string
}

// neither is this -in RDF it has to be, but seems like overkill
// DateResolution pairs a timestamp with its precision.
type DateResolution struct {
	DateTime   string
	Resolution string
}

// ResourceFundingRole links a person to a grant with a named role.
type ResourceFundingRole struct {
	Id       string
	Uri      string
	GrantId  string
	PersonId string
	RoleName string
}

// ResourceGrant is a funded grant with its PI and date range.
type ResourceGrant struct {
	Id                      string
	Uri                     string
	Label                   string
	PrincipalInvestigatorId string
	Start                   DateResolution
	End                     DateResolution
}
// ResourcePerson mirrors the person JSON stored in the database's
// resources table (the "data"/"data_b" columns).
type ResourcePerson struct {
	Id                string
	Uri               string
	AlternateId       string
	FirstName         string
	LastName          string
	MiddleName        *string
	PrimaryTitle      string
	ImageUri          string
	ImageThumbnailUri string
	Type              string
	Overview          string
	Keywords          []Keyword
}

// PersonKeyword is the dgraph-facing shape of Keyword; field-compatible
// so values convert with a plain type conversion.
type PersonKeyword struct {
	// not sure an 'id' makes sense - they are like #mesh, LOC etc...
	Uri   string `json:"keywordUri,omitempty"`
	Label string `json:"keywordLabel,omitempty"`
}

// Person is the dgraph-facing shape of ResourcePerson, with the JSON
// tags that match the dgraph predicate names (see makePeopleIndex).
type Person struct {
	Id                string          `json:"personId,omitempty"`
	Uri               string          `json:"personUri,omitempty"`
	AlternateId       string          `json:"personAlternateId,omitempty"`
	FirstName         string          `json:"personFirstName,omitempty"`
	LastName          string          `json:"personLastName,omitempty"`
	MiddleName        *string         `json:"personMiddleName,omitempty"`
	PrimaryTitle      string          `json:"personPrimaryTitle,omitempty"`
	ImageUri          string          `json:"personImageUri,omitempty"`
	ImageThumbnailUri string          `json:"personImageThumbnailUri,omitempty"`
	Type              string          `json:"personType,omitempty"`
	Overview          string          `json:"personOverview,omitempty"`
	Keywords          []PersonKeyword `json:"personKeywords,omitempty"`
}
// ResourcePosition is a person's employment position at an organization.
type ResourcePosition struct {
	Id                string
	Uri               string
	PersonId          string
	Label             string
	Start             DateResolution
	OrganizationId    string
	OrganizationLabel string
}

// ResourceInstitution is a named institution record.
type ResourceInstitution struct {
	Id    string
	Uri   string
	Label string
}

// ResourceEducation records a degree/training entry for a person.
type ResourceEducation struct {
	Id               string
	Uri              string
	PersonId         string
	Label            string
	InsitutionId     string // NOTE(review): "Insitution" is misspelled; it may mirror the JSON key — confirm before renaming
	InstitutionLabel string
}

// ResourceAuthorship links a person to a publication with a role.
type ResourceAuthorship struct {
	Id             string
	Uri            string
	PublicationId  string
	PersonId       string
	AuthorshipType string
}

// ResourcePublication is a publication record with venue metadata.
type ResourcePublication struct {
	Id                  string
	Uri                 string
	Label               string
	AuthorList          string
	Doi                 string
	PublishedIn         string
	PublicationVenueUri string
}

// ResourceOrganization is a named organization record.
type ResourceOrganization struct {
	Id    string
	Uri   string
	Label string
}

/*** end database json column object maps */
// this is the raw structure in the database
// two json columms:
// * 'data' can be used for change comparison with hash
// * 'data_b' can be used for searches
type Resource struct {
	Uri   string         `db:"uri"`
	Type  string         `db:"type"`
	Hash  string         `db:"hash"`
	Data  types.JSONText `db:"data"`
	DataB types.JSONText `db:"data_b"`
}

// ********** end database json structs
// getList loads every resource row of the given type from the shared
// database handle, panicking on query failure.
func getList(typeName string) []Resource {
	db = GetConnection()
	resources := []Resource{}
	const q = "SELECT uri, type, hash, data, data_b FROM resources WHERE type = $1"
	if err := db.Select(&resources, q, typeName); err != nil {
		panic(err)
	}
	return resources
}
// listType logs every resource row of the given type; the process is
// terminated if the query fails.
func listType(typeName string) {
	db = GetConnection()
	resources := []Resource{}
	err := db.Select(&resources, "SELECT uri, type, hash, data, data_b FROM resources WHERE type = $1",
		typeName)
	if err != nil {
		// Check the query error before using its results; the original
		// ranged over resources first and only then inspected err.
		log.Fatalln(err)
	}
	for _, element := range resources {
		log.Println(element)
	}
}
// makePeopleIndex connects to the local dgraph instance and installs
// the index schema for the person predicates used by addPeople.
func makePeopleIndex() {
	conn, err := grpc.Dial("127.0.0.1:9080", grpc.WithInsecure())
	if err != nil {
		log.Fatal("While trying to dial gRPC")
	}
	defer conn.Close()
	dc := api.NewDgraphClient(conn)
	dg := dgo.NewDgraphClient(dc)
	op := &api.Operation{}
	// Predicate names match the json tags on the Person struct.
	op.Schema = `
	personId: string @index(exact) .
	personFirstName: string @index(exact) .
	personLastName: string @index(exact) .
	personAlternateId: string @index(exact) .
	personPrimaryTitle: string @index(exact) .
	personOverview: string @index(fulltext) .
	`
	ctx := context.Background()
	err = dg.Alter(ctx, op)
	if err != nil {
		log.Fatal(err)
	}
}
// addPeople streams every Person resource from Postgres into dgraph as
// a JSON set-mutation, one committed transaction per row. Rows with
// undecodable or unmarshalable JSON are skipped with a log line.
func addPeople() {
	conn, err := grpc.Dial("127.0.0.1:9080", grpc.WithInsecure())
	if err != nil {
		log.Fatal("While trying to dial gRPC")
	}
	defer conn.Close()
	dc := api.NewDgraphClient(conn)
	dg := dgo.NewDgraphClient(dc)
	ctx := context.Background()
	mu := &api.Mutation{
		CommitNow: true,
	}
	people := getList("Person")
	for _, row := range people {
		log.Println(row)
		personJson := row.DataB
		// 1. decode the database JSON column
		var resource ResourcePerson
		if err := json.Unmarshal(personJson, &resource); err != nil {
			// The original ignored this error and would push
			// zero-valued records; skip the bad row instead.
			log.Println("skipping row, bad person JSON:", err)
			continue
		}
		// 2. copy ResourcePerson -> Person (dgraph shape)
		var personKeywords []PersonKeyword
		for _, keyword := range resource.Keywords {
			// implicit conversion (since they are exactly the same)
			personKeywords = append(personKeywords, PersonKeyword(keyword))
		}
		person := Person{resource.Id,
			resource.Uri,
			resource.AlternateId,
			resource.FirstName,
			resource.LastName,
			resource.MiddleName,
			resource.PrimaryTitle,
			resource.ImageUri,
			resource.ImageThumbnailUri,
			resource.Type,
			resource.Overview,
			personKeywords}
		// then back to JSON; named payload so it no longer shadows the
		// json package (the original used `json` as the variable name).
		payload, err := json.Marshal(person)
		if err != nil {
			log.Println("skipping row, cannot marshal person:", err)
			continue
		}
		log.Println(payload)
		mu.SetJson = payload
		if _, err = dg.NewTxn().Mutate(ctx, mu); err != nil {
			log.Fatal(err)
		}
		//variables := map[string]string{"$id": assigned.Uids["blank-0"]}
		//log.Println(variables)
	}
}
// clearResources removes existing records of the given type.
// Currently a stub: no type has a removal implementation yet.
func clearResources(typeName string) {
	if typeName == "people" {
		fmt.Println("not implemented")
	}
}
// listPeople logs every stored resource of type "Person".
func listPeople() {
	listType("Person")
}
// persistResources either previews (dryRun) or indexes-and-imports the
// resources of the requested type. Unknown types are ignored.
func persistResources(dryRun bool, typeName string) {
	switch typeName {
	case "people":
		if dryRun {
			listPeople()
		} else {
			makePeopleIndex()
			addPeople()
		}
	}
}
var psqlInfo string // assembled PostgreSQL DSN (set in main)
var db *sqlx.DB     // shared database handle (opened in main)
var conf Config     // decoded TOML configuration (loaded in main)
// GetConnection returns the shared database handle opened in main.
func GetConnection() *sqlx.DB {
	return db
}
// main loads the TOML config, opens the Postgres pool, then either removes
// or persists resources of the requested type, printing the elapsed time.
func main() {
	start := time.Now()
	var err error
	var configFile string
	flag.StringVar(&configFile, "config", "./config.toml", "a config filename")
	typeName := flag.String("type", "people", "type of records to import")
	dryRun := flag.Bool("dry-run", false, "just examine resources to be saved")
	remove := flag.Bool("remove", false, "remove existing records")
	flag.Parse()
	if _, err := toml.DecodeFile(configFile, &conf); err != nil {
		// The flag is named -config; the old message wrongly said "-c".
		fmt.Println("could not find config file, use -config option")
		os.Exit(1)
	}
	psqlInfo = fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		conf.Database.Server, conf.Database.Port,
		conf.Database.User, conf.Database.Password,
		conf.Database.Database)
	db, err = sqlx.Open("postgres", psqlInfo)
	if err != nil {
		// A bad DSN is unrecoverable: exit instead of continuing with an
		// unusable handle (the old code only logged and carried on).
		log.Fatalln("m=GetPool,msg=connection has failed", err)
	}
	// Register Close right after a successful Open, before any use.
	defer db.Close()
	if *remove {
		clearResources(*typeName)
	} else {
		persistResources(*dryRun, *typeName)
	}
	elapsed := time.Since(start)
	fmt.Printf("%s\n", elapsed)
}
|
// Copyright © 2020 author from config
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/jedib0t/go-pretty/table"
"github.com/spf13/cobra"
"reflect"
)
// userGetCmd represents the userGet command: `users get <id>` fetches a
// single user by ID and renders it as a table (or CSV, per the global
// output-format flag).
var userGetCmd = &cobra.Command{
	Use: "get [id]",
	Short: "get user by id",
	Args: cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		getUser(args[0])
	},
}
// init wires the `get` subcommand onto the parent `users` command.
func init() {
	usersCmd.AddCommand(userGetCmd)
	// No local or persistent flags: the user ID is taken as a positional
	// argument (see Args: cobra.MinimumNArgs(1) above).
}
// getUser looks up a single user by ID and prints one row with the user's
// main attributes, honoring the globally selected output format.
func getUser(id string) {
	user, err := m.User.Read(id)
	if err != nil {
		// NOTE(review): exit status 0 on a failed lookup looks suspicious —
		// confirm exitWithMessage's second argument really is an exit code.
		exitWithMessage(err, 0)
	}
	writer := table.NewWriter()
	writer.AppendHeader(table.Row{"ID", "Name", "Email", "Nickname", "Last login"})
	writer.SetAutoIndex(true)
	row := table.Row{
		getVal(user.ID),
		getVal(user.Name),
		getVal(user.Email),
		getVal(user.Nickname),
		getVal(user.LastLogin),
	}
	writer.AppendRow(row)
	// Render output in the requested format (CSV or plain table).
	if outFormat == "csv" {
		fmt.Println(writer.RenderCSV())
	} else {
		fmt.Println(writer.Render())
	}
}
func getVal(v interface{}) interface{} {
if !reflect.ValueOf(v).IsNil() {
return reflect.ValueOf(v).Elem()
}
return "N/A"
}
|
package main
import (
"github.com/markberger/tally"
)
// main boots the tally bot: initialize logging, construct the bot from its
// configuration, connect to the server, and run the event loop (blocking).
func main() {
	tally.InitLogging()
	bot := tally.NewBot()
	bot.Connect()
	bot.Run()
}
package main
import "fmt"
// Merge sort: a divide-and-conquer algorithm implemented with recursion.
// Recurrence: mergeSort(p..r) = merge(mergeSort(p..q), mergeSort(q..r));
// the recursion stops when p >= r.
// mergeSort sorts the first n elements of a in ascending order.
func mergeSort(a []int32, n int32) {
	mergeSort_go(a, 0, n-1)
}
// mergeSort_go recursively sorts a[lo..hi] (inclusive bounds): split at the
// midpoint, sort both halves, then merge them back together.
func mergeSort_go(a []int32, lo int32, hi int32) {
	// Ranges of length <= 1 are already sorted.
	if lo >= hi {
		return
	}
	mid := (lo + hi) / 2
	mergeSort_go(a, lo, mid)
	mergeSort_go(a, mid+1, hi)
	merge(a, lo, mid, hi)
}
// merge combines the two adjacent sorted runs a[p..q] and a[q+1..r] into a
// single sorted run, in place, via a temporary buffer.
func merge(a []int32, p, q, r int32) {
	tmp := make([]int32, 0, r-p+1)
	left, right := p, q+1
	// Repeatedly take the smaller head; <= keeps the sort stable.
	for left <= q && right <= r {
		if a[left] <= a[right] {
			tmp = append(tmp, a[left])
			left++
		} else {
			tmp = append(tmp, a[right])
			right++
		}
	}
	// Exactly one of the runs may still have elements; drain both loops
	// (at most one executes).
	for ; left <= q; left++ {
		tmp = append(tmp, a[left])
	}
	for ; right <= r; right++ {
		tmp = append(tmp, a[right])
	}
	// Copy the merged data back into a[p..r].
	copy(a[p:r+1], tmp)
}
// main demonstrates the sort on a small slice and prints the result.
func main() {
	data := []int32{5, 4, 65}
	mergeSort(data, int32(len(data)))
	fmt.Println(data)
}
|
package main
import (
"net/http"
"fmt"
"mime"
"io"
"encoding/json"
"mime/multipart"
"github.com/gorilla/mux"
"github.com/urfave/negroni"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
)
// VideoMetadata describes a stored video; currently only its identifier.
type VideoMetadata struct{
	Id string
}
// ContentRangeHeader represents a parsed Content-Range header: byte
// offsets Start-End out of Total. Not referenced elsewhere in this file.
type ContentRangeHeader struct{
	Start int
	End int
	Total int
}
// indexVideo handles GET / (video listing). Stub: not implemented yet.
func indexVideo(w http.ResponseWriter, req *http.Request) {
}
// createVideo handles POST /. It accepts either a bare JSON metadata
// document or a multipart/related request carrying metadata plus the video
// bytes. A missing or unparseable Content-Type now gets 415 (resolving the
// previous TODOs); any other media type gets 400, as before.
func createVideo(w http.ResponseWriter, req *http.Request) {
	contentType := req.Header.Get("Content-Type")
	if contentType == "" {
		writeJSONStatus(w, http.StatusUnsupportedMediaType)
		return
	}
	fmt.Println(contentType)
	mediaType, params, err := mime.ParseMediaType(contentType)
	if err != nil {
		writeJSONStatus(w, http.StatusUnsupportedMediaType)
		return
	}
	switch mediaType {
	case "application/json":
		postVideoMetadata(req.Body)
		// TODO: reply to client
	case "multipart/related":
		handleMultipartRelated(req, params)
	default:
		writeJSONStatus(w, http.StatusBadRequest)
	}
}

// writeJSONStatus writes a minimal JSON error body of the form
// "{\"code\": <status>}" with the given HTTP status code.
func writeJSONStatus(w http.ResponseWriter, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	resp, _ := json.Marshal(fmt.Sprintf("{\"code\": %d}", code))
	w.Write(resp)
}
// handleMultipartRelated parses a multipart/related upload: the first part
// must be UTF-8 application/json metadata, the second the video payload.
// The failure, if any, is carried in the second return value; the first
// error slot is historically unused but kept for caller compatibility.
func handleMultipartRelated(req *http.Request, params map[string]string) (error, error) {
	boundary, ok := params["boundary"]
	if !ok {
		// Without a boundary the body cannot be parsed at all; previously
		// this fell through and parsed with an empty boundary.
		return nil, fmt.Errorf("multipart/related request is missing the boundary parameter")
	}
	mpReader := multipart.NewReader(req.Body, boundary)
	jsonPart, err := mpReader.NextPart()
	if err != nil {
		return nil, err
	}
	contentType := jsonPart.Header.Get("Content-Type")
	mediaType, partParams, err := mime.ParseMediaType(contentType)
	if err != nil {
		return nil, err
	}
	charset, hasCharset := partParams["charset"]
	if mediaType != "application/json" || (hasCharset && charset != "utf-8") {
		// Previously this path could return (nil, nil) and mask the problem.
		return nil, fmt.Errorf("first part must be application/json (utf-8), got %q", contentType)
	}
	videoMetadata, err := postVideoMetadata(jsonPart)
	if err != nil {
		return nil, err
	}
	videoPart, err := mpReader.NextPart()
	if err != nil {
		return nil, err
	}
	if _, err = postVideoData(videoPart, videoMetadata); err != nil {
		return nil, err
	}
	return nil, nil
}
// postVideoMetadata persists the JSON metadata read from r.
// Stub: it only logs; it always returns (nil, nil) for now.
func postVideoMetadata(r io.Reader) (*VideoMetadata, error) {
	fmt.Println("Sending Video Metadata")
	return nil, nil
}
// postVideoData uploads the video bytes from r for the given metadata by
// delegating to basicPost. Currently always returns (nil, nil), even if
// the upload fails.
func postVideoData(r io.Reader, v *VideoMetadata) (*VideoMetadata, error){
	fmt.Println("Sending Video Data")
	basicPost(r)
	return nil, nil
}
// basicPost streams r to the staging S3 bucket using credentials taken
// from the environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY).
// Failures are printed; this function has no error return for callers.
func basicPost(r io.Reader) {
	auth, err := aws.EnvAuth()
	if err != nil {
		fmt.Println(err)
		// No credentials: bail out instead of attempting the upload with a
		// zero-valued Auth, as the old code did.
		return
	}
	client := s3.New(auth, aws.USEast)
	bucket := client.Bucket("mg4-video-staging")
	// NOTE(review): object key and Content-Length are hard-coded
	// placeholders — the real name/length must come from the request.
	err = bucket.PutReader("whatever", r, 355856563, "video/mp4", "")
	if err != nil {
		fmt.Println(err)
	}
}
// showVideo handles GET /{videoId}. Stub: not implemented yet.
func showVideo(w http.ResponseWriter, req *http.Request) {
}
// updateVideo handles PATCH/PUT /{videoId}. Stub: not implemented yet.
func updateVideo(w http.ResponseWriter, req *http.Request) {
}
// getVideoStatus handles HEAD /{videoId}. Stub: not implemented yet.
func getVideoStatus(w http.ResponseWriter, req *http.Request) {
}
// main wires the video routes into a gorilla/mux router and serves them
// through negroni's default middleware stack on port 3000.
func main() {
	r := mux.NewRouter()
	r.HandleFunc("/", indexVideo).Methods("GET")
	r.HandleFunc("/", createVideo).Methods("POST")
	r.HandleFunc("/{videoId}", showVideo).Methods("GET")
	r.HandleFunc("/{videoId}", updateVideo).Methods("PATCH", "PUT")
	r.HandleFunc("/{videoId}", getVideoStatus).Methods("HEAD")
	app := negroni.Classic()
	app.UseHandler(r)
	app.Run(":3000")
}
|
package sql
import (
"github.com/phogolabs/orm/dialect/sql/scan"
)
// DeleteMutation represents a delete mutation: a thin wrapper around a
// DeleteBuilder that derives its WHERE clause from an entity's
// primary-key columns (see Entity).
type DeleteMutation struct {
	builder *DeleteBuilder
}
// NewDelete creates a Mutation that deletes the entity with given primary key.
func NewDelete(table string) *DeleteMutation {
	mutation := DeleteMutation{builder: Delete(table)}
	return &mutation
}
// Entity returns the builder, constrained by every column of src that is
// marked as a primary key.
func (d *DeleteMutation) Entity(src interface{}) *DeleteBuilder {
	builder := d.builder
	iter := scan.IteratorOf(src)
	for iter.Next() {
		col := iter.Column()
		if !col.HasOption("primary_key") {
			continue
		}
		builder = builder.Where(EQ(col.Name, iter.Value().Interface()))
	}
	return builder
}
// InsertMutation represents an insert mutation: a thin wrapper around an
// InsertBuilder populated from an entity's columns (see Entity).
type InsertMutation struct {
	builder *InsertBuilder
}
// NewInsert creates a Mutation that will save the entity src into the db.
func NewInsert(table string) *InsertMutation {
	mutation := InsertMutation{builder: Insert(table)}
	return &mutation
}
// Entity returns the builder populated with src's column names and values.
// Auto-generated columns whose value is empty are omitted so the database
// can fill them in.
func (d *InsertMutation) Entity(src interface{}) *InsertBuilder {
	iter := scan.IteratorOf(src)
	columns := []string{}
	values := []interface{}{}
	for iter.Next() {
		column := iter.Column()
		value := iter.Value().Interface()
		if column.HasOption("auto") && scan.IsEmpty(value) {
			continue
		}
		columns = append(columns, column.Name)
		values = append(values, value)
	}
	return d.builder.Columns(columns...).Values(values...)
}
// UpdateMutation represents an update mutation: a thin wrapper around an
// UpdateBuilder populated from an entity's columns (see Entity).
type UpdateMutation struct {
	builder *UpdateBuilder
}
// NewUpdate creates a Mutation that updates the entity into the db.
// The table name is optional: when omitted the builder starts without one,
// which Entity interprets as the DO UPDATE branch of an upsert.
func NewUpdate(table ...string) *UpdateMutation {
	name := ""
	if len(table) > 0 {
		name = table[0]
	}
	return &UpdateMutation{
		builder: Update(name),
	}
}
// Entity returns the builder with SET clauses for src's writable columns.
// If columns is given, only those columns are set; otherwise every column
// is eligible. Columns marked read_only are never set, nil values become
// SET NULL, and — when the builder has a table name (i.e. not the
// DO UPDATE case of an upsert) — primary-key columns become WHERE clauses.
func (d *UpdateMutation) Entity(src interface{}, columns ...string) *UpdateBuilder {
	var (
		updater    = d.builder
		empty      = len(columns) == 0
		iterator   = scan.IteratorOf(src)
		updateable = make(map[string]interface{})
	)
	for iterator.Next() {
		column := iterator.Column()
		// With no explicit column list, update every column we iterate.
		if empty {
			columns = append(columns, column.Name)
		}
		// read_only columns are collected nowhere, so they can never be set.
		if !column.HasOption("read_only") {
			updateable[column.Name] = iterator.Value().Interface()
		}
		// if the update statement does not have a table name it
		// means that we are in the DO UPDATE case (no WHERE allowed)
		if updater.table != "" {
			if column.HasOption("primary_key") {
				updater.Where(EQ(column.Name, iterator.Value().Interface()))
			}
		}
	}
	for _, name := range columns {
		if value, ok := updateable[name]; ok {
			if scan.IsNil(value) {
				updater.SetNull(name)
			} else {
				updater.Set(name, value)
			}
		}
	}
	return updater
}
|
package api
import (
"net/http"
"github.com/ken-aio/go-echo-base/model"
"github.com/Sirupsen/logrus"
"github.com/labstack/echo"
. "gopkg.in/check.v1"
)
// Test_PostXxx posts a new Xxx and verifies that it was persisted with the
// expected name, that the handler returned 201 Created, and that the
// response body was empty.
// gocheck's Assert signature is (obtained, checker, expected); the
// arguments were previously reversed, which swaps the labels in failure
// output.
func (s *ApiSuite) Test_PostXxx(c *C) {
	expectedName := "Test_PostXxx"
	xxx_json := `{"name":"` + expectedName + `"}`
	context, rec := buildContext(c, echo.POST, "/api/v1/xxxs", xxx_json)
	tx, code, resp := requestAPI(c, PostXxx(), context, rec)
	resXxx := new(model.Xxx)
	// NOTE(review): LoadStruct's error is ignored; a failed load surfaces
	// as the name assertion failing below.
	tx.Select("*").From("xxxs").Where("name = ?", expectedName).LoadStruct(resXxx)
	tx.Rollback()
	logrus.Info(resXxx)
	c.Assert(resXxx.Name, Equals, expectedName)
	c.Assert(code, Equals, http.StatusCreated)
	c.Assert(resp, Equals, "")
}
// Test_GetXxx fetches a single Xxx via the :id path parameter and checks
// the status code and rendered JSON. Assert arguments are ordered
// (obtained, checker, expected) per gocheck's signature; they were
// previously reversed, which garbles failure messages.
func (s *ApiSuite) Test_GetXxx(c *C) {
	expected := `{"id":1,"name":"name1"}`
	context, rec := buildContext(c, echo.GET, "dummy", "")
	context.SetPath("/api/v1/xxxs/:id")
	context.SetParamNames("id")
	context.SetParamValues("1")
	tx, code, resp := requestAPI(c, GetXxx(), context, rec)
	tx.Rollback()
	c.Assert(code, Equals, http.StatusOK)
	c.Assert(resp, Equals, expected)
}
// Test_GetXxxs fetches the full Xxx collection and checks the status code
// and rendered JSON. Assert arguments are ordered (obtained, checker,
// expected) per gocheck's signature; they were previously reversed.
func (s *ApiSuite) Test_GetXxxs(c *C) {
	expected := `[{"id":1,"name":"name1"},{"id":2,"name":"name2"}]`
	context, rec := buildContext(c, echo.GET, "dummy", "")
	context.SetPath("/api/v1/xxxs")
	tx, code, resp := requestAPI(c, GetXxxs(), context, rec)
	tx.Rollback()
	c.Assert(code, Equals, http.StatusOK)
	c.Assert(resp, Equals, expected)
}
|
// author: ashing
// time: 2019/12/25 12:08 下午
// mail: axingfly@gmail.com
// Less is more.
package route
import (
"github.com/gin-gonic/gin"
"github.com/ronething/mp-wechat-go/controller"
)
// Register installs the global middleware (panic recovery first, then
// request logging) and mounts the API routes on the engine.
func Register(g *gin.Engine) {
	g.Use(gin.Recovery(), gin.Logger())
	registerApi(g)
}
// registerApi mounts the HTTP routes: the hello handler answers both GET
// and POST on the root path.
func registerApi(g *gin.Engine) {
	for _, method := range []string{"GET", "POST"} {
		g.Handle(method, "/", controller.Hello)
	}
}
|
package files
import (
	"bufio"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)
// initialFileContent is the markdown skeleton written to a brand-new
// taskgo.md: a board title followed by three list headings.
const initialFileContent = `# %s
## %s
## %s
## %s
`
// validPrefixes lists every line prefix the taskgo.md format accepts.
var validPrefixes = [...]string{
	// Board Name
	"# ",
	// List Name
	"## ",
	// Task
	"- ",
	// Task Description
	"> ",
	// Subtask
	"* ",
}
// CheckFile checks if the file(taskgo.md) is present in the current directory or not.
func CheckFile(fileName string) bool {
dir, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
filePath := dir + fileName
_, err = os.Stat(filePath)
return err == nil
}
// CreateFile creates the file(taskgo.md), aborting the program on failure.
func CreateFile(fileName string) {
	f, err := OpenFileWriteOnly(fileName)
	if err != nil {
		// Check the error before deferring Close: on failure f is nil, and
		// the old code registered the Close first.
		log.Fatalf("Cannot create file %q, ERR: %v", fileName, err)
	}
	defer f.Close()
}
// WriteInitialContent writes the initial board skeleton (directory name as
// board title plus TODO/DOING/DONE lists) to the file.
func WriteInitialContent(fileName string) {
	f, err := OpenFileWriteOnly(fileName)
	if err != nil {
		// Check the error before deferring Close: on failure f is nil, and
		// the old code registered the Close first.
		log.Fatalf("Cannot Open file %q, ERR: %v", fileName, err)
	}
	defer f.Close()
	// TODO: Make these customizable
	_, err = f.WriteString(fmt.Sprintf(initialFileContent, GetDirectoryName(), "TODO", "DOING", "DONE"))
	if err != nil {
		log.Fatalf("Cannot write contents to file (%v): %v", fileName, err)
	}
}
// OpenFileWriteOnly opens file in write only mode.
func OpenFileWriteOnly(fileName string) (*os.File, error) {
dir, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("Cannot open file: %v", err)
}
return os.OpenFile(dir+fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
}
// GetDirectoryName returns the name of current working directory.
func GetDirectoryName() string {
dir, err := os.Getwd()
if err != nil {
log.Fatalf("Cannot get directory name: %v", err)
}
dirs := strings.Split(dir, "/")
dirName := dirs[len(dirs)-1]
return dirName
}
// CheckPrefix reports whether the line, after trimming surrounding
// whitespace, starts with any of the prefixes valid in a taskgo.md file.
func CheckPrefix(line string) bool {
	trimmed := strings.TrimSpace(line)
	for _, prefix := range validPrefixes {
		if strings.HasPrefix(trimmed, prefix) {
			return true
		}
	}
	return false
}
// FilePath returns the complete path of file given the fileName.
func FilePath(fileName string) (string, error) {
dir, err := os.Getwd()
if err != nil {
return "", err
}
filePath := dir + fileName
return filePath, nil
}
// OpenFile opens the given file and returns its content line by line as a
// slice of string. Any I/O failure aborts the program.
func OpenFile(fileName string) []string {
	filePath, err := FilePath(fileName)
	if err != nil {
		log.Fatal(err)
	}
	file, err := os.Open(filePath)
	if err != nil {
		// Check before deferring Close: file is nil when Open fails, and
		// the old code registered the Close first.
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	var fileContent []string
	for scanner.Scan() {
		fileContent = append(fileContent, scanner.Text())
	}
	// Surface read errors that previously caused a silent truncation.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return fileContent
}
// WriteFile writes a slice of strings to the named file, one line each.
// It returns an error if the file cannot be created or written.
func WriteFile(fileContent []string, fileName string) error {
	path, err := FilePath(fileName)
	if err != nil {
		return err
	}
	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer out.Close()
	writer := bufio.NewWriter(out)
	for _, line := range fileContent {
		fmt.Fprintln(writer, line)
	}
	// Flush reports the first error encountered by any buffered write.
	return writer.Flush()
}
|
import "sort"
/*
* @lc app=leetcode id=561 lang=golang
*
* [561] Array Partition I
*
* https://leetcode.com/problems/array-partition-i/description/
*
* algorithms
* Easy (71.69%)
* Likes: 796
* Dislikes: 2463
* Total Accepted: 211.6K
* Total Submissions: 294.9K
* Testcase Example: '[1,4,3,2]'
*
*
* Given an array of 2n integers, your task is to group these integers into n
* pairs of integer, say (a1, b1), (a2, b2), ..., (an, bn) which makes sum of
* min(ai, bi) for all i from 1 to n as large as possible.
*
*
* Example 1:
*
* Input: [1,4,3,2]
*
* Output: 4
* Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3,
* 4).
*
*
*
* Note:
*
* n is a positive integer, which is in the range of [1, 10000].
* All the integers in the array will be in the range of [-10000, 10000].
*
*
*/
// @lc code=start
// arrayPairSum maximizes the sum of min(ai, bi) over n pairs: after
// sorting, each pair's minimum is the element at the even index, so the
// answer is the sum of every other element.
// NOTE: sorts nums in place (the caller's slice is mutated).
func arrayPairSum(nums []int) int {
	sort.Ints(nums)
	total := 0
	for idx := 0; idx < len(nums); idx += 2 {
		total += nums[idx]
	}
	return total
}
// @lc code=end
|
// Copyright 2021 The Perses Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shared
import (
"fmt"
"github.com/labstack/echo/v4"
v1 "github.com/perses/perses/pkg/model/api/v1"
)
// Route parameter names and URL path segments shared by the v1 REST API.
const (
	ParamName            = "name"
	ParamProject         = "project"
	APIV1Prefix          = "/api/v1"
	PathDashboard        = "dashboards"
	PathDatasource       = "datasources"
	PathGlobalDatasource = "globaldatasources"
	PathProject          = "projects"
	PathUser             = "users"
)
// getNameParameter extracts the :name path parameter from the request context.
func getNameParameter(ctx echo.Context) string {
	return ctx.Param(ParamName)
}
// getProjectParameter extracts the :project path parameter from the request context.
func getProjectParameter(ctx echo.Context) string {
	return ctx.Param(ParamProject)
}
// validateMetadata rejects metadata whose mandatory fields are empty:
// project-scoped metadata needs both a project and a name, plain metadata
// needs only a name. Any other metadata type is accepted unchanged.
func validateMetadata(metadata interface{}) error {
	requireNonEmpty := func(field, value string) error {
		if value == "" {
			return fmt.Errorf("metadata.%s cannot be empty", field)
		}
		return nil
	}
	switch m := metadata.(type) {
	case *v1.ProjectMetadata:
		if err := requireNonEmpty("project", m.Project); err != nil {
			return err
		}
		return requireNonEmpty("name", m.Name)
	case *v1.Metadata:
		return requireNonEmpty("name", m.Name)
	}
	return nil
}
|
package gbvideo
import (
"time"
"github.com/Lavos/gbvideo/giantbomb"
)
// VideoDownload pairs a Giant Bomb video with its local download state.
type VideoDownload struct {
	giantbomb.Video
	// DownloadDate is the download timestamp; presumably nil until the
	// video has been downloaded — confirm against the writers.
	DownloadDate *time.Time
	// Queued reports whether the video is waiting in the download queue.
	Queued bool
}
|
package lxd
import "net/http"
import "log"
import "crypto/tls"
import "time"
import "cointhink/config"
// NewClient builds an http.Client that presents the LXD client
// certificate/key named in the configuration and uses a 10-second
// request timeout. A missing or unreadable key pair aborts the process.
// NOTE(review): InsecureSkipVerify disables server-certificate
// verification, so the LXD endpoint itself is not authenticated —
// confirm this is intentional.
func NewClient() http.Client {
	certFile := config.C.QueryString("lxd.certFile")
	keyFile := config.C.QueryString("lxd.keyFile")
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Fatal(err)
	}
	transport := http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true,
			Certificates: []tls.Certificate{cert}}}
	client := http.Client{
		Timeout:   (time.Second * 10),
		Transport: &transport}
	return client
}
// clientCache memoizes the client built by NewClient.
var clientCache *http.Client
// Client returns the lazily-initialized shared HTTP client.
// NOTE(review): the check-then-set initialization is not goroutine-safe;
// if Client can be called concurrently, guard it with sync.Once.
func Client() *http.Client {
	if clientCache == nil {
		fixedClient := NewClient()
		clientCache = &fixedClient
	}
	return clientCache
}
|
/**
* @website: https://vvotm.github.io
* @author luowen<bigpao.luo@gmail.com>
* @date 2017/12/26 20:52
* @description:
*/
package request
// ReqReplies is the bound request payload for posting a reply: the joke
// being replied to, the replying user, the reply text, and an optional
// image list. JokerId, Uid and Content are required by validation.
type ReqReplies struct {
	JokerId   int    `json:"jokerId" form:"jokerId" query:"jokerId" validate:"required"`
	Uid       int    `json:"uid" form:"uid" query:"uid" validate:"required"`
	Content   string `json:"content" form:"content" query:"content" validate:"required"`
	ImageList string `json:"imageList" form:"imageList" query:"imageList"`
}

// NewReqReplies returns a zero-valued ReqReplies ready to be bound.
func NewReqReplies() *ReqReplies {
	return new(ReqReplies)
}
|
package ds
// KConfig is the package-wide configuration instance.
var KConfig Config
// Config holds the runtime settings read by this package.
type Config struct {
	// LogPath is the location log output is written to.
	LogPath string
	// ShowDisabled — presumably includes disabled entries in output;
	// TODO confirm against the readers of this flag.
	ShowDisabled bool
	// Mode selects an operating mode; its values are defined by callers.
	Mode int
	// TodosOnly — presumably restricts output to TODO entries; TODO confirm.
	TodosOnly bool
}
|
package state
import "time"
// PostgresVacuumProgress - a PostgreSQL vacuum that's currently running
//
// The Phase/HeapBlks*/IndexVacuumCount/*DeadTuples fields mirror the
// columns of pg_stat_progress_vacuum; see
// https://www.postgresql.org/docs/10/static/progress-reporting.html
type PostgresVacuumProgress struct {
	VacuumIdentity  uint64 // Combination of vacuum "query" start time and PID, used to identify a vacuum over time
	BackendIdentity uint64 // Combination of process start time and PID, used to identify a process over time
	DatabaseName    string
	SchemaName      string
	RelationName    string
	RoleName        string
	StartedAt       time.Time
	Autovacuum      bool // presumably true when started by the autovacuum daemon — confirm against the collector
	Toast           bool // presumably true when vacuuming a TOAST table — confirm against the collector
	Phase           string
	HeapBlksTotal    int64
	HeapBlksScanned  int64
	HeapBlksVacuumed int64
	IndexVacuumCount int64
	MaxDeadTuples    int64
	NumDeadTuples    int64
}
|
package gerrit
import (
"strconv"
"time"
)
// These types are based on the entity definitions at
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html ,
// https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html , etc.
// Note that although they represent much the same things, these types are not the same as the
// types used for streaming events, which can be found in gerrit/events/types.go. These two
// classes of type are, for the most part, wholly incompatible with each other.
// AccountInfo contains information about a Gerrit account.
//
// Fields without an explicit json tag (Name, Email, Username, Avatars)
// rely on encoding/json's case-insensitive field matching when decoding.
type AccountInfo struct {
	// AccountID is the numeric ID of the account.
	AccountID int `json:"_account_id,omitempty"`
	// Name is the full name of the user. Only set if detailed account information is requested
	// with DescribeDetailedAccounts (for change queries) or DescribeDetails (for account
	// queries).
	Name string
	// Email is the email address the user prefers to be contacted through. Only set if detailed
	// account information is requested with DescribeDetailedAccounts (for change queries) or
	// DescribeDetails (for account queries).
	Email string
	// SecondaryEmails is a list of the secondary email addresses of the user. Only set for
	// account queries when DescribeAllEmails is given, and if the calling user has the
	// ModifyAccount capability.
	SecondaryEmails []string `json:"secondary_emails,omitempty"`
	// Username is the username of the user. Only set if detailed account information is
	// requested with DescribeDetailedAccounts (for change queries) or DescribeDetails (for
	// account queries).
	Username string
	// Avatars is a list of usable avatar icons for the user.
	Avatars []struct {
		URL    string
		Height int
	} `json:",omitempty"`
	// MoreAccounts indicates whether an account query would deliver more results if not
	// limited. Only set on the last account that is returned.
	MoreAccounts bool `json:"_more_accounts,omitempty"`
}
// LabelInfo contains information about a label on a change, always corresponding to the
// current patch set. Which field groups are populated depends on whether
// DescribeLabels or DescribeDetailedLabels was requested (see below).
type LabelInfo struct {
	// Optional is whether the label is optional. Optional means the label may be set, but
	// it's neither necessary for submission nor does it block submission if set.
	Optional bool
	// ----------------------------------------------------------------------------
	// The following fields are only set when DescribeLabels is requested.
	// ----------------------------------------------------------------------------
	// Approved is one user who approved this label on the change (voted the maximum value).
	Approved *AccountInfo
	// Rejected is one user who rejected this label on the change (voted the minimum value).
	Rejected *AccountInfo
	// Recommended is one user who recommended this label on the change (voted positively, but
	// not the maximum value).
	Recommended *AccountInfo
	// Disliked is one user who disliked this label on the change (voted negatively, but not
	// the minimum value).
	Disliked *AccountInfo
	// Blocking is whether the labels blocks submit operation.
	Blocking bool
	// Value is the voting value of the user who recommended/disliked this label on the change
	// if it is not "+1"/"-1".
	Value int
	// DefaultValue is the default voting value for the label. This value may be outside the
	// range specified in PermittedLabels.
	DefaultValue int `json:"default_value"`
	// ----------------------------------------------------------------------------
	// The following fields are only set when DescribeDetailedLabels is requested.
	// ----------------------------------------------------------------------------
	// All is a list of all approvals for this label as a list of ApprovalInfo entities. Items
	// in this list may not represent actual votes cast by users; if a user votes on any label,
	// a corresponding ApprovalInfo will appear in this list for all labels.
	All []ApprovalInfo
	// Values is a map of all values that are allowed for this label. The map maps the values
	// ("-2","-1","0","+1","+2") to the value descriptions.
	Values map[string]string
}
// ApprovalInfo describes one user's approval (vote) on a label of a change.
// It embeds the voter's AccountInfo.
type ApprovalInfo struct {
	AccountInfo
	// Value is the vote that the user has given for the label. If present and zero, the user
	// is permitted to vote on the label. If absent, the user is not permitted to vote on that
	// label.
	Value *int
	// PermittedVotingRange is the VotingRangeInfo the user is authorized to vote on that
	// label. If present, the user is permitted to vote on the label regarding the range
	// values. If absent, the user is not permitted to vote on that label.
	PermittedVotingRange VotingRangeInfo `json:"permitted_voting_range"`
	// Date is the time and date describing when the approval was made.
	Date string
	// Tag is the value of the tag field from ReviewInput set while posting the review.
	// Votes/comments that contain tag with 'autogenerated:' prefix can be filtered out in the
	// web UI. NOTE: To apply different tags on different votes/comments multiple invocations
	// of the REST call are required.
	Tag string
	// PostSubmit indicates that this vote was made after the change was submitted.
	PostSubmit bool `json:"post_submit,omitempty"`
}
// VotingRangeInfo describes the continuous voting range from min to max
// values (both inclusive) a user may cast on a label.
type VotingRangeInfo struct {
	// Min is the minimum voting value.
	Min int
	// Max is the maximum voting value.
	Max int
}
// ChangeInfo refers to a change being reviewed, or that was already reviewed.
type ChangeInfo struct {
	// ID gives the ID of the change in the format "<project>~<branch>~<Change-Id>", where
	// 'project', 'branch', and 'Change-Id' are URL encoded. For 'branch' the refs/heads/
	// prefix is omitted.
	ID string
	// Project is the name of the project.
	Project string
	// Branch is the name of the target branch. The refs/heads/ prefix is omitted.
	Branch string
	// Topic is the topic to which this change belongs.
	Topic string
	// Assignee is the assignee of the change.
	Assignee AccountInfo
	// Hashtags is a list of hashtags that are set on the change (only populated when NoteDb is
	// enabled).
	Hashtags []string
	// ChangeID is the Change-ID of the change.
	ChangeID string `json:"change_id"`
	// Subject is the subject of the change (header line of the commit message).
	Subject string
	// Status is the status of the change ("NEW"/"MERGED"/"ABANDONED").
	Status string
	// Created is the timestamp of when the change was created.
	Created string
	// Updated is the timestamp of when the change was last updated.
	Updated string
	// Submitted is the timestamp of when the change was submitted.
	Submitted string
	// Submitter is the user who submitted the change.
	Submitter AccountInfo
	// Starred indicates whether the calling user has starred this change with the default label.
	Starred bool `json:",omitempty"`
	// Stars is a list of star labels that are applied by the calling user to this change. The
	// labels are lexicographically sorted.
	Stars []string
	// Reviewed indicates whether the change was reviewed by the calling user. Only set if
	// DescribeReviewed is requested.
	Reviewed bool `json:",omitempty"`
	// SubmitType is the submit type of the change ("INHERIT"/"FAST_FORWARD_ONLY"/
	// "MERGE_IF_NECESSARY"/"ALWAYS_MERGE"/"CHERRY_PICK"/"REBASE_IF_NECESSARY"/"REBASE_ALWAYS").
	// Not set for merged changes.
	// (The doc comment existed but the field itself was missing, so
	// "submit_type" was silently dropped when decoding.)
	SubmitType string `json:"submit_type,omitempty"`
	// Mergeable indicates whether the change is mergeable. Not set for merged changes, if the
	// change has not yet been tested, or if DescribeSkipMergeable is passed or when
	// change.api.excludeMergeableInChangeInfo is set in the Gerrit config.
	Mergeable bool
	// Submittable is whether the change has been approved by the project submit rules. Only
	// set if requested with DescribeSubmittable.
	Submittable bool
	// Insertions is the number of inserted lines.
	Insertions int
	// Deletions is the number of deleted lines.
	Deletions int
	// TotalCommentCount is the total number of inline comments across all patch sets. Not set
	// if the current change index doesn't have the data.
	TotalCommentCount int `json:"total_comment_count"`
	// UnresolvedCommentCount is the number of unresolved inline comment threads across all
	// patch sets. Not set if the current change index doesn't have the data.
	UnresolvedCommentCount int `json:"unresolved_comment_count"`
	// Number is the legacy numeric ID of the change.
	Number int `json:"_number,omitempty"`
	// Owner is the owner of the change.
	Owner AccountInfo
	// Actions is actions the caller might be able to perform on this revision. The information
	// is a map of view name to ActionInfo entries.
	Actions map[string]ActionInfo
	// Requirements is a list of the requirements to be met before this change can be submitted.
	Requirements []Requirement
	// Labels is the labels of the change as a map that maps the label names to LabelInfo
	// entries. Only set if DescribeLabels or DescribeDetailedLabels are requested.
	Labels map[string]LabelInfo
	// PermittedLabels is a map of the permitted labels that maps a label name to the list of
	// values that are allowed for that label. Only set if DescribeDetailedLabels is requested.
	PermittedLabels map[string][]string `json:"permitted_labels"`
	// RemovableReviewers is the reviewers that can be removed by the calling user as a list of
	// AccountInfo entities. Only set if DescribeDetailedLabels is requested.
	RemovableReviewers []AccountInfo `json:"removable_reviewers"`
	// Reviewers is a map that maps a reviewer state to a list of AccountInfo entities. Possible
	// reviewer states are "REVIEWER", "CC", and "REMOVED". Only set if DescribeDetailedLabels
	// is requested.
	Reviewers map[string][]AccountInfo
	// PendingReviewers is updates to Reviewers that have been made while the change was in the
	// WIP state. Only present on WIP changes and only if there are pending reviewer updates to
	// report. These are reviewers who have not yet been notified about being added to or
	// removed from the change.
	// NOTE(review): the comment describes the same state→list shape as
	// Reviewers, but the type maps to a single AccountInfo — confirm
	// against the Gerrit REST docs before anyone decodes this field.
	PendingReviewers map[string]AccountInfo `json:"pending_reviewers"`
	// ReviewerUpdates is updates to Reviewers set for the change as ReviewerUpdateInfo
	// entities. Only set if DescribeReviewerUpdates is requested and if NoteDb is enabled.
	ReviewerUpdates []ReviewerUpdateInfo `json:"reviewer_updates"`
	// Messages is messages associated with the change as a list of ChangeMessageInfo
	// entities. Only set if DescribeMessages is requested.
	Messages []ChangeMessageInfo
	// CurrentRevision is the commit ID of the current patch set of this change. Only set if
	// DescribeCurrentRevision or DescribeAllRevisions are requested.
	CurrentRevision string `json:"current_revision"`
	// Revisions is all patch sets of this change as a map that maps the commit ID of the
	// patch set to a RevisionInfo entity. Only set if DescribeCurrentRevision is requested
	// (in which case it will only contain a key for the current revision) or if
	// DescribeAllRevisions is requested.
	Revisions map[string]RevisionInfo
	// TrackingIDs is a list of TrackingIDInfo entities describing references to external
	// tracking systems. Only set if DescribeTrackingIDs is requested.
	TrackingIDs []TrackingIDInfo `json:"tracking_ids"`
	// MoreChanges indicates whether the query would deliver more results if not limited.
	// Only set on the last change that is returned.
	MoreChanges bool `json:"_more_changes,omitempty"`
	// Problems is a list of ProblemInfo entities describing potential problems with this
	// change. Only set if DescribeCheck is requested.
	Problems []ProblemInfo
	// IsPrivate indicates whether the change is marked as private.
	IsPrivate bool `json:"is_private"`
	// WorkInProgress indicates whether the change is marked as Work In Progress.
	WorkInProgress bool `json:"work_in_progress"`
	// HasReviewStarted indicates whether the change has been marked Ready at some point in
	// time.
	HasReviewStarted bool `json:"has_review_started"`
	// RevertOf gives the numeric Change-Id of the change that this change reverts.
	RevertOf string `json:"revert_of,omitempty"`
}
// ActionInfo describes a REST API call the client can make to manipulate a resource. These are
// frequently implemented by plugins and may be discovered at runtime.
type ActionInfo struct {
	// Method is the HTTP method to use with the action. Most actions use POST, PUT or DELETE
	// to cause state changes.
	Method string
	// Label is a short title to display to a user describing the action. In the Gerrit web
	// interface the label is used as the text on the button presented in the UI.
	Label string
	// Title is longer text to display describing the action. In a web UI this should be the
	// title attribute of the element, displaying when the user hovers the mouse.
	Title string
	// Enabled indicates that the action is permitted at this time and the caller is likely
	// allowed to execute it. This may change if state is updated at the server or permissions
	// are modified.
	Enabled bool
}
// Requirement contains information about a requirement relative to a change.
type Requirement struct {
	// Status is the status of the requirement. Can be either "OK", "NOT_READY" or "RULE_ERROR".
	Status string
	// FallbackText is a human readable reason.
	FallbackText string `json:"fallback_text"`
	// Type is an alphanumerical (plus hyphens or underscores) string to identify what the
	// requirement is and why it was triggered. Can be seen as a class: requirements sharing
	// the same type were created for a similar reason, and the data structure will follow one
	// set of rules.
	Type string
	// Data holds custom key-value strings, used in templates to render richer status messages.
	// (Not sure what structure that data takes.) Left as interface{} so arbitrary
	// JSON shapes can be decoded without loss.
	Data interface{}
}
// ReviewerUpdateInfo contains information about updates to change’s reviewers set.
type ReviewerUpdateInfo struct {
	// Updated is the Timestamp of the update (Gerrit timestamp string; see
	// ParseTimestamp for the expected layout — TODO confirm).
	Updated string
	// UpdatedBy is the account which modified state of the reviewer in question as AccountInfo
	// entity.
	UpdatedBy AccountInfo `json:"updated_by"`
	// Reviewer is the reviewer account added or removed from the change as an AccountInfo
	// entity.
	Reviewer *AccountInfo
	// State is the reviewer state, one of "REVIEWER", "CC" or "REMOVED".
	State string
}
// ReviewerInfo entity contains information about a reviewer and its votes on a change.
type ReviewerInfo struct {
	// AccountInfo is embedded, so the reviewer's account fields decode at this level.
	AccountInfo
	// Approvals is the approvals of the reviewer as a map that maps the label names to the
	// approval values (“-2”, “-1”, “0”, “+1”, “+2”).
	Approvals map[string]string
}
// ChangeMessageInfo contains information about a message attached to a change.
type ChangeMessageInfo struct {
	// ID is the ID of the message.
	ID string
	// Author is the author of the message as an AccountInfo entity. Unset if written by the
	// Gerrit system.
	Author AccountInfo
	// RealAuthor is the real author of the message as an AccountInfo entity. Set if the
	// message was posted on behalf of another user. Pointer so "unset" is distinguishable.
	RealAuthor *AccountInfo `json:"real_author"`
	// Date is the timestamp this message was posted.
	Date string
	// Message is the text left by the user.
	Message string
	// Tag is the value of the tag field from ReviewInput set while posting the review.
	// Votes/comments that contain tag with 'autogenerated:' prefix can be filtered out in the
	// web UI. NOTE: To apply different tags on different votes/comments multiple invocations
	// of the REST call are required.
	Tag string
	// RevisionNumber indicates which patchset (if any) generated this message.
	RevisionNumber int `json:"_revision_number"`
}
// RevisionInfo contains information about a patch set. Not all fields are returned by default.
// Additional fields can be obtained by setting fields on QueryChangesOpts.
type RevisionInfo struct {
	// Kind is the change kind. Valid values are "REWORK", "TRIVIAL_REBASE",
	// "MERGE_FIRST_PARENT_UPDATE", "NO_CODE_CHANGE", and "NO_CHANGE".
	Kind string
	// Number is the patch set number, or "edit" if the patch set is an edit
	// (decoded to PatchSetIsEdit by PatchSetNumber.UnmarshalJSON).
	Number PatchSetNumber `json:"_number"`
	// Created is the timestamp of when the patch set was created.
	Created string
	// Uploader is the uploader of the patch set as an AccountInfo entity.
	Uploader AccountInfo
	// Ref is the Git reference for the patch set.
	Ref string
	// Fetch is information about how to fetch this patch set. The fetch information is
	// provided as a map that maps the protocol name (“git”, “http”, “ssh”) to FetchInfo
	// entities. This information is only included if a plugin implementing the download
	// commands interface is installed.
	Fetch map[string]FetchInfo
	// Commit is the commit of the patch set as CommitInfo entity.
	Commit CommitInfo
	// Files is the files of the patch set as a map that maps the file names to FileInfo
	// entities. Only set if DescribeCurrentFiles or DescribeAllFiles options are requested.
	Files map[string]FileInfo
	// Actions is actions the caller might be able to perform on this revision. The information
	// is a map of view name to ActionInfo entities.
	Actions map[string]ActionInfo
	// Reviewed indicates whether the caller is authenticated and has commented on the
	// current revision. Only set if DescribeReviewed option is requested.
	Reviewed bool
	// MessageWithFooter contains the full commit message with Gerrit-specific commit footers,
	// as if this revision were submitted using the Cherry Pick submit type. Only set when
	// the DescribeCommitFooters option is requested and when this is the current patch set.
	MessageWithFooter string `json:"message_with_footer"`
	// PushCertificate contains the push certificate provided by the user when uploading this
	// patch set as a PushCertificateInfo entity. This field is set if and only if the
	// DescribePushCertificates option is requested; if no push certificate was provided, it
	// is set to an empty object.
	PushCertificate *PushCertificateInfo `json:"push_certificate"`
	// Description is the description of this patchset, as displayed in the patchset selector
	// menu. May be empty if no description is set.
	Description string
}
// TrackingIDInfo describes a reference to an external tracking system.
type TrackingIDInfo struct {
	// System is the name of the external tracking system.
	System string
	// ID is the tracking id within that system.
	ID string
}
// ProblemInfo contains a description of a potential consistency problem with a change. These are
// not related to the code review process, but rather indicate some inconsistency in Gerrit’s
// database or repository metadata related to the enclosing change.
type ProblemInfo struct {
	// Message is a plaintext message describing the problem with the change.
	Message string
	// Status is the status of fixing the problem ("FIXED", "FIX_FAILED"). Only set if a fix
	// was attempted.
	Status string
	// Outcome is an additional plaintext message describing the outcome of the fix, if Status
	// is set.
	Outcome string
}
// FetchInfo contains information about how to fetch a patch set via a certain protocol.
type FetchInfo struct {
	// URL is the URL of the project.
	URL string
	// Ref is the ref of the patch set.
	Ref string
	// Commands gives the download commands for this patch set as a map that maps the command
	// names to the commands. Only set if DescribeDownloadCommands is requested.
	Commands map[string]string
}
// CommitInfo contains information about a commit.
type CommitInfo struct {
	// Commit is the commit ID. Not set if included in a RevisionInfo entity that is contained
	// in a map which has the commit ID as key.
	Commit string
	// Parents is the parent commits of this commit as a list of CommitInfo entities. In each
	// parent only the commit and subject fields are populated.
	Parents []CommitInfo
	// Author is the author of the commit as a GitPersonInfo entity.
	Author GitPersonInfo
	// Committer is the committer of the commit as a GitPersonInfo entity.
	Committer GitPersonInfo
	// Subject is the subject of the commit (header line of the commit message).
	Subject string
	// Message is the commit message.
	Message string
	// WebLinks is links to the commit in external sites as a list of WebLinkInfo entities.
	WebLinks []WebLinkInfo `json:"web_links"`
}
// The FileInfo entity contains information about a file in a patch set.
type FileInfo struct {
	// Status is the status of the file (“A”=Added, “D”=Deleted, “R”=Renamed, “C”=Copied,
	// “W”=Rewritten). Not set if the file was Modified (“M”).
	Status string
	// Binary indicates whether the file is binary.
	Binary bool
	// OldPath is the old file path. Only set if the file was renamed or copied.
	OldPath string `json:"old_path"`
	// LinesInserted is the number of inserted lines. Not set for binary files or if no lines
	// were inserted. An empty last line is not included in the count and hence this number can
	// differ by one from details provided in DiffInfo.
	LinesInserted int `json:"lines_inserted"`
	// LinesDeleted is the number of deleted lines. Not set for binary files or if no lines
	// were deleted. An empty last line is not included in the count and hence this number can
	// differ by one from details provided in DiffInfo.
	LinesDeleted int `json:"lines_deleted"`
	// SizeDelta is the number of bytes by which the file size increased/decreased.
	SizeDelta int `json:"size_delta"`
	// Size is the file size in bytes.
	Size int
}
// PushCertificateInfo contains information about a push certificate provided when the user pushed
// for review with git push --signed HEAD:refs/for/<branch>. Only used when signed push is enabled
// on the server.
type PushCertificateInfo struct {
	// Certificate is the signed certificate payload and GPG signature block.
	Certificate string
	// Key is information about the key that signed the push, along with any problems found
	// while checking the signature or the key itself, as a GpgKeyInfo entity.
	Key GpgKeyInfo
}
// GitPersonInfo contains information about the author/committer of a commit.
type GitPersonInfo struct {
	// Name is the name of the author/committer.
	Name string
	// Email is the email address of the author/committer.
	Email string
	// Date is the timestamp of when this identity was constructed.
	Date string
	// TZ is the timezone offset from UTC of when this identity was constructed.
	TZ string
}
// WebLinkInfo describes a link to an external site.
type WebLinkInfo struct {
	// Name is the link name.
	Name string
	// URL is the link URL.
	URL string
	// ImageURL is the URL to the icon of the link.
	ImageURL string `json:"image_url"`
}
// GpgKeyInfo contains information about a GPG public key.
type GpgKeyInfo struct {
	// ID is the 8-char hex GPG key ID. Not set in map context.
	ID string
	// Fingerprint is the 40-char (plus spaces) hex GPG key fingerprint. Not set for deleted
	// keys.
	Fingerprint string
	// UserIDs is a list of OpenPGP User IDs associated with the public key. Not set for
	// deleted keys.
	UserIDs []string `json:"user_ids"`
	// Key is ASCII armored public key material. Not set for deleted keys.
	Key string
	// Status is the result of server-side checks on the key; one of "BAD", "OK", or "TRUSTED".
	// BAD keys have serious problems and should not be used. If a key is OK, inspecting only
	// that key found no problems, but the system does not fully trust the key’s origin. A
	// TRUSTED key is valid, and the system knows enough about the key and its origin to trust
	// it. Not set for deleted keys.
	Status string
	// Problems is a list of human-readable problem strings found in the course of checking
	// whether the key is valid and trusted. Not set for deleted keys.
	Problems []string
}
// CommentInfo contains information about an inline comment.
type CommentInfo struct {
	// PatchSet is the patch set number for the comment; only set in contexts where comments
	// may be returned for multiple patch sets.
	PatchSet int `json:"patch_set"`
	// ID is the URL encoded UUID of the comment.
	ID string
	// Path is the path of the file for which the inline comment was done. Not set if returned
	// in a map where the key is the file path.
	Path string
	// Side is the side on which the comment was added. Allowed values are REVISION and PARENT.
	// If not set, the default is REVISION.
	Side string
	// Parent is the 1-based parent number. Used only for merge commits when side == PARENT.
	// When not set the comment is for the auto-merge tree.
	Parent int
	// Line is the number of the line for which the comment was done. If range is set, this
	// equals the end line of the range. If neither line nor range is set, it’s a file comment.
	Line int
	// Range is the range of the comment as a CommentRange entity. Pointer so an
	// absent range stays nil.
	Range *CommentRange
	// InReplyTo is the URL encoded UUID of the comment to which this comment is a reply.
	InReplyTo string `json:"in_reply_to"`
	// Message is the comment message.
	Message string
	// Updated is the timestamp of when this comment was written.
	Updated string
	// Author is the author of the message as an AccountInfo entity. Unset for draft comments,
	// assumed to be the calling user.
	Author AccountInfo
	// Tag is the value of the tag field from ReviewInput set while posting the review. NOTE:
	// To apply different tags on different votes/comments multiple invocations of the REST
	// call are required.
	Tag string
	// Unresolved is whether or not the comment must be addressed by the user. The state of
	// resolution of a comment thread is stored in the last comment in that thread
	// chronologically.
	Unresolved bool
}
// CommentRange describes the range of an inline comment.
//
// The comment range is a range from the start position, specified by start_line and
// start_character, to the end position, specified by end_line and end_character. The start
// position is inclusive and the end position is exclusive.
//
// So, a range over part of a line will have start_line equal to end_line; however a range with
// end_line set to 5 and end_character equal to 0 will not include any characters on line 5.
type CommentRange struct {
	// StartLine is the start line number of the range. (1-based)
	StartLine int `json:"start_line"`
	// StartCharacter is the character position in the start line. (0-based)
	StartCharacter int `json:"start_character"`
	// EndLine is the end line number of the range. (1-based)
	EndLine int `json:"end_line"`
	// EndCharacter is the character position in the end line. (0-based)
	EndCharacter int `json:"end_character"`
}
const TimeLayout = "2006-01-02 15:04:05.000000000"
// ParseTimestamp converts a timestamp from the Gerrit API to a time.Time in UTC.
func ParseTimestamp(timeStamp string) time.Time {
t, err := time.ParseInLocation(TimeLayout, timeStamp, time.UTC)
if err != nil {
return time.Time{}
}
return t
}
// PatchSetNumber exists to allow parsing one stupid field in RevisionInfo which can end up
// being either a number or the string "edit".
type PatchSetNumber int

// PatchSetIsEdit is the sentinel value stored when the JSON field held "edit".
const PatchSetIsEdit PatchSetNumber = -1

// UnmarshalJSON decodes either a JSON number or the literal string "edit".
func (p *PatchSetNumber) UnmarshalJSON(b []byte) error {
	switch s := string(b); s {
	case "null":
		// Leave *p untouched, matching the encoding/json convention for null.
		return nil
	case `"edit"`:
		*p = PatchSetIsEdit
		return nil
	default:
		num, err := strconv.Atoi(s)
		if err != nil {
			return err
		}
		*p = PatchSetNumber(num)
		return nil
	}
}
|
package search
import (
"encoding/json"
"os"
)
// dateFile is the relative path of the JSON file listing the feed sources.
const dateFile = "data/data.json"

// Feed contains the information about a data source we need to process.
type Feed struct {
	Name string `json:"site"`
	URI  string `json:"link"`
	Type string `json:"type"`
}
//RetrieveFeeds读取并反序列化源数据文件
func RetrieveFeeds()([]*Feed,error) {
//打开 文件
file,err := os.Open(dateFile)
if err != nil{
return nil,err
}
//当函数返回时
//关闭文件
defer file.Close()
//将文件解码到一个切片里
//这个切片的每一项是一个指向一个Feed类型值的指针
var feeds []*Feed
err = json.NewDecoder(file).Decode(&feeds)
//这个函数不需要检查错误,调用者会做这件事
return feeds ,nil
} |
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adb
import (
"bufio"
"context"
"fmt"
"io"
"net"
"sync"
"time"
"github.com/google/gapid/core/app"
"github.com/google/gapid/core/fault"
"github.com/google/gapid/core/log"
)
const (
	// reconnectDelay is the pause between dial attempts while waiting for the
	// forwarded socket to become usable.
	reconnectDelay = time.Millisecond * 100
	// connectionTimeout bounds the total retry time in ForwardAndConnect.
	connectionTimeout = time.Second * 30
	// ErrServiceTimeout is returned when no usable connection was established
	// within connectionTimeout.
	ErrServiceTimeout = fault.Const("Timeout connecting to service")
)
// ForwardAndConnect forwards the local-abstract-socket las and connects to it.
// When the returned ReadCloser is closed the forwarded port is removed.
// The function takes care of the quirky behavior of ADB forwarded sockets.
func ForwardAndConnect(ctx context.Context, d Device, las string) (io.ReadCloser, error) {
	port, err := LocalFreeTCPPort()
	if err != nil {
		return nil, log.Err(ctx, err, "Finding free port")
	}
	if err := d.Forward(ctx, TCPPort(port), NamedAbstractSocket(las)); err != nil {
		return nil, log.Err(ctx, err, "Setting up port forwarding")
	}
	// unforward removes the port forward at most once, whether it is triggered
	// by the returned closer or by app-level cleanup.
	once := sync.Once{}
	unforward := func() {
		once.Do(func() { d.RemoveForward(ctx, port) })
	}
	app.AddCleanup(ctx, unforward)
	start := time.Now()
	for time.Since(start) < connectionTimeout {
		if sock, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port)); err == nil {
			reader := bufio.NewReader(sock)
			// Peek(1) waits until at least one byte is readable (or the
			// connection errors). Presumably the ADB forward accepts the TCP
			// dial before the device-side service is ready, so a successful
			// read is the signal that the connection is genuinely usable —
			// TODO confirm against ADB forwarding semantics.
			if _, err := reader.Peek(1); err == nil {
				close := func() error {
					unforward()
					return sock.Close()
				}
				return readerCustomCloser{reader, close}, nil
			}
			// Not usable yet: drop this socket and retry after a short delay.
			sock.Close()
		}
		time.Sleep(reconnectDelay)
	}
	return nil, log.Errf(ctx, ErrServiceTimeout, "")
}
// readerCustomCloser pairs an io.Reader with a custom close callback so that
// closing the reader can also tear down associated resources (here: the
// forwarded ADB port plus the underlying socket).
type readerCustomCloser struct {
	io.Reader
	// onClose is invoked by Close; its error is returned to the caller.
	onClose func() error
}

// Close invokes the custom close callback.
func (r readerCustomCloser) Close() error {
	return r.onClose()
}
|
package main
import (
"reflect"
"testing"
)
// TestSmallest is a table-driven test for Smallest, checking the returned
// triple (rearranged number, from-index, to-index) against known inputs.
func TestSmallest(t *testing.T) {
	type args struct {
		n int64
	}
	tests := []struct {
		name string
		args args
		want []int64
	}{
		{name: "1", args: args{n: 261235}, want: []int64{126235, 2, 0}},
		{name: "2", args: args{n: 209917}, want: []int64{29917, 0, 1}},
		{name: "3", args: args{n: 132}, want: []int64{123, 1, 2}},
		// Fixed duplicate subtest name: this case was also labelled "3",
		// which made the two subtests indistinguishable with `go test -run`.
		{name: "4", args: args{n: 111111111}, want: []int64{111111111, 0, 0}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := Smallest(tt.args.n); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Smallest() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package printer
// PrintableList is implemented by collections that can render themselves in
// several output formats.
// NOTE(review): Go naming convention would spell these ToJSON/ToHTML
// (initialisms keep case), but renaming would break existing implementers.
type PrintableList interface {
	// ToJson returns the list serialized as JSON.
	ToJson() string
	// ToHtml returns the list rendered as HTML.
	ToHtml() string
	// ToTable returns the list rendered as a plain-text table.
	ToTable() string
}
// Printable is an empty marker interface; every type satisfies it.
// TODO(review): confirm whether methods were intended here or whether this is
// a deliberate placeholder.
type Printable interface {
}
|
package mining
import (
"../account"
"../block"
"log"
"testing"
)
// TestMineBig exercises MineBig end to end: it creates a fresh account, builds
// a test block, restores the account's private key, and mines the block.
// It makes no assertions — it only checks that mining completes without
// panicking, logging the resulting block for inspection.
func TestMineBig(t *testing.T) {
	miner := account.CreateAccount()
	bl := block.GetTestBlock()
	privKey := account.RestorePrivKey(miner.PrivateKey)
	log.Print("Start minig...")
	MineBig(&bl, privKey)
	log.Print(bl)
}
|
package main
import (
"fmt"
"github.com/abnereel/lottery/bootstrap"
"github.com/abnereel/lottery/web/middleware/identity"
"github.com/abnereel/lottery/web/routes"
)
// port is the TCP port the HTTP server listens on.
var port = 8080

// newApp builds and configures the application bootstrapper: it initializes
// the app, runs its bootstrap step, and registers the identity middleware and
// route configuration.
// NOTE(review): Boostrap/Congifure are misspelled names defined in the
// bootstrap/routes packages; they cannot be corrected here without changing
// those packages.
func newApp() *bootstrap.Bootstrapper {
	// Initialize the application.
	app := bootstrap.New("Go抽奖系统", "Abner")
	app.Boostrap()
	app.Configure(identity.Configure, routes.Congifure)
	return app
}
// main constructs the application and starts listening on the configured port.
func main() {
	app := newApp()
	app.Listen(fmt.Sprintf(":%d", port))
}
|
package filecreator
import (
	"io/ioutil"
	"log"
	"os"
	"strings"

	_ "github.com/joho/godotenv/autoload"
)
// ImportHTMLToFile writes the given HTML code to a temp file named after the
// token, replacing any existing file for the same token.
// Files have to be created there because they have to be in the same level or further than the main index.html which is in public directory
func ImportHTMLToFile(code string, token string) {
	pathToFile := tempFilesDir + "temp-" + token + ".html"
	if fileExists(pathToFile) {
		RemoveFile(token)
		log.Println("REMOVING FILE")
	}
	f, err := os.Create(pathToFile)
	if err != nil {
		log.Fatal("Error creating: ", err)
	}
	n, err := f.WriteString(code)
	if err != nil {
		// Best-effort close before exiting. The previous code placed Close
		// AFTER log.Fatal, where it could never execute.
		f.Close()
		log.Fatal(err)
	}
	log.Println(n, "bytes written successfully")
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}
// fileExists reports whether path exists. Matching the original behavior, any
// Stat error other than "not exist" (e.g. a permission error) still yields
// true.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
// RemoveFile deletes the temp HTML file for the given token and returns an
// HTTP-style status code: 200 on success, 500 on failure.
func RemoveFile(token string) int {
	if err := os.Remove(tempFilesDir + "temp-" + token + ".html"); err != nil {
		// Previously log.Fatal terminated the process here, making the
		// `return 500` unreachable dead code. The 500 return shows the intent
		// was to report failure to the caller, so log and return instead.
		log.Println("Error removing file", err)
		return 500
	}
	return 200
}
// tempFilesDir is the directory temp HTML files are written to. It is
// populated by GetTempFilesDir and read by the other functions in this file.
var tempFilesDir string

// GetTempFilesDir loads the TEMP_FILES_DIR environment variable into
// tempFilesDir and logs the value. Call it before any file operations;
// otherwise tempFilesDir is the empty string.
func GetTempFilesDir() {
	tempFilesDir = os.Getenv("TEMP_FILES_DIR")
	log.Println("TEMP_FILES_DIR: ", tempFilesDir)
}
// RemoveAllFiles removes or indexes temporary files in tempFilesDir.
//
// When wipe is true, every file whose name starts with "temp-" AND that was
// recorded in oldMap is deleted. When wipe is false, nothing is removed;
// instead every current "temp-" file is recorded in the returned map so a
// later wipe pass can delete it.
//
// The returned map is oldMap itself, mutated in place (original behavior).
func RemoveAllFiles(wipe bool, oldMap map[string]bool) map[string]bool {
	log.Println("Removing unecessary temp files in: ", tempFilesDir)
	files, err := ioutil.ReadDir(tempFilesDir)
	if err != nil {
		log.Fatal("Error reading files: ", err)
	}
	resultMap := oldMap
	for _, f := range files {
		// strings.HasPrefix fixes a crash: the previous f.Name()[:5] slice
		// expression panicked on file names shorter than five bytes.
		if !strings.HasPrefix(f.Name(), "temp-") {
			continue
		}
		if wipe {
			log.Println("Wipe: ", f.Name()[:5])
			if resultMap[f.Name()] {
				if err := os.Remove(tempFilesDir + f.Name()); err != nil {
					log.Fatal(err)
				}
			}
		} else {
			log.Println("Wipe check: ", f.Name()[:5])
			resultMap[f.Name()] = true
		}
	}
	return resultMap
}
|
// Homework 4: Concurrency
// Due February 21, 2017 at 11:59pm
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"sync"
)
// main is a scratch entry point for manually exercising the homework
// functions. It prints a multilingual greeting; map iteration order is
// random, so the line order varies between runs.
func main() {
	// Feel free to use the main function for testing your functions
	hello := map[string]string{
		"こんにちは": "世界",
		"你好": "世界",
		"안녕하세요": "세계",
	}
	for k, v := range hello {
		// NOTE(review): strings.Title is deprecated since Go 1.18; it is
		// effectively a no-op for these uncased scripts anyway.
		fmt.Printf("%s, %s\n", strings.Title(k), v)
	}
}
// Problem 1a: File processing
// You will be provided an input file consisting of integers, one on each line.
// Your task is to read the input file, sum all the integers, and write the
// result to a separate file.
// FileSum sums the integers in input and writes them to an output file.
// The two parameters, input and output, are the filenames of those files.
// You should expect your input to end with a newline, and the output should
// have a newline after the result.
func FileSum(input, output string) {
// Open input file
fi, err := os.Open(input)
defer fi.Close()
if err != nil {
log.Fatal(err)
}
// Open output file
// fo, err := os.Open(output)
fo, err := os.OpenFile(output, os.O_WRONLY|os.O_CREATE, 0644)
defer fo.Close()
if err != nil {
log.Fatal(err)
}
// SUM
sum := 0
// Read buffer
scanner := bufio.NewScanner(fi)
for scanner.Scan() {
num, err := strconv.Atoi(scanner.Text())
if err != nil {
log.Fatalf("strconv.Atoi() error: %v\n", err)
}
sum += num
}
if err := scanner.Err(); err != nil {
log.Fatal("scanner error:", err)
}
// Write buffer
writer := bufio.NewWriter(fo)
writer.WriteString(strconv.Itoa(sum))
writer.Flush()
}
// Problem 1b: IO processing with interfaces
// You must do the exact same task as above, but instead of being passed 2
// filenames, you are passed 2 interfaces: io.Reader and io.Writer.
// See https://golang.org/pkg/io/ for information about these two interfaces.
// Note that os.Open returns an io.Reader, and os.Create returns an io.Writer.
// IOSum sums the integers in input and writes them to output
// The two parameters, input and output, are interfaces for io.Reader and
// io.Writer. The type signatures for these interfaces is in the Go
// documentation.
// You should expect your input to end with a newline, and the output should
// have a newline after the result.
func IOSum(input io.Reader, output io.Writer) {
// SUM
sum := 0
// Read buffer
scanner := bufio.NewScanner(input)
for scanner.Scan() {
num, err := strconv.Atoi(scanner.Text())
if err != nil {
log.Fatalf("strconv.Atoi() error: %v\n", err)
}
sum += num
}
if err := scanner.Err(); err != nil {
log.Fatal("scanner error:", err)
}
// Write buffer
writer := bufio.NewWriter(output)
writer.WriteString(strconv.Itoa(sum))
writer.Flush()
}
// Problem 2: Concurrent map access
// Maps in Go [are not safe for concurrent use](https://golang.org/doc/faq#atomic_maps).
// For this assignment, you will be building a custom map type that allows for
// concurrent access to the map using mutexes.
// The map is expected to have concurrent readers but only 1 writer can have
// access to the map.
// PennDirectory is a mapping from PennID number to PennKey (12345678 -> adelq).
// You may only add *private* fields to this struct.
// Hint: Use an embedded sync.RWMutex, see lecture 2 for a review on embedding
type PennDirectory struct {
	// mu guards directory: many concurrent readers, one writer.
	// Because of the mutex, PennDirectory must not be copied; use *PennDirectory.
	mu sync.RWMutex
	// directory maps PennID -> PennKey.
	// NOTE(review): no constructor allocates this map here; writes to a nil
	// map panic — confirm where it is initialized before use.
	directory map[int]string
}
// Add inserts a new student to the Penn Directory.
// Add should obtain a write lock, and should not allow any concurrent reads or
// writes to the map.
// You may NOT write over existing data - simply raise a warning.
func (d *PennDirectory) Add(id int, name string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Lazily allocate the map so a zero-value PennDirectory is usable;
	// writing to a nil map would panic.
	if d.directory == nil {
		d.directory = make(map[int]string)
	}
	if n, ok := d.directory[id]; ok {
		// Fixed format string: it previously passed two args (id, n) but had
		// only one verb, printing a %!(EXTRA ...) artifact.
		log.Printf("Warning: entry existed id:%d name:%s\n", id, n)
	} else {
		d.directory[id] = name
	}
}
// Get fetches a student from the Penn Directory by their PennID.
// Get should obtain a read lock, and should allow concurrent read access but
// not write access.
// A missing id logs a warning and returns the empty string.
func (d *PennDirectory) Get(id int) string {
	d.mu.RLock()
	defer d.mu.RUnlock()
	if name, ok := d.directory[id]; ok {
		return name
	}
	log.Printf("Warning: no such entry existed")
	return ""
}
// Remove deletes a student to the Penn Directory.
// Remove should obtain a write lock, and should not allow any concurrent reads
// or writes to the map.
// Removing an id that is not present logs a warning and changes nothing.
func (d *PennDirectory) Remove(id int) {
	d.mu.Lock()
	defer d.mu.Unlock()
	_, ok := d.directory[id]
	if !ok {
		log.Printf("Warning: no such entry existed")
		return
	}
	delete(d.directory, id)
}
|
// Copyright 2021 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package state
import (
"context"
"sort"
"testing"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
"go.chromium.org/luci/common/clock/testclock"
"go.chromium.org/luci/common/errors"
cfgpb "go.chromium.org/luci/cv/api/config/v2"
"go.chromium.org/luci/cv/internal/changelist"
"go.chromium.org/luci/cv/internal/cvtesting"
"go.chromium.org/luci/cv/internal/prjmanager"
"go.chromium.org/luci/cv/internal/prjmanager/impl/state/componentactor"
"go.chromium.org/luci/cv/internal/prjmanager/pmtest"
"go.chromium.org/luci/cv/internal/prjmanager/prjpb"
"go.chromium.org/luci/cv/internal/prjmanager/runcreator"
"go.chromium.org/luci/cv/internal/run"
. "github.com/smartystreets/goconvey/convey"
. "go.chromium.org/luci/common/testing/assertions"
)
// TestEarliestDecisionTime checks that earliestDecisionTime returns the
// minimum non-nil DecisionTime across components, and that its two return
// values stay consistent (zero time.Time <=> nil proto timestamp).
func TestEarliestDecisionTime(t *testing.T) {
	t.Parallel()
	Convey("earliestDecisionTime works", t, func() {
		// earliest calls the function under test, asserts the time.Time and
		// timestamppb results agree, and returns the time.Time form.
		earliest := func(cs []*prjpb.Component) time.Time {
			t, tPB := earliestDecisionTime(cs)
			if t.IsZero() {
				So(tPB, ShouldBeNil)
			} else {
				So(tPB.AsTime(), ShouldResemble, t)
			}
			return t
		}
		t0 := testclock.TestRecentTimeUTC
		cs := []*prjpb.Component{
			{DecisionTime: nil},
		}
		// All-nil decision times yield the zero time.
		So(earliest(cs), ShouldResemble, time.Time{})
		cs = append(cs, &prjpb.Component{DecisionTime: timestamppb.New(t0.Add(time.Second))})
		So(earliest(cs), ShouldResemble, t0.Add(time.Second))
		// A component without a decision time must not affect the minimum.
		cs = append(cs, &prjpb.Component{})
		So(earliest(cs), ShouldResemble, t0.Add(time.Second))
		// A later time must not lower the minimum ...
		cs = append(cs, &prjpb.Component{DecisionTime: timestamppb.New(t0.Add(time.Hour))})
		So(earliest(cs), ShouldResemble, t0.Add(time.Second))
		// ... but an earlier one must.
		cs = append(cs, &prjpb.Component{DecisionTime: timestamppb.New(t0)})
		So(earliest(cs), ShouldResemble, t0)
	})
}
func TestComponentsActions(t *testing.T) {
t.Parallel()
Convey("Component actions logic work in the abstract", t, func() {
ct := cvtesting.Test{}
ctx, cancel := ct.SetUp()
defer cancel()
now := ct.Clock.Now()
const lProject = "luci-project"
// scanComponents needs config to exist, but this test doesn't actually care
// about what's inside due to mock componentActor.
ct.Cfg.Create(ctx, lProject, &cfgpb.Config{ConfigGroups: []*cfgpb.ConfigGroup{
{
Name: "main",
Gerrit: []*cfgpb.ConfigGroup_Gerrit{
{
Url: "https://example.com",
Projects: []*cfgpb.ConfigGroup_Gerrit_Project{
{Name: "re/po"},
},
},
},
},
}})
meta := ct.Cfg.MustExist(ctx, lProject)
state := &State{
PB: &prjpb.PState{
LuciProject: lProject,
Status: prjpb.Status_STARTED,
ConfigHash: meta.Hash(),
Pcls: []*prjpb.PCL{
{Clid: 1},
{Clid: 2},
{Clid: 3},
{Clid: 999},
},
Components: []*prjpb.Component{
{Clids: []int64{999}}, // never sees any action.
{Clids: []int64{1}},
{Clids: []int64{2}},
{Clids: []int64{3}, DecisionTime: timestamppb.New(now.Add(3 * time.Minute))},
},
NextEvalTime: timestamppb.New(now.Add(3 * time.Minute)),
},
PMNotifier: prjmanager.NewNotifier(ct.TQDispatcher),
RunNotifier: run.NewNotifier(ct.TQDispatcher),
}
pb := backupPB(state)
makeDirtySetup := func(indexes ...int) {
for _, i := range indexes {
state.PB.GetComponents()[i].Dirty = true
}
pb = backupPB(state)
}
unDirty := func(c *prjpb.Component) *prjpb.Component {
So(c.GetDirty(), ShouldBeTrue)
o := c.CloneShallow()
o.Dirty = false
return o
}
Convey("noop at preevaluation", func() {
state.testComponentActorFactory = (&componentActorSetup{}).factory
as, cs, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
So(as, ShouldBeNil)
So(cs, ShouldBeNil)
So(state.PB, ShouldResembleProto, pb)
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldBeNil)
So(state2, ShouldEqual, state) // pointer comparison
So(sideEffect, ShouldBeNil)
So(pmtest.ETAsOF(ct.TQ.Tasks(), lProject), ShouldBeEmpty)
})
})
Convey("updates future DecisionTime in scan", func() {
makeDirtySetup(1, 2, 3)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) {
switch cl {
case 1:
return time.Time{}, nil
case 2:
return now.Add(2 * time.Minute), nil
case 3:
return state.PB.Components[3].GetDecisionTime().AsTime(), nil // same
}
panic("unrechable")
},
}).factory
actions, components, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
So(actions, ShouldBeNil)
So(components, ShouldResembleProto, []*prjpb.Component{
pb.GetComponents()[0], // #999 unchanged
unDirty(pb.GetComponents()[1]),
{Clids: []int64{2}, DecisionTime: timestamppb.New(now.Add(2 * time.Minute))},
unDirty(pb.GetComponents()[3]),
})
So(state.PB, ShouldResembleProto, pb)
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldBeNil)
So(sideEffect, ShouldBeNil)
pb.Components = components
pb.NextEvalTime = timestamppb.New(now.Add(2 * time.Minute))
So(state2.PB, ShouldResembleProto, pb)
So(pmtest.ETAsWithin(ct.TQ.Tasks(), lProject, time.Second, now.Add(2*time.Minute)), ShouldNotBeEmpty)
})
})
Convey("purges CLs", func() {
makeDirtySetup(1, 2, 3)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) { return now, nil },
purgeCLs: []int64{1, 3},
}).factory
actions, components, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
So(actions, ShouldHaveLength, 3)
So(components, ShouldResembleProto, pb.GetComponents())
So(state.PB, ShouldResembleProto, pb)
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldBeNil)
expectedDeadline := timestamppb.New(now.Add(maxPurgingCLDuration))
So(state2.PB.GetPurgingCls(), ShouldResembleProto, []*prjpb.PurgingCL{
{Clid: 1, OperationId: "1580640000-1", Deadline: expectedDeadline},
{Clid: 3, OperationId: "1580640000-3", Deadline: expectedDeadline},
})
So(sideEffect, ShouldHaveSameTypeAs, &TriggerPurgeCLTasks{})
ps := sideEffect.(*TriggerPurgeCLTasks).payloads
So(ps, ShouldHaveLength, 2)
// Unlike PB.PurgingCls, the tasks aren't necessarily sorted.
sort.Slice(ps, func(i, j int) bool { return ps[i].GetPurgingCl().GetClid() < ps[j].GetPurgingCl().GetClid() })
So(ps[0].GetPurgingCl(), ShouldResembleProto, state2.PB.GetPurgingCls()[0]) // CL#1
So(ps[0].GetTrigger(), ShouldResembleProto, state2.PB.GetPcls()[1 /*CL#1*/].GetTrigger())
So(ps[0].GetLuciProject(), ShouldEqual, lProject)
So(ps[1].GetPurgingCl(), ShouldResembleProto, state2.PB.GetPurgingCls()[1]) // CL#3
})
})
Convey("partial failure in scan", func() {
makeDirtySetup(1, 2, 3)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) {
switch cl {
case 1:
return time.Time{}, errors.New("oops1")
case 2, 3:
return now, nil
}
panic("unrechable")
},
}).factory
actions, components, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
So(components, ShouldResembleProto, []*prjpb.Component{
pb.GetComponents()[0], // #999 unchanged
{Clids: []int64{1}, Dirty: true, DecisionTime: timestamppb.New(now)},
pb.GetComponents()[2], // #2 unchanged
pb.GetComponents()[3], // #3 unchanged
})
So(state.PB, ShouldResembleProto, pb)
_, err = state.execComponentActions(ctx, actions, components)
So(err, ShouldBeNil)
// Must modify passed components only.
So(state.PB, ShouldResembleProto, pb)
So(components, ShouldResembleProto, []*prjpb.Component{
pb.GetComponents()[0],
{Clids: []int64{1}, Dirty: true, DecisionTime: timestamppb.New(now)}, // errored on
{Clids: []int64{2}}, // acted upon
{Clids: []int64{3}}, // acted upon
})
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldBeNil)
So(sideEffect, ShouldBeNil)
pb.Components = components
pb.NextEvalTime = timestamppb.New(now)
So(state2.PB, ShouldResembleProto, pb)
// Self-poke task must be scheduled for earliest possible from now.
So(pmtest.ETAsWithin(ct.TQ.Tasks(), lProject, time.Second, now.Add(prjpb.PMTaskInterval)), ShouldNotBeEmpty)
})
})
Convey("100% failure in scan", func() {
makeDirtySetup(1, 2)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) {
switch cl {
case 1, 2:
return time.Time{}, errors.New("oops")
}
panic("unrechable")
},
}).factory
_, _, err := state.scanComponents(ctx)
So(err, ShouldErrLike, "oops")
So(state.PB, ShouldResembleProto, pb)
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldNotBeNil)
So(sideEffect, ShouldBeNil)
So(state2, ShouldBeNil)
})
})
Convey("partial failure in exec", func() {
makeDirtySetup(1, 2, 3)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) { return now, nil },
actErrOnCLs: []int64{1, 2},
}).factory
actions, components, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
_, err = state.execComponentActions(ctx, actions, components)
So(err, ShouldBeNil)
// Must modify passed components only.
So(state.PB, ShouldResembleProto, pb)
So(components, ShouldResembleProto, []*prjpb.Component{
pb.GetComponents()[0], // #999 unchanged
{Clids: []int64{1}, Dirty: true, DecisionTime: timestamppb.New(now)}, // errored on
{Clids: []int64{2}, Dirty: true, DecisionTime: timestamppb.New(now)}, // errored on
{Clids: []int64{3}}, // acted upon
})
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldBeNil)
So(sideEffect, ShouldBeNil)
pb.Components = components
pb.NextEvalTime = timestamppb.New(now)
So(state2.PB, ShouldResembleProto, pb)
// Self-poke task must be scheduled for earliest possible from now.
So(pmtest.ETAsWithin(ct.TQ.Tasks(), lProject, time.Second, now.Add(prjpb.PMTaskInterval)), ShouldNotBeEmpty)
})
})
Convey("100% failure in exec", func() {
makeDirtySetup(1, 2, 3)
state.testComponentActorFactory = (&componentActorSetup{
nextAction: func(cl int64, now time.Time) (time.Time, error) { return now, nil },
actErrOnCLs: []int64{1, 2, 3},
}).factory
actions, components, err := state.scanComponents(ctx)
So(err, ShouldBeNil)
_, err = state.execComponentActions(ctx, actions, components)
So(err, ShouldErrLike, "act-oops")
So(state.PB, ShouldResembleProto, pb)
Convey("ExecDeferred", func() {
state2, sideEffect, err := state.ExecDeferred(ctx)
So(err, ShouldNotBeNil)
So(sideEffect, ShouldBeNil)
So(state2, ShouldBeNil)
})
})
})
}
// componentActorSetup configures the fake component actor used by these
// tests: nextAction decides a component's next decision time, while
// actErrOnCLs / purgeCLs select CLs whose actions must fail or emit purges.
type componentActorSetup struct {
	// nextAction receives the component's first CL ID and the current time.
	nextAction func(clid int64, now time.Time) (time.Time, error)
	// actErrOnCLs lists CL IDs whose Act() call returns an error.
	actErrOnCLs []int64
	// purgeCLs lists CL IDs whose Act() call emits a PurgeCLTask.
	purgeCLs []int64
}
// factory satisfies the test component-actor-factory signature, producing a
// testCActor bound to the given component. The Supporter argument is unused.
func (s *componentActorSetup) factory(c *prjpb.Component, _ componentactor.Supporter) componentActor {
	return &testCActor{s, c}
}
// testCActor is a stub componentActor whose behavior is driven entirely by
// its parent componentActorSetup.
type testCActor struct {
	parent *componentActorSetup // shared test configuration
	c      *prjpb.Component     // component this actor operates on
}
// NextActionTime delegates to the setup's nextAction callback, keyed by the
// component's first CL ID.
func (t *testCActor) NextActionTime(_ context.Context, now time.Time) (time.Time, error) {
	clid := t.c.GetClids()[0]
	return t.parent.nextAction(clid, now)
}
// Act simulates acting on the component:
//   - returns an error for components whose first CL is in actErrOnCLs;
//   - returns a purge task (OwnerLacksEmail reason) for components whose
//     first CL is in purgeCLs;
//   - otherwise returns a clean (non-dirty, no decision time) shallow clone.
// The PM/RM arguments are ignored by this stub.
func (t *testCActor) Act(context.Context, runcreator.PM, runcreator.RM) (*prjpb.Component, []*prjpb.PurgeCLTask, error) {
	for _, clid := range t.parent.actErrOnCLs {
		if t.c.GetClids()[0] == clid {
			return nil, nil, errors.Reason("act-oops %v", t.c).Err()
		}
	}
	// Successful action: clear dirty bit and decision time on a shallow copy
	// so the original component is not mutated.
	c := t.c.CloneShallow()
	c.Dirty = false
	c.DecisionTime = nil
	for _, clid := range t.parent.purgeCLs {
		if t.c.GetClids()[0] == clid {
			ps := []*prjpb.PurgeCLTask{{
				PurgingCl: &prjpb.PurgingCL{
					Clid: clid,
				},
				Reasons: []*changelist.CLError{
					{Kind: &changelist.CLError_OwnerLacksEmail{OwnerLacksEmail: true}},
				},
			}}
			return c, ps, nil
		}
	}
	return c, nil, nil
}
|
package main
import "flag"
// equipo is the path to the JSON file describing the team to analyze.
var equipo = flag.String("equipo", "resuelve.json", "El equipo a analizar")

// niveles is the path to the JSON file with the levels used for calculation.
var niveles = flag.String("niveles", "niveles.json", "Los niveles con los cuales calcular")

// main parses CLI flags and hands both file paths to abrir (defined elsewhere).
func main() {
	flag.Parse()
	abrir(*equipo, *niveles)
}
|
package create
import (
"encoding/json"
"fmt"
"net/http"
"github.com/ocoscope/face/db"
"github.com/ocoscope/face/utils"
"github.com/ocoscope/face/utils/answer"
"github.com/ocoscope/face/utils/recognition"
)
// Visit handles a POST request that registers a visit event (arrival,
// departure, lunch, break, ...) for a user, identifying the user by face
// recognition against the company's face collection.
//
// Flow: decode body -> open company DB -> auth + role checks -> if the user
// has no registered face, register it and record an arrival -> otherwise
// match the face, derive the next visit status from the last one, persist.
func Visit(w http.ResponseWriter, r *http.Request) {
	// Expected JSON request payload.
	type tbody struct {
		UserID, CompanyID  int64
		StatusVisit        uint
		AccessToken, Photo string
	}
	var body tbody
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		fmt.Println(err)
		utils.Message(w, answer.WRONG_DATA, 400)
		return
	}
	// Each company has its own database.
	database, err := db.CopmanyDB(body.CompanyID)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.NOT_FOUND_COMPANY, 400)
		return
	}
	defer database.Close()
	if err = db.CheckUserAccessToken(database, body.UserID, body.AccessToken); err != nil {
		fmt.Println(err)
		utils.Message(w, answer.UNAUTHORIZED, 401)
		return
	}
	// Only role ID 1 may record visits (role semantics defined elsewhere —
	// TODO confirm which role this is).
	userRoleID, err := db.GetUserRoleID(database, body.UserID)
	if err != nil || userRoleID != 1 {
		fmt.Println(err)
		utils.Message(w, answer.ACCESS_DENIED, 400)
		return
	}
	collection, err := db.GetCompanyCollection(body.CompanyID)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.FR, 500)
		return
	}
	// Visit status IDs.
	const (
		_prihod         uint = 1 // arrival
		_uhod           uint = 2 // departure
		_go_lunch       uint = 3 // leaving for lunch
		_from_lunch     uint = 4 // back from lunch
		_go_departure   uint = 5 // leaving on external business
		_from_departure uint = 6 // back from external business
		_go_respite     uint = 7 // leaving for a break
		_from_respite   uint = 8 // back from a break
	)
	userFace, err := db.GetUserFace(database, body.UserID)
	// If the user has no registered face yet, register it now.
	if !utils.ParseStrToBoolean(userFace) || err != nil {
		result, err := recognition.FaceSave(body.Photo, collection)
		if err != nil {
			fmt.Println(err)
			utils.Message(w, answer.N_RECORD, 500)
			return
		}
		if err = db.UserFaceUpdate(database, *result.FaceRecords[0].Face.FaceId, body.UserID); err != nil {
			fmt.Println(err)
			utils.Message(w, answer.F_REGISTER_FACE_DATA, 500)
			return
		}
		// First ever visit is recorded as an arrival.
		if _, err = db.CreateVisit(database, body.UserID, _prihod); err != nil {
			fmt.Println(err)
			utils.Message(w, answer.N_RECORD, 500)
			return
		}
		utils.Message(w, answer.FIRST_CREATE_VISIT, 200)
		return
	}
	result, err := recognition.SearchImageInCollection(body.Photo, collection)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.F_FACE, 400)
		return
	}
	// Require exactly one face match.
	if len(result.FaceMatches) == 0 || len(result.FaceMatches) > 1 {
		utils.Message(w, answer.U_DONT_IN_SYSTEM, 400)
		return
	}
	userFaceID, err := db.SearchByPhoto(database, *result.FaceMatches[0].Face.FaceId)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.U_DONT_IN_SYSTEM, 400)
		return
	}
	// NOTE(review): the lookup above resolves userFaceID from the photo, yet
	// the status lookup below uses body.UserID — confirm the two always agree.
	lastVisitStatus, err := db.GetUserNowDayLastVisitStatus(database, body.UserID)
	// No visit recorded yet today: record an arrival.
	if err != nil || !utils.ParseIntToBool(lastVisitStatus) {
		fmt.Println(err)
		if _, err = db.CreateVisit(database, userFaceID, _prihod); err != nil {
			fmt.Println(err)
			utils.Message(w, answer.N_RECORD, 500)
			return
		}
		// NOTE(review): this responds with the stale/zero lastVisitStatus
		// rather than the _prihod status just written — confirm intended.
		utils.MessageResult(w, lastVisitStatus, 200)
		return
	}
	// Derive the visit type to assign from the last recorded one.
	switch lastVisitStatus {
	case _prihod:
		lastVisitStatus = _uhod
	case _go_lunch:
		lastVisitStatus = _from_lunch
	case _from_lunch:
		lastVisitStatus = _uhod
	case _go_departure:
		lastVisitStatus = _from_departure
	case _from_departure:
		lastVisitStatus = _uhod
	case _go_respite:
		lastVisitStatus = _from_respite
	case _from_respite:
		lastVisitStatus = _uhod
	default:
		lastVisitStatus = _prihod
	}
	_, err = db.CreateVisit(database, userFaceID, lastVisitStatus)
	if err != nil {
		fmt.Println(err)
		utils.Message(w, answer.N_RECORD, 500)
		return
	}
	utils.MessageResult(w, lastVisitStatus, 200)
}
|
package redisearch
import (
"sort"
"strings"
)
const (
field_tokenization = ",.<>{}[]\"':;!@#$%^&*()-+=~"
)
// Document represents a single document to be indexed or returned from a query.
// Besides a score and id, the Properties are completely arbitrary
type Document struct {
	Id         string
	Score      float32
	Payload    []byte
	Properties map[string]interface{}
}

// NewDocument creates a document with the specific id and score
func NewDocument(id string, score float32) Document {
	doc := Document{
		Id:         id,
		Score:      score,
		Properties: map[string]interface{}{},
	}
	return doc
}

// SetPayload Sets the document payload
func (d *Document) SetPayload(payload []byte) {
	d.Payload = payload
}

// Set sets a property and its value in the document
func (d Document) Set(name string, value interface{}) Document {
	d.Properties[name] = value
	return d
}
// All punctuation marks and whitespaces (besides underscores) separate the document and queries into tokens.
// e.g. any character of `,.<>{}[]"':;!@#$%^&*()-+=~` will break the text into terms.
// So the text `foo-bar.baz...bag` will be tokenized into `[foo, bar, baz, bag]`
// Escaping separators in both queries and documents is done by prepending a backslash to any separator.
// e.g. the text `hello\-world hello-world` will be tokenized as `[hello-world, hello, world]`.
// **NOTE** that in most languages you will need an extra backslash when formatting the document or query,
// to signify an actual backslash, so the actual text in redis-cli for example, will be entered as `hello\\-world`.
// Underscores (`_`) are not used as separators in either document or query.
// So the text `hello_world` will remain as is after tokenization.
//
// EscapeTextFileString returns value with every separator character from
// field_tokenization prefixed by a backslash. It runs in a single pass over
// the input instead of one strings.Replace pass per separator character.
func EscapeTextFileString(value string) string {
	var b strings.Builder
	b.Grow(len(value)) // at least the input size; escapes only grow it
	for _, r := range value {
		if strings.ContainsRune(field_tokenization, r) {
			b.WriteByte('\\')
		}
		b.WriteRune(r)
	}
	return b.String()
}
// DocumentList is used to sort documents by descending score
type DocumentList []Document

// Len returns the number of documents (sort.Interface).
func (l DocumentList) Len() int { return len(l) }

// Swap exchanges the documents at indexes i and j (sort.Interface).
func (l DocumentList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// Less reports whether document i scores higher than document j, which
// yields descending order under sort.Sort (sort.Interface).
func (l DocumentList) Less(i, j int) bool { return l[i].Score > l[j].Score } //reverse sorting

// Sort the DocumentList
func (l DocumentList) Sort() {
	sort.Sort(l)
}
// EstimateSize returns a rough in-memory size of the document in bytes: the
// id, the payload, and for each property its key plus the value length when
// the value is a string, []byte or []rune. Properties of any other type
// (numbers, bools, ...) contribute only their key length.
func (d *Document) EstimateSize() (sz int) {
	sz = len(d.Id)
	if d.Payload != nil {
		sz += len(d.Payload)
	}
	for k, v := range d.Properties {
		sz += len(k)
		switch s := v.(type) {
		case string:
			sz += len(s)
		case []byte:
			sz += len(s)
		case []rune:
			// NOTE: len counts runes here, not bytes.
			sz += len(s)
		}
	}
	return
}
|
package metas
import (
"fmt"
"testing"
)
// Test_RangePartitionItem builds a regular and a MAXVALUE range-partition
// item from mixed-type values and prints their meta strings.
func Test_RangePartitionItem(t *testing.T) {
	name1 := "p_01"
	values := []interface{}{0, '1', "a"}
	part1 := NewRangePartitionItem(name1, values, false)
	part1Str, err := part1.GetMetaStr()
	if err != nil {
		// t.Fatal(err) is idiomatic: it formats the error itself and does
		// not panic on a nil error, unlike err.Error().
		t.Fatal(err)
	}
	fmt.Println(part1Str)

	nameMax := "p_max"
	partMax := NewRangePartitionItem(nameMax, values, true)
	partMaxStr, err := partMax.GetMetaStr()
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(partMaxStr)
}
// Test_RangePartition assembles several range-partition items (plus a
// MAXVALUE item) into one partition and prints its meta string.
func Test_RangePartition(t *testing.T) {
	rangePartition := NewRangePartition()
	part1 := NewRangePartitionItem("p_01", []interface{}{1, 2, 3, 4}, false)
	part2 := NewRangePartitionItem("p_02", []interface{}{"1", "2", "3", "4"}, false)
	part3 := NewRangePartitionItem("p_03", []interface{}{5, 6, 7, 8}, false)
	part4 := NewRangePartitionItem("p_04", []interface{}{"a", "B", "c", "d"}, false)
	partMax := NewRangePartitionItem("p_max", []interface{}{1, 2, 3, 4}, true)
	rangePartition.AddPartitionItem(part1).AddPartitionItem(part2).AddPartitionItem(part3).AddPartitionItem(part4).AddPartitionItem(partMax)
	partStr, err := rangePartition.GetMetaStr()
	if err != nil {
		// Idiomatic: pass the error value, not err.Error().
		t.Fatal(err)
	}
	fmt.Println(partStr)
}
// Test_ListPartitionItem builds a list-partition item from mixed-type
// values and prints its meta string.
func Test_ListPartitionItem(t *testing.T) {
	part1 := NewListPartitionItem("p_1", []interface{}{1, 2, 3, "2"})
	partStr1, err := part1.GetMetaStr()
	if err != nil {
		// Idiomatic: pass the error value, not err.Error().
		t.Fatal(err)
	}
	fmt.Println(partStr1)
}
// Test_ListPartition assembles several list-partition items into one
// partition and prints its meta string.
func Test_ListPartition(t *testing.T) {
	listPartition := NewListPartition()
	part1 := NewListPartitionItem("p_01", []interface{}{1, 2, 3, 4})
	part2 := NewListPartitionItem("p_02", []interface{}{"1", "2", "3", "4"})
	part3 := NewListPartitionItem("p_03", []interface{}{5, 6, 7, 8})
	part4 := NewListPartitionItem("p_04", []interface{}{"a", "B", "c", "d"})
	listPartition.AddPartitionItem(part1).AddPartitionItem(part2).AddPartitionItem(part3).AddPartitionItem(part4)
	partStr, err := listPartition.GetMetaStr()
	if err != nil {
		// Idiomatic: pass the error value, not err.Error().
		t.Fatal(err)
	}
	fmt.Println(partStr)
}
|
package block
import (
"bytes"
"encoding/gob"
"time"
"github.com/yopming/Berify/util"
)
// Block represents a block in the blockchain
type Block struct {
	Timestamp         int64          // Unix time of block creation
	Transactions      []*Transaction // transactions included in the block
	PreviousBlockHash []byte         // hash of the preceding block
	Hash              []byte         // proof-of-work hash of this block
	Nonce             int            // nonce found by proof-of-work
	Height            int            // position of the block in the chain
}
// NewBlock creates and returns a new block: it stamps the current time,
// then mines the block via proof-of-work to fill in Nonce and Hash.
func NewBlock(transactions []*Transaction, previousBlockHash []byte, height int) *Block {
	// Named fields make the mapping to the struct explicit.
	block := &Block{
		Timestamp:         time.Now().Unix(),
		Transactions:      transactions,
		PreviousBlockHash: previousBlockHash,
		Hash:              []byte{},
		Nonce:             0,
		Height:            height,
	}
	// Mine: proof-of-work yields the nonce and final hash.
	pow := NewProofOfWork(block)
	nonce, hash := pow.Run()
	block.Hash = hash[:]
	block.Nonce = nonce
	return block
}
// NewGenesisBlock creates a genesis block: height 0, empty previous hash,
// with the given coinbase transaction as its only content.
func NewGenesisBlock(coinbase *Transaction) *Block {
	return NewBlock([]*Transaction{coinbase}, []byte{}, 0)
}
// HashTransactions returns the hash of the transactions in the block: the
// Merkle-tree root computed over each transaction's serialized form.
func (b *Block) HashTransactions() []byte {
	// Pre-size the leaf slice — the count is known up front, avoiding
	// repeated append growth.
	transactions := make([][]byte, 0, len(b.Transactions))
	for _, tx := range b.Transactions {
		transactions = append(transactions, tx.Serialize())
	}
	merkleTree := util.NewMerkleTree(transactions)
	return merkleTree.RootNode.Data
}
// Serialize serializes the block
// BoltDB values can be only of []byte, so the Block struct need be serialized
// (via encoding/gob). Panics through util.PanicError on encoding failure.
func (b *Block) Serialize() []byte {
	var result bytes.Buffer
	encoder := gob.NewEncoder(&result)
	err := encoder.Encode(b)
	util.PanicError(err)
	return result.Bytes()
}
// DeserializeBlock de-serializes a block, convert []byte to Block.
// Panics through util.PanicError on malformed input.
func DeserializeBlock(d []byte) *Block {
	var block Block
	decoder := gob.NewDecoder(bytes.NewReader(d))
	err := decoder.Decode(&block)
	util.PanicError(err)
	return &block
}
|
package deck
import (
	"errors"
	"math/rand"
	"strings"

	"github.com/google/uuid"
	"toggl-card/internal/card"
)
// Deck is defined as a combination of card objects.
type Deck struct {
	ID        uuid.UUID   `json:"deck_id"`   // unique deck identifier
	Shuffled  bool        `json:"shuffled"`  // whether the deck was created shuffled
	Remaining int         `json:"remaining"` // number of cards left in the deck
	Cards     []card.Card `json:"cards"`
}

// Decks is a slice to store decks since no persistence is implemented
var Decks []Deck
// New creates a default sequential card objets deck from the full default
// card set. NOTE(review): shuffle only records the flag; no shuffling is
// performed here — confirm callers invoke Shuffle themselves.
func New(shuffle bool) Deck {
	cards := card.Default()
	return NewPartial(shuffle, cards)
}
// NewPartial creates a custom card objets deck with a fresh random ID and
// Remaining initialized to the given card count.
func NewPartial(shuffle bool, cards []card.Card) Deck {
	return Deck{ID: uuid.New(), Shuffled: shuffle, Remaining: len(cards), Cards: cards}
}
// SetCards of a deck (replaces the card slice without copying).
func (d *Deck) SetCards(c []card.Card) {
	d.Cards = c
}
// Signature returns a string representing the deck and its card order,
// formed by concatenating each card's code in sequence.
func (d *Deck) Signature() string {
	// strings.Builder avoids the quadratic cost of += concatenation in a loop.
	var b strings.Builder
	for _, c := range d.Cards {
		b.WriteString(c.Code)
	}
	return b.String()
}
// Shuffle deck's cards following Fisher-Yates algorithm (in place, using
// the global math/rand source).
// NOTE(review): d.Shuffled is not updated here — confirm whether the flag
// should track post-creation shuffles.
func (d *Deck) Shuffle() {
	cards := d.Cards
	for i := len(cards) - 1; i > 0; i-- {
		// Pick a random index in [0, i] and swap it into position i.
		r := rand.Intn(i + 1)
		cards[r], cards[i] = cards[i], cards[r]
	}
}
// Draw n cards from the deck if enough cards in it
func (d *Deck) Draw(n int) ([]card.Card, error) {
if len(d.Cards) < n {
return []card.Card{}, errors.New("Error. Not enough cards in the deck")
}
cards := d.Cards
drawn := cards[:n]
d.SetCards(cards[n:])
return drawn, nil
} |
package server
import (
"bytes"
"context"
"crypto/tls"
"encoding/gob"
"errors"
"log"
"net"
"net/http"
"sync"
"golang.org/x/net/http2"
"golang.org/x/net/websocket"
"golang.org/x/tools/godoc/vfs"
"golang.org/x/tools/godoc/vfs/mapfs"
"github.com/donovanhide/eventsource"
"github.com/neelance/gopath-tunnel/protocol"
)
// Server is the hub side of the gopath tunnel: it caches file contents by
// id and holds the HTTP client used to talk to the currently connected peer.
type Server struct {
	// cache stores file contents keyed by file id, reused across fetches.
	cache map[protocol.FileID][]byte
	// mu guards cl.
	mu sync.Mutex
	cl *http.Client
}
// Client returns the currently installed HTTP client under the mutex.
func (s *Server) Client() *http.Client {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.cl
}
// SetClient installs cl as the server's HTTP client under the mutex.
func (s *Server) SetClient(cl *http.Client) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cl = cl
}
// New returns a Server with an empty file cache and no client installed;
// the client is set when a websocket connection arrives (see Handler).
func New() *Server {
	return &Server{
		cache: make(map[protocol.FileID][]byte),
	}
}
// Handler returns a websocket handler that turns each incoming websocket
// connection into the transport of an HTTP/2 client talking back to the
// connecting peer. It verifies the peer's protocol version (must be 4),
// then parks until the connection's context ends.
func (s *Server) Handler() http.Handler {
	return websocket.Handler(func(ws *websocket.Conn) {
		dialed := false
		// Install a client whose "TLS dial" simply hands back the websocket
		// connection; it may be used at most once per connection.
		s.mu.Lock()
		s.cl = &http.Client{
			Transport: &withDummyScheme{&http2.Transport{
				DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
					if dialed {
						panic("already dialed")
					}
					dialed = true
					return ws, nil
				},
			}},
		}
		s.mu.Unlock()
		// Handshake: require protocol version 4 from the peer; otherwise
		// send an upgrade message and drop the connection.
		var version int
		if err := post(s.Client(), "/version", nil, &version); err != nil {
			log.Print(err)
			return
		}
		if version != 4 {
			req := &protocol.ErrorRequest{
				Error: "Incompatible client version. Please upgrade gopath-tunnel: go get -u github.com/neelance/gopath-tunnel",
			}
			if err := post(s.Client(), "/error", req, nil); err != nil {
				log.Print(err)
				return
			}
			ws.Close()
			return
		}
		// Keep the handler (and thus the websocket) alive until the request
		// context is cancelled.
		<-ws.Request().Context().Done()
	})
}
// withDummyScheme rewrites outgoing requests to the https scheme so the
// wrapped http2.Transport accepts them, then delegates the round trip.
type withDummyScheme struct {
	t http.RoundTripper
}

// RoundTrip implements http.RoundTripper.
func (t *withDummyScheme) RoundTrip(req *http.Request) (*http.Response, error) {
	req.URL.Scheme = "https"
	return t.t.RoundTrip(req)
}
// List asks the peer for the import paths of all packages it can serve.
// The ctx parameter is currently unused — post has no context plumbing.
func (s *Server) List(ctx context.Context) ([]string, error) {
	var pkgs []string
	if err := post(s.Client(), "/packages", nil, &pkgs); err != nil {
		return nil, err
	}
	return pkgs, nil
}
// Fetch retrieves the source tree for importPath from the peer and returns
// it as an in-memory file system. File contents already present in s.cache
// are advertised to the peer so unchanged files are not re-transferred.
// NOTE(review): s.cache is read and written here without holding s.mu —
// safe only if Fetch is never called concurrently; confirm.
func (s *Server) Fetch(ctx context.Context, importPath string, includeTests bool) (vfs.FileSystem, error) {
	// Advertise the file IDs we already hold.
	var cached []protocol.FileID
	for id := range s.cache {
		cached = append(cached, id)
	}
	req := &protocol.FetchRequest{
		SrcID: protocol.SrcID{
			ImportPath:   importPath,
			IncludeTests: includeTests,
		},
		Cached: cached,
	}
	var resp protocol.FetchResponse
	if err := post(s.Client(), "/fetch", req, &resp); err != nil {
		return nil, err
	}
	if resp.Error != "" {
		return nil, errors.New(resp.Error)
	}
	// Merge newly transferred contents into the cache.
	for id, contents := range resp.Contents {
		s.cache[id] = contents
	}
	// Materialize the name -> content mapping from the cache.
	files := make(map[string]string)
	for name, id := range resp.Files {
		files[name] = string(s.cache[id])
	}
	return mapfs.New(files), nil
}
// Watch subscribes to change notifications for importPath. It issues a
// streaming /watch request and returns a channel that receives a value
// whenever the peer reports "changed". The stream (and the forwarding
// goroutine) ends when ctx is cancelled or decoding fails.
// NOTE(review): the goroutine blocks on an unbuffered send — if the caller
// stops receiving, it leaks until the stream errors; confirm acceptable.
func (s *Server) Watch(ctx context.Context, importPath string, includeTests bool) (<-chan struct{}, error) {
	reqData := &protocol.FetchRequest{
		SrcID: protocol.SrcID{
			ImportPath:   importPath,
			IncludeTests: includeTests,
		},
	}
	// Encoding a known-good struct; failure here is a programmer error.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(reqData); err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "/watch", &buf)
	if err != nil {
		panic(err)
	}
	resp, err := s.Client().Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	c := make(chan struct{})
	go func() {
		defer resp.Body.Close()
		// The response body is a server-sent-events stream.
		dec := eventsource.NewDecoder(resp.Body)
		for {
			event, err := dec.Decode()
			if err != nil {
				// Cancellation is the expected way the stream ends.
				if err == context.Canceled {
					break
				}
				log.Println(err)
				break
			}
			if event.Data() == "changed" {
				c <- struct{}{}
			}
		}
	}()
	return c, nil
}
// post sends reqData to url as a gob-encoded POST body via c and, when
// respData is non-nil, gob-decodes the response body into it.
// NOTE(review): the content type claims application/json while the payload
// is gob — apparently harmless for this tunnel, but worth confirming.
func post(c *http.Client, url string, reqData, respData interface{}) error {
	var buf bytes.Buffer
	if reqData != nil {
		if err := gob.NewEncoder(&buf).Encode(reqData); err != nil {
			return err
		}
	}
	resp, err := c.Post(url, "application/json", &buf)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if respData != nil {
		if err := gob.NewDecoder(resp.Body).Decode(respData); err != nil {
			return err
		}
	}
	return nil
}
|
package jago
/*21 (0X15)*/
// ILOAD pushes the int local variable at the one-byte index that follows
// the opcode in the bytecode stream.
func ILOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	index := f.index8()
	f.push(f.loadVar(uint(index)).(Int))
}

/*22 (0X16)*/
// LLOAD pushes the long local variable at the one-byte index.
func LLOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	index := f.index8()
	f.push(f.loadVar(uint(index)).(Long))
}

/*23 (0X17)*/
// FLOAD pushes the float local variable at the one-byte index.
func FLOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	index := f.index8()
	f.push(f.loadVar(uint(index)).(Float))
}

/*24 (0X18)*/
// DLOAD pushes the double local variable at the one-byte index.
func DLOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	index := f.index8()
	f.push(f.loadVar(uint(index)).(Double))
}

/*25 (0X19)*/
// ALOAD pushes the reference local variable at the one-byte index.
func ALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	index := f.index8()
	f.push(f.loadVar(uint(index)).(Reference))
}
// ILOAD_0 .. ALOAD_3 push the local variable at the slot fixed by the
// opcode itself. Untyped constant indexes are used consistently throughout;
// the original mixed bare `0` and redundant `uint(0)` between families.

/*26 (0X1A)*/
func ILOAD_0(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(0).(Int))
}

/*27 (0X1B)*/
func ILOAD_1(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(1).(Int))
}

/*28 (0X1C)*/
func ILOAD_2(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(2).(Int))
}

/*29 (0X1D)*/
func ILOAD_3(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(3).(Int))
}

/*30 (0X1E)*/
func LLOAD_0(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(0).(Long))
}

/*31 (0X1F)*/
func LLOAD_1(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(1).(Long))
}

/*32 (0X20)*/
func LLOAD_2(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(2).(Long))
}

/*33 (0X21)*/
func LLOAD_3(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(3).(Long))
}

/*34 (0X22)*/
func FLOAD_0(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(0).(Float))
}

/*35 (0X23)*/
func FLOAD_1(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(1).(Float))
}

/*36 (0X24)*/
func FLOAD_2(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(2).(Float))
}

/*37 (0X25)*/
func FLOAD_3(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(3).(Float))
}

/*38 (0X26)*/
func DLOAD_0(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(0).(Double))
}

/*39 (0X27)*/
func DLOAD_1(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(1).(Double))
}

/*40 (0X28)*/
func DLOAD_2(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(2).(Double))
}

/*41 (0X29)*/
func DLOAD_3(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(3).(Double))
}

/*42 (0X2A)*/
func ALOAD_0(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(0).(Reference))
}

/*43 (0X2B)*/
func ALOAD_1(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(1).(Reference))
}

/*44 (0X2C)*/
func ALOAD_2(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(2).(Reference))
}

/*45 (0X2D)*/
func ALOAD_3(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
	f.push(f.loadVar(3).(Reference))
}
/*46 (0X2E)
Run-time Exceptions
If arrayref is null, iaload throws a NullPointerException.
Otherwise, if index is not within the bounds of the array referenced by arrayref, the iaload instruction throws an ArrayIndexOutOfBoundsException.
*/
func IALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != INT_TYPE {
Fatal("Not an int array")
}
f.push(arrayref.elements[index])
}
/*47 (0X2F)*/
func LALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != LONG_TYPE {
Fatal("Not a long array")
}
f.push(arrayref.elements[index])
}
/*48 (0X30)*/
func FALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != FLOAT_TYPE {
Fatal("Not an float array")
}
f.push(arrayref.elements[index])
}
/*49 (0X31)*/
func DALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != DOUBLE_TYPE {
Fatal("Not an double array")
}
f.push(arrayref.elements[index])
}
/*50 (0X32)*/
func AALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
_, ok := arrayref.class.componentType.(ClassType)
if !ok {
Fatal("Not an reference array")
}
f.push(arrayref.elements[index])
}
/*51 (0X33)*/
func BALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != BOOLEAN_TYPE {
Fatal("Not a boolean array")
}
f.push(arrayref.elements[index])
}
/*52 (0X34)*/
func CALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != CHAR_TYPE {
Fatal("Not a char array")
}
//zero-extended to an int value
value := Int(arrayref.elements[index].(Char))
f.push(value)
}
/*53 (0X35)*/
func SALOAD(opcode uint8, f *Frame, t *Thread, c *Class, m *Method) {
index := f.pop().(Int)
arrayref := f.pop().(ArrayRef)
if arrayref.IsNull() {
Throw("NullPointerException", "")
}
if arrayref.class.componentType != SHORT_TYPE {
Fatal("Not a short array")
}
f.push(arrayref.elements[index])
} |
package main
// Resolver is the root resolver (no state needed).
type Resolver struct{}

// ThreadResolver resolves the fields of a single Thread.
type ThreadResolver struct {
	thread Thread
}

// NOTE(review): the lower-case, snake_case method names below are
// non-idiomatic Go; presumably required by a GraphQL library's field
// matching — confirm before renaming.

// id returns a pointer to the thread's id field.
func (t *ThreadResolver) id() *string {
	return &t.thread.id
}

// author returns a pointer to the thread's author field.
func (t *ThreadResolver) author() *string {
	return &t.thread.author
}

// title returns a pointer to the thread's title field.
func (t *ThreadResolver) title() *string {
	return &t.thread.title
}

// date_posted returns a pointer to the thread's date_posted field.
func (t *ThreadResolver) date_posted() *string {
	return &t.thread.date_posted
}
|
package passingcars
import "testing"
// TestPassingCars checks PassingCars against a table of known inputs.
func TestPassingCars(t *testing.T) {
	cases := []struct {
		input  []int
		result int
	}{
		{[]int{0, 1, 0, 1, 1}, 5},
		{[]int{1, 0}, 0},
	}
	for _, tc := range cases {
		got := PassingCars(tc.input)
		// Compare the computed value against the expected one.
		if tc.result != got {
			t.Errorf("PassingCars for %d failed, expected result %d, got, %d", tc.input, tc.result, got)
		}
	}
}
|
package _0_Decorator_Pattern
import (
"../01_Factory_Pattern"
)
// Step 1
// Create an interface:
type Shape = _1_Factory_Pattern.Shape

// Step 2
// Create concrete types implementing the interface.
type Rectangle = _1_Factory_Pattern.Rectangle
type Circle = _1_Factory_Pattern.Circle

// Step 3
// Create the abstract decorator implementing the Shape interface.
type ShapeDecorator interface {
	Draw() string
	setDecoratedShape(decoratedShape Shape)
}

// Step 4
// Create a concrete decorator extending ShapeDecorator.
type RedShapeDecorator struct {
	decoratedShape Shape
}

// setDecoratedShape stores the shape being decorated.
func (receiver *RedShapeDecorator) setDecoratedShape(decoratedShape Shape) {
	receiver.decoratedShape = decoratedShape
}

// Draw renders the wrapped shape and appends this decorator's red border.
func (receiver *RedShapeDecorator) Draw() string {
	return receiver.decoratedShape.Draw() + "| " + receiver.setRedBorder()
}

// setRedBorder supplies the decoration text added by this decorator.
func (receiver *RedShapeDecorator) setRedBorder() string {
	return "RedBorder"
}
|
package main
import (
"log"
"net/http"
)
type statusWriter struct {
http.ResponseWriter
status int
}
func (w *statusWriter) WriteHeader(status int) {
w.status = status
w.ResponseWriter.WriteHeader(status)
}
func RequestLogger(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
sw := &statusWriter{ResponseWriter: w, status: http.StatusOK}
next.ServeHTTP(sw, req)
log.Printf("[%v] %d %s %s\n", req.Context().Value("id"), sw.status, req.Method, req.URL)
})
}
|
package service
import "github.com/jerolan/slack-poll/domain/entity"
// PollService defines CRUD-style operations over polls and their answers.
type PollService interface {
	// GetPollByID returns the poll with the given id.
	GetPollByID(pollID string) (entity.Poll, error)
	// CreatePoll persists a new poll.
	CreatePoll(*entity.Poll) error
	// DeletePoll removes the poll with the given id.
	DeletePoll(pollID string) error
	// FindPollAnswers returns all answers recorded for a poll.
	FindPollAnswers(pollID string) ([]entity.PollAnswer, error)
	// CreatePollAnswer persists a new poll answer.
	CreatePollAnswer(*entity.PollAnswer) error
	// DeletePollAnswer removes the answer with the given id.
	DeletePollAnswer(pollAnswerID string) error
}
|
package adapter
import "fmt"
// Wireles models a device charged over a wireless interface.
// NOTE(review): the name is likely a typo for "Wireless"; renaming would
// change the exported API, so it is left as-is here.
type Wireles struct {
}

// WirelessCharging reports (to stdout) that wireless charging is in progress.
func (w Wireles) WirelessCharging() {
	fmt.Println("The device is charging using wireless charging.")
}
|
package faregate
import (
"fmt"
"math/rand"
"time"
)
func Must(c <-chan struct{}, err error) <-chan struct{} {
if err != nil {
panic(err)
}
return c
}
// Example demonstrates faregate usage: acquire randomly sized token batches
// forever. NOTE(review): the loop never terminates — fine as a demo, but
// this is not a runnable go-test Example.
func Example() {
	// Fixed seed for a reproducible acquisition sequence.
	rnd := rand.New(rand.NewSource(42))
	fg, err := New(RefreshInterval(time.Second), TokenCount(100), ConcurrencyLevel(1))
	if err != nil {
		panic(err)
	}
	defer fg.Close()
	for {
		q := rnd.Intn(10)
		// Block until q tokens have been granted.
		<-Must(fg.Acquire(uint64(q)))
		fmt.Println("acquired", q)
	}
}
|
package main
import (
"encoding/json"
"fmt"
)
// main marshals a sample Pessoa value to JSON and prints the result.
func main() {
	// Pessoa holds a person's name and age.
	type Pessoa struct {
		Nome  string
		Idade int
	}
	person := Pessoa{Nome: "Paula", Idade: 42}
	encoded, err := json.Marshal(person)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(string(encoded))
}
|
package main
import (
"encoding/json"
"errors"
"github.com/satori/go.uuid"
"sort"
"sync"
)
// Application groups a set of services under a common base URL and tracks
// their state. Mutable fields are guarded by the embedded RWMutex.
type Application struct {
	UniqueId string `json:"id" bson:"_id,omitempty"`
	Name     string `json:"name" bson:"name"`
	BaseUrl  string `json:"baseUrl" bson:"baseUrl"`
	// Services maps service id -> service URL.
	Services map[string]string `json:"services" bson:"services"`
	State    State             `json:"state" bson:"state"`
	// servicesSorted caches services sorted by descending URL length;
	// nil until first computed.
	servicesSorted []*Service
	sync.RWMutex   `json:"-" bson:"-"`
}
// Id returns the application's unique identifier.
func (a *Application) Id() string {
	return a.UniqueId
}
// ServicesCopy returns a defensive copy of the Services map, taken under
// the read lock so concurrent mutation is safe.
func (a *Application) ServicesCopy() map[string]string {
	a.RLock()
	defer a.RUnlock()
	out := make(map[string]string, len(a.Services))
	for id, url := range a.Services {
		out[id] = url
	}
	return out
}
// ServicesSorted returns the services sorted by descending URL length,
// computing and caching the order on first use.
//@todo: too many locking, refactor
func (a *Application) ServicesSorted() []*Service {
	a.Lock()
	defer a.Unlock()
	// Serve from cache when available.
	if a.servicesSorted != nil {
		return a.servicesSorted
	}
	a.sortServices()
	return a.servicesSorted
}
// ContainsService reports whether a service with the given id is registered.
// NOTE(review): this tests the map VALUE for non-emptiness rather than key
// presence — a service stored with an empty URL would report as absent;
// confirm URLs are always non-empty.
func (a *Application) ContainsService(id string) bool {
	a.RLock()
	defer a.RUnlock()
	return a.Services[id] != ""
}
// AddService registers service under its unique id. It fails with
// AlreadyPresentError when the id is already registered or another
// registered service shares the same URL, and with InvalidStoreStateError
// when a registered id cannot be resolved in the store. The sorted cache
// is rebuilt on success.
func (a *Application) AddService(service *Service) error {
	a.Lock()
	defer a.Unlock()
	if a.Services[service.UniqueId] != "" {
		return AlreadyPresentError
	}
	// Reject duplicates by URL as well as by id.
	// (Keys-only range; the original used the redundant `for s, _ :=` form.)
	for s := range a.Services {
		svc, err := store.GetService(s)
		if err != nil {
			return InvalidStoreStateError
		}
		if svc.ServiceUrl == service.ServiceUrl {
			return AlreadyPresentError
		}
	}
	a.Services[service.UniqueId] = service.ServiceUrl
	a.sortServices()
	return nil
}
// RemoveServiceId unregisters the service with the given id (a no-op for
// unknown ids) and rebuilds the sorted cache.
func (a *Application) RemoveServiceId(id string) {
	a.Lock()
	defer a.Unlock()
	delete(a.Services, id)
	a.sortServices()
}
// sortServices rebuilds the servicesSorted cache from the registry, ordered
// by descending service-URL length. Callers must hold a.Lock.
func (a *Application) sortServices() {
	a.servicesSorted = make([]*Service, 0, len(a.Services))
	// Keys-only range; the original used the redundant `for k, _ :=` form.
	for k := range a.Services {
		// NOTE(review): a nil result from registry.GetService would be
		// appended as-is — confirm the registry always knows these ids.
		service := registry.GetService(k)
		a.servicesSorted = append(a.servicesSorted, service)
	}
	sort.Sort(SortByDESCServiceUrlLength(a.servicesSorted))
}
// SetState updates the application's state under the write lock.
func (a *Application) SetState(state State) {
	a.Lock()
	defer a.Unlock()
	a.State = state
}
// Init validates required fields and fills in defaults: a random UniqueId,
// the Active state and an empty Services map. It returns an error when
// Name or BaseUrl is missing.
func (a *Application) Init() error {
	if a.Name == "" || a.BaseUrl == "" {
		return errors.New("Missing name and/or baseUrl definition")
	}
	if a.UniqueId == "" {
		a.UniqueId = uuid.NewV4().String()
	}
	if a.State == "" {
		// SetState takes the write lock internally; Init holds no lock here.
		a.SetState(Active)
	}
	if a.Services == nil {
		a.Services = make(map[string]string)
	}
	return nil
}
// String renders the application as JSON; on marshal failure it returns
// the error text instead.
func (a *Application) String() string {
	data, err := json.Marshal(a)
	if err != nil {
		return err.Error()
	}
	return string(data)
}
|
package taskfile_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"github.com/go-task/task/v3/taskfile"
)
// TestPreconditionParse verifies that a Precondition unmarshals from both
// the short scalar form ("test -f foo.txt", deriving a default msg) and
// the full mapping form with explicit sh/msg.
func TestPreconditionParse(t *testing.T) {
	tests := []struct {
		content  string
		v        any
		expected any
	}{
		{
			"test -f foo.txt",
			&taskfile.Precondition{},
			&taskfile.Precondition{Sh: `test -f foo.txt`, Msg: "`test -f foo.txt` failed"},
		},
		{
			"sh: '[ 1 = 0 ]'",
			&taskfile.Precondition{},
			&taskfile.Precondition{Sh: "[ 1 = 0 ]", Msg: "[ 1 = 0 ] failed"},
		},
		// The original table contained this entry twice, byte-for-byte;
		// the duplicate added no coverage and was removed.
		{
			`
sh: "[ 1 = 2 ]"
msg: "1 is not 2"
`,
			&taskfile.Precondition{},
			&taskfile.Precondition{Sh: "[ 1 = 2 ]", Msg: "1 is not 2"},
		},
	}
	for _, test := range tests {
		err := yaml.Unmarshal([]byte(test.content), test.v)
		require.NoError(t, err)
		assert.Equal(t, test.expected, test.v)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.