text
stringlengths 11
4.05M
|
|---|
package main
import (
"context"
"log"
"github.com/fergusn/capi-controller/pkg/clusters"
"k8s.io/apimachinery/pkg/runtime"
controller "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
corev1 "k8s.io/client-go/kubernetes/scheme"
capiv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)
// main wires up the controller-runtime manager for the CAPI management
// controller and blocks until the manager stops.
func main() {
	scheme := runtime.NewScheme()
	// Register the Cluster API and core Kubernetes types. The original
	// discarded these errors; a failure here means the controller cannot
	// decode the objects it watches, so abort early.
	if err := capiv1.AddToScheme(scheme); err != nil {
		log.Fatal(err)
	}
	if err := corev1.AddToScheme(scheme); err != nil {
		log.Fatal(err)
	}
	controller.SetLogger(zap.New(zap.UseDevMode(true)))
	mgr, err := manager.New(controller.GetConfigOrDie(), manager.Options{
		LeaderElection: false,
		Scheme:         scheme,
	})
	if err != nil {
		log.Fatal(err)
	}
	mc, err := clusters.NewManagementController(mgr)
	if err != nil {
		log.Fatal(err)
	}
	if err := buildController(mgr, mc); err != nil {
		log.Fatal(err)
	}
	// Start blocks until the context is cancelled or the manager fails.
	log.Fatal(mgr.Start(context.Background()))
}
|
package queries
import (
"database/sql"
"encoding/json"
"net/url"
"time"
"github.com/pwang347/cs304/server/common"
)
// QueryEventLogsForVirtualMachine returns, as JSON, every EventLog row
// belonging to the virtual machine identified by the required "vmIp"
// query parameter.
func QueryEventLogsForVirtualMachine(db *sql.DB, params url.Values) (data []byte, err error) {
	var (
		response = SQLResponse{}
		tx       *sql.Tx
		vmIp     string
	)
	if tx, err = db.Begin(); err != nil {
		return nil, err
	}
	if vmIp, err = common.GetRequiredParam(params, "vmIp"); err != nil {
		// BUG FIX: the original returned here without rolling back,
		// leaking the open transaction on bad input.
		tx.Rollback()
		return
	}
	if response.Data, response.AffectedRows, err = common.QueryJSON(tx,
		"SELECT * FROM EventLog WHERE VirtualMachineIpAddress = ?;", vmIp); err != nil {
		tx.Rollback()
		return
	}
	if err = tx.Commit(); err != nil {
		return
	}
	data, err = json.Marshal(response)
	return
}
// CreateEventLog inserts a new EventLog row for the virtual machine given by
// the required "vmIp" parameter, using "logData" and "eventType" as payload.
// The log number is allocated as MAX(logNumber)+1 per VM inside the insert.
// Returns the JSON-encoded SQLResponse with the affected row count.
func CreateEventLog(db *sql.DB, params url.Values) (data []byte, err error) {
	var (
		result    sql.Result
		response  = SQLResponse{}
		tx        *sql.Tx
		logData   string
		eventType string
		vmIp      string
	)
	if tx, err = db.Begin(); err != nil {
		return nil, err
	}
	if logData, err = common.GetRequiredParam(params, "logData"); err != nil {
		tx.Rollback()
		return
	}
	if eventType, err = common.GetRequiredParam(params, "eventType"); err != nil {
		tx.Rollback()
		return
	}
	// BUG FIX: the original assigned the "vmIp" parameter into eventType,
	// clobbering the event type and leaving vmIp empty.
	if vmIp, err = common.GetRequiredParam(params, "vmIp"); err != nil {
		tx.Rollback()
		return
	}
	// MySQL-style timestamp using the Go reference time layout.
	ts := time.Now().Format("2006-01-02 15:04:05")
	// SECURITY FIX: vmIp was previously concatenated straight into the SQL
	// text (injection risk); it is now bound as a parameter in both places.
	if result, err = tx.Exec("INSERT INTO EventLog (logNumber, timestamp, data, eventType, VirtualMachineIpAddress) "+
		"VALUES((SELECT COALESCE(MAX(logNumber)+1,1) FROM EventLog WHERE VirtualMachineIpAddress = ?"+
		"),?,?,?,?);",
		vmIp, ts, logData, eventType, vmIp); err != nil {
		tx.Rollback()
		return
	}
	// Capture the affected-row count before committing; some drivers do not
	// guarantee the Result stays valid after the transaction ends.
	if response.AffectedRows, err = result.RowsAffected(); err != nil {
		tx.Rollback()
		return
	}
	if err = tx.Commit(); err != nil {
		return
	}
	data, err = json.Marshal(response)
	return
}
|
package main
import (
"log"
"strconv"
)
// Alpha describes one spelled-out digit word: the numeric value it stands
// for and the length of the full word (used to advance the parser cursor).
type Alpha struct {
	Number int
	Length int
}

// alphaMap keys are the first two letters of each digit word ("ze"ro,
// "on"e, ...); two characters are enough to disambiguate all ten words.
var alphaMap = map[string]Alpha{
	"ze": {0, 4},
	"on": {1, 3},
	"tw": {2, 3},
	"th": {3, 5},
	"fo": {4, 4},
	"fi": {5, 4},
	"si": {6, 3},
	"se": {7, 5},
	"ei": {8, 5},
	"ni": {9, 4},
}
// solution parses a string that mixes decimal digits with spelled-out
// English digit words and returns the resulting number, e.g.
// "one4seveneight" -> 1478. Parsing stops at the first malformed token.
//
// Every digit word is uniquely identified by its first two characters, so
// the lookup table stores that prefix together with the full word length
// used to advance the cursor.
func solution(s string) int {
	// prefix -> {digit value, full word length}
	words := map[string]struct {
		number int
		length int
	}{
		"ze": {0, 4}, "on": {1, 3}, "tw": {2, 3}, "th": {3, 5}, "fo": {4, 4},
		"fi": {5, 4}, "si": {6, 3}, "se": {7, 5}, "ei": {8, 5}, "ni": {9, 4},
	}
	res := 0
	for idx := 0; idx < len(s); {
		if s[idx] >= '0' && s[idx] <= '9' {
			val, _ := strconv.Atoi(string(s[idx]))
			res = res*10 + val
			idx++
			continue
		}
		// BUG FIX: the original indexed s[idx:idx+2] unconditionally
		// (bounds panic on a trailing single letter) and a miss in the
		// table produced a zero-value Length, looping forever.
		if idx+2 > len(s) {
			break
		}
		word, ok := words[s[idx:idx+2]]
		if !ok {
			break
		}
		res = res*10 + word.number
		idx += word.length
	}
	return res
}
// main demonstrates solution on a mixed digit/word input; prints 1478.
func main() {
	log.Println(solution("one4seveneight"))
}
|
package nifi
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sync"
"time"
)
// Client is a thin wrapper around the NiFi REST API.
type Client struct {
	// Config supplies the host and API path used to build request URLs.
	Config Config
	// Client is the underlying HTTP transport. NOTE(review): it has no
	// timeout configured, so a hung server can block calls indefinitely.
	Client *http.Client
	// The mutex is used by the plugin to prevent parallel execution of some update/delete operations.
	// There are scenarios when updating a connection involves modifying related processors and vice versa.
	// This breaks Terraform model to some extent but at the same time is unavoidable in NiFi world.
	// Currently only flows that involve cross-resource interactions are wrapped into lock/unlock sections.
	// Most of operations can still be performed in parallel.
	Lock sync.Mutex
}
// NewClient builds a NiFi API client around the supplied configuration,
// using a default HTTP transport.
func NewClient(config Config) *Client {
	client := Client{
		Config: config,
		Client: &http.Client{},
	}
	return &client
}
// Common section

// Revision implements NiFi's optimistic locking: Version must match the
// server's current value for an update or delete to be accepted.
type Revision struct {
	Version int `json:"version"`
}

// Position is an x/y canvas coordinate used for UI placement of components.
type Position struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
}
// JsonCall performs an HTTP request against the NiFi API. When bodyIn is
// non-nil it is JSON-encoded as the request body; when bodyOut is non-nil
// the response body is JSON-decoded into it. Returns the HTTP status code
// (0 when the request never completed) and an error for transport failures,
// encode/decode failures, or any status >= 300.
func (c *Client) JsonCall(method string, url string, bodyIn interface{}, bodyOut interface{}) (int, error) {
	var requestBody io.Reader = nil
	if bodyIn != nil {
		var buffer = new(bytes.Buffer)
		// BUG FIX: the encode error was previously ignored, sending a
		// truncated body on failure.
		if err := json.NewEncoder(buffer).Encode(bodyIn); err != nil {
			return 0, err
		}
		requestBody = buffer
	}
	request, err := http.NewRequest(method, url, requestBody)
	if err != nil {
		return 0, err
	}
	if bodyIn != nil {
		request.Header.Add("Content-Type", "application/json; charset=utf-8")
	}
	response, err := c.Client.Do(request)
	if err != nil {
		return 0, err
	}
	// BUG FIX: close the body on every path. The original returned early on
	// status >= 300 before the deferred Close, leaking the connection.
	defer response.Body.Close()
	if response.StatusCode >= 300 {
		return response.StatusCode, fmt.Errorf("The call has failed with the code of %d", response.StatusCode)
	}
	if bodyOut != nil {
		if err := json.NewDecoder(response.Body).Decode(bodyOut); err != nil {
			return response.StatusCode, err
		}
	}
	return response.StatusCode, nil
}
// Process Group section

// ProcessGroupComponent is the mutable part of a NiFi process group entity.
type ProcessGroupComponent struct {
	Id            string   `json:"id,omitempty"`
	ParentGroupId string   `json:"parentGroupId"`
	Name          string   `json:"name"`
	Position      Position `json:"position"`
}

// ProcessGroup pairs the component payload with its locking revision.
type ProcessGroup struct {
	Revision  Revision              `json:"revision"`
	Component ProcessGroupComponent `json:"component"`
}
// CreateProcessGroup creates processGroup under its parent group and
// refreshes the entity in place from the server response.
func (c *Client) CreateProcessGroup(processGroup *ProcessGroup) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s/process-groups",
		c.Config.Host, c.Config.ApiPath, processGroup.Component.ParentGroupId)
	_, err := c.JsonCall("POST", target, processGroup, processGroup)
	return err
}
// GetProcessGroup fetches the process group with the given id. A 404 from
// the server is reported as the sentinel error "not_found".
func (c *Client) GetProcessGroup(processGroupId string) (*ProcessGroup, error) {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s",
		c.Config.Host, c.Config.ApiPath, processGroupId)
	result := ProcessGroup{}
	code, err := c.JsonCall("GET", target, nil, &result)
	if code == 404 {
		return nil, fmt.Errorf("not_found")
	}
	if err != nil {
		return nil, err
	}
	return &result, nil
}
// UpdateProcessGroup pushes the local process group state to NiFi and
// refreshes processGroup from the server response.
func (c *Client) UpdateProcessGroup(processGroup *ProcessGroup) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s",
		c.Config.Host, c.Config.ApiPath, processGroup.Component.Id)
	_, err := c.JsonCall("PUT", target, processGroup, processGroup)
	return err
}

// DeleteProcessGroup removes the process group, passing its current
// revision version for NiFi's optimistic locking.
func (c *Client) DeleteProcessGroup(processGroup *ProcessGroup) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s?version=%d",
		c.Config.Host, c.Config.ApiPath, processGroup.Component.Id, processGroup.Revision.Version)
	_, err := c.JsonCall("DELETE", target, nil, nil)
	return err
}
// GetProcessGroupConnections lists every connection inside the process
// group with the given id.
func (c *Client) GetProcessGroupConnections(processGroupId string) (*Connections, error) {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s/connections",
		c.Config.Host, c.Config.ApiPath, processGroupId)
	result := Connections{}
	if _, err := c.JsonCall("GET", target, nil, &result); err != nil {
		return nil, err
	}
	return &result, nil
}
// Processor section

// ProcessorRelationship names one outgoing relationship and whether flow
// files routed to it are auto-terminated.
type ProcessorRelationship struct {
	Name          string `json:"name"`
	AutoTerminate bool   `json:"autoTerminate"`
}

// ProcessorConfig carries the scheduling and property configuration of a
// processor. AutoTerminatedRelationships is derived client-side in
// GetProcessor from the Relationships list.
type ProcessorConfig struct {
	SchedulingStrategy               string                 `json:"schedulingStrategy"`
	SchedulingPeriod                 string                 `json:"schedulingPeriod"`
	ExecutionNode                    string                 `json:"executionNode"`
	ConcurrentlySchedulableTaskCount int                    `json:"concurrentlySchedulableTaskCount"`
	Properties                       map[string]interface{} `json:"properties"`
	AutoTerminatedRelationships      []string               `json:"autoTerminatedRelationships"`
}

// ProcessorComponent is the mutable part of a NiFi processor entity.
// Position and Config are pointers so partial updates can omit them.
type ProcessorComponent struct {
	Id            string                  `json:"id,omitempty"`
	ParentGroupId string                  `json:"parentGroupId,omitempty"`
	Name          string                  `json:"name,omitempty"`
	Type          string                  `json:"type,omitempty"`
	Position      *Position               `json:"position,omitempty"`
	State         string                  `json:"state,omitempty"`
	Config        *ProcessorConfig        `json:"config,omitempty"`
	Relationships []ProcessorRelationship `json:"relationships,omitempty"`
}

// Processor pairs the component payload with its locking revision.
type Processor struct {
	Revision  Revision           `json:"revision"`
	Component ProcessorComponent `json:"component"`
}
// ProcessorStub returns an empty Processor whose nested Position and Config
// pointers are pre-allocated so a JSON decode can populate them in place.
func ProcessorStub() *Processor {
	stub := Processor{}
	stub.Component.Position = &Position{}
	stub.Component.Config = &ProcessorConfig{}
	return &stub
}
// CleanupNilProperties removes every key whose value is nil from the given
// property map, in place. It always returns nil; the error return is kept
// for interface stability.
func (c *Client) CleanupNilProperties(properties map[string]interface{}) error {
	var stale []string
	for name, value := range properties {
		if value == nil {
			stale = append(stale, name)
		}
	}
	for _, name := range stale {
		delete(properties, name)
	}
	return nil
}
// CreateProcessor creates the processor under its parent group, refreshes
// it from the server response and strips nil property values.
func (c *Client) CreateProcessor(processor *Processor) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s/processors",
		c.Config.Host, c.Config.ApiPath, processor.Component.ParentGroupId)
	if _, err := c.JsonCall("POST", target, processor, processor); err != nil {
		return err
	}
	c.CleanupNilProperties(processor.Component.Config.Properties)
	return nil
}
// GetProcessor fetches the processor with the given id, strips nil property
// values, and synthesizes Config.AutoTerminatedRelationships from the
// relationship list. A server 404 is reported as the sentinel "not_found".
func (c *Client) GetProcessor(processorId string) (*Processor, error) {
	target := fmt.Sprintf("http://%s/%s/processors/%s",
		c.Config.Host, c.Config.ApiPath, processorId)
	processor := ProcessorStub()
	code, err := c.JsonCall("GET", target, nil, &processor)
	if code == 404 {
		return nil, fmt.Errorf("not_found")
	}
	if err != nil {
		return nil, err
	}
	c.CleanupNilProperties(processor.Component.Config.Properties)
	autoTerminated := []string{}
	for _, rel := range processor.Component.Relationships {
		if rel.AutoTerminate {
			autoTerminated = append(autoTerminated, rel.Name)
		}
	}
	processor.Component.Config.AutoTerminatedRelationships = autoTerminated
	return processor, nil
}
// UpdateProcessor pushes the local processor state to NiFi, refreshes the
// entity from the response and strips nil property values.
func (c *Client) UpdateProcessor(processor *Processor) error {
	target := fmt.Sprintf("http://%s/%s/processors/%s",
		c.Config.Host, c.Config.ApiPath, processor.Component.Id)
	if _, err := c.JsonCall("PUT", target, processor, processor); err != nil {
		return err
	}
	c.CleanupNilProperties(processor.Component.Config.Properties)
	return nil
}

// DeleteProcessor removes the processor, passing its current revision
// version for NiFi's optimistic locking.
func (c *Client) DeleteProcessor(processor *Processor) error {
	target := fmt.Sprintf("http://%s/%s/processors/%s?version=%d",
		c.Config.Host, c.Config.ApiPath, processor.Component.Id, processor.Revision.Version)
	_, err := c.JsonCall("DELETE", target, nil, nil)
	return err
}
// SetProcessorState submits a minimal PUT carrying only the processor id,
// current revision and desired run state; the full entity is refreshed from
// the server response.
func (c *Client) SetProcessorState(processor *Processor, state string) error {
	payload := Processor{
		Revision: Revision{
			Version: processor.Revision.Version,
		},
		Component: ProcessorComponent{
			Id:    processor.Component.Id,
			State: state,
		},
	}
	target := fmt.Sprintf("http://%s/%s/processors/%s",
		c.Config.Host, c.Config.ApiPath, processor.Component.Id)
	_, err := c.JsonCall("PUT", target, payload, processor)
	return err
}

// StartProcessor transitions the processor to the RUNNING state.
func (c *Client) StartProcessor(processor *Processor) error {
	return c.SetProcessorState(processor, "RUNNING")
}

// StopProcessor transitions the processor to the STOPPED state.
func (c *Client) StopProcessor(processor *Processor) error {
	return c.SetProcessorState(processor, "STOPPED")
}
// Connection section

// ConnectionHand identifies one endpoint (source or destination) of a
// connection by component type, id and owning group.
type ConnectionHand struct {
	Type    string `json:"type"`
	Id      string `json:"id"`
	GroupId string `json:"groupId"`
}

// ConnectionComponent is the mutable part of a NiFi connection entity,
// including its back-pressure thresholds and routing relationships.
type ConnectionComponent struct {
	Id                            string         `json:"id,omitempty"`
	ParentGroupId                 string         `json:"parentGroupId"`
	BackPressureDataSizeThreshold string         `json:"backPressureDataSizeThreshold"`
	BackPressureObjectThreshold   int            `json:"backPressureObjectThreshold"`
	Source                        ConnectionHand `json:"source"`
	Destination                   ConnectionHand `json:"destination"`
	SelectedRelationships         []string       `json:"selectedRelationships"`
	Bends                         []Position     `json:"bends"`
}

// Connection pairs the component payload with its locking revision.
type Connection struct {
	Revision  Revision            `json:"revision"`
	Component ConnectionComponent `json:"component"`
}

// Connections is the list wrapper returned by the connections endpoints.
type Connections struct {
	Connections []Connection `json:"connections"`
}

// ConnectionDropRequest mirrors the server-side request that purges a
// connection's flow-file queue; Finished reports completion on polling.
type ConnectionDropRequest struct {
	DropRequest struct {
		Id       string `json:"id"`
		Finished bool   `json:"finished"`
	} `json:"dropRequest"`
}
// CreateConnection creates the connection inside its parent group and
// refreshes the entity in place from the server response.
func (c *Client) CreateConnection(connection *Connection) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s/connections",
		c.Config.Host, c.Config.ApiPath, connection.Component.ParentGroupId)
	_, err := c.JsonCall("POST", target, connection, connection)
	return err
}

// GetConnection fetches the connection with the given id. A server 404 is
// reported as the sentinel error "not_found".
func (c *Client) GetConnection(connectionId string) (*Connection, error) {
	target := fmt.Sprintf("http://%s/%s/connections/%s",
		c.Config.Host, c.Config.ApiPath, connectionId)
	result := Connection{}
	code, err := c.JsonCall("GET", target, nil, &result)
	if code == 404 {
		return nil, fmt.Errorf("not_found")
	}
	if err != nil {
		return nil, err
	}
	return &result, nil
}
// UpdateConnection pushes the local connection state to NiFi and refreshes
// the entity from the server response.
func (c *Client) UpdateConnection(connection *Connection) error {
	target := fmt.Sprintf("http://%s/%s/connections/%s",
		c.Config.Host, c.Config.ApiPath, connection.Component.Id)
	_, err := c.JsonCall("PUT", target, connection, connection)
	return err
}

// DeleteConnection removes the connection, passing its current revision
// version for NiFi's optimistic locking.
func (c *Client) DeleteConnection(connection *Connection) error {
	target := fmt.Sprintf("http://%s/%s/connections/%s?version=%d",
		c.Config.Host, c.Config.ApiPath, connection.Component.Id, connection.Revision.Version)
	_, err := c.JsonCall("DELETE", target, nil, nil)
	return err
}
// DropConnectionData purges the flow-file queue of the given connection by
// creating a server-side drop request, polling it until it reports finished
// (or up to ~30s), and finally deleting the drop request.
//
// Returns an error only when creating or deleting the drop request fails;
// a request that never finishes within the polling window is logged but
// not reported as an error.
func (c *Client) DropConnectionData(connection *Connection) error {
	// Create a request to drop the contents of the queue in this connection
	url := fmt.Sprintf("http://%s/%s/flowfile-queues/%s/drop-requests",
		c.Config.Host, c.Config.ApiPath, connection.Component.Id)
	dropRequest := ConnectionDropRequest{}
	_, err := c.JsonCall("POST", url, nil, &dropRequest)
	if nil != err {
		return err
	}
	// Give it some time to complete
	maxAttempts := 10
	for iteration := 0; iteration < maxAttempts; iteration++ {
		// Check status of the request
		url = fmt.Sprintf("http://%s/%s/flowfile-queues/%s/drop-requests/%s",
			c.Config.Host, c.Config.ApiPath, connection.Component.Id, dropRequest.DropRequest.Id)
		_, err = c.JsonCall("GET", url, nil, &dropRequest)
		if nil != err {
			// NOTE(review): a failed status poll retries immediately,
			// skipping both the Finished check and the sleep below —
			// confirm a tight retry loop is intended here.
			continue
		}
		if dropRequest.DropRequest.Finished {
			break
		}
		// Log progress
		log.Printf("[INFO] Purging Connection data %s %d...", dropRequest.DropRequest.Id, iteration+1)
		// Wait a bit
		time.Sleep(3 * time.Second)
		if maxAttempts-1 == iteration {
			log.Printf("[INFO] Failed to purge the Connection %s", dropRequest.DropRequest.Id)
		}
	}
	// Remove a request to drop the contents of this connection
	url = fmt.Sprintf("http://%s/%s/flowfile-queues/%s/drop-requests/%s",
		c.Config.Host, c.Config.ApiPath, connection.Component.Id, dropRequest.DropRequest.Id)
	_, err = c.JsonCall("DELETE", url, nil, nil)
	if nil != err {
		return err
	}
	return nil
}
// Controller Service section

// ControllerServiceComponent is the mutable part of a NiFi controller
// service entity.
type ControllerServiceComponent struct {
	Id            string                 `json:"id,omitempty"`
	ParentGroupId string                 `json:"parentGroupId,omitempty"`
	Name          string                 `json:"name,omitempty"`
	Type          string                 `json:"type,omitempty"`
	State         string                 `json:"state,omitempty"`
	Properties    map[string]interface{} `json:"properties"`
}

// ControllerService pairs the component payload with its locking revision.
type ControllerService struct {
	Revision  Revision                   `json:"revision"`
	Component ControllerServiceComponent `json:"component"`
}
// CreateControllerService creates the service under its parent process
// group, refreshes it from the server response and strips nil properties.
func (c *Client) CreateControllerService(controllerService *ControllerService) error {
	target := fmt.Sprintf("http://%s/%s/process-groups/%s/controller-services",
		c.Config.Host, c.Config.ApiPath, controllerService.Component.ParentGroupId)
	if _, err := c.JsonCall("POST", target, controllerService, controllerService); err != nil {
		return err
	}
	c.CleanupNilProperties(controllerService.Component.Properties)
	return nil
}
// GetControllerService fetches the controller service with the given id and
// strips nil property values. A server 404 is reported as "not_found".
func (c *Client) GetControllerService(controllerServiceId string) (*ControllerService, error) {
	target := fmt.Sprintf("http://%s/%s/controller-services/%s",
		c.Config.Host, c.Config.ApiPath, controllerServiceId)
	result := ControllerService{}
	code, err := c.JsonCall("GET", target, nil, &result)
	if code == 404 {
		return nil, fmt.Errorf("not_found")
	}
	if err != nil {
		return nil, err
	}
	c.CleanupNilProperties(result.Component.Properties)
	return &result, nil
}
// UpdateControllerService pushes the local service state to NiFi, refreshes
// the entity from the response and strips nil property values.
func (c *Client) UpdateControllerService(controllerService *ControllerService) error {
	target := fmt.Sprintf("http://%s/%s/controller-services/%s",
		c.Config.Host, c.Config.ApiPath, controllerService.Component.Id)
	if _, err := c.JsonCall("PUT", target, controllerService, controllerService); err != nil {
		return err
	}
	c.CleanupNilProperties(controllerService.Component.Properties)
	return nil
}

// DeleteControllerService removes the service, passing its current revision
// version for NiFi's optimistic locking.
func (c *Client) DeleteControllerService(controllerService *ControllerService) error {
	target := fmt.Sprintf("http://%s/%s/controller-services/%s?version=%d",
		c.Config.Host, c.Config.ApiPath, controllerService.Component.Id, controllerService.Revision.Version)
	_, err := c.JsonCall("DELETE", target, nil, nil)
	return err
}
// SetControllerServiceState submits a minimal PUT carrying only the service
// id, current revision and desired state; the full entity is refreshed from
// the server response.
func (c *Client) SetControllerServiceState(controllerService *ControllerService, state string) error {
	payload := ControllerService{
		Revision: Revision{
			Version: controllerService.Revision.Version,
		},
		Component: ControllerServiceComponent{
			Id:    controllerService.Component.Id,
			State: state,
		},
	}
	target := fmt.Sprintf("http://%s/%s/controller-services/%s",
		c.Config.Host, c.Config.ApiPath, controllerService.Component.Id)
	_, err := c.JsonCall("PUT", target, payload, controllerService)
	return err
}

// EnableControllerService transitions the service to the ENABLED state.
func (c *Client) EnableControllerService(controllerService *ControllerService) error {
	return c.SetControllerServiceState(controllerService, "ENABLED")
}

// DisableControllerService transitions the service to the DISABLED state.
func (c *Client) DisableControllerService(controllerService *ControllerService) error {
	return c.SetControllerServiceState(controllerService, "DISABLED")
}
|
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/davecgh/go-spew/spew"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/client/go/teams"
)
// ChainFile mirrors the JSON layout of the input file: a single "chain"
// array of raw team sigchain links.
type ChainFile struct {
	Chain []teams.SCChainLink `json:"chain"`
}
// main runs main2 and converts any failure into a message on stderr plus a
// non-zero exit code.
func main() {
	if err := main2(); err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
		os.Exit(1)
	}
}
// main2 reads a team sigchain JSON file (path given as the single CLI
// argument), replays every link through the team sigchain state machine and
// dumps the resulting state to stdout.
func main2() (err error) {
	if len(os.Args) != 2 {
		return fmt.Errorf("Usage: tool <jsonfile>")
	}
	filepath := os.Args[1]
	cf, err := readChainFile(filepath)
	if err != nil {
		return err
	}
	// Minimal keybase client context: logger plus caches, no full service.
	g := libkb.NewGlobalContext().Init()
	g.Log = logger.New("sc")
	g.ConfigureCaches()
	mctx := libkb.NewMetaContextBackground(g)
	// reader is left as the zero UserVersion: the chain is replayed without
	// a particular reader's perspective.
	var reader keybase1.UserVersion
	var state *teams.TeamSigChainState
	for _, prelink := range cf.Chain {
		link, err := teams.UnpackChainLink(&prelink)
		if err != nil {
			return err
		}
		// Each link is attributed to the UID/eldest-seqno recorded in the
		// raw link itself; the `false` flag comes from NewSignerX's API.
		signerX := teams.NewSignerX(keybase1.NewUserVersion(prelink.UID, prelink.EldestSeqno), false)
		newState, err := teams.AppendChainLink(mctx.Ctx(), g, reader, state, link, &signerX)
		if err != nil {
			return err
		}
		state = &newState
	}
	fmt.Printf("%v\n", spew.Sdump(state))
	return nil
}
// readChainFile opens the JSON file at path and decodes it into a ChainFile.
func readChainFile(path string) (res ChainFile, err error) {
	f, err := os.Open(path)
	if err != nil {
		return res, err
	}
	// BUG FIX: the original never closed the file, leaking the descriptor.
	defer f.Close()
	err = json.NewDecoder(f).Decode(&res)
	return res, err
}
|
package master
import (
"github.com/OHopiak/fractal-load-balancer/core"
"github.com/jinzhu/gorm"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
type (
	// UserMiddlewareConfig configures the user-loading middleware.
	UserMiddlewareConfig struct {
		// Skipper defines a function to skip middleware.
		Skipper middleware.Skipper
		// DB is the handle used to look the session user up; it must be
		// set before the middleware is built.
		DB *gorm.DB
	}
)
var (
	// DefaultUserMiddlewareConfig is the default user middleware config.
	DefaultUserMiddlewareConfig = UserMiddlewareConfig{
		Skipper: middleware.DefaultSkipper,
	}
)
// UserMiddleware returns the user-loading middleware built from the default
// configuration and the given database handle.
func UserMiddleware(db *gorm.DB) echo.MiddlewareFunc {
	cfg := DefaultUserMiddlewareConfig
	cfg.DB = db
	return UserMiddlewareWithConfig(cfg)
}
// UserMiddlewareWithConfig builds middleware that resolves the session's
// "user_id" to a core.User and stores it in the echo context under "user".
// Every lookup failure short of a session error is deliberately non-fatal:
// the request continues without a user set. Panics at build time when
// config.DB is nil.
func UserMiddlewareWithConfig(config UserMiddlewareConfig) echo.MiddlewareFunc {
	// Defaults
	if config.Skipper == nil {
		config.Skipper = DefaultUserMiddlewareConfig.Skipper
	}
	if config.DB == nil {
		panic("db in user middleware must be set")
	}
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			if config.Skipper(c) {
				return next(c)
			}
			sess, err := GetSession(c)
			if err != nil {
				return err
			}
			// No user_id in the session: anonymous request, proceed.
			userIdRaw, ok := sess.Values["user_id"]
			if !ok {
				return next(c)
			}
			userId, ok := userIdRaw.(uint)
			if !ok {
				c.Logger().Error("the user ID is not uint")
				return next(c)
			}
			if userId == 0 {
				c.Logger().Error("the user ID is set to 0")
				return next(c)
			}
			user := core.User{}
			// NOTE(review): the gorm error is not inspected here — a
			// not-found lookup is detected via user.ID == 0 below.
			config.DB.First(&user, userId)
			if user.ID == 0 {
				return next(c)
			}
			c.Set("user", user)
			return next(c)
		}
	}
}
|
package crypto
import (
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"github.com/lestrrat/go-jwx/jwk"
)
// PublicKeysJWK serializes the given RSA public keys as an indented JWK
// set, using each map key as the corresponding entry's key ID ("kid").
func PublicKeysJWK(keys map[string]*rsa.PublicKey) ([]byte, error) {
	set := &jwk.Set{}
	for kid, key := range keys {
		rk, err := jwk.New(key)
		if err != nil {
			return nil, err
		}
		// NOTE(review): the error returned by Set is ignored here —
		// presumably it cannot fail for KeyIDKey with a string value, but
		// worth confirming against the jwk package docs.
		rk.Set(jwk.KeyIDKey, kid)
		set.Keys = append(set.Keys, rk)
	}
	body, err := json.MarshalIndent(set, "", " ")
	if err != nil {
		return nil, err
	}
	return body, nil
}
// LoadPublicKeyFromJWK looks up the key with the given kid in a JWK set
// string and materializes it as an *rsa.PublicKey.
func LoadPublicKeyFromJWK(jwkString, kid string) (*rsa.PublicKey, error) {
	set, err := jwk.ParseString(jwkString)
	if err != nil {
		return nil, err
	}
	keys := set.LookupKeyID(kid)
	if len(keys) == 0 {
		return nil, fmt.Errorf("indicated key id not found")
	}
	pk, ok := keys[0].(*jwk.RSAPublicKey)
	if !ok {
		return nil, errors.New("indicated key is not a public key")
	}
	rsaKey, err := pk.Materialize()
	if err != nil {
		// BUG FIX: the original ran the non-comma-ok type assertion below
		// even when Materialize failed, panicking on the nil interface.
		return nil, err
	}
	return rsaKey.(*rsa.PublicKey), nil
}
// LoadPublicKeyFromFile reads the PEM file at pemPath and parses it as an
// RSA public key.
func LoadPublicKeyFromFile(pemPath string) (*rsa.PublicKey, error) {
	raw, err := ioutil.ReadFile(pemPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read PEM file: %s", err)
	}
	return LoadPublicKeyFromData(raw)
}
// LoadPublicKeyFromText parses a PEM-encoded RSA public key from a string.
func LoadPublicKeyFromText(data string) (*rsa.PublicKey, error) {
	return LoadPublicKeyFromData([]byte(data))
}
// LoadPublicKeyFromData parses a PEM-encoded PKIX ("PUBLIC KEY") block into
// an *rsa.PublicKey.
func LoadPublicKeyFromData(data []byte) (*rsa.PublicKey, error) {
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("failed to decode pem block")
	}
	if block.Type != "PUBLIC KEY" {
		// BUG FIX: the original message wrongly said "RSA PRIVATE KEY"
		// even though this branch rejects non-PUBLIC-KEY blocks.
		return nil, fmt.Errorf("PEM block isn't a PUBLIC KEY")
	}
	pubkeyInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("public key can't be decoded: %s", err)
	}
	pubkey, ok := pubkeyInterface.(*rsa.PublicKey)
	if !ok {
		return nil, errors.New("couldn't convert to a RSA public key")
	}
	return pubkey, nil
}
// LoadPrivateKeyFromFile reads the PEM file at pemPath and parses it as an
// RSA private key.
func LoadPrivateKeyFromFile(pemPath string) (*rsa.PrivateKey, error) {
	raw, err := ioutil.ReadFile(pemPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read PEM file: %s", err)
	}
	return LoadPrivateKeyFromData(raw)
}
// LoadPrivateKeyFromText parses a PEM-encoded RSA private key from a string.
func LoadPrivateKeyFromText(data string) (*rsa.PrivateKey, error) {
	return LoadPrivateKeyFromData([]byte(data))
}
// LoadPrivateKeyFromData parses a PEM-encoded PKCS#1 ("RSA PRIVATE KEY")
// block into an *rsa.PrivateKey.
func LoadPrivateKeyFromData(data []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(data)
	switch {
	case block == nil:
		return nil, fmt.Errorf("failed to decode pem block")
	case block.Type != "RSA PRIVATE KEY":
		return nil, fmt.Errorf("PEM block isn't a RSA PRIVATE KEY")
	}
	key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("private key can't be decoded: %s", err)
	}
	return key, nil
}
|
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"time"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/integr8ly/delorean/pkg/services"
"github.com/integr8ly/delorean/pkg/utils"
olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/xanzy/go-gitlab"
)
const (
	// gitlabTokenKey is the viper key under which the GitLab token is stored.
	gitlabTokenKey = "gitlab_token"
	// Base URL for gitlab API and for the managed-tenenats fork and origin repos
	gitlabURL         = "https://gitlab.cee.redhat.com"
	gitlabAPIEndpoint = "api/v4"
	// Base URL for the integreatly-opeartor repo
	githubURL = "https://github.com"
	// The branch to target with the merge request
	managedTenantsMainBranch = "main"
	// Info for the commit and merge request
	branchNameTemplate    = "%s-%s-v%s"
	commitMessageTemplate = "update %s %s to %s"
	commitAuthorName      = "Delorean"
	commitAuthorEmail     = "cloud-services-delorean@redhat.com"
	mergeRequestTitleTemplate = "Update %s %s to %s" // channel, version
	// Environment variable names (and templated value) patched into the CSV.
	envVarNameUseClusterStorage      = "USE_CLUSTER_STORAGE"
	envVarNameAlertEmailAddress      = "ALERTING_EMAIL_ADDRESS"
	envVarNameAlertEmailAddressValue = "{{ alertingEmailAddress }}"
)
// releaseChannel describes one OSD release channel of an addon: where its
// files live in the managed-tenants repo and whether prereleases may land.
type releaseChannel struct {
	Name            string `json:"name"`
	Directory       string `json:"directory"`
	Environment     string `json:"environment"`
	AllowPreRelease bool   `json:"allow_pre_release"`
}

// addonCSVConfig points at the git repo and path holding the addon's CSV.
type addonCSVConfig struct {
	Repo string `json:"repo"`
	Path string `json:"path"`
}

// deploymentContainerEnvVar is a single name/value env-var override.
type deploymentContainerEnvVar struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

// deploymentContainer selects a container and the env vars to set on it.
type deploymentContainer struct {
	Name    string                      `json:"name"`
	EnvVars []deploymentContainerEnvVar `json:"env_vars"`
}

// deployment selects a deployment in the CSV to override.
type deployment struct {
	Name      string              `json:"name"`
	Container deploymentContainer `json:"container"`
}

// override bundles the optional per-addon CSV deployment overrides.
type override struct {
	Deployment deployment `json:"deployment"`
}

// addonConfig is the per-addon entry of the addons configuration file.
type addonConfig struct {
	Name     string           `json:"name"`
	CSV      addonCSVConfig   `json:"csv"`
	Channels []releaseChannel `json:"channels"`
	Override *override        `json:"override,omitempty"`
}

// addons is the root of the addons configuration file.
type addons struct {
	Addons []addonConfig `json:"addons"`
}
// directory returns the relative path of the managed-teneants repo to the
// addon for the given channel
func (c *releaseChannel) bundlesDirectory() string {
	return fmt.Sprintf("addons/%s/bundles", c.Directory)
}

// addonFile returns the channel's addon.yaml path, relative to the
// managed-tenants repo root, for this channel's environment.
func (c *releaseChannel) addonFile() string {
	return fmt.Sprintf("addons/%s/metadata/%s/addon.yaml", c.Directory, c.Environment)
}
// osdAddonReleaseFlags holds the raw CLI flag values of the osd-addon
// sub-command.
type osdAddonReleaseFlags struct {
	version                 string
	channel                 string
	mergeRequestDescription string
	managedTenantsOrigin    string
	managedTenantsFork      string
	addonName               string
	addonsConfig            string
}

// osdAddonReleaseCmd carries everything the release run needs: resolved
// configuration, GitLab API services and the two cloned working trees.
type osdAddonReleaseCmd struct {
	flags               *osdAddonReleaseFlags
	gitlabToken         string
	version             *utils.RHMIVersion
	gitlabMergeRequests services.GitLabMergeRequestsService
	gitlabProjects      services.GitLabProjectsService
	// managedTenantsDir/Repo are the local clone of the managed-tenants repo.
	managedTenantsDir  string
	managedTenantsRepo *git.Repository
	gitPushService     services.GitPushService
	addonConfig        *addonConfig
	currentChannel     *releaseChannel
	// addonDir is the local clone of the addon repo at the release tag.
	addonDir string
}
// currentCSVRe matches the "currentCSV: ..." line of an addon.yaml file.
// Compiled once at package init instead of on every setCurrentCSV call.
var currentCSVRe = regexp.MustCompile(`currentCSV: .*`)

// addon wraps the raw text content of a managed-tenants addon.yaml file.
type addon struct {
	content string
}

// setCurrentCSV rewrites the "currentCSV:" line in the addon file content
// to point at the given CSV name.
func (a *addon) setCurrentCSV(currentCSV string) {
	a.content = currentCSVRe.ReplaceAllString(a.content, fmt.Sprintf("currentCSV: %s", currentCSV))
}

// newAddon loads the addon.yaml file at addonPath into an addon value.
func newAddon(addonPath string) (*addon, error) {
	c, err := ioutil.ReadFile(addonPath)
	if err != nil {
		return nil, err
	}
	return &addon{content: string(c)}, nil
}
// init registers the "osd-addon" release sub-command, which pushes an
// addon version to the managed-tenants fork and opens a merge request
// against the origin repo.
func init() {
	f := &osdAddonReleaseFlags{}
	cmd := &cobra.Command{
		Use:   "osd-addon",
		Short: "Create a MR to the managed-tenants repo for the giving addon to update its version",
		Run: func(cmd *cobra.Command, args []string) {
			gitlabToken, err := requireValue(gitlabTokenKey)
			if err != nil {
				handleError(err)
			}
			// Prepare
			c, err := newOSDAddonReleaseCmd(f, gitlabToken)
			if err != nil {
				handleError(err)
			}
			// Run
			err = c.run()
			if err != nil {
				handleError(err)
			}
		},
	}
	releaseCmd.AddCommand(cmd)
	cmd.Flags().StringVar(&f.addonName, "name", "", "Name of the addon to update")
	cmd.MarkFlagRequired("name")
	cmd.Flags().StringVar(
		&f.version, "version", "",
		"The version to push to the managed-tenants repo (ex \"2.0.0\", \"2.0.0-er4\")")
	cmd.MarkFlagRequired("version")
	cmd.Flags().StringVar(&f.addonsConfig, "addons-config", "", "Configuration files for the addons")
	cmd.MarkFlagRequired("addons-config")
	// FIX (go vet S1039): the help text is a constant, so the original's
	// fmt.Sprintf wrapper with no verbs or arguments was redundant.
	cmd.Flags().StringVar(
		&f.channel, "channel", "stage",
		"The OSD channel to which push the release. The channel values are defined in the addons-config file",
	)
	cmd.Flags().String(
		"gitlab-token",
		"",
		"GitLab token to Push the changes and open the MR")
	viper.BindPFlag(gitlabTokenKey, cmd.Flags().Lookup("gitlab-token"))
	cmd.Flags().StringVar(
		&f.mergeRequestDescription,
		"merge-request-description",
		"",
		"Optional merge request description that can be used to notify secific users (ex \"ping: @dbizzarr\")",
	)
	cmd.Flags().StringVar(
		&f.managedTenantsOrigin,
		"managed-tenants-origin",
		"service/managed-tenants",
		"managed-tenants origin repository from where to fork the main branch")
	cmd.Flags().StringVar(
		&f.managedTenantsFork,
		"managed-tenants-fork",
		"integreatly-qe/managed-tenants",
		"managed-tenants fork repository where to push the release files")
}
// findAddon returns a pointer to a copy of the addon configuration named
// addonName, or nil when no such addon exists in config.
func findAddon(config *addons, addonName string) *addonConfig {
	for i := range config.Addons {
		if config.Addons[i].Name == addonName {
			match := config.Addons[i]
			return &match
		}
	}
	return nil
}
// findChannel returns a pointer to a copy of the channel named channelName,
// or nil when the addon does not define such a channel.
func findChannel(addon *addonConfig, channelName string) *releaseChannel {
	for i := range addon.Channels {
		if addon.Channels[i].Name == channelName {
			match := addon.Channels[i]
			return &match
		}
	}
	return nil
}
// newOSDAddonReleaseCmd validates the flags, resolves the addon and channel
// from the addons config file, authenticates against GitLab, and clones
// both the managed-tenants repo (origin, main branch, with a "fork" remote
// added) and the addon repo at the release tag. Returns the fully wired
// command ready for run().
func newOSDAddonReleaseCmd(flags *osdAddonReleaseFlags, gitlabToken string) (*osdAddonReleaseCmd, error) {
	version, err := utils.NewVersion(flags.version, olmType)
	if err != nil {
		return nil, err
	}
	addonsConfig := &addons{}
	if err := utils.PopulateObjectFromYAML(flags.addonsConfig, addonsConfig); err != nil {
		return nil, err
	}
	currentAddon := findAddon(addonsConfig, flags.addonName)
	if currentAddon == nil {
		return nil, fmt.Errorf("can not find configuration for addon %s in config file %s", flags.addonName, flags.addonsConfig)
	}
	currentChannel := findChannel(currentAddon, flags.channel)
	if currentChannel == nil {
		return nil, fmt.Errorf("can not find channel %s for addon %s in config file %s", flags.channel, flags.addonName, flags.addonsConfig)
	}
	fmt.Printf("create osd addon release for %s %s to the %s channel\n", flags.addonName, version.TagName(), flags.channel)
	// Prepare the GitLab Client
	gitlabClient, err := gitlab.NewClient(
		gitlabToken,
		gitlab.WithBaseURL(fmt.Sprintf("%s/%s", gitlabURL, gitlabAPIEndpoint)),
	)
	if err != nil {
		return nil, err
	}
	fmt.Print("gitlab client initialized and authenticated\n")
	gitCloneService := &services.DefaultGitCloneService{}
	// Clone the managed tenants
	// TODO: Move the clone functions inside the run() method to improve the test covered code
	managedTenantsDir, managedTenantsRepo, err := gitCloneService.CloneToTmpDir(
		"managed-tenants-",
		fmt.Sprintf("%s/%s", gitlabURL, flags.managedTenantsOrigin),
		plumbing.NewBranchReferenceName(managedTenantsMainBranch),
	)
	if err != nil {
		return nil, err
	}
	fmt.Printf("managed-tenants repo cloned to %s\n", managedTenantsDir)
	// Add the fork remote to the managed-tenats repo
	_, err = managedTenantsRepo.CreateRemote(&config.RemoteConfig{
		Name: "fork",
		URLs: []string{fmt.Sprintf("%s/%s", gitlabURL, flags.managedTenantsFork)},
	})
	if err != nil {
		return nil, err
	}
	fmt.Print("added the fork remote to the managed-tenants repo\n")
	// Clone the repo to get the csv for the addon
	csvDir, _, err := gitCloneService.CloneToTmpDir(
		"addon-csv-",
		currentAddon.CSV.Repo,
		plumbing.NewTagReferenceName(version.TagName()),
	)
	if err != nil {
		return nil, err
	}
	fmt.Printf("addon cloned to %s\n", csvDir)
	return &osdAddonReleaseCmd{
		flags:               flags,
		gitlabToken:         gitlabToken,
		version:             version,
		gitlabMergeRequests: gitlabClient.MergeRequests,
		gitlabProjects:      gitlabClient.Projects,
		managedTenantsDir:   managedTenantsDir,
		managedTenantsRepo:  managedTenantsRepo,
		gitPushService:      &services.DefaultGitPushService{},
		currentChannel:      currentChannel,
		addonConfig:         currentAddon,
		addonDir:            csvDir,
	}, nil
}
// run executes the OSD addon release: it validates the target channel,
// creates a release branch in the managed-tenants repo, copies the OLM
// manifests, updates the addon and CSV files, commits and pushes the branch
// to the fork remote, and opens a GitLab merge request against the origin
// repo. On success the repo is checked back out to the main branch.
func (c *osdAddonReleaseCmd) run() error {
	if c.currentChannel == nil {
		return fmt.Errorf("currentChannel is not valid: %v", c.currentChannel)
	}
	// Pre-release versions may only be pushed to channels that allow them
	if c.version.IsPreRelease() && !c.currentChannel.AllowPreRelease {
		return fmt.Errorf("the prerelease version %s can't be pushed to the %s channel", c.version, c.currentChannel.Name)
	}
	managedTenantsHead, err := c.managedTenantsRepo.Head()
	if err != nil {
		return err
	}
	// Verify that the repo is on the main branch before branching off it
	if managedTenantsHead.Name() != plumbing.NewBranchReferenceName(managedTenantsMainBranch) {
		return fmt.Errorf("the managed-tenants repo is pointing to %s instead of main", managedTenantsHead.Name())
	}
	managedTenantsTree, err := c.managedTenantsRepo.Worktree()
	if err != nil {
		return err
	}
	// Create a new release branch on the managed-tenants repo
	managedTenantsBranch := fmt.Sprintf(branchNameTemplate, c.addonConfig.Name, c.currentChannel.Name, c.version)
	branchRef := plumbing.NewBranchReferenceName(managedTenantsBranch)
	fmt.Printf("create the branch %s in the managed-tenants repo\n", managedTenantsBranch)
	err = managedTenantsTree.Checkout(&git.CheckoutOptions{
		Branch: branchRef,
		Create: true,
	})
	if err != nil {
		return err
	}
	// Copy the OLM manifests from the integreatly-operator repo to the
	// managed-tenants repo
	manifestsDirectory, err := c.copyTheOLMManifests()
	if err != nil {
		return err
	}
	// Stage every copied manifest file
	err = managedTenantsTree.AddGlob(fmt.Sprintf("%s/*", manifestsDirectory))
	if err != nil {
		return err
	}
	// Update the addon.yaml file so its currentCSV points at this release
	addonFile, err := c.updateTheAddonFile()
	if err != nil {
		return err
	}
	// Stage the addon.yaml file
	_, err = managedTenantsTree.Add(addonFile)
	if err != nil {
		return err
	}
	// Update the integreatly-operator.vx.x.x.clusterserviceversion.yaml
	// (apply the env var overrides and install-mode changes)
	_, err = c.updateTheCSVManifest()
	if err != nil {
		return err
	}
	// Rename the CSV manifest to its versioned .j2 template name and stage it
	csvTemplate, err := c.renameCSVFile()
	if err != nil {
		return err
	}
	_, err = managedTenantsTree.Add(csvTemplate)
	if err != nil {
		return err
	}
	// Commit all staged changes
	fmt.Print("commit all changes in the managed-tenants repo\n")
	_, err = managedTenantsTree.Commit(
		fmt.Sprintf(commitMessageTemplate, c.addonConfig.Name, c.currentChannel.Name, c.version),
		&git.CommitOptions{
			All: true,
			Author: &object.Signature{
				Name:  commitAuthorName,
				Email: commitAuthorEmail,
				When:  time.Now(),
			},
		},
	)
	if err != nil {
		return err
	}
	// Verify that the tree is clean after the commit
	status, err := managedTenantsTree.Status()
	if err != nil {
		return err
	}
	if len(status) != 0 {
		return fmt.Errorf("the tree is not clean, uncommited changes:\n%+v", status)
	}
	// Push the release branch to the fork remote
	fmt.Printf("push the managed-tenants repo to the fork remote\n")
	err = c.gitPushService.Push(c.managedTenantsRepo, &git.PushOptions{
		RemoteName: "fork",
		Auth:       &http.BasicAuth{Password: c.gitlabToken},
		RefSpecs: []config.RefSpec{
			config.RefSpec(branchRef + ":" + branchRef),
		},
	})
	if err != nil {
		return err
	}
	// Look up the origin project so the merge request can target it
	targetProject, _, err := c.gitlabProjects.GetProject(c.flags.managedTenantsOrigin, &gitlab.GetProjectOptions{})
	if err != nil {
		return err
	}
	fmt.Print("create the MR to the managed-tenants origin\n")
	mr, _, err := c.gitlabMergeRequests.CreateMergeRequest(c.flags.managedTenantsFork, &gitlab.CreateMergeRequestOptions{
		Title:              gitlab.String(fmt.Sprintf(mergeRequestTitleTemplate, c.addonConfig.Name, c.currentChannel.Name, c.version)),
		Description:        gitlab.String(c.flags.mergeRequestDescription),
		SourceBranch:       gitlab.String(managedTenantsBranch),
		TargetBranch:       gitlab.String(managedTenantsMainBranch),
		TargetProjectID:    gitlab.Int(targetProject.ID),
		RemoveSourceBranch: gitlab.Bool(true),
	})
	if err != nil {
		return err
	}
	fmt.Printf("merge request for version %s and channel %s created successfully\n", c.version, c.currentChannel.Name)
	fmt.Printf("MR: %s\n", mr.WebURL)
	// Reset the managed-tenants repository back to the main branch
	err = managedTenantsTree.Checkout(&git.CheckoutOptions{Branch: plumbing.NewBranchReferenceName(managedTenantsMainBranch)})
	if err != nil {
		return err
	}
	return nil
}
// copyTheOLMManifests copies the OLM manifest bundle for the release version
// from the addon repo into the managed-tenants repo. It returns the
// destination path relative to the managed-tenants repo root.
func (c *osdAddonReleaseCmd) copyTheOLMManifests() (string, error) {
	// path.Join accepts multiple segments and cleans the result, so the
	// nested segments can be passed directly instead of pre-formatting
	// them with fmt.Sprintf.
	source := path.Join(c.addonDir, c.addonConfig.CSV.Path, c.version.Base())
	// Keep the relative destination as a plain formatted string: it is
	// returned to the caller and used verbatim as a glob prefix.
	relativeDestination := fmt.Sprintf("%s/%s", c.currentChannel.bundlesDirectory(), c.version.Base())
	destination := path.Join(c.managedTenantsDir, relativeDestination)
	fmt.Printf("copy files from %s to %s\n", source, destination)
	if err := utils.CopyDirectory(source, destination); err != nil {
		// wrap so the caller sees which copy failed
		return "", fmt.Errorf("copying manifests from %s to %s: %w", source, destination, err)
	}
	return relativeDestination, nil
}
// updateTheAddonFile rewrites the channel's addon file so that its
// currentCSV field points at this release, and returns the file path
// relative to the managed-tenants repo root.
func (c *osdAddonReleaseCmd) updateTheAddonFile() (string, error) {
	relative := c.currentChannel.addonFile()
	addonFilePath := path.Join(c.managedTenantsDir, relative)
	fmt.Printf("update the currentCSV value in addon file %s to %s\n", relative, c.version)
	addon, err := newAddon(addonFilePath)
	if err != nil {
		return "", err
	}
	// point currentCSV at the versioned CSV name for this release
	currentCSV := fmt.Sprintf("%s.v%s", c.addonConfig.Name, c.version.Base())
	addon.setCurrentCSV(currentCSV)
	if err = ioutil.WriteFile(addonFilePath, []byte(addon.content), os.ModePerm); err != nil {
		return "", err
	}
	return relative, nil
}
// updateTheCSVManifest applies the addon's configured overrides to the CSV
// manifest of the release version and marks the SingleNamespace install
// mode as supported. It returns the manifest path relative to the
// managed-tenants repo root.
func (c *osdAddonReleaseCmd) updateTheCSVManifest() (string, error) {
	relative := fmt.Sprintf("%s/%s/%s.clusterserviceversion.yaml", c.currentChannel.bundlesDirectory(), c.version.Base(), c.addonConfig.Name)
	csvFile := path.Join(c.managedTenantsDir, relative)
	fmt.Printf("update csv manifest file %s\n", relative)
	csv := &olmapiv1alpha1.ClusterServiceVersion{}
	err := utils.PopulateObjectFromYAML(csvFile, csv)
	if err != nil {
		return "", err
	}
	// Apply env var overrides to the targeted deployment/container, if any
	if c.addonConfig.Override != nil {
		_, deployment := utils.FindDeploymentByName(csv.Spec.InstallStrategy.StrategySpec.DeploymentSpecs, c.addonConfig.Override.Deployment.Name)
		if deployment != nil {
			i, container := utils.FindContainerByName(deployment.Spec.Template.Spec.Containers, c.addonConfig.Override.Deployment.Container.Name)
			if container != nil {
				for _, envVar := range c.addonConfig.Override.Deployment.Container.EnvVars {
					container.Env = utils.AddOrUpdateEnvVar(container.Env, envVar.Name, envVar.Value)
				}
				// Bug fix: write the updated container back only when it was
				// found; the old code did this outside the nil check and
				// dereferenced a nil pointer when the container was missing.
				deployment.Spec.Template.Spec.Containers[i] = *container
			}
		}
	}
	// Set the SingleNamespace install mode to supported
	mi, m := utils.FindInstallMode(csv.Spec.InstallModes, olmapiv1alpha1.InstallModeTypeSingleNamespace)
	if m != nil {
		m.Supported = true
		// Bug fix: same pattern — only write back when the install mode
		// exists, otherwise *m panics on a nil pointer.
		csv.Spec.InstallModes[mi] = *m
	}
	err = utils.WriteK8sObjectToYAML(csv, csvFile)
	if err != nil {
		return "", err
	}
	return relative, nil
}
// renameCSVFile renames the plain CSV manifest to the versioned .j2
// template name expected by the managed-tenants repo, and returns the new
// path relative to the repo root.
func (c *osdAddonReleaseCmd) renameCSVFile() (string, error) {
	o := fmt.Sprintf("%s/%s/%s.clusterserviceversion.yaml", c.currentChannel.bundlesDirectory(), c.version.Base(), c.addonConfig.Name)
	n := fmt.Sprintf("%s/%s/%s.v%s.clusterserviceversion.yaml.j2", c.currentChannel.bundlesDirectory(), c.version.Base(), c.addonConfig.Name, c.version.Base())
	// fmt.Printf replaces the redundant fmt.Println(fmt.Sprintf(...)) chain;
	// the emitted line is identical.
	fmt.Printf("Rename file from %s to %s\n", o, n)
	oldPath := path.Join(c.managedTenantsDir, o)
	newPath := path.Join(c.managedTenantsDir, n)
	return n, os.Rename(oldPath, newPath)
}
|
package que_test
import (
"bytes"
"context"
"crypto/rand"
"database/sql"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"strings"
"sync"
"testing"
"time"
_ "github.com/lib/pq"
"github.com/tnclong/go-que"
"github.com/tnclong/go-que/pg"
)
// TestQueue_Queue checks that Queue() reports the name the queue was
// created with and that a fresh queue closes cleanly.
func TestQueue_Queue(t *testing.T) {
	const queue = "tt-queue"
	q := newQueue(t, queue)
	if got := q.Queue(); got != queue {
		t.Fatalf("want queue is %s but get %s", queue, got)
	}
	if err := q.Close(); err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_EnqueueLockUnlock enqueues a job whose args cover many Go
// types, locks it, verifies every job field and that each encoded argument
// round-trips through the stored JSON array, and finally unlocks it.
func TestQueue_EnqueueLockUnlock(t *testing.T) {
	q := newQueue(t, "")
	type user struct {
		Name string
	}
	runAt := time.Now()
	amap := map[string]int{"1": 1}
	astruct := user{Name: "name"}
	id, err := q.Enqueue(
		context.Background(), nil, runAt,
		1, 2.0, math.MaxInt64, "text", true,
		runAt, amap, astruct,
	)
	if err != nil {
		t.Fatalf("Enqueue get err: %v", err)
	}
	if id <= 0 {
		t.Fatalf("want id greater than zero but get %d", id)
	}
	jobs, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatalf("Lock get err: %v", err)
	}
	if len(jobs) != 1 {
		t.Fatalf("want length of jobs is 1 but get %d", len(jobs))
	}
	job := jobs[0]
	if job.ID() != id {
		t.Fatalf("want id is %d but get %d", id, job.ID())
	}
	if job.Queue() != q.Queue() {
		t.Fatalf("want queue is %s but get %s", job.Queue(), q.Queue())
	}
	if !job.RunAt().Equal(runAt) {
		t.Fatalf("want run at %s but get %s", job.RunAt().String(), runAt.String())
	}
	if job.RetryCount() != 0 {
		t.Fatalf("want retry count is 0 but get %d", job.RetryCount())
	}
	if job.LastErrMsg() != "" {
		t.Fatalf("want last err msg is %q but get %q", "", job.LastErrMsg())
	}
	if job.LastErrStack() != "" {
		t.Fatalf("want last err stack is %q but get %q", "", job.LastErrStack())
	}
	// Args are stored as a JSON array; decode it token by token and check
	// each element against what was enqueued. (Renamed enc -> dec: it is a
	// json.Decoder, not an encoder.)
	argsBytes := job.Args()
	dec := json.NewDecoder(bytes.NewReader(argsBytes))
	tok, err := dec.Token()
	if err != nil {
		t.Fatal(err)
	}
	delim, ok := tok.(json.Delim)
	if !ok {
		t.Fatalf("want a json.Delim but get %T(%v)", tok, tok)
	}
	if delim != '[' {
		t.Fatalf("want %c but get %c", '[', delim)
	}
	var intv int // 0
	if err = dec.Decode(&intv); err != nil {
		t.Fatal(err)
	}
	if intv != 1 {
		t.Fatalf("want 1 but get %d", intv)
	}
	var float64v float64 // 1
	if err = dec.Decode(&float64v); err != nil {
		t.Fatal(err)
	}
	if float64v != 2.0 {
		t.Fatalf("want 2.0 but get %f", float64v)
	}
	var int64v int64 // 2
	if err = dec.Decode(&int64v); err != nil {
		t.Fatal(err)
	}
	if int64v != math.MaxInt64 {
		t.Fatalf("want %d but get %d", math.MaxInt64, int64v)
	}
	var stringv string // 3
	if err = dec.Decode(&stringv); err != nil {
		t.Fatal(err)
	}
	if stringv != "text" {
		// bug fix: the failure message used to print "test" instead of the
		// actual expected value "text"
		t.Fatalf("want %s but get %s", "text", stringv)
	}
	var boolv bool // 4
	if err = dec.Decode(&boolv); err != nil {
		t.Fatal(err)
	}
	if boolv != true {
		t.Fatalf("want true but get %v", boolv)
	}
	var timev time.Time // 5
	if err = dec.Decode(&timev); err != nil {
		t.Fatal(err)
	}
	if !timev.Equal(runAt) {
		t.Fatalf("want %s but get %s", runAt.String(), timev.String())
	}
	var mapv map[string]int // 6
	if err = dec.Decode(&mapv); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(mapv, amap) {
		t.Fatalf("want %#v but get %#v", amap, mapv)
	}
	var structv user // 7
	if err = dec.Decode(&structv); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(structv, astruct) {
		t.Fatalf("want %#v but get %#v", astruct, structv)
	}
	tok, err = dec.Token()
	if err != nil {
		t.Fatal(err)
	}
	delim, ok = tok.(json.Delim)
	if !ok {
		t.Fatalf("want a json.Delim but get %T(%v)", tok, tok)
	}
	if delim != ']' {
		t.Fatalf("want %c but get %c", ']', delim)
	}
	tok, err = dec.Token()
	if tok != nil || err != io.EOF {
		t.Fatalf("want <nil>, EOF but get %v, %v", tok, err)
	}
	// A job that is still locked must not be handed out again.
	jobs2, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 0 {
		t.Fatalf("want length of job2 is zero but get %d", len(jobs2))
	}
	err = q.Unlock(context.Background(), []int64{id})
	if err != nil {
		t.Fatal(err)
	}
	err = q.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_ErrNotLockedJobsInLocal verifies that unlocking an ID that this
// queue instance never locked yields ErrNotLockedJobsInLocal, wrapped in an
// *ErrQueue carrying the queue name and an *ErrUnlock carrying the
// offending IDs.
func TestQueue_ErrNotLockedJobsInLocal(t *testing.T) {
	q := newQueue(t, "")
	// an ID that was never locked by this instance
	var id int64 = 100000
	err := q.Unlock(context.Background(), []int64{id})
	if !errors.Is(err, que.ErrNotLockedJobsInLocal) {
		t.Fatalf("want err is %v but get %v", que.ErrNotLockedJobsInLocal, err)
	}
	// the chain must contain an *ErrQueue naming this queue
	var errQue *que.ErrQueue
	if !errors.As(err, &errQue) {
		t.Fatalf("want %T but get %T", errQue, err)
	}
	if errQue.Queue != q.Queue() {
		t.Fatalf("want queue is %s but get %s", q.Queue(), errQue.Queue)
	}
	// ... and an *ErrUnlock listing exactly the ID that failed
	var errUnlock *que.ErrUnlock
	if !errors.As(errQue.Err, &errUnlock) {
		t.Fatalf("want %T but get %T", errUnlock, errQue.Err)
	}
	if len(errUnlock.IDs) != 1 {
		t.Fatalf("want length of id is 1 but get %d", len(errUnlock.IDs))
	}
	aid := errUnlock.IDs[0]
	if aid != id {
		t.Fatalf("want id is %d but get %d", id, aid)
	}
	err = q.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_LockSuccessAfterUnlock verifies that a job released via Unlock
// can immediately be locked again by the same instance.
func TestQueue_LockSuccessAfterUnlock(t *testing.T) {
	q := newQueue(t, "")
	id, err := q.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	// small helper so both lock attempts share the same error handling
	lock := func() []que.Job {
		jobs, lockErr := q.Lock(context.Background(), 1)
		if lockErr != nil {
			t.Fatal(lockErr)
		}
		return jobs
	}
	jobs1 := lock()
	if len(jobs1) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs1))
	}
	if err = q.Unlock(context.Background(), []int64{id}); err != nil {
		t.Fatal(err)
	}
	jobs2 := lock()
	if len(jobs2) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs2))
	}
	if err = q.Close(); err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_SameQueueDiffInstance verifies lock exclusivity between two
// queue instances that share the same queue name: a job locked by one
// instance is invisible to the other until it is unlocked.
func TestQueue_SameQueueDiffInstance(t *testing.T) {
	// q1 locks the only job
	q1 := newQueue(t, "")
	id, err := q1.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs1, err := q1.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs1) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs1))
	}
	// while q1 holds the lock, q2 gets nothing
	q2 := newQueue(t, q1.Queue())
	jobs2, err := q2.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 0 {
		t.Fatalf("want lock 0 job but get %d", len(jobs2))
	}
	// q1 unlocks
	err = q1.Unlock(context.Background(), []int64{id})
	if err != nil {
		t.Fatal(err)
	}
	// after q1 unlocks, q2 can take the job
	jobs3, err := q2.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs3) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs3))
	}
	// q1 enqueues another job
	_, err = q1.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	// q2 can lock the new job as well
	jobs4, err := q2.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs4) != 1 {
		// bug fix: report len(jobs4) here; the old message printed
		// len(jobs3), which had already been asserted above
		t.Fatalf("want lock 1 job but get %d", len(jobs4))
	}
	err = q1.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
	err = q2.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_DiffQueue verifies that queues are isolated from each other:
// a job enqueued on one queue is not visible to a differently named queue.
func TestQueue_DiffQueue(t *testing.T) {
	// q1 enqueues a job on its own (random) queue
	q1 := newQueue(t, "")
	_, err := q1.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	// q2 uses a different random queue, so it must see nothing
	q2 := newQueue(t, "")
	jobs2, err := q2.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 0 {
		t.Fatalf("want lock 0 job but get %d", len(jobs2))
	}
	err = q1.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
	err = q2.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestQueue_Close verifies the post-Close contract: Queue() stays callable,
// Enqueue and Lock return ErrQueueAlreadyClosed, and closing a second time
// also returns ErrQueueAlreadyClosed.
func TestQueue_Close(t *testing.T) {
	q1 := newQueue(t, "")
	err := q1.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
	// Queue() must remain usable after Close (result intentionally ignored)
	q1.Queue()
	_, err = q1.Enqueue(context.Background(), nil, time.Now())
	if !errors.Is(err, que.ErrQueueAlreadyClosed) {
		t.Fatalf("want err %v but get %v", que.ErrQueueAlreadyClosed, err)
	}
	_, err = q1.Lock(context.Background(), 1)
	if !errors.Is(err, que.ErrQueueAlreadyClosed) {
		t.Fatalf("want err %v but get %v", que.ErrQueueAlreadyClosed, err)
	}
	// a repeated Close is an error, not a no-op
	err = q1.Close()
	if !errors.Is(err, que.ErrQueueAlreadyClosed) {
		t.Fatalf("want err %v but get %v", que.ErrQueueAlreadyClosed, err)
	}
}
// TestQueue_LockSucessAfterClose verifies that closing a queue instance
// releases its locks: a job locked by a closed instance becomes lockable
// by a new instance on the same queue.
func TestQueue_LockSucessAfterClose(t *testing.T) {
	// q1 enqueues and locks a job
	q1 := newQueue(t, "")
	_, err := q1.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs1, err := q1.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs1) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs1))
	}
	// closing q1 implicitly releases the lock it held
	err = q1.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
	// a fresh instance on the same queue can now lock the job
	q2 := newQueue(t, q1.Queue())
	jobs2, err := q2.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs2))
	}
	err = q2.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// testQueue_Resolve is a shared helper that resolves three locked jobs via
// the supplied resolve callback: one outside any transaction, one inside a
// committed transaction, and one inside a rolled-back transaction. Only the
// rolled-back resolution must leave its job lockable again after Unlock.
func testQueue_Resolve(t *testing.T, q que.Queue, resolve func(jb que.Job) error) {
	// job 1: resolve outside any DB transaction
	_, err := q.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs1, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs1) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs1))
	}
	jb1 := jobs1[0]
	err = resolve(jb1)
	if err != nil {
		t.Fatal(err)
	}
	// job 2: resolve inside a transaction that commits
	_, err = q.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs2, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs2))
	}
	jb2 := jobs2[0]
	dbTx(t, false, func(tx *sql.Tx) {
		// bind the job's resolution to the surrounding transaction
		jb2.In(tx)
		err2 := resolve(jb2)
		if err2 != nil {
			t.Fatal(err2)
		}
	})
	// job 3: resolve inside a transaction that rolls back, undoing it
	_, err = q.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs3, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs3) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs3))
	}
	jb3 := jobs3[0]
	dbTx(t, true, func(tx *sql.Tx) {
		jb3.In(tx)
		err3 := resolve(jb3)
		if err3 != nil {
			t.Fatal(err3)
		}
	})
	// after unlocking all three, only the rolled-back job should reappear
	err = q.Unlock(context.Background(), []int64{jb1.ID(), jb2.ID(), jb3.ID()})
	if err != nil {
		t.Fatal(err)
	}
	jobs4, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs4) != 1 {
		t.Fatalf("want lock 1 rollback job(%d)", jb3.ID())
	}
	jb4 := jobs4[0]
	if jb4.ID() != jb3.ID() {
		t.Fatalf("rollback job(%d) should lock but get %v", jb3.ID(), jb4.ID())
	}
	err = q.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
func TestQueue_Done(t *testing.T) {
testQueue_Resolve(t, newQueue(t, ""), func(jb que.Job) error {
return jb.Done(context.Background())
})
}
func TestQueue_Destroy(t *testing.T) {
testQueue_Resolve(t, newQueue(t, ""), func(jb que.Job) error {
return jb.Destroy(context.Background())
})
}
func TestQueue_Expire(t *testing.T) {
testQueue_Resolve(t, newQueue(t, ""), func(jb que.Job) error {
return jb.Expire(context.Background())
})
}
func TestQueue_RetryIn(t *testing.T) {
testQueue_Resolve(t, newQueue(t, ""), func(jb que.Job) error {
return jb.RetryIn(context.Background(), 10*time.Second, nil)
})
}
// TestQueue_RetryInLock verifies the retry flow: a job resolved with
// RetryIn(1s, err) is not lockable until the delay elapses, and when it
// comes back it carries an incremented retry count, the error message,
// and a stack trace pointing at this test file.
func TestQueue_RetryInLock(t *testing.T) {
	q := newQueue(t, "")
	_, err := q.Enqueue(context.Background(), nil, time.Now())
	if err != nil {
		t.Fatal(err)
	}
	jobs1, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs1) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs1))
	}
	jb1 := jobs1[0]
	// schedule a retry 1 second out, recording the error
	err = jb1.RetryIn(context.Background(), 1*time.Second, errors.New("test retry"))
	if err != nil {
		t.Fatal(err)
	}
	err = q.Unlock(context.Background(), []int64{jb1.ID()})
	if err != nil {
		t.Fatal(err)
	}
	// before the retry delay elapses the job must stay invisible
	jobs2, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs2) != 0 {
		t.Fatalf("want lock 0 job but get %d", len(jobs2))
	}
	// wait out the retry delay, then the job should be lockable again
	time.Sleep(1 * time.Second)
	jobs3, err := q.Lock(context.Background(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(jobs3) != 1 {
		t.Fatalf("want lock 1 job but get %d", len(jobs3))
	}
	jb3 := jobs3[0]
	if jb3.ID() != jb1.ID() {
		t.Fatalf("want job(%d) but get %d", jb1.ID(), jb3.ID())
	}
	if jb3.RetryCount() != 1 {
		t.Fatalf("want retry count is 1 but get %d", jb3.RetryCount())
	}
	if jb3.LastErrMsg() != "test retry" {
		t.Fatalf("want LastErrMsg is %q but get %q", "test retry", jb3.LastErrMsg())
	}
	if !strings.Contains(jb3.LastErrStack(), "queue_test.go") {
		t.Fatalf("want last err stack contains %s but get %s", "queue_test.go", jb3.LastErrStack())
	}
	err = q.Close()
	if err != nil {
		t.Fatalf("want Close() returns nil but get err %v", err)
	}
}
// TestCheckQueue exercises que.CheckQueue with an empty name, an over-long
// name, and a valid name, comparing the returned error messages.
func TestCheckQueue(t *testing.T) {
	longQueue := strings.Repeat("1", 101)
	testCases := []struct {
		queue string
		err   string
	}{
		{queue: "", err: ": queue must not be empty string"},
		{queue: longQueue, err: longQueue + ": the length of queue greater than 100"},
		{queue: "check-queue", err: ""},
	}
	for _, tc := range testCases {
		err := que.CheckQueue(tc.queue)
		if err == nil {
			// nil error is only acceptable when no error was expected
			if tc.err != "" {
				t.Fatalf("queue(%q): want err %q but get %v", tc.queue, tc.err, err)
			}
			continue
		}
		if err.Error() != tc.err {
			t.Fatalf("queue(%q): want err %q but get %q", tc.queue, tc.err, err.Error())
		}
	}
}
// dbTx runs f inside a transaction on the shared test database, then either
// rolls back (rollback == true) or commits, failing the test if the
// finishing step errors.
func dbTx(t *testing.T, rollback bool, f func(*sql.Tx)) {
	tx, err := _db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if rollback {
			err2 := tx.Rollback()
			// Bug fix: check the rollback error itself; the old code
			// tested `err` (the Begin error, always nil at this point),
			// so rollback failures were silently ignored.
			if err2 != nil {
				t.Fatal(err2)
			}
		} else {
			err2 := tx.Commit()
			if err2 != nil {
				t.Fatal(err2)
			}
		}
	}()
	f(tx)
}
var dbOnce sync.Once
var _db *sql.DB
var _driver string
// newQueue builds a que.Queue backed by the database configured through the
// QUE_DB_DRIVER and QUE_DB_SOURCE environment variables. The shared *sql.DB
// is opened and pinged exactly once for the whole test run. When queue is
// empty, a random "tt-" prefixed queue name is generated (and logged when
// t is non-nil). Unsupported drivers cause a panic.
func newQueue(t testing.TB, queue string) que.Queue {
	dbOnce.Do(func() {
		_driver = os.Getenv("QUE_DB_DRIVER")
		source := os.Getenv("QUE_DB_SOURCE")
		switch _driver {
		case "postgres":
			var err error
			_db, err = sql.Open(_driver, source)
			if err != nil {
				panic(err)
			}
			// fail fast if the database is unreachable
			err = _db.Ping()
			if err != nil {
				panic(err)
			}
		default:
			panic(fmt.Sprintf("unsupported driver: %q", _driver))
		}
	})
	if queue == "" {
		var err error
		queue, err = randQueue()
		if err != nil {
			panic(err)
		}
		if t != nil {
			t.Log("queue:", queue)
		}
	}
	switch _driver {
	case "postgres":
		q, err := pg.New(_db, queue)
		if err != nil {
			panic(err)
		}
		return q
	default:
		// unreachable: the driver was validated inside dbOnce above
		panic("already check supported driver in dbOnce")
	}
}
// randQueue returns a random queue name of the form "tt-" followed by
// 20 hex characters (10 random bytes).
func randQueue() (string, error) {
	var buf [10]byte
	if _, err := rand.Read(buf[:]); err != nil {
		return "", err
	}
	return "tt-" + hex.EncodeToString(buf[:]), nil
}
|
// Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package utilities
import (
"log"
"os"
"os/user"
"path/filepath"
"time"
)
const (
defaultTimeout = 5 * time.Second
)
// GetHomeDir returns the current user's home directory.
// It panics when the current user cannot be resolved.
func GetHomeDir() string {
	usr, err := user.Current()
	if err != nil {
		panic(err)
	}
	return usr.HomeDir
}
// IsExists returns true if the path exists, false otherwise.
func IsExists(path string) bool {
	_, err := os.Stat(path)
	if err != nil {
		// Bug fix: the old code returned os.IsExist(err), but Stat never
		// returns an "already exists" error, so every Stat failure
		// (including permission errors on existing paths) was reported as
		// "does not exist". os.IsNotExist is the correct check here.
		return !os.IsNotExist(err)
	}
	return true
}
// CreateFolderIfNotExist creates folderPath when it does not already
// exist, logging (but not failing on) any creation error.
func CreateFolderIfNotExist(folderPath string) {
	if IsExists(folderPath) {
		return
	}
	if err := os.Mkdir(folderPath, os.ModePerm); err != nil {
		// best effort: log and continue, matching the callers' expectations
		log.Print("error when creating " + folderPath + ": " + err.Error())
	}
}
// InitFolders creates the ~/.autoai/.aid directory tree with all required
// subfolders and writes the initial system configuration.
func InitFolders() {
	vendorDir := filepath.Join(GetHomeDir(), ".autoai")
	CreateFolderIfNotExist(vendorDir)
	targetDir := filepath.Join(vendorDir, ".aid")
	CreateFolderIfNotExist(targetDir)
	for _, each := range []string{"logs", "models", "plugins", "datasets", "temp"} {
		CreateFolderIfNotExist(filepath.Join(targetDir, each))
		Formatter.Success("created " + each + " folder")
	}
	// remote reporting is enabled by default
	SaveConfig(SystemConfig{RemoteReport: true})
}
// GetBasePath returns the aid base directory, ~/.autoai/.aid, ensuring its
// parent folder exists first.
func GetBasePath() string {
	vendorDir := filepath.Join(GetHomeDir(), ".autoai")
	CreateFolderIfNotExist(vendorDir)
	return filepath.Join(vendorDir, ".aid")
}
// GetDirSizeMB returns the total size, in megabytes, of all non-directory
// entries under path. Unreadable entries are skipped instead of causing a
// panic, and a failed walk is logged.
func GetDirSizeMB(path string) float64 {
	var dirSize int64
	readSize := func(path string, file os.FileInfo, err error) error {
		// Bug fix: when Walk reports an error, file may be nil; the old
		// code dereferenced it unconditionally and panicked on, e.g.,
		// permission errors. Skip such entries instead.
		if err != nil {
			return nil
		}
		if !file.IsDir() {
			dirSize += file.Size()
		}
		return nil
	}
	// Bug fix: the Walk error was silently discarded; log it so callers
	// can tell that the reported size may be incomplete.
	if err := filepath.Walk(path, readSize); err != nil {
		log.Print("error when walking " + path + ": " + err.Error())
	}
	return float64(dirSize) / 1024.0 / 1024.0
}
|
// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
package controller
import (
"fmt"
pbc "github.com/swinslow/peridot-core/pkg/controller"
pbs "github.com/swinslow/peridot-core/pkg/status"
)
// Start tries to start the Controller, and returns an error explaining
// why not if it cannot.
func (c *Controller) Start() error {
	// do NOT grab a writer lock here: the controller's own tryToStart
	// takes one itself and returns the error result to us
	return c.tryToStart()
}
// GetStatus returns a consistent snapshot of the controller's overall run
// status, health, output message and error message.
func (c *Controller) GetStatus() (pbs.Status, pbs.Health, string, string) {
	// RLock/RUnlock are equivalent to locking through RLocker()
	c.m.RLock()
	defer c.m.RUnlock()
	return c.runStatus, c.healthStatus, c.outputMsg, c.errorMsg
}
// Stop tries to stop the Controller by cancelling its context.
func (c *Controller) Stop() {
	// do not grab a lock; we're just hitting the cancel button, which is
	// safe to call concurrently
	c.controllerCancel()
}
// AddAgent registers a new agent configuration with the Controller prior
// to starting the JobController. It returns nil when the agent is added to
// the JobController configuration, or a non-nil error when an agent with
// the same name is already registered.
func (c *Controller) AddAgent(cfg *pbc.AgentConfig) error {
	// hold the writer lock across both the availability check and the
	// registration so no other caller can claim the name in between
	c.m.Lock()
	defer c.m.Unlock()
	if _, exists := c.agents[cfg.Name]; exists {
		return fmt.Errorf("agent with name %s is already registered", cfg.Name)
	}
	// store a copy of the config under the now-reserved name
	c.agents[cfg.Name] = *cfg
	return nil
}
// GetAgent returns the configuration of the Agent with the given name, or
// an error if no such agent is registered. It does not report runtime
// status (e.g., whether the Agent is running); check the applicable pod's
// health via Kubernetes for that.
func (c *Controller) GetAgent(agentName string) (*pbc.AgentConfig, error) {
	c.m.RLock()
	ac, ok := c.agents[agentName]
	c.m.RUnlock()
	if !ok {
		return nil, fmt.Errorf("no agent found with name %s", agentName)
	}
	// ac is a copy of the stored record, so handing out its address is safe
	return &ac, nil
}
// GetAllAgents returns copies of the config records for all current agents.
func (c *Controller) GetAllAgents() []*pbc.AgentConfig {
	c.m.RLock()
	defer c.m.RUnlock()
	cfgs := make([]*pbc.AgentConfig, 0, len(c.agents))
	for _, cfg := range c.agents {
		// cfg is already a copy of the map value; copy it into a fresh
		// variable so the returned pointer never aliases loop state
		ac := cfg
		cfgs = append(cfgs, &ac)
	}
	return cfgs
}
// cloneStepTemplate deep-copies a slice of StepTemplates, recursing into
// the sub-templates of concurrent steps.
func cloneStepTemplate(inSteps []*StepTemplate) []*StepTemplate {
	steps := make([]*StepTemplate, 0, len(inSteps))
	for _, in := range inSteps {
		out := &StepTemplate{T: in.T}
		// copy only the fields relevant to the step's type
		switch in.T {
		case StepTypeAgent:
			out.AgentName = in.AgentName
		case StepTypeJobSet:
			out.JSTemplateName = in.JSTemplateName
		case StepTypeConcurrent:
			out.ConcurrentStepTemplates = cloneStepTemplate(in.ConcurrentStepTemplates)
		}
		steps = append(steps, out)
	}
	return steps
}
// AddJobSetTemplate registers a new jobSetTemplate under the given name.
// It returns nil on success, or a non-nil error when the name is already
// taken.
func (c *Controller) AddJobSetTemplate(name string, inSteps []*StepTemplate) error {
	// build the template before taking the lock so the critical section
	// stays as short as possible
	jst := &JobSetTemplate{Name: name, Steps: cloneStepTemplate(inSteps)}
	// the writer lock must cover both the name check and the registration
	c.m.Lock()
	defer c.m.Unlock()
	if _, taken := c.jobSetTemplates[name]; taken {
		return fmt.Errorf("template with name %s is already registered", name)
	}
	c.jobSetTemplates[name] = jst
	return nil
}
// GetJobSetTemplate returns a deep copy of the steps of the named
// JobSetTemplate, or an error if the template is unknown.
func (c *Controller) GetJobSetTemplate(name string) ([]*StepTemplate, error) {
	c.m.RLock()
	defer c.m.RUnlock()
	jst, ok := c.jobSetTemplates[name]
	if !ok {
		return nil, fmt.Errorf("no template found with name %s", name)
	}
	// return a copy so callers cannot mutate the registered template
	return cloneStepTemplate(jst.Steps), nil
}
// GetAllJobSetTemplates returns deep copies of the steps of every
// registered JobSetTemplate, keyed by template name.
func (c *Controller) GetAllJobSetTemplates() map[string][]*StepTemplate {
	c.m.RLock()
	defer c.m.RUnlock()
	templates := map[string][]*StepTemplate{}
	for name, jst := range c.jobSetTemplates {
		// copy each template so callers cannot mutate the registry
		templates[name] = cloneStepTemplate(jst.Steps)
	}
	return templates
}
// GetJob returns a copy of the Job with the given ID, or an error if no
// job with that ID is known.
func (c *Controller) GetJob(jobID uint64) (*Job, error) {
	c.m.RLock()
	defer c.m.RUnlock()
	jd, ok := c.jobs[jobID]
	if !ok {
		return nil, fmt.Errorf("no job found with ID %d", jobID)
	}
	// hand back a copy so the caller cannot mutate the stored record
	return &Job{
		JobID:           jd.JobID,
		JobSetID:        jd.JobSetID,
		JobSetStepID:    jd.JobSetStepID,
		JobSetStepOrder: jd.JobSetStepOrder,
		AgentName:       jd.AgentName,
		Cfg:             jd.Cfg,
		Status:          jd.Status,
	}, nil
}
// GetAllJobs returns copies of all known Jobs.
func (c *Controller) GetAllJobs() []*Job {
	c.m.RLock()
	defer c.m.RUnlock()
	jobs := make([]*Job, 0, len(c.jobs))
	for _, jd := range c.jobs {
		// append a copy so callers cannot mutate the stored records
		jobs = append(jobs, &Job{
			JobID:           jd.JobID,
			JobSetID:        jd.JobSetID,
			JobSetStepID:    jd.JobSetStepID,
			JobSetStepOrder: jd.JobSetStepOrder,
			AgentName:       jd.AgentName,
			Cfg:             jd.Cfg,
			Status:          jd.Status,
		})
	}
	return jobs
}
// GetAllJobsForJobSet returns copies of all Jobs belonging to the given
// JobSet.
func (c *Controller) GetAllJobsForJobSet(jobSetID uint64) []*Job {
	c.m.RLock()
	defer c.m.RUnlock()
	jobs := []*Job{}
	for _, jd := range c.jobs {
		if jd.JobSetID != jobSetID {
			continue
		}
		// append a copy so callers cannot mutate the stored records
		jobs = append(jobs, &Job{
			JobID:           jd.JobID,
			JobSetID:        jd.JobSetID,
			JobSetStepID:    jd.JobSetStepID,
			JobSetStepOrder: jd.JobSetStepOrder,
			AgentName:       jd.AgentName,
			Cfg:             jd.Cfg,
			Status:          jd.Status,
		})
	}
	return jobs
}
// cloneSteps deep-copies a slice of Steps, recursing into concurrent
// sub-steps. A nil input yields a nil result.
func cloneSteps(inSteps []*Step) []*Step {
	if inSteps == nil {
		return nil
	}
	steps := make([]*Step, 0, len(inSteps))
	for _, in := range inSteps {
		steps = append(steps, &Step{
			T:                     in.T,
			JobSetID:              in.JobSetID,
			StepID:                in.StepID,
			StepOrder:             in.StepOrder,
			RunStatus:             in.RunStatus,
			HealthStatus:          in.HealthStatus,
			AgentJobID:            in.AgentJobID,
			AgentName:             in.AgentName,
			SubJobSetID:           in.SubJobSetID,
			SubJobSetTemplateName: in.SubJobSetTemplateName,
			ConcurrentSteps:       cloneSteps(in.ConcurrentSteps),
		})
	}
	return steps
}
// StartJobSet submits a request to start a JobSet built from the named
// template with the given configuration, and returns the reserved
// JobSet ID.
func (c *Controller) StartJobSet(jstName string, cfg []*pbc.JobSetConfig) (uint64, error) {
	// assemble the request, copying the configs one-by-one
	jsr := JobSetRequest{
		TemplateName: jstName,
		Configs:      map[string]string{},
	}
	for _, jsConfig := range cfg {
		jsr.Configs[jsConfig.Key] = jsConfig.Value
	}
	// hold the writer lock only long enough to reserve the next JobSet ID
	c.m.Lock()
	jsr.RequestedJobSetID = c.nextJobSetID
	c.nextJobSetID++
	c.m.Unlock()
	// submit the JobSetRequest
	c.inJobSetStream <- jsr
	return jsr.RequestedJobSetID, nil
}
// GetJobSet returns a copy of the JobSet with the given ID, or an error if
// the ID is unknown.
func (c *Controller) GetJobSet(jobSetID uint64) (*JobSet, error) {
	c.m.RLock()
	defer c.m.RUnlock()
	js, ok := c.jobSets[jobSetID]
	if !ok {
		return nil, fmt.Errorf("no jobSet found with ID %d", jobSetID)
	}
	// deep-copy the record, including its steps and configs, so callers
	// cannot mutate the stored JobSet
	jobSetDetails := &JobSet{
		JobSetID:     js.JobSetID,
		TemplateName: js.TemplateName,
		RunStatus:    js.RunStatus,
		HealthStatus: js.HealthStatus,
		Steps:        cloneSteps(js.Steps),
		Configs:      map[string]string{},
	}
	for k, v := range js.Configs {
		jobSetDetails.Configs[k] = v
	}
	return jobSetDetails, nil
}
// GetAllJobSets returns copies of all known JobSets.
func (c *Controller) GetAllJobSets() []*JobSet {
	c.m.RLock()
	defer c.m.RUnlock()
	jobSets := make([]*JobSet, 0, len(c.jobSets))
	for _, js := range c.jobSets {
		// deep-copy each record, including steps and configs, so callers
		// cannot mutate the stored JobSets
		jobSetDetails := &JobSet{
			JobSetID:     js.JobSetID,
			TemplateName: js.TemplateName,
			RunStatus:    js.RunStatus,
			HealthStatus: js.HealthStatus,
			Steps:        cloneSteps(js.Steps),
			Configs:      map[string]string{},
		}
		for k, v := range js.Configs {
			jobSetDetails.Configs[k] = v
		}
		jobSets = append(jobSets, jobSetDetails)
	}
	return jobSets
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00300105 is the XML document wrapper for the
// MeetingEntitlementNotificationV05 message (seev.003.001.05).
type Document00300105 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.003.001.05 Document"`
	// Message is the message payload (MtgEntitlmntNtfctn element).
	Message *MeetingEntitlementNotificationV05 `xml:"MtgEntitlmntNtfctn"`
}
// AddMessage allocates the document's message payload and returns it for
// further population.
func (d *Document00300105) AddMessage() *MeetingEntitlementNotificationV05 {
	d.Message = &MeetingEntitlementNotificationV05{}
	return d.Message
}
// Scope
// An account servicer sends the MeetingEntitlementNotification to an issuer, its agent, an intermediary or an account owner to advise the entitlement in relation to a shareholders meeting.
// Usage
// This message is sent to advise the quantity of securities held by an account owner. The balance is specified for the securities for which the meeting is taking place.
// This entitlement message is sent by the account servicer or the registrar to an intermediary, the issuer's agent or the issuer. It is also sent between the account servicer and the account owner or the party holding the right to vote.
// The message is also used to amend a previously sent MeetingEntitlementNotification. To notify an update, the RelatedReference must be included in the message.
// This message definition is intended for use with the Business Application Header (head.001.001.01).
type MeetingEntitlementNotificationV05 struct {
	// Identifies the meeting entitlement message to be modified.
	RelatedReference *iso20022.MessageIdentification `xml:"RltdRef,omitempty"`
	// Series of elements which allow to identify a meeting.
	MeetingReference *iso20022.MeetingReference7 `xml:"MtgRef"`
	// Identifies the security for which the meeting is organised, the account and the positions of the security holder.
	Security []*iso20022.SecurityPosition9 `xml:"Scty"`
	// Defines the dates determining eligibility.
	Eligibility *iso20022.EligibilityDates1 `xml:"Elgblty"`
	// Additional information that cannot be captured in the structured fields and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddRelatedReference initialises the RelatedReference element and returns
// it for population.
func (m *MeetingEntitlementNotificationV05) AddRelatedReference() *iso20022.MessageIdentification {
	ref := &iso20022.MessageIdentification{}
	m.RelatedReference = ref
	return ref
}
// AddMeetingReference initialises the MeetingReference element and returns
// it for population.
func (m *MeetingEntitlementNotificationV05) AddMeetingReference() *iso20022.MeetingReference7 {
	ref := &iso20022.MeetingReference7{}
	m.MeetingReference = ref
	return ref
}
// AddSecurity appends a fresh security position to the repeating Security
// element and returns it for population.
func (m *MeetingEntitlementNotificationV05) AddSecurity() *iso20022.SecurityPosition9 {
	pos := &iso20022.SecurityPosition9{}
	m.Security = append(m.Security, pos)
	return pos
}
// AddEligibility initialises the Eligibility element and returns it for
// population.
func (m *MeetingEntitlementNotificationV05) AddEligibility() *iso20022.EligibilityDates1 {
	el := &iso20022.EligibilityDates1{}
	m.Eligibility = el
	return el
}
// AddSupplementaryData appends a fresh supplementary-data block to the
// repeating SupplementaryData element and returns it for population.
func (m *MeetingEntitlementNotificationV05) AddSupplementaryData() *iso20022.SupplementaryData1 {
	supl := &iso20022.SupplementaryData1{}
	m.SupplementaryData = append(m.SupplementaryData, supl)
	return supl
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rule
import (
"testing"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/planner/core/internal"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
)
// Input is the list of SQL statements loaded from the suite's test data file.
type Input []string
// Rule should not be applied for TiKV.
// TestPushDerivedTopnNegative checks table shapes (clustered/nonclustered
// PK, unique column, stored generated column) where the derived-TopN
// pushdown must NOT fire; expected plans come from the suite data file.
func TestPushDerivedTopnNegative(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set tidb_opt_derive_topn=1")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, c int, primary key(b,a))")
	tk.MustExec("drop table if exists tt")
	tk.MustExec("create table tt(a int, b int, c int, primary key(b,a) nonclustered)")
	tk.MustExec("drop table if exists ti")
	tk.MustExec("create table ti(a int, b int, c int unique)")
	tk.MustExec("drop table if exists td")
	tk.MustExec("create table td(a int, b int as (a+1) stored, primary key(b,a));")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
	}
	suiteData := GetDerivedTopNSuiteData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		// In record mode, regenerate the golden plan instead of comparing.
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
	}
}
// TiFlash cases. TopN pushed down to storage only when no partition by.
// TestPushDerivedTopnFlash forces MPP execution against a mocked TiFlash
// replica and compares plans to the golden file.
func TestPushDerivedTopnFlash(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	dom := domain.GetDomain(tk.Session())
	tk.MustExec("set tidb_opt_derive_topn=1")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, primary key(b,a))")
	// Attach a fake TiFlash replica so the planner can choose MPP plans.
	internal.SetTiFlashReplica(t, dom, "test", "t")
	tk.MustExec("set tidb_enforce_mpp=1")
	tk.MustExec("set @@session.tidb_allow_mpp=ON;")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
	}
	suiteData := GetDerivedTopNSuiteData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
	}
}
// Rule should be applied for TiKV.
// TestPushDerivedTopnPositive seeds several table shapes with data and
// verifies both the derived-TopN plans and the query results against the
// golden file.
func TestPushDerivedTopnPositive(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set tidb_opt_derive_topn=1")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, primary key(b,a))")
	tk.MustExec("drop table if exists tt")
	tk.MustExec("create table tt(a int, b int, c int, primary key(b,a) nonclustered)")
	tk.MustExec("drop table if exists ti")
	tk.MustExec("create table ti(a int, b int, c int unique)")
	tk.MustExec("drop table if exists customer")
	tk.MustExec("create table customer(primary_key VARBINARY(1024), secondary_key VARBINARY(1024), c_timestamp BIGINT, value MEDIUMBLOB, PRIMARY KEY (primary_key, secondary_key, c_timestamp) clustered);")
	tk.MustExec("drop table if exists td")
	tk.MustExec("create table td(a int, b int as (a+1) stored, primary key(b,a));")
	tk.MustExec("insert into t values(1,1)")
	tk.MustExec("insert into t values(2,1)")
	tk.MustExec("insert into t values(3,2)")
	tk.MustExec("insert into t values(4,2)")
	tk.MustExec("insert into t values(5,2)")
	tk.MustExec("insert into tt select *,55 from t")
	tk.MustExec("insert into ti select *,a from t")
	tk.MustExec("insert into td(a) select a from t")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
		Res  []string
	}
	suiteData := GetDerivedTopNSuiteData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		res := tk.MustQuery(sql)
		// In record mode, regenerate both golden plan and golden result.
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
			output[i].Res = testdata.ConvertRowsToStrings(res.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
		res.Check(testkit.Rows(output[i].Res...))
	}
}
// Negative test when tidb_opt_derive_topn is off.
// With the session flag disabled, the rewrite must never fire; plans are
// compared to the golden file.
func TestPushDerivedTopnFlagOff(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set tidb_opt_derive_topn=0")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, primary key(b,a))")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
	}
	suiteData := GetDerivedTopNSuiteData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
	}
}
|
package user
import "context"
// Storage abstracts user persistence so callers do not depend on a concrete
// database implementation.
type Storage interface {
	// GetByID looks a user up by its unique identifier.
	GetByID(id string) (u User, err error)
	// FindOneByNickName looks a user up by nickname.
	FindOneByNickName(nickname string) (u User, err error)
	// PingPool checks connectivity of the underlying connection pool.
	PingPool(ctx context.Context) error
	// Close releases underlying storage resources.
	Close()
}
|
package pool
import "errors"
// op identifies the kind of bcrypt work a request asks for.
type op string

const (
	// Hash is the bcrypt hashing work type.
	Hash op = "encrypt"
	// Compare is the bcrypt comparison work type.
	// Typed as op (it was previously an untyped string constant) so both
	// operations are consistently comparable against WorkRequest.Op.
	Compare op = "decrypt"
)
// WorkRequest is a single unit of bcrypt work submitted to the pool.
type WorkRequest struct {
	Op      op     // which operation to perform (Hash or Compare)
	Text    []byte // plaintext to hash, or to compare against a hash
	Compare []byte // optional: existing hash to compare Text against
}
// WorkResponse is the outcome of one WorkRequest.
type WorkResponse struct {
	Wr      WorkRequest // the originating request, echoed back
	Result  []byte      // hash output (for Hash operations)
	Matched bool        // comparison outcome (for Compare operations)
	Err     error       // non-nil when the operation failed or was unknown
}
// Process routes a work request to the matching bcrypt routine; requests
// carrying an unknown operation yield a response with a non-nil Err.
func Process(wr WorkRequest) WorkResponse {
	if wr.Op == Hash {
		return hashWork(wr)
	}
	if wr.Op == Compare {
		return compareWork(wr)
	}
	return WorkResponse{Err: errors.New("unsupported operation")}
}
|
package sync
import (
syncComm "github.com/HNB-ECO/HNB-Blockchain/HNB/sync/common"
)
// blockSyncReq describes one block-range synchronisation request for a chain.
type blockSyncReq struct {
	chainID       string
	beginIndex    uint64     // first block height to fetch (inclusive)
	endIndex      uint64     // last block height to fetch (inclusive)
	replyChain    chan error // optional: receives the final sync result
	peerID        uint64     // peer to fetch the blocks from
	notifyHandler syncComm.NotifyFunc
	version       uint32
}
// blockSyncThread is the long-running dispatcher: it pulls sync requests off
// the request channel and hands each one to the per-chain handler. It never
// returns under normal operation.
func (sh *SyncHandler) blockSyncThread() error {
	syncLogger.Infof(LOGTABLE_SYNC, "* sync thread start")
	for {
		req := <-sh.syncReq
		if req == nil {
			continue
		}
		handler := sh.getSyncHandlerByChainID(req.chainID)
		if handler == nil {
			// Unknown chain: drop the request and keep serving.
			continue
		}
		handler.blockSyncProcess(req)
	}
}
// clearTaskChannel drains any pending signals from exitTask without
// blocking, so a stale exit signal cannot abort the next sync run.
func (sch *syncChainHandler) clearTaskChannel() {
	for {
		select {
		case <-sch.exitTask:
		default:
			// Channel empty: done draining.
			return
		}
	}
}
// blockSyncProcess runs one sync request end to end: it drains stale exit
// signals, performs the block sync, emits a final notification whose
// FinishFlag reflects success, answers the optional reply channel, and
// resets the handler state.
func (sch *syncChainHandler) blockSyncProcess(req *blockSyncReq) {
	sch.clearTaskChannel()
	syncLogger.Infof(LOGTABLE_SYNC, "* chainID(%s).(pro sync req (%d,%d) %v)",
		req.chainID,
		req.beginIndex,
		req.endIndex,
		req.peerID)
	sch.notifyHandler = req.notifyHandler
	errSync := sch.syncBlock(req)
	// Single notification construction: FinishFlag is true exactly when the
	// sync succeeded (previously two near-identical branches).
	notify := &syncComm.SyncNotify{FinishFlag: errSync == nil, Version: req.version, Block: nil, SenderID: req.peerID}
	sch.notifyHandler(notify)
	if req.replyChain != nil {
		req.replyChain <- errSync
	}
	sch.setSyncState(0, 0, 0, 0)
	sch.notifyHandler = nil
}
// syncBlock fetches the requested block range [beginIndex, endIndex] from
// the given peer in windows of at most `interval` blocks, notifying the
// registered handler for every received block. It mutates req.beginIndex
// and req.endIndex as it advances through the range. Returns a non-nil
// error only when a remote fetch fails.
func (sch *syncChainHandler) syncBlock(req *blockSyncReq) error {
	var syncOver bool = false
	endIndex := req.endIndex
	for {
		// Determine the window size; fall back to the configured maximum
		// when no interval is known for the peer or the range is large.
		interval, err := sch.GetInterval(req.peerID)
		if err != nil || (req.endIndex-req.beginIndex) > 20 {
			interval = int(sch.sh.getMaxSyncBlockCount())
			sch.SetInterval(req.peerID, interval)
		}
		// Clamp this iteration's end to the window or the overall target.
		if (endIndex - req.beginIndex + 1) > uint64(interval) {
			req.endIndex = req.beginIndex + uint64(interval) - 1
		} else {
			req.endIndex = endIndex
		}
		blocks, errSync := sch.getRemoteBlocks(req.beginIndex, req.endIndex, req.peerID)
		if errSync != nil {
			syncLogger.Infof(LOGTABLE_SYNC, "chainID(%s) sync blks err %v", sch.chainID, errSync)
			return errSync
		}
		if blocks == nil && errSync == nil {
			// Peer returned nothing: treat as a clean stop.
			syncLogger.Infof(LOGTABLE_SYNC, "chainID(%s) sync blks==nil", sch.chainID)
			return nil
		}
		//err = sch.sh.lg.CheckBlocks(sch.chainID, blocks)
		//if err == nil {
		// Advance the cursor to just past the last block actually received
		// (the peer may return fewer blocks than requested).
		length := len(blocks)
		req.endIndex = blocks[length-1].Header.BlockNum
		syncLogger.Infof(LOGTABLE_SYNC, "* chainID(%s).(sub sync blks(%d,%d)).(succ)",
			sch.chainID,
			req.beginIndex,
			req.endIndex)
		req.beginIndex = req.endIndex + 1
		if req.beginIndex > endIndex {
			syncOver = true
		}
		if blocks != nil {
			if sch.notifyHandler != nil {
				syncLogger.Debugf(LOGTABLE_SYNC, "chainID(%s) notify sync blk(%d->%d)", sch.chainID, blocks[0].Header.BlockNum, blocks[len(blocks)-1].Header.BlockNum)
				// Deliver blocks one at a time; FinishFlag stays false here,
				// the final notification is sent by the caller.
				for _, v := range blocks {
					notify := &syncComm.SyncNotify{FinishFlag: false,
						Version: req.version, SenderID: req.peerID}
					notify.Block = v
					sch.notifyHandler(notify)
					syncLogger.Debugf(LOGTABLE_SYNC, "chainID(%s) notify sync blk(%d)", sch.chainID, v.Header.BlockNum)
				}
			} else {
				// No consumer for the blocks: abort the loop.
				syncLogger.Errorf(LOGTABLE_SYNC, "chainID(%s).(notify handler = nil)", sch.chainID)
				break
			}
		}
		if syncOver {
			syncLogger.Infof(LOGTABLE_SYNC, "chainID(%s) sync end -> blk(%d)", sch.chainID, endIndex)
			break
		}
		//} else {
		//	syncLogger.Warningf(LOGTABLE_SYNC,"chainID(%s).(chk hash).(%s)", sch.chainID, err)
		//	continue
		//}
	}
	return nil
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collate
// Collation of utf8mb4_zh_pinyin_tidb_as_cs.
//
// zhPinyinTiDBASCSCollator is a placeholder: every method panics until the
// Chinese pinyin collation is actually implemented.
type zhPinyinTiDBASCSCollator struct {
}

// Compare is not implemented.
func (*zhPinyinTiDBASCSCollator) Compare(_, _ string) int {
	panic("implement me")
}

// Key is not implemented.
func (*zhPinyinTiDBASCSCollator) Key(_ string) []byte {
	panic("implement me")
}

// KeyWithoutTrimRightSpace is not implemented.
func (*zhPinyinTiDBASCSCollator) KeyWithoutTrimRightSpace(_ string) []byte {
	panic("implement me")
}

// Pattern is not implemented.
func (*zhPinyinTiDBASCSCollator) Pattern() WildcardPattern {
	panic("implement me")
}
|
package main
import (
"fmt"
)
// Bootcamp holds a geographic coordinate (latitude/longitude).
type Bootcamp struct {
	Lat float32
	Lon float32
}
// main demonstrates that new(T) and a T{} composite literal produce values
// that compare equal once the pointer is dereferenced.
func main() {
	viaNew := new(Bootcamp)
	viaNew.Lat = 12.3123
	viaLiteral := Bootcamp{}
	viaLiteral.Lat = 12.3123
	fmt.Println(viaNew)
	fmt.Println(viaLiteral)
	fmt.Println(*viaNew == viaLiteral)
}
|
package bot
import (
"strings"
"fmt"
)
const (
	// Build result strings reported back to the caller.
	success = "SUCCESS"
	failure = "FAILURE"
)

const (
	// Recognised command keywords accepted by Parse.
	build  = "build"
	status = "status"
	alias  = "alias"
)
// BotCommand is the contract every chat command implements.
type BotCommand interface {
	// Execute runs the command with the given argument tokens and returns
	// the textual result.
	Execute([]string) (string, error)
	// Usage returns a human-readable usage string.
	Usage() string
	// Options lists the options the command accepts.
	Options() []Option
}
// argError describes a problem with a specific command-line token.
type argError struct {
	arg  string
	prob string
}

// Error formats the error as "token - problem".
func (e *argError) Error() string {
	msg := fmt.Sprintf("%s - %s", e.arg, e.prob)
	return msg
}

// String implements fmt.Stringer with the same format as Error.
func (e *argError) String() string {
	return e.Error()
}
// Parse maps the first whitespace-delimited token of line to a command
// implementation; unknown keywords yield an *argError.
func Parse(line string) (BotCommand, error) {
	lexes := strings.Split(line, " ")
	// Split always returns at least one element, so this is safe even for "".
	command := lexes[0]
	switch command {
	case build:
		return new(BuildCommand), nil
	case alias:
		return new(AliasCommand), nil
	case status:
		return new(StatusCommand), nil
	default:
		return nil, &argError{command, "No such command"}
	}
}
// RunCommand is a stub: it currently ignores the parsed command line and
// always reports success.
func RunCommand(command *CommandLine) (string, error) {
	return "ok", nil
}
// ParseAndRun parses a raw input line into a command and immediately
// executes it with the remaining tokens as arguments. On a parse failure
// the original line is returned alongside the error.
func ParseAndRun(line string) (string, error) {
	tokens := strings.Split(line, " ")
	command, err := Parse(tokens[0])
	if err != nil {
		return line, err
	}
	return command.Execute(tokens[1:])
}
|
package lib
import (
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
log "github.com/sirupsen/logrus"
)
// contentTypeHeader is the (lowercased) Content-Type header key.
const contentTypeHeader = "content-type"

// Chan carries batches of prepared requests from LoopRequests to Perform.
var Chan chan []*http.Request
// ConfigureProxy routes all default-transport traffic through the given
// proxy URL and disables TLS certificate verification. If the proxy string
// does not parse, the current transport is left untouched.
func ConfigureProxy(proxy string) {
	proxyUrl, err := url.Parse(proxy)
	if err != nil {
		log.Warnf("Error configuring proxy %s: %s", proxy, err)
		// Bail out instead of installing a proxy func built from a nil URL.
		return
	}
	http.DefaultTransport = &http.Transport{Proxy: http.ProxyURL(proxyUrl)}
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
// CreateNewClient builds the HTTP client used for all outgoing requests,
// optionally wiring the process-wide proxy from config, and seeds a set of
// browser-like default headers.
func CreateNewClient(config *Configuration) HttpAPI {
	if config.Proxy != "" {
		log.Infof("Registering Proxy %s", config.Proxy)
		// Note: this mutates http.DefaultTransport globally.
		ConfigureProxy(config.Proxy)
	}
	defaultHeaders := map[string]string{
		"accept":                    "text/html,application/xhtml+xml,application/xml;q=0.6,image/webp,*/*;q=0.5",
		"user-agent":                "Mozilla/5.0 (Windows NT 8.0; Win64; x64; rv:69.0) Gecko/20100115 Firefox/89.85",
		"accept-language":           "en-US,en;q=0.5",
		"accept-encoding":           "gzip, deflate",
		"dnt":                       "1",
		"connection":                "close",
		"upgrade-insecure-requests": "1",
	}
	return &HttpClient{
		Client:  &http.Client{},
		Headers: defaultHeaders,
	}
}
// MergeURLParams appends encoded query data to path; an empty payload
// leaves the path untouched.
func MergeURLParams(path string, data []byte) string {
	if len(data) == 0 {
		return path
	}
	return fmt.Sprintf("%s?%s", path, data)
}
// PrepareRequests builds an *http.Request from a declarative Request spec.
// Method matching is case-insensitive (previously "GET" was rejected);
// only GET and POST are supported.
func PrepareRequests(client HttpAPI, req Request) (*http.Request, error) {
	switch strings.ToLower(req.Method) {
	case "get":
		// GET carries its parameters in the query string, never a body.
		_, data := PrepareFormData(req.Headers, req.Params)
		url := MergeURLParams(req.Path, data)
		return client.PrepareGet(url, req.Headers), nil
	case "post":
		headers, data := PrepareData(req.Headers, req.Params)
		return client.PreparePost(req.Path, headers, data), nil
	}
	return nil, fmt.Errorf("method %s not supported for %s. Only get or post", req.Method, req.Path)
}
// PrepareData encodes params as JSON when the content-type header mentions
// "json", and as form data otherwise.
func PrepareData(headers map[string]string, params []Param) (map[string]string, []byte) {
	if strings.Contains(strings.ToLower(headers[contentTypeHeader]), "json") {
		return PrepareJson(headers, params)
	}
	return PrepareFormData(headers, params)
}
// PrepareJson serialises params into a flat JSON object keyed by parameter
// name; headers are passed through unchanged.
func PrepareJson(headers map[string]string, params []Param) (map[string]string, []byte) {
	body := make(map[string]string, len(params))
	for _, param := range params {
		body[param.Name] = GetParamValue(param)
	}
	// Marshalling a map[string]string cannot fail, so the error is dropped.
	encoded, _ := json.Marshal(body)
	return headers, encoded
}
// PrepareFormData url-encodes params and stamps the form-urlencoded
// content-type into headers (allocating the map if it was nil).
func PrepareFormData(headers map[string]string, params []Param) (map[string]string, []byte) {
	if headers == nil {
		headers = map[string]string{}
	}
	values := url.Values{}
	for _, param := range params {
		values.Add(param.Name, GetParamValue(param))
	}
	headers[contentTypeHeader] = "application/x-www-form-urlencoded; charset=UTF-8"
	return headers, []byte(values.Encode())
}
// LoopRequests endlessly converts the declarative request specs into
// ready-to-send *http.Request batches and pushes each batch onto Chan.
func LoopRequests(client HttpAPI, requests []Request) {
	for {
		var preparedReqs []*http.Request
		for _, rawRequest := range requests {
			req, err := PrepareRequests(client, rawRequest)
			if err != nil {
				// Skip unbuildable specs but keep the batch going.
				log.Warn(err)
				continue
			}
			log.Debugf("Prepared request for %s %s", req.Method, req.URL.Path)
			preparedReqs = append(preparedReqs, req)
		}
		Chan <- preparedReqs
	}
}
// Perform drains batches of prepared requests from Chan and executes them
// via the client; it runs forever.
func Perform(client HttpAPI) {
	for {
		reqs := <-Chan
		log.Infof("sending %d requests", len(reqs))
		client.Perform(reqs)
	}
}
// GetParamValue returns the literal value of p; only when no literal value
// was supplied and a type is given does it generate a fake value instead.
func GetParamValue(p Param) string {
	if p.Value != "" || p.Type == "" {
		return p.Value
	}
	return GenerateFake(p.Type)
}
|
package main
// Illustrates the three switch forms: tag switch, switch with init
// statement and no expression (equivalent to "switch true"), and a bare
// expressionless switch.
// NOTE(review): this is illustrative snippet code — `tag`, `f`, `s1`..`f3`
// are undefined and `return -x` inside func main does not compile.
func main() {
	switch tag {
	case 0, 1, 2, 3: s1()
	case 4, 5, 6, 7: s2()
	default: s3()
	}
	switch x := f(); { // missing switch expression means "true"
	case x < 0: return -x
	default: return x
	}
	switch {
	case x < y: f1()
	case x < z: f2()
	case x == 4: f3()
	}
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/yddeng/raft/kv"
"net/http"
)
var (
	// peers maps a raft node name to its externally reachable address.
	peers = map[string]string{
		"kv1": "127.0.0.1:7000",
		"kv2": "127.0.0.1:7001",
		"kv3": "127.0.0.1:7002",
	}
	// list orders peer names with the presumed leader first, so requests
	// try the leader before the other peers.
	list []string
)
func makeList(leader string) {
list = []string{leader}
for name := range peers {
if name != leader {
list = append(list, name)
}
}
}
// set stores k=v through the cluster: it POSTs the request to each peer in
// list order until a node that is the leader accepts it, then reorders the
// peer list so that node is tried first next time.
func set(k, v string) {
	obj := kv.SetReq{Key: k, Value: v}
	byts, _ := json.Marshal(obj)
	for _, name := range list {
		url := fmt.Sprintf("http://%s/set", peers[name])
		reader := bytes.NewReader(byts)
		resp, err := http.Post(url, "application/json", reader)
		if err != nil {
			// Node unreachable: try the next peer.
			fmt.Println("set post err", err)
			continue
		}
		msg := kv.SetResp{}
		_ = json.NewDecoder(resp.Body).Decode(&msg)
		_ = resp.Body.Close()
		if !msg.IsLeader {
			// Only the leader can commit writes; keep probing.
			continue
		}
		if msg.OK {
			makeList(name)
			fmt.Println("set ok", obj)
			return
		}
	}
	fmt.Println("set failed", obj)
}
// get reads the value for k from the cluster, probing peers in list order;
// on success it promotes the answering node to the front of the list.
// Returns "" when no peer could answer.
func get(k string) string {
	obj := kv.GetReq{Key: k}
	byts, _ := json.Marshal(obj)
	for _, name := range list {
		url := fmt.Sprintf("http://%s/get", peers[name])
		reader := bytes.NewReader(byts)
		resp, err := http.Post(url, "application/json", reader)
		if err != nil {
			// Node unreachable: try the next peer.
			fmt.Println("get post err", err)
			continue
		}
		msg := kv.GetResp{}
		_ = json.NewDecoder(resp.Body).Decode(&msg)
		_ = resp.Body.Close()
		if msg.OK {
			makeList(name)
			fmt.Println("get ok", obj, msg)
			return msg.Value
		}
	}
	return ""
}
// main runs a tiny interactive REPL against the raft KV cluster:
// option 1 sets a key, option 2 reads one.
func main() {
	makeList("kv1")
	for {
		var op int
		fmt.Println("<<< op 1 -> set, 2 -> get >>>")
		fmt.Print(">>> ")
		fmt.Scan(&op)
		switch op {
		case 1:
			var k, v string
			fmt.Println("<<< set, input k,v >>>")
			fmt.Print(">>> ")
			fmt.Scan(&k, &v)
			set(k, v)
			fmt.Println()
		case 2:
			var k string
			fmt.Println("<<< get, input k >>>")
			fmt.Print(">>> ")
			fmt.Scan(&k)
			v := get(k)
			fmt.Printf("====> %s -> %s \n", k, v)
			fmt.Println()
		default:
			// Unknown option: prompt again.
		}
	}
}
|
package network
import (
"fmt"
"math"
)
// MLNetwork is a fully connected multi-layer perceptron.
type MLNetwork struct {
	netdef  []int      // neuron count per layer, input first, output last
	in      *mlLayer   // input layer
	layers  []*mlLayer // hidden layers, in order
	out     *mlLayer   // output layer
	phi     float64    // learning rate used by updateCoef
	biais   bool       // when true, input neuron 0 is a constant-1 bias
	created bool       // set once construction completed successfully
}
// dataFileMap caches loaded training data sets by logical name.
var dataFileMap = make(map[string]*MlDataSet)
// sigmoid is the logistic activation 1/(1+e^-x), mapping any real input
// into (0, 1).
func sigmoid(value float64) float64 {
	ex := math.Exp(-value)
	return 1 / (1 + ex)
}
// sigmoidp is the derivative of the logistic function expressed through the
// sigmoid value itself: s'(x) = s(x) * (1 - s(x)).
func sigmoidp(value float64) float64 {
	s := sigmoid(value)
	return s * (1 - s)
}
// NewNetwork builds a fully connected network from a layer-size definition:
// layers[0] inputs, layers[len-1] outputs, any layers between are hidden.
// At least an input and an output layer are required.
func NewNetwork(layers []int) (*MLNetwork, error) {
	// len(nil) == 0, so this also covers a nil slice.
	if len(layers) < 2 {
		return nil, fmt.Errorf("invalid layers definition")
	}
	n := &MLNetwork{
		netdef: layers,
		phi:    0.5,
		biais:  true,
	}
	n.in = n.newLayer("in", layers[0])
	n.layers = make([]*mlLayer, len(layers)-2)
	preLayer := n.in
	// Chain each hidden layer to its predecessor.
	for ii := 1; ii < len(layers)-1; ii++ {
		n.layers[ii-1] = n.newLayer(fmt.Sprintf("%d", ii), layers[ii])
		preLayer.ConnectLayerTo(n.layers[ii-1])
		preLayer = n.layers[ii-1]
	}
	n.out = n.newLayer("out", layers[len(layers)-1])
	preLayer.ConnectLayerTo(n.out)
	n.created = true
	return n, nil
}
// NetNetworkFromDataSet builds a network sized from a previously loaded
// data set's layer definition.
// NOTE(review): the name was presumably intended as NewNetworkFromDataSet;
// kept as-is because it is exported.
func NetNetworkFromDataSet(name string) (*MLNetwork, error) {
	dataSet, ok := dataFileMap[name]
	if !ok {
		return nil, fmt.Errorf("The logical train set %s doesn't exist", name)
	}
	return NewNetwork(dataSet.Layers)
}
// Getdef returns the layer-size definition the network was built from.
func (n *MLNetwork) Getdef() []int {
	return n.netdef
}
// IsCreated reports whether the network finished construction.
func (n *MLNetwork) IsCreated() bool {
	return n.created
}
// Display renders every layer as text lines, optionally including the
// connection coefficients; returns an empty slice for an unbuilt network.
func (n *MLNetwork) Display(coef bool) []string {
	lines := make([]string, 0, 0)
	if n.in == nil {
		return lines
	}
	n.in.print(&lines)
	if coef {
		n.in.printCoef(&lines)
	}
	for _, layer := range n.layers {
		layer.print(&lines)
		if coef {
			layer.printCoef(&lines)
		}
	}
	// Output layer has no outgoing coefficients to print.
	n.out.print(&lines)
	return lines
}
// ClearIn zeroes every input-layer neuron value.
// NOTE(review): this relies on n.in.neurons holding pointers so the range
// variable aliases the stored neuron — confirm against mlLayer's definition.
func (n *MLNetwork) ClearIn() {
	for _, neuron := range n.in.neurons {
		neuron.value = 0
	}
}
// SetInValue sets — or, when add is true, accumulates into — the value of
// the input neuron at index.
func (n *MLNetwork) SetInValue(add bool, index int, value float64) {
	if !add {
		n.in.neurons[index].value = value
		return
	}
	n.in.neurons[index].value += value
}
// GetInValue returns the current value of the input neuron at index.
func (n *MLNetwork) GetInValue(index int) float64 {
	return n.in.neurons[index].value
}
// Propagate loads values into the input layer and feeds them forward
// through the network. With bias enabled, input neuron 0 is pinned to 1 and
// values fills the remaining neurons. When retOut is true the output-layer
// values are returned, otherwise nil.
func (n *MLNetwork) Propagate(values []float64, retOut bool) []float64 {
	for ii, neuron := range n.in.neurons {
		if n.biais {
			if ii == 0 {
				neuron.value = 1
			} else {
				neuron.value = values[ii-1]
			}
		} else {
			neuron.value = values[ii]
		}
	}
	n.in.propagate()
	for _, layer := range n.layers {
		layer.propagate()
		//layer.print("layer")
	}
	if retOut {
		outs := n.getOutArray()
		//fmt.Printf("outs: %v\n", outs)
		return outs
	}
	return nil
}
// getOutArray snapshots the current output-layer neuron values.
func (n *MLNetwork) getOutArray() []float64 {
	values := make([]float64, 0)
	for _, outNeuron := range n.out.neurons {
		values = append(values, outNeuron.value)
	}
	return values
}
// matchRate returns the euclidean distance between the expected output
// vector and the network's current output-layer values.
func (n *MLNetwork) matchRate(dataOut []float64) float64 {
	var sum float64
	for ii, outNeuron := range n.out.neurons {
		diff := dataOut[ii] - outNeuron.value
		sum += diff * diff
	}
	return math.Sqrt(sum)
}
// BackPropagate runs one training step: it propagates the output error
// backwards through the layers, then applies the coefficient updates.
func (n *MLNetwork) BackPropagate(values []float64) {
	n.retroPropagateErrorDiff(values)
	n.updateCoef()
}
// retroPropagateErrorDiff computes the error gradient for every neuron,
// starting from the output layer (expected minus actual) and walking the
// hidden layers backwards down to the input layer.
func (n *MLNetwork) retroPropagateErrorDiff(values []float64) {
	//compute out error diff
	for i, neuron := range n.out.neurons {
		neuron.errorDiff = (values[i] - neuron.value) // * sigmoidp(neuron.sum)
	}
	//compute all other layers error diff
	for l := len(n.layers) - 1; l >= 0; l-- {
		n.layers[l].retroPropagateErrorDiff()
	}
	n.in.retroPropagateErrorDiff()
}
// updateCoef applies the previously computed error gradients to the
// connection coefficients of the input and hidden layers, scaled by the
// learning rate phi.
func (n *MLNetwork) updateCoef() {
	n.in.updateCoef(n.phi)
	for _, layer := range n.layers {
		layer.updateCoef(n.phi)
	}
}
// Copy creates a new network with the same topology and copies every
// layer's coefficients into it.
func (n *MLNetwork) Copy() (*MLNetwork, error) {
	net, err := NewNetwork(n.netdef)
	if err != nil {
		return nil, err
	}
	net.in.copyCoef(n.in)
	for ii, layer := range n.layers {
		net.layers[ii].copyCoef(layer)
	}
	net.out.copyCoef(n.out)
	return net, nil
}
// ComputeDistinctOut activates each input neuron alone in turn and counts
// how many distinct output neurons win (carry the highest activation).
func (n *MLNetwork) ComputeDistinctOut() int {
	winners := make(map[int]int)
	for ii := range n.in.neurons {
		stimulus := make([]float64, len(n.in.neurons))
		stimulus[ii] = 1
		outs := n.Propagate(stimulus, true)
		best := 0
		bestVal := 0.0
		for jj, val := range outs {
			if val > bestVal {
				bestVal = val
				best = jj
			}
		}
		winners[best] = 1
	}
	return len(winners)
}
|
package collectors
import (
"time"
cfclient "github.com/cloudfoundry-community/go-cfclient"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// IsolationSegmentsCollector scrapes Cloud Foundry isolation segments and
// exposes them, plus scrape bookkeeping metrics, to Prometheus.
type IsolationSegmentsCollector struct {
	namespace                                        string
	environment                                      string
	deployment                                       string
	cfClient                                         *cfclient.Client
	isolationSegmentInfoMetric                       *prometheus.GaugeVec // one labeled "1" per segment
	isolationSegmentsScrapesTotalMetric              prometheus.Counter
	isolationSegmentsScrapeErrorsTotalMetric         prometheus.Counter
	lastIsolationSegmentsScrapeErrorMetric           prometheus.Gauge
	lastIsolationSegmentsScrapeTimestampMetric       prometheus.Gauge
	lastIsolationSegmentsScrapeDurationSecondsMetric prometheus.Gauge
}
// NewIsolationSegmentsCollector wires up all metric descriptors (tagged with
// the environment/deployment const labels) and returns a ready collector.
func NewIsolationSegmentsCollector(
	namespace string,
	environment string,
	deployment string,
	cfClient *cfclient.Client,
) *IsolationSegmentsCollector {
	isolationSegmentInfoMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "isolation_segment",
			Name:        "info",
			Help:        "Labeled Cloud Foundry Isolation Segment information with a constant '1' value.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"isolation_segment_id", "isolation_segment_name"},
	)
	isolationSegmentsScrapesTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "isolation_segments_scrapes",
			Name:        "total",
			Help:        "Total number of scrapes for Cloud Foundry Isolation Segments.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	isolationSegmentsScrapeErrorsTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "isolation_segments_scrape_errors",
			Name:        "total",
			Help:        "Total number of scrape error of Cloud Foundry Isolation Segments.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastIsolationSegmentsScrapeErrorMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_isolation_segments_scrape_error",
			Help:        "Whether the last scrape of Isolation Segments metrics from Cloud Foundry resulted in an error (1 for error, 0 for success).",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastIsolationSegmentsScrapeTimestampMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_isolation_segments_scrape_timestamp",
			Help:        "Number of seconds since 1970 since last scrape of Isolation Segments metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	lastIsolationSegmentsScrapeDurationSecondsMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_isolation_segments_scrape_duration_seconds",
			Help:        "Duration of the last scrape of Isolation Segments metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)
	return &IsolationSegmentsCollector{
		namespace:                  namespace,
		environment:                environment,
		deployment:                 deployment,
		cfClient:                   cfClient,
		isolationSegmentInfoMetric: isolationSegmentInfoMetric,
		isolationSegmentsScrapesTotalMetric:              isolationSegmentsScrapesTotalMetric,
		isolationSegmentsScrapeErrorsTotalMetric:         isolationSegmentsScrapeErrorsTotalMetric,
		lastIsolationSegmentsScrapeErrorMetric:           lastIsolationSegmentsScrapeErrorMetric,
		lastIsolationSegmentsScrapeTimestampMetric:       lastIsolationSegmentsScrapeTimestampMetric,
		lastIsolationSegmentsScrapeDurationSecondsMetric: lastIsolationSegmentsScrapeDurationSecondsMetric,
	}
}
// Collect implements prometheus.Collector: it scrapes the isolation
// segments, then emits the info metric plus all scrape bookkeeping metrics
// (counts, last-error flag, timestamp, duration).
func (c IsolationSegmentsCollector) Collect(ch chan<- prometheus.Metric) {
	var begun = time.Now()
	errorMetric := float64(0)
	if err := c.reportIsolationSegmentsMetrics(ch); err != nil {
		errorMetric = float64(1)
		c.isolationSegmentsScrapeErrorsTotalMetric.Inc()
	}
	c.isolationSegmentsScrapeErrorsTotalMetric.Collect(ch)
	c.isolationSegmentsScrapesTotalMetric.Inc()
	c.isolationSegmentsScrapesTotalMetric.Collect(ch)
	c.lastIsolationSegmentsScrapeErrorMetric.Set(errorMetric)
	c.lastIsolationSegmentsScrapeErrorMetric.Collect(ch)
	c.lastIsolationSegmentsScrapeTimestampMetric.Set(float64(time.Now().Unix()))
	c.lastIsolationSegmentsScrapeTimestampMetric.Collect(ch)
	c.lastIsolationSegmentsScrapeDurationSecondsMetric.Set(time.Since(begun).Seconds())
	c.lastIsolationSegmentsScrapeDurationSecondsMetric.Collect(ch)
}
// Describe implements prometheus.Collector by forwarding the descriptors of
// every metric this collector can emit.
func (c IsolationSegmentsCollector) Describe(ch chan<- *prometheus.Desc) {
	c.isolationSegmentInfoMetric.Describe(ch)
	c.isolationSegmentsScrapesTotalMetric.Describe(ch)
	c.isolationSegmentsScrapeErrorsTotalMetric.Describe(ch)
	c.lastIsolationSegmentsScrapeErrorMetric.Describe(ch)
	c.lastIsolationSegmentsScrapeTimestampMetric.Describe(ch)
	c.lastIsolationSegmentsScrapeDurationSecondsMetric.Describe(ch)
}
// reportIsolationSegmentsMetrics lists the isolation segments from the CF
// API and emits one labeled info gauge (value 1) per segment. The gauge vec
// is reset first so segments deleted since the last scrape disappear.
func (c IsolationSegmentsCollector) reportIsolationSegmentsMetrics(ch chan<- prometheus.Metric) error {
	c.isolationSegmentInfoMetric.Reset()
	isolationSegments, err := c.cfClient.ListIsolationSegments()
	if err != nil {
		log.Errorf("Error while listing isolation segments: %v", err)
		return err
	}
	for _, isolationSegment := range isolationSegments {
		c.isolationSegmentInfoMetric.WithLabelValues(
			isolationSegment.GUID,
			isolationSegment.Name,
		).Set(float64(1))
	}
	c.isolationSegmentInfoMetric.Collect(ch)
	return nil
}
|
package port
import (
"github.com/mirzaakhena/danarisan/domain/repository"
"github.com/mirzaakhena/danarisan/domain/service"
)
// BukaAplikasiOutport aggregates every repository and service dependency
// required by the "open application" use case; a single gateway
// implementation satisfies all embedded interfaces.
type BukaAplikasiOutport interface {
	repository.FindOnePesertaRepo
	repository.FindOneArisanRepo
	repository.FindAllPesertaRepo
	repository.FindAllSlotRepo
	repository.FindAllUndianRepo
	repository.FindAllTagihanByArisanIDRepo
	repository.FindAllJurnalRepo
	repository.FindAllSaldoAkunRepo
	service.ReadOnlyDB
}
|
package ewallet
import (
"os"
"github.com/xendit/xendit-go/ewallet"
"github.com/imrenagi/go-payment/invoice"
)
// NewLinkAja is a factory for LinkAja payment charges using Xendit's latest
// charge API. The post-payment redirect target is read from the
// LINKAJA_SUCCESS_REDIRECT_URL environment variable.
func NewLinkAja(inv *invoice.Invoice) (*ewallet.CreateEWalletChargeParams, error) {
	props := map[string]string{
		"success_redirect_url": os.Getenv("LINKAJA_SUCCESS_REDIRECT_URL"),
	}
	return newBuilder(inv).
		SetPaymentMethod(EWalletIDLinkAja).
		SetChannelProperties(props).
		Build()
}
|
package main
import (
"github.com/miekg/dns"
"testing"
)
// TestFilters only switches on package-level debug logging for the rest of
// the test run; it performs no assertions itself.
func TestFilters(t *testing.T) {
	// Enable debug logging
	log_debug = true
}
// TestNoFilters verifies that a filterer with no accept/reject rules
// accepts every query.
func TestNoFilters(t *testing.T) {
	filterer := QueryFilterer{}
	msg := generateDNSMessage("discodns.net", dns.TypeA)
	// t.Fatal carries the message and stops the test (previously a
	// redundant t.Error followed by a message-less t.Fatal).
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
}
// TestSimpleAccept verifies that the accept filter "net:A" accepts only A
// queries for .net domains.
func TestSimpleAccept(t *testing.T) {
	filterer := QueryFilterer{acceptFilters: parseFilters([]string{"net:A"})}
	msg := generateDNSMessage("discodns.net", dns.TypeA)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	// Wrong record type: rejected.
	msg = generateDNSMessage("discodns.net", dns.TypeAAAA)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
	// Wrong domain: rejected.
	msg = generateDNSMessage("discodns.com", dns.TypeA)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
}
// TestSimpleReject verifies that the reject filter "net:A" rejects only A
// queries for .net domains and accepts everything else.
func TestSimpleReject(t *testing.T) {
	filterer := QueryFilterer{rejectFilters: parseFilters([]string{"net:A"})}
	msg := generateDNSMessage("discodns.com", dns.TypeA)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	msg = generateDNSMessage("discodns.net", dns.TypeAAAA)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	// Exact match of the reject rule: rejected.
	msg = generateDNSMessage("discodns.net", dns.TypeA)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
}
// TestSimpleAcceptFullDomain verifies that a type-less accept filter
// ("net:") accepts every record type for the matching domain and rejects
// all other domains.
func TestSimpleAcceptFullDomain(t *testing.T) {
	filterer := QueryFilterer{acceptFilters: parseFilters([]string{"net:"})}
	msg := generateDNSMessage("discodns.net", dns.TypeA)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	msg = generateDNSMessage("discodns.net", dns.TypeANY)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	msg = generateDNSMessage("discodns.com", dns.TypeA)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
	msg = generateDNSMessage("discodns.com", dns.TypeANY)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
}
// TestSimpleRejectFullDomain verifies that a type-less reject filter
// ("net:") rejects every record type for the matching domain and accepts
// all other domains.
func TestSimpleRejectFullDomain(t *testing.T) {
	filterer := QueryFilterer{rejectFilters: parseFilters([]string{"net:"})}
	msg := generateDNSMessage("discodns.net", dns.TypeA)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
	msg = generateDNSMessage("discodns.net", dns.TypeANY)
	if filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be rejected")
	}
	msg = generateDNSMessage("discodns.com", dns.TypeA)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
	msg = generateDNSMessage("discodns.com", dns.TypeANY)
	if !filterer.ShouldAcceptQuery(msg) {
		t.Fatal("Expected the query to be accepted")
	}
}
// TestSimpleAcceptSpecificTypes checks that a type-only accept filter
// (":A") accepts A queries for any domain and rejects other types.
func TestSimpleAcceptSpecificTypes(t *testing.T) {
	filterer := QueryFilterer{acceptFilters: parseFilters([]string{":A"})}
	for _, c := range []struct {
		qtype  uint16
		accept bool
	}{
		{dns.TypeA, true},
		{dns.TypeAAAA, false},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage("discodns.net", c.qtype)); got != c.accept {
			t.Fatalf("qtype %d: accepted=%v, want %v", c.qtype, got, c.accept)
		}
	}
}
// TestSimpleAcceptMultipleTypes checks that a comma-separated type filter
// (":A,PTR") accepts every listed type and rejects the rest.
func TestSimpleAcceptMultipleTypes(t *testing.T) {
	filterer := QueryFilterer{acceptFilters: parseFilters([]string{":A,PTR"})}
	for _, c := range []struct {
		qtype  uint16
		accept bool
	}{
		{dns.TypeA, true},
		{dns.TypeAAAA, false},
		{dns.TypePTR, true},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage("discodns.net", c.qtype)); got != c.accept {
			t.Fatalf("qtype %d: accepted=%v, want %v", c.qtype, got, c.accept)
		}
	}
}
// TestSimpleRejectSpecificTypes checks that a type-only reject filter
// (":A") rejects A queries and accepts other types.
func TestSimpleRejectSpecificTypes(t *testing.T) {
	filterer := QueryFilterer{rejectFilters: parseFilters([]string{":A"})}
	for _, c := range []struct {
		qtype  uint16
		accept bool
	}{
		{dns.TypeA, false},
		{dns.TypeAAAA, true},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage("discodns.net", c.qtype)); got != c.accept {
			t.Fatalf("qtype %d: accepted=%v, want %v", c.qtype, got, c.accept)
		}
	}
}
// TestSimpleRejectMultipleTypes checks that a comma-separated reject filter
// (":A,PTR") rejects every listed type and accepts the rest.
func TestSimpleRejectMultipleTypes(t *testing.T) {
	filterer := QueryFilterer{rejectFilters: parseFilters([]string{":A,PTR"})}
	for _, c := range []struct {
		qtype  uint16
		accept bool
	}{
		{dns.TypeA, false},
		{dns.TypeAAAA, true},
		{dns.TypePTR, false},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage("discodns.net", c.qtype)); got != c.accept {
			t.Fatalf("qtype %d: accepted=%v, want %v", c.qtype, got, c.accept)
		}
	}
}
// TestMultipleAccept checks that several accept filters combine: each
// domain only accepts its own listed record type.
func TestMultipleAccept(t *testing.T) {
	filterer := QueryFilterer{acceptFilters: parseFilters([]string{"net:A", "com:AAAA"})}
	for _, c := range []struct {
		domain string
		qtype  uint16
		accept bool
	}{
		{"discodns.net", dns.TypeA, true},
		{"discodns.net", dns.TypeAAAA, false},
		{"discodns.com", dns.TypeAAAA, true},
		{"discodns.com", dns.TypeA, false},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage(c.domain, c.qtype)); got != c.accept {
			t.Fatalf("%s qtype %d: accepted=%v, want %v", c.domain, c.qtype, got, c.accept)
		}
	}
}
// TestMultipleReject checks that several reject filters combine: each
// domain only rejects its own listed record type.
func TestMultipleReject(t *testing.T) {
	filterer := QueryFilterer{rejectFilters: parseFilters([]string{"net:A", "com:AAAA"})}
	for _, c := range []struct {
		domain string
		qtype  uint16
		accept bool
	}{
		{"discodns.net", dns.TypeA, false},
		{"discodns.net", dns.TypeAAAA, true},
		{"discodns.com", dns.TypeAAAA, false},
		{"discodns.com", dns.TypeA, true},
	} {
		if got := filterer.ShouldAcceptQuery(generateDNSMessage(c.domain, c.qtype)); got != c.accept {
			t.Fatalf("%s qtype %d: accepted=%v, want %v", c.domain, c.qtype, got, c.accept)
		}
	}
}
// generateDNSMessage builds a minimal DNS query holding one question for
// the fully-qualified form of domain with the given record type.
func generateDNSMessage(domain string, rrType uint16) *dns.Msg {
	question := dns.Question{Name: dns.Fqdn(domain), Qtype: rrType}
	return &dns.Msg{Question: []dns.Question{question}}
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net"
"net/rpc"
"os"
"strconv"
"strings"
"time"
)
// Topic maps a message subject to the subscribers registered for it.
type Topic struct {
	// topic is the subject messages are published under.
	topic string
	// IP holds one address per subscriber (parallel to port).
	IP []string
	// port holds each subscriber's listening port (parallel to IP).
	port []int
}
// Shared broker state, mutated by the RPC handlers below.
// NOTE(review): accessed from several goroutines without locking — confirm
// whether the RPC server serializes these calls.
var msgs *[]Message   // queue of pending messages
var topics []Topic    // known topics and their subscribers
var semantica int     // delivery semantics: 1 = at-least-once, 2 = timeout-based
var timeoutUtente int // operator-supplied timeout, in seconds
// ListaMsg is the RPC that returns the broker's current message queue.
// With at-least-once semantics (1) every queued message is returned; with
// timeout-based semantics (2) only messages whose Visibility is 0, i.e.
// not currently leased to a subscriber.
func (l *Listener) ListaMsg(line []byte, list *[]Message) error {
	fmt.Printf("Stampo lista messaggi: %+v\n", (*msgs)[:])
	switch semantica {
	case 1:
		*list = *msgs
	case 2:
		var visibili []Message
		for _, m := range *msgs {
			if m.Visibility == 0 {
				visibili = append(visibili, m)
			}
		}
		*list = visibili
	}
	return nil
}
// RiceviMsg is the RPC used by publishers to enqueue a message.
// line carries a JSON-encoded Message; the message's topic is registered on
// first use and a delivery pass is started with the configured semantics.
// NOTE(review): log.Fatal on malformed JSON kills the whole broker —
// consider returning the error to the publisher instead.
func (l *Listener) RiceviMsg(line []byte, reply *string) error {
	var m Message
	var topic Topic
	// decode the published message
	err := json.Unmarshal([]byte(line), &m)
	if err != nil {
		log.Fatal(err)
	}
	// messages start visible (not leased to any subscriber)
	m.Visibility = 0
	msgs = inserisci(*msgs, &m)
	topic.topic = m.Topic
	flag := 0
	// register the topic unless it is already known
	for i := 0; i < len(topics); i++ {
		if topics[i].topic == topic.topic {
			flag = 1
			break
		}
	}
	if flag == 0 {
		topics = append(topics, topic)
	}
	*reply = "ACK"
	// start delivery using the semantics chosen at startup:
	// 1 = at-least-once, 2 = timeout-based
	switch semantica {
	case 1:
		go invioSubAtLeastOnce()
	case 2:
		go invioSubTimeoutBased()
	}
	return nil
}
// Subscribe is the RPC through which a subscriber registers for a topic.
// line is "<topic> <clientAddr ip:port> <listenPort>"; res carries a
// human-readable outcome back to the caller.
// NOTE(review): splits[1]/splits[2] panic if the client sends fewer than
// three space-separated fields — confirm the client always does.
func (l *Listener) Subscribe(line string, res *string) error {
	// output message received
	fmt.Print("Messaggio ricevuto del subscribe: \n", line)
	// check to see which comand was received
	splits := strings.Split(line, " ")
	fmt.Printf("Stampo lista topic: %+v\n", topics[:])
	// flag: 0 = topic unknown, 1 = subscribed now, 2 = already subscribed
	flag := 0
	remoteIp := strings.Split(splits[1], ":")[0]
	port, err := strconv.Atoi(splits[2])
	if err != nil {
		// handle error
		fmt.Println(err)
		os.Exit(2)
	}
	// check that the topic exists
	for i := 0; i < len(topics); i++ {
		if topics[i].topic == splits[0] {
			flag = 1
			// check the caller is not already subscribed
			for j := 0; j < len(topics[i].IP); j++ {
				if topics[i].IP[j] == remoteIp && topics[i].port[j] == port {
					flag = 2
					break
				}
			}
			if flag == 1 {
				topics[i].IP = append(topics[i].IP, remoteIp)
				topics[i].port = append(topics[i].port, port)
			}
			break
		}
	}
	// reply to the subscriber according to the outcome
	if flag == 0 {
		*res = "Topic not found!"
	} else if flag == 1 {
		*res = "Subscribed to " + splits[0]
	} else {
		*res = "You are already subscribed"
	}
	// kick off a delivery pass with the configured semantics
	switch semantica {
	case 1:
		go invioSubAtLeastOnce()
	case 2:
		go invioSubTimeoutBased()
	}
	return nil
}
// RimuoviSotTopic is the RPC that removes a subscriber's subscription to a
// topic. line has the same "<topic> <clientAddr> <port>" shape as Subscribe.
func (l *Listener) RimuoviSotTopic(line string, res *string) error {
	splits := strings.Split(line, " ")
	fmt.Printf("Stampo lista topic: %+v\n", topics[:])
	// flag: 0 = topic unknown, 1 = found but not subscribed, 2 = removed
	flag := 0
	remoteIp := strings.Split(splits[1], ":")[0]
	port, err := strconv.Atoi(splits[2])
	if err != nil {
		// handle error
		fmt.Println(err)
		os.Exit(2)
	}
	var i int
	var j int
	// find the topic
	for i = 0; i < len(topics); i++ {
		fmt.Println("Topic ricevuto : " + splits[0])
		if topics[i].topic == splits[0] {
			flag = 1
			// find this subscriber's entry
			for j = 0; j < len(topics[i].IP); j++ {
				if topics[i].IP[j] == remoteIp && topics[i].port[j] == port {
					flag = 2
					break
				}
			}
			if flag == 2 {
				// delete entry j from both parallel slices
				copy(topics[i].IP[j:], topics[i].IP[j+1:])
				topics[i].IP = topics[i].IP[:len(topics[i].IP)-1]
				copy(topics[i].port[j:], topics[i].port[j+1:])
				topics[i].port = topics[i].port[:len(topics[i].port)-1]
			}
			break
		}
	}
	// reply to the subscriber according to the outcome
	if flag == 0 {
		*res = "Topic non trovato!"
	} else if flag == 2 {
		*res = "Eliminata la sottoscrizione al topic " + splits[0]
	} else {
		*res = "Non eri più sottoscritto"
	}
	return nil
}
// main starts the four RPC listeners (publishers, subscribers, removal and
// list endpoints) and then blocks forever.
// NOTE(review): the local topics slice shadows the package-level one and
// initPub never reads its parameter — confirm which one is intended.
func main() {
	var topics []Topic
	go initPub(msgs, &topics)
	go initSub()
	go initRimuovi()
	go initList()
	// Block forever without spinning: the original empty `for {}` busy-loop
	// pinned a CPU core; an empty select parks the goroutine instead.
	select {}
}
// initPub prompts the operator for the delivery semantics and its timeout,
// then serves publisher RPCs on TCP port 8080.
// The queue/topics parameters are currently unused: the handlers operate on
// the package-level msgs/topics globals instead.
// NOTE(review): the outer for loop would re-listen on the same port, but
// the inner accept loop never returns, so only one iteration ever runs.
func initPub(queue *[]Message, topics *[]Topic) {
	msgs = creaCoda()
	// TCP socket
	fmt.Println("Launching server...")
	// ask which delivery semantics to use
	fmt.Println("1. at least once\n" +
		"2.timeout-based\n" +
		"Inserire il tipo di semantica da utilizzare: ")
	_, err := fmt.Scanf("%d\n", &semantica)
	if err != nil {
		log.Fatal(err)
	}
	switch semantica {
	case 1:
		fmt.Println("Hai scelto la semantica at-least-once")
		// ask for the retransmission timeout
		fmt.Println("Inserire il valore del timeout di ritrasmissione: ")
		_, err := fmt.Scanf("%d\n", &timeoutUtente)
		if err != nil {
			log.Fatal(err)
		}
	case 2:
		fmt.Println("Hai scelto la semantica timeout-based")
		// ask for the visibility timeout
		fmt.Println("Inserire il valore del timeout di visibilità: ")
		_, err := fmt.Scanf("%d\n", &timeoutUtente)
		if err != nil {
			log.Fatal(err)
		}
	}
	// run loop forever
	for {
		addy, err := net.ResolveTCPAddr("tcp", "0.0.0.0:8080")
		if err != nil {
			log.Fatal(err)
		}
		l, e := net.ListenTCP("tcp", addy)
		if e != nil {
			log.Fatal("listen error:", e)
		}
		listener := new(Listener)
		rpc.Register(listener)
		for {
			rpc.Accept(l)
		}
	}
}
// initSub serves subscriber RPCs on TCP port 2345.
// NOTE(review): rpc.Register's error is ignored; since initPub/initList/
// initRimuovi register the same Listener type, later registrations return
// "service already defined", which is silently dropped.
func initSub() {
	addy, err := net.ResolveTCPAddr("tcp", "0.0.0.0:2345")
	if err != nil {
		log.Fatal(err)
	}
	l, e := net.ListenTCP("tcp", addy)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	listener := new(Listener)
	rpc.Register(listener)
	for {
		rpc.Accept(l)
	}
}
// initList serves the queue-listing RPC on TCP port 1234.
// NOTE(review): rpc.Register's error is ignored here as in the other init
// functions; only the first registration of Listener takes effect.
func initList() {
	addy, err := net.ResolveTCPAddr("tcp", "0.0.0.0:1234")
	if err != nil {
		log.Fatal(err)
	}
	l, e := net.ListenTCP("tcp", addy)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	listener := new(Listener)
	rpc.Register(listener)
	for {
		rpc.Accept(l)
	}
}
// initRimuovi serves the unsubscribe RPC on TCP port 3456.
// NOTE(review): rpc.Register's error is ignored here as in the other init
// functions; only the first registration of Listener takes effect.
func initRimuovi() {
	addy, err := net.ResolveTCPAddr("tcp", "0.0.0.0:3456")
	if err != nil {
		log.Fatal(err)
	}
	l, e := net.ListenTCP("tcp", addy)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	listener := new(Listener)
	rpc.Register(listener)
	for {
		rpc.Accept(l)
	}
}
// invioSubAtLeastOnce delivers every queued message to all subscribers of
// its topic with at-least-once semantics: a message is removed from the
// queue only once every subscriber has acknowledged it.
// NOTE(review): runs as a goroutine over the shared msgs/topics globals
// without locking — confirm callers serialize access.
func invioSubAtLeastOnce() {
	if len(*msgs) == 0 || len(topics) == 0 {
		return
	}
	currmess := 0
	for k, lenlist := 0, len(*msgs); k < lenlist; k++ {
		m := (*msgs)[currmess]
		// locate the topic entry for this message
		topicIdx := -1
		for i := range topics {
			if topics[i].topic == m.Topic {
				topicIdx = i
				break
			}
		}
		if topicIdx < 0 {
			// The original fell through with i == len(topics) and panicked
			// on topics[i]; skip messages with an unknown topic instead.
			currmess++
			continue
		}
		acked := 0
		for j := 0; j < len(topics[topicIdx].IP); j++ {
			// Retry until this subscriber acknowledges the message.
			for {
				client, err := rpc.Dial("tcp", topics[topicIdx].IP[j]+":"+strconv.Itoa(topics[topicIdx].port[j]))
				if err != nil {
					// The original log.Fatal killed the broker and made the
					// `continue` after it unreachable; log and retry instead.
					log.Println("dialing:", err)
					continue
				}
				s, err := json.Marshal(&m)
				if err != nil {
					log.Println(err)
					client.Close()
					continue
				}
				var reply string
				divCall := client.Go("Listener.Ricevi", s, &reply, nil)
				replyCall := <-divCall.Done
				// The original tested replyCall == nil, which is never true
				// for a value received from Done; the outcome of the call is
				// carried in replyCall.Error.
				if replyCall.Error != nil {
					log.Println(replyCall.Error)
					client.Close()
					continue
				}
				fmt.Println("Messaggio ricevuto :", reply)
				client.Close()
				acked++
				break
			}
		}
		if len(topics[topicIdx].IP) != 0 && acked == len(topics[topicIdx].IP) {
			// fully acknowledged: drop it from the queue
			*msgs = rimuovi(*msgs, currmess)
		} else {
			currmess++
		}
		// NOTE(review): this retransmission timer is never awaited (the send
		// into the buffered channel is simply dropped, and time.After's
		// channel is not even received from), so no delay actually elapses;
		// kept pending a decision on the intended pacing behaviour.
		timeout := make(chan bool, 1)
		go func() {
			time.After(time.Duration(timeoutUtente) * time.Second)
			timeout <- true
		}()
	}
}
// invioSubTimeoutBased delivers queued messages with timeout-based
// semantics: a message is marked in-flight (Visibility = 1) while an
// attempt runs, removed from the queue after the first successful
// delivery, and made visible again (Visibility = 0) when an attempt fails.
// NOTE(review): runs as a goroutine over the shared msgs/topics globals
// without locking — confirm callers serialize access.
func invioSubTimeoutBased() {
	if len(*msgs) == 0 || len(topics) == 0 {
		return
	}
	currmess := 0
	for k, lenlist := 0, len(*msgs); k < lenlist; k++ {
		m := (*msgs)[currmess]
		// locate the topic entry for this message
		topicIdx := -1
		for i := range topics {
			if topics[i].topic == m.Topic {
				topicIdx = i
				break
			}
		}
		if topicIdx < 0 {
			// The original fell through with i == len(topics) and panicked
			// on topics[i]; skip messages with an unknown topic instead.
			currmess++
			continue
		}
		delivered := false
		for j := 0; j < len(topics[topicIdx].IP); j++ {
			m.Visibility = 1 // lease the message for this attempt
			client, err := rpc.Dial("tcp", topics[topicIdx].IP[j]+":"+strconv.Itoa(topics[topicIdx].port[j]))
			if err != nil {
				fmt.Println("dialing:" + err.Error())
				m.Visibility = 0
				continue // try the next subscriber
			}
			s, err := json.Marshal(&m)
			if err != nil {
				// The original log.Fatal killed the broker and made the
				// cleanup below unreachable; report and move on instead.
				fmt.Println(err)
				client.Close()
				m.Visibility = 0
				continue
			}
			var reply string
			divCall := client.Go("Listener.Ricevi", s, &reply, nil)
			replyCall := <-divCall.Done
			// The original tested replyCall == nil, which is never true for
			// a value received from Done; the outcome is replyCall.Error.
			if replyCall.Error != nil {
				fmt.Println(replyCall.Error)
				client.Close()
				m.Visibility = 0
				continue
			}
			fmt.Println("Messaggio ricevuto :", reply)
			client.Close()
			delivered = true
			// the first successful delivery consumes the message
			*msgs = rimuovi(*msgs, currmess)
			// NOTE(review): this visibility timer is never awaited (the send
			// into the buffered channel is dropped, and time.After's channel
			// is not received from), so no delay actually elapses; kept
			// pending a decision on the intended behaviour.
			timeout := make(chan bool, 1)
			go func() {
				time.After(time.Duration(timeoutUtente) * time.Second)
				timeout <- true
			}()
			break
		}
		if !delivered {
			currmess++
		}
	}
}
|
package main
import (
	"html/template"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)
// t holds the parsed catindex.html template, loaded once at startup.
var t *template.Template

// init parses the template eagerly; template.Must panics on a parse
// failure so a broken template aborts the process at startup.
func init() {
	t = template.Must(template.ParseFiles("catindex.html"))
}
// main wires the routes (static background image, /index page, 404 for the
// favicon) and serves HTTP on port 8000.
func main() {
	router := mux.NewRouter()
	router.Handle("/background.jpg", http.StripPrefix("/", http.FileServer(http.Dir("./"))))
	router.HandleFunc("/index", catindex)
	router.Handle("/favicon.ico", http.NotFoundHandler())
	// The original discarded ListenAndServe's error, so a failure to bind
	// (port in use, permissions) exited silently with status 0.
	log.Fatal(http.ListenAndServe(":8000", router))
}
// catindex renders the pre-parsed catindex.html template as the response.
func catindex(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	// The original ignored the render error; by this point headers may be
	// written, so the best we can do is log the failure.
	if err := t.ExecuteTemplate(w, "catindex.html", nil); err != nil {
		log.Printf("rendering catindex.html: %v", err)
	}
}
|
package main
import "os"
func getNodeName() string {
h, err := os.Hostname()
if err != nil {
return "unknown"
}
return h
}
|
/*
Challenge:
Given a string s on the characters a-z, A-Z, 0-9, append the length of s to itself, counting the additional character(s) in the length as part of the total length of s.
Input:
Just a string of arbitrary length (can be empty).
Output:
The same string, but with its length appended to the end. The characters that represent the length should also be counted as part of the length. In cases where there are multiple valid lengths to append, choose the smallest one possible (see test cases for examples).
Test Cases:
INPUT -> OUTPUT // Comment
aaa -> aaa4
-> 1 // Empty string
aaaaaaaa -> aaaaaaaa9 // aaaaaaaa10 would also normally be valid, but violates using the smallest number rule mentioned above
aaaaaaaaa -> aaaaaaaaa11
a1 -> a13 // Input can contain numbers at the end of the string, you do not have to handle the fact that it looks like 13 rather than 3.
Longer test case(s):
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa101
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa102
Rules:
This is code-golf, so shortest code in bytes wins. Standard loopholes are forbidden. Submissions may be an entire program or a function, and you may either print the result to stdout or return it as a variable from a function.
*/
package main
import (
"fmt"
"unicode/utf8"
)
// main runs the challenge's test cases; assert panics on any mismatch, so
// reaching the end of main means every case passed.
func main() {
	assert(strlen("aaa") == "aaa4")
	assert(strlen("") == "1")
	assert(strlen("aaaaaaaa") == "aaaaaaaa9")
	assert(strlen("aaaaaaaaa") == "aaaaaaaaa11")
	assert(strlen("a1") == "a13")
	// 98 input characters -> appending "101" makes the total length 101
	assert(strlen("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") == "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa101")
	// 99 input characters -> appending "102" makes the total length 102
	assert(strlen("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") == "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa102")
}
// assert panics when its argument is false; main uses it as a minimal
// test harness.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// strlen appends to s the smallest decimal number equal to the total
// length of the result, counting the appended digits themselves.
// It computes the least fixed point of total = runes(s) + digits(total),
// approached from below so the smallest valid length is chosen.
func strlen(s string) string {
	n := utf8.RuneCountInString(s)
	total := n
	for {
		candidate := n + len(fmt.Sprintf("%d", total))
		if candidate == total {
			break
		}
		total = candidate
	}
	return s + fmt.Sprintf("%d", total)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentdefinition
import (
"context"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
oamCore "github.com/oam-dev/kubevela/apis/core.oam.dev"
)
// Shared suite state, initialized in BeforeSuite and torn down in AfterSuite.
var cfg *rest.Config                  // connection config for the envtest API server
var k8sClient client.Client           // client against the test control plane
var testEnv *envtest.Environment      // local control-plane harness
var controllerDone context.CancelFunc // cancels the controller manager's context
var r Reconciler                      // reconciler under test
var defRevisionLimit = 5              // revision-history cap passed to the reconciler
// TestComponentDefinition is the go-test entry point: it wires gomega's
// failure handler into ginkgo and runs the specs in this package.
func TestComponentDefinition(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "ComponentDefinition Suite")
}
// BeforeSuite boots a local control plane via envtest, registers the OAM
// and CRD schemes, creates a client, and starts the reconciler's manager
// under a cancellable context that AfterSuite cancels.
var _ = BeforeSuite(func() {
	By("Bootstrapping test environment")
	useExistCluster := false
	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute,
		ControlPlaneStopTimeout:  time.Minute,
		CRDDirectoryPaths: []string{
			filepath.Join("../../../../../../..", "charts/vela-core/crds"), // this has all the required CRDs,
		},
		UseExistingCluster: &useExistCluster,
	}
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	// Register project API types and the apiextensions CRD types on the
	// global scheme used by both the client and the manager.
	err = oamCore.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())
	Expect(crdv1.AddToScheme(scheme.Scheme)).Should(BeNil())
	By("Create the k8s client")
	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).ToNot(HaveOccurred())
	Expect(k8sClient).ToNot(BeNil())
	By("Starting the controller in the background")
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme:             scheme.Scheme,
		MetricsBindAddress: "0", // "0" disables the metrics endpoint
		Port:               48081,
	})
	Expect(err).ToNot(HaveOccurred())
	r = Reconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		options: options{
			defRevLimit: defRevisionLimit,
		},
	}
	Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
	var ctx context.Context
	ctx, controllerDone = context.WithCancel(context.Background())
	go func() {
		defer GinkgoRecover()
		Expect(mgr.Start(ctx)).ToNot(HaveOccurred())
	}()
})
// AfterSuite stops the controller manager goroutine by cancelling its
// context, then tears down the envtest control plane.
var _ = AfterSuite(func() {
	By("Stop the controller")
	controllerDone()
	By("Tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
|
package main
import (
	"html/template"
	"log"
	"net/http"
)
type controller string
func (h controller) Fire(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles("views/example.html") //setp 1
t.Execute(w, "Programmer") //step 2
}
// Controller exported name
var Controller controller
|
package trigger
// ButtonTrigger produces an event on its trigger channel for every push of
// the button.
type ButtonTrigger struct {
	triggersChan chan bool
}

// NewButtonTrigger creates a ButtonTrigger that reports presses on ch.
func NewButtonTrigger(ch chan bool) *ButtonTrigger {
	trigger := &ButtonTrigger{triggersChan: ch}
	return trigger
}

// TriggersChan returns the channel carrying triggered events.
func (b ButtonTrigger) TriggersChan() chan bool {
	return b.triggersChan
}

// RunAsync starts the trigger in the background.
func (b ButtonTrigger) RunAsync() {
	go func(c chan bool) {
		// TODO: GPIO input implementation
	}(b.triggersChan)
}
|
package main
import (
"github.com/valyala/fasthttp"
)
/*
circuit breaker, response for handle requests, decide reject it or not, record response
status.
*/
// Breaker is a circuit breaker: a registry of Applications keyed by the
// request Host they serve.
type Breaker struct {
	apps map[string]*Application
}
// NewBreaker returns a circuit breaker with no applications registered.
func NewBreaker() *Breaker {
	return &Breaker{apps: make(map[string]*Application)}
}
// ServeHTTP dispatches the request to the Application registered under the
// request's Host header, replying 404 when no such app exists.
func (b *Breaker) ServeHTTP(ctx *fasthttp.RequestCtx) {
	appName := string(ctx.Host())
	var app *Application
	var exist bool
	if app, exist = b.apps[appName]; !exist {
		// NOTE(review): the body is written before SetStatusCode; fasthttp
		// buffers responses, so the 404 should still apply — confirm.
		ctx.WriteString("app " + appName + " not exist")
		ctx.SetStatusCode(fasthttp.StatusNotFound)
		return
	}
	app.ServeHTTP(ctx)
}
|
package records
import (
"testing"
"github.com/coredns/caddy"
)
// TestRecordsParse feeds Corefile snippets to recordsParse and checks both
// the error expectation and the parsed zone origins.
func TestRecordsParse(t *testing.T) {
	tests := []struct {
		input           string
		shouldErr       bool
		expectedOrigins []string
	}{
		{
			`records {
@ IN MX 10 mx1.example.org.
}
`,
			false, []string{"."},
		},
	}
	for i, test := range tests {
		c := caddy.NewTestController("dns", test.input)
		re, err := recordsParse(c)
		if err == nil && test.shouldErr {
			t.Fatalf("Test %d expected errors, but got no error", i)
		} else if err != nil && !test.shouldErr {
			t.Fatalf("Test %d expected no errors, but got '%v'", i, err)
		} else {
			// NOTE(review): when err != nil AND shouldErr is true, control
			// reaches this branch and dereferences re.origins; if recordsParse
			// returns nil on error, such a case would panic. No current table
			// entry hits it, but new shouldErr cases would.
			if len(re.origins) != len(test.expectedOrigins) {
				t.Fatalf("Test %d expected %v, got %v", i, test.expectedOrigins, re.origins)
			}
			for j, name := range test.expectedOrigins {
				if re.origins[j] != name {
					t.Fatalf("Test %d expected %v for %d th zone, got %v", i, name, j, re.origins[j])
				}
			}
		}
	}
}
|
package leetcode
/*Given the root of a binary search tree with distinct values, modify it so that every node has a new value equal to the sum of the values of the original tree that are greater than or equal to node.val.
As a reminder, a binary search tree is a tree that satisfies these constraints:
The left subtree of a node contains only nodes with keys less than the node's key.
The right subtree of a node contains only nodes with keys greater than the node's key.
Both the left and right subtrees must also be binary search trees.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/binary-search-tree-to-greater-sum-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// bstToGst rewrites each node's value to the sum of all original values
// greater than or equal to it (a "greater sum tree") and returns the root.
func bstToGst(root *TreeNode) *TreeNode {
	runningSum := 0
	add(root, &runningSum)
	return root
}
// add traverses the tree in reverse in-order (right, node, left) so *sum
// accumulates values from largest to smallest; each visited node's value is
// replaced by the running total.
func add(node *TreeNode, sum *int) {
	if node == nil {
		return
	}
	add(node.Right, sum)
	*sum += node.Val
	node.Val = *sum
	add(node.Left, sum)
}
|
package cache
import (
"sync"
"container/list"
)
// LFU(Least Frequently Used)
// paper :http://dhruvbird.com/lfu.pdf
// head 1 2 5 9 freqNode list
// x z b c dataNode
// y a
// or:
// 3 map:(k-v)、(k-count)、(count-keySet)。
// get:O(1);put:O(1):Space: O(n);
// dataNode is one cache entry; freqNode points at the element of
// LFUCache.freqNodes whose bucket currently contains this entry.
type dataNode struct {
	key      string
	val      interface{}
	freqNode *list.Element
}

// freqNode is a bucket of all entries sharing the same access frequency.
type freqNode struct {
	freq int
	item map[*dataNode]struct{}
}

// LFUCache is a least-frequently-used cache: a doubly-linked list of
// frequency buckets kept in ascending freq order, plus a key -> entry map.
// All exported methods are safe for concurrent use.
type LFUCache struct {
	sync.Mutex
	capacity  int                  // maximum number of entries before eviction
	freqNodes *list.List           // frequency buckets, ascending by freq
	data      map[string]*dataNode // key -> entry
}

// NewLFUCache returns an empty cache that evicts once it holds cap entries.
func NewLFUCache(cap int) *LFUCache {
	return &LFUCache{
		capacity:  cap,
		data:      make(map[string]*dataNode),
		freqNodes: list.New(),
	}
}

// IsEmpty reports whether the cache holds no entries.
// All methods use pointer receivers: the original value receivers copied
// the embedded sync.Mutex, so each call locked a throwaway copy.
func (c *LFUCache) IsEmpty() bool {
	c.Lock()
	defer c.Unlock()
	return len(c.data) == 0
}

// IsFull reports whether the cache is at (or beyond) capacity.
func (c *LFUCache) IsFull() bool {
	c.Lock()
	defer c.Unlock()
	return c.isFull()
}

// isFull reports capacity exhaustion; the caller must hold c's lock.
// (The original locked here too, which deadlocked Put -> isFull because
// Go mutexes are not reentrant.)
func (c *LFUCache) isFull() bool {
	return len(c.data) >= c.capacity
}

// Size returns the number of entries currently cached.
func (c *LFUCache) Size() int {
	c.Lock()
	defer c.Unlock()
	return len(c.data)
}

// Put inserts or updates key. Inserting into a full cache first evicts one
// least-frequently-used entry; new entries start in the freq-1 bucket.
func (c *LFUCache) Put(key string, value interface{}) {
	c.Lock()
	defer c.Unlock()
	if existing, ok := c.data[key]; ok {
		existing.val = value
		return
	}
	if c.isFull() {
		c.evict(1)
	}
	dn := &dataNode{key: key, val: value}
	head := c.freqNodes.Front()
	if head == nil || head.Value.(*freqNode).freq != 1 {
		head = c.freqNodes.PushFront(&freqNode{
			freq: 1,
			item: make(map[*dataNode]struct{}),
		})
	}
	head.Value.(*freqNode).item[dn] = struct{}{}
	// Two of the three insert paths in the original never set dn.freqNode,
	// so the first Get on such a key dereferenced a nil *list.Element.
	dn.freqNode = head
	c.data[key] = dn
}

// evict removes up to count entries from the lowest-frequency buckets,
// dropping buckets that become empty. Caller must hold c's lock.
func (c *LFUCache) evict(count int) {
	evicted := 0
	for evicted < count {
		ele := c.freqNodes.Front()
		if ele == nil {
			return
		}
		fn := ele.Value.(*freqNode)
		for dn := range fn.item {
			if evicted >= count {
				break
			}
			delete(c.data, dn.key)
			delete(fn.item, dn)
			evicted++
		}
		if len(fn.item) == 0 {
			c.freqNodes.Remove(ele)
		}
	}
}

// Get returns the value stored for key, bumping its access frequency, or
// nil when the key is absent.
func (c *LFUCache) Get(key string) interface{} {
	c.Lock()
	defer c.Unlock()
	if dn, ok := c.data[key]; ok {
		c.increment(dn)
		return dn.val
	}
	return nil
}

// increment moves dn from its bucket to the freq+1 bucket, creating that
// bucket when missing and removing the old bucket once empty.
// Caller must hold c's lock.
func (c *LFUCache) increment(dn *dataNode) {
	fnEle := dn.freqNode
	fn := fnEle.Value.(*freqNode)
	nextFreq := fn.freq + 1
	delete(fn.item, dn)
	nextEle := fnEle.Next()
	// The original reused whatever bucket happened to follow even when its
	// freq was not nextFreq, corrupting the frequency ordering.
	if nextEle == nil || nextEle.Value.(*freqNode).freq != nextFreq {
		nextEle = c.freqNodes.InsertAfter(&freqNode{
			freq: nextFreq,
			item: make(map[*dataNode]struct{}),
		}, fnEle)
	}
	nextEle.Value.(*freqNode).item[dn] = struct{}{}
	dn.freqNode = nextEle
	if len(fn.item) == 0 {
		// The original leaked empty buckets, which left evict spinning on a
		// front bucket with nothing to delete.
		c.freqNodes.Remove(fnEle)
	}
}

// Remove deletes key from the cache; a missing key is a no-op.
// (The original left this method as an empty stub.)
func (c *LFUCache) Remove(key string) {
	c.Lock()
	defer c.Unlock()
	dn, ok := c.data[key]
	if !ok {
		return
	}
	fn := dn.freqNode.Value.(*freqNode)
	delete(fn.item, dn)
	if len(fn.item) == 0 {
		c.freqNodes.Remove(dn.freqNode)
	}
	delete(c.data, key)
}
|
package main
import (
"context"
"fmt"
"github.com/olivere/elastic/v7"
)
// Student is a demo document type indexed into Elasticsearch.
type Student struct {
	Name    string `json:"name"`
	Age     int    `json:"age"`
	Married bool   `json:"married"`
}

// run prints that the student is running and returns the receiver so calls
// can be chained.
func (st *Student) run() *Student {
	fmt.Printf("%s在跑...", st.Name)
	return st
}

// play prints that the student is playing and returns the receiver so
// calls can be chained.
func (st *Student) play() *Student {
	fmt.Printf("%s在玩...", st.Name)
	return st
}
// main demonstrates method chaining on Student and then indexes one
// document into the "user" index of a local Elasticsearch at
// 127.0.0.1:9200.
// NOTE(review): panics on any connection or indexing error — acceptable
// for a demo, not for production code.
func main() {
	moxi := Student{
		Name:    "陌溪",
		Age:     9000,
		Married: true,
	}
	moxi.run()
	moxi.run()
	moxi.play().run()
	// build the client with the elastic library's NewClient
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		panic(err)
	}
	fmt.Println("connect to es success")
	// index a single document
	p1 := Student{Name: "rion", Age: 22, Married: false}
	put1, err := client.Index().
		Index("user").           // target index
		BodyJson(p1).            // struct is marshalled to JSON
		Do(context.Background()) // execute; a context deadline could bound this call
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("Indexed student %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
}
|
package wire
import (
"bytes"
"io"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/protocol"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Exercises the IETF BLOCKED frame (type byte 0x08): parsing a sample
// frame, io.EOF behaviour on truncated input, serialization, and the
// minimum-length computation.
var _ = Describe("BLOCKED frame", func() {
	Context("when parsing", func() {
		It("accepts sample frame", func() {
			data := []byte{0x08}
			data = append(data, encodeVarInt(0x12345678)...) // offset
			b := bytes.NewReader(data)
			frame, err := parseBlockedFrame(b, versionIETFFrames)
			Expect(err).ToNot(HaveOccurred())
			Expect(frame.Offset).To(Equal(protocol.ByteCount(0x12345678)))
			Expect(b.Len()).To(BeZero())
		})
		It("errors on EOFs", func() {
			data := []byte{0x08}
			data = append(data, encodeVarInt(0x12345678)...)
			// the full frame parses cleanly...
			_, err := parseBlockedFrame(bytes.NewReader(data), versionIETFFrames)
			Expect(err).ToNot(HaveOccurred())
			// ...and every strict prefix must fail with io.EOF
			for i := range data {
				_, err := parseBlockedFrame(bytes.NewReader(data[:i]), versionIETFFrames)
				Expect(err).To(MatchError(io.EOF))
			}
		})
	})
	Context("when writing", func() {
		It("writes a sample frame", func() {
			b := &bytes.Buffer{}
			frame := BlockedFrame{Offset: 0xdeadbeef}
			err := frame.Write(b, protocol.VersionWhatever)
			Expect(err).ToNot(HaveOccurred())
			expected := []byte{0x08}
			expected = append(expected, encodeVarInt(0xdeadbeef)...)
			Expect(b.Bytes()).To(Equal(expected))
		})
		It("has the correct min length", func() {
			frame := BlockedFrame{Offset: 0x12345}
			Expect(frame.Length(versionIETFFrames)).To(Equal(1 + utils.VarIntLen(0x12345)))
		})
	})
})
|
package node
import (
	"context"
	"errors"
	"io"
	"log"
	"sync"
	"time"

	data "github.com/bgokden/veri/data"
	"github.com/bgokden/veri/util"
	pb "github.com/bgokden/veri/veriservice"
)
// GetDataSourceClient builds a remote DataSource backed by the peer with
// the given id, reusing gRPC connections from connectionCache.
// NOTE(review): the p *pb.Peer parameter is currently unused — confirm it
// is kept only for interface compatibility.
func GetDataSourceClient(p *pb.Peer, name string, idOfPeer string, connectionCache *util.ConnectionCache) data.DataSource {
	return &DataSourceClient{
		Ids:             []string{idOfPeer},
		Name:            name,
		IdOfPeer:        idOfPeer,
		ConnectionCache: connectionCache,
	}
}
// DataSourceClient is a DataSource implementation that proxies every
// operation to a remote peer over gRPC.
type DataSourceClient struct {
	Ids             []string              // identifiers reported by GetID
	Name            string                // remote dataset name
	IdOfPeer        string                // key used to fetch a connection from the cache
	ConnectionCache *util.ConnectionCache // pooled gRPC connections
}
// StreamSearch forwards a search for datum to the remote peer and copies
// every scored result from the gRPC stream into scoredDatumStream.
// queryWaitGroup is signalled when the call finishes. Returns nil once the
// stream is fully drained, or the setup/transport error otherwise.
func (dcs *DataSourceClient) StreamSearch(datum *pb.Datum, scoredDatumStream chan<- *pb.ScoredDatum, queryWaitGroup *sync.WaitGroup, config *pb.SearchConfig) error {
	defer queryWaitGroup.Done()
	clientCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn := dcs.ConnectionCache.Get(dcs.IdOfPeer)
	if conn == nil {
		return errors.New("Connection failure")
	}
	defer dcs.ConnectionCache.Put(conn)
	client := pb.NewVeriServiceClient(conn.Conn)
	searchRequest := &pb.SearchRequest{
		Datum:  []*pb.Datum{datum},
		Config: config,
	}
	stream, err := client.SearchStream(clientCtx, searchRequest)
	if err != nil {
		return err
	}
	for {
		protoScoredDatum, err := stream.Recv()
		if err == io.EOF {
			// Normal end of stream. The original returned the loop's last
			// err here, reporting io.EOF (success) as an error to callers.
			return nil
		}
		if err != nil {
			return err
		}
		scoredDatumStream <- protoScoredDatum
	}
}
// Insert sends a single datum to the remote peer for insertion into the
// dataset named dcs.Name, bounded by a 10 second timeout.
func (dcs *DataSourceClient) Insert(datum *pb.Datum, config *pb.InsertConfig) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn := dcs.ConnectionCache.Get(dcs.IdOfPeer)
	if conn == nil {
		return errors.New("Connection failure")
	}
	defer dcs.ConnectionCache.Put(conn)

	req := &pb.InsertionRequest{
		Config:   config,
		Datum:    datum,
		DataName: dcs.Name,
	}
	_, err := pb.NewVeriServiceClient(conn.Conn).Insert(ctx, req)
	return err
}
// GetDataInfo fetches metadata about the dataset dcs.Name from the remote
// peer; it returns nil when no connection is available or the RPC fails.
func (dcs *DataSourceClient) GetDataInfo() *pb.DataInfo {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn := dcs.ConnectionCache.Get(dcs.IdOfPeer)
	if conn == nil {
		log.Printf("Connection failure\n")
		return nil
	}
	defer dcs.ConnectionCache.Put(conn)

	info, err := pb.NewVeriServiceClient(conn.Conn).GetDataInfo(ctx, &pb.GetDataRequest{
		Name: dcs.Name,
	})
	if err != nil {
		log.Printf("GetDataInfo Error: %v\n", err.Error())
		return nil
	}
	return info
}
// GetID returns the client's identifier: its peer-id list serialized into
// a single string by SerializeStringArray.
func (dcs *DataSourceClient) GetID() string {
	return SerializeStringArray(dcs.Ids)
}
|
package main
import (
"fmt"
)
// main demonstrates Go's type inference with %T: inferred int/string,
// inferred int/float64/bool, and explicitly typed int declarations.
func main() {
	var count, text = 1, "This is string"
	fmt.Printf("\nType: %T, Data: %d", count, count)
	fmt.Printf("\nType: %T, Data: %s", text, text)
	fmt.Println()

	var whole, ratio, flag = 1, 3.14, true
	fmt.Printf("\nType: %T, Data: %d", whole, whole)
	fmt.Printf("\nType: %T, Data: %f", ratio, ratio)
	fmt.Printf("\nType: %T, Data: %t", flag, flag)
	fmt.Println()

	var x, y, z int = 1, 2, 3
	fmt.Printf("\nType: %T, Data: %d", x, x)
	fmt.Printf("\nType: %T, Data: %d", y, y)
	fmt.Printf("\nType: %T, Data: %d", z, z)
}
|
package main
import (
"bufio"
"bytes"
"os"
"strings"
)
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
// cleanStringValue strips trailing spaces, tabs and newlines, then
// rewrites embedded NUL bytes as newlines.
func cleanStringValue(value string) string {
	trimmed := strings.TrimRight(value, " \t\n")
	return string(bytes.Replace([]byte(trimmed), []byte("\x00"), []byte("\n"), -1))
}
|
package log
import (
"net/http"
log "github.com/Sirupsen/logrus"
)
// LogError records err at Error level with the request's method and URL
// attached as structured fields; info becomes the log message.
func LogError(r *http.Request, err error, info string, logger *log.Logger) {
	fields := log.Fields{
		"error":  err.Error(),
		"method": r.Method,
		"url":    r.URL.String(),
	}
	logger.WithFields(fields).Error(info)
}
// Error wraps msg in a fields map under the "error" key.
func Error(msg string) map[string]interface{} {
	return log.Fields{
		"error": msg,
	}
}

// Debug wraps msg in a fields map under the "debug" key.
func Debug(msg string) map[string]interface{} {
	return log.Fields{
		"debug": msg,
	}
}

// Info wraps msg in a fields map under the "info" key.
func Info(msg string) map[string]interface{} {
	return log.Fields{
		"info": msg,
	}
}

// Warn wraps msg in a fields map under the "warn" key.
func Warn(msg string) map[string]interface{} {
	return log.Fields{
		"warn": msg,
	}
}

// Fatal wraps msg in a fields map under the "fatal" key.
func Fatal(msg string) map[string]interface{} {
	return log.Fields{
		"fatal": msg,
	}
}

// Panic wraps msg in a fields map under the "panic" key.
func Panic(msg string) map[string]interface{} {
	return log.Fields{
		"panic": msg,
	}
}
|
package cachegroup_test
import (
"reflect"
"testing"
"github.com/221bytes/negroniredis/cachegroup"
)
// Test checks that GetGroupCacheIndexes resolves a group name to the
// index of the cache group registered under that name.
func Test(t *testing.T) {
	cgm := cachegroup.NewCacheGroupManager()
	cgm.AddCacheGroup(
		cachegroup.CreateCacheGroup("test", "tolo", "yolo"),
		cachegroup.CreateCacheGroup("tata", "tolo", "yolo"),
		cachegroup.CreateCacheGroup("toto", "tolo", "yolo"),
	)
	want := []int{0}
	v := cgm.GetGroupCacheIndexes("test")
	if !reflect.DeepEqual(v, want) {
		t.Error(
			"expected", want,
			"got", v,
		)
	}
}
|
// NOTE(review): This region is a code-generation template fixture, not
// compilable Go. It contains ${...} placeholders and deliberately
// malformed signatures (e.g. Method1_3/Method1_4 are missing a closing
// parenthesis, and the DoService* lines are bare interface methods with
// no enclosing interface). Left byte-identical on purpose — presumably
// a doc-comment generator is tested against these shapes; confirm with
// the tool that consumes this file before "fixing" anything here.
// Template Declare Start
//
// ${function_name}
// @Description:${todo}
// @receiver ${receiver}
// @param ${params}
// @return ${ret_type}
//
// Template Declare End
// Methods Declare
//
// Method1
// @Description:
//
func Method1() {
}
//
// Method1_1
// @Description:
// @return func(int64) error
//
func Method1_1() func(int64) error {
}
//
// Method1_2
// @Description:
// @param val
// @return error
// @return func(int64) error
//
func Method1_2(val Type) (error, func(int64) error) {
}
//
// Method1_3
// @Description:
// @param val
// @return error
// @return func(int64) error
// @return func(int64, int32) (error, func(Type) int64
//
func Method1_3(val Type) (error, func(int64) error, func(int64, int32) (error, func(Type) int64) {
}
//
// Method1_4
// @Description:
// @param val
// @param handler
// @return error
// @return func(int64) error
// @return func(int64, int32) (error, func(Type) int64
//
func Method1_4(val Type, handler func(int64) error) (error, func(int64) error, func(int64, int32) (error, func(Type) int64) {
}
//
// Method1_4
// @Description:
// @param val
// @param handler
// @param handler2
// @return error
// @return func(int64) error
// @return func(int64, int32) (error, func(Type) int64
//
func Method1_4(val Type, handler func(int64) error, handler2 func(int64, int32) int32) (error, func(int64) error, func(int64, int32) (error, func(Type) int64) {
}
//
// DoService1
// @Description:
//
// start_interface_method
DoService1()
// end_interface_method
//
// DoService2
// @Description:
// @return error
//
// start_interface_method
DoService2() error
// end_interface_method
//
// DoService3
// @Description:
// @return Type
// @return func (int32, int64) error
//
// start_interface_method
DoService3() (t Type, a func (int32, int64) error)
// end_interface_method
|
package lintcode
/**
* Definition for a binary tree node.
*
*/
// TreeNode is a binary tree node holding an int payload and optional
// left/right children (nil when absent).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
/**
 * buildTree reconstructs a binary tree from its traversals by delegating
 * to build over the full index range of both slices.
 * @param inorder: A list of integers that inorder traversal of a tree
 * @param postorder: A list of integers that postorder traversal of a tree
 * @return: Root of a tree
 */
func buildTree(inorder []int, postorder []int) *TreeNode {
	inLast, postLast := len(inorder)-1, len(postorder)-1
	return build(inorder, postorder, 0, inLast, 0, postLast)
}
// build recursively reconstructs the subtree whose inorder values span
// inorder[inLeft..inRight] and whose postorder values span
// postorder[postLeft..postRight]. The last postorder element is the
// subtree root; its position in the inorder window splits the left and
// right subtrees.
func build(inorder []int, postorder []int, inLeft int, inRight int, postLeft int, postRight int) *TreeNode {
	if len(inorder) == 0 || len(postorder) == 0 {
		return nil
	}
	// Guard the degenerate cases: negative bounds or an empty
	// (inverted) window mean there is no subtree here.
	if inRight < 0 || postRight < 0 || inLeft > inRight || postLeft > postRight {
		return nil
	}
	// Single-element windows are leaves.
	if inLeft == inRight {
		return &TreeNode{
			inorder[inLeft],
			nil,
			nil,
		}
	}
	if postLeft == postRight {
		return &TreeNode{
			postorder[postRight],
			nil,
			nil,
		}
	}
	rootVal := postorder[postRight]
	root := &TreeNode{
		rootVal,
		nil,
		nil,
	}
	// Linear scan for the root's position in the inorder sequence.
	rootLoc := 0
	for i := 0; i < len(inorder); i++ {
		if rootVal == inorder[i] {
			rootLoc = i
			break
		}
	}
	// The left-subtree size from the inorder split determines how the
	// postorder window divides between the two subtrees.
	leftNum := rootLoc - inLeft
	// rightNum := inRight - rootLoc
	// Inorder indexes:   [inLeft..rootLoc-1], rootLoc (root), [rootLoc+1..inRight]
	// Postorder indexes: [postLeft..postLeft+leftNum-1] (left), [postLeft+leftNum..postRight-1] (right), postRight (root)
	root.Left = build(inorder, postorder, inLeft, rootLoc-1, postLeft, postLeft+leftNum-1)
	root.Right = build(inorder, postorder, rootLoc+1, inRight, postLeft+leftNum, postRight-1)
	return root
}
|
package main
import (
"net/http"
)
type AddHeaderMiddleware struct {
key string
val string
}
func (a AddHeaderMiddleware) Process(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set(a.key, a.val)
handler(w, r)
}
}
func New(params map[string]interface{}) (interface{}, error) {
return AddHeaderMiddleware{
key: params["key"].(string),
val: params["val"].(string),
}, nil
}
|
package rstreams
import (
"context"
"io/ioutil"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber/validate"
)
// Ginkgo suite for the Redis Streams backend's relay-option validation.
// Each spec strips one required field from a minimally valid fixture
// and asserts the matching sentinel error.
var _ = Describe("Redis Streams Backend", func() {
	var r *RedisStreams
	var relayOpts *opts.RelayOptions
	// Fresh backend (logging discarded) and valid options before each spec.
	BeforeEach(func() {
		r = &RedisStreams{
			connArgs: &args.RedisStreamsConn{},
			log:      logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
		}
		relayOpts = &opts.RelayOptions{
			RedisStreams: &opts.RelayGroupRedisStreamsOptions{
				Args: &args.RedisStreamsReadArgs{
					Streams: []string{"test"},
				},
			},
		}
	})
	Context("validateRelayOptions", func() {
		It("validates nil relay options", func() {
			err := validateRelayOptions(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyRelayOpts))
		})
		It("validates missing backend group", func() {
			relayOpts.RedisStreams = nil
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendGroup))
		})
		It("validates missing backend args", func() {
			relayOpts.RedisStreams.Args = nil
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendArgs))
		})
		It("validates missing backend stream", func() {
			relayOpts.RedisStreams.Args.Streams = nil
			err := validateRelayOptions(relayOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrMissingStream))
		})
	})
	// Relay itself must reject nil options before doing any work.
	Context("Relay", func() {
		It("validates relay options", func() {
			err := r.Relay(context.Background(), nil, nil, nil)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyRelayOpts.Error()))
		})
	})
})
|
package client
// IdleChannel links a producer and a consumer through a goroutine (see
// run) that holds at most one pending Command; a newer Command replaces
// an undelivered one.
type IdleChannel struct {
	input  chan Command
	output chan Command
	buffer Command // NOTE(review): appears unused — run keeps its pending value in a local; confirm before removing
}
// NewIdleChannel builds an IdleChannel and starts the goroutine that
// shuttles Commands from input to output.
func NewIdleChannel() *IdleChannel {
	c := new(IdleChannel)
	c.input = make(chan Command)
	c.output = make(chan Command)
	go c.run()
	return c
}
// In exposes the send side of the channel pair.
func (ch *IdleChannel) In() chan<- Command {
	return ch.input
}

// Out exposes the receive side of the channel pair.
func (ch *IdleChannel) Out() <-chan Command {
	return ch.output
}

// Close shuts the input; run then drains and closes the output.
func (ch *IdleChannel) Close() {
	close(ch.input)
}
// run moves Commands from input to output while keeping only the most
// recent value: a Command received while a previous one is still
// undelivered overwrites it. It relies on nil-channel select semantics —
// a case on a nil channel is never ready.
func (ch *IdleChannel) run() {
	var input = ch.input
	var output chan Command
	var open, full bool
	var next Command
	// Loop until input is closed (input set to nil) and any pending
	// value has been delivered (output set to nil).
	for input != nil || output != nil {
		select {
		case output <- next: // fast path: never taken while output is nil
			full = false
		default:
			select {
			case next, open = <-input:
				if open {
					full = true // newest value replaces any undelivered one
				} else {
					input = nil // sender closed; stop receiving
				}
			case output <- next:
				full = false
			}
		}
		// Arm the send case only while a value is pending.
		if full {
			output = ch.output
		} else {
			output = nil
		}
	}
	close(ch.output)
}
|
package main
import "fmt"
func main() {
x := []string{"Donald", "Duck", "Andeby", "Norge"}
y := []string{"Fantomet", "Walker", "Hodeskallegrotten", "Afrika"}
fmt.Println(x)
fmt.Println(y)
x = append(x[:1], x[2:]...) //:1 tar med seg index 0, men dropper index 1. [:1 går til 1 man tar ikke med 1]
//,og fortsetter så fra Index 2 med [2:]
//Resultatet er at du slicer bort index nr. 1 siden alt igjen blir appended til x.
fmt.Println("Fjerner index nr.1 som er etternavnet = ", x)
x = append(x, y...) //Husk ... hvis man appender en annen slice
fmt.Println("Etter at y er appended til x, så er x = ", x)
fmt.Println("´n")
for i, v := range x { //range gir samme funksjonalitet som foreach i perl
fmt.Printf("%v\t%v\n", i, v)
}
}
|
package main
import (
"fmt"
"os"
"io"
"bufio"
"bytes"
)
// main reads /tmp/aa.txt line by line, reassembling long lines that
// bufio.ReadLine delivers in isPrefix chunks, then prints the whole
// file as one string.
func main() {
	file, err := os.Open("/tmp/aa.txt")
	if err != nil {
		// Fix: the original fell through after a failed Open and handed
		// a nil *os.File to bufio, so every subsequent read failed.
		fmt.Println(err.Error())
		return
	}
	defer file.Close()
	br := bufio.NewReader(file)
	var buf bytes.Buffer
	for {
		ba, isPrefix, err := br.ReadLine()
		if err != nil {
			if err == io.EOF {
				break
			}
			// Fix: Printf, not Println — the original printed the
			// format string "Error: %s\n" verbatim next to the error.
			fmt.Printf("Error: %s\n", err)
			break
		}
		buf.Write(ba)
		// Only terminate the line once the final fragment has arrived.
		if !isPrefix {
			buf.WriteByte('\n')
		}
	}
	str := buf.String()
	fmt.Println(str)
}
|
package main
import (
"fmt"
"github.com/AnhNguyenQuoc/go-blog/routes"
"log"
"net/http"
"os"
"github.com/AnhNguyenQuoc/go-blog/lib"
"github.com/AnhNguyenQuoc/go-blog/migrate"
"github.com/jinzhu/gorm"
"github.com/joho/godotenv"
"github.com/julienschmidt/httprouter"
)
var dbConfig migrate.DBConfig
var db *gorm.DB
var err error
// init loads .env into the environment (best effort), builds the DB
// config from environment variables and opens the gorm connection into
// the package-level db; the process panics if the database is
// unreachable.
func init() {
	godotenv.Load() // load error deliberately ignored: env vars may be set externally
	dbConfig = migrate.DBConfig{
		Host:     os.Getenv("DB_HOST"),
		Port:     os.Getenv("DB_PORT"),
		User:     os.Getenv("DB_USER"),
		Password: os.Getenv("DB_PASSWORD"),
		DBName:   os.Getenv("DB_NAME"),
	}
	db, err = migrate.InitDB(dbConfig)
	if err != nil {
		log.Panic(err)
	}
}
// main wires the HTTP routes and static file handler, then serves until
// ListenAndServe returns (which is always a fatal error).
func main() {
	defer db.Close()

	r := httprouter.New()
	// Application routes.
	routes.InitRoute(r, db)
	// Static assets.
	r.ServeFiles("/static/*filepath", http.Dir("assets/"))

	fmt.Println("Listening from server...")
	log.Fatal(http.ListenAndServe(lib.GetPort(), r))
}
|
package flatten_test
import (
"testing"
"github.com/google/go-cmp/cmp"
"go.jlucktay.dev/golang-workbench/flatten/pkg/flatten"
)
// TestFlatten checks that arbitrarily nested slices collapse into a
// flat []int in traversal order.
func TestFlatten(t *testing.T) {
	type testCase struct {
		input    interface{}
		expected []int
	}
	testCases := map[string]testCase{
		"Hello world - [[1,2,[3]],4] -> [1,2,3,4]": {
			input: []interface{}{
				[]interface{}{1, 2, []int{3}},
				4,
			},
			expected: []int{1, 2, 3, 4},
		},
		"Nested": {
			input: []interface{}{
				[]interface{}{
					1,
					[]int{2},
					[]interface{}{
						3,
						[]int{4, 5},
						[]interface{}{6, []int{7, 8}, 9, 10},
						11,
					},
					12,
				},
				13, 14,
			},
			expected: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
		},
	}
	for desc, tc := range testCases {
		tc := tc // pin the loop variable for the subtest closure
		t.Run(desc, func(t *testing.T) {
			actual := flatten.Flatten(tc.input)
			if diff := cmp.Diff(tc.expected, actual); diff != "" {
				t.Errorf("Got '%#v', want '%#v': mismatch (-want +got):\n%s", actual, tc.expected, diff)
			}
		})
	}
}
|
package cron
import (
"encoding/json"
"fmt"
"log"
"os"
"strings"
"github.com/argoproj/pkg/errors"
"github.com/argoproj/pkg/humanize"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo/cmd/argo/commands/client"
"github.com/argoproj/argo/pkg/apiclient/cronworkflow"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)
// NewGetCommand returns the "get" cobra command, which fetches one or
// more cron workflows by name and prints each in the requested format.
func NewGetCommand() *cobra.Command {
	var (
		output string
	)
	var command = &cobra.Command{
		Use:   "get CRON_WORKFLOW...",
		Short: "display details about a cron workflow",
		Run: func(cmd *cobra.Command, args []string) {
			// At least one workflow name is required.
			if len(args) == 0 {
				cmd.HelpFunc()(cmd, args)
				os.Exit(1)
			}
			ctx, apiClient := client.NewAPIClient()
			serviceClient := apiClient.NewCronWorkflowServiceClient()
			namespace := client.Namespace()
			for _, arg := range args {
				cronWf, err := serviceClient.GetCronWorkflow(ctx, &cronworkflow.GetCronWorkflowRequest{
					Name:      arg,
					Namespace: namespace,
				})
				errors.CheckError(err)
				printCronWorkflow(cronWf, output)
			}
		},
	}
	// Fix: "name" is accepted by printCronWorkflow but was missing from
	// the flag's help text.
	command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
	return command
}
// printCronWorkflow renders wf to stdout in outFmt: "name", "json",
// "yaml", or "wide"/"" (human-readable summary). Any other format — and
// now any marshalling failure, previously discarded — is fatal.
func printCronWorkflow(wf *wfv1.CronWorkflow, outFmt string) {
	switch outFmt {
	case "name":
		fmt.Println(wf.ObjectMeta.Name)
	case "json":
		outBytes, err := json.MarshalIndent(wf, "", "    ")
		if err != nil {
			log.Fatalf("Unable to marshal cron workflow to JSON: %v", err)
		}
		fmt.Println(string(outBytes))
	case "yaml":
		outBytes, err := yaml.Marshal(wf)
		if err != nil {
			log.Fatalf("Unable to marshal cron workflow to YAML: %v", err)
		}
		fmt.Print(string(outBytes))
	case "wide", "":
		fmt.Print(getCronWorkflowGet(wf))
	default:
		log.Fatalf("Unknown output format: %s", outFmt)
	}
}
// getCronWorkflowGet formats the human-readable ("wide") summary of a
// cron workflow: fixed metadata, then optional spec/status fields only
// when set. Uses a strings.Builder instead of the original repeated
// string concatenation.
func getCronWorkflowGet(wf *wfv1.CronWorkflow) string {
	const fmtStr = "%-30s %v\n"
	var out strings.Builder
	fmt.Fprintf(&out, fmtStr, "Name:", wf.ObjectMeta.Name)
	fmt.Fprintf(&out, fmtStr, "Namespace:", wf.ObjectMeta.Namespace)
	fmt.Fprintf(&out, fmtStr, "Created:", humanize.Timestamp(wf.ObjectMeta.CreationTimestamp.Time))
	fmt.Fprintf(&out, fmtStr, "Schedule:", wf.Spec.Schedule)
	fmt.Fprintf(&out, fmtStr, "Suspended:", wf.Spec.Suspend)
	if wf.Spec.Timezone != "" {
		fmt.Fprintf(&out, fmtStr, "Timezone:", wf.Spec.Timezone)
	}
	if wf.Spec.StartingDeadlineSeconds != nil {
		fmt.Fprintf(&out, fmtStr, "StartingDeadlineSeconds:", *wf.Spec.StartingDeadlineSeconds)
	}
	if wf.Spec.ConcurrencyPolicy != "" {
		fmt.Fprintf(&out, fmtStr, "ConcurrencyPolicy:", wf.Spec.ConcurrencyPolicy)
	}
	if wf.Status.LastScheduledTime != nil {
		fmt.Fprintf(&out, fmtStr, "LastScheduledTime:", humanize.Timestamp(wf.Status.LastScheduledTime.Time))
	}
	// Next run is best-effort; skip the row if the schedule can't be parsed.
	next, err := wf.GetNextRuntime()
	if err == nil {
		fmt.Fprintf(&out, fmtStr, "NextScheduledTime:", humanize.Timestamp(next))
	}
	if len(wf.Status.Active) > 0 {
		var activeWfNames []string
		for _, activeWf := range wf.Status.Active {
			activeWfNames = append(activeWfNames, activeWf.Name)
		}
		fmt.Fprintf(&out, fmtStr, "Active Workflows:", strings.Join(activeWfNames, ", "))
	}
	if len(wf.Status.Conditions) > 0 {
		out.WriteString(wf.Status.Conditions.DisplayString(fmtStr, map[wfv1.ConditionType]string{wfv1.ConditionTypeSubmissionError: "✖"}))
	}
	if len(wf.Spec.WorkflowSpec.Arguments.Parameters) > 0 {
		fmt.Fprintf(&out, fmtStr, "Workflow Parameters:", "")
		for _, param := range wf.Spec.WorkflowSpec.Arguments.Parameters {
			// Parameters without a value are omitted from the summary.
			if param.Value == nil {
				continue
			}
			fmt.Fprintf(&out, fmtStr, "  "+param.Name+":", *param.Value)
		}
	}
	return out.String()
}
|
/*
--- Day 3: Spiral Memory ---
You come across an experimental new kind of memory stored on an infinite two-dimensional grid.
Each square on the grid is allocated in a spiral pattern starting at a location marked 1 and then counting up while spiraling outward. For example, the first few squares are allocated like this:
17 16 15 14 13
18 5 4 3 12
19 6 1 2 11
20 7 8 9 10
21 22 23---> ...
While this is very space-efficient (no squares are skipped), requested data must be carried back to square 1 (the location of the only access port for this memory system) by programs that can only move up, down, left, or right.
They always take the shortest path: the Manhattan Distance between the location of the data and square 1.
For example:
Data from square 1 is carried 0 steps, since it's at the access port.
Data from square 12 is carried 3 steps, such as: down, left, left.
Data from square 23 is carried only 2 steps: up twice.
Data from square 1024 must be carried 31 steps.
How many steps are required to carry the data from the square identified in your puzzle input all the way to the access port?
--- Part Two ---
As a stress test on the system, the programs here clear the grid and then store the value 1 in square 1. Then, in the same allocation order as shown above, they store the sum of the values in all adjacent squares, including diagonals.
So, the first few squares' values are chosen as follows:
Square 1 starts with the value 1.
Square 2 has only one adjacent filled square (with value 1), so it also stores 1.
Square 3 has both of the above squares as neighbors and stores the sum of their values, 2.
Square 4 has all three of the aforementioned squares as neighbors and stores the sum of their values, 4.
Square 5 only has the first and fourth squares as neighbors, so it gets the value 5.
Once a square is written, its value does not change. Therefore, the first few squares would receive the following values:
147 142 133 122 59
304 5 4 2 57
330 10 1 1 54
351 11 23 25 26
362 747 806---> ...
What is the first value written that is larger than your puzzle input?
*/
package main
import (
"fmt"
"math"
)
// main prints the answers to both puzzle parts for the fixed input.
func main() {
	const puzzleInput = 347991
	fmt.Println(part1(puzzleInput))
	fmt.Println(part2(puzzleInput))
}
// part1 returns the Manhattan distance from square n back to square 1.
func part1(n int) int {
	col, row := spiral(n)
	return abs(col) + abs(row)
}
// part2 returns the first stress-test value strictly greater than n.
// spiralsum's table-exhausted sentinel (-1) is surfaced unchanged.
func part2(n int) int {
	for i := 0; i < n; i++ {
		v := spiralsum(i)
		if v < 0 || v > n {
			return v
		}
	}
	return -1
}
// spiral maps square number n (1-based, center = 1) to its (x, y) grid
// coordinate on the Ulam-style spiral. Derivation:
// https://math.stackexchange.com/questions/163080/on-a-two-dimensional-grid-is-there-a-formula-i-can-use-to-spiral-coordinates-in
// https://upload.wikimedia.org/wikipedia/commons/1/1d/Ulam_spiral_howto_all_numbers.svg
func spiral(n int) (x, y int) {
	// ring is the "shell" index: ring r holds values up to (2r+1)^2.
	ring := int(math.Ceil((math.Sqrt(float64(n)) - 1) / 2))
	side := 2 * ring
	corner := (side + 1) * (side + 1) // value at the ring's last corner
	switch {
	case n >= corner-side: // bottom edge (y = -ring)
		return ring - (corner - n), -ring
	case n >= corner-2*side: // left edge (x = -ring)
		return -ring, -ring + (corner - side - n)
	case n >= corner-3*side: // top edge (y = ring)
		return -ring + (corner - 2*side - n), ring
	default: // right edge (x = ring)
		return ring, ring - (corner - 3*side - n)
	}
}
// spiralsum returns the n-th value (0-based) written by the part-two
// stress test, precomputed as OEIS A141481 (https://oeis.org/A141481).
// Indices past the end of the table return -1.
func spiralsum(n int) int {
	table := []int{
		1, 1, 2, 4, 5, 10, 11,
		23, 25, 26, 54, 57, 59, 122, 133,
		142, 147, 304, 330, 351, 362, 747, 806,
		880, 931, 957, 1968, 2105, 2275, 2391, 2450,
		5022, 5336, 5733, 6155, 6444, 6591, 13486, 14267,
		15252, 16295, 17008, 17370, 35487, 37402, 39835, 42452,
		45220, 47108, 48065, 98098, 103128, 109476, 116247, 123363,
		128204, 130654, 266330, 279138, 295229, 312453, 330785, 349975,
		363010, 369601, 752688, 787032, 830037, 875851, 924406, 975079,
		1009457, 1026827, 2089141, 2179400, 2292124, 2411813, 2539320, 2674100,
		2814493, 2909666, 2957731, 6013560, 6262851, 6573553, 6902404, 7251490,
		7619304, 8001525, 8260383, 8391037, 17048404, 17724526, 18565223, 19452043,
		20390510, 21383723, 22427493, 23510079, 24242690, 24612291, 49977270, 51886591,
		54256348, 56749268, 59379562, 62154898, 65063840, 68075203, 70111487, 71138314,
		144365769, 149661137, 156221802, 163105139, 170348396, 177973629, 186001542, 194399801,
		203081691, 208949088, 211906819, 429827198, 445061340, 463911304, 483650112, 504377559,
		526150757, 549023076, 572904288, 597557233, 614208653, 622599690, 1262247784, 1305411751,
		1358749904, 1414491696, 1472899472, 1534125748, 1598327474, 1665648769, 1735829031, 1808194091,
		1857049072, 1881661363,
	}
	if n < len(table) {
		return table[n]
	}
	return -1
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
|
package alchemyapi
import (
"testing"
)
// TestEntryPointsHasArrange checks that hasArrange is false for an
// unknown arrange and true once a flavor map has been installed.
func TestEntryPointsHasArrange(t *testing.T) {
	eps := make(EntryPoints)
	if got := eps.hasArrange("foo"); got {
		t.Errorf("want %t, but %t", !got, got)
	}
	eps["foo"] = map[string]string{"bar": "uri"}
	if got := eps.hasArrange("foo"); !got {
		t.Errorf("want %t, but %t", got, !got)
	}
}
// TestEntryPointsUpdate verifies that update creates the nested arrange
// map on first use and records the flavor's URI.
// (Fixes the "shoule" typos in the failure messages.)
func TestEntryPointsUpdate(t *testing.T) {
	eps := make(EntryPoints)
	eps.update("foo", "bar", "uri")
	if len(eps) != 1 {
		t.Error("The EntryPoints length should be 1.")
	}
	if _, foo := eps["foo"]; !foo {
		t.Errorf("want %t, but %t", foo, !foo)
	}
	if _, bar := eps["foo"]["bar"]; !bar {
		t.Errorf("want %t, but %t", bar, !bar)
	}
	if len(eps["foo"]) != 1 {
		t.Error("The EntryPoints 'foo' map length should be 1.")
	}
	if eps["foo"]["bar"] != "uri" {
		t.Errorf("want %s, but %s", "uri", eps["foo"]["bar"])
	}
}
// TestEntryPointsHasFlavor checks that a flavor only counts as present
// once it has been registered with a non-empty URI.
func TestEntryPointsHasFlavor(t *testing.T) {
	eps := make(EntryPoints)
	expectFlavor := func(want bool) {
		if got := eps.hasFlavor("foo_bar", "foo"); got != want {
			t.Errorf("want %t, but %t", !got, got)
		}
	}
	expectFlavor(false) // nothing registered yet
	eps.update("foo_bar", "foo", "")
	expectFlavor(false) // empty URI does not count
	eps.update("foo_bar", "foo", "bar")
	expectFlavor(true)
}
// TestEntryPointsInitialized rebuilds the expected seed table and then
// checks that the package-level entryPoints map exposes exactly the same
// arrange -> flavor -> URI triples.
func TestEntryPointsInitialized(t *testing.T) {
	seeds := make(EntryPoints)
	seeds.update("sentiment", "url", "/url/URLGetTextSentiment")
	seeds.update("sentiment", "text", "/text/TextGetTextSentiment")
	seeds.update("sentiment", "html", "/html/HTMLGetTextSentiment")
	seeds.update("sentiment_targeted", "url", "/url/URLGetTargetedSentiment")
	seeds.update("sentiment_targeted", "text", "/text/TextGetTargetedSentiment")
	seeds.update("sentiment_targeted", "html", "/html/HTMLGetTargetedSentiment")
	seeds.update("taxonomy", "url", "/url/URLGetRankedTaxonomy")
	seeds.update("taxonomy", "text", "/text/TextGetRankedTaxonomy")
	seeds.update("taxonomy", "html", "/html/HTMLGetRankedTaxonomy")
	seeds.update("concepts", "url", "/url/URLGetRankedConcepts")
	seeds.update("concepts", "text", "/text/TextGetRankedConcepts")
	seeds.update("concepts", "html", "/html/HTMLGetRankedConcepts")
	seeds.update("entities", "url", "/url/URLGetRankedNamedEntities")
	seeds.update("entities", "text", "/text/TextGetRankedNamedEntities")
	seeds.update("entities", "html", "/html/HTMLGetRankedNamedEntities")
	seeds.update("keywords", "url", "/url/URLGetRankedKeywords")
	seeds.update("keywords", "text", "/text/TextGetRankedKeywords")
	seeds.update("keywords", "html", "/html/HTMLGetRankedKeywords")
	seeds.update("relations", "url", "/url/URLGetRelations")
	seeds.update("relations", "text", "/text/TextGetRelations")
	seeds.update("relations", "html", "/html/HTMLGetRelations")
	seeds.update("text", "url", "/url/URLGetText")
	seeds.update("text", "html", "/html/HTMLGetText")
	seeds.update("text_raw", "url", "/url/URLGetRawText")
	seeds.update("text_raw", "html", "/html/HTMLGetRawText")
	seeds.update("title", "url", "/url/URLGetTitle")
	seeds.update("title", "html", "/html/HTMLGetTitle")
	seeds.update("face", "url", "/url/URLGetRankedImageFaceTags")
	seeds.update("face", "image", "/image/ImageGetRankedImageFaceTags")
	seeds.update("image_extract", "url", "/url/URLGetImage")
	seeds.update("image_extract", "html", "/html/HTMLGetImage")
	seeds.update("image_tag", "url", "/url/URLGetRankedImageKeywords")
	seeds.update("image_tag", "image", "/image/ImageGetRankedImageKeywords")
	seeds.update("authors", "url", "/url/URLGetAuthors")
	seeds.update("authors", "html", "/html/HTMLGetAuthors")
	seeds.update("language", "url", "/url/URLGetLanguage")
	seeds.update("language", "text", "/text/TextGetLanguage")
	seeds.update("language", "html", "/html/HTMLGetLanguage")
	seeds.update("feeds", "url", "/url/URLGetFeedLinks")
	seeds.update("feeds", "html", "/html/HTMLGetFeedLinks")
	seeds.update("microformats", "url", "/url/URLGetMicroformatData")
	seeds.update("microformats", "html", "/html/HTMLGetMicroformatData")
	seeds.update("combined", "url", "/url/URLGetCombinedData")
	seeds.update("combined", "text", "/text/TextGetCombinedData")
	seeds.update("combined", "html", "/html/HTMLGetCombinedData")
	seeds.update("publication_date", "url", "/url/URLGetPubDate")
	seeds.update("publication_date", "html", "/html/HTMLGetPubDate")
	// Every seeded arrange must exist, and each flavor must map to the
	// identical URI.
	for k, v := range seeds {
		if entryPoints[k] == nil {
			t.Errorf("The %s should not be empty", k)
		}
		for flavor, uri := range v {
			if entryPoints[k][flavor] != uri {
				t.Errorf("EntryPoint %s, Flavor %s want %s, but %#v",
					k, flavor, uri,
					entryPoints[k][flavor],
				)
			}
		}
	}
}
// TestGetEntryPoints verifies that GetEntryPoints exposes the shared
// table, including entries added after package init.
func TestGetEntryPoints(t *testing.T) {
	// The shared table must not already know "foo_bar".
	if got := entryPoints.hasArrange("foo_bar"); got {
		t.Errorf("want %t, but %t", !got, got)
	}
	entryPoints.update("foo_bar", "foo", "bar")
	eps := GetEntryPoints()
	got := eps.hasArrange("foo_bar")
	if !got {
		t.Errorf("want %t, but %t", !got, got)
	}
	if uri := eps["foo_bar"]["foo"]; uri != "bar" {
		t.Errorf("want %s, but %#v", "bar", uri)
	}
}
|
package handler
import (
"fmt"
"github.com/gin-gonic/gin"
"proxy_download/model"
)
func NameValidate(context *gin.Context, tableName string) (result bool, err error) {
var params = struct {
Name string `json:"name"`
Id int `json:"id"`
UserId int `json:"user_id"`
}{}
err1 := context.BindJSON(¶ms)
if err1 != nil {
fmt.Println("context.BindJSON NameValidate err = ", err1)
return
}
//name := strings.Trim(params.Name, " ")
name := params.Name
id := params.Id
userId := params.UserId
if id != 0 {
result, err = model.UpdateNameValidate(name, tableName, id, userId)
return
}
result, err = model.SaveNameValidate(name, tableName, userId)
return
}
|
package task
// DailyTask is a task type whose shared behavior comes from the
// embedded TaskBase.
type DailyTask struct {
	TaskBase
}

// NewDailyTask allocates a zero-valued DailyTask.
func NewDailyTask() *DailyTask {
	return &DailyTask{}
}
|
package tyme
import (
"fmt"
"time"
)
// LocalHour represents a year, month, date and hour without a Location,
// e.g. January 2, 2006, 15:00.
type LocalHour struct {
	date LocalDate
	hour int
}
// NewLocalHour returns a LocalHour for the given year, month, date and
// hour.
func NewLocalHour(year int, month time.Month, date int, hour int) LocalHour {
	h := LocalHour{hour: hour}
	h.date = NewLocalDate(year, month, date)
	return h
}
// Year returns the year component.
func (h *LocalHour) Year() int {
	return h.date.Year()
}

// Month returns the month component.
func (h *LocalHour) Month() time.Month {
	return h.date.Month()
}

// Date returns the day-of-month component.
func (h *LocalHour) Date() int {
	return h.date.Date()
}

// Hour returns the hour component. (The previous comment incorrectly
// said "number of date".)
func (h *LocalHour) Hour() int {
	return h.hour
}
// String renders the value in "yyyy-mm-ddThh" format.
func (h LocalHour) String() string {
	return fmt.Sprintf("%04d-%02d-%02dT%02d",
		h.Year(), int(h.Month()), h.Date(), h.Hour())
}
|
package options
import (
"github.com/spf13/pflag"
)
// Config carries the server settings populated from command-line flags
// via AddFlag.
type Config struct {
	ListenAddress string // address:port to bind
	TlsCertFile   string // x509 certificate for HTTPS
	TlsPrivateKey string // private key matching TlsCertFile
	KeystoneUrl   string // OpenStack Keystone endpoint
}

// NewConfig returns an empty Config; call AddFlag to register its
// fields as flags.
func NewConfig() *Config {
	return &Config{}
}

// AddFlag binds each Config field to a flag on fs with its default.
func (c *Config) AddFlag(fs *pflag.FlagSet) {
	fs.StringVar(&c.ListenAddress, "listen", "127.0.0.1:8443", "address:port to listen on")
	fs.StringVar(&c.TlsCertFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS")
	fs.StringVar(&c.TlsPrivateKey, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file")
	fs.StringVar(&c.KeystoneUrl, "keystone-url", "http://localhost/identity/v3/", "url for openstack keystone")
}
|
package main
import "fmt"
// Structs emulating other languages' "inheritance" via embedding.
// (Comments translated from Chinese.)
type animal struct {
	name string
}

// move gives animal a method of its own.
func (a animal) move() {
	fmt.Printf("%s会动!\n", a.name)
}

// dog embeds animal, so dog values also gain animal's methods
// (anonymous-field syntactic sugar).
type dog struct {
	feet   uint8
	animal // methods owned by animal are promoted onto dog
}

// wang gives dog a bark method.
func (d dog) wang() {
	fmt.Printf("%s:汪汪汪\n", d.name)
}
// main builds a dog that embeds animal and shows that the embedded
// type's methods are promoted onto dog.
func main() {
	d := dog{
		animal: animal{name: "小黑"},
		feet:   4,
	}
	d.wang()
	d.move()
	fmt.Printf("type:%T,value:%d", d.feet, d.feet)
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alicloud
import (
"fmt"
"k8s.io/api/core/v1"
"testing"
"github.com/denverdino/aliyungo/slb"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// loadbalancerAttrib fills loadbalancer with a fixed test fixture: TCP
// listeners on ports 442 and 80, and two weighted backend servers. It
// mutates and returns the same pointer.
func loadbalancerAttrib(loadbalancer *slb.LoadBalancerType) *slb.LoadBalancerType {
	lports := struct {
		ListenerPort []int
	}{
		ListenerPort: []int{442, 80},
	}
	pproto := struct {
		ListenerPortAndProtocol []slb.ListenerPortAndProtocolType
	}{
		ListenerPortAndProtocol: []slb.ListenerPortAndProtocolType{
			{
				ListenerPort:     442,
				ListenerProtocol: "tcp",
			},
			{
				ListenerPort:     80,
				ListenerProtocol: "tcp",
			},
		},
	}
	backend := struct {
		BackendServer []slb.BackendServerType
	}{
		BackendServer: []slb.BackendServerType{
			{
				ServerId: "i-bp152coo41mv2dqry64j",
				Weight:   100,
			},
			{
				ServerId: "i-bp152coo41mv2dqry64i",
				Weight:   100,
			},
		},
	}
	loadbalancer.ListenerPorts = lports
	loadbalancer.ListenerPortsAndProtocol = pproto
	loadbalancer.BackendServers = backend
	return loadbalancer
}
// TestUpdateListenerPorts drives the listener manager against an
// in-memory mock SLB client. The service asks for ports 80 and 443
// while the fixture load balancer starts with 442 and 80; after Apply,
// the mock's state and the service spec must agree in both directions
// (every service port exists on the LB and vice versa).
func TestUpdateListenerPorts(t *testing.T) {
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "service-test",
			UID:       "abcdefghigklmnopqrstu",
			Annotations: map[string]string{
				//ServiceAnnotationLoadBalancerId: LOADBALANCER_ID,
			},
		},
		Spec: v1.ServiceSpec{
			Type: "LoadBalancer",
			Ports: []v1.ServicePort{
				{
					Name:       "tcp",
					Protocol:   protocolTcp,
					Port:       80,
					TargetPort: intstr.FromInt(80),
					NodePort:   30480,
				}, {
					Name:       "tcp",
					Protocol:   protocolTcp,
					Port:       443,
					TargetPort: intstr.FromInt(443),
					NodePort:   30443,
				},
			},
		},
	}
	base := newBaseLoadbalancer()
	detail := loadbalancerAttrib(&base[0])
	// The mock client records create/delete calls by mutating `detail`,
	// so the assertions below can inspect the resulting LB state.
	mgr, _ := NewMockClientMgr(&mockClientSLB{
		startLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			return nil
		},
		stopLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			return nil
		},
		createLoadBalancerTCPListener: func(args *slb.CreateLoadBalancerTCPListenerArgs) (err error) {
			li := slb.ListenerPortAndProtocolType{
				ListenerPort:     args.ListenerPort,
				ListenerProtocol: "tcp",
			}
			t.Logf("PPPP: %v\n", li)
			detail.ListenerPorts.ListenerPort = append(detail.ListenerPorts.ListenerPort, args.ListenerPort)
			detail.ListenerPortsAndProtocol.ListenerPortAndProtocol = append(detail.ListenerPortsAndProtocol.ListenerPortAndProtocol, li)
			return nil
		},
		deleteLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			// Rebuild both port lists without the deleted port.
			response := []slb.ListenerPortAndProtocolType{}
			ports := detail.ListenerPortsAndProtocol.ListenerPortAndProtocol
			for _, p := range ports {
				if p.ListenerPort == port {
					continue
				}
				response = append(response, p)
			}
			listports := []int{}
			lports := detail.ListenerPorts.ListenerPort
			for _, po := range lports {
				if po == port {
					continue
				}
				listports = append(listports, po)
			}
			detail.ListenerPortsAndProtocol.ListenerPortAndProtocol = response
			detail.ListenerPorts.ListenerPort = listports
			return nil
		},
		describeLoadBalancerTCPListenerAttribute: func(loadBalancerId string, port int) (response *slb.DescribeLoadBalancerTCPListenerAttributeResponse, err error) {
			// Serve attributes for known ports; unknown ports error out.
			ports := detail.ListenerPortsAndProtocol.ListenerPortAndProtocol
			for _, p := range ports {
				if p.ListenerPort == port {
					return &slb.DescribeLoadBalancerTCPListenerAttributeResponse{
						DescribeLoadBalancerListenerAttributeResponse: slb.DescribeLoadBalancerListenerAttributeResponse{
							Status: slb.Running,
						},
						TCPListenerType: slb.TCPListenerType{
							LoadBalancerId:    loadBalancerId,
							ListenerPort:      port,
							BackendServerPort: 31789,
							Bandwidth:         50,
						},
					}, nil
				}
			}
			return nil, errors.New("not found")
		},
	})
	err := NewListenerManager(mgr.loadbalancer.c, service, detail).Apply()
	if err != nil {
		t.Fatal("listener update error! ")
	}
	t.Log(PrettyJson(service))
	t.Log(PrettyJson(detail))
	// 1+2: every service port must exist in both LB port lists.
	for _, sport := range service.Spec.Ports {
		found := false
		for _, port := range detail.ListenerPortsAndProtocol.ListenerPortAndProtocol {
			if int(sport.Port) == port.ListenerPort {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("1. listen port protocol not found [%d]\n", sport.Port))
		}
		found = false
		for _, port := range detail.ListenerPorts.ListenerPort {
			if int(sport.Port) == port {
				found = true
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("2. listen port not found [%d]\n", sport.Port))
		}
	}
	// 3+4: conversely, every remaining LB port must belong to the service.
	for _, sport := range detail.ListenerPortsAndProtocol.ListenerPortAndProtocol {
		found := false
		for _, port := range service.Spec.Ports {
			if int(port.Port) == sport.ListenerPort {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("3. port not found [%d]\n", sport.ListenerPort))
		}
	}
	for _, sport := range detail.ListenerPorts.ListenerPort {
		found := false
		for _, port := range service.Spec.Ports {
			if int(port.Port) == sport {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("4. port not found [%d]\n", sport))
		}
	}
}
// TestUpdateListenerBackendPorts exercises ListenerManager.Apply against a
// mocked SLB client: the service declares TCP ports 80 and 442, the mock
// create/delete callbacks mutate `detail` in place, and the test then checks
// that both listener bookkeeping lists on `detail` exactly mirror the
// service's declared ports (no missing and no stale listeners).
func TestUpdateListenerBackendPorts(t *testing.T) {
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "service-test",
			UID:       "abcdefghigklmnopqrstu",
			Annotations: map[string]string{
				//ServiceAnnotationLoadBalancerId: LOADBALANCER_ID,
			},
		},
		Spec: v1.ServiceSpec{
			Type: "LoadBalancer",
			Ports: []v1.ServicePort{
				{
					Name:       "tcp",
					Protocol:   protocolTcp,
					Port:       80,
					TargetPort: intstr.FromInt(80),
					NodePort:   30480,
				}, {
					Name:       "tcp",
					Protocol:   protocolTcp,
					Port:       442,
					TargetPort: intstr.FromInt(443),
					NodePort:   30443,
				},
			},
		},
	}
	base := newBaseLoadbalancer()
	detail := loadbalancerAttrib(&base[0])
	// The mock client records listener changes directly on `detail` so the
	// assertions below can inspect the reconciled state.
	mgr, _ := NewMockClientMgr(&mockClientSLB{
		startLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			return nil
		},
		stopLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			return nil
		},
		createLoadBalancerTCPListener: func(args *slb.CreateLoadBalancerTCPListenerArgs) (err error) {
			// Register the new listener on both bookkeeping lists.
			li := slb.ListenerPortAndProtocolType{
				ListenerPort:     args.ListenerPort,
				ListenerProtocol: "tcp",
			}
			detail.ListenerPorts.ListenerPort = append(detail.ListenerPorts.ListenerPort, args.ListenerPort)
			detail.ListenerPortsAndProtocol.ListenerPortAndProtocol = append(detail.ListenerPortsAndProtocol.ListenerPortAndProtocol, li)
			return nil
		},
		deleteLoadBalancerListener: func(loadBalancerId string, port int) (err error) {
			// Remove the listener from both bookkeeping lists.
			response := []slb.ListenerPortAndProtocolType{}
			ports := detail.ListenerPortsAndProtocol.ListenerPortAndProtocol
			for _, p := range ports {
				if p.ListenerPort == port {
					continue
				}
				response = append(response, p)
			}
			listports := []int{}
			lports := detail.ListenerPorts.ListenerPort
			for _, po := range lports {
				if po == port {
					continue
				}
				listports = append(listports, po)
			}
			detail.ListenerPortsAndProtocol.ListenerPortAndProtocol = response
			detail.ListenerPorts.ListenerPort = listports
			return nil
		},
		describeLoadBalancerTCPListenerAttribute: func(loadBalancerId string, port int) (response *slb.DescribeLoadBalancerTCPListenerAttributeResponse, err error) {
			// Any known listener is reported as Running with fixed attributes.
			ports := detail.ListenerPortsAndProtocol.ListenerPortAndProtocol
			for _, p := range ports {
				if p.ListenerPort == port {
					return &slb.DescribeLoadBalancerTCPListenerAttributeResponse{
						DescribeLoadBalancerListenerAttributeResponse: slb.DescribeLoadBalancerListenerAttributeResponse{
							Status: slb.Running,
						},
						TCPListenerType: slb.TCPListenerType{
							LoadBalancerId:    loadBalancerId,
							ListenerPort:      port,
							BackendServerPort: 31789,
							Bandwidth:         50,
						},
					}, nil
				}
			}
			return nil, errors.New("not found")
		},
	})
	err := NewListenerManager(mgr.loadbalancer.c, service, detail).Apply()
	if err != nil {
		t.Fatal("listener update error! ")
	}
	t.Log(PrettyJson(service))
	t.Log(PrettyJson(detail))
	// Checks 1 & 2: every service port must appear in both listener lists.
	for _, sport := range service.Spec.Ports {
		found := false
		for _, port := range detail.ListenerPortsAndProtocol.ListenerPortAndProtocol {
			if int(sport.Port) == port.ListenerPort {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("1. listen port protocol not found [%d]\n", sport.Port))
		}
		found = false
		for _, port := range detail.ListenerPorts.ListenerPort {
			if int(sport.Port) == port {
				found = true
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("2. listen port not found [%d]\n", sport.Port))
		}
	}
	// Checks 3 & 4: no stale listeners may remain for ports the service
	// no longer declares.
	for _, sport := range detail.ListenerPortsAndProtocol.ListenerPortAndProtocol {
		found := false
		for _, port := range service.Spec.Ports {
			if int(port.Port) == sport.ListenerPort {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("3. port not found [%d]\n", sport.ListenerPort))
		}
	}
	for _, sport := range detail.ListenerPorts.ListenerPort {
		found := false
		for _, port := range service.Spec.Ports {
			if int(port.Port) == sport {
				found = true
				break
			}
		}
		if !found {
			t.Fatal(fmt.Sprintf("4. port not found [%d]\n", sport))
		}
	}
}
|
package kshortestpaths
import (
"fmt"
"testing"
)
// TestGraphCreation builds a two-station graph, links the stations with an
// edge, and prints the resulting distance as a smoke test.
func TestGraphCreation(t *testing.T) {
	graph := MakeStationGraph()
	boston := MakeStation("BOSSST", 42.361829, -71.05599)
	newYork := MakeStation("NYCPAS", 40.750315, -73.992865)
	graph.AddEdge(boston, newYork)
	fmt.Println(graph.Distance(boston, newYork))
}
|
// logic-clock project main.go
package main
import (
"fmt"
)
// Processor models a single process in the logical-clock simulation: it owns
// an inbox channel for incoming messages and its local time system.
type Processor struct {
	channel chan Msg // inbox for messages from other processors
	timeSys TimeSys  // logical clock implementation for this process
}
// Msg is the envelope exchanged between processors: a logical timestamp plus
// an arbitrary application payload.
type Msg struct {
	time TimeMsg     // sender's logical timestamp (may be nil — confirm senders always set it)
	data interface{} // application payload
}
// Send delivers an arbitrary payload to this processor's inbox, wrapping it
// in a Msg envelope.
//
// Bug fix: the original sent `msg` (an interface{}) directly on a `chan Msg`,
// which does not compile; the payload is now wrapped in a Msg value. The
// timestamp field is left zero here — NOTE(review): confirm whether Send
// should stamp the message via the time system.
func (self *Processor) Send(msg interface{}) {
	self.channel <- Msg{data: msg}
}
// NewProcessor builds a Processor with an unbuffered message channel and the
// given time system, initialised for n participants.
//
// Bug fix: the original called TimeSys.Init(n) — a method expression on the
// interface type, which does not compile without a receiver argument — instead
// of invoking Init on the supplied timeSys value.
func NewProcessor(n int, timeSys TimeSys) Processor {
	var ret Processor
	ret.channel = make(chan Msg)
	ret.timeSys = timeSys
	ret.timeSys.Init(n)
	return ret
}
// TimeMsg is a logical timestamp carried by a message; AddTo merges it into a
// local Time vector.
type TimeMsg interface {
	AddTo(t *Time)
}

// Time is a vector of logical clock values, one entry per process.
type Time []int

// Comparison results for logical timestamps.
const (
	TIME_LT = '<' // strictly before
	TIME_GT = '>' // strictly after
	TIME_EQ = '=' // equal
	TIME_ID = '|' // NOTE(review): presumably "concurrent/incomparable" — confirm
)

// TimeSys abstracts a logical clock implementation for n processes.
type TimeSys interface {
	Init(n int)
	TimeMsgTo(n int) TimeMsg
	Time() Time
}
// SimpleTimeSys is a vector-clock-backed TimeSys implementation.
type SimpleTimeSys struct {
	time Time // one clock entry per process
}

// SimpleTimeMsg is the timestamp payload exchanged by SimpleTimeSys.
type SimpleTimeMsg Time
// Init allocates the vector clock with one zeroed entry per process.
//
// Bug fix: the original used new([]int, n), which is not valid Go (the `new`
// builtin takes a single type argument); `make` is the correct builtin for
// allocating a sized slice.
func (self *SimpleTimeSys) Init(n int) {
	self.time = make(Time, n)
}
// main is a placeholder entry point for the logic-clock experiment.
func main() {
	greeting := "Hello World!"
	fmt.Println(greeting)
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
)
// Exec runs the generated DELETE ... USING join, removing every "test_user"
// row that authored a "test_post" flagged as spam.
// NOTE: this file is generated by github.com/frk/gosql — regenerate rather
// than editing by hand.
func (q *DeleteWithUsingJoinBlock1Query) Exec(c gosql.Conn) error {
	const queryString = `DELETE FROM "test_user" AS u
	USING "test_post" AS p
	WHERE u."id" = p."user_id" AND p."is_spam" IS TRUE` // `
	_, err := c.Exec(queryString)
	return err
}
|
package filter
import (
"time"
"github.com/geoirb/rss-aggregator/pkg/models"
)
// Filter selects news items by publication date. It is stateless.
type Filter struct {
}
// News returns the items of src published within the last `interval`,
// according to each item's PubDate parsed with the given time `format`.
func (f *Filter) News(src []models.News, format string, interval time.Duration) (dst []models.News) {
	dst = make([]models.News, 0, len(src))
	// Compute the cutoff once (it was loop-invariant) so every item is
	// compared against the same instant.
	border := time.Now().Add(-interval)
	for _, news := range src {
		// A parse failure yields the zero time, which never passes the
		// cutoff, so unparsable dates are silently dropped (original
		// behaviour preserved).
		pubDate, _ := time.Parse(format, news.PubDate)
		if pubDate.After(border) {
			dst = append(dst, news)
		}
	}
	return
}
// NewFilter returns a ready-to-use Filter.
func NewFilter() *Filter {
	return new(Filter)
}
|
package main
import "fmt"
// add returns the sum of two integers. (Consecutive parameters of the same
// type may share a single type declaration.)
func add(x, y int) int {
	sum := x + y
	return sum
}
// swap returns its two arguments in reverse order, demonstrating multiple
// return values.
func swap(x, y string) (a, b string) {
	a, b = y, x
	return
}
// split partitions sum into two parts: x is 4/9 of sum (integer division)
// and y is whatever remains.
func split(sum int) (x, y int) {
	x = sum * 4 / 9
	return x, sum - x
}
// main demonstrates the helper functions above.
func main() {
	sum := add(1, 2)
	first, second := swap("hello", "word")
	x, y := split(17)
	fmt.Println(sum)
	fmt.Println(first, second)
	fmt.Println(x, y)
}
|
package intersect
// Numbers describes a pair of integer lists that can be intersected.
type Numbers interface {
	Intersect() []int
}

// numbers is the default Numbers implementation backed by two slices.
type numbers struct {
	nums1 []int
	nums2 []int
}

// Intersect returns the multiset intersection of the two slices: each value
// appears as many times as it occurs in both inputs, ordered by its
// appearance in the second slice.
func (n *numbers) Intersect() []int {
	remaining := make(map[int]int, len(n.nums1))
	for _, v := range n.nums1 {
		remaining[v]++
	}
	result := make([]int, 0)
	for _, v := range n.nums2 {
		if remaining[v] > 0 {
			remaining[v]--
			result = append(result, v)
		}
	}
	return result
}

// NewNumbers wraps the two input slices in a Numbers value.
func NewNumbers(nums1, nums2 []int) Numbers {
	return &numbers{nums1: nums1, nums2: nums2}
}
|
package main
import "math/bits"
// intMax returns the larger of two integers.
func intMax(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// bitsLen returns the minimum number of bits required to represent b;
// it returns 0 for b == 0.
func bitsLen(b byte) int {
	return bits.Len(uint(b))
}
|
package cursor
import "fmt"
import "os"
const csi = "\033["
func Up(lines int) string {
if os.Getenv("TERM") == "" {
return ""
} else {
return fmt.Sprintf("%s%dA", csi, lines)
}
}
func ClearToEndOfLine() string {
if os.Getenv("TERM") == "" {
return ""
} else {
return fmt.Sprintf("%s%dK", csi, 0)
}
}
func ClearToEndOfDisplay() string {
if os.Getenv("TERM") == "" {
return ""
} else {
return fmt.Sprintf("%s%dJ", csi, 0)
}
}
// Show returns the ANSI sequence that makes the cursor visible, or "" when
// TERM is unset.
func Show() string {
	// Idiom fix: early return instead of else-after-return.
	if os.Getenv("TERM") == "" {
		return ""
	}
	return csi + "?25h"
}
// Hide returns the ANSI sequence that hides the cursor, or "" when TERM is
// unset.
func Hide() string {
	// Idiom fix: early return instead of else-after-return.
	if os.Getenv("TERM") == "" {
		return ""
	}
	return csi + "?25l"
}
|
package crawler
import (
"go.core/lesson4/pkg/crawler/stub"
"testing"
)
// s is the stub scanner shared by the tests in this file.
var s stub.Scanner

// TestScan drives Scan with the stub scanner and checks that the first
// returned document carries the stub's known title.
func TestScan(t *testing.T) {
	tests := []struct {
		name  string
		urls  []string
		depth int
	}{
		{"1", []string{"test.com", "test2.com"}, 2},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			documents, err := Scan(s, tt.urls, tt.depth)
			// NOTE(review): assumes the stub returns at least one document;
			// an empty result would panic on documents[0] rather than fail.
			if err != nil || documents[0].Title() != "Some test 1 url title" {
				t.Errorf("Scan() gotDocuments = %v", documents)
			}
		})
	}
}
|
package apis
import (
"encoding/base64"
"fmt"
"io/ioutil"
"project/utils/config"
"strings"
"project/app/admin/models"
"project/pkg/tools"
"project/utils"
"project/utils/app"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// UploadFile handles upload of a single file of any type and responds with
// the stored file's metadata.
// @Summary Upload a file (any type)
// @Description Author:JiaKunLi 2021/01/27
// @Tags File management File Controller
// @Accept multipart/form-data
// @Produce application/json
// @Param file formData file true "file"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseFile
// @Router /api/file/uploadFile [post]
func UploadFile(c *gin.Context) {
	// Absolute URL prefix used to build the returned file link.
	urlPerfix := fmt.Sprintf("http://%s/", c.Request.Host)
	var fileResponse models.FileResponse
	fileResponse, done := singleFile(c, fileResponse, urlPerfix, false)
	if done {
		// singleFile already wrote an error response to the context.
		return
	}
	app.ResponseSuccess(c, fileResponse)
}
// UploadImage handles upload of a single image file (non-image uploads are
// rejected) and responds with the stored file's metadata.
// @Summary Upload a file (image only)
// @Description Author:JiaKunLi 2021/01/27
// @Tags File management File Controller
// @Accept multipart/form-data
// @Produce application/json
// @Param file formData file true "file"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseFile
// @Router /api/file/uploadImage [post]
func UploadImage(c *gin.Context) {
	// Absolute URL prefix used to build the returned file link.
	urlPerfix := fmt.Sprintf("http://%s/", c.Request.Host)
	var fileResponse models.FileResponse
	// The trailing `true` enables the image-only extension check.
	fileResponse, done := singleFile(c, fileResponse, urlPerfix, true)
	if done {
		// singleFile already wrote an error response to the context.
		return
	}
	app.ResponseSuccess(c, fileResponse)
}
//func UploadFile(c *gin.Context) {
// tag, _ := c.GetPostForm("type")
// urlPerfix := fmt.Sprintf("http://%s/", c.Request.Host)
// var fileResponse FileResponse
// if tag == "" {
// app.ResponseErrorWithMsg(c, 200, "缺少标识")
// return
// } else {
// switch tag {
// case "1": // 单图
// fileResponse, done := singleFile(c, fileResponse, urlPerfix)
// if done {
// return
// }
// app.ResponseSuccess(c, fileResponse)
// return
// case "2": // 多图
// multipartFile := multipleFile(c, urlPerfix)
// app.ResponseSuccess(c, multipartFile)
// return
// case "3": // base64
// fileResponse = baseImg(c, fileResponse, urlPerfix)
// app.ResponseSuccess(c, fileResponse)
// }
// }
//}
// baseImg decodes a base64 data-URI image from the "file" form field, writes
// it to static/uploadfile/<uuid>.jpg and returns the stored file's metadata.
// The MIME type is recovered from the data-URI prefix.
func baseImg(c *gin.Context, fileResponse models.FileResponse, urlPerfix string) models.FileResponse {
	files, _ := c.GetPostForm("file")
	file2list := strings.Split(files, ",")
	// Robustness fix: a well-formed data URI is "data:<mime>;base64,<payload>".
	// Input without a comma previously caused an index-out-of-range panic on
	// file2list[1]; return the zero response instead.
	if len(file2list) < 2 {
		return fileResponse
	}
	// Decode/write errors are deliberately ignored: an invalid payload simply
	// produces an empty file, preserving the original best-effort behaviour.
	ddd, _ := base64.StdEncoding.DecodeString(file2list[1])
	guid := uuid.New().String()
	fileName := guid + ".jpg"
	base64File := "static/uploadfile/" + fileName
	_ = ioutil.WriteFile(base64File, ddd, 0666)
	typeStr := strings.Replace(strings.Replace(file2list[0], "data:", "", -1), ";base64", "", -1)
	fileResponse = models.FileResponse{
		Size:     utils.GetFileSize(base64File),
		Path:     base64File,
		FullPath: urlPerfix + base64File,
		Name:     "",
		Type:     typeStr,
	}
	return fileResponse
}
// multipleFile stores every file in the multipart "file" field under
// static/uploadfile/ with a fresh UUID name and returns metadata for the
// uploads that succeeded; failed saves are silently skipped.
func multipleFile(c *gin.Context, urlPerfix string) []models.FileResponse {
	files := c.Request.MultipartForm.File["file"]
	var multipartFile []models.FileResponse
	for _, f := range files {
		guid := uuid.New().String()
		fileName := guid + utils.GetExt(f.Filename)
		multipartFileName := "static/uploadfile/" + fileName
		e := c.SaveUploadedFile(f, multipartFileName)
		// Type detection errors are ignored; Type may then be empty.
		fileType, _ := tools.GetType(multipartFileName)
		if e == nil {
			fileResponse := models.FileResponse{
				Size:     utils.GetFileSize(multipartFileName),
				Path:     multipartFileName,
				FullPath: urlPerfix + multipartFileName,
				Name:     f.Filename,
				Type:     fileType,
			}
			multipartFile = append(multipartFile, fileResponse)
		}
	}
	return multipartFile
}
// singleFile validates and stores the uploaded "file" form field and returns
// its metadata. The boolean result is true when an error response has already
// been written to the context (missing file, non-image upload while image-only
// is required, or a failed save), in which case callers must stop processing.
func singleFile(c *gin.Context, fileResponse models.FileResponse, urlPerfix string, image bool) (models.FileResponse, bool) {
	files, err := c.FormFile("file")
	if err != nil {
		app.ResponseError(c, app.CodeImageIsNotNull)
		return models.FileResponse{}, true
	}
	// When image-only mode is on, reject anything whose extension does not
	// map to an image type (the [1:] strips the extension's leading dot).
	if image && utils.GetFileType(tools.GetExt(files.Filename)[1:]) != "image" {
		app.ResponseError(c, app.CodeFileImageFail)
		return models.FileResponse{}, true
	}
	// Store the upload under a fresh UUID name in the configured static dir.
	guid := uuid.New().String()
	fileName := guid + tools.GetExt(files.Filename)
	singleFile := config.ApplicationConfig.StaticPath + fileName
	err = c.SaveUploadedFile(files, singleFile)
	if err != nil {
		app.ResponseError(c, app.CodeFileUploadFail)
		return models.FileResponse{}, true
	}
	// Type detection errors are ignored; Type may then be empty.
	fileType, _ := tools.GetType(singleFile)
	fileResponse = models.FileResponse{
		Size:     utils.GetFileSize(singleFile),
		Path:     fileName,
		FullPath: urlPerfix + config.ApplicationConfig.StaticFileUrl + fileName,
		Name:     files.Filename,
		Type:     fileType,
	}
	return fileResponse, false
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/codenotary/immudb/embedded/tbtree"
"github.com/codenotary/immudb/embedded/watchers"
)
// indexer maintains the btree index of an ImmuStore, fed by a background
// goroutine that tails committed transactions.
type indexer struct {
	path string // directory holding the index files
	store *ImmuStore // owning store whose transactions are indexed
	index *tbtree.TBtree // underlying btree
	cancellation chan struct{} // closed by stop() to abort the background loop
	wHub *watchers.WatchersHub // progress notifications for waiters; nil when disabled
	state int // running/stopped/paused, guarded by stateCond.L
	stateCond *sync.Cond // signals state transitions to the background loop
	closed bool // set by Close; guarded by mutex
	compactionMutex sync.Mutex // serializes compactions and Close
	mutex sync.Mutex // guards index access and the closed flag
}
// runningState enumerates the lifecycle states of the background indexing
// loop (alias of int so it compares directly with the state field).
type runningState = int

const (
	running runningState = iota // loop actively indexing
	stopped // loop exited or about to exit
	paused // loop alive but idle until Resume
)
// newIndexer opens (or creates) the btree index at path and starts the
// background indexing goroutine via resume(). The watchers hub is only
// created when maxWaitees > 0; otherwise wHub stays nil and waiting for
// indexing progress is unsupported.
func newIndexer(path string, store *ImmuStore, indexOpts *tbtree.Options, maxWaitees int) (*indexer, error) {
	index, err := tbtree.Open(path, indexOpts)
	if err != nil {
		return nil, err
	}
	var wHub *watchers.WatchersHub
	if maxWaitees > 0 {
		wHub = watchers.New(0, maxWaitees)
	}
	indexer := &indexer{
		store: store,
		path: path,
		index: index,
		wHub: wHub,
		state: stopped,
		stateCond: sync.NewCond(&sync.Mutex{}),
	}
	indexer.resume()
	return indexer, nil
}
// Ts returns the timestamp (id of the last indexed transaction) reported by
// the underlying btree.
func (idx *indexer) Ts() uint64 {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	return idx.index.Ts()
}
// Get returns the value, tx id and hc reported by the underlying btree for
// key. It fails with ErrAlreadyClosed once the indexer has been closed.
func (idx *indexer) Get(key []byte) (value []byte, tx uint64, hc uint64, err error) {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return nil, 0, 0, ErrAlreadyClosed
	}
	return idx.index.Get(key)
}
// History returns up to limit transaction ids in which key was updated,
// starting at offset, ascending or descending per descOrder. It fails with
// ErrAlreadyClosed once the indexer has been closed.
func (idx *indexer) History(key []byte, offset uint64, descOrder bool, limit int) (txs []uint64, err error) {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return nil, ErrAlreadyClosed
	}
	return idx.index.History(key, offset, descOrder, limit)
}
// Snapshot returns a point-in-time snapshot of the btree. It fails with
// ErrAlreadyClosed once the indexer has been closed.
func (idx *indexer) Snapshot() (*tbtree.Snapshot, error) {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return nil, ErrAlreadyClosed
	}
	return idx.index.Snapshot()
}
// SnapshotSince returns a btree snapshot that includes transaction tx. It
// fails with ErrAlreadyClosed once the indexer has been closed.
func (idx *indexer) SnapshotSince(tx uint64) (*tbtree.Snapshot, error) {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return nil, ErrAlreadyClosed
	}
	return idx.index.SnapshotSince(tx)
}
// ExistKeyWith delegates the prefixed-key existence check to the underlying
// btree. It fails with ErrAlreadyClosed once the indexer has been closed.
func (idx *indexer) ExistKeyWith(prefix []byte, neq []byte, smaller bool) (bool, error) {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return false, ErrAlreadyClosed
	}
	return idx.index.ExistKeyWith(prefix, neq, smaller)
}
// Sync flushes the btree to stable storage. It fails with ErrAlreadyClosed
// once the indexer has been closed.
func (idx *indexer) Sync() error {
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return ErrAlreadyClosed
	}
	return idx.index.Sync()
}
// Close stops the background indexing goroutine, shuts down the watchers hub
// (when present) and closes the underlying btree. Closing twice returns
// ErrAlreadyClosed.
func (idx *indexer) Close() error {
	idx.compactionMutex.Lock()
	defer idx.compactionMutex.Unlock()
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return ErrAlreadyClosed
	}
	idx.stop()
	// Bug fix: wHub is nil when the indexer was created with maxWaitees <= 0
	// (see newIndexer); guard against a nil dereference, mirroring the nil
	// check in WaitForIndexingUpto.
	if idx.wHub != nil {
		idx.wHub.Close()
	}
	idx.closed = true
	return idx.index.Close()
}
// WaitForIndexingUpto blocks until transaction txID has been indexed, the
// cancellation channel fires, or the hub is closed. When the indexer was
// built without waiter support (nil wHub) it fails immediately.
func (idx *indexer) WaitForIndexingUpto(txID uint64, cancellation <-chan struct{}) error {
	if idx.wHub != nil {
		return idx.wHub.WaitFor(txID, cancellation)
	}
	return watchers.ErrMaxWaitessLimitExceeded
}
// CompactIndex runs a full btree compaction and swaps the compacted files
// into place. Only one compaction may run at a time; indexing is stopped for
// the swap and resumed afterwards.
func (idx *indexer) CompactIndex() (err error) {
	idx.compactionMutex.Lock()
	defer idx.compactionMutex.Unlock()
	idx.store.notify(Info, true, "Compacting index '%s'...", idx.store.path)
	defer func() {
		if err == nil {
			// Typo fix in the log message: "sucessfully" -> "successfully".
			idx.store.notify(Info, true, "Index '%s' successfully compacted", idx.store.path)
		} else {
			idx.store.notify(Info, true, "Compaction of index '%s' returned: %v", idx.store.path, err)
		}
	}()
	compactedIndexID, err := idx.index.CompactIndex()
	if err != nil {
		return err
	}
	idx.mutex.Lock()
	defer idx.mutex.Unlock()
	if idx.closed {
		return ErrAlreadyClosed
	}
	// Stop background indexing while the index files are being swapped.
	idx.stop()
	defer idx.resume()
	return idx.replaceIndex(compactedIndexID)
}
// stop transitions the background loop to stopped and closes the
// cancellation channel so any in-flight wait aborts; the loop observes the
// state change and exits.
// NOTE(review): a second stop without an intervening resume would close an
// already-closed channel and panic — confirm callers serialize stop/resume.
func (idx *indexer) stop() {
	idx.stateCond.L.Lock()
	idx.state = stopped
	close(idx.cancellation)
	idx.stateCond.L.Unlock()
	idx.stateCond.Signal()
	idx.store.notify(Info, true, "Indexing gracefully stopped at '%s'", idx.store.path)
}
// resume recreates the cancellation channel, marks the state as running and
// spawns a fresh background indexing goroutine.
func (idx *indexer) resume() {
	idx.stateCond.L.Lock()
	idx.state = running
	idx.cancellation = make(chan struct{})
	go idx.doIndexing(idx.cancellation)
	idx.stateCond.L.Unlock()
	idx.store.notify(Info, true, "Indexing in progress at '%s'", idx.store.path)
}
// replaceIndex swaps the live btree for the compacted copy: it closes the
// current index, removes the old node/commit logs, renames the compacted ones
// into place, and reopens the index with the same options. Callers must hold
// idx.mutex with background indexing stopped.
func (idx *indexer) replaceIndex(compactedIndexID uint64) error {
	opts := idx.index.GetOptions()
	err := idx.index.Close()
	if err != nil {
		return err
	}
	nLogPath := filepath.Join(idx.path, "nodes")
	err = os.RemoveAll(nLogPath)
	if err != nil {
		return err
	}
	cLogPath := filepath.Join(idx.path, "commit")
	err = os.RemoveAll(cLogPath)
	if err != nil {
		return err
	}
	// The compaction wrote its output under id-suffixed names.
	cnLogPath := filepath.Join(idx.path, fmt.Sprintf("nodes_%d", compactedIndexID))
	ccLogPath := filepath.Join(idx.path, fmt.Sprintf("commit_%d", compactedIndexID))
	err = os.Rename(cnLogPath, nLogPath)
	if err != nil {
		return err
	}
	err = os.Rename(ccLogPath, cLogPath)
	if err != nil {
		return err
	}
	index, err := tbtree.Open(idx.path, opts)
	if err != nil {
		return err
	}
	idx.index = index
	return nil
}
// Resume wakes a paused background loop. Unlike resume (lowercase) it does
// not spawn a new goroutine; it only flips the state and signals the
// condition variable.
func (idx *indexer) Resume() {
	idx.stateCond.L.Lock()
	idx.state = running
	idx.stateCond.L.Unlock()
	idx.stateCond.Signal()
}
// Pause asks the background loop to idle after its current batch; the loop
// then blocks on the condition variable until Resume is called.
func (idx *indexer) Pause() {
	idx.stateCond.L.Lock()
	idx.state = paused
	idx.stateCond.L.Unlock()
}
// doIndexing is the indexer's background loop. It waits for newly committed
// transactions, honours pause/stop requests signalled through idx.state, and
// indexes batches of up to 10 transactions. It exits on cancellation, when
// the store or index is closed, or when the indexer is stopped.
func (idx *indexer) doIndexing(cancellation <-chan struct{}) {
	for {
		lastIndexedTx := idx.index.Ts()
		if idx.wHub != nil {
			idx.wHub.DoneUpto(lastIndexedTx)
		}
		err := idx.store.wHub.WaitFor(lastIndexedTx+1, cancellation)
		if err == watchers.ErrCancellationRequested || err == watchers.ErrAlreadyClosed {
			return
		}
		if err != nil {
			idx.store.notify(Error, true, "Indexing failed at '%s' due to error: %v", idx.store.path, err)
			time.Sleep(60 * time.Second)
		}
		committedTxID, _, _ := idx.store.commitState()
		txsToIndex := committedTxID - lastIndexedTx
		idx.store.notify(Info, false, "%d transaction/s to be indexed at '%s'", txsToIndex, idx.store.path)
		idx.stateCond.L.Lock()
		for {
			if idx.state == stopped {
				// Bug fix: release the state lock before returning; the
				// original returned while still holding it, deadlocking any
				// later stop/pause/resume call.
				idx.stateCond.L.Unlock()
				return
			}
			if idx.state == running {
				break
			}
			// Paused: wait until Resume signals a state change.
			idx.stateCond.Wait()
		}
		idx.stateCond.L.Unlock()
		err = idx.indexSince(lastIndexedTx+1, 10)
		if err == ErrAlreadyClosed || err == tbtree.ErrAlreadyClosed {
			return
		}
		if err != nil {
			idx.store.notify(Error, true, "Indexing failed at '%s' due to error: %v", idx.store.path, err)
			time.Sleep(60 * time.Second)
		}
	}
}
// indexSince reads up to `limit` committed transactions starting at txID and
// bulk-inserts their entries into the btree. Each indexed value packs the
// entry's length, value-log offset and hash so reads can locate the value
// without scanning.
func (idx *indexer) indexSince(txID uint64, limit int) error {
	tx, err := idx.store.fetchAllocTx()
	if err != nil {
		return err
	}
	defer idx.store.releaseAllocTx(tx)
	txReader, err := idx.store.newTxReader(txID, false, tx)
	if err != nil {
		return err
	}
	for i := 0; i < limit; i++ {
		tx, err := txReader.Read()
		if err == ErrNoMoreEntries {
			break
		}
		if err != nil {
			return err
		}
		txEntries := tx.Entries()
		for i, e := range txEntries {
			// Encode value length, value-log offset and value hash into the
			// fixed-size indexed value.
			var b [szSize + offsetSize + sha256.Size]byte
			binary.BigEndian.PutUint32(b[:], uint32(e.vLen))
			binary.BigEndian.PutUint64(b[szSize:], uint64(e.vOff))
			copy(b[szSize+offsetSize:], e.hVal[:])
			idx.store._kvs[i].K = e.key()
			idx.store._kvs[i].V = b[:]
		}
		err = idx.index.BulkInsert(idx.store._kvs[:len(txEntries)])
		if err != nil {
			return err
		}
	}
	return nil
}
|
package persistence
import (
"context"
"github.com/dsukesato/go13/pbl/app1-backend/domain/model"
"github.com/dsukesato/go13/pbl/app1-backend/domain/repository"
"time"
)
// PostsPersistence is a stub implementation of the posts repository.
type PostsPersistence struct {}

// NewPostsPersistence returns the stub repository as a PostsRepository.
func NewPostsPersistence() repository.PostsRepository {
	return &PostsPersistence{}
}
// GetPosts returns two hard-coded posts; this stub stands in for a real
// datastore while the persistence layer is under development.
func (pp PostsPersistence) GetPosts(context.Context) ([]*model.Post, error) {
	first := model.Post{
		PostId:           1,
		PostUserId:       1,
		PostRestaurantId: 1,
		PostImage:        "aaa",
		Good:             1,
		Text:             "very delicious!",
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
		DeletedAt:        time.Now(),
	}
	second := model.Post{
		PostId:           2,
		PostUserId:       2,
		PostRestaurantId: 2,
		PostImage:        "bbb",
		Good:             2,
		Text:             "soso",
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
		DeletedAt:        time.Now(),
	}
	return []*model.Post{&first, &second}, nil
}
// PostPosts returns two more hard-coded posts; like GetPosts it is a stub
// pending a real datastore implementation.
func (pp PostsPersistence) PostPosts(context.Context) ([]*model.Post, error) {
	third := model.Post{
		PostId:           3,
		PostUserId:       3,
		PostRestaurantId: 3,
		PostImage:        "ccc",
		Good:             3,
		Text:             "good!",
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
		DeletedAt:        time.Now(),
	}
	fourth := model.Post{
		PostId:           4,
		PostUserId:       4,
		PostRestaurantId: 4,
		PostImage:        "ddd",
		Good:             4,
		Text:             "Uhhhn",
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
		DeletedAt:        time.Now(),
	}
	return []*model.Post{&third, &fourth}, nil
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
// main reads groups of lines from stdin: each group is preceded by a count
// line, and a count of 0 terminates input. Every group is sorted and printed,
// with a blank line between consecutive groups.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Split(bufio.ScanLines)
	printBlank := false
	for scanner.Scan() {
		var a int
		fmt.Sscanf(scanner.Text(), "%d", &a)
		if a == 0 {
			break
		}
		// Separate consecutive groups with a blank line.
		if printBlank {
			fmt.Println()
		}
		printBlank = true
		var b []string
		for i := 0; i < a; i++ {
			scanner.Scan()
			b = append(b, scanner.Text())
		}
		// NOTE(review): the comparator orders lines by their first two bytes
		// only — presumably sufficient for this problem's input format;
		// confirm every line has at least two bytes or b[i][1] will panic.
		sort.SliceStable(b, func(i, j int) bool {
			if b[i][0] < b[j][0] {
				return true
			}
			return b[i][0] == b[j][0] && b[i][1] < b[j][1]
		})
		for i := 0; i < a; i++ {
			fmt.Println(b[i])
		}
	}
}
|
/*
Copyright 2021 Digitalis.IO.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
ldapv1 "ldap-accounts-controller/api/v1"
ldap "ldap-accounts-controller/ldap"
)
// LdapGroupReconciler reconciles a LdapGroup object, keeping the external
// LDAP directory in sync with the cluster resources.
type LdapGroupReconciler struct {
	client.Client
	Log    logr.Logger     // structured logger scoped per reconcile
	Scheme *runtime.Scheme // runtime scheme used by the manager
}
var (
	// ldapGroupOwnerKey is the field-index key used to look up LdapGroups
	// by name (see SetupWithManager's IndexField registration).
	ldapGroupOwnerKey = ".metadata.controller"
)
// Reconcile creates or updates the LDAP group backing an LdapGroup resource
// and, via a finalizer, deletes it from LDAP when the resource is removed.
// +kubebuilder:rbac:groups=ldap.digitalis.io,resources=ldapgroups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=ldap.digitalis.io,resources=ldapgroups/status,verbs=get;update;patch
func (r *LdapGroupReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("ldapgroup", req.NamespacedName)
	// your logic here
	var ldapgroup ldapv1.LdapGroup
	if err := r.Get(ctx, req.NamespacedName, &ldapgroup); err != nil {
		// Check if it was deleted and ignore
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		log.Error(err, "unable to fetch ldap group")
		return ctrl.Result{}, err
	}
	//! [finalizer]
	ldapgroupFinalizerName := "ldap.digitalis.io/finalizer"
	if ldapgroup.ObjectMeta.DeletionTimestamp.IsZero() {
		// Object is live: make sure our finalizer is registered so we get a
		// chance to clean up LDAP on deletion.
		if !containsString(ldapgroup.GetFinalizers(), ldapgroupFinalizerName) {
			ldapgroup.SetFinalizers(append(ldapgroup.GetFinalizers(), ldapgroupFinalizerName))
			if err := r.Update(context.Background(), &ldapgroup); err != nil {
				return ctrl.Result{}, err
			}
		}
	} else {
		// The object is being deleted
		if containsString(ldapgroup.GetFinalizers(), ldapgroupFinalizerName) {
			// our finalizer is present, so lets handle any external dependency
			if err := ldap.DeleteGroup(ldapgroup.Spec); err != nil {
				log.Error(err, "Error deleting from LDAP")
				return ctrl.Result{}, err
			}
			// remove our finalizer from the list and update it.
			ldapgroup.SetFinalizers(removeString(ldapgroup.GetFinalizers(), ldapgroupFinalizerName))
			if err := r.Update(context.Background(), &ldapgroup); err != nil {
				return ctrl.Result{}, err
			}
		}
		// Stop reconciliation as the item is being deleted
		return ctrl.Result{}, nil
	}
	//! [finalizer]
	log.Info("Adding or updating LDAP group")
	// NOTE(review): an AddGroup failure is only logged and reconcile still
	// returns success, so it will not be retried — confirm this is intended.
	err := ldap.AddGroup(ldapgroup.Spec)
	if err != nil {
		log.Error(err, "cannot add group to ldap")
	}
	ldapgroup.Status.CreatedOn = time.Now().Format("2006-01-02 15:04:05")
	var ldapGroups ldapv1.LdapGroupList
	if err := r.List(ctx, &ldapGroups, client.InNamespace(req.Namespace), client.MatchingFields{ldapGroupOwnerKey: req.Name}); err != nil {
		log.Error(err, "unable to list ldap accounts")
		return ctrl.Result{}, err
	}
	for _, acc := range ldapGroups.Items {
		msg := fmt.Sprintf("Checking group %s", acc.Spec.Name)
		log.Info(msg)
		// NOTE(review): acc is a copy of the slice element and this status
		// change is never persisted (no r.Status().Update) — likely a latent
		// bug; confirm intent.
		acc.Status.CreatedOn = time.Now().Format("2006-01-02 15:04:05")
	}
	return ctrl.Result{}, nil
}
// SetupWithManager registers the reconciler with the manager: it indexes
// LdapGroups by name under ldapGroupOwnerKey and installs an event filter
// that drops delete events and status-only updates (generation unchanged).
func (r *LdapGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
	if err := mgr.GetFieldIndexer().IndexField(&ldapv1.LdapGroup{}, ldapGroupOwnerKey, func(rawObj runtime.Object) []string {
		acc := rawObj.(*ldapv1.LdapGroup)
		return []string{acc.Name}
	}); err != nil {
		return err
	}
	//! [pred]
	pred := predicate.Funcs{
		CreateFunc:  func(event.CreateEvent) bool { return true },
		DeleteFunc:  func(event.DeleteEvent) bool { return false },
		GenericFunc: func(event.GenericEvent) bool { return true },
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldGeneration := e.MetaOld.GetGeneration()
			newGeneration := e.MetaNew.GetGeneration()
			// Generation is only updated on spec changes (also on deletion),
			// not metadata or status
			// Filter out events where the generation hasn't changed to
			// avoid being triggered by status updates
			return oldGeneration != newGeneration
		},
	}
	//! [pred]
	return ctrl.NewControllerManagedBy(mgr).
		For(&ldapv1.LdapGroup{}).
		WithEventFilter(pred).
		Complete(r)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
)
// Image geometry and pixel color codes: each layer is numberOfRows rows of
// numberOfCols pixels; any other pixel value is treated as transparent.
const (
	numberOfRows = 6
	numberOfCols = 25
	colorBlack = 0
	colorWhite = 1
)

// Layer represents a 2D grid with pixels.
type Layer [][]int
// main reads the puzzle input file named as the first argument, decodes its
// digits into image layers, and runs both puzzle parts.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("missing file as input")
	}
	line, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		log.Fatalf("could not read file: %s", err.Error())
	}
	var layers = createLayers(line)
	partOne(layers)
	partTwo(layers)
}
// partOne locates the layer with the fewest 0 pixels and prints the product
// of that layer's 1-pixel and 2-pixel counts.
//
// Bug fix: the original used `fewestZeros == 0` as the "unset" sentinel, so a
// layer genuinely containing zero 0-pixels kept the condition true and later
// layers overwrote the correct minimum. A -1 sentinel distinguishes "unset"
// from a real count of zero.
func partOne(layers []Layer) {
	fewestZeros := -1
	fewestZeroLayerIndex := 0
	for i, layer := range layers {
		zeros := findOccurancesInlayerOf(layer, 0)
		if fewestZeros == -1 || zeros < fewestZeros {
			fewestZeros = zeros
			fewestZeroLayerIndex = i
		}
	}
	ones := findOccurancesInlayerOf(layers[fewestZeroLayerIndex], 1)
	twos := findOccurancesInlayerOf(layers[fewestZeroLayerIndex], 2)
	fmt.Println("part one: ", ones*twos)
}
// partTwo flattens the layers into one image and renders it. The first layer
// serves as the canvas (note: layerToDraw aliases layers[0], so the input is
// mutated in place); pixels from deeper layers only fill canvas positions
// that are still transparent.
func partTwo(layers []Layer) {
	var (
		layerToDraw = layers[0]
	)
	for _, layer := range layers {
		for i, row := range layer {
			for j, col := range row {
				// Never re-fill transparent
				if col != colorBlack && col != colorWhite {
					continue
				}
				// If there's no active color in the pixil, add it.
				if layerToDraw[i][j] != colorBlack && layerToDraw[i][j] != colorWhite {
					layerToDraw[i][j] = col
				}
			}
		}
	}
	fmt.Println("rendering the image:")
	printLayerLetter(layerToDraw)
}
// createLayers decodes the raw digit stream into image layers of
// numberOfRows x numberOfCols pixels each; a trailing partial layer or row
// is discarded.
func createLayers(line []byte) []Layer {
	digits := strings.TrimSpace(string(line))
	layers := []Layer{}
	current := Layer{}
	row := []int{}
	for _, ch := range digits {
		value, _ := strconv.Atoi(string(ch))
		row = append(row, value)
		if len(row) == numberOfCols {
			// Row complete: attach it and start a fresh one.
			current = append(current, row)
			row = []int{}
			if len(current) == numberOfRows {
				// Layer complete every numberOfRows rows.
				layers = append(layers, current)
				current = Layer{}
			}
		}
	}
	return layers
}
// findOccurancesInlayerOf counts how many pixels in layer equal n.
func findOccurancesInlayerOf(layer Layer, n int) int {
	count := 0
	for i := range layer {
		for j := range layer[i] {
			if layer[i][j] == n {
				count++
			}
		}
	}
	return count
}
// printLayer dumps each row of the layer as a raw int slice (debug helper).
func printLayer(layer Layer) {
	for i := range layer {
		fmt.Println(layer[i])
	}
}
// printLayerLetter renders the layer with block characters: black pixels as
// a solid block, white as a shaded block, anything else (transparent) as a
// space.
func printLayerLetter(layer Layer) {
	for _, row := range layer {
		for _, pixel := range row {
			if pixel == colorBlack {
				fmt.Print("█")
			} else if pixel == colorWhite {
				fmt.Print("░")
			} else {
				fmt.Print(" ")
			}
		}
		fmt.Println("")
	}
}
|
package main
import (
"fmt" // Print
"os" // Arguments passed to the program
"strconv" // Atoi
"time"
)
// Shared state for the parallel integration run.
var nRect uint32 = 0 // rectangle count; NOTE(review): shadowed by a local in main and never assigned
var nRutinas uint32 = 0 // goroutine count; NOTE(review): also shadowed in main
var anchRect float64 = 0 // width of each rectangle; read concurrently by the worker goroutines
var areaTotal float64 = 0 // accumulated total area, scaled by anchRect at the end
// f evaluates the integrand 4/(1+x²), whose integral over [0,1] is π.
func f(x float64) (y float64) {
	y = 4.0 / (1.0 + x*x)
	return
}
// calculaArea is the per-goroutine task: it computes the midpoint-rule
// partial sum for nAreas rectangles starting at index rectInicio and stores
// the result through ptr.
//
// Bug fix: the loop previously used `<=`, which summed nAreas+1 rectangles
// and double-counted the boundary rectangle between consecutive goroutines'
// ranges (the log line itself reports the worked range as
// [rectInicio, rectInicio+nAreas-1]).
func calculaArea(nRutina int, nAreas uint32, rectInicio uint32, ptr *float64) {
	var devolver float64 = 0
	for i := rectInicio; i < rectInicio+nAreas; i++ {
		devolver = devolver + f(anchRect*(float64(i)+0.5))
	}
	fmt.Printf("Rutina %d, %f (%d áreas[%d-%d])\n", nRutina+1, devolver, nAreas, rectInicio, rectInicio+nAreas-1)
	*ptr = devolver
}
// main parses <number of rectangles> and <number of goroutines> from the
// command line, partitions the midpoint-rule integration of 4/(1+x²) over
// [0,1] across goroutines, and prints the resulting estimate of π with the
// elapsed time.
func main() {
	// Check the argument count.
	if len(os.Args) < 3 {
		fmt.Println("Uso: pintegral-go <nº de rectangulos> <nº de hilos>")
		os.Exit(1)
	}
	inicio := time.Now()
	// Parse parameters.
	// NOTE(review): these short declarations shadow the package-level
	// nRect/nRutinas variables, which therefore stay 0 — confirm intent.
	nRect, _ := strconv.Atoi(os.Args[1])
	nRutinas, _ := strconv.Atoi(os.Args[2])
	if nRect == 0 || nRutinas == 0 {
		fmt.Println("Los parámetros deben ser mayores de 0")
	}
	// Width of each rectangle.
	anchRect = 1.0 / float64(nRect)
	// Per-goroutine partial areas.
	//var areasParciales [nRutinas]float64
	areasParciales := make([]float64, nRutinas)
	// Rectangles assigned to each goroutine.
	//var tareas [nRutinas]uint32
	tareas := make([]uint32, nRutinas)
	// Initialise the partial areas with a sentinel meaning "not done yet".
	for i := 0; i < nRutinas; i++ {
		areasParciales[i] = float64(-1.0)
	}
	// Remainder of rectangles that do not divide evenly.
	restoRect := nRect % nRutinas
	// Rectangles per goroutine; the first goroutines absorb the remainder.
	for i := 0; i < nRutinas; i++ {
		tareas[i] = uint32(nRect / nRutinas)
		if restoRect > 0 {
			tareas[i]++
			restoRect--
		}
	}
	fmt.Printf("Integral de %d rectángulos, anchura %f (%d goroutines)\n", nRect, anchRect, nRutinas)
	var rectInicio uint32 = 0
	// Launch one goroutine per partition.
	for i := 0; i < nRutinas; i++ {
		go calculaArea(i, tareas[i], rectInicio, &areasParciales[i])
		// The next goroutine starts at the following rectangle.
		rectInicio += tareas[i]
	}
	// NOTE(review): this busy-wait reads areasParciales concurrently with the
	// goroutines' writes — a data race (`go test -race` would flag it) — and
	// spins at full CPU; a sync.WaitGroup or channel would be the safe fix.
	// It also misbehaves if a partial area is legitimately exactly -1.0.
	for i := 0; i < nRutinas; i++ {
		if areasParciales[i] == -1.0 {
			i--
		} else {
			areaTotal += areasParciales[i]
		}
	}
	areaTotal *= anchRect
	fin := time.Since(inicio)
	fmt.Printf("π = %f (%v)\n", areaTotal, fin)
	os.Exit(0)
}
|
package models
import (
"reflect"
"strings"
)
// contains reports whether s is present in ss.
func contains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// ParseFields splits a comma-separated field list into the top-level field
// names and a map of nested selections.
//
// An entry of the form "parent.child" records "child" under "parent" in the
// nested map; the first such entry also contributes "parent" to the flat
// list, while later entries for the same parent only extend the map.
// Anything after a second dot is dropped, matching the original behaviour.
func ParseFields(fields string) ([]string, map[string][]string) {
	nested := make(map[string][]string)
	flat := make([]string, 0)
	for _, field := range strings.Split(fields, ",") {
		parts := strings.Split(field, ".")
		if len(parts) > 1 {
			_, seen := nested[parts[0]]
			nested[parts[0]] = append(nested[parts[0]], parts[1])
			if seen {
				// Parent already represented in the flat list; skip duplicate.
				continue
			}
		}
		flat = append(flat, parts[0])
	}
	return flat, nested
}
// FieldToMap projects `model` (a struct value) onto a map keyed by each
// field's json tag name, falling back to the Go field name when no tag is
// present.
//
// fields selects which keys to include; the single element "*" selects all.
// nestFields maps a key to the sub-field names to project recursively, and is
// applied to pointer-to-struct, slice and plain struct fields alike.
//
// NOTE(review): assumes `model` is a struct (reflect panics otherwise) and
// that fields is non-empty (fields[0] is read unconditionally) — confirm all
// callers guarantee this.
func FieldToMap(model interface{}, fields []string, nestFields map[string][]string) map[string]interface{} {
	u := make(map[string]interface{})
	ts, vs := reflect.TypeOf(model), reflect.ValueOf(model)
	for i := 0; i < ts.NumField(); i++ {
		var jsonKey string
		field := ts.Field(i)
		jsonTag := field.Tag.Get("json")
		if jsonTag == "" {
			jsonKey = field.Name
		} else {
			// Strip json tag options such as ",omitempty".
			jsonKey = strings.Split(jsonTag, ",")[0]
		}
		if fields[0] == "*" || contains(fields, jsonKey) {
			_, ok := nestFields[jsonKey]
			if ok {
				// Recurse using the nested selection for this key.
				f, n := ParseFields(strings.Join(nestFields[jsonKey], ","))
				if vs.Field(i).Kind() == reflect.Ptr {
					if !vs.Field(i).IsNil() {
						u[jsonKey] = FieldToMap(vs.Field(i).Elem().Interface(), f, n)
					} else {
						u[jsonKey] = nil
					}
				} else if vs.Field(i).Kind() == reflect.Slice {
					// Project every element of the slice individually.
					var fieldMap []interface{}
					s := reflect.ValueOf(vs.Field(i).Interface())
					for i := 0; i < s.Len(); i++ {
						fieldMap = append(fieldMap, FieldToMap(s.Index(i).Interface(), f, n))
					}
					u[jsonKey] = fieldMap
				} else {
					u[jsonKey] = FieldToMap(vs.Field(i).Interface(), f, n)
				}
			} else {
				u[jsonKey] = vs.Field(i).Interface()
			}
		}
	}
	return u
}
|
package seccomp
import (
"fmt"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// BruteForceSource represents a system calls source based on a brute force
// approach: it discovers required syscalls by repeatedly running a command
// with one candidate syscall blocked at a time.
type BruteForceSource struct {
	// options is the candidate syscall list to probe.
	options []string
	// runner executes the traced command under a given seccomp profile.
	runner BruteForceRunner
}
// BruteForceRunner defines the interface for brute force runners.
type BruteForceRunner interface {
	// RunWithSeccomp runs the command under profile; nil means unrestricted.
	RunWithSeccomp(profile *specs.LinuxSeccomp) error
}
// NewBruteForceSource initialises BruteForceSource, seeding the candidate
// list with the most frequently used syscalls.
func NewBruteForceSource(runner BruteForceRunner) *BruteForceSource {
	return &BruteForceSource{
		runner:  runner,
		options: getMostFrequentSyscalls(),
	}
}
// isEssentialCall reports whether syscall is one of the calls every traced
// process needs, which therefore must never be probed by blocking it.
func isEssentialCall(syscall string) bool {
	essentials := []string{"close", "exit", "execve", "exit_group", "futex"}
	for _, essential := range essentials {
		if syscall == essential {
			return true
		}
	}
	return false
}
// canRunBlockingSyscall reports whether the traced command still runs
// successfully when syscall is removed from the allowed set. Essential
// calls are never probed and always report false (i.e. treated as required).
func (s *BruteForceSource) canRunBlockingSyscall(syscall string) bool {
	if isEssentialCall(syscall) {
		return false
	}
	allowed := s.excludeItemFromSlice(s.options, syscall)
	profile := &specs.LinuxSeccomp{
		DefaultAction: specs.ActErrno,
		Syscalls: []specs.LinuxSyscall{
			{Names: allowed, Action: specs.ActAllow},
		},
	}
	return s.runner.RunWithSeccomp(profile) == nil
}
// GetSystemCalls returns all system calls found by brute forcing the profile
// using a runner: any candidate whose removal makes the command fail is
// considered required and ends up in the returned allow-list.
func (s *BruteForceSource) GetSystemCalls() (*specs.LinuxSyscall, error) {
	// Sanity check: the command must succeed unrestricted before probing.
	if err := s.runner.RunWithSeccomp(nil); err != nil {
		return nil, fmt.Errorf("execution aborted, command could not be executed: %v", err)
	}
	mustHaves := make([]string, 0, 60)
	for _, syscall := range s.options {
		if !s.canRunBlockingSyscall(syscall) {
			mustHaves = append(mustHaves, syscall)
		}
	}
	return &specs.LinuxSyscall{
		Action: specs.ActAllow,
		Names:  mustHaves,
	}, nil
}
// indexesOf returns the positions of every occurrence of item in source
// (empty slice when item does not occur).
func (s *BruteForceSource) indexesOf(source []string, item string) []int {
	indexes := make([]int, 0, len(source))
	for idx, candidate := range source {
		if candidate == item {
			indexes = append(indexes, idx)
		}
	}
	return indexes
}
// excludeItemFromSlice returns source with every occurrence of itemToExclude
// removed. When the item does not occur at all, the original slice is
// returned unchanged (same backing array), matching the previous behaviour.
func (s *BruteForceSource) excludeItemFromSlice(source []string, itemToExclude string) []string {
	if len(s.indexesOf(source, itemToExclude)) == 0 {
		return source
	}
	filtered := make([]string, 0, len(source))
	for _, v := range source {
		if v != itemToExclude {
			filtered = append(filtered, v)
		}
	}
	return filtered
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package policygen implements the Policy Generator.
package policygen
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/cmd"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/fileutil"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/hcl"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/licenseutil"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/runner"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/template"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/version"
"github.com/hashicorp/terraform/states"
"github.com/otiai10/copy"
)
const (
	// forsetiOutputRoot is the directory (under the output path) that holds
	// the generated Forseti policy library tree.
	forsetiOutputRoot = "forseti_policies"
)
// RunArgs is the struct representing the arguments passed to Run().
type RunArgs struct {
	// ConfigPath points at the policygen config file.
	ConfigPath string
	// StatePaths lists Terraform state files to derive policies from.
	StatePaths []string
	// OutputPath is the directory the generated policies are written to.
	OutputPath string
}
// Run executes main policygen logic.
//
// It expands the user-supplied paths, loads and version-checks the config,
// fetches the policy templates into a temporary cache, generates Forseti
// policies into a staging directory, adds license headers, formats the HCL,
// and only then copies the staged tree into args.OutputPath.
func Run(ctx context.Context, rn runner.Runner, args *RunArgs) error {
	var err error
	configPath, err := fileutil.Expand(args.ConfigPath)
	if err != nil {
		return fmt.Errorf("normalize path %q: %v", args.ConfigPath, err)
	}
	c, err := loadConfig(configPath)
	if err != nil {
		return fmt.Errorf("load config: %v", err)
	}
	// Refuse to run when this binary does not satisfy the config's version
	// constraint.
	compat, err := version.IsCompatible(c.Version)
	if err != nil {
		return err
	}
	if !compat {
		return fmt.Errorf("binary version %v incompatible with template version constraint %v in %v", cmd.Version, c.Version, configPath)
	}
	var statePaths []string
	for _, p := range args.StatePaths {
		p, err = fileutil.Expand(p)
		if err != nil {
			return fmt.Errorf("normalize path %q: %v", p, err)
		}
		statePaths = append(statePaths, p)
	}
	outputPath, err := fileutil.Expand(args.OutputPath)
	if err != nil {
		return fmt.Errorf("normalize path %q: %v", args.OutputPath, err)
	}
	// cacheDir holds fetched templates for the duration of the run.
	cacheDir, err := ioutil.TempDir("", "")
	if err != nil {
		return err
	}
	defer os.RemoveAll(cacheDir)
	// The template dir is resolved relative to the config file's directory.
	pp, err := fileutil.Fetch(c.TemplateDir, filepath.Dir(args.ConfigPath), cacheDir)
	if err != nil {
		return fmt.Errorf("resolve policy template path: %v", err)
	}
	c.TemplateDir = pp
	// tmpDir is the staging area; results reach outputPath only on success.
	tmpDir, err := ioutil.TempDir("", "")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)
	// Policy Library templates are released in a backwards compatible way, and old templates will be
	// kept in the repository as well, so it's relatively safe to pull from 'master' branch all the time.
	tp, err := fileutil.Fetch("github.com/forseti-security/policy-library?ref=master", "", cacheDir)
	if err != nil {
		return fmt.Errorf("fetch policy templates and utils: %v", err)
	}
	if err := copy.Copy(filepath.Join(tp, "policies"), filepath.Join(tmpDir, forsetiOutputRoot, "policies")); err != nil {
		return err
	}
	if err := copy.Copy(filepath.Join(tp, "lib"), filepath.Join(tmpDir, forsetiOutputRoot, "lib")); err != nil {
		return err
	}
	if err := generateForsetiPolicies(ctx, rn, statePaths, filepath.Join(tmpDir, forsetiOutputRoot, "policies", "constraints"), c); err != nil {
		return fmt.Errorf("generate Forseti policies: %v", err)
	}
	if err := licenseutil.AddLicense(tmpDir); err != nil {
		return fmt.Errorf("add license header: %v", err)
	}
	if err := hcl.FormatDir(rn, tmpDir); err != nil {
		return fmt.Errorf("hcl format: %v", err)
	}
	if err := os.MkdirAll(outputPath, 0755); err != nil {
		return fmt.Errorf("mkdir %q: %v", outputPath, err)
	}
	return copy.Copy(tmpDir, outputPath)
}
// generateForsetiPolicies writes all Forseti policies (general plus
// Terraform-state derived) under outputPath. It is a no-op when the config
// defines no Forseti policies.
func generateForsetiPolicies(ctx context.Context, rn runner.Runner, statePaths []string, outputPath string, c *config) error {
	if c.ForsetiPolicies == nil {
		return nil
	}
	if err := generateGeneralForsetiPolicies(outputPath, c); err != nil {
		return fmt.Errorf("generate general forseti policies: %v", err)
	}
	if err := generateTerraformBasedForsetiPolicies(ctx, rn, statePaths, outputPath, c.TemplateDir); err != nil {
		return fmt.Errorf("generate forseti policies from terraform state: %v", err)
	}
	return nil
}
// generateGeneralForsetiPolicies renders the "overall" template directory
// from the config's template dir into outputPath/overall, using the
// config's Forseti policy data.
func generateGeneralForsetiPolicies(outputPath string, c *config) error {
	return template.WriteDir(
		filepath.Join(c.TemplateDir, "forseti", "overall"),
		filepath.Join(outputPath, "overall"),
		c.ForsetiPolicies,
	)
}
// generateTerraformBasedForsetiPolicies loads resources from the given
// Terraform state files and writes IAM policies derived from them. When no
// state paths are supplied it logs a notice and succeeds, so only
// Terraform-agnostic policies are produced.
func generateTerraformBasedForsetiPolicies(ctx context.Context, rn runner.Runner, statePaths []string, outputPath, templateDir string) error {
	if len(statePaths) == 0 {
		log.Println("No Terraform state given, only generating Terraform-agnostic security policies")
		return nil
	}
	var resources []*states.Resource
	for _, path := range statePaths {
		loaded, err := loadResources(ctx, path)
		if err != nil {
			return err
		}
		resources = append(resources, loaded...)
	}
	return generateIAMPolicies(rn, resources, outputPath, templateDir)
}
|
package nettools
import (
"fmt"
"net"
)
func GetIP() (string, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", fmt.Errorf("failed to get interface addreses: %v", err)
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {
return ipnet.IP.String(), nil
}
}
return "", fmt.Errorf("unable to retrieve ip from interface addresses")
}
|
//+build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package azure
import (
"fmt"
"testing"
"time"
)
// TestIsClusterExpired table-tests Account.IsClusterExpired: each resource
// group carries a "now" tag (creation time in Unix seconds) and the case
// says whether the cluster should be considered expired after duration.
func TestIsClusterExpired(t *testing.T) {
	cases := []struct {
		rg ResourceGroup
		a Account
		duration string
		expectedResult bool
	}{
		{
			// Creation timestamp far in the past (1995) with a 1h
			// lifetime: expired.
			rg: ResourceGroup{
				Name: "testRG",
				Location: "test",
				Tags: map[string]string{
					"now": "799786800",
				},
			},
			a: Account{
				User: new(User),
				TenantID: "1234",
				SubscriptionID: "1234",
				ResourceGroup: ResourceGroup{},
				Deployment: Deployment{},
			},
			duration: "1h",
			expectedResult: true,
		},
		{
			// Created "now" with a generous 300h lifetime: not expired.
			rg: ResourceGroup{
				Name: "testRG",
				Location: "test",
				Tags: map[string]string{
					"now": fmt.Sprintf("%v", time.Now().Unix()),
				},
			},
			a: Account{
				User: new(User),
				TenantID: "1234",
				SubscriptionID: "1234",
				ResourceGroup: ResourceGroup{},
				Deployment: Deployment{},
			},
			duration: "300h",
			expectedResult: false,
		},
		{
			// Missing "now" tag entirely: treated as expired.
			rg: ResourceGroup{
				Name: "thisRGDoesNotExist",
				Location: "test",
				Tags: map[string]string{},
			},
			a: Account{
				User: new(User),
				TenantID: "1234",
				SubscriptionID: "1234",
				ResourceGroup: ResourceGroup{},
				Deployment: Deployment{},
			},
			duration: "1s",
			expectedResult: true,
		},
	}
	for _, c := range cases {
		// Attach the case's resource group to the account under test.
		c.a.ResourceGroup = c.rg
		d, err := time.ParseDuration(c.duration)
		if err != nil {
			t.Fatalf("unexpected error parsing duration: %s", err)
		}
		result := c.a.IsClusterExpired(d)
		if c.expectedResult != result {
			t.Fatalf("IsClusterExpired returned unexpected result: expected %t but got %t", c.expectedResult, result)
		}
	}
}
|
package pages
// Login is the HTML page for the login form; it POSTs the username and
// password fields to the "login" endpoint.
const Login string = `
<html>
<head><title>Sessions Example</title></head>
<body>
<form action="login" method="post">
<input name="username" />
<input name="password" type="password" />
<input type="Submit" />
</form>
</body>
</html>`
// Home is the HTML page shown after login; its only control POSTs to the
// "logout" endpoint.
const Home string = `
<html>
<head><title>Sessions Example</title></head>
<body>
<form action="logout" method="post">
<input type="Submit" value="Logout"/>
</form>
</body>
</html>`
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"path/filepath"
"sort"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli/output"
"helm.sh/helm/v3/pkg/cli/values"
"helm.sh/helm/v3/pkg/helmpath"
"helm.sh/helm/v3/pkg/postrender"
"helm.sh/helm/v3/pkg/repo"
)
const (
outputFlag = "output"
postRenderFlag = "post-renderer"
postRenderArgsFlag = "post-renderer-args"
)
// addValueOptionsFlags registers the standard chart-value override flags
// (--values/-f, --set, --set-string, --set-file, --set-json, --set-literal)
// on f, binding each one to the corresponding field of v.
func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) {
	f.StringSliceVarP(&v.ValueFiles, "values", "f", []string{}, "specify values in a YAML file or a URL (can specify multiple)")
	f.StringArrayVar(&v.Values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
	f.StringArrayVar(&v.StringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
	f.StringArrayVar(&v.FileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)")
	f.StringArrayVar(&v.JSONValues, "set-json", []string{}, "set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2)")
	f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line")
}
// addChartPathOptionsFlags registers the flags used to locate and verify a
// chart (version constraint, repo URL, credentials, TLS options) on f,
// binding each one to the corresponding field of c.
func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) {
	f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
	f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")
	f.StringVar(&c.Keyring, "keyring", defaultKeyring(), "location of public keys used for verification")
	f.StringVar(&c.RepoURL, "repo", "", "chart repository url where to locate the requested chart")
	f.StringVar(&c.Username, "username", "", "chart repository username where to locate the requested chart")
	f.StringVar(&c.Password, "password", "", "chart repository password where to locate the requested chart")
	f.StringVar(&c.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
	f.StringVar(&c.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
	f.BoolVar(&c.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
	f.StringVar(&c.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
	f.BoolVar(&c.PassCredentialsAll, "pass-credentials", false, "pass credentials to all domains")
}
// bindOutputFlag will add the output flag to the given command and bind the
// value to the given format pointer. It also registers shell completion for
// the flag, listing every known output format with its description.
func bindOutputFlag(cmd *cobra.Command, varRef *output.Format) {
	cmd.Flags().VarP(newOutputValue(output.Table, varRef), outputFlag, "o",
		fmt.Sprintf("prints the output in the specified format. Allowed values: %s", strings.Join(output.Formats(), ", ")))
	err := cmd.RegisterFlagCompletionFunc(outputFlag, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		var formatNames []string
		for format, desc := range output.FormatsWithDesc() {
			formatNames = append(formatNames, fmt.Sprintf("%s\t%s", format, desc))
		}
		// Sort the results to get a deterministic order for the tests
		sort.Strings(formatNames)
		return formatNames, cobra.ShellCompDirectiveNoFileComp
	})
	if err != nil {
		log.Fatal(err)
	}
}
// outputValue adapts output.Format to the pflag.Value interface so a format
// can be bound directly to a cobra flag.
type outputValue output.Format
// newOutputValue sets *p to defaultValue and returns it wrapped as an
// outputValue; the flag and the caller share the same underlying storage.
func newOutputValue(defaultValue output.Format, p *output.Format) *outputValue {
	*p = defaultValue
	return (*outputValue)(p)
}
// String returns the current format name (pflag.Value interface).
func (o *outputValue) String() string {
	// It is much cleaner looking (and technically less allocations) to just
	// convert to a string rather than type asserting to the underlying
	// output.Format
	return string(*o)
}
// Type describes the flag value type in help text (pflag.Value interface).
func (o *outputValue) Type() string {
	return "format"
}
// Set parses s as an output format and stores it; unknown formats are
// rejected with the parse error (pflag.Value interface).
func (o *outputValue) Set(s string) error {
	outfmt, err := output.ParseFormat(s)
	if err != nil {
		return err
	}
	*o = outputValue(outfmt)
	return nil
}
// bindPostRenderFlag registers the --post-renderer and --post-renderer-args
// flags on cmd. Both flags share one postRendererOptions value so that the
// binary path and its arguments can arrive in any order and still build the
// same PostRenderer into varRef.
func bindPostRenderFlag(cmd *cobra.Command, varRef *postrender.PostRenderer) {
	p := &postRendererOptions{varRef, "", []string{}}
	cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the path to an executable to be used for post rendering. If it exists in $PATH, the binary will be used, otherwise it will try to look for the executable at the given path")
	cmd.Flags().Var(&postRendererArgsSlice{p}, postRenderArgsFlag, "an argument to the post-renderer (can specify multiple)")
}
// postRendererOptions is the shared state behind the post-renderer flags:
// the destination PostRenderer pointer, the executable path, and its args.
type postRendererOptions struct {
	renderer *postrender.PostRenderer
	binaryPath string
	args []string
}
// postRendererString implements pflag.Value for the --post-renderer flag.
type postRendererString struct {
	options *postRendererOptions
}
// String returns the currently configured binary path.
func (p *postRendererString) String() string {
	return p.options.binaryPath
}
// Type describes the flag value type in help text.
func (p *postRendererString) Type() string {
	return "postRendererString"
}
// Set records the executable path and (re)builds the exec-based
// PostRenderer with any args collected so far. An empty value is a no-op.
func (p *postRendererString) Set(val string) error {
	if val == "" {
		return nil
	}
	p.options.binaryPath = val
	pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...)
	if err != nil {
		return err
	}
	*p.options.renderer = pr
	return nil
}
// postRendererArgsSlice implements pflag.SliceValue for the
// --post-renderer-args flag, accumulating arguments for the post-renderer.
type postRendererArgsSlice struct {
	options *postRendererOptions
}
// String renders the collected args in pflag's slice notation.
func (p *postRendererArgsSlice) String() string {
	return "[" + strings.Join(p.options.args, ",") + "]"
}
// Type describes the flag value type in help text.
func (p *postRendererArgsSlice) Type() string {
	return "postRendererArgsSlice"
}
// Set appends one argument and, when a binary path is already known,
// rebuilds the PostRenderer so the new argument takes effect.
func (p *postRendererArgsSlice) Set(val string) error {
	// a post-renderer defined by a user may accept empty arguments
	p.options.args = append(p.options.args, val)
	if p.options.binaryPath == "" {
		return nil
	}
	// overwrite if already create PostRenderer by `post-renderer` flags
	pr, err := postrender.NewExec(p.options.binaryPath, p.options.args...)
	if err != nil {
		return err
	}
	*p.options.renderer = pr
	return nil
}
// Append adds one argument without rebuilding the renderer
// (pflag.SliceValue interface).
func (p *postRendererArgsSlice) Append(val string) error {
	p.options.args = append(p.options.args, val)
	return nil
}
// Replace swaps the whole argument list (pflag.SliceValue interface).
func (p *postRendererArgsSlice) Replace(val []string) error {
	p.options.args = val
	return nil
}
// GetSlice returns the collected arguments (pflag.SliceValue interface).
func (p *postRendererArgsSlice) GetSlice() []string {
	return p.options.args
}
// compVersionFlag provides shell completion for the --version flag: given a
// "repo/chart" reference it reads the cached repo index and returns every
// known chart version, annotated with app version, creation date, and a
// deprecation marker. Anything that is not a two-part reference completes
// to nothing.
func compVersionFlag(chartRef string, toComplete string) ([]string, cobra.ShellCompDirective) {
	chartInfo := strings.Split(chartRef, "/")
	if len(chartInfo) != 2 {
		return nil, cobra.ShellCompDirectiveNoFileComp
	}
	repoName := chartInfo[0]
	chartName := chartInfo[1]
	path := filepath.Join(settings.RepositoryCache, helmpath.CacheIndexFile(repoName))
	var versions []string
	// A missing or unreadable index silently yields no completions.
	if indexFile, err := repo.LoadIndexFile(path); err == nil {
		for _, details := range indexFile.Entries[chartName] {
			appVersion := details.Metadata.AppVersion
			appVersionDesc := ""
			if appVersion != "" {
				appVersionDesc = fmt.Sprintf("App: %s, ", appVersion)
			}
			created := details.Created.Format("January 2, 2006")
			createdDesc := ""
			if created != "" {
				createdDesc = fmt.Sprintf("Created: %s ", created)
			}
			deprecated := ""
			if details.Metadata.Deprecated {
				deprecated = "(deprecated)"
			}
			versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Metadata.Version, appVersionDesc, createdDesc, deprecated))
		}
	}
	return versions, cobra.ShellCompDirectiveNoFileComp
}
// addKlogFlags adds flags from k8s.io/klog
// marks the flags as hidden to avoid polluting the help text
func addKlogFlags(fs *pflag.FlagSet) {
	// Collect klog's flags into a throwaway flag set, then copy each one
	// into fs with a normalized (hyphenated) name, skipping duplicates.
	local := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(local)
	local.VisitAll(func(fl *flag.Flag) {
		fl.Name = normalize(fl.Name)
		if fs.Lookup(fl.Name) != nil {
			return
		}
		newflag := pflag.PFlagFromGoFlag(fl)
		newflag.Hidden = true
		fs.AddFlag(newflag)
	})
}
// normalize replaces every underscore in s with a hyphen, converting Go-flag
// style names to the hyphenated convention pflag uses.
func normalize(s string) string {
	return strings.Map(func(r rune) rune {
		if r == '_' {
			return '-'
		}
		return r
	}, s)
}
|
package leetcode
// MergeKLists merges k sorted linked lists into one sorted list by folding
// mergeTwoLists across the slice.
func MergeKLists(lists []*ListNode) *ListNode {
	switch len(lists) {
	case 0:
		return nil
	case 1:
		return lists[0]
	}
	var merged *ListNode
	for _, l := range lists {
		merged = mergeTwoLists(merged, l)
	}
	return merged
}
// mergeTwoLists merges two sorted lists into one sorted list, splicing the
// existing nodes (no copies). Ties favor l2, matching the strict-less
// comparison of the original implementation.
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
	// Handle the trivial cases first so the dummy head below is only
	// allocated when it is actually needed (the original allocated it
	// unconditionally, before these checks).
	if l1 == nil {
		return l2
	}
	if l2 == nil {
		return l1
	}
	dummy := &ListNode{}
	tail := dummy
	for l1 != nil && l2 != nil {
		if l1.Val < l2.Val {
			tail.Next = l1
			l1 = l1.Next
		} else {
			tail.Next = l2
			l2 = l2.Next
		}
		tail = tail.Next
	}
	// Exactly one of the lists is exhausted; append the remainder.
	if l1 != nil {
		tail.Next = l1
	} else {
		tail.Next = l2
	}
	return dummy.Next
}
|
package proxy
import (
"github.com/sirupsen/logrus"
v1apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
)
// BuildDeployment builds kubernetes deployment object to create proxy container of the database
func BuildDeployment(proxy Proxy) (*v1apps.Deployment, error) {
	deployment, err := proxy.buildDeployment()
	if err == nil {
		return deployment, nil
	}
	logrus.Error("failed building proxy deployment")
	return nil, err
}
// BuildService builds kubernetes service object for proxy service of the database
func BuildService(proxy Proxy) (*v1.Service, error) {
	service, err := proxy.buildService()
	if err == nil {
		return service, nil
	}
	logrus.Error("failed building proxy service")
	return nil, err
}
// BuildConfigmap builds kubernetes configmap object used by proxy container of the database
func BuildConfigmap(proxy Proxy) (*v1.ConfigMap, error) {
	configMap, err := proxy.buildConfigMap()
	if err == nil {
		return configMap, nil
	}
	logrus.Error("failed building proxy configmap")
	return nil, err
}
|
/*
命題
与えられた文字列の各文字を,以下の仕様で変換する関数cipherを実装せよ.
* 英小文字ならば(219 - 文字コード)の文字に置換
* その他の文字はそのまま出力
この関数を用い,英語のメッセージを暗号化・復号化せよ.
*/
package main
import (
"fmt"
)
// cipher maps each lowercase ASCII letter c to the letter with code 219 - c
// ('a' <-> 'z', 'b' <-> 'y', ...); every other rune is copied through
// unchanged. Applying cipher twice restores the original string.
func cipher(str string) string {
	var encodedStr string
	for _, v := range str {
		// Bug fix: the original test was v > 'a' && v < 'z', which left
		// the boundary letters 'a' and 'z' themselves unencoded.
		if v >= 'a' && v <= 'z' {
			encodedStr += string(219 - v)
		} else {
			encodedStr += string(v)
		}
	}
	return encodedStr
}
// main demonstrates that cipher is its own inverse: it encodes a sample
// string, decodes it by applying cipher again, and prints all three forms.
func main() {
	str := "foo bar baz. FOO BAR BAZ :)"
	encode := cipher(str)
	decode := cipher(encode)
	fmt.Println("元の文字", str)
	fmt.Println("暗号化", encode)
	// Bug fix: the label read 複合化 ("compounding"); 復号化 ("decoding")
	// is the intended word.
	fmt.Println("復号化", decode)
}
|
package types
import "net/http"
//
// Route is used to denote a route for all
// API routers
//
type Route struct {
	// Name identifies the route (used for registration/logging).
	Name string
	// Method is the HTTP verb (GET, POST, ...).
	Method string
	// Pattern is the URL path pattern the route matches.
	Pattern string
	// HandlerFunc handles requests that match Method and Pattern.
	HandlerFunc http.HandlerFunc
}
|
package main
import (
"fmt"
)
// Parameters for two dueling pseudo-random generators; the program counts
// how often their low 16 bits agree. (Looks like an Advent-of-Code style
// puzzle — seeds are puzzle input.)
const (
	lseed = 516
	rseed = 190
	lfactor = 16807
	rfactor = 48271
	// divisor is 2^31 - 1, the Mersenne-prime modulus of the generators.
	divisor = 2147483647
	// maxvals is the number of generator pairs to compare.
	maxvals = 40000000
	// bitmask selects the low 16 bits for comparison.
	bitmask = 65535
)
// genNext advances a generator one step: multiply the current value by its
// factor and reduce modulo divisor (2^31 - 1).
func genNext(val int, factor int) int {
	product := val * factor
	return product % divisor
}
// main runs both generators maxvals times and prints how many pairs agree
// in their low 16 bits.
func main() {
	lval, rval := lseed, rseed
	matches := 0
	for i := 0; i < maxvals; i++ {
		lval = genNext(lval, lfactor)
		rval = genNext(rval, rfactor)
		if lval&bitmask == rval&bitmask {
			matches++
		}
	}
	fmt.Println(matches)
}
|
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cuegen
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testGenerator constructs a Generator from testdata/valid.go, failing the
// test immediately if construction fails or the package has load errors.
func testGenerator(t *testing.T) *Generator {
	g, err := NewGenerator("testdata/valid.go")
	require.NoError(t, err)
	require.NotNil(t, g)
	require.Len(t, g.pkg.Errors, 0)
	return g
}
// TestNewGenerator checks that a freshly built Generator carries a loaded
// package, collected type info, and the default options.
func TestNewGenerator(t *testing.T) {
	g := testGenerator(t)
	assert.NotNil(t, g.pkg)
	assert.NotNil(t, g.types)
	assert.Equal(t, g.opts.types, newDefaultOptions().types)
	assert.Equal(t, g.opts.nullable, newDefaultOptions().nullable)
	// assert can't compare function
	assert.True(t, g.opts.typeFilter(nil))
	assert.Greater(t, len(g.types), 0)
}
// TestGeneratorPackage checks that Package returns the underlying package.
func TestGeneratorPackage(t *testing.T) {
	g := testGenerator(t)
	assert.Equal(t, g.Package(), g.pkg)
}
// TestGeneratorGenerate checks that Generate succeeds both with explicit
// type overrides (including a trailing nil option) and with no options.
func TestGeneratorGenerate(t *testing.T) {
	g := testGenerator(t)
	decls, err := g.Generate(WithTypes(map[string]Type{
		"foo": TypeAny,
		"bar": TypeAny,
	}), nil)
	assert.NoError(t, err)
	assert.NotNil(t, decls)
	decls, err = g.Generate()
	assert.NoError(t, err)
	assert.NotNil(t, decls)
}
// TestGeneratorFormat checks Format's contract: it accepts generated decls
// and nil entries, but rejects a nil writer, a nil decl slice, and an empty
// decl slice.
func TestGeneratorFormat(t *testing.T) {
	g := testGenerator(t)
	decls, err := g.Generate()
	assert.NoError(t, err)
	assert.NoError(t, g.Format(io.Discard, decls))
	assert.NoError(t, g.Format(io.Discard, []Decl{nil, nil}))
	assert.Error(t, g.Format(nil, decls))
	assert.Error(t, g.Format(io.Discard, nil))
	assert.Error(t, g.Format(io.Discard, []Decl{}))
}
// TestLoadPackage checks that the valid testdata file loads without errors.
func TestLoadPackage(t *testing.T) {
	pkg, err := loadPackage("testdata/valid.go")
	require.NoError(t, err)
	require.NotNil(t, pkg)
	require.Len(t, pkg.Errors, 0)
}
// TestGetTypeInfo checks that type info extracted from the valid testdata
// package is non-empty.
func TestGetTypeInfo(t *testing.T) {
	pkg, err := loadPackage("testdata/valid.go")
	require.NoError(t, err)
	require.Greater(t, len(getTypeInfo(pkg)), 0)
}
|
package util
import (
"bufio"
"fmt"
"io"
"net/http"
"os"
)
// DownloadImg fetches the image at imgUrl and writes it to a random-named
// .jpg under the hard-coded blog image directory. It returns the generated
// file name (without extension) and the full path written; both are empty
// strings when the download or the copy fails.
func DownloadImg(imgUrl string) (name string, path string) {
	imgPath := "/Users/ZhuangXiaoDa/open-work/image/blog/"
	fileName := CreateRandomString(10)
	writePath := imgPath + fileName + ".jpg"
	res, err := http.Get(imgUrl)
	if err != nil {
		fmt.Println(err)
		fmt.Println("A error occurred!")
		return
	}
	defer res.Body.Close()
	// Buffered reader over the response body (320 KiB buffer).
	reader := bufio.NewReaderSize(res.Body, 32*1024*10)
	file, err := os.Create(writePath)
	if err != nil {
		panic(err)
	}
	// Bug fix: the file handle was never closed.
	defer file.Close()
	writer := bufio.NewWriter(file)
	written, err := io.Copy(writer, reader)
	// Bug fix: the copy error used to be discarded silently.
	if err != nil {
		fmt.Println(err)
		return
	}
	// Bug fix: without Flush the buffered tail of the image never reached
	// disk, leaving a truncated file.
	if err = writer.Flush(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s.jpg Total length: %d write finish ! \n", fileName, written)
	return fileName, writePath
}
|
package linters
import (
"github.com/simonjohansson/go-linter/model"
"github.com/simonjohansson/go-linter/manifest"
)
// Linter checks a repository or a parsed manifest and reports a Result.
type Linter interface {
	Lint() (model.Result, error)
	LintManifest(manifest model.Manifest) (model.Result, error)
}
// FullLint runs every configured linter in sequence over one repository.
type FullLint struct {
	// Config carries the repo root and other linter settings.
	Config model.LinterConfig
	// ManifestReader parses the halfpipe manifest from the repo.
	ManifestReader manifest.ManifestReader
	// RequiredFilesLinter runs first; its failure short-circuits the rest.
	RequiredFilesLinter Linter
	RequiredFieldsLinter Linter
	RepoLinter Linter
}
// Lint runs the full linter pipeline: first the required-files check (which
// short-circuits everything else when it reports errors), then the manifest
// is parsed and handed to the remaining manifest-based linters. It returns
// one Result per linter that ran.
func (l FullLint) Lint() ([]model.Result, error) {
	results := []model.Result{}
	r, err := l.RequiredFilesLinter.Lint()
	if err != nil {
		return []model.Result{}, err
	}
	results = append(results, r)
	if len(r.Errors) != 0 {
		// If halfpipe.io file is missing, no need to continue..
		return results, nil
	}
	// Renamed from "manifest": the original local shadowed the imported
	// manifest package.
	parsedManifest, err := l.ManifestReader.ParseManifest(l.Config.RepoRoot)
	if err != nil {
		return []model.Result{}, err
	}
	linters := []Linter{
		l.RequiredFieldsLinter,
		l.RepoLinter,
	}
	for _, linter := range linters {
		r, err = linter.LintManifest(parsedManifest)
		if err != nil {
			return []model.Result{}, err
		}
		results = append(results, r)
	}
	return results, nil
}
|
package router
import (
"project/app/admin/apis"
"project/app/admin/middleware"
"project/utils/app"
"github.com/gin-gonic/gin"
)
// init registers the job routers: jobRouter is mounted without
// authentication while jobAuthRouter goes behind the role-check middleware.
func init() {
	routerNoCheckRole = append(routerNoCheckRole, jobRouter)
	routerCheckRole = append(routerCheckRole, jobAuthRouter)
}
// jobRouter registers the /job routes that require no authentication.
func jobRouter(v1 *gin.RouterGroup) {
	r := v1.Group("/job")
	{
		// Liveness probe.
		r.GET("ping", func(c *gin.Context) {
			c.String(int(app.CodeSuccess), "ok")
		})
	}
}
// jobAuthRouter registers the /job routes that require authentication.
func jobAuthRouter(v1 *gin.RouterGroup) {
	r := v1.Group("/job")
	{
		// Endpoints behind the role-based auth middleware.
		r.Use(middleware.AuthCheckRole())
		r.GET("download", apis.JobDownload)
		r.GET("", apis.GetJobList)
		r.DELETE("", apis.DelJobById)
		r.POST("", apis.AddJob)
		r.PUT("", apis.UpdateJob)
	}
}
|
package myTime
import (
"fmt"
"time"
)
// TimeTest parses s using the "2006/01/02 15:04:05" layout, prints its Unix
// timestamp, then steps back one day at a time (twice) from now, printing
// each date formatted, as a Unix timestamp, and in UTC.
func TimeTest(s string) {
	fmt.Println(s)
	wipsd, err := time.Parse("2006/01/02 15:04:05", s)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(wipsd.Unix())
	t := time.Now()
	for i := 0; i < 2; i++ {
		t = t.AddDate(0, 0, -1)
		fmt.Println(t.Format("2006/01/02 15:04:05"))
		// fmt.Println(t.Format("2006/01/02"))
		// Bug fix: fmt.Println(v, "\n") printed a stray space followed by
		// two newlines (go vet: redundant newline). Printf makes the
		// intended blank-line separator explicit.
		fmt.Printf("%d\n\n", t.Unix())
		fmt.Printf("%v\n\n", t.UTC())
	}
	// fmt.Println(t.Format("2006/01/02 15:04:05"))
}
|
package source
import (
"errors"
"math/rand"
)
// init registers the random balancer under the "random" key so callers can
// select this strategy by name.
func init() {
	RegisterBalancer("random", &RandomBalance{})
}
// RandomBalance is a simple random scheduling strategy: it picks a uniformly
// random instance on each call.
type RandomBalance struct {
}
// DoBalance picks one instance uniformly at random from insts. The optional
// key arguments are accepted for interface compatibility and ignored.
// An error is returned when insts is empty.
func (p *RandomBalance) DoBalance(insts []*Instance, key ...string) (inst *Instance, err error) {
	if len(insts) == 0 {
		return nil, errors.New("instance is empty")
	}
	return insts[rand.Intn(len(insts))], nil
}
|
// +build linux
package volume
import "testing"
// TestFallocate checks that Fallocate succeeds for a zero-length request:
// allocating 0 bytes at offset 0 on fd 0 is expected to be a no-op rather
// than an error.
func TestFallocate(t *testing.T) {
	err := Fallocate(0, 0, 0)
	if err != nil {
		t.Fatal("Unexpected error in fallocate for length 0:", err)
	}
}
|
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package build
import (
"os"
"path/filepath"
"strconv"
"strings"
"github.com/google/blueprint/microfactory"
"android/soong/ui/metrics"
"android/soong/ui/status"
)
// runSoong drives the Soong build: it bootstraps blueprint, validates the
// cached environment file, builds the minibp and bpglob microfactory tools,
// and finally runs the minibootstrap and bootstrap ninja stages.
func runSoong(ctx Context, config Config) {
	ctx.BeginTrace(metrics.RunSoong, "soong")
	defer ctx.EndTrace()
	// Stage 1: blueprint bootstrap script.
	func() {
		ctx.BeginTrace(metrics.RunSoong, "blueprint bootstrap")
		defer ctx.EndTrace()
		cmd := Command(ctx, config, "blueprint bootstrap", "build/blueprint/bootstrap.bash", "-t")
		cmd.Environment.Set("BLUEPRINTDIR", "./build/blueprint")
		cmd.Environment.Set("BOOTSTRAP", "./build/blueprint/bootstrap.bash")
		cmd.Environment.Set("BUILDDIR", config.SoongOutDir())
		cmd.Environment.Set("GOROOT", "./"+filepath.Join("prebuilts/go", config.HostPrebuiltTag()))
		cmd.Environment.Set("BLUEPRINT_LIST_FILE", filepath.Join(config.FileListDir(), "Android.bp.list"))
		cmd.Environment.Set("NINJA_BUILDDIR", config.OutDir())
		cmd.Environment.Set("SRCDIR", ".")
		cmd.Environment.Set("TOPNAME", "Android.bp")
		cmd.Sandbox = soongSandbox
		cmd.RunAndPrintOrFatal()
	}()
	// Stage 2: check whether the cached environment is still valid; remove
	// the env file to force manifest regeneration when it is not.
	func() {
		ctx.BeginTrace(metrics.RunSoong, "environment check")
		defer ctx.EndTrace()
		envFile := filepath.Join(config.SoongOutDir(), ".soong.environment")
		envTool := filepath.Join(config.SoongOutDir(), ".bootstrap/bin/soong_env")
		if _, err := os.Stat(envFile); err == nil {
			if _, err := os.Stat(envTool); err == nil {
				cmd := Command(ctx, config, "soong_env", envTool, envFile)
				cmd.Sandbox = soongSandbox
				var buf strings.Builder
				cmd.Stdout = &buf
				cmd.Stderr = &buf
				if err := cmd.Run(); err != nil {
					ctx.Verboseln("soong_env failed, forcing manifest regeneration")
					os.Remove(envFile)
				}
				if buf.Len() > 0 {
					ctx.Verboseln(buf.String())
				}
			} else {
				ctx.Verboseln("Missing soong_env tool, forcing manifest regeneration")
				os.Remove(envFile)
			}
		} else if !os.IsNotExist(err) {
			// Bug fix: the format verb was %f (float) for a string path;
			// %s is correct.
			ctx.Fatalf("Failed to stat %s: %v", envFile, err)
		}
	}()
	var cfg microfactory.Config
	cfg.Map("github.com/google/blueprint", "build/blueprint")
	cfg.TrimPath = absPath(ctx, ".")
	// Stage 3: build the minibp and bpglob helper binaries.
	func() {
		ctx.BeginTrace(metrics.RunSoong, "minibp")
		defer ctx.EndTrace()
		minibp := filepath.Join(config.SoongOutDir(), ".minibootstrap/minibp")
		if _, err := microfactory.Build(&cfg, minibp, "github.com/google/blueprint/bootstrap/minibp"); err != nil {
			ctx.Fatalln("Failed to build minibp:", err)
		}
	}()
	func() {
		ctx.BeginTrace(metrics.RunSoong, "bpglob")
		defer ctx.EndTrace()
		bpglob := filepath.Join(config.SoongOutDir(), ".minibootstrap/bpglob")
		if _, err := microfactory.Build(&cfg, bpglob, "github.com/google/blueprint/bootstrap/bpglob"); err != nil {
			ctx.Fatalln("Failed to build bpglob:", err)
		}
	}()
	// Stage 4: run the two bootstrap ninja files, streaming status through
	// a fifo to the ninja reader.
	ninja := func(name, file string) {
		ctx.BeginTrace(metrics.RunSoong, name)
		defer ctx.EndTrace()
		fifo := filepath.Join(config.OutDir(), ".ninja_fifo")
		nr := status.NewNinjaReader(ctx, ctx.Status.StartTool(), fifo)
		defer nr.Close()
		cmd := Command(ctx, config, "soong "+name,
			config.PrebuiltBuildTool("ninja"),
			"-d", "keepdepfile",
			"-w", "dupbuild=err",
			"-j", strconv.Itoa(config.Parallel()),
			"--frontend_file", fifo,
			"-f", filepath.Join(config.SoongOutDir(), file))
		cmd.Environment.Set("SOONG_SANDBOX_SOONG_BUILD", "true")
		cmd.Sandbox = soongSandbox
		cmd.RunAndStreamOrFatal()
	}
	ninja("minibootstrap", ".minibootstrap/build.ninja")
	ninja("bootstrap", ".bootstrap/build.ninja")
}
|
package triangle
import (
"image"
"image/color"
"image/png"
"io"
"os"
"github.com/fogleman/gg"
)
// Wireframe drawing modes accepted by Processor.Wireframe.
const (
	WITHOUT_WIREFRAME = iota
	WITH_WIREFRAME
	WIREFRAME_ONLY
)
// Processor : type with processing options
type Processor struct {
	// BlurRadius is the stack-blur radius applied before edge detection.
	BlurRadius int
	// SobelThreshold is the edge-detection threshold.
	SobelThreshold int
	// PointsThreshold filters edge points by intensity.
	PointsThreshold int
	// MaxPoints caps the number of triangulation points.
	MaxPoints int
	// Wireframe selects WITHOUT_WIREFRAME / WITH_WIREFRAME / WIREFRAME_ONLY.
	Wireframe int
	// Noise > 0 applies a noise pass of that strength to the final image.
	Noise int
	// LineWidth is the stroke width used for wireframe edges.
	LineWidth float64
	// IsSolid draws wireframe-only edges in black instead of sampled color.
	IsSolid bool
	// Grayscale samples triangle colors from the grayscale image.
	Grayscale bool
}
// Process : Triangulate the source image.
//
// The pipeline is: decode -> blur -> grayscale -> Sobel edge detection ->
// edge-point sampling -> Delaunay triangulation -> draw each triangle filled
// and/or stroked per the Wireframe mode -> optional noise pass -> PNG encode
// to the output path. It returns the created file, the triangles, and the
// sampled points.
//
// NOTE(review): the returned *os.File has already been closed by the
// deferred Close when Process returns — callers should not read/write
// through it; confirm whether only the name is used.
func (p *Processor) Process(file io.Reader, output string) (*os.File, []Triangle, []Point, error) {
	src, _, err := image.Decode(file)
	if err != nil {
		return nil, nil, nil, err
	}
	width, height := src.Bounds().Dx(), src.Bounds().Dy()
	// White background canvas.
	ctx := gg.NewContext(width, height)
	ctx.DrawRectangle(0, 0, float64(width), float64(height))
	ctx.SetRGBA(1, 1, 1, 1)
	ctx.Fill()
	delaunay := &Delaunay{}
	img := toNRGBA(src)
	blur := Stackblur(img, uint32(width), uint32(height), uint32(p.BlurRadius))
	gray := Grayscale(blur)
	sobel := SobelFilter(gray, float64(p.SobelThreshold))
	points := GetEdgePoints(sobel, p.PointsThreshold, p.MaxPoints)
	triangles := delaunay.Init(width, height).Insert(points).GetTriangles()
	// Color source: grayscale or original pixels.
	var srcImg *image.NRGBA
	if p.Grayscale {
		srcImg = gray
	} else {
		srcImg = img
	}
	for _, t := range triangles {
		p0, p1, p2 := t.Nodes[0], t.Nodes[1], t.Nodes[2]
		ctx.Push()
		ctx.MoveTo(float64(p0.X), float64(p0.Y))
		ctx.LineTo(float64(p1.X), float64(p1.Y))
		ctx.LineTo(float64(p2.X), float64(p2.Y))
		ctx.LineTo(float64(p0.X), float64(p0.Y))
		// Approximate centroid of the triangle; the pixel there supplies
		// the triangle's fill color.
		cx := float64(p0.X+p1.X+p2.X) * 0.33333
		cy := float64(p0.Y+p1.Y+p2.Y) * 0.33333
		// Index into the 4-bytes-per-pixel NRGBA buffer. (Cleanup: the
		// original had JavaScript-style "| 0" truncations, which are
		// no-ops on Go ints and have been removed.)
		j := (int(cx) + int(cy)*width) * 4
		r, g, b := srcImg.Pix[j], srcImg.Pix[j+1], srcImg.Pix[j+2]
		var lineColor color.RGBA
		if p.IsSolid {
			lineColor = color.RGBA{R: 0, G: 0, B: 0, A: 255}
		} else {
			lineColor = color.RGBA{R: r, G: g, B: b, A: 255}
		}
		switch p.Wireframe {
		case WITHOUT_WIREFRAME:
			ctx.SetFillStyle(gg.NewSolidPattern(color.RGBA{R: r, G: g, B: b, A: 255}))
			ctx.FillPreserve()
			ctx.Fill()
		case WITH_WIREFRAME:
			ctx.SetFillStyle(gg.NewSolidPattern(color.RGBA{R: r, G: g, B: b, A: 255}))
			ctx.SetStrokeStyle(gg.NewSolidPattern(color.RGBA{R: 0, G: 0, B: 0, A: 20}))
			ctx.SetLineWidth(p.LineWidth)
			ctx.FillPreserve()
			ctx.StrokePreserve()
			ctx.Stroke()
		case WIREFRAME_ONLY:
			ctx.SetStrokeStyle(gg.NewSolidPattern(lineColor))
			ctx.SetLineWidth(p.LineWidth)
			ctx.StrokePreserve()
			ctx.Stroke()
		}
		ctx.Pop()
	}
	fq, err := os.Create(output)
	if err != nil {
		return nil, nil, nil, err
	}
	defer fq.Close()
	newimg := ctx.Image()
	// Apply a noise on the final image. This will give it a more artistic look.
	if p.Noise > 0 {
		noisyImg := Noise(p.Noise, newimg, newimg.Bounds().Dx(), newimg.Bounds().Dy())
		if err = png.Encode(fq, noisyImg); err != nil {
			return nil, nil, nil, err
		}
	} else {
		if err = png.Encode(fq, newimg); err != nil {
			return nil, nil, nil, err
		}
	}
	return fq, triangles, points, err
}
// toNRGBA converts any image type to *image.NRGBA with min-point at (0, 0).
func toNRGBA(img image.Image) *image.NRGBA {
srcBounds := img.Bounds()
if srcBounds.Min.X == 0 && srcBounds.Min.Y == 0 {
if src0, ok := img.(*image.NRGBA); ok {
return src0
}
}
srcMinX := srcBounds.Min.X
srcMinY := srcBounds.Min.Y
dstBounds := srcBounds.Sub(srcBounds.Min)
dstW := dstBounds.Dx()
dstH := dstBounds.Dy()
dst := image.NewNRGBA(dstBounds)
switch src := img.(type) {
case *image.NRGBA:
rowSize := srcBounds.Dx() * 4
for dstY := 0; dstY < dstH; dstY++ {
di := dst.PixOffset(0, dstY)
si := src.PixOffset(srcMinX, srcMinY+dstY)
for dstX := 0; dstX < dstW; dstX++ {
copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize])
}
}
case *image.YCbCr:
for dstY := 0; dstY < dstH; dstY++ {
di := dst.PixOffset(0, dstY)
for dstX := 0; dstX < dstW; dstX++ {
srcX := srcMinX + dstX
srcY := srcMinY + dstY
siy := src.YOffset(srcX, srcY)
sic := src.COffset(srcX, srcY)
r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic])
dst.Pix[di+0] = r
dst.Pix[di+1] = g
dst.Pix[di+2] = b
dst.Pix[di+3] = 0xff
di += 4
}
}
default:
for dstY := 0; dstY < dstH; dstY++ {
di := dst.PixOffset(0, dstY)
for dstX := 0; dstX < dstW; dstX++ {
c := color.NRGBAModel.Convert(img.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA)
dst.Pix[di+0] = c.R
dst.Pix[di+1] = c.G
dst.Pix[di+2] = c.B
dst.Pix[di+3] = c.A
di += 4
}
}
}
return dst
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.