text
stringlengths
11
4.05M
// Graphics project Graphics.go package Graphics import ( // "errors" "fmt" // gl "github.com/chsc/gogl/gl21" // "github.com/Jragonmiris/mathgl" "github.com/go-gl/gl/v3.2-core/gl" Image "image" "image/draw" _ "image/jpeg" _ "image/png" "os" ) type image struct { data *Image.Image height, width int textureId uint32 } type Drawable interface { Draw() } func (img image) Draw() { if img.data == nil { return } // gl.ActiveTexture(texId2) gl.BindTexture(gl.TEXTURE_2D, img.textureId) } func (img image) GetUVFromPosition(x, y float32) (u, v float32) { u = x / float32(img.width) v = y / float32(img.height) return } func NewImage(path string) (retImg image, err error) { imgFile, err := os.Open(path) if err != nil { return image{}, err } img, _, err := Image.Decode(imgFile) if err != nil { return image{}, err } rgba := Image.NewRGBA(img.Bounds()) if rgba.Stride != rgba.Rect.Size().X*4 { return image{}, fmt.Errorf("unsupported stride") } draw.Draw(rgba, rgba.Bounds(), img, Image.Point{0, 0}, draw.Src) var texture uint32 gl.GenTextures(1, &texture) // gl.ActiveTexture(gl.TEXTURE1) gl.BindTexture(gl.TEXTURE_2D, texture) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_R, gl.CLAMP_TO_EDGE) gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(rgba.Rect.Size().X), int32(rgba.Rect.Size().Y), 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba.Pix)) if gl.GetError() != gl.NO_ERROR { return image{}, fmt.Errorf("Failed to load texture: " + path) } return image{data: &img, textureId: texture, width: rgba.Rect.Size().X, height: rgba.Rect.Size().Y}, nil }
package cf type UserContext struct { ApiUrl string Username string Password string Org string Space string LoginFlags string } func NewUserContext(apiUrl, username, password, org, space, loginFlags string) UserContext { return UserContext{ ApiUrl: apiUrl, Username: username, Password: password, Org: org, Space: space, LoginFlags: loginFlags, } }
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package delegate

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/lex"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
)

// delegateShowCreate rewrites SHOW CREATE into an internal SQL query that
// concatenates the table's create statement with its (possibly
// multi-region-filtered) zone configurations. The %[n] verbs are filled in by
// showTableDetails; see its doc comment for the parameter contract.
func (d *delegator) delegateShowCreate(n *tree.ShowCreate) (tree.Statement, error) {
	sqltelemetry.IncrementShowCounter(sqltelemetry.Create)
	const showCreateQuery = ` WITH zone_configs AS ( SELECT string_agg( raw_config_sql, e';\n' ORDER BY partition_name, index_name ) AS raw, string_agg( crdb_internal.filter_multiregion_fields_from_zone_config_sql(raw_config_sql), e';\n' ORDER BY partition_name, index_name ) AS mr FROM crdb_internal.zones WHERE database_name = %[1]s AND table_name = %[2]s AND raw_config_yaml IS NOT NULL AND raw_config_sql IS NOT NULL ) SELECT %[3]s AS table_name, concat(create_statement, CASE WHEN NOT has_partitions THEN NULL WHEN is_multi_region THEN CASE WHEN (SELECT mr FROM zone_configs) IS NULL THEN NULL ELSE concat(e';\n', (SELECT mr FROM zone_configs)) END WHEN (SELECT raw FROM zone_configs) IS NULL THEN e'\n-- Warning: Partitioned table with no zone configurations.' 
ELSE concat(e';\n', (SELECT raw FROM zone_configs)) END ) AS create_statement FROM %[4]s.crdb_internal.create_statements WHERE descriptor_id = %[6]d ORDER BY 1, 2;`
	return d.showTableDetails(n.Name, showCreateQuery)
}

// delegateShowIndexes rewrites SHOW INDEXES into a query against
// information_schema.statistics, optionally joining pg_indexes when index
// comments were requested.
func (d *delegator) delegateShowIndexes(n *tree.ShowIndexes) (tree.Statement, error) {
	sqltelemetry.IncrementShowCounter(sqltelemetry.Indexes)
	getIndexesQuery := ` SELECT s.table_name, s.index_name, non_unique::BOOL, seq_in_index, column_name, direction, storing::BOOL, implicit::BOOL`
	if n.WithComment {
		getIndexesQuery += `, obj_description(pg_indexes.crdb_oid) AS comment`
	}
	getIndexesQuery += ` FROM %[4]s.information_schema.statistics AS s`
	if n.WithComment {
		getIndexesQuery += ` LEFT JOIN pg_indexes ON pg_indexes.tablename = s.table_name AND pg_indexes.indexname = s.index_name`
	}
	getIndexesQuery += ` WHERE table_catalog=%[1]s AND table_schema=%[5]s AND table_name=%[2]s ORDER BY 1, 2, 3, 4, 5, 6, 7, 8;`
	return d.showTableDetails(n.Table, getIndexesQuery)
}

// delegateShowColumns rewrites SHOW COLUMNS into a query over
// information_schema.columns, aggregating per-column index membership and
// optionally joining pg_attribute for column comments.
// NOTE(review): unlike the sibling delegates, this one does not call
// sqltelemetry.IncrementShowCounter — confirm whether that omission is
// intentional.
func (d *delegator) delegateShowColumns(n *tree.ShowColumns) (tree.Statement, error) {
	getColumnsQuery := ` SELECT column_name AS column_name, crdb_sql_type AS data_type, is_nullable::BOOL, column_default, generation_expression, IF(inames[1] IS NULL, ARRAY[]:::STRING[], inames) AS indices, is_hidden::BOOL`
	if n.WithComment {
		getColumnsQuery += `, col_description(%[6]d, attnum) AS comment`
	}
	getColumnsQuery += ` FROM ( SELECT column_name, crdb_sql_type, is_nullable, column_default, generation_expression, ordinal_position, is_hidden, array_agg(index_name ORDER BY index_name) AS inames FROM ( SELECT column_name, crdb_sql_type, is_nullable, column_default, generation_expression, ordinal_position, is_hidden FROM %[4]s.information_schema.columns WHERE (length(%[1]s)=0 OR table_catalog=%[1]s) AND table_schema=%[5]s AND table_name=%[2]s ) LEFT OUTER JOIN ( SELECT column_name, index_name FROM %[4]s.information_schema.statistics WHERE (length(%[1]s)=0 OR table_catalog=%[1]s) AND table_schema=%[5]s AND 
table_name=%[2]s ) USING(column_name) GROUP BY column_name, crdb_sql_type, is_nullable, column_default, generation_expression, ordinal_position, is_hidden )`
	if n.WithComment {
		getColumnsQuery += ` LEFT OUTER JOIN pg_attribute ON column_name = pg_attribute.attname AND attrelid = %[6]d`
	}
	getColumnsQuery += ` ORDER BY ordinal_position, 1, 2, 3, 4, 5, 6, 7;`
	return d.showTableDetails(n.Table, getColumnsQuery)
}

// delegateShowConstraints rewrites SHOW CONSTRAINTS into a join over the
// pg_catalog tables, mapping the single-letter contype codes to readable
// labels.
func (d *delegator) delegateShowConstraints(n *tree.ShowConstraints) (tree.Statement, error) {
	sqltelemetry.IncrementShowCounter(sqltelemetry.Constraints)
	const getConstraintsQuery = ` SELECT t.relname AS table_name, c.conname AS constraint_name, CASE c.contype WHEN 'p' THEN 'PRIMARY KEY' WHEN 'u' THEN 'UNIQUE' WHEN 'c' THEN 'CHECK' WHEN 'f' THEN 'FOREIGN KEY' ELSE c.contype END AS constraint_type, c.condef AS details, c.convalidated AS validated FROM %[4]s.pg_catalog.pg_class t, %[4]s.pg_catalog.pg_namespace n, %[4]s.pg_catalog.pg_constraint c WHERE t.relname = %[2]s AND n.nspname = %[5]s AND t.relnamespace = n.oid AND t.oid = c.conrelid ORDER BY 1, 2, 3, 4, 5`
	return d.showTableDetails(n.Table, getConstraintsQuery)
}

// delegateShowCreateAllTables rewrites SHOW CREATE ALL TABLES into a call to
// the crdb_internal.show_create_all_tables builtin for the current database.
func (d *delegator) delegateShowCreateAllTables() (tree.Statement, error) {
	sqltelemetry.IncrementShowCounter(sqltelemetry.Create)
	const showCreateAllTablesQuery = ` SELECT crdb_internal.show_create_all_tables(%[1]s) AS create_statement; `
	databaseLiteral := d.evalCtx.SessionData.Database
	query := fmt.Sprintf(showCreateAllTablesQuery,
		lex.EscapeSQLString(databaseLiteral),
	)
	return parse(query)
}

// showTableDetails returns the AST of a query which extracts information about
// the given table using the given query patterns in SQL. The query pattern must
// accept the following formatting parameters:
// %[1]s the database name as SQL string literal.
// %[2]s the unqualified table name as SQL string literal.
// %[3]s the given table name as SQL string literal.
// %[4]s the database name as SQL identifier.
// %[5]s the schema name as SQL string literal.
// %[6]s the table ID.
func (d *delegator) showTableDetails(
	name *tree.UnresolvedObjectName, query string,
) (tree.Statement, error) {
	// We avoid the cache so that we can observe the details without
	// taking a lease, like other SHOW commands.
	flags := cat.Flags{AvoidDescriptorCaches: true, NoTableStats: true}
	tn := name.ToTableName()
	// Resolving the data source also gives us its fully-qualified name,
	// which supplies most of the format arguments below.
	dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, flags, &tn)
	if err != nil {
		return nil, err
	}
	// Any privilege on the table suffices to look at its details.
	if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil {
		return nil, err
	}
	fullQuery := fmt.Sprintf(query,
		lex.EscapeSQLString(resName.Catalog()),
		lex.EscapeSQLString(resName.Table()),
		lex.EscapeSQLString(resName.String()),
		resName.CatalogName.String(), // note: CatalogName.String() != Catalog()
		lex.EscapeSQLString(resName.Schema()),
		dataSource.PostgresDescriptorID(),
	)
	return parse(fullQuery)
}
package memory import ( "math/rand" "time" "github.com/elhamza90/lifelog/internal/domain" "github.com/elhamza90/lifelog/internal/store" ) func generateRandomTagID() domain.TagID { rand.Seed(time.Now().UnixNano()) res := rand.Intn(10000) return domain.TagID(res) } // FindTagByID searches for a tag with the given ID and returns it. // It returns ErrTagNotFound if no tag was found. func (repo Repository) FindTagByID(id domain.TagID) (domain.Tag, error) { for _, t := range repo.Tags { if t.ID == id { return t, nil } } return domain.Tag{}, store.ErrTagNotFound } // FindTagByName searches for a tag with the given name and returns it. // It returns an Empty Tag if not found. func (repo Repository) FindTagByName(n string) (domain.Tag, error) { for _, t := range repo.Tags { if t.Name == n { return t, nil } } return domain.Tag{}, store.ErrTagNotFound } // SaveTag stores the given Tag in memory and returns created tag func (repo Repository) SaveTag(t domain.Tag) (domain.TagID, error) { t.ID = generateRandomTagID() repo.Tags[t.ID] = t return t.ID, nil } // FindAllTags returns all stored tags in memory func (repo Repository) FindAllTags() ([]domain.Tag, error) { tags := []domain.Tag{} for _, t := range repo.Tags { tags = append(tags, t) } return tags, nil } // DeleteTag deletes tag from memory func (repo Repository) DeleteTag(id domain.TagID) error { if _, ok := repo.Tags[id]; !ok { return store.ErrTagNotFound } delete(repo.Tags, id) return nil } // EditTag edits given tag in memory func (repo Repository) EditTag(t domain.Tag) error { repo.Tags[t.ID] = t return nil }
package main

import "fmt"

// main demonstrates growing a string slice with append.
func main() {
	data := []string{"var1", "var2"}
	fmt.Println(data)

	// Add one more element and print the grown slice.
	data = append(data, "new value")
	fmt.Println(data)
	// Check loops Dir to check how to iterate over Slice
}
package filter

import (
	"S2Y/pkg/s2y/app/rest"
	"S2Y/pkg/s2y/app/rest/management"
	"github.com/emicklei/go-restful"
	"github.com/pkg/errors"
	"regexp"
)

// ErrUnauthorizedRequest is the error written to clients whenever a request
// carries no usable bearer token.
var (
	ErrUnauthorizedRequest = errors.New("access token required")
)

// authHeaderFormat matches an "Authorization: Bearer <token>" header value
// and captures the token.
var authHeaderFormat = regexp.MustCompile("Bearer (.+)")

// AuthorizationHeaderFilter is a go-restful filter that rejects requests
// lacking a bearer token and stashes the token for downstream handlers.
type AuthorizationHeaderFilter struct{}

// NewAuthorizationHeaderFilter returns a ready-to-use filter.
func NewAuthorizationHeaderFilter() *AuthorizationHeaderFilter {
	return &AuthorizationHeaderFilter{}
}

// Apply validates the Authorization header; on failure it writes an
// unauthorized response and stops the filter chain, otherwise it passes the
// request along.
func (f *AuthorizationHeaderFilter) Apply(request *restful.Request, response *restful.Response, chain *restful.FilterChain) {
	if err := f.validateHeader(request); err != nil {
		// The specific validation error is discarded on purpose: clients
		// always see the generic ErrUnauthorizedRequest.
		rest.WriteUnauthorizedError(response, ErrUnauthorizedRequest)
		return
	}
	chain.ProcessFilter(request, response)
}

// validateHeader extracts the bearer token and, when present, records it on
// the request via the management package so later handlers can read it.
func (f *AuthorizationHeaderFilter) validateHeader(request *restful.Request) error {
	token := getBearerToken(request)
	if token == "" {
		return ErrUnauthorizedRequest
	}
	management.SetToken(request, token)
	return nil
}

// getBearerToken returns the token portion of an "Authorization: Bearer x"
// header, or "" when the header is absent or malformed.
func getBearerToken(request *restful.Request) string {
	authHeaderParam := request.HeaderParameter("Authorization")
	if authHeaderParam == "" {
		return ""
	}
	strs := authHeaderFormat.FindStringSubmatch(authHeaderParam)
	if len(strs) != 2 {
		return ""
	}
	return strs[1]
}
/* Copyright 2021 The KodeRover Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "fmt" "gopkg.in/mgo.v2/bson" ) func ValidateObjectId(id string) (oid bson.ObjectId, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) } }() oid = bson.ObjectIdHex(id) return }
package evaluator

import (
	"os"
	"path/filepath"

	"../object"
)

// logFolder is the directory that the log builtins write into; it defaults to
// the current working directory and can be changed via log.set_folder.
var logFolder = "."

// stdLog is the interpreter's built-in "log" record. It exposes three
// builtins:
//
//	info(msg)       - appends msg to <logFolder>/info.log
//	error(msg)      - appends msg to <logFolder>/error.log
//	set_folder(dir) - redirects subsequent log output to dir
var stdLog = object.Record{
	Stoned: true,
	Values: map[string]object.Object{
		"info": object.BuiltinFunction(func(args ...object.Object) object.Object {
			if err := checkArgLength("log.info", args, 1); err != nil {
				return err
			}
			// Append-create the info log; 0644 so other processes can read it.
			f, err := os.OpenFile(filepath.Join(logFolder, "info.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
			if err != nil {
				return object.Error("log.info: " + err.Error())
			}
			// Write/close errors are deliberately ignored: logging is
			// best-effort and must not fail the interpreted program.
			_, _ = f.WriteString(args[0].String())
			_ = f.Close()
			return object.Nil{}
		}),
		"error": object.BuiltinFunction(func(args ...object.Object) object.Object {
			if err := checkArgLength("log.error", args, 1); err != nil {
				return err
			}
			f, err := os.OpenFile(filepath.Join(logFolder, "error.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
			if err != nil {
				return object.Error("log.error: " + err.Error())
			}
			// Best-effort write, as in log.info above.
			_, _ = f.WriteString(args[0].String())
			_ = f.Close()
			return object.Nil{}
		}),
		"set_folder": object.BuiltinFunction(func(args ...object.Object) object.Object {
			if err := checkArgLength("log.set_folder", args, 1); err != nil {
				return err
			}
			if err := checkFirstArgType("log.set_folder", args[0], object.STRING); err != nil {
				return err
			}
			// NOTE(review): the folder is not validated to exist here; the
			// next info/error call will surface any open error instead.
			logFolder = args[0].String()
			return object.Nil{}
		}),
	},
}
package handlers import ( "encoding/json" "net/http" "net/http/httptest" "reflect" "testing" ) func TestGet(t *testing.T) { w := httptest.NewRecorder() r := httptest.NewRequest(http.MethodGet, "/", nil) Get(w, r) if w.Code != http.StatusOK { t.Errorf("Expected %d, got %d", http.StatusOK, w.Code) } // t.Logf(w.Body.String()) got := Person{} err := json.NewDecoder(w.Body).Decode(&got) if err != nil { t.Errorf("Couldn't read body") } want := Person{"Saul", 20} if !reflect.DeepEqual(got, want) { t.Errorf("Expected %v, got %v", want, got) } }
// Package pubsub contains utilities for handling Google Cloud Pub/Sub events. package pubsub import ( "fmt" "regexp" "time" "cloud.google.com/go/functions/metadata" "cloud.google.com/go/pubsub" "github.com/GoogleCloudPlatform/functions-framework-go/internal/fftypes" ) const ( pubsubEventType = "google.pubsub.topic.publish" pubsubMessageType = "type.googleapis.com/google.pubusb.v1.PubsubMessage" pubsubService = "pubsub.googleapis.com" ) // LegacyPushSubscriptionEvent is the event payload for legacy Cloud Pub/Sub // push subscription triggers (https://cloud.google.com/functions/docs/calling/pubsub#legacy_cloud_pubsub_triggers). // This matched the event payload that is sent by Pub/Sub to HTTP push // subscription endpoints (https://cloud.google.com/pubsub/docs/push#receiving_messages). type LegacyPushSubscriptionEvent struct { Subscription string `json:"subscription"` Message `json:"message"` } // Message is a pubsub.Message but with the correct JSON tag for the // message ID field that matches https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage type Message struct { pubsub.Message // The pubsub libary's Message.Id field (https://pkg.go.dev/cloud.google.com/go/internal/pubsub#Message) // doesn't have the correct JSON tag (it serializes to "id" instead of // "messageId"), so use this field to capture the JSON field with key // "messageId". IdFromJSON string `json:"messageId"` } // ExtractTopicFromRequestPath extracts a Pub/Sub topic from a URL request path. 
func ExtractTopicFromRequestPath(path string) (string, error) { re := regexp.MustCompile(`(projects\/[^/?]+\/topics\/[^/?]+)/*`) matches := re.FindStringSubmatch(path) if matches == nil { return "", fmt.Errorf("failed to extract Pub/Sub topic name from the URL request path: %q, configure your subscription's push endpoint to use the following path pattern: 'projects/PROJECT_NAME/topics/TOPIC_NAME'", path) } // Index 0 is the entire input string matched, index 1 is the first submatch return matches[1], nil } // ToBackgroundEvent converts the event to the standard BackgroundEvent format // for Background Functions. func (e *LegacyPushSubscriptionEvent) ToBackgroundEvent(topic string) *fftypes.BackgroundEvent { timestamp := e.Message.PublishTime if timestamp.IsZero() { timestamp = time.Now() } return &fftypes.BackgroundEvent{ Metadata: &metadata.Metadata{ EventID: e.IdFromJSON, Timestamp: timestamp, EventType: pubsubEventType, Resource: &metadata.Resource{ Name: topic, Type: pubsubMessageType, Service: pubsubService, }, }, Data: map[string]interface{}{ "@type": pubsubMessageType, "data": e.Message.Data, "attributes": e.Message.Attributes, }, } }
package main

import (
	"fmt"
	"unicode"
)

func main() {
	s := "a ac d s"
	fmt.Println(remove(s))
}

// remove collapses every run of consecutive whitespace bytes in a to a single
// character — the last byte of the run, matching the behavior of the previous
// shift-based implementation — in one O(n) pass. The old version called
// copy() for every removed byte, which was O(n^2) on whitespace-heavy input.
func remove(a string) string {
	s := []byte(a)
	if len(s) <= 1 {
		return a
	}
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		// Skip a whitespace byte that is followed by more whitespace; only
		// the final byte of each run survives.
		if i+1 < len(s) && unicode.IsSpace(rune(s[i])) && unicode.IsSpace(rune(s[i+1])) {
			continue
		}
		out = append(out, s[i])
	}
	return string(out)
}
package camo

import (
	"context"
	"hash/adler32"
	"io"
	"sync"
)

// DefaultMTU TODO
const DefaultMTU = 1400

const (
	headerClientID = "camo-client-id"
	headerNoise    = "camo-noise"
)

// noisePadding is a fixed pool of filler characters; getNoisePadding slices a
// pseudo-random-length prefix out of it.
const noisePadding = "BYLtpGfhBnrxe2rC7rbZ5QMHMMIjcMeThMI309QI5Zewv9OD1UNhie2ZPmIEuJDeKeQboeo5ClAwLusaKasWVLIGHkJmY3l0YP2dsoT1MyPSLqb7bAyhetxywAWNzDif"

// hash32 is an integer mixing function.
// code from https://gist.github.com/badboy/6267743
func hash32(a uint32) uint32 {
	a = (a + 0x7ed55d16) + (a << 12)
	a = (a ^ 0xc761c23c) ^ (a >> 19)
	a = (a + 0x165667b1) + (a << 5)
	a = (a + 0xd3a2646c) ^ (a << 9)
	a = (a + 0xfd7046c5) + (a << 3)
	a = (a ^ 0xb55a4f09) ^ (a >> 16)
	return a
}

// getNoisePadding derives a deterministic, non-empty padding string from the
// noise value and the URL, so identical inputs always pad identically.
func getNoisePadding(noise int, url string) string {
	size := hash32(uint32(noise)+adler32.Checksum([]byte(url))) % uint32(len(noisePadding))
	if size == 0 {
		size = 1
	}
	return noisePadding[:size]
}

// packetBuffer is a reusable byte buffer for one packet.
type packetBuffer struct {
	Data []byte
}

// Reset restores Data to the buffer's full capacity so it can be reused for
// the next read.
func (p *packetBuffer) Reset() {
	p.Data = p.Data[:cap(p.Data)]
}

// bufferPool hands out and reclaims packet buffers.
type bufferPool interface {
	getBuffer() *packetBuffer
	freeBuffer(*packetBuffer)
}

type (
	// readPacketHandler receives each packet read from rw. Returning true
	// means the handler took ownership of the buffer; false means serveIO
	// frees it.
	readPacketHandler func(done <-chan struct{}, pkt *packetBuffer) bool
	// postWritePacketHandler is invoked after every write attempt with the
	// write error (nil on success).
	postWritePacketHandler func(done <-chan struct{}, err error)
)

// serveIO pumps packets in both directions over rw until ctx is canceled, rw
// fails, or toWrite is closed. Reads happen on the calling goroutine; writes
// happen on a dedicated goroutine fed by toWrite. The first error to occur
// (captured once via exitOnce) closes rw and cancels the context, which in
// turn unwinds the other goroutines; the statement ordering here is
// deliberate and order-sensitive.
func serveIO(ctx context.Context, rw io.ReadWriteCloser, bp bufferPool, readHandler readPacketHandler, toWrite <-chan *packetBuffer, postWriteHandler postWritePacketHandler) (err error) {
	ctx, cancel := context.WithCancel(ctx)

	var exitOnce sync.Once
	// exit records the first terminal error, closes rw (unblocking any
	// in-flight Read/Write) and cancels the derived context.
	exit := func(e error) {
		exitOnce.Do(func() {
			err = e
			rw.Close()
			cancel()
		})
	}

	var wg sync.WaitGroup

	// Watcher goroutine: translate external cancellation into exit().
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done()
		exit(ctx.Err())
	}()

	// Writer goroutine: drain toWrite until it is closed or ctx is done.
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := ctx.Done()
		for {
			select {
			case p, ok := <-toWrite:
				if !ok {
					return
				}
				_, e := rw.Write(p.Data)
				// The buffer is always returned to the pool, success or not.
				bp.freeBuffer(p)
				if postWriteHandler != nil {
					postWriteHandler(done, e)
				}
				if e != nil {
					exit(e)
					return
				}
			case <-done:
				return
			}
		}
	}()

	// Read loop (current goroutine): hand each packet to readHandler, which
	// may take ownership of the buffer.
	done := ctx.Done()
	for {
		p := bp.getBuffer()
		n, e := rw.Read(p.Data)
		if n > 0 {
			p.Data = p.Data[:n]
			ok := readHandler(done, p)
			if !ok {
				bp.freeBuffer(p)
			}
		} else {
			bp.freeBuffer(p)
		}
		if e != nil {
			exit(e)
			break
		}
	}
	wg.Wait()
	// err was set by the first exit() call.
	return err
}
package libguestfs import ( log "github.com/sirupsen/logrus" "os" "os/exec" ) func SparsifyImage(image string) error { args := []string{"--in-place", "-v", "-x", image} c := exec.Command("virt-sparsify", args...) os.Setenv("LIBGUESTFS_BACKEND", "direct") o, err := c.CombinedOutput() if err != nil { log.Errorf("Unable to run virt-sparsify: %v", string(o)) return err } return nil }
package models

// Click records a single click event on a link.
type Click struct {
	// NOTE(review): the JSON key "processed" for the Link field looks
	// inconsistent with the field name — confirm against the API consumers
	// before changing either side.
	Link      string `json:"processed"`
	Timestamp string `json:"timestamp"`
}
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"regexp"

	"gophr.pm/gocql/gocql@3ac1aabebaf2705c6f695d4ef2c25ab6239e88b3"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common/config"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common/github"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common/models"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common/semver"
	"gophr.pm/skeswa/gophr@035e5f373426d6fe40f9cd89a615fffedca067fe/common/subv"
)

// Capture-group indices of the three package-request regexes below. The
// author/repo indices are shared; the remaining indices are specific to the
// ref-style and version-style request forms.
const (
	packageRequestRegexIndexAuthor                         = 1
	packageRequestRegexIndexRepo                           = 2
	barePackageRequestRegexIndexSubpath                    = 3
	packageRefRequestRegexIndexRef                         = 3
	packageRefRequestRegexIndexSubpath                     = 4
	packageVersionRequestRegexIndexSemverPrefix            = 3
	packageVersionRequestRegexIndexSemverMajorVersion      = 4
	packageVersionRequestRegexIndexSemverMinorVersion      = 5
	packageVersionRequestRegexIndexSemverPatchVersion      = 6
	packageVersionRequestRegexIndexSemverPrereleaseLabel   = 7
	packageVersionRequestRegexIndexSemverPrereleaseVersion = 8
	packageVersionRequestRegexIndexSemverSuffix            = 9
	packageVersionRequestRegexIndexSubpath                 = 10
)

// Protocol constants: go-get form values, git smart-HTTP sub-paths/headers,
// and the regex templates the request parsers are built from.
const (
	formKeyGoGet                = "go-get"
	formValueGoGet              = "1"
	contentTypeHTML             = "text/html"
	subPathRegexStr             = `((?:\/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)`
	userRepoRegexStr            = `^\/([a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\/([a-zA-Z0-9\.\-_]+)`
	masterGitRefLabel           = "master"
	someFakeGitTagRef           = "refs/tags/thisisnotathinginanyrepo"
	gitRefsInfoSubPath          = "/info/refs"
	httpLocationHeader          = "Location"
	refSelectorRegexStr         = "([a-fA-F0-9]{40})"
	gitUploadPackSubPath        = "/git-upload-pack"
	httpContentTypeHeader       = "Content-Type"
	packagePageURLTemplate      = "https://%s/#/packages/%s/%s"
	contentTypeGitUploadPack    = "application/x-git-upload-pack-advertisement"
	githubUploadPackURLTemplate = "https://github.com/%s/%s/git-upload-pack"
	packageRequestRegexTemplate = `%s(?:@%s)%s$`
	versionSelectorRegexTemplate =
	`([\%c\%c]?)([0-9]+)(?:\.([0-9]+|%c))?(?:\.([0-9]+|%c))?(?:\-([a-zA-Z0-9\-_]+[a-zA-Z0-9])(?:\.([0-9]+|%c))?)?([\%c\%c]?)`
	barePackageRequestRegexTemplate = `%s%s$`
)

var (
	// goGetMetadataTemplate is the HTML that satisfies the "go get" meta-tag
	// protocol (go-import / go-source).
	goGetMetadataTemplate = `<html><head><meta name="go-import" content="%s git %s://%s"><meta name="go-source" content="%s _ https://%s/tree/%s{/dir} https://%s/blob/%s{/dir}/{file}#L{line}"></head><body>go get %s</body></html>`
	// The three request regexes, assembled from the templates above and
	// compiled once at init.
	versionSelectorRegexStr = fmt.Sprintf(
		versionSelectorRegexTemplate,
		semver.SemverSelectorTildeChar,
		semver.SemverSelectorCaratChar,
		semver.SemverSelectorWildcardChar,
		semver.SemverSelectorWildcardChar,
		semver.SemverSelectorWildcardChar,
		semver.SemverSelectorLessThanChar,
		semver.SemverSelectorGreaterThanChar,
	)
	packageRefRequestRegexStr = fmt.Sprintf(
		packageRequestRegexTemplate,
		userRepoRegexStr,
		refSelectorRegexStr,
		subPathRegexStr,
	)
	barePackageRequestRegexStr = fmt.Sprintf(
		barePackageRequestRegexTemplate,
		userRepoRegexStr,
		subPathRegexStr,
	)
	packageVersionRequestRegexStr = fmt.Sprintf(
		packageRequestRegexTemplate,
		userRepoRegexStr,
		versionSelectorRegexStr,
		subPathRegexStr,
	)
	packageRefRequestRegex     = regexp.MustCompile(packageRefRequestRegexStr)
	barePackageRequestRegex    = regexp.MustCompile(barePackageRequestRegexStr)
	packageVersionRequestRegex = regexp.MustCompile(packageVersionRequestRegexStr)
)

// PackageRequest is stuct that standardizes the output of all the scenarios
// through which a package may be requested. PackageRequest is essentially a
// helper struct to move data between the sub-functions of
// RespondToPackageRequest and RespondToPackageRequest itself.
type PackageRequest struct {
	Repo       string
	Author     string
	Subpath    string
	RefHash    string
	RefsData   []byte
	Selector   string
	GithubTree string
}

// RespondToPackageRequest processes an incoming request, evaluates whether is a
// correctly formatted request for package-related data, and either responds
// appropriately or returns an error indicating what went wrong.
func RespondToPackageRequest(
	config *config.Config,
	session *gocql.Session,
	creds *config.Credentials,
	context RequestContext,
	req *http.Request,
	res http.ResponseWriter,
) error {
	var (
		err            error
		packageRequest PackageRequest
	)

	// Attempt every request parsing strategy in order or popularity
	packageRequest, err = processPackageVersionRequest(context, req)
	if err != nil {
		// NOTE(review): the local names here appear swapped — refReqErr
		// holds the *version*-request error and verReqErr the *ref*-request
		// error. Behavior is unchanged (both are passed to the error
		// constructor) but the labels are misleading; confirm before renaming.
		refReqErr := err
		packageRequest, err = processPackageRefRequest(context, req)
		if err != nil {
			verReqErr := err
			packageRequest, err = processBarePackageRequest(context, req)
			if err != nil {
				return NewInvalidPackageRequestError(
					req.URL.Path, refReqErr, verReqErr, err,
				)
			}
		}
	}

	switch packageRequest.Subpath {
	case gitUploadPackSubPath:
		// git-upload-pack is proxied straight to Github via redirect.
		log.Printf(
			"[%s] Responding with the Github upload pack permanent redirect\n",
			context.RequestID,
		)
		res.Header().Set(
			httpLocationHeader,
			fmt.Sprintf(
				githubUploadPackURLTemplate,
				packageRequest.Author,
				packageRequest.Repo,
			),
		)
		res.WriteHeader(http.StatusMovedPermanently)
	case gitRefsInfoSubPath:
		// Serve the (possibly rewritten) refs advertisement directly.
		log.Printf(
			"[%s] Responding with the git refs pulled from Github\n",
			context.RequestID,
		)
		res.Header().Set(httpContentTypeHeader, contentTypeGitUploadPack)
		res.Write(packageRequest.RefsData)
	default:
		if req.FormValue(formKeyGoGet) == formValueGoGet {
			log.Printf(
				"[%s] Responding with html formatted for go get\n",
				context.RequestID,
			)
			// Without blocking, record this event as a download in the database.
			go recordDownload(
				session,
				context,
				packageRequest.Author,
				packageRequest.Repo,
				packageRequest.GithubTree)
			// Only run the sub-versioning if its completely necessary.
			if !models.IsPackageArchived(
				session,
				packageRequest.Author,
				packageRequest.Repo,
				packageRequest.GithubTree) {
				log.Printf("Package %s/%s@%s has not been archived.\n",
					packageRequest.Author,
					packageRequest.Repo,
					packageRequest.GithubTree)
				// Create a model with the author and repo that we already have.
				packageModel := &models.PackageModel{
					Author: &packageRequest.Author,
					Repo:   &packageRequest.Repo,
				}
				if err := subv.SubVersionPackageModel(
					config,
					session,
					creds,
					packageModel,
					packageRequest.RefHash,
					config.ConstructionZonePath); err != nil {
					log.Println("Sub-versioning failed:", err)
					return err
				}
				log.Println("Finished sub versioning")
			}
			var (
				repo     = github.BuildNewGitHubRepoName(packageRequest.Author, packageRequest.Repo)
				author   = github.GitHubGophrPackageOrgName
				metaData = []byte(generateGoGetMetadata(
					config,
					author,
					repo,
					packageRequest.Selector,
					packageRequest.Subpath,
					packageRequest.GithubTree,
				))
			)
			res.Header().Set(httpContentTypeHeader, contentTypeHTML)
			res.Write(metaData)
		} else {
			// Humans get bounced to the package's web page.
			log.Printf(
				"[%s] Responding with a permanent redirect to the gophr package webpage\n",
				context.RequestID,
			)
			res.Header().Set(
				httpLocationHeader,
				fmt.Sprintf(
					packagePageURLTemplate,
					req.URL.Host,
					packageRequest.Author,
					packageRequest.Repo,
				),
			)
			res.WriteHeader(http.StatusMovedPermanently)
		}
	}
	return nil
}

// processPackageRefRequest is a sub-function of RespondToPackageRequest that
// parses and simplifies the information in a package ref request into an
// instance of PackageRequest.
func processPackageRefRequest(
	context RequestContext,
	req *http.Request,
) (PackageRequest, error) {
	var (
		matches    []string
		requestURL string
	)

	requestURL = req.URL.Path
	matches = packageRefRequestRegex.FindStringSubmatch(requestURL)
	if matches == nil {
		return PackageRequest{}, NewInvalidPackageRefRequestURLError(requestURL)
	}

	var (
		packageRef      = matches[packageRefRequestRegexIndexRef]
		packageRepo     = matches[packageRequestRegexIndexRepo]
		packageAuthor   = matches[packageRequestRegexIndexAuthor]
		packageSubpath  = matches[packageRefRequestRegexIndexSubpath]
		packageRefsData []byte
	)

	// Only go out to fetch refs if they're going to get used
	if packageSubpath == gitRefsInfoSubPath {
		refs, err := common.FetchRefs(packageAuthor, packageRepo)
		if err != nil {
			return PackageRequest{}, err
		}
		// Reserialize the refs data with everything pointing at the specified ref.
		// The ref hash is obviously packageRef, but the name is empty needs to be a
		// made up tag.
		packageRefsData = refs.Reserialize(someFakeGitTagRef, packageRef)
	}

	return PackageRequest{
		Repo:       packageRepo,
		Author:     packageAuthor,
		Subpath:    packageSubpath,
		RefsData:   packageRefsData,
		Selector:   packageRef,
		GithubTree: packageRef,
	}, nil
}

// processBarePackageRequest is a sub-function of RespondToPackageRequest that
// parses and simplifies the information in a base package request into an
// instance of PackageRequest.
func processBarePackageRequest(
	context RequestContext,
	req *http.Request,
) (PackageRequest, error) {
	var (
		matches    []string
		requestURL string
	)

	requestURL = req.URL.Path
	matches = barePackageRequestRegex.FindStringSubmatch(requestURL)
	if matches == nil {
		return PackageRequest{}, NewInvalidBarePackageRequestURLError(requestURL)
	}

	var (
		packageRepo     = matches[packageRequestRegexIndexRepo]
		packageAuthor   = matches[packageRequestRegexIndexAuthor]
		packageSubpath  = matches[barePackageRequestRegexIndexSubpath]
		packageRefsData []byte
	)

	// Only go out to fetch refs if they're going to get used
	if packageSubpath == gitRefsInfoSubPath {
		refs, err := common.FetchRefs(packageAuthor, packageRepo)
		if err != nil {
			return PackageRequest{}, err
		}
		// Just pass the refs along
		// TODO(skeswa): come up with a way to skip candidate matching here
		packageRefsData = refs.Data
	}

	return PackageRequest{
		Repo:       packageRepo,
		Author:     packageAuthor,
		Subpath:    packageSubpath,
		RefsData:   packageRefsData,
		GithubTree: masterGitRefLabel,
	}, nil
}

// processPackageVersionRequest is a sub-function of RespondToPackageRequest
// that parses and simplifies the information in a package version request into
// an instance of PackageRequest.
func processPackageVersionRequest(
	context RequestContext,
	req *http.Request,
) (PackageRequest, error) {
	var (
		matches    []string
		requestURL string
	)

	requestURL = req.URL.Path
	matches = packageVersionRequestRegex.FindStringSubmatch(requestURL)
	if matches == nil {
		return PackageRequest{}, NewInvalidPackageVersionRequestURLError(requestURL)
	}

	var (
		packageRepo          = matches[packageRequestRegexIndexRepo]
		packageAuthor        = matches[packageRequestRegexIndexAuthor]
		packageSubpath       = matches[packageVersionRequestRegexIndexSubpath]
		hasMatchedCandidate  = false
		semverSelectorExists = false

		semverSelector        semver.SemverSelector
		packageRefsData       []byte
		matchedCandidate      semver.SemverCandidate
		masterGitRefHash      string
		matchedCandidateHash  string
		matchedCandidateLabel string
	)

	// Build the semver selector from the regex capture groups.
	selector, err := semver.NewSemverSelector(
		matches[packageVersionRequestRegexIndexSemverPrefix],
		matches[packageVersionRequestRegexIndexSemverMajorVersion],
		matches[packageVersionRequestRegexIndexSemverMinorVersion],
		matches[packageVersionRequestRegexIndexSemverPatchVersion],
		matches[packageVersionRequestRegexIndexSemverPrereleaseLabel],
		matches[packageVersionRequestRegexIndexSemverPrereleaseVersion],
		matches[packageVersionRequestRegexIndexSemverSuffix],
	)
	if err != nil {
		return PackageRequest{}, NewInvalidPackageVersionRequestURLError(requestURL, err)
	}
	semverSelector = selector
	semverSelectorExists = true
	log.Printf(
		"[%s] Found a version selector in the request URL\n",
		context.RequestID,
	)

	// Only go out to fetch refs if they're going to get used
	if req.FormValue(formKeyGoGet) == formValueGoGet || packageSubpath == gitRefsInfoSubPath {
		log.Printf(
			"[%s] Fetching Github refs since this request is either from a go get or has an info path\n",
			context.RequestID,
		)
		// Get and process all of the refs for this package.
		refs, err := common.FetchRefs(packageAuthor, packageRepo)
		if err != nil {
			return PackageRequest{}, err
		}
		// Cache the master ref has for use in the event that there are no matched candidates.
		masterGitRefHash = refs.MasterRefHash

		if semverSelectorExists && refs.Candidates != nil && len(refs.Candidates) > 0 {
			// Get the list of candidates that match the selector
			matchedCandidates := refs.Candidates.Match(semverSelector)
			log.Printf(
				"[%s] Matched candidates to the version selector\n",
				context.RequestID,
			)
			// Only proceed if there is at least one matched candidate
			if matchedCandidates != nil && len(matchedCandidates) > 0 {
				if len(matchedCandidates) == 1 {
					matchedCandidate = matchedCandidates[0]
					hasMatchedCandidate = true
				} else {
					// Multiple matches: "<" or wildcard selectors take the
					// highest candidate, everything else the lowest.
					selectorHasLessThan := semverSelector.Suffix == semver.SemverSelectorSuffixLessThan
					selectorHasWildcards := semverSelector.MinorVersion.Type == semver.SemverSegmentTypeWildcard ||
						semverSelector.PatchVersion.Type == semver.SemverSegmentTypeWildcard ||
						semverSelector.PrereleaseVersion.Type == semver.SemverSegmentTypeWildcard

					var matchedCandidateReference *semver.SemverCandidate
					if selectorHasWildcards || selectorHasLessThan {
						matchedCandidateReference = matchedCandidates.Highest()
					} else {
						matchedCandidateReference = matchedCandidates.Lowest()
					}
					matchedCandidate = *matchedCandidateReference
					hasMatchedCandidate = true
				}
				log.Printf(
					"[%s] There was at least one candidate matched to the version selector\n",
					context.RequestID,
				)
			}
		}

		if hasMatchedCandidate {
			log.Printf(
				"[%s] Tweaked the refs to focus on the matched candidate\n",
				context.RequestID,
			)
			packageRefsData = refs.Reserialize(
				matchedCandidate.GitRefName,
				matchedCandidate.GitRefHash,
			)
			matchedCandidateHash = matchedCandidate.GitRefHash
			matchedCandidateLabel = matchedCandidate.GitRefLabel
		} else {
			if !semverSelectorExists {
				// Since there was no selector, we are fine with the fact that we didn't
				// find a match. Now, return the original refs that we downloaded from
				// github that point to master by default.
				packageRefsData = refs.Data
			} else {
				log.Printf(
					"[%s] Couldn't find any candidates to match to the version selector \"%s\"\n",
					context.RequestID,
					semverSelector.String(),
				)
				return PackageRequest{}, NewNoSuchPackageVersionError(
					packageAuthor,
					packageRepo,
					semverSelector.String(),
				)
			}
		}
	}

	// If there is no label as of yet, just default to master
	if len(matchedCandidateLabel) < 1 {
		matchedCandidateHash = masterGitRefHash
		matchedCandidateLabel = masterGitRefLabel
	}

	return PackageRequest{
		Repo:       packageRepo,
		Author:     packageAuthor,
		RefHash:    matchedCandidateHash,
		Subpath:    packageSubpath,
		RefsData:   packageRefsData,
		Selector:   semverSelector.String(),
		GithubTree: matchedCandidateLabel,
	}, nil
}

// generateGoGetMetadata generates the format of metadata that the go get tool
// expects to receive from unknown repository domains before its starts pulling
// down source code.
func generateGoGetMetadata(
	conf *config.Config,
	user string,
	repo string,
	selector string,
	subpath string,
	githubTree string,
) string {
	var (
		buffer     bytes.Buffer
		domain     string
		protocol   string
		gophrRoot  string
		gophrPath  string
		githubRoot string
	)

	// Dev deployments serve over plain http on gophr.dev.
	if conf.IsDev {
		domain = "gophr.dev"
		protocol = "http"
	} else {
		domain = "gophr.pm"
		protocol = "https"
	}

	// gophrRoot = domain/user/repo[@selector]; gophrPath = gophrRoot+subpath.
	buffer.WriteString(domain)
	buffer.WriteByte('/')
	buffer.WriteString(user)
	buffer.WriteByte('/')
	buffer.WriteString(repo)
	if len(selector) > 0 {
		buffer.WriteByte('@')
		buffer.WriteString(selector)
	}
	gophrRoot = buffer.String()
	buffer.WriteString(subpath)
	gophrPath = buffer.String()
	buffer.Reset()
	buffer.WriteString("github.com")
	buffer.WriteByte('/')
	buffer.WriteString(user)
	buffer.WriteByte('/')
	buffer.WriteString(repo)
	githubRoot = buffer.String()
	if len(githubTree) < 1 {
		githubTree = masterGitRefLabel
	}
	return fmt.Sprintf(
		goGetMetadataTemplate,
		gophrRoot,
		protocol,
		gophrRoot,
		gophrRoot,
		githubRoot,
		githubTree,
		githubRoot,
		githubTree,
		gophrPath,
	)
}

// recordDownload is a helper function that records the download of a specific
// package.
func recordDownload(
	session *gocql.Session,
	context RequestContext,
	author string,
	repo string,
	selector string) {
	err := models.RecordDailyDownload(
		session,
		author,
		repo,
		selector,
	)
	// Instead of bubbling this error, just commit it to the logs. That way this
	// failure is allowed to remain low impact.
	if err != nil {
		log.Printf(
			"[%s] Failed to record package download: %v\n",
			context.RequestID,
			err,
		)
	}
}
package middlewares import ( "github.com/julienschmidt/httprouter" ) type Middleware func(handler httprouter.Handle) httprouter.Handle func Wrap(middlewares []Middleware, handler httprouter.Handle) httprouter.Handle { for i := len(middlewares) - 1; i >= 0; i-- { handler = middlewares[i](handler) } return handler }
package encryption import ( "github.com/stretchr/testify/assert" "testing" ) var aes = NewAESEncryptionService([]byte("1111111111111111")) var plainJSON = []byte(`{"type":"login","user_name":"test","ping_interval":45000,"ppks":[{"ppk_num":0,"pwd":"0","license_key":[0,0,0,0,0,0]}]}`) func TestAesEncryptionService_Encode(t *testing.T) { t.Run("plain JSON string", func(t *testing.T) { encoded, err := aes.Encode(plainJSON) assert.NoError(t, err) assert.NotEmpty(t, encoded) }) t.Run("bytes string", func(t *testing.T) { encoded, err := aes.Encode([]byte(`hello!`)) assert.NoError(t, err) t.Log("encoded 2", string(encoded)) }) t.Run("same bytes string again", func(t *testing.T) { encoded, err := aes.Encode([]byte(`hello!`)) assert.NoError(t, err) t.Log("encoded 3", string(encoded)) }) } func TestAesEncryptionService_Decode(t *testing.T) { t.Run("string encoded with Encode func", func(t *testing.T) { decoded, err := aes.Decode([]byte(``)) assert.NoError(t, err) assert.Equal(t, string(plainJSON), string(decoded)) }) t.Run("string encoded with www.browserling.com", func(t *testing.T) { encoded, err := aes.Decode([]byte(`U2FsdGVkX1+6/f81ec3nfCjVRv8f133JtC0592XC6xg=`)) assert.NoError(t, err) assert.Equal(t, "test", string(encoded)) }) }
package log

import (
	"testing"
)

// TestSetLevel drives the logger through each level as a smoke test: it
// only verifies that logging at every configured level executes without
// panicking (output is not captured or asserted).
func TestSetLevel(t *testing.T) {
	cases := []struct {
		name     string
		setLevel func()
	}{
		{"infoLevel", func() { SetLevel(InfoLevel) }},
		{"errorLevel", func() { SetLevel(ErrorLevel) }},
		{"disabled", func() { SetLevel(Disabled) }},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.setLevel()
			Info("this is a info")
			Error("this is a error")
		})
	}
}
//go:build tools // +build tools package tools import ( // Code generators built at runtime. _ "k8s.io/kube-openapi/cmd/openapi-gen" )
package main

import (
	"day1/app/http2"
)

// main boots the application by delegating entirely to the http2 package.
// NOTE(review): Start presumably blocks while serving requests — confirm
// in day1/app/http2.
func main() {
	http2.Start()
}
package _4_Chain_of_Responsibility_Pattern

import (
	"testing"
)

// TestChainOfResponsibilityPattern builds loggers with different levels and
// links them into a chain (each logger's nextLogger is the next link), then
// verifies that a message sent to the head of the chain is echoed by every
// logger whose level permits it.
func TestChainOfResponsibilityPattern(t *testing.T) {
	errLog := ErrorLogger{level: ERROR}
	fileLog := FileLogger{level: DEBUG}
	consoleLog := ConsoleLogger{level: INFO}

	// Wire up the chain: error -> file -> console.
	errLog.nextLogger = &fileLog
	fileLog.nextLogger = &consoleLog
	chain := &errLog

	cases := []struct {
		name    string
		level   int
		message string
		wantRet string
	}{
		{"information", INFO, "This is an information.",
			"Console::Logger: This is an information."},
		{"debug", DEBUG, "This is a debug level information.",
			"File::Logger: This is a debug level information." +
				"Console::Logger: This is a debug level information."},
		{"error", ERROR, "This is an error information.",
			"Error::Logger: This is an error information." +
				"File::Logger: This is an error information." +
				"Console::Logger: This is an error information."},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := chain.logMessage(tc.level, tc.message); got != tc.wantRet {
				t.Errorf("logMessage() = %v, want %v", got, tc.wantRet)
			}
		})
	}
}
package middlewares import ( "encoding/json" "fmt" "net/http" "time" "github.com/dgrijalva/jwt-go" "github.com/gin-gonic/gin" "github.com/ramailh/backend/fetch/props" ) const ( tokenExp = 1 * time.Hour ) type claims struct { Name string `json:"name"` Phone string `json:"phone"` Role string `json:"role"` Timestamp int `json:"timestamp"` } func JWTAuth(c *gin.Context) { tokenString := c.Request.Header.Get("Authorization") token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { if token.Method.Alg() != "HS512" { return nil, fmt.Errorf("signing algorithm not matched") } return []byte(props.Secret), nil }) if err != nil { c.JSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "status": false}) c.Abort() return } if err = token.Claims.Valid(); err != nil { c.JSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "status": false}) c.Abort() return } tokenByte, _ := json.Marshal(token.Claims) var claim claims json.Unmarshal(tokenByte, &claim) claimsTimestamp := time.Unix(0, int64(claim.Timestamp)*int64(1000000)).Add(tokenExp).Unix() if time.Now().Unix() > claimsTimestamp { c.JSON(http.StatusUnauthorized, gin.H{"message": "token has expired", "status": false}) c.Abort() return } if !token.Valid { c.JSON(http.StatusUnauthorized, gin.H{"message": "invalid token", "status": false}) c.Abort() } } func JWTAuthAdmin(c *gin.Context) { tokenString := c.Request.Header.Get("Authorization") token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { if token.Method.Alg() != "HS512" { return nil, fmt.Errorf("signing algorithm not matched") } return []byte(props.Secret), nil }) if err != nil { c.JSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "status": false}) c.Abort() return } if err = token.Claims.Valid(); err != nil { c.JSON(http.StatusUnauthorized, gin.H{"message": err.Error(), "status": false}) c.Abort() return } tokenByte, _ := json.Marshal(token.Claims) var claim claims json.Unmarshal(tokenByte, &claim) 
claimsTimestamp := time.Unix(0, int64(claim.Timestamp)*int64(1000000)).Add(tokenExp).Unix() if time.Now().Unix() > claimsTimestamp { c.JSON(http.StatusUnauthorized, gin.H{"message": "token has expired", "status": false}) c.Abort() return } if claim.Role != "admin" { c.JSON(http.StatusUnauthorized, gin.H{"message": "unauthorized role", "status": false}) c.Abort() return } if !token.Valid { c.JSON(http.StatusUnauthorized, gin.H{"message": "invalid token", "status": false}) c.Abort() } }
package api

import (
	"log"

	"golang.org/x/net/context"
)

// Server is the gRPC server implementation for the ping service.
type Server struct {
}

// SayHello logs the greeting carried by the incoming PingMessage and
// replies with a fixed PingMessage whose Greeting is "bar". It never
// returns a non-nil error.
// (The log message "Menerima pesan" is Indonesian for "received message".)
func (s *Server) SayHello(ctx context.Context, in *PingMessage) (*PingMessage, error) {
	log.Printf("Menerima pesan %s", in.Greeting)
	return &PingMessage{Greeting: "bar"}, nil
}
package fetcher

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

// TestNewFetcher verifies two properties of the fetcher singleton: its
// semaphore really holds the requested capacity, and a repeated call to the
// constructor does not replace the already-initialized instance.
func TestNewFetcher(t *testing.T) {
	const n = 2

	// Construct a fetcher with capacity n and remember the instance.
	NewFetcher(n)
	first := singleton

	// Saturate the semaphore; exactly n tokens must fit.
	fillTheChan()
	assert.Len(t, singleton.sem, n)

	// A second construction with a different capacity must be a no-op.
	NewFetcher(3)
	assert.Equal(t, first, singleton)
}

// fillTheChan pushes tokens into the singleton's semaphore until a send
// would block, i.e. until the channel is full.
func fillTheChan() {
	for {
		select {
		case singleton.sem <- struct{}{}:
		default:
			return
		}
	}
}
// SPDX-License-Identifier: GPL-2.0

// Command server is a tiny in-memory price database exposed over HTTP.
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"sort"
)

var addr = flag.String("addr", ":8080", "listening address")

func main() {
	flag.Parse()
	db := database{
		"shoes": 12.5,
		"socks": 8.99,
	}
	log.Fatal(http.ListenAndServe(*addr, db))
}

// dollar is a price in US dollars.
type dollar float32

// String renders the price as e.g. "$12.50".
// (Was "%.02f"; "%.2f" is the idiomatic spelling and prints identically.)
func (d dollar) String() string {
	return fmt.Sprintf("$%.2f", d)
}

// database maps item names to their prices. It doubles as the HTTP handler.
type database map[string]dollar

// ServeHTTP routes requests to the matching database operation.
func (db database) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/", "/list":
		db.list(w, req)
	case "/price":
		db.price(w, req)
	default:
		db.handle404(w, req)
	}
}

// list writes every item and its price, sorted by item name so the output
// is deterministic (Go map iteration order is randomized).
func (db database) list(w http.ResponseWriter, req *http.Request) {
	items := make([]string, 0, len(db))
	for item := range db {
		items = append(items, item)
	}
	sort.Strings(items)
	for _, item := range items {
		fmt.Fprintf(w, "%s: %s\n", item, db[item])
	}
}

// price writes the price of the item named by the ?item= query parameter,
// or a 404 when the item is unknown.
func (db database) price(w http.ResponseWriter, req *http.Request) {
	item := req.URL.Query().Get("item")
	price, ok := db[item]
	if !ok {
		w.WriteHeader(http.StatusNotFound)
		fmt.Fprintf(w, "missing item: %q\n", item)
		return
	}
	fmt.Fprintf(w, "%s: %s\n", item, price)
}

// handle404 reports an unknown URL path.
func (db database) handle404(w http.ResponseWriter, req *http.Request) {
	w.WriteHeader(http.StatusNotFound)
	fmt.Fprintf(w, "does not exist: %s\n", req.URL.Path)
}
package score import "github.com/go-pg/pg/v9" type ( // Repository represents the repository for score. Repository interface { CreateScore(score *Score) error FindAllScores() (Scores, error) } // RepositoryImpl represents the repository implementation for score. RepositoryImpl struct { db *pg.DB } ) // NewRepository initialises a new score repository. func NewRepository(db *pg.DB) *RepositoryImpl { return &RepositoryImpl{db} } // CreateScore creates a new score func (r *RepositoryImpl) CreateScore(score *Score) error { err := r.db.Insert(score) return err } // FindAllScores returns all high scores func (r *RepositoryImpl) FindAllScores() (Scores, error) { scores := Scores{} err := r.db.Model(&scores). Order("score DESC"). Limit(10). Select() return scores, err }
package handler_test import ( "encoding/json" "errors" "net/http" "time" "github.com/Lunchr/luncher-api/db" "github.com/Lunchr/luncher-api/db/model" . "github.com/Lunchr/luncher-api/handler" "github.com/Lunchr/luncher-api/handler/mocks" "github.com/Lunchr/luncher-api/router" "github.com/Lunchr/luncher-api/session" "github.com/deiwin/facebook" "github.com/julienschmidt/httprouter" "github.com/stretchr/testify/mock" "golang.org/x/oauth2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("OffersHandler", func() { var ( offersCollection db.Offers imageStorage *mocks.Images regionsCollection *mocks.Regions ) BeforeEach(func() { offersCollection = &mockOffers{} imageStorage = new(mocks.Images) imageStorage.On("ChecksumDataURL", "image data url").Return("image checksum", nil) imageStorage.On("HasChecksum", "image checksum").Return(false, nil) imageStorage.On("StoreDataURL", "image data url").Return(nil) imageStorage.On("PathsFor", "image checksum").Return(&model.OfferImagePaths{ Large: "images/a large image path", Thumbnail: "images/thumbnail", }, nil) regionsCollection = new(mocks.Regions) regionsCollection.On("GetName", "Tartu").Return(&model.Region{ Name: "Tartu", Location: "Europe/Tallinn", }, nil) }) Describe("PostOffers", func() { var ( usersCollection db.Users restaurantsCollection *mocks.Restaurants handler router.HandlerWithParams params httprouter.Params sessionManager session.Manager facebookPost *mocks.Post ) BeforeEach(func() { usersCollection = &mockUsers{} restaurantsCollection = new(mocks.Restaurants) facebookPost = new(mocks.Post) restaurantID := bson.ObjectId("12letrrestid") restaurant := &model.Restaurant{ ID: restaurantID, Name: "Asian Chef", Region: "Tartu", Address: "an-address", Location: model.Location{ Type: "Point", Coordinates: []float64{26.7, 58.4}, }, Phone: "+372 5678 910", } params = httprouter.Params{httprouter.Param{ Key: "restaurantID", Value: restaurantID.Hex(), }} 
restaurantsCollection.On("GetID", restaurantID).Return(restaurant, nil).Once() facebookPost = new(mocks.Post) facebookPost.On("Update", model.DateWithoutTime("2014-11-11"), mock.AnythingOfType("*model.User"), restaurant).Return(nil) }) JustBeforeEach(func() { handler = PostOffers(offersCollection, usersCollection, restaurantsCollection, sessionManager, imageStorage, facebookPost, regionsCollection) }) ExpectUserToBeLoggedIn(func() *router.HandlerError { return handler(responseRecorder, request, params) }, func(mgr session.Manager, users db.Users) { sessionManager = mgr usersCollection = users }) Context("with session set and a matching user in DB", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} requestMethod = "POST" requestData = map[string]interface{}{ "title": "thetitle", "description": "a short description", "tags": []string{"tag1", "tag2"}, "price": 123.58, "from_time": "2014-11-11T09:00:00.000Z", "to_time": "2014-11-11T11:00:00.000Z", "image_data": "image data url", } }) It("should succeed", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) It("should return json", func() { handler(responseRecorder, request, params) contentTypes := responseRecorder.HeaderMap["Content-Type"] Expect(contentTypes).To(HaveLen(1)) Expect(contentTypes[0]).To(Equal("application/json")) }) It("should include the offer with the new ID", func() { handler(responseRecorder, request, params) var offer model.OfferJSON json.Unmarshal(responseRecorder.Body.Bytes(), &offer) Expect(offer.ID).To(Equal(objectID)) Expect(offer.Image.Large).To(Equal("images/a large image path")) }) Context("with the image having already been stored once", func() { BeforeEach(func() { imageStorage.ExpectedCalls = make([]*mock.Call, 0) imageStorage.On("ChecksumDataURL", "image data url").Return("image checksum", nil) imageStorage.On("HasChecksum", "image checksum").Return(true, nil) imageStorage.On("StoreDataURL", "image data 
url").Return(errors.New("already stored")) imageStorage.On("PathsFor", "image checksum").Return(&model.OfferImagePaths{ Large: "images/a large image path", Thumbnail: "images/thumbnail", }, nil) }) It("succeeds", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) It("includes the image paths in the response", func() { handler(responseRecorder, request, params) var offer model.OfferJSON json.Unmarshal(responseRecorder.Body.Bytes(), &offer) Expect(offer.ID).To(Equal(objectID)) Expect(offer.Image.Large).To(Equal("images/a large image path")) Expect(offer.Image.Thumbnail).To(Equal("images/thumbnail")) }) }) }) }) Describe("PutOffers", func() { var ( usersCollection db.Users restaurantsCollection *mocks.Restaurants handler router.HandlerWithParams sessionManager session.Manager facebookPost *mocks.Post params httprouter.Params restaurantID bson.ObjectId ) BeforeEach(func() { usersCollection = &mockUsers{} restaurantsCollection = new(mocks.Restaurants) restaurantID = bson.ObjectId("12letrrestid") params = httprouter.Params{httprouter.Param{ Key: "id", Value: objectID.Hex(), }, httprouter.Param{ Key: "restaurantID", Value: restaurantID.Hex(), }} restaurant := &model.Restaurant{ ID: restaurantID, Name: "Asian Chef", Region: "Tartu", Address: "an-address", Location: model.Location{ Type: "Point", Coordinates: []float64{26.7, 58.4}, }, Phone: "+372 5678 910", } restaurantsCollection.On("GetID", restaurantID).Return(restaurant, nil).Once() facebookPost = new(mocks.Post) facebookPost.On("Update", model.DateWithoutTime("2014-11-11"), mock.AnythingOfType("*model.User"), restaurant).Return(nil) }) JustBeforeEach(func() { handler = PutOffers(offersCollection, usersCollection, restaurantsCollection, sessionManager, imageStorage, facebookPost, regionsCollection) }) ExpectUserToBeLoggedIn(func() *router.HandlerError { return handler(responseRecorder, request, nil) }, func(mgr session.Manager, users db.Users) { sessionManager = mgr usersCollection = 
users }) Context("with no matching offer in the DB", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} }) It("should fail", func() { err := handler(responseRecorder, request, params) Expect(err.Code).To(Equal(http.StatusNotFound)) }) }) Context("with an ID that's not an object ID", func() { BeforeEach(func() { params = httprouter.Params{httprouter.Param{ Key: "id", Value: "not a proper bson.ObjectId", }, httprouter.Param{ Key: "restaurantID", Value: restaurantID.Hex(), }} sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} }) It("should fail", func() { err := handler(responseRecorder, request, params) Expect(err.Code).To(Equal(http.StatusBadRequest)) }) }) Context("with image not changed", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} requestMethod = "PUT" requestData = map[string]interface{}{ "title": "thetitle", "description": "a short description", "tags": []string{"tag1", "tag2"}, "price": 123.58, "from_time": "2014-11-11T09:00:00.000Z", "to_time": "2014-11-11T11:00:00.000Z", "image": map[string]interface{}{ "large": "images/a large image path", "thumbnail": "images/a thumbnail path", }, } currentOffer := &model.Offer{ CommonOfferFields: model.CommonOfferFields{ ID: objectID2, Title: "an offer title", FromTime: time.Date(2014, 11, 11, 9, 0, 0, 0, time.UTC), }, ImageChecksum: "image checksum", } offersCollection = &mockOffers{ mockOffer: currentOffer, imageIsUnchanged: true, } imageStorage.On("PathsFor", "").Return(&model.OfferImagePaths{}, nil) }) It("should succeed", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) }) Context("with session set, a matching user in DB and an offer in DB", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} requestMethod = "PUT" requestData = map[string]interface{}{ "title": "thetitle", "description": "a short 
description", "tags": []string{"tag1", "tag2"}, "price": 123.58, "from_time": "2014-11-11T09:00:00.000Z", "to_time": "2014-11-11T11:00:00.000Z", "image_data": "image data url", } currentOffer := &model.Offer{ CommonOfferFields: model.CommonOfferFields{ ID: objectID, Title: "an offer title", FromTime: time.Date(2014, 11, 11, 9, 0, 0, 0, time.UTC), }, ImageChecksum: "image checksum", } offersCollection = &mockOffers{ mockOffer: currentOffer, } }) It("should succeed", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) It("should return json", func() { handler(responseRecorder, request, params) contentTypes := responseRecorder.HeaderMap["Content-Type"] Expect(contentTypes).To(HaveLen(1)) Expect(contentTypes[0]).To(Equal("application/json")) }) It("should include the updated offer in the response", func() { handler(responseRecorder, request, params) var offer *model.OfferJSON json.Unmarshal(responseRecorder.Body.Bytes(), &offer) Expect(offer.ID).To(Equal(objectID)) Expect(offer.Image.Large).To(Equal("images/a large image path")) }) Context("with the image having already been stored once", func() { BeforeEach(func() { imageStorage.ExpectedCalls = make([]*mock.Call, 0) imageStorage.On("ChecksumDataURL", "image data url").Return("image checksum", nil) imageStorage.On("HasChecksum", "image checksum").Return(true, nil) imageStorage.On("StoreDataURL", "image data url").Return(errors.New("already stored")) imageStorage.On("PathsFor", "image checksum").Return(&model.OfferImagePaths{ Large: "images/a large image path", Thumbnail: "images/thumbnail", }, nil) }) It("succeeds", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) It("includes the image paths in the response", func() { handler(responseRecorder, request, params) var offer model.OfferJSON json.Unmarshal(responseRecorder.Body.Bytes(), &offer) Expect(offer.ID).To(Equal(objectID)) Expect(offer.Image.Large).To(Equal("images/a large image path")) 
Expect(offer.Image.Thumbnail).To(Equal("images/thumbnail")) }) }) Describe("updating group post", func() { AfterEach(func() { facebookPost.AssertExpectations(GinkgoT()) }) BeforeEach(func() { facebookPost = new(mocks.Post) facebookPost.On("Update", model.DateWithoutTime("2014-11-11"), mock.AnythingOfType("*model.User"), mock.AnythingOfType("*model.Restaurant")).Return(nil) }) It("succeeds", func() { err := handler(responseRecorder, request, params) Expect(err).NotTo(HaveOccurred()) }) Context("with offer date changed", func() { BeforeEach(func() { requestData = map[string]interface{}{ "title": "thetitle", "description": "a short description", "tags": []string{"tag1", "tag2"}, "price": 123.58, "from_time": "2014-11-15T09:00:00.000Z", "to_time": "2014-11-15T11:00:00.000Z", "image_data": "image data url", } date := model.DateWithoutTime("2014-11-15") facebookPost.On("Update", date, mock.AnythingOfType("*model.User"), mock.AnythingOfType("*model.Restaurant")).Return(nil) }) It("succeeds updating the group posts for both the previous and new day", func() { err := handler(responseRecorder, request, params) Expect(err).NotTo(HaveOccurred()) }) }) }) }) }) Describe("DeleteOffers", func() { var ( usersCollection db.Users handler router.HandlerWithParams sessionManager session.Manager restaurantsCollection *mocks.Restaurants facebookPost *mocks.Post params httprouter.Params restaurantID bson.ObjectId ) BeforeEach(func() { usersCollection = &mockUsers{} restaurantsCollection = new(mocks.Restaurants) restaurantID = bson.ObjectId("12letrrestid") params = httprouter.Params{httprouter.Param{ Key: "id", Value: objectID.Hex(), }, httprouter.Param{ Key: "restaurantID", Value: restaurantID.Hex(), }} restaurant := &model.Restaurant{ ID: restaurantID, Name: "Asian Chef", Region: "Tartu", Address: "an-address", Location: model.Location{ Type: "Point", Coordinates: []float64{26.7, 58.4}, }, Phone: "+372 5678 910", } restaurantsCollection.On("GetID", restaurantID).Return(restaurant, 
nil).Once() facebookPost = new(mocks.Post) facebookPost.On("Update", model.DateWithoutTime("2014-11-11"), mock.AnythingOfType("*model.User"), restaurant).Return(nil) }) JustBeforeEach(func() { handler = DeleteOffers(offersCollection, usersCollection, sessionManager, restaurantsCollection, facebookPost, regionsCollection) }) ExpectUserToBeLoggedIn(func() *router.HandlerError { return handler(responseRecorder, request, nil) }, func(mgr session.Manager, users db.Users) { sessionManager = mgr usersCollection = users }) Context("with no matching offer in the DB", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} }) It("should fail", func() { err := handler(responseRecorder, request, params) Expect(err.Code).To(Equal(http.StatusNotFound)) }) }) Context("with an ID that's not an object ID", func() { BeforeEach(func() { params = httprouter.Params{httprouter.Param{ Key: "id", Value: "not a proper bson.ObjectId", }, httprouter.Param{ Key: "restaurantID", Value: restaurantID.Hex(), }} sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} }) It("should fail", func() { err := handler(responseRecorder, request, params) Expect(err.Code).To(Equal(http.StatusBadRequest)) }) }) Context("with session set, a matching user in DB and an offer in DB", func() { BeforeEach(func() { sessionManager = &mockSessionManager{isSet: true, id: "correctSession"} requestMethod = "DELETE" currentOffer := &model.Offer{ CommonOfferFields: model.CommonOfferFields{ ID: objectID, Title: "an offer title", FromTime: time.Date(2014, 11, 11, 0, 0, 0, 0, time.UTC), }, ImageChecksum: "image checksum", } offersCollection = &mockOffers{ mockOffer: currentOffer, } }) It("should succeed", func() { err := handler(responseRecorder, request, params) Expect(err).To(BeNil()) }) }) }) }) var objectID = bson.NewObjectId() var objectID2 = bson.NewObjectId() type mockUsers struct { db.Users } func (m mockUsers) GetSessionID(session string) (*model.User, 
error) { if session != "correctSession" { return nil, mgo.ErrNotFound } user := &model.User{ ID: objectID, RestaurantIDs: []bson.ObjectId{"12letrrestid"}, Session: model.UserSession{ FacebookUserToken: oauth2.Token{ AccessToken: "usertoken", }, }, } return user, nil } type mockOffers struct { getForTimeRangeFunc func(time.Time, time.Time) ([]*model.Offer, error) mockOffer *model.Offer imageIsUnchanged bool db.Offers } func (m mockOffers) Insert(offers ...*model.Offer) ([]*model.Offer, error) { Expect(offers).To(HaveLen(1)) offer := offers[0] Expect(offer.Title).To(Equal("thetitle")) Expect(offer.Description).To(Equal("a short description")) Expect(offer.Tags).To(HaveLen(2)) Expect(offer.Tags).To(ContainElement("tag1")) Expect(offer.Tags).To(ContainElement("tag2")) Expect(offer.Price).To(BeNumerically("~", 123.58)) Expect(offer.Restaurant.ID).To(Equal(bson.ObjectId("12letrrestid"))) Expect(offer.Restaurant.Name).To(Equal("Asian Chef")) Expect(offer.Restaurant.Region).To(Equal("Tartu")) Expect(offer.Restaurant.Address).To(Equal("an-address")) Expect(offer.Restaurant.Location.Coordinates[0]).To(BeNumerically("~", 26.7)) Expect(offer.Restaurant.Location.Coordinates[1]).To(BeNumerically("~", 58.4)) Expect(offer.Restaurant.Phone).To(Equal("+372 5678 910")) Expect(offer.FromTime).To(Equal(time.Date(2014, 11, 11, 9, 0, 0, 0, time.UTC))) Expect(offer.ToTime).To(Equal(time.Date(2014, 11, 11, 11, 0, 0, 0, time.UTC))) Expect(offer.ImageChecksum).To(Equal("image checksum")) offers[0].ID = objectID return offers, nil } func (m mockOffers) UpdateID(id bson.ObjectId, offer *model.Offer) error { Expect(offer.Title).To(Equal("thetitle")) Expect(offer.Description).To(Equal("a short description")) Expect(offer.Tags).To(HaveLen(2)) Expect(offer.Tags).To(ContainElement("tag1")) Expect(offer.Tags).To(ContainElement("tag2")) Expect(offer.Price).To(BeNumerically("~", 123.58)) Expect(offer.Restaurant.ID).To(Equal(bson.ObjectId("12letrrestid"))) Expect(offer.Restaurant.Name).To(Equal("Asian 
Chef")) Expect(offer.Restaurant.Region).To(Equal("Tartu")) Expect(offer.Restaurant.Address).To(Equal("an-address")) Expect(offer.Restaurant.Location.Coordinates[0]).To(BeNumerically("~", 26.7)) Expect(offer.Restaurant.Location.Coordinates[1]).To(BeNumerically("~", 58.4)) Expect(offer.Restaurant.Phone).To(Equal("+372 5678 910")) if id == objectID { Expect(offer.ImageChecksum).To(Equal("image checksum")) } else if id == objectID2 { Expect(offer.ImageChecksum).To(Equal("")) } else { Fail("Unexpected id") } return nil } func (m mockOffers) RemoveID(id bson.ObjectId) error { Expect(id).To(Equal(objectID)) return nil } func (m mockOffers) GetID(id bson.ObjectId) (*model.Offer, error) { Expect(id).To(Equal(objectID)) if m.mockOffer == nil { return nil, errors.New("offer not found") } return m.mockOffer, nil } func (m mockRestaurants) GetID(id bson.ObjectId) (*model.Restaurant, error) { Expect(id).To(Equal(bson.ObjectId("12letrrestid"))) restaurant := &model.Restaurant{ ID: id, Name: "Asian Chef", Region: "Tartu", Address: "an-address", Location: model.Location{ Type: "Point", Coordinates: []float64{26.7, 58.4}, }, Phone: "+372 5678 910", } return restaurant, nil } func (m mockAuthenticator) APIConnection(tok *oauth2.Token) facebook.API { Expect(tok.AccessToken).To(Equal("usertoken")) return m.api } type mockAPI struct { shouldFail bool message string facebook.API } func (m mockAPI) PostDelete(pageAccessToken, postID string) error { if m.shouldFail { return errors.New("post to FB failed") } Expect(pageAccessToken).To(Equal("pagetoken")) Expect(postID).To(Equal("fb post id")) return nil } type mockRegions struct { getAllFunc func() db.RegionIter db.Regions } func (m mockRegions) GetName(name string) (*model.Region, error) { Expect(name).To(Equal("Tartu")) region := &model.Region{ Name: "Tartu", Location: "Europe/Tallinn", } return region, nil }
/*
Copyright 2021 The KodeRover Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package handler

import (
	"github.com/gin-gonic/gin"

	"github.com/koderover/zadig/lib/microservice/aslan/middleware"
)

// Router registers all system-setting HTTP routes onto a gin router group.
type Router struct{}

// Inject mounts every route group. The proxy-config endpoint is registered
// before the Auth middleware and is therefore reachable unauthenticated;
// everything after router.Use(middleware.Auth()) requires authentication,
// and mutating endpoints additionally require super-admin rights.
func (*Router) Inject(router *gin.RouterGroup) {
	proxy := router.Group("proxy")
	{
		proxy.GET("/config", GetProxyConfig)
	}

	router.Use(middleware.Auth())

	// ---------------------------------------------------------------------------------------
	// Install-script management endpoints
	// ---------------------------------------------------------------------------------------
	install := router.Group("install")
	{
		install.POST("", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, CreateInstall)
		install.PUT("", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, UpdateInstall)
		install.GET("/:name/:version", GetInstall)
		install.GET("", ListInstalls)
		// Deletion goes through PUT /delete rather than the DELETE verb.
		install.PUT("/delete", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, DeleteInstall)
	}

	// ---------------------------------------------------------------------------------------
	// Proxy management endpoints
	// ---------------------------------------------------------------------------------------
	proxyManage := router.Group("proxyManage")
	{
		proxyManage.GET("", ListProxies)
		proxyManage.GET("/:id", GetProxy)
		proxyManage.POST("", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, CreateProxy)
		proxyManage.PUT("/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, UpdateProxy)
		proxyManage.DELETE("/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, DeleteProxy)
		proxyManage.POST("/connectionTest", TestConnection)
	}

	// Container-registry management endpoints.
	registry := router.Group("registry")
	{
		registry.GET("", ListRegistries)
		registry.GET("/namespaces", middleware.RequireSuperAdminAuth, ListRegistryNamespaces)
		registry.POST("/namespaces", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, CreateRegistryNamespace)
		registry.PUT("/namespaces/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, UpdateRegistryNamespace)
		registry.DELETE("/namespaces/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, DeleteRegistryNamespace)
		registry.GET("/release/repos", ListAllRepos)
		registry.POST("/images", ListImages)
		registry.GET("/images/repos/:name", ListRepoImages)
	}

	// S3 storage management endpoints.
	s3storage := router.Group("s3storage")
	{
		s3storage.GET("", ListS3Storage)
		s3storage.POST("", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, CreateS3Storage)
		s3storage.GET("/:id", GetS3Storage)
		s3storage.PUT("/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, UpdateS3Storage)
		s3storage.DELETE("/:id", middleware.RequireSuperAdminAuth, middleware.UpdateOperationLogStatus, DeleteS3Storage)
	}

	// System image-cache cleaning endpoints.
	cleanCache := router.Group("cleanCache")
	{
		cleanCache.POST("/oneClick", CleanImageCache)
		cleanCache.GET("/state", CleanCacheState)
	}

	// ---------------------------------------------------------------------------------------
	// GitHub app management endpoints
	// ---------------------------------------------------------------------------------------
	github := router.Group("githubApp")
	{
		github.GET("", GetGithubApp)
		github.POST("", middleware.RequireSuperAdminAuth, CreateGithubApp)
		github.DELETE("/:id", middleware.RequireSuperAdminAuth, DeleteGithubApp)
	}

	// ---------------------------------------------------------------------------------------
	// Jenkins integration endpoints, plus jobs and buildWithParameters endpoints
	// ---------------------------------------------------------------------------------------
	jenkins := router.Group("jenkins")
	{
		jenkins.POST("/integration", middleware.RequireSuperAdminAuth, CreateJenkinsIntegration)
		jenkins.GET("/integration", ListJenkinsIntegration)
		jenkins.PUT("/integration/:id", middleware.RequireSuperAdminAuth, UpdateJenkinsIntegration)
		jenkins.DELETE("/integration/:id", middleware.RequireSuperAdminAuth, DeleteJenkinsIntegration)
		jenkins.POST("/user/connection", middleware.RequireSuperAdminAuth, TestJenkinsConnection)
		jenkins.GET("/jobNames", middleware.RequireSuperAdminAuth, ListJobNames)
		jenkins.GET("/buildArgs/:jobName", middleware.RequireSuperAdminAuth, ListJobBuildArgs)
	}

	// ---------------------------------------------------------------------------------------
	// Custom base-image management endpoints
	// ---------------------------------------------------------------------------------------
	basicImages := router.Group("basicImages")
	{
		basicImages.GET("", ListBasicImages)
		basicImages.GET("/:id", GetBasicImage)
	}
}
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

// buildExec walks every API registered on the compiler and emits code for
// each API's subroutines first, then its commands (functions).
func (c *C) buildExec() {
	for _, api := range c.APIs {
		// Record which API is being processed before emitting anything;
		// presumably the subroutine/command emitters consult c.currentAPI —
		// confirm against the rest of the compiler package.
		c.currentAPI = api
		for _, f := range api.Subroutines {
			c.subroutine(f)
		}
		for _, f := range api.Functions {
			c.command(f)
		}
	}
}
package database // Client represents all the method available for the database type Client interface { Query(string) string } type database struct{} func (db *database) Query(string) string { return "Mock response from DB" } // New returns a new instance of connection to the database func New() Client { return &database{} }
package errutil // Func delays error string generation until the error string actually get displayed. It does this by implementing the go Error() interface for parameterless functions. // For example: funError:= errutil.Func(func() string { return "fun" }) type Func func() string // Error implements go's Error() interface for a function. func (e Func) Error() string { return e() }
package gui import ( "github.com/magicmonkey/go-streamdeck" "github.com/magicmonkey/go-streamdeck/buttons" ) type StopAppAction struct { StopFunc func() } func (action *StopAppAction) Pressed(btn streamdeck.Button) { mybtn := btn.(*buttons.TextButton) mybtn.SetText("BYE") action.StopFunc() }
package _091_Decode_Ways

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestDecodeWays checks numDecodings against a table of known inputs,
// covering leading zeros, embedded zeros and multi-digit combinations.
func TestDecodeWays(t *testing.T) {
	ast := assert.New(t)

	cases := []struct {
		in   string
		want int
	}{
		{"0", 0},
		{"01", 0},
		{"11", 2},
		{"101", 1},
		{"100", 0},
		{"20", 1},
		{"1324056", 0},
		{"12", 2},
		{"123", 3},
		{"226", 3},
		{"227", 2},
		{"2226", 5},
	}
	for _, c := range cases {
		ast.Equal(c.want, numDecodings(c.in))
	}
}
package main

import (
	. "../shared"

	"sync"
)

func main() {
	Say(`All goroutines will wakeup and print "Finish waiting"`)

	var gate sync.WaitGroup // released exactly once to unblock all workers
	var done sync.WaitGroup // tracks the workers themselves

	gate.Add(1)
	for i := 0; i < 3; i++ {
		done.Add(1)
		go func() {
			defer done.Done()
			gate.Wait()
			Say("Finish waiting")
		}()
	}

	// Open the gate: every worker's Wait returns at once.
	gate.Done()
	done.Wait()
}
package cluster

import (
	"errors"
	"fmt"
	"sync"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/tilt-dev/tilt/internal/controllers/apis/cluster"
	"github.com/tilt-dev/tilt/internal/docker"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)

// NewConnectionManager returns an empty ConnectionManager ready for use.
func NewConnectionManager() *ConnectionManager {
	return &ConnectionManager{}
}

// ConnectionManager tracks one connection per cluster, keyed by the
// cluster's NamespacedName.
type ConnectionManager struct {
	// connections maps types.NamespacedName -> connection.
	connections sync.Map
}

var _ cluster.ClientProvider = &ConnectionManager{}

// connectionType distinguishes what kind of client a connection holds.
type connectionType string

const (
	connectionTypeK8s    connectionType = "kubernetes"
	connectionTypeDocker connectionType = "docker"
)

// connection is the value stored in ConnectionManager.connections. Exactly
// one of dockerClient/k8sClient is meaningful, selected by connType.
type connection struct {
	connType connectionType
	spec     v1alpha1.ClusterSpec

	// createdAt is when the connection object was created.
	// If initError is empty, it's effectively the time we connected to the
	// cluster. Otherwise, it's when we _attempted_ to initialize the client
	// and is used for retry/backoff.
	createdAt time.Time

	// initError is populated when the client cannot be instantiated.
	// For example, if there's no ~/.kube/config, a Kubernetes client
	// can't be created.
	initError string

	dockerClient docker.Client

	k8sClient     k8s.Client
	arch          string
	serverVersion string
	registry      *v1alpha1.RegistryHosting
	connStatus    *v1alpha1.ClusterConnectionStatus
}

// GetK8sClient returns the Kubernetes client for the given cluster along with
// the time the connection was established.
func (k *ConnectionManager) GetK8sClient(clusterKey types.NamespacedName) (k8s.Client, metav1.MicroTime, error) {
	conn, err := k.validConnOrError(clusterKey, connectionTypeK8s)
	if err != nil {
		return nil, metav1.MicroTime{}, err
	}
	return conn.k8sClient, apis.NewMicroTime(conn.createdAt), nil
}

// GetComposeDockerClient gets the Docker client for the instance that Docker Compose is deploying to.
//
// This is not currently exposed by the ClientCache interface as Docker Compose logic has not been migrated
// to the apiserver.
func (k *ConnectionManager) GetComposeDockerClient(key types.NamespacedName) (docker.Client, error) {
	conn, err := k.validConnOrError(key, connectionTypeDocker)
	if err != nil {
		return nil, err
	}
	return conn.dockerClient, nil
}

// validConnOrError loads the connection for key and verifies that it exists,
// has the expected type, and initialized without error.
func (k *ConnectionManager) validConnOrError(key types.NamespacedName, connType connectionType) (connection, error) {
	conn, ok := k.load(key)
	if !ok {
		return connection{}, cluster.NotFoundError
	}
	if conn.connType != connType {
		return connection{}, fmt.Errorf("incorrect cluster client type: got %s, expected %s",
			conn.connType, connType)
	}
	if conn.initError != "" {
		return connection{}, errors.New(conn.initError)
	}
	// N.B. even if there is a statusError, the client is still returned, as it
	// might still be functional even though it's in a degraded state
	return conn, nil
}

// store saves/replaces the connection for key.
func (k *ConnectionManager) store(key types.NamespacedName, conn connection) {
	k.connections.Store(key, conn)
}

// load fetches the connection for key, reporting whether it exists.
func (k *ConnectionManager) load(key types.NamespacedName) (connection, bool) {
	v, ok := k.connections.Load(key)
	if !ok {
		return connection{}, false
	}
	return v.(connection), true
}

// delete removes the connection for key (a no-op if absent).
func (k *ConnectionManager) delete(key types.NamespacedName) {
	k.connections.LoadAndDelete(key)
}
package bolt

import (
	"github.com/asdine/storm"
	"github.com/asdine/storm/q"

	fm "github.com/rjchee/dcac_filemanager"
)

// ShareStore is a shareable links store.
// Every lookup translates storm.ErrNotFound into fm.ErrNotExist so callers
// only deal with the filemanager's error vocabulary.
type ShareStore struct {
	DB *storm.DB
}

// Get gets a Share Link from an hash.
func (s ShareStore) Get(hash string) (*fm.ShareLink, error) {
	var v fm.ShareLink
	err := s.DB.One("Hash", hash, &v)
	if err == storm.ErrNotFound {
		return nil, fm.ErrNotExist
	}

	return &v, err
}

// GetPermanent gets the permanent link from a path.
// "Permanent" means the link's Expires flag is false.
func (s ShareStore) GetPermanent(path string) (*fm.ShareLink, error) {
	var v fm.ShareLink
	err := s.DB.Select(q.Eq("Path", path), q.Eq("Expires", false)).First(&v)
	if err == storm.ErrNotFound {
		return nil, fm.ErrNotExist
	}

	return &v, err
}

// GetByPath gets all the links for a specific path.
// NOTE(review): the parameter is named hash but is matched against the
// "Path" field — presumably it should be named path.
func (s ShareStore) GetByPath(hash string) ([]*fm.ShareLink, error) {
	var v []*fm.ShareLink
	err := s.DB.Find("Path", hash, &v)
	if err == storm.ErrNotFound {
		return v, fm.ErrNotExist
	}

	return v, err
}

// Gets retrieves all the shareable links.
func (s ShareStore) Gets() ([]*fm.ShareLink, error) {
	var v []*fm.ShareLink
	err := s.DB.All(&v)
	if err == storm.ErrNotFound {
		return v, fm.ErrNotExist
	}

	return v, err
}

// Save stores a Share Link on the database.
func (s ShareStore) Save(l *fm.ShareLink) error {
	return s.DB.Save(l)
}

// Delete deletes a Share Link from the database.
func (s ShareStore) Delete(hash string) error {
	return s.DB.DeleteStruct(&fm.ShareLink{Hash: hash})
}
// Command main is the entry point for the gw cmder tool; all real work is
// delegated to the generator package.
package main

import "github.com/oceanho/gw/contrib/cmder/generator"

func main() {
	generator.Run()
}
package main

import (
	"fmt"
	"html/template"
	"log"
	"os"
)

var (
	path      = os.Getenv("NGINX_BASE")      // base directory for project trees
	available = os.Getenv("NGINX_AVAILABLE") // nginx sites-available directory
	enabled   = os.Getenv("NGINX_ENABLED")   // nginx sites-enabled directory
	// The variables below are kept for compatibility with other files in this
	// package; nothing in this file writes to them.
	fullPath string
	aFile    string
	eFile    string
	output   string
)

// Nginx holds the values substituted into the nginx vhost template.
type Nginx struct {
	HostName   string
	Root       string
	DomainName string
}

// IndexFile holds the values substituted into the dashboard index template.
type IndexFile struct {
	HostName string
}

// restartNginx restarts nginx and PHP-FPM so new vhosts take effect.
// It always reports true; runcmd is trusted to surface its own failures.
func restartNginx() bool {
	fmt.Println("restartNginx")
	runcmd("service nginx restart")
	runcmd("service php7.1-fpm restart")
	return true
}

// createProjectPath creates the standard directory layout
// (web/dev/stage/backup/log) for a host under NGINX_BASE.
// Directories that already exist are left untouched.
func createProjectPath(hostName string) {
	base := os.Getenv("NGINX_BASE")
	fmt.Println("createProjectPath")
	for _, sub := range []string{"web", "dev", "stage", "backup", "log"} {
		dir := fmt.Sprintf("%s/%s/%s", base, hostName, sub)
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			os.MkdirAll(dir, 0755)
		}
	}
}

// crateIndex renders the dashboard template into an index.html for each
// environment (web, stage, dev) of the host.
//
// Fixes over the previous version: a template parse failure now aborts the
// call (previously a nil template was executed, which could not succeed),
// and the redundant second write of web/index.html after the loop was
// removed — the loop already produces that file with identical content.
func crateIndex(hostName string) {
	path = os.Getenv("NGINX_BASE")
	t, err := template.ParseFiles("tmpl/dashboard.tmpl")
	if err != nil {
		log.Print(err)
		return
	}
	data := IndexFile{
		HostName: hostName,
	}
	for _, env := range []string{"web", "stage", "dev"} {
		indexFile := fmt.Sprintf("%s/%s/%s/index.html", path, hostName, env)
		f, err := os.Create(indexFile)
		if err != nil {
			log.Println("create file: ", err)
			continue // nothing to write into; move on to the next env
		}
		if err := t.Execute(f, data); err != nil {
			log.Print("execute: ", err)
		}
		f.Close()
	}
}

// crateNginxConfig renders the nginx vhost template into sites-available and
// links it into sites-enabled, reporting success. The tmpl parameter is
// currently unused (the template name comes from NGINX_DEFAULT_CONFIG); it is
// kept for signature compatibility with addHost.
//
// Fix over the previous version: the sites-enabled symlink is only created
// after the config has been rendered successfully, so a failed render no
// longer leaves nginx pointing at an empty file.
func crateNginxConfig(hostName string, tmpl string) bool {
	fmt.Println("crateNginxConfig")
	var (
		base      = os.Getenv("NGINX_BASE")
		available = os.Getenv("NGINX_AVAILABLE")
		enabled   = os.Getenv("NGINX_ENABLED")
	)
	tmplFile := "tmpl/" + os.Getenv("NGINX_DEFAULT_CONFIG")
	data := Nginx{
		HostName:   hostName,
		Root:       base + "/" + hostName,
		DomainName: os.Getenv("NGINX_BASE_DOMAIN"),
	}
	// TODO: add log rotation for the generated vhost.
	t, err := template.ParseFiles(tmplFile)
	if err != nil {
		log.Print(err)
		return false
	}
	afile := available + "/" + hostName + ".conf"
	efile := enabled + "/" + hostName + ".conf"
	f, err := os.Create(afile)
	if err != nil {
		log.Println("create file: ", err)
		return false
	}
	defer f.Close()
	if err := t.Execute(f, data); err != nil {
		log.Print("execute: ", err)
		return false
	}
	// Enable the vhost only once the config file has been written.
	runcmd("ln -s " + afile + " " + efile)
	return true
}

// addHost provisions a new host: project directories, nginx vhost config and
// placeholder index pages. It reports whether the nginx config was written
// (previously it unconditionally returned true).
func addHost(hostName string, tmpl string) bool {
	fmt.Println("addHost")
	createProjectPath(hostName)
	ok := crateNginxConfig(hostName, tmpl)
	crateIndex(hostName)
	//restartNginx()
	return ok
}
// Package pubsub provides a library that implements the Publish and Subscribe
// model. Subscriptions can subscribe to complex data patterns and data
// will be published to all subscribers that fit the criteria.
//
// Each Subscription when subscribing will walk the underlying subscription
// tree to find its place in the tree. The given path when subscribing is used
// to analyze the Subscription and find the correct node to store it in.
//
// As data is published, the TreeTraverser analyzes the data to determine
// what nodes the data belongs to. Data is written to multiple subscriptions.
// This means that when data is published, the system can
// traverse multiple paths for the data.
package pubsub

import (
	"math/rand"
	"sync"
	"time"

	"github.com/apoydence/pubsub/internal/node"
)

// PubSub uses the given SubscriptionEnroller to create the subscription
// tree. It also uses the TreeTraverser to then write to the subscriber. All
// of PubSub's methods safe to access concurrently. PubSub should be
// constructed with New().
type PubSub struct {
	mu rlocker           // guards n; replaced by nopLock via WithNoMutex
	n  *node.Node        // root of the subscription tree
	sa ShardingAlgorithm // distributes writes among same-shardID subscriptions
}

// New constructs a new PubSub.
func New(opts ...PubSubOption) *PubSub {
	// Defaults: a real RWMutex and random sharding; options may override.
	p := &PubSub{
		n:  node.New(),
		sa: NewRandSharding(),
		mu: &sync.RWMutex{},
	}

	for _, o := range opts {
		o.configure(p)
	}

	return p
}

// PubSubOption is used to configure a PubSub.
type PubSubOption interface {
	configure(*PubSub)
}

// pubsubConfigFunc adapts an ordinary function to the PubSubOption interface.
type pubsubConfigFunc func(*PubSub)

func (f pubsubConfigFunc) configure(p *PubSub) {
	f(p)
}

// WithNoMutex configures a PubSub that does not have any internal mutexes.
// This is useful if more complex or custom locking is required. For example,
// if a subscription needs to subscribe while being published to.
func WithNoMutex() PubSubOption {
	return pubsubConfigFunc(func(p *PubSub) {
		p.mu = nopLock{}
	})
}

// Subscription is a subscription that will have corresponding data written
// to it.
type Subscription interface {
	Write(data interface{})
}

// SubscriptionFunc is an adapter to allow ordinary functions to be a
// Subscription.
type SubscriptionFunc func(data interface{})

// Write implements Subscription.
func (f SubscriptionFunc) Write(data interface{}) {
	f(data)
}

// ShardingAlgorithm is used to shard data across subscriptions with the same
// shardID and path.
type ShardingAlgorithm interface {
	// Write is invoked with the given data if publishing traverses to a node
	// that has multiple subscriptions with the same shardID.
	Write(data interface{}, subscriptions []Subscription)
}

// ShardingAlgorithmFunc is an adapter to allow ordinary functions to be a
// ShardingAlgorithm.
type ShardingAlgorithmFunc func(data interface{}, subscriptions []Subscription)

// Write implements ShardingAlgorithmFunc
func (f ShardingAlgorithmFunc) Write(data interface{}, subscriptions []Subscription) {
	f(data, subscriptions)
}

// RandSharding implements ShardingAlgorithm. It picks a random subscription
// to write to.
type RandSharding struct {
	*rand.Rand
}

// NewRandSharding constructs a new RandSharding.
func NewRandSharding() RandSharding {
	return RandSharding{rand.New(rand.NewSource(time.Now().UnixNano()))}
}

// Write implements ShardingAlgorithm.
// It assumes len(subscriptions) > 0: rand.Intn panics on 0.
func (r RandSharding) Write(data interface{}, subscriptions []Subscription) {
	idx := r.Rand.Intn(len(subscriptions))
	subscriptions[idx].Write(data)
}

// Unsubscriber is returned by Subscribe. It should be invoked to
// remove a subscription from the PubSub.
type Unsubscriber func()

// SubscribeOption is used to configure a subscription while subscribing.
type SubscribeOption interface {
	configure(*subscribeConfig)
}

// WithShardID configures a subscription to have a shardID. Subscriptions with
// a shardID are sharded to any subscriptions with the same shardID and path.
// Defaults to an empty shardID (meaning it does not shard).
func WithShardID(shardID string) SubscribeOption {
	return subscribeConfigFunc(func(c *subscribeConfig) {
		c.shardID = shardID
	})
}

// WithPath configures a subscription to reside at a path. The path determines
// what data the subscription is interested in. This value should be
// correspond to what the publishing TreeTraverser yields.
// It defaults to nil (meaning it gets everything).
func WithPath(path []string) SubscribeOption {
	return subscribeConfigFunc(func(c *subscribeConfig) {
		c.path = path
	})
}

// subscribeConfig collects the options applied while subscribing.
type subscribeConfig struct {
	shardID string
	path    []string
}

// subscribeConfigFunc adapts an ordinary function to SubscribeOption.
type subscribeConfigFunc func(*subscribeConfig)

func (f subscribeConfigFunc) configure(c *subscribeConfig) {
	f(c)
}

// Subscribe will add a subscription to the PubSub. It returns a function
// that can be used to unsubscribe. Options can be provided to configure
// the subscription and its interactions with published data.
func (s *PubSub) Subscribe(sub Subscription, opts ...SubscribeOption) Unsubscriber {
	c := subscribeConfig{}
	for _, o := range opts {
		o.configure(&c)
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// Walk/extend the tree down the configured path, then attach the
	// subscription at the final node.
	n := s.n
	for _, p := range c.path {
		n = n.AddChild(p)
	}
	id := n.AddSubscription(sub, c.shardID)

	return func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		s.cleanupSubscriptionTree(s.n, id, c.path)
	}
}

// cleanupSubscriptionTree removes subscription id at the end of path p and
// prunes any nodes left with no children and no subscriptions on the way
// back up.
// NOTE(review): invoking the same Unsubscriber twice re-walks the path;
// behavior then depends on node.FetchChild for missing children — confirm
// in internal/node before relying on idempotence.
func (s *PubSub) cleanupSubscriptionTree(n *node.Node, id int64, p []string) {
	if len(p) == 0 {
		n.DeleteSubscription(id)
		return
	}

	child := n.FetchChild(p[0])
	s.cleanupSubscriptionTree(child, id, p[1:])

	if child.ChildLen() == 0 && child.SubscriptionLen() == 0 {
		n.DeleteChild(p[0])
	}
}

// TreeTraverser publishes data to the correct subscriptions. Each
// data point can be published to several subscriptions. As the data traverses
// the given paths, it will write to any subscribers that are assigned there.
// Data can go down multiple paths (i.e., len(paths) > 1).
//
// Traversing a path ends when the return len(paths) == 0. If
// len(paths) > 1, then each path will be traversed.
type TreeTraverser interface {
	// Traverse is used to traverse the subscription tree.
	Traverse(data interface{}, currentPath []string) Paths
}

// TreeTraverserFunc is an adapter to allow ordinary functions to be a
// TreeTraverser.
type TreeTraverserFunc func(data interface{}, currentPath []string) Paths

// Traverse implements TreeTraverser.
func (f TreeTraverserFunc) Traverse(data interface{}, currentPath []string) Paths {
	return f(data, currentPath)
}

// LinearTreeTraverser implements TreeTraverser on behalf of a slice of paths.
// If the data does not traverse multiple paths, then this works well.
type LinearTreeTraverser []string

// Traverse implements TreeTraverser.
func (a LinearTreeTraverser) Traverse(data interface{}, currentPath []string) Paths {
	return a.buildTreeTraverser(a)(data, currentPath)
}

// buildTreeTraverser yields one element of the remaining path per traversal
// step, recursing until the path is exhausted (FlatPaths(nil) ends it).
func (a LinearTreeTraverser) buildTreeTraverser(remainingPath []string) TreeTraverserFunc {
	return func(data interface{}, currentPath []string) Paths {
		if len(remainingPath) == 0 {
			return FlatPaths(nil)
		}

		return NewPathsWithTraverser(FlatPaths([]string{remainingPath[0]}), a.buildTreeTraverser(remainingPath[1:]))
	}
}

// Paths is returned by a TreeTraverser. It describes how the data is
// both assigned and how to continue to analyze it.
type Paths interface {
	// At will be called with idx ranging from [0, n] where n is the number
	// of valid paths. This means that the Paths needs to be prepared
	// for an idx that is greater than it has valid data for.
	//
	// If nextTraverser is nil, then the previous TreeTraverser is used.
	At(idx int) (path string, nextTraverser TreeTraverser, ok bool)
}

// FlatPaths implements Paths for a slice of paths. It
// returns nil for all nextTraverser meaning to use the given TreeTraverser.
type FlatPaths []string

// At implements Paths.
func (p FlatPaths) At(idx int) (string, TreeTraverser, bool) { if idx >= len(p) { return "", nil, false } return p[idx], nil, true } // PathsWithTraverser implements Paths for both a slice of paths and // a single TreeTraverser. Each path will return the given TreeTraverser. // It should be constructed with NewPathsWithTraverser(). type PathsWithTraverser struct { a TreeTraverser p []string } // NewPathsWithTraverser creates a new PathsWithTraverser. func NewPathsWithTraverser(paths []string, a TreeTraverser) PathsWithTraverser { return PathsWithTraverser{a: a, p: paths} } // At implements Paths. func (a PathsWithTraverser) At(idx int) (string, TreeTraverser, bool) { if idx >= len(a.p) { return "", nil, false } return a.p[idx], a.a, true } // PathAndTraverser is a path and traverser pair. type PathAndTraverser struct { Path string Traverser TreeTraverser } // PathsWithTraverser implement Paths and allow a TreeTraverser to have // multiple paths with multiple traversers. type PathAndTraversers []PathAndTraverser // At implements Paths. func (t PathAndTraversers) At(idx int) (string, TreeTraverser, bool) { if idx >= len(t) { return "", nil, false } return t[idx].Path, t[idx].Traverser, true } // Publish writes data using the TreeTraverser to the interested subscriptions. 
func (s *PubSub) Publish(d interface{}, a TreeTraverser) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// The history set guarantees each node's subscriptions are written at
	// most once per Publish, even if several traversal paths reach the same
	// node.
	s.traversePublish(d, d, a, s.n, nil, make(map[*node.Node]bool))
}

// traversePublish recursively walks the subscription tree under n, writing d
// to the subscriptions of every node the TreeTraverser yields.
func (s *PubSub) traversePublish(d, next interface{}, a TreeTraverser, n *node.Node, l []string, history map[*node.Node]bool) {
	if n == nil {
		return
	}

	if _, ok := history[n]; !ok {
		n.ForEachSubscription(func(shardID string, ss []node.SubscriptionEnvelope) {
			// An empty shardID means "no sharding": every subscription in
			// the group receives d.
			if shardID == "" {
				for _, x := range ss {
					x.Subscription.Write(d)
				}
				return
			}

			// Same shardID: hand the whole group to the sharding algorithm,
			// which decides which subscription(s) receive d.
			// NOTE(review): the envelope itself is appended here, so
			// node.SubscriptionEnvelope presumably embeds/implements
			// Subscription — confirm in internal/node.
			var subs []Subscription
			for _, x := range ss {
				subs = append(subs, x)
			}
			s.sa.Write(d, subs)
		})
		history[n] = true
	}

	paths := a.Traverse(next, l)

	for i := 0; ; i++ {
		child, nextA, ok := paths.At(i)
		if !ok {
			return
		}

		// A nil traverser from Paths.At means "keep using the current one".
		if nextA == nil {
			nextA = a
		}

		c := n.FetchChild(child)
		// NOTE(review): append(l, child) can share l's backing array across
		// sibling iterations; safe while the traversal is synchronous, but a
		// Traverser must not retain currentPath — confirm.
		s.traversePublish(d, next, nextA, c, append(l, child), history)
	}
}

// rlocker is used to hold either a real sync.RWMutex or a nop lock.
// This is used to turn off locking.
type rlocker interface {
	sync.Locker
	RLock()
	RUnlock()
}

// nopLock is used to turn off locking for the PubSub. It implements the
// rlocker interface.
type nopLock struct{}

// Lock implements rlocker.
func (l nopLock) Lock() {}

// Unlock implements rlocker.
func (l nopLock) Unlock() {}

// RLock implements rlocker.
func (l nopLock) RLock() {}

// RUnlock implements rlocker.
func (l nopLock) RUnlock() {}
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"

	"book/ch03/mandelbrot"
)

var (
	addr = flag.String("address", "", "listening address")
	port = flag.Int("port", 8002, "listening port")
)

func main() {
	flag.Parse()

	// Without an address we run once in CLI mode, rendering to stdout.
	if *addr == "" {
		mandelbrot.Draw(os.Stdout)
		return
	}

	// Otherwise serve the rendered image over HTTP on address:port.
	handler := func(w http.ResponseWriter, r *http.Request) {
		mandelbrot.Draw(w)
	}
	http.HandleFunc("/", handler)

	listen := fmt.Sprintf("%s:%d", *addr, *port)
	log.Fatal(http.ListenAndServe(listen, nil))
}
package models

import (
	"bytes"
	"errors"
	"fmt"
	"github.com/msutter/go-pulp/pulp"
	"net/url"
	"strings"
)

// Node is one pulp server in a synchronization tree.
type Node struct {
	Fqdn            string
	ApiUser         string
	ApiPasswd       string
	Tags            []string
	Parent          *Node
	Children        []*Node
	Repositories    []Repository
	SyncPath        []string
	Depth           int
	TreePosition    int
	Errors          []error
	RepositoryError map[string]error
}

// MatchFqdn reports whether this node's FQDN equals fqdn.
func (n *Node) MatchFqdn(fqdn string) bool {
	return n.Fqdn == fqdn
}

// MatchFqdns reports whether this node's FQDN matches any of fqdns.
func (n *Node) MatchFqdns(fqdns []string) bool {
	for _, fqdn := range fqdns {
		if n.MatchFqdn(fqdn) {
			return true
		}
	}
	return false
}

// ContainsTag reports whether this node carries the given tag.
func (n *Node) ContainsTag(tag string) bool {
	for _, nodeTag := range n.Tags {
		if nodeTag == tag {
			return true
		}
	}
	return false
}

// ContainsTags reports whether this node carries at least one of tags.
func (n *Node) ContainsTags(tags []string) bool {
	for _, tag := range tags {
		if n.ContainsTag(tag) {
			return true
		}
	}
	return false
}

// IsLeaf reports whether this node has no children.
func (n *Node) IsLeaf() bool {
	return len(n.Children) == 0
}

// IsRoot reports whether this node has no parent.
func (n *Node) IsRoot() bool {
	return n.Parent == nil
}

// AncestorTreeWalker invokes f on every ancestor, nearest first.
func (n *Node) AncestorTreeWalker(f func(*Node)) {
	if p := n.Parent; p != nil {
		f(p)
		// recurse toward the root
		p.AncestorTreeWalker(f)
	}
}

// FqdnIsAncestor reports whether an ancestor has the given FQDN.
func (n *Node) FqdnIsAncestor(ancestorFqdn string) bool {
	found := false
	n.AncestorTreeWalker(func(ancestor *Node) {
		if ancestor.Fqdn == ancestorFqdn {
			found = true
		}
	})
	return found
}

// Are Fqdns a Ancestor?
// FqdnsAreAncestor reports whether any of the given FQDNs is an ancestor.
func (n *Node) FqdnsAreAncestor(ancestorFqdns []string) bool {
	returnValue := false
	for _, ancestorFqdn := range ancestorFqdns {
		if n.FqdnIsAncestor(ancestorFqdn) {
			returnValue = true
		}
	}
	return returnValue
}

// Ancestors returns all ancestors, nearest first.
func (n *Node) Ancestors() (ancestors []*Node) {
	n.AncestorTreeWalker(func(ancestor *Node) {
		ancestors = append(ancestors, ancestor)
	})
	return
}

// GetAncestorByDepth returns the ancestor at the given depth (nil if none).
func (n *Node) GetAncestorByDepth(depth int) (depthAncestor *Node) {
	n.AncestorTreeWalker(func(ancestor *Node) {
		if ancestor.Depth == depth {
			depthAncestor = ancestor
		}
	})
	return
}

// HasError reports whether this node recorded any general or per-repository
// error.
func (n *Node) HasError() bool {
	returnValue := false
	if (len(n.Errors) > 0) || len(n.RepositoryError) > 0 {
		returnValue = true
	}
	return returnValue
}

// AncestorsHaveError reports whether any ancestor recorded a general error.
func (n *Node) AncestorsHaveError() bool {
	returnValue := false
	for _, ancestor := range n.Ancestors() {
		if ancestor.Errors != nil {
			returnValue = true
		}
	}
	return returnValue
}

// AncestorsHaveRepositoryError reports whether any ancestor recorded an error
// for the given repository.
func (n *Node) AncestorsHaveRepositoryError(repository string) bool {
	returnValue := false
	for _, ancestor := range n.Ancestors() {
		if ancestor.RepositoryError[repository] != nil {
			returnValue = true
		}
	}
	return returnValue
}

// AncestorFqdnsWithErrors returns the FQDNs of ancestors with general errors.
func (n *Node) AncestorFqdnsWithErrors() (ancestorFqdns []string) {
	for _, ancestor := range n.AncestorsWithErrors() {
		ancestorFqdns = append(ancestorFqdns, ancestor.Fqdn)
	}
	return
}

// AncestorFqdnsWithRepositoryError returns the FQDNs of ancestors that
// recorded an error for the given repository.
func (n *Node) AncestorFqdnsWithRepositoryError(repository string) (ancestorFqdns []string) {
	for _, ancestor := range n.AncestorsWithRepositoryError(repository) {
		ancestorFqdns = append(ancestorFqdns, ancestor.Fqdn)
	}
	return
}

// AncestorsWithErrors returns the ancestors with general errors.
func (n *Node) AncestorsWithErrors() (ancestors []*Node) {
	n.AncestorTreeWalker(func(ancestor *Node) {
		if ancestor.Errors != nil {
			ancestors = append(ancestors, ancestor)
		}
	})
	return
}

// AncestorsWithRepositoryError returns the ancestors that recorded an error
// for the given repository.
func (n *Node) AncestorsWithRepositoryError(repository string) (ancestors []*Node) {
	n.AncestorTreeWalker(func(ancestor *Node) {
		if ancestor.RepositoryError[repository] != nil {
			ancestors = append(ancestors, ancestor)
		}
	})
	return
}

// ChildTreeWalker invokes f on every descendant, depth-first.
func (n *Node) ChildTreeWalker(f func(*Node)) {
	for _, node := range n.Children {
		f(node)
		// recurse into the subtree
		node.ChildTreeWalker(f)
	}
}

// IslastBrother reports whether this node is the last of its siblings.
// NOTE(review): lastBrother dereferences n.Parent, so calling this on the
// root node panics — confirm callers always guard with !IsRoot().
func (n *Node) IslastBrother() bool {
	if n.lastBrother() == n {
		return true
	} else {
		return false
	}
}

// BrotherIndex returns this node's index among its siblings (0 for a root).
func (n *Node) BrotherIndex() (iret int) {
	if !n.IsRoot() {
		for i, child := range n.Parent.Children {
			if n == child {
				iret = i
			}
		}
	}
	return iret
}

// lastBrother returns the last of this node's siblings (panics on a root,
// since it reads n.Parent.Children unguarded).
func (n *Node) lastBrother() (lastBrother *Node) {
	brothers := n.Parent.Children
	lastBrother = brothers[len(brothers)-1]
	return
}

// FqdnIsDescendant reports whether a descendant has the given FQDN.
func (n *Node) FqdnIsDescendant(childFqdn string) bool {
	returnValue := false
	n.ChildTreeWalker(func(child *Node) {
		if child.MatchFqdn(childFqdn) {
			returnValue = true
		}
	})
	return returnValue
}

// FqdnsAreDescendant reports whether any of the given FQDNs is a descendant.
func (n *Node) FqdnsAreDescendant(childFqdns []string) bool {
	returnValue := false
	n.ChildTreeWalker(func(child *Node) {
		if child.MatchFqdns(childFqdns) {
			returnValue = true
		}
	})
	return returnValue
}

// Do any descendants carry the given tags?
func (n *Node) TagsInDescendant(childTags []string) bool { returnValue := false n.ChildTreeWalker(func(child *Node) { if child.ContainsTags(childTags) { returnValue = true } }) return returnValue } func (n *Node) Sync(repositories []string, progressChannel chan SyncProgress) (err error) { client, err := PulpApiClient(n) err = PulpApiSyncRepo(n, client, repositories, progressChannel) if err != nil { return err } return } func (n *Node) Show() (err error) { fmt.Println(n.GetTreeRaw(n.Fqdn)) return nil } func (n *Node) CheckRepositories(repositories []string) (err error) { if !n.IsRoot() { fmt.Printf("checking repositories on node %v\n", n.Fqdn) for _, targetRepository := range repositories { fmt.Printf(" - '%v': ", targetRepository) if !n.HasRepository(targetRepository) { fmt.Printf("error\n") fmt.Printf("\n") errorMsg := fmt.Sprintf("Could not find repository '%v' on node %v", targetRepository, n.Fqdn) err = errors.New(errorMsg) n.RepositoryError[targetRepository] = err return err } else { fmt.Printf("pass\n") } } fmt.Printf("\n") } return } func (n *Node) CheckRepositoryFeeds() (err error) { if !n.IsRoot() { for _, currentRepository := range n.Repositories { u, err := url.Parse(currentRepository.Feed) // check that the feed is pointing on the parent node if u.Host != n.Parent.Fqdn { errorMsg := fmt.Sprintf("Repository '%v' has invalid feed '%v'. 
Parent is '%v'", currentRepository.Name, currentRepository.Feed, n.Parent.Fqdn) err = errors.New(errorMsg) n.RepositoryError[currentRepository.Name] = err return err } // check that the feed is pointing on an existing repository on the parent node pathSlice := strings.Split(u.Path, "/") repoInPath := pathSlice[len(pathSlice)-2] if !n.Parent.HasRepository(repoInPath) { errorMsg := fmt.Sprintf("Repository '%v' does not exist on parent node '%v'", repoInPath, n.Parent.Fqdn) err = errors.New(errorMsg) n.RepositoryError[currentRepository.Name] = err return err } } } return } func (n *Node) HasRepository(repository string) bool { for _, currentRepository := range n.Repositories { if currentRepository.Name == repository { return true } } return false } func (n *Node) UpdateRepositories() (err error) { client, err := PulpApiClient(n) if err != nil { n.Errors = append(n.Errors, err) return err } var remoteRepos []*pulp.Repository remoteRepos, err = PulpApiGetRepos(n, client) if err != nil { n.Errors = append(n.Errors, err) return err } for _, remoteRepo := range remoteRepos { repo := Repository{ Name: remoteRepo.Id, Feed: remoteRepo.Importers[0].ImporterConfig.Feed, } n.Repositories = append(n.Repositories, repo) } return } func (n *Node) GetTreeRaw(msg string) (treeRaw string) { var buffer bytes.Buffer if n.Depth == 0 { buffer.WriteString(fmt.Sprintf("\n├─ %v", msg)) } else { buffer.WriteString(fmt.Sprintf(" ")) } for i := 1; i < n.Depth; i++ { if n.Depth != 0 { // is my ancestor at Depth x the last brother depthAncestor := n.GetAncestorByDepth(i) if depthAncestor.IslastBrother() { buffer.WriteString(fmt.Sprintf(" ")) } else { buffer.WriteString(fmt.Sprintf("│ ")) } } else { buffer.WriteString(fmt.Sprintf(" ")) } } if n.Depth != 0 { if n.IslastBrother() { buffer.WriteString(fmt.Sprintf("└─ %v", msg)) } else { buffer.WriteString(fmt.Sprintf("├─ %v", msg)) } } return buffer.String() }
package model import ( "mall/lib/address" xtime "mall/lib/time" "gopkg.in/mgo.v2/bson" ) type EweiShopGroupsActivity struct { ID bson.ObjectId `bson:"_id,omitempty" gorm:"-" json:"-"` Id string `bson:"id,omitempty" json:"id"` Uniacid int `bson:"uniacid,omitempty" json:"uniacid"` Uid int `bson:"uid,omitempty" json:"uid"` Title string `bson:"title,omitempty" json:"title"` Isbullet int `bson:"isbullet,omitempty" json:"isbullet"` // 是否显示弹幕 Starttime xtime.Time `bson:"starttime,omitempty" json:"starttime"` Endtime xtime.Time `bson:"endtime,omitempty" json:"endtime"` ThumbUrl string `bson:"thumb_url,omitempty" json:"thumb_url"` ShareTitle string `bson:"share_title,omitempty" json:"share_title"` ShareImg string `bson:"share_img,omitempty" json:"share_img"` ShareDesc string `bson:"share_desc,omitempty" gorm:"type:TEXT" json:"share_desc"` Content string `bson:"content,omitempty" gorm:"type:TEXT" json:"content"` Visit int `bson:"visit,omitempty" json:"visit"` // 访问次数 Sales int `bson:"sales,omitempty" json:"sales"` // 购买数量 Shares int `bson:"shares,omitempty" json:"shares"` Purchaselimit int `bson:"purchase_limit,omitempty" json:"purchaselimit"` Price float64 `bson:"price,omitempty" json:"price"` // 团购原价 Singleprice float64 `bson:"singleprice,omitempty" json:"singleprice"` // 单买价格 Groupnum int `bson:"groupnum,omitempty" json:"groupnum"` // 开团人数 ServerPhone string `bson:"server_phone,omitempty" json:"service_phone"` // 客服电话 ServerName string `bson:"server_name,omitempty" json:"server_name"` // 客服姓名 Status int `bson:"status,omitempty" json:"status"` // 状态 Createdtime xtime.Time `bson:"createdtime,omitempty" json:"createtime"` // 创建时间 GoodsListArray []int `bson:"goods_list_array,omitempty" gorm:"-" json:"goods_list"` VerifyCode string `bson:"verify_code,omitempty" json:"verify_code"` // 核销密码 Isteam int `bson:"isteam,omitempty" json:"isteam"` Teamset []EweiShopGroupsActivityTeamset `bson:"teamset,omitempty" json:"teamset" grom:"ForeignKey:ActivityId"` Address address.Address 
`bson:"address,omitempty" json:"address,omitempty"` } type EweiShopGroupsActivityTeamset struct { Name string `bson:"name" json:"name"` TeamNum int `bson:"team_num" json:"teamnum"` // 成团人数 TeamPrice float64 `bson:"team_price" json:"team_price"` // 团购价格 } type ActivityQuery struct { Id string `json:"id" form:"id"` Uniacid int `json:"uniacid" form:"uniacid"` Uid int `json:"uid" form:"uid"` Uids []int `json:"uids" form:"uids"` Status int `json:"status" form:"status"` Statuses []int `json:"statuses" form:"statuses"` Keyword string `json:"keyword" form:"keyword"` Page int `json:"page" form:"page"` PageSize int `json:"page_size" form:"page_size"` }
package data

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kiali/kiali/kubernetes"
)

// CreateEmptyMeshPolicy builds a MeshPolicy-shaped GenericIstioObject with
// the given name and "peers" spec entry, returning a deep copy so callers
// can mutate the result freely.
func CreateEmptyMeshPolicy(name string, peers []interface{}) kubernetes.IstioObject {
	return (&kubernetes.GenericIstioObject{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:        name,
			ClusterName: "svc.cluster.local",
		},
		Spec: map[string]interface{}{
			"peers": peers,
		},
	}).DeepCopyIstioObject()
}

// CreateMTLSPeers returns a one-element peers list enabling mTLS with the
// given mode (presumably an Istio TLS mode such as STRICT/PERMISSIVE —
// confirm against callers).
func CreateMTLSPeers(mode string) []interface{} {
	return []interface{}{
		map[string]interface{}{
			"mtls": map[string]interface{}{
				"mode": mode,
			},
		},
	}
}

// AddTargetsToMeshPolicy sets the "targets" entry on the policy's spec in
// place and returns the same object for chaining.
func AddTargetsToMeshPolicy(targets []interface{}, mp kubernetes.IstioObject) kubernetes.IstioObject {
	mp.GetSpec()["targets"] = targets
	return mp
}
package edocuments

// DocumentFetchQuery identifies one document by its type, series and number.
type DocumentFetchQuery struct {
	Type   string `json:"type"`
	Series string `json:"series"`
	Number string `json:"number"`
}

// Document is an electronic document (invoice, receipt, ...) together with
// its client details and line items.
// NOTE(review): this struct uses the json key "serie" while
// DocumentFetchQuery uses "series" — confirm which one the wire format
// expects before unifying.
type Document struct {
	Type             string  `json:"type"` // FT ; FR ; FS ; DI ;
	Serie            string  `json:"serie"`
	Number           string  `json:"number"`
	UserId           int     `json:"userId"`
	Observations     *string `json:"observations"` // optional free-form notes
	ClientId         string  `json:"clientId"`
	ClientName       string  `json:"clientName"`
	ClientAddress    string  `json:"clientAddress"`
	ClientVat        string  `json:"clientVat"`
	ClientPostalCode string  `json:"clientPostalCode"`
	ClientCity       string  `json:"clientCity"`
	UniqueID         string  `json:"uniqueID"`
	Total            float32 `json:"total"`
	SubTotal         float32 `json:"subTotal"`
	// Rows are the document's line items.
	Rows []struct {
		ItemKeyId       string  `json:"itemKeyId"`
		ItemDescription string  `json:"itemDescription"`
		Quantity        string  `json:"quantity"`
		UnitPrice       float32 `json:"unitPrice"`
		IncludesTaxes   bool    `json:"includesTaxes"`
		TaxType         string  `json:"TaxType"` // ISE ; NOR ; RED ; INT
		TaxValue        float32 `json:"taxValue"`
	}
}

// Item is a sellable product, optionally stock-controlled.
type Item struct {
	Ref            string          `json:"ref"`
	Stock          float32         `json:"stock"`
	Description    string          `json:"description"`
	Barcode        *string         `json:"barcode"` // optional
	AlternativeRef string          `json:"alternativeRef"`
	ControlStock   bool            `json:"controlStock"`
	Attributes     []ItemAttribute `json:"attributes"`
	AskQuantity    bool            `json:"askQuantity"`
}

// ItemAttribute is one attribute/value pair attached to an Item.
// NOTE(review): the json key "atrributeId" is misspelled but is part of the
// wire format — do not change without coordinating with the API consumers.
type ItemAttribute struct {
	AtrributeId      int    `json:"atrributeId"`
	AttributeValueId int    `json:"attributeValueId"`
	Name             string `json:"name"`
	AttributeName    string `json:"attributeName"`
}
package node

import (
	"archive/tar"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/tinyzimmer/k3p/pkg/log"
	"github.com/tinyzimmer/k3p/pkg/types"

	dockertypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

// NewDocker initializes a new node using a local container for the instance.
//
// Behavior (as implemented below): if a container matching opts' filters
// already exists it is adopted as-is; otherwise the cluster network and k3s
// image are ensured, and for non-load-balancer roles two volumes plus a
// placeholder busybox container are created so GetFile/WriteFile work before
// Execute launches k3s. Load balancers get a lazily-initialized Docker handle.
func NewDocker(opts *types.DockerNodeOptions) (types.Node, error) {
	// Get a docker client
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, err
	}
	// Check if the node exists already, if it does everything else is probably done
	containers, err := cli.ContainerList(context.TODO(), dockertypes.ContainerListOptions{
		Filters: opts.GetFilters(),
	})
	if err != nil {
		return nil, err
	}
	// If we have a container, return it
	if len(containers) == 1 {
		return &Docker{
			cli:         cli,
			containerID: containers[0].ID,
			opts:        opts,
		}, nil
	}
	// Ensure a docker network for the cluster
	if err := ensureClusterNetwork(cli, opts.ClusterOptions); err != nil {
		return nil, err
	}
	// Ensure the docker image with the given k3s version is ready for when we start
	if err := pullIfNotPresent(cli, opts.GetK3sImage()); err != nil {
		return nil, err
	}
	// If a load balancer, immediately return a non-initialized container for Execute
	// to create/start
	if opts.NodeRole == types.K3sRoleLoadBalancer {
		return &Docker{cli: cli, opts: opts}, nil
	}
	// Create a volume for k3s assets (one for /var/lib/rancher, one for /etc)
	varVolCreateBody := volume.VolumeCreateBody{
		Driver:     "local",
		DriverOpts: map[string]string{},
		Labels:     opts.GetLabels(),
		Name:       opts.GetNodeName(),
	}
	etcVolCreateBody := volume.VolumeCreateBody{
		Driver:     "local",
		DriverOpts: map[string]string{},
		Labels:     opts.GetLabels(),
		Name:       fmt.Sprintf("%s-etc", opts.GetNodeName()),
	}
	volNames := make([]string, 2)
	for i, volCreateBody := range []volume.VolumeCreateBody{varVolCreateBody, etcVolCreateBody} {
		log.Info("Creating docker volume", volCreateBody.Name)
		log.Debugf("VolumeCreateBody: %+v\n", volCreateBody)
		vol, err := cli.VolumeCreate(context.TODO(), volCreateBody)
		if err != nil {
			return nil, err
		}
		volNames[i] = vol.Name
	}
	// We at first just use a busybox container with a persistent volume to serve
	// GetFile and WriteFile requests. K3s is not actually launched until Execute
	// is called.
	if err := pullIfNotPresent(cli, "busybox:latest"); err != nil {
		return nil, err
	}
	hostConfig := &container.HostConfig{
		Binds: []string{
			fmt.Sprintf("%s:%s", volNames[0], types.K3sRootConfigDir),
			fmt.Sprintf("%s:%s", volNames[1], types.K3sEtcDir),
		},
	}
	busyboxConfig := &container.Config{
		Image: "busybox:latest",
		Volumes: map[string]struct{}{
			types.K3sRootConfigDir: struct{}{},
			types.K3sEtcDir:        struct{}{},
		},
		Labels: opts.GetComponentLabels("busybox"),
		// tail -f /dev/null keeps the placeholder container alive indefinitely
		Cmd: strslice.StrSlice([]string{"tail", "-f", "/dev/null"}),
	}
	log.Debugf("Busybox container config: %+v\n", busyboxConfig)
	log.Debugf("Busybox host config: %+v\n", hostConfig)
	log.Debug("Creating busybox container")
	// NOTE(review): this local "container" shadows the imported "container"
	// package; harmless here since the package is not referenced afterwards.
	container, err := cli.ContainerCreate(context.TODO(), busyboxConfig, hostConfig, nil, opts.GetNodeName())
	if err != nil {
		return nil, err
	}
	log.Debugf("Starting busybox container %q\n", container.ID)
	if err := cli.ContainerStart(context.TODO(), container.ID, dockertypes.ContainerStartOptions{}); err != nil {
		return nil, err
	}
	return &Docker{
		cli:         cli,
		containerID: container.ID,
		opts:        opts,
	}, nil
}

// Docker represents a node backed by a docker container. It is exported for the extra
// methods it provides.
type Docker struct {
	cli         client.APIClient         // docker daemon client
	containerID string                   // current backing container (busybox, then k3s)
	opts        *types.DockerNodeOptions // options this node was created with
}

// GetType implements the node interface.
func (d *Docker) GetType() types.NodeType { return types.NodeDocker }

// MkdirAll implements the node interface and will create a directory inside the current
// container.
func (d *Docker) MkdirAll(dir string) error { execCfg := dockertypes.ExecConfig{ User: "root", Cmd: []string{"mkdir", "-p", dir}, Detach: true, } log.Debugf("Creating exec process in container %q: %+v\n", d.containerID, execCfg) id, err := d.cli.ContainerExecCreate(context.TODO(), d.containerID, execCfg) if err != nil { return err } if err := d.cli.ContainerExecStart(context.TODO(), id.ID, dockertypes.ExecStartCheck{}); err != nil { return err } for { status, err := d.cli.ContainerExecInspect(context.TODO(), id.ID) if err != nil { return err } if status.Pid == 0 { // process hasn't started yet continue } if status.Running { // process is still running continue } if status.ExitCode == 0 { // process completed return nil } // process exited with error return fmt.Errorf("process exited with status code %d", status.ExitCode) } } // GetFile implements the node interface and will retrieve a file from the container. func (d *Docker) GetFile(path string) (io.ReadCloser, error) { rdr, _, err := d.cli.CopyFromContainer(context.TODO(), d.containerID, path) if err != nil { return nil, err } tr := tar.NewReader(rdr) header, err := tr.Next() if err != nil { return nil, err } if header.Typeflag != tar.TypeReg { if header.Typeflag == tar.TypeSymlink { log.Debugf("Following symlink to %q\n", header.Linkname) return d.GetFile(header.Linkname) } log.Debugf("Invalid header: %+v\n", *header) return nil, fmt.Errorf("%q is not a regular file", path) } return ioutil.NopCloser(tr), nil } // WriteFile implements the node interface, and will write a file to the container. For docker nodes // it only accepts files rooted in /var/lib/rancher/k3s. 
// WriteFile streams the given content into the container at destination.
// Files outside the k3s config/etc directories are silently skipped. The
// content is wrapped in a single-entry tar stream and sent through a pipe to
// the docker copy API, which runs concurrently in a goroutine.
func (d *Docker) WriteFile(rdr io.ReadCloser, destination string, mode string, size int64) error {
	defer rdr.Close()
	// stupid hack to only care about actual runtime files
	if !strings.HasPrefix(destination, types.K3sRootConfigDir) && !strings.HasPrefix(destination, types.K3sEtcDir) {
		return nil
	}
	if err := d.MkdirAll(path.Dir(destination)); err != nil {
		return err
	}
	// Make a pipe for sending the contents to the container
	r, w := io.Pipe()
	// Kick off the copy in a goroutine; its result is collected at the end via
	// the errors channel, after the tar stream has been fully written/closed.
	errors := make(chan error)
	log.Debugf("Spawning copy process to %q in container %q\n", path.Dir(destination), d.containerID)
	go func() {
		errors <- d.cli.CopyToContainer(context.TODO(), d.containerID, path.Dir(destination), r, dockertypes.CopyToContainerOptions{})
	}()
	// Write tar data to the pipe
	tw := tar.NewWriter(w)
	// mode is parsed with base 0 so octal strings like "0644" work
	modeInt, err := strconv.ParseInt(mode, 0, 16)
	if err != nil {
		return err
	}
	now := time.Now()
	header := &tar.Header{
		Typeflag:   tar.TypeReg,
		Name:       path.Base(destination),
		Size:       size,
		Mode:       modeInt,
		Uid:        0,
		Gid:        0,
		Uname:      "root",
		Gname:      "root",
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	}
	log.Debugf("Generated tar header for docker copy: %+v\n", header)
	if err := tw.WriteHeader(header); err != nil {
		return err
	}
	log.Debugf("Copying tar buffer to container %q at %q\n", d.containerID, destination)
	// Copy errors are logged but deliberately not returned: the tar/pipe must
	// still be closed so the goroutine above can finish.
	if _, err := io.Copy(tw, rdr); err != nil {
		log.Error("Error copying contents to buffer:", err)
	}
	if err := tw.Close(); err != nil {
		return err
	}
	// Send an EOF to the docker copy
	if err := w.Close(); err != nil {
		return err
	}
	return <-errors
}

// Execute implements the node interface and starts the K3s container. It treats the provided command
// as arguments to run K3s with. It's because of this implementation that this method should probably be
// renamed.
// Execute starts the k3s container for this node. An "k3s-uninstall.sh"
// command is intercepted and redirected to RemoveAll; otherwise the busybox
// placeholder (if any) is removed and replaced with the real k3s container.
func (d *Docker) Execute(opts *types.ExecuteOptions) error {
	if opts.Command == "k3s-uninstall.sh" {
		// TODO: type cast this somewhere
		return d.RemoveAll()
	}
	log.Info("Starting k3s docker node", d.opts.GetNodeName())
	// Assume being called to start K3s if a server or agent, first remove busybox container
	if d.opts.NodeRole != types.K3sRoleLoadBalancer {
		log.Debug("Removing busybox bootstrap node for", d.opts.GetNodeName())
		if err := d.cli.ContainerRemove(context.TODO(), d.containerID, dockertypes.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			return err
		}
	}
	// Build the k3s container according to the opts
	containerConfig, hostConfig, networkConfig, err := translateOptsToConfigs(d.opts, opts)
	if err != nil {
		return err
	}
	log.Debugf("K3s container config: %+v\n", containerConfig)
	log.Debugf("K3s host config: %+v\n", hostConfig)
	log.Debugf("K3s network config: %+v\n", networkConfig)
	container, err := d.cli.ContainerCreate(context.TODO(), containerConfig, hostConfig, networkConfig, d.opts.GetNodeName())
	if err != nil {
		return err
	}
	log.Debugf("Starting K3s container %q\n", container.ID)
	if err := d.cli.ContainerStart(context.TODO(), container.ID, dockertypes.ContainerStartOptions{}); err != nil {
		return err
	}
	// Track the new k3s container from now on (replaces the busybox ID)
	d.containerID = container.ID
	return nil
}

// GetK3sAddress implements the node interface and returns this node's name. It is assumed
// the interested caller is interacting with a node on the same network.
func (d *Docker) GetK3sAddress() (string, error) { return d.opts.GetNodeName(), nil }

// Close implements the node interface and closes the connection to the docker daemon
func (d *Docker) Close() error { return d.cli.Close() }

// RemoveAll is a special method implemented by the Docker object. It cleans up the container
// and all its resources.
// RemoveAll force-removes this node's container and then deletes both of its
// named volumes (the k3s assets volume and the matching "-etc" volume).
func (d *Docker) RemoveAll() error {
	if addr, err := d.GetK3sAddress(); err == nil { // it's always nil for docker
		log.Info("Removing docker container and volumes for", addr)
	}
	if err := d.cli.ContainerRemove(context.TODO(), d.containerID, dockertypes.ContainerRemoveOptions{
		Force:         true,
		RemoveVolumes: true,
	}); err != nil {
		return err
	}
	for _, vol := range []string{d.opts.GetNodeName(), fmt.Sprintf("%s-etc", d.opts.GetNodeName())} {
		if err := d.cli.VolumeRemove(context.TODO(), vol, true); err != nil {
			return err
		}
	}
	return nil
}

// IsK3sRunning is a special method implemented by the Docker object to determine if a node is already
// running k3s. It inspects the container and checks that its entrypoint is a
// k3s binary and that the container is in the running state.
func (d *Docker) IsK3sRunning() bool {
	status, err := d.cli.ContainerInspect(context.TODO(), d.containerID)
	if err != nil {
		// Assume CLI error is false, might not be a good idea tho
		return false
	}
	if len(status.Config.Entrypoint) == 0 {
		return false
	}
	return strings.HasSuffix(status.Config.Entrypoint[0], "k3s") && status.State.Running
}

// GetOpts returns the options that were used to configure this node.
func (d *Docker) GetOpts() *types.DockerNodeOptions { return d.opts }
/*
B1 Yönetim Sistemleri Yazılım ve Danışmanlık Ltd. Şti.
User : ICI
Name : Ibrahim ÇOBANİ
Date : 25.07.2019 15:57
Notes :
*/
package models

// ApiKeyUsages records the usage count observed for a single API key.
type ApiKeyUsages struct {
	ApiKey string // the API key being tracked
	Usage  uint64 // number of times the key has been used
}
package vaulttransit

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"path"
	"strings"
	"sync"

	"github.com/hashicorp/vault/api"
	"github.com/libopenstorage/secrets"
	"github.com/libopenstorage/secrets/pkg/store"
	"github.com/libopenstorage/secrets/vault/utils"
	"github.com/libopenstorage/secrets/vaulttransit/client/transit"
	"github.com/portworx/kvdb"
	"github.com/sirupsen/logrus"
)

const (
	Name           = secrets.TypeVaultTransit
	TransitKvdbKey = "vaulttransit-kvdb"
	// kvdb paths under which public DEKs and private data are persisted
	kvdbPublicBasePath = "vaulttransit/secrets/public/"
	kvdbDataBasePath   = "vaulttransit/secrets/data/"
	// name of the transit key created when the user does not supply one
	defaultPxEncryptionKey = "pwx-encryption-key"
	// EncryptionKey is the config/env key naming the transit encryption key to use
	EncryptionKey = "VAULT_ENCRYPTION_KEY"
)

// vaultSecrets implements secrets.Secrets on top of Vault's transit engine.
// DEKs (data encryption keys) are persisted in kvdb; the transit engine only
// wraps/unwraps them with the configured encryption key.
type vaultSecrets struct {
	mu               sync.RWMutex // guards client usage during token renewal
	client           *api.Client
	currentNamespace string // namespace the current client token belongs to

	lockClientToken sync.Mutex // serializes token swaps in auto-auth mode

	namespace     string
	autoAuth      bool
	config        map[string]interface{}
	ps            store.PersistenceStore
	encryptionKey string
}

// These variables are helpful in testing to stub method call from packages
var (
	newVaultClient = api.NewClient
)

func init() {
	if err := secrets.Register(Name, New); err != nil {
		panic(err.Error())
	}
}

// New constructs the vault-transit secrets provider from the given config.
// The config must carry a kvdb instance (TransitKvdbKey) used to persist DEKs,
// plus the usual vault connection/auth parameters.
func New(
	secretConfig map[string]interface{},
) (secrets.Secrets, error) {
	var (
		kv kvdb.Kvdb
	)
	v, ok := secretConfig[TransitKvdbKey]
	if !ok {
		return nil, secrets.ErrInvalidKvdbProvided
	}
	kv, ok = v.(kvdb.Kvdb)
	if !ok || kv == nil {
		return nil, secrets.ErrInvalidKvdbProvided
	}
	ps := store.NewKvdbPersistenceStore(kv, kvdbPublicBasePath, kvdbDataBasePath)

	// DefaultConfig uses the environment variables if present.
	config := api.DefaultConfig()
	if len(secretConfig) == 0 && config.Error != nil {
		return nil, config.Error
	}

	address := utils.GetVaultParam(secretConfig, api.EnvVaultAddress)
	if address == "" {
		return nil, utils.ErrVaultAddressNotSet
	}
	if err := utils.IsValidAddr(address); err != nil {
		return nil, err
	}
	config.Address = address

	if err := utils.ConfigureTLS(config, secretConfig); err != nil {
		return nil, err
	}

	client, err := newVaultClient(config)
	if err != nil {
		return nil, err
	}

	namespace := utils.GetVaultParam(secretConfig, api.EnvVaultNamespace)
	if len(namespace) > 0 {
		// use a namespace as a header for setup purposes
		// later use it as a key prefix; the defer resets it when New returns
		client.SetNamespace(namespace)
		defer client.SetNamespace("")
	}

	token, autoAuth, err := utils.Authenticate(client, secretConfig)
	if err != nil {
		utils.CloseIdleConnections(config)
		return nil, fmt.Errorf("failed to get the authentication token: %w", err)
	}
	client.SetToken(token)

	authMethod := "token"
	method := utils.GetVaultParam(secretConfig, utils.AuthMethod)
	if method != "" && utils.GetVaultParam(secretConfig, api.EnvVaultToken) == "" {
		authMethod = method
	}
	logrus.Infof("Authenticated to Vault Transit with %v\n", authMethod)

	userEncryptionKey := utils.GetVaultParam(secretConfig, EncryptionKey)
	// vault namespace has been already set to the client
	encryptionKey, err := ensureEncryptionKey(client, userEncryptionKey, "")
	if err != nil {
		return nil, err
	}

	return &vaultSecrets{
		namespace:        namespace,
		currentNamespace: namespace,
		client:           client,
		autoAuth:         autoAuth,
		config:           secretConfig,
		ps:               ps,
		encryptionKey:    encryptionKey,
	}, nil
}

// String returns the provider name.
func (v *vaultSecrets) String() string {
	return Name
}

// GetSecret fetches the DEK for secretID from the persistence store and, unless
// PublicSecretData is set, unwraps it via transit to recover the passphrase.
// CustomSecretData and PublicSecretData are mutually exclusive flags.
func (v *vaultSecrets) GetSecret(
	secretID string,
	keyContext map[string]string,
) (map[string]interface{}, secrets.Version, error) {
	// presence of the keys, not their values, acts as the flag
	_, customData := keyContext[secrets.CustomSecretData]
	_, publicData := keyContext[secrets.PublicSecretData]
	if customData && publicData {
		return nil, secrets.NoVersion, &secrets.ErrInvalidKeyContext{
			Reason: "both CustomSecretData and PublicSecretData flags cannot be set",
		}
	}

	key := v.encryptionSecret(keyContext)
	dek, err := v.getDekFromStore(key.Namespace, secretID)
	if err != nil {
		return nil, secrets.NoVersion, err
	}

	secretData := make(map[string]interface{})
	if publicData {
		// caller only wants the wrapped DEK, not the plaintext
		secretData[secretID] = dek
		return secretData, secrets.NoVersion, nil
	}

	// Use the encryption key to unwrap the DEK and get the secret passphrase
	encodedPassphrase, err := v.decrypt(key, string(dek))
	if err != nil {
		return nil, secrets.NoVersion, err
	}
	decodedPassphrase, err := base64.StdEncoding.DecodeString(encodedPassphrase)
	if err != nil {
		return nil, secrets.NoVersion, err
	}
	if customData {
		// custom data was stored as JSON; decode it back into a map
		if err := json.Unmarshal(decodedPassphrase, &secretData); err != nil {
			return nil, secrets.NoVersion, err
		}
	} else {
		secretData[secretID] = string(decodedPassphrase)
	}
	return secretData, secrets.NoVersion, nil
}

// PutSecret stores a DEK for secretID. Depending on the key context flags it
// either stores a caller-supplied wrapped DEK (PublicSecretData), wraps
// caller-supplied JSON data (CustomSecretData), or generates a fresh DEK.
func (v *vaultSecrets) PutSecret(
	secretID string,
	secretData map[string]interface{},
	keyContext map[string]string,
) (secrets.Version, error) {
	var (
		cipher string
		dek    []byte
		err    error
	)
	_, override := keyContext[secrets.OverwriteSecretDataInStore]
	_, customData := keyContext[secrets.CustomSecretData]
	_, publicData := keyContext[secrets.PublicSecretData]

	key := v.encryptionSecret(keyContext)
	if err := secrets.KeyContextChecks(keyContext, secretData); err != nil {
		return secrets.NoVersion, err
	} else if publicData && len(secretData) > 0 {
		publicDek, ok := secretData[secretID]
		if !ok {
			return secrets.NoVersion, secrets.ErrInvalidSecretData
		}
		dek, ok = publicDek.([]byte)
		if !ok {
			return secrets.NoVersion, &secrets.ErrInvalidKeyContext{
				Reason: "secret data when PublicSecretData flag is set should be of the type []byte",
			}
		}
	} else if len(secretData) > 0 && customData {
		// Wrap the custom secret data and create a new entry in store
		// with the input secretID and the returned dek
		value, err := json.Marshal(secretData)
		if err != nil {
			return secrets.NoVersion, err
		}
		encodedPassphrase := base64.StdEncoding.EncodeToString(value)
		cipher, err = v.encrypt(key, encodedPassphrase)
		dek = []byte(cipher)
	} else {
		// Generate a new dek and create a new entry in store
		// with the input secretID and the generated dek
		cipher, err = v.generateDataKey(key)
		dek = []byte(cipher)
	}
	if err != nil {
		return secrets.NoVersion, err
	}
	return secrets.NoVersion, v.ps.Set(
		v.persistentStorePath(key.Namespace, secretID),
		dek,
		nil,
		nil,
		override,
	)
}

// DeleteSecret removes the stored DEK for secretID from the persistence store.
func (v *vaultSecrets) DeleteSecret(
	secretID string,
	keyContext map[string]string,
) error {
	key := v.encryptionSecret(keyContext)
	return v.ps.Delete(v.persistentStorePath(key.Namespace, secretID))
}

// Encrypt is not supported by this provider.
func (v *vaultSecrets) Encrypt(
	secretID string,
	plaintTextData string,
	keyContext map[string]string,
) (string, error) {
	return "", secrets.ErrNotSupported
}

// Decrypt is not supported by this provider.
func (v *vaultSecrets) Decrypt(
	secretID string,
	encryptedData string,
	keyContext map[string]string,
) (string, error) {
	return "", secrets.ErrNotSupported
}

// Rencrypt is not supported by this provider.
// NOTE(review): the name is misspelled ("Rencrypt") but presumably matches the
// secrets.Secrets interface definition — cannot be renamed here alone.
func (v *vaultSecrets) Rencrypt(
	originalSecretID string,
	newSecretID string,
	originalKeyContext map[string]string,
	newKeyContext map[string]string,
	encryptedData string,
) (string, error) {
	return "", secrets.ErrNotSupported
}

// ListSecrets lists all secret IDs known to the persistence store.
func (v *vaultSecrets) ListSecrets() ([]string, error) {
	return v.ps.List()
}

// encrypt wraps plaintext with the transit key, transparently renewing the
// namespace-scoped token once if it has expired.
func (v *vaultSecrets) encrypt(key transit.SecretKey, plaintext string) (string, error) {
	// as vault supports both auto auth and namespaces at once, needs to ensure that a correct
	// vault token is used for a namespace and lock it for the next usage
	if v.autoAuth {
		v.lockClientToken.Lock()
		defer v.lockClientToken.Unlock()

		if err := v.setNamespaceToken(key.Namespace); err != nil {
			return "", err
		}
	}

	secretValue, err := v.lockedEncrypt(key, plaintext)
	if v.isTokenExpired(err) {
		if err = v.renewToken(key.Namespace); err != nil {
			return "", fmt.Errorf("failed to renew token: %s", err)
		}
		return v.lockedEncrypt(key, plaintext)
	}
	return secretValue, err
}

// lockedEncrypt performs the transit encrypt call under the read lock.
func (v *vaultSecrets) lockedEncrypt(key transit.SecretKey, plaintext string) (string, error) {
	v.mu.RLock()
	defer v.mu.RUnlock()
	c, err := transit.New(v.client.Logical())
	if err != nil {
		return "", err
	}
	return c.Encrypt(key, plaintext)
}

// decrypt unwraps ciphertext with the transit key, transparently renewing the
// namespace-scoped token once if it has expired.
func (v *vaultSecrets) decrypt(key transit.SecretKey, ciphertext string) (string, error) {
	if v.autoAuth {
		v.lockClientToken.Lock()
		defer v.lockClientToken.Unlock()

		if err := v.setNamespaceToken(key.Namespace); err != nil {
			return "", err
		}
	}

	secretValue, err := v.lockedDecrypt(key, ciphertext)
	if v.isTokenExpired(err) {
		if err = v.renewToken(key.Namespace); err != nil {
			return "", fmt.Errorf("failed to renew token: %s", err)
		}
		return v.lockedDecrypt(key, ciphertext)
	}
	return secretValue, err
}

// lockedDecrypt performs the transit decrypt call under the read lock.
func (v *vaultSecrets) lockedDecrypt(key transit.SecretKey, cipher string) (string, error) {
	v.mu.RLock()
	defer v.mu.RUnlock()
	c, err := transit.New(v.client.Logical())
	if err != nil {
		return "", err
	}
	return c.Decrypt(key, cipher)
}

// generateDataKey creates a fresh random DEK wrapped by the transit key,
// with the same renew-on-expiry retry as encrypt/decrypt.
func (v *vaultSecrets) generateDataKey(key transit.SecretKey) (string, error) {
	if v.autoAuth {
		v.lockClientToken.Lock()
		defer v.lockClientToken.Unlock()

		if err := v.setNamespaceToken(key.Namespace); err != nil {
			return "", err
		}
	}

	secretValue, err := v.lockedGenerate(key)
	if v.isTokenExpired(err) {
		if err = v.renewToken(key.Namespace); err != nil {
			return "", fmt.Errorf("failed to renew token: %s", err)
		}
		return v.lockedGenerate(key)
	}
	return secretValue, err
}

// lockedGenerate draws 32 random bytes and returns them wrapped (encrypted)
// by the transit key, under the read lock.
func (v *vaultSecrets) lockedGenerate(key transit.SecretKey) (string, error) {
	v.mu.RLock()
	defer v.mu.RUnlock()
	c, err := transit.New(v.client.Logical())
	if err != nil {
		return "", err
	}
	newKey := make([]byte, 32)
	_, err = rand.Read(newKey)
	if err != nil {
		return "", err
	}
	return c.Encrypt(key, base64.StdEncoding.EncodeToString(newKey))
}

// getDekFromStore fetches the wrapped DEK for secretID from the persistence
// store, returning ErrInvalidSecretId if no entry exists.
func (v *vaultSecrets) getDekFromStore(namespace, secretID string) ([]byte, error) {
	secretPath := v.persistentStorePath(namespace, secretID)
	if exists, err := v.ps.Exists(secretPath); err != nil {
		return nil, err
	} else if !exists {
		return nil, secrets.ErrInvalidSecretId
	}

	// Get the DEK (Data Encryption Key) from kvdb
	return v.ps.GetPublic(secretPath)
}

// renewToken re-authenticates the client for the given namespace and installs
// the new token, under the write lock so in-flight calls are excluded.
func (v *vaultSecrets) renewToken(namespace string) error {
	v.mu.Lock()
	defer v.mu.Unlock()
	if len(namespace) > 0 {
		v.client.SetNamespace(namespace)
		defer v.client.SetNamespace("")
	}

	token, err := utils.GetAuthToken(v.client, v.config)
	if err != nil {
		return fmt.Errorf("get auth token for %s namespace: %s", namespace, err)
	}

	v.currentNamespace = namespace
	v.client.SetToken(token)
	return nil
}

// isTokenExpired reports whether err looks like an expired-token failure in
// auto-auth mode (vault surfaces this as "permission denied").
func (v *vaultSecrets) isTokenExpired(err error) bool {
	return err != nil && v.autoAuth && strings.Contains(err.Error(), "permission denied")
}

// setNamespaceToken is used for a multi-token support with a kubernetes auto auth setup.
//
// This allows to talk with a multiple vault namespaces (which are not sub-namespace). Create
// the same “Kubernetes Auth Role” in each of the configured namespace. For every request it
// fetches the token for that specific namespace.
func (v *vaultSecrets) setNamespaceToken(namespace string) error {
	if v.currentNamespace == namespace {
		return nil
	}

	return v.renewToken(namespace)
}

// encryptionSecret resolves the transit key to use: the provider's encryption
// key name, in either the default namespace or the one from the key context.
func (v *vaultSecrets) encryptionSecret(keyContext map[string]string) transit.SecretKey {
	namespace := v.namespace
	if keyContext != nil && len(keyContext[secrets.KeyVaultNamespace]) > 0 {
		namespace = keyContext[secrets.KeyVaultNamespace]
	}
	return transit.SecretKey{
		Name:      v.encryptionKey,
		Namespace: namespace,
	}
}

// persistentStorePath builds the kvdb path for a secret in a namespace.
func (v *vaultSecrets) persistentStorePath(namespace, name string) string {
	return path.Join("vault", namespace, name)
}

// ensureEncryptionKey creates an encryption key if it's not exist.
func ensureEncryptionKey(c *api.Client, key, namespace string) (string, error) { transitClient, err := transit.New(c.Logical()) if err != nil { return "", err } // create an encryption key if it's not provided if key == "" { _, err = transitClient.Create(transit.SecretKey{Name: defaultPxEncryptionKey, Namespace: namespace}, "") if err != nil { return "", err } return defaultPxEncryptionKey, nil } // check if the provided key exists _, err = transitClient.Read(transit.SecretKey{Name: key, Namespace: namespace}) if err != nil { return "", err } return key, nil }
package Reverse_Words_in_a_String

import (
	"strings"
)

// reverseWords1 returns s with its whitespace-separated words in reverse
// order, joined by single spaces. Leading, trailing and repeated whitespace
// collapse, per strings.Fields semantics.
func reverseWords1(s string) string {
	fields := strings.Fields(s)
	reversed := make([]string, 0, len(fields))
	for i := len(fields) - 1; i >= 0; i-- {
		reversed = append(reversed, fields[i])
	}
	return strings.Join(reversed, " ")
}
package main

import (
	"fmt"
	"log"
	"strings"
)

// Generate prints this element's name and type, then recursively generates its
// related definition and that definition's child elements.
func (c element) Generate() {
	fmt.Printf("%s %s\r\n", c.GetName(), c.GetType())
	if y := c.GetRelated(); y != nil {
		y.Generate()
		for _, e := range y.GetElements() {
			e.Generate()
		}
	}
}

// Setter emits generated helper methods for typeName into the Funcs registry:
// Append* helpers for slice fields on *RequestType types, and Ack predicate
// methods for the Ack field on *ResponseType types.
func (e element) Setter(typeName string) {
	if strings.HasSuffix(typeName, "RequestType") {
		if e.TypeDetails().IsSlice {
			funcIdx := fmt.Sprintf("%s_Append%s", typeName, UpperFirstLetter(e.GetName()))
			Funcs[funcIdx] = NewBuffer()
			// Sliceable types expose their own Append; plain slices use append()
			if _, yes := SliceableType[e.GetType().GoType()]; yes {
				Funcs[funcIdx].Sprintf(`func (x *%[1]s) Append%[2]s(v ...%[3]s) {
	x.%[2]s.Append(v...)
}
`, typeName, UpperFirstLetter(e.GetName()), e.GetType().GoType(false))
			} else {
				Funcs[funcIdx].Sprintf(`func (x *%[1]s) Append%[2]s(v ...%[3]s) {
	x.%[2]s = append(x.%[2]s, v...)
}
`, typeName, UpperFirstLetter(e.GetName()), e.GetType().GoType())
			}
		}
	}
	if strings.HasSuffix(typeName, "ResponseType") {
		if e.GetType().GoType() == "AckCodeType" && e.GetName() == "Ack" {
			if splx, ok := FindSimple("AckCodeType"); ok {
				funcIdx := fmt.Sprintf("%s_AckCodeType%s", typeName, UpperFirstLetter(e.GetName()))
				Funcs[funcIdx] = NewBuffer()
				funcCount := 0
				// one boolean predicate per enum value, skipping CustomCode
				for _, e := range splx.Restriction.Enumeration {
					if e.Annotation.Skip() || e.Value == "CustomCode" {
						continue
					}
					Funcs[funcIdx].Sprintf(`func (x %[1]s) %[2]s() bool {
	return x.Ack == Ack_%[2]s
}
`, typeName, UpperFirstLetter(e.Value))
					funcCount++
				}
				// drop the buffer again if every value was skipped
				if funcCount == 0 {
					delete(Funcs, funcIdx)
				}
			}
		}
	}
}

// DeepValidator reports whether this element (reached via path) requires any
// generated validation code for the given call.
func (e element) DeepValidator(callName, path string) bool {
	path = path + "." + e.Name
	// basic-typed slices have no nested structure to validate
	if e.GetType().IsBasic() && e.TypeDetails().IsSlice {
		return false
	}
	if _, yes := e.NeedsValidation(callName); yes {
		return true
	}
	return false
}

// Validator emits validation code for this element into Validator[callName],
// wrapping nested checks in nil-pointer guards and slice-range loops as needed.
func (e element) Validator(callName, path string) {
	rules2, ok := e.NeedsValidation(callName)
	if !ok {
		return
	}
	// required/max-occurs are emitted up-front; the rest run per element
	rules := rules2.Except(ValTypRequired, ValTypMaxOccurs)
	related := e.GetRelated()
	key := ""
	newPath := fmt.Sprintf("%s.%s", path, UpperFirstLetter(e.GetName()))
	hasDeepValidationRequirement := false
	loopBracket, pointerBracket := false, false
	if related != nil {
		if e.Annotation.AppInfo.MaxDepth == 0 {
			hasDeepValidationRequirement = related.DeepValidator(callName, newPath)
		}
	}
	if rule, yes := rules2.Includes(ValTypMaxOccurs); yes {
		Validator[callName].Sprintf("%s", e.TypeDetails().Key(key).ValidationString(*rule, path))
	}
	if rule, yes := rules2.Includes(ValTypRequired); yes {
		Validator[callName].Sprintf("%s", e.TypeDetails().Key(key).ValidationString(*rule, path))
	}
	if e.TypeDetails().IsPointer && hasDeepValidationRequirement {
		pointerBracket = true
		Validator[callName].Sprintf("if %s != nil {\r\n", newPath)
	}
	if e.TypeDetails().IsSlice {
		// key is a loop index name made unique by hashing the path
		key = fmt.Sprintf("i%d", hash(path))
		newPath = path + "." + UpperFirstLetter(e.GetName()) + "[" + key + "]"
		if rules.Len() > 0 || hasDeepValidationRequirement {
			loopBracket = true
			Validator[callName].Sprintf("for %s := range %s {\r\n", key, fmt.Sprintf("%s.%s", path, UpperFirstLetter(e.GetName())))
		} else {
			//Validator[callName].Sprintf("// No validation for %s %+v | %v\r\n", newPath, rules, rules2)
		}
	}
	for _, r := range rules {
		Validator[callName].Sprintf("%s", e.TypeDetails().Key(key).ValidationString(r, path))
	}
	if related != nil {
		related.Validator(callName, newPath)
	}
	// close brackets in reverse order of opening
	if loopBracket {
		Validator[callName].Sprintf("}\r\n")
	}
	if pointerBracket {
		Validator[callName].Sprintf("}\r\n")
	}
}

// TypeDetails computes the Go-level shape of this element: slice-ness,
// pointer-ness, and the underlying simple type it aliases (if any).
func (e element) TypeDetails() *TypeDetails {
	t := &TypeDetails{}
	t.Field = e.GetName()
	t.Type = e.GetType()
	_, t.IsSlice = e.SliceLen()
	if e.Annotation != nil {
		if listBasedOn, ok := e.Annotation.AppInfo.ListBasedOn(); ok {
			// a single base type resolves to a simple-type alias; a comma list does not
			if !strings.Contains(listBasedOn, ",") {
				if x, ok := FindSimple(listBasedOn); ok {
					t.SimpleType = true
					t.AliasFor = x.GetType()
				} else {
					log.Fatalf("could not find simple type -`%s`- ", listBasedOn)
				}
			}
		} else {
			if x, ok := FindSimple(e.GetType().String()); ok {
				t.SimpleType = true
				t.AliasFor = x.GetType()
			}
		}
	}
	if t.AliasFor == "" {
		t.AliasFor = e.GetType()
	}
	// non-nullable, non-slice fields are represented as pointers
	t.IsPointer = !e.GetType().Nullable() && !t.IsSlice
	return t
}

// TransformType returns the Go type expression for this element's field,
// applying list-based-on overrides, slice wrapping and pointer wrapping.
func (e element) TransformType() string {
	var let Type = e.Type
	if e.Annotation != nil {
		if listBasedOn, ok := e.Annotation.AppInfo.ListBasedOn(); ok {
			if !strings.Contains(listBasedOn, ",") {
				if Type(listBasedOn).Nullable() {
					let = Type(listBasedOn)
				}
				if x, ok := FindSimple(listBasedOn); ok {
					x.Generate()
				}
			} else {
				// comma-separated list: generate every referenced simple type
				listBasedOn = strings.Replace(listBasedOn, " ", "", -1)
				for _, k := range strings.Split(listBasedOn, ",") {
					if x, ok := FindSimple(k); ok {
						x.Generate()
					}
				}
			}
		}
	}
	if _, yes := e.SliceLen(); yes {
		if s, yes := SliceableType[let.GoType()]; yes {
			return s
		}
		return "[]" + let.GoType()
	}
	if let.Nullable() {
		return let.GoType()
	}
	return "*" + let.GoType()
}

// GoLine renders the struct field declaration for this element, with xml and
// (snake_cased) json tags.
func (c element) GoLine() string {
	return fmt.Sprintf("%s %s `xml:\"%s,omitempty\" json:\"%s,omitempty\"`", UpperFirstLetter(c.GetName()), c.TransformType(), c.GetName(), ToSnake(c.GetName()))
}

// GetName returns the element's XSD name.
func (c element) GetName() string {
	return c.Name
}

// GetRelated resolves the element's type to its definition, or nil for
// built-in XML Schema types.
func (c element) GetRelated() Xyer {
	if c.Type.IsXS() {
		return nil
	}
	return Find(c.Type.String())
}

// GetType returns the element's declared type.
func (c element) GetType() Type {
	return c.Type
}

// GetElements returns no children; plain elements have none.
func (c element) GetElements() (r []Xyer) {
	return nil
}
package main

import (
	"fmt"
	"os"

	"github.com/webview/webview"
)

// main opens an 800x600 desktop window (with debug tooling enabled) and loads
// index.html from the current working directory into it.
func main() {
	wnd := webview.New(true)
	defer wnd.Destroy()

	wnd.SetTitle("Minimal webview example")
	wnd.SetSize(800, 600, webview.HintNone)

	// Resolve the working directory so the page can be addressed by file URL.
	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	fmt.Println("cwd:", cwd)

	wnd.Navigate("file://" + cwd + "/index.html")
	wnd.Run()
}
// Copyright (C) 2019 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package app import "context" // Cleanup is a function that is invoked at a later time to perform the cleanup. type Cleanup func(ctx context.Context) // Then combines two clean up functions into a single cleanup function. func (c Cleanup) Then(next Cleanup) Cleanup { if c == nil { return next } if next == nil { return c } return func(ctx context.Context) { c(ctx) next(ctx) } } // Invoke invokes the possibly nil cleanup safely. Returns a nil Cleanup, so // this can be chained when invoking the cleanup as part of the error handling. func (c Cleanup) Invoke(ctx context.Context) Cleanup { if c != nil { c(ctx) } return nil }
package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/mem"
)

var (
	interval int
	token    string
)

const (
	api     = "http://localhost:3000/servers"
	web     = "http://localhost/api/users"
	version = "1.0.0"
)

// TODO: spearating function
// TODO: creating flag to accept interval and token
// TODO: creating a function to check if token valid

// main parses the flags, validates the access token and then ships CPU/memory
// metrics to the Furion server every `interval` seconds.
func main() {
	// Flags
	flag.IntVar(&interval, "interval", 5, "Interval agent sending metrics to server in seconds")
	flag.StringVar(&token, "token", "", "Access token generated from Furion application settings")
	flag.Parse()

	// Checking if key present
	if token == "" {
		fmt.Printf("Error: access token is not defined.\n")
		fmt.Printf("See './furion-agent -h' for usage\n")
		os.Exit(2)
	}

	// Checking if access token is valid
	if !isTokenValid(token) {
		fmt.Printf("Error: access token is invalid or expired.")
		os.Exit(2)
	}

	// Sending metrics to Furion
	tick := time.Tick(time.Duration(interval) * time.Second)
	for range tick {
		go sendMetrics()
	}
}

// checking if given access token is valid
// TODO: implement. we will return true for temporary
func isTokenValid(token string) bool {
	return true
}

// Sending metrics to Furion server. Each failure path returns early so that
// later steps never operate on nil values.
func sendMetrics() {
	vmStat, err := mem.VirtualMemory()
	if err != nil {
		fmt.Printf("cannot read memory stats %v\n", err)
		return
	}
	cpuStat, err := cpu.Percent(0, false)
	// guard len() too: an empty slice would panic on cpuStat[0]
	if err != nil || len(cpuStat) == 0 {
		fmt.Printf("cannot read cpu stats %v\n", err)
		return
	}

	body := strings.NewReader(
		fmt.Sprintf(
			"token=%s&mem_used_percent=%f&cpu_used_percent=%f",
			token, vmStat.UsedPercent, cpuStat[0]),
	)
	req, err := http.NewRequest("POST", api, body)
	if err != nil {
		fmt.Printf("cannot build request %v\n", err)
		return // BUG FIX: previously fell through and used a nil request
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Printf("cannot post to server %v\n", err)
		return // BUG FIX: previously deferred Close on a nil response -> panic
	}
	defer resp.Body.Close()
}
package client

import (
	"errors"
	"net/http"
	"testing"

	"github.com/moyen-blog/client-go/client/mocks"
)

func init() {
	DefaultHTTPClient = &mocks.MockHTTPClient // Ensure no real HTTP calls are made
}

// TestRequestSuccess verifies a 200 response propagates the status code and
// that the bearer token is attached as the Authorization header.
func TestRequestSuccess(t *testing.T) {
	token := "testtoken"
	mocks.MockHTTPClient.SetResponse("", 200, nil)
	status, err := request(http.MethodGet, "", token, nil, nil)
	if err != nil {
		t.Error("Should successfully make request")
	}
	if status != 200 {
		t.Errorf("Should return status %d but got %d", 200, status)
	}
	authHeader := mocks.MockHTTPClient.LastRequest.Header.Get("Authorization")
	if authHeader != "Bearer "+token {
		t.Errorf("Should have auth token %s but got %s", token, authHeader)
	}
}

// TestRequestStatusError verifies a 5xx status surfaces as an error.
func TestRequestStatusError(t *testing.T) {
	mocks.MockHTTPClient.SetResponse("[]", 500, nil)
	_, err := request(http.MethodGet, "", "", nil, nil)
	if err == nil {
		t.Error("Should fail request with server error")
	}
}

// TestRequestJSONError verifies an unparseable body surfaces as an error when
// a decode destination is supplied.
func TestRequestJSONError(t *testing.T) {
	dest := struct{}{}
	mocks.MockHTTPClient.SetResponse("*&(^", 200, nil)
	_, err := request(http.MethodGet, "", "", nil, dest)
	if err == nil {
		t.Error("Should fail request with JSON error")
	}
}

// TestRequestHTTPError verifies a transport-level failure surfaces as an error.
func TestRequestHTTPError(t *testing.T) {
	mocks.MockHTTPClient.SetResponse("", 200, errors.New("deliberate error in TestRequestHTTPError"))
	_, err := request(http.MethodGet, "", "", nil, nil)
	if err == nil {
		t.Error("Should fail request with HTTP error")
	}
}
package ines

import (
	"fmt"
	"io/ioutil"

	"github.com/funsun/peridot/cartridge"
	"github.com/funsun/peridot/common"
)

// ReadFile loads an iNES ROM file and parses it into a Cartridge.
// It panics if the file cannot be read. (Previously the read error was
// silently discarded, so a missing file surfaced later as a confusing
// out-of-range slice panic inside Read.)
func ReadFile(filename string) common.Cartridge {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(fmt.Sprintf("ines: cannot read %s: %v", filename, err))
	}
	return Read([]uint8(data))
}

// Read parses raw iNES data: a 16-byte header, then PRG ROM
// (header[4] * 16 KiB) followed by CHR ROM (header[5] * 8 KiB), and
// constructs the cartridge for the mapper the header declares.
// It panics on an unsupported mapper number.
func Read(data []uint8) common.Cartridge {
	header := data[0:16]
	mapper := getMapperNumber(header)
	base := 16
	rpg := data[base : base+int(header[4])*16*1024]
	base = 16 + int(header[4])*16*1024
	chr := data[base : base+int(header[5])*8*1024]
	fmt.Println("mapper:", mapper)
	switch mapper {
	case 0x00:
		return new(cartridge.Mapper0).Init(rpg, chr)
	case 0x4a:
		return new(cartridge.MMC3).Init(rpg, chr)
	}
	// Typo fix ("founded") and include the offending number for debugging.
	panic(fmt.Sprintf("no mapper found for number %#x", mapper))
}

// getMapperNumber assembles the iNES mapper id: the low nibble comes from
// the top of header[6], the high nibble is header[7]'s top bits kept in place.
func getMapperNumber(header []uint8) uint8 {
	l := header[6] >> 4
	h := header[7] & 0xf0
	return h + l
}
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package models

import (
	"context"

	"yunion.io/x/onecloud/pkg/cloudcommon/db"
)

// SEventManager manages SEvent rows stored in the events_tbl table.
type SEventManager struct {
	db.SStandaloneAnonResourceBaseManager
}

// EventManager is the package-wide singleton manager, wired up in init.
var EventManager *SEventManager

func init() {
	EventManager = &SEventManager{
		SStandaloneAnonResourceBaseManager: db.NewStandaloneAnonResourceBaseManager(
			SEvent{},
			"events_tbl",
			"notifyevent",
			"notifyevents",
		),
	}
	EventManager.SetVirtualObject(EventManager)
}

// SEvent is a notification event record.
type SEvent struct {
	db.SStandaloneAnonResourceBase

	// Message is the notification payload text.
	Message string
	// Event is the event identifier (column width 32, nullable).
	Event string `width:"32" nullable:"true"`
	// AdvanceDays presumably means how many days in advance the
	// notification fires — TODO confirm against callers.
	AdvanceDays int
}

// CreateEvent inserts a new event row and returns the created record.
func (e *SEventManager) CreateEvent(ctx context.Context, event, message string, advanceDays int) (*SEvent, error) {
	eve := &SEvent{
		Message:     message,
		Event:       event,
		AdvanceDays: advanceDays,
	}
	err := e.TableSpec().Insert(ctx, eve)
	if err != nil {
		return nil, err
	}
	return eve, nil
}

// GetEvent fetches a single event by its id.
func (e *SEventManager) GetEvent(id string) (*SEvent, error) {
	model, err := e.FetchById(id)
	if err != nil {
		return nil, err
	}
	return model.(*SEvent), nil
}
package do

// serviceCallOne simulates a service call that yields a string result.
func serviceCallOne() (string, error) {
	return "a", nil
}

// serviceCallTwo simulates a service call that yields an int result.
func serviceCallTwo() (int, error) {
	return 1, nil
}

// serviceCalls runs both calls through deriveDo — a generated helper
// (presumably produced by goderive) that is not defined in this file;
// TODO confirm whether it runs the calls concurrently and how it merges errors.
func serviceCalls() (string, int, error) {
	return deriveDo(serviceCallOne, serviceCallTwo)
}
package main

import (
	"container/list"
	"fmt"
)

// lru is a fixed-capacity least-recently-used cache keyed by string.
type lru struct {
	capacity int
	// queue orders keys from least recently used (front) to most
	// recently used (back).
	queue *list.List
	data  map[string]*node
}

// node pairs a cached value with the queue element holding its key.
type node struct {
	Data   interface{}
	KeyPtr *list.Element
}

// String renders the node as "key - value", or just the value when the
// queue element does not hold a string key.
func (n *node) String() string {
	key, ok := n.KeyPtr.Value.(string)
	if !ok {
		return fmt.Sprintf("%v", n.Data)
	}
	return fmt.Sprintf("%s - %v", key, n.Data)
}

// New builds an empty cache that holds at most capacity entries.
func New(capacity int) *lru {
	return &lru{
		capacity: capacity,
		queue:    list.New(),
		data:     make(map[string]*node),
	}
}

// Add inserts a new key/value pair, evicting the least recently used entry
// when the cache is full. Re-adding an existing key is rejected.
func (c *lru) Add(key string, value interface{}) {
	if _, exists := c.data[key]; exists {
		fmt.Println("key already in the cache:", key)
		return
	}
	if len(c.data) >= c.capacity {
		// The front of the queue is the oldest entry.
		oldest := c.queue.Front()
		c.queue.Remove(oldest)
		keyToRemove, ok := oldest.Value.(string)
		if !ok {
			panic("invalid type: this should not happen")
		}
		delete(c.data, keyToRemove)
	}
	c.data[key] = &node{
		Data:   value,
		KeyPtr: c.queue.PushBack(key),
	}
}

// Get returns the cached value for key (marking it most recently used),
// or nil when the key is absent.
func (c *lru) Get(key string) interface{} {
	entry, found := c.data[key]
	if !found {
		fmt.Println("key not in the cache:", key)
		return nil
	}
	c.queue.MoveToBack(entry.KeyPtr)
	return entry.Data
}

// Print dumps the cache contents in eviction order (oldest first).
func (c *lru) Print() {
	fmt.Println("--------------------")
	for e := c.queue.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value, ":", c.data[e.Value.(string)].Data)
	}
}

func main() {
	cache := New(3)
	cache.Add("1", 1)
	cache.Add("2", 2)
	cache.Add("3", 3)
	cache.Print()

	cache.Add("4", 4) // 1 should be removed
	cache.Print()

	v := cache.Get("2")
	fmt.Println("Got from cache:", v)
	cache.Print()

	cache.Add("5", 5) // 3 should be removed
	cache.Print()
}
package models type Vote struct { userId string `json:"userId"` voteId string `json:"voteId"` }
package main

import (
	"flag"
	"fmt"
)

var name string

// init registers the command-line flags before main runs.
func init() {
	// flag.StringVar binds a flag to an existing variable:
	//   arg 1: &name — address of the variable that receives the value
	//   arg 2: "name" — the flag's name on the command line
	//   arg 3: the default value used when the flag is absent
	//   arg 4: the usage description shown by -h
	flag.StringVar(&name, "name", "default name", "请输入名称:")
}

func main() {
	// Parse reads os.Args and assigns the values to the bound variables.
	flag.Parse()
	fmt.Println("Hello " + name)
}

/*
Example usage:
  -> go run main.go -name "duwanjiang"
Output:
  -> Hello duwanjiang
*/
/*
Copyright 2021 The KodeRover Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package handler

import (
	"github.com/gin-gonic/gin"

	commonmodels "github.com/koderover/zadig/lib/microservice/aslan/core/common/dao/models"
	"github.com/koderover/zadig/lib/microservice/aslan/core/system/service"
	internalhandler "github.com/koderover/zadig/lib/microservice/aslan/internal/handler"
	e "github.com/koderover/zadig/lib/tool/errors"
	"github.com/koderover/zadig/lib/util"
)

// CreateJenkinsIntegration registers a new Jenkins integration after
// validating the JSON payload and its URL. The response is written by the
// deferred JsonResponse from whatever ctx.Resp/ctx.Err hold on return.
func CreateJenkinsIntegration(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	args := new(commonmodels.JenkinsIntegration)
	if err := c.BindJSON(args); err != nil {
		ctx.Err = e.ErrInvalidParam.AddDesc("invalid jenkinsIntegration json args")
		return
	}
	args.UpdateBy = ctx.Username
	// Reject unparseable Jenkins URLs before hitting the service layer.
	if _, err := util.GetUrl(args.URL); err != nil {
		ctx.Err = e.ErrInvalidParam.AddDesc("invalid url")
		return
	}
	ctx.Err = service.CreateJenkinsIntegration(args, ctx.Logger)
}

// ListJenkinsIntegration returns all configured Jenkins integrations.
func ListJenkinsIntegration(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	ctx.Resp, ctx.Err = service.ListJenkinsIntegration(ctx.Logger)
}

// UpdateJenkinsIntegration replaces the integration identified by the :id
// path parameter. NOTE(review): unlike Create, the URL is not validated
// here — confirm whether that is intentional.
func UpdateJenkinsIntegration(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	args := new(commonmodels.JenkinsIntegration)
	if err := c.BindJSON(args); err != nil {
		ctx.Err = e.ErrInvalidParam.AddDesc("invalid jenkinsIntegration json args")
		return
	}
	args.UpdateBy = ctx.Username
	ctx.Err = service.UpdateJenkinsIntegration(c.Param("id"), args, ctx.Logger)
}

// DeleteJenkinsIntegration removes the integration identified by :id.
func DeleteJenkinsIntegration(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	ctx.Err = service.DeleteJenkinsIntegration(c.Param("id"), ctx.Logger)
}

// TestJenkinsConnection checks connectivity with the supplied Jenkins
// credentials without persisting anything.
func TestJenkinsConnection(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	args := new(service.JenkinsArgs)
	if err := c.BindJSON(args); err != nil {
		ctx.Err = e.ErrInvalidParam.AddDesc("invalid jenkinsArgs json args")
		return
	}
	ctx.Err = service.TestJenkinsConnection(args, ctx.Logger)
}

// ListJobNames returns the names of the jobs known to the configured Jenkins.
func ListJobNames(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	ctx.Resp, ctx.Err = service.ListJobNames(ctx.Logger)
}

// ListJobBuildArgs returns the build parameters of the :jobName Jenkins job.
func ListJobBuildArgs(c *gin.Context) {
	ctx := internalhandler.NewContext(c)
	defer func() { internalhandler.JsonResponse(c, ctx) }()

	ctx.Resp, ctx.Err = service.ListJobBuildArgs(c.Param("jobName"), ctx.Logger)
}
package main

// 1744. Can You Eat Your Favorite Candy on Your Favorite Day?
//
// candiesCount[i] is the number of candies of type i. Candies must be eaten
// strictly in type order, at least one per day, starting on day 0. Each
// query [favoriteType, favoriteDay, dailyCap] asks: eating at most dailyCap
// candies per day, can some candy of favoriteType be eaten on favoriteDay?
//
// Approach — prefix sums. By the end of day d you have eaten at least d+1
// candies (one per day) and at most (d+1)*cap. Type t is reachable on day d
// iff you can finish all earlier types in time, floor(prefix[t]/cap) <= d,
// AND the candies up to and including type t last that long, d+1 <= prefix[t+1].
func canEat(candiesCount []int, queries [][]int) []bool {
	prefix := make([]int, len(candiesCount)+1)
	for i, c := range candiesCount {
		prefix[i+1] = prefix[i] + c
	}
	answers := make([]bool, len(queries))
	for i, q := range queries {
		favType, day, daily := q[0], q[1], q[2]
		answers[i] = prefix[favType]/daily <= day && day+1 <= prefix[favType+1]
	}
	return answers
}
// Copyright 2019 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package kernel import ( "bufio" "context" "os" "regexp" "strconv" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: HighResTimers, Desc: "Fails if timers have nanosecond resolution that is not 1 ns", Contacts: []string{ "tbroch@chromium.org", "chromeos-kernel-test@google.com", "kathrelkeld@chromium.org", // Tast port author }, Attr: []string{"group:mainline"}, }) } // HighResTimers reads from /proc/timer_list to verify that any resolution // listed in nsecs has a value of 1. func HighResTimers(ctx context.Context, s *testing.State) { re := regexp.MustCompile(`^\s*\.resolution:\s(\d+)\s*nsecs$`) f, err := os.Open("/proc/timer_list") if err != nil { s.Fatal("Failed to open timer list: ", err) } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { if matches := re.FindStringSubmatch(scanner.Text()); matches != nil { res, err := strconv.Atoi(matches[1]) if err != nil { s.Error("Error convering resolution to int: ", err) } if res != 1 { s.Errorf("Unexpected timer resoultion: %d ns, want 1 ns", res) } } } if scanner.Err(); err != nil { s.Error("Error reading timers file: ", err) } }
package handlers

// getResponse carries the aggregate rating of a package.
type getResponse struct {
	Rating float64 `json:"rating"`
}

// reviewResponse is a single user review of a package.
type reviewResponse struct {
	Author     string `json:"author"`
	Rating     int    `json:"rating"`
	Commentary string `json:"commentary"`
}

// reportResponse describes an abuse report and its moderation state.
type reportResponse struct {
	ReportedBy   string `json:"reportedBy"`
	ReportedUser string `json:"reportedUser"`
	PackageName  string `json:"packageName"`
	Commentary   string `json:"commentary"`
	Date         int    `json:"date"`
	Reviewed     bool   `json:"reviewed"`
	ReviewedBy   string `json:"reviewedBy"`
	ReviewedDate int    `json:"reviewedDate"`
	Review       string `json:"review"`
}

// rateRequest submits a rating (and optional comment) for a package.
type rateRequest struct {
	Token   string  `json:"token"`
	Name    string  `json:"name"`
	Rating  float64 `json:"rating"`
	Comment string  `json:"commentary"`
}

// reviewReportRequest is a moderator's resolution of an abuse report,
// optionally banning the reported user and/or deleting their review.
type reviewReportRequest struct {
	Token        string `json:"token"`
	PackageName  string `json:"packageName"`
	Review       string `json:"review"`
	ReviewedBy   string `json:"reviewedBy"`
	ReportedBy   string `json:"reportedBy"`
	Ban          bool   `json:"ban"`
	DeleteReview bool   `json:"deleteReview"`
	ReportedUser string `json:"reportedUser"`
}

// deleteRequest asks for removal of a review by its author on a package.
type deleteRequest struct {
	Token   string `json:"token"`
	Package string `json:"package"`
	Author  string `json:"author"`
}

// reportsRequest lists abuse reports, optionally including resolved ones.
type reportsRequest struct {
	Token        string `json:"token"`
	ShowReviewed bool   `json:"showReviewed"`
}

// registerRequest creates a new account.
type registerRequest struct {
	Login    string `json:"login"`
	Email    string `json:"email"`
	Password string `json:"password"`
}

// reportRequest files an abuse report against a user for a package.
type reportRequest struct {
	Token        string `json:"token"`
	Commentary   string `json:"commentary"`
	ReportedUser string `json:"reportedUser"`
	PackageName  string `json:"packageName"`
}

// loginRequest authenticates by username/password. Note the field is named
// Username in Go but serialized as "login" on the wire.
type loginRequest struct {
	Username string `json:"login"`
	Password string `json:"password"`
}

// loginResponse returns the token pair and the account role.
type loginResponse struct {
	AccessToken  string `json:"accessToken"`
	RefreshToken string `json:"refreshToken"`
	Role         string `json:"role"`
}

// tokenRequest exchanges a refresh token for a new token pair.
type tokenRequest struct {
	RefreshToken string `json:"refreshToken"`
}

// tokenResponse is the refreshed token pair.
type tokenResponse struct {
	AccessToken  string `json:"accessToken"`
	RefreshToken string `json:"refreshToken"`
}

// isolatedResponse lists package names — presumably packages isolated from
// public listing; TODO confirm against the handler using it.
type isolatedResponse struct {
	Packages []string `json:"packages"`
}

// isolatedRequest toggles/queries the isolated state of a named package.
type isolatedRequest struct {
	Token string `json:"token"`
	Name  string `json:"name"`
}

// Account roles recognized by the handlers.
const RoleUser = "user"
const RoleModerator = "moderator"
/* Spiral Matrix Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order. For example, Given the following matrix: [ [ 1, 2, 3 ], [ 4, 5, 6 ], [ 7, 8, 9 ] ] You should return [1,2,3,6,9,8,7,4,5]. */ package main // 模拟螺旋的方式 func spiralOrder(matrix [][]int) []int { if len(matrix) == 0 { return []int{} } result := make([]int,len(matrix) * len(matrix[0])) towards := 3 // 0 - down 1 - up 2 - left 3 - right width, height, i, j := len(matrix[0]) - 1, len(matrix) - 1, 0, 0 start := 1 for idx := range result { result[idx] = matrix[i][j] switch towards { case 0: if i == height { towards = 2 j-- } else { i++ } case 1: if i == start { start++ width-- height-- towards = 3 j++ } else { i-- } case 2: if j == start - 1 { towards = 1 i-- } else { j-- } case 3: if j == width { towards = 0 i++ } else { j++ } } } return result } // 层层推进 func spiralOrder1(matrix [][]int) []int { if len(matrix) == 0 { return []int{} } start,width,height := 0,len(matrix[0]) - 1,len(matrix) - 1 length := len(matrix) * len(matrix[0]) result,current := make([]int, length),0 for start <= height && start <= width { if start == height && start == width { result[current] = matrix[start][start] } for i := start;i < width;i++ { result[current] = matrix[start][i] current++ } for i := start;i < height;i++ { result[current] = matrix[i][width] current++ } for i := width;i > start;i-- { result[current] = matrix[height][i] current++ if current == length { return result } } for i := height;i > start;i-- { result[current] = matrix[i][start] current++ if current == length { return result } } start++ width-- height-- } return result }
package main

import "fmt"

// main exercises the LinkedList/Node types (defined in a sibling file of
// this package): appends, prepends, index operations, iteration and clear.
func main() {
	var list LinkedList
	list.print()

	// appends
	list.append(Node{data: 1})
	list.append(Node{data: 2})
	list.append(Node{data: 3})
	list.print()
	fmt.Println("size is", list.size())

	// prepends
	list.prepend(Node{data: 4})
	list.prepend(Node{data: 5})
	list.prepend(Node{data: 6})
	list.print()
	fmt.Println("size is", list.size())
	fmt.Println("")

	// gethead gettail wrappers — first()/last() presumably return pointers,
	// since assigning through them mutates the list.
	list.first().data = 0
	list.last().data = 99
	fmt.Println("first node is now", list.first())
	fmt.Println("last node is now", list.last())
	list.print()
	fmt.Println("")

	// pops
	fmt.Println("popfirst popped", list.popFirst())
	fmt.Println("poplast popped", list.popLast())
	list.print()
	fmt.Println("")

	// index based operations
	index := 2
	fmt.Println("item at index", index, "is", list.get(index))
	fmt.Println("remove item at index", index, "which is", list.remove(index))
	item := Node{data: 69}
	fmt.Println("insert item", item, "at index", index)
	list.insert(index, item)
	// insert at an out-of-range index — exercises the bounds handling
	item = Node{data: 999}
	index = 30
	fmt.Println("insert item", item, "at index", index)
	list.insert(index, item)
	list.print()
	fmt.Println("")

	// crude iterator
	// haven't understood go's iterator idioms yet
	i := list.getIterator()
	for n := i(); n != nil; n = i() {
		fmt.Println("modifying node", n)
		n.data += 10
	}
	list.print()
	fmt.Println("")

	list.clear()
	fmt.Println("cleared list")
	list.print()
	// removals on an empty list — exercises the error/边界 path of remove
	fmt.Println("remove item at index", 1, "which is", list.remove(1))
	fmt.Println("remove item at index", 2, "which is", list.remove(2))
	list.print()
}
package metaltest

import (
	"fmt"
	"github.com/ionous/sashimi/compiler/model/modeltest"
	"github.com/ionous/sashimi/metal"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
)

// VerifyPostConditions checks both the scalar and the list properties of the
// shared test instance after a test scenario has run.
func VerifyPostConditions(t *testing.T, v metal.ObjectValue) {
	VerifyPostValues(t, v)
	VerifyPostLists(t, v)
}

// VerifyPostValues asserts the expected post-run scalar property values.
func VerifyPostValues(t *testing.T, v metal.ObjectValue) {
	// test that things really changed
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.NumProp); !assert.True(t, ok, fmt.Sprintf("missing (%s.%s)", modeltest.TestInstance, modeltest.NumProp)) {
		t.Fatal()
	} else {
		// numbers come back as float64
		require.EqualValues(t, float64(32), res)
	}
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.TextProp); assert.True(t, ok) {
		require.EqualValues(t, "text", res)
	}
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.StateProp); assert.True(t, ok) {
		require.EqualValues(t, "yes", res)
	}
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.ObjectProp); assert.True(t, ok) {
		require.EqualValues(t, modeltest.TestInstance, res)
	}
}

// VerifyPostLists asserts that the list-valued properties contain the
// expected elements (containment, not exact equality).
func VerifyPostLists(t *testing.T, v metal.ObjectValue) {
	//
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.NumsProp); assert.True(t, ok, "get value nums prop") {
		require.Contains(t, res, float64(32))
	}
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.TextsProp); assert.True(t, ok, "get value texts prop") {
		require.Contains(t, res, "text")
	}
	if res, ok := v.GetValue(modeltest.TestInstance, modeltest.ObjectsProp); assert.True(t, ok, "get value objects props") {
		require.Contains(t, res, modeltest.TestInstance)
	}
}
package usermodel

import (
	"time"

	"github.com/naaltunian/wyn-search-go/utils/errors"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// User is the user model persisted in MongoDB.
type User struct {
	// Bug fix: the tag used to be `bson:"_id, omitempty"` — the stray space
	// makes the driver see an unknown " omitempty" option, so omitempty
	// never applied to the ID field.
	ID             primitive.ObjectID `json:"_id,omitempty" bson:"_id,omitempty"`
	Name           string             `json:"name,omitempty" bson:"name,omitempty"`
	Email          string             `json:"email,omitempty" bson:"email,omitempty"`
	Password       string             `json:"password,omitempty" bson:"password,omitempty"`
	LinkedIn       string             `json:"linkedIn,omitempty" bson:"linkedIn,omitempty"`
	GithubUsername string             `json:"githubUsername,omitempty" bson:"githubUsername,omitempty"`
	PersonalSite   string             `json:"personalSite,omitempty" bson:"personalSite,omitempty"`
	Bio            string             `json:"bio,omitempty" bson:"bio,omitempty"`
	DateCreated    time.Time          `json:"dateCreated,omitempty" bson:"dateCreated,omitempty"`
}

// userModels is a slice of User. (Comment fixed: it previously referred to a
// non-existent "UserModel" type.)
type userModels []User

// Validate returns a bad-request error when any required field (name, email,
// password, github username) is empty, and nil otherwise.
func (u *User) Validate() *errors.RestErr {
	// TODO: add further validations
	if u.Name == "" {
		return errors.NewBadRequestError("invalid name field")
	}
	if u.Email == "" {
		return errors.NewBadRequestError("invalid email field")
	}
	if u.Password == "" {
		return errors.NewBadRequestError("invalid password")
	}
	if u.GithubUsername == "" {
		return errors.NewBadRequestError("invalid github username field")
	}
	return nil
}
// Copyright 2019 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dingtalk

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/alertmanager/config"
	"github.com/prometheus/alertmanager/notify/test"
	commoncfg "github.com/prometheus/common/config"
	"github.com/stretchr/testify/require"
	"testing"
)

// TestDingTalkRedactedURL verifies that the secret webhook URL never leaks
// into log output or error messages produced while notifying.
func TestDingTalkRedactedURL(t *testing.T) {
	// Test server whose URL doubles as the secret; fn cancels it.
	ctx, u, fn := test.GetContextWithCancelingURL()
	defer fn()

	notifier, err := New(
		&config.DingTalkConfig{
			Webhook:    &config.SecretURL{URL: u},
			HTTPConfig: &commoncfg.HTTPClientConfig{},
		},
		test.CreateTmpl(t),
		log.NewNopLogger(),
	)
	require.NoError(t, err)

	test.AssertNotifyLeaksNoSecret(t, ctx, notifier, u.String())
}
package goisgod

import (
	"image"
	"os"

	"github.com/disintegration/imaging"

	"log"
)

// GopherImageDrawer overlays a gopher image onto incoming images and stores
// the results via the DAO.
type GopherImageDrawer struct {
	gopher    *image.Image
	dao       *GigDao
	stoppedCh chan struct{}
}

// NewGopherImageDrawer is used to draw gopher via image input channel.
// The worker goroutine runs until stoppingCh fires (or a draw fails), and
// signals completion on the drawer's stoppedCh.
func NewGopherImageDrawer(dao *GigDao, stoppingCh <-chan struct{}, rawImageInCh <-chan *GigImage) (gid *GopherImageDrawer, err error) {
	gid = new(GopherImageDrawer)
	if gid.gopher, err = gid.getGopherImage(); err != nil {
		return
	}
	gid.stoppedCh = make(chan struct{}, 1)
	gid.dao = dao

	go func() {
		for {
			select {
			case img := <-rawImageInCh:
				log.Printf("Image incoming")
				gimg, err := gid.drawGopher(img)
				if err != nil {
					// Bug fix: `break` here only exited the select, not the
					// loop, so the goroutine kept running after signalling.
					gid.stoppedCh <- struct{}{}
					return
				}
				log.Printf("save new gopher drawed image")
				gid.dao.storeImage(gimg, img.key)
			case <-stoppingCh:
				// Bug fix: same `break`-inside-select problem — after a stop
				// signal the loop used to spin and re-signal stoppedCh.
				gid.stoppedCh <- struct{}{}
				return
			}
		}
	}()
	return
}

// getGopherImage loads the base gopher picture from disk.
// NOTE(review): image.Decode needs a registered PNG decoder; presumably the
// imaging import registers it transitively — confirm, or add
// `_ "image/png"` explicitly.
func (gid *GopherImageDrawer) getGopherImage() (img *image.Image, err error) {
	img = new(image.Image)
	var f *os.File
	if f, err = os.Open("gopher-normal.png"); err != nil {
		return
	}
	defer f.Close()
	if *img, _, err = image.Decode(f); err != nil {
		return
	}
	return
}

// drawGopher resizes the destination image to 300px wide (keeping aspect
// ratio) and overlays the gopher at 50% opacity.
func (gid *GopherImageDrawer) drawGopher(_dst *GigImage) (gimg *GigImage, err error) {
	dst := imaging.Resize(*_dst.image, 300, 0, imaging.Lanczos)
	var img image.Image = imaging.Overlay(dst, *gid.gopher, image.Pt(0, 0), 0.5)
	return &GigImage{image: &img, key: _dst.key}, nil
}
package lib

import (
	"github.com/chidakiyo/benkyo/go-memleak-check/log"
	"github.com/gin-gonic/gin"
	"math/rand"
	"net/http"
	"strconv"
)

// RandStream allocates 2^b random bytes (b from the "b" query param,
// default 8) and responds with only the byte count. Used to compare memory
// behaviour against RandStreamString.
func RandStream(g *gin.Context) {
	c := g.Request.Context()
	bit := uint64(0)
	bitSt := g.Query("b")
	if bitSt == "" {
		bit = 8
	} else {
		// parse failures leave bit at 0, i.e. a 1-byte buffer
		bit, _ = strconv.ParseUint(bitSt, 10, 64)
	}
	var buf []byte
	b := genBytes(bit)
	buf = append(buf, *b...)
	log.Info(c, "byte: %v", len(buf))
	g.String(http.StatusOK, "%v", len(buf)) // response is just the data size
}

// RandStreamString is identical to RandStream except that the whole buffer
// is stringified into the response body.
func RandStreamString(g *gin.Context) {
	c := g.Request.Context()
	bit := uint64(0)
	bitSt := g.Query("b")
	if bitSt == "" {
		bit = 8
	} else {
		bit, _ = strconv.ParseUint(bitSt, 10, 64)
	}
	var buf []byte
	b := genBytes(bit)
	buf = append(buf, *b...)
	log.Info(c, "byte: %v", len(buf))
	g.String(http.StatusOK, "%v", buf) // response is the data itself, stringified
}

// genBytes returns a pointer to a slice of 2^bit pseudo-random bytes.
// NOTE(review): `bit` is an exponent, not a bit count, and is caller
// controlled — large values allocate huge buffers; confirm this is intended
// for the memory-leak benchmark.
func genBytes(bit uint64) *[]byte {
	token := make([]byte, 1<<bit)
	rand.Read(token)
	return &token
}
package main

// f accepts any number of int arguments and deliberately does nothing —
// presumably a placeholder demonstrating variadic parameters.
func f(n ...int) {

}
package api

import (
	"encoding/base64"
	uuid "github.com/satori/go.uuid"
	"github.com/sirupsen/logrus"
	"net/http"
	"strings"

	"text-converter/internal/cfg"
)

// requestIdMiddleware assigns a fresh UUID to each incoming request.
// NOTE(review): the id is written into the shared config returned by
// cfg.GetConfig(); if that is a process-wide singleton, concurrent requests
// race on it and overwrite each other's ids — confirm, and consider moving
// the id into the request context instead.
func requestIdMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config := cfg.GetConfig()
		config.RequestId = uuid.NewV4().String()
		next.ServeHTTP(w, r)
	})
}

// loggingMiddleware binds per-request fields (URI, method, request id) to
// the package logger before passing the request on.
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config := cfg.GetConfig()
		cfg.NewLogger()
		cfg.BindFields(logrus.Fields{
			"requestUri":    r.RequestURI,
			"requestMethod": r.Method,
			"requestId":     config.RequestId,
		})
		next.ServeHTTP(w, r)
	})
}

// authMiddleware enforces HTTP Basic authentication against the credentials
// held in the config, rejecting anything malformed or mismatched with 401.
func authMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		config := cfg.GetConfig()
		w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)

		// Expect "Basic <base64(user:pass)>".
		s := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
		if len(s) != 2 {
			http.Error(w, "Not authorized", http.StatusUnauthorized)
			return
		}

		b, err := base64.StdEncoding.DecodeString(s[1])
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}

		pair := strings.SplitN(string(b), ":", 2)
		if len(pair) != 2 {
			http.Error(w, "Not authorized", http.StatusUnauthorized)
			return
		}

		if pair[0] != config.Username || pair[1] != config.Password {
			// Log the attempted user name for auditing.
			cfg.Logger.WithFields(logrus.Fields{
				"remoteUser": pair[0],
			}).Warning("not_authorized")
			http.Error(w, "Not authorized", http.StatusUnauthorized)
			return
		}

		cfg.BindFields(logrus.Fields{"remoteUser": pair[0]})
		next.ServeHTTP(w, r)
	})
}
package git

import (
	"container/heap"
	"fmt"
)

// NodeFlag is a bitmask of traversal colors/markers for commit nodes.
type NodeFlag uint32

const (
	NodeColorRed NodeFlag = (1 << iota)
	NodeColorGreen
	NodeColorBlue
	// Yellow = reached from both a red tip and a green tip.
	NodeColorYellow = NodeColorRed | NodeColorGreen
	// White = yellow that has additionally been marked blue.
	NodeColorWhite = NodeColorRed | NodeColorGreen | NodeColorBlue

	// NodeFlagSeen marks a node already emitted by VisitCommits.
	NodeFlagSeen = 1 << 4
)

// CommitNode wraps a loaded commit with its traversal state.
type CommitNode struct {
	commit  *Commit
	parents []*CommitNode

	Flags NodeFlag
	ID    SHA1
}

// Parents returns the (lazily loaded) parent nodes.
func (n *CommitNode) Parents() []*CommitNode {
	return n.parents
}

// CommitGraph is a lazily-populated DAG of commits rooted at a set of tips.
type CommitGraph struct {
	tips    []*CommitNode
	commits map[SHA1]*CommitNode

	repo *Repository
}

// NewCommitGraph creates an empty graph backed by repo.
func NewCommitGraph(repo *Repository) *CommitGraph {
	return &CommitGraph{repo: repo, commits: make(map[SHA1]*CommitNode, 0)}
}

// openObject returns the cached node for oid, loading (and type-checking)
// the underlying object on first access.
func (c *CommitGraph) openObject(oid SHA1) (*CommitNode, error) {
	if node, ok := c.commits[oid]; ok {
		return node, nil
	}

	obj, err := c.repo.OpenObject(oid)
	if err != nil {
		return nil, err
	}

	commit, ok := obj.(*Commit)
	if !ok {
		return nil, fmt.Errorf("object [%s] not of type commit", oid)
	}

	node := &CommitNode{commit: commit, ID: oid}
	c.commits[oid] = node

	return node, nil
}

// AddTip registers oid as a starting point for traversals.
func (c *CommitGraph) AddTip(oid SHA1) (*CommitNode, error) {
	node, err := c.openObject(oid)
	if err != nil {
		return nil, err
	}

	c.tips = append(c.tips, node)
	return node, nil
}

// loadParents populates node.parents from the commit's parent ids,
// skipping the work if they are already loaded.
func (c *CommitGraph) loadParents(node *CommitNode) error {
	if len(node.parents) != len(node.commit.Parent) {
		node.parents = make([]*CommitNode, len(node.commit.Parent))
		for i, parent := range node.commit.Parent {
			var err error
			node.parents[i], err = c.openObject(parent)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

//youngestFirst is a priority queue implemented via a 'container/heap'
//the latter is a min-heap, which nicely aligns with times in epoch
type youngestFirst []*CommitNode

func (y youngestFirst) Len() int {
	return len(y)
}

func (y youngestFirst) Less(i, j int) bool {
	// true -> i before j
	//  -> i.Date() after j.Date
	ic, jc := y[i], y[j]
	return ic.commit.Date().After(jc.commit.Date())
}

func (y youngestFirst) Swap(i, j int) {
	y[i], y[j] = y[j], y[i]
}

// Push appends a node (heap.Interface requirement).
func (y *youngestFirst) Push(x interface{}) {
	*y = append(*y, x.(*CommitNode))
}

// Pop removes and returns the last element (heap.Interface requirement).
func (y *youngestFirst) Pop() interface{} {
	n := len(*y) - 1
	o := *y
	r := o[n]
	*y = o[0:n]
	return r
}

// notAllWhite reports whether any queued node has not yet been painted
// fully white (i.e. traversal below common ancestors may still progress).
func (y youngestFirst) notAllWhite() bool {
	for _, node := range y {
		if node.Flags&NodeColorWhite != NodeColorWhite {
			return true
		}
	}
	return false
}

//youngestFirstFromTips creates a priority queue initialized
//with the tips of the graph.
func (c *CommitGraph) youngestFirstFromTips() youngestFirst {
	pq := make(youngestFirst, len(c.tips))
	for i, node := range c.tips {
		pq[i] = node
	}
	heap.Init(&pq)
	return pq
}

// PaintDownToCommon walks from the tips toward the roots, propagating each
// tip's color to ancestors; nodes reached from both sides (yellow) are
// additionally marked blue — presumably to identify common ancestors, as in
// git's merge-base paint-down algorithm. Traversal stops once every queued
// node is fully white.
func (c *CommitGraph) PaintDownToCommon() error {
	pq := c.youngestFirstFromTips()

	for pq.notAllWhite() {
		node := heap.Pop(&pq).(*CommitNode)
		flags := node.Flags

		// Reached from both colors but not yet blue: promote to blue.
		if flags&NodeColorWhite == NodeColorYellow {
			flags |= NodeColorBlue
		}

		err := c.loadParents(node)
		if err != nil {
			return err
		}

		for _, parent := range node.parents {
			// NOTE(review): this is an exact-equality check, so a parent
			// whose flags are a strict superset is still re-pushed —
			// confirm that is intended (upstream git masks with `flags`).
			if parent.Flags == flags {
				continue
			}
			parent.Flags |= flags
			heap.Push(&pq, parent)
		}
	}

	return nil
}

// CommitVisitor receives nodes in youngest-first order; returning true
// stops the walk.
type CommitVisitor func(node *CommitNode) bool

// VisitCommits walks the graph from the tips, youngest commit first,
// invoking fn once per reachable node.
func (c *CommitGraph) VisitCommits(fn CommitVisitor) {
	//let's clear all the seen flags so we can use them
	for _, v := range c.commits {
		v.Flags &^= NodeFlagSeen
	}

	pq := c.youngestFirstFromTips()

	for len(pq) != 0 {
		node := heap.Pop(&pq).(*CommitNode)

		if node.Flags&NodeFlagSeen != 0 {
			continue
		}

		node.Flags |= NodeFlagSeen
		stop := fn(node)

		if stop {
			break
		}

		// ensure commits are loaded?
		for _, parent := range node.Parents() {
			heap.Push(&pq, parent)
		}
	}
}
package main

import (
	"fmt"
	"math"
)

// find132pattern reports whether nums contains indices i < j < k with
// nums[i] < nums[k] < nums[j] (a "132" pattern).
//
// Scan from the right, keeping a decreasing stack of candidates for the "3";
// `third` holds the largest value ever popped — the best candidate so far
// for the "2" (it is guaranteed to have a bigger element to its right).
// Any later (leftward) element smaller than `third` completes the pattern.
// O(n) time, O(n) space.
func find132pattern(nums []int) bool {
	third := math.MinInt64
	candidates := make([]int, 0)
	for i := len(nums) - 1; i >= 0; i-- {
		if nums[i] < third {
			return true
		}
		// Pop everything smaller than nums[i]; the largest popped value
		// becomes the new "2" candidate.
		for len(candidates) > 0 && candidates[len(candidates)-1] < nums[i] {
			third = candidates[len(candidates)-1]
			candidates = candidates[:len(candidates)-1]
		}
		candidates = append(candidates, nums[i])
	}
	return false
}

func main() {
	nums := []int{3, 1, 4, 2}
	fmt.Println(find132pattern(nums))
}
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tasks

import (
	"context"

	"yunion.io/x/jsonutils"
	"yunion.io/x/log"
	"yunion.io/x/pkg/errors"

	api "yunion.io/x/onecloud/pkg/apis/compute"
	"yunion.io/x/onecloud/pkg/cloudcommon/db"
	"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
	"yunion.io/x/onecloud/pkg/compute/models"
	"yunion.io/x/onecloud/pkg/util/logclient"
)

// WafSyncstatusTask syncs a WAF instance's status (and its rules) from the
// backing cloud provider.
type WafSyncstatusTask struct {
	taskman.STask
}

func init() {
	taskman.RegisterTask(WafSyncstatusTask{})
}

// taskFailed marks the WAF unknown, records the action log and fails the task.
func (self *WafSyncstatusTask) taskFailed(ctx context.Context, waf *models.SWafInstance, err error) {
	waf.SetStatus(self.UserCred, api.WAF_STATUS_UNKNOWN, err.Error())
	logclient.AddActionLogWithStartable(self, waf, logclient.ACT_SYNC_STATUS, err, self.UserCred, false)
	self.SetStageFailed(ctx, jsonutils.NewString(err.Error()))
}

// OnInit fetches the cloud-side WAF, syncs the local record with it and then
// best-effort syncs the rules (a rules fetch failure is not fatal).
func (self *WafSyncstatusTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) {
	waf := obj.(*models.SWafInstance)
	iWaf, err := waf.GetICloudWafInstance()
	if err != nil {
		self.taskFailed(ctx, waf, errors.Wrapf(err, "GetICloudWafInstance"))
		return
	}
	waf.SyncWithCloudWafInstance(ctx, self.GetUserCred(), iWaf)
	rules, err := iWaf.GetRules()
	if err == nil {
		result := waf.SyncWafRules(ctx, self.GetUserCred(), rules)
		log.Infof("Sync waf %s rules result: %s", waf.Name, result.Result())
	}
	self.SetStageComplete(ctx, nil)
}
package sentiment

import "strings"
import "regexp"

// whitespace matches any run of carriage returns, newlines, tabs or spaces.
var whitespace = regexp.MustCompile("[\\r\\n\\t ]+")

// SanitizerFunc will operate on an entire document and return
// the result. Note that the length of the processed array
// need not be the same as the input.
type SanitizerFunc func(words []string) (result []string)

// A Sanitizer will apply a bunch of Sanitizer functions
// in sequence.
type Sanitizer struct {
	funcs []SanitizerFunc
}

// NewSanitizer returns a new Sanitizer that applies the given functions
// in the order supplied.
func NewSanitizer(funcs ...SanitizerFunc) *Sanitizer {
	return &Sanitizer{funcs: funcs}
}

// GetDocument collapses all whitespace runs in document to single spaces,
// splits it into tokens, and pipes the token slice through every
// registered sanitizer function (nil entries are skipped).
func (s *Sanitizer) GetDocument(document string) (result []string) {
	result = strings.Split(whitespace.ReplaceAllString(document, " "), " ")
	for _, f := range s.funcs {
		if f == nil {
			continue
		}
		result = f(result)
	}
	return
}

// apply maps f over every word in place and returns the same slice.
// Note: the input slice is mutated.
func apply(words []string, f func(string) string) (result []string) {
	result = words
	for i := range result {
		result[i] = f(result[i])
	}
	return
}

// filterIf returns the words for which f reports false, preserving order.
func filterIf(words []string, f func(string) bool) (result []string) {
	result = make([]string, 0, len(words))
	for _, word := range words {
		if f(word) {
			continue
		}
		result = append(result, word)
	}
	return
}
package main

import "fmt"
import "strings"
import "unicode/utf8"

// isLetter reports whether c is an ASCII letter.
func isLetter(c rune) bool {
	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
}

// Fast_Encode_Upper shifts uppercase letter a forward by key letter b
// (both expected in 'A'..'Z').
func Fast_Encode_Upper(a, b rune) rune {
	return (((a - 'A') + (b - 'A')) % 26) + 'A'
}

// Fast_Decode_Upper undoes Fast_Encode_Upper.
func Fast_Decode_Upper(a, b rune) rune {
	return (((((a - 'A') - (b - 'A')) + 26) % 26) + 'A')
}

// Fast_Encode_Lower shifts lowercase letter a forward by key letter b
// (both expected in 'a'..'z').
func Fast_Encode_Lower(a, b rune) rune {
	return (((a - 'a') + (b - 'a')) % 26) + 'a'
}

// Fast_Decode_Lower undoes Fast_Encode_Lower.
func Fast_Decode_Lower(a, b rune) rune {
	return (((((a - 'a') - (b - 'a')) + 26) % 26) + 'a')
}

// encrypt_vigenere applies a Vigenère shift of plaintext by the repeated key.
// Non-letters are copied through unchanged but still consume a key position.
// NOTE(review): assumes ASCII input and a key whose letters match the case
// of the plaintext letter at the same position — mixed case or multi-byte
// runes produce surprising results; confirm inputs before wider use.
func encrypt_vigenere(plaintext string, shift string) string {
	out := make([]rune, 0, len(plaintext))
	keyStream := strings.Repeat(shift, utf8.RuneCountInString(plaintext))
	for i, ch := range plaintext {
		b := rune(plaintext[i])
		switch {
		case !isLetter(b):
			out = append(out, b)
		case b >= 'a' && b <= 'z':
			out = append(out, Fast_Encode_Lower(ch, rune(keyStream[i])))
		default:
			out = append(out, Fast_Encode_Upper(ch, rune(keyStream[i])))
		}
	}
	return string(out)
}

// decrypt_vigenere reverses encrypt_vigenere with the same key.
func decrypt_vigenere(ciphertext, key string) string {
	out := make([]rune, 0, len(ciphertext))
	keyStream := strings.Repeat(key, utf8.RuneCountInString(ciphertext))
	for i, ch := range ciphertext {
		b := rune(ciphertext[i])
		switch {
		case !isLetter(b):
			out = append(out, b)
		case b >= 'a' && b <= 'z':
			out = append(out, Fast_Decode_Lower(ch, rune(keyStream[i])))
		default:
			out = append(out, Fast_Decode_Upper(ch, rune(keyStream[i])))
		}
	}
	return string(out)
}

func main() {
	var str string
	str = encrypt_vigenere("python3.5", "a")
	fmt.Println(str)
	fmt.Println(decrypt_vigenere(str, "a"))
}
// Package session defines the session-store abstraction used by the
// application's HTTP layer.
package session

import "github.com/gin-contrib/sessions"

// Session provides access to the underlying gin-contrib session store.
type Session interface {
	// Store returns the backing sessions.Store implementation.
	Store() sessions.Store
}
package main // /root page -- welcome, show link to go to /mastermind // /mastermind -- welcome, mastermind home page, instruction // /mastermind/play -- start playing, sessionId, secret code generated, board init // /mastermind/play/{sessionId}?guess={guess} -- send guess to the server to crack the code // /mastermind/endgame -- game ended, link to /play again, or quit // /mastermind/ // tables // 1. secret code table, cols: // - client_id - string, pk // - session_id - string (client_id + '_' + timestamp) // - secret - string (4 digits) // - session_active - boolean // 2. guess table, pk = sessionId_attempt // - session_id_attempt - string, pk // - session_id - string // - attempt_no - int // - guess - string // - hint - string func main() { }
package transfer import ( "net" "golang.org/x/net/context" "github.com/juntaki/transparent" pb "github.com/juntaki/transparent/transfer/pb" "google.golang.org/grpc" ) type receiver struct { serverAddr string grpcServer *grpc.Server transferServer *server } func NewSimpleLayerReceiver(serverAddr string) transparent.Layer { r := NewSimpleReceiver(serverAddr) return transparent.NewLayerReceiver(r) } // NewSimpleReceiver returns simple Receiver func NewSimpleReceiver(serverAddr string) transparent.BackendReceiver { return &receiver{ serverAddr: serverAddr, transferServer: &server{converter: converter{}}, } } func (r *receiver) Start() error { lis, err := net.Listen("tcp", r.serverAddr) if err != nil { return err } r.grpcServer = grpc.NewServer() pb.RegisterTransferServer(r.grpcServer, r.transferServer) go r.grpcServer.Serve(lis) return nil } func (r *receiver) Stop() error { return nil } func (r *receiver) SetCallback(cb func(m *transparent.Message) (*transparent.Message, error)) error { r.transferServer.callback = cb return nil } type server struct { converter callback func(m *transparent.Message) (*transparent.Message, error) } func (t *server) Request(c context.Context, m *pb.Message) (*pb.Message, error) { decoded, err := t.convertReceiveMessage(m) if err != nil { return nil, err } res, err := t.callback(decoded) if err != nil { return nil, err } message, err := t.convertSendMessage(res) if err != nil { return nil, err } return message, nil }
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package webrtc

import (
	"context"
	"time"

	"chromiumos/tast/common/media/caps"
	"chromiumos/tast/local/bundles/cros/webrtc/mediarecorder"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/testing"
)

// mediaRecorderPerfTest is used to describe the config used to run each test case.
type mediaRecorderPerfTest struct {
	enableHWAccel bool   // Instruct to use hardware or software encoding.
	profile       string // Codec to try, e.g. VP8, VP9.
}

// init registers one parameterized case per codec/encoder combination.
// Software cases pin a fixture that disables HW encoding; hardware cases
// additionally require the matching HW-encode capability.
func init() {
	testing.AddTest(&testing.Test{
		Func:         MediaRecorderPerf,
		LacrosStatus: testing.LacrosVariantUnknown,
		Desc:         "Captures performance data about MediaRecorder for both SW and HW",
		Contacts: []string{
			"mcasas@chromium.org", // Test author.
			"hiroh@chromium.org",
			"chromeos-gfx-video@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Data:         []string{"loopback_media_recorder.html"},
		Attr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
		Timeout:      5 * time.Minute,
		Params: []testing.Param{{
			Name:              "h264_sw",
			Val:               mediaRecorderPerfTest{enableHWAccel: false, profile: "H264"},
			ExtraSoftwareDeps: []string{"proprietary_codecs"},
			Fixture:           "chromeVideoWithFakeWebcamAndSWEncoding",
		}, {
			Name:    "vp8_sw",
			Val:     mediaRecorderPerfTest{enableHWAccel: false, profile: "VP8"},
			Fixture: "chromeVideoWithFakeWebcamAndSWEncoding",
		}, {
			Name:    "vp9_sw",
			Val:     mediaRecorderPerfTest{enableHWAccel: false, profile: "VP9"},
			Fixture: "chromeVideoWithFakeWebcamAndSWEncoding",
		}, {
			Name:              "h264_hw",
			Val:               mediaRecorderPerfTest{enableHWAccel: true, profile: "H264"},
			ExtraSoftwareDeps: []string{caps.HWEncodeH264, "proprietary_codecs"},
			Fixture:           "chromeVideoWithFakeWebcam",
		}, {
			// TODO(b/236546408): Remove once hardware variable bitrate encoding is enabled by default.
			Name:              "h264_hw_vbr",
			Val:               mediaRecorderPerfTest{enableHWAccel: true, profile: "H264"},
			ExtraSoftwareDeps: []string{caps.HWEncodeH264VBR, "proprietary_codecs"},
			Fixture:           "chromeVideoWithFakeWebcamAndHWVBREncoding",
		}, {
			Name:              "vp8_hw",
			Val:               mediaRecorderPerfTest{enableHWAccel: true, profile: "VP8"},
			ExtraSoftwareDeps: []string{caps.HWEncodeVP8},
			Fixture:           "chromeVideoWithFakeWebcam",
		}, {
			Name:              "vp9_hw",
			Val:               mediaRecorderPerfTest{enableHWAccel: true, profile: "VP9"},
			ExtraSoftwareDeps: []string{caps.HWEncodeVP9},
			Fixture:           "chromeVideoWithFakeWebcam",
		}},
	})
}

// MediaRecorderPerf captures the perf data of MediaRecorder for HW and SW
// cases with a given codec and uploads to server.
func MediaRecorderPerf(ctx context.Context, s *testing.State) {
	testOpt := s.Param().(mediaRecorderPerfTest)
	if err := mediarecorder.MeasurePerf(ctx, s.FixtValue().(*chrome.Chrome), s.DataFileSystem(), s.OutDir(), testOpt.profile, testOpt.enableHWAccel); err != nil {
		s.Error("Failed to measure performance: ", err)
	}
}
// Package lbricks declares the small capability interfaces that game
// entities and the renderer compose.
package lbricks

import "github.com/ungerik/go3d/vec2"

// Typable exposes a type discriminator.
type Typable interface {
	Type()
}

// Poolable can be reset for reuse (e.g. from an object pool).
type Poolable interface {
	Reset()
}

// Disposable releases the resources it holds.
type Disposable interface {
	Dispose()
}

// Renderable draws itself into the given Batch.
type Renderable interface {
	Render(Batch)
}

// Positionable exposes a pointer to a 2D position.
type Positionable interface {
	Position() *vec2.T
}

// Scalable exposes a pointer to a 2D scale factor.
type Scalable interface {
	Scale() *vec2.T
}

// Rotable exposes a rotation angle.
type Rotable interface {
	Rotation() float32
}

// Identificable exposes a numeric id.
type Identificable interface {
	Id() uint
}

// Nombrable exposes a human-readable name.
type Nombrable interface {
	Name() string
}

// Tagger exposes a list of string tags.
type Tagger interface {
	Tags() []string
}

// Displayable reports whether the entity is currently visible.
type Displayable interface {
	Visible() bool
}

// Colorize exposes a packed RGB color and an alpha value.
type Colorize interface {
	RGB() uint32
	Alpha() float32
}

// Drawable describes a texture region to be drawn.
type Drawable interface {
	Texture() uint
	Width() float32
	Height() float32
	View() (float32, float32, float32, float32)
}

// Recognizable groups identity, name, and tags.
type Recognizable interface {
	Identificable
	Nombrable
	Tagger
}

// Transformer groups position, scale, and rotation.
type Transformer interface {
	Positionable
	Scalable
	Rotable
}

// Element is a colored, drawable entity with a shape and an anchor point.
// (Shape is declared elsewhere in the package.)
type Element interface {
	Colorize
	Drawable
	Shape() Shape
	Anchor() [2]float32
}

// Layout exposes a layout direction. (Direction is declared elsewhere.)
type Layout interface {
	Direction() Direction
}

// View is a transformable Element assigned to a render layer.
type View interface {
	Transformer
	Element
	Layer() uint
}

// Batch collects draw calls issued between Begin and End.
type Batch interface {
	Begin()
	Draw(r Drawable, x, y, originX, originY, scaleX, scaleY, rotation float32, color uint32, transparency float32)
	End()
	SetProjection(width, height float32)
}
/* Package resolv is a simple collision detection and resolution library. Its goal is to be lightweight, fast, simple, and easy-to-use for game development. Its goal is to also to not become a physics engine or physics library itself, but to always leave the actual physics implementation and "game feel" to the developer, while making it very easy to do so. Usage of resolv essentially centers around two main concepts: Spaces and Shapes. A Shape can be used to test for collisions against another Shape. That's really all they have to do, but that capability is powerful when paired with the resolv.Resolve() function. You can then check to see if a Shape would have a collision if it attempted to move in a specified direction. If so, the Resolve() function would return a Collision object, which tells you some information about the Collision, like how far the checking Shape would have to move to come into contact with the other, and which Shape it comes into contact with. A Space is just a slice that holds Shapes for detection. It doesn't represent any real physical space, and so there aren't any units of measurement to remember when using Spaces. Similar to Shapes, Spaces are simple, but also very powerful. Spaces allow you to easily check for collision with, and resolve collision against multiple Shapes within that Space. A Space being just a collection of Shapes means that you can manipulate and filter them as necessary. */ package resolv import ( "fmt" "math" ) func abs(f float64) float64 { if f < 0 { return -f } return f } // Space represents a collection that holds Shapes for collision detection in the same common space. A Space is arbitrarily large - // you can use one Space for a single level, room, or area in your game, or split it up if it makes more sense for your game design. // Technically, a Space is just a slice of Shapes. type Space []Shape // NewSpace creates a new Space for shapes to exist in and be tested against in. 
func NewSpace() Space { sp := Space{} sp = make(Space, 0) return sp } // AddShape adds the designated Shapes to the Space. func (sp *Space) AddShape(shapes ...Shape) { *sp = append(*sp, shapes...) } // RemoveShape removes the designated Shapes from the Space. func (sp *Space) RemoveShape(shapes ...Shape) { for _, shape := range shapes { for deleteIndex, s := range *sp { if s == shape { s := *sp s[deleteIndex] = nil s = append(s[:deleteIndex], s[deleteIndex+1:]...) *sp = s break } } } } // Clear "resets" the Space, cleaning out the Space of references to Shapes. func (sp *Space) Clear() { *sp = make(Space, 0) } // IsColliding returns whether the provided Shape is colliding with something in this Space. func (sp *Space) IsColliding(shape Shape) bool { for _, other := range *sp { if other != shape { if shape.IsColliding(other) { return true } } } return false } // GetCollidingShapes returns a Space comprised of Shapes that collide with the checking Shape. func (sp *Space) GetCollidingShapes(shape Shape) Space { newSpace := Space{} for _, other := range *sp { if other != shape { if shape.IsColliding(other) { newSpace = append(newSpace, other) } } } return newSpace } // Resolve runs Resolve() using the checking Shape, checking against all other Shapes in the Space. The first Collision // that returns true is the Collision that gets returned. func (sp *Space) Resolve(checkingShape Shape, deltaX, deltaY int32) Collision { res := Collision{} for _, other := range *sp { if other != checkingShape && checkingShape.WouldBeColliding(other, int32(deltaX), int32(deltaY)) { res = Resolve(checkingShape, other, deltaX, deltaY) if res.Colliding() { break } } } return res } // Filter filters out a Space, returning a new Space comprised of Shapes that return true for the boolean function you provide. // This can be used to focus on a set of object for collision testing or resolution, or lower the number of Shapes to test // by filtering some out beforehand. 
func (sp *Space) Filter(filterFunc func(Shape) bool) Space { subSpace := make(Space, 0) for _, shape := range *sp { if filterFunc(shape) { subSpace.AddShape(shape) } } return subSpace } // FilterByTags filters a Space out, creating a new Space that has just the Shapes that have all of the specified tags. func (sp *Space) FilterByTags(tags ...string) Space { return sp.Filter(func(s Shape) bool { if s.HasTags(tags...) { return true } return false }) } // Contains returns true if the Shape provided exists within the Space. func (sp *Space) Contains(shape Shape) bool { for _, s := range *sp { if s == shape { return true } } return false } func (sp *Space) String() string { str := "" for _, s := range *sp { str += fmt.Sprintf("%v ", s) } return str } // Shape is a basic interface that describes a Shape that can be passed to collision resolution functions and exist in the same // Space. type Shape interface { IsColliding(Shape) bool WouldBeColliding(Shape, int32, int32) bool IsCollideable() bool SetCollideable(bool) GetTags() []string SetTags(...string) HasTags(...string) bool GetData() interface{} SetData(interface{}) GetXY() (int32, int32) SetXY(int32, int32) } // basicShape isn't to be used; it just has some basic functions and data, common to all structs that embed it, like and position // and collide-ability. type basicShape struct { X, Y int32 tags []string Collideable bool Data interface{} } // GetTags returns the tags on the Shape. func (b *basicShape) GetTags() []string { return b.tags } // SetTags sets the tags on the Shape. func (b *basicShape) SetTags(tags ...string) { b.tags = tags } // If the Shape has all of the tags provided. func (b *basicShape) HasTags(tags ...string) bool { hasTags := true for _, t1 := range tags { found := false for _, shapeTag := range b.tags { if t1 == shapeTag { found = true continue } } if !found { hasTags = false break } } return hasTags } // IsCollideable returns whether the Shape is currently collide-able or not. 
func (b *basicShape) IsCollideable() bool { return b.Collideable } // SetCollideable sets the Shape's collide-ability. func (b *basicShape) SetCollideable(on bool) { b.Collideable = on } // GetData returns the data on the Shape. func (b *basicShape) GetData() interface{} { return b.Data } // SetData sets the data on the Shape. func (b *basicShape) SetData(data interface{}) { b.Data = data } // GetXY returns the position of the Shape. func (b *basicShape) GetXY() (int32, int32) { return b.X, b.Y } // SetXY sets the position of the Shape. func (b *basicShape) SetXY(x, y int32) { b.X = x b.Y = y } // Collision describes the collision found when a Shape attempted to resolve a movement into another Shape, or in the same Space as // other existing Shapes. type Collision struct { ResolveX, ResolveY int32 // ResolveX and ResolveY represent the displacement of the Shape to the point of collision. How far along the Shape got when // attempting to move along the direction given by deltaX and deltaY in the Resolve() function before touching another Shape. Teleporting bool // Teleporting is if moving according to ResolveX and ResolveY might be considered teleporting, which is moving greater than the // X or deltaY provided to the Resolve function * 1.5 (this is arbitrary, but can be useful). OtherShape Shape // OtherShape should be a pointer to the Shape that the colliding object collided with. } // Colliding returns whether the Collision actually was valid because of a collision against another Shape. func (c Collision) Colliding() bool { return c.OtherShape != nil } // Resolve attempts to move the checking Shape with the specified X and Y values, returning a Collision object if it collides with // the specified other Shape. The deltaX and deltaY arguments are the movement displacement in pixels. For most situations, you // would want to resolve on the X and Y axes separately. 
func Resolve(firstShape Shape, other Shape, deltaX, deltaY int32) Collision {

	out := Collision{}
	out.ResolveX = deltaX
	out.ResolveY = deltaY

	// Nothing to resolve if either Shape opts out of collision or there is no movement.
	if !firstShape.IsCollideable() || !other.IsCollideable() || (deltaX == 0 && deltaY == 0) {
		return out
	}

	// Walk the movement vector backwards in float space, one unit at a time
	// along the dominant axis, with the other axis following the slope.
	x := float32(deltaX)
	y := float32(deltaY)

	primeX := true // true when X is the dominant (unit-step) axis.
	slope := float32(0)

	if deltaY != 0 && deltaX != 0 {
		slope = float32(deltaY) / float32(deltaX)
	}

	if abs(float64(deltaY)) > abs(float64(deltaX)) {
		// Y dominates; invert the slope so X follows Y.
		primeX = false
		if deltaY != 0 && deltaX != 0 {
			slope = float32(deltaX) / float32(deltaY)
		}
	}

	for true {

		if firstShape.WouldBeColliding(other, out.ResolveX, out.ResolveY) {

			// Still colliding at this displacement: step one unit back toward
			// the origin on the dominant axis, proportionally on the other.
			// NOTE(review): if the Shapes already overlap at zero displacement,
			// this keeps stepping past the origin — confirm callers never
			// resolve from an initially-overlapping state.
			if primeX {
				if deltaX > 0 {
					x--
				} else if deltaX < 0 {
					x++
				}
				if deltaY > 0 {
					y -= slope
				} else if deltaY < 0 {
					y += slope
				}
			} else {
				if deltaY > 0 {
					y--
				} else if deltaY < 0 {
					y++
				}
				if deltaX > 0 {
					x -= slope
				} else if deltaX < 0 {
					x += slope
				}
			}

			out.ResolveX = int32(x)
			out.ResolveY = int32(y)
			out.OtherShape = other

		} else {
			break
		}
	}

	// Flag implausibly large corrections (more than 1.5x the requested delta).
	if abs(float64(deltaX-out.ResolveX)) > abs(float64(deltaX)*1.5) || abs(float64(deltaY-out.ResolveY)) > abs(float64(deltaY)*1.5) {
		out.Teleporting = true
	}

	return out
}

// Rectangle represents a rectangle.
type Rectangle struct {
	basicShape
	W, H int32
}

// NewRectangle creates a new Rectangle and returns a pointer to it.
func NewRectangle(x, y, w, h int32) *Rectangle {
	r := &Rectangle{W: w, H: h}
	r.X = x
	r.Y = y
	r.Collideable = true
	return r
}

// IsColliding returns whether the Rectangle is colliding with the specified other Shape or not.
func (r *Rectangle) IsColliding(other Shape) bool {

	if !r.Collideable || !other.IsCollideable() {
		return false
	}

	b, ok := other.(*Rectangle)

	if ok {
		// Standard AABB overlap test.
		return r.X > b.X-r.W && r.Y > b.Y-r.H && r.X < b.X+b.W && r.Y < b.Y+b.H
	}

	c, ok := other.(*Circle)

	if ok {
		// Circle-vs-rectangle is implemented on Circle; delegate.
		return c.IsColliding(r)
	}

	fmt.Println("WARNING! Object ", other, " isn't a valid shape for collision testing against a Rectangle ", r, "!")

	return false
}

// WouldBeColliding returns whether the Rectangle would be colliding with the other Shape if it were to move in the
// specified direction.
func (r *Rectangle) WouldBeColliding(other Shape, dx, dy int32) bool {
	// Translate, test, translate back.
	r.X += dx
	r.Y += dy
	isColliding := r.IsColliding(other)
	r.X -= dx
	r.Y -= dy
	return isColliding
}

// IsZero returns whether the Rectangle has been initialized or not.
func (r *Rectangle) IsZero() bool {
	return r.X == 0 && r.Y == 0 && r.W == 0 && r.H == 0
}

// Center returns the center point of the Rectangle.
func (r *Rectangle) Center() (int32, int32) {
	x := r.X + r.W/2
	y := r.Y + r.H/2
	return x, y
}

// A Circle represents an ordinary circle, and has a radius, in addition to normal shape properties.
type Circle struct {
	basicShape
	Radius int32
}

// NewCircle returns a pointer to a new Circle object.
func NewCircle(x, y, radius int32) *Circle {
	c := &Circle{Radius: radius}
	c.X = x
	c.Y = y
	c.Collideable = true
	return c
}

// IsColliding returns true if the Circle is colliding with the specified other Shape.
func (c *Circle) IsColliding(other Shape) bool {

	if !c.Collideable || !other.IsCollideable() {
		return false
	}

	b, ok := other.(*Circle)

	if ok {
		// Circle-vs-circle: centers closer than the sum of the radii.
		return Distance(c.X, c.Y, b.X, b.Y) <= c.Radius+b.Radius
	}

	r, ok := other.(*Rectangle)

	if ok {
		// Circle-vs-rectangle: clamp the center to the rectangle and compare
		// the distance to that closest point against the radius.
		closestX := c.X
		closestY := c.Y

		if c.X < r.X {
			closestX = r.X
		} else if c.X > r.X+r.W {
			closestX = r.X + r.W
		}

		if c.Y < r.Y {
			closestY = r.Y
		} else if c.Y > r.Y+r.H {
			closestY = r.Y + r.H
		}

		return Distance(c.X, c.Y, closestX, closestY) <= c.Radius
	}

	fmt.Println("WARNING! Object ", other, " isn't a valid shape for collision testing against Circle ", c, "!")

	return false
}

// WouldBeColliding returns whether the Rectangle would be colliding with the specified other Shape if it were to move
// in the specified direction.
func (c *Circle) WouldBeColliding(other Shape, dx, dy int32) bool { c.X += dx c.Y += dy isColliding := c.IsColliding(other) c.X -= dx c.Y -= dy return isColliding } // GetBoundingRect returns a Rectangle which has a width and height of 2*Radius. func (c *Circle) GetBoundingRect() *Rectangle { r := &Rectangle{} r.W = c.Radius * 2 r.H = c.Radius * 2 r.X = c.X - r.W/2 r.Y = c.Y - r.H/2 return r } // Distance returns the distance from one pair of X and Y values to another. func Distance(x, y, x2, y2 int32) int32 { dx := x - x2 dy := y - y2 ds := (dx * dx) + (dy * dy) return int32(math.Sqrt(abs(float64(ds)))) }
// Package servicediscovery manages the in-pod haproxy instance that exposes
// user-defined services, syncing loopback addresses and the haproxy config.
package servicediscovery

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/hyperhq/hyper/utils"
	"github.com/hyperhq/runv/hypervisor"
	"github.com/hyperhq/runv/hypervisor/pod"
	"github.com/hyperhq/runv/lib/glog"
)

var (
	ServiceVolume string = "/usr/local/etc/haproxy/"
	ServiceImage  string = "haproxy:latest"
	ServiceConfig string = "haproxy.cfg"
)

// UpdateLoopbackAddress diffs the old and new service lists by ServiceIP and
// adds/removes the corresponding loopback addresses inside the container.
func UpdateLoopbackAddress(vm *hypervisor.Vm, container string, oldServices, newServices []pod.UserService) error {
	addedIPs := make([]string, 0, 1)
	deletedIPs := make([]string, 0, 1)

	// IPs present in new but not old must be added.
	for _, n := range newServices {
		found := 0
		for _, o := range oldServices {
			if n.ServiceIP == o.ServiceIP {
				found = 1
			}
		}
		if found == 0 {
			addedIPs = append(addedIPs, n.ServiceIP)
		}
	}

	// IPs present in old but not new must be removed.
	for _, o := range oldServices {
		found := 0
		for _, n := range newServices {
			if n.ServiceIP == o.ServiceIP {
				found = 1
			}
		}
		if found == 0 {
			deletedIPs = append(deletedIPs, o.ServiceIP)
		}
	}

	for _, ip := range addedIPs {
		err := SetupLoopbackAddress(vm, container, ip, "add")
		if err != nil {
			return err
		}
	}

	for _, ip := range deletedIPs {
		err := SetupLoopbackAddress(vm, container, ip, "del")
		if err != nil {
			return err
		}
	}

	return nil
}

// SetupLoopbackAddress adds or removes a /32 address on the container's
// loopback interface by exec'ing `ip addr` inside the container.
// operation must be "add" or "del".
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	// vm.Exec takes the argv as a JSON-encoded string array.
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	return vm.Exec(nil, nil, string(execcmd), "", container)
}

// ApplyServices pushes the given service list into the container: it syncs
// loopback addresses against the currently configured services, rewrites the
// haproxy config, and reloads haproxy.
func ApplyServices(vm *hypervisor.Vm, container string, services []pod.UserService) error {
	// Update lo ip addresses
	var command []string
	oldServices, err := GetServices(vm, container)
	if err != nil {
		return err
	}

	err = UpdateLoopbackAddress(vm, container, oldServices, services)
	if err != nil {
		return err
	}

	// Update haproxy config
	config := path.Join(ServiceVolume, ServiceConfig)
	vm.WriteFile(container, config, GenerateServiceConfig(services))

	// Soft-reload haproxy (-sf) against the recorded pid so existing
	// connections drain instead of being dropped.
	command = append(command, "sh")
	command = append(command, "-c")
	command = append(command, "haproxy -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf `cat /var/run/haproxy.pid`")

	execcmd, err := json.Marshal(command)
	if err != nil {
		return err
	}

	return vm.Exec(nil, nil, string(execcmd), "", container)
}

// GetServices reconstructs the service list by parsing the haproxy config
// currently inside the container (the inverse of GenerateServiceConfig).
func GetServices(vm *hypervisor.Vm, container string) ([]pod.UserService, error) {
	var services []pod.UserService

	config := path.Join(ServiceVolume, ServiceConfig)
	data, err := vm.ReadFile(container, config)
	if err != nil {
		return nil, err
	}

	token := bytes.Split(data, []byte("\n"))
	for _, tok := range token {
		first := bytes.Split(tok, []byte(" "))
		reader := bytes.NewReader(tok)
		if len(first) > 0 {
			var t1, t2, t3, t4 string
			if string(first[0][:]) == "frontend" {
				// "frontend front<idx> <ip>:<port>" opens a new service entry.
				s := pod.UserService{
					Protocol: "TCP",
				}
				_, err := fmt.Fscanf(reader, "%s %s %s", &t1, &t2, &t3)
				if err != nil {
					return nil, err
				}
				hostport := strings.Split(t3, ":")
				s.ServiceIP = hostport[0]
				port, err := strconv.ParseInt(hostport[1], 10, 32)
				if err != nil {
					return nil, err
				}
				s.ServicePort = int(port)
				services = append(services, s)
			} else if string(first[0][:]) == "\tserver" {
				// "\tserver back-<svc>-<host> <ip>:<port> check" attaches a
				// backend host to the service at index <svc>.
				var idx int
				var h pod.UserServiceBackend
				_, err := fmt.Fscanf(reader, "%s %s %s %s", &t1, &t2, &t3, &t4)
				if err != nil {
					return nil, err
				}
				hostport := strings.Split(t3, ":")
				h.HostIP = hostport[0]
				port, err := strconv.ParseInt(hostport[1], 10, 32)
				if err != nil {
					return nil, err
				}
				h.HostPort = int(port)
				// The service index is the middle component of "back-<svc>-<host>".
				idxs := strings.Split(t2, "-")
				idxLong, err := strconv.ParseInt(idxs[1], 10, 32)
				if err != nil {
					return nil, err
				}
				idx = int(idxLong)
				services[idx].Hosts = append(services[idx].Hosts, h)
			}
		}
	}

	return services, nil
}

// GenerateServiceConfig renders a haproxy configuration for the given
// services: a fixed global/defaults preamble, then one frontend/backend pair
// per service with round-robin balancing across its hosts.
func GenerateServiceConfig(services []pod.UserService) []byte {
	data := []byte{}
	globalConfig := fmt.Sprintf("global\n\t#chroot\t/var/lib/haproxy\n\tpidfile\t/var/run/haproxy.pid\n\tmaxconn\t4000\n\t#user\thaproxy\n\t#group\thaproxy\n\tdaemon\ndefaults\n\tmode\ttcp\n\tretries\t3\n\ttimeout queue\t1m\n\ttimeout connect\t10s\n\ttimeout client\t1m\n\ttimeout server\t1m\n\ttimeout check\t10s\n\tmaxconn\t3000\n")
	data = append(data, globalConfig...)
	for idx, srv := range services {
		front := fmt.Sprintf("frontend front%d %s:%d\n\tdefault_backend\tback%d\n", idx, srv.ServiceIP, srv.ServicePort, idx)
		data = append(data, front...)
		back := fmt.Sprintf("backend back%d\n\tbalance\troundrobin\n", idx)
		data = append(data, back...)
		for hostid, host := range srv.Hosts {
			back := fmt.Sprintf("\tserver back-%d-%d %s:%d check\n", idx, hostid, host.HostIP, host.HostPort)
			data = append(data, back...)
		}
	}

	glog.V(1).Infof("haproxy config: %s", data[:])
	return data
}

// checkHaproxyConfig writes a freshly generated config to disk only when none
// exists yet; an existing file is left untouched.
func checkHaproxyConfig(services []pod.UserService, config string) error {
	var err error

	glog.V(1).Infof("haproxy config: %s\n", config)
	if _, err = os.Stat(config); err != nil && os.IsNotExist(err) {
		/* Generate haproxy config from service and write to config */
		return ioutil.WriteFile(config, GenerateServiceConfig(services), 0644)
	}

	return err
}

// PrepareServices creates the per-pod service directory and seeds the haproxy
// config when the pod declares services; it is a no-op otherwise.
func PrepareServices(userPod *pod.UserPod, podId string) error {
	var serviceDir string = path.Join(utils.HYPER_ROOT, "services", podId)
	var config string = path.Join(serviceDir, ServiceConfig)
	var err error

	if len(userPod.Services) == 0 {
		return nil
	}

	if err = os.MkdirAll(serviceDir, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	return checkHaproxyConfig(userPod.Services, config)
}
// Package api declares the execution contracts implemented by cid action
// executors.
package api

import (
	commonapi "github.com/cidverse/cid/pkg/common/api"
	"github.com/cidverse/cid/pkg/core/catalog"
	"github.com/cidverse/cid/pkg/core/state"
)

// ActionExecutor is the interface that needs to be implemented by all action executors
type ActionExecutor interface {
	// GetName returns the name of the executor
	GetName() string
	// GetVersion returns the version of the executor
	GetVersion() string
	// GetType returns the action type which needs to match the config action type to activate this implementation
	GetType() string
	// Execute will run the action
	Execute(ctx *commonapi.ActionExecutionContext, localState *state.ActionStateContext, catalogAction *catalog.Action, action *catalog.WorkflowAction) error
}
package pubsub

import (
	"fmt"
	"log"
	"testing"
	"time"
)

// intlDummyChanColl is a test double for a channel collection, backed by a
// plain map keyed by channel id.
type intlDummyChanColl map[uint]Channel

// LoadOrOpen returns the channel for id, creating one on first use.
func (coll intlDummyChanColl) LoadOrOpen(id uint) Channel {
	if _, ok := coll[id]; !ok {
		coll[id] = newDummyChannel()
	}
	return coll[id]
}

// Close is a no-op in this test double.
func (coll intlDummyChanColl) Close(id uint) {
}

// intlDummyChannel is a minimal in-memory Channel implementation for tests.
type intlDummyChannel struct {
	broadcast chan interface{}
	conns     map[MessageWriteCloser]bool
}

// newDummyChannel builds an intlDummyChannel and starts its run loop.
func newDummyChannel() Channel {
	ch := &intlDummyChannel{
		broadcast: make(chan interface{}),
		conns:     make(map[MessageWriteCloser]bool),
	}
	go ch.run()
	return ch
}

// Subscribe registers conn to receive broadcasts.
func (ch *intlDummyChannel) Subscribe(conn MessageWriteCloser) {
	ch.conns[conn] = true
}

// Unsubscribe removes conn from the subscriber set.
func (ch *intlDummyChannel) Unsubscribe(conn MessageWriteCloser) {
	delete(ch.conns, conn)
}

// BroadcastJSON writes v to every subscribed connection.
// NOTE(review): WriteJSON errors are ignored here, unlike in run().
func (ch *intlDummyChannel) BroadcastJSON(v interface{}) {
	for conn := range ch.conns {
		conn.WriteJSON(v)
	}
}

// run pumps broadcast messages to all subscribers; a failed write closes and
// unsubscribes that client. The loop exits after one second of silence.
func (ch *intlDummyChannel) run() {
intlDummyChanMainLoop:
	for {
		select {
		case msg := <-ch.broadcast:
			// Grab the next message from the broadcast channel
			// Send it out to every client that is currently connected
			for client := range ch.conns {
				err := client.WriteJSON(msg)
				if err != nil {
					client.Close()
					ch.Unsubscribe(client)
					log.Printf("error: %v", err)
				}
			}
		case <-time.After(1 * time.Second):
			log.Printf("timeout")
			break intlDummyChanMainLoop
		}
	}
}

// dummyWriter records the last message written to it, for test assertions.
type dummyWriter struct {
	lastMsg      interface{}
	lastMsgType  int
	lastMsgBytes []byte
}

// WriteMessage captures a copy of the raw payload and its message type.
func (w *dummyWriter) WriteMessage(messageType int, p []byte) error {
	w.lastMsg = nil
	w.lastMsgType = messageType
	w.lastMsgBytes = make([]byte, len(p))
	copy(w.lastMsgBytes, p)
	return nil
}

// WriteJSON captures the value; lastMsgType of -1 marks a JSON write.
func (w *dummyWriter) WriteJSON(v interface{}) error {
	w.lastMsg = v
	w.lastMsgType = -1
	w.lastMsgBytes = make([]byte, 0)
	return nil
}

// Close is a no-op.
func (w *dummyWriter) Close() error {
	return nil
}

// errMsgWriter always fails writes, for exercising error paths.
type errMsgWriter int

func (w errMsgWriter) WriteMessage(messageType int, p []byte) error {
	return fmt.Errorf("dummy error, %#v, %#v", messageType, p)
}

func (w errMsgWriter) WriteJSON(v interface{}) error {
	return fmt.Errorf("dummy error, %#v", v)
}

func (w errMsgWriter) Close() error { return nil }

// TestMessageTo verifies that messageTo surfaces the writer's error verbatim.
func TestMessageTo(t *testing.T) {
	ch := &intlDummyChannel{
		conns: make(map[MessageWriteCloser]bool),
	}
	w1 := errMsgWriter(0)
	ch.Subscribe(w1)

	err := messageTo(ch, w1, "hello message")
	if err == nil {
		t.Errorf("expected error, got nil")
	}
	if want, have := `dummy error, "hello message"`, err.Error(); want != have {
		t.Errorf("expected %#v, got %#v", want, have)
	}
}
package cmd

import (
	"os"

	"github.com/spf13/cobra"
)

// newPublisherAlias optionally holds an alias to register for the newly
// trusted publisher (populated by the --alias flag).
var newPublisherAlias string

// trustPublisherCmd represents the trust-publisher command
var trustPublisherCmd = &cobra.Command{
	Use:   "trust-publisher ID_OR_ALIAS",
	Short: "records trust in publisher",
	Long:  `TODO`,
	Run: func(cmd *cobra.Command, args []string) {
		// Exactly one positional argument (the publisher id or alias) is required.
		if len(args) != 1 {
			cmd.Usage()
			os.Exit(-1)
		}
		idOrAlias := args[0]

		// Resolve the argument to an identity and take its public key.
		id, err := resolveAlias(idOrAlias)
		mustSucceed(err)
		newAddy := id.PublicKey()

		// Refuse to record the same publisher twice.
		for _, existing := range config.TrustedPublishers {
			if newAddy == existing {
				fail("publisher is already trusted", -1)
			}
		}
		config.TrustedPublishers = append(config.TrustedPublishers, newAddy)

		// Optionally record an alias for the new publisher as well.
		if newPublisherAlias != "" {
			mustSucceed(addIdentity(newPublisherAlias, newAddy))
		}
		mustSucceed(saveConfig(cfgFile))
	},
}

func init() {
	RootCmd.AddCommand(trustPublisherCmd)
	trustPublisherCmd.Flags().StringVarP(&newPublisherAlias, "alias", "", "", "Also add alias for publisher")
}
package main

import "fmt"

// main collects small Go-language experiments; most are commented out.
func main() {
	// strings.Split splits a string; len counts bytes.
	//var s1 string = "hello"
	// s2 := "how are you"
	// s3 := strings.Split(s2, " ")
	// fmt.Println(s3[1])

	// s := "hello 你好"
	// for i := 0; i < len(s); i++ { // indexes by byte
	// 	fmt.Printf("%c\n", s[i])
	// }
	// for i, v := range s { // iterates by rune (one character at a time)
	// 	fmt.Printf("index = %d,v=%c\n", i, v)
	// }

	// byte vs. rune literals
	// var c1 byte = 'a'
	// var c2 rune = '中'
	// fmt.Printf("%T %T\n", c1, c2)
	// fmt.Println(c1, c2)

	// if with an init statement
	// if b := 3; b == 3 {
	// 	fmt.Println("==")
	// }

	// for range
	// s := "hello 你好"
	// for _, v := range s {
	// 	fmt.Println(v)
	// }

	// switch
	// score := 'A'
	// switch score {
	// case 'A':
	// 	fmt.Println("成绩为:90-100")
	// case 'B':
	// 	fmt.Println("成绩为:80-90")
	// }

	// Array declaration variants:
	//var arr [5]int=[5]int{1,2,3,4,5}
	//arr := [5]int{1, 2, 3, 4, 5}
	//var arr = [5]int{1, 2, 3, 4, 5}
	//var arr [5]int = [5]int{1: 3, 4: 9}
	//arr := [...]int{1, 2, 4}
	//fmt.Println(arr)
	//fmt.Printf("%T", arr)

	// Arrays are value types: assignment copies the whole array, so mutating
	// a afterwards does not affect b.
	// var a = [3][2]string{{"陕西", "西安"}, {"四川,“成都"}, {"中国", "北京"}}
	// fmt.Println(a)
	// var b [3][2]string
	// b = a
	// a[0][1] = "长安"
	// fmt.Println(b)
	// fmt.Println(a)

	// A slice literal is non-nil even before use, so this nil check never fires.
	var a []int = []int{1, 2, 3}
	if a == nil {
		fmt.Println("没有值") // "no value"
	}
	fmt.Println(a)
	fmt.Printf("%T\n", a)
	fmt.Println("len=", len(a), "cap =", cap(a))
}
package gmtls // #cgo CFLAGS: -I${SRCDIR}/../../3rd_party/gmssl/include // #cgo CFLAGS: -I${SRCDIR}/../../3rd_party/addon/include // #cgo LDFLAGS: -L${SRCDIR}/../../3rd_party/gmssl/lib -lcrypto -lssl // #cgo LDFLAGS: -L${SRCDIR}/../../3rd_party/addon/lib -laddon // #cgo LDFLAGS: -Wl,-rpath=${SRCDIR}/../../3rd_party/gmssl/lib // #cgo LDFLAGS: -Wl,-rpath=${SRCDIR}/../../3rd_party/addon/lib // #include <stdlib.h> // #include "tls.h" import "C" import ( "fmt" "unsafe" ) type Certificate = unsafe.Pointer func LoadX509KeyPair(certFile, keyFile string) (Certificate, error) { ccert, ckey := C.CString(certFile), C.CString(keyFile) defer C.free(unsafe.Pointer(ccert)) defer C.free(unsafe.Pointer(ckey)) response := C.loadX509KeyPair(ccert, ckey) if 0 != response.error { return nil, fmt.Errorf("failed to to load cert: %d", response.error) } return response.value, nil } func UnloadX509KeyPair(cert Certificate) { C.destroyCert(cert) }
package plot

import (
	"time"

	"github.com/nictuku/latency"
)

// ExamplePlot creates a histogram with 10 buckets and records an event that took 16ms (16000us),
// then plots a (very uninteresting) graph for it.
func ExamplePlot() {
	h := &latency.Histogram{
		Buckets:    make([]int, 10),
		Resolution: time.Millisecond,
	}
	h.Record(16 * time.Millisecond)
	// Plot (defined elsewhere in this package) renders the SVG to test.svg.
	Plot(h, "HTTP server GET latency histogram", "test.svg")
}
package store

import (
	"fmt"
	"math"
)

// MuxingWriter fans records out to one of several underlying Writers based on
// each record's DatabaseIndex.
type MuxingWriter []Writer

// NewMuxingWriter builds a writer that writes each record it receives to one of the
// provided writers according to the record's DatabaseIndex. For example, the
// writer NewMuxingWriter(db0, db1) will write records with DatabaseIndex =
// 0 to db0 and records with DatabaseIndex = 1 to db1.
func NewMuxingWriter(writers ...Writer) MuxingWriter {
	// Cap the writer count at MaxUint8 — presumably because DatabaseIndex
	// fits in a byte; TODO confirm against the Record definition.
	if len(writers) > math.MaxUint8 {
		panic(fmt.Errorf("Cannot write to more than %d databases", math.MaxUint8))
	}
	return MuxingWriter(writers)
}

// BeginWriting starts a write session on every underlying writer, stopping at
// the first error.
func (writers MuxingWriter) BeginWriting() error {
	for _, writer := range writers {
		if err := writer.BeginWriting(); err != nil {
			return err
		}
	}
	return nil
}

// WriteRecord routes record to the writer selected by its DatabaseIndex.
func (writers MuxingWriter) WriteRecord(record *Record) error {
	return writers[record.DatabaseIndex].WriteRecord(record)
}

// EndWriting finishes the write session on every underlying writer, stopping
// at the first error.
func (writers MuxingWriter) EndWriting() error {
	for _, writer := range writers {
		if err := writer.EndWriting(); err != nil {
			return err
		}
	}
	return nil
}
package build

import (
	"fmt"
	"regexp"
	"strings"
	"time"

	digest "github.com/opencontainers/go-digest"
	"github.com/tonistiigi/units"

	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
)

// buildkitPrinter renders buildkit build progress into the Tilt logger,
// tracking per-vertex state in vData and original arrival order in vOrder.
type buildkitPrinter struct {
	logger logger.Logger
	vData  map[digest.Digest]*vertexAndLogs
	vOrder []digest.Digest
}

// vertex mirrors one buildkit build step, plus bookkeeping flags recording
// what has already been printed for it.
type vertex struct {
	digest          digest.Digest
	name            string
	error           string
	started         bool
	startedTime     *time.Time
	completed       bool
	completedTime   *time.Time
	startPrinted    bool
	errorPrinted    bool
	completePrinted bool
	durationPrinted time.Duration
	cached          bool
	duration        time.Duration
}

const internalPrefix = "[internal]"
const internalLoader = "[internal] load build context"
const logPrefix = " → "

var stageNameRegexp = regexp.MustCompile(`^\[.+\]`)

// shouldHide reports whether this vertex is internal noise that should be
// suppressed; the build-context loader is the one internal step kept visible.
func (v *vertex) shouldHide() bool {
	return strings.HasPrefix(v.name, internalPrefix) && v.name != internalLoader
}

// isError reports whether the vertex carries an error message.
func (v *vertex) isError() bool {
	return len(v.error) > 0
}

// humanName returns a friendlier display name for the vertex.
func (v *vertex) humanName() string {
	if v.name == internalLoader {
		return "[background] read source files"
	}
	return v.name
}

// stageName extracts the leading "[stage]" tag from the vertex name.
func (v *vertex) stageName() string {
	match := stageNameRegexp.FindString(v.name)
	if match == "" {
		// If we couldn't find a match, just return the whole
		// human-readable name, so that the user has some hope of figuring out
		// what went wrong.
		return v.humanName()
	}
	return match
}

// vertexAndLogs pairs a vertex with its buffered log lines and per-layer
// download statuses.
type vertexAndLogs struct {
	vertex      *vertex
	logs        []*vertexLog
	logsPrinted int
	logger      logger.Logger

	// A map of statuses, indexed by the layer id being downloaded.
	statuses vertexStatusSet

	// A combined status for all the downloadable layers, merged
	// into a single status object.
	lastPrintedStatus vertexStatus
}

// vertexLog is one log line attributed to a vertex.
type vertexLog struct {
	vertex digest.Digest
	msg    []byte
}

// vertexStatus tracks download progress for a single layer of a vertex.
type vertexStatus struct {
	vertex    digest.Digest
	id        string
	total     int64
	current   int64
	timestamp time.Time
}

// The buildkit protocol represents each downloadable layer
// as a separate status object, identified by a layer ID.
// We want to present this to the user as a single, combined status // that summarizes all layers. type vertexStatusSet map[string]vertexStatus func (s vertexStatusSet) combined() vertexStatus { current := int64(0) total := int64(0) t := time.Time{} for _, v := range s { current += v.current total += v.total if v.timestamp.After(t) { t = v.timestamp } } return vertexStatus{ current: current, total: total, timestamp: t, } } func newBuildkitPrinter(l logger.Logger) *buildkitPrinter { return &buildkitPrinter{ logger: l, vData: map[digest.Digest]*vertexAndLogs{}, vOrder: []digest.Digest{}, } } func (b *buildkitPrinter) toStageStatuses() []v1alpha1.DockerImageStageStatus { result := make([]v1alpha1.DockerImageStageStatus, 0) for _, digest := range b.vOrder { if vl, ok := b.vData[digest]; ok { v := vl.vertex status := v1alpha1.DockerImageStageStatus{ Name: v.name, Cached: v.cached, } if v.startedTime != nil { st := apis.NewMicroTime(*v.startedTime) status.StartedAt = &st } if v.completedTime != nil { ct := apis.NewMicroTime(*v.completedTime) status.FinishedAt = &ct } if v.isError() { status.Error = v.error } result = append(result, status) } } return result } func (b *buildkitPrinter) parseAndPrint(vertexes []*vertex, logs []*vertexLog, statuses []*vertexStatus) error { for _, v := range vertexes { if vl, ok := b.vData[v.digest]; ok { vl.vertex.started = v.started vl.vertex.completed = v.completed vl.vertex.startedTime = v.startedTime vl.vertex.completedTime = v.completedTime vl.vertex.cached = v.cached // NOTE(nick): Fun fact! The buildkit protocol sends down multiple completion timestamps. // We need to take the last one. 
if v.duration > vl.vertex.duration { vl.vertex.duration = v.duration } if v.isError() { vl.vertex.error = v.error } } else { b.vData[v.digest] = &vertexAndLogs{ vertex: v, logs: []*vertexLog{}, logger: logger.NewPrefixedLogger(logPrefix, b.logger), } b.vOrder = append(b.vOrder, v.digest) } } for _, l := range logs { if vl, ok := b.vData[l.vertex]; ok { vl.logs = append(vl.logs, l) } } for _, s := range statuses { if vl, ok := b.vData[s.vertex]; ok { if vl.statuses == nil { vl.statuses = vertexStatusSet{} } vl.statuses[s.id] = *s } } for _, d := range b.vOrder { vl, ok := b.vData[d] if !ok { return fmt.Errorf("Expected to find digest %s in %+v", d, b.vData) } v := vl.vertex if v.started && !v.startPrinted && !v.shouldHide() { cacheSuffix := "" if v.cached { cacheSuffix = " [cached]" } b.logger.WithFields(logger.Fields{logger.FieldNameProgressID: v.stageName()}). Infof("%s%s", v.humanName(), cacheSuffix) v.startPrinted = true } if v.isError() && !v.errorPrinted { // TODO(nick): Should this be logger.Errorf? b.logger.Infof("\nERROR IN: %s", v.humanName()) v.errorPrinted = true } if v.isError() || !v.shouldHide() { b.flushLogs(vl) } if !v.shouldHide() && !v.cached && !v.isError() { var progressInBytes string status := vl.statuses.combined() shouldPrintProgress := false if vl.lastPrintedStatus.total != status.total { // print progress when the total has changed. That means we've started // downloading a new layer. shouldPrintProgress = true } else if status.total > 0 { // print progress when at least 1% of total has changed and at least 2 seconds have passed. diff := float64(status.current) - float64(vl.lastPrintedStatus.current) largeEnoughChange := diff/float64(status.total) >= 0.01 largeEnoughTime := status.timestamp.Sub(vl.lastPrintedStatus.timestamp) > 2*time.Second shouldPrintProgress = largeEnoughChange && largeEnoughTime } else if status.current > 0 { // print progress when at least 5% of current has changed and at least 2 seconds have passed. 
// We need to handle this case separately when we don't have a total estimate. diff := float64(status.current) - float64(vl.lastPrintedStatus.current) largeEnoughChange := diff/float64(status.current) >= 0.05 largeEnoughTime := status.timestamp.Sub(vl.lastPrintedStatus.timestamp) > 2*time.Second shouldPrintProgress = largeEnoughChange && largeEnoughTime } if status.total != 0 { progressInBytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(status.current), units.Bytes(status.total)) } else if status.current != 0 { progressInBytes = fmt.Sprintf(" %.2f", units.Bytes(status.current)) } // NOTE(nick): Fun fact! The buildkit protocol sends down multiple completion timestamps. // We need to print the longest one. shouldPrintCompletion := v.completed && v.duration > 10*time.Millisecond && (!v.completePrinted || v.durationPrinted < v.duration) doneSuffix := "" fields := logger.Fields{logger.FieldNameProgressID: v.stageName()} if shouldPrintCompletion { doneSuffix = fmt.Sprintf(" [done: %s]", v.duration.Truncate(time.Millisecond)) v.completePrinted = true v.durationPrinted = v.duration fields[logger.FieldNameProgressMustPrint] = "1" } if shouldPrintCompletion || shouldPrintProgress { b.logger.WithFields(fields). Infof("%s%s%s", v.humanName(), progressInBytes, doneSuffix) vl.lastPrintedStatus = status } } } return nil } func (b *buildkitPrinter) flushLogs(vl *vertexAndLogs) { for vl.logsPrinted < len(vl.logs) { l := vl.logs[vl.logsPrinted] vl.logsPrinted++ vl.logger.Write(logger.InfoLvl, l.msg) } }
package split

import (
	"encoding/hex"
	"testing"
)

// TestEncode round-trips a cid/password pair through the Secret machinery:
// set inputs, derive addrs+passcode, and recompute the cid from them.
func TestEncode(t *testing.T) {
	s := Secret{}
	f := "e424cc3ef5c62accff7ebe9c3d797927af59976677501b2c4cd9a2f046218952"
	p := "d41ca9b3ff93b24da439c32ab28c24fd03220fbee13d3c4650f20125172ae72d"
	// FIX: decode errors were silently discarded; fail fast on bad fixtures.
	bf, err := hex.DecodeString(f)
	if err != nil {
		t.Fatalf("decoding cid hex: %v", err)
	}
	bp, err := hex.DecodeString(p)
	if err != nil {
		t.Fatalf("decoding password hex: %v", err)
	}
	// FIX: use t.Log instead of fmt.Println so output goes through the
	// test framework and only shows under -v / on failure.
	t.Log(bf, bp)
	s.SetCid(bf)
	s.SetPassword(bp)
	// FIX: errors from the derivation steps were previously ignored.
	addr, code, err := s.CalculateAddrAndCode()
	if err != nil {
		t.Fatalf("CalculateAddrAndCode: %v", err)
	}
	s.SetPasscode(code)
	s.SetAddrs(addr)
	cid, err := s.RecalculateCid()
	if err != nil {
		t.Fatalf("RecalculateCid: %v", err)
	}
	// TODO(review): if RecalculateCid is expected to reproduce bf exactly,
	// assert that here instead of just logging — confirm the contract.
	t.Log(cid)
}
package datasource

import (
	"encoding/json"
	"net/http"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
	"github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter"
)

// getRegions is an HTTP resource handler that responds with the EC2 regions
// visible to the plugin's configured credentials, JSON-encoded.
//
// Only GET is accepted; other methods receive 405 Method Not Allowed.
func (ds *Datasource) getRegions(rw http.ResponseWriter, req *http.Request) {
	if req.Method != http.MethodGet {
		rw.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	pluginConfig := httpadapter.PluginConfigFromContext(req.Context())
	// getRegions should not require region itself
	ec2Client, err := ds.ec2ClientFactory(&pluginConfig, "")
	if err != nil {
		sendError(rw, err)
		return
	}
	regions, err := getRegions(ec2Client)
	if err != nil {
		sendError(rw, err)
		return
	}
	body, err := json.Marshal(regions)
	if err != nil {
		sendError(rw, err)
		return
	}
	rw.Header().Set("content-type", "application/json")
	// The response is already committed at this point, so a write failure can
	// only be logged, not reported to the client.
	if _, err := rw.Write(body); err != nil {
		log.DefaultLogger.Error("failed to write response", "err", err.Error())
	}
}

// getRegions fetches all EC2 regions via DescribeRegions.
func getRegions(client *ec2.EC2) ([]*ec2.Region, error) {
	input := &ec2.DescribeRegionsInput{}
	out, err := client.DescribeRegions(input)
	if err != nil {
		return nil, err
	}
	// FIX: previously returned the stale `err` variable (always nil here);
	// return an explicit nil on the success path.
	return out.Regions, nil
}
package main

import (
	"encoding/json"
	"fmt"
	"html/template"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/apex/go-apex"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// getEnvmap returns the process environment as a key→value map, omitting the
// AWS credential secrets so they are never rendered into the generated page.
func getEnvmap() map[string]string {
	envmap := make(map[string]string)
	for _, e := range os.Environ() {
		ep := strings.SplitN(e, "=", 2)
		if ep[0] == "AWS_SECRET_ACCESS_KEY" {
			continue
		}
		if ep[0] == "AWS_SESSION_TOKEN" {
			continue
		}
		envmap[ep[0]] = ep[1]
	}
	return envmap
}

// main registers an apex Lambda handler that renders the incoming event and
// context into an HTML page and uploads it to the S3 location named by S3URI,
// returning the public website URL of the uploaded page.
func main() {
	apex.HandleFunc(func(event json.RawMessage, ctx *apex.Context) (interface{}, error) {
		// Stringify the incoming event. Decoding is best-effort: a malformed
		// event just renders as an empty value rather than failing the call.
		var obj interface{}
		json.Unmarshal(event, &obj)
		b, _ := json.MarshalIndent(obj, "", " ")

		// JSON.stringify the context for the template
		ctxjson, _ := json.MarshalIndent(ctx, "", " ")

		templates := template.Must(template.New("main").Funcs(template.FuncMap{"time": time.Now}).ParseGlob("templates/*.html"))
		templates = template.Must(templates.ParseGlob("templates/includes/*.html"))

		fn := "/tmp/index.html"
		outputfile, err := os.Create(fn)
		if err != nil {
			panic(err) // Not sure if Panic is the right approach within Lambda
		}
		// FIX: the file handle was previously never closed.
		defer outputfile.Close()

		// FIX: the ExecuteTemplate error was previously ignored; a failed
		// render would silently upload a truncated/empty page.
		if err := templates.ExecuteTemplate(outputfile, "index.html", struct {
			Input  string
			Indent string
			Env    map[string]string
			Ctx    string
		}{
			string(event),
			string(b),
			getEnvmap(),
			string(ctxjson),
		}); err != nil {
			return nil, err
		}

		s3url, err := url.Parse(os.Getenv("S3URI"))
		if err != nil {
			panic(err)
		}
		fmt.Fprintf(os.Stderr, "%#+ v \n", s3url)

		// FIX: session.NewSession error was previously discarded.
		sess, err := session.NewSession()
		if err != nil {
			return nil, err
		}
		svc := s3.New(sess)

		// FIX: after ExecuteTemplate the file offset sits at EOF, so PutObject
		// (which reads Body from the current offset) uploaded zero bytes.
		// Rewind to the start before uploading.
		if _, err := outputfile.Seek(0, 0); err != nil {
			return nil, err
		}

		// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#PutObjectInput
		// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#example_S3_PutObject
		params := &s3.PutObjectInput{
			Bucket:      aws.String(s3url.Host),     // Required
			Body:        outputfile,                 // Required
			Key:         aws.String(s3url.Path[1:]), // Required
			ACL:         aws.String("public-read"),
			ContentType: aws.String("text/html"),
		}
		resp, err := svc.PutObject(params)
		if err != nil {
			// Best-effort diagnostics; the handler still returns the URL below,
			// preserving the original behavior of not failing the invocation.
			fmt.Fprintln(os.Stderr, "Failed to upload", err.Error())
			fmt.Fprintln(os.Stderr, "Does the", os.Getenv("ROLE"), " role in IAM have S3 permissions?")
		} else {
			fmt.Fprintln(os.Stderr, "Managed to upload", resp)
		}
		return "http://" + s3url.Host + ".s3-website-" + os.Getenv("AWS_REGION") + ".amazonaws.com" + s3url.Path, nil
	})
}
package main

// 724. Find Pivot Index (LeetCode).
//
// pivotIndex returns the leftmost index i such that the sum of all elements
// strictly left of i equals the sum of all elements strictly right of i,
// or -1 if no such index exists.
//
// Approach: with total = sum(nums) and left = sum of elements before i, the
// pivot condition "left == right" is equivalent to total - nums[i] == 2*left,
// so a single pass with a running left sum suffices. O(n) time, O(1) space.
func pivotIndex(nums []int) int {
	total := 0
	for _, n := range nums {
		total += n
	}
	left := 0
	for i, n := range nums {
		if total-n == 2*left {
			return i
		}
		left += n
	}
	return -1
}
// Package echo shows a simple RPC service that can be served with rpcz. package echo import ( "context" "errors" "time" ) const ( defaultTimeout = 60 * time.Second ) var ( errInvalidMsg = errors.New("echo: invalid message") ) // Echo service replies back with the message it receives. type Echo struct{} // New returns an instance of Echo. func New() *Echo { return &Echo{} } // Echo handles req and fills in resp. func (s *Echo) Echo(ctx context.Context, req *EchoRequest, resp *EchoResponse) error { if req.GetMsg() == "" { return errInvalidMsg } resp.Msg = req.Msg return nil } // ExtendedEcho service replies back with the message it receives. // // It shows an example of service-side and caller-defined timeouts. type ExtendedEcho struct { echo *Echo timeout time.Duration } // NewExtendedEcho returns a new ExtendedEcho with the specified timeout. func NewExtendedEcho(timeout time.Duration) *ExtendedEcho { result := &ExtendedEcho{timeout: timeout} if result.timeout == 0 { result.timeout = defaultTimeout } return result } // Echo handles req and fills in resp. // // The service may wait for req.Delay, if specified, but no longer than s.timeout. 
func (s *ExtendedEcho) Echo(ctx context.Context, req *EchoRequest, resp *EchoResponse) error { return s.handle(ctx, req, resp) } func (s *ExtendedEcho) handle(ctx context.Context, req *EchoRequest, resp *EchoResponse) error { lctx, cancel := context.WithTimeout(ctx, s.timeout) defer cancel() out := s.do(ctx, req, resp) select { case <-lctx.Done(): return lctx.Err() case err := <-out: return err } } func (s *ExtendedEcho) do(ctx context.Context, req *EchoRequest, resp *EchoResponse) chan error { out := make(chan error, 1) go doEcho(ctx, out, s.echo, req, resp) return out } func doEcho(ctx context.Context, dst chan<- error, s *Echo, req *EchoRequest, resp *EchoResponse) { defer close(dst) if req.Delay <= 0 { dst <- s.Echo(ctx, req, resp) return } timer := time.NewTimer(time.Duration(req.Delay)) defer func() { _ = timer.Stop() }() select { case <-ctx.Done(): dst <- ctx.Err() return case <-timer.C: } dst <- s.Echo(ctx, req, resp) }
package exec

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"reflect"

	"github.com/pgavlin/warp/wasm"
	"github.com/pgavlin/warp/wasm/code"
	"github.com/pgavlin/warp/wasm/leb128"
)

// InvalidGlobalIndexError is returned when a constant expression references a
// global index outside the imported-global index space.
type InvalidGlobalIndexError uint32

func (e InvalidGlobalIndexError) Error() string {
	return fmt.Sprintf("wasm: Invalid index to global index space: %#x", uint32(e))
}

// InvalidValueTypeInitExprError records a mismatch between the value type an
// initializer expression was expected to produce and the one it produced.
type InvalidValueTypeInitExprError struct {
	Wanted reflect.Kind
	Got    reflect.Kind
}

func (e InvalidValueTypeInitExprError) Error() string {
	return fmt.Sprintf("wasm: Wanted initializer expression to return %v value, got %v", e.Wanted, e.Got)
}

// EvalConstantExpression executes the given (encoded) constant expression in
// the context of the given imports.
//
// The result is an int32, int64, float32, or float64 depending on the type of
// the last constant pushed, or nil if the expression ends with an empty stack.
func EvalConstantExpression(imports []*Global, expr []byte) (interface{}, error) {
	var stack []uint64
	var topType wasm.ValueType

	if len(expr) == 0 {
		return nil, wasm.ErrEmptyInitExpr
	}

	for {
		if len(expr) == 0 {
			return nil, io.ErrUnexpectedEOF
		}

		opcode := expr[0]
		expr = expr[1:]
		switch opcode {
		case code.OpI32Const:
			v, sz, err := leb128.GetVarint32(expr)
			if err != nil {
				return nil, err
			}
			expr = expr[sz:]
			stack = append(stack, uint64(v))
			topType = wasm.ValueTypeI32
		case code.OpI64Const:
			v, sz, err := leb128.GetVarint64(expr)
			if err != nil {
				return nil, err
			}
			expr = expr[sz:]
			stack = append(stack, uint64(v))
			topType = wasm.ValueTypeI64
		case code.OpF32Const:
			if len(expr) < 4 {
				return nil, io.ErrUnexpectedEOF
			}
			v := binary.LittleEndian.Uint32(expr)
			expr = expr[4:]
			stack = append(stack, uint64(v))
			topType = wasm.ValueTypeF32
		case code.OpF64Const:
			if len(expr) < 8 {
				return nil, io.ErrUnexpectedEOF
			}
			v := binary.LittleEndian.Uint64(expr)
			expr = expr[8:]
			stack = append(stack, v)
			topType = wasm.ValueTypeF64
		case code.OpGlobalGet:
			index, sz, err := leb128.GetVarUint32(expr)
			if err != nil {
				return nil, err
			}
			expr = expr[sz:]
			// FIX: valid indices are 0..len(imports)-1. The original check
			// used `>`, which admitted index == len(imports) and panicked on
			// the slice access below.
			if index >= uint32(len(imports)) {
				return nil, InvalidGlobalIndexError(index)
			}
			global := imports[int(index)]
			stack = append(stack, global.value)
			topType = global.typ
		case code.OpEnd:
			if len(stack) == 0 {
				return nil, nil
			}
			v := stack[len(stack)-1]
			switch topType {
			case wasm.ValueTypeI32:
				return int32(v), nil
			case wasm.ValueTypeI64:
				return int64(v), nil
			case wasm.ValueTypeF32:
				return math.Float32frombits(uint32(v)), nil
			case wasm.ValueTypeF64:
				// v is already a uint64; no conversion needed.
				return math.Float64frombits(v), nil
			default:
				panic("unreachable")
			}
		default:
			return nil, wasm.InvalidInitExprOpError(opcode)
		}
	}
}
package state

import (
	"time"

	"github.com/guregu/null"
)

// PostgresReplication is a snapshot of a Postgres server's replication state.
// Which fields are populated depends on the server's role: the "primary"
// fields are only meaningful when InRecovery is false, the "standby" fields
// only when it is true (null-wrapped types encode absence).
type PostgresReplication struct {
	// InRecovery reports whether the server is in recovery mode
	// (NOTE(review): presumably the result of pg_is_in_recovery() — confirm
	// against the collector that fills this struct).
	InRecovery bool

	// Data available on primary
	CurrentXlogLocation null.String // current WAL write location (assumed LSN string — TODO confirm)
	Standbys            []PostgresReplicationStandby

	// Data available on standby
	IsStreaming        null.Bool
	ReceiveLocation    null.String // last WAL location received (assumed LSN string — TODO confirm)
	ReplayLocation     null.String // last WAL location replayed (assumed LSN string — TODO confirm)
	ApplyByteLag       null.Int
	ReplayTimestamp    null.Time
	ReplayTimestampAge null.Int
}

// PostgresReplicationStandby - Standby information as seen from the primary
// (one entry per connected standby; field names mirror pg_stat_replication —
// NOTE(review): assumed source, verify against the query that populates it).
type PostgresReplicationStandby struct {
	ClientAddr      string
	RoleOid         Oid
	Pid             int64
	ApplicationName string
	ClientHostname  null.String
	ClientPort      int32
	BackendStart    time.Time
	SyncPriority    int32
	SyncState       string
	State           string
	SentLocation    null.String
	WriteLocation   null.String
	FlushLocation   null.String
	ReplayLocation  null.String
	RemoteByteLag   null.Int
	LocalByteLag    null.Int
}
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package wilco

import (
	"bytes"
	"context"
	"time"

	"chromiumos/tast/common/testexec"
	"chromiumos/tast/ctxutil"
	"chromiumos/tast/local/vm"
	"chromiumos/tast/local/wilco"
	"chromiumos/tast/testing"
)

// init registers VMStart with the tast framework, including its ownership
// contacts, attributes, and software dependencies.
func init() {
	testing.AddTest(&testing.Test{
		Func: VMStart,
		Desc: "Starts a new instance of the Wilco DTC VM and tests that the DTC binaries are running",
		Contacts: []string{
			"chromeos-oem-services@google.com", // Use team email for tickets.
			"bkersting@google.com",
			"lamzin@google.com",
		},
		Attr: []string{"group:mainline", "informational"}, // b/217770420
		SoftwareDeps: []string{"vm_host", "wilco"},
	})
}

// VMStart boots the Wilco DTC VM, then verifies that the expected DTC
// daemons (ddv, ddtm, sa) are running inside it and that the storage and
// diagnostics paths are mounted in the guest. Failures in the per-daemon and
// per-path checks are reported individually via s.Errorf so all checks run.
func VMStart(ctx context.Context, s *testing.State) {
	const (
		storagePath = "/opt/dtc/storage"
		diagPath    = "/opt/dtc/diagnostics"
	)

	// Shorten the total context by 15 seconds to allow for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 15*time.Second)
	defer cancel()

	if err := wilco.StartVM(ctx, wilco.DefaultVMConfig()); err != nil {
		s.Fatal("Unable to start Wilco DTC VM: ", err)
	}
	// Stop with the unshortened context so teardown gets its reserved time.
	defer wilco.StopVM(cleanupCtx)

	// Wait for the ddv dbus service to be up and running before continuing the
	// test.
	if err := wilco.WaitForDDVDBus(ctx); err != nil {
		s.Fatal("DDV dbus service not available: ", err)
	}

	// Each expected daemon must show up via pgrep inside the VM.
	for _, name := range []string{"ddv", "ddtm", "sa"} {
		cmd := vm.CreateVSHCommand(ctx, wilco.WilcoVMCID, "pgrep", name)
		if out, err := cmd.Output(testexec.DumpLogOnError); err != nil {
			s.Errorf("Process %v not found: %v", name, err)
		} else {
			s.Logf("Process %v started with PID %s", name, bytes.TrimSpace(out))
		}
	}

	// Both DTC data paths must be mount points inside the VM ("mountpoint"
	// exits non-zero when the path is not a mount point).
	for _, path := range []string{storagePath, diagPath} {
		cmd := vm.CreateVSHCommand(ctx, wilco.WilcoVMCID, "mountpoint", path)
		if err := cmd.Run(testexec.DumpLogOnError); err != nil {
			s.Errorf("Path %v is not mounted inside VM: %v", path, err)
		} else {
			s.Logf("Path %v is mounted inside VM", path)
		}
	}
}