text
stringlengths
11
4.05M
package array import ( "github.com/project-flogo/core/data" "github.com/project-flogo/core/data/expression/function" "github.com/project-flogo/core/support/log" ) type Create struct { } func init() { function.Register(&Create{}) } func (s *Create) Name() string { return "create" } func (s *Create) Sig() (paramTypes []data.Type, isVariadic bool) { return []data.Type{data.TypeAny}, true } func (s *Create) Eval(object ...interface{}) (interface{}, error) { log.RootLogger().Debugf("Start array function with parameters %v", object) if object == nil { return nil, nil } var result []interface{} result = append(result, object...) log.RootLogger().Debugf("Done array function with result %v", result) return result, nil }
/*
 * @lc app=leetcode id=1480 lang=golang
 *
 * [1480] Running Sum of 1d Array
 *
 * runningSum[i] = sum(nums[0..i]).
 * Constraints: 1 <= nums.length <= 1000, -10^6 <= nums[i] <= 10^6.
 */

// @lc code=start

// runningSum returns the prefix sums of nums (delegates to the in-place
// variant).
func runningSum(nums []int) []int {
	return runningSum1(nums)
}

// runningSum2 builds the prefix sums into a fresh slice, leaving nums intact.
func runningSum2(nums []int) []int {
	out := []int{}
	for i, n := range nums {
		if i > 0 {
			n += out[i-1]
		}
		out = append(out, n)
	}
	return out
}

// runningSum1 accumulates the prefix sums in place and returns nums itself.
// Note: it mutates the caller's slice.
func runningSum1(nums []int) []int {
	if len(nums) <= 1 {
		return nums
	}
	for i := 1; i < len(nums); i++ {
		nums[i] += nums[i-1]
	}
	return nums
}

// @lc code=end
package textMetrics import ( "github.com/RobertGumpert/vkr-pckg/runtimeinfo" "github.com/RobertGumpert/vkr-pckg/textPreprocessing" "github.com/RobertGumpert/vkr-pckg/textPreprocessing/textDictionary" "github.com/RobertGumpert/vkr-pckg/textPreprocessing/textVectorized" "testing" ) var ( testCorpus = []string{ // Vue "Vue js is a progressive incrementally adoptable JavaScript framework for building UI on the web framework frontend javascript vue", // React "A declarative efficient and flexible JavaScript library for building user interfaces declarative frontend javascript library react ui", //Hyper "A terminal built on web technologies css html hyper javascript linux macos react terminal terminal emulators", // Alacritty "A cross platform OpenGL terminal emulator bsd gpu linux macos opengl rust terminal terminal emulators vte windows", } ) func TestFullDictionaryCosineDistanceFlow(t *testing.T) { dictionary, vectorsOfWords, countFeatures := textDictionary.FullDictionary(testCorpus, textPreprocessing.LinearMode) bagOfWords := textVectorized.FrequencyVectorized(vectorsOfWords, dictionary, textPreprocessing.LinearMode) // runtimeinfo.LogInfo(countFeatures) cosineMatrix := CosineDistance(bagOfWords, textPreprocessing.LinearMode) for _, distance := range cosineMatrix { runtimeinfo.LogInfo(distance) } // runtimeinfo.LogInfo(countFeatures) cosineMatrix = CosineDistance(bagOfWords, textPreprocessing.ParallelMode) for _, distance := range cosineMatrix { runtimeinfo.LogInfo(distance) } } func TestIDFDictionaryCosineDistanceFlow(t *testing.T) { dictionary, vectorsOfWords, countFeatures := textDictionary.IDFDictionary(testCorpus, 2, textPreprocessing.LinearMode) bagOfWords := textVectorized.FrequencyVectorized(vectorsOfWords, dictionary, textPreprocessing.LinearMode) // runtimeinfo.LogInfo(countFeatures) cosineMatrix := CosineDistance(bagOfWords, textPreprocessing.LinearMode) for _, distance := range cosineMatrix { runtimeinfo.LogInfo(distance) } // 
runtimeinfo.LogInfo(countFeatures) cosineMatrix = CosineDistance(bagOfWords, textPreprocessing.ParallelMode) for _, distance := range cosineMatrix { runtimeinfo.LogInfo(distance) } } func TestCosineDistanceOnPairVectorsFlow(t *testing.T) { corpus := []string{ testCorpus[0], testCorpus[1], } // dictionary, vectorsOfWords, countFeatures := textDictionary.FullDictionary(corpus, textPreprocessing.LinearMode) bagOfWords := textVectorized.FrequencyVectorized(vectorsOfWords, dictionary, textPreprocessing.LinearMode) runtimeinfo.LogInfo("FULL : ", countFeatures) if cosineDistance, err := CosineDistanceOnPairVectors(bagOfWords); err != nil { t.Fatal(err) } else { runtimeinfo.LogInfo(cosineDistance) } // dictionary, vectorsOfWords, countFeatures = textDictionary.IDFDictionary(corpus, 2, textPreprocessing.LinearMode) bagOfWords = textVectorized.FrequencyVectorized(vectorsOfWords, dictionary, textPreprocessing.LinearMode) runtimeinfo.LogInfo("IDF : ", countFeatures) if cosineDistance, err := CosineDistanceOnPairVectors(bagOfWords); err != nil { t.Fatal(err) } else { runtimeinfo.LogInfo(cosineDistance) } }
package main

import (
	"sort"
)

// GroupAnagrams partitions words into groups of mutual anagrams.
// O(w * n * log(n) + n * w * log(w)) time | O(wn) space, where w is the
// number of words and n the length of the longest word.
func GroupAnagrams(words []string) [][]string {
	if len(words) == 0 {
		return [][]string{}
	}

	// Pair each word with its letter-sorted form, then order indices by that
	// form so anagrams become adjacent.
	keys := []string{}
	order := []int{}
	for idx, w := range words {
		keys = append(keys, sortWord(w))
		order = append(order, idx)
	}
	sort.Slice(order, func(i, j int) bool {
		return keys[order[i]] < keys[order[j]]
	})

	// Sweep the ordered indices, starting a new group whenever the sorted
	// form changes.
	groups := [][]string{}
	lastKey := ""
	for _, idx := range order {
		key := keys[idx]
		if len(groups) == 0 || key != lastKey {
			groups = append(groups, []string{words[idx]})
			lastKey = key
			continue
		}
		tail := len(groups) - 1
		groups[tail] = append(groups[tail], words[idx])
	}
	return groups
}

// sortWord returns word with its bytes in ascending order, the canonical key
// shared by all anagrams of word.
func sortWord(word string) string {
	letters := []byte(word)
	sort.Slice(letters, func(a, b int) bool {
		return letters[a] < letters[b]
	})
	return string(letters)
}
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package model

import "github.com/pkg/errors"

// PGBouncerConfig contains the configuration for the PGBouncer utility.
// //////////////////////////////////////////////////////////////////////////////
// - MaxDatabaseConnectionsPerPool is the maximum number of connections per
// logical database pool when using proxy databases.
// - MinPoolSize is the minimum pool size.
// - DefaultPoolSize is the default pool size per user.
// - ReservePoolSize is the reserve pool size per user.
// - MaxClientConnections is the maximum client connections.
// - ServerIdleTimeout is the server idle timeout.
// - ServerLifetime is the server lifetime.
// - ServerResetQueryAlways is boolean 0 or 1 whether server_reset_query should
// be run in all pooling modes.
//
// //////////////////////////////////////////////////////////////////////////////
type PGBouncerConfig struct {
	MinPoolSize                   int
	DefaultPoolSize               int
	ReservePoolSize               int
	MaxClientConnections          int
	MaxDatabaseConnectionsPerPool int
	ServerIdleTimeout             int
	ServerLifetime                int
	ServerResetQueryAlways        int
}

// Validate validates a PGBouncerConfig.
// Note: only MaxDatabaseConnectionsPerPool, DefaultPoolSize, and
// ServerResetQueryAlways are checked here; the remaining fields are accepted
// as-is.
func (c *PGBouncerConfig) Validate() error {
	if c.MaxDatabaseConnectionsPerPool < 1 {
		return errors.New("MaxDatabaseConnectionsPerPool must be 1 or greater")
	}
	if c.DefaultPoolSize < 1 {
		return errors.New("DefaultPoolSize must be 1 or greater")
	}
	if c.ServerResetQueryAlways != 0 && c.ServerResetQueryAlways != 1 {
		return errors.New("ServerResetQueryAlways must be 0 or 1")
	}

	return nil
}
package command import ( "familyTree/src/data" "familyTree/src/family" "familyTree/src/person" "testing" "github.com/stretchr/testify/assert" ) var tree family.Tree func TestMain(m *testing.M) { tree = data.Build() m.Run() } func TestShouldReturnTheRelatedMembers(t *testing.T) { executeFunc := GetCommandFunc(AddChildCommand) message := executeFunc([]string{"Chitra", "Aria", person.Female}, tree) assert.Equal(t, message, "CHILD_ADDITION_SUCCEEDED") executeFunc = GetCommandFunc(GetRelationShipCommand) message = executeFunc([]string{"Lavnya", "Maternal-Aunt"}, tree) assert.Equal(t, message, "Aria") }
package service import ( "net/url" "github.com/cerana/cerana/acomm" "github.com/cerana/cerana/pkg/errors" "github.com/cerana/cerana/providers/systemd" ) // RestartArgs are arguments for Restart. type RestartArgs struct { ID string `json:"id"` BundleID uint64 `json:"bundleID"` } // Restart restarts a service. func (p *Provider) Restart(req *acomm.Request) (interface{}, *url.URL, error) { var args RestartArgs if err := req.UnmarshalArgs(&args); err != nil { return nil, nil, err } if args.ID == "" { return nil, nil, errors.Newv("missing arg: id", map[string]interface{}{"args": args}) } if args.BundleID == 0 { return nil, nil, errors.Newv("missing arg: bundleID", map[string]interface{}{"args": args}) } ch := make(chan *acomm.Response, 1) rh := func(_ *acomm.Request, resp *acomm.Response) { ch <- resp } req, err := acomm.NewRequest(acomm.RequestOptions{ Task: "systemd-restart", ResponseHook: p.tracker.URL(), Args: systemd.ActionArgs{ Name: serviceName(args.BundleID, args.ID), Mode: systemd.ModeFail, }, SuccessHandler: rh, ErrorHandler: rh, }) if err != nil { return nil, nil, err } if err := p.tracker.TrackRequest(req, 0); err != nil { return nil, nil, err } if err := acomm.Send(p.config.CoordinatorURL(), req); err != nil { p.tracker.RemoveRequest(req) return nil, nil, err } resp := <-ch return nil, nil, errors.ResetStack(resp.Error) }
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// ColorGroup is a sample struct for demonstrating json.Marshal.
type ColorGroup struct {
	ID     int
	Name   string
	Colors []string
}

// Car is a sample struct for demonstrating json.Unmarshal.
type Car struct {
	Name         string
	Manufacturer string
}

var jsonBlob = []byte(`[
	{"Name": "WRX", "Manufacturer": "Subaru"},
	{"Name": "BRZ", "Manufacturer": "Subaru/Toyota"},
	{"Name": "Tacoma", "Manufacturer": "Toyota"}
]`)

var cars []Car

func main() {
	group := ColorGroup{
		ID:     1,
		Name:   "Red",
		Colors: []string{"Maroon", "Magenta", "Burgundy"},
	}
	encoded, err := json.Marshal(group)
	if err != nil {
		fmt.Println("Error:", err)
	}

	// Unmarshal requires a pointer destination; passing cars by value would
	// leave the package-level slice untouched and decode into a copy.
	if err := json.Unmarshal(jsonBlob, &cars); err != nil {
		fmt.Println("Error:", err)
	}

	os.Stdout.Write(encoded)
	fmt.Printf("\n")
	fmt.Printf("%+v\n", cars)
}
package logger import ( "github.com/I-Reven/Hexagonal/src/domain/entity" "github.com/I-Reven/Hexagonal/src/infrastructure/repository/redis/track" "github.com/gin-contrib/sessions" "github.com/gin-gonic/gin" ) var ( Session sessions.Session = nil ) type Tracker struct { track track.Track } func (t *Tracker) Create(context *gin.Context) (string, error) { Session = sessions.Default(context) id, err := t.track.CreateTrack() if err != nil { return "", err } Session.Set("track-id", id) return id, Session.Save() } func (t *Tracker) Message(message string) error { if Session != nil { id := Session.Get("track-id").(string) return t.track.AddMessage(id, message) } return nil } func (t *Tracker) Error(error error) error { if Session != nil { id := Session.Get("track-id").(string) return t.track.AddError(id, error) } return nil } func (t *Tracker) Data(data ...interface{}) error { if Session != nil { id := Session.Get("track-id").(string) for _, info := range data { return t.track.AddData(id, info) } } return nil } func (t *Tracker) Debug(message string, data ...interface{}) error { if Session != nil { id := Session.Get("track-id").(string) return t.track.AddDebug(id, message, data...) } return nil } func (t *Tracker) Get() (entity.Track, error) { if Session != nil { id := Session.Get("track-id").(string) return t.track.GetTrack(id) } return entity.Track{}, nil }
package utils

import (
	"encoding/json"
	"net/http"
)

// responseApi is the JSON envelope shared by all API responses.
type responseApi struct {
	Code    int          `json:"code"`
	Msg     string       `json:"msg"`
	Payload *interface{} `json:"payload,omitempty"`
}

// writeJSON marshals v and writes it with a JSON content type. A marshal
// failure — previously ignored, producing an empty 200 body — is now
// surfaced as a 500.
func writeJSON(w http.ResponseWriter, v interface{}) {
	data, err := json.Marshal(v)
	if err != nil {
		http.Error(w, "internal marshalling error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(data)
}

// CreateResponseSuccess writes a success envelope carrying code and payload.
// Note: code is embedded in the body only; the HTTP status remains 200,
// matching the original behavior.
func CreateResponseSuccess(w http.ResponseWriter, code int, pld interface{}) {
	writeJSON(w, &responseApi{
		Code:    code,
		Payload: &pld,
	})
}

// CreateResponseError writes an error envelope carrying code and the error's
// message; the payload field is omitted.
func CreateResponseError(w http.ResponseWriter, code int, msg error) {
	writeJSON(w, &responseApi{
		Code: code,
		Msg:  msg.Error(),
	})
}
package saml

import (
	"encoding/base64"
	"encoding/xml"
)

// SAMLRole pairs an IAM role ARN with the principal (provider) ARN.
type SAMLRole struct {
	RoleArn      string
	PrincipalArn string
}

// Response is the root of a decoded SAML response document.
type Response struct {
	XMLName   xml.Name
	Assertion Assertion `xml:"Assertion"`
}

// Assertion holds the response's attribute statement.
type Assertion struct {
	XMLName            xml.Name
	AttributeStatement AttributeStatement
}

// AttributeStatement is the list of attributes asserted by the IdP.
type AttributeStatement struct {
	XMLName    xml.Name
	Attributes []Attribute `xml:"Attribute"`
}

// Attribute is a named attribute with zero or more values.
type Attribute struct {
	XMLName         xml.Name
	Name            string           `xml:",attr"`
	AttributeValues []AttributeValue `xml:"AttributeValue"`
}

// AttributeValue is a single attribute value with its declared xsi type.
type AttributeValue struct {
	XMLName xml.Name
	Type    string `xml:"xsi:type,attr"`
	Value   string `xml:",innerxml"`
}

// ParseEncodedSAMLResponse base64-decodes and XML-unmarshals a SAML response.
func ParseEncodedSAMLResponse(b64ResponseXML string) (*Response, error) {
	response := Response{}
	bytesXML, err := base64.StdEncoding.DecodeString(b64ResponseXML)
	if err != nil {
		return nil, err
	}
	err = xml.Unmarshal(bytesXML, &response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}

// GetAttribute returns the first value of the named attribute, or "" when the
// attribute is absent or carries no values.
func (r *Response) GetAttribute(name string) string {
	for _, attr := range r.Assertion.AttributeStatement.Attributes {
		if attr.Name == name {
			if len(attr.AttributeValues) == 0 {
				// Bug fix: the original indexed [0] unconditionally and
				// panicked on an attribute with zero values.
				return ""
			}
			return attr.AttributeValues[0].Value
		}
	}
	return ""
}

// GetAttributeValues returns every value of the named attribute, in document
// order; nil when the attribute is absent.
func (r *Response) GetAttributeValues(name string) []string {
	var values []string
	for _, attr := range r.Assertion.AttributeStatement.Attributes {
		if attr.Name == name {
			for _, v := range attr.AttributeValues {
				values = append(values, v.Value)
			}
		}
	}
	return values
}
package models import ( "github.com/mobilemindtec/go-utils/beego/db" ) type Estado struct { Id int64 `form:"-" json:",string,omitempty"` Nome string `orm:"size(100)" valid:"Required;MaxSize(100)" form:""` Uf string `orm:"size(2)" valid:"Required;MaxSize(2)" form:""` Session *db.Session `orm:"-" json:"-" inject:""` } func NewEstado(session *db.Session) *Estado { return &Estado{Session: session} } func (this *Estado) TableName() string { return "estados" } func (this *Estado) IsPersisted() bool { return this.Id > 0 } func (this *Estado) List() (*[]*Estado, error) { var results []*Estado err := this.Session.List(this, &results) return &results, err } func (this *Estado) FindByUf(uf string) (*Estado, error) { result := new(Estado) criteria := db.NewCriteria(this.Session, result, nil).Eq("Uf", uf).One() return result, criteria.Error }
package util

import "testing"

// TestLowerBound checks that LowerBound clamps value up to bound (i.e.
// returns max(value, bound)).
func TestLowerBound(t *testing.T) {
	tt := []struct {
		name     string
		value    int
		bound    int
		expected int
	}{
		{
			name:     "zeros",
			value:    0,
			bound:    0,
			expected: 0,
		},
		{
			name:     "same",
			value:    5,
			bound:    5,
			expected: 5,
		},
		{
			name:     "bound",
			value:    3,
			bound:    5,
			expected: 5,
		},
		{
			// Bug fix: this case duplicated "bound" (3/5/5). An "unbound"
			// case must have value above the lower bound so no clamping
			// occurs, mirroring the negative variant below.
			name:     "unbound",
			value:    7,
			bound:    5,
			expected: 7,
		},
		{
			name:     "negative bound",
			value:    -7,
			bound:    -5,
			expected: -5,
		},
		{
			name:     "negative unbound",
			value:    -7,
			bound:    -10,
			expected: -7,
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			if got := LowerBound(tc.value, tc.bound); got != tc.expected {
				t.Errorf("expected `%v` got `%v`", tc.expected, got)
			}
		})
	}
}

// TestUpperBound checks that UpperBound clamps value down to bound (i.e.
// returns min(value, bound)).
func TestUpperBound(t *testing.T) {
	tt := []struct {
		name     string
		value    int
		bound    int
		expected int
	}{
		{
			name:     "zeros",
			value:    0,
			bound:    0,
			expected: 0,
		},
		{
			name:     "same",
			value:    5,
			bound:    5,
			expected: 5,
		},
		{
			name:     "bound",
			value:    5,
			bound:    3,
			expected: 3,
		},
		{
			name:     "unbound",
			value:    3,
			bound:    5,
			expected: 3,
		},
		{
			name:     "negative bound",
			value:    -5,
			bound:    -7,
			expected: -7,
		},
		{
			name:     "negative unbound",
			value:    -10,
			bound:    -7,
			expected: -10,
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			if got := UpperBound(tc.value, tc.bound); got != tc.expected {
				t.Errorf("expected `%v` got `%v`", tc.expected, got)
			}
		})
	}
}
package pipa import ( "github.com/Shopify/sarama" "github.com/bsm/sarama-cluster" ) // Consumer interface type Consumer interface { Messages() <-chan *sarama.ConsumerMessage MarkOffset(*sarama.ConsumerMessage, string) Close() error } // NewConsumer connects to a real consumer func NewConsumer(addrs []string, groupID string, topics []string, config *cluster.Config, notifier Notifier) (Consumer, error) { consumer, err := cluster.NewConsumer(addrs, groupID, topics, config) if err != nil { return nil, err } // process consumer errors go func() { for err := range consumer.Errors() { notifier.ConsumerError(err) } }() // process consumer notifications go func() { for ntfy := range consumer.Notifications() { notifier.ClaimedTopics(ntfy.Current) } }() return consumer, nil }
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package colexecjoin

import (
	"context"
	"math"
	"unsafe"

	"github.com/cockroachdb/cockroach/pkg/col/coldata"
	"github.com/cockroachdb/cockroach/pkg/col/typeconv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/colcontainer"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecbase"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
	"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
	"github.com/cockroachdb/cockroach/pkg/sql/colmem"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util/mon"
	"github.com/cockroachdb/errors"
	"github.com/marusama/semaphore"
)

// group is an ADT representing a contiguous set of rows that match on their
// equality columns.
type group struct {
	// rowStartIdx and rowEndIdx delimit the group's rows within its batch
	// (NOTE(review): end appears exclusive — confirm against the probe code).
	rowStartIdx int
	rowEndIdx   int
	// numRepeats is used when expanding each group into a cross product in the
	// build phase.
	numRepeats int
	// toBuild is used in the build phase to determine the right output count.
	// This field should stay in sync with the builder over time.
	toBuild int
	// nullGroup indicates whether the output corresponding to the group should
	// consist of all nulls.
	nullGroup bool
	// unmatched indicates that the rows in the group do not have matching rows
	// from the other side (i.e. other side's group will be a null group).
	// NOTE: during the probing phase, the assumption is that such group will
	// consist of a single row.
	unmatched bool
}

// mjBuildFrom is an indicator of which source we're building the output from.
type mjBuildFrom int

const (
	// mjBuildFromBatch indicates that we should be building from the current
	// probing batches. Note that in such case we might have multiple groups to
	// build.
	mjBuildFromBatch mjBuildFrom = iota
	// mjBuildFromBufferedGroup indicates that we should be building from the
	// buffered group. Note that in such case we might have at most one group to
	// build.
	mjBuildFromBufferedGroup
)

// mjBuilderState contains all the state required to execute the build phase.
type mjBuilderState struct {
	buildFrom mjBuildFrom

	// Fields to identify the groups in the input sources.
	lGroups []group
	rGroups []group

	// outCount keeps record of the current number of rows in the output.
	outCount int
	// outFinished is used to determine if the builder is finished outputting
	// the groups from input.
	outFinished bool

	// Totals for the buffered group: how many output rows it will produce in
	// all, and how many have been emitted so far.
	totalOutCountFromBufferedGroup  int
	alreadyEmittedFromBufferedGroup int

	// Cross product materialization state.
	left  mjBuilderCrossProductState
	right mjBuilderCrossProductState
}

// mjBuilderCrossProductState is used to keep track of builder state within the
// loops to materialize the cross product. Useful for picking up where we left
// off.
type mjBuilderCrossProductState struct {
	groupsIdx      int
	curSrcStartIdx int
	numRepeatsIdx  int
}

// mjBufferedGroup is a helper struct that stores information about the tuples
// from both inputs for the buffered group.
type mjBufferedGroup struct {
	// firstTuple stores a single tuple that was first in the buffered group.
	firstTuple   []coldata.Vec
	scratchBatch coldata.Batch
}

type mjBufferedGroupState struct {
	// Local buffer for the last left and right groups which is used when the
	// group ends with a batch and the group on each side needs to be saved to
	// state in order to be able to continue it in the next batch.
	left  mjBufferedGroup
	right mjBufferedGroup

	// helper performs the cross-join expansion of the buffered group.
	helper      *crossJoinerBase
	needToReset bool
}

// mjProberState contains all the state required to execute in the probing
// phase.
type mjProberState struct {
	// Fields to save the "working" batches to state in between outputs.
	lBatch  coldata.Batch
	rBatch  coldata.Batch
	lIdx    int
	lLength int
	rIdx    int
	rLength int
}

// mjState represents the state of the merge joiner.
type mjState int

const (
	// mjEntry is the entry state of the merge joiner where all the batches and
	// indices are properly set, regardless if Next was called the first time or
	// the 1000th time. This state also routes into the correct state based on
	// the prober state after setup.
	mjEntry mjState = iota

	// mjSourceFinished is the state in which one of the input sources has no
	// more available batches, thus signaling that the joiner should begin
	// wrapping up execution by outputting any remaining groups in state.
	mjSourceFinished

	// mjFinishBufferedGroup is the state in which the previous state resulted in
	// a group that ended with a batch. Such a group was buffered, and this state
	// finishes that group and builds the output.
	mjFinishBufferedGroup

	// mjProbe is the main probing state in which the groups for the current
	// batch are determined.
	mjProbe

	// mjBuild is the state in which the groups determined by the probing states
	// are built, i.e. materialized to the output member by creating the cross
	// product.
	mjBuild

	// mjDone is the final state of the merge joiner in which it'll be returning
	// only zero-length batches. In this state, the disk infrastructure is
	// cleaned up.
	mjDone
)

// mergeJoinInput describes one side (left or right) of the merge join.
type mergeJoinInput struct {
	// eqCols specify the indices of the source table equality columns during the
	// merge join.
	eqCols []uint32

	// directions specifies the ordering direction of each column. Note that each
	// direction corresponds to an equality column at the same location, i.e. the
	// direction of eqCols[x] is encoded at directions[x], or
	// len(eqCols) == len(directions).
	directions []execinfrapb.Ordering_Column_Direction

	// sourceTypes specify the types of the input columns of the source table for
	// the merge joiner.
	sourceTypes []*types.T
	// canonicalTypeFamilies stores the canonical type families from
	// sourceTypes. It is stored explicitly rather than being converted at
	// runtime because that conversion would occur in tight loops and
	// noticeably hurt the performance.
	canonicalTypeFamilies []types.Family

	// The distincter is used in the finishGroup phase, and is used only to
	// determine where the current group ends, in the case that the group ended
	// with a batch.
	distincterInput *colexecop.FeedOperator
	distincter      colexecop.Operator
	distinctOutput  []bool

	// source specifies the input operator to the merge join.
	source colexecop.Operator
}

// The merge join operator uses a probe and build approach to generate the
// join. What this means is that instead of going through and expanding the
// cross product row by row, the operator performs two passes.
// The first pass generates a list of groups of matching rows based on the
// equality columns (where a "group" represents a contiguous set of rows that
// match on the equality columns).
// The second pass is where the groups and their associated cross products are
// materialized into the full output.

// Two buffers are used, one for the group on the left table and one for the
// group on the right table. These buffers are only used if the group ends with
// a batch, to make sure that we don't miss any cross product entries while
// expanding the groups (leftGroups and rightGroups) when a group spans
// multiple batches.

// NewMergeJoinOp returns a new merge join operator with the given spec that
// implements sort-merge join. It performs a merge on the left and right input
// sources, based on the equality columns, assuming both inputs are in sorted
// order.
func NewMergeJoinOp(
	unlimitedAllocator *colmem.Allocator,
	memoryLimit int64,
	diskQueueCfg colcontainer.DiskQueueCfg,
	fdSemaphore semaphore.Semaphore,
	joinType descpb.JoinType,
	left colexecop.Operator,
	right colexecop.Operator,
	leftTypes []*types.T,
	rightTypes []*types.T,
	leftOrdering []execinfrapb.Ordering_Column,
	rightOrdering []execinfrapb.Ordering_Column,
	diskAcc *mon.BoundAccount,
) (colexecop.ResettableOperator, error) {
	// Merge joiner only supports the case when the physical types in the
	// equality columns in both inputs are the same. We, however, also need to
	// support joining on numeric columns of different types or widths. If we
	// encounter such mismatch, we need to cast one of the vectors to another
	// and use the cast vector for equality check.

	// Make a copy of types and orderings to be sure that we don't modify
	// anything unwillingly.
	actualLeftTypes, actualRightTypes := append([]*types.T{}, leftTypes...), append([]*types.T{}, rightTypes...)
	actualLeftOrdering := make([]execinfrapb.Ordering_Column, len(leftOrdering))
	actualRightOrdering := make([]execinfrapb.Ordering_Column, len(rightOrdering))
	copy(actualLeftOrdering, leftOrdering)
	copy(actualRightOrdering, rightOrdering)

	// Iterate over each equality column and check whether a cast is needed. If
	// it is needed for some column, then a cast operator is planned on top of
	// the input from the corresponding side and the types and ordering are
	// adjusted accordingly. We will also need to project out that temporary
	// column, so a simple project will be planned below.
	var needProjection bool
	var err error
	for i := range leftOrdering {
		leftColIdx := leftOrdering[i].ColIdx
		rightColIdx := rightOrdering[i].ColIdx
		leftType := leftTypes[leftColIdx]
		rightType := rightTypes[rightColIdx]
		if !leftType.Identical(rightType) && leftType.IsNumeric() && rightType.IsNumeric() {
			// The types are different and both are numeric, so we need to plan
			// a cast. There is a hierarchy of valid casts:
			//   INT2 -> INT4 -> INT8 -> FLOAT -> DECIMAL
			// and the cast is valid if 'fromType' is mentioned before 'toType'
			// in this chain.
			castLeftToRight := false
			switch leftType.Family() {
			case types.IntFamily:
				switch leftType.Width() {
				case 16:
					castLeftToRight = true
				case 32:
					castLeftToRight = !rightType.Identical(types.Int2)
				default:
					castLeftToRight = rightType.Family() != types.IntFamily
				}
			case types.FloatFamily:
				castLeftToRight = rightType.Family() == types.DecimalFamily
			}
			if castLeftToRight {
				// The cast column is appended at the end of the left schema,
				// and the ordering is redirected to it.
				castColumnIdx := len(actualLeftTypes)
				left, err = colexecbase.GetCastOperator(unlimitedAllocator, left, int(leftColIdx), castColumnIdx, leftType, rightType)
				if err != nil {
					return nil, err
				}
				actualLeftTypes = append(actualLeftTypes, rightType)
				actualLeftOrdering[i].ColIdx = uint32(castColumnIdx)
			} else {
				castColumnIdx := len(actualRightTypes)
				right, err = colexecbase.GetCastOperator(unlimitedAllocator, right, int(rightColIdx), castColumnIdx, rightType, leftType)
				if err != nil {
					return nil, err
				}
				actualRightTypes = append(actualRightTypes, leftType)
				actualRightOrdering[i].ColIdx = uint32(castColumnIdx)
			}
			needProjection = true
		}
	}
	base, err := newMergeJoinBase(
		unlimitedAllocator, memoryLimit, diskQueueCfg, fdSemaphore, joinType, left, right,
		actualLeftTypes, actualRightTypes, actualLeftOrdering, actualRightOrdering, diskAcc,
	)
	if err != nil {
		return nil, err
	}
	// Wrap the shared base in the operator specialized for the join type.
	var mergeJoinerOp colexecop.ResettableOperator
	switch joinType {
	case descpb.InnerJoin:
		mergeJoinerOp = &mergeJoinInnerOp{base}
	case descpb.LeftOuterJoin:
		mergeJoinerOp = &mergeJoinLeftOuterOp{base}
	case descpb.RightOuterJoin:
		mergeJoinerOp = &mergeJoinRightOuterOp{base}
	case descpb.FullOuterJoin:
		mergeJoinerOp = &mergeJoinFullOuterOp{base}
	case descpb.LeftSemiJoin:
		mergeJoinerOp = &mergeJoinLeftSemiOp{base}
	case descpb.RightSemiJoin:
		mergeJoinerOp = &mergeJoinRightSemiOp{base}
	case descpb.LeftAntiJoin:
		mergeJoinerOp = &mergeJoinLeftAntiOp{base}
	case descpb.RightAntiJoin:
		mergeJoinerOp = &mergeJoinRightAntiOp{base}
	case descpb.IntersectAllJoin:
		mergeJoinerOp = &mergeJoinIntersectAllOp{base}
	case descpb.ExceptAllJoin:
		mergeJoinerOp = &mergeJoinExceptAllOp{base}
	default:
		return nil, errors.AssertionFailedf("merge join of type %s not supported", joinType)
	}
	if !needProjection {
		// We didn't add any cast operators, so we can just return the operator
		// right away.
		return mergeJoinerOp, nil
	}
	// We need to add a projection to remove all the cast columns we have added
	// above. Note that all extra columns were appended to the corresponding
	// types slices, so we simply need to include first len(leftTypes) from the
	// left and first len(rightTypes) from the right (paying attention to the
	// join type).
	numLeftTypes := len(leftTypes)
	numRightTypes := len(rightTypes)
	numActualLeftTypes := len(actualLeftTypes)
	numActualRightTypes := len(actualRightTypes)
	if !joinType.ShouldIncludeLeftColsInOutput() {
		numLeftTypes = 0
		numActualLeftTypes = 0
	}
	if !joinType.ShouldIncludeRightColsInOutput() {
		numRightTypes = 0
		numActualRightTypes = 0
	}
	projection := make([]uint32, 0, numLeftTypes+numRightTypes)
	for i := 0; i < numLeftTypes; i++ {
		projection = append(projection, uint32(i))
	}
	for i := 0; i < numRightTypes; i++ {
		// Merge joiner outputs all columns from both sides, and the columns
		// from the right have indices in [numActualLeftTypes,
		// numActualLeftTypes + numActualRightTypes) range.
		projection = append(projection, uint32(numActualLeftTypes+i))
	}
	return colexecbase.NewSimpleProjectOp(
		mergeJoinerOp, numActualLeftTypes+numActualRightTypes, projection,
	).(colexecop.ResettableOperator), nil
}

// Const declarations for the merge joiner cross product (MJCP) zero state.
const (
	zeroMJCPGroupsIdx = 0
	// The sentinel value for curSrcStartIdx is -1, as this:
	// a) indicates that a src has not been started
	// b) panics if the sentinel isn't checked
	zeroMJCPCurSrcStartIdx = -1
	zeroMJCPNumRepeatsIdx  = 0
)

// Package level struct for easy access to the MJCP zero state.
var zeroMJBuilderState = mjBuilderCrossProductState{
	groupsIdx:      zeroMJCPGroupsIdx,
	curSrcStartIdx: zeroMJCPCurSrcStartIdx,
	numRepeatsIdx:  zeroMJCPNumRepeatsIdx,
}

// reset restores the cross product state to the package-level zero state.
func (s *mjBuilderCrossProductState) reset() {
	s.setBuilderColumnState(zeroMJBuilderState)
}

// setBuilderColumnState copies all the fields of target into s.
func (s *mjBuilderCrossProductState) setBuilderColumnState(target mjBuilderCrossProductState) {
	s.groupsIdx = target.groupsIdx
	s.curSrcStartIdx = target.curSrcStartIdx
	s.numRepeatsIdx = target.numRepeatsIdx
}

// newMergeJoinBase creates the state shared by all merge joiner variants: it
// extracts the equality columns and directions from the given orderings, sets
// up the disk queue config, and creates the ordered-distinct operators used to
// detect group boundaries on each input.
func newMergeJoinBase(
	unlimitedAllocator *colmem.Allocator,
	memoryLimit int64,
	diskQueueCfg colcontainer.DiskQueueCfg,
	fdSemaphore semaphore.Semaphore,
	joinType descpb.JoinType,
	left colexecop.Operator,
	right colexecop.Operator,
	leftTypes []*types.T,
	rightTypes []*types.T,
	leftOrdering []execinfrapb.Ordering_Column,
	rightOrdering []execinfrapb.Ordering_Column,
	diskAcc *mon.BoundAccount,
) (*mergeJoinBase, error) {
	// Split each ordering into its equality columns and sort directions.
	lEqCols := make([]uint32, len(leftOrdering))
	lDirections := make([]execinfrapb.Ordering_Column_Direction, len(leftOrdering))
	for i, c := range leftOrdering {
		lEqCols[i] = c.ColIdx
		lDirections[i] = c.Direction
	}
	rEqCols := make([]uint32, len(rightOrdering))
	rDirections := make([]execinfrapb.Ordering_Column_Direction, len(rightOrdering))
	for i, c := range rightOrdering {
		rEqCols[i] = c.ColIdx
		rDirections[i] = c.Direction
	}
	diskQueueCfg.CacheMode = colcontainer.DiskQueueCacheModeReuseCache
	diskQueueCfg.SetDefaultBufferSizeBytesForCacheMode()
	base := &mergeJoinBase{
		twoInputNode:       newTwoInputNode(left, right),
		unlimitedAllocator: unlimitedAllocator,
		memoryLimit:        memoryLimit,
		diskQueueCfg:       diskQueueCfg,
		fdSemaphore:        fdSemaphore,
		joinType:           joinType,
		left: mergeJoinInput{
			source:                left,
			sourceTypes:           leftTypes,
			canonicalTypeFamilies: typeconv.ToCanonicalTypeFamilies(leftTypes),
			eqCols:                lEqCols,
			directions:            lDirections,
		},
		right: mergeJoinInput{
			source:                right,
			sourceTypes:           rightTypes,
			canonicalTypeFamilies: typeconv.ToCanonicalTypeFamilies(rightTypes),
			eqCols:                rEqCols,
			directions:            rDirections,
		},
		diskAcc: diskAcc,
	}
	var err error
	// The distincters are fed batches via FeedOperator and report, per tuple,
	// whether it starts a new group on the equality columns.
	base.left.distincterInput = &colexecop.FeedOperator{}
	base.left.distincter, base.left.distinctOutput, err = colexecbase.OrderedDistinctColsToOperators(
		base.left.distincterInput, lEqCols, leftTypes)
	if err != nil {
		return base, err
	}
	base.right.distincterInput = &colexecop.FeedOperator{}
	base.right.distincter, base.right.distinctOutput, err = colexecbase.OrderedDistinctColsToOperators(
		base.right.distincterInput, rEqCols, rightTypes)
	if err != nil {
		return base, err
	}
	return base, err
}

// mergeJoinBase extracts the common logic between all merge join operators.
type mergeJoinBase struct {
	twoInputNode
	colexecop.CloserHelper

	unlimitedAllocator *colmem.Allocator
	memoryLimit        int64
	diskQueueCfg       colcontainer.DiskQueueCfg
	fdSemaphore        semaphore.Semaphore
	joinType           descpb.JoinType
	left               mergeJoinInput
	right              mergeJoinInput

	// Output buffer definition.
	output      coldata.Batch
	outputTypes []*types.T
	// outputReady is a flag to indicate that merge joiner is ready to emit an
	// output batch.
	outputReady bool

	// Local buffer for the "working" repeated groups.
	groups circularGroupsBuffer

	state         mjState
	bufferedGroup mjBufferedGroupState
	proberState   mjProberState
	builderState  mjBuilderState

	diskAcc *mon.BoundAccount
}

var _ colexecop.Resetter = &mergeJoinBase{}
var _ colexecop.Closer = &mergeJoinBase{}

// Reset resets the merge joiner (and, when possible, its inputs) for reuse.
func (o *mergeJoinBase) Reset(ctx context.Context) {
	if r, ok := o.left.source.(colexecop.Resetter); ok {
		r.Reset(ctx)
	}
	if r, ok := o.right.source.(colexecop.Resetter); ok {
		r.Reset(ctx)
	}
	o.outputReady = false
	o.state = mjEntry
	o.bufferedGroup.helper.Reset(ctx)
	o.bufferedGroup.needToReset = false
	o.proberState.lBatch = nil
	o.proberState.rBatch = nil
	o.resetBuilderCrossProductState()
}

// Init initializes both inputs and allocates the state needed before the
// first call to Next: the single-tuple "first tuple" batches used to detect
// group boundaries, the cross joiner helper for buffered groups, and the
// circular groups buffer.
func (o *mergeJoinBase) Init() {
	o.outputTypes = o.joinType.MakeOutputTypes(o.left.sourceTypes, o.right.sourceTypes)
	o.left.source.Init()
	o.right.source.Init()
	o.bufferedGroup.left.firstTuple = o.unlimitedAllocator.NewMemBatchWithFixedCapacity(
		o.left.sourceTypes, 1, /* capacity */
	).ColVecs()
	o.bufferedGroup.right.firstTuple = o.unlimitedAllocator.NewMemBatchWithFixedCapacity(
		o.right.sourceTypes, 1, /* capacity */
	).ColVecs()
	o.bufferedGroup.helper = newCrossJoinerBase(
		o.unlimitedAllocator, o.joinType, o.left.sourceTypes, o.right.sourceTypes,
		o.memoryLimit, o.diskQueueCfg, o.fdSemaphore, o.diskAcc,
	)

	o.builderState.lGroups = make([]group, 1)
	o.builderState.rGroups = make([]group, 1)

	const sizeOfGroup = int(unsafe.Sizeof(group{}))
	// Account for the groups buffer allocated below (the 8x multiplier matches
	// the internal sizing of makeGroupsBuffer).
	o.unlimitedAllocator.AdjustMemoryUsage(int64(8 * coldata.BatchSize() * sizeOfGroup))
	o.groups = makeGroupsBuffer(coldata.BatchSize())
	o.resetBuilderCrossProductState()
}

// resetBuilderCrossProductState zeroes the builder state for both sides.
func (o *mergeJoinBase) resetBuilderCrossProductState() {
	o.builderState.left.reset()
	o.builderState.right.reset()
}

// appendToBufferedGroup appends all the tuples from batch that are part of the
// same group as the ones in the buffered group that corresponds to the input
// source. This needs to happen when a group starts at the end of an input
// batch and can continue into the following batches.
// A zero-length batch needs to be appended when no more batches will be
// appended to the buffered group.
func (o *mergeJoinBase) appendToBufferedGroup(
	ctx context.Context,
	input *mergeJoinInput,
	batch coldata.Batch,
	sel []int,
	groupStartIdx int,
	groupLength int,
) {
	var (
		bufferedGroup     *mjBufferedGroup
		sourceTypes       []*types.T
		bufferedTuples    *colexecutils.SpillingQueue
		numBufferedTuples int
	)
	// Pick the side-specific state based on which input this is.
	if input == &o.left {
		sourceTypes = o.left.sourceTypes
		bufferedGroup = &o.bufferedGroup.left
		bufferedTuples = o.bufferedGroup.helper.left.tuples
		numBufferedTuples = o.bufferedGroup.helper.left.numTuples
		o.bufferedGroup.helper.left.numTuples += groupLength
	} else {
		sourceTypes = o.right.sourceTypes
		bufferedGroup = &o.bufferedGroup.right
		bufferedTuples = o.bufferedGroup.helper.right.tuples
		numBufferedTuples = o.bufferedGroup.helper.right.numTuples
		o.bufferedGroup.helper.right.numTuples += groupLength
	}

	if batch.Length() == 0 || groupLength == 0 {
		// We have finished appending to this buffered group, so we need to
		// Enqueue a zero-length batch per the contract of the spilling queue.
		bufferedTuples.Enqueue(ctx, coldata.ZeroBatch)
		return
	}

	// TODO(yuzefovich): for LEFT/RIGHT ANTI joins we only need to store the
	// first tuple (in order to find the boundaries of the groups) since all
	// of the buffered tuples do have a match and, thus, don't contribute to
	// the output.
	// TODO(yuzefovich): for INTERSECT/EXCEPT ALL joins we can buffer only
	// tuples from the left side and count the number of tuples on the right.
	// TODO(yuzefovich): for LEFT/RIGHT SEMI joins we only need to buffer tuples
	// from one side (left/right respectively).
	if numBufferedTuples == 0 {
		// This is the first batch of the group: remember its first tuple so
		// that later batches can be checked for group membership against it.
		o.unlimitedAllocator.PerformOperation(bufferedGroup.firstTuple, func() {
			for colIdx := range sourceTypes {
				bufferedGroup.firstTuple[colIdx].Copy(
					coldata.CopySliceArgs{
						SliceArgs: coldata.SliceArgs{
							Src:         batch.ColVec(colIdx),
							Sel:         sel,
							DestIdx:     0,
							SrcStartIdx: groupStartIdx,
							SrcEndIdx:   groupStartIdx + 1,
						},
					},
				)
			}
		})
	}

	// For now, we don't enforce any footprint-based memory limit.
	// TODO(yuzefovich): refactor this.
	const maxBatchMemSize = math.MaxInt64
	bufferedGroup.scratchBatch, _ = o.unlimitedAllocator.ResetMaybeReallocate(
		input.sourceTypes, bufferedGroup.scratchBatch, groupLength, maxBatchMemSize,
	)
	// Copy the [groupStartIdx, groupStartIdx+groupLength) window of batch into
	// the scratch batch (deselecting via sel, if present), then enqueue it.
	o.unlimitedAllocator.PerformOperation(bufferedGroup.scratchBatch.ColVecs(), func() {
		for colIdx := range input.sourceTypes {
			bufferedGroup.scratchBatch.ColVec(colIdx).Copy(
				coldata.CopySliceArgs{
					SliceArgs: coldata.SliceArgs{
						Src:         batch.ColVec(colIdx),
						Sel:         sel,
						DestIdx:     0,
						SrcStartIdx: groupStartIdx,
						SrcEndIdx:   groupStartIdx + groupLength,
					},
				},
			)
		}
		bufferedGroup.scratchBatch.SetLength(groupLength)
	})
	bufferedTuples.Enqueue(ctx, bufferedGroup.scratchBatch)
}

// setBuilderSourceToBatch sets the builder state to use groups from the
// circular group buffer and the batches from input. This happens when we have
// groups that are fully contained within a single input batch from each of the
// sources.
func (o *mergeJoinBase) setBuilderSourceToBatch() {
	o.builderState.lGroups, o.builderState.rGroups = o.groups.getGroups()
	o.builderState.buildFrom = mjBuildFromBatch
}

// initProberState sets the batches, lengths, and current indices to the right
// locations given the last iteration of the operator.
func (o *mergeJoinBase) initProberState(ctx context.Context) {
	// If this is the first batch or we're done with the current batch, get the
	// next batch.
	if o.proberState.lBatch == nil || (o.proberState.lLength != 0 && o.proberState.lIdx == o.proberState.lLength) {
		o.proberState.lIdx, o.proberState.lBatch = 0, o.left.source.Next(ctx)
		o.proberState.lLength = o.proberState.lBatch.Length()
	}
	if o.proberState.rBatch == nil || (o.proberState.rLength != 0 && o.proberState.rIdx == o.proberState.rLength) {
		o.proberState.rIdx, o.proberState.rBatch = 0, o.right.source.Next(ctx)
		o.proberState.rLength = o.proberState.rBatch.Length()
	}
	if o.bufferedGroup.needToReset {
		o.bufferedGroup.helper.Reset(ctx)
		o.bufferedGroup.needToReset = false
	}
}

// nonEmptyBufferedGroup returns true if there is a buffered group that needs
// to be finished.
func (o *mergeJoinBase) nonEmptyBufferedGroup() bool {
	return o.bufferedGroup.helper.left.numTuples > 0 || o.bufferedGroup.helper.right.numTuples > 0
}

// sourceFinished returns true if either of input sources has no more rows.
func (o *mergeJoinBase) sourceFinished() bool {
	return o.proberState.lLength == 0 || o.proberState.rLength == 0
}

// finishBufferedGroup appends a zero-length batch to the buffered group which
// is required by the contract of the spilling queue.
func (o *mergeJoinBase) finishBufferedGroup(ctx context.Context, input *mergeJoinInput) {
	o.appendToBufferedGroup(
		ctx, input, coldata.ZeroBatch, nil, /* sel */
		0 /* groupStartIdx */, 0, /* groupLength */
	)
}

// completeBufferedGroup extends the buffered group corresponding to input.
// First, we check that the first row in batch is still part of the same group.
// If this is the case, we use the Distinct operator to find the first
// occurrence in batch (or subsequent batches) that doesn't match the current
// group.
// NOTE: we will be buffering all batches until we find such non-matching tuple
// (or until we exhaust the input).
// TODO(yuzefovich): this can be refactored so that only the right side does
// unbounded buffering.
// SIDE EFFECT: can append to the buffered group corresponding to the source.
// completeBufferedGroup consumes tuples from batch (starting at rowIdx) and,
// if needed, subsequent batches from input, appending every tuple that still
// belongs to the current buffered group. It returns the batch, row index, and
// batch length at which the next (distinct) group begins.
func (o *mergeJoinBase) completeBufferedGroup(
	ctx context.Context, input *mergeJoinInput, batch coldata.Batch, rowIdx int,
) (_ coldata.Batch, idx int, batchLength int) {
	batchLength = batch.Length()
	if o.isBufferedGroupFinished(input, batch, rowIdx) {
		o.finishBufferedGroup(ctx, input)
		return batch, rowIdx, batchLength
	}

	isBufferedGroupComplete := false
	input.distincter.(colexecop.Resetter).Reset(ctx)
	// Ignore the first row of the distincter in the first pass since we already
	// know that we are in the same group and, thus, the row is not distinct,
	// regardless of what the distincter outputs.
	loopStartIndex := 1
	var sel []int
	for !isBufferedGroupComplete {
		// Note that we're not resetting the distincter on every loop iteration
		// because if we're doing the second, third, etc, iteration, then all the
		// previous iterations had only the matching tuples to the buffered group,
		// so the distincter - in a sense - compares the incoming tuples to the
		// first tuple of the first iteration (which we know is the same group).
		input.distincterInput.SetBatch(batch)
		input.distincter.Next(ctx)

		sel = batch.Selection()
		var groupLength int
		if sel != nil {
			for groupLength = loopStartIndex; groupLength < batchLength; groupLength++ {
				if input.distinctOutput[sel[groupLength]] {
					// We found the beginning of a new group!
					isBufferedGroupComplete = true
					break
				}
			}
		} else {
			for groupLength = loopStartIndex; groupLength < batchLength; groupLength++ {
				if input.distinctOutput[groupLength] {
					// We found the beginning of a new group!
					isBufferedGroupComplete = true
					break
				}
			}
		}

		// Zero out the distinct output for the next pass.
		copy(input.distinctOutput[:batchLength], colexecutils.ZeroBoolColumn)
		loopStartIndex = 0

		// Buffer all the tuples that are part of the buffered group.
		o.appendToBufferedGroup(ctx, input, batch, sel, rowIdx, groupLength)
		rowIdx += groupLength

		if !isBufferedGroupComplete {
			// The buffered group is still not complete which means that we have
			// just appended all the tuples from batch to it, so we need to get a
			// fresh batch from the input.
			rowIdx, batch = 0, input.source.Next(ctx)
			batchLength = batch.Length()
			if batchLength == 0 {
				// The input has been exhausted, so the buffered group is now complete.
				isBufferedGroupComplete = true
				o.finishBufferedGroup(ctx, input)
			}
		}
	}

	return batch, rowIdx, batchLength
}

// finishProbe completes the buffered groups on both sides of the input.
func (o *mergeJoinBase) finishProbe(ctx context.Context) {
	o.proberState.lBatch, o.proberState.lIdx, o.proberState.lLength = o.completeBufferedGroup(
		ctx, &o.left, o.proberState.lBatch, o.proberState.lIdx,
	)
	o.proberState.rBatch, o.proberState.rIdx, o.proberState.rLength = o.completeBufferedGroup(
		ctx, &o.right, o.proberState.rBatch, o.proberState.rIdx,
	)
}

// Close closes both input sources (when they implement Closer) and the cross
// joiner helper, returning the last error encountered, if any. The CloserHelper
// guard makes Close idempotent.
func (o *mergeJoinBase) Close(ctx context.Context) error {
	if !o.CloserHelper.Close() {
		return nil
	}
	var lastErr error
	for _, op := range []colexecop.Operator{o.left.source, o.right.source} {
		if c, ok := op.(colexecop.Closer); ok {
			if err := c.Close(ctx); err != nil {
				lastErr = err
			}
		}
	}
	if h := o.bufferedGroup.helper; h != nil {
		if err := h.Close(ctx); err != nil {
			lastErr = err
		}
	}
	return lastErr
}
package transform

import (
	snapshot "github.com/pganalyze/collector/output/pganalyze_collector"
	"github.com/pganalyze/collector/state"
)

// transformPostgresBackendCounts converts the collected backend counts in
// transientState into protobuf BackendCountStatistic entries and appends them
// to the snapshot, mapping database/role OIDs to snapshot indices via the
// provided lookup maps. The (possibly modified) snapshot is returned.
//
// NOTE(review): backend states / types that don't match any case below leave
// the corresponding protobuf field at its zero value — presumably the UNKNOWN
// enum member; confirm against the generated protobuf definitions.
func transformPostgresBackendCounts(s snapshot.FullSnapshot, transientState state.TransientState, roleOidToIdx OidToIdx, databaseOidToIdx OidToIdx) snapshot.FullSnapshot {
	for _, backendCount := range transientState.BackendCounts {
		backendCountStatistic := snapshot.BackendCountStatistic{
			WaitingForLock: backendCount.WaitingForLock,
			Count:          backendCount.Count,
		}
		// Database/role OIDs are nullable; only set the index (plus its
		// presence flag) when the OID was actually reported.
		if backendCount.DatabaseOid.Valid {
			backendCountStatistic.DatabaseIdx = databaseOidToIdx[state.Oid(backendCount.DatabaseOid.Int64)]
			backendCountStatistic.HasDatabaseIdx = true
		}
		if backendCount.RoleOid.Valid {
			backendCountStatistic.RoleIdx = roleOidToIdx[state.Oid(backendCount.RoleOid.Int64)]
			backendCountStatistic.HasRoleIdx = true
		}
		// Map the pg_stat_activity state string to the protobuf enum.
		switch backendCount.State {
		case "unknown":
			backendCountStatistic.State = snapshot.BackendCountStatistic_UNKNOWN_STATE
		case "active":
			backendCountStatistic.State = snapshot.BackendCountStatistic_ACTIVE
		case "idle":
			backendCountStatistic.State = snapshot.BackendCountStatistic_IDLE
		case "idle in transaction":
			backendCountStatistic.State = snapshot.BackendCountStatistic_IDLE_IN_TRANSACTION
		case "idle in transaction (aborted)":
			backendCountStatistic.State = snapshot.BackendCountStatistic_IDLE_IN_TRANSACTION_ABORTED
		case "fastpath function call":
			backendCountStatistic.State = snapshot.BackendCountStatistic_FASTPATH_FUNCTION_CALL
		case "disabled":
			backendCountStatistic.State = snapshot.BackendCountStatistic_DISABLED
		}
		// Map the backend_type string to the protobuf enum.
		switch backendCount.BackendType {
		case "unknown":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_UNKNOWN_TYPE
		case "autovacuum launcher":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_AUTOVACUUM_LAUNCHER
		case "autovacuum worker":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_AUTOVACUUM_WORKER
		case "background worker":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_BACKGROUND_WORKER
		case "background writer":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_BACKGROUND_WRITER
		case "client backend":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_CLIENT_BACKEND
		case "checkpointer":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_CHECKPOINTER
		case "startup":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_STARTUP
		case "walreceiver":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_WALRECEIVER
		case "walsender":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_WALSENDER
		case "walwriter":
			backendCountStatistic.BackendType = snapshot.BackendCountStatistic_WALWRITER
		}
		s.BackendCountStatistics = append(s.BackendCountStatistics, &backendCountStatistic)
	}
	return s
}
// Copyright 2018 Aleksandr Demakin. All rights reserved.

package card

// EventHeader is a common part for all events.
type EventHeader struct {
	WebsiteURL string // the site the event originated from
	SessionID  string // identifier of the user session the event belongs to
}

// Dimension is a width/height pair.
type Dimension struct {
	Width, Height int
}

// Handler handles card-related events.
type Handler interface {
	// OnResize is called with the dimensions before and after a resize.
	OnResize(h EventHeader, from, to Dimension)
	// OnCopyPaste reports copy/paste activity; `form` presumably identifies
	// the form field involved and `pasted` whether a paste occurred —
	// TODO(review): confirm against the callers.
	OnCopyPaste(h EventHeader, form string, pasted bool)
	// OnSubmit is called on form submission; `time` is an integer duration —
	// units (ms/s) are not visible here, confirm with the event producer.
	OnSubmit(h EventHeader, time int)
}
// Copyright (c) 2018 Palantir Technologies. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config_test

import (
	"testing"

	"github.com/palantir/godel-conjure-plugin/v6/conjureplugin"
	"github.com/palantir/godel-conjure-plugin/v6/conjureplugin/config"
	v1 "github.com/palantir/godel-conjure-plugin/v6/conjureplugin/config/internal/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// TestReadConfig verifies that YAML plugin configuration unmarshals into the
// expected ConjurePluginConfig, covering the shorthand (string) and explicit
// (type/locator map) forms of ir-locator and the optional publish/server/cli/
// accept-funcs flags.
func TestReadConfig(t *testing.T) {
	for i, tc := range []struct {
		in   string
		want config.ConjurePluginConfig
	}{
		// Shorthand locator string: type is inferred as "auto".
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator: local/yaml-dir
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "local/yaml-dir",
						},
					},
				},
			},
		},
		// Explicit publish: false is preserved as a non-nil pointer.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator: local/yaml-dir
    publish: false
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "local/yaml-dir",
						},
						Publish: boolPtr(false),
					},
				},
			},
		},
		// Explicit locator map form with type "yaml".
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator:
      type: yaml
      locator: explicit/yaml-dir
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeYAML,
							Locator: "explicit/yaml-dir",
						},
					},
				},
			},
		},
		// URL shorthand still yields type "auto".
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator: http://foo.com/ir.json
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "http://foo.com/ir.json",
						},
					},
				},
			},
		},
		// URL shorthand with explicit publish: true.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator: http://foo.com/ir.json
    publish: true
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "http://foo.com/ir.json",
						},
						Publish: boolPtr(true),
					},
				},
			},
		},
		// Explicit remote locator.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator:
      type: remote
      locator: localhost:8080/ir.json
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeRemote,
							Locator: "localhost:8080/ir.json",
						},
					},
				},
			},
		},
		// Nonexistent local file: unmarshalling does not touch the filesystem.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator: local/nonexistent-ir-file.json
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "local/nonexistent-ir-file.json",
						},
					},
				},
			},
		},
		// Explicit ir-file locator type.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator:
      type: ir-file
      locator: local/nonexistent-ir-file.json
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeIRFile,
							Locator: "local/nonexistent-ir-file.json",
						},
					},
				},
			},
		},
		// server and cli flags are parsed as plain booleans.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator:
      type: remote
      locator: localhost:8080/ir.json
    server: true
    cli: true
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeRemote,
							Locator: "localhost:8080/ir.json",
						},
						Server: true,
						CLI:    true,
					},
				},
			},
		},
		// accept-funcs is parsed as an optional (pointer) boolean.
		{
			`
projects:
  project:
    output-dir: outputDir
    ir-locator:
      type: remote
      locator: localhost:8080/ir.json
    accept-funcs: true
`,
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeRemote,
							Locator: "localhost:8080/ir.json",
						},
						Server:      false,
						AcceptFuncs: boolPtr(true),
					},
				},
			},
		},
	} {
		var got config.ConjurePluginConfig
		err := yaml.Unmarshal([]byte(tc.in), &got)
		require.NoError(t, err)
		assert.Equal(t, tc.want, got, "Case %d", i)
	}
}

// TestConjurePluginConfigToParam verifies that ConjurePluginConfig.ToParams
// resolves "auto" locators to the correct IR provider based on the locator
// path/extension and applies the defaults for Publish/AcceptFuncs.
func TestConjurePluginConfigToParam(t *testing.T) {
	for i, tc := range []struct {
		in   config.ConjurePluginConfig
		want conjureplugin.ConjureProjectParams
	}{
		// Directory locator resolves to a local YAML IR provider.
		{
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project-1": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "local/yaml-dir",
						},
					},
				},
			},
			conjureplugin.ConjureProjectParams{
				SortedKeys: []string{
					"project-1",
				},
				Params: map[string]conjureplugin.ConjureProjectParam{
					"project-1": {
						OutputDir:   "outputDir",
						IRProvider:  conjureplugin.NewLocalYAMLIRProvider("local/yaml-dir"),
						Publish:     true,
						AcceptFuncs: true,
					},
				},
			},
		},
		// .yml file locator also resolves to a local YAML IR provider.
		{
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project-1": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "input.yml",
						},
					},
				},
			},
			conjureplugin.ConjureProjectParams{
				SortedKeys: []string{
					"project-1",
				},
				Params: map[string]conjureplugin.ConjureProjectParam{
					"project-1": {
						OutputDir:   "outputDir",
						IRProvider:  conjureplugin.NewLocalYAMLIRProvider("input.yml"),
						Publish:     true,
						AcceptFuncs: true,
					},
				},
			},
		},
		// .json locator resolves to a local IR-file provider; explicit
		// AcceptFuncs pointer is dereferenced into the param.
		{
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project-1": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "input.json",
						},
						AcceptFuncs: boolPtr(true),
					},
				},
			},
			conjureplugin.ConjureProjectParams{
				SortedKeys: []string{
					"project-1",
				},
				Params: map[string]conjureplugin.ConjureProjectParam{
					"project-1": {
						OutputDir:   "outputDir",
						IRProvider:  conjureplugin.NewLocalFileIRProvider("input.json"),
						AcceptFuncs: true,
					},
				},
			},
		},
		// .json locator with no explicit flags.
		{
			config.ConjurePluginConfig{
				ProjectConfigs: map[string]v1.SingleConjureConfig{
					"project-1": {
						OutputDir: "outputDir",
						IRLocator: v1.IRLocatorConfig{
							Type:    v1.LocatorTypeAuto,
							Locator: "input.json",
						},
					},
				},
			},
			conjureplugin.ConjureProjectParams{
				SortedKeys: []string{
					"project-1",
				},
				Params: map[string]conjureplugin.ConjureProjectParam{
					"project-1": {
						OutputDir:   "outputDir",
						IRProvider:  conjureplugin.NewLocalFileIRProvider("input.json"),
						AcceptFuncs: true,
					},
				},
			},
		},
	} {
		got, err := tc.in.ToParams()
		require.NoError(t, err, "Case %d", i)
		assert.Equal(t, tc.want, got, "Case %d", i)
	}
}

// boolPtr returns a pointer to a copy of the given bool.
func boolPtr(in bool) *bool {
	return &in
}
package server

import (
	"net/http/httptest"
	"testing"
)

// TestNotFoundHandler_ServerHTTP verifies that notFoundHandler responds with
// HTTP 404 and the expected "page not found" body.
func TestNotFoundHandler_ServerHTTP(t *testing.T) {
	respRecorder := httptest.NewRecorder()

	notFoundHandler := &notFoundHandler{}
	// The handler does not need the request, so nil is passed.
	notFoundHandler.ServeHTTP(respRecorder, nil)

	var expectedCode = 404
	if respRecorder.Code != expectedCode {
		t.Errorf("Expected status code for '%d' but was '%d'", expectedCode, respRecorder.Code)
	}

	var expectedBody = "Oops. Page not found."
	// Body.String() replaces string(Body.Bytes()) (staticcheck S1030), and the
	// format string previously lacked the closing quote after the second %s.
	if respRecorder.Body.String() != expectedBody {
		t.Errorf("Expected response body of\n'%s'\nbut got:\n'%s'\n", expectedBody, respRecorder.Body.String())
	}
}
package user

import (
	//"net/http"
	//"webserver/common"
	"webserver/controllers"
	"webserver/lib/alioss"
)

// OssStsTokenController serves temporary Aliyun OSS STS credentials to
// clients.
type OssStsTokenController struct {
	controllers.BaseController
}

// Get handles the HTTP GET request by fetching an STS token and writing it as
// the response body.
//
// NOTE(review): the error from getStsToken is silently discarded, so a failed
// AssumeRole call writes a nil/zero body — presumably the deferred Recover
// handles panics only, not returned errors; confirm intended behavior.
func (c *OssStsTokenController) Get() {
	defer c.Recover()
	respBody, _ := c.getStsToken()
	c.WriteBodyResponse(respBody)
}

// getStsToken requests temporary credentials from Aliyun via
// alioss.AssumeRole and returns them unchanged.
func (c *OssStsTokenController) getStsToken() (interface{}, error) {
	return alioss.AssumeRole()
	/*	var resp common.GetResponse
		resp.Code = 1
		resp.Data = body
		return resp, err*/
}
package protocol

import (
	"github.com/13k/go-steam-resources/steamlang"
	"github.com/13k/go-steam/steamid"
)

// Message is the interface for all messages, typically outgoing.
//
// They can also be created by using the Read* methods of Packet.
type Message interface {
	Serializer
	// IsProto reports whether the message uses the protobuf wire format.
	IsProto() bool
	// Type returns the EMsg identifying this message.
	Type() steamlang.EMsg
	SourceJobID() JobID
	SetSourceJobID(JobID)
	TargetJobID() JobID
	SetTargetJobID(JobID)
}

// MessageHeader is the interface for the header portion of a message,
// carrying the EMsg type and the source/target job IDs.
type MessageHeader interface {
	Serializable
	EMsg() steamlang.EMsg
	SetEMsg(steamlang.EMsg)
	IsProto() bool
	SourceJobID() JobID
	SetSourceJobID(JobID)
	TargetJobID() JobID
	SetTargetJobID(JobID)
}

// StructMessageBody is the interface for non-protobuf (struct-based) message
// bodies that know their own EMsg.
type StructMessageBody interface {
	Serializable
	GetEMsg() steamlang.EMsg
}

// ClientMessage is the interface for client messages, i.e. messages that are sent after logging in.
//
// ClientStructMessage and ProtoMessage implement this.
type ClientMessage interface {
	Message
	SessionID() int32
	SetSessionID(int32)
	SteamID() steamid.SteamID
	SetSteamID(steamid.SteamID)
}
// Copyright (C) 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vulkan import ( "context" "fmt" "sort" "github.com/google/gapid/core/data/id" "github.com/google/gapid/core/log" "github.com/google/gapid/gapis/api" "github.com/google/gapid/gapis/database" "github.com/google/gapid/gapis/memory" ) const ( scratchMemorySize = 64 * 1024 * 1204 scratchBufferAlignment = 256 ) // scratchResources holds a pool of flushing memory and command pool which can // be used to allocate temporary memory and command buffers. It also contains // a queue command handler for each queue. type scratchResources struct { memories map[VkDevice]*flushingMemory commandPools map[VkDevice]map[uint32]VkCommandPool queueCommandHandlers map[VkQueue]*queueCommandHandler } func newScratchResources() *scratchResources { return &scratchResources{ memories: map[VkDevice]*flushingMemory{}, commandPools: map[VkDevice]map[uint32]VkCommandPool{}, queueCommandHandlers: map[VkQueue]*queueCommandHandler{}, } } // Free frees first submit all the pending commands held by all the queue // command handlers, then free all the memories and command pools. 
// Free submits and waits on the pending commands of every queue command
// handler, then frees all the flushing memories and destroys all the command
// pools, emptying the corresponding maps. Submission or wait failures panic.
func (res *scratchResources) Free(sb *stateBuilder) {
	for q, h := range res.queueCommandHandlers {
		err := h.Submit(sb)
		if err != nil {
			panic(err)
		}
		err = h.WaitUntilFinish(sb)
		if err != nil {
			panic(err)
		}
		delete(res.queueCommandHandlers, q)
	}
	for dev, mem := range res.memories {
		mem.Free(sb)
		delete(res.memories, dev)
	}
	for dev, families := range res.commandPools {
		for _, pool := range families {
			sb.write(sb.cb.VkDestroyCommandPool(dev, pool, memory.Nullptr))
		}
		delete(res.commandPools, dev)
	}
}

// GetCommandPool returns a command pool for the given device and queue family
// index, if such a pool has been created before in this scratch resources, the
// existing one will be returned, otherwise a new one will be created.
func (res *scratchResources) GetCommandPool(sb *stateBuilder, dev VkDevice, queueFamilyIndex uint32) VkCommandPool {
	if _, ok := res.commandPools[dev]; !ok {
		res.commandPools[dev] = map[uint32]VkCommandPool{}
	}
	if _, ok := res.commandPools[dev][queueFamilyIndex]; !ok {
		// create new command pool
		// Pick a handle value unused in both the old and the new state.
		commandPool := VkCommandPool(newUnusedID(true, func(x uint64) bool {
			return sb.s.CommandPools().Contains(VkCommandPool(x)) || GetState(sb.newState).CommandPools().Contains(VkCommandPool(x))
		}))
		sb.write(sb.cb.VkCreateCommandPool(
			dev,
			sb.MustAllocReadData(NewVkCommandPoolCreateInfo(sb.ta,
				VkStructureType_VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
				0, // pNext
				VkCommandPoolCreateFlags(VkCommandPoolCreateFlagBits_VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT), // flags
				queueFamilyIndex, // queueFamilyIndex
			)).Ptr(),
			memory.Nullptr,
			sb.MustAllocWriteData(commandPool).Ptr(),
			VkResult_VK_SUCCESS,
		))
		res.commandPools[dev][queueFamilyIndex] = commandPool
	}
	return res.commandPools[dev][queueFamilyIndex]
}

// AllocateCommandBuffer returns a new allocated command buffer from a command
// pool which is created with the given device and queue family index.
// AllocateCommandBuffer allocates one primary command buffer from the command
// pool associated with the given device and queue family (creating the pool
// on demand) and tags it with a "scratch resource" debug marker name.
func (res *scratchResources) AllocateCommandBuffer(sb *stateBuilder, dev VkDevice, queueFamilyIndex uint32) VkCommandBuffer {
	// Pick a handle value unused in both the old and the new state.
	commandBuffer := VkCommandBuffer(newUnusedID(true, func(x uint64) bool {
		return sb.s.CommandBuffers().Contains(VkCommandBuffer(x)) || GetState(sb.newState).CommandBuffers().Contains(VkCommandBuffer(x))
	}))
	sb.write(sb.cb.VkAllocateCommandBuffers(
		dev,
		sb.MustAllocReadData(NewVkCommandBufferAllocateInfo(sb.ta,
			VkStructureType_VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
			0, // pNext
			res.GetCommandPool(sb, dev, queueFamilyIndex), // commandPool
			VkCommandBufferLevel_VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
			uint32(1), // commandBufferCount
		)).Ptr(),
		sb.MustAllocWriteData(commandBuffer).Ptr(),
		VkResult_VK_SUCCESS,
	))
	scratchResName := debugMarkerName("scratch resource")
	attachDebugMarkerName(sb, scratchResName, dev, commandBuffer)
	return commandBuffer
}

// GetMemory returns a flushing memory for temporary memory allocation created
// from the given device. If such a flushing memory has been created before, the
// existing memory will be returned, otherwise a new one will be created.
func (res *scratchResources) GetMemory(sb *stateBuilder, dev VkDevice) *flushingMemory {
	if _, ok := res.memories[dev]; ok {
		return res.memories[dev]
	}
	mem := newFlushingMemory(sb, dev, scratchMemorySize, scratchBufferAlignment,
		debugMarkerName(fmt.Sprintf("scratchMemory dev: %v", dev)))
	res.memories[dev] = mem
	return mem
}

// GetQueueCommandHandler returns a queue command handler for the given queue,
// which means the commands recorded or committed to that command handler will
// be submitted to the given queue. If such a queue has been created before, that
// one will be returned, otherwise a new one will be returned.
// GetQueueCommandHandler returns the (lazily created) command handler for the
// given queue; a new handler records into a freshly allocated command buffer
// on the queue's device/family. Handler creation failure panics.
func (res *scratchResources) GetQueueCommandHandler(sb *stateBuilder, queue VkQueue) *queueCommandHandler {
	if _, ok := res.queueCommandHandlers[queue]; ok {
		return res.queueCommandHandlers[queue]
	}
	queueObj := GetState(sb.newState).Queues().Get(queue)
	commandBuffer := res.AllocateCommandBuffer(sb, queueObj.Device(), queueObj.Family())
	handler, err := newQueueCommandHandler(sb, queue, commandBuffer)
	if err != nil {
		panic(err)
	}
	res.queueCommandHandlers[queue] = handler
	return handler
}

// bufferFlushInfo pairs a buffer with the hashed data slices (and their
// offsets within the buffer) that should be flushed into it.
type bufferFlushInfo struct {
	buffer     VkBuffer
	dataSlices []hashedDataAndOffset
}

// flushDataToBuffers translates each buffer-relative data slice into an
// offset within the buffer's bound device memory and flushes the grouped
// slices per memory object via flushDataToMemory. Buffers that are unknown to
// the new state, or not (fully) bound to memory, produce an error.
func flushDataToBuffers(sb *stateBuilder, alignment uint64, info ...bufferFlushInfo) error {
	memoryFlushes := map[VkDeviceMemory][]hashedDataAndOffset{}
	for _, bfi := range info {
		if !GetState(sb.newState).Buffers().Contains(bfi.buffer) {
			return log.Errf(sb.ctx, nil, "Buffer: %v not found in the new state of stateBuilder", bfi.buffer)
		}
		buf := GetState(sb.newState).Buffers().Get(bfi.buffer)
		if buf.Memory().IsNil() {
			return log.Errf(sb.ctx, nil, "Buffer: %v not bound with memory or is sparsely bound", bfi.buffer)
		}
		mem := buf.Memory().VulkanHandle()
		for _, s := range bfi.dataSlices {
			// Rebase the buffer-relative offset to a memory-relative offset.
			memoryFlushes[mem] = append(memoryFlushes[mem], hashedDataAndOffset{
				offset: s.offset + uint64(buf.MemoryOffset()),
				data:   s.data,
			})
		}
	}
	for m, f := range memoryFlushes {
		err := flushDataToMemory(sb, m, alignment, f...)
		if err != nil {
			return log.Errf(sb.ctx, err, "flush data to buffer's bound memory")
		}
	}
	return nil
}

// hashedData is a pair of hashed data ID and its size.
type hashedData struct {
	hash id.ID  // database resource ID of the stored bytes
	size uint64 // size of the data in bytes
}

// newHashedDataFromBytes creates a new hashedData from raw bytes by storing
// the bytes in the database and recording the returned resource ID.
func newHashedDataFromBytes(ctx context.Context, b []byte) hashedData {
	hash, err := database.Store(ctx, b)
	if err != nil {
		// A database store failure is unrecoverable for the state builder.
		panic(err)
	}
	return hashedData{
		hash: hash,
		size: uint64(len(b)),
	}
}

// newHashedDataFromSlice creates a new hashedData from a U8ˢ slice, reusing
// the slice's existing resource ID rather than re-storing the bytes.
func newHashedDataFromSlice(ctx context.Context, sliceSrcState *api.GlobalState, slice U8ˢ) hashedData {
	return hashedData{
		hash: slice.ResourceID(ctx, sliceSrcState),
		size: slice.Size(),
	}
}

// hashedDataAndOffset is a pair of offset and hashed data.
type hashedDataAndOffset struct {
	offset uint64
	data   hashedData
}

// newHashedDataAndOffset bundles a hashedData with its destination offset.
func newHashedDataAndOffset(data hashedData, offset uint64) hashedDataAndOffset {
	return hashedDataAndOffset{
		offset: offset,
		data:   data,
	}
}

// flushDataToMemory takes a list of hashed data with offsets in device memory
// space and, flush the data to the given device memory based on the
// corresponding offsets.
func flushDataToMemory(sb *stateBuilder, deviceMemory VkDeviceMemory, alignment uint64, dataSlices ...hashedDataAndOffset) error {
	if len(dataSlices) == 0 {
		return nil
	}
	if !GetState(sb.newState).DeviceMemories().Contains(deviceMemory) {
		return fmt.Errorf("DeviceMemory: %v not found in the new state of stateBuilder", deviceMemory)
	}
	dev := GetState(sb.newState).DeviceMemories().Get(deviceMemory).Device()
	// Sort by offset so [begin, end) covers the whole span with one mapping.
	sort.Slice(dataSlices, func(i, j int) bool { return dataSlices[i].offset < dataSlices[j].offset })
	// Round the mapped range start down to the given alignment.
	begin := dataSlices[0].offset / alignment * alignment
	// NOTE(review): the end of the range is rounded up to a hard-coded 256
	// rather than `alignment` — presumably a nonCoherentAtomSize-style bound;
	// confirm this is intentional and compatible with all drivers.
	end := nextMultipleOf(dataSlices[len(dataSlices)-1].offset+dataSlices[len(dataSlices)-1].data.size, 256)
	atData := sb.MustReserve(end - begin)
	ptrAtData := sb.newState.AllocDataOrPanic(sb.ctx, NewVoidᵖ(atData.Ptr()))
	sb.write(sb.cb.VkMapMemory(
		dev, deviceMemory, VkDeviceSize(begin), VkDeviceSize(end-begin),
		VkMemoryMapFlags(0), ptrAtData.Ptr(), VkResult_VK_SUCCESS,
	).AddRead(ptrAtData.Data()).AddWrite(ptrAtData.Data()))
	ptrAtData.Free()
	// Emit observations that place each slice's bytes at its mapped address.
	for _, f := range dataSlices {
		sb.ReadDataAt(f.data.hash, atData.Address()+f.offset-begin, f.data.size)
	}
	sb.write(sb.cb.VkFlushMappedMemoryRanges(
		dev, 1,
		sb.MustAllocReadData(NewVkMappedMemoryRange(sb.ta,
			VkStructureType_VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
			0, // pNext
			deviceMemory,            // memory
			VkDeviceSize(begin),     // offset
			VkDeviceSize(end-begin), // size
		)).Ptr(),
		VkResult_VK_SUCCESS,
	))
	sb.write(sb.cb.VkUnmapMemory(dev, deviceMemory))
	atData.Free()
	return nil
}

// flushableResource is an interface for resources providers that controlls
// the life time of the resources offered by those providers. Users of the
// resource reserved by a flushableResource should register themselves with
// AddUser method to the flushableResource, and when they are done with the
// reserved piece of resource, the users should use DropUser to indicate the
// piece of resource can be recycled without notifying the user. When a flush
// is triggered (either explicitly by an entity out of the flushableResource, or
// implicitly by an internal logic of the flushableResource), all the users will
// be called with OnResourceFlush method, then all the previously reserved
// pieces of resources will be recycled and become invalid to access.
type flushableResource interface {
	flush(*stateBuilder)
	AddUser(flushableResourceUser)
	DropUser(flushableResourceUser)
}

// flushableResourceUser is an interface for types that can use the resources
// provided by flushableResource interface. When flush method is called on
// a flushableResource, the OnResourceFlush method will be called on the
// flushableResourceUser to process the pieces of resources this user uses.
type flushableResourceUser interface {
	OnResourceFlush(*stateBuilder, flushableResource)
}

// flushablePiece is an interface for resources provided by flushableResource
// interfaces, which can be used to query the provider of this piece of
// resource, and check if this piece of resource is still valid to use.
type flushablePiece interface {
	IsValid() bool
	Owner() flushableResource
}

// flushingMemory only guarantees the validity of the last allocated space, each
// incoming allocation request can cause a flush of pre-allocated data. Users of
// flushingMemory should register themself with AddUser() methods, and their
// OnResourceFlush() method will be call before a flush of allocated spaces is
// to occur.
type flushingMemory struct {
	size      uint64         // total capacity of the backing device memory
	allocated uint64         // bytes handed out since the last flush
	alignment uint64         // every allocation is rounded up to this
	mem       VkDeviceMemory // backing Vulkan device memory handle
	users     map[flushableResourceUser]struct{}
	// newMem/freeMem wrap VkAllocateMemory/VkFreeMemory for the owning device.
	newMem  func(*stateBuilder, uint64, debugMarkerName) VkDeviceMemory
	freeMem func(*stateBuilder, VkDeviceMemory)
	name    debugMarkerName
	// validPieces tracks outstanding allocation results so they can be
	// invalidated on flush.
	validPieces []*flushingMemoryAllocationResult
}

// newFlushingMemory creates a flushing memory on the given device with the
// given initial size (rounded up to alignment) and debug marker name.
func newFlushingMemory(sb *stateBuilder, dev VkDevice, initialSize uint64, alignment uint64, name debugMarkerName) *flushingMemory {
	newMem := func(sb *stateBuilder, size uint64, nm debugMarkerName) VkDeviceMemory {
		// Pick a handle unused in both old and new state.
		deviceMemory := VkDeviceMemory(newUnusedID(true, func(x uint64) bool {
			return GetState(sb.oldState).DeviceMemories().Contains(VkDeviceMemory(x)) ||
				GetState(sb.newState).DeviceMemories().Contains(VkDeviceMemory(x))
		}))
		memoryTypeIndex := sb.GetScratchBufferMemoryIndex(GetState(sb.newState).Devices().Get(dev))
		size = nextMultipleOf(size, alignment)
		sb.write(sb.cb.VkAllocateMemory(
			dev,
			NewVkMemoryAllocateInfoᶜᵖ(sb.MustAllocReadData(
				NewVkMemoryAllocateInfo(sb.ta,
					VkStructureType_VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
					0,                  // pNext
					VkDeviceSize(size), // allocationSize
					memoryTypeIndex,    // memoryTypeIndex
				)).Ptr()),
			memory.Nullptr,
			sb.MustAllocWriteData(deviceMemory).Ptr(),
			VkResult_VK_SUCCESS,
		))
		if len(nm) > 0 {
			attachDebugMarkerName(sb, nm, dev, deviceMemory)
		}
		return deviceMemory
	}
	freeMem := func(sb *stateBuilder, mem VkDeviceMemory) {
		sb.write(sb.cb.VkFreeMemory(dev, mem, memory.Nullptr))
	}
	initialSize = nextMultipleOf(initialSize, alignment)
	return &flushingMemory{
		size:        initialSize,
		allocated:   0,
		alignment:   alignment,
		mem:         newMem(sb, initialSize, name),
		users:       map[flushableResourceUser]struct{}{},
		newMem:      newMem,
		freeMem:     freeMem,
		name:        name,
		validPieces: []*flushingMemoryAllocationResult{},
	}
}

// flushingMemoryAllocationResult contains the allocated device memory and
// offset for a memory reservation request, and also implements flushablePiece
// interface.
type flushingMemoryAllocationResult struct { valid bool mem VkDeviceMemory offset uint64 owner flushableResource } // IsValid impelements the flushablePiece interface func (r *flushingMemoryAllocationResult) IsValid() bool { return r.valid } // Owner impelements the flushablePiece interface func (r *flushingMemoryAllocationResult) Owner() flushableResource { return r.owner } // Memory returns the backing device memory of an allocation result. func (r *flushingMemoryAllocationResult) Memory() VkDeviceMemory { return r.mem } // Offset returns the offset in the backing device memory for an allocation. func (r *flushingMemoryAllocationResult) Offset() uint64 { return r.offset } // Allocate issues an request of memory allocation with the given size, and // returns an allocation results with device memory and offset to tell the // valid range to use for the caller. However, this may trigger a flush for the // previously allocated memory ranges. func (m *flushingMemory) Allocate(sb *stateBuilder, size uint64) (*flushingMemoryAllocationResult, error) { size = nextMultipleOf(size, m.alignment) if size > m.size { // Need expand the size of this memory size = nextMultipleOf(size, m.alignment) m.expand(sb, size) return m.Allocate(sb, size) } else if size+m.allocated > m.size { // Need scratch m.flush(sb) return m.Allocate(sb, size) } offset := m.allocated m.allocated += size res := &flushingMemoryAllocationResult{ valid: true, mem: m.mem, offset: offset, owner: m, } m.validPieces = append(m.validPieces, res) return res, nil } // flush implements the flushableResource interface. func (m *flushingMemory) flush(sb *stateBuilder) { for u := range m.users { u.OnResourceFlush(sb, m) } for _, p := range m.validPieces { p.valid = false } m.validPieces = []*flushingMemoryAllocationResult{} m.allocated = 0 } // Flush trigger a flush of previous allocated memory ranges. 
func (m *flushingMemory) Flush(sb *stateBuilder) {
	m.flush(sb)
}

// expand replaces the backing Vulkan device memory with a larger one. It will
// trigger a flush, destroy the existing device memory and create one with the
// given size.
func (m *flushingMemory) expand(sb *stateBuilder, size uint64) {
	// flush then reallocate memory
	m.flush(sb)
	m.freeMem(sb, m.mem)
	m.mem = m.newMem(sb, size, m.name)
	m.size = size
}

// Free flushes all the memory ranges allocated by this flushing memory and
// destroys the backing device memory handle. The memory must not be used
// after Free (size is zeroed and the user set is dropped).
func (m *flushingMemory) Free(sb *stateBuilder) {
	m.flush(sb)
	if m.mem != VkDeviceMemory(0) {
		m.freeMem(sb, m.mem)
		m.mem = VkDeviceMemory(0)
	}
	m.size = 0
	m.users = nil
}

// AddUser registers a user of the memory ranges allocated from this flushing memory.
func (m *flushingMemory) AddUser(user flushableResourceUser) {
	m.users[user] = struct{}{}
}

// DropUser removes a user from the user list of this flushing memory.
func (m *flushingMemory) DropUser(user flushableResourceUser) {
	if _, ok := m.users[user]; ok {
		delete(m.users, user)
	}
}

// bufferAllocationSize returns the memory allocation size for the given buffer
// size.
// Since we cannot guess how much the driver will actually request of us,
// overallocate by a factor of 2. This should be enough.
// Align to 0x100 to make validation layers happy. Assuming the buffer memory
// requirement has an alignment value compatible with 0x100.
func bufferAllocationSize(bufferSize uint64, alignment uint64) uint64 {
	return nextMultipleOf(bufferSize*2, alignment)
}
package create

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"

	"github.com/ocoscope/face/db"
	"github.com/ocoscope/face/utils"
	"github.com/ocoscope/face/utils/answer"
)

// InvitationUsers invites a batch of employees into a company: for each email
// it creates a user with a generated name/password, optionally attaches the
// user to a department, issues an access token, and emails a registration
// link. Only callers with role ID 1 (admin) may invite.
func InvitationUsers(w http.ResponseWriter, r *http.Request) {
	type tbody struct {
		UserID, DepartmentID, CompanyID uint
		AccessToken                     string
		EmailsEmployee                  []string
	}
	var body tbody
	err := json.NewDecoder(r.Body).Decode(&body)
	if err != nil {
		utils.Message(w, answer.WRONG_DATA, 400)
		return
	}
	// Validate every email up front; reject the whole batch on the first
	// invalid address.
	for _, email := range body.EmailsEmployee {
		valid := utils.VEmail(&email)
		if len(valid) != 0 {
			utils.Message(w, valid, 400)
			return
		}
	}
	database, err := db.CopmanyDB(int64(body.CompanyID))
	if err != nil {
		utils.Message(w, answer.NOT_FOUND_COMPANY, 400)
		return
	}
	defer database.Close()
	err = db.CheckUserAccessToken(database, int64(body.UserID), body.AccessToken)
	if err != nil {
		utils.Message(w, answer.UNAUTHORIZED, 401)
		return
	}
	// Only admins (role ID 1) may invite employees.
	userRoleID, err := db.GetUserRoleID(database, int64(body.UserID))
	if err != nil || userRoleID != 1 {
		utils.Message(w, answer.ACCESS_DENIED, 400)
		return
	}
	for _, email := range body.EmailsEmployee {
		userName := utils.GenName() // random id generate
		// NOTE(review): the password is seeded from the FIRST email in the
		// batch for every invitee — looks like this should be `email`; verify.
		password := utils.GenWord(8, body.EmailsEmployee[0])
		hashedPass, _ := utils.HashPassword(password)
		user := db.TUser{
			Email:          email,
			Password:       hashedPass,
			FirstName:      userName,
			LastName:       "",
			PatronymicName: "",
			Number:         "",
			Position:       "",
			Photo:          "",
			Face:           "",
			AccessToken:    "",
			RoleID:         2,
		}
		// Create the new user.
		userID, err := db.CreateUser(database, user)
		if err != nil {
			fmt.Println(err)
			utils.Message(w, answer.F_INVITE_U, 400)
			return
		}
		// Create an employees-table row to bind the user to a department.
		employee := db.TEmployee{
			EmployeeID:   userID,
			DepartmentID: int64(body.DepartmentID),
		}
		if body.DepartmentID > 0 {
			_, err = db.CreateEmployee(database, employee)
			if err != nil {
				fmt.Println(err)
				// Roll back the user on failure.
				db.DeleteUser(database, userID)
				utils.Message(w, answer.FR, 400)
				return
			}
		}
		// Generate an access token from the user ID and store it.
		accessToken, err := db.UpdateAccessTokenById(database, userID)
		if err != nil {
			db.DeleteUser(database, userID)
			utils.Message(w, answer.FR, 400)
			return
		}
		name, err := db.GetPortalNameByID(body.CompanyID)
		if err != nil {
			db.DeleteUser(database, userID)
			utils.Message(w, answer.FR, 400)
			return
		}
		strUserID := utils.IntToStr(userID)
		strCompanyID := utils.IntToStr(int64(body.CompanyID))
		// NOTE(review): FRONT_URL is read from the environment on every loop
		// iteration; it could be hoisted above the loop.
		var FRONT_URL = os.Getenv("FRONT_URL")
		url := FRONT_URL + "/create/face/" + strCompanyID + "/" + strUserID + "/" + accessToken
		subj := "Ссылка для авторизации"
		messageBody := `
Компания ` + name + ` приглашает вас в свой Ocoscope.
В данном сервисе ` + name + ` фиксирует рабочие часы своих сотрудников.

Ваша компания: ` + name + `
Ваш логин: ` + email + `
Ваш пароль: ` + password + `

Пройдите процедуру регистрации. (жирным)
1) Нажмите на кнопку и перейдите по ссылке. ` + url + `
2) В открывшемся окне вы сможете сфотографировать свое лицо для сохранения в системе. Мы не храним фотографии сотрудников, только черты лица в виде точек.
3) Далее войдите в раздел меню "Профиль" и заполните свои данные.
4) Спасибо! Вы зарегистрированы в системе учета рабочего времени Ocoscope.

Как сервис работает? (жирным)
При приходе на рабочее место, Вы фотографируете себя в панели регистрации. При уходе с работы, Вы фотографируете себя в той же панели. Ocoscope не требует отдельного указания пришли Вы или ушли, он поймет это сам.
Ocoscope покажет в вашем профиле количество часов, которое Вы провели на рабочем месте, время, на которое Вы опоздали, а также сумму штрафа за опоздания, если такие присутствуют в компании.
Если Вы забыли отметиться при уходе, Ocoscope закроет Ваш рабочий день согласно указанному в настройках графику компании.

Если вы по ошибке получили данное письмо, то просто проигнорируйте его.`
		err = utils.MessageSmtp(email, subj, messageBody)
		if err != nil {
			db.DeleteUser(database, userID)
			fmt.Println(err)
			utils.Message(w, answer.F_SEND_EMAIL, 400)
			return
		}
	}
	utils.Message(w, answer.S_INVITE_U, 200)
}
package main

import (
	"encoding/hex"
	"flag"
	"fmt"
	"log"
	"net"

	"github.com/bnordbo/nff-go/flow"
	"github.com/bnordbo/nff-go/packet"
	"github.com/bnordbo/nff-go/types"
)

// Command-line configuration for the GTP-U packet generator.
var (
	teid   = flag.Int("teid", 1, "GTP-U TEID")
	srcIP  = flag.String("src-ip", "", "Source IP address")
	dstIP  = flag.String("dst-ip", "", "Destination IP address")
	data   = flag.String("data", "", "GTP-U payload")
	output = flag.Int("port", 0, "DPDK output port")
)

// main wires a fast packet generator that builds GTP-U encapsulated packets
// and sends them out of the configured DPDK port. The generation rate is
// driven at runtime by integers read from stdin (see updateSpeed).
func main() {
	flag.Parse()
	srcAddr, err := stringToIPv4(*srcIP)
	if err != nil {
		log.Fatal(err)
	}
	dstAddr, err := stringToIPv4(*dstIP)
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): SystemInit's error is not checked here, unlike SetSender
	// below which goes through flow.CheckFatal — confirm this is intentional.
	flow.SystemInit(nil)
	outputPort := uint16(*output)
	var pkID uint16 = 0
	// pkID is captured by the generator closure and used as the IPv4 packet
	// ID. NOTE(review): if the generator runs on multiple cores this
	// unsynchronized increment would race — verify single-core generation.
	encapFn := func(p *packet.Packet, c flow.UserContext) {
		pkID++
		encap(p, c, srcAddr, dstAddr, data, pkID)
	}
	firstFlow, genChannel, _ := flow.SetFastGenerator(encapFn, 64, nil)
	flow.CheckFatal(flow.SetSender(firstFlow, outputPort))
	go updateSpeed(genChannel)
	flow.SystemStart()
}

// encap fills p with an ICMP payload, wraps it in GTP-U, then rewrites the
// outer Ethernet/IPv4/UDP headers (fixed MACs, UDP port 2152 = GTP-U).
func encap(
	p *packet.Packet,
	c flow.UserContext,
	srcAddr, dstAddr types.IPv4Address,
	data *string,
	pkID uint16,
) {
	genICMP(p, c, data)
	if !p.EncapsulateIPv4GTP(uint32(*teid)) {
		log.Println("Error encapsulating GTP-U packet")
		return
	}
	p.ParseL3()
	// Hard-coded source/destination MAC addresses of the outer frame.
	p.Ether.SAddr = [6]uint8{0x06, 0xc5, 0x79, 0x20, 0xd0, 0x60}
	p.Ether.DAddr = [6]uint8{0x06, 0x9a, 0x4b, 0x5a, 0x34, 0xa0}
	p.Ether.EtherType = types.SwapIPV4Number
	ipv4 := p.GetIPv4NoCheck()
	length := p.GetPacketLen()
	// Rebuild the outer IPv4 header by hand.
	ipv4.VersionIhl = 0x45
	ipv4.TypeOfService = 0
	ipv4.PacketID = pkID
	ipv4.FragmentOffset = 0
	ipv4.TimeToLive = 64
	ipv4.TotalLength = packet.SwapBytesUint16(uint16(length - types.EtherLen))
	ipv4.NextProtoID = types.UDPNumber
	ipv4.SrcAddr = srcAddr
	ipv4.DstAddr = dstAddr
	ipv4.HdrChecksum = packet.SwapBytesUint16(packet.CalculateIPv4Checksum(ipv4))
	p.ParseL4ForIPv4()
	udp := p.GetUDPNoCheck()
	// 2152 is the registered GTP-U port.
	udp.SrcPort = packet.SwapBytesUint16(2152)
	udp.DstPort = packet.SwapBytesUint16(2152)
	udp.DgramLen = packet.SwapBytesUint16(uint16(length - types.EtherLen - types.IPv4MinLen))
	udp.DgramCksum = 0
}

// genICMP regenerates p from the hex-encoded payload string, prefixed with a
// zeroed placeholder Ethernet header.
func genICMP(p *packet.Packet, c flow.UserContext, data *string) {
	// adding fake Ethernet header since encapsulation will truncate it without checking if it exist
	ether := "0000000000000000000000000000"
	payload, _ := hex.DecodeString(ether + *data)
	packet.GeneratePacketFromByte(p, payload)
}

// stringToIPv4 parses a dotted-quad string into an nff-go IPv4 address.
func stringToIPv4(addr string) (types.IPv4Address, error) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return types.IPv4Address(0), fmt.Errorf("Invalid source IP address %s", addr)
	}
	i := ip.To4()
	return types.BytesToIPv4(i[0], i[1], i[2], i[3]), nil
}

// updateSpeed forwards integers read from stdin to the generator's speed
// channel, letting the packet rate be changed at runtime.
func updateSpeed(genChannel chan uint64) {
	var load int
	for {
		// Can be file or any other source
		if _, err := fmt.Scanf("%d", &load); err == nil {
			genChannel <- uint64(load)
		}
	}
}

// arpSeparator reports false for ARP packets so they can be filtered out of a
// flow; all other packets pass.
func arpSeparator(p *packet.Packet, c flow.UserContext) bool {
	p.ParseL3()
	if p.GetARP() != nil {
		return false
	}
	return true
}

// np counts how many packets dump has printed so far.
var np = 0

// dump prints the headers (Ethernet, IPv4, TCP/UDP, GTP) of the first few
// packets for debugging.
func dump(currentPacket *packet.Packet, context flow.UserContext) {
	if np < 9 /* dump the first nine packets (comment previously said "three") */ {
		fmt.Printf("%v", currentPacket.Ether)
		currentPacket.ParseL3()
		ipv4 := currentPacket.GetIPv4()
		if ipv4 != nil {
			fmt.Printf("%v", ipv4)
			tcp, udp, _ := currentPacket.ParseAllKnownL4ForIPv4()
			if tcp != nil {
				fmt.Printf("%v", tcp)
			} else if udp != nil {
				fmt.Printf("%v", udp)
				gtp := currentPacket.GTPIPv4FastParsing()
				fmt.Printf("%v", gtp)
			} else {
				println("ERROR L 1")
			}
		} else {
			println("ERROR L 0")
		}
		fmt.Println("----------------------------------------------------------")
		np++
	}
}
package cfg import ( "github.com/sirupsen/logrus" ) //singleton logger var Logger *logrus.Entry func init() { Logger = logrus.WithFields(logrus.Fields{}) } func NewLogger() { Logger = logrus.WithFields(logrus.Fields{}) } func BindFields(fields logrus.Fields) { Logger = Logger.WithFields(fields) }
// Solution to excericise : https://tour.golang.org/moretypes/23 package main import ( "strings" "golang.org/x/tour/wc" ) //WordCount returns count of occurance for each word in given string s func WordCount(s string) map[string]int { words := strings.Fields(s) wc := make(map[string]int) for i := range words { wc[words[i]]++ } return wc } func main() { wc.Test(WordCount) }
package tiltfile import ( "bytes" "fmt" "os" "path/filepath" "sort" "strings" "github.com/docker/distribution/reference" "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/pkg/errors" "go.starlark.net/starlark" "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/dockerfile" "github.com/tilt-dev/tilt/internal/ospath" "github.com/tilt-dev/tilt/internal/sliceutils" "github.com/tilt-dev/tilt/internal/tiltfile/io" "github.com/tilt-dev/tilt/internal/tiltfile/starkit" "github.com/tilt-dev/tilt/internal/tiltfile/value" "github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1" "github.com/tilt-dev/tilt/pkg/model" ) const dockerPlatformEnv = "DOCKER_DEFAULT_PLATFORM" var cacheObsoleteWarning = "docker_build(cache=...) is obsolete, and currently a no-op.\n" + "You should switch to live_update to optimize your builds." type dockerImage struct { buildType dockerImageBuildType workDir string configurationRef container.RefSelector matchInEnvVars bool sshSpecs []string secretSpecs []string ignores []string onlys []string entrypoint model.Cmd // optional: if specified, we override the image entrypoint/k8s command with this targetStage string // optional: if specified, we build a particular target in the dockerfile network string extraTags []string // Extra tags added at build-time. cacheFrom []string pullParent bool platform string // Overrides the container args. Used as an escape hatch in case people want the old entrypoint behavior. // See discussion here: // https://github.com/tilt-dev/tilt/pull/2933 overrideArgs *v1alpha1.ImageMapOverrideArgs dbDockerfilePath string dbDockerfile dockerfile.Dockerfile // dbBuildPath may be empty if the user is building from a URL dbBuildPath string dbBuildArgs []string customCommand model.Cmd customDeps []string customTag string customImgDeps []reference.Named // Whether this has been matched up yet to a deploy resource. 
matched bool imageMapDeps []string // Only applicable to custom_build disablePush bool skipsLocalDocker bool outputsImageRefTo string liveUpdate v1alpha1.LiveUpdateSpec // TODO(milas): we should have a better way of passing the Tiltfile path around during resource assembly tiltfilePath string dockerComposeService string dockerComposeLocalVolumePaths []string extraHosts []string } func (d *dockerImage) ID() model.TargetID { return model.ImageID(d.configurationRef) } func (d *dockerImage) ImageMapName() string { return string(model.ImageID(d.configurationRef).Name) } type dockerImageBuildType int const ( UnknownBuild dockerImageBuildType = iota DockerBuild CustomBuild DockerComposeBuild ) func (d *dockerImage) Type() dockerImageBuildType { return d.buildType } func (s *tiltfileState) dockerBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var dockerRef, targetStage string contextVal := value.NewLocalPathUnpacker(thread) dockerfilePathVal := value.NewLocalPathUnpacker(thread) var dockerfileContentsVal, cacheVal, liveUpdateVal, ignoreVal, onlyVal, entrypoint starlark.Value var buildArgs value.StringStringMap var network, platform value.Stringable var ssh, secret, extraTags, cacheFrom, extraHosts value.StringOrStringList var matchInEnvVars, pullParent bool var overrideArgsVal starlark.Sequence if err := s.unpackArgs(fn.Name(), args, kwargs, "ref", &dockerRef, "context", &contextVal, "build_args?", &buildArgs, "dockerfile??", &dockerfilePathVal, "dockerfile_contents?", &dockerfileContentsVal, "cache?", &cacheVal, "live_update?", &liveUpdateVal, "match_in_env_vars?", &matchInEnvVars, "ignore?", &ignoreVal, "only?", &onlyVal, "entrypoint?", &entrypoint, "container_args?", &overrideArgsVal, "target?", &targetStage, "ssh?", &ssh, "secret?", &secret, "network?", &network, "extra_tag?", &extraTags, "cache_from?", &cacheFrom, "pull?", &pullParent, "platform?", &platform, "extra_hosts?", &extraHosts, ); 
err != nil { return nil, err } ref, err := container.ParseNamed(dockerRef) if err != nil { return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err) } context := contextVal.Value dockerfilePath := filepath.Join(context, "Dockerfile") var dockerfileContents string if dockerfileContentsVal != nil && dockerfilePathVal.IsSet { return nil, fmt.Errorf("Cannot specify both dockerfile and dockerfile_contents keyword arguments") } if dockerfileContentsVal != nil { switch v := dockerfileContentsVal.(type) { case io.Blob: dockerfileContents = v.Text case starlark.String: dockerfileContents = v.GoString() default: return nil, fmt.Errorf("Argument (dockerfile_contents): must be string or blob.") } } else if dockerfilePathVal.IsSet { dockerfilePath = dockerfilePathVal.Value bs, err := io.ReadFile(thread, dockerfilePath) if err != nil { return nil, errors.Wrap(err, "error reading dockerfile") } dockerfileContents = string(bs) } else { bs, err := io.ReadFile(thread, dockerfilePath) if err != nil { return nil, errors.Wrapf(err, "error reading dockerfile") } dockerfileContents = string(bs) } if cacheVal != nil { s.logger.Warnf("%s", cacheObsoleteWarning) } liveUpdate, err := s.liveUpdateFromSteps(thread, liveUpdateVal) if err != nil { return nil, errors.Wrap(err, "live_update") } ignores, err := parseValuesToStrings(ignoreVal, "ignore") if err != nil { return nil, err } onlys, err := s.parseOnly(onlyVal) if err != nil { return nil, err } entrypointCmd, err := value.ValueToUnixCmd(thread, entrypoint, nil, nil) if err != nil { return nil, err } var overrideArgs *v1alpha1.ImageMapOverrideArgs if overrideArgsVal != nil { args, err := value.SequenceToStringSlice(overrideArgsVal) if err != nil { return nil, fmt.Errorf("Argument 'container_args': %v", err) } overrideArgs = &v1alpha1.ImageMapOverrideArgs{Args: args} } for _, extraTag := range extraTags.Values { _, err := container.ParseNamed(extraTag) if err != nil { return nil, fmt.Errorf("Argument extra_tag=%q not a 
valid image reference: %v", extraTag, err) } } if platform.Value == "" { // for compatibility with Docker CLI, support the env var fallback // see https://docs.docker.com/engine/reference/commandline/cli/#environment-variables platform.Value = os.Getenv(dockerPlatformEnv) } buildArgsList := []string{} for k, v := range buildArgs.AsMap() { if v == "" { buildArgsList = append(buildArgsList, k) } else { buildArgsList = append(buildArgsList, fmt.Sprintf("%s=%s", k, v)) } } sort.Strings(buildArgsList) r := &dockerImage{ buildType: DockerBuild, workDir: starkit.CurrentExecPath(thread), dbDockerfilePath: dockerfilePath, dbDockerfile: dockerfile.Dockerfile(dockerfileContents), dbBuildPath: context, configurationRef: container.NewRefSelector(ref), dbBuildArgs: buildArgsList, liveUpdate: liveUpdate, matchInEnvVars: matchInEnvVars, sshSpecs: ssh.Values, secretSpecs: secret.Values, ignores: ignores, onlys: onlys, entrypoint: entrypointCmd, overrideArgs: overrideArgs, targetStage: targetStage, network: network.Value, extraTags: extraTags.Values, cacheFrom: cacheFrom.Values, pullParent: pullParent, platform: platform.Value, tiltfilePath: starkit.CurrentExecPath(thread), extraHosts: extraHosts.Values, } err = s.buildIndex.addImage(r) if err != nil { return nil, err } return starlark.None, nil } func (s *tiltfileState) parseOnly(val starlark.Value) ([]string, error) { paths, err := parseValuesToStrings(val, "only") if err != nil { return nil, err } for _, p := range paths { // We want to forbid file globs due to these issues: // https://github.com/tilt-dev/tilt/issues/1982 // https://github.com/moby/moby/issues/30018 if strings.Contains(p, "*") { return nil, fmt.Errorf("'only' does not support '*' file globs. 
Must be a real path: %s", p) } } return paths, nil } func (s *tiltfileState) customBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var dockerRef string var commandVal, commandBat, commandBatVal starlark.Value deps := value.NewLocalPathListUnpacker(thread) var tag string var disablePush bool var liveUpdateVal, ignoreVal starlark.Value var matchInEnvVars bool var entrypoint starlark.Value var overrideArgsVal starlark.Sequence var skipsLocalDocker bool var imageDeps value.ImageList outputsImageRefTo := value.NewLocalPathUnpacker(thread) err := s.unpackArgs(fn.Name(), args, kwargs, "ref", &dockerRef, "command", &commandVal, "deps", &deps, "tag?", &tag, "disable_push?", &disablePush, "skips_local_docker?", &skipsLocalDocker, "live_update?", &liveUpdateVal, "match_in_env_vars?", &matchInEnvVars, "ignore?", &ignoreVal, "entrypoint?", &entrypoint, "container_args?", &overrideArgsVal, "command_bat_val", &commandBatVal, "outputs_image_ref_to", &outputsImageRefTo, // This is a crappy fix for https://github.com/tilt-dev/tilt/issues/4061 // so that we don't break things. 
"command_bat", &commandBat, "image_deps", &imageDeps, ) if err != nil { return nil, err } ref, err := container.ParseNamed(dockerRef) if err != nil { return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err) } liveUpdate, err := s.liveUpdateFromSteps(thread, liveUpdateVal) if err != nil { return nil, errors.Wrap(err, "live_update") } ignores, err := parseValuesToStrings(ignoreVal, "ignore") if err != nil { return nil, err } entrypointCmd, err := value.ValueToUnixCmd(thread, entrypoint, nil, nil) if err != nil { return nil, err } var overrideArgs *v1alpha1.ImageMapOverrideArgs if overrideArgsVal != nil { args, err := value.SequenceToStringSlice(overrideArgsVal) if err != nil { return nil, fmt.Errorf("Argument 'container_args': %v", err) } overrideArgs = &v1alpha1.ImageMapOverrideArgs{Args: args} } if commandBat == nil { commandBat = commandBatVal } command, err := value.ValueGroupToCmdHelper(thread, commandVal, commandBat, nil, nil) if err != nil { return nil, fmt.Errorf("Argument 2 (command): %v", err) } else if command.Empty() { return nil, fmt.Errorf("Argument 2 (command) can't be empty") } if tag != "" && outputsImageRefTo.Value != "" { return nil, fmt.Errorf("Cannot specify both tag= and outputs_image_ref_to=") } img := &dockerImage{ buildType: CustomBuild, workDir: starkit.AbsWorkingDir(thread), configurationRef: container.NewRefSelector(ref), customCommand: command, customDeps: deps.Value, customTag: tag, customImgDeps: []reference.Named(imageDeps), disablePush: disablePush, skipsLocalDocker: skipsLocalDocker, liveUpdate: liveUpdate, matchInEnvVars: matchInEnvVars, ignores: ignores, entrypoint: entrypointCmd, overrideArgs: overrideArgs, outputsImageRefTo: outputsImageRefTo.Value, tiltfilePath: starkit.CurrentExecPath(thread), } err = s.buildIndex.addImage(img) if err != nil { return nil, err } return &customBuild{s: s, img: img}, nil } type customBuild struct { s *tiltfileState img *dockerImage } var _ starlark.Value = &customBuild{} func 
(b *customBuild) String() string { return fmt.Sprintf("custom_build(%q)", b.img.configurationRef.String()) } func (b *customBuild) Type() string { return "custom_build" } func (b *customBuild) Freeze() {} func (b *customBuild) Truth() starlark.Bool { return true } func (b *customBuild) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: custom_build") } func (b *customBuild) AttrNames() []string { return []string{} } func parseValuesToStrings(value starlark.Value, param string) ([]string, error) { tempIgnores := starlarkValueOrSequenceToSlice(value) var ignores []string for _, v := range tempIgnores { switch val := v.(type) { case starlark.String: // for singular string goString := val.GoString() if strings.Contains(goString, "\n") { return nil, fmt.Errorf(param+" cannot contain newlines; found "+param+": %q", goString) } ignores = append(ignores, val.GoString()) default: return nil, fmt.Errorf(param+" must be a string or a sequence of strings; found a %T", val) } } return ignores, nil } func isGitRepoBase(path string) bool { return ospath.IsDir(filepath.Join(path, ".git")) } func repoIgnoresForPaths(paths []string) []v1alpha1.IgnoreDef { var result []v1alpha1.IgnoreDef repoSet := map[string]bool{} for _, path := range paths { isRepoBase := isGitRepoBase(path) if !isRepoBase || repoSet[path] { continue } repoSet[path] = true result = append(result, v1alpha1.IgnoreDef{ BasePath: filepath.Join(path, ".git"), }) } return result } func (s *tiltfileState) repoIgnoresForImage(image *dockerImage) []v1alpha1.IgnoreDef { var paths []string paths = append(paths, image.dbDockerfilePath) if image.dbBuildPath != "" { paths = append(paths, image.dbBuildPath) } paths = append(paths, image.workDir) paths = append(paths, image.customDeps...) 
return repoIgnoresForPaths(paths) } func (s *tiltfileState) defaultRegistry(t *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if !container.IsEmptyRegistry(s.defaultReg) { return starlark.None, errors.New("default registry already defined") } var host, hostFromCluster, singleName string if err := s.unpackArgs(fn.Name(), args, kwargs, "host", &host, "host_from_cluster?", &hostFromCluster, "single_name?", &singleName); err != nil { return nil, err } reg := &v1alpha1.RegistryHosting{ Host: host, HostFromContainerRuntime: hostFromCluster, SingleName: singleName, } ctx, err := starkit.ContextFromThread(t) if err != nil { return starlark.None, err } if err := reg.Validate(ctx); err != nil { return starlark.None, errors.Wrapf(err.ToAggregate(), "validating defaultRegistry") } reg.SingleName = singleName s.defaultReg = reg return starlark.None, nil } func (s *tiltfileState) dockerignoresFromPathsAndContextFilters(source string, paths []string, ignorePatterns []string, onlys []string, dbDockerfilePath string) ([]model.Dockerignore, error) { var result []model.Dockerignore dupeSet := map[string]bool{} onlyPatterns := onlysToDockerignorePatterns(onlys) for _, path := range paths { if path == "" || dupeSet[path] { continue } dupeSet[path] = true if !ospath.IsDir(path) { continue } if len(ignorePatterns) != 0 { result = append(result, model.Dockerignore{ LocalPath: path, Source: source + " ignores=", Patterns: ignorePatterns, }) } if len(onlyPatterns) != 0 { result = append(result, model.Dockerignore{ LocalPath: path, Source: source + " only=", Patterns: onlyPatterns, }) } diFile := filepath.Join(path, ".dockerignore") if dbDockerfilePath != "" { customDiFile := dbDockerfilePath + ".dockerignore" _, err := os.Stat(customDiFile) if !os.IsNotExist(err) { diFile = customDiFile } } s.postExecReadFiles = sliceutils.AppendWithoutDupes(s.postExecReadFiles, diFile) contents, err := os.ReadFile(diFile) if err != nil { if 
os.IsNotExist(err) { continue } return nil, err } patterns, err := dockerignore.ReadAll(bytes.NewBuffer(contents)) if err != nil { return nil, err } result = append(result, model.Dockerignore{ LocalPath: path, Source: diFile, Patterns: patterns, }) } return result, nil } func onlysToDockerignorePatterns(onlys []string) []string { if len(onlys) == 0 { return nil } result := []string{"**"} for _, only := range onlys { result = append(result, fmt.Sprintf("!%s", only)) } return result } func (s *tiltfileState) dockerignoresForImage(image *dockerImage) ([]model.Dockerignore, error) { var paths []string var source string ref := image.configurationRef.RefFamiliarString() switch image.Type() { case DockerBuild: if image.dbBuildPath != "" { paths = append(paths, image.dbBuildPath) } source = fmt.Sprintf("docker_build(%q)", ref) case CustomBuild: paths = append(paths, image.customDeps...) source = fmt.Sprintf("custom_build(%q)", ref) case DockerComposeBuild: if image.dbBuildPath != "" { paths = append(paths, image.dbBuildPath) } source = fmt.Sprintf("docker_compose(%q)", ref) } return s.dockerignoresFromPathsAndContextFilters( source, paths, image.ignores, image.onlys, image.dbDockerfilePath) } // Filter out all images that are suppressed. func filterUnmatchedImages(us model.UpdateSettings, images []*dockerImage) []*dockerImage { result := make([]*dockerImage, 0, len(images)) for _, image := range images { name := container.FamiliarString(image.configurationRef) ok := true for _, suppressed := range us.SuppressUnusedImageWarnings { if suppressed == "*" { ok = false break } if suppressed == name { ok = false break } } if ok { result = append(result, image) } } return result }
package swproxy

import (
	"bytes"
	"encoding/base64"
	"errors"
	"io/ioutil"
	"net/http"
	"strings"

	grpczerolog "github.com/cheapRoc/grpc-zerolog"
	"github.com/elazarl/goproxy"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"github.com/swarpf/proxy/pkg/events"
	"github.com/swarpf/proxy/pkg/utils"
)

// ProxyConfiguration holds the user-supplied settings for the proxy.
type ProxyConfiguration struct {
	CertificateDirectory string `default:"./certs/"`
	InterceptHttps       bool
	ForceHttpDowngrade   bool `default:"false"`
	Verbose              bool `default:"false"`
}

// Proxy intercepts game API traffic and publishes decrypted request/response
// pairs on eventChan.
type Proxy struct {
	log           zerolog.Logger
	eventChan     chan events.ApiEventMsg
	configuration ProxyConfiguration
}

// proxy.New : Create a new proxy instance for further use
func New(ev chan events.ApiEventMsg, configuration ProxyConfiguration) *Proxy {
	if ev == nil {
		log.Panic().Msg("ev is not a valid ApiEventMsg channel")
		return nil
	}

	return &Proxy{
		log:           log.With().Timestamp().Str("log_type", "module").Str("module", "Proxy").Logger(),
		eventChan:     ev,
		configuration: configuration,
	}
}

// CreateProxy builds the goproxy handler: optional HTTPS downgrade of the
// location endpoint, optional MITM interception of HTTPS game traffic, and
// request/response hooks for the game endpoint.
func (p *Proxy) CreateProxy() http.Handler {
	proxy := goproxy.NewProxyHttpServer()
	proxy.Logger = grpczerolog.New(log.Logger)

	// todo(lyrex): this need some kind of better implementation that does not just throw everything into INFO
	proxy.Verbose = p.configuration.Verbose

	if p.configuration.ForceHttpDowngrade {
		p.log.Warn().Msg("HTTPS -> HTTP downgrade is enabled")

		// match the /api/location_c2.php endpoint and modify the body if necessary
		proxy.OnResponse(newLocationServiceMatcher()).DoFunc(p.onLocationResponse)
	}

	if p.configuration.InterceptHttps {
		p.log.Warn().Msg("HTTPS interception is enabled")

		rootCa := getRootCA(p.configuration.CertificateDirectory)
		if err := setCA(rootCa); err != nil {
			p.log.Fatal().Err(err).Msg("could not set proxy CA")
			return nil
		}

		proxy.OnRequest(newProxyGameEndpointMatcher()).HandleConnect(goproxy.AlwaysMitm)
		// Serve the root certificate so users can install it on their device.
		// NOTE(review): this writes the raw DER bytes with a text content
		// type — presumably the client expects that; confirm it shouldn't be
		// PEM-encoded instead.
		proxy.OnRequest(newCertificateEndpointMatcher()).DoFunc(
			func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
				p.log.Debug().Msg("user requested certificate")
				return req, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusOK,
					string(rootCa.Certificate[0]))
			})
	}

	proxy.OnRequest(newGameEndpointMatcher()).DoFunc(p.onRequest)
	proxy.OnResponse(newGameEndpointMatcher()).DoFunc(p.onResponse)

	return proxy
}

// onRequest decrypts the outgoing game API request body and stashes the
// plaintext in ctx.UserData so onResponse can pair it with the response.
// The request is always forwarded unmodified.
func (p *Proxy) onRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
	requestLogger := p.log.With().Int64("ctx.Session", ctx.Session).Logger()

	requestLogger.Trace().
		Stringer("ctx.Req.URL", ctx.Req.URL).
		Interface("ctx.Req.Header", ctx.Req.Header).
		Msg("New outgoing request")

	if req == nil || req.ContentLength == 0 || req.Body == nil {
		requestLogger.Info().Msg("Sending empty request to API")
		return req, nil
	}

	reqBody, err := ioutil.ReadAll(req.Body)
	if err != nil {
		requestLogger.Error().Err(err).Msg("could not read request body")
		return req, nil
	}
	// Restore the body so the request can still be forwarded upstream.
	req.Body = ioutil.NopCloser(bytes.NewBuffer(reqBody))

	reqContent := string(reqBody[:])
	plainContent, err := p.readBody(reqContent, false)
	if err != nil {
		// do not log here since we're logging the actual error in readBody
		return req, nil
	}

	requestLogger.Trace().
		Str("encryptedContent", reqContent).
		Str("plainContent", plainContent).
		Msg("Sending request from API")

	ctx.UserData = plainContent

	return req, nil
}

// onResponse decrypts the incoming game API response body, pairs it with the
// request plaintext stored by onRequest, and publishes both on eventChan.
// The response is always returned unmodified.
func (p *Proxy) onResponse(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
	responseLogger := p.log.With().Int64("ctx.Session", ctx.Session).Logger()

	responseLogger.Trace().
		Stringer("ctx.Req.URL", ctx.Req.URL).
		Interface("ctx.Req.Header", ctx.Req.Header).
		Msg("New incoming response")

	if resp == nil || resp.ContentLength == 0 || resp.Body == nil {
		responseLogger.Info().Msg("Received empty reponse from API")
		return resp
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		responseLogger.Error().Err(err).Msg("could not read response body")
		return resp
	}
	// Restore the body so the client still receives the payload.
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(respBody))

	respContent := string(respBody[:])
	responsePlainContent, err := p.readBody(respContent, true)
	if err != nil {
		// do not log here since we're logging the actual error in readBody
		return resp
	}

	responseLogger.Trace().
		Str("encryptedContent", respContent).
		Str("plainContent", responsePlainContent).
		Msg("Receiving response from API")

	// NOTE(review): this type assertion panics if onRequest never set
	// UserData (empty request body, or readBody failed there). Consider the
	// two-value form `s, ok := ctx.UserData.(string)` — confirm goproxy
	// guarantees pairing before relying on it.
	requestPlainContent := ctx.UserData.(string)

	// send ApiEvent to event message
	p.eventChan <- events.ApiEventMsg{
		Request:  requestPlainContent,
		Response: responsePlainContent,
	}

	return resp
}

// onLocationResponse rewrites the location service response so that the game
// API gateway URLs use http instead of https, then re-compresses, re-encrypts
// and re-encodes the body. On any failure the original response is returned.
func (p *Proxy) onLocationResponse(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
	responseLogger := p.log.With().
		Int64("ctx.Session", ctx.Session).
		Str("tag", "location_endpoint").
		Logger()

	responseLogger.Trace().
		Stringer("ctx.Req.URL", ctx.Req.URL).
		Interface("ctx.Req.Header", ctx.Req.Header).
		Msg("New incoming location response")

	if resp == nil || resp.ContentLength == 0 || resp.Body == nil {
		responseLogger.Info().Msg("Received empty reponse from location API")
		return resp
	}

	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		responseLogger.Error().Err(err).Msg("could not read location response body")
		return resp
	}
	// NOTE: we keep this here to not break the buffer if we need to bail early
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))

	responseText, err := p.readBody(string(bodyBytes), true)
	if err != nil {
		// do not log here since we're logging the actual error in readBody
		return resp
	}

	responseLogger.Info().Msg("Receiving location response from API")
	responseLogger.Trace().
		Bytes("encryptedContent", bodyBytes).
		Str("plainContent", responseText).
		Int64("ContentLength", resp.ContentLength).Send()

	// ensure we really do have a correctly decryped body
	if !strings.Contains(responseText, "server_url_list") {
		p.log.Warn().Msg("Location API response does not contain server url list.")
		return resp
	}

	modifiedResponseText := responseText

	// only downgrade connections to the game api
	for _, server := range []string{"gb-lb", "h-lb", "jp-lb", "cn-t", "sea-lb", "eu-lb"} {
		modifiedResponseText = strings.ReplaceAll(modifiedResponseText,
			`https:\/\/summonerswar-`+server+`.qpyou.cn\/api\/gateway_c2.php`,
			`http:\/\/summonerswar-`+server+`.qpyou.cn\/api\/gateway_c2.php`)
	}

	// replace all occurences of https with http
	// modifiedResponseText = strings.ReplaceAll(modifiedResponseText, "https:", "http:")
	// modifiedResponseText = strings.ReplaceAll(modifiedResponseText, "HTTPS:", "HTTP:")

	// encrypt and compress new response text
	workmem, err := utils.CompressBytes([]byte(modifiedResponseText))
	if err != nil {
		p.log.Warn().Err(err).Msg("could not compress data")
		return resp
	}

	workmem, err = utils.EncryptBytes(workmem)
	if err != nil {
		p.log.Warn().Err(err).Msg("could not encrypt bytes")
		return resp
	}

	responseBody := base64.StdEncoding.EncodeToString(workmem)

	responseLogger.Info().Msg("Modified server response and replaced HTTPS with HTTP.")
	responseLogger.Trace().
		Str("encryptedContent", responseBody).
		Str("plainContent", modifiedResponseText).Send()

	// create new response and send it to the client
	return goproxy.NewResponse(ctx.Req, "application/json; charset=utf-8", 200, responseBody)
}

// readBody base64-decodes and decrypts a request/response body, optionally
// decompressing the result (responses are compressed, requests are not).
// Errors are logged here; callers treat a non-nil error as "leave unchanged".
func (p *Proxy) readBody(body string, decompress bool) (string, error) {
	if len(body) == 0 {
		return "", nil
	}

	encryptedBody := body

	encryptedBytes, err := base64.StdEncoding.DecodeString(encryptedBody)
	if err != nil {
		p.log.Error().Err(err).Msg("could not decode content")
		return "", errors.New("could not decode body content")
	}

	decryptedBytes, err := utils.DecryptBytes(encryptedBytes)
	if err != nil {
		p.log.Error().Err(err).Msg("could not decrypt data")
		return "", errors.New("could not decrypt body data")
	}

	// we're done if we don't need to decompress any data
	if !decompress {
		return string(decryptedBytes[:]), nil
	}

	// otherwise decompress and return decompressed data
	decompressedBytes, err := utils.DecompressBytes(decryptedBytes)
	if err != nil {
		p.log.Error().Err(err).Msg("could not decompress data")
		return "", errors.New("could not decompress body data")
	}

	return string(decompressedBytes[:]), nil
}
package service

import (
	"net/http"

	"github.com/gin-gonic/gin"

	"go-todo/database"
	"go-todo/model"
	"go-todo/util"
)

// PostHandler handles creation of a todo: it binds the JSON request body to a
// model.Todo, inserts it, and responds 201 with the created record (including
// the database-generated id).
func PostHandler(c *gin.Context) {
	db := database.Connect(c)
	defer db.Close()

	t := model.Todo{}
	if err := c.ShouldBindJSON(&t); err != nil {
		// Report the malformed payload and stop. Previously the handler fell
		// through and attempted the INSERT (and a second response write)
		// even after a bind error.
		util.BadRequest(c, err)
		return
	}

	query := "INSERT INTO todos (title, status) VALUES ($1, $2) RETURNING id"
	row := db.QueryRow(query, t.Title, t.Status)
	if err := row.Scan(&t.ID); err != nil {
		util.InternalServerError(c, err)
		return
	}

	c.JSON(http.StatusCreated, t)
}
package postgres

import (
	"database/sql"
	"errors"
	"os"
	"time"

	"github.com/CafeLucuma/go-play/users/pkg/adding"
	"github.com/CafeLucuma/go-play/users/pkg/authentication"
	"github.com/CafeLucuma/go-play/utils/logging"

	_ "github.com/lib/pq"
)

// Storage provides user persistence backed by a PostgreSQL database.
type Storage struct {
	db *sql.DB
}

// NewStorage returns a new PostgreSQL-backed storage. The connection string
// is read from the DATABASE_URL environment variable and the connection is
// verified with a ping before returning.
//
// Note: errors are returned to the caller instead of calling log.Fatal —
// library code must not terminate the process (the old log.Fatal calls also
// made the subsequent `return nil, err` unreachable).
func NewStorage() (*Storage, error) {
	storage := new(Storage)

	dbURL, ok := os.LookupEnv("DATABASE_URL")
	if !ok {
		logging.Debug.Printf("env variable %s empty", "DATABASE_URL")
		logging.Error.Printf("%s", "can't load database url from environment")
		return nil, errors.New("can't load database url from environment")
	}

	db, err := sql.Open("postgres", dbURL)
	if err != nil {
		logging.Error.Printf("can't open database connection: %s", err)
		return nil, err
	}
	storage.db = db

	if err := storage.db.Ping(); err != nil {
		logging.Error.Printf("can't ping database: %s", err)
		return nil, err
	}

	return storage, nil
}

const (
	SELECT_USER_BY_EMAIL = "SELECT user_id, salted_password FROM users WHERE email = $1"
	INSERT_USER          = "INSERT INTO users (name, last_name, email, salted_password, is_admin, created_on) VALUES ($1, $2, $3, $4, $5, $6)"
)

// CloseDB closes the underlying database connection, logging any error.
//
// Bug fix: the previous implementation called log.Fatal(s.db.Close()), which
// prints and then unconditionally exits the process — even when Close
// returned nil — so every clean shutdown killed the program.
func (s *Storage) CloseDB() {
	logging.Info.Printf("Closing db...")
	if err := s.db.Close(); err != nil {
		logging.Error.Printf("error closing db: %s", err)
	}
}

// GetUser returns the user id and salted password for the given email, or an
// error if no such user exists (sql.ErrNoRows) or the query fails.
func (s *Storage) GetUser(email string) (*authentication.User, error) {
	var user authentication.User

	userRow := s.db.QueryRow(SELECT_USER_BY_EMAIL, email)
	if err := userRow.Scan(&user.ID, &user.Password); err != nil {
		logging.Error.Printf("Error obtaining user with user email %v: %s", email, err)
		return nil, err
	}

	return &user, nil
}

// AddUser inserts a new user row, stamping CreatedOn with the current time.
func (s *Storage) AddUser(u adding.User) error {
	logging.Info.Printf("Inserting user %+v to db", u)

	newUser := User{
		Name:      u.Name,
		LastName:  u.LastName,
		Password:  u.Password,
		Email:     u.Email,
		IsAdmin:   u.IsAdmin,
		CreatedOn: time.Now(),
	}

	_, err := s.db.Exec(INSERT_USER, newUser.Name, newUser.LastName, newUser.Email,
		newUser.Password, newUser.IsAdmin, newUser.CreatedOn)
	if err != nil {
		logging.Error.Printf("Cant insert new user to database: %s", err)
		return err
	}

	return nil
}
package main

import (
	"strings"
)

// Filter decides whether a provisioning profile matches some criterion.
type Filter interface {
	match(profile provisioningProfile) bool
}

// CompoundFilter matches only when every contained filter matches (logical AND).
// An empty filter list matches everything.
type CompoundFilter struct {
	filters []Filter
}

func (receiver CompoundFilter) match(profile provisioningProfile) bool {
	for _, f := range receiver.filters {
		if !f.match(profile) {
			return false
		}
	}
	return true
}

// StringContainsFilter matches when the extracted string contains value as a
// substring.
type StringContainsFilter struct {
	value       string
	extractFunc func(profile provisioningProfile) string
}

func (receiver StringContainsFilter) match(profile provisioningProfile) bool {
	return strings.Contains(receiver.extractFunc(profile), receiver.value)
}

// StringEqualsFilter matches when the extracted string equals value exactly.
type StringEqualsFilter struct {
	value       string
	extractFunc func(profile provisioningProfile) string
}

func (receiver StringEqualsFilter) match(profile provisioningProfile) bool {
	// == is the idiomatic (and cheaper) equality test; strings.Compare is
	// documented as intended only for three-way comparison.
	return receiver.extractFunc(profile) == receiver.value
}
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package cli

import (
	"context"
	gosql "database/sql"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/envutil"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
	"github.com/cockroachdb/cockroach/pkg/util/retry"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/cockroach/pkg/workload"
	"github.com/cockroachdb/cockroach/pkg/workload/histogram"
	"github.com/cockroachdb/cockroach/pkg/workload/workloadsql"
	"github.com/cockroachdb/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"golang.org/x/time/rate"
)

// Flag sets attached to the generated `init` and `run` subcommands.
var runFlags = pflag.NewFlagSet(`run`, pflag.ContinueOnError)
var tolerateErrors = runFlags.Bool("tolerate-errors", false, "Keep running on error")
var maxRate = runFlags.Float64(
	"max-rate", 0, "Maximum frequency of operations (reads/writes). If 0, no limit.")
var maxOps = runFlags.Uint64("max-ops", 0, "Maximum number of operations to run")
var duration = runFlags.Duration("duration", 0,
	"The duration to run (in addition to --ramp). If 0, run forever.")
var doInit = runFlags.Bool("init", false, "Automatically run init. DEPRECATED: Use workload init instead.")
var ramp = runFlags.Duration("ramp", 0*time.Second, "The duration over which to ramp up load.")

var initFlags = pflag.NewFlagSet(`init`, pflag.ContinueOnError)
var drop = initFlags.Bool("drop", false, "Drop the existing database, if it exists")

var sharedFlags = pflag.NewFlagSet(`shared`, pflag.ContinueOnError)
var pprofport = sharedFlags.Int("pprofport", 33333, "Port for pprof endpoint.")
var dataLoader = sharedFlags.String("data-loader", `INSERT`,
	"How to load initial table data. Options are INSERT and IMPORT")
var initConns = sharedFlags.Int("init-conns", 16,
	"The number of connections to use during INSERT init")

var displayEvery = runFlags.Duration("display-every", time.Second,
	"How much time between every one-line activity reports.")

var displayFormat = runFlags.String("display-format", "simple",
	"Output display format (simple, incremental-json)")

var histograms = runFlags.String(
	"histograms", "",
	"File to write per-op incremental and cumulative histogram data.")
var histogramsMaxLatency = runFlags.Duration(
	"histograms-max-latency", 100*time.Second,
	"Expected maximum latency of running a query")

var securityFlags = pflag.NewFlagSet(`security`, pflag.ContinueOnError)
var secure = securityFlags.Bool("secure", false,
	"Run in secure mode (sslmode=require). "+
		"Running in secure mode expects the relevant certs to have been created for the user in the certs/ directory."+
		"For example when using root, certs/client.root.crt certs/client.root.key should exist.")
var user = securityFlags.String("user", "root", "Specify a user to run the workload as")

// init registers one `init` and one `run` subcommand per registered workload
// generator, wiring each generator's own flags into the generated command.
func init() {
	_ = sharedFlags.MarkHidden("pprofport")

	AddSubCmd(func(userFacing bool) *cobra.Command {
		var initCmd = SetCmdDefaults(&cobra.Command{
			Use:   `init`,
			Short: `set up tables for a workload`,
		})
		for _, meta := range workload.Registered() {
			gen := meta.New()
			var genFlags *pflag.FlagSet
			if f, ok := gen.(workload.Flagser); ok {
				genFlags = f.Flags().FlagSet
			}

			genInitCmd := SetCmdDefaults(&cobra.Command{
				Use:   meta.Name + " [pgurl...]",
				Short: meta.Description,
				Long:  meta.Description + meta.Details,
				Args:  cobra.ArbitraryArgs,
			})
			genInitCmd.Flags().AddFlagSet(initFlags)
			genInitCmd.Flags().AddFlagSet(sharedFlags)
			genInitCmd.Flags().AddFlagSet(genFlags)
			genInitCmd.Flags().AddFlagSet(securityFlags)
			genInitCmd.Run = CmdHelper(gen, runInit)
			if userFacing && !meta.PublicFacing {
				genInitCmd.Hidden = true
			}
			initCmd.AddCommand(genInitCmd)
		}
		return initCmd
	})
	AddSubCmd(func(userFacing bool) *cobra.Command {
		var runCmd = SetCmdDefaults(&cobra.Command{
			Use:   `run`,
			Short: `run a workload's operations against a cluster`,
		})
		for _, meta := range workload.Registered() {
			gen := meta.New()
			if _, ok := gen.(workload.Opser); !ok {
				// If Opser is not implemented, this would just fail at runtime,
				// so omit it.
				continue
			}
			var genFlags *pflag.FlagSet
			if f, ok := gen.(workload.Flagser); ok {
				genFlags = f.Flags().FlagSet
			}

			genRunCmd := SetCmdDefaults(&cobra.Command{
				Use:   meta.Name + " [pgurl...]",
				Short: meta.Description,
				Long:  meta.Description + meta.Details,
				Args:  cobra.ArbitraryArgs,
			})
			genRunCmd.Flags().AddFlagSet(runFlags)
			genRunCmd.Flags().AddFlagSet(sharedFlags)
			genRunCmd.Flags().AddFlagSet(genFlags)
			genRunCmd.Flags().AddFlagSet(securityFlags)
			initFlags.VisitAll(func(initFlag *pflag.Flag) {
				// Every init flag is a valid run flag that implies the --init option.
				f := *initFlag
				f.Usage += ` (implies --init)`
				genRunCmd.Flags().AddFlag(&f)
			})
			genRunCmd.Run = CmdHelper(gen, runRun)
			if userFacing && !meta.PublicFacing {
				genRunCmd.Hidden = true
			}
			runCmd.AddCommand(genRunCmd)
		}
		return runCmd
	})
}

// CmdHelper handles common workload command logic, such as error handling and
// ensuring the database name in the connection string (if provided) matches the
// expected one.
func CmdHelper(
	gen workload.Generator, fn func(gen workload.Generator, urls []string, dbName string) error,
) func(*cobra.Command, []string) {
	return HandleErrs(func(cmd *cobra.Command, args []string) error {
		// Apply the logging configuration if none was set already.
		if active, _ := log.IsActive(); !active {
			cfg := logconfig.DefaultStderrConfig()
			if err := cfg.Validate(nil /* no default log directory */); err != nil {
				return err
			}
			if _, err := log.ApplyConfig(cfg); err != nil {
				return err
			}
		}

		if h, ok := gen.(workload.Hookser); ok {
			if h.Hooks().Validate != nil {
				if err := h.Hooks().Validate(); err != nil {
					return errors.Wrapf(err, "could not validate")
				}
			}
		}

		// HACK: Steal the dbOverride out of flags. This should go away
		// once more of run.go moves inside workload.
		var dbOverride string
		if dbFlag := cmd.Flag(`db`); dbFlag != nil {
			dbOverride = dbFlag.Value.String()
		}
		urls := args
		if len(urls) == 0 {
			crdbDefaultURL := fmt.Sprintf(`postgres://%s@localhost:26257?sslmode=disable`, *user)
			if *secure {
				crdbDefaultURL = fmt.Sprintf(
					// This URL expects the certs to have been created by the user.
					`postgres://%s@localhost:26257?sslcert=certs/client.%s.crt&sslkey=certs/client.%s.key&sslrootcert=certs/ca.crt&sslmode=require`,
					*user, *user, *user)
			}

			urls = []string{crdbDefaultURL}
		}
		dbName, err := workload.SanitizeUrls(gen, dbOverride, urls)
		if err != nil {
			return err
		}
		return fn(gen, urls, dbName)
	})
}

// SetCmdDefaults ensures that the provided Cobra command will properly report
// an error if the user specifies an invalid subcommand. It is safe to call on
// any Cobra command.
//
// This is a wontfix bug in Cobra: https://github.com/spf13/cobra/pull/329
func SetCmdDefaults(cmd *cobra.Command) *cobra.Command {
	if cmd.Run == nil && cmd.RunE == nil {
		cmd.Run = func(cmd *cobra.Command, args []string) {
			_ = cmd.Usage()
		}
	}
	if cmd.Args == nil {
		cmd.Args = cobra.NoArgs
	}
	return cmd
}

// numOps keeps a global count of successful operations.
var numOps uint64

// workerRun is an infinite loop in which the worker continuously attempts to
// read / write blocks of random data into a table in cockroach DB. The function
// returns only when the provided context is canceled.
func workerRun(
	ctx context.Context,
	errCh chan<- error,
	wg *sync.WaitGroup,
	limiter *rate.Limiter,
	workFn func(context.Context) error,
) {
	if wg != nil {
		defer wg.Done()
	}

	for {
		if ctx.Err() != nil {
			return
		}

		// Limit how quickly the load generator sends requests based on --max-rate.
		if limiter != nil {
			if err := limiter.Wait(ctx); err != nil {
				return
			}
		}

		if err := workFn(ctx); err != nil {
			if ctx.Err() != nil && errors.Is(err, ctx.Err()) {
				return
			}
			errCh <- err
			continue
		}

		v := atomic.AddUint64(&numOps, 1)
		if *maxOps > 0 && v >= *maxOps {
			return
		}
	}
}

// runInit is the implementation of the generated `init` subcommands: it opens
// a connection to the cluster and loads the workload's initial data.
func runInit(gen workload.Generator, urls []string, dbName string) error {
	ctx := context.Background()

	initDB, err := gosql.Open(`cockroach`, strings.Join(urls, ` `))
	if err != nil {
		return err
	}

	startPProfEndPoint(ctx)
	return runInitImpl(ctx, gen, initDB, dbName)
}

// runInitImpl (re)creates the target database and loads the initial data
// using the loader selected by --data-loader.
func runInitImpl(
	ctx context.Context, gen workload.Generator, initDB *gosql.DB, dbName string,
) error {
	if *drop {
		if _, err := initDB.ExecContext(ctx, `DROP DATABASE IF EXISTS `+dbName); err != nil {
			return err
		}
	}
	if _, err := initDB.ExecContext(ctx, `CREATE DATABASE IF NOT EXISTS `+dbName); err != nil {
		return err
	}

	var l workload.InitialDataLoader
	switch strings.ToLower(*dataLoader) {
	case `insert`, `inserts`:
		l = workloadsql.InsertsDataLoader{
			Concurrency: *initConns,
		}
	case `import`, `imports`:
		l = workload.ImportDataLoader
	default:
		return errors.Errorf(`unknown data loader: %s`, *dataLoader)
	}

	_, err := workloadsql.Setup(ctx, initDB, gen, l)
	return err
}

// startPProfEndPoint enables block/mutex profiling and serves pprof on
// --pprofport in a background goroutine.
func startPProfEndPoint(ctx context.Context) {
	b := envutil.EnvOrDefaultInt64("COCKROACH_BLOCK_PROFILE_RATE",
		10000000 /* 1 sample per 10 milliseconds spent blocking */)

	m := envutil.EnvOrDefaultInt("COCKROACH_MUTEX_PROFILE_RATE",
		1000 /* 1 sample per 1000 mutex contention events */)
	runtime.SetBlockProfileRate(int(b))
	runtime.SetMutexProfileFraction(m)

	go func() {
		err := http.ListenAndServe(":"+strconv.Itoa(*pprofport), nil)
		if err != nil {
			log.Errorf(ctx, "%v", err)
		}
	}()
}

// runRun is the implementation of the generated `run` subcommands: optional
// init, ramp-up, worker goroutines, periodic stats reporting, and final
// histogram output on completion or interrupt.
func runRun(gen workload.Generator, urls []string, dbName string) error {
	ctx := context.Background()

	var formatter outputFormat
	switch *displayFormat {
	case "simple":
		formatter = &textFormatter{}
	case "incremental-json":
		formatter = &jsonFormatter{w: os.Stdout}
	default:
		return errors.Errorf("unknown display format: %s", *displayFormat)
	}

	startPProfEndPoint(ctx)
	initDB, err := gosql.Open(`cockroach`, strings.Join(urls, ` `))
	if err != nil {
		return err
	}
	if *doInit || *drop {
		log.Info(ctx, `DEPRECATION: `+
			`the --init flag on "workload run" will no longer be supported after 19.2`)
		for {
			err = runInitImpl(ctx, gen, initDB, dbName)
			if err == nil {
				break
			}
			if !*tolerateErrors {
				return err
			}
			log.Infof(ctx, "retrying after error during init: %v", err)
		}
	}

	var limiter *rate.Limiter
	if *maxRate > 0 {
		// Create a limiter using maxRate specified on the command line and
		// with allowed burst of 1 at the maximum allowed rate.
		limiter = rate.NewLimiter(rate.Limit(*maxRate), 1)
	}

	o, ok := gen.(workload.Opser)
	if !ok {
		return errors.Errorf(`no operations defined for %s`, gen.Meta().Name)
	}
	reg := histogram.NewRegistry(*histogramsMaxLatency)
	var ops workload.QueryLoad
	prepareStart := timeutil.Now()
	log.Infof(ctx, "creating load generator...")
	const prepareTimeout = 60 * time.Minute
	prepareCtx, cancel := context.WithTimeout(ctx, prepareTimeout)
	defer cancel()
	if prepareErr := func(ctx context.Context) error {
		retry := retry.StartWithCtx(ctx, retry.Options{})
		var err error
		for retry.Next() {
			if err != nil {
				log.Warningf(ctx, "retrying after error while creating load: %v", err)
			}
			ops, err = o.Ops(ctx, urls, reg)
			if err == nil {
				return nil
			}
			err = errors.Wrapf(err, "failed to initialize the load generator")
			if !*tolerateErrors {
				return err
			}
		}
		if ctx.Err() != nil {
			// Don't retry endlessly. Note that this retry loop is not under the
			// control of --duration, so we're avoiding retrying endlessly.
			log.Errorf(ctx, "Attempt to create load generator failed. "+
				"It's been more than %s since we started trying to create the load generator "+
				"so we're giving up. Last failure: %s", prepareTimeout, err)
		}
		return err
	}(prepareCtx); prepareErr != nil {
		return prepareErr
	}
	log.Infof(ctx, "creating load generator... done (took %s)", timeutil.Now().Sub(prepareStart))

	start := timeutil.Now()
	errCh := make(chan error)
	var rampDone chan struct{}
	if *ramp > 0 {
		// Create a channel to signal when the ramp period finishes. Will
		// be reset to nil when consumed by the process loop below.
		rampDone = make(chan struct{})
	}

	workersCtx, cancelWorkers := context.WithCancel(ctx)
	defer cancelWorkers()
	var wg sync.WaitGroup
	wg.Add(len(ops.WorkerFns))
	go func() {
		// If a ramp period was specified, start all of the workers gradually
		// with a new context.
		var rampCtx context.Context
		if rampDone != nil {
			var cancel func()
			rampCtx, cancel = context.WithTimeout(workersCtx, *ramp)
			defer cancel()
		}

		for i, workFn := range ops.WorkerFns {
			go func(i int, workFn func(context.Context) error) {
				// If a ramp period was specified, start all of the workers
				// gradually with a new context.
				if rampCtx != nil {
					rampPerWorker := *ramp / time.Duration(len(ops.WorkerFns))
					time.Sleep(time.Duration(i) * rampPerWorker)
					workerRun(rampCtx, errCh, nil /* wg */, limiter, workFn)
				}

				// Start worker again, this time with the main context.
				workerRun(workersCtx, errCh, &wg, limiter, workFn)
			}(i, workFn)
		}

		if rampCtx != nil {
			// Wait for the ramp period to finish, then notify the process loop
			// below to reset timers and histograms.
			<-rampCtx.Done()
			close(rampDone)
		}
	}()

	ticker := time.NewTicker(*displayEvery)
	defer ticker.Stop()
	done := make(chan os.Signal, 3)
	signal.Notify(done, exitSignals...)

	go func() {
		wg.Wait()
		done <- os.Interrupt
	}()

	if *duration > 0 {
		go func() {
			time.Sleep(*duration + *ramp)
			done <- os.Interrupt
		}()
	}

	var jsonEnc *json.Encoder
	if *histograms != "" {
		_ = os.MkdirAll(filepath.Dir(*histograms), 0755)
		jsonF, err := os.Create(*histograms)
		if err != nil {
			return err
		}
		jsonEnc = json.NewEncoder(jsonF)
	}

	everySecond := log.Every(*displayEvery)
	for {
		select {
		case err := <-errCh:
			formatter.outputError(err)
			if *tolerateErrors {
				if everySecond.ShouldLog() {
					log.Errorf(ctx, "%v", err)
				}
				continue
			}
			return err

		case <-ticker.C:
			startElapsed := timeutil.Since(start)
			reg.Tick(func(t histogram.Tick) {
				formatter.outputTick(startElapsed, t)
				if jsonEnc != nil && rampDone == nil {
					_ = jsonEnc.Encode(t.Snapshot())
				}
			})

		// Once the load generator is fully ramped up, we reset the histogram
		// and the start time to throw away the stats for the ramp up period.
		case <-rampDone:
			rampDone = nil
			start = timeutil.Now()
			formatter.rampDone()
			reg.Tick(func(t histogram.Tick) {
				t.Cumulative.Reset()
				t.Hist.Reset()
			})

		case <-done:
			cancelWorkers()
			if ops.Close != nil {
				ops.Close(ctx)
			}

			startElapsed := timeutil.Since(start)
			resultTick := histogram.Tick{Name: ops.ResultHist}
			reg.Tick(func(t histogram.Tick) {
				formatter.outputTotal(startElapsed, t)
				if jsonEnc != nil {
					// Note that we're outputting the delta from the last tick. The
					// cumulative histogram can be computed by merging all of the
					// per-tick histograms.
					_ = jsonEnc.Encode(t.Snapshot())
				}
				if ops.ResultHist == `` || ops.ResultHist == t.Name {
					if resultTick.Cumulative == nil {
						resultTick.Now = t.Now
						resultTick.Cumulative = t.Cumulative
					} else {
						resultTick.Cumulative.Merge(t.Cumulative)
					}
				}
			})
			formatter.outputResult(startElapsed, resultTick)

			if h, ok := gen.(workload.Hookser); ok {
				if h.Hooks().PostRun != nil {
					if err := h.Hooks().PostRun(startElapsed); err != nil {
						fmt.Printf("failed post-run hook: %v\n", err)
					}
				}
			}

			return nil
		}
	}
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gvr

import (
	"context"

	"github.com/google/gapid/core/image"
	"github.com/google/gapid/core/math/interval"
	"github.com/google/gapid/gapis/api"
	"github.com/google/gapid/gapis/api/gles"
	"github.com/google/gapid/gapis/api/sync"
	"github.com/google/gapid/gapis/api/transform"
	"github.com/google/gapid/gapis/replay"
	"github.com/google/gapid/gapis/service"
	"github.com/google/gapid/gapis/service/path"
)

// Compile-time check that API implements replay.QueryFramebufferAttachment.
var _ = replay.QueryFramebufferAttachment(API{})

// Root returns the path to the root of the state to display. It can vary based
// on filtering mode. Returning nil, nil indicates there is no state to show at
// this point in the capture.
func (s *State) Root(ctx context.Context, p *path.State, r *path.ResolveConfig) (path.Node, error) {
	return nil, nil
}

// SetupInitialState sanitizes deserialized state to make it valid.
// It can fill in any derived data which we choose not to serialize,
// or it can apply backward-compatibility fixes for older traces.
func (State) SetupInitialState(ctx context.Context) {}

// preMutate is a no-op hook called before a command is mutated against the
// global state.
func (s *State) preMutate(ctx context.Context, g *api.GlobalState, cmd api.Cmd) error {
	return nil
}

// customState carries no data; init is a no-op.
type customState struct{}

func (customState) init(*State) {}

// RebuildState is a no-op to conform to the api.API interface.
func (API) RebuildState(ctx context.Context, g *api.GlobalState) ([]api.Cmd, interval.U64RangeList) {
	return nil, nil
}

// QueryFramebufferAttachment resolves framebuffer index 0 to the framebuffer
// bound at the given command, then delegates the query to the GLES API.
func (API) QueryFramebufferAttachment(
	ctx context.Context,
	intent replay.Intent,
	mgr replay.Manager,
	after []uint64,
	width, height uint32,
	attachment api.FramebufferAttachment,
	framebufferIndex uint32,
	drawMode service.DrawMode,
	disableReplayOptimization bool,
	displayToSurface bool,
	hints *service.UsageHints) (*image.Data, error) {

	if framebufferIndex == 0 {
		fb, err := getFramebuffer(ctx, api.CmdID(after[0]))
		if err != nil {
			return nil, err
		}
		framebufferIndex = uint32(fb)
	}
	return gles.API{}.QueryFramebufferAttachment(
		ctx, intent, mgr, after, width, height, attachment, framebufferIndex, drawMode,
		disableReplayOptimization, displayToSurface, hints,
	)
}

// GetFramebufferAttachmentInfo returns the width, height and format of the
// specified framebuffer attachment.
func (API) GetFramebufferAttachmentInfo(
	ctx context.Context,
	after []uint64,
	state *api.GlobalState,
	thread uint64,
	attachment api.FramebufferAttachment) (inf api.FramebufferAttachmentInfo, err error) {

	fb, err := getFramebuffer(ctx, api.CmdID(after[0]))
	if err != nil {
		return api.FramebufferAttachmentInfo{}, err
	}
	return gles.GetFramebufferAttachmentInfoByID(state, thread, attachment, fb)
}

// Context returns the active context for the given state and thread.
func (API) Context(ctx context.Context, s *api.GlobalState, thread uint64) api.Context {
	return gles.API{}.Context(ctx, s, thread)
}

// Mesh implements the api.MeshProvider interface.
func (API) Mesh(ctx context.Context, o interface{}, p *path.Mesh, r *path.ResolveConfig) (*api.Mesh, error) {
	return nil, nil
}

var _ sync.SynchronizedAPI = API{}

// GetTerminator returns a transform that will allow the given capture to be terminated
// after a command
func (API) GetTerminator(ctx context.Context, c *path.Capture) (transform.Terminator, error) {
	return nil, nil
}

// ResolveSynchronization resolve all of the synchronization information for
// the given API
func (API) ResolveSynchronization(ctx context.Context, d *sync.Data, c *path.Capture) error {
	return nil
}

// FlattenSubcommandIdx flattens grouped ids to their flattened linear ids if possible.
func (API) FlattenSubcommandIdx(idx api.SubCmdIdx, data *sync.Data, unused bool) (api.CmdID, bool) {
	sg, ok := data.SubcommandReferences[api.CmdID(idx[0])]
	if !ok {
		return api.CmdID(0), false
	}
	for _, v := range sg {
		if v.Index.Equals(idx[1:]) {
			if v.IsCallerGroup {
				return v.GeneratingCmd, true
			}
			break
		}
	}
	return api.CmdID(0), false
}

// IsTrivialTerminator returns true if the terminator is just stopping at the given index
func (API) IsTrivialTerminator(ctx context.Context, p *path.Capture, command api.SubCmdIdx) (bool, error) {
	return true, nil
}

// RecoverMidExecutionCommand returns a virtual command, used to describe the
// a subcommand that was created before the start of the trace
// GVR has no subcommands of this type, so this should never be called
func (API) RecoverMidExecutionCommand(ctx context.Context, c *path.Capture, i interface{}) (api.Cmd, error) {
	return nil, sync.NoMECSubcommandsError{}
}

// MutateSubcommands mutates the given Cmd and calls callbacks for subcommands
// called before and after executing each subcommand callback.
func (API) MutateSubcommands(ctx context.Context, id api.CmdID, cmd api.Cmd,
	s *api.GlobalState, preSubCmdCallback func(*api.GlobalState, api.SubCmdIdx, api.Cmd),
	postSubCmdCallback func(*api.GlobalState, api.SubCmdIdx, api.Cmd)) error {
	return nil
}
package daemon

import (
	"time"

	"github.com/alauda/kube-ovn/pkg/util"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog"
)

// InitNodeGateway init ovn0.
//
// It polls the node's annotations until the OVN controller has populated the
// MAC address, IP address, port name and gateway annotations, then configures
// the node NIC from them. A transient API error aborts with that error; a
// missing annotation set is retried every 3 seconds indefinitely.
func InitNodeGateway(config *Configuration) error {
	var portName, ipAddr, macAddr, gw string
	// The node name never changes, so resolve it once outside the retry loop.
	nodeName := config.NodeName
	for {
		node, err := config.KubeClient.CoreV1().Nodes().Get(nodeName, v1.GetOptions{})
		if err != nil {
			klog.Errorf("failed to get node %s info %v", nodeName, err)
			return err
		}
		macAddr = node.Annotations[util.MacAddressAnnotation]
		ipAddr = node.Annotations[util.IpAddressAnnotation]
		portName = node.Annotations[util.PortNameAnnotation]
		gw = node.Annotations[util.GatewayAnnotation]

		if macAddr == "" || ipAddr == "" || portName == "" || gw == "" {
			// Annotations not ready yet; wait and re-read the node.
			klog.Errorf("can not find macAddr, ipAddr, portName and gw, wait 3 seconds...")
			time.Sleep(3 * time.Second)
		} else {
			break
		}
	}
	return configureNodeNic(portName, ipAddr, macAddr, gw)
}
// Package log provides a small leveled logger that echoes to stdout and,
// optionally, appends to a per-process log file. File output is staged in an
// in-memory buffer and flushed either when the buffer grows past
// writeByteSize or periodically by a background goroutine.
package log

import (
	"bufio"
	"bytes"
	"fmt"
	"net"
	"os"
	"strings"
	"sync"
	"time"
)

const (
	// defaultFlushSec is the background flush interval, in seconds.
	defaultFlushSec = 30
	// writeByteSize is the staging-buffer size that triggers an eager write.
	writeByteSize = 2048
)

// Logger is the minimal leveled logging interface exposed by this package.
type Logger interface {
	Printf(format string, v ...interface{})
	Infof(format string, v ...interface{})
	Debugf(format string, v ...interface{})
	Warnf(format string, v ...interface{})
	Errorf(format string, v ...interface{})
	Close()
}

var (
	filepath = "./log/"
	// filename encodes process start time and local IP, e.g. "060102150405_1.2.3.4.log".
	filename = time.Now().Format("060102150405") + "_" + localIP() + ".log"

	opt = Options{
		isPrint:     true,
		isWroteFile: true,
		flushSec:    defaultFlushSec,
		filePath:    filepath,
		fileName:    filename,
	}

	// vlog is the package-level default logger used by the free functions below.
	vlog Logger = newLogger(opt)
)

// localIP returns the first non-loopback IPv4 address of the host, falling
// back to "localhost" when none can be determined.
func localIP() string {
	localAddrs, err := net.InterfaceAddrs()
	if err != nil {
		println(err.Error())
	}
	var ip = "localhost"
	for _, address := range localAddrs {
		ipn, ok := address.(*net.IPNet)
		if !ok {
			continue
		}
		if ipn.IP.IsLoopback() {
			continue
		}
		if ipn.IP.To4() != nil {
			ip = ipn.IP.String()
			break
		}
	}
	return ip
}

// Printf logs through the package-level logger without a level prefix.
func Printf(format string, v ...interface{}) {
	vlog.Printf(format, v...)
}

// Infof logs at INFO level through the package-level logger.
func Infof(format string, v ...interface{}) {
	vlog.Infof(format, v...)
}

// Debugf logs at DEBUG level through the package-level logger.
func Debugf(format string, v ...interface{}) {
	vlog.Debugf(format, v...)
}

// Warnf logs at WARN level through the package-level logger.
func Warnf(format string, v ...interface{}) {
	vlog.Warnf(format, v...)
}

// Errorf logs at ERROR level through the package-level logger.
func Errorf(format string, v ...interface{}) {
	vlog.Errorf(format, v...)
}

// logger implements Logger. mu guards buf and wr against concurrent use by
// callers and the background flusher.
type logger struct {
	opt  Options
	file *os.File
	wr   *bufio.Writer
	buf  *bytes.Buffer
	mu   sync.Mutex
}

// NewLogger builds a Logger from functional options.
func NewLogger(opts ...Option) Logger {
	option := Options{}
	for _, opt := range opts {
		option = opt(option)
	}
	return newLogger(option)
}

// newLogger applies defaults, opens the log file (when file output is
// enabled) and starts the background flusher.
func newLogger(opt Options) Logger {
	l := &logger{
		opt: opt,
	}
	if !l.opt.isWroteFile {
		return l
	}
	if l.opt.filePath == "" {
		l.opt.filePath = filepath
	}
	if l.opt.fileName == "" {
		l.opt.fileName = filename
	}
	if l.opt.flushSec == 0 {
		l.opt.flushSec = defaultFlushSec
	}
	if !strings.HasSuffix(l.opt.filePath, string(os.PathSeparator)) {
		l.opt.filePath += string(os.PathSeparator)
	}
	l.mkdir()
	f, err := os.OpenFile(l.opt.filePath+l.opt.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		panic(err)
	}
	l.buf = &bytes.Buffer{}
	l.file = f
	l.wr = bufio.NewWriter(f)
	go l.backgroundWrite()
	return l
}

func (l *logger) Printf(format string, v ...interface{}) {
	l.print("", format, v...)
}

func (l *logger) Infof(format string, v ...interface{}) {
	l.print("[INFO]", format, v...)
}

func (l *logger) Debugf(format string, v ...interface{}) {
	l.print("[DEBUG]", format, v...)
}

func (l *logger) Warnf(format string, v ...interface{}) {
	l.print("[WARN]", format, v...)
}

func (l *logger) Errorf(format string, v ...interface{}) {
	l.print("[ERROR]", format, v...)
}

// Close drains the staging buffer, flushes the bufio writer and closes the
// file. It takes the lock so it cannot race with the background flusher.
func (l *logger) Close() {
	if l.opt.isWroteFile {
		l.mu.Lock()
		l.write()
		_ = l.wr.Flush()
		_ = l.file.Close()
		l.mu.Unlock()
	}
}

// print formats the message, echoes it to stdout and stages it for file
// output. head is the level tag ("" for Printf).
func (l *logger) print(head, format string, v ...interface{}) {
	if !l.opt.isPrint {
		return
	}
	format = fmt.Sprintf(format, v...)
	if head != "" {
		format = fmt.Sprintf("%s %s: %s", head, l.now(), format)
	}
	println(format)
	if !l.opt.isWroteFile {
		return
	}
	if !strings.HasSuffix(format, "\n") {
		format += "\n"
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	_, err := l.buf.WriteString(format)
	if err != nil {
		// The builtin println cannot format an error value; print its text.
		println(err.Error())
		return
	}
	if l.buf.Len() >= writeByteSize {
		l.write()
	}
}

// now returns the timestamp used in log line prefixes.
func (l *logger) now() string {
	return time.Now().Format("2006-01-02 15:04:05")
}

// backgroundWrite periodically moves staged bytes to the bufio writer and
// flushes it to the OS, so lines become visible on disk at least once per
// flush interval. Callers never stop this goroutine; it lives for the
// process lifetime.
func (l *logger) backgroundWrite() {
	t := time.NewTicker(time.Duration(l.opt.flushSec) * time.Second)
	for range t.C {
		l.mu.Lock()
		l.write()
		_ = l.wr.Flush()
		l.mu.Unlock()
	}
}

// write moves the staging buffer's contents into the bufio writer.
// Callers must hold l.mu.
func (l *logger) write() {
	if l.buf.Len() <= 0 {
		return
	}
	_, err := l.wr.WriteString(l.buf.String())
	if err != nil {
		println(err.Error())
	}
	// Drain the staging buffer. The previous implementation never reset it,
	// so every flush re-wrote all previously logged lines and the buffer
	// grew without bound.
	l.buf.Reset()
}

// mkdir ensures the log directory exists, creating parents as needed.
func (l *logger) mkdir() {
	f, err := os.Stat(l.opt.filePath)
	if err != nil || f.IsDir() == false {
		// MkdirAll instead of Mkdir so nested log directories also work.
		if err := os.MkdirAll(l.opt.filePath, os.ModePerm); err != nil {
			panic("日志目录创建失败, " + err.Error())
		}
	}
}
package main

import (
	"fmt"
	"testing"
)

// Test_run is a table-driven test for run. Each case supplies a set of
// mountains and the expected result name.
//
// NOTE(review): judging only from these fixtures, run appears to return the
// name of the second-highest mountain in the set — confirm against run's
// definition, which is not in this file.
func Test_run(t *testing.T) {
	type args struct {
		mt Mountains
	}
	tests := []struct {
		args args
		want string
	}{
		{
			args{Mountains{
				&Mount{"Everest", 8849},
				&Mount{"K2", 8611},
				&Mount{"Kangchenjunga", 8586},
			}},
			"K2",
		},
		{
			args{Mountains{
				&Mount{"Kita", 3193},
				&Mount{"Aino", 3189},
				&Mount{"Fuji", 3776},
				&Mount{"Okuhotaka", 3190},
			}},
			"Kita",
		},
		{
			args{Mountains{
				&Mount{"QCFium", 2846},
				&Mount{"chokudai", 2992},
				&Mount{"kyoprofriends", 2432},
				&Mount{"penguinman", 2390},
			}},
			"QCFium",
		},
	}
	for i, tt := range tests {
		t.Run(fmt.Sprintf("Test %d", i), func(t *testing.T) {
			if got := run(tt.args.mt); got != tt.want {
				t.Errorf("run() = %v, want %v", got, tt.want)
			}
		})
	}
}
package main

/*
func main() {
   var a = 21
   var b = 10
   var c int

   c = a + b
   println(c)
   a--
   println(a)
}

func main() {
	var a = 10
	if a < 10 {
		print(a)
	} else {
		a++
		print(a)
	}
}

func main() {
	var grade string = "B"
	var marks int = 70

	switch marks {
	case 90:
		grade = "A"
	case 80,70:
		grade = "B"
	case 60:
		grade = "C"
	default:
		grade = "D"
	}

	switch {
	case grade == "A":
		print("1")
	case grade == "B":
		print("2")
	default:
		print("3")
	}
}

import "fmt"

func main() {
	var b int = 3
	var a int
	number := [3]int{1,2}

	for a := 0 ; a < b ; a++ {
		fmt.Println(a)
	}

	for a < b {
		a ++
		fmt.Println(a)
	}

	for i,x := range number {
		fmt.Println(i,x)
	}
}

import "fmt"

func main() {
	var i, j int
	for i = 1; i < 10; i++ {
		for j = 1; j <= i; j++ {
			fmt.Printf(" %d*%d=%d", j, i, i*j)
		}
		print("\n")
	}
}
*/

import "math"

// pow returns base raised to exp, capped at limit: min(base**exp, limit).
func pow(base, exp, limit float64) float64 {
	if v := math.Pow(base, exp); v < limit {
		return v
	}
	return limit
}

// main prints two sample evaluations: one where the cap wins (2^5 capped at
// 1) and one where the power wins (2^1 under a cap of 5).
func main() {
	capped := int(pow(2, 5, 1))
	uncapped := int(pow(2, 1, 5))
	print(capped, uncapped)
}
// utilgen holds various utility function primarily for documenting the algs output and performance // as well as some common generic-like functions package utilgen import ( "errors" "fmt" "time" "github.com/paulidealiste/goalgs/rangen" ) // Simple timetracker function called with defer at the onset of the function. func Timetracker(start time.Time, fname string) { elapsed := time.Since(start) fmt.Printf("Function %s ran for %s\n", fname, elapsed) } //Sum of the supplied values func Sumfun(inpart []float64) float64 { if len(inpart) < 1 { err := errors.New("Input array must be at least two elements long.") panic(err) } var outsum float64 for _, v := range inpart { outsum = outsum + v } return outsum } // Return indices of the elements in the supplied array func Retind(inslice []float64, elems []float64) []int { if len(inslice) < 1 || len(elems) != 2 { err := errors.New("Array of more than one element is required for inslice while a tuple is required for elems.") panic(err) } var inind []int for i, v := range inslice { if v == elems[0] || v == elems[1] { inind = append(inind, i) } } if inind[1] > inind[0] { inind[0], inind[1] = inind[1], inind[0] } return inind } // Swap items in the supplied slice/tuple which sould be a pair of values. 
func Swapitems(intuple []float64) []float64 { if len(intuple) != 2 { err := errors.New("Tuple (slice of length 2) is required for swapping.") panic(err) } intuple[0], intuple[1] = intuple[1], intuple[0] return intuple } // Reverse input slice func Reverseslice(inslice []float64) []float64 { for l := 0; l < len(inslice)/2; l++ { r := len(inslice) - 1 - l temp := inslice[l] inslice[l] = inslice[r] inslice[r] = temp } return inslice } // Random array subset with percantage sample size - array wrap func RandomArraySubset(inslice []float64, percsample int) []float64 { sampsizefloat := float64(len(inslice)) * float64(percsample) / 100 if sampsizefloat < 1 { err := errors.New("Input array has too few elements") panic(err) } sampsize := int(sampsizefloat) randtarget := rangen.RandIntegerInRange(0, len(inslice)-1) fmt.Println(len(inslice), randtarget, sampsize) if randtarget+sampsize > len(inslice) { diff := (randtarget + sampsize) - len(inslice) arrEnd := inslice[randtarget:len(inslice)] arrStart := inslice[0:diff] return append(arrEnd, arrStart...) } return inslice[randtarget : randtarget+sampsize] }
package elevengo

import "strconv"

// FileList gets the file list under the specific category.
//
// "0" is a special categoryId which means the root,
// everything starts from here.
//
// `offset` is based on zero.
//
// `limit` can not be lower than `FileListMinLimit`,
// and can not be higher than `FileListMaxLimit`; out-of-range values are
// silently clamped.
//
// `sort` is optional, pass `nil` will use the default sort option:
// sorting by modify time in desc.
func (c *Client) FileList(categoryId string, offset, limit int, sort *SortOption) (files []*CloudFile, remain int, err error) {
	// Clamp the page size into the supported range.
	if limit < FileListMinLimit {
		limit = FileListMinLimit
	} else if limit > FileListMaxLimit {
		limit = FileListMaxLimit
	}
	qs := newQueryString().
		WithString("aid", "1").
		WithString("cid", categoryId).
		WithString("o", orderFlagTime).
		WithString("asc", "0").
		WithString("show_dir", "1").
		WithString("snap", "0").
		WithString("natsort", "1").
		WithString("format", "json").
		WithInt("offset", offset).
		WithInt("limit", limit)
	// Override the default sort parameters when the caller supplied any.
	if sort != nil {
		if sort.asc {
			qs.WithString("asc", "1")
		}
		if sort.flag != "" {
			qs.WithString("o", sort.flag)
		}
	}
	// Call the remote API; a false State flag is mapped to an API error.
	result := &_FileListResult{}
	err = c.requestJson(apiFileList, qs, nil, result)
	if err == nil && !result.State {
		err = apiError(result.MessageCode)
	}
	if err != nil {
		return
	}
	// Remaining file count: entries that exist beyond this page.
	remain = result.TotalCount - (result.Offset + result.PageSize)
	if remain < 0 {
		remain = 0
	}
	// Convert the raw API records into CloudFile values.
	files = make([]*CloudFile, len(result.Data))
	for index, data := range result.Data {
		info := &CloudFile{
			IsCategory: false,
			// Entries whose absolute position is below SysCount are system entries.
			IsSystem:   (index + result.Offset) < result.SysCount,
			CategoryId: data.CategoryId,
			Name:       data.Name,
			Size:       data.Size,
			PickCode:   data.PickCode,
		}
		// Timestamps arrive as decimal strings; parse failures leave zero.
		info.CreateTime, _ = strconv.ParseInt(data.CreateTime, 10, 64)
		info.UpdateTime, _ = strconv.ParseInt(data.UpdateTime, 10, 64)
		// A nil FileId marks a category (directory) entry.
		if data.FileId == nil {
			info.IsCategory = true
			info.ParentId = *data.ParentId
		} else {
			info.FileId = *data.FileId
			info.Sha1 = *data.Sha1
		}
		files[index] = info
	}
	return
}

// FileSearch searches files whose name contains the specific keyword,
// the searching is recursive, starts from the specific category.
//
// `keyword` can not be empty
//
// `offset` is based on zero.
//
// `limit` can not be lower than `FileListMinLimit`,
// and can not be higher than `FileListMaxLimit`; out-of-range values are
// silently clamped.
func (c *Client) FileSearch(categoryId, keyword string, offset, limit int) (files []*CloudFile, remain int, err error) {
	if len(keyword) == 0 {
		return nil, 0, ErrEmptyKeyword
	}
	// Clamp the page size into the supported range.
	if limit < FileListMinLimit {
		limit = FileListMinLimit
	} else if limit > FileListMaxLimit {
		limit = FileListMaxLimit
	}
	qs := newQueryString().
		WithString("aid", "1").
		WithString("cid", categoryId).
		WithString("search_value", keyword).
		WithString("format", "json").
		WithInt("offset", offset).
		WithInt("limit", limit)
	// Call the remote API; a false State flag is mapped to an API error.
	result := &_FileSearchResult{}
	err = c.requestJson(apiFileSearch, qs, nil, result)
	if err == nil && !result.State {
		err = apiError(result.MessageCode)
	}
	if err != nil {
		return
	}
	// Remaining file count: entries that exist beyond this page.
	remain = result.TotalCount - (result.Offset + result.PageSize)
	if remain < 0 {
		remain = 0
	}
	// Convert the raw API records into CloudFile values.
	files = make([]*CloudFile, len(result.Data))
	for index, data := range result.Data {
		info := &CloudFile{
			IsCategory: false,
			IsSystem:   false,
			CategoryId: data.CategoryId,
			Name:       data.Name,
			Size:       data.Size,
			PickCode:   data.PickCode,
		}
		// Timestamps arrive as decimal strings; parse failures leave zero.
		info.CreateTime, _ = strconv.ParseInt(data.CreateTime, 10, 64)
		info.UpdateTime, _ = strconv.ParseInt(data.UpdateTime, 10, 64)
		// A nil FileId marks a category (directory) entry.
		if data.FileId == nil {
			info.IsCategory = true
			info.ParentId = *data.ParentId
		} else {
			info.FileId = *data.FileId
			info.Sha1 = *data.Sha1
		}
		files[index] = info
	}
	return
}

// FileRename renames the file identified by fileId to name.
func (c *Client) FileRename(fileId, name string) (err error) {
	form := newForm(false).
		WithString("fid", fileId).
		WithString("file_name", name)
	result := new(_FileOperateResult)
	err = c.requestJson(apiFileEdit, nil, form, result)
	if err == nil && !result.State {
		err = apiError(-1)
	}
	return
}

// FileCopy copies the given files into the category identified by parentId.
func (c *Client) FileCopy(parentId string, fileIds ...string) (err error) {
	form := newForm(false).
		WithString("pid", parentId).
		WithStrings("fid", fileIds)
	result := new(_FileOperateResult)
	err = c.requestJson(apiFileCopy, nil, form, result)
	if err == nil && !result.State {
		err = apiError(-1)
	}
	return
}

// FileMove moves the given files into the category identified by parentId.
func (c *Client) FileMove(parentId string, fileIds ...string) (err error) {
	form := newForm(false).
		WithString("pid", parentId).
		WithStrings("fid", fileIds)
	result := new(_FileOperateResult)
	err = c.requestJson(apiFileMove, nil, form, result)
	if err == nil && !result.State {
		err = apiError(-1)
	}
	return
}

// FileDelete deletes the given files from the category identified by parentId.
func (c *Client) FileDelete(parentId string, fileIds ...string) (err error) {
	form := newForm(false).
		WithString("pid", parentId).
		WithStrings("fid", fileIds)
	result := new(_FileOperateResult)
	err = c.requestJson(apiFileDelete, nil, form, result)
	if err == nil && !result.State {
		err = apiError(-1)
	}
	return
}

// CategoryAdd creates a new category named name under parentId.
func (c *Client) CategoryAdd(parentId, name string) (err error) {
	form := newForm(false).
		WithString("pid", parentId).
		WithString("cname", name)
	result := &_FileAddResult{}
	err = c.requestJson(apiFileAdd, nil, form, result)
	if err == nil && !result.State {
		err = apiError(-1)
	}
	return
}

// CategoryInfo fetches information for the given category.
// NOTE(review): unlike the other methods, the decoded result is discarded
// and its State flag is never checked — confirm whether that is intentional.
func (c *Client) CategoryInfo(categoryId string) (err error) {
	qs := newQueryString().
		WithString("aid", "1").
		WithString("cid", categoryId)
	result := &CategoryInfoResult{}
	return c.requestJson(apiCategoryGet, qs, nil, result)
}
// Package imapmaildir implements a go-imap backend on top of Maildir++
// directories, with a per-mailbox storm/bbolt index file.
package imapmaildir

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/asdine/storm/v3"
	"github.com/emersion/go-imap/backend"
	"github.com/emersion/go-maildir"
	"go.etcd.io/bbolt"
)

const (
	InboxName      = "INBOX"
	HierarchySep   = "."
	MaxMboxNesting = 100
	IndexFile      = "imapmaildir-index.db"
)

// validMboxPart reports whether a single hierarchy component of a mailbox
// name is safe to map onto a file-system directory name.
func validMboxPart(name string) bool {
	// Restrict characters that may be problematic for FS handling.
	// This is list of characters not allowed in NTFS minus 0x00 (handled
	// below), in Unix world many of these characters may be troublesome to
	// handle in shell scripts.
	if strings.ContainsAny(name, ":*?\"<>|") {
		return false
	}
	// Disallow ASCII control characters (including 0x00).
	for _, ch := range name {
		if ch < ' ' {
			return false
		}
	}
	// Prevent directory structure escaping.
	return !strings.Contains(name, "..")
}

// User represents a backend user rooted at a Maildir++ base directory.
type User struct {
	b *Backend

	name     string
	basePath string
}

// prepareMboxPath validates the IMAP mailbox name and maps it onto the
// Maildir++ on-disk path (basePath/.part1.part2...). It also returns the
// validated hierarchy components.
func (u *User) prepareMboxPath(mbox string) (fsPath string, parts []string, err error) {
	if strings.EqualFold(mbox, InboxName) {
		return u.basePath, []string{}, nil
	}

	nameParts := strings.Split(mbox, HierarchySep)

	// Verify validity before attempting to do anything.
	//
	// BUG FIX: the previous version checked len(parts) — the not-yet-assigned
	// named return, always 0 — instead of the actual component count. As a
	// result the nesting limit was never enforced, a trailing hierarchy
	// separator was always rejected (contradicting the comment below), and
	// the returned parts slice was always nil.
	if len(nameParts) > MaxMboxNesting {
		return "", nil, errors.New("mailbox nesting limit exceeded")
	}

	fsPath = u.basePath
	for i, part := range nameParts {
		if part == "" {
			// Strip the possible trailing separator but do not allow empty
			// parts in general.
			if i != len(nameParts)-1 {
				return "", nil, errors.New("illegal mailbox name")
			}
			continue
		}
		if !validMboxPart(part) {
			u.b.Log.Printf("illegal mailbox name requested by %s: %v", u.name, mbox)
			return "", nil, errors.New("illegal mailbox name")
		}

		fsPath += string(filepath.Separator) + "." + part
		parts = append(parts, part)
	}

	return fsPath, parts, nil
}

// mboxName converts a Maildir++ on-disk path back into an IMAP mailbox name.
func (u *User) mboxName(fsPath string) (string, error) {
	fsPath = strings.TrimPrefix(fsPath, u.basePath+string(filepath.Separator))
	if fsPath == "" {
		return InboxName, nil
	}
	parts := strings.Split(fsPath, string(filepath.Separator))
	if len(parts) > MaxMboxNesting {
		return "", errors.New("mailbox nesting limit exceeded")
	}
	mboxParts := make([]string, 0, len(parts))
	for _, part := range parts {
		// Every Maildir++ sub-mailbox directory must start with a dot.
		if !strings.HasPrefix(part, ".") {
			return "", fmt.Errorf("not a maildir++ path: %v", fsPath)
		}
		mboxParts = append(mboxParts, part[1:])
	}
	return strings.Join(mboxParts, HierarchySep), nil
}

// Username returns the name of the backend user.
func (u *User) Username() string {
	return u.name
}

// ListMailboxes walks the user's base directory and returns all mailboxes.
// INBOX always exists and is always included.
func (u *User) ListMailboxes(subscribed bool) ([]backend.Mailbox, error) {
	// TODO: Figure out a fast way to filter subscribed/unsubscribed
	// directories.
	mboxes := []backend.Mailbox{
		&Mailbox{
			// Inbox always exists.
			name: InboxName,
			path: u.basePath,
		},
	}
	err := filepath.Walk(u.basePath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Ignore errors, return as much as possible.
			u.b.Log.Printf("error during mailboxes iteration: %v", err)
			return nil
		}
		if !info.IsDir() {
			return nil
		}
		// Inbox is already added explicitly above.
		if path == u.basePath {
			return nil
		}
		// Non-dotted directories (cur/new/tmp) are not sub-mailboxes.
		if !strings.HasPrefix(info.Name(), ".") {
			return filepath.SkipDir
		}
		mboxName, err := u.mboxName(path)
		if err != nil {
			u.b.Log.Printf("error during mailboxes iteration: %v", err)
			return filepath.SkipDir
		}
		u.b.Debug.Printf("listing mbox (%v, %v)", mboxName, path)
		// Note that Mailbox object has nil handle.
		mboxes = append(mboxes, &Mailbox{
			b:        u.b,
			username: u.name,
			name:     mboxName,
			path:     path,
		})
		return nil
	})
	if err != nil {
		u.b.Log.Printf("failed to list mailboxes: %v", err)
		return nil, errors.New("I/O error")
	}
	return mboxes, nil
}

// openDB returns the (possibly shared) storm index DB for the mailbox,
// incrementing the backend-wide use counter for that handle.
func (u *User) openDB(fsPath, mbox string) (*storm.DB, error) {
	u.b.dbsLock.Lock()
	defer u.b.dbsLock.Unlock()

	key := u.name + "\x00" + mbox

	handle, ok := u.b.dbs[key]
	if ok {
		handle.uses++
		u.b.Debug.Printf("%d uses for %s/%s mbox", handle.uses, u.name, mbox)
		u.b.dbs[key] = handle
		db := handle.db
		return db, nil
	}

	db, err := storm.Open(filepath.Join(fsPath, IndexFile))
	if err != nil {
		return nil, err
	}
	u.b.dbs[key] = mailboxHandle{
		uses: 1,
		db:   db,
	}
	return db, nil
}

// GetMailbox opens a mailbox, lazily initializing its index metadata
// (UidNext / UidValidity) on first access.
func (u *User) GetMailbox(mbox string) (backend.Mailbox, error) {
	fsPath, _, err := u.prepareMboxPath(mbox)
	if err != nil {
		return nil, err
	}

	_, err = os.Stat(fsPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, backend.ErrNoSuchMailbox
		}
		u.b.Log.Printf("failed to get mailbox: %v", err)
		return nil, errors.New("I/O error")
	}

	handle, err := u.openDB(fsPath, mbox)
	if err != nil {
		u.b.Log.Printf("failed to open DB: %v", err)
		// Fixed message typo ("try again more").
		return nil, errors.New("I/O error, try again later")
	}

	err = handle.Bolt.Update(func(btx *bbolt.Tx) error {
		tx := handle.WithTransaction(btx)

		var data mboxData
		err := tx.One("Dummy", 1, &data)
		if err == nil {
			return nil
		}
		if err != storm.ErrNotFound {
			return fmt.Errorf("read mboxData: %w", err)
		}

		// First SELECT of this mailbox: seed the index metadata.
		u.b.Debug.Printf("initializing %s/%s", u.name, mbox)
		data.Dummy = 1
		data.UidNext = 1
		data.UidValidity = uint32(time.Now().UnixNano() & 0xFFFFFFFF)
		if err := tx.Save(&data); err != nil {
			return fmt.Errorf("save mboxData: %w", err)
		}
		return nil
	})
	if err != nil {
		handle.Close()
		u.b.Log.Printf("failed to init DB: %v", err)
		return nil, errors.New("I/O error, try again later")
	}

	u.b.Debug.Printf("get mbox (%v, %v)", mbox, fsPath)

	return &Mailbox{
		b:        u.b,
		name:     mbox,
		username: u.name,
		handle:   handle,
		dir:      maildir.Dir(fsPath),
		path:     fsPath,
	}, nil
}

// CreateMailbox creates the Maildir++ directory structure for a new mailbox.
// The IMAP index is created on demand on first SELECT.
func (u *User) CreateMailbox(mbox string) error {
	if strings.EqualFold(mbox, InboxName) {
		return backend.ErrMailboxAlreadyExists
	}

	fsPath, _, err := u.prepareMboxPath(mbox)
	if err != nil {
		return err
	}

	if _, err := os.Stat(fsPath); err != nil {
		if !os.IsNotExist(err) {
			u.b.Debug.Printf("failed to create mailbox: %v", err)
			return errors.New("I/O error")
		}
	} else {
		return backend.ErrMailboxAlreadyExists
	}

	if err := os.MkdirAll(fsPath, 0700); err != nil {
		u.b.Debug.Printf("failed to create mailbox: %v", err)
		return errors.New("I/O error")
	}
	if err := os.MkdirAll(filepath.Join(fsPath, "cur"), 0700); err != nil {
		u.b.Debug.Printf("failed to create mailbox: %v", err)
		return errors.New("I/O error")
	}
	if err := os.MkdirAll(filepath.Join(fsPath, "new"), 0700); err != nil {
		u.b.Debug.Printf("failed to create mailbox: %v", err)
		return errors.New("I/O error")
	}
	if err := os.MkdirAll(filepath.Join(fsPath, "tmp"), 0700); err != nil {
		u.b.Debug.Printf("failed to create mailbox: %v", err)
		return errors.New("I/O error")
	}

	// IMAP index will be created on demand on first SELECT.

	u.b.Debug.Printf("create mbox (%v, %v)", mbox, fsPath)

	return nil
}

// DeleteMailbox removes the mailbox's index and message directories.
// The mailbox directory itself is left in place, so child mailboxes survive.
func (u *User) DeleteMailbox(mbox string) error {
	if strings.EqualFold(mbox, InboxName) {
		return errors.New("cannot delete inbox")
	}

	fsPath, _, err := u.prepareMboxPath(mbox)
	if err != nil {
		return err
	}

	if _, err := os.Stat(fsPath); err != nil {
		if os.IsNotExist(err) {
			return backend.ErrNoSuchMailbox
		}
		u.b.Debug.Printf("failed to delete mailbox: %v", err)
		return errors.New("I/O error")
	}

	// Delete in that order to
	// 1. Prevent IMAP SELECT.
	if err := os.RemoveAll(filepath.Join(fsPath, IndexFile)); err != nil {
		if !os.IsNotExist(err) {
			u.b.Log.Printf("failed to remove mailbox: %v", err)
			return errors.New("I/O error")
		}
	}
	// 2. Prevent new maildir deliveries.
	if err := os.RemoveAll(filepath.Join(fsPath, "tmp")); err != nil {
		if !os.IsNotExist(err) {
			u.b.Log.Printf("failed to remove mailbox: %v", err)
			return errors.New("I/O error")
		}
	}
	// 3. Prevent in-flight maildir deliveries from completing.
	if err := os.RemoveAll(filepath.Join(fsPath, "new")); err != nil {
		if !os.IsNotExist(err) {
			u.b.Log.Printf("failed to remove mailbox: %v", err)
			return errors.New("I/O error")
		}
	}
	// ... and remove all messages
	if err := os.RemoveAll(filepath.Join(fsPath, "cur")); err != nil {
		if !os.IsNotExist(err) {
			u.b.Log.Printf("failed to remove mailbox: %v", err)
			return errors.New("I/O error")
		}
	}

	u.b.Debug.Printf("delete mbox (%v, %v)", mbox, fsPath)

	return nil
}

// RenameMailbox renames a mailbox by renaming its on-disk directory.
func (u *User) RenameMailbox(existingName, newName string) error {
	if strings.EqualFold(existingName, InboxName) {
		// TODO: Handle special case of INBOX move.
		return errors.New("not implemented")
	}

	fsPathOld, _, err := u.prepareMboxPath(existingName)
	if err != nil {
		return err
	}
	fsPathNew, _, err := u.prepareMboxPath(newName)
	if err != nil {
		return err
	}

	if err := os.Rename(fsPathOld, fsPathNew); err != nil {
		u.b.Log.Printf("failed to rename mailbox: %v", err)
		return errors.New("I/O error")
	}

	u.b.Debug.Printf("rename mbox (%v, %v), (%v, %v)", existingName, fsPathOld, newName, fsPathNew)

	return nil
}

// Logout releases the user; there is no per-user state to tear down.
func (u *User) Logout() error {
	u.b.Debug.Printf("user logged out (%v, %v)", u.name, u.basePath)
	return nil
}
// Package kv abstracts a distributed/clustered kv store for use with cerana
// kv does not aim to be a full-featured generic kv abstraction, but can be useful anyway.
// Only implementors imported by users will be available at runtime.
// See documentation of KV for handled operations.
package kv

import (
	"net/url"
	"sync"
	"time"

	"github.com/cerana/cerana/pkg/errors"
)

// Value represents the value stored in a key, including the last modification index of the key
type Value struct {
	Data  []byte `json:"data"`
	Index uint64 `json:"index"`
}

// EventType is used to describe actions on watch events
type EventType int

//go:generate sh -c "stringer -type=EventType && sed -i 's#_EventType_name#eventTypeName#g;s#_EventType_index#eventTypeIndex#g' eventtype_string.go"
const (
	// None indicates no event, should induce a panic if ever seen
	None EventType = iota
	// Create indicates a new key being added
	Create
	// Delete indicates a key being deleted
	Delete
	// Update indicates a key being modified, the contents of the key are not taken into account
	Update
)

// types maps event types to their display names.
// NOTE(review): not referenced in this part of the package — presumably used
// by another file or kept for the generated Stringer; confirm before removing.
var types = map[EventType]string{
	None:   "None",
	Create: "Create",
	Delete: "Delete",
	Update: "Update",
}

// Event represents an action occurring to a watched key or prefix
type Event struct {
	Key  string    `json:"key"`
	Type EventType `json:"type"`
	Value
}

// register holds the scheme -> constructor table populated by Register and
// consumed by New. Guarded by its embedded RWMutex.
var register = struct {
	sync.RWMutex
	kvs map[string]func(string) (KV, error)
}{
	kvs: map[string]func(string) (KV, error){},
}

// Register is called by KV implementors to register their scheme to be used with New.
// Registering the same name twice panics, mirroring database/sql.Register semantics.
func Register(name string, fn func(string) (KV, error)) {
	register.Lock()
	defer register.Unlock()

	if _, dup := register.kvs[name]; dup {
		panic("kv: Register called twice for " + name)
	}
	register.kvs[name] = fn
}

// New will return a KV implementation according to the connection string addr.
// The parameter addr may be the empty string or a valid URL.
// The special `http` and `https` schemes are deemed generic, the first implementation that supports it will be used.
// Otherwise the scheme portion of the URL will be used to select the exact implementation to instantiate.
func New(addr string) (KV, error) {
	var u *url.URL
	// An empty addr defaults to the generic http scheme.
	if addr == "" {
		u = &url.URL{Scheme: "http"}
	} else {
		var err error
		u, err = url.Parse(addr)
		if err != nil {
			return nil, errors.Wrapv(err, map[string]interface{}{"addr": addr}, "failed to parse kv addr")
		}
	}

	register.RLock()
	defer register.RUnlock()

	// Exact scheme match wins: construct, ping, and return it.
	constructor := register.kvs[u.Scheme]
	if constructor != nil {
		kv, err := constructor(addr)
		if err != nil {
			return nil, err
		}
		if err = kv.Ping(); err != nil {
			return nil, err
		}
		return kv, nil
	} else if u.Scheme != "http" && u.Scheme != "https" {
		// A non-generic scheme with no registered constructor is an error —
		// most likely a missing implementor import.
		return nil, errors.Newv("unknown kv store (forgotten import?)", map[string]interface{}{"scheme": u.Scheme})
	}

	// Generic http(s): try every registered implementation until one pings.
	for _, constructor := range register.kvs {
		kv, err := constructor(addr)
		if err != nil {
			return nil, err
		}
		// scheme was http(s) so an error just means we tried to connect
		// to an incompatible cluster
		if err := kv.Ping(); err == nil {
			return kv, nil
		}
	}
	return nil, errors.Newv("unknown kv store", map[string]interface{}{"addr": addr})
}

// Lock represents a locked key in the distributed key value store.
// The value stored in key is managed by lock and may contain private implementation data and should not be fetched out-of-band
type Lock interface {
	// Renew renews the lock, it should be called before attempting any operation on whatever is being protected
	Renew() error
	// Unlock unlocks and invalidates the lock
	Unlock() error
}

// EphemeralKey represents a key that will disappear once the timeout used to instantiate it has lapsed.
type EphemeralKey interface {
	// Set will first renew the ttl then set the value of key, it is an error if the ttl has expired since last renewal
	Set(value string) error
	// Renew renews the key ttl
	Renew() error
	// Destroy will delete the key without having to wait for expiration via TTL
	Destroy() error
}

// KV is the interface for distributed key value store interaction
type KV interface {
	// Delete removes a key; the bool selects recursive deletion.
	Delete(string, bool) error
	// Get returns the value stored at the given key.
	Get(string) (Value, error)
	// GetAll returns all values under the given prefix, keyed by full key name.
	GetAll(string) (map[string]Value, error)
	// Keys lists the keys under the given prefix.
	Keys(string) ([]string, error)
	// Set stores a value under a key unconditionally.
	Set(string, string) error

	// Atomic operations
	// Update will set key=value while ensuring that newer values are not clobbered
	Update(string, Value) (uint64, error)
	// Remove will delete key only if it has not been modified since index
	Remove(string, uint64) error

	// IsKeyNotFound is a helper to determine if the error is a key not found error
	IsKeyNotFound(error) bool

	// Watch returns channels for watching prefixes for _future_ events.
	// stop *must* always be closed by callers
	// Note: replaying events in history is not guaranteed to be possible.
	Watch(string, uint64, chan struct{}) (chan Event, chan error, error)

	// EphemeralKey creates a key that will be deleted if the ttl expires
	EphemeralKey(string, time.Duration) (EphemeralKey, error)

	// Lock creates a new lock, it blocks until the lock is acquired.
	Lock(string, time.Duration) (Lock, error)

	// Ping verifies communication with the cluster
	Ping() error
}
// SPDX-FileCopyrightText: Copyright 2019 The Go Language Server Authors // SPDX-License-Identifier: BSD-3-Clause package jsonrpc2 import ( "errors" "fmt" "github.com/segmentio/encoding/json" ) // Error represents a JSON-RPC error. type Error struct { // Code a number indicating the error type that occurred. Code Code `json:"code"` // Message a string providing a short description of the error. Message string `json:"message"` // Data a Primitive or Structured value that contains additional // information about the error. Can be omitted. Data *json.RawMessage `json:"data,omitempty"` } // compile time check whether the Error implements error interface. var _ error = (*Error)(nil) // Error implements error.Error. func (e *Error) Error() string { if e == nil { return "" } return e.Message } // Unwrap implements errors.Unwrap. // // Returns the error underlying the receiver, which may be nil. func (e *Error) Unwrap() error { return errors.New(e.Message) } // NewError builds a Error struct for the suppied code and message. func NewError(c Code, message string) *Error { return &Error{ Code: c, Message: message, } } // Errorf builds a Error struct for the suppied code, format and args. func Errorf(c Code, format string, args ...interface{}) *Error { return &Error{ Code: c, Message: fmt.Sprintf(format, args...), } } // constErr represents a error constant. type constErr string // compile time check whether the constErr implements error interface. var _ error = (*constErr)(nil) // Error implements error.Error. func (e constErr) Error() string { return string(e) } const ( // ErrIdleTimeout is returned when serving timed out waiting for new connections. ErrIdleTimeout = constErr("timed out waiting for new connections") )
package main

import (
	"strings"
	"testing"
)

// stripWhitespace removes all whitespace runs so HTML fixtures can be
// compared independent of formatting.
// http://stackoverflow.com/a/32081891/597260
func stripWhitespace(str string) string {
	return strings.Join(strings.Fields(str), "")
}

// TestRender renders the "default" template with an empty body and checks
// the whitespace-normalized HTML output, including the reply-to footer.
func TestRender(t *testing.T) {
	template := Template{
		Name: "default",
		Data: map[string]string{
			"body":         "",
			"replyToEmail": "geoff@geoff.com",
		},
	}

	expected := stripWhitespace(`
	<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
	<html xmlns="http://www.w3.org/1999/xhtml">
		<head>
			<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
			<meta name="viewport" content="width=device-width" />
		</head>
		<body>
			<table cellspacing="0" cellpadding="0" width="100%">
				<tbody>
					<tr>
						<td></td>
					</tr>
					<tr>
						<td>
							<br />
							--
							<br />
							Sent via <a target="_blank" href="https://www.raise.me">RaiseMe</a>
							<br />
							Reply to <a href='mailto:geoff@geoff.com'>geoff@geoff.com</a>
						</td>
					</tr>
				</tbody>
			</table>
		</body>
	</html>`)

	rendered := stripWhitespace(template.Render())
	if rendered != expected {
		t.Errorf("Expected:\n%v\nGot:\n%v", expected, rendered)
	}
}

// TestRenderWithNewLine verifies that newlines in the body are converted to
// <br> tags by Render.
func TestRenderWithNewLine(t *testing.T) {
	template := Template{
		Name: "default",
		Data: map[string]string{
			"body":         "hello\nworld",
			"replyToEmail": "geoff@geoff.com",
		},
	}

	expected := stripWhitespace(`
	<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
	<html xmlns="http://www.w3.org/1999/xhtml">
		<head>
			<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
			<meta name="viewport" content="width=device-width" />
		</head>
		<body>
			<table cellspacing="0" cellpadding="0" width="100%">
				<tbody>
					<tr>
						<td>hello<br>world</td>
					</tr>
					<tr>
						<td>
							<br />
							--
							<br />
							Sent via <a target="_blank" href="https://www.raise.me">RaiseMe</a>
							<br />
							Reply to <a href='mailto:geoff@geoff.com'>geoff@geoff.com</a>
						</td>
					</tr>
				</tbody>
			</table>
		</body>
	</html>`)

	rendered := stripWhitespace(template.Render())
	if rendered != expected {
		t.Errorf("Expected:\n%v\nGot:\n%v", expected, rendered)
	}
}
package lfs

import (
	"bufio"
	"io"
	"os/exec"
	"strings"

	"github.com/rubyist/tracerx"
)

// wrappedCmd bundles a started *exec.Cmd with a write-closer over its stdin
// and buffered readers over its stdout and stderr pipes.
type wrappedCmd struct {
	Stdin  io.WriteCloser
	Stdout *bufio.Reader
	Stderr *bufio.Reader
	*exec.Cmd
}

// startCommand starts up a command and creates a stdin pipe and a buffered
// stdout & stderr pipes, wrapped in a wrappedCmd. The stdout buffer will be of stdoutBufSize
// bytes.
func startCommand(command string, args ...string) (*wrappedCmd, error) {
	cmd := exec.Command(command, args...)

	outPipe, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	errPipe, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	inPipe, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}

	tracerx.Printf("run_command: %s %s", command, strings.Join(args, " "))
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	wrapped := &wrappedCmd{
		Stdin:  inPipe,
		Stdout: bufio.NewReaderSize(outPipe, stdoutBufSize),
		Stderr: bufio.NewReaderSize(errPipe, stdoutBufSize),
		Cmd:    cmd,
	}
	return wrapped, nil
}
package node_checker

import "errors"

// Sentinel errors returned by the node checker controller.
// Compare against these with errors.Is.
var (
	// Fixed typo in the message: "form url" -> "from url".
	errCannotLoadData             = errors.New("node checker controller: cannot load data from url")
	errCannotFindNodes            = errors.New("node checker controller: cannot find nodes")
	errCannotFindNodeWithKey      = errors.New("node checker controller: cannot find node with key")
	errCannotLoadDataFromDatabase = errors.New("node checker controller: cannot load data from database")
	errUnableToProcessRequest     = errors.New("node checker controller: cannot process request")
)
package context

import (
	"github.com/nsqio/go-nsq"
	"github.com/xozrc/cqrs/command"
	ordercommand "github.com/xozrc/cqrs/eventsourcing/examples/order/command"
	messagingnsq "github.com/xozrc/cqrs/messaging/nsq"
	cqrstypes "github.com/xozrc/cqrs/types"
)

// Client publishes order commands onto a command bus backed by an NSQ topic.
type Client interface {
	CreateOrder() error
	CancelOrder(id cqrstypes.Guid) error
	Stop() error
}

// client is the concrete Client; it owns the NSQ producer feeding the bus.
type client struct {
	producer *nsq.Producer
	bus      command.CommandBus
}

// CreateOrder publishes a create-order command with freshly generated
// transaction and order ids.
func (c *client) CreateOrder() error {
	tId := cqrstypes.NewGuid()
	oId := cqrstypes.NewGuid()
	order := ordercommand.NewCreateOrder1(tId, oId)
	err := c.bus.Publish(order)
	return err
}

// CancelOrder publishes a cancel-order command for the given order id.
func (c *client) CancelOrder(id cqrstypes.Guid) error {
	tId := cqrstypes.NewGuid()
	order := ordercommand.NewCancelOrder1(tId, id)
	err := c.bus.Publish(order)
	return err
}

// Stop shuts down the underlying NSQ producer. The command bus itself has no
// separate teardown here.
func (c *client) Stop() error {
	c.producer.Stop()
	return nil
}

// NewClient connects an NSQ producer to addr and wraps it in a command-bus
// backed Client publishing on topic. On any setup error the producer is
// stopped before returning.
func NewClient(addr string, topic string) (c Client, err error) {
	tp, err := nsq.NewProducer(addr, nsq.NewConfig())
	if err != nil {
		return
	}
	// Ensure the raw producer is not leaked if a later step fails.
	defer func() {
		if err != nil {
			tp.Stop()
		}
	}()

	producer, err := messagingnsq.NewProducer(tp, topic)
	if err != nil {
		return
	}

	bus := command.NewCommandBus(producer)
	c = &client{
		bus:      bus,
		producer: tp,
	}
	return
}
package winprint

import (
	"syscall"
	"unsafe"
	"fmt"
)

// PORT_INFO_1 mirrors the Win32 PORT_INFO_1 structure (winspool printer
// ports API): just the port name as a UTF-16 pointer.
type PORT_INFO_1 struct {
	Name *uint16
}

// PORT_INFO_2 mirrors the Win32 PORT_INFO_2 structure: port name, monitor,
// description, plus the port type flags and a reserved field.
type PORT_INFO_2 struct {
	PortName    *uint16
	MonitorName *uint16
	Description *uint16
	PortType    uint32
	Reserved    uint32
}

// GetPortName returns PortName decoded from UTF-16.
func (t *PORT_INFO_2) GetPortName() string {
	return utf16PtrToString(t.PortName)
}

// GetMonitorName returns MonitorName decoded from UTF-16.
func (t *PORT_INFO_2) GetMonitorName() string {
	return utf16PtrToString(t.MonitorName)
}

// GetDescription returns Description decoded from UTF-16.
func (t *PORT_INFO_2) GetDescription() string {
	return utf16PtrToString(t.Description)
}

// EnumPorts2 lists all printer ports at information level 2 using the usual
// Win32 double-call pattern: a first call with a 1-byte buffer to learn the
// required size (expected to fail with ERROR_INSUFFICIENT_BUFFER), then a
// second call with a buffer of that size.
//
// NOTE(review): the returned PORT_INFO_2 values contain *uint16 pointers
// that point into `buf`; the structs returned here keep that backing array
// reachable, but callers should decode the strings promptly rather than
// stashing the raw pointers.
func EnumPorts2() ([]PORT_INFO_2, error) {
	var needed, returned uint32
	buf := make([]byte, 1)
	// First call: sized only to discover `needed`.
	err := SysEnumPorts("", 2, &buf[0], 1, &needed, &returned)
	if err != syscall.ERROR_INSUFFICIENT_BUFFER {
		return nil, err
	}
	buf = make([]byte, needed)
	// Second call: fills buf with an array of PORT_INFO_2 records followed
	// by the string data they point at.
	err = SysEnumPorts("", 2, &buf[0], needed, &needed, &returned)
	if err != nil {
		return nil, err
	}
	var obj PORT_INFO_2
	size := unsafe.Sizeof(obj)
	infos := make([]PORT_INFO_2, 0, returned)
	// Reinterpret the front of buf as `returned` consecutive PORT_INFO_2
	// records and copy each one out.
	for index := 0; index < int(returned); index++ {
		infos = append(infos, *(*PORT_INFO_2)(unsafe.Pointer(&buf[index*int(size)])))
	}
	return infos, nil
}

// GetPortInfo2 returns the PORT_INFO_2 record whose port name matches
// portName exactly, or an error if no such port is enumerated.
func GetPortInfo2(portName string) (*PORT_INFO_2, error) {
	info2s, e := EnumPorts2()
	if e != nil {
		return nil, e
	}
	for _, v := range info2s {
		if v.GetPortName() == portName {
			return &v, nil
		}
	}
	return nil, fmt.Errorf("port does not exist")
}
/*
 * Copyright 2023 Gravitational, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai

// Message represents a message within a live conversation.
// Indexed by ID for frontend ordering and future partial message streaming.
type Message struct {
	// Role is the OpenAI-style role of the sender (e.g. user/assistant).
	Role string `json:"role"`
	// Content is the full text of the message.
	Content string `json:"content"`
	// Idx orders the message within the conversation for the frontend.
	Idx int `json:"idx"`

	// NumTokens is the number of completion tokens for the (non-streaming) message
	NumTokens int `json:"-"`
}

// Label represents a label returned by OpenAI's completion API.
type Label struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

// CompletionCommand represents a command returned by OpenAI's completion API.
type CompletionCommand struct {
	Command string   `json:"command,omitempty"`
	Nodes   []string `json:"nodes,omitempty"`
	Labels  []Label  `json:"labels,omitempty"`

	// NumTokens is the number of completion tokens for the (non-streaming) message
	NumTokens int `json:"-"`
}

// StreamingMessage represents a message that is streamed from the assistant and will later be stored as a normal message in the conversation store.
type StreamingMessage struct {
	// Role describes the OpenAI role of the message, i.e its sender.
	Role string
	// Idx is a semi-unique ID assigned when loading a conversation so that the UI can group partial messages together.
	Idx int
	// Chunks is a channel of message chunks that are streamed from the assistant.
	Chunks <-chan string
	// Error is a channel which may receive one error if the assistant encounters an error while streaming.
	// Consumers should stop reading from all channels if they receive an error and abort.
	Error <-chan error
}
package main

import "fmt"

// ***********************************
// struct = typed collection of fields
// ***********************************

// person demonstrates a simple struct type.
type person struct {
	name string
	age  int
}

// NewPerson builds a person with the given name and a default age of 20.
// Returning the address of a local composite literal is safe in Go: the
// value escapes and is heap-allocated.
func NewPerson(name string) *person {
	return &person{name: name, age: 20}
}

func main() {
	// Various ways to construct and print struct values.
	fmt.Println(person{"Santosh", 25})
	fmt.Println(person{name: "Katty Perry", age: 50})
	fmt.Println(person{name: "Narendra Modi"}) // omitted fields get zero values
	fmt.Println(&person{name: "Larry Page", age: 45})
	fmt.Println(NewPerson("Sergey Brin"))

	// Field access works on values and through pointers alike; pointers are
	// auto-dereferenced, and mutation through a pointer is visible on the
	// original value.
	mark := person{name: "Mark", age: 39}
	fmt.Println(mark.name)

	ref := &mark
	fmt.Println(ref.age)
	ref.age = 35
	fmt.Println(mark.age)
}
package git

import (
	"os/exec"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tilt-dev/tilt/internal/testutils/tempdir"
)

// TestNormalizeGitRemoteSuffix verifies that a trailing ".git" suffix does
// not affect the normalized remote.
func TestNormalizeGitRemoteSuffix(t *testing.T) {
	assert.Equal(t, normalizeGitRemote("https://github.com/tilt-dev/tilt.git"), normalizeGitRemote("https://github.com/tilt-dev/tilt"))
}

// TestNormalizeGitRemoteScheme verifies that the URL scheme (https vs ssh)
// does not affect the normalized remote.
func TestNormalizeGitRemoteScheme(t *testing.T) {
	assert.Equal(t, normalizeGitRemote("https://github.com/tilt-dev/tilt.git"), normalizeGitRemote("ssh://github.com/tilt-dev/tilt"))
}

// TestNormalizeGitRemoteTrailingSlash verifies that a trailing slash does
// not affect the normalized remote.
func TestNormalizeGitRemoteTrailingSlash(t *testing.T) {
	assert.Equal(t, normalizeGitRemote("https://github.com/tilt-dev/tilt"), normalizeGitRemote("ssh://github.com/tilt-dev/tilt/"))
}

// TestNormalizedGitRemoteUsername verifies that scp-style "git@host:" remotes
// normalize the same as their https equivalents.
func TestNormalizedGitRemoteUsername(t *testing.T) {
	assert.Equal(t, normalizeGitRemote("https://github.com/tilt-dev/tilt"), normalizeGitRemote("git@github.com:tilt-dev/tilt.git"))
}

// TestGitOrigin creates a real git repo in a temp dir, points its origin at
// a known URL, and checks that gitOrigin reads it back (after normalization).
func TestGitOrigin(t *testing.T) {
	tf := tempdir.NewTempDirFixture(t)

	err := exec.Command("git", "init", tf.Path()).Run()
	if err != nil {
		t.Fatalf("failed to init git repo: %+v", err)
	}

	err = exec.Command("git", "-C", tf.Path(), "remote", "add", "origin", "https://github.com/tilt-dev/tilt").Run()
	if err != nil {
		t.Fatalf("failed to set origin's url: %+v", err)
	}

	origin := gitOrigin(tf.Path())
	// we can't just compare raw urls because of https://git-scm.com/docs/git-config#git-config-urlltbasegtinsteadOf
	// e.g., circleci images set `url.ssh://git@github.com.insteadof=https://github.com`
	assert.Equal(t, "//github.com/tilt-dev/tilt", normalizeGitRemote(origin))
}
package main

import (
	"filestore"
	"io/ioutil"
	"os"
	"time"

	log "github.com/sirupsen/logrus"
)

// main exercises the Memcache-backed filestore end to end: read a local
// file, store it, retrieve it into "<name>-retrieved", then delete it.
// Usage: <prog> <memcache-server> <filename>
func main() {
	var server, filename string
	if args := os.Args; len(args) > 1 {
		server = args[1]
		if len(args) > 2 {
			filename = args[2]
		}
	}
	if server == "" {
		log.Fatal("Server not provided")
	}
	if filename == "" {
		log.Fatal("Filename not provided")
	}

	log.SetLevel(log.DebugLevel)

	store := filestore.NewMemcache(server, filestore.MemcacheConfig{
		// Things are not super fast when reading 50MB file, give it plenty of time
		Timeout: 5 * time.Second,
		// For some reason Memcache didnt like 1048576 byte values in my setup, the max value it would take is 1048470...
		// Memcache logs show that it gets exactly specified number of bytes, no envelop seem to be added by the used
		// Memcache client lib.
		ChunkSize: 1048470,
	})

	original, err := ioutil.ReadFile(filename)
	if err != nil {
		log.WithError(err).Fatal("Unable to read original file")
	}

	if err = store.Store(filename, original); err != nil {
		log.WithError(err).Fatal("Unable to store file")
	}

	fetched, err := store.Retrieve(filename)
	if err != nil {
		log.WithError(err).Fatal("Unable to retrieve file")
	}

	if err = ioutil.WriteFile(filename+"-retrieved", fetched, 0644); err != nil {
		log.WithError(err).Fatal("Unable to save to file")
	}

	if err = store.Delete(filename); err != nil {
		log.WithError(err).Fatal("Unable to delete file")
	}
}
//
// ternary search tree
//
package tst

// node2_t is one node of the flattened ternary search tree. Child links are
// indices into Tree2_t.root rather than pointers; INTMAX marks "no child".
type node2_t struct {
	hi_kid int
	eq_kid int
	lo_kid int
	key    rune
	value  interface{} // prefix terminator
}

// Tree2_t is a ternary search tree stored as a flat slice of nodes.
// The zero value is an empty, ready-to-use tree.
type Tree2_t struct {
	root []node2_t
}

// Cursor2_t walks a tree rune by rune, remembering its position between
// Fetch calls.
type Cursor2_t struct {
	root []node2_t
	cur  int
}

// INTMAX is the "no child" sentinel for node indices. It equals the maximum
// uint32 value, so the tree is assumed to hold fewer than 2^32-1 nodes.
const INTMAX = 1<<32 - 1

// Add inserts str into the tree and associates value with its final rune.
// Missing nodes are appended to root; the parent's child index is pointed at
// the new slot before the node is appended.
func (self *Tree2_t) Add(str string, value interface{}) {
	cur := 0
	last := INTMAX
	for _, key := range str {
		// Descend the lo/hi links until we find a node with this key or
		// fall off the end of the slice (meaning the node must be created).
		for cur < len(self.root) && key != self.root[cur].key {
			if key < self.root[cur].key {
				if self.root[cur].lo_kid == INTMAX {
					// Reserve the next append slot as our low child.
					self.root[cur].lo_kid = len(self.root)
				}
				cur = self.root[cur].lo_kid
			} else {
				if self.root[cur].hi_kid == INTMAX {
					self.root[cur].hi_kid = len(self.root)
				}
				cur = self.root[cur].hi_kid
			}
		}
		if cur >= len(self.root) {
			// No node for this rune yet: append one and hook it up to the
			// previous rune's eq chain if that link is still unset.
			cur = len(self.root)
			if last != INTMAX && self.root[last].eq_kid == INTMAX {
				self.root[last].eq_kid = cur
			}
			self.root = append(self.root, node2_t{key: key, eq_kid: INTMAX, hi_kid: INTMAX, lo_kid: INTMAX})
		}
		last = cur
		cur = self.root[cur].eq_kid
	}
	if last != INTMAX {
		// Terminate the prefix: the value lives on the last rune's node.
		self.root[last].value = value
	}
}

// Cursor returns a cursor positioned at the root of the tree.
// NOTE(review): the cursor captures the root slice at call time; nodes
// appended by a later Add are not visible through it.
func (self *Tree2_t) Cursor() (c *Cursor2_t) {
	return &Cursor2_t{root: self.root}
}

// Fetch advances the cursor by one rune. It returns the value stored at the
// reached node (nil if none) and next=true while the walk can continue;
// next=false means no node matched this rune from the current position.
func (self *Cursor2_t) Fetch(key rune) (value interface{}, next bool) {
	if len(self.root) == 0 {
		return nil, false
	}
	for self.cur < len(self.root) && key != self.root[self.cur].key {
		if key < self.root[self.cur].key {
			self.cur = self.root[self.cur].lo_kid
		} else {
			self.cur = self.root[self.cur].hi_kid
		}
	}
	// Walking into an INTMAX sentinel means the rune is absent.
	if self.cur == INTMAX {
		return
	}
	value = self.root[self.cur].value
	self.cur = self.root[self.cur].eq_kid
	return value, true
}

// Search walks str through the tree and returns the value of the longest
// matching prefix of str that has a value stored — not necessarily a value
// stored at str itself. NOTE(review): this prefix-match behavior looks
// intentional (see the "prefix terminator" field comment) — confirm with
// callers before changing.
func (self *Tree2_t) Search(str string) (value interface{}) {
	c := self.Cursor()
	for _, symbol := range str {
		temp, ok := c.Fetch(symbol)
		if temp != nil {
			value = temp
		}
		if ok == false {
			return
		}
	}
	return
}
package lib

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
)

// Check aborts the process via log.Fatalln when e is non-nil; a nil error
// is a no-op.
func Check(e error) {
	if e == nil {
		return
	}
	log.Fatalln(e)
}

// downloadImage fetches url over HTTP and returns the raw response body.
func downloadImage(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

// LogFile opens "<executable dir>/<name>.log" for appending, creating it if
// needed, and returns the open handle. Any failure is fatal via Check.
func LogFile(name string) *os.File {
	exePath, err := os.Executable()
	Check(err)

	target := fmt.Sprintf("%s/%s.log", filepath.Dir(exePath), name)
	handle, err := os.OpenFile(target, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
	Check(err)
	return handle
}
package models

import (
	"api/provider"
	"api/utils/inject"
)

// baseProvider caches the provider captured during BaseModels.New.
// NOTE(review): package-level mutable state; New is presumably called once
// at startup — confirm before calling it concurrently.
var baseProvider *provider.BaseProvider

// BaseModels groups the application's models; fields are populated by the
// inject framework via their `auto` tags.
type BaseModels struct {
	Hello *HelloModel `auto:"helloModel"`
	Test  *TestModel  `auto:"testModel"`
}

// New captures the global provider, registers this instance under the key
// "baseModel", and runs injection to resolve all `auto`-tagged fields.
func (model *BaseModels) New() {
	baseProvider = provider.GetProvider()
	inject.Register("baseModel", model)
	inject.Inject()
}
package banks_db

import "regexp"

// rawPaymentSystems maps a payment-system name to the regular expression
// matching its card numbers.
var rawPaymentSystems = map[string]string{
	"electron":           `^(4026|417500|4405|4508|4844|4913|4917)\d+$`,
	"maestro":            `^(5018|5020|5038|5612|5893|6304|6759|6761|6762|6763|0604|6390)\d+$`,
	"dankort":            `^(5019)\d+$`,
	"interpayment":       `^(636)\d+$`,
	"unionpay":           `^(62|88)\d+$`,
	"visa":               `^4[0-9]{12}(?:[0-9]{3})?`,
	"mastercard":         `^5[1-5][0-9]{14}`,
	"amex":               `^3[47][0-9]{13}`,
	"diners":             `^3(?:0[0-5]|[68][0-9])[0-9]{11}`,
	"discover":           `^6(?:011|5[0-9]{2})[0-9]{12}`,
	"jcb":                `^(?:2131|1800|35\d{3})\d{11}$`,
	"forbrugsforeningen": `^(600)\d+$`,
	"mir":                `^220[0-4][0-9][0-9]\d{10}$`,
}

// paymentSystemOrder fixes the order in which patterns are tried.
// Go map iteration order is randomized, and several patterns overlap (e.g.
// an Electron number also satisfies the generic Visa pattern), so ranging
// over the map made FindPaymentSystem nondeterministic. More specific
// prefix-based systems are listed before the generic network patterns.
var paymentSystemOrder = []string{
	"electron",
	"maestro",
	"dankort",
	"interpayment",
	"unionpay",
	"forbrugsforeningen",
	"mir",
	"jcb",
	"visa",
	"mastercard",
	"amex",
	"diners",
	"discover",
}

// paymentSystems holds the compiled form of rawPaymentSystems.
var paymentSystems = map[string]*regexp.Regexp{}

func init() {
	for paymentSystem, rawRe := range rawPaymentSystems {
		re, err := regexp.Compile(rawRe)
		if err != nil {
			panic(err)
		}
		paymentSystems[paymentSystem] = re
	}
	// Guard against the ordered list drifting out of sync with the map.
	if len(paymentSystemOrder) != len(rawPaymentSystems) {
		panic("banks_db: paymentSystemOrder out of sync with rawPaymentSystems")
	}
	for _, name := range paymentSystemOrder {
		if _, ok := paymentSystems[name]; !ok {
			panic("banks_db: unknown payment system in order list: " + name)
		}
	}
}

// FindPaymentSystem returns the name of the first payment system (in
// paymentSystemOrder priority) whose pattern matches creditCard, or nil if
// none matches. The result is deterministic for a given input.
func FindPaymentSystem(creditCard string) *string {
	for _, name := range paymentSystemOrder {
		if paymentSystems[name].MatchString(creditCard) {
			match := name
			return &match
		}
	}
	return nil
}
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package diagnostics

import (
	"context"
	"math/rand"
	"net/url"
	"strconv"
	"time"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/server/diagnostics/diagnosticspb"
	"github.com/cockroachdb/cockroach/pkg/util/cloudinfo"
	"github.com/cockroachdb/cockroach/pkg/util/envutil"
	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
	"github.com/cockroachdb/cockroach/pkg/util/system"
	"github.com/cockroachdb/cockroach/pkg/util/uuid"
	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/host"
	"github.com/shirou/gopsutil/load"
	"github.com/shirou/gopsutil/mem"
)

// updatesURL is the URL used to check for new versions. Can be nil if an empty
// URL is set.
var updatesURL *url.URL

const defaultUpdatesURL = `https://register.cockroachdb.com/api/clusters/updates`

// reportingURL is the URL used to report diagnostics/telemetry. Can be nil if
// an empty URL is set.
var reportingURL *url.URL

const defaultReportingURL = `https://register.cockroachdb.com/api/clusters/report`

// init resolves both URLs, allowing environment variables to override the
// defaults. A malformed override is a configuration bug, hence panic.
func init() {
	var err error
	updatesURL, err = url.Parse(
		envutil.EnvOrDefaultString("COCKROACH_UPDATE_CHECK_URL", defaultUpdatesURL),
	)
	if err != nil {
		panic(err)
	}
	reportingURL, err = url.Parse(
		envutil.EnvOrDefaultString("COCKROACH_USAGE_REPORT_URL", defaultReportingURL),
	)
	if err != nil {
		panic(err)
	}
}

// TestingKnobs groups testing knobs for diagnostics.
type TestingKnobs struct {
	// OverrideUpdatesURL if set, overrides the URL used to check for new
	// versions. It is a pointer to pointer to allow overriding to the nil URL.
	OverrideUpdatesURL **url.URL

	// OverrideReportingURL if set, overrides the URL used to report diagnostics.
	// It is a pointer to pointer to allow overriding to the nil URL.
	OverrideReportingURL **url.URL
}

// ClusterInfo contains cluster information that will become part of URLs.
type ClusterInfo struct {
	ClusterID  uuid.UUID
	TenantID   roachpb.TenantID
	IsInsecure bool
	IsInternal bool
}

// addInfoToURL sets query parameters on the URL used to report diagnostics. If
// this is a CRDB node, then nodeInfo is filled (and nodeInfo.NodeID is
// non-zero). Otherwise, this is a SQL-only tenant and sqlInfo is filled.
// The input URL is not mutated; a copy with the extended query is returned.
func addInfoToURL(
	url *url.URL,
	clusterInfo *ClusterInfo,
	env *diagnosticspb.Environment,
	nodeID roachpb.NodeID,
	sqlInfo *diagnosticspb.SQLInstanceInfo,
) *url.URL {
	if url == nil {
		return nil
	}
	// Work on a shallow copy so the caller's URL stays untouched.
	result := *url
	q := result.Query()

	// Don't set nodeid if this is a SQL-only instance.
	if nodeID != 0 {
		q.Set("nodeid", strconv.Itoa(int(nodeID)))
	}

	b := env.Build
	q.Set("sqlid", strconv.Itoa(int(sqlInfo.SQLInstanceID)))
	q.Set("uptime", strconv.Itoa(int(sqlInfo.Uptime)))
	q.Set("licensetype", env.LicenseType)
	q.Set("version", b.Tag)
	q.Set("platform", b.Platform)
	q.Set("uuid", clusterInfo.ClusterID.String())
	q.Set("tenantid", clusterInfo.TenantID.String())
	q.Set("insecure", strconv.FormatBool(clusterInfo.IsInsecure))
	q.Set("internal", strconv.FormatBool(clusterInfo.IsInternal))
	q.Set("buildchannel", b.Channel)
	q.Set("envchannel", b.EnvChannel)
	result.RawQuery = q.Encode()
	return &result
}

// randomly shift `d` to be up to `jitterSeconds` shorter or longer.
func addJitter(d time.Duration) time.Duration {
	const jitterSeconds = 120
	j := time.Duration(rand.Intn(jitterSeconds*2)-jitterSeconds) * time.Second
	return d + j
}

// populateMutex serializes calls into gopsutil; see the explanation at the
// top of populateHardwareInfo.
var populateMutex syncutil.Mutex

// populateHardwareInfo populates OS, CPU, memory, etc. information about the
// environment in which CRDB is running.
func populateHardwareInfo(ctx context.Context, e *diagnosticspb.Environment) {
	// The shirou/gopsutil/host library is not multi-thread safe. As one
	// example, it lazily initializes a global map the first time the
	// Virtualization function is called, but takes no lock while doing so.
	// Work around this limitation by taking our own lock.
	populateMutex.Lock()
	defer populateMutex.Unlock()

	// Each probe below is best-effort: on error the corresponding fields are
	// simply left at their zero values.
	if platform, family, version, err := host.PlatformInformation(); err == nil {
		e.Os.Family = family
		e.Os.Platform = platform
		e.Os.Version = version
	}

	// Only report virtualization when running as a guest.
	if virt, role, err := host.Virtualization(); err == nil && role == "guest" {
		e.Hardware.Virtualization = virt
	}

	if m, err := mem.VirtualMemory(); err == nil {
		e.Hardware.Mem.Available = m.Available
		e.Hardware.Mem.Total = m.Total
	}

	e.Hardware.Cpu.Numcpu = int32(system.NumCPU())
	if cpus, err := cpu.InfoWithContext(ctx); err == nil && len(cpus) > 0 {
		e.Hardware.Cpu.Sockets = int32(len(cpus))
		// Model/clock details are taken from the first CPU entry only.
		c := cpus[0]
		e.Hardware.Cpu.Cores = c.Cores
		e.Hardware.Cpu.Model = c.ModelName
		e.Hardware.Cpu.Mhz = float32(c.Mhz)
		e.Hardware.Cpu.Features = c.Flags
	}

	if l, err := load.AvgWithContext(ctx); err == nil {
		e.Hardware.Loadavg15 = float32(l.Load15)
	}

	e.Hardware.Provider, e.Hardware.InstanceClass = cloudinfo.GetInstanceClass(ctx)
	e.Topology.Provider, e.Topology.Region = cloudinfo.GetInstanceRegion(ctx)
}
package descriptor

import (
	"fmt"

	maia "github.com/grpc-custom/maia/proto"
)

// Component returns the component registered for the given component type.
// It returns an error when no component has been registered for typ.
func (r *Registry) Component(typ maia.ComponentType) (*Component, error) {
	comp, ok := r.components[typ]
	if !ok {
		// Fixed ungrammatical message ("no found %s").
		return nil, fmt.Errorf("component %s not found", typ)
	}
	return comp, nil
}
package gosignaler

import (
	"os"
	"sync"
	"syscall"
	"testing"
	"time"
)

// testSignalReceiver records the last signal delivered to it.
type testSignalReceiver struct {
	wg        *sync.WaitGroup
	ChkSignal os.Signal
}

// Receive stores the delivered signal; proceed is always false.
func (t *testSignalReceiver) Receive(signal os.Signal) (proceed bool) {
	t.ChkSignal = signal
	return false
}

// WaitGroup reports no wait group (nil).
func (t *testSignalReceiver) WaitGroup() (ret *sync.WaitGroup) {
	return nil
}

// Interests subscribes only to SIGHUP.
func (t *testSignalReceiver) Interests() (ret []os.Signal) {
	return []os.Signal{syscall.SIGHUP}
}

// TestListen registers a receiver, raises SIGHUP at this process, and
// verifies that the receiver observed it.
func TestListen(t *testing.T) {
	receiver := &testSignalReceiver{}
	Listen(receiver)

	// Give the listener a moment to install its handler, then raise SIGHUP.
	<-time.After(time.Second)
	syscall.Kill(syscall.Getpid(), syscall.SIGHUP)

	// Allow the signal to be dispatched before asserting.
	<-time.After(time.Second)
	if receiver.ChkSignal != syscall.SIGHUP {
		t.Fatalf("unexpected signal: %s", receiver.ChkSignal.String())
	}
}
package gorm_test

import (
	"testing"

	"github.com/go-test/deep"
	ints "github.com/porter-dev/porter/internal/models/integrations"
	orm "gorm.io/gorm"
)

// TestCreateKubeIntegration verifies that a KubeIntegration can be created
// and read back intact through the repository.
func TestCreateKubeIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_ki.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	ki := &ints.KubeIntegration{
		Mechanism:  ints.KubeLocal,
		ProjectID:  tester.initProjects[0].ID,
		UserID:     tester.initUsers[0].ID,
		Kubeconfig: []byte("current-context: testing\n"),
	}

	expKI := *ki

	ki, err := tester.repo.KubeIntegration.CreateKubeIntegration(ki)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	ki, err = tester.repo.KubeIntegration.ReadKubeIntegration(ki.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if ki.Model.ID != 1 {
		t.Errorf("incorrect kube integration ID: expected %d, got %d\n", 1, ki.Model.ID)
	}

	// reset fields for deep.Equal
	ki.Model = orm.Model{}

	if diff := deep.Equal(expKI, *ki); diff != nil {
		t.Errorf("incorrect kube integration")
		t.Error(diff)
	}
}

// TestListKubeIntegrationsByProjectID verifies listing KubeIntegrations
// scoped to a project.
func TestListKubeIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_kis.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initKubeIntegration(tester, t)
	defer cleanup(tester, t)

	kis, err := tester.repo.KubeIntegration.ListKubeIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(kis) != 1 {
		t.Fatalf("length of kube integrations incorrect: expected %d, got %d\n", 1, len(kis))
	}

	// make sure data is correct
	expKI := ints.KubeIntegration{
		Mechanism:  ints.KubeLocal,
		ProjectID:  tester.initProjects[0].ID,
		UserID:     tester.initUsers[0].ID,
		Kubeconfig: []byte("current-context: testing\n"),
	}

	ki := kis[0]

	// reset fields for reflect.DeepEqual
	ki.Model = orm.Model{}

	if diff := deep.Equal(expKI, *ki); diff != nil {
		t.Errorf("incorrect kube integration")
		t.Error(diff)
	}
}

// TestCreateBasicIntegration verifies create + read-back of a
// BasicIntegration.
func TestCreateBasicIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_basic.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	basic := &ints.BasicIntegration{
		ProjectID: tester.initProjects[0].ID,
		UserID:    tester.initUsers[0].ID,
		Username:  []byte("username"),
		Password:  []byte("password"),
	}

	expBasic := *basic

	basic, err := tester.repo.BasicIntegration.CreateBasicIntegration(basic)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	basic, err = tester.repo.BasicIntegration.ReadBasicIntegration(basic.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if basic.Model.ID != 1 {
		t.Errorf("incorrect basic integration ID: expected %d, got %d\n", 1, basic.Model.ID)
	}

	// reset fields for deep.Equal
	basic.Model = orm.Model{}

	if diff := deep.Equal(expBasic, *basic); diff != nil {
		t.Errorf("incorrect basic integration")
		t.Error(diff)
	}
}

// TestListBasicIntegrationsByProjectID verifies listing BasicIntegrations
// scoped to a project.
func TestListBasicIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_basics.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initBasicIntegration(tester, t)
	defer cleanup(tester, t)

	basics, err := tester.repo.BasicIntegration.ListBasicIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(basics) != 1 {
		t.Fatalf("length of basic integrations incorrect: expected %d, got %d\n", 1, len(basics))
	}

	// make sure data is correct
	expBasic := ints.BasicIntegration{
		ProjectID: tester.initProjects[0].ID,
		UserID:    tester.initUsers[0].ID,
		Username:  []byte("username"),
		Password:  []byte("password"),
	}

	basic := basics[0]

	// reset fields for reflect.DeepEqual
	basic.Model = orm.Model{}

	if diff := deep.Equal(expBasic, *basic); diff != nil {
		t.Errorf("incorrect basic integration")
		t.Error(diff)
	}
}

// TestCreateOIDCIntegration verifies create + read-back of an
// OIDCIntegration.
func TestCreateOIDCIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_oidc.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	oidc := &ints.OIDCIntegration{
		Client:       ints.OIDCKube,
		ProjectID:    tester.initProjects[0].ID,
		UserID:       tester.initUsers[0].ID,
		IssuerURL:    []byte("https://oidc.example.com"),
		ClientID:     []byte("exampleclientid"),
		ClientSecret: []byte("exampleclientsecret"),
		IDToken:      []byte("idtoken"),
		RefreshToken: []byte("refreshtoken"),
	}

	expOIDC := *oidc

	oidc, err := tester.repo.OIDCIntegration.CreateOIDCIntegration(oidc)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	oidc, err = tester.repo.OIDCIntegration.ReadOIDCIntegration(oidc.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if oidc.Model.ID != 1 {
		t.Errorf("incorrect oidc integration ID: expected %d, got %d\n", 1, oidc.Model.ID)
	}

	// reset fields for deep.Equal
	oidc.Model = orm.Model{}

	if diff := deep.Equal(expOIDC, *oidc); diff != nil {
		t.Errorf("incorrect oidc integration")
		t.Error(diff)
	}
}

// TestListOIDCIntegrationsByProjectID verifies listing OIDCIntegrations
// scoped to a project.
func TestListOIDCIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_oidcs.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initOIDCIntegration(tester, t)
	defer cleanup(tester, t)

	oidcs, err := tester.repo.OIDCIntegration.ListOIDCIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(oidcs) != 1 {
		t.Fatalf("length of oidc integrations incorrect: expected %d, got %d\n", 1, len(oidcs))
	}

	// make sure data is correct
	expOIDC := ints.OIDCIntegration{
		Client:       ints.OIDCKube,
		ProjectID:    tester.initProjects[0].ID,
		UserID:       tester.initUsers[0].ID,
		IssuerURL:    []byte("https://oidc.example.com"),
		ClientID:     []byte("exampleclientid"),
		ClientSecret: []byte("exampleclientsecret"),
		IDToken:      []byte("idtoken"),
		RefreshToken: []byte("refreshtoken"),
	}

	oidc := oidcs[0]

	// reset fields for reflect.DeepEqual
	oidc.Model = orm.Model{}

	if diff := deep.Equal(expOIDC, *oidc); diff != nil {
		t.Errorf("incorrect oidc integration")
		t.Error(diff)
	}
}

// TestCreateOAuthIntegration verifies create + read-back of an
// OAuthIntegration.
func TestCreateOAuthIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_oauth.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	oauth := &ints.OAuthIntegration{
		SharedOAuthModel: ints.SharedOAuthModel{
			ClientID:     []byte("exampleclientid"),
			AccessToken:  []byte("idtoken"),
			RefreshToken: []byte("refreshtoken"),
		},
		Client:    ints.OAuthGithub,
		ProjectID: tester.initProjects[0].ID,
		UserID:    tester.initUsers[0].ID,
	}

	expOAuth := *oauth

	oauth, err := tester.repo.OAuthIntegration.CreateOAuthIntegration(oauth)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	oauth, err = tester.repo.OAuthIntegration.ReadOAuthIntegration(oauth.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if oauth.Model.ID != 1 {
		t.Errorf("incorrect oauth integration ID: expected %d, got %d\n", 1, oauth.Model.ID)
	}

	// reset fields for deep.Equal
	oauth.Model = orm.Model{}

	if diff := deep.Equal(expOAuth, *oauth); diff != nil {
		t.Errorf("incorrect oauth integration")
		t.Error(diff)
	}
}

// TestListOAuthIntegrationsByProjectID verifies listing OAuthIntegrations
// scoped to a project.
func TestListOAuthIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_oauths.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initOAuthIntegration(tester, t)
	defer cleanup(tester, t)

	oauths, err := tester.repo.OAuthIntegration.ListOAuthIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(oauths) != 1 {
		t.Fatalf("length of oauth integrations incorrect: expected %d, got %d\n", 1, len(oauths))
	}

	// make sure data is correct
	expOAuth := ints.OAuthIntegration{
		SharedOAuthModel: ints.SharedOAuthModel{
			ClientID:     []byte("exampleclientid"),
			AccessToken:  []byte("idtoken"),
			RefreshToken: []byte("refreshtoken"),
		},
		Client:    ints.OAuthGithub,
		ProjectID: tester.initProjects[0].ID,
		UserID:    tester.initUsers[0].ID,
	}

	oauth := oauths[0]

	// reset fields for reflect.DeepEqual
	oauth.Model = orm.Model{}

	if diff := deep.Equal(expOAuth, *oauth); diff != nil {
		t.Errorf("incorrect oauth integration")
		t.Error(diff)
	}
}

// TestCreateGCPIntegration verifies create + read-back of a GCPIntegration.
func TestCreateGCPIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_gcp.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	gcp := &ints.GCPIntegration{
		ProjectID:    tester.initProjects[0].ID,
		UserID:       tester.initUsers[0].ID,
		GCPProjectID: "test-proj-123456",
		GCPUserEmail: "test@test.it",
		GCPKeyData:   []byte("{\"test\":\"key\"}"),
	}

	expGCP := *gcp

	gcp, err := tester.repo.GCPIntegration.CreateGCPIntegration(gcp)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	gcp, err = tester.repo.GCPIntegration.ReadGCPIntegration(gcp.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if gcp.Model.ID != 1 {
		t.Errorf("incorrect gcp integration ID: expected %d, got %d\n", 1, gcp.Model.ID)
	}

	// reset fields for deep.Equal
	gcp.Model = orm.Model{}

	if diff := deep.Equal(expGCP, *gcp); diff != nil {
		t.Errorf("incorrect gcp integration")
		t.Error(diff)
	}
}

// TestListGCPIntegrationsByProjectID verifies listing GCPIntegrations
// scoped to a project.
func TestListGCPIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_gcps.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initGCPIntegration(tester, t)
	defer cleanup(tester, t)

	gcps, err := tester.repo.GCPIntegration.ListGCPIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(gcps) != 1 {
		t.Fatalf("length of gcp integrations incorrect: expected %d, got %d\n", 1, len(gcps))
	}

	// make sure data is correct
	expGCP := ints.GCPIntegration{
		ProjectID:    tester.initProjects[0].ID,
		UserID:       tester.initUsers[0].ID,
		GCPProjectID: "test-proj-123456",
		GCPUserEmail: "test@test.it",
		GCPKeyData:   []byte("{\"test\":\"key\"}"),
	}

	gcp := gcps[0]

	// reset fields for reflect.DeepEqual
	gcp.Model = orm.Model{}

	if diff := deep.Equal(expGCP, *gcp); diff != nil {
		t.Errorf("incorrect gcp integration")
		t.Error(diff)
	}
}

// TestCreateAWSIntegration verifies create + read-back of an AWSIntegration.
func TestCreateAWSIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_aws.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)

	aws := &ints.AWSIntegration{
		ProjectID:          tester.initProjects[0].ID,
		UserID:             tester.initUsers[0].ID,
		AWSClusterID:       []byte("example-cluster-0"),
		AWSAccessKeyID:     []byte("accesskey"),
		AWSSecretAccessKey: []byte("secret"),
		AWSSessionToken:    []byte("optional"),
	}

	expAWS := *aws

	aws, err := tester.repo.AWSIntegration.CreateAWSIntegration(aws)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	aws, err = tester.repo.AWSIntegration.ReadAWSIntegration(aws.Model.ID)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// make sure id is 1
	if aws.Model.ID != 1 {
		t.Errorf("incorrect aws integration ID: expected %d, got %d\n", 1, aws.Model.ID)
	}

	// reset fields for deep.Equal
	aws.Model = orm.Model{}

	if diff := deep.Equal(expAWS, *aws); diff != nil {
		t.Errorf("incorrect aws integration")
		t.Error(diff)
	}
}

// TestOverwriteAWSIntegration verifies that overwriting an AWSIntegration
// persists the new credentials under the same ID.
func TestOverwriteAWSIntegration(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_overwrite_aws.db",
	}

	setupTestEnv(tester, t)
	initUser(tester, t)
	initProject(tester, t)
	initAWSIntegration(tester, t)
	defer cleanup(tester, t)

	aws, err := tester.repo.AWSIntegration.ReadAWSIntegration(1)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	aws.AWSAccessKeyID = []byte("accesskey2")
	aws.AWSSecretAccessKey = []byte("secret2")

	aws, err = tester.repo.AWSIntegration.OverwriteAWSIntegration(aws)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	// NOTE(review): the error from this read is never checked — a failed
	// read would surface here as a nil-pointer panic below. TODO: check err.
	gotAWS, err := tester.repo.AWSIntegration.ReadAWSIntegration(1)

	expAWS := &ints.AWSIntegration{
		ProjectID:          tester.initProjects[0].ID,
		UserID:             tester.initUsers[0].ID,
		AWSClusterID:       []byte("example-cluster-0"),
		AWSAccessKeyID:     []byte("accesskey2"),
		AWSSecretAccessKey: []byte("secret2"),
		AWSSessionToken:    []byte("optional"),
	}

	// make sure id is 1
	if gotAWS.Model.ID != 1 {
		t.Errorf("incorrect aws integration ID: expected %d, got %d\n", 1, gotAWS.Model.ID)
	}

	// reset fields for deep.Equal
	gotAWS.Model = orm.Model{}

	if diff := deep.Equal(expAWS, gotAWS); diff != nil {
		t.Errorf("incorrect aws integration")
		t.Error(diff)
	}
}

// TestListAWSIntegrationsByProjectID verifies listing AWSIntegrations
// scoped to a project.
func TestListAWSIntegrationsByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_awss.db",
	}

	setupTestEnv(tester, t)
	initProject(tester, t)
	initAWSIntegration(tester, t)
	defer cleanup(tester, t)

	awss, err := tester.repo.AWSIntegration.ListAWSIntegrationsByProjectID(
		tester.initProjects[0].Model.ID,
	)

	if err != nil {
		t.Fatalf("%v\n", err)
	}

	if len(awss) != 1 {
		t.Fatalf("length of aws integrations incorrect: expected %d, got %d\n", 1, len(awss))
	}

	// make sure data is correct
	expAWS := ints.AWSIntegration{
		ProjectID:          tester.initProjects[0].ID,
		UserID:             tester.initUsers[0].ID,
		AWSClusterID:       []byte("example-cluster-0"),
		AWSAccessKeyID:     []byte("accesskey"),
		AWSSecretAccessKey: []byte("secret"),
		AWSSessionToken:    []byte("optional"),
	}

	aws := awss[0]

	// reset fields for reflect.DeepEqual
	aws.Model = orm.Model{}

	if diff := deep.Equal(expAWS, *aws); diff != nil {
		t.Errorf("incorrect aws integration")
		t.Error(diff)
	}
}
package util

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"strconv"
	"time"
	"regexp"

	"k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog"
)

// ClusterMemberInfo describes one member of a cluster as stored in the
// database (id, membership attributes, role/status and timestamps).
type ClusterMemberInfo struct {
	Id          int64
	Namespace   string
	Cluster     string
	MemberId    string
	MemberName  string
	Attribute   string
	Role        string
	Status      string
	CreatedTime time.Time
	UpdatedTime time.Time
}

// Package-level configuration; the *Path variables are presumably paths to
// mounted secret files (TODO confirm against the deployment manifests), and
// the lowercase variables cache the file contents loaded by ReadFile.
var (
	SMTPUsernamePath       string
	SMTPPasswordPath       string
	SMTPHost               string
	SMTPPort               int
	AccessSecretPath       string
	accessSecret           string
	username               string
	password               string
	inviteMail             string
	HtmlHomePath           string
	TokenExpiredDate       string
	ParsedTokenExpiredDate time.Duration
)

// PatchOps represents a single JSON Patch operation (op/path/value).
type PatchOps struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value,omitempty"`
}

// ReadFile loads the access secret and the SMTP username/password from their
// configured paths and parses TokenExpiredDate.
// NOTE(review): on a read error it logs and returns early, silently leaving
// the remaining values unset — callers cannot tell loading failed.
func ReadFile() {
	content, err := ioutil.ReadFile(AccessSecretPath)
	if err != nil {
		klog.Errorln(err)
		return
	}
	accessSecret = string(content)
	// klog.Infoln(accessSecret)

	content, err = ioutil.ReadFile(SMTPUsernamePath)
	if err != nil {
		klog.Errorln(err)
		return
	}
	username = string(content)

	content, err = ioutil.ReadFile(SMTPPasswordPath)
	if err != nil {
		klog.Errorln(err)
		return
	}
	password = string(content)

	ParsedTokenExpiredDate = parseDate(TokenExpiredDate)
}

// parseDate converts a "<number><unit>" string (e.g. "2hours", "3days",
// units: minutes/hours/days/weeks) into a time.Duration. An unrecognized
// unit falls back to one week. Panics if no leading number is present.
func parseDate(tokenExpiredDate string) time.Duration {
	regex := regexp.MustCompile("[0-9]+")
	num := regex.FindAllString(tokenExpiredDate, -1)[0]
	parsedNum, err := strconv.Atoi(num)
	if err != nil {
		panic(err)
	}

	regex = regexp.MustCompile("[a-z]+")
	unit := regex.FindAllString(tokenExpiredDate, -1)[0]

	switch unit {
	case "minutes":
		return time.Minute * time.Duration(parsedNum)
	case "hours":
		return time.Hour * time.Duration(parsedNum)
	case "days":
		return time.Hour * time.Duration(24) * time.Duration(parsedNum)
	case "weeks":
		return time.Hour * time.Duration(24) * time.Duration(7) * time.Duration(parsedNum)
	default:
		return time.Hour * time.Duration(24) * time.Duration(7) // default: one week (the original comment said "1days", but the value is 24h*7)
	}
}

// CreatePatch builds one JSON Patch operation and appends it to the slice.
func CreatePatch(po *[]PatchOps, o, p string, v interface{}) {
	*po = append(*po, PatchOps{
		Op:    o,
		Path:  p,
		Value: v,
	})
}

// ToAdmissionResponse wraps err's message into an AdmissionResponse
// (Response.result.message).
func ToAdmissionResponse(err error) *v1beta1.AdmissionResponse {
	return &v1beta1.AdmissionResponse{
		Result: &metav1.Status{
			Message: err.Error(),
		},
	}
}

// SetResponse writes CORS headers plus either outJson (as JSON) or outString
// to res with the given status code, returning res for chaining.
func SetResponse(res http.ResponseWriter, outString string, outJson interface{}, status int) http.ResponseWriter {
	//set Cors
	// res.Header().Set("Access-Control-Allow-Origin", "*")
	res.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
	res.Header().Set("Access-Control-Max-Age", "3628800")
	res.Header().Set("Access-Control-Expose-Headers", "Content-Type, X-Requested-With, Accept, Authorization, Referer, User-Agent")

	//set Out
	if outJson != nil {
		res.Header().Set("Content-Type", "application/json")
		js, err := json.Marshal(outJson)
		if err != nil {
			http.Error(res, err.Error(), http.StatusInternalServerError)
		}
		//set StatusCode
		res.WriteHeader(status)
		res.Write(js)
		return res
	} else {
		//set StatusCode
		res.WriteHeader(status)
		res.Write([]byte(outString))
		return res
	}
}

// Contains reports whether item appears in slice.
func Contains(slice []string, item string) bool {
	set := make(map[string]struct{}, len(slice))
	for _, s := range slice {
		set[s] = struct{}{}
	}

	_, ok := set[item]
	return ok
}

// Remove returns slice without item.
// NOTE(review): the round-trip through a map also removes duplicates and
// loses the original ordering — confirm callers do not rely on either.
func Remove(slice []string, item string) []string {
	set := make(map[string]struct{}, len(slice))
	for _, s := range slice {
		set[s] = struct{}{}
	}

	// for _, item := range items {
	if _, ok := set[item]; ok {
		delete(set, item)
	}
	// }

	var newSlice []string
	for k, _ := range set {
		newSlice = append(newSlice, k)
	}
	return newSlice
}

// func Remove(slice []string, items []string) []string {
// 	set := make(map[string]struct{}, len(slice))
// 	for _, s := range slice {
// 		set[s] = struct{}{}
// 	}

// 	for _, item := range items {
// 		_, ok := set[item]
// 		if ok {
// 			delete(set, item)
// 		}
// 	}

// 	var newSlice []string
// 	for k, _ := range set {
// 		newSlice = append(newSlice, k)
// 	}
// 	return newSlice
// }
// Homework 8: CLI and Regex // Due April 4, 2017 at 11:59pm package main import ( "flag" "fmt" "regexp" "unicode" ) // Problem 1: CLI // Write a command line interface that prints out sequences of numbers. // // Usage of hw8: // hw8 [flags] # prints out the sequence of numbers, each on a new line // Flags: // -start int // starting integer for the sequence (default 0) // -end int // ending integer for the sequence, not inclusive (default 0) // -step int // amount to skip by in each iteration (default 1) // // For example, executing `./hw8 -start=2 -end=5` should print out: // 2 // 3 // 4 // // Executing `./hw8 -start=2 -end=7 -step=3` should print out: // 2 // 5 // // Executing `./hw8 -start=10 -end=7 -step=-1` should print out: // 10 // 9 // 8 // // If the parameters are invalid (eg: positive step and start > end or // negative step and start < end or invalid parameter values passed in), // print out an error message using `log.Print(ln|f)?`. // // Feel free to do this section directly in the main() function. func main() { s := flag.Int("start", 0, "an int") e := flag.Int("end", 0, "an int") p := flag.Int("step", 0, "an int") flag.Parse() for i := *s; (*e-i)**p > 0; i += *p { fmt.Println(i) } } // GetEmails takes in string input and returns a string slice of the // emails found in the input string. // // Use regexp to extract all of the emails from the input string. // Each email consists of the email name + "@" + domain + "." + top level domain. // The email name should consist of only letters, numbers, underscores and dots. // The domain should consist of only letters or dots. // The top level domain must be "com", "org", "net" or "edu". // between the domain and tld. // // You can assume that all email addresses will be surrounded by whitespace. 
func GetEmails(s string) []string { reg := regexp.MustCompile(`[A-Za-z0-9._+-]+@[A-Za-z.]+.(com|net|edu|org)`) emails := reg.FindAllString(s, -1) return emails } // GetPhoneNumbers takes in string input and returns a string slice of the // phone numbers found in the input string. // // Use regexp to extract all of the phone numbers from the input string. // Here are the formats phone numbers can be in for this problem: // 215-555-3232 // (215)-555-3232 // 215.555.3232 // 2155553232 // 215 555 3232 // // For your output, you should return a string slice of phone numbers with // just the numbers (eg: "2158887744") // // You can assume that all phone numbers will be surrounded by whitespace. func GetPhoneNumbers(s string) []string { reg := regexp.MustCompile(`[\\(\\)0-9\s.+-]{10,}`) numstr := reg.FindAllString(s, -1) nums := make([]string, len(numstr)) for i, ns := range numstr { nums[i] = ParsePhone(ns) } return nums } // from hw1 func ParsePhone(phone string) string { var num = make([]rune, 10) var cnt = 0 for _, n := range phone { if cnt >= 10 { break } if unicode.IsDigit(n) { num[cnt], cnt = n, cnt+1 } } return string(num) }
package br

import (
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/AlekSi/pointer"
	"github.com/olebedev/when/rules"
	"github.com/pkg/errors"
)

// Deadline returns a rule that recognizes Brazilian-Portuguese deadline
// phrases such as "dentro de 5 minutos" or "em duas semanas" and writes
// the corresponding offset into the rules.Context.
//
// Capture 1 is the quantity: a number word (INTEGER_WORDS_PATTERN,
// defined elsewhere in this package), a digit run, a "few" word
// ("pouco(s)"/"algum(as)"), or "meio"/"meia" (half). Capture 2 is the
// unit (seconds through years). With the rules.Override strategy any
// previously-matched value is replaced; otherwise a value already set
// in the context is kept.
func Deadline(s rules.Strategy) rules.Rule {
	overwrite := s == rules.Override

	return &rules.F{
		RegExp: regexp.MustCompile(
			"(?i)(?:\\W|^)(dentro\\sde|em)\\s*" +
				"(?:(" + INTEGER_WORDS_PATTERN + "|[0-9]+|(?:\\s*pouc[oa](?:s|)?|algu(?:mas|m|ns)?|mei[oa]?))\\s*" +
				"(segundos?|min(?:uto)?s?|horas?|dias?|semanas?|mês|meses|anos?)\\s*)" +
				"(?:\\W|$)"),
		Applier: func(m *rules.Match, c *rules.Context, o *rules.Options, ref time.Time) (bool, error) {
			numStr := strings.TrimSpace(m.Captures[1])
			var num int
			var err error

			if n, ok := INTEGER_WORDS[numStr]; ok {
				// Spelled-out number ("duas", "três", ...).
				num = n
			} else if strings.Contains(numStr, "pouc") || strings.Contains(numStr, "algu") {
				// "a few" — approximated as three.
				num = 3
			} else if strings.Contains(numStr, "mei") {
				// "half" — handled by the dedicated branch below; num stays 0.
				// pass
			} else {
				num, err = strconv.Atoi(numStr)
				if err != nil {
					return false, errors.Wrapf(err, "convert '%s' to int", numStr)
				}
			}

			exponent := strings.TrimSpace(m.Captures[2])

			if !strings.Contains(numStr, "mei") {
				// Whole-number quantities. Durations go into c.Duration;
				// months and years are applied as calendar fields instead.
				switch {
				case strings.Contains(exponent, "segundo"):
					if c.Duration == 0 || overwrite {
						c.Duration = time.Duration(num) * time.Second
					}
				case strings.Contains(exponent, "min"):
					if c.Duration == 0 || overwrite {
						c.Duration = time.Duration(num) * time.Minute
					}
				case strings.Contains(exponent, "hora"):
					if c.Duration == 0 || overwrite {
						c.Duration = time.Duration(num) * time.Hour
					}
				case strings.Contains(exponent, "dia"):
					if c.Duration == 0 || overwrite {
						c.Duration = time.Duration(num) * 24 * time.Hour
					}
				case strings.Contains(exponent, "semana"):
					if c.Duration == 0 || overwrite {
						c.Duration = time.Duration(num) * 7 * 24 * time.Hour
					}
				case strings.Contains(exponent, "mês"), strings.Contains(exponent, "meses"):
					if c.Month == nil || overwrite {
						// NOTE(review): modulo wrap yields month 0 for
						// December + n; this mirrors the upstream rules —
						// left as-is.
						c.Month = pointer.ToInt((int(ref.Month()) + num) % 12)
					}
				case strings.Contains(exponent, "ano"):
					if c.Year == nil || overwrite {
						c.Year = pointer.ToInt(ref.Year() + num)
					}
				}
			} else {
				// "meio"/"meia" (half a unit): half an hour, half a day
				// (12h), half a week (3.5 days), half a month (~2 weeks),
				// half a year (+6 months).
				switch {
				case strings.Contains(exponent, "hora"):
					if c.Duration == 0 || overwrite {
						c.Duration = 30 * time.Minute
					}
				case strings.Contains(exponent, "dia"):
					if c.Duration == 0 || overwrite {
						c.Duration = 12 * time.Hour
					}
				case strings.Contains(exponent, "semana"):
					if c.Duration == 0 || overwrite {
						c.Duration = 7 * 12 * time.Hour
					}
				case strings.Contains(exponent, "mês"), strings.Contains(exponent, "meses"):
					if c.Duration == 0 || overwrite {
						// 2 weeks
						c.Duration = 14 * 24 * time.Hour
					}
				case strings.Contains(exponent, "ano"):
					if c.Month == nil || overwrite {
						c.Month = pointer.ToInt((int(ref.Month()) + 6) % 12)
					}
				}
			}

			return true, nil
		},
	}
}
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gonet

import (
	"context"
	"fmt"
	"io"
	"net"
	"reflect"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/nettest"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/link/loopback"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
	"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
	"gvisor.dev/gvisor/pkg/waiter"
)

const (
	// NICID is the ID of the single loopback NIC every test stack uses.
	NICID = 1
)

// TestTimeouts verifies that the deadline setters accept the zero time
// (meaning "no deadline") without error.
func TestTimeouts(t *testing.T) {
	nc := NewTCPConn(nil, nil)
	dlfs := []struct {
		name string
		f    func(time.Time) error
	}{
		{"SetDeadline", nc.SetDeadline},
		{"SetReadDeadline", nc.SetReadDeadline},
		{"SetWriteDeadline", nc.SetWriteDeadline},
	}

	for _, dlf := range dlfs {
		if err := dlf.f(time.Time{}); err != nil {
			t.Errorf("got %s(time.Time{}) = %v, want = %v", dlf.name, err, nil)
		}
	}
}

// newLoopbackStack builds a netstack with a single loopback NIC and
// default IPv4/IPv6 routes, ready for the tests to use.
func newLoopbackStack() (*stack.Stack, tcpip.Error) {
	// Create the stack and add a NIC.
	s := stack.New(stack.Options{
		NetworkProtocols:   []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol},
	})

	if err := s.CreateNIC(NICID, loopback.New()); err != nil {
		return nil, err
	}

	// Add default route.
	s.SetRouteTable([]tcpip.Route{
		// IPv4
		{
			Destination: header.IPv4EmptySubnet,
			NIC:         NICID,
		},

		// IPv6
		{
			Destination: header.IPv6EmptySubnet,
			NIC:         NICID,
		},
	})

	return s, nil
}

// testConnection bundles a raw endpoint with its waiter state so tests
// can drive a TCP connection directly.
type testConnection struct {
	wq *waiter.Queue
	e  *waiter.Entry
	ch chan struct{}
	ep tcpip.Endpoint
}

// connect dials addr over the given stack, blocking until the TCP
// handshake completes, and leaves a readable-events waiter registered.
func connect(s *stack.Stack, addr tcpip.FullAddress) (*testConnection, tcpip.Error) {
	wq := &waiter.Queue{}
	ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
	if err != nil {
		return nil, err
	}

	entry, ch := waiter.NewChannelEntry(waiter.WritableEvents)
	wq.EventRegister(&entry)

	err = ep.Connect(addr)
	if _, ok := err.(*tcpip.ErrConnectStarted); ok {
		// Connect is asynchronous; wait for writability, then fetch the
		// final result.
		<-ch
		err = ep.LastError()
	}
	if err != nil {
		return nil, err
	}

	wq.EventUnregister(&entry)
	entry, ch = waiter.NewChannelEntry(waiter.ReadableEvents)
	wq.EventRegister(&entry)

	return &testConnection{wq, &entry, ch, ep}, nil
}

// close unregisters the waiter entry and closes the endpoint.
func (c *testConnection) close() {
	c.wq.EventUnregister(c.e)
	c.ep.Close()
}

// TestCloseReader tests that Conn.Close() causes Conn.Read() to unblock.
func TestCloseReader(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}

	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	l, e := ListenTCP(s, addr, ipv4.ProtocolNumber)
	if e != nil {
		t.Fatalf("NewListener() = %v", e)
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		c, err := l.Accept()
		if err != nil {
			t.Errorf("l.Accept() = %v", err)
			// Cannot call Fatalf in goroutine. Just return from the goroutine.
			return
		}

		// Give c.Read() a chance to block before closing the connection.
		time.AfterFunc(time.Millisecond*50, func() {
			c.Close()
		})

		buf := make([]byte, 256)
		n, err := c.Read(buf)
		if n != 0 || err != io.EOF {
			t.Errorf("c.Read() = (%d, %v), want (0, EOF)", n, err)
		}
	}()
	sender, err := connect(s, addr)
	if err != nil {
		t.Fatalf("connect() = %v", err)
	}

	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Errorf("c.Read() didn't unblock")
	}
	sender.close()
}

// TestCloseReaderWithForwarder tests that TCPConn.Close wakes TCPConn.Read when
// using tcp.Forwarder.
func TestCloseReaderWithForwarder(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	done := make(chan struct{})

	fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
		defer close(done)

		var wq waiter.Queue
		ep, err := r.CreateEndpoint(&wq)
		if err != nil {
			t.Fatalf("r.CreateEndpoint() = %v", err)
		}
		defer ep.Close()
		r.Complete(false)

		c := NewTCPConn(&wq, ep)

		// Give c.Read() a chance to block before closing the connection.
		time.AfterFunc(time.Millisecond*50, func() {
			c.Close()
		})

		buf := make([]byte, 256)
		n, e := c.Read(buf)
		if n != 0 || e != io.EOF {
			t.Errorf("c.Read() = (%d, %v), want (0, EOF)", n, e)
		}
	})
	s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)

	sender, err := connect(s, addr)
	if err != nil {
		t.Fatalf("connect() = %v", err)
	}

	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Errorf("c.Read() didn't unblock")
	}
	sender.close()
}

// TestCloseRead verifies that CloseRead makes subsequent reads return
// io.EOF while writes still succeed.
func TestCloseRead(t *testing.T) {
	s, terr := newLoopbackStack()
	if terr != nil {
		t.Fatalf("newLoopbackStack() = %v", terr)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	done := make(chan struct{})
	fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
		var wq waiter.Queue
		_, err := r.CreateEndpoint(&wq)
		if err != nil {
			t.Fatalf("r.CreateEndpoint() = %v", err)
		}
		close(done)
		// Endpoint will be closed in deferred s.Close (above).
	})

	s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)

	tc, terr := connect(s, addr)
	if terr != nil {
		t.Fatalf("connect() = %v", terr)
	}
	c := NewTCPConn(tc.wq, tc.ep)

	if err := c.CloseRead(); err != nil {
		t.Errorf("c.CloseRead() = %v", err)
	}

	buf := make([]byte, 256)
	if n, err := c.Read(buf); err != io.EOF {
		t.Errorf("c.Read() = (%d, %v), want (0, io.EOF)", n, err)
	}

	if n, err := c.Write([]byte("abc123")); n != 6 || err != nil {
		t.Errorf("c.Write() = (%d, %v), want (6, nil)", n, err)
	}
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Fatalf("timed out waiting for r.CreateEndpoint(...) to complete")
	}
}

// TestCloseWrite verifies that CloseWrite sends a FIN (peer sees EOF)
// while this side can still read data the peer writes back.
func TestCloseWrite(t *testing.T) {
	s, terr := newLoopbackStack()
	if terr != nil {
		t.Fatalf("newLoopbackStack() = %v", terr)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
		var wq waiter.Queue
		ep, err := r.CreateEndpoint(&wq)
		if err != nil {
			t.Fatalf("r.CreateEndpoint() = %v", err)
		}
		defer ep.Close()
		r.Complete(false)

		c := NewTCPConn(&wq, ep)

		// The peer closed its write side, so we should read EOF...
		n, e := c.Read(make([]byte, 256))
		if n != 0 || e != io.EOF {
			t.Errorf("c.Read() = (%d, %v), want (0, io.EOF)", n, e)
		}
		// ...but still be able to write back to it.
		if n, e = c.Write([]byte("abc123")); n != 6 || e != nil {
			t.Errorf("c.Write() = (%d, %v), want (6, nil)", n, e)
		}
	})

	s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)

	tc, terr := connect(s, addr)
	if terr != nil {
		t.Fatalf("connect() = %v", terr)
	}
	c := NewTCPConn(tc.wq, tc.ep)

	if err := c.CloseWrite(); err != nil {
		t.Errorf("c.CloseWrite() = %v", err)
	}

	buf := make([]byte, 256)
	n, err := c.Read(buf)
	if err != nil || string(buf[:n]) != "abc123" {
		t.Fatalf("c.Read() = (%d, %v), want (6, nil)", n, err)
	}

	n, err = c.Write([]byte("abc123"))
	got, ok := err.(*net.OpError)
	want := "endpoint is closed for send"
	if n != 0 || !ok || got.Op != "write" || got.Err == nil || !strings.HasSuffix(got.Err.Error(), want) {
		t.Errorf("c.Write() = (%d, %v), want (0, OpError(Op: write, Err: %s))", n, err, want)
	}
}

// TestCloseStack tests that stack.Close wakes TCPConn.Read when
// using tcp.Forwarder.
func TestCloseStack(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	done := make(chan struct{})

	fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
		defer close(done)

		var wq waiter.Queue
		ep, err := r.CreateEndpoint(&wq)
		if err != nil {
			t.Fatalf("r.CreateEndpoint() = %v", err)
		}
		r.Complete(false)

		c := NewTCPConn(&wq, ep)

		// Give c.Read() a chance to block before closing the stack.
		time.AfterFunc(50*time.Millisecond, func() {
			s.Close()
			s.Wait()
		})

		buf := make([]byte, 256)
		n, e := c.Read(buf)

		// Depending on the ordering of Close and Read, we should get
		// one of two errors.
		if n != 0 || (!strings.Contains(e.Error(), "operation aborted") && !strings.Contains(e.Error(), "connection reset by peer")) {
			t.Errorf("c.Read() = (%d, %v), want (0, operation aborted) or (0, connection reset by peer)", n, e)
		}
	})
	s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)

	sender, err := connect(s, addr)
	if err != nil {
		t.Fatalf("connect() = %v", err)
	}

	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Errorf("c.Read() didn't unblock")
	}
	sender.close()
}

// TestUDPForwarder verifies a UDP echo through udp.Forwarder: the
// handler reads one datagram and writes it back to the sender.
func TestUDPForwarder(t *testing.T) {
	s, terr := newLoopbackStack()
	if terr != nil {
		t.Fatalf("newLoopbackStack() = %v", terr)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	ip1 := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4())
	addr1 := tcpip.FullAddress{NIC: NICID, Addr: ip1, Port: 11211}
	protocolAddr1 := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip1.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr1, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr1, err)
	}
	ip2 := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 2).To4())
	addr2 := tcpip.FullAddress{NIC: NICID, Addr: ip2, Port: 11311}
	protocolAddr2 := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip2.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr2, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr2, err)
	}

	done := make(chan struct{})
	fwd := udp.NewForwarder(s, func(r *udp.ForwarderRequest) {
		defer close(done)

		var wq waiter.Queue
		ep, err := r.CreateEndpoint(&wq)
		if err != nil {
			t.Fatalf("r.CreateEndpoint() = %v", err)
		}
		defer ep.Close()

		c := NewUDPConn(s, &wq, ep)

		buf := make([]byte, 256)
		n, e := c.Read(buf)
		if e != nil {
			t.Errorf("c.Read() = %v", e)
		}

		if _, e := c.Write(buf[:n]); e != nil {
			t.Errorf("c.Write() = %v", e)
		}
	})
	s.SetTransportProtocolHandler(udp.ProtocolNumber, fwd.HandlePacket)

	c2, err := DialUDP(s, &addr2, nil, ipv4.ProtocolNumber)
	if err != nil {
		t.Fatal("DialUDP(bind port 5):", err)
	}

	sent := "abc123"
	sendAddr := fullToUDPAddr(addr1)
	if n, err := c2.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) {
		t.Errorf("c1.WriteTo(%q, %v) = %d, %v, want = %d, %v", sent, sendAddr, n, err, len(sent), nil)
	}

	buf := make([]byte, 256)
	n, recvAddr, err := c2.ReadFrom(buf)
	if err != nil || recvAddr.String() != sendAddr.String() {
		t.Errorf("c1.ReadFrom() = %d, %v, %v, want = %d, %v, %v", n, recvAddr, err, len(sent), sendAddr, nil)
	}
}

// TestDeadlineChange tests that changing the deadline affects currently blocked reads.
func TestDeadlineChange(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}

	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	l, e := ListenTCP(s, addr, ipv4.ProtocolNumber)
	if e != nil {
		t.Fatalf("NewListener() = %v", e)
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		c, err := l.Accept()
		if err != nil {
			t.Errorf("l.Accept() = %v", err)
			// Cannot call Fatalf in goroutine. Just return from the goroutine.
			return
		}

		c.SetDeadline(time.Now().Add(time.Minute))
		// Give c.Read() a chance to block before shortening the deadline.
		time.AfterFunc(time.Millisecond*50, func() {
			c.SetDeadline(time.Now().Add(time.Millisecond * 10))
		})

		buf := make([]byte, 256)
		n, err := c.Read(buf)
		got, ok := err.(*net.OpError)
		want := "i/o timeout"
		if n != 0 || !ok || got.Err == nil || got.Err.Error() != want {
			t.Errorf("c.Read() = (%d, %v), want (0, OpError(%s))", n, err, want)
		}
	}()
	sender, err := connect(s, addr)
	if err != nil {
		t.Fatalf("connect() = %v", err)
	}

	select {
	case <-done:
	case <-time.After(time.Millisecond * 500):
		t.Errorf("c.Read() didn't unblock")
	}
	sender.close()
}

// TestPacketConnTransfer exercises an unconnected UDP round trip
// between two addresses on the same stack via WriteTo/ReadFrom.
func TestPacketConnTransfer(t *testing.T) {
	s, e := newLoopbackStack()
	if e != nil {
		t.Fatalf("newLoopbackStack() = %v", e)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	ip1 := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4())
	addr1 := tcpip.FullAddress{NIC: NICID, Addr: ip1, Port: 11211}
	protocolAddr1 := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip1.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr1, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr1, err)
	}
	ip2 := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 2).To4())
	addr2 := tcpip.FullAddress{NIC: NICID, Addr: ip2, Port: 11311}
	protocolAddr2 := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip2.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr2, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr2, err)
	}

	c1, err := DialUDP(s, &addr1, nil, ipv4.ProtocolNumber)
	if err != nil {
		t.Fatal("DialUDP(bind port 4):", err)
	}
	c2, err := DialUDP(s, &addr2, nil, ipv4.ProtocolNumber)
	if err != nil {
		t.Fatal("DialUDP(bind port 5):", err)
	}

	c1.SetDeadline(time.Now().Add(time.Second))
	c2.SetDeadline(time.Now().Add(time.Second))

	sent := "abc123"
	sendAddr := fullToUDPAddr(addr2)
	if n, err := c1.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) {
		t.Errorf("got c1.WriteTo(%q, %v) = %d, %v, want = %d, %v", sent, sendAddr, n, err, len(sent), nil)
	}
	recv := make([]byte, len(sent))
	n, recvAddr, err := c2.ReadFrom(recv)
	if err != nil || n != len(recv) {
		t.Errorf("got c2.ReadFrom() = %d, %v, want = %d, %v", n, err, len(recv), nil)
	}

	if recv := string(recv); recv != sent {
		t.Errorf("got recv = %q, want = %q", recv, sent)
	}

	if want := fullToUDPAddr(addr1); !reflect.DeepEqual(recvAddr, want) {
		t.Errorf("got recvAddr = %v, want = %v", recvAddr, want)
	}

	if err := c1.Close(); err != nil {
		t.Error("c1.Close():", err)
	}
	if err := c2.Close(); err != nil {
		t.Error("c2.Close():", err)
	}
}

// TestConnectedPacketConnTransfer exercises a connected UDP round trip
// (Write/Read instead of WriteTo/ReadFrom).
func TestConnectedPacketConnTransfer(t *testing.T) {
	s, e := newLoopbackStack()
	if e != nil {
		t.Fatalf("newLoopbackStack() = %v", e)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	ip := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4())
	addr := tcpip.FullAddress{NIC: NICID, Addr: ip, Port: 11211}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	c1, err := DialUDP(s, &addr, nil, ipv4.ProtocolNumber)
	if err != nil {
		t.Fatal("DialUDP(bind port 4):", err)
	}
	c2, err := DialUDP(s, nil, &addr, ipv4.ProtocolNumber)
	if err != nil {
		t.Fatal("DialUDP(bind port 5):", err)
	}

	c1.SetDeadline(time.Now().Add(time.Second))
	c2.SetDeadline(time.Now().Add(time.Second))

	sent := "abc123"
	if n, err := c2.Write([]byte(sent)); err != nil || n != len(sent) {
		t.Errorf("got c2.Write(%q) = %d, %v, want = %d, %v", sent, n, err, len(sent), nil)
	}
	recv := make([]byte, len(sent))
	n, err := c1.Read(recv)
	if err != nil || n != len(recv) {
		t.Errorf("got c1.Read() = %d, %v, want = %d, %v", n, err, len(recv), nil)
	}

	if recv := string(recv); recv != sent {
		t.Errorf("got recv = %q, want = %q", recv, sent)
	}

	if err := c1.Close(); err != nil {
		t.Error("c1.Close():", err)
	}
	if err := c2.Close(); err != nil {
		t.Error("c2.Close():", err)
	}
}

// makePipe builds a connected TCP pair over a fresh loopback stack for
// nettest.TestConn; stop tears down both conns and the stack.
func makePipe() (c1, c2 net.Conn, stop func(), err error) {
	s, e := newLoopbackStack()
	if e != nil {
		return nil, nil, nil, fmt.Errorf("newLoopbackStack() = %v", e)
	}

	ip := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4())
	addr := tcpip.FullAddress{NIC: NICID, Addr: ip, Port: 11211}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: ip.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		return nil, nil, nil, fmt.Errorf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	l, err := ListenTCP(s, addr, ipv4.ProtocolNumber)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("NewListener: %w", err)
	}

	c1, err = DialTCP(s, addr, ipv4.ProtocolNumber)
	if err != nil {
		l.Close()
		return nil, nil, nil, fmt.Errorf("DialTCP: %w", err)
	}

	c2, err = l.Accept()
	if err != nil {
		l.Close()
		c1.Close()
		return nil, nil, nil, fmt.Errorf("l.Accept: %w", err)
	}

	stop = func() {
		c1.Close()
		c2.Close()
		s.Close()
		s.Wait()
	}

	if err := l.Close(); err != nil {
		stop()
		return nil, nil, nil, fmt.Errorf("l.Close(): %w", err)
	}

	return c1, c2, stop, nil
}

// TestTCPConnTransfer sends data in both directions over a connected
// TCP pair and checks it arrives intact.
func TestTCPConnTransfer(t *testing.T) {
	c1, c2, _, err := makePipe()
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := c1.Close(); err != nil {
			t.Error("c1.Close():", err)
		}
		if err := c2.Close(); err != nil {
			t.Error("c2.Close():", err)
		}
	}()

	c1.SetDeadline(time.Now().Add(time.Second))
	c2.SetDeadline(time.Now().Add(time.Second))

	const sent = "abc123"

	tests := []struct {
		name string
		c1   net.Conn
		c2   net.Conn
	}{
		{"connected to accepted", c1, c2},
		{"accepted to connected", c2, c1},
	}

	for _, test := range tests {
		if n, err := test.c1.Write([]byte(sent)); err != nil || n != len(sent) {
			t.Errorf("%s: got test.c1.Write(%q) = %d, %v, want = %d, %v", test.name, sent, n, err, len(sent), nil)
			continue
		}

		recv := make([]byte, len(sent))
		n, err := test.c2.Read(recv)
		if err != nil || n != len(recv) {
			t.Errorf("%s: got test.c2.Read() = %d, %v, want = %d, %v", test.name, n, err, len(recv), nil)
			continue
		}

		if recv := string(recv); recv != sent {
			t.Errorf("%s: got recv = %q, want = %q", test.name, recv, sent)
		}
	}
}

// TestTCPDialError checks that dialing an unreachable address surfaces
// ErrHostUnreachable wrapped in a net.OpError.
func TestTCPDialError(t *testing.T) {
	s, e := newLoopbackStack()
	if e != nil {
		t.Fatalf("newLoopbackStack() = %v", e)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	ip := tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4())
	addr := tcpip.FullAddress{NIC: NICID, Addr: ip, Port: 11211}

	switch _, err := DialTCP(s, addr, ipv4.ProtocolNumber); err := err.(type) {
	case *net.OpError:
		if err.Err.Error() != (&tcpip.ErrHostUnreachable{}).String() {
			t.Errorf("got DialTCP() = %s, want = %s", err, &tcpip.ErrHostUnreachable{})
		}
	default:
		t.Errorf("got DialTCP(...) = %v, want %s", err, &tcpip.ErrHostUnreachable{})
	}
}

// TestDialContextTCPCanceled checks that an already-canceled context
// makes DialContextTCP fail immediately with context.Canceled.
func TestDialContextTCPCanceled(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	cancel()

	if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.Canceled {
		t.Errorf("got DialContextTCP(...) = %v, want = %v", err, context.Canceled)
	}
}

// TestDialContextTCPTimeout checks that a context deadline shorter than
// the (artificially delayed) handshake makes DialContextTCP fail with
// context.DeadlineExceeded.
func TestDialContextTCPTimeout(t *testing.T) {
	s, err := newLoopbackStack()
	if err != nil {
		t.Fatalf("newLoopbackStack() = %v", err)
	}
	defer func() {
		s.Close()
		s.Wait()
	}()

	addr := tcpip.FullAddress{
		NIC:  NICID,
		Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
		Port: 11211,
	}
	protocolAddr := tcpip.ProtocolAddress{
		Protocol:          ipv4.ProtocolNumber,
		AddressWithPrefix: addr.Addr.WithPrefix(),
	}
	if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
		t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
	}

	fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
		time.Sleep(time.Second)
		r.Complete(true)
	})
	s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)

	ctx := context.Background()
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(100*time.Millisecond))
	defer cancel()

	if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.DeadlineExceeded {
		t.Errorf("got DialContextTCP(...) = %v, want = %v", err, context.DeadlineExceeded)
	}
}

// TestInterruptListender tests that (*TCPListener).Accept can be interrupted.
func TestInterruptListender(t *testing.T) {
	for _, test := range []struct {
		name string
		stop func(l *TCPListener) error
	}{
		{
			"Close",
			(*TCPListener).Close,
		},
		{
			"Shutdown",
			func(l *TCPListener) error {
				l.Shutdown()
				return nil
			},
		},
		{
			"Double Shutdown",
			func(l *TCPListener) error {
				l.Shutdown()
				l.Shutdown()
				return nil
			},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			s, err := newLoopbackStack()
			if err != nil {
				t.Fatalf("newLoopbackStack() = %v", err)
			}
			defer func() {
				s.Close()
				s.Wait()
			}()

			addr := tcpip.FullAddress{
				NIC:  NICID,
				Addr: tcpip.AddrFromSlice(net.IPv4(169, 254, 10, 1).To4()),
				Port: 11211,
			}
			protocolAddr := tcpip.ProtocolAddress{
				Protocol:          ipv4.ProtocolNumber,
				AddressWithPrefix: addr.Addr.WithPrefix(),
			}
			if err := s.AddProtocolAddress(NICID, protocolAddr, stack.AddressProperties{}); err != nil {
				t.Fatalf("AddProtocolAddress(%d, %+v, {}): %s", NICID, protocolAddr, err)
			}

			l, e := ListenTCP(s, addr, ipv4.ProtocolNumber)
			if e != nil {
				t.Fatalf("NewListener() = %v", e)
			}
			defer l.Close()

			done := make(chan struct{})
			go func() {
				defer close(done)
				c, err := l.Accept()
				if err != nil {
					// Accept is expected to return an error.
					t.Log("Accept #1:", err)
					return
				}
				t.Errorf("Accept #1 returned a connection: %v -> %v", c.LocalAddr(), c.RemoteAddr())
				c.Close()
			}()

			// Give l.Accept a chance to block before stopping it.
			time.Sleep(time.Millisecond * 50)

			if err := test.stop(l); err != nil {
				t.Error("stop:", err)
			}

			select {
			case <-done:
			case <-time.After(5 * time.Second):
				t.Errorf("c.Accept didn't unblock")
			}

			done = make(chan struct{})
			go func() {
				defer close(done)
				c, err := l.Accept()
				if err != nil {
					// Accept is expected to return an error.
					t.Log("Accept #2:", err)
					return
				}
				t.Errorf("Accept #2 returned a connection: %v -> %v", c.LocalAddr(), c.RemoteAddr())
				c.Close()
			}()

			select {
			case <-done:
			case <-time.After(5 * time.Second):
				t.Errorf("c.Accept didn't unblock a second time")
			}
		})
	}
}

// TestNetTest runs the standard nettest conformance suite over the
// gonet TCP pipe.
func TestNetTest(t *testing.T) {
	nettest.TestConn(t, makePipe)
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package crd provides utilities to set up Chrome Remote Desktop.
package crd

import (
	"context"
	"time"

	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/browser"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/cws"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/testing"
)

const (
	// crdURL is the Chrome Remote Desktop "get support" page.
	crdURL = "https://remotedesktop.google.com/support"
	// appCWSURL is the Chrome Web Store page of the CRD companion extension.
	appCWSURL = "https://chrome.google.com/webstore/detail/chrome-remote-desktop/inomeogfingihgjfjlpeplalcfajhgai?hl=en"
)

// Do not poll things too fast to prevent triggering weird UI behaviors. The
// share button will not work if we keep clicking it. Sets timeout to 5 minutes
// according to timeout for CRD one time access code.
var rdpPollOpts = &testing.PollOptions{Interval: time.Second, Timeout: 5 * time.Minute}

// launch opens the CRD support page in a new tab and waits for the page
// to become idle. tconn is currently unused here; it is kept for
// signature parity with Launch.
func launch(ctx context.Context, br *browser.Browser, tconn *chrome.TestConn) (*chrome.Conn, error) {
	// Use english version to avoid i18n differences of HTML element attributes.
	conn, err := br.NewConn(ctx, crdURL+"?hl=en")
	if err != nil {
		return nil, err
	}
	// Wait until the page's event loop is idle before poking at its DOM.
	const waitIdle = "new Promise(resolve => window.requestIdleCallback(resolve))"
	if err := conn.Eval(ctx, waitIdle, nil); err != nil {
		return nil, err
	}
	return conn, nil
}

// getAccessCode clicks "Generate Code" on the CRD page and scrapes the
// one-time access code out of the resulting aria-label (digits only).
func getAccessCode(ctx context.Context, crd *chrome.Conn) (string, error) {
	const genCodeBtn = `document.querySelector('[aria-label="Generate Code"]')`
	if err := crd.WaitForExpr(ctx, genCodeBtn); err != nil {
		return "", err
	}
	const clickBtn = genCodeBtn + ".click()"
	if err := crd.Eval(ctx, clickBtn, nil); err != nil {
		return "", err
	}
	const codeSpan = `document.querySelector('[aria-label^="Your access code is:"]')`
	if err := crd.WaitForExpr(ctx, codeSpan); err != nil {
		return "", err
	}
	var code string
	// Concatenate every digit group in the label into one code string.
	const getCode = codeSpan + `.getAttribute('aria-label').match(/\d+/g).join('')`
	if err := crd.Eval(ctx, getCode, &code); err != nil {
		return "", err
	}
	return code, nil
}

// Launch prepares Chrome Remote Desktop and generates access code to be connected by.
// tconn is a connection to ash-chrome
func Launch(ctx context.Context, br *browser.Browser, tconn *chrome.TestConn) error {
	// Ensures the companion extension for the Chrome Remote Desktop website
	// https://remotedesktop.google.com is installed.
	app := cws.App{Name: "Remote Desktop", URL: appCWSURL}
	if err := cws.InstallApp(ctx, br, tconn, app); err != nil {
		return errors.Wrap(err, "failed to install CRD app")
	}

	crd, err := launch(ctx, br, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to launch CRD")
	}
	defer crd.Close()

	testing.ContextLog(ctx, "Getting access code")
	accessCode, err := getAccessCode(ctx, crd)
	if err != nil {
		return errors.Wrap(err, "failed to getAccessCode")
	}
	testing.ContextLog(ctx, "Access code: ", accessCode)
	testing.ContextLog(ctx, "Please paste the code to \"Give Support\" section on ", crdURL)
	return nil
}

// WaitConnection waits for remote desktop client connecting to DUT.
func WaitConnection(ctx context.Context, tconn *chrome.TestConn) error {
	// The share button might not be clickable at first, so we keep retrying
	// until we see "Stop Sharing".
	ui := uiauto.New(tconn).WithPollOpts(*rdpPollOpts)
	share := nodewith.Name("Share").Role(role.Button)
	shared := ui.Exists(nodewith.Name("Stop Sharing").Role(role.Button))
	if err := ui.LeftClickUntil(share, shared)(ctx); err != nil {
		return errors.Wrap(err, "failed to enable sharing")
	}
	return nil
}
package leetcode import ( "github.com/lushenle/leetcode/structures" ) // TreeNode definition for a binary tree node type TreeNode = structures.TreeNode // dfs func zigzagLevelOrder(root *TreeNode) [][]int { var ans [][]int var dfs func(*TreeNode, int) dfs = func(root *TreeNode, level int) { if root == nil { return } // 新的 level if level >= len(ans) { ans = append(ans, []int{}) } if level%2 == 0 { ans[level] = append(ans[level], root.Val) } else { temp := make([]int, len(ans[level])+1) temp[0] = root.Val copy(temp[1:], ans[level]) ans[level] = temp } dfs(root.Left, level+1) dfs(root.Right, level+1) } dfs(root, 0) return ans } // bfs func zigzagLevelOrder1(root *TreeNode) [][]int { var ans [][]int if root == nil { return ans } q := []*TreeNode{root} var level []int var temp []*TreeNode size, i, j, flag := 0, 0, 0, false for len(q) > 0 { size = len(q) temp = []*TreeNode{} level = make([]int, size) j = size - 1 for i = 0; i < size; i++ { root = q[0] q = q[1:] if !flag { level[i] = root.Val } else { level[j] = root.Val j-- } if root.Left != nil { temp = append(temp, root.Left) } if root.Right != nil { temp = append(temp, root.Right) } } ans = append(ans, level) flag = !flag q = temp } return ans }
{#- Generates the AsyncAPI Controller: one Producer method per publish
    channel and one Consumer method per subscribe channel. NOTE(review):
    whitespace-control markers ({%- / -%}) shape the emitted Go source;
    do not reflow them without diffing the generated output. #}
{%- from "../partials/go.template" import messageName -%}
{%- from "../partials/go.template" import getOpBinding -%}
package asyncapi

import (
	"asyncapi/transport"
	"asyncapi/channel"
	"asyncapi/message"
	"asyncapi/operation"
	"errors"
)

type Controller struct {
	Transport transport.PubSub
	contentWriters map[string]transport.ContentWriter
	contentReaders map[string]transport.ContentReader
}

{% for ch_name, ch in asyncapi.channels() -%}
{%- if ch.hasPublish() -%}
{# These vars are effectively global to this file #}
{%- set opName = ch.publish().id() | toGoPublicID -%}
{%- set msgName = messageName(ch.publish().message()) -%}

// {{ ch.publish().id() | toGoPublicID }} implements operation.Producer
func (c Controller) {{ ch.publish().id() | toGoPublicID }}(params channel.{{opName}}Params, msg message.{{msgName}}) error {
	// Define any operation bindings. These are constant per operation
	{%- set bindingList = "" -%}
	{%- set counter = 0 -%}
	{%- for protocol, binding in ch.publish().bindings() -%}
	{{ getOpBinding(protocol, binding) }}
	{#- Append bindings variable name to a string of operation bindings (used in Subscribe below) -#}
	{#- NOTE(review): counter is always < length here, so every binding gets a
	    leading ", " — intentional, since bindingList is pasted after an
	    argument in the Publish call below. #}
	{%- if counter < ch.publish().bindings() | length -%}
	{%- set bindingList = bindingList + ", " -%}
	{% endif -%}
	{%- set bindingList = bindingList + protocol + "Bindings" -%}
	{%- set counter = counter + 1 %}
	{%- endfor %}

	// Throw error for missing content type encoder
	w, ok := c.contentWriters[msg.ContentType]
	if !ok {
		return errors.New("no message writer is registered for content type: " + msg.ContentType)
	}

	// Throw error if failed to encode payload
	var err error
	if msg.RawPayload, err = w.Write(msg.Body); err != nil {
		return err
	}

	// Publish the underlying transport.Message with the transport layer
	return c.Transport.Publish(params.Build(), msg.Message{{bindingList}})
}
{% else -%}
{# These vars are effectively global to this file #}
{%- set opName = ch.subscribe().id() | toGoPublicID -%}
{%- set msgName = messageName(ch.subscribe().message()) -%}

// {{ ch.subscribe().id() | toGoPublicID }} implements operation.Consumer
func (c Controller) {{ ch.subscribe().id() | toGoPublicID }}(params channel.{{opName}}Params, fn operation.{{msgName}}Handler) error {
	// Define any operation bindings. These are constant per operation
	{%- set bindingList = "" -%}
	{%- set counter = 0 -%}
	{%- for protocol, binding in ch.subscribe().bindings() -%}
	{{ getOpBinding(protocol, binding) }}
	{#- Append bindings variable name to a string of operation bindings (used in Subscribe below) -#}
	{%- if counter < ch.subscribe().bindings() | length -%}
	{%- set bindingList = bindingList + ", " -%}
	{% endif -%}
	{%- set bindingList = bindingList + protocol + "Bindings" -%}
	{%- set counter = counter + 1 %}
	{%- endfor %}

	// Subscribe with the transport layer. Wrap message handler /w logic to decode
	// transport.Message payload into {{msgName}} message
	c.Transport.Subscribe(params.Build(), func(ch string, rawMessage transport.Message) {
		// Init a new message object & attempt to use the defined content.Reader to parse
		// the message payload
		msg := message.New{{msgName}}(rawMessage)

		// TODO: Throw error before subscribing if expected content type reader is not registered
		r, ok := c.contentReaders[msg.ContentType]
		if !ok {
			panic("no ContentReader registered for contentType: " + msg.ContentType)
		}
		if err := r.Read(msg.RawPayload, &msg.Body); err != nil {
			panic(err)
		}

		// Parse the params out of the channel
		recParams := &channel.{{opName}}Params{}
		if err := recParams.Parse(ch); err != nil {
			panic(err)
		}

		// Call the operation's message handler
		fn(*recParams, msg)
	}{{bindingList}})

	// TODO: Return an ErrorStream struct wrapping a chan error
	// instead of panics
	return nil
}
{% endif %}
{% endfor -%}
package main import( "golang.org/x/tour/reader" "fmt" ) type MyReader struct{} func (reader MyReader) Read(b []byte) (int, error) { if len(b) == 0 { return 0, fmt.Errorf("Buffer is empty") } for i, _ := range b { b[i] = 'A' } return 1, nil } func main() { reader.Validate(MyReader{}) }
package contract

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/hekonsek/paymentapp/api"
	"github.com/hekonsek/paymentapp/payments"
	"github.com/stretchr/testify/assert"
	"net/http"
	"os"
	"path/filepath"
	"testing"

	"github.com/pact-foundation/pact-go/dsl"
	"github.com/pact-foundation/pact-go/types"
)

// pactDir points at the consumer-generated pact files next to this test.
// NOTE(review): the Getwd error is discarded; a failure would only surface
// later as a missing pact file path.
var dir, _ = os.Getwd()
var pactDir = fmt.Sprintf("%s/pacts", dir)

// TestProviderContract starts the payment API in-process, seeds it with the
// "foo" payment fixture over HTTP, then replays the recorded consumer pact
// against it with pact-go's provider verification.
func TestProviderContract(t *testing.T) {
	pact := &dsl.Pact{
		Consumer: "PaymentAppConsumer",
		Provider: "PaymentAppProvider",
	}

	// NOTE(review): assumes a.Start() returns once the listener is up (i.e.
	// serves in the background) — confirm, otherwise the POST below races
	// server startup.
	err := startServer()
	assert.NoError(t, err)

	// Fixtures
	payment := payments.Payment{
		Id:          "foo",
		PaymentType: "Payment",
	}
	paymentJson, err := json.Marshal(&payment)
	assert.NoError(t, err)
	// Seed the provider state expected by the pact. NOTE(review): the
	// response body is neither closed nor status-checked here.
	_, err = http.Post("http://localhost:8080/payments", "application/json", bytes.NewBuffer(paymentJson))
	assert.NoError(t, err)

	// Replay the consumer's recorded interactions against the live server.
	// The state handler is a no-op because the fixture was seeded above.
	_, err = pact.VerifyProvider(t, types.VerifyRequest{
		ProviderBaseURL: "http://localhost:8080",
		PactURLs:        []string{filepath.ToSlash(fmt.Sprintf("%s/paymentappconsumer-paymentappprovider.json", pactDir))},
		StateHandlers: types.StateHandlers{
			"payment with id foo exists": func() error {
				return nil
			},
		},
	})
	assert.NoError(t, err)
}

// startServer boots the payment API on port 8080 backed by an in-memory
// store, so the contract test needs no external infrastructure.
func startServer() error {
	a := api.ApiServer{
		Port:  8080,
		Store: payments.NewInMemoryPaymentStore(),
	}
	err := a.Start()
	if err != nil {
		return err
	}
	return nil
}
package main

import "fmt"

// main demonstrates function literals bound to variables: one adds and one
// multiplies a pair of float64 values, and both results are printed.
func main() {
	var (
		add      = func(x, y float64) float64 { return x + y }
		multiply = func(x, y float64) float64 { return x * y }
	)

	fmt.Println(add(3, 4))
	fmt.Println(multiply(3, 4))
}
package main

import "fmt"

/*
Reverse a linked list from position m to n. Do it in one-pass.

Note: 1 ≤ m ≤ n ≤ length of list.

Example:

	Input: 1->2->3->4->5->NULL, m = 2, n = 4
	Output: 1->4->3->2->5->NULL
*/

// ListNode is a node of a singly-linked list.
type ListNode struct {
	Val  int
	Next *ListNode
}

// reverseBetween reverses the sublist spanning positions m..n (1-based) and
// returns the (possibly new) head of the whole list.
func reverseBetween(head *ListNode, m int, n int) *ListNode {
	if n < m {
		return head
	}

	// Walk to the first node of the segment (start) and remember its
	// predecessor (before), which is nil when the segment starts at head.
	var before *ListNode
	start := head
	for i := 0; i < m-1 && start != nil; i++ {
		before = start
		start = start.Next
	}

	// after is the first node past the segment (nil if the segment runs to
	// the end of the list).
	after := head
	for i := 0; i < n && after != nil; i++ {
		after = after.Next
	}

	// Reverse [start, after) and splice it back in.
	segHead, segTail := reverse(start, after)
	if segTail != nil {
		segTail.Next = after
	}
	if before == nil {
		return segHead
	}
	before.Next = segHead
	return head
}

// reverse reverses the chain beginning at head and stopping just before
// tail, returning the new first and last nodes of the reversed chain.
func reverse(head, tail *ListNode) (*ListNode, *ListNode) {
	if head == nil {
		return nil, nil
	}
	if head.Next == tail {
		return head, head
	}
	first, last := reverse(head.Next, tail)
	last.Next = head
	head.Next = nil
	return first, head
}

// printlist dumps the list as "v1->v2->...->" for quick visual checks.
func printlist(head *ListNode) {
	out := ""
	for node := head; node != nil; node = node.Next {
		out += fmt.Sprintf("%d", node.Val)
		out += "->"
	}
	fmt.Println(out)
}

func main() {
	list := &ListNode{
		Val: 1,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val: 3,
				Next: &ListNode{
					Val: 4,
					Next: &ListNode{
						Val: 5,
					},
				},
			},
		},
	}
	printlist(list)
	printlist(reverseBetween(list, 2, 3))
}
package server import ( "encoding/hex" "encoding/json" "errors" "fmt" "kto/block" "kto/blockchain" "kto/p2p/node" "kto/transaction" "kto/txpool" "kto/until/logger" "strconv" "github.com/buaazp/fasthttprouter" "github.com/valyala/fasthttp" ) type resp struct { Address string `json:"address"` Hash string `json:"hash"` Num int `json:"num"` Page int `json:"page"` } type reqLock struct { Address string `json:"address"` Pas string `json:"pas"` Amount string `json:"amount"` } type Transaction struct { Nonce uint64 `json:"nonce"` BlockNumber uint64 `json:"blocknumber"` Amount uint64 `json:"amount"` From string `json:"from"` To string `json:"to"` Hash string `json:"hash"` Signature string `json:"signature"` Time int64 `json:"time"` Script string `json:"script"` Ord Order `json:"ord"` } type Balance struct { Bal uint64 `json:"balance"` } type Nonce struct { Nonce uint64 `json:"nonce"` } type Height struct { Height uint64 `json:"height"` } type ErrMsg struct { Errno uint `json:"errno"` Errmsg string `json:"errmsg"` } type Block struct { Height uint64 `json:"height"` PrevBlockHash string `json:"prevblockhash"` Miner string `json:"miner"` Txs []Transaction `json:"txs"` Root string `json:"root"` Version uint64 `json:"version"` Timestamp int64 `json:"timestamp"` Hash string `json:"hash"` FirstTx []block.MinerTx `json:"firsttx"` } type Order struct { Id string `json:"id"` Address string `json:"address"` Price uint64 `json:"price"` Hash string `json:"hash"` Signature string `json:"signature"` Ciphertext string `json:"ciphertext"` Tradename string `json:"tradename"` Region string `json:"region"` } type Servers interface { Run() chukuai() } type Server struct { r *fasthttprouter.Router Bc *blockchain.Blockchain tp *txpool.Txpool n node.Node } func New(Bc *blockchain.Blockchain, tps *txpool.Txpool, n node.Node) *Server { r := fasthttprouter.New() s := &Server{r, Bc, tps, n} return s } func (s *Server) Gettxpool() *txpool.Txpool { return s.tp } func (s *Server) Run() { f := Logger() 
defer f.Close() fmt.Println("Runserver") s.r.POST("/ReceTransaction", s.RecepTx) s.r.POST("/GetBalancebyAddr", s.GetBalancebyAddr) s.r.POST("/GetTxsbyAddr", s.GetTransactionbyAddr) s.r.POST("/GetTxbyhash", s.GetTransactionbyhash) //s.r.POST("/GetBlockbyHash", s.GetBlockbyHash) s.r.POST("/GetBlockbyNum", s.GetBlockbyNum) s.r.POST("/GetBlockbyNum_FY", s.GetBlockbyNum_FY) s.r.GET("/GetBlock_FY", s.GetBlock_FY) // s.r.POST("/GetTx_FY", s.GetTx_FY) s.r.POST("/kto/getnonce", s.getNonceHandler) s.r.POST("/GetMaxBlockNum", s.GetMaxBlockNum) s.r.GET("/GetUSDkto", s.GetUSDkto) s.r.GET("/GetUsdtCny", s.GetUsdtCny) err := fasthttp.ListenAndServe("0.0.0.0:12344", s.r.Handler) if err != nil { fmt.Println("start fasthttp fail:", err.Error()) } } type Block_Fy struct { Height uint64 `json:"height"` Txs uint64 `json:"txs"` } func (s *Server) GetBlock_FY(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respstr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respstr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") h, err := s.Bc.GetMaxBlockHeight() if err != nil { errorCode = ErrNoBlock fmt.Println(err) return } // var bf []Block_Fy // var heightLimit int // if h > 30 { // heightLimit = int(h) // } else { // heightLimit = 30 // } // for i := 0; i < heightLimit; i++ { // var by Block_Fy // b, err := s.Bc.GetBlockByHeight(h) // fmt.Println("height:", h) // if err != nil { // errorCode = ErrNoBlock // fmt.Println("get block err:", err) // return // } // by.Txs = uint64(len(b.Txs)) // fmt.Println("141") // by.Height = h // bf = append(bf, by) // h -= 1 // } var latestBlcok Block for h > 0 { tmpblock, err := s.Bc.GetBlockByHeight(h) if err != nil { errorCode = ErrNoBlockHeight Error("fail to get blcok by height:", 
err) return } if tmpblock.Txs == nil || len(tmpblock.Txs) == 0 { h-- continue } latestBlcok.Height = tmpblock.Height latestBlcok.Hash = hex.EncodeToString(tmpblock.Hash) latestBlcok.PrevBlockHash = hex.EncodeToString(tmpblock.PrevBlockHash) latestBlcok.Root = hex.EncodeToString(tmpblock.Root) latestBlcok.Timestamp = tmpblock.Timestamp latestBlcok.Version = tmpblock.Version latestBlcok.Miner = string(tmpblock.Miner) latestBlcok.FirstTx = tmpblock.FirstTx for _, tx := range tmpblock.Txs { var tmpTx Transaction tmpTx.Hash = hex.EncodeToString(tx.Hash) tmpTx.From = string(tx.From.AddressToByte()) tmpTx.Amount = tx.Amount tmpTx.Nonce = tx.Nonce tmpTx.To = string(tx.To.AddressToByte()) tmpTx.Signature = hex.EncodeToString(tx.Signature) tmpTx.Time = tx.Time //tmpTx.BlockNumber = tx.BlockNumber //blocknumber为0 tmpTx.BlockNumber = tmpblock.Height tmpTx.Ord.Id = string(tx.Ord.Id) tmpTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) tmpTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) tmpTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) tmpTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) tmpTx.Ord.Price = tx.Ord.Price latestBlcok.Txs = append(latestBlcok.Txs, tmpTx) } break } var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` BF Block `json:"bf'` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.BF = latestBlcok respBody, _ = json.Marshal(respData) return } func (s *Server) GetMaxBlockNum(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respstr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respstr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") h, err := s.Bc.GetMaxBlockHeight() if err != nil { 
errorCode = ErrNoBlockHeight fmt.Println(err) return } var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Height uint64 `json:"height"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Height = h respBody, _ = json.Marshal(respData) return } func (s *Server) getNonceHandler(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respstr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respstr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") var reqData resp reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON fmt.Println("Unmarshal is failed") return } nonce, err := s.Bc.GetNonce([]byte(reqData.Address)) if err != nil { errorCode = ErrNoNonce fmt.Println("GetNonce is failed") return } errorCode = Success var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Nonce uint64 `json:"nonce"` } respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Nonce = nonce respBody, _ = json.Marshal(respData) return } //接受交易,放入内存 func (s *Server) RecepTx(ctx *fasthttp.RequestCtx) { var respBody []byte var errorCode int defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() reqBody := ctx.PostBody() var reqData transaction.Transaction err := json.Unmarshal(reqBody, &reqData) if err != nil { errorCode = ErrJSON Error("Unmarshal is faild:", err) return } if reqData.From == reqData.To { errorCode = ErrData Error("from adderss equal to adderss") return } if err := s.tp.Add(&reqData, s.Bc); err != 
nil { errorCode = ErrtTx Error("fail to add tx:", err) return } s.n.Broadcast(&reqData) var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Hash string `json:"hash"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Hash = hex.EncodeToString(reqData.Hash) respBody, _ = json.Marshal(respData) return } func (s *Server) GetTransactionbyAddr(ctx *fasthttp.RequestCtx) { var reqData resp var respBody []byte var errorCode int defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON Error("fail parse json:", err) return } err, txs := s.Bc.GetTransactionByAddr([]byte(reqData.Address)) if err != nil { errorCode = ErrNoTransaction Error("fail to get tx:", err) return } if txs == nil { errorCode = ErrNoTransaction err = errors.New("tx list is nil") Error("no tx:", err) return } var Tx []Transaction for _, tx := range txs { var tmpTx Transaction tmpTx.Hash = hex.EncodeToString(tx.Hash) tmpTx.From = string(tx.From.AddressToByte()) tmpTx.Amount = tx.Amount tmpTx.Nonce = tx.Nonce tmpTx.To = string(tx.To.AddressToByte()) tmpTx.Signature = hex.EncodeToString(tx.Signature) tmpTx.Time = tx.Time tmpTx.BlockNumber = tx.BlockNumber tmpTx.Script = tx.Script //=== tmpTx.Ord.Id = string(tx.Ord.Id) tmpTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) tmpTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) tmpTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) tmpTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) tmpTx.Ord.Price = tx.Ord.Price tmpTx.Ord.Region = 
string(tx.Ord.Region) tmpTx.Ord.Tradename = string(tx.Ord.Tradename) //==== Tx = append(Tx, tmpTx) } var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` TransactionList []Transaction `json:"transactionlist"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.TransactionList = Tx respBody, _ = json.Marshal(respData) return } func (s *Server) GetTransactionbyhash(ctx *fasthttp.RequestCtx) { var reqData resp var respBody []byte var errorCode int defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON Error("fail to parse json:", err) return } hash, err := hex.DecodeString(reqData.Hash) if err != nil { errorCode = ErrData Error("fail to parse hash:", err) return } tx, err := s.Bc.GetTransaction(hash) if err != nil { errorCode = ErrNoTxByHash Error("fail to get tx:", err) return } var retTx Transaction retTx.Hash = hex.EncodeToString(tx.Hash) retTx.From = string(tx.From.AddressToByte()) retTx.Amount = tx.Amount retTx.Nonce = tx.Nonce retTx.To = string(tx.To.AddressToByte()) retTx.Signature = hex.EncodeToString(tx.Signature) retTx.Time = tx.Time retTx.BlockNumber = tx.BlockNumber retTx.Script = tx.Script //== retTx.Ord.Id = string(tx.Ord.Id) retTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) retTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) retTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) retTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) retTx.Ord.Price = tx.Ord.Price retTx.Ord.Region = string(tx.Ord.Region) retTx.Ord.Tradename = 
string(tx.Ord.Tradename) var respData struct { ErrorCode int `json:"errorcode"` ErroeMsg string `json:"errormsg"` Transaction Transaction `json:"transaction"` } errorCode = Success respData.ErrorCode = errorCode respData.ErroeMsg = ErrorMap[errorCode] respData.Transaction = retTx respBody, _ = json.Marshal(respData) return } func (s *Server) GetBalancebyAddr(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") reqBody := ctx.PostBody() var reqData resp if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON Error("fail to parse json:", err) return } var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Balance uint64 `jaon:"balance"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] fmt.Println("[]byte(reqData.Address)=====", []byte(reqData.Address)) fmt.Println(reqData.Address) balance, err := s.Bc.GetBalance([]byte(reqData.Address)) if err != nil { respData.Balance = 0 respBody, _ = json.Marshal(respData) Error("get balance error:", err) return } respData.Balance = balance respBody, _ = json.Marshal(respData) return } func (s *Server) GetBlockbyHash(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() var reqData resp reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON Error("fail to parse json:", err) return } h, _ := 
hex.DecodeString(reqData.Hash) b, err := s.Bc.GetBlockByHash(h) if err != nil { errorCode = ErrNoBlock Error("fail to get block:", err) return } Debug(b) var bl Block var Tx []Transaction if len(b.Txs) != 0 { for _, tx := range b.Txs { var tmpTx Transaction tmpTx.Hash = hex.EncodeToString(tx.Hash) tmpTx.From = string(tx.From.AddressToByte()) tmpTx.Amount = tx.Amount tmpTx.Nonce = tx.Nonce tmpTx.To = string(tx.To.AddressToByte()) tmpTx.Signature = hex.EncodeToString(tx.Signature) tmpTx.Time = tx.Time tmpTx.BlockNumber = tx.BlockNumber Tx = append(Tx, tmpTx) } bl.Txs = Tx } else { bl.Txs = nil } bl.Height = b.Height bl.Hash = hex.EncodeToString(b.Hash) bl.PrevBlockHash = hex.EncodeToString(b.PrevBlockHash) bl.Root = hex.EncodeToString(b.Root) bl.Timestamp = b.Timestamp bl.Version = b.Version bl.Miner = string(b.Miner) bl.FirstTx = b.FirstTx var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Block Block `json:"block"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Block = bl respBody, _ = json.Marshal(respData) return } func (s *Server) GetBlockbyNum(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") var reqData resp reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); err != nil { errorCode = ErrJSON Error("fail to parse json:", err) return } fmt.Println("num = ", reqData.Num) b, err := s.Bc.GetBlockByHeight(uint64(reqData.Num)) if err != nil { errorCode = ErrNoBlock Error("fail to get block:", err) return } Debug(b) var bl Block var Tx []Transaction if len(b.Txs) != 0 { for 
_, tx := range b.Txs { var tmpTx Transaction tmpTx.Hash = hex.EncodeToString(tx.Hash) tmpTx.From = string(tx.From.AddressToByte()) tmpTx.Amount = tx.Amount tmpTx.Nonce = tx.Nonce tmpTx.To = string(tx.To.AddressToByte()) tmpTx.Signature = hex.EncodeToString(tx.Signature) tmpTx.Time = tx.Time tmpTx.BlockNumber = tx.BlockNumber tmpTx.Script = tx.Script tmpTx.Ord.Id = string(tx.Ord.Id) tmpTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) tmpTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) tmpTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) tmpTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) tmpTx.Ord.Price = tx.Ord.Price tmpTx.Ord.Region = string(tx.Ord.Region) tmpTx.Ord.Tradename = string(tx.Ord.Tradename) Tx = append(Tx, tmpTx) } bl.Txs = Tx } else { bl.Txs = nil } bl.Height = b.Height bl.Hash = hex.EncodeToString(b.Hash) bl.PrevBlockHash = hex.EncodeToString(b.PrevBlockHash) bl.Root = hex.EncodeToString(b.Root) bl.Timestamp = b.Timestamp bl.Version = b.Version bl.Miner = string(b.Miner) bl.FirstTx = b.FirstTx var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Block Block `json:"block"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Block = bl respBody, _ = json.Marshal(respData) return } type block_fy struct { TxList []Transaction `json:"txlsit"` Total uint64 `json:"total"` Height uint64 `json:"height"` } func (s *Server) GetBlockbyNum_FY(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) respBody = []byte(respStr) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") var reqData resp reqBody := ctx.PostBody() if err := json.Unmarshal(reqBody, &reqData); 
err != nil { errorCode = ErrJSON Error("fail to parse json:", err) return } if reqData.Num < 0 || reqData.Page < 0 { errorCode = ErrData Errorf("data error:number:%d,page:%d", reqData.Num, reqData.Page) return } maxHeight, err := s.Bc.GetMaxBlockHeight() if err != nil { errorCode = ErrNoBlockHeight Error("fail to get block height:", err) return } blocknumber := int(maxHeight) - (reqData.Page-1)*reqData.Num if blocknumber <= 0 { errorCode = ErrData fmt.Println("Fy number:", blocknumber) Error("error data", blocknumber) return } var blockList []Block Errorf("blocknumber:%d,num:%d,page:%d\n", blocknumber, reqData.Num, reqData.Page) for i := blocknumber; i > 0 && i > (blocknumber-reqData.Num); i-- { fmt.Println("FY:%d---------------------------", i) block, err := s.Bc.GetBlockByHeight(uint64(i)) if err != nil { //errorCode = ErrNoBlock Error("fail to get blcok:", err) break } Errorf("block:", block) var tmpBlock Block //var Tx []Transaction for _, tx := range block.Txs { var tmpTx Transaction tmpTx.Hash = hex.EncodeToString(tx.Hash) tmpTx.From = string(tx.From.AddressToByte()) tmpTx.Amount = tx.Amount tmpTx.Nonce = tx.Nonce tmpTx.To = string(tx.To.AddressToByte()) tmpTx.Signature = hex.EncodeToString(tx.Signature) tmpTx.Time = tx.Time tmpTx.BlockNumber = tx.BlockNumber tmpTx.Script = tx.Script tmpTx.Ord.Id = string(tx.Ord.Id) tmpTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) tmpTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) tmpTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) tmpTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) tmpTx.Ord.Price = tx.Ord.Price tmpBlock.Txs = append(tmpBlock.Txs, tmpTx) } tmpBlock.Height = block.Height tmpBlock.Hash = hex.EncodeToString(block.Hash) tmpBlock.PrevBlockHash = hex.EncodeToString(block.PrevBlockHash) tmpBlock.Root = hex.EncodeToString(block.Root) tmpBlock.Timestamp = block.Timestamp tmpBlock.Version = block.Version tmpBlock.Miner = string(block.Miner) tmpBlock.FirstTx = block.FirstTx blockList = 
append(blockList, tmpBlock) } var respData struct { ErrorCode int `json:"errorcode"` ErrorMsg string `json:"errormsg"` Data struct { Total uint64 `json:"total"` BlockList []Block `json:"blocklist"` } `json:"block"` } errorCode = Success respData.ErrorCode = errorCode respData.ErrorMsg = ErrorMap[errorCode] respData.Data.BlockList = blockList respData.Data.Total = maxHeight // fmt.Println("NUm = ", reqData.Num) // fmt.Println("PAGE = ", reqData.Page) // b, err := s.Bc.GetBlockByHeight(uint64(reqData.Num)) // if err != nil { // errorCode = ErrNoBlock // fmt.Println(err) // return // } // fmt.Println(b) // if len(b.Txs) != 0 { // for _, tx := range b.Txs { // var tmpTx Transaction // tmpTx.Hash = hex.EncodeToString(tx.Hash) // tmpTx.From = string(tx.From.AddressToByte()) // tmpTx.Amount = tx.Amount // tmpTx.Nonce = tx.Nonce // tmpTx.To = string(tx.To.AddressToByte()) // tmpTx.Signature = hex.EncodeToString(tx.Signature) // tmpTx.Time = tx.Time // tmpTx.BlockNumber = tx.BlockNumber // tmpTx.Ord.Id = string(tx.Ord.Id) // tmpTx.Ord.Hash = hex.EncodeToString(tx.Ord.Hash) // tmpTx.Ord.Signature = hex.EncodeToString(tx.Ord.Signature) // tmpTx.Ord.Ciphertext = hex.EncodeToString(tx.Ord.Ciphertext) // tmpTx.Ord.Address = string(tx.Ord.Address.AddressToByte()) // tmpTx.Ord.Price = tx.Ord.Price // Tx = append(Tx, tmpTx) // } // switch reqData.Page { // case 1: // if len(Tx) > 30 { // Tx = Tx[:30] // } else { // Tx = Tx[:len(Tx)] // } // case 2: // if len(Tx) > 60 { // Tx = Tx[30:60] // } else { // Tx = Tx[30:len(Tx)] // } // case 3: // if len(Tx) > 90 { // Tx = Tx[60:90] // } else { // Tx = Tx[60:len(Tx)] // } // case 4: // if len(Tx) > 120 { // Tx = Tx[90:120] // } else { // Tx = Tx[90:len(Tx)] // } // case 5: // if len(Tx) > 149 { // Tx = Tx[120:149] // } else { // Tx = Tx[120:len(Tx)] // } // default: // break // } // } // var respData struct { // ErrorCode int `json:"errorcode"` // ErrorMsg string `json:"errormsg"` // BT block_fy `json:"bt"` // } // var bt block_fy // 
bt.TxList = Tx // bt.Total = uint64(len(bt.TxList)) // bt.Height = b.Height // errorCode = Success // respData.ErrorCode = errorCode // respData.ErrorMsg = ErrorMap[errorCode] // respData.BT = bt respBody, _ = json.Marshal(respData) return } // func (s *Server) Chukuai() { // time.Sleep(time.Second * 5) // s.tp.Pendings(s.Bc) // b, _ := s.Bc.NewBlock(s.tp.Pending) // s.Bc.AddBlock(b, []byte("2o76U4LXhTwjir8gqpShsPxHio7iFb3MR2WEEgef2y9v")) // data, err := json.Marshal(b) // if err != nil { // log.Fatalf("JSON marshaling failed: %s", err) // } // fmt.Printf("%s\n", data) // s.tp.Pending = nil // } func (s *Server) GetUSDkto(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { // respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) // respBody = []byte(respStr) respBody = []byte(`{"success":true}`) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") url := "https://www.bcone.vip/api/market/tickers/ticker?symbol=usdt_kto" _, respBody, err := fasthttp.Get(nil, url) if err != nil { errorCode = ErrData return } errorCode = Success } func (s *Server) GetUsdtCny(ctx *fasthttp.RequestCtx) { var errorCode int var respBody []byte defer func() { if errorCode != Success { // respStr := fmt.Sprintf(`{"errorcode":%d,"errormsg":"%s"}`, errorCode, ErrorMap[errorCode]) // respBody = []byte(respStr) respBody = []byte(`{"flag":0,"isSuccess":false,"data":{},"message":"数据错误"}`) } ctx.Write(respBody) }() ctx.Request.Header.Set("Access-Control-Allow-Origin", "*") ctx.Request.Header.Add("Access-Control-Allow-Headers", "Content-Type") ctx.Request.Header.Set("content-type", "application/json") req := &fasthttp.Request{} req.AppendBody([]byte("{}")) req.SetRequestURI("https://www.bcone.vip/api/currency/currency/getUsdtCny") 
req.Header.SetMethod("POST") req.Header.SetContentType("application/json") resp := &fasthttp.Response{} client := &fasthttp.Client{} if err := client.Do(req, resp); err != nil { errorCode = ErrData return } errorCode = Success respBody = resp.Body() return } func (s *Server) PackBlock(minaddr, Ds, Cm, QTJ []byte) (*block.Block, error) { logger.Infof("Package blocks from txpool...") s.tp.Pendings(s.Bc) b, err := s.Bc.NewBlock(s.tp.Pending, minaddr, Ds, Cm, QTJ) if err != nil { return nil, err } s.RemovePending() return b, nil } // func (s *Server) CommitBlock(b *block.Blocks, Ds, Cm []byte) error { func (s *Server) CommitBlock(b *block.Blocks, minaddr []byte) error { logger.Infof("server: Start Commit Block.\n") // b.Miner = []byte(mineraddr) // err := s.Bc.AddBlock(b, Ds, Cm) err := s.Bc.AddBlock(b, minaddr) if err != nil { return err } logger.Infof("server: End Commit Block.\n") return nil } func (s *Server) RemovePending() { s.tp.Pending = nil } // func (s *Server) Chukuai() { // b := s.PackBlock() // s.CommitBlock(b) // } const PasLock = "myxwxhqutjqc" func (s *Server) LockAmount(ctx *fasthttp.RequestCtx) { a := ctx.PostBody() var v reqLock json.Unmarshal(a, &v) if v.Pas != PasLock { fmt.Fprintf(ctx, "%s", "lock failed") } num, _ := strconv.ParseInt(v.Amount, 10, 64) s.Bc.LockBalance(v.Address, uint64(num)) }
/* * */ package sync import ( "encoding/base64" "errors" "fmt" "strings" "time" "io/ioutil" "gopkg.in/yaml.v2" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" "github.com/xelalexv/dregsy/internal/pkg/log" "github.com/xelalexv/dregsy/internal/pkg/relays/docker" "github.com/xelalexv/dregsy/internal/pkg/relays/skopeo" ) // const minimumTaskInterval = 30 const minimumAuthRefreshInterval = time.Hour /* ---------------------------------------------------------------------------- * */ type syncConfig struct { Relay string `yaml:"relay"` Docker *docker.RelayConfig `yaml:"docker"` Skopeo *skopeo.RelayConfig `yaml:"skopeo"` DockerHost string `yaml:"dockerhost"` // DEPRECATED APIVersion string `yaml:"api-version"` // DEPRECATED Tasks []*task `yaml:"tasks"` Webhooks []*webhook `yaml:"webhooks"` EndpointsWebhooks map[string]*webhook } // func (c *syncConfig) validate() error { if c.Relay == "" { c.Relay = docker.RelayID } switch c.Relay { case docker.RelayID: if c.Docker == nil { if c.DockerHost == "" && c.APIVersion == "" { log.Warn( "not specifying the 'docker' config item is deprecated") } templ := "the top-level '%s' setting is deprecated, " + "use 'docker' config item instead" if c.DockerHost != "" { log.Warn(fmt.Sprintf(templ, "dockerhost")) } if c.APIVersion != "" { log.Warn(fmt.Sprintf(templ, "api-version")) } c.Docker = &docker.RelayConfig{ DockerHost: c.DockerHost, APIVersion: c.APIVersion, } } else { templ := "discarding deprecated top-level '%s' setting and " + "using 'docker' config item instead" if c.DockerHost != "" { log.Warn(fmt.Sprintf(templ, "dockerhost")) c.DockerHost = "" } if c.APIVersion != "" { log.Warn(fmt.Sprintf(templ, "api-version")) c.APIVersion = "" } } case skopeo.RelayID: if c.DockerHost != "" { return fmt.Errorf( "setting 'dockerhost' implies '%s' relay, but relay is set to '%s'", docker.RelayID, c.Relay) } default: return fmt.Errorf( "invalid 
relay type: '%s', must be either '%s' or '%s'", c.Relay, docker.RelayID, skopeo.RelayID) } for _, t := range c.Tasks { if err := t.validate(); err != nil { return err } } c.EndpointsWebhooks = make(map[string]*webhook) for _, w := range c.Webhooks { if err := w.validate(); err != nil { return err } c.EndpointsWebhooks[w.Endpoint] = w } return nil } /* ---------------------------------------------------------------------------- * */ type task struct { Name string `yaml:"name"` Interval int `yaml:"interval"` Source *location `yaml:"source"` Target *location `yaml:"target"` Mappings []*mapping `yaml:"mappings"` Verbose bool `yaml:"verbose"` // ticker *time.Ticker lastTick time.Time failed bool } type webhook struct { Name string `yaml:"name"` Format string `yaml:"format"` Endpoint string `yaml:"endpoint"` BearerToken string `yaml:"bearer-token"` Source *location `yaml:"source"` Target *location `yaml:"target"` FromPrefix string `yaml:"from_prefix"` ToPrefix string `yaml:"to_prefix"` Verbose bool `yaml:"verbose"` } type azureWebhookEvent struct { Action string `json:action` Target struct { Repository string `repository` Tag string `tag` } `json:target` failed bool } type webhookEvent struct { Mappings []*mapping `json:"mappings"` Verbose bool `json:"verbose"` failed bool } // func (t *task) validate() error { if len(t.Name) == 0 { return errors.New("a task requires a name") } if 0 < t.Interval && t.Interval < minimumTaskInterval { return fmt.Errorf( "minimum task interval is %d seconds", minimumTaskInterval) } if t.Interval < 0 { return errors.New("task interval needs to be 0 or a positive integer") } if err := t.Source.validate(); err != nil { return fmt.Errorf( "source registry in task '%s' invalid: %v", t.Name, err) } if err := t.Target.validate(); err != nil { return fmt.Errorf( "target registry in task '%s' invalid: %v", t.Name, err) } for _, m := range t.Mappings { if err := m.validate(); err != nil { return err } m.From = normalizePath(m.From) m.To = 
normalizePath(m.To) } return nil } // func (w *webhook) validate() error { if len(w.Name) == 0 { return errors.New("a webhook requires a name") } if err := w.Source.validate(); err != nil { return fmt.Errorf( "source registry in task '%s' invalid: %v", w.Name, err) } if err := w.Target.validate(); err != nil { return fmt.Errorf( "target registry in task '%s' invalid: %v", w.Name, err) } return nil } func (awe *azureWebhookEvent) validate() error { awe.Target.Repository = normalizePath(awe.Target.Repository) return nil } func (we *webhookEvent) validate() error { for _, m := range we.Mappings { if err := m.validate(); err != nil { return err } m.From = normalizePath(m.From) m.To = normalizePath(m.To) } return nil } // func (t *task) startTicking(c chan *task) { i := time.Duration(t.Interval) if i == 0 { i = 3 } t.ticker = time.NewTicker(time.Second * i) t.lastTick = time.Now().Add(time.Second * i * (-2)) go func() { // fire once right at the start c <- t for range t.ticker.C { c <- t } }() } // func (t *task) tooSoon() bool { i := time.Duration(t.Interval) if i == 0 { return false } return time.Now().Before(t.lastTick.Add(time.Second * i / 2)) } // func (t *task) stopTicking(c chan *task) { if t.ticker != nil { t.ticker.Stop() t.ticker = nil } } // func (t *task) fail(f bool) { t.failed = t.failed || f } func (awe *azureWebhookEvent) fail(f bool) { awe.failed = awe.failed || f } func (we *webhookEvent) fail(f bool) { we.failed = we.failed || f } // func (t *task) mappingRefs(m *mapping) (from string, to string) { if m != nil { from = t.Source.Registry + m.From to = t.Target.Registry + m.To } return from, to } func (w *webhook) mappingRefsAzure(event *azureWebhookEvent) (from string, to string) { from = w.Source.Registry + event.Target.Repository to = w.Target.Registry + event.Target.Repository return from, to } func (w *webhook) mappingRefs(m *mapping) (err error, from string, to string) { if m != nil { if !strings.HasPrefix(m.From, w.FromPrefix) { return 
fmt.Errorf("Prefix \"%s\" not found in from mapping \"%s\"", w.FromPrefix, m.From), "", "" } from = w.Source.Registry + strings.TrimPrefix(m.From, w.FromPrefix) to = w.Target.Registry + w.ToPrefix + m.To log.Infof("from: %s, to: %s", from, to) } return nil, from, to } func ensureTargetExists(ref string, target *location, syncId int) error { isEcr, region, account := target.getECR() if isEcr { _, path, _ := docker.SplitRef(ref) if len(path) == 0 { return nil } sess, err := session.NewSession() if err != nil { return err } svc := ecr.New(sess, &aws.Config{ Region: aws.String(region), }) inpDescr := &ecr.DescribeRepositoriesInput{ RegistryId: aws.String(account), RepositoryNames: []*string{aws.String(path)}, } out, err := svc.DescribeRepositories(inpDescr) if err == nil && len(out.Repositories) > 0 { log.WithField("syncId", syncId).Infof("target '%s' already exists", ref) return nil } if err != nil { if aerr, ok := err.(awserr.Error); ok { if aerr.Code() != ecr.ErrCodeRepositoryNotFoundException { return err } } else { return err } } log.WithField("syncId", syncId).Infof("creating target '%s'", ref) inpCrea := &ecr.CreateRepositoryInput{ RepositoryName: aws.String(path), } if _, err := svc.CreateRepository(inpCrea); err != nil { return err } } return nil } // func (t *task) ensureTargetExists(ref string, syncId int) error { return ensureTargetExists(ref, t.Target, syncId) } // func (w *webhook) ensureTargetExists(ref string, syncId int) error { return ensureTargetExists(ref, w.Target, syncId) } // func normalizePath(p string) string { if strings.HasPrefix(p, "/") { return p } return "/" + p } /* ---------------------------------------------------------------------------- * */ type location struct { Registry string `yaml:"registry"` Auth string `yaml:"auth"` SkipTLSVerify bool `yaml:"skip-tls-verify"` AuthRefresh *time.Duration `yaml:"auth-refresh"` lastRefresh time.Time } // func (l *location) validate() error { if l == nil { return errors.New("location is nil") } if 
l.Registry == "" { return errors.New("registry not set") } l.lastRefresh = time.Time{} if l.AuthRefresh != nil { if *l.AuthRefresh == 0 { l.AuthRefresh = nil } else if !l.isECR() { return fmt.Errorf( "'%s' wants authentication refresh, but is not an ECR registry", l.Registry) } else if *l.AuthRefresh < minimumAuthRefreshInterval { *l.AuthRefresh = time.Duration(minimumAuthRefreshInterval) log.Warnf( "auth-refresh for '%s' too short, setting to minimum: %s", l.Registry, minimumAuthRefreshInterval) } } return nil } // func (l *location) isECR() bool { ecr, _, _ := l.getECR() return ecr } // func (l *location) getECR() (ecr bool, region, account string) { url := strings.Split(l.Registry, ".") ecr = (len(url) == 6 || len(url) == 7) && url[1] == "dkr" && url[2] == "ecr" && url[4] == "amazonaws" && url[5] == "com" && (len(url) == 6 || url[6] == "cn") if ecr { region = url[3] account = url[0] } else { region = "" account = "" } return } // func (l *location) refreshAuth() error { if l.AuthRefresh == nil || time.Since(l.lastRefresh) < *l.AuthRefresh { return nil } _, region, account := l.getECR() log.Infof("refreshing credentials for '%s'", l.Registry) sess, err := session.NewSession() if err != nil { return err } svc := ecr.New(sess, &aws.Config{ Region: aws.String(region), }) input := &ecr.GetAuthorizationTokenInput{ RegistryIds: []*string{aws.String(account)}, } authToken, err := svc.GetAuthorizationToken(input) if err != nil { return err } for _, data := range authToken.AuthorizationData { output, err := base64.StdEncoding.DecodeString(*data.AuthorizationToken) if err != nil { return err } split := strings.Split(string(output), ":") if len(split) != 2 { return fmt.Errorf("failed to parse credentials") } user := strings.TrimSpace(split[0]) pass := strings.TrimSpace(split[1]) l.Auth = base64.StdEncoding.EncodeToString([]byte( fmt.Sprintf("{\"username\": \"%s\", \"password\": \"%s\"}", user, pass))) l.lastRefresh = time.Now() return nil } return fmt.Errorf("no 
authorization data for") } /* ---------------------------------------------------------------------------- * */ type mapping struct { From string `yaml:"from" json:"from"` To string `yaml:"to" json:"to"` Tags []string `yaml:"tags" json:"tags"` } // func (m *mapping) validate() error { if m == nil { return errors.New("mapping is nil") } if m.From == "" { return errors.New("mapping without 'From' path") } if m.To == "" { m.To = m.From } return nil } /* ---------------------------------------------------------------------------- * load config from YAML file */ func LoadConfig(file string) (*syncConfig, error) { data, err := ioutil.ReadFile(file) if err != nil { return nil, fmt.Errorf("error loading config file '%s': %v", file, err) } config := &syncConfig{} err = yaml.Unmarshal(data, config) if err != nil { return nil, fmt.Errorf("error parsing config file '%s': %v", file, err) } return config, config.validate() }
package sma

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/evcc-io/evcc/util"
	"gitlab.com/bboehmke/sunny"
)

// udpTimeout bounds how long a Device waits for its initial value.
const udpTimeout = 10 * time.Second

// map of created discover instances, keyed by network interface name
var discoverers = make(map[string]*Discoverer)
var discoverersMutex sync.Mutex

// initialize sunny logger only once
var once sync.Once

// GetDiscoverer for the given interface; creates, starts and caches a
// Discoverer per interface, returning the cached one on later calls.
func GetDiscoverer(iface string) (*Discoverer, error) {
	// one-time initialization of sunny logger
	log := util.NewLogger("sma")
	once.Do(func() {
		sunny.Log = log.TRACE
	})

	discoverersMutex.Lock()
	defer discoverersMutex.Unlock()

	// get or create discoverer
	discoverer, ok := discoverers[iface]
	if !ok {
		conn, err := sunny.NewConnection(iface)
		if err != nil {
			return nil, fmt.Errorf("connection failed: %w", err)
		}

		discoverer = &Discoverer{
			log:     log,
			conn:    conn,
			devices: make(map[uint32]*Device),
		}

		// discovery runs in the background; devices are added as found
		go discoverer.run()

		discoverers[iface] = discoverer
	}

	return discoverer, nil
}

// Discoverer discovers SMA devicesBySerial in background while providing already found devicesBySerial
type Discoverer struct {
	log     *util.Logger
	conn    *sunny.Connection
	devices map[uint32]*Device // keyed by device serial number; guarded by mux
	mux     sync.RWMutex
	done    uint32 // incremented atomically once discovery has finished
}

// createDevice wraps a raw sunny.Device into this package's Device type.
func (d *Discoverer) createDevice(device *sunny.Device) *Device {
	return &Device{
		Device: device,
		log:    d.log,
		wait: util.NewWaiter(udpTimeout, func() {
			d.log.DEBUG.Println("wait for initial value")
		}),
		values: make(map[sunny.ValueID]interface{}),
	}
}

// addDevice stores a discovered device by serial; duplicates are closed.
func (d *Discoverer) addDevice(device *sunny.Device) {
	d.mux.Lock()
	defer d.mux.Unlock()

	if _, ok := d.devices[device.SerialNumber()]; !ok {
		d.devices[device.SerialNumber()] = d.createDevice(device)
	} else {
		device.Close()
	}
}

// run discover and store found devicesBySerial
func (d *Discoverer) run() {
	devices := make(chan *sunny.Device)

	// consume results as they arrive; loop ends when devices is closed
	go func() {
		for device := range devices {
			d.addDevice(device)
		}
	}()

	// discover devicesBySerial and wait for results
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	d.conn.DiscoverDevices(ctx, devices, "")
	cancel()
	close(devices)

	// mark discover as done
	atomic.AddUint32(&d.done, 1)
}

// get returns the device with the given serial (or nil), setting its password.
func (d *Discoverer) get(serial uint32, password string) *Device {
	d.mux.RLock()
	defer d.mux.RUnlock()

	device := d.devices[serial]
	if device != nil {
		device.SetPassword(password)
	}
	return device
}

// DeviceBySerial with the given serial number; polls for up to 3 seconds
// while background discovery is still running.
func (d *Discoverer) DeviceBySerial(serial uint32, password string) *Device {
	start := time.Now()
	for time.Since(start) < time.Second*3 {
		// discover done -> return immediately regardless of result
		if atomic.LoadUint32(&d.done) != 0 {
			return d.get(serial, password)
		}

		// device with serial found -> return
		if device := d.get(serial, password); device != nil {
			return device
		}

		time.Sleep(time.Millisecond * 10)
	}
	return d.get(serial, password)
}

// DeviceByIP returns the device with the given IP address, creating and
// caching a new connection to it when not already discovered.
func (d *Discoverer) DeviceByIP(ip, password string) (*Device, error) {
	d.mux.Lock()
	defer d.mux.Unlock()

	for _, device := range d.devices {
		if device.Address().IP.String() == ip {
			device.SetPassword(password)
			return device, nil
		}
	}

	device, err := d.conn.NewDevice(ip, password)
	if err != nil {
		return nil, fmt.Errorf("failed to get device: %w", err)
	}

	dev := d.createDevice(device)
	d.devices[device.SerialNumber()] = dev
	return dev, err
}
package leetcode import "strings" func lengthLongestPath(input string) int { if !hasFile(input) { return 0 } return llp("\n"+input) - 1 } func max(a, b int) int { if a > b { return a } return b } func nextCR(path string, idx int) int { var i int for i = idx + 1; i < len(path); i++ { if path[i:i+1] == "\n" && (i+1 == len(path) || path[i+1:i+2] != "\t") { break } } return i } func hasSub(path string) bool { return strings.Contains(path, "\n") } func hasFile(path string) bool { return strings.Contains(path, ".") } func llp(path string) int { if !hasFile(path) { return 0 } if !hasSub(path) { return len(path) } var i = nextCR(path, -1) dirLen := i var j, maxSub int for i < len(path) { j = nextCR(path, i) maxSub = max(maxSub, llp(strings.Replace(path[i+1:j], "\n\t", "\n", -1))) i = j } return dirLen + 1 + maxSub }
package main

import (
	"database/sql"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/360EntSecGroup-Skylar/excelize"
	_ "github.com/go-sql-driver/mysql"
)

// Structure mirrors one row of INFORMATION_SCHEMA.COLUMNS; sql.NullString is
// used throughout since several of these columns are nullable.
type Structure struct {
	ColumnName             sql.NullString
	DataType               sql.NullString
	CharacterMaximumLength sql.NullString
	IsNullable             sql.NullString
	ColumnDefault          sql.NullString
	ColumnComment          sql.NullString
}

// main exports a MySQL schema to an Excel workbook: one overview sheet with
// hyperlinks, plus one sheet per table listing its columns.
//
// FIX: the original used `err` without ever declaring it (`if err != nil`
// after discarding the Query error, and `err = xlsx.SaveAs(...)`), which does
// not compile; errors are now declared and checked. `defer rows.Close()`
// inside the table loop (which leaked handles until exit) is replaced with an
// explicit close per iteration, and the success message is only printed after
// SaveAs succeeds.
func main() {
	var (
		url      string
		dbName   string
		userName string
		password string
		fileName string
	)
	//命令行参数
	u := flag.String("url", "", "数据库url")
	d := flag.String("db", "", "库名")
	un := flag.String("u", "", "用户名")
	p := flag.String("p", "", "密码")
	f := flag.String("f", "", "导出文件名")
	flag.Parse()
	url = *u
	dbName = *d
	userName = *un
	password = *p
	fileName = *f

	//如果命令行参数没有输全 则开启交互模式
	if url == "" || dbName == "" || userName == "" || password == "" {
		fmt.Println("请输入数据库url: (必填 例:localhost:3306)")
		fmt.Scanln(&url)
		fmt.Println("请输入数据库名称: (必填)")
		fmt.Scanln(&dbName)
		fmt.Println("请输入数据库用户名: (必填)")
		fmt.Scanln(&userName)
		fmt.Println("请输入数据库密码: (必填)")
		fmt.Scanln(&password)
		fmt.Println("请输入导出文件名称: (选填 不填直接回车)")
		fmt.Scanln(&fileName)
	}

	//如果经过两种模式后 参数还是不全 则结束程序
	if url == "" || dbName == "" || userName == "" || password == "" {
		fmt.Println("参数不全,无法启动程序")
		return
	}
	if fileName == "" {
		fileName = "structure.xlsx"
	} else {
		fileName = fileName + ".xlsx"
	}
	fmt.Println("参数校验通过,程序开始开始执行")

	db, err := sql.Open("mysql", ""+userName+":"+password+"@tcp("+url+")/"+dbName+"?charset=utf8")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	xlsx := excelize.NewFile()
	xlsx.SetActiveSheet(0)
	// bordered cell style, and the same with a blue underlined hyperlink font
	style, _ := xlsx.NewStyle(`{"border":[{"type":"left","color":" 000000","style":1},{"type":"top","color":"000000","style":1},{"type":"bottom","color":"000000","style":1},{"type":"right","color":"000000","style":1}]}`)
	hyperLinkStyle, _ := xlsx.NewStyle(`{"border":[{"type":"left","color":" 000000","style":1},{"type":"top","color":"000000","style":1},{"type":"bottom","color":"000000","style":1},{"type":"right","color":"000000","style":1}],"font":{"color":"#1265BE","underline":"single"}}`)

	// overview sheet header
	xlsx.SetSheetName("Sheet1", "概览")
	xlsx.SetCellValue("概览", "A1", "序号")
	xlsx.SetCellStyle("概览", "A1", "A1", style)
	xlsx.SetCellValue("概览", "B1", "备注")
	xlsx.SetCellStyle("概览", "B1", "B1", style)
	xlsx.SetCellValue("概览", "C1", "表名")
	xlsx.SetCellStyle("概览", "C1", "C1", style)
	xlsx.SetColWidth("概览", "B", "C", 50)

	// NOTE(review): dbName is interpolated into SQL; acceptable for a local
	// CLI tool, but parameterized queries would be safer.
	tableRows, err := db.Query("SELECT TABLE_NAME,TABLE_COMMENT FROM information_schema.TABLES WHERE table_schema = '" + dbName + "'")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer tableRows.Close()

	// table count, used only for the progress percentage (error ignored:
	// a zero count merely breaks the progress display)
	var count float64
	db.QueryRow("select count(*) FROM information_schema.TABLES WHERE table_schema = '" + dbName + "'").Scan(&count)

	tableRowIndex := 2
	for tableRows.Next() {
		var TableName string
		var TableComment string
		tableRows.Scan(&TableName, &TableComment)

		rows, err := db.Query("SELECT COLUMN_NAME,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,IS_NULLABLE,COLUMN_DEFAULT,COLUMN_COMMENT FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = '" + dbName + "' AND table_name = '" + TableName + "'")
		if err != nil {
			fmt.Println(err)
			return
		}

		// per-table sheet header
		xlsx.NewSheet(TableName)
		xlsx.SetColWidth(TableName, "A", "A", 30)
		xlsx.SetColWidth(TableName, "F", "F", 80)
		xlsx.SetCellValue(TableName, "A1", "列名")
		xlsx.SetCellStyle(TableName, "A1", "A1", style)
		xlsx.SetCellValue(TableName, "B1", "类型")
		xlsx.SetCellStyle(TableName, "B1", "B1", style)
		xlsx.SetCellValue(TableName, "C1", "长度")
		xlsx.SetCellStyle(TableName, "C1", "C1", style)
		xlsx.SetCellValue(TableName, "D1", "是否为空")
		xlsx.SetCellStyle(TableName, "D1", "D1", style)
		xlsx.SetCellValue(TableName, "E1", "默认值")
		xlsx.SetCellStyle(TableName, "E1", "E1", style)
		xlsx.SetCellValue(TableName, "F1", "备注")
		xlsx.SetCellStyle(TableName, "F1", "F1", style)

		rowIndex := 2
		for rows.Next() {
			rowIndexS := strconv.Itoa(rowIndex)
			s := Structure{}
			rows.Scan(&s.ColumnName, &s.DataType, &s.CharacterMaximumLength, &s.IsNullable, &s.ColumnDefault, &s.ColumnComment)
			xlsx.SetCellValue(TableName, "A"+rowIndexS, s.ColumnName.String)
			xlsx.SetCellStyle(TableName, "A"+rowIndexS, "A"+rowIndexS, style)
			xlsx.SetCellValue(TableName, "B"+rowIndexS, s.DataType.String)
			xlsx.SetCellStyle(TableName, "B"+rowIndexS, "B"+rowIndexS, style)
			xlsx.SetCellValue(TableName, "C"+rowIndexS, s.CharacterMaximumLength.String)
			xlsx.SetCellStyle(TableName, "C"+rowIndexS, "C"+rowIndexS, style)
			xlsx.SetCellValue(TableName, "D"+rowIndexS, s.IsNullable.String)
			xlsx.SetCellStyle(TableName, "D"+rowIndexS, "D"+rowIndexS, style)
			xlsx.SetCellValue(TableName, "E"+rowIndexS, s.ColumnDefault.String)
			xlsx.SetCellStyle(TableName, "E"+rowIndexS, "E"+rowIndexS, style)
			xlsx.SetCellValue(TableName, "F"+rowIndexS, s.ColumnComment.String)
			xlsx.SetCellStyle(TableName, "F"+rowIndexS, "F"+rowIndexS, style)
			rowIndex++
		}
		// close per iteration instead of deferring (defer would pile up
		// open result sets until main returns)
		rows.Close()

		// overview row with a hyperlink to the table's sheet
		tableRowIndexS := strconv.Itoa(tableRowIndex)
		xlsx.SetCellValue("概览", "A"+tableRowIndexS, tableRowIndex-1)
		xlsx.SetCellStyle("概览", "A"+tableRowIndexS, "A"+tableRowIndexS, style)
		xlsx.SetCellValue("概览", "B"+tableRowIndexS, TableComment)
		xlsx.SetCellStyle("概览", "B"+tableRowIndexS, "B"+tableRowIndexS, style)
		xlsx.SetCellValue("概览", "C"+tableRowIndexS, TableName)
		xlsx.SetCellStyle("概览", "C"+tableRowIndexS, "C"+tableRowIndexS, hyperLinkStyle)
		xlsx.SetCellHyperLink("概览", "C"+tableRowIndexS, TableName+"!A1", "Location")
		fmt.Println("当前已完成" + strconv.Itoa(int((float64(tableRowIndex-1)/count)*100)) + "%...")
		tableRowIndex++
	}

	if err := xlsx.SaveAs(getCurrentDirectory() + "/" + fileName); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("表结构导出完毕完毕,再见! " + getCurrentDirectory() + "/" + fileName)
}

// getCurrentDirectory returns the executable's directory with forward slashes.
func getCurrentDirectory() string {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatal(err)
	}
	return strings.Replace(dir, "\\", "/", -1)
}
package main import ( "fmt" "io" "io/ioutil" "os/exec" "strconv" "sync" ) func main() { // cmd := exec.Command("ls", "-lrth", "/tmp/") cmds := make(map[*exec.Cmd]io.ReadCloser) for i := 0; i < 10; i++ { cmd := exec.Command("sleep", strconv.Itoa(i)) outPipe, _ := cmd.StdoutPipe() cmd.Start() cmds[cmd] = outPipe } // outPipe, _ := cmd.StdoutPipe() // cmd.Start() fmt.Println("Hello, World!") fmt.Println("Doing some busy work!") var wg sync.WaitGroup for cmd, outPipe := range cmds { wg.Add(1) go func(cmd *exec.Cmd, outPipe io.ReadCloser) { defer wg.Done() out, _ := ioutil.ReadAll(outPipe) cmd.Wait() fmt.Println(string(out)) }(cmd, outPipe) } wg.Wait() // out, _ := cmd.Output() }
package rate_type import ( "sync" "vcm/api" ) type XEM string func (x *XEM) UpdateRate(wg *sync.WaitGroup) { defer wg.Done() rate, err := api.FetchRate("xem_jpy") if err != nil { panic(err) } *x = XEM(rate) }
package slack import ( "bytes" "encoding/json" "fmt" "io" "net/http" "net/http/httputil" ) const ( baseURL = "https://slack.com/api/" ) // Client is a client that authenticates requests to the Slack api. type Client struct { Token string BaseURL string } // NewClient creates a new slack client. func NewClient(oauthToken string) *Client { return &Client{ Token: oauthToken, BaseURL: baseURL, } } func (c *Client) buildUrl(endpoint string) string { return fmt.Sprintf("%v%v", c.BaseURL, endpoint) } // Post sends a message to a given channel. func (c *Client) Post(channel, message string) error { endpoint := "chat.postMessage" m := &Message{ Channel: channel, Text: message, } encoded, err := json.Marshal(m) if err != nil { return fmt.Errorf("failed to marshal data: %v", err) } req, err := c.signedRequest("POST", c.buildUrl(endpoint), bytes.NewReader(encoded)) if err != nil { return fmt.Errorf("failed to get signed request: %v", err) } requestDump, err := httputil.DumpRequest(req, true) if err != nil { fmt.Println(err) } fmt.Println(string(requestDump)) resp, err := http.DefaultClient.Do(req) if err != nil { return fmt.Errorf("failed to send request: %v", err) } if resp.StatusCode >= 300 { return fmt.Errorf("Bad response code: %v", resp.StatusCode) } return nil } // signedRequest adds the right headers and returns the request for further modification. // TODO maybe make this public if needed. func (c *Client) signedRequest(method, url string, body io.Reader) (*http.Request, error) { req, err := http.NewRequest(method, url, body) if err != nil { return nil, fmt.Errorf("could not make a request: %v", err) } req.Header.Add("Authorization", c.bearerTokenHeader()) req.Header.Add("Content-Type", "application/json; charset=utf-8") return req, nil } func (c *Client) bearerTokenHeader() string { return fmt.Sprintf("Bearer %v", c.Token) } // Message is a struct for marshaling data into a format slack likes. 
type Message struct { Channel string `json:"channel"` Text string `json:"text"` }
// Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package streamingest

import (
	"context"
	"fmt"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/ccl/streamingccl"
	"github.com/cockroachdb/cockroach/pkg/jobs"
	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/security"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/stretchr/testify/require"
)

// partitionToEvent maps a stream partition address to the ordered list of
// events that partition should emit in a test case.
type partitionToEvent map[streamingccl.PartitionAddress][]streamingccl.Event

// TestStreamIngestionFrontierProcessor feeds mocked per-partition checkpoint
// events through a stream ingestion processor into a frontier processor and
// verifies the resolved timestamps the frontier emits.
func TestStreamIngestionFrontierProcessor(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	tc := testcluster.StartTestCluster(t, 3 /* nodes */, base.TestClusterArgs{})
	defer tc.Stopper().Stop(context.Background())
	kvDB := tc.Server(0).DB()
	st := cluster.MakeTestingClusterSettings()

	evalCtx := tree.MakeTestingEvalContext(st)
	testDiskMonitor := execinfra.NewTestDiskMonitor(ctx, st)
	defer testDiskMonitor.Stop(ctx)

	registry := tc.Server(0).JobRegistry().(*jobs.Registry)
	flowCtx := execinfra.FlowCtx{
		Cfg: &execinfra.ServerConfig{
			Settings:    st,
			DB:          kvDB,
			JobRegistry: registry,
		},
		EvalCtx:     &evalCtx,
		DiskMonitor: testDiskMonitor,
	}

	out := &distsqlutils.RowBuffer{}
	post := execinfrapb.PostProcessSpec{}

	var spec execinfrapb.StreamIngestionDataSpec
	// The stream address needs to be set with a scheme we support, but this test
	// will mock out the actual client.
	spec.StreamAddress = "randomgen://test/"
	pa1 := streamingccl.PartitionAddress("randomgen://test1/")
	pa2 := streamingccl.PartitionAddress("randomgen://test2/")

	v := roachpb.MakeValueFromString("value_1")
	v.Timestamp = hlc.Timestamp{WallTime: 1}
	sampleKV := roachpb.KeyValue{Key: roachpb.Key("key_1"), Value: v}

	// NOTE: the loop variable deliberately shadows the testcluster 'tc' above;
	// inside the loop 'tc' refers to the test case.
	for _, tc := range []struct {
		name                      string
		events                    partitionToEvent
		expectedFrontierTimestamp hlc.Timestamp
		frontierStartTime         hlc.Timestamp
	}{
		{
			name: "same-resolved-ts-across-partitions",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 4}),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 4}),
			}},
			expectedFrontierTimestamp: hlc.Timestamp{WallTime: 4},
		},
		{
			// No progress should be reported to the job since partition 2 has not
			// emitted a resolved ts.
			name: "no-checkpoints",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeKVEvent(sampleKV),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeKVEvent(sampleKV),
			}},
		},
		{
			// No progress should be reported to the job since partition 2 has not
			// emitted a resolved ts.
			name: "no-checkpoint-from-one-partition",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 4}),
			}, pa2: []streamingccl.Event{}},
		},
		{
			name: "one-partition-ahead-of-the-other",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 4}),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1}),
			}},
			expectedFrontierTimestamp: hlc.Timestamp{WallTime: 1},
		},
		{
			name: "some-interleaved-timestamps",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 2}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 4}),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 3}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 5}),
			}},
			expectedFrontierTimestamp: hlc.Timestamp{WallTime: 4},
		},
		{
			name: "some-interleaved-logical-timestamps",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1, Logical: 2}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1, Logical: 4}),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1, Logical: 1}),
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 2}),
			}},
			expectedFrontierTimestamp: hlc.Timestamp{WallTime: 1, Logical: 4},
		},
		{
			// The frontier should error out as it receives a checkpoint with a ts
			// lower than its start time.
			name: "checkpoint-lower-than-start-ts",
			events: partitionToEvent{pa1: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1, Logical: 4}),
			}, pa2: []streamingccl.Event{
				streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 1, Logical: 2}),
			}},
			frontierStartTime: hlc.Timestamp{WallTime: 1, Logical: 3},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			spec.PartitionAddresses = []string{string(pa1), string(pa2)}
			proc, err := newStreamIngestionDataProcessor(&flowCtx, 0 /* processorID */, spec, &post, out)
			require.NoError(t, err)
			sip, ok := proc.(*streamIngestionProcessor)
			if !ok {
				t.Fatal("expected the processor that's created to be a stream ingestion processor")
			}

			// Inject a mock client with the events being tested against.
			sip.client = &mockStreamClient{
				partitionEvents: tc.events,
			}

			// Create a frontier processor.
			var frontierSpec execinfrapb.StreamIngestionFrontierSpec
			pa1Key := roachpb.Key(pa1)
			pa2Key := roachpb.Key(pa2)
			frontierSpec.TrackedSpans = []roachpb.Span{{Key: pa1Key, EndKey: pa1Key.Next()}, {Key: pa2Key, EndKey: pa2Key.Next()}}

			if !tc.frontierStartTime.IsEmpty() {
				frontierSpec.HighWaterAtStart = tc.frontierStartTime
			}

			// Create a mock ingestion job.
			// NOTE(review): 'record' is not passed to anything in the visible
			// code — possibly vestigial; confirm against the rest of the package.
			record := jobs.Record{
				Description: "fake ingestion job",
				Username:    security.TestUserName(),
				Details:     jobspb.StreamIngestionDetails{StreamAddress: "foo"},
				// We don't use this so it does not matter what we set it too, as long
				// as it is non-nil.
				Progress: jobspb.ImportProgress{},
			}
			record.CreatedBy = &jobs.CreatedByInfo{
				Name: "ingestion",
			}

			frontierPost := execinfrapb.PostProcessSpec{}
			frontierOut := distsqlutils.RowBuffer{}
			frontierProc, err := newStreamIngestionFrontierProcessor(&flowCtx, 0, /* processorID*/
				frontierSpec, sip, &frontierPost, &frontierOut)
			require.NoError(t, err)
			fp, ok := frontierProc.(*streamIngestionFrontier)
			if !ok {
				t.Fatal("expected the processor that's created to be a stream ingestion frontier")
			}
			ctxWithCancel, cancel := context.WithCancel(ctx)
			defer cancel()
			fp.Run(ctxWithCancel)

			if !frontierOut.ProducerClosed() {
				t.Fatal("producer for StreamFrontierProcessor not closed")
			}

			// Drain the frontier's output, checking monotonicity of emitted
			// resolved timestamps; a meta record is only expected in the
			// checkpoint-below-start-time error case.
			var prevTimestamp hlc.Timestamp
			for {
				row, meta := frontierOut.Next()
				if meta != nil {
					if !tc.frontierStartTime.IsEmpty() {
						require.True(t, testutils.IsError(meta.Err, fmt.Sprintf("got a resolved timestamp ."+
							"* that is less than the frontier processor start time %s",
							tc.frontierStartTime.String())))
						return
					}
					t.Fatalf("unexpected meta record returned by frontier processor: %+v\n", *meta)
				}
				if row == nil {
					break
				}
				datum := row[0].Datum
				protoBytes, ok := datum.(*tree.DBytes)
				require.True(t, ok)

				var ingestedTimestamp hlc.Timestamp
				require.NoError(t, protoutil.Unmarshal([]byte(*protoBytes), &ingestedTimestamp))
				// Ensure that the rows emitted by the frontier never regress the ts.
				if !prevTimestamp.IsEmpty() {
					require.True(t, prevTimestamp.Less(ingestedTimestamp))
				}
				prevTimestamp = ingestedTimestamp
			}
			// Check the final ts recorded by the frontier.
			require.Equal(t, tc.expectedFrontierTimestamp, prevTimestamp)
		})
	}
}
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package example

import (
	"context"
	"time"

	"chromiumos/tast/local/profiler"
	"chromiumos/tast/testing"
)

// init registers the Profiler example test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:     Profiler,
		Desc:     "Demonstrates how to use profiler package",
		Contacts: []string{"chinglinyu@chromium.org", "tast-owners@google.com"},
		Attr:     []string{"group:mainline", "informational"},
	})
}

// Profiler demonstrates starting a set of profilers (top, vmstat, perf),
// letting them collect data for a couple of seconds, then stopping them and
// logging one of the collected metrics.
func Profiler(ctx context.Context, s *testing.State) {
	// Receives the parsed perf-stat result; presumably populated when
	// p.End runs below — confirm against the profiler package.
	var perfStatOutput profiler.PerfStatOutput

	profs := []profiler.Profiler{
		// Sample `top` output every 2 seconds.
		profiler.Top(&profiler.TopOpts{
			Interval: 2 * time.Second,
		}),
		// nil opts: use the profiler's defaults.
		profiler.VMStat(nil),
		profiler.Perf(profiler.PerfStatRecordOpts()),
		// Get CPU cycle count for all processes.
		profiler.Perf(profiler.PerfStatOpts(&perfStatOutput, profiler.PerfAllProcs)),
	}

	// Start all profilers, writing their output under the test's out dir.
	p, err := profiler.Start(ctx, s.OutDir(), profs...)
	if err != nil {
		s.Fatal("Failure in starting the profiler: ", err)
	}

	defer func() {
		// End is reported via s.Error (not Fatal) so the remaining log
		// line still runs even if teardown fails.
		if err := p.End(ctx); err != nil {
			s.Error("Failure in ending the profiler: ", err)
		}
		s.Log("All CPU cycle count per second: ", perfStatOutput.CyclesPerSecond)
	}()

	// Wait for 2 seconds to gather perf.data.
	if err := testing.Sleep(ctx, 2*time.Second); err != nil {
		s.Fatal("Failure in sleeping: ", err)
	}
}
package main

/*
Go has no classes; instead, methods may be attached to types.
A method is simply a function with a receiver argument, written between
the func keyword and the method name. Below, the Abs method has a
receiver of type Vertex named v.
*/

import (
	"fmt"
	"math"
)

type Vertex struct {
	X, y float64 // lowercase y is visible inside this package; the export rule applies across packages, not within struct fields of one package.
}

// Abs is an ordinary function that takes a Vertex parameter.
func Abs(v Vertex) float64 {
	xx := v.X * v.X
	yy := v.y * v.y
	return math.Sqrt(xx + yy)
}

// Abs as a method on Vertex: the single receiver sits between the func
// keyword and the method name. Go philosophy: prefer composition over
// inheritance.
func (v Vertex) Abs() float64 {
	return math.Sqrt(v.X*v.X + v.y*v.y)
}

// ScaleCopy uses a value receiver, so it only mutates a copy of the
// caller's Vertex — the caller sees no change.
func (v Vertex) ScaleCopy(times float64) {
	v.X = v.X * times
	v.y = v.y * times
}

// ScalePtr uses a pointer receiver, so it mutates the caller's Vertex,
// just like a normal function receiving a pointer would.
func (v *Vertex) ScalePtr(times float64) {
	v.X = v.X * times
	v.y = v.y * times
}

func main() {
	v := Vertex{3, 4}
	fmt.Println(v.Abs()) // method call
	fmt.Println(Abs(v))  // plain function call
	fmt.Println("--------")

	v.ScaleCopy(10) // does not change v (value receiver)
	fmt.Println(v)
	p := &v
	p.ScaleCopy(10) // automatically treated as (*p).ScaleCopy()
	fmt.Println(v)
	fmt.Println("--------")

	v.ScalePtr(10) // changes v; Go rewrites this as (&v).ScalePtr(10)
	fmt.Println(v)

	/*
	   =======
	   Because ScalePtr takes a pointer receiver, v.ScalePtr() and
	   (&v).ScalePtr() are the same call: Go takes the address of v
	   automatically for convenience.
	   =========
	*/
	p = &v
	p.ScalePtr(10)
	fmt.Println(v)
	(&v).ScalePtr(10)
	fmt.Println(v)
}
package cache

import (
	"fmt"
	"testing"
	"time"
)

// TestExpired checks that an entry constructed with a one-second expiration
// is not reported as expired immediately.
// NOTE(review): whether `expiration` is a TTL duration or an absolute
// deadline is not visible in this file — confirm against the entry type.
func TestExpired(t *testing.T) {
	e := entry{expiration: time.Second}
	if e.IsExpired() {
		t.Fatal()
	}
}

// TestList exercises List before and after deleting keys during iteration.
// NOTE(review): the meaning of the callback's bool return value (continue
// vs. stop iteration) is not visible here; every callback returns false —
// verify this matches the List contract.
func TestList(t *testing.T) {
	c := NewMemCache()
	c.Set("1", "1", 0)
	c.Set("2", "2", 0)
	c.Set("3", "3", 0)

	// Print every key currently stored.
	c.List(func(k string, v CacheEntry) bool {
		fmt.Println(k)
		return false
	})

	// Delete entries from within the iteration callback.
	c.List(func(k string, v CacheEntry) bool {
		c.Delete(k)
		return false
	})

	// List again; prints whatever survived the deletes above.
	c.List(func(k string, v CacheEntry) bool {
		fmt.Println(k)
		return false
	})
}
package operaads

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"text/template"

	"github.com/prebid/prebid-server/adapters"
	"github.com/prebid/prebid-server/config"
	"github.com/prebid/prebid-server/errortypes"
	"github.com/prebid/prebid-server/macros"
	"github.com/prebid/prebid-server/openrtb_ext"

	"github.com/prebid/openrtb/v19/openrtb2"
)

// adapter holds the parsed endpoint URL template shared by all requests.
type adapter struct {
	epTemplate *template.Template
}

var (
	errBannerFormatMiss = errors.New("Size information missing for banner")
	errDeviceOrOSMiss   = errors.New("Impression is missing device OS information")
)

// Builder builds a new instance of the operaads adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	epTemplate, err := template.New("endpoint").Parse(config.Endpoint)
	if err != nil {
		return nil, err
	}
	bidder := &adapter{
		epTemplate: epTemplate,
	}
	return bidder, nil
}

// MakeRequests splits the incoming bid request into one outgoing request per
// (impression, media type) pair. Per-impression failures are collected into
// the returned error slice; only a request-level validation failure aborts
// the whole batch.
func (a *adapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	impCount := len(request.Imp)
	requestData := make([]*adapters.RequestData, 0, impCount)
	errs := []error{}

	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")

	// Request-level validation (device OS presence) gates everything.
	err := checkRequest(request)
	if err != nil {
		errs = append(errs, &errortypes.BadInput{
			Message: err.Error(),
		})
		return nil, errs
	}

	for _, imp := range request.Imp {
		// imp is a copy of the slice element, so the mutations below do
		// not touch request.Imp.
		var bidderExt adapters.ExtImpBidder
		if err := json.Unmarshal(imp.Ext, &bidderExt); err != nil {
			errs = append(errs, &errortypes.BadInput{
				Message: err.Error(),
			})
			continue
		}

		var operaadsExt openrtb_ext.ImpExtOperaads
		if err := json.Unmarshal(bidderExt.Bidder, &operaadsExt); err != nil {
			errs = append(errs, &errortypes.BadInput{
				Message: err.Error(),
			})
			continue
		}

		// Fill the endpoint template with the publisher/endpoint IDs from
		// the impression's bidder extension.
		macro := macros.EndpointTemplateParams{PublisherID: operaadsExt.PublisherID, AccountID: operaadsExt.EndpointID}
		endpoint, err := macros.ResolveMacros(a.epTemplate, &macro)
		if err != nil {
			errs = append(errs, &errortypes.BadInput{
				Message: err.Error(),
			})
			continue
		}

		imp.TagID = operaadsExt.PlacementID

		// Fan out: one outgoing request per media type present on the imp.
		formats := make([]interface{}, 0, 1)
		if imp.Native != nil {
			formats = append(formats, imp.Native)
		}
		if imp.Video != nil {
			formats = append(formats, imp.Video)
		}
		if imp.Banner != nil {
			formats = append(formats, imp.Banner)
		}
		for _, format := range formats {
			req, err := flatImp(*request, imp, headers, endpoint, format)
			if err != nil {
				errs = append(errs, &errortypes.BadInput{
					Message: err.Error(),
				})
				continue
			}
			if req != nil {
				requestData = append(requestData, req)
			}
		}
	}
	return requestData, errs
}

// flatImp builds a single-impression outgoing request restricted to the
// media type given by format. The other media-type fields are cleared and
// the impression ID is tagged with the media type (see buildOperaImpId) so
// MakeBids can recover it from the bid response. Returns (nil, nil) for an
// unrecognized format.
func flatImp(requestCopy openrtb2.BidRequest, impCopy openrtb2.Imp, headers http.Header, endpoint string, format interface{}) (*adapters.RequestData, error) {
	switch format.(type) {
	case *openrtb2.Video:
		impCopy.Native = nil
		impCopy.Banner = nil
		impCopy.ID = buildOperaImpId(impCopy.ID, openrtb_ext.BidTypeVideo)
	case *openrtb2.Banner:
		impCopy.Video = nil
		impCopy.Native = nil
		impCopy.ID = buildOperaImpId(impCopy.ID, openrtb_ext.BidTypeBanner)
	case *openrtb2.Native:
		impCopy.Video = nil
		impCopy.Banner = nil
		impCopy.ID = buildOperaImpId(impCopy.ID, openrtb_ext.BidTypeNative)
	default: // unknown media type: nothing to flatten, skip silently
		return nil, nil
	}
	// Normalize the remaining media object (banner size, native wrapper).
	err := convertImpression(&impCopy)
	if err != nil {
		return nil, err
	}
	requestCopy.Imp = []openrtb2.Imp{impCopy}
	reqJSON, err := json.Marshal(&requestCopy)
	if err != nil {
		return nil, err
	}
	return &adapters.RequestData{
		Method:  http.MethodPost,
		Uri:     endpoint,
		Body:    reqJSON,
		Headers: headers,
	}, nil
}

// checkRequest validates request-level requirements: the device OS must be
// present for this bidder.
func checkRequest(request *openrtb2.BidRequest) error {
	if request.Device == nil || len(request.Device.OS) == 0 {
		return errDeviceOrOSMiss
	}
	return nil
}

// convertImpression normalizes the impression in place: ensures the banner
// carries explicit W/H, and wraps a bare native request payload in a
// top-level "native" object if it lacks one.
func convertImpression(imp *openrtb2.Imp) error {
	if imp.Banner != nil {
		bannerCopy, err := convertBanner(imp.Banner)
		if err != nil {
			return err
		}
		imp.Banner = bannerCopy
	}
	if imp.Native != nil && imp.Native.Request != "" {
		v := make(map[string]interface{})
		err := json.Unmarshal([]byte(imp.Native.Request), &v)
		if err != nil {
			return err
		}
		_, ok := v["native"]
		if !ok {
			// Payload is not wrapped; re-serialize it under a "native" key.
			body, err := json.Marshal(struct {
				Native interface{} `json:"native"`
			}{
				Native: v,
			})
			if err != nil {
				return err
			}
			// Copy-on-write: leave the caller's original Native untouched.
			native := *imp.Native
			native.Request = string(body)
			imp.Native = &native
		}
	}
	return nil
}

// make sure that banner has openrtb 2.3-compatible size information
// (explicit W/H). Falls back to the first Format entry; errors if neither
// is available.
func convertBanner(banner *openrtb2.Banner) (*openrtb2.Banner, error) {
	if banner.W == nil || banner.H == nil || *banner.W == 0 || *banner.H == 0 {
		if len(banner.Format) > 0 {
			f := banner.Format[0]
			bannerCopy := *banner
			bannerCopy.W = openrtb2.Int64Ptr(f.W)
			bannerCopy.H = openrtb2.Int64Ptr(f.H)
			return &bannerCopy, nil
		} else {
			return nil, errBannerFormatMiss
		}
	}
	return banner, nil
}

// MakeBids parses the bid response, drops zero-priced bids, and recovers
// each bid's original impression ID and media type from the tag added by
// buildOperaImpId.
func (a *adapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if response.StatusCode == http.StatusNoContent {
		return nil, nil
	}
	if response.StatusCode == http.StatusBadRequest {
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
		}}
	}
	if response.StatusCode != http.StatusOK {
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
		}}
	}
	var parsedResponse openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &parsedResponse); err != nil {
		return nil, []error{&errortypes.BadServerResponse{
			Message: err.Error(),
		}}
	}
	bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
	for _, sb := range parsedResponse.SeatBid {
		for i := 0; i < len(sb.Bid); i++ {
			// bid is a fresh copy each iteration, so taking its address
			// below is safe.
			bid := sb.Bid[i]
			if bid.Price != 0 {
				var bidType openrtb_ext.BidType
				bid.ImpID, bidType = parseOriginImpId(bid.ImpID)
				bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
					Bid:     &bid,
					BidType: bidType,
				})
			}
		}
	}
	return bidResponse, nil
}

// buildOperaImpId tags an impression ID with the media type it was split
// out for, producing "<originId>:opa:<bidType>".
func buildOperaImpId(originId string, bidType openrtb_ext.BidType) string {
	return strings.Join([]string{originId, "opa", string(bidType)}, ":")
}

// parseOriginImpId reverses buildOperaImpId: it strips the trailing
// ":opa:<bidType>" segments and returns the original ID and the bid type.
// IDs without at least one ':' are returned unchanged with an empty type.
func parseOriginImpId(impId string) (originId string, bidType openrtb_ext.BidType) {
	items := strings.Split(impId, ":")
	if len(items) < 2 {
		return impId, ""
	}
	return strings.Join(items[:len(items)-2], ":"), openrtb_ext.BidType(items[len(items)-1])
}
/* Copyright 2021 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import "fmt" func HumanReadableBytesSizeIEC(b int64) string { const unit = 1024 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) }
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/marshal"
	"gvisor.dev/gvisor/pkg/marshal/primitive"
	"gvisor.dev/gvisor/pkg/sentry/arch"
	"gvisor.dev/gvisor/pkg/sentry/fsimpl/host"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
	"gvisor.dev/gvisor/pkg/sentry/socket"
	"gvisor.dev/gvisor/pkg/sentry/socket/control"
	"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
	"gvisor.dev/gvisor/pkg/syserr"
	"gvisor.dev/gvisor/pkg/usermem"
)

// maxAddrLen is the maximum socket address length we're willing to accept.
const maxAddrLen = 200

// maxOptLen is the maximum sockopt parameter length we're willing to accept.
const maxOptLen = 1024 * 8

// maxControlLen is the maximum length of the msghdr.msg_control buffer we're
// willing to accept. Note that this limit is smaller than Linux, which allows
// buffers upto INT_MAX.
const maxControlLen = 10 * 1024 * 1024

// maxListenBacklog is the maximum limit of listen backlog supported.
const maxListenBacklog = 1024

// nameLenOffset is the offset from the start of the MessageHeader64 struct to
// the NameLen field.
const nameLenOffset = 8

// controlLenOffset is the offset from the start of the MessageHeader64 struct
// to the ControlLen field.
const controlLenOffset = 40

// flagsOffset is the offset from the start of the MessageHeader64 struct
// to the Flags field.
const flagsOffset = 48

// sizeOfInt32 is the size in bytes of an int32, used when validating
// sockopt buffer lengths.
const sizeOfInt32 = 4

// messageHeader64Len is the length of a MessageHeader64 struct.
var messageHeader64Len = uint64((*MessageHeader64)(nil).SizeBytes())

// multipleMessageHeader64Len is the length of a multipeMessageHeader64 struct.
var multipleMessageHeader64Len = uint64((*multipleMessageHeader64)(nil).SizeBytes())

// baseRecvFlags are the flags that are accepted across recvmsg(2),
// recvmmsg(2), and recvfrom(2).
const baseRecvFlags = linux.MSG_OOB | linux.MSG_DONTROUTE | linux.MSG_DONTWAIT | linux.MSG_NOSIGNAL | linux.MSG_WAITALL | linux.MSG_TRUNC | linux.MSG_CTRUNC

// MessageHeader64 is the 64-bit representation of the msghdr struct used in
// the recvmsg and sendmsg syscalls.
//
// +marshal
type MessageHeader64 struct {
	// Name is the optional pointer to a network address buffer.
	Name uint64

	// NameLen is the length of the buffer pointed to by Name.
	NameLen uint32
	_       uint32 // explicit padding

	// Iov is a pointer to an array of io vectors that describe the memory
	// locations involved in the io operation.
	Iov uint64

	// IovLen is the length of the array pointed to by Iov.
	IovLen uint64

	// Control is the optional pointer to ancillary control data.
	Control uint64

	// ControlLen is the length of the data pointed to by Control.
	ControlLen uint64

	// Flags on the sent/received message.
	Flags int32
	_     int32 // explicit padding
}

// multipleMessageHeader64 is the 64-bit representation of the mmsghdr struct used in
// the recvmmsg and sendmmsg syscalls.
//
// +marshal
type multipleMessageHeader64 struct {
	msgHdr MessageHeader64
	msgLen uint32
	_      int32 // explicit padding
}

// CaptureAddress allocates memory for and copies a socket address structure
// from the untrusted address space range.
func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
	// Reject absurdly large addresses before allocating.
	if addrlen > maxAddrLen {
		return nil, linuxerr.EINVAL
	}

	addrBuf := make([]byte, addrlen)
	if _, err := t.CopyInBytes(addr, addrBuf); err != nil {
		return nil, err
	}

	return addrBuf, nil
}

// writeAddress writes a sockaddr structure and its length to an output buffer
// in the untrusted address space range. If the address is bigger than the
// buffer, it is truncated.
func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error {
	// Get the buffer length.
	var bufLen uint32
	if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil {
		return err
	}

	// A user-supplied length that is negative as an int32 is invalid.
	if int32(bufLen) < 0 {
		return linuxerr.EINVAL
	}

	// Write the length unconditionally.
	if _, err := primitive.CopyUint32Out(t, addrLenPtr, addrLen); err != nil {
		return err
	}

	if addr == nil {
		return nil
	}

	if bufLen > addrLen {
		bufLen = addrLen
	}

	// Copy as much of the address as will fit in the buffer.
	encodedAddr := t.CopyScratchBuffer(addr.SizeBytes())
	addr.MarshalUnsafe(encodedAddr)
	if bufLen > uint32(len(encodedAddr)) {
		bufLen = uint32(len(encodedAddr))
	}
	_, err := t.CopyOutBytes(addrPtr, encodedAddr[:int(bufLen)])
	return err
}

// Socket implements the linux syscall socket(2).
func Socket(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	domain := int(args[0].Int())
	stype := args[1].Int()
	protocol := int(args[2].Int())

	// Check and initialize the flags. Only the low 4 type bits plus
	// SOCK_NONBLOCK/SOCK_CLOEXEC are allowed.
	if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
		return 0, nil, linuxerr.EINVAL
	}

	// Create the new socket.
	s, e := socket.New(t, domain, linux.SockType(stype&0xf), protocol)
	if e != nil {
		return 0, nil, e.ToError()
	}
	// The FD table takes its own reference below; drop ours on return.
	defer s.DecRef(t)

	if err := s.SetStatusFlags(t, t.Credentials(), uint32(stype&linux.SOCK_NONBLOCK)); err != nil {
		return 0, nil, err
	}

	fd, err := t.NewFDFrom(0, s, kernel.FDFlags{
		CloseOnExec: stype&linux.SOCK_CLOEXEC != 0,
	})
	if err != nil {
		return 0, nil, err
	}

	return uintptr(fd), nil, nil
}

// SocketPair implements the linux syscall socketpair(2).
func SocketPair(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	domain := int(args[0].Int())
	stype := args[1].Int()
	protocol := int(args[2].Int())
	addr := args[3].Pointer()

	// Check and initialize the flags.
	if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
		return 0, nil, linuxerr.EINVAL
	}

	// Create the socket pair.
	s1, s2, e := socket.Pair(t, domain, linux.SockType(stype&0xf), protocol)
	if e != nil {
		return 0, nil, e.ToError()
	}
	// Adding to the FD table will cause an extra reference to be acquired.
	defer s1.DecRef(t)
	defer s2.DecRef(t)

	nonblocking := uint32(stype & linux.SOCK_NONBLOCK)
	if err := s1.SetStatusFlags(t, t.Credentials(), nonblocking); err != nil {
		return 0, nil, err
	}
	if err := s2.SetStatusFlags(t, t.Credentials(), nonblocking); err != nil {
		return 0, nil, err
	}

	// Create the FDs for the sockets.
	flags := kernel.FDFlags{
		CloseOnExec: stype&linux.SOCK_CLOEXEC != 0,
	}
	fds, err := t.NewFDs(0, []*vfs.FileDescription{s1, s2}, flags)
	if err != nil {
		return 0, nil, err
	}

	if _, err := primitive.CopyInt32SliceOut(t, addr, fds); err != nil {
		// Copying the FD numbers out failed; unwind by removing both FDs
		// from the table so nothing leaks.
		for _, fd := range fds {
			if file := t.FDTable().Remove(t, fd); file != nil {
				file.DecRef(t)
			}
		}
		return 0, nil, err
	}

	return 0, nil, nil
}

// Connect implements the linux syscall connect(2).
func Connect(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	addr := args[1].Pointer()
	addrlen := args[2].Uint()

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, nil, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, nil, linuxerr.ENOTSOCK
	}

	// Capture address and call syscall implementation.
	a, err := CaptureAddress(t, addr, addrlen)
	if err != nil {
		return 0, nil, err
	}

	// Block unless the FD is non-blocking; an interrupted connect is
	// restartable (ERESTARTSYS).
	blocking := (file.StatusFlags() & linux.SOCK_NONBLOCK) == 0
	return 0, nil, linuxerr.ConvertIntr(s.Connect(t, a, blocking).ToError(), linuxerr.ERESTARTSYS)
}

// accept is the implementation of the accept syscall. It is called by accept
// and accept4 syscall handlers.
func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
	// Check that no unsupported flags are passed in.
	if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
		return 0, linuxerr.EINVAL
	}

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, linuxerr.ENOTSOCK
	}

	// Call the syscall implementation for this socket, then copy the
	// output address if one is specified.
	blocking := (file.StatusFlags() & linux.SOCK_NONBLOCK) == 0

	// A zero addrLen pointer means the caller does not want the peer address.
	peerRequested := addrLen != 0
	nfd, peer, peerLen, e := s.Accept(t, peerRequested, flags, blocking)
	if e != nil {
		return 0, linuxerr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
	}
	if peerRequested {
		// NOTE(magi): Linux does not give you an error if it can't
		// write the data back out so neither do we.
		if err := writeAddress(t, peer, peerLen, addr, addrLen); linuxerr.Equals(linuxerr.EINVAL, err) {
			return 0, err
		}
	}
	return uintptr(nfd), nil
}

// Accept4 implements the linux syscall accept4(2).
func Accept4(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() addr := args[1].Pointer() addrlen := args[2].Pointer() flags := int(args[3].Int()) n, err := accept(t, fd, addr, addrlen, flags) return n, nil, err } // Accept implements the linux syscall accept(2). func Accept(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() addr := args[1].Pointer() addrlen := args[2].Pointer() n, err := accept(t, fd, addr, addrlen, 0) return n, nil, err } // Bind implements the linux syscall bind(2). func Bind(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() addr := args[1].Pointer() addrlen := args[2].Uint() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Capture address and call syscall implementation. a, err := CaptureAddress(t, addr, addrlen) if err != nil { return 0, nil, err } return 0, nil, s.Bind(t, a).ToError() } // Listen implements the linux syscall listen(2). func Listen(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() backlog := args[1].Uint() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } if backlog > maxListenBacklog { // Linux treats incoming backlog as uint with a limit defined by // sysctl_somaxconn. // https://github.com/torvalds/linux/blob/7acac4b3196/net/socket.c#L1666 backlog = maxListenBacklog } // Accept one more than the configured listen backlog to keep in parity with // Linux. 
Ref, because of missing equality check here: // https://github.com/torvalds/linux/blob/7acac4b3196/include/net/sock.h#L937 // // In case of unix domain sockets, the following check // https://github.com/torvalds/linux/blob/7d6beb71da3/net/unix/af_unix.c#L1293 // will allow 1 connect through since it checks for a receive queue len > // backlog and not >=. backlog++ return 0, nil, s.Listen(t, int(backlog)).ToError() } // Shutdown implements the linux syscall shutdown(2). func Shutdown(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() how := args[1].Int() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Validate how, then call syscall implementation. switch how { case linux.SHUT_RD, linux.SHUT_WR, linux.SHUT_RDWR: default: return 0, nil, linuxerr.EINVAL } return 0, nil, s.Shutdown(t, int(how)).ToError() } // GetSockOpt implements the linux syscall getsockopt(2). func GetSockOpt(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() level := args[1].Int() name := args[2].Int() optValAddr := args[3].Pointer() optLenAddr := args[4].Pointer() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Read the length. Reject negative values. var optLen int32 if _, err := primitive.CopyInt32In(t, optLenAddr, &optLen); err != nil { return 0, nil, err } if optLen < 0 { return 0, nil, linuxerr.EINVAL } // Call syscall implementation then copy both value and value len out. 
v, e := getSockOpt(t, s, int(level), int(name), optValAddr, int(optLen)) if e != nil { return 0, nil, e.ToError() } if _, err := primitive.CopyInt32Out(t, optLenAddr, int32(v.SizeBytes())); err != nil { return 0, nil, err } if v != nil { if _, err := v.CopyOut(t, optValAddr); err != nil { return 0, nil, err } } return 0, nil, nil } // getSockOpt tries to handle common socket options, or dispatches to a specific // socket implementation. func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) { if level == linux.SOL_SOCKET { switch name { case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL: if len < sizeOfInt32 { return nil, syserr.ErrInvalidArgument } } switch name { case linux.SO_TYPE: _, skType, _ := s.Type() v := primitive.Int32(skType) return &v, nil case linux.SO_DOMAIN: family, _, _ := s.Type() v := primitive.Int32(family) return &v, nil case linux.SO_PROTOCOL: _, _, protocol := s.Type() v := primitive.Int32(protocol) return &v, nil } } return s.GetSockOpt(t, level, name, optValAddr, len) } // SetSockOpt implements the linux syscall setsockopt(2). // // Note that unlike Linux, enabling SO_PASSCRED does not autobind the socket. func SetSockOpt(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() level := args[1].Int() name := args[2].Int() optValAddr := args[3].Pointer() optLen := args[4].Int() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } if optLen < 0 { return 0, nil, linuxerr.EINVAL } if optLen > maxOptLen { return 0, nil, linuxerr.EINVAL } buf := t.CopyScratchBuffer(int(optLen)) if _, err := t.CopyInBytes(optValAddr, buf); err != nil { return 0, nil, err } // Call syscall implementation. 
if err := s.SetSockOpt(t, int(level), int(name), buf); err != nil { return 0, nil, err.ToError() } return 0, nil, nil } // GetSockName implements the linux syscall getsockname(2). func GetSockName(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() addr := args[1].Pointer() addrlen := args[2].Pointer() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Get the socket name and copy it to the caller. v, vl, err := s.GetSockName(t) if err != nil { return 0, nil, err.ToError() } return 0, nil, writeAddress(t, v, vl, addr, addrlen) } // GetPeerName implements the linux syscall getpeername(2). func GetPeerName(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() addr := args[1].Pointer() addrlen := args[2].Pointer() // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Get the socket peer name and copy it to the caller. v, vl, err := s.GetPeerName(t) if err != nil { return 0, nil, err.ToError() } return 0, nil, writeAddress(t, v, vl, addr, addrlen) } // RecvMsg implements the linux syscall recvmsg(2). func RecvMsg(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() msgPtr := args[1].Pointer() flags := args[2].Int() if t.Arch().Width() != 8 { // We only handle 64-bit for now. return 0, nil, linuxerr.EINVAL } // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. 
s, ok := file.Impl().(socket.Socket) if !ok { return 0, nil, linuxerr.ENOTSOCK } // Reject flags that we don't handle yet. if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 { return 0, nil, linuxerr.EINVAL } if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 { flags |= linux.MSG_DONTWAIT } var haveDeadline bool var deadline ktime.Time if dl := s.RecvTimeout(); dl > 0 { deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond) haveDeadline = true } else if dl < 0 { flags |= linux.MSG_DONTWAIT } n, err := recvSingleMsg(t, s, msgPtr, flags, haveDeadline, deadline) return n, nil, err } // RecvMMsg implements the linux syscall recvmmsg(2). func RecvMMsg(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() msgPtr := args[1].Pointer() vlen := args[2].Uint() flags := args[3].Int() toPtr := args[4].Pointer() if t.Arch().Width() != 8 { // We only handle 64-bit for now. return 0, nil, linuxerr.EINVAL } if vlen > linux.UIO_MAXIOV { vlen = linux.UIO_MAXIOV } // Reject flags that we don't handle yet. if flags & ^(baseRecvFlags|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 { return 0, nil, linuxerr.EINVAL } // Get socket from the file descriptor. file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) // Extract the socket. 
	// Extract the socket; the msghdr vector can only be received on a socket FD.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, nil, linuxerr.ENOTSOCK
	}

	// An O_NONBLOCK file implies MSG_DONTWAIT semantics for every message.
	if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
		flags |= linux.MSG_DONTWAIT
	}

	var haveDeadline bool
	var deadline ktime.Time
	if toPtr != 0 {
		// An explicit timeout covers the whole vector of messages.
		var ts linux.Timespec
		if _, err := ts.CopyIn(t, toPtr); err != nil {
			return 0, nil, err
		}
		if !ts.Valid() {
			return 0, nil, linuxerr.EINVAL
		}
		deadline = t.Kernel().MonotonicClock().Now().Add(ts.ToDuration())
		haveDeadline = true
	}

	if !haveDeadline {
		// Fall back to the socket's receive timeout: positive is a deadline,
		// negative means non-blocking.
		if dl := s.RecvTimeout(); dl > 0 {
			deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)
			haveDeadline = true
		} else if dl < 0 {
			flags |= linux.MSG_DONTWAIT
		}
	}

	var count uint32
	var err error
	for i := uint64(0); i < uint64(vlen); i++ {
		mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
		if !ok {
			return 0, nil, linuxerr.EFAULT
		}
		var n uintptr
		if n, err = recvSingleMsg(t, s, mp, flags, haveDeadline, deadline); err != nil {
			break
		}

		// Copy the received length to the caller.
		lp, ok := mp.AddLength(messageHeader64Len)
		if !ok {
			return 0, nil, linuxerr.EFAULT
		}
		if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
			break
		}
		count++
	}

	if count == 0 {
		// Nothing was received; surface the error from the first failure.
		return 0, nil, err
	}
	// Partial success: report how many messages were received, not the error.
	return uintptr(count), nil, nil
}

// getSCMRights normalizes a rights control message to a control.SCMRights,
// wrapping raw *transport.SCMRights host FDs in host-backed file descriptions.
// It panics on any other concrete type (a programmer error).
func getSCMRights(t *kernel.Task, rights transport.RightsControlMessage) control.SCMRights {
	switch v := rights.(type) {
	case control.SCMRights:
		return v
	case *transport.SCMRights:
		rf := control.RightsFiles(fdsToHostFiles(t, v.FDs))
		return &rf
	default:
		panic(fmt.Sprintf("rights of type %T must be *transport.SCMRights or implement SCMRights", rights))
	}
}

// fdsToHostFiles creates vfs.FileDescriptions backed by the given host FDs.
//
// If an error is encountered, only files created before the error will be
// returned. This is what Linux does.
func fdsToHostFiles(ctx context.Context, fds []int) []*vfs.FileDescription {
	files := make([]*vfs.FileDescription, 0, len(fds))
	for _, fd := range fds {
		// Get flags. We do it here because they may be modified
		// by subsequent functions.
		fileFlags, _, errno := unix.Syscall(unix.SYS_FCNTL, uintptr(fd), unix.F_GETFL, 0)
		if errno != 0 {
			ctx.Warningf("Error retrieving host FD flags: %v", error(errno))
			break
		}

		// Create the file backed by hostFD.
		file, err := host.NewFD(ctx, kernel.KernelFromContext(ctx).HostMount(), fd, &host.NewFDOptions{})
		if err != nil {
			ctx.Warningf("Error creating file from host FD: %v", err)
			break
		}

		// Only O_NONBLOCK is carried over from the host FD's flags.
		if err := file.SetStatusFlags(ctx, auth.CredentialsFromContext(ctx), uint32(fileFlags&linux.O_NONBLOCK)); err != nil {
			ctx.Warningf("Error setting flags on host FD file: %v", err)
			break
		}

		files = append(files, file)
	}
	return files
}

// recvSingleMsg receives one message described by the msghdr at msgPtr,
// copying out the payload, control messages, sender address and resulting
// flags as requested by the header. It returns the number of payload bytes
// received.
func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
	// Capture the message header and io vectors.
	var msg MessageHeader64
	if _, err := msg.CopyIn(t, msgPtr); err != nil {
		return 0, err
	}

	if msg.IovLen > linux.UIO_MAXIOV {
		return 0, linuxerr.EMSGSIZE
	}
	dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, err
	}

	// Fast path when no control message nor name buffers are provided.
	if msg.ControlLen == 0 && msg.NameLen == 0 {
		n, mflags, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)
		if err != nil {
			return 0, linuxerr.ConvertIntr(err.ToError(), linuxerr.ERESTARTSYS)
		}
		if !cms.Unix.Empty() {
			// Control messages arrived but the caller provided no buffer for
			// them: report truncation and drop them.
			mflags |= linux.MSG_CTRUNC
			cms.Release(t)
		}

		if int(msg.Flags) != mflags {
			// Copy out the flags to the caller.
			if _, err := primitive.CopyInt32Out(t, msgPtr+flagsOffset, int32(mflags)); err != nil {
				return 0, err
			}
		}

		return uintptr(n), nil
	}

	if msg.ControlLen > maxControlLen {
		return 0, linuxerr.ENOBUFS
	}
	n, mflags, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen)
	if e != nil {
		return 0, linuxerr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
	}
	defer cms.Release(t)

	controlData := make([]byte, 0, msg.ControlLen)
	controlData = control.PackControlMessages(t, cms, controlData)

	if cr, ok := s.(transport.Credentialer); ok && cr.Passcred() {
		creds, _ := cms.Unix.Credentials.(control.SCMCredentials)
		controlData, mflags = control.PackCredentials(t, creds, controlData, mflags)
	}

	if cms.Unix.Rights != nil {
		cms.Unix.Rights = getSCMRights(t, cms.Unix.Rights)
		controlData, mflags = control.PackRights(t, cms.Unix.Rights.(control.SCMRights), flags&linux.MSG_CMSG_CLOEXEC != 0, controlData, mflags)
	}

	// Copy the address to the caller.
	if msg.NameLen != 0 {
		if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil {
			return 0, err
		}
	}

	// Copy the control data to the caller.
	if _, err := primitive.CopyUint64Out(t, msgPtr+controlLenOffset, uint64(len(controlData))); err != nil {
		return 0, err
	}
	if len(controlData) > 0 {
		if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil {
			return 0, err
		}
	}

	// Copy out the flags to the caller.
	if _, err := primitive.CopyInt32Out(t, msgPtr+flagsOffset, int32(mflags)); err != nil {
		return 0, err
	}

	return uintptr(n), nil
}

// recvFrom is the implementation of the recvfrom syscall. It is called by
// recvfrom and recv syscall handlers.
func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
	if int(bufLen) < 0 {
		return 0, linuxerr.EINVAL
	}

	// Reject flags that we don't handle yet.
	if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CONFIRM) != 0 {
		return 0, linuxerr.EINVAL
	}

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, linuxerr.ENOTSOCK
	}

	if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
		flags |= linux.MSG_DONTWAIT
	}

	dst, err := t.SingleIOSequence(bufPtr, int(bufLen), usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, err
	}

	var haveDeadline bool
	var deadline ktime.Time
	if dl := s.RecvTimeout(); dl > 0 {
		deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)
		haveDeadline = true
	} else if dl < 0 {
		flags |= linux.MSG_DONTWAIT
	}

	n, _, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)
	// recvfrom cannot deliver control messages; release any that arrived.
	cm.Release(t)
	if e != nil {
		return 0, linuxerr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
	}

	// Copy the address to the caller.
	if nameLenPtr != 0 {
		if err := writeAddress(t, sender, senderLen, namePtr, nameLenPtr); err != nil {
			return 0, err
		}
	}

	return uintptr(n), nil
}

// RecvFrom implements the linux syscall recvfrom(2).
func RecvFrom(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	bufPtr := args[1].Pointer()
	bufLen := args[2].Uint64()
	flags := args[3].Int()
	namePtr := args[4].Pointer()
	nameLenPtr := args[5].Pointer()

	n, err := recvFrom(t, fd, bufPtr, bufLen, flags, namePtr, nameLenPtr)
	return n, nil, err
}

// SendMsg implements the linux syscall sendmsg(2).
func SendMsg(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	msgPtr := args[1].Pointer()
	flags := args[2].Int()

	if t.Arch().Width() != 8 {
		// We only handle 64-bit for now.
		return 0, nil, linuxerr.EINVAL
	}

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, nil, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, nil, linuxerr.ENOTSOCK
	}

	// Reject flags that we don't handle yet.
	if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
		return 0, nil, linuxerr.EINVAL
	}

	if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
		flags |= linux.MSG_DONTWAIT
	}

	n, err := sendSingleMsg(t, s, file, msgPtr, flags)
	return n, nil, err
}

// SendMMsg implements the linux syscall sendmmsg(2).
func SendMMsg(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	msgPtr := args[1].Pointer()
	vlen := args[2].Uint()
	flags := args[3].Int()

	if t.Arch().Width() != 8 {
		// We only handle 64-bit for now.
		return 0, nil, linuxerr.EINVAL
	}

	// Linux silently clamps the vector length rather than erroring.
	if vlen > linux.UIO_MAXIOV {
		vlen = linux.UIO_MAXIOV
	}

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, nil, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, nil, linuxerr.ENOTSOCK
	}

	// Reject flags that we don't handle yet.
	if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
		return 0, nil, linuxerr.EINVAL
	}

	if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
		flags |= linux.MSG_DONTWAIT
	}

	var count uint32
	var err error
	for i := uint64(0); i < uint64(vlen); i++ {
		mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
		if !ok {
			return 0, nil, linuxerr.EFAULT
		}
		var n uintptr
		if n, err = sendSingleMsg(t, s, file, mp, flags); err != nil {
			break
		}

		// Copy the received length to the caller.
		lp, ok := mp.AddLength(messageHeader64Len)
		if !ok {
			return 0, nil, linuxerr.EFAULT
		}
		if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
			break
		}
		count++
	}

	if count == 0 {
		// Nothing was sent; surface the error from the first failure.
		return 0, nil, err
	}
	// Partial success: report the number of messages sent, not the error.
	return uintptr(count), nil, nil
}

// sendSingleMsg sends one message described by the msghdr at msgPtr, including
// any control messages and an optional destination address. It returns the
// number of payload bytes sent.
func sendSingleMsg(t *kernel.Task, s socket.Socket, file *vfs.FileDescription, msgPtr hostarch.Addr, flags int32) (uintptr, error) {
	// Capture the message header.
	var msg MessageHeader64
	if _, err := msg.CopyIn(t, msgPtr); err != nil {
		return 0, err
	}

	var controlData []byte
	if msg.ControlLen > 0 {
		// Put an upper bound to prevent large allocations.
		if msg.ControlLen > maxControlLen {
			return 0, linuxerr.ENOBUFS
		}
		controlData = make([]byte, msg.ControlLen)
		if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
			return 0, err
		}
	}

	// Read the destination address if one is specified.
	var to []byte
	if msg.NameLen != 0 {
		var err error
		to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen)
		if err != nil {
			return 0, err
		}
	}

	// Read data then call the sendmsg implementation.
	if msg.IovLen > linux.UIO_MAXIOV {
		return 0, linuxerr.EMSGSIZE
	}
	src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, err
	}

	controlMessages, err := control.Parse(t, s, controlData, t.Arch().Width())
	if err != nil {
		return 0, err
	}

	var haveDeadline bool
	var deadline ktime.Time
	if dl := s.SendTimeout(); dl > 0 {
		deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)
		haveDeadline = true
	} else if dl < 0 {
		flags |= linux.MSG_DONTWAIT
	}

	// Call the syscall implementation.
	n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, controlMessages)
	err = HandleIOError(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendmsg", file)
	// Control messages should be released on error as well as for zero-length
	// messages, which are discarded by the receiver.
	if n == 0 || err != nil {
		controlMessages.Release(t)
	}
	return uintptr(n), err
}

// sendTo is the implementation of the sendto syscall. It is called by sendto
// and send syscall handlers.
func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
	bl := int(bufLen)
	if bl < 0 {
		return 0, linuxerr.EINVAL
	}

	// Get socket from the file descriptor.
	file := t.GetFile(fd)
	if file == nil {
		return 0, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// Extract the socket.
	s, ok := file.Impl().(socket.Socket)
	if !ok {
		return 0, linuxerr.ENOTSOCK
	}

	if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
		flags |= linux.MSG_DONTWAIT
	}

	// Read the destination address if one is specified.
	var to []byte
	var err error
	if namePtr != 0 {
		to, err = CaptureAddress(t, namePtr, nameLen)
		if err != nil {
			return 0, err
		}
	}

	src, err := t.SingleIOSequence(bufPtr, bl, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, err
	}

	var haveDeadline bool
	var deadline ktime.Time
	if dl := s.SendTimeout(); dl > 0 {
		deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)
		haveDeadline = true
	} else if dl < 0 {
		flags |= linux.MSG_DONTWAIT
	}

	// Call the syscall implementation.
	n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, socket.ControlMessages{Unix: control.New(t, s)})
	return uintptr(n), HandleIOError(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendto", file)
}

// SendTo implements the linux syscall sendto(2).
func SendTo(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	bufPtr := args[1].Pointer()
	bufLen := args[2].Uint64()
	flags := args[3].Int()
	namePtr := args[4].Pointer()
	nameLen := args[5].Uint()

	n, err := sendTo(t, fd, bufPtr, bufLen, flags, namePtr, nameLen)
	return n, nil, err
}
package odoo

import (
	"fmt"
)

// AccountFiscalPositionAccountTemplate represents account.fiscal.position.account.template model.
//
// NOTE: the tag option was previously misspelled "omptempty", which the xmlrpc
// encoder silently ignored, so empty fields were always serialized. It is now
// the standard "omitempty".
type AccountFiscalPositionAccountTemplate struct {
	LastUpdate    *Time     `xmlrpc:"__last_update,omitempty"`
	AccountDestId *Many2One `xmlrpc:"account_dest_id,omitempty"`
	AccountSrcId  *Many2One `xmlrpc:"account_src_id,omitempty"`
	CreateDate    *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid     *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName   *String   `xmlrpc:"display_name,omitempty"`
	Id            *Int      `xmlrpc:"id,omitempty"`
	PositionId    *Many2One `xmlrpc:"position_id,omitempty"`
	WriteDate     *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid      *Many2One `xmlrpc:"write_uid,omitempty"`
}

// AccountFiscalPositionAccountTemplates represents array of account.fiscal.position.account.template model.
type AccountFiscalPositionAccountTemplates []AccountFiscalPositionAccountTemplate

// AccountFiscalPositionAccountTemplateModel is the odoo model name.
const AccountFiscalPositionAccountTemplateModel = "account.fiscal.position.account.template"

// Many2One convert AccountFiscalPositionAccountTemplate to *Many2One.
func (afpat *AccountFiscalPositionAccountTemplate) Many2One() *Many2One {
	return NewMany2One(afpat.Id.Get(), "")
}

// CreateAccountFiscalPositionAccountTemplate creates a new account.fiscal.position.account.template model and returns its id.
func (c *Client) CreateAccountFiscalPositionAccountTemplate(afpat *AccountFiscalPositionAccountTemplate) (int64, error) {
	ids, err := c.CreateAccountFiscalPositionAccountTemplates([]*AccountFiscalPositionAccountTemplate{afpat})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateAccountFiscalPositionAccountTemplates creates new account.fiscal.position.account.template models and returns their ids.
func (c *Client) CreateAccountFiscalPositionAccountTemplates(afpats []*AccountFiscalPositionAccountTemplate) ([]int64, error) {
	var vv []interface{}
	for _, v := range afpats {
		vv = append(vv, v)
	}
	return c.Create(AccountFiscalPositionAccountTemplateModel, vv)
}

// UpdateAccountFiscalPositionAccountTemplate updates an existing account.fiscal.position.account.template record.
func (c *Client) UpdateAccountFiscalPositionAccountTemplate(afpat *AccountFiscalPositionAccountTemplate) error {
	return c.UpdateAccountFiscalPositionAccountTemplates([]int64{afpat.Id.Get()}, afpat)
}

// UpdateAccountFiscalPositionAccountTemplates updates existing account.fiscal.position.account.template records.
// All records (represented by ids) will be updated by afpat values.
func (c *Client) UpdateAccountFiscalPositionAccountTemplates(ids []int64, afpat *AccountFiscalPositionAccountTemplate) error {
	return c.Update(AccountFiscalPositionAccountTemplateModel, ids, afpat)
}

// DeleteAccountFiscalPositionAccountTemplate deletes an existing account.fiscal.position.account.template record.
func (c *Client) DeleteAccountFiscalPositionAccountTemplate(id int64) error {
	return c.DeleteAccountFiscalPositionAccountTemplates([]int64{id})
}

// DeleteAccountFiscalPositionAccountTemplates deletes existing account.fiscal.position.account.template records.
func (c *Client) DeleteAccountFiscalPositionAccountTemplates(ids []int64) error {
	return c.Delete(AccountFiscalPositionAccountTemplateModel, ids)
}

// GetAccountFiscalPositionAccountTemplate gets account.fiscal.position.account.template existing record.
func (c *Client) GetAccountFiscalPositionAccountTemplate(id int64) (*AccountFiscalPositionAccountTemplate, error) {
	afpats, err := c.GetAccountFiscalPositionAccountTemplates([]int64{id})
	if err != nil {
		return nil, err
	}
	if afpats != nil && len(*afpats) > 0 {
		return &((*afpats)[0]), nil
	}
	return nil, fmt.Errorf("id %v of account.fiscal.position.account.template not found", id)
}

// GetAccountFiscalPositionAccountTemplates gets account.fiscal.position.account.template existing records.
func (c *Client) GetAccountFiscalPositionAccountTemplates(ids []int64) (*AccountFiscalPositionAccountTemplates, error) {
	afpats := &AccountFiscalPositionAccountTemplates{}
	if err := c.Read(AccountFiscalPositionAccountTemplateModel, ids, nil, afpats); err != nil {
		return nil, err
	}
	return afpats, nil
}

// FindAccountFiscalPositionAccountTemplate finds account.fiscal.position.account.template record by querying it with criteria.
func (c *Client) FindAccountFiscalPositionAccountTemplate(criteria *Criteria) (*AccountFiscalPositionAccountTemplate, error) {
	afpats := &AccountFiscalPositionAccountTemplates{}
	if err := c.SearchRead(AccountFiscalPositionAccountTemplateModel, criteria, NewOptions().Limit(1), afpats); err != nil {
		return nil, err
	}
	if afpats != nil && len(*afpats) > 0 {
		return &((*afpats)[0]), nil
	}
	return nil, fmt.Errorf("account.fiscal.position.account.template was not found with criteria %v", criteria)
}

// FindAccountFiscalPositionAccountTemplates finds account.fiscal.position.account.template records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountFiscalPositionAccountTemplates(criteria *Criteria, options *Options) (*AccountFiscalPositionAccountTemplates, error) {
	afpats := &AccountFiscalPositionAccountTemplates{}
	if err := c.SearchRead(AccountFiscalPositionAccountTemplateModel, criteria, options, afpats); err != nil {
		return nil, err
	}
	return afpats, nil
}

// FindAccountFiscalPositionAccountTemplateIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountFiscalPositionAccountTemplateIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountFiscalPositionAccountTemplateModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}

// FindAccountFiscalPositionAccountTemplateId finds record id by querying it with criteria.
func (c *Client) FindAccountFiscalPositionAccountTemplateId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountFiscalPositionAccountTemplateModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("account.fiscal.position.account.template was not found with criteria %v and options %v", criteria, options)
}
package structs

import (
	"time"

	"github.com/garyburd/redigo/redis"
	"github.com/jinzhu/gorm"
	"github.com/patrickmn/go-cache"
)

// Package-level handles shared across the application. Only Cache is
// initialized here (in init); the DB and Redis handles are expected to be
// wired up elsewhere before use.
var (
	AdscoopsDB         *gorm.DB
	AdscoopsRealtimeDB *gorm.DB
	BroadvidDB         *gorm.DB
	RedisPool          *redis.Pool
	Cache              *cache.Cache
)

func init() {
	// In-memory cache: entries expire after one hour; expired entries are
	// purged every 30 seconds.
	Cache = cache.New(time.Hour, 30*time.Second)
}

const (
	// layout is a US-style date/time reference layout ("MM/DD/YYYY h:MM AM/PM")
	// in Go's time.Format notation.
	layout = "01/02/2006 3:04 PM"
	// lookbackInMinutes — NOTE(review): declared as time.Duration, so the raw
	// value is 100 nanoseconds, not 100 minutes; presumably the use site
	// multiplies it by time.Minute as the name suggests — confirm against
	// callers before relying on it.
	lookbackInMinutes time.Duration = 100
)

// GroupIntf loads every record of a collection.
type GroupIntf interface {
	FindAll() error
}

// GroupVisbleIntf (sic) loads the records visible to the given user.
type GroupVisbleIntf interface {
	FindVisible(userID uint) error
}

// GroupScheduleIntf loads all schedule records belonging to the given id.
type GroupScheduleIntf interface {
	FindAll(id string) error
}

// SingleIntf loads and persists a single record addressed by id.
type SingleIntf interface {
	Find(id string) error
	Save() error
}

// SingleIntfByRedir is like SingleIntf but Save also takes the record id.
type SingleIntfByRedir interface {
	Find(id string) error
	Save(id string) error
}

// BasicIntf persists a record with default/basic semantics.
type BasicIntf interface {
	BasicSave() error
}
package yixia

import (
	"fmt"
	"regexp"

	"videocrawler/clients"
	"videocrawler/common/util"
	"videocrawler/crawler"

	"github.com/buger/jsonparser"
	"github.com/fatih/color"
)

// Patterns used to scrape the video id and page title from Miaopai HTML.
// Compiled once at package level: avoids recompiling on every request and
// makes an invalid pattern fail loudly at startup instead of being silently
// ignored (the old code discarded regexp.Compile errors).
var (
	scidJSONRe   = regexp.MustCompile(`"scid":"(.+?)"`)
	scidAssignRe = regexp.MustCompile(`scid\s=\s"(.+?)"`)
	pageTitleRe  = regexp.MustCompile("<title>([^<]+)")
)

// Miaopai is a crawler for miaopai.com short videos.
type Miaopai struct {
	*crawler.CrawlerNet
	Client  *clients.CrClient
	streams crawler.StreamSet
	videoId string
	title   string
	pageUrl string
}

// Init implements crawler.Crawler; Miaopai needs no extra initialization.
func (m *Miaopai) Init(param crawler.CrawlerInitParam) {
}

// GetVideoInfo scrapes the page at url for its video id, then queries the
// Miaopai channel API for the stream URL and the authoritative title.
// It returns an error when the API request fails (previously the error was
// silently discarded and an empty detail returned).
func (m *Miaopai) GetVideoInfo(url string) (crawler.VideoDetail, error) {
	m.GetVideoId(url)
	d := color.New(color.FgBlue, color.Bold)
	d.Printf("视频标题: %s \n", m.title)
	c := color.New(color.FgCyan).Add(color.Underline)
	c.Println("=========================================================")

	if m.videoId == "" {
		// No id could be scraped; keep the historical best-effort behavior of
		// returning an empty detail without an error.
		return crawler.VideoDetail{}, nil
	}

	params := util.HttpBuildQuery(map[string]string{
		"scid":     m.videoId,
		"vend":     "miaopai",
		"fillType": "259",
	})
	// Plain concatenation: Sprintf with a non-constant format string would
	// misinterpret any '%' in the encoded query as a formatting verb.
	reqUrl := "http://api.miaopai.com/m/v2_channel.json?" + params

	body, err := m.Client.GetUrlContent(reqUrl)
	if err != nil {
		// Propagate the failure instead of returning (empty, nil).
		return crawler.VideoDetail{}, err
	}

	flvSrc, _ := jsonparser.GetString(body, "result", "stream", "base")
	m.title, _ = jsonparser.GetString(body, "result", "ext", "t")
	fmt.Println(flvSrc)

	flvs := []crawler.FlvInfo{{
		Src:  flvSrc,
		Size: 0,
	}}

	if m.streams == nil {
		// Guard against a Miaopai value constructed without New().
		m.streams = make(crawler.StreamSet)
	}
	m.streams["normal"] = crawler.StreamInfo{
		VideoProfile: "",
		Container:    "",
		Src:          "",
		Width:        0,
		Height:       0,
		Flv:          flvs,
	}

	return crawler.VideoDetail{
		Title:   m.title,
		Streams: m.streams,
	}, nil
}

// GetVideoId extracts the video's scid from the page HTML, caching it and the
// page <title> on the receiver. It returns "" when no id can be found.
func (m *Miaopai) GetVideoId(url string) string {
	body, err := m.Client.GetUrlContent(url)
	if err != nil {
		return ""
	}
	html := string(body)

	if matches := scidJSONRe.FindStringSubmatch(html); matches != nil {
		m.videoId = matches[1]
	} else if matches := scidAssignRe.FindStringSubmatch(html); matches != nil {
		m.videoId = matches[1]
	}

	// The old code indexed matches[1] unconditionally here and panicked on
	// pages without a <title> tag; guard the lookup instead.
	if matches := pageTitleRe.FindStringSubmatch(html); matches != nil {
		m.title = matches[1]
	}
	return m.videoId
}

// New constructs a Miaopai crawler with an HTTP client pointed at miaopai.com.
func New() crawler.Crawler {
	fmt.Println("init miaopai###############################################")
	return &Miaopai{
		CrawlerNet: &crawler.CrawlerNet{},
		Client:     clients.NewClient("https://miaopai.com", map[string]string{}),
		streams:    make(crawler.StreamSet),
	}
}
package application const HeaderUserID = "X-UserID" const HeaderFirebaseToken = "X-FirebaseToken"
package kubernetes import ( "encoding/json" "fmt" "regexp" "strings" "sync" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "github.com/kiali/kiali/config" "github.com/kiali/kiali/log" ) var ( portNameMatcher = regexp.MustCompile(`^[\-].*`) portProtocols = [...]string{"grpc", "http", "http2", "https", "mongo", "redis", "tcp", "tls", "udp"} ) // GetIstioDetails returns Istio details for a given namespace, // on this version it collects the VirtualServices and DestinationRules defined for a namespace. // If serviceName param is provided, it filters all the Istio objects pointing to a particular service. // It returns an error on any problem. func (in *IstioClient) GetIstioDetails(namespace string, serviceName string) (*IstioDetails, error) { wg := sync.WaitGroup{} errChan := make(chan error, 4) istioDetails := IstioDetails{} vss := make([]IstioObject, 0) drs := make([]IstioObject, 0) gws := make([]IstioObject, 0) ses := make([]IstioObject, 0) wg.Add(4) go fetchNoEntry(&ses, namespace, in.GetServiceEntries, &wg, errChan) go fetchNoEntry(&gws, namespace, in.GetGateways, &wg, errChan) go fetch(&vss, namespace, serviceName, in.GetVirtualServices, &wg, errChan) go fetch(&drs, namespace, serviceName, in.GetDestinationRules, &wg, errChan) wg.Wait() if len(errChan) != 0 { // We return first error only, likely to be the same issue for all err := <-errChan return nil, err } istioDetails.VirtualServices = vss istioDetails.DestinationRules = drs istioDetails.Gateways = gws istioDetails.ServiceEntries = ses return &istioDetails, nil } // CreateIstioObject creates an Istio object func (in *IstioClient) CreateIstioObject(api, namespace, resourceType, json string) (IstioObject, error) { var result runtime.Object var err error typeMeta := meta_v1.TypeMeta{ Kind: "", APIVersion: "", } typeMeta.Kind = PluralType[resourceType] byteJson := []byte(json) if api == ConfigGroupVersion.Group { result, err = 
in.istioConfigApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() typeMeta.APIVersion = ApiConfigVersion } else if api == NetworkingGroupVersion.Group { result, err = in.istioNetworkingApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() typeMeta.APIVersion = ApiNetworkingVersion } else if api == AuthenticationGroupVersion.Group { result, err = in.istioAuthenticationApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() typeMeta.APIVersion = ApiAuthenticationVersion } else { result, err = in.istioRbacApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() typeMeta.APIVersion = ApiRbacVersion } if err != nil { return nil, err } istioObject, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return an IstioObject object", namespace, resourceType) } istioObject.SetTypeMeta(typeMeta) return istioObject, err } // DeleteIstioObject deletes an Istio object from either config api or networking api func (in *IstioClient) DeleteIstioObject(api, namespace, resourceType, name string) error { log.Debugf("DeleteIstioObject input: %s / %s / %s / %s", api, namespace, resourceType, name) var err error if api == ConfigGroupVersion.Group { _, err = in.istioConfigApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() } else if api == NetworkingGroupVersion.Group { _, err = in.istioNetworkingApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() } else if api == AuthenticationGroupVersion.Group { _, err = in.istioAuthenticationApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() } else { _, err = in.istioRbacApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() } return err } // UpdateIstioObject updates an Istio object from either config api or networking api func (in *IstioClient) UpdateIstioObject(api, namespace, resourceType, name, jsonPatch 
string) (IstioObject, error) { log.Debugf("UpdateIstioObject input: %s / %s / %s / %s", api, namespace, resourceType, name) var result runtime.Object var err error typeMeta := meta_v1.TypeMeta{ Kind: "", APIVersion: "", } typeMeta.Kind = PluralType[resourceType] bytePatch := []byte(jsonPatch) if api == ConfigGroupVersion.Group { result, err = in.istioConfigApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() typeMeta.APIVersion = ApiConfigVersion } else if api == NetworkingGroupVersion.Group { result, err = in.istioNetworkingApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() typeMeta.APIVersion = ApiNetworkingVersion } else if api == AuthenticationGroupVersion.Group { result, err = in.istioAuthenticationApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() typeMeta.APIVersion = ApiAuthenticationVersion } else { result, err = in.istioRbacApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() typeMeta.APIVersion = ApiRbacVersion } if err != nil { return nil, err } istioObject, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return an IstioObject object", namespace, name) } istioObject.SetTypeMeta(typeMeta) return istioObject, err } func (in *IstioClient) hasRbacResource(resource string) bool { return in.getRbacResources()[resource] } func (in *IstioClient) getRbacResources() map[string]bool { if in.rbacResources != nil { return *in.rbacResources } rbacResources := map[string]bool{} resourceListRaw, err := in.k8s.RESTClient().Get().AbsPath("/apis/rbac.istio.io/v1alpha1").Do().Raw() if err == nil { resourceList := meta_v1.APIResourceList{} if errMarshall := json.Unmarshal(resourceListRaw, &resourceList); errMarshall == nil { for _, resource := range 
resourceList.APIResources { rbacResources[resource.Name] = true } } } in.rbacResources = &rbacResources return *in.rbacResources } // GetVirtualServices return all VirtualServices for a given namespace. // If serviceName param is provided it will filter all VirtualServices having a host defined on a particular service. // It returns an error on any problem. func (in *IstioClient) GetVirtualServices(namespace string, serviceName string) ([]IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(virtualServices).Do().Get() if err != nil { return nil, err } virtualServiceList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a VirtualService list", namespace, serviceName) } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[virtualServices], APIVersion: ApiNetworkingVersion, } virtualServices := make([]IstioObject, 0) for _, virtualService := range virtualServiceList.GetItems() { appendVirtualService := serviceName == "" routeProtocols := []string{"http", "tcp"} if !appendVirtualService && FilterByRoute(virtualService.GetSpec(), routeProtocols, serviceName, namespace, nil) { appendVirtualService = true } if appendVirtualService { vs := virtualService.DeepCopyIstioObject() vs.SetTypeMeta(typeMeta) virtualServices = append(virtualServices, vs) } } return virtualServices, nil } // GetSidecars return all Sidecars for a given namespace. 
// It returns an error on any problem func (in *IstioClient) GetSidecars(namespace string) ([]IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(sidecars).Do().Get() if err != nil { return nil, err } sidecarList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a Sidecar list", namespace) } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[sidecars], APIVersion: ApiNetworkingVersion, } sidecars := make([]IstioObject, 0) for _, sidecar := range sidecarList.GetItems() { sc := sidecar.DeepCopyIstioObject() sc.SetTypeMeta(typeMeta) sidecars = append(sidecars, sc) } return sidecars, nil } func (in *IstioClient) GetSidecar(namespace string, sidecar string) (IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(sidecars).SubResource(sidecar).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[sidecars], APIVersion: ApiNetworkingVersion, } sidecarObject, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a Sidecar object", namespace, sidecar) } sc := sidecarObject.DeepCopyIstioObject() sc.SetTypeMeta(typeMeta) return sc, nil } func (in *IstioClient) GetVirtualService(namespace string, virtualservice string) (IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(virtualServices).SubResource(virtualservice).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[virtualServices], APIVersion: ApiNetworkingVersion, } virtualService, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a VirtualService object", namespace, virtualservice) } vs := virtualService.DeepCopyIstioObject() vs.SetTypeMeta(typeMeta) return vs, nil } // GetGateways return all Gateways for a given namespace. // It returns an error on any problem. 
func (in *IstioClient) GetGateways(namespace string) ([]IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(gateways).Do().Get() if err != nil { return nil, err } gatewayList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a Gateway list", namespace) } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[gateways], APIVersion: ApiNetworkingVersion, } gateways := make([]IstioObject, 0) for _, gateway := range gatewayList.GetItems() { gw := gateway.DeepCopyIstioObject() gw.SetTypeMeta(typeMeta) gateways = append(gateways, gw) } return gateways, nil } func (in *IstioClient) GetGateway(namespace string, gateway string) (IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(gateways).SubResource(gateway).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[gateways], APIVersion: ApiNetworkingVersion, } gatewayObject, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a Gateway object", namespace, gateway) } gw := gatewayObject.DeepCopyIstioObject() gw.SetTypeMeta(typeMeta) return gw, nil } // GetServiceEntries return all ServiceEntry objects for a given namespace. // It returns an error on any problem. 
func (in *IstioClient) GetServiceEntries(namespace string) ([]IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(serviceentries).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[serviceentries], APIVersion: ApiNetworkingVersion, } serviceEntriesList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a ServiceEntry list", namespace) } serviceEntries := make([]IstioObject, 0) for _, serviceEntry := range serviceEntriesList.GetItems() { se := serviceEntry.DeepCopyIstioObject() se.SetTypeMeta(typeMeta) serviceEntries = append(serviceEntries, se) } return serviceEntries, nil } func (in *IstioClient) GetServiceEntry(namespace string, serviceEntryName string) (IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(serviceentries).SubResource(serviceEntryName).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[serviceentries], APIVersion: ApiNetworkingVersion, } serviceEntry, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%v doesn't return a ServiceEntry object", namespace, serviceEntry) } se := serviceEntry.DeepCopyIstioObject() se.SetTypeMeta(typeMeta) return se, nil } // GetDestinationRules returns all DestinationRules for a given namespace. // If serviceName param is provided it will filter all DestinationRules having a host defined on a particular service. // It returns an error on any problem. 
func (in *IstioClient) GetDestinationRules(namespace string, serviceName string) ([]IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(destinationRules).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[destinationRules], APIVersion: ApiNetworkingVersion, } destinationRuleList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a DestinationRule list", namespace, serviceName) } destinationRules := make([]IstioObject, 0) for _, destinationRule := range destinationRuleList.Items { appendDestinationRule := serviceName == "" if host, ok := destinationRule.Spec["host"]; ok { if dHost, ok := host.(string); ok && FilterByHost(dHost, serviceName, namespace) { appendDestinationRule = true } } if appendDestinationRule { dr := destinationRule.DeepCopyIstioObject() dr.SetTypeMeta(typeMeta) destinationRules = append(destinationRules, dr) } } return destinationRules, nil } func (in *IstioClient) GetDestinationRule(namespace string, destinationrule string) (IstioObject, error) { result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(destinationRules).SubResource(destinationrule).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[destinationRules], APIVersion: ApiNetworkingVersion, } destinationRule, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a DestinationRule object", namespace, destinationrule) } dr := destinationRule.DeepCopyIstioObject() dr.SetTypeMeta(typeMeta) return dr, nil } // GetQuotaSpecs returns all QuotaSpecs objects for a given namespace. // It returns an error on any problem. 
func (in *IstioClient) GetQuotaSpecs(namespace string) ([]IstioObject, error) {
	result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecs).Do().Get()
	if err != nil {
		return nil, err
	}
	typeMeta := meta_v1.TypeMeta{
		Kind:       PluralType[quotaspecs],
		APIVersion: ApiConfigVersion,
	}
	quotaSpecList, ok := result.(*GenericIstioObjectList)
	if !ok {
		return nil, fmt.Errorf("%s doesn't return a QuotaSpecList list", namespace)
	}
	quotaSpecs := make([]IstioObject, 0)
	for _, qs := range quotaSpecList.GetItems() {
		// Deep copy plus TypeMeta stamping, same pattern as the other list getters.
		q := qs.DeepCopyIstioObject()
		q.SetTypeMeta(typeMeta)
		quotaSpecs = append(quotaSpecs, q)
	}
	return quotaSpecs, nil
}

// GetQuotaSpec returns the QuotaSpec named quotaSpecName in the given namespace.
// It returns an error on any problem.
func (in *IstioClient) GetQuotaSpec(namespace string, quotaSpecName string) (IstioObject, error) {
	result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecs).SubResource(quotaSpecName).Do().Get()
	if err != nil {
		return nil, err
	}
	typeMeta := meta_v1.TypeMeta{
		Kind:       PluralType[quotaspecs],
		APIVersion: ApiConfigVersion,
	}
	quotaSpec, ok := result.(*GenericIstioObject)
	if !ok {
		return nil, fmt.Errorf("%s/%s doesn't return a QuotaSpec object", namespace, quotaSpecName)
	}
	qs := quotaSpec.DeepCopyIstioObject()
	qs.SetTypeMeta(typeMeta)
	return qs, nil
}

// GetQuotaSpecBindings returns all QuotaSpecBindings objects for a given namespace.
// It returns an error on any problem.
func (in *IstioClient) GetQuotaSpecBindings(namespace string) ([]IstioObject, error) { result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecbindings).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[quotaspecbindings], APIVersion: ApiConfigVersion, } quotaSpecBindingList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a QuotaSpecBindingList list", namespace) } quotaSpecBindings := make([]IstioObject, 0) for _, qs := range quotaSpecBindingList.GetItems() { q := qs.DeepCopyIstioObject() q.SetTypeMeta(typeMeta) quotaSpecBindings = append(quotaSpecBindings, q) } return quotaSpecBindings, nil } func (in *IstioClient) GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (IstioObject, error) { result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecbindings).SubResource(quotaSpecBindingName).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[quotaspecbindings], APIVersion: ApiConfigVersion, } quotaSpecBinding, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s/%s doesn't return a QuotaSpecBinding object", namespace, quotaSpecBindingName) } qs := quotaSpecBinding.DeepCopyIstioObject() qs.SetTypeMeta(typeMeta) return qs, nil } func (in *IstioClient) GetPolicies(namespace string) ([]IstioObject, error) { result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(policies).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[policies], APIVersion: ApiAuthenticationVersion, } policyList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a PolicyList list", namespace) } policies := make([]IstioObject, 0) for _, ps := range policyList.GetItems() { p := ps.DeepCopyIstioObject() p.SetTypeMeta(typeMeta) policies = append(policies, p) } return policies, nil } func (in *IstioClient) 
GetPolicy(namespace string, policyName string) (IstioObject, error) { result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(policies).SubResource(policyName).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[policies], APIVersion: ApiAuthenticationVersion, } policy, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a Policy object", namespace) } p := policy.DeepCopyIstioObject() p.SetTypeMeta(typeMeta) return p, nil } func (in *IstioClient) GetMeshPolicies(namespace string) ([]IstioObject, error) { // MeshPolicies are not namespaced. However, API returns all the instances even asking for one specific namespace. // Due to soft-multitenancy, the call performed is namespaced to avoid triggering an error for cluster-wide access. result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(meshPolicies).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[meshPolicies], APIVersion: ApiAuthenticationVersion, } policyList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("it doesn't return a PolicyList list") } policies := make([]IstioObject, 0) for _, ps := range policyList.GetItems() { p := ps.DeepCopyIstioObject() p.SetTypeMeta(typeMeta) policies = append(policies, p) } return policies, nil } func (in *IstioClient) GetMeshPolicy(namespace string, policyName string) (IstioObject, error) { result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(meshPolicies).SubResource(policyName).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[meshPolicies], APIVersion: ApiAuthenticationVersion, } mp, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a MeshPolicy object", namespace) } p := mp.DeepCopyIstioObject() p.SetTypeMeta(typeMeta) return p, nil } func (in *IstioClient) GetClusterRbacConfigs(namespace 
string) ([]IstioObject, error) { // In case ClusterRbacConfigs aren't present on Istio, return empty array. if !in.hasRbacResource(clusterrbacconfigs) { return []IstioObject{}, nil } result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(clusterrbacconfigs).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[clusterrbacconfigs], APIVersion: ApiRbacVersion, } clusterRbacConfigList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a RbacConfigList list", namespace) } clusterRbacConfigs := make([]IstioObject, 0) for _, crc := range clusterRbacConfigList.GetItems() { c := crc.DeepCopyIstioObject() c.SetTypeMeta(typeMeta) clusterRbacConfigs = append(clusterRbacConfigs, c) } return clusterRbacConfigs, nil } func (in *IstioClient) GetClusterRbacConfig(namespace string, name string) (IstioObject, error) { result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(clusterrbacconfigs).SubResource(name).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[clusterrbacconfigs], APIVersion: ApiRbacVersion, } clusterRbacConfig, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a ClusterRbacConfig object", namespace) } c := clusterRbacConfig.DeepCopyIstioObject() c.SetTypeMeta(typeMeta) return c, nil } func (in *IstioClient) GetRbacConfigs(namespace string) ([]IstioObject, error) { // In case RbacConfigs aren't present on Istio, return empty array. 
if !in.hasRbacResource(rbacconfigs) { return []IstioObject{}, nil } result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(rbacconfigs).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[rbacconfigs], APIVersion: ApiRbacVersion, } rbacConfigList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a RbacConfigList list", namespace) } rbacConfigs := make([]IstioObject, 0) for _, rc := range rbacConfigList.GetItems() { r := rc.DeepCopyIstioObject() r.SetTypeMeta(typeMeta) rbacConfigs = append(rbacConfigs, r) } return rbacConfigs, nil } func (in *IstioClient) GetRbacConfig(namespace string, name string) (IstioObject, error) { result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(rbacconfigs).SubResource(name).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[rbacconfigs], APIVersion: ApiRbacVersion, } rbacConfig, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a RbacConfig object", namespace) } r := rbacConfig.DeepCopyIstioObject() r.SetTypeMeta(typeMeta) return r, nil } func (in *IstioClient) GetServiceRoles(namespace string) ([]IstioObject, error) { // In case ServiceRoles aren't present on Istio, return empty array. 
if !in.hasRbacResource(serviceroles) { return []IstioObject{}, nil } result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(serviceroles).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[serviceroles], APIVersion: ApiRbacVersion, } serviceRoleList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a ServiceRoleList list", namespace) } serviceRoles := make([]IstioObject, 0) for _, sr := range serviceRoleList.GetItems() { s := sr.DeepCopyIstioObject() s.SetTypeMeta(typeMeta) serviceRoles = append(serviceRoles, s) } return serviceRoles, nil } func (in *IstioClient) GetServiceRole(namespace string, name string) (IstioObject, error) { result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(serviceroles).SubResource(name).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[serviceroles], APIVersion: ApiRbacVersion, } serviceRole, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a ServiceRole object", namespace) } s := serviceRole.DeepCopyIstioObject() s.SetTypeMeta(typeMeta) return s, nil } func (in *IstioClient) GetServiceRoleBindings(namespace string) ([]IstioObject, error) { // In case ServiceRoleBindings aren't present on Istio, return empty array. 
if !in.hasRbacResource(servicerolebindings) { return []IstioObject{}, nil } result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(servicerolebindings).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[servicerolebindings], APIVersion: ApiRbacVersion, } serviceRoleBindingList, ok := result.(*GenericIstioObjectList) if !ok { return nil, fmt.Errorf("%s doesn't return a ServiceRoleBindingList list", namespace) } serviceRoleBindings := make([]IstioObject, 0) for _, sr := range serviceRoleBindingList.GetItems() { s := sr.DeepCopyIstioObject() s.SetTypeMeta(typeMeta) serviceRoleBindings = append(serviceRoleBindings, s) } return serviceRoleBindings, nil } func (in *IstioClient) GetServiceRoleBinding(namespace string, name string) (IstioObject, error) { result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(servicerolebindings).SubResource(name).Do().Get() if err != nil { return nil, err } typeMeta := meta_v1.TypeMeta{ Kind: PluralType[servicerolebindings], APIVersion: ApiRbacVersion, } serviceRoleBinding, ok := result.(*GenericIstioObject) if !ok { return nil, fmt.Errorf("%s doesn't return a ServiceRoleBinding object", namespace) } s := serviceRoleBinding.DeepCopyIstioObject() s.SetTypeMeta(typeMeta) return s, nil } // GetAuthorizationDetails returns ServiceRoles, ServiceRoleBindings and ClusterRbacDetails func (in *IstioClient) GetAuthorizationDetails(namespace string) (*RBACDetails, error) { rb := &RBACDetails{} // TODO Should we use concurrency here? Are these cached? 
srb, err := in.GetServiceRoleBindings(namespace) if err != nil { return nil, err } sr, err := in.GetServiceRoles(namespace) if err != nil { return nil, err } crc, err := in.GetClusterRbacConfigs(namespace) if err != nil { return nil, err } rb.ServiceRoleBindings = srb rb.ServiceRoles = sr rb.ClusterRbacConfigs = crc return rb, nil } func FilterByHost(host, serviceName, namespace string) bool { // Check single name if host == serviceName { return true } // Check service.namespace if host == fmt.Sprintf("%s.%s", serviceName, namespace) { return true } // Check the FQDN. <service>.<namespace>.svc if host == fmt.Sprintf("%s.%s.%s", serviceName, namespace, "svc") { return true } // Check the FQDN. <service>.<namespace>.svc.<zone> if host == fmt.Sprintf("%s.%s.%s", serviceName, namespace, config.Get().ExternalServices.Istio.IstioIdentityDomain) { return true } // Note, FQDN names are defined from Kubernetes registry specification [1] // [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md return false } func FilterByRoute(spec map[string]interface{}, protocols []string, service string, namespace string, serviceEntries map[string]struct{}) bool { if len(protocols) == 0 { return false } for _, protocol := range protocols { if prot, ok := spec[protocol]; ok { if aHttp, ok := prot.([]interface{}); ok { for _, httpRoute := range aHttp { if mHttpRoute, ok := httpRoute.(map[string]interface{}); ok { if route, ok := mHttpRoute["route"]; ok { if aDestinationWeight, ok := route.([]interface{}); ok { for _, destination := range aDestinationWeight { if mDestination, ok := destination.(map[string]interface{}); ok { if destinationW, ok := mDestination["destination"]; ok { if mDestinationW, ok := destinationW.(map[string]interface{}); ok { if host, ok := mDestinationW["host"]; ok { if sHost, ok := host.(string); ok { if FilterByHost(sHost, service, namespace) { return true } if serviceEntries != nil { // We have ServiceEntry to check if _, found := 
serviceEntries[strings.ToLower(protocol)+sHost]; found { return true } } } } } } } } } } } } } } } return false } // ServiceEntryHostnames returns a list of hostnames defined in the ServiceEntries Specs. Key in the resulting map is the protocol (in lowercase) + hostname // exported for test func ServiceEntryHostnames(serviceEntries []IstioObject) map[string][]string { hostnames := make(map[string][]string) for _, v := range serviceEntries { if hostsSpec, found := v.GetSpec()["hosts"]; found { if hosts, ok := hostsSpec.([]interface{}); ok { // Seek the protocol for _, h := range hosts { if hostname, ok := h.(string); ok { hostnames[hostname] = make([]string, 0, 1) } } } } if portsSpec, found := v.GetSpec()["ports"]; found { if portsArray, ok := portsSpec.([]interface{}); ok { for _, portDef := range portsArray { if ports, ok := portDef.(map[string]interface{}); ok { if proto, found := ports["protocol"]; found { if protocol, ok := proto.(string); ok { protocol = mapPortToVirtualServiceProtocol(protocol) for host := range hostnames { hostnames[host] = append(hostnames[host], protocol) } } } } } } } } return hostnames } // mapPortToVirtualServiceProtocol transforms Istio's Port-definitions' protocol names to VirtualService's protocol names func mapPortToVirtualServiceProtocol(proto string) string { // http: HTTP/HTTP2/GRPC/ TLS-terminated-HTTPS and service entry ports using HTTP/HTTP2/GRPC protocol // tls: HTTPS/TLS protocols (i.e. with “passthrough” TLS mode) and service entry ports using HTTPS/TLS protocols. 
// tcp: everything else switch proto { case "HTTP": fallthrough case "HTTP2": fallthrough case "GRPC": return "http" case "HTTPS": fallthrough case "TLS": return "tls" default: return "tcp" } } // ValidaPort parses the Istio Port definition and validates the naming scheme func ValidatePort(portDef interface{}) bool { return MatchPortNameRule(parsePort(portDef)) } func parsePort(portDef interface{}) (string, string) { var name, proto string if port, ok := portDef.(map[string]interface{}); ok { if portNameDef, found := port["name"]; found { if portName, ok := portNameDef.(string); ok { name = portName } } if protocolDef, found := port["protocol"]; found { if protocol, ok := protocolDef.(string); ok { proto = protocol } } } return name, proto } func MatchPortNameRule(portName, protocol string) bool { protocol = strings.ToLower(protocol) // Check that portName begins with the protocol if protocol == "tcp" || protocol == "udp" { // TCP and UDP protocols do not care about the name return true } if !strings.HasPrefix(portName, protocol) { return false } // If longer than protocol, then it must adhere to <protocol>[-suffix] // and if there's -, then there must be a suffix .. 
if len(portName) > len(protocol) { restPortName := portName[len(protocol):] return portNameMatcher.MatchString(restPortName) } // Case portName == protocolName return true } func MatchPortNameWithValidProtocols(portName string) bool { for _, protocol := range portProtocols { if strings.HasPrefix(portName, protocol) && (strings.ToLower(portName) == protocol || portNameMatcher.MatchString(portName[len(protocol):])) { return true } } return false } // GatewayNames extracts the gateway names for easier matching func GatewayNames(gateways [][]IstioObject) map[string]struct{} { var empty struct{} names := make(map[string]struct{}) for _, ns := range gateways { for _, gw := range ns { gw := gw clusterName := gw.GetObjectMeta().ClusterName if clusterName == "" { clusterName = config.Get().ExternalServices.Istio.IstioIdentityDomain } names[ParseHost(gw.GetObjectMeta().Name, gw.GetObjectMeta().Namespace, clusterName).String()] = empty } } return names } func PolicyHasStrictMTLS(policy IstioObject) bool { _, mode := PolicyHasMTLSEnabled(policy) return mode == "STRICT" } func PolicyHasMTLSEnabled(policy IstioObject) (bool, string) { // It is mandatory to have default as a name if policyMeta := policy.GetObjectMeta(); policyMeta.Name != "default" { return false, "" } // It is no globally enabled when has targets targets, targetPresent := policy.GetSpec()["targets"] specificTarget := targetPresent && len(targets.([]interface{})) > 0 if specificTarget { return false, "" } // It is globally enabled when a peer has mtls enabled peers, peersPresent := policy.GetSpec()["peers"] if !peersPresent { return false, "" } for _, peer := range peers.([]interface{}) { peerMap := peer.(map[string]interface{}) if mtls, present := peerMap["mtls"]; present { if mtlsMap, ok := mtls.(map[string]interface{}); ok { if modeItf, found := mtlsMap["mode"]; found { if mode, ok := modeItf.(string); ok { return true, mode } else { return false, "" } } } // STRICT mode when mtls object is empty return true, 
"STRICT" } } return false, "" } func DestinationRuleHasMeshWideMTLSEnabled(destinationRule IstioObject) (bool, string) { // Following the suggested procedure to enable mesh-wide mTLS, host might be '*.local': // https://istio.io/docs/tasks/security/authn-policy/#globally-enabling-istio-mutual-tls return DestinationRuleHasMTLSEnabledForHost("*.local", destinationRule) } func DestinationRuleHasNamespaceWideMTLSEnabled(namespace string, destinationRule IstioObject) (bool, string) { // Following the suggested procedure to enable namespace-wide mTLS, host might be '*.namespace.svc.cluster.local' // https://istio.io/docs/tasks/security/authn-policy/#namespace-wide-policy nsHost := fmt.Sprintf("*.%s.%s", namespace, config.Get().ExternalServices.Istio.IstioIdentityDomain) return DestinationRuleHasMTLSEnabledForHost(nsHost, destinationRule) } func DestinationRuleHasMTLSEnabledForHost(expectedHost string, destinationRule IstioObject) (bool, string) { host, hostPresent := destinationRule.GetSpec()["host"] if !hostPresent || host != expectedHost { return false, "" } if trafficPolicy, trafficPresent := destinationRule.GetSpec()["trafficPolicy"]; trafficPresent { if trafficCasted, ok := trafficPolicy.(map[string]interface{}); ok { if tls, found := trafficCasted["tls"]; found { if tlsCasted, ok := tls.(map[string]interface{}); ok { if mode, found := tlsCasted["mode"]; found { if modeCasted, ok := mode.(string); ok { return modeCasted == "ISTIO_MUTUAL", modeCasted } } } } } } return false, "" } func fetch(rValue *[]IstioObject, namespace string, service string, fetcher func(string, string) ([]IstioObject, error), wg *sync.WaitGroup, errChan chan error) { defer wg.Done() fetched, err := fetcher(namespace, service) *rValue = append(*rValue, fetched...) 
if err != nil { errChan <- err } } // Identical to above, but since k8s layer has both (namespace, serviceentry) and (namespace) queries, we need two different functions func fetchNoEntry(rValue *[]IstioObject, namespace string, fetcher func(string) ([]IstioObject, error), wg *sync.WaitGroup, errChan chan error) { defer wg.Done() fetched, err := fetcher(namespace) *rValue = append(*rValue, fetched...) if err != nil && len(errChan) == 0 { errChan <- err } }
package user import ( "net/http" "project/database" "project/packages/handlers/response" "strconv" "github.com/gorilla/mux" ) func GetInfoByID(w http.ResponseWriter, r *http.Request) { //get ID order vars := mux.Vars(r) id, _ := strconv.Atoi(vars["userId"]) //connect database db := database.ConnectToDatabase() //find by ID (Primary key) var user database.User db.First(&user, id) //Check result if user.ID == 0 { response.ResponseWithJson(w, http.StatusBadRequest, map[string]string{"message": "User Not Exist!"}) return } //response response.ResponseWithJson(w, http.StatusOK, user) }
package db import ( "github.com/jinzhu/gorm" "logger" ) func TxBegin() (tx *gorm.DB, err error) { db_handle, err := GetDB() if err != nil { logger.Errorln(err) return } tx = db_handle.Begin() return tx, nil } func TxEnd(tx *gorm.DB, exception error) (err error) { if exception != nil { logger.Errorln(exception) tx.Rollback() return nil } else { tx.Commit() return nil } return nil } func TXWrapper(f func(tx *gorm.DB) error) { tx, err := TxBegin() if err != nil { logger.Errorln("TxBegin failed", err) return } err = f(tx) err = TxEnd(tx, err) if err != nil { logger.Errorln("TxEnd failed", err) return } return } func TXWrapperEx(f func(tx *gorm.DB, exchange string) error, exchange string) { tx, err := TxBegin() if err != nil { logger.Errorln("TxBegin failed", err) return } err = f(tx, exchange) err = TxEnd(tx, err) if err != nil { logger.Errorln("TxEnd failed", err) return } return }
/*
 *     Copyright 2020 The Dragonfly Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package service

import (
	"errors"

	"d7y.io/dragonfly/v2/internal/idgen"
	"d7y.io/dragonfly/v2/pkg/rpc/base"
	"d7y.io/dragonfly/v2/scheduler/config"
	"d7y.io/dragonfly/v2/scheduler/manager"
	"d7y.io/dragonfly/v2/scheduler/scheduler"
	"d7y.io/dragonfly/v2/scheduler/types"
)

// SchedulerService ties together the CDN/task/host managers and the scheduler,
// exposing the operations the gRPC layer needs.
type SchedulerService struct {
	CDNManager  *manager.CDNManager
	TaskManager *manager.TaskManager
	HostManager *manager.HostManager
	Scheduler   *scheduler.Scheduler
	// NOTE(review): config is never assigned by NewSchedulerService — confirm whether
	// this field is still needed.
	config config.SchedulerConfig
	ABTest bool
}

// NewSchedulerService builds a SchedulerService from the given configuration,
// wiring the managers created by manager.New into a new scheduler instance.
func NewSchedulerService(cfg *config.Config, dynconfig config.DynconfigInterface) (*SchedulerService, error) {
	mgr, err := manager.New(cfg, dynconfig)
	if err != nil {
		return nil, err
	}
	return &SchedulerService{
		CDNManager:  mgr.CDNManager,
		TaskManager: mgr.TaskManager,
		HostManager: mgr.HostManager,
		Scheduler:   scheduler.New(cfg.Scheduler, mgr.TaskManager),
		ABTest:      cfg.Scheduler.ABTest,
	}, nil
}

// GenerateTaskID computes the task identifier; when A/B testing is enabled the
// peer ID participates in the hash (TwinsTaskID), otherwise it does not.
func (s *SchedulerService) GenerateTaskID(url string, filter string, meta *base.UrlMeta, bizID string, peerID string) (taskID string) {
	if s.ABTest {
		return idgen.TwinsTaskID(url, filter, meta, bizID, peerID)
	}
	return idgen.TaskID(url, filter, meta, bizID)
}

// GetTask looks up a task by ID; the second result reports whether it exists.
func (s *SchedulerService) GetTask(taskID string) (*types.Task, bool) {
	return s.TaskManager.Get(taskID)
}

// AddTask registers a task. An already-known task is re-added to the peer-task
// tracker; a new one is stored and a CDN download is triggered for it.
// NOTE(review): the Get/Set sequence is not atomic — concurrent AddTask calls
// for the same ID could race; confirm TaskManager's guarantees.
func (s *SchedulerService) AddTask(task *types.Task) (*types.Task, error) {
	// Task already exists
	if ret, ok := s.TaskManager.Get(task.TaskID); ok {
		s.TaskManager.PeerTask.AddTask(ret)
		return ret, nil
	}
	// Task does not exist
	ret := s.TaskManager.Set(task.TaskID, task)
	if err := s.CDNManager.TriggerTask(ret, s.TaskManager.PeerTask.CDNCallback); err != nil {
		return nil, err
	}
	s.TaskManager.PeerTask.AddTask(ret)
	return ret, nil
}

// ScheduleParent delegates to the scheduler to pick a parent peer for task.
func (s *SchedulerService) ScheduleParent(task *types.PeerTask) (primary *types.PeerTask, secondary []*types.PeerTask, err error) {
	return s.Scheduler.ScheduleParent(task)
}

// ScheduleChildren delegates to the scheduler to pick child peers for task.
func (s *SchedulerService) ScheduleChildren(task *types.PeerTask) (children []*types.PeerTask, err error) {
	return s.Scheduler.ScheduleChildren(task)
}

// GetPeerTask looks up a peer task by ID, turning a miss into an error.
func (s *SchedulerService) GetPeerTask(peerTaskID string) (peerTask *types.PeerTask, err error) {
	peerTask, _ = s.TaskManager.PeerTask.Get(peerTaskID)
	if peerTask == nil {
		err = errors.New("peer task do not exist: " + peerTaskID)
	}
	return
}

// AddPeerTask registers a new peer task and links it to its host.
func (s *SchedulerService) AddPeerTask(pid string, task *types.Task, host *types.Host) (ret *types.PeerTask, err error) {
	ret = s.TaskManager.PeerTask.Add(pid, task, host)
	host.AddPeerTask(ret)
	return
}

// DeletePeerTask removes a peer task from the manager and its host,
// and marks it down so piece bookkeeping is released lazily.
func (s *SchedulerService) DeletePeerTask(peerTaskID string) (err error) {
	peerTask, err := s.GetPeerTask(peerTaskID)
	if err != nil {
		return
	}
	// delete from manager
	s.TaskManager.PeerTask.Delete(peerTaskID)
	// delete from host
	peerTask.Host.DeletePeerTask(peerTaskID)
	// delete from piece lazy
	peerTask.SetDown()
	return
}

// GetHost looks up a host by ID, turning a miss into an error.
func (s *SchedulerService) GetHost(hostID string) (host *types.Host, err error) {
	host, _ = s.HostManager.Get(hostID)
	if host == nil {
		err = errors.New("host not exited: " + hostID)
	}
	return
}

// AddHost registers a host with the host manager.
func (s *SchedulerService) AddHost(host *types.Host) (ret *types.Host, err error) {
	ret = s.HostManager.Add(host)
	return
}
package handlers

import (
	"net/http"

	"github.com/gorilla/mux"

	"github.com/kiali/kiali/log"
	"github.com/kiali/kiali/prometheus"
)

// NamespaceList is the API handler that returns the list of namespaces
// visible to the authenticated user, as JSON.
func NamespaceList(w http.ResponseWriter, r *http.Request) {
	business, err := getBusiness(r)
	if err != nil {
		log.Error(err)
		RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}

	namespaces, err := business.Namespace.GetNamespaces()
	if err != nil {
		log.Error(err)
		RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}

	RespondWithJSON(w, http.StatusOK, namespaces)
}

// NamespaceMetrics is the API handler to fetch metrics to be displayed, related to all
// services in the namespace
func NamespaceMetrics(w http.ResponseWriter, r *http.Request) {
	getNamespaceMetrics(w, r, defaultPromClientSupplier)
}

// getServiceMetrics (mock-friendly version): the Prometheus client is injected
// via promSupplier so tests can substitute a mock.
func getNamespaceMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier) {
	vars := mux.Vars(r)
	namespace := vars["namespace"]

	prom, namespaceInfo := initClientsForMetrics(w, r, promSupplier, namespace)
	if prom == nil {
		// any returned value nil means error & response already written
		return
	}

	params := prometheus.IstioMetricsQuery{Namespace: namespace}
	err := extractIstioMetricsQueryParams(r, &params, namespaceInfo)
	if err != nil {
		RespondWithError(w, http.StatusBadRequest, err.Error())
		return
	}

	metrics := prom.GetMetrics(&params)
	RespondWithJSON(w, http.StatusOK, metrics)
}
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package delegate

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/sql/lex"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// ShowRoleGrants returns role membership details for the specified roles and grantees.
// Privileges: SELECT on system.role_members.
//   Notes: postgres does not have a SHOW GRANTS ON ROLES statement.
func (d *delegator) delegateShowRoleGrants(n *tree.ShowRoleGrants) (tree.Statement, error) {
	const selectQuery = `
SELECT role AS role_name,
       member,
       "isAdmin" AS is_admin
 FROM system.role_members`

	var query bytes.Buffer
	query.WriteString(selectQuery)

	// Role and grantee values are escaped with lex.EscapeSQLString before being
	// interpolated, so the generated WHERE clauses are injection-safe.
	if n.Roles != nil {
		var roles []string
		for _, r := range n.Roles.ToStrings() {
			roles = append(roles, lex.EscapeSQLString(r))
		}
		fmt.Fprintf(&query, ` WHERE "role" IN (%s)`, strings.Join(roles, ","))
	}

	if n.Grantees != nil {
		if n.Roles == nil {
			// No roles specified: we need a WHERE clause.
			query.WriteString(" WHERE ")
		} else {
			// We have a WHERE clause for roles.
			query.WriteString(" AND ")
		}

		var grantees []string
		for _, g := range n.Grantees.ToStrings() {
			grantees = append(grantees, lex.EscapeSQLString(g))
		}
		fmt.Fprintf(&query, ` member IN (%s)`, strings.Join(grantees, ","))
	}

	return parse(query.String())
}
package main

import (
	"fmt"
	"time"
)

// main starts the worker and blocks until it signals completion on the channel.
func main() {
	c := make(chan bool, 1)
	// No wrapper closure needed; launch the worker directly.
	go executeTask(c)
	v := <-c
	fmt.Println("complete?", v)
}

// executeTask simulates a long-running job, ticking once per second.
// On the 7th iteration (i == 6) it reports success on complete, closes
// the channel, and returns. (Renamed from the misspelled "excuteTask".)
func executeTask(complete chan bool) {
	for i := 0; i < 10; i++ {
		fmt.Println("sleeping for a sec", i)
		time.Sleep(1 * time.Second)
		if i == 6 {
			fmt.Println("sending true over")
			complete <- true
			close(complete)
			return
		}
	}
}
package main

import (
	"github.com/forj-oss/goforjj"
)

// ProjectStruct describes a project as serialized to YAML plus the
// non-serialized runtime state (remotes, branch connections) used
// during maintain. (TODO: complete the field set.)
type ProjectStruct struct {
	Name         string
	Flow         string `yaml:",omitempty"`
	Description  string `yaml:",omitempty"`
	Disabled     bool   `yaml:",omitempty"`
	IssueTracker bool   `yaml:"issue_tracker,omitempty"`
	Users        map[string]string `yaml:",omitempty"`
	//Groups
	// NOTE(review): exist is unexported, so the yaml tag has no effect —
	// confirm whether this field was meant to be serialized.
	exist         bool `yaml:",omitempty"`
	remotes       map[string]goforjj.PluginRepoRemoteUrl
	branchConnect map[string]string
	//...
	//maintain
	Infra        bool   `yaml:",omitempty"`
	Role         string `yaml:",omitempty"`
	IsDeployable bool
	Owner        string `yaml:",omitempty"`
}

// isValid checks that the instance has a non-empty Name equal to repoName.
// Validation failures are reported through ret; the boolean result is true
// only when the name is valid.
func (r *RepoInstanceStruct) isValid(repoName string, ret *goforjj.PluginData) (valid bool) {
	if r.Name == "" {
		ret.Errorf("Invalid project '%s'. Name is empty.", repoName)
		return
	}
	if r.Name != repoName {
		ret.Errorf("Invalid project '%s'. Name must be equal to '%s'. But the project name is set to '%s'.", repoName, repoName, r.Name)
		return
	}
	valid = true
	return
}

// set copies the repo-instance definition and the given runtime data into r,
// allocating r when called on a nil receiver, and returns the populated struct.
func (r *ProjectStruct) set(project *RepoInstanceStruct, remotes map[string]goforjj.PluginRepoRemoteUrl, branchConnect map[string]string, isInfra, IsDeployable bool, owner string) *ProjectStruct {
	if r == nil {
		r = new(ProjectStruct)
	}
	r.Name = project.Name
	r.Description = project.Title
	//issueTracker
	r.Flow = project.Flow
	r.Infra = isInfra
	//r.addUsers(project.Users)
	//Groups
	r.remotes = remotes
	r.branchConnect = branchConnect
	//WebHooks
	r.Role = project.Role
	r.Owner = owner
	r.IsDeployable = IsDeployable
	return r
}

// addUsers is a placeholder for attaching users to the project. (TODO: implement.)
func (r *ProjectStruct) addUsers(users string) {
	//
}
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strconv"
)

// readIntsFromFile reads whitespace-separated integers from r.
// On a parse failure it returns the values read so far plus the error.
func readIntsFromFile(r io.Reader) ([]int, error) {
	var result []int
	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanWords)
	for scanner.Scan() {
		value, err := strconv.Atoi(scanner.Text())
		if err != nil {
			return result, err
		}
		result = append(result, value)
	}
	return result, nil
}

// findTwoSum returns two numbers from numbers summing to target,
// or (0, 0) when no pair exists. Single pass with a complement map.
func findTwoSum(numbers []int, target int) (int, int) {
	result := make(map[int]int)
	for _, v := range numbers {
		result[target-v] = v
		if value, ok := result[v]; ok {
			return value, v
		}
	}
	return 0, 0
}

// Part Two
// findThreeSum returns three numbers from nums summing to target using the
// two-pointer technique; nums must be sorted ascending. (0, 0, 0) when no
// triple exists.
func findThreeSum(nums []int, target int) (int, int, int) {
	for i := range nums[:len(nums)-2] { // idiomatic: no blank identifier needed
		start, end := i+1, len(nums)-1
		for start < end {
			sum := nums[i] + nums[start] + nums[end]
			if sum > target {
				end--
			} else if sum < target {
				start++
			} else {
				return nums[i], nums[start], nums[end]
			}
		}
	}
	return 0, 0, 0
}

func main() {
	d, err := os.Open("input.txt")
	if err != nil {
		log.Fatalf("Could not open the file: %v\n", err)
	}
	// Close the input file when main returns (was previously leaked).
	defer d.Close()

	data, err := readIntsFromFile(d)
	if err != nil {
		log.Fatalf("Failed to convert file contents to int array: %v\n", err)
	}

	// Sorted input is required by findThreeSum's two-pointer scan.
	sort.Ints(data)
	value1, value2 := findTwoSum(data, 2020)
	fmt.Println("Part One result: ", value1*value2)

	// Part Two
	value1, value2, value3 := findThreeSum(data, 2020)
	fmt.Println("Part Two result: ", value1*value2*value3)
}
package editorapi

import (
	"editorApi/commons"
	"editorApi/init/mgdb"
	"editorApi/requests"
	"editorApi/service"
	"editorApi/tools/helpers"
	"strings"

	"github.com/gin-gonic/gin"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// @Tags Sentence(句子接口)
// @Summary 句子列表
// @Description 句子列表
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body requests.SentenceSearchRequests true "句子参数"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"获取成功"}"
// @Failure 400 {string} string "{"code":500,"data":{},"msg":"获取失败信息"}"
// @Router /editor/sentence/list [post]
func SentenceList(ctx *gin.Context) {
	var request requests.SentenceSearchRequests
	// BUG FIX: the BindJSON error was ignored; a malformed body now fails
	// fast instead of querying with a zero-valued request.
	if err := ctx.BindJSON(&request); err != nil {
		commons.Error(ctx, 500, err, err.Error())
		return
	}

	filter := bson.M{}
	if request.SearchType == 0 {
		// Exact-match lookup.
		filter["sentence"] = request.Sentence
	} else {
		// Case-insensitive substring (regex) lookup.
		filter["sentence"] = primitive.Regex{
			Pattern: request.Sentence,
			Options: "i",
		}
	}

	// Offset-based pagination; page index is 1-based.
	skip := (request.PageIndex - 1) * request.PageSize

	rsts := []*requests.Sentence{}
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbDict,
		"sentence_"+strings.ToLower(request.From),
		filter,
		bson.M{"created_on": -1},
		nil,
		skip,
		request.PageSize,
		&rsts,
	)
	commons.Success(ctx, rsts, "查询成功!", request)
}

// @Tags Sentence(句子接口)
// @Summary 句子详情
// @Description 句子详情
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body requests.DictDetailRequests true "辞典列表"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"获取成功"}"
// @Failure 400 {string} string "{"code":500,"data":{},"msg":"获取失败信息"}"
// @Router /editor/sentence/detail [post]
func SentenceDetail(ctx *gin.Context) {
	var request requests.SentenceDetail
	// BUG FIX: BindJSON error was ignored.
	if err := ctx.BindJSON(&request); err != nil {
		commons.Error(ctx, 500, err, err.Error())
		return
	}

	//2.验证
	UserValidations := commons.BaseValidations{}
	message, err := UserValidations.Check(request)
	if err != nil && !helpers.Empty(message) {
		commons.Error(ctx, 500, err, message)
		// BUG FIX: missing return — the handler previously fell through to
		// the service call after reporting a validation error.
		return
	}

	obj := service.AppService()
	result, err := obj.AppService.SentenceService.Detail(ctx, request)
	if err != nil {
		commons.Error(ctx, 500, err, err.Error())
		// BUG FIX: missing return — previously Success was also sent after
		// the error response.
		return
	}
	commons.Success(ctx, result, "查询成功!", request)
}

// @Tags Sentence(句子接口)
// @Summary 更新句子
// @Description 更新句子
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body requests.SentenceUpdate true "更新句子"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"获取成功"}"
// @Failure 400 {string} string "{"code":500,"data":{},"msg":"获取失败信息"}"
// @Router /editor/sentence/update [post]
func SentenceUpdate(ctx *gin.Context) {
	var request requests.SentenceUpdate
	// BUG FIX: BindJSON error was ignored.
	if err := ctx.BindJSON(&request); err != nil {
		commons.Error(ctx, 500, err, err.Error())
		return
	}

	//2.验证
	UserValidations := commons.BaseValidations{}
	message, err := UserValidations.Check(request)
	if err != nil && !helpers.Empty(message) {
		commons.Error(ctx, 500, err, message)
		// BUG FIX: missing return after validation failure.
		return
	}

	obj := service.AppService()
	result, err := obj.AppService.SentenceService.Update(ctx, request)
	if err != nil {
		commons.Error(ctx, 500, err, err.Error())
		// BUG FIX: missing return after service error.
		return
	}
	commons.Success(ctx, result, "更新成功!", request)
}
package pkg

import (
	"sort"

	bolt "github.com/coreos/bbolt"
)

// Blockchain is the list of linked blocks
type Blockchain struct {
	last []byte
	db   *bolt.DB
}

// NewBlockchain initializes a new blockchain, creating the "blockchain"
// bucket and storing a genesis block on first use, and remembering the hash
// of the last stored block.
func NewBlockchain(db *bolt.DB) (*Blockchain, error) {
	var last []byte
	err := db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blockchain"))
		if b == nil {
			// First run: create the bucket and persist the genesis block.
			genesis := NewGenesisBlock()
			b, err := tx.CreateBucket([]byte("blockchain"))
			if err != nil {
				return err
			}
			serialized, err := genesis.Serialize()
			if err != nil {
				return err
			}
			if err := b.Put([]byte(genesis.Hash), serialized); err != nil {
				return err
			}
			if err := b.Put([]byte("last"), []byte(genesis.Hash)); err != nil {
				return err
			}
			last = []byte(genesis.Hash)
		} else {
			last = b.Get([]byte("last"))
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// BUG FIX: previously returned the stale `err` variable; err is always
	// nil on this path, so return literal nil.
	return &Blockchain{last, db}, nil
}

// AddBlock Addes a block in a blockchain
func (bc *Blockchain) AddBlock(money int) (*Block, error) {
	var lastBlock *Block
	err := bc.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blockchain"))
		lastHash := b.Get([]byte("last"))
		block, err := DeserializeBlock(b.Get(lastHash))
		// BUG FIX: check the error before using the block; the original
		// assigned lastBlock first.
		if err != nil {
			return err
		}
		lastBlock = block
		return nil
	})
	if err != nil {
		return nil, err
	}

	newBlock := NewBlock(money, lastBlock)
	if !IsBlockValid(newBlock, lastBlock) {
		return nil, NewInvalidBlockError("Invalid Block", newBlock)
	}

	err = bc.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blockchain"))
		serialized, err := newBlock.Serialize()
		if err != nil {
			return err
		}
		if err = b.Put([]byte(newBlock.Hash), serialized); err != nil {
			return err
		}
		if err := b.Put([]byte("last"), []byte(newBlock.Hash)); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return newBlock, nil
}

// GetBlock gets you the block with a specific block
func (bc *Blockchain) GetBlock(hash []byte) (*Block, error) {
	var block *Block
	err := bc.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blockchain"))
		dblock, err := DeserializeBlock(b.Get(hash))
		// BUG FIX: check the error before keeping the block.
		if err != nil {
			return err
		}
		block = dblock
		return nil
	})
	if err != nil {
		return nil, err
	}
	return block, nil
}

// AllBlocks get all blocks in the blockchain that are sorted
func (bc *Blockchain) AllBlocks() ([]*Block, error) {
	var blocks []*Block
	err := bc.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blockchain"))
		// BUG FIX: the error returned by ForEach (propagated from a failed
		// DeserializeBlock) was previously discarded, silently returning a
		// partial block list.
		return b.ForEach(func(k, v []byte) error {
			block, err := DeserializeBlock(v)
			if err != nil {
				return err
			}
			blocks = append(blocks, block)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i].Index < blocks[j].Index
	})
	return blocks, nil
}