text
stringlengths
11
4.05M
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// main spawns 101 goroutines (i = 0..100) that each append one line to a
// shared buffer. bytes.Buffer is not safe for concurrent use, so every write
// is serialized behind an exclusive lock; the WaitGroup lets main wait for
// all writers before printing the accumulated contents.
func main() {
	out := bytes.NewBufferString("")
	var (
		mu      sync.RWMutex
		writers sync.WaitGroup
	)
	for n := 0; n <= 100; n++ {
		writers.Add(1)
		go func(id int) {
			defer writers.Done()
			// Writers take the exclusive lock: concurrent Fprintln calls on
			// one bytes.Buffer would otherwise race.
			mu.Lock()
			defer mu.Unlock()
			fmt.Fprintln(out, "hello:", id)
		}(n)
	}
	writers.Wait()
	fmt.Println(out)
}
/*
============================ defer keyword ============================
 - a deferred call is postponed until the surrounding function returns
 - deferred calls execute in LIFO order
 - widely used to pair acquire/release, e.g. closing an opened file
*/
package main

import (
	"fmt"
)

// d1 defers fmt.Print directly: the argument values (4, 3, 2, 1) are
// captured at defer time, and the calls run LIFO on return, so the output
// is "1 2 3 4 ".
func d1() {
	for i := 4; i > 0; i-- {
		defer fmt.Print(i, " ")
	}
}

// d2 defers a closure that captures the loop variable i itself.
// NOTE(review): the output is Go-version dependent — before Go 1.22 all
// three closures share one i (0 after the loop ends), printing "0 0 0 ";
// from Go 1.22 on, each iteration has its own i, printing "1 2 3 ".
func d2() {
	for i := 3; i > 0; i-- {
		defer func() { fmt.Print(i, " ") }()
	}
	// Runs before the deferred closures, which only fire on return.
	fmt.Println()
}

// d3 passes i as an argument, snapshotting its value at defer time, so the
// LIFO execution prints "1 2 3 " on every Go version.
func d3() {
	for i := 3; i > 0; i-- {
		defer func(n int) { fmt.Print(n, " ") }(i)
	}
}

// defer1 runs the three defer demonstrations in sequence.
func defer1() {
	d1()
	d2()
	fmt.Println()
	d3()
	fmt.Println()
}
// Package gowally demonstrates a preconfigured gohttp client calling the
// GitHub API.
package gowally

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/Upper-Beacon/gowally/gohttp"
)

var (
	// githubHTTPClient is shared by all requests so the underlying client
	// (and its headers) is configured exactly once.
	githubHTTPClient = getGithubClient()
)

// getGithubClient builds the gohttp client with the headers every request
// should carry.
// NOTE(review): the bearer token is hard-coded; move it to configuration or
// an environment variable before shipping.
func getGithubClient() gohttp.HTTPClient {
	client := gohttp.New()
	commonHeaders := make(http.Header)
	commonHeaders.Set("Authorization", "Bearer ABC-123")
	client.SetHeaders(commonHeaders)
	return client
}

// main is the demo entry point.
// NOTE(review): this file declares `package gowally`, so this main is never
// an executable entry point; rename the package to main if it is meant to
// run directly.
func main() {
	getUrls()
}

// getUrls performs a GET against the GitHub API root and dumps the status
// code and response body to stdout.
func getUrls() {
	response, err := githubHTTPClient.Get("https://api.github.com", nil)
	if err != nil {
		panic(err)
	}
	// Close the body so the transport can reuse the connection; the
	// original version leaked it.
	defer response.Body.Close()
	fmt.Println(response.StatusCode)
	bytes, err := ioutil.ReadAll(response.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bytes))
}
package main

import (
	"fmt"
	"log"
	"time"
)

// main parses the literal duration "10s" and prints its length in seconds
// (10). An unparsable input would terminate the program via log.Fatal.
func main() {
	const input = "10s"
	d, err := time.ParseDuration(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds())
}
package main

import "fmt"

// main prints the byte length of a string, a single indexed byte, and a
// full string literal.
func main() {
	const (
		lastName  = "Zhuhry"
		shortName = "Muhammad Zhuhry"
		fullName  = "Muhammad Athallah Zhuhry"
	)
	fmt.Println(len(lastName))  // 6 — len counts bytes (ASCII here)
	fmt.Println(shortName[0])   // 77 — indexing a string yields a byte ('M')
	fmt.Println(fullName)
}
// Package dae maps part of the COLLADA (.dae) XML schema onto Go structs
// for decoding with encoding/xml.
package dae

import (
	"encoding/xml"
	"fmt"
	"strconv"
)

// COLLADA declares the root of the document that contains some of the content
// in the COLLADA schema.
type COLLADA struct {
	Version     string       `xml:"version,attr"`
	Asset       *Asset       `xml:"asset"`
	LibCameras  *LibCameras  `xml:"library_cameras"`
	LibLights   *LibLights   `xml:"library_lights"`
	LibImages   *LibImages   `xml:"library_images"`
	LibEffects  *LibEffects  `xml:"library_effects"`
	LibMtls     *LibMtls     `xml:"library_materials"`
	LibGeoms    *LibGeoms    `xml:"library_geometries"`
	LibCtrls    *LibCtrls    `xml:"library_controllers"`
	LibVScenes  *LibVScenes  `xml:"library_visual_scenes"`
	LibAniClips *LibAniClips `xml:"library_animation_clips"`
	LibAnis     *LibAnis     `xml:"library_animations"`
	LibFormulas *LibFormulas `xml:"library_formulas"`
	LibNodes    *LibNodes    `xml:"library_nodes"`
	Scene       *Scene       `xml:"scene"`
}

// Asset defines asset-management information regarding its parent element.
type Asset struct {
	// UnitMeter records the <unit> element: the length of one scene unit
	// in meters, plus its display name.
	UnitMeter struct {
		Meter float32 `xml:"meter,attr,omitempty"`
		Name  string  `xml:"name,attr,omitempty"`
	} `xml:"unit"`
	UpAxis   string `xml:"up_axis,omitempty"`
	Subject  string `xml:"subject,omitempty"`
	Title    string `xml:"title,omitempty"`
	Revision string `xml:"revision,omitempty"`
	// Timestamps are kept as raw strings; they are not parsed here.
	CTime        string        `xml:"created,omitempty"`
	MTime        string        `xml:"modified,omitempty"`
	Contributors []Contributor `xml:"contributor,omitempty"`
}

// Contributor defines authoring information for asset management.
type Contributor struct {
	Author    string `xml:"author,omitempty"`
	Email     string `xml:"author_email,omitempty"`
	Web       string `xml:"author_website,omitempty"`
	Tool      string `xml:"authoring_tool,omitempty"`
	Copyright string `xml:"copyright,omitempty"`
}

// ValueSid is value with sid.
type ValueSid struct {
	Sid string `xml:"sid,attr,omitempty"`
	// V is the raw character data of the element.
	V string `xml:",chardata"`
}

// Float parse value as float32.
func (f ValueSid) Float() (float32, error) {
	x, err := strconv.ParseFloat(f.V, 32)
	if err != nil {
		return 0, err
	}
	return float32(x), nil
}

// Scene embodies the entire set of information that can be visualized from the contents of a COLLADA resource.
type Scene struct {
	Physics []struct {
		URL string `xml:"url,attr,omitempty"`
	} `xml:"instance_physics_scene"`
	Visual *struct {
		URL string `xml:"url,attr,omitempty"`
	} `xml:"instance_visual_scene"`
	Kinemat *struct {
		URL string `xml:"url,attr,omitempty"`
	} `xml:"instance_kinematics_scene"`
}

// LibCameras provides a library in which to place <camera> elements.
type LibCameras struct {
	ID      string   `xml:"id,attr,omitempty"`
	Name    string   `xml:"name,attr,omitempty"`
	Asset   *Asset   `xml:"asset"`
	Cameras []Camera `xml:"camera"`
	// <imager> has no technique_common, so it is not included.
}

// Camera declares a view of the visual scene hierarchy or scene graph.
// The camera contains elements that describe the camera’s optics and imager.
type Camera struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
	// Persp and Ortho correspond to the two projection kinds; typically a
	// document supplies one of them (nil when absent).
	Persp *Perspective  `xml:"optics>technique_common>perspective"`
	Ortho *Orthographic `xml:"optics>technique_common>orthographic"`
}

// Perspective describes the field of view of a perspective camera.
type Perspective struct {
	XFov   ValueSid `xml:"xfov"`
	YFov   ValueSid `xml:"yfov"`
	Aspect ValueSid `xml:"aspect_ratio"`
	ZNear  ValueSid `xml:"znear"`
	ZFar   ValueSid `xml:"zfar"`
}

// Orthographic describes the field of view of an orthographic camera.
type Orthographic struct {
	XMag   ValueSid `xml:"xmag"`
	YMag   ValueSid `xml:"ymag"`
	Aspect ValueSid `xml:"aspect_ratio"`
	ZNear  ValueSid `xml:"znear"`
	ZFar   ValueSid `xml:"zfar"`
}

// LibLights provides a library in which to place <light> elements.
type LibLights struct {
	ID     string   `xml:"id,attr,omitempty"`
	Name   string   `xml:"name,attr,omitempty"`
	Asset  *Asset   `xml:"asset"`
	Lights []*Light `xml:"light"`
	// <imager> has no technique_common, so it is not included.
}

// Light declares a light source that illuminates a scene.
type Light struct {
	ID      string      `xml:"id,attr,omitempty"`
	Name    string      `xml:"name,attr,omitempty"`
	Asset   *Asset      `xml:"asset"`
	Ambient *Color      `xml:"technique_common>ambient>color"`
	Direct  *Color      `xml:"technique_common>directional>color"`
	Point   *PointLight `xml:"technique_common>point"`
}

// Color describes the color of its parent light element, stored as the raw
// space-separated component string from the document.
type Color string

// RGB color: parses the first three space-separated components.
// Components that fail to scan are left at zero.
func (c Color) RGB() (v [3]float32) {
	fmt.Sscanf(string(c), "%f %f %f", &v[0], &v[1], &v[2])
	return
}

// RGBA color: parses the first four space-separated components.
// Components that fail to scan are left at zero.
func (c Color) RGBA() (v [4]float32) {
	fmt.Sscanf(string(c), "%f %f %f %f", &v[0], &v[1], &v[2], &v[3])
	return
}

// PointLight describes a point light source.
type PointLight struct {
	Color  Color     `xml:"color"`
	Const  *ValueSid `xml:"constant_attenuation"`
	Linear *ValueSid `xml:"linear_attenuation"`
	Quad   ValueSid  `xml:"quadratic_attenuation"`
}

// LibImages provides a library for the storage of <image> assets.
type LibImages struct {
	ID     string   `xml:"id,attr,omitempty"`
	Name   string   `xml:"name,attr,omitempty"`
	Asset  *Asset   `xml:"asset"`
	Images []*Image `xml:"image"`
}

// Image declares the storage for the graphical representation of an object.
type Image struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Sid   string `xml:"sid,attr,omitempty"`
	Asset *Asset `xml:"asset"`
	Renderable *struct {
		Share bool `xml:"share,attr"`
	} `xml:"renderable"`
	InitFrom *struct {
		MipsGen bool   `xml:"mips_generate,attr"`
		REF     string `xml:"ref,omitempty"` // <init_from><ref>foo.png</ref></init_from>
		ERFOld  string `xml:",chardata"`     // <init_from>foo.png</init_from>
		// NOTE(review): HEX has no xml tag, so encoding/xml matches the
		// field name "HEX" rather than a lowercase <hex> element — confirm
		// against real documents.
		HEX *struct {
			Format string `xml:"format,attr"`
			Data   string `xml:",chardata"`
		}
	} `xml:"init_from"`
	// TODO: <create_2d>
	// TODO: <create_3d>
	// TODO: <create_cube>
}

// LibEffects provides a library or the storage of <effect> assets.
type LibEffects struct {
	ID      string    `xml:"id,attr,omitempty"`
	Name    string    `xml:"name,attr,omitempty"`
	Asset   *Asset    `xml:"asset"`
	Effects []*Effect `xml:"effect"`
}

// Effect provides a self-contained description of a COLLADA effect.
type Effect struct {
	ID        string         `xml:"id,attr,omitempty"`
	Name      string         `xml:"name,attr,omitempty"`
	Asset     *Asset         `xml:"asset"`
	Annotates []*Annotate    `xml:"annotate"`
	NewParams []*NewParam    `xml:"newparam"`
	Common    *ProfileCommon `xml:"profile_COMMON"`
}

// Annotate adds a strongly typed annotation remark to the parent object.
type Annotate struct {
	Name string `xml:"name,attr"`
	// ValueElem captures the single typed child element; its XMLName holds
	// the type tag (e.g. "string") and Data the raw content.
	ValueElem struct {
		XMLName xml.Name
		Data    string `xml:",chardata"`
	} `xml:",any"`
}

// Value parse value_element to go's value.
// Currently only "string" annotations are handled; any other type panics.
func (a *Annotate) Value() interface{} {
	switch a.ValueElem.XMLName.Local {
	case "string":
		return a.ValueElem.Data
	}
	// TODO: implement the remaining annotation types.
	panic("not implemenet") // (sic) existing message kept verbatim
}

// NewParam creates a new, named parameter object, and assigns it a type and an initial value.
type NewParam struct {
	Sid       string      `xml:"sid,attr,omitempty"`
	Annotates []*Annotate `xml:"annotate"`
	Semantic  string      `xml:"semantic"`
	Modifier  string      `xml:"modifier"` // CONST,UNIFORM,VARYING,STATIC,VOLATILE,EXTERN,SHARED
}

// ProfileCommon opens a block of platform-independent declarations for the common, fixed-function shader.
type ProfileCommon struct {
	ID        string      `xml:"id,attr,omitempty"`
	Asset     *Asset      `xml:"asset"`
	NewParams []*NewParam `xml:"newparam"`
	Technique FxTechnique `xml:"technique"`
}

// FxTechnique Holds a description of the textures, samplers, shaders, parameters,
// and passes necessary for rendering this effect using one method.
type FxTechnique struct {
	ID        string      `xml:"id,attr,omitempty"`
	Sid       string      `xml:"sid,attr,omitempty"`
	Asset     *Asset      `xml:"asset"`
	Annotates []*Annotate `xml:"annotate"`
	// TODO: <blinn>
	// TODO: <constant>
	// TODO: <lambert>
	Phong  *Phong  `xml:"phong"`
	Passes []*Pass `xml:"pass"`
}

// Phong produces a shaded surface where the specular reflection is shaded according the Phong BRDF approximation.
type Phong struct {
	Emission      *FxColorOrTex   `xml:"emission"`
	Ambient       *FxColorOrTex   `xml:"ambient"`
	Diffuse       *FxColorOrTex   `xml:"diffuse"`
	Specular      *FxColorOrTex   `xml:"specular"`
	Shininess     *FxFloatOrParam `xml:"shininess"`
	Reflective    *FxColorOrTex   `xml:"reflective"`
	Reflectivity  *FxFloatOrParam `xml:"reflectivity"`
	Transparent   *FxColorOrTex   `xml:"transparent"`
	Transparency  *FxFloatOrParam `xml:"transparency"`
	IdxRefraction *FxFloatOrParam `xml:"index_of_refraction"`
}

// FxColorOrTex (fx_common_color_or_texture_type) is A type that describes color attributes
// of fixed-function shader elements inside <profile_COMMON> effects.
// A document supplies one of Color, Param or Texture; the others stay nil.
type FxColorOrTex struct {
	Color   *Color    `xml:"color"`
	Param   *ParamRef `xml:"param"`
	Texture *struct {
		Texture  string `xml:"texture,attr"`
		TexCoord string `xml:"texcoord,attr"`
	} `xml:"texture"`
}

// FxFloatOrParam (fx_common_float_or_param_type) is A type that describes the scalar attributes
// of fixed-function shader elements inside <profile_COMMON> effects.
type FxFloatOrParam struct {
	Float *ValueSid `xml:"float"`
	Param *ParamRef `xml:"param"`
}

// ParamRef references a predefined parameter.
type ParamRef struct {
	ID       string `xml:"id,attr,omitempty"`
	Name     string `xml:"name,attr,omitempty"`
	Semantic string `xml:"semantic,attr,omitempty"`
	Type     string `xml:"type,attr,omitempty"`
	Ref      string `xml:"ref,attr,omitempty"`
}

// Pass provides a static declaration of all the render states, shaders, and settings for one rendering pipeline.
type Pass struct {
	Sid       string      `xml:"sid,attr,omitempty"`
	Annotates []*Annotate `xml:"annotate"`
	// TODO: <states>
	// TODO: <evaluate>
	// TODO:
}

// LibMtls provides a library for the storage of <material> assets.
type LibMtls struct {
	ID    string      `xml:"id,attr,omitempty"`
	Name  string      `xml:"name,attr,omitempty"`
	Asset *Asset      `xml:"asset"`
	Mtls  []*Material `xml:"material"`
}

// Material provides a library for the storage of <material> assets.
type Material struct {
	ID         string     `xml:"id,attr,omitempty"`
	Name       string     `xml:"name,attr,omitempty"`
	Asset      *Asset     `xml:"asset"`
	InstEffect InstEffect `xml:"instance_effect"`
}

// InstEffect instantiates a COLLADA effect.
type InstEffect struct {
	Sid       string      `xml:"sid,attr,omitempty"`
	Name      string      `xml:"name,attr,omitempty"`
	URL       string      `xml:"url,attr,omitempty"`
	TechHints []*TechHint `xml:"technique_hint"`
	SetParam  []*SetParam `xml:"setparam"`
}

// TechHint adds a hint for a platform of which technique to use in this effect.
type TechHint struct {
	Platform string `xml:"platform,attr,omitempty"`
	Ref      string `xml:"ref,attr,omitempty"`
	Profile  string `xml:"profile,attr,omitempty"`
}

// SetParam assigns a new value to a previously defined parameter.
type SetParam struct {
	Ref string `xml:"ref,attr,omitempty"`
	// ValueElem captures the single typed child element; its XMLName holds
	// the type tag and Data the raw content.
	ValueElem struct {
		XMLName xml.Name
		Data    string `xml:",chardata"`
	} `xml:",any"`
}

// LibGeoms provides a library in which to place <geometry> elements.
type LibGeoms struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibCtrls provides a library in which to place <controller> elements.
type LibCtrls struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibVScenes provides a library in which to place <visual_scene> elements.
type LibVScenes struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibAniClips provides a library in which to place <animation_clip> elements.
type LibAniClips struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibAnis provides a library in which to place <animation> elements.
type LibAnis struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibFormulas provides a library in which to place <formula> elements.
type LibFormulas struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}

// LibNodes provides a library in which to place <node> elements.
type LibNodes struct {
	ID    string `xml:"id,attr,omitempty"`
	Name  string `xml:"name,attr,omitempty"`
	Asset *Asset `xml:"asset"`
}
package main

import (
	"os"
	"testing"
)

var targetNumber int = 1024

// TestBuffer_Read fills an mp4 buffer with targetNumber bytes from a fixture
// file, reads half of them back, and checks the buffer's remaining length.
func TestBuffer_Read(t *testing.T) {
	buf := newMp4Buffer(make([]byte, 0, 4))

	const filename = "test_file_large"
	src, err := os.Open(filename)
	if err != nil {
		t.Errorf("failed to open test file: %s\n", filename)
		return
	}
	defer src.Close()

	got, err := buf.ReadBytesFromAtLeast(src, targetNumber)
	if err != nil {
		t.Fatal("ReadBytesFrom return failed")
	}
	if got != targetNumber {
		t.Fatalf("ReadBytesFrom Actually input : %d\n", got)
	}

	// Drain half of the buffered bytes; the buffer must shrink accordingly.
	half := make([]byte, targetNumber/2)
	read, err := buf.Read(half)
	if err != nil {
		t.Fatal("Read failed", err)
	}
	if buf.Len() != targetNumber-read {
		t.Fatal("internal error: after read")
	}
}
// End-to-end tests for the httpserver package: they stand up httptest
// servers for session, matchmaking, sync (moves) and async (events)
// handlers, then drive two HTTP clients through full games.
package httpserver

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/cookiejar"
	"net/http/httptest"
	"net/url"
	"strings"
	"testing"

	"github.com/ekotlikoff/gochess/internal/model"
	matchserver "github.com/ekotlikoff/gochess/internal/server/backend/match"
	gateway "github.com/ekotlikoff/gochess/internal/server/frontend"
)

var (
	debug bool   = false
	ctp   string = "application/json"
	// Shared test servers, initialized once in init and reused by every test.
	serverMatch        *httptest.Server
	serverSession      *httptest.Server
	serverSync         *httptest.Server
	serverAsync        *httptest.Server
	serverMatchTimeout *httptest.Server
)

// init wires up all test servers: a normal matching server and a second one
// whose matches use a very short clock (100) so timeout behavior can be
// exercised. The exit channel is closed up front; the match servers read the
// closed channel as the shutdown signal once work is done.
func init() {
	serverSession = httptest.NewServer(http.HandlerFunc(gateway.Session))
	serverAsync = httptest.NewServer(http.Handler(makeAsyncHandler()))
	serverSync = httptest.NewServer(http.Handler(makeSyncHandler()))
	matchingServer := matchserver.NewMatchingServer()
	serverMatch = httptest.NewServer(
		makeSearchForMatchHandler(&matchingServer))
	exitChan := make(chan bool, 1)
	close(exitChan)
	matchingServer.StartMatchServers(10, exitChan)
	timeoutMatchingServer := matchserver.NewMatchingServer()
	serverMatchTimeout = httptest.NewServer(
		makeSearchForMatchHandler(&timeoutMatchingServer))
	generator := func(
		black *matchserver.Player,
		white *matchserver.Player,
	) matchserver.Match {
		return matchserver.NewMatch(black, white, 100)
	}
	timeoutMatchingServer.StartCustomMatchServers(10, generator, exitChan)
	SetQuiet()
}

// TestHTTPServerMatch pairs two sessions via the match endpoint and verifies
// the async endpoint reports a successful match. The first Get runs in a
// goroutine because matching blocks until a second player arrives.
func TestHTTPServerMatch(t *testing.T) {
	if debug {
		fmt.Println("Test Match")
	}
	jar, _ := cookiejar.New(&cookiejar.Options{})
	jar2, _ := cookiejar.New(&cookiejar.Options{})
	client := &http.Client{Jar: jar}
	client2 := &http.Client{Jar: jar2}
	startSession(client, "player1")
	startSession(client2, "player2")
	wait := make(chan struct{})
	var resp *http.Response
	go func() { resp, _ = client.Get(serverMatch.URL); close(wait) }()
	resp2, _ := client2.Get(serverMatch.URL)
	<-wait
	defer resp.Body.Close()
	defer resp2.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	body2, _ := ioutil.ReadAll(resp2.Body)
	if debug {
		fmt.Println(string(body))
		fmt.Println(resp.StatusCode)
		fmt.Println(string(body2))
		fmt.Println(resp2.StatusCode)
	}
	resp, err := client.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	resp.Body.Close()
	if err != nil || responseAsync.Matched != true {
		t.Error("Expected match")
	}
}

// TestHTTPServerCheckmate plays a fixed move sequence to checkmate and
// verifies each side sees the opponent's moves on the sync endpoint and the
// final gameover event (winner = black) on the async endpoint.
func TestHTTPServerCheckmate(t *testing.T) {
	if debug {
		fmt.Println("Test Checkmate")
	}
	black, white, blackName, _ := createMatch(serverMatch)
	sendMove(white, serverSync, 2, 1, 0, 2)
	resp, _ := black.Get(serverSync.URL)
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if !strings.Contains(string(body),
		"{\"Position\":{\"File\":2,\"Rank\":1},\"Move\":{\"X\":0,\"Y\":2}") {
		t.Error("Expected opponent's move got ", string(body))
	}
	sendMove(black, serverSync, 4, 6, 0, -2)
	resp, _ = white.Get(serverSync.URL)
	body, _ = ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if !strings.Contains(string(body),
		"{\"Position\":{\"File\":4,\"Rank\":6},\"Move\":{\"X\":0,\"Y\":-2}") {
		t.Error("Expected opponent's move got ", string(body))
	}
	sendMove(white, serverSync, 2, 3, 0, 1)
	sendMove(black, serverSync, 3, 7, 4, -4)
	sendMove(white, serverSync, 2, 4, 0, 1)
	sendMove(black, serverSync, 5, 7, -3, -3)
	sendMove(white, serverSync, 2, 5, -1, 1)
	sendMove(black, serverSync, 7, 3, -2, -2)
	resp, _ = white.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	if !responseAsync.GameOver || responseAsync.Winner != blackName {
		t.Error("Expected gameover got ", responseAsync)
	}
	if debug {
		fmt.Println("Success Checkmate")
	}
}

// TestHTTPServerDraw has white offer a draw, verifies black sees the offer,
// then has black accept and verifies the draw gameover event.
func TestHTTPServerDraw(t *testing.T) {
	if debug {
		fmt.Println("Test Draw")
	}
	black, white, _, _ := createMatch(serverMatch)
	sendMove(white, serverSync, 2, 1, 0, 2)
	payloadBuf := new(bytes.Buffer)
	requestAsync := matchserver.RequestAsync{RequestToDraw: true}
	json.NewEncoder(payloadBuf).Encode(requestAsync)
	white.Post(serverAsync.URL, ctp, payloadBuf)
	resp, _ :=
		black.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	if !responseAsync.RequestToDraw || responseAsync.GameOver {
		t.Error("Expected draw request from white got ", responseAsync)
	}
	// Same RequestToDraw payload from black acts as acceptance.
	json.NewEncoder(payloadBuf).Encode(requestAsync)
	black.Post(serverAsync.URL, ctp, payloadBuf)
	resp, _ = white.Get(serverAsync.URL)
	responseAsync = matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	if !responseAsync.GameOver || responseAsync.Winner != "" ||
		!responseAsync.Draw || responseAsync.Resignation {
		t.Error("Expected gameover got ", responseAsync)
	}
}

// TestHTTPServerResign has white resign and verifies black receives a
// resignation gameover naming black the winner.
func TestHTTPServerResign(t *testing.T) {
	if debug {
		fmt.Println("Test Resign")
	}
	black, white, blackName, _ := createMatch(serverMatch)
	sendMove(white, serverSync, 2, 1, 0, 2)
	payloadBuf := new(bytes.Buffer)
	requestAsync := matchserver.RequestAsync{Resign: true}
	json.NewEncoder(payloadBuf).Encode(requestAsync)
	white.Post(serverAsync.URL, ctp, payloadBuf)
	resp, _ := black.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	if !responseAsync.GameOver || responseAsync.Winner != blackName ||
		!responseAsync.Resignation {
		t.Error("Expected gameover got ", responseAsync)
	}
}

// TestHTTPServerTimeout uses the short-clock matching server so that after
// two moves the game times out; black should be reported as the winner.
func TestHTTPServerTimeout(t *testing.T) {
	if debug {
		fmt.Println("Test Timeout")
	}
	black, white, blackName, _ := createMatch(serverMatchTimeout)
	sendMove(white, serverSync, 2, 1, 0, 2)
	sendMove(black, serverSync, 2, 6, 0, -2)
	resp, _ := black.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	if !responseAsync.GameOver || responseAsync.Winner != blackName ||
		!responseAsync.Timeout {
		t.Error("Expected timeout got ", responseAsync)
	}
}

// createMatch starts sessions for two clients, pairs them on the given match
// server, and uses the async endpoint's MatchDetails to work out which
// client ended up with the black pieces.
// NOTE(review): the response from the first white.Get is overwritten by the
// following black.Get without its body being closed, leaking a connection;
// Decode errors are also ignored throughout.
func createMatch(testMatchServer *httptest.Server) (
	black *http.Client, white *http.Client, blackName string,
	whiteName string,
) {
	jar, _ := cookiejar.New(&cookiejar.Options{})
	jar2, _ :=
		cookiejar.New(&cookiejar.Options{})
	client := &http.Client{Jar: jar}
	client2 := &http.Client{Jar: jar2}
	startSession(client, "player1")
	startSession(client2, "player2")
	wait := make(chan struct{})
	var resp *http.Response
	go func() { resp, _ = client.Get(testMatchServer.URL); close(wait) }()
	resp2, _ := client2.Get(testMatchServer.URL)
	<-wait
	defer resp.Body.Close()
	defer resp2.Body.Close()
	black = client
	blackName = "player1"
	whiteName = "player2"
	white = client2
	resp, _ = white.Get(serverAsync.URL)
	resp, _ = black.Get(serverAsync.URL)
	responseAsync := matchserver.ResponseAsync{}
	json.NewDecoder(resp.Body).Decode(&responseAsync)
	resp.Body.Close()
	// Swap roles if client (defaulted to black above) was actually white.
	if responseAsync.MatchDetails.Color == model.White {
		black = client2
		blackName = "player2"
		whiteName = "player1"
		white = client
	}
	return black, white, blackName, whiteName
}

// sendMove posts a MoveRequest (no promotion) for the piece at (x, y) moving
// by (moveX, moveY) to the sync endpoint. Post errors are ignored; the body
// is closed only on success.
func sendMove(client *http.Client, serverSync *httptest.Server, x, y,
	moveX, moveY int) {
	movePayloadBuf := new(bytes.Buffer)
	moveRequest := model.MoveRequest{
		Position:  model.Position{File: uint8(x), Rank: uint8(y)},
		Move:      model.Move{X: int8(moveX), Y: int8(moveY)},
		PromoteTo: nil,
	}
	json.NewEncoder(movePayloadBuf).Encode(moveRequest)
	resp, err := client.Post(serverSync.URL, "application/json",
		movePayloadBuf)
	if err == nil {
		defer resp.Body.Close()
	}
}

// startSession creates a session for username on the session server, then
// copies the resulting session cookie onto every other test server's URL so
// later requests are authenticated.
func startSession(client *http.Client, username string) {
	credentialsBuf := new(bytes.Buffer)
	credentials := gateway.Credentials{Username: username}
	json.NewEncoder(credentialsBuf).Encode(credentials)
	resp, err := client.Post(serverSession.URL, "application/json",
		credentialsBuf)
	serverSessionURL, _ := url.Parse(serverSession.URL)
	serverMatchURL, _ := url.Parse(serverMatch.URL)
	serverSyncURL, _ := url.Parse(serverSync.URL)
	serverAsyncURL, _ := url.Parse(serverAsync.URL)
	// Ensure that the various test handler URLs get passed the session cookie
	// by the client.
	client.Jar.SetCookies(serverMatchURL, client.Jar.Cookies(serverSessionURL))
	client.Jar.SetCookies(serverSyncURL, client.Jar.Cookies(serverSessionURL))
	client.Jar.SetCookies(serverAsyncURL, client.Jar.Cookies(serverSessionURL))
	if err == nil {
		defer resp.Body.Close()
	}
}

// TestCurrentMatch verifies that a GET on the session endpoint returns the
// session's username when no match is in progress.
func TestCurrentMatch(t *testing.T) {
	if debug {
		fmt.Println("Test CurrentMatch")
	}
	jar, _ := cookiejar.New(&cookiejar.Options{})
	client := &http.Client{Jar: jar}
	startSession(client, "Dawn")
	resp, err := client.Get(serverSession.URL)
	if err != nil {
		t.Error(err)
	}
	defer resp.Body.Close()
	var currentMatchResponse gateway.SessionResponse
	json.NewDecoder(resp.Body).Decode(&currentMatchResponse)
	if debug {
		fmt.Println(currentMatchResponse)
		fmt.Println(resp.Cookies())
	}
	if resp.StatusCode != 200 || err != nil ||
		currentMatchResponse.Credentials.Username != "Dawn" {
		t.Error("Expected a username")
	}
}

// TestCurrentMatchWithGame verifies the session endpoint reports the ongoing
// (not yet over) match after a couple of opening moves.
func TestCurrentMatchWithGame(t *testing.T) {
	if debug {
		fmt.Println("Test CurrentMatchWithGame")
	}
	black, white, blackName, _ := createMatch(serverMatch)
	sendMove(white, serverSync, 2, 1, 0, 2)
	sendMove(black, serverSync, 2, 6, 0, -2)
	resp, err := black.Get(serverSession.URL)
	if err != nil {
		t.Error(err)
	}
	defer resp.Body.Close()
	var currentMatchResponse gateway.SessionResponse
	json.NewDecoder(resp.Body).Decode(&currentMatchResponse)
	if debug {
		fmt.Println(currentMatchResponse)
		fmt.Println(currentMatchResponse.Match)
		fmt.Println(resp.Cookies())
	}
	if resp.StatusCode != 200 || err != nil ||
		currentMatchResponse.Credentials.Username != blackName ||
		currentMatchResponse.Match.GameOver == true {
		t.Error("Expected cookies")
	}
}
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flare

import (
	"context"
	"net/url"
	"time"

	"github.com/pkg/errors"
)

// Resource represents the apis Flare track and the info to detect changes on documents.
type Resource struct {
	ID        string
	Endpoint  url.URL
	Change    ResourceChange
	CreatedAt time.Time
}

// ResourceChange holds the information to detect document change.
type ResourceChange struct {
	Field  string
	Format string
}

// Valid indicates if the current resourceChange is valid.
// A ResourceChange must at least name the document field used for change
// detection; Format is optional.
func (rc *ResourceChange) Valid() error {
	if rc.Field == "" {
		return errors.New("missing field")
	}
	return nil
}

// ResourceRepositorier is used to interact with Resource repository.
type ResourceRepositorier interface {
	Find(context.Context, *Pagination) ([]Resource, *Pagination, error)
	FindByID(context.Context, string) (*Resource, error)
	FindByURI(context.Context, url.URL) (*Resource, error)
	Partitions(ctx context.Context, id string) (partitions []string, err error)
	Create(context.Context, *Resource) error
	Delete(context.Context, string) error
}

// ResourceRepositoryError represents all the errors the repository can return.
type ResourceRepositoryError interface {
	error
	AlreadyExists() bool
	NotFound() bool
}
// Package component implements the application's interactive console menus.
//
// Created by YooDing, 2019/7/5.
// https://github.com/YooDing/gone
package component

import (
	"fmt"

	"gone/utils"
)

var (
	// input holds the menu choice most recently read from stdin.
	input string
)

// Menus prints the main menu (user-facing text is Chinese), reads one choice
// from stdin and dispatches it: "1" installs the JDK, "2" is a stubbed
// Tomcat install, "3" runs the TCP acceleration setup. Anything else prints
// a warning and shows the menu again.
// NOTE(review): option "0" (labelled "exit") has no case and falls into the
// default branch, so it re-prompts instead of exiting; invalid input also
// recurses into Menus rather than looping — confirm intended behavior.
func Menus() {
	fmt.Println("\n 输入数字选择功能:\n")
	fmt.Println(" 1 - 安装JDK \n")
	fmt.Println(" 2 - 安装Tomcat \n")
	fmt.Println(" 3 - 加速算法 \n")
	fmt.Println(" 0 - 退出程序 \n")
	fmt.Print("选择功能: ")
	fmt.Scanln(&input)
	switch input {
	case "1":
		JDK()
	case "2":
		fmt.Println("tomcat")
	case "3":
		TCPA()
	default:
		fmt.Println(input)
		utils.Warning("请输入正确序号!")
		Menus()
	}
}
/*
Copyright IBM Corporation 2020

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cnb

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/containers/skopeo/cmd/skopeo/inspect"
	ocispec "github.com/opencontainers/runtime-spec/specs-go"
	log "github.com/sirupsen/logrus"

	"github.com/konveyor/move2kube/internal/common"
)

var (
	// CNBContainersPath defines the location of the cnb container cache used by runc
	cnbContainersPath string = filepath.Join(common.AssetsPath, "cnb")
	// runcImagesPath holds pulled OCI images; runcBundlesPath holds the
	// unpacked runc bundles derived from them.
	runcImagesPath  = filepath.Join(cnbContainersPath, "images")
	runcBundlesPath = filepath.Join(cnbContainersPath, "bundles")
)

// runcProvider implements CNB builder detection using runc + skopeo + umoci
// instead of a Docker daemon.
type runcProvider struct {
}

// getAllBuildpacks inspects each builder image with skopeo and extracts the
// buildpack list from its order label. Builders that fail any step are
// logged and skipped rather than aborting the whole scan.
func (r *runcProvider) getAllBuildpacks(builders []string) (map[string][]string, error) {
	//[Containerization target option value] buildpacks
	buildpacks := make(map[string][]string)
	if !r.isAvailable() {
		return buildpacks, errors.New("Runc not supported in this instance")
	}
	log.Debugf("Getting data of all builders %s", builders)
	for _, builder := range builders {
		cmd := exec.Command("skopeo", "inspect", "docker://"+string(builder))
		output, err := cmd.CombinedOutput()
		log.Debugf("Builder %s data :%s", builder, output)
		if err != nil {
			log.Warnf("Error while getting supported buildpacks for builder %s : %s", builder, err)
			continue
		}
		sio := inspect.Output{}
		err = json.Unmarshal(output, &sio)
		if err != nil {
			log.Warnf("Unable to seriablize inspect output for builder %s : %s", builder, err)
			continue
		}
		o, found := sio.Labels[orderLabel]
		if !found {
			// NOTE(review): err is nil on this path, so the %s for err in
			// this message always prints "%!s(<nil>)".
			log.Warnf("%s missing in builder %s : %s", orderLabel, builder, err)
			continue
		}
		buildpacks[builder] = getBuildersFromLabel(o)
	}
	return buildpacks, nil
}

// isAvailable reports whether all three required binaries (runc, skopeo,
// umoci) are on PATH.
func (r *runcProvider) isAvailable() bool {
	_, err := exec.LookPath("runc")
	if err != nil {
		log.Debugf("Unable to find runc, ignoring runc based cnb check : %s", err)
		return false
	}
	_, err = exec.LookPath("skopeo")
	if err != nil {
		log.Debugf("Unable to find skopeo, ignoring runc based cnb check : %s", err)
		return false
	}
	_, err = exec.LookPath("umoci")
	if err != nil {
		log.Debugf("Unable to find umoci, ignoring runc based cnb check : %s", err)
		return false
	}
	return true
}

// isBuilderAvailable ensures the builder's OCI bundle exists locally,
// triggering a pull/unpack via init if needed.
func (r *runcProvider) isBuilderAvailable(builder string) bool {
	if !r.isAvailable() {
		return false
	}
	r.init([]string{builder})
	image, _ := common.GetImageNameAndTag(builder)
	_, err := os.Stat(filepath.Join(runcBundlesPath, image))
	if os.IsNotExist(err) {
		log.Debugf("Unable to find pack builder oci bundle, ignoring builder : %s", err)
		return false
	}
	return true
}

// isBuilderSupported bind-mounts path into the builder bundle as /workspace,
// runs the CNB lifecycle detector under runc, and interprets its output to
// decide whether any buildpack group matches the source at path.
func (r *runcProvider) isBuilderSupported(path string, builder string) (bool, error) {
	if !r.isBuilderAvailable(builder) {
		return false, fmt.Errorf("Runc Builder image not available : %s", builder)
	}
	image, _ := common.GetImageNameAndTag(builder)
	ociimagespec := ocispec.Spec{}
	configfilepath := filepath.Join(runcBundlesPath, image, "config.json")
	err := common.ReadJSON(configfilepath, &ociimagespec)
	if err != nil {
		log.Errorf("Unable to read config for image %s : %s", builder, err)
		return false, err
	}
	// Read-only bind mount of the source tree at /workspace, replacing any
	// existing mount at that destination.
	mount := ocispec.Mount{}
	mount.Source, _ = filepath.Abs(path)
	mount.Destination = "/workspace"
	mount.Type = "bind"
	mount.Options = []string{"rbind", "ro"}
	found := false
	for i, m := range ociimagespec.Mounts {
		if m.Destination == mount.Destination {
			mounts := ociimagespec.Mounts
			mounts[i] = mount
			ociimagespec.Mounts = mounts
			found = true
		}
	}
	if !found {
		ociimagespec.Mounts = append(ociimagespec.Mounts, mount)
	}
	ociimagespec.Process.Args = []string{"/cnb/lifecycle/detector"}
	ociimagespec.Process.Terminal = false
	err = common.WriteJSON(configfilepath, ociimagespec)
	if err != nil {
		log.Errorf("Unable to write config json %s : %s", configfilepath, err)
	}
	//TODO: Check if two instances of runc can be spawned by two processes with same container name without errors
	cmd := exec.Command("runc", "run", "cnbbuilder")
	cmd.Dir = filepath.Join(runcBundlesPath, image)
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Debugf("Error while executing runc %+v at %s : %s, %s, %s", cmd, cmd.Dir, path, output, err)
		return false, err
	}
	if strings.Contains(string(output), "ERROR: No buildpack groups passed detection.") {
		log.Debugf("No compatible cnb for %s", path)
		return false, nil
	}
	return true, nil
}

// init pulls each builder image with skopeo (if not already cached) and
// unpacks it into a runc bundle with umoci. All failures are logged and the
// offending builder skipped; init never returns an error.
func (r *runcProvider) init(builders []string) {
	if !r.isAvailable() {
		return
	}
	err := os.MkdirAll(runcImagesPath, common.DefaultDirectoryPermission)
	if err != nil {
		log.Debugf("Unable to create cnb directory ignoring runc based cnb check : %s", err)
		return
	}
	err = os.MkdirAll(runcBundlesPath, common.DefaultDirectoryPermission)
	if err != nil {
		log.Debugf("Unable to create cnb directory ignoring runc based cnb check : %s", err)
		return
	}
	for _, builder := range builders {
		image, tag := common.GetImageNameAndTag(builder)
		// Skip images already pulled into the local cache.
		if _, err := os.Stat(filepath.Join(runcImagesPath, image)); !os.IsNotExist(err) {
			continue
		}
		skopeocmd := exec.Command("skopeo", "copy", "docker://"+builder, "oci:"+image+":"+tag)
		skopeocmd.Dir = runcImagesPath
		log.Debugf("Pulling %s", builder)
		output, err := skopeocmd.CombinedOutput()
		if err != nil {
			log.Debugf("Unable to copy image %s : %s, %s", image, err, output)
			continue
		} else {
			log.Debugf("Image pull done : %s", output)
		}
		fullbundlepath, err := filepath.Abs(filepath.Join(runcBundlesPath, image))
		if err != nil {
			log.Errorf("Unable to resolve full path of directory %s : %s", fullbundlepath, err)
		}
		umocicmd := exec.Command("umoci", "unpack", "--image", image+":"+tag, fullbundlepath)
		umocicmd.Dir = runcImagesPath
		log.Debugf("Creating OCI image %s", builder)
		output, err = umocicmd.CombinedOutput()
		if err != nil {
			log.Debugf("Unable to copy image %s : %s, %s", image, err, output)
			continue
		} else {
			log.Debugf("Image extract done : %s", output)
		}
	}
}
// Package service implements the Florence frontend HTTP handlers: static
// asset serving, HTML entry points with injected shared configuration, and
// a websocket endpoint used for client-side logging.
package service

import (
	"bufio"
	"bytes"
	"encoding/json"
	"mime"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/ONSdigital/florence/config"
	"github.com/ONSdigital/log.go/log"
	"github.com/gorilla/mux"
)

// generated files constants
const (
	assetStaticRoot  = "../dist/"
	assetLegacyIndex = "../dist/legacy-assets/index.html"
	assetRefactored  = "../dist/refactored.html"
)

// florenceLogEvent is a log event received from the Florence client over
// the websocket.
type florenceLogEvent struct {
	ServerTimestamp string      `json:"-"`
	ClientTimestamp time.Time   `json:"clientTimestamp"`
	Type            string      `json:"type"`
	Location        string      `json:"location"`
	InstanceID      string      `json:"instanceID"`
	Payload         interface{} `json:"payload"`
}

// florenceServerEvent is an event pushed from the server to the client.
type florenceServerEvent struct {
	Type    string      `json:"type"`
	Payload interface{} `json:"payload"`
}

// florenceVersionPayload carries the service version sent on connect.
type florenceVersionPayload struct {
	Version string `json:"version"`
}

// redirectToFlorence permanently redirects the caller to /florence.
func redirectToFlorence(w http.ResponseWriter, req *http.Request) {
	http.Redirect(w, req, "/florence", http.StatusMovedPermanently)
}

// staticFiles serves a built asset, honouring If-None-Match/ETag caching.
func staticFiles(w http.ResponseWriter, req *http.Request) {
	path := mux.Vars(req)["uri"]
	assetPath := assetStaticRoot + path

	// Only engage ETag handling when the tag could actually be computed;
	// the original ignored this error and compared against a garbage tag.
	etag, err := getAssetETag(assetPath)
	if err == nil {
		if hdr := req.Header.Get("If-None-Match"); len(hdr) > 0 && hdr == etag {
			w.WriteHeader(http.StatusNotModified)
			return
		}
	}

	b, err := getAsset(assetPath)
	if err != nil {
		log.Event(req.Context(), "error getting asset", log.ERROR, log.Error(err))
		w.WriteHeader(http.StatusNotFound)
		return
	}

	if etag != "" {
		w.Header().Set("ETag", etag)
	}
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(path)))
	w.WriteHeader(http.StatusOK)
	if _, err := w.Write(b); err != nil {
		log.Event(req.Context(), "error writing asset response", log.ERROR, log.Error(err))
	}
}

// legacyIndexFile returns a handler serving the legacy SPA entry point
// with the shared server config injected into a placeholder comment.
func legacyIndexFile(cfg *config.Config) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		log.Event(req.Context(), "getting legacy html file", log.INFO)

		b, err := getAsset(assetLegacyIndex)
		if err != nil {
			log.Event(req.Context(), "error getting legacy html file", log.ERROR, log.Error(err))
			w.WriteHeader(http.StatusNotFound)
			return
		}

		cfgJSON, err := json.Marshal(cfg.SharedConfig)
		if err != nil {
			log.Event(req.Context(), "error marshalling shared configuration", log.ERROR, log.Error(err))
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		// Substitute the placeholder exactly once so the client receives
		// the server-generated shared configuration.
		b = []byte(strings.Replace(string(b), "/* environment variables placeholder */", "/* server generated shared config */ "+string(cfgJSON), 1))

		w.Header().Set("Content-Type", "text/html")
		w.WriteHeader(http.StatusOK)
		if _, err := w.Write(b); err != nil {
			log.Event(req.Context(), "error writing legacy html response", log.ERROR, log.Error(err))
		}
	}
}

// websocketHandler upgrades the connection and consumes client events.
// Each message is "<id>:<type>:{json...}"; "log" events are echoed to the
// server log and acknowledged with an "ack" event.
func websocketHandler(serviceVersion string) func(w http.ResponseWriter, req *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		// upgrader is declared elsewhere in this package.
		c, err := upgrader.Upgrade(w, req, nil)
		if err != nil {
			log.Event(req.Context(), "error upgrading connection to websocket", log.ERROR, log.Error(err))
			return
		}
		defer c.Close()

		err = c.WriteJSON(florenceServerEvent{"version", florenceVersionPayload{Version: serviceVersion}})
		if err != nil {
			log.Event(req.Context(), "error writing version message", log.ERROR, log.Error(err))
			return
		}

		for {
			_, message, err := c.ReadMessage()
			if err != nil {
				log.Event(req.Context(), "error reading websocket message", log.ERROR, log.Error(err))
				break
			}

			// Read everything up to (and including) the first '{' — the
			// colon-separated event prefix.
			rdr := bufio.NewReader(bytes.NewReader(message))
			b, err := rdr.ReadBytes('{')
			if err != nil {
				log.Event(req.Context(), "error reading websocket bytes", log.WARN, log.Error(err), log.Data{"bytes": string(b)})
				continue
			}

			// Guard against malformed prefixes; the original indexed
			// tags[1] unconditionally and could panic on bad input.
			tags := strings.Split(string(b), ":")
			if len(tags) < 2 {
				log.Event(req.Context(), "malformed websocket event prefix", log.WARN, log.Data{"prefix": string(b)})
				continue
			}
			eventID := tags[0]
			eventType := tags[1]
			eventData := message[len(eventID)+len(eventType)+2:]

			switch eventType {
			case "log":
				var e florenceLogEvent
				e.ServerTimestamp = time.Now().UTC().Format("2006-01-02T15:04:05.000-0700Z")
				err = json.Unmarshal(eventData, &e)
				if err != nil {
					log.Event(req.Context(), "error unmarshalling websocket message", log.WARN, log.Error(err), log.Data{"data": string(eventData)})
					continue
				}
				log.Event(req.Context(), "client log", log.INFO, log.Data{"data": e})

				err = c.WriteJSON(florenceServerEvent{"ack", eventID})
				if err != nil {
					log.Event(req.Context(), "error writing websocket ack", log.WARN, log.Error(err))
				}
			default:
				log.Event(req.Context(), "unknown websocket event type", log.WARN, log.Data{"type": eventType, "data": string(eventData)})
			}
		}
	}
}
func refactoredIndexFile(cfg *config.Config) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { log.Event(req.Context(), "getting refactored html file", log.INFO) b, err := getAsset(assetRefactored) if err != nil { log.Event(req.Context(), "error getting refactored html file", log.ERROR, log.Error(err)) w.WriteHeader(404) return } cfgJSON, err := json.Marshal(cfg.SharedConfig) if err != nil { log.Event(req.Context(), "error marshalling shared configuration", log.ERROR, log.Error(err)) w.WriteHeader(500) return } b = []byte(strings.Replace(string(b), "/* environment variables placeholder */", "/* server generated shared config */ "+string(cfgJSON), 1)) w.Header().Set(`Content-Type`, "text/html") w.WriteHeader(200) w.Write(b) } } func DeleteHttpCookie() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { c := &http.Cookie{ Name: "access_token", Value: "", Path: "/", Expires: time.Unix(0, 0), } http.SetCookie(w, c) w.WriteHeader(http.StatusAccepted) } }
// Package sqlite provides the SQLite connection bootstrap for the
// bookkeeping application, including a helper that rebuilds the schema
// from scratch.
package sqlite

import (
	"database/sql"
	"errors"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

// ConnSqlite is the concrete SQLite connection provider.
type ConnSqlite struct {
}

// ConnSqliteInterface describes the operations offered by ConnSqlite.
type ConnSqliteInterface interface {
	SqliteConnInit() *sql.DB
	AutoDropDB() error
}

// SqliteConnInit opens the on-disk database and panics when the driver
// refuses the DSN; callers receive a ready *sql.DB handle.
func (sqliteConn *ConnSqlite) SqliteConnInit() *sql.DB {
	result, err := sql.Open("sqlite3", "./db/sqlite/pembukuan_db")
	if err != nil {
		panic(err)
	}
	return result
}

// AutoDropDB drops and recreates every application table plus the
// supporting foreign-key indexes, returning the first DDL error hit.
func (sqliteConn *ConnSqlite) AutoDropDB() error {
	conn := sqliteConn.SqliteConnInit()
	// Guard BEFORE any use: the original deferred conn.Close() first, so
	// a nil connection would have been dereferenced before the check.
	if conn == nil {
		return errors.New("connection failed to db")
	}
	defer func() {
		if err := conn.Close(); err != nil {
			panic(err)
		}
	}()

	// Drop/create statements run in the same order as before; a slice
	// replaces seven copy-pasted if/else chains.
	statements := []string{
		// customers table
		"drop table if exists customers",
		"CREATE TABLE IF NOT EXISTS `customers` (`id` INTEGER NOT NULL, `name` TEXT NOT NULL,`phone` TEXT NOT NULL UNIQUE,`email` TEXT UNIQUE,`address` TEXT NOT NULL,`created_at` TEXT NOT NULL,`updated_at` TEXT NOT NULL,`deleted_at` TEXT, PRIMARY KEY(`id`));",
		// user_types table
		"drop table if exists user_types",
		"CREATE TABLE IF NOT EXISTS `user_types` (`id` INTEGER NOT NULL, `name` TEXT NOT NULL, `created_at` TEXT NOT NULL, `updated_at` TEXT NOT NULL, `deleted_at` TEXT, PRIMARY KEY(`id`));",
		// invoices table
		"drop table if exists invoices",
		"CREATE TABLE IF NOT EXISTS `invoices` (`id` INTEGER NOT NULL,`customer_id` INTEGER NOT NULL,`user_id` INTEGER NOT NULL,`created_at` TEXT NOT NULL,`updated_at` TEXT NOT NULL,`deleted_at` TEXT,FOREIGN KEY(`user_id`) REFERENCES `users`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,FOREIGN KEY(`customer_id`) REFERENCES `customers`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,PRIMARY KEY(`id`));",
		// products table
		"drop table if exists products",
		"CREATE TABLE IF NOT EXISTS `products` (`id` INTEGER NOT NULL,`name` TEXT NOT NULL,`price` TEXT NOT NULL,`created_at` TEXT NOT NULL,`updated_at` TEXT NOT NULL,`deleted_at` TEXT,PRIMARY KEY(`id`));",
		// product_decreases table
		"drop table if exists product_decreases",
		"CREATE TABLE IF NOT EXISTS `product_decreases` (`id` INTEGER NOT NULL,`product_id` INTEGER NOT NULL,`quantity` INTEGER NOT NULL,`invoice_id` INTEGER NOT NULL,FOREIGN KEY(`product_id`) REFERENCES `products`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,FOREIGN KEY(`invoice_id`) REFERENCES `invoices`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,PRIMARY KEY(`id`));",
		// product_increases table
		"drop table if exists product_increases",
		"CREATE TABLE IF NOT EXISTS `product_increases` (`id` INTEGER NOT NULL,`product_id` INTEGER NOT NULL,`quantity` INTEGER NOT NULL,`user_id` INTEGER NOT NULL,`created_at` TEXT NOT NULL,`updated_at` TEXT NOT NULL,`deleted_at` TEXT,FOREIGN KEY(`user_id`) REFERENCES `users`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,FOREIGN KEY(`product_id`) REFERENCES `products`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,PRIMARY KEY(`id`));",
		// users table
		"drop table if exists users",
		"CREATE TABLE IF NOT EXISTS `users` (`id` INTEGER NOT NULL,`user_type_id` INTEGER NOT NULL,`username` TEXT NOT NULL UNIQUE,`password` TEXT NOT NULL,`created_at` TEXT NOT NULL,`updated_at`TEXT NOT NULL,`deleted_at` TEXT,FOREIGN KEY(`user_type_id`) REFERENCES `user_types`(`id`) ON UPDATE CASCADE ON DELETE CASCADE,PRIMARY KEY(`id`));",
		// relationship (foreign-key indexes)
		"CREATE INDEX IF NOT EXISTS `users_fkIdx_68` ON `users` (`user_type_id`);CREATE INDEX IF NOT EXISTS `product_increases_fkIdx_93` ON `product_increases` (`user_id`);CREATE INDEX IF NOT EXISTS `product_increases_fkIdx_88` ON `product_increases` (`product_id`);CREATE INDEX IF NOT EXISTS `product_decreases_fkIdx_95` ON `product_decreases` (`product_id`);CREATE INDEX IF NOT EXISTS `product_decreases_fkIdx_109` ON `product_decreases` (`invoice_id`);CREATE INDEX IF NOT EXISTS `invoices_fkIdx_96` ON `invoices` (`user_id`);CREATE INDEX IF NOT EXISTS `invoices_fkIdx_81` ON `invoices` (`customer_id`);",
	}
	for _, stmt := range statements {
		if _, err := conn.Exec(stmt); err != nil {
			return err
		}
	}
	return nil
}
package service import ( "context" "fmt" "net/http" "github.com/go-ocf/cloud/cloud2cloud-gateway/store" "github.com/gorilla/mux" ) type retrieveDeviceSubscriptionHandler struct { s store.Subscription } func (c *retrieveDeviceSubscriptionHandler) Handle(ctx context.Context, iter store.SubscriptionIter) error { for iter.Next(ctx, &c.s) { return nil } return fmt.Errorf("not found") } func (rh *RequestHandler) retrieveDeviceSubscription(w http.ResponseWriter, r *http.Request) (int, error) { routeVars := mux.Vars(r) deviceID := routeVars[deviceIDKey] subscriptionID := routeVars[subscriptionIDKey] err := rh.IsAuthorized(r.Context(), r, deviceID) if err != nil { return http.StatusUnauthorized, err } res := retrieveDeviceSubscriptionHandler{} err = rh.store.LoadSubscriptions(r.Context(), store.SubscriptionQuery{SubscriptionID: subscriptionID}, &res) if err != nil { return http.StatusBadRequest, fmt.Errorf("cannot load subscription %v: %w", subscriptionID, err) } err = jsonResponseWriterEncoder(w, SubscriptionResponse{ SubscriptionID: subscriptionID, }, http.StatusOK) if err != nil { return http.StatusBadRequest, fmt.Errorf("cannot write response: %w", err) } return http.StatusOK, nil } func (rh *RequestHandler) RetrieveDeviceSubscription(w http.ResponseWriter, r *http.Request) { statusCode, err := rh.retrieveDeviceSubscription(w, r) if err != nil { logAndWriteErrorResponse(fmt.Errorf("cannot retrieve device subscription: %w", err), statusCode, w) } }
package main import ( "context" "fmt" "os" "github.com/libp2p/go-libp2p" circuit "github.com/libp2p/go-libp2p-circuit" quic "github.com/libp2p/go-libp2p-quic-transport" "github.com/libp2p/go-tcp-transport" ma "github.com/multiformats/go-multiaddr" ) func main() { publicIP := os.Getenv("RELAY_IP") factory := func(addrs []ma.Multiaddr) []ma.Multiaddr { if len(publicIP) != 0 { tcp := fmt.Sprintf("/ip4/%s/tcp/12001", publicIP) quic := fmt.Sprintf("/ip4/%s/udp/12001/quic", publicIP) return append(addrs, ma.StringCast(tcp), ma.StringCast(quic)) } return addrs } // A public relay server that supports TCP & QUIC and listens on port 12001 ctx := context.Background() h1, err := libp2p.New(ctx, libp2p.ForceReachabilityPublic(), libp2p.EnableRelay(circuit.OptHop), libp2p.AddrsFactory(factory), libp2p.Transport(tcp.NewTCPTransport), libp2p.Transport(quic.NewTransport), libp2p.ListenAddrs(ma.StringCast("/ip4/0.0.0.0/tcp/12001"), ma.StringCast("/ip4/0.0.0.0/udp/12001/quic"))) if err != nil { panic(err) } fmt.Println("\n relay server peerID: ", h1.ID().Pretty()) fmt.Println("\n relay server addresses:") for _, a := range h1.Addrs() { fmt.Println(a) } // Relay connections for { } }
package telehash import ( "flag" "fmt" "log" "net" "telehash/exchange" "telehash/telex" ) var ( port = flag.Int("port", 4242, "Specify the UDP port to listen on") ) func init() { flag.Parse() } func main() { exchange, err := listener.New((*port)) if err != nil { log.Fatal(err) } defer exchange.Close() for { msg, err := exchange.Read() if err != nil { log.Fatal(err) } fmt.Printf(msg) } }
package utils import "os" func GetConfig() string { environment := os.Getenv("ENV") if len(environment) == 0 { environment = "development" } return environment }
package keystone import ( "errors" "fmt" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/utils" ) func createIdentityV3Provider(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { client, err := openstack.NewClient(options.IdentityEndpoint) if err != nil { return nil, err } versions := []*utils.Version{ {ID: "v3.0", Priority: 30, Suffix: "/v3/"}, } chosen, _, err := utils.ChooseVersion(client, versions) if err != nil { return nil, fmt.Errorf("Unable to find identity API v3 version : %v", err) } switch chosen.ID { case "v3.0": return client, nil default: return nil, fmt.Errorf("Unsupported identity API version: %s", chosen.ID) } } func createKeystoneClient(authURL string) (*gophercloud.ServiceClient, error) { if authURL == "" { return nil, errors.New("Auth URL is empty") } opts := gophercloud.AuthOptions{IdentityEndpoint: authURL} provider, err := createIdentityV3Provider(opts) if err != nil { return nil, err } client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) if err != nil { return nil, errors.New("Failed to authenticate") } if err != nil { return nil, errors.New("Failed to authenticate") } client.IdentityBase = client.IdentityEndpoint client.Endpoint = client.IdentityEndpoint return client, nil } func NewKeystoneAuthenticator(authURL string) (*KeystoneAuthenticator, error) { client, err := createKeystoneClient(authURL) if err != nil { return nil, err } return &KeystoneAuthenticator{client: client}, nil }
package apicore import ( "testing" ) func TestAddMiddleware(t *testing.T) { input := map[string]int{"t1": 1, "t2": 4, "t3": 3, "t4": 2} want := []string{"t1", "t4", "t3"} for key, value := range input { AddMiddleware(func() MiddleWare { return t_middleware{name: key, index: value} }) } index := 0 for i, _ := range want { if name := middlewareMap[i].(t_middleware).name; name != want[index] { t.Log("want:" + want[index] + ",got:" + name) } index++ } } type t_middleware struct { name string index int } func (t t_middleware) Before(ctx Context) { if t.index == 3 { ctx.Break() } } func (t t_middleware) After(ctx Context) { panic("implement me") } func (t t_middleware) Index() int { return t.index }
/* * untangle.go * This is the main query handling code for the Untangle DNS filter proxy * We lookup the reputation and categories for inbound queries and then * consult the customer policy to make the allow or block decision. */ package untangle import ( "bufio" "context" "encoding/json" "fmt" "net" "time" "github.com/fsnotify/fsnotify" "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/request" "github.com/miekg/dns" "github.com/caddyserver/caddy" ) // Untangle allows CoreDNS to submit DNS queries to a filter // daemon and return a block address or allow normal processing type Untangle struct { Next plugin.Handler DaemonAddress string DaemonPort int } type Category struct { Catid int Conf int } type Response struct { Url string Reputation int Cats []Category A1cat bool Source string } // ServeDNS implements the plugin.Handler interface. func (ut Untangle) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { state := request.Request{W: w, Req: r} // we only care about queries with INET class if state.QClass() != dns.ClassINET { return plugin.NextOrFailure(ut.Name(), ut.Next, ctx, w, r) } // we only care about queries for A and AAAA records if state.QType() != dns.TypeA && state.QType() != dns.TypeAAAA { return plugin.NextOrFailure(ut.Name(), ut.Next, ctx, w, r) } log.Debugf("QUERY: name:%s client:%s\n", state.Name(), state.IP()) // pass the query name to the filterLookup function daemon := fmt.Sprintf("%s:%d", ut.DaemonAddress, ut.DaemonPort) filter := filterLookup(state.Name(), daemon) // if we get nothing from the filter we are done if filter == nil { return plugin.NextOrFailure(ut.Name(), ut.Next, ctx, w, r) } // pass the name, client, and policy result to the checkPolicy function // and get back the address of the block server or nil to allow blocker := checkPolicy(state.Name(), state.IP(), filter) // emtpy result from checkPolicy means we allow the query if len(blocker) == 0 { 
return plugin.NextOrFailure(ut.Name(), ut.Next, ctx, w, r) } // checkPolicy gave us a result so we need to block the query a := new(dns.Msg) a.SetReply(r) a.Authoritative = true var rr dns.RR if state.QType() == dns.TypeA { rr = new(dns.A) rr.(*dns.A).Hdr = dns.RR_Header{Name: state.QName(), Rrtype: dns.TypeA, Class: state.QClass()} rr.(*dns.A).A = net.ParseIP(blocker).To4() a.Answer = []dns.RR{rr} } if state.QType() == dns.TypeAAAA { rr = new(dns.AAAA) rr.(*dns.AAAA).Hdr = dns.RR_Header{Name: state.QName(), Rrtype: dns.TypeAAAA, Class: state.QClass()} rr.(*dns.AAAA).AAAA = net.ParseIP(blocker) a.Answer = []dns.RR{rr} } w.WriteMsg(a) return 0, nil } // Name implements the Handler interface. func (ut Untangle) Name() string { return "untangle" } func filterLookup(qname string, server string) *Response { var response []Response // connect to this socket conn, err := net.DialTimeout("tcp", server, time.Second) if err != nil { log.Errorf("Error connecting to daemon %s: %v\n", server, err) return nil } // make sure the socket is closed defer conn.Close() // send to socket command := fmt.Sprintf("{\"url/getinfo\":{\"urls\":[\"" + qname + "\"],\"a1cat\":1, \"reputation\":1}}" + "\r\n") log.Debugf("DAEMON COMMAND: %s\n", command) conn.Write([]byte(command)) // listen for reply message, err := bufio.NewReader(conn).ReadString('\n') if err != nil { log.Errorf("Error reading from daemon: %v\n", err) return nil } log.Debugf("DAEMON RESPONSE: %s\n", message) json.Unmarshal([]byte(message), &response) return &response[0] } func hook(event caddy.EventName, info interface{}) error { if event != caddy.InstanceStartupEvent { return nil } instance := info.(*caddy.Instance) // this should be an instance. 
ok to panic if not /* go func() { tick := time.NewTicker(10 * time.Second) for { select { case <-tick.C: corefile, err := caddy.LoadCaddyfile(instance.Caddyfile().ServerType()) if err != nil { continue } _, err = instance.Restart(corefile) if err != nil { log.Errorf("Corefile changed but reload failed: %s", err) continue } return } } }() */ // creates a new file watcher watcher, err := fsnotify.NewWatcher() if err != nil { fmt.Println("ERROR", err) } defer watcher.Close() // done := make(chan bool) // go func() { for { select { // watch for events case event := <-watcher.Events: fmt.Printf("EVENT! %#v\n", event) corefile, err := caddy.LoadCaddyfile(instance.Caddyfile().ServerType()) if err != nil { continue } _, err = instance.Restart(corefile) if err != nil { log.Errorf("Corefile changed but reload failed: %s", err) continue } // watch for errors case err := <-watcher.Errors: fmt.Println("ERROR", err) } } }() // out of the box fsnotify can watch a single file, or a single directory if err := watcher.Add("/etc/dnsproxy"); err != nil { fmt.Println("ERROR", err) } <-done return nil }
package main func main() { ch := make(chan int) go func(sc chan<- int) { for i := 0; i < 100; i++ { ch <- i } close(ch) }(ch) for v := range ch { println("Value:", v) } println("done") }
package main

import (
	"github.com/PuerkitoBio/goquery"
	"github.com/labstack/echo"
	"github.com/labstack/echo/engine"
	"github.com/labstack/echo/test"
	"github.com/stretchr/testify/assert"
	"net/url"
	"os"
	"strings"
	"testing"
)

// Shared test fixtures, initialised in TestMain.
var server *echo.Echo
var testUser string = "testUser"
var testPW string = "testPW"

// scrapeLoginTicket GETs path on the test server and scrapes the login
// ticket (the "lt" hidden input) out of the rendered login form.
func scrapeLoginTicket(path string) (string, *goquery.Document) {
	req := test.NewRequest(echo.GET, path, nil)
	res := test.NewResponseRecorder()
	server.ServeHTTP(req, res)
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		return "", doc
	}
	return doc.Find("input[name=lt]").AttrOr("value", ""), doc
}

// performLogin POSTs the login form (ticket plus credentials, and the
// optional service URL) and returns the raw recorded response.
func performLogin(ticket, service, username, password string) engine.Response {
	form := url.Values{
		"lt":       []string{ticket},
		"username": []string{username},
		"password": []string{password},
	}
	if service != "" {
		form.Set("service", service)
	}
	req := test.NewRequest(echo.POST, "/login", strings.NewReader(form.Encode()))
	req.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	res := test.NewResponseRecorder()
	server.ServeHTTP(req, res)
	return res.Response
}

// TestLoginRoutine walks the happy-path CAS login flow end to end:
// scrape a ticket, log in, and follow the service redirect.
func TestLoginRoutine(t *testing.T) {
	service := "https://myservice.com/auth/"

	// user is sent to CAS from webapp
	ticket, doc := scrapeLoginTicket("/login?service=" + service)
	assert.NotEmpty(t, ticket)
	assert.Equal(t, service, doc.Find("input[name=service]").AttrOr("value", ""))

	// user logs in successfully
	res := performLogin(ticket, service, testUser, testPW)
	assert.Equal(t, 302, res.Status())
	assert.Contains(t, res.Header(), "Location")

	// CAS authenticates to webapp
	redirect, err := url.Parse(res.Header().Get("Location"))
	assert.NoError(t, err)
	assert.NotEmpty(t, redirect.Query().Get("ticket"))
}

// TestLoginRequestBase exercises /login with no service parameter.
// NOTE(review): this test currently performs the request but asserts
// nothing — the two SHOULD cases below are unimplemented.
func TestLoginRequestBase(t *testing.T) {
	// if service is not specified and session does not exist, SHOULD request credentials
	req := test.NewRequest(echo.GET, "/login", nil)
	res := test.NewResponseRecorder()
	server.ServeHTTP(req, res)
	// if service is not specified and session exists, SHOULD display "already logged in"
}

// TestLoginAccept covers the credential-acceptance branch.
// NOTE(review): partially implemented — the login/redirect assertions are
// still commented out.
func TestLoginAccept(t *testing.T) {
	// fail: return to login as credential requestor
	// success (service specified): redirect to service with ticket in GET request
	ticket, _ := scrapeLoginTicket("/login")
	assert.NotEmpty(t, ticket)
	// res := performLogin(ticket, testUser, testPW)
	// assert.Equal(t, 302, )
	// success (service not specified): display "successfully logged in" message
}

// TestLoginRequestRenew is a placeholder for the renew=true flow.
func TestLoginRequestRenew(t *testing.T) {
}

// TestMain builds a throw-away SQLite-backed CAS server, runs the suite,
// and removes the temporary database afterwards.
func TestMain(m *testing.M) {
	cas := NewCAS()
	createTestData("/tmp/casablanca-test.sqlite3", testUser, testPW, "testuser@email.test")
	backend, err := NewDatabaseBackend(map[string]interface{}{
		"driver":       "sqlite3",
		"connection":   "/tmp/casablanca-test.sqlite3",
		"table":        "users",
		"username_col": "username",
		"password_col": "password",
		"extra": map[string]interface{}{
			"email": "email",
		},
	})
	if err != nil {
		panic(err)
	}
	cas.backends = append(cas.backends, backend)
	server = createServer(cas)
	server.SetDebug(true)
	result := m.Run()
	os.Remove("/tmp/casablanca-test.sqlite3")
	os.Exit(result)
}
// Package exiftool wraps the exiftool command-line program.
package exiftool

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os/exec"
	"sync"
)

// Stayopen abstracts running exiftool with `-stay_open` to greatly improve
// performance. Remember to call Stayopen.Stop() to signal exiftool to shutdown
// to avoid zombie perl processes.
type Stayopen struct {
	sync.Mutex
	cmd *exec.Cmd

	// channels for passing data to the input/output of
	// the running exiftool
	in  chan string
	out chan []byte

	// waits for stdin/stdout goroutines to finish when stopping
	waitEnd sync.WaitGroup
}

// Extract calls exiftool on the supplied filename and returns the parsed
// metadata. It fails with "Stopped" once Stop has been called.
func (e *Stayopen) Extract(filename string) (*Metadata, error) {
	e.Lock()
	defer e.Unlock()

	if e.cmd == nil {
		return nil, errors.New("Stopped")
	}

	// send it and wait for it to come back from exiftool
	e.in <- filename
	data := <-e.out
	return parse(data)
}

// Stop shuts the worker goroutines down and signals exiftool to exit.
func (e *Stayopen) Stop() {
	e.Lock()
	defer e.Unlock()

	// closing the in channel will trigger a shutdown;
	// wait for both goroutines to finish before finishing
	close(e.in)
	e.waitEnd.Wait()
	e.cmd = nil
}

// NewStayopen launches exiftool in -stay_open mode and starts the
// goroutines that feed its stdin and parse its stdout.
func NewStayopen(exiftool string) (*Stayopen, error) {
	stayopen := &Stayopen{
		in:  make(chan string),
		out: make(chan []byte),
	}

	stayopen.cmd = exec.Command(exiftool, "-stay_open", "True", "-@", "-")

	// The original discarded these errors, which would nil-deref below on
	// a pipe failure.
	stdin, err := stayopen.cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("getting exiftool stdin pipe: %w", err)
	}
	stdout, err := stayopen.cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("getting exiftool stdout pipe: %w", err)
	}

	var startReady sync.WaitGroup
	startReady.Add(2)

	// Register both workers BEFORE they run: calling waitEnd.Add from
	// inside each goroutine (as the original did) races with a concurrent
	// Stop() -> waitEnd.Wait().
	stayopen.waitEnd.Add(2)

	if err := stayopen.cmd.Start(); err != nil {
		return nil, fmt.Errorf("starting exiftool in stay_open mode: %w", err)
	}

	// send commands to exiftool's stdin
	go func() {
		defer stayopen.waitEnd.Done()
		startReady.Done()
		for filename := range stayopen.in {
			fmt.Fprintln(stdin, "-json")
			fmt.Fprintln(stdin, "-binary")
			fmt.Fprintln(stdin, "--printConv")
			fmt.Fprintln(stdin, "-groupHeadings")
			fmt.Fprintln(stdin, filename)
			fmt.Fprintln(stdin, "-execute")
		}

		// write message telling it to close
		// but don't actually wait for the command to stop
		fmt.Fprintln(stdin, "-stay_open")
		fmt.Fprintln(stdin, "False")
		fmt.Fprintln(stdin, "-execute")

		// closing stdout will stop the scanner goroutine
		stdout.Close()
	}()

	// scan exiftool's stdout, parse out JSON messages
	// and publish them on the out channel
	go func() {
		defer stayopen.waitEnd.Done()
		scanner := bufio.NewScanner(stdout)
		scanner.Split(splitReadyToken)
		startReady.Done()
		for scanner.Scan() {
			results := scanner.Bytes()
			sendResults := make([]byte, len(results))
			copy(sendResults, results)
			stayopen.out <- sendResults
		}
		close(stayopen.out)
	}()

	// wait for both go-routines to startup
	startReady.Wait()
	return stayopen, nil
}

// splitReadyToken is a bufio.SplitFunc that yields chunks delimited by
// exiftool's "\n{ready}" marker.
func splitReadyToken(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if i := bytes.Index(data, []byte("\n{ready}")); i >= 0 {
		return i + 8, data[:i], nil
	}
	if atEOF {
		return len(data), data, io.EOF
	}
	return 0, nil, nil
}
package cooker import ( "fmt" "github.com/ProfessorMc/Recipe/spoilers/appliance" "github.com/ProfessorMc/Recipe/spoilers/dish" "sync" "time" ) type HeatOMatic struct { hasPower bool isOn bool currentTemp float32 busy bool mtx sync.Mutex } func NewHeatOMatic() *HeatOMatic { newHeatOMatic := &HeatOMatic{ } return newHeatOMatic } func (h *HeatOMatic) SetPower(power bool) { h.hasPower = power } func (h *HeatOMatic) TurnOn() error { if !h.hasPower { return appliance.BuildApplianceError("Not Plugged In", h) } h.isOn = true return nil } func (h *HeatOMatic) TurnOff() error { h.isOn = false return nil } func (h HeatOMatic) IsOn() bool { return h.isOn } func (HeatOMatic) GetName() string { return "Heat-O-Matic" } func (HeatOMatic) GetBrand() string { return "Brandly" } func (h *HeatOMatic) CookDish(d *dish.Dish) error { var wg sync.WaitGroup wg.Add(1) var err error go func() { err = h.CookDishAsync(d, &wg) }() wg.Wait() return err } func (h *HeatOMatic) CookDishAsync(d *dish.Dish, wg *sync.WaitGroup) error { defer wg.Done() if !h.isOn { return appliance.BuildApplianceError("appliance isn't on", h) } h.mtx.Lock() defer h.mtx.Unlock() fmt.Printf("[Appliance %s] Handling Dish: %s\n", h.GetName(), d.String()) h.preheatOven(d.GetCookTemp()) fmt.Printf("[Appliance %s] Cooking Dish: %s\n", h.GetName(), d.String()) <- time.After(d.GetCookTime()) fmt.Printf("[Appliance %s] Dish Complete: %s\n", h.GetName(), d.String()) return nil } func (h *HeatOMatic) preheatOven(temp float32) { fmt.Printf("Preheating %s to %v degrees\n", h.GetName(), temp) <- time.After(5 * time.Second) }
package pain

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document01800101 is the XML root wrapper for a pain.018.001.01
// MandateSuspensionRequest message.
type Document01800101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:pain.018.001.01 Document"`
	Message *MandateSuspensionRequestV01 `xml:"MndtSspnsnReq"`
}

// AddMessage allocates the wrapped message and returns it so the caller
// can populate it.
func (d *Document01800101) AddMessage() *MandateSuspensionRequestV01 {
	d.Message = &MandateSuspensionRequestV01{}
	return d.Message
}

// Scope
// The MandateSuspensionRequest message is sent by the initiator of the request to its agent. The initiator can either be the debtor, debtor agent, creditor or creditor agent.
// A MandateSuspensionRequest message is used to request the suspension of an existing mandate until the suspension is lifted.
// Usage
// The MandateSuspensionRequest message can contain one or more suspension requests.
// The messages can be exchanged between creditor and creditor agent or debtor and debtor agent and between creditor agent and debtor agent.
// The MandateSuspensionRequest message can be used in domestic and cross-border scenarios.
type MandateSuspensionRequestV01 struct {

	// Set of characteristics to identify the message and parties playing a role in the mandate suspension request, but which are not part of the mandate.
	GroupHeader *iso20022.GroupHeader47 `xml:"GrpHdr"`

	// Set of elements used to provide information on the suspension request of the mandate.
	UnderlyingSuspensionDetails []*iso20022.MandateSuspension1 `xml:"UndrlygSspnsnDtls"`

	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}

// AddGroupHeader allocates the group header and returns it for population.
func (m *MandateSuspensionRequestV01) AddGroupHeader() *iso20022.GroupHeader47 {
	m.GroupHeader = &iso20022.GroupHeader47{}
	return m.GroupHeader
}

// AddUnderlyingSuspensionDetails appends a fresh suspension-details entry
// and returns it for population.
func (m *MandateSuspensionRequestV01) AddUnderlyingSuspensionDetails() *iso20022.MandateSuspension1 {
	item := &iso20022.MandateSuspension1{}
	m.UnderlyingSuspensionDetails = append(m.UnderlyingSuspensionDetails, item)
	return item
}

// AddSupplementaryData appends a fresh supplementary-data entry and
// returns it for population.
func (m *MandateSuspensionRequestV01) AddSupplementaryData() *iso20022.SupplementaryData1 {
	item := &iso20022.SupplementaryData1{}
	m.SupplementaryData = append(m.SupplementaryData, item)
	return item
}
// Copyright (C) 2019 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sysprop import ( "reflect" "android/soong/android" "android/soong/cc" "android/soong/java" "io/ioutil" "os" "strings" "testing" "github.com/google/blueprint" "github.com/google/blueprint/proptools" ) var buildDir string func setUp() { var err error buildDir, err = ioutil.TempDir("", "soong_sysprop_test") if err != nil { panic(err) } } func tearDown() { os.RemoveAll(buildDir) } func TestMain(m *testing.M) { run := func() int { setUp() defer tearDown() return m.Run() } os.Exit(run()) } func testContext(config android.Config) *android.TestContext { ctx := android.NewTestArchContext() java.RegisterJavaBuildComponents(ctx) java.RegisterAppBuildComponents(ctx) java.RegisterSystemModulesBuildComponents(ctx) ctx.PreArchMutators(android.RegisterDefaultsPreArchMutators) ctx.PreArchMutators(func(ctx android.RegisterMutatorsContext) { ctx.BottomUp("sysprop_deps", syspropDepsMutator).Parallel() }) cc.RegisterRequiredBuildComponentsForTest(ctx) ctx.PreDepsMutators(func(ctx android.RegisterMutatorsContext) { ctx.BottomUp("sysprop_java", java.SyspropMutator).Parallel() }) ctx.RegisterModuleType("sysprop_library", syspropLibraryFactory) ctx.Register(config) return ctx } func run(t *testing.T, ctx *android.TestContext, config android.Config) { t.Helper() _, errs := ctx.ParseFileList(".", []string{"Android.bp"}) android.FailIfErrored(t, errs) _, errs = 
ctx.PrepareBuildActions(config) android.FailIfErrored(t, errs) } func testConfig(env map[string]string, bp string, fs map[string][]byte) android.Config { bp += cc.GatherRequiredDepsForTest(android.Android) mockFS := map[string][]byte{ "a.java": nil, "b.java": nil, "c.java": nil, "d.cpp": nil, "api/sysprop-platform-current.txt": nil, "api/sysprop-platform-latest.txt": nil, "api/sysprop-platform-on-product-current.txt": nil, "api/sysprop-platform-on-product-latest.txt": nil, "api/sysprop-vendor-current.txt": nil, "api/sysprop-vendor-latest.txt": nil, "api/sysprop-odm-current.txt": nil, "api/sysprop-odm-latest.txt": nil, "framework/aidl/a.aidl": nil, // For framework-res, which is an implicit dependency for framework "AndroidManifest.xml": nil, "build/make/target/product/security/testkey": nil, "build/soong/scripts/jar-wrapper.sh": nil, "build/make/core/proguard.flags": nil, "build/make/core/proguard_basic_keeps.flags": nil, "jdk8/jre/lib/jce.jar": nil, "jdk8/jre/lib/rt.jar": nil, "jdk8/lib/tools.jar": nil, "bar-doc/a.java": nil, "bar-doc/b.java": nil, "bar-doc/IFoo.aidl": nil, "bar-doc/known_oj_tags.txt": nil, "external/doclava/templates-sdk": nil, "cert/new_cert.x509.pem": nil, "cert/new_cert.pk8": nil, "android/sysprop/PlatformProperties.sysprop": nil, "com/android/VendorProperties.sysprop": nil, "com/android2/OdmProperties.sysprop": nil, } for k, v := range fs { mockFS[k] = v } config := java.TestConfig(buildDir, env, bp, mockFS) config.TestProductVariables.DeviceSystemSdkVersions = []string{"28"} config.TestProductVariables.DeviceVndkVersion = proptools.StringPtr("current") config.TestProductVariables.Platform_vndk_version = proptools.StringPtr("VER") return config } func test(t *testing.T, bp string) *android.TestContext { t.Helper() config := testConfig(nil, bp, nil) ctx := testContext(config) run(t, ctx, config) return ctx } func TestSyspropLibrary(t *testing.T) { ctx := test(t, ` sysprop_library { name: "sysprop-platform", apex_available: 
["//apex_available:platform"], srcs: ["android/sysprop/PlatformProperties.sysprop"], api_packages: ["android.sysprop"], property_owner: "Platform", vendor_available: true, host_supported: true, } sysprop_library { name: "sysprop-platform-on-product", srcs: ["android/sysprop/PlatformProperties.sysprop"], api_packages: ["android.sysprop"], property_owner: "Platform", product_specific: true, } sysprop_library { name: "sysprop-vendor", srcs: ["com/android/VendorProperties.sysprop"], api_packages: ["com.android"], property_owner: "Vendor", product_specific: true, vendor_available: true, } sysprop_library { name: "sysprop-odm", srcs: ["com/android2/OdmProperties.sysprop"], api_packages: ["com.android2"], property_owner: "Odm", device_specific: true, } java_library { name: "java-platform", srcs: ["c.java"], sdk_version: "system_current", libs: ["sysprop-platform"], } java_library { name: "java-platform-private", srcs: ["c.java"], platform_apis: true, libs: ["sysprop-platform"], } java_library { name: "java-product", srcs: ["c.java"], sdk_version: "system_current", product_specific: true, libs: ["sysprop-platform", "sysprop-vendor"], } java_library { name: "java-vendor", srcs: ["c.java"], sdk_version: "system_current", soc_specific: true, libs: ["sysprop-platform", "sysprop-vendor"], } cc_library { name: "cc-client-platform", srcs: ["d.cpp"], static_libs: ["sysprop-platform"], } cc_library_static { name: "cc-client-platform-static", srcs: ["d.cpp"], whole_static_libs: ["sysprop-platform"], } cc_library { name: "cc-client-product", srcs: ["d.cpp"], product_specific: true, static_libs: ["sysprop-platform-on-product", "sysprop-vendor"], } cc_library { name: "cc-client-vendor", srcs: ["d.cpp"], soc_specific: true, static_libs: ["sysprop-platform", "sysprop-vendor"], } cc_library { name: "libbase", host_supported: true, } cc_library_headers { name: "libbase_headers", vendor_available: true, recovery_available: true, } cc_library { name: "liblog", no_libcrt: true, nocrt: true, 
system_shared_libs: [], recovery_available: true, host_supported: true, } cc_binary_host { name: "hostbin", static_libs: ["sysprop-platform"], } llndk_library { name: "liblog", symbol_file: "", } java_library { name: "sysprop-library-stub-platform", sdk_version: "core_current", } java_library { name: "sysprop-library-stub-vendor", soc_specific: true, sdk_version: "core_current", } `) // Check for generated cc_library for _, variant := range []string{ "android_vendor.VER_arm_armv7-a-neon_shared", "android_vendor.VER_arm_armv7-a-neon_static", "android_vendor.VER_arm64_armv8-a_shared", "android_vendor.VER_arm64_armv8-a_static", } { ctx.ModuleForTests("libsysprop-platform", variant) ctx.ModuleForTests("libsysprop-vendor", variant) ctx.ModuleForTests("libsysprop-odm", variant) } for _, variant := range []string{ "android_arm_armv7-a-neon_shared", "android_arm_armv7-a-neon_static", "android_arm64_armv8-a_shared", "android_arm64_armv8-a_static", } { library := ctx.ModuleForTests("libsysprop-platform", variant).Module().(*cc.Module) expectedApexAvailableOnLibrary := []string{"//apex_available:platform"} if !reflect.DeepEqual(library.ApexProperties.Apex_available, expectedApexAvailableOnLibrary) { t.Errorf("apex available property on libsysprop-platform must be %#v, but was %#v.", expectedApexAvailableOnLibrary, library.ApexProperties.Apex_available) } // core variant of vendor-owned sysprop_library is for product ctx.ModuleForTests("libsysprop-vendor", variant) } ctx.ModuleForTests("sysprop-platform", "android_common") ctx.ModuleForTests("sysprop-platform_public", "android_common") ctx.ModuleForTests("sysprop-vendor", "android_common") // Check for exported includes coreVariant := "android_arm64_armv8-a_static" vendorVariant := "android_vendor.VER_arm64_armv8-a_static" platformInternalPath := "libsysprop-platform/android_arm64_armv8-a_static/gen/sysprop/include" platformPublicCorePath := "libsysprop-platform/android_arm64_armv8-a_static/gen/sysprop/public/include" 
platformPublicVendorPath := "libsysprop-platform/android_vendor.VER_arm64_armv8-a_static/gen/sysprop/public/include" platformOnProductPath := "libsysprop-platform-on-product/android_arm64_armv8-a_static/gen/sysprop/public/include" vendorInternalPath := "libsysprop-vendor/android_vendor.VER_arm64_armv8-a_static/gen/sysprop/include" vendorPublicPath := "libsysprop-vendor/android_arm64_armv8-a_static/gen/sysprop/public/include" platformClient := ctx.ModuleForTests("cc-client-platform", coreVariant) platformFlags := platformClient.Rule("cc").Args["cFlags"] // platform should use platform's internal header if !strings.Contains(platformFlags, platformInternalPath) { t.Errorf("flags for platform must contain %#v, but was %#v.", platformInternalPath, platformFlags) } platformStaticClient := ctx.ModuleForTests("cc-client-platform-static", coreVariant) platformStaticFlags := platformStaticClient.Rule("cc").Args["cFlags"] // platform-static should use platform's internal header if !strings.Contains(platformStaticFlags, platformInternalPath) { t.Errorf("flags for platform-static must contain %#v, but was %#v.", platformInternalPath, platformStaticFlags) } productClient := ctx.ModuleForTests("cc-client-product", coreVariant) productFlags := productClient.Rule("cc").Args["cFlags"] // Product should use platform's and vendor's public headers if !strings.Contains(productFlags, platformOnProductPath) || !strings.Contains(productFlags, vendorPublicPath) { t.Errorf("flags for product must contain %#v and %#v, but was %#v.", platformPublicCorePath, vendorPublicPath, productFlags) } vendorClient := ctx.ModuleForTests("cc-client-vendor", vendorVariant) vendorFlags := vendorClient.Rule("cc").Args["cFlags"] // Vendor should use platform's public header and vendor's internal header if !strings.Contains(vendorFlags, platformPublicVendorPath) || !strings.Contains(vendorFlags, vendorInternalPath) { t.Errorf("flags for vendor must contain %#v and %#v, but was %#v.", platformPublicVendorPath, 
vendorInternalPath, vendorFlags) } // Java modules linking against system API should use public stub javaSystemApiClient := ctx.ModuleForTests("java-platform", "android_common") publicStubFound := false ctx.VisitDirectDeps(javaSystemApiClient.Module(), func(dep blueprint.Module) { if dep.Name() == "sysprop-platform_public" { publicStubFound = true } }) if !publicStubFound { t.Errorf("system api client should use public stub") } }
package irc

import (
	"strings"
	"time"

	"github.com/goshuirc/irc-go/ircmsg"
	"github.com/goshuirc/irc-go/ircutils"

	"awesome-dragon.science/go/goGoGameBot/pkg/event"
	"awesome-dragon.science/go/goGoGameBot/pkg/util"
)

// RawEvent represents an incoming raw IRC Line that needs to be handled
type RawEvent struct {
	event.BaseEvent
	Line ircmsg.IrcMessage // the parsed IRC line this event wraps
	Time time.Time         // when the line was received
}

// CommandIs returns whether or not the command on the Line contained in the RawEvent matches any of the passed command
// names. The comparison is an exact (case-sensitive) string match.
func (r *RawEvent) CommandIs(names ...string) bool {
	for _, n := range names {
		if n == r.Line.Command {
			return true
		}
	}

	return false
}

// NewRawEvent creates a RawEvent with the given name and Line.
// The event name is normalised to upper case.
func NewRawEvent(name string, line ircmsg.IrcMessage, tme time.Time) *RawEvent {
	return &RawEvent{event.BaseEvent{Name_: strings.ToUpper(name)}, line, tme}
}

// MessageEvent represents an IRC authUser message, both NOTICE and PRIVMSGs
type MessageEvent struct {
	*RawEvent
	IsNotice bool              // true when the line is a NOTICE rather than a PRIVMSG
	Source   ircutils.UserHost // parsed nick!user@host of the sender
	Channel  string            // first parameter: the message target
	Message  string            // second parameter: the message text
}

// NewMessageEvent creates a MessageEvent with the given data.
func NewMessageEvent(name string, line ircmsg.IrcMessage, tme time.Time) *MessageEvent { return &MessageEvent{ NewRawEvent(name, line, tme), line.Command == "NOTICE", ircutils.ParseUserhost(line.Prefix), util.IdxOrEmpty(line.Params, 0), util.IdxOrEmpty(line.Params, 1), } } // JoinEvent represents an IRC channel JOIN type JoinEvent struct { *RawEvent Source ircutils.UserHost Channel string } // NewJoinEvent creates a JoinEvent with the given data func NewJoinEvent(name string, line ircmsg.IrcMessage, tme time.Time) *JoinEvent { return &JoinEvent{ NewRawEvent(name, line, tme), ircutils.ParseUserhost(line.Prefix), util.IdxOrEmpty(line.Params, 0), } } // PartEvent represents an IRC channel PART type PartEvent struct { *JoinEvent Message string } // NewPartEvent creates a PartEvent with the given data func NewPartEvent(name string, line ircmsg.IrcMessage, tme time.Time) *PartEvent { return &PartEvent{ JoinEvent: NewJoinEvent(name, line, tme), Message: util.IdxOrEmpty(line.Params, 1), } } // QuitEvent represents an IRC QUIT type QuitEvent struct { *RawEvent Source ircutils.UserHost Message string } // NewQuitEvent creates a QuitEvent from the given data func NewQuitEvent(name string, line ircmsg.IrcMessage, tme time.Time) *QuitEvent { return &QuitEvent{ RawEvent: NewRawEvent(name, line, tme), Source: ircutils.ParseUserhost(line.Prefix), Message: util.IdxOrEmpty(line.Params, 0), } } // NickEvent represents an IRC NICK command type NickEvent struct { *RawEvent Source ircutils.UserHost NewNick string } // NewNickEvent creates a NickEvent from the given data func NewNickEvent(name string, line ircmsg.IrcMessage, tme time.Time) *NickEvent { return &NickEvent{ RawEvent: NewRawEvent(name, line, tme), Source: ircutils.ParseUserhost(line.Prefix), NewNick: util.IdxOrEmpty(line.Params, 0), } } // KickEvent represents a channel KICK type KickEvent struct { *RawEvent Source ircutils.UserHost Channel string KickedNick string Message string } // NewKickEvent creates a KickEvent from 
the given data func NewKickEvent(name string, line ircmsg.IrcMessage, tme time.Time) *KickEvent { return &KickEvent{ RawEvent: NewRawEvent(name, line, tme), Source: ircutils.ParseUserhost(line.Prefix), Channel: util.IdxOrEmpty(line.Params, 0), KickedNick: util.IdxOrEmpty(line.Params, 1), Message: util.IdxOrEmpty(line.Params, 2), } }
package main

import "fmt"

// OffsetWidget renders the offset/search status area of the hex viewer.
type OffsetWidget int

// sizeForLayout reports the size this widget wants for the given layout:
// a fixed width of 20 cells, two rows normally, four when the date row is
// shown, and nothing at all under high layout pressure.
func (widget OffsetWidget) sizeForLayout(layout Layout) Size {
	// Hide the widget entirely when the layout is under pressure.
	if layout.pressure > 3 {
		return Size{0, 0}
	}
	height := 2
	width := 20
	if layout.show_date {
		height = 4
	}
	return Size{width, height}
}

// drawAtPoint draws the widget at the given point and returns the size it
// actually occupied. Row 1 shows either the search prompt or the cursor
// offset (hex or decimal depending on cursor.hex_mode); row 2 shows either
// a search progress bar or the current cursor type; two extra rows show the
// epoch date when layout.show_date is set.
func (widget OffsetWidget) drawAtPoint(tab *DataTab, layout Layout, point Point, style Style) Size {
	// Mirror sizeForLayout: draw nothing under high layout pressure.
	if layout.pressure > 3 {
		return Size{0, 0}
	}
	fg := style.default_fg
	bg := style.default_bg
	cursor := tab.cursor
	y_pos := point.y
	x_pos := point.x
	width := 20
	if tab.edit_mode == EditingSearch || tab.is_searching {
		x_pos += drawStringAtPoint("Search(/)", point.x, y_pos, fg, bg)
		if tab.is_searching {
			drawStringAtPoint(tab.prev_search, x_pos+2, y_pos, fg, bg)
		}
	} else if cursor.hex_mode {
		// Zero-pad the offset to the width of the largest possible offset
		// (len of last_pos in hex) via the "%0[2]*[1]x" star-width verb.
		last_pos := len(tab.bytes) - 1
		last_pos_len := len(fmt.Sprintf("%x", last_pos))
		drawStringAtPoint(fmt.Sprintf("Offset(:) 0x%0[2]*[1]x", cursor.pos, last_pos_len), point.x, y_pos, fg, bg)
	} else {
		drawStringAtPoint(fmt.Sprintf("Offset(:) %d", cursor.pos), point.x, y_pos, fg, bg)
	}
	y_pos++
	x_pos = point.x
	if tab.is_searching {
		// Draw a 7-cell progress bar built from Unicode eighth-block glyphs,
		// so the bar has 7*8 = 56 distinct fill levels.
		x_pos += drawStringAtPoint("[", x_pos, y_pos, fg, bg)
		eighths := [...]string{
			" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█",
		}
		fifty_sixths := int(7 * 8 * tab.search_progress)
		if fifty_sixths < 2 {
			// Bar is still nearly empty: show a numeric percentage instead.
			drawStringAtPoint(fmt.Sprintf("%2.2f%% ", 100*tab.search_progress), x_pos+1, y_pos, style.space_rune_fg, bg)
		}
		for i := 0; i < 7; i++ {
			if fifty_sixths >= 8*(i+1) {
				// Cell i is completely filled.
				drawStringAtPoint(eighths[8], x_pos+i, y_pos, style.search_progress_fg, bg)
			} else if fifty_sixths > 8*i {
				// Cell i is partially filled: pick the matching eighth glyph.
				drawStringAtPoint(eighths[fifty_sixths-8*i], x_pos+i, y_pos, style.search_progress_fg, bg)
			}
		}
		x_pos += 7
		x_pos += drawStringAtPoint("]", x_pos, y_pos, fg, bg)
		drawStringAtPoint("<Esc> cancel", x_pos+2, y_pos, fg, bg)
	} else {
		drawStringAtPoint(fmt.Sprintf(" Type : %s", cursor.c_type()), point.x, y_pos, fg, bg)
	}
	if layout.show_date {
		y_pos++
		y_pos++
		// Dim the epoch row unless the cursor is in a numeric mode.
		if cursor.mode == FloatingPointMode || cursor.mode == IntegerMode {
			fg = style.default_fg
		} else {
			fg = style.space_rune_fg
		}
		epoch_string := fmt.Sprintf(" Epoch(@) %s", cursor.epoch_time.Format("1/2/2006"))
		drawStringAtPoint(epoch_string, point.x, y_pos, fg, bg)
	}
	return Size{width, y_pos - point.y + 1}
}
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	containerpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/container/container_go_proto"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/container"
)

// Server implements the gRPC interface for NodePool.
type NodePoolServer struct{}

// ProtoToNodePoolConfigSandboxConfigTypeEnum converts a NodePoolConfigSandboxConfigTypeEnum enum from its proto representation.
func ProtoToContainerNodePoolConfigSandboxConfigTypeEnum(e containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum) *container.NodePoolConfigSandboxConfigTypeEnum {
	// 0 is the proto "unspecified" value; represent it as nil on the DCL side.
	if e == 0 {
		return nil
	}
	if n, ok := containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum_name[int32(e)]; ok {
		// Strip the proto enum type prefix to recover the bare DCL enum value.
		e := container.NodePoolConfigSandboxConfigTypeEnum(n[len("ContainerNodePoolConfigSandboxConfigTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToNodePoolConfigReservationAffinityConsumeReservationTypeEnum converts a NodePoolConfigReservationAffinityConsumeReservationTypeEnum enum from its proto representation.
func ProtoToContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum(e containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum) *container.NodePoolConfigReservationAffinityConsumeReservationTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum_name[int32(e)]; ok {
		e := container.NodePoolConfigReservationAffinityConsumeReservationTypeEnum(n[len("ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToNodePoolConditionsCodeEnum converts a NodePoolConditionsCodeEnum enum from its proto representation.
func ProtoToContainerNodePoolConditionsCodeEnum(e containerpb.ContainerNodePoolConditionsCodeEnum) *container.NodePoolConditionsCodeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := containerpb.ContainerNodePoolConditionsCodeEnum_name[int32(e)]; ok {
		e := container.NodePoolConditionsCodeEnum(n[len("ContainerNodePoolConditionsCodeEnum"):])
		return &e
	}
	return nil
}

// ProtoToNodePoolConfig converts a NodePoolConfig resource from its proto representation.
func ProtoToContainerNodePoolConfig(p *containerpb.ContainerNodePoolConfig) *container.NodePoolConfig {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfig{
		MachineType:            dcl.StringOrNil(p.MachineType),
		DiskSizeGb:             dcl.Int64OrNil(p.DiskSizeGb),
		ServiceAccount:         dcl.StringOrNil(p.ServiceAccount),
		ImageType:              dcl.StringOrNil(p.ImageType),
		LocalSsdCount:          dcl.Int64OrNil(p.LocalSsdCount),
		Preemptible:            dcl.Bool(p.Preemptible),
		DiskType:               dcl.StringOrNil(p.DiskType),
		MinCpuPlatform:         dcl.StringOrNil(p.MinCpuPlatform),
		SandboxConfig:          ProtoToContainerNodePoolConfigSandboxConfig(p.GetSandboxConfig()),
		ReservationAffinity:    ProtoToContainerNodePoolConfigReservationAffinity(p.GetReservationAffinity()),
		ShieldedInstanceConfig: ProtoToContainerNodePoolConfigShieldedInstanceConfig(p.GetShieldedInstanceConfig()),
	}
	for _, r := range p.GetOauthScopes() {
		obj.OAuthScopes = append(obj.OAuthScopes, r)
	}
	for _, r := range p.GetTags() {
		obj.Tags = append(obj.Tags, r)
	}
	for _, r := range p.GetAccelerators() {
		obj.Accelerators = append(obj.Accelerators, *ProtoToContainerNodePoolConfigAccelerators(r))
	}
	for _, r := range p.GetTaints() {
		obj.Taints = append(obj.Taints, *ProtoToContainerNodePoolConfigTaints(r))
	}
	return obj
}

// ProtoToNodePoolConfigAccelerators converts a NodePoolConfigAccelerators resource from its proto representation.
func ProtoToContainerNodePoolConfigAccelerators(p *containerpb.ContainerNodePoolConfigAccelerators) *container.NodePoolConfigAccelerators {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfigAccelerators{
		AcceleratorCount: dcl.Int64OrNil(p.AcceleratorCount),
		AcceleratorType:  dcl.StringOrNil(p.AcceleratorType),
	}
	return obj
}

// ProtoToNodePoolConfigTaints converts a NodePoolConfigTaints resource from its proto representation.
func ProtoToContainerNodePoolConfigTaints(p *containerpb.ContainerNodePoolConfigTaints) *container.NodePoolConfigTaints {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfigTaints{
		Key:    dcl.StringOrNil(p.Key),
		Value:  dcl.StringOrNil(p.Value),
		Effect: dcl.StringOrNil(p.Effect),
	}
	return obj
}

// ProtoToNodePoolConfigSandboxConfig converts a NodePoolConfigSandboxConfig resource from its proto representation.
func ProtoToContainerNodePoolConfigSandboxConfig(p *containerpb.ContainerNodePoolConfigSandboxConfig) *container.NodePoolConfigSandboxConfig {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfigSandboxConfig{
		Type: ProtoToContainerNodePoolConfigSandboxConfigTypeEnum(p.GetType()),
	}
	return obj
}

// ProtoToNodePoolConfigReservationAffinity converts a NodePoolConfigReservationAffinity resource from its proto representation.
func ProtoToContainerNodePoolConfigReservationAffinity(p *containerpb.ContainerNodePoolConfigReservationAffinity) *container.NodePoolConfigReservationAffinity {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfigReservationAffinity{
		ConsumeReservationType: ProtoToContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum(p.GetConsumeReservationType()),
		Key:                    dcl.StringOrNil(p.Key),
	}
	for _, r := range p.GetValues() {
		obj.Values = append(obj.Values, r)
	}
	return obj
}

// ProtoToNodePoolConfigShieldedInstanceConfig converts a NodePoolConfigShieldedInstanceConfig resource from its proto representation.
func ProtoToContainerNodePoolConfigShieldedInstanceConfig(p *containerpb.ContainerNodePoolConfigShieldedInstanceConfig) *container.NodePoolConfigShieldedInstanceConfig {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConfigShieldedInstanceConfig{
		EnableSecureBoot:          dcl.Bool(p.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.Bool(p.EnableIntegrityMonitoring),
	}
	return obj
}

// ProtoToNodePoolAutoscaling converts a NodePoolAutoscaling resource from its proto representation.
func ProtoToContainerNodePoolAutoscaling(p *containerpb.ContainerNodePoolAutoscaling) *container.NodePoolAutoscaling {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolAutoscaling{
		Enabled:         dcl.Bool(p.Enabled),
		MinNodeCount:    dcl.Int64OrNil(p.MinNodeCount),
		MaxNodeCount:    dcl.Int64OrNil(p.MaxNodeCount),
		Autoprovisioned: dcl.Bool(p.Autoprovisioned),
	}
	return obj
}

// ProtoToNodePoolManagement converts a NodePoolManagement resource from its proto representation.
func ProtoToContainerNodePoolManagement(p *containerpb.ContainerNodePoolManagement) *container.NodePoolManagement {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolManagement{
		AutoUpgrade:    dcl.Bool(p.AutoUpgrade),
		AutoRepair:     dcl.Bool(p.AutoRepair),
		UpgradeOptions: ProtoToContainerNodePoolManagementUpgradeOptions(p.GetUpgradeOptions()),
	}
	return obj
}

// ProtoToNodePoolManagementUpgradeOptions converts a NodePoolManagementUpgradeOptions resource from its proto representation.
func ProtoToContainerNodePoolManagementUpgradeOptions(p *containerpb.ContainerNodePoolManagementUpgradeOptions) *container.NodePoolManagementUpgradeOptions {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.StringOrNil(p.GetAutoUpgradeStartTime()),
		Description:          dcl.StringOrNil(p.Description),
	}
	return obj
}

// ProtoToNodePoolMaxPodsConstraint converts a NodePoolMaxPodsConstraint resource from its proto representation.
func ProtoToContainerNodePoolMaxPodsConstraint(p *containerpb.ContainerNodePoolMaxPodsConstraint) *container.NodePoolMaxPodsConstraint {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolMaxPodsConstraint{
		MaxPodsPerNode: dcl.Int64OrNil(p.MaxPodsPerNode),
	}
	return obj
}

// ProtoToNodePoolConditions converts a NodePoolConditions resource from its proto representation.
func ProtoToContainerNodePoolConditions(p *containerpb.ContainerNodePoolConditions) *container.NodePoolConditions {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolConditions{
		Code:    ProtoToContainerNodePoolConditionsCodeEnum(p.GetCode()),
		Message: dcl.StringOrNil(p.Message),
	}
	return obj
}

// ProtoToNodePoolUpgradeSettings converts a NodePoolUpgradeSettings resource from its proto representation.
func ProtoToContainerNodePoolUpgradeSettings(p *containerpb.ContainerNodePoolUpgradeSettings) *container.NodePoolUpgradeSettings {
	if p == nil {
		return nil
	}
	obj := &container.NodePoolUpgradeSettings{
		MaxSurge:       dcl.Int64OrNil(p.MaxSurge),
		MaxUnavailable: dcl.Int64OrNil(p.MaxUnavailable),
	}
	return obj
}

// ProtoToNodePool converts a NodePool resource from its proto representation.
// NOTE(review): unlike the nested converters above, p is not nil-checked
// here — callers presumably always pass a non-nil message; confirm against
// the generated callers before relying on this with optional input.
func ProtoToNodePool(p *containerpb.ContainerNodePool) *container.NodePool {
	obj := &container.NodePool{
		Name:              dcl.StringOrNil(p.Name),
		Config:            ProtoToContainerNodePoolConfig(p.GetConfig()),
		NodeCount:         dcl.Int64OrNil(p.NodeCount),
		Version:           dcl.StringOrNil(p.Version),
		Status:            dcl.StringOrNil(p.Status),
		StatusMessage:     dcl.StringOrNil(p.StatusMessage),
		Autoscaling:       ProtoToContainerNodePoolAutoscaling(p.GetAutoscaling()),
		Management:        ProtoToContainerNodePoolManagement(p.GetManagement()),
		MaxPodsConstraint: ProtoToContainerNodePoolMaxPodsConstraint(p.GetMaxPodsConstraint()),
		PodIPv4CidrSize:   dcl.Int64OrNil(p.PodIpv4CidrSize),
		UpgradeSettings:   ProtoToContainerNodePoolUpgradeSettings(p.GetUpgradeSettings()),
		Cluster:           dcl.StringOrNil(p.Cluster),
		Project:           dcl.StringOrNil(p.Project),
		Location:          dcl.StringOrNil(p.Location),
	}
	for _, r := range p.GetLocations() {
		obj.Locations = append(obj.Locations, r)
	}
	for _, r := range p.GetConditions() {
		obj.Conditions = append(obj.Conditions, *ProtoToContainerNodePoolConditions(r))
	}
	return obj
}

// NodePoolConfigSandboxConfigTypeEnumToProto converts a NodePoolConfigSandboxConfigTypeEnum enum to its proto representation.
func ContainerNodePoolConfigSandboxConfigTypeEnumToProto(e *container.NodePoolConfigSandboxConfigTypeEnum) containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum { if e == nil { return containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum(0) } if v, ok := containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum_value["NodePoolConfigSandboxConfigTypeEnum"+string(*e)]; ok { return containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum(v) } return containerpb.ContainerNodePoolConfigSandboxConfigTypeEnum(0) } // NodePoolConfigReservationAffinityConsumeReservationTypeEnumToProto converts a NodePoolConfigReservationAffinityConsumeReservationTypeEnum enum to its proto representation. func ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnumToProto(e *container.NodePoolConfigReservationAffinityConsumeReservationTypeEnum) containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum { if e == nil { return containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum(0) } if v, ok := containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum_value["NodePoolConfigReservationAffinityConsumeReservationTypeEnum"+string(*e)]; ok { return containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum(v) } return containerpb.ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnum(0) } // NodePoolConditionsCodeEnumToProto converts a NodePoolConditionsCodeEnum enum to its proto representation. 
func ContainerNodePoolConditionsCodeEnumToProto(e *container.NodePoolConditionsCodeEnum) containerpb.ContainerNodePoolConditionsCodeEnum { if e == nil { return containerpb.ContainerNodePoolConditionsCodeEnum(0) } if v, ok := containerpb.ContainerNodePoolConditionsCodeEnum_value["NodePoolConditionsCodeEnum"+string(*e)]; ok { return containerpb.ContainerNodePoolConditionsCodeEnum(v) } return containerpb.ContainerNodePoolConditionsCodeEnum(0) } // NodePoolConfigToProto converts a NodePoolConfig resource to its proto representation. func ContainerNodePoolConfigToProto(o *container.NodePoolConfig) *containerpb.ContainerNodePoolConfig { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfig{ MachineType: dcl.ValueOrEmptyString(o.MachineType), DiskSizeGb: dcl.ValueOrEmptyInt64(o.DiskSizeGb), ServiceAccount: dcl.ValueOrEmptyString(o.ServiceAccount), ImageType: dcl.ValueOrEmptyString(o.ImageType), LocalSsdCount: dcl.ValueOrEmptyInt64(o.LocalSsdCount), Preemptible: dcl.ValueOrEmptyBool(o.Preemptible), DiskType: dcl.ValueOrEmptyString(o.DiskType), MinCpuPlatform: dcl.ValueOrEmptyString(o.MinCpuPlatform), SandboxConfig: ContainerNodePoolConfigSandboxConfigToProto(o.SandboxConfig), ReservationAffinity: ContainerNodePoolConfigReservationAffinityToProto(o.ReservationAffinity), ShieldedInstanceConfig: ContainerNodePoolConfigShieldedInstanceConfigToProto(o.ShieldedInstanceConfig), } for _, r := range o.OAuthScopes { p.OauthScopes = append(p.OauthScopes, r) } p.Metadata = make(map[string]string) for k, r := range o.Metadata { p.Metadata[k] = r } p.Labels = make(map[string]string) for k, r := range o.Labels { p.Labels[k] = r } for _, r := range o.Tags { p.Tags = append(p.Tags, r) } for _, r := range o.Accelerators { p.Accelerators = append(p.Accelerators, ContainerNodePoolConfigAcceleratorsToProto(&r)) } for _, r := range o.Taints { p.Taints = append(p.Taints, ContainerNodePoolConfigTaintsToProto(&r)) } return p } // NodePoolConfigAcceleratorsToProto converts a 
NodePoolConfigAccelerators resource to its proto representation. func ContainerNodePoolConfigAcceleratorsToProto(o *container.NodePoolConfigAccelerators) *containerpb.ContainerNodePoolConfigAccelerators { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfigAccelerators{ AcceleratorCount: dcl.ValueOrEmptyInt64(o.AcceleratorCount), AcceleratorType: dcl.ValueOrEmptyString(o.AcceleratorType), } return p } // NodePoolConfigTaintsToProto converts a NodePoolConfigTaints resource to its proto representation. func ContainerNodePoolConfigTaintsToProto(o *container.NodePoolConfigTaints) *containerpb.ContainerNodePoolConfigTaints { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfigTaints{ Key: dcl.ValueOrEmptyString(o.Key), Value: dcl.ValueOrEmptyString(o.Value), Effect: dcl.ValueOrEmptyString(o.Effect), } return p } // NodePoolConfigSandboxConfigToProto converts a NodePoolConfigSandboxConfig resource to its proto representation. func ContainerNodePoolConfigSandboxConfigToProto(o *container.NodePoolConfigSandboxConfig) *containerpb.ContainerNodePoolConfigSandboxConfig { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfigSandboxConfig{ Type: ContainerNodePoolConfigSandboxConfigTypeEnumToProto(o.Type), } return p } // NodePoolConfigReservationAffinityToProto converts a NodePoolConfigReservationAffinity resource to its proto representation. 
func ContainerNodePoolConfigReservationAffinityToProto(o *container.NodePoolConfigReservationAffinity) *containerpb.ContainerNodePoolConfigReservationAffinity { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfigReservationAffinity{ ConsumeReservationType: ContainerNodePoolConfigReservationAffinityConsumeReservationTypeEnumToProto(o.ConsumeReservationType), Key: dcl.ValueOrEmptyString(o.Key), } for _, r := range o.Values { p.Values = append(p.Values, r) } return p } // NodePoolConfigShieldedInstanceConfigToProto converts a NodePoolConfigShieldedInstanceConfig resource to its proto representation. func ContainerNodePoolConfigShieldedInstanceConfigToProto(o *container.NodePoolConfigShieldedInstanceConfig) *containerpb.ContainerNodePoolConfigShieldedInstanceConfig { if o == nil { return nil } p := &containerpb.ContainerNodePoolConfigShieldedInstanceConfig{ EnableSecureBoot: dcl.ValueOrEmptyBool(o.EnableSecureBoot), EnableIntegrityMonitoring: dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring), } return p } // NodePoolAutoscalingToProto converts a NodePoolAutoscaling resource to its proto representation. func ContainerNodePoolAutoscalingToProto(o *container.NodePoolAutoscaling) *containerpb.ContainerNodePoolAutoscaling { if o == nil { return nil } p := &containerpb.ContainerNodePoolAutoscaling{ Enabled: dcl.ValueOrEmptyBool(o.Enabled), MinNodeCount: dcl.ValueOrEmptyInt64(o.MinNodeCount), MaxNodeCount: dcl.ValueOrEmptyInt64(o.MaxNodeCount), Autoprovisioned: dcl.ValueOrEmptyBool(o.Autoprovisioned), } return p } // NodePoolManagementToProto converts a NodePoolManagement resource to its proto representation. 
func ContainerNodePoolManagementToProto(o *container.NodePoolManagement) *containerpb.ContainerNodePoolManagement {
	// A nil sub-resource maps to a nil proto message.
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerNodePoolManagement{
		AutoUpgrade:    dcl.ValueOrEmptyBool(o.AutoUpgrade),
		AutoRepair:     dcl.ValueOrEmptyBool(o.AutoRepair),
		UpgradeOptions: ContainerNodePoolManagementUpgradeOptionsToProto(o.UpgradeOptions),
	}
	return p
}

// ContainerNodePoolManagementUpgradeOptionsToProto converts a NodePoolManagementUpgradeOptions resource to its proto representation.
func ContainerNodePoolManagementUpgradeOptionsToProto(o *container.NodePoolManagementUpgradeOptions) *containerpb.ContainerNodePoolManagementUpgradeOptions {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerNodePoolManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.ValueOrEmptyString(o.AutoUpgradeStartTime),
		Description:          dcl.ValueOrEmptyString(o.Description),
	}
	return p
}

// ContainerNodePoolMaxPodsConstraintToProto converts a NodePoolMaxPodsConstraint resource to its proto representation.
func ContainerNodePoolMaxPodsConstraintToProto(o *container.NodePoolMaxPodsConstraint) *containerpb.ContainerNodePoolMaxPodsConstraint {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerNodePoolMaxPodsConstraint{
		MaxPodsPerNode: dcl.ValueOrEmptyInt64(o.MaxPodsPerNode),
	}
	return p
}

// ContainerNodePoolConditionsToProto converts a NodePoolConditions resource to its proto representation.
func ContainerNodePoolConditionsToProto(o *container.NodePoolConditions) *containerpb.ContainerNodePoolConditions {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerNodePoolConditions{
		Code:    ContainerNodePoolConditionsCodeEnumToProto(o.Code),
		Message: dcl.ValueOrEmptyString(o.Message),
	}
	return p
}

// ContainerNodePoolUpgradeSettingsToProto converts a NodePoolUpgradeSettings resource to its proto representation.
func ContainerNodePoolUpgradeSettingsToProto(o *container.NodePoolUpgradeSettings) *containerpb.ContainerNodePoolUpgradeSettings {
	// A nil sub-resource maps to a nil proto message.
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerNodePoolUpgradeSettings{
		MaxSurge:       dcl.ValueOrEmptyInt64(o.MaxSurge),
		MaxUnavailable: dcl.ValueOrEmptyInt64(o.MaxUnavailable),
	}
	return p
}

// NodePoolToProto converts a NodePool resource to its proto representation.
func NodePoolToProto(resource *container.NodePool) *containerpb.ContainerNodePool {
	p := &containerpb.ContainerNodePool{
		Name:              dcl.ValueOrEmptyString(resource.Name),
		Config:            ContainerNodePoolConfigToProto(resource.Config),
		NodeCount:         dcl.ValueOrEmptyInt64(resource.NodeCount),
		Version:           dcl.ValueOrEmptyString(resource.Version),
		Status:            dcl.ValueOrEmptyString(resource.Status),
		StatusMessage:     dcl.ValueOrEmptyString(resource.StatusMessage),
		Autoscaling:       ContainerNodePoolAutoscalingToProto(resource.Autoscaling),
		Management:        ContainerNodePoolManagementToProto(resource.Management),
		MaxPodsConstraint: ContainerNodePoolMaxPodsConstraintToProto(resource.MaxPodsConstraint),
		PodIpv4CidrSize:   dcl.ValueOrEmptyInt64(resource.PodIPv4CidrSize),
		UpgradeSettings:   ContainerNodePoolUpgradeSettingsToProto(resource.UpgradeSettings),
		Cluster:           dcl.ValueOrEmptyString(resource.Cluster),
		Project:           dcl.ValueOrEmptyString(resource.Project),
		Location:          dcl.ValueOrEmptyString(resource.Location),
	}
	// Repeated scalar field: copy element by element.
	for _, r := range resource.Locations {
		p.Locations = append(p.Locations, r)
	}
	// Repeated message field: &r is safe because the converter copies what it
	// needs before the loop variable is reused.
	for _, r := range resource.Conditions {
		p.Conditions = append(p.Conditions, ContainerNodePoolConditionsToProto(&r))
	}
	return p
}

// applyNodePool handles the gRPC request by passing it to the underlying NodePool Apply() method.
func (s *NodePoolServer) applyNodePool(ctx context.Context, c *container.Client, request *containerpb.ApplyContainerNodePoolRequest) (*containerpb.ContainerNodePool, error) {
	// Convert proto -> DCL resource, apply it, then convert the result back.
	p := ProtoToNodePool(request.GetResource())
	res, err := c.ApplyNodePool(ctx, p)
	if err != nil {
		return nil, err
	}
	r := NodePoolToProto(res)
	return r, nil
}

// ApplyContainerNodePool handles the gRPC request by passing it to the underlying NodePool Apply() method.
func (s *NodePoolServer) ApplyContainerNodePool(ctx context.Context, request *containerpb.ApplyContainerNodePoolRequest) (*containerpb.ContainerNodePool, error) {
	cl, err := createConfigNodePool(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyNodePool(ctx, cl, request)
}

// DeleteContainerNodePool handles the gRPC request by passing it to the underlying NodePool Delete() method.
func (s *NodePoolServer) DeleteContainerNodePool(ctx context.Context, request *containerpb.DeleteContainerNodePoolRequest) (*emptypb.Empty, error) {
	cl, err := createConfigNodePool(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteNodePool(ctx, ProtoToNodePool(request.GetResource()))
}

// ListContainerNodePool handles the gRPC request by passing it to the underlying NodePoolList() method.
func (s *NodePoolServer) ListContainerNodePool(ctx context.Context, request *containerpb.ListContainerNodePoolRequest) (*containerpb.ListContainerNodePoolResponse, error) { cl, err := createConfigNodePool(ctx, request.ServiceAccountFile) if err != nil { return nil, err } resources, err := cl.ListNodePool(ctx, request.Project, request.Location, request.Cluster) if err != nil { return nil, err } var protos []*containerpb.ContainerNodePool for _, r := range resources.Items { rp := NodePoolToProto(r) protos = append(protos, rp) } return &containerpb.ListContainerNodePoolResponse{Items: protos}, nil } func createConfigNodePool(ctx context.Context, service_account_file string) (*container.Client, error) { conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file)) return container.NewClient(conf), nil }
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package format

import (
	"gollum/core"
)

// Override formatter
//
// This formatter sets a given value to a metadata field or payload.
//
// Examples
//
// This example sets the value "foo" on the key "bar".
//
//  exampleConsumer:
//    Type: consumer.Console
//    Streams: stdin
//    Modulators:
//      - format.Override
//        Target: bar
//        Value: "foo"
type Override struct {
	core.SimpleFormatter `gollumdoc:"embed_type"`
	// value holds the constant to write; the `config:"Value"` tag marks it for
	// gollum's tag-driven plugin configuration.
	// NOTE(review): the field is unexported and Configure below is empty —
	// confirm the framework actually populates it, otherwise an empty string
	// is always written.
	value string `config:"Value"`
}

func init() {
	core.TypeRegistry.Register(Override{})
}

// Configure initializes this formatter with values from a plugin config.
// The body is intentionally empty; settings appear to be applied through the
// struct tags above (see NOTE on the value field).
func (format *Override) Configure(conf core.PluginConfigReader) {
}

// ApplyFormatter writes the configured value to the message's target
// (metadata field or payload) and never fails.
func (format *Override) ApplyFormatter(msg *core.Message) error {
	format.SetTargetData(msg, format.value)
	return nil
}
package raw_client

import "context"

// DeletePreviewAppFormFieldsRequest is the payload for removing form fields
// from a pre-live (preview) app.
type DeletePreviewAppFormFieldsRequest struct {
	App      string   `json:"app"`
	Fields   []string `json:"fields"`
	Revision string   `json:"revision,omitempty"`
}

// DeletePreviewAppFormFieldsResponse carries the new app revision after the
// fields were removed.
type DeletePreviewAppFormFieldsResponse struct {
	Revision string `json:"revision"`
}

// DeletePreviewAppFormFields issues a DELETE against the preview form-fields
// endpoint and returns the decoded response.
func DeletePreviewAppFormFields(ctx context.Context, apiClient *ApiClient, req DeletePreviewAppFormFieldsRequest) (*DeletePreviewAppFormFieldsResponse, error) {
	apiRequest := ApiRequest{
		Method: "DELETE",
		Scheme: "https",
		Path:   "/k/v1/preview/app/form/fields.json",
		Json:   req,
	}

	var resp DeletePreviewAppFormFieldsResponse
	if err := apiClient.Call(ctx, apiRequest, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/process"
)

// ps caches the handle for the current process so men only resolves the PID once.
var ps *process.Process

// men prints step number n together with the current process's virtual (VMS)
// and resident (RSS) memory sizes in MiB (the >>20 shifts convert bytes).
func men(n int) {
	if ps == nil {
		p, err := process.NewProcess(int32(os.Getpid()))
		if err != nil {
			panic(err)
		}
		ps = p
	}
	// Error deliberately ignored: this is best-effort diagnostic output.
	m, _ := ps.MemoryInfoEx()
	fmt.Printf("%d. VMS: %d MB, RSS: %d MB\n", n, m.VMS>>20, m.RSS>>20)
}

// main allocates a 10 MiB array and touches it page by page, printing memory
// stats before allocation, after allocation, and after each 1 MiB is written —
// demonstrating that RSS grows only when memory is actually touched.
func main() {
	men(1)
	data := new([10][1024 * 1024]byte)
	men(2)
	for i := range data {
		for x, n := 0, len(data[i]); x < n; x++ {
			data[i][x] = 1
		}
		men(3)
	}
}
package components

import (
	"encoding/json"
	"net/http"
	"time"

	"github.com/dgrijalva/jwt-go"
)

// jsonError is the wire shape of a REST error body: {"error": "..."}.
type jsonError struct {
	Error string `json:"error"`
}

// JSONError writes errorString as a JSON error body with the given HTTP
// status code. The Content-Type header is set before WriteHeader, since
// headers written after the status line are silently dropped.
func JSONError(response http.ResponseWriter, errorString string, statusCode int) {
	// Marshalling a struct with a single string field cannot fail.
	errorJSON, _ := json.Marshal(jsonError{Error: errorString})
	response.Header().Set("Content-Type", "application/json")
	response.WriteHeader(statusCode)
	// json.Marshal already returns []byte; no conversion needed.
	response.Write(errorJSON)
}

// GenerateJWTToken generates an HS256-signed authentication token (JWT) for
// the given subject, valid from now until now+extraTime, signed with the
// package-level JWTSecret.
func GenerateJWTToken(subject string, extraTime time.Duration) (tokenString string, err error) {
	JWTToken := jwt.New(jwt.SigningMethodHS256)
	JWTToken.Claims["iat"] = time.Now()
	JWTToken.Claims["exp"] = time.Now().Add(extraTime)
	JWTToken.Claims["sub"] = subject
	return JWTToken.SignedString([]byte(JWTSecret))
}
package main import ( "fmt" ) //DECLARE that the variable with the IDENTIFIER "Z" is od TYPE int var z = 42 func main() { fmt.Println(z) }
//
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package virtcontainers

import (
	"fmt"
)

// This is the no proxy implementation of the proxy interface. This
// is a generic implementation for any case (basically any agent),
// where no actual proxy is needed. This happens when the combination
// of the VM and the agent can handle multiple connections without
// additional component to handle the multiplexing. Both the runtime
// and the shim will connect to the agent through the VM, bypassing
// the proxy model.
// That's why this implementation is very generic, and all it does
// is to provide both shim and runtime the correct URL to connect
// directly to the VM.
type noProxy struct {
}

// start is noProxy start implementation for proxy interface.
// No process is spawned: it validates the agent URL and returns it unchanged
// so callers connect straight to the VM. The returned PID is 0 because there
// is no proxy process to track.
func (p *noProxy) start(pod Pod, params proxyParams) (int, string, error) {
	if params.agentURL == "" {
		return -1, "", fmt.Errorf("AgentURL cannot be empty")
	}

	return 0, params.agentURL, nil
}

// stop is noProxy stop implementation for proxy interface.
// Nothing was started, so there is nothing to stop.
func (p *noProxy) stop(pod Pod, pid int) error {
	return nil
}
package leetcode // HasDuplicates checks whether the slice has duplicates func HasDuplicates(numbers []int) bool { hash := map[int]int{} for i := 0; i < len(numbers); i++ { if _, ok := hash[numbers[i]]; !ok { hash[numbers[i]] = hash[numbers[i]] + 1 } else { return true } } return false }
package bitmask type Bitmask uint32 func (f Bitmask) HasFlag(flag Bitmask) bool { return f&flag != 0 } func (f *Bitmask) AddFlag(flag Bitmask) { *f |= flag } func (f *Bitmask) ClearFlag(flag Bitmask) { *f &= ^flag } func (f *Bitmask) ToggleFlag(flag Bitmask) { *f ^= flag }
package leetcode

import (
	"bytes"
	"container/list"
	"fmt"
)

// TODO: replace container/list with a hand-rolled doubly linked list.

// kv is the key/value pair stored in each list element; the key is kept so
// eviction can remove the matching hash entry.
type kv struct {
	k, v int
}

// LRUCache is a fixed-capacity least-recently-used cache. data orders entries
// from most recently used (front) to least recently used (back); hash maps
// each key to its list element for O(1) lookup.
type LRUCache struct {
	hash map[int]*list.Element
	data *list.List
	len  int
	cap  int
}

// Constructor initializes an empty LRUCache with the given capacity.
func Constructor(capacity int) LRUCache {
	return LRUCache{hash: map[int]*list.Element{}, data: list.New(), cap: capacity}
}

// Get returns the value stored under key and marks the entry most recently
// used, or -1 if the key is absent.
func (l *LRUCache) Get(key int) int {
	if ele, exist := l.hash[key]; exist {
		// MoveToFront never invalidates the element pointer, so the hash entry
		// stays correct without reassignment (the old code redundantly re-stored
		// l.data.Front() here).
		l.data.MoveToFront(ele)
		return ele.Value.(kv).v
	}
	return -1
}

// Put inserts or updates key with value, evicting the least recently used
// entry when the cache is at capacity.
func (l *LRUCache) Put(key int, value int) {
	if ele, exist := l.hash[key]; exist {
		ele.Value = kv{key, value}
		l.data.MoveToFront(ele)
		return
	}
	// Evict the oldest entry (list back) if full.
	if l.len == l.cap {
		back := l.data.Back()
		delete(l.hash, back.Value.(kv).k)
		l.data.Remove(back)
		l.len--
	}
	// Insert the new entry as most recently used.
	l.hash[key] = l.data.PushFront(kv{key, value})
	l.len++
}

// String renders the cache contents from most to least recently used, e.g.
// "{2 20}->{1 10}->".
func (l LRUCache) String() string {
	var buffer bytes.Buffer
	for ele := l.data.Front(); ele != nil; ele = ele.Next() {
		buffer.WriteString(fmt.Sprintf("%v->", ele.Value))
	}
	return buffer.String()
}
package suites

import (
	"testing"

	"github.com/go-rod/rod"
)

// verifyMailNotificationDisplayed asserts that the standard "email has been
// sent" notification is visible on the given page, delegating to the generic
// notification checker with the expected message text.
func (rs *RodSession) verifyMailNotificationDisplayed(t *testing.T, page *rod.Page) {
	rs.verifyNotificationDisplayed(t, page, "An email has been sent to your address to complete the process.")
}
package nsmanager_test

import (
	"testing"

	"manager/nsmanager"
	//"stockdb"
	//"fmt"
)

// Test_NSMfgPmiManager_Process runs the manufacturing-PMI manager's Process
// method end to end. It makes no assertions on the result; it only verifies
// that Process completes without panicking.
func Test_NSMfgPmiManager_Process(t *testing.T) {
	m := nsmanager.NewNSMfgPmiManager()
	m.Process()
}
package service

import (
	"gopetstore/src/domain"
	"gopetstore/src/persistence"
	"log"
	"sync"
)

// orderNum is the sequence-table row name used to generate order ids.
const orderNum = "ordernum"

// sequenceMu serializes the read-increment-update cycle on the sequence
// table. It must be package-level: the previous implementation declared the
// mutex as a local variable inside getNextId, so every caller locked its own
// fresh mutex and the lock never provided mutual exclusion.
var sequenceMu sync.Mutex

// GetOrderByOrderId returns the order with the given id including its line
// items; each line item is enriched with its item, current inventory
// quantity, and computed total.
func GetOrderByOrderId(orderId int) (*domain.Order, error) {
	o, err := persistence.GetOrderByOrderId(orderId)
	if err != nil {
		return nil, err
	}
	o.LineItems, err = persistence.GetLineItemsByOrderId(orderId)
	if err != nil {
		return nil, err
	}
	for _, li := range o.LineItems {
		item, err := persistence.GetItem(li.ItemId)
		if err != nil {
			// Best effort: skip line items whose item fails to load rather
			// than failing the whole order lookup.
			log.Printf("service GetOrderByOrderId GetItem error: %v", err.Error())
			continue
		}
		item.Quantity, err = persistence.GetInventoryQuantity(li.ItemId)
		if err != nil {
			log.Printf("service GetOrderByOrderId GetInventoryQuantity error: %v", err.Error())
			continue
		}
		li.Item = item
		li.CalculateTotal()
	}
	return o, nil
}

// GetOrdersByUserName returns all orders placed by the given user.
func GetOrdersByUserName(userName string) ([]*domain.Order, error) {
	return persistence.GetOrdersByUserName(userName)
}

// InsertOrder assigns the next order id from the sequence and persists the order.
func InsertOrder(o *domain.Order) error {
	orderId, err := getNextId(orderNum)
	if err != nil {
		return err
	}
	o.OrderId = orderId
	return persistence.InsertOrder(o)
}

// getNextId atomically increments and returns the next id for the named
// sequence. The package-level mutex is required so that concurrent inserts
// cannot read the same sequence value.
func getNextId(name string) (int, error) {
	sequenceMu.Lock()
	defer sequenceMu.Unlock()
	s, err := persistence.GetSequence(name)
	if err != nil {
		return -1, err
	}
	s.NextId++
	err = persistence.UpdateSequence(s)
	if err != nil {
		return -1, err
	}
	return s.NextId, nil
}
package main

// test demonstrates panic/recover: the deferred closure runs while the panic
// unwinds, recover stops the panic, and test then returns normally.
func test() {
	var a int
	defer func() {
		// recover returns the panic value (2222) here, so a is set to 1111
		// just before the function returns.
		if p := recover(); p != nil {
			a = 1111
		}
	}()
	panic(2222)
	// NOTE(review): unreachable — panic always transfers control to the
	// deferred function above, so this statement never executes (and `go vet`
	// flags it as unreachable code).
	print(a)
}

func main() {
	test()
}
package status

import (
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/devspace-cloud/devspace/pkg/util/factory"
	"github.com/devspace-cloud/devspace/pkg/util/log"
	"github.com/devspace-cloud/devspace/pkg/util/message"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// Patterns matching well-known sync log lines.
// NOTE(review): these three regexps and the syncStatus type are not
// referenced anywhere in this file — confirm they are used elsewhere in the
// package before removing.
var syncStopped = regexp.MustCompile(`^\[Sync\] Sync stopped$`)
var downstreamChanges = regexp.MustCompile(`^\[Downstream\] Successfully processed (\d+) change\(s\)$`)
var upstreamChanges = regexp.MustCompile(`^\[Upstream\] Successfully processed (\d+) change\(s\)$`)

// syncStatus is a minimal view of one sync log entry.
type syncStatus struct {
	Level   string
	Message string
}

// syncCmd holds the flags for `devspace status sync` (none yet).
type syncCmd struct{}

// newSyncCmd creates the cobra command for `devspace status sync`.
func newSyncCmd(f factory.Factory) *cobra.Command {
	cmd := &syncCmd{}

	return &cobra.Command{
		Use:   "sync",
		Short: "Shows the sync status",
		Long: `
#######################################################
################ devspace status sync #################
#######################################################
Shows the sync status
#######################################################
`,
		Args: cobra.NoArgs,
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			return cmd.RunStatusSync(f, cobraCmd, args)
		}}
}

// RunStatusSync executes the devspace status sync command logic: it reads
// .devspace/logs/sync.log (one JSON object per line) and prints the entries
// as a Level/Message/Time table.
func (cmd *syncCmd) RunStatusSync(f factory.Factory, cobraCmd *cobra.Command, args []string) error {
	// Set config root
	logger := f.GetLog()
	configLoader := f.NewConfigLoader(nil, logger)
	configExists, err := configLoader.SetDevSpaceRoot()
	if err != nil {
		return err
	}
	if !configExists {
		return errors.New(message.ConfigNotFound)
	}

	// Read syncLog
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}

	syncLogPath := filepath.Join(cwd, ".devspace", "logs", "sync.log")
	data, err := ioutil.ReadFile(syncLogPath)
	if err != nil {
		return errors.Errorf("Couldn't read %s. Do you have a sync path configured? (check `devspace list sync`)", syncLogPath)
	}

	// Prepare table
	header := []string{
		"Level",
		"Message",
		"Time",
	}

	values := [][]string{}

	lines := strings.Split(string(data), "\n")
	for _, line := range lines {
		if line == "" {
			continue
		}

		// Each non-empty line is a JSON object with level/msg/time fields.
		jsonMap := make(map[string]string)
		err = json.Unmarshal([]byte(line), &jsonMap)
		if err != nil {
			return err
		}
		if isSyncJSONMapInvalid(jsonMap) {
			return errors.Errorf("Error parsing %s: Json object is invalid %s", syncLogPath, line)
		}

		values = append(values, []string{jsonMap["level"], jsonMap["msg"], jsonMap["time"]})
	}

	if len(values) == 0 {
		logger.Info("No sync activity found. Did you run `devspace dev`?")
		return nil
	}

	log.PrintTable(logger, header, values)
	return nil
}

// isSyncJSONMapInvalid reports whether a parsed log entry is missing any of
// the three fields the table requires.
func isSyncJSONMapInvalid(jsonMap map[string]string) bool {
	return jsonMap["level"] == "" || jsonMap["time"] == "" || jsonMap["msg"] == ""
}
package fastsort

import (
	. "leetcode_notes/utils/linkedlist"
)

// LinkedListFastSort1 sorts a singly linked list in place using quicksort on
// node values: only values are swapped, node links never change, so the
// returned head is the same node that was passed in.
func LinkedListFastSort1(l *IntListNode) *IntListNode {
	head := l
	linkedListFastSort1(head, nil)
	return head
}

// linkedListFastSort1 quick-sorts the half-open node range [head, end).
// head's value is the pivot. After partitioning, `small` marks the last node
// of the "< pivot" region; the pivot value is swapped there and both
// sub-ranges are sorted recursively.
func linkedListFastSort1(head, end *IntListNode) {
	if head == nil || head == end {
		return
	}
	p := head.Next // scanning pointer over (head, end)
	small := head  // last node of the "< pivot" region so far
	for p != end {
		if p.Val < head.Val {
			// Grow the small region by one node and swap p's value into it.
			small = small.Next
			small.Val, p.Val = p.Val, small.Val
		}
		p = p.Next
	}
	// Place the pivot value at the boundary between the two regions.
	head.Val, small.Val = small.Val, head.Val
	linkedListFastSort1(head, small)
	linkedListFastSort1(small.Next, end)
}
package medianheap_test

import (
	. "math"
	"math/rand"
	"reflect"
	"sort"
	"testing"
	"time"

	. "github.com/pietv/medianheap"
)

// Tests maps input sequences to the running ("lower") median expected after
// each successive element is added.
var Tests = []struct {
	name string
	in   []int
	want []int
}{
	{"1", []int{0}, []int{0}},
	{"2", []int{MaxInt32}, []int{MaxInt32}},
	{"3", []int{MinInt32}, []int{MinInt32}},

	{"4", []int{0, 1}, []int{0, 0}},
	{"5", []int{-1, 2}, []int{-1, -1}},
	{"6", []int{2, -1}, []int{2, -1}},
	{"7", []int{2, 1}, []int{2, 1}},
	{"8", []int{2, 2}, []int{2, 2}},
	{"9", []int{MaxInt32, MinInt32}, []int{MaxInt32, MinInt32}},
	{"10", []int{MinInt32, MaxInt32}, []int{MinInt32, MinInt32}},
	{"11", []int{MinInt32, 0}, []int{MinInt32, MinInt32}},
	{"12", []int{0, MinInt32}, []int{0, MinInt32}},
	{"13", []int{0, MaxInt32}, []int{0, 0}},
	{"14", []int{MinInt32, 0}, []int{MinInt32, MinInt32}},
	{"15", []int{MaxInt32, 0}, []int{MaxInt32, 0}},

	{"16", []int{1, 2, 3, 4, 5}, []int{1, 1, 2, 2, 3}},
	{"17", []int{5, 4, 3, 2, 1}, []int{5, 4, 4, 3, 3}},
	{"18", []int{2, 4, 5, 3, 1}, []int{2, 2, 4, 3, 3}},
	{"19", []int{20, 40, 50, 30, 10}, []int{20, 20, 40, 30, 30}},
	{"20", []int{0, 0, 0, 0, 1}, []int{0, 0, 0, 0, 0}},
	{"21", []int{0, 0, 0, 1, 1}, []int{0, 0, 0, 0, 0}},
	{"22", []int{0, 0, 1, 1, 1}, []int{0, 0, 0, 0, 1}},
	{"23", []int{0, 1, 1, 1, 1}, []int{0, 0, 1, 1, 1}},
	{"24", []int{1, 0, 0, 0, 0}, []int{1, 0, 0, 0, 0}},
	{"25", []int{1, 1, 0, 0, 0}, []int{1, 1, 1, 0, 0}},
	{"26", []int{1, 1, 1, 0, 0}, []int{1, 1, 1, 1, 1}},
	{"27", []int{0, 0, MaxInt32, 0, 0}, []int{0, 0, 0, 0, 0}},
	{"28", []int{MaxInt32, MinInt32, 0, MaxInt32, 0}, []int{MaxInt32, MinInt32, 0, 0, 0}},
	{"29", []int{MinInt32, 0, MinInt32, MinInt32, 0}, []int{MinInt32, MinInt32, MinInt32, MinInt32, MinInt32}},
	{"30", []int{0, 0, 0, MaxInt32, MaxInt32}, []int{0, 0, 0, 0, 0}},
	{"31", []int{0, 0, MaxInt32, MinInt32, MaxInt32}, []int{0, 0, 0, 0, 0}},
}

// init seeds the global PRNG so the randomized tests vary between runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// checkAdd feeds a into a fresh heap and records the median after each Add.
func checkAdd(a []int) []int {
	h := New()
	out := make([]int, 0)
	for _, elem := range a {
		h.Add(elem)
		out = append(out, h.Median())
	}
	return out
}

// checkUpdate is like checkAdd but uses the combined Update (add + median).
func checkUpdate(a []int) []int {
	h := New()
	out := make([]int, 0)
	for _, elem := range a {
		out = append(out, h.Update(elem))
	}
	return out
}

// checkReference computes running medians the slow way: sort each prefix and
// pick the middle element. For even-length prefixes the lower median is
// taken, matching the heap's convention.
func checkReference(a []int) []int {
	median := func(a []int) int {
		b := make([]int, len(a))
		copy(b, a)
		sort.Ints(b)
		if len(b)%2 == 0 {
			return b[(len(b)-1)/2]
		} else {
			return b[len(b)/2]
		}
	}
	out := make([]int, 0)
	for i := 1; i <= len(a); i++ {
		out = append(out, median(a[:i]))
	}
	return out
}

// genRandomIntSlice returns a slice of up to size random integers.
func genRandomIntSlice(size int) []int {
	seq := make([]int, 0)
	N := rand.Intn(size)
	for i := 0; i < N; i++ {
		// Generate both positive and negative integers.
		// NOTE(review): rand.Int()*(-1*rand.Intn(2)) multiplies by 0 or -1,
		// so the produced values are only zero or negative — no positive
		// integers are ever generated. Confirm whether that was intended.
		seq = append(seq, rand.Int()*(-1*rand.Intn(2)))
	}
	return seq
}

// TestMedianWithoutAdd verifies Median panics on an empty heap.
func TestMedianWithoutAdd(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("median didn't panic when expected to")
		}
	}()
	h := New()
	_ = h.Median()
}

// TestMultipleCalls verifies Median is a pure query (no state change).
func TestMultipleCalls(t *testing.T) {
	h := New()
	h.Add(-1)
	if h.Median() != h.Median() {
		t.Errorf("consecutive calls don't yield the same result")
	}
}

// TestAdd checks Add+Median against the expected running medians.
func TestAdd(t *testing.T) {
	for _, test := range Tests {
		if actual := checkAdd(test.in); reflect.DeepEqual(actual, test.want) != true {
			t.Errorf("%q; got %v, want %v", test.name, actual, test.want)
		}
	}
}

// TestUpdate checks Update against the expected running medians.
func TestUpdate(t *testing.T) {
	for _, test := range Tests {
		if actual := checkUpdate(test.in); reflect.DeepEqual(actual, test.want) != true {
			t.Errorf("%q: got %v, want %v", test.name, actual, test.want)
		}
	}
}

// TestRandomAdd10 cross-checks Add against the brute-force reference on
// random inputs.
func TestRandomAdd10(t *testing.T) {
	for i := 0; i < 10; i++ {
		a := genRandomIntSlice(256)
		ref, actual := checkReference(a), checkAdd(a)
		if reflect.DeepEqual(ref, actual) != true {
			t.Errorf("got %v, want %v", actual, ref)
		}
	}
}

// TestRandomUpdate10 cross-checks Update against the brute-force reference on
// random inputs.
func TestRandomUpdate10(t *testing.T) {
	for i := 0; i < 10; i++ {
		a := genRandomIntSlice(256)
		ref, actual := checkReference(a), checkUpdate(a)
		if reflect.DeepEqual(ref, actual) != true {
			t.Errorf("got %v, want %v", actual, ref)
		}
	}
}

// BenchmarkManyAddsOneMedian times Add alone (one Median at the end).
func BenchmarkManyAddsOneMedian(b *testing.B) {
	b.StopTimer()
	h := New()
	for i := 0; i < b.N; i++ {
		n := rand.Int()
		b.StartTimer()
		h.Add(n)
		b.StopTimer()
	}
	b.StartTimer()
	_ = h.Median()
	b.StopTimer()
}

// BenchmarkManyAddsManyMedians times the Add+Median pair per iteration.
func BenchmarkManyAddsManyMedians(b *testing.B) {
	b.StopTimer()
	h := New()
	for i := 0; i < b.N; i++ {
		n := rand.Int()
		b.StartTimer()
		h.Add(n)
		_ = h.Median()
		b.StopTimer()
	}
}
//go:build !fast
// +build !fast

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

package helpers

const (
	// SSHKeySize is the size (in bytes) of SSH key to create.
	// NOTE(review): 4096 is the conventional RSA key size in *bits*; confirm
	// the unit against the key-generation call site before relying on it.
	SSHKeySize = 4096

	// DefaultPkiKeySize is the default size in bytes of the PKI key.
	// NOTE(review): same unit concern as SSHKeySize.
	DefaultPkiKeySize = 4096
)
// Package fakefs contains fake implementations of interfaces from package io/fs
// from the standard library.
//
// It is recommended to fill all methods that shouldn't be called with:
//
//	panic("not implemented")
//
// in the body of the test, so that if the method is called the panic backtrace
// points to the method definition in the test.  See the package example.
package fakefs

import "io/fs"

// File is the [fs.File] for tests. Each method delegates to the
// corresponding On* hook.
type File struct {
	OnClose func() error
	OnRead  func(b []byte) (n int, err error)
	OnStat  func() (fi fs.FileInfo, err error)
}

// type check
var _ fs.File = (*File)(nil)

// Close implements the [fs.File] interface for *File.
func (f *File) Close() (err error) {
	return f.OnClose()
}

// Read implements the [fs.File] interface for *File.
func (f *File) Read(b []byte) (n int, err error) {
	return f.OnRead(b)
}

// Stat implements the [fs.File] interface for *File.
func (f *File) Stat() (fi fs.FileInfo, err error) {
	return f.OnStat()
}

// FS is the [fs.FS] for tests. Open delegates to OnOpen.
type FS struct {
	OnOpen func(name string) (fs.File, error)
}

// type check
var _ fs.FS = (*FS)(nil)

// Open implements the [fs.FS] interface for *FS.
func (fsys *FS) Open(name string) (f fs.File, err error) {
	return fsys.OnOpen(name)
}

// type check
var _ fs.GlobFS = (*GlobFS)(nil)

// GlobFS is the [fs.GlobFS] for tests. Each method delegates to the
// corresponding On* hook.
type GlobFS struct {
	OnOpen func(name string) (f fs.File, err error)
	OnGlob func(pattern string) (paths []string, err error)
}

// Open implements the [fs.GlobFS] interface for *GlobFS.
func (fsys *GlobFS) Open(name string) (f fs.File, err error) {
	return fsys.OnOpen(name)
}

// Glob implements the [fs.GlobFS] interface for *GlobFS.
func (fsys *GlobFS) Glob(pattern string) (paths []string, err error) {
	return fsys.OnGlob(pattern)
}

// type check
var _ fs.StatFS = (*StatFS)(nil)

// StatFS is the [fs.StatFS] for tests.
type StatFS struct { OnOpen func(name string) (f fs.File, err error) OnStat func(name string) (fi fs.FileInfo, err error) } // Open implements the [fs.StatFS] interface for *StatFS. func (fsys *StatFS) Open(name string) (f fs.File, err error) { return fsys.OnOpen(name) } // Stat implements the [fs.StatFS] interface for *StatFS. func (fsys *StatFS) Stat(name string) (fi fs.FileInfo, err error) { return fsys.OnStat(name) }
package ionic

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/url"
	"time"

	"github.com/ion-channel/ionic/community"
	"github.com/ion-channel/ionic/dependencies"
	"github.com/ion-channel/ionic/pagination"
	"github.com/ion-channel/ionic/products"
	"github.com/ion-channel/ionic/responses"
	"github.com/ion-channel/ionic/searches"
)

const (
	searchEndpoint = "v1/search"
)

// SearchMatch structure for holding multiple search response types
type SearchMatch struct {
	// Common fields shared by every match type must be explicitly defined here.
	Name       string    `json:"name"`
	CreatedAt  time.Time `json:"created_at,omitempty"`
	UpdatedAt  time.Time `json:"updated_at"`
	Confidence float32   `json:"confidence"`
	Version    string    `json:"version,omitempty"`
	Org        string    `json:"org,omitempty"`
	Type       string    `json:"type,omitempty"`
	ExternalID string    `json:"external_id,omitempty"`

	// Clean up the output: these pointers are omitted from JSON when nil.
	Vulnerabilities *interface{} `json:"vulnerabilities,omitempty"`
	Source          *interface{} `json:"source,omitempty"`
	References      *interface{} `json:"references,omitempty"`
	Aliases         *interface{} `json:"aliases,omitempty"`
	Dependencies    *interface{} `json:"dependencies,omitempty"`

	// Embedded concrete match types; at most one is populated per match.
	*community.Repo          `json:",omitempty"`
	*products.Product        `json:",omitempty"`
	*dependencies.Dependency `json:",omitempty"`
	*searches.Report         `json:",omitempty"`
}

// GetSearch performs a productidentifier search against the Ion API for the
// given query and "to be searched" (tbs) scope, returning the matches and the
// response metadata.
func (ic *IonClient) GetSearch(query, tbs, token string) ([]SearchMatch, *responses.Meta, error) {
	params := url.Values{}
	params.Set("q", query)
	params.Set("tbs", tbs)

	b, m, err := ic.Get(searchEndpoint, token, params, nil, pagination.Pagination{})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get productidentifiers search: %v", err.Error())
	}

	var results []SearchMatch
	err = json.Unmarshal(b, &results)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to unmarshal product search results: %v", err.Error())
	}

	return results, m, nil
}

// BulkSearch takes one or more query strings and a "to be searched" param, then performs a productidentifier search
// against the Ion API, returning a map of the original query string(s) to SearchMatch objects
func (ic *IonClient) BulkSearch(queries []string, tbs, token string) (map[string][]SearchMatch, error) {
	params := url.Values{}
	params.Set("tbs", tbs)

	body, err := json.Marshal(queries)
	if err != nil {
		// Message fixed: it previously read "session: failed to marshal login
		// body", copy-pasted from the session code.
		return nil, fmt.Errorf("failed to marshal bulk search queries: %v", err.Error())
	}

	buff := bytes.NewBuffer(body)
	b, err := ic.Post(searchEndpoint, token, params, *buff, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to get productidentifiers search: %v", err.Error())
	}

	var results map[string][]SearchMatch
	err = json.Unmarshal(b, &results)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal product search results: %v", err.Error())
	}

	return results, nil
}
package main

//import "time"

// OVERKILL_DAMAGE is the flat overkill damage constant.
// NOTE(review): not referenced in this file — confirm use elsewhere.
const OVERKILL_DAMAGE = 5

// OnKillCallback is invoked when a character dies; the returned FList carries
// follow-up effects to propagate.
type OnKillCallback func(u *BaseCharacter) FList

// BaseCharacter is the shared state for any damageable game entity.
type BaseCharacter struct {
	Object *Object
	Killed bool
	HP     float64

	onKill OnKillCallback

	MaxHP float64

	CurrentSkill *SkillUsing
}

// SkillUsing tracks an in-progress skill activation.
type SkillUsing struct {
	startIteration uint32
	prop           float64
}

// GetTeam returns the character's teammates.
// NOTE(review): always returns nil here — presumably overridden or unused.
func (a *BaseCharacter) GetTeam() []*BaseCharacter {
	return nil
}

// OnDestroy is the teardown hook; cleanup is currently disabled.
func (a *BaseCharacter) OnDestroy() {
	//a.Object = nil
	//a.sm = nil
}

// SetKilled marks the character killed (idempotent: a no-op once Killed is
// set), fires the onKill callback, and removes the underlying object.
// NOTE(review): `state` is assigned verbatim, so SetKilled(false, ...) would
// still run the kill path — confirm callers only ever pass true.
func (a *BaseCharacter) SetKilled(state bool, dmg *Damage, damageDealer *BaseCharacter) (list FList) {
	if !a.Killed {
		a.Killed = state
		if a.onKill != nil {
			list = list.Add(a.onKill(a))
		}
		a.Object.Remove()
	}
	return
}

// GetSkillProb scales baseChance down as the target's will approaches
// maxwill (up to a 75% reduction).
// NOTE(review): the maxwill==0 guard returns 10000, far above any sane
// probability range — confirm whether this sentinel is intentional.
func GetSkillProb(baseChance float64, will uint32, maxwill uint32) float64 {
	if maxwill == 0 {
		return 10000
	}
	return baseChance * (1 - (float64(will)/float64(maxwill))*0.75)
}

/*func (a *BaseCharacter) DealDamage(damageDealer *BaseCharacter, damage *Damage) (list FList) {
	damage.Target = a.Object.ID
	if a.Killed || a.Object.doRemove {
		return nil
	}
	//ai := a.Object.FindByComponent(confComponents.Fish)
	var onDeal, onTake FList

	if damage.Value > 0 && damage.Heal {
		damage.Value = -damage.Value
	}
	//DAMAGE => DAMAGE
	if damage.Value < 0 {
		if damage.Value > 0 {
			damage.Value = 0
		}
	}

	if damage.Type != confDamageType.DamageTypeWillOnly.Type {
		deltaInt := math.Round(damage.Value)
		//oldhp := a.HP
		a.HP += deltaInt
		if a.HP > a.MaxHP {
			a.HP = a.MaxHP
		}
		if a.HP < 0 {
			a.HP = 0
		}
		//server.logger.Debug("Changed hp from ", zap.Float64("old", oldhp), zap.Float64("new", a.HP), zap.Float64("delta", deltaInt))
		list = list.AddSingle(a.Object.Effect(confActions.HealthChange).
			V(float32(deltaInt)))
	}
	list = list.Add(onDeal, onTake)
	if a.HP <= 0 && a.Killed == false {
		if damageDealer != nil {
			pl := damageDealer.Object.FindByComponent(confComponents.Player)
			if pl != nil {
				fish := a.Object.FindByComponent(confComponents.Fish)
				if fish != nil {
					_, money := fish.(*Fish).getMoneyPrize(damageDealer.Object.session, pl.(*Player).currentGun.Damage)
					damageDealer.Object.session.NeedToKnow(pl.(*Player).AddMoney(float32(money)))
				}
			}
		}
		list = list.Add(a.Object.factory.RemoveObject(a.Object, true, false))
		//list = list.Add(a.SetKilled(true, damage, damageDealer))
		//server.logger.Debug("Killed")
	}
	return list
}*/

// DealProbDamage applies probability-based damage from the given gun: the
// target dies when the gun's computed kill probability beats a server random
// roll; otherwise only its HP is adjusted by the computed delta. On a kill
// the shooter's player is paid the fish's money prize (with a "Critical"
// broadcast for low-probability kills of a mostly-healthy target) and the
// object is removed.
func (a *BaseCharacter) DealProbDamage(damageDealer *BaseCharacter, gun *Gun) (list FList) {
	if a.Killed || a.Object.doRemove {
		return nil
	}
	// Emit a zero health-change effect first (presumably a "hit" marker for
	// the client — TODO confirm).
	list = list.AddSingle(a.Object.Effect(confActions.HealthChange).
		V(0))

	prob := gun.CalcProb(a.Object.FindByComponent(confComponents.Fish).(*Fish).FishConfig, a.Object.factory.run.timeline.RTP)
	deltaHp := gun.CalcHpDelta(a.Object.FindByComponent(confComponents.Fish).(*Fish).FishConfig, a.Object.BaseCharacter().HP, a.Object.BaseCharacter().MaxHP, a.Object.factory.run.timeline.RTP)

	oldHp := a.HP
	a.HP = a.HP + deltaHp
	list = list.AddSingle(a.Object.Effect(confActions.HealthChange).
		V(float32(deltaHp)))

	killed := prob > server.Rand()
	if killed {
		if damageDealer != nil {
			pl := damageDealer.Object.FindByComponent(confComponents.Player)
			if pl != nil {
				fish := a.Object.FindByComponent(confComponents.Fish)
				if fish != nil {
					// Broadcast a critical-kill effect when a healthy target
					// (>70% HP before the shot) dies against low odds (<11%).
					prop := oldHp / a.MaxHP
					if prop > 0.7 && prob < 0.11 {
						damageDealer.Object.session.run.team.NeedToKnow(a.Object.Effect(confActions.Critical))
					}
					_, money := fish.(*Fish).getMoneyPrize(damageDealer.Object.session, pl.(*Player).currentGun.Damage, a.Object.factory.run.RoomCoef, a.Object.factory.run.timeline.RTP)
					damageDealer.Object.session.NeedToKnow(pl.(*Player).AddMoney(float32(money)))
					//r := damageDealer.Object.session.run
					//timeline := r.timeline
					/*if fish.(*Fish).FishConfig.IsBoss {
						if timeline.CurrentScene.Name == "aquaman" {
							if !r.transition {
								r.transition = true
								r.async.DelayedCall(func() {
									r.transition = false
									r.StartScene("deadfish")
								}, time.Second*2)
							}
						}
					}*/
				}
			}
		}
		list = list.Add(a.Object.factory.RemoveObject(a.Object, true, false))
	}
	return
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package distsql_test

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/testkit"
	"github.com/stretchr/testify/require"
)

// TestDistsqlPartitionTableConcurrency checks that the distsql layer sizes the
// concurrency of a select request by the number of partition key ranges it
// covers: 1 for a non-partitioned table, one per partition for a table with
// fewer partitions than the default scan concurrency, and capped at
// variable.DefDistSQLScanConcurrency otherwise. The check itself happens in a
// hook injected through the query context ("CheckSelectRequestHook"), which
// the test kit invokes with the outgoing kv.Request.
func TestDistsqlPartitionTableConcurrency(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1, t2, t3")
	// t1: non-partitioned; t2: 10 range partitions; t3: 20 range partitions.
	tk.MustExec("create table t1(id int primary key , val int)")
	partitions := make([]string, 0, 20)
	for i := 0; i < 20; i++ {
		pid := i + 1
		partitions = append(partitions,
			fmt.Sprintf("PARTITION p%d VALUES LESS THAN (%d00)", pid, pid))
	}
	tk.MustExec("create table t2(id int primary key, val int)" +
		"partition by range(id)" +
		"(" + strings.Join(partitions[:10], ",") + ")")
	tk.MustExec("create table t3(id int primary key, val int)" +
		"partition by range(id)" +
		"(" + strings.Join(partitions, ",") + ")")
	// One row per partition range (ids 0, 50, 100, ...) in every table.
	for i := 0; i < 20; i++ {
		for _, tbl := range []string{"t1", "t2", "t3"} {
			tk.MustExec(fmt.Sprintf("insert into %s values(%d, %d)", tbl, i*50, i*50))
		}
	}
	tk.MustExec("analyze table t1, t2, t3")
	// non-partitioned table checker
	ctx1 := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) {
		require.Equal(t, req.KeyRanges.PartitionNum(), 1)
		require.Equal(t, req.Concurrency, 1)
	})
	// 10-ranges-partitioned table checker
	ctx2 := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) {
		require.Equal(t, req.KeyRanges.PartitionNum(), 10)
		require.Equal(t, req.Concurrency, 10)
	})
	// 20-ranges-partitioned table checker: concurrency is capped at the
	// default distsql scan concurrency rather than growing per partition.
	ctx3 := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) {
		require.Equal(t, req.KeyRanges.PartitionNum(), 20)
		require.Equal(t, req.Concurrency, variable.DefDistSQLScanConcurrency)
	})
	ctxs := []context.Context{ctx1, ctx2, ctx3}
	for i, tbl := range []string{"t1", "t2", "t3"} {
		ctx := ctxs[i]
		// If order by is added here, the concurrency is always equal to 1.
		// Because we will use different kv.Request for each partition in TableReader.
		tk.MustQueryWithContext(ctx, fmt.Sprintf("select * from %s limit 1", tbl))
		tk.MustQueryWithContext(ctx, fmt.Sprintf("select * from %s limit 5", tbl))
		tk.MustQueryWithContext(ctx, fmt.Sprintf("select * from %s limit 1", tbl))
		tk.MustQueryWithContext(ctx, fmt.Sprintf("select * from %s limit 5", tbl))
	}
}
package main import ( "encoding/json" "fmt" "io" "log" "net/http" "os" "strings" "github.com/gorilla/mux" ) type Gpx = struct { Name string Date string Description string Track_points [][]float64 } func Map(input [][]float64, f func([]float64) string) []string { result := make([]string, len(input)) for index, element := range input { result[index] = f(element) } return result } func CreateTrackPointXml(trackPoint []float64) string { return fmt.Sprintf( ` <trkpt lat="%f" lon="%f"> </trkpt>`, trackPoint[0], trackPoint[1]) } func CreateGpxContent(gpx *Gpx) string { trackPointsXml := strings.Join(Map(gpx.Track_points, CreateTrackPointXml), "\n") return fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?> <gpx creator="maps0" version="1.1" xmlns="http://www.topografix.com/GPX/1/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd http://www.garmin.com/xmlschemas/GpxExtensions/v3 http://www.garmin.com/xmlschemas/GpxExtensionsv3.xsd" xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1" xmlns:gpxx="http://www.garmin.com/xmlschemas/GpxExtensions/v3"> <metadata> <name>%s</name> <desc>%s</desc> <time>%s</time> </metadata> <trk> <name>%s</name> <desc>%s</desc> <trkseg> %s </trkseg> </trk> </gpx>`, gpx.Name, gpx.Description, gpx.Date, gpx.Name, gpx.Description, trackPointsXml) } func SaveGpx(w http.ResponseWriter, r *http.Request) { if len(r.Header["X-Csrftoken"]) == 0 || r.Header["X-Csrftoken"][0] != token { log.Print("No CSRF Token sent") w.WriteHeader(403) return } w.Header().Add("Content-Type", "application/json") w.Header().Add("Access-Control-Allow-Origin", "*") buffer := make([]uint8, 1024*1024) count, err := r.Body.Read(buffer) if err != nil && err != io.EOF { log.Fatal(err) w.Write([]byte("{}")) return } var gpx Gpx err = 
json.Unmarshal(buffer[0:count], &gpx) gpx.Name = strings.Trim(gpx.Name, " \t\n") content := CreateGpxContent(&gpx) fileName := gpx.Date + "-" + gpx.Name + ".gpx" fh, err := os.Create(GpxBase + fileName) if err != nil { log.Fatal(err) } else { fh.Write([]byte(content)) fh.Close() log.Print("GPX file " + GpxBase + filename + " created") } w.Write([]byte("{}")) } func GetGpx(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) name := vars["name"] fh, err := os.Open(GpxBase + name) if err != nil { w.Header().Add("Content-Type", "text/plain") w.WriteHeader(404) w.Write([]byte("Could not open gpx file")) } w.Header().Add("Content-Type", "text/xml") w.Header().Add("Access-Control-Allow-Origin", "*") buffer := make([]uint8, 100*1024) for { count, err := fh.Read(buffer) if err != nil || count == 0 { break } w.Write(buffer[0:count]) } fh.Close() } func LoadGpxList(w http.ResponseWriter, r *http.Request) { fh, err := os.Open(GpxBase) if err != nil { w.Header().Add("Content-Type", "text/plain") w.WriteHeader(404) w.Write([]byte("Could not open gpx store directoy")) } names, err := fh.Readdirnames(-1) if err != nil { w.Header().Add("Content-Type", "text/plain") w.WriteHeader(404) w.Write([]byte("Could not read gpx store directoy")) } w.Header().Add("Content-Type", "application/json") w.Header().Add("Access-Control-Allow-Origin", "*") jsonResponse, err := json.Marshal(names) if err != nil { w.Header().Add("Content-Type", "text/plain") w.WriteHeader(404) w.Write([]byte("Could not marshal result")) } w.Write([]byte(jsonResponse)) }
/* * @Description: * @Author: JiaYe * @Date: 2021-04-12 15:30:12 * @LastEditTime: 2021-04-12 16:03:44 * @LastEditors: JiaYe * @Descripttion: * @version: */ package main import "fmt" //定义结构体Dog type Dog struct { name string } func (dog Dog) call() { fmt.Printf("%s: 汪汪\n", dog.name) } //为结构体Dog定义方法Setname //结构体为值传递,修改数据需使用指针 func (dog *Dog) Setname(name string) { dog.name = name } //调用 /* 调用方法通过自定义类型的对象.方法名进行调用,在调用的过程中对象传递(赋值)给方法的接受者(值类型,拷贝) */ func main() { dog := Dog{"dahuang"} dog.call() dog.Setname("mingshiyin") dog.call() }
package main import ( "fmt" "log" "net" "os" "os/signal" i "puppet_monitoring/impl" "puppet_monitoring/rpc" "runtime" "strconv" "syscall" ) // global variable (load once) var settings = i.Settings{}.LoadSettings() // runs master process func run_master_process() { log.Printf("PID:%v\n", os.Getpid()) // limit CPU usage runtime.GOMAXPROCS(1) // check for existing pid file if check_pid_file() { fmt.Println(settings.PidFile + " already exists! (other instance run?)") os.Exit(1) } create_pid_file() defer kill_pid() // creating server socket var laddr, err = net.ResolveTCPAddr("tcp", settings.Ip+":"+strconv.Itoa(settings.Port)) ln, err := net.ListenTCP("tcp", laddr) if err != nil { panic(err) } defer ln.Close() log.Println("listening on", ln.Addr()) // creating service to handle server tcp socket service := i.Service{}.NewService() // creating puppet environment collection envs := i.EnvironmentCollection{}.NewEnvironmentCollection() envs.Conf = &settings // setup service params service.SetEnvCollection(&envs) // and run as go routine go service.HandleListener(ln) // creating and starting the rpc server to handle commands from outside rpcsrv := rpc.RPCServer{Envs: &envs} rpcsrv.CreateServer(settings) // handle SIGINT and SIGTERM ch := make(chan os.Signal) var sig os.Signal signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) // awaiting signals select { case sig = <-ch: log.Println(sig) } // tell service to stop service.Stop() } // Check if pid file exists func check_pid_file() bool { var _, err = os.Stat(settings.PidFile) return err == nil } // Create pid file func create_pid_file() { // create file var fd, err = os.Create(settings.PidFile) fd.Close() if err != nil { fmt.Println("Error creating pid file!") panic(err) } // set rw-r--r-- os.Chmod(settings.PidFile, 0644) fd, err = os.OpenFile(settings.PidFile, os.O_RDWR, 0644) defer fd.Close() // writing current process id var _, werr = fd.WriteString(strconv.Itoa(os.Getpid())) if werr != nil { fmt.Println("Error write pid 
file!") panic(werr) } fd.Sync() } // Remove pid file func kill_pid() { err := os.Remove(settings.PidFile) if err != nil { log.Println(err) } }
package main import ( "encoding/json" "net/http" "strconv" auth "github.com/ahmedash95/authSDK" "github.com/gorilla/mux" ) func GetPostComments(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) postID := vars["id"] var comments []Comment GetDB().Where("post_id = ?", postID).Find(&comments) jsonResponse(w, comments, 200) } func CreateComment(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) var comment Comment err := decoder.Decode(&comment) if err != nil { jsonResponse(w, err, 400) return } validation_errors := comment.Validate() if validation_errors != nil { jsonResponse(w, validation_errors, 422) return } user := auth.GetUser(r) comment.UserID = user.ID GetDB().Create(&comment) jsonResponse(w, comment, 200) } func DeleteComment(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) commentID, _ := strconv.Atoi(vars["id"]) var comment Comment GetDB().Find(&comment, commentID) GetDB().Delete(&comment) jsonResponse(w, nil, 200) } func ShowUserComments(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) userID := vars["id"] var comments []Comment GetDB().Where("user_id = ?", userID).Find(&comments) jsonResponse(w, comments, 200) }
package problem0354 func maxEnvelopes(envelopes [][]int) int { if len(envelopes) <= 1 { return len(envelopes) } quickSort(envelopes) dolls := []int{envelopes[0][1]} for i := 1; i < len(envelopes); i++ { num := envelopes[i][1] if num > dolls[len(dolls)-1] { dolls = append(dolls, num) } else { pos := search(dolls, num) dolls[pos] = num } } return len(dolls) } func search(dolls []int, target int) int { low, high := 0, len(dolls)-1 for low <= high { mid := (low + high) / 2 if dolls[mid] == target { return mid } else if dolls[mid] < target { low = mid + 1 } else { high = mid - 1 } } return low } func compare(a, b []int) bool { if a[0] == b[0] { return a[1] > b[1] } return a[0] < b[0] } func quickSort(arr [][]int) { quickSortRecu(arr, 0, len(arr)-1) } func quickSortRecu(arr [][]int, left int, right int) { if left >= right { return } pivot := partition(arr, left, right) quickSortRecu(arr, left, pivot-1) quickSortRecu(arr, pivot+1, right) } func partition(arr [][]int, left, right int) int { pivot := arr[right] i := left for j := left; j <= right-1; j++ { if compare(arr[j], pivot) { arr[i], arr[j] = arr[j], arr[i] i++ } } arr[i], arr[right] = arr[right], arr[i] return i }
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expression

import (
	"testing"
	"time"

	"github.com/pingcap/tidb/parser/charset"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/codec"
	"github.com/pingcap/tidb/util/collate"
	"github.com/pingcap/tipb/go-tipb"
	"github.com/stretchr/testify/require"
)

// TestPBToExpr exercises the error paths of PBToExpr: truncated datum
// encodings must be rejected, and a scalar-function expression whose
// ValueList child carries a truncated or signature-mismatched value must
// fail to decode.
func TestPBToExpr(t *testing.T) {
	sc := new(stmtctx.StatementContext)
	fieldTps := make([]*types.FieldType, 1)
	ds := []types.Datum{
		types.NewIntDatum(1),
		types.NewUintDatum(1),
		types.NewFloat64Datum(1),
		types.NewDecimalDatum(newMyDecimal(t, "1")),
		types.NewDurationDatum(newDuration(time.Second)),
	}
	// Cutting each encoded datum in half must yield a decode error.
	for _, d := range ds {
		expr := datumExpr(t, d)
		expr.Val = expr.Val[:len(expr.Val)/2]
		_, err := PBToExpr(expr, fieldTps, sc)
		require.Error(t, err)
	}

	// An empty ValueList child is accepted.
	expr := &tipb.Expr{
		Tp: tipb.ExprType_ScalarFunc,
		Children: []*tipb.Expr{
			{
				Tp: tipb.ExprType_ValueList,
			},
		},
	}
	_, err := PBToExpr(expr, fieldTps, sc)
	require.NoError(t, err)

	// A ValueList whose encoded value is truncated must be rejected.
	val := make([]byte, 0, 32)
	val = codec.EncodeInt(val, 1)
	expr = &tipb.Expr{
		Tp: tipb.ExprType_ScalarFunc,
		Children: []*tipb.Expr{
			{
				Tp:  tipb.ExprType_ValueList,
				Val: val[:len(val)/2],
			},
		},
	}
	_, err = PBToExpr(expr, fieldTps, sc)
	require.Error(t, err)

	// A well-formed ValueList child under an AbsInt signature is still an
	// error (ValueList is not a valid argument for that signature).
	expr = &tipb.Expr{
		Tp: tipb.ExprType_ScalarFunc,
		Children: []*tipb.Expr{
			{
				Tp:  tipb.ExprType_ValueList,
				Val: val,
			},
		},
		Sig:       tipb.ScalarFuncSig_AbsInt,
		FieldType: ToPBFieldType(newIntFieldType()),
	}
	_, err = PBToExpr(expr, fieldTps, sc)
	require.Error(t, err)
}

// TestEval test expr.Eval().
func TestEval(t *testing.T) {
	row := chunk.MutRowFromDatums([]types.Datum{types.NewDatum(100)}).ToRow()
	fieldTps := make([]*types.FieldType, 1)
	fieldTps[0] = types.NewFieldType(mysql.TypeLonglong)
	// Table of protobuf expressions and the datum each must evaluate to.
	tests := []struct {
		expr   *tipb.Expr
		result types.Datum
	}{
		// Datums.
		{
			datumExpr(t, types.NewFloat32Datum(1.1)),
			types.NewFloat32Datum(1.1),
		},
		{
			datumExpr(t, types.NewFloat64Datum(1.1)),
			types.NewFloat64Datum(1.1),
		},
		{
			datumExpr(t, types.NewIntDatum(1)),
			types.NewIntDatum(1),
		},
		{
			datumExpr(t, types.NewUintDatum(1)),
			types.NewUintDatum(1),
		},
		{
			datumExpr(t, types.NewBytesDatum([]byte("abc"))),
			types.NewBytesDatum([]byte("abc")),
		},
		{
			datumExpr(t, types.NewStringDatum("abc")),
			types.NewStringDatum("abc"),
		},
		{
			datumExpr(t, types.Datum{}),
			types.Datum{},
		},
		{
			datumExpr(t, types.NewDurationDatum(types.Duration{Duration: time.Hour})),
			types.NewDurationDatum(types.Duration{Duration: time.Hour}),
		},
		{
			datumExpr(t, types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1))),
			types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1)),
		},
		// Columns.
		{
			columnExpr(0),
			types.NewIntDatum(100),
		},
		// Scalar Functions.
{ scalarFunctionExpr(tipb.ScalarFuncSig_JsonDepthSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `true`), ), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonDepthSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `[10, {"a": 20}]`), ), types.NewIntDatum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonStorageSizeSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `[{"a":{"a":1},"b":2}]`), ), types.NewIntDatum(82), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonSearchSig, toPBFieldType(newJSONFieldType()), jsonDatumExpr(t, `["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]`), datumExpr(t, types.NewBytesDatum([]byte(`all`))), datumExpr(t, types.NewBytesDatum([]byte(`10`))), datumExpr(t, types.NewBytesDatum([]byte(`\`))), datumExpr(t, types.NewBytesDatum([]byte(`$**.k`))), ), newJSONDatum(t, `"$[1][0].k"`), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewStringDatum("2333"))), 
types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(2)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEInt, toPBFieldType(newIntFieldType()), datumExpr(t, 
types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(2)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), 
types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GTDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, 
types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("2")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GTJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[2]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[2]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), 
jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DurationIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LeftShift, ToPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(-1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsUInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewUintDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(-1.23))), types.NewFloat64Datum(1.23), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "-1.23")))), types.NewDecimalDatum(newMyDecimal(t, "1.23")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalAnd, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalOr, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalXor, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, 
types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitAndSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitOrSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitXorSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitNegSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, // { // scalarFunctionExpr(tipb.ScalarFuncSig_InTime, // toPBFieldType(newIntFieldType()), datumExpr(t, types.NewTimeDatum(types.ZeroDate)), datumExpr(t, types.NewTimeDatum(types.ZeroDate))), // types.NewIntDatum(1), // }, { scalarFunctionExpr(tipb.ScalarFuncSig_InDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, 
{ scalarFunctionExpr(tipb.ScalarFuncSig_IfInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "2")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewStringDatum("1"))), types.NewStringDatum("1"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewStringDatum("2"))), types.NewStringDatum("2"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewDurationDatum(newDuration(time.Second)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewDurationDatum(newDuration(time.Second * 2)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsDuration, 
toPBFieldType(newDurFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDurationAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*1)))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewStringDatum("1"))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastTimeAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewTimeDatum(newDateTime(t, "2000-01-01")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewIntDatum(20000101))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewFloat64Datum(20000101))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "20000101")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewStringDatum("20000101"))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusInt, 
toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "3")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "-1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "2")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilIntToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), 
types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilIntToDec, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilDecToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorIntToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorIntToDec, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorDecToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewStringDatum("1"))), types.NewStringDatum("1"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceDuration, toPBFieldType(newDurFieldType()), datumExpr(t, 
types.NewDurationDatum(newDuration(time.Second)))), types.NewDurationDatum(newDuration(time.Second)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewTimeDatum(newDateTime(t, "2000-01-01")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenInt, toPBFieldType(newIntFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenReal, toPBFieldType(newRealFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenDecimal, toPBFieldType(newDecimalFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenDuration, toPBFieldType(newDurFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenTime, toPBFieldType(newDateFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenJson, toPBFieldType(newJSONFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsFalse, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(0), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsFalse, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(0), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsTrue, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsTrue, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, } sc := new(stmtctx.StatementContext) for _, tt := range tests { expr, err := PBToExpr(tt.expr, fieldTps, sc) require.NoError(t, err) result, err := expr.Eval(row) require.NoError(t, err) require.Equal(t, tt.result.Kind(), result.Kind()) cmp, err := result.Compare(sc, &tt.result, collate.GetCollator(fieldTps[0].GetCollate())) 
require.NoError(t, err) require.Equal(t, 0, cmp) } } func TestPBToExprWithNewCollation(t *testing.T) { collate.SetNewCollationEnabledForTest(false) sc := new(stmtctx.StatementContext) fieldTps := make([]*types.FieldType, 1) cases := []struct { name string expName string id int32 pbID int32 }{ {"utf8_general_ci", "utf8_general_ci", 33, 33}, {"UTF8MB4_BIN", "utf8mb4_bin", 46, 46}, {"utf8mb4_bin", "utf8mb4_bin", 46, 46}, {"utf8mb4_general_ci", "utf8mb4_general_ci", 45, 45}, {"", "utf8mb4_bin", 46, 46}, {"some_error_collation", "utf8mb4_bin", 46, 46}, {"utf8_unicode_ci", "utf8_unicode_ci", 192, 192}, {"utf8mb4_unicode_ci", "utf8mb4_unicode_ci", 224, 224}, {"utf8mb4_zh_pinyin_tidb_as_cs", "utf8mb4_zh_pinyin_tidb_as_cs", 2048, 2048}, } for _, cs := range cases { ft := types.NewFieldType(mysql.TypeString) ft.SetCollate(cs.name) expr := new(tipb.Expr) expr.Tp = tipb.ExprType_String expr.FieldType = toPBFieldType(ft) require.Equal(t, cs.pbID, expr.FieldType.Collate) e, err := PBToExpr(expr, fieldTps, sc) require.NoError(t, err) cons, ok := e.(*Constant) require.True(t, ok) require.Equal(t, cs.expName, cons.Value.Collation()) } collate.SetNewCollationEnabledForTest(true) for _, cs := range cases { ft := types.NewFieldType(mysql.TypeString) ft.SetCollate(cs.name) expr := new(tipb.Expr) expr.Tp = tipb.ExprType_String expr.FieldType = toPBFieldType(ft) require.Equal(t, -cs.pbID, expr.FieldType.Collate) e, err := PBToExpr(expr, fieldTps, sc) require.NoError(t, err) cons, ok := e.(*Constant) require.True(t, ok) require.Equal(t, cs.expName, cons.Value.Collation()) } } // Test convert various scalar functions. 
func TestPBToScalarFuncExpr(t *testing.T) {
	sc := new(stmtctx.StatementContext)
	fieldTps := make([]*types.FieldType, 1)
	// Regexp signatures are round-tripped without arguments here; only the
	// conversion itself is checked, not evaluation.
	exprs := []*tipb.Expr{
		{
			Tp:        tipb.ExprType_ScalarFunc,
			Sig:       tipb.ScalarFuncSig_RegexpSig,
			FieldType: ToPBFieldType(newStringFieldType()),
		},
		{
			Tp:        tipb.ExprType_ScalarFunc,
			Sig:       tipb.ScalarFuncSig_RegexpUTF8Sig,
			FieldType: ToPBFieldType(newStringFieldType()),
		},
	}
	for _, expr := range exprs {
		_, err := PBToExpr(expr, fieldTps, sc)
		require.NoError(t, err)
	}
}

// datumExpr encodes a datum into the equivalent *tipb.Expr constant node,
// choosing the protobuf expression type and value encoding from the datum kind.
// Unknown kinds fall through to ExprType_Null.
func datumExpr(t *testing.T, d types.Datum) *tipb.Expr {
	expr := new(tipb.Expr)
	switch d.Kind() {
	case types.KindInt64:
		expr.Tp = tipb.ExprType_Int64
		expr.FieldType = toPBFieldType(types.NewFieldType(mysql.TypeLonglong))
		expr.Val = codec.EncodeInt(nil, d.GetInt64())
	case types.KindUint64:
		expr.Tp = tipb.ExprType_Uint64
		expr.FieldType = toPBFieldType(types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlag(mysql.UnsignedFlag).BuildP())
		expr.Val = codec.EncodeUint(nil, d.GetUint64())
	case types.KindString:
		expr.Tp = tipb.ExprType_String
		expr.FieldType = toPBFieldType(types.NewFieldType(mysql.TypeString))
		expr.Val = d.GetBytes()
	case types.KindBytes:
		// NOTE(review): no FieldType is attached for raw bytes, unlike KindString.
		expr.Tp = tipb.ExprType_Bytes
		expr.Val = d.GetBytes()
	case types.KindFloat32:
		expr.Tp = tipb.ExprType_Float32
		expr.Val = codec.EncodeFloat(nil, d.GetFloat64())
	case types.KindFloat64:
		expr.Tp = tipb.ExprType_Float64
		expr.Val = codec.EncodeFloat(nil, d.GetFloat64())
	case types.KindMysqlDuration:
		expr.Tp = tipb.ExprType_MysqlDuration
		expr.Val = codec.EncodeInt(nil, int64(d.GetMysqlDuration().Duration))
	case types.KindMysqlDecimal:
		expr.Tp = tipb.ExprType_MysqlDecimal
		var err error
		expr.Val, err = codec.EncodeDecimal(nil, d.GetMysqlDecimal(), d.Length(), d.Frac())
		require.NoError(t, err)
	case types.KindMysqlJSON:
		expr.Tp = tipb.ExprType_MysqlJson
		var err error
		expr.Val = make([]byte, 0, 1024)
		expr.Val, err = codec.EncodeValue(nil, expr.Val, d)
		require.NoError(t, err)
	case types.KindMysqlTime:
		expr.Tp = tipb.ExprType_MysqlTime
		var err error
		expr.Val, err = codec.EncodeMySQLTime(nil, d.GetMysqlTime(), mysql.TypeUnspecified, nil)
		require.NoError(t, err)
		expr.FieldType = ToPBFieldType(newDateFieldType())
	default:
		expr.Tp = tipb.ExprType_Null
	}
	return expr
}

// newJSONDatum parses s as binary JSON and wraps it in a datum, failing the
// test on parse error.
func newJSONDatum(t *testing.T, s string) (d types.Datum) {
	j, err := types.ParseBinaryJSONFromString(s)
	require.NoError(t, err)
	d.SetMysqlJSON(j)
	return d
}

// jsonDatumExpr is a convenience wrapper: JSON string -> datum -> pb expr.
func jsonDatumExpr(t *testing.T, s string) *tipb.Expr {
	return datumExpr(t, newJSONDatum(t, s))
}

// columnExpr builds a column-reference pb expression for the given column ID.
func columnExpr(columnID int64) *tipb.Expr {
	expr := new(tipb.Expr)
	expr.Tp = tipb.ExprType_ColumnRef
	expr.Val = codec.EncodeInt(nil, columnID)
	return expr
}

// toPBFieldType converts *types.FieldType to *tipb.FieldType.
func toPBFieldType(ft *types.FieldType) *tipb.FieldType {
	return &tipb.FieldType{
		Tp:      int32(ft.GetType()),
		Flag:    uint32(ft.GetFlag()),
		Flen:    int32(ft.GetFlen()),
		Decimal: int32(ft.GetDecimal()),
		Charset: ft.GetCharset(),
		Collate: collate.CollationToProto(ft.GetCollate()),
		Elems:   ft.GetElems(),
	}
}

// newMyDecimal parses s into a MyDecimal, failing the test on error.
func newMyDecimal(t *testing.T, s string) *types.MyDecimal {
	d := new(types.MyDecimal)
	require.Nil(t, d.FromString([]byte(s)))
	return d
}

// newDuration wraps a time.Duration in a types.Duration with the default fsp.
func newDuration(dur time.Duration) types.Duration {
	return types.Duration{
		Duration: dur,
		Fsp:      types.DefaultFsp,
	}
}

// newDateTime parses a date string (e.g. "2000-01-01") into types.Time.
func newDateTime(t *testing.T, s string) types.Time {
	tt, err := types.ParseDate(nil, s)
	require.NoError(t, err)
	return tt
}

// The helpers below build the FieldTypes used throughout these tests.

func newDateFieldType() *types.FieldType {
	return types.NewFieldType(mysql.TypeDate)
}

func newIntFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlag(mysql.BinaryFlag).SetFlen(mysql.MaxIntWidth).BuildP()
}

func newDurFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeDuration).SetDecimal(types.DefaultFsp).BuildP()
}

func newStringFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlen(types.UnspecifiedLength).BuildP()
}

func newRealFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).SetFlen(types.UnspecifiedLength).BuildP()
}

func newDecimalFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlen(types.UnspecifiedLength).BuildP()
}

func newJSONFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeJSON).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP()
}

func newFloatFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP()
}

func newBinaryLiteralFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeBit).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP()
}

func newBlobFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeBlob).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP()
}

func newEnumFieldType() *types.FieldType {
	return types.NewFieldTypeBuilder().SetType(mysql.TypeEnum).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP()
}

// scalarFunctionExpr builds a scalar-function pb expression with the given
// signature, return type, and child argument expressions.
func scalarFunctionExpr(sigCode tipb.ScalarFuncSig, retType *tipb.FieldType, args ...*tipb.Expr) *tipb.Expr {
	return &tipb.Expr{
		Tp:        tipb.ExprType_ScalarFunc,
		Sig:       sigCode,
		Children:  args,
		FieldType: retType,
	}
}
package token type TokenService struct { }
package utils

import (
	"bytes"
	"encoding/hex"
	"io"
	"log"
	"math"
	"math/rand"
	"mime/multipart"
	"os"
	"strconv"
	"strings"
	"time"
)

// RandomInit seeds the package-global math/rand source with the current time.
// NOTE(review): several functions below also reseed on every call, which is
// both redundant with this and weakens randomness for rapid calls — confirm
// intent.
func RandomInit() {
	rand.Seed(time.Now().UnixNano())
}

// Random returns a pseudo-random int64 in [min, max). Returns min when
// min == max. NOTE(review): panics if max < min (rand.Int63n requires a
// positive argument) — callers must pass min <= max.
func Random(min, max int64) int64 {
	if min == max {
		return min
	}
	//rand.Seed(time.Now().UnixNano())
	//fmt.Println("["+strconv.FormatInt(min, 10)+"]"+"["+strconv.FormatInt(max, 10)+"]")
	return rand.Int63n(max-min) + min
}

// RandomFloat64 returns a pseudo-random float64 in [min, max). Treats the two
// bounds as equal (and returns min) when they differ by less than 1e-8.
func RandomFloat64(min, max float64) float64 {
	// Float comparison with an epsilon instead of ==.
	if math.Abs(min-max) < 0.00000001 {
		return min
	}
	//rand.Seed(time.Now().UnixNano())
	//fmt.Println("["+strconv.FormatInt(min, 10)+"]"+"["+strconv.FormatInt(max, 10)+"]")
	return rand.Float64()*(max-min) + min
}

// ////RandomString 在数字、大写字母、小写字母范围内生成length位的随机字符串,所有字符出现的概率相同
//func RandomDigitStr(length int32) string {
//	// 48 ~ 57 数字
//	// 65 ~ 90 A ~ Z
//	// 97 ~ 122 a ~ z
//	// 一共62个字符,在0~61进行随机,小于10时,在数字范围随机,
//	// 小于36在大写范围内随机,其他在小写范围随机
//	rand.Seed(time.Now().UnixNano())
//	result := make([]string, 0, length)
//	var i int32
//	for i = 0; i < length; i++ {
//		t := rand.Intn(62)
//		if t < 10 {
//			result = append(result, strconv.Itoa(t))
//		} else if t < 36 {
//			result = append(result, string(rand.Intn(26)+65))
//		} else {
//			result = append(result, string(rand.Intn(26)+97))
//		}
//	}
//	return strings.Join(result, "")
//}

// RandomDigitStrOnlyNum returns a random string of `length` decimal digits
// ('0'-'9'), each digit uniformly distributed.
func RandomDigitStrOnlyNum(length int32) string {
	// Each iteration draws an independent digit in [0, 10).
	rand.Seed(time.Now().UnixNano())
	result := make([]string, 0, length)
	var i int32
	for i = 0; i < length; i++ {
		t := rand.Intn(10)
		result = append(result, strconv.Itoa(t))
	}
	return strings.Join(result, "")
}

//func RandomDigitStrOnlyAlphabet(length int32) string {
//	// 97 ~ 122 a ~ z
//	rand.Seed(time.Now().UnixNano())
//	result := make([]string, 0, length)
//	var i int32
//	for i = 0; i < length; i++ {
//		result = append(result, string(rand.Intn(26)+97))
//	}
//	return strings.Join(result, "")
//}

//func RandomDigitStrOnlyAlphabetUpper(length int32) string {
//	// 65 ~ 90 A ~ Z
//	rand.Seed(time.Now().UnixNano())
//	result := make([]string, 0, length)
//	var i int32
//	for i = 0; i < length; i++ {
//		result = append(result, string(rand.Intn(26)+65))
//	}
//	return strings.Join(result, "")
//}

// RandomDigitHex returns the hex encoding of `length` random bytes, so the
// returned string has 2*length hex characters.
// NOTE(review): rand.Intn(255) yields [0, 255), i.e. byte 0xFF can never
// occur — presumably Intn(256) was intended; confirm before changing.
func RandomDigitHex(length int32) string {
	rand.Seed(time.Now().UnixNano())
	result := make([]byte, 0, length)
	var i int32
	for i = 0; i < length; i++ {
		t := rand.Intn(255)
		result = append(result, byte(t))
	}
	return hex.EncodeToString(result)
}

// RandomDigit returns a random integer with exactly `digit` decimal digits.
// Only 1..19 digits are supported (int64 range); out-of-range input yields 0.
func RandomDigit(digit int32) int64 {
	if digit <= 0 {
		return 0
	} else if digit > 19 {
		return 0
	}
	min := int64(math.Pow10(int(digit) - 1))
	max := int64(math.Pow10(int(digit))) - 1
	return Random(min, max)
}

// RandomStringLower returns a random string of length l drawn from digits and
// lowercase letters, using a freshly seeded local source.
func RandomStringLower(l int) string {
	str := "0123456789abcdefghijklmnopqrstuvwxyz"
	bytesSlice := []byte(str)
	result := []byte{}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := 0; i < l; i++ {
		result = append(result, bytesSlice[r.Intn(len(bytesSlice))])
	}
	return string(result)
}

// CheckNPrintError logs the error and exits the process if err is non-nil.
// NOTE(review): despite the "print" name this calls log.Fatalf, which
// terminates the program — callers must expect that.
func CheckNPrintError(err error) {
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
}

// PathExists reports whether the given path exists. A non-"not exist" Stat
// error is returned as (false, err).
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// WriteHttpFile builds a multipart/form-data request body containing the file
// at `filepath` under form key `httpFileKey` plus the extra string fields in
// otherHttpParams. It returns the body reader and the Content-Type (with
// boundary) to send.
// NOTE(review): the io.Copy error is only logged, not returned — a partial
// copy would be silently sent; confirm whether that is acceptable.
func WriteHttpFile(filepath, httpFileKey string, otherHttpParams map[string]string) (data io.Reader, contentType string, err error) {
	file, err := os.Open(filepath)
	if err != nil {
		return nil, "", err
	}
	defer file.Close()
	body := bytes.Buffer{}
	writer := multipart.NewWriter(&body)
	// Deferred Close writes the terminating boundary into body before the
	// caller reads it (body is returned by pointer).
	defer writer.Close()
	part, err := writer.CreateFormFile(httpFileKey, filepath)
	if err != nil {
		return nil, "", err
	}
	_, err = io.Copy(part, file)
	log.Printf("%v\n", err)
	for key, val := range otherHttpParams {
		log.Printf("field=[%v]->[%v]\n", key, val)
		_ = writer.WriteField(key, val)
	}
	return &body, writer.FormDataContentType(), nil
}

// BetweenInt32 clamps value into [min, max]; the bounds are swapped first if
// given in the wrong order.
func BetweenInt32(min, max, value int32) int32 {
	if min > max {
		t := min
		min = max
		max = t
	}
	if value >= min && value <= max {
		return value
	} else if value < min {
		return min
	}
	return max
}

// BetweenInt64 clamps value into [min, max]; the bounds are swapped first if
// given in the wrong order.
func BetweenInt64(min, max, value int64) int64 {
	if min > max {
		t := min
		min = max
		max = t
	}
	if value >= min && value <= max {
		return value
	} else if value < min {
		return min
	}
	return max
}

// AtleastInt64 returns value, but no less than min.
func AtleastInt64(min, value int64) int64 {
	if value >= min {
		return value
	}
	return min
}

// FixDig pads str with spaceStr until it is at least n characters long;
// padding goes on the left when isLeft is true, otherwise on the right.
// Length is measured in bytes (len), not runes.
func FixDig(str string, n int, isLeft bool, spaceStr string) string {
	lenStr := len(str)
	str2 := str
	if lenStr < n {
		for i := 0; i < n-lenStr; i++ {
			if isLeft {
				str2 = spaceStr + str2
			} else {
				str2 += spaceStr
			}
		}
	}
	return str2
}

// Goooooooooooooooo runs loopFunc(args...) once in a new goroutine, recovering
// (and logging) any panic so the process keeps running.
func Goooooooooooooooo(
	loopFunc func(...interface{}), // executed in the goroutine
	args ...interface{}, // forwarded to loopFunc
) {
	go func() {
		defer func() {
			log.Printf("报错了,Goooooooooooooooo中断")
			recover()
		}()
		//log.Printf("runtime.NumGoroutine()=[%v]", runtime.NumGoroutine())
		loopFunc(args...)
	}()
}

// GooooooooooooooooDeadLoop runs loopFunc(args...) in a goroutine and, if it
// panics, recovers and restarts itself — so the work is retried forever.
// NOTE(review): the deferred restart also fires on a normal (non-panic)
// return, making this an unconditional restart loop; confirm that is intended.
func GooooooooooooooooDeadLoop(
	loopFunc func(...interface{}), // executed in the goroutine
	args ...interface{}, // forwarded to loopFunc
) {
	go func() {
		defer func() {
			log.Printf("报错了,Goooooooooooooooo中断")
			recover()
			GooooooooooooooooDeadLoop(loopFunc, args...)
		}()
		//log.Printf("runtime.NumGoroutine()=[%v]", runtime.NumGoroutine())
		loopFunc(args...)
	}()
}

// Int64Slice implements sort.Interface for a slice of int64 in ascending order.
type Int64Slice []int64

func (c Int64Slice) Len() int {
	return len(c)
}
func (c Int64Slice) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}
func (c Int64Slice) Less(i, j int) bool {
	return c[i] < c[j]
}
package pubsubprovider

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"cloud.google.com/go/pubsub"
	"go-gcs/src/logger"
	"go-gcs/src/service/googlecloud"
	"go-gcs/src/service/googlecloud/storageprovider"
	"google.golang.org/api/option"
)

// PubSub is the structure for config
type PubSub struct {
	Topic        string `json:"topic"`
	Subscription string `json:"subscription"`
}

// Service is the structure for service
type Service struct {
	Config  *PubSub
	Client  *pubsub.Client
	Context context.Context
}

// GoogleCloudStorageNotification is the structure for notification
type GoogleCloudStorageNotification struct {
	Name        string `json:"name" validate:"required"`
	Bucket      string `json:"bucket" validate:"required"`
	ContentType string `json:"contentType" validate:"required"`
}

// NotifyFromGCSStorage subscribes to the configured Pub/Sub topic and, for
// each OBJECT_FINALIZE event describing a JPEG/PNG object, kicks off an async
// resize-and-upload via the storage provider. It blocks inside sub.Receive
// until the context is cancelled or Receive fails.
func (s *Service) NotifyFromGCSStorage(sp *storageprovider.Service) {
	var sub *pubsub.Subscription
	// Try to create the subscription; if it already exists (or creation
	// fails), fall back to attaching to the existing one.
	sub, err := s.Client.CreateSubscription(s.Context, s.Config.Subscription, pubsub.SubscriptionConfig{
		Topic:       s.Client.Topic(s.Config.Topic),
		AckDeadline: 20 * time.Second,
	})
	if err != nil {
		logger.Warnf("error while create google cloud pubsub subscription: %s", err)
		logger.Info("try to use the exist subscription...")
		sub = s.Client.Subscription(s.Config.Subscription)
	}
	var mu sync.Mutex
	cctx, cancel := context.WithCancel(s.Context)
	err = sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
		// Ack immediately; processing failures will not be redelivered.
		msg.Ack()
		if msg.Attributes["eventType"] == "OBJECT_FINALIZE" {
			gcsNotification := GoogleCloudStorageNotification{}
			// NOTE(review): Unmarshal error is ignored — a malformed payload
			// yields zero-value fields; confirm whether that is acceptable.
			json.Unmarshal(msg.Data, &gcsNotification)
			if gcsNotification.ContentType == "image/jpg" || gcsNotification.ContentType == "image/jpeg" ||
				gcsNotification.ContentType == "jpeg" || gcsNotification.ContentType == "image/png" {
				go sp.ResizeMultiImageSizeAndUpload(gcsNotification.ContentType, gcsNotification.Bucket, gcsNotification.Name)
			}
		}
		// NOTE(review): this lock/unlock pair guards nothing (no shared state
		// is touched between them) — presumably leftover scaffolding; verify.
		mu.Lock()
		defer mu.Unlock()
	})
	if err != nil {
		logger.Warnf("error while create google cloud pubsub notify: %s", err)
		cancel()
	}
}

// New returns a new Service: it marshals the Google Cloud config back to JSON
// and uses it as the credentials blob for the Pub/Sub client.
// NOTE(review): client creation errors are only logged, so Client may be nil
// in the returned Service — callers should be prepared for that.
func New(ctx context.Context, googleCloudConfig *googlecloud.Config, pubsubConfig *PubSub) *Service {
	plan, err := json.Marshal(googleCloudConfig)
	if err != nil {
		logger.Warnf("error while read config file: %s", err)
	}
	client, err := pubsub.NewClient(ctx, googleCloudConfig.ProjectId, option.WithCredentialsJSON(plan))
	if err != nil {
		logger.Warnf("error while create google cloud pubsub client: %s", err)
	}
	return &Service{
		Config:  pubsubConfig,
		Client:  client,
		Context: ctx,
	}
}
/* Copyright 2021 CodeNotary, Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package client import ( "encoding/binary" "errors" "strings" ) type TokenService interface { SetToken(database string, token string) error WithHds(hds HomedirService) TokenService WithTokenFileName(tfn string) TokenService IsTokenPresent() (bool, error) DeleteToken() error GetToken() (string, error) GetDatabase() (string, error) } type tokenService struct { tokenFileName string hds HomedirService } //NewTokenService ... func NewTokenService() TokenService { return &tokenService{} } func (ts *tokenService) GetToken() (string, error) { _, token, err := ts.parseContent() if err != nil { return "", err } return token, nil } //SetToken ... func (ts *tokenService) SetToken(database string, token string) error { return ts.hds.WriteFileToUserHomeDir(BuildToken(database, token), ts.tokenFileName) } func BuildToken(database string, token string) []byte { dbsl := uint64(len(database)) dbnl := len(database) tl := len(token) lendbs := binary.Size(dbsl) var cnt = make([]byte, lendbs+dbnl+tl) binary.BigEndian.PutUint64(cnt, dbsl) copy(cnt[lendbs:], database) copy(cnt[lendbs+dbnl:], token) return cnt } func (ts *tokenService) DeleteToken() error { return ts.hds.DeleteFileFromUserHomeDir(ts.tokenFileName) } //IsTokenPresent ... 
func (ts *tokenService) IsTokenPresent() (bool, error) { return ts.hds.FileExistsInUserHomeDir(ts.tokenFileName) } func (ts *tokenService) GetDatabase() (string, error) { dbname, _, err := ts.parseContent() return dbname, err } func (ts *tokenService) parseContent() (string, string, error) { content, err := ts.hds.ReadFileFromUserHomeDir(ts.tokenFileName) if err != nil { return "", "", err } if len(content) <= 8 { return "", "", errors.New("token content not present") } // token prefix is hardcoded into library. Please modify in case of changes in paseto library if strings.HasPrefix(content, "v2.public.") { return "", "", errors.New("old token format. Please remove old token located in your default home dir") } dbNameLen := make([]byte, 8) copy(dbNameLen, content[:8]) dbNameLenUint64 := binary.BigEndian.Uint64(dbNameLen) if dbNameLenUint64 > uint64(len(content))-8 { return "", "", errors.New("invalid token format") } databasename := make([]byte, dbNameLenUint64) copy(databasename, content[8:8+dbNameLenUint64]) token := make([]byte, uint64(len(content))-8-dbNameLenUint64) copy(token, content[8+dbNameLenUint64:]) return string(databasename), string(token), nil } // WithHds ... func (ts *tokenService) WithHds(hds HomedirService) TokenService { ts.hds = hds return ts } // WithTokenFileName ... func (ts *tokenService) WithTokenFileName(tfn string) TokenService { ts.tokenFileName = tfn return ts }
package handler

import (
	"context"
	"errors"
	"time"

	"fmt"

	crypto "github.com/jinmukeji/go-pkg/v2/crypto/encrypt/legacy"
	"github.com/jinmukeji/go-pkg/v2/crypto/rand"
	"github.com/jinmukeji/jiujiantang-services/jinmuid/mysqldb"
	proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
)

const (
	// MinpasswordLength is the minimum allowed password length.
	MinpasswordLength = 8
	// MaxpasswordLength is the maximum allowed password length.
	MaxpasswordLength = 20
)

// UserResetPasswordViaSecureQuestions resets a user's password after the user
// correctly answers their secure questions. The caller identifies the account
// either by phone (ValidationType PHONE) or by username (ValidationType
// USERNAME); the other identifier must be empty. On success resp.Result is
// set to true, the password is replaced, all of the user's tokens are
// revoked, and an audit record is written. Wrong answers are reported via
// resp.WrongSecureQuestionKeys without an error.
func (j *JinmuIDService) UserResetPasswordViaSecureQuestions(ctx context.Context, req *proto.UserResetPasswordViaSecureQuestionsRequest, resp *proto.UserResetPasswordViaSecureQuestionsResponse) error {
	clientID, _ := ClientIDFromContext(ctx)
	// NOTE(review): the seed-generation error is silently ignored; confirm
	// RandomStringWithMask cannot fail for this mask/length.
	seed, _ := rand.RandomStringWithMask(rand.MaskLetterDigits, 4)
	helper := crypto.NewPasswordCipherHelper()
	// Encrypt the candidate password up front; it is only persisted after all
	// validations below succeed.
	encryptedPassword := helper.Encrypt(req.Password, seed, j.encryptKey)
	if req.ValidationType == proto.ValidationType_VALIDATION_TYPE_UNKNOWN {
		return NewError(ErrInvalidSecureQuestionValidationMethod, errors.New("invalid secure queston validation type"))
	}
	if req.ValidationType == proto.ValidationType_VALIDATION_TYPE_PHONE {
		// Phone-based reset must not carry a username.
		if req.Username != "" {
			return NewError(ErrInvalidValidationValue, errors.New("non-empty username when getting secure questions by phone"))
		}
		errValidatePhoneFormat := validatePhoneFormat(req.Phone, req.NationCode)
		if errValidatePhoneFormat != nil {
			return errValidatePhoneFormat
		}
		existSignInPhone, _ := j.datastore.ExistSignInPhone(ctx, req.Phone, req.NationCode)
		if !existSignInPhone {
			return NewError(ErrNoneExistentPhone, fmt.Errorf("phone %s%s doesn't exist", req.NationCode, req.Phone))
		}
		questions, err := j.datastore.FindSecureQuestionByPhone(ctx, req.Phone, req.NationCode)
		if err != nil {
			return NewError(ErrCurrentSecureQuestionsNotSet, fmt.Errorf("failed to find secure questions by phone %s%s: %s", req.NationCode, req.Phone, err.Error()))
		}
		user, errFindUserByPhone := j.datastore.FindUserByPhone(ctx, req.Phone, req.NationCode)
		if errFindUserByPhone != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find username by phone %s%s: %s", req.NationCode, req.Phone, errFindUserByPhone.Error()))
		}
		password := helper.Decrypt(user.EncryptedPassword, user.Seed, j.encryptKey)
		// The new password must differ from the current one.
		if password == req.Password {
			return NewError(ErrSamePassword, errors.New("new password cannot equals old password"))
		}
		// Check the supplied secure-question answers.
		wrongQuestions, errCompareSecureQuestion := compareSecureQuestion(req.SecureQuestions, questions)
		if errCompareSecureQuestion != nil {
			return errCompareSecureQuestion
		}
		if len(wrongQuestions) == 0 {
			// TODO: SetPasswordByPhone,DeleteTokenByUserID,CreateAuditUserCredentialUpdate should run in one transaction.
			resp.Result = true
			errSetPasswordByPhone := j.datastore.SetPasswordByPhone(ctx, req.Phone, req.NationCode, encryptedPassword, seed)
			if errSetPasswordByPhone != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to set password by phone %s%s: %s", req.NationCode, req.Phone, errSetPasswordByPhone.Error()))
			}
			userID, errFindUserIDByPhone := j.datastore.FindUserIDByPhone(ctx, req.Phone, req.NationCode)
			if errFindUserIDByPhone != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to find userID by phone %s%s: %s", req.NationCode, req.Phone, errFindUserIDByPhone.Error()))
			}
			// Revoke every outstanding session token for this user.
			errDeleteTokenByUserID := j.datastore.DeleteTokenByUserID(ctx, userID)
			if errDeleteTokenByUserID != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to delete token by user %d: %s", userID, errDeleteTokenByUserID.Error()))
			}
			now := time.Now()
			record := &mysqldb.AuditUserCredentialUpdate{
				UserID:            userID,
				ClientID:          clientID,
				UpdatedRecordType: mysqldb.PasswordUpdated,
				CreatedAt:         now,
				UpdatedAt:         now,
			}
			errCreateAuditUserCredentialUpdate := j.datastore.CreateAuditUserCredentialUpdate(ctx, record)
			if errCreateAuditUserCredentialUpdate != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to create audit user credential update: %s", errCreateAuditUserCredentialUpdate.Error()))
			}
		} else {
			// Report which answers were wrong; not an error condition.
			resp.WrongSecureQuestionKeys = wrongQuestions
		}
	}
	if req.ValidationType == proto.ValidationType_VALIDATION_TYPE_USERNAME {
		// Username-based reset must not carry a phone number.
		if req.Phone != "" || req.NationCode != "" {
			return NewError(ErrInvalidValidationValue, errors.New("phone should be empty when validation type is username"))
		}
		user, errFindUserByUsername := j.datastore.FindUserByUsername(ctx, req.Username)
		if errFindUserByUsername != nil {
			return NewError(ErrDatabase, fmt.Errorf("failed to find user by username %s: %s", req.Username, errFindUserByUsername.Error()))
		}
		// NOTE(review): silently returning nil when no password is set leaves
		// resp.Result false with no explanation — confirm intended contract.
		if !user.HasSetPassword {
			return nil
		}
		password := helper.Decrypt(user.EncryptedPassword, user.Seed, j.encryptKey)
		// The new password must differ from the current one.
		if password == req.Password {
			return NewError(ErrSamePassword, errors.New("new password cannot equals old password"))
		}
		existUsername, _ := j.datastore.ExistUsername(ctx, req.Username)
		if !existUsername {
			return NewError(ErrNonexistentUsername, fmt.Errorf("username %s doesn't exist", req.Username))
		}
		questions, errFindSecureQuestionByUsername := j.datastore.FindSecureQuestionByUsername(ctx, req.Username)
		if errFindSecureQuestionByUsername != nil {
			return NewError(ErrCurrentSecureQuestionsNotSet, fmt.Errorf("failed to find secure questions by username %s: %s", req.Username, errFindSecureQuestionByUsername.Error()))
		}
		// Check the supplied secure-question answers.
		wrongQuestions, err := compareSecureQuestion(req.SecureQuestions, questions)
		if err != nil {
			return err
		}
		if len(wrongQuestions) == 0 {
			// TODO: SetPasswordByUsername,CreateAuditUserCredentialUpdate,DeleteTokenByUserID should run in one transaction.
			resp.Result = true
			errSetPasswordByUsername := j.datastore.SetPasswordByUsername(ctx, req.Username, encryptedPassword, seed)
			if errSetPasswordByUsername != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to set password by username %s: %s", req.Username, errSetPasswordByUsername.Error()))
			}
			userID, errFindUserIDByUsername := j.datastore.FindUserIDByUsername(ctx, req.Username)
			if errFindUserIDByUsername != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to find userID by username %s: %s", req.Username, errFindUserIDByUsername.Error()))
			}
			now := time.Now()
			record := &mysqldb.AuditUserCredentialUpdate{
				UserID:            userID,
				ClientID:          clientID,
				UpdatedRecordType: mysqldb.PasswordUpdated,
				CreatedAt:         now,
				UpdatedAt:         now,
			}
			errCreateAuditUserCredentialUpdate := j.datastore.CreateAuditUserCredentialUpdate(ctx, record)
			if errCreateAuditUserCredentialUpdate != nil {
				return NewError(ErrDatabase, errors.New("failed to create audit user credential"))
			}
			// Revoke every outstanding session token for this user.
			errDeleteTokenByUserID := j.datastore.DeleteTokenByUserID(ctx, userID)
			if errDeleteTokenByUserID != nil {
				return NewError(ErrDatabase, fmt.Errorf("failed to delete token by user %d: %s", userID, errDeleteTokenByUserID.Error()))
			}
		} else {
			// Report which answers were wrong; not an error condition.
			resp.WrongSecureQuestionKeys = wrongQuestions
		}
	}
	return nil
}
package sys func init() { FixConsole() }
package handler import "simple-calculator/internal/utils" func CalHandler(exp string) int { //调用计算工具进行计算 result := utils.Calculator(exp) return result }
package main import "fmt" //标识符:变量名 函数名 类型名 方法名 //go语言中如果标识符首字母是大写的,就表示对外部包可见 // Dog 首字母大写需要添加注释 type Dog struct { name string gender string } //构造函数 func newDog(name string, gender string) dog { return dog{ name: name, gender: gender, } } //方法是作用于特定类型的函数 //接受者表示的是调用该方法的具体类型变量,多用于类型名首字母小写表示 func (d dog) wang() { fmt.Printf("%s哇哇哇,性别是:%s\n", d.name, d.gender) } func main() { s1 := newDog("xiaohei", "male") s2 := newDog("xiaohua", "female") s1.wang() s2.wang() }
package main import ( "fmt" ) type myType int func (t myType) setByValue(nval myType) { t = nval } func (t *myType) setByPtr(nval myType) { *t = nval } func main() { var x myType = 0 x.setByValue(1) fmt.Println(x) x.setByPtr(2) fmt.Println(x) }
package goproxy import ( "context" "io" ) // RevInfo describes a single revision of a module source type RevInfo struct { Version string // version string Time string // commit time // These fields are used for Stat of arbitrary rev, // but they are not recorded when talking about module versions. Name string `json:"-"` // complete ID in underlying repository Short string `json:"-"` // shortened ID, for use in pseudo-version } // Module represents go module: some VSC (git, mercurial, svn, etc), Gitlab, another Go modules proxy, etc type Module interface { // ModulePath returns the module path. ModulePath() string // Versions lists all known versions with the given prefix. // Pseudo-versions are not included. // Versions should be returned sorted in semver order // (implementations can use SortVersions). Versions(ctx context.Context, prefix string) (tags []string, err error) // Stat returns information about the revision rev. // A revision can be any identifier known to the underlying service: // commit hash, branch, tag, and so on. Stat(ctx context.Context, rev string) (*RevInfo, error) // GoMod returns the go.mod file for the given version. GoMod(ctx context.Context, version string) (data []byte, err error) // Zip returns file reader of ZIP file for the given version of the module Zip(ctx context.Context, version string) (file io.ReadCloser, err error) }
// Unit tests for the montecarlo Node type: construction, scoring,
// parent/child wiring, selection, UCB, copy and merge behavior.
package montecarlo

import (
	"fmt"
	"math"
	"testing"

	log "github.com/Sirupsen/logrus"
	assert "github.com/stretchr/testify/assert"
)

/*-------- TEST INPUTS & SETUP --------*/

// Shared fixtures, rebuilt from scratch by nodeTestSetup before each test:
// a bare node, a node with 10 children, and a node with 10 children that
// each have 10 children of their own.
var normal, nodeWithChildren, nodeWithGrandchildren Node

// nodeTestSetup (re)initializes the three fixture trees. Scores encode the
// insertion index (i, and i*j for grandchildren) so ordering is observable.
func nodeTestSetup() {
	var err error
	normal, err = NewNode(1)
	if err != nil {
		panic(fmt.Sprintf("%v", err))
	}
	nodeWithChildren, err = NewNode(1)
	if err != nil {
		panic(fmt.Sprintf("%v", err))
	}
	nodeWithChildren.visits = 10
	nodeWithGrandchildren, err = NewNode(1)
	if err != nil {
		panic(fmt.Sprintf("%v", err))
	}
	nodeWithGrandchildren.visits = 100
	for i := 0; i < 10; i++ {
		childNode, err := NewNode(1)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
		//add scores to create some sort of identifiable order
		childNode.SetScore(0, float64(i))
		childNode.visits = 1
		nodeWithChildren.SetChild(fmt.Sprintf("%v", i), &childNode)
		//add grandchildren and their respective parents
		parentNode, err := NewNode(1)
		if err != nil {
			panic(fmt.Sprintf("%v", err))
		}
		parentNode.SetScore(0, float64(i))
		parentNode.visits = 10
		for j := 0; j < 10; j++ {
			//add "grandchildren" to each child
			grandchild, err := NewNode(1)
			if err != nil {
				panic(fmt.Sprintf("%v", err))
			}
			grandchild.SetScore(0, float64(i*j))
			grandchild.visits = 1
			parentNode.SetChild(fmt.Sprintf("%v", j), &grandchild)
		}
		nodeWithGrandchildren.SetChild(fmt.Sprintf("%v", i), &parentNode)
	}
	log.SetLevel(log.DebugLevel)
}

/*-------- TESTING --------*/

func TestNewNode(t *testing.T) {
	nodeTestSetup()
	assert.Equal(t, float64(0), normal.Score(0), "score of new mcts node should be zero")
	assert.Equal(t, int64(0), normal.Visits(), "total sims of new mcts node should be zero")
	assert.Nil(t, normal.parent, "a new bare mcts node should have nil parent")
	assert.Equal(t, 0, len(normal.children), "new mcts node should have no children")
	assert.False(t, normal.Visits() > 0, "new mcts node should not be labelled visited")
	assert.Equal(t, 10, len(nodeWithChildren.children))
	assert.Equal(t, 10, len(nodeWithGrandchildren.children))
	for _, c := range nodeWithGrandchildren.children {
		assert.Equal(t, 10, len(c.children))
	}
}

func TestNewNodeIsRoot(t *testing.T) {
	nodeTestSetup()
	assert.True(t, normal.IsRoot(), "a new bare mcts node should have nil parent, and hence be a root")
}

func TestMakeZeroPlayerNode(t *testing.T) {
	_, err := NewNode(0)
	_, ok := err.(ZeroPlayerCount)
	if !ok {
		assert.Fail(t, "expected ZeroPlayerCount error when making node with zero players")
	}
	//try where there shouldn't be an error
	_, err = NewNode(1)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
}

func TestSetAndGetScore(t *testing.T) {
	n, err := NewNode(1)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
	assert.Equal(t, float64(0), n.Score(0))
	n.SetScore(0, 42)
	assert.Equal(t, float64(42), n.Score(0))
}

func TestSetAndGetScoreMultiplayer(t *testing.T) {
	n, err := NewNode(4)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
	assert.Equal(t, []float64{0, 0, 0, 0}, n.ScoreVector())
	assert.Equal(t, float64(0), n.Score(0))
	assert.Equal(t, float64(0), n.Score(1))
	assert.Equal(t, float64(0), n.Score(2))
	assert.Equal(t, float64(0), n.Score(3))
	n.SetScore(0, 5)
	n.SetScore(1, 7)
	n.SetScore(2, 6)
	n.SetScore(3, 8)
	assert.Equal(t, []float64{5, 7, 6, 8}, n.ScoreVector())
	assert.Equal(t, float64(5), n.Score(0))
	assert.Equal(t, float64(7), n.Score(1))
	assert.Equal(t, float64(6), n.Score(2))
	assert.Equal(t, float64(8), n.Score(3))
}

func TestAddChild(t *testing.T) {
	nodeTestSetup()
	//try our test inputs
	prev := len(normal.children)
	newNode, err := NewNode(1)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
	normal.SetChild("1", &newNode)
	assert.Equal(t, prev+1, len(normal.children), "adding child should increase NumChildren by 1")
	assert.NotNil(t, normal.GetChild("1"))
	assert.Equal(t, &normal, newNode.Parent(), "adding a child should set its parent")
	prev = len(nodeWithChildren.children)
	newNode, err = NewNode(1)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
	nodeWithChildren.SetChild("11", &newNode)
	assert.Equal(t, prev+1, len(nodeWithChildren.children), "adding child should increase NumChildren by 1")
	assert.NotNil(t, nodeWithChildren.GetChild("11"))
	prev = len(nodeWithGrandchildren.children)
	newNode, err = NewNode(1)
	if err != nil {
		assert.Fail(t, fmt.Sprintf("%v", err))
	}
	nodeWithGrandchildren.SetChild("11", &newNode)
	assert.Equal(t, prev+1, len(nodeWithGrandchildren.children), "adding child should increase NumChildren by 1")
	assert.NotNil(t, nodeWithGrandchildren.GetChild("11"))
}

func TestChildOrder(t *testing.T) {
	nodeTestSetup()
	assert.Equal(t, 10, len(nodeWithChildren.children), "mcts node should have indicated number of children")
	for i := 0; i < len(nodeWithChildren.children); i++ {
		//scores of children should be 1 to the number of children
		assert.Equal(t, float64(i), nodeWithChildren.GetChild(fmt.Sprintf("%v", i)).Score(0), "order of insertion should be preserved in mcts node")
		for j := 0; j < len(nodeWithChildren.children); j++ {
			assert.Equal(t, float64(i*j), nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).GetChild(fmt.Sprintf("%v", j)).Score(0), "order of insertion should be preserved in mcts node")
		}
	}
}

func TestRemoveChild(t *testing.T) {
	nodeTestSetup()
	// NOTE(review): `copy` shadows the builtin of the same name inside this
	// test; harmless here since builtin copy() is not used below.
	copy := normal
	normal.RemoveChild("0")
	assert.Equal(t, copy, normal, "removing child from mcts node with no children should not affect it")
	prev := len(nodeWithChildren.children)
	child := nodeWithChildren.GetChild("0")
	assert.NotNil(t, child, "node should have returned valid child")
	nodeWithChildren.RemoveChild("0")
	assert.Equal(t, prev-1, len(nodeWithChildren.children), "removing from mcts node should decrease number of children")
	assert.Nil(t, child.Parent(), "removing child should cause child's parent to be nil")
	assert.Nil(t, nodeWithChildren.GetChild("0"), "child was removed, hence GetChild should return nil")
}

func TestGetChild(t *testing.T) {
	nodeTestSetup()
	assert.Nil(t, normal.GetChild("0"), "mcts GetChild should return nil if there are no children")
	assert.Nil(t, normal.GetChild("0"), "mcts GetChild should return nil if there are no children")
	assert.Nil(t, normal.GetChild("2"), "mcts GetChild should return nil if there are no children")
	assert.Nil(t, normal.GetChild("-1"), "mcts GetChild should return nil if there are no children")
}

func TestNumChildren(t *testing.T) {
	nodeTestSetup()
	assert.Equal(t, 0, len(normal.children), "new mcts node should have no children")
	assert.Equal(t, 10, len(nodeWithChildren.children), "mcts node should have indicated number of children")
	assert.Equal(t, 10, len(nodeWithGrandchildren.children), "mcts node should have indicated number of children")
	//test children too
	for i := 0; i < len(nodeWithGrandchildren.children); i++ {
		assert.Equal(t, 10, len(nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).children), "mcts node should have indicated number of children")
	}
}

func TestIsTerminal(t *testing.T) {
	nodeTestSetup()
	assert.True(t, normal.IsLeaf(), "bare mcts node should be terminal")
	assert.False(t, nodeWithChildren.IsLeaf(), "mcts nodes with children should not be terminal")
	assert.False(t, nodeWithGrandchildren.IsLeaf(), "mcts nodes with children should not be terminal")
	for i := 0; i < len(nodeWithChildren.children); i++ {
		assert.True(t, nodeWithChildren.GetChild(fmt.Sprintf("%v", i)).IsLeaf(), "mcts nodes without children should be terminal")
		assert.False(t, nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).IsLeaf(), "mcts nodes with children should not be terminal")
		for j := 0; j < len(nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).children); j++ {
			assert.True(t, nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).GetChild(fmt.Sprintf("%v", j)).IsLeaf(), "mcts nodes without children should be terminal")
		}
	}
}

func TestParent(t *testing.T) {
	nodeTestSetup()
	assert.Nil(t, normal.Parent(), "a bare mcts node should have no parent")
	for i := 0; i < len(nodeWithChildren.children); i++ {
		assert.Equal(t, &nodeWithChildren, nodeWithChildren.GetChild(fmt.Sprintf("%v", i)).Parent(), "a child mcts node should know its parent")
		assert.Equal(t, &nodeWithGrandchildren, nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).Parent(), "a child mcts node should know its parent")
		for j := 0; j < len(nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).children); j++ {
			assert.Equal(t, nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)), nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).GetChild(fmt.Sprintf("%v", j)).Parent(), "a grandchild mcts node should know its parent")
			assert.Equal(t, &nodeWithGrandchildren, nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", i)).GetChild(fmt.Sprintf("%v", j)).Parent().Parent(), "a grandchild mcts node should know its grandparent, via its parent")
		}
	}
}

func TestNodeSelectBestChild(t *testing.T) {
	nodeTestSetup()
	// TODO currently based on pure exploitation (no exploration)
	_, node := nodeWithGrandchildren.selectBestChild(0)
	assert.Equal(t, float64(9), node.Score(0))
	_, node = nodeWithGrandchildren.selectBestChild(0)
	assert.Equal(t, float64(9), node.Score(0))
}

func TestSelectBestChildDirectFromRoot(t *testing.T) {
	nodeTestSetup()
	k, node := nodeWithChildren.selectBestChild(0)
	assert.NotEqual(t, "", k)
	assert.NotNil(t, node)
}

func TestSelectBestChildAsLeaf(t *testing.T) {
	nodeTestSetup()
	// A leaf returns itself with an empty key instead of a child.
	grandChild := nodeWithGrandchildren.GetChild(fmt.Sprintf("%v", 0)).GetChild(fmt.Sprintf("%v", 0))
	k, c := grandChild.selectBestChild(0)
	assert.Equal(t, "", k)
	assert.Equal(t, grandChild, c)
}

func TestUCBExplorationParamLessThanZero(t *testing.T) {
	nodeTestSetup()
	ucb := nodeWithGrandchildren.UpperConfidenceBound(float64(-1), 0)
	assert.Equal(t, math.Inf(1), ucb)
}

func TestUpperConfidenceBoundDirectFromRoot(t *testing.T) {
	nodeTestSetup()
	ucb := normal.UpperConfidenceBound(0, 0)
	assert.Equal(t, math.Inf(1), ucb)
	ucb = nodeWithChildren.UpperConfidenceBound(0, 0)
	assert.Equal(t, math.Inf(1), ucb)
	for _, c := range nodeWithChildren.children {
		ucb = c.UpperConfidenceBound(0, 0)
		assert.Equal(t, math.Inf(1), ucb)
	}
}

func TestNodeIsExhausted(t *testing.T) {
	nodeTestSetup()
	assert.True(t, normal.IsExhausted())
	assert.True(t, nodeWithChildren.IsExhausted())
	assert.True(t, nodeWithGrandchildren.IsExhausted())
}

func TestNodeString(t *testing.T) {
	nodeTestSetup()
	assert.NotEqual(t, "", nodeWithGrandchildren.String(), "string output is empty")
}

func TestNodeAddVisit(t *testing.T) {
	node, err := NewNode(1)
	if err != nil {
		assert.Fail(t, err.Error())
	}
	v := node.Visits()
	node.AddVisit()
	assert.Equal(t, v+1, node.Visits())
}

func TestNodeCopy(t *testing.T) {
	nodeTestSetup()
	cpy := nodeWithGrandchildren.Copy()
	assert.NotEqual(t, nodeWithGrandchildren, cpy)
	assert.Equal(t, nodeWithGrandchildren.State, cpy.State)
	for k, c := range nodeWithGrandchildren.children {
		cpyChild, ok := cpy.children[k]
		assert.True(t, ok)
		assert.NotEqual(t, &c, cpyChild)
		assert.Equal(t, c.State, cpyChild.State)
		for k2, c2 := range c.children {
			cpyChild, ok := cpy.children[k].children[k2]
			assert.True(t, ok)
			assert.NotEqual(t, &c2, cpyChild)
			assert.Equal(t, c2.State, cpyChild.State)
		}
	}
}

func TestNodeMergeWithSelf(t *testing.T) {
	nodeTestSetup()
	cpy := nodeWithGrandchildren.Copy()
	err := nodeWithGrandchildren.Merge(*cpy)
	if err != nil {
		assert.Fail(t, err.Error())
	}
	//expect visits and score to be doubled
	v := cpy.Visits()
	assert.Equal(t, v*2, nodeWithGrandchildren.Visits())
	s := cpy.ScoreVector()
	for i, v := range nodeWithGrandchildren.ScoreVector() {
		assert.Equal(t, v*2, s[i])
	}
	assert.Equal(t, len(cpy.children), len(nodeWithGrandchildren.children))
}

func TestNodeMergeDifferingPlayerCount(t *testing.T) {
	nodeTestSetup()
	node, err := NewNode(5)
	if err != nil {
		assert.Fail(t, err.Error())
	}
	err = nodeWithGrandchildren.Merge(node)
	assert.NotNil(t, err, "expected MergeDifferingPlayercount error when merging")
	var ok bool
	err, ok = err.(MergeDifferingPlayerCount)
	assert.True(t, ok, "expected MergeDifferingPlayercount error when merging")
}

// very simple state implementation to test with all nodes using this state will
// be exhausted and terminal
type simpleStateImplementation struct {
	internal int
}

func (ssi simpleStateImplementation) LegalActions() ActionSet {
	return make(ActionSet)
}

func (ssi simpleStateImplementation) Score(player uint) float64 {
	return float64(0)
}

func (ssi simpleStateImplementation) Bias() float64 {
	return float64(0)
}

func (ssi simpleStateImplementation) Copy() State {
	return ssi
}

func (ssi simpleStateImplementation) Player() uint {
	return 0
}

func (ssi simpleStateImplementation) Policy() Policy {
	return nil
}

func TestNodeMergeStateMismatch(t *testing.T) {
	nodeTestSetup()
	node, err := NewNode(1)
	if err != nil {
		assert.Fail(t, err.Error())
	}
	node.State = simpleStateImplementation{3}
	err = nodeWithGrandchildren.Merge(node)
	assert.NotNil(t, err, "expected MergeStateMismatch error when merging")
	var ok bool
	err, ok = err.(MergeStateMismatch)
	assert.True(t, ok, "expected MergeStateMismatch error when merging")
}

func TestNodeIsTerminal(t *testing.T) {
	nodeTestSetup()
	assert.True(t, normal.IsTerminal())
}

func TestNodeWithStateIsExhausted(t *testing.T) {
	nodeTestSetup()
	normal.State = simpleStateImplementation{}
	assert.True(t, normal.IsExhausted())
	nodeWithChildren.State = simpleStateImplementation{}
	assert.True(t, nodeWithChildren.IsExhausted())
	nodeWithGrandchildren.State = simpleStateImplementation{}
	assert.True(t, nodeWithGrandchildren.IsExhausted())
}
package double_pointer // RemoveElement 删除所有值为val的元素 func RemoveElement(nums []int, val int) []int { fast, slow := 0, 0 for fast < len(nums) { if nums[fast] != val { nums[slow] = nums[fast] slow++ } fast++ } return nums[:slow] }
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// strToIntArr parses a comma-separated list of integers.
// Entries that fail to parse are kept as 0 (Atoi's error is deliberately
// ignored, preserving the original best-effort behavior).
func strToIntArr(str string) (result []int) {
	arrStr := strings.Split(str, ",")
	result = make([]int, 0, len(arrStr))
	for _, each := range arrStr {
		k, _ := strconv.Atoi(each)
		result = append(result, k)
	}
	return
}

// findMajority returns the element occurring strictly more than len(arr)/2
// times, if any, and (-1, false) otherwise.
func findMajority(arr []int) (result int, found bool) {
	limit := len(arr) / 2
	count := make(map[int]int, len(arr))
	for _, each := range arr {
		// IDIOM FIX: a missing map key reads as 0, so the previous
		// `if count[each] > 0 { ++ } else { = 1 }` collapses to a bare increment.
		count[each]++
		if count[each] > limit {
			return each, true
		}
	}
	return -1, false
}

func main() {
	var numbers string
	fmt.Println("FIND MAJORITY ELEMENT OF AN ARRAY OF INTS")
	fmt.Print("Input (ex:1,2,3,4,5): ")
	_, _ = fmt.Scanln(&numbers)
	arrNum := strToIntArr(numbers)
	result, found := findMajority(arrNum)
	fmt.Print("Output: ")
	if !found {
		fmt.Println("Tidak Ditemukan")
	} else {
		fmt.Println(result)
	}
}
package mocks import ( "io" "strings" ) var _ io.WriteCloser = &BuildCloser{} type BuildCloser struct { strings.Builder } func (b *BuildCloser) Close() error { return nil } func NewBuildCloser() *BuildCloser { return &BuildCloser{strings.Builder{}} }
package runner import ( "evier/config" "evier/integrations" "time" ) func Run(cfg config.Config) (e error) { rsyncOptions := cfg.Rsync intgs := integrations.IntegrationGroup{cfg.Integrations} startTime := time.Now() intgs.NotifyProcessStart(startTime) for _, job := range cfg.Jobs { jobStart := time.Now() e = intgs.NotifyJobStart(job, jobStart) if e != nil { return e } e = job.Perform(rsyncOptions) jobEnd := time.Now() if e == nil { e = intgs.NotifyJobSuccess(job, jobStart, jobEnd) if e != nil { return e } } else { err := intgs.NotifyJobFailure(job, jobStart, jobEnd) if err != nil { return err } return e } } intgs.NotifyProcessSuccess(startTime, time.Now()) return nil }
package main import ( "encoding/json" "fmt" ) type MultiInvoiceResultQueryPostData struct { Pch string `json:"pch"` } func FlowMultiInvoiceResultQuery(PchNumber string) string{ multiInvoiceResultQueryPostData := MultiInvoiceCheckPostData{} multiInvoiceResultQueryPostData.Pch = PchNumber multiInvoiceResultQueryPostDataJson,_ :=json.Marshal(multiInvoiceResultQueryPostData) fmt.Println("json data",multiInvoiceResultQueryPostDataJson) jsonData := PrepareJsonForHttpRequest(multiInvoiceResultQueryPostDataJson) fmt.Println("json data", string(jsonData)) MultiInvoiceResultQueryUrl := GetUrlFromFactory("MultiInvoiceResultQuery") fmt.Println("Url", string(MultiInvoiceResultQueryUrl)) result := SentHttpequestByPost(MultiInvoiceResultQueryUrl, jsonData) //fmt.Println("result", result) return result } //func main() { // flowMultiInvoiceResultQuery("00000000000000000020201123105043") //}
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package charset import ( "bytes" "golang.org/x/text/encoding" ) // EncodingBinImpl is the instance of encodingBin. var EncodingBinImpl = &encodingBin{encodingBase{enc: encoding.Nop}} func init() { EncodingBinImpl.self = EncodingBinImpl } // encodingBin is the binary encoding. type encodingBin struct { encodingBase } // Name implements Encoding interface. func (*encodingBin) Name() string { return CharsetBin } // Tp implements Encoding interface. func (*encodingBin) Tp() EncodingTp { return EncodingTpBin } // Peek implements Encoding interface. func (*encodingBin) Peek(src []byte) []byte { if len(src) == 0 { return src } return src[:1] } // IsValid implements Encoding interface. func (*encodingBin) IsValid(_ []byte) bool { return true } // Foreach implements Encoding interface. func (*encodingBin) Foreach(src []byte, _ Op, fn func(from, to []byte, ok bool) bool) { for i := 0; i < len(src); i++ { if !fn(src[i:i+1], src[i:i+1], true) { return } } } func (*encodingBin) Transform(_ *bytes.Buffer, src []byte, _ Op) ([]byte, error) { return src, nil }
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	cloudbuildpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/cloudbuild/cloudbuild_go_proto"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild"
)

// BuildTriggerServer implements the gRPC interface for BuildTrigger.
type BuildTriggerServer struct{}

// ProtoToCloudbuildBuildTriggerGithubPullRequestCommentControlEnum converts a
// BuildTriggerGithubPullRequestCommentControlEnum enum from its proto
// representation. The proto zero value ("unspecified") maps to nil.
func ProtoToCloudbuildBuildTriggerGithubPullRequestCommentControlEnum(e cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum) *cloudbuild.BuildTriggerGithubPullRequestCommentControlEnum {
	if e == 0 {
		return nil
	}
	if n, ok := cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum_name[int32(e)]; ok {
		// Strip the generated type-name prefix to recover the bare enum value.
		e := cloudbuild.BuildTriggerGithubPullRequestCommentControlEnum(n[len("CloudbuildBuildTriggerGithubPullRequestCommentControlEnum"):])
		return &e
	}
	return nil
}

// ProtoToCloudbuildBuildTriggerBuildStepsStatusEnum converts a BuildTriggerBuildStepsStatusEnum enum from its proto representation.
func ProtoToCloudbuildBuildTriggerBuildStepsStatusEnum(e cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum) *cloudbuild.BuildTriggerBuildStepsStatusEnum {
	if e == 0 {
		return nil
	}
	if n, ok := cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum_name[int32(e)]; ok {
		// Strip the generated type-name prefix to recover the bare enum value.
		e := cloudbuild.BuildTriggerBuildStepsStatusEnum(n[len("CloudbuildBuildTriggerBuildStepsStatusEnum"):])
		return &e
	}
	return nil
}

// ProtoToCloudbuildBuildTriggerTriggerTemplate converts a BuildTriggerTriggerTemplate resource from its proto representation.
// A nil proto maps to a nil resource.
func ProtoToCloudbuildBuildTriggerTriggerTemplate(p *cloudbuildpb.CloudbuildBuildTriggerTriggerTemplate) *cloudbuild.BuildTriggerTriggerTemplate {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerTriggerTemplate{
		ProjectId:   dcl.StringOrNil(p.ProjectId),
		RepoName:    dcl.StringOrNil(p.RepoName),
		BranchName:  dcl.StringOrNil(p.BranchName),
		TagName:     dcl.StringOrNil(p.TagName),
		CommitSha:   dcl.StringOrNil(p.CommitSha),
		Dir:         dcl.StringOrNil(p.Dir),
		InvertRegex: dcl.Bool(p.InvertRegex),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerGithub converts a BuildTriggerGithub resource from its proto representation.
// A nil proto maps to a nil resource.
func ProtoToCloudbuildBuildTriggerGithub(p *cloudbuildpb.CloudbuildBuildTriggerGithub) *cloudbuild.BuildTriggerGithub {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerGithub{
		Owner:       dcl.StringOrNil(p.Owner),
		Name:        dcl.StringOrNil(p.Name),
		PullRequest: ProtoToCloudbuildBuildTriggerGithubPullRequest(p.GetPullRequest()),
		Push:        ProtoToCloudbuildBuildTriggerGithubPush(p.GetPush()),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerGithubPullRequest converts a BuildTriggerGithubPullRequest resource from its proto representation.
func ProtoToCloudbuildBuildTriggerGithubPullRequest(p *cloudbuildpb.CloudbuildBuildTriggerGithubPullRequest) *cloudbuild.BuildTriggerGithubPullRequest { if p == nil { return nil } obj := &cloudbuild.BuildTriggerGithubPullRequest{ Branch: dcl.StringOrNil(p.Branch), CommentControl: ProtoToCloudbuildBuildTriggerGithubPullRequestCommentControlEnum(p.GetCommentControl()), InvertRegex: dcl.Bool(p.InvertRegex), } return obj } // ProtoToBuildTriggerGithubPush converts a BuildTriggerGithubPush resource from its proto representation. func ProtoToCloudbuildBuildTriggerGithubPush(p *cloudbuildpb.CloudbuildBuildTriggerGithubPush) *cloudbuild.BuildTriggerGithubPush { if p == nil { return nil } obj := &cloudbuild.BuildTriggerGithubPush{ Branch: dcl.StringOrNil(p.Branch), Tag: dcl.StringOrNil(p.Tag), InvertRegex: dcl.Bool(p.InvertRegex), } return obj } // ProtoToBuildTriggerBuild converts a BuildTriggerBuild resource from its proto representation. func ProtoToCloudbuildBuildTriggerBuild(p *cloudbuildpb.CloudbuildBuildTriggerBuild) *cloudbuild.BuildTriggerBuild { if p == nil { return nil } obj := &cloudbuild.BuildTriggerBuild{ QueueTtl: dcl.StringOrNil(p.QueueTtl), LogsBucket: dcl.StringOrNil(p.LogsBucket), Timeout: dcl.StringOrNil(p.Timeout), Source: ProtoToCloudbuildBuildTriggerBuildSource(p.GetSource()), } for _, r := range p.GetTags() { obj.Tags = append(obj.Tags, r) } for _, r := range p.GetImages() { obj.Images = append(obj.Images, r) } for _, r := range p.GetSecrets() { obj.Secrets = append(obj.Secrets, *ProtoToCloudbuildBuildTriggerBuildSecrets(r)) } for _, r := range p.GetSteps() { obj.Steps = append(obj.Steps, *ProtoToCloudbuildBuildTriggerBuildSteps(r)) } return obj } // ProtoToBuildTriggerBuildSecrets converts a BuildTriggerBuildSecrets resource from its proto representation. 
func ProtoToCloudbuildBuildTriggerBuildSecrets(p *cloudbuildpb.CloudbuildBuildTriggerBuildSecrets) *cloudbuild.BuildTriggerBuildSecrets { if p == nil { return nil } obj := &cloudbuild.BuildTriggerBuildSecrets{ KmsKeyName: dcl.StringOrNil(p.KmsKeyName), } return obj } // ProtoToBuildTriggerBuildSteps converts a BuildTriggerBuildSteps resource from its proto representation. func ProtoToCloudbuildBuildTriggerBuildSteps(p *cloudbuildpb.CloudbuildBuildTriggerBuildSteps) *cloudbuild.BuildTriggerBuildSteps { if p == nil { return nil } obj := &cloudbuild.BuildTriggerBuildSteps{ Name: dcl.StringOrNil(p.Name), Dir: dcl.StringOrNil(p.Dir), Id: dcl.StringOrNil(p.Id), Entrypoint: dcl.StringOrNil(p.Entrypoint), Timing: ProtoToCloudbuildBuildTriggerBuildStepsTiming(p.GetTiming()), PullTiming: ProtoToCloudbuildBuildTriggerBuildStepsPullTiming(p.GetPullTiming()), Timeout: dcl.StringOrNil(p.Timeout), Status: ProtoToCloudbuildBuildTriggerBuildStepsStatusEnum(p.GetStatus()), } for _, r := range p.GetEnv() { obj.Env = append(obj.Env, r) } for _, r := range p.GetArgs() { obj.Args = append(obj.Args, r) } for _, r := range p.GetWaitFor() { obj.WaitFor = append(obj.WaitFor, r) } for _, r := range p.GetSecretEnv() { obj.SecretEnv = append(obj.SecretEnv, r) } for _, r := range p.GetVolumes() { obj.Volumes = append(obj.Volumes, *ProtoToCloudbuildBuildTriggerBuildStepsVolumes(r)) } return obj } // ProtoToBuildTriggerBuildStepsVolumes converts a BuildTriggerBuildStepsVolumes resource from its proto representation. func ProtoToCloudbuildBuildTriggerBuildStepsVolumes(p *cloudbuildpb.CloudbuildBuildTriggerBuildStepsVolumes) *cloudbuild.BuildTriggerBuildStepsVolumes { if p == nil { return nil } obj := &cloudbuild.BuildTriggerBuildStepsVolumes{ Name: dcl.StringOrNil(p.Name), Path: dcl.StringOrNil(p.Path), } return obj } // ProtoToBuildTriggerBuildStepsTiming converts a BuildTriggerBuildStepsTiming resource from its proto representation. 
func ProtoToCloudbuildBuildTriggerBuildStepsTiming(p *cloudbuildpb.CloudbuildBuildTriggerBuildStepsTiming) *cloudbuild.BuildTriggerBuildStepsTiming {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerBuildStepsTiming{
		StartTime: dcl.StringOrNil(p.StartTime),
		EndTime:   dcl.StringOrNil(p.EndTime),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerBuildStepsPullTiming converts a BuildTriggerBuildStepsPullTiming resource from its proto representation.
// A nil proto maps to a nil resource.
func ProtoToCloudbuildBuildTriggerBuildStepsPullTiming(p *cloudbuildpb.CloudbuildBuildTriggerBuildStepsPullTiming) *cloudbuild.BuildTriggerBuildStepsPullTiming {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerBuildStepsPullTiming{
		StartTime: dcl.StringOrNil(p.StartTime),
		EndTime:   dcl.StringOrNil(p.EndTime),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerBuildSource converts a BuildTriggerBuildSource resource from its proto representation.
// A nil proto maps to a nil resource.
func ProtoToCloudbuildBuildTriggerBuildSource(p *cloudbuildpb.CloudbuildBuildTriggerBuildSource) *cloudbuild.BuildTriggerBuildSource {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerBuildSource{
		StorageSource: ProtoToCloudbuildBuildTriggerBuildSourceStorageSource(p.GetStorageSource()),
		RepoSource:    ProtoToCloudbuildBuildTriggerBuildSourceRepoSource(p.GetRepoSource()),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerBuildSourceStorageSource converts a BuildTriggerBuildSourceStorageSource resource from its proto representation.
// A nil proto maps to a nil resource.
func ProtoToCloudbuildBuildTriggerBuildSourceStorageSource(p *cloudbuildpb.CloudbuildBuildTriggerBuildSourceStorageSource) *cloudbuild.BuildTriggerBuildSourceStorageSource {
	if p == nil {
		return nil
	}
	obj := &cloudbuild.BuildTriggerBuildSourceStorageSource{
		Bucket:     dcl.StringOrNil(p.Bucket),
		Object:     dcl.StringOrNil(p.Object),
		Generation: dcl.StringOrNil(p.Generation),
	}
	return obj
}

// ProtoToCloudbuildBuildTriggerBuildSourceRepoSource converts a BuildTriggerBuildSourceRepoSource resource from its proto representation.
func ProtoToCloudbuildBuildTriggerBuildSourceRepoSource(p *cloudbuildpb.CloudbuildBuildTriggerBuildSourceRepoSource) *cloudbuild.BuildTriggerBuildSourceRepoSource { if p == nil { return nil } obj := &cloudbuild.BuildTriggerBuildSourceRepoSource{ ProjectId: dcl.StringOrNil(p.ProjectId), RepoName: dcl.StringOrNil(p.RepoName), BranchName: dcl.StringOrNil(p.BranchName), TagName: dcl.StringOrNil(p.TagName), CommitSha: dcl.StringOrNil(p.CommitSha), Dir: dcl.StringOrNil(p.Dir), InvertRegex: dcl.Bool(p.InvertRegex), } return obj } // ProtoToBuildTrigger converts a BuildTrigger resource from its proto representation. func ProtoToBuildTrigger(p *cloudbuildpb.CloudbuildBuildTrigger) *cloudbuild.BuildTrigger { obj := &cloudbuild.BuildTrigger{ Name: dcl.StringOrNil(p.Name), Description: dcl.StringOrNil(p.Description), Disabled: dcl.Bool(p.Disabled), Filename: dcl.StringOrNil(p.Filename), TriggerTemplate: ProtoToCloudbuildBuildTriggerTriggerTemplate(p.GetTriggerTemplate()), Github: ProtoToCloudbuildBuildTriggerGithub(p.GetGithub()), Project: dcl.StringOrNil(p.Project), Build: ProtoToCloudbuildBuildTriggerBuild(p.GetBuild()), Id: dcl.StringOrNil(p.Id), CreateTime: dcl.StringOrNil(p.CreateTime), } for _, r := range p.GetTags() { obj.Tags = append(obj.Tags, r) } for _, r := range p.GetIgnoredFiles() { obj.IgnoredFiles = append(obj.IgnoredFiles, r) } for _, r := range p.GetIncludedFiles() { obj.IncludedFiles = append(obj.IncludedFiles, r) } return obj } // BuildTriggerGithubPullRequestCommentControlEnumToProto converts a BuildTriggerGithubPullRequestCommentControlEnum enum to its proto representation. 
func CloudbuildBuildTriggerGithubPullRequestCommentControlEnumToProto(e *cloudbuild.BuildTriggerGithubPullRequestCommentControlEnum) cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum { if e == nil { return cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum(0) } if v, ok := cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum_value["BuildTriggerGithubPullRequestCommentControlEnum"+string(*e)]; ok { return cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum(v) } return cloudbuildpb.CloudbuildBuildTriggerGithubPullRequestCommentControlEnum(0) } // BuildTriggerBuildStepsStatusEnumToProto converts a BuildTriggerBuildStepsStatusEnum enum to its proto representation. func CloudbuildBuildTriggerBuildStepsStatusEnumToProto(e *cloudbuild.BuildTriggerBuildStepsStatusEnum) cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum { if e == nil { return cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum(0) } if v, ok := cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum_value["BuildTriggerBuildStepsStatusEnum"+string(*e)]; ok { return cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum(v) } return cloudbuildpb.CloudbuildBuildTriggerBuildStepsStatusEnum(0) } // BuildTriggerTriggerTemplateToProto converts a BuildTriggerTriggerTemplate resource to its proto representation. 
func CloudbuildBuildTriggerTriggerTemplateToProto(o *cloudbuild.BuildTriggerTriggerTemplate) *cloudbuildpb.CloudbuildBuildTriggerTriggerTemplate {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerTriggerTemplate{
		ProjectId:   dcl.ValueOrEmptyString(o.ProjectId),
		RepoName:    dcl.ValueOrEmptyString(o.RepoName),
		BranchName:  dcl.ValueOrEmptyString(o.BranchName),
		TagName:     dcl.ValueOrEmptyString(o.TagName),
		CommitSha:   dcl.ValueOrEmptyString(o.CommitSha),
		Dir:         dcl.ValueOrEmptyString(o.Dir),
		InvertRegex: dcl.ValueOrEmptyBool(o.InvertRegex),
	}
	return p
}

// CloudbuildBuildTriggerGithubToProto converts a BuildTriggerGithub resource to its proto representation.
// A nil resource maps to a nil proto.
func CloudbuildBuildTriggerGithubToProto(o *cloudbuild.BuildTriggerGithub) *cloudbuildpb.CloudbuildBuildTriggerGithub {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerGithub{
		Owner:       dcl.ValueOrEmptyString(o.Owner),
		Name:        dcl.ValueOrEmptyString(o.Name),
		PullRequest: CloudbuildBuildTriggerGithubPullRequestToProto(o.PullRequest),
		Push:        CloudbuildBuildTriggerGithubPushToProto(o.Push),
	}
	return p
}

// CloudbuildBuildTriggerGithubPullRequestToProto converts a BuildTriggerGithubPullRequest resource to its proto representation.
// A nil resource maps to a nil proto.
func CloudbuildBuildTriggerGithubPullRequestToProto(o *cloudbuild.BuildTriggerGithubPullRequest) *cloudbuildpb.CloudbuildBuildTriggerGithubPullRequest {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerGithubPullRequest{
		Branch:         dcl.ValueOrEmptyString(o.Branch),
		CommentControl: CloudbuildBuildTriggerGithubPullRequestCommentControlEnumToProto(o.CommentControl),
		InvertRegex:    dcl.ValueOrEmptyBool(o.InvertRegex),
	}
	return p
}

// CloudbuildBuildTriggerGithubPushToProto converts a BuildTriggerGithubPush resource to its proto representation.
func CloudbuildBuildTriggerGithubPushToProto(o *cloudbuild.BuildTriggerGithubPush) *cloudbuildpb.CloudbuildBuildTriggerGithubPush {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerGithubPush{
		Branch:      dcl.ValueOrEmptyString(o.Branch),
		Tag:         dcl.ValueOrEmptyString(o.Tag),
		InvertRegex: dcl.ValueOrEmptyBool(o.InvertRegex),
	}
	return p
}

// CloudbuildBuildTriggerBuildToProto converts a BuildTriggerBuild resource to its proto representation.
// A nil resource maps to a nil proto.
func CloudbuildBuildTriggerBuildToProto(o *cloudbuild.BuildTriggerBuild) *cloudbuildpb.CloudbuildBuildTriggerBuild {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerBuild{
		QueueTtl:   dcl.ValueOrEmptyString(o.QueueTtl),
		LogsBucket: dcl.ValueOrEmptyString(o.LogsBucket),
		Timeout:    dcl.ValueOrEmptyString(o.Timeout),
		Source:     CloudbuildBuildTriggerBuildSourceToProto(o.Source),
	}
	for _, r := range o.Tags {
		p.Tags = append(p.Tags, r)
	}
	for _, r := range o.Images {
		p.Images = append(p.Images, r)
	}
	p.Substitutions = make(map[string]string)
	for k, r := range o.Substitutions {
		p.Substitutions[k] = r
	}
	// Passing &r is safe: the callee copies the fields into a fresh proto and
	// does not retain the pointer past the call.
	for _, r := range o.Secrets {
		p.Secrets = append(p.Secrets, CloudbuildBuildTriggerBuildSecretsToProto(&r))
	}
	for _, r := range o.Steps {
		p.Steps = append(p.Steps, CloudbuildBuildTriggerBuildStepsToProto(&r))
	}
	return p
}

// CloudbuildBuildTriggerBuildSecretsToProto converts a BuildTriggerBuildSecrets resource to its proto representation.
// A nil resource maps to a nil proto.
func CloudbuildBuildTriggerBuildSecretsToProto(o *cloudbuild.BuildTriggerBuildSecrets) *cloudbuildpb.CloudbuildBuildTriggerBuildSecrets {
	if o == nil {
		return nil
	}
	p := &cloudbuildpb.CloudbuildBuildTriggerBuildSecrets{
		KmsKeyName: dcl.ValueOrEmptyString(o.KmsKeyName),
	}
	p.SecretEnv = make(map[string]string)
	for k, r := range o.SecretEnv {
		p.SecretEnv[k] = r
	}
	return p
}

// CloudbuildBuildTriggerBuildStepsToProto converts a BuildTriggerBuildSteps resource to its proto representation.
func CloudbuildBuildTriggerBuildStepsToProto(o *cloudbuild.BuildTriggerBuildSteps) *cloudbuildpb.CloudbuildBuildTriggerBuildSteps { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildSteps{ Name: dcl.ValueOrEmptyString(o.Name), Dir: dcl.ValueOrEmptyString(o.Dir), Id: dcl.ValueOrEmptyString(o.Id), Entrypoint: dcl.ValueOrEmptyString(o.Entrypoint), Timing: CloudbuildBuildTriggerBuildStepsTimingToProto(o.Timing), PullTiming: CloudbuildBuildTriggerBuildStepsPullTimingToProto(o.PullTiming), Timeout: dcl.ValueOrEmptyString(o.Timeout), Status: CloudbuildBuildTriggerBuildStepsStatusEnumToProto(o.Status), } for _, r := range o.Env { p.Env = append(p.Env, r) } for _, r := range o.Args { p.Args = append(p.Args, r) } for _, r := range o.WaitFor { p.WaitFor = append(p.WaitFor, r) } for _, r := range o.SecretEnv { p.SecretEnv = append(p.SecretEnv, r) } for _, r := range o.Volumes { p.Volumes = append(p.Volumes, CloudbuildBuildTriggerBuildStepsVolumesToProto(&r)) } return p } // BuildTriggerBuildStepsVolumesToProto converts a BuildTriggerBuildStepsVolumes resource to its proto representation. func CloudbuildBuildTriggerBuildStepsVolumesToProto(o *cloudbuild.BuildTriggerBuildStepsVolumes) *cloudbuildpb.CloudbuildBuildTriggerBuildStepsVolumes { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildStepsVolumes{ Name: dcl.ValueOrEmptyString(o.Name), Path: dcl.ValueOrEmptyString(o.Path), } return p } // BuildTriggerBuildStepsTimingToProto converts a BuildTriggerBuildStepsTiming resource to its proto representation. 
func CloudbuildBuildTriggerBuildStepsTimingToProto(o *cloudbuild.BuildTriggerBuildStepsTiming) *cloudbuildpb.CloudbuildBuildTriggerBuildStepsTiming { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildStepsTiming{ StartTime: dcl.ValueOrEmptyString(o.StartTime), EndTime: dcl.ValueOrEmptyString(o.EndTime), } return p } // BuildTriggerBuildStepsPullTimingToProto converts a BuildTriggerBuildStepsPullTiming resource to its proto representation. func CloudbuildBuildTriggerBuildStepsPullTimingToProto(o *cloudbuild.BuildTriggerBuildStepsPullTiming) *cloudbuildpb.CloudbuildBuildTriggerBuildStepsPullTiming { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildStepsPullTiming{ StartTime: dcl.ValueOrEmptyString(o.StartTime), EndTime: dcl.ValueOrEmptyString(o.EndTime), } return p } // BuildTriggerBuildSourceToProto converts a BuildTriggerBuildSource resource to its proto representation. func CloudbuildBuildTriggerBuildSourceToProto(o *cloudbuild.BuildTriggerBuildSource) *cloudbuildpb.CloudbuildBuildTriggerBuildSource { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildSource{ StorageSource: CloudbuildBuildTriggerBuildSourceStorageSourceToProto(o.StorageSource), RepoSource: CloudbuildBuildTriggerBuildSourceRepoSourceToProto(o.RepoSource), } return p } // BuildTriggerBuildSourceStorageSourceToProto converts a BuildTriggerBuildSourceStorageSource resource to its proto representation. func CloudbuildBuildTriggerBuildSourceStorageSourceToProto(o *cloudbuild.BuildTriggerBuildSourceStorageSource) *cloudbuildpb.CloudbuildBuildTriggerBuildSourceStorageSource { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildSourceStorageSource{ Bucket: dcl.ValueOrEmptyString(o.Bucket), Object: dcl.ValueOrEmptyString(o.Object), Generation: dcl.ValueOrEmptyString(o.Generation), } return p } // BuildTriggerBuildSourceRepoSourceToProto converts a BuildTriggerBuildSourceRepoSource resource to its proto representation. 
func CloudbuildBuildTriggerBuildSourceRepoSourceToProto(o *cloudbuild.BuildTriggerBuildSourceRepoSource) *cloudbuildpb.CloudbuildBuildTriggerBuildSourceRepoSource { if o == nil { return nil } p := &cloudbuildpb.CloudbuildBuildTriggerBuildSourceRepoSource{ ProjectId: dcl.ValueOrEmptyString(o.ProjectId), RepoName: dcl.ValueOrEmptyString(o.RepoName), BranchName: dcl.ValueOrEmptyString(o.BranchName), TagName: dcl.ValueOrEmptyString(o.TagName), CommitSha: dcl.ValueOrEmptyString(o.CommitSha), Dir: dcl.ValueOrEmptyString(o.Dir), InvertRegex: dcl.ValueOrEmptyBool(o.InvertRegex), } p.Substitutions = make(map[string]string) for k, r := range o.Substitutions { p.Substitutions[k] = r } return p } // BuildTriggerToProto converts a BuildTrigger resource to its proto representation. func BuildTriggerToProto(resource *cloudbuild.BuildTrigger) *cloudbuildpb.CloudbuildBuildTrigger { p := &cloudbuildpb.CloudbuildBuildTrigger{ Name: dcl.ValueOrEmptyString(resource.Name), Description: dcl.ValueOrEmptyString(resource.Description), Disabled: dcl.ValueOrEmptyBool(resource.Disabled), Filename: dcl.ValueOrEmptyString(resource.Filename), TriggerTemplate: CloudbuildBuildTriggerTriggerTemplateToProto(resource.TriggerTemplate), Github: CloudbuildBuildTriggerGithubToProto(resource.Github), Project: dcl.ValueOrEmptyString(resource.Project), Build: CloudbuildBuildTriggerBuildToProto(resource.Build), Id: dcl.ValueOrEmptyString(resource.Id), CreateTime: dcl.ValueOrEmptyString(resource.CreateTime), } for _, r := range resource.Tags { p.Tags = append(p.Tags, r) } for _, r := range resource.IgnoredFiles { p.IgnoredFiles = append(p.IgnoredFiles, r) } for _, r := range resource.IncludedFiles { p.IncludedFiles = append(p.IncludedFiles, r) } return p } // ApplyBuildTrigger handles the gRPC request by passing it to the underlying BuildTrigger Apply() method. 
func (s *BuildTriggerServer) applyBuildTrigger(ctx context.Context, c *cloudbuild.Client, request *cloudbuildpb.ApplyCloudbuildBuildTriggerRequest) (*cloudbuildpb.CloudbuildBuildTrigger, error) {
	// Convert the incoming proto resource into its DCL representation.
	p := ProtoToBuildTrigger(request.GetResource())
	res, err := c.ApplyBuildTrigger(ctx, p)
	if err != nil {
		return nil, err
	}
	// Convert the applied (server-populated) resource back to proto form.
	r := BuildTriggerToProto(res)
	return r, nil
}

// ApplyBuildTrigger handles the gRPC request by passing it to the underlying BuildTrigger Apply() method.
func (s *BuildTriggerServer) ApplyCloudbuildBuildTrigger(ctx context.Context, request *cloudbuildpb.ApplyCloudbuildBuildTriggerRequest) (*cloudbuildpb.CloudbuildBuildTrigger, error) {
	// Build a per-request client authenticated with the service account
	// file carried on the request.
	cl, err := createConfigBuildTrigger(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyBuildTrigger(ctx, cl, request)
}

// DeleteBuildTrigger handles the gRPC request by passing it to the underlying BuildTrigger Delete() method.
func (s *BuildTriggerServer) DeleteCloudbuildBuildTrigger(ctx context.Context, request *cloudbuildpb.DeleteCloudbuildBuildTriggerRequest) (*emptypb.Empty, error) {
	cl, err := createConfigBuildTrigger(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	// Delete returns no payload; an empty proto is the gRPC convention here.
	return &emptypb.Empty{}, cl.DeleteBuildTrigger(ctx, ProtoToBuildTrigger(request.GetResource()))
}

// ListCloudbuildBuildTrigger handles the gRPC request by passing it to the underlying BuildTriggerList() method.
func (s *BuildTriggerServer) ListCloudbuildBuildTrigger(ctx context.Context, request *cloudbuildpb.ListCloudbuildBuildTriggerRequest) (*cloudbuildpb.ListCloudbuildBuildTriggerResponse, error) { cl, err := createConfigBuildTrigger(ctx, request.ServiceAccountFile) if err != nil { return nil, err } resources, err := cl.ListBuildTrigger(ctx, request.Project) if err != nil { return nil, err } var protos []*cloudbuildpb.CloudbuildBuildTrigger for _, r := range resources.Items { rp := BuildTriggerToProto(r) protos = append(protos, rp) } return &cloudbuildpb.ListCloudbuildBuildTriggerResponse{Items: protos}, nil } func createConfigBuildTrigger(ctx context.Context, service_account_file string) (*cloudbuild.Client, error) { conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file)) return cloudbuild.NewClient(conf), nil }
package Problem0134 func canCompleteCircuit(gas []int, cost []int) int { remains, debts, start := 0, 0, 0 for i, g := range gas { remains += g - cost[i] if remains < 0 { // i + 1 处重新开始 start = i + 1 // 记录沿路一共欠缺的油量 debts += remains // remain 至零 remains = 0 } } if debts+remains < 0 { // 最后的剩余的油量,如法全部偿还前期欠缺的油量 // 则,无法跑完一圈 return -1 } return start }
package builtins

import (
	"os"
	"path/filepath"
)

// fileClass implements the built-in File class.
type fileClass struct {
	valueStub
}

// NewFileClass constructs the File class value and attaches its built-in
// methods (expand_path, dirname).
func NewFileClass() Value {
	f := &fileClass{}
	f.initialize()
	f.class = NewClassValue() // FIXME: this should be a global reference

	// expand_path(path [, base]) converts path into an absolute path.
	// A leading "~" is expanded to $HOME; when a second argument is given,
	// the path is joined onto that base directory instead of the CWD.
	f.AddMethod(NewMethod("expand_path", func(args ...Value) (Value, error) {
		arg1 := args[0].(*StringValue).String()
		// Guard against indexing an empty string (previously panicked).
		if len(arg1) > 0 && arg1[0] == '~' {
			arg1 = os.Getenv("HOME") + arg1[1:]
		}

		var path string
		if len(args) == 2 {
			path = filepath.Join(args[1].(*StringValue).String(), arg1)
		} else {
			path, _ = filepath.Abs(arg1)
		}

		return NewString(path), nil
	}))

	// dirname(filename) returns the directory portion of filename.
	f.AddMethod(NewMethod("dirname", func(args ...Value) (Value, error) {
		filename := args[0].(*StringValue).String()
		// BUG FIX: dirname must return the directory component, which is
		// filepath.Dir; filepath.Base returned the last path element
		// (the basename) instead.
		return NewString(filepath.Dir(filename)), nil
	}))

	return f
}

// String returns the class name.
func (file *fileClass) String() string {
	return "File"
}

// New returns the class value itself; File is not instantiated here.
func (file *fileClass) New(args ...Value) Value {
	return file
}
//source: https://github.com/ftylitak/qzxing/tree/master/examples/QZXingLive

// Command QZXingLive boots a QML application that scans barcodes with QZXing.
package main

import (
	"os"

	"github.com/therecipe/qt/core"
	"github.com/therecipe/qt/gui"

	"github.com/therecipe/qt/internal/examples/3rdparty/qzxing"
)

func main() {
	// enable high dpi scaling
	// useful for devices with high pixel density displays
	// such as smartphones, retina displays, ...
	core.QCoreApplication_SetAttribute(core.Qt__AA_EnableHighDpiScaling, true)

	// needs to be called once before you can start using QML
	gui.NewQGuiApplication(len(os.Args), os.Args)

	// register the QZXing types so QML code can instantiate them
	qzxing.RegisterQMLTypes()

	// NOTE(review): NewApplication and checkPermissions are defined elsewhere
	// in this example; checkPermissions presumably requests camera access on
	// mobile targets — confirm against the sibling files.
	customApp := NewApplication(nil)
	customApp.checkPermissions()

	// start the main Qt event loop
	// and block until app.Exit() is called
	// or the window is closed by the user
	gui.QGuiApplication_Exec()
}
// Package resources builds Envoy xDS resource messages.
package resources

import (
	"fmt"

	v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	"github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
)

// MakeRoute creates an HTTP route that routes to a given cluster.
//type clusterName string
//type routeConfig struct{
//	clusterName []string,
//	domains []string,
//}
//type routerConfig map[clusterName]routeConfig

// MakeRoutes creates all routes available to a listener
func MakeRoutes(listenerName string, vhosts []route.VirtualHost) *v2.RouteConfiguration {
	// The route configuration is named after the listener it serves.
	return &v2.RouteConfiguration{
		Name:         listenerName,
		VirtualHosts: vhosts,
	}
}

// MakeVirtualHost creates a virtual host named `cluster-listener` with all domains
// accepted in `Host:` header and routes to clusters
func MakeVirtualHost(clusterName string, domains []string, listenerName string) route.VirtualHost {
	return route.VirtualHost{
		Name:    fmt.Sprintf("%s-%s", clusterName, listenerName),
		Domains: domains,
		// A single catch-all route: the "/" prefix matches every request
		// path and forwards it to the named cluster.
		Routes: []route.Route{{
			Match: route.RouteMatch{
				PathSpecifier: &route.RouteMatch_Prefix{
					Prefix: "/",
				},
			},
			Action: &route.Route_Route{
				Route: &route.RouteAction{
					ClusterSpecifier: &route.RouteAction_Cluster{
						Cluster: clusterName,
					},
				},
			},
		},
		}}
}
package main

import (
	"fmt"
	"math"
)

// Shape is the common contract for two-dimensional figures.
type Shape interface {
	area() float64
	perimeter() float64
}

// Rect is an axis-aligned rectangle.
type Rect struct {
	width, height float64
}

// Circle is described by its radius.
type Circle struct {
	radius float64
}

// Shape implementation for Rect.
func (r Rect) area() float64      { return r.width * r.height }
func (r Rect) perimeter() float64 { return 2 * (r.width + r.height) }

// Shape implementation for Circle.
func (c Circle) area() float64      { return math.Pi * c.radius * c.radius }
func (c Circle) perimeter() float64 { return 2 * math.Pi * c.radius }

func main() {
	rect := Rect{width: 10.3, height: 4.2}
	circle := Circle{radius: 3.3}
	showPerimeter(rect, circle)
}

// showPerimeter prints the perimeter of every shape it receives, in order.
func showPerimeter(shapes ...Shape) {
	for _, shape := range shapes {
		fmt.Println(shape.perimeter())
	}
}
// Package common holds small interfaces shared across services.
package common

// Service is anything that can be shut down.
type Service interface {
	// Stop halts the service. The visible contract carries no error return,
	// so implementations are expected to handle shutdown failures internally.
	Stop()
}
// Copyright © 2017 Aeneas Rekkas <aeneas+oss@aeneas.io> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oauth2 import ( "encoding/json" "fmt" "net/http" "github.com/julienschmidt/httprouter" "github.com/ory/herodot" "github.com/ory/hydra/firewall" "github.com/pkg/errors" ) const ( ConsentRequestAccepted = "accepted" ConsentRequestRejected = "rejected" ConsentRequestPath = "/oauth2/consent/requests" ConsentResource = "oauth2:consent:requests:%s" ConsentScope = "hydra.consent" ) type ConsentSessionHandler struct { H herodot.Writer M ConsentRequestManager W firewall.Firewall ResourcePrefix string } func (h *ConsentSessionHandler) PrefixResource(resource string) string { if h.ResourcePrefix == "" { h.ResourcePrefix = "rn:hydra" } if h.ResourcePrefix[len(h.ResourcePrefix)-1] == ':' { h.ResourcePrefix = h.ResourcePrefix[:len(h.ResourcePrefix)-1] } return h.ResourcePrefix + ":" + resource } func (h *ConsentSessionHandler) SetRoutes(r *httprouter.Router) { r.GET(ConsentRequestPath+"/:id", h.FetchConsentRequest) r.PATCH(ConsentRequestPath+"/:id/reject", h.RejectConsentRequestHandler) r.PATCH(ConsentRequestPath+"/:id/accept", h.AcceptConsentRequestHandler) } // swagger:route GET /oauth2/consent/requests/{id} oAuth2 getOAuth2ConsentRequest // // Receive consent request information // // Call this endpoint to receive information on consent requests. The consent request id is usually transmitted via the URL query `consent`. 
// For example: `http://consent-app.mydomain.com/?consent=1234abcd` // // // The subject making the request needs to be assigned to a policy containing: // // ``` // { // "resources": ["rn:hydra:oauth2:consent:requests:<request-id>"], // "actions": ["get"], // "effect": "allow" // } // ``` // // Consumes: // - application/json // // Produces: // - application/json // // Schemes: http, https // // Security: // oauth2: hydra.consent // // Responses: // 200: oAuth2ConsentRequest // 401: genericError // 500: genericError func (h *ConsentSessionHandler) FetchConsentRequest(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { if _, err := h.W.TokenAllowed(r.Context(), h.W.TokenFromRequest(r), &firewall.TokenAccessRequest{ Resource: fmt.Sprintf(h.PrefixResource(ConsentResource), ps.ByName("id")), Action: "get", }, ConsentScope); err != nil { h.H.WriteError(w, r, err) return } if session, err := h.M.GetConsentRequest(ps.ByName("id")); err != nil { h.H.WriteError(w, r, err) return } else { h.H.Write(w, r, session) } } // swagger:route PATCH /oauth2/consent/requests/{id}/reject oAuth2 rejectOAuth2ConsentRequest // // Reject a consent request // // Call this endpoint to reject a consent request. This usually happens when a user denies access rights to an // application. // // // The consent request id is usually transmitted via the URL query `consent`. 
// For example: `http://consent-app.mydomain.com/?consent=1234abcd` // // // The subject making the request needs to be assigned to a policy containing: // // ``` // { // "resources": ["rn:hydra:oauth2:consent:requests:<request-id>"], // "actions": ["reject"], // "effect": "allow" // } // ``` // // Consumes: // - application/json // // Produces: // - application/json // // Schemes: http, https // // Security: // oauth2: hydra.consent // // Responses: // 204: emptyResponse // 401: genericError // 500: genericError func (h *ConsentSessionHandler) RejectConsentRequestHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { if _, err := h.W.TokenAllowed(r.Context(), h.W.TokenFromRequest(r), &firewall.TokenAccessRequest{ Resource: fmt.Sprintf(h.PrefixResource(ConsentResource), ps.ByName("id")), Action: "reject", }, ConsentScope); err != nil { h.H.WriteError(w, r, err) return } var payload RejectConsentRequestPayload if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { h.H.WriteError(w, r, errors.WithStack(err)) return } if err := h.M.RejectConsentRequest(ps.ByName("id"), &payload); err != nil { h.H.WriteError(w, r, err) return } w.WriteHeader(http.StatusNoContent) } // swagger:route PATCH /oauth2/consent/requests/{id}/accept oAuth2 acceptOAuth2ConsentRequest // // Accept a consent request // // Call this endpoint to accept a consent request. This usually happens when a user agrees to give access rights to // an application. // // // The consent request id is usually transmitted via the URL query `consent`. 
// For example: `http://consent-app.mydomain.com/?consent=1234abcd` // // // The subject making the request needs to be assigned to a policy containing: // // ``` // { // "resources": ["rn:hydra:oauth2:consent:requests:<request-id>"], // "actions": ["accept"], // "effect": "allow" // } // ``` // // Consumes: // - application/json // // Produces: // - application/json // // Schemes: http, https // // Security: // oauth2: hydra.consent // // Responses: // 204: emptyResponse // 401: genericError // 500: genericError func (h *ConsentSessionHandler) AcceptConsentRequestHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { if _, err := h.W.TokenAllowed(r.Context(), h.W.TokenFromRequest(r), &firewall.TokenAccessRequest{ Resource: fmt.Sprintf(h.PrefixResource(ConsentResource), ps.ByName("id")), Action: "accept", }, ConsentScope); err != nil { h.H.WriteError(w, r, err) return } var payload AcceptConsentRequestPayload if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { h.H.WriteError(w, r, errors.WithStack(err)) return } if err := h.M.AcceptConsentRequest(ps.ByName("id"), &payload); err != nil { h.H.WriteError(w, r, err) return } w.WriteHeader(http.StatusNoContent) }
package config

import (
	"testing"

	"fmt"
)

// Test_readXml checks that readXml flattens example.xml into configMap using
// ">"-joined key paths (e.g. "server>address").
func Test_readXml(t *testing.T) {
	// Expected fmt rendering of the flattened map.
	// NOTE(review): since Go 1.12, fmt prints plain maps with sorted keys,
	// and this expected string is not in sorted key order — if configMap is a
	// plain map this comparison will fail on modern Go; confirm whether
	// configMap is a custom type with its own String method.
	const result = `map[server>address:127.0.0.1 server>port:8080 log>file>name:log.log log>file>size:1024]`
	readXml(configMap, `example.xml`)
	if fmt.Sprintf("%s", configMap) != result {
		t.Error(fmt.Sprintf("%s", configMap), " \nNot Equal\n", result)
	}
}
// Command att is the Triton admin tool: it (optionally) clears and then
// reloads the OpenWeatherMap city list into the database.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"

	"github.com/tada3/triton/tritondb"
	"github.com/tada3/triton/weather/owm"
)

var (
	// clearFlag requests deletion of existing records before loading.
	clearFlag bool
)

func main() {
	log.Println("Triton Admin Tool")

	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, "Usage:\n att [--clear] <filepath>")
	}
	// Both -c and --clear set the same flag.
	flag.BoolVar(&clearFlag, "c", false, "clear existing records first")
	flag.BoolVar(&clearFlag, "clear", false, "clear existing records first")
	flag.Parse()

	if flag.NArg() == 0 {
		flag.Usage()
		os.Exit(1)
	}

	if clearFlag {
		clear()
	}
	load(flag.Arg(0))
}

// clear deletes all existing city-list records, exiting on failure.
func clear() {
	// BUG FIX: the original Printf had no trailing newline, so this message
	// ran together with the next line of output.
	fmt.Println("Clear existing records")
	count, err := owm.ClearCityList()
	if err != nil {
		fmt.Printf("Failed to clear: %s\n", err.Error())
		os.Exit(1)
	}
	fmt.Printf("Deleted %d records.\n", count)
}

// load inserts city records from filepath, then post-processes JP city
// names, exiting on the first failure.
func load(filepath string) {
	fmt.Printf("Load %v\n", filepath)
	count, err := owm.LoadCityList(filepath)
	if err != nil {
		fmt.Printf("Failed to load %v: %s\n", filepath, err.Error())
		os.Exit(1)
	}
	fmt.Printf("Inserted %d records.\n", count)

	count2, err := tritondb.RemoveShiFromJPCities()
	if err != nil {
		fmt.Printf("Failed to update %v: %s\n", filepath, err.Error())
		os.Exit(1)
	}
	fmt.Printf("Updated %d records.\n", count2)
}
package stages import ( "encoding/json" "fmt" "os" "path/filepath" "github.com/hashicorp/terraform-exec/tfexec" "github.com/pkg/errors" "github.com/openshift/installer/pkg/terraform" "github.com/openshift/installer/pkg/terraform/providers" "github.com/openshift/installer/pkg/types" ) // StageOption is an option for configuring a split stage. type StageOption func(*SplitStage) // NewStage creates a new split stage. // The default behavior is the following. The behavior can be changed by providing StageOptions. // - The resources of the stage will not be deleted as part of destroying the bootstrap. // - The IP addresses for the bootstrap and control plane VMs will be output from the stage as bootstrap_ip and // control_plane_ips, respectively. Only one stage for the platform should output a particular variable. This will // likely be the same stage that creates the VM. func NewStage(platform, name string, providers []providers.Provider, opts ...StageOption) SplitStage { s := SplitStage{ platform: platform, name: name, providers: providers, } for _, opt := range opts { opt(&s) } return s } // WithNormalBootstrapDestroy returns an option for specifying that a split stage should use the normal bootstrap // destroy process. The normal process is to fully delete all of the resources created in the stage. func WithNormalBootstrapDestroy() StageOption { return WithCustomBootstrapDestroy(normalDestroy) } // WithCustomBootstrapDestroy returns an option for specifying that a split stage should use a custom bootstrap // destroy process. func WithCustomBootstrapDestroy(destroy DestroyFunc) StageOption { return func(s *SplitStage) { s.destroyWithBootstrap = true s.destroy = destroy } } // WithCustomExtractHostAddresses returns an option for specifying that a split stage should use a custom extract host addresses process. 
func WithCustomExtractHostAddresses(extractHostAddresses ExtractFunc) StageOption { return func(s *SplitStage) { s.extractHostAddresses = extractHostAddresses } } // SplitStage is a split stage. type SplitStage struct { platform string name string providers []providers.Provider destroyWithBootstrap bool destroy DestroyFunc extractHostAddresses ExtractFunc } // DestroyFunc is a function for destroying the stage. type DestroyFunc func(s SplitStage, directory string, terraformDir string, varFiles []string) error // ExtractFunc is a function for extracting host addresses. type ExtractFunc func(s SplitStage, directory string, ic *types.InstallConfig) (string, int, []string, error) // Name implements pkg/terraform/Stage.Name func (s SplitStage) Name() string { return s.name } // Providers is the list of providers that are used for the stage. func (s SplitStage) Providers() []providers.Provider { return s.providers } // StateFilename implements pkg/terraform/Stage.StateFilename func (s SplitStage) StateFilename() string { return fmt.Sprintf("terraform.%s.tfstate", s.name) } // OutputsFilename implements pkg/terraform/Stage.OutputsFilename func (s SplitStage) OutputsFilename() string { return fmt.Sprintf("%s.tfvars.json", s.name) } // DestroyWithBootstrap implements pkg/terraform/Stage.DestroyWithBootstrap func (s SplitStage) DestroyWithBootstrap() bool { return s.destroyWithBootstrap } // Destroy implements pkg/terraform/Stage.Destroy func (s SplitStage) Destroy(directory string, terraformDir string, varFiles []string) error { return s.destroy(s, directory, terraformDir, varFiles) } // ExtractHostAddresses implements pkg/terraform/Stage.ExtractHostAddresses func (s SplitStage) ExtractHostAddresses(directory string, ic *types.InstallConfig) (string, int, []string, error) { if s.extractHostAddresses != nil { return s.extractHostAddresses(s, directory, ic) } return normalExtractHostAddresses(s, directory, ic) } // GetTerraformOutputs reads the terraform outputs file for the 
stage and parses it into a map of outputs. func GetTerraformOutputs(s SplitStage, directory string) (map[string]interface{}, error) { outputsFilePath := filepath.Join(directory, s.OutputsFilename()) if _, err := os.Stat(outputsFilePath); err != nil { return nil, errors.Wrapf(err, "could not find outputs file %q", outputsFilePath) } outputsFile, err := os.ReadFile(outputsFilePath) if err != nil { return nil, errors.Wrapf(err, "failed to read outputs file %q", outputsFilePath) } outputs := map[string]interface{}{} if err := json.Unmarshal(outputsFile, &outputs); err != nil { return nil, errors.Wrapf(err, "could not unmarshal outputs file %q", outputsFilePath) } return outputs, nil } func normalExtractHostAddresses(s SplitStage, directory string, _ *types.InstallConfig) (string, int, []string, error) { outputs, err := GetTerraformOutputs(s, directory) if err != nil { return "", 0, nil, err } var bootstrap string if bootstrapRaw, ok := outputs["bootstrap_ip"]; ok { bootstrap, ok = bootstrapRaw.(string) if !ok { return "", 0, nil, errors.New("could not read bootstrap IP from terraform outputs") } } var masters []string if mastersRaw, ok := outputs["control_plane_ips"]; ok { mastersSlice, ok := mastersRaw.([]interface{}) if !ok { return "", 0, nil, errors.New("could not read control plane IPs from terraform outputs") } masters = make([]string, len(mastersSlice)) for i, ipRaw := range mastersSlice { ip, ok := ipRaw.(string) if !ok { return "", 0, nil, errors.New("could not read control plane IPs from terraform outputs") } masters[i] = ip } } return bootstrap, 0, masters, nil } func normalDestroy(s SplitStage, directory string, terraformDir string, varFiles []string) error { opts := make([]tfexec.DestroyOption, len(varFiles)) for i, varFile := range varFiles { opts[i] = tfexec.VarFile(varFile) } return errors.Wrap(terraform.Destroy(directory, s.platform, s, terraformDir, opts...), "terraform destroy") }
package models

import (
	"fmt"

	"contoh_mvc/database"
)

// Siswa models one student row of the `siswa` table.
type Siswa struct {
	Id    int    `json:"id"`
	Nama  string `json:"nama"`
	Kelas string `json:"kelas"`
}

// NewSiswaModel returns a zero-valued student model.
func NewSiswaModel() Siswa {
	return Siswa{}
}

// SetId sets the primary key.
func (s *Siswa) SetId(id int) {
	s.Id = id
}

// SetNama sets the student name.
func (s *Siswa) SetNama(nama string) {
	s.Nama = nama
}

// SetKelas sets the class name.
func (s *Siswa) SetKelas(kelas string) {
	s.Kelas = kelas
}

// SelectAll fetches every student row. On any error it logs the error and
// returns whatever was collected so far (possibly nil).
func (s *Siswa) SelectAll() (data_siswa []Siswa) {
	db, err := database.Connect() // open the database connection
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close() // close the connection when done

	// BUG FIX: the query has no placeholder, but a stray argument (1) was
	// being passed, which makes database/sql fail with an argument-count
	// mismatch before any row is read.
	rows, err := db.Query("select * from siswa")
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer rows.Close()

	for rows.Next() {
		var siswa = Siswa{} // holder for one row
		var err = rows.Scan(&siswa.Id, &siswa.Nama, &siswa.Kelas)
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		data_siswa = append(data_siswa, siswa)
	}
	return
}

// Select fetches the student with the given id; on error it logs and returns
// the zero value.
func (s *Siswa) Select(id string) (data_siswa Siswa) {
	db, err := database.Connect()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()

	err = db.QueryRow("select * from siswa where id = ?", id).Scan(&data_siswa.Id, &data_siswa.Nama, &data_siswa.Kelas)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	return
}

// Insert stores the receiver as a new row and logs the generated id and the
// number of affected rows.
func (data_siswa *Siswa) Insert() {
	db, err := database.Connect()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()

	hasil, err := db.Exec("INSERT INTO siswa (nama, kelas) VALUES(?, ?)", data_siswa.Nama, data_siswa.Kelas)
	if err != nil {
		fmt.Println(err)
		return
	}

	last_id, _ := hasil.LastInsertId()       // id generated for the new row
	rows_affected, _ := hasil.RowsAffected() // number of rows written

	fmt.Println("LAST INSERT ID ", last_id)
	fmt.Println("AFFECTED ROWS ", rows_affected)
}

// Update rewrites the row matching the receiver's Id.
func (data_siswa *Siswa) Update() {
	db, err := database.Connect()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()

	hasil, err := db.Exec("UPDATE siswa SET nama = ?, kelas = ? WHERE id = ?", data_siswa.Nama, data_siswa.Kelas, data_siswa.Id)
	if err != nil {
		fmt.Println(err)
		return
	}

	rows_affected, _ := hasil.RowsAffected()
	fmt.Println("AFFECTED ROWS ", rows_affected)
}

// Delete removes the row matching the receiver's Id.
func (data_siswa *Siswa) Delete() {
	db, err := database.Connect()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()

	hasil, err := db.Exec("DELETE FROM siswa WHERE id = ?", data_siswa.Id)
	if err != nil {
		fmt.Println(err)
		return
	}

	rows_affected, _ := hasil.RowsAffected()
	fmt.Println("AFFECTED ROWS ", rows_affected)
}
package mcbanner import ( "fmt" "testing" "github.com/google/uuid" "github.com/stretchr/testify/assert" ) func ExampleGetAddress() { status := ServerStatus{ Host: "example.com", Port: 25565, } fmt.Println(status.GetAddress()) status = ServerStatus{ Host: "example.com", Port: 25566, } fmt.Println(status.GetAddress()) // Output: // example.com // example.com:25566 } // font.Drawer Mock type fontDrawerMock struct { } func (fd *fontDrawerMock) MeasureString(s string) (width, height float64) { return float64(len(s)), 0 } func TestGetNameWidth(t *testing.T) { fakePlayers := make([]Player, 20) for i := 0; i < 19; i++ { fakePlayers[i] = Player{ Name: fmt.Sprintf("Player%d", i), UUID: uuid.New(), } } longName := "longest player name " fakePlayers[19] = Player{ Name: longName, UUID: uuid.New(), } list := PlayerList{ Max: 20, Players: fakePlayers, } assert.Equal(t, float64(len(longName)), list.GetNameWidth(&fontDrawerMock{})) } func TestGetNameWidthWillNotReturnZero(t *testing.T) { namelessPlayers := make([]Player, 20) for i := 0; i < 20; i++ { namelessPlayers[i] = Player{ Name: "", UUID: uuid.New(), } } list := PlayerList{ Max: 20, Players: namelessPlayers, } assert.Equal(t, float64(1), list.GetNameWidth(&fontDrawerMock{})) }
package leetcode func isPalindrome234(head *ListNode) bool { nums := make([]int, 0) for head != nil { nums = append(nums, head.Val) head = head.Next } for i, j := 0, len(nums)-1; i < j; i++ { if nums[i] != nums[j] { return false } j-- } return true }
package arrays /*ArraySum ... Function Specification: INPUTS: numbers = An array of integer values OUTPUTS: The sum of all the entries in the inputted array */ func ArraySum(numbers []int) int { var sum int for _, number := range numbers { sum += number } return sum } /*ArraySumAll ... Function Specification: INPUTS: numbersToSum = A variable length array of arrays of integers OUTPUTS: sums = An array with the sums of the inputted arrays */ func ArraySumAll(numbersToSum ...[]int) (sums []int) { for _, numbers := range numbersToSum { sums = append(sums, ArraySum(numbers)) } return } /*ArraySumAllTails ... Function Specification: INPUTS: numbersToSum = A variable length array of arrays of integers OUTPUTS: tailSums = An array with the sums of the tails of the inputted arrays */ func ArraySumAllTails(numbersToSum ...[]int) (tailSums []int) { for _, numbers := range numbersToSum { currentTail := numbers[1:] tailSums = append(tailSums, ArraySum(currentTail)) } return }
package main //go:generate go run scripts/inline_schema.go
package corekit

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/bmizerany/pat"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Service is an HTTP micro-service: per-method route registration plus
// a blocking Run loop with graceful shutdown.
type Service interface {
	Get(path string, handler APIHandler)
	Post(path string, handler APIHandler)
	Put(path string, handler APIHandler)
	Del(path string, handler APIHandler)
	Stream(path string, handler StreamAPIHandler)
	Run()
}

// ServeMux abstracts the router so callers can supply their own mux.
type ServeMux interface {
	Add(meth string, pat string, h http.Handler)
	ServeHTTP(w http.ResponseWriter, r *http.Request)
}

// Option mutates Options during NewService construction.
type Option func(o *Options)

// Options holds everything configurable on a Service.
type Options struct {
	name             string
	version          string
	dependenciesInfo map[string]func() interface{}
	params           map[string]string
	port             int
	certFile         string
	keyFile          string
	serveMux         ServeMux
	httpsEnabled     bool
	logger           func(format string, args ...interface{})
}

// Name sets the service name reported by /info.
func Name(n string) Option {
	return func(o *Options) {
		o.name = n
	}
}

// Version sets the service version reported by /info.
func Version(v string) Option {
	return func(o *Options) {
		o.version = v
	}
}

// DependencyInfo registers a callback whose result is embedded under
// the given name in the /info payload.
func DependencyInfo(name string, f func() interface{}) Option {
	return func(o *Options) {
		o.dependenciesInfo[name] = f
	}
}

// Param adds an arbitrary key/value pair to the /info payload.
func Param(name, val string) Option {
	return func(o *Options) {
		o.params[name] = val
	}
}

// Port sets the TCP port the service listens on.
func Port(port int) Option {
	return func(o *Options) {
		o.port = port
	}
}

// Https enables TLS using the given certificate and key files.
func Https(certFile, keyFile string) Option {
	return func(o *Options) {
		o.certFile = certFile
		o.keyFile = keyFile
		o.httpsEnabled = true
	}
}

// UseServeMux replaces the default pat-based router.
func UseServeMux(mux ServeMux) Option {
	return func(o *Options) {
		o.serveMux = mux
	}
}

// Logger replaces the default stdout printf-style logger.
func Logger(l func(format string, args ...interface{})) Option {
	return func(o *Options) {
		o.logger = l
	}
}

// NewService builds a Service with the given options and registers the
// built-in /health, /info and /metrics endpoints.
func NewService(opts ...Option) Service {
	defaultLogger := log.New(os.Stdout, "", log.LUTC|log.LstdFlags|log.Lshortfile)
	options := &Options{
		dependenciesInfo: map[string]func() interface{}{},
		params:           map[string]string{},
		serveMux:         &adoptPatRouter{pat.New()},
		logger:           defaultLogger.Printf,
	}
	for _, o := range opts {
		o(options)
	}

	service := &service{
		options:          *options,
		wrapAPIHandler:   wrapAPIHandler(options.logger),
		streamAPIHandler: streamWrapAPIHandler(options.logger),
	}

	// Liveness probe: always 200 while the process serves traffic.
	service.options.serveMux.Add(http.MethodGet, "/health", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	// Introspection endpoint: name, version, params and the current
	// value of every registered dependency callback.
	service.options.serveMux.Add(http.MethodGet, "/info", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("content-type", "application/json")
		dp := map[string]interface{}{}
		for name, d := range options.dependenciesInfo {
			dp[name] = d()
		}
		if err := json.NewEncoder(w).Encode(map[string]interface{}{
			"name":         options.name,
			"version":      options.version,
			"params":       options.params,
			"dependencies": dp,
		}); err != nil {
			// Previously the encode error was silently discarded.
			options.logger("[ERROR] %+v\n", err)
		}
	}))

	service.options.serveMux.Add(http.MethodGet, "/metrics", promhttp.Handler())

	return service
}

type service struct {
	options          Options
	wrapAPIHandler   func(handler APIHandler) http.Handler
	streamAPIHandler func(handler StreamAPIHandler) http.Handler
}

// Get registers handler for GET requests on path.
func (s *service) Get(path string, handler APIHandler) {
	s.options.serveMux.Add(http.MethodGet, path, s.wrapAPIHandler(handler))
}

// Post registers handler for POST requests on path.
func (s *service) Post(path string, handler APIHandler) {
	s.options.serveMux.Add(http.MethodPost, path, s.wrapAPIHandler(handler))
}

// Put registers handler for PUT requests on path.
func (s *service) Put(path string, handler APIHandler) {
	s.options.serveMux.Add(http.MethodPut, path, s.wrapAPIHandler(handler))
}

// Del registers handler for DELETE requests on path.
func (s *service) Del(path string, handler APIHandler) {
	s.options.serveMux.Add(http.MethodDelete, path, s.wrapAPIHandler(handler))
}

// Stream registers a streaming handler for GET requests on path.
func (s *service) Stream(path string, handler StreamAPIHandler) {
	s.options.serveMux.Add(http.MethodGet, path, s.streamAPIHandler(handler))
}

// Run serves HTTP(S) until SIGINT/SIGTERM, then shuts down gracefully
// with a 5-second drain deadline. It blocks for the server's lifetime.
func (s *service) Run() {
	s.options.logger("[INFO] Start listening address :%v\n", s.options.port)
	server := http.Server{
		Addr:    fmt.Sprint(":", s.options.port),
		Handler: s.options.serveMux,
	}

	// signal.Notify delivers without blocking, so the channel must be
	// buffered or a signal arriving before the receive would be lost
	// (the unbuffered channel used previously is flagged by go vet).
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-ch
		s.options.logger("[INFO] Graceful shutdown...\n")
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := server.Shutdown(ctx); err != nil {
			s.options.logger("[ERROR] %+v\n", err)
		}
		s.options.logger("[INFO] Service stopped\n")
	}()

	var err error
	if s.options.httpsEnabled {
		err = server.ListenAndServeTLS(s.options.certFile, s.options.keyFile)
	} else {
		err = server.ListenAndServe()
	}
	// ErrServerClosed is the normal result of the graceful shutdown.
	if err != nil && err != http.ErrServerClosed {
		s.options.logger("[ERROR] %+v\n", err)
	}
}
package Problem0290

import (
	"strings"
)

// wordPattern reports whether str follows pattern: there must be a
// bijection between the letters of pattern and the space-separated
// words of str.
func wordPattern(pattern string, str string) bool {
	letters := strings.Split(pattern, "")
	words := strings.Split(str, " ")

	if len(letters) != len(words) {
		return false
	}

	// Checking the mapping in both directions guarantees a bijection.
	return isMatch(letters, words) && isMatch(words, letters)
}

// isMatch reports whether each element of s1 always pairs with the
// same element of s2 at matching positions.
func isMatch(s1, s2 []string) bool {
	seen := make(map[string]string, len(s1))
	for i, key := range s1 {
		mapped, ok := seen[key]
		if !ok {
			seen[key] = s2[i]
			continue
		}
		if mapped != s2[i] {
			return false
		}
	}
	return true
}
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/ITSecMedia/gfapigonnect"
)

// queryGravityFormsAPI proxies a Gravity Forms API call and writes the
// raw JSON response to the client.
func queryGravityFormsAPI(w http.ResponseWriter, r *http.Request) {
	var gf API
	gf.BaseURL = "http://<wordpressblog_domain>/gravityformsapi/"
	gf.KeyPublic = "<public_key>"
	gf.KeyPrivate = "<private_key>"

	gfID := "<gf_form_id_int_value_check_wordpress>" // Form ID
	gfType := "results"                              // Result Type - Read docs as there are more than one

	j := gf.Call(gfID, gfType)

	w.Header().Set("Content-Type", "application/json")
	// Fprint, not Fprintf: j is data, not a format string. Passing it
	// as the format would misinterpret any '%' in the API response
	// (go vet's printf check flags the non-constant format string).
	fmt.Fprint(w, j)
}

// main serves the proxy handler on port 80.
func main() {
	http.HandleFunc("/", queryGravityFormsAPI)

	err := http.ListenAndServe(":80", nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
// Copyright (c) 2020 by meng. All rights reserved. // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. /** * @Author: meng * @Description: * @File: behavior_tree * @Version: 1.0.0 * @Date: 2020/4/10 15:28 */ package behavior_tree import ( "github.com/mx5566/behavior-tree/interf" ) type BehaviorTree struct { root interf.IBehaviour name string } func NewBehaviorTree(root interf.IBehaviour, name string) *BehaviorTree { return &BehaviorTree{root: root, name: name} } func (this *BehaviorTree) GetName() string { return this.name } func (this *BehaviorTree) Tick(inter interface{}) { this.root.Tick(inter) } func (this *BehaviorTree) HaveRoot() bool { if this.root != nil { return true } return false } func (this *BehaviorTree) SetRoot(inNode interf.IBehaviour) { this.root = inNode } func (this *BehaviorTree) Release() { this.root.Release() } func (this *BehaviorTree) GetRoot() interf.IBehaviour { return this.root }
package dht

import (
	"encoding/json"
	"fmt"
	"net"
)

// Transport sends and receives DHT messages over UDP on behalf of a
// node, dispatching inbound messages through msgQueue.
type Transport struct {
	bindAddress string
	msgQueue    chan *Msg
	node        *NODE
}

// listen binds a UDP socket on bindAddress and decodes incoming JSON
// messages onto msgQueue. It returns early if the socket cannot be set
// up; previously a failed ListenUDP was only printed and the nil conn
// was still dereferenced (SetReadBuffer before the error check).
func (transport *Transport) listen() {
	udpAddr, err := net.ResolveUDPAddr("udp", transport.bindAddress)
	if err != nil {
		fmt.Println(err)
		return
	}
	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	// Enlarged socket buffers to absorb bursts; failures here are
	// non-fatal, so just report them.
	if err := conn.SetReadBuffer(10000); err != nil {
		fmt.Println(err)
	}
	if err := conn.SetWriteBuffer(10000); err != nil {
		fmt.Println(err)
	}

	dec := json.NewDecoder(conn)
	fmt.Println(transport.node.ID, "listening on", transport.bindAddress)
	for {
		msg := Msg{}
		if err := dec.Decode(&msg); err != nil {
			// Skip malformed datagrams instead of enqueuing a
			// half-decoded message.
			fmt.Println(err)
			continue
		}
		go func() {
			transport.msgQueue <- &msg
		}()
	}
}

// msgHandler starts a goroutine that pulls messages off msgQueue and
// dispatches each to the node callback matching its Type. Every
// handler runs in its own goroutine so a slow handler cannot block
// the queue.
func (transport *Transport) msgHandler() {
	go func() {
		for {
			select {
			case msg := <-transport.msgQueue:
				switch msg.Type {
				case "lookup":
					go transport.node.lookup(msg)
				case "response":
					go transport.node.responseHandler(msg)
				case "addResponse":
					go transport.node.addResponseHandler(msg)
				case "youAreMyPred":
					go transport.node.predHandler(msg)
				case "youAreMySucc":
					go transport.node.succHandler(msg)
				case "addToRing":
					go transport.node.lookup(msg)
				case "fingerLookup":
					go transport.node.lookup(msg)
				case "responseFingerLookup":
					go transport.node.fingerLookupHandler(msg)
				}
			}
		}
	}()
}

// send marshals msg as JSON and writes it to the first destination
// address in msg.Dst. All errors are reported; previously resolve,
// dial, and write errors were silently dropped (and a failed dial
// would have dereferenced a nil conn via the deferred Close).
func (transport *Transport) send(msg *Msg) {
	udpAddr, err := net.ResolveUDPAddr("udp", msg.Dst[0])
	if err != nil {
		fmt.Println(err)
		return
	}
	conn, err := net.DialUDP("udp", nil, udpAddr)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	bytes, err := json.Marshal(msg)
	if err != nil {
		fmt.Println(err)
		return
	}
	if _, err := conn.Write(bytes); err != nil {
		fmt.Println(err)
	}
}
package mascot_test

import (
	"testing"

	"github.com/Akim-Delli/landGo/mascot"
)

// TestMascot verifies that the package nominates Tux as best mascot.
func TestMascot(t *testing.T) {
	got := mascot.BestMascot()
	if got != "Tux" {
		t.Fatal("Go Gopher")
	}
}
package main

import (
	"fmt"
	"github.com/ermos/hue"
	"log"
)

// main connects to a Hue bridge, refreshes its configuration, and
// prints the bridge's name.
func main() {
	options := hue.BridgeOptions{
		SaveToken:    true,
		SaveLocation: "./",
		Debug:        hue.DebugAll,
	}
	bridge := hue.Conn("192.168.1.2", options)

	if err := bridge.Fetch.Bridge(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(bridge.Config.Name)
}
package trial

import (
	"io"
	"regexp"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// mockReader is a testify mock that implements io.Reader.
type mockReader struct {
	mock.Mock
}

// Read copies the configured string into p and returns the configured
// error from the mock expectation.
func (reader *mockReader) Read(p []byte) (int, error) {
	returns := reader.Mock.Called(p)
	n := copy(p, returns.String(0))
	return n, returns.Error(1)
}

var cases = []struct {
	law      regexp.Regexp
	evidence string
	verdict  bool
}{
	{
		law:      compile("test"),
		evidence: "tset",
		verdict:  false,
	},
	{
		law:      compile("test"),
		evidence: "test",
		verdict:  true,
	},
	{
		law:      compile(`^test\dtest$`),
		evidence: "test0test",
		verdict:  true,
	},
}

func TestCorrectVerdicts(t *testing.T) {
	for _, c := range cases {
		// io.EOF terminates the read after the evidence is delivered
		// (this was previously hard-coded inside the helper).
		got, evidence := Conduct(c.law, getReaderThatReads(c.evidence, io.EOF))
		require.Equal(t, c.evidence, evidence, c)
		require.Equal(t, c.verdict, got, c)
	}
}

// compile panics on an invalid pattern; acceptable for test fixtures.
func compile(regex string) regexp.Regexp {
	r, e := regexp.Compile(regex)
	if e != nil {
		panic(e)
	}
	return *r
}

// getReaderThatReads builds a reader whose Read yields toRead together
// with err. The err parameter was previously ignored and io.EOF
// hard-coded, making the signature misleading.
func getReaderThatReads(toRead string, err error) io.Reader {
	reader := new(mockReader)
	reader.Mock.On("Read", mock.AnythingOfType("[]uint8")).Return(toRead, err)
	return reader
}
package errorDispose

import "fmt"

// ErrorPrint prints err together with errorText when err is non-nil;
// it is a no-op for a nil error.
//
// The parameter is named err (not `error`) so it no longer shadows the
// builtin error type, and the redundant trailing return is gone.
func ErrorPrint(err error, errorText string) {
	if err != nil {
		fmt.Println(err, errorText)
	}
}
package rpc

import (
	"net/http"
	"net/http/httptest"
	"testing"

	hTest "github.com/skos-ninja/truelayer-tech/lib/http/test"
	"github.com/skos-ninja/truelayer-tech/svc/pokemon/app/test"

	"github.com/gin-gonic/gin"
	"github.com/stretchr/testify/assert"
)

// doGetPokemon builds the RPC layer over a test app configured with the
// given failure modes, invokes GetPokemon with the supplied route
// params (nil means no params), and returns the recorded response.
// It factors out the setup previously duplicated in every test.
func doGetPokemon(internalError, notFound bool, params gin.Params) *http.Response {
	app := test.New(internalError, notFound)
	rpc := New(app)

	r := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(r)
	ctx.Params = params

	rpc.GetPokemon(ctx)
	return r.Result()
}

// idParams sets the ":id" route param to "test".
var idParams = gin.Params{
	gin.Param{
		Key:   "id",
		Value: "test",
	},
}

func TestGetPokemonNoID(t *testing.T) {
	resp := doGetPokemon(false, false, nil)

	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}

func TestGetPokemonSuccess(t *testing.T) {
	resp := doGetPokemon(false, false, idParams)

	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))

	expectedResponse := getPokemonResponse{
		Name:        "test",
		Description: test.Text,
	}
	hTest.AssertJSONMatches(t, resp, expectedResponse)
}

func TestGetPokemonNotFound(t *testing.T) {
	resp := doGetPokemon(false, true, idParams)

	assert.Equal(t, http.StatusNotFound, resp.StatusCode)
}

func TestGetPokemonInternalError(t *testing.T) {
	resp := doGetPokemon(true, false, idParams)

	assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
}
package main

import (
	"log"
	"net/http"
	"os"
)

// main serves the ./public directory over HTTP on the port named by
// the PORT environment variable.
func main() {
	port := os.Getenv("PORT")

	log.Println("Listening...")
	if err := http.ListenAndServe(":"+port, http.FileServer(http.Dir("public"))); err != nil {
		log.Printf("Error running web server for static assets: %v", err)
	}
}