text stringlengths 11 4.05M |
|---|
// +build windows
package main
import "syscall"
// suggestedShells lists shell commands to try on Windows, in order of
// preference (PowerShell first, then cmd). Each entry is an argv slice.
var suggestedShells = [][]string{
	{"powershell"},
	{"cmd"},
}
func sysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{HideWindow: true}
}
|
package main
import (
"github.com/julienschmidt/httprouter"
"log"
"net/http"
)
// Home handles GET / by replying with an empty 200 OK response. The request
// body and route parameters are unused.
func Home(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.WriteHeader(http.StatusOK)
}
// newRouter builds the application's HTTP router and wires up all routes.
func newRouter() *httprouter.Router {
	r := httprouter.New()
	r.GET("/", Home)
	return r
}
// main starts the HTTP server on port 8080 and exits fatally if it stops.
func main() {
	log.Fatal(http.ListenAndServe(":8080", newRouter()))
}
|
package fosscafe
import (
"fmt"
"strings"
"testing"
)
// TestGreeting checks that the greeting string round-trips unchanged.
func TestGreeting(t *testing.T) {
	greeting := "hello everyone"
	fmt.Println(greeting)
	if strings.Compare(greeting, "hello everyone") != 0 {
		t.Error("Expected hello everyone, got", greeting)
	}
}
// TestGreetingsToMe is a placeholder self-greeting test. The commented-out
// duplicate of TestGreeting was removed as dead code, and the stray
// fmt.Println was replaced with t.Log so output only appears with -v.
func TestGreetingsToMe(t *testing.T) {
	t.Log("Saying hi to myself")
}
|
package config
import (
"encoding/json"
"os"
)
// Config contains the information for ElasticSearch
type Config struct {
	Elastic struct {
		// URL is the base address of the ElasticSearch server.
		URL string `json:"url"`
	} `json:"elastic"`
}

// New returns a Config struct filled with the json file at path. It returns
// a zero Config together with an error when the file cannot be opened or
// does not contain valid JSON.
func New(path string) (Config, error) {
	var cfg Config
	file, err := os.Open(path)
	if err != nil {
		return cfg, err
	}
	// Close only after a successful Open; the previous code deferred Close
	// before the error check, scheduling a call on a nil *os.File.
	defer file.Close()
	if err := json.NewDecoder(file).Decode(&cfg); err != nil {
		return cfg, err
	}
	return cfg, nil
}
|
// Copyright (c) 2018 Palantir Technologies. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration_test
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/nmiyake/pkg/dirs"
"github.com/palantir/godel/v2/framework/pluginapitester"
"github.com/palantir/godel/v2/pkg/products"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestConjurePlugin runs the conjure plugin end-to-end: project-1 generates
// from a local YAML spec (with accept-funcs enabled) and project-2 generates
// from JSON IR fetched from a local test HTTP server. The generated Go files
// are compared against the golden output embedded below.
func TestConjurePlugin(t *testing.T) {
	const (
		// Conjure definition in YAML form, consumed by project-1 via a file.
		conjureSpecYML = `
types:
definitions:
default-package: com.palantir.conjure.test.api
objects:
TestCase:
fields:
name: string
TestUnion:
union:
testCase: TestCase
`
		// The equivalent spec as JSON IR, served over HTTP for project-2.
		conjureSpecJSON = `
{
"version" : 1,
"errors" : [ ],
"types" : [ {
"type" : "object",
"object" : {
"typeName" : {
"name" : "TestCase",
"package" : "com.palantir.conjure.test.api"
},
"fields" : [ {
"fieldName" : "name",
"type" : {
"type" : "primitive",
"primitive" : "STRING"
}
} ]
}
} ],
"services" : [ ]
}
`
		yamlDir = "yamlDir"
		// Plugin configuration template; SUBSTITUTE_URL is replaced with the
		// test server's URL before being written to disk.
		conjureYMLSubstitute = `
projects:
project-1:
accept-funcs: true
output-dir: conjure-output
ir-locator: ` + yamlDir + `
project-2:
output-dir: conjure-output2
ir-locator:
type: remote
locator: SUBSTITUTE_URL
`
	)
	// Test server backing project-2's remote ir-locator.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = fmt.Fprint(w, conjureSpecJSON)
	}))
	defer ts.Close()
	pluginPath, err := products.Bin("conjure-plugin")
	require.NoError(t, err)
	projectDir, cleanup, err := dirs.TempDir(".", "")
	require.NoError(t, err)
	ymlDir := path.Join(projectDir, yamlDir)
	err = os.Mkdir(ymlDir, 0755)
	require.NoError(t, err)
	defer cleanup()
	err = os.MkdirAll(path.Join(projectDir, "godel", "config"), 0755)
	require.NoError(t, err)
	conjureYML := strings.Replace(conjureYMLSubstitute, "SUBSTITUTE_URL", ts.URL, -1)
	err = ioutil.WriteFile(path.Join(projectDir, "godel", "config", "conjure-plugin.yml"), []byte(conjureYML), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(ymlDir, "conjure.yml"), []byte(conjureSpecYML), 0644)
	require.NoError(t, err)
	outputBuf := &bytes.Buffer{}
	runPluginCleanup, err := pluginapitester.RunPlugin(pluginapitester.NewPluginProvider(pluginPath), nil, "conjure", nil, projectDir, false, outputBuf)
	defer runPluginCleanup()
	require.NoError(t, err, outputBuf.String())
	// Both projects consume the same spec, so both should produce identical
	// struct output.
	for _, outputName := range []string{"conjure-output", "conjure-output2"} {
		contentBytes, err := ioutil.ReadFile(path.Join(projectDir, outputName, "conjure", "test", "api", "structs.conjure.go"))
		require.NoError(t, err)
		wantContent := `// This file was generated by Conjure and should not be manually edited.
package api
import (
"github.com/palantir/pkg/safejson"
"github.com/palantir/pkg/safeyaml"
)
type TestCase struct {
Name string ` + "`" + `json:"name"` + "`" + `
}
func (o TestCase) MarshalYAML() (interface{}, error) {
jsonBytes, err := safejson.Marshal(o)
if err != nil {
return nil, err
}
return safeyaml.JSONtoYAMLMapSlice(jsonBytes)
}
func (o *TestCase) UnmarshalYAML(unmarshal func(interface{}) error) error {
jsonBytes, err := safeyaml.UnmarshalerToJSONBytes(unmarshal)
if err != nil {
return err
}
return safejson.Unmarshal(jsonBytes, *&o)
}
`
		assert.Equal(t, wantContent, string(contentBytes), "Got:\n%s", string(contentBytes))
	}
	// Only project-1 enables accept-funcs, so only its output directory is
	// checked for the union type with visitor/accept helpers.
	for _, outputName := range []string{"conjure-output"} {
		contentBytes, err := ioutil.ReadFile(path.Join(projectDir, outputName, "conjure", "test", "api", "unions.conjure.go"))
		require.NoError(t, err)
		wantContent := `// This file was generated by Conjure and should not be manually edited.
package api
import (
"context"
"fmt"
"github.com/palantir/pkg/safejson"
"github.com/palantir/pkg/safeyaml"
)
type TestUnion struct {
typ string
testCase *TestCase
}
type testUnionDeserializer struct {
Type string ` + "`" + `json:"type"` + "`" + `
TestCase *TestCase ` + "`" + `json:"testCase"` + "`" + `
}
func (u *testUnionDeserializer) toStruct() TestUnion {
return TestUnion{typ: u.Type, testCase: u.TestCase}
}
func (u *TestUnion) toSerializer() (interface{}, error) {
switch u.typ {
default:
return nil, fmt.Errorf("unknown type %s", u.typ)
case "testCase":
return struct {
Type string ` + "`" + `json:"type"` + "`" + `
TestCase TestCase ` + "`" + `json:"testCase"` + "`" + `
}{Type: "testCase", TestCase: *u.testCase}, nil
}
}
func (u TestUnion) MarshalJSON() ([]byte, error) {
ser, err := u.toSerializer()
if err != nil {
return nil, err
}
return safejson.Marshal(ser)
}
func (u *TestUnion) UnmarshalJSON(data []byte) error {
var deser testUnionDeserializer
if err := safejson.Unmarshal(data, &deser); err != nil {
return err
}
*u = deser.toStruct()
return nil
}
func (u TestUnion) MarshalYAML() (interface{}, error) {
jsonBytes, err := safejson.Marshal(u)
if err != nil {
return nil, err
}
return safeyaml.JSONtoYAMLMapSlice(jsonBytes)
}
func (u *TestUnion) UnmarshalYAML(unmarshal func(interface{}) error) error {
jsonBytes, err := safeyaml.UnmarshalerToJSONBytes(unmarshal)
if err != nil {
return err
}
return safejson.Unmarshal(jsonBytes, *&u)
}
func (u *TestUnion) AcceptFuncs(testCaseFunc func(TestCase) error, unknownFunc func(string) error) error {
switch u.typ {
default:
if u.typ == "" {
return fmt.Errorf("invalid value in union type")
}
return unknownFunc(u.typ)
case "testCase":
return testCaseFunc(*u.testCase)
}
}
func (u *TestUnion) TestCaseNoopSuccess(TestCase) error {
return nil
}
func (u *TestUnion) ErrorOnUnknown(typeName string) error {
return fmt.Errorf("invalid value in union type. Type name: %s", typeName)
}
func (u *TestUnion) Accept(v TestUnionVisitor) error {
switch u.typ {
default:
if u.typ == "" {
return fmt.Errorf("invalid value in union type")
}
return v.VisitUnknown(u.typ)
case "testCase":
return v.VisitTestCase(*u.testCase)
}
}
type TestUnionVisitor interface {
VisitTestCase(v TestCase) error
VisitUnknown(typeName string) error
}
func (u *TestUnion) AcceptWithContext(ctx context.Context, v TestUnionVisitorWithContext) error {
switch u.typ {
default:
if u.typ == "" {
return fmt.Errorf("invalid value in union type")
}
return v.VisitUnknownWithContext(ctx, u.typ)
case "testCase":
return v.VisitTestCaseWithContext(ctx, *u.testCase)
}
}
type TestUnionVisitorWithContext interface {
VisitTestCaseWithContext(ctx context.Context, v TestCase) error
VisitUnknownWithContext(ctx context.Context, typeName string) error
}
func NewTestUnionFromTestCase(v TestCase) TestUnion {
return TestUnion{typ: "testCase", testCase: &v}
}
`
		assert.Equal(t, wantContent, string(contentBytes), "Got:\n%s", string(contentBytes))
	}
}
// TestConjurePluginVerify checks that verify mode passes on freshly generated
// output and fails with a "checksum changed" message after a generated file
// is modified.
func TestConjurePluginVerify(t *testing.T) {
	const (
		conjureSpecYML = `
types:
definitions:
default-package: com.palantir.base.api
objects:
BaseType:
fields:
id: string
`
		yamlDir = "yamlDir"
	)
	pluginPath, err := products.Bin("conjure-plugin")
	require.NoError(t, err)
	projectDir, cleanup, err := dirs.TempDir(".", "")
	require.NoError(t, err)
	ymlDir := path.Join(projectDir, yamlDir)
	err = os.Mkdir(ymlDir, 0755)
	require.NoError(t, err)
	defer cleanup()
	err = os.MkdirAll(path.Join(projectDir, "godel", "config"), 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(projectDir, "godel", "config", "conjure-plugin.yml"), []byte(`
projects:
project-1:
output-dir: conjure
ir-locator: `+yamlDir+`
`), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(ymlDir, "conjure.yml"), []byte(conjureSpecYML), 0644)
	require.NoError(t, err)
	outputBuf := &bytes.Buffer{}
	// First run generates the output that verify is compared against.
	runPluginCleanup, err := pluginapitester.RunPlugin(pluginapitester.NewPluginProvider(pluginPath), nil, "conjure", nil, projectDir, false, outputBuf)
	defer runPluginCleanup()
	require.NoError(t, err, outputBuf.String())
	outputBuf = &bytes.Buffer{}
	// Verify on unmodified output should succeed.
	// NOTE(review): this invocation passes "verify" while the failing run
	// below passes "--verify" — presumably both forms are accepted; confirm.
	_, err = pluginapitester.RunPlugin(pluginapitester.NewPluginProvider(pluginPath), nil, "conjure", []string{"verify"}, projectDir, false, outputBuf)
	require.NoError(t, err, outputBuf.String())
	// Overwrite a generated file, then expect verify to fail and to name the
	// modified file in its output.
	structsFile := filepath.Join("conjure", "base", "api", "structs.conjure.go")
	err = ioutil.WriteFile(filepath.Join(projectDir, structsFile), []byte("package api"), 0644)
	require.NoError(t, err, "failed to change generated file")
	outputBuf = &bytes.Buffer{}
	_, err = pluginapitester.RunPlugin(pluginapitester.NewPluginProvider(pluginPath), nil, "conjure", []string{"--verify"}, projectDir, false, outputBuf)
	assert.Error(t, err, "modified file did not trigger verify fail")
	stdout := outputBuf.String()
	assert.True(t, strings.Contains(stdout, structsFile+": checksum changed"), "Unexpected standard out: %s", stdout)
}
// TestConjurePluginPublish runs "conjure-publish" in dry-run mode against a
// local test server and checks that the two upload log lines (IR artifact and
// POM) match the expected Artifactory repository layout.
func TestConjurePluginPublish(t *testing.T) {
	const (
		conjureSpecYML = `
types:
definitions:
default-package: com.palantir.conjure.test.api
objects:
TestCase:
fields:
name: string
`
		yamlDir = "yamlDir"
		conjureYML = `
projects:
project-1:
output-dir: conjure-output
ir-locator: ` + yamlDir + `
`
	)
	// Stand-in artifact repository; with --dry-run nothing is uploaded.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer ts.Close()
	pluginPath, err := products.Bin("conjure-plugin")
	require.NoError(t, err)
	projectDir, cleanup, err := dirs.TempDir(".", "")
	require.NoError(t, err)
	ymlDir := path.Join(projectDir, yamlDir)
	err = os.Mkdir(ymlDir, 0755)
	require.NoError(t, err)
	defer cleanup()
	err = os.MkdirAll(path.Join(projectDir, "godel", "config"), 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(projectDir, "godel", "config", "conjure-plugin.yml"), []byte(conjureYML), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(ymlDir, "conjure.yml"), []byte(conjureSpecYML), 0644)
	require.NoError(t, err)
	outputBuf := &bytes.Buffer{}
	runPluginCleanup, err := pluginapitester.RunPlugin(pluginapitester.NewPluginProvider(pluginPath), nil, "conjure-publish", []string{
		"--dry-run",
		"--group-id=com.palantir.test-group",
		"--repository=test-repo",
		"--url=" + ts.URL,
		"--username=test-username",
		"--password=test-password",
	}, projectDir, false, outputBuf)
	defer runPluginCleanup()
	require.NoError(t, err, outputBuf.String())
	// Two log lines plus a trailing newline split into 3 parts.
	lines := strings.Split(outputBuf.String(), "\n")
	assert.Equal(t, 3, len(lines), "Expected output to have 3 lines:\n%s", outputBuf.String())
	wantRegexp := regexp.QuoteMeta("[DRY RUN]") + " Uploading .*?" + regexp.QuoteMeta(".conjure.json") + " to " + regexp.QuoteMeta(ts.URL+"/artifactory/test-repo/com/palantir/test-group/project-1/") + ".*?" + regexp.QuoteMeta("/project-1-") + ".*?" + regexp.QuoteMeta(".conjure.json")
	assert.Regexp(t, wantRegexp, lines[0])
	wantRegexp = regexp.QuoteMeta("[DRY RUN]") + " Uploading to " + regexp.QuoteMeta(ts.URL+"/artifactory/test-repo/com/palantir/test-group/") + ".*?" + regexp.QuoteMeta(".pom")
	assert.Regexp(t, wantRegexp, lines[1])
}
// TestUpgradeConfig exercises the plugin's config-upgrade task across legacy,
// v0, blank, and current configuration formats: legacy/v0 configs fail with
// an explanatory error, while blank and current configs pass through
// unchanged.
func TestUpgradeConfig(t *testing.T) {
	pluginPath, err := products.Bin("conjure-plugin")
	require.NoError(t, err)
	pluginProvider := pluginapitester.NewPluginProvider(pluginPath)
	pluginapitester.RunUpgradeConfigTest(t,
		pluginProvider,
		nil,
		[]pluginapitester.UpgradeConfigTestCase{
			{
				Name: "legacy config fails to upgrade",
				ConfigFiles: map[string]string{
					"godel/config/conjure.yml": `
conjure-projects:
# comment
project-1:
project-file: ./conjure-project.yml
project-2:
project-file: foo/conjure-project.yml
project-3:
project-file: bar/conjure-project.yml
`,
				},
				Legacy:     true,
				WantError:  true,
				WantOutput: "Failed to upgrade configuration:\n\tgodel/config/conjure-plugin.yml: failed to upgrade configuration: v0 configuration is not supported\n",
			},
			{
				Name: "blank legacy config upgrade succeeds",
				ConfigFiles: map[string]string{
					"godel/config/conjure-plugin.yml": ``,
				},
				WantFiles: map[string]string{
					"godel/config/conjure-plugin.yml": ``,
				},
				Legacy:     true,
				WantOutput: "",
			},
			{
				Name: "v0 config fails to upgrade",
				ConfigFiles: map[string]string{
					"godel/config/conjure-plugin.yml": `
conjure-projects:
# comment
project-1:
project-file: ./conjure-project.yml
project-2:
project-file: foo/conjure-project.yml
project-3:
project-file: bar/conjure-project.yml
`,
				},
				WantError:  true,
				WantOutput: "Failed to upgrade configuration:\n\tgodel/config/conjure-plugin.yml: v0 configuration is not supported\n",
			},
			{
				Name: "blank v0 config upgrade succeeds",
				ConfigFiles: map[string]string{
					"godel/config/conjure-plugin.yml": ``,
				},
				WantFiles: map[string]string{
					"godel/config/conjure-plugin.yml": ``,
				},
				WantOutput: "",
			},
			{
				// Current (version 1) config must round-trip byte-for-byte,
				// comments included.
				Name: "current config is unmodified",
				ConfigFiles: map[string]string{
					"godel/config/conjure-plugin.yml": `
version: 1
projects:
sls-health-api:
# comment
output-dir: conjure
ir-locator: https://publish.artifactory.com/artifactory/internal-conjure-release/com/palantir/spec/health-api/3.2.0/health-api-3.2.0.json
`,
				},
				WantOutput: "",
				WantFiles: map[string]string{
					"godel/config/conjure-plugin.yml": `
version: 1
projects:
sls-health-api:
# comment
output-dir: conjure
ir-locator: https://publish.artifactory.com/artifactory/internal-conjure-release/com/palantir/spec/health-api/3.2.0/health-api-3.2.0.json
`,
				},
			},
		},
	)
}
|
package client
import (
"fmt"
"github.com/UnityTech/nemesis/pkg/resource/gcp"
"github.com/UnityTech/nemesis/pkg/utils"
"github.com/golang/glog"
)
// GetProjects gathers the list of projects and active API resources for the project
func (c *Client) GetProjects() error {
	if *flagProjectFilter == "" {
		glog.Exitf("No project filter was provided. Either specify --project.filter or set NEMESIS_PROJECT_FILTER to the appropriate regex (e.g. my-cool-projects-*)")
	}
	defer utils.Elapsed("GetProjects")()
	// Get list of all projects.
	// Additionally we must make sure that the project is ACTIVE. Any other state will return errors
	projectFilter := fmt.Sprintf("%v AND lifecycleState=ACTIVE", *flagProjectFilter)
	projects := listAllProjects(projectFilter, c.cloudResourceClient)
	// Return an error that we retrieved no projects
	if len(projects) == 0 {
		return fmt.Errorf("No projects found when matching against '%v'", projectFilter)
	}
	// Short-lived goroutine for retrieving project services. Each worker now
	// ranges over the channel instead of performing a single receive, so
	// correctness no longer silently depends on the pool being sized exactly
	// to len(projects).
	servicesWorker := func(workerID int, projectIDs <-chan string, results chan<- serviceCallResult) {
		for id := range projectIDs {
			projectID := fmt.Sprintf("projects/%v", id)
			servicesList, err := c.serviceusageClient.Services.List(projectID).Filter("state:ENABLED").Do()
			if err != nil {
				glog.Fatalf("Failed to retrieve list of services for project %v: %v", projectID, err)
			}
			projectServices := []*gcp.ServiceAPIResource{}
			for _, s := range servicesList.Services {
				projectServices = append(projectServices, gcp.NewServiceAPIResource(s))
			}
			results <- serviceCallResult{ProjectID: id, Services: projectServices}
		}
	}
	// Setup worker pool
	projectIDs := make(chan string, len(projects))
	results := make(chan serviceCallResult, len(projects))
	numWorkers := len(projects)
	for w := 0; w < numWorkers; w++ {
		go servicesWorker(w, projectIDs, results)
	}
	// Feed the workers and collect the projects for reuse
	for _, p := range projects {
		projectIDs <- p.ProjectId
		c.resourceprojects = append(c.resourceprojects, p)
	}
	close(projectIDs)
	// Collect the results; exactly one result arrives per project.
	for i := 0; i < len(projects); i++ {
		res := <-results
		c.services[res.ProjectID] = res.Services
	}
	return nil
}
// serviceCallResult pairs a project ID with the enabled service API
// resources retrieved for that project.
type serviceCallResult struct {
	ProjectID string
	Services  []*gcp.ServiceAPIResource
}
|
package shape
import (
"fmt"
"io"
"github.com/gregoryv/draw/xy"
"github.com/gregoryv/nexus"
)
// NewDot returns a Dot with the default radius of 6 and CSS class "dot".
func NewDot() *Dot {
	dot := &Dot{
		Radius: 6,
		class:  "dot",
	}
	return dot
}
// Dot is a small filled circle shape. Its position (x, y) is the top-left
// corner of the bounding box, set via SetX/SetY.
type Dot struct {
	x, y   int
	Radius int
	class  string
}
// String returns a short description of the shape.
func (d *Dot) String() string {
	return fmt.Sprint("Dot")
}
// Position returns the top-left corner of the dot's bounding box.
func (d *Dot) Position() (int, int) {
	return d.x, d.y
}

// SetX sets the x coordinate of the top-left corner.
func (d *Dot) SetX(x int) { d.x = x }

// SetY sets the y coordinate of the top-left corner.
func (d *Dot) SetY(y int) { d.y = y }

// Width returns the bounding-box width, i.e. the circle's diameter.
func (d *Dot) Width() int {
	return d.Radius * 2
}

// Height returns the bounding-box height; a dot is as tall as it is wide.
func (d *Dot) Height() int { return d.Width() }

// Direction reports the dot's facing; dots always face right.
func (d *Dot) Direction() Direction { return DirectionRight }

// SetClass sets the CSS class used when rendering to SVG.
func (d *Dot) SetClass(class string) { d.class = class }
// WriteSVG renders the dot as an SVG <circle> element to out. The circle's
// center is the stored top-left position offset by the radius. The first
// error recorded by the nexus printer, if any, is returned.
func (d *Dot) WriteSVG(out io.Writer) error {
	w, err := nexus.NewPrinter(out)
	x, y := d.Position()
	x += d.Radius
	y += d.Radius
	// Interpreted string so \n emits a real newline; inside the previous raw
	// (backtick) string it was written literally as the characters '\' 'n',
	// corrupting the SVG output.
	w.Printf(
		"<circle class=\"%s\" cx=\"%v\" cy=\"%v\" r=\"%v\" />\n",
		d.class, x, y, d.Radius,
	)
	return *err
}
// Edge returns the point on the dot's bounding box where a line from start
// toward the shape crosses, delegating to the shared boxEdge helper.
func (d *Dot) Edge(start xy.Point) xy.Point {
	return boxEdge(start, d)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package wifiutil provides helper functions for the wificell package.
package wifiutil
|
// Package apitypes defines types shared between the daemon and its api client.
package apitypes
import (
"strings"
"github.com/arigatomachine/cli/identity"
"github.com/arigatomachine/cli/primitive"
)
// ErrorType represents the string error types that the daemon and registry can
// return.
type ErrorType string

// These are the possible error types.
const (
	BadRequestError     = "bad_request"
	UnauthorizedError   = "unauthorized"
	NotFoundError       = "not_found"
	InternalServerError = "internal_server"
	NotImplementedError = "not_implemented"
)

// Error represents standard formatted API errors from the daemon or registry.
type Error struct {
	StatusCode int

	Type string   `json:"type"`
	Err  []string `json:"error"`
}

// Error implements the error interface for formatted API errors. The
// underscore-separated type is rendered in title case followed by the error
// messages, e.g. "Not Found: resource missing".
func (e *Error) Error() string {
	segments := strings.Split(e.Type, "_")
	// Title-case each word manually: strings.Title is deprecated since Go
	// 1.18, and these type values are plain ASCII identifiers.
	for i, s := range segments {
		if s != "" {
			segments[i] = strings.ToUpper(s[:1]) + s[1:]
		}
	}
	return strings.Join(segments, " ") + ": " + strings.Join(e.Err, " ")
}

// IsNotFoundError returns whether or not an error is a 404 result from the api.
func IsNotFoundError(err error) bool {
	if err == nil {
		return false
	}
	if apiErr, ok := err.(*Error); ok {
		return apiErr.Type == NotFoundError
	}
	return false
}
// Version contains the release version of the daemon.
type Version struct {
	Version string `json:"version"`
}

// SessionStatus contains details about the user's daemon session.
type SessionStatus struct {
	Token      bool `json:"token"`
	Passphrase bool `json:"passphrase"`
}

// Login contains the required details for logging in to the api and daemon.
type Login struct {
	Email      string `json:"email"`
	Passphrase string `json:"passphrase"`
}

// Profile contains the fields in the response for the profiles endpoint
type Profile struct {
	ID   *identity.ID `json:"id"`
	Body *struct {
		Name     string `json:"name"`
		Username string `json:"username"`
	} `json:"body"`
}

// Signup contains information required for registering an account
type Signup struct {
	Name       string
	Username   string
	Email      string
	Passphrase string
	InviteCode string
	OrgName    string
	OrgInvite  bool
}

// OrgInvite contains information for sending an Org invite
// NOTE(review): ID is a plain string here while most sibling types use
// *identity.ID — confirm the asymmetry is intentional.
type OrgInvite struct {
	ID      string               `json:"id"`
	Version int                  `json:"version"`
	Body    *primitive.OrgInvite `json:"body"`
}

// Team contains information for creating a new Team object
type Team struct {
	ID      *identity.ID    `json:"id"`
	Version int             `json:"version"`
	Body    *primitive.Team `json:"body"`
}

// Service contains information for creating a new Service object
type Service struct {
	ID      *identity.ID       `json:"id"`
	Version int                `json:"version"`
	Body    *primitive.Service `json:"body"`
}

// Environment contains information for creating a new Env object
// NOTE(review): ID is a plain string here too — confirm intentional.
type Environment struct {
	ID      string                 `json:"id"`
	Version int                    `json:"version"`
	Body    *primitive.Environment `json:"body"`
}

// InviteAccept contains data required to accept org invite
type InviteAccept struct {
	Org   string `json:"org"`
	Email string `json:"email"`
	Code  string `json:"code"`
}

// Membership contains data required to be added to a team
type Membership struct {
	ID      *identity.ID          `json:"id"`
	Version int                   `json:"version"`
	Body    *primitive.Membership `json:"body"`
}

// VerifyEmail contains email verification code
type VerifyEmail struct {
	Code string `json:"code"`
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"time"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/motioninput"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// mouseInputParams holds a collection of tests to run in the given test setup.
type mouseInputParams struct {
	// tests are the window-management scenarios exercised as sub-tests.
	tests []motioninput.WMTestParams
}
// init registers the MouseInput test with tablet and clamshell
// parameterizations covering the window states exercised by each sub-test.
func init() {
	testing.AddTest(&testing.Test{
		Func:         MouseInput,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies mouse input in various window states on Android",
		Contacts:     []string{"yhanada@chromium.org", "hirokisato@chromium.org", "arc-framework+tast@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "android_vm"},
		Fixture:      "arcBooted",
		Timeout:      10 * time.Minute,
		Params: []testing.Param{{
			// Tablet variants additionally require an internal display and
			// a touchscreen.
			Name:              "tablet",
			ExtraHardwareDeps: hwdep.D(hwdep.InternalDisplay(), hwdep.TouchScreen()),
			Val: mouseInputParams{[]motioninput.WMTestParams{
				{
					Name:          "Tablet",
					TabletMode:    true,
					WmEventToSend: nil,
				}, {
					Name:          "Tablet Snapped Left",
					TabletMode:    true,
					WmEventToSend: ash.WMEventSnapLeft,
				}, {
					Name:          "Tablet Snapped Right",
					TabletMode:    true,
					WmEventToSend: ash.WMEventSnapRight,
				},
			}},
		}, {
			Name: "clamshell",
			Val: mouseInputParams{[]motioninput.WMTestParams{
				{
					Name:          "Clamshell Normal",
					TabletMode:    false,
					WmEventToSend: ash.WMEventNormal,
				}, {
					Name:          "Clamshell Fullscreen",
					TabletMode:    false,
					WmEventToSend: ash.WMEventFullscreen,
				}, {
					Name:          "Clamshell Maximized",
					TabletMode:    false,
					WmEventToSend: ash.WMEventMaximize,
				},
			}},
		}},
	})
}
// MouseInput runs several sub-tests, where each sub-test sets up the Chrome WM environment as
// specified by the motioninput.WMTestParams. Each sub-test installs and runs the test application
// (ArcMotionInputTest.apk), injects various input events into ChromeOS through the test API,
// and verifies that those events were received by the Android application in the expected screen
// locations.
func MouseInput(ctx context.Context, s *testing.State) {
p := s.FixtValue().(*arc.PreData)
cr := p.Chrome
a := p.ARC
d := p.UIDevice
testParams := s.Param().(mouseInputParams)
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Failed to create test API connection: ", err)
}
if err := a.Install(ctx, arc.APKPath(motioninput.APK)); err != nil {
s.Fatal("Failed installing ", motioninput.APK, ": ", err)
}
for _, params := range testParams.tests {
s.Run(ctx, params.Name+": Verify Mouse", func(ctx context.Context, s *testing.State) {
motioninput.RunTestWithWMParams(ctx, s, tconn, d, a, ¶ms, verifyMouse)
})
}
}
// mouseMatcher returns a motionEventMatcher that matches events from a Mouse device.
func mouseMatcher(a motioninput.Action, p coords.Point) motioninput.Matcher {
	// Pressure is 1 for move/down/button-press events.
	// NOTE(review): ActionHoverExit also carrying pressure 1 looks
	// surprising — presumably it mirrors what Android reports; confirm
	// before changing.
	pressure := 0.
	if a == motioninput.ActionMove || a == motioninput.ActionDown || a == motioninput.ActionButtonPress || a == motioninput.ActionHoverExit {
		pressure = 1.
	}
	return motioninput.SinglePointerMatcher(a, motioninput.SourceMouse, p, pressure)
}
// initialEventMatcher returns a motionEventMatcher that matches the first mouse event
// that should be received by an app: either a hover-enter or, if the pointer
// was already inside the window, a hover-move at point p.
func initialEventMatcher(p coords.Point) motioninput.Matcher {
	return motioninput.MatcherOr(mouseMatcher(motioninput.ActionHoverEnter, p), mouseMatcher(motioninput.ActionHoverMove, p))
}
// verifyMouse tests the behavior of mouse events injected into Ash on Android apps. It tests hover,
// button, and drag events. It does not use the uinput mouse to inject events because the scale
// relation between the relative movements injected by a relative mouse device and the display
// pixels is determined by ChromeOS and could vary between devices.
func verifyMouse(ctx context.Context, s *testing.State, tconn *chrome.TestConn, t *motioninput.WMTestState, tester *motioninput.Tester) {
	s.Log("Verifying Mouse")
	p := t.CenterOfWindow()
	e := t.ExpectedPoint(p)
	// Move to the window center and wait until the app observes the pointer,
	// then clear the event log before the assertions below.
	s.Log("Injected initial move, waiting... ")
	if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
		s.Fatalf("Failed to inject move at %v: %v", e, err)
	}
	if err := tester.WaitUntilEvent(ctx, initialEventMatcher(e)); err != nil {
		s.Fatal("Failed to wait for the initial hover event: ", err)
	}
	if err := tester.ClearMotionEvents(ctx); err != nil {
		s.Fatal("Failed to clear events: ", err)
	}
	// numMouseMoveIterations is the number of times certain motion events should be repeated in
	// a test. Increasing this number will increase the time it takes to run the test.
	const numMouseMoveIterations = 1
	// deltaDP is the amount we want to move the mouse pointer between each successive injected
	// event. We use an arbitrary value that is not too large so that we can safely assume that
	// the injected events stay within the bounds of the application in the various WM states, so
	// that clicks performed after moving the mouse are still inside the application.
	const deltaDP = 5
	// Hover moves while no button is held.
	for i := 0; i < numMouseMoveIterations; i++ {
		p.X += deltaDP
		p.Y += deltaDP
		e = t.ExpectedPoint(p)
		s.Log("Verifying mouse move event at ", e)
		if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
			s.Fatalf("Failed to inject move at %v: %v", e, err)
		}
		if err := tester.ExpectEventsAndClear(ctx, mouseMatcher(motioninput.ActionHoverMove, e)); err != nil {
			s.Fatal("Failed to expect events and clear: ", err)
		}
	}
	// Press the left button: hover ends and a down/button-press pair follows.
	if err := mouse.Press(tconn, mouse.LeftButton)(ctx); err != nil {
		s.Fatal("Failed to press button on mouse: ", err)
	}
	if err := tester.ExpectEventsAndClear(
		ctx,
		mouseMatcher(motioninput.ActionHoverExit, e),
		mouseMatcher(motioninput.ActionDown, e),
		mouseMatcher(motioninput.ActionButtonPress, e)); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
	// Drag while the button is held.
	for i := 0; i < numMouseMoveIterations; i++ {
		p.X -= deltaDP
		p.Y -= deltaDP
		e = t.ExpectedPoint(p)
		s.Log("Verifying mouse move event at ", e)
		if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
			s.Fatalf("Failed to inject move at %v: %v", e, err)
		}
		if err := tester.ExpectEventsAndClear(ctx, mouseMatcher(motioninput.ActionMove, e)); err != nil {
			s.Fatal("Failed to expect events and clear: ", err)
		}
	}
	// Release the button: expect release/up followed by a return to hover.
	if err := mouse.Release(tconn, mouse.LeftButton)(ctx); err != nil {
		s.Fatal("Failed to release mouse button: ", err)
	}
	if err := tester.ExpectEventsAndClear(
		ctx,
		mouseMatcher(motioninput.ActionButtonRelease, e),
		mouseMatcher(motioninput.ActionUp, e),
		mouseMatcher(motioninput.ActionHoverEnter, e),
		mouseMatcher(motioninput.ActionHoverMove, e)); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
	// One final hover move after release.
	p.X -= deltaDP
	p.Y -= deltaDP
	e = t.ExpectedPoint(p)
	if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
		s.Fatalf("Failed to inject move at %v: %v", e, err)
	}
	if err := tester.ExpectEventsAndClear(ctx, mouseMatcher(motioninput.ActionHoverMove, e)); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
}
|
package model
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/tilt/internal/ospath"
"github.com/tilt-dev/tilt/internal/testutils/tempdir"
)
// TestNewRelativeFileOrChildMatcher verifies that relative paths are joined
// onto the base directory while already-absolute paths are kept as-is.
func TestNewRelativeFileOrChildMatcher(t *testing.T) {
	f := tempdir.NewTempDirFixture(t)
	paths := []string{
		"a",
		"b/c/d",
		ospath.MustAbs("already/abs"),
	}
	matcher := NewRelativeFileOrChildMatcher(f.Path(), paths...)
	expected := map[string]bool{
		f.JoinPath("a"):               true,
		f.JoinPath("b/c/d"):           true,
		ospath.MustAbs("already/abs"): true,
	}
	assert.Equal(t, expected, matcher.paths)
}
// TestFileOrChildMatcher verifies matching semantics: exact listed paths
// match, anything under a listed directory matches, but parents, siblings,
// and unrelated paths do not.
func TestFileOrChildMatcher(t *testing.T) {
	matcher := fileOrChildMatcher{map[string]bool{
		"file.txt":        true,
		"nested/file.txt": true,
		"directory":       true,
	}}
	// map test case --> expected match
	expectedMatch := map[string]bool{
		"file.txt":                true,
		"nested/file.txt":         true,
		"nested":                  false,
		"nested/otherfile.txt":    false,
		"directory/some/file.txt": true,
		"other/dir/entirely":      false,
	}
	for f, expected := range expectedMatch {
		match, err := matcher.Matches(f)
		if assert.NoError(t, err) {
			assert.Equal(t, expected, match, "expected file '%s' match --> %t", f, expected)
		}
	}
}
|
package postgres
import (
"database/sql"
"fmt"
"net/url"
"path"
"strconv"
"github.com/go-jet/jet/v2/generator/metadata"
"github.com/go-jet/jet/v2/generator/template"
"github.com/go-jet/jet/v2/internal/utils"
"github.com/go-jet/jet/v2/internal/utils/throw"
"github.com/go-jet/jet/v2/postgres"
"github.com/jackc/pgconn"
)
// DBConnection contains postgres connection details
type DBConnection struct {
	Host     string
	Port     int
	User     string
	Password string
	SslMode  string
	// Params holds additional connection query parameters
	// (e.g. "connect_timeout=10").
	Params string

	DBName     string
	SchemaName string
}
// Generate generates jet files at destination dir from database connection details
func Generate(destDir string, dbConn DBConnection, genTemplate ...template.Template) (err error) {
	dsn := fmt.Sprintf("postgresql://%s:%s@%s:%s/%s?sslmode=%s",
		url.PathEscape(dbConn.User),
		url.PathEscape(dbConn.Password),
		dbConn.Host,
		strconv.Itoa(dbConn.Port),
		url.PathEscape(dbConn.DBName),
		dbConn.SslMode,
	)
	// Append any extra connection parameters; previously the Params field
	// was silently ignored when building the DSN.
	if dbConn.Params != "" {
		dsn += "&" + dbConn.Params
	}
	return GenerateDSN(dsn, dbConn.SchemaName, destDir, genTemplate...)
}
// GenerateDSN generates jet files using dsn connection string
func GenerateDSN(dsn, schema, destDir string, templates ...template.Template) (err error) {
	// Panics raised via throw.OnError below are converted back into the
	// returned err by ErrorCatch.
	defer utils.ErrorCatch(&err)
	cfg, err := pgconn.ParseConfig(dsn)
	throw.OnError(err)
	if cfg.Database == "" {
		panic("database name is required")
	}
	db := openConnection(dsn)
	defer utils.DBClose(db)
	fmt.Println("Retrieving schema information...")
	// Use the caller-supplied template when given; otherwise fall back to
	// the default postgres template.
	generatorTemplate := template.Default(postgres.Dialect)
	if len(templates) > 0 {
		generatorTemplate = templates[0]
	}
	schemaMetadata := metadata.GetSchema(db, &postgresQuerySet{}, schema)
	// Generated files are placed under destDir/<database name>.
	dirPath := path.Join(destDir, cfg.Database)
	template.ProcessSchema(dirPath, schemaMetadata, generatorTemplate)
	return
}
// openConnection opens and pings a postgres database connection for the
// given dsn, panicking (via throw.OnError) on failure.
// NOTE(review): the DSN is printed verbatim, which leaks the password into
// stdout/logs — consider redacting credentials before printing.
func openConnection(dsn string) *sql.DB {
	fmt.Println("Connecting to postgres database: " + dsn)
	db, err := sql.Open("postgres", dsn)
	throw.OnError(err)
	err = db.Ping()
	throw.OnError(err)
	return db
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"encoding/json"
"time"
arcpkg "chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
)
// init registers the DynamicColor test.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DynamicColor,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that ArcSystemUIService changes Settings.Secure",
		// Each contact is its own slice element; the two addresses were
		// previously fused into a single comma-separated string.
		Contacts:     []string{"arc-app-dev@google.com", "ttefera@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "android_vm_t"},
		Fixture:      "arcBootedWithoutUIAutomator",
		Timeout:      chrome.GAIALoginTimeout + arcpkg.BootTimeout + 2*time.Minute,
	})
}
// DynamicColor verifies that chrome.autotestPrivate.sendArcOverlayColor
// propagates the chosen overlay color and style into Android's
// Settings.Secure theme_customization_overlay_packages entry.
func DynamicColor(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*arcpkg.PreData).Chrome
	arc := s.FixtValue().(*arcpkg.PreData).ARC
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	var ret bool
	const overlayStyle = "Expressive"
	if err := tconn.Call(ctx, &ret, "tast.promisify(chrome.autotestPrivate.sendArcOverlayColor)", 50, overlayStyle); err != nil {
		s.Fatal("Failed to call sendArcOverlayColor: ", err)
	}
	if !ret {
		s.Fatal("Failed to parse return value of sendArcOverlayColor")
	}
	// Read back the secure setting from inside Android.
	output, err := arc.Command(ctx, "settings", "get", "secure", "theme_customization_overlay_packages").Output()
	if err != nil {
		s.Fatal("Failed to get secure settings: ", err)
	}
	var themeOverlay map[string]interface{}
	// output is already a []byte; unmarshal it directly instead of
	// round-tripping through a string.
	if err := json.Unmarshal(output, &themeOverlay); err != nil {
		s.Fatal("Failed to unmarshal output: ", err)
	}
	// Use comma-ok assertions so a missing key or an unexpected JSON type
	// fails the test cleanly instead of panicking.
	const testPalette = 50
	palette, ok := themeOverlay["android.theme.customization.system_palette"].(float64)
	if !ok {
		s.Fatal("system_palette is missing or not a number in settings output")
	}
	if palette != testPalette {
		s.Errorf("Invalid system_palette got: %f, want: %d", palette, testPalette)
	}
	const testStyle = "EXPRESSIVE"
	style, ok := themeOverlay["android.theme.customization.theme_style"].(string)
	if !ok {
		s.Fatal("theme_style is missing or not a string in settings output")
	}
	if style != testStyle {
		s.Errorf("Invalid theme_style got: %s want: %s", style, testStyle)
	}
}
|
package array
import (
"fmt"
"github.com/morzhanov/algorithm-problem-solutions/utils"
)
// TrappingRainWatter test
// Given n non-negative integers representing an elevation map
// where the width of each bar is 1,
// compute how much water it is able to trap after raining.
// For example, given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.
//
// Approach: for each bar, the trapped water is
// min(max height to the left, max height to the right) - height.
// The two prefix-max arrays are built in O(n); total time and space O(n).
// NOTE: the function name's "Watter" misspelling is kept for
// backward compatibility with existing callers.
func TrappingRainWatter(heights []int) {
	n := len(heights)
	// Guard the empty input: the original indexed heights[0]/heights[n-1]
	// unconditionally and panicked on an empty slice.
	if n == 0 {
		fmt.Printf("Left: %v\n", []int{})
		fmt.Printf("Right: %v\n", []int{})
		fmt.Printf("Traps: %v, Water: %v\n", []int{}, 0)
		return
	}
	res := 0
	resArr := make([]int, n)
	left := make([]int, n)   // left[i] = max height in heights[0..i]
	right := make([]int, n)  // right[i] = max height in heights[i..n-1]
	max := heights[0]
	left[0] = heights[0]
	for i := 1; i < n; i++ {
		if max < heights[i] {
			max = heights[i]
			left[i] = heights[i]
		} else {
			left[i] = max
		}
	}
	max = heights[n-1]
	right[n-1] = heights[n-1]
	for i := n - 2; i >= 0; i-- {
		if max < heights[i] {
			max = heights[i]
			right[i] = heights[i]
		} else {
			right[i] = max
		}
	}
	for i := 0; i < n; i++ {
		// Water above bar i is bounded by the shorter of the two walls.
		resArr[i] = utils.Min(left[i], right[i]) - heights[i]
		res += resArr[i]
	}
	fmt.Printf("Left: %v\n", left)
	fmt.Printf("Right: %v\n", right)
	fmt.Printf("Traps: %v, Water: %v\n", resArr, res)
}
|
package trade_server
import (
"db"
"logger"
"strategy"
"trade_service"
)
// TradeServiceHandler implements the trade_service RPC interface; it is
// stateless and delegates to the db, strategy and logger packages.
type TradeServiceHandler struct {
}
// Ping is a health-check RPC; it always succeeds.
func (this *TradeServiceHandler) Ping() (err error) {
	return nil
}
// ConfigKeys stores the exchange API configurations and, on success,
// refreshes the strategy layer's ticker and trader state so the new
// configuration takes effect immediately.
func (this *TradeServiceHandler) ConfigKeys(exchange_configs []*trade_service.ExchangeConfig) (err error) {
	logger.Infoln("-->ConfigKeys begin:")
	if err = db.SetExchangeConfig(exchange_configs); err == nil {
		strategy.Update_tickers()
		strategy.Update_traders()
	}
	logger.Infoln("-->ConfigKeys end:", err)
	return
}
// ConfigAmount persists the trading amount configuration.
func (this *TradeServiceHandler) ConfigAmount(amount_config *trade_service.AmountConfig) (err error) {
	logger.Infoln("-->ConfigAmount begin:")
	err = db.SetAmountConfig(amount_config)
	logger.Infoln("-->ConfigAmount end:", err)
	return
}
// CheckPrice validates price against the strategy layer's ticker limits.
// It returns a TradeException with reason EX_PRICE_NOT_SYNC when the price
// is out of bounds, and nil otherwise.
func (this *TradeServiceHandler) CheckPrice(price float64, trade_type trade_service.TradeType) (err error) {
	if !strategy.Check_ticker_limit(price, trade_type) {
		// The freshly constructed exception is non-nil, so returning it
		// as an error is safe (no typed-nil interface issue here).
		tradeResult_ := trade_service.NewTradeException()
		tradeResult_.Reason = trade_service.EX_PRICE_NOT_SYNC
		logger.Infoln(price, trade_type, tradeResult_)
		return tradeResult_
	}
	return nil
}
// Buy forwards the buy order to the strategy layer and returns its result.
// NOTE(review): if strategy.Buy returns a typed nil pointer, the error
// interface returned here would be non-nil — confirm strategy.Buy's contract.
// Parameters:
//  - Cny
func (this *TradeServiceHandler) Buy(buyOrder *trade_service.Trade) (err error) {
	logger.Infoln("-->Buy begin:", buyOrder)
	tradeResult_ := strategy.Buy(buyOrder)
	logger.Infoln("-->Buy end:", buyOrder, tradeResult_)
	return tradeResult_
}
// Sell forwards the sell order to the strategy layer and returns its result.
// NOTE(review): as with Buy, verify strategy.Sell never returns a typed nil
// pointer, which would surface here as a non-nil error.
// Parameters:
//  - Btc
func (this *TradeServiceHandler) Sell(sellOrder *trade_service.Trade) (err error) {
	logger.Infoln("-->Sell begin:", sellOrder)
	tradeResult_ := strategy.Sell(sellOrder)
	logger.Infoln("-->Sell end:", sellOrder, tradeResult_)
	return tradeResult_
}
// GetAccount returns the account balances stored in the database.
// A nil result is normalized to an empty slice so RPC clients never
// receive a null list.
func (this *TradeServiceHandler) GetAccount() (r []*trade_service.Account, err error) {
	logger.Infoln("-->GetAccount begin:")
	r, err = db.GetAccount()
	if r == nil {
		r = make([]*trade_service.Account, 0)
	}
	logger.Infoln("-->GetAccount end:", r, err)
	return
}
// GetTicker returns the most recent ticker from the database.
// Logging is commented out — presumably because this endpoint is polled
// frequently and would flood the log; confirm before re-enabling.
func (this *TradeServiceHandler) GetTicker() (r *trade_service.Ticker, err error) {
	// logger.Infoln("-->GetTicker begin:")
	r, err = db.GetTicker()
	// logger.Infoln("-->GetTicker end:", r, err)
	return
}
// GetAlertOrders signals, via a TradeException with reason
// EX_EXIST_ERROR_ORDERS, that alert (problematic) orders exist in the
// database; it returns nil when there are none or the db error otherwise.
func (this *TradeServiceHandler) GetAlertOrders() (err error) {
	logger.Infoln("-->GetAlertOrders begin:")
	tradeOrders, err := db.GetAlertOrders()
	if err == nil && len(tradeOrders) > 0 {
		tradeResult_ := trade_service.NewTradeException()
		tradeResult_.Reason = trade_service.EX_EXIST_ERROR_ORDERS
		logger.Infoln("-->GetAlertOrders end:", tradeResult_)
		return tradeResult_
	}
	logger.Infoln("-->GetAlertOrders end:", err)
	return
}
// GetExchangeStatus reports whether at least one exchange is currently
// usable for buying and whether one is usable for selling.
func (this *TradeServiceHandler) GetExchangeStatus() (r *trade_service.ExchangeStatus, err error) {
	logger.Infoln("-->GetExchangeStatus begin:")
	r = trade_service.NewExchangeStatus()
	// A side is enabled when any usable exchange exists for it; assign the
	// boolean directly rather than branching on the slice length.
	markets := strategy.GetUsableExchange(trade_service.TradeType_BUY.String(), true)
	r.Canbuy = len(markets) > 0
	markets = strategy.GetUsableExchange(trade_service.TradeType_SELL.String(), true)
	r.Cansell = len(markets) > 0
	logger.Infoln("-->GetExchangeStatus end:", err, r)
	return
}
|
package gfx
// FontStretch is used for Style's FontStretch property.
type FontStretch uint
// The following values are valid as FontStretch's values.
// FontStretchInherit is the zero value, so an unset Style inherits by
// default. The declaration order is significant (values come from iota);
// do not reorder.
const (
	FontStretchInherit FontStretch = iota
	FontStretchWider
	FontStretchNarrower
	FontStretchUltraCondensed
	FontStretchExtraCondensed
	FontStretchCondensed
	FontStretchSemiCondensed
	FontStretchSemiExpanded
	FontStretchExpanded
	FontStretchExtraExpanded
	FontStretchUltraExpanded
)
// FontStyle is used for Style's FontStyle property.
type FontStyle uint
// The following values are valid as FontStyle's values.
// FontStyleInherit is the zero value; order is significant (iota).
const (
	FontStyleInherit FontStyle = iota
	FontStyleNormal
	FontStyleOblique
	FontStyleItalic
)
// FontVariant is used for Style's FontVariant property.
type FontVariant uint
// The following values are valid as FontVariant's values.
// FontVariantInherit is the zero value; order is significant (iota).
const (
	FontVariantInherit FontVariant = iota
	FontVariantNormal
	FontVariantSmallCaps
)
// FontWeight is used for Style's FontWeight property.
type FontWeight uint
// The following values are valid as FontWeight's values.
// The 100*iota progression makes FontWeightInherit 0 and gives
// FontWeight100 through FontWeight900 their CSS numeric weights
// (100..900); FontWeightNormal and FontWeightBold alias 400 and 700.
const (
	FontWeightInherit FontWeight = 100 * iota
	FontWeight100
	FontWeight200
	FontWeight300
	FontWeight400
	FontWeight500
	FontWeight600
	FontWeight700
	FontWeight800
	FontWeight900
	FontWeightNormal FontWeight = 400
	FontWeightBold   FontWeight = 700
)
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package a11y provides functions to assist with interacting with accessibility
// features and settings.
package a11y
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/a11y"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
)
func init() {
	testing.AddTest(&testing.Test{
		Func:         SelectToSpeak,
		LacrosStatus: testing.LacrosVariantUnneeded, // TODO(crbug.com/1159107): Test is disabled in continuous testing. Migrate when enabled.
		Desc:         "A test that invokes Select-to-Speak and verifies the correct speech is given by the Google TTS engine",
		Contacts: []string{
			"akihiroota@chromium.org",      // Test author
			"chromeos-a11y-eng@google.com", // Backup mailing list
		},
		// TODO(https://crbug.com/1267448): Investigate failures and re-enable this test.
		// NOTE(review): no Attr field — the test is presumably kept out of
		// test groups while disabled; confirm before adding attributes.
		SoftwareDeps: []string{"chrome"},
		Pre:          chrome.LoggedIn(),
	})
}
// SelectToSpeak enables the Select-to-Speak accessibility feature, opens a
// page with known text, triggers Select-to-Speak on it, and verifies that
// the Google TTS engine is asked to speak exactly that text.
func SelectToSpeak(ctx context.Context, s *testing.State) {
	cr := s.PreValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	// Shorten deadline to leave time for cleanup
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	// Mute the device to avoid noisiness.
	if err := crastestclient.Mute(ctx); err != nil {
		s.Fatal("Failed to mute: ", err)
	}
	defer crastestclient.Unmute(cleanupCtx)
	if err := a11y.SetFeatureEnabled(ctx, tconn, a11y.SelectToSpeak, true); err != nil {
		s.Fatal("Failed to enable Select-to-Speak: ", err)
	}
	// Disable the feature during cleanup, using the unshortened context so
	// the call has time to complete.
	defer func(ctx context.Context) {
		if err := a11y.SetFeatureEnabled(ctx, tconn, a11y.SelectToSpeak, false); err != nil {
			s.Error("Failed to disable Select-to-Speak: ", err)
		}
	}(cleanupCtx)
	// Watch the TTS engine so the spoken utterances can be asserted below.
	ed := a11y.TTSEngineData{ExtID: a11y.GoogleTTSExtensionID, UseOnSpeakWithAudioStream: false}
	sm, err := a11y.RelevantSpeechMonitor(ctx, cr, tconn, ed)
	if err != nil {
		s.Fatal("Failed to connect to the Google TTS background page: ", err)
	}
	defer sm.Close()
	c, err := a11y.NewTabWithHTML(ctx, cr.Browser(), "<p>This is a select-to-speak test</p>")
	if err != nil {
		s.Fatal("Failed to open a new tab with HTML: ", err)
	}
	// Only the devtools connection is closed here; the tab itself stays
	// open for the key presses below. The Close error is ignored —
	// NOTE(review): consider logging it.
	c.Close()
	// Select all and invoke Select-to-Speak.
	if err := a11y.PressKeysAndConsumeExpectations(ctx, sm, []string{"Ctrl+A", "Search+S"}, []a11y.SpeechExpectation{a11y.NewStringExpectation("This is a select-to-speak test")}); err != nil {
		s.Error("Error when pressing keys and expecting speech: ", err)
	}
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpc
import (
"bytes"
"sync"
"time"
"github.com/bitmark-inc/bitmarkd/announce/fingerprint"
"github.com/bitmark-inc/bitmarkd/announce/helper"
"github.com/bitmark-inc/bitmarkd/announce/parameter"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/util"
)
const (
	// addressLimit bounds the size in bytes of a packed listener list
	// accepted from a remote node; larger records are discarded.
	addressLimit = 100
	// maxNodeCount sizes the initial node slice and index map capacity.
	maxNodeCount = 1000
)
// RPC - interface for RPC operations
type RPC interface {
	// Set initialises this node's own announcement data; callable once.
	Set(fingerprint.Fingerprint, []byte) error
	// Add records a remote RPC listener; see rpc.Add for return semantics.
	Add([]byte, []byte, uint64) bool
	// Expire removes outdated non-local entries.
	Expire()
	// IsInitialised reports whether Set has been called.
	IsInitialised() bool
	// Fetch returns up to count entries starting at start, plus next index.
	Fetch(start uint64, count int) ([]Entry, uint64, error)
	// Self returns this node's packed listener addresses.
	Self() []byte
	// ID returns this node's certificate fingerprint.
	ID() fingerprint.Fingerprint
}
// Entry type of returned data
type Entry struct {
	Fingerprint fingerprint.Fingerprint `json:"fingerprint"`
	Connections []*util.Connection      `json:"connections"`
}
// node is one tracked RPC listener record.
type node struct {
	address   util.PackedConnection   // packed addresses
	fin       fingerprint.Fingerprint // SHA3-256(certificate)
	timestamp time.Time               // creation time
	local     bool                    // true => never expires
}
// rpc is the RWMutex-guarded implementation of the RPC interface.
// index maps a fingerprint to its position in nodes; Expire keeps the two
// consistent by swap-deleting.
type rpc struct {
	sync.RWMutex
	fin         fingerprint.Fingerprint
	initialised bool
	nodes       []*node
	index       map[fingerprint.Fingerprint]int
	self        []byte
}
// Set - initialise this node's rpc announcement data
// It may only succeed once per instance; subsequent calls return
// fault.AlreadyInitialised.
func (r *rpc) Set(fin fingerprint.Fingerprint, rpcs []byte) error {
	r.Lock()
	defer r.Unlock()
	if r.initialised {
		return fault.AlreadyInitialised
	}
	r.fin = fin
	r.self = rpcs
	r.initialised = true
	// save node info; local=true marks the entry as never expiring
	r.add(fin, rpcs, uint64(time.Now().Unix()), true)
	return nil
}
// Add - add a remote RPC listener
// returns:
//   true  if this was a new/updated entry
//   false if the update was within the limits (to prevent continuous relaying)
func (r *rpc) Add(f []byte, listeners []byte, timestamp uint64) bool {
	var fp fingerprint.Fingerprint
	// discard invalid records: the fingerprint must be exactly the right
	// length and the packed listener list must not be oversized
	if len(fp) != len(f) || len(listeners) > addressLimit {
		return false
	}
	copy(fp[:], f)
	r.Lock()
	rc := r.add(fp, listeners, timestamp, false)
	r.Unlock()
	return rc
}
// internal add of a remote RPC listener; caller must hold the lock
func (r *rpc) add(fin fingerprint.Fingerprint, listeners []byte, timestamp uint64, local bool) bool {
	i, ok := r.index[fin]
	// if new item
	if !ok {
		// clamp timestamps from the future to now, then reject records
		// that would already be expired
		ts := helper.ResetFutureTimestampToNow(timestamp)
		if helper.IsExpiredAfterDuration(ts, parameter.ExpiryInterval) {
			return false
		}
		// ***** FIX THIS: add more validation here
		e := &node{
			address:   listeners,
			fin:       fin,
			timestamp: ts,
			local:     local,
		}
		n := len(r.nodes)
		r.nodes = append(r.nodes, e)
		r.index[fin] = n
		return true
	}
	e := r.nodes[i]
	// update old item
	if !bytes.Equal(e.address, listeners) {
		e.address = listeners
	}
	// check for too frequent update: only report "updated" (and thus allow
	// relaying) when the previous update is older than RebroadcastInterval
	rc := time.Since(e.timestamp) > parameter.RebroadcastInterval
	e.timestamp = time.Now()
	return rc
}
// Expire - called in background to expire outdated RPC entries
// Deletion is done by swapping the expired entry with the current last
// element and shrinking the slice, updating the index map to match; local
// entries are never expired.
func (r *rpc) Expire() {
	r.Lock()
	defer r.Unlock()
	n := len(r.nodes)
expiration:
	for i := n - 1; i >= 0; i-- {
		e := r.nodes[i]
		if nil == e || e.local {
			continue expiration
		}
		if time.Since(e.timestamp) > parameter.ExpiryInterval {
			delete(r.index, e.fin)
			n--
			if i != n {
				// move the last live entry into the vacated slot
				e := r.nodes[n]
				r.nodes[i] = e
				r.index[e.fin] = i
			}
			// clear the tail slot so the node can be garbage collected
			r.nodes[n] = nil
		}
	}
	r.nodes = r.nodes[:n] // shrink the list
}
// IsInitialised - return flag of initialised status
// The field is written under the lock in Set, so it must also be read
// under the (read) lock to avoid a data race.
func (r *rpc) IsInitialised() bool {
	r.RLock()
	defer r.RUnlock()
	return r.initialised
}
// Fetch - fetch some records
// Returns up to count entries starting at index start, together with the
// index to pass to the next call; a start beyond the end yields no records.
func (r *rpc) Fetch(start uint64, count int) ([]Entry, uint64, error) {
	if count <= 0 {
		return nil, 0, fault.InvalidCount
	}
	// Fetch only reads shared state, so a read lock suffices and allows
	// concurrent fetches (previously the exclusive lock was taken).
	r.RLock()
	defer r.RUnlock()
	n := uint64(len(r.nodes))
	if start >= n {
		return nil, 0, nil
	}
	remainder := n - start
	c := uint64(count)
	if c >= remainder {
		c = remainder
	}
	records := make([]Entry, c)
	for i := uint64(0); i < c; i++ {
		// unpack every connection from the node's packed address blob
		a := r.nodes[start].address
		conn := make([]*util.Connection, 0, 4)
		for {
			unpacked, used := a.Unpack()
			if used == 0 {
				break
			}
			conn = append(conn, unpacked)
			a = a[used:]
		}
		records[i].Fingerprint = r.nodes[start].fin
		records[i].Connections = conn
		start++
	}
	return records, start, nil
}
// Self returns this node's packed rpc listener addresses as passed to Set.
// NOTE(review): read without holding the lock — this is only safe because
// self is written once in Set and never mutated afterwards; confirm callers
// cannot race with Set.
func (r *rpc) Self() []byte {
	return r.self
}
// ID - SHA3 of a node's certificate public key
// NOTE(review): like Self, reads fin without the lock on the assumption it
// is written exactly once in Set — verify that assumption holds.
func (r *rpc) ID() fingerprint.Fingerprint {
	return r.fin
}
// New - return RPC interface
// The returned instance is empty and uninitialised; call Set before use.
func New() RPC {
	r := &rpc{}
	r.index = make(map[fingerprint.Fingerprint]int, maxNodeCount)
	r.nodes = make([]*node, 0, maxNodeCount)
	return r
}
|
package carbon
import (
"bytes"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/lomik/zapwriter"
)
// MetricEndpointLocal selects the built-in (local) metric endpoint.
const MetricEndpointLocal = "local"
// Duration wrapper time.Duration for TOML
type Duration struct {
	time.Duration
}
// Compile-time check that *Duration can be marshalled by the TOML encoder.
// (It also implements encoding.TextUnmarshaler via UnmarshalText below.)
var _ toml.TextMarshaler = &Duration{}
// UnmarshalText from TOML
// Parses a Go duration string (e.g. "1m30s") into the wrapped Duration.
func (d *Duration) UnmarshalText(text []byte) error {
	parsed, err := time.ParseDuration(string(text))
	// Mirror the original single-assignment behavior: on a parse failure
	// the zero duration is stored alongside the returned error.
	d.Duration = parsed
	return err
}
// MarshalText encode text with TOML format
// Renders the duration in Go's standard notation (e.g. "1m0s").
func (d *Duration) MarshalText() ([]byte, error) {
	text := d.Duration.String()
	return []byte(text), nil
}
// Value return time.Duration value
// Convenience accessor for the embedded duration.
func (d *Duration) Value() time.Duration {
	return d.Duration
}
// commonConfig holds process-wide settings from the [common] TOML section.
type commonConfig struct {
	MetricPrefix   string    `toml:"metric-prefix"`   // prefix for self-reported metrics; {host} is substituted
	MetricInterval *Duration `toml:"metric-interval"` // how often self metrics are emitted
	MetricEndpoint string    `toml:"metric-endpoint"` // "local" or a remote endpoint
	MaxCPU         int       `toml:"max-cpu"`         // GOMAXPROCS value
}
// clickhouseConfig holds the [clickhouse] TOML section: target tables,
// timeouts and upload parallelism.
// NOTE(review): the field would idiomatically be named URL, but renaming an
// exported field is a breaking change for external users of this struct.
type clickhouseConfig struct {
	Url               string    `toml:"url"`
	DataTable         string    `toml:"data-table"`
	DataTables        []string  `toml:"data-tables"`
	ReverseDataTables []string  `toml:"reverse-data-tables"`
	DataTimeout       *Duration `toml:"data-timeout"`
	TreeTable         string    `toml:"tree-table"`
	ReverseTreeTable  string    `toml:"reverse-tree-table"`
	TreeDateString    string    `toml:"tree-date"`
	// TreeDate is parsed from TreeDateString by ReadConfig and is not
	// itself a TOML key.
	TreeDate    time.Time `toml:"-"`
	TreeTimeout *Duration `toml:"tree-timeout"`
	Threads     int       `toml:"threads"`
}
// udpConfig configures the UDP plaintext receiver ([udp] section).
type udpConfig struct {
	Listen        string `toml:"listen"`
	Enabled       bool   `toml:"enabled"`
	LogIncomplete bool   `toml:"log-incomplete"` // log datagrams cut mid-metric
}
// tcpConfig configures the TCP plaintext receiver ([tcp] section).
type tcpConfig struct {
	Listen  string `toml:"listen"`
	Enabled bool   `toml:"enabled"`
}
// pickleConfig configures the python-pickle receiver ([pickle] section).
type pickleConfig struct {
	Listen  string `toml:"listen"`
	Enabled bool   `toml:"enabled"`
}
// grpcConfig configures the gRPC receiver ([grpc] section).
type grpcConfig struct {
	Listen  string `toml:"listen"`
	Enabled bool   `toml:"enabled"`
}
// pprofConfig configures the pprof debug HTTP listener ([pprof] section).
type pprofConfig struct {
	Listen  string `toml:"listen"`
	Enabled bool   `toml:"enabled"`
}
// dataConfig configures local data spooling ([data] section).
type dataConfig struct {
	Path         string    `toml:"path"`
	FileInterval *Duration `toml:"chunk-interval"`
}
// Config is the top-level application configuration, one field per TOML
// section. Defaults come from NewConfig; ReadConfig overlays a file.
type Config struct {
	Common     commonConfig       `toml:"common"`
	ClickHouse clickhouseConfig   `toml:"clickhouse"`
	Data       dataConfig         `toml:"data"`
	Udp        udpConfig          `toml:"udp"`
	Tcp        tcpConfig          `toml:"tcp"`
	Pickle     pickleConfig       `toml:"pickle"`
	Grpc       grpcConfig         `toml:"grpc"`
	Pprof      pprofConfig        `toml:"pprof"`
	Logging    []zapwriter.Config `toml:"logging"`
}
// NewConfig returns a Config populated with the documented out-of-the-box
// defaults; Logging is left nil and is filled in by ReadConfig /
// PrintDefaultConfig.
func NewConfig() *Config {
	cfg := &Config{
		Common: commonConfig{
			MetricPrefix: "carbon.agents.{host}",
			MetricInterval: &Duration{
				Duration: time.Minute,
			},
			MetricEndpoint: MetricEndpointLocal,
			MaxCPU:         1,
		},
		Logging: nil,
		ClickHouse: clickhouseConfig{
			Url:               "http://localhost:8123/",
			DataTable:         "graphite",
			DataTables:        []string{},
			ReverseDataTables: []string{},
			TreeTable:         "graphite_tree",
			TreeDateString:    "2016-11-01",
			DataTimeout: &Duration{
				Duration: time.Minute,
			},
			TreeTimeout: &Duration{
				Duration: time.Minute,
			},
			Threads: 1,
		},
		Data: dataConfig{
			Path: "/data/carbon-clickhouse/",
			FileInterval: &Duration{
				Duration: time.Second,
			},
		},
		Udp: udpConfig{
			Listen:        ":2003",
			Enabled:       true,
			LogIncomplete: false,
		},
		Tcp: tcpConfig{
			Listen:  ":2003",
			Enabled: true,
		},
		Pickle: pickleConfig{
			Listen:  ":2004",
			Enabled: true,
		},
		Grpc: grpcConfig{
			Listen:  ":2005",
			Enabled: false,
		},
		Pprof: pprofConfig{
			Listen:  "localhost:7007",
			Enabled: false,
		},
	}
	return cfg
}
// NewLoggingConfig returns the default zapwriter logging configuration,
// pointing at the standard carbon-clickhouse log file.
func NewLoggingConfig() zapwriter.Config {
	cfg := zapwriter.NewConfig()
	cfg.File = "/var/log/carbon-clickhouse/carbon-clickhouse.log"
	return cfg
}
// PrintDefaultConfig writes the default configuration, rendered as TOML,
// to stdout. (The original comment misnamed this function "PrintConfig".)
func PrintDefaultConfig() error {
	cfg := NewConfig()
	buf := new(bytes.Buffer)
	// Guarantee at least one logging entry so the printed config shows a
	// complete [[logging]] section.
	if cfg.Logging == nil {
		cfg.Logging = make([]zapwriter.Config, 0)
	}
	if len(cfg.Logging) == 0 {
		cfg.Logging = append(cfg.Logging, NewLoggingConfig())
	}
	encoder := toml.NewEncoder(buf)
	encoder.Indent = ""
	if err := encoder.Encode(cfg); err != nil {
		return err
	}
	fmt.Print(buf.String())
	return nil
}
// ReadConfig loads the TOML file at filename (defaults only when filename
// is empty), ensures at least one logging entry, validates the logging
// configuration, and parses the clickhouse tree-date string.
func ReadConfig(filename string) (*Config, error) {
	var err error
	cfg := NewConfig()
	if filename != "" {
		b, err := ioutil.ReadFile(filename)
		if err != nil {
			return nil, err
		}
		body := string(b)
		// @TODO: fix for config starts with [logging]
		// Old configs used a table ([logging]) where an array of tables
		// ([[logging]]) is now expected; rewrite before decoding.
		body = strings.Replace(body, "\n[logging]\n", "\n[[logging]]\n", -1)
		if _, err := toml.Decode(body, cfg); err != nil {
			return nil, err
		}
	}
	if cfg.Logging == nil {
		cfg.Logging = make([]zapwriter.Config, 0)
	}
	if len(cfg.Logging) == 0 {
		cfg.Logging = append(cfg.Logging, NewLoggingConfig())
	}
	if err := zapwriter.CheckConfig(cfg.Logging, nil); err != nil {
		return nil, err
	}
	// tree-date is a plain YYYY-MM-DD string in the file; materialize it
	// as a time.Time in the server's local zone.
	cfg.ClickHouse.TreeDate, err = time.ParseInLocation("2006-01-02", cfg.ClickHouse.TreeDateString, time.Local)
	if err != nil {
		return nil, err
	}
	return cfg, nil
}
|
/*
Wetware - the distributed programming language
Copyright 2020, Louis Thibault. All rights reserved.
*/
package main
import (
"os"
"github.com/urfave/cli/v2"
"github.com/lthibault/log"
"github.com/wetware/ww/internal/cmd/boot"
"github.com/wetware/ww/internal/cmd/client"
"github.com/wetware/ww/internal/cmd/keygen"
"github.com/wetware/ww/internal/cmd/shell"
"github.com/wetware/ww/internal/cmd/start"
)
// version is reported by the CLI's --version flag.
const version = "0.0.0"
// flags are the global CLI options, all overridable via WW_* environment
// variables where EnvVars is set.
var flags = []cli.Flag{
	&cli.StringFlag{
		Name:    "logfmt",
		Aliases: []string{"f"},
		Usage:   "text, json, none",
		Value:   "text",
		EnvVars: []string{"WW_LOGFMT"},
	},
	&cli.StringFlag{
		Name:    "loglvl",
		Usage:   "trace, debug, info, warn, error, fatal",
		Value:   "info",
		EnvVars: []string{"WW_LOGLVL"},
	},
	&cli.BoolFlag{
		Name:    "prettyprint",
		Aliases: []string{"pp"},
		Usage:   "pretty-print JSON output",
		Hidden:  true,
	},
}
// commands lists the top-level subcommands exposed by the wetware CLI.
var commands = []*cli.Command{
	start.Command(),
	shell.Command(),
	client.Command(),
	keygen.Command(),
	boot.Command(),
}
func main() {
run(&cli.App{
Name: "wetware",
Usage: "the distributed programming language",
UsageText: "ww [global options] command [command options] [arguments...]",
Copyright: "2020 The Wetware Project",
Version: version,
EnableBashCompletion: true,
Flags: flags,
Commands: commands,
})
}
// run executes the application and terminates the process with a fatal log
// entry if it returns an error.
func run(app *cli.App) {
	err := app.Run(os.Args)
	if err == nil {
		return
	}
	log.New().Fatal(err)
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sessiondata
import (
"fmt"
"net"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// SessionData contains session parameters. They are all user-configurable.
// A SQL Session changes fields in SessionData through sql.sessionDataMutator.
// NOTE: any field added to the non-serializable section below must also be
// handled in MarshalNonLocal and UnmarshalNonLocal.
type SessionData struct {
	// SessionData contains session parameters that are easily serializable and
	// are required to be propagated to the remote nodes for the correct
	// execution of DistSQL flows.
	sessiondatapb.SessionData
	// LocalOnlySessionData contains session parameters that don't need to be
	// propagated to the remote nodes.
	LocalOnlySessionData
	// All session parameters below must be propagated to the remote nodes but
	// are not easily serializable. They require custom serialization
	// (MarshalNonLocal) and deserialization (UnmarshalNonLocal).
	//
	// Location indicates the current time zone.
	Location *time.Location
	// SearchPath is a list of namespaces to search builtins in.
	SearchPath SearchPath
	// SequenceState gives access to the SQL sequences that have been
	// manipulated by the session.
	SequenceState *SequenceState
}
// MarshalNonLocal serializes all non-local parameters from SessionData struct
// that don't have native protobuf support into proto.
// It is the inverse of UnmarshalNonLocal; keep the two in sync.
func MarshalNonLocal(sd *SessionData, proto *sessiondatapb.SessionData) {
	proto.Location = sd.GetLocation().String()
	// Populate the search path. Make sure not to include the implicit pg_catalog,
	// since the remote end already knows to add the implicit pg_catalog if
	// necessary, and sending it over would make the remote end think that
	// pg_catalog was explicitly included by the user.
	proto.SearchPath = sd.SearchPath.GetPathArray()
	proto.TemporarySchemaName = sd.SearchPath.GetTemporarySchemaName()
	// Populate the sequences state.
	latestValues, lastIncremented := sd.SequenceState.Export()
	if len(latestValues) > 0 {
		proto.SeqState.LastSeqIncremented = lastIncremented
		for seqID, latestVal := range latestValues {
			proto.SeqState.Seqs = append(proto.SeqState.Seqs,
				&sessiondatapb.SequenceState_Seq{SeqID: seqID, LatestVal: latestVal},
			)
		}
	}
}
// UnmarshalNonLocal returns a new SessionData based on the serialized
// representation. Note that only non-local session parameters are populated.
// It is the inverse of MarshalNonLocal; keep the two in sync.
func UnmarshalNonLocal(proto sessiondatapb.SessionData) (*SessionData, error) {
	location, err := timeutil.TimeZoneStringToLocation(
		proto.Location,
		timeutil.TimeZoneStringToLocationISO8601Standard,
	)
	if err != nil {
		return nil, err
	}
	// Rebuild the sequence state from the serialized (seqID, latestVal)
	// pairs; LastSeqIncremented is only meaningful when at least one
	// sequence value was recorded.
	seqState := NewSequenceState()
	var haveSequences bool
	for _, seq := range proto.SeqState.Seqs {
		seqState.RecordValue(seq.SeqID, seq.LatestVal)
		haveSequences = true
	}
	if haveSequences {
		seqState.SetLastSequenceIncremented(proto.SeqState.LastSeqIncremented)
	}
	return &SessionData{
		SessionData: proto,
		SearchPath: MakeSearchPath(
			proto.SearchPath,
		).WithTemporarySchemaName(
			proto.TemporarySchemaName,
		).WithUserSchemaName(proto.UserProto.Decode().Normalized()),
		SequenceState: seqState,
		Location:      location,
	}, nil
}
// GetLocation returns the session timezone, defaulting to UTC when the
// receiver is nil or no location has been set.
func (s *SessionData) GetLocation() *time.Location {
	if s != nil && s.Location != nil {
		return s.Location
	}
	return time.UTC
}
// LocalOnlySessionData contains session parameters that only influence the
// execution on the gateway node and don't need to be propagated to the remote
// nodes.
type LocalOnlySessionData struct {
	// SaveTablesPrefix indicates that a table should be created with the
	// given prefix for the output of each subexpression in a query. If
	// SaveTablesPrefix is empty, no tables are created.
	SaveTablesPrefix string
	// RemoteAddr is used to generate logging events.
	RemoteAddr net.Addr
	// ExperimentalDistSQLPlanningMode indicates whether the experimental
	// DistSQL planning driven by the optimizer is enabled.
	ExperimentalDistSQLPlanningMode ExperimentalDistSQLPlanningMode
	// DistSQLMode indicates whether to run queries using the distributed
	// execution engine.
	DistSQLMode DistSQLExecMode
	// OptimizerFKCascadesLimit is the maximum number of cascading operations that
	// are run for a single query.
	OptimizerFKCascadesLimit int
	// ResultsBufferSize specifies the size at which the pgwire results buffer
	// will self-flush.
	ResultsBufferSize int64
	// NoticeDisplaySeverity indicates the level of Severity to send notices for the given
	// session.
	NoticeDisplaySeverity pgnotice.DisplaySeverity
	// SerialNormalizationMode indicates how to handle the SERIAL pseudo-type.
	SerialNormalizationMode SerialNormalizationMode
	// DatabaseIDToTempSchemaID stores the temp schema ID for every database that
	// has created a temporary schema. The mapping is from descpb.ID -> descpb.ID,
	// but cannot be stored as such due to package dependencies.
	DatabaseIDToTempSchemaID map[uint32]uint32
	// StmtTimeout is the duration a query is permitted to run before it is
	// canceled by the session. If set to 0, there is no timeout.
	StmtTimeout time.Duration
	// IdleInSessionTimeout is the duration a session is permitted to idle before
	// the session is canceled. If set to 0, there is no timeout.
	IdleInSessionTimeout time.Duration
	// IdleInTransactionSessionTimeout is the duration a session is permitted to
	// idle in a transaction before the session is canceled.
	// If set to 0, there is no timeout.
	IdleInTransactionSessionTimeout time.Duration
	// ReorderJoinsLimit indicates the number of joins at which the optimizer should
	// stop attempting to reorder.
	ReorderJoinsLimit int
	// DefaultTxnPriority indicates the default priority of newly created
	// transactions.
	// NOTE: we'd prefer to use tree.UserPriority here, but doing so would
	// introduce a package dependency cycle.
	DefaultTxnPriority int
	// DefaultTxnReadOnly indicates the default read-only status of newly
	// created transactions.
	DefaultTxnReadOnly bool
	// DefaultTxnUseFollowerReads indicates whether transactions should be
	// created by default using an AS OF SYSTEM TIME clause far enough in the
	// past to facilitate reads against followers. If true, transactions will
	// also default to being read-only.
	DefaultTxnUseFollowerReads bool
	// PartiallyDistributedPlansDisabled indicates whether the partially
	// distributed plans produced by distSQLSpecExecFactory are disabled. It
	// should be set to 'true' only in tests that verify that the old and the
	// new factories return exactly the same physical plans.
	// TODO(yuzefovich): remove it when deleting old sql.execFactory.
	PartiallyDistributedPlansDisabled bool
	// OptimizerUseHistograms indicates whether we should use histograms for
	// cardinality estimation in the optimizer.
	OptimizerUseHistograms bool
	// OptimizerUseMultiColStats indicates whether we should use multi-column
	// statistics for cardinality estimation in the optimizer.
	OptimizerUseMultiColStats bool
	// LocalityOptimizedSearch indicates that the optimizer will try to plan scans
	// and lookup joins in which local nodes (i.e., nodes in the gateway region)
	// are searched for matching rows before remote nodes, in the hope that the
	// execution engine can avoid visiting remote nodes.
	LocalityOptimizedSearch bool
	// SafeUpdates causes errors when the client
	// sends syntax that may have unwanted side effects.
	SafeUpdates bool
	// PreferLookupJoinsForFKs causes foreign key operations to prefer lookup
	// joins.
	PreferLookupJoinsForFKs bool
	// ZigzagJoinEnabled indicates whether the optimizer should try and plan a
	// zigzag join.
	ZigzagJoinEnabled bool
	// RequireExplicitPrimaryKeys indicates whether CREATE TABLE statements should
	// error out if no primary key is provided.
	RequireExplicitPrimaryKeys bool
	// ForceSavepointRestart overrides the default SAVEPOINT behavior
	// for compatibility with certain ORMs. When this flag is set,
	// the savepoint name will no longer be compared against the magic
	// identifier `cockroach_restart` in order to use a restartable
	// transaction.
	ForceSavepointRestart bool
	// AllowPrepareAsOptPlan must be set to allow use of
	//   PREPARE name AS OPT PLAN '...'
	AllowPrepareAsOptPlan bool
	// TempTablesEnabled indicates whether temporary tables can be created or not.
	TempTablesEnabled bool
	// ImplicitColumnPartitioningEnabled indicates whether implicit column
	// partitioning can be created.
	ImplicitColumnPartitioningEnabled bool
	// DropEnumValueEnabled indicates whether enum values can be dropped.
	DropEnumValueEnabled bool
	// OverrideMultiRegionZoneConfigEnabled indicates whether zone configurations can be
	// modified for multi-region databases and tables/indexes/partitions.
	OverrideMultiRegionZoneConfigEnabled bool
	// HashShardedIndexesEnabled indicates whether hash sharded indexes can be created.
	HashShardedIndexesEnabled bool
	// DisallowFullTableScans indicates whether queries that plan full table scans
	// should be rejected.
	DisallowFullTableScans bool
	// ImplicitSelectForUpdate is true if FOR UPDATE locking may be used during
	// the row-fetch phase of mutation statements.
	ImplicitSelectForUpdate bool
	// InsertFastPath is true if the fast path for insert (with VALUES input) may
	// be used.
	InsertFastPath bool
	// AlterColumnTypeGeneralEnabled is true if ALTER TABLE ... ALTER COLUMN ...
	// TYPE x may be used for general conversions requiring online schema change.
	AlterColumnTypeGeneralEnabled bool
	// SynchronousCommit is a dummy setting for the synchronous_commit var.
	SynchronousCommit bool
	// EnableSeqScan is a dummy setting for the enable_seqscan var.
	EnableSeqScan bool
	// EnableUniqueWithoutIndexConstraints indicates whether creating unique
	// constraints without an index is allowed.
	// TODO(rytaft): remove this once unique without index constraints are fully
	// supported.
	EnableUniqueWithoutIndexConstraints bool
	// NewSchemaChangerMode indicates whether to use the new schema changer.
	NewSchemaChangerMode NewSchemaChangerMode
	// EnableStreamReplication indicates whether to allow setting up a replication
	// stream.
	EnableStreamReplication bool
	// SequenceCache stores sequence values which have been cached using the
	// CACHE sequence option.
	SequenceCache SequenceCache
	// StubCatalogTablesEnabled allows queries against virtual
	// tables that are not yet implemented.
	StubCatalogTablesEnabled bool
	///////////////////////////////////////////////////////////////////////////
	// WARNING: consider whether a session parameter you're adding needs to  //
	// be propagated to the remote nodes. If so, that parameter should live  //
	// in the SessionData struct above.                                      //
	///////////////////////////////////////////////////////////////////////////
}
// IsTemporarySchemaID returns true if the given ID refers to any of the temp
// schemas created by the session.
func (s *SessionData) IsTemporarySchemaID(ID uint32) bool {
	for _, schemaID := range s.DatabaseIDToTempSchemaID {
		if schemaID == ID {
			return true
		}
	}
	return false
}
// GetTemporarySchemaIDForDb returns the schemaID for the temporary schema if
// one exists for the DB. The second return value communicates the existence of
// the temp schema for that DB.
func (s *SessionData) GetTemporarySchemaIDForDb(dbID uint32) (uint32, bool) {
	id, ok := s.DatabaseIDToTempSchemaID[dbID]
	return id, ok
}
// ExperimentalDistSQLPlanningMode controls if and when the opt-driven DistSQL
// planning is used to create physical plans.
// Keep String and ExperimentalDistSQLPlanningModeFromString in sync with
// any value added here.
type ExperimentalDistSQLPlanningMode int64
const (
	// ExperimentalDistSQLPlanningOff means that we always use the old path of
	// going from opt.Expr to planNodes and then to processor specs.
	ExperimentalDistSQLPlanningOff ExperimentalDistSQLPlanningMode = iota
	// ExperimentalDistSQLPlanningOn means that we will attempt to use the new
	// path for performing DistSQL planning in the optimizer, and if that
	// doesn't succeed for some reason, we will fallback to the old path.
	ExperimentalDistSQLPlanningOn
	// ExperimentalDistSQLPlanningAlways means that we will only use the new path,
	// and if it fails for any reason, the query will fail as well.
	ExperimentalDistSQLPlanningAlways
)
// String returns the session-setting spelling of the mode ("off", "on",
// "always"), or an "invalid (N)" marker for unknown values.
func (m ExperimentalDistSQLPlanningMode) String() string {
	switch m {
	case ExperimentalDistSQLPlanningOn:
		return "on"
	case ExperimentalDistSQLPlanningAlways:
		return "always"
	case ExperimentalDistSQLPlanningOff:
		return "off"
	}
	return fmt.Sprintf("invalid (%d)", m)
}
// ExperimentalDistSQLPlanningModeFromString converts a string into a
// ExperimentalDistSQLPlanningMode. False is returned if the conversion was
// unsuccessful. Matching is case-insensitive.
func ExperimentalDistSQLPlanningModeFromString(val string) (ExperimentalDistSQLPlanningMode, bool) {
	switch strings.ToUpper(val) {
	case "OFF":
		return ExperimentalDistSQLPlanningOff, true
	case "ON":
		return ExperimentalDistSQLPlanningOn, true
	case "ALWAYS":
		return ExperimentalDistSQLPlanningAlways, true
	}
	return 0, false
}
// DistSQLExecMode controls if and when the Executor distributes queries.
// Since 2.1, we run everything through the DistSQL infrastructure,
// and these settings control whether to use a distributed plan, or use a plan
// that only involves local DistSQL processors.
type DistSQLExecMode int64

const (
	// DistSQLOff means that we never distribute queries.
	DistSQLOff DistSQLExecMode = iota
	// DistSQLAuto means that we automatically decide on a case-by-case basis if
	// we distribute queries.
	DistSQLAuto
	// DistSQLOn means that we distribute queries that are supported.
	DistSQLOn
	// DistSQLAlways means that we only distribute; unsupported queries fail.
	DistSQLAlways
)
// String returns the session-setting spelling of the mode, or an
// "invalid (N)" marker for unknown values.
func (m DistSQLExecMode) String() string {
	switch m {
	case DistSQLAuto:
		return "auto"
	case DistSQLOn:
		return "on"
	case DistSQLAlways:
		return "always"
	case DistSQLOff:
		return "off"
	}
	return fmt.Sprintf("invalid (%d)", m)
}
// DistSQLExecModeFromString converts a string into a DistSQLExecMode.
// Matching is case-insensitive; ok is false for unrecognized input.
func DistSQLExecModeFromString(val string) (_ DistSQLExecMode, ok bool) {
	var mode DistSQLExecMode
	switch strings.ToUpper(val) {
	case "OFF":
		mode = DistSQLOff
	case "AUTO":
		mode = DistSQLAuto
	case "ON":
		mode = DistSQLOn
	case "ALWAYS":
		mode = DistSQLAlways
	default:
		return 0, false
	}
	return mode, true
}
// SerialNormalizationMode controls how a SERIAL column definition is
// normalized into a concrete default-value strategy (rowid, virtual
// sequence, or SQL sequence), as described by the constants below.
type SerialNormalizationMode int64

const (
	// SerialUsesRowID means use INT NOT NULL DEFAULT unique_rowid().
	SerialUsesRowID SerialNormalizationMode = iota
	// SerialUsesVirtualSequences means create a virtual sequence and
	// use INT NOT NULL DEFAULT nextval(...).
	SerialUsesVirtualSequences
	// SerialUsesSQLSequences means create a regular SQL sequence and
	// use INT NOT NULL DEFAULT nextval(...). Each call to nextval()
	// is a distributed call to kv. This minimizes the size of gaps
	// between successive sequence numbers (which occur due to
	// node failures or errors), but the multiple kv calls
	// can impact performance negatively.
	SerialUsesSQLSequences
	// SerialUsesCachedSQLSequences is identical to SerialUsesSQLSequences with
	// the exception that nodes can cache sequence values. This significantly
	// reduces contention and distributed calls to kv, which results in better
	// performance. Gaps between sequences may be larger as a result of cached
	// values being lost to errors and/or node failures.
	SerialUsesCachedSQLSequences
)
// String returns the session-setting spelling of the mode, or an
// "invalid (N)" marker for unknown values.
func (m SerialNormalizationMode) String() string {
	switch m {
	case SerialUsesVirtualSequences:
		return "virtual_sequence"
	case SerialUsesSQLSequences:
		return "sql_sequence"
	case SerialUsesCachedSQLSequences:
		return "sql_sequence_cached"
	case SerialUsesRowID:
		return "rowid"
	}
	return fmt.Sprintf("invalid (%d)", m)
}
// SerialNormalizationModeFromString converts a string into a
// SerialNormalizationMode. Matching is case-insensitive; ok is false for
// unrecognized input.
func SerialNormalizationModeFromString(val string) (_ SerialNormalizationMode, ok bool) {
	var mode SerialNormalizationMode
	switch strings.ToUpper(val) {
	case "ROWID":
		mode = SerialUsesRowID
	case "VIRTUAL_SEQUENCE":
		mode = SerialUsesVirtualSequences
	case "SQL_SEQUENCE":
		mode = SerialUsesSQLSequences
	case "SQL_SEQUENCE_CACHED":
		mode = SerialUsesCachedSQLSequences
	default:
		return 0, false
	}
	return mode, true
}
// NewSchemaChangerMode controls if and when the new schema changer (in
// sql/schemachanger) is in use.
type NewSchemaChangerMode int64

const (
	// UseNewSchemaChangerOff means that we never use the new schema changer.
	UseNewSchemaChangerOff NewSchemaChangerMode = iota
	// UseNewSchemaChangerOn means that we use the new schema changer for
	// supported statements in implicit transactions, but fall back to the old
	// schema changer otherwise.
	UseNewSchemaChangerOn
	// UseNewSchemaChangerUnsafeAlways means that we attempt to use the new schema
	// changer for all statements and return errors for unsupported statements.
	// Used for testing/development.
	UseNewSchemaChangerUnsafeAlways
)
// String returns the session-setting spelling of the mode, or an
// "invalid (N)" marker for unknown values.
func (m NewSchemaChangerMode) String() string {
	switch m {
	case UseNewSchemaChangerOn:
		return "on"
	case UseNewSchemaChangerUnsafeAlways:
		return "unsafe_always"
	case UseNewSchemaChangerOff:
		return "off"
	}
	return fmt.Sprintf("invalid (%d)", m)
}
// NewSchemaChangerModeFromString converts a string into a
// NewSchemaChangerMode. Matching is case-insensitive; ok is false for
// unrecognized input.
func NewSchemaChangerModeFromString(val string) (_ NewSchemaChangerMode, ok bool) {
	switch strings.ToUpper(val) {
	case "OFF":
		return UseNewSchemaChangerOff, true
	case "ON":
		return UseNewSchemaChangerOn, true
	case "UNSAFE_ALWAYS":
		return UseNewSchemaChangerUnsafeAlways, true
	}
	return 0, false
}
|
package server
import (
"errors"
"github.com/bitmaelum/bitmaelum-server/core"
"github.com/bitmaelum/bitmaelum-server/core/messagebox"
)
// Service implements account management (creation, lookups, mailbox and
// flag operations) on top of a storage Repository.
type Service struct {
	repo Repository
}
// AccountService returns a new account Service backed by the given
// repository.
func AccountService(repo Repository) *Service {
	s := &Service{repo: repo}
	return s
}
// CreateAccount creates a new account for the given address, provisions the
// default mailboxes, and stores the account's public key. It returns an
// error when the account already exists, when the account itself cannot be
// created, or when the public key cannot be stored.
func (s *Service) CreateAccount(addr core.HashAddress, pubKey string) error {
	if s.repo.Exists(addr) {
		return errors.New("account already exists")
	}
	if err := s.repo.Create(addr); err != nil {
		return err
	}
	// Default-box creation is best-effort: the account remains usable if a
	// box is missing, so these errors are deliberately ignored.
	_ = s.repo.CreateBox(addr, "inbox", "This is your regular inbox", 0)
	_ = s.repo.CreateBox(addr, "outbox", "All your outgoing messages will be stored here", 0)
	_ = s.repo.CreateBox(addr, "trash", "Trashcan. Everything in here will be removed automatically after 30 days or when purged manually", 0)
	// The public key is essential to the account, so a storage failure must
	// be reported (it was previously discarded).
	return s.repo.StorePubKey(addr, pubKey)
}
// AccountExists reports whether an account exists for the given address.
func (s *Service) AccountExists(addr core.HashAddress) bool {
	return s.repo.Exists(addr)
}
// GetPublicKeys returns the public keys registered for the given address.
// An empty slice is returned when the account does not exist or the keys
// cannot be fetched.
func (s *Service) GetPublicKeys(addr core.HashAddress) []string {
	if !s.repo.Exists(addr) {
		return []string{}
	}
	keys, err := s.repo.FetchPubKeys(addr)
	if err != nil {
		return []string{}
	}
	return keys
}
// FetchMessageBoxes returns the mailboxes of addr that match the given
// query, or an empty slice when the lookup fails.
func (s *Service) FetchMessageBoxes(addr core.HashAddress, query string) []messagebox.MailBoxInfo {
	boxes, err := s.repo.FindBox(addr, query)
	if err != nil {
		return []messagebox.MailBoxInfo{}
	}
	return boxes
}
// FetchListFromBox returns a page (offset/limit) of the message list inside
// the given box of addr, or an empty slice when the lookup fails.
func (s *Service) FetchListFromBox(addr core.HashAddress, box string, offset int, limit int) []messagebox.MessageList {
	messages, err := s.repo.FetchListFromBox(addr, box, offset, limit)
	if err != nil {
		return []messagebox.MessageList{}
	}
	return messages
}
// GetFlags returns the flags set on the given message in the given box.
func (s *Service) GetFlags(addr core.HashAddress, box string, id string) ([]string, error) {
	return s.repo.GetFlags(addr, box, id)
}
// SetFlag sets a flag on the given message in the given box.
func (s *Service) SetFlag(addr core.HashAddress, box string, id string, flag string) error {
	return s.repo.SetFlag(addr, box, id, flag)
}
// UnsetFlag removes a flag from the given message in the given box.
func (s *Service) UnsetFlag(addr core.HashAddress, box string, id string, flag string) error {
	return s.repo.UnsetFlag(addr, box, id, flag)
}
|
//This file is for the second problem in Exercise for Programmers: 57 Challenges
//to Develop Your Coding Skills
|
package main
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"os"
"os/exec"
"syscall"
"time"
"golang.org/x/crypto/ssh/terminal"
)
// charset lists the characters that random passwords are built from.
const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + "!?$%&@*+-_"
// Struct holding the user's data for the login task
// leePassword reads the password typed by the user without echoing it to
// the screen.
// Returns:
//   string: the user's password as a string
func leePassword() string {
	raw, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		fmt.Println(err)
	}
	return string(raw)
}
/*
randomPassword builds a random character string.
Input parameters:
	length: int -> length of the string
	charset: string -> characters the string is built from
*/
func randomPassword(length int, charset string) string {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[rng.Intn(len(charset))]
	}
	return string(out)
}
// limpiarPantallaWindows clears the console using the Windows 'cls' command.
func limpiarPantallaWindows() {
	c := exec.Command("cmd", "/c", "cls")
	c.Stdout = os.Stdout
	// Best-effort: a failure to clear the screen is not actionable.
	_ = c.Run()
}
/*
send POSTs datos as JSON to the server.
Input parameters:
	ruta : string -> server route
	datos : interface{} -> struct with the client's data
Returns:
	json.Decoder over the server's response body
*/
func send(ruta string, datos interface{}) *json.Decoder {
	// Use a client that skips certificate validation; this is needed because
	// the server uses self-signed certificates (for testing).
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	payload, err := json.Marshal(&datos)
	chk(err)
	// Send the client's data to the server via POST.
	resp, err := client.Post("https://localhost:10441/"+ruta, "application/json", bytes.NewBuffer(payload))
	chk(err)
	// Hand back a decoder over the server's JSON response.
	return json.NewDecoder(resp.Body)
}
/*
menuRegistroUsuario shows the user-registration menu.
Input parameters:
	mensajeMenuRegistro : string -> INFO/ERROR message for the registration menu
Returns:
	opMenuPrincipal : string -> option chosen in the main menu
	mensajeMenuPrincipal : string -> INFO/ERROR message for the main menu
	mensajeMenuRegistro : string -> INFO/ERROR message for the registration menu
*/
func menuRegistroUsuario(mensajeMenuRegistro string) (string, string, string) {
	// Variables for user registration
	var usuario string
	var passRegistro string
	var repitePassRegistro string
	// INFO/ERROR messages
	var mensajeMenuPrincipal string
	// Main-menu operation
	var opMenuPrincipal string
	// JSON we will receive from the server
	var jis jsonIdentificacionServidor
	scanner := bufio.NewScanner(os.Stdin)
	// Clear the terminal and show the menu
	limpiarPantallaWindows()
	fmt.Println("+---------------------------------------------------+")
	fmt.Println("| Introduce tus datos de usuario para registrarte |")
	fmt.Println("+---------------------------------------------------+")
	fmt.Printf(mensajeMenuRegistro)
	// Ask the user for their data
	fmt.Print("Email: ")
	scanner.Scan()
	usuario = scanner.Text()
	fmt.Print("Password: ")
	passRegistro = leePassword()
	fmt.Print("\nRepite el password: ")
	repitePassRegistro = leePassword()
	// Check that the user entered all the data correctly
	if len(usuario) > 0 && len(passRegistro) > 0 && len(repitePassRegistro) > 0 {
		if passRegistro == repitePassRegistro {
			// Hash the password with SHA3
			passRegistroSHA3 := hashSha512(passRegistro)
			// Split the digest into two equal halves and send the first
			// half to the server (the other half stays client-side)
			parteUnoPassRegistroSHA3 := passRegistroSHA3[0:32]
			// Convert the data we will send to the server into JSON
			datosJSON := jsonIdentificacion{Usuario: usuario, Password: encode64(parteUnoPassRegistroSHA3)}
			// Send the data to the server
			decoder := send("registrar", datosJSON)
			decoder.Decode(&jis)
			// Check the server's response
			if jis.Valido == true {
				mensajeMenuPrincipal = "[INFO] Registro de usuario realizado correctamente!\n"
				opMenuPrincipal = "0"
				mensajeMenuRegistro = ""
			} else {
				mensajeMenuRegistro = "[ERROR] " + jis.Mensaje + "\n"
				// Show the registration menu again
				opMenuPrincipal = "2"
			}
			limpiarPantallaWindows()
		} else {
			mensajeMenuRegistro = "[ERROR] Los password no coinciden\n"
			// Show the registration menu again
			opMenuPrincipal = "2"
		}
	} else {
		mensajeMenuRegistro = "[ERROR] Te has dejado campos sin rellenar\n"
		// Show the registration menu again
		opMenuPrincipal = "2"
	}
	return opMenuPrincipal, mensajeMenuPrincipal, mensajeMenuRegistro
}
/*
buscarCuenta looks up the account entered by the user and, if it exists,
prints it on screen together with the decrypted password.
Input parameters:
	usuario : string -> name of the logged-in user
	passAES : []byte -> AES key used to encrypt and decrypt
*/
func buscarCuenta(usuario string, passAES []byte) {
	// INFO/ERROR messages
	var mensajeBuscarCuenta string
	// Name of the account to search for
	var nombreCuenta = ""
	var opRepetirBusqueda = "1"
	// Struct the server will return
	var jr jsonResultado
	scanner := bufio.NewScanner(os.Stdin)
	for opRepetirBusqueda != "2" {
		// Show the user menu until a non-empty account name is entered
		for len(nombreCuenta) == 0 {
			limpiarPantallaWindows()
			fmt.Println("+---------------------+")
			fmt.Println("| Buscar una cuenta |")
			fmt.Println("+---------------------+")
			fmt.Printf(mensajeBuscarCuenta)
			fmt.Printf("Estas logueado como: [%s] \n", usuario)
			fmt.Print("Introduce el nombre de la cuenta que deseas buscar: ")
			scanner.Scan()
			nombreCuenta = scanner.Text()
			if len(nombreCuenta) == 0 {
				mensajeBuscarCuenta = "[ERROR] Introduce un nombre de cuenta para realizar la busqueda\n"
			} else {
				mensajeBuscarCuenta = ""
			}
		}
		// Build the payload for the account search
		datosJSON := jsonBuscar{Usuario: usuario, Cuenta: nombreCuenta}
		// Send the data to the server
		decoder := send("buscar", datosJSON)
		decoder.Decode(&jr)
		if jr.Encontrado == false {
			mensajeBuscarCuenta = "[INFO] No existe ninguna cuenta con ese nombre\n"
		}
		limpiarPantallaWindows()
		fmt.Println("+---------------------+")
		fmt.Println("| Buscar una cuenta |")
		fmt.Println("+---------------------+")
		fmt.Printf(mensajeBuscarCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		// Show the account with its password decrypted client-side
		if jr.Encontrado == true {
			fmt.Printf("Cuenta: [%s] \n", jr.Cuenta)
			fmt.Printf("Password: [%s]\n", decrypt([]byte(decode64(jr.Password)), passAES))
		}
		// Ask the user whether they want to run another search
		fmt.Println("\nDeseas realizar otra busqueda? ")
		fmt.Println("[1] Si")
		fmt.Println("[2] No")
		fmt.Printf("Opcion: ")
		scanner.Scan()
		opRepetirBusqueda = scanner.Text()
		if opRepetirBusqueda != "1" && opRepetirBusqueda != "2" {
			// Invalid answer: ask again
			opRepetirBusqueda = "1"
		} else {
			nombreCuenta = ""
			mensajeBuscarCuenta = ""
		}
	}
}
/*
añadirCuenta adds a new account.
Input parameters:
	usuario : string -> name of the logged-in user
	passAES : []byte -> AES key used to encrypt and decrypt
Returns:
	mensajeAdministracion : string -> INFO/ERROR message for the admin menu
*/
func añadirCuenta(usuario string, passAES []byte) string {
	// INFO/ERROR messages
	var mensajeNuevaCuenta string
	var mensajeAdministracion string
	// Name of the account to create
	var nombreCuenta = ""
	// Flag that lets the user request a random password
	var opAleatoria string
	var passCuenta string
	// Structs the server will return
	var jr jsonResultado
	var jis jsonIdentificacionServidor
	scanner := bufio.NewScanner(os.Stdin)
	// Show the user menu until a usable, non-existing account name is given
	for len(nombreCuenta) == 0 {
		limpiarPantallaWindows()
		fmt.Println("+---------------------------+")
		fmt.Println("| Añadir una nueva cuenta |")
		fmt.Println("+---------------------------+")
		fmt.Printf(mensajeNuevaCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		fmt.Print("Introduce el nombre de la cuenta (Ejemplo: facebook): ")
		scanner.Scan()
		nombreCuenta = scanner.Text()
		if len(nombreCuenta) == 0 {
			mensajeNuevaCuenta = "[ERROR] Debes introducir un nombre para la nueva cuenta\n"
		} else {
			// Ask the server whether an account with this name already exists
			datosJSON := jsonBuscar{Usuario: usuario, Cuenta: nombreCuenta}
			// Send the data to the server
			decoder := send("buscar", datosJSON)
			decoder.Decode(&jr)
			if jr.Encontrado == true {
				mensajeNuevaCuenta = "[INFO] Ya existe una cuenta con este nombre\n"
				nombreCuenta = ""
			} else {
				mensajeNuevaCuenta = ""
			}
		}
	}
	// Ask whether the password should be generated randomly
	for opAleatoria != "1" && opAleatoria != "2" {
		limpiarPantallaWindows()
		fmt.Println("+---------------------------+")
		fmt.Println("| Añadir una nueva cuenta |")
		fmt.Println("+---------------------------+")
		fmt.Printf(mensajeNuevaCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		fmt.Println("Deseas generar aleatoriamente el password para la cuenta [" + nombreCuenta + "]? ")
		fmt.Println("[1] Si")
		fmt.Println("[2] No")
		fmt.Printf("Opcion: ")
		scanner.Scan()
		opAleatoria = scanner.Text()
	}
	// Random password
	if opAleatoria == "1" {
		// Generate the password randomly
		passCuenta = randomPassword(15, charset)
		//fmt.Printf("[DEBUG] [random password] passCuenta: [%s]\n", passCuenta)
	} else { // Manual password
		passCuenta = ""
		repitePassCuenta := ""
		// Keep prompting until both entries are non-empty and match
		for len(passCuenta) == 0 || len(repitePassCuenta) == 0 || passCuenta != repitePassCuenta {
			limpiarPantallaWindows()
			fmt.Println("+---------------------------+")
			fmt.Println("| Añadir una nueva cuenta |")
			fmt.Println("+---------------------------+")
			fmt.Printf(mensajeNuevaCuenta)
			fmt.Printf("Estas logueado como: [%s] \n", usuario)
			fmt.Print("Password: ")
			passCuenta = leePassword()
			fmt.Print("\nRepite el password: ")
			repitePassCuenta = leePassword()
			if len(passCuenta) == 0 || len(repitePassCuenta) == 0 {
				mensajeNuevaCuenta = "[ERROR] Debes introducir un password\n"
			} else if passCuenta != repitePassCuenta {
				mensajeNuevaCuenta = "[ERROR] Los password deben coincidir\n"
			} else {
				mensajeNuevaCuenta = ""
			}
		}
	}
	// Encrypt the password with AES before sending it
	datosJSON := jsonNewPass{Usuario: usuario, Cuenta: nombreCuenta, Password: encode64(encrypt([]byte(passCuenta), passAES))}
	//fmt.Printf("[DEBUG] [enviado a servidor] encode64(encrypt([]byte(passCuenta): [%s]\n", datosJSON.Password)
	// Send the data to the server
	decoder := send("add", datosJSON)
	decoder.Decode(&jis)
	// Check the server's response
	if jis.Valido == true {
		mensajeAdministracion = "[INFO] La cuenta se ha añadido correctamente!\n"
	} else {
		mensajeAdministracion = "[INFO] " + jis.Mensaje + "\n"
	}
	return mensajeAdministracion
}
/*
modificarCuenta modifies an existing account.
Input parameters:
	usuario : string -> name of the logged-in user
	passAES : []byte -> AES key used to encrypt and decrypt
Returns:
	mensajeAdministracion : string -> INFO/ERROR message for the admin menu
*/
func modificarCuenta(usuario string, passAES []byte) string {
	// INFO/ERROR messages
	var mensajeModificarCuenta string
	var mensajeAdministracion string
	// Name of the account to modify
	var nombreCuenta = ""
	// Flag that lets the user request a random password
	var opAleatoria string
	var passCuenta string
	// Structs the server will return
	var jr jsonResultado
	var jis jsonIdentificacionServidor
	scanner := bufio.NewScanner(os.Stdin)
	// Show the user menu until an existing account name is given
	for len(nombreCuenta) == 0 {
		limpiarPantallaWindows()
		fmt.Println("+------------------------+")
		fmt.Println("| Modificar una cuenta |")
		fmt.Println("+------------------------+")
		fmt.Printf(mensajeModificarCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		fmt.Print("Introduce el nombre de la cuenta (Ejemplo: facebook): ")
		scanner.Scan()
		nombreCuenta = scanner.Text()
		if len(nombreCuenta) == 0 {
			mensajeModificarCuenta = "[ERROR] Debes introducir un nombre para la nueva cuenta\n"
		} else {
			// Ask the server whether the account exists
			datosJSON := jsonBuscar{Usuario: usuario, Cuenta: nombreCuenta}
			// Send the data to the server
			decoder := send("buscar", datosJSON)
			decoder.Decode(&jr)
			if jr.Encontrado == true {
				mensajeModificarCuenta = ""
			} else {
				mensajeModificarCuenta = "[INFO] No existe ninguna cuenta con ese nombre\n"
				nombreCuenta = ""
			}
		}
	}
	// Ask whether the new password should be generated randomly
	for opAleatoria != "1" && opAleatoria != "2" {
		limpiarPantallaWindows()
		fmt.Println("+------------------------+")
		fmt.Println("| Modificar una cuenta |")
		fmt.Println("+------------------------+")
		fmt.Printf(mensajeModificarCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		fmt.Println("Deseas generar aleatoriamente el password para la cuenta [" + nombreCuenta + "]? ")
		fmt.Println("[1] Si")
		fmt.Println("[2] No")
		fmt.Printf("Opcion: ")
		scanner.Scan()
		opAleatoria = scanner.Text()
	}
	// Random password
	if opAleatoria == "1" {
		// Generate the password randomly
		passCuenta = randomPassword(15, charset)
		//fmt.Printf("[DEBUG] [random password] passCuenta: [%s]\n", passCuenta)
	} else { // Manual password
		passCuenta = ""
		repitePassCuenta := ""
		// Keep prompting until both entries are non-empty and match
		for len(passCuenta) == 0 || len(repitePassCuenta) == 0 || passCuenta != repitePassCuenta {
			limpiarPantallaWindows()
			fmt.Println("+------------------------+")
			fmt.Println("| Modificar una cuenta |")
			fmt.Println("+------------------------+")
			fmt.Printf(mensajeModificarCuenta)
			fmt.Printf("Estas logueado como: [%s] \n", usuario)
			fmt.Print("Password: ")
			passCuenta = leePassword()
			fmt.Print("Repite el password: ")
			repitePassCuenta = leePassword()
			if len(passCuenta) == 0 || len(repitePassCuenta) == 0 {
				mensajeModificarCuenta = "[ERROR] Debes introducir un password\n"
			} else if passCuenta != repitePassCuenta {
				mensajeModificarCuenta = "[ERROR] Los password deben coincidir\n"
			} else {
				mensajeModificarCuenta = ""
			}
		}
	}
	// Encrypt the password with AES before sending it
	datosJSON := jsonNewPass{Usuario: usuario, Cuenta: nombreCuenta, Password: encode64(encrypt([]byte(passCuenta), passAES))}
	// Send the data to the server
	decoder := send("modify", datosJSON)
	decoder.Decode(&jis)
	// Check the server's response
	if jis.Valido == true {
		mensajeAdministracion = "[INFO] La cuenta se ha modificado correctamente!\n"
	} else {
		mensajeAdministracion = "[INFO] " + jis.Mensaje + "\n"
	}
	return mensajeAdministracion
}
/*
eliminarCuenta deletes an account.
Input parameters:
	usuario : string -> name of the logged-in user
Returns:
	mensajeAdministracion : string -> INFO/ERROR message for the admin menu
*/
func eliminarCuenta(usuario string) string {
	// INFO/ERROR messages
	var mensajeEliminarCuenta string
	var mensajeAdministracion string
	// Name of the account to delete
	var nombreCuenta = ""
	// Confirmation flag for deleting the account
	var opEliminar string
	// Structs the server will return
	var jis jsonIdentificacionServidor
	var jr jsonResultado
	scanner := bufio.NewScanner(os.Stdin)
	// Show the user menu until an existing account name is given
	for len(nombreCuenta) == 0 {
		limpiarPantallaWindows()
		fmt.Println("+-----------------------+")
		fmt.Println("| Eliminar una cuenta |")
		fmt.Println("+-----------------------+")
		fmt.Printf(mensajeEliminarCuenta)
		fmt.Printf("Estas logueado como: [%s] \n", usuario)
		fmt.Print("Introduce el nombre de la cuenta que deseas eliminar: ")
		scanner.Scan()
		nombreCuenta = scanner.Text()
		if len(nombreCuenta) == 0 {
			mensajeEliminarCuenta = "[ERROR] Introduce un nombre de cuenta para eliminarla\n"
		} else {
			// Ask the server whether the account exists
			datosJSON := jsonBuscar{Usuario: usuario, Cuenta: nombreCuenta}
			// Send the data to the server
			decoder := send("buscar", datosJSON)
			decoder.Decode(&jr)
			if jr.Encontrado == false {
				mensajeEliminarCuenta = "[INFO] La cuenta que se quiere eliminar no existe\n"
				nombreCuenta = ""
			} else {
				mensajeEliminarCuenta = ""
				// Ask for confirmation before deleting
				fmt.Println("Estas seguro de querer eliminar la cuenta [" + nombreCuenta + "]? ")
				fmt.Println("[1] Si")
				fmt.Println("[2] No")
				fmt.Printf("Opcion: ")
				scanner.Scan()
				opEliminar = scanner.Text()
				if opEliminar == "1" {
					// Build the payload for the delete request
					datosJSON := jsonBuscar{Usuario: usuario, Cuenta: nombreCuenta}
					// Send the data to the server
					decoder := send("delete", datosJSON)
					decoder.Decode(&jis)
					if jis.Valido == true {
						mensajeAdministracion = "[INFO] La cuenta se ha eliminado correctamente!\n"
					} else {
						mensajeAdministracion = "[INFO] " + jis.Mensaje + "\n"
					}
				}
			}
		}
	}
	return mensajeAdministracion
}
/*
menuUsuario shows the user's administration menu and dispatches to the
search/add/modify/delete operations until the user chooses to exit.
Input parameters:
	usuario : string -> name of the logged-in user
	passAES : []byte -> AES key used to encrypt and decrypt
Returns:
	opMenuPrincipal : string -> option chosen in the main menu
*/
func menuUsuario(usuario string, passAES []byte) string {
	var opMenuUsuario = "0"
	var opMenuPrincipal string
	// INFO/ERROR messages
	var mensajeAdministracion string
	scanner := bufio.NewScanner(os.Stdin)
	for opMenuUsuario != "5" {
		if opMenuUsuario == "0" {
			limpiarPantallaWindows()
			// Show the user menu
			fmt.Println("+---------------------------+")
			fmt.Println("| Panel de administracion |")
			fmt.Println("+---------------------------+")
			fmt.Printf(mensajeAdministracion)
			fmt.Printf("Estas logueado como: [%s] \n", usuario)
			fmt.Println("[1] Buscar una cuenta")
			fmt.Println("[2] Añadir una nueva cuenta")
			fmt.Println("[3] Modificar una cuenta")
			fmt.Println("[4] Eliminar una cuenta")
			fmt.Println("[5] Salir")
			fmt.Print("Elige una opcion: ")
			scanner.Scan()
			opMenuUsuario = scanner.Text()
			mensajeAdministracion = ""
			if len(opMenuUsuario) != 1 {
				// Anything longer than one character is invalid: show again
				opMenuUsuario = "0"
			}
		}
		if opMenuUsuario != "5" {
			limpiarPantallaWindows()
			switch opMenuUsuario {
			case "1": // SEARCH ACCOUNT
				buscarCuenta(usuario, passAES)
			case "2": // ADD ACCOUNT
				mensajeAdministracion = añadirCuenta(usuario, passAES)
			case "3": // MODIFY ACCOUNT
				mensajeAdministracion = modificarCuenta(usuario, passAES)
			case "4": // DELETE ACCOUNT
				mensajeAdministracion = eliminarCuenta(usuario)
			}
			// Reset the flag to 0 so the user menu is shown again
			opMenuUsuario = "0"
		}
	}
	// Reset the flag to 0 so the main menu is shown again
	opMenuPrincipal = "0"
	return opMenuPrincipal
}
/*
menuLoginUsuario shows the menu where the user can log in, including the
second step where an emailed confirmation code is verified.
Input parameters:
	mensajeLogin : string -> INFO/ERROR message for the login menu
Returns:
	opMenuPrincipal : string -> option chosen in the main menu
	mensajeLogin : string -> INFO/ERROR message for the login menu
*/
func menuLoginUsuario(mensajeLogin string) (string, string) {
	// Variables for the user login
	var usuario string
	var passLogin string
	var claveCorreo string
	// Main-menu operation
	var opMenuPrincipal string
	// Client-side AES key used to encrypt and decrypt
	var passAES []byte
	// JSON we will receive from the server
	var jis jsonIdentificacionServidor
	scanner := bufio.NewScanner(os.Stdin)
	// Clear the terminal and show the menu
	limpiarPantallaWindows()
	fmt.Println("+----------------------------------------------+")
	fmt.Println("| Introduce tus datos de usuario para entrar |")
	fmt.Println("+----------------------------------------------+")
	fmt.Printf(mensajeLogin)
	fmt.Print("Email: ")
	scanner.Scan()
	usuario = scanner.Text()
	fmt.Print("Password: ")
	passLogin = leePassword()
	// Check that the user entered the data correctly
	if len(usuario) > 0 && len(passLogin) > 0 {
		// Hash the password with SHA3
		passLoginSHA3 := hashSha512(passLogin)
		// Split the digest: the first half goes to the server for
		// authentication, the second half stays as the local AES key
		passSHA3 := passLoginSHA3[0:32]
		passAES = passLoginSHA3[32:64]
		// Convert the data we will send to the server into JSON
		datosJSON := jsonIdentificacion{Usuario: usuario, Password: encode64(passSHA3)}
		// Send the data to the server
		decoder := send("login", datosJSON)
		decoder.Decode(&jis)
		// Check that the user and password are correct
		if jis.Valido == true {
			// Ask the user for the code that was sent to them by email
			fmt.Print("\nIntroduce la clave: ")
			scanner.Scan()
			claveCorreo = scanner.Text()
			// Convert the data we will send to the server into JSON
			datosJSON := jsonCodigoIdentificacion{Codigo: claveCorreo, Usuario: usuario}
			// Send the data to the server
			decoder := send("confirmarlogin", datosJSON)
			decoder.Decode(&jis)
			// Check that the entered code is correct
			if jis.Valido == true {
				//fmt.Printf("[DEBUG] Token valido")
				mensajeLogin = ""
				// Show the user menu
				opMenuPrincipal = menuUsuario(usuario, passAES)
			} else {
				//fmt.Printf("[DEBUG] Token invalido")
				mensajeLogin = "[ERROR] " + jis.Mensaje + "\n"
				// Show the login menu again
				opMenuPrincipal = "1"
			}
		} else {
			mensajeLogin = "[ERROR] " + jis.Mensaje + "\n"
			// Show the login menu again
			opMenuPrincipal = "1"
		}
	} else {
		mensajeLogin = "[ERROR] El nombre de usuario y/o password no pueden quedar vacios\n"
		// Show the login menu again
		opMenuPrincipal = "1"
	}
	return opMenuPrincipal, mensajeLogin
}
// main drives the top-level menu loop: it shows the main menu and hands
// control to the login or registration flows until the user chooses "Salir".
func main() {
	// Option chosen in the main menu
	var opMenuPrincipal = "0"
	// INFO/ERROR messages shown in the menus
	var mensajeMenuPrincipal string
	var mensajeLogin string
	var mensajeMenuRegistro string
	scanner := bufio.NewScanner(os.Stdin)
	// Show the menu until the user picks the 'Salir' (exit) option
	for opMenuPrincipal != "3" {
		limpiarPantallaWindows()
		if opMenuPrincipal == "0" {
			fmt.Println("+-----------------------------------------+")
			fmt.Println("| Bienvenido a tu Gestor de Contraseñas! |")
			fmt.Println("+-----------------------------------------+")
			fmt.Printf(mensajeMenuPrincipal)
			fmt.Println("[1] Entrar")
			fmt.Println("[2] Registrate")
			fmt.Println("[3] Salir")
			fmt.Print("Elige una opcion: ")
			scanner.Scan()
			opMenuPrincipal = scanner.Text()
			if len(opMenuPrincipal) != 1 {
				// Anything longer than one character is invalid: show again
				opMenuPrincipal = "0"
			}
		}
		if opMenuPrincipal != "3" {
			mensajeMenuPrincipal = ""
			switch opMenuPrincipal {
			case "1": // USER LOGIN
				opMenuPrincipal, mensajeLogin = menuLoginUsuario(mensajeLogin)
				//fmt.Printf("[DEBUG] opMenuPrincipal: [%s] mensajeMenuRegistro: [%s]\n", opMenuPrincipal, mensajeMenuRegistro)
			case "2": // USER REGISTRATION
				opMenuPrincipal, mensajeMenuPrincipal, mensajeMenuRegistro = menuRegistroUsuario(mensajeMenuRegistro)
				//fmt.Printf("[DEBUG] opMenuPrincipal: [%s] mensajeMenuPrincipal: [%s] mensajeMenuRegistro: [%s]\n", opMenuPrincipal, mensajeMenuPrincipal, mensajeMenuRegistro)
			}
		}
	}
}
|
package models
import (
"errors"
"github.com/jinzhu/gorm"
"github.com/lib/pq"
)
// YChaseModel maps a wallet address (primary key) to its list of asset
// identifiers, stored as a Postgres varchar array.
type YChaseModel struct {
	WalletAddress string `gorm:"primary_key" json:"walletAddress"`
	Assets pq.StringArray `gorm:"not null;type:varchar(64)[]" json:"assets"`
}
// Create inserts the receiver into the database and returns it. On failure
// an empty model is returned together with the error.
func (u *YChaseModel) Create(db *gorm.DB) (*YChaseModel, error) {
	if err := db.Debug().Create(&u).Error; err != nil {
		return &YChaseModel{}, err
	}
	return u, nil
}
// Read loads the row with the given wallet address into the receiver and
// returns it. A missing row is reported as a distinct "YChaseModel Not
// Found" error; any other database error is returned as-is.
func (u *YChaseModel) Read(db *gorm.DB, WalletAddress string) (*YChaseModel, error) {
	err := db.Debug().Model(YChaseModel{}).Where("wallet_address = ?", WalletAddress).Take(&u).Error
	// The not-found check must run before the generic error check:
	// previously it came after `if err != nil { return ... }` and was
	// therefore unreachable.
	if gorm.IsRecordNotFoundError(err) {
		return &YChaseModel{}, errors.New("YChaseModel Not Found")
	}
	if err != nil {
		return &YChaseModel{}, err
	}
	return u, nil
}
// Update overwrites the assets column of the row with the given wallet
// address using the receiver's Assets and returns the receiver.
func (u *YChaseModel) Update(db *gorm.DB, WalletAddress string) (*YChaseModel, error) {
	// Take(&YChaseModel{}) ensures the row exists before UpdateColumns
	// writes the new value (UpdateColumns skips gorm callbacks/timestamps).
	db = db.Debug().Model(&YChaseModel{}).Where("wallet_address = ?", WalletAddress).Take(&YChaseModel{}).UpdateColumns(
		map[string]interface{}{
			"assets": u.Assets,
		},
	)
	if db.Error != nil {
		return &YChaseModel{}, db.Error
	}
	return u, nil
}
// Delete removes the row with the given wallet address and returns the
// number of rows affected.
func (u *YChaseModel) Delete(db *gorm.DB, WalletAddress string) (int64, error) {
	// Take(&YChaseModel{}) verifies the row exists (yielding an error
	// otherwise) before the Delete is issued.
	db = db.Debug().Model(&YChaseModel{}).Where("wallet_address = ?", WalletAddress).Take(&YChaseModel{}).Delete(&YChaseModel{})
	if db.Error != nil {
		return 0, db.Error
	}
	return db.RowsAffected, nil
}
|
package symbol
import "fmt"
// Symbol is for creating symbols that represent
// values when evaluating the AST.
type Symbol interface {
	// GetType returns the symbol's type tag (e.g. "INTEGER", "ERROR").
	GetType() string
	// GetValue returns the symbol's value rendered as a string.
	GetValue() string
}
// Integer is the symbol for 64-bit integer values.
type Integer struct {
	Value int64
}

// GetType returns the INTEGER symbol type.
func (integer *Integer) GetType() string {
	return "INTEGER"
}

// GetValue returns the integer's value formatted as a decimal string.
func (integer *Integer) GetValue() string {
	return fmt.Sprint(integer.Value)
}
// Dummy is the placeholder symbol used when a function has no value to
// return.
type Dummy struct {
	Value string
}

// GetType returns the "" Dummy symbol type.
func (dummy *Dummy) GetType() string {
	return ""
}

// GetValue returns the stored value verbatim.
// The value is returned directly; the previous fmt.Sprintf(dummy.Value)
// treated it as a format string and mangled any '%' characters it contained.
func (dummy *Dummy) GetValue() string {
	return dummy.Value
}
// SymbolTable stores variables during evaluation as a map from identifier
// to its current Symbol value.
type SymbolTable struct {
	Table map[string]Symbol
}

// CreateSymbolTable returns a fresh, empty SymbolTable.
func CreateSymbolTable() *SymbolTable {
	return &SymbolTable{Table: make(map[string]Symbol)}
}

// Get looks up identifier; the boolean reports whether it was present.
func (symbolTable *SymbolTable) Get(identifier string) (Symbol, bool) {
	value, ok := symbolTable.Table[identifier]
	return value, ok
}

// Set binds identifier to value and returns the value for chaining.
func (symbolTable *SymbolTable) Set(identifier string, value Symbol) Symbol {
	symbolTable.Table[identifier] = value
	return value
}
// Error symbol stores errors that occur in evaluation.
type Error struct {
	Message string
}

// GetType returns the ERROR type tag.
func (err *Error) GetType() string {
	return "ERROR"
}

// GetValue returns the message prefixed with "ERROR: ".
func (err *Error) GetValue() string {
	return fmt.Sprintf("ERROR: %s", err.Message)
}
|
package main
import (
"fmt"
"os"
"time"
)
// count prints a 10-second countdown to stdout, one number per second.
// A byte arriving on stdin aborts the countdown early.
func count() {
	// Buffered so the stdin goroutine's send succeeds even when the
	// countdown has already finished; with an unbuffered channel that send
	// blocked forever, pinning the goroutine beyond the unavoidable
	// blocking Read.
	abord := make(chan struct{}, 1)
	go func() {
		// NOTE(review): os.Stdin.Read cannot be cancelled, so this
		// goroutine lives until a key is pressed or the process exits.
		key := make([]byte, 1)
		os.Stdin.Read(key)
		abord <- struct{}{}
	}()
	// time.NewTicker (not time.Tick) so the ticker can be stopped.
	tick := time.NewTicker(time.Second * 1)
	defer tick.Stop()
	for num := 10; num > 0; num-- {
		fmt.Println(num)
		select {
		case <-tick.C:
		case <-abord:
			fmt.Println("Abord")
			return
		}
	}
}
|
package delta
import (
"bytes"
"fmt"
"github.com/boltdb/bolt"
)
var bucket = []byte("delta")
// SetupDB opens (or creates) the bolt database file and guarantees the
// "delta" bucket exists. The returned handle is ready for Put/Get.
func SetupDB() (*bolt.DB, error) {
	DB, err := bolt.Open("/var/lib/bolt.db", 0644, nil)
	if err != nil {
		return nil, fmt.Errorf("Failed to create DB File: %s", err)
	}
	// Previously the Update error was discarded, so a failed bucket
	// creation went unnoticed. CreateBucketIfNotExists also makes setup
	// idempotent: plain CreateBucket fails once the bucket already exists,
	// i.e. on every reopen of the file.
	err = DB.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {
			return fmt.Errorf("create bucket: %s", err)
		}
		return nil
	})
	if err != nil {
		DB.Close()
		return nil, err
	}
	return DB, nil
}
// Put stores value under key in the delta bucket, returning any write error.
func Put(db *bolt.DB, key []byte, value []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(bucket).Put(key, value)
	})
}
// Get returns the value stored under key in the delta bucket, or nil when
// the key is absent.
func Get(db *bolt.DB, key []byte) []byte {
	var val []byte
	db.View(func(tx *bolt.Tx) error {
		val = tx.Bucket(bucket).Get(key)
		return nil
	})
	return val
}
// GetLimit prints every key/value pair whose key lies in [min, max].
func GetLimit(db *bolt.DB, min []byte, max []byte) {
	db.View(func(tx *bolt.Tx) error {
		cursor := tx.Bucket(bucket).Cursor()
		k, v := cursor.Seek(min)
		for k != nil && bytes.Compare(k, max) <= 0 {
			fmt.Printf("%s: %s\n", k, v)
			k, v = cursor.Next()
		}
		return nil
	})
}
|
package gsm
import (
"fmt"
"os"
"path"
)
var (
	// Version and Rev are presumably injected at build time via
	// -ldflags "-X ..." — TODO confirm the build setup; init() substitutes
	// "<unknown>" when they are left empty.
	Version string
	Rev     string
	// progName is the invoked executable's basename, assigned in init().
	progName string
)
// init derives the program name from os.Args[0] and substitutes a
// placeholder for any build metadata that was not injected.
func init() {
	progName = path.Base(os.Args[0])
	for _, field := range []*string{&Version, &Rev} {
		if *field == "" {
			*field = "<unknown>"
		}
	}
}
func ProgVersion() string {
return fmt.Sprintf("%s %s", progName, Version)
}
|
package main
// reverse flips the list in place: every node's next pointer is re-aimed at
// its predecessor, so the old tail becomes the new head.
func (ll *LinkedList) reverse() {
	var prev *Node
	curr := ll.head
	for curr != nil {
		// Tuple assignment: RHS is evaluated before any store, so the old
		// curr.next survives long enough to advance the cursor.
		curr.next, prev, curr = prev, curr, curr.next
	}
	ll.head = prev
}
func main() {
var list LinkedList
list.prepend(Node{data: 1})
list.prepend(Node{data: 2})
list.prepend(Node{data: 3})
list.prepend(Node{data: 4})
list.prepend(Node{data: 5})
list.prepend(Node{data: 6})
list.prepend(Node{data: 7})
list.prepend(Node{data: 8})
list.prepend(Node{data: 9})
list.prepend(Node{data: 10})
list.print()
list.reverse()
list.print()
} |
package jsonp
// Copyright (C) Philip Schlump, 2013-2015.
// License in ./LICENSE file - MIT.
// Version: 1.0.0
import (
"fmt"
"net/http"
"net/url"
)
//
// Example of Use
//
// In a handler you build some JSON then call JsonP on the return value.
//
// func handleVersion(res http.ResponseWriter, req *http.Request) {
// res.Header().Set("Content-Type", "application/json")
// io.WriteString(res, jsonp.JsonP(fmt.Sprintf(`{"status":"success", "version":"1.0.0"}`+"\n"), res, req))
// }
//
// JSON_Prefix is pre-pended to every non-JSONP JSON response (useful for
// anti-JSON-hijacking prefixes). Empty by default.
var JSON_Prefix string = ""

// Set a prefix that will be pre-pended to every return JSON string.
func SetJsonPrefix(p string) {
	JSON_Prefix = p
}
// JsonP takes a JSON string 's' and, when the request URL carries a
// "callback" query parameter, wraps it for a JSONP response (also switching
// the Content-Type to application/javascript). Without a callback — or when
// the request URI cannot be parsed — it returns JSON_Prefix + s unchanged.
func JsonP(s string, res http.ResponseWriter, req *http.Request) string {
	u, err := url.ParseRequestURI(req.RequestURI)
	if err != nil {
		return JSON_Prefix + s
	}
	m, err := url.ParseQuery(u.RawQuery)
	if err != nil {
		return JSON_Prefix + s
	}
	callback := m.Get("callback")
	if callback == "" {
		return JSON_Prefix + s
	}
	res.Header().Set("Content-Type", "application/javascript")
	return fmt.Sprintf("%s(%s);", callback, s)
}
// JsonP_Param wraps the JSON string 's' as a JSONP callback when callback is
// non-empty (also setting the javascript Content-Type); otherwise it returns
// JSON_Prefix + s.
func JsonP_Param(s string, res http.ResponseWriter, callback string) string {
	if callback == "" {
		return JSON_Prefix + s
	}
	res.Header().Set("Content-Type", "application/javascript")
	return fmt.Sprintf("%s(%s);", callback, s)
}
// PrependPrefix is for non-JSONP responses: it simply prepends the
// configured JSON_Prefix to s and returns the result.
func PrependPrefix(s string) string {
	return JSON_Prefix + s
}
|
package rock
const (
	// DefaultClientAddr is the default client endpoint path.
	DefaultClientAddr = "/"
	// DefaultServerAddr is the default listen address.
	DefaultServerAddr = ":80"
	// POST and GET are opaque request-method identifiers (presumably SHA-1
	// digests — TODO confirm where they are consumed).
	POST = "9b466094ec991a03cb95c489c19c4d75635f0ae5"
	GET = "783923e57ba5e8f1044632c31fd806ee24814bb5"
	V = "▼"
	// Type tags for encoded values.
	// NOTE(review): iota counts const specs from the top of this block, so
	// Terror == 5 (not 0) and the remaining tags continue from there.
	// Confirm this is intentional before reordering anything above — these
	// values look like part of a wire format.
	Terror byte = iota
	Tbool
	Tstring
	Tint
	Tint8
	Tint16
	Tint32
	Tint64
	Tuint
	Tuint8
	Tuint16
	Tuint32
	Tuint64
	Tuintptr
	Tbyte
	Tbytes
	Trune
	Tfloat32
	Tfloat64
	Tcomplex64
	Tcomplex128
)
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotedialer
import (
"io"
"sync"
"time"
"github.com/gorilla/websocket"
)
// wsConn pairs a websocket connection with a mutex; gorilla/websocket
// permits at most one concurrent writer, so every write path must hold
// the lock.
type wsConn struct {
	sync.Mutex
	conn *websocket.Conn
}
// newWSConn wraps conn and arms its initial read deadline together with the
// ping/pong handlers that keep that deadline fresh.
func newWSConn(conn *websocket.Conn) *wsConn {
	ws := &wsConn{conn: conn}
	ws.setupDeadline()
	return ws
}
// WriteMessage writes a message while holding the lock (only one concurrent
// websocket writer is allowed) and pushes the write deadline forward so a
// stalled peer eventually fails the write instead of blocking forever.
func (w *wsConn) WriteMessage(messageType int, data []byte) error {
	w.Lock()
	defer w.Unlock()
	w.conn.SetWriteDeadline(time.Now().Add(PingWaitDuration))
	return w.conn.WriteMessage(messageType, data)
}
// NextReader exposes the underlying connection's next-frame reader.
// Reads are not guarded by the mutex; only writes need serialization.
func (w *wsConn) NextReader() (int, io.Reader, error) {
	return w.conn.NextReader()
}
// setupDeadline arms the initial read deadline and installs ping/pong
// handlers that extend it whenever the peer is heard from, keeping an
// otherwise idle connection from timing out.
func (w *wsConn) setupDeadline() {
	w.conn.SetReadDeadline(time.Now().Add(PingWaitDuration))
	w.conn.SetPingHandler(func(string) error {
		// Reply to pings with a pong; the lock serializes this control
		// write with WriteMessage.
		w.Lock()
		// NOTE(review): the WriteControl error is discarded — confirm a
		// failed pong is acceptable here.
		w.conn.WriteControl(websocket.PongMessage, []byte(""), time.Now().Add(time.Second))
		w.Unlock()
		return w.conn.SetReadDeadline(time.Now().Add(PingWaitDuration))
	})
	w.conn.SetPongHandler(func(string) error {
		return w.conn.SetReadDeadline(time.Now().Add(PingWaitDuration))
	})
}
|
package main
//Invalid
//Checks if the cases are on the same type as that of the switch expression
// NOTE(review): this reads like a negative test fixture for a type checker.
// Strictly speaking, 'x' is an untyped rune constant representable as int,
// so the Go compiler itself accepts this switch — confirm which tool is
// supposed to flag it before changing anything here.
func f () {
	switch a:=0; a {
	case 0 : { }
	case 'x': { }
	default :
	}
}
package config
import (
"github.com/benbjohnson/clock"
"github.com/golang/glog"
"github.com/prebid/prebid-server/analytics"
"github.com/prebid/prebid-server/analytics/clients"
"github.com/prebid/prebid-server/analytics/filesystem"
"github.com/prebid/prebid-server/analytics/pubstack"
"github.com/prebid/prebid-server/config"
)
// NewPBSAnalytics assembles every analytics module enabled in the
// configuration (file logger, Pubstack) and returns them as one composite
// PBSAnalyticsModule. A broken file logger aborts startup; a broken
// Pubstack module is only logged.
func NewPBSAnalytics(analytics *config.Analytics) analytics.PBSAnalyticsModule {
	modules := enabledAnalytics{}
	if len(analytics.File.Filename) > 0 {
		mod, err := filesystem.NewFileLogger(analytics.File.Filename)
		if err != nil {
			glog.Fatalf("Could not initialize FileLogger for file %v :%v", analytics.File.Filename, err)
		}
		modules = append(modules, mod)
	}
	if analytics.Pubstack.Enabled {
		pubstackModule, err := pubstack.NewModule(
			clients.GetDefaultHttpInstance(),
			analytics.Pubstack.ScopeId,
			analytics.Pubstack.IntakeUrl,
			analytics.Pubstack.ConfRefresh,
			analytics.Pubstack.Buffers.EventCount,
			analytics.Pubstack.Buffers.BufferSize,
			analytics.Pubstack.Buffers.Timeout,
			clock.New())
		if err != nil {
			glog.Errorf("Could not initialize PubstackModule: %v", err)
		} else {
			modules = append(modules, pubstackModule)
		}
	}
	return modules
}
// enabledAnalytics is the collection of all correctly configured analytics
// modules; it implements PBSAnalyticsModule by fanning each event out to
// every member.
type enabledAnalytics []analytics.PBSAnalyticsModule

// LogAuctionObject forwards the auction event to every enabled module.
func (ea enabledAnalytics) LogAuctionObject(ao *analytics.AuctionObject) {
	for i := range ea {
		ea[i].LogAuctionObject(ao)
	}
}

// LogVideoObject forwards the video event to every enabled module.
func (ea enabledAnalytics) LogVideoObject(vo *analytics.VideoObject) {
	for i := range ea {
		ea[i].LogVideoObject(vo)
	}
}

// LogCookieSyncObject forwards the cookie-sync event to every enabled module.
func (ea enabledAnalytics) LogCookieSyncObject(cso *analytics.CookieSyncObject) {
	for i := range ea {
		ea[i].LogCookieSyncObject(cso)
	}
}

// LogSetUIDObject forwards the set-UID event to every enabled module.
func (ea enabledAnalytics) LogSetUIDObject(so *analytics.SetUIDObject) {
	for i := range ea {
		ea[i].LogSetUIDObject(so)
	}
}

// LogAmpObject forwards the AMP event to every enabled module.
func (ea enabledAnalytics) LogAmpObject(ao *analytics.AmpObject) {
	for i := range ea {
		ea[i].LogAmpObject(ao)
	}
}

// LogNotificationEventObject forwards the notification event to every
// enabled module.
func (ea enabledAnalytics) LogNotificationEventObject(ne *analytics.NotificationEvent) {
	for i := range ea {
		ea[i].LogNotificationEventObject(ne)
	}
}
|
package main
import (
"database/sql"
"fmt"
"net/http"
"encoding/json"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
var (
	// NOTE(review): package-level err is easy to shadow accidentally;
	// consider function-local errors.
	err error
	// db is presumably a MySQL handle (mysql driver is imported) but is
	// never opened in the code visible here — TODO confirm.
	db *sql.DB
	contador int // visit counter bumped by LandingPage
	contactos []Contacto // in-memory contact store
	estados []Estado // in-memory state catalog
)
// Contacto models a contact-form submission.
type Contacto struct {
	IdContacto int `json:"idContacto"`
	NombreCompleto string `json:"nombreCompleto"`
	Email string `json:"email"`
	TelMovil string `json:"telMovil"`
	Mensaje string `json:"mensaje"`
	// NOTE(review): no json tag — this field marshals as "ID_Estado";
	// confirm clients expect that casing.
	ID_Estado *Estado
}
// Estado is a (Mexican) state catalog entry referenced by Contacto.
type Estado struct {
	ID_Estado int32 `json:"idEstado"`
	Nombre string `json:"nombreEstado"`
}
// main seeds the in-memory stores, wires up the router with permissive CORS
// and serves HTTP until the listener fails.
func main() {
	contactos = append(contactos, Contacto{IdContacto: 1, NombreCompleto: "Itzi Cabrera Sanchez", Email: "itzi@gmail.com", TelMovil: "5512402704", Mensaje: "Hols!", ID_Estado: &Estado{ID_Estado: 1, Nombre: "Mexico"}})
	estados = append(estados,
		Estado{ID_Estado: 1, Nombre: "Mexico"},
		Estado{ID_Estado: 2, Nombre: "Aguascalientes"},
		Estado{ID_Estado: 3, Nombre: "Ciudad de México"},
		Estado{ID_Estado: 4, Nombre: "Cancún"},
		Estado{ID_Estado: 5, Nombre: "Baja California Sur"},
		Estado{ID_Estado: 6, Nombre: "Baja California"},
		Estado{ID_Estado: 7, Nombre: "Campeche"},
		Estado{ID_Estado: 8, Nombre: "Veracruz"},
	)
	router := mux.NewRouter()
	router.HandleFunc("/", LandingPage).Methods("GET")
	router.HandleFunc("/contactos", GetContactosEndPoint).Methods("GET")
	router.HandleFunc("/contacto", PostContactosEndPoint).Methods("POST")
	handlerCORS := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowCredentials: true,
	}).Handler(router)
	// Surface listener failures instead of discarding them: previously the
	// ListenAndServe error was dropped and the process exited silently.
	// NOTE(review): 3306 is MySQL's default port — confirm it is the
	// intended HTTP port before changing it.
	if err = http.ListenAndServe("localhost:3306", handlerCORS); err != nil {
		fmt.Println("server error:", err)
	}
}
// separador prints a horizontal rule to stdout, visually separating
// console log sections.
func separador() {
	fmt.Println("_______________________________________")
}
// LandingPage answers "/" with a greeting, bumps the global visit counter,
// and echoes the counter as JSON on the same response body.
func LandingPage(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Request")
	fmt.Fprintf(w, "Hola! La peticion ha sido recibida..")
	contador++
	fmt.Println("Vistas: ", contador)
	json.NewEncoder(w).Encode(contador)
}
// GetContactosEndPoint serializes the full in-memory contact list as JSON.
func GetContactosEndPoint(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Peticion Contactos")
	json.NewEncoder(w).Encode(contactos)
}
// PostContactosEndPoint decodes a Contacto from the request body, appends it
// to the in-memory store, and echoes the updated list back as JSON.
func PostContactosEndPoint(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Peticion Post")
	var nuevo Contacto
	// Reject malformed JSON instead of silently appending a zero-value
	// contact (the decode error was previously ignored).
	if decodeErr := json.NewDecoder(r.Body).Decode(&nuevo); decodeErr != nil {
		http.Error(w, "JSON invalido", http.StatusBadRequest)
		return
	}
	contactos = append(contactos, nuevo)
	json.NewEncoder(w).Encode(contactos)
	separador()
	fmt.Println("Total de Registros: ", len(contactos))
	fmt.Println("Estados:", estados)
	separador()
}
|
package config
import (
"github.com/isaacRevan24/gamification-toolkit-logic/controller"
"github.com/isaacRevan24/gamification-toolkit-logic/handler"
"github.com/isaacRevan24/gamification-toolkit-logic/repository"
"github.com/isaacRevan24/gamification-toolkit-logic/utility"
)
// RunConfigs runs required configuration steps at application start.
// Currently that is only the global logging setup.
func RunConfigs() {
	loggingSetup()
}
// loggingSetup creates one shared logger and injects it into every layer
// (handler, controller, repository) via their package-level Logs variables.
// NOTE(review): package-level injection means the layers share mutable
// global state — confirm this is the intended wiring style.
func loggingSetup() {
	logs := utility.NewLogging()
	handler.Logs = logs
	controller.Logs = logs
	repository.Logs = logs
	logs.LogInfo("Global logs setup done.")
}
|
package tests
import (
"reflect"
"testing"
)
/**
* [637] Average of Levels in Binary Tree
*
* Given a non-empty binary tree, return the average value of the nodes on each level in the form of an array.
*
* Example 1:
*
* Input:
* 3
* / \
* 9 20
* / \
* 15 7
* Output: [3, 14.5, 11]
* Explanation:
* The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].
*
*
*
* Note:
*
* The range of node's value is in the range of 32-bit signed integer.
*
*
*/
// TestAverageofLevelsinBinaryTree checks the example tree from the problem
// statement: levels {3}, {9,20}, {15,7} average to 3, 14.5 and 11.
func TestAverageofLevelsinBinaryTree(t *testing.T) {
	root := &TreeNode{
		Val:  3,
		Left: &TreeNode{Val: 9},
		Right: &TreeNode{
			Val:   20,
			Left:  &TreeNode{Val: 15},
			Right: &TreeNode{Val: 7},
		},
	}
	want := []float64{3, 14.5, 11}
	if got := averageOfLevels(root); !reflect.DeepEqual(got, want) {
		t.Fail()
	}
}
// submission codes start here
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */
// averageOfLevels walks the tree breadth-first, emitting the arithmetic mean
// of each level's node values. A nil tree yields nil.
func averageOfLevels(root *TreeNode) []float64 {
	if root == nil {
		return nil
	}
	averages := []float64{}
	level := []*TreeNode{root}
	for len(level) > 0 {
		sum := 0
		next := make([]*TreeNode, 0, 2*len(level))
		for _, node := range level {
			sum += node.Val
			if node.Left != nil {
				next = append(next, node.Left)
			}
			if node.Right != nil {
				next = append(next, node.Right)
			}
		}
		averages = append(averages, float64(sum)/float64(len(level)))
		level = next
	}
	return averages
}
// submission codes end
|
package data_test
import (
"context"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/raymonstah/ardanlabs-go-service/internal/data"
"github.com/raymonstah/ardanlabs-go-service/internal/platform/auth"
"github.com/raymonstah/ardanlabs-go-service/internal/tests"
)
// TestUserCreate creates a user against the test database, retrieves it
// back under admin claims, and verifies the round trip is lossless.
func TestUserCreate(t *testing.T) {
	db, teardown := tests.NewUnit(t)
	t.Cleanup(teardown)
	ctx := context.Background()
	now := time.Now()
	newUser := data.NewUser{
		Email: "rho@launchdarkly.com",
		Name:  "Raymond Ho",
	}
	t.Log("creating user...")
	created, err := data.Users.Create(ctx, db, newUser, now)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("retreiving user...")
	// claims is information about the person making the request.
	claims := auth.NewClaims(
		"718ffbea-f4a1-4667-8ae3-b349da52675e", // This is just some random UUID.
		[]string{auth.RoleAdmin, auth.RoleUser},
		now, time.Hour,
	)
	fetched, err := data.Users.Retrieve(ctx, claims, db, created.ID)
	if err != nil {
		t.Fatal(err)
	}
	if diff := cmp.Diff(created, fetched); diff != "" {
		t.Fatal("retreived user not same as created user")
	}
}
|
package cpu
import (
"testing"
"github.com/funsun/peridot/common"
)
// testBus is a trivial memory bus backed by a map, used to feed programs to
// the CPU under test. Unwritten addresses read as zero.
type testBus struct {
	code map[uint16]uint8
}

// NewTestBus wraps an address->byte map as a bus.
func NewTestBus(code map[uint16]uint8) *testBus {
	return &testBus{code: code}
}

// Write stores val at addr.
func (t *testBus) Write(addr uint16, val uint8) {
	t.code[addr] = val
}

// Read returns the byte at addr, materializing (and returning) zero for
// addresses never written.
func (t *testBus) Read(addr uint16) uint8 {
	if v, ok := t.code[addr]; ok {
		return v
	}
	t.code[addr] = 0
	return 0
}
// prepareCPU builds a CPU wired to a test bus whose memory holds the given
// opcodes starting at 0xe000, terminated by the 0x02 stop opcode, with the
// reset vector (0xfffc/0xfffd) pointing at the program.
func prepareCPU(codes []uint8) (*CPU, *testBus) {
	common.Echo = true
	c := new(CPU).Init()
	c.test = true
	mem := make(map[uint16]uint8, len(codes)+3)
	for offset, op := range codes {
		mem[0xe000+uint16(offset)] = op
	}
	// Terminator opcode immediately after the program.
	mem[0xe000+uint16(len(codes))] = 0x02
	// Reset vector -> 0xe000 (little-endian).
	mem[0xfffc] = 0x00
	mem[0xfffd] = 0xe0
	bus := NewTestBus(mem)
	c.SetBus(bus)
	return c, bus
}
// runCPU starts the CPU and blocks until it signals completion on its
// stopped channel (i.e. it hit the 0x02 terminator set by prepareCPU).
func runCPU(c *CPU) {
	c.Start()
	<-c.stopped
}
// 6502-style opcode values used to assemble test programs. Suffixes denote
// addressing mode: IMME = immediate, ZP = zero page, ABS = absolute,
// IND = indirect, _X/_Y = indexed, AC = accumulator.
const (
	LDA_IMME = 0xa9
	LDA_ZP = 0xa5
	LDA_ZP_X = 0xb5
	LDA_ABS = 0xad
	LDA_ABS_X = 0xbd
	LDA_ABS_Y = 0xb9
	LDA_IND_X = 0xa1
	LDA_IND_Y = 0xb1
	LSR_AC = 0x4a
	SEC = 0x38
	ROR_AC = 0x6a
)
// TestCPU checks the simplest program: LDA immediate loads the accumulator.
func TestCPU(t *testing.T) {
	c, _ := prepareCPU([]uint8{LDA_IMME, 0x03})
	runCPU(c)
	if c.a != 0x03 {
		t.Fail()
	}
}
// TestLSR_AC: shifting 0x03 right yields 0x01 with the carry set from the
// ejected low bit.
func TestLSR_AC(t *testing.T) {
	c, _ := prepareCPU([]uint8{LDA_IMME, 0x03, LSR_AC})
	runCPU(c)
	if c.a != 0x01 || !c.C {
		t.Fail()
	}
}
// TestROR_AC rotates the accumulator right through carry.
func TestROR_AC(t *testing.T) {
	// Carry in, odd operand: 0x03 -> 0x81 with carry-out set.
	c, _ := prepareCPU([]uint8{SEC, LDA_IMME, 0x03, ROR_AC})
	runCPU(c)
	if !(c.C && c.a == 0x81) {
		t.Fail()
	}
	// Carry in, even operand: 0x02 -> 0x81 with carry-out clear.
	c, _ = prepareCPU([]uint8{SEC, LDA_IMME, 0x02, ROR_AC})
	runCPU(c)
	if !(!c.C && c.a == 0x81) {
		t.Log(c.C, c.a)
		t.Fail()
	}
}
|
package main
import (
	"log"
	"net/http"

	"github.com/abbot/go-http-auth"
	"golang.org/x/crypto/bcrypt"
)
// Secret is the go-http-auth SecretProvider: it returns a bcrypt hash for
// the single templated user and "" (deny) for everyone else. The
// ${username}/${password} markers are presumably substituted by a template
// step before this file is compiled — TODO confirm.
// NOTE(review): the hash is recomputed on every request; bcrypt at
// DefaultCost is deliberately slow, so each auth attempt pays that cost.
func Secret(user, realm string) string {
	if user == "${username}" {
		hashedPassword, err := bcrypt.GenerateFromPassword([]byte("${password}"), bcrypt.DefaultCost)
		if err == nil {
			return string(hashedPassword)
		}
	}
	return ""
}
// handle serves the templated directory over HTTP for requests that passed
// basic authentication.
func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
	http.FileServer(http.Dir("${path}")).ServeHTTP(w, &r.Request)
}
// main serves the authenticated file server on :8000 until the listener
// fails; the failure (e.g. port already in use) is now fatal-logged instead
// of being silently discarded.
func main() {
	authenticator := auth.NewBasicAuthenticator("", Secret)
	http.HandleFunc("/", authenticator.Wrap(handle))
	log.Fatal(http.ListenAndServe(":8000", nil))
}
|
package main
import (
"fmt"
)
// main demonstrates map basics: literal construction, insertion of a new
// key, and deletion of an existing one.
func main() {
	positions := map[string][]int{
		"GK": {1, 99},
		"CB": {3, 4, 6, 32, 51},
	}
	positions["ST"] = []int{9, 11, 21, 99}
	fmt.Println(positions)
	delete(positions, "GK")
	fmt.Println(positions)
}
|
package repository
import (
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// SyslogType classifies a system log entry.
type SyslogType string
const (
	// SyslogTypeError marks error entries.
	SyslogTypeError SyslogType = "ERROR"
	// SyslogTypeAction marks user-action entries.
	SyslogTypeAction SyslogType = "ACTION"
	// SyslogTypeSystem marks system-generated entries.
	SyslogTypeSystem SyslogType = "SYSTEM"
)
// Syslog is the stored representation of a single system log entry.
type Syslog struct {
	// ID is the MongoDB document id.
	ID primitive.ObjectID `bson:"_id,omitempty" json:"id"`
	// Type classifies the entry (error / action / system).
	Type SyslogType `bson:"type" json:"type"`
	// Context carries arbitrary structured data attached to the entry.
	Context map[string]interface{} `bson:"context" json:"context"`
	// Body is the log message text.
	Body string `bson:"body" json:"body"`
	// CreatedAt records when the entry was created.
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
}
// SyslogRepo is the storage interface for system log entries.
type SyslogRepo interface {
	// Add stores al and returns the generated object id.
	Add(al Syslog) (id primitive.ObjectID, err error)
	// Get loads a single entry by id.
	Get(id primitive.ObjectID) (al Syslog, err error)
	// Paginate returns up to limit entries matching filter starting at
	// offset; next is the offset to resume from.
	Paginate(filter bson.M, offset, limit int64) (logs []Syslog, next int64, err error)
	// Delete removes every entry matching filter.
	Delete(filter bson.M) error
	// DeleteID removes a single entry by id.
	DeleteID(id primitive.ObjectID) error
}
|
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
)
// helpMessage renders the usage text, substituting the basename of the
// given program path into the synopsis line.
func helpMessage(originalProgramName string) string {
	name := filepath.Base(originalProgramName)
	const usage = `%s [OPTIONS] [FILEs...]
OPTIONS
-n, --number 行番号を表示する.
-b, --number-nonblank 行番号を表示する.ただし空白行には付けない.
-s, --squeeze-blank 連続した空行を1行にする.
-t, --table 2つ目以降のファイルでは先頭行を無視する.
-h, --help このメッセージを出力します.
ARGUMENTS
FILEs... 中身の確認または結合を行うファイル.`
	return fmt.Sprintf(usage, name)
}
// perform runs the cat operation: stdin when no file arguments were given,
// otherwise each named file in turn. Always returns exit status 0.
func perform(opts *options) int {
	if len(opts.args) == 0 {
		scanOut(opts, bufio.NewScanner(os.Stdin), 0)
		return 0
	}
	fileLoop(opts)
	return 0
}
// goMain parses the command line and dispatches to perform, returning the
// process exit status. Parse errors and the accompanying usage text now go
// to stderr so normal output stays cleanly pipeable; requested help (-h)
// still prints to stdout. The redundant filepath.Base call is dropped —
// helpMessage already takes the basename itself.
func goMain(args []string) int {
	opts, err := parseArgs(args)
	if err != nil {
		fmt.Fprintf(os.Stderr, "parsing args fail: %s\n", err.Error())
		fmt.Fprintln(os.Stderr, helpMessage(args[0]))
		return 1
	}
	if opts.help {
		fmt.Println(helpMessage(args[0]))
		return 0
	}
	return perform(opts)
}
// main is a thin wrapper: the real work happens in goMain so exit status
// logic stays in ordinary (testable) code, and os.Exit — which skips
// deferred functions — is confined to this one place.
func main() {
	status := goMain(os.Args)
	os.Exit(status)
}
|
package pond
import (
"runtime"
)
// maxProcs caches GOMAXPROCS at package load; the presets below derive
// their rates from it.
var maxProcs = runtime.GOMAXPROCS(0)
// Preset pool resizing strategies
var (
	// Eager maximizes responsiveness at the expense of higher resource usage,
	// which can reduce throughput under certain conditions.
	// This strategy is meant for worker pools that will operate at a small percentage of their capacity
	// most of the time and may occasionally receive bursts of tasks. It's the default strategy.
	Eager = func() ResizingStrategy { return RatedResizer(1) }
	// Balanced tries to find a balance between responsiveness and throughput.
	// It's suitable for general purpose worker pools or those
	// that will operate close to 50% of their capacity most of the time.
	Balanced = func() ResizingStrategy { return RatedResizer(maxProcs / 2) }
	// Lazy maximizes throughput at the expense of responsiveness.
	// This strategy is meant for worker pools that will operate close to their max. capacity most of the time.
	Lazy = func() ResizingStrategy { return RatedResizer(maxProcs) }
)
// ratedResizer implements a rated resizing strategy
type ratedResizer struct {
	rate uint64 // tasks to receive per extra worker created
	hits uint64 // call counter, consulted modulo rate
}
// RatedResizer creates a resizing strategy which can be configured
// to create workers at a specific rate when the pool has no idle workers.
// rate: determines the number of tasks to receive before creating an extra worker.
// A value of 3 can be interpreted as: "Create a new worker every 3 tasks".
func RatedResizer(rate int) ResizingStrategy {
	// Clamp nonsensical rates to 1 (always grow).
	if rate < 1 {
		rate = 1
	}
	return &ratedResizer{
		rate: uint64(rate),
	}
}
// Resize reports whether an extra worker should be created. With rate 1 or
// an empty pool it always grows; otherwise it grows on the first call of
// each rate-sized cycle (hits%rate == 1).
// NOTE(review): hits is mutated without synchronization — confirm the pool
// serializes Resize calls before relying on exact rates under concurrency.
func (r *ratedResizer) Resize(runningWorkers, minWorkers, maxWorkers int) bool {
	if r.rate == 1 || runningWorkers == 0 {
		return true
	}
	r.hits++
	return r.hits%r.rate == 1
}
|
package math
import (
"AlgorithmPractice/src/DataStructure/list"
"testing"
)
/**
* @author liujun
* @version V1.0
* @date 2022/7/9 13:54
* @author-Email ljfirst@mail.ustc.edu.cn
* @description
*/
// Test_addMethod feeds two digit lists to addMethod and prints the result
// (a smoke test — it asserts nothing about the sum).
func Test_addMethod(t *testing.T) {
	// 9 -> 9 -> 3 -> 1
	first := &list.Node{Value: 9}
	first.Next = &list.Node{Value: 9}
	first.Next.Next = &list.Node{Value: 3}
	first.Next.Next.Next = &list.Node{Value: 1}
	//1-4 -4->9 -1
	// 4 -> 5 -> 6
	second := &list.Node{Value: 4}
	second.Next = &list.Node{Value: 5}
	second.Next.Next = &list.Node{Value: 6}
	result := addMethod(first, second)
	result.Print()
}
|
package store
import (
"bytes"
"fmt"
"io/ioutil"
"os"
)
// ExampleLevelDbStore_readWrite writes three records out of key order and
// shows that reading them back yields key-sorted output.
func ExampleLevelDbStore_readWrite() {
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	dbPath, err := ioutil.TempDir("", "transformer-leveldb-test")
	must(err)
	store := NewLevelDbStore(dbPath, LevelDbReadWrite)
	must(store.BeginWriting())
	for _, record := range []*Record{
		NewRecord("a", "x", 0),
		NewRecord("c", "z", 0),
		NewRecord("b", "y", 0),
	} {
		must(store.WriteRecord(record))
	}
	must(store.EndWriting())
	must(store.BeginReading())
	for {
		record, err := store.ReadRecord()
		must(err)
		if record == nil {
			break
		}
		fmt.Printf("%s: %s\n", record.Key, record.Value)
	}
	must(store.EndReading())
	must(os.RemoveAll(dbPath))
	// Output:
	// a: x
	// b: y
	// c: z
}
// ExampleLevelDbStore_seek shows cursor repositioning mid-scan: after "a"
// we jump to "c", after "c" we jump to "e" (landing on "f", the next
// existing key), so "b" and "d" are skipped.
func ExampleLevelDbStore_seek() {
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	dbPath, err := ioutil.TempDir("", "transformer-leveldb-test")
	must(err)
	store := NewLevelDbStore(dbPath, LevelDbReadWrite)
	must(store.BeginWriting())
	for _, record := range []*Record{
		NewRecord("a", "x", 0),
		NewRecord("b", "y", 0),
		NewRecord("c", "z", 0),
		NewRecord("d", "x", 0),
		NewRecord("f", "y", 0),
	} {
		must(store.WriteRecord(record))
	}
	must(store.EndWriting())
	must(store.BeginReading())
	for {
		record, err := store.ReadRecord()
		must(err)
		if record == nil {
			break
		}
		fmt.Printf("%s: %s\n", record.Key, record.Value)
		switch {
		case bytes.Equal(record.Key, []byte("a")):
			store.Seek([]byte("c"))
		case bytes.Equal(record.Key, []byte("c")):
			store.Seek([]byte("e"))
		}
	}
	must(store.EndReading())
	must(os.RemoveAll(dbPath))
	// Output:
	// a: x
	// c: z
	// f: y
}
// ExampleLevelDbStore_deleteAll writes three records, wipes the store with
// DeleteAllRecords, and shows a subsequent scan finds nothing.
func ExampleLevelDbStore_deleteAll() {
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	dbPath, err := ioutil.TempDir("", "transformer-leveldb-test")
	must(err)
	store := NewLevelDbStore(dbPath, LevelDbReadWrite)
	must(store.BeginWriting())
	for _, record := range []*Record{
		NewRecord("a", "x", 0),
		NewRecord("c", "z", 0),
		NewRecord("b", "y", 0),
	} {
		must(store.WriteRecord(record))
	}
	must(store.EndWriting())
	must(store.BeginWriting())
	must(store.DeleteAllRecords())
	must(store.EndWriting())
	must(store.BeginReading())
	for {
		record, err := store.ReadRecord()
		must(err)
		if record == nil {
			break
		}
		fmt.Printf("%s: %s\n", record.Key, record.Value)
	}
	fmt.Printf("End of records\n")
	must(store.EndReading())
	must(os.RemoveAll(dbPath))
	// Output:
	// End of records
}
|
package quips
import (
G "github.com/ionous/sashimi/game"
)
type followsCb func(leads G.IObject, directly bool) bool

// visitFollowConstraints evaluates all quips which constrain this quip's
// position: for each "following quip" entry naming follower, it calls cb
// with the leading quip and whether the constraint is a direct one.
// ex. for QuipHelp(x).DirectlyFollows(y), then visit(x) will call cb(y, true).
// Iteration stops (and okay becomes true) the first time cb returns true.
func visitFollowConstraints(g G.Play, follower G.IObject, cb followsCb) (okay bool) {
	for quips := g.Query("following quips", true); quips.HasNext(); {
		quip := quips.Next()
		// Skip entries that do not talk about our follower's position.
		following := quip.Get("following").Object()
		if !following.Exists() || !following.Equals(follower) {
			continue
		}
		// Grab the other quip in the relation, if any.
		leading := quip.Get("leading").Object()
		if !leading.Exists() {
			continue
		}
		if cb(leading, quip.Is("directly following")) {
			okay = true
			break
		}
	}
	return
}
// QuipHelp provides object oriented functions for evaluating quip relations.
type QuipHelp struct {
	quip G.IObject
}
// Quip wraps a quip object in a QuipHelp helper.
func Quip(quip G.IObject) QuipHelp {
	return QuipHelp{quip}
}
// Follows provides information about the order of quips in a conversation:
// it pairs this quip (as follower) with a prospective leader for queries
// such as DirectInfo.Directly.
func (q QuipHelp) Follows(leader G.IObject) DirectInfo {
	return DirectInfo{q.quip, leader}
}
// Directly returns true if the quip should only be displayed after Follows,
// i.e. some "directly following" constraint on the follower names this
// DirectInfo's leader.
func (info DirectInfo) Directly(g G.Play) bool {
	return visitFollowConstraints(g, info.follower, func(leading G.IObject, directly bool) bool {
		return directly && info.leader.Equals(leading)
	})
}
// Recently provides information about the order of quips in a conversation,
// pairing this quip with a history of recently spoken quips for ranking.
func (q QuipHelp) Recently(history QuipHistory) RecentInfo {
	return RecentInfo{q.quip, history}
}
// Follows ranks the Quip against all recent history.
// Returns -1 if the quip follows no other quip;
// Returns 0 if the quip follows something, but not one of the recent quips;
// Otherwise, the higher the number, the more recent the quip that it follows.
func (info RecentInfo) Follows(g G.Play) (ret int, direct bool) {
	isAFollower := false
	visitFollowConstraints(g, info.quip, func(leading G.IObject, directly bool) bool {
		// find the most recent (highest rank) quip.
		// we only want to consider directly following quips if we are indeed directly following them.
		// (rank == QuipHistoryDepth presumably marks the most recent quip —
		// the only position that satisfies a "directly" constraint; confirm.)
		if rank := info.qh.Rank(leading); (rank > ret) && (!directly || rank == QuipHistoryDepth) {
			ret, direct = rank, directly
		}
		isAFollower = true
		return false // searches all
	})
	if !isAFollower {
		// No constraint mentions this quip at all.
		ret = -1
	}
	return ret, direct
}
// DirectInfo pairs a follower quip with a prospective leader for
// direct-following queries.
type DirectInfo struct {
	follower, leader G.IObject
}
// RecentInfo pairs a quip with recent conversation history for ranking.
type RecentInfo struct {
	quip G.IObject
	qh QuipHistory
}
|
package cap
import (
"context"
"fmt"
"strings"
"time"
"github.com/capatazlib/go-capataz/internal/c"
)
// nodeSepToken is the token used to separate sub-trees and child node names
// in the supervision tree (e.g. "root/subtree/worker").
const nodeSepToken = "/"
////////////////////////////////////////////////////////////////////////////////
// handleChildNodeError reacts to a child terminating with an error: the
// failure is reported to the event system, then Permanent and Transient
// children are restarted while Temporary children are dropped from the
// supervisor's runtime map.
func handleChildNodeError(
	eventNotifier EventNotifier,
	supRuntimeName string,
	supChildren map[string]c.Child,
	supNotifyCh chan c.ChildNotification,
	prevCh c.Child,
	prevChErr error,
) *c.ErrorToleranceReached {
	chSpec := prevCh.GetSpec()
	eventNotifier.processFailed(chSpec.GetTag(), prevCh.GetRuntimeName(), prevChErr)
	restart := chSpec.GetRestart()
	if restart == c.Permanent || restart == c.Transient {
		// On error scenarios, Permanent and Transient try as much as
		// possible to restart the failing child.
		return oneForOneRestartLoop(
			eventNotifier,
			supRuntimeName,
			supChildren,
			supNotifyCh,
			false, /* was complete */
			prevCh,
		)
	}
	// Temporary children can complete or fail; the supervisor will not
	// restart them.
	delete(supChildren, chSpec.GetName())
	return nil
}
// handleChildNodeCompletion reacts to a child terminating without error:
// Transient and Temporary children are simply forgotten, while Permanent
// children are restarted even on normal completion.
func handleChildNodeCompletion(
	eventNotifier EventNotifier,
	supRuntimeName string,
	supChildren map[string]c.Child,
	supNotifyCh chan c.ChildNotification,
	prevCh c.Child,
) *c.ErrorToleranceReached {
	if prevCh.IsWorker() {
		eventNotifier.workerCompleted(prevCh.GetRuntimeName())
	}
	chSpec := prevCh.GetSpec()
	restart := chSpec.GetRestart()
	if restart == c.Transient || restart == c.Temporary {
		// Completed children of these kinds are removed, not restarted.
		delete(supChildren, chSpec.GetName())
		return nil
	}
	// Permanent: the supervisor still restarts the child on completion.
	return oneForOneRestartLoop(
		eventNotifier,
		supRuntimeName,
		supChildren,
		supNotifyCh,
		true, /* was complete */
		prevCh,
	)
}
// handleChildNodeNotification dispatches a child lifecycle notification to
// the error path when it carries an error and to the completion path
// otherwise.
func handleChildNodeNotification(
	eventNotifier EventNotifier,
	supRuntimeName string,
	supChildren map[string]c.Child,
	supNotifyCh chan c.ChildNotification,
	prevCh c.Child,
	chNotification c.ChildNotification,
) *c.ErrorToleranceReached {
	if chErr := chNotification.Unwrap(); chErr != nil {
		// The notification wraps an error: report the process as failed.
		return handleChildNodeError(
			eventNotifier,
			supRuntimeName,
			supChildren,
			supNotifyCh,
			prevCh,
			chErr,
		)
	}
	return handleChildNodeCompletion(
		eventNotifier,
		supRuntimeName,
		supChildren,
		supNotifyCh,
		prevCh,
	)
}
////////////////////////////////////////////////////////////////////////////////
// startChildNode starts a single child and handles its lifecycle
// notification: start failures are reported under the child's would-be
// runtime name and returned; successful worker starts are announced to the
// event system (sub-tree supervisors report for themselves).
func startChildNode(
	spec SupervisorSpec,
	supRuntimeName string,
	notifyCh chan c.ChildNotification,
	chSpec c.ChildSpec,
) (c.Child, error) {
	eventNotifier := spec.getEventNotifier()
	startedTime := time.Now()
	ch, chStartErr := chSpec.DoStart(supRuntimeName, notifyCh)
	if chStartErr != nil {
		// Failure during child initialization: build the runtime name the
		// child would have had so the event is attributable.
		cRuntimeName := strings.Join(
			[]string{supRuntimeName, chSpec.GetName()},
			nodeSepToken,
		)
		eventNotifier.processStartFailed(chSpec.GetTag(), cRuntimeName, chStartErr)
		return c.Child{}, chStartErr
	}
	if chSpec.IsWorker() {
		eventNotifier.workerStarted(ch.GetRuntimeName(), startedTime)
	}
	return ch, nil
}
// startChildNodes iterates over all the children (specified with `cap.WithNodes`
// and `cap.WithSubtree`) starting a goroutine for each. The children iteration
// will be sorted as specified with the `cap.WithStartOrder` option. In case any child
// fails to start, the supervisor start operation will be aborted and all the
// started children so far will be stopped in the reverse order.
func startChildNodes(
	spec SupervisorSpec,
	supChildrenSpecs []c.ChildSpec,
	supRuntimeName string,
	notifyCh chan c.ChildNotification,
) (map[string]c.Child, error) {
	children := make(map[string]c.Child)
	// Start children in the correct order
	for _, chSpec := range spec.order.sortStart(supChildrenSpecs) {
		// the function above will modify the children internally
		ch, chStartErr := startChildNode(
			spec,
			supRuntimeName,
			notifyCh,
			chSpec,
		)
		if chStartErr != nil {
			// Roll back: terminate everything started so far before
			// reporting, so no orphan children outlive a failed start.
			nodeErrMap := terminateChildNodes(spec, supChildrenSpecs, children)
			// Is important we stop the children before we finish the supervisor
			return nil, &SupervisorError{
				supRuntimeName: supRuntimeName,
				nodeErr: chStartErr,
				nodeErrMap: nodeErrMap,
			}
		}
		children[chSpec.GetName()] = ch
	}
	return children, nil
}
// terminateChildNode runs the Terminate procedure on the given child and
// reports the outcome (failure or clean stop) to the event system.
func terminateChildNode(
	eventNotifier EventNotifier,
	ch c.Child,
) error {
	spec := ch.GetSpec()
	stoppedAt := time.Now()

	if err := ch.Terminate(); err != nil {
		// Termination failed; surface it as a process failure event.
		eventNotifier.processFailed(spec.GetTag(), ch.GetRuntimeName(), err)
		return err
	}

	eventNotifier.processTerminated(spec.GetTag(), ch.GetRuntimeName(), stoppedAt)
	return nil
}
// terminateChildNodes stops the given children in the order dictated by
// the supervisor spec; it is used on supervisor shutdown and on start
// rollback. It returns a map from child name to the error (if any) seen
// while terminating that child.
func terminateChildNodes(
	spec SupervisorSpec,
	supChildrenSpecs0 []c.ChildSpec,
	supChildren map[string]c.Child,
) map[string]error {
	notifier := spec.eventNotifier
	errMap := make(map[string]error)

	for _, chSpec := range spec.order.sortTermination(supChildrenSpecs0) {
		// A spec without a runtime entry is expected and gets skipped:
		//
		// * during a start rollback, specs after the failing child were
		//   never added to the runtime children map;
		//
		// * during a stop, a Transient child may have completed, or a
		//   Temporary child completed or failed, and been removed already.
		ch, ok := supChildren[chSpec.GetName()]
		if !ok {
			continue
		}
		if err := terminateChildNode(notifier, ch); err != nil {
			// Record the failure (legit error or timeout) so every
			// termination error can be reported together afterwards.
			errMap[chSpec.GetName()] = err
		}
	}
	return errMap
}
// terminateSupervisor stops all children, runs the resource cleanup, and
// signals the combined outcome to the given onTerminate callback. The
// returned error merges child-termination failures, cleanup failures and
// a restart error (error tolerance reached), when present.
func terminateSupervisor(
	supSpec SupervisorSpec,
	supChildrenSpecs []c.ChildSpec,
	supRuntimeName string,
	supRscCleanup CleanupResourcesFn,
	supChildren map[string]c.Child,
	onTerminate func(error),
	restartErr *c.ErrorToleranceReached,
) error {
	nodeErrMap := terminateChildNodes(supSpec, supChildrenSpecs, supChildren)
	cleanupErr := supRscCleanup()

	// Any child failing to stop, or a cleanup failure, becomes a
	// termination error that must be reported to the parent.
	var terminateErr *SupervisorError
	if len(nodeErrMap) > 0 || cleanupErr != nil {
		terminateErr = &SupervisorError{
			supRuntimeName: supRuntimeName,
			nodeErrMap:     nodeErrMap,
			rscCleanupErr:  cleanupErr,
		}
	}

	switch {
	case restartErr != nil && terminateErr != nil:
		// Both a restart error and a termination error: wrap them together.
		supErr := &SupervisorRestartError{
			supRuntimeName: supRuntimeName,
			terminateErr:   terminateErr,
			nodeErr:        restartErr,
		}
		onTerminate(supErr)
		return supErr
	case restartErr != nil:
		// Restart error only.
		supErr := &SupervisorRestartError{
			supRuntimeName: supRuntimeName,
			nodeErr:        restartErr,
		}
		onTerminate(supErr)
		return supErr
	case terminateErr != nil:
		// Termination error only.
		onTerminate(terminateErr)
		return terminateErr
	default:
		onTerminate(nil)
		return nil
	}
}
////////////////////////////////////////////////////////////////////////////////

// runMonitorLoop does the initialization of supervisor's children and then runs
// an infinite loop that monitors each child error.
//
// This function is used for both async and sync strategies, given this, we
// receive onStart and onTerminate callbacks that behave differently
// depending on which strategy is used:
//
// 1) When called with the async strategy, these callbacks will interact with
// gochans that communicate with the spawner goroutine.
//
// 2) When called with the sync strategy, these callbacks will return the given
// error; note this implementation returns the result of the callback calls.
func runMonitorLoop(
	ctx context.Context,
	supSpec SupervisorSpec,
	supChildrenSpecs []c.ChildSpec,
	supRuntimeName string,
	supRscCleanup CleanupResourcesFn,
	supNotifyCh chan c.ChildNotification,
	ctrlCh chan ctrlMsg,
	supStartTime time.Time,
	onStart c.NotifyStartFn,
	onTerminate notifyTerminationFn,
) error {
	// Start children
	supChildren, restartErr := startChildNodes(
		supSpec,
		supChildrenSpecs,
		supRuntimeName,
		supNotifyCh,
	)
	if restartErr != nil {
		// in case we run in the async strategy we notify the spawner that we
		// started with an error
		onStart(restartErr)
		return restartErr
	}
	// Supervisors are responsible for notifying their own start events; this is
	// important because only the supervisor goroutine knows the exact time it
	// gets started (we would get race-conditions if we notified from the parent
	// otherwise).
	eventNotifier := supSpec.getEventNotifier()
	eventNotifier.supervisorStarted(supRuntimeName, supStartTime)
	// Once children have been spawned, we notify the caller thread that the
	// main loop has started without errors.
	onStart(nil)
	// Supervisor Loop
	for {
		select {
		// parent context is done
		case <-ctx.Done():
			return terminateSupervisor(
				supSpec,
				supChildrenSpecs,
				supRuntimeName,
				supRscCleanup,
				supChildren,
				onTerminate,
				nil, /* restart error */
			)
		case chNotification := <-supNotifyCh:
			prevCh, ok := supChildren[chNotification.GetName()]
			if !ok {
				// TODO: Expand on this case, I think this is highly unlikely, but would
				// like to exercise this branch in test somehow (if possible)
				//
				// NOTE(review): prevCh is the zero value in this branch, so the
				// panic below prints an empty name/tag; chNotification.GetName()
				// would carry the informative value — confirm and fix.
				panic(
					fmt.Errorf(
						"something horribly wrong happened here (name: %s, tag: %s)",
						prevCh.GetRuntimeName(),
						prevCh.GetTag(),
					),
				)
			}
			// Delegate restart/error-tolerance bookkeeping for this child.
			restartErr := handleChildNodeNotification(
				eventNotifier,
				supRuntimeName,
				supChildren,
				supNotifyCh,
				prevCh,
				chNotification,
			)
			if restartErr != nil {
				// Error tolerance reached: shut the whole supervisor down.
				return terminateSupervisor(
					supSpec,
					supChildrenSpecs,
					supRuntimeName,
					supRscCleanup,
					supChildren,
					onTerminate,
					restartErr,
				)
			}
		case msg := <-ctrlCh:
			// Control messages (e.g. dynamic child management) may replace
			// both the spec list and the runtime children map.
			supChildrenSpecs, supChildren = handleCtrlMsg(
				eventNotifier,
				supSpec,
				supChildrenSpecs,
				supRuntimeName,
				supChildren,
				supNotifyCh,
				msg,
			)
		}
	}
}
|
//Package rjshared contains structures used both in broker and workers
package rjshared
// Glvn is a key/value pair exchanged between the broker and the workers.
type Glvn struct {
	// Key identifies the entry.
	Key string
	// Value holds the payload associated with Key.
	Value string
}
|
package uaparser
import "net/http"
// ParseFromHTTPRequest parses the User-Agent header of the given HTTP
// request and returns the resulting UA description.
func (parser *UAParser) ParseFromHTTPRequest(r *http.Request) *UA {
	return parser.Parse(r.Header.Get("User-Agent"))
}
|
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"os"
"github.com/kiasaki/yelp-dataset-api/data"
"labix.org/v2/mgo"
)
// Readln returns a single line (without the ending \n)
// from the input buffered reader.
// An error is returned iff there is an error with the
// buffered reader.
func Readln(r *bufio.Reader) (string, error) {
var (
isPrefix bool = true
err error = nil
line, ln []byte
)
for isPrefix && err == nil {
line, isPrefix, err = r.ReadLine()
ln = append(ln, line...)
}
return string(ln), err
}
// Command-line flags selecting the dump file, the record type to import,
// and the MongoDB destination.
var fileLocation = flag.String("file", "", "Location on Yelp json dump to import")
var importType = flag.String("type", "user", "Import type, options are: user, business, review")
var dbUrl = flag.String("mongo-url", "mongodb://localhost:27017/yelp-dataset-api", "MongoDB url")
// dialMongo connects to the MongoDB instance at url, ensures the indexes
// used by the importer exist, and returns the live session. It panics
// when the connection cannot be established.
func dialMongo(url string) *mgo.Session {
	session, err := mgo.Dial(url)
	if err != nil {
		panic(err)
	}
	data.Index(session.DB(""))
	return session
}
func acquireFileHandle(location string) *os.File {
if location == "" {
fmt.Println("File to import location is required")
os.Exit(1)
} else {
fmt.Println("Importing: " + location)
}
fileHandle, err := os.Open(location)
if err != nil {
fmt.Printf("Error opening file: %v\n", err)
os.Exit(1)
}
return fileHandle
}
// clearTypeTable removes every existing document from the collection
// that corresponds to importType (user, business or review). An unknown
// import type, or a failure while emptying the collection, terminates
// the process with status 1.
func clearTypeTable(dbSession *mgo.Session, importType string) {
	var collection string
	switch importType {
	case "user":
		collection = "users"
	case "business":
		collection = "businesses"
	case "review":
		collection = "reviews"
	default:
		fmt.Println("Import type didn't match user, business or review")
		os.Exit(1)
	}

	// Empty db
	if _, err := dbSession.DB("").C(collection).RemoveAll(nil); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// handleFatalError prints err and aborts the process with status 1 when
// err is non-nil; it is a no-op otherwise.
func handleFatalError(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
// main imports a Yelp dataset JSON dump into MongoDB: a feeder goroutine
// hands out lines on demand, 30 workers parse and save records, and the
// main loop tracks progress until the feeder reports EOF.
//
// Cleanup: removed the unreachable `break` statements that followed
// os.Exit(0) and panic(err), and simplified the feeder's single-case
// select into a plain receive (behavior unchanged: the old `break` only
// exited the select, not the loop).
func main() {
	flag.Parse()
	dbSession := dialMongo(*dbUrl)
	fileHandle := acquireFileHandle(*fileLocation)
	clearTypeTable(dbSession, *importType)

	i := 0
	reader := bufio.NewReader(fileHandle)
	// fan receives one tick per record saved; errChannel carries fatal
	// errors, including the EOF that signals completion.
	fan := make(chan bool)
	errChannel := make(chan error)
	lineRequestChan := make(chan bool)
	lineFeedChan := make(chan string)

	// Feeder: serves one line of the dump per request. On a read error
	// (including EOF) it reports on errChannel and keeps serving; main
	// exits on the first EOF it receives, so later reports are harmless.
	go func() {
		for {
			<-lineRequestChan
			if line, err := Readln(reader); err != nil {
				errChannel <- err
			} else {
				lineFeedChan <- line
			}
		}
	}()

	// Worker pool: each worker owns a copy of the Mongo session and keeps
	// requesting, parsing and saving lines until an error occurs.
	for count := 0; count < 30; count++ {
		go func() {
			workerSession := dbSession.Copy()
			for {
				var err error
				lineRequestChan <- true
				line := <-lineFeedChan
				var model data.Model
				if *importType == "user" {
					parsed := data.YelpUser{}
					err = json.Unmarshal([]byte(line), &parsed)
					model = parsed
				} else if *importType == "business" {
					parsed := data.YelpBusiness{}
					err = json.Unmarshal([]byte(line), &parsed)
					// GeoJSON-style [longitude, latitude] for the geo index.
					parsed.Loc = []float32{parsed.Longitude, parsed.Latitude}
					model = parsed
				} else if *importType == "review" {
					parsed := data.YelpReview{}
					err = json.Unmarshal([]byte(line), &parsed)
					model = parsed
				}
				if err != nil {
					errChannel <- err
					break
				}
				if err = data.Save(workerSession.DB(""), model); err != nil {
					errChannel <- err
					break
				}
				fan <- true
			}
		}()
	}

	// Progress / termination loop.
	for {
		select {
		case <-fan:
			i++
			if i%1000 == 0 {
				fmt.Printf("Processed %d\n", i)
			}
		case err := <-errChannel:
			if err.Error() == "EOF" {
				fmt.Println("\nDone!")
				os.Exit(0)
			}
			panic(err)
		}
	}
}
|
package terminal
import (
"bytes"
)
// Bar renders a textual progress bar such as "[===>----]".
type Bar struct {
	// Fill is the character representing completed progress.
	Fill byte
	// Head is the character at the forward edge of the completed section.
	Head byte
	// Empty is the character representing remaining progress.
	Empty byte
	// LeftEnd is the left-most character of the indicator.
	LeftEnd byte
	// RightEnd is the right-most character of the indicator.
	RightEnd byte
	// Width is the default width of the progress bar.
	Width int
}

// NewBar creates a progress bar renderer with conventional ASCII
// defaults. Width is left at its zero value; callers pass an explicit
// width to Render.
func NewBar() *Bar {
	return &Bar{
		Fill:     '=',
		Head:     '>',
		Empty:    '-',
		LeftEnd:  '[',
		RightEnd: ']',
	}
}

// Render returns the byte representation of the progress bar: width bytes
// showing current/total completion, with end caps and a moving head.
//
// BUGFIX: the original indexed pb[0] on an empty buffer (panic when
// width <= 0) and wrote more than width fill bytes when current > total;
// the completed section is now clamped to [0, width] and non-positive
// widths return an empty slice, as does total == 0.
func (b *Bar) Render(width int, current, total int64) []byte {
	if total == 0 || width <= 0 {
		return []byte{}
	}

	// Completed cells, clamped so out-of-range progress stays in bounds.
	completedWidth := int(float64(width) * (float64(current) / float64(total)))
	if completedWidth < 0 {
		completedWidth = 0
	} else if completedWidth > width {
		completedWidth = width
	}

	// add fill and empty bits
	var buf bytes.Buffer
	buf.Grow(width)
	for i := 0; i < completedWidth; i++ {
		buf.WriteByte(b.Fill)
	}
	for i := 0; i < width-completedWidth; i++ {
		buf.WriteByte(b.Empty)
	}

	// set head bit: the head replaces the last fill cell mid-progress
	pb := buf.Bytes()
	if completedWidth > 0 && completedWidth < width {
		pb[completedWidth-1] = b.Head
	}

	// set left and right ends bits (pb has at least one byte here)
	pb[0], pb[len(pb)-1] = b.LeftEnd, b.RightEnd
	return pb
}
|
package functional
// Filter returns a new slice holding only the elements of arr for which
// predicate reports true. The input slice is never modified, and the
// result is always non-nil (empty when nothing matches).
func Filter[T any](arr []T, predicate func(val T) bool) []T {
	kept := make([]T, 0)
	for i := range arr {
		if predicate(arr[i]) {
			kept = append(kept, arr[i])
		}
	}
	return kept
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build analytics
package analytics
import "time"
const (
	// batchBuffer is the capacity of the payload input channel.
	batchBuffer = 2000
	// batchDelay is the maximum time payloads wait before being flushed.
	batchDelay = time.Second
)
// batcher buffers analytics payloads and sends them to the endpoint in
// batches (see run for the flush conditions).
type batcher struct {
	// endpoint receives each encoded batch.
	endpoint endpoint
	// input queues pending payloads.
	input chan Payload
	// sync carries flush-completion callbacks into the run loop.
	sync chan func()
	// encoder converts a Payload into its wire representation.
	encoder encoder
}
// newBatcher builds a batcher for the given endpoint/encoder pair and
// launches its background run loop.
func newBatcher(p endpoint, e encoder) sender {
	b := &batcher{
		endpoint: p,
		encoder:  e,
		input:    make(chan Payload, batchBuffer),
		sync:     make(chan func()),
	}
	go b.run()
	return b
}
// send queues the payload for batching; it may block when the input
// buffer is full.
func (b *batcher) send(p Payload) {
	b.input <- p
}
// flush asks the run loop to emit any pending payloads and blocks until
// that flush has completed.
func (b *batcher) flush() {
	done := make(chan struct{})
	b.sync <- func() { close(done) }
	<-done
}
// run is the batcher's background loop. It accumulates encoded payloads
// and flushes them to the endpoint when the accumulated batch would
// exceed the byte limit, when it reaches the maximum hit count, on an
// explicit sync request, or after batchDelay of inactivity.
func (b *batcher) run() {
	size, payloads := 0, make([]string, 0, maxHitsPerBatch)
	flush := func() {
		if len(payloads) > 0 {
			if err := b.endpoint(payloads); err != nil {
				OnError(err)
			}
		}
		size, payloads = 0, payloads[:0]
	}
	for {
		select {
		case p := <-b.input:
			buf, err := b.encoder(p)
			if err != nil {
				OnError(err)
				continue
			}
			// Account for the newline separator between payloads.
			bufSizeWithNL := buf.Len() + 1
			// BUGFIX: flush when the *accumulated* batch would exceed the
			// size limit. The previous check compared only the single
			// payload's size against maxBatchSize, so `size` was tracked
			// but the batch limit was never actually enforced.
			if size+bufSizeWithNL > maxBatchSize {
				flush()
			}
			size += bufSizeWithNL
			payloads = append(payloads, buf.String())
			if len(payloads) == maxHitsPerBatch {
				flush()
			}
		case done := <-b.sync:
			flush()
			done()
		case <-time.After(batchDelay):
			flush()
		}
	}
}
|
package ga
import (
"fmt"
"testing"
)
// TestDefaultOutputFunc checks that the default output function can be
// invoked without panicking; there is no observable output to assert on.
func TestDefaultOutputFunc(t *testing.T) {
	t.Parallel()
	genA := NewGeneticAlgorithm()
	genA.Output("Test")
	t.Log("Didn't panic. Assume success. No other way to test.")
}
// TestSetOutputFunc verifies that a custom output function installed via
// SetOutputFunc receives exactly what Output is called with.
func TestSetOutputFunc(t *testing.T) {
	t.Parallel()

	var genA GeneticAlgorithm
	var gotOutput string
	genA.SetOutputFunc(func(a ...interface{}) {
		gotOutput = fmt.Sprint(a)
	})

	genA.Output("output string")

	const expectedOutput = "[output string]"
	if expectedOutput != gotOutput {
		t.Error("Output func not set. Expected:", expectedOutput, "Got:", gotOutput)
	} else {
		t.Log("Output func set correctly. Expected:", expectedOutput, "Got:", gotOutput)
	}
}
|
package game
import (
"time"
log "github.com/sirupsen/logrus"
"github.com/talesmud/talesmud/pkg/entities"
"github.com/talesmud/talesmud/pkg/mudserver/game/messages"
)
// handleDefaultMessage forwards a plain chat message to the sender's
// current room, attributed to the character name when one is attached,
// otherwise to the user's nickname.
func (game *Game) handleDefaultMessage(message *messages.Message) {
	sender := ""
	if message.FromUser != nil {
		sender = message.FromUser.Nickname
		if message.Character != nil {
			sender = message.Character.Name
		}
	}

	out := messages.NewRoomBasedMessage(sender, message.Data)
	if message.Character != nil {
		out.AudienceID = message.Character.CurrentRoomID
	}
	game.SendMessage() <- out
}
// handleUserQuit marks the user offline and removes their active
// character from its current room, broadcasting a "left" notice to the
// remaining occupants.
//
// BUGFIX: the character and room lookups previously discarded their
// errors and dereferenced the results, which panics when either lookup
// fails (e.g. a user quitting before ever selecting a character); both
// are now guarded.
func (game *Game) handleUserQuit(user *entities.User) {
	log.Info("Handle User Quit " + user.Nickname)

	// set user offline
	user.IsOnline = false
	game.Facade.UsersService().Update(user.RefID, user)

	character, err := game.Facade.CharactersService().FindByID(user.LastCharacter)
	if err != nil || character == nil {
		return
	}
	room, err := game.Facade.RoomsService().FindByID(character.CurrentRoomID)
	if err != nil || room == nil {
		return
	}

	//TODO: move update to queue
	room.RemoveCharacter(character.ID)
	game.Facade.RoomsService().Update(room.ID, room)

	game.SendMessage() <- messages.CharacterLeftRoom{
		MessageResponse: messages.MessageResponse{
			Audience:   messages.MessageAudienceRoomWithoutOrigin,
			OriginID:   character.ID,
			AudienceID: character.CurrentRoomID,
			Message:    character.Name + " left.",
		},
	}
}
// attachCharacterToMessage resolves and attaches the sender's character
// to msg via the user's LastCharacter reference. It is a no-op when a
// character is already attached or when no user/character reference is
// available (e.g. a processed message whose user was removed).
//
// BUGFIX: fixed the log-message typo ("Couldt") and included the actual
// lookup error in the log entry.
func (game *Game) attachCharacterToMessage(msg *messages.Message) {
	if msg.Character != nil {
		return
	}
	// could be a processed message that got the user removed
	if msg.FromUser == nil || msg.FromUser.LastCharacter == "" {
		return
	}

	character, err := game.Facade.CharactersService().FindByID(msg.FromUser.LastCharacter)
	if err != nil {
		log.WithError(err).Error("Could not load character for user")
		return
	}
	msg.Character = character
}
// handleUserJoined selects the user's active character (auto-picking the
// first one when none was previously active) and dispatches a
// "selectcharacter" command on their behalf; users without characters
// are asked to create one.
func (game *Game) handleUserJoined(user *entities.User) {
	// get active character for user
	if user.LastCharacter == "" {
		if chars, err := game.Facade.CharactersService().FindAllForUser(user.ID); err == nil {
			// take first character for now
			// TODO: let the player choose?
			//
			// NOTE(review): when the user has zero characters but the lookup
			// succeeded, this falls through with an empty LastCharacter and
			// FindByID below runs with "" — confirm that is intended.
			if len(chars) > 0 {
				user.LastCharacter = chars[0].ID
				user.LastSeen = time.Now()
				user.IsOnline = true
				//TODO: send updates via message queue?
				game.Facade.UsersService().Update(user.RefID, user)
			}
		} else {
			// player has no character yet, respond with createCharacter message
			game.SendMessage() <- messages.NewCreateCharacterMessage(user.ID)
			return
		}
	}
	if character, err := game.Facade.CharactersService().FindByID(user.LastCharacter); err != nil {
		log.WithField("user", user.Name).Error("Could not select character for user")
		// player character may be broken, let the user create a new one
		//game.SendMessage(messages.NewCreateCharacterMessage(user.ID))
		// send list characters command
		game.onMessageReceived <- messages.NewMessage(user, "lc")
	} else {
		// send message as the user would do it
		selectCharacterMsg := messages.NewMessage(user, "selectcharacter "+character.Name)
		game.OnMessageReceived() <- selectCharacterMsg
	}
}
|
package parsehtml
import (
"strconv"
"fmt"
"sync"
)
// MainProcess fetches all recommended user ids and then collects the
// user info for each of them concurrently, one goroutine per id page.
//
// BUGFIXES:
//  1. the per-page goroutine previously closed over the shared loop
//     variable `v`, so pages could be skipped or processed twice; the
//     current value is now passed in as an argument;
//  2. the old drain loop expected exactly one result per user id and
//     dead-locked as soon as any GetUserInfo call failed (failed lookups
//     send nothing); the channel is now closed once all workers finish
//     and drained with range.
func MainProcess() {
	// Fetch every user id, one slice per listing page.
	allUids := getAllUids()
	urlPref := "https://www.jianshu.com/u/"

	// Collect all user infos through a buffered channel.
	var userInfos []UserInfo
	var channel = make(chan *UserInfo, 50)
	var wg sync.WaitGroup
	for _, v := range allUids {
		wg.Add(1)
		go func(uids *[]string) {
			defer wg.Done()
			for _, uid := range *uids {
				userInfo, err := GetUserInfo(urlPref + uid)
				if err != nil {
					continue
				}
				userInfo.UserId = uid
				channel <- userInfo
			}
		}(v)
	}

	// Close the channel once every worker is done so the drain terminates.
	go func() {
		wg.Wait()
		close(channel)
	}()
	for userInfo := range channel {
		userInfos = append(userInfos, *userInfo)
	}

	fmt.Println(len(userInfos))
	fmt.Println(userInfos)
}
// getAllUids scrapes the first 100 recommended-user listing pages in
// parallel (one goroutine per page) and returns one id slice per page.
func getAllUids() []*([]string) {
	const pages = 100
	base := "https://www.jianshu.com/recommendations/users?page="
	ch := make(chan *([]string), pages)

	// Fetch the pages concurrently.
	for i := 1; i <= pages; i++ {
		go getUserIdSync(base+strconv.Itoa(i), ch)
	}

	// Gather exactly one result per page.
	var allUIds []*([]string)
	for i := 0; i < pages; i++ {
		allUIds = append(allUIds, <-ch)
	}
	return allUIds
}
// getUserIdSync fetches the user ids listed at url and delivers them on
// ch; meant to be run as a goroutine by getAllUids.
func getUserIdSync(url string, ch chan *([]string)) {
	uids := GetUsersId(url)
	ch <- &uids
}
// getUserArtiDetail fetches the detail content of every article published
// by the given user. Listing pages (9 articles each) are scraped
// concurrently, then each abstract is resolved to its detail sequentially.
//
// BUGFIX: the per-page goroutines previously appended to a shared slice
// without synchronization — a data race that could drop pages; each
// goroutine now writes to its own pre-sized slot, and the loop index is
// passed as an argument to avoid shared-variable capture.
func getUserArtiDetail(userinfo UserInfo) map[string]string {
	total := userinfo.ArticalNum

	// Pagination: 9 articles per listing page.
	const pageSize = 9
	totalPage := total / pageSize
	if total%pageSize != 0 {
		totalPage++
	}

	var wg sync.WaitGroup
	resultSet := make([]map[string]string, totalPage)
	for i := 1; i <= totalPage; i++ {
		url := userinfo.UserUrl + "?order_by=shared_at&page=" + strconv.Itoa(i)
		wg.Add(1)
		go func(slot int, pageURL string) {
			defer wg.Done()
			resultSet[slot] = ParseAbs(pageURL)
		}(i-1, url)
	}
	wg.Wait()

	detailArtil := make(map[string]string)
	for _, result := range resultSet {
		for k, v := range result {
			detailArtil[k] = ParseDetail(v)
		}
	}
	return detailArtil
}
// getUserKeyWord extracts up to 20 keywords characterizing a user from
// the detail content of their published articles.
//
// NOTE(review): not implemented yet — always returns nil.
func getUserKeyWord(detailArtil map[string]string) []string{
	return nil
}
|
// Copyright 2020 Trey Dockendorf
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors
import (
"bytes"
"context"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// mmpmonTimeout bounds a single mmpmon invocation, in seconds.
	mmpmonTimeout = kingpin.Flag("collector.mmpmon.timeout", "Timeout for mmpmon execution").Default("5").Int()
	// mmpmonMap maps mmpmon output keywords to PerfMetrics field names.
	mmpmonMap = map[string]string{
		"_fs_": "FS",
		"_nn_": "NodeName",
		"_br_": "ReadBytes",
		"_bw_": "WriteBytes",
		"_rdc_": "Reads",
		"_wc_": "Writes",
		"_oc_": "Opens",
		"_cc_": "Closes",
		"_dir_": "ReadDir",
		"_iu_": "InodeUpdates",
	}
	// MmpmonExec is the mmpmon runner, exposed as a variable — presumably
	// so tests can substitute a stub; TODO confirm.
	MmpmonExec = mmpmon
)
// PerfMetrics holds the per-filesystem counters reported by one
// `mmpmon fs_io_s` output line; fields are populated by name via
// mmpmonMap and reflection in mmpmon_parse.
type PerfMetrics struct {
	FS string
	NodeName string
	ReadBytes int64
	WriteBytes int64
	Reads int64
	Writes int64
	Opens int64
	Closes int64
	ReadDir int64
	InodeUpdates int64
}
// MmpmonCollector exposes GPFS performance counters (parsed from mmpmon
// output) as Prometheus metrics.
type MmpmonCollector struct {
	read_bytes *prometheus.Desc
	write_bytes *prometheus.Desc
	operations *prometheus.Desc
	info *prometheus.Desc
	logger log.Logger
}
// init registers this collector under the name "mmpmon", enabled by default.
func init() {
	registerCollector("mmpmon", true, NewMmpmonCollector)
}
// NewMmpmonCollector builds the collector exposing GPFS performance
// metrics parsed from `mmpmon fs_io_s` output.
//
// BUGFIX: corrected the help-text typo "operationgs" -> "operations".
func NewMmpmonCollector(logger log.Logger) Collector {
	return &MmpmonCollector{
		read_bytes: prometheus.NewDesc(prometheus.BuildFQName(namespace, "perf", "read_bytes_total"),
			"GPFS read bytes", []string{"fs"}, nil),
		write_bytes: prometheus.NewDesc(prometheus.BuildFQName(namespace, "perf", "write_bytes_total"),
			"GPFS write bytes", []string{"fs"}, nil),
		operations: prometheus.NewDesc(prometheus.BuildFQName(namespace, "perf", "operations_total"),
			"GPFS operations reported by mmpmon", []string{"fs", "operation"}, nil),
		info: prometheus.NewDesc(prometheus.BuildFQName(namespace, "perf", "info"),
			"GPFS client information", []string{"fs", "nodename"}, nil),
		logger: logger,
	}
}
// Describe sends every metric description of this collector on ch.
func (c *MmpmonCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, d := range []*prometheus.Desc{c.read_bytes, c.write_bytes, c.operations, c.info} {
		ch <- d
	}
}
// Collect runs mmpmon, converts each parsed filesystem entry into counter
// metrics, then appends the collector's own health metrics (error flag,
// timeout flag, collection duration).
func (c *MmpmonCollector) Collect(ch chan<- prometheus.Metric) {
	level.Debug(c.logger).Log("msg", "Collecting mmpmon metrics")
	begin := time.Now()

	timeout := 0
	errorMetric := 0
	perfs, err := c.collect()
	switch {
	case err == context.DeadlineExceeded:
		timeout = 1
		level.Error(c.logger).Log("msg", "Timeout executing mmpmon")
	case err != nil:
		level.Error(c.logger).Log("msg", err)
		errorMetric = 1
	}

	for _, perf := range perfs {
		ch <- prometheus.MustNewConstMetric(c.read_bytes, prometheus.CounterValue, float64(perf.ReadBytes), perf.FS)
		ch <- prometheus.MustNewConstMetric(c.write_bytes, prometheus.CounterValue, float64(perf.WriteBytes), perf.FS)
		// The per-operation counters share one description, labelled by kind.
		ops := []struct {
			label string
			value int64
		}{
			{"reads", perf.Reads},
			{"writes", perf.Writes},
			{"opens", perf.Opens},
			{"closes", perf.Closes},
			{"read_dir", perf.ReadDir},
			{"inode_updates", perf.InodeUpdates},
		}
		for _, op := range ops {
			ch <- prometheus.MustNewConstMetric(c.operations, prometheus.CounterValue, float64(op.value), perf.FS, op.label)
		}
		ch <- prometheus.MustNewConstMetric(c.info, prometheus.GaugeValue, 1, perf.FS, perf.NodeName)
	}

	ch <- prometheus.MustNewConstMetric(collectError, prometheus.GaugeValue, float64(errorMetric), "mmpmon")
	ch <- prometheus.MustNewConstMetric(collecTimeout, prometheus.GaugeValue, float64(timeout), "mmpmon")
	ch <- prometheus.MustNewConstMetric(collectDuration, prometheus.GaugeValue, time.Since(begin).Seconds(), "mmpmon")
}
// collect invokes mmpmon under the configured timeout and parses its
// output into PerfMetrics entries.
func (c *MmpmonCollector) collect() ([]PerfMetrics, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*mmpmonTimeout)*time.Second)
	defer cancel()
	out, err := MmpmonExec(ctx)
	if err != nil {
		return nil, err
	}
	return mmpmon_parse(out, c.logger), nil
}
// mmpmon runs `mmpmon -s -p` under sudo, feeding it the fs_io_s request
// on stdin, and returns the raw machine-readable output. A context
// deadline is surfaced as context.DeadlineExceeded.
func mmpmon(ctx context.Context) (string, error) {
	var stdout bytes.Buffer
	cmd := execCommand(ctx, *sudoCmd, "/usr/lpp/mmfs/bin/mmpmon", "-s", "-p")
	cmd.Stdin = strings.NewReader("fs_io_s\n")
	cmd.Stdout = &stdout

	err := cmd.Run()
	switch {
	case ctx.Err() == context.DeadlineExceeded:
		return "", ctx.Err()
	case err != nil:
		return "", err
	}
	return stdout.String(), nil
}
// mmpmon_parse converts the machine-readable output of `mmpmon -p` into
// PerfMetrics values, one per underscore-prefixed line. Keyword tokens
// (e.g. _br_) are mapped onto struct fields via mmpmonMap and assigned
// with reflection; int64 fields are parsed from their string value.
//
// BUGFIX: guard the values[i] access — a line carrying more keyword
// tokens than values previously caused an index-out-of-range panic.
func mmpmon_parse(out string, logger log.Logger) []PerfMetrics {
	var metrics []PerfMetrics
	lines := strings.Split(out, "\n")
	for _, l := range lines {
		if !strings.HasPrefix(l, "_") {
			continue
		}
		// Split the line into keyword tokens (_xx_) and their values;
		// mmpmon emits them interleaved after the line identifier.
		var headers []string
		var values []string
		items := strings.Split(l, " ")
		for _, i := range items[1:] {
			if strings.HasPrefix(i, "_") {
				headers = append(headers, i)
			} else {
				values = append(values, i)
			}
		}
		var perf PerfMetrics
		ps := reflect.ValueOf(&perf) // pointer to struct - addressable
		s := ps.Elem()               // struct
		for i, h := range headers {
			if i >= len(values) {
				level.Error(logger).Log("msg", fmt.Sprintf("Missing value for %s", h))
				break
			}
			if field, ok := mmpmonMap[h]; ok {
				f := s.FieldByName(field)
				if f.Kind() == reflect.String {
					f.SetString(values[i])
				} else if f.Kind() == reflect.Int64 {
					if val, err := strconv.ParseInt(values[i], 10, 64); err == nil {
						f.SetInt(val)
					} else {
						level.Error(logger).Log("msg", fmt.Sprintf("Error parsing %s value %s: %s", h, values[i], err.Error()))
					}
				}
			}
		}
		metrics = append(metrics, perf)
	}
	return metrics
}
|
package main
import "fmt"
// main prompts for a quoted name on stdin (%q requires the input to be
// wrapped in double quotes) and greets the user, or complains when no
// value was scanned.
func main() {
	var name string
	fmt.Println("What's your name")
	//inputs, _ := fmt.Scanf("%s", &name) // input from scanf separated by space -> prints each value with the new-line character
	inputs, _ := fmt.Scanf("%q", &name) // input has to be within ""
	if inputs == 0 {
		fmt.Printf("You must enter a name!\n")
	} else if inputs == 1 {
		fmt.Printf("Hello %s! You input %d input value\n", name, inputs)
	}
}
|
package cliutil
import (
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/logger"
)
// Action wraps actionFunc with the shared cliutil error handler.
func Action(actionFunc cli.ActionFunc) cli.ActionFunc {
	return WithErrorHandler(actionFunc)
}
// ConfiguredAction runs actionFunc after loading the configuration file,
// discarding any configuration warnings.
func ConfiguredAction(actionFunc cli.ActionFunc) cli.ActionFunc {
	// Adapt actionFunc to the signature ConfiguredActionWithWarnings
	// expects, ignoring the warnings parameter.
	dropWarnings := func(ctx *cli.Context, _ string) error {
		return actionFunc(ctx)
	}
	return ConfiguredActionWithWarnings(dropWarnings)
}
// ConfiguredActionWithWarnings is just like ConfiguredAction, but the
// wrapped action also receives any warnings produced while reading the
// configuration file.
func ConfiguredActionWithWarnings(actionFunc func(*cli.Context, string) error) cli.ActionFunc {
	return WithErrorHandler(func(c *cli.Context) error {
		warnings, err := setFlagsFromConfigFile(c)
		if err != nil {
			return err
		}
		return actionFunc(c, warnings)
	})
}
// setFlagsFromConfigFile loads the configuration file referenced by the
// cli context and applies it as a flag input source, returning any
// configuration warnings. A missing config file is not an error.
func setFlagsFromConfigFile(c *cli.Context) (configWarnings string, err error) {
	const errorExitCode = 1
	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)

	inputSource, warnings, err := config.ReadConfigFile(c, log)
	if err != nil {
		if err == config.ErrNoConfigFile {
			// Running without a config file is perfectly fine.
			return "", nil
		}
		return "", cli.Exit(err, errorExitCode)
	}

	if applyErr := altsrc.ApplyInputSource(c, inputSource); applyErr != nil {
		return "", cli.Exit(applyErr, errorExitCode)
	}
	return warnings, nil
}
|
package burrow
import (
"time"
"github.com/gorilla/websocket"
)
const (
	// writeWait is the time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// pongWait is the time allowed between pongs from the peer.
	pongWait = 60 * time.Second
	// pingPeriod is how often pings are sent; kept below pongWait.
	pingPeriod = (pongWait * 9) / 10
	// maxMessageSize is the maximum inbound message size in bytes.
	maxMessageSize = 4096
)
// Connection is a middleman between the websocket connection and the hub.
type Connection struct {
	// ws is the underlying websocket connection.
	ws *websocket.Conn
	// messages queues outbound responses for the writer goroutine (speak).
	messages chan *ResponseMessage
}
// upgrader promotes HTTP requests to websocket connections with 4 KiB
// read/write buffers.
var upgrader = websocket.Upgrader{
	ReadBufferSize: 4096,
	WriteBufferSize: 4096,
}
// Close shuts down the outbound message queue and the underlying
// websocket.
// NOTE(review): Respond must not be called after Close — sending on the
// closed messages channel would panic.
func (c *Connection) Close() {
	close(c.messages)
	c.ws.Close()
}
// Write wraps content in a success response and queues it for delivery.
func (c *Connection) Write(content interface{}) {
	c.Respond(SuccessMsg(content))
}
// Respond queues msg for delivery by the writer goroutine; it blocks
// until the speak loop receives it.
func (c *Connection) Respond(msg *ResponseMessage) {
	c.messages <- msg
}
// listen is the reader loop: it starts the writer goroutine (speak), then
// decodes request messages from the websocket, validates them, and
// dispatches each to the matching entry in methods. It returns on the
// first read error.
// NOTE(review): errors from SetReadDeadline are ignored here — confirm
// that is acceptable for this transport.
func (c *Connection) listen(methods map[string]Method) *CodedError {
	go c.speak()
	//defer c.Close()
	c.ws.SetReadLimit(maxMessageSize)
	c.ws.SetReadDeadline(time.Now().Add(pongWait))
	// Every pong extends the read deadline, keeping the connection alive.
	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		var req RequestMessage
		if err := c.ws.ReadJSON(&req); err != nil {
			// Undecodable request: report it to the peer and stop reading.
			cerr := cerrorf(RpcInvalidRequest, err.Error())
			c.respond(cerr.ResponseMessage())
			sprintf("readjson error %s", cerr)
			return cerr
		} else if cerr := req.Validate(); cerr != nil {
			// Invalid (but decodable) request: report and keep listening.
			sprintf("validate error %s", cerr)
			c.respond(cerr.ResponseMessage())
		} else {
			ctx := &RequestContext{
				Request: &req,
				Connection: c,
			}
			if method, ok := methods[ctx.Request.MethodName()]; !ok {
				msg := cerrorf(RpcMethodNotFound, "The method does not exist! %s", method).ResponseMessage()
				warn(c.respond(msg), "conn respond error")
			} else {
				if msg := method.Execute(ctx); msg != nil {
					warn(c.respond(msg), "conn respond error")
				}
			}
		}
	}
}
// notify formats the given values (fmt.Sprintf-style) and sends the
// result to the peer as a success response.
func (c *Connection) notify(format string, vals ...interface{}) error {
	msg := sprintf(format, vals...)
	return c.respond(SuccessMsg(msg))
}
// respond marshals msg and writes it to the websocket as a text message.
//
// BUGFIX: the write error was previously discarded (respond returned nil
// after any successful marshal); the socket-write error is now
// propagated so callers such as speak can detect a dead connection.
func (c *Connection) respond(msg *ResponseMessage) error {
	payload, err := msg.Marshal()
	if err != nil {
		return err
	}
	//info("ws.write: %s", payload)
	return c.write(websocket.TextMessage, payload)
}
// write writes a message with the given websocket message type and
// payload, applying the standard write deadline first.
// NOTE(review): the SetWriteDeadline error is ignored — confirm.
func (c *Connection) write(mt int, payload []byte) error {
	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
	return c.ws.WriteMessage(mt, payload)
}
// speak is the writer goroutine: it serializes queued responses onto the
// websocket and keeps the connection alive with periodic pings.
//
// BUGFIX: detect closure of c.messages (done by Close). A receive from
// the closed channel previously yielded a nil message forever, spinning
// and calling respond(nil); the loop now exits cleanly instead.
func (c *Connection) speak() {
	//subscribe to tileset channel...
	//hub.subscribe <- c
	pinger := time.NewTicker(pingPeriod)
	defer func() {
		pinger.Stop()
		//c.Close()
	}()
	for {
		select {
		case msg, ok := <-c.messages:
			if !ok {
				// Connection was closed; stop writing.
				return
			}
			if err := c.respond(msg); err != nil {
				warn(err, "speaker")
				return
			}
		case <-pinger.C:
			if err := c.write(websocket.PingMessage, []byte{}); err != nil {
				warn(err, "pinger")
				return
			}
		}
	}
}
// NewConnection wraps the given websocket in a Connection with an
// unbuffered outbound message queue.
func NewConnection(ws *websocket.Conn) *Connection {
	return &Connection{
		ws: ws,
		messages: make(chan *ResponseMessage),
	}
}
|
package camo
import (
"encoding/json"
"expvar"
"io"
)
// MetricInt is an expvar.Int that serializes as a bare JSON number
// rather than expvar's default string form.
type MetricInt struct {
	expvar.Int
}

// MarshalJSON renders the counter's current value as a JSON number.
func (i *MetricInt) MarshalJSON() ([]byte, error) {
	v := i.Value()
	return json.Marshal(v)
}
// IOMetric counts the bytes read from and written to a stream.
type IOMetric struct {
	ReadBytes *MetricInt `json:"read_bytes"`
	WriteBytes *MetricInt `json:"write_bytes"`
}

// NewIOMetric returns an IOMetric with both counters initialized to zero.
func NewIOMetric() *IOMetric {
	return &IOMetric{
		ReadBytes: new(MetricInt),
		WriteBytes: new(MetricInt),
	}
}
// TunnelMetrics extends IOMetric with tunnel stream accounting
// (stream count, lag and drop counters).
type TunnelMetrics struct {
	*IOMetric
	Streams *MetricInt `json:"streams"`
	Lags *MetricInt `json:"lags"`
	Drops *MetricInt `json:"drops"`
}

// NewTunnelMetrics returns a TunnelMetrics with every counter initialized.
func NewTunnelMetrics() *TunnelMetrics {
	return &TunnelMetrics{
		IOMetric: NewIOMetric(),
		Streams: new(MetricInt),
		Lags: new(MetricInt),
		Drops: new(MetricInt),
	}
}
// BufferPoolMetrics tracks the free and total byte counts of the buffer
// pool.
type BufferPoolMetrics struct {
	FreeBytes *MetricInt `json:"free_bytes"`
	TotalBytes *MetricInt `json:"total_bytes"`
}

// NewBufferPoolMetrics returns a BufferPoolMetrics with both counters
// initialized to zero.
func NewBufferPoolMetrics() *BufferPoolMetrics {
	return &BufferPoolMetrics{
		FreeBytes: new(MetricInt),
		TotalBytes: new(MetricInt),
	}
}
// Metrics aggregates interface, tunnel and buffer-pool statistics.
type Metrics struct {
	Iface *IOMetric `json:"iface"`
	Tunnels *TunnelMetrics `json:"tunnels"`
	Buffer *BufferPoolMetrics `json:"buffer"`
}

// NewMetrics returns a Metrics tree with every counter initialized.
func NewMetrics() *Metrics {
	return &Metrics{
		Iface: NewIOMetric(),
		Tunnels: NewTunnelMetrics(),
		Buffer: NewBufferPoolMetrics(),
	}
}

// String returns a valid json string; the marshal error is intentionally
// ignored since the structure is static.
func (m *Metrics) String() string {
	b, _ := json.Marshal(m)
	return string(b)
}
// ioMetricWrapper instruments a ReadWriteCloser, accumulating byte
// counts into an IOMetric.
type ioMetricWrapper struct {
	rw io.ReadWriteCloser
	metric *IOMetric
}

// Read delegates to the wrapped reader and adds the number of bytes read
// to the ReadBytes counter (also on partial reads returning an error).
func (m *ioMetricWrapper) Read(b []byte) (int, error) {
	n, err := m.rw.Read(b)
	m.metric.ReadBytes.Add(int64(n))
	return n, err
}

// Write delegates to the wrapped writer and adds the number of bytes
// written to the WriteBytes counter.
func (m *ioMetricWrapper) Write(b []byte) (int, error) {
	n, err := m.rw.Write(b)
	m.metric.WriteBytes.Add(int64(n))
	return n, err
}

// Close closes the underlying stream.
func (m *ioMetricWrapper) Close() error {
	return m.rw.Close()
}

// WithIOMetric wraps rw so that all traffic through it is counted in
// metric.
func WithIOMetric(rw io.ReadWriteCloser, metric *IOMetric) io.ReadWriteCloser {
	return &ioMetricWrapper{
		rw: rw,
		metric: metric,
	}
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package qcloud
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/log"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/util/timeutils"
"yunion.io/x/pkg/utils"
billingapi "yunion.io/x/onecloud/pkg/apis/billing"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/billing"
)
// SlaveInstanceInfo describes the network placement of one replica
// as reported by the qcloud CDB API.
type SlaveInstanceInfo struct {
	Region string
	Vip    string
	VpcId  int
	Vport  int
	Zone   string
}

// SlaveInfo holds up to two replicas (second is only populated for
// the finance/multi-replica deployment).
type SlaveInfo struct {
	First  SlaveInstanceInfo
	Second SlaveInstanceInfo
}

// SDrInfo describes a disaster-recovery instance attached to a master.
type SDrInfo struct {
	Status       int
	Zone         string
	InstanceId   string
	Region       string
	SyncStatus   string
	InstanceName string
	InstanceType string
}

// SMasterInfo describes the master instance of a read-only or DR instance.
type SMasterInfo struct {
	Region        string
	RegionId      int
	ZoneId        int
	Zone          string
	InstanceId    string
	ResourceId    string
	Status        int
	InstanceName  string
	InstanceType  int
	TaskStatus    int
	Memory        int
	Volume        int
	DeviceType    string
	Qps           int
	VpcId         int
	SubnetId      int
	ExClusterId   string
	ExClusterName string
}

// SRoGroup describes a read-only instance group and its load-balancing
// configuration.
type SRoGroup struct {
	RoGroupMode    string
	RoGroupId      string
	RoGroupName    string
	RoOfflineDelay int
	RoMaxDelayTime int
	MinRoInGroup   int
	WeightMode     string
	Weight         int
	// RoInstances
	Vip           string
	Vport         int
	UniqVpcId     string
	UniqSubnetId  string
	RoGroupRegion string
	RoGroupZone   string
}

// SRoVipInfo is the virtual-IP information of a read-only instance.
type SRoVipInfo struct {
	RoVipStatus int
	RoSubnetId  int
	RoVpcId     int
	RoVport     int
	RoVip       string
}

// SMySQLInstance is one qcloud CDB (MySQL) instance as returned by
// DescribeDBInstances. Field names mirror the API response.
type SMySQLInstance struct {
	region *SRegion
	multicloud.SDBInstanceBase
	multicloud.QcloudTags

	AutoRenew        int
	CdbError         int
	Cpu              int
	CreateTime       time.Time
	DeadlineTime     string
	DeployGroupId    string
	DeployMode       int
	DeviceClass      string
	DeviceType       string
	DrInfo           []SDrInfo
	EngineVersion    string
	ExClusterId      string
	HourFeeStatus    int
	InitFlag         int
	InstanceId       string
	InstanceName     string
	InstanceType     int
	IsolateTime      string
	MasterInfo       SMasterInfo
	Memory           int
	OfflineTime      string
	PayType          int
	PhysicalId       string
	ProjectId        int
	ProtectMode      string
	Qps              int
	Region           string
	RegionId         string
	ResourceId       string
	RoGroups         []SRoGroup
	RoVipInfo        SRoVipInfo
	SecurityGroupIds []string
	SlaveInfo        SlaveInfo
	Status           int
	SubnetId         int
	//TagList": null,
	TaskStatus   int
	UniqSubnetId string
	UniqVpcId    string
	Vip          string
	Volume       int
	VpcId        int
	Vport        int
	WanDomain    string
	WanPort      int
	WanStatus    int
	Zone         string
	ZoneId       int
	ZoneName     string
}
// GetId returns the provider-native instance id.
func (self *SMySQLInstance) GetId() string {
	return self.InstanceId
}

// GetGlobalId returns the globally unique id (same as the native id for qcloud).
func (self *SMySQLInstance) GetGlobalId() string {
	return self.InstanceId
}

// GetName returns the display name, falling back to the id when unnamed.
func (self *SMySQLInstance) GetName() string {
	if len(self.InstanceName) > 0 {
		return self.InstanceName
	}
	return self.InstanceId
}

// GetDiskSizeGB returns the allocated storage in GB.
func (self *SMySQLInstance) GetDiskSizeGB() int {
	return self.Volume
}

// GetEngine always reports MySQL; this type only models CDB-for-MySQL.
func (self *SMySQLInstance) GetEngine() string {
	return api.DBINSTANCE_TYPE_MYSQL
}

// GetEngineVersion returns the MySQL engine version string.
func (self *SMySQLInstance) GetEngineVersion() string {
	return self.EngineVersion
}

// GetIVpcId returns the unified VPC id the instance lives in.
func (self *SMySQLInstance) GetIVpcId() string {
	return self.UniqVpcId
}

// Refresh re-reads the instance from the API and updates self in place.
func (self *SMySQLInstance) Refresh() error {
	rds, err := self.region.GetMySQLInstanceById(self.InstanceId)
	if err != nil {
		return errors.Wrapf(err, "GetMySQLInstanceById(%s)", self.InstanceId)
	}
	return jsonutils.Update(self, rds)
}

// GetInstanceType synthesizes a "<cpu>核<mem>MB" sku string; ChangeConfig
// parses this exact format back, so keep the two in sync.
func (self *SMySQLInstance) GetInstanceType() string {
	return fmt.Sprintf("%d核%dMB", self.Cpu, self.Memory)
}

// GetMaintainTime returns the maintenance window as text; errors are
// logged and mapped to an empty string (best effort).
func (self *SMySQLInstance) GetMaintainTime() string {
	timeWindow, err := self.region.DescribeMySQLTimeWindow(self.InstanceId)
	if err != nil {
		log.Errorf("DescribeMySQLTimeWindow %s error: %v", self.InstanceId, err)
		return ""
	}
	return timeWindow.String()
}

// GetDBNetworks returns the single subnet/IP attachment of the instance.
func (self *SMySQLInstance) GetDBNetworks() ([]cloudprovider.SDBInstanceNetwork, error) {
	return []cloudprovider.SDBInstanceNetwork{
		cloudprovider.SDBInstanceNetwork{NetworkId: self.UniqSubnetId, IP: self.Vip},
	}, nil
}

// GetConnectionStr returns the public endpoint, or "" when WAN access
// is not enabled (WanStatus != 1).
func (self *SMySQLInstance) GetConnectionStr() string {
	if self.WanStatus == 1 {
		return fmt.Sprintf("%s:%d", self.WanDomain, self.WanPort)
	}
	return ""
}

// GetInternalConnectionStr returns the in-VPC endpoint.
func (self *SMySQLInstance) GetInternalConnectionStr() string {
	return fmt.Sprintf("%s:%d", self.Vip, self.Vport)
}

// Reboot restarts the instance and waits for the async task to finish.
func (self *SMySQLInstance) Reboot() error {
	return self.region.RebootMySQLInstance(self.InstanceId)
}
// rdsInstanceTypeRegexp matches the "<cpu>核<mem>MB" sku string produced by
// GetInstanceType. Compiled once instead of on every ChangeConfig call.
var rdsInstanceTypeRegexp = regexp.MustCompile(`(\d{1,4})核(\d{1,20})MB$`)

// ChangeConfig upgrades the instance memory and/or disk size.
// When opts.InstanceType is set it must be in GetInstanceType's format and
// the memory size is parsed out of it; otherwise the current memory size is
// kept. A zero opts.DiskSizeGB likewise keeps the current disk size.
func (self *SMySQLInstance) ChangeConfig(ctx context.Context, opts *cloudprovider.SManagedDBInstanceChangeConfig) error {
	mb := self.GetVmemSizeMB()
	if len(opts.InstanceType) > 0 {
		params := rdsInstanceTypeRegexp.FindStringSubmatch(opts.InstanceType)
		if len(params) != 3 {
			return fmt.Errorf("invalid rds instance type %s", opts.InstanceType)
		}
		var err error
		// params[2] is all digits per the regexp, but surface any
		// overflow instead of silently ignoring it as before.
		mb, err = strconv.Atoi(params[2])
		if err != nil {
			return fmt.Errorf("invalid memory size in rds instance type %s", opts.InstanceType)
		}
	}
	if opts.DiskSizeGB == 0 {
		opts.DiskSizeGB = self.GetDiskSizeGB()
	}
	return self.region.UpgradeMySQLDBInstance(self.InstanceId, mb, opts.DiskSizeGB)
}
// GetMasterInstanceId returns the master's id (empty for a master itself).
func (self *SMySQLInstance) GetMasterInstanceId() string {
	return self.MasterInfo.InstanceId
}

// GetSecurityGroupIds returns the attached security group ids, querying
// the API when the listing response did not include them. BASIC (single
// node) instances do not support security groups, so return empty.
func (self *SMySQLInstance) GetSecurityGroupIds() ([]string, error) {
	if len(self.SecurityGroupIds) > 0 {
		return self.SecurityGroupIds, nil
	}
	if self.DeviceType == "BASIC" {
		return []string{}, nil
	}
	secgroups, err := self.region.DescribeMySQLDBSecurityGroups(self.InstanceId)
	if err != nil {
		return []string{}, errors.Wrapf(err, "DescribeMySQLDBSecurityGroups")
	}
	ids := []string{}
	for i := range secgroups {
		ids = append(ids, secgroups[i].SecurityGroupId)
	}
	return ids, nil
}

// SetSecurityGroups replaces the instance's security group bindings.
func (self *SMySQLInstance) SetSecurityGroups(ids []string) error {
	return self.region.ModifyMySQLInstanceSecurityGroups(self.InstanceId, ids)
}

// ModifyMySQLInstanceSecurityGroups rebinds secIds to the instance via
// the CDB ModifyDBInstanceSecurityGroups API.
func (self *SRegion) ModifyMySQLInstanceSecurityGroups(rdsId string, secIds []string) error {
	params := map[string]string{
		"InstanceId": rdsId,
	}
	for idx, id := range secIds {
		params[fmt.Sprintf("SecurityGroupIds.%d", idx)] = id
	}
	_, err := self.cdbRequest("ModifyDBInstanceSecurityGroups", params)
	return err
}

// Renew extends a prepaid instance by the number of months in bc.
func (self *SMySQLInstance) Renew(bc billing.SBillingCycle) error {
	month := bc.GetMonths()
	return self.region.RenewMySQLDBInstance(self.InstanceId, month)
}

// OpenPublicConnection enables WAN access; no-op unless currently closed.
func (self *SMySQLInstance) OpenPublicConnection() error {
	if self.WanStatus == 0 {
		return self.region.OpenMySQLWanService(self.InstanceId)
	}
	return nil
}

// ClosePublicConnection disables WAN access; no-op unless currently open.
func (self *SMySQLInstance) ClosePublicConnection() error {
	if self.WanStatus == 1 {
		return self.region.CloseMySQLWanService(self.InstanceId)
	}
	return nil
}

// GetPort returns the in-VPC service port.
func (self *SMySQLInstance) GetPort() int {
	return self.Vport
}
// GetStatus maps the raw CDB (InitFlag, Status, TaskStatus) triple onto
// the generic DB-instance status constants. Isolation (Status 4/5) takes
// precedence over any running task; otherwise the task state wins.
func (self *SMySQLInstance) GetStatus() string {
	// InitFlag == 0: instance created but not yet initialized.
	if self.InitFlag == 0 {
		return api.DBINSTANCE_INIT
	}
	switch self.Status {
	case 4:
		return api.DBINSTANCE_ISOLATING
	case 5:
		return api.DBINSTANCE_ISOLATE
	}
	switch self.TaskStatus {
	case 0: // no task running: status reflects the instance itself
		switch self.Status {
		case 0:
			return api.DBINSTANCE_DEPLOYING
		case 1:
			return api.DBINSTANCE_RUNNING
		}
	case 1:
		return api.DBINSTANCE_UPGRADING
	case 2: //数据导入中 (importing data)
		return api.DBINSTANCE_IMPORTING
	case 3, 4: //开放关闭外网地址 (opening/closing the WAN endpoint)
		return api.DBINSTANCE_DEPLOYING
	case 10:
		return api.DBINSTANCE_REBOOTING
	case 12:
		return api.DBINSTANCE_MIGRATING
	default:
		return api.DBINSTANCE_DEPLOYING
	}
	// TaskStatus == 0 with an unrecognized Status value.
	return api.DBINSTANCE_UNKNOWN
}
// GetCategory normalizes the device type to a lowercase category,
// treating "universal" as high-availability ("ha").
func (self *SMySQLInstance) GetCategory() string {
	category := strings.ToLower(self.DeviceType)
	if category == "universal" {
		category = "ha"
	}
	return category
}

// GetStorageType reports cloud SSD for BASIC instances, local SSD otherwise.
func (self *SMySQLInstance) GetStorageType() string {
	switch self.DeviceType {
	case "BASIC":
		return api.QCLOUD_DBINSTANCE_STORAGE_TYPE_CLOUD_SSD
	default:
		return api.QCLOUD_DBINSTANCE_STORAGE_TYPE_LOCAL_SSD
	}
}

// GetCreatedAt converts the API's local (UTC+8) creation time to UTC.
func (self *SMySQLInstance) GetCreatedAt() time.Time {
	// 2019-12-25 09:00:43 #非UTC时间 (the API returns non-UTC time)
	return self.CreateTime.Add(time.Hour * -8)
}

// GetBillingType maps PayType 0 to prepaid, everything else to postpaid.
func (self *SMySQLInstance) GetBillingType() string {
	if self.PayType == 0 {
		return billingapi.BILLING_TYPE_PREPAID
	}
	return billingapi.BILLING_TYPE_POSTPAID
}

// SetAutoRenew toggles the auto-renew flag for this instance.
func (self *SMySQLInstance) SetAutoRenew(autoRenew bool) error {
	return self.region.ModifyMySQLAutoRenewFlag([]string{self.InstanceId}, autoRenew)
}

// IsAutoRenew reports whether auto-renew is enabled.
func (self *SMySQLInstance) IsAutoRenew() bool {
	return self.AutoRenew == 1
}

// GetExpiredAt returns the expiry time in UTC, preferring OfflineTime
// over DeadlineTime; zero when neither parses. Parse errors are ignored
// because an unparsable value simply yields the zero time.
func (self *SMySQLInstance) GetExpiredAt() time.Time {
	offline, _ := timeutils.ParseTimeStr(self.OfflineTime)
	if !offline.IsZero() {
		return offline.Add(time.Hour * -8)
	}
	deadline, _ := timeutils.ParseTimeStr(self.DeadlineTime)
	if !deadline.IsZero() {
		return deadline.Add(time.Hour * -8)
	}
	return time.Time{}
}

// GetVcpuCount returns the vCPU count.
func (self *SMySQLInstance) GetVcpuCount() int {
	return self.Cpu
}

// GetVmemSizeMB returns the memory size in MB.
func (self *SMySQLInstance) GetVmemSizeMB() int {
	return self.Memory
}

// GetZone1Id returns the master's zone.
func (self *SMySQLInstance) GetZone1Id() string {
	return self.Zone
}

// GetZone2Id returns the first replica's zone (empty if none).
func (self *SMySQLInstance) GetZone2Id() string {
	return self.SlaveInfo.First.Zone
}

// GetZone3Id returns the second replica's zone (empty if none).
func (self *SMySQLInstance) GetZone3Id() string {
	return self.SlaveInfo.Second.Zone
}

// GetProjectId returns the numeric project id as a string.
func (self *SMySQLInstance) GetProjectId() string {
	return fmt.Sprintf("%d", self.ProjectId)
}

// Delete isolates the instance first (qcloud's soft-delete), then takes
// the isolated instance offline permanently.
func (self *SMySQLInstance) Delete() error {
	err := self.region.IsolateMySQLDBInstance(self.InstanceId)
	if err != nil {
		return errors.Wrapf(err, "IsolateMySQLDBInstance")
	}
	return self.region.OfflineIsolatedMySQLInstances([]string{self.InstanceId})
}
// ListMySQLInstances returns one page of CDB instances (optionally
// filtered by ids) plus the total count. limit is clamped to the API
// maximum of 50 when out of range.
func (self *SRegion) ListMySQLInstances(ids []string, offset, limit int) ([]SMySQLInstance, int, error) {
	if limit < 1 || limit > 50 {
		limit = 50
	}
	params := map[string]string{
		"Offset": fmt.Sprintf("%d", offset),
		"Limit":  fmt.Sprintf("%d", limit),
	}
	for idx, id := range ids {
		params[fmt.Sprintf("InstanceIds.%d", idx)] = id
	}
	resp, err := self.cdbRequest("DescribeDBInstances", params)
	if err != nil {
		return nil, 0, errors.Wrapf(err, "DescribeDBInstances")
	}
	items := []SMySQLInstance{}
	err = resp.Unmarshal(&items, "Items")
	if err != nil {
		return nil, 0, errors.Wrapf(err, "resp.Unmarshal")
	}
	total, _ := resp.Float("TotalCount")
	return items, int(total), nil
}

// SAsyncRequestResult is the status of one CDB async task.
type SAsyncRequestResult struct {
	Info   string
	Status string
}

// DescribeMySQLAsyncRequestInfo fetches the state of the async task id.
func (self *SRegion) DescribeMySQLAsyncRequestInfo(id string) (*SAsyncRequestResult, error) {
	resp, err := self.cdbRequest("DescribeAsyncRequestInfo", map[string]string{"AsyncRequestId": id})
	if err != nil {
		return nil, errors.Wrapf(err, "DescribeAsyncRequestInfo")
	}
	result := SAsyncRequestResult{}
	err = resp.Unmarshal(&result)
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	return &result, nil
}
// waitAsyncAction polls DescribeMySQLAsyncRequestInfo every 10 seconds
// (up to 20 minutes) until the task identified by asyncRequestId reaches
// a terminal state. action and resId are used only for log/error context.
func (self *SRegion) waitAsyncAction(action string, resId, asyncRequestId string) error {
	if len(asyncRequestId) == 0 {
		return errors.Error("Missing AsyncRequestId")
	}
	return cloudprovider.Wait(time.Second*10, time.Minute*20, func() (bool, error) {
		result, err := self.DescribeMySQLAsyncRequestInfo(asyncRequestId)
		if err != nil {
			return false, errors.Wrapf(err, action)
		}
		log.Debugf("task %s(%s) for mysql instance %s status: %s", action, asyncRequestId, resId, result.Status)
		switch result.Status {
		case "FAILED", "KILLED", "REMOVED", "PAUSED":
			// result.Info is API-supplied text: never pass it as a
			// format string (a '%' in it would be misinterpreted).
			return true, errors.Errorf("%s", result.Info)
		case "SUCCESS":
			return true, nil
		default:
			return false, nil
		}
	})
}
// RebootMySQLInstance restarts one instance and blocks until the async
// restart task completes.
func (self *SRegion) RebootMySQLInstance(id string) error {
	resp, err := self.cdbRequest("RestartDBInstances", map[string]string{"InstanceIds.0": id})
	if err != nil {
		return errors.Wrapf(err, "RestartDBInstances")
	}
	asyncRequestId, _ := resp.GetString("AsyncRequestId")
	return self.waitAsyncAction("RestartDBInstances", id, asyncRequestId)
}

// DescribeMySQLDBInstanceInfo fetches detail info for a single instance.
func (self *SRegion) DescribeMySQLDBInstanceInfo(id string) (*SMySQLInstance, error) {
	resp, err := self.cdbRequest("DescribeDBInstanceInfo", map[string]string{"InstanceId": id})
	if err != nil {
		return nil, errors.Wrapf(err, "DescribeDBInstanceInfo")
	}
	result := SMySQLInstance{region: self}
	err = resp.Unmarshal(&result)
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	return &result, nil
}

// RenewMySQLDBInstance extends a prepaid instance by month months.
func (self *SRegion) RenewMySQLDBInstance(id string, month int) error {
	params := map[string]string{
		"InstanceId": id,
		"TimeSpan":   fmt.Sprintf("%d", month),
	}
	_, err := self.cdbRequest("RenewDBInstance", params)
	if err != nil {
		return errors.Wrapf(err, "RenewDBInstance")
	}
	return nil
}

// OfflineIsolatedMySQLInstances permanently removes already-isolated
// instances (the final step of deletion).
func (self *SRegion) OfflineIsolatedMySQLInstances(ids []string) error {
	params := map[string]string{}
	for idx, id := range ids {
		params[fmt.Sprintf("InstanceIds.%d", idx)] = id
	}
	_, err := self.cdbRequest("OfflineIsolatedInstances", params)
	if err != nil {
		return errors.Wrapf(err, "OfflineIsolatedInstances")
	}
	return nil
}

// ReleaseIsolatedMySQLDBInstances takes instances out of isolation
// (undoes a soft-delete), collecting per-instance failures, then waits
// until none of them are still in an isolating/isolated state.
func (self *SRegion) ReleaseIsolatedMySQLDBInstances(ids []string) error {
	params := map[string]string{}
	for idx, id := range ids {
		params[fmt.Sprintf("InstanceIds.%d", idx)] = id
	}
	resp, err := self.cdbRequest("ReleaseIsolatedDBInstances", params)
	if err != nil {
		return errors.Wrapf(err, "ReleaseIsolatedDBInstances")
	}
	result := []struct {
		InstanceId string
		Code       int
		Message    string
	}{}
	err = resp.Unmarshal(&result, "Items")
	if err != nil {
		return errors.Wrapf(err, "resp.Unmarshal")
	}
	msg := []string{}
	for i := range result {
		// Code != 0 marks a per-instance failure in the batch response.
		if result[i].Code != 0 {
			msg = append(msg, fmt.Sprintf("instance %s release isolate error: %s", result[i].InstanceId, result[i].Message))
		}
	}
	if len(msg) > 0 {
		return errors.Error(strings.Join(msg, " "))
	}
	return cloudprovider.Wait(time.Second, time.Minute*10, func() (bool, error) {
		instances, _, err := self.ListMySQLInstances(ids, 0, len(ids))
		if err != nil {
			return false, errors.Wrapf(err, "ListMySQLInstances")
		}
		for i := range instances {
			// Status 4/5: still isolating / isolated — keep waiting.
			if instances[i].Status == 4 || instances[i].Status == 5 {
				log.Debugf("mysql instance %s(%s) current be isolate", instances[i].InstanceName, instances[i].InstanceId)
				return false, nil
			}
		}
		return true, nil
	})
}
// IsolateMySQLDBInstance soft-deletes an instance. For postpaid
// instances the API returns an async task id to wait on; for prepaid
// ones no task id is returned, so poll the instance status until it
// reaches "isolated" (Status 5), for at most 5 minutes.
func (self *SRegion) IsolateMySQLDBInstance(id string) error {
	params := map[string]string{"InstanceId": id}
	resp, err := self.cdbRequest("IsolateDBInstance", params)
	if err != nil {
		return errors.Wrapf(err, "IsolateDBInstance")
	}
	asyncRequestId, _ := resp.GetString("AsyncRequestId")
	if len(asyncRequestId) > 0 {
		return self.waitAsyncAction("IsolateDBInstance", id, asyncRequestId)
	}
	return cloudprovider.Wait(time.Second*10, time.Minute*5, func() (bool, error) {
		instances, _, err := self.ListMySQLInstances([]string{id}, 0, 1)
		if err != nil {
			return false, errors.Wrapf(err, "ListMySQLInstances(%s)", id)
		}
		// Human-readable labels for the debug log only; an unknown
		// status simply logs as the empty string.
		statusMap := map[int]string{0: "创建中", 1: "运行中", 4: "隔离中", 5: "已隔离"}
		for _, rds := range instances {
			status := statusMap[rds.Status]
			log.Debugf("instance %s(%s) status %d(%s)", rds.InstanceName, rds.InstanceId, rds.Status, status)
			if rds.Status != 5 {
				return false, nil
			}
		}
		return true, nil
	})
}
// CloseMySQLWanService disables the public (WAN) endpoint and waits for
// the async task to finish.
func (self *SRegion) CloseMySQLWanService(id string) error {
	params := map[string]string{"InstanceId": id}
	resp, err := self.cdbRequest("CloseWanService", params)
	if err != nil {
		return errors.Wrapf(err, "CloseWanService")
	}
	asyncRequestId, _ := resp.GetString("AsyncRequestId")
	return self.waitAsyncAction("CloseWanService", id, asyncRequestId)
}

// OpenMySQLWanService enables the public (WAN) endpoint and waits for
// the async task to finish.
func (self *SRegion) OpenMySQLWanService(id string) error {
	params := map[string]string{"InstanceId": id}
	resp, err := self.cdbRequest("OpenWanService", params)
	if err != nil {
		return errors.Wrapf(err, "OpenWanService")
	}
	asyncRequestId, _ := resp.GetString("AsyncRequestId")
	return self.waitAsyncAction("OpenWanService", id, asyncRequestId)
}

// InitMySQLDBInstances initializes freshly-created instances: sets the
// root password, optional MySQL parameters and an optional service port
// (must be in [1024, 65535] to be applied), then waits for every
// returned async task.
func (self *SRegion) InitMySQLDBInstances(ids []string, password string, parameters map[string]string, vport int) error {
	params := map[string]string{"NewPassword": password}
	for idx, id := range ids {
		params[fmt.Sprintf("InstanceIds.%d", idx)] = id
	}
	i := 0
	for k, v := range parameters {
		params[fmt.Sprintf("Parameters.%d.name", i)] = k
		params[fmt.Sprintf("Parameters.%d.value", i)] = v
		i++
	}
	if vport >= 1024 && vport <= 65535 {
		params["Vport"] = fmt.Sprintf("%d", vport)
	}
	resp, err := self.cdbRequest("InitDBInstances", params)
	if err != nil {
		return errors.Wrapf(err, "InitDBInstances")
	}
	asyncRequestIds := []string{}
	err = resp.Unmarshal(&asyncRequestIds, "AsyncRequestIds")
	if err != nil {
		return errors.Wrapf(err, "resp.Unmarshal")
	}
	for idx, requestId := range asyncRequestIds {
		err = self.waitAsyncAction("InitDBInstances", fmt.Sprintf("%d", idx), requestId)
		if err != nil {
			return err
		}
	}
	return nil
}

// UpgradeMySQLDBInstance changes memory (MB) and disk (GB) of an
// instance and waits for the async resize task.
func (self *SRegion) UpgradeMySQLDBInstance(id string, memoryMb int, volumeGb int) error {
	params := map[string]string{
		"InstanceId": id,
		"Memory":     fmt.Sprintf("%d", memoryMb),
		"Volume":     fmt.Sprintf("%d", volumeGb),
	}
	resp, err := self.cdbRequest("UpgradeDBInstance", params)
	if err != nil {
		return errors.Wrapf(err, "UpgradeDBInstance")
	}
	asyncRequestId, _ := resp.GetString("AsyncRequestId")
	return self.waitAsyncAction("UpgradeDBInstance", id, asyncRequestId)
}

// ModifyMySQLAutoRenewFlag sets the auto-renew flag ("1" on, "0" off)
// for all the given instances in one call.
func (self *SRegion) ModifyMySQLAutoRenewFlag(ids []string, autoRenew bool) error {
	params := map[string]string{}
	for idx, id := range ids {
		params[fmt.Sprintf("InstanceIds.%d", idx)] = id
	}
	params["AutoRenew"] = "0"
	if autoRenew {
		params["AutoRenew"] = "1"
	}
	_, err := self.cdbRequest("ModifyAutoRenewFlag", params)
	return err
}
// SMaintenanceTime holds the maintenance windows of an instance, one
// slice of "HH:MM-HH:MM" ranges per weekday, mirroring the
// DescribeTimeWindow API response.
type SMaintenanceTime struct {
	Monday    []string
	Tuesday   []string
	Wednesday []string
	Thursday  []string
	Friday    []string
	Saturday  []string
	Sunday    []string
}

// String renders the non-empty windows one day per line, in weekday
// order (Monday through Sunday). The previous implementation ranged
// over a map, so line order changed randomly between calls; a fixed
// slice makes the output deterministic.
func (w SMaintenanceTime) String() string {
	days := []struct {
		name    string
		windows []string
	}{
		{"Monday", w.Monday},
		{"Tuesday", w.Tuesday},
		{"Wednesday", w.Wednesday},
		{"Thursday", w.Thursday},
		{"Friday", w.Friday},
		{"Saturday", w.Saturday},
		{"Sunday", w.Sunday},
	}
	windows := []string{}
	for _, d := range days {
		if len(d.windows) > 0 {
			windows = append(windows, fmt.Sprintf("%s: %s", d.name, strings.Join(d.windows, " ")))
		}
	}
	return strings.Join(windows, "\n")
}
// DescribeMySQLTimeWindow fetches the maintenance windows of instance id.
func (self *SRegion) DescribeMySQLTimeWindow(id string) (*SMaintenanceTime, error) {
	params := map[string]string{"InstanceId": id}
	resp, err := self.cdbRequest("DescribeTimeWindow", params)
	if err != nil {
		return nil, errors.Wrapf(err, "DescribeTimeWindow")
	}
	timeWindow := &SMaintenanceTime{}
	err = resp.Unmarshal(timeWindow)
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	return timeWindow, nil
}

// SDBSecgroup is one security group bound to a CDB instance.
type SDBSecgroup struct {
	ProjectId           int
	CreateTime          time.Time
	SecurityGroupId     string
	SecurityGroupName   string
	SecurityGroupRemark string
}

// DescribeMySQLDBSecurityGroups lists the security groups bound to
// instanceId.
func (self *SRegion) DescribeMySQLDBSecurityGroups(instanceId string) ([]SDBSecgroup, error) {
	params := map[string]string{
		"InstanceId": instanceId,
	}
	resp, err := self.cdbRequest("DescribeDBSecurityGroups", params)
	if err != nil {
		return nil, errors.Wrapf(err, "DescribeDBSecurityGroups")
	}
	result := []SDBSecgroup{}
	err = resp.Unmarshal(&result, "Groups")
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	return result, nil
}
// CreateMySQLDBInstance creates a single CDB (MySQL) instance from opts,
// retries the create call while a conflicting order is in flight, waits
// until the new instance's creation task finishes, and returns it.
// Postpaid instances use CreateDBInstanceHour; a non-nil BillingCycle
// switches to the prepaid CreateDBInstance action.
func (self *SRegion) CreateMySQLDBInstance(opts *cloudprovider.SManagedDBInstanceCreateConfig) (*SMySQLInstance, error) {
	params := map[string]string{
		"InstanceName":  opts.Name,
		"GoodsNum":      "1",
		"Memory":        fmt.Sprintf("%d", opts.VmemSizeMb),
		"Volume":        fmt.Sprintf("%d", opts.DiskSizeGB),
		"EngineVersion": opts.EngineVersion,
	}
	if len(opts.VpcId) > 0 {
		params["UniqVpcId"] = opts.VpcId
	}
	if len(opts.NetworkId) > 0 {
		params["UniqSubnetId"] = opts.NetworkId
	}
	if len(opts.ProjectId) > 0 {
		params["ProjectId"] = opts.ProjectId
	}
	if opts.Port > 1024 && opts.Port < 65535 {
		params["Port"] = fmt.Sprintf("%d", opts.Port)
	}
	if len(opts.Password) > 0 {
		params["Password"] = opts.Password
	}
	for i, secId := range opts.SecgroupIds {
		params[fmt.Sprintf("SecurityGroup.%d", i)] = secId
	}
	action := "CreateDBInstanceHour"
	if opts.BillingCycle != nil {
		params["Period"] = fmt.Sprintf("%d", opts.BillingCycle.GetMonths())
		params["AutoRenewFlag"] = "0"
		if opts.BillingCycle.AutoRenew {
			params["AutoRenewFlag"] = "1"
		}
		action = "CreateDBInstance"
	}
	if len(opts.Zone1) > 0 {
		params["Zone"] = opts.Zone1
	}
	params["DeployMode"] = "0"
	switch opts.Category {
	case api.QCLOUD_DBINSTANCE_CATEGORY_BASIC:
		params["DeviceType"] = strings.ToUpper(opts.Category)
	case api.QCLOUD_DBINSTANCE_CATEGORY_HA:
		params["DeviceType"] = strings.ToUpper(opts.Category)
		if len(opts.Zone2) > 0 {
			params["SlaveZone"] = opts.Zone2
		}
	case api.QCLOUD_DBINSTANCE_CATEGORY_FINANCE:
		// The finance tier is HA hardware with strong-sync replication
		// (ProtectMode 2) and an extra backup replica zone.
		params["DeviceType"] = "HA"
		params["ProtectMode"] = "2"
		if len(opts.Zone2) > 0 {
			params["SlaveZone"] = opts.Zone2
		}
		if len(opts.Zone3) > 0 {
			params["BackupZone"] = opts.Zone3
		}
	}
	// DeployMode 1 = multi-zone deployment when master and slave zones differ.
	if len(opts.Zone1) > 0 && len(opts.Zone2) > 0 && opts.Zone1 != opts.Zone2 {
		params["DeployMode"] = "1"
	}
	// Client token makes the (retried) create request idempotent on the API side.
	params["ClientToken"] = utils.GenRequestId(20)
	i := 0
	for k, v := range opts.Tags {
		params[fmt.Sprintf("ResourceTags.%d.TagKey", i)] = k
		params[fmt.Sprintf("ResourceTags.%d.TagValue", i)] = v
		i++
	}
	// create retries for up to 10 minutes while the API reports another
	// order for the same resource is still being processed.
	var create = func(action string, params map[string]string) (jsonutils.JSONObject, error) {
		startTime := time.Now()
		var resp jsonutils.JSONObject
		var err error
		for time.Since(startTime) < time.Minute*10 {
			resp, err = self.cdbRequest(action, params)
			if err != nil {
				if strings.Contains(err.Error(), "OperationDenied.OtherOderInProcess") || strings.Contains(err.Error(), "Message=请求已经在处理中") {
					time.Sleep(time.Second * 20)
					continue
				}
				return nil, errors.Wrapf(err, "cdbRequest")
			}
			return resp, nil
		}
		return resp, err
	}
	resp, err := create(action, params)
	if err != nil {
		return nil, errors.Wrapf(err, "cdbRequest")
	}
	instanceIds := []string{}
	err = resp.Unmarshal(&instanceIds, "InstanceIds")
	if err != nil {
		return nil, errors.Wrapf(err, "resp.Unmarshal")
	}
	if len(instanceIds) == 0 {
		return nil, fmt.Errorf("%s not return InstanceIds", action)
	}
	// Wait (10s interval, 20min max) for the creation task to leave the
	// "running" state (TaskStatus 1) on the new instance.
	err = cloudprovider.Wait(time.Second*10, time.Minute*20, func() (bool, error) {
		instances, _, err := self.ListMySQLInstances(instanceIds, 0, 1)
		if err != nil {
			return false, errors.Wrapf(err, "ListMySQLInstances(%s)", instanceIds)
		}
		for _, rds := range instances {
			log.Debugf("instance %s(%s) task status: %d", rds.InstanceName, rds.InstanceId, rds.TaskStatus)
			if rds.TaskStatus == 1 {
				return false, nil
			}
		}
		return true, nil
	})
	if err != nil {
		return nil, errors.Wrapf(err, "cloudprovider.Wait After create")
	}
	return self.GetMySQLInstanceById(instanceIds[0])
}
// GetMySQLInstanceById fetches exactly one instance by id, mapping
// zero matches to ErrNotFound and multiple matches to ErrDuplicateId.
func (self *SRegion) GetMySQLInstanceById(id string) (*SMySQLInstance, error) {
	part, total, err := self.ListMySQLInstances([]string{id}, 0, 20)
	if err != nil {
		return nil, errors.Wrapf(err, "ListMySQLInstances")
	}
	if total > 1 {
		return nil, errors.Wrapf(cloudprovider.ErrDuplicateId, "id: [%s]", id)
	}
	if total < 1 {
		return nil, errors.Wrapf(cloudprovider.ErrNotFound, id)
	}
	part[0].region = self
	return &part[0], nil
}

// CreateDatabase is not supported for qcloud MySQL instances.
func (self *SMySQLInstance) CreateDatabase(opts *cloudprovider.SDBInstanceDatabaseCreateConfig) error {
	return cloudprovider.ErrNotSupported
}

// CreateAccount creates a database account on this instance.
func (self *SMySQLInstance) CreateAccount(opts *cloudprovider.SDBInstanceAccountCreateConfig) error {
	return self.region.CreateMySQLAccount(self.InstanceId, opts)
}

// CreateIBackup starts a backup covering opts.Databases (whole database
// each — the empty table value means "all tables") and returns the
// backup id.
func (self *SMySQLInstance) CreateIBackup(opts *cloudprovider.SDBInstanceBackupCreateConfig) (string, error) {
	tables := map[string]string{}
	for _, d := range opts.Databases {
		tables[d] = ""
	}
	return self.region.CreateMySQLBackup(self.InstanceId, tables)
}

// GetTags returns the resource tags of this instance; a missing entry
// maps to an empty tag set rather than an error.
func (self *SMySQLInstance) GetTags() (map[string]string, error) {
	tags, err := self.region.FetchResourceTags("cdb", "instanceId", []string{self.GetId()})
	if err != nil {
		return nil, errors.Wrap(err, "self.region.FetchResourceTags")
	}
	if _, ok := tags[self.GetId()]; !ok {
		return map[string]string{}, nil
	}
	return *tags[self.GetId()], nil
}

// SetTags sets (replace=true) or merges (replace=false) resource tags.
func (self *SMySQLInstance) SetTags(tags map[string]string, replace bool) error {
	return self.region.SetResourceTags("cdb", "instanceId", []string{self.InstanceId}, tags, replace)
}
|
package scanner
import (
"testing"
)
// assertTrue fails the test when b is false.
func assertTrue(t *testing.T, b bool) {
	t.Helper() // report the caller's line on failure, not this helper's
	if !b {
		t.Errorf("Not true")
	}
}

// assertFalse fails the test when b is true.
func assertFalse(t *testing.T, b bool) {
	t.Helper() // report the caller's line on failure, not this helper's
	if b {
		t.Errorf("Not false")
	}
}
// TestR checks that r('c') yields a predicate matching exactly 'c'.
func TestR(t *testing.T) {
	l := r('c')
	assertTrue(t, l('c'))
	assertFalse(t, l('a'))
	assertFalse(t, l('b'))
}

// TestOr checks that or combines predicates with logical disjunction.
func TestOr(t *testing.T) {
	l := or(r('a'), r('b'), r('c'))
	assertTrue(t, l('a'))
	assertTrue(t, l('b'))
	assertTrue(t, l('c'))
	assertFalse(t, l('d'))
	assertFalse(t, l('e'))
}
|
package test
import (
"context"
"testing"
"github.com/ncarlier/readflow/pkg/constant"
"github.com/ncarlier/readflow/pkg/assert"
"github.com/ncarlier/readflow/pkg/model"
ruleengine "github.com/ncarlier/readflow/pkg/rule-engine"
)
// newTestRule builds a model.Rule with a fixed ID and alias, pointing at
// the given category, for use in the tests below.
func newTestRule(rule string, category uint) model.Rule {
	var result model.Rule
	id := uint(1)
	result.ID = &id
	result.Alias = "test"
	result.CategoryID = &category
	result.Rule = rule
	return result
}
// TestBadRuleProcessor verifies that an empty rule expression is rejected.
func TestBadRuleProcessor(t *testing.T) {
	rule := newTestRule("", 1)
	processor, err := ruleengine.NewRuleProcessor(rule)
	assert.NotNil(t, err, "error should be not nil")
	assert.True(t, processor == nil, "processor should be nil")
}

// TestRuleProcessor verifies that a title-matching rule sets the
// article's category when it matches and leaves it nil when it does not.
func TestRuleProcessor(t *testing.T) {
	ctx := context.TODO()
	categoryID := uint(9)
	rule := newTestRule("article.Title == \"test\"", categoryID)
	processor, err := ruleengine.NewRuleProcessor(rule)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, processor != nil, "processor should not be nil")
	// Matching title: rule applies and category is assigned.
	builder := model.NewArticleBuilder()
	article := builder.Random().UserID(uint(1)).Title("test").Build()
	applied, err := processor.Apply(ctx, article)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, applied, "processor should be applied")
	assert.True(t, article.CategoryID != nil, "category should be not nil")
	assert.Equal(t, categoryID, *article.CategoryID, "category should be updated")
	// Non-matching title: rule does not apply, category stays nil.
	builder = model.NewArticleBuilder()
	article = builder.Random().UserID(uint(1)).Title("foo").Build()
	applied, err = processor.Apply(ctx, article)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, !applied, "processor should not be applied")
	assert.True(t, article.CategoryID == nil, "category should be nil")
}

// TestProcessorPipeline verifies that only the matching rule in a
// pipeline fires, and that nothing fires for an unmatched article.
func TestProcessorPipeline(t *testing.T) {
	ctx := context.TODO()
	rules := []model.Rule{
		newTestRule("article.Title == \"test\"", uint(1)),
		newTestRule("article.Title == \"foo\"", uint(2)),
		newTestRule("article.Title == \"bar\"", uint(3)),
	}
	pipeline, err := ruleengine.NewProcessorsPipeline(rules)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, pipeline != nil, "pipeline should not be nil")
	// "foo" matches the second rule, so category 2 is expected.
	builder := model.NewArticleBuilder()
	article := builder.Random().UserID(uint(1)).Title("foo").Build()
	applied, err := pipeline.Apply(ctx, article)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, applied, "pipeline should be applied")
	assert.True(t, article.CategoryID != nil, "category should be not nil")
	assert.Equal(t, uint(2), *article.CategoryID, "category should be updated")
	// "other" matches no rule, so nothing is applied.
	builder = model.NewArticleBuilder()
	article = builder.Random().UserID(uint(1)).Title("other").Build()
	applied, err = pipeline.Apply(ctx, article)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, !applied, "pipeline should not be applied")
	assert.True(t, article.CategoryID == nil, "category should be nil")
}

// TestRuleProcessorWithContext verifies that a rule can reference the
// API key alias carried in the request context.
func TestRuleProcessorWithContext(t *testing.T) {
	ctx := context.WithValue(context.TODO(), constant.APIKeyAlias, "test")
	categoryID := uint(9)
	rule := newTestRule("key == \"test\"", categoryID)
	processor, err := ruleengine.NewRuleProcessor(rule)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, processor != nil, "processor should not be nil")
	builder := model.NewArticleBuilder()
	article := builder.Random().UserID(uint(1)).Title("test").Build()
	applied, err := processor.Apply(ctx, article)
	assert.Nil(t, err, "error should be nil")
	assert.True(t, applied, "processor should be applied")
	assert.True(t, article.CategoryID != nil, "category should be not nil")
	assert.Equal(t, categoryID, *article.CategoryID, "category should be updated")
}
|
package test
import (
"fmt"
"github.com/coredumptoday/practice/tree"
"github.com/coredumptoday/practice/utils"
"testing"
)
// TestPrefixTree fuzz-tests the prefix tree against a reference
// implementation (PrefixRight): for many random string arrays it applies
// a random mix of insert/delete/search/prefix-count operations to both
// structures and fails on the first disagreement.
func TestPrefixTree(t *testing.T) {
	arrLen := 100
	strLen := 20
	testTimes := 100000
	for i := 0; i < testTimes; i++ {
		arr := utils.GenerateRandomStringArray(arrLen, strLen)
		preTree := tree.NewPrefixTree()
		right := tree.NewPrefixRight()
		for j := 0; j < len(arr); j++ {
			// decide in [0,1) selects the operation with equal probability.
			decide := utils.GetRandNum()
			if decide < 0.25 {
				preTree.Insert(arr[j])
				right.Insert(arr[j])
			} else if decide < 0.5 {
				preTree.Delete(arr[j])
				right.Delete(arr[j])
			} else if decide < 0.75 {
				ans1 := preTree.Search(arr[j])
				ans2 := right.Search(arr[j])
				if ans1 != ans2 {
					fmt.Println("Search Oops!", ans1, ans2)
					t.Fail()
					return
				}
			} else {
				ans1 := preTree.PrefixCount(arr[j])
				ans2 := right.PrefixCount(arr[j])
				if ans1 != ans2 {
					fmt.Println("PrefixCount Oops!", ans1, ans2, arr[j], arr, j)
					t.Fail()
					return
				}
			}
		}
	}
	fmt.Println("finish!")
}
// newFullBinTree builds the complete binary tree used by the tests below:
//
//	      1
//	    /   \
//	   2     3
//	  / \   / \
//	 4   5 6   7
//
// The same seven-node tree was previously built inline in each test.
func newFullBinTree() *tree.BinTreeNode {
	root := &tree.BinTreeNode{Value: 1}
	root.LeftNode = &tree.BinTreeNode{Value: 2}
	root.RightNode = &tree.BinTreeNode{Value: 3}
	root.LeftNode.LeftNode = &tree.BinTreeNode{Value: 4}
	root.LeftNode.RightNode = &tree.BinTreeNode{Value: 5}
	root.RightNode.LeftNode = &tree.BinTreeNode{Value: 6}
	root.RightNode.RightNode = &tree.BinTreeNode{Value: 7}
	return root
}

// TestBinTreePrint exercises every traversal printer (pre/in/post order,
// recursive and non-recursive, plus level order) on the sample tree.
// Output is visual only; the test fails only on panics.
func TestBinTreePrint(t *testing.T) {
	root := newFullBinTree()
	tree.PrintBinTreePre(root)
	fmt.Println("========先序递归")
	tree.PrintBinTreePreNonRecursion(root)
	fmt.Println("========先序非递归")
	tree.PrintBinTreeMid(root)
	fmt.Println("========中序递归")
	tree.PrintBinTreeMidNonRecursion(root)
	fmt.Println("========中序非递归")
	tree.PrintBinTreePost(root)
	fmt.Println("========后续递归")
	tree.PrintBinTreeForDeep(root)
	fmt.Println("========层序")
}

// TestTreeMaxWidth prints the max level width of the sample tree with
// one extra node (8) hung below node 5.
func TestTreeMaxWidth(t *testing.T) {
	root := newFullBinTree()
	root.LeftNode.RightNode.LeftNode = &tree.BinTreeNode{Value: 8}
	tree.PrintBinTreeForDeep(root)
	fmt.Println("========层序")
	fmt.Println(tree.GetBinTreeMaxWidth(root))
}

// TestTreeComplete prints the completeness check for a non-complete tree
// (extra node 8 under node 5) and then for the complete sample tree.
func TestTreeComplete(t *testing.T) {
	root := newFullBinTree()
	root.LeftNode.RightNode.LeftNode = &tree.BinTreeNode{Value: 8}
	tree.PrintBinTreeForDeep(root)
	fmt.Println("========层序")
	fmt.Println(tree.IsCompleteBinaryTree(root))
	root = newFullBinTree()
	tree.PrintBinTreeForDeep(root)
	fmt.Println("========层序")
	fmt.Println(tree.IsCompleteBinaryTree(root))
}
// TestIsBST fuzz-tests tree.IsBinSearchTree against the reference
// utils.IsBST on many random trees, failing on the first disagreement.
func TestIsBST(t *testing.T) {
	maxLevel := 4
	maxValue := 100
	testTimes := 1000000
	for i := 0; i < testTimes; i++ {
		head := utils.GenerateRandomBST(maxLevel, maxValue)
		res1 := utils.IsBST(head)
		res2 := tree.IsBinSearchTree(head)
		if res1 != res2 {
			// Dump the tree in-order to aid debugging the mismatch.
			tree.PrintBinTreeMid(head)
			fmt.Println("Oops!", res1, res2)
			t.Fail()
			return
		}
	}
	fmt.Println("finish!")
}
|
/*
Copyright (c) 2014
Dario Brandes
Thies Johannsen
Paul Kröger
Sergej Mann
Roman Naumann
Sebastian Thobe
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* -*- Mode: Go; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4 -*- */
package db
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"time"
"../../logger"
)
// Post is a persisted post. JSON tags shape the API representation,
// sql/gorm tags the database schema. Originator/Author are resolved
// Onion records for OriginatorId/AuthorId and are excluded from JSON.
type Post struct {
	Id                int64     `json:"id"`
	Message           string    `json:"message" sql:"type:text;not null"`
	CreatedAt         time.Time `json:"createdAt"`
	UpdatedAt         time.Time `json:"updatedAt"`
	DeletedAt         time.Time `json:"deletedAt"`
	TTL               uint8     `json:"ttl"`
	Published         bool      `json:"published" sql:"not null;default:0"`
	Originator        Onion     `json:"-"`
	OriginatorId      int64     `json:"originator" sql:"not null"`
	Author            Onion     `json:"-"`
	AuthorId          int64     `json:"author" sql:"not null"`
	PostedAt          time.Time `json:"posted_at" sql:"not null"`
	PublishedAt       time.Time `json:"published_at"`
	RemotePublishedAt time.Time `json:"remote_published_at"`
	Hash              string    `json:"hash" sql:"unique; not null" `
	ParentId          int64     `json:"parent"`
	ParentHash        string    `sql:"-"`
	Circles           []Circle  `json:"-" gorm:"many2many:circle_posts;"`
}
// CalcHash computes the post's content hash — SHA-256 over the
// concatenation of message, posting time (unix seconds) and the
// author's onion address — and stores it base64-encoded in post.Hash.
func (post *Post) CalcHash() {
	if post.Author.Onion == "" {
		logger.Error("Post.CalcHash: Author onion not set!")
	}
	// "%d" formats the int64 timestamp. The previous "%ld" is not a valid
	// Go verb ("l" is C's long modifier) and produced a malformed
	// "%!l(int64=...)d" string inside the hashed bytes.
	bin := append(append([]byte(post.Message), []byte(fmt.Sprintf("%d", post.PostedAt.Unix()))...), []byte(post.Author.Onion)...)
	hash := sha256.Sum256(bin)
	post.Hash = base64.StdEncoding.EncodeToString(hash[:])
}
|
package main
import (
"fastdb"
"fastdb/cmd"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"syscall"
)
// init prints the ASCII-art banner. Printing is best-effort: when the
// logo file is unreadable we skip output entirely instead of emitting a
// stray blank line as before (the error was silently discarded and the
// empty slice still printed).
func init() {
	if logo, err := ioutil.ReadFile("../../logo.txt"); err == nil {
		fmt.Println(string(logo))
	}
}
// config points at an optional fastdb config file; empty selects the
// built-in defaults.
var config = flag.String("config", "", "the config file for fastdb")
// main boots a fastdb server and blocks until a shutdown signal arrives.
func main() {
	flag.Parse()

	// Set the config.
	var cfg fastdb.Config
	if *config == "" {
		log.Println("no config set, using the default config.")
		cfg = fastdb.DefaultConfig()
	}
	// NOTE(review): a non-empty -config is never actually loaded here, so
	// cfg stays the zero value in that case — confirm and wire up loading.

	sig := make(chan os.Signal, 1)
	// os.Kill (SIGKILL) cannot be caught, so registering it was a no-op
	// (go vet flags this); os.Interrupt duplicates syscall.SIGINT.
	signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

	server, err := cmd.NewServer(cfg)
	if err != nil {
		// Messages previously said "rosedb" — wrong project name.
		log.Printf("create fastdb server err: %+v\n", err)
		return
	}
	go server.Listen(cfg.Addr)
	// server.ln.Close()
	<-sig
	log.Println("fastdb is ready to exit, bye...")
}
|
// Package feed is responsible for getting data from external source (RSS).
package feed
import (
"fmt"
"time"
"github.com/mmcdole/gofeed"
)
// Storage describes persistent datastorage.
// It remembers, per feed URL, the timestamp of the newest item already
// delivered, so Fetch can return only newer items.
type Storage interface {
	GetLastUpdate(feed string) time.Time
	SaveLastUpdate(feed string, t time.Time) error
}
// RSSFeed reads data from RSS feed.
type RSSFeed struct {
	url     string        // feed URL, also the storage key
	storage Storage       // last-update bookkeeping
	parser  *gofeed.Parser // shared parser with a custom HTTP client
}
// NewRSSFeed returns a feed reader for url, backed by s for last-update
// bookkeeping and using the package's custom HTTP client.
func NewRSSFeed(s Storage, url string) *RSSFeed {
	parser := gofeed.NewParser()
	parser.Client = newHTTPClient()
	feed := &RSSFeed{url: url, storage: s, parser: parser}
	return feed
}
// Fetch fetches last item from RSS feed.
//
// On the first call for a URL there is no baseline yet, so the current
// time is persisted and no items are returned. Afterwards, items newer
// than the stored baseline are collected and the newest collected
// item's timestamp becomes the new baseline.
func (f *RSSFeed) Fetch() ([]Item, error) {
	last := f.storage.GetLastUpdate(f.url)
	if last.IsZero() {
		// First access
		if err := f.storage.SaveLastUpdate(f.url, time.Now()); err != nil {
			return nil, fmt.Errorf("save last update time: %w", err)
		}
		return nil, nil
	}
	feed, err := f.parser.ParseURL(f.url)
	if err != nil {
		return nil, fmt.Errorf("parse url: %w", err)
	}
	if len(feed.Items) == 0 {
		return nil, nil
	}
	var items []Item //nolint: prealloc
	for _, fitem := range feed.Items {
		item := parse(fitem)
		// assumes feed.Items is ordered newest-first — TODO confirm; the
		// early break relies on that ordering to stop at the first old item.
		if !item.Published.After(last) {
			break
		}
		items = append(items, item)
	}
	if len(items) == 0 {
		return nil, nil
	}
	// Under the ordering assumption, items[0] is the newest item seen.
	if err := f.storage.SaveLastUpdate(f.url, items[0].Published); err != nil {
		return nil, fmt.Errorf("save last update time: %w", err)
	}
	return items, nil
}
// parse converts a gofeed item into an Item. The update timestamp wins
// over the publish timestamp; if neither is present, "now" is used.
func parse(in *gofeed.Item) Item {
	published := time.Now()
	if in.PublishedParsed != nil {
		published = *in.PublishedParsed
	}
	if in.UpdatedParsed != nil {
		published = *in.UpdatedParsed
	}
	return Item{Link: in.Link, Published: published}
}
|
package parser
// Kind identifies the lexical class of a token.
type Kind uint

const (
	// Operators and literals. iota assigns each constant a distinct,
	// sequential value. The previous form ("PLUS Kind = 0" followed by
	// bare names) implicitly repeated the same expression for every
	// subsequent constant, so PLUS..NUMBER were ALL equal to 0.
	PLUS Kind = iota
	MINUS
	MUL
	DIV
	IDENT
	NUMBER
)

// Token is a lexeme tagged with its kind.
type Token struct {
	Kind    Kind
	Content string
}
|
package usersvc
import (
"github.com/resilva87/usersvc/user"
"github.com/go-kit/kit/endpoint"
"golang.org/x/net/context"
)
// signUpRequest is the transport-level payload for the sign-up endpoint.
type signUpRequest struct {
	Data user.User `json:"data"`
}
// signUpResponse carries the stored user back to the client; business
// errors ride in Err rather than the endpoint's error return.
type signUpResponse struct {
	Data user.User `json:"data,omitempty"`
	Err  error     `json:"error,omitempty"`
}
// error exposes the embedded business error (go-kit errorer convention).
func (r signUpResponse) error() error { return r.Err }
// Endpoints bundles all go-kit endpoints exposed by the service.
type Endpoints struct {
	SignUpEndpoint endpoint.Endpoint
}
// MakeServerEndpoints wires every Service method into an Endpoints bundle.
func MakeServerEndpoints(s Service) Endpoints {
	var eps Endpoints
	eps.SignUpEndpoint = MakeSignUpEndpoint(s)
	return eps
}
// MakeSignUpEndpoint adapts Service.SignUp into a go-kit endpoint.
// The endpoint's own error stays nil; the business error travels inside
// signUpResponse.Err. (Local renamed to "u" — the old name shadowed the
// imported user package.)
func MakeSignUpEndpoint(s Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (response interface{}, err error) {
		u := request.(signUpRequest).Data
		signUpErr := s.SignUp(&u)
		return signUpResponse{Err: signUpErr, Data: u}, nil
	}
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runner
import (
"context"
"errors"
"fmt"
"io"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/filemon"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/trigger"
)
// NewSkaffoldListener assembles a SkaffoldListener from its collaborators.
func NewSkaffoldListener(monitor filemon.Monitor, trigger trigger.Trigger, cache graph.SourceDependenciesCache,
	intentChan <-chan bool) *SkaffoldListener {
	listener := &SkaffoldListener{
		Monitor:                 monitor,
		Trigger:                 trigger,
		sourceDependenciesCache: cache,
		intentChan:              intentChan,
	}
	return listener
}
// Listener watches for file changes and drives the dev loop in response.
type Listener interface {
	WatchForChanges(context.Context, io.Writer, func() error) error
	LogWatchToUser(io.Writer)
}
// SkaffoldListener reacts to both trigger events and explicit user
// intents, re-running the dev loop for either.
type SkaffoldListener struct {
	Monitor                 filemon.Monitor
	Trigger                 trigger.Trigger
	sourceDependenciesCache graph.SourceDependenciesCache
	intentChan              <-chan bool // user-initiated dev-loop requests
}
// LogWatchToUser delegates to the trigger's user-facing watch message.
func (l *SkaffoldListener) LogWatchToUser(out io.Writer) {
	l.Trigger.LogWatchToUser(out)
}
// WatchForChanges listens to a trigger, and when one is received, computes file changes and
// conditionally runs the dev loop.
//
// The trigger gets its own cancellable context so it is torn down when
// this function returns. The very first Monitor.Run is fatal (broken
// file monitoring at startup aborts dev mode); later failures are
// tolerated inside do(). The loop exits only on ctx cancellation or a
// propagated dev-loop error.
func (l *SkaffoldListener) WatchForChanges(ctx context.Context, out io.Writer, devLoop func() error) error {
	ctxTrigger, cancelTrigger := context.WithCancel(ctx)
	defer cancelTrigger()
	trigger, err := trigger.StartTrigger(ctxTrigger, l.Trigger)
	if err != nil {
		return fmt.Errorf("unable to start trigger: %w", err)
	}
	// exit if file monitor fails the first time
	if err := l.Monitor.Run(l.Trigger.Debounce()); err != nil {
		return fmt.Errorf("failed to monitor files: %w", err)
	}
	l.LogWatchToUser(out)
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-l.intentChan:
			// explicit user request for a dev-loop iteration
			if err := l.do(devLoop); err != nil {
				return err
			}
		case <-trigger:
			// file-change (or polling) trigger fired
			if err := l.do(devLoop); err != nil {
				return err
			}
		}
	}
}
// do runs one dev-loop iteration: reset the dependency cache, recompute
// file changes, then invoke devLoop. Monitor failures and ordinary
// dev-loop errors are logged and swallowed so watching continues; only
// ErrorConfigurationChanged propagates (it forces a new runner).
func (l *SkaffoldListener) do(devLoop func() error) error {
	// reset the dependencies resolver cache at the start of every dev loop.
	l.sourceDependenciesCache.Reset()
	if err := l.Monitor.Run(l.Trigger.Debounce()); err != nil {
		log.Entry(context.TODO()).Warnf("Ignoring changes: %s", err.Error())
		return nil
	}
	if err := devLoop(); err != nil {
		// propagating this error up causes a new runner to be created
		// and a new dev loop to start
		if errors.Is(err, ErrorConfigurationChanged) {
			return err
		}
		log.Entry(context.TODO()).Errorf("error running dev loop: %s", err.Error())
	}
	return nil
}
|
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
basename "book/ch03/basename/v2"
)
// Command-line flags: with -addr the program becomes an HTTP basename
// server; without it, it prints a basename once and exits.
var (
	addr = flag.String("addr", "", "listening address")
	port = flag.Int("port", 8003, "listening port")
)
// main either serves basenames over HTTP (-addr given) or prints the
// basename of the first CLI argument (falling back to this binary's path).
func main() {
	flag.Parse()

	// Server mode: log.Fatal never returns, so CLI mode is unreachable here.
	if *addr != "" {
		listen := fmt.Sprintf("%s:%d", *addr, *port)
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, basename.Basename(r.URL.Path))
		})
		log.Fatal(http.ListenAndServe(listen, nil))
	}

	// CLI mode.
	target := os.Args[0]
	if args := flag.Args(); len(args) > 0 {
		target = args[0]
	}
	fmt.Println(basename.Basename(target))
}
|
package forest
import (
"bytes"
"git.sr.ht/~whereswaldon/forest-go/fields"
)
// serializer is a type that can describe how to serialize and deserialize itself
// by exposing its fields in a fixed, bidirectional marshaling order.
type serializer interface {
	SerializationOrder() []fields.BidirectionalBinaryMarshaler
}
// MarshalBinary encodes s by marshaling each field, in its declared
// serialization order, into one contiguous byte slice.
func MarshalBinary(s serializer) ([]byte, error) {
	var out bytes.Buffer
	err := fields.MarshalAllInto(&out, fields.AsMarshaler(s.SerializationOrder())...)
	if err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// UnmarshalBinary decodes b into s field-by-field in declared order,
// discarding the consumed-byte count.
func UnmarshalBinary(s serializer, b []byte) error {
	_, err := fields.UnmarshalAll(b, fields.AsUnmarshaler(s.SerializationOrder())...)
	return err
}
// BytesConsumed reports how many bytes s's fields occupy when serialized.
func BytesConsumed(s serializer) int {
	return fields.TotalBytesConsumed(s.SerializationOrder()...)
}
|
package openface
import (
"os/exec"
"fa/s3util"
"fmt"
"io"
"os"
)
// Train runs the full pipeline: align faces, generate embeddings, merge
// the user's remote features, then fit and pickle a classifier. It
// stops at the first failing stage.
func Train(imgDir string, alignDir string, featureDir string, userId string, idToken string) error {
	if err := AlignImages(imgDir, alignDir); err != nil {
		return err
	}
	if err := GenReps(alignDir, featureDir); err != nil {
		return err
	}
	if err := ConcatFeatures(featureDir, userId, idToken); err != nil {
		return err
	}
	return CreatePickle(featureDir)
}
// AlignImages shells out to the openface alignment script, writing
// aligned crops of imgDir into alignDir.
func AlignImages(imgDir string, alignDir string) error {
	const script = "/root/openface/scripts/align.sh"
	return exec.Command(script, imgDir, alignDir).Run()
}
// GenReps runs the batch-represent Lua tool to produce embeddings
// (reps.csv / labels.csv) in featureDir from the aligned images.
func GenReps(alignDir string, featureDir string) error {
	const script = "/root/openface/batch-represent/main.lua"
	return exec.Command(script, "-outDir", featureDir, "-data", alignDir).Run()
}
// ConcatFeatures merges the locally generated labels/reps CSVs with the
// user's feature files stored remotely.
//
// Flow: labels.csv and reps.csv are opened read-write in append mode,
// their current contents are copied into backup files ("labels" and
// "reps"), and then s3util.GetFeature is called with the still-open
// handles — presumably appending the remote rows onto the local CSVs
// (after io.Copy the handles sit at EOF, and O_APPEND forces writes to
// the end anyway). TODO confirm GetFeature's write semantics.
func ConcatFeatures(featureDir string, userId string, idToken string) error {
	labels, err := os.OpenFile(fmt.Sprintf("%s/labels.csv", featureDir), os.O_APPEND|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	defer labels.Close()
	reps, err := os.OpenFile(fmt.Sprintf("%s/reps.csv", featureDir), os.O_APPEND|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	defer reps.Close()
	// Backup copies of the pre-merge CSVs.
	l, err := os.Create(featureDir + "/labels")
	if err != nil {
		return err
	}
	defer l.Close()
	r, err := os.Create(featureDir + "/reps")
	if err != nil {
		return err
	}
	defer r.Close()
	_, err = io.Copy(l, labels)
	if err != nil {
		return err
	}
	_, err = io.Copy(r, reps)
	if err != nil {
		return err
	}
	err = s3util.GetFeature("labels.csv", userId, idToken, labels)
	if err != nil {
		return err
	}
	err = s3util.GetFeature("reps.csv", userId, idToken, reps)
	if err != nil {
		return err
	}
	return nil
}
// CreatePickle fits the classifier on featureDir and pickles the model.
func CreatePickle(featureDir string) error {
	const script = "/root/openface/scripts/classifier.py"
	return exec.Command(script, "train", featureDir).Run()
}
|
package bst
import "fmt"
// getRank returns the 1-based rank of t within the subtree rooted at r
// (duplicates counted via node counts), or 0 for a nil tree/item.
func (r *node) getRank(t *item) uint {
	if r == nil || t == nil {
		return 0
	}
	if t.equal(r.data) {
		return r.leftSize + 1
	}
	if t.greater(r.data) {
		return r.leftSize + r.count + r.right.getRank(t)
	}
	return r.left.getRank(t)
}
// selectOrder returns the node holding the order-th smallest element
// (1-based, duplicates counted), or nil when order is out of range.
func (r *node) selectOrder(order uint) *node {
	if r == nil {
		return r
	}
	switch {
	case order > r.leftSize && order <= r.leftSize+r.count:
		return r
	case order > r.leftSize:
		return r.right.selectOrder(order - r.leftSize - r.count)
	default:
		return r.left.selectOrder(order)
	}
}
// delete removes count occurrences of r's item. When count is smaller
// than the node's multiplicity only the count and ancestor size tallies
// shrink; otherwise the node itself is unlinked, trying the childless,
// one-child and two-child cases in that order.
func (r *node) delete(count uint) {
	if r == nil {
		return
	} else if count < r.count && count > 0 {
		r.count -= count
		r.reduceParentsSize(count, nil)
		return
	}
	if r.deleteChildless() || r.deleteOneChildNode() {
		return
	}
	r.deleteTwoChildNode()
}
// deleteTwoChildNode handles removal when r has both children: r's
// value/count are swapped with its in-order predecessor (the max of the
// left subtree) and the predecessor node is spliced out of its parent.
//
// NOTE(review): the predecessor, being the max of the left subtree, can
// be its parent's RIGHT child and can only have a LEFT child — yet this
// code unconditionally writes predecessor.parent.left and splices in
// predecessor.right. Verify against a tree where r.left has right
// descendants; this looks suspect.
func (r *node) deleteTwoChildNode() bool {
	if r.left == nil || r.right == nil {
		return false
	}
	r.reduceParentsSize(r.count, nil)
	predecessor := r.findPre()
	// Only ancestors strictly below r's parent still need size fixes.
	predecessor.reduceParentsSize(predecessor.count, r.parent)
	r.swapValue(predecessor)
	if predecessor.right == nil {
		predecessor.parent.left = nil
	} else {
		predecessor.parent.left = predecessor.right
		predecessor.right.parent = predecessor.parent
	}
	return true
}
// deleteOneChildNode handles removal when r has exactly one child: the
// child is hoisted into r's slot under r's parent. Returns false when r
// has zero or two children.
func (r *node) deleteOneChildNode() bool {
	hasRight := r.right != nil
	hasLeft := r.left != nil
	var child *node
	if hasLeft && hasRight || !hasLeft && !hasRight {
		return false
	} else if hasLeft {
		child = r.left
	} else {
		child = r.right
	}
	r.reduceParentsSize(r.count, nil)
	position := r.whichChild()
	// position 0 means r is the root: nothing to relink above, the
	// caller presumably tracks the new root via child — TODO confirm.
	if position == 1 {
		r.parent.right = child
	} else if position == -1 {
		r.parent.left = child
	}
	child.parent = r.parent
	return true
}
// deleteChildless handles removal of a leaf: ancestor sizes shrink by
// the node's full count, the parent's link to r is cleared, and the
// node's fields are zeroed. Returns false when r has any child.
func (r *node) deleteChildless() bool {
	if r.left != nil || r.right != nil {
		return false
	}
	r.reduceParentsSize(r.count, nil)
	position := r.whichChild()
	if position == 1 {
		r.parent.right = nil
	} else if position == -1 {
		r.parent.left = nil
	}
	r.destroy()
	return true
}
// whichChild reports r's position under its parent: 1 for right child,
// -1 for left child, 0 when r has no parent (root).
func (r *node) whichChild() int {
	switch {
	case r.parent == nil:
		return 0
	case r.greater(r.parent):
		return 1
	default:
		return -1
	}
}
// reduceParentsSize walks from r up to (but not including) limit,
// subtracting count from each parent's left or right subtree tally
// depending on which side the current node hangs from. A nil limit
// walks all the way to the root (whichChild returning 0 breaks out).
func (r *node) reduceParentsSize(count uint, limit *node) {
	movingPtr := r
	var position int
	for movingPtr != limit {
		position = movingPtr.whichChild()
		// fmt.Println(movingPtr.data, position)
		if position == 1 {
			movingPtr.parent.rightSize -= count
		} else if position == -1 {
			movingPtr.parent.leftSize -= count
		} else {
			break
		}
		movingPtr = movingPtr.parent
	}
}
// Print writes the tree's in-order traversal followed by a newline.
func Print(r *node) {
	r.traverseInOrder()
	fmt.Printf("\n")
}
// traverseInOrder prints the subtree in sorted order, each value
// followed by its multiplicity in parentheses.
func (r *node) traverseInOrder() {
	if r == nil {
		return
	}
	r.left.traverseInOrder()
	r.print()
	fmt.Printf("(%v) ", r.count)
	r.right.traverseInOrder()
}
// findPre returns r's in-order predecessor: the maximum of the left
// subtree when one exists, otherwise the nearest ancestor whose value
// is not greater than r's. Returns nil when r is the overall minimum.
func (r *node) findPre() *node {
	// if it has left branch, then return the maximum of the left subtree
	if r.left != nil {
		return r.left.max()
	}
	// else return the first parent that's less than the node
	movingPtr := r.parent
	for movingPtr != nil && movingPtr.greater(r) {
		movingPtr = movingPtr.parent
	}
	return movingPtr
}
// max returns the right-most (largest) node of the subtree, or nil.
func (r *node) max() *node {
	if r == nil {
		return nil
	}
	m := r
	for m.right != nil {
		m = m.right
	}
	return m
}
// min returns the left-most (smallest) node of the subtree, or nil.
func (r *node) min() *node {
	m := r
	for m != nil && m.left != nil {
		m = m.left
	}
	return m
}
// search finds the node whose data equals t, or returns nil.
func (r *node) search(t *item) *node {
	cur := r
	for cur != nil && !t.equal(cur.data) {
		if t.greater(cur.data) {
			cur = cur.right
		} else {
			cur = cur.left
		}
	}
	return cur
}
// insert adds item t to the subtree rooted at r and returns the node
// holding t; duplicates bump the existing node's count. Ancestor
// leftSize/rightSize tallies are incremented along the descent path
// (sizes include duplicate counts, matching getRank's arithmetic).
func (r *node) insert(t *item) *node {
	k := &node{data: t, count: 1}
	if r == nil {
		return k
	}
	movingPtr := r
	var parent *node
	var isRight bool
	for movingPtr != nil && !movingPtr.equal(k) {
		parent = movingPtr
		if k.greater(movingPtr) {
			movingPtr.rightSize++
			movingPtr = movingPtr.right
			isRight = true
		} else {
			movingPtr.leftSize++
			movingPtr = movingPtr.left
			isRight = false
		}
	}
	if movingPtr == nil {
		// New leaf: link it under the last visited parent. Its
		// leftSize/rightSize are already zero from construction.
		movingPtr = k
		movingPtr.parent = parent
		if isRight {
			parent.right = movingPtr
		} else {
			parent.left = movingPtr
		}
	} else {
		// Existing node: only the multiplicity grows. (The previous code
		// also reset this node's leftSize/rightSize to zero here, which
		// corrupted the size bookkeeping whenever the matched node had
		// children; for a brand-new leaf the reset was redundant.)
		movingPtr.count++
	}
	return movingPtr
}
// destroy zeroes the node's payload, tallies and links so a detached
// node holds no stale references.
func (r *node) destroy() {
	r.data.destroy()
	r.count, r.leftSize, r.rightSize = 0, 0, 0
	r.left, r.right, r.parent = nil, nil, nil
}
// swap exchanges everything between two nodes: payload/count, subtree
// sizes, and tree links.
func (r *node) swap(k *node) {
	r.swapValue(k)
	r.swapSizes(k)
	r.swapPointers(k)
}
// swapPointers exchanges the two nodes' left/right/parent links.
func (r *node) swapPointers(k *node) {
	left, right, parent := r.left, r.right, r.parent
	r.left, r.right, r.parent = k.left, k.right, k.parent
	k.left, k.right, k.parent = left, right, parent
}
// swapSizes exchanges the cached subtree-size tallies of two nodes.
func (r *node) swapSizes(k *node) {
	r.leftSize, k.leftSize = k.leftSize, r.leftSize
	r.rightSize, k.rightSize = k.rightSize, r.rightSize
}
// swapValue exchanges the payloads (item data and multiplicity) of two
// nodes without touching their tree structure.
func (r *node) swapValue(k *node) {
	r.data.swap(k.data)
	r.count, k.count = k.count, r.count
}
// print writes the node's value (no separator, no newline).
func (r *node) print() {
	fmt.Printf("%v", r.data.Val)
}
// greater orders nodes by their item data.
func (r *node) greater(m *node) bool {
	return r.data.greater(m.data)
}
// equal compares nodes by their item data.
func (r *node) equal(m *node) bool {
	return r.data.equal(m.data)
}
// node is a multiset BST node: count is the multiplicity of data, and
// leftSize/rightSize cache total element counts (duplicates included)
// of each subtree to support rank/select queries.
type node struct {
	left      *node
	right     *node
	parent    *node
	data      *item
	count     uint
	leftSize  uint
	rightSize uint
}
|
package controllers
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"github.com/darkcl/Notorious/lib/webview"
"github.com/darkcl/Notorious/models"
"github.com/mitchellh/go-homedir"
)
// FolderController - Folder Controller
// Holds the note tree rooted at ~/.notorious plus the currently open
// note's path and content, and talks back through the webview.
type FolderController struct {
	Folder         *models.Folder `json:"folderTree"`
	CurrentContent string         `json:"currentContent,omitempty"`
	CurrentPath    string         `json:"currentPath,omitempty"`
	webView        webview.WebView
}
// NewFolderController - Create New Folder Controller
// Builds the note tree from ~/.notorious (panicking when the home
// directory cannot be resolved) and dumps it as JSON for debugging.
func NewFolderController(webView webview.WebView) *FolderController {
	home, err := homedir.Dir()
	if err != nil {
		panic(err)
	}
	tree := buildTree(filepath.Join(home, ".notorious"))
	data, _ := json.Marshal(tree)
	fmt.Println(string(data))
	return &FolderController{webView: webView, Folder: tree}
}
// Clear - clear current folder tree
func (f *FolderController) Clear() {
	f.Folder = nil
}
// Create - create a file
// Writes "<title>.md" seeded with an H1 heading into the workspace
// under the tree root, opens it as the current note, then rebuilds the
// tree from ~/.notorious so the new file shows up.
func (f *FolderController) Create(title string, workspace string) {
	notePath := filepath.Join(f.Folder.Path, workspace, fmt.Sprintf("%s.md", title))
	noteContent := []byte(fmt.Sprintf("# %s", title))
	err := ioutil.WriteFile(notePath, noteContent, 0644)
	if err != nil {
		fmt.Println(err)
		panic(err)
	}
	path, err := homedir.Dir()
	if err != nil {
		panic(err)
	}
	settingPath := filepath.Join(path, ".notorious")
	f.Open(notePath)
	f.Folder = buildTree(settingPath)
}
// Open - open a file with path
// On read failure the error is printed and the current note unchanged.
func (f *FolderController) Open(path string) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Print(err)
		return
	}
	f.CurrentPath = path
	f.CurrentContent = string(content)
}
// Save - save data to path
// Persists content to the currently open note; only on success is the
// in-memory copy updated.
func (f *FolderController) Save(content string) {
	if err := ioutil.WriteFile(f.CurrentPath, []byte(content), 0644); err != nil {
		fmt.Print(err)
		return
	}
	f.CurrentContent = content
}
// CreateWorkspace - create a workspace
// Ensures ~/.notorious/<name> exists and rebuilds the folder tree.
func (f *FolderController) CreateWorkspace(name string) {
	home, err := homedir.Dir()
	if err != nil {
		panic(err)
	}
	root := filepath.Join(home, ".notorious")
	workspacePath := filepath.Join(root, name)
	if _, err := os.Stat(workspacePath); os.IsNotExist(err) {
		os.Mkdir(workspacePath, os.ModePerm)
	}
	f.Folder = buildTree(root)
}
// OpenWorkspace - open workspace
// Resets the current note, then opens the first file of every folder
// matching name (in practice the matching workspace's first note).
func (f *FolderController) OpenWorkspace(name string) {
	f.CurrentContent = ""
	f.CurrentPath = ""
	for _, folder := range f.Folder.Folders {
		if folder.Name != name || len(folder.Files) == 0 {
			continue
		}
		f.Open(folder.Files[0].Path)
	}
}
// Private Functions

// buildTree walks dir and assembles a Folder tree: each directory
// becomes a Folder node, each .md file a File node; other files are
// ignored. Walk pre-registers every directory before its children, so
// the parent lookup in the assembly loop always succeeds regardless of
// map iteration order.
// NOTE(review): walkFun dereferences info without checking err, so a
// non-existent root would panic inside Walk — confirm callers always
// pass an existing directory.
func buildTree(dir string) *models.Folder {
	dir = path.Clean(dir)
	var tree *models.Folder
	var nodes = map[string]interface{}{}
	var walkFun filepath.WalkFunc = func(p string, info os.FileInfo, err error) error {
		if info.IsDir() {
			nodes[p] = &models.Folder{Name: path.Base(p), Files: []*models.File{}, Folders: []*models.Folder{}, Path: p}
		} else {
			if filepath.Ext(p) == ".md" {
				nodes[p] = &models.File{Name: path.Base(p), Path: p}
			}
		}
		return nil
	}
	err := filepath.Walk(dir, walkFun)
	if err != nil {
		log.Fatal(err)
	}
	// Attach every node to its parent folder; the root becomes the tree.
	for key, value := range nodes {
		var parentFolder *models.Folder
		if key == dir {
			tree = value.(*models.Folder)
			continue
		} else {
			parentFolder = nodes[path.Dir(key)].(*models.Folder)
		}
		switch v := value.(type) {
		case *models.File:
			parentFolder.Files = append(parentFolder.Files, v)
		case *models.Folder:
			parentFolder.Folders = append(parentFolder.Folders, v)
		}
	}
	return tree
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
)
// Connection settings and schema for the TODO table. URL deliberately
// omits a database name: initialDbSetup creates DB_NAME first, and each
// helper issues "USE" after connecting. parseTime=true makes the driver
// return date columns as time.Time.
var DRIVER string = "mysql"
var URL string = "root:toor@tcp(127.0.0.1:3306)/?parseTime=true"
var DB_NAME string = "testdb"
var CREATE_TABLE_QUERY string = "create table if not exists TODO( " +
	" id integer AUTO_INCREMENT PRIMARY KEY, " +
	" name varchar(50), " +
	" description varchar(256)," +
	" priority integer, " +
	" due date default (CURRENT_DATE), " +
	" completed bool, " +
	" completion_date date " +
	");"
// initialDbSetup connects to MySQL and makes sure the database and the
// TODO table exist, panicking on any failure.
func initialDbSetup() {
	fmt.Println("Connnecting to DB ...")
	db, err := sql.Open(DRIVER, URL)
	if err != nil {
		panic(err.Error())
	}
	defer db.Close()
	fmt.Println("...Connected")
	for _, q := range []string{
		"CREATE DATABASE IF NOT EXISTS " + DB_NAME,
		"USE " + DB_NAME,
		CREATE_TABLE_QUERY,
	} {
		execQuery(db, q)
	}
}
// execQuery runs a statement whose result set is irrelevant, panicking
// on error (this package's uniform error style).
func execQuery(d *sql.DB, query string) {
	_, err := d.Exec(query)
	if err != nil {
		panic(err.Error())
	}
}
// insertData inserts a todo row and returns the input todo. The query
// is parameterized: the previous version interpolated user-controlled
// strings straight into the SQL (injection-prone) and ran the INSERT
// via db.Query, whose empty result set meant the Scan loop never
// executed anyway.
func insertData(todo Todo) Todo {
	fmt.Println("Connnecting to DB ...")
	db, err := sql.Open(DRIVER, URL)
	if err != nil {
		panic(err.Error())
	}
	execQuery(db, "USE "+DB_NAME)
	defer db.Close()
	fmt.Println("...Connected")
	insert_query := "insert into todo (name," +
		"description,priority,due,completed,completion_date)" +
		"values(?,?,?,?,?,?)"
	fmt.Println(insert_query)
	// sql.Null* values implement driver.Valuer, so NULLs pass through
	// naturally; with parseTime=true the driver accepts time.Time for
	// the date columns, replacing the old str_to_date() round-trip.
	_, err = db.Exec(insert_query,
		todo.Name, todo.Description, todo.Priority,
		todo.Due_Date, todo.Completed, todo.Completion_Date)
	if err != nil {
		panic(err.Error())
	}
	// NOTE(review): as before, the generated id is not read back into
	// todo.Id — confirm whether callers need result.LastInsertId().
	return todo
}
// readTable runs query against the todo database and scans every row
// into a Todo. The query must be trusted/pre-built by the caller —
// never concatenate user input into it.
func readTable(query string) []Todo {
	var todos []Todo
	fmt.Println("Connnecting to DB ...")
	db, err := sql.Open(DRIVER, URL)
	if err != nil {
		panic(err.Error())
	}
	execQuery(db, "USE "+DB_NAME)
	defer db.Close()
	fmt.Println("...Connected")
	fmt.Println(query)
	result, err := db.Query(query)
	if err != nil {
		panic(err.Error())
	}
	// Release the connection even if a Scan below panics.
	defer result.Close()
	for result.Next() {
		var todo Todo
		err = result.Scan(&todo.Id, &todo.Name, &todo.Description, &todo.Priority, &todo.Due_Date, &todo.Completed, &todo.Completion_Date)
		if err != nil {
			panic(err.Error())
		}
		fmt.Println(todo.Due_Date)
		todos = append(todos, todo)
	}
	// Surface iteration errors that Next() swallowed (previously ignored).
	if err = result.Err(); err != nil {
		panic(err.Error())
	}
	return todos
}
// updateTodo rewrites the row matching todo.Id and returns the input.
// Parameterized to remove the SQL injection in the old Sprintf-built
// statement; dates are passed as time.Time instead of str_to_date().
func updateTodo(todo Todo) Todo {
	fmt.Println("Connnecting to DB ...")
	db, err := sql.Open(DRIVER, URL)
	if err != nil {
		panic(err.Error())
	}
	execQuery(db, "USE "+DB_NAME)
	defer db.Close()
	fmt.Println("...Connected")
	update_query := "update todo set name=?,description=?,priority=?,due=?,completed=?," +
		"completion_date=? where id=?"
	fmt.Println(update_query)
	_, err = db.Exec(update_query,
		todo.Name, todo.Description, todo.Priority,
		todo.Due_Date, todo.Completed, todo.Completion_Date, todo.Id)
	if err != nil {
		panic(err.Error())
	}
	return todo
}
// deleteTodo removes the row with the given id and reports whether a
// row was actually deleted. The previous version ran DELETE through
// db.Query and returned result.Next(), which is always false for a
// statement producing no rows — it reported failure even on success —
// and it concatenated id straight into the SQL (injection-prone).
func deleteTodo(id string) bool {
	fmt.Println("Connnecting to DB ...")
	db, err := sql.Open(DRIVER, URL)
	if err != nil {
		panic(err.Error())
	}
	execQuery(db, "USE "+DB_NAME)
	defer db.Close()
	fmt.Println("...Connected")
	result, err := db.Exec("delete from todo where id=?", id)
	if err != nil {
		panic(err.Error())
	}
	affected, err := result.RowsAffected()
	if err != nil {
		panic(err.Error())
	}
	return affected > 0
}
|
package mdb
import (
"container/heap"
"fmt"
"log"
"sync"
"time"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// Session wraps an mgo session with a reference count (load) and its
// current position in the pool's heap.
type Session struct {
	*mgo.Session
	ref   int // number of outstanding Ref() checkouts
	index int // heap slot; -1 once popped
}
// SessionHeap is a min-heap of sessions ordered by reference count, so
// the least-loaded session is always at the root.
type SessionHeap []*Session

func (h SessionHeap) Len() int {
	return len(h)
}
// Less orders sessions by ascending reference count.
func (h SessionHeap) Less(i, j int) bool {
	return h[i].ref < h[j].ref
}
// Swap exchanges two slots and keeps each session's index in sync.
func (h SessionHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index = i
	h[j].index = j
}
// Push appends a session, recording its heap slot (heap.Interface contract).
func (h *SessionHeap) Push(s interface{}) {
	s.(*Session).index = len(*h)
	*h = append(*h, s.(*Session))
}
// Pop removes and returns the last slot, marking the session as
// no longer in the heap (index -1).
func (h *SessionHeap) Pop() interface{} {
	l := len(*h)
	s := (*h)[l-1]
	s.index = -1
	*h = (*h)[:l-1]
	return s
}
// DialContext is a mutex-guarded pool of MongoDB sessions kept in a
// min-heap by load.
type DialContext struct {
	sync.Mutex
	sessions SessionHeap
}
//goroutine safe
// Dial opens a session pool with the default 10s dial timeout and 5s
// operation timeout.
func Dial(url string, sessionNum int) (*DialContext, error) {
	return DiaWithTimeout(url, sessionNum, 10*time.Second, 5*time.Second)
}
//goroutine safe
// DiaWithTimeout dials MongoDB once, then clones the session into a
// pool of sessionNum entries heapified by reference count, so Ref can
// always hand out the least-loaded session. A non-positive sessionNum
// falls back to 100. (The "Dia" name typo is exported API — kept.)
func DiaWithTimeout(url string, sessionNum int, dialTimeout, timeout time.Duration) (*DialContext, error) {
	if sessionNum <= 0 {
		sessionNum = 100
		log.Println("DiaWithTimeout invalid sessionNum,reset to ", sessionNum)
	}
	s, err := mgo.DialWithTimeout(url, dialTimeout)
	if err != nil {
		return nil, err
	}
	s.SetSyncTimeout(timeout)
	s.SetSocketTimeout(timeout)
	c := new(DialContext)
	c.sessions = make(SessionHeap, sessionNum)
	c.sessions[0] = &Session{s, 0, 0}
	// Remaining slots are copies sharing the original's socket pool.
	for i := 1; i < sessionNum; i++ {
		c.sessions[i] = &Session{s.New(), 0, i}
	}
	heap.Init(&c.sessions)
	return c, nil
}
//goroutine safe
// Close shuts every pooled session down, reporting any session that was
// still checked out (non-zero refcount indicates a Ref/UnRef imbalance).
func (c *DialContext) Close() {
	c.Lock()
	for _, s := range c.sessions {
		s.Close()
		if s.ref != 0 {
			fmt.Println("error session ref = ", s.ref)
		}
	}
	c.Unlock()
}
//goroutine safe
// Ref checks out the least-loaded session (heap root), refreshing it
// when it was completely idle, and re-heapifies after the refcount
// bump. Every Ref must be paired with an UnRef.
func (c *DialContext) Ref() *Session {
	c.Lock()
	defer c.Unlock()
	s := c.sessions[0]
	if s.ref == 0 {
		s.Refresh()
	}
	s.ref++
	heap.Fix(&c.sessions, 0)
	return s
}
//goroutine safe
// UnRef returns a checked-out session to the pool, restoring heap order
// from the session's recorded slot.
func (c *DialContext) UnRef(s *Session) {
	c.Lock()
	defer c.Unlock()
	s.ref--
	heap.Fix(&c.sessions, s.index)
}
//goroutine safe
// EnsureCounter creates the counter document id with seq 0 when absent;
// an already-existing counter (duplicate key) is not an error.
func (c *DialContext) EnsureCounter(db, collection, id string) error {
	s := c.Ref()
	defer c.UnRef(s)
	err := s.DB(db).C(collection).Insert(bson.M{"_id": id, "seq": 0})
	if mgo.IsDup(err) {
		return nil
	}
	return err
}
//goroutine safe
// NextSeq atomically bumps the named counter and returns the new value.
// NOTE(review): $inc is 2, so sequences advance two at a time — confirm
// this is intentional and not a leftover experiment.
func (c *DialContext) NextSeq(db, collection, id string) (int, error) {
	s := c.Ref()
	defer c.UnRef(s)
	var res struct {
		Seq int
	}
	_, err := s.DB(db).C(collection).FindId(id).Apply(mgo.Change{
		Update:    bson.M{"$inc": bson.M{"seq": 2}},
		ReturnNew: true,
	}, &res)
	return res.Seq, err
}
// goroutine safe
// EnsureIndex creates a sparse, NON-unique index on key. (The original
// Chinese comment claimed "unique index", but Unique is false here; for
// a unique index use EnsureUniqueIndex.)
func (c *DialContext) EnsureIndex(db, collection string, key []string) error {
	s := c.Ref()
	defer c.UnRef(s)
	return s.DB(db).C(collection).EnsureIndex(mgo.Index{
		Key:    key,
		Unique: false,
		Sparse: true,
	})
}
// goroutine safe
// EnsureUniqueIndex creates a sparse, unique index on key.
func (c *DialContext) EnsureUniqueIndex(db, collection string, key []string) error {
	s := c.Ref()
	defer c.UnRef(s)
	return s.DB(db).C(collection).EnsureIndex(mgo.Index{
		Key:    key,
		Unique: true,
		Sparse: true,
	})
}
// goroutine safe
// GetIndexs lists the indexes of db.collection. (Exported name kept;
// "GetIndexes" would be conventional.)
func (c *DialContext) GetIndexs(db, collection string) ([]mgo.Index, error) {
	s := c.Ref()
	defer c.UnRef(s)
	return s.DB(db).C(collection).Indexes()
}
//goroutine safe
// SendData upserts data under id into db.collection.
func (c *DialContext) SendData(db, collection, id string, data interface{}) error {
	s := c.Ref()
	defer c.UnRef(s)
	_, err := s.DB(db).C(collection).UpsertId(id, data)
	return err
}
// Find returns the first document matching data, decoded into a bare
// interface{} (presumably a bson.M for document results — mgo's default
// decoding; confirm against callers' type assertions).
func (c *DialContext) Find(db, collection string, data bson.M) (result interface{}, err error) {
	s := c.Ref()
	defer c.UnRef(s)
	err = s.DB(db).C(collection).Find(data).One(&result)
	return
}
// Remove deletes the first document matching query from db.collection.
func (c *DialContext) Remove(db, collection string, query bson.M) error {
	s := c.Ref()
	defer c.UnRef(s)
	return s.DB(db).C(collection).Remove(query)
}
// getNewID returns a fresh ObjectId rendered as its 24-character hex
// form (ObjectId.Hex is equivalent to %x over the id's bytes).
func getNewID() string {
	return bson.NewObjectId().Hex()
}
|
package main
//1716. 计算力扣银行的钱
//Hercy 想要为购买第一辆车存钱。他 每天 都往力扣银行里存钱。
//
//最开始,他在周一的时候存入 1块钱。从周二到周日,他每天都比前一天多存入 1块钱。在接下来每一个周一,他都会比 前一个周一 多存入 1块钱。
//
//给你n,请你返回在第 n天结束的时候他在力扣银行总共存了多少块钱。
//
//
//
//示例 1:
//
//输入:n = 4
//输出:10
//解释:第 4 天后,总额为 1 + 2 + 3 + 4 = 10 。
//示例 2:
//
//输入:n = 10
//输出:37
//解释:第 10 天后,总额为 (1 + 2 + 3 + 4 + 5 + 6 + 7) + (2 + 3 + 4) = 37 。注意到第二个星期一,Hercy 存入 2 块钱。
//示例 3:
//
//输入:n = 20
//输出:96
//解释:第 20 天后,总额为 (1 + 2 + 3 + 4 + 5 + 6 + 7) + (2 + 3 + 4 + 5 + 6 + 7 + 8) + (3 + 4 + 5 + 6 + 7 + 8) = 96 。
//
//
//提示:
//
//1 <= n <= 1000
// totalMoney returns the bank balance after n days: day d (0-based)
// deposits d/7 + d%7 + 1 — each weekday adds one more than the day
// before, and every new week starts one higher than the previous week.
// n is at most 1000, so a direct summation is simplest and plenty fast.
func totalMoney(n int) int {
	total := 0
	for day := 0; day < n; day++ {
		total += day/7 + day%7 + 1
	}
	return total
}
|
package server
import (
"encoding/json"
"mime"
"net/http"
"os"
"path"
"path/filepath"
"github.com/facette/facette/pkg/library"
"github.com/facette/facette/pkg/logger"
"github.com/facette/facette/thirdparty/github.com/fatih/set"
)
// serveError renders the HTML error page for status via the layout and
// error templates; if templating itself fails, it logs and falls back
// to a bare JSON-less status response.
func (server *Server) serveError(writer http.ResponseWriter, status int) {
	err := server.execTemplate(
		writer,
		status,
		struct {
			URLPrefix string
			ReadOnly  bool
			Status    int
		}{
			URLPrefix: server.Config.URLPrefix,
			ReadOnly:  server.Config.ReadOnly,
			Status:    status,
		},
		path.Join(server.Config.BaseDir, "template", "layout.html"),
		path.Join(server.Config.BaseDir, "template", "error.html"),
	)
	if err != nil {
		logger.Log(logger.LevelError, "server", "%s", err)
		server.serveResponse(writer, nil, status)
	}
}
// serveReload handles GET/HEAD requests asking the server to reload its
// resources; rejected in read-only mode and for other methods.
func (server *Server) serveReload(writer http.ResponseWriter, request *http.Request) {
	if request.Method != "GET" && request.Method != "HEAD" {
		server.serveResponse(writer, serverResponse{mesgMethodNotAllowed}, http.StatusMethodNotAllowed)
		return
	}
	if server.Config.ReadOnly {
		server.serveResponse(writer, serverResponse{mesgReadOnlyMode}, http.StatusForbidden)
		return
	}
	// Reload resources without reloading configuration
	server.Reload(false)
	server.serveResponse(writer, nil, http.StatusOK)
}
// serveResponse writes data as JSON with the given status; nil data
// yields only the status line. On marshaling failure it recurses once
// with nil data to emit a plain 500.
func (server *Server) serveResponse(writer http.ResponseWriter, data interface{}, status int) {
	var (
		err    error
		output []byte
	)
	if data != nil {
		output, err = json.Marshal(data)
		if err != nil {
			server.serveResponse(writer, nil, http.StatusInternalServerError)
			return
		}
		// Header must be set before WriteHeader below.
		writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	}
	writer.WriteHeader(status)
	if len(output) > 0 {
		writer.Write(output)
		writer.Write([]byte("\n"))
	}
}
// serveStatic serves a file under BaseDir, forcing the MIME type from
// the URL extension (octet-stream when unknown).
func (server *Server) serveStatic(writer http.ResponseWriter, request *http.Request) {
	contentType := mime.TypeByExtension(filepath.Ext(request.URL.Path))
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	writer.Header().Set("Content-Type", contentType)
	// Handle static files
	http.ServeFile(writer, request, path.Join(server.Config.BaseDir, request.URL.Path))
}
// serveStats answers GET/HEAD with the catalog/library statistics.
func (server *Server) serveStats(writer http.ResponseWriter, request *http.Request) {
	switch request.Method {
	case "GET", "HEAD":
		server.serveResponse(writer, server.getStats(writer, request), http.StatusOK)
	default:
		server.serveResponse(writer, serverResponse{mesgMethodNotAllowed}, http.StatusMethodNotAllowed)
	}
}
// serveWait renders the "server is starting" page with a 503 status;
// missing templates map to 404, any other template error to 500.
func (server *Server) serveWait(writer http.ResponseWriter, request *http.Request) {
	err := server.execTemplate(
		writer,
		http.StatusServiceUnavailable,
		struct {
			URLPrefix string
			ReadOnly  bool
		}{
			URLPrefix: server.Config.URLPrefix,
			ReadOnly:  server.Config.ReadOnly,
		},
		path.Join(server.Config.BaseDir, "template", "layout.html"),
		path.Join(server.Config.BaseDir, "template", "wait.html"),
	)
	if err != nil {
		if os.IsNotExist(err) {
			server.serveError(writer, http.StatusNotFound)
		} else {
			logger.Log(logger.LevelError, "server", "%s", err)
			server.serveError(writer, http.StatusInternalServerError)
		}
	}
}
// getStats aggregates catalog and library counts. Sources and metrics
// are deduplicated across origins via sets keyed by name; library
// groups are split into source groups and metric groups by type.
func (server *Server) getStats(writer http.ResponseWriter, request *http.Request) *statsResponse {
	sourceSet := set.New(set.ThreadSafe)
	metricSet := set.New(set.ThreadSafe)
	for _, origin := range server.Catalog.Origins {
		for key, source := range origin.Sources {
			sourceSet.Add(key)
			for key := range source.Metrics {
				metricSet.Add(key)
			}
		}
	}
	sourceGroupsCount := 0
	metricGroupsCount := 0
	for _, group := range server.Library.Groups {
		if group.Type == library.LibraryItemSourceGroup {
			sourceGroupsCount++
		} else {
			metricGroupsCount++
		}
	}
	return &statsResponse{
		Origins:      len(server.Catalog.Origins),
		Sources:      sourceSet.Size(),
		Metrics:      metricSet.Size(),
		Graphs:       len(server.Library.Graphs),
		Collections:  len(server.Library.Collections),
		SourceGroups: sourceGroupsCount,
		MetricGroups: metricGroupsCount,
	}
}
|
// 遍历操作字符串,统计 UD 和 LR 的出现次数
// U, up++
// D, up--
// R, right++
// L, right--
// 所有操作后 up 和 right 都为 0 则返回 true,否则 false
package judgecircle
// judgeCircle reports whether the move sequence returns the robot to
// the origin: U/D must cancel out, as must L/R. Any other character
// makes the sequence invalid (false), matching the original behavior.
func judgeCircle(moves string) bool {
	var up, right int
	for _, v := range moves {
		// Rune literals replace the raw code points 85/68/82/76 the old
		// switch used — same values, self-documenting.
		switch v {
		case 'U':
			up++
		case 'D':
			up--
		case 'R':
			right++
		case 'L':
			right--
		default:
			return false
		}
	}
	return up == 0 && right == 0
}
|
package rdb
import (
"testing"
)
// TestCounter_GetLargestEntries currently only smoke-tests Count with a
// fresh decoder's entries — it makes no assertions. The commented-out
// Entry literal looks like intended fixture data; TODO: feed it through
// the counter and assert on the largest-entries result.
func TestCounter_GetLargestEntries(t *testing.T) {
	//e := &Entry{
	//	Key:                "RELATIONSFOLLOWERIDS6420000664",
	//	Bytes:              1,
	//	Type:               "sortedset",
	//	NumOfElem:          1,
	//	LenOfLargestElem:   1,
	//	FieldOfLargestElem: "test",
	//}
	c := NewCounter()
	decoder := NewDecoder()
	c.Count(decoder.Entries)
}
|
package roles
import (
"testing"
"github.com/stretchr/testify/suite"
)
// TestHelpersSuite hooks the testify suite into the standard test runner.
func TestHelpersSuite(t *testing.T) {
	suite.Run(t, new(HelpersTestSuite))
}
// HelpersTestSuite groups tests for the rights parse/serialize helpers.
type HelpersTestSuite struct {
	suite.Suite
}
// TestParseRight expects ParseRights to keep numeric tokens, skip
// non-numeric ones, and return an empty (non-nil) set otherwise.
// The unused someRight2/someRight4 constants were removed.
func (suite *HelpersTestSuite) TestParseRight() {
	const someRight1 Right = 1
	const someRight3 Right = 3
	// "a" is not numeric and should be skipped.
	set1 := ParseRights("a,1,3")
	suite.Equal([]Right{someRight1, someRight3}, set1)
	// A wholly non-numeric list yields an empty set...
	set2 := ParseRights("admin")
	suite.Equal([]Right{}, set2)
	// ...as does the empty string.
	set3 := ParseRights("")
	suite.Equal([]Right{}, set3)
}
// TestSerializeRight expects SerializeRights to join rights with commas
// and render no arguments as the empty string. The unused
// someRight2/someRight4 constants were removed.
func (suite *HelpersTestSuite) TestSerializeRight() {
	const someRight1 Right = 1
	const someRight3 Right = 3
	set1 := SerializeRights(someRight1, someRight3)
	suite.Equal("1,3", set1)
	set2 := SerializeRights()
	suite.Equal("", set2)
}
|
package internal
import (
"context"
"errors"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/fsnotify/fsnotify"
"github.com/stretchr/testify/assert"
)
// Test_Dev_Escort_New checks that newEscort never returns nil, even for a
// zero-value config.
func Test_Dev_Escort_New(t *testing.T) {
	t.Parallel()
	assert.NotNil(t, newEscort(config{}))
}
// Test_Dev_Escort_Init verifies that init succeeds, resolves a root path
// containing "internal" and computes a non-empty binary output path.
func Test_Dev_Escort_Init(t *testing.T) {
	t.Parallel()
	at := assert.New(t)
	e := getEscort()
	at.Nil(e.init())
	// Clean up any binary that init may have left at binPath.
	defer func() {
		_ = os.Remove(e.binPath)
	}()
	at.Contains(e.root, "internal")
	at.NotEmpty(e.binPath)
}
// Test_Dev_Escort_Run starts the full run loop inside a temporary root and
// stops it by delivering SIGINT on the escort's signal channel after 500ms;
// run must then return nil.
func Test_Dev_Escort_Run(t *testing.T) {
	at := assert.New(t)
	setupCmd()
	defer teardownCmd()
	e := getEscort()
	var err error
	e.root, err = ioutil.TempDir("", "test_run")
	at.Nil(err)
	defer func() {
		_ = os.RemoveAll(e.root)
	}()
	// Buffered so the goroutine below never blocks on send.
	e.sig = make(chan os.Signal, 1)
	go func() {
		time.Sleep(time.Millisecond * 500)
		e.sig <- syscall.SIGINT
	}()
	at.Nil(e.run())
}
// Test_Dev_Escort_RunBin exercises runBin with the error-injecting command
// stub (errFlag) and empty stdout/stderr pipes.
func Test_Dev_Escort_RunBin(t *testing.T) {
	setupCmd(errFlag)
	defer teardownCmd()
	e := getEscort()
	e.bin = exec.Command("go", "version")
	// NOTE(review): the command is run to completion before runBin is
	// invoked — presumably so runBin operates on a finished process; confirm.
	_, err := e.bin.CombinedOutput()
	assert.Nil(t, err)
	rc := ioutil.NopCloser(strings.NewReader(""))
	e.stdoutPipe = rc
	e.stderrPipe = rc
	e.runBin()
}
// Test_Dev_Escort_WatchingPipes runs watchingPipes against a completed
// command; it only checks for crashes and makes no output assertions.
func Test_Dev_Escort_WatchingPipes(t *testing.T) {
	t.Parallel()
	e := getEscort()
	e.bin = exec.Command("go", "version")
	_, err := e.bin.CombinedOutput()
	assert.Nil(t, err)
	e.watchingPipes()
}
// Test_Dev_Escort_WatchingBin verifies watchingBin's debounce: hits that
// arrive within the 50ms delay window collapse into one hitFunc call, so
// three hits (two rapid, one after the window) yield exactly two calls.
func Test_Dev_Escort_WatchingBin(t *testing.T) {
	t.Parallel()
	var count int32
	e := getEscort()
	e.delay = time.Millisecond * 50
	e.hitCh = make(chan struct{})
	e.hitFunc = func() { atomic.AddInt32(&count, 1) }
	go e.watchingBin()
	// Two back-to-back hits fall into the same delay window.
	e.hitCh <- struct{}{}
	e.hitCh <- struct{}{}
	time.Sleep(time.Millisecond * 75)
	e.hitCh <- struct{}{}
	time.Sleep(time.Millisecond * 75)
	assert.Equal(t, int32(2), atomic.LoadInt32(&count))
}
// Test_Dev_Escort_WatchingFiles drives watchingFiles through its event
// loop: watcher errors are tolerated, chmod/remove and non-matching or
// excluded files are ignored, and matching .go files / created directories
// lead to hits. A final hit must still arrive after terminate.
func Test_Dev_Escort_WatchingFiles(t *testing.T) {
	t.Parallel()
	var (
		at  = assert.New(t)
		err error
	)
	e := getEscort()
	e.hitCh = make(chan struct{})
	e.w, err = fsnotify.NewWatcher()
	at.Nil(err)
	e.extensions = []string{"go"}
	e.watcherEvents = make(chan fsnotify.Event)
	e.watcherErrors = make(chan error)
	e.root, err = ioutil.TempDir("", "test_watch")
	at.Nil(err)
	defer func() {
		_ = os.RemoveAll(e.root)
	}()
	// A .git directory inside the root (should be skipped by the walker).
	_, err = ioutil.TempDir(e.root, ".git")
	at.Nil(err)
	newDir, err := ioutil.TempDir(e.root, "")
	at.Nil(err)
	ignoredFile, err := ioutil.TempFile(e.root, "")
	at.Nil(err)
	defer func() { at.Nil(ignoredFile.Close()) }()
	e.excludeFiles = []string{filepath.Base(ignoredFile.Name())}
	f, err := ioutil.TempFile(e.root, "*.go")
	at.Nil(err)
	defer func() { at.Nil(f.Close()) }()
	name := f.Name()
	go e.watchingFiles()
	// A watcher error must not stop the loop.
	e.watcherErrors <- errors.New("fake error")
	// Chmod/remove events and a non-matching filename are not hits.
	e.watcherEvents <- fsnotify.Event{Name: name, Op: fsnotify.Chmod}
	e.watcherEvents <- fsnotify.Event{Name: name, Op: fsnotify.Remove}
	e.watcherEvents <- fsnotify.Event{Name: name + "non", Op: fsnotify.Create}
	e.watcherEvents <- fsnotify.Event{Name: newDir, Op: fsnotify.Create}
	// After the events above, a hit must arrive within a second.
	select {
	case <-e.hitCh:
	case <-time.NewTimer(time.Second).C:
		at.Fail("should hit")
	}
	// An excluded file never hits; a matching created file does.
	e.watcherEvents <- fsnotify.Event{Name: ignoredFile.Name(), Op: fsnotify.Create}
	e.watcherEvents <- fsnotify.Event{Name: name, Op: fsnotify.Create}
	e.terminate()
	select {
	case <-e.hitCh:
	case <-time.NewTimer(time.Second).C:
		at.Fail("should hit")
	}
}
// Test_Dev_Escort_WalkForWatcher only checks that walkForWatcher tolerates
// a bogus path (" ") without panicking; no assertions are made.
func Test_Dev_Escort_WalkForWatcher(t *testing.T) {
	t.Parallel()
	e := getEscort()
	e.walkForWatcher(" ")
}
// Test_Dev_Escort_HitExtensions checks hitExtension: only the configured
// ".go" extension matches; the empty string and other extensions do not.
func Test_Dev_Escort_HitExtensions(t *testing.T) {
	t.Parallel()
	e := getEscort()
	e.extensions = []string{"go"}
	for ext, want := range map[string]bool{
		"":    false,
		".go": true,
		".js": false,
	} {
		assert.Equal(t, want, e.hitExtension(ext), ext)
	}
}
// Test_Dev_Escort_IgnoredDirs checks ignoredDirs: ".git" is always
// ignored, configured excludes are ignored, everything else is not.
func Test_Dev_Escort_IgnoredDirs(t *testing.T) {
	t.Parallel()
	e := getEscort()
	e.excludeDirs = []string{"a"}
	for dir, want := range map[string]bool{
		".git": true,
		"a":    true,
		"b":    false,
	} {
		assert.Equal(t, want, e.ignoredDirs(dir), dir)
	}
}
// Test_Dev_Escort_IgnoredFiles checks ignoredFiles against the configured
// exclude list.
func Test_Dev_Escort_IgnoredFiles(t *testing.T) {
	t.Parallel()
	e := getEscort()
	e.excludeFiles = []string{"a"}
	for file, want := range map[string]bool{
		"a": true,
		"b": false,
	} {
		assert.Equal(t, want, e.ignoredFiles(file), file)
	}
}
// Test_Dev_IsRemoved checks isRemoved against every fsnotify operation;
// only Remove must report true.
func Test_Dev_IsRemoved(t *testing.T) {
	t.Parallel()
	cases := []struct {
		op   fsnotify.Op
		want bool
	}{
		{op: fsnotify.Create, want: false},
		{op: fsnotify.Write, want: false},
		{op: fsnotify.Remove, want: true},
		{op: fsnotify.Rename, want: false},
		{op: fsnotify.Chmod, want: false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.op.String(), func(t *testing.T) {
			assert.Equal(t, tc.want, isRemoved(tc.op))
		})
	}
}
// Test_Dev_IsCreated checks isCreated against every fsnotify operation;
// only Create must report true.
func Test_Dev_IsCreated(t *testing.T) {
	t.Parallel()
	cases := []struct {
		op   fsnotify.Op
		want bool
	}{
		{op: fsnotify.Create, want: true},
		{op: fsnotify.Write, want: false},
		{op: fsnotify.Remove, want: false},
		{op: fsnotify.Rename, want: false},
		{op: fsnotify.Chmod, want: false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.op.String(), func(t *testing.T) {
			assert.Equal(t, tc.want, isCreated(tc.op))
		})
	}
}
// Test_Dev_IsChmoded checks isChmoded against every fsnotify operation;
// only Chmod must report true.
func Test_Dev_IsChmoded(t *testing.T) {
	t.Parallel()
	cases := []struct {
		op   fsnotify.Op
		want bool
	}{
		{op: fsnotify.Create, want: false},
		{op: fsnotify.Write, want: false},
		{op: fsnotify.Remove, want: false},
		{op: fsnotify.Rename, want: false},
		{op: fsnotify.Chmod, want: true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.op.String(), func(t *testing.T) {
			assert.Equal(t, tc.want, isChmoded(tc.op))
		})
	}
}
// getEscort builds a minimal escort rooted at the current directory with a
// cancellable context for use in these tests.
func getEscort() *escort {
	ctx, cancel := context.WithCancel(context.Background())
	return &escort{
		config: config{
			root:   ".",
			target: ".",
		},
		ctx:       ctx,
		terminate: cancel,
	}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tcpip
import (
"bytes"
"fmt"
"io"
"net"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestLimitedWriter_Write checks LimitedWriter: writes within the budget N
// succeed fully, an over-budget write is truncated to N bytes and returns
// io.ErrShortWrite, N is consumed by writes, and all accepted bytes reach
// the underlying writer.
func TestLimitedWriter_Write(t *testing.T) {
	var b bytes.Buffer
	l := LimitedWriter{
		W: &b,
		N: 5,
	}
	if n, err := l.Write([]byte{0, 1, 2}); err != nil {
		t.Errorf("got l.Write(3/5) = (_, %s), want nil", err)
	} else if n != 3 {
		t.Errorf("got l.Write(3/5) = (%d, _), want 3", n)
	}
	// Only 2 bytes of budget remain; a 3-byte write is short.
	if n, err := l.Write([]byte{3, 4, 5}); err != io.ErrShortWrite {
		t.Errorf("got l.Write(3/2) = (_, %s), want io.ErrShortWrite", err)
	} else if n != 2 {
		t.Errorf("got l.Write(3/2) = (%d, _), want 2", n)
	}
	if l.N != 0 {
		t.Errorf("got l.N = %d, want 0", l.N)
	}
	// Replenishing N permits further writes.
	l.N = 1
	if n, err := l.Write([]byte{5}); err != nil {
		t.Errorf("got l.Write(1/1) = (_, %s), want nil", err)
	} else if n != 1 {
		t.Errorf("got l.Write(1/1) = (%d, _), want 1", n)
	}
	if diff := cmp.Diff(b.Bytes(), []byte{0, 1, 2, 3, 4, 5}); diff != "" {
		t.Errorf("%T wrote incorrect data: (-want +got):\n%s", l, diff)
	}
}
// TestSubnetContains exercises Subnet.Contains. In each row, s is the
// subnet address, m the mask and a the probe address; all are padded to 4
// bytes before use.
func TestSubnetContains(t *testing.T) {
	tests := []struct {
		s    string
		m    string
		a    string
		want bool
	}{
		{"\xa0", "\xf0", "\x90", false},
		{"\xa0", "\xf0", "\xa0", true},
		{"\xa0", "\xf0", "\xa5", true},
		{"\xa0", "\xf0", "\xaf", true},
		{"\xa0", "\xf0", "\xb0", false},
		{"\xa0", "\xf0", "", false},
		{"\xc2\x80", "\xff\xf0", "\xc2\x80", true},
		{"\xc2\x80", "\xff\xf0", "\xc2\x00", false},
		{"\xc2\x00", "\xff\xf0", "\xc2\x00", true},
		{"\xc2\x00", "\xff\xf0", "\xc2\x80", false},
	}
	for _, tt := range tests {
		s, err := NewSubnet(AddrFromSlice(padTo4(tt.s)), MaskFromBytes(padTo4(tt.m)))
		if err != nil {
			t.Errorf("NewSubnet(%v, %v) = %v", tt.s, tt.m, err)
			continue
		}
		if got := s.Contains(AddrFromSlice(padTo4(tt.a))); got != tt.want {
			t.Errorf("Subnet(%v).Contains(%v) = %v, want %v", s, tt.a, got, tt.want)
		}
	}
}
// TestSubnetContainsDifferentLength verifies that a 4-byte (IPv4) subnet
// never contains a 16-byte (IPv6) address, even with matching leading bytes.
func TestSubnetContainsDifferentLength(t *testing.T) {
	s, err := NewSubnet(AddrFromSlice([]byte("\xa0\x00\x00\x00")), MaskFromBytes([]byte("\xf0\x00\x00\x00")))
	if err != nil {
		t.Fatalf("NewSubnet(a0::, f0::) = %v", err)
	}
	if got := s.Contains(AddrFrom16Slice([]byte("\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"))); got != false {
		t.Fatalf("Subnet(%v).Contains(a0::) = %v, want %v", s, got, false)
	}
}
// TestSubnetBits checks Subnet.Bits (counts of one and zero bits).
// NOTE(review): the expected values show that masks containing any
// non-contiguous byte (e.g. \x36, \x5c) report 0 ones / 32 zeros, while
// whole \xff bytes count 8 ones each — confirm against the Bits
// implementation.
func TestSubnetBits(t *testing.T) {
	tests := []struct {
		a     string
		want1 int
		want0 int
	}{
		{"\x00", 0, 32},
		{"\x36", 0, 32},
		{"\x5c", 0, 32},
		{"\x5c\x5c", 0, 32},
		{"\x5c\x36", 0, 32},
		{"\x36\x5c", 0, 32},
		{"\x36\x36", 0, 32},
		{"\xff", 8, 24},
		{"\xff\xff", 16, 16},
	}
	for _, tt := range tests {
		s := &Subnet{mask: MaskFromBytes(padTo4(tt.a))}
		got1, got0 := s.Bits()
		if got1 != tt.want1 || got0 != tt.want0 {
			t.Errorf("Subnet{mask: %x}.Bits() = %d, %d, want %d, %d", tt.a, got1, got0, tt.want1, tt.want0)
		}
	}
}
// TestSubnetPrefix verifies Subnet.Prefix, the number of leading one bits
// in the mask (masks are padded to 4 bytes).
func TestSubnetPrefix(t *testing.T) {
	tests := []struct {
		a    string
		want int
	}{
		{"\x00", 0},
		{"\x00\x00", 0},
		{"\x36", 0},
		{"\x86", 1},
		{"\xc5", 2},
		{"\xff\x00", 8},
		{"\xff\x36", 8},
		{"\xff\x8c", 9},
		{"\xff\xc8", 10},
		{"\xff", 8},
		{"\xff\xff", 16},
	}
	for _, tt := range tests {
		s := &Subnet{mask: MaskFromBytes(padTo4(tt.a))}
		got := s.Prefix()
		if got != tt.want {
			// Fixed: the failure message previously said ".Bits()" even
			// though this test exercises Prefix().
			t.Errorf("Subnet{mask: %x}.Prefix() = %d, want %d", tt.a, got, tt.want)
		}
	}
}
// TestSubnetCreation checks NewSubnet: an address with bits set outside
// its mask yields errSubnetAddressMasked; the empty (all-zero) pair is
// valid.
func TestSubnetCreation(t *testing.T) {
	tests := []struct {
		a    string
		m    string
		want error
	}{
		{"\xa0", "\xf0", nil},
		{"\xaa", "\xf0", errSubnetAddressMasked},
		{"", "", nil},
	}
	for _, tt := range tests {
		if _, err := NewSubnet(AddrFromSlice(padTo4(tt.a)), MaskFromBytes(padTo4(tt.m))); err != tt.want {
			t.Errorf("NewSubnet(%v, %v) = %v, want %v", tt.a, tt.m, err, tt.want)
		}
	}
}
// TestSubnetCreationDifferentLength checks that a 4-byte address paired
// with a 16-byte mask is rejected with errSubnetLengthMismatch.
func TestSubnetCreationDifferentLength(t *testing.T) {
	addr := []byte("\xa0\xa0\x00\x00")
	mask := []byte("\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
	if _, err := NewSubnet(AddrFromSlice(addr), MaskFromBytes(mask)); err != errSubnetLengthMismatch {
		t.Errorf("NewSubnet(%v, %v) = %v, want %v", addr, mask, err, errSubnetLengthMismatch)
	}
}
// TestAddressString round-trips canonical IPv6 text forms through
// net.ParseIP and Address.String, exercising the "::" zero-compression
// rules (longest run wins, leftmost on ties).
func TestAddressString(t *testing.T) {
	for _, want := range []string{
		// Taken from stdlib.
		"2001:db8::123:12:1",
		"2001:db8::1",
		"2001:db8:0:1:0:1:0:1",
		"2001:db8:1:0:1:0:1:0",
		"2001::1:0:0:1",
		"2001:db8:0:0:1::",
		"2001:db8::1:0:0:1",
		"2001:db8::a:b:c:d",
		// Leading zeros.
		"::1",
		// Trailing zeros.
		"8::",
		// No zeros.
		"1:1:1:1:1:1:1:1",
		// Longer sequence is after other zeros, but not at the end.
		"1:0:0:1::1",
		// Longer sequence is at the beginning, shorter sequence is at
		// the end.
		"::1:1:1:0:0",
		// Longer sequence is not at the beginning, shorter sequence is
		// at the end.
		"1::1:1:0:0",
		// Longer sequence is at the beginning, shorter sequence is not
		// at the end.
		"::1:1:0:0:1",
		// Neither sequence is at an end, longer is after shorter.
		"1:0:0:1::1",
		// Shorter sequence is at the beginning, longer sequence is not
		// at the end.
		"0:0:1:1::1",
		// Shorter sequence is at the beginning, longer sequence is at
		// the end.
		"0:0:1:1:1::",
		// Short sequences at both ends, longer one in the middle.
		"0:1:1::1:1:0",
		// Short sequences at both ends, longer one in the middle.
		"0:1::1:0:0",
		// Short sequences at both ends, longer one in the middle.
		"0:0:1::1:0",
		// Longer sequence surrounded by shorter sequences, but none at
		// the end.
		"1:0:1::1:0:1",
	} {
		addr := AddrFromSlice(net.ParseIP(want))
		if got := addr.String(); got != want {
			t.Errorf("Address(%x).String() = '%s', want = '%s'", addr, got, want)
		}
	}
}
// TestAddressWithPrefixSubnet checks AddressWithPrefix.Subnet for prefix
// lengths from -1 (clamped to empty) through 33 (clamped to full), against
// subnets built explicitly from the expected masked address and mask.
func TestAddressWithPrefixSubnet(t *testing.T) {
	tests := []struct {
		addr       string
		prefixLen  int
		subnetAddr string
		subnetMask string
	}{
		{"\xaa\x55\x33\x42", -1, "\x00\x00\x00\x00", "\x00\x00\x00\x00"},
		{"\xaa\x55\x33\x42", 0, "\x00\x00\x00\x00", "\x00\x00\x00\x00"},
		{"\xaa\x55\x33\x42", 1, "\x80\x00\x00\x00", "\x80\x00\x00\x00"},
		{"\xaa\x55\x33\x42", 7, "\xaa\x00\x00\x00", "\xfe\x00\x00\x00"},
		{"\xaa\x55\x33\x42", 8, "\xaa\x00\x00\x00", "\xff\x00\x00\x00"},
		{"\xaa\x55\x33\x42", 24, "\xaa\x55\x33\x00", "\xff\xff\xff\x00"},
		{"\xaa\x55\x33\x42", 31, "\xaa\x55\x33\x42", "\xff\xff\xff\xfe"},
		{"\xaa\x55\x33\x42", 32, "\xaa\x55\x33\x42", "\xff\xff\xff\xff"},
		{"\xaa\x55\x33\x42", 33, "\xaa\x55\x33\x42", "\xff\xff\xff\xff"},
	}
	for _, tt := range tests {
		ap := AddressWithPrefix{Address: AddrFromSlice([]byte(tt.addr)), PrefixLen: tt.prefixLen}
		gotSubnet := ap.Subnet()
		wantSubnet, err := NewSubnet(AddrFromSlice([]byte(tt.subnetAddr)), MaskFromBytes([]byte(tt.subnetMask)))
		if err != nil {
			t.Errorf("NewSubnet(%q, %q) failed: %s", tt.subnetAddr, tt.subnetMask, err)
			continue
		}
		if gotSubnet != wantSubnet {
			t.Errorf("got subnet = %q, want = %q", gotSubnet, wantSubnet)
		}
	}
}
// TestAddressUnspecified checks Address.Unspecified: only the all-zero
// address (after padding to 4 bytes) is unspecified.
func TestAddressUnspecified(t *testing.T) {
	tests := []struct {
		addr        string
		unspecified bool
	}{
		{
			addr:        "",
			unspecified: true,
		},
		{
			addr:        "\x00",
			unspecified: true,
		},
		{
			addr:        "\x01",
			unspecified: false,
		},
		{
			addr:        "\x00\x00",
			unspecified: true,
		},
		{
			addr:        "\x01\x00",
			unspecified: false,
		},
		{
			addr:        "\x00\x01",
			unspecified: false,
		},
		{
			addr:        "\x01\x01",
			unspecified: false,
		},
	}
	for _, test := range tests {
		t.Run(fmt.Sprintf("addr=%s", test.addr), func(t *testing.T) {
			if got := AddrFromSlice(padTo4(test.addr)).Unspecified(); got != test.unspecified {
				t.Fatalf("got addr.Unspecified() = %t, want = %t", got, test.unspecified)
			}
		})
	}
}
// TestAddressMatchingPrefix checks Address.MatchingPrefix, the number of
// leading bits two addresses share (addresses padded to 4 bytes, so 32 is
// a full match).
func TestAddressMatchingPrefix(t *testing.T) {
	tests := []struct {
		addrA  string
		addrB  string
		prefix uint8
	}{
		{
			addrA:  "\x01\x01",
			addrB:  "\x01\x01",
			prefix: 32,
		},
		{
			addrA:  "\x01\x01",
			addrB:  "\x01\x00",
			prefix: 15,
		},
		{
			addrA:  "\x01\x01",
			addrB:  "\x81\x00",
			prefix: 0,
		},
		{
			addrA:  "\x01\x01",
			addrB:  "\x01\x80",
			prefix: 8,
		},
		{
			addrA:  "\x01\x01",
			addrB:  "\x02\x80",
			prefix: 6,
		},
	}
	for _, test := range tests {
		if got := AddrFromSlice(padTo4(test.addrA)).MatchingPrefix(AddrFromSlice(padTo4(test.addrB))); got != test.prefix {
			t.Errorf("got (%s).MatchingPrefix(%s) = %d, want = %d", test.addrA, test.addrB, got, test.prefix)
		}
	}
}
// padTo4 returns partial as a byte slice right-padded with NUL bytes so it
// is at least 4 bytes long; longer inputs are returned unchanged.
func padTo4(partial string) []byte {
	b := []byte(partial)
	for len(b) < 4 {
		b = append(b, 0)
	}
	return b
}
|
package config
import (
"code.google.com/p/gcfg"
golog "github.com/op/go-logging"
)
// log is the package logger used to report configuration errors.
var log = golog.MustGetLogger("main")

// Config holds the application settings read from a gcfg file:
// the template path, the database URL and the list of admin names.
var Config = struct {
	Templates struct {
		Path string
	}
	Database struct {
		Url string
	}
	Admins struct {
		Name []string
	}
}{}
// LoadConfig reads the gcfg configuration file at filename into the
// package-level Config. A parse failure is logged but is not fatal,
// leaving Config in its zero state.
func LoadConfig(filename string) {
	err := gcfg.ReadFileInto(&Config, filename)
	if err != nil {
		// Fixed: Errorf is required for the %v verb to be interpolated;
		// Error would have printed the format string and error as plain args.
		log.Errorf("Error parsing config file: %v", err)
	}
}
|
package picodi_test
import (
"errors"
"fmt"
"math/rand"
"testing"
"github.com/quintans/picodi"
"github.com/stretchr/testify/require"
)
// Namer is anything that can report a name.
type Namer interface {
	Name() string
}

// Foo is a trivial named value used as an injection target in these tests.
type Foo struct {
	name string
}

// Name returns the name this Foo was created with.
func (foo Foo) Name() string {
	return foo.name
}
// Bar exercises every wiring flavor supported by the tests: named wiring,
// type-based wiring (empty tag), interface wiring, unexported fields,
// factory-provided values and transient instances.
type Bar struct {
	Foo    Foo   `wire:"foo"`
	Foo2   Foo   `wire:""`
	Other  Namer `wire:"foo"`
	inner  *Foo  `wire:"fooptr"`
	inner2 Foo   `wire:"foo"`
	Fun    Foo   `wire:"foofn"`
	Fun2   Foo   `wire:"foofn,transient"` // a new instance will be created
	FooPtr *Foo  `wire:"fooptr"`
	// afterWire records that the AfterWire hook ran.
	afterWire bool
}
// SetInner is a setter for the unexported inner field.
func (b *Bar) SetInner(v *Foo) {
	b.inner = v
}

// AfterWire is the post-wiring hook; it records that wiring completed and
// returns no cleanup function.
func (b *Bar) AfterWire() (picodi.Clean, error) {
	// after wire called
	b.afterWire = true
	return nil, nil
}
// TestStructWire wires a Bar with every tag flavor and checks that named,
// typed, interface and factory injections all resolve, that AfterWire
// runs, and that singleton vs transient semantics hold.
func TestStructWire(t *testing.T) {
	counter := 0
	di := picodi.New()
	di.NamedProvider("fooptr", &Foo{"Foo"})
	di.NamedProvider("foo", Foo{"Foo"})
	di.NamedProvider("foofn", func() Foo {
		counter++
		return Foo{fmt.Sprintf("FooFn-%d", counter)}
	})
	di.Providers(Foo{"Foo"})
	var bar = Bar{}
	_, err := di.DryRun(&bar)
	require.NoError(t, err)
	_, err = di.Wire(&bar)
	require.NoError(t, err)
	require.True(t, bar.afterWire, "AfterWire() was not called")
	if bar.Foo.Name() != "Foo" {
		t.Fatal("Expected \"Foo\" for Foo, got", bar.Foo.Name())
	}
	if bar.FooPtr.Name() != "Foo" {
		t.Fatal("Expected \"Foo\" for FooPtr, got", bar.FooPtr.Name())
	}
	if bar.Other.Name() != "Foo" {
		t.Fatal("Expected \"Foo\" for Other, got", bar.Other.Name())
	}
	if bar.Foo2.Name() != "Foo" {
		t.Fatal("Expected \"Foo\" for Foo2, got", bar.Foo2.Name())
	}
	if bar.inner.Name() != "Foo" {
		t.Fatal("Expected \"Foo\" for inner, got", bar.inner.Name())
	}
	if bar.inner2.Name() != "Foo" {
		// Fixed: the failure message previously printed bar.inner instead
		// of the field under test, bar.inner2.
		t.Fatal("Expected \"Foo\" for inner2, got", bar.inner2.Name())
	}
	if bar.Fun.Name() != "FooFn-1" {
		t.Fatal("Expected \"FooFn-1\" for Fun, got", bar.Fun.Name())
	}
	require.Equal(t, &bar.Foo, &bar.Foo2, "Injected instances are not singletons")
	// Fun2, marked as transient, will have different instance
	require.NotEqual(t, bar.Fun, bar.Fun2, "Injected instances are not transients")
}
// Faulty references a provider name that is never registered.
type Faulty struct {
	bar Bar `wire:"missing"`
}

// TestErrorWire checks that wiring an unknown name surfaces a descriptive
// error.
func TestErrorWire(t *testing.T) {
	var pico = picodi.New()
	_, err := pico.Wire(&Faulty{})
	require.Contains(t, err.Error(), "no provider was found for name")
}
// Message is the greeting payload type.
type Message string

// NewMessage is a provider for a canned Message.
func NewMessage() Message {
	return Message("Hi there!")
}

// Greeter produces a greeting Message.
type Greeter interface {
	Greet() Message
}

// GreeterImpl implements Greeter. Chaos is randomized per instance so the
// tests can tell instances (and cleanup invocations) apart.
type GreeterImpl struct {
	Message Message
	Chaos   int
}

// Greet returns the configured message.
func (g GreeterImpl) Greet() Message {
	return g.Message
}
// NewGreeter returns an implementation of Greeter together with a cleanup
// function. The cleanup sets Chaos to -1 on first call and is written so a
// second call would decrement further — the tests rely on it being invoked
// at most once.
func NewGreeter(m Message) (*GreeterImpl, picodi.Clean, error) {
	g := &GreeterImpl{
		Message: m,
		Chaos:   rand.Intn(100),
	}
	return g, func() {
		if g.Chaos <= -1 {
			// should never be called
			g.Chaos--
		} else {
			g.Chaos = -1
		}
	}, nil
}
// Event depends on a Greeter.
type Event struct {
	Greeter Greeter
}

// NewEvent receives a Greeter interface
func NewEvent(g Greeter) Event {
	return Event{Greeter: g}
}

// Start returns the greeting as a plain string.
func (e Event) Start() string {
	msg := e.Greeter.Greet()
	return string(msg)
}
// TestWireByName resolves a named provider twice, verifying singleton
// semantics and that the shared cleanup function is idempotent.
func TestWireByName(t *testing.T) {
	var di = picodi.New()
	err := di.NamedProvider("event", NewEvent)
	require.NoError(t, err)
	err = di.Providers(NewMessage, NewGreeter)
	require.NoError(t, err)
	var clean1 picodi.Clean
	e, clean1, err := di.Resolve("event")
	require.NoError(t, err)
	event1 := e.(Event)
	actual := event1.Start()
	require.Equal(t, "Hi there!", actual)
	// second resolve should return the same instance
	var clean2 picodi.Clean
	e, clean2, err = di.Resolve("event")
	require.NoError(t, err)
	event2 := e.(Event)
	require.NotNil(t, event2.Greeter)
	// Fixed: the original compared event1.Greeter with itself, which could
	// never fail. The singleton check must compare the two resolutions.
	if event1.Greeter != event2.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
	g := event1.Greeter.(*GreeterImpl)
	require.NotEqual(t, -1, g.Chaos)
	clean1()
	require.Equal(t, -1, g.Chaos)
	// clean1 == clean2 and calling a second time will have no effect
	clean2()
	require.Equal(t, -1, g.Chaos)
}
// TestWireFuncByName wires a function taking a map of named values; only
// providers whose type matches the map's value type (string) are injected,
// so the int-valued "message3" is filtered out.
func TestWireFuncByName(t *testing.T) {
	di := picodi.New()
	di.NamedProviders(picodi.NamedProviders{
		"message1": "hello",
		"message2": "world",
		"message3": 1, // this will not inject
	})
	var m1, m2 string
	size := 0
	fn := func(m map[picodi.Named]string) {
		m1 = m["message1"]
		m2 = m["message2"]
		size = len(m)
	}
	_, err := di.DryRun(fn)
	require.NoError(t, err)
	// only strings will passed to factory
	clean, err := di.Wire(fn)
	require.NoError(t, err)
	require.Nil(t, clean)
	require.Equal(t, m1, "hello")
	require.Equal(t, m2, "world")
	require.Equal(t, size, 2)
}
// TestWire checks that all three resolution styles — named Resolve,
// GetByType and function wiring — hand back the same singleton Greeter.
func TestWire(t *testing.T) {
	var di = picodi.New()
	di.NamedProviders(picodi.NamedProviders{
		"event": NewEvent,
	})
	di.Providers(NewMessage, NewGreeter)
	e, _, err := di.Resolve("event")
	require.NoError(t, err)
	event1a := e.(Event)
	actual := event1a.Start()
	require.Equal(t, "Hi there!", actual)
	e, _, err = di.Resolve("event")
	require.NoError(t, err)
	event1b := e.(Event)
	if event1a.Greeter != event1b.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
	// Resolution by concrete type.
	di.Providers(NewEvent)
	e, _, err = di.GetByType(Event{})
	require.NoError(t, err)
	event2a := e.(Event)
	require.NotNil(t, event2a.Greeter)
	e, _, err = di.GetByType(Event{})
	require.NoError(t, err)
	event2b := e.(Event)
	require.NotNil(t, event2b.Greeter)
	if event2a.Greeter != event2b.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
	// Resolution by wiring a function parameter.
	event3a := Event{}
	_, err = di.Wire(func(g Greeter) {
		event3a.Greeter = g
	})
	require.NoError(t, err)
	require.NotNil(t, event3a.Greeter)
	event3b := Event{}
	_, err = di.Wire(func(g Greeter) {
		event3b.Greeter = g
	})
	require.NoError(t, err)
	require.NotNil(t, event3b.Greeter)
	if event3a.Greeter != event3b.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
	// The same singleton must be shared across all resolution styles.
	if event1a.Greeter != event2a.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
	if event2a.Greeter != event3a.Greeter {
		t.Fatal("Injected instances are not singletons")
	}
}
// TestDryRunWithError checks that DryRun reports a missing provider —
// NewMessage is deliberately not registered, so Greeter's dependency
// cannot be satisfied.
func TestDryRunWithError(t *testing.T) {
	var di = picodi.New()
	err := di.Providers(NewGreeter, NewEvent)
	require.NoError(t, err)
	_, err = di.DryRun(func(event Event) {})
	require.Contains(t, err.Error(), "no provider was found for type")
}
// errGrumpy is the sentinel failure returned by NewGrumpyEvent.
var errGrumpy = errors.New("could not create event: I am grumpy")

// NewGrumpyEvent is a provider that always fails with errGrumpy.
func NewGrumpyEvent(g Greeter) (Event, error) {
	return Event{}, errGrumpy
}
// TestWireWithError checks that a provider error propagates from Resolve
// and remains matchable with errors.Is.
func TestWireWithError(t *testing.T) {
	var di = picodi.New()
	err := di.NamedProvider("event", NewGrumpyEvent)
	require.NoError(t, err)
	err = di.Providers(NewMessage, NewGreeter)
	require.NoError(t, err)
	_, _, err = di.Resolve("event") // will wire if not already
	require.Error(t, err)
	require.True(t, errors.Is(err, errGrumpy), err)
}
|
package design
import (
. "github.com/goadesign/goa/design"
. "github.com/goadesign/goa/design/apidsl"
)
// The "counters" resource exposes CRUD operations over named counters.
// Fixed copy-paste descriptions: "list" said "Retrieve all Upstreams." and
// "create"/"delete" both said "Get counter by name."
var _ = Resource("counters", func() {
	BasePath("/counters")
	DefaultMedia(Counter)
	Security(APIKey)
	Action("list", func() {
		Routing(
			GET(""),
		)
		Description("Retrieve all counters.")
		Response(OK, State)
		Response(NotFound)
	})
	Action("show", func() {
		Routing(
			GET("/:name"),
		)
		Description("Get counter by name.")
		Params(func() {
			Param("name", String, "Counter name")
		})
		Response(OK)
		Response(NotFound)
	})
	Action("create", func() {
		Routing(
			POST(""),
		)
		Description("Create a new counter.")
		Params(func() {
			Param("name", String, "Counter name.")
			Required("name")
		})
		Response(OK, NoContent)
		Response(NotFound)
	})
	Action("delete", func() {
		Routing(
			DELETE("/:name"),
		)
		Description("Delete counter by name.")
		Params(func() {
			Param("name", String, "Counter name")
			Required("name")
		})
		Response(OK, NoContent)
		Response(NotFound)
	})
})
// The "inc" sub-resource adds a value to a named counter; the request
// params are bound onto an app.Counter payload.
var _ = Resource("inc", func() {
	Parent("counters")
	BasePath("/inc")
	DefaultMedia(Counter)
	Security(APIKey)
	Action("increment", func() {
		Routing(
			POST(""),
		)
		Description("Increment a counter.")
		Params(func() {
			Param("name", String, "Counter name.")
			Param("value", Integer, "Value to add to counter.")
			Required("name", "value")
		})
		BindTo("counter", "&app.Counter", func() {
			BindParam("Name", "Name")
			BindParam("Value", "Value")
		})
		Response(OK, "text/plain")
		Response(NotFound)
	})
})
// The "dec" sub-resource subtracts a value from a named counter.
// Fixed: the "value" param description said "Value to add to counter.",
// copied from the increment action.
var _ = Resource("dec", func() {
	Parent("counters")
	BasePath("/dec")
	DefaultMedia(Counter)
	Security(APIKey)
	Action("decrement", func() {
		Routing(
			POST(""),
		)
		Description("Decrement a counter.")
		Params(func() {
			Param("name", String, "Counter name.")
			Param("value", Integer, "Value to subtract from counter.")
			Required("name", "value")
		})
		BindTo("counter", "&app.Counter", func() {
			BindParam("Name", "Name")
			BindParam("Value", "Value")
		})
		Response(OK, "text/plain")
		Response(NotFound)
	})
})
|
package conf
import (
"fmt"
"github.com/spf13/viper"
)
// init loads "config.*" from the current directory via viper; the process
// cannot run without configuration, so a read failure panics.
func init() {
	viper.SetConfigName("config")
	viper.AddConfigPath(".")
	err := viper.ReadInConfig()
	if err != nil {
		panic(fmt.Errorf("Error reading config file: %s\n", err.Error()))
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package heapprofiler
import (
"context"
"fmt"
"math"
"os"
"strconv"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/server/dumpstore"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// Cluster settings controlling how many memory profiles are retained per
// ramp-up and how much disk all preserved profiles may occupy in total.
var (
	maxProfiles = settings.RegisterIntSetting(
		"server.mem_profile.max_profiles",
		"maximum number of profiles to be kept per ramp-up of memory usage. "+
			"A ramp-up is defined as a sequence of profiles with increasing usage.",
		5,
	)
	maxCombinedFileSize = settings.RegisterByteSizeSetting(
		"server.mem_profile.total_dump_size_limit",
		"maximum combined disk size of preserved memory profiles",
		128<<20, // 128MiB
	)
)
// init registers the legacy server.heap_profile.* setting names and
// immediately marks them retired, directing users to the
// server.mem_profile.* replacements registered above.
func init() {
	s := settings.RegisterIntSetting(
		"server.heap_profile.max_profiles", "use server.mem_profile.max_profiles instead", 5)
	s.SetRetired()
	b := settings.RegisterByteSizeSetting(
		"server.heap_profile.total_dump_size_limit",
		"use server.mem_profile.total_dump_size_limit instead",
		128<<20, // 128MiB
	)
	b.SetRetired()
}
// profileStore represents the directory where heap profiles are stored.
// It supports automatic garbage collection of old profiles.
type profileStore struct {
	*dumpstore.DumpStore
	// prefix and suffix frame the file names this store owns; the
	// timestamp and heap-usage components sit between them.
	prefix string
	suffix string
	st     *cluster.Settings
}
// newProfileStore constructs a profileStore over the given dump store,
// using prefix/suffix to recognize and generate its own file names.
func newProfileStore(
	store *dumpstore.DumpStore, prefix, suffix string, st *cluster.Settings,
) *profileStore {
	return &profileStore{
		DumpStore: store,
		prefix:    prefix,
		suffix:    suffix,
		st:        st,
	}
}
// gcProfiles triggers the dump store's garbage collection, with this
// profileStore acting as the Dumper that decides which files to keep.
func (s *profileStore) gcProfiles(ctx context.Context, now time.Time) {
	s.GC(ctx, now, s)
}
// makeNewFileName produces the full path for a new profile dump named
// "<prefix>.<timestamp>.<curHeap><suffix>".
func (s *profileStore) makeNewFileName(timestamp time.Time, curHeap int64) string {
	// We place the timestamp immediately after the (immutable) file
	// prefix to ensure that a directory listing sort also sorts the
	// profiles in timestamp order.
	fileName := fmt.Sprintf("%s.%s.%d%s",
		s.prefix, timestamp.Format(timestampFormat), curHeap, s.suffix)
	return s.GetFullPath(fileName)
}
// PreFilter is part of the dumpstore.Dumper interface. It delegates to
// cleanupLastRampup, bounded by the server.mem_profile.max_profiles
// setting, and reports which file indexes must be preserved.
func (s *profileStore) PreFilter(
	ctx context.Context, files []os.FileInfo, cleanupFn func(fileName string) error,
) (preserved map[int]bool, _ error) {
	maxP := maxProfiles.Get(&s.st.SV)
	preserved = s.cleanupLastRampup(ctx, files, maxP, cleanupFn)
	return
}
// CheckOwnsFile is part of the dumpstore.Dumper interface. A file belongs
// to this store iff its name parses with the store's prefix/format.
func (s *profileStore) CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool {
	ok, _, _ := s.parseFileName(ctx, fi.Name())
	return ok
}
// cleanupLastRampup parses the filenames in files to detect the
// last ramp-up (sequence of increasing heap usage). If there
// are more than maxP entries in the last ramp-up, the fn closure
// is called for each of them.
//
// files is assumed to be sorted in chronological order already,
// oldest entry first.
//
// The preserved return value contains the indexes in files
// corresponding to the last ramp-up that were not passed to fn.
func (s *profileStore) cleanupLastRampup(
	ctx context.Context, files []os.FileInfo, maxP int64, fn func(string) error,
) (preserved map[int]bool) {
	preserved = make(map[int]bool)
	// Walk backwards from the newest file; within the final ramp-up heap
	// usage decreases as we go back in time.
	curMaxHeap := uint64(math.MaxUint64)
	numFiles := int64(0)
	for i := len(files) - 1; i >= 0; i-- {
		ok, _, curHeap := s.parseFileName(ctx, files[i].Name())
		if !ok {
			// Not one of our files; leave it alone.
			continue
		}
		if curHeap > curMaxHeap {
			// This is the end of a ramp-up sequence. We're done.
			break
		}
		// Keep the currently seen heap for the next iteration.
		curMaxHeap = curHeap
		// We saw one file.
		numFiles++
		// Did we encounter the maximum?
		if numFiles > maxP {
			// Yes: clean this up.
			if err := fn(files[i].Name()); err != nil {
				log.Warningf(ctx, "%v", err)
			}
		} else {
			// No: we preserve this file.
			preserved[i] = true
		}
	}
	return preserved
}
// parseFileName retrieves the components of a file name generated by
// makeNewFileName(). Names without this store's prefix are silently
// ignored (ok=false); timestamp or heap-usage parse failures are logged
// and also return ok=false.
func (s *profileStore) parseFileName(
	ctx context.Context, fileName string,
) (ok bool, timestamp time.Time, heapUsage uint64) {
	parts := strings.Split(fileName, ".")
	numParts := 4 /* prefix, date/time, milliseconds, heap usage */
	if len(parts) < numParts || parts[0] != s.prefix {
		// Not for us. Silently ignore.
		return
	}
	if len(parts[2]) < 3 {
		// At some point in the v20.2 cycle the timestamps were generated
		// with format .999, which caused the trailing zeroes to be
		// omitted. During parsing, they must be present, so re-add them
		// here.
		//
		// TODO(knz): Remove this code in v21.1.
		parts[2] += "000"[:3-len(parts[2])]
	}
	maybeTimestamp := parts[1] + "." + parts[2]
	var err error
	timestamp, err = time.Parse(timestampFormat, maybeTimestamp)
	if err != nil {
		log.Warningf(ctx, "%v", errors.Wrapf(err, "%s", fileName))
		return
	}
	heapUsage, err = strconv.ParseUint(parts[3], 10, 64)
	if err != nil {
		log.Warningf(ctx, "%v", errors.Wrapf(err, "%s", fileName))
		return
	}
	ok = true
	return
}
|
package db
import "github.com/jmoiron/sqlx"
const (
	// GET_STATUS_VISIT selects all rows from the status_visits table.
	GET_STATUS_VISIT = "SELECT id, status_visit FROM status_visits"
)
// TStatusVisit maps one row of the status_visits table.
type TStatusVisit struct {
	ID          int64  `db:"id"`
	StatusVisit string `db:"status_visit"`
}
// GetStatusVisits loads every row of status_visits into a slice; the slice
// is returned (possibly empty) together with any query error.
func GetStatusVisits(database *sqlx.DB) ([]TStatusVisit, error) {
	statusVisits := make([]TStatusVisit, 0)
	err := database.Select(&statusVisits, GET_STATUS_VISIT)
	return statusVisits, err
}
|
package server
import (
"W2ONLINE/AssessmentROUND2/bottlehtml/btm/database_set"
"fmt"
"html/template"
"math/rand"
"net/http"
"strings"
"time"
)
//type MyMux struct {
//}
// MyForm mirrors the bottle submission fields.
// NOTE(review): this type is not referenced by the handlers below —
// presumably intended for template binding; confirm before removing.
type MyForm struct {
	NAME    string
	DATE    string
	MESSAGE string
}
//func (p *MyMux) ServeHTTP(w http.ResponseWriter, r *http.Request){
// if r.URL.Path == "/"{
// SayHelloName(w,r)
// return
// }
// if r.URL.Path == "/about"{
// About(w,r)
// return
// }
// if r.URL.Path == "/login"{
// Login(w,r)
// return
// }
// http.NotFound(w,r)
// return
//}
// SayHelloName logs the parsed request parameters server-side and greets
// the client (the response text is Chinese for "Welcome to Drift Bottle!").
func SayHelloName(w http.ResponseWriter, r *http.Request) {
	// Parse the URL-passed parameters; they are not parsed by default.
	r.ParseForm()
	fmt.Println(r.Form)             // server-side debug output
	fmt.Println("path", r.URL.Path) // r.URL carries the URL-related methods and fields
	fmt.Println("scheme", r.URL.Scheme)
	fmt.Println(r.Form["url_long"])
	for k, v := range r.Form {
		fmt.Println("key:", k)
		fmt.Println("val:", strings.Join(v, ""))
	}
	fmt.Fprintf(w, "欢迎来到漂流瓶!") // message sent to the client
}
func About(w http.ResponseWriter,r *http.Request){
fmt.Fprintf(w,"The Test Of Server.\n")
}
// Login renders the bottle form on GET; any other method echoes the
// submitted fields back to the client and stores the first date/author/
// message triple in the database.
func Login(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method:", r.Method)
	if r.Method == "GET" {
		// Fixed: the template error was ignored, so a missing
		// bottle.html caused a nil-pointer panic on Execute.
		t, err := template.ParseFiles("bottle.html")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if err := t.Execute(w, nil); err != nil {
			fmt.Println("template execute:", err)
		}
		return
	}
	// Fixed: ParseForm error was silently ignored.
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	Bottle := r.Form["bottle"]
	Date := r.Form["date"]
	Author := r.Form["author"]
	Message := r.Form["message"]
	fmt.Println("Bottle:", Bottle)
	fmt.Println("Date:", Date)
	fmt.Println("Author:", Author)
	fmt.Println("Message:", Message)
	for i, v := range Bottle {
		fmt.Println(i)
		fmt.Fprintf(w, "Bottle:%v\n", v)
	}
	for k, n := range Date {
		fmt.Println(k)
		fmt.Fprintf(w, "Date:%v\n", n)
	}
	for i, v := range Author {
		fmt.Println(i)
		fmt.Fprintf(w, "Name:%v\n", v)
	}
	for j, b := range Message {
		fmt.Println(j)
		fmt.Fprintf(w, "Message:%v\n", b)
	}
	//database_set.ADD_DB("2019-11-11","LYK","TEST")
	// Fixed: indexing [0] unconditionally panicked when any field was
	// absent from the form; only insert when all three are present.
	if len(Date) > 0 && len(Author) > 0 && len(Message) > 0 {
		database_set.ADD_DB(Date[0], Author[0], Message[0])
	}
}
// Query writes the database contents to the response via Query_DB.
func Query(w http.ResponseWriter, r *http.Request) {
	database_set.Query_DB(w)
}
// Delete removes a record chosen at random: it seeds math/rand with the
// current time and deletes the id rand.Intn(15) (0–14; presumably matching
// the table's id range — TODO confirm).
func Delete(w http.ResponseWriter, r *http.Request) {
	rand.Seed(time.Now().UnixNano())
	database_set.DEL_DB(rand.Intn(15), w)
}
|
package main
import "os"
import "fmt"
// main prints the name of every entry in the /sys directory.
func main() {
	const (
		path = "/sys"
	)
	fd, err := os.Open(path)
	if err != nil {
		panic(err.Error())
	}
	// Fixed: the directory handle was never closed.
	defer fd.Close()
	// Readdir(0) reads all remaining directory entries at once.
	// Fixed: this error was previously ignored.
	fileinfo, err := fd.Readdir(0)
	if err != nil {
		panic(err.Error())
	}
	for _, fi := range fileinfo {
		fmt.Println(fi.Name())
	}
}
|
package acceptance
import (
"context"
"os"
"testing"
. "github.com/databrickslabs/terraform-provider-databricks/access"
"github.com/databrickslabs/terraform-provider-databricks/identity"
"github.com/databrickslabs/terraform-provider-databricks/common"
"github.com/databrickslabs/terraform-provider-databricks/internal/acceptance"
"github.com/databrickslabs/terraform-provider-databricks/qa"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAccSecretAclResource provisions a group, a secret scope and a READ
// ACL via Terraform, then verifies through the API that the scope carries
// exactly two ACLs: READ for the group and MANAGE for the caller.
func TestAccSecretAclResource(t *testing.T) {
	if _, ok := os.LookupEnv("CLOUD_ENV"); !ok {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	acceptance.AccTest(t, resource.TestCase{
		Steps: []resource.TestStep{
			{
				Config: qa.EnvironmentTemplate(t, `
				resource "databricks_group" "ds" {
					display_name = "data-scientists-{var.RANDOM}"
				}
				resource "databricks_secret_scope" "app" {
					name = "app-{var.RANDOM}"
				}
				resource "databricks_secret_acl" "ds_can_read_app" {
					principal = databricks_group.ds.display_name
					permission = "READ"
					scope = databricks_secret_scope.app.name
				}`),
				Check: func(s *terraform.State) error {
					client := common.CommonEnvironmentClient()
					ctx := context.Background()
					usersAPI := identity.NewUsersAPI(ctx, client)
					me, err := usersAPI.Me()
					require.NoError(t, err)
					secretACLAPI := NewSecretAclsAPI(ctx, client)
					scope := s.RootModule().Resources["databricks_secret_scope.app"].Primary.ID
					acls, err := secretACLAPI.List(scope)
					require.NoError(t, err)
					assert.Equal(t, 2, len(acls))
					m := map[string]string{}
					for _, acl := range acls {
						m[acl.Principal] = string(acl.Permission)
					}
					group := s.RootModule().Resources["databricks_group.ds"].Primary.Attributes["display_name"]
					require.Contains(t, m, group)
					assert.Equal(t, "READ", m[group])
					// The creating identity implicitly holds MANAGE.
					assert.Equal(t, "MANAGE", m[me.UserName])
					return nil
				},
			},
		},
	})
}
// TestAccSecretAclResourceDefaultPrincipal creates a secret scope with
// initial_manage_principal = "users" plus an explicit READ ACL for the same
// "users" principal, then asserts exactly one ACL remains and that it grants
// READ.
//
// Fix: consistently with TestAccSecretAclResource, skip unless CLOUD_ENV is
// set, so plain `go test` runs do not attempt to reach a live workspace.
func TestAccSecretAclResourceDefaultPrincipal(t *testing.T) {
	if _, ok := os.LookupEnv("CLOUD_ENV"); !ok {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	acceptance.AccTest(t, resource.TestCase{
		Steps: []resource.TestStep{
			{
				Config: qa.EnvironmentTemplate(t, `
				resource "databricks_secret_scope" "app" {
					name = "app-{var.RANDOM}"
					initial_manage_principal = "users"
				}
				resource "databricks_secret_acl" "ds_can_read_app" {
					principal = "users"
					permission = "READ"
					scope = databricks_secret_scope.app.name
				}`),
				Check: acceptance.ResourceCheck("databricks_secret_scope.app",
					func(ctx context.Context, client *common.DatabricksClient, id string) error {
						secretACLAPI := NewSecretAclsAPI(ctx, client)
						acls, err := secretACLAPI.List(id)
						require.NoError(t, err)
						assert.Equal(t, 1, len(acls))
						assert.Equal(t, "users", acls[0].Principal)
						assert.Equal(t, "READ", string(acls[0].Permission))
						return nil
					}),
			},
		},
	})
}
|
package builder
// director drives a HouseBuilderIFace through a fixed sequence of
// construction steps to produce a House (classic Builder-pattern director).
type director struct {
	builder HouseBuilderIFace
}
// NewDirector wires the given house builder into a director that knows the
// construction sequence.
func NewDirector(b HouseBuilderIFace) director {
	d := director{}
	d.builder = b
	return d
}
// BuildHouse runs the standard construction sequence against the configured
// builder and returns the finished House.
//
// Fix: the original invoked SetWindowsType twice (once at the start and again
// immediately before CreateHouse); the redundant second call is removed.
func (d *director) BuildHouse() House {
	d.builder.SetWindowsType()
	d.builder.SetFloorType()
	d.builder.SetNumOfDoors()
	d.builder.SetNumOfWindows()
	d.builder.SetSwimmingPool()
	return d.builder.CreateHouse()
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package magic
import (
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"encoding/binary"
"errors"
"io"
"os"
"path"
"strings"
"github.com/xi2/xz"
)
// FileType identifies the package or signature container format detected
// from a file's leading magic bytes (see Detect and detectZip).
type FileType int

// CompressionType identifies the stream compression wrapping a file
// (see DetectCompressed and Decompress).
type CompressionType int

const (
	FileTypeUnknown FileType = iota
	FileTypeRPM                // RPM package (lead magic ed ab ee db)
	FileTypeDEB                // Debian package (ar archive starting with "debian")
	FileTypePGP                // PGP message/signature, armored or binary
	FileTypeJAR                // zip containing META-INF/MANIFEST.MF
	FileTypePKCS7              // DER blob containing the signedData OID
	FileTypePECOFF             // Windows PE/COFF executable
	FileTypeMSI                // Windows Installer package
	FileTypeCAB                // Microsoft cabinet archive ("MSCF")
	FileTypeAppManifest        // XML containing an <assembly> element
	FileTypeCAT                // DER blob containing the certTrustList OID
	FileTypeAPPX               // zip containing AppxManifest.xml
	FileTypeVSIX               // zip containing extension.vsixmanifest
	FileTypeXAP                // zip containing AppManifest.xaml
	FileTypeAPK                // zip containing AndroidManifest.xml
	FileTypeMachO              // Mach-O binary
	FileTypeMachOFat           // multi-architecture (fat) Mach-O binary
	FileTypeIPA                // zip containing a *.app/Info.plist
	FileTypeXAR                // xar archive ("xar!")
)

const (
	CompressedNone CompressionType = iota // no outer compression
	CompressedGzip                        // gzip stream (magic 1f 8b)
	CompressedXz                          // xz stream (magic fd "7zXZ" 00)
)
// hasPrefix reports whether the buffered stream begins with blob, peeking
// without consuming any bytes.
func hasPrefix(br *bufio.Reader, blob []byte) bool {
	return atPosition(br, blob, 0)
}
func contains(br *bufio.Reader, blob []byte, n int) bool {
d, _ := br.Peek(n)
if len(d) < len(blob) {
return false
}
return bytes.Contains(d, blob)
}
func atPosition(br *bufio.Reader, blob []byte, n int) bool {
l := n + len(blob)
d, _ := br.Peek(l)
if len(d) < l {
return false
}
return bytes.Equal(d[n:], blob)
}
// Detect a handful of package and signature file types based on the first few
// bytes of the file contents. Cases are checked in order; all probes peek
// into a bufio.Reader so nothing is consumed from r's buffered view.
func Detect(r io.Reader) FileType {
	br := bufio.NewReader(r)
	switch {
	case hasPrefix(br, []byte{0xed, 0xab, 0xee, 0xdb}):
		// RPM lead magic.
		return FileTypeRPM
	case hasPrefix(br, []byte("!<arch>\ndebian")):
		// ar(1) archive whose first member name starts with "debian".
		return FileTypeDEB
	case hasPrefix(br, []byte("-----BEGIN PGP")):
		// ASCII-armored PGP data.
		return FileTypePGP
	case contains(br, []byte{0x06, 0x09, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x0A, 0x01}, 256):
		// OID certTrustList
		return FileTypeCAT
	case contains(br, []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02}, 256):
		// OID signedData
		return FileTypePKCS7
	case isTar(br):
		return detectTar(br)
	case hasPrefix(br, []byte("MZ")):
		// DOS header; the PE header offset lives at 0x3c. Only report PECOFF
		// when the "PE\0\0" signature is actually present at that offset.
		if blob, _ := br.Peek(0x3e); len(blob) == 0x3e {
			reloc := binary.LittleEndian.Uint16(blob[0x3c:0x3e])
			if blob, err := br.Peek(int(reloc) + 4); err == nil {
				if bytes.Equal(blob[reloc:reloc+4], []byte("PE\x00\x00")) {
					return FileTypePECOFF
				}
			}
		}
	case hasPrefix(br, []byte{0xd0, 0xcf}):
		// Compound-file prefix used by MSI packages.
		return FileTypeMSI
	case hasPrefix(br, []byte("MSCF")):
		// Microsoft cabinet archive.
		return FileTypeCAB
	case contains(br, []byte("<assembly"), 256),
		contains(br, []byte(":assembly"), 256):
		// XML application manifest (plain or namespace-prefixed element).
		return FileTypeAppManifest
	case hasPrefix(br, []byte{0xcf, 0xfa, 0xed, 0xfe}), hasPrefix(br, []byte{0xce, 0xfa, 0xed, 0xfe}):
		// Mach-O, 64-bit and 32-bit little-endian magics.
		return FileTypeMachO
	case hasPrefix(br, []byte{0xca, 0xfe, 0xba, 0xbe}):
		// Mach-O fat/universal binary magic.
		return FileTypeMachOFat
	case hasPrefix(br, []byte{0x78, 0x61, 0x72, 0x21}):
		// "xar!" archive magic.
		return FileTypeXAR
	case hasPrefix(br, []byte{0x89}), hasPrefix(br, []byte{0xc2}), hasPrefix(br, []byte{0xc4}):
		// Single leading bytes treated as binary PGP packet tags — a weak
		// signal, so this case is deliberately last.
		return FileTypePGP
	}
	return FileTypeUnknown
}
// DetectCompressed classifies f, looking through one optional layer of gzip
// or xz compression, and reports both the detected file type and the
// compression wrapping it. Zip-based formats are dispatched to detectZip,
// which re-reads f via random access; all other probes peek through a
// buffered reader.
func DetectCompressed(f *os.File) (FileType, CompressionType) {
	br := bufio.NewReader(f)
	ftype := FileTypeUnknown
	switch {
	case hasPrefix(br, []byte{0x1f, 0x8b}):
		// gzip magic; decompress just enough to check for a tar inside.
		zr, err := gzip.NewReader(br)
		if err == nil {
			zbr := bufio.NewReader(zr)
			if isTar(zbr) {
				ftype = detectTar(zbr)
			}
		}
		return ftype, CompressedGzip
	case hasPrefix(br, []byte("\xfd7zXZ\x00")):
		// xz magic; same tar probe inside the decompressed stream.
		zr, err := xz.NewReader(br, 0)
		if err == nil {
			zbr := bufio.NewReader(zr)
			if isTar(zbr) {
				ftype = detectTar(zbr)
			}
		}
		return ftype, CompressedXz
	case hasPrefix(br, []byte{0x50, 0x4b, 0x03, 0x04}):
		// "PK\x03\x04" local-file header: a zip container, never compressed
		// at this layer.
		return detectZip(f), CompressedNone
	}
	// Not compressed and not a zip: fall back to magic-byte detection on the
	// same buffered reader (peeked bytes are still available).
	return Detect(br), CompressedNone
}
// Decompress wraps r with the decompressor matching ctype. CompressedNone
// returns r unchanged; an unrecognized type yields an error.
func Decompress(r io.Reader, ctype CompressionType) (io.Reader, error) {
	if ctype == CompressedNone {
		return r, nil
	}
	if ctype == CompressedGzip {
		return gzip.NewReader(r)
	}
	if ctype == CompressedXz {
		return xz.NewReader(r, 0)
	}
	return nil, errors.New("invalid compression type")
}
// isTar reports whether the stream looks like a tar archive by checking for
// the "ustar" magic at byte offset 257, where the tar header stores it.
func isTar(br *bufio.Reader) bool {
	return atPosition(br, []byte("ustar"), 257)
}
// detectTar classifies the contents of a tar stream. Currently a stub that
// always returns FileTypeUnknown — presumably a placeholder for member-based
// detection (TODO confirm intended behavior).
func detectTar(r io.Reader) FileType {
	return FileTypeUnknown
}
// detectZip classifies a zip-based container by scanning member file names
// for well-known manifests. The first matching member wins, except that a
// JAR manifest is only reported if no more specific format matched, since
// APK/XAP/APPX files also carry one.
func detectZip(f *os.File) FileType {
	end, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return FileTypeUnknown
	}
	archive, err := zip.NewReader(f, end)
	if err != nil {
		return FileTypeUnknown
	}
	jarManifest := false
	for _, member := range archive.File {
		// Normalize absolute and dotted paths before comparing.
		cleaned := member.Name
		if strings.HasPrefix(cleaned, "/") {
			cleaned = "." + cleaned
		}
		cleaned = path.Clean(cleaned)
		switch cleaned {
		case "AndroidManifest.xml":
			return FileTypeAPK
		case "AppManifest.xaml":
			return FileTypeXAP
		case "AppxManifest.xml", "AppxMetadata/AppxBundleManifest.xml":
			return FileTypeAPPX
		case "extension.vsixmanifest":
			return FileTypeVSIX
		case "META-INF/MANIFEST.MF":
			// APKs are also JARs so save this for last
			jarManifest = true
		}
		if strings.HasSuffix(cleaned, ".app/Info.plist") ||
			strings.HasSuffix(cleaned, ".app/Contents/Info.plist") {
			return FileTypeIPA
		}
	}
	if jarManifest {
		return FileTypeJAR
	}
	return FileTypeUnknown
}
|
package handler
// Web is a stateless receiver type for the handler package; handler methods
// are presumably attached to it elsewhere — confirm against other files.
type Web struct{}
|
package odoo
import (
"fmt"
)
// AccountFiscalPositionTemplate represents account.fiscal.position.template model.
// All fields are pointers so that unset values can be distinguished from
// zero values when marshalling.
//
// Fix: the struct tags originally spelled the tag option as "omptempty";
// that is not the conventional "omitempty" option name, so nil fields would
// not be recognized for omission. The corrected spelling is used below —
// NOTE(review): confirm the xmlrpc marshaller in use honors "omitempty".
type AccountFiscalPositionTemplate struct {
	LastUpdate      *Time     `xmlrpc:"__last_update,omitempty"`
	AccountIds      *Relation `xmlrpc:"account_ids,omitempty"`
	AutoApply       *Bool     `xmlrpc:"auto_apply,omitempty"`
	ChartTemplateId *Many2One `xmlrpc:"chart_template_id,omitempty"`
	CountryGroupId  *Many2One `xmlrpc:"country_group_id,omitempty"`
	CountryId       *Many2One `xmlrpc:"country_id,omitempty"`
	CreateDate      *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid       *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName     *String   `xmlrpc:"display_name,omitempty"`
	Id              *Int      `xmlrpc:"id,omitempty"`
	Name            *String   `xmlrpc:"name,omitempty"`
	Note            *String   `xmlrpc:"note,omitempty"`
	Sequence        *Int      `xmlrpc:"sequence,omitempty"`
	StateIds        *Relation `xmlrpc:"state_ids,omitempty"`
	TaxIds          *Relation `xmlrpc:"tax_ids,omitempty"`
	VatRequired    	*Bool     `xmlrpc:"vat_required,omitempty"`
	WriteDate       *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid        *Many2One `xmlrpc:"write_uid,omitempty"`
	ZipFrom         *Int      `xmlrpc:"zip_from,omitempty"`
	ZipTo           *Int      `xmlrpc:"zip_to,omitempty"`
}
// AccountFiscalPositionTemplates represents array of account.fiscal.position.template model.
type AccountFiscalPositionTemplates []AccountFiscalPositionTemplate

// AccountFiscalPositionTemplateModel is the odoo model name.
const AccountFiscalPositionTemplateModel = "account.fiscal.position.template"

// Many2One convert AccountFiscalPositionTemplate to *Many2One.
// NOTE(review): this dereferences afpt.Id via Get(); if Id can be nil for an
// unsaved record, confirm Get() tolerates a nil receiver before calling.
func (afpt *AccountFiscalPositionTemplate) Many2One() *Many2One {
	return NewMany2One(afpt.Id.Get(), "")
}
// CreateAccountFiscalPositionTemplate creates a new account.fiscal.position.template model and returns its id.
// A successful call that yields no id returns (-1, nil).
func (c *Client) CreateAccountFiscalPositionTemplate(afpt *AccountFiscalPositionTemplate) (int64, error) {
	ids, err := c.CreateAccountFiscalPositionTemplates([]*AccountFiscalPositionTemplate{afpt})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	}
	return ids[0], nil
}
// CreateAccountFiscalPositionTemplates creates new account.fiscal.position.template models and returns their ids.
func (c *Client) CreateAccountFiscalPositionTemplates(afpts []*AccountFiscalPositionTemplate) ([]int64, error) {
	var vv []interface{}
	for _, v := range afpts {
		vv = append(vv, v)
	}
	return c.Create(AccountFiscalPositionTemplateModel, vv)
}
// UpdateAccountFiscalPositionTemplate updates an existing account.fiscal.position.template record.
// It delegates to the batch variant with the single id taken from afpt.
func (c *Client) UpdateAccountFiscalPositionTemplate(afpt *AccountFiscalPositionTemplate) error {
	return c.UpdateAccountFiscalPositionTemplates([]int64{afpt.Id.Get()}, afpt)
}
// UpdateAccountFiscalPositionTemplates updates existing account.fiscal.position.template records.
// All records (represented by ids) will be updated by afpt values.
func (c *Client) UpdateAccountFiscalPositionTemplates(ids []int64, afpt *AccountFiscalPositionTemplate) error {
	return c.Update(AccountFiscalPositionTemplateModel, ids, afpt)
}
// DeleteAccountFiscalPositionTemplate deletes an existing account.fiscal.position.template record.
// It delegates to the batch variant with a single-element id slice.
func (c *Client) DeleteAccountFiscalPositionTemplate(id int64) error {
	return c.DeleteAccountFiscalPositionTemplates([]int64{id})
}
// DeleteAccountFiscalPositionTemplates deletes existing account.fiscal.position.template records.
func (c *Client) DeleteAccountFiscalPositionTemplates(ids []int64) error {
	return c.Delete(AccountFiscalPositionTemplateModel, ids)
}
// GetAccountFiscalPositionTemplate gets account.fiscal.position.template existing record.
// A missing id is reported as an error rather than a nil record.
func (c *Client) GetAccountFiscalPositionTemplate(id int64) (*AccountFiscalPositionTemplate, error) {
	records, err := c.GetAccountFiscalPositionTemplates([]int64{id})
	if err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("id %v of account.fiscal.position.template not found", id)
	}
	return &((*records)[0]), nil
}
// GetAccountFiscalPositionTemplates gets account.fiscal.position.template existing records.
// Ids that do not exist are simply absent from the result.
func (c *Client) GetAccountFiscalPositionTemplates(ids []int64) (*AccountFiscalPositionTemplates, error) {
	afpts := &AccountFiscalPositionTemplates{}
	if err := c.Read(AccountFiscalPositionTemplateModel, ids, nil, afpts); err != nil {
		return nil, err
	}
	return afpts, nil
}
// FindAccountFiscalPositionTemplate finds account.fiscal.position.template record by querying it with criteria.
// The query is limited to a single record; no match is reported as an error.
func (c *Client) FindAccountFiscalPositionTemplate(criteria *Criteria) (*AccountFiscalPositionTemplate, error) {
	result := &AccountFiscalPositionTemplates{}
	err := c.SearchRead(AccountFiscalPositionTemplateModel, criteria, NewOptions().Limit(1), result)
	if err != nil {
		return nil, err
	}
	if result == nil || len(*result) == 0 {
		return nil, fmt.Errorf("account.fiscal.position.template was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindAccountFiscalPositionTemplates finds account.fiscal.position.template records by querying it
// and filtering it with criteria and options. An empty result is not an error.
func (c *Client) FindAccountFiscalPositionTemplates(criteria *Criteria, options *Options) (*AccountFiscalPositionTemplates, error) {
	afpts := &AccountFiscalPositionTemplates{}
	if err := c.SearchRead(AccountFiscalPositionTemplateModel, criteria, options, afpts); err != nil {
		return nil, err
	}
	return afpts, nil
}
// FindAccountFiscalPositionTemplateIds finds records ids by querying it
// and filtering it with criteria and options. On failure an empty (non-nil)
// slice is returned alongside the error, preserving the original contract.
func (c *Client) FindAccountFiscalPositionTemplateIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountFiscalPositionTemplateModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindAccountFiscalPositionTemplateId finds record id by querying it with criteria.
// No match is reported as an error carrying the criteria and options used.
func (c *Client) FindAccountFiscalPositionTemplateId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountFiscalPositionTemplateModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	}
	return -1, fmt.Errorf("account.fiscal.position.template was not found with criteria %v and options %v", criteria, options)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.