text stringlengths 11 4.05M |
|---|
package coinpayments
import "net/url"
// getBasicInfoResponse is the api response of a "get_basic_info" call.
// Fields mirror the JSON payload returned by the CoinPayments API.
type getBasicInfoResponse struct {
	Username   string `json:"username"`
	MerchantID string `json:"merchant_id"`
	Email      string `json:"email"`
	PublicName string `json:"public_name"`
}
// GetBasicInfo calls the "get_basic_info" command and returns the account's
// basic profile information, or an error if the API call fails.
func (c *Client) GetBasicInfo(optionals ...OptionalValue) (*getBasicInfoResponse, error) {
	params := &url.Values{}
	addOptionals(optionals, params)
	var payload struct {
		errResponse
		Result *getBasicInfoResponse `json:"result"`
	}
	err := c.call("get_basic_info", params, &payload)
	if err != nil {
		return nil, err
	}
	return payload.Result, nil
}
// ratesResponse is the api response of a "rates" call.
// Presumably keyed by currency/coin ticker symbol — confirm against the
// CoinPayments API docs. Numeric amounts arrive as strings from the API.
type ratesResponse map[string]struct {
	IsFiat       int      `json:"is_fiat"`
	RateBTC      string   `json:"rate_btc"`
	LastUpdate   string   `json:"last_update"`
	TxFee        string   `json:"tx_fee"`
	Status       string   `json:"status"`
	Name         string   `json:"name"`
	Confirms     string   `json:"confirms"`
	Capabilities []string `json:"capabilities"`
	Accepted     int      `json:"accepted"`
}
// Rates calls the "rates" command and returns the exchange-rate table, or an
// error if the API call fails.
func (c *Client) Rates(optionals ...OptionalValue) (*ratesResponse, error) {
	params := &url.Values{}
	addOptionals(optionals, params)
	var payload struct {
		errResponse
		Result *ratesResponse `json:"result"`
	}
	err := c.call("rates", params, &payload)
	if err != nil {
		return nil, err
	}
	return payload.Result, nil
}
|
package machinery
import "k8s.io/client-go/tools/clientcmd/api"
// Apply replays each item of the diff onto the existing kubeconfig, reading
// replacement data from incoming. The handler is consulted only for
// ChangeTypeNew items whose names collide with entries already present in
// existing. existing is mutated in place; entries taken from incoming are
// deep-copied first so the two configs never share objects.
func (d *Diff) Apply(existing, incoming *api.Config, handler ConflictResolver) error {
	for _, item := range d.Items {
		// isComplex := (item.ChangeType & ChangeTypeComplex) != 0
		switch {
		case (item.ChangeType & ChangeTypeNew) != 0:
			// New item: copy the incoming cluster/authinfo and build a
			// context tying them together under possibly-renamed names.
			clusterName := item.AffectedIncoming.Cluster
			authInfoName := item.AffectedIncoming.AuthInfo
			contextName := item.AffectedIncoming.Name
			if (item.Complex & ComplexDiffRenameRequired) != 0 {
				// One or more names collide with existing entries; ask the
				// handler for replacement names. The validation callback
				// rejects candidates that are still taken.
				if _, exists := existing.Clusters[clusterName]; exists {
					clusterName = handler.Rename("Cluster", clusterName, func(s string) error {
						if _, exists := existing.Clusters[s]; exists {
							return ErrItemAlreadyExists
						}
						return nil
					})
				}
				if _, exists := existing.AuthInfos[authInfoName]; exists {
					authInfoName = handler.Rename("AuthInfo", authInfoName, func(s string) error {
						if _, exists := existing.AuthInfos[s]; exists {
							return ErrItemAlreadyExists
						}
						return nil
					})
				}
				if _, exists := existing.Contexts[contextName]; exists {
					contextName = handler.Rename("Context", contextName, func(s string) error {
						if _, exists := existing.Contexts[s]; exists {
							return ErrItemAlreadyExists
						}
						return nil
					})
				}
			}
			existing.Clusters[clusterName] = incoming.Clusters[item.AffectedIncoming.Cluster].DeepCopy()
			existing.AuthInfos[authInfoName] = incoming.AuthInfos[item.AffectedIncoming.AuthInfo].DeepCopy()
			// NOTE(review): only Cluster and AuthInfo are carried into the new
			// context; other fields of the incoming context (e.g. namespace)
			// are dropped — confirm this is intended.
			existing.Contexts[contextName] = &api.Context{
				Cluster:  clusterName,
				AuthInfo: authInfoName,
			}
		case (item.ChangeType & ChangeTypeRename) != 0:
			// Rename: move existing entries to the incoming names and fix up
			// the context's back-references for each piece that changed.
			existingContextName := item.AffectedExisting.Name
			existingClusterName := item.AffectedExisting.Cluster
			existingAuthInfoName := item.AffectedExisting.AuthInfo
			incomingContextName := item.AffectedIncoming.Name
			incomingClusterName := item.AffectedIncoming.Cluster
			incomingAuthInfoName := item.AffectedIncoming.AuthInfo
			if existingClusterName != incomingClusterName {
				existing.Clusters[incomingClusterName] = existing.Clusters[existingClusterName]
				delete(existing.Clusters, existingClusterName)
				existing.Contexts[existingContextName].Cluster = incomingClusterName
			}
			if existingAuthInfoName != incomingAuthInfoName {
				existing.AuthInfos[incomingAuthInfoName] = existing.AuthInfos[existingAuthInfoName]
				delete(existing.AuthInfos, existingAuthInfoName)
				existing.Contexts[existingContextName].AuthInfo = incomingAuthInfoName
			}
			// The context itself is re-keyed last, after its fields were
			// updated under the old key above.
			if existingContextName != incomingContextName {
				existing.Contexts[incomingContextName] = existing.Contexts[existingContextName]
				delete(existing.Contexts, existingContextName)
			}
		case (item.ChangeType & ChangeTypeDelete) != 0:
			// Delete: remove the cluster, authinfo and context named by the
			// existing side of the item.
			clusterName := item.AffectedExisting.Cluster
			authInfoName := item.AffectedExisting.AuthInfo
			contextName := item.AffectedExisting.Name
			delete(existing.Clusters, clusterName)
			delete(existing.AuthInfos, authInfoName)
			delete(existing.Contexts, contextName)
		case (item.ChangeType & ChangeTypeReplace) != 0:
			// Replace: drop the existing entries wholesale and deep-copy the
			// incoming ones under the incoming names.
			existingContextName := item.AffectedExisting.Name
			existingClusterName := item.AffectedExisting.Cluster
			existingAuthInfoName := item.AffectedExisting.AuthInfo
			delete(existing.Clusters, existingClusterName)
			delete(existing.AuthInfos, existingAuthInfoName)
			delete(existing.Contexts, existingContextName)
			existing.Clusters[item.AffectedIncoming.Cluster] =
				incoming.Clusters[item.AffectedIncoming.Cluster].DeepCopy()
			existing.AuthInfos[item.AffectedIncoming.AuthInfo] =
				incoming.AuthInfos[item.AffectedIncoming.AuthInfo].DeepCopy()
			existing.Contexts[item.AffectedIncoming.Name] =
				incoming.Contexts[item.AffectedIncoming.Name].DeepCopy()
		case (item.ChangeType & ChangeTypeModify) != 0:
			// Modify: apply each complex sub-change, clearing its bit once
			// handled, until no bits remain.
			// NOTE(review): a Complex bit not covered by any case below would
			// make this loop spin forever — confirm the bit set is exhaustive.
			cmplx := item.Complex
			for cmplx != ComplexDiffTypeNone {
				switch {
				case (cmplx & ComplexDiffServerChanged) != 0:
					existing.Clusters[item.AffectedExisting.Cluster].Server =
						incoming.Clusters[item.AffectedIncoming.Cluster].Server
					cmplx &^= ComplexDiffServerChanged
				case (cmplx & ComplexDiffUserAuthChanged) != 0:
					existing.AuthInfos[item.AffectedExisting.AuthInfo] =
						incoming.AuthInfos[item.AffectedIncoming.AuthInfo].DeepCopy()
					cmplx &^= ComplexDiffUserAuthChanged
				case (cmplx & ComplexDiffClusterCAChanged) != 0:
					existing.Clusters[item.AffectedExisting.Cluster].CertificateAuthorityData =
						incoming.Clusters[item.AffectedIncoming.Cluster].CertificateAuthorityData
					cmplx &^= ComplexDiffClusterCAChanged
				case (cmplx & ComplexDiffPreferencesChanged) != 0:
					// No-op
					cmplx &^= ComplexDiffPreferencesChanged
				case (cmplx & ComplexDiffRenameRequired) != 0:
					// This isn't used in ChangeTypeModify
					panic("bug: ComplexDiffRenameRequired used in ChangeTypeModify")
				}
			}
		}
	}
	return nil
}
|
package field
import (
"encoding/binary"
"fmt"
"io"
"time"
)
// StartTime is the date/time the track started playing in Serato.
type StartTime struct {
	// header describes the field; its Length determined how many bytes were read into data.
	header *Header
	// data holds the raw field payload; Value interprets the first 4 bytes as a big-endian Unix timestamp.
	data []byte
}
// Value returns the start time, decoded from the field payload as a
// big-endian 32-bit Unix timestamp, in UTC.
func (f *StartTime) Value() time.Time {
	secs := int64(binary.BigEndian.Uint32(f.data))
	return time.Unix(secs, 0).UTC()
}
// String implements fmt.Stringer, rendering the decoded start time.
func (f *StartTime) String() string {
	return f.Value().String()
}
// NewStartTimeField returns an initialised StartTime, using the given field
// header. It returns ErrUnexpectedIdentifier when the header does not carry
// the start-time identifier, or the read error if the payload cannot be read.
func NewStartTimeField(header *Header, r io.Reader) (*StartTime, error) {
	if header.Identifier != starttimeID {
		return nil, ErrUnexpectedIdentifier
	}
	// Read exactly header.Length bytes. io.ReadFull is the direct idiom for
	// filling a byte slice (binary.Read adds reflection for no benefit here)
	// and likewise fails with io.ErrUnexpectedEOF on a short read.
	data := make([]byte, header.Length)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}
	return &StartTime{
		header: header,
		data:   data,
	}, nil
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestStart checks that a zero-value gpsTracker can be constructed.
// NOTE(review): assert.NotNil on a non-pointer struct value always passes,
// so this test cannot fail — consider asserting on a pointer or on actual
// tracker behavior instead.
func TestStart(t *testing.T) {
	gps := gpsTracker{}
	assert.NotNil(t, gps)
}
|
package mongodb
import (
"context"
"log"
"os"
"time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoDB wraps a mongo client connection used by this package's storage helpers.
type MongoDB struct {
	// conn is the underlying driver client created in New.
	conn *mongo.Client
}
// New creates a MongoDB handle connected to the server named by the
// MONGO_URI environment variable. Construction and connectivity problems
// are fatal (log.Fatal exits the process), so a returned handle is usable.
func New() *MongoDB {
	uri := os.Getenv("MONGO_URI")
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	conn, err := mongo.Connect(
		ctx,
		options.Client().ApplyURI(uri),
	)
	if err != nil {
		log.Fatal(err)
	}
	// mongo.Connect does not verify the server is reachable; ping so that a
	// bad URI or unreachable server surfaces here rather than on first use.
	if err := conn.Ping(ctx, nil); err != nil {
		log.Fatal(err)
	}
	return &MongoDB{
		conn: conn,
	}
}
// StoreTokens persists the given OAuth refresh/access tokens and expiry.
// NOTE(review): currently a stub — it only logs its arguments and always
// returns nil; no data is written to MongoDB yet.
func (db *MongoDB) StoreTokens(refresh, access string, expires float64) error {
	log.Println(refresh, access, expires)
	return nil
}
|
package api
import "errors"
// userIDValue is the last user ID handed out; incremented by saveUser.
var userIDValue = 0

// userDataStore is the in-memory backing store for registered users.
// NOTE(review): package-level mutable state, not safe for concurrent use — confirm callers are single-threaded.
var userDataStore = []user{}
// UserDao exposes the methods to be able to store everything in the database.
type UserDao interface {
	// isEmailIDUnique reports whether no stored user already uses the given email.
	isEmailIDUnique(email string) (bool, error)
	// saveUser assigns a new user ID and persists the sign-up request.
	saveUser(u *UserSignUpRequest)
}
// InMemoryUserDao handles the user population mechanism in memory,
// implementing UserDao against the package-level userDataStore slice.
type InMemoryUserDao struct {
}
//make InMemoryUserDao implement userDao
// isEmailIDUnique reports whether no stored user already uses the given
// email address; when the address is taken it returns false and an error.
func (dao *InMemoryUserDao) isEmailIDUnique(email string) (bool, error) {
	for i := range userDataStore {
		if userDataStore[i].Email == email {
			return false, errors.New("This Email Id has already been taken")
		}
	}
	return true, nil
}
// saveUser assigns the next user ID and appends the new user to the
// in-memory store.
func (dao *InMemoryUserDao) saveUser(u *UserSignUpRequest) {
	userIDValue++
	userDataStore = append(userDataStore, user{
		UserID:   userIDValue,
		UserName: u.UserName,
		Email:    u.Email,
		Password: u.Password,
	})
}
|
package main
import (
"encoding/json"
)
const configFile = ".deltarc"

// Config represents user-provided options (via the file specified by configFile).
// Pointer fields distinguish "unset" (nil) from an explicit zero value.
type Config struct {
	Context *int `json:"context"`
	ShowEmpty *bool `json:"showEmpty"`
	ShouldCollapse *bool `json:"shouldCollapse"`
	Highlight *bool `json:"highlight"`
	UnmodifiedOpacity *float32 `json:"unmodifiedOpacity"`
	DiffFontSize *int32 `json:"diffFontSize"`
}

// loadConfig returns the configuration.
// NOTE(review): the values are currently hard-coded JSON rather than read
// from configFile — confirm whether file loading is still to be implemented.
func loadConfig() (Config, error) {
	raw := `{
"context": 9,
"showEmpty": true,
"shouldCollapse": false,
"highlight": false,
"unmodifiedOpacity": 0.8,
"diffFontSize": 12
}
`
	var parsed Config
	err := json.Unmarshal([]byte(raw), &parsed)
	return parsed, err
}
|
// A file will only be deleted from disk once all hard links are removed.
package main
import (
"fmt"
"log"
"os"
)
// main demonstrates hard links vs. symlinks: it creates one of each pointing
// at original.txt, inspects the symlink with Lstat, and changes the symlink's
// ownership with Lchown. Each step is fatal on error.
func main() {
	// Create a hard link. We will create two file names that point to the same
	// contents, changing the contents of one will change the other.
	// Deleting/renaming one will not affect the other.
	err := os.Link("original.txt", "original_also.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Create a symlink.
	err = os.Symlink("original.txt", "original_sym.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Lstat will return file info/symlink. (Unlike Stat, it does not follow
	// the link, so we see the link itself.)
	fileInfo, err := os.Lstat("original_sym.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Link info: %#v\n", fileInfo)
	// Change ownership of the symlink and not the file it points to.
	err = os.Lchown("original_sym.txt", os.Getuid(), os.Getgid())
	if err != nil {
		log.Fatal(err)
	}
}
|
package dev
import (
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/open-horizon/anax/cli/cliutils"
cliexchange "github.com/open-horizon/anax/cli/exchange"
"github.com/open-horizon/anax/cli/register"
"github.com/open-horizon/anax/cutil"
"github.com/open-horizon/anax/events"
"github.com/open-horizon/anax/exchange"
"github.com/open-horizon/anax/persistence"
"github.com/open-horizon/anax/policy"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"strings"
)
const DEPENDENCY_COMMAND = "dependency"
const DEPENDENCY_FETCH_COMMAND = "fetch"
const DEPENDENCY_LIST_COMMAND = "list"
const DEPENDENCY_REMOVE_COMMAND = "remove"
// This function assumes that 1 of specRef or url is set, and that org is set. Everything else is optional.
func createLogMessage(specRef string, url string, org string, version string, arch string) string {
// Create the right log message.
target := fmt.Sprintf("specRef: %v, org: %v", specRef, org)
if url != "" {
target = fmt.Sprintf("url: %v, org: %v", url, org)
}
if version != "" {
target += fmt.Sprintf(", version: %v", version)
}
if arch != "" {
target += fmt.Sprintf(", arch: %v", arch)
}
return target
}
// DependencyFetch is the entry point for the hzn dev dependency fetch
// command. It validates the inputs, fetches the dependency either from a
// local project or from the exchange, and reports what was created.
// Input or fetch failures terminate the process via cliutils.Fatal.
func DependencyFetch(homeDirectory string, project string, specRef string, url string, org string, version string, arch string, userCreds string, keyFiles []string, userInputFile string) {
	// Check input parameters for correctness.
	dir, err := verifyFetchInput(homeDirectory, project, specRef, url, org, version, arch, userCreds)
	if err != nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'dependency %v' %v", DEPENDENCY_FETCH_COMMAND, err)
	}
	target := project
	// Go get the dependency metadata.
	if project != "" {
		// Local project on this machine.
		if ferr := fetchLocalProjectDependency(dir, project, userInputFile); ferr != nil {
			cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'dependency %v' %v", DEPENDENCY_FETCH_COMMAND, ferr)
		}
	} else {
		// Dependency comes from the exchange.
		if ferr := fetchExchangeProjectDependency(dir, specRef, url, org, version, arch, userCreds, keyFiles, userInputFile); ferr != nil {
			cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'dependency %v' %v", DEPENDENCY_FETCH_COMMAND, ferr)
		}
		// Create the right log message.
		target = createLogMessage(specRef, url, org, version, arch)
	}
	fmt.Printf("New dependency on %v created.\n", target)
}
// DependencyList is the entry point for the hzn dev dependency list command.
// It prints the project's dependencies as indented JSON; failures terminate
// the process via cliutils.Fatal.
func DependencyList(homeDirectory string) {
	dir, err := setup(homeDirectory, true, false, "")
	if err != nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_LIST_COMMAND, err)
	}
	// Read the service definition so we know which services are required.
	serviceDef, sderr := GetServiceDefinition(dir, SERVICE_DEFINITION_FILE)
	if sderr != nil {
		cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_LIST_COMMAND, sderr)
	}
	// Resolve the full dependency list and print it.
	deps, derr := GetServiceDependencies(dir, serviceDef.RequiredServices)
	if derr != nil {
		cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_LIST_COMMAND, derr)
	}
	marshalListOut(deps)
}
// marshalListOut prints deps as indented JSON on stdout, terminating the
// process via cliutils.Fatal if the value cannot be marshalled.
func marshalListOut(deps interface{}) {
	out, err := json.MarshalIndent(deps, "", " ")
	if err != nil {
		cliutils.Fatal(cliutils.JSON_PARSING_ERROR, "'%v %v' unable to create json object from dependencies, %v", DEPENDENCY_COMMAND, DEPENDENCY_LIST_COMMAND, err)
	}
	fmt.Println(string(out))
}
// DependencyRemove is the entry point for the hzn dev dependency remove
// command. It locates exactly one dependency matching specRef/url (and the
// optional version/arch filters), deletes its file, and updates the project
// definition and userinputs. Ambiguous or missing matches, and all other
// failures, terminate the process via cliutils.Fatal.
func DependencyRemove(homeDirectory string, specRef string, url string, version string, arch string) {
	// Check input parameters for correctness.
	dir, err := verifyRemoveInput(homeDirectory, specRef, url, version, arch)
	if err != nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'dependency %v' %v", DEPENDENCY_REMOVE_COMMAND, err)
	}
	// theDep/depFileInfo capture the single match; uniqueDep is cleared when
	// a second match is found.
	var theDep cliexchange.AbstractServiceFile
	var depFileInfo os.FileInfo
	uniqueDep := true
	// Grab the dependency files from the filesystem.
	deps, err := GetDependencyFiles(dir, SERVICE_DEFINITION_FILE)
	if err != nil {
		cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_REMOVE_COMMAND, err)
	}
	// Make sure we can uniquely identify the dependency to be removed.
	var tempDep cliexchange.AbstractServiceFile
	for _, fileInfo := range deps {
		tempDep = nil
		if dep, err := GetServiceDefinition(path.Join(dir, DEFAULT_DEPENDENCY_DIR), fileInfo.Name()); err != nil {
			cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_REMOVE_COMMAND, err)
		} else {
			tempDep = dep
		}
		// Match on URL (either --specRef or --url was given), then narrow by
		// version and arch when those filters were supplied.
		if (tempDep.GetURL() == specRef || tempDep.GetURL() == url) && (version == "" || (version != "" && tempDep.GetVersion() == version)) && (arch == "" || (arch != "" && tempDep.GetArch() == arch)) {
			if theDep != nil {
				uniqueDep = false
				break
			}
			theDep = tempDep
			depFileInfo = fileInfo
		}
	}
	// If we did not find the dependency, then return the error. If the input did not uniquely identify the dependency, then return
	// the error. Otherwise remove the dependency.
	if theDep == nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'dependency %v' dependency not found.", DEPENDENCY_REMOVE_COMMAND)
	} else if !uniqueDep {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'dependency %v' dependency %v is not unique. Please specify version and/or architecture to uniquely identify the dependency.", DEPENDENCY_REMOVE_COMMAND, specRef)
	} else {
		cliutils.Verbose("Found dependency: %v", depFileInfo.Name())
		// We know which dependency to remove, so remove it.
		if err := os.Remove(path.Join(dir, DEFAULT_DEPENDENCY_DIR, depFileInfo.Name())); err != nil {
			cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'dependency %v' dependency %v could not be removed, error: %v", DEPENDENCY_REMOVE_COMMAND, depFileInfo.Name(), err)
		}
		// Update the service definition with the new dependencies.
		if err := RemoveServiceDependency(dir, theDep); err != nil {
			cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'dependency %v' error updating project definition: %v", DEPENDENCY_REMOVE_COMMAND, err)
		}
		// Update the default userinputs removing any configured variables.
		if err := RemoveConfiguredVariables(dir, theDep); err != nil {
			cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, "'dependency %v' error updating userinputs: %v", DEPENDENCY_REMOVE_COMMAND, err)
		}
	}
	// Create the right log message. (theDep is non-nil here: the nil case
	// above calls cliutils.Fatal, which does not return.)
	fmt.Printf("Removed dependency %v.\n", createLogMessage(specRef, url, theDep.GetOrg(), version, arch))
}
// GetDependencyFiles returns an os.FileInfo object for each dependency file
// (a regular file under the project's dependency directory whose name ends
// in fileSuffix). This function assumes the caller has determined the exact
// location of the files.
func GetDependencyFiles(directory string, fileSuffix string) ([]os.FileInfo, error) {
	res := make([]os.FileInfo, 0, 10)
	depPath := path.Join(directory, DEFAULT_DEPENDENCY_DIR)
	files, err := ioutil.ReadDir(depPath)
	if err != nil {
		// fmt.Errorf replaces the non-idiomatic errors.New(fmt.Sprintf(...)).
		return res, fmt.Errorf("unable to get list of dependency files in %v, error: %v", depPath, err)
	}
	for _, fileInfo := range files {
		if strings.HasSuffix(fileInfo.Name(), fileSuffix) && !fileInfo.IsDir() {
			res = append(res, fileInfo)
		}
	}
	return res, nil
}
// GetServiceDependencies loads every dependency definition file in the
// project and returns the ones referenced by the given required-services
// list.
func GetServiceDependencies(directory string, deps []exchange.ServiceDependency) ([]*cliexchange.ServiceFile, error) {
	res := make([]*cliexchange.ServiceFile, 0, 10)
	depFiles, err := GetDependencyFiles(directory, SERVICE_DEFINITION_FILE)
	if err != nil {
		return res, err
	}
	depDir := path.Join(directory, DEFAULT_DEPENDENCY_DIR)
	for _, fileInfo := range depFiles {
		def, derr := GetServiceDefinition(depDir, fileInfo.Name())
		if derr != nil {
			return res, derr
		}
		// Keep only definitions that the project actually depends on.
		if def.IsDependent(deps) {
			res = append(res, def)
		}
	}
	return res, nil
}
// DependenciesExists checks for the existence of the dependency directory in
// the project. When the directory is missing and okToCreate is true, it is
// created (mode 0755) and true is returned; when missing and okToCreate is
// false, (false, nil) is returned.
func DependenciesExists(directory string, okToCreate bool) (bool, error) {
	exists, err := FileExists(directory, DEFAULT_DEPENDENCY_DIR)
	if err != nil {
		return false, err
	}
	if !exists {
		if !okToCreate {
			return false, nil
		}
		newDir := path.Join(directory, DEFAULT_DEPENDENCY_DIR)
		if err := os.MkdirAll(newDir, 0755); err != nil {
			// fmt.Errorf replaces the non-idiomatic errors.New(fmt.Sprintf(...)).
			return false, fmt.Errorf("could not create dependency directory %v, error: %v", newDir, err)
		}
	}
	return true, nil
}
// ValidateDependencies validates that the dependencies are complete and
// coherent with the rest of the definitions in the project: every dependency
// file must itself validate, and every required service in the project
// definition must be satisfied, within its version range, by some dependency
// file. Any errors will be returned to the caller. Non-service projects are
// a no-op.
func ValidateDependencies(directory string, userInputs *register.InputFile, userInputsFilePath string, projectType string) error {
	if projectType == SERVICE_COMMAND || IsServiceProject(directory) {
		d, err := GetServiceDefinition(directory, SERVICE_DEFINITION_FILE)
		if err != nil {
			return err
		}
		// For each service definition file in the dependencies directory, verify it.
		deps, err := GetDependencyFiles(directory, SERVICE_DEFINITION_FILE)
		if err != nil {
			return err
		}
		for _, fileInfo := range deps {
			if err := ValidateServiceDefinition(path.Join(directory, DEFAULT_DEPENDENCY_DIR), fileInfo.Name()); err != nil {
				return errors.New(fmt.Sprintf("dependency %v did not validate, error: %v", fileInfo.Name(), err))
			} else if err := ValidateService(directory, fileInfo, userInputs, userInputsFilePath); err != nil {
				return errors.New(fmt.Sprintf("dependency %v did not validate, error: %v", fileInfo.Name(), err))
			}
		}
		// Validate that the project defintion's dependencies are present in the dependencies directory.
		// For each required service, scan the dependency files for one whose
		// version falls within the required version range.
		for _, rs := range d.RequiredServices {
			found := false
			for _, fileInfo := range deps {
				if dDef, err := GetServiceDefinition(path.Join(directory, DEFAULT_DEPENDENCY_DIR), fileInfo.Name()); err != nil {
					return errors.New(fmt.Sprintf("dependency validation failed, unable to read %v, error: %v", fileInfo.Name(), err))
				} else if vRange, err := policy.Version_Expression_Factory(rs.Version); err != nil {
					return errors.New(fmt.Sprintf("dependency validation failed, dependency %v has an invalid version %v, error: %v", fileInfo.Name(), rs.Version, err))
				} else if inRange, err := vRange.Is_within_range(dDef.Version); err != nil {
					return errors.New(fmt.Sprintf("dependency validation failed, unable to verify version range %v is within required range %v, error: %v", dDef.Version, vRange.Get_expression(), err))
				} else if inRange {
					found = true
					break
				}
			}
			if !found {
				return errors.New(fmt.Sprintf("dependency %v at version %v does not exist in %v.", rs.URL, rs.Version, path.Join(directory, DEFAULT_DEPENDENCY_DIR)))
			}
		}
	}
	return nil
}
// ValidateService loads the dependency definition named by fInfo and checks
// that every one of its userinput variables without a default value has been
// configured in the project's userinput file.
func ValidateService(directory string, fInfo os.FileInfo, userInputs *register.InputFile, userInputsFilePath string) error {
	def, err := GetServiceDefinition(path.Join(directory, DEFAULT_DEPENDENCY_DIR), fInfo.Name())
	if err != nil {
		return err
	}
	// Userinputs from the dependency without a default value must be set in
	// the userinput file.
	return validateDependencyUserInputs(def, def.GetUserInputs(), userInputs.Services, userInputsFilePath)
}
// validateDependencyUserInputs verifies that every userinput variable of
// dependency d that has no default value is assigned in the configured
// userinputs (matched by the dependency's URL). The first unset variable
// produces an error naming the variable and the userinput file path.
func validateDependencyUserInputs(d cliexchange.AbstractServiceFile, uis []exchange.UserInput, configUserInputs []register.MicroWork, userInputsFilePath string) error {
	for _, ui := range uis {
		if ui.DefaultValue == "" {
			found := false
			for _, msUI := range configUserInputs {
				if d.GetURL() == msUI.Url {
					if _, ok := msUI.Variables[ui.Name]; ok {
						found = true
						break
					}
				}
			}
			if !found {
				// fmt.Errorf replaces the non-idiomatic errors.New(fmt.Sprintf(...)).
				return fmt.Errorf("variable %v has no default and must be specified in %v", ui.Name, userInputsFilePath)
			}
		}
	}
	return nil
}
// verifyFetchInput validates the flag combinations for the dependency fetch
// command and returns the project directory to operate on. Valid inputs are
// either project or the others, but not both; version and arch are optional
// when specRef/org are used; url and specRef are mutually exclusive.
func verifyFetchInput(homeDirectory string, project string, specRef string, url string, org string, version string, arch string, userCreds string) (string, error) {
	// Shut off the Anax runtime logging.
	flag.Set("v", "0")
	// Verify that the environment and inputs are usable.
	dir, err := VerifyEnvironment(homeDirectory, true, true, userCreds)
	if err != nil {
		return "", err
	}
	// errors.New replaces the redundant errors.New(fmt.Sprintf(...)) with no
	// format arguments; the messages are unchanged.
	if specRef != "" && url != "" {
		return "", errors.New("--specRef is mutually exclusive with --url.")
	} else if project != "" && (specRef != "" || org != "" || url != "") {
		return "", errors.New("--project is mutually exclusive with --specRef, --org and --url.")
	} else if project == "" && specRef == "" && org == "" && url == "" {
		return "", errors.New("one of --project, or --specRef and --org, or --url and --org must be specified.")
	} else if (specRef != "" && org == "") || (specRef == "" && org != "" && url == "") || (url != "" && org == "") {
		return "", errors.New("either --specRef and --org, or --url and --org must be specified.")
	}
	// Verify that the inputs match with the project type.
	if specRef != "" && IsServiceProject(dir) {
		return "", errors.New("use --url with service projects.")
	}
	// Verify that if --project was specified, it points to a valid horizon project directory.
	if project != "" {
		if !IsServiceProject(project) {
			return "", fmt.Errorf("--project %v does not contain Horizon service metadata.", project)
		}
		if err := ValidateServiceDefinition(project, SERVICE_DEFINITION_FILE); err != nil {
			return "", err
		}
	}
	cliutils.Verbose("Reading Horizon metadata from %s", dir)
	return dir, nil
}
// verifyRemoveInput validates the flag combinations for the dependency
// remove command and returns the project directory to operate on. Exactly
// one of specRef or url must be supplied; version and arch are optional.
func verifyRemoveInput(homeDirectory string, specRef string, url string, version string, arch string) (string, error) {
	// Shut off the Anax runtime logging.
	flag.Set("v", "0")
	// Verify that the environment and inputs are usable.
	dir, err := VerifyEnvironment(homeDirectory, true, false, "")
	if err != nil {
		return "", err
	}
	// errors.New replaces the redundant errors.New(fmt.Sprintf(...)) with no
	// format arguments; the messages are unchanged.
	if specRef == "" && url == "" {
		return "", errors.New("--specRef or --url is required for remove.")
	} else if specRef != "" && url != "" {
		return "", errors.New("--specRef and --url are mutually exclusive.")
	}
	cliutils.Verbose("Reading Horizon metadata from %s", dir)
	return dir, nil
}
// fetchLocalProjectDependency handles the case where the caller is using a
// local project (i.e. a project that is on the same machine) as a
// dependency. The dependent project is validated, its metadata is copied
// into this project's dependency store, and this project's definition and
// userinputs are updated accordingly.
func fetchLocalProjectDependency(homeDirectory string, project string, userInputFile string) error {
	// Get the setup info and context for running the command.
	dir, err := setup(homeDirectory, true, false, "")
	if err != nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_FETCH_COMMAND, err)
	}
	// If the dependent project is not validate-able then we cant reliably use it as a dependency.
	if err := AbstractServiceValidation(project); err != nil {
		cliutils.Fatal(cliutils.CLI_INPUT_ERROR, "'%v %v' %v", DEPENDENCY_COMMAND, DEPENDENCY_FETCH_COMMAND, err)
	}
	CommonProjectValidation(project, userInputFile, DEPENDENCY_COMMAND, DEPENDENCY_FETCH_COMMAND)
	fmt.Printf("Service project %v verified.\n", dir)
	// The rest of this function gets the dependency's user input and adds it to this project's user input, and it reads
	// this project's workload definition and updates it with the reference to the ms. In the files that are read and
	// then written we want those to preserve the env vars as env vars.
	// BUGFIX: restore the env var via defer so it is also restored on the
	// early-return error paths below (previously only the success path
	// restored it).
	envVarSetting := os.Getenv("HZN_DONT_SUBST_ENV_VARS")
	os.Setenv("HZN_DONT_SUBST_ENV_VARS", "1")
	defer os.Setenv("HZN_DONT_SUBST_ENV_VARS", envVarSetting)
	// Pull the metadata from the dependent project. Log the filesystem location of the dependent metadata.
	if absProject, err := filepath.Abs(project); err != nil {
		return err
	} else {
		cliutils.Verbose("Reading Horizon metadata from dependency: %v", absProject)
	}
	// Get the dependency's definition.
	sDef, err := GetAbstractDefinition(project)
	if err != nil {
		return err
	}
	// Get the dependency's variable configurations.
	depVarConfig, err := GetUserInputsVariableConfiguration(project, userInputFile)
	if err != nil {
		return err
	}
	cliutils.Verbose("Found dependency %v, Org: %v", sDef.GetURL(), sDef.GetOrg())
	// Harden the new dependency in a file in this project's dependency store.
	if err := UpdateDependencyFile(homeDirectory, sDef); err != nil {
		return err
	}
	// Harden the dependent's dependencies so that the current project will be able to get all
	// the dependencies running.
	if err := UpdateDependentDependencies(homeDirectory, project); err != nil {
		return err
	}
	// Update the project's definition dependencies to make sure the dependency is included.
	if err := RefreshServiceDependencies(homeDirectory, sDef); err != nil {
		return err
	}
	// Update this project's userinputs with variable configuration from the dependency's userinputs.
	currentUIs, uerr := UpdateVariableConfiguration(homeDirectory, sDef, depVarConfig)
	if uerr != nil {
		return uerr
	}
	// Get the dependency's userinputs to get the global attribute settings.
	depUserInputs, _, uierr := GetUserInputs(project, userInputFile)
	if uierr != nil {
		return uierr
	}
	// Find the global attributes in the dependency and move them into this project.
	for _, depGlobal := range depUserInputs.Global {
		found := false
		for _, currentUIGlobal := range currentUIs.Global {
			if currentUIGlobal.Type == depGlobal.Type && reflect.DeepEqual(currentUIGlobal.Variables, depGlobal.Variables) {
				found = true
				break
			}
		}
		// If the global setting was already in the current project, then dont copy anything from the dependency.
		if found {
			continue
		}
		// Copy the global setting so that the dependency continues to work correctly. Also tag the global setting with the
		// dependencies URL so that the system knows it only applies to this dependency.
		if len(depGlobal.ServiceSpecs) == 0 {
			depGlobal.ServiceSpecs = append(depGlobal.ServiceSpecs, *persistence.NewServiceSpec(sDef.GetURL(), sDef.GetOrg()))
		}
		currentUIs.Global = append(currentUIs.Global, depGlobal)
	}
	// Update the user input file in the filesystem.
	if err := CreateFile(homeDirectory, USERINPUT_FILE, currentUIs); err != nil {
		return err
	}
	cliutils.Verbose("Updated %v/%v with the dependency's variable and global attribute configuration.", homeDirectory, USERINPUT_FILE)
	return nil
}
// fetchExchangeProjectDependency pulls the dependency's metadata from the
// exchange (including the dependency's own dependencies), stores it in this
// project's dependency directory, and seeds skeletal userinput variables for
// any non-defaulted variables the dependency declares.
func fetchExchangeProjectDependency(homeDirectory string, specRef string, url string, org string, version string, arch string, userCreds string, keyFiles []string, userInputFile string) error {
	projectType := "service"
	// Pull the metadata from the exchange, including any of this dependency's dependencies.
	sDef, err := getExchangeDefinition(homeDirectory, specRef, url, org, version, arch, userCreds, keyFiles, userInputFile)
	if err != nil {
		return err
	}
	// Harden the new dependency in the file.
	if err := UpdateDependencyFile(homeDirectory, sDef); err != nil {
		return err
	}
	// The rest of this function gets the dependency's user input and adds it to this project's user input, and it reads
	// this project's workload definition and updates it with the reference to the ms. In the files that are read and
	// then written we want those to preserve the env vars as env vars.
	// BUGFIX: restore the env var via defer so it is also restored on the
	// early-return error paths below (previously only the success path
	// restored it).
	envVarSetting := os.Getenv("HZN_DONT_SUBST_ENV_VARS")
	os.Setenv("HZN_DONT_SUBST_ENV_VARS", "1")
	defer os.Setenv("HZN_DONT_SUBST_ENV_VARS", envVarSetting)
	// Update the workload definition dependencies to make sure the dependency is included. The APISpec array
	// in the workload definition is rebuilt from the dependencies.
	if err := RefreshServiceDependencies(homeDirectory, sDef); err != nil {
		return err
	}
	// Loop through this project's variable configurations and add skeletal non-default variables that
	// are defined by the new dependency.
	foundUIs := false
	varConfigs, err := GetUserInputsVariableConfiguration(homeDirectory, "")
	// BUGFIX: this error was previously ignored.
	if err != nil {
		return err
	}
	for _, currentUI := range varConfigs {
		if currentUI.Url == sDef.GetURL() && currentUI.Org == org && currentUI.VersionRange == sDef.GetVersion() {
			// The new dependency already has userinputs configured in this project.
			cliutils.Verbose("The current project already has userinputs defined for this dependency.")
			foundUIs = true
			break
		}
	}
	// If there are no variables already defined, and there are non-defaulted variables, then add skeletal variables.
	if !foundUIs {
		foundNonDefault := false
		vars := make(map[string]interface{})
		for _, ui := range sDef.GetUserInputs() {
			if ui.DefaultValue == "" {
				foundNonDefault = true
				vars[ui.Name] = ""
			}
		}
		if foundNonDefault {
			skelVarConfig := register.MicroWork{
				Org:          org,
				Url:          sDef.GetURL(),
				VersionRange: sDef.GetVersion(),
				Variables:    vars,
			}
			if err := SetUserInputsVariableConfiguration(homeDirectory, sDef, []register.MicroWork{skelVarConfig}); err != nil {
				return err
			}
			cliutils.Verbose("Updated %v/%v with the dependency's variable configuration.", homeDirectory, USERINPUT_FILE)
			fmt.Printf("Please provide a value for the dependency's non-default variables in the %v section of this project's userinput file to ensure that the dependency operates correctly. The userInputs section of the new dependency contains a definition for each user input variable.\n", projectType)
		}
	}
	fmt.Printf("To ensure that the dependency operates correctly, please add variable values to the userinput.json file if this service needs any.\n")
	return nil
}
// getExchangeDefinition dispatches to the service-project fetch path; only
// service projects are supported.
func getExchangeDefinition(homeDirectory string, specRef string, surl string, org string, version string, arch string, userCreds string, keyFiles []string, userInputFile string) (cliexchange.AbstractServiceFile, error) {
	if IsServiceProject(homeDirectory) {
		return getServiceDefinition(homeDirectory, surl, org, version, arch, userCreds, keyFiles)
	}
	// errors.New replaces the redundant errors.New(fmt.Sprintf(...)) with no
	// format arguments; the message is unchanged.
	return nil, errors.New("unsupported project type")
}
// UpdateDependencyFile writes (hardens) the dependency's definition into the
// project's dependency directory under a name derived from its URL and
// version.
func UpdateDependencyFile(homeDirectory string, sDef cliexchange.AbstractServiceFile) error {
	name := createDependencyFileName(sDef.GetURL(), sDef.GetVersion(), SERVICE_DEFINITION_FILE)
	dir := path.Join(homeDirectory, DEFAULT_DEPENDENCY_DIR)
	if err := CreateFile(dir, name, sDef); err != nil {
		return err
	}
	cliutils.Verbose("Created %v/%v as a new dependency.", dir, name)
	return nil
}
// createDependencyFileName derives a filesystem-safe dependency filename of
// the form "<url>_<version>.<suffix>": the URL's scheme prefix is stripped
// and filename-hostile characters are replaced with '-'.
func createDependencyFileName(url string, version string, suffix string) string {
	schemeRE := regexp.MustCompile(`^[A-Za-z0-9+.-]*?://`)
	specialRE := regexp.MustCompile(`[$!*,;/?@&~=%]`)
	name := schemeRE.ReplaceAllLiteralString(cliutils.ExpandEnv(url), "")
	name = specialRE.ReplaceAllLiteralString(name, "-")
	return fmt.Sprintf("%v_%v.%v", name, cliutils.ExpandEnv(version), suffix)
}
// Copy the dependency files out, validate them and write them back.
func UpdateDependentDependencies(homeDirectory string, depProject string) error {
	// Return early for non-service projects
	if !IsServiceProject(homeDirectory) {
		return nil
	}

	// Nothing to do unless a local project dependency was given.
	if depProject == "" {
		return nil
	}

	deps, err := GetDependencyFiles(depProject, SERVICE_DEFINITION_FILE)
	if err != nil {
		return err
	}

	srcDir := path.Join(depProject, DEFAULT_DEPENDENCY_DIR)
	dstDir := path.Join(homeDirectory, DEFAULT_DEPENDENCY_DIR)
	for _, dep := range deps {
		sDef, err := GetServiceDefinition(srcDir, dep.Name())
		if err != nil {
			return err
		}
		if err := ValidateServiceDefinition(srcDir, dep.Name()); err != nil {
			return errors.New(fmt.Sprintf("dependency %v did not validate, error: %v", dep.Name(), err))
		}
		if err := CreateFile(dstDir, dep.Name(), sDef); err != nil {
			return err
		}
	}
	return nil
}
// Iterate through the dependencies of the given service and create a dependency for each one.
func getServiceDefinitionDependencies(homeDirectory string, serviceDef *cliexchange.ServiceFile, userCreds string, keyFiles []string) error {
	for _, required := range serviceDef.RequiredServices {
		// Dependencies refer to each other by version range, so the exchange
		// is queried without a pinned version ("") and resolves the best
		// matching version itself.
		sDef, err := getServiceDefinition(homeDirectory, required.URL, required.Org, "", required.Arch, userCreds, keyFiles)
		if err != nil {
			return err
		}
		if err := UpdateDependencyFile(homeDirectory, sDef); err != nil {
			return err
		}
	}
	return nil
}
// getServiceDefinition retrieves a service definition from the exchange and
// converts it to a cliexchange.ServiceFile suitable for writing as a project
// dependency. Along the way it downloads the service's container images into
// the local docker (using any docker auths registered in the exchange) and,
// when the service has required services of its own, recursively creates
// dependencies for those as well.
//
// version may be empty, in which case the highest available version is used.
// arch may be empty, in which case the local machine's architecture is used.
func getServiceDefinition(homeDirectory, surl string, org string, version string, arch string, userCreds string, keyFiles []string) (*cliexchange.ServiceFile, error) {
	// Construct the resource URL suffix.
	resSuffix := fmt.Sprintf("orgs/%v/services?url=%v", org, surl)
	if version != "" {
		resSuffix += fmt.Sprintf("&version=%v", version)
	}
	if arch == "" {
		arch = cutil.ArchString()
	}
	resSuffix += fmt.Sprintf("&arch=%v", arch)
	// Create an object to hold the response.
	resp := new(exchange.GetServicesResponse)
	// Call the exchange to get the service definition.
	if userCreds == "" {
		// Fall back to the dev tool's configured credentials.
		userCreds = os.Getenv(DEVTOOL_HZN_USER)
	}
	cliutils.SetWhetherUsingApiKey(userCreds)
	cliutils.ExchangeGet(cliutils.GetExchangeUrl(), resSuffix, cliutils.OrgAndCreds(os.Getenv(DEVTOOL_HZN_ORG), userCreds), []int{200}, resp)
	// Parse the response and extract the highest version service definition or return an error.
	var serviceDef exchange.ServiceDefinition
	var serviceId string
	if len(resp.Services) > 1 {
		highest, sDef, sId, err := exchange.GetHighestVersion(resp.Services, nil)
		if err != nil {
			return nil, err
		} else if highest == "" {
			return nil, errors.New(fmt.Sprintf("unable to find highest version of %v %v in the exchange: %v", surl, org, resp.Services))
		} else {
			serviceDef = sDef
			serviceId = sId
		}
	} else if len(resp.Services) == 0 {
		return nil, errors.New(fmt.Sprintf("no services found in the exchange."))
	} else {
		// Exactly one result: grab it (the single-iteration loop is just a
		// way to read the only entry of the map).
		for sId, sDef := range resp.Services {
			serviceDef = sDef
			serviceId = sId
			break
		}
	}
	cliutils.Verbose("Creating dependency on: %v, Org: %v", serviceDef, org)
	sDef_cliex := new(cliexchange.ServiceFile)
	// Get container images into the local docker
	dc := make(map[string]interface{})
	if serviceDef.Deployment != "" {
		if err := json.Unmarshal([]byte(serviceDef.Deployment), &dc); err != nil {
			return nil, errors.New(fmt.Sprintf("failed to unmarshal deployment %v: %v", serviceDef.Deployment, err))
		}
		// Get this project's userinputs so that the downloader can use any special authorization attributes that might
		// be specified in the global section of the user inputs.
		currentUIs, _, err := GetUserInputs(homeDirectory, "")
		if err != nil {
			return nil, err
		}
		// convert the image server info into torrent
		torrent := getImageReferenceAsTorrent(&serviceDef)
		// verify the image server url
		url1, err := url.Parse(torrent.Url)
		if err != nil {
			return nil, fmt.Errorf("ill-formed URL: %v, error %v", torrent.Url, err)
		}
		// Get docker auth for the service
		auth_url := fmt.Sprintf("orgs/%v/services/%v/dockauths", org, exchange.GetId(serviceId))
		docker_auths := make([]exchange.ImageDockerAuth, 0)
		cliutils.SetWhetherUsingApiKey(userCreds)
		// 404 is tolerated here: a service with no registered docker auths is valid.
		cliutils.ExchangeGet(cliutils.GetExchangeUrl(), auth_url, cliutils.OrgAndCreds(os.Getenv(DEVTOOL_HZN_ORG), userCreds), []int{200, 404}, &docker_auths)
		img_auths := make([]events.ImageDockerAuth, 0)
		if docker_auths != nil {
			for _, iau_temp := range docker_auths {
				// The exchange token is used as the password with the fixed user name "token".
				img_auths = append(img_auths, events.ImageDockerAuth{Registry: iau_temp.Registry, UserName: "token", Password: iau_temp.Token})
			}
		}
		cliutils.Verbose("The image docker auths for the service %v/%v are: %v", org, surl, img_auths)
		cc := events.NewContainerConfig(*url1, torrent.Signature, serviceDef.Deployment, serviceDef.DeploymentSignature, "", "", img_auths)
		// get the images
		if err := getContainerImages(cc, keyFiles, currentUIs); err != nil {
			return nil, errors.New(fmt.Sprintf("failed to get images for %v/%v: %v", org, surl, err))
		}
	}
	// Fill in the parts of the dependency that come from the service definition.
	sDef_cliex.Org = org
	sDef_cliex.URL = serviceDef.URL
	sDef_cliex.Version = serviceDef.Version
	sDef_cliex.Arch = serviceDef.Arch
	sDef_cliex.Label = serviceDef.Label
	sDef_cliex.Description = serviceDef.Description
	sDef_cliex.Public = serviceDef.Public
	sDef_cliex.Sharable = serviceDef.Sharable
	sDef_cliex.UserInputs = serviceDef.UserInputs
	sDef_cliex.Deployment = dc
	sDef_cliex.MatchHardware = serviceDef.MatchHardware
	sDef_cliex.RequiredServices = serviceDef.RequiredServices
	sDef_cliex.ImageStore = serviceDef.ImageStore
	// If this service has dependencies, bring them in.
	if serviceDef.HasDependencies() {
		if err := getServiceDefinitionDependencies(homeDirectory, sDef_cliex, userCreds, keyFiles); err != nil {
			return nil, err
		}
	}
	return sDef_cliex, nil
}
|
package cos418_hw1_1
import (
"bufio"
"io"
"os"
"strconv"
)
// Sum numbers from channel `nums` and output sum to `out`.
// You should only output to `out` once.
// Do NOT modify function signature.
func sumWorker(nums chan int, out chan int) {
	total := 0
	for n := range nums {
		total += n
	}
	// Exactly one send: the partial sum for everything this worker consumed.
	out <- total
}
// Read integers from the file `fileName` and return sum of all values.
// This function must launch `num` go routines running
// `sumWorker` to find the sum of the values concurrently.
// You should use `checkError` to handle potential errors.
// Do NOT modify function signature.
func sum(num int, fileName string) int {
	f, err := os.Open(fileName)
	checkError(err)
	// FIX: the file was previously never closed, leaking the descriptor.
	defer f.Close()

	data, err := readInts(bufio.NewReader(f))
	checkError(err)

	// Each worker drains `in` and sends exactly one partial sum to `out`.
	out := make(chan int)
	in := make(chan int)
	for i := 0; i < num; i++ {
		go sumWorker(in, out)
	}

	// Feed every integer to the shared input channel, then close it so the
	// workers' range loops terminate.
	for _, integer := range data {
		in <- integer
	}
	close(in)

	// Aggregate one partial sum per worker.
	result := 0
	for i := 0; i < num; i++ {
		result += <-out
	}
	return result
}
// Read a list of integers separated by whitespace from `r`.
// Return the integers successfully read with no error, or
// an empty slice of integers and the error that occurred.
// Do NOT modify this function.
func readInts(r io.Reader) ([]int, error) {
	scanner := bufio.NewScanner(r)
	// Tokenize on any whitespace (spaces, tabs, newlines).
	scanner.Split(bufio.ScanWords)
	var elems []int
	for scanner.Scan() {
		val, err := strconv.Atoi(scanner.Text())
		if err != nil {
			// Return whatever parsed cleanly so far, plus the conversion error.
			return elems, err
		}
		elems = append(elems, val)
	}
	return elems, nil
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package basic_datastore_test
import (
"github.com/kurtosis-tech/kurtosis-go/lib/networks"
"github.com/kurtosis-tech/kurtosis-go/lib/services"
"github.com/kurtosis-tech/kurtosis-go/lib/testsuite"
"github.com/kurtosis-tech/kurtosis-go/testsuite/services_impl/datastore"
"github.com/palantir/stacktrace"
"github.com/sirupsen/logrus"
"time"
)
const (
	// ID under which the datastore service is registered in the test network.
	datastoreServiceId services.ServiceID = "datastore"
	// Polling cadence and upper bound used while waiting for the service to start.
	waitForStartupTimeBetweenPolls = 1 * time.Second
	waitForStartupMaxPolls = 30
	// Key/value pair exercised by the round-trip test.
	testKey = "test-key"
	testValue = "test-value"
)
// BasicDatastoreTest verifies basic key/value round-trip behavior against a
// datastore service launched from the configured Docker image.
type BasicDatastoreTest struct {
	datastoreImage string // Docker image used to launch the datastore service
}
// NewBasicDatastoreTest builds a test backed by the given datastore Docker image.
func NewBasicDatastoreTest(datastoreImage string) *BasicDatastoreTest {
	test := new(BasicDatastoreTest)
	test.datastoreImage = datastoreImage
	return test
}
// Setup adds a datastore service to the network and blocks until it reports
// itself available, returning the network context for the test body to use.
func (test BasicDatastoreTest) Setup(networkCtx *networks.NetworkContext) (networks.Network, error) {
	initializer := datastore.NewDatastoreContainerInitializer(test.datastoreImage)

	_, checker, err := networkCtx.AddService(datastoreServiceId, initializer)
	if err != nil {
		return nil, stacktrace.Propagate(err, "An error occurred adding the datastore service")
	}

	err = checker.WaitForStartup(waitForStartupTimeBetweenPolls, waitForStartupMaxPolls)
	if err != nil {
		return nil, stacktrace.Propagate(err, "An error occurred waiting for the datastore service to become available")
	}

	return networkCtx, nil
}
// Run exercises the datastore service: it checks that the test key is absent,
// upserts a value under it, then reads it back and verifies the round trip.
func (test BasicDatastoreTest) Run(network networks.Network, testCtx testsuite.TestContext) {
	// Necessary because Go doesn't have generics
	castedNetwork := network.(*networks.NetworkContext)
	uncastedService, err := castedNetwork.GetService(datastoreServiceId)
	if err != nil {
		testCtx.Fatal(stacktrace.Propagate(err, "An error occurred getting the datastore service"))
	}
	// Necessary again due to no Go generics
	castedService := uncastedService.(*datastore.DatastoreService)

	logrus.Infof("Verifying that key '%v' doesn't already exist...", testKey)
	exists, err := castedService.Exists(testKey)
	if err != nil {
		testCtx.Fatal(stacktrace.Propagate(err, "An error occurred checking if the test key exists"))
	}
	testCtx.AssertTrue(!exists, stacktrace.NewError("Test key should not exist yet"))
	logrus.Infof("Confirmed that key '%v' doesn't already exist", testKey)

	// FIX: the arguments were previously swapped (testKey, testValue), which
	// logged the key as the value and vice versa.
	logrus.Infof("Inserting value '%v' at key '%v'...", testValue, testKey)
	if err := castedService.Upsert(testKey, testValue); err != nil {
		testCtx.Fatal(stacktrace.Propagate(err, "An error occurred upserting the test key"))
	}
	logrus.Infof("Inserted value successfully")

	logrus.Infof("Getting the key we just inserted to verify the value...")
	value, err := castedService.Get(testKey)
	if err != nil {
		testCtx.Fatal(stacktrace.Propagate(err, "An error occurred getting the test key after upload"))
	}
	logrus.Info("Value verified")
	testCtx.AssertTrue(
		value == testValue,
		stacktrace.NewError("Returned value '%v' != test value '%v'", value, testValue))
}
// GetTestConfiguration returns the default (empty) test configuration.
//
// FIX: changed from a pointer receiver to a value receiver for consistency
// with every other BasicDatastoreTest method; this is backward compatible
// because *BasicDatastoreTest's method set is a superset of the value's.
func (test BasicDatastoreTest) GetTestConfiguration() testsuite.TestConfiguration {
	return testsuite.TestConfiguration{}
}
// GetExecutionTimeout bounds how long the test body may run.
func (test BasicDatastoreTest) GetExecutionTimeout() time.Duration {
	return time.Minute
}
// GetSetupTeardownBuffer bounds how long setup and teardown may take.
func (test BasicDatastoreTest) GetSetupTeardownBuffer() time.Duration {
	return time.Minute
}
|
package three
// Three returns the literal name of this package's number.
func Three() string {
	const name = "three"
	return name
}
|
package filters
import (
"topazdev/stocks-game-api/app/controllers"
"topazdev/stocks-game-api/app/models"
"github.com/revel/revel"
"gopkg.in/mgo.v2/bson"
"strings"
)
// AuthFilter rejects requests with a 401 unless the session carries a valid,
// resolvable user ID; test routes (URLs containing "@tests") bypass the check.
func AuthFilter(c *revel.Controller, fc []revel.Filter) {
	// Test routes skip authentication entirely.
	if strings.Contains(c.Request.URL.String(), "@tests") {
		fc[0](c, fc[1:]) // Execute the next filter the chain.
		return
	}

	// reject writes a 401 response with the given JSON payload.
	reject := func(payload interface{}) {
		c.Response.Status = 401
		c.Result = c.RenderJSON(payload)
	}

	if c.Session["userID"] == "" {
		reject(nil)
		return
	}

	id, err := controllers.ConvertToObjectIDHex(c.Session["userID"])
	if err != nil {
		reject(controllers.JSONResponse{
			Body: bson.M{
				"Message": "Invalid session cookie",
			},
		})
		return
	}

	if _, err = models.GetUser(id); err != nil {
		reject(controllers.JSONResponse{
			Body: bson.M{
				"Message": "Unable to locate the user record",
			},
		})
		return
	}

	fc[0](c, fc[1:]) // Execute the next filter the chain.
}
|
package model
import (
"database/sql"
//"github.com/CourseComment/conf"
_ "github.com/go-sql-driver/mysql"
//"os"
"time"
)
// var (
// db *sql.DB
// )
// func init() {
// db = conf.DB
// }
// Lecture is a (course, professor) pairing together with its aggregated
// student ratings and, optionally, its loaded comments.
type Lecture struct {
	Id idtype // primary key of the lecture row
	Course
	Professor
	Student_score float32 // running average of student scores
	Level float32 // running average of difficulty/level ratings
	Student_score_number int32 // count of scores folded into Student_score
	Level_number int32 // count of ratings folded into Level
	Comments []LectureComment // populated on demand by GetComments
}
// GetLecture loads the lecture joining course c and professor p, or returns
// nil when no such lecture exists or the query fails.
func GetLecture(c *Course, p *Professor) *Lecture {
	var rows *sql.Rows
	var err error
	res := &Lecture{Course: *c, Professor: *p}
	rows, err = db.Query("select id, student_score, level, student_score_number, level_number from lecture where course_id=? and professor_id=?",
		c.Id, p.Id)
	if err != nil {
		// FIX: the error was previously discarded; a failed query left rows
		// nil, which would panic on rows.Next().
		return nil
	}
	// FIX: rows were previously never closed, leaking the DB cursor.
	defer rows.Close()
	if !rows.Next() {
		return nil
	}
	if err = rows.Scan(&res.Id, &res.Student_score, &res.Level, &res.Student_score_number, &res.Level_number); err != nil {
		return nil
	}
	return res
}
// updateDB persists the lecture's aggregate rating fields to the DB.
// Exec errors are still best-effort ignored, matching the original behavior.
func (l Lecture) updateDB() {
	stmt, err := db.Prepare("update lecture set student_score=?, level=?, student_score_number=?, level_number=? where id=?")
	if err != nil {
		// FIX: a failed Prepare was previously ignored, so Exec on the nil
		// statement would panic.
		return
	}
	// FIX: the prepared statement was previously never closed.
	defer stmt.Close()
	stmt.Exec(l.Student_score, l.Level, l.Student_score_number, l.Level_number, l.Id)
}
// RecordStudentScore records user u's score for the lecture. It returns false
// when the user has already scored this lecture (or on a DB error), and true
// after the score has been folded into the running average and persisted.
func (l *Lecture) RecordStudentScore(u User, score int8) bool {
	rows, err := db.Query("select * from lectureStudentScoreRecord where user_id=? and lecture_id=?", u.Id, l.Id)
	if err != nil {
		// FIX: a failed query previously left rows nil and panicked on Next().
		return false
	}
	// FIX: rows were previously never closed.
	defer rows.Close()
	if rows.Next() {
		// This user already scored the lecture.
		return false
	}
	// NOTE(review): "socre" looks like a typo, but it may match the actual
	// column name in the schema — confirm before changing the SQL text.
	stmt, err := db.Prepare("insert lectureStudentScoreRecord set user_id=?, lecture_id=?, socre=?")
	if err != nil {
		// Prepared before mutating l so a failure leaves the averages intact.
		return false
	}
	defer stmt.Close()
	// Fold the new score into the running average.
	l.Student_score = (l.Student_score*float32(l.Student_score_number) + float32(score)) / float32(l.Student_score_number+1)
	l.Student_score_number++
	stmt.Exec(u.Id, l.Id, score)
	l.updateDB()
	return true
}
// RecordLevel records user u's difficulty/level rating for the lecture. It
// returns false when the user has already rated this lecture (or on a DB
// error), and true after the rating has been folded into the running average
// and persisted.
func (l *Lecture) RecordLevel(u User, level int8) bool {
	rows, err := db.Query("select * from lectureLevelRecord where user_id=? and lecture_id=?", u.Id, l.Id)
	if err != nil {
		// FIX: a failed query previously left rows nil and panicked on Next().
		return false
	}
	// FIX: rows were previously never closed.
	defer rows.Close()
	if rows.Next() {
		// This user already rated the lecture.
		return false
	}
	stmt, err := db.Prepare("insert lectureLevelRecord set user_id=?, lecture_id=?, level=?")
	if err != nil {
		// Prepared before mutating l so a failure leaves the averages intact.
		return false
	}
	defer stmt.Close()
	// Fold the new rating into the running average.
	l.Level = (l.Level*float32(l.Level_number) + float32(level)) / float32(l.Level_number+1)
	l.Level_number++
	stmt.Exec(u.Id, l.Id, level)
	l.updateDB()
	return true
}
// GetComments loads all comments for the lecture into l.Comments. On a query
// error, l.Comments is left as an empty (non-nil) slice.
func (l *Lecture) GetComments() {
	l.Comments = make([]LectureComment, 0)
	rows, err := db.Query("select id, user_id, content, super_number, time from lectureComment where lecture_id=?", l.Id)
	if err != nil {
		// FIX: a failed query previously left rows nil and panicked on Next().
		return
	}
	// FIX: rows were previously never closed, leaking the DB cursor.
	defer rows.Close()
	for rows.Next() {
		var (
			id           idtype
			user_id      idtype
			content      string
			super_number int32
			t            string // raw time column text, parsed below
		)
		rows.Scan(&id, &user_id, &content, &super_number, &t)
		// Parse errors fall back to the zero time, as before.
		tmpt, _ := time.Parse(timeLayout, t)
		l.Comments = append(l.Comments, LectureComment{
			Id:           id,
			Lecture:      *l,
			User:         *GetUser("id", user_id),
			Content:      content,
			Super_number: super_number,
			Time:         tmpt,
		})
	}
}
|
package minikube
import (
"bytes"
b64 "encoding/base64"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"text/template"
"github.com/docker/machine/libmachine/state"
"github.com/hashicorp/terraform/helper/schema"
"k8s.io/minikube/cmd/minikube/cmd"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/cluster"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
// Register drivers
_ "k8s.io/minikube/pkg/minikube/registry/drvs"
pkgutil "k8s.io/minikube/pkg/util"
)
// Bit flag folded into a status code when the apiserver is not running.
const clusterNotRunningStatusFlag = 1 << 1

// Name of the manifest file carrying the PodSecurityPolicy roles/bindings.
const pspFileName = "psp.yaml"

var (
	// Smallest disk size (in MB) accepted for the minikube VM.
	minimumDiskSizeInMB int = 3000
	// PSPyml is config template for create PodSecurityPolicy roles and bindings if PSP enabled for minikube
	// based on https://minikube.sigs.k8s.io/docs/tutorials/using_psp/
	// NOTE(review): the YAML indentation below was reconstructed from a
	// whitespace-mangled source — verify against the upstream tutorial.
	PSPyml = template.Must(template.New("PSPYml-addon").Parse(`apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: "*"
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - "*"
  volumes:
  - "*"
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
  - ALL
  volumes:
  - 'configMap'
  - 'emptyDir'
  - 'projected'
  - 'secret'
  - 'downwardAPI'
  - 'persistentVolumeClaim'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
    # Forbid adding the root group.
    - min: 1
      max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
    # Forbid adding the root group.
    - min: 1
      max: 65535
  readOnlyRootFilesystem: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - privileged
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - restricted
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: default:restricted
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- kind: Group
  name: system:authenticated
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: default:privileged
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
- kind: Group
  name: system:masters
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: system:serviceaccount:kube-system
  apiGroup: rbac.authorization.k8s.io
# Authorize all service accounts in a kube-system namespace
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:serviceaccounts
---
# patch for storage-provisioner RBAC
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:persistent-volume-provisioner
rules:
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - create
  - delete
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - watch
  - list
- apiGroups:
  - ""
  - events.k8s.io
  resources:
  - events
  verbs:
  - create
  - patch
  - list
  - update
`))
)

// CustomConfig carries provider-specific settings that are not part of
// minikube's own cluster configuration.
type CustomConfig struct {
	Bootstrapper string // name of cluster bootstrapper
	CacheImages bool // cache images
	InstallAddons bool // install addons into minikube
	Preload bool // If set, download tarball of preloaded images if available to improve start time. Defaults to true.
	ProfileName string // profile (cluster) name
	PSP bool // enable PodSecurityPolicy
}
// resourceMinikube defines the Terraform resource schema for a minikube
// cluster: Create/Read/Delete handlers plus the full set of start flags
// (all ForceNew — changing any of them recreates the cluster) and the
// computed connection outputs (endpoint, certs, key).
func resourceMinikube() *schema.Resource {
	return &schema.Resource{
		Create: resourceMinikubeCreate,
		Read:   resourceMinikubeRead,
		Delete: resourceMinikubeDelete,
		// https://github.com/kubernetes/minikube/blob/e098a3c4ca91f7907705a99e4e3466868afca482/cmd/minikube/cmd/start_flags.go
		Schema: map[string]*schema.Schema{
			"addons": &schema.Schema{
				Type:        schema.TypeMap,
				Description: "Enable addons. see `minikube addons list` for a list of valid addon names. Enabled by default: default-storageclass, storage-provisioner",
				Elem:        schema.TypeBool,
				ForceNew:    true,
				Optional:    true,
			},
			"apiserver_name": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The apiserver name which is used in the generated certificate for localkube/kubernetes. This can be used if you want to make the apiserver available from outside the machine (default \"minikubeCA\")",
				Default:     "minikubeCA",
				ForceNew:    true,
				Optional:    true,
			},
			"cache_images": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "If true, cache docker images for the current bootstrapper and load them into the machine. (default true)",
				Default:     true,
				ForceNew:    true,
				Optional:    true,
			},
			"container_runtime": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The container runtime to be used",
				Default:     "docker",
				ForceNew:    true,
				Optional:    true,
			},
			"cpus": &schema.Schema{
				Type:        schema.TypeInt,
				Description: "Number of CPUs allocated to the minikube VM (default 2)",
				Default:     2,
				ForceNew:    true,
				Optional:    true,
			},
			"disable_driver_mounts": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Disables the filesystem mounts provided by the VirtualBox",
				Default:     true,
				ForceNew:    true,
				Optional:    true,
			},
			"disk_size": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g) (default \"20g\")",
				Default:     "20g",
				ForceNew:    true,
				Optional:    true,
			},
			"dns_domain": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The cluster dns domain name used in the kubernetes cluster (default \"cluster.local\")",
				Default:     "cluster.local",
				ForceNew:    true,
				Optional:    true,
			},
			"docker_env": &schema.Schema{
				Type:        schema.TypeList,
				Description: "Environment variables to pass to the Docker daemon. (format: key=value)",
				Elem:        &schema.Schema{Type: schema.TypeString},
				ForceNew:    true,
				Optional:    true,
				DefaultFunc: func() (interface{}, error) {
					return []string{}, nil
				},
			},
			"docker_opt": &schema.Schema{
				Type:        schema.TypeList,
				Description: "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)",
				Elem:        &schema.Schema{Type: schema.TypeString},
				ForceNew:    true,
				Optional:    true,
				DefaultFunc: func() (interface{}, error) {
					return []string{}, nil
				},
			},
			"driver": &schema.Schema{
				Type:        schema.TypeString,
				Description: fmt.Sprintf("Driver is one of: %v (defaults to virtualbox)", driver.DisplaySupportedDrivers()),
				Default:     "virtualbox",
				ForceNew:    true,
				Optional:    true,
			},
			"extra_options": &schema.Schema{
				Type: schema.TypeString,
				Description: `A set of key=value pairs that describe configuration that may be passed to different components.
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
Valid components are: kubelet, apiserver, controller-manager, etcd, proxy, scheduler.`,
				Default:  "",
				ForceNew: true,
				Optional: true,
			},
			"host_only_nic_type": &schema.Schema{
				Type:        schema.TypeString,
				Description: "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (default: \"virtio\") (virtualbox driver only)",
				Default:     "virtio",
				ForceNew:    true,
				Optional:    true,
			},
			"hyperv_virtual_switch": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)",
				Default:     "",
				ForceNew:    true,
				Optional:    true,
			},
			"hyperv_use_external_switch": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"hyperv_external_adapter": &schema.Schema{
				Type:        schema.TypeString,
				Description: "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)",
				Default:     "",
				ForceNew:    true,
				Optional:    true,
			},
			"feature_gates": &schema.Schema{
				Type:        schema.TypeString,
				Description: "A set of key=value pairs that describe feature gates for alpha/experimental features.",
				Default:     "",
				ForceNew:    true,
				Optional:    true,
			},
			"host_only_cidr": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The CIDR to be used for the minikube VM (virtualbox driver only) (default \"192.168.99.1/24\")",
				Default:     "192.168.99.1/24",
				ForceNew:    true,
				Optional:    true,
			},
			"insecure_registry": &schema.Schema{
				Type:        schema.TypeList,
				Description: "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.",
				Elem:        &schema.Schema{Type: schema.TypeString},
				ForceNew:    true,
				Optional:    true,
				DefaultFunc: func() (interface{}, error) {
					return []string{constants.DefaultServiceCIDR}, nil
				},
			},
			"install_addons": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "If set, install addons. Defaults to true",
				Default:     true,
				ForceNew:    true,
				Optional:    true,
			},
			"iso_skip_checksum": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Skip minikube ISO checksum verification on download (default: false)",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"iso_url": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Location of the minikube iso (default \"https://storage.googleapis.com/minikube/iso/minikube-v1.9.0.iso\")",
				Default:     "https://storage.googleapis.com/minikube/iso/minikube-v1.9.0.iso",
				ForceNew:    true,
				Optional:    true,
			},
			"keep_context": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "This will keep the existing kubectl context and will create a minikube context.",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"kubernetes_version": &schema.Schema{
				Type: schema.TypeString,
				Description: `The kubernetes version that the minikube VM will use (ex: v1.2.3)
OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) (default "v1.16.8")`,
				Default:  "v1.16.8",
				ForceNew: true,
				Optional: true,
			},
			"kvm_gpu": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Enable experimental NVIDIA GPU support in minikube",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"kvm_hidden": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"kvm_network": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The KVM network name. (kvm2 driver only) (default \"default\")",
				Default:     "default",
				ForceNew:    true,
				Optional:    true,
			},
			"kvm_qemu_uri": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The KVM QEMU connection URI. (kvm2 driver only) (default \"qemu:///system\")",
				Default:     "qemu:///system",
				ForceNew:    true,
				Optional:    true,
			},
			"memory": &schema.Schema{
				Type:        schema.TypeInt,
				Description: "Amount of RAM allocated to the minikube VM (default 2048)",
				Default:     2048,
				ForceNew:    true,
				Optional:    true,
			},
			//"mount": &schema.Schema{
			//	Type:        schema.TypeBool,
			//	Description: "This will start the mount daemon and automatically mount files into minikube",
			//	Default:     false,
			//	ForceNew:    true,
			//	Optional:    true,
			//},
			//"mount_string": &schema.Schema{
			//	Type:        schema.TypeString,
			//	Description: "The argument to pass the minikube mount command on start (default \"/Users:/minikube-host\")",
			//	Default:     "/Users:/minikube-host",
			//	ForceNew:    true,
			//	Optional:    true,
			//},
			"nat_nic_type": &schema.Schema{
				Type:        schema.TypeString,
				Description: "NIC Type used for NAT network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (default: \"virtio\") (virtualbox driver only)",
				Default:     "virtio",
				ForceNew:    true,
				Optional:    true,
			},
			"network_plugin": &schema.Schema{
				Type:        schema.TypeString,
				Description: "The name of the network plugin",
				Default:     "",
				ForceNew:    true,
				Optional:    true,
			},
			"pod_security_policy": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "Enable PodSecurityPolicy (PSP) inside minikube (default: false)",
				Default:     false,
				ForceNew:    true,
				Optional:    true,
			},
			"preload": &schema.Schema{
				Type:        schema.TypeBool,
				Description: "If set, download tarball of preloaded images if available to improve start time. Defaults to true",
				Default:     true,
				ForceNew:    true,
				Optional:    true,
			},
			"registry_mirror": &schema.Schema{
				Type:        schema.TypeList,
				Description: "Registry mirrors to pass to the Docker daemon",
				Elem:        &schema.Schema{Type: schema.TypeString},
				ForceNew:    true,
				Optional:    true,
				DefaultFunc: func() (interface{}, error) {
					return []string{}, nil
				},
			},
			//"scheme": &schema.Schema{
			//	Type:        schema.TypeString,
			//	Description: "HTTP or HTTPS",
			//	Default:     "https",
			//	ForceNew:    true,
			//	Optional:    true,
			//},
			// The remaining attributes are computed outputs, filled in after
			// the cluster is up, for consumers such as the kubernetes provider.
			"client_certificate": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.",
				Computed:    true,
			},
			"client_key": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Base64 encoded private key used by clients to authenticate to the cluster endpoint.",
				Computed:    true,
			},
			"cluster_ca_certificate": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Base64 encoded public certificate that is the root of trust for the cluster.",
				Computed:    true,
			},
			"endpoint": &schema.Schema{
				Type:        schema.TypeString,
				Description: "Endpoint that can be used to reach API server",
				Computed:    true,
			},
		},
	}
}
// resourceMinikubeRead refreshes Terraform's view of the cluster identified by
// the resource ID (the profile name). It queries the host, kubelet, apiserver
// and kubeconfig status; if none of them report a running cluster the resource
// ID is cleared so Terraform treats the cluster as gone.
func resourceMinikubeRead(d *schema.ResourceData, meta interface{}) error {
	api, err := machine.NewAPIClient()
	if err != nil {
		return fmt.Errorf("Error getting client: %s\n", err)
	}
	defer api.Close()

	profileName := d.Id()
	hostSt, err := machine.Status(api, profileName)
	if err != nil {
		log.Printf("Error getting host status for %s: %+v", profileName, err)
		return err
	}

	// Default every component status to "None" until proven otherwise.
	kubeletSt := state.None.String()
	apiserverSt := state.None.String()
	ks := state.None.String()

	clusterConfig, customConfig, err := getClusterConfigFromResource(d)
	if err != nil {
		log.Printf("Error getting minikube cluster config from terraform resource: %+v", err)
		return err
	}
	// Single-node cluster: only the first node is inspected.
	nodeConfig := clusterConfig.Nodes[0]

	if hostSt == state.Running.String() {
		clusterBootstrapper, err := cluster.Bootstrapper(api, customConfig.Bootstrapper, clusterConfig, nodeConfig)
		if err != nil {
			log.Printf("Error getting cluster bootstrapper: %+v", err)
			return err
		}
		kubeletSt, err = clusterBootstrapper.GetKubeletStatus()
		if err != nil {
			log.Printf("Error kubelet status: %+v", err)
			return err
		}
		ip, err := cluster.GetHostDriverIP(api, profileName)
		if err != nil {
			log.Printf("Error host driver ip status: %+v", err)
			return err
		}
		apiserverPort := nodeConfig.Port
		apiserverSt, err = clusterBootstrapper.GetAPIServerStatus(ip.String(), apiserverPort)
		// NOTE(review): returnCode is computed below but never read afterwards
		// — it looks like dead code left over from an earlier status API.
		returnCode := 0
		if err != nil {
			log.Printf("Error api-server status: %+v", err)
			return err
		} else if apiserverSt != state.Running.String() {
			returnCode |= clusterNotRunningStatusFlag
		}
		// VerifyEndpoint returns an error when kubeconfig points elsewhere;
		// that error is used as a signal rather than propagated.
		kstatus := kubeconfig.VerifyEndpoint(profileName, ip.String(), apiserverPort)
		//if err != nil {
		//	log.Printf("Error kubeconfig status: %v", err)
		//	return err
		//}
		if kstatus != nil {
			ks = "Correctly Configured: pointing to minikube-vm at " + ip.String()
		} else {
			ks = "Misconfigured: pointing to stale minikube-vm." +
				"\nTo fix the kubectl context, run minikube update-context"
		}
	}

	status := cmd.Status{
		Name:       profileName,
		Host:       hostSt,
		Kubelet:    kubeletSt,
		APIServer:  apiserverSt,
		Kubeconfig: ks,
		Worker:     false,
	}
	log.Printf("resourceMinikubeRead result: %+v", status)

	// NOTE(review): ks is compared against state.None.String() ("None"), but
	// the branches above only ever set it to human-readable sentences when the
	// host is running — confirm this is the intended existence check.
	if kubeletSt == state.None.String() || apiserverSt == state.None.String() || ks == state.None.String() {
		// If the resource does not exist, inform Terraform. We want to immediately
		// return here to prevent further processing.
		d.SetId("")
		return nil
	}
	return nil
}
// resourceMinikubeCreate provisions a local minikube cluster from the
// Terraform resource data: it builds a cluster config, selects and validates
// a VM driver, boots the node via Start, then exports the kubeconfig
// credentials (base64-encoded) and API endpoint as resource attributes.
func resourceMinikubeCreate(d *schema.ResourceData, meta interface{}) error {
	// Load current profile cluster config from file
	//prof, err := cfg.LoadProfile(profile)
	//// && !os.IsNotExist(err)
	//
	//cc := cfg.ClusterConfig{}
	//
	//if err != nil {
	//	log.Printf("Error loading profile config: %v. Assume that we create cluster first time", err)
	//
	//	cc, err = getClusterConfigFromResource(d)
	//	if err != nil {
	//		log.Printf("Error getting DEFAULT cluster config from resource: %s\n", err)
	//		return err
	//	}
	//} else {
	//	cc = *prof.Config
	//}
	newClusterConfig, customConfig, err := getClusterConfigFromResource(d)
	if err != nil {
		log.Printf("Error getting cluster config from resource: %s\n", err)
		return err
	}
	profileName := newClusterConfig.Name
	// A pre-existing profile is tolerated (existing stays nil when absent);
	// only unexpected load errors abort the create.
	existing, err := cfg.Load(profileName)
	if err != nil && !cfg.IsNotExist(err) {
		log.Printf("Error loading cluster config: %v", err)
		return err
	}
	//skipISOChecksum := d.Get("iso_skip_checksum").(bool)
	nodeConfig := newClusterConfig.Nodes[0]
	api, err := machine.NewAPIClient()
	if err != nil {
		log.Printf("Error getting client: %s\n", err)
		return err
	}
	defer api.Close()
	log.Printf("Starting local Kubernetes %s cluster...\n", newClusterConfig.KubernetesConfig.KubernetesVersion)
	log.Printf("clusterConfig: %+v", newClusterConfig)
	log.Printf("nodeConfig: %+v", nodeConfig)
	log.Printf("Addons: %+v", newClusterConfig.Addons)
	// NOTE(review): validateSpecifiedDriver returns an error that is ignored
	// here — confirm whether a driver mismatch should abort the create.
	validateSpecifiedDriver(existing, newClusterConfig.Driver)
	ds := selectDriver(existing, newClusterConfig)
	driverName := ds.Name
	log.Printf("selected driver: %s", driverName)
	// validateDriver exits the whole process when the driver is unusable.
	validateDriver(ds, existing)
	//if driver.IsVM(driverName) {
	//	urls := []string{newClusterConfig.MinikubeISO}
	//	urls = append(urls, download.DefaultISOURLs()...)
	//
	//	url, err := download.ISO(urls, skipISOChecksum)
	//	if err != nil {
	//		log.Printf("Failed to cache ISO: %v", err)
	//		return err
	//	}
	//	newClusterConfig.MinikubeISO = url
	//}
	//log.Printf("Save current/new configuration to %s because we need it before VM start", profileName)
	//if err := os.MkdirAll(cfg.ProfileFolderPath(profileName), 0777); err != nil {
	//	log.Printf("error creating profile directory '%s': %v", cfg.ProfileFolderPath(profileName), err)
	//	return err
	//}
	//
	//if err := cfg.Write(profileName, &newClusterConfig); err != nil {
	//	log.Printf("Could not save current config to %s: %v", profileFilePath(profileName), err)
	//	return err
	//}
	if customConfig.PSP {
		log.Printf("Preparing PodSecurityPolicy manifest...")
		if err := preparePSP(); err != nil {
			log.Printf("cannot prepare PSP YAML: %+v", err)
			return err
		}
		// add option to enable PSP
		e := cfg.ExtraOption{
			Component: "apiserver",
			Key:       "enable-admission-plugins",
			Value:     "PodSecurityPolicy",
		}
		es := &(newClusterConfig.KubernetesConfig.ExtraOptions)
		*es = append(*es, e)
	}
	var existingAddons = map[string]bool{}
	if existing != nil {
		existingAddons = existing.Addons
	}
	// Start boots the node and returns the kubeconfig settings and the
	// libmachine host used below to derive the API endpoint.
	kcs, host, err := Start(newClusterConfig, nodeConfig, customConfig, existingAddons)
	if err != nil {
		log.Printf("Error starting node: %+v", err)
		return err
	}
	//log.Println("Getting VM IP address...")
	//ip, err := host.Driver.GetIP()
	//if err != nil {
	//	log.Printf("Error getting VM IP address: %v", err)
	//	return err
	//}
	//
	//// set (new) external IP of cluster node
	//newClusterConfig.KubernetesConfig.NodeIP = ip
	//newClusterConfig.Nodes[0].IP = ip
	//// is this same thing?
	//nodeConfig.IP = ip
	//
	//if existing != nil {
	//	oldKubernetesVersion, err := semver.Make(strings.TrimPrefix(existing.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	//	if err != nil {
	//		log.Printf("Error parsing version semver: %v", err)
	//	}
	//
	//	newKubernetesVersion, err := semver.Make(strings.TrimPrefix(newClusterConfig.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	//	if err != nil {
	//		log.Printf("Error parsing version semver: %v", err)
	//	}
	//
	//	// Check if it's an attempt to downgrade version. Avoid version downgrad.
	//	if newKubernetesVersion.LT(oldKubernetesVersion) {
	//		newClusterConfig.KubernetesConfig.KubernetesVersion = version.VersionPrefix + oldKubernetesVersion.String()
	//		log.Println("Kubernetes version downgrade is not supported. Using version:", newClusterConfig.KubernetesConfig.KubernetesVersion)
	//	}
	//
	//	log.Printf("Merge old k8s cluster configuration with new one")
	//	newClusterConfig.KubernetesConfig.NodeIP = ip
	//
	//	if err := mergo.Merge(&newClusterConfig, existing); err != nil {
	//		log.Printf("could not merge old and new profiles: %+v", err)
	//		return err
	//	}
	//
	//	log.Printf("Save new configuration as profile '%s' after merge", profileName)
	//	if err := cfg.Write(profileName, &newClusterConfig); err != nil {
	//		log.Printf("could not save profile to %s after merge: %v", profileName, err)
	//		return err
	//	}
	//}
	//
	////cfg.SaveProfile(newClusterConfig.Name, &newClusterConfig)
	//
	//k8sBootstrapper, err := cluster.Bootstrapper(api, customConfig.Bootstrapper, newClusterConfig, nodeConfig)
	//if err != nil {
	//	log.Printf("Error getting cluster bootstrapper: %+v", err)
	//	return err
	//}
	//
	//log.Println("Update cluster configuration...")
	//if err := k8sBootstrapper.UpdateCluster(newClusterConfig); err != nil {
	//	log.Printf("Error updating cluster: %+v", err)
	//	return err
	//}
	//
	//log.Println("Setting up certs...")
	//if err := k8sBootstrapper.SetupCerts(newClusterConfig.KubernetesConfig, nodeConfig); err != nil {
	//	log.Printf("Error configuring authentication: %+v", err)
	//	return err
	//}
	log.Println("Connecting to cluster...")
	// NOTE(review): a GetURL failure is only logged; execution continues with
	// an empty kubeHost — confirm whether this should return the error.
	kubeHost, err := host.Driver.GetURL()
	if err != nil {
		log.Printf("Error connecting to cluster: %+v", err)
	}
	// Rewrite the docker-machine URL (tcp://host:2376) into the apiserver URL.
	kubeHost = strings.Replace(kubeHost, "tcp://", "https://", -1)
	kubeHost = strings.Replace(kubeHost, ":2376", ":"+strconv.Itoa(constants.APIServerPort), -1)
	//log.Println("Starting cluster components...")
	//
	//if err := k8sBootstrapper.StartCluster(newClusterConfig); err != nil {
	//	log.Printf("Error (re)starting cluster: %v", err)
	//	return err
	//}
	//
	//log.Println("Update cluster configuration... Again")
	//if err := k8sBootstrapper.UpdateCluster(newClusterConfig); err != nil {
	//	log.Printf("Error updating cluster again: %+v", err)
	//	return err
	//}
	//
	//log.Println("Setting up kubeconfig...")
	//kcs := kubeconfig.Settings{
	//	ClusterName:          profileName,
	//	ClusterServerAddress: kubeHost,
	//	ClientCertificate:    localpath.ClientCert(profileName),
	//	CertificateAuthority: localpath.CACert(),
	//	ClientKey:            localpath.ClientKey(profileName),
	//	KeepContext:          newClusterConfig.KeepContext,
	//	// EmbedCerts: false,
	//}
	//kcs.SetPath(kubeconfig.PathFromEnv())
	//
	//// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
	//if err := kubeconfig.Update(&kcs); err != nil {
	//	log.Printf("Failed to update kubeconfig file: %+v", err)
	//	return err
	//}
	//// start 9p server mount
	//if mount {
	//	log.Printf("Setting up hostmount on %s...\n", mountString)
	//
	//	path := os.Args[0]
	//	mountDebugVal := 0
	//	mountCmd := exec.Command(path, "mount", fmt.Sprintf("--v=%d", mountDebugVal), mountString)
	//	mountCmd.Env = append(os.Environ(), constants.IsMinikubeChildProcess+"=true")
	//	err = mountCmd.Start()
	//	if err != nil {
	//		log.Printf("Error running command minikube mount %s", err)
	//		return err
	//	}
	//	err = ioutil.WriteFile(filepath.Join(constants.GetMinipath(), constants.MountProcessFileName), []byte(strconv.Itoa(mountCmd.Process.Pid)), 0644)
	//	if err != nil {
	//		log.Printf("Error writing mount process pid to file: %s", err)
	//		return err
	//	}
	//}
	if kcs.KeepContext {
		log.Printf("The local Kubernetes cluster has started. The kubectl context has not been altered, kubectl will require \"--context=%s\" to use the local Kubernetes cluster.\n",
			kcs.ClusterName)
	} else {
		log.Println("Kubectl is now configured to use the cluster.")
	}
	if newClusterConfig.Driver == "none" {
		log.Println(`===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks
`)
		if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" {
			log.Println(`When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions. An example of this is below:
sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true`)
		}
		if err := pkgutil.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil {
			log.Printf("Error recursively changing ownership of directory %s: %s",
				localpath.MiniPath(), err)
			return err
		}
	}
	//log.Println("Loading cached images from config file.")
	//err = cmd.LoadCachedImagesInConfigFile()
	//if err != nil {
	//	log.Println("Unable to load cached images from config file.")
	//}
	d.SetId(profileName)
	// Export the kubeconfig material as base64 so Terraform can feed it to
	// downstream providers (e.g. the kubernetes provider).
	clientCertificate, err := readFileAsBase64String(kcs.ClientCertificate)
	if err != nil {
		log.Printf("Failed to read client_certificate (%s)", kcs.ClientCertificate)
		return err
	}
	clientKey, err := readFileAsBase64String(kcs.ClientKey)
	if err != nil {
		log.Printf("Failed to read client_key (%s)", kcs.ClientKey)
		return err
	}
	clusterCACertificate, err := readFileAsBase64String(kcs.CertificateAuthority)
	if err != nil {
		log.Printf("Failed to read cluster_ca_certificate (%s)", kcs.CertificateAuthority)
		return err
	}
	d.Set("client_certificate", clientCertificate)
	d.Set("client_key", clientKey)
	d.Set("cluster_ca_certificate", clusterCACertificate)
	d.Set("endpoint", kubeHost)
	// Re-read to populate the remaining computed attributes.
	return resourceMinikubeRead(d, meta)
}
// getClusterConfigFromResource translates the Terraform resource attributes
// into a minikube cfg.ClusterConfig plus the provider-specific CustomConfig.
// It validates the extra apiserver options and the minimum disk size before
// returning; on any validation failure the zero ClusterConfig is returned
// together with the (partially filled) CustomConfig and an error.
func getClusterConfigFromResource(d *schema.ResourceData) (cfg.ClusterConfig, CustomConfig, error) {
	// Defaults for the provider-specific knobs; overridden below from the
	// resource data where applicable.
	customConfig := CustomConfig{
		Bootstrapper:  bootstrapper.Kubeadm,
		CacheImages:   false,
		InstallAddons: true,
		ProfileName:   constants.DefaultClusterName,
		PSP:           false,
	}
	machineName := constants.DefaultClusterName
	apiserverName := d.Get("apiserver_name").(string)
	cacheImages := d.Get("cache_images").(bool)
	containerRuntime := d.Get("container_runtime").(string)
	cpus := d.Get("cpus").(int)
	disableDriverMounts := d.Get("disable_driver_mounts").(bool)
	diskSize := d.Get("disk_size").(string)
	dnsDomain := d.Get("dns_domain").(string)
	dockerEnv := d.Get("docker_env")
	dockerOpt := d.Get("docker_opt")
	//if !ok {
	//	dockerOpt = []string{}
	//}
	hostOnlyNicType := d.Get("host_only_nic_type").(string)
	hypervVirtualSwitch := d.Get("hyperv_virtual_switch").(string)
	hypervUseExternalSwitch := d.Get("hyperv_use_external_switch").(bool)
	hypervExternalAdapter := d.Get("hyperv_external_adapter").(string)
	featureGates := d.Get("feature_gates").(string)
	hostOnlyCIDR := d.Get("host_only_cidr").(string)
	insecureRegistry := d.Get("insecure_registry")
	isoURL := d.Get("iso_url").(string)
	keepContext := d.Get("keep_context").(bool)
	kubernetesVersion := d.Get("kubernetes_version").(string)
	kvmNetwork := d.Get("kvm_network").(string)
	kvmQemuURI := d.Get("kvm_qemu_uri").(string)
	kvmGPU := d.Get("kvm_gpu").(bool)
	kvmHidden := d.Get("kvm_hidden").(bool)
	memory := d.Get("memory").(int)
	natNicType := d.Get("nat_nic_type").(string)
	networkPlugin := d.Get("network_plugin").(string)
	preload := d.Get("preload").(bool)
	psp := d.Get("pod_security_policy").(bool)
	registryMirror := d.Get("registry_mirror")
	vmDriver := d.Get("driver").(string)
	installAddons := d.Get("install_addons").(bool)
	// Fall back to minikube's default addons when none were configured.
	addonsM, ok := d.GetOk("addons")
	if !ok {
		addonsM = map[string]interface{}{"default-storageclass": true, "storage-provisioner": true}
	}
	// typecast map to bool values
	addons := map[string]bool{}
	for k, v := range addonsM.(map[string]interface{}) {
		addons[k] = v.(bool)
	}
	//log.Printf("addons: %+v", addons)
	extraOptionsStr := d.Get("extra_options").(string)
	extraOptions := cfg.ExtraOptionSlice{}
	if extraOptionsStr != "" {
		err := extraOptions.Set(extraOptionsStr)
		if err != nil {
			log.Printf("Error parsing extra options: %v", err)
			return cfg.ClusterConfig{}, customConfig, err
		}
		// PSP must be requested through pod_security_policy, not via
		// extra_options (the latter makes bootstrap hang).
		if strings.EqualFold(extraOptions.Get("enable-admission-plugins"), "PodSecurityPolicy") {
			log.Printf("For some reasons, 'enable-admission-plugins=PodSecurityPolicy' specified in extra_options cause Minikube bootstrap to hang, so you should use a 'pod_security_policy=true' instead if you want PSP.")
			return cfg.ClusterConfig{}, customConfig, errors.New("for some reasons, 'enable-admission-plugins=PodSecurityPolicy' specified in extra_options cause Minikube bootstrap to hang, so you should use a 'pod_security_policy=true' instead if you want PSP")
		}
	}
	// set custom options
	customConfig.CacheImages = cacheImages
	customConfig.InstallAddons = installAddons
	customConfig.Preload = preload
	customConfig.PSP = psp
	// disk_size is a human-readable string ("20g" etc.); normalize to MB.
	diskSizeMB, err := pkgutil.CalculateSizeInMB(diskSize)
	if err != nil {
		log.Printf("Error parsing disk size %s: %v", diskSize, err)
		return cfg.ClusterConfig{}, customConfig, err
	}
	if diskSizeMB < minimumDiskSizeInMB {
		err := fmt.Errorf("Disk Size %dMB (%s) is too small, the minimum disk size is %dMB", diskSizeMB, diskSize, minimumDiskSizeInMB)
		return cfg.ClusterConfig{}, customConfig, err
	}
	//log.Println("=================== Creating Minikube Cluster ==================")
	// Single-node cluster: the one node is both control plane and worker.
	nodeConfig := cfg.Node{
		Name:              machineName,
		KubernetesVersion: kubernetesVersion,
		ControlPlane:      true,
		Worker:            true,
		Port:              constants.APIServerPort,
		//IP: "127.0.0.1",
	}
	kubeConfig := cfg.KubernetesConfig{
		KubernetesVersion:      kubernetesVersion,
		ClusterName:            machineName,
		APIServerName:          apiserverName,
		DNSDomain:              dnsDomain,
		ContainerRuntime:       containerRuntime,
		NetworkPlugin:          networkPlugin,
		FeatureGates:           featureGates,
		ServiceCIDR:            constants.DefaultServiceCIDR,
		ExtraOptions:           extraOptions,
		ShouldLoadCachedImages: cacheImages,
		EnableDefaultCNI:       false,
		NodePort:               constants.APIServerPort,
		//NodeIP: "127.0.0.1",
		//NodePort: 0,
		NodeName: machineName,
	}
	log.Printf("kubeConfig: %v", kubeConfig)
	// https://pkg.go.dev/k8s.io/minikube/pkg/minikube/config?tab=doc#ClusterConfig
	conf := cfg.ClusterConfig{
		Name:                    machineName,
		KeepContext:             keepContext,
		MinikubeISO:             isoURL,
		Memory:                  memory,
		CPUs:                    cpus,
		DiskSize:                diskSizeMB,
		Driver:                  vmDriver,
		DockerEnv:               flattenStringList(dockerEnv),
		InsecureRegistry:        flattenStringList(insecureRegistry),
		RegistryMirror:          flattenStringList(registryMirror),
		HostOnlyCIDR:            hostOnlyCIDR, // Only used by the virtualbox driver
		HostOnlyNicType:         hostOnlyNicType,
		HypervVirtualSwitch:     hypervVirtualSwitch,
		HypervUseExternalSwitch: hypervUseExternalSwitch,
		HypervExternalAdapter:   hypervExternalAdapter,
		KVMNetwork:              kvmNetwork, // Only used by the KVM driver
		KVMQemuURI:              kvmQemuURI,
		KVMGPU:                  kvmGPU,
		KVMHidden:               kvmHidden,
		NatNicType:              natNicType,
		DockerOpt:               flattenStringList(dockerOpt), // Each entry is formatted as KEY=VALUE.
		DisableDriverMounts:     disableDriverMounts,          // Only used by virtualbox
		KubernetesConfig:        kubeConfig,
		Nodes:                   []cfg.Node{nodeConfig},
		Addons:                  addons,
	}
	log.Printf("clusterConfig: %v", conf)
	return conf, customConfig, nil
}
func readFileAsBase64String(path string) (string, error) {
file, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
return b64.StdEncoding.EncodeToString(file), nil
}
// resourceMinikubeDelete tears the cluster down: it deletes the libmachine
// host, then removes the profile configuration, and finally clears the
// resource id so Terraform forgets the resource.
func resourceMinikubeDelete(d *schema.ResourceData, _ interface{}) error {
	log.Println("Deleting local Kubernetes cluster...")
	// Rebuild the config from resource data only to learn the profile name.
	config, _, err := getClusterConfigFromResource(d)
	if err != nil {
		log.Printf("Error getting cluster config: %s\n", err)
		return err
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		log.Printf("Error getting client: %s\n", err)
		return err
	}
	defer api.Close()
	if err = machine.DeleteHost(api, config.Name); err != nil {
		log.Println("Errors occurred deleting machine: ", err)
		return err
	}
	fmt.Println("Machine deleted.")
	//if err := KillMountProcess(); err != nil {
	//	log.Println("Errors occurred deleting mount process: ", err)
	//}
	if err := cfg.DeleteProfile(config.Name); err != nil {
		log.Println("Error deleting machine profile config")
		return err
	}
	// An empty id tells Terraform the resource no longer exists.
	d.SetId("")
	return nil
}
// flattenStringList converts a Terraform schema list value ([]interface{}
// whose elements are strings) into a []string.
//
// A nil input now yields an empty, non-nil slice instead of panicking on the
// type assertion, so callers need no nil guard for unset attributes.
func flattenStringList(in interface{}) []string {
	if in == nil {
		return []string{}
	}
	inlist := in.([]interface{})
	out := make([]string, len(inlist))
	for i, v := range inlist {
		// A non-string element still panics, surfacing schema bugs early.
		out[i] = v.(string)
	}
	return out
}
// all code from https://github.com/kubernetes/minikube/blob/master/cmd/minikube/cmd/start.go

// selectDriver picks the VM driver for the cluster: the driver of an
// existing profile wins, then an explicitly requested driver, and finally
// the best automatic suggestion. It exits the process when no driver can be
// suggested at all.
func selectDriver(existing *cfg.ClusterConfig, new cfg.ClusterConfig) registry.DriverState {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(new.KVMQemuURI)

	// By default, the driver is whatever we used last time
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		log.Printf("Using the %s driver based on existing profile", ds.String())
		return ds
	}

	// Default to looking at the new driver parameter
	if d := new.Driver; d != "" {
		ds := driver.Status(d)
		if ds.Name == "" {
			// Build the message once instead of three times, and use
			// errors.New on the prepared string (was errors.New(fmt.Sprintf)).
			msg := fmt.Sprintf("The driver '%s' is not supported on %s", d, runtime.GOOS)
			log.Print(msg)
			return registry.DriverState{
				Name:      d,
				Priority:  0,
				State:     registry.State{Error: errors.New(msg)},
				Rejection: msg,
			}
		}
		log.Printf("Using the %s driver based on user configuration", ds.String())
		return ds
	}

	// No hint available: ask the registry for the best installed driver.
	choices := driver.Choices(true)
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		log.Printf("Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			log.Printf("%s: %s", r.Name, r.Rejection)
		}
		log.Printf("Try specifying a 'driver' resource option manually")
		os.Exit(exit.Unavailable)
	}

	if len(alts) > 1 {
		altNames := []string{}
		for _, a := range alts {
			altNames = append(altNames, a.String())
		}
		log.Printf("Automatically selected the %s driver. Other choices: %s", pick.Name, strings.Join(altNames, ", "))
	} else {
		log.Printf("Automatically selected the %s driver", pick.String())
	}
	return pick
}
// hostDriver returns the actual driver used by a libmachine host, which can
// differ from our config. Whenever the machine state cannot be inspected it
// falls back to the driver recorded in the existing profile.
func hostDriver(existing *cfg.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		log.Printf("hostDriver NewAPIClient: %v", err)
		return existing.Driver
	}
	// The client was previously never closed (resource leak); every other
	// call site in this file defers Close.
	defer api.Close()

	cp, err := cfg.PrimaryControlPlane(existing)
	if err != nil {
		log.Printf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		log.Printf("hostDriver api.Load: %v", err)
		return existing.Driver
	}
	return h.Driver.DriverName()
}
// validateSpecifiedDriver makes sure that if a user has passed in a driver
// it matches the existing cluster if there is one. It returns nil when there
// is no existing cluster, no requested driver, or when the two agree.
func validateSpecifiedDriver(existing *cfg.ClusterConfig, requested string) error {
	// Nothing to cross-check without both sides.
	if existing == nil || requested == "" {
		return nil
	}
	old := hostDriver(existing)
	if requested == old {
		return nil
	}
	// fmt.Errorf replaces the former errors.New(fmt.Sprintf(...)) anti-idiom.
	return fmt.Errorf("The existing minikube VM was created using the %s driver, and is incompatible with the %s driver.", old, requested)
}
// validateDriver validates that the selected driver appears sane, exits if not
//
// NOTE: on an unsupported or uninstalled driver this calls exit.WithCodeT /
// os-level exit, terminating the whole provider process rather than
// returning an error to the caller.
func validateDriver(ds registry.DriverState, existing *cfg.ClusterConfig) {
	name := ds.Name
	log.Printf("validating driver %q against %+v", name, existing)
	if !driver.Supported(name) {
		exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
	}
	st := ds.State
	log.Printf("status for %s: %+v", name, st)
	if st.Error != nil {
		// Surface the driver's own diagnosis, suggestion and docs link.
		out.ErrLn("")
		out.WarningT("'{{.driver}}' driver reported an issue: {{.error}}", out.V{"driver": name, "error": st.Error})
		out.ErrT(out.Tip, "Suggestion: {{.fix}}", out.V{"fix": translate.T(st.Fix)})
		if st.Doc != "" {
			out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": st.Doc})
		}
		out.ErrLn("")
		if !st.Installed {
			// A profile that references a missing driver needs an explicit
			// 'minikube delete' before another driver can be used.
			if existing != nil {
				if old := hostDriver(existing); name == old {
					exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name})
				}
			}
			exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name})
		}
	}
}
// profileFilePath returns the path of the config file for the given profile.
// An optional miniHome argument overrides the default minikube home
// directory.
func profileFilePath(profile string, miniHome ...string) string {
	var home string
	switch {
	case len(miniHome) > 0:
		home = miniHome[0]
	default:
		home = localpath.MiniPath()
	}
	return filepath.Join(home, "profiles", profile, "config.json")
}
// preparePSP renders the PodSecurityPolicy manifest template into the
// minikube addons directory so it is applied when the cluster starts.
// Returns the first templating, mkdir or write error encountered.
func preparePSP() error {
	var policiesContent bytes.Buffer
	opts := struct{}{}
	if err := PSPyml.Execute(&policiesContent, opts); err != nil {
		log.Printf("cannot template PSP: %+v", err)
		return err
	}

	addonsFolder := localpath.MakeMiniPath("files", "etc", "kubernetes", "addons")
	if err := os.MkdirAll(addonsFolder, 0755); err != nil {
		log.Printf("error creating minikube addons directory '%s': %v", addonsFolder, err)
		return err
	}

	// Join directly instead of building an intermediate []string just to
	// feed filepath.Join(addonsPath...).
	policiesFilePath := filepath.Join(addonsFolder, pspFileName)
	return ioutil.WriteFile(policiesFilePath, policiesContent.Bytes(), 0644)
}
|
package person
// Person is a basic identity record.
type Person struct {
	ID       string // unique identifier
	Name     string // given name
	LastName string // family name
	Age      int    // age in years
}
|
package main
// hack in imports to make godep happy about some binaries we vendor
import (
_ "github.com/jteeuwen/go-bindata/go-bindata"
)
// main is intentionally empty: this file exists solely so dependency tooling
// (godep) keeps the blank-imported vendored binaries in the module graph.
func main() {}
|
package main
import "fmt"
// main exercises checkValidString against a fixed battery of sample inputs,
// printing each string together with its validity.
func main() {
	fmt.Println("Valid Parenthesis String")
	cases := []string{
		"(())(())(((()*()()()))()((()()(*()())))(((*)()",
		"(((()*())))((()(((()(()))()**(*)())))())()()*",
		"(()())",
		"",
		"(())",
		"(***",
		"((**",
		"((***))(((",
		"((***)***)(((",
		"(*))",
		"*(*)**))",
		"(*)",
		"(())**(()))**",
		"(((***))***())",
		"(((***)***())",
		"(((**)*()))",
		"(((**)*()",
	}
	for _, tc := range cases {
		doTest(tc)
	}
}
// doTest prints the input string followed by the checkValidString verdict,
// e.g. `(()= false`.
func doTest(s string) {
	fmt.Printf("%s= %v\n", s, checkValidString(s))
}
// checkValidString reports whether s — consisting of '(', ')' and '*',
// where each '*' may act as '(', ')' or the empty string — can form a valid
// parenthesis sequence.
//
// Two greedy passes suffice: scanning left-to-right with every '*' counted
// as '(' must never go negative, and scanning right-to-left with every '*'
// counted as ')' must never go negative. Both together are necessary and
// sufficient.
func checkValidString(s string) bool {
	chars := []rune(s)
	n := len(chars)

	balance := 0
	for _, c := range chars {
		switch c {
		case '(', '*':
			balance++
		default:
			balance--
		}
		if balance < 0 {
			return false
		}
	}

	balance = 0
	for i := n - 1; i >= 0; i-- {
		switch chars[i] {
		case ')', '*':
			balance++
		default:
			balance--
		}
		if balance < 0 {
			return false
		}
	}
	return true
}
// checkValidString_not_accepted is a kept-for-reference earlier attempt at
// the same problem (the name records that it was NOT accepted by the online
// judge). It tries to settle star/paren balances lazily whenever a '(' is
// seen after a run of ')', then compares the final left/right difference
// against the remaining stars — a heuristic that does not cover all cases.
// Do not use; see checkValidString for the correct two-pass solution.
func checkValidString_not_accepted(s string) bool {
	// flag marks that at least one ')' has been seen since the last
	// settlement; star/left/right are running counts within the window.
	in, l, star, left, right, flag := []rune(s), len(s), 0, 0, 0, false
	for i := 0; i < l; i++ {
		if in[i] == '*' {
			star++
		} else if in[i] == '(' {
			if flag {
				if left == right {
					left, right, star, flag = 0, 0, 0, false
				} else if left > right {
					left = left - right
					right, flag = 0, false
				} else {
					// More ')' than '(': spend stars to cover the deficit.
					star = star + left - right
					if star < 0 {
						return false
					}
					left, right, flag = 0, 0, false
				}
			}
			left++
		} else if in[i] == ')' {
			flag = true
			right++
		}
	}
	diff := getDifference(left, right)
	// Debug output (note: no trailing newline in the format string).
	fmt.Printf("left= %d, right= %d, star= %d", left, right, star)
	if diff == 0 || diff <= star {
		return true
	}
	return false
}
// getDifference returns the absolute difference between a and b.
func getDifference(a, b int) int {
	if b > a {
		return b - a
	}
	return a - b
}
// type item struct {
// left int
// star int
// }
// type stack []item
// func newStack(int size) *stack {
// stack := make([]item, size)
// return &stack
// }
// func (s *stack) push(item r) {
// s = append(s, r)
// }
// func (s *stack) pop() item {
// l := len(stack)
// r := stack[l-1]
// stack = stack[:l]
// return r
// }
// func (s *stack) top() item {
// return s[len(s)-1]
// }
// func (s *stack) isEmpty() bool {
// return len(s) == 0
// }
|
package main
import "fmt"
// The panic function can be used to stop the program.
// It is typically called when an error occurs while the program is running.
// When panic is called the program halts, but deferred functions still execute.
// endApp prints a closing message; it is deferred from runApp so it runs
// even when runApp panics.
func endApp() {
	fmt.Println("end app")
}

// runApp demonstrates the panic/defer interaction: when shouldPanic is true
// it panics with "APP ERROR", and the deferred endApp still executes before
// the panic propagates.
//
// The parameter was renamed from `error`, which shadowed the predeclared
// error type inside the function body (positional callers are unaffected).
func runApp(shouldPanic bool) {
	defer endApp()
	if shouldPanic {
		panic("APP ERROR")
	}
	fmt.Println("app running properly")
}
// main triggers the panicking path of runApp; the commented-out call shows
// the normal (non-panicking) path.
func main() {
	//runApp(false)
	runApp(true)
}
|
package controllers
import (
"github.com/labstack/echo/v4"
"github.com/minuchi/go-echo-auth/lib"
userService "github.com/minuchi/go-echo-auth/services/user"
"gorm.io/gorm"
"net/http"
"time"
)
type (
loginRequest struct {
Email string `json:"email" validate:"required,email"`
Password string `json:"password" validate:"required"`
}
signUpRequest struct {
Email string `json:"email" validate:"required,email"`
Password string `json:"password" validate:"required"`
PasswordConfirm string `json:"password_confirm" validate:"required,eqfield=Password"`
}
getAccessTokenRequest struct {
RefreshToken string `json:"refresh_token"`
}
getTimeResponse struct {
Time string `json:"time"`
}
)
// GetTime responds with the current server time in RFC 3339 format.
func GetTime(c echo.Context) error {
	resp := getTimeResponse{Time: time.Now().Format(time.RFC3339)}
	return c.JSON(http.StatusOK, &resp)
}
// Login authenticates a user by email and password and, on success, returns
// a refresh token as {"refresh_token": ...}. Bad payloads yield 400, bad
// credentials yield echo.ErrBadRequest.
func Login(c echo.Context) error {
	body := new(loginRequest)
	if err := c.Bind(body); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	if err := c.Validate(body); err != nil {
		return err
	}

	db := c.Get("db").(*gorm.DB)
	email := body.Email
	userHashedPassword, err := userService.GetUserPasswordByEmail(db, email)
	if err != nil {
		return err
	}

	// A verification error is deliberately folded into "mismatch": the
	// credentials are rejected without leaking the reason to the caller.
	ok, _ := lib.VerifyPassword(userHashedPassword, body.Password)
	if !ok {
		return echo.ErrBadRequest
	}

	userID := userService.GetUserIdByEmail(db, email)
	refreshToken := lib.CreateRefreshToken(userID)
	return c.JSON(http.StatusOK, echo.Map{"refresh_token": refreshToken})
}
// SignUp registers a new user after verifying the email is not taken.
//
// On success it returns only the email: the previous version echoed the
// whole request body back, leaking the plaintext password (and its
// confirmation) in the 200 response.
func SignUp(c echo.Context) error {
	body := new(signUpRequest)
	if err := c.Bind(body); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	if err := c.Validate(body); err != nil {
		return err
	}

	db := c.Get("db").(*gorm.DB)
	email := body.Email
	if userService.CheckUserExists(db, email) > 0 {
		return echo.NewHTTPError(http.StatusBadRequest, "This email already exists.")
	}

	hashedPassword := lib.HashPassword(body.Password)
	userService.CreateUser(db, email, hashedPassword)
	return c.JSON(http.StatusOK, echo.Map{"email": email})
}
// IssueAccessToken exchanges a refresh token for a short-lived access token,
// returned as {"access_token": ...}.
func IssueAccessToken(c echo.Context) error {
	req := new(getAccessTokenRequest)
	if err := c.Bind(req); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	if err := c.Validate(req); err != nil {
		return err
	}
	userID := lib.DecryptRefreshToken(req.RefreshToken)
	return c.JSON(http.StatusOK, echo.Map{"access_token": lib.CreateAccessToken(userID)})
}
// Verify confirms the caller's access token is valid and echoes the user id
// extracted by the auth middleware.
// NOTE(review): assumes the auth middleware always sets "userId" as a uint —
// the type assertion panics otherwise; confirm this route is always mounted
// behind that middleware.
func Verify(c echo.Context) error {
	userId := c.Get("userId").(uint)
	return c.JSON(http.StatusOK, echo.Map{"ok": true, "user_id": userId})
}
|
package worker
import (
"log"
"os"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/tasks"
"github.com/hibiken/asynq"
)
// Workers starts an asynq worker server against the Redis instance named by
// the REDIS_ADDR_PORT environment variable and registers the handlers for
// the payment, payment-passed and promotion e-mail tasks. It blocks until
// the server stops and terminates the process on error.
func Workers() {
	redisOpt := asynq.RedisClientOpt{Addr: os.Getenv("REDIS_ADDR_PORT")}
	server := asynq.NewServer(redisOpt, asynq.Config{Concurrency: 10})

	handlers := asynq.NewServeMux()
	handlers.HandleFunc(tasks.PaymentEmail, tasks.HandlePaymentEmailTask)
	handlers.HandleFunc(tasks.PaymentPassedEmail, tasks.HandlePaymentPassedEmailTask)
	handlers.HandleFunc(tasks.PromotionEmail, tasks.HandlePromotionEmailTask)

	if err := server.Run(handlers); err != nil {
		log.Fatal(err)
	}
}
|
package tumblr
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
)
const (
	// Version is the current version of this lib
	Version = "0.0.1"
	// BaseURL is the shared path to the tumblr api
	// NOTE(review): plain HTTP — confirm the API is reachable over HTTPS and
	// upgrade the scheme if so; credentials are sent as query parameters.
	BaseURL = "http://api.tumblr.com/v2/"
	// UserAgent is the user agent when making requests
	UserAgent = "github.com/lestopher/tumblr v" + Version
)
// Client manages communication with Tumblr
type Client struct {
	// client HTTP client to communicate with api
	client *http.Client
	// BaseURL URL used to communicate with api
	BaseURL *url.URL
	// UserAgent agent used with the api
	UserAgent string
	// ClientID the id of your oauth application
	ClientID string
	// ClientSecret the secret of your oauth application
	ClientSecret string
	// AccessToken token used when user is authenticated through oauth
	AccessToken string
	// Services used to communicate with the tumblr api
	Blog   *BlogService
	Users  *UsersService
	Tagged *TaggedService
	// Response holds the envelope of the most recent API call (set by Do).
	Response *Response
}
// ResponseMeta the meta response for requests
type ResponseMeta struct {
Status int
Msg string
}
// Response the main response from tumblr api
type Response struct {
HTTPResponse *http.Response
Meta *ResponseMeta `json:"meta, omitempty"`
Response interface{} `json:"response, omitempty"`
}
// NewClient returns a Client ready to talk to the tumblr API; when
// httpClient is nil, http.DefaultClient is used.
func NewClient(httpClient *http.Client) *Client {
	hc := httpClient
	if hc == nil {
		hc = http.DefaultClient
	}
	base, _ := url.Parse(BaseURL)
	client := &Client{
		client:    hc,
		BaseURL:   base,
		UserAgent: UserAgent,
	}
	client.Users = &UsersService{client: client}
	client.Blog = &BlogService{client: client}
	client.Tagged = &TaggedService{client: client}
	return client
}
// Do sends an API request and returns the API response. The API response is
// decoded and stored in the value pointed to by v, or returned as an error if
// an API error has occurred.
//
// When v is non-nil it becomes the Response.Response decode target, and the
// decoded envelope is also stored on c.Response as a side effect. The body
// is always closed before returning; any JSON decode error is returned
// alongside the raw *http.Response.
func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// err = CheckResponse(resp)
	// if err != nil {
	// 	return resp, err
	// }
	r := &Response{HTTPResponse: resp}
	if v != nil {
		r.Response = v
		err = json.NewDecoder(resp.Body).Decode(r)
		c.Response = r
	}
	return resp, err
}
// NewRequest creates an API request. A relative URL can be provided in urlStr,
// in which case it is resolved relative to the BaseURL of the Client.
// Relative URLs should always be specified without a preceding slash.
//
// OAuth credentials configured on the client (AccessToken, ClientID,
// ClientSecret) are appended as query parameters unless the caller already
// set them; POST requests get a form-urlencoded Content-Type.
func (c *Client) NewRequest(method, urlStr string, body string) (*http.Request, error) {
	rel, err := url.Parse(urlStr)
	if err != nil {
		return nil, err
	}
	u := c.BaseURL.ResolveReference(rel)

	q := u.Query()
	if c.AccessToken != "" && q.Get("access_token") == "" {
		q.Set("access_token", c.AccessToken)
	}
	if c.ClientID != "" && q.Get("client_id") == "" {
		q.Set("client_id", c.ClientID)
	}
	if c.ClientSecret != "" && q.Get("client_secret") == "" {
		q.Set("client_secret", c.ClientSecret)
	}
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(method, u.String(), bytes.NewBufferString(body))
	if err != nil {
		return nil, err
	}
	// Use the standard method constant rather than a raw "POST" literal.
	if method == http.MethodPost {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	}
	req.Header.Add("User-Agent", c.UserAgent)
	return req, nil
}
// CheckResponse checks the API response for error, and returns it
// if present. A response is considered an error if it has non StatusOK
// code.
// func CheckResponse(r *http.Response) error {
// if r.StatusCode == http.StatusOK {
// return nil
// }
//
// resp := new(ErrorResponse)
// resp.Response = r
//
// // Sometimes Instagram returns 500 with plain message
// // "Oops, an error occurred.".
// if r.StatusCode == http.StatusInternalServerError {
// meta := &ResponseMeta{
// ErrorType: "Internal Server Error",
// Code: 500,
// ErrorMessage: "Oops, an error occurred.",
// }
// resp.Meta = meta
//
// return resp
// }
//
// data, err := ioutil.ReadAll(r.Body)
// if err == nil && data != nil {
// json.Unmarshal(data, resp)
// }
// return resp
// }
|
package controllers
import (
"bwa-startup/auth"
"bwa-startup/helpers"
"bwa-startup/users"
"fmt"
"net/http"
"github.com/gin-gonic/gin"
)
// userController handles the HTTP endpoints for user registration, login and
// email availability, delegating business logic to the injected user and
// auth services.
type userController struct {
	userService users.Service
	authService auth.Service
}
// NewUserController wires the user and auth services into a userController.
// (Reformatted to be gofmt-clean: no space before the parameter list, space
// before the opening brace.)
func NewUserController(userService users.Service, authService auth.Service) *userController {
	return &userController{userService, authService}
}
// RegisterUser binds the registration payload, creates the user through the
// user service, and responds with the formatted user plus a freshly
// generated JWT.
func (h *userController) RegisterUser(c *gin.Context) {
	// Capture the user input, map it onto RegisterUserInput, and pass that
	// struct to the service layer.
	var input users.RegisterUserInput

	// ShouldBindJSON (matching Login below) leaves status handling to us;
	// the previous c.BindJSON wrote its own 400 before we wrote a 422,
	// causing a double response write on malformed payloads.
	err := c.ShouldBindJSON(&input)
	if err != nil {
		errors := helpers.FormatValidationError(err)
		errorMessage := gin.H{"errors": errors}
		response := helpers.APIResponse("Register account failed", http.StatusUnprocessableEntity, "error", errorMessage)
		c.JSON(http.StatusUnprocessableEntity, response)
		return
	}

	user, err := h.userService.RegisterUser(input)
	if err != nil {
		response := helpers.APIResponse("Register account failed", http.StatusBadRequest, "error", nil)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	token, err := h.authService.GenerateToken(user.ID)
	if err != nil {
		response := helpers.APIResponse("Register account failed", http.StatusBadRequest, "error", nil)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	formatter := users.FormatUser(user, token)
	// Fixed user-facing typo: "has ben" -> "has been".
	response := helpers.APIResponse("Account has been registered", http.StatusOK, "success", formatter)
	c.JSON(http.StatusOK, response)
}
// Login authenticates a user: it binds the email/password payload, asks the
// user service to verify credentials, issues a JWT for the matched user and
// responds with the formatted user data.
func (h *userController) Login(c *gin.Context) {
	var input users.LoginInput

	err := c.ShouldBindJSON(&input)
	if err != nil {
		errors := helpers.FormatValidationError(err)
		errorMessage := gin.H{"errors": errors}

		response := helpers.APIResponse("Login failed", http.StatusUnprocessableEntity, "error", errorMessage)
		c.JSON(http.StatusUnprocessableEntity, response)
		return
	}

	loggedInUser, err := h.userService.Login(input)
	if err != nil {
		errorMessage := gin.H{"errors": err.Error()}
		response := helpers.APIResponse("Login failed", http.StatusUnprocessableEntity, "error", errorMessage)
		c.JSON(http.StatusUnprocessableEntity, response)
		return
	}

	token, err := h.authService.GenerateToken(loggedInUser.ID)
	if err != nil {
		// FIX: "Login failed" corrects the "Loginfailed" missing space in the
		// user-facing message, matching the other failure responses above.
		response := helpers.APIResponse("Login failed", http.StatusBadRequest, "error", nil)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	formatter := users.FormatUser(loggedInUser, token)
	response := helpers.APIResponse("Login Successfully", http.StatusOK, "success", formatter)
	c.JSON(http.StatusOK, response)
}
// CheckEmailAvailability reports whether the submitted email address is
// still free to register: bind JSON input, ask the user service, then reply
// with an is_available flag and a matching message.
func (h *userController) CheckEmailAvailability(c *gin.Context) {
	var input users.CheckEmailInput
	if err := c.ShouldBindJSON(&input); err != nil {
		errorMessage := gin.H{"errors": helpers.FormatValidationError(err)}
		response := helpers.APIResponse("Email Checking failed", http.StatusUnprocessableEntity, "error", errorMessage)
		c.JSON(http.StatusUnprocessableEntity, response)
		return
	}

	isEmailAvailable, err := h.userService.IsEmailAvailable(input)
	if err != nil {
		errorMessage := gin.H{"errors": "Server error"}
		response := helpers.APIResponse("Email Checking failed", http.StatusUnprocessableEntity, "error", errorMessage)
		c.JSON(http.StatusUnprocessableEntity, response)
		return
	}

	metaMessage := "Email has been registered"
	if isEmailAvailable {
		metaMessage = "Email is available"
	}
	data := gin.H{
		"is_available": isEmailAvailable,
	}
	c.JSON(http.StatusOK, helpers.APIResponse(metaMessage, http.StatusOK, "success", data))
}
// UploadAvatar stores the uploaded "avatar" form file under images/ as
// "<userID>-<filename>" and records the path through the user service.
// The current user is read from the gin context set by the auth middleware.
func (h *userController) UploadAvatar(c *gin.Context) {
	file, err := c.FormFile("avatar")
	if err != nil {
		data := gin.H{"is_uploaded": false}
		response := helpers.APIResponse("Failed to upload avatar image", http.StatusBadRequest, "error", data)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	// currentUser is populated by the auth middleware; MustGet panics if the
	// middleware did not run.
	currentUser := c.MustGet("currentUser").(users.User)
	userID := currentUser.ID

	// Prefix with the user ID so concurrent uploads cannot collide across users.
	path := fmt.Sprintf("images/%d-%s", userID, file.Filename)

	err = c.SaveUploadedFile(file, path)
	if err != nil {
		data := gin.H{"is_uploaded": false}
		response := helpers.APIResponse("Failed to upload avatar image", http.StatusBadRequest, "error", data)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	_, err = h.userService.SaveAvatar(userID, path)
	if err != nil {
		data := gin.H{"is_uploaded": false}
		response := helpers.APIResponse("Failed to upload avatar image", http.StatusBadRequest, "error", data)
		c.JSON(http.StatusBadRequest, response)
		return
	}

	data := gin.H{"is_uploaded": true}
	// FIX: "successfully" corrects the "successfuly" typo in the message.
	response := helpers.APIResponse("Avatar successfully uploaded", http.StatusOK, "success", data)
	c.JSON(http.StatusOK, response)
}
// FetchUser returns the authenticated user's profile. The user is taken
// from the gin context populated by the auth middleware; an empty token is
// passed to the formatter because no new JWT is issued here.
func (h *userController) FetchUser(c *gin.Context) {
	currentUser := c.MustGet("currentUser").(users.User)

	formatter := users.FormatUser(currentUser, "")
	// FIX: "Successfully" corrects the "Successfuly" typo in the message.
	response := helpers.APIResponse("Successfully fetch user data", http.StatusOK, "success", formatter)
	c.JSON(http.StatusOK, response)
}
|
package main
import (
"fmt"
"math/rand"
"sync"
"time"
)
// job carries one random value whose decimal digit sum is to be computed.
type job struct {
x int64
}
// result pairs a job with its computed digit sum.
type result struct {
*job
result int64
}
// Buffered channels decouple the producer goroutine (a) from the worker
// goroutines (b); wg tracks all of them.
var jobChan = make(chan *job, 100)
var resultChan = make(chan *result, 100)
var wg sync.WaitGroup
// a produces one job with a random payload every 500ms, forever.
func a(out chan<- *job) {
	defer wg.Done()
	for {
		out <- &job{x: rand.Int63()}
		time.Sleep(500 * time.Millisecond)
	}
}
// b consumes jobs, computes the decimal digit sum of each payload, and
// publishes the outcome on resultChan.
func b(jobs <-chan *job, results chan<- *result) {
	defer wg.Done()
	for {
		j := <-jobs
		sum := int64(0)
		for n := j.x; n > 0; n /= 10 {
			sum += n % 10
		}
		results <- &result{job: j, result: sum}
	}
}
// main starts one producer and 24 digit-sum workers, then prints results as
// they arrive. Producer and workers loop forever, so the range below never
// terminates and wg.Wait is unreachable in normal operation.
func main() {
	wg.Add(1)
	go a(jobChan)
	for i := 0; i < 24; i++ {
		// BUG FIX: the original called wg.Add(24) inside this 24-iteration
		// loop, adding 576 instead of 24. Add exactly one per goroutine.
		wg.Add(1)
		go b(jobChan, resultChan)
	}
	for result := range resultChan {
		fmt.Printf("value:%d result:%d\n", result.job.x, result.result)
	}
	wg.Wait()
}
|
package lecimg
import (
"image"
"image/color"
"log"
"testing"
)
// testAutoCrop runs the auto-crop filter over img with the given option and
// asserts the resulting image dimensions; on mismatch it also logs the
// computed crop rectangle to help diagnose the failure.
func testAutoCrop(t *testing.T, img image.Image, option AutoCropOption, expectedWidth, expectedHeight int) {
	// Run Filter
	result := NewAutoCropFilter(option).Run(NewFilterSource(img, "filename", 0))

	// Test result image size
	destBounds := result.Img().Bounds()
	widthMatch := destBounds.Dx() == expectedWidth
	heightMatch := destBounds.Dy() == expectedHeight
	if !widthMatch || !heightMatch {
		resultRect := result.(AutoCropResult).rect
		log.Printf("result rect = %v\n", resultRect)
	}

	// FIX: "expected" corrects the "exepcted" typo in both failure messages.
	if !widthMatch {
		t.Errorf("width mismatch. expected=%v, actual=%v", expectedWidth, destBounds.Dx())
	}
	if !heightMatch {
		t.Errorf("height mismatch. expected=%v, actual=%v", expectedHeight, destBounds.Dy())
	}
}
// TestAutoCropMargin crops a black 100x250 region out of a 200x350 white
// canvas and checks the result keeps the configured 10px margins.
func TestAutoCropMargin(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 128,
MinRatio: 1.0, MaxRatio: 3.0,
MaxWidthCropRate: 0.5, MaxHeightCropRate: 0.5,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
},
120, // max(width - leftSpace - rightSpace + marginLeft + marginRight, width * maxWidthCropRate)
270, // max(height - topSpace - bottomSpace + marginTop + marginBottom, height * maxHeightCropRate)
)
}
// TestAutoCropMaxRatio verifies the crop width is widened so the result
// does not exceed the configured height/width MaxRatio of 2.0.
func TestAutoCropMaxRatio(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 128,
MinRatio: 1.0, MaxRatio: 2.0,
MaxWidthCropRate: 0.5, MaxHeightCropRate: 0.5,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
},
135, // max(120, expectedHeight / maxRatio)
270, // 270
)
}
// TestAutoCropMaxCropRatio verifies MaxWidthCropRate/MaxHeightCropRate cap
// how much of each dimension may be removed (10% width, 20% height here).
func TestAutoCropMaxCropRatio(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 100,
MinRatio: 1.0, MaxRatio: 2.0,
MaxWidthCropRate: 0.1, MaxHeightCropRate: 0.2,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
},
180, // max(width - leftSpace - rightSpace + marginLeft + marginRight, width * maxWidthCropRate)
280, // max(height - topSpace - bottomSpace + marginTop + marginBottom, height * maxHeightCropRate)
)
}
// TestAutoCropInnerDetectionPadding1 draws border lines at the very top and
// at x=25, then uses PaddingTop/PaddingLeft so detection starts inside
// those lines; paddings here exactly cover the drawn lines.
func TestAutoCropInnerDetectionPadding1(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
DrawLine(img, 0, 0, 200, 0, color.Black)
DrawLine(img, 25, 0, 25, 350, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 100,
MinRatio: 1.0, MaxRatio: 10.0,
MaxWidthCropRate: 0.5, MaxHeightCropRate: 0.5,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
PaddingTop: 10, PaddingLeft: 25,
},
145,
350,
)
}
// TestAutoCropInnerDetectionPadding2 is the companion case where the
// paddings (21, 11) are strictly larger than the drawn lines (y=20, x=10),
// so the lines are skipped and only the inner content drives the crop.
func TestAutoCropInnerDetectionPadding2(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
DrawLine(img, 0, 20, 200, 20, color.Black)
DrawLine(img, 10, 0, 10, 350, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 100,
MinRatio: 1.0, MaxRatio: 10.0,
MaxWidthCropRate: 0.5, MaxHeightCropRate: 0.5,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
PaddingTop: 21, PaddingLeft: 11,
},
120,
270,
)
}
// TestAutoCropMaxCrop verifies MaxCropTop/Left of 0 forbid cropping those
// edges entirely, while bottom/right may be cropped up to 100px.
func TestAutoCropMaxCrop(t *testing.T) {
img := CreateImage(200, 350, color.White)
FillRect(img, 50, 50, 150, 300, color.Black)
testAutoCrop(t, img, AutoCropOption{
Threshold: 128,
MinRatio: 1.0, MaxRatio: 3.0,
MaxWidthCropRate: 0.5, MaxHeightCropRate: 0.5,
MarginTop: 10, MarginBottom: 10, MarginLeft: 10, MarginRight: 10,
MaxCropTop: 0, MaxCropBottom: 100, MaxCropLeft: 0, MaxCropRight: 100,
},
160, // max(width - rightSpace + marginRight, width * maxWidthCropRate)
310, // max(height - bottomSpace + marginBottom, height * maxHeightCropRate)
)
}
|
package main
import (
"fmt"
"time"
)
// Default unbuffered
// main demonstrates a buffered channel of capacity 1: the send completes
// immediately without a waiting receiver, the receiver prints the value
// after 3 seconds, and the program exits after 4.
func main() {
	channel := make(chan string, 1)

	// Capacity 1 means this send never blocks; a second send before any
	// receive would fill the buffer and block.
	go func() {
		channel <- "test"
		fmt.Println("Send!")
	}()

	go func() {
		time.Sleep(3 * time.Second)
		fmt.Println(<-channel)
	}()

	// Give both goroutines time to finish before exiting.
	time.Sleep(4 * time.Second)
	fmt.Println("Exited!")
}
|
package util
import (
"testing"
"fmt"
"flag"
)
func TestConfig(t *testing.T) {
// NewConfigWithFile("/Users/derek/go/src/sixedu/data/config.json")
// c := GetConfig()
fmt.Println("")
args := []string{
"-conf=这是测试命令行参数",
}
flag.CommandLine.Parse(args)
GetConfig()
// fmt.Println(c)
} |
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"encoding/json"
"fmt"
"testing"
"github.com/kubevela/pkg/util/compression"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
)
// TestApplicationRevisionCompression round-trips an ApplicationRevision
// through JSON with each supported compression type and checks that:
// (a) compressed payloads embed a `"type":"...","data":"` envelope,
// (b) unmarshalling restores content identical to the uncompressed form, and
// (c) both compressed encodings are smaller than the uncompressed one.
func TestApplicationRevisionCompression(t *testing.T) {
// Fill data
spec := &ApplicationRevisionSpec{}
spec.Application = Application{Spec: ApplicationSpec{Components: []common.ApplicationComponent{{Name: "test-name"}}}}
spec.ComponentDefinitions = make(map[string]*ComponentDefinition)
spec.ComponentDefinitions["def"] = &ComponentDefinition{Spec: ComponentDefinitionSpec{PodSpecPath: "path"}}
spec.WorkloadDefinitions = make(map[string]WorkloadDefinition)
spec.WorkloadDefinitions["def"] = WorkloadDefinition{Spec: WorkloadDefinitionSpec{Reference: common.DefinitionReference{Name: "testdef"}}}
spec.TraitDefinitions = make(map[string]*TraitDefinition)
spec.TraitDefinitions["def"] = &TraitDefinition{Spec: TraitDefinitionSpec{ControlPlaneOnly: true}}
spec.PolicyDefinitions = make(map[string]PolicyDefinition)
spec.PolicyDefinitions["def"] = PolicyDefinition{Spec: PolicyDefinitionSpec{ManageHealthCheck: true}}
spec.WorkflowStepDefinitions = make(map[string]*WorkflowStepDefinition)
spec.WorkflowStepDefinitions["def"] = &WorkflowStepDefinition{Spec: WorkflowStepDefinitionSpec{Reference: common.DefinitionReference{Name: "testname"}}}
spec.ReferredObjects = []common.ReferredObject{{RawExtension: runtime.RawExtension{Raw: []byte("123")}}}
testAppRev := &ApplicationRevision{Spec: *spec}
// marshalAndUnmarshal JSON-round-trips a revision and returns the decoded
// copy plus the encoded size; compressed types must carry the envelope and
// the Data field must be cleared after decoding.
marshalAndUnmarshal := func(in *ApplicationRevision) (*ApplicationRevision, int) {
out := &ApplicationRevision{}
b, err := json.Marshal(in)
assert.NoError(t, err)
if in.Spec.Compression.Type != compression.Uncompressed {
assert.Contains(t, string(b), fmt.Sprintf("\"type\":\"%s\",\"data\":\"", in.Spec.Compression.Type))
}
err = json.Unmarshal(b, out)
assert.NoError(t, err)
assert.Equal(t, out.Spec.Compression.Type, in.Spec.Compression.Type)
assert.Equal(t, out.Spec.Compression.Data, "")
return out, len(b)
}
// uncompressed
testAppRev.Spec.Compression.SetType(compression.Uncompressed)
uncomp, uncompsize := marshalAndUnmarshal(testAppRev)
// zstd compressed
testAppRev.Spec.Compression.SetType(compression.Zstd)
zstdcomp, zstdsize := marshalAndUnmarshal(testAppRev)
// We will compare content later. Clear compression methods since it will interfere
// comparison and is verified earlier.
zstdcomp.Spec.Compression.SetType(compression.Uncompressed)
// gzip compressed
testAppRev.Spec.Compression.SetType(compression.Gzip)
gzipcomp, gzipsize := marshalAndUnmarshal(testAppRev)
gzipcomp.Spec.Compression.SetType(compression.Uncompressed)
assert.Equal(t, uncomp, zstdcomp)
assert.Equal(t, zstdcomp, gzipcomp)
assert.Less(t, zstdsize, uncompsize)
assert.Less(t, gzipsize, uncompsize)
}
|
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/hex"
"fmt"
"os"
)
// To encode publicKey use:
// publicKeyBytes, _ = x509.MarshalPKIXPublicKey(&private_key.PublicKey)
// Private Key:
// 3081a40201010430d35b96ee7ced244b5a47de8968b07ecd38a6dd756f0ffb40a72ccd5895e96f24310c1fc544d7f8d026c55213c8fa2ef2a00706052b81040022a164036200040ef0f59ad36a9661ef93044b53e5c2ca2e7b5ce23323367a3428ebeb256716b8c2cfc63225fd88174193cbe13c3137b41719058cd0fabd5713b91bc7b314f8086fba4b29734d675fccd6a7b4a4ec6af96d499ba64d792522f4710791d214ac45
// Public Key:
// 3076301006072a8648ce3d020106052b81040022036200040ef0f59ad36a9661ef93044b53e5c2ca2e7b5ce23323367a3428ebeb256716b8c2cfc63225fd88174193cbe13c3137b41719058cd0fabd5713b91bc7b314f8086fba4b29734d675fccd6a7b4a4ec6af96d499ba64d792522f4710791d214ac45
// main generates a P-384 ECDSA key pair, round-trips the private key through
// hex encoding, verifies a signature made with the original key against the
// restored key, and writes both encoded keys to ./key-pairs.txt.
func main() {
	p384 := elliptic.P384()

	// FIX: the original discarded every error below with `_`; each failure
	// is now reported and aborts the run.
	priv1, err := ecdsa.GenerateKey(p384, rand.Reader)
	if err != nil {
		fmt.Println(err)
		return
	}
	privateKeyBytes, err := x509.MarshalECPrivateKey(priv1)
	if err != nil {
		fmt.Println(err)
		return
	}
	encodedPrivateBytes := hex.EncodeToString(privateKeyBytes)

	// Restore the key from its hex form to prove the encoding is lossless.
	privateKeyBytesRestored, err := hex.DecodeString(encodedPrivateBytes)
	if err != nil {
		fmt.Println(err)
		return
	}
	priv2, err := x509.ParseECPrivateKey(privateKeyBytesRestored)
	if err != nil {
		fmt.Println(err)
		return
	}

	publicKeyBytes, err := x509.MarshalPKIXPublicKey(&priv1.PublicKey)
	if err != nil {
		fmt.Println(err)
		return
	}
	encodedPublicBytes := hex.EncodeToString(publicKeyBytes)

	// BUG FIX: the original used fmt.Println with a %s verb, which printed
	// the literal "%s"; Printf formats the value properly.
	fmt.Printf("Public key is: %s\n", encodedPublicBytes)
	fmt.Printf("Private key is: %s\n", encodedPrivateBytes)

	data := []byte("data")
	// Signing by priv1
	r, s, err := ecdsa.Sign(rand.Reader, priv1, data)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Verifying against priv2 (restored from priv1)
	if !ecdsa.Verify(&priv2.PublicKey, data, r, s) {
		fmt.Printf("Error")
		return
	}
	fmt.Printf("Key was restored from string successfully\n")
	fmt.Printf("go run ink-miner.go 127.0.0.1:12345 %s %s\n", encodedPublicBytes, encodedPrivateBytes)

	// Write key pair to file
	fmt.Println("Start writing to file")
	f, err := os.Create("./key-pairs.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	// FIX: close the file on every exit path instead of relying on a final
	// explicit Close that error paths could skip.
	defer f.Close()
	for _, chunk := range []string{encodedPrivateBytes, "\n", encodedPublicBytes, "\n"} {
		if _, err := f.Write([]byte(chunk)); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("Finished writing to file")
}
|
package gotten
import (
"github.com/Hexilee/gotten/headers"
"io"
"net/http"
"net/url"
)
type (
// Response is a read-oriented view over an HTTP response with a pluggable
// body unmarshaler.
Response interface {
StatusCode() int
Header() http.Header
Body() io.ReadCloser
ContentType() string
Cookies() []*http.Cookie
// Location returns the URL of the response's "Location" header,
// if present. Relative redirects are resolved relative to
// the Response's Request. ErrNoLocation is returned if no
// Location header is present.
Location() (*url.URL, error)
// ProtoAtLeast reports whether the HTTP protocol used
// in the response is at least major.minor.
ProtoAtLeast(major, minor int) bool
Unmarshal(ptr interface{}) error
}
// ResponseImpl wraps *http.Response (whose methods satisfy Cookies,
// Location and ProtoAtLeast via embedding) and the unmarshaler used by
// Unmarshal.
ResponseImpl struct {
*http.Response
unmarshaler ReadUnmarshaler
}
)
// newResponse wraps an *http.Response and the unmarshaler used by Unmarshal
// into the package's Response interface.
func newResponse(resp *http.Response, unmarshaler ReadUnmarshaler) Response {
	impl := &ResponseImpl{Response: resp, unmarshaler: unmarshaler}
	return impl
}
// StatusCode returns the numeric HTTP status of the underlying response.
func (resp ResponseImpl) StatusCode() int {
return resp.Response.StatusCode
}
// Header returns the underlying response headers.
func (resp ResponseImpl) Header() http.Header {
return resp.Response.Header
}
// Body returns the underlying response body; the caller owns closing it
// unless Unmarshal is used, which closes it itself.
func (resp ResponseImpl) Body() io.ReadCloser {
return resp.Response.Body
}
// ContentType returns the response's Content-Type header value.
func (resp ResponseImpl) ContentType() string {
return resp.Response.Header.Get(headers.HeaderContentType)
}
// Unmarshal decodes the response body into ptr using the configured
// ReadUnmarshaler, closing the body once decoding has finished.
func (resp ResponseImpl) Unmarshal(ptr interface{}) error {
	body := resp.Body()
	defer body.Close()
	return resp.unmarshaler.Unmarshal(body, resp.Header(), ptr)
}
|
package http
import (
"context"
"fmt"
"github.com/tppgit/we_service/pkg/auth"
"github.com/tppgit/we_service/pkg/services"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"google.golang.org/grpc"
)
// HandlerFunc registers a grpc-gateway endpoint on mux, dialing the gRPC
// backend at address with opts.
type HandlerFunc func(ctx context.Context, mux *runtime.ServeMux, address string, opts []grpc.DialOption) (err error)
// Server hosts a grpc-gateway ServeMux behind a plain net/http server,
// plus a non-gateway /upload route served by FileHandler.
type Server struct {
Mux *runtime.ServeMux
Server *http.Server
GRPCAddress string // backend gRPC endpoint the gateway dials
Address string // HTTP listen address
DialOptions []grpc.DialOption
HandlerFuncs []HandlerFunc
FileHandler services.FileHandling `inject:"file_upload_handling"`
JwtHandler *auth.JWTAuthenticationBackend `inject:"jwt_handler"`
}
// ServerOptionFunc mutates a Server during construction (functional options).
type ServerOptionFunc func(s *Server)
// WithGRPCAddress sets the backend gRPC endpoint the gateway dials.
func WithGRPCAddress(address string) ServerOptionFunc {
return func(s *Server) {
s.GRPCAddress = address
}
}
// WithAddress sets the HTTP listen address.
func WithAddress(address string) ServerOptionFunc {
return func(s *Server) {
s.Address = address
}
}
// WithOption appends gRPC dial options used when connecting to the backend.
func WithOption(opts ...grpc.DialOption) ServerOptionFunc {
return func(s *Server) {
s.DialOptions = append(s.DialOptions, opts...)
}
}
// WithHandler appends gateway endpoint registration functions run by Run.
func WithHandler(fs ...HandlerFunc) ServerOptionFunc {
return func(s *Server) {
s.HandlerFuncs = append(s.HandlerFuncs, fs...)
}
}
// New builds a Server: applies the options, then wires a ServeMux where the
// gateway handles "/" and FileHandler handles "/upload".
//
// NOTE(review): FileHandler is only injected (see the struct tag) and is nil
// here; the /upload closure assumes injection happens before the first
// request — confirm the injection container guarantees that.
func New(opts ...ServerOptionFunc) *Server {
s := &Server{
DialOptions: []grpc.DialOption{grpc.WithInsecure()}, // insecure by default; override via WithOption
}
for _, opt := range opts {
opt(s)
}
s.Mux = runtime.NewServeMux()
httpMux := http.NewServeMux()
httpMux.Handle("/", s.Mux)
httpMux.HandleFunc("/upload", func(writer http.ResponseWriter, request *http.Request) {
s.FileHandler.UploadFilterHandler(writer, request)
})
s.Server = &http.Server{
Addr: s.Address,
Handler: httpMux,
}
return s
}
// Run registers every configured gateway handler against the mux, prints
// the listen addresses, then starts the HTTP server (blocking).
func (s *Server) Run(ctx context.Context) error {
	for _, register := range s.HandlerFuncs {
		err := register(ctx, s.Mux, s.GRPCAddress, s.DialOptions)
		if err != nil {
			return err
		}
	}
	fmt.Printf(`Starting HTTP server at: "%s" (GRPC server at: "%s")`, s.Address, s.GRPCAddress)
	fmt.Println()
	return s.Start(ctx)
}
// Start runs ListenAndServe and blocks until the server stops.
//
// FIX: a graceful Stop/Shutdown makes ListenAndServe return
// http.ErrServerClosed; that is the expected shutdown path and is no longer
// reported as an error.
func (s *Server) Start(ctx context.Context) error {
	if err := s.Server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		return err
	}
	return nil
}
// Stop gracefully shuts the HTTP server down, honoring ctx's deadline.
// NOTE(review): Shutdown's error is discarded; consider surfacing it to the
// caller in a future signature change.
func (s *Server) Stop(ctx context.Context) {
s.Server.Shutdown(ctx)
}
|
package main
import (
"net/http"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// main wires up an Echo server with logging and panic-recovery middleware
// and three demo routes (plain text, JSON blob, HTML), listening on :1323.
func main() {
	e := echo.New()

	// Request logging plus panic recovery for every route.
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())

	e.GET("/", func(c echo.Context) error {
		return c.String(http.StatusOK, "Hello World!\n")
	})
	e.GET("/json", func(c echo.Context) error {
		payload := []byte(`{"id": "1", "msg": "Hello"}`)
		return c.JSONBlob(http.StatusOK, payload)
	})
	e.GET("/html", func(c echo.Context) error {
		return c.HTML(http.StatusOK, "<h1>Hello</h1>")
	})

	// Blocks until the server exits; any error is fatal.
	e.Logger.Fatal(e.Start(":1323"))
}
|
package queries
import (
"database/sql"
"log"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/attributes/models"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/configuration"
)
// EDIT_FACTOR_ATTRIBUTE_SQL updates one attributes."Attributes" row by its
// "AttributeId" ($8); $1-$7 carry the new column values.
const EDIT_FACTOR_ATTRIBUTE_SQL = `
UPDATE attributes."Attributes"
SET
"AttributeEnumId" = $1
,"SourceId" = $2
,"Value" = $3
,"Uncertainty" = $4
,"ValueUnitId1" = $5
,"ValueUnitId2" = $6
,"ValueUnitId3" = $7
WHERE
"AttributeId" = $8;`
// EditFactorAttribute updates a single attributes."Attributes" row with the
// values carried by factorAttribute, inside its own transaction.
//
// BUG FIX: the original called log.Fatal on every error, which exits the
// whole process and made the named error return dead code. Errors are now
// logged and returned to the caller, and the transaction is rolled back on
// any failure before Commit.
func EditFactorAttribute(factorAttribute models.FactorAttribute) (err error) {
	db, err := sql.Open("postgres", configuration.ConnectionString)
	if err != nil {
		log.Print(err)
		return
	}
	defer db.Close()

	transaction, err := db.Begin()
	if err != nil {
		log.Print(err)
		return
	}
	// Rollback is a harmless no-op once Commit has succeeded.
	defer transaction.Rollback()

	stmt, err := transaction.Prepare(EDIT_FACTOR_ATTRIBUTE_SQL)
	if err != nil {
		log.Print(err)
		return
	}
	defer stmt.Close()

	// NOTE(review): FactorId is bound to $1 ("AttributeEnumId") and
	// FactorAttributeId to $8 ("AttributeId") — confirm this field-to-column
	// mapping is intentional against the schema.
	_, err = stmt.Exec(
		factorAttribute.FactorId,
		factorAttribute.SourceId,
		factorAttribute.Value,
		factorAttribute.Uncertainty,
		factorAttribute.ValueUnitId1,
		factorAttribute.ValueUnitId2,
		factorAttribute.ValueUnitId3,
		factorAttribute.FactorAttributeId,
	)
	if err != nil {
		log.Print(err)
		return
	}

	err = transaction.Commit()
	if err != nil {
		log.Print(err)
	}
	return
}
|
package service
import (
"math"
"github.com/helloferdie/stdgo/db"
"github.com/helloferdie/stdgo/language"
"github.com/helloferdie/stdgo/libresponse"
"github.com/helloferdie/stdgo/libslice"
"github.com/helloferdie/stdgo/libvalidator"
)
// FormatOutput maps a language record into the API output shape, always
// including the base fields (second argument true).
func FormatOutput(obj *language.Language, format map[string]interface{}) map[string]interface{} {
	return libresponse.MapOutput(obj, true, format)
}
// ListRequest carries paging, ordering and filter parameters for List.
type ListRequest struct {
Page int64 `json:"page" loc:"general" validate:"required,numeric,min=1"`
ItemsPerPage int64 `json:"items_per_page" loc:"general" validate:"required,numeric,min=1,max=500"`
OrderByField string `json:"order_by_field" loc:"general"`
OrderByDir string `json:"order_by_direction" loc:"general"`
ShowRelationship bool `json:"show_relationship" loc:"general"`
ID string `json:"id" loc:"general" validate:"omitempty,numeric"`
Label string `json:"label" loc:"language"`
LabelShort string `json:"label_short" loc:"language"`
}
// List validates the paging/filter request, queries language records and
// returns a paginated, formatted response envelope.
func List(r *ListRequest, format map[string]interface{}) *libresponse.Default {
	res, err := libvalidator.Validate(r)
	if err != nil {
		return res
	}

	// BUG FIX: the original discarded db.Open's error and would nil-panic
	// further down when the connection could not be opened.
	d, err := db.Open("")
	if err != nil {
		res.Code = 500
		res.Message = "general.error_internal"
		res.Error = "general.error_internal"
		return res
	}
	defer d.Close()

	params := map[string]interface{}{
		"id":          r.ID,
		"label":       r.Label,
		"label_short": r.LabelShort,
	}
	orderParams := map[string]interface{}{
		"field":     r.OrderByField,
		"direction": r.OrderByDir,
		"start":     ((r.Page - 1) * r.ItemsPerPage),
		"limit":     r.ItemsPerPage,
	}

	list, totalItems, err := language.List(d, params, orderParams)
	if err != nil {
		res.Code = 500
		res.Message = "general.error_internal"
		res.Error = "general.error_list"
	} else {
		tmp := make([]interface{}, len(list))
		format["show_relationship"] = r.ShowRelationship
		// &obj points at the per-iteration copy; it is consumed synchronously
		// by FormatOutput, so the aliasing is safe.
		for k, obj := range list {
			tmp[k] = FormatOutput(&obj, format)
		}
		res.Success = true
		res.Code = 200
		res.Message = "general.success_list"
		totalPages := math.Ceil(float64(totalItems) / float64(r.ItemsPerPage))
		res.Data = map[string]interface{}{
			"items":       tmp,
			"total_items": totalItems,
			"total_pages": totalPages,
		}
	}
	return res
}
// ViewRequest identifies the single language record View should return.
type ViewRequest struct {
ID int64 `json:"id" loc:"general" validate:"required,numeric"`
}
// View validates the request and returns the language record with the given
// ID, or a 404 envelope when it cannot be found.
func View(r *ViewRequest, format map[string]interface{}) *libresponse.Default {
	res, err := libvalidator.Validate(r)
	if err != nil {
		return res
	}

	d, _ := db.Open("")
	defer d.Close()

	la := new(language.Language)
	exist, err := la.GetByID(d, r.ID)
	if err != nil || !exist {
		res.Code = 404
		res.Error = "general.error_data_not_found"
		return res
	}

	res.Success = true
	res.Code = 200
	res.Message = "general.success_data_found"
	res.Data = FormatOutput(la, format)
	return res
}
// CheckRequest carries the language IDs whose existence Check verifies.
type CheckRequest struct {
ID []int64 `json:"id" loc:"general" validate:"required,min=1"`
}
// Check validates that every supplied (de-duplicated) ID exists as a
// language record and returns a success or validation-error envelope.
func Check(r *CheckRequest, format map[string]interface{}) *libresponse.Default {
	res, err := libvalidator.Validate(r)
	if err != nil {
		return res
	}

	d, _ := db.Open("")
	defer d.Close()

	valid, err := language.MassCheckID(d, libslice.UniqueInt64(r.ID))
	if err == nil && valid {
		res.Success = true
		res.Code = 200
		res.Message = "general.success_check"
	} else {
		// BUG FIX: the failure branch previously set Success = true, so a
		// 422 validation failure still read as success to callers.
		res.Success = false
		res.Code = 422
		res.Message = "general.error_validation"
		res.Error = "general.error_check"
	}
	return res
}
|
package model
import "time"
// RealAgent describes an agent as actually observed on a host.
type RealAgent struct {
Name string `json:"name"`
Version string `json:"version"`
Status string `json:"status"`
Timestamp string `json:"timestamp"`
}
// DesiredAgent describes the agent a host should be running, including
// where to fetch it (Tarball/Md5) and how to start it (Cmd).
type DesiredAgent struct {
Name string `json:"name"`
Version string `json:"version"`
Tarball string `json:"tarball"`
Md5 string `json:"md5"`
Cmd string `json:"cmd"`
}
// HeartbeatReques is a host's heartbeat payload: its hostname and the
// agents currently running. NOTE(review): the name is missing a trailing
// "t" ("Request"); renaming would break external callers, so it is kept.
type HeartbeatReques struct {
Hostname string `json:"hostname"`
RealAgents []*RealAgent `json:"realAgents"`
}
// HeartbeatResponse answers a heartbeat with the agents the host should
// converge to, or an error message.
type HeartbeatResponse struct {
ErrorMessage string `json:"errorMessage"`
DesiredAgent []*DesiredAgent `json:"desiredAgents"`
}
//----------------------------sql model-----------------------------------
// Reminder is the persisted reminder row; DeletedAt is hidden from JSON
// output (soft-delete marker).
type Reminder struct {
Id int64 `json:"id"`
Message string `sql:"size:1024" json:"message"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
DeletedAt time.Time `json:"-"`
}
package data_test
import (
"testing"
"math/rand"
data "github.com/bgokden/veri/data"
)
// randFloats64 returns n pseudo-random float64 values drawn uniformly from
// the half-open interval [min, max).
func randFloats64(min, max float64, n int) []float64 {
	out := make([]float64, n)
	span := max - min
	for i := 0; i < n; i++ {
		out[i] = min + rand.Float64()*span
	}
	return out
}
// randFloats32 returns n pseudo-random float32 values drawn uniformly from
// the half-open interval [min, max).
func randFloats32(min, max float32, n int) []float32 {
	out := make([]float32, n)
	span := max - min
	for i := 0; i < n; i++ {
		out[i] = min + rand.Float32()*span
	}
	return out
}
// Shared 512-dimensional random vectors reused across benchmark iterations
// so vector construction stays out of the measured loop.
var vector64_0 = randFloats64(-1, 1, 512)
var vector64_1 = randFloats64(-1, 1, 512)
var vector32_0 = randFloats32(-1, 1, 512)
var vector32_1 = randFloats32(-1, 1, 512)
// BenchmarkCosineSimilarity64 measures the float64 cosine-similarity kernel
// on two fixed 512-dimensional vectors.
func BenchmarkCosineSimilarity64(b *testing.B) {
for i := 0; i < b.N; i++ {
data.CosineSimilarity64(vector64_0, vector64_1)
}
}
// BenchmarkCosineSimilarity32 measures the float32 cosine-similarity kernel
// on two fixed 512-dimensional vectors.
func BenchmarkCosineSimilarity32(b *testing.B) {
for i := 0; i < b.N; i++ {
data.CosineSimilarity(vector32_0, vector32_1)
}
}
|
package nes
import (
"encoding/gob"
"log"
)
// Mapper4 implements iNES mapper 4 (presumably the MMC3 board — TODO
// confirm): bank-switched PRG (8KB windows) and CHR (1KB windows) plus a
// scanline-counter IRQ.
type Mapper4 struct {
*Cartridge
console *Console
register byte // which of the eight bank registers the next data write targets
registers [8]byte // bank numbers written via writeBankData
prgMode byte // PRG window layout selector (0 or 1)
chrMode byte // CHR window layout selector (0 or 1)
prgOffsets [4]int // byte offset into PRG for each 8KB window
chrOffsets [8]int // byte offset into CHR for each 1KB window
reload byte // IRQ counter reload value (writeIRQLatch)
counter byte // scanline IRQ counter
irqEnable bool // whether the counter reaching zero asserts a CPU IRQ
}
// NewMapper4 builds a Mapper4 with the power-on PRG layout: the first two
// 8KB windows map banks 0 and 1, the last two are fixed to the final banks.
func NewMapper4(console *Console, cartridge *Cartridge) Mapper {
	m := &Mapper4{Cartridge: cartridge, console: console}
	for i, bank := range []int{0, 1, -2, -1} {
		m.prgOffsets[i] = m.prgBankOffset(bank)
	}
	return m
}
// Save serializes the mapper's mutable state to the encoder, in the same
// field order Load reads it back.
//
// BUG FIX: the original discarded every Encode error and always returned
// nil; encode failures are now propagated to the caller.
func (m *Mapper4) Save(encoder *gob.Encoder) error {
	for _, v := range []interface{}{
		m.register,
		m.registers,
		m.prgMode,
		m.chrMode,
		m.prgOffsets,
		m.chrOffsets,
		m.reload,
		m.counter,
		m.irqEnable,
	} {
		if err := encoder.Encode(v); err != nil {
			return err
		}
	}
	return nil
}
// Load restores the mapper's mutable state from the decoder, in the same
// field order Save writes it.
//
// BUG FIX: the original discarded every Decode error and always returned
// nil; decode failures are now propagated to the caller.
func (m *Mapper4) Load(decoder *gob.Decoder) error {
	for _, v := range []interface{}{
		&m.register,
		&m.registers,
		&m.prgMode,
		&m.chrMode,
		&m.prgOffsets,
		&m.chrOffsets,
		&m.reload,
		&m.counter,
		&m.irqEnable,
	} {
		if err := decoder.Decode(v); err != nil {
			return err
		}
	}
	return nil
}
// Step is called on every PPU tick and clocks the scanline counter once per
// rendered scanline, at a fixed cycle within the line.
func (m *Mapper4) Step() {
ppu := m.console.PPU
if ppu.Cycle != 280 { // TODO: this *should* be 260
return
}
// Skip the post-render / vblank scanlines (240-260).
if ppu.ScanLine > 239 && ppu.ScanLine < 261 {
return
}
// The counter only clocks while background or sprite rendering is on.
if ppu.flagShowBackground == 0 && ppu.flagShowSprites == 0 {
return
}
m.HandleScanLine()
}
// HandleScanLine clocks the IRQ counter: a zero counter reloads from the
// latch; otherwise it decrements, and hitting zero with IRQs enabled
// asserts a CPU IRQ.
func (m *Mapper4) HandleScanLine() {
if m.counter == 0 {
m.counter = m.reload
} else {
m.counter--
if m.counter == 0 && m.irqEnable {
m.console.CPU.triggerIRQ()
}
}
}
// Read returns the byte mapped at address:
//
//	< 0x2000        banked CHR (1KB windows via chrOffsets)
//	0x6000-0x7FFF   SRAM
//	>= 0x8000       banked PRG (8KB windows via prgOffsets)
//
// Any other address is treated as a fatal emulator bug.
func (m *Mapper4) Read(address uint16) byte {
switch {
case address < 0x2000:
bank := address / 0x0400
offset := address % 0x0400
return m.CHR[m.chrOffsets[bank]+int(offset)]
case address >= 0x8000:
address = address - 0x8000
bank := address / 0x2000
offset := address % 0x2000
return m.PRG[m.prgOffsets[bank]+int(offset)]
case address >= 0x6000:
return m.SRAM[int(address)-0x6000]
default:
log.Fatalf("unhandled mapper4 read at address: 0x%04X", address)
}
return 0
}
// Write stores value at address: CHR below 0x2000, mapper registers at
// 0x8000 and above, SRAM in 0x6000-0x7FFF. Any other address is treated as
// a fatal emulator bug.
func (m *Mapper4) Write(address uint16, value byte) {
switch {
case address < 0x2000:
bank := address / 0x0400
offset := address % 0x0400
m.CHR[m.chrOffsets[bank]+int(offset)] = value
case address >= 0x8000:
m.writeRegister(address, value)
case address >= 0x6000:
m.SRAM[int(address)-0x6000] = value
default:
log.Fatalf("unhandled mapper4 write at address: 0x%04X", address)
}
}
// writeRegister dispatches a CPU write in 0x8000-0xFFFF to one of the four
// register pairs; within each 0x2000 range, even addresses select the first
// register of the pair and odd addresses the second.
func (m *Mapper4) writeRegister(address uint16, value byte) {
switch {
case address <= 0x9FFF && address%2 == 0:
m.writeBankSelect(value)
case address <= 0x9FFF && address%2 == 1:
m.writeBankData(value)
case address <= 0xBFFF && address%2 == 0:
m.writeMirror(value)
case address <= 0xBFFF && address%2 == 1:
m.writeProtect(value)
case address <= 0xDFFF && address%2 == 0:
m.writeIRQLatch(value)
case address <= 0xDFFF && address%2 == 1:
m.writeIRQReload(value)
case address <= 0xFFFF && address%2 == 0:
m.writeIRQDisable(value)
case address <= 0xFFFF && address%2 == 1:
m.writeIRQEnable(value)
}
}
// writeBankSelect stores the PRG/CHR layout modes (bits 6 and 7) and the
// index of the bank register targeted by the next writeBankData (bits 0-2),
// then recomputes all window offsets.
func (m *Mapper4) writeBankSelect(value byte) {
m.prgMode = (value >> 6) & 1
m.chrMode = (value >> 7) & 1
m.register = value & 7
m.updateOffsets()
}
// writeBankData writes the currently-selected bank register and recomputes
// all PRG/CHR window offsets.
func (m *Mapper4) writeBankData(value byte) {
m.registers[m.register] = value
m.updateOffsets()
}
// writeMirror selects nametable mirroring from bit 0 of the written value:
// 0 selects vertical, 1 horizontal.
func (m *Mapper4) writeMirror(value byte) {
	if value&1 == 0 {
		m.Cartridge.Mirror = MirrorVertical
	} else {
		m.Cartridge.Mirror = MirrorHorizontal
	}
}
// writeProtect would control SRAM write protection; presumably left
// unimplemented on purpose — TODO confirm.
func (m *Mapper4) writeProtect(value byte) {
}
// writeIRQLatch sets the value the scanline counter reloads from.
func (m *Mapper4) writeIRQLatch(value byte) {
m.reload = value
}
// writeIRQReload clears the counter so the next clock reloads it.
func (m *Mapper4) writeIRQReload(value byte) {
m.counter = 0
}
// writeIRQDisable turns scanline IRQs off.
func (m *Mapper4) writeIRQDisable(value byte) {
m.irqEnable = false
}
// writeIRQEnable turns scanline IRQs on.
func (m *Mapper4) writeIRQEnable(value byte) {
m.irqEnable = true
}
// prgBankOffset converts an 8KB PRG bank index into a byte offset into PRG.
// Indexes >= 0x80 are interpreted as negative (two's complement of a byte),
// and negative indexes count back from the end of PRG.
func (m *Mapper4) prgBankOffset(index int) int {
if index >= 0x80 {
index -= 0x100
}
// Go's % keeps the dividend's sign, so a negative index stays negative
// here and is wrapped to the end of PRG below.
index %= len(m.PRG) / 0x2000
offset := index * 0x2000
if offset < 0 {
offset += len(m.PRG)
}
return offset
}
// chrBankOffset converts a 1KB CHR bank index into a byte offset into CHR,
// with the same negative-index wraparound rules as prgBankOffset.
func (m *Mapper4) chrBankOffset(index int) int {
if index >= 0x80 {
index -= 0x100
}
// Negative indexes count back from the end of CHR (see prgBankOffset).
index %= len(m.CHR) / 0x0400
offset := index * 0x0400
if offset < 0 {
offset += len(m.CHR)
}
return offset
}
// updateOffsets recomputes every PRG/CHR window offset from the bank
// registers and the current prgMode/chrMode. In PRG mode 1 the switchable
// and fixed 8KB windows swap places; in CHR mode 1 the 2KB pairs (even
// register, register|1) and the four 1KB windows swap halves.
func (m *Mapper4) updateOffsets() {
switch m.prgMode {
case 0:
m.prgOffsets[0] = m.prgBankOffset(int(m.registers[6]))
m.prgOffsets[1] = m.prgBankOffset(int(m.registers[7]))
m.prgOffsets[2] = m.prgBankOffset(-2)
m.prgOffsets[3] = m.prgBankOffset(-1)
case 1:
m.prgOffsets[0] = m.prgBankOffset(-2)
m.prgOffsets[1] = m.prgBankOffset(int(m.registers[7]))
m.prgOffsets[2] = m.prgBankOffset(int(m.registers[6]))
m.prgOffsets[3] = m.prgBankOffset(-1)
}
switch m.chrMode {
case 0:
m.chrOffsets[0] = m.chrBankOffset(int(m.registers[0] & 0xFE))
m.chrOffsets[1] = m.chrBankOffset(int(m.registers[0] | 0x01))
m.chrOffsets[2] = m.chrBankOffset(int(m.registers[1] & 0xFE))
m.chrOffsets[3] = m.chrBankOffset(int(m.registers[1] | 0x01))
m.chrOffsets[4] = m.chrBankOffset(int(m.registers[2]))
m.chrOffsets[5] = m.chrBankOffset(int(m.registers[3]))
m.chrOffsets[6] = m.chrBankOffset(int(m.registers[4]))
m.chrOffsets[7] = m.chrBankOffset(int(m.registers[5]))
case 1:
m.chrOffsets[0] = m.chrBankOffset(int(m.registers[2]))
m.chrOffsets[1] = m.chrBankOffset(int(m.registers[3]))
m.chrOffsets[2] = m.chrBankOffset(int(m.registers[4]))
m.chrOffsets[3] = m.chrBankOffset(int(m.registers[5]))
m.chrOffsets[4] = m.chrBankOffset(int(m.registers[0] & 0xFE))
m.chrOffsets[5] = m.chrBankOffset(int(m.registers[0] | 0x01))
m.chrOffsets[6] = m.chrBankOffset(int(m.registers[1] & 0xFE))
m.chrOffsets[7] = m.chrBankOffset(int(m.registers[1] | 0x01))
}
}
|
package model
import (
"database/sql"
"goTodo/initialization"
"goTodo/mylog"
"goTodo/util"
)
// UserModel carries the login form fields.
type UserModel struct {
Username string `form:"username"`
Password string `form:"password"`
}
// RegisterModel extends the login fields with the password confirmation
// used by the registration form.
type RegisterModel struct {
UserModel
PasswordAgain string `form:"passwordAgain"`
}
// ValidUser reports whether the stored password hash for user.Username
// matches the hash of the supplied password. It returns false when the user
// does not exist and panics (via the logger) on any other query error.
func (user *UserModel) ValidUser() bool {
row := initialization.Db.QueryRow("SELECT password_hash FROM " + initialization.DbUserName + " WHERE username = ?", user.Username)
var passwordHash string
if err := row.Scan(&passwordHash); err != nil {
if err == sql.ErrNoRows {
// No row means no such user.
return false
}
mylog.GoTodoLogger.Panicln("验证用户信息发生错误:", err)
}
return passwordHash == util.GenStringHash(user.Password)
}
// SaveUser inserts the user with a hashed password and returns the new
// row's auto-increment ID; any database error panics via the logger.
func (user *UserModel) SaveUser() int64 {
passwordHash := util.GenStringHash(user.Password)
res, err := initialization.Db.Exec("INSERT INTO " + initialization.DbUserName + "(username, password_hash) values(?, ?)", user.Username, passwordHash)
if err != nil {
mylog.GoTodoLogger.Panicln("注册新用户发生错误:", err)
}
id, err := res.LastInsertId()
if err != nil {
mylog.GoTodoLogger.Panicln("注册新用户发生错误:", err)
}
return id
}
// ExistUser reports whether a row with this username exists; unexpected
// query errors are escalated through the logger's Panicln.
func (user *UserModel) ExistUser() bool {
	var id int
	err := initialization.Db.QueryRow("SELECT id FROM " + initialization.DbUserName + " WHERE username = ?", user.Username).Scan(&id)
	if err == sql.ErrNoRows {
		return false
	}
	if err != nil {
		mylog.GoTodoLogger.Panicln("查询用户信息发生错误:", err)
	}
	return true
}
// UserSetting holds a user's settings; currently only the webhook is read.
type UserSetting struct {
// Stored as []byte so a NULL column becomes an empty string when
// converted, instead of a scan error.
CurrentWebHook []byte
}
// ShowSettings loads the webhook setting for username; any query error
// panics via the logger.
func ShowSettings(username string) *UserSetting {
row := initialization.Db.QueryRow("SELECT webhook FROM " + initialization.DbUserName + " WHERE username = ?", username)
userSetting := new(UserSetting)
if err := row.Scan(&userSetting.CurrentWebHook); err != nil {
mylog.GoTodoLogger.Panicln("查询用户设置发生错误:", err)
}
return userSetting
}
func UpdateWebHook(username, webhook string) string {
res, err := initialization.Db.Exec("UPDATE " + initialization.DbUserName + " SET webhook = ? WHERE username = ? ", webhook, username)
if err != nil {
mylog.GoTodoLogger.Panicln(username + "添加webhook出错", err)
}
_, err = res.LastInsertId()
if err != nil {
mylog.GoTodoLogger.Panicln(username + "添加webhook出错", err)
}
return webhook
} |
package s3
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"k8s.io/apimachinery/pkg/util/uuid"
corev1 "k8s.io/api/core/v1"
installer "github.com/openshift/installer/pkg/types"
opapi "github.com/openshift/cluster-image-registry-operator/pkg/apis/imageregistry/v1alpha1"
"github.com/openshift/cluster-image-registry-operator/pkg/clusterconfig"
"github.com/openshift/cluster-image-registry-operator/pkg/storage/util"
)
// driver implements the operator's storage driver for an S3-backed image
// registry.
type driver struct {
	Name      string                              // registry custom resource name (also prefixes the credentials secret)
	Namespace string                              // registry custom resource namespace
	Config    *opapi.ImageRegistryConfigStorageS3 // S3 settings: bucket, region, endpoint, encryption flag
}
// NewDriver builds an S3 storage driver for the named registry custom
// resource in the given namespace.
func NewDriver(crname string, crnamespace string, c *opapi.ImageRegistryConfigStorageS3) *driver {
	d := new(driver)
	d.Name = crname
	d.Namespace = crnamespace
	d.Config = c
	return d
}
// GetName returns the storage backend identifier used in the registry
// configuration ("s3").
func (d *driver) GetName() string {
	const backendName = "s3"
	return backendName
}
// ConfigEnv returns the registry environment variables configuring the S3
// backend. Bucket, region, endpoint and the encrypt flag are inlined from
// the driver config; the access/secret keys are referenced indirectly from
// the "<Name>-private-configuration" secret so credentials never appear in
// the pod spec.
func (d *driver) ConfigEnv() (envs []corev1.EnvVar, err error) {
	envs = append(envs,
		corev1.EnvVar{Name: "REGISTRY_STORAGE", Value: d.GetName()},
		corev1.EnvVar{Name: "REGISTRY_STORAGE_S3_BUCKET", Value: d.Config.Bucket},
		corev1.EnvVar{Name: "REGISTRY_STORAGE_S3_REGION", Value: d.Config.Region},
		corev1.EnvVar{Name: "REGISTRY_STORAGE_S3_REGIONENDPOINT", Value: d.Config.RegionEndpoint},
		corev1.EnvVar{Name: "REGISTRY_STORAGE_S3_ENCRYPT", Value: fmt.Sprintf("%v", d.Config.Encrypt)},
		corev1.EnvVar{
			Name: "REGISTRY_STORAGE_S3_ACCESSKEY",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: d.Name + "-private-configuration",
					},
					Key: "REGISTRY_STORAGE_S3_ACCESSKEY",
				},
			},
		},
		corev1.EnvVar{
			Name: "REGISTRY_STORAGE_S3_SECRETKEY",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: d.Name + "-private-configuration",
					},
					Key: "REGISTRY_STORAGE_S3_SECRETKEY",
				},
			},
		},
	)
	// err is never set; the named returns keep the interface signature.
	return
}
// Volumes reports the pod volumes and mounts this driver needs: none, since
// S3 storage is configured entirely through environment variables.
func (d *driver) Volumes() ([]corev1.Volume, []corev1.VolumeMount, error) {
	var vols []corev1.Volume
	var mounts []corev1.VolumeMount
	return vols, mounts, nil
}
// checkBucketExists issues a HeadBucket request for the configured bucket.
// A nil return means the bucket exists and is reachable; otherwise the AWS
// error is returned for the caller to classify.
func (d *driver) checkBucketExists(svc *s3.S3) error {
	input := &s3.HeadBucketInput{Bucket: aws.String(d.Config.Bucket)}
	_, err := svc.HeadBucket(input)
	return err
}
// createAndTagBucket creates the configured S3 bucket, waits until it is
// visible, tags it with the cluster ID plus any user-defined tags (AWS
// platform only), and enables AES-256 default encryption. On success it
// marks the storage as operator-managed in customResourceStatus.
func (d *driver) createAndTagBucket(svc *s3.S3, installConfig *installer.InstallConfig, customResourceStatus *opapi.ImageRegistryStatus) error {
	createBucketInput := &s3.CreateBucketInput{
		Bucket: aws.String(d.Config.Bucket),
	}
	// Create the S3 bucket.
	if _, err := svc.CreateBucket(createBucketInput); err != nil {
		return err
	}
	// Wait until the bucket exists (creation is eventually consistent).
	if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String(d.Config.Bucket),
	}); err != nil {
		return err
	}
	// Tag the bucket with the openshiftClusterID along with any user-defined
	// tags from the cluster configuration. Skipped on non-AWS platforms.
	if installConfig.Platform.AWS != nil {
		var tagSet []*s3.Tag
		tagSet = append(tagSet, &s3.Tag{Key: aws.String("openshiftClusterID"), Value: aws.String(installConfig.ClusterID)})
		for k, v := range installConfig.Platform.AWS.UserTags {
			tagSet = append(tagSet, &s3.Tag{Key: aws.String(k), Value: aws.String(v)})
		}
		tagBucketInput := &s3.PutBucketTaggingInput{
			Bucket: aws.String(d.Config.Bucket),
			Tagging: &s3.Tagging{
				TagSet: tagSet,
			},
		}
		if _, err := svc.PutBucketTagging(tagBucketInput); err != nil {
			return err
		}
	}
	// Enable default server-side encryption (AES-256) on the bucket.
	defaultEncryption := &s3.ServerSideEncryptionByDefault{SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256)}
	encryptionRule := &s3.ServerSideEncryptionRule{ApplyServerSideEncryptionByDefault: defaultEncryption}
	encryptionRules := []*s3.ServerSideEncryptionRule{encryptionRule}
	encryptionConfig := &s3.ServerSideEncryptionConfiguration{Rules: encryptionRules}
	bucketEncryptionInput := &s3.PutBucketEncryptionInput{Bucket: aws.String(d.Config.Bucket), ServerSideEncryptionConfiguration: encryptionConfig}
	_, err := svc.PutBucketEncryption(bucketEncryptionInput)
	if err != nil {
		return err
	}
	// The operator created this bucket, so it is responsible for it.
	customResourceStatus.Storage.Managed = true
	return nil
}
// createOrUpdatePrivateConfiguration stores the S3 credentials in the
// operator's private-configuration secret in openshift-image-registry.
func (d *driver) createOrUpdatePrivateConfiguration(accessKey string, secretKey string) error {
	secretData := map[string]string{
		"REGISTRY_STORAGE_S3_ACCESSKEY": accessKey,
		"REGISTRY_STORAGE_S3_SECRETKEY": secretKey,
	}
	return util.CreateOrUpdateSecret("image-registry", "openshift-image-registry", secretData)
}
// CompleteConfiguration fills in any unset S3 settings (bucket, region)
// from cluster configuration, ensures the bucket exists — generating a
// unique name and creating it if necessary — stores the credentials secret,
// and records the final config in customResourceStatus.
func (d *driver) CompleteConfiguration(customResourceStatus *opapi.ImageRegistryStatus) error {
	cfg, err := clusterconfig.GetAWSConfig()
	if err != nil {
		return err
	}
	installConfig, err := clusterconfig.GetInstallConfig()
	if err != nil {
		return err
	}
	sess, err := session.NewSession(&aws.Config{
		Credentials: credentials.NewStaticCredentials(cfg.Storage.S3.AccessKey, cfg.Storage.S3.SecretKey, ""),
		Region:      &cfg.Storage.S3.Region,
	})
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	// Fall back to cluster-level defaults for anything the CR left empty.
	if len(d.Config.Bucket) == 0 {
		d.Config.Bucket = cfg.Storage.S3.Bucket
	}
	if len(d.Config.Region) == 0 {
		d.Config.Region = cfg.Storage.S3.Region
	}
	if len(d.Config.Bucket) == 0 {
		// No bucket configured anywhere: generate a name and create it,
		// retrying on name collisions. The name is truncated to 62 chars to
		// respect S3's 63-char bucket-name limit; assumes the prefix+region+
		// cluster-id+uuid string is always at least 62 chars — confirm.
		for {
			d.Config.Bucket = fmt.Sprintf("%s-%s-%s-%s", clusterconfig.StoragePrefix, d.Config.Region, strings.Replace(installConfig.ClusterID, "-", "", -1), strings.Replace(string(uuid.NewUUID()), "-", "", -1))[0:62]
			if err := d.createAndTagBucket(svc, installConfig, customResourceStatus); err != nil {
				if aerr, ok := err.(awserr.Error); ok {
					switch aerr.Code() {
					case s3.ErrCodeBucketAlreadyExists:
						// Collision with an existing bucket: try a new name.
						continue
					default:
						return err
					}
				}
				// NOTE(review): a non-awserr error falls through and retries
				// the loop rather than returning — confirm this is intended.
			} else {
				break
			}
		}
	} else {
		// A bucket name was supplied: create it only if it doesn't exist.
		if err := d.checkBucketExists(svc); err != nil {
			if aerr, ok := err.(awserr.Error); ok {
				switch aerr.Code() {
				case s3.ErrCodeNoSuchBucket:
					if err = d.createAndTagBucket(svc, installConfig, customResourceStatus); err != nil {
						return err
					}
				default:
					return err
				}
			}
		}
	}
	if err := d.createOrUpdatePrivateConfiguration(cfg.Storage.S3.AccessKey, cfg.Storage.S3.SecretKey); err != nil {
		return err
	}
	customResourceStatus.Storage.State.S3 = d.Config
	return nil
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"fmt"
"github.com/sethvargo/go-password/password"
)
// generator is the shared password generator, configured in init with a
// character set that omits easily-confused glyphs.
var generator *password.Generator

// GeneratePassword generates a random password of the given length, which
// must be at least 12. It requests 4 digits and 4 symbols, allows uppercase
// letters, and allows repeated characters.
//
// Bug fix: an invalid length now returns an error instead of panicking —
// the function already has an error return, and panicking in library code
// for a caller mistake is unidiomatic Go.
func GeneratePassword(length uint8) (string, error) {
	if length < 12 {
		return "", fmt.Errorf("password length must be at least 12, got %d", length)
	}
	// Renamed from `password` to avoid shadowing the imported package.
	pw, err := generator.Generate(int(length), 4, 4, false, true)
	if err != nil {
		return "", err
	}
	return pw, nil
}
// init builds the package-level password generator once at startup.
func init() {
	var err error
	// The generator is created with an explicit allowed character set:
	// potentially confusing characters, like i/l/| or 0/O, are removed to
	// ease human readability of generated passwords.
	generator, err = password.NewGenerator(&password.GeneratorInput{
		LowerLetters: "abcdefghjkmnopqrstuvwxyz",
		UpperLetters: "ABCDEFGHJKLMNPQRSTUVWXYZ",
		Digits:       "123456789",
		Symbols:      "-+*/.,:()[]{}#_",
	})
	// A failure here is a programming error (bad generator input), so abort.
	if err != nil {
		panic(fmt.Sprintf("Failed to create password generator: %s!", err.Error()))
	}
}
|
package eks
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ssh"
)
// A NodeGroupService provides helpers for nodegroup creation.
type NodeGroupService struct {
	cluster *api.ClusterConfig // target cluster configuration (metadata used for SSH key naming)
	ec2API  ec2iface.EC2API    // EC2 client used to import SSH public keys
}
// NewNodeGroupService creates a new NodeGroupService bound to the given
// cluster configuration and EC2 API client.
func NewNodeGroupService(clusterConfig *api.ClusterConfig, ec2API ec2iface.EC2API) *NodeGroupService {
	svc := new(NodeGroupService)
	svc.cluster = clusterConfig
	svc.ec2API = ec2API
	return svc
}
// Normalize normalizes nodegroups: it loads (or reuses) each nodegroup's
// SSH key in EC2 and records the resulting public key name on the
// nodegroup. Returns the first load error encountered.
func (m *NodeGroupService) Normalize(nodeGroups []*api.NodeGroupBase) error {
	for _, nodeGroup := range nodeGroups {
		// The key name includes the cluster name and the key fingerprint,
		// so unique keys are each loaded and used as intended without
		// embedding the nodegroup name in the key name.
		keyName, err := ssh.LoadKey(nodeGroup.SSH, m.cluster.Metadata.Name, nodeGroup.Name, m.ec2API)
		if err != nil {
			return err
		}
		if keyName != "" {
			nodeGroup.SSH.PublicKeyName = &keyName
		}
	}
	return nil
}
|
package main
import (
"fmt"
)
// main demonstrates Go's core collection types: fixed-size arrays, slices
// (append and re-slicing), maps, and struct declaration/initialization.
// All printed strings are kept identical to the documented OUTPUT below.
func main() {
	// Arrays — two ways to declare.
	// 1) Declare, then assign elements.
	var arr [3]int
	arr[0], arr[1], arr[2] = 1, 2, 3
	fmt.Println(arr)
	// 2) Declare with an initializer.
	arr2 := [3]int{2, 3, 4}
	fmt.Println(arr2)

	// Slices: a dynamically-sized view over an array.
	slice := []int{1, 2, 3}
	fmt.Println("Slice Value", slice)
	// append one value…
	slice = append(slice, 4)
	fmt.Println("Slice Value After Single Append", slice)
	// …or several at once.
	slice = append(slice, 4, 27, 90)
	fmt.Println("Slice Value After Multiple Append", slice)

	// Re-slicing: sub-slices share the same backing array.
	s2 := slice[:]
	fmt.Println("Slice s2: ", s2)
	s3 := slice[1:3]
	fmt.Println("Slice s3 start index is 1 end index is 3: ", s3)
	s4 := slice[1:]
	fmt.Println("Slice s3 start index is 1 ", s4) // label says s3 but prints s4; kept as-is to match OUTPUT
	s5 := slice[:4]
	fmt.Println("slice s5 with end Index 4", s5)

	// Maps: insert, read, update, delete.
	m := map[string]int{"foo": 43}
	fmt.Println(m)
	fmt.Println(m["foo"])
	// A key's value can be changed at any time.
	m["foo"] = 48
	fmt.Println(m)
	// The builtin delete takes the map and the key.
	delete(m, "foo")
	fmt.Println(m)

	// Structs: declaration, zero value, field assignment, literal init.
	type user struct {
		ID    int
		Fname string
		Lname string
	}
	// Long way: declare then assign fields.
	var u user
	fmt.Println(u)
	u.ID = 1
	u.Fname = "Sneha"
	u.Lname = "Konkati"
	fmt.Println(u)
	// Short way: a multi-line composite literal (trailing comma required).
	u2 := user{
		ID:    2,
		Fname: "Vijay",
		Lname: "Konkati",
	}
	fmt.Println(u2)
}
// OUTPUT:-
// C:\Go_Code>go run Collection.go
// [1 2 3]
// [2 3 4]
// Slice Value [1 2 3]
// Slice Value After Single Append [1 2 3 4]
// Slice Value After Multiple Append [1 2 3 4 4 27 90]
// Slice s2: [1 2 3 4 4 27 90]
// Slice s3 start index is 1 end index is 3: [2 3]
// Slice s3 start index is 1 [2 3 4 4 27 90]
// slice s5 with end Index 4 [1 2 3 4]
// map[foo:43]
// 43
// map[foo:48]
// map[]
// {0 }
// {1 Sneha Konkati}
// {2 Vijay Konkati}
|
package map_slice_array
import (
"fmt"
"gengine/builder"
"gengine/context"
"gengine/engine"
"reflect"
"testing"
"time"
)
// MS groups the map fields exercised by the m_1 rule: a pointer-to-map and
// two plain maps with different key/value types.
type MS struct {
	MII *map[int]int   // pointer to map; the rule mutates it through the pointer
	MSI map[string]int // string-keyed map
	MIS map[int]string // int-keyed map with string values
}
// m_1 is a gengine rule script exercising map reads and writes both through
// struct fields (MS.MII, MS.MSI, MS.MIS) and through directly injected maps
// (MM, MMM), using literal and variable keys.
const m_1 = `
rule "map test" "m dec"
begin
//map in struct
a = -1
MS.MII[-1] = 22
println("MS.MII[-1]--->",MS.MII[-1])
println("MS.MII[a]--->",MS.MII[a])
b = "1"
x = (227289 - 227288) * 10 - 20 * 3
MS.MSI["1"] = x
println("MS.MSI[\"1\"]--->",MS.MSI["1"])
println("MS.MSI[b]---->", MS.MSI[b])
c = "2"
//
MS.MSI["2"] = 33333
println("MS.MSI[\"2\"]--->", MS.MSI["2"])
println("MS.MSI[c]--->", MS.MSI[c])
d = 1
MS.MIS[1] = "hekwld"
println("MS.MIS[1]--->", MS.MIS[1])
println("MS.MIS[d]--->", MS.MIS[d])
//single map
a = 1
MM[a] = 2222
println("MM[a]->",MM[a])
println("MM[1]->",MM[a])
//MMM[1] = 11111111
println(MMM[1])
end
`
// Test_m1 injects struct-held and standalone maps into a gengine data
// context, compiles the m_1 rule, and executes it, timing both phases.
func Test_m1(t *testing.T) {
	MS := &MS{
		MII: &map[int]int{-1: 1},
		MSI: map[string]int{"hello": 1},
		MIS: map[int]string{1: "helwo"},
	}
	var MM map[int]int
	MM = map[int]int{1: 1000, 2: 1000}
	var MMM map[int]int
	MMM = map[int]int{1: 1000, 2: 1000}
	dataContext := context.NewDataContext()
	dataContext.Add("MS", MS)
	// single map inject, must be ptr (MMM is added by value — presumably to
	// demonstrate the read-only case; confirm against gengine docs)
	dataContext.Add("MM", &MM)
	dataContext.Add("MMM", MMM)
	dataContext.Add("println", fmt.Println)
	// init rule engine
	ruleBuilder := builder.NewRuleBuilder(dataContext)
	// load the rules and measure compile time
	start1 := time.Now().UnixNano()
	err := ruleBuilder.BuildRuleFromString(m_1)
	end1 := time.Now().UnixNano()
	println(fmt.Sprintf("rules num:%d, load rules cost time:%d ns", len(ruleBuilder.Kc.RuleEntities), end1-start1))
	if err != nil {
		panic(err)
	}
	eng := engine.NewGengine()
	start := time.Now().UnixNano()
	// true: means when there are many rules, if one rule execute error,
	// continue to execute rules after the occur error rule
	err = eng.Execute(ruleBuilder, true)
	end := time.Now().UnixNano()
	if err != nil {
		panic(err)
	}
	println(fmt.Sprintf("execute rule cost %d ns", end-start))
}
// Test_map_un demonstrates that reflect.Value.SetMapIndex mutates the
// underlying map even when ValueOf was handed the map value directly,
// because maps are reference types.
func Test_map_un(t *testing.T) {
	target := make(map[int]int)
	mapValue := reflect.ValueOf(target)
	mapValue.SetMapIndex(reflect.ValueOf(1), reflect.ValueOf(5))
	println("x--->", target[1])
}
|
package rc
import (
"github.com/square/p2/Godeps/_workspace/src/github.com/Sirupsen/logrus"
"github.com/square/p2/pkg/kp"
"github.com/square/p2/pkg/kp/consulutil"
"github.com/square/p2/pkg/kp/rcstore"
"github.com/square/p2/pkg/labels"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/rc/fields"
)
// The Farm is responsible for spawning and reaping replication controllers
// as they are added to and deleted from Consul. Multiple farms can exist
// simultaneously, but each one must hold a different Consul session. This
// ensures that the farms do not instantiate the same replication controller
// multiple times.
type Farm struct {
	// constructor arguments for rcs created by this farm
	kpStore   kp.Store
	rcStore   rcstore.Store
	scheduler Scheduler
	labeler   labels.Applicator
	// session stream for the rcs locked by this farm
	sessions <-chan string
	children map[fields.ID]childRC // RCs currently owned by this farm, keyed by RC id
	session  kp.Session            // current Consul session; nil when expired
	logger   logging.Logger
}
// childRC tracks one replication controller owned by this farm: the RC
// itself, the unlocker for its ownership lock, and its shutdown channel.
type childRC struct {
	rc       ReplicationController
	unlocker kp.Unlocker     // releases the ownership lock when the child is dropped
	quit     chan<- struct{} // closed to stop the RC's WatchDesires loop
}
// NewFarm constructs a Farm with an empty child set; it does not start any
// background work (see Start).
func NewFarm(
	kpStore kp.Store,
	rcs rcstore.Store,
	scheduler Scheduler,
	labeler labels.Applicator,
	sessions <-chan string,
	logger logging.Logger,
) *Farm {
	farm := &Farm{
		children:  make(map[fields.ID]childRC),
		kpStore:   kpStore,
		rcStore:   rcs,
		scheduler: scheduler,
		labeler:   labeler,
		sessions:  sessions,
		logger:    logger,
	}
	return farm
}
// Start is a blocking function that monitors Consul for replication controllers.
// The Farm will attempt to claim replication controllers as they appear and,
// if successful, will start goroutines for those replication controllers to do
// their job. Closing the quit channel will cause this function to return,
// releasing all locks it holds.
//
// Start is not safe for concurrent execution. Do not execute multiple
// concurrent instances of Start.
func (rcf *Farm) Start(quit <-chan struct{}) {
	// WithSession re-invokes the callback for each new session id from the
	// sessions channel; mainLoop runs until that session's quit fires.
	consulutil.WithSession(quit, rcf.sessions, func(sessionQuit <-chan struct{}, sessionID string) {
		rcf.logger.WithField("session", sessionID).Infoln("Acquired new session")
		rcf.session = rcf.kpStore.NewUnmanagedSession(sessionID, "")
		rcf.mainLoop(sessionQuit)
	})
}
// mainLoop watches the RC store for updates until quit fires. For each
// update it claims unowned RCs (via ownership locks), spawns a watcher
// goroutine per newly-claimed RC, and prunes children that disappeared
// from the result set. On quit, all children are released.
func (rcf *Farm) mainLoop(quit <-chan struct{}) {
	subQuit := make(chan struct{})
	defer close(subQuit)
	rcWatch, rcErr := rcf.rcStore.WatchNew(subQuit)
START_LOOP:
	for {
		select {
		case <-quit:
			rcf.logger.NoFields().Infoln("Session expired, releasing replication controllers")
			// nil session signals releaseChild not to attempt an unlock.
			rcf.session = nil
			rcf.releaseChildren()
			return
		case err := <-rcErr:
			rcf.logger.WithError(err).Errorln("Could not read consul replication controllers")
		case rcFields := <-rcWatch:
			rcf.logger.WithField("n", len(rcFields)).Debugln("Received replication controller update")
			// track which children were found in the returned set
			foundChildren := make(map[fields.ID]struct{})
			for _, rcField := range rcFields {
				rcLogger := rcf.logger.SubLogger(logrus.Fields{
					"rc":  rcField.ID,
					"pod": rcField.Manifest.ID(),
				})
				if _, ok := rcf.children[rcField.ID]; ok {
					// this one is already ours, skip
					rcLogger.NoFields().Debugln("Got replication controller already owned by self")
					foundChildren[rcField.ID] = struct{}{}
					continue
				}
				rcUnlocker, err := rcf.rcStore.LockForOwnership(rcField.ID, rcf.session)
				if _, ok := err.(kp.AlreadyLockedError); ok {
					// someone else must have gotten it first - log and move to
					// the next one
					rcLogger.NoFields().Debugln("Lock on replication controller was denied")
					continue
				} else if err != nil {
					rcLogger.WithError(err).Errorln("Got error while locking replication controller - session may be expired")
					// stop processing this update and go back to the select
					// chances are this error is a network problem or session
					// expiry, and all the others in this update would also fail
					continue START_LOOP
				}
				// at this point the rc is ours, time to spin it up
				rcLogger.NoFields().Infoln("Acquired lock on new replication controller, spawning")
				newChild := New(
					rcField,
					rcf.kpStore,
					rcf.rcStore,
					rcf.scheduler,
					rcf.labeler,
					rcLogger,
				)
				childQuit := make(chan struct{})
				rcf.children[rcField.ID] = childRC{
					rc:       newChild,
					quit:     childQuit,
					unlocker: rcUnlocker,
				}
				foundChildren[rcField.ID] = struct{}{}
				go func() {
					// disabled-ness is handled in watchdesires
					for err := range newChild.WatchDesires(childQuit) {
						rcLogger.WithError(err).Errorln("Got error in replication controller loop")
					}
					// NOTE: if WatchDesires experiences an unrecoverable error, we don't release the replication controller.
					// However, it is unlikely that another farm instance would fare any better so that's okay
				}()
			}
			// now remove any children that were not found in the result set
			rcf.logger.NoFields().Debugln("Pruning replication controllers that have disappeared")
			for id := range rcf.children {
				if _, ok := foundChildren[id]; !ok {
					rcf.releaseChild(id)
				}
			}
		}
	}
}
// releaseChild stops the goroutine serving one replication controller and,
// if a session is still held, gracefully releases its ownership lock
// before forgetting the child.
func (rcf *Farm) releaseChild(id fields.ID) {
	rcf.logger.WithField("rc", id).Infoln("Releasing replication controller")
	child := rcf.children[id]
	close(child.quit)
	// Only attempt the unlock while our session is still alive.
	if rcf.session != nil {
		if err := child.unlocker.Unlock(); err != nil {
			rcf.logger.WithField("rc", id).Warnln("Could not release replication controller lock")
		}
	}
	delete(rcf.children, id)
}
// releaseChildren releases every child this farm currently owns.
func (rcf *Farm) releaseChildren() {
	// Snapshot the ids first; releaseChild deletes from the map, and while
	// deleting during range is legal in Go, iterating a stable list is
	// clearer and behaves identically.
	ids := make([]fields.ID, 0, len(rcf.children))
	for id := range rcf.children {
		ids = append(ids, id)
	}
	for _, id := range ids {
		rcf.releaseChild(id)
	}
}
|
package admin
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
m "github.com/im2kl/ProxyShed/Client/models"
)
// rawlist is the package-level result buffer filled by GetURLList.
var rawlist []m.ProxySource

// GetURLList retrieves the latest URL list for scraping from the local API.
// Request/transport failures abort the process (log.Fatalln); on a non-200
// response or a decode failure the previous rawlist contents are returned
// unchanged.
func GetURLList() []m.ProxySource {
	client := http.Client{}
	req, err := http.NewRequest("GET", "http://localhost:5000/api/v1/list", nil)
	// Bug fix: check the error BEFORE touching req — a failed NewRequest
	// returns a nil request, and setting headers on it would panic.
	if err != nil {
		log.Fatalln(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-access-token", "dingding")
	resp, err := client.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		body, err := ioutil.ReadAll(resp.Body) // only interested in the body just now
		if err != nil {
			log.Fatalln(err)
		}
		// Best-effort decode: keep the previous rawlist but record why.
		if err := json.Unmarshal(body, &rawlist); err != nil {
			log.Println(err)
		}
	} else {
		// Bug fix: string(resp.StatusCode) interprets the code as a Unicode
		// code point (e.g. 404 -> "Ɣ"); format it as decimal text instead.
		log.Println(strconv.Itoa(resp.StatusCode))
	}
	return rawlist
}
|
package simplegfs
import (
"bufio"
"fmt"
"github.com/wweiw/simplegfs/pkg/testutil"
log "github.com/Sirupsen/logrus"
"time"
"os"
"testing"
"strings"
"strconv"
"sync"
"runtime"
)
// Global test config.
// Shared listen addresses, on-disk chunk-server directories, file names,
// and text payloads used by the tests in this file.
const MasterAddr = ":4444" // master server listen address
const ck1Addr = ":5555"    // chunk server listen addresses
const ck2Addr = ":5556"
const ck3Addr = ":5557"
const ck4Addr = ":5558"
const ck5Addr = ":5559"
const ck6Addr = ":5560"
const ck1Path = "/var/tmp/ck1" // chunk server storage directories (created/removed per test)
const ck2Path = "/var/tmp/ck2"
const ck3Path = "/var/tmp/ck3"
const ck4Path = "/var/tmp/ck4"
const ck5Path = "/var/tmp/ck5"
const ck6Path = "/var/tmp/ck6"
const testFile1 = "/a" // file paths inside the GFS namespace
const testFile2 = "/b"
const testFile3 = "/c"
const testData1 = "The quick brown fox jumps over the lazy dog.\n"
const testData2 = "Perfection is reached not when there is nothing left" +
	" to add, but when there is nothing left to take away.\n"
// testStart lowers the log level to Warn and prints a banner naming the
// calling test, resolved via runtime.Caller; falls back to "Unknown" when
// the caller cannot be resolved.
//
// param - none
// return - none
func testStart() {
	log.SetLevel(log.WarnLevel)
	if pc, _, _, ok := runtime.Caller(1); ok {
		if fn := runtime.FuncForPC(pc); fn != nil {
			fmt.Println()
			fmt.Println("+++++ Start\t", fn.Name())
			return
		}
	}
	fmt.Println()
	fmt.Println("+++++ Start\tUnknown")
}
// testEnd prints a banner naming the calling test as finished, resolved via
// runtime.Caller; falls back to "Unknown" when the caller cannot be
// resolved.
//
// param - none
// return - none
func testEnd() {
	if pc, _, _, ok := runtime.Caller(1); ok {
		if fn := runtime.FuncForPC(pc); fn != nil {
			fmt.Println("----- Finish\t", fn.Name())
			fmt.Println()
			return
		}
	}
	fmt.Println("----- Finish\tUnknown")
	fmt.Println()
}
// Construct a master server instance given master's address, then sleep one
// heartbeat so it can come up before tests use it.
// params - addr: Master server's address.
//          ckAddrs: An array of chunk server addresses.
// return - A pointer to a MasterServer instance.
func initMaster(addr string, ckAddrs []string) *MasterServer {
	ms := StartMasterServer(addr, ckAddrs)
	time.Sleep(HeartbeatInterval)
	return ms
}
// Kills the master server instance (delegates to MasterServer.Kill).
// params - ms: A pointer to a MasterServer instance.
// return - None.
func killMaster(ms *MasterServer) {
	ms.Kill()
}
// Construct an array of chunk server instances given the master's address, an
// array of chunk server addresses, and an array of chunk server paths.
// params - msAddr: Master server's address.
//          ckAddrs: An array of chunk server addresses.
//          ckPaths: An array of chunk server paths.
// return - An array of ChunkServer pointers
func initChunkServers(msAddr string, ckAddrs []string,
	ckPaths []string) []*ChunkServer {
	// Error checking: the two arrays must pair up one-to-one.
	if len(ckAddrs) != len(ckPaths) {
		log.Fatal("Must provide same amount of chunk server addresses and " +
			"chunk server paths")
	}
	// Return value
	ckServers := make([]*ChunkServer, len(ckAddrs))
	for i, ckAddr := range ckAddrs {
		// Mkdir error is ignored: the directory may already exist from a
		// previous run — NOTE(review): other failures are silently dropped too.
		os.Mkdir(ckPaths[i], FilePermRWX)
		ckServers[i] = StartChunkServer(msAddr, ckAddr, ckPaths[i])
	}
	// Sleep two heartbeats so the chunk servers can register with the master.
	time.Sleep(2 * HeartbeatInterval)
	return ckServers
}
// killChunkServers shuts down every chunk server and then removes each
// chunk server's on-disk storage directory.
// params - cks: An array of pointers to chunk server instances.
//          ckPaths: An array of chunk server paths.
// return - None.
func killChunkServers(cks []*ChunkServer, ckPaths []string) {
	for _, server := range cks {
		server.Kill()
	}
	for _, path := range ckPaths {
		os.RemoveAll(path)
	}
}
// initClients constructs n Client instances pointed at the master, then
// sleeps two heartbeats so they can register.
// params - msAddr: Master server's address.
//          n: Number of Client instances.
// return - An array of pointers of Client instances.
func initClients(msAddr string, n int) []*Client {
	cs := make([]*Client, n)
	// Idiom fix: `for i := range` replaces the redundant `for i, _ := range`.
	for i := range cs {
		cs[i] = NewClient(msAddr)
	}
	// Give the clients time to register with the master.
	time.Sleep(2 * HeartbeatInterval)
	return cs
}
// TestNewClientId verifies that client ids are assigned sequentially from
// the master's persisted counter: it reads the expected next id out of the
// master's metadata file, then checks two freshly-created clients get that
// id and the one after it.
func TestNewClientId(t *testing.T) {
	testStart()
	ms := StartMasterServer(":4444", []string{})
	time.Sleep(HeartbeatInterval)
	// Read master server's meta data to determine what the next clientId
	// is suppose to be. The file "serverMeta:4444" is presumably written by
	// the master on startup — confirm the format against MasterServer.
	f, err := os.OpenFile("serverMeta:4444", os.O_RDONLY, 0666)
	if err != nil {
		t.Error(err)
	}
	defer f.Close()
	var cid uint64
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Lines look like "clientId <n>"; grab the first such entry.
		fields := strings.Fields(scanner.Text())
		if fields[0] == "clientId" {
			cid, _ = strconv.ParseUint(fields[1], 0, 64)
			break
		}
	}
	c0 := NewClient(":4444")
	defer c0.Stop()
	testutil.AssertEquals(t, c0.clientId, cid)
	time.Sleep(HeartbeatInterval)
	cid++
	c1 := NewClient(":4444")
	defer c1.Stop()
	testutil.AssertEquals(t, c1.clientId, cid)
	time.Sleep(HeartbeatInterval)
	ms.Kill()
	testEnd()
}
// testCreate issues c.Create(path), printing any error, and returns whether
// the call reported success.
func testCreate(c *Client, path string) bool {
	created, createErr := c.Create(path)
	if createErr != nil {
		fmt.Println(createErr)
	}
	return created
}
// testMkdir issues c.Mkdir(path), printing any error, and returns whether
// the call reported success.
func testMkdir(c *Client, path string) bool {
	made, mkdirErr := c.Mkdir(path)
	if mkdirErr != nil {
		fmt.Println(mkdirErr)
	}
	return made
}
// testDelete issues c.Delete(path), printing any error, and returns whether
// the call reported success.
func testDelete(c *Client, path string) bool {
	deleted, delErr := c.Delete(path)
	if delErr != nil {
		fmt.Println(delErr)
	}
	return deleted
}
// TestNamespaceManagement exercises namespace rules: duplicate creates
// fail, mkdir requires existing parents, and non-empty directories cannot
// be deleted until their contents are removed.
func TestNamespaceManagement(t *testing.T) {
	testStart()
	ms := StartMasterServer(":4444", []string{})
	time.Sleep(HeartbeatInterval)
	c := NewClient(":4444")
	defer c.Stop()
	// Duplicate file creation must fail.
	testutil.AssertTrue(t, testCreate(c, "/a"), "create /a returns true.")
	testutil.AssertFalse(t, testCreate(c, "/a"), "create /a returns false.")
	// mkdir with a missing parent must fail; creating the parent first works.
	testutil.AssertFalse(t, testMkdir(c, "/var/tmp"), "mkdir /var/tmp returns false.")
	testutil.AssertTrue(t, testMkdir(c, "/var"), "mkdir /var returns true.")
	testutil.AssertTrue(t, testMkdir(c, "/var/tmp"), "mkdir /var/tmp returns true.")
	testutil.AssertTrue(t, testCreate(c, "/var/tmp/a"), "create /var/tmp/a returns true.")
	testutil.AssertTrue(t, testCreate(c, "/var/tmp/b"), "create /var/tmp/b returns true.")
	testutil.AssertTrue(t, testCreate(c, "/var/tmp/c"), "create /var/tmp/c returns true.")
	testutil.AssertTrue(t, testCreate(c, "/var/tmp/d"), "create /var/tmp/d returns true.")
	fmt.Println(c.List("/var/tmp"))
	// Deleting non-empty directories must fail until contents are gone.
	testutil.AssertFalse(t, testDelete(c, "/var"), "delete /var returns false.")
	testutil.AssertFalse(t, testDelete(c, "/var/tmp"), "delete /var/tmp returns false.")
	testutil.AssertTrue(t, testDelete(c, "/var/tmp/a"), "delete /var/tmp/a returns true.")
	testutil.AssertTrue(t, testDelete(c, "/var/tmp/b"), "delete /var/tmp/b returns true.")
	testutil.AssertTrue(t, testDelete(c, "/var/tmp/c"), "delete /var/tmp/c returns true.")
	testutil.AssertTrue(t, testDelete(c, "/var/tmp/d"), "delete /var/tmp/d returns true.")
	fmt.Println(c.List("/var/tmp"))
	testutil.AssertTrue(t, testDelete(c, "/var/tmp"), "delete /var/tmp returns true.")
	testutil.AssertTrue(t, testDelete(c, "/var"), "delete /var returns true.")
	fmt.Println(c.List("/var"))
	time.Sleep(HeartbeatInterval)
	ms.Kill()
	testEnd()
}
// TestReadWrite spins up one master and three chunk servers, then verifies
// basic read/write semantics: exact-size reads, oversized reads returning
// the true byte count, writes at a non-zero offset, and reads spanning a
// partially-written region.
func TestReadWrite(t *testing.T) {
	testStart()
	ms := StartMasterServer(":4444", []string{":5555", ":6666", ":7777"})
	time.Sleep(HeartbeatInterval)
	os.Mkdir("/var/tmp/ck1", 0777)
	os.Mkdir("/var/tmp/ck2", 0777)
	os.Mkdir("/var/tmp/ck3", 0777)
	cs1 := StartChunkServer(":4444", ":5555", "/var/tmp/ck1")
	cs2 := StartChunkServer(":4444", ":6666", "/var/tmp/ck2")
	cs3 := StartChunkServer(":4444", ":7777", "/var/tmp/ck3")
	c := NewClient(":4444")
	defer c.Stop()
	if ok, err := c.Create("/a"); err != nil || ok != true {
		t.Error("c should create '/a' successfully.")
	}
	if ok := c.Write("/a", 0, []byte("hello, world. nice to meet you.")); !ok {
		t.Error("Write request failed.")
	}
	time.Sleep(HeartbeatInterval)
	// Exact-size read.
	data := make([]byte, 31)
	if n, _ := c.Read("/a", 0, data); n != 31 || string(data) != "hello, world. nice to meet you." {
		t.Error("c actually reads", string(data))
	}
	// Oversized buffer: n reports the true number of bytes read.
	data = make([]byte, 100)
	if n, _ := c.Read("/a", 0, data); n != 31 || string(data[0:n]) != "hello, world. nice to meet you." {
		t.Error("c actually reads", n, "chars:", string(data))
	}
	if ok, err := c.Create("/b"); err != nil || ok != true {
		t.Error("c should create '/b' successfully.")
	}
	// Write at offset 15, then read from offset 10: the first five bytes of
	// the read land in the unwritten gap.
	test := "how are you. fine thank you and you? I'm fine too."
	c.Write("/b", 15, []byte(test))
	fmt.Println("#############", len(test))
	time.Sleep(HeartbeatInterval)
	data = make([]byte, 100)
	if n, _ := c.Read("/b", 10, data); n != len(test)+5 || string(data[5:5+len(test)]) != test {
		t.Error("c actually reads", n, "chars:", string(data))
	}
	// Another write further out, then a 50-byte read across the gap.
	test = "abcdefghijklmnopqrstuvwxyz"
	c.Write("/b", 85, []byte(test))
	time.Sleep(HeartbeatInterval)
	data = make([]byte, 50)
	if n, _ := c.Read("/b", 60, data); n != 50 {
		t.Error("c actually reads", n, "chars:", data[:n])
	}
	time.Sleep(10 * HeartbeatInterval)
	cs1.Kill()
	cs2.Kill()
	cs3.Kill()
	ms.Kill()
	os.RemoveAll("/var/tmp/ck1")
	os.RemoveAll("/var/tmp/ck2")
	os.RemoveAll("/var/tmp/ck3")
	testEnd()
}
// 3 chunk servers + 3 clients sequantial read/write + concurrent read/write
// test.
//
// Bug fix: the three writer goroutines previously shared the single outer
// `offset` variable (each reset it to 0 and incremented it concurrently),
// which is a data race under `go test -race`. Each goroutine now keeps its
// own local offset, preserving the intended behavior: every client writes
// its own stream starting at offset 0.
func TestChunkServerLease(t *testing.T) {
	testStart()
	// Master definitions.
	msAddr := ":4444"
	// Chunkserver definitions.
	ck1Path := "/var/tmp/ck1"
	ck2Path := "/var/tmp/ck2"
	ck3Path := "/var/tmp/ck3"
	ck1Addr := ":5555"
	ck2Addr := ":6666"
	ck3Addr := ":7777"
	// Client definitions.
	testFile1 := "/a"
	testFile2 := "/b"
	testData1 := "testData1: The quick brown fox jumps over the lazy dog.\n"
	testData2 := "testData2: The quick brown fox jumps over the lazy dog.\n"
	testData3 := "testData3: The quick brown fox jumps over the lazy dog.\n"
	testData4 := "testData4: The quick brown fox jumps over the lazy dog.\n"
	readBuf := make([]byte, len(testData1))
	readBuf2 := make([]byte, 7000)
	// Fire up master server.
	ms := StartMasterServer(msAddr, []string{ck1Addr, ck2Addr, ck3Addr})
	time.Sleep(2 * HeartbeatInterval)
	// Make space on local for chunkserver to store data.
	os.Mkdir(ck1Path, FilePermRWX)
	os.Mkdir(ck2Path, FilePermRWX)
	os.Mkdir(ck3Path, FilePermRWX)
	// Fire up chunk servers.
	cs1 := StartChunkServer(msAddr, ck1Addr, ck1Path)
	cs2 := StartChunkServer(msAddr, ck2Addr, ck2Path)
	cs3 := StartChunkServer(msAddr, ck3Addr, ck3Path)
	// Create client instances.
	c1 := NewClient(msAddr)
	c2 := NewClient(msAddr)
	c3 := NewClient(msAddr)
	time.Sleep(HeartbeatInterval)
	// ----- Test sequential read and write -----
	// Create a test file.
	if ok, err := c1.Create(testFile1); err != nil || ok != true {
		t.Error("Failed to create testfile")
	}
	// Write once.
	offset := uint64(0)
	if ok := c1.Write(testFile1, offset, []byte(testData1)); !ok {
		t.Error("Write request failed")
	}
	// Read and verify.
	if n, _ := c1.Read(testFile1, 0, readBuf); n != len(testData1) ||
		string(readBuf) != testData1 {
		t.Error("Client 1 reads:", string(readBuf), ". Should read:", testData1)
	}
	// Write once.
	offset += uint64(len(testData1))
	if ok := c2.Write(testFile1, offset, []byte(testData2)); !ok {
		t.Error("Write request failed")
	}
	// Read and verify.
	if n, _ := c2.Read(testFile1, offset, readBuf); n != len(testData2) ||
		string(readBuf) != testData2 {
		t.Error("Client 2 reads:", string(readBuf), ". Should read:", testData2)
	}
	// Write once.
	offset += uint64(len(testData2))
	if ok := c3.Write(testFile1, offset, []byte(testData3)); !ok {
		t.Error("Write request failed")
	}
	// Read and verify.
	if n, _ := c3.Read(testFile1, offset, readBuf); n != len(testData3) ||
		string(readBuf) != testData3 {
		t.Error("Client 3 reads:", string(readBuf), ". Should read:", testData3)
	}
	// Create a second test file.
	if ok, err := c2.Create(testFile2); err != nil || ok != true {
		t.Error("Failed to create testfile2")
	}
	// Write once.
	offset = uint64(0)
	if ok := c1.Write(testFile2, offset, []byte(testData4)); !ok {
		t.Error("Write request failed")
	}
	// Read and verify.
	if n, _ := c3.Read(testFile2, offset, readBuf); n != len(testData4) ||
		string(readBuf) != testData4 {
		t.Error("Client 3 reads:", string(readBuf), ". Should read:", testData4)
	}
	// ----- Test concurrent write -----
	// Write concurrently, there is no way to deterministically the read output
	// against preset value, therefore we can only verify partial outputs are
	// valid and testDatas are not interleaving each other.
	//
	// Each client runs for 5 seconds, and writes to the same file conccurently,
	// starting from offset 0. Each goroutine uses a LOCAL offset to avoid a
	// data race on a shared variable.
	go func() {
		duration := time.Now().Add(5 * time.Second)
		off := uint64(0)
		for time.Now().Before(duration) {
			time.Sleep(HeartbeatInterval)
			c1.Write(testFile1, off, []byte(testData1))
			off += uint64(len(testData1))
		}
	}()
	go func() {
		duration := time.Now().Add(5 * time.Second)
		off := uint64(0)
		for time.Now().Before(duration) {
			time.Sleep(HeartbeatInterval)
			c2.Write(testFile1, off, []byte(testData2))
			off += uint64(len(testData2))
		}
	}()
	go func() {
		duration := time.Now().Add(5 * time.Second)
		off := uint64(0)
		for time.Now().Before(duration) {
			time.Sleep(HeartbeatInterval)
			c3.Write(testFile1, off, []byte(testData3))
			off += uint64(len(testData3))
		}
	}()
	// Read contents of the file while writes are still ongoing.
	time.Sleep(1 * time.Second)
	n, err := c1.Read(testFile1, 0, readBuf2)
	if err != nil {
		t.Error(err)
	} else {
		fmt.Println("Read", n, "from testFile", testFile1)
		fmt.Println(string(readBuf2))
	}
	// Read contents of the file after writes are finished.
	time.Sleep(5 * time.Second)
	n, err = c2.Read(testFile1, 0, readBuf2)
	if err != nil {
		t.Error(err)
	} else {
		fmt.Println("Read", n, "from testFile", testFile1)
		fmt.Println(string(readBuf2))
	}
	// Shutdown master and chunk servers.
	ms.Kill()
	cs1.Kill()
	cs2.Kill()
	cs3.Kill()
	// Remove local disk space allocated for chunkserver.
	os.RemoveAll(ck1Path)
	os.RemoveAll(ck2Path)
	os.RemoveAll(ck3Path)
	testEnd()
}
// TestAppend verifies that concurrent record appends from multiple clients
// land at distinct offsets and that every client can read back exactly the
// data it appended.
func TestAppend(t *testing.T) {
	testStart()
	// Local test config.
	numClients := 5
	localTestData1 := strings.Repeat(testData1, 20)
	localTestData2 := strings.Repeat(testData2, 20)
	ckAddrs := [...]string{ck1Addr, ck2Addr, ck3Addr}
	ckPaths := [...]string{ck1Path, ck2Path, ck3Path}
	// Init master server, chunk servers, and clients.
	ms := initMaster(MasterAddr, ckAddrs[:])
	cks := initChunkServers(MasterAddr, ckAddrs[:], ckPaths[:])
	cs := initClients(MasterAddr, numClients)
	// Create a file to write to. Use t.Fatalf instead of log.Fatal: log.Fatal
	// calls os.Exit, which made the old t.Error(err) unreachable and skipped
	// all test-framework cleanup.
	if ok, err := cs[0].Create(testFile1); !ok {
		t.Fatalf("Failed to create testFile: %v", err)
	}
	// Issue concurrent appends: even client IDs append localTestData1, odd
	// client IDs append localTestData2.
	var wg sync.WaitGroup
	for _, c := range cs {
		wg.Add(1)
		fmt.Println("Client ID", c.clientId)
		var data []byte
		if c.clientId%2 == 0 {
			data = []byte(localTestData1)
		} else {
			data = []byte(localTestData2)
		}
		go func(c *Client, data []byte) {
			defer wg.Done()
			fmt.Println("Client ID", c.clientId)
			offset, err := c.Append(testFile1, data)
			if err != nil {
				t.Error(err)
				return // no valid offset to read back from
			}
			fmt.Println("Client", c.clientId, "appended to offset", offset)
			// Give replication a moment to settle before reading back.
			time.Sleep(2 * time.Second)
			readBuf := make([]byte, len(data))
			if _, err = c.Read(testFile1, offset, readBuf); err != nil {
				t.Error(err)
				return // readBuf content is undefined on error
			}
			if string(readBuf) != string(data) {
				t.Error("Read does not match append.")
			}
			fmt.Println("Client", c.clientId, "read", string(readBuf))
		}(c, data)
	}
	// Shut down.
	wg.Wait()
	killChunkServers(cks, ckPaths[:])
	killMaster(ms)
	time.Sleep(time.Second)
	testEnd()
}
// TestAppend2 tests record append for the case where the appended data spans
// more than one chunk.
// TestAppend2 exercises concurrent record appends whose payload is large
// enough to cross chunk boundaries, and verifies each client reads back the
// exact bytes it appended.
func TestAppend2(t *testing.T) {
	testStart()
	// Local test config.
	numClients := 5
	// Large payload so each append spans more than one chunk.
	localTestData1 := strings.Repeat(testData1, 298261)
	ckAddrs := [...]string{ck1Addr, ck2Addr, ck3Addr}
	ckPaths := [...]string{ck1Path, ck2Path, ck3Path}
	// Init master server, chunk servers, and clients.
	ms := initMaster(MasterAddr, ckAddrs[:])
	cks := initChunkServers(MasterAddr, ckAddrs[:], ckPaths[:])
	cs := initClients(MasterAddr, numClients)
	// Create a file to write to. t.Fatalf (not log.Fatal) so the testing
	// framework still runs cleanup; log.Fatal exits the process and made the
	// old t.Error(err) unreachable.
	if ok, err := cs[0].Create(testFile1); !ok {
		t.Fatalf("Failed to create testFile: %v", err)
	}
	// Issue concurrent appends.
	var wg sync.WaitGroup
	for _, c := range cs {
		wg.Add(1)
		fmt.Println("Client ID", c.clientId)
		go func(c *Client) {
			defer wg.Done()
			fmt.Println("Client ID", c.clientId)
			offset, err := c.Append(testFile1, []byte(localTestData1))
			if err != nil {
				t.Error(err)
				return
			}
			fmt.Println("Client", c.clientId, "appended to offset", offset)
			// Give replication a moment to settle before reading back.
			time.Sleep(2 * time.Second)
			readBuf := make([]byte, len(localTestData1))
			if _, err = c.Read(testFile1, offset, readBuf); err != nil {
				t.Error(err)
				return // readBuf content is undefined on error
			}
			if string(readBuf) != localTestData1 {
				t.Error("Read does not match append.")
				return
			}
		}(c)
	}
	// Shut down.
	wg.Wait()
	killChunkServers(cks, ckPaths[:])
	killMaster(ms)
	time.Sleep(time.Second)
	testEnd()
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core_test
import (
"context"
"fmt"
"math/rand"
"strings"
"testing"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/stretchr/testify/require"
)
// TestAnalyzeMVIndex checks that ANALYZE skips multi-valued (MV) indexes
// under both stats version 1 and 2, emitting one "not supported" warning per
// skipped MV index, while ordinary indexes are analyzed without warnings.
func TestAnalyzeMVIndex(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Two ordinary indexes plus two composite MV indexes (idx, idx2).
	tk.MustExec(`create table t(a int, b int, c int, j json,
index(a), index(b),
index idx(a, b, (cast(j as signed array)), c),
index idx2(a, b, (cast(j->'$.str' as char(10) array)), c))`)
	tk.MustExec("set tidb_analyze_version=2")
	tk.MustExec("analyze table t")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows(
		"Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t, reason to use this rate is \"use min(1, 110000/10000) as the sample-rate=1\"",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx2"))
	// Under v2, analyzing one index still collects full statistics, so both
	// MV indexes are reported as skipped.
	tk.MustExec("analyze table t index idx")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows(
		"Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t, reason to use this rate is \"TiDB assumes that the table is empty, use sample-rate=1\"",
		"Warning 1105 The version 2 would collect all statistics not only the selected indexes",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx2"))
	// Under v1, only the analyzed objects matter, so only the MV indexes that
	// are actually touched produce warnings.
	tk.MustExec("set tidb_analyze_version=1")
	tk.MustExec("analyze table t")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows(
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx2"))
	tk.MustExec("analyze table t index idx")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows(
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx"))
	// Analyzing a plain index only: no warnings at all.
	tk.MustExec("analyze table t index a")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows())
	tk.MustExec("analyze table t index a, idx, idx2")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows(
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx",
		"Warning 1105 analyzing multi-valued indexes is not supported, skip idx2"))
}
// TestIndexMergeJSONMemberOf replays the recorded member-of plan cases from
// the index-merge suite and checks each EXPLAIN output against the golden
// testdata (or records it when running in record mode).
func TestIndexMergeJSONMemberOf(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(
a int, j0 json, j1 json,
index j0_0((cast(j0->'$.path0' as signed array))),
index j0_1((cast(j0->'$.path1' as signed array))),
index j0_string((cast(j0->'$.path_string' as char(10) array))),
index j0_date((cast(j0->'$.path_date' as date array))),
index j1((cast(j1 as signed array))))`)
	var (
		input  []string
		output []struct {
			SQL  string
			Plan []string
		}
	)
	suiteData := core.GetIndexMergeSuiteData()
	suiteData.LoadTestCases(t, &input, &output)
	for caseIdx, querySQL := range input {
		testdata.OnRecord(func() { output[caseIdx].SQL = querySQL })
		explainResult := tk.MustQuery("explain format = 'brief' " + querySQL)
		testdata.OnRecord(func() {
			output[caseIdx].Plan = testdata.ConvertRowsToStrings(explainResult.Rows())
		})
		explainResult.Check(testkit.Rows(output[caseIdx].Plan...))
	}
}
// TestDNFOnMVIndex runs the recorded plan cases for DNF (OR-connected)
// predicates over multi-valued indexes and compares the EXPLAIN output
// against the golden testdata.
func TestDNFOnMVIndex(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// One single-column MV index and one composite MV index.
	tk.MustExec(`create table t(a int, b int, c int, j json,
index idx1((cast(j as signed array))),
index idx2(a, b, (cast(j as signed array)), c))`)
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	planSuiteData := core.GetIndexMergeSuiteData()
	planSuiteData.LoadTestCases(t, &input, &output)
	for i, query := range input {
		// In record mode, capture the query and its plan into the golden file.
		testdata.OnRecord(func() {
			output[i].SQL = query
		})
		result := tk.MustQuery("explain format = 'brief' " + query)
		testdata.OnRecord(func() {
			output[i].Plan = testdata.ConvertRowsToStrings(result.Rows())
		})
		// In replay mode, assert the plan matches the recorded one.
		result.Check(testkit.Rows(output[i].Plan...))
	}
}
// TestCompositeMVIndex runs the recorded plan cases for composite
// multi-valued indexes (MV column mixed with ordinary columns) and compares
// the EXPLAIN output against the golden testdata.
func TestCompositeMVIndex(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(a int, b int , c int, j json,
index idx(a, b, (cast(j as signed array)), c),
index idx2(a, b, (cast(j->'$.str' as char(10) array)), c))`)
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	planSuiteData := core.GetIndexMergeSuiteData()
	planSuiteData.LoadTestCases(t, &input, &output)
	for i, query := range input {
		// In record mode, capture the query and its plan into the golden file.
		testdata.OnRecord(func() {
			output[i].SQL = query
		})
		result := tk.MustQuery("explain format = 'brief' " + query)
		testdata.OnRecord(func() {
			output[i].Plan = testdata.ConvertRowsToStrings(result.Rows())
		})
		// In replay mode, assert the plan matches the recorded one.
		result.Check(testkit.Rows(output[i].Plan...))
	}
}
// TestMVIndexSelection runs the recorded plan cases checking when the
// optimizer selects a multi-valued index, comparing EXPLAIN output against
// the golden testdata.
func TestMVIndexSelection(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(a int, j json,
index i_int((cast(j->'$.int' as signed array))))`)
	var input []string
	var output []struct {
		SQL  string
		Plan []string
	}
	planSuiteData := core.GetIndexMergeSuiteData()
	planSuiteData.LoadTestCases(t, &input, &output)
	for i, query := range input {
		// In record mode, capture the query and its plan into the golden file.
		testdata.OnRecord(func() {
			output[i].SQL = query
		})
		result := tk.MustQuery("explain format = 'brief' " + query)
		testdata.OnRecord(func() {
			output[i].Plan = testdata.ConvertRowsToStrings(result.Rows())
		})
		// In replay mode, assert the plan matches the recorded one.
		result.Check(testkit.Rows(output[i].Plan...))
	}
}
// TestMVIndexIndexMergePlanCache verifies that a prepared statement touching
// a multi-valued index is excluded from the plan cache (generated-column
// access is un-cacheable), so repeated executions never come from the cache.
func TestMVIndexIndexMergePlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(j json, index kj((cast(j as signed array))))`)
	tk.MustExec("prepare st from 'select /*+ use_index_merge(t, kj) */ * from t where (1 member of (j))'")
	// Preparing already warns that the plan will not be cached.
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip prepared plan-cache: query accesses generated columns is un-cacheable"))
	tk.MustExec("execute st")
	tk.MustExec("execute st")
	// The second execution must not have been served from the plan cache.
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
}
// TestMVIndexPointGet ensures queries over a unique multi-valued index never
// produce a Point-Get plan: an MV index entry is not one-to-one with a table
// row, so point-get would be incorrect.
func TestMVIndexPointGet(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(j json, unique kj((cast(j as signed array))))`)
	for _, sql := range []string{
		"select j from t where j=1",
		"select j from t where j=1 or j=2",
		"select j from t where j in (1, 2)",
	} {
		plan := tk.MustQuery("explain " + sql).Rows()
		hasPointGet := false
		for _, line := range plan {
			if strings.Contains(strings.ToLower(line[0].(string)), "point") {
				hasPointGet = true
				break // one occurrence is enough to violate the invariant
			}
		}
		// require.False reads better than require.True on a negation, and the
		// message pinpoints the offending query on failure.
		require.False(t, hasPointGet, "unexpected point-get plan for %q", sql)
	}
}
// TestEnforceMVIndex runs the recorded cases where an MV index is forced via
// hints; cases may legitimately fail to plan, so both the expected plan and
// the expected error are recorded in the golden testdata.
func TestEnforceMVIndex(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(a int, j json, index kj((cast(j as signed array))))`)
	var input []string
	var output []struct {
		SQL  string
		Plan []string
		Err  string
	}
	planSuiteData := core.GetIndexMergeSuiteData()
	planSuiteData.LoadTestCases(t, &input, &output)
	for i, query := range input {
		testdata.OnRecord(func() {
			output[i].SQL = query
		})
		// Exec (not MustQuery): planning is allowed to fail for these cases.
		rs, err := tk.Exec("explain format = 'brief' " + query)
		if err != nil {
			// Error case: record/verify the error text, no plan expected.
			testdata.OnRecord(func() {
				output[i].Err = err.Error()
				output[i].Plan = nil
			})
			require.Equal(t, output[i].Err, err.Error())
		} else {
			// Success case: record/verify the plan, no error expected.
			result := tk.ResultSetToResultWithCtx(context.Background(), rs, "")
			testdata.OnRecord(func() {
				output[i].Err = ""
				output[i].Plan = testdata.ConvertRowsToStrings(result.Rows())
			})
			result.Check(testkit.Rows(output[i].Plan...))
		}
	}
}
// TestMVIndexInvisible checks that an invisible multi-valued index is ignored
// by the planner even when forced with use_index/use_index_merge hints, and
// that making it visible again restores the IndexMerge plan.
func TestMVIndexInvisible(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(a int, j json, index kj((cast(j as signed array))))`)
	// Visible index: the hint yields an IndexMerge plan over kj.
	tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows(
		"IndexMerge 10.00 root type: union",
		"├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo",
		"└─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo",
	))
	tk.MustExec(`ALTER TABLE t ALTER INDEX kj INVISIBLE`)
	// Invisible index: both hints fall back to a full table scan.
	tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows(
		"TableReader 8000.00 root data:Selection",
		"└─Selection 8000.00 cop[tikv] json_memberof(cast(1, json BINARY), test.t.j)",
		" └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"))
	tk.MustQuery(`explain format='brief' select /*+ use_index_merge(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows(
		"TableReader 8000.00 root data:Selection",
		"└─Selection 8000.00 cop[tikv] json_memberof(cast(1, json BINARY), test.t.j)",
		" └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"))
	tk.MustExec(`ALTER TABLE t ALTER INDEX kj VISIBLE`)
	// Visible again: back to the IndexMerge plan.
	tk.MustQuery(`explain format='brief' select /*+ use_index(t, kj) */ * from t where (1 member of (j))`).Check(testkit.Rows(
		`IndexMerge 10.00 root type: union`,
		"├─IndexRangeScan(Build) 10.00 cop[tikv] table:t, index:kj(cast(`j` as signed array)) range:[1,1], keep order:false, stats:pseudo",
		`└─TableRowIDScan(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo`))
}
// TestMVIndexFullScan checks that MV-index index-merge plans return correct
// counts for member-of/json_contains/json_overlaps filters, and that a plain
// full scan of an MV index is rejected (it cannot enumerate all rows).
func TestMVIndexFullScan(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(j json, index kj((cast(j as signed array))))`)
	tk.MustExec(`insert into t values ('[1]')`)
	tk.MustExec(`insert into t values ('[1, 2]')`)
	// Empty array and NULL rows have no MV index entries at all.
	tk.MustExec(`insert into t values ('[]')`)
	tk.MustExec(`insert into t values (NULL)`)
	tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t`).Check(testkit.Rows("4"))
	tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t where (1 member of (j))`).Check(testkit.Rows("2"))
	tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t where json_contains((j), '[1]')`).Check(testkit.Rows("2"))
	tk.MustQuery(`select /*+ use_index_merge(t, kj) */ count(*) from t where json_overlaps((j), '[1]')`).Check(testkit.Rows("2"))
	// Forbid IndexMerge+IndexFullScan since IndexFullScan on an MV index
	// cannot read all rows in some cases (e.g. rows with '[]' or NULL above).
	tk.MustGetErrMsg(`select /*+ use_index(t, kj) */ count(*) from t`, "[planner:1815]Internal : Can't find a proper physical plan for this query")
}
// TestMVIndexEmptyArray cross-checks MV-index index-merge results against the
// index-ignoring plan for json_contains/json_overlaps conditions, including
// the tricky empty-array arguments.
func TestMVIndexEmptyArray(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(j json, index kj((cast(j as signed array))))`)
	tk.MustExec(`insert into t values ('[1]')`)
	tk.MustExec(`insert into t values ('[1, 2]')`)
	// '[]' and NULL rows produce no MV index entries.
	tk.MustExec(`insert into t values ('[]')`)
	tk.MustExec(`insert into t values (NULL)`)
	for _, cond := range []string{
		"json_contains(j, '[]')",
		"json_contains(j, '[1]')",
		"json_contains(j, '[1, 2]')",
		"json_contains(j, '[1, 10]')",
		"json_overlaps(j, '[]')",
		"json_overlaps(j, '[1]')",
		"json_overlaps(j, '[1, 2]')",
		"json_overlaps(j, '[1, 10]')",
	} {
		// The MV-index plan must return exactly the same rows as the plan
		// that ignores the index.
		tk.MustQuery(fmt.Sprintf("select /*+ use_index_merge(t) */ * from t where %v", cond)).Sort().Check(
			tk.MustQuery(fmt.Sprintf("select /*+ ignore_index(t, kj) */ * from t where %v", cond)).Sort().Rows())
	}
}
// TestMVIndexRandom fuzzes MV indexes of several element types: it inserts
// random rows and, for each of many random WHERE clauses, checks that the
// index-merge plan returns exactly the same rows as the index-ignoring plan.
func TestMVIndexRandom(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	for _, testCase := range []struct {
		indexType     string              // element type in the CAST(... array) expression
		insertValOpts randMVIndexValOpts  // value generation for inserted rows
		queryValsOpts randMVIndexValOpts  // value generation for query predicates
	}{
		{"signed", randMVIndexValOpts{"signed", 0, 3}, randMVIndexValOpts{"signed", 0, 3}},
		{"unsigned", randMVIndexValOpts{"unsigned", 0, 3}, randMVIndexValOpts{"unsigned", 0, 3}}, // unsigned-index + unsigned-values
		{"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 3, 3}},
		{"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 1, 3}},
		{"char(3)", randMVIndexValOpts{"string", 3, 3}, randMVIndexValOpts{"string", 5, 3}},
		{"date", randMVIndexValOpts{"date", 0, 3}, randMVIndexValOpts{"date", 0, 3}},
	} {
		tk.MustExec("drop table if exists t")
		tk.MustExec(fmt.Sprintf(`create table t(a int, j json, index kj((cast(j as %v array))))`, testCase.indexType))
		nRows := 20
		rows := make([]string, 0, nRows)
		for i := 0; i < nRows; i++ {
			// Each row: a random scalar a and a two-element JSON array j.
			va, v1, v2 := rand.Intn(testCase.insertValOpts.distinct), randMVIndexValue(testCase.insertValOpts), randMVIndexValue(testCase.insertValOpts)
			if testCase.indexType == "date" {
				// Dates must go through json_array(cast(... as date)) to get JSON dates.
				rows = append(rows, fmt.Sprintf(`(%v, json_array(cast(%v as date), cast(%v as date)))`, va, v1, v2))
			} else {
				rows = append(rows, fmt.Sprintf(`(%v, '[%v, %v]')`, va, v1, v2))
			}
		}
		tk.MustExec(fmt.Sprintf("insert into t values %v", strings.Join(rows, ", ")))
		nQueries := 20
		for i := 0; i < nQueries; i++ {
			// 1-3 random predicates joined by AND/OR.
			conds := randMVIndexConds(rand.Intn(3)+1, testCase.queryValsOpts)
			r1 := tk.MustQuery("select /*+ ignore_index(t, kj) */ * from t where " + conds).Sort()
			tk.MustQuery("select /*+ use_index_merge(t, kj) */ * from t where " + conds).Sort().Check(r1.Rows())
		}
	}
}
// randMVIndexConds builds a random WHERE clause of nConds predicates joined
// by " AND " (80% of the time) or " OR " (20% of the time).
func randMVIndexConds(nConds int, valOpts randMVIndexValOpts) string {
	var sb strings.Builder
	for i := 0; i < nConds; i++ {
		if i > 0 {
			// Pick the connector first to keep the rand call order stable.
			connector := " AND "
			if rand.Intn(5) < 1 {
				connector = " OR "
			}
			sb.WriteString(connector)
		}
		sb.WriteString(randMVIndexCond(rand.Intn(4), valOpts))
	}
	return sb.String()
}
// randMVIndexCond renders one random predicate over the MV column j (or the
// scalar column a): 0 => member-of, 1 => json_contains, 2 => json_overlaps,
// anything else => a range filter on a.
func randMVIndexCond(condType int, valOpts randMVIndexValOpts) string {
	switch condType {
	case 0: // member_of
		return fmt.Sprintf(`(%v member of (j))`, randMVIndexValue(valOpts))
	case 1: // json_contains
		return fmt.Sprintf(`json_contains(j, '%v')`, randArray(valOpts))
	case 2: // json_overlaps
		return fmt.Sprintf(`json_overlaps(j, '%v')`, randArray(valOpts))
	default: // others: plain range predicate on the scalar column
		return fmt.Sprintf(`a < %v`, rand.Intn(valOpts.distinct))
	}
}
// randArray renders a random JSON array literal holding 0-4 random values;
// an empty result is the literal "[]".
func randArray(opts randMVIndexValOpts) string {
	count := rand.Intn(5) // count can be 0
	elems := make([]string, 0, count)
	for len(elems) < count {
		elems = append(elems, randMVIndexValue(opts))
	}
	return "[" + strings.Join(elems, ", ") + "]"
}
// randMVIndexValOpts controls random value generation for the MV index tests.
type randMVIndexValOpts struct {
	valType string // one of "signed", "unsigned", "string", "date" (see randMVIndexValue)
	maxStrLen int // maximum repeat count for the "string" type
	distinct int // upper bound on the number of distinct base values
}
// randMVIndexValue renders one random scalar literal of the configured type;
// strings and dates are double-quoted for embedding in JSON. An unknown
// valType yields the empty string.
func randMVIndexValue(opts randMVIndexValOpts) string {
	switch strings.ToLower(opts.valType) {
	case "signed":
		// Centered around zero so negative values are covered.
		v := rand.Intn(opts.distinct) - (opts.distinct / 2)
		return fmt.Sprintf("%v", v)
	case "unsigned":
		return fmt.Sprintf("%v", rand.Intn(opts.distinct))
	case "string":
		digit := fmt.Sprintf("%v", rand.Intn(opts.distinct))
		length := rand.Intn(opts.maxStrLen) + 1
		return fmt.Sprintf(`"%v"`, strings.Repeat(digit, length))
	case "date":
		day := rand.Intn(opts.distinct) + 1
		return fmt.Sprintf(`"2000-01-%v"`, day)
	}
	return ""
}
// TestIndexMergeJSONMemberOf2 checks member-of result correctness on a
// signed-array MV index: the integer 1 matches the stored values while the
// string '1' does not, both with and without index merge.
func TestIndexMergeJSONMemberOf2(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t(
a int, j0 json, j1 json,
index j0_0((cast(j0->'$.path0' as signed array))));`)
	tk.MustExec("insert into t values(1, '{\"path0\" : [1,2,3]}', null ); ")
	// Baseline without index merge.
	tk.MustQuery("select /*+ no_index_merge() */ a from t where (1 member of (j0->'$.path0')); ").Check(testkit.Rows("1"))
	tk.MustQuery("select /*+ no_index_merge() */ a from t where ('1' member of (j0->'$.path0')); ").Check(testkit.Rows())
	// Index-merge plans must return the same results.
	tk.MustQuery("select /*+ use_index_merge(t, j0_0) */ a from t where (1 member of (j0->'$.path0')); ").Check(testkit.Rows("1"))
	tk.MustQuery("select /*+ use_index_merge(t, j0_0) */ a from t where ('1' member of (j0->'$.path0')); ").Check(testkit.Rows())
}
|
package entity
// CmsTopicCategory maps the CMS topic-category table via xorm: a category
// grouping CMS topics/subjects.
type CmsTopicCategory struct {
	Id int64 `json:"id" xorm:"pk autoincr BIGINT(20) 'id'"` // auto-increment primary key
	Name string `json:"name" xorm:"default 'NULL' VARCHAR(100) 'name'"` // category display name
	Icon string `json:"icon" xorm:"default 'NULL' comment('分类图标') VARCHAR(500) 'icon'"` // category icon URL/path
	SubjectCount int `json:"subject_count" xorm:"default NULL comment('专题数量') INT(11) 'subject_count'"` // number of subjects in this category
	ShowStatus int `json:"show_status" xorm:"default NULL INT(2) 'show_status'"` // visibility flag
	Sort int `json:"sort" xorm:"default NULL INT(11) 'sort'"` // ordering weight
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00900105 is the XML document wrapper for the ISO 20022
// caaa.009.001.05 message (AcceptorReconciliationRequest).
type Document00900105 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.009.001.05 Document"`
	// Message is the payload carried in the "AccptrRcncltnReq" element.
	Message *AcceptorReconciliationRequestV05 `xml:"AccptrRcncltnReq"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate the request.
func (d *Document00900105) AddMessage() *AcceptorReconciliationRequestV05 {
	msg := &AcceptorReconciliationRequestV05{}
	d.Message = msg
	return msg
}
// The AcceptorReconciliationRequest message is sent by an acceptor (or its agent) to the acquirer (or its agent), to ensure that the debits and credits performed by the acceptor match the computed balances of the acquirer for the debits and credits performed during the same reconciliation period.
// If the acceptor or the acquirer notices a difference in totals, the discrepancy will be resolved by other means, outside the scope of the protocol.
type AcceptorReconciliationRequestV05 struct {
	// Reconciliation request message management information.
	Header *iso20022.Header30 `xml:"Hdr"`
	// Information related to the reconciliation request.
	ReconciliationRequest *iso20022.AcceptorReconciliationRequest5 `xml:"RcncltnReq"`
	// Trailer of the message containing a MAC (message authentication code); optional.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}
// AddHeader allocates the message management header and returns it for
// population by the caller.
func (a *AcceptorReconciliationRequestV05) AddHeader() *iso20022.Header30 {
	hdr := &iso20022.Header30{}
	a.Header = hdr
	return hdr
}
// AddReconciliationRequest allocates the reconciliation request body and
// returns it for population by the caller.
func (a *AcceptorReconciliationRequestV05) AddReconciliationRequest() *iso20022.AcceptorReconciliationRequest5 {
	req := &iso20022.AcceptorReconciliationRequest5{}
	a.ReconciliationRequest = req
	return req
}
// AddSecurityTrailer allocates the optional MAC security trailer and returns
// it for population by the caller.
func (a *AcceptorReconciliationRequestV05) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	trailer := &iso20022.ContentInformationType15{}
	a.SecurityTrailer = trailer
	return trailer
}
|
// Package api is intended to implement a Go wrapper around the Trailforks API.
package api
|
package cmd
import "github.com/spf13/cobra"
// cmdNew implements the "new" subcommand, intended to scaffold a new project.
var cmdNew = &cobra.Command{
	Use:   "new",
	Short: "Creates a new project",
	Long:  "",
	Run: func(cmd *cobra.Command, args []string) {
		// Empty body: the command is currently a stub with no behavior.
	},
}
|
package main
import (
"fmt"
"net/http"
)
// main starts a minimal HTTP server exposing /health on port 80 and, in the
// background, ten goroutines that spin in tight infinite loops.
// NOTE(review): the busy-loop goroutines look like a deliberate CPU-load
// generator (e.g. for testing health checks under load or resource limits) —
// confirm intent before "fixing" them; they are never stopped.
func main() {
	fmt.Println("starting up...")
	// Spawn 10 goroutines, each incrementing a local counter forever.
	go func() {
		for i := 0; i < 10; i++ {
			go func() {
				// Shadows the loop variable with a fresh counter.
				i := 0
				for {
					i++
				}
			}()
		}
	}()
	// /health always reports healthy, regardless of the CPU load above.
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "Healthy")
	})
	// ListenAndServe blocks; it only returns on error.
	if err := http.ListenAndServe(":80", nil); err != nil {
		fmt.Println(err)
	}
}
|
/*
* Copyright © 2020-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"fmt"
"strings"
"testing"
"time"
"github.com/SoftwareAG/adabas-go-api/adatypes"
"github.com/stretchr/testify/assert"
)
// TestHoldResponse verifies that reading a record already held by a parallel
// connection with the HoldResponse option fails immediately with ADAGE91000
// ("record in hold") instead of blocking.
func TestHoldResponse(t *testing.T) {
	initTestLogWithFile(t, "connection_hold.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	// wait: the hold goroutine signals it owns the records.
	// end: bidirectional shutdown handshake with the hold goroutine.
	wait := make(chan bool)
	end := make(chan bool)
	go parallelAccessHoldResponse(t, wait, end, false)
	connection, err := NewConnection("ada;target=24")
	if !assert.NoError(t, err) {
		return
	}
	defer connection.Close()
	//fmt.Println(connection)
	err = connection.Open()
	if !assert.NoError(t, err) {
		return
	}
	readRequest, rErr := connection.CreateFileReadRequest(11)
	assert.NoError(t, rErr)
	// HoldResponse: a busy record yields an error response instead of waiting.
	readRequest.SetHoldRecords(adatypes.HoldResponse)
	err = readRequest.QueryFields("AA")
	if !assert.NoError(t, err) {
		return
	}
	fmt.Println("Waiting for hold thread ....")
	w := <-wait
	fmt.Println("Read hold ....")
	// ISN 1 is held by the parallel goroutine, so this read must fail.
	_, rerr := readRequest.ReadISN(1)
	if !assert.Error(t, rerr) {
		fmt.Println("Got error", rerr)
		return
	}
	// ADAGE91000 is the "record in hold" response code.
	assert.True(t, strings.HasPrefix(rerr.Error(), "ADAGE91000:"))
	end <- true
	// Drain the handshake until the hold goroutine confirms termination or
	// 20 seconds pass without a message.
	for w {
		select {
		case <-time.After(20 * time.Second):
			fmt.Println("timeout received")
			assert.Fail(t, "timeout received")
			w = false
		case w = <-wait:
			fmt.Println("wait received")
		case e := <-end:
			assert.True(t, e)
			w = false
		}
	}
}
// TestHoldRead verifies that a read with the HoldAccess option waits for the
// parallel holder to release its records (after about 10 seconds) and then
// succeeds, rather than failing immediately.
func TestHoldRead(t *testing.T) {
	initTestLogWithFile(t, "connection_hold.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	// wait/end: handshake channels with the parallel hold goroutine.
	wait := make(chan bool)
	end := make(chan bool)
	go parallelAccessHoldResponse(t, wait, end, true)
	connection, err := NewConnection("ada;target=24")
	if !assert.NoError(t, err) {
		return
	}
	defer connection.Close()
	//fmt.Println(connection)
	// Fix: the Open error was silently discarded; check it the same way
	// TestHoldResponse in this file does.
	err = connection.Open()
	if !assert.NoError(t, err) {
		return
	}
	readRequest, rErr := connection.CreateFileReadRequest(11)
	assert.NoError(t, rErr)
	// HoldAccess: the read waits on held records instead of erroring out.
	readRequest.SetHoldRecords(adatypes.HoldAccess)
	err = readRequest.QueryFields("AA")
	if !assert.NoError(t, err) {
		return
	}
	fmt.Println("Waiting for hold thread ....")
	<-wait
	fmt.Println("Read hold ....")
	// Blocks until the parallel goroutine releases the record, then succeeds.
	_, rerr := readRequest.ReadLogicalWith("AA=50005800")
	if !assert.NoError(t, rerr) {
		return
	}
	fmt.Println("Wait timeout ended ....")
	select {
	case e := <-end:
		assert.True(t, e, "Wrong end")
	case <-time.After(5 * time.Second):
		end <- true
	}
	fmt.Println("Wait hold thread ended ....")
	<-end
}
// parallelAccessHoldResponse is the holder goroutine for the hold tests: it
// puts ISNs 1-9 of file 11 into hold, signals readiness on wait, optionally
// releases the hold after 10 seconds (useTimeout), and finally performs an
// end-channel handshake (receives once, then sends true) before returning.
func parallelAccessHoldResponse(t *testing.T, wait chan bool, end chan bool, useTimeout bool) {
	fmt.Println("Start hold access ....")
	connection, err := NewConnection("ada;target=24")
	if !assert.NoError(t, err) {
		return
	}
	defer connection.Close()
	//fmt.Println(connection)
	// NOTE(review): the Open error is ignored here — consider checking it
	// like TestHoldResponse does.
	connection.Open()
	readRequest, rErr := connection.CreateFileReadRequest(11)
	assert.NoError(t, rErr)
	readRequest.SetHoldRecords(adatypes.HoldResponse)
	err = readRequest.QueryFields("AA")
	if !assert.NoError(t, err) {
		return
	}
	fmt.Println("Read in hold ISN 1 ....")
	// Reading with hold puts each ISN into hold for this connection.
	for i := adatypes.Isn(1); i < 10; i++ {
		_, rerr := readRequest.ReadISN(i)
		if !assert.NoError(t, rerr) {
			fmt.Println("Error parallel access.", rerr)
			// Signal failure to the main test so it does not wait forever.
			end <- false
			return
		}
	}
	fmt.Println("In hold ISN 1 ....")
	// Tell the main test that the records are now held.
	wait <- true
	if useTimeout {
		// Keep the hold for 10 seconds, then release so a HoldAccess read in
		// the main test can proceed.
		fmt.Println("Sleep 10 seconds ....")
		time.Sleep(10 * time.Second)
		_ = connection.Release()
		fmt.Println("Release ....")
	}
	// Handshake: wait for the main test, then confirm termination.
	<-end
	fmt.Println("End parallel access.")
	end <- true
}
|
package config
import (
"../structs"
"github.com/jinzhu/gorm"
"github.com/joho/godotenv"
"log"
"os"
)
// DBInit create connection to database
// DBInit loads database credentials from the .env file and opens the MySQL
// connection through GORM, auto-migrating all application tables.
// It terminates the process if the .env file cannot be loaded and panics if
// the connection fails.
func DBInit() *gorm.DB {
	if err := godotenv.Load(); err != nil {
		log.Fatal("Error loading .env file")
	}
	dbHost := os.Getenv("HOST_DB_DEV")
	dbName := os.Getenv("NAME_DB_DEV")
	dbRoot := os.Getenv("USER_DB_DEV")
	dbPass := os.Getenv("PASS_DB_DEV")
	// go-sql-driver DSN format is user:password@protocol(address)/dbname; the
	// previous string omitted "@tcp" and could never parse/connect.
	dsn := dbRoot + ":" + dbPass + "@tcp(" + dbHost + ":3306)/" + dbName + "?charset=utf8&parseTime=True&loc=Local"
	db, err := gorm.Open("mysql", dsn)
	if err != nil {
		// Include the underlying error so the failure cause is visible.
		panic("failed to connect to database: " + err.Error())
	}
	// AutoMigrate is variadic; migrate every table in a single call.
	db.AutoMigrate(
		structs.TbUserLogins{},
		structs.TbOutstanding{},
		structs.TbDelivery{},
		structs.TbLog{},
		structs.TbRetail{},
	)
	return db
}
|
package vsphere
import (
"encoding/hex"
"encoding/json"
"fmt"
"net"
"net/netip"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
machineapi "github.com/openshift/api/machine/v1beta1"
"github.com/openshift/installer/pkg/asset/installconfig"
"github.com/openshift/installer/pkg/tfvars/internal/cache"
vtypes "github.com/openshift/installer/pkg/types/vsphere"
)
// folder describes a vSphere folder to be created by Terraform, together
// with the datacenter it belongs to.
type folder struct {
	// Name is the folder name (the infra ID when no folder was configured).
	Name string `json:"name"`
	// Datacenter is the vSphere datacenter that contains the folder.
	Datacenter string `json:"vsphere_datacenter"`
}
// config is the JSON shape of the vSphere-specific Terraform variables file
// produced by TFVars.
type config struct {
	// OvaFilePath is the locally cached RHCOS OVA; empty when no import is needed.
	OvaFilePath string `json:"vsphere_ova_filepath"`
	DiskType vtypes.DiskType `json:"vsphere_disk_type"`
	// VCenters maps vCenter server name to its configuration.
	VCenters map[string]vtypes.VCenter `json:"vsphere_vcenters"`
	NetworksInFailureDomains map[string]string `json:"vsphere_networks"`
	ControlPlanes []*machineapi.VSphereMachineProviderSpec `json:"vsphere_control_planes"`
	// ControlPlaneNetworkKargs holds one kernel-argument string per control-plane machine.
	ControlPlaneNetworkKargs []string `json:"vsphere_control_plane_network_kargs"`
	BootStrapNetworkKargs string `json:"vsphere_bootstrap_network_kargs"`
	// DatacentersFolders maps "<datacenter>-<folder>" to the folder resource to create.
	DatacentersFolders map[string]*folder `json:"vsphere_folders"`
	// ImportOvaFailureDomainMap lists failure domains that need the OVA imported as a template.
	ImportOvaFailureDomainMap map[string]vtypes.FailureDomain `json:"vsphere_import_ova_failure_domain_map"`
	FailureDomainMap map[string]vtypes.FailureDomain `json:"vsphere_failure_domain_map"`
}
// TFVarsSources contains the parameters to be converted into Terraform variables.
type TFVarsSources struct {
	// ControlPlaneConfigs are the provider specs of the control-plane machines.
	ControlPlaneConfigs []*machineapi.VSphereMachineProviderSpec
	// ImageURL is the RHCOS OVA download location.
	ImageURL string
	DiskType vtypes.DiskType
	// NetworksInFailureDomain maps failure-domain name to network name.
	NetworksInFailureDomain map[string]string
	InstallConfig *installconfig.InstallConfig
	// InfraID is the cluster infrastructure identifier prefix.
	InfraID string
	ControlPlaneMachines []machineapi.Machine
}
// TFVars generates vSphere-specific Terraform variables as indented JSON.
func TFVars(sources TFVarsSources) ([]byte, error) {
	var err error
	cachedImage := ""
	failureDomainMap, importOvaFailureDomainMap := createFailureDomainMaps(sources.InstallConfig.Config.VSphere.FailureDomains, sources.InfraID)
	// Only download/cache the OVA when at least one failure domain needs the
	// template imported.
	if len(importOvaFailureDomainMap) > 0 {
		cachedImage, err = cache.DownloadImageFile(sources.ImageURL)
		if err != nil {
			return nil, errors.Wrap(err, "failed to use cached vsphere image")
		}
	}
	vcenterZones := convertVCentersToMap(sources.InstallConfig.Config.VSphere.VCenters)
	datacentersFolders, err := createDatacenterFolderMap(sources.InfraID, sources.InstallConfig.Config.VSphere.FailureDomains)
	if err != nil {
		return nil, err
	}
	cfg := &config{
		OvaFilePath:               cachedImage,
		DiskType:                  sources.DiskType,
		VCenters:                  vcenterZones,
		NetworksInFailureDomains:  sources.NetworksInFailureDomain,
		ControlPlanes:             sources.ControlPlaneConfigs,
		DatacentersFolders:        datacentersFolders,
		ImportOvaFailureDomainMap: importOvaFailureDomainMap,
		FailureDomainMap:          failureDomainMap,
		ControlPlaneNetworkKargs:  []string{},
	}
	// Static IP configuration is only generated when hosts are declared.
	if len(sources.InstallConfig.Config.VSphere.Hosts) > 0 {
		logrus.Debugf("Applying static IP configs")
		err = processGuestNetworkConfiguration(cfg, sources)
		if err != nil {
			return nil, err
		}
	}
	return json.MarshalIndent(cfg, "", "  ")
}
// createFailureDomainMaps indexes the failure domains by name, defaulting an
// empty folder to the infra ID and an empty template to the generated RHCOS
// template name. It returns the full map and the subset of domains that need
// the OVA imported (those without a preexisting template).
func createFailureDomainMaps(failureDomains []vtypes.FailureDomain, infraID string) (map[string]vtypes.FailureDomain, map[string]vtypes.FailureDomain) {
	needsImport := make(map[string]vtypes.FailureDomain)
	byName := make(map[string]vtypes.FailureDomain)
	for _, fd := range failureDomains {
		// fd is a copy, so these defaults do not mutate the caller's slice.
		if fd.Topology.Folder == "" {
			fd.Topology.Folder = infraID
		}
		if fd.Topology.Template == "" {
			fd.Topology.Template = fmt.Sprintf("%s-rhcos-%s-%s", infraID, fd.Region, fd.Zone)
			needsImport[fd.Name] = fd
		}
		byName[fd.Name] = fd
	}
	return byName, needsImport
}
// createDatacenterFolderMap loops over the failure domains; each one defines
// the vCenter datacenter and folder used by the virtual machines within that
// domain. A datacenter can be reused while a folder may be unique, so the map
// key combines the datacenter name and the folder name.
//
// Note: when a domain has no folder configured, this function also mutates
// the caller's failureDomains slice, setting Topology.Folder to infraID.
// The error return is currently always nil.
func createDatacenterFolderMap(infraID string, failureDomains []vtypes.FailureDomain) (map[string]*folder, error) {
	folders := make(map[string]*folder)
	for i, fd := range failureDomains {
		tempFolder := new(folder)
		tempFolder.Datacenter = fd.Topology.Datacenter
		tempFolder.Name = fd.Topology.Folder
		// Only if the folder is empty do we create a folder resource.
		// If a folder has been provided it means that it already exists
		// and it is to be used.
		if tempFolder.Name == "" {
			tempFolder.Name = infraID
			// Write the default back into the shared slice (see note above).
			failureDomains[i].Topology.Folder = infraID
			key := fmt.Sprintf("%s-%s", tempFolder.Datacenter, tempFolder.Name)
			folders[key] = tempFolder
		}
	}
	return folders, nil
}
// convertVCentersToMap indexes the configured vCenters by server name.
// A later duplicate server name overwrites an earlier one.
func convertVCentersToMap(values []vtypes.VCenter) map[string]vtypes.VCenter {
	// Pre-size the map: one entry per vCenter.
	vcenterMap := make(map[string]vtypes.VCenter, len(values))
	for _, v := range values {
		vcenterMap[v.Server] = v
	}
	return vcenterMap
}
func getSubnetMask(prefix netip.Prefix) (string, error) {
prefixLength := net.IPv4len * 8
if prefix.Addr().Is6() {
prefixLength = net.IPv6len * 8
}
ipMask := net.CIDRMask(prefix.Masked().Bits(), prefixLength)
maskBytes, err := hex.DecodeString(ipMask.String())
if err != nil {
return "", err
}
ip := net.IP(maskBytes)
maskStr := ip.To16().String()
return maskStr, nil
}
// constructKargsFromNetworkConfig builds dracut-style kernel arguments for
// static network configuration: one "ip=<ip>::<gateway>:<mask>:::none" per
// address in ipAddrs (CIDR strings) and one "nameserver=<ip>" per entry in
// nameservers. IPv6 addresses are bracketed, and the gateway is only applied
// to addresses of the matching family. Returns an error on unparsable
// gateway or address input.
func constructKargsFromNetworkConfig(ipAddrs []string, nameservers []string, gateway string) (string, error) {
	outKargs := ""
	var gatewayIP netip.Addr
	if len(gateway) > 0 {
		ip, err := netip.ParseAddr(gateway)
		if err != nil {
			return "", err
		}
		// IPv6 gateways must be bracketed in the karg syntax.
		if ip.Is6() {
			gateway = fmt.Sprintf("[%s]", gateway)
		}
		gatewayIP = ip
	}
	for _, address := range ipAddrs {
		prefix, err := netip.ParsePrefix(address)
		if err != nil {
			return "", err
		}
		var ipStr, gatewayStr, maskStr string
		addr := prefix.Addr()
		switch {
		case addr.Is6():
			// IPv6: mask is the prefix length; address is bracketed.
			maskStr = fmt.Sprintf("%d", prefix.Bits())
			ipStr = fmt.Sprintf("[%s]", addr.String())
			// Only attach the gateway if it is also IPv6.
			if len(gateway) > 0 && gatewayIP.Is6() {
				gatewayStr = gateway
			}
		case addr.Is4():
			// IPv4: mask is the dotted-quad subnet mask.
			maskStr, err = getSubnetMask(prefix)
			if err != nil {
				return "", err
			}
			if len(gateway) > 0 && gatewayIP.Is4() {
				gatewayStr = gateway
			}
			ipStr = addr.String()
		default:
			return "", errors.New("IP address must adhere to IPv4 or IPv6 format")
		}
		outKargs += fmt.Sprintf("ip=%s::%s:%s:::none ", ipStr, gatewayStr, maskStr)
	}
	for _, nameserver := range nameservers {
		// Bracket IPv6 nameservers; To4 is nil for non-IPv4 addresses.
		ip := net.ParseIP(nameserver)
		if ip.To4() == nil {
			nameserver = fmt.Sprintf("[%s]", nameserver)
		}
		outKargs += fmt.Sprintf("nameserver=%s ", nameserver)
	}
	// Drop the trailing separator space.
	outKargs = strings.Trim(outKargs, " ")
	logrus.Debugf("Generated karg: [%v].", outKargs)
	return outKargs, nil
}
// processGuestNetworkConfiguration takes the config and sources data and generates the kernel arguments (kargs)
// needed to boot RHCOS with static IP configurations.
//
// Bootstrap kargs come from the first host with the bootstrap role in the
// platform spec; control-plane kargs are generated per machine from its
// first network device. Returns an error when a control-plane machine has
// no network devices or when karg construction fails.
func processGuestNetworkConfiguration(cfg *config, sources TFVarsSources) error {
	platform := sources.InstallConfig.Config.Platform.VSphere
	// Generate bootstrap karg using vsphere platform info from install-config
	for _, host := range platform.Hosts {
		if host.Role == vtypes.BootstrapRole {
			logrus.Debugf("Generating kargs for bootstrap")
			network := host.NetworkDevice
			kargs, err := constructKargsFromNetworkConfig(network.IPAddrs, network.Nameservers, network.Gateway)
			if err != nil {
				return err
			}
			cfg.BootStrapNetworkKargs = kargs
			break
		}
	}
	// Generate control plane kargs using info from machine network config
	for _, machine := range sources.ControlPlaneConfigs {
		logrus.Debugf("Generating kargs for control plane %v", machine.GenerateName)
		// Guard against a machine spec without network devices; indexing
		// Devices[0] unconditionally would panic here.
		if len(machine.Network.Devices) == 0 {
			return fmt.Errorf("control plane machine %v has no network devices", machine.GenerateName)
		}
		network := machine.Network.Devices[0]
		kargs, err := constructKargsFromNetworkConfig(network.IPAddrs, network.Nameservers, network.Gateway)
		if err != nil {
			return err
		}
		cfg.ControlPlaneNetworkKargs = append(cfg.ControlPlaneNetworkKargs, kargs)
	}
	return nil
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package testsuite
/*
An object that will be passed in to every test, which the user can use to
manipulate the results of the test: failing it outright via Fatal, or
asserting conditions via AssertTrue.
*/
type TestContext struct {}
/*
Fatal fails the test with the given error.
*/
func (tc TestContext) Fatal(err error) {
	// Panicking (inside failTest) is deliberate: it halts whatever the test
	// is doing immediately.
	failTest(err)
}
/*
AssertTrue fails the test with the given error unless the condition holds.
*/
func (tc TestContext) AssertTrue(condition bool, err error) {
	if !condition {
		failTest(err)
	}
}
// failTest aborts the running test by panicking with the given error; the
// panic is the mechanism that unwinds out of arbitrary user test code.
func failTest(err error) {
	panic(err)
}
|
package monitor_step_test
import (
"errors"
"net/http"
"net/url"
"time"
"github.com/cloudfoundry-incubator/executor/sequence"
"github.com/cloudfoundry-incubator/executor/sequence/fake_step"
. "github.com/cloudfoundry-incubator/executor/steps/monitor_step"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
)
// This suite drives MonitorStep with a scripted health check (fake_step) and
// a ghttp server recording the healthy/unhealthy hook requests.
//
// Fix: the two "does not hit any endpoint" assertions previously passed the
// *result* of hookServer.ReceivedRequests() to Consistently — a one-time
// snapshot that can never observe requests arriving during the polling
// window. They now pass the function itself so Gomega re-evaluates it on
// every poll, consistent with every other assertion in this suite.
var _ = Describe("MonitorStep", func() {
	var (
		check              sequence.Step
		checkResults       []error
		checkTimes         chan time.Time
		interruptCheck     chan struct{}
		healthyThreshold   uint
		unhealthyThreshold uint

		healthyHookURL   *url.URL
		unhealthyHookURL *url.URL

		step sequence.Step

		hookServer *ghttp.Server
	)

	BeforeEach(func() {
		stepSequence := 0

		checkResults = []error{}
		checkTimes = make(chan time.Time, 1024)
		interruptCheck = make(chan struct{})

		// The fake check replays checkResults in order; once the script is
		// exhausted it blocks until interruptCheck is closed in AfterEach.
		check = fake_step.FakeStep{
			WhenPerforming: func() error {
				checkTimes <- time.Now()

				if len(checkResults) <= stepSequence {
					<-interruptCheck
					return nil
				}

				result := checkResults[stepSequence]
				stepSequence++
				return result
			},
		}

		hookServer = ghttp.NewServer()

		healthyHookURL = &url.URL{
			Scheme: "http",
			Host:   hookServer.HTTPTestServer.Listener.Addr().String(),
			Path:   "/healthy",
		}

		unhealthyHookURL = &url.URL{
			Scheme: "http",
			Host:   hookServer.HTTPTestServer.Listener.Addr().String(),
			Path:   "/unhealthy",
		}
	})

	JustBeforeEach(func() {
		step = New(
			check,
			healthyThreshold,
			unhealthyThreshold,
			&http.Request{
				Method: "PUT",
				URL:    healthyHookURL,
			},
			&http.Request{
				Method: "PUT",
				URL:    unhealthyHookURL,
			},
		)
	})

	Describe("Perform", func() {
		Context("when the healthy and unhealthy threshold is 2", func() {
			BeforeEach(func() {
				healthyThreshold = 2
				unhealthyThreshold = 2
			})

			JustBeforeEach(func() {
				go step.Perform()
			})

			AfterEach(func() {
				// unblocking check sequence; opens the floodgates, so ignore any
				// requests after this point
				hookServer.AllowUnhandledRequests = true
				close(interruptCheck)

				step.Cancel()
			})

			Context("when the check succeeds", func() {
				BeforeEach(func() {
					checkResults = append(checkResults, nil)
				})

				It("does not hit any endpoint", func() {
					// Pass the function (not its result) so Consistently polls it.
					Consistently(hookServer.ReceivedRequests).Should(BeEmpty())
				})

				Context("and then fails", func() {
					BeforeEach(func() {
						checkResults = append(checkResults, errors.New("nope"))
					})

					It("checked again after half a second", func() {
						time1 := <-checkTimes
						time2 := <-checkTimes
						Ω(time2.Sub(time1)).Should(BeNumerically(">=", 500*time.Millisecond))
						Ω(time2.Sub(time1)).Should(BeNumerically("<", 1*time.Second))
					})
				})

				Context("and succeeds again", func() {
					BeforeEach(func() {
						checkResults = append(checkResults, nil)

						hookServer.AppendHandlers(
							ghttp.VerifyRequest("PUT", "/healthy"),
						)
					})

					It("hits the healthy endpoint", func() {
						Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
					})

					Context("when hitting the endpoint fails", func() {
						BeforeEach(func() {
							hookServer.SetHandler(0, func(w http.ResponseWriter, r *http.Request) {
								hookServer.HTTPTestServer.CloseClientConnections()
							})
						})

						It("keeps calm and carries on", func() {
							Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
						})
					})

					Context("and again", func() {
						BeforeEach(func() {
							checkResults = append(checkResults, nil)
						})

						It("hits the healthy endpoint once and only once", func() {
							Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
							Consistently(hookServer.ReceivedRequests).Should(HaveLen(1))
						})

						Context("and again", func() {
							BeforeEach(func() {
								checkResults = append(checkResults, nil)

								hookServer.AppendHandlers(
									ghttp.VerifyRequest("PUT", "/healthy"),
								)
							})

							It("hits the healthy endpoint a total of two times", func() {
								Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(2))
							})

							It("had checked on an exponentially increasing backoff", func() {
								time1 := <-checkTimes
								time2 := <-checkTimes
								Ω(time2.Sub(time1)).Should(BeNumerically(">=", 500*time.Millisecond))
								Ω(time2.Sub(time1)).Should(BeNumerically("<", 1*time.Second))

								time3 := <-checkTimes
								Ω(time3.Sub(time2)).Should(BeNumerically(">=", 1*time.Second))
								Ω(time3.Sub(time2)).Should(BeNumerically("<", 2*time.Second))

								time4 := <-checkTimes
								Ω(time4.Sub(time3)).Should(BeNumerically(">=", 2*time.Second))
								Ω(time4.Sub(time3)).Should(BeNumerically("<", 3*time.Second))
							})
						})
					})
				})
			})

			Context("when the check fails", func() {
				BeforeEach(func() {
					checkResults = append(checkResults, errors.New("nope"))
				})

				It("does not hit any endpoint", func() {
					// Pass the function (not its result) so Consistently polls it.
					Consistently(hookServer.ReceivedRequests).Should(BeEmpty())
				})

				Context("and fails again", func() {
					BeforeEach(func() {
						checkResults = append(checkResults, errors.New("nope"))

						hookServer.AppendHandlers(
							ghttp.VerifyRequest("PUT", "/unhealthy"),
						)
					})

					It("hits the unhealthy endpoint", func() {
						Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
					})

					Context("and again", func() {
						BeforeEach(func() {
							checkResults = append(checkResults, errors.New("nope"))
						})

						It("hits the unhealthy endpoint once and only once", func() {
							Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
							Consistently(hookServer.ReceivedRequests).Should(HaveLen(1))
						})

						Context("and again", func() {
							BeforeEach(func() {
								checkResults = append(checkResults, errors.New("nope"))

								hookServer.AppendHandlers(
									ghttp.VerifyRequest("PUT", "/unhealthy"),
								)
							})

							It("hits the unhealthy endpoint a total of two times", func() {
								Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(2))
							})

							It("had checked again after half a second", func() {
								time1 := <-checkTimes
								time2 := <-checkTimes
								Ω(time2.Sub(time1)).Should(BeNumerically(">=", 500*time.Millisecond))
								Ω(time2.Sub(time1)).Should(BeNumerically("<", 1*time.Second))

								time3 := <-checkTimes
								Ω(time3.Sub(time2)).Should(BeNumerically(">=", 500*time.Millisecond))
								Ω(time3.Sub(time2)).Should(BeNumerically("<", 1*time.Second))

								time4 := <-checkTimes
								Ω(time4.Sub(time3)).Should(BeNumerically(">=", 500*time.Millisecond))
								Ω(time4.Sub(time3)).Should(BeNumerically("<", 1*time.Second))
							})
						})
					})
				})
			})

			Context("when the check succeeds, fails, succeeds, and fails", func() {
				BeforeEach(func() {
					checkResults = append(checkResults, nil, errors.New("nope"), nil, errors.New("nope"))
				})

				It("does not hit any endpoint", func() {
					Consistently(hookServer.ReceivedRequests).Should(BeEmpty())
				})
			})
		})

		Context("when the healthy and unhealthy thresholds are not specified", func() {
			BeforeEach(func() {
				healthyThreshold = 0
				unhealthyThreshold = 0
			})

			JustBeforeEach(func() {
				go step.Perform()
			})

			AfterEach(func() {
				// unblocking check sequence; opens the floodgates, so ignore any
				// requests after this point
				hookServer.AllowUnhandledRequests = true
				close(interruptCheck)

				step.Cancel()
			})

			Context("when the check succeeds", func() {
				BeforeEach(func() {
					checkResults = append(checkResults, nil)

					hookServer.AppendHandlers(
						ghttp.VerifyRequest("PUT", "/healthy"),
					)
				})

				It("hits the healthy endpoint", func() {
					Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
				})
			})

			Context("when the check fails", func() {
				BeforeEach(func() {
					checkResults = append(checkResults, errors.New("nope"))

					hookServer.AppendHandlers(
						ghttp.VerifyRequest("PUT", "/unhealthy"),
					)
				})

				It("hits the unhealthy endpoint", func() {
					Eventually(hookServer.ReceivedRequests, 10).Should(HaveLen(1))
				})
			})
		})
	})

	Describe("Cancel", func() {
		It("interrupts the monitoring", func() {
			performResult := make(chan error)

			go func() { performResult <- step.Perform() }()

			step.Cancel()

			Eventually(performResult).Should(Receive())
		})
	})
})
|
package main
import (
"fmt"
"log"
"net/http"
)
// main starts an HTTP echo server on localhost:8000; every path is served
// by handler. ListenAndServe only returns on failure, which is fatal.
func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("localhost:8000", nil))
}
func handler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "%s %s %s\n", r.URL, r.Method, r.Proto)
for key, val := range r.Header {
fmt.Fprintf(w, "[%q] = %q\n", key, val)
}
fmt.Fprintf(w, "Host = %s\n", r.Host)
fmt.Fprintf(w, "Remote Address = %s\n", r.RemoteAddr)
}
|
package handler
import (
e "crud/error"
"crud/model"
"github.com/gofiber/fiber/v2"
log "crud/logger"
)
// ErrorResponseHandler converts an error into the appropriate JSON error
// response: DefinedError uses its own HTTP status, FieldValidationError maps
// to 400, and anything else becomes a generic 500.
func ErrorResponseHandler(c *fiber.Ctx, err error) error {
	switch typed := err.(type) {
	case e.DefinedError:
		log.Logger().Warn("Handle DefinedError.")
		body := model.ErrorResponse(typed.Code, typed.Message, typed.Detail)
		return c.Status(typed.HttpStatus).JSON(body)
	case e.FieldValidationError:
		log.Logger().Warn("Handle FieldValidationError.")
		body := model.ErrorResponse(typed.Code, typed.Message, typed.FieldErrors)
		return c.Status(400).JSON(body)
	default:
		log.Logger().Errorf("Handle UndefinedError. Caused: %v.", err)
		body := model.ErrorResponse("500", err.Error(), nil)
		return c.Status(500).JSON(body)
	}
}
|
package managers
// lang lists the locale codes supported by the managers package.
// NOTE(review): most entries are ISO 639-1 language codes, but "se", "ir"
// and "jp" look like country codes (Swedish is "sv", Persian "fa", Japanese
// "ja") — confirm against whatever consumes this list before changing.
var lang = []string{"en",
	"fr",
	"it",
	"es",
	"se",
	"nl",
	"tr",
	"pt",
	"pl",
	"ru",
	"ir",
	"id",
	"jp",
}
|
//go:build go1.13
// +build go1.13
package socketmode
import (
"context"
"errors"
"testing"
"time"
"github.com/slack-go/slack"
"github.com/slack-go/slack/slacktest"
"github.com/stretchr/testify/assert"
)
// Test_passContext verifies that an already-expired context is honored by
// RunContext, openAndDial and OpenContext: each must fail immediately with
// context.DeadlineExceeded instead of attempting the operation.
func Test_passContext(t *testing.T) {
	s := slacktest.NewTestServer()
	go s.Start()

	api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
	cli := New(api)

	// A nanosecond timeout guarantees the context has expired before use.
	ctx, cancel := context.WithTimeout(context.TODO(), time.Nanosecond)
	defer cancel()

	t.Run("RunWithContext", func(t *testing.T) {
		// should fail immediately.
		assert.EqualError(t, cli.RunContext(ctx), context.DeadlineExceeded.Error())
	})

	t.Run("openAndDial", func(t *testing.T) {
		_, _, err := cli.openAndDial(ctx, func(_ string) error { return nil })
		// should fail immediately.
		assert.EqualError(t, errors.Unwrap(err), context.DeadlineExceeded.Error())
	})

	t.Run("OpenWithContext", func(t *testing.T) {
		_, _, err := cli.OpenContext(ctx)
		// should fail immediately.
		assert.EqualError(t, errors.Unwrap(err), context.DeadlineExceeded.Error())
	})
}
|
/*
Challenge
Sandbox post
Given a positive integer (K) Output a uniformly-random integer (Y) between [0, K).
If Y > 0 Assume K = Y and repeat the process until Y = 0.
Rules
Input must be printed at first
Output format as you wish
Your program must finish.
0 must be the final output, Optionally an empty line instead 0
*/
package main
import (
"crypto/rand"
"fmt"
"math/big"
)
// main runs the challenge with an initial upper bound of 10000.
func main() {
	randomize(10000)
}
// randomize repeatedly draws a uniformly random integer y in [0, k) using
// crypto/rand and prints each draw. Per the challenge rules stated at the
// top of this file, after every non-zero draw the bound becomes that draw
// (K = Y); the loop terminates once 0 is drawn.
//
// Fix: the original kept drawing from the initial bound and never shrank it,
// violating the "Assume K = Y and repeat" rule. It also ignored the error
// from rand.Int, which would have caused a nil-pointer panic on y.Cmp.
func randomize(k int64) {
	zero := big.NewInt(0)
	limit := big.NewInt(k)
	for {
		y, err := rand.Int(rand.Reader, limit)
		if err != nil {
			// rand.Reader should never fail; without entropy we cannot continue.
			panic(err)
		}
		fmt.Println(y)
		if y.Cmp(zero) == 0 {
			break
		}
		// Shrink the range: the next draw is uniform over [0, y).
		limit = y
	}
}
|
package version
var (
	// BuildDate is provided at build time.
	// (Presumably injected via `-ldflags "-X .../version.BuildDate=..."` —
	// confirm against the build scripts.)
	BuildDate string

	// Revision is provided at build time.
	Revision string
)
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import (
"encoding/binary"
"reflect"
"unsafe"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mathutil"
)
// Codec is used to:
// 1. encode a Chunk to a byte slice.
// 2. decode a Chunk from a byte slice.
type Codec struct {
	// colTypes is used to check whether a Column is fixed sized and what the
	// fixed size for every element.
	// NOTE: It's only used for decoding; encoding derives everything it
	// needs from the Column itself.
	colTypes []*types.FieldType
}
// NewCodec creates a new Codec object for encode or decode a Chunk.
func NewCodec(colTypes []*types.FieldType) *Codec {
	return &Codec{colTypes: colTypes}
}
// Encode serializes every column of chk, in order, into a single byte slice.
// The buffer is pre-sized from the chunk's memory usage to avoid growth copies.
func (c *Codec) Encode(chk *Chunk) []byte {
	out := make([]byte, 0, chk.MemoryUsage())
	for _, column := range chk.columns {
		out = c.encodeColumn(out, column)
	}
	return out
}
// encodeColumn appends one column to buffer in the layout decodeColumn
// expects: length (uint32 LE), nullCount (uint32 LE), the nullBitmap (only
// when nullCount > 0), the offsets (only for var-length columns), then the
// raw data bytes.
func (*Codec) encodeColumn(buffer []byte, col *Column) []byte {
	var lenBuffer [4]byte
	// encode length.
	binary.LittleEndian.PutUint32(lenBuffer[:], uint32(col.length))
	buffer = append(buffer, lenBuffer[:4]...)
	// encode nullCount.
	binary.LittleEndian.PutUint32(lenBuffer[:], uint32(col.nullCount()))
	buffer = append(buffer, lenBuffer[:4]...)
	// encode nullBitmap.
	// When there are no nulls the bitmap is omitted entirely; the decoder
	// reconstructs an all-ones bitmap via setAllNotNull.
	if col.nullCount() > 0 {
		numNullBitmapBytes := (col.length + 7) / 8
		buffer = append(buffer, col.nullBitmap[:numNullBitmapBytes]...)
	}
	// encode offsets.
	// Var-length columns carry length+1 int64 offsets, 8 bytes each.
	if !col.isFixed() {
		numOffsetBytes := (col.length + 1) * 8
		offsetBytes := i64SliceToBytes(col.offsets)
		buffer = append(buffer, offsetBytes[:numOffsetBytes]...)
	}
	// encode data.
	buffer = append(buffer, col.data...)
	return buffer
}
// i64SliceToBytes reinterprets the backing array of i64s as a byte slice
// without copying; the result aliases i64s, so the caller must keep i64s
// alive while the bytes are in use.
// NOTE(review): constructing a slice through reflect.SliceHeader with a
// uintptr Data field is flagged by go vet; unsafe.Slice (Go 1.17+) is the
// recommended replacement — confirm the module's minimum Go version (and
// any other users of reflect in this file) before migrating.
func i64SliceToBytes(i64s []int64) (b []byte) {
	if len(i64s) == 0 {
		return nil
	}
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	hdr.Len = len(i64s) * 8
	hdr.Cap = hdr.Len
	hdr.Data = uintptr(unsafe.Pointer(&i64s[0]))
	return b
}
// Decode decodes a Chunk from a byte slice, return the remained unused bytes.
// Column types are taken positionally from c.colTypes; decoding continues
// until the buffer is exhausted.
func (c *Codec) Decode(buffer []byte) (*Chunk, []byte) {
	chk := new(Chunk)
	for ordinal := 0; len(buffer) > 0; ordinal++ {
		column := new(Column)
		buffer = c.decodeColumn(buffer, column, ordinal)
		chk.columns = append(chk.columns, column)
	}
	return chk, buffer
}
// DecodeToChunk decodes a Chunk from a byte slice, return the remained unused bytes.
// Unlike Decode, it fills the columns already present in chk instead of
// allocating new ones.
func (c *Codec) DecodeToChunk(buffer []byte, chk *Chunk) (remained []byte) {
	for ordinal := range chk.columns {
		buffer = c.decodeColumn(buffer, chk.columns[ordinal], ordinal)
	}
	return buffer
}
// decodeColumn decodes a Column from a byte slice, return the remained unused bytes.
// The column's bitmap, offsets and data slices alias buffer rather than
// copying it (see the avoidReusing note at the bottom).
func (c *Codec) decodeColumn(buffer []byte, col *Column, ordinal int) (remained []byte) {
	// Todo(Shenghui Wu): Optimize all data is null.
	// decode length.
	col.length = int(binary.LittleEndian.Uint32(buffer))
	buffer = buffer[4:]
	// decode nullCount.
	nullCount := int(binary.LittleEndian.Uint32(buffer))
	buffer = buffer[4:]
	// decode nullBitmap.
	// A zero nullCount means the encoder omitted the bitmap (see
	// encodeColumn), so rebuild an all-ones bitmap instead.
	if nullCount > 0 {
		numNullBitmapBytes := (col.length + 7) / 8
		// Three-index slicing caps the view so later appends cannot stomp
		// the remainder of buffer.
		col.nullBitmap = buffer[:numNullBitmapBytes:numNullBitmapBytes]
		buffer = buffer[numNullBitmapBytes:]
	} else {
		c.setAllNotNull(col)
	}
	// decode offsets.
	numFixedBytes := getFixedLen(c.colTypes[ordinal])
	numDataBytes := int64(numFixedBytes * col.length)
	if numFixedBytes == -1 {
		// Var-length column: the last offset gives the total data size.
		numOffsetBytes := (col.length + 1) * 8
		col.offsets = bytesToI64Slice(buffer[:numOffsetBytes:numOffsetBytes])
		buffer = buffer[numOffsetBytes:]
		numDataBytes = col.offsets[col.length]
	} else if cap(col.elemBuf) < numFixedBytes {
		col.elemBuf = make([]byte, numFixedBytes)
	}
	// decode data.
	col.data = buffer[:numDataBytes:numDataBytes]
	// The column reference the data of the grpc response, the memory of the grpc message cannot be GCed if we reuse
	// this column. Thus, we set `avoidReusing` to true.
	col.avoidReusing = true
	return buffer[numDataBytes:]
}
// allNotNullBitmap is a 128-byte all-ones template (filled in init) copied
// into a column's nullBitmap to mark every row as not-null.
var allNotNullBitmap [128]byte
// setAllNotNull rebuilds col.nullBitmap as all ones, marking every one of
// col.length rows as not-null. Bytes are copied from the allNotNullBitmap
// template in chunks of up to 128 bytes.
func (*Codec) setAllNotNull(col *Column) {
	need := (col.length + 7) / 8
	col.nullBitmap = col.nullBitmap[:0]
	for filled := 0; filled < need; {
		n := mathutil.Min(need-filled, cap(allNotNullBitmap))
		col.nullBitmap = append(col.nullBitmap, allNotNullBitmap[:n]...)
		filled += n
	}
}
// bytesToI64Slice is the inverse of i64SliceToBytes: it reinterprets b as an
// []int64 without copying. The result aliases b; the same reflect.SliceHeader
// caveat noted on i64SliceToBytes applies here.
func bytesToI64Slice(b []byte) (i64s []int64) {
	if len(b) == 0 {
		return nil
	}
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&i64s))
	hdr.Len = len(b) / 8
	hdr.Cap = hdr.Len
	hdr.Data = uintptr(unsafe.Pointer(&b[0]))
	return i64s
}
// varElemLen indicates this Column is a variable length Column.
// It is the sentinel returned by getFixedLen for non-fixed-size types.
const varElemLen = -1
// getFixedLen returns the per-element byte width for fixed-size column
// types, or varElemLen (-1) for variable-length types.
func getFixedLen(colType *types.FieldType) int {
	switch colType.GetType() {
	case mysql.TypeFloat:
		return 4
	// All of these are stored as 8-byte values.
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong,
		mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeYear, mysql.TypeDuration:
		return 8
	case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp:
		return sizeTime
	case mysql.TypeNewDecimal:
		return types.MyDecimalStructSize
	default:
		return varElemLen
	}
}
// GetFixedLen get the memory size of a fixed-length type.
// if colType is not fixed-length, it returns varElemLen, aka -1.
// Exported wrapper around getFixedLen.
func GetFixedLen(colType *types.FieldType) int {
	return getFixedLen(colType)
}
// EstimateTypeWidth estimates the average width of values of the type.
// This is used by the planner, which doesn't require absolutely correct results;
// it's OK (and expected) to guess if we don't know for sure.
//
// mostly study from https://github.com/postgres/postgres/blob/REL_12_STABLE/src/backend/utils/cache/lsyscache.c#L2356
func EstimateTypeWidth(colType *types.FieldType) int {
	// Easy if it's a fixed-width type.
	if width := getFixedLen(colType); width != varElemLen {
		return width
	}
	flen := colType.GetFlen()
	switch {
	case flen <= 0:
		// Oops, we have no idea ... wild guess time.
		return 32
	case flen <= 32:
		return flen
	case flen < 1000:
		return 32 + (flen-32)/2 // assume 50%
	default:
		// Beyond 1000, assume we're looking at something like
		// "varchar(10000)" where the limit isn't actually reached often,
		// and use a fixed estimate.
		return 32 + (1000-32)/2
	}
}
// init fills the shared allNotNullBitmap template with all-ones bytes.
func init() {
	for i := range allNotNullBitmap {
		allNotNullBitmap[i] = 0xFF
	}
}
// Decoder decodes the data returned from the coprocessor and stores the result in Chunk.
// How Decoder works:
// 1. Initialization phase: Decode a whole input byte slice to Decoder.intermChk(intermediate chunk) using Codec.Decode.
//    intermChk is introduced to simplify the implementation of decode phase. This phase uses pointer operations with
//    less CPU and memory cost.
// 2. Decode phase:
//    2.1 Set the number of rows to be decoded to a value that is a multiple of 8 and greater than
//        `chk.RequiredRows() - chk.NumRows()`. This reduces the overhead of copying the srcCol.nullBitMap into
//        destCol.nullBitMap.
//    2.2 Append srcCol.offsets to destCol.offsets when the elements is of var-length type. And further adjust the
//        offsets according to descCol.offsets[destCol.length]-srcCol.offsets[0].
//    2.3 Append srcCol.nullBitMap to destCol.nullBitMap.
// 3. Go to step 1 when the input byte slice is consumed.
type Decoder struct {
	intermChk    *Chunk // staging chunk whose columns alias the raw input bytes
	codec        *Codec
	remainedRows int // rows still in intermChk that have not been copied out
}
// NewDecoder creates a new Decoder object for decode a Chunk.
func NewDecoder(chk *Chunk, colTypes []*types.FieldType) *Decoder {
	return &Decoder{
		intermChk:    chk,
		codec:        NewCodec(colTypes),
		remainedRows: 0,
	}
}
// Decode decodes multiple rows of Decoder.intermChk and stores the result in chk.
func (c *Decoder) Decode(chk *Chunk) {
	rows := chk.RequiredRows() - chk.NumRows()
	// Round up to a multiple of 8 so nullBitmap transfers stay byte-aligned.
	rows = (rows + 7) >> 3 << 3
	if rows > c.remainedRows {
		rows = c.remainedRows
	}
	for ordinal := 0; ordinal < chk.NumCols(); ordinal++ {
		c.decodeColumn(chk, ordinal, rows)
	}
	c.remainedRows -= rows
}
// Reset decodes data and store the result in Decoder.intermChk. This decode phase uses pointer operations with less
// CPU and memory costs.
func (c *Decoder) Reset(data []byte) {
	c.codec.DecodeToChunk(data, c.intermChk)
	c.remainedRows = c.intermChk.NumRows()
}

// IsFinished indicates whether Decoder.intermChk has been dried up.
func (c *Decoder) IsFinished() bool {
	return c.remainedRows == 0
}

// RemainedRows indicates Decoder.intermChk has remained rows.
func (c *Decoder) RemainedRows() int {
	return c.remainedRows
}
// ReuseIntermChk swaps `Decoder.intermChk` with `chk` directly when `Decoder.intermChk.NumRows()` is no less
// than `chk.requiredRows * factor` where `factor` is 0.8 now. This can avoid the overhead of appending the
// data from `Decoder.intermChk` to `chk`. Moreover, the column.offsets needs to be further adjusted
// according to column.offset[0].
func (c *Decoder) ReuseIntermChk(chk *Chunk) {
	for i, col := range c.intermChk.columns {
		col.length = c.remainedRows
		elemLen := getFixedLen(c.codec.colTypes[i])
		if elemLen == varElemLen {
			// For var-length types, we need to adjust the offsets before reuse.
			// Rebase every offset so offsets[0] becomes 0, since earlier
			// decode passes may have consumed a prefix of this column.
			if deltaOffset := col.offsets[0]; deltaOffset != 0 {
				for j := 0; j < len(col.offsets); j++ {
					col.offsets[j] -= deltaOffset
				}
			}
		}
	}
	chk.SwapColumns(c.intermChk)
	c.remainedRows = 0
}
// decodeColumn moves requiredRows rows of column `ordinal` from the staging
// chunk into chk, splicing data, offsets and the null bitmap onto the
// destination column and consuming the corresponding prefix of the source.
func (c *Decoder) decodeColumn(chk *Chunk, ordinal int, requiredRows int) {
	elemLen := getFixedLen(c.codec.colTypes[ordinal])
	numDataBytes := int64(elemLen * requiredRows)
	srcCol := c.intermChk.columns[ordinal]
	destCol := chk.columns[ordinal]
	if elemLen == varElemLen {
		// For var-length types, we need to adjust the offsets after appending to destCol.
		numDataBytes = srcCol.offsets[requiredRows] - srcCol.offsets[0]
		deltaOffset := destCol.offsets[destCol.length] - srcCol.offsets[0]
		destCol.offsets = append(destCol.offsets, srcCol.offsets[1:requiredRows+1]...)
		// Rebase the newly appended offsets into destCol's coordinate space.
		for i := destCol.length + 1; i <= destCol.length+requiredRows; i++ {
			destCol.offsets[i] = destCol.offsets[i] + deltaOffset
		}
		srcCol.offsets = srcCol.offsets[requiredRows:]
	}
	numNullBitmapBytes := (requiredRows + 7) >> 3
	if destCol.length%8 == 0 {
		// Byte-aligned: the source bitmap bytes can be appended verbatim.
		destCol.nullBitmap = append(destCol.nullBitmap, srcCol.nullBitmap[:numNullBitmapBytes]...)
	} else {
		// Misaligned: extend the bitmap, then OR each source byte in across
		// the byte boundary (low bits into the current slot, high bits into
		// the next one).
		destCol.appendMultiSameNullBitmap(false, requiredRows)
		bitMapLen := len(destCol.nullBitmap)
		// bitOffset indicates the number of valid bits in destCol.nullBitmap's last byte.
		bitOffset := destCol.length % 8
		startIdx := (destCol.length - 1) >> 3
		for i := 0; i < numNullBitmapBytes; i++ {
			destCol.nullBitmap[startIdx+i] |= srcCol.nullBitmap[i] << bitOffset
			// The high order 8-bitOffset bits in `srcCol.nullBitmap[i]` should be appended to the low order of the next slot.
			if startIdx+i+1 < bitMapLen {
				destCol.nullBitmap[startIdx+i+1] |= srcCol.nullBitmap[i] >> (8 - bitOffset)
			}
		}
	}
	// Set all the redundant bits in the last slot of destCol.nullBitmap to 0.
	numRedundantBits := uint(len(destCol.nullBitmap)*8 - destCol.length - requiredRows)
	bitMask := byte(1<<(8-numRedundantBits)) - 1
	destCol.nullBitmap[len(destCol.nullBitmap)-1] &= bitMask

	srcCol.nullBitmap = srcCol.nullBitmap[numNullBitmapBytes:]
	destCol.length += requiredRows

	destCol.data = append(destCol.data, srcCol.data[:numDataBytes]...)
	srcCol.data = srcCol.data[numDataBytes:]
}
|
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
	// cfgFile holds the --config flag value; empty means "search $HOME".
	cfgFile string

	// rootCmd is the top-level yakshop command; subcommands are attached in init.
	rootCmd = &cobra.Command{
		Use:   "yakshop",
		Short: "A cli to work with YakShop",
		Long:  "YakShop CLI allows you to query data or start an HTTP server",
	}
)
// Execute runs the root command; it is the CLI's main entry point.
func Execute() error {
	return rootCmd.Execute()
}
// init wires up global flags, the getData subcommand's flags, and the
// subcommand tree before execution.
func init() {
	cobra.OnInitialize(initConfig)
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
	rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
	getDataCmd.PersistentFlags().StringP("file", "f", "", "XML file to read from")
	getDataCmd.PersistentFlags().Int64P("days", "d", 0, "Elapsed time in days")
	// MarkPersistentFlagRequired only errors when the named flag does not
	// exist; the flags are declared just above, so the error is ignored here.
	getDataCmd.MarkPersistentFlagRequired("file")
	getDataCmd.MarkPersistentFlagRequired("days")
	rootCmd.AddCommand(getDataCmd)
	rootCmd.AddCommand(appCmd)
}
// initConfig loads configuration: the file named by --config when given,
// otherwise $HOME/.cobra.yaml; environment variables always override.
func initConfig() {
	if cfgFile == "" {
		// No --config flag: search the home directory for ".cobra" (yaml,
		// without extension).
		home, err := os.UserHomeDir()
		cobra.CheckErr(err)
		viper.AddConfigPath(home)
		viper.SetConfigType("yaml")
		viper.SetConfigName(".cobra")
	} else {
		// Use config file from the flag.
		viper.SetConfigFile(cfgFile)
	}

	viper.AutomaticEnv()

	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
|
package main
import (
"fmt"
"net"
)
// whois queries the given WHOIS server (port 43) for dom and returns the raw
// response text. On failure it prints "Error" (preserving the original
// diagnostic) and returns an empty string.
//
// Fixes: the original fell through after a failed Dial and dereferenced the
// nil conn (panic); it also ignored the Write error and closed the
// connection without defer.
func whois(dom, server string) string {
	conn, err := net.Dial("tcp", server+":43")
	if err != nil {
		fmt.Println("Error")
		return ""
	}
	defer conn.Close()

	if _, err := conn.Write([]byte(dom + "\r\n")); err != nil {
		fmt.Println("Error")
		return ""
	}

	// Read until the server closes the connection (io.EOF) or errors out;
	// whatever was received up to that point is the response.
	buf := make([]byte, 1024)
	var res []byte
	for {
		numbytes, err := conn.Read(buf)
		res = append(res, buf[:numbytes]...)
		if err != nil {
			break
		}
	}
	return string(res)
}
// main looks up "hello.com" against the .com registry's WHOIS server and
// prints the raw response.
func main() {
	fmt.Println(whois("hello.com", "com.whois-servers.net"))
}
|
package types
import (
"html/template"
"net/url"
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/modules/utils"
"github.com/GoAdminGroup/go-admin/plugins/admin/models"
)
// Button is the contract for every rendered admin-panel button: Content
// returns the button's HTML plus the JS powering it, and the remaining
// accessors expose the underlying action, routing and identity data.
type Button interface {
	Content() (template.HTML, template.JS)
	GetAction() Action
	URL() string
	METHOD() string
	ID() string
	Type() string
	GetName() string
	SetName(name string)
	IsType(t string) bool
}
// BaseButton carries the fields shared by every button kind; concrete
// button types embed it and add their own presentation fields.
type BaseButton struct {
	Id, Url, Method, Name, TypeName string
	Title                           template.HTML
	Action                          Action
}
// Accessor boilerplate: BaseButton satisfies everything in Button except a
// meaningful Content, which embedding types override.
func (b *BaseButton) Content() (template.HTML, template.JS) { return "", "" }
func (b *BaseButton) GetAction() Action                     { return b.Action }
func (b *BaseButton) ID() string                            { return b.Id }
func (b *BaseButton) URL() string                           { return b.Url }
func (b *BaseButton) Type() string                          { return b.TypeName }
func (b *BaseButton) IsType(t string) bool                  { return b.TypeName == t }
func (b *BaseButton) METHOD() string                        { return b.Method }
func (b *BaseButton) GetName() string                       { return b.Name }
func (b *BaseButton) SetName(name string)                   { b.Name = name }
// DefaultButton is a standard styled button with optional background/text
// colors, an icon, a float direction, and optional btn-group wrapping.
type DefaultButton struct {
	*BaseButton
	Color     template.HTML
	TextColor template.HTML
	Icon      string
	Direction template.HTML
	Group     bool
}
// GetDefaultButton builds a right-floating, ungrouped default button.
// colors[0] is the background color and colors[1] the text color, when given.
func GetDefaultButton(title template.HTML, icon string, action Action, colors ...template.HTML) *DefaultButton {
	return defaultButton(title, "right", icon, action, false, colors...)
}
// GetDefaultButtonGroup is GetDefaultButton with btn-group wrapping enabled.
func GetDefaultButtonGroup(title template.HTML, icon string, action Action, colors ...template.HTML) *DefaultButton {
	return defaultButton(title, "right", icon, action, true, colors...)
}
// defaultButton is the shared constructor behind the DefaultButton helpers.
// It assigns a fresh button id, binds it to the action, and pulls the URL
// and method from the action's callback node. colors[0]/colors[1] are the
// optional background and text colors.
func defaultButton(title, direction template.HTML, icon string, action Action, group bool, colors ...template.HTML) *DefaultButton {
	btnID := btnUUID()
	action.SetBtnId("." + btnID)

	var bg, fg template.HTML
	switch {
	case len(colors) > 1:
		bg, fg = colors[0], colors[1]
	case len(colors) == 1:
		bg = colors[0]
	}

	callback := action.GetCallbacks()
	return &DefaultButton{
		BaseButton: &BaseButton{
			Id:     btnID,
			Title:  title,
			Action: action,
			Url:    callback.Path,
			Method: callback.Method,
		},
		Group:     group,
		Color:     bg,
		TextColor: fg,
		Icon:      icon,
		Direction: direction,
	}
}
// GetColumnButton builds a grouped button with no float direction, suitable
// for embedding in a table column.
func GetColumnButton(title template.HTML, icon string, action Action, colors ...template.HTML) *DefaultButton {
	return defaultButton(title, "", icon, action, true, colors...)
}
// Content renders the button's anchor tag (optionally wrapped in a floated
// btn-group div) plus the action's extra HTML, and returns the action's JS.
func (b *DefaultButton) Content() (template.HTML, template.JS) {
	// Build an inline style attribute only from the colors that were set.
	color := template.HTML("")
	if b.Color != template.HTML("") {
		color = template.HTML(`background-color:`) + b.Color + template.HTML(`;`)
	}
	textColor := template.HTML("")
	if b.TextColor != template.HTML("") {
		textColor = template.HTML(`color:`) + b.TextColor + template.HTML(`;`)
	}

	style := template.HTML("")
	addColor := color + textColor

	if addColor != template.HTML("") {
		style = template.HTML(`style="`) + addColor + template.HTML(`"`)
	}

	h := template.HTML("")
	if b.Group {
		h += `<div class="btn-group pull-` + b.Direction + `" style="margin-right: 10px">`
	}

	// The button id doubles as a CSS class so the action's JS can target it.
	h += `<a ` + style + ` class="` + template.HTML(b.Id) + ` btn btn-sm btn-default ` + b.Action.BtnClass() + `" ` + b.Action.BtnAttribute() + `>
<i class="fa ` + template.HTML(b.Icon) + `"></i> ` + b.Title + `
</a>`

	if b.Group {
		h += `</div>`
	}
	return h + b.Action.ExtContent(), b.Action.Js()
}
// ActionButton renders as a list item (<li>) entry, used in action menus.
type ActionButton struct {
	*BaseButton
}
// GetActionButton builds an ActionButton bound to the given action. When no
// explicit id is supplied, a random "action-info-btn-" id is generated.
func GetActionButton(title template.HTML, action Action, ids ...string) *ActionButton {
	var btnID string
	if len(ids) > 0 {
		btnID = ids[0]
	} else {
		btnID = "action-info-btn-" + utils.Uuid(10)
	}

	action.SetBtnId("." + btnID)
	callback := action.GetCallbacks()

	return &ActionButton{
		BaseButton: &BaseButton{
			Id:     btnID,
			Title:  title,
			Action: action,
			Url:    callback.Path,
			Method: callback.Method,
		},
	}
}
// Content renders the button as a clickable <li> entry (the {{.Id}}
// placeholder is filled in by the surrounding template) plus the action's
// extra HTML, and returns the action's JS.
func (b *ActionButton) Content() (template.HTML, template.JS) {
	h := template.HTML(`<li style="cursor: pointer;"><a data-id="{{.Id}}" class="`+template.HTML(b.Id)+` `+
		b.Action.BtnClass()+`" `+b.Action.BtnAttribute()+`>`+b.Title+`</a></li>`) + b.Action.ExtContent()
	return h, b.Action.Js()
}
// ActionIconButton is an icon-only action button (no title text).
type ActionIconButton struct {
	Icon template.HTML
	*BaseButton
}
// GetActionIconButton builds an icon-only action button bound to the given
// action. When no explicit id is supplied, a random "action-info-btn-" id
// is generated.
func GetActionIconButton(icon string, action Action, ids ...string) *ActionIconButton {
	var btnID string
	if len(ids) > 0 {
		btnID = ids[0]
	} else {
		btnID = "action-info-btn-" + utils.Uuid(10)
	}

	action.SetBtnId("." + btnID)
	callback := action.GetCallbacks()

	return &ActionIconButton{
		Icon: template.HTML(icon),
		BaseButton: &BaseButton{
			Id:     btnID,
			Action: action,
			Url:    callback.Path,
			Method: callback.Method,
		},
	}
}
// Content renders the icon-only anchor (the {{.Id}} placeholder is filled by
// the surrounding template) plus the action's extra HTML, and returns the
// action's JS.
func (b *ActionIconButton) Content() (template.HTML, template.JS) {
	h := template.HTML(`<a data-id="{{.Id}}" class="`+template.HTML(b.Id)+` `+
		b.Action.BtnClass()+`" `+b.Action.BtnAttribute()+`><i class="fa `+b.Icon+`" style="font-size: 16px;"></i></a>`) + b.Action.ExtContent()
	return h, b.Action.Js()
}
// Buttons is an ordered collection of Button values.
type Buttons []Button

// Add returns b with btn appended.
func (b Buttons) Add(btn Button) Buttons {
	return append(b, btn)
}
// Content concatenates the HTML and JS of every button, in order.
func (b Buttons) Content() (template.HTML, template.JS) {
	var html template.HTML
	var js template.JS
	for _, btn := range b {
		btnHTML, btnJS := btn.Content()
		html += btnHTML
		js += btnJS
	}
	return html, js
}
// Copy returns a shallow copy of b: a fresh slice sharing the same Button
// values.
func (b Buttons) Copy() Buttons {
	dup := make(Buttons, len(b))
	copy(dup, b)
	return dup
}
// FooterContent concatenates the footer HTML of every button's action.
func (b Buttons) FooterContent() template.HTML {
	var out template.HTML
	for _, btn := range b {
		out += btn.GetAction().FooterContent()
	}
	return out
}
// CheckPermission filters b down to the buttons the user is permitted to see.
// Nav-dropdown buttons are kept only when at least one of their items passes
// the check.
func (b Buttons) CheckPermission(user models.UserModel) Buttons {
	btns := make(Buttons, 0)
	for _, btn := range b {
		if btn.IsType(ButtonTypeNavDropDown) {
			items := make([]Button, 0)
			for _, navItem := range btn.(*NavDropDownButton).Items {
				// NOTE(review): this checks the dropdown's own URL/METHOD for
				// every item instead of navItem's, so either all items pass or
				// none do. If per-item permissions are intended, this should
				// use navItem.URL()/navItem.METHOD() — confirm before changing,
				// since it alters what users can see.
				if user.CheckPermissionByUrlMethod(btn.URL(), btn.METHOD(), url.Values{}) {
					items = append(items, navItem)
				}
			}
			if len(items) > 0 {
				btns = append(btns, btn)
			}
		} else if user.CheckPermissionByUrlMethod(btn.URL(), btn.METHOD(), url.Values{}) {
			btns = append(btns, btn)
		}
	}
	return btns
}
// CheckPermissionWhenURLAndMethodNotEmpty keeps buttons that either have no
// URL or method (nothing to authorize) or pass the user's permission check.
func (b Buttons) CheckPermissionWhenURLAndMethodNotEmpty(user models.UserModel) Buttons {
	btns := make(Buttons, 0)
	// The loop variable is named btn: previously it was named b, shadowing
	// the receiver inside the loop body.
	for _, btn := range b {
		if btn.URL() == "" || btn.METHOD() == "" ||
			user.CheckPermissionByUrlMethod(btn.URL(), btn.METHOD(), url.Values{}) {
			btns = append(btns, btn)
		}
	}
	return btns
}
// AddNavButton appends a new navigation button built from the icon, name and
// action, unless a button with that name already exists.
func (b Buttons) AddNavButton(ico, name string, action Action) Buttons {
	if b.CheckExist(name) {
		return b
	}
	return append(b, GetNavButton("", ico, action, name))
}
// RemoveButtonByName removes every button whose GetName equals name and
// returns the (possibly shortened) list. An empty name removes nothing.
func (b Buttons) RemoveButtonByName(name string) Buttons {
	if name == "" {
		return b
	}
	for i := 0; i < len(b); i++ {
		if b[i].GetName() == name {
			b = append(b[:i], b[i+1:]...)
			// BUG FIX: re-examine index i — removal shifted the next element
			// into this slot, so without the decrement an element adjacent
			// to a match was skipped (consecutive matches survived).
			i--
		}
	}
	return b
}
// CheckExist reports whether the list contains a button named name. The
// empty name never matches.
func (b Buttons) CheckExist(name string) bool {
	if name == "" {
		return false
	}
	for _, btn := range b {
		if btn.GetName() == name {
			return true
		}
	}
	return false
}
// Callbacks collects the callback node of every button's action, in order.
func (b Buttons) Callbacks() []context.Node {
	nodes := make([]context.Node, 0, len(b))
	for _, btn := range b {
		nodes = append(nodes, btn.GetAction().GetCallbacks())
	}
	return nodes
}
// Names identifying the framework's built-in navigation-bar buttons; used by
// the Remove*NavButton helpers below.
const (
	NavBtnSiteName = "go_admin_site_navbtn"
	NavBtnInfoName = "go_admin_info_navbtn"
	NavBtnToolName = "go_admin_tool_navbtn"
	NavBtnPlugName = "go_admin_plug_navbtn"
)

// RemoveSiteNavButton removes the button named NavBtnSiteName.
func (b Buttons) RemoveSiteNavButton() Buttons {
	return b.RemoveButtonByName(NavBtnSiteName)
}

// RemoveInfoNavButton removes the button named NavBtnInfoName.
func (b Buttons) RemoveInfoNavButton() Buttons {
	return b.RemoveButtonByName(NavBtnInfoName)
}

// RemoveToolNavButton removes the button named NavBtnToolName.
func (b Buttons) RemoveToolNavButton() Buttons {
	return b.RemoveButtonByName(NavBtnToolName)
}

// RemovePlugNavButton removes the button named NavBtnPlugName.
func (b Buttons) RemovePlugNavButton() Buttons {
	return b.RemoveButtonByName(NavBtnPlugName)
}
// NavButton is a navigation-bar button: an icon and/or title rendered inside
// an <li> element, backed by a BaseButton's action.
type NavButton struct {
	*BaseButton
	Icon string // font-awesome class suffix; empty for no icon
}
// GetNavButton builds a NavButton with the given title, icon and action. An
// optional name (used for lookup/removal and permission filtering) may be
// supplied via names.
func GetNavButton(title template.HTML, icon string, action Action, names ...string) *NavButton {
	id := btnUUID()
	action.SetBtnId("." + id)
	node := action.GetCallbacks()
	var name string
	if len(names) > 0 {
		name = names[0]
	}
	return &NavButton{
		Icon: icon,
		BaseButton: &BaseButton{
			Id:     id,
			Title:  title,
			Action: action,
			Url:    node.Path,
			Method: node.Method,
			Name:   name,
		},
	}
}
// Content renders the nav button as an <li> anchor containing the optional
// icon and title, appends the action's extra content, and returns the
// action's javascript. The raw-string literal's embedded newlines are part
// of the emitted HTML.
func (n *NavButton) Content() (template.HTML, template.JS) {
	ico := template.HTML("")
	title := template.HTML("")
	if n.Icon != "" {
		ico = template.HTML(`<i class="fa ` + n.Icon + `"></i>`)
	}
	if n.Title != "" {
		title = `<span>` + n.Title + `</span>`
	}
	h := template.HTML(`<li>
<a class="`+template.HTML(n.Id)+` `+n.Action.BtnClass()+`" `+n.Action.BtnAttribute()+`>
`+ico+`
`+title+`
</a>
</li>`) + n.Action.ExtContent()
	return h, n.Action.Js()
}
// NavDropDownButton is a navigation-bar drop-down menu: an icon/title toggle
// plus a list of item buttons rendered in the drop-down.
type NavDropDownButton struct {
	*BaseButton
	Icon  string // font-awesome class suffix for the toggle
	Items []*NavDropDownItemButton
}

// NavDropDownItemButton is a single entry inside a NavDropDownButton.
type NavDropDownItemButton struct {
	*BaseButton
}
// GetDropDownButton builds a NavDropDownButton holding the given items. The
// button itself has no action (NilAction); each item carries its own. An
// optional name may be supplied via names.
func GetDropDownButton(title template.HTML, icon string, items []*NavDropDownItemButton, names ...string) *NavDropDownButton {
	var name string
	if len(names) > 0 {
		name = names[0]
	}
	return &NavDropDownButton{
		Icon:  icon,
		Items: items,
		BaseButton: &BaseButton{
			Id:       btnUUID(),
			Title:    title,
			Name:     name,
			TypeName: ButtonTypeNavDropDown,
			Action:   new(NilAction),
		},
	}
}
// SetItems replaces the drop-down's item list.
func (n *NavDropDownButton) SetItems(items []*NavDropDownItemButton) {
	n.Items = items
}

// AddItem appends one item to the drop-down.
func (n *NavDropDownButton) AddItem(item *NavDropDownItemButton) {
	n.Items = append(n.Items, item)
}
// Content renders the drop-down as an <li class="dropdown"> with a toggle
// anchor and a <ul> of the rendered items, concatenating each item's
// javascript. A fresh random id ties the toggle to its menu.
func (n *NavDropDownButton) Content() (template.HTML, template.JS) {
	ico := template.HTML("")
	title := template.HTML("")
	if n.Icon != "" {
		ico = template.HTML(`<i class="fa ` + n.Icon + `"></i>`)
	}
	if n.Title != "" {
		title = `<span>` + n.Title + `</span>`
	}
	// Accumulate every item's HTML and JS in order.
	content := template.HTML("")
	js := template.JS("")
	for _, item := range n.Items {
		c, j := item.Content()
		content += c
		js += j
	}
	// did links the toggle's container to the menu via aria-labelledby.
	did := utils.Uuid(10)
	h := template.HTML(`<li class="dropdown" id="` + template.HTML(did) + `">
<a class="` + template.HTML(n.Id) + ` dropdown-toggle" data-toggle="dropdown" style="cursor:pointer;">
` + ico + `
` + title + `
</a>
<ul class="dropdown-menu" aria-labelledby="` + template.HTML(did) + `">
` + content + `
</ul>
</li>`)
	return h, js
}
// Button type names used with Button.IsType to distinguish drop-down
// containers from their items.
const (
	ButtonTypeNavDropDownItem = "navdropdownitem"
	ButtonTypeNavDropDown     = "navdropdown"
)
// GetDropDownItemButton builds one drop-down entry for the given title and
// action, binding the action to a fresh button id and inheriting the action
// callback's path/method. An optional name may be supplied via names.
func GetDropDownItemButton(title template.HTML, action Action, names ...string) *NavDropDownItemButton {
	id := btnUUID()
	action.SetBtnId("." + id)
	node := action.GetCallbacks()
	var name string
	if len(names) > 0 {
		name = names[0]
	}
	base := &BaseButton{
		Id:       id,
		Title:    title,
		Action:   action,
		Url:      node.Path,
		Method:   node.Method,
		Name:     name,
		TypeName: ButtonTypeNavDropDownItem,
	}
	return &NavDropDownItemButton{BaseButton: base}
}
// Content renders the item as an <li> anchor with the "dropdown-item" class,
// appends the action's extra content, and returns the action's javascript.
// The raw-string literal's newlines are part of the emitted HTML.
func (n *NavDropDownItemButton) Content() (template.HTML, template.JS) {
	title := template.HTML("")
	if n.Title != "" {
		title = `<span>` + n.Title + `</span>`
	}
	h := template.HTML(`<li><a class="dropdown-item `+template.HTML(n.Id)+` `+
		n.Action.BtnClass()+`" `+n.Action.BtnAttribute()+`>
`+title+`
</a></li>`) + n.Action.ExtContent()
	return h, n.Action.Js()
}
|
package tccpoutputs
import "github.com/giantswarm/microerror"
var invalidConfigError = µerror.Error{
Kind: "invalidConfigError",
}
// IsInsserts invalidConfigError.
func IsInvalidConfig(err error) bool {
return microerror.Cause(err) == invalidConfigError
}
var executionFailedError = µerror.Error{
Kind: "executionFailedError",
}
// IsExecutionFailed executionFailedError.
func IsExecutionFailed(err error) bool {
return microerror.Cause(err) == executionFailedError
}
|
package main
import (
"database/sql"
"encoding/json"
"log"
"net/http"
)
// UserHandler serves the user sub-routes, embedding the *sql.DB it uses for
// storage.
type UserHandler struct {
	*sql.DB
}
// ServeHTTP routes requests below the users path. Creating a user requires a
// POST to the collection root; anything else is 404.
func (h *UserHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	var head string
	head, req.URL.Path = ShiftPath(req.URL.Path)
	match := Matcher(req.Method, head)
	switch {
	// BUG FIX: Create decodes a JSON request body and inserts a user, a
	// state-changing operation — it was previously (and wrongly) routed on
	// GET.
	case match("POST", ""):
		h.Create(res, req)
	default:
		http.Error(res, "Not Found", http.StatusNotFound)
	}
}
// NewUserHandler creates a UserHandler backed by db, ensuring the USERS
// table exists first. The process exits via log.Fatal when the table cannot
// be created.
func NewUserHandler(db *sql.DB) *UserHandler {
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS USERS (id text, name text, email text, password text, role integer)"); err != nil {
		log.Fatal(err)
	}
	return &UserHandler{DB: db}
}
// Create decodes a JSON PlainUser from the request body (rejecting unknown
// fields), builds a User from it, and writes the new user back as JSON.
// Decode/validation failures yield 400; a marshal failure yields 500.
func (h *UserHandler) Create(res http.ResponseWriter, req *http.Request) {
	var u PlainUser
	dec := json.NewDecoder(req.Body)
	dec.DisallowUnknownFields()
	if err := dec.Decode(&u); err != nil {
		http.Error(res, err.Error(), http.StatusBadRequest)
		return
	}
	user, err := NewUser(u)
	if err != nil {
		http.Error(res, err.Error(), http.StatusBadRequest)
		return
	}
	js, err := json.Marshal(user)
	if err != nil {
		// BUG FIX: this error was silently ignored, so a marshal failure
		// wrote a nil body with a 200 status.
		http.Error(res, err.Error(), http.StatusInternalServerError)
		return
	}
	res.Header().Set("Content-Type", "application/json")
	res.Write(js)
}
|
package main
import (
"flag"
"fmt"
"os"
"time"
yaml "gopkg.in/yaml.v2"
)
var (
	yamlFile       = "cidrs.yaml"                       // YAML filename (default 'cidrs.yaml'); override with -f
	apiToken       = os.Getenv("PHPIPAMTOKEN")          // API token read from the environment
	masterSubnetId = 231                                // phpipam master subnet ID; override with -m
	apiURL         = "https://<some_domain>/api/netops" // base API URL placeholder; override with -u
)

// CIDR is one subnet request read from the YAML file: a name and a prefix
// mask length.
type CIDR struct {
	Name string `yaml:"Name"`
	Mask int    `yaml:"Mask"`
}
// Checks for error
func checkError(e error) {
if e != nil {
fmt.Println(e)
os.Exit(1)
}
}
// processFlags registers the command-line flags (-f, -u, -m) over the
// package-level defaults and parses os.Args. Call exactly once from main:
// a second call would re-register the same flag names and panic.
func processFlags() {
	flag.StringVar(&yamlFile, "f", yamlFile, "Specify the YAML filename.")
	flag.StringVar(&apiURL, "u", apiURL, "API base URL.")
	flag.IntVar(&masterSubnetId, "m", masterSubnetId, "Master subnet id for nested subnet.")
	flag.Parse()
}
// checkAPIToken returns an error when the supplied API token is empty,
// and nil otherwise.
func checkAPIToken(token string) error {
	if len(token) == 0 {
		return fmt.Errorf("PHPIPAMTOKEN env variable not found or empty")
	}
	return nil
}
// checkCIDRs returns an error when the parsed YAML contains no entries under
// the "CIDRs" key, and nil otherwise.
func checkCIDRs(m map[string][]CIDR) error {
	if len(m["CIDRs"]) == 0 {
		// Error string follows Go convention: lowercase, no trailing period
		// (previously "No CIDRs found in '%v' file.").
		return fmt.Errorf("no CIDRs found in %q", yamlFile)
	}
	return nil
}
// main drives the tool: parse flags, verify the API token, load and parse
// the YAML CIDR list, then create each subnet via the phpipam API, pausing
// one second between calls.
func main() {
	processFlags()
	fmt.Println("Attempting to get PHPIPAM Token..")
	err := checkAPIToken(apiToken)
	checkError(err)
	fmt.Println("PHPIPAM Token found!")
	fmt.Printf("Attempting to open '%v'..\n", yamlFile)
	f, err := os.ReadFile(yamlFile)
	checkError(err)
	fmt.Printf("'%v' file successfully loaded!\n", yamlFile)
	cidrs := make(map[string][]CIDR)
	err = yaml.Unmarshal(f, &cidrs) // Unmarshal yaml file
	checkError(err)
	err = checkCIDRs(cidrs)
	checkError(err)
	for _, cidr := range cidrs["CIDRs"] {
		createSubnet(cidr)
		// Sleep a second after each API call to avoid hammering the server.
		time.Sleep(time.Second)
	}
	fmt.Println("Execution complete!")
}
|
package middleware
import (
"net/http"
"github.com/gin-gonic/gin"
)
// ApiTokenHeaderKey is the request header inspected by ApiTokenMiddleware.
const ApiTokenHeaderKey = "X-MessageDB-Api-Token"
// ApiTokenMiddleware returns a gin middleware that gates requests on the
// X-MessageDB-Api-Token header: 403 when the header is absent or invalid,
// 500 when validation itself fails, otherwise the chain continues.
func ApiTokenMiddleware() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		token := ctx.Request.Header.Get(ApiTokenHeaderKey)
		if token == "" {
			ctx.AbortWithStatus(http.StatusForbidden)
			return
		}
		valid, err := isValidApiToken(token)
		switch {
		case err != nil:
			ctx.AbortWithStatus(http.StatusInternalServerError)
		case !valid:
			ctx.AbortWithStatus(http.StatusForbidden)
		default:
			ctx.Next()
		}
	}
}
// isValidApiToken reports whether the token is accepted. Currently a stub
// that accepts every token; see the TODO below.
func isValidApiToken(token string) (bool, error) {
	//TODO: check if Api Token is a valid one here...
	return true, nil
}
|
// This program listens to the host and port specified by the -listen flag and
// dumps any incoming data to standard output.
package main
import (
"flag"
"fmt"
"io"
"log"
"net"
"os"
)
// addr is the TCP address to listen on, settable via -listen.
var addr = flag.String("listen", "localhost:8000", "server listen address")
// dumpWriter forwards writes to w, prefixing each chunk with the
// remote->local address pair of connection c.
type dumpWriter struct {
	c net.Conn  // connection whose addresses label the output
	w io.Writer // destination, e.g. os.Stdout
}
// Write emits an "[remote->local] " label followed by v. The returned count
// covers only v, as the io.Writer contract requires.
func (w dumpWriter) Write(v []byte) (int, error) {
	label := fmt.Sprintf("[%v->%v] ", w.c.RemoteAddr(), w.c.LocalAddr())
	io.WriteString(w.w, label)
	return w.w.Write(v)
}
// main accepts TCP connections on *addr forever, dumping each connection's
// incoming bytes to stdout with an address-pair prefix.
func main() {
	flag.Parse()
	l, err := net.Listen("tcp", *addr)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Listening on", l.Addr())
	for {
		c, err := l.Accept()
		if err != nil {
			// Log and keep accepting; a permanent error (closed listener)
			// would therefore log repeatedly.
			log.Println(err)
			continue
		}
		// One goroutine per connection. io.Copy's result is discarded
		// (best-effort dump); the copy ends when the peer closes.
		go io.Copy(dumpWriter{c, os.Stdout}, c)
	}
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// ArrayCoerceExpr mirrors PostgreSQL's ArrayCoerceExpr parse node: an
// expression coercing an array value to another array type.
// Field meanings follow the PostgreSQL node of the same name — confirm
// against the upstream headers if behavior matters here.
type ArrayCoerceExpr struct {
	Xpr          ast.Node
	Arg          ast.Node // input array expression
	Elemfuncid   Oid      // OID of the per-element coercion function
	Resulttype   Oid      // output array type OID
	Resulttypmod int32    // output type modifier
	Resultcollid Oid      // output collation OID
	IsExplicit   bool     // conversion semantics flag
	Coerceformat CoercionForm
	Location     int // token position in the source query text
}

// Pos returns the node's location within the parsed SQL text.
func (n *ArrayCoerceExpr) Pos() int {
	return n.Location
}
|
package integration
import (
"fmt"
. "gopkg.in/check.v1"
)
// TestDelete brings up the hello service, verifies its container is running,
// force-removes it with `rm --force`, and asserts the container is gone.
func (s *RunSuite) TestDelete(c *C) {
	p := s.ProjectFromText(c, "up", SimpleTemplate)
	name := fmt.Sprintf("%s_%s_1", p, "hello")
	cn := s.GetContainerByName(c, name)
	c.Assert(cn, NotNil)
	c.Assert(cn.State.Running, Equals, true)
	s.FromText(c, p, "rm", "--force", `
hello:
  image: busybox
  stdin_open: true
  tty: true
`)
	cn = s.GetContainerByName(c, name)
	c.Assert(cn, IsNil)
}
// TestDeleteWithVol is TestDelete with the additional -v flag, exercising
// removal of the container together with its volumes.
func (s *RunSuite) TestDeleteWithVol(c *C) {
	p := s.ProjectFromText(c, "up", SimpleTemplate)
	name := fmt.Sprintf("%s_%s_1", p, "hello")
	cn := s.GetContainerByName(c, name)
	c.Assert(cn, NotNil)
	c.Assert(cn.State.Running, Equals, true)
	s.FromText(c, p, "rm", "--force", "-v", `
hello:
  image: busybox
  stdin_open: true
  tty: true
`)
	cn = s.GetContainerByName(c, name)
	c.Assert(cn, IsNil)
}
|
package game_map
import (
"github.com/faiface/pixel/pixelgl"
"github.com/steelx/go-rpg-cgm/state_machine"
"reflect"
)
// FollowPathState is a character state that walks a pre-scripted path
// (Character.Path) one step at a time via the character's state machine.
type FollowPathState struct {
	Character  *Character
	Map        GameMap
	Entity     Entity
	Controller *state_machine.StateMachine
}
// FollowPathStateCreate constructs a FollowPathState from its variadic
// constructor arguments: args[0] must be a *Character and args[1] a *GameMap
// (both assertions panic on a wrong type, as before).
func FollowPathStateCreate(args ...interface{}) state_machine.State {
	character := reflect.ValueOf(args[0]).Interface().(*Character)
	gMap := reflect.ValueOf(args[1]).Interface().(*GameMap)
	return &FollowPathState{
		Character:  character,
		Map:        *gMap,
		Entity:     *character.Entity,
		Controller: character.Controller,
	}
}
// The StateMachine requires each state to implement Enter, Exit, Render and
// Update.

// IsFinished always reports true: this state may be left at any time.
func (s FollowPathState) IsFinished() bool {
	return true
}
// Enter issues the next step of the scripted path. When the path is empty or
// exhausted it restores the default state that Character.FollowPath saved
// and switches back to it; otherwise it triggers a one-tile "move" in the
// direction named by the current path entry.
func (s *FollowPathState) Enter(data ...interface{}) {
	path := s.Character.Path
	if len(path) == 0 || s.Character.PathIndex >= len(path) {
		s.Character.DefaultState = s.Character.PrevDefaultState // we set at Character.FollowPath
		s.Controller.Change(s.Character.DefaultState, Direction{0, 0})
		return
	}
	switch path[s.Character.PathIndex] {
	case "left":
		s.Controller.Change("move", Direction{-1, 0})
	case "right":
		s.Controller.Change("move", Direction{1, 0})
	case "up":
		s.Controller.Change("move", Direction{0, -1})
	case "down":
		s.Controller.Change("move", Direction{0, 1})
	}
}
// Render draws nothing; this state has no visuals of its own.
func (s *FollowPathState) Render(win *pixelgl.Window) {}

// Exit advances the path cursor to the next scripted step.
func (s *FollowPathState) Exit() {
	s.Character.PathIndex = s.Character.PathIndex + 1
}

// Update performs no per-frame work.
func (s *FollowPathState) Update(dt float64) {}
|
package nv7haven
import (
"net/url"
"strings"
"github.com/gofiber/fiber/v2"
"github.com/jdkato/prose/v2"
)
// calcHella URL-unescapes the "input" route parameter, prefixes every
// distinct adjective in it with "hella-", and returns the modified text.
func (d *Nv7Haven) calcHella(c *fiber.Ctx) error {
	input, err := url.PathUnescape(c.Params("input"))
	if err != nil {
		return err
	}
	// BUG FIX: the document-parse error was previously discarded; a failed
	// parse would have dereferenced a nil doc below.
	doc, err := prose.NewDocument(input)
	if err != nil {
		return err
	}
	done := make([]string, 0)
	// Iterate over the doc's tokens; JJ/JJR/JJS are the adjective POS tags.
	for _, tok := range doc.Tokens() {
		if tok.Tag == "JJ" || tok.Tag == "JJR" || tok.Tag == "JJS" {
			// BUG FIX: de-duplicate on the adjective text, not its POS tag.
			// Keying on the tag meant only the first adjective per tag class
			// was ever prefixed; Replace already handles repeats of a word.
			if !isIn(tok.Text, done) {
				done = append(done, tok.Text)
				input = strings.Replace(input, tok.Text, "hella-"+tok.Text, -1)
			}
		}
	}
	return c.SendString(input)
}
// isIn reports whether a appears in list.
func isIn(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
|
package main
import (
"io/ioutil"
"os"
"testing"
)
// TestNeatPrint verifies neatPrint's boxed two-column output for same-length
// rows, mixed-length rows, and rows where both columns vary in length.
// Stdout is temporarily redirected to a pipe to capture what neatPrint
// writes. NOTE(review): the fixture variables are spelled "intput..." —
// a typo kept as-is since renaming is out of scope for a doc pass.
func TestNeatPrint(t *testing.T) {
	type tests struct {
		testName string
		input    [][]string
		expected string
	}
	intputSameLength := [][]string{
		{"price", "display price of item"},
		{"price", "display price of item"},
		{"price", "display price of item"},
		{"price", "display price of item"},
		{"price", "display price of item"},
		{"price", "display price of item"},
	}
	expectedSameLength :=
		"#################################\n" +
			"# price - display price of item #\n" +
			"# price - display price of item #\n" +
			"# price - display price of item #\n" +
			"# price - display price of item #\n" +
			"# price - display price of item #\n" +
			"# price - display price of item #\n" +
			"#################################\n"
	intputMixedLength := [][]string{
		{"price", "display price of item"},
		{"owner", "show owner of item"},
		{"update", "update database and then sync cache with database"},
		{"delete", "delete item"},
		{"disassociate", "remove item from owner"},
		{"billing inquiries", "history of billing"},
	}
	expectedMixedLength := "#########################################################################\n" +
		"# price - display price of item #\n" +
		"# owner - show owner of item #\n" +
		"# update - update database and then sync cache with database #\n" +
		"# delete - delete item #\n" +
		"# disassociate - remove item from owner #\n" +
		"# billing inquiries - history of billing #\n" +
		"#########################################################################\n"
	inputLongLeftAndRight := [][]string{
		{"price", "display price of item"},
		{"owner", "show owner of item"},
		{"update", "sync cache with database"},
		{"delete", "delete item"},
		{"disassociate", "remove item from owner"},
		{"billing inquiries", "history of billing statements and billing questions that a user has"},
	}
	expectedLongLeftAndRight := "###########################################################################################\n" +
		"# price - display price of item #\n" +
		"# owner - show owner of item #\n" +
		"# update - sync cache with database #\n" +
		"# delete - delete item #\n" +
		"# disassociate - remove item from owner #\n" +
		"# billing inquiries - history of billing statements and billing questions that a user has #\n" +
		"###########################################################################################\n"
	testCases := []tests{
		{"Same Length Input", intputSameLength, expectedSameLength},
		{"Mixed Length Input", intputMixedLength, expectedMixedLength},
		{"Long Left and Right Input", inputLongLeftAndRight, expectedLongLeftAndRight},
	}
	for _, testValues := range testCases {
		// Swap stdout for a pipe so neatPrint's output can be read back.
		tmpStdOut := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w
		neatPrint(testValues.input)
		// Close the write end before reading so ReadAll sees EOF.
		w.Close()
		actual, _ := ioutil.ReadAll(r)
		os.Stdout = tmpStdOut
		if string(actual) != testValues.expected {
			t.Errorf("Expected%s, got%s", testValues.expected, actual)
		}
	}
}
// BenchmarkNeatPrint measures neatPrint over a mixed-length fixture.
// Note: output goes to the real stdout on each iteration, so I/O cost
// dominates the measurement.
func BenchmarkNeatPrint(b *testing.B) {
	intputMixedLength := [][]string{
		{"price", "display price of item"},
		{"owner", "show owner of item"},
		{"update", "update database and then sync cache with database"},
		{"delete", "delete item"},
		{"disassociate", "remove item from owner"},
		{"billing inquiries", "history of billing"},
	}
	for n := 0; n < b.N; n++ {
		neatPrint(intputMixedLength)
	}
}
|
package PowerNLP
import (
"github.com/ksclouds/PowerNLP/Seg"
)
// Segment tokenizes sentence using the package's default segmenter.
// (Original comment: "默认分词方法" — default segmentation method.)
func Segment(sentence string) []string {
	return Seg.DefaultSegment().Segment(sentence)
}
|
// ˅
package main
// ˄
// Book is a simple value object holding a book's title. The ˅/˄ marker
// comments delimit hand-edit regions for the code generator — keep them.
type Book struct {
	// ˅
	// ˄

	title string // unexported; set only through NewBook

	// ˅
	// ˄
}

// NewBook constructs a Book with the given title.
func NewBook(title string) *Book {
	// ˅
	return &Book{title}
	// ˄
}
// ˅
// ˄
|
package gojson
import "reflect"
// marshalSlice encodes a slice (or array) value as a JSON array literal,
// delegating the element list to marshalSliceElems. An empty/nil input
// yields "[  ]" (two spaces), matching the fixed "[ " / " ]" delimiters.
func (enc *encoder) marshalSlice(v reflect.Value) ([]byte, error) {
	data, err := enc.marshalSliceElems(v)
	if err != nil {
		return nil, err
	}
	// string(nil) is "", so a nil element list needs no special case.
	return []byte("[ " + string(data) + " ]"), nil
}
// marshalSliceElems encodes the elements of v as a ", "-separated list.
// A nil or zero-length input returns (nil, nil). Zero values of nilable
// kinds (pointer, array, interface, slice, map) are emitted as "null";
// everything else goes through enc.marshal.
func (enc *encoder) marshalSliceElems(v reflect.Value) ([]byte, error) {
	if (v.Type().Kind() == reflect.Slice && v.IsNil()) || v.Len() == 0 {
		return nil, nil
	}
	var result string
	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)
		kind := elem.Type().Kind()
		nilable := kind == reflect.Ptr || kind == reflect.Array ||
			kind == reflect.Interface || kind == reflect.Slice || kind == reflect.Map
		var data []byte
		if nilable && elem.IsZero() {
			data = []byte("null")
		} else {
			var err error
			if data, err = enc.marshal(elem); err != nil {
				return nil, err
			}
		}
		if !empty(result) {
			result = result + ", "
		}
		result = result + string(data)
	}
	return []byte(result), nil
}
|
package eth
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
"web3.go/consts"
"web3.go/providers"
)
// TestGetBalance exercises balance and gas-price queries against a local
// JSON-RPC node (consts.HOST_HTTP_PROVIDER_LOCAL) — these are live
// integration subtests, not unit tests.
// NOTE(review): "address err" expects an empty address to error, yet
// "eth_getBalance" passes the same empty strings and expects success —
// presumably exercising provider-side handling; confirm the intent.
func TestGetBalance(t *testing.T) {
	web3 := NewEth(providers.NewHTTPProvider(consts.HOST_HTTP_PROVIDER_LOCAL, 10))
	t.Run("address err", func(t *testing.T) {
		_, err := web3.GetBalance("", "")
		assert.NotNil(t, err)
	})
	t.Run("eth_getBalance", func(t *testing.T) {
		balance, err := web3.GetBalance("", "")
		assert.Nil(t, err)
		assert.Equal(t, "0", balance.String())
		fmt.Println(balance)
	})
	t.Run("eth_gasPrice", func(t *testing.T) {
		v, err := web3.GasPrice()
		assert.Nil(t, err)
		assert.NotEqual(t, "0", v.String())
	})
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
func DCLServiceLevelObjectiveSchema() *dcl.Schema {
return &dcl.Schema{
Info: &dcl.Info{
Title: "Monitoring/ServiceLevelObjective",
Description: "The Monitoring ServiceLevelObjective resource",
StructName: "ServiceLevelObjective",
},
Paths: &dcl.Paths{
Get: &dcl.Path{
Description: "The function used to get information about a ServiceLevelObjective",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "serviceLevelObjective",
Required: true,
Description: "A full instance of a ServiceLevelObjective",
},
},
},
Apply: &dcl.Path{
Description: "The function used to apply information about a ServiceLevelObjective",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "serviceLevelObjective",
Required: true,
Description: "A full instance of a ServiceLevelObjective",
},
},
},
Delete: &dcl.Path{
Description: "The function used to delete a ServiceLevelObjective",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "serviceLevelObjective",
Required: true,
Description: "A full instance of a ServiceLevelObjective",
},
},
},
DeleteAll: &dcl.Path{
Description: "The function used to delete all ServiceLevelObjective",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
dcl.PathParameters{
Name: "service",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
List: &dcl.Path{
Description: "The function used to list information about many ServiceLevelObjective",
Parameters: []dcl.PathParameters{
dcl.PathParameters{
Name: "project",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
dcl.PathParameters{
Name: "service",
Required: true,
Schema: &dcl.PathParametersSchema{
Type: "string",
},
},
},
},
},
Components: &dcl.Components{
Schemas: map[string]*dcl.Component{
"ServiceLevelObjective": &dcl.Component{
Title: "ServiceLevelObjective",
ID: "projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}",
ParentContainer: "project",
LabelsField: "userLabels",
HasCreate: true,
SchemaProperty: dcl.Property{
Type: "object",
Required: []string{
"name",
"goal",
"project",
"service",
},
Properties: map[string]*dcl.Property{
"calendarPeriod": &dcl.Property{
Type: "string",
GoName: "CalendarPeriod",
GoType: "ServiceLevelObjectiveCalendarPeriodEnum",
Description: "A calendar period, semantically \"since the start of the current ``\". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and `MONTH` are supported. Possible values: CALENDAR_PERIOD_UNSPECIFIED, DAY, WEEK, FORTNIGHT, MONTH, QUARTER, HALF, YEAR",
Conflicts: []string{
"rollingPeriod",
},
Enum: []string{
"CALENDAR_PERIOD_UNSPECIFIED",
"DAY",
"WEEK",
"FORTNIGHT",
"MONTH",
"QUARTER",
"HALF",
"YEAR",
},
},
"createTime": &dcl.Property{
Type: "string",
Format: "date-time",
GoName: "CreateTime",
ReadOnly: true,
Description: "Time stamp of the `Create` or most recent `Update` command on this `Slo`.",
Immutable: true,
},
"deleteTime": &dcl.Property{
Type: "string",
Format: "date-time",
GoName: "DeleteTime",
ReadOnly: true,
Description: "Time stamp of the `Update` or `Delete` command that made this no longer a current `Slo`. This field is not populated in `ServiceLevelObjective`s returned from calls to `GetServiceLevelObjective` and `ListServiceLevelObjectives`, because it is always empty in the current version. It is populated in `ServiceLevelObjective`s representing previous versions in the output of `ListServiceLevelObjectiveVersions`. Because all old configuration versions are stored, `Update` operations mark the obsoleted version as deleted.",
Immutable: true,
},
"displayName": &dcl.Property{
Type: "string",
GoName: "DisplayName",
Description: "Name used for UI elements listing this SLO.",
},
"goal": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Goal",
Description: "The fraction of service that must be good in order for this objective to be met. `0 < goal <= 0.999`.",
},
"name": &dcl.Property{
Type: "string",
GoName: "Name",
Description: "Resource name for this `ServiceLevelObjective`. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]",
Immutable: true,
},
"project": &dcl.Property{
Type: "string",
GoName: "Project",
Description: "The project for the resource",
Immutable: true,
ResourceReferences: []*dcl.PropertyResourceReference{
&dcl.PropertyResourceReference{
Resource: "Cloudresourcemanager/Project",
Field: "name",
Parent: true,
},
},
},
"rollingPeriod": &dcl.Property{
Type: "string",
GoName: "RollingPeriod",
Description: "A rolling time period, semantically \"in the past ``\". Must be an integer multiple of 1 day no larger than 30 days.",
Conflicts: []string{
"calendarPeriod",
},
},
"service": &dcl.Property{
Type: "string",
GoName: "Service",
Description: "The service for the resource",
Immutable: true,
ResourceReferences: []*dcl.PropertyResourceReference{
&dcl.PropertyResourceReference{
Resource: "Monitoring/Service",
Field: "name",
Parent: true,
},
},
},
"serviceLevelIndicator": &dcl.Property{
Type: "object",
GoName: "ServiceLevelIndicator",
GoType: "ServiceLevelObjectiveServiceLevelIndicator",
Description: "The definition of good service, used to measure and calculate the quality of the `Service`'s performance with respect to a single aspect of service quality.",
Properties: map[string]*dcl.Property{
"basicSli": &dcl.Property{
Type: "object",
GoName: "BasicSli",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSli",
Description: "Basic SLI on a well-known service type.",
Conflicts: []string{
"requestBased",
"windowsBased",
},
Properties: map[string]*dcl.Property{
"availability": &dcl.Property{
Type: "object",
GoName: "Availability",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability",
Description: "Good service is defined to be the count of requests made to this service that return successfully.",
Conflicts: []string{
"latency",
"operationAvailability",
"operationLatency",
},
Properties: map[string]*dcl.Property{},
},
"latency": &dcl.Property{
Type: "object",
GoName: "Latency",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency",
Description: "Good service is defined to be the count of requests made to this service that are fast enough with respect to `latency.threshold`.",
Conflicts: []string{
"availability",
"operationAvailability",
"operationLatency",
},
Properties: map[string]*dcl.Property{
"experience": &dcl.Property{
Type: "string",
GoName: "Experience",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum",
Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING",
Enum: []string{
"LATENCY_EXPERIENCE_UNSPECIFIED",
"DELIGHTING",
"SATISFYING",
"ANNOYING",
},
},
"threshold": &dcl.Property{
Type: "string",
GoName: "Threshold",
Description: "Good service is defined to be the count of requests made to this service that return in no more than `threshold`.",
},
},
},
"location": &dcl.Property{
Type: "array",
GoName: "Location",
Description: "OPTIONAL: The set of locations to which this SLI is relevant. Telemetry from other locations will not be used to calculate performance for this SLI. If omitted, this SLI applies to all locations in which the Service has activity. For service types that don't support breaking down by location, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"method": &dcl.Property{
Type: "array",
GoName: "Method",
Description: "OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from other methods will not be used to calculate performance for this SLI. If omitted, this SLI applies to all the Service's methods. For service types that don't support breaking down by method, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"operationAvailability": &dcl.Property{
Type: "object",
GoName: "OperationAvailability",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability",
Description: "Good service is defined to be the count of operations performed by this service that return successfully",
Conflicts: []string{
"availability",
"latency",
"operationLatency",
},
Properties: map[string]*dcl.Property{},
},
"operationLatency": &dcl.Property{
Type: "object",
GoName: "OperationLatency",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency",
Description: "Good service is defined to be the count of operations performed by this service that are fast enough with respect to `operation_latency.threshold`.",
Conflicts: []string{
"availability",
"latency",
"operationAvailability",
},
Properties: map[string]*dcl.Property{
"experience": &dcl.Property{
Type: "string",
GoName: "Experience",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum",
Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING",
Enum: []string{
"LATENCY_EXPERIENCE_UNSPECIFIED",
"DELIGHTING",
"SATISFYING",
"ANNOYING",
},
},
"threshold": &dcl.Property{
Type: "string",
GoName: "Threshold",
Description: "Good service is defined to be the count of operations that are completed in no more than `threshold`.",
},
},
},
"version": &dcl.Property{
Type: "array",
GoName: "Version",
Description: "OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry from other API versions will not be used to calculate performance for this SLI. If omitted, this SLI applies to all API versions. For service types that don't support breaking down by version, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
},
},
"requestBased": &dcl.Property{
Type: "object",
GoName: "RequestBased",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBased",
Description: "Request-based SLIs",
Conflicts: []string{
"basicSli",
"windowsBased",
},
Properties: map[string]*dcl.Property{
"distributionCut": &dcl.Property{
Type: "object",
GoName: "DistributionCut",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut",
Description: "`distribution_cut` is used when `good_service` is a count of values aggregated in a `Distribution` that fall into a good range. The `total_service` is the total count of all values aggregated in the `Distribution`.",
Conflicts: []string{
"goodTotalRatio",
},
Properties: map[string]*dcl.Property{
"distributionFilter": &dcl.Property{
Type: "string",
GoName: "DistributionFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` aggregating values. Must have `ValueType = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"range": &dcl.Property{
Type: "object",
GoName: "Range",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange",
Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.",
Properties: map[string]*dcl.Property{
"max": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Max",
Description: "Range maximum.",
},
"min": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Min",
Description: "Range minimum.",
},
},
},
},
},
"goodTotalRatio": &dcl.Property{
Type: "object",
GoName: "GoodTotalRatio",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio",
Description: "`good_total_ratio` is used when the ratio of `good_service` to `total_service` is computed from two `TimeSeries`.",
Conflicts: []string{
"distributionCut",
},
Properties: map[string]*dcl.Property{
"badServiceFilter": &dcl.Property{
Type: "string",
GoName: "BadServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying bad service, either demanded service that was not provided or demanded service that was of inadequate quality. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"goodServiceFilter": &dcl.Property{
Type: "string",
GoName: "GoodServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying good service provided. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"totalServiceFilter": &dcl.Property{
Type: "string",
GoName: "TotalServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying total demanded service. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
},
},
},
},
"windowsBased": &dcl.Property{
Type: "object",
GoName: "WindowsBased",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBased",
Description: "Windows-based SLIs",
Conflicts: []string{
"basicSli",
"requestBased",
},
Properties: map[string]*dcl.Property{
"goodBadMetricFilter": &dcl.Property{
Type: "string",
GoName: "GoodBadMetricFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if any `true` values appear in the window.",
Conflicts: []string{
"goodTotalRatioThreshold",
"metricMeanInRange",
"metricSumInRange",
},
},
"goodTotalRatioThreshold": &dcl.Property{
Type: "object",
GoName: "GoodTotalRatioThreshold",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold",
Description: "A window is good if its `performance` is high enough.",
Conflicts: []string{
"goodBadMetricFilter",
"metricMeanInRange",
"metricSumInRange",
},
Properties: map[string]*dcl.Property{
"basicSliPerformance": &dcl.Property{
Type: "object",
GoName: "BasicSliPerformance",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance",
Description: "`BasicSli` to evaluate to judge window quality.",
Conflicts: []string{
"performance",
},
Properties: map[string]*dcl.Property{
"availability": &dcl.Property{
Type: "object",
GoName: "Availability",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability",
Description: "Good service is defined to be the count of requests made to this service that return successfully.",
Conflicts: []string{
"latency",
"operationAvailability",
"operationLatency",
},
Properties: map[string]*dcl.Property{},
},
"latency": &dcl.Property{
Type: "object",
GoName: "Latency",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency",
Description: "Good service is defined to be the count of requests made to this service that are fast enough with respect to `latency.threshold`.",
Conflicts: []string{
"availability",
"operationAvailability",
"operationLatency",
},
Properties: map[string]*dcl.Property{
"experience": &dcl.Property{
Type: "string",
GoName: "Experience",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum",
Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING",
Enum: []string{
"LATENCY_EXPERIENCE_UNSPECIFIED",
"DELIGHTING",
"SATISFYING",
"ANNOYING",
},
},
"threshold": &dcl.Property{
Type: "string",
GoName: "Threshold",
Description: "Good service is defined to be the count of requests made to this service that return in no more than `threshold`.",
},
},
},
"location": &dcl.Property{
Type: "array",
GoName: "Location",
Description: "OPTIONAL: The set of locations to which this SLI is relevant. Telemetry from other locations will not be used to calculate performance for this SLI. If omitted, this SLI applies to all locations in which the Service has activity. For service types that don't support breaking down by location, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"method": &dcl.Property{
Type: "array",
GoName: "Method",
Description: "OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from other methods will not be used to calculate performance for this SLI. If omitted, this SLI applies to all the Service's methods. For service types that don't support breaking down by method, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
"operationAvailability": &dcl.Property{
Type: "object",
GoName: "OperationAvailability",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability",
Description: "Good service is defined to be the count of operations performed by this service that return successfully",
Conflicts: []string{
"availability",
"latency",
"operationLatency",
},
Properties: map[string]*dcl.Property{},
},
"operationLatency": &dcl.Property{
Type: "object",
GoName: "OperationLatency",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency",
Description: "Good service is defined to be the count of operations performed by this service that are fast enough with respect to `operation_latency.threshold`.",
Conflicts: []string{
"availability",
"latency",
"operationAvailability",
},
Properties: map[string]*dcl.Property{
"experience": &dcl.Property{
Type: "string",
GoName: "Experience",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum",
Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING",
Enum: []string{
"LATENCY_EXPERIENCE_UNSPECIFIED",
"DELIGHTING",
"SATISFYING",
"ANNOYING",
},
},
"threshold": &dcl.Property{
Type: "string",
GoName: "Threshold",
Description: "Good service is defined to be the count of operations that are completed in no more than `threshold`.",
},
},
},
"version": &dcl.Property{
Type: "array",
GoName: "Version",
Description: "OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry from other API versions will not be used to calculate performance for this SLI. If omitted, this SLI applies to all API versions. For service types that don't support breaking down by version, setting this field will result in an error.",
SendEmpty: true,
ListType: "list",
Items: &dcl.Property{
Type: "string",
GoType: "string",
},
},
},
},
"performance": &dcl.Property{
Type: "object",
GoName: "Performance",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance",
Description: "`RequestBasedSli` to evaluate to judge window quality.",
Conflicts: []string{
"basicSliPerformance",
},
Properties: map[string]*dcl.Property{
"distributionCut": &dcl.Property{
Type: "object",
GoName: "DistributionCut",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut",
Description: "`distribution_cut` is used when `good_service` is a count of values aggregated in a `Distribution` that fall into a good range. The `total_service` is the total count of all values aggregated in the `Distribution`.",
Conflicts: []string{
"goodTotalRatio",
},
Properties: map[string]*dcl.Property{
"distributionFilter": &dcl.Property{
Type: "string",
GoName: "DistributionFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` aggregating values. Must have `ValueType = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"range": &dcl.Property{
Type: "object",
GoName: "Range",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange",
Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.",
Properties: map[string]*dcl.Property{
"max": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Max",
Description: "Range maximum.",
},
"min": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Min",
Description: "Range minimum.",
},
},
},
},
},
"goodTotalRatio": &dcl.Property{
Type: "object",
GoName: "GoodTotalRatio",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio",
Description: "`good_total_ratio` is used when the ratio of `good_service` to `total_service` is computed from two `TimeSeries`.",
Conflicts: []string{
"distributionCut",
},
Properties: map[string]*dcl.Property{
"badServiceFilter": &dcl.Property{
Type: "string",
GoName: "BadServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying bad service, either demanded service that was not provided or demanded service that was of inadequate quality. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"goodServiceFilter": &dcl.Property{
Type: "string",
GoName: "GoodServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying good service provided. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
"totalServiceFilter": &dcl.Property{
Type: "string",
GoName: "TotalServiceFilter",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying total demanded service. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.",
},
},
},
},
},
"threshold": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Threshold",
Description: "If window `performance >= threshold`, the window is counted as good.",
},
},
},
"metricMeanInRange": &dcl.Property{
Type: "object",
GoName: "MetricMeanInRange",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange",
Description: "A window is good if the metric's value is in a good range, averaged across returned streams.",
Conflicts: []string{
"goodBadMetricFilter",
"goodTotalRatioThreshold",
"metricSumInRange",
},
Properties: map[string]*dcl.Property{
"range": &dcl.Property{
Type: "object",
GoName: "Range",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange",
Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.",
Properties: map[string]*dcl.Property{
"max": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Max",
Description: "Range maximum.",
},
"min": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Min",
Description: "Range minimum.",
},
},
},
"timeSeries": &dcl.Property{
Type: "string",
GoName: "TimeSeries",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying the `TimeSeries` to use for evaluating window quality.",
},
},
},
"metricSumInRange": &dcl.Property{
Type: "object",
GoName: "MetricSumInRange",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange",
Description: "A window is good if the metric's value is in a good range, summed across returned streams.",
Conflicts: []string{
"goodBadMetricFilter",
"goodTotalRatioThreshold",
"metricMeanInRange",
},
Properties: map[string]*dcl.Property{
"range": &dcl.Property{
Type: "object",
GoName: "Range",
GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange",
Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.",
Properties: map[string]*dcl.Property{
"max": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Max",
Description: "Range maximum.",
},
"min": &dcl.Property{
Type: "number",
Format: "double",
GoName: "Min",
Description: "Range minimum.",
},
},
},
"timeSeries": &dcl.Property{
Type: "string",
GoName: "TimeSeries",
Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying the `TimeSeries` to use for evaluating window quality.",
},
},
},
"windowPeriod": &dcl.Property{
Type: "string",
GoName: "WindowPeriod",
Description: "Duration over which window quality is evaluated. Must be an integer fraction of a day and at least `60s`.",
},
},
},
},
},
"serviceManagementOwned": &dcl.Property{
Type: "boolean",
GoName: "ServiceManagementOwned",
ReadOnly: true,
Description: "Output only. If set, this SLO is managed at the [Service Management](https://cloud.google.com/service-management/overview) level. Therefore the service yaml file is the source of truth for this SLO, and API `Update` and `Delete` operations are forbidden.",
Immutable: true,
},
"userLabels": &dcl.Property{
Type: "object",
AdditionalProperties: &dcl.Property{
Type: "string",
},
GoName: "UserLabels",
Description: "Labels which have been used to annotate the service-level objective. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value.",
},
},
},
},
},
},
}
}
|
package services
import (
"github.com/martinyonathann/bookstore_items-api/domain/items"
"github.com/martinyonathann/bookstore_items-api/utils/errors"
)
// ItemsService is the package-level singleton implementing
// itemsServiceInterface; callers use it instead of constructing their own.
var (
    ItemsService itemsServiceInterface = &itemsService{}
)

// itemsService is the default (stateless) implementation of
// itemsServiceInterface.
type itemsService struct {
}

// itemsServiceInterface describes the item operations exposed by this package.
type itemsServiceInterface interface {
    GetItemByID(int64) (*items.Item, *errors.RestErr)
    GetAll(string) (items.Items, *errors.RestErr)
    CreateBook(items.Item) (*items.Item, *errors.RestErr)
}
// GetAll returns every item whose active flag matches flagActive,
// delegating to the items DAO.
func (s *itemsService) GetAll(flagActive string) (items.Items, *errors.RestErr) {
    item := &items.Item{}
    return item.GetAllBooks(flagActive)
}
// GetItemByID loads the item with the given id, returning a RestErr
// when the lookup fails.
func (s *itemsService) GetItemByID(itemsID int64) (*items.Item, *errors.RestErr) {
    item := &items.Item{ID: itemsID}
    if getErr := item.Get(); getErr != nil {
        return nil, getErr
    }
    return item, nil
}
// CreateBook persists a new book. New books are always stored as active
// (FlagActive = "1"), regardless of what the caller passed in.
func (s *itemsService) CreateBook(book items.Item) (*items.Item, *errors.RestErr) {
    book.FlagActive = "1"
    if createErr := book.CreateBook(); createErr != nil {
        return nil, createErr
    }
    return &book, nil
}
|
/*
I can't believe we don't have this already.. It's one of the most important data-structures in programming, yet still simple enough to implement it in a code-golf:
Challenge
Your task is to implement a stack that allows pushing and popping numbers, to test your implementation and keep I/O simple we'll use the following setup:
Input will be a list of non-negative integers
Every positive integer n indicates a push(n) and every 0 indicates a pop() - discarding the top element.
Output will be the resulting stack
Rules
Input will be a list of non-negative integers in any default I/O format
you may use a negative integer to signify the end of a stream of integers
Output will be a list/matrix/.. of the resulting stack
your choice where the top element will be (at the beginning or end), the output just has to be consistent
output is flexible (eg. integers separated by new-lines would be fine), the only thing that matters is the order
you may use a negative integer to signify the bottom of the stack
You're guaranteed that there will never be a 0 when the stack is empty
Examples
[] -> []
[1] -> [1]
[1,0,2] -> [2]
[4,0,1,12] -> [12,1]
[8,3,1,2,3] -> [3,2,1,3,8]
[1,3,7,0,0,0] -> []
[13,0,13,10,1,0,1005,5,0,0,0] -> [13]
[12,3,0,101,11,1,0,0,14,0,28] -> [28,101,12]
*/
package main
import (
"fmt"
"reflect"
)
// main runs the stack implementation against the challenge's example cases.
func main() {
    cases := []struct{ in, want []int }{
        {[]int{}, []int{}},
        {[]int{1}, []int{1}},
        {[]int{1, 0, 2}, []int{2}},
        {[]int{4, 0, 1, 12}, []int{12, 1}},
        {[]int{8, 3, 1, 2, 3}, []int{3, 2, 1, 3, 8}},
        {[]int{1, 3, 7, 0, 0, 0}, []int{}},
        {[]int{13, 0, 13, 10, 1, 0, 1005, 5, 0, 0, 0}, []int{13}},
        {[]int{12, 3, 0, 101, 11, 1, 0, 0, 14, 0, 28}, []int{28, 101, 12}},
    }
    for _, c := range cases {
        test(c.in, c.want)
    }
}
// assert panics when x is false; used as a minimal test harness.
func assert(x bool) {
    if x {
        return
    }
    panic("assertion failed")
}
// test runs stacky on input a, prints the resulting stack (the challenge
// allows flexible output), and panics via assert if it differs from r.
func test(a, r []int) {
    p := stacky(a)
    fmt.Println(p)
    assert(reflect.DeepEqual(p, r))
}
// stacky interprets a as a sequence of stack operations: a positive value n
// pushes n, a 0 pops (ignored on an empty stack). The resulting stack is
// returned top-first.
func stacky(a []int) []int {
    stack := []int{}
    for _, v := range a {
        switch {
        case v != 0:
            stack = append(stack, v)
        case len(stack) > 0:
            stack = stack[:len(stack)-1]
        }
    }
    // Emit top-first by walking the stack from its end.
    out := []int{}
    for i := len(stack) - 1; i >= 0; i-- {
        out = append(out, stack[i])
    }
    return out
}
|
package main
import (
"context"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"google.golang.org/grpc"
"grpc-training/blog/blogpb"
"log"
"net"
"os"
"os/signal"
)
var collection *mongo.Collection
// main wires up the blog gRPC service: it connects to MongoDB, serves gRPC
// on 0.0.0.0:50051 in a background goroutine, then blocks until Ctrl-C and
// shuts the server, listener, and database connection down in order.
func main() {
    // Include file:line in log output to ease debugging.
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    const URL = "mongodb://localhost:27017"
    clientOptions := options.Client().ApplyURI(URL)
    client, err := mongo.NewClient(clientOptions)
    if err != nil {
        log.Fatalf("Error while creating MongoDB connection: %v\n", err)
    }
    log.Println("Connection to MongoDB")
    if err := client.Connect(context.Background()); err != nil {
        log.Fatalf("Error while connecting to MongoDB: %v\n", err)
    }
    collection = client.Database("blogdb").Collection("blog")
    log.Println("Blog Service Started")
    listener, err := net.Listen("tcp", "0.0.0.0:50051")
    if err != nil {
        log.Fatalf("Failed to listen: %v\n", err)
    }
    server := grpc.NewServer([]grpc.ServerOption{}...)
    blogpb.RegisterBlogServiceServer(server, &BlogServer{})
    // Serve in a goroutine so main can block on the interrupt signal below.
    go func(server *grpc.Server) {
        log.Println("Starting the server")
        if err := server.Serve(listener); err != nil {
            log.Fatalf("Failed to serve: %v\n", err)
        }
    }(server)
    // Wait for Control-C to exit
    interruptChannel := make(chan os.Signal, 1)
    signal.Notify(interruptChannel, os.Interrupt)
    // Block until a signal is received
    <-interruptChannel
    log.Println("Stopping the server")
    server.Stop()
    log.Println("Closing the listener")
    _ = listener.Close()
    log.Println("Closing MongoDB connection")
    if err := client.Disconnect(context.Background()); err != nil {
        // Fixed log-message typo ("connhection").
        log.Fatalf("Error while closing MongoDB connection: %v\n", err)
    }
    log.Println("Program terminated")
}
|
package controllers
import (
"github.com/gin-gonic/gin"
"github.com/hernancabral/Library/api/models"
"github.com/hernancabral/Library/api/utils"
"github.com/hernancabral/Library/api/utils/formaterror"
"net/http"
"strconv"
"time"
)
// PostBook creates a new book from the JSON request body.
// Responds 400 on a malformed body or validation failure, 500 on a
// database error, and 201 Created with the stored record on success.
func (server *Server) PostBook(c *gin.Context) {
    // clear previous error if any
    errList = map[string]string{}
    var input models.BookRequest
    // If the JSON body cannot be parsed into a BookRequest, abort with 400.
    if err := c.BindJSON(&input); err != nil {
        errList["Invalid_request"] = "Invalid Request"
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  errList,
        })
        return
    }
    // Validate once and reuse the result (previously Validate ran twice).
    if validationErrors := models.Validate(&input); len(validationErrors) > 0 {
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  validationErrors,
        })
        return
    }
    newBook := models.Book{
        Title:     input.Title,
        Author1:   input.Author1,
        Author2:   input.Author2,
        Author3:   input.Author3,
        Pages:     input.Pages,
        ISBN:      input.ISBN,
        Year:      input.Year,
        Genre:     input.Genre,
        Publisher: input.Publisher,
        CreatedAt: time.Time{},
        UpdatedAt: time.Time{},
    }
    updatedBook, err := models.SaveBook(server.DB, &newBook)
    if err != nil {
        errList := formaterror.FormatError(err.Error())
        c.JSON(http.StatusInternalServerError, gin.H{
            "status": http.StatusInternalServerError,
            "error":  errList,
        })
        return
    }
    // The HTTP status and the body's "status" previously disagreed
    // (200 vs 201); creation consistently reports 201 now.
    c.JSON(http.StatusCreated, gin.H{
        "status":   http.StatusCreated,
        "response": updatedBook,
    })
}
// GetBooks responds with every book in the store: 500 with an error map
// when the lookup fails, otherwise 200 with the book list.
func (server *Server) GetBooks(c *gin.Context) {
    // clear previous error if any
    errList = map[string]string{}
    books, err := models.FindAllBooks(server.DB)
    if err != nil {
        errList["No_books"] = "No books Found"
        c.JSON(http.StatusInternalServerError, gin.H{
            "status": http.StatusInternalServerError,
            "error": errList,
        })
        return
    }
    c.JSON(http.StatusOK, gin.H{
        "status": http.StatusOK,
        "response": books,
    })
}
// GetBookById responds with the book matching the :id path parameter:
// 400 for a non-numeric id, 404 when no such book exists, 200 otherwise.
func (server *Server) GetBookById(c *gin.Context) {
    // reset any errors left over from a previous request
    errList = map[string]string{}
    parsedID, parseErr := strconv.ParseUint(c.Param("id"), 10, 32)
    if parseErr != nil {
        errList["Invalid_request"] = "Invalid Request"
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  errList,
        })
        return
    }
    book, findErr := models.FindBookByID(server.DB, uint32(parsedID))
    if findErr != nil {
        errList["No_book"] = "No Book Found"
        c.JSON(http.StatusNotFound, gin.H{
            "status": http.StatusNotFound,
            "error":  errList,
        })
        return
    }
    c.JSON(http.StatusOK, gin.H{
        "status":   http.StatusOK,
        "response": book,
    })
}
// UpdateBook updates the book identified by the :id path parameter with the
// JSON request body. Responds 400 on a malformed body, invalid id, or
// validation failure; 404 when the book does not exist; 500 on a database
// error; and 200 with the updated record on success.
func (server *Server) UpdateBook(c *gin.Context) {
    // clear previous error if any
    errList = map[string]string{}
    var input models.BookRequest
    // If the JSON body cannot be parsed into a BookRequest, abort with 400.
    if err := c.BindJSON(&input); err != nil {
        errList["Invalid_request"] = "Invalid Request"
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  errList,
        })
        return
    }
    // Check if the id is valid
    bookID := c.Param("id")
    id, err := strconv.ParseUint(bookID, 10, 32)
    if bookID == "" || !utils.IsNumeric(bookID) || err != nil {
        errList["invalid_id"] = "invalid_id"
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  errList,
        })
        return
    }
    // Return the actual validation errors (this branch previously mislabeled
    // body-validation failures as "invalid_id" and discarded the messages).
    if validationErrors := models.Validate(&input); len(validationErrors) > 0 {
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  validationErrors,
        })
        return
    }
    // Check the book exists; respond 404 otherwise. The original placed
    // http.NotFound (a handler func, not a status code) in the body, which
    // cannot be JSON-marshaled — use http.StatusNotFound.
    _, err = models.FindBookByID(server.DB, uint32(id))
    if err != nil {
        errList["book_not_exist"] = "the book does not exist"
        c.JSON(http.StatusNotFound, gin.H{
            "status": http.StatusNotFound,
            "error":  errList,
        })
        return
    }
    newBook := models.Book{
        ID:        uint32(id),
        Title:     input.Title,
        Author1:   input.Author1,
        Author2:   input.Author2,
        Author3:   input.Author3,
        Pages:     input.Pages,
        ISBN:      input.ISBN,
        Year:      input.Year,
        Genre:     input.Genre,
        Publisher: input.Publisher,
        CreatedAt: time.Time{},
        UpdatedAt: time.Time{},
    }
    updatedBook, err := models.UpdateBook(server.DB, &newBook, uint32(id))
    if err != nil {
        errList := formaterror.FormatError(err.Error())
        c.JSON(http.StatusInternalServerError, gin.H{
            "status": http.StatusInternalServerError,
            "error":  errList,
        })
        return
    }
    c.JSON(http.StatusOK, gin.H{
        "status":   http.StatusOK,
        "response": updatedBook,
    })
}
// DeleteBook removes the book identified by the :id path parameter:
// 400 for a non-numeric id, 500 when deletion fails, 200 otherwise.
func (server *Server) DeleteBook(c *gin.Context) {
    // reset any errors left over from a previous request
    errList = map[string]string{}
    parsedID, parseErr := strconv.ParseUint(c.Param("id"), 10, 32)
    if parseErr != nil {
        errList["Invalid_request"] = "Invalid Request"
        c.JSON(http.StatusBadRequest, gin.H{
            "status": http.StatusBadRequest,
            "error":  errList,
        })
        return
    }
    deleted, delErr := models.DeleteBook(server.DB, uint32(parsedID))
    if delErr != nil {
        errList["error_deleting"] = "Error deleting book"
        c.JSON(http.StatusInternalServerError, gin.H{
            "status": http.StatusInternalServerError,
            "error":  errList,
        })
        return
    }
    c.JSON(http.StatusOK, gin.H{
        "status":   http.StatusOK,
        "response": deleted,
    })
}
|
package users
import (
"errors"
"net/http"
"cinemo.com/shoping-cart/internal/errorcode"
"github.com/gorilla/mux"
)
// Handlers handles users routes
func Handlers(r *mux.Router, service Service) {
r.Path("/login").Methods(http.MethodPost).HandlerFunc(LoginHandlers(service))
r.Path("/signup").Methods(http.MethodPost).HandlerFunc(SignUpHandler(service))
}
// statusAndErrorCodeForServiceError maps a service-layer error to the HTTP
// status and API error code to return: 400 for validation errors, 500 for
// database errors, and 500 with a generic code for anything else.
func statusAndErrorCodeForServiceError(err error) (int, string) {
    switch {
    case errors.As(err, &errorcode.ValidationError{}):
        return http.StatusBadRequest, errorcode.ErrorsInRequestData
    case errors.As(err, &errorcode.DBError{}):
        return http.StatusInternalServerError, errorcode.DatabaseProcessError
    default:
        return http.StatusInternalServerError, errorcode.InternalError
    }
}
|
package stateStore
import (
dbComm "github.com/HNB-ECO/HNB-Blockchain/HNB/db/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/ledger/stateStore/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/logging"
"bytes"
)
// stateStore implements common.StateStore on top of a KV store,
// with a write-through in-memory cache in front of it.
type stateStore struct {
    cache *StateCache
    db dbComm.KVStore
}
// NewStateStore builds a StateStore over db with a fresh cache and
// initializes the package logger.
func NewStateStore(db dbComm.KVStore) common.StateStore {
    StateLog = logging.GetLogIns()
    return &stateStore{
        db:    db,
        cache: NewStateCache(),
    }
}
// GetState returns the value stored under key for chain chainID,
// consulting the in-memory cache first and reading through to the
// underlying KV store on a cache miss.
func (ss *stateStore) GetState(chainID string, key []byte) ([]byte, error) {
    var err error
    // Keys are namespaced per chain as "<chainID>#<key>".
    key1 := BytesCombine([]byte(chainID+"#"), key)
    v := ss.cache.GetState(key1)
    if v == nil {
        // Cache miss (or cached nil): fall back to the database.
        v, err = ss.db.Get(key1)
    }
    StateLog.Debugf(LOGTABLE_STATE, "get state db key:%v value:%v", string(key1), string(v))
    return v, err
}
// SetState writes state under the chain-namespaced key, updating the
// cache and persisting to the underlying KV store.
func (ss *stateStore) SetState(chainID string, key []byte, state []byte) error {
    key1 := BytesCombine([]byte(chainID+"#"), key)
    ss.cache.SetState(key1, state)
    StateLog.Debugf(LOGTABLE_STATE, "set state db key:%v value:%v", string(key1), string(state))
    return ss.db.Put(key1, state)
}
// DeleteState removes the chain-namespaced key from both the cache and
// the underlying KV store.
func (ss *stateStore) DeleteState(chainID string, key []byte) error {
    key1 := BytesCombine([]byte(chainID+"#"), key)
    ss.cache.DeleteState(key1)
    StateLog.Debugf(LOGTABLE_STATE, "remove state db key:%v", string(key1))
    return ss.db.Delete(key1)
}
// BytesCombine concatenates the given byte slices into one new slice.
func BytesCombine(pBytes ...[]byte) []byte {
    size := 0
    for _, p := range pBytes {
        size += len(p)
    }
    combined := make([]byte, 0, size)
    for _, p := range pBytes {
        combined = append(combined, p...)
    }
    return combined
}
|
package tools
import (
"crypto/md5"
"encoding/hex"
"io/ioutil"
)
func Md5(file string) (error, string) {
data, err := ioutil.ReadFile(file)
if err != nil {
return err, ""
}
md5Ctx := md5.New()
md5Ctx.Write(data)
cipherStr := md5Ctx.Sum(nil)
return nil, hex.EncodeToString(cipherStr)
}
|
package api
import (
"github.com/gin-gonic/gin"
r "github.com/huhaophp/eblog/controllers"
"github.com/huhaophp/eblog/models"
"github.com/huhaophp/eblog/request"
"github.com/unknwon/com"
)
// ArticleIndex 标签列表
func ArticleIndex(c *gin.Context) {
title := c.Query("title")
state := com.StrTo(c.DefaultQuery("state", "-1")).MustInt()
offset := com.StrTo(c.DefaultQuery("offset", "0")).MustInt()
limit := com.StrTo(c.DefaultQuery("limit", "20")).MustInt()
where := &models.Article{Title: title, State: state}
total := models.GetArticlesTotal(where)
articles := models.GetArticles(where, limit, offset)
r.Json(c, 0, "", gin.H{
"items": articles, "total": total,
})
}
// ArticleAdd 文章添加
func ArticleAdd(c *gin.Context) {
validErr, article := request.ArticleAddRequestValid(c)
if validErr != nil {
r.Json(c, 422, validErr.Error(), gin.H{})
return
}
if addErr := models.AddArticle(&article); addErr != nil {
r.Json(c, 422, addErr.Error(), gin.H{})
} else {
r.Json(c, 0, "添加成功", gin.H{})
}
}
// ArticleEdit 文章修改
func ArticleEdit(c *gin.Context) {
validErr, article := request.ArticleAddRequestValid(c)
if validErr != nil {
r.Json(c, 422, validErr.Error(), gin.H{})
return
}
id := com.StrTo(c.Param("id")).MustInt()
if editErr := models.EditArticle(id, &article); editErr != nil {
r.Json(c, 422, editErr.Error(), gin.H{})
} else {
r.Json(c, 0, "编辑文章成功", article)
}
}
// ArticleDelete 文章删除
func ArticleDelete(c *gin.Context) {
id := com.StrTo(c.Param("id")).MustInt()
if delErr := models.DelArticle(id); delErr != nil {
r.Json(c, 422, delErr.Error(), gin.H{})
} else {
r.Json(c, 0, "删除成功", gin.H{})
}
}
|
/*
* @lc app=leetcode id=28 lang=golang
*
* [28] Implement strStr()
*/
// strStr returns the index of the first occurrence of needle in haystack,
// or -1 if needle does not occur. An empty needle matches at index 0.
func strStr(haystack string, needle string) int {
    if len(needle) == 0 {
        return 0
    }
    // Slide a needle-sized window over haystack and compare directly.
    for start := 0; start+len(needle) <= len(haystack); start++ {
        if haystack[start:start+len(needle)] == needle {
            return start
        }
    }
    return -1
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-09 10:09
# @File : _236_Lowest_Common_Ancestor_of_a_Binary_Tree.go
# @Description : 找到2个节点的最近公共祖先
分治法
1.
# @Attention :
*/
package v0
// lowestCommonAncestor returns the lowest common ancestor of p and q in
// the tree rooted at root (divide and conquer): if p and q end up in
// different subtrees the current node is the answer; otherwise the answer
// bubbles up from whichever subtree found a match. Returns nil when
// neither node is present.
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
    // A nil root returns nil; a root that IS p or q is its own ancestor.
    if root == nil || root == p || root == q {
        return root
    }
    left := lowestCommonAncestor(root.Left, p, q)
    right := lowestCommonAncestor(root.Right, p, q)
    switch {
    case left != nil && right != nil:
        // p and q were found on opposite sides: root is the LCA.
        return root
    case left != nil:
        return left
    default:
        return right
    }
}
|
package structs
// Group is a single group record as returned by the API.
type Group struct {
    CreatedAt int `json:"createdAt"`
    GroupId string `json:"groupId"`
    Users []string `json:"users"`
    UserCount int `json:"userCount"`
    APICallId string `json:"apiCallId"`
}

// GetAllGroupsReturn is the API response envelope for listing all groups.
type GetAllGroupsReturn struct {
    Message string `json:"message"`
    Count int `json:"count"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    Groups []Group `json:"groups"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// GetGroupReturn is the API response envelope for fetching one group.
type GetGroupReturn struct {
    Message string `json:"message"`
    CreatedAt int `json:"createdAt"`
    Users []string `json:"users"`
    UserCount int `json:"userCount"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// CheckGroupExistsReturn is the API response for a group-existence check.
type CheckGroupExistsReturn struct {
    Message string `json:"message"`
    Exists bool `json:"exists"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// CreateGroupReturn is the API response for creating a group.
type CreateGroupReturn struct {
    Message string `json:"message"`
    Description string `json:"description"`
    GroupId string `json:"groupId"`
    Status int `json:"status"`
    CreatedAt int `json:"createdAt"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// AddUserToGroupReturn is the API response for adding a user to a group.
type AddUserToGroupReturn struct {
    Message string `json:"message"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// RemoveUserFromGroupReturn is the API response for removing a user from a group.
type RemoveUserFromGroupReturn struct {
    Message string `json:"message"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}

// DeleteGroupReturn is the API response for deleting a group.
type DeleteGroupReturn struct {
    Message string `json:"message"`
    Status int `json:"status"`
    TimeTaken string `json:"timeTaken"`
    ResponseCode string `json:"responseCode"`
    APICallId string `json:"apiCallId"`
}
|
package distance
import (
"testing"
"github.com/stretchr/testify/require"
)
// input pairs a point value with its expected distance.
// NOTE(review): the encoding of `point` and the meaning of `dist` are
// defined by Distance elsewhere in this package — confirm there.
var input = []struct {
    point int
    dist float64
}{
    {1, 0},
    {12, 3},
    {23, 2},
    {1024, 31},
    {312051, 430},
}
// TestDistance checks Distance against the table-driven cases in input.
func TestDistance(t *testing.T) {
    assert := require.New(t)
    for _, in := range input {
        assert.Equal(in.dist, Distance(in.point))
    }
}
// TestCeil pins one known Ceil result.
// NOTE(review): 312453 is not a plain numeric ceiling of 312051 — Ceil's
// contract is package-specific; confirm against its implementation.
func TestCeil(t *testing.T) {
    assert := require.New(t)
    assert.Equal(312453, Ceil(312051))
}
|
package consensus
import (
"fmt"
"github.com/hashicorp/raft"
)
// keep a map of rafts for later
var rafts map[raft.ServerAddress]*raft.Raft
func init() {
rafts = make(map[raft.ServerAddress]*raft.Raft)
}
// raftSet stores all the setup material we need
type raftSet struct {
Config *raft.Config
Store *raft.InmemStore
SnapShotStore raft.SnapshotStore
FSM *FSM
Transport raft.LoopbackTransport
Configuration raft.Configuration
}
// generate n raft sets to bootstrap the raft cluster
func getRaftSet(num int) []*raftSet {
rs := make([]*raftSet, num)
servers := make([]raft.Server, num)
for i := 0; i < num; i++ {
addr := raft.ServerAddress(fmt.Sprint(i))
_, transport := raft.NewInmemTransport(addr)
servers[i] = raft.Server{
Suffrage: raft.Voter,
ID: raft.ServerID(addr),
Address: addr,
}
config := raft.DefaultConfig()
config.LocalID = raft.ServerID(addr)
rs[i] = &raftSet{
Config: config,
Store: raft.NewInmemStore(),
SnapShotStore: raft.NewInmemSnapshotStore(),
FSM: NewFSM(),
Transport: transport,
}
}
// configuration needs to be consistent between
// services and so we need the full serverlist in this
// case
for _, r := range rs {
r.Configuration = raft.Configuration{Servers: servers}
}
return rs
}
|
package NFA
// DFARule is a single transition: from State, on Character, to NextState.
type DFARule struct {
    State int32
    Character int32
    NextState int32
}

// AppliesTo reports whether this rule fires for (state, character).
func (d DFARule) AppliesTo(state, character int32) bool {
    return d.State == state && d.Character == character
}

// Follow returns the state this rule transitions to.
func (d DFARule) Follow() int32 {
    return d.NextState
}
// DFARulebook holds the full transition table of the automaton.
type DFARulebook struct {
    Rules []DFARule
}
// AddRule appends the transition (startState, switchCharacter) -> nextState
// to the rulebook.
func (d *DFARulebook) AddRule(startState, switchCharacter, nextState int32) {
    rule := DFARule{
        State:     startState,
        Character: switchCharacter,
        NextState: nextState,
    }
    d.Rules = append(d.Rules, rule)
}
// NextStates returns the set of rules that fire when reading character
// from any of the given states.
func (d DFARulebook) NextStates(states []int32, character int32) map[DFARule]struct{} {
    matched := make(map[DFARule]struct{})
    for _, state := range states {
        d.followRulesFor(state, character, matched)
    }
    return matched
}
// followRulesFor adds to followRules every rule that applies to
// (state, character). The original implementation additionally looped over
// the collected rules calling Follow(), but discarded every result — that
// dead loop has been removed; behavior is unchanged (Follow has no side
// effects, it only returns NextState).
func (d DFARulebook) followRulesFor(state, character int32, followRules map[DFARule]struct{}) {
    d.rulesFor(state, character, followRules)
}
// rulesFor inserts into followRules every rule applying to (state, character).
func (d DFARulebook) rulesFor(state, character int32, followRules map[DFARule]struct{}) {
    for _, rule := range d.Rules {
        if !rule.AppliesTo(state, character) {
            continue
        }
        followRules[rule] = struct{}{}
    }
}
// followFreeMoves recursively expands nextStates with every state
// reachable via free moves (character -1 is used as the free-move
// marker), stopping once a pass adds nothing new.
func (d *DFARulebook) followFreeMoves(nextStates []int32) []int32 {
	freeRules := d.NextStates(nextStates, -1)
	reached := make([]int32, 0, len(freeRules))
	for rule := range freeRules {
		reached = append(reached, rule.NextState)
	}
	// Fixpoint reached: everything newly followed was already known.
	if subset(reached, nextStates) {
		return nextStates
	}
	return d.followFreeMoves(append(nextStates, reached...))
}
// subset reports whether first is a multiset subset of second: every
// value in first must appear in second at least as many times as it
// appears in first.
func subset(first, second []int32) bool {
	remaining := make(map[int32]int32, len(second))
	for _, v := range second {
		remaining[v]++
	}
	for _, v := range first {
		// A missing key yields 0, covering both "not present" and
		// "already used up".
		if remaining[v] < 1 {
			return false
		}
		remaining[v]--
	}
	return true
}
|
package ShopPositions
// MaxProfit is an unimplemented placeholder: it always returns 0 and
// none of its parameters are used.
// NOTE(review): the intended algorithm (presumably a shop-positions
// profit optimization over n, m and cost slice c) cannot be inferred
// from this stub — confirm the specification before implementing.
func MaxProfit(n int, m int, c []int) int {
	return 0
}
|
package linters
import (
"go/ast"
"strings"
"golang.org/x/tools/go/analysis"
)
// TodoAnalyzer is an analysis pass that reports TODO comments written
// without an author, e.g. "// TODO:" or "// TODO():".
var TodoAnalyzer = &analysis.Analyzer{
	Name: "todo",
	Doc:  "finds todos without author",
	Run:  run,
}
// run reports TODO comments that carry no author, i.e. comments starting
// with "// TODO:" or "// TODO():" rather than "// TODO(author):".
//
// Fixes two defects in the original:
//   - ast.Inspect does not visit free-floating comments (only Doc comment
//     groups attached to declarations), so most TODOs were never seen;
//     the file's comment groups are walked directly instead.
//   - Diagnostic.End was the invalid position 0; it now uses the
//     comment's actual end position.
func run(pass *analysis.Pass) (interface{}, error) {
	for _, file := range pass.Files {
		for _, group := range file.Comments {
			for _, comment := range group.List {
				// comment.Text is the raw source text including the
				// leading "//" marker.
				if strings.HasPrefix(comment.Text, "// TODO:") || strings.HasPrefix(comment.Text, "// TODO():") {
					pass.Report(analysis.Diagnostic{
						Pos:      comment.Pos(),
						End:      comment.End(),
						Category: "todo",
						Message:  "TODO comment has no author",
					})
				}
			}
		}
	}
	return nil, nil
}
|
package utils
import (
"crypto/md5"
"encoding/json"
"fmt"
"strconv"
"strings"
)
// Atoi converts a string to an int, ignoring surrounding whitespace.
// It panics if the trimmed string is not a valid decimal integer.
func Atoi(i string) int {
	trimmed := strings.TrimSpace(i)
	n, err := strconv.Atoi(trimmed)
	if err != nil {
		panic(err)
	}
	return n
}
// Itoa converts an int64 to its decimal string representation.
// It formats the int64 directly: the previous int(i) conversion silently
// truncated values outside the int range on 32-bit platforms.
func Itoa(i int64) string {
	return strconv.FormatInt(i, 10)
}
// MaxInt returns the larger of a and b.
func MaxInt(a, b int) int {
	if b >= a {
		return b
	}
	return a
}
// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if b <= a {
		return b
	}
	return a
}
// JsonMarshalOb serializes ob to a JSON string, panicking on marshal
// failure. The unreachable `return ""` that followed the panic in the
// original has been removed (go vet flags such dead code).
func JsonMarshalOb(ob interface{}) string {
	bt, err := json.Marshal(ob)
	if err != nil {
		panic(err)
	}
	return string(bt)
}
// JsonUnMarshalOb decodes the JSON document in obstr into ob, returning
// any decode error unchanged.
func JsonUnMarshalOb(obstr string, ob interface{}) error {
	return json.Unmarshal([]byte(obstr), ob)
}
// Md5 returns the MD5 digest of s as an uppercase hex string.
func Md5(s string) string {
	sum := md5.Sum([]byte(s))
	return fmt.Sprintf("%X", sum)
}
// Md5Lower returns the MD5 digest of s as a lowercase hex string.
func Md5Lower(s string) string {
	sum := md5.Sum([]byte(s))
	return fmt.Sprintf("%x", sum)
}
|
//go:build !windows && !plan9
// +build !windows,!plan9

package main
import (
"log"
"github.com/facebookgo/grace/gracehttp"
"github.com/labstack/echo/engine/standard"
)
// gracefulRun serves the embedded http.Server with graceful restart
// support. gracehttp.Serve blocks for the life of the process; whatever
// it eventually returns (including nil) is passed to log.Fatal, which
// logs it and exits — matching the original behavior exactly.
func gracefulRun(std *standard.Server) {
	err := gracehttp.Serve(std.Server)
	log.Fatal(err)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.