text stringlengths 11 4.05M |
|---|
package cli
import (
"encoding/json"
"fmt"
"github.com/kohirens/stdlib"
"io/fs"
"os"
"path/filepath"
"strings"
"text/template"
"text/template/parse"
)
const (
TmplManifest = "template.json"
)
// GenerateATemplateManifest Make a JSON file with your templates placeholders.
func GenerateATemplateManifest(tmplPath string, fec *stdlib.FileExtChecker, excludes []string) (map[string]string, error) {
	if !stdlib.PathExist(tmplPath) {
		return nil, fmt.Errorf(Errors.pathNotExist, tmplPath)
	}

	// Walk the template directory recursively, filtering out files that
	// should be excluded.
	files, err := ManifestParseDir(tmplPath, fec, excludes)
	if err != nil {
		return nil, err
	}

	// Parse each remaining file as a Go template and collect every action
	// (placeholder) found across all of them.
	placeholders := make(map[string]string)
	for _, file := range files {
		fmt.Printf("checking %v\n", file)

		parsed, parseErr := template.ParseFiles(file)
		if parseErr != nil {
			return nil, fmt.Errorf(Errors.parsingFile, file, parseErr.Error())
		}

		ListTemplateFields(parsed, placeholders)
	}

	// Persist the collected placeholders as the template manifest.
	if saveErr := saveFile(tmplPath+PS+TmplManifest, placeholders); saveErr != nil {
		return nil, saveErr
	}

	return placeholders, nil
}
// ListTemplateFields lists the actions (placeholders) found in the parsed
// template t, recording each one as a key of res.
// See SO answer: https://stackoverflow.com/a/40584967/419097
func ListTemplateFields(t *template.Template, res map[string]string) {
	listNodeFields(t.Tree.Root, res)
}
// ManifestParseDir Recursively walk a directory parsing all files along the way as Go templates.
func ManifestParseDir(path string, fec *stdlib.FileExtChecker, excludes []string) ([]string, error) {
	// Normalize the path separator so comparisons work on any platform.
	nPath := strings.ReplaceAll(path, "/", PS)
	nPath = strings.ReplaceAll(nPath, "\\", PS)

	var files []string

	// Recursively walk the template directory.
	err := filepath.Walk(nPath, func(fPath string, info fs.FileInfo, wErr error) error {
		file, fErr := filterFile(fPath, nPath, info, wErr, fec, excludes)
		// BUG FIX: this previously tested the walk error (already handled
		// inside filterFile) instead of the filter result, silently
		// discarding filter errors.
		if fErr != nil {
			return fErr
		}
		if file != "" {
			files = append(files, file)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return files, nil
}
// filterFile decides whether sourcePath belongs in the manifest scan. It
// returns the path to keep, or "" to skip the file, and propagates any error
// handed in by the directory walk.
func filterFile(sourcePath, nPath string, info os.FileInfo, wErr error, fec *stdlib.FileExtChecker, excludes []string) (string, error) {
	if wErr != nil {
		return "", wErr
	}

	// Directories themselves are never parsed as templates.
	if info.IsDir() {
		return "", nil
	}

	// Skip anything under the .git directory.
	if strings.Contains(sourcePath, PS+gitDir+PS) {
		return "", nil
	}

	currFile := filepath.Base(sourcePath)

	// TODO: Add globbing support via filepath.Glob(pattern).
	// Use an exclusion list; include every file by default.
	if currFile == EmptyFile || currFile == TmplManifest {
		return "", nil
	}

	// Normalize the path separator before comparing paths.
	normSourcePath := strings.ReplaceAll(sourcePath, "/", PS)
	normSourcePath = strings.ReplaceAll(normSourcePath, "\\", PS)

	// Skip files that are listed in the excludes.
	if excludes != nil {
		fileToCheck := strings.ReplaceAll(normSourcePath, nPath, "")
		fileToCheck = strings.ReplaceAll(fileToCheck, PS, "")
		for _, exclude := range excludes {
			// BUG FIX: the second ReplaceAll previously re-read `exclude`
			// instead of chaining, discarding the backslash stripping done on
			// the line before, so excludes containing backslashes never matched.
			normExclude := strings.ReplaceAll(exclude, "\\", "")
			normExclude = strings.ReplaceAll(normExclude, "/", "")
			if normExclude == fileToCheck {
				return "", nil
			}
		}
	}

	return sourcePath, nil
}
// ListTemplateFields list actions in Go templates. See SO answer: https://stackoverflow.com/a/40584967/419097
func listNodeFields(node parse.Node, res map[string]string) {
if node.Type() == parse.NodeAction {
res[strings.Trim(node.String(), "{}.")] = ""
}
if ln, ok := node.(*parse.ListNode); ok {
for _, n := range ln.Nodes {
listNodeFields(n, res)
}
}
}
// templateSchema is the data handed to the manifest template when rendering
// the template.json file.
type templateSchema struct {
	Placeholders []byte
}

// saveFile writes the collected placeholder actions to jsonFile as the
// template manifest.
func saveFile(jsonFile string, actions map[string]string) error {
	data, e1 := json.Marshal(actions)
	if e1 != nil {
		return fmt.Errorf(Errors.encodingJson, jsonFile, e1.Error())
	}

	tmpl := template.Must(template.New(tmplJsonTmpl).Parse(tmplJsonTmpl))

	f, e2 := os.Create(jsonFile)
	if e2 != nil {
		return e2
	}
	// BUG FIX: the file handle was previously never closed (resource leak).
	defer f.Close()

	// Write the template.json manifest to disk.
	if e := tmpl.Execute(f, templateSchema{Placeholders: data}); e != nil {
		return fmt.Errorf(Errors.savingManifest, jsonFile, e.Error())
	}

	return nil
}
|
package clusters
import (
envoy_cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/kumahq/kuma/pkg/xds/envoy/endpoints/v3"
)
// DnsClusterConfigurer configures an Envoy cluster that resolves its single
// endpoint via strict DNS.
type DnsClusterConfigurer struct {
	Name    string // Envoy cluster name.
	Address string // endpoint address (DNS name to resolve).
	Port    uint32 // endpoint port.
}

// Compile-time check that DnsClusterConfigurer implements ClusterConfigurer.
var _ ClusterConfigurer = &DnsClusterConfigurer{}

// Configure applies STRICT_DNS discovery, round-robin load balancing, and a
// single static endpoint (built from Address and Port) to the cluster.
func (e *DnsClusterConfigurer) Configure(c *envoy_cluster.Cluster) error {
	c.Name = e.Name
	c.ClusterDiscoveryType = &envoy_cluster.Cluster_Type{Type: envoy_cluster.Cluster_STRICT_DNS}
	c.LbPolicy = envoy_cluster.Cluster_ROUND_ROBIN
	c.LoadAssignment = endpoints.CreateStaticEndpoint(e.Name, e.Address, e.Port)
	return nil
}
|
//nolint:gocritic
package lgtm
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"text/template"
"time"
"github.com/sirupsen/logrus"
"github.com/ti-community-infra/tichi/internal/pkg/ownersclient"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const (
// PluginName will register into prow.
PluginName = "ti-community-lgtm"
// ReviewNotificationName defines the name used in the title for the review notifications.
ReviewNotificationName = "Review Notification"
// ReviewNotificationIdentifier defines the identifier for the review notifications.
ReviewNotificationIdentifier = "Review Notification Identifier"
)
var (
// notificationRegex is the regex that matches the notifications.
notificationRegex = regexp.MustCompile("<!--" + ReviewNotificationIdentifier + "-->$")
// reviewersRegex is the regex that matches the reviewers, such as: - hi-rustin.
reviewersRegex = regexp.MustCompile(`(?i)- [@]*([a-z0-9](?:-?[a-z0-9]){0,38})`)
)
// HelpProvider constructs the PluginHelp for this plugin that takes into
// account enabled repositories.
func HelpProvider(_ *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
	return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
		// Render an example configuration snippet shown on the help page.
		yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
			TiCommunityLgtm: []tiexternalplugins.TiCommunityLgtm{
				{
					Repos:              []string{"ti-community-infra/test-dev"},
					PullOwnersEndpoint: "https://prow-dev.tidb.io/ti-community-owners",
				},
			},
		})
		if err != nil {
			// The help page is still usable without the snippet; warn and continue.
			logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
		}
		pluginHelp := &pluginhelp.PluginHelp{
			Description: "The ti-community-lgtm plugin manages the 'status/LGT{number}' (Looks Good To Me) label.",
			Snippet:     yamlSnippet,
			Events:      []string{tiexternalplugins.PullRequestReviewEvent, tiexternalplugins.PullRequestEvent},
		}
		pluginHelp.AddCommand(pluginhelp.Command{
			Usage:       "Triggered by GitHub review action: 'Approve' or 'Request Changes'.",
			Description: "Add or remove the 'status/LGT{number}' label.",
			Featured:    true,
			WhoCanUse:   "Reviewers of this pull request.",
			Examples: []string{
				"<a href=\"https://help.github.com/articles/about-pull-request-reviews/\">'Approve' or 'Request Changes'</a>"},
		})
		return pluginHelp, nil
	}
}
// githubClient is the subset of the GitHub client API this plugin needs.
// Declaring it locally keeps the dependency narrow and easy to fake in tests.
type githubClient interface {
	AddLabel(owner, repo string, number int, label string) error
	RemoveLabel(owner, repo string, number int, label string) error
	GetIssueLabels(org, repo string, number int) ([]github.Label, error)
	CreateComment(owner, repo string, number int, comment string) error
	EditComment(org, repo string, id int, comment string) error
	ListIssueComments(org, repo string, number int) ([]github.IssueComment, error)
	DeleteComment(org, repo string, ID int) error
	// BotUserChecker returns a predicate reporting whether a login is the bot itself.
	BotUserChecker() (func(candidate string) bool, error)
}
// reviewCtx contains information about each review event: the review author,
// the pull request author, the review body and URL, plus the repo and PR number.
type reviewCtx struct {
	author, issueAuthor, body, htmlURL string
	repo                               github.Repo
	number                             int
}
// HandlePullReviewEvent reacts to a submitted pull request review: "Approve"
// adds the LGTM label, "Request Changes" removes it, anything else is ignored.
func HandlePullReviewEvent(gc githubClient, pullReviewEvent *github.ReviewEvent,
	cfg *tiexternalplugins.Configuration, ol ownersclient.OwnersLoader, log *logrus.Entry) error {
	rc := reviewCtx{
		author:      pullReviewEvent.Review.User.Login,
		issueAuthor: pullReviewEvent.PullRequest.User.Login,
		repo:        pullReviewEvent.Repo,
		number:      pullReviewEvent.PullRequest.Number,
		body:        pullReviewEvent.Review.Body,
		htmlURL:     pullReviewEvent.Review.HTMLURL,
	}

	// Only react to reviews that are being submitted (not edited or dismissed).
	if pullReviewEvent.Action != github.ReviewActionSubmitted {
		return nil
	}

	// The review webhook reports state in lowercase while the review API uses
	// uppercase; normalize so it matches the exported constants.
	state := github.ReviewState(strings.ToUpper(string(pullReviewEvent.Review.State)))

	// Approve means add lgtm; Request Changes means remove it.
	var wantLGTM bool
	switch state {
	case github.ReviewStateApproved:
		wantLGTM = true
	case github.ReviewStateChangesRequested:
		wantLGTM = false
	default:
		return nil
	}

	// Use common handler to do the rest.
	return handle(wantLGTM, cfg, rc, gc, ol, log)
}
// HandlePullRequestEvent posts the initial review notification comment when a
// pull request is opened.
func HandlePullRequestEvent(gc githubClient, pe *github.PullRequestEvent,
	config *tiexternalplugins.Configuration, log *logrus.Entry) error {
	if pe.Action != github.PullRequestActionOpened {
		log.Debug("Not a pull request opened action, skipping...")
		return nil
	}

	var (
		org    = pe.PullRequest.Base.Repo.Owner.Login
		repo   = pe.PullRequest.Base.Repo.Name
		number = pe.PullRequest.Number
	)

	ownersLink := fmt.Sprintf(ownersclient.OwnersURLFmt, config.TichiWebURL, org, repo, number)
	msg, err := getMessage(nil, config.CommandHelpLink, config.PRProcessLink, ownersLink, org, repo)
	if err != nil {
		return err
	}

	return gc.CreateComment(org, repo, number, *msg)
}
// handle updates the LGTM label and the review notification comment for a
// review event. wantLGTM reports whether the reviewer approved (true) or
// requested changes (false). Non-reviewers get an explanatory comment and no
// label change.
func handle(wantLGTM bool, config *tiexternalplugins.Configuration, rc reviewCtx,
	gc githubClient, ol ownersclient.OwnersLoader, log *logrus.Entry) error {
	funcStart := time.Now()
	defer func() {
		log.WithField("duration", time.Since(funcStart).String()).Debug("Completed handle")
	}()

	author := rc.author
	number := rc.number
	body := rc.body
	htmlURL := rc.htmlURL
	org := rc.repo.Owner.Login
	repo := rc.repo.Name

	// fetchErr decorates lookup failures with the PR coordinates.
	fetchErr := func(context string, err error) error {
		return fmt.Errorf("failed to get %s for %s/%s#%d: %v", context, org, repo, number, err)
	}

	// Get ti-community-lgtm config.
	opts := config.LgtmFor(rc.repo.Owner.Login, rc.repo.Name)
	tichiURL := fmt.Sprintf(ownersclient.OwnersURLFmt, config.TichiWebURL, org, repo, number)

	reviewersAndNeedsLGTM, err := ol.LoadOwners(opts.PullOwnersEndpoint, org, repo, number)
	if err != nil {
		return fetchErr("owners info", err)
	}

	reviewers := sets.String{}
	for _, reviewer := range reviewersAndNeedsLGTM.Reviewers {
		reviewers.Insert(reviewer)
	}

	// Non-reviewer approvals are acknowledged with a comment but not counted.
	if !reviewers.Has(author) && wantLGTM {
		resp := "Thanks for your review. "
		resp += "The bot only counts approvals from reviewers and higher roles in [list](" + tichiURL + "), "
		resp += "but you're still welcome to leave your comments."
		log.Infof("Reply approve pull request in comment: \"%s\"", resp)
		return gc.CreateComment(org, repo, number, tiexternalplugins.FormatResponseRaw(body, htmlURL, author, resp))
	}

	// Non-reviewers cannot remove LGTM either.
	if !reviewers.Has(author) && !wantLGTM {
		resp := "Request changes is only allowed for the reviewers in [list](" + tichiURL + ")."
		log.Infof("Reply request changes pull request in comment: \"%s\"", resp)
		return gc.CreateComment(org, repo, number, tiexternalplugins.FormatResponseRaw(body, htmlURL, author, resp))
	}

	labels, err := gc.GetIssueLabels(org, repo, number)
	if err != nil {
		return fetchErr("issue labels", err)
	}

	botUserChecker, err := gc.BotUserChecker()
	if err != nil {
		return fetchErr("bot name", err)
	}

	issueComments, err := gc.ListIssueComments(org, repo, number)
	if err != nil {
		return fetchErr("issue comments", err)
	}

	// Collect all of the bot's notification comments. Only the newest one is
	// kept up to date; the rest are removed by cleanupRedundantNotifications.
	notifications := filterComments(issueComments, notificationMatcher(botUserChecker))
	latestNotification := getLastComment(notifications)
	cleanupRedundantNotifications := func() {
		if len(notifications) != 0 {
			// Delete every notification except the last (newest) one.
			for _, notification := range notifications[:len(notifications)-1] {
				notif := notification
				if err := gc.DeleteComment(org, repo, notif.ID); err != nil {
					log.WithError(err).Errorf("Failed to delete comment from %s/%s#%d, ID: %d.", org, repo, number, notif.ID)
				}
			}
		}
	}

	// Now we update the LGTM labels, having checked all cases where changing.
	// Only add the label if it doesn't have it, and vice versa.
	currentLabel, nextLabel := getCurrentAndNextLabel(tiexternalplugins.LgtmLabelPrefix, labels,
		reviewersAndNeedsLGTM.NeedsLgtm)

	// Remove the label if necessary, we're done after this.
	if currentLabel != "" && !wantLGTM {
		// Reset the notification to the "not approved" message (nil reviewers).
		newMsg, err := getMessage(nil, config.CommandHelpLink, config.PRProcessLink, tichiURL, org, repo)
		if err != nil {
			return err
		}
		// Create or update the review notification comment.
		if latestNotification == nil {
			err := gc.CreateComment(org, repo, number, *newMsg)
			if err != nil {
				return err
			}
		} else {
			err := gc.EditComment(org, repo, latestNotification.ID, *newMsg)
			if err != nil {
				return err
			}
		}
		log.Info("Removing LGTM label.")
		if err := gc.RemoveLabel(org, repo, number, currentLabel); err != nil {
			return err
		}
		// Clean up redundant notifications after we added the new notification.
		cleanupRedundantNotifications()
	} else if nextLabel != "" && wantLGTM {
		reviewedReviewers := getReviewersFromNotification(latestNotification)
		// Ignore already reviewed reviewer.
		if reviewedReviewers.Has(author) {
			log.Infof("Ignore %s's multiple reviews.", author)
			return nil
		}
		// Add author as reviewers and create new notification.
		reviewedReviewers.Insert(author)
		newMsg, err := getMessage(reviewedReviewers.List(), config.CommandHelpLink, config.PRProcessLink, tichiURL, org, repo)
		if err != nil {
			return err
		}
		// Create or update the review notification comment.
		if latestNotification == nil {
			err := gc.CreateComment(org, repo, number, *newMsg)
			if err != nil {
				return err
			}
		} else {
			err := gc.EditComment(org, repo, latestNotification.ID, *newMsg)
			if err != nil {
				return err
			}
		}
		log.Info("Adding LGTM label.")
		// Remove current label.
		if currentLabel != "" {
			if err := gc.RemoveLabel(org, repo, number, currentLabel); err != nil {
				return err
			}
		}
		if err := gc.AddLabel(org, repo, number, nextLabel); err != nil {
			return err
		}
		// Clean up redundant notifications after we added the new notification.
		cleanupRedundantNotifications()
	}
	return nil
}
// getCurrentAndNextLabel returns the pull request's current LGTM label and the
// next required label (e.g. "status/LGT1" -> "status/LGT2") given how many
// LGTMs the PR needs. Empty strings mean no current label / no further label.
func getCurrentAndNextLabel(prefix string, labels []github.Label, needsLgtm int) (string, string) {
	currentLabel := ""
	nextLabel := ""
	for _, label := range labels {
		// BUG FIX: was strings.Contains, which would also match the prefix in
		// the middle of an unrelated label name.
		if strings.HasPrefix(label.Name, prefix) {
			currentLabel = label.Name
			// BUG FIX: was strings.Trim, which treats the prefix as a
			// character set and can strip characters the prefix shares with
			// the count; TrimPrefix removes exactly the prefix.
			currentLgtmNumber, _ := strconv.Atoi(strings.TrimPrefix(label.Name, prefix))
			if currentLgtmNumber < needsLgtm {
				nextLabel = fmt.Sprintf("%s%d", prefix, currentLgtmNumber+1)
			}
		}
	}
	if currentLabel == "" {
		nextLabel = fmt.Sprintf("%s%d", prefix, 1)
	}
	return currentLabel, nextLabel
}
// getReviewersFromNotification extracts the set of reviewer logins listed in
// the latest review notification comment. A nil notification yields an empty set.
func getReviewersFromNotification(latestNotification *github.IssueComment) sets.String {
	reviewed := sets.String{}
	if latestNotification == nil {
		return reviewed
	}

	const nameIndex = 1
	// Example: "- a" => [["- a", "a"]].
	for _, match := range reviewersRegex.FindAllStringSubmatch(latestNotification.Body, -1) {
		if len(match) == nameIndex+1 {
			reviewed.Insert(match[nameIndex])
		}
	}
	return reviewed
}
// filterComments returns pointers to copies of the comments that satisfy
// filter, preserving their original order.
func filterComments(comments []github.IssueComment,
	filter func(comment *github.IssueComment) bool) []*github.IssueComment {
	kept := make([]*github.IssueComment, 0, len(comments))
	for idx := range comments {
		// Copy each element so the returned pointers do not alias the input slice.
		candidate := comments[idx]
		if filter(&candidate) {
			kept = append(kept, &candidate)
		}
	}
	return kept
}
// getLastComment returns the final comment in the list, or nil when empty.
func getLastComment(issueComments []*github.IssueComment) *github.IssueComment {
	n := len(issueComments)
	if n == 0 {
		return nil
	}
	return issueComments[n-1]
}
// getMessage returns the comment body that we want the approve plugin to display on PRs
// The comment shows:
// - a list of reviewed reviewers
// - how an approver can indicate their lgtm
// - how an approver can cancel their lgtm
// The trailing HTML comment embeds ReviewNotificationIdentifier so the bot can
// recognize its own notification later (see notificationRegex).
func getMessage(reviewedReviewers []string, commandHelpLink,
	prProcessLink, ownersLink, org, repo string) (*string, error) {
	// nolint:lll
	message, err := generateTemplate(`
{{if .reviewers}}
This pull request has been approved by:
{{range $index, $reviewer := .reviewers}}- {{$reviewer}}`+"\n"+`{{end}}
{{else}}
This pull request has not been approved.
{{end}}
To complete the [pull request process]({{ .prProcessLink }}), please ask the reviewers in the [list]({{ .ownersLink }}) to review by filling `+"`/cc @reviewer`"+` in the comment.
After your PR has acquired the required number of LGTMs, you can assign this pull request to the committer in the [list]({{ .ownersLink }}) by filling `+"`/assign @committer`"+` in the comment to help you merge this pull request.
The full list of commands accepted by this bot can be found [here]({{ .commandHelpLink }}?repo={{ .org }}%2F{{ .repo }}).
<details>
Reviewer can indicate their review by submitting an approval review.
Reviewer can cancel approval by submitting a request changes review.
</details>
<!--{{ .reviewNotificationIdentifier }}-->
`, "message", map[string]interface{}{
		"reviewers":                    reviewedReviewers,
		"commandHelpLink":              commandHelpLink,
		"prProcessLink":                prProcessLink,
		"ownersLink":                   ownersLink,
		"org":                          org,
		"repo":                         repo,
		"reviewNotificationIdentifier": ReviewNotificationIdentifier,
	})
	if err != nil {
		return nil, err
	}
	// Wrap the rendered body in the standard "[REVIEW NOTIFICATION]" envelope.
	return notification(ReviewNotificationName, "", message), nil
}
// generateTemplate parses templ under the given name, executes it with data,
// and returns the rendered string.
func generateTemplate(templ, name string, data interface{}) (string, error) {
	parsed, err := template.New(name).Parse(templ)
	if err != nil {
		return "", fmt.Errorf("failed to parse template for %s: %v", name, err)
	}

	var out bytes.Buffer
	if err := parsed.Execute(&out, data); err != nil {
		return "", fmt.Errorf("failed to execute template for %s: %v", name, err)
	}
	return out.String(), nil
}
// notification builds a notification message of the form
// "[NAME] arguments\n\ncontext", omitting the parts that trim to empty.
func notification(name, arguments, context string) *string {
	var b strings.Builder
	b.WriteString("[" + strings.ToUpper(name) + "]")

	if args := strings.TrimSpace(arguments); args != "" {
		b.WriteString(" " + args)
	}
	if ctx := strings.TrimSpace(context); ctx != "" {
		b.WriteString("\n\n" + ctx)
	}

	msg := b.String()
	return &msg
}
// notificationMatcher returns a filter that matches notification comments
// authored by the bot itself.
func notificationMatcher(isBot func(string) bool) func(comment *github.IssueComment) bool {
	return func(comment *github.IssueComment) bool {
		// Only the bot's own comments can be notifications.
		if !isBot(comment.User.Login) {
			return false
		}
		// A notification ends with the hidden identifier marker.
		return notificationRegex.MatchString(comment.Body)
	}
}
|
package main
import (
"net/http"
"time"
"github.com/PhongVX/taskmanagement/internal/app/api"
"github.com/PhongVX/taskmanagement/internal/pkg/log"
)
// main wires up the HTTP router and serves the task management API.
func main() {
	// Single source of truth for the listen address; previously "0.0.0.0:8585"
	// and ":8585" were duplicated across the server config and the log line.
	const addr = "0.0.0.0:8585"

	log.Infof("Initializing HTTP routing...")
	r, err := api.NewRouter()
	if err != nil {
		log.Panicf("Failed to init routing, error %v", err)
	}

	log.Infof("Creating HTTP Server...")
	srv := &http.Server{
		Addr: addr,
		// Good practice to set timeouts to avoid Slowloris attacks.
		WriteTimeout: time.Second * 15,
		ReadTimeout:  time.Second * 15,
		IdleTimeout:  time.Second * 60,
		Handler:      r, // Pass our instance of gorilla/mux in.
	}

	log.Infof("Server is listening at %s", addr)
	if err := srv.ListenAndServe(); err != nil {
		log.Panicf("Failed to init server, error %v", err)
	}
}
|
package httphandlers
import (
"encoding/json"
"fmt"
"net/http"
)
// User is the payload accepted by the registration endpoint.
type User struct {
	Name string
}

// UserService registers a user and returns the new user's id.
type UserService interface {
	Register(user User) (id string, err error)
}

// UserServer exposes a UserService over HTTP.
type UserServer struct {
	service UserService
}

// NewUserServer returns a UserServer backed by the given service.
func NewUserServer(s UserService) *UserServer {
	return &UserServer{service: s}
}
// RegisterUser decodes a user from the request body, registers it via the
// service, and responds 201 Created with the new user id in the body.
func (s *UserServer) RegisterUser(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	var newUser User
	if err := json.NewDecoder(r.Body).Decode(&newUser); err != nil {
		http.Error(w, fmt.Sprintf("could not decode user payload, %v", err), http.StatusBadRequest)
		return
	}

	userID, err := s.service.Register(newUser)
	if err != nil {
		http.Error(w, fmt.Sprintf("unable to register user: %v", err), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusCreated)
	// BUG FIX: userID was previously passed as the Fprintf format string
	// (go vet "printf" violation); a '%' in the id would corrupt the response.
	fmt.Fprint(w, userID)
}
|
package sdkconnector
import (
"fmt"
"github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/retry"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
)
// JoinChannel joins the given organization's peers to the named channel.
func JoinChannel(setup *OrgSetup, channelName string) error {
	resourceManagerClientContext := setup.sdk.Context(fabsdk.WithUser(setup.AdminName), fabsdk.WithOrg(setup.OrgName))

	resMgmtClient, err := resmgmt.New(resourceManagerClientContext)
	if err != nil {
		return err
	}

	// BUG FIX: the join error was previously only printed and nil was
	// returned, so callers could never detect a failed join.
	err = resMgmtClient.JoinChannel(channelName, resmgmt.WithRetry(retry.DefaultResMgmtOpts), resmgmt.WithOrdererEndpoint("orderer.example.com"))
	if err != nil {
		return fmt.Errorf("failed to join channel %s: %v", channelName, err)
	}
	return nil
}
|
package main
import "fmt"
// Person holds basic identity data.
type Person struct {
	name string
	sex  byte // single character, e.g. 'm' — presumably gender; confirm with callers.
	age  int
}

// Student embeds Person (struct embedding, Go's composition mechanism) and
// adds enrollment data.
type Student struct {
	Person
	id   int
	addr string
}
// PrintlnInfo prints the person's fields on a single line to stdout.
func (tmp *Person) PrintlnInfo() {
	fmt.Printf("name=%s,sex=%c,age=%d\n", tmp.name, tmp.sex, tmp.age)
}
// PrintlnInfo prints the student value and its pointer to stdout. It shadows
// the embedded Person's method of the same name.
func (tmp *Student) PrintlnInfo() {
	fmt.Println("Student: tmp = ", tmp)
	fmt.Printf("Pointer: %p , %v \n", tmp, tmp)
}
// main demonstrates method shadowing with embedded structs and method values.
func main() {
	s := Student{Person{"Tom", 'm', 22}, 66, "bj"}
	// Method resolution prefers the outer type: Student's own PrintlnInfo
	// shadows the embedded Person's method.
	s.PrintlnInfo()
	// Explicitly call the embedded ("inherited") Person method.
	s.Person.PrintlnInfo()
	// Method value: the receiver is bound at this point, so calling sFunc
	// later needs no receiver argument.
	sFunc := s.PrintlnInfo
	sFunc()
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"fmt"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
"github.com/pkg/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/utils"
)
// GetClusterLabelSelectorInTopology get cluster label selector in topology policy spec
func GetClusterLabelSelectorInTopology(topology *v1alpha1.TopologyPolicySpec) map[string]string {
	switch {
	case topology.ClusterLabelSelector != nil:
		// The explicit selector always wins.
		return topology.ClusterLabelSelector
	case utilfeature.DefaultMutableFeatureGate.Enabled(features.DeprecatedPolicySpec):
		// Fall back to the deprecated field only when the feature gate allows it.
		return topology.DeprecatedClusterSelector
	default:
		return nil
	}
}
// GetPlacementsFromTopologyPolicies get placements from topology policies with provided client.
// Each topology policy contributes placement decisions either from an explicit
// cluster list or from a cluster label selector; duplicates are collapsed.
// When no topology policy is present, the local cluster is the sole placement.
func GetPlacementsFromTopologyPolicies(ctx context.Context, cli client.Client, appNs string, policies []v1beta1.AppPolicy, allowCrossNamespace bool) ([]v1alpha1.PlacementDecision, error) {
	placements := make([]v1alpha1.PlacementDecision, 0)
	// placementMap deduplicates placements by their string key.
	placementMap := map[string]struct{}{}
	// addCluster validates (optionally) and records one cluster/namespace pair.
	addCluster := func(cluster string, ns string, validateCluster bool) error {
		if validateCluster {
			// Named clusters must exist; label-selected ones were just listed.
			if _, e := multicluster.NewClusterClient(cli).Get(ctx, cluster); e != nil {
				return errors.Wrapf(e, "failed to get cluster %s", cluster)
			}
		}
		// Cross-namespace dispatch is only allowed when explicitly enabled.
		if !allowCrossNamespace && (ns != appNs && ns != "") {
			return errors.Errorf("cannot cross namespace")
		}
		placement := v1alpha1.PlacementDecision{Cluster: cluster, Namespace: ns}
		name := placement.String()
		if _, found := placementMap[name]; !found {
			placementMap[name] = struct{}{}
			placements = append(placements, placement)
		}
		return nil
	}
	hasTopologyPolicy := false
	for _, policy := range policies {
		if policy.Type == v1alpha1.TopologyPolicyType {
			if policy.Properties == nil {
				return nil, fmt.Errorf("topology policy %s must not have empty properties", policy.Name)
			}
			hasTopologyPolicy = true
			topologySpec := &v1alpha1.TopologyPolicySpec{}
			if err := utils.StrictUnmarshal(policy.Properties.Raw, topologySpec); err != nil {
				return nil, errors.Wrapf(err, "failed to parse topology policy %s", policy.Name)
			}
			clusterLabelSelector := GetClusterLabelSelectorInTopology(topologySpec)
			switch {
			case topologySpec.Clusters != nil:
				// Explicit cluster list: each cluster must exist.
				for _, cluster := range topologySpec.Clusters {
					if err := addCluster(cluster, topologySpec.Namespace, true); err != nil {
						return nil, err
					}
				}
			case clusterLabelSelector != nil:
				// Label selector: list matching clusters; no per-cluster validation needed.
				clusterList, err := multicluster.NewClusterClient(cli).List(ctx, client.MatchingLabels(clusterLabelSelector))
				if err != nil {
					return nil, errors.Wrapf(err, "failed to find clusters in topology %s", policy.Name)
				}
				if len(clusterList.Items) == 0 && !topologySpec.AllowEmpty {
					return nil, errors.New("failed to find any cluster matches given labels")
				}
				for _, cluster := range clusterList.Items {
					if err = addCluster(cluster.Name, topologySpec.Namespace, false); err != nil {
						return nil, err
					}
				}
			default:
				// Neither clusters nor selector: target the local cluster.
				if err := addCluster(pkgmulticluster.Local, topologySpec.Namespace, false); err != nil {
					return nil, err
				}
			}
		}
	}
	if !hasTopologyPolicy {
		placements = []v1alpha1.PlacementDecision{{Cluster: multicluster.ClusterLocalName}}
	}
	return placements, nil
}
|
package controllers
import (
"devbook-api/src/authentication"
"devbook-api/src/database"
"devbook-api/src/models"
"devbook-api/src/repositories"
"devbook-api/src/responses"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
)
// CreatePost creates a new post authored by the authenticated user.
func CreatePost(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	requestBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		responses.Error(w, http.StatusUnprocessableEntity, err)
		return
	}

	var post models.Post
	if err = json.Unmarshal(requestBody, &post); err != nil {
		responses.Error(w, http.StatusBadRequest, err)
		return
	}
	post.AuthorID = userId

	if err = post.Prepare(); err != nil {
		responses.Error(w, http.StatusBadRequest, err)
		return
	}

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	createdPost, err := repository.Create(post)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}

	responses.JSON(w, http.StatusCreated, createdPost)
}
// FindPostById returns the post identified by the postId route parameter.
func FindPostById(w http.ResponseWriter, r *http.Request) {
	postId := mux.Vars(r)["postId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	findPost, err := repository.FindPostById(postId)
	if err != nil {
		responses.Error(w, http.StatusNotFound, err)
		return
	}

	responses.JSON(w, http.StatusOK, findPost)
}
// GetAllPosts returns the posts visible to the authenticated user.
func GetAllPosts(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	posts, err := repository.GetPosts(userId)
	if err != nil {
		responses.Error(w, http.StatusNotFound, err)
		return
	}

	responses.JSON(w, http.StatusOK, posts)
}
// UpdatePost updates a post's content; only the post's author may update it.
func UpdatePost(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	postId := mux.Vars(r)["postId"]

	requestBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		responses.Error(w, http.StatusUnprocessableEntity, err)
		return
	}

	var postData models.Post
	if err = json.Unmarshal(requestBody, &postData); err != nil {
		responses.Error(w, http.StatusBadRequest, err)
		return
	}

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)

	// Fetch the stored post to verify ownership before updating.
	findPost, err := repository.FindPostById(postId)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	if findPost.AuthorID != userId {
		responses.Error(w, http.StatusUnauthorized, errors.New("unauthorized"))
		return
	}

	updatedPost, err := repository.UpdatePost(postId, postData)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}

	responses.JSON(w, http.StatusOK, updatedPost)
}
// DeletePost removes a post; only the post's author may delete it.
func DeletePost(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	postId := mux.Vars(r)["postId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)

	// Fetch the stored post to verify ownership before deleting.
	findPost, err := repository.FindPostById(postId)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	if findPost.AuthorID != userId {
		responses.Error(w, http.StatusUnauthorized, errors.New("unauthorized"))
		return
	}

	if err = repository.DeletePost(postId); err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}

	responses.JSON(w, http.StatusNoContent, nil)
}
// GetPostsByUserId returns every post authored by the user in the route.
func GetPostsByUserId(w http.ResponseWriter, r *http.Request) {
	userId := mux.Vars(r)["userId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	userPosts, err := repository.GetPostsByUserId(userId)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}

	responses.JSON(w, http.StatusOK, userPosts)
}
// LikePost registers a like on a post by the authenticated user.
func LikePost(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		// CONSISTENCY FIX: auth failures responded 500 here while every other
		// handler responds 401.
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	postId := mux.Vars(r)["postId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	if likeErr := repository.LikePost(postId, userId); likeErr != nil {
		// BUG FIX: previously responded with the stale (nil) `err` instead of
		// the actual like error.
		responses.Error(w, http.StatusInternalServerError, likeErr)
		return
	}

	responses.JSON(w, http.StatusNoContent, nil)
}
// UnlikePost removes the authenticated user's like from a post.
func UnlikePost(w http.ResponseWriter, r *http.Request) {
	userId, err := authentication.ExtractUserId(r)
	if err != nil {
		// CONSISTENCY FIX: auth failures responded 500 here while every other
		// handler responds 401.
		responses.Error(w, http.StatusUnauthorized, err)
		return
	}

	postId := mux.Vars(r)["postId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	if unlikeErr := repository.UnlikePost(postId, userId); unlikeErr != nil {
		// BUG FIX: previously responded with the stale (nil) `err` instead of
		// the actual unlike error.
		responses.Error(w, http.StatusInternalServerError, unlikeErr)
		return
	}

	responses.JSON(w, http.StatusNoContent, nil)
}
// GetPostLikes returns the likes recorded for the post in the route.
func GetPostLikes(w http.ResponseWriter, r *http.Request) {
	postId := mux.Vars(r)["postId"]

	db, err := database.Connect()
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}
	// Release the connection when the handler finishes (was leaked before).
	defer db.Close()

	repository := repositories.NewPostRepository(db)
	likes, err := repository.GetPostLikes(postId)
	if err != nil {
		responses.Error(w, http.StatusInternalServerError, err)
		return
	}

	responses.JSON(w, http.StatusOK, likes)
}
|
package controllers
import (
"admigo/common"
"admigo/model/users"
"encoding/json"
"github.com/julienschmidt/httprouter"
"net/http"
"time"
)
const (
	// UUID_COOKI is the name of the session cookie that carries the user's
	// session UUID (set by setUuidCookie, cleared by removeUuidCookie).
	// NOTE(review): Go naming convention would be UuidCookie, but renaming
	// this exported constant would break external callers.
	UUID_COOKI string = "uuidcookie"
)
// SignupAccount handles POST /signup: creates a user account from the
// request body and writes the result as indented JSON.
// Responds 500 when the model reports creation errors or when encoding fails.
func SignupAccount(w http.ResponseWriter, request *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	res := users.UserCreate(request)
	output, err := json.MarshalIndent(res, "", "\t")
	if err != nil {
		// Fix: the marshal error was silently discarded before, which could
		// write an empty 200 response on failure.
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if res.Errors != nil {
		// Fix: named constant instead of the magic number 500.
		w.WriteHeader(http.StatusInternalServerError)
	}
	w.Write(output)
}
// ConfirmUser handles GET /confirm/*filepath: verifies the confirmation key
// from the query string against the encrypted form of the supplied email.
// On success it force-logs the user in, sets the session cookie, and
// redirects to "/"; otherwise it renders an error page.
func ConfirmUser(w http.ResponseWriter, request *http.Request, ps httprouter.Params) {
	vals := request.URL.Query()
	key := vals.Get("key")
	// The expected key is the encrypted form of the submitted email address.
	ckey := common.Encrypt(vals.Get("email"))
	data := map[string]interface{}{"logged": nil, "menuitem": "none"}
	if key != ckey {
		data["msgs"] = []string{
			"Sorry, but You cannot register with this link",
			"Try register again",
		}
		common.GenerateHTML(w, data, "layout", "sidebar", "nav", "error")
		return
	}
	// Key matched: open a session for the user without a password prompt.
	uuid, err := users.UserForceLogin(vals.Get("email"))
	if err != nil {
		data["msgs"] = []string{err.Error()}
		common.GenerateHTML(w, data, "layout", "sidebar", "nav", "error")
		return
	}
	setUuidCookie(w, uuid)
	http.Redirect(w, request, "/", 302)
}
// Login handles POST: authenticates the user with the email and password in
// the request, writes the result as indented JSON, and sets the session
// cookie. Responds 500 when authentication reports errors or encoding fails.
func Login(w http.ResponseWriter, request *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	res, uuid := users.UserLogin(request)
	output, err := json.MarshalIndent(res, "", "\t")
	if err != nil {
		// Fix: the marshal error was silently discarded before.
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if res.Errors != nil {
		// Fix: named constant instead of the magic number 500.
		w.WriteHeader(http.StatusInternalServerError)
	}
	// NOTE(review): the cookie is set even when login failed (uuid is then
	// presumably empty). Preserved as-is — confirm whether this is intended.
	setUuidCookie(w, uuid)
	w.Write(output)
}
// Logout ends the current user's session: it deletes every stored session
// for the logged-in user and expires the session cookie. A request without a
// valid session is a no-op.
func Logout(w http.ResponseWriter, request *http.Request, ps httprouter.Params) {
	user := LoggedUser(request)
	if user == nil {
		return
	}
	if err := user.DeleteSessions(); err != nil {
		WriteError("all", w, err)
		return
	}
	removeUuidCookie(w)
}
// LoggedUser resolves the user for the session cookie on r. It returns nil
// when the cookie is absent or names no active session.
func LoggedUser(r *http.Request) *users.UserModel {
	cook, err := r.Cookie(UUID_COOKI)
	if err != nil {
		return nil
	}
	return users.SessionUser(cook.Value)
}
// setUuidCookie attaches the HTTP-only session cookie, valid for 30 days, to
// the response.
func setUuidCookie(w http.ResponseWriter, uuid string) {
	http.SetCookie(w, &http.Cookie{
		Name:     UUID_COOKI,
		Path:     "/",
		Value:    uuid,
		HttpOnly: true,
		Expires:  time.Now().Add(30 * 24 * time.Hour),
	})
}
// removeUuidCookie expires the session cookie so the browser discards it
// (MaxAge < 0 plus an Expires far in the past, for old user agents).
func removeUuidCookie(w http.ResponseWriter) {
	expired := http.Cookie{
		Name:    UUID_COOKI,
		Path:    "/",
		MaxAge:  -1,
		Expires: time.Unix(1, 0),
	}
	http.SetCookie(w, &expired)
}
|
package namegen
import (
"compress/gzip"
"crypto/rand"
"encoding/json"
"fmt"
"log"
"math/big"
"os"
"strings"
)
const (
	// legalDNSChars is the set of characters permitted in generated names;
	// filter drops any rune outside this set.
	legalDNSChars = "abcdefghijklmnopqrstuvwxyz-ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
// NameGenerator describes an object capable of generating new environment
// names. New returns a freshly generated name or an error.
type NameGenerator interface {
	New() (string, error)
}
// Wordset contains synsets for parts of speech taken from WordNet: one list
// of adjectives and one of nouns, as decoded from the JSON wordset file.
type Wordset struct {
	Adjective []string `json:"adjective"`
	Noun      []string `json:"noun"`
}
// WordnetNameGenerator generates names from WordNet data.
// It implements NameGenerator.
type WordnetNameGenerator struct {
	ws     *Wordset    // word lists loaded from the gzip+JSON wordset file
	logger *log.Logger // NOTE(review): stored but not used in the visible methods
}
// NewWordnetNameGenerator loads filename (must be a gzip-compressed,
// JSON-encoded wordset) and returns a WordnetNameGenerator.
// Errors from opening, decompressing, or decoding are returned unchanged.
func NewWordnetNameGenerator(filename string, logger *log.Logger) (*WordnetNameGenerator, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	gz, err := gzip.NewReader(f)
	if err != nil {
		return nil, err
	}
	defer gz.Close()
	wng := &WordnetNameGenerator{
		ws:     &Wordset{},
		logger: logger,
	}
	// Fix: decode directly into the *Wordset. The old code passed &wng.ws,
	// a **Wordset, which encoding/json tolerates but is needlessly indirect.
	d := json.NewDecoder(gz)
	if err := d.Decode(wng.ws); err != nil {
		return nil, err
	}
	return wng, nil
}
// filter maps whitespace and underscores to '-' and removes every character
// that is not DNS-compliant (see legalDNSChars).
func (wng WordnetNameGenerator) filter(name string) string {
	return strings.Map(func(r rune) rune {
		if r == ' ' || r == '_' {
			return '-'
		}
		if strings.ContainsRune(legalDNSChars, r) {
			return r
		}
		return -1 // a negative rune tells strings.Map to drop the character
	}, name)
}
// New returns a randomly-generated name of the form {adjective}-{noun},
// filtered down to DNS-legal characters.
func (wng *WordnetNameGenerator) New() (string, error) {
	adjIdx, err := RandomRange(int64(len(wng.ws.Adjective)))
	if err != nil {
		return "", fmt.Errorf("error getting random index for adjective: %v", err)
	}
	nounIdx, err := RandomRange(int64(len(wng.ws.Noun)))
	if err != nil {
		return "", fmt.Errorf("error getting random index for noun: %v", err)
	}
	raw := fmt.Sprintf("%v-%v", wng.ws.Adjective[adjIdx], wng.ws.Noun[nounIdx])
	return wng.filter(raw), nil
}
// RandomRange returns a random integer in [0, max), using rand.Reader as the
// entropy source.
// Fix: returns an error for max <= 0 instead of panicking — rand.Int panics
// on a non-positive max, which New would trigger on an empty word list.
// Also avoids copying the big.Int value (the old code dereferenced
// *big.NewInt(max) into a local).
func RandomRange(max int64) (int64, error) {
	if max <= 0 {
		return 0, fmt.Errorf("max must be positive, got %d", max)
	}
	n, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		return 0, err
	}
	return n.Int64(), nil
}
|
package switcher
import (
//"crypto/md5"
//"crypto/rand"
//"database/sql"
"os"
"strconv"
"time"
//"encoding/base64"
//"encoding/hex"
//"encoding/json"
//"fmt"
//"io"
//"io/ioutil"
"log"
//"net/http"
//"path"
//sw "sqliteToMysql/switcher"
"strings"
//xupload "xinlanAdminTest/xinlanUpload"
//"github.com/codegangsta/negroni"
_ "github.com/go-sql-driver/mysql"
//"github.com/julienschmidt/httprouter"
_ "github.com/mattn/go-sqlite3"
)
// HotClicks is one row of the "clicks" table (a page-view record).
type HotClicks struct {
	Ip      string
	LogDate string
	hot_id  string // unexported; only used inside this package's migration code
}

// HotComments is one row of the "comments" table.
type HotComments struct {
	Id      int
	Name    string
	Img     string
	Content string
	LogDate string
	EventId int
}

// HotEvents is one row of the "events" table.
type HotEvents struct {
	Id      int
	Title   string
	Status  string
	Content string
	LogDate string
	HotId   int
	UserId  int
}

// Hots is one row of the "hots" table (a hot topic).
type Hots struct {
	Id          int
	Title       string
	Description string
	LogDate     string
	TopImg      string
}

// UserInfo is one row of the "userinfo" table.
type UserInfo struct {
	Id        int
	UserName  string
	PassWord  string
	Privilege string
	LogAt     string
}

// HotZans is one row of the "zans" (likes) table.
type HotZans struct {
	EventId int
	UserId  int
}
// Hot_zans migrates the "zans" (likes) table from the local SQLite database
// (middle.db) into MySQL, staging the rows through hotzans.txt with "<>" as
// the field separator and "\n" as the record separator.
// NOTE(review): Query errors are only logged (a failed Query would make
// rows nil and panic on rows.Next, caught by the deferred recover), Scan
// return values are ignored, and prepared statements are never closed —
// all preserved as-is.
func Hot_zans() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v HotZans
	var str string
	rows, err := db.Query("select * from zans")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("hotzans.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.EventId, &v.UserId)
		str += strconv.Itoa(v.EventId) + "<>" + strconv.Itoa(v.UserId) + "\n"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("hotzans.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	//log.Println(string(a))
	arr := strings.Split(string(a), "\n")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, "<>")
			stmt, err6 := tx.Prepare("insert into zans values(?,?)")
			perrorWithRollBack(err6, "准备失败", tx)
			_, err = stmt.Exec(s1[0], s1[1])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
// User_info migrates the "userinfo" table from SQLite (middle.db) into MySQL,
// staging rows through userinfo.txt ("<>" field separator, "\n" records).
// Timestamps are reformatted as "2006-01-02 15:04:05" strings on the way.
// NOTE(review): Query/Scan errors are only logged and statements are never
// closed — preserved as-is.
func User_info() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v UserInfo
	var str string
	var t time.Time
	rows, err := db.Query("select * from userinfo")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("userinfo.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.Id, &v.UserName, &v.PassWord, &v.Privilege, &t)
		v.LogAt = t.Format("2006-01-02 15:04:05")
		str += strconv.Itoa(v.Id) + "<>" + v.UserName + "<>" + v.PassWord + "<>" + v.Privilege + "<>" + v.LogAt + "\n"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("userinfo.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	//log.Println(string(a))
	arr := strings.Split(string(a), "\n")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, "<>")
			stmt, err6 := tx.Prepare("insert into userinfo values(?,?,?,?,?)")
			perrorWithRollBack(err6, "准备失败", tx)
			_, err = stmt.Exec(s1[0], s1[1], s1[2], s1[3], s1[4])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
// Hotsall migrates the "hots" table from SQLite (middle.db) into MySQL,
// staging rows through hots.txt ("<>" field separator, "\n" records).
// Timestamps are reformatted as "2006-01-02 15:04:05" strings on the way.
// NOTE(review): Query/Scan errors are only logged and statements are never
// closed — preserved as-is.
func Hotsall() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v Hots
	var str string
	var t time.Time
	rows, err := db.Query("select * from hots")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("hots.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.Id, &v.Title, &v.Description, &t, &v.TopImg)
		v.LogDate = t.Format("2006-01-02 15:04:05")
		str += strconv.Itoa(v.Id) + "<>" + v.Title + "<>" + v.Description + "<>" + v.LogDate + "<>" + v.TopImg + "\n"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("hots.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	//log.Println(string(a))
	arr := strings.Split(string(a), "\n")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, "<>")
			stmt, err6 := tx.Prepare("insert into hots values(?,?,?,?,?)")
			perrorWithRollBack(err6, "准备失败", tx)
			_, err = stmt.Exec(s1[0], s1[1], s1[2], s1[3], s1[4])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
// Hot_events migrates the "events" table from SQLite (middle.db) into MySQL.
// Rows are staged through hotevents.txt using " {} " as the field separator
// and a single backslash as the record separator — presumably because event
// content can contain newlines; confirm before changing.
// Timestamps are reformatted as "2006-01-02 15:04:05" strings on the way.
// NOTE(review): Query/Scan errors are only logged and statements are never
// closed — preserved as-is.
func Hot_events() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v HotEvents
	var str string
	var t time.Time
	rows, err := db.Query("select * from events")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("hotevents.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.Id, &v.Title, &v.Status, &v.Content, &t, &v.HotId, &v.UserId)
		v.LogDate = t.Format("2006-01-02 15:04:05")
		str += strconv.Itoa(v.Id) + " {} " + v.Title + " {} " + v.Status + " {} " + v.Content + " {} " + v.LogDate + " {} " + strconv.Itoa(v.HotId) + " {} " + strconv.Itoa(v.UserId) + "\\"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("hotevents.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	arr := strings.Split(string(a), "\\")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, " {} ")
			stmt, err6 := tx.Prepare("insert into events values(?,?,?,?,?,?,?)")
			if err6 != nil {
				// Fix: previously logged the stale outer err (nil at this
				// point) instead of the Prepare error err6.
				log.Println(err6)
			}
			perrorWithRollBack(err6, "准备失败", tx)
			log.Println(s1[3])
			_, err = stmt.Exec(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5], s1[6])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
// Hot_comments migrates the "comments" table from SQLite (middle.db) into
// MySQL, staging rows through hotcomments.txt ("</>" field separator, "\n"
// records). Timestamps are reformatted as "2006-01-02 15:04:05" strings.
// NOTE(review): Query/Scan errors are only logged and statements are never
// closed — preserved as-is.
func Hot_comments() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v HotComments
	var str string
	var t time.Time
	rows, err := db.Query("select * from comments")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("hotcomments.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.Id, &v.Name, &v.Img, &v.Content, &t, &v.EventId)
		v.LogDate = t.Format("2006-01-02 15:04:05")
		str += strconv.Itoa(v.Id) + "</>" + v.Name + "</>" + v.Img + "</>" + v.Content + "</>" + v.LogDate + "</>" + strconv.Itoa(v.EventId) + "\n"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("hotcomments.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	//log.Println(string(a))
	arr := strings.Split(string(a), "\n")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, "</>")
			stmt, err6 := tx.Prepare("insert into comments values(?,?,?,?,?,?)")
			perrorWithRollBack(err6, "准备失败", tx)
			_, err = stmt.Exec(s1[0], s1[1], s1[2], s1[3], s1[4], s1[5])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
// Hot_clicks migrates the "clicks" table from SQLite — note it reads
// middle_bak.db, unlike the other migrations which read middle.db (confirm
// this is intentional) — into MySQL, staging rows through hotclicks.txt
// ("<>" field separator, "\n" records). Timestamps are reformatted as
// "2006-01-02 15:04:05" strings.
// NOTE(review): Query/Scan errors are only logged and statements are never
// closed — preserved as-is.
func Hot_clicks() {
	// Read the SQLite data and write it to a text file.
	db := ConnectDB("./middle_bak.db")
	defer func() {
		db.Close()
		err := recover()
		if err != nil {
			log.Println(err)
		}
	}()
	var v HotClicks
	var str string
	var t time.Time
	rows, err := db.Query("select * from clicks")
	if err != nil {
		log.Println(err)
	}
	f, err2 := os.Create("hotclicks.txt")
	if err2 != nil {
		panic(err2)
	}
	defer f.Close()
	for rows.Next() {
		rows.Scan(&v.Ip, &t, &v.hot_id)
		v.LogDate = t.Format("2006-01-02 15:04:05")
		str += v.Ip + "<>" + v.LogDate + "<>" + v.hot_id + "\n"
	}
	f.WriteString(str)
	// Read the text file back and insert the rows into MySQL.
	db = ConnectMySql()
	defer func() {
		db.Close()
		err5 := recover()
		if err5 != nil {
			log.Println(err5)
		}
	}()
	f2, err3 := os.Open("hotclicks.txt")
	if err3 != nil {
		panic(err3)
	}
	defer f2.Close()
	stat, err4 := f2.Stat()
	if err4 != nil {
		log.Println(err4)
	}
	size := stat.Size()
	a := make([]byte, size)
	f2.Read(a)
	//log.Println(string(a))
	arr := strings.Split(string(a), "\n")
	tx, err5 := db.Begin()
	perrorWithRollBack(err5, "插入失败", tx)
	for _, v := range arr {
		if v != "" {
			s1 := strings.Split(v, "<>")
			stmt, err6 := tx.Prepare("insert into clicks values(?,?,?)")
			perrorWithRollBack(err6, "准备失败", tx)
			_, err = stmt.Exec(s1[0], s1[1], s1[2])
			if err != nil {
				log.Println(err)
			}
		}
	}
	tx.Commit()
}
/*func ConnectDB(dbPath string) *sql.DB {
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
panic(err)
}
return db
}
func ConnectMySql() *sql.DB {
dbinfo := "root" + ":" + "123" + "@/" + "xinlanAdmin" + "?charset=utf8"
db, err := sql.Open("mysql", dbinfo)
if err != nil {
panic(err)
}
return db
}
func perrorWithRollBack(e error, errMsg string, tx *sql.Tx) {
if e != nil {
tx.Rollback()
log.Println(e)
panic(errMsg)
}
}*/
|
package iset
// Opcode values for the VM instruction set. NOP is explicitly 0x00; every
// following opcode is assigned by iota in declaration order (LOAD is 1,
// FREE is 2, and so on), so inserting a constant renumbers all later ones.
const (
	NOP byte = 0x00
	// Loads value from DS address into register
	// EX: if DS[0x01] is int32 and op used is int8 the value is casted
	LOAD byte = iota // load #1, 0x01
	FREE byte = iota // load #1
	// String to numbers
	STRI8   byte = iota // stri8 #1, #2
	STRI16  byte = iota
	STRI32  byte = iota
	STRI64  byte = iota
	STRUI8  byte = iota
	STRUI16 byte = iota
	STRUI32 byte = iota
	STRUI64 byte = iota
	STRF32  byte = iota
	STRF64  byte = iota
	// Number casting (Only cast numeric registers)
	TOI8   byte = iota // toi8 #1, #2
	TOI16  byte = iota
	TOI32  byte = iota
	TOI64  byte = iota
	TOUI8  byte = iota
	TOUI16 byte = iota
	TOUI32 byte = iota
	TOUI64 byte = iota
	TOF32  byte = iota
	TOF64  byte = iota
	// Flow Control
	JMP   byte = iota // unconditional jump  jmp addr
	JMPZ  byte = iota // jump if z is true   jmpz addr
	JMPNZ byte = iota // jump if z is not true  jmpnz addr
	// Bitwise results are stored on register[2]
	AND byte = iota // and r1, r2
	OR  byte = iota
	XOR byte = iota
	NOT byte = iota
	LShift byte = iota
	RShift byte = iota
	// Math results are stored on register[2]
	ADD  byte = iota // add r1, r2
	SUB  byte = iota
	MUL  byte = iota
	DIV  byte = iota
	SQRT byte = iota
	INC  byte = iota // inc #1, value
	DEC  byte = iota // dec #1, value
	// String OPS
	STRLEN byte = iota // get length of string  strlen r1, dest
	STRCAT byte = iota // concat two strings    strcat r1, r2, dest
	TOSTR  byte = iota // cast register to string  tostr r1, dest
	// Comparison
	// -> result is stored in the z-flag
	CMP    byte = iota // Compare  cmp #0, #1
	STRCMP byte = iota // strcmp #1, #2
	LT     byte = iota
	GT     byte = iota
	LTE    byte = iota
	GTE    byte = iota
	// Type checking
	// -> result is stored in the z-flag
	ISNULL byte = iota
	ISI8   byte = iota // isi8 #1
	ISI16  byte = iota
	ISI32  byte = iota
	ISI64  byte = iota
	ISUI8  byte = iota
	ISUI16 byte = iota
	ISUI32 byte = iota
	ISUI64 byte = iota
	ISF32  byte = iota
	ISF64  byte = iota
	ISSTR  byte = iota // isstr #1
	// Register
	MOVE  byte = iota // Move from register to another  move #1, #2
	MOVEZ byte = iota // Move z-flag to register        movez #1
	COPY  byte = iota // Copy from register to another  copy #1, #2
	COPYZ byte = iota // Copy z-flag to register        copyz #1, #2
	// Stack
	PUSH byte = iota // Push register to stack  PUSH reg
	POP  byte = iota // Pop register to stack   POP reg
	CALL byte = iota // Store current c flag and jump to addr  Call addr
	RET  byte = iota // Jump to c flag
	// Array
	AALOC   byte = iota // create a new array in reg #1 with #size  aaloc #1, size
	ALEN    byte = iota // get array size and store in #r2  alen #1, #r2
	AGET    byte = iota // store in #dest an item from array #1 from #addr  aget #1, addr, dest
	ASET    byte = iota // set value #2 to array #1 in addr (causes overflow)  aset #1, #2, addr
	APUSH   byte = iota // insert item to start of array  aapush #1, #2
	AADD    byte = iota // insert item to end of array    aadd #1, #2
	ALMERGE byte = iota // merge left array #2 in array #1   almerge #1, #2
	ARMERGE byte = iota // merge right array #2 in array #1  armerge #1, #2
	// SYS
	SYSCALL byte = iota // Call VM Function  syscall addr
	HALT    byte = iota // Stop vm
)
|
package combat
import (
"fmt"
"log"
"time"
"github.com/I82Much/rogue/event"
"github.com/I82Much/rogue/math"
"github.com/I82Much/rogue/player"
"github.com/I82Much/rogue/stats"
termbox "github.com/nsf/termbox-go"
)
// Event names published to listeners when combat ends.
const (
	PlayerDied      = "PLAYER_DIED"
	AllMonstersDied = "MONSTERS_VANQUISHED"
)
// State identifies a phase of the combat state machine; Column identifies
// which screen column a defense word falls in.
type State string
type Column string

const (
	// EnemyDescription shows the enemy intro text before combat begins.
	EnemyDescription State = "DESCRIBE_ENEMY"
	EnteringAttack   State = "ENTERING_ATTACK"
	// Player is attacking
	Attack State = "ATTACK"
	// Fix: EnteringDefense previously lacked the State type, making it an
	// untyped string constant unlike its sibling phases.
	EnteringDefense State = "ENTERING_DEFENSE"
	// Player is defending
	Defense State = "DEFENSE"

	Left   Column = "LEFT"
	Right  Column = "RIGHT"
	Center Column = "CENTER"
)
var (
	// columns is the order used to spread monsters' words across the screen.
	columns = []Column{Left, Center, Right}
	// TODO(ndunn): this could shorten each time
	// interRoundTime is the pause between combat phases.
	interRoundTime = time.Duration(750) * time.Millisecond
	// initialDescriptionTime is how long the enemy description is shown
	// before the first attack phase.
	initialDescriptionTime = time.Duration(1500) * time.Millisecond
)
// Model holds the full state of one combat encounter: the combatants, the
// words currently in play, phase/timing bookkeeping, and running statistics.
type Model struct {
	Monsters []*Monster
	Player   *player.Player
	words    []*AttackWord    // words currently on screen / awaiting typing
	listeners []event.Listener // subscribers notified via Publish
	state    State            // current phase of the combat state machine
	attempts int              // total letters typed
	hits     int              // letters that matched an expected character
	completedWords   int
	monstersDefeated int
	currentTyping    *AttackWord // word the player is mid-way through, or nil
	// which round of combat
	round int
	timeOfTransition time.Time // when a timed phase (Entering*, description) ends
}
// MakeStats snapshots the model's running counters into a stats.Stats value.
func (m *Model) MakeStats() stats.Stats {
	var s stats.Stats
	s.LettersTyped = m.attempts
	s.Hits = m.hits
	s.CompletedWords = m.completedWords
	s.Rounds = m.round
	s.MonstersDefeated = m.monstersDefeated
	return s
}
// AddListener subscribes d to events published by this model.
func (m *Model) AddListener(d event.Listener) {
	m.listeners = append(m.listeners, d)
}
// Publish delivers event e with its payload to every registered listener,
// in registration order.
func (m *Model) Publish(e string, extras interface{}) {
	for _, listener := range m.listeners {
		listener.Listen(e, extras)
	}
}
// NewCombatModel builds a combat encounter between p and the monsters m,
// starting in the enemy-description phase for initialDescriptionTime.
func NewCombatModel(p *player.Player, m []*Monster) *Model {
	model := &Model{
		Monsters: m,
		Player:   p,
		state:    EnemyDescription,
	}
	model.timeOfTransition = time.Now().Add(initialDescriptionTime)
	return model
}
// Words returns the words currently in play.
func (c *Model) Words() []*AttackWord {
	return c.words
}
// CurrentlyTyping returns the word the player is mid-way through typing,
// or nil when no word is in progress.
func (c *Model) CurrentlyTyping() *AttackWord {
	return c.currentTyping
}
// getPlayerAttackWords returns the attack words for the given round, cycling
// through the monsters that are still alive (round 1 targets the first alive
// monster, round 2 the second, and so on, wrapping around).
// Panics if no monster is alive — callers must guarantee at least one.
func (c *Model) getPlayerAttackWords(round int) []*AttackWord {
	alive := make([]*Monster, 0, len(c.Monsters))
	for _, m := range c.Monsters {
		if m.IsDead() {
			continue
		}
		alive = append(alive, m)
	}
	if len(alive) == 0 {
		panic("Invariant broken- there should be at least one alive monster")
	}
	target := alive[round%len(alive)]
	return target.GetWords(round)
}
// getAttackWords builds the word list for the current phase: the player's
// attack words in Attack, or every living monster's words (spread across the
// screen columns) in Defense. Any other phase yields no words.
func (c *Model) getAttackWords() []*AttackWord {
	var words []*AttackWord
	switch c.state {
	case Attack:
		words = c.getPlayerAttackWords(c.round)
	case Defense:
		for i, m := range c.Monsters {
			if m.IsDead() {
				continue
			}
			for _, w := range m.GetWords(c.round) {
				w := w
				// Drop each monster's words down a different column.
				w.Col = columns[i%len(columns)]
				words = append(words, w)
			}
		}
	}
	return words
}
// KillWord removes the word from the model, meaning it's vanquished; it also
// clears currentTyping when that points at w. The word must be present —
// otherwise this shuts down termbox (to restore the terminal) and panics.
func (c *Model) KillWord(w *AttackWord) {
	// TODO(ndunn): score? update exp?
	if c.currentTyping == w {
		log.Printf("no longer typing %v", c.currentTyping)
		c.currentTyping = nil
	}
	for idx := range c.words {
		if c.words[idx] != w {
			continue
		}
		c.words = append(c.words[:idx], c.words[idx+1:]...)
		return
	}
	// The terminal gets really screwed up if we don't shut down termbox first
	termbox.Close()
	panic(fmt.Sprintf("couldn't find word %v", w))
}
// DamagePlayer applies w's player-damage value to the player.
func (c *Model) DamagePlayer(w *AttackWord) {
	c.Player.Damage(w.DamageToPlayer())
}
// DamageMonster applies w's monster-damage value to the first monster that is
// still alive, bumping the defeated counter when the blow is fatal.
// A no-op when every monster is already dead.
func (c *Model) DamageMonster(w *AttackWord) {
	for _, m := range c.Monsters {
		if m.IsDead() {
			continue
		}
		m.Damage(w.DamageToMonster())
		// did this attack kill?
		if m.IsDead() {
			c.monstersDefeated++
		}
		return
	}
}
// State returns the current phase of the combat state machine.
func (c *Model) State() State {
	return c.state
}
// PublishEndEvents publishes end-of-combat events: PlayerDied when the player
// has died, and AllMonstersDied once no monster remains alive. Nothing is
// published for monsters while any of them still lives. (The old comment
// described a removed "Over" predicate.)
func (c *Model) PublishEndEvents() {
	if c.Player.IsDead() {
		c.Publish(PlayerDied, c.MakeStats())
	}
	// If any monster is left, fight's not over
	for _, m := range c.Monsters {
		if !m.IsDead() {
			return
		}
	}
	c.Publish(AllMonstersDied, c.MakeStats())
}
// maybeTransition potentially shifts the model into another phase. e.g. after all the words are done in combat round,
// we enter the EnteringDefense round.
// The transitions are from
// EnteringDefense -> Defense -> EnteringAttack -> Attack -> EnteringDefense and on and on.
// The Entering* phases (and the initial EnemyDescription) are timed pauses
// that end at timeOfTransition; Attack and Defense end when their word list
// empties. The round counter advances on the Attack -> EnteringDefense edge.
func (c *Model) maybeTransition() {
	if c.state == EnemyDescription && time.Now().After(c.timeOfTransition) {
		c.state = EnteringAttack
		c.timeOfTransition = time.Now().Add(interRoundTime)
	} else if c.state == Defense && len(c.words) == 0 {
		log.Println("defense -> entering attack")
		c.state = EnteringAttack
		c.timeOfTransition = time.Now().Add(interRoundTime)
	} else if c.state == Attack && len(c.words) == 0 {
		log.Println("attack -> entering defense")
		c.state = EnteringDefense
		c.timeOfTransition = time.Now().Add(interRoundTime)
		c.round++
	} else if c.state == EnteringDefense && time.Now().After(c.timeOfTransition) {
		log.Println("entering defense -> defense")
		c.state = Defense
		// Entering a live phase requires an empty word list; the new phase's
		// words are generated below.
		if len(c.words) != 0 {
			panic(fmt.Sprintf("invariant violated: should have had 0 words; had %v", c.words))
		}
		c.words = c.getAttackWords()
		//fmt.Printf("entered defense with words %v", c.words)
	} else if c.state == EnteringAttack && time.Now().After(c.timeOfTransition) {
		log.Println("entering attack -> attack")
		c.state = Attack
		if len(c.words) != 0 {
			panic(fmt.Sprintf("invariant violated: should have had 0 words; had %v", c.words))
		}
		c.words = c.getAttackWords()
		//fmt.Printf("entered attack with words %v", c.words)
	}
}
// Update advances the combat simulation one tick: it applies the letters the
// player typed since the last tick, expires words whose fall time has
// elapsed (damaging the player during Defense), then runs phase transitions
// and publishes any end-of-combat events.
func (c *Model) Update(typed []rune) {
	// FIXME ndunn take this out
	//c.Publish(AllMonstersDied)
	now := time.Now()
	for _, r := range typed {
		c.attempts++
		// Does this rune represent the first untyped letter of any of the candidates? If so it's a hit. If not it's a miss
		if c.currentTyping != nil {
			log.Printf("Currently typing: %v", *c.currentTyping)
			runes := []rune(c.currentTyping.word)
			if r == runes[len(c.currentTyping.spelled)] {
				c.hits++
				c.currentTyping.spelled = append(c.currentTyping.spelled, r)
				log.Printf("spelled %v", c.currentTyping.spelled)
				// Done the word
				if len(c.currentTyping.spelled) == len(c.currentTyping.word) {
					log.Printf("finished %v", c.currentTyping.word)
					if c.state == Attack {
						// Finished typing the word - inflict damage if in attack mode
						c.DamageMonster(c.currentTyping)
					}
					c.KillWord(c.currentTyping)
					c.completedWords++
				}
			}
		} else {
			log.Printf("new letter %v", r)
			// See if the rune matches first letter of one of our candidate words
			for _, word := range c.Words() {
				runes := []rune(word.word)
				if r == runes[len(word.spelled)] {
					log.Printf("started typing %v", word.word)
					c.hits++
					word.spelled = append(word.spelled, r)
					c.currentTyping = word
					break
				}
			}
		}
	}
	// Expire words whose fall duration has fully elapsed.
	var toRemove []*AttackWord
	for _, word := range c.words {
		elapsed := now.Sub(word.onScreen)
		// What proportion (0..1.0) is complete
		word.proportion = math.DoMap(float64(elapsed.Nanoseconds()), 0.0, float64(word.duration.Nanoseconds()), 0, 1.0)
		if word.proportion >= 1.0 {
			// Inflict damage on the player if in defense mode
			if c.state == Defense {
				c.DamagePlayer(word)
			}
			toRemove = append(toRemove, word)
		}
	}
	// Removal happens after the scan because KillWord mutates c.words.
	for _, word := range toRemove {
		c.KillWord(word)
	}
	// Transition phases
	c.maybeTransition()
	c.PublishEndEvents()
}
|
package main
// ResourceType is the public view of a resource type.
type ResourceType struct {
	ID          string `json:"id"`
	Name        string `json:"name"`
	Active      bool   `json:"active"`
	PrivateName string `json:"privateName"`
}

// ResourceTypePrivateData carries the non-public attributes of a resource
// type, keyed by the owning resource type's ID.
type ResourceTypePrivateData struct {
	ResourceTypeID string `json:"resource_type_id"`
	DisplayName    string `json:"display_name"`
	Active         bool   `json:"active"`
}

// ResourceTypeIndex combines the embedded public ResourceType with its
// private data.
type ResourceTypeIndex struct {
	ResourceType
	PrivateData ResourceTypePrivateData `json:"private_data"`
}

// ResourceTypeTransactionItem is one transaction item: a resource type
// snapshot together with its transaction ID and timestamp.
type ResourceTypeTransactionItem struct {
	TXID         string            `json:"tx_id"`
	ResourceType ResourceTypeIndex `json:"resource_type"`
	Timestamp    int64             `json:"timestamp"`
}
|
package gofinancial
import (
"encoding/json"
"fmt"
"io"
"math"
"os"
"path"
"time"
"github.com/go-echarts/go-echarts/v2/charts"
"github.com/go-echarts/go-echarts/v2/opts"
"github.com/razorpay/go-financial/enums/interesttype"
)
// Amortization struct holds the configuration and financial details.
// Financial supplies the payment/principal/interest calculations and is
// selected from Config.InterestType by NewAmortization.
type Amortization struct {
	Config    *Config
	Financial Financial
}
// NewAmortization returns a new amortisation object with config and financial
// fields initialised from the config's interest type.
func NewAmortization(c *Config) (*Amortization, error) {
	a := Amortization{Config: c}
	if err := a.Config.setPeriodsAndDates(); err != nil {
		return nil, err
	}
	switch a.Config.InterestType {
	case interesttype.REDUCING:
		a.Financial = &Reducing{}
	case interesttype.FLAT:
		a.Financial = &Flat{}
	default:
		// Fix: an unrecognised interest type previously left Financial nil,
		// deferring the failure to a nil-pointer panic in GenerateTable.
		return nil, fmt.Errorf("unsupported interest type: %v", a.Config.InterestType)
	}
	return &a, nil
}
// Row represents a single row in an amortization schedule: one payment
// period with its date range and the payment split into interest and
// principal components.
type Row struct {
	Period    int64
	StartDate time.Time
	EndDate   time.Time
	Payment   float64
	Interest  float64
	Principal float64
}
// GenerateTable constructs the amortization table based on the configuration:
// one Row per period, with values rounded when Config.Round is set. The final
// row is corrected for accumulated rounding error before the consistency
// check. Returns ErrPayment when a row's payment does not equal
// principal + interest.
func (a Amortization) GenerateTable() ([]Row, error) {
	var result []Row
	for i := int64(1); i <= a.Config.periods; i++ {
		var row Row
		row.Period = i
		row.StartDate = a.Config.startDates[i-1]
		row.EndDate = a.Config.endDates[i-1]
		payment := a.Financial.GetPayment(*a.Config)
		principalPayment := a.Financial.GetPrincipal(*a.Config, i)
		interestPayment := a.Financial.GetInterest(*a.Config, i)
		if a.Config.Round {
			row.Payment = math.Round(payment)
			row.Principal = math.Round(principalPayment)
			row.Interest = math.Round(interestPayment)
		} else {
			row.Payment = payment
			row.Principal = principalPayment
			row.Interest = interestPayment
		}
		if i == a.Config.periods {
			PerformErrorCorrectionDueToRounding(&row, result, a.Config.AmountBorrowed, a.Config.Round)
		}
		// NOTE(review): exact float64 equality — relies on the components
		// summing exactly; confirm this holds for the non-rounded path.
		if row.Payment != row.Principal+row.Interest {
			return nil, ErrPayment
		}
		result = append(result, row)
	}
	return result, nil
}
// PerformErrorCorrectionDueToRounding takes care of errors in principal and payment amount due to rounding.
// Only the final row is adjusted for rounding errors. diff compares the
// magnitude of the borrowed principal with the magnitude collected across
// all rows (including the final one).
// NOTE(review): both the diff > 0 and diff < 0 branches shift payment and
// principal in the same (negative) direction; verify the diff < 0 branch
// really reduces the magnitude as intended.
func PerformErrorCorrectionDueToRounding(finalRow *Row, rows []Row, principal int64, round bool) {
	principalCollected := finalRow.Principal
	for _, row := range rows {
		principalCollected += row.Principal
	}
	if round {
		diff := math.Abs(float64(principal)) - math.Abs(principalCollected)
		if diff > 0 {
			// subtracting diff coz payment, principal and interest are -ve.
			finalRow.Payment = math.Round(finalRow.Payment - diff)
			finalRow.Principal = math.Round(finalRow.Principal - diff)
		} else if diff < 0 {
			finalRow.Payment = math.Round(finalRow.Payment + diff)
			finalRow.Principal = math.Round(finalRow.Principal + diff)
		}
	} else {
		diff := math.Abs(float64(principal)) - math.Abs(principalCollected)
		if diff > 0 {
			finalRow.Payment = finalRow.Payment - diff
			finalRow.Principal = finalRow.Principal - diff
		} else {
			finalRow.Payment = finalRow.Payment + diff
			finalRow.Principal = finalRow.Principal + diff
		}
	}
}
// PrintRows outputs a formatted json for given rows as input, written to
// stdout.
func PrintRows(rows []Row) {
	bytes, err := json.MarshalIndent(rows, "", "\t")
	if err != nil {
		// Fix: the marshal error was silently discarded, printing nothing
		// with no indication of failure.
		fmt.Printf("error marshalling rows: %v", err)
		return
	}
	fmt.Printf("%s", bytes)
}
// PlotRows uses the go-echarts package to generate an interactive plot from
// the Rows array, written to "<fileName>.html" in the current working
// directory. The named err return lets the deferred Close surface a close
// failure when rendering itself succeeded.
func PlotRows(rows []Row, fileName string) (err error) {
	bar := getStackedBarPlot(rows)
	completePath, err := os.Getwd()
	if err != nil {
		return err
	}
	filePath := path.Join(completePath, fileName)
	f, err := os.Create(fmt.Sprintf("%s.html", filePath))
	if err != nil {
		return err
	}
	defer func() {
		// setting named err
		ferr := f.Close()
		if err == nil {
			err = ferr
		}
	}()
	return renderer(bar, f)
}
// getStackedBarPlot returns an instance for stacked bar plot: one bar series
// each for principal, interest, and payment per period, stacked together,
// with inside and slider zoom controls. Values are negated for display.
func getStackedBarPlot(rows []Row) *charts.Bar {
	bar := charts.NewBar()
	bar.SetGlobalOptions(charts.WithTitleOpts(opts.Title{
		Title: "Loan repayment schedule",
	},
	),
		charts.WithInitializationOpts(opts.Initialization{
			Width:  "1200px",
			Height: "600px",
		}),
		charts.WithToolboxOpts(opts.Toolbox{Show: true}),
		charts.WithLegendOpts(opts.Legend{Show: true}),
		charts.WithDataZoomOpts(opts.DataZoom{
			Type:  "inside",
			Start: 0,
			End:   50,
		}),
		charts.WithDataZoomOpts(opts.DataZoom{
			Type:  "slider",
			Start: 0,
			End:   50,
		}),
	)
	var xAxis []string
	var interestArr []opts.BarData
	var principalArr []opts.BarData
	var paymentArr []opts.BarData
	for _, row := range rows {
		// X axis is the period end date.
		xAxis = append(xAxis, row.EndDate.Format("2006-01-02"))
		// NOTE(review): only the interest series carries a Name label —
		// confirm whether principal/payment should too.
		interestArr = append(interestArr, opts.BarData{Name: fmt.Sprintf("%v", -row.Interest), Value: -row.Interest})
		principalArr = append(principalArr, opts.BarData{Value: -row.Principal})
		paymentArr = append(paymentArr, opts.BarData{Value: -row.Payment})
	}
	// Put data into instance
	bar.SetXAxis(xAxis).
		AddSeries("Principal", principalArr).
		AddSeries("Interest", interestArr).
		AddSeries("Payment", paymentArr).SetSeriesOptions(
		charts.WithBarChartOpts(opts.BarChart{
			Stack: "stackA",
		}))
	return bar
}
// renderer renders the bar into the writer interface, returning any render
// error.
func renderer(bar *charts.Bar, writer io.Writer) error {
	return bar.Render(writer)
}
|
// Copyright 2019 John Papandriopoulos. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// Zydis is a Go wrapper for the fast and lightweight Zydis x86/x86-64
// disassembler library, found at http://zydis.re/. This package provides
// bindings via cgo and is considered a "complete" wrapper of the Zydis API,
// ready for production use.
//
// It was created because the pure-Go disassembler, found at
// https://godoc.org/golang.org/x/arch/x86/x86asm, is significantly lacking in
// x86-64 support. Decoding x86* is complex business, and it was more
// straightforward to write these bindings instead of digging deep into the
// pure-Go package.
//
// Requires Git-LFS
//
// **This package uses Git LFS to store a precompiled version of the Zydis
// library (see below), so please make sure you have it installed before
// getting this package.** Learn about Git LFS at https://git-lfs.github.com/.
//
// Sample Code
//
// See the file `cmd/demo.go`.
//
// Upgrading the Zydis Library
//
// The Zydis library is packaged as a static syso object file so that this
// package is go gettable. Precompiled macOS (amd64, arm64), Linux (amd64,
// arm64), and Windows (amd64, 386) binaries are provided.
//
// Use the Makefile in the `lib/` folder to upgrade to a newer version,
// rebuild, or add support for another platform. The default Makefile target
// clones the Zydis repo and its submodule, performs the build, and creates the
// syso files for Go linkage under macOS with suitable cross-compilers
// installed.
//
package zydis // import "go.jpap.org/zydis"
// To install: `go install go.jpap.org/godoc-readme-gen`
//
//go:generate godoc-readme-gen -f -title "Zydis Bindings for Go"
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"github.com/SoftwareAG/adabas-go-api/adatypes"
)
// Record one result record of the result list received by
// record list or in the stream callback.
//
// To extract the values in the record you might request the
// value using the SearchValue() methods. Alternatively you
// might use the Traverse() callback method to call a method
// for each Adabas field in the tree. The tree includes group
// nodes of the Adabas record.
type Record struct {
	// Isn is the Adabas ISN (internal sequence number) of this record.
	Isn adatypes.Isn `xml:"Isn,attr"`
	// Quantity is the ISN quantity reported for this record.
	Quantity uint64 `xml:"Quantity,attr"`
	// Value holds the tree of top-level field and group values.
	Value []adatypes.IAdaValue
	// HashFields maps field names to their values for fast lookup;
	// excluded from XML and JSON marshalling (see tags).
	HashFields map[string]adatypes.IAdaValue `xml:"-" json:"-"`
	fields     map[string]*queryField        // scan targets keyed by name, incl. "#isn"/"#isnquantity" (see Scan)
	definition *adatypes.Definition          // definition used to create, set and search values
	adabasMap  *Map                          // optional Adabas map; its name is used as XML root element
	// LobEndTransaction presumably marks the end of a LOB transfer
	// transaction -- TODO confirm against callers (not used in this file).
	LobEndTransaction bool
}
// traverseHashValues registers the visited value in the record's
// HashFields lookup map, keyed by field name. MU ghost fields are
// skipped so they do not pollute the lookup table.
func traverseHashValues(adaValue adatypes.IAdaValue, x interface{}) (adatypes.TraverseResult, error) {
	rec := x.(*Record)
	fieldType := adaValue.Type()
	if fieldType.HasFlagSet(adatypes.FlagOptionMUGhost) {
		return adatypes.Continue, nil
	}
	adatypes.Central.Log.Debugf("Add hash to %s", fieldType.Name())
	rec.HashFields[fieldType.Name()] = adaValue
	return adatypes.Continue, nil
}
// NewRecord create new result record infrastructure based on the given definition.
// The definition must not be nil; when it carries no values yet they are
// created first. The record takes ownership of the definition's values and
// builds its field-name hash map by traversing them.
func NewRecord(definition *adatypes.Definition) (*Record, error) {
	adatypes.Central.Log.Debugf("Create new record")
	if definition == nil {
		adatypes.Central.Log.Debugf("Definition empty")
		return nil, adatypes.NewGenericError(69)
	}
	if definition.Values == nil {
		adatypes.Central.Log.Debugf("Definition values empty")
		if cerr := definition.CreateValues(false); cerr != nil {
			adatypes.Central.Log.Debugf("Error creating Definition values")
			return nil, cerr
		}
	}
	record := &Record{
		Value:             definition.Values,
		definition:        definition,
		LobEndTransaction: false,
		HashFields:        make(map[string]adatypes.IAdaValue),
	}
	// The record owns the values from now on.
	definition.Values = nil
	traverser := adatypes.TraverserValuesMethods{EnterFunction: traverseHashValues}
	if _, err := record.Traverse(traverser, record); err != nil {
		return nil, err
	}
	return record, nil
}
// NewRecordIsn create a new result record with ISN or ISN quantity.
// It builds the record via NewRecord and then stores the given ISN
// and ISN quantity on it.
func NewRecordIsn(isn adatypes.Isn, isnQuantity uint64, definition *adatypes.Definition) (*Record, error) {
	rec, err := NewRecord(definition)
	if err != nil {
		return nil, err
	}
	rec.Isn, rec.Quantity = isn, isnQuantity
	if adatypes.Central.IsDebugLevel() {
		adatypes.Central.Log.Debugf("New record with ISN=%d and ISN quantity=%d", isn, isnQuantity)
	}
	return rec, nil
}
// recordValuesTraverser appends one "name=value" line per visited
// value to the bytes.Buffer passed as traversal context.
func recordValuesTraverser(adaValue adatypes.IAdaValue, x interface{}) (adatypes.TraverseResult, error) {
	out := x.(*bytes.Buffer)
	fmt.Fprintf(out, " %s=%#v\n", adaValue.Type().Name(), adaValue.String())
	return adatypes.Continue, nil
}
// createRecordBuffer create a record buffer by traversing all values
// of the record and letting createStoreRecordBufferTraverser write
// them into the given buffer helper.
func (record *Record) createRecordBuffer(helper *adatypes.BufferHelper, option *adatypes.BufferOption) (err error) {
	adatypes.Central.Log.Debugf("Create store record buffer")
	traverser := adatypes.TraverserValuesMethods{EnterFunction: createStoreRecordBufferTraverser}
	state := &storeRecordTraverserStructure{record: record, helper: helper, option: option}
	_, err = record.Traverse(traverser, state)
	if adatypes.Central.IsDebugLevel() {
		adatypes.Central.Log.Debugf("Create record buffer done len=%d", len(helper.Buffer()))
	}
	return
}
// String string representation of the record: an ISN/quantity header
// line followed by one line per value.
func (record *Record) String() string {
	var out bytes.Buffer
	fmt.Fprintf(&out, "ISN=%d quantity=%d\n", record.Isn, record.Quantity)
	traverser := adatypes.TraverserValuesMethods{EnterFunction: recordValuesTraverser}
	_, _ = record.Traverse(traverser, &out)
	return out.String()
}
// Traverse step/traverse through all record entries and call methods
//
// For each top-level value the EnterFunction is called first; a
// SkipStructure result skips the rest of the handling for that value.
// Structure values are then traversed recursively, and finally the
// LeaveFunction is invoked. Traversal stops early on any error or
// when a callback (or nested traversal) returns EndTraverser.
func (record *Record) Traverse(t adatypes.TraverserValuesMethods, x interface{}) (ret adatypes.TraverseResult, err error) {
	if record == nil {
		return adatypes.EndTraverser, adatypes.NewGenericError(33)
	}
	// Query the debug level once; the checks below are in the hot loop.
	debug := adatypes.Central.IsDebugLevel()
	for _, value := range record.Value {
		if debug {
			adatypes.Central.Log.Debugf("Go through value %s %d", value.Type().Name(), value.Type().Type())
		}
		if t.EnterFunction != nil {
			if debug {
				adatypes.Central.Log.Debugf("Enter field=%s Type=%d", value.Type().Name(), value.Type().Type())
			}
			ret, err = t.EnterFunction(value, x)
			if err != nil {
				return
			}
			// Caller asked to skip this value's substructure and leave call.
			if ret == adatypes.SkipStructure {
				continue
			}
		}
		if value.Type().IsStructure() {
			if debug {
				adatypes.Central.Log.Debugf("Go through structure %s %d", value.Type().Name(), value.Type().Type())
			}
			ret, err = value.(adatypes.StructureValueTraverser).Traverse(t, x)
			if err != nil || ret == adatypes.EndTraverser {
				return
			}
		}
		if t.LeaveFunction != nil {
			if debug {
				adatypes.Central.Log.Debugf("Leave %s %d", value.Type().Name(), value.Type().Type())
			}
			ret, err = t.LeaveFunction(value, x)
			if err != nil || ret == adatypes.EndTraverser {
				return
			}
		}
	}
	if debug {
		adatypes.Central.Log.Debugf("Traverse ended")
	}
	return
}
// DumpValues traverse through the tree of values calling a callback
// method and print the collected dump to standard output.
func (record *Record) DumpValues() {
	fmt.Println("Dump all record values")
	var out bytes.Buffer
	traverser := adatypes.TraverserValuesMethods{
		PrepareFunction: prepareRecordDump,
		EnterFunction:   traverseDumpRecord,
	}
	_, _ = record.Traverse(traverser, &out)
	fmt.Printf("%s", out.String())
}
// searchValue search a value using a given field name. The name is
// parsed with NewFieldQuery, which may yield a prefix character and
// period/multiple indexes. Returns the value and true on success,
// nil and false if the query cannot be parsed or no value matches.
func (record *Record) searchValue(field string) (adatypes.IAdaValue, bool) {
	fq, err := NewFieldQuery(field)
	if err != nil {
		adatypes.Central.Log.Debugf("Query failure: %v", err)
		return nil, false
	}
	if adatypes.Central.IsDebugLevel() {
		adatypes.Central.Log.Debugf("Search field %s (%s)", fq.Name, field)
		for k, v := range record.HashFields {
			adatypes.Central.Log.Debugf("Key: %s %#v", k, v)
		}
	}
	// The hash key carries the prefix character when one is present.
	name := fq.Name
	if fq.Prefix != ' ' {
		name = string(fq.Prefix) + fq.Name
	}
	if adaValue, ok := record.HashFields[name]; ok {
		adatypes.Central.Log.Debugf("Found value %s (%v)", adaValue.Type().Name(), adaValue.Type().Type())
		// Indexed multiple field: search the concrete element matching
		// the requested period/multiple index combination.
		if adaValue.Type().Type() == adatypes.FieldTypeMultiplefield && (fq.MultipleIndex > 0 || fq.PeriodicIndex > 0) {
			sv := adaValue.(*adatypes.StructureValue)
			for _, ve := range sv.Elements {
				for _, v := range ve.Values {
					adatypes.Central.Log.Debugf("PE %d MU %d", v.PeriodIndex(), v.MultipleIndex())
					if fq.PeriodicIndex == v.PeriodIndex() && fq.MultipleIndex == v.MultipleIndex() {
						return v, true
					}
				}
			}
		}
		// No (or no matching) index qualifier: return the field value itself.
		return adaValue, true
	}
	adatypes.Central.Log.Debugf("Found no value for %s", name)
	if adatypes.Central.IsDebugLevel() {
		for k, v := range record.HashFields {
			adatypes.Central.Log.Debugf("Key valid %s -> %s(%d)", k, v.Type().Name(), v.Type().Type())
		}
	}
	return nil, false
}
// SetValue set the value for a specific field given by the field name. The
// field value is defined by the interface given.
// The field value index of a period group or multiple field might be defined
// using square brackets. For example AA[1,2] will set the first entry of a
// period group and the second entry of the multiple field.
// Both the one-bracket form AA[x,y] and the two-bracket form AA[x][y]
// are accepted.
func (record *Record) SetValue(field string, value interface{}) (err error) {
	if field == "" {
		err = adatypes.NewGenericError(172)
		return
	}
	adatypes.Central.Log.Debugf("Set value %s", field)
	if strings.ContainsRune(field, '[') {
		// i: position of '['; c: comma offset RELATIVE to i; e: position of ']'.
		i := strings.IndexRune(field, '[')
		c := strings.IndexRune(field[i:], ',')
		e := strings.IndexRune(field, ']')
		if c > 0 {
			// Form AA[x,y]: period-group index and multiple-field index
			// inside a single bracket pair.
			eField := field[:i]
			index1, xerr := strconv.ParseInt(field[i+1:i+c], 0, 64)
			if xerr != nil {
				return xerr
			}
			index2, xerr := strconv.ParseInt(field[i+c+1:e], 0, 64)
			if xerr != nil {
				return xerr
			}
			// Indexes must fit into uint32.
			if index1 < 0 || index1 > math.MaxUint32 {
				return adatypes.NewGenericError(118, index1)
			}
			if index2 < 0 || index2 > math.MaxUint32 {
				return adatypes.NewGenericError(118, index2)
			}
			return record.SetValueWithIndex(eField, []uint32{uint32(index1), uint32(index2)}, value)
		}
		// Single index in the first bracket pair.
		index, xerr := strconv.ParseInt(field[i+1:e], 0, 64)
		if xerr != nil {
			return xerr
		}
		eField := field[:i]
		// Look for a second bracket pair after the first: form AA[x][y].
		f := field[e+1:]
		i = strings.IndexRune(f, '[')
		if i == -1 {
			// Form AA[x]: one-dimensional index.
			if index < 0 || index > math.MaxUint32 {
				return adatypes.NewGenericError(118, index)
			}
			return record.SetValueWithIndex(eField, []uint32{uint32(index)}, value)
		}
		e = strings.IndexRune(f, ']')
		muindex, merr := strconv.ParseInt(f[i+1:e], 0, 64)
		if merr != nil {
			return merr
		}
		if index < 0 || index > math.MaxUint32 {
			return adatypes.NewGenericError(118, index)
		}
		if muindex < 0 || muindex > math.MaxUint32 {
			return adatypes.NewGenericError(118, muindex)
		}
		return record.SetValueWithIndex(eField, []uint32{uint32(index), uint32(muindex)}, value)
	}
	// Plain field name without index.
	if adaValue, ok := record.searchValue(field); ok {
		err = adaValue.SetValue(value)
		adatypes.Central.Log.Debugf("Set %s [%T] value err=%v", field, adaValue, err)
		// TODO check if the field which is not found and stored should be checked
	} else {
		adatypes.Central.Log.Debugf("Field %s not found %p", field, adaValue)
		// Special descriptors (e.g. super descriptors) are silently ignored;
		// everything else yields a "field not found" error (28).
		s, e2 := record.definition.SearchType(field)
		adatypes.Central.Log.Debugf("Type %v not found %v", s, e2)
		if e2 == nil {
			if s.IsSpecialDescriptor() {
				err = nil
				adatypes.Central.Log.Debugf("Found %v is super descriptor and is ignored", field)
				return
			}
			adatypes.Central.Log.Debugf("Found %v but no super descriptor", field)
		}
		err = adatypes.NewGenericError(28, field)
	}
	return
}
// SetValueWithIndex set the value for a specific field given by the field name. The
// field value is defined by the interface given.
// The field value index of a period group or multiple field might be defined
// using the uint32 slice.
func (record *Record) SetValueWithIndex(name string, index []uint32, x interface{}) error {
	// TODO why specific?
	// The definition operates on its own Values slice; point it at this
	// record's value tree before delegating the indexed set operation.
	record.definition.Values = record.Value
	adatypes.Central.Log.Debugf("Record value : %#v", record.Value)
	return record.definition.SetValueWithIndex(name, index, x)
}
// SetPartialValue set the field value for a partial part of a lob field.
// The named field must be a large object (LB) field; otherwise error 134
// is returned. The partial range starts at offset and spans len(data) bytes.
func (record *Record) SetPartialValue(name string, offset uint32, data []byte) (err error) {
	v, verr := record.SearchValue(name)
	if verr != nil {
		return verr
	}
	// Partial updates are only supported for LOB string fields.
	if v.Type().Type() != adatypes.FieldTypeLBString {
		return adatypes.NewGenericError(134, v.Type().Name())
	}
	av := v.(adatypes.PartialValue)
	// NOTE(review): the SetValue error is deliberately discarded (the
	// former error check is commented out below) -- confirm this
	// best-effort behavior is intended.
	_ = v.SetValue(data)
	// if err != nil {
	// 	return
	// }
	av.SetPartial(offset, uint32(len(data)))
	return nil
}
// extractIndexRe matches a field name with an optional one- or
// two-dimensional bracket index, e.g. "RA", "RA[1]" or "RA[1,2]".
// Submatch 3 is the first index, submatch 4 the optional second one.
// Compiled once at package scope so extractIndex does not pay the
// regular-expression compilation cost on every call.
var extractIndexRe = regexp.MustCompile(`(?m)(\w+(\[(\d+),?(\d+)?\])?)`)

// extractIndex extract the index information RA[1] of the field name
// and returns it as a slice of up to two uint32 values. Parsing stops
// (returning what was collected so far) as soon as an index is missing,
// malformed or outside the uint32 range.
func extractIndex(name string) []uint32 {
	var index []uint32
	for _, s := range extractIndexRe.FindAllStringSubmatch(name, -1) {
		v, err := strconv.ParseInt(s[3], 10, 0)
		if err != nil {
			return index
		}
		if v < 0 || v > math.MaxUint32 {
			return index
		}
		index = append(index, uint32(v))
		// Optional second dimension, e.g. the MU index of "RA[1,2]".
		if s[4] != "" {
			v, err = strconv.ParseInt(s[4], 10, 0)
			if err != nil {
				return index
			}
			if v < 0 || v > math.MaxUint32 {
				return index
			}
			index = append(index, uint32(v))
		}
	}
	return index
}
// SearchValue this method search for the value in the tree given by the
// field parameter.
//
// Accepted parameter forms:
//   - SearchValue(name string)
//   - SearchValue(name string, index []uint32)
//
// A name may carry an embedded bracket index such as "AA[1,2]", which
// is parsed with extractIndex; without any index {0, 0} is assumed.
func (record *Record) SearchValue(parameter ...interface{}) (adatypes.IAdaValue, error) {
	name := parameter[0].(string)
	var index []uint32
	if len(parameter) > 1 {
		index = parameter[1].([]uint32)
	} else {
		if strings.ContainsRune(name, '[') {
			// Index embedded in the name; strip it off the lookup name.
			index = extractIndex(name)
			name = name[:strings.IndexRune(name, '[')]
		} else {
			index = []uint32{0, 0}
		}
	}
	// Fast path: non-period-group fields can be served from the hash map.
	if v, ok := record.HashFields[name]; ok && !v.Type().HasFlagSet(adatypes.FlagOptionPE) {
		return v, nil
	}
	adatypes.Central.Log.Debugf("Search %s index: %#v", name, index)
	return record.SearchValueIndex(name, index)
}
// SearchValueIndex search value in the tree with a given index uint32 slice
func (record *Record) SearchValueIndex(name string, index []uint32) (adatypes.IAdaValue, error) {
	// Point the definition at this record's value tree before searching.
	record.definition.Values = record.Value
	adatypes.Central.Log.Debugf("Record value : %#v", record.Value)
	return record.definition.SearchByIndex(name, index, false)
}
// PeriodGroup return if it is part of an period group return the period
// group field of level 1. If no period group field, returns nil.
func PeriodGroup(v adatypes.IAdaValue) adatypes.IAdaValue {
	if !v.Type().HasFlagSet(adatypes.FlagOptionPE) {
		return nil
	}
	// Walk up the parent chain until the period group node is reached.
	current := v
	for current.Type().Type() != adatypes.FieldTypePeriodGroup {
		current = current.Parent()
	}
	return current
}
// ValueQuantity this method provide the number of quantity of an PE
// or MU field. If the field is referenced with a square bracket index,
// the corresponding MU field of an period group is counted.
// The quantity is not the Adabas record quantity. It represents the
// record result quantity only.
//
// Returns -1 when no field name is given, the field is unknown or an
// indexed lookup fails; a plain elementary field counts as 1.
func (record *Record) ValueQuantity(param ...interface{}) int32 {
	if len(param) == 0 {
		return -1
	}
	var index []uint32
	fieldName := param[0].(string)
	debug := adatypes.Central.IsDebugLevel()
	if debug {
		adatypes.Central.Log.Debugf("Field name: %s", fieldName)
	}
	// The index either comes embedded in the name ("AA[1]") or as
	// additional uint32/int parameters; other parameter types are ignored.
	if strings.ContainsRune(fieldName, '[') {
		index = extractIndex(fieldName)
		fieldName = fieldName[:strings.IndexRune(fieldName, '[')]
	} else {
		for i := 1; i < len(param); i++ {
			switch w := param[i].(type) {
			case uint32:
				index = append(index, w)
			case int:
				index = append(index, uint32(w))
			default:
			}
		}
	}
	if debug {
		adatypes.Central.Log.Debugf("Index from parser %#v", index)
	}
	if v, ok := record.HashFields[fieldName]; ok {
		if v.Type().HasFlagSet(adatypes.FlagOptionPE) {
			if debug {
				adatypes.Central.Log.Debugf("Quantity of %s PE", v.Type().Name())
			}
			// Without an index the number of period group elements is
			// the quantity.
			if len(index) < 1 {
				p := PeriodGroup(v)
				pv := p.(*adatypes.StructureValue)
				return int32(pv.NrElements())
			}
			var err error
			if debug {
				adatypes.Central.Log.Debugf("Search index of PE %s", v.Type().Name())
			}
			// With an index, count the elements of the indexed value.
			v, err = record.SearchValueIndex(fieldName, index)
			if err != nil {
				adatypes.Central.Log.Debugf("Error %s/%v: %v", fieldName, index, err)
				return -1
			}
			switch mv := v.(type) {
			case *adatypes.StructureValue:
				return int32(mv.NrElements())
			default:
			}
		}
		if v.Type().Type() == adatypes.FieldTypeMultiplefield {
			if debug {
				adatypes.Central.Log.Debugf("Quantity of %s MU elements", v.Type().Name())
			}
			mv := v.(*adatypes.StructureValue)
			return int32(mv.NrElements())
		}
		// Elementary field: quantity is always 1.
		return 1
	}
	return -1
}
// Scan scan for different field entries and fill the given destination
// pointers. The pseudo fields "#isn" and "#isnquantity" are served
// directly from the record header; all other fields are filled by
// traversing the value tree.
func (record *Record) Scan(dest ...interface{}) (err error) {
	debug := adatypes.Central.IsDebugLevel()
	if debug {
		adatypes.Central.Log.Debugf("Scan Record %#v", record.fields)
	}
	if f, ok := record.fields["#isn"]; ok {
		if debug {
			adatypes.Central.Log.Debugf("Fill Record ISN=%d", record.Isn)
		}
		*(dest[f.index].(*int)) = int(record.Isn)
	}
	if f, ok := record.fields["#isnquantity"]; ok {
		if debug {
			adatypes.Central.Log.Debugf("Fill Record ISN quantity=%d", record.Quantity)
		}
		*(dest[f.index].(*int)) = int(record.Quantity)
	}
	// Traverse to current entries
	traverser := adatypes.TraverserValuesMethods{EnterFunction: scanFieldsTraverser}
	target := &scanFields{fields: record.fields, parameter: dest}
	_, err = record.Traverse(traverser, target)
	return err
}
// traverseMarshalXML2 traverser used by the XML Marshaller
//
// Structure values produce a start element: period groups, multiple
// fields and groups are named after the long field name when it
// differs from the short name, otherwise a generic element name
// ("Period"/"Multiple"/"Group") with the short name in an "sn"
// attribute is used. Elementary values are written as
// <name>value</name>; names starting with "@" are treated as links
// and marked with a type="link" attribute.
func traverseMarshalXML2(adaValue adatypes.IAdaValue, x interface{}) (adatypes.TraverseResult, error) {
	enc := x.(*xml.Encoder)
	if adaValue.Type().IsStructure() {
		switch adaValue.Type().Type() {
		case adatypes.FieldTypePeriodGroup:
			peName := "Period"
			start := xml.StartElement{Name: xml.Name{Local: peName}}
			// Long name available: use it as element name; otherwise keep
			// the generic name and record the short name as attribute.
			if adaValue.Type().Name() != adaValue.Type().ShortName() {
				start.Name.Local = adaValue.Type().Name()
			} else {
				attrs := make([]xml.Attr, 0)
				attrs = append(attrs, xml.Attr{Name: xml.Name{Local: "sn"}, Value: adaValue.Type().Name()})
				start.Attr = attrs
			}
			_ = enc.EncodeToken(start)
		case adatypes.FieldTypeMultiplefield:
			muName := "Multiple"
			if adaValue.Type().Name() != adaValue.Type().ShortName() {
				muName = adaValue.Type().Name()
				start := xml.StartElement{Name: xml.Name{Local: muName}}
				_ = enc.EncodeToken(start)
			} else {
				start := xml.StartElement{Name: xml.Name{Local: muName}}
				attrs := make([]xml.Attr, 0)
				attrs = append(attrs, xml.Attr{Name: xml.Name{Local: "sn"}, Value: adaValue.Type().Name()})
				start.Attr = attrs
				_ = enc.EncodeToken(start)
			}
		case adatypes.FieldTypeGroup:
			grName := "Group"
			if adaValue.Type().Name() != adaValue.Type().ShortName() {
				grName = adaValue.Type().Name()
				start := xml.StartElement{Name: xml.Name{Local: grName}}
				_ = enc.EncodeToken(start)
			} else {
				start := xml.StartElement{Name: xml.Name{Local: grName}}
				attrs := make([]xml.Attr, 0)
				attrs = append(attrs, xml.Attr{Name: xml.Name{Local: "sn"}, Value: adaValue.Type().Name()})
				start.Attr = attrs
				_ = enc.EncodeToken(start)
			}
		default:
			start := xml.StartElement{Name: xml.Name{Local: adaValue.Type().Name()}}
			_ = enc.EncodeToken(start)
		}
	} else {
		// Elementary value: a leading "@" marks a link field.
		isLink := strings.HasPrefix(adaValue.Type().Name(), "@")
		name := adaValue.Type().Name()
		if isLink {
			name = adaValue.Type().Name()[1:]
		}
		start := xml.StartElement{Name: xml.Name{Local: name}}
		if isLink {
			start.Attr = []xml.Attr{{Name: xml.Name{Local: "type"}, Value: "link"}}
		}
		_ = enc.EncodeToken(start)
		// Emit the trimmed string representation as character data.
		x := adaValue.String()
		x = strings.Trim(x, " ")
		_ = enc.EncodeToken(xml.CharData([]byte(x)))
		_ = enc.EncodeToken(start.End())
	}
	return adatypes.Continue, nil
}
// traverseMarshalXMLEnd2 traverser end function used by the XML Marshaller.
// It closes the elements opened by traverseMarshalXML2 for structure
// values.
//
// NOTE(review): for period group / multiple field / group values a
// generic end element ("Period"/"Multiple"/"Group") is written AND an
// end element named after the field is written afterwards -- confirm
// this pairs up with the start tokens emitted by traverseMarshalXML2
// when the long field name was used as element name.
func traverseMarshalXMLEnd2(adaValue adatypes.IAdaValue, x interface{}) (adatypes.TraverseResult, error) {
	if adaValue.Type().IsStructure() {
		enc := x.(*xml.Encoder)
		sv := adaValue.(*adatypes.StructureValue)
		// A non-empty period group still has an open "Entry" element
		// (opened by traverseMarshalXMLElement) that must be closed first.
		if adaValue.Type().Type() == adatypes.FieldTypePeriodGroup && sv.NrElements() > 0 {
			end := xml.EndElement{Name: xml.Name{Local: "Entry"}}
			enc.EncodeToken(end)
		}
		if adaValue.Type().Type() == adatypes.FieldTypePeriodGroup {
			end := xml.EndElement{Name: xml.Name{Local: "Period"}}
			enc.EncodeToken(end)
		}
		if adaValue.Type().Type() == adatypes.FieldTypeMultiplefield {
			end := xml.EndElement{Name: xml.Name{Local: "Multiple"}}
			enc.EncodeToken(end)
		}
		if adaValue.Type().Type() == adatypes.FieldTypeGroup {
			end := xml.EndElement{Name: xml.Name{Local: "Group"}}
			enc.EncodeToken(end)
		}
		end := xml.EndElement{Name: xml.Name{Local: adaValue.Type().Name()}}
		enc.EncodeToken(end)
	}
	return adatypes.Continue, nil
}
// traverseMarshalXMLElement traverser element function used by the XML
// Marshaller. For every period group element it opens an "Entry"
// element, closing the previous entry first when one is already open.
func traverseMarshalXMLElement(adaValue adatypes.IAdaValue, nr, max int, x interface{}) (adatypes.TraverseResult, error) {
	enc := x.(*xml.Encoder)
	if adaValue.Type().Type() != adatypes.FieldTypePeriodGroup {
		return adatypes.Continue, nil
	}
	if nr > 0 {
		enc.EncodeToken(xml.EndElement{Name: xml.Name{Local: "Entry"}})
	}
	enc.EncodeToken(xml.StartElement{Name: xml.Name{Local: "Entry"}})
	return adatypes.Continue, nil
}
// MarshalXML provide XML marshal method of a record.
//
// The record is emitted as one element named after the attached
// Adabas map (when present) or "Record". ISN and quantity are added
// as attributes when they are greater than zero, and the value tree
// is rendered inside the element by the traverseMarshalXML* callbacks.
func (record *Record) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	adatypes.Central.Log.Debugf("Marshal XML record: %d", record.Isn)
	var rec xml.StartElement
	adatypes.Central.Log.Debugf("Map usage: %#v", record.adabasMap)
	if record.adabasMap != nil {
		rec = xml.StartElement{Name: xml.Name{Local: record.adabasMap.Name}}
	} else {
		rec = xml.StartElement{Name: xml.Name{Local: "Record"}}
	}
	if record.Isn > 0 {
		rec.Attr = append(rec.Attr, xml.Attr{Name: xml.Name{Local: "ISN"}, Value: strconv.Itoa(int(record.Isn))})
	}
	if record.Quantity > 0 {
		// Append instead of assigning a fresh slice: the previous code
		// overwrote rec.Attr here, silently dropping the ISN attribute
		// whenever both ISN and quantity were set.
		rec.Attr = append(rec.Attr, xml.Attr{Name: xml.Name{Local: "Quantity"}, Value: strconv.Itoa(int(record.Quantity))})
	}
	_ = e.EncodeToken(rec)
	tm := adatypes.TraverserValuesMethods{EnterFunction: traverseMarshalXML2, LeaveFunction: traverseMarshalXMLEnd2, ElementFunction: traverseMarshalXMLElement}
	_, err := record.Traverse(tm, e)
	if err != nil {
		return err
	}
	_ = e.EncodeToken(rec.End())
	adatypes.Central.Log.Debugf("Marshal XML record finished")
	return nil
}
// MarshalJSON provide JSON marshal function of a record.
//
// The record is flattened into a map ("ISN"/"Quantity" plus one entry
// per field, filled by the traverseMarshalJSON* callbacks) and then
// serialized with encoding/json.
func (record *Record) MarshalJSON() ([]byte, error) {
	adatypes.Central.Log.Debugf("Marshal JSON record: %d", record.Isn)
	req := &responseJSON{special: true}
	tm := adatypes.TraverserValuesMethods{EnterFunction: traverseMarshalJSON, LeaveFunction: traverseMarshalJSONEnd,
		ElementFunction: traverseElementMarshalJSON}
	req.stack = adatypes.NewStack()
	dataMap := make(map[string]interface{})
	req.dataMap = &dataMap
	req.Values = append(req.Values, req.dataMap)
	// ISN and quantity are only emitted when set.
	if record.Isn > 0 {
		dataMap["ISN"] = record.Isn
	}
	if record.Quantity > 0 {
		dataMap["Quantity"] = record.Quantity
	}
	// Traverse record generating JSON
	_, err := record.Traverse(tm, req)
	if err != nil {
		return nil, err
	}
	adatypes.Central.Log.Debugf("Go JSON response %v -> %s", err, req.buffer.String())
	return json.Marshal(req.dataMap)
}
// TrimString search value and provide a trimmed string/alpha
// representation (surrounding blanks removed). A failed lookup
// yields the empty string.
func (record *Record) TrimString(parameter ...interface{}) string {
	v, err := record.SearchValue(parameter...)
	if err != nil {
		return ""
	}
	return strings.Trim(v.String(), " ")
}
// Bytes search value and provide a raw byte slice representation.
// A failed lookup yields nil.
func (record *Record) Bytes(parameter ...interface{}) []byte {
	v, err := record.SearchValue(parameter...)
	if err != nil {
		return nil
	}
	return v.Bytes()
}
// SelectValue provide selected list of value: only the record values
// whose field name occurs in the given definition are returned.
func (record *Record) SelectValue(definition *adatypes.Definition) []adatypes.IAdaValue {
	names := definition.Fieldnames()
	selected := make([]adatypes.IAdaValue, 0)
	for _, value := range record.Value {
		if contains(names, value.Type().Name()) {
			selected = append(selected, value)
		}
	}
	return selected
}
// contains reports whether str is present in array.
func contains(array []string, str string) bool {
	for i := range array {
		if array[i] == str {
			return true
		}
	}
	return false
}
|
package server
import (
"net/http"
"github.com/qnib/metahub/pkg/daemon"
)
// getBaseHandler builds the base HTTP handler for the given service.
// The handler body is currently empty, so every request is answered
// with an empty 200 response.
func getBaseHandler(service daemon.Service) http.Handler {
	//storageService := env.Storage()
	handle := func(w http.ResponseWriter, r *http.Request) {
	}
	return http.HandlerFunc(handle)
}
|
package serviced
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
log "github.com/sirupsen/logrus"
)
// unmarshal reads the named file and decodes its JSON content into v.
func unmarshal(filename string, v interface{}) (err error) {
	var jsonBytes []byte
	if jsonBytes, err = ioutil.ReadFile(filename); err != nil {
		return
	}
	return json.Unmarshal(jsonBytes, v)
}
// marshal encodes v as indented JSON and writes it to the named file.
func marshal(filename string, v interface{}) (err error) {
	var jsonBytes []byte
	if jsonBytes, err = json.MarshalIndent(v, "", "    "); err != nil {
		return
	}
	return ioutil.WriteFile(filename, jsonBytes, os.ModePerm)
}
//Service is struct to record service configure
type Service struct {
	Name   string   `json:"name"`   // service name; required (validated in Config.Add)
	Path   string   `json:"path"`   // executable path; required (validated in Config.Add)
	Args   []string `json:"args"`   // command line arguments
	Env    []string `json:"env"`    // environment entries
	Stdout string   `json:"stdout"` // stdout target -- presumably a file path; TODO confirm against runner
	Stderr string   `json:"stderr"` // stderr target -- presumably a file path; TODO confirm against runner
	Dir    string   `json:"dir"`    // working directory
}
//Group is struct to record the service group configure
type Group struct {
	Name     string    `json:"name"`     // group name, used as key in Config.Groups
	Services []Service `json:"services"` // services belonging to this group
	Filename string    `json:"-"`        // file the group was loaded from (not persisted)
	Enable   int       `json:"-"`        // enable flag taken from Config.Includes (not persisted)
}
//Config is current running configure
type Config struct {
	Filename string           `json:"-"`        // path of the main configuration file
	Includes map[string]int   `json:"includes"` // group file path -> enable flag (persisted)
	Groups   map[string]Group `json:"-"`        // loaded groups keyed by group name (rebuilt on Load)
}
// copy returns a shallow clone of the configuration with its own
// Includes and Groups maps, so the clone can be modified and saved
// without touching the receiver.
func (c *Config) copy() (config *Config) {
	config = &Config{
		Filename: c.Filename,
		Includes: make(map[string]int, len(c.Includes)),
		Groups:   make(map[string]Group, len(c.Groups)),
	}
	for file, enable := range c.Includes {
		config.Includes[file] = enable
	}
	for name, group := range c.Groups {
		config.Groups[name] = group
	}
	return
}
// init lazily allocates the Includes and Groups maps so the zero
// Config value is usable.
func (c *Config) init() {
	if c.Includes == nil {
		c.Includes = make(map[string]int)
	}
	if c.Groups == nil {
		c.Groups = make(map[string]Group)
	}
}
//Load will read the configure file and parse it to the configure.
//When the file does not exist yet, an initial configuration is saved
//instead. All included group files are loaded afterwards; entries
//with an empty name or empty service list are skipped with a warning.
func (c *Config) Load() (err error) {
	c.init()
	err = unmarshal(c.Filename, c)
	if os.IsNotExist(err) {
		// First run: write an initial configuration file.
		err = c.Save()
		return
	} else if err != nil {
		return
	}
	// Load every included group file.
	for file, enable := range c.Includes {
		group := &Group{}
		err = unmarshal(file, group)
		if err != nil {
			return
		}
		if len(group.Name) < 1 {
			log.Warnf("load group from %v fail with group name is empty", file)
			continue
		}
		if len(group.Services) < 1 {
			log.Warnf("load group from %v fail with service list is empty", file)
			continue
		}
		// Filename/Enable are not part of the group file; attach them here.
		group.Filename = file
		group.Enable = enable
		c.Groups[group.Name] = *group
		log.Infof("load group from %v with %v service", file, len(group.Services))
	}
	return
}
//Save will save current configure to file. The parent directory is
//created first when it does not exist yet.
func (c *Config) Save() (err error) {
	c.init()
	log.Infof("save config to %v", c.Filename)
	dir := filepath.Dir(c.Filename)
	if _, statErr := os.Stat(dir); os.IsNotExist(statErr) {
		if err = os.MkdirAll(dir, os.ModePerm); err != nil {
			log.Errorf("save config to %v fail with %v", c.Filename, err)
			return
		}
	}
	if err = marshal(c.Filename, c); err != nil {
		log.Errorf("save config to %v fail with %v", c.Filename, err)
		return
	}
	log.Infof("save config to %v success", c.Filename)
	return
}
//Reload will reload group configure by name. The group must already be
//registered; its file is parsed again and the in-memory entry replaced.
func (c *Config) Reload(name string) (err error) {
	c.init()
	existing, ok := c.Groups[name]
	if !ok {
		return fmt.Errorf("%v is not exists", name)
	}
	reloaded := &Group{}
	if err = unmarshal(existing.Filename, reloaded); err != nil {
		return
	}
	// Keep the bookkeeping fields that are not part of the group file.
	reloaded.Filename = existing.Filename
	reloaded.Enable = existing.Enable
	c.Groups[name] = *reloaded
	return
}
//Find will find the group by name, returning nil when it is unknown.
//The returned pointer refers to a copy of the stored group.
func (c *Config) Find(name string) (group *Group) {
	c.init()
	if g, ok := c.Groups[name]; ok {
		return &g
	}
	return nil
}
//Add will add group service loaded from the given file and persist the
//new include entry (with its enable flag) to the main configuration.
//The group is validated first: unique name, non-empty service list and
//name/path set on every service. The in-memory state is only updated
//after the configuration file was written successfully.
func (c *Config) Add(filename string, enable int) (group Group, err error) {
	c.init()
	filename, err = filepath.Abs(filename)
	if err != nil {
		return
	}
	group = Group{}
	err = unmarshal(filename, &group)
	if err != nil {
		return
	}
	group.Filename = filename
	group.Enable = enable
	old, ok := c.Groups[group.Name]
	if ok {
		err = fmt.Errorf("group %v is exists from %v", group.Name, old.Filename)
		return
	}
	if len(group.Services) < 1 {
		err = fmt.Errorf("group %v services is empty from %v", group.Name, group.Filename)
		return
	}
	for index, service := range group.Services {
		if len(service.Name) < 1 || len(service.Path) < 1 {
			err = fmt.Errorf("group %v %v service name/path is required", group.Name, index)
			return
		}
	}
	// Save on a snapshot first so a failed write leaves the receiver
	// untouched. (Local renamed from "copy", which shadowed the
	// predeclared copy builtin.)
	snapshot := c.copy()
	snapshot.Includes[filename] = enable
	err = snapshot.Save()
	if err == nil {
		c.Includes[filename] = enable
		c.Groups[group.Name] = group
	}
	return
}
//Remove will remove group service by name and persist the change to
//the main configuration. The in-memory state is only updated after
//the configuration file was written successfully.
func (c *Config) Remove(name string) (group Group, err error) {
	c.init()
	group, ok := c.Groups[name]
	if !ok {
		err = fmt.Errorf("group %v is not exists", name)
		return
	}
	// Save on a snapshot first so a failed write leaves the receiver
	// untouched. (Local renamed from "copy", which shadowed the
	// predeclared copy builtin.)
	snapshot := c.copy()
	delete(snapshot.Includes, group.Filename)
	err = snapshot.Save()
	if err == nil {
		c.Includes = snapshot.Includes
		delete(c.Groups, name)
	}
	return
}
// //AddService will add service
// func (c *Config) AddService(service Service) (err error) {
// if len(service.Name) < 1 || len(service.Start) < 1 {
// err = fmt.Errorf("name/service is required")
// return
// }
// if s, e := os.Stat(service.Start); e != nil || !s.IsDir() {
// err = fmt.Errorf("start %v is not exists or is folder", service.Start)
// return
// }
// having := c.FindService(service.Name)
// if having != nil {
// err = fmt.Errorf("%v is exists", service.Name)
// return
// }
// for _, after := range service.After {
// having = c.FindService(after)
// if having == nil {
// err = fmt.Errorf("%v is not exists", after)
// return
// }
// }
// config := *c //copy
// config.Services = append(config.Services, service)
// err = config.Save()
// if err == nil {
// c.Services = append(c.Services, service)
// }
// return
// }
// //UpdateService will update service
// func (c *Config) UpdateService(service Service) (err error) {
// if len(service.Name) < 1 || len(service.Start) < 1 {
// err = fmt.Errorf("name/service is required")
// return
// }
// if s, e := os.Stat(service.Start); e != nil || !s.IsDir() {
// err = fmt.Errorf("start %v is not exists or is folder", service.Start)
// return
// }
// having := c.FindService(service.Name)
// if having == nil {
// err = fmt.Errorf("%v is not exists", service.Name)
// return
// }
// return
// }
|
package models_test
import (
"fmt"
"github.com/APTrust/exchange/constants"
"github.com/APTrust/exchange/models"
"github.com/APTrust/exchange/util"
"github.com/APTrust/exchange/util/testutil"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"math/rand"
"testing"
"time"
)
// getGlacierRestoreRequest builds a GlacierRestoreRequest test fixture.
// When gfIdentifier is empty a random file identifier is generated.
// RequestedAt is set to a random point up to 20 minutes in the past and
// the estimated S3 deletion is five days after the request time.
func getGlacierRestoreRequest(gfIdentifier string, accepted bool) *models.GlacierRestoreRequest {
	if gfIdentifier == "" {
		gfIdentifier = testutil.RandomFileIdentifier("test.edu/testbag")
	}
	minutesAgo := rand.Int31n(20)
	requestedAt := time.Now().Add(-time.Duration(minutesAgo) * time.Minute)
	return &models.GlacierRestoreRequest{
		GenericFileIdentifier:   gfIdentifier,
		GlacierBucket:           "bucket",
		GlacierKey:              uuid.New().String(),
		RequestAccepted:         accepted,
		RequestedAt:             requestedAt,
		EstimatedDeletionFromS3: requestedAt.Add(5 * 24 * time.Hour),
		SomeoneElseRequested:    false,
	}
}
// getGlacierRestoreState builds a fresh GlacierRestoreState fixture
// from a stub NSQ message and work item.
func getGlacierRestoreState() *models.GlacierRestoreState {
	message := testutil.MakeNsqMessage("42")
	item := testutil.MakeWorkItem()
	return models.NewGlacierRestoreState(message, item)
}
// TestNewGlacierRestoreState verifies that a fresh state has its
// WorkSummary and Requests initialized.
func TestNewGlacierRestoreState(t *testing.T) {
	restoreState := getGlacierRestoreState()
	require.NotNil(t, restoreState)
	assert.NotNil(t, restoreState.WorkSummary)
	assert.NotNil(t, restoreState.Requests)
}
// TestGlacierRestoreStateFindRequest verifies FindRequest returns the
// matching request by file identifier and nil for unknown identifiers.
func TestGlacierRestoreStateFindRequest(t *testing.T) {
	state := getGlacierRestoreState()
	require.NotNil(t, state)
	for i := 0; i < 10; i++ {
		state.Requests = append(state.Requests,
			getGlacierRestoreRequest(fmt.Sprintf("test.edu/bag/file_%d", i), true))
	}
	for _, id := range []string{"test.edu/bag/file_4", "test.edu/bag/file_6"} {
		req := state.FindRequest(id)
		assert.NotNil(t, req)
		assert.Equal(t, id, req.GenericFileIdentifier)
	}
	assert.Nil(t, state.FindRequest("test.edu/bag/file_does_not_exist"))
}
// TestGlacierRestoreStateGetReport builds 30 requests (every 6th one
// not accepted) plus two identifiers without any request, and checks
// the counts, membership lists and the earliest/latest request and
// expiry timestamps of the resulting report.
func TestGlacierRestoreStateGetReport(t *testing.T) {
	firstRequestTime, _ := time.Parse(time.RFC3339, "2018-08-01T12:00:00+00:00")
	firstDeletionTime, _ := time.Parse(time.RFC3339, "2016-08-06T15:33:00+00:00")
	state := getGlacierRestoreState()
	require.NotNil(t, state)
	fileIdentifiers := make([]string, 32)
	for i := 0; i < 30; i++ {
		identifier := fmt.Sprintf("test.edu/bag/file_%d", i)
		fileIdentifiers[i] = identifier
		accepted := (i%6 != 0) // every 6th item will be accepted = false
		request := getGlacierRestoreRequest(identifier, accepted)
		// Deterministic, strictly increasing timestamps per request.
		request.RequestedAt = firstRequestTime.Add(time.Minute * time.Duration(i))
		request.EstimatedDeletionFromS3 = firstDeletionTime.Add(time.Minute * time.Duration(i))
		state.Requests = append(state.Requests, request)
	}
	// Two required files for which no request exists at all.
	fileIdentifiers[30] = "test.edu/bag/file_30"
	fileIdentifiers[31] = "test.edu/bag/file_31"
	report := state.GetReport(fileIdentifiers)
	require.NotNil(t, report)
	assert.Equal(t, len(fileIdentifiers), report.FilesRequired)
	assert.Equal(t, len(fileIdentifiers)-2, report.FilesRequested)
	assert.Equal(t, 2, len(report.FilesNotRequested))
	assert.Equal(t, 5, len(report.RequestsNotAccepted))
	assert.True(t, util.StringListContains(report.FilesNotRequested, "test.edu/bag/file_30"))
	assert.True(t, util.StringListContains(report.FilesNotRequested, "test.edu/bag/file_31"))
	assert.True(t, util.StringListContains(report.RequestsNotAccepted, "test.edu/bag/file_0"))
	assert.True(t, util.StringListContains(report.RequestsNotAccepted, "test.edu/bag/file_6"))
	assert.True(t, util.StringListContains(report.RequestsNotAccepted, "test.edu/bag/file_12"))
	assert.True(t, util.StringListContains(report.RequestsNotAccepted, "test.edu/bag/file_18"))
	assert.True(t, util.StringListContains(report.RequestsNotAccepted, "test.edu/bag/file_24"))
	assert.Equal(t, firstRequestTime, report.EarliestRequest)
	assert.Equal(t, firstRequestTime.Add(time.Minute*time.Duration(29)), report.LatestRequest)
	// First request was marked as not accepted in the loop above,
	// so second request will have the earliest S3 expiry time.
	assert.Equal(t, firstDeletionTime.Add(time.Minute*time.Duration(1)), report.EarliestExpiry)
	assert.Equal(t, firstDeletionTime.Add(time.Minute*time.Duration(29)), report.LatestExpiry)
}
// TestNewGlacierRequestReport verifies that a fresh report has both
// name lists initialized and empty.
func TestNewGlacierRequestReport(t *testing.T) {
	requestReport := models.NewGlacierRequestReport()
	require.NotNil(t, requestReport)
	assert.NotNil(t, requestReport.FilesNotRequested)
	assert.Empty(t, requestReport.FilesNotRequested)
	assert.NotNil(t, requestReport.RequestsNotAccepted)
	assert.Empty(t, requestReport.RequestsNotAccepted)
}
// TestAllRetrievalsInitiated checks that the report claims all retrievals
// were initiated only when every file identifier has a matching request.
func TestAllRetrievalsInitiated(t *testing.T) {
	state := getGlacierRestoreState()
	require.NotNil(t, state)
	ids := make([]string, 30)
	for i := range ids {
		id := fmt.Sprintf("test.edu/bag/file_%d", i)
		ids[i] = id
		state.Requests = append(state.Requests, getGlacierRestoreRequest(id, true))
	}
	// Every identifier has an accepted request: all retrievals initiated.
	rpt := state.GetReport(ids)
	require.NotNil(t, rpt)
	assert.True(t, rpt.AllRetrievalsInitiated())
	// Two identifiers without requests must flip the answer to false.
	ids = append(ids, "test.edu/bag/file_30", "test.edu/bag/file_31")
	rpt = state.GetReport(ids)
	require.NotNil(t, rpt)
	assert.False(t, rpt.AllRetrievalsInitiated())
}
// TestAllItemsInS3 checks that the report reflects S3 availability for all
// requested files and counts files that are not yet in S3.
func TestAllItemsInS3(t *testing.T) {
	state := getGlacierRestoreState()
	require.NotNil(t, state)
	ids := make([]string, 30)
	for i := range ids {
		id := fmt.Sprintf("test.edu/bag/file_%d", i)
		ids[i] = id
		request := getGlacierRestoreRequest(id, true)
		request.IsAvailableInS3 = true
		state.Requests = append(state.Requests, request)
	}
	// All 30 requested files are marked available, so everything is in S3.
	rpt := state.GetReport(ids)
	require.NotNil(t, rpt)
	assert.True(t, rpt.AllItemsInS3())
	assert.Empty(t, rpt.FilesNotRequested)
	assert.Empty(t, rpt.RequestsNotAccepted)
	assert.Empty(t, rpt.FilesNotYetInS3)
	// Identifiers that were never requested mean not everything is in S3.
	ids = append(ids, "test.edu/bag/file_30", "test.edu/bag/file_31")
	rpt = state.GetReport(ids)
	require.NotNil(t, rpt)
	assert.False(t, rpt.AllItemsInS3())
	// Requests that exist but are not yet available in S3 are counted too.
	for _, name := range []string{"ned/flanders", "maude/flanders"} {
		request := getGlacierRestoreRequest(name, true)
		request.IsAvailableInS3 = false
		state.Requests = append(state.Requests, request)
	}
	rpt = state.GetReport(ids)
	require.NotNil(t, rpt)
	assert.False(t, rpt.AllItemsInS3())
	assert.Equal(t, 2, len(rpt.FilesNotYetInS3))
}
// TestGetFileIdentifiers checks identifier extraction from a state holding
// either a single GenericFile or an IntellectualObject with many files.
func TestGetFileIdentifiers(t *testing.T) {
	state := getGlacierRestoreState()
	require.NotNil(t, state)
	assert.Empty(t, state.GetFileIdentifiers())

	// A single GenericFile yields exactly its own identifier.
	state.GenericFile = testutil.MakeGenericFile(0, 0, "test.edu/bag")
	ids := state.GetFileIdentifiers()
	assert.Equal(t, 1, len(ids))
	assert.Equal(t, state.GenericFile.Identifier, ids[0])

	// An IntellectualObject with 20 files yields 20 identifiers.
	state = getGlacierRestoreState()
	require.NotNil(t, state)
	state.IntellectualObject = testutil.MakeIntellectualObject(20, 0, 0, 0)
	ids = state.GetFileIdentifiers()
	assert.Equal(t, 20, len(ids))
}
// TestGetStorageOption checks that the storage option comes from whichever
// of GenericFile or IntellectualObject is present, and errors when neither is.
func TestGetStorageOption(t *testing.T) {
	state := getGlacierRestoreState()
	require.NotNil(t, state)

	// With neither object nor file attached, the call must fail.
	option, err := state.GetStorageOption()
	assert.Empty(t, option)
	assert.NotEmpty(t, err)
	assert.Equal(t, "State has neither IntellectualObject nor GenericFile", err.Error())

	// A GenericFile alone supplies the option.
	state.GenericFile = &models.GenericFile{
		StorageOption: constants.StorageStandard,
	}
	option, err = state.GetStorageOption()
	assert.Nil(t, err)
	assert.Equal(t, constants.StorageStandard, option)

	// An IntellectualObject alone supplies the option as well.
	state.GenericFile = nil
	state.IntellectualObject = &models.IntellectualObject{
		StorageOption: constants.StorageGlacierDeepVA,
	}
	option, err = state.GetStorageOption()
	assert.Nil(t, err)
	assert.Equal(t, constants.StorageGlacierDeepVA, option)
}
|
package tarot
import (
"fmt"
"sort"
)
// Player is one participant in a tarot game.
type Player struct {
	// Id is the player's seat index, compared against Table.FirstPlayer.
	Id int
	// CardsRemaining maps every card dealt to this player to true while it
	// is still in hand; removeCard flips an entry to false once played.
	CardsRemaining map[Card]bool
}
// PlayerJson is the wire representation of a player's hand: the full card
// list plus card numbers bucketed per suit. The JSON keys "0".."5" appear
// to mirror the numeric values of the Color constants (HEART..EXCUSE) —
// NOTE(review): confirm against the Color declarations.
type PlayerJson struct {
	AllCards []Card `json:"cards"`
	Heart []int `json:"0"`
	Club []int `json:"1"`
	Diamond []int `json:"2"`
	Spade []int `json:"3"`
	Trump []int `json:"4"`
	Excuse []int `json:"5"`
}
// hasCard reports whether the player was dealt card c and has not yet
// played it. A missing key yields the zero value false, so the map lookup
// alone replaces the previous value-and-ok check plus if/else.
func (p *Player) hasCard(c Card) bool {
	return p.CardsRemaining[c]
}
// validCard reports whether player p may legally play card c on table t:
// the trick opener and the excuse are always legal; otherwise the player
// must follow the led color if able, cut with a trump when void, and
// over-trump when holding a trump higher than any already on the table.
func (p *Player) validCard(c Card, t Table) bool {
	// First player can play any card, and the excuse is always allowed.
	if p.Id == t.FirstPlayer || c.Color == EXCUSE {
		return true
	}
	// The led card defines the trick's color; if the opener played the
	// excuse, the next player is free and the second card sets the color.
	colorTrick := t.Cards[t.FirstPlayer].Color
	if colorTrick == EXCUSE {
		if p.Id == (t.FirstPlayer+1)%NB_PLAYERS {
			return true
		}
		colorTrick = t.Cards[(t.FirstPlayer+1)%NB_PLAYERS].Color
	}
	// Check the card played is valid for the led color.
	switch colorTrick {
	case TRUMP:
		if !p.hasTrumps() {
			// Cannot follow a trump lead: any card goes.
			return true
		}
		if c.Color != TRUMP {
			// Holds trumps, so a trump must be played.
			return false
		}
		return p.trumpHighEnough(c, t)
	default:
		if p.hasColor(colorTrick) {
			// Must follow suit when able.
			return c.Color == colorTrick
		}
		if p.hasTrumps() {
			if c.Color != TRUMP {
				// Void in the led color: must cut with a trump.
				return false
			}
			return p.trumpHighEnough(c, t)
		}
		// Neither the led color nor trumps: discard anything.
		return true
	}
}

// trumpHighEnough reports whether trump card c is a legal trump play.
// If the player holds no trump above the table's best, any trump is fine;
// otherwise c must not be lower than any trump already played. This logic
// was previously duplicated verbatim in both branches of validCard.
func (p *Player) trumpHighEnough(c Card, t Table) bool {
	if !p.hasBiggerTrump(t) {
		return true
	}
	for i := 0; i < NB_PLAYERS; i++ {
		if t.Cards[i].Color == TRUMP && c.Number < t.Cards[i].Number {
			return false
		}
	}
	return true
}
// hasBiggerTrump reports whether the player still holds a trump strictly
// higher than every trump currently on the table.
func (p *Player) hasBiggerTrump(t Table) bool {
	best := 0
	for _, played := range t.Cards {
		if played.Color == TRUMP && played.Number > best {
			best = played.Number
		}
	}
	for card, inHand := range p.CardsRemaining {
		if inHand && card.Color == TRUMP && card.Number > best {
			return true
		}
	}
	return false
}
// hasTrumps reports whether the player still holds at least one trump.
// Its previous body was an exact copy of hasColor specialized to TRUMP,
// so it now delegates instead of duplicating the scan.
func (p *Player) hasTrumps() bool {
	return p.hasColor(TRUMP)
}
// hasColor reports whether the player still holds at least one card of the
// given color.
func (p *Player) hasColor(color Color) bool {
	for card, inHand := range p.CardsRemaining {
		if !inHand {
			continue
		}
		if card.Color == color {
			return true
		}
	}
	return false
}
// removeCard marks card c as played by flipping its map entry to false.
// It panics when the player does not (or no longer) hold the card, since
// that indicates a broken game invariant upstream.
func (p *Player) removeCard(c Card) {
	if p.CardsRemaining[c] {
		p.CardsRemaining[c] = false
		return
	}
	// %v instead of %q: Card is a struct, so the old %q verb produced
	// malformed "%!q(...)" output; the stray trailing newline is gone too.
	panic(fmt.Errorf("this player does not have card %v", c))
}
// CardsToJson builds the JSON view of the player's remaining hand: every
// in-hand card plus per-suit sorted lists of card numbers. Slices are
// allocated empty (not nil) so they encode as [] rather than null.
func (p *Player) CardsToJson() PlayerJson {
	out := PlayerJson{
		AllCards: make([]Card, 0),
		Heart:    make([]int, 0),
		Club:     make([]int, 0),
		Diamond:  make([]int, 0),
		Spade:    make([]int, 0),
		Trump:    make([]int, 0),
		Excuse:   make([]int, 0),
	}
	for card, inHand := range p.CardsRemaining {
		if !inHand {
			continue
		}
		out.AllCards = append(out.AllCards, card)
		switch card.Color {
		case HEART:
			out.Heart = append(out.Heart, card.Number)
		case CLUB:
			out.Club = append(out.Club, card.Number)
		case DIAMOND:
			out.Diamond = append(out.Diamond, card.Number)
		case SPADE:
			out.Spade = append(out.Spade, card.Number)
		case TRUMP:
			out.Trump = append(out.Trump, card.Number)
		case EXCUSE:
			out.Excuse = append(out.Excuse, card.Number)
		}
	}
	// sort.Ints sorts through the shared backing arrays, so sorting the
	// slice headers in this list sorts the struct fields in place.
	for _, suit := range [][]int{out.Heart, out.Club, out.Diamond, out.Spade, out.Trump, out.Excuse} {
		sort.Ints(suit)
	}
	return out
}
|
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package k8smeta
import (
log "github.com/sirupsen/logrus"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"px.dev/pixie/src/shared/k8s"
"px.dev/pixie/src/vizier/services/metadata/storepb"
)
// createHandlers wires a converter into an informer event handler: every
// add/update/delete is converted to a K8sResourceMessage, tagged with the
// matching watch event type, and forwarded on ch.
func createHandlers(convert func(obj interface{}) *K8sResourceMessage, ch chan *K8sResourceMessage) cache.ResourceEventHandler {
	forward := func(obj interface{}, eventType watch.EventType) {
		// A nil message means the converter rejected the object; drop it.
		if msg := convert(obj); msg != nil {
			msg.EventType = eventType
			ch <- msg
		}
	}
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { forward(obj, watch.Added) },
		UpdateFunc: func(oldObj, newObj interface{}) { forward(newObj, watch.Modified) },
		DeleteFunc: func(obj interface{}) { forward(obj, watch.Deleted) },
	}
}
// startNodeWatcher streams node events into ch until quitCh closes. After
// the informer cache syncs it replays every existing node as an Added
// event, because later watchers depend on nodes being delivered first.
func startNodeWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factory informers.SharedInformerFactory) {
	nodes := factory.Core().V1().Nodes()
	informer := nodes.Informer()
	_, _ = informer.AddEventHandler(createHandlers(nodeConverter, ch))
	go informer.Run(quitCh)
	cache.WaitForCacheSync(quitCh, informer.HasSynced)
	// A cache sync doesn't guarantee that the handlers have been called,
	// so instead we manually list and call the Add handlers since subsequent
	// resources depend on these.
	nodeList, err := nodes.Lister().List(labels.Everything())
	if err != nil {
		log.WithError(err).Errorf("Failed to init nodes")
	}
	for _, node := range nodeList {
		if msg := nodeConverter(node); msg != nil {
			msg.EventType = watch.Added
			ch <- msg
		}
	}
}
// startNamespaceWatcher streams namespace events into ch until quitCh closes.
func startNamespaceWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factory informers.SharedInformerFactory) {
	informer := factory.Core().V1().Namespaces().Informer()
	_, _ = informer.AddEventHandler(createHandlers(namespaceConverter, ch))
	go informer.Run(quitCh)
}
// startPodWatcher streams pod events from every factory into ch until
// quitCh closes. Existing pods are replayed as Added events after each
// cache sync, since later consumers depend on seeing them.
func startPodWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factories []informers.SharedInformerFactory) {
	for _, factory := range factories {
		pods := factory.Core().V1().Pods()
		informer := pods.Informer()
		_, _ = informer.AddEventHandler(createHandlers(podConverter, ch))
		go informer.Run(quitCh)
		cache.WaitForCacheSync(quitCh, informer.HasSynced)
		// A cache sync doesn't guarantee that the handlers have been called,
		// so instead we manually list and call the Add handlers since subsequent
		// resources depend on these.
		podList, err := pods.Lister().List(labels.Everything())
		if err != nil {
			log.WithError(err).Errorf("Failed to init pods")
		}
		for _, pod := range podList {
			if msg := podConverter(pod); msg != nil {
				msg.EventType = watch.Added
				ch <- msg
			}
		}
	}
}
// startServiceWatcher streams service events from every factory into ch
// until quitCh closes.
func startServiceWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factories []informers.SharedInformerFactory) {
	for _, factory := range factories {
		informer := factory.Core().V1().Services().Informer()
		_, _ = informer.AddEventHandler(createHandlers(serviceConverter, ch))
		go informer.Run(quitCh)
	}
}
// startEndpointsWatcher streams endpoints events from every factory into
// ch until quitCh closes.
func startEndpointsWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factories []informers.SharedInformerFactory) {
	for _, factory := range factories {
		informer := factory.Core().V1().Endpoints().Informer()
		_, _ = informer.AddEventHandler(createHandlers(endpointsConverter, ch))
		go informer.Run(quitCh)
	}
}
// startReplicaSetWatcher streams replica set events from every factory
// into ch until quitCh closes.
func startReplicaSetWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factories []informers.SharedInformerFactory) {
	for _, factory := range factories {
		informer := factory.Apps().V1().ReplicaSets().Informer()
		_, _ = informer.AddEventHandler(createHandlers(replicaSetConverter, ch))
		go informer.Run(quitCh)
	}
}
// startDeploymentWatcher streams deployment events from every factory into
// ch until quitCh closes.
func startDeploymentWatcher(ch chan *K8sResourceMessage, quitCh <-chan struct{}, factories []informers.SharedInformerFactory) {
	for _, factory := range factories {
		informer := factory.Apps().V1().Deployments().Informer()
		_, _ = informer.AddEventHandler(createHandlers(deploymentConverter, ch))
		go informer.Run(quitCh)
	}
}
// podConverter wraps a *v1.Pod into a K8sResourceMessage.
func podConverter(obj interface{}) *K8sResourceMessage {
	pod := k8s.PodToProto(obj.(*v1.Pod))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Pod{Pod: pod},
	}
	return &K8sResourceMessage{ObjectType: "pods", Object: resource}
}
// serviceConverter wraps a *v1.Service into a K8sResourceMessage.
func serviceConverter(obj interface{}) *K8sResourceMessage {
	service := k8s.ServiceToProto(obj.(*v1.Service))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Service{Service: service},
	}
	return &K8sResourceMessage{ObjectType: "services", Object: resource}
}
// namespaceConverter wraps a *v1.Namespace into a K8sResourceMessage.
func namespaceConverter(obj interface{}) *K8sResourceMessage {
	namespace := k8s.NamespaceToProto(obj.(*v1.Namespace))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Namespace{Namespace: namespace},
	}
	return &K8sResourceMessage{ObjectType: "namespaces", Object: resource}
}
// endpointsConverter wraps a *v1.Endpoints into a K8sResourceMessage.
func endpointsConverter(obj interface{}) *K8sResourceMessage {
	endpoints := k8s.EndpointsToProto(obj.(*v1.Endpoints))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Endpoints{Endpoints: endpoints},
	}
	return &K8sResourceMessage{ObjectType: "endpoints", Object: resource}
}
// nodeConverter wraps a *v1.Node into a K8sResourceMessage.
func nodeConverter(obj interface{}) *K8sResourceMessage {
	node := k8s.NodeToProto(obj.(*v1.Node))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Node{Node: node},
	}
	return &K8sResourceMessage{ObjectType: "nodes", Object: resource}
}
// replicaSetConverter wraps an *apps.ReplicaSet into a K8sResourceMessage.
func replicaSetConverter(obj interface{}) *K8sResourceMessage {
	rs := k8s.ReplicaSetToProto(obj.(*apps.ReplicaSet))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_ReplicaSet{ReplicaSet: rs},
	}
	return &K8sResourceMessage{ObjectType: "replicasets", Object: resource}
}
// deploymentConverter wraps an *apps.Deployment into a K8sResourceMessage.
func deploymentConverter(obj interface{}) *K8sResourceMessage {
	deployment := k8s.DeploymentToProto(obj.(*apps.Deployment))
	resource := &storepb.K8SResource{
		Resource: &storepb.K8SResource_Deployment{Deployment: deployment},
	}
	return &K8sResourceMessage{ObjectType: "deployments", Object: resource}
}
|
package main
import (
"fmt"
"net"
"os"
"os/exec"
"strconv"
"time"
)
// laddr is declared at package level but main shadows it with a local of
// the same name, so this variable is effectively unused.
// NOTE(review): consider removing it.
var laddr *net.UDPAddr

// current_count is the replicated counter; a single byte, so it wraps at 255.
var current_count byte = 0

// message_size is the UDP payload size in bytes (just the counter byte).
var message_size int = 1

// alive_timeout_seconds is not referenced in this file's visible code (the
// 3-second timeout in main is hard-coded). NOTE(review): confirm dead.
var alive_timeout_seconds int = 3
// main runs a process-pair counter. It starts in backup mode, listening on
// $PORT for counter updates; when no update arrives within 3 seconds it
// promotes itself to active mode, spawns a fresh backup on $PORT+1 in a
// new terminal, and streams the incrementing counter to it once a second.
func main() {
	// check for correct parameters on the command line
	if len(os.Args) != 2 {
		fmt.Println("Usage: go run safe_counter.go $PORT")
		return
	}
	// get port number from command line
	port := os.Args[1]
	port_num, err := strconv.Atoi(port)
	checkErr(err)
	if port_num < 1025 {
		fmt.Println("Port number needs to be higher than 1024.")
		return
	}
	fmt.Println("---- backup mode ----")
	// create Listener
	laddr, err := net.ResolveUDPAddr("udp", "localhost:" + port)
	checkErr(err)
	listenConn, err := net.ListenUDP("udp", laddr)
	checkErr(err)
	listenConn_rcv_ch := make(chan byte)
	go udp_connection_reader(listenConn, message_size, listenConn_rcv_ch)
	// Backup phase: keep adopting the latest counter value; 3 seconds of
	// silence means the active process died, so break out and take over.
L:
	for {
		select {
		case <- time.After(time.Second * 3):
			break L
		case current_count = <-listenConn_rcv_ch:
			break
		}
	}
	fmt.Println("---- active mode ----")
	// spawn backup process
	// NOTE(review): the Start and Write errors are ignored here, and the
	// spawn assumes gnome-terminal exists on the host — best-effort only.
	backupPort := strconv.Itoa(port_num+1)
	backup := exec.Command("/bin/bash")
	stdin, _ := backup.StdinPipe()
	backup.Start()
	stdin.Write([]byte("gnome-terminal -x bash -c \"go run safe_counter.go " + backupPort + "\" \n"))
	// wait until backup process is up
	time.Sleep(time.Second * 3)
	// create transmit socket to backup process
	baddr, err := net.ResolveUDPAddr("udp", "localhost:" + backupPort)
	checkErr(err)
	backupConn, err := net.DialUDP("udp", nil, baddr)
	checkErr(err)
	defer backupConn.Close()
	// Active phase: print, increment, and replicate the counter every second.
	buf := make([]byte, message_size)
	for {
		fmt.Println("Count:", current_count)
		current_count++
		buf[0] = current_count
		backupConn.Write(buf)
		time.Sleep(time.Second * 1)
	}
}
// udp_connection_reader forwards the first byte of every datagram received
// on conn to rcv_ch. On a read error it panics; the deferred recover logs
// the problem and closes the connection before the goroutine exits.
func udp_connection_reader(conn *net.UDPConn, message_size int, rcv_ch chan byte) {
	defer func() {
		if r := recover(); r != nil {
			// Printf, not Println: the old code handed a %s format string to
			// Println, which printed the verb literally instead of the value.
			fmt.Printf("ERROR in udp_connection_reader: %s \n Closing connection.\n", r)
			conn.Close()
		}
	}()
	buf := make([]byte, message_size)
	for {
		n, _, err := conn.ReadFromUDP(buf)
		// n is never negative for ReadFromUDP; the error check decides.
		if err != nil || n < 0 {
			fmt.Printf("Error: udp_connection_reader: reading\n")
			panic(err)
		}
		rcv_ch <- buf[0]
	}
}
// checkErr treats err as fatal: it logs the error and terminates the
// process. The previous version only printed and returned, which let main
// continue into nil-pointer use of connections that failed to open.
func checkErr(err error) {
	if err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
}
|
package main
// detectCycle returns the node where the cycle begins in the given list,
// or nil when the list is acyclic (Floyd's tortoise-and-hare).
//
// Let a be the distance from the head to the cycle entry and c the cycle
// length. When fast (2 steps/turn) and slow (1 step/turn) first meet,
// f = 2s and f = s + n*c (fast laps slow n times), hence s = n*c. A pointer
// restarted at the head therefore meets slow exactly at the cycle entry
// after a more steps.
func detectCycle(head *ListNode) *ListNode {
	fast := head
	slow := head
	for {
		// Hitting nil means the list terminates: no cycle exists.
		if fast == nil || slow == nil || fast.Next == nil {
			return nil
		}
		fast = fast.Next.Next
		slow = slow.Next
		if fast == slow {
			// First meeting: advance a fresh pointer from the head in step
			// with slow; they meet at the cycle entry (see proof above).
			temp := head
			for temp != slow {
				temp = temp.Next
				slow = slow.Next
			}
			return temp
		}
	}
}
|
package localnet
import (
"net"
"strings"
"sort"
"time"
"fmt"
)
var (
	// localIP caches this machine's IPv4 address once IP() has resolved it.
	localIP string
	// broadcastIP caches the /24 broadcast address derived from localIP.
	broadcastIP string
	// IPList holds the addresses of known peers, kept sorted by PeerUpdate.
	IPList [] string
	// IPTimestamps records when each peer in IPList was last heard from.
	IPTimestamps map[string]time.Time
)
// Init primes the package: it resolves and caches the local and broadcast
// IPs, and allocates the peer list and timestamp map.
// NOTE(review): the errors from IP() and BroadcastIP() are silently
// discarded; on a host without network reachability the caches stay empty
// — confirm callers tolerate that.
func Init(){
	IP()
	BroadcastIP()
	IPList = make([]string,0,20)
	IPTimestamps = make(map[string]time.Time)
}
// IP returns this machine's IPv4 address as chosen for the default route,
// caching the result in localIP. It discovers the address by dialing TCP
// toward 8.8.8.8:53 and reading the local endpoint of that connection; no
// DNS query is made, but the dial does require network reachability.
func IP() (string, error) {
	if localIP == "" {
		conn, err := net.DialTCP("tcp4", nil, &net.TCPAddr{IP: []byte{8, 8, 8, 8}, Port: 53})
		if err != nil {
			return "", err
		}
		defer conn.Close()
		// LocalAddr is "host:port"; keep only the host part.
		localIP = strings.Split(conn.LocalAddr().String(), ":")[0]
	}
	return localIP, nil
}
// BroadcastIP returns the /24 broadcast address (a.b.c.255) derived from
// the local IP, caching the result.
// It now propagates resolution failures: the old code dropped IP()'s error
// and then indexed the parts of strings.Split("", "."), which panicked
// whenever the local address could not be determined.
func BroadcastIP() (string, error) {
	if broadcastIP == "" {
		if localIP == "" {
			if _, err := IP(); err != nil {
				return "", err
			}
		}
		parts := strings.Split(localIP, ".")
		if len(parts) != 4 {
			return "", fmt.Errorf("cannot derive broadcast address from %q", localIP)
		}
		broadcastIP = parts[0] + "." + parts[1] + "." + parts[2] + ".255"
	}
	return broadcastIP, nil
}
/*
func KnownIPs()([]string, error){
return IPList, nil
}
/*
func GetNumberOfNodes()(int){
return len(IPList) +1
}
*/
// PeerUpdate records a ping from IPPing: a known peer just gets a fresh
// timestamp, an unknown peer is added to the sorted list first.
func PeerUpdate(IPPing string) {
	for _, known := range IPList {
		if known == IPPing {
			IPTimestamps[IPPing] = time.Now()
			return
		}
	}
	// IP not seen before: register it and keep the list sorted.
	IPList = append(IPList, IPPing)
	sort.Strings(IPList)
	IPTimestamps[IPPing] = time.Now()
}
/*
func NewNode(newIP string)(error){
for i:=0; i<len(IPList); i++ {
if IPList[i] == newIP {
return errors.New("IP is already in list")
}
}
IPList = append(IPList, newIP)
sort.Strings(IPList)
return nil
}
/*
func RemoveNode(IP string)(error){
for i:=0; i<len(IPList); i++ {
if IPList[i] == IP {
IPList = append(IPList[:i], IPList[i+1:]...)
return nil
}
}
return errors.New("IP to delete is not in list")
}
*/
// RemoveDeadConns drops every peer whose last ping is older than timeout,
// returning true when at least one peer was removed.
// Two bugs in the old loop are fixed: the bound was len(IPList)-1, so the
// final peer was never examined, and the index advanced right after a
// deletion, skipping the element that had shifted into the freed slot.
// Peers with no timestamp entry are left alone, as before.
func RemoveDeadConns(timeout time.Duration)(bool){
	updateNeeded := false
	for i := 0; i < len(IPList); {
		timestamp, IPexists := IPTimestamps[IPList[i]]
		if IPexists && time.Since(timestamp) > timeout {
			delete(IPTimestamps, IPList[i])
			IPList = append(IPList[:i], IPList[i+1:]...)
			updateNeeded = true
			continue // next candidate now lives at index i
		}
		i++
	}
	return updateNeeded
}
// NextNode returns this machine's successor in the sorted peer ring: the
// smallest known IP greater than localIP, wrapping to the first entry when
// localIP is the largest. It returns "bad" when no peers are known and
// "BadIPlist" if localIP unexpectedly appears in IPList. Relies on IPList
// being kept sorted (see PeerUpdate).
// NOTE(review): the comparisons are lexicographic, so "10.x" sorts before
// "9.x" — confirm that is acceptable for ring ordering.
func NextNode()(string){
	if len(IPList) == 0 {
		fmt.Println("No IPs")
		return "bad"
	}
	if len(IPList) == 1 {
		return IPList[0]
	}
	// smallest member
	if localIP < IPList[0] {
		return IPList[0]
	}
	// somewhere inbetween
	for i:=0;i<len(IPList)-1;i++ {
		if localIP == IPList[i] {
			return "BadIPlist" //shouldn't happen
		} else if localIP > IPList[i] && localIP < IPList[i+1] {
			return IPList[i+1]
		}
	}
	// reached end of list, wrap around
	return IPList[0]
}
func NodeNumber()(int){
for i:=0;i<len(IPList):i++{
if localIP < IPList[i] {
return i
}
}
return i
}
|
package 数组
// minCount returns the minimum number of removals needed to empty every
// pile when each removal may take one or two coins: ceil(coins/2) per pile.
func minCount(coinPiles []int) int {
	total := 0
	for _, coins := range coinPiles {
		// Inlined ceiling division by two (same as getMinTimesOfTakingAway).
		total += (coins + 1) / 2
	}
	return total
}
// getMinTimesOfTakingAway returns how many removals of one or two coins
// are needed to empty a pile of the given size: the ceiling of coins/2.
func getMinTimesOfTakingAway(coins int) int {
	timesNeeded := (coins + 1) / 2
	return timesNeeded
}
/*
题目链接:https://leetcode-cn.com/problems/na-ying-bi/
*/
|
package utils
import (
"math/rand"
"time"
)
// CaptchaTool is the shared, stateless captcha helper instance.
var CaptchaTool = &CaptchaUtil{}

// CaptchaUtil generates random alphanumeric captcha codes.
type CaptchaUtil struct{}

// captchaCodeStr is the 62-character alphabet codes are drawn from.
var captchaCodeStr = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// Generate returns a pseudo-random captcha code of the given length.
// Fixes: rand.Intn(61) could never select the 62nd character ('Z'), so it
// now derives the bound from the alphabet; seeding with whole seconds made
// all calls within the same second identical, so UnixNano is used; and the
// parameter no longer shadows the builtin len.
func (*CaptchaUtil) Generate(length int32) string {
	rand.Seed(time.Now().UnixNano())
	code := make([]byte, 0, length)
	for i := int32(0); i < length; i++ {
		code = append(code, captchaCodeStr[rand.Intn(len(captchaCodeStr))])
	}
	return string(code)
}
package main
import (
"fmt"
)
// main prints a small roster as a tab-separated table, one newline before
// each row and three tabs after every cell.
func main() {
	people := [][]string{
		{"Tony", "Hawk", "Skate"},
		{"Bob", "Dylan", "Guitar"},
		{"Freddie", "Mercury", "Sing"},
	}
	for _, row := range people {
		fmt.Println()
		for _, field := range row {
			fmt.Printf("%s\t\t\t", field)
		}
	}
}
|
package main
import (
"reflect"
"errors"
"fmt"
)
// CellHint supplies per-register-type behavior for Modbus cells: defaults
// for access mode and value type, plus validation.
// NOTE(review): "Validete" is a typo for "Validate", kept because renaming
// would break every implementor below.
type CellHint interface {
	SetDefaults(cell *ModbusCell)
	Validete(cell *ModbusCell) error
}

// HoldingCellHint describes holding registers (read/write uint16, per its
// SetDefaults below).
type HoldingCellHint struct{}

// InputCellHint describes input registers (read-only uint16).
type InputCellHint struct{}

// CoilCellHint describes coils (read/write bool).
type CoilCellHint struct{}

// DiscreteInputCellHint describes discrete inputs (read-only bool).
type DiscreteInputCellHint struct{}
// validateComon checks the invariants shared by every cell type: currently
// only that the cell has a non-empty name.
// NOTE(review): "Comon" is a typo for "Common"; renaming is deferred since
// every hint's Validete calls it by name.
func validateComon(cell *ModbusCell) error {
	if cell.Name() == "" {
		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) anti-pattern.
		return fmt.Errorf("The cell id=%v has empty name", cell.ID())
	}
	return nil
}
// SetDefaults marks holding registers as read/write 16-bit values.
func (h HoldingCellHint) SetDefaults(cell *ModbusCell) {
	cell.Access = MB_CELL_ACCESS_RW
	cell.ValueType = reflect.Uint16
}

// Validete runs the shared cell checks; holding registers add no extras.
func (h HoldingCellHint) Validete(cell *ModbusCell) error {
	return validateComon(cell)
}
// SetDefaults marks input registers as read-only 16-bit values.
func (h InputCellHint) SetDefaults(cell *ModbusCell) {
	cell.Access = MB_CELL_ACCESS_READ
	cell.ValueType = reflect.Uint16
}

// Validete runs the shared cell checks; input registers add no extras.
func (h InputCellHint) Validete(cell *ModbusCell) error {
	return validateComon(cell)
}
// SetDefaults marks coils as read/write boolean values.
func (h CoilCellHint) SetDefaults(cell *ModbusCell) {
	cell.Access = MB_CELL_ACCESS_RW
	cell.ValueType = reflect.Bool
}

// Validete runs the shared cell checks; coils add no extras.
func (h CoilCellHint) Validete(cell *ModbusCell) error {
	return validateComon(cell)
}
// SetDefaults marks discrete inputs as read-only boolean values.
func (h DiscreteInputCellHint) SetDefaults(cell *ModbusCell) {
	cell.Access = MB_CELL_ACCESS_READ
	cell.ValueType = reflect.Bool
}

// Validete runs the shared cell checks; discrete inputs add no extras.
func (h DiscreteInputCellHint) Validete(cell *ModbusCell) error {
	return validateComon(cell)
}
|
package authorization
import (
"budget-calendar/database"
"github.com/gin-gonic/gin"
)
func ValidSession(db *database.DB) gin.HandlerFunc {
return func(c *gin.Context) {
session, err := db.SessionStore.Get(c.Request, "session")
if err != nil {
c.AbortWithStatusJSON(500, "The server was unable to retrieve this session")
return
}
if session.ID == "" {
c.AbortWithStatusJSON(401, "This user has no current session. Use of this endpoint is thus unauthorized")
return
}
}
} |
package main
import (
"fmt"
"time"
)
// UN CANAL ES UN espacio de memoria para dialogo entre rutinas
// main demonstrates channel synchronization: it launches bucle in a
// goroutine and blocks until bucle reports, over the channel, how long its
// counting loop took.
func main() {
	durations := make(chan time.Duration)
	go bucle(durations)
	fmt.Println("llegue hasta aca")
	// Receiving blocks until bucle sends — this is the rendezvous point
	// (akin to awaiting a promise).
	elapsed := <-durations
	fmt.Println(elapsed)
}
// bucle busy-loops through 10^11 iterations and sends the elapsed wall
// time on the supplied channel.
func bucle(out chan time.Duration) {
	startedAt := time.Now()
	for i := 0; i < 100000000000; i++ {
	}
	// time.Since is equivalent to time.Now().Sub(startedAt).
	out <- time.Since(startedAt)
}
|
package controllers
import (
"fmt"
"github.com/astaxie/beego"
)
// BaseController is the shared base controller. It embeds beego.Controller
// and is intended to be embedded by concrete controllers so they inherit
// the credential check performed in Prepare.
type BaseController struct {
	beego.Controller
}
// Prepare runs the credential check before the action method (beego's
// Prepare hook). The request is treated as authenticated only when the
// session's phone and password both exist and equal the same values sent
// back in cookies; otherwise it serves an ErrNoPermission JSON error.
// NOTE(review): this compares what appears to be a plaintext password
// stored in both the session and a cookie — a serious security smell;
// confirm and consider hashing or a session-token scheme instead.
// NOTE(review): the fmt.Println calls print credentials to stdout on every
// request and look like leftover debugging.
func (c *BaseController) Prepare() {
	phone := c.GetSession("phone")
	password := c.GetSession("password")
	// Session values are interface{}; the == against the cookie string only
	// succeeds when the session stored plain strings.
	if phone != nil && password != nil && phone == c.Ctx.GetCookie("phone") && password == c.Ctx.GetCookie("password") {
		c.Data["phone"] = phone
		c.Data["password"] = password
		fmt.Println(phone, password, c.Ctx.GetCookie("phone"), c.Ctx.GetCookie("password"))
	} else {
		fmt.Println(phone, password, c.Ctx.GetCookie("phone"), c.Ctx.GetCookie("password"))
		code, message := DecodeErr(ErrNoPermission)
		c.Data["json"] = ErrResponse{code, message}
		c.ServeJSON()
	}
}
// Response is a code/message envelope with a data payload. It is not used
// in this file's visible code — presumably the success reply shape for API
// handlers elsewhere; confirm against the callers.
type Response struct {
	Code int `json:"code"`
	Message string `json:"message"`
	Data interface{} `json:"data"`
}

// ErrResponse is the error envelope served by Prepare on failed auth:
// like Response but without a payload.
type ErrResponse struct {
	Code int `json:"code"`
	Message string `json:"message"`
}
|
package nifi
import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider returns a terraform.ResourceProvider for NiFi, exposing the
// connection settings (host, API path) and the managed resource types.
func Provider() terraform.ResourceProvider {
	providerSchema := map[string]*schema.Schema{
		"host": {
			Type:        schema.TypeString,
			Required:    true,
			DefaultFunc: schema.EnvDefaultFunc("NIFI_HOST", nil),
		},
		"api_path": {
			Type:        schema.TypeString,
			Optional:    true,
			DefaultFunc: schema.EnvDefaultFunc("NIFI_API_PATH", "nifi-api"),
		},
	}
	resources := map[string]*schema.Resource{
		"nifi_process_group":      ResourceProcessGroup(),
		"nifi_processor":          ResourceProcessor(),
		"nifi_connection":         ResourceConnection(),
		"nifi_controller_service": ResourceControllerService(),
	}
	return &schema.Provider{
		Schema:        providerSchema,
		ResourcesMap:  resources,
		ConfigureFunc: providerConfigure,
	}
}
// providerConfigure builds the NiFi API client from the provider settings.
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
	cfg := Config{
		Host:    d.Get("host").(string),
		ApiPath: d.Get("api_path").(string),
	}
	return NewClient(cfg), nil
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package types
import (
"bytes"
"github.com/reed/common/byteutil/byteconv"
"github.com/reed/crypto"
)
// UTXO is an unspent transaction output. ID is derived from all the other
// fields via GenerateID, so equal content yields an equal identifier.
type UTXO struct {
	ID Hash `json:"id"`
	// OutputId identifies the transaction output this UTXO wraps.
	OutputId Hash `json:"outputId"`
	// SourceId identifies the source transaction — exact semantics not
	// visible in this file; confirm against the transaction types.
	SourceId Hash `json:"sourceId"`
	IsCoinbase bool `json:"isCoinbase"`
	SourcePos uint64 `json:"sourcePos"`
	Amount uint64 `json:"amount"`
	Address []byte `json:"address"`
	// ScriptPk is the locking script (note the "scriptPK" JSON key casing).
	ScriptPk []byte `json:"scriptPK"`
}
// NewUtxo assembles a UTXO from its fields and stamps its content-derived ID.
func NewUtxo(outputId Hash, sourceId Hash, isCoinbase bool, sourcePos uint64, amount uint64, address []byte, scriptPK []byte) *UTXO {
	utxo := UTXO{
		OutputId:   outputId,
		SourceId:   sourceId,
		IsCoinbase: isCoinbase,
		SourcePos:  sourcePos,
		Amount:     amount,
		Address:    address,
		ScriptPk:   scriptPK,
	}
	utxo.ID = utxo.GenerateID()
	return &utxo
}
// GenerateID hashes the UTXO's fields, ':'-separated, into its identifier.
// The separator keeps adjacent variable-length fields from colliding.
func (u *UTXO) GenerateID() Hash {
	sep := []byte(":")
	payload := bytes.Join([][]byte{
		u.OutputId.Bytes(),
		sep,
		u.SourceId.Bytes(),
		sep,
		byteconv.BoolToByte(u.IsCoinbase),
		sep,
		byteconv.Uint64ToByte(u.SourcePos),
		sep,
		byteconv.Uint64ToByte(u.Amount),
		sep,
		u.Address,
		sep,
		u.ScriptPk,
	}, nil)
	return BytesToHash(crypto.Sha256(payload))
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfigs
import (
"crypto/sha256"
"fmt"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/lithammer/dedent"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
clientsetfake "k8s.io/client-go/kubernetes/fake"
kubeletconfig "k8s.io/kubelet/config/v1beta1"
utilpointer "k8s.io/utils/pointer"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
// kubeletMarshalCases holds common marshal test cases for both the marshal and unmarshal tests
var kubeletMarshalCases = []struct {
	name string
	obj *kubeletConfig
	yaml string
}{
	{
		// Even an all-zero config renders the duration fields explicitly
		// (as 0s), so the fixture YAML lists them all.
		name: "Empty config",
		obj: &kubeletConfig{
			configBase: configBase{
				GroupVersion: kubeletconfig.SchemeGroupVersion,
			},
			config: kubeletconfig.KubeletConfiguration{
				TypeMeta: metav1.TypeMeta{
					APIVersion: kubeletconfig.SchemeGroupVersion.String(),
					Kind:       "KubeletConfiguration",
				},
			},
		},
		yaml: dedent.Dedent(`
			apiVersion: kubelet.config.k8s.io/v1beta1
			authentication:
			  anonymous: {}
			  webhook:
			    cacheTTL: 0s
			  x509: {}
			authorization:
			  webhook:
			    cacheAuthorizedTTL: 0s
			    cacheUnauthorizedTTL: 0s
			cpuManagerReconcilePeriod: 0s
			evictionPressureTransitionPeriod: 0s
			fileCheckFrequency: 0s
			httpCheckFrequency: 0s
			imageMinimumGCAge: 0s
			kind: KubeletConfiguration
			nodeStatusReportFrequency: 0s
			nodeStatusUpdateFrequency: 0s
			runtimeRequestTimeout: 0s
			streamingConnectionIdleTimeout: 0s
			syncFrequency: 0s
			volumeStatsAggPeriod: 0s
		`),
	},
	{
		// A few scalar fields set: they appear alphabetically among the
		// zero-duration defaults.
		name: "Non empty config",
		obj: &kubeletConfig{
			configBase: configBase{
				GroupVersion: kubeletconfig.SchemeGroupVersion,
			},
			config: kubeletconfig.KubeletConfiguration{
				TypeMeta: metav1.TypeMeta{
					APIVersion: kubeletconfig.SchemeGroupVersion.String(),
					Kind:       "KubeletConfiguration",
				},
				Address:            "1.2.3.4",
				Port:               12345,
				RotateCertificates: true,
			},
		},
		yaml: dedent.Dedent(`
			address: 1.2.3.4
			apiVersion: kubelet.config.k8s.io/v1beta1
			authentication:
			  anonymous: {}
			  webhook:
			    cacheTTL: 0s
			  x509: {}
			authorization:
			  webhook:
			    cacheAuthorizedTTL: 0s
			    cacheUnauthorizedTTL: 0s
			cpuManagerReconcilePeriod: 0s
			evictionPressureTransitionPeriod: 0s
			fileCheckFrequency: 0s
			httpCheckFrequency: 0s
			imageMinimumGCAge: 0s
			kind: KubeletConfiguration
			nodeStatusReportFrequency: 0s
			nodeStatusUpdateFrequency: 0s
			port: 12345
			rotateCertificates: true
			runtimeRequestTimeout: 0s
			streamingConnectionIdleTimeout: 0s
			syncFrequency: 0s
			volumeStatsAggPeriod: 0s
		`),
	},
}
// TestKubeletMarshal checks that each fixture object marshals to exactly
// its expected YAML (modulo surrounding whitespace).
func TestKubeletMarshal(t *testing.T) {
	for _, test := range kubeletMarshalCases {
		t.Run(test.name, func(t *testing.T) {
			b, err := test.obj.Marshal()
			if err != nil {
				t.Fatalf("Marshal failed: %v", err)
			}
			// Trim both sides so the dedent-indented fixtures compare cleanly.
			got := strings.TrimSpace(string(b))
			expected := strings.TrimSpace(test.yaml)
			if expected != got {
				// Fixed the "Missmatch" typo and dropped the redundant
				// string(got) conversion (got is already a string).
				t.Fatalf("Mismatch between expected and got:\nExpected:\n%s\n---\nGot:\n%s", expected, got)
			}
		})
	}
}
// TestKubeletUnmarshal checks that each fixture YAML unmarshals back into
// an object equal to the fixture's expected kubeletConfig.
func TestKubeletUnmarshal(t *testing.T) {
	for _, test := range kubeletMarshalCases {
		t.Run(test.name, func(t *testing.T) {
			gvkmap, err := kubeadmutil.SplitYAMLDocuments([]byte(test.yaml))
			if err != nil {
				t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err)
			}
			got := &kubeletConfig{
				configBase: configBase{
					GroupVersion: kubeletconfig.SchemeGroupVersion,
				},
			}
			if err = got.Unmarshal(gvkmap); err != nil {
				t.Fatalf("unexpected failure of Unmarshal: %v", err)
			}
			if !reflect.DeepEqual(got, test.obj) {
				// Fixed the "Missmatch" typo in the failure message.
				t.Fatalf("Mismatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.obj, got)
			}
		})
	}
}
// TestKubeletDefault verifies the defaulting applied to kubelet configs
// for several cluster configurations: plain defaults, service-subnet DNS
// derivation with and without dual stack, DNS domain, and certificates dir.
func TestKubeletDefault(t *testing.T) {
	var resolverConfig string
	if isSystemdResolvedActive, _ := isServiceActive("systemd-resolved"); isSystemdResolvedActive {
		// If systemd-resolved is active, we need to set the default resolver config
		resolverConfig = kubeletSystemdResolverConfig
	}
	tests := []struct {
		name       string
		clusterCfg kubeadmapi.ClusterConfiguration
		expected   kubeletConfig
	}{
		{
			name:       "No specific defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates:  map[string]bool{},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{kubeadmapiv1beta2.DefaultClusterDNSIP},
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: constants.CACertName,
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
		{
			// ClusterDNS must be derived from the service subnet (.10 host).
			name: "Service subnet, no dual stack defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{
				Networking: kubeadmapi.Networking{
					ServiceSubnet: "192.168.0.0/16",
				},
			},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates:  map[string]bool{},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{"192.168.0.10"},
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: constants.CACertName,
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
		{
			// Explicitly disabling dual stack must preserve the feature gate
			// while deriving ClusterDNS the same way.
			name: "Service subnet, explicitly disabled dual stack defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{
				FeatureGates: map[string]bool{
					features.IPv6DualStack: false,
				},
				Networking: kubeadmapi.Networking{
					ServiceSubnet: "192.168.0.0/16",
				},
			},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates: map[string]bool{
						features.IPv6DualStack: false,
					},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{"192.168.0.10"},
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: constants.CACertName,
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
		{
			name: "Service subnet, enabled dual stack defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{
				FeatureGates: map[string]bool{
					features.IPv6DualStack: true,
				},
				Networking: kubeadmapi.Networking{
					ServiceSubnet: "192.168.0.0/16",
				},
			},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates: map[string]bool{
						features.IPv6DualStack: true,
					},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{"192.168.0.10"},
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: constants.CACertName,
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
		{
			// The DNS domain must propagate into ClusterDomain.
			name: "DNS domain defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{
				Networking: kubeadmapi.Networking{
					DNSDomain: "example.com",
				},
			},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates:  map[string]bool{},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{kubeadmapiv1beta2.DefaultClusterDNSIP},
					ClusterDomain: "example.com",
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: constants.CACertName,
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
		{
			// A custom certificates dir must be joined into the CA path.
			name: "CertificatesDir defaulting works",
			clusterCfg: kubeadmapi.ClusterConfiguration{
				CertificatesDir: "/path/to/certs",
			},
			expected: kubeletConfig{
				config: kubeletconfig.KubeletConfiguration{
					FeatureGates:  map[string]bool{},
					StaticPodPath: kubeadmapiv1beta2.DefaultManifestsDir,
					ClusterDNS:    []string{kubeadmapiv1beta2.DefaultClusterDNSIP},
					Authentication: kubeletconfig.KubeletAuthentication{
						X509: kubeletconfig.KubeletX509Authentication{
							ClientCAFile: filepath.Join("/path/to/certs", constants.CACertName),
						},
						Anonymous: kubeletconfig.KubeletAnonymousAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationAnonymousEnabled),
						},
						Webhook: kubeletconfig.KubeletWebhookAuthentication{
							Enabled: utilpointer.BoolPtr(kubeletAuthenticationWebhookEnabled),
						},
					},
					Authorization: kubeletconfig.KubeletAuthorization{
						Mode: kubeletconfig.KubeletAuthorizationModeWebhook,
					},
					HealthzBindAddress: kubeletHealthzBindAddress,
					HealthzPort:        utilpointer.Int32Ptr(constants.KubeletHealthzPort),
					RotateCertificates: kubeletRotateCertificates,
					ResolverConfig:     resolverConfig,
				},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// This is the same for all test cases so we set it here
			expected := test.expected
			expected.configBase.GroupVersion = kubeletconfig.SchemeGroupVersion
			got := &kubeletConfig{
				configBase: configBase{
					GroupVersion: kubeletconfig.SchemeGroupVersion,
				},
			}
			got.Default(&test.clusterCfg, &kubeadmapi.APIEndpoint{}, &kubeadmapi.NodeRegistrationOptions{})
			if !reflect.DeepEqual(got, &expected) {
				// Fixed the "Missmatch" typo in the failure message.
				t.Fatalf("Mismatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", expected, *got)
			}
		})
	}
}
// runKubeletFromTest holds common test case data and evaluation code for kubeletHandler.From* functions.
//
// perform is the function under test: it receives the raw YAML input and must
// return the parsed component config (or nil when the input carries none).
func runKubeletFromTest(t *testing.T, perform func(t *testing.T, in string) (kubeadmapi.ComponentConfig, error)) {
	tests := []struct {
		name      string
		in        string
		out       *kubeletConfig
		expectErr bool
	}{
		{
			name: "Empty document map should return nothing successfully",
		},
		{
			name: "Non-empty non-kubelet document map returns nothing successfully",
			in: dedent.Dedent(`
				apiVersion: api.example.com/v1
				kind: Configuration
			`),
		},
		{
			name: "Old kubelet version returns an error",
			in: dedent.Dedent(`
				apiVersion: kubelet.config.k8s.io/v1alpha1
				kind: KubeletConfiguration
			`),
			expectErr: true,
		},
		{
			name: "Wrong kubelet kind returns an error",
			in: dedent.Dedent(`
				apiVersion: kubelet.config.k8s.io/v1beta1
				kind: Configuration
			`),
			expectErr: true,
		},
		{
			name: "Valid kubelet only config gets loaded",
			in: dedent.Dedent(`
				apiVersion: kubelet.config.k8s.io/v1beta1
				kind: KubeletConfiguration
				address: 1.2.3.4
				port: 12345
				rotateCertificates: true
			`),
			out: &kubeletConfig{
				configBase: configBase{
					GroupVersion: kubeletHandler.GroupVersion,
					userSupplied: true,
				},
				config: kubeletconfig.KubeletConfiguration{
					TypeMeta: metav1.TypeMeta{
						APIVersion: kubeletHandler.GroupVersion.String(),
						Kind:       "KubeletConfiguration",
					},
					Address:            "1.2.3.4",
					Port:               12345,
					RotateCertificates: true,
				},
			},
		},
		{
			name: "Valid kubelet config gets loaded when coupled with an extra document",
			in: dedent.Dedent(`
				apiVersion: api.example.com/v1
				kind: Configuration
				---
				apiVersion: kubelet.config.k8s.io/v1beta1
				kind: KubeletConfiguration
				address: 1.2.3.4
				port: 12345
				rotateCertificates: true
			`),
			out: &kubeletConfig{
				configBase: configBase{
					GroupVersion: kubeletHandler.GroupVersion,
					userSupplied: true,
				},
				config: kubeletconfig.KubeletConfiguration{
					TypeMeta: metav1.TypeMeta{
						APIVersion: kubeletHandler.GroupVersion.String(),
						Kind:       "KubeletConfiguration",
					},
					Address:            "1.2.3.4",
					Port:               12345,
					RotateCertificates: true,
				},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			componentCfg, err := perform(t, test.in)
			// Guard-style evaluation instead of the original deeply nested
			// if/else ladder; the checked conditions are identical.
			if err != nil {
				if !test.expectErr {
					t.Errorf("unexpected failure: %v", err)
				}
				return
			}
			if test.expectErr {
				t.Error("unexpected success")
				return
			}
			if componentCfg == nil {
				if test.out != nil {
					t.Error("unexpected nil result")
				}
				return
			}
			got, ok := componentCfg.(*kubeletConfig)
			if !ok {
				t.Error("different result type")
				return
			}
			if test.out == nil {
				t.Errorf("unexpected result: %v", got)
				return
			}
			if !reflect.DeepEqual(test.out, got) {
				t.Errorf("mismatch between expected and got:\nExpected:\n%v\n---\nGot:\n%v", test.out, got)
			}
		})
	}
}
// TestKubeletFromDocumentMap runs the shared kubelet test table against
// kubeletHandler.FromDocumentMap.
func TestKubeletFromDocumentMap(t *testing.T) {
	runKubeletFromTest(t, func(t *testing.T, in string) (kubeadmapi.ComponentConfig, error) {
		// Split the raw input into a GVK -> document map before handing it over.
		docmap, splitErr := kubeadmutil.SplitYAMLDocuments([]byte(in))
		if splitErr != nil {
			t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", splitErr)
		}
		return kubeletHandler.FromDocumentMap(docmap)
	})
}
// TestKubeletFromCluster runs the shared kubelet test table against
// kubeletHandler.FromCluster, serving each input document from the kubelet
// config map of a fake cluster.
func TestKubeletFromCluster(t *testing.T) {
	runKubeletFromTest(t, func(t *testing.T, in string) (kubeadmapi.ComponentConfig, error) {
		cfg := &kubeadmapi.ClusterConfiguration{
			KubernetesVersion: constants.CurrentKubernetesVersion.String(),
		}
		ver := version.MustParseGeneric(cfg.KubernetesVersion)
		// The fake client exposes the input as the kubelet base configuration.
		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      constants.GetKubeletConfigMapName(ver),
				Namespace: metav1.NamespaceSystem,
			},
			Data: map[string]string{
				constants.KubeletBaseConfigurationConfigMapKey: in,
			},
		}
		return kubeletHandler.FromCluster(clientsetfake.NewSimpleClientset(configMap), cfg)
	})
}
// TestGeneratedKubeletFromCluster verifies that FromCluster marks a config as
// user supplied exactly when the stored hash annotation is missing or does not
// match the stored document.
func TestGeneratedKubeletFromCluster(t *testing.T) {
	testYAML := dedent.Dedent(`
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	address: 1.2.3.4
	port: 12345
	rotateCertificates: true
	`)
	testYAMLHash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(testYAML)))
	// The SHA256 sum of "The quick brown fox jumps over the lazy dog"
	const mismatchHash = "sha256:d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"
	tests := []struct {
		name         string
		hash         string
		userSupplied bool
	}{
		{
			name: "Matching hash means generated config",
			hash: testYAMLHash,
		},
		{
			// Fixed typo in the test case name ("Missmatching").
			name:         "Mismatching hash means user supplied config",
			hash:         mismatchHash,
			userSupplied: true,
		},
		{
			name:         "No hash means user supplied config",
			userSupplied: true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			clusterCfg := &kubeadmapi.ClusterConfiguration{
				KubernetesVersion: constants.CurrentKubernetesVersion.String(),
			}
			k8sVersion := version.MustParseGeneric(clusterCfg.KubernetesVersion)
			configMap := &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      constants.GetKubeletConfigMapName(k8sVersion),
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					constants.KubeletBaseConfigurationConfigMapKey: testYAML,
				},
			}
			// Only attach the hash annotation when the test case provides one.
			if test.hash != "" {
				configMap.Annotations = map[string]string{
					constants.ComponentConfigHashAnnotationKey: test.hash,
				}
			}
			client := clientsetfake.NewSimpleClientset(configMap)
			cfg, err := kubeletHandler.FromCluster(client, clusterCfg)
			if err != nil {
				t.Fatalf("unexpected failure of FromCluster: %v", err)
			}
			got := cfg.IsUserSupplied()
			if got != test.userSupplied {
				t.Fatalf("mismatch between expected and got:\n\tExpected: %t\n\tGot: %t", test.userSupplied, got)
			}
		})
	}
}
|
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package redis
import (
"time"
"github.com/go-redis/redis"
)
// Client defines the interface for communicating with a Storj redis instance
type Client interface {
	// Get returns the raw value stored at key.
	Get(key string) ([]byte, error)
	// Set stores value at key with the given time-to-live.
	Set(key string, value []byte, ttl time.Duration) error
	// Ping reports whether the server is reachable.
	Ping() error
}
// redisClient is the concrete Client implementation backed by go-redis.
type redisClient struct {
	DB *redis.Client // Underlying go-redis connection.
}
// NewRedisClient returns a configured Client instance, verifying a successful connection to redis
func NewRedisClient(address, password string, db int) (Client, error) {
	c := &redisClient{
		DB: redis.NewClient(&redis.Options{
			Addr:     address,
			Password: password,
			DB:       db,
		}),
	}
	// ping here to verify we are able to connect to the redis instance with the initialized client.
	if err := c.DB.Ping().Err(); err != nil {
		return nil, err
	}
	return c, nil
}
// Get looks up the provided key from the redis cache, returning either an error or the result.
func (c *redisClient) Get(key string) ([]byte, error) {
	return c.DB.Get(key).Bytes()
}
// Set adds a value to the provided key in the Redis cache, returning an error on failure.
func (c *redisClient) Set(key string, value []byte, ttl time.Duration) error {
	return c.DB.Set(key, value, ttl).Err()
}
// Ping returns an error if pinging the underlying redis server failed
func (c *redisClient) Ping() error {
	return c.DB.Ping().Err()
}
|
package webhooks
import (
	"encoding/json"
	"fmt"
	"net/http"
)
// HandleSettleDebt answers a webhook call with the list of payments the last
// speaker must perform to settle their debt, or with a friendly message when
// there is nothing left to pay. The response body is a JSON object of the
// form {"text": "..."}.
func HandleSettleDebt(w http.ResponseWriter, r *http.Request) {
	payments := split.RemoveDebt(lastSpeaker)

	msg := "Hey, you have nothing to pay! Congrats!! ;)"
	if len(payments) > 0 {
		msg = "Okay! Here's the list of payments you must perform\n"
		for _, p := range payments {
			msg += fmt.Sprintf("* %d to %s\n", p.Quantity, p.To.Name+" "+p.To.LastName)
		}
	}

	// Bug fix: the original concatenated msg into a JSON string literal by
	// hand, producing invalid JSON because msg contains raw newlines (and
	// potentially quotes). Encoding through encoding/json escapes them.
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(map[string]string{"text": msg}); err != nil {
		http.Error(w, "failed to encode response", http.StatusInternalServerError)
	}
}
|
package channelserver
import (
"encoding/hex"
"fmt"
"io"
"net"
"sync"
"github.com/Andoryuuta/Erupe/common/stringstack"
"github.com/Andoryuuta/Erupe/common/stringsupport"
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/Erupe/network/mhfpacket"
"github.com/Andoryuuta/byteframe"
"go.uber.org/zap"
"golang.org/x/text/encoding/japanese"
)
// Session holds state for the channel server connection.
//
// The embedded mutex guards concurrent access to the session's mutable state.
type Session struct {
	sync.Mutex
	logger        *zap.Logger
	server        *Server            // The server that owns this session.
	rawConn       net.Conn           // Underlying TCP connection.
	cryptConn     *network.CryptConn // Encrypted packet framing over rawConn.
	sendPackets   chan []byte        // Outbound packet queue, drained by sendLoop.
	clientContext *clientctx.ClientContext
	stageID       string
	stage         *Stage
	reservationStage *Stage // Required for the stateful MsgSysUnreserveStage packet.
	charID        uint32    // ID of the connected character, used in logs and debug dumps.
	logKey        []byte
	// A stack containing the stage movement history (push on enter/move, pop on back)
	stageMoveStack *stringstack.StringStack
}
// NewSession creates a new Session type.
func NewSession(server *Server, conn net.Conn) *Session {
	// String conversion uses Shift-JIS, the encoding used by the game client.
	clientCtx := &clientctx.ClientContext{
		StrConv: &stringsupport.StringConverter{
			Encoding: japanese.ShiftJIS,
		},
	}
	session := &Session{
		logger:         server.logger.Named(conn.RemoteAddr().String()),
		server:         server,
		rawConn:        conn,
		cryptConn:      network.NewCryptConn(conn),
		sendPackets:    make(chan []byte, 20),
		clientContext:  clientCtx,
		stageMoveStack: stringstack.New(),
	}
	return session
}
// Start starts the session packet send and recv loop(s).
func (s *Session) Start() {
	go func() {
		s.logger.Info("Channel server got connection!", zap.String("remoteaddr", s.rawConn.RemoteAddr().String()))
		// Unlike the sign and entrance server,
		// the client DOES NOT initialize the channel connection with 8 NULL bytes.
		go s.sendLoop()
		s.recvLoop()
	}()
}
// QueueSend queues a packet (raw []byte) to be sent.
//
// Blocks when the send buffer is full; use QueueSendNonBlocking when dropping
// the packet is preferable to blocking the caller.
func (s *Session) QueueSend(data []byte) {
	cfg := s.server.erupeConfig
	if cfg.DevMode && cfg.DevModeOptions.LogOutboundMessages {
		fmt.Printf("Sending To CharID: '%x'\n", s.charID)
		fmt.Printf("Sent Data:\n%s\n", hex.Dump(data))
	}
	s.sendPackets <- data
}
// QueueSendNonBlocking queues a packet (raw []byte) to be sent, dropping the packet entirely if the queue is full.
func (s *Session) QueueSendNonBlocking(data []byte) {
	select {
	case s.sendPackets <- data:
		// Enqueued properly.
	default:
		// Couldn't enqueue, likely something wrong with the connection.
		s.logger.Warn("Dropped packet for session because of full send buffer, something is probably wrong")
	}
}
// QueueSendMHF queues a MHFPacket to be sent.
func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) {
	// Make the header
	bf := byteframe.NewByteFrame()
	bf.WriteUint16(uint16(pkt.Opcode()))
	// Build the packet onto the byteframe.
	// NOTE(review): any result of pkt.Build is discarded here — confirm build
	// failures are impossible or intentionally ignored.
	pkt.Build(bf, s.clientContext)
	// Queue it.
	s.QueueSend(bf.Data())
}
// QueueAck is a helper function to queue an MSG_SYS_ACK with the given ack handle and data.
func (s *Session) QueueAck(ackHandle uint32, data []byte) {
	// Frame layout: opcode, then the handle being acknowledged, then the payload.
	frame := byteframe.NewByteFrame()
	frame.WriteUint16(uint16(network.MSG_SYS_ACK))
	frame.WriteUint32(ackHandle)
	frame.WriteBytes(data)
	s.QueueSend(frame.Data())
}
// sendLoop drains s.sendPackets, appending the MSG_SYS_END trailer to each
// packet before sending it over the encrypted connection. A nil packet acts
// as a sentinel that terminates the loop.
func (s *Session) sendLoop() {
	for {
		// TODO(Andoryuuta): Test making this into a buffered channel and grouping the packet together before sending.
		rawPacket := <-s.sendPackets
		if rawPacket == nil {
			s.logger.Debug("Got nil from s.SendPackets, exiting send loop")
			return
		}
		// Make a copy of the data.
		terminatedPacket := make([]byte, len(rawPacket))
		copy(terminatedPacket, rawPacket)
		// Append the MSG_SYS_END tailing opcode.
		terminatedPacket = append(terminatedPacket, []byte{0x00, 0x10}...)
		// NOTE(review): SendPacket's result (if any) is not checked — confirm
		// send failures need no handling here.
		s.cryptConn.SendPacket(terminatedPacket)
	}
}
// recvLoop reads packet groups off the encrypted connection and dispatches
// them until the peer disconnects (EOF) or a read error occurs.
func (s *Session) recvLoop() {
	for {
		pkt, err := s.cryptConn.ReadPacket()
		if err == io.EOF {
			// Clean disconnect: log the player out and stop reading.
			s.logger.Info(fmt.Sprintf("Character(%d) disconnected", s.charID))
			logoutPlayer(s)
			return
		}
		if err != nil {
			s.logger.Warn("Error on ReadPacket, exiting recv loop", zap.Error(err))
			return
		}
		s.handlePacketGroup(pkt)
	}
}
// handlePacketGroup parses and dispatches one packet group. A group may carry
// several packets back to back: after the first packet is handled, any bytes
// the parser left unread are processed recursively.
func (s *Session) handlePacketGroup(pktGroup []byte) {
	// This shouldn't be needed, but it's better to recover and let the connection die than to panic the server.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Recovered from panic ", r)
		}
	}()
	bf := byteframe.NewByteFrameFromBytes(pktGroup)
	opcode := network.PacketID(bf.ReadUint16())
	// Print any (non-common spam) packet opcodes and data.
	if opcode != network.MSG_SYS_END &&
		opcode != network.MSG_SYS_PING &&
		opcode != network.MSG_SYS_NOP &&
		opcode != network.MSG_SYS_TIME &&
		opcode != network.MSG_SYS_EXTEND_THRESHOLD {
		fmt.Printf("CharID: '%x'\n", s.charID)
		fmt.Printf("Opcode: %s\n", opcode)
		fmt.Printf("Data:\n%s\n", hex.Dump(pktGroup))
	}
	// Get the packet parser and handler for this opcode.
	mhfPkt := mhfpacket.FromOpcode(opcode)
	if mhfPkt == nil {
		fmt.Println("Got opcode which we don't know how to parse, can't parse anymore for this group")
		return
	}
	// Parse the packet.
	err := mhfPkt.Parse(bf, s.clientContext)
	if err != nil {
		// The deferred recover above turns this into a dropped group rather
		// than a server crash.
		panic(err)
	}
	// Handle the packet.
	handlerTable[opcode](s, mhfPkt)
	// If there is more data on the stream that the .Parse method didn't read, then read another packet off it.
	remainingData := bf.DataFromCurrent()
	if len(remainingData) >= 2 {
		s.handlePacketGroup(remainingData)
	}
}
|
/*
Utilizando o exercício anterior, remova uma entrada do map e demonstre o map inteiro utilizando range.
*/
package main
import (
"fmt"
)
// imprimeDados percorre o map inteiro, imprimindo cada pessoa seguida de seus hobbies.
func imprimeDados(dados map[string][]string) {
	for nome, hobbies := range dados {
		fmt.Printf("Nome da Pessoa: %v\t", nome)
		for _, valor := range hobbies {
			fmt.Printf("Hobbie: %v\t", valor)
		}
		fmt.Println()
	}
}

func main() {
	// Map de nome da pessoa -> lista de hobbies.
	dadosPessoais := map[string][]string{
		"João_Bruno":   {"Programar", "Jogar Video Game"},
		"Maria_Fátima": {"Reclamar da vida", "Andar por aí"},
		"Laís_Farias":  {"Ler Livros", "Contruir móveis"},
	}
	dadosPessoais["Paula_Noronha"] = []string{"Escrever", "Ler Hqs"}

	imprimeDados(dadosPessoais)

	fmt.Println("Excluindo um elemento do map....")
	delete(dadosPessoais, "João_Bruno")
	imprimeDados(dadosPessoais)
}
|
// vim: ts=4 sts=4 sw=4
package executor
import (
"sync"
)
//////////////
// Executor //
//////////////
// QuitChan is the channel used to signal routines that they should stop.
type QuitChan chan bool

// Runer is implemented by anything that can perform one unit of work while
// observing the given QuitChan for interruption.
type Runer interface {
	Run(QuitChan)
}

// Executor runs routines as goroutines and tracks them so that they can be
// interrupted and awaited as a group.
type Executor struct {
	quitChan  QuitChan
	waitGroup *sync.WaitGroup
}

// New returns a ready-to-use Executor.
func New() *Executor {
	return &Executor{
		quitChan:  make(QuitChan),
		waitGroup: new(sync.WaitGroup),
	}
}
// Run launches routine as a goroutine tracked by the executor's WaitGroup.
// The routine itself is returned so calls can be chained fluently.
func (executor *Executor) Run(routine func(QuitChan)) func(QuitChan) {
	executor.waitGroup.Add(1)
	go func() {
		routine(executor.quitChan)
		executor.waitGroup.Done()
	}()
	return routine
}
// Loop repeatedly invokes runable.Run until the executor is interrupted.
// The runable is returned so calls can be chained fluently.
func (executor *Executor) Loop(runable Runer) Runer {
	executor.Run(func(quitChan QuitChan) {
		for {
			select {
			case <-quitChan:
				// Interrupted: returning ends the loop and the goroutine.
				return
			default:
				runable.Run(quitChan)
			}
		}
	})
	return runable
}
// Wait blocks until every routine started via Run has finished.
func (executor *Executor) Wait() {
	// Wait for the WaitGroup to be entirely closed
	executor.waitGroup.Wait()
}
// Interrupt signals every running routine to stop and blocks until they have
// all finished; the Executor can then be reused.
func (executor *Executor) Interrupt() {
	// Closing the QuitChan will unlock every receive operations on it
	close(executor.quitChan)
	// Create another one for future reuse of the Executor
	// NOTE(review): routines started between this replacement and Wait would
	// receive the fresh (open) channel — confirm callers never Run during an
	// in-flight Interrupt.
	executor.quitChan = make(QuitChan)
	// Wait for the WaitGroup to be entirely closed
	executor.Wait()
}
|
// Licensed to SolID under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SolID licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package middleware
import (
"encoding/json"
"log"
"net/http"
"github.com/golang/protobuf/ptypes/wrappers"
corev1 "zntr.io/solid/api/gen/go/oidc/core/v1"
"zntr.io/solid/pkg/sdk/rfcerrors"
"zntr.io/solid/pkg/server/clientauthentication"
"zntr.io/solid/pkg/server/storage"
)
// ClientAuthentication is a middleware to handle client authentication.
//
// Confidential clients — and requests that carry no client_id at all — are
// authenticated via the private_key_jwt assertion flow; public clients are
// injected into the request context as-is.
func ClientAuthentication(clients storage.ClientReader) Adapter {
	// Prepare client authentication
	clientAuth := clientauthentication.PrivateKeyJWT(clients)

	// Return middleware
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			var (
				ctx         = r.Context()
				q           = r.URL.Query()
				clientIDRaw = q.Get("client_id")
			)

			// authenticate runs the private_key_jwt flow with the assertion
			// carried by the request and, on success, injects the
			// authenticated client into ctx. It reports success; on failure
			// the error response has already been written to w.
			// (This closure replaces the two identical copies of the flow the
			// original carried in both branches.)
			authenticate := func() bool {
				resAuth, err := clientAuth.Authenticate(ctx, &corev1.ClientAuthenticationRequest{
					ClientAssertionType: &wrappers.StringValue{
						Value: q.Get("client_assertion_type"),
					},
					ClientAssertion: &wrappers.StringValue{
						Value: q.Get("client_assertion"),
					},
				})
				if err != nil {
					log.Println("unable to authenticate client:", err)
					json.NewEncoder(w).Encode(resAuth.GetError())
					return false
				}
				ctx = clientauthentication.Inject(ctx, resAuth.Client)
				return true
			}

			// client_id
			if clientIDRaw != "" {
				// Retrieve client details
				client, err := clients.Get(ctx, clientIDRaw)
				if err != nil {
					log.Printf("unable to retrieve client '%s': %v", clientIDRaw, err)
					json.NewEncoder(w).Encode(rfcerrors.InvalidClient().Build())
					return
				}

				switch client.ClientType {
				case corev1.ClientType_CLIENT_TYPE_CONFIDENTIAL:
					// Confidential clients must prove their identity.
					if !authenticate() {
						return
					}
				case corev1.ClientType_CLIENT_TYPE_PUBLIC:
					// Public clients are attached to the context directly.
					ctx = clientauthentication.Inject(ctx, client)
				}
			} else if !authenticate() {
				// No client_id: only assertion-based authentication applies.
				return
			}

			// Delegate to next handler
			h.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}
|
/*
Package retoil provides simple functionality for restarting toilers (i.e., workers).
A toiler that has a Toil() method, that does work, blocks (i.e., doesn't return) until
the work is done, and panic()s if there is a problem it cannot or doesn't want to deal
with.
Usage
To use, create one or more types that implement the toil.Toiler interface. For example:
type awesomeToiler struct{}
func newAwesomeToiler() {
toiler := awesomeToiler{}
return &toiler
}
func (toiler *awesomeToiler) Toil() {
//@TODO: Do work here.
//
		// And this blocks (i.e., does not return)
// until the work is done.
//
// It also panic()s if it encounters a problem
// it cannot or doesn't want to deal with.
}
Then create a retoiler that wraps that toiler. (Also choosing a retoil strategy, when doing that.)
toiler := newAwesomeToiler()
strategizer := DelayedLimitedStrategy(16, 5 * time.Second)
//strategizer := LimitedExponentialBackoffStrategy(16, 5 * time.Second)
retoiler := retoil.New(toiler, strategizer)
Observers
A toiler's Toil method can finish in one of two ways. Either it will return gracefully, or
it will panic().
The retoiler is OK with either.
But also, the retoiler provides the toiler with a convenient way of being notified
of each case.
If a toiler also has a ReturnedNotice() method, then the retoiler will call the toiler's
ReturnedNotice() method when the toiler's Toil() method has returned gracefully. For example:
type awesomeToiler struct{}
func newAwesomeToiler() {
toiler := awesomeToiler{}
return &toiler
}
func (toiler *awesomeToiler) Toil() {
//@TODO: Do work here.
}
func (toiler *awesomeToiler) ReturnedNotice() {
//@TODO: Do something with this notification.
}
If a toiler also has a PanickedNotice() method, then the retoiler will call the toiler's
PanickedNotice() method when the toiler's Toil() method has panic()ed. For example:
type awesomeToiler struct{}
func newAwesomeToiler() {
toiler := awesomeToiler{}
return &toiler
}
func (toiler *awesomeToiler) Toil() {
//@TODO: Do work here.
}
func (toiler *awesomeToiler) PanickedNotice() {
//@TODO: Do something with this notification.
}
If a toiler also has a RecoveredNotice() method, then the retoiler will call the toiler's
RecoveredNotice() method when the toiler's Toil() method has restarted after a panic(). For example:
type awesomeToiler struct{}
func newAwesomeToiler() {
toiler := awesomeToiler{}
return &toiler
}
func (toiler *awesomeToiler) Toil() {
//@TODO: Do work here.
}
func (toiler *awesomeToiler) RecoveredNotice() {
//@TODO: Do something with this notification.
}
And of course, a toiler can take advantage of all of these notifications and have
ReturnedNotice(), PanickedNotice() and RecoveredNotice() methods all at once. For example:
type awesomeToiler struct{}
func newAwesomeToiler() {
toiler := awesomeToiler{}
return &toiler
}
func (toiler *awesomeToiler) Toil() {
//@TODO: Do work here.
}
func (toiler *awesomeToiler) ReturnedNotice() {
//@TODO: Do something with this notification.
}
func (toiler *awesomeToiler) PanickedNotice() {
//@TODO: Do something with this notification.
}
func (toiler *awesomeToiler) RecoveredNotice() {
//@TODO: Do something with this notification.
}
*/
package retoil
|
package main
import "fmt"
func main() {
	// Demonstrate fast exponentiation: 2.5 raised to the 16th power.
	base := 2.5
	fmt.Println(myPow(base, 16))
}
// myPow computes x raised to the integer power n using exponentiation by
// squaring, which needs only O(log n) multiplications.
//
// A negative exponent is handled by inverting the base: x^(-n) == (1/x)^n.
// By convention, x^0 == 1 for any x. The leftover debug fmt.Println calls of
// the original have been removed — a pure computation should not print.
func myPow(x float64, n int) float64 {
	if n == 0 {
		return 1
	}
	if n < 0 {
		x = 1 / x
		n = -n
	}
	// Square the half-sized result; fold in one extra x when n is odd.
	half := myPow(x, n/2)
	if n%2 == 0 {
		return half * half
	}
	return x * half * half
}
|
package param
import (
"fmt"
"github.com/wlMalk/gapi/constants"
"github.com/wlMalk/gapi/validation"
)
// Params is a lockable collection of named Param definitions.
type Params struct {
	params map[string]*Param // Registered params, keyed by their unique name.
	containsFiles bool // NOTE(review): not set anywhere in this file — presumably maintained elsewhere.
	containsBodyParams bool // NOTE(review): not set anywhere in this file — presumably maintained elsewhere.
	isLocked bool // When set, any further mutation panics.
}
// NewParams returns an empty, unlocked Params collection.
func NewParams() *Params {
	return &Params{params: make(map[string]*Param)}
}
// Get returns the underlying name -> *Param map.
func (p *Params) Get() map[string]*Param {
	return p.params
}

// ContainsFiles reports the collection's file-params flag.
func (p *Params) ContainsFiles() bool {
	return p.containsFiles
}

// ContainsBodyParams reports the collection's body-params flag.
func (p *Params) ContainsBodyParams() bool {
	return p.containsBodyParams
}
// Append registers the given params. It panics when the collection is locked,
// when a param has no name, or when a name is already registered.
func (p *Params) Append(params ...*Param) {
	if p.isLocked {
		panic("Can not edit Params while it's locked.")
	}
	for _, param := range params {
		name := param.name
		if name == "" {
			panic("Detected a param without a name.")
		}
		if _, ok := p.params[name]; ok {
			panic(fmt.Sprintf("Detected 2 params with the same name: \"%s\".", name))
		}
		p.params[name] = param
	}
}
// Set replaces every registered param with the given ones. It panics when the
// collection is locked.
func (p *Params) Set(params ...*Param) {
	if p.isLocked {
		panic("Can not edit Params while it's locked.")
	}
	p.params = map[string]*Param{}
	p.Append(params...)
}
// Len returns the number of registered params.
func (p *Params) Len() int {
	return len(p.params)
}

// Lock freezes the collection; subsequent Append/Set calls panic.
func (p *Params) Lock() {
	p.isLocked = true
}
// Param describes a single request parameter: where it is read from, how it
// is typed, and how its value is validated and processed.
type Param struct {
	name string
	validators []validation.Validator
	def interface{} // Default value. NOTE(review): not referenced in this file — confirm usage elsewhere.
	as int // Target type; one of the constants.TYPE_* codes.
	strSep string // NOTE(review): not referenced in this file — presumably a separator for multi-valued strings.
	isRequired bool
	isMultiple bool
	isFile bool
	isInPath bool
	isInQuery bool
	isInHeader bool
	isInBody bool
	preprocessor func(*validation.Value) // NOTE(review): invoked outside this file.
	postprocessor func(*validation.Value) // NOTE(review): invoked outside this file.
}
// New returns a Param with the given name, typed as a string by default.
func New(name string) *Param {
	return &Param{
		name: name,
		as: constants.TYPE_STRING,
	}
}

// PathParam returns a Param read from the request path (and therefore required).
func PathParam(name string) *Param {
	return New(name).In(constants.IN_PATH)
}

// QueryParam returns a Param read from the URL query string.
func QueryParam(name string) *Param {
	return New(name).In(constants.IN_QUERY)
}

// HeaderParam returns a Param read from the request headers.
func HeaderParam(name string) *Param {
	return New(name).In(constants.IN_HEADER)
}

// BodyParam returns a Param read from the request body.
func BodyParam(name string) *Param {
	return New(name).In(constants.IN_BODY)
}
// Name renames the param and returns it for chaining.
func (p *Param) Name(name string) *Param {
	p.name = name
	return p
}

// GetName returns the param's name.
func (p *Param) GetName() string {
	return p.name
}

// Required marks the param as mandatory and returns it for chaining.
func (p *Param) Required() *Param {
	p.isRequired = true
	return p
}

// IsRequired reports whether the param is mandatory.
func (p *Param) IsRequired() bool {
	return p.isRequired
}

// File marks the param as a file upload and returns it for chaining.
func (p *Param) File() *Param {
	p.isFile = true
	return p
}

// IsFile reports whether the param is a file upload.
func (p *Param) IsFile() bool {
	return p.isFile
}

// Multiple marks the param as multi-valued and returns it for chaining.
func (p *Param) Multiple() *Param {
	p.isMultiple = true
	return p
}

// IsMultiple reports whether the param is multi-valued.
func (p *Param) IsMultiple() bool {
	return p.isMultiple
}
// As sets the param's value type to one of the supported constants.TYPE_*
// codes; unknown codes are silently ignored. Returns the param for chaining.
func (p *Param) As(as int) *Param {
	switch as {
	case constants.TYPE_STRING,
		constants.TYPE_INT,
		constants.TYPE_INT64,
		constants.TYPE_FLOAT,
		constants.TYPE_FLOAT64,
		constants.TYPE_BOOL:
		p.as = as
	}
	return p
}

// GetAs returns the param's value type code.
func (p *Param) GetAs() int {
	return p.as
}
// In marks the locations the param is read from; unknown location codes are
// ignored. If a Param is in path then it is automatically marked required.
func (p *Param) In(in ...int) *Param {
	for _, i := range in {
		switch i {
		case constants.IN_PATH:
			p.isInPath = true
			p.isRequired = true
		case constants.IN_QUERY:
			p.isInQuery = true
		case constants.IN_HEADER:
			p.isInHeader = true
		case constants.IN_BODY:
			p.isInBody = true
		}
	}
	return p
}
// IsIn reports whether the param is present in every one of the given
// locations. Unknown location codes are treated as satisfied.
func (p *Param) IsIn(in ...int) bool {
	for _, loc := range in {
		ok := true
		switch loc {
		case constants.IN_PATH:
			ok = p.isInPath
		case constants.IN_QUERY:
			ok = p.isInQuery
		case constants.IN_HEADER:
			ok = p.isInHeader
		case constants.IN_BODY:
			ok = p.isInBody
		}
		if !ok {
			return false
		}
	}
	return true
}
// Must sets the validators to use.
func (p *Param) Must(validators ...validation.Validator) *Param {
	p.validators = validators
	return p
}

// Validate returns the first error it encountered
func (p *Param) Validate(v *validation.Value, req validation.Requester) error {
	for _, va := range p.validators {
		err := va.Validate(v, req)
		if err != nil {
			return err
		}
	}
	return nil
}
// ValidateAll returns all the errors it encountered
// (nil when every validator passes).
func (p *Param) ValidateAll(v *validation.Value, req validation.Requester) []error {
	var errs []error
	for _, va := range p.validators {
		err := va.Validate(v, req)
		if err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
|
package github
import (
"fmt"
"testing"
)
// CreateTestPRManager builds a PRManager backed by the canned testCmd runner.
func CreateTestPRManager() PRManager {
	return &defaultPRManager{cmd: &testCmd{}}
}
// testCmd fakes the gh command runner, answering a fixed set of argument
// patterns and failing on anything unexpected.
type testCmd struct {}

// run simulates gh invocations for the scenarios exercised by the tests:
// "pr view" for a branch with no PR or with an existing open PR, and
// "pr create" against the "main" base branch.
func (c *testCmd) run(args []string) (string, string, error) {
	// checkPRExists func: run([]string{"pr","view", baseBranch, "--json",
	// "state,url", "-q", "select(.state == \"OPEN\")" })
	if args[0] == "pr" && args[1] == "view" {
		if args[2] == "new-branch" {
			// No PR exists for this branch: empty output, no error.
			return "","",nil
		} else if args[2] == "already-existing-open-pr-branch" {
			return "{\"state\":\"OPEN\",\"url\":\"https://github.com/some-org/some-repo/pull/42\"}", "", nil
		}
	}
	// createPR func: run([]string{"pr","create","--title", title, "--body", body, "--base", baseBranch })
	// returns error on fail, we don't care about stdout/stderr
	// will fail if title or baseBranch already matches a PR in the repo (TODO: double-check)
	if args[0] == "pr" && args[1] == "create" {
		// args[7] is the value following "--base".
		if args[7] == "main" {
			return "made-up PR creation succeeded message", "", nil
		}
	}
	return "", "", fmt.Errorf("received unexpected arguments %v", args)
}
// TestBranchAlreadyExists checks that CreateOrUpdatePR succeeds when an open
// PR already exists for the head branch.
func TestBranchAlreadyExists(t *testing.T) {
	m := CreateTestPRManager()
	_, err := m.CreateOrUpdatePR("new user-specified title", "user-specified description", "main", "already-existing-open-pr-branch")
	if err != nil {
		// Was t.Fatalf(err.Error()): passing the error text as the format
		// string is a go vet "printf" violation and misrenders any % verbs.
		t.Fatalf("CreateOrUpdatePR: %v", err)
	}
}
// TestNewBranch checks that CreateOrUpdatePR succeeds when no PR exists yet
// for the head branch, so a new one is created against "main".
func TestNewBranch(t *testing.T) {
	m := CreateTestPRManager()
	_, err := m.CreateOrUpdatePR("new user-specified title", "user-specified description", "main", "new-branch")
	if err != nil {
		// Was t.Fatalf(err.Error()): passing the error text as the format
		// string is a go vet "printf" violation and misrenders any % verbs.
		t.Fatalf("CreateOrUpdatePR: %v", err)
	}
}
/* TODO: it seems GitHub doesn't care if multiple PRs have the same title, double-check
func TestTitleAlreadyExists(t *testing.T) {
m := CreateTestPRManager()
_, err := m.CreateOrUpdatePR("already-existing user-specified title", "user-specified description", "main", "new-branch")
if err != nil {
t.Fatalf(err.Error())
}}
func TestTitleAndBranchAlreadyExists(t *testing.T) {
m := CreateTestPRManager()
_, err := m.CreateOrUpdatePR("already-existing user-specified title", "user-specified description", "main", "new-branch")
if err != nil {
t.Fatalf(err.Error())
}}
*/ |
package src
import "crypto/sha256"
// HashEmailForTimingLogs returns the SHA-256 digest of the given email
// address so it can be referenced in timing logs without storing the address
// in plain text. (Per the original author's note, whether SHA-256
// specifically is required here is an open question — probably not.)
func HashEmailForTimingLogs(email string) []byte {
	sum := sha256.Sum256([]byte(email))
	return sum[:]
}
package tenantfetcher
// EventsType classifies the kind of tenant events being fetched.
type EventsType int

const (
	// CreatedEventsType identifies tenant-creation events.
	CreatedEventsType EventsType = iota
	// DeletedEventsType identifies tenant-deletion events.
	DeletedEventsType
	// UpdatedEventsType identifies tenant-update events.
	UpdatedEventsType
)

// TenantEventsResponse is the raw, not-yet-decoded payload of an events response.
type TenantEventsResponse []byte
|
package weather_provider
import (
"encoding/json"
"fmt"
"interface-testing/api/clients/restclient"
"interface-testing/api/domain/weather_domain"
"io/ioutil"
"log"
"net/http"
)
const (
	// weatherUrl is the Dark Sky forecast endpoint, parameterized with the
	// API key, latitude and longitude (in that order).
	weatherUrl = "https://api.darksky.net/forecast/%s/%v,%v"
)

// weatherProvider is the Dark Sky-backed implementation of weatherServiceInterface.
type weatherProvider struct {}

// weatherServiceInterface abstracts the weather lookup so that callers can
// substitute a mock implementation in tests.
type weatherServiceInterface interface {
	GetWeather(request weather_domain.WeatherRequest) (*weather_domain.Weather, *weather_domain.WeatherError)
}

var (
	// WeatherProvider is the package-level implementation used by callers.
	WeatherProvider weatherServiceInterface = &weatherProvider{}
)
// GetWeather fetches the weather forecast for the request's coordinates from
// the Dark Sky API.
//
// It returns a WeatherError carrying an appropriate HTTP status code when the
// request cannot be performed, the body cannot be read, or either the error
// payload or the success payload fails to decode.
func (p *weatherProvider) GetWeather(request weather_domain.WeatherRequest) (*weather_domain.Weather, *weather_domain.WeatherError) {
	url := fmt.Sprintf(weatherUrl, request.ApiKey, request.Latitude, request.Longitude)
	response, err := restclient.ClientStruct.Get(url)
	if err != nil {
		log.Printf("error when trying to get weather from dark sky api %s", err.Error())
		return nil, &weather_domain.WeatherError{
			Code:         http.StatusBadRequest,
			ErrorMessage: err.Error(),
		}
	}
	// Close the body as soon as we have a response. The original registered
	// this defer only after ReadAll, so an early return on a read error
	// leaked the response body/connection.
	defer response.Body.Close()
	bytes, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, &weather_domain.WeatherError{
			Code:         http.StatusBadRequest,
			ErrorMessage: err.Error(),
		}
	}
	// The api owner can decide to change datatypes, etc. When this happens, it might affect the error format returned
	if response.StatusCode > 299 {
		var errResponse weather_domain.WeatherError
		if err := json.Unmarshal(bytes, &errResponse); err != nil {
			return nil, &weather_domain.WeatherError{
				Code:         http.StatusInternalServerError,
				ErrorMessage: "invalid json response body",
			}
		}
		errResponse.Code = response.StatusCode
		return nil, &errResponse
	}
	var result weather_domain.Weather
	if err := json.Unmarshal(bytes, &result); err != nil {
		log.Printf("error when trying to unmarshal weather successful response: %s", err.Error())
		return nil, &weather_domain.WeatherError{Code: http.StatusInternalServerError, ErrorMessage: "error unmarshaling weather fetch response"}
	}
	return &result, nil
}
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01500105 is the XML document wrapper for the semt.015.001.05
// (IntraPositionMovementConfirmation) message.
type Document01500105 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.015.001.05 Document"`
	Message *IntraPositionMovementConfirmationV05 `xml:"IntraPosMvmntConf"`
}

// AddMessage allocates the wrapped message and returns it for population.
func (d *Document01500105) AddMessage() *IntraPositionMovementConfirmationV05 {
	d.Message = new(IntraPositionMovementConfirmationV05)
	return d.Message
}
// Scope
// An account servicer sends an IntraPositionMovementConfirmation to an account owner to confirm the movement of securities within its holding from one sub-balance to another, for example, blocking of securities.
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
//
// Usage
// The message may also be used to:
// - re-send a message previously sent,
// - provide a third party with a copy of a message for information,
// - re-send to a third party a copy of a message for information
// using the relevant elements in the Business Application Header.
//
// Optional blocks are allocated lazily via the Add* builder methods below.
type IntraPositionMovementConfirmationV05 struct {
	// Additional parameters to the transaction.
	AdditionalParameters *iso20022.AdditionalParameters21 `xml:"AddtlParams,omitempty"`
	// Party that legally owns the account.
	AccountOwner *iso20022.PartyIdentification92Choice `xml:"AcctOwnr,omitempty"`
	// Account to or from which a securities entry is made.
	SafekeepingAccount *iso20022.SecuritiesAccount24 `xml:"SfkpgAcct"`
	// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
	SafekeepingPlace *iso20022.SafekeepingPlaceFormat10Choice `xml:"SfkpgPlc,omitempty"`
	// Financial instrument representing a sum of rights of the investor vis-a-vis the issuer.
	FinancialInstrumentIdentification *iso20022.SecurityIdentification19 `xml:"FinInstrmId"`
	// Elements characterising a financial instrument.
	FinancialInstrumentAttributes *iso20022.FinancialInstrumentAttributes63 `xml:"FinInstrmAttrbts,omitempty"`
	// Intra-position movement transaction details.
	IntraPositionDetails *iso20022.IntraPositionDetails34 `xml:"IntraPosDtls"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddAdditionalParameters allocates the AdditionalParameters block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddAdditionalParameters() *iso20022.AdditionalParameters21 {
	i.AdditionalParameters = new(iso20022.AdditionalParameters21)
	return i.AdditionalParameters
}

// AddAccountOwner allocates the AccountOwner block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddAccountOwner() *iso20022.PartyIdentification92Choice {
	i.AccountOwner = new(iso20022.PartyIdentification92Choice)
	return i.AccountOwner
}

// AddSafekeepingAccount allocates the SafekeepingAccount block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddSafekeepingAccount() *iso20022.SecuritiesAccount24 {
	i.SafekeepingAccount = new(iso20022.SecuritiesAccount24)
	return i.SafekeepingAccount
}

// AddSafekeepingPlace allocates the SafekeepingPlace block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddSafekeepingPlace() *iso20022.SafekeepingPlaceFormat10Choice {
	i.SafekeepingPlace = new(iso20022.SafekeepingPlaceFormat10Choice)
	return i.SafekeepingPlace
}

// AddFinancialInstrumentIdentification allocates the FinancialInstrumentIdentification block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddFinancialInstrumentIdentification() *iso20022.SecurityIdentification19 {
	i.FinancialInstrumentIdentification = new(iso20022.SecurityIdentification19)
	return i.FinancialInstrumentIdentification
}

// AddFinancialInstrumentAttributes allocates the FinancialInstrumentAttributes block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddFinancialInstrumentAttributes() *iso20022.FinancialInstrumentAttributes63 {
	i.FinancialInstrumentAttributes = new(iso20022.FinancialInstrumentAttributes63)
	return i.FinancialInstrumentAttributes
}

// AddIntraPositionDetails allocates the IntraPositionDetails block, attaches it and returns it.
func (i *IntraPositionMovementConfirmationV05) AddIntraPositionDetails() *iso20022.IntraPositionDetails34 {
	i.IntraPositionDetails = new(iso20022.IntraPositionDetails34)
	return i.IntraPositionDetails
}

// AddSupplementaryData appends a new entry to the repeatable SupplementaryData
// slice and returns it.
func (i *IntraPositionMovementConfirmationV05) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	i.SupplementaryData = append(i.SupplementaryData, newValue)
	return newValue
}
|
package main
import (
"flag"
"net/http"
"os"
"strings"
"github.com/huaweicloud/cloudeye-exporter/collector"
"github.com/huaweicloud/cloudeye-exporter/logs"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Command-line flags: cloud config location, metric filtering and debug logging.
var (
	clientConfig = flag.String("config", "./clouds.yml", "Path to the cloud configuration file")
	filterEnable = flag.Bool("filter-enable", false, "Enabling monitoring metric filter")
	debug        = flag.Bool("debug", false, "If debug the code.")
)
// handler serves Prometheus metrics for the cloud services listed in the
// request's "services" query parameter (comma separated). A fresh registry
// and collector are built per request and rendering is delegated to promhttp.
func handler(w http.ResponseWriter, r *http.Request) {
	target := r.URL.Query().Get("services")
	if target == "" {
		// Fixed: the message previously said 'target' although the
		// parameter actually read above is "services".
		http.Error(w, "'services' parameter must be specified", http.StatusBadRequest)
		return
	}
	targets := strings.Split(target, ",")
	registry := prometheus.NewRegistry()
	logs.Logger.Infof("Start to monitor services: %s", targets)
	exporter, err := collector.GetMonitoringCollector(*clientConfig, targets)
	if err != nil {
		// The original logged this (with a typo, "morning") in a dead
		// branch after MustRegister where err was always nil; log it
		// here, on the path where the error actually occurs.
		logs.Logger.Errorf("Fail to start to monitor services: %+v, err: %s", targets, err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		if _, werr := w.Write([]byte(err.Error())); werr != nil {
			logs.Logger.Errorf("Fail to write response body, error: %s", werr.Error())
		}
		return
	}
	registry.MustRegister(exporter)
	h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
	h.ServeHTTP(w, r)
}
// main wires the exporter together: parse flags, initialise logging, load the
// cloud configuration, initialise metric filtering, then serve metrics over
// HTTP at the configured path and port.
func main() {
	flag.Parse()
	logs.InitLog(*debug)
	config, err := collector.NewCloudConfigFromFile(*clientConfig)
	if err != nil {
		// Fatal presumably terminates the process; the return is a
		// safety net — TODO confirm logs.Logger.Fatal semantics.
		logs.Logger.Fatal("New Cloud Config From File error: ", err.Error())
		return
	}
	err = collector.InitFilterConfig(*filterEnable)
	if err != nil {
		logs.Logger.Fatal("Init filter Config error: ", err.Error())
		return
	}
	http.HandleFunc(config.Global.MetricPath, handler)
	logs.Logger.Infoln("Start server at ", config.Global.Port)
	// ListenAndServe blocks; it only returns on a server error.
	if err := http.ListenAndServe(config.Global.Port, nil); err != nil {
		logs.Logger.Errorf("Error occur when start server %s", err.Error())
		os.Exit(1)
	}
}
|
package vastflow
import (
"errors"
"fmt"
"github.com/jack0liu/logs"
"reflect"
"time"
)
// Sentinel errors used by flows to drive the river state machine.
var (
	// ErrorRetry asks the framework to re-run the river's Flow.
	ErrorRetry = errors.New("river retries to run")
	// ErrorCanceled reports that the flow's headwaters were canceled.
	ErrorCanceled = errors.New("river canceled to run")
	// ErrorBasinCanceled reports that the enclosing basin was canceled.
	ErrorBasinCanceled = errors.New("basin canceled to run")
	// ErrorContinue asks the framework to run another cycle iteration.
	ErrorContinue = errors.New("river needs to continue")
)

// RiverFlow is the user-implemented behaviour of a single river node.
type RiverFlow interface {
	// Update lets the implementation tune the river's attributes before a run.
	Update(attr *RiverAttr)
	// Flow performs the node's main work.
	Flow(headwaters *Headwaters) (errCause string, err error)
	// Cycle performs one polling iteration after Flow succeeds.
	Cycle(headwaters *Headwaters) (errCause string, err error)
}

// RiverAttr configures retry and cycle behaviour for a river.
type RiverAttr struct {
	RetryTimes int32
	// seconds between retry attempts
	RetryInterval int32
	CycleTimes int32
	// seconds between cycle iterations
	CycleInterval int32
	// Durable persists the headwaters after each successful stage (see Run).
	Durable bool
	// Atomic lets Flow/Cycle still run even when a cancellation signal is
	// pending (see innerFlow).
	Atomic bool
	// isInner marks rivers nested inside another construct; inner rivers do
	// not report success/failure to the atlantic (see setFail/runNext).
	isInner bool
}

// River is the framework-side state of one flow node.
type River struct {
	attr  RiverAttr
	down  Stream
	state streamState
	errStr string // failed error str
	id    string
	color string
	waterId string // used to restore flow
	retryCount int32
	cycleCount int32
}
// setFail marks the river failed, persists the terminal state and — for a
// top-level (non-inner) river — cancels the headwaters and notifies the
// atlantic. It returns an error whose message combines the flow's concrete
// type with errStr.
func (an *River) setFail(errStr string, flow RiverFlow, headwaters *Headwaters) error {
	logs.Info("[%s][%s]fail", headwaters.RequestId, an.color)
	an.state = stateFail
	an.errStr = errStr
	// Prefix the cause with the implementing type for easier diagnosis.
	cause := fmt.Sprintf("%v:%s", reflect.ValueOf(flow).Elem().Type(), errStr)
	if err := setFlowEnd(an.id, stateFail.String(), cause); err != nil {
		logs.Error("update state fail, err:%s", err.Error())
		return err
	}
	if !an.attr.isInner {
		headwaters.Cancel(errors.New(errStr))
		headwaters.atlantic.runFail(headwaters, headwaters.atlantic.(AtlanticFlow))
	}
	return errors.New(cause)
}
// setSuccess marks the river successful and persists the terminal state.
func (an *River) setSuccess() error {
	an.state = stateSuccess
	if err := setFlowEnd(an.id, stateSuccess.String(), ""); err != nil {
		logs.Error("update state fail, err:%s", err.Error())
		return err
	}
	return nil
}

// setRunning marks the river as started and persists the state.
func (an *River) setRunning() error {
	an.state = stateRunning
	if err := setFlowStart(an.id, stateRunning.String()); err != nil {
		logs.Error("update state fail, err:%s", err.Error())
		return err
	}
	return nil
}

// setCycling marks the river as polling (cycling) and persists the state.
func (an *River) setCycling() error {
	an.state = stateCycling
	if err := updateFlowState(an.id, stateCycling.String()); err != nil {
		logs.Error("update state fail, err:%s", err.Error())
		return err
	}
	return nil
}

// updateWater persists the current headwaters so a durable flow can be
// restored later.
func (an *River) updateWater(headwaters *Headwaters) error {
	if err := updateHeadwaters(headwaters); err != nil {
		logs.Error("update water fail, err:%s", err.Error())
		return err
	}
	return nil
}
// innerFlow runs flow.Flow unless a cancellation signal is already pending.
// Atomic rivers run regardless of pending cancellation; non-atomic rivers
// bail out with ErrorBasinCanceled (basin finished) or ErrorCanceled
// (headwaters done) instead of running.
// NOTE(review): when both channels are ready, select picks a case at random,
// so either sentinel may be returned — confirm callers treat them alike.
func (an *River) innerFlow(headwaters *Headwaters, flow RiverFlow) (errCause string, err error) {
	select {
	case <-headwaters.basinFinish():
		if an.attr.Atomic {
			return flow.Flow(headwaters)
		} else {
			return "", ErrorBasinCanceled
		}
	case <-headwaters.Done():
		if an.attr.Atomic {
			return flow.Flow(headwaters)
		} else {
			return "", ErrorCanceled
		}
	default:
		return flow.Flow(headwaters)
	}
}
// Run drives the river state machine: init -> running -> (cycling) ->
// success, dispatching to the downstream node on success and recording the
// failure otherwise. The switch cases deliberately fall through so a river
// restored in an intermediate state resumes where it left off. A panic in
// the flow is recovered and converted into a failure.
func (an *River) Run(headwaters *Headwaters, flow RiverFlow, syncNext bool) (err error) {
	//logs.Info("[%s][%s]%v run start, id :%s", headwaters.RequestId, an.color, reflect.ValueOf(flow).Elem().Type(), an.id)
	defer func() {
		if e := recover(); e != nil {
			logs.Error("[%s][%s]%v", headwaters.RequestId, an.color, e)
			PrintStack()
			_ = an.setFail("got an panic", flow, headwaters)
			err = errors.New("got an panic")
		}
	}()
	// Let the flow adjust retry/cycle attributes before running.
	an.runInit(flow)
	switch an.state {
	case stateInit:
		if err = an.setRunning(); err != nil {
			return err
		}
		fallthrough
	case stateRunning:
		if err = an.runFlow(headwaters, flow, syncNext); err != nil {
			if err := an.setFail(err.Error(), flow, headwaters); err != nil {
				return err
			}
			return err
		}
		// Persist the (possibly mutated) headwaters for durable flows.
		if an.attr.Durable {
			if err := an.updateWater(headwaters); err != nil {
				return err
			}
		}
		if an.attr.CycleTimes <= 0 {
			// no cycle, set success , then do next
			if err = an.setSuccess(); err != nil {
				return err
			}
			return an.runNext(headwaters, syncNext)
		}
		// do cycle
		if err = an.setCycling(); err != nil {
			return err
		}
		fallthrough
	case stateCycling:
		if err = an.runCycle(headwaters, flow); err != nil {
			if err := an.setFail(err.Error(), flow, headwaters); err != nil {
				return err
			}
			return err
		}
		if an.attr.Durable {
			if err = an.updateWater(headwaters); err != nil {
				return err
			}
		}
		if err = an.setSuccess(); err != nil {
			return err
		}
		fallthrough
	case stateSuccess:
		//logs.Info("[%s][%s]%v run success, id :%s", headwaters.RequestId, an.color, reflect.ValueOf(flow).Elem().Type(), an.id)
		return an.runNext(headwaters, syncNext)
	case stateFail:
		// A previously failed river cannot be re-run; cancel downstream work.
		logs.Error("[%s][%s]flow has been failed", headwaters.RequestId, an.color)
		err = errors.New("flow has been failed")
		headwaters.Cancel(err)
		return err
	default:
		logs.Error("invalid river state:%s", an.state.String())
		return errors.New("invalid river state:" + an.state.String())
	}
}
// next returns the downstream node, or nil when this river is the last one.
func (an *River) next() Stream {
	return an.down
}

// setInner marks this river as nested inside another construct.
func (an *River) setInner() {
	an.attr.isInner = true
}

// SetDownStream links the node that runs after this river succeeds.
func (an *River) SetDownStream(down Stream) {
	an.down = down
}

// setId sets the river's persistence identifier.
func (an *River) setId(id string) {
	an.id = id
}

// getId returns the river's persistence identifier.
func (an *River) getId() string {
	return an.id
}

// setColor sets the river's color tag (used in log prefixes).
func (an *River) setColor(color string) {
	an.color = color
}

// colorCurrent sets the color when restoring a persisted flow; it behaves
// identically to setColor.
func (an *River) colorCurrent(color string) {
	an.color = color
}

// GetColor returns the river's color tag.
func (an *River) GetColor() string {
	return an.color
}

// setState overrides the river's state (used when restoring flows).
func (an *River) setState(state streamState) {
	an.state = state
}

// getState returns the river's current state.
func (an *River) getState() streamState {
	return an.state
}

// setWaterId records the headwaters id used to restore this flow.
func (an *River) setWaterId(waterId string) {
	an.waterId = waterId
}

// getWaterId returns the headwaters id used to restore this flow.
func (an *River) getWaterId() string {
	return an.waterId
}
// doRetry re-runs flow.Flow up to RetryTimes, sleeping RetryInterval seconds
// before each attempt. It stops early on success or on any error other than
// ErrorRetry; exhausting the budget yields a "retry timeout" error whose
// cause string carries the last ErrorRetry's errCause.
func (an *River) doRetry(headwaters *Headwaters, flow RiverFlow) (errStr string, err error) {
	var eStr string
	for an.retryCount < an.attr.RetryTimes {
		logs.Debug("[%s][%s]retry count : %d", headwaters.RequestId, an.color, an.retryCount)
		time.Sleep(time.Duration(an.attr.RetryInterval) * time.Second)
		eStr, err = an.innerFlow(headwaters, flow)
		if err != nil {
			// Count the attempt before deciding whether to keep retrying.
			an.retryCount++
			if err == ErrorRetry {
				continue
			} else {
				return eStr, err
			}
		} else {
			return "", nil
		}
	}
	errStr = fmt.Sprintf("[%s][%s]retry %d times failed, cause:%s", headwaters.RequestId, an.color, an.retryCount, eStr)
	return errStr, errors.New("retry timeout")
}
// doCycle repeatedly invokes flow.Cycle every CycleInterval seconds until it
// succeeds, returns an error other than ErrorContinue, or the CycleTimes
// budget is exhausted. Atomic rivers keep cycling through pending
// cancellation signals; others stop with ErrorCanceled.
// NOTE(review): this mirrors runCycle (which Run actually calls) and is not
// referenced in this chunk; also the basinFinish branch returns ErrorCanceled
// where innerFlow uses ErrorBasinCanceled — confirm which sentinel is
// intended before consolidating.
func (an *River) doCycle(headwaters *Headwaters, flow RiverFlow) (errStr string, err error) {
	if err := an.setCycling(); err != nil {
		return err.Error(), err
	}
	for an.cycleCount < an.attr.CycleTimes {
		time.Sleep(time.Duration(an.attr.CycleInterval) * time.Second)
		select {
		case <-headwaters.basinFinish():
			if an.attr.Atomic {
				errStr, err = flow.Cycle(headwaters)
			} else {
				return "basin canceled", ErrorCanceled
			}
		case <-headwaters.Done():
			if an.attr.Atomic {
				errStr, err = flow.Cycle(headwaters)
			} else {
				return "river canceled", ErrorCanceled
			}
		default:
			errStr, err = flow.Cycle(headwaters)
		}
		if err == nil {
			//out set success
			return "", nil
		}
		if err == ErrorContinue {
			an.cycleCount++
			continue
		}
		return err.Error(), err
	}
	errStr = fmt.Sprintf("[%s][%s]cycle %d times failed, cause:%s", headwaters.RequestId, an.color, an.cycleCount, errStr)
	return errStr, errors.New(errStr)
}
// runInit gives the flow a chance to tune the river's attributes (retry and
// cycle settings) before the run starts.
func (an *River) runInit(flow RiverFlow) {
	flow.Update(&an.attr)
}
// runNext hands the water to the downstream node — synchronously or in a new
// goroutine depending on syncNext — or, when this river is the last in the
// chain and not inner, reports success to the atlantic.
func (an *River) runNext(headwaters *Headwaters, syncNext bool) error {
	down := an.next()
	if down == nil {
		// End of the chain: only a top-level river reports success.
		if !an.attr.isInner {
			headwaters.atlantic.runSuccess(headwaters, headwaters.atlantic.(AtlanticFlow))
		}
		return nil
	}
	if syncNext {
		return down.Run(headwaters, down.(RiverFlow), syncNext)
	}
	// Asynchronous hand-off: the downstream error is not propagated here.
	go down.Run(headwaters, down.(RiverFlow), syncNext)
	return nil
}
// runCycle repeatedly invokes flow.Cycle every CycleInterval seconds until it
// succeeds, returns an error other than ErrorContinue, or the CycleTimes
// budget is exhausted. Atomic rivers keep cycling through pending
// cancellation; others stop with ErrorCanceled.
// NOTE(review): unlike innerFlow, the basinFinish branch also returns
// ErrorCanceled (not ErrorBasinCanceled) — confirm that is intentional.
func (an *River) runCycle(headwaters *Headwaters, flow RiverFlow) error {
	var errStr string
	var err error
	for an.cycleCount < an.attr.CycleTimes {
		time.Sleep(time.Duration(an.attr.CycleInterval) * time.Second)
		select {
		case <-headwaters.basinFinish():
			if an.attr.Atomic {
				errStr, err = flow.Cycle(headwaters)
			} else {
				return ErrorCanceled
			}
		case <-headwaters.Done():
			if an.attr.Atomic {
				errStr, err = flow.Cycle(headwaters)
			} else {
				return ErrorCanceled
			}
		default:
			errStr, err = flow.Cycle(headwaters)
		}
		if err == nil {
			//out set success
			//logs.Info("[%s][%s]%v cycle success", headwaters.RequestId, an.color, reflect.ValueOf(flow).Elem().Type())
			return nil
		}
		if err == ErrorContinue {
			an.cycleCount++
			continue
		}
		logs.Info("[%s][%s]%v cycle err:%s", headwaters.RequestId, an.color, reflect.ValueOf(flow).Elem().Type(), err.Error())
		return err
	}
	errStr = fmt.Sprintf("[%s][%s]cycle %d times failed, cause:%s", headwaters.RequestId, an.color, an.cycleCount, errStr)
	return errors.New(errStr)
}
// runFlow executes the main Flow once and, when the flow asks for a retry
// (ErrorRetry) and a retry budget exists, delegates to doRetry.
// NOTE(review): when RetryTimes <= 0 the returned error wraps errStr, which
// may be empty — confirm upstream tolerates an empty message.
func (an *River) runFlow(headwaters *Headwaters, flow RiverFlow, b bool) error {
	// do run
	errStr, err := an.innerFlow(headwaters, flow)
	if err == nil {
		return nil
	}
	if err != ErrorRetry {
		logs.Error("[%s][%s]%v run failed, err: %s", headwaters.RequestId, an.color, reflect.ValueOf(flow).Elem().Type(), err.Error())
		return err
	}
	if an.attr.RetryTimes <= 0 {
		return errors.New(errStr)
	}
	errStr, err = an.doRetry(headwaters, flow)
	if err != nil {
		return errors.New(errStr)
	}
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"os"
"strconv"
)
// Hello is the view model rendered into outline.html.
type Hello struct {
	Name string
	Age  int
}
// main is a tiny CGI-style handler: it decodes a JSON document passed as the
// first CLI argument, extracts "name" and "age" (each a one-element string
// array, e.g. {"name":["bob"],"age":["42"]}), and renders outline.html.
func main() {
	//fmt.Println(os.Args)
	if len(os.Args) < 2 {
		fmt.Println("error: missing JSON argument")
		return
	}
	var resMap map[string]interface{}
	// Previously the Unmarshal/Atoi errors were ignored and the type
	// assertions were unchecked, so malformed input caused a panic.
	if err := json.Unmarshal([]byte(os.Args[1]), &resMap); err != nil {
		fmt.Println("error: ", err.Error())
		return
	}
	names, ok := resMap["name"].([]interface{})
	ages, ok2 := resMap["age"].([]interface{})
	if !ok || !ok2 || len(names) == 0 || len(ages) == 0 {
		fmt.Println(`error: malformed input, want {"name": [...], "age": [...]}`)
		return
	}
	name, ok := names[0].(string)
	ageStr, ok2 := ages[0].(string)
	if !ok || !ok2 {
		fmt.Println("error: name and age entries must be strings")
		return
	}
	age, err := strconv.Atoi(ageStr)
	if err != nil {
		fmt.Println("error: ", err.Error())
		return
	}
	hello := Hello{name, age}
	// TODO(review): hard-coded, user-relative template path; consider a flag.
	outline := template.Must(template.ParseFiles("/home/" + os.Getenv("USER") + "/test/temp/mnt/test/go/myapp/template/outline.html"))
	fmt.Print("Content-Type: text/html;charset=utf-8\n\n")
	if err := outline.ExecuteTemplate(os.Stdout, "outline.html", hello); err != nil {
		fmt.Println("error: ", err.Error())
	}
}
|
package restmachinery
// OutboundRequest models an outbound API call.
type OutboundRequest struct {
	// Method specifies the HTTP method to be used.
	Method string
	// Path specifies a path (relative to the root of the API) to be used.
	Path string
	// QueryParams optionally specifies any URL query parameters to be used.
	QueryParams map[string]string
	// IncludeAuthHeader specifies whether to automatically include an
	// Authorization header with the client's bearer token in the outbound
	// request. If nil, this will default to true (included).
	IncludeAuthHeader *bool
	// Headers optionally specifies any miscellaneous HTTP headers to be used.
	Headers map[string]string
	// ReqBodyObj optionally provides an object that can be marshaled to create
	// the body of the HTTP request.
	ReqBodyObj interface{}
	// SuccessCode specifies what HTTP response code should indicate a successful
	// API call.
	SuccessCode int
	// RespObj optionally provides an object into which the HTTP response body can
	// be unmarshaled.
	RespObj interface{}
}
|
package study
import (
"database/sql"
"fmt"
_ "github.com/alexbrainman/odbc"
"github.com/axgle/mahonia"
"runtime"
"time"
"encoding/json"
)
// PacsInfo is one PATIENT/STUDY join row read from the PACS Access database,
// serialized to JSON via the tags below.
type PacsInfo struct {
	PID      string    `json:"pid"`      // patient identifier
	NC       string    `json:"name"`     // patient name (decoded from GB18030)
	SX       string    `json:"sex"`      // sex (decoded from GB18030)
	BR       time.Time `json:"birthday"` // birth date
	Modality string    `json:"modality"` // imaging modality from STUDY
	DISKID   string    `json:"picpath"`  // disk/picture path identifier
}
// OdbcTestAccess connects to an MS Access PACS database via ODBC, joins
// PATIENT and STUDY for one hard-coded patient ID, and prints each row as
// JSON on stdout.
func OdbcTestAccess() {
	conn, err := sql.Open("odbc", "driver={Microsoft Access Driver (*.mdb, *.accdb)};dbq=E:\\June\\WorkSpace\\Pacs\\PACS.MDB")
	//conn, err := sql.Open("odbc", "driver={Microsoft Access Driver (*.mdb)};dbq=E:\\June\\WorkSpace\\Pacs\\PACS.MDB") // 32-bit systems
	fmt.Println(runtime.GOARCH, runtime.GOOS)
	if err != nil {
		// Include the driver error; "Connecting Error" alone was undiagnosable.
		fmt.Println("Connecting Error", err)
		return
	}
	defer conn.Close()
	stmt, err := conn.Prepare("select A.PID,A.NC,A.SX,A.BR,B.Modality,B.DISKID from PATIENT A INNER JOIN STUDY B ON A.PID=B.PID WHERE A.PID='00022236'")
	if err != nil {
		fmt.Println("Query Error", err)
		return
	}
	defer stmt.Close()
	row, err := stmt.Query()
	if err != nil {
		fmt.Println("Query Error", err)
		return
	}
	defer row.Close()
	for row.Next() {
		pacsInfo := new(PacsInfo)
		if err := row.Scan(&pacsInfo.PID, &pacsInfo.NC, &pacsInfo.SX, &pacsInfo.BR, &pacsInfo.Modality, &pacsInfo.DISKID); err != nil {
			// Skip rows that fail to scan instead of emitting zero values.
			fmt.Println(err)
			continue
		}
		decoder := mahonia.NewDecoder("gb18030")
		pacsInfo.NC = decoder.ConvertString(pacsInfo.NC) // GBK -> UTF-8
		pacsInfo.SX = decoder.ConvertString(pacsInfo.SX) // GBK -> UTF-8
		pacsinJsons, err := json.Marshal(pacsInfo)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(string(pacsinJsons))
	}
	// Surface any iteration error that terminated the loop early.
	if err := row.Err(); err != nil {
		fmt.Println("Rows Error", err)
	}
	fmt.Printf("%s\n", "finish")
}
|
package solution
// TreeNode is a binary tree node used by the buildTree helper below.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// buildTree is a simulation helper (the original problem does not specify how
// the tree is constructed): the first value becomes the root, and each later
// value is chained below the previous node — even values hang on the right,
// odd values on the left.
// HINT: the tree is extended from the last inserted node, not rebuilt.
func buildTree(values ...int) *TreeNode {
	root := &TreeNode{}
	cursor := root
	for idx, val := range values {
		if idx == 0 {
			root.Val = val
			continue
		}
		node := &TreeNode{Val: val}
		if val%2 == 0 {
			cursor.Right = node
		} else {
			cursor.Left = node
		}
		cursor = node
	}
	return root
}
|
package config
import "os"
// Domain returns GLSAMAKER_DOMAIN, or "localhost" if unset or empty.
func Domain() string {
	return getEnv("GLSAMAKER_DOMAIN", "localhost")
}

// PostgresUser returns GLSAMAKER_POSTGRES_USER, or "root" if unset or empty.
func PostgresUser() string {
	return getEnv("GLSAMAKER_POSTGRES_USER", "root")
}

// PostgresPass returns GLSAMAKER_POSTGRES_PASS, or "root" if unset or empty.
func PostgresPass() string {
	return getEnv("GLSAMAKER_POSTGRES_PASS", "root")
}

// PostgresDb returns GLSAMAKER_POSTGRES_DB, or "glsamaker" if unset or empty.
func PostgresDb() string {
	return getEnv("GLSAMAKER_POSTGRES_DB", "glsamaker")
}

// PostgresHost returns GLSAMAKER_POSTGRES_HOST, or "db" if unset or empty.
func PostgresHost() string {
	return getEnv("GLSAMAKER_POSTGRES_HOST", "db")
}

// PostgresPort returns GLSAMAKER_POSTGRES_PORT, or "5432" if unset or empty.
func PostgresPort() string {
	return getEnv("GLSAMAKER_POSTGRES_PORT", "5432")
}

// Debug returns GLSAMAKER_DEBUG, or "false" if unset or empty.
func Debug() string {
	return getEnv("GLSAMAKER_DEBUG", "false")
}

// Quiet returns GLSAMAKER_QUIET, or "false" if unset or empty.
func Quiet() string {
	return getEnv("GLSAMAKER_QUIET", "false")
}

// LogFile returns GLSAMAKER_LOG_FILE, or the default error-log path.
func LogFile() string {
	return getEnv("GLSAMAKER_LOG_FILE", "/var/log/glsamaker/errors.log")
}

// Version returns GLSAMAKER_VERSION, or "v0.1.0" if unset or empty.
func Version() string {
	return getEnv("GLSAMAKER_VERSION", "v0.1.0")
}

// Port returns GLSAMAKER_PORT, or "5000" if unset or empty.
func Port() string {
	return getEnv("GLSAMAKER_PORT", "5000")
}

// AdminEmail returns GLSAMAKER_EMAIL, or "admin@gentoo.org" if unset or empty.
func AdminEmail() string {
	return getEnv("GLSAMAKER_EMAIL", "admin@gentoo.org")
}

// AdminInitialPassword returns GLSAMAKER_INITIAL_ADMIN_PASSWORD, or "admin".
func AdminInitialPassword() string {
	return getEnv("GLSAMAKER_INITIAL_ADMIN_PASSWORD", "admin")
}

// CacheControl returns GLSAMAKER_CACHE_CONTROL, or "max-age=300".
func CacheControl() string {
	return getEnv("GLSAMAKER_CACHE_CONTROL", "max-age=300")
}
// getEnv returns the value of the environment variable key, falling back to
// fallback when the variable is unset or empty.
func getEnv(key string, fallback string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return fallback
}
|
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/jpeg"
"math"
"os"
"golang.org/x/image/tiff"
_ "golang.org/x/image/tiff"
"gonum.org/v1/gonum/mat"
)
// input and output hold the CLI flag values registered in init.
var input, output string

// init registers the -input/-output flags; main calls flag.Parse.
func init() {
	flag.StringVar(&input, "input", "", "input file")
	flag.StringVar(&output, "output", "", "output file")
}
// main fits a quadratic surface (1, x, y, x², y², xy) to each colour channel
// of the input image and writes the smoothed result to the output file in the
// input's format (JPEG or TIFF).
func main() {
	flag.Parse()
	if len(input) == 0 {
		fmt.Fprintf(os.Stderr, "missing input file name\n")
		flag.Usage()
		os.Exit(1)
	}
	if len(output) == 0 {
		fmt.Fprintf(os.Stderr, "missing output file name\n")
		flag.Usage()
		os.Exit(1)
	}
	f, err := os.Open(input)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(255)
	}
	defer f.Close()
	inImg, imageType, err := image.Decode(f)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(255)
	}
	// Build the design matrix: one row of basis terms per pixel.
	// NOTE(review): assumes bounds.Min is (0,0) — true for decoded images here.
	bounds := inImg.Bounds()
	coordsData := make([]float64, 0, bounds.Max.X*bounds.Max.Y*6)
	for y := 0; y < bounds.Max.Y; y++ {
		for x := 0; x < bounds.Max.X; x++ {
			coordsData = append(coordsData, 1, float64(x), float64(y), float64(x*x), float64(y*y), float64(x*y))
		}
	}
	coords := mat.NewDense(bounds.Max.X*bounds.Max.Y, 6, coordsData)
	red := getSolution(inImg, coords, func(c color.Color) float64 {
		r, _, _, _ := c.RGBA()
		return float64(r)
	})
	green := getSolution(inImg, coords, func(c color.Color) float64 {
		_, g, _, _ := c.RGBA()
		return float64(g)
	})
	blue := getSolution(inImg, coords, func(c color.Color) float64 {
		_, _, b, _ := c.RGBA()
		return float64(b)
	})
	// Evaluate the fitted polynomial at every pixel.
	outImg := image.NewNRGBA64(bounds)
	for y := 0; y < bounds.Max.Y; y++ {
		for x := 0; x < bounds.Max.X; x++ {
			outImg.Set(x, y, color.NRGBA64{pixel(red, x, y), pixel(green, x, y), pixel(blue, x, y), 0xffff})
		}
	}
	// Fixed: O_TRUNC so an existing, longer file does not keep a stale tail,
	// and the handle is now closed (it was previously leaked, risking an
	// unflushed/partial image on some platforms).
	out, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot open output file %s for writing: %s\n", output, err)
		os.Exit(2)
	}
	defer out.Close()
	switch imageType {
	case "jpeg":
		err = jpeg.Encode(out, outImg, nil)
		if err != nil {
			fmt.Fprintf(os.Stderr, "cannot write JPEG image to file %s: %s\n", output, err)
			os.Exit(2)
		}
	case "tiff":
		err = tiff.Encode(out, outImg, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
		if err != nil {
			fmt.Fprintf(os.Stderr, "cannot write TIFF image to file %s: %s\n", output, err)
			os.Exit(2)
		}
	default:
		fmt.Fprintf(os.Stderr, "unknown image format\n")
	}
}
// getSolution fits the quadratic basis in coords to one colour channel of the
// image (extracted by cf) and returns the 6x1 coefficient vector.
// NOTE(review): the error returned by Solve is discarded; a rank-deficient
// system would leave the result unusable — consider checking it.
func getSolution(i image.Image, coords *mat.Dense, cf func(color.Color) float64) *mat.Dense {
	values := getValues(i, cf)
	solution := mat.NewDense(6, 1, nil)
	solution.Solve(coords, values)
	return solution
}
// getValues flattens one colour channel of the image (extracted by cf) into a
// column vector with one row per pixel, in row-major order.
func getValues(i image.Image, cf func(color.Color) float64) *mat.Dense {
	bounds := i.Bounds()
	w, h := bounds.Max.X, bounds.Max.Y
	data := make([]float64, w*h)
	idx := 0
	for row := 0; row < h; row++ {
		for col := 0; col < w; col++ {
			data[idx] = cf(i.At(col, row))
			idx++
		}
	}
	return mat.NewDense(w*h, 1, data)
}
// pixel evaluates the fitted quadratic at (x, y) and converts the result to a
// 16-bit channel value, clamping to [0, 0xffff]. The original only clamped
// the lower bound; converting a float64 above 65535 to uint16 is
// implementation-defined in Go and produced garbage pixels.
func pixel(d *mat.Dense, x, y int) uint16 {
	v := d.At(0, 0) +
		d.At(1, 0)*float64(x) +
		d.At(2, 0)*float64(y) +
		d.At(3, 0)*float64(x*x) +
		d.At(4, 0)*float64(y*y) +
		d.At(5, 0)*float64(x*y)
	return uint16(math.Max(0, math.Min(v, 0xffff)))
}
|
package v1
import (
"context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// AddRequested is invoked when the invoker requests to add an activity.
// It is deliberately not implemented and always reports codes.Unimplemented.
func (h Handle) AddRequested(
	_ context.Context,
	_ string,
	_ string,
	_ string,
) error {
	return status.Error(codes.Unimplemented, "Out of scope")
}
|
// reverseBits returns num with its 32 bits in reversed order (bit 0 becomes
// bit 31 and so on). time: O(1), space: O(1).
func reverseBits(num uint32) uint32 {
	var out uint32
	for bit := 0; bit < 32; bit++ {
		out = (out << 1) | ((num >> bit) & 1)
	}
	return out
}
|
package leagueranker
import (
"bufio"
"log"
"strings"
"testing"
)
// addCases are table-driven fixtures for TestRankedTeams. `want` holds either
// the expected parse-error text (for malformed input lines) or the expected
// rendered ranking table (for well-formed input).
var addCases = []struct {
	description string
	in          string
	want        string
}{
	{
		"No space between team and score",
		`Lions3, Snakes 3`,
		`invalid line format`,
	},
	{
		"No commas between team scores",
		`Lions 3 Snakes 1`,
		"invalid line format",
	},
	{
		"Too many teams",
		`Lions 3, Snakes 1, Sharks 5`,
		"invalid line format",
	},
	{
		"Successful sorting",
		`Cats 1, Astronaut 1
Bats 1, Astronaut 1
Cats 1, Bats 1`,
		`1. Astronaut, 2 pts
1. Bats, 2 pts
1. Cats, 2 pts`,
	},
	{
		"Case of team names matter",
		`Lions 5, Snakes 0
lions 1, FC Awesome 0`,
		`1. Lions, 3 pts
1. lions, 3 pts
3. FC Awesome, 0 pts
3. Snakes, 0 pts`,
	},
	{
		"Success test case",
		`Lions 3, Snakes 3
Tarantulas 1, FC Awesome 0
Lions 1, FC Awesome 1
Tarantulas 3, Snakes 1
Lions 4, Grouches 0`,
		`1. Tarantulas, 6 pts
2. Lions, 5 pts
3. FC Awesome, 1 pt
3. Snakes, 1 pt
5. Grouches, 0 pts`,
	},
}
// TestRankedTeams feeds each fixture through the ranker line by line and
// compares either the parse-error text or the rendered table against tc.want.
func TestRankedTeams(t *testing.T) {
	// Fixed: this helper used t.Fatalf, which aborts the whole test on the
	// first mismatch and made the callers' `continue` unreachable; Errorf
	// records the failure and lets the remaining cases run.
	displayResult := func(description, got, want string) {
		t.Errorf(`FAIL: %s
got:
%s
want:
%s`, description, got, want)
	}
	for _, tc := range addCases {
		r := bufio.NewReader(strings.NewReader(tc.in))
		ranker, err := NewRanker()
		if err != nil {
			// NOTE(review): log.Fatalln (os.Exit) bypasses the testing
			// framework's reporting; kept for behavioural compatibility.
			log.Fatalln(err)
		}
		scanner := bufio.NewScanner(r)
		hasErrors := false
		for scanner.Scan() {
			err = ranker.Parse(scanner.Text())
			if err != nil {
				hasErrors = true
				if err.Error() != tc.want {
					displayResult(tc.description, err.Error(), tc.want)
					continue
				}
			}
		}
		if hasErrors {
			continue
		}
		teams := ranker.RankedTeams()
		got := GetOutput(teams)
		if got != tc.want {
			displayResult(tc.description, got, tc.want)
		}
	}
}
// BenchmarkParse measures parsing of a single well-formed score line.
func BenchmarkParse(b *testing.B) {
	ranker, _ := NewRanker()
	for i := 0; i < b.N; i++ {
		// Parse error deliberately ignored: the input is known-valid.
		ranker.Parse("Lions 3, Snakes 3")
	}
}

// BenchmarkRankedTeams measures ranking over a small pre-parsed fixture.
func BenchmarkRankedTeams(b *testing.B) {
	ranker, _ := NewRanker()
	ranker.Parse("Lions 3, Snakes 3")
	ranker.Parse("Tarantulas 1, FC Awesome 0")
	ranker.Parse("Lions 1, FC Awesome 1")
	ranker.Parse("Tarantulas 3, Snakes 1")
	ranker.Parse("Lions 4, Grouches 0")
	for i := 0; i < b.N; i++ {
		ranker.RankedTeams()
	}
}
|
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
authorizationmgmt "github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2015-07-01/authorization"
"github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2015-07-01/authorization/authorizationapi"
)
// Compile-time check that the mock satisfies the SDK interface.
var _ authorizationapi.RoleAssignmentsClientAPI = &MockRoleAssignmentsClient{}

// MockRoleAssignmentsClient is a fake implementation of
// authorization.RoleAssignmentsClient. Each MockX field supplies the
// behaviour of the corresponding interface method.
type MockRoleAssignmentsClient struct {
	MockCreate func(ctx context.Context, scope string, roleAssignmentName string, parameters authorizationmgmt.RoleAssignmentCreateParameters) (result authorizationmgmt.RoleAssignment, err error)
	MockCreateByID func(ctx context.Context, roleAssignmentID string, parameters authorizationmgmt.RoleAssignmentCreateParameters) (result authorizationmgmt.RoleAssignment, err error)
	MockDelete func(ctx context.Context, scope string, roleAssignmentName string) (result authorizationmgmt.RoleAssignment, err error)
	MockDeleteByID func(ctx context.Context, roleAssignmentID string) (result authorizationmgmt.RoleAssignment, err error)
	MockGet func(ctx context.Context, scope string, roleAssignmentName string) (result authorizationmgmt.RoleAssignment, err error)
	MockGetByID func(ctx context.Context, roleAssignmentID string) (result authorizationmgmt.RoleAssignment, err error)
	MockList func(ctx context.Context, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error)
	MockListForResource func(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error)
	MockListForResourceGroup func(ctx context.Context, resourceGroupName string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error)
	MockListForScope func(ctx context.Context, scope string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error)
}
// Create calls the MockRoleAssignmentsClient's MockCreate method.
// It panics if MockCreate is nil.
func (c *MockRoleAssignmentsClient) Create(ctx context.Context, scope string, roleAssignmentName string, parameters authorizationmgmt.RoleAssignmentCreateParameters) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockCreate(ctx, scope, roleAssignmentName, parameters)
}

// CreateByID calls the MockRoleAssignmentsClient's MockCreateByID method.
// It panics if MockCreateByID is nil.
func (c *MockRoleAssignmentsClient) CreateByID(ctx context.Context, roleAssignmentID string, parameters authorizationmgmt.RoleAssignmentCreateParameters) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockCreateByID(ctx, roleAssignmentID, parameters)
}

// Delete calls the MockRoleAssignmentsClient's MockDelete method.
// It panics if MockDelete is nil.
func (c *MockRoleAssignmentsClient) Delete(ctx context.Context, scope string, roleAssignmentName string) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockDelete(ctx, scope, roleAssignmentName)
}

// DeleteByID calls the MockRoleAssignmentsClient's MockDeleteByID method.
// It panics if MockDeleteByID is nil.
func (c *MockRoleAssignmentsClient) DeleteByID(ctx context.Context, roleAssignmentID string) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockDeleteByID(ctx, roleAssignmentID)
}

// Get calls the MockRoleAssignmentsClient's MockGet method.
// It panics if MockGet is nil.
func (c *MockRoleAssignmentsClient) Get(ctx context.Context, scope string, roleAssignmentName string) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockGet(ctx, scope, roleAssignmentName)
}

// GetByID calls the MockRoleAssignmentsClient's MockGetByID method.
// It panics if MockGetByID is nil.
func (c *MockRoleAssignmentsClient) GetByID(ctx context.Context, roleAssignmentID string) (result authorizationmgmt.RoleAssignment, err error) {
	return c.MockGetByID(ctx, roleAssignmentID)
}

// List calls the MockRoleAssignmentsClient's MockList method.
// It panics if MockList is nil.
func (c *MockRoleAssignmentsClient) List(ctx context.Context, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error) {
	return c.MockList(ctx, filter)
}

// ListForResource calls the MockRoleAssignmentsClient's MockListForResource method.
// It panics if MockListForResource is nil.
func (c *MockRoleAssignmentsClient) ListForResource(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error) {
	return c.MockListForResource(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter)
}

// ListForResourceGroup calls the MockRoleAssignmentsClient's MockListForResourceGroup method.
// It panics if MockListForResourceGroup is nil.
func (c *MockRoleAssignmentsClient) ListForResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error) {
	return c.MockListForResourceGroup(ctx, resourceGroupName, filter)
}

// ListForScope calls the MockRoleAssignmentsClient's MockListForScope method.
// It panics if MockListForScope is nil.
func (c *MockRoleAssignmentsClient) ListForScope(ctx context.Context, scope string, filter string) (result authorizationmgmt.RoleAssignmentListResultPage, err error) {
	return c.MockListForScope(ctx, scope, filter)
}
|
package main
import (
"container/list"
"fmt"
)
// Item is an arbitrary tree input value; buildTree skips non-int items.
type Item interface{}

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// Tree is a binary search tree identified by its root.
type Tree struct {
	root *TreeNode
}
// Insert adds key to the tree, creating the root when the tree is empty.
func (bst *Tree) Insert(key int) {
	node := &TreeNode{Val: key}
	if bst.root == nil {
		bst.root = node
		return
	}
	insertNode(bst.root, node)
}
// insertNode places newNode in the subtree rooted at node: keys smaller than
// the current node descend left, all others descend right.
func insertNode(node, newNode *TreeNode) {
	branch := &node.Right
	if newNode.Val < node.Val {
		branch = &node.Left
	}
	if *branch == nil {
		*branch = newNode
		return
	}
	insertNode(*branch, newNode)
}
// String prints the whole tree between separator lines.
// NOTE(review): despite the name it returns nothing, so it does not satisfy
// fmt.Stringer — consider renaming (interface kept unchanged here).
func (bst *Tree) String() {
	fmt.Println("------------------------------------------------")
	stringify(bst.root, 0)
	fmt.Println("------------------------------------------------")
}
// stringify prints the subtree rooted at n as an in-order, indented listing;
// each level of depth adds leading spaces before the "---[ " marker.
func stringify(n *TreeNode, level int) {
	if n != nil {
		format := ""
		for i := 0; i < level; i++ {
			format += " "
		}
		format += "---[ "
		level++
		stringify(n.Left, level)
		fmt.Printf(format+"%d\n", n.Val)
		stringify(n.Right, level)
	}
}
// buildTree inserts every int item of testCase into a fresh BST (non-int
// items, e.g. nil placeholders, are skipped), prints the tree for visual
// inspection, and returns it.
func buildTree(testCase *[]Item) *Tree {
	var bst Tree
	for _, item := range *testCase {
		key, ok := item.(int)
		if ok {
			bst.Insert(key)
		}
	}
	bst.String()
	return &bst
}
// rangeSumBST returns the sum of all node values v with L <= v <= R, walking
// the entire tree breadth-first (nil children are tolerated).
func rangeSumBST(root *TreeNode, L int, R int) int {
	sum := 0
	queue := list.New()
	queue.PushBack(root)
	for queue.Len() > 0 {
		front := queue.Front()
		queue.Remove(front)
		node, _ := front.Value.(*TreeNode)
		if node == nil {
			continue
		}
		if L <= node.Val && node.Val <= R {
			sum += node.Val
		}
		queue.PushBack(node.Left)
		queue.PushBack(node.Right)
	}
	return sum
}
// main runs two fixed rangeSumBST cases and prints a message on mismatch.
func main() {
	// Tree: 10 / 5,15 / 3,7,nil,18 — expected sum of values in [7,15] is 32.
	case1 := []Item{10, 5, 15, 3, 7, nil, 18}
	bst := buildTree(&case1)
	caseResult := rangeSumBST(bst.root, 7, 15)
	if caseResult != 32 {
		fmt.Printf("expected 32, return %v", caseResult)
	}
	// Larger tree — expected sum of values in [6,10] is 23.
	case2 := []Item{10, 5, 15, 3, 7, 13, 18, 1, nil, 6}
	bst = buildTree(&case2)
	caseResult = rangeSumBST(bst.root, 6, 10)
	if caseResult != 23 {
		fmt.Printf("expected 23, return %v", caseResult)
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//240. Search a 2D Matrix II
//Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
//Integers in each row are sorted in ascending order from left to right.
//Integers in each column are sorted in ascending order from top to bottom.
//For example,
//Consider the following matrix:
//[
// [1, 4, 7, 11, 15],
// [2, 5, 8, 12, 19],
// [3, 6, 9, 16, 22],
// [10, 13, 14, 17, 24],
// [18, 21, 23, 26, 30]
//]
//Given target = 5, return true.
//Given target = 20, return false.
//func searchMatrix(matrix [][]int, target int) bool {
//}
// Time Is Money |
package spider
import (
"net/url"
"testing"
"time"
"github.com/Willyham/gospider/spider/internal/concurrency"
"github.com/Willyham/gospider/spider/mocks"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
var willydURL, _ = url.Parse("http://willdemaine.co.uk")
var willydRobots, _ = url.Parse("http://willdemaine.co.uk/robots.txt")
// TestReadRobotsData verifies that a fetched robots.txt is parsed and that
// its Disallow rules block exactly the matching path prefixes.
func TestReadRobotsData(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return([]byte(`
	User-agent: *
	Disallow: /foo/
	Disallow: /bar/
	`), nil)
	s := New(
		WithRoot(willydURL),
		WithRequester(requester),
		WithUserAgent("agent"),
	)
	data, err := s.readRobotsData(willydURL)
	assert.NoError(t, err)
	assert.True(t, data.TestAgent("/", "Agent"))
	assert.False(t, data.TestAgent("/foo/a", "Agent"))
	assert.False(t, data.TestAgent("/bar/a", "Agent"))
	// "/foo" (no trailing slash) is not covered by the "/foo/" rule.
	assert.True(t, data.TestAgent("/foo", "Agent"))
	assert.True(t, data.TestAgent("/asdf", "Agent"))
}
// TestNoRoot verifies that constructing a spider without a root URL panics.
func TestNoRoot(t *testing.T) {
	assert.Panics(t, func() {
		New()
	})
}
// TestReadRobotsDataHTTPError verifies that a 5xx response for robots.txt is
// not treated as a failure but results in a deny-all policy.
func TestReadRobotsDataHTTPError(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return([]byte{}, httpResponseError{
		statusCode: 500,
	})
	s := New(
		WithRoot(willydURL),
		WithRequester(requester),
	)
	data, err := s.readRobotsData(willydURL)
	assert.NoError(t, err)
	// On server error everything is disallowed.
	assert.False(t, data.TestAgent("/", "Foo"))
}
// TestReadRobotsDataError verifies that a transport-level error (not an HTTP
// status error) while fetching robots.txt is propagated to the caller.
func TestReadRobotsDataError(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return([]byte{}, assert.AnError)
	s := New(
		WithRoot(willydURL),
		WithRequester(requester),
	)
	_, err := s.readRobotsData(willydURL)
	assert.Error(t, err)
}
// TestReadRobotsDataMissing verifies that a 404 for robots.txt yields an
// allow-all policy, per robots.txt convention.
func TestReadRobotsDataMissing(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return([]byte{}, httpResponseError{
		statusCode: 404,
	})
	s := New(
		WithRoot(willydURL),
		WithRequester(requester),
	)
	data, err := s.readRobotsData(willydURL)
	assert.NoError(t, err)
	// Missing robots.txt means everything is allowed.
	assert.True(t, data.TestAgent("/", "Foo"))
}
// TestWorkerNoItems verifies that a worker with an empty queue returns
// cleanly. The wg.Add balances the Done the worker performs internally.
func TestWorkerNoItems(t *testing.T) {
	s := New(WithRoot(willydURL))
	s.wg.Add(1)
	err := s.work()
	assert.NoError(t, err)
}
// TestWorker verifies that processing one page extracts its link, resolves
// it against the root, and appends it back onto the crawl queue.
func TestWorker(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydURL).Return([]byte(`
	<a href="/foo/bar"></a>
	`), nil)
	s := New(
		WithRoot(willydURL),
		WithRequester(requester),
		WithConcurrency(1),
		WithIgnoreRobots(false),
		WithTimeout(time.Minute),
	)
	s.queue.Append(willydURL)
	s.wg.Add(1)
	err := s.work()
	assert.NoError(t, err)
	// The root was consumed; the discovered link is the only queued URL.
	assert.Len(t, s.queue.urls, 1)
	assert.Equal(t, "http://willdemaine.co.uk/foo/bar", s.queue.urls[0].String())
}
// TestWorkerRequestError verifies that a failing page request surfaces as a
// worker error.
func TestWorkerRequestError(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydURL).Return(nil, httpResponseError{
		statusCode: 500,
	})
	s := New(WithRoot(willydURL), WithRequester(requester))
	s.queue.Append(willydURL)
	s.wg.Add(1)
	err := s.work()
	assert.Error(t, err)
}
// TestRun drives the full Run loop with a stub worker that drains the queue
// and counts down the WaitGroup, so Run terminates without real crawling.
func TestRun(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydURL).Return([]byte("foo"), nil)
	s := New(
		WithRoot(willydURL),
		WithConcurrency(1),
		WithRequester(requester),
		WithIgnoreRobots(true), // So we don't request robots.txt
	)
	s.worker = concurrency.WorkFunc(func() error {
		next := s.queue.Next()
		if next == nil {
			return nil
		}
		// Mark one queued item as processed so Run's wait completes.
		defer s.wg.Done()
		return nil
	})
	err := s.Run()
	assert.NoError(t, err)
}
// TestRunRobots is the same as TestRun but with robots.txt fetching enabled;
// both the robots URL and the root URL are stubbed.
func TestRunRobots(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return([]byte("foo"), nil)
	requester.On("Request", mock.Anything, willydURL).Return([]byte("foo"), nil)
	s := New(
		WithRoot(willydURL),
		WithConcurrency(1),
		WithRequester(requester),
	)
	s.worker = concurrency.WorkFunc(func() error {
		next := s.queue.Next()
		if next == nil {
			return nil
		}
		// Mark one queued item as processed so Run's wait completes.
		defer s.wg.Done()
		return nil
	})
	err := s.Run()
	assert.NoError(t, err)
}
// TestRunRobotsError verifies that Run aborts with an error when the initial
// robots.txt fetch fails with a non-HTTP error.
func TestRunRobotsError(t *testing.T) {
	requester := &mocks.Requester{}
	requester.On("Request", mock.Anything, willydRobots).Return(nil, assert.AnError)
	s := New(
		WithRoot(willydURL),
		WithConcurrency(1),
		WithRequester(requester),
	)
	err := s.Run()
	assert.Error(t, err)
}
|
package tvdbapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
)
// searchData is the envelope of the /search/series response; the API wraps
// the result list in a top-level "data" key.
type searchData struct {
	Series []Series `json:"data"`
}
// SearchQuery holds the optional filters for Client.Search. Empty fields
// are omitted from the request; AcceptLanguage, when set, is sent as the
// Accept-Language header.
type SearchQuery struct {
	Name string
	ImdbId string
	Zap2itId string
	AcceptLanguage string
}
// AiredTime wraps time.Time so that TheTVDB's "YYYY-MM-DD" date strings can
// be decoded from JSON.
type AiredTime struct {
	time.Time
}

// ctLayout is the reference layout for TheTVDB date values.
const ctLayout = "2006-01-02"

// UnmarshalJSON decodes a quoted "YYYY-MM-DD" value. An empty string or a
// JSON null decodes to the sentinel date 2999-12-31 (the previous comment
// claimed 1900-01-01, but 2999-12-31 is what the code has always produced).
func (ct *AiredTime) UnmarshalJSON(b []byte) (err error) {
	// Guard the length before indexing: the old code panicked on empty input.
	if len(b) >= 2 && b[0] == '"' && b[len(b)-1] == '"' {
		b = b[1 : len(b)-1]
	}
	if len(b) == 0 || string(b) == "null" {
		// Sometimes AiredDate is absent or empty; use the far-future sentinel.
		ct.Time, err = time.Parse(ctLayout, "2999-12-31")
		return
	}
	ct.Time, err = time.Parse(ctLayout, string(b))
	return
}
// Series is one TheTVDB series record as returned by the search and
// series-info endpoints. Field names mirror the API's JSON keys.
type Series struct {
	Id int `json:"id"`
	SeriesName string `json:"seriesName"`
	Aliases []string `json:"aliases"`
	Banner string `json:"banner"`
	SeriesId string `json:"seriesId"`
	Status string `json:"status"`
	// FirstAired uses the custom AiredTime parser ("2006-01-02" layout,
	// empty values map to the 2999-12-31 sentinel).
	FirstAired AiredTime `json:"firstAired"`
	Network string `json:"network"`
	NetworkId string `json:"networkId"`
	Runtime string `json:"runtime"`
	Genre []string `json:"genre"`
	Overview string `json:"overview"`
	// LastUpdated is a Unix epoch, not a formatted date.
	LastUpdated int `json:"lastUpdated"`
	AirsDayOfWeek string `json:"airsDayOfWeek"`
	AirsTime string `json:"airsTime"`
	Rating string `json:"rating"`
	ImdbId string `json:"imdbId"`
	Zap2itId string `json:"zap2itId"`
	Added string `json:"added"`
	SiteRating float32 `json:"siteRating"`
	SiteRatingCount int `json:"siteRatingCount"`
}
// seriesInfoData is the envelope of the /series/{id} response; the API
// wraps the record in a top-level "data" key.
type seriesInfoData struct {
	Series Series `json:"data"`
}
type Update struct {
Updated time.Time `json:"lastUpdated"`
SeriesId int `json:""id`
}
// updates is the envelope of the /updated/query response ("data" wrapper).
type updates struct {
	Updates []Update `json:"data"`
}
// Search queries TheTVDB's /search/series endpoint with the filters set in
// query. On any transport, read, or decode failure the error is logged and
// the (possibly empty) slice collected so far is returned, so callers
// always receive a usable value.
func (client Client) Search(query SearchQuery) []Series {
	result := searchData{}
	values := url.Values{}
	if query.Name != "" {
		values.Add("name", query.Name)
	}
	if query.ImdbId != "" {
		values.Add("imdbId", query.ImdbId)
	}
	if query.Zap2itId != "" {
		values.Add("zap2itId", query.Zap2itId)
	}
	// Named endpoint instead of "url" to avoid shadowing the net/url package.
	endpoint := fmt.Sprintf("https://api.thetvdb.com/search/series?%s", values.Encode())
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		log.Println(err)
		return result.Series
	}
	req.Header.Add("authorization", "Bearer "+client.ApiToken)
	if query.AcceptLanguage != "" {
		req.Header.Add("Accept-Language", query.AcceptLanguage)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		// Previously this error was ignored, so the deferred
		// res.Body.Close() dereferenced a nil response on failure.
		log.Println(err)
		return result.Series
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Println(err)
		return result.Series
	}
	if err := json.Unmarshal(body, &result); err != nil {
		// log.Fatal here would kill the whole process on one bad
		// payload; log and return what we have instead.
		log.Println(err)
		return result.Series
	}
	log.Println("search completed successfully")
	log.Println(fmt.Sprintf("Total found: %v", len(result.Series)))
	return result.Series
}
// GetSeriesInfo fetches the full record for the given series; it is a
// convenience wrapper around GetSeriesInfoById using series.Id.
func (client Client) GetSeriesInfo(series Series) Series {
	return client.GetSeriesInfoById(series.Id)
}
// GetSeriesInfoById fetches the full series record for seriesId from the
// /series/{id} endpoint. On any transport, read, or decode failure the
// error is logged and the zero-valued Series is returned.
func (client Client) GetSeriesInfoById(seriesId int) Series {
	result := seriesInfoData{}
	endpoint := fmt.Sprintf("https://api.thetvdb.com/series/%v", seriesId)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		log.Println(err)
		return result.Series
	}
	req.Header.Add("authorization", "Bearer "+client.ApiToken)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		// Previously ignored; a failed request made the deferred Close
		// panic on a nil response.
		log.Println(err)
		return result.Series
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Println(err)
		return result.Series
	}
	if err := json.Unmarshal(body, &result); err != nil {
		// log.Fatal would terminate the process; log and return instead.
		log.Println(err)
		return result.Series
	}
	log.Println("get series info completed successfully")
	// LastUpdated is an int, so use %v (the old %s printed %!s(int=...)).
	log.Println(fmt.Sprintf("Series: %s; ImdbId: %s; LastUpdated: %v; Zip2itid: %s; First Aired: %v",
		result.Series.SeriesName,
		result.Series.ImdbId,
		result.Series.LastUpdated,
		result.Series.Zap2itId,
		result.Series.FirstAired))
	return result.Series
}
func (client Client) GetUpdate(fromTime time.Time) ([]Update, error) {
var result []Update
var updates updates
url := fmt.Sprintf("https://api.thetvdb.com/updated/query?fromTime=%s", fromTime.Format("UnixTime"))
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return result, err
}
req.Header.Add("authorization", "Bearer " + client.ApiToken)
res, err := http.DefaultClient.Do(req)
defer res.Body.Close()
if err != nil {
return result, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return result, err
}
err = json.Unmarshal(body, &updates)
if err != nil {
return result, err
}
return updates.Updates, nil
} |
package utils
import (
"ginDemo/models"
"github.com/dgrijalva/jwt-go"
"time"
)
// jwtSecret is the HMAC key used to sign and verify tokens, read from the
// server configuration at package init time.
var jwtSecret = []byte(Settings.Server.JWTSecret)
// Claims is the JWT payload issued by GenerateToken: the author's id and
// username plus the registered standard claims (expiry, issuer).
type Claims struct {
	UserID uint64
	Username string
	jwt.StandardClaims
}
// GenerateToken builds a signed HS256 JWT for the given author, valid for
// 24 hours from now and issued by "ginDemo".
func GenerateToken(user models.Author) (string, error) {
	claims := Claims{
		UserID:   user.ID,
		Username: user.Username,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(24 * time.Hour).Unix(),
			Issuer:    "ginDemo",
		},
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(jwtSecret)
}
// ParseToken validates a token produced by GenerateToken and returns its
// claims. For an unparsable or invalid token it returns nil together with
// the parser's error.
func ParseToken(token string) (*Claims, error) {
	jwtToken, err := jwt.ParseWithClaims(token, &Claims{}, func(token *jwt.Token) (interface{}, error) {
		return jwtSecret, nil
	})
	if jwtToken == nil || !jwtToken.Valid {
		return nil, err
	}
	return jwtToken.Claims.(*Claims), nil
}
|
package models
import "time"
// EnergyResource is an energy resource row joined with its attribute data
// (CO2/NCV values and their source). EnergyResourceAttributeId is omitted
// from JSON when zero.
type EnergyResource struct {
	EnergyResourceAttributeId int `json:"energyResourceAttributeId,omitempty" db:"EnergyResourceAttributeId"`
	EnergyResourceAttributeName string `json:"energyResourceAttributeName" db:"EnergyResourceAttributeName"`
	GUSResourceId int `json:"GUSResourceId" db:"GUSResourceId"`
	SourceId int `json:"sourceId" db:"SourceId"`
	CO2Value float32 `json:"CO2Value" db:"CO2Value"`
	NCVValue float32 `json:"NCVValue" db:"NCVValue"`
	CO2UnitId int `json:"CO2UnitId" db:"CO2UnitId"`
}
// EnergyResourceAttribute holds the CO2/NCV values of one energy resource
// together with the id of the source they were taken from.
type EnergyResourceAttribute struct {
	EnergyResourceAttributeId int `json:"energyResourceAttributeId" db:"EnergyResourceAttributeId"`
	EnergyResourceId int `json:"energyResourceId" db:"EnergyResourceId"`
	SourceId int `json:"sourceId" db:"SourceId"`
	CO2Value float32 `json:"CO2Value" db:"CO2Value"`
	NCVValue float32 `json:"NCVValue" db:"NCVValue"`
	CO2UnitId int `json:"CO2UnitId" db:"CO2UnitId"`
}
// EnergyResourceAttributeEdit is EnergyResourceAttribute extended with the
// resource's name and GUS id, used by the edit views.
type EnergyResourceAttributeEdit struct {
	EnergyResourceAttributeId int `json:"energyResourceAttributeId" db:"EnergyResourceAttributeId"`
	EnergyResourceId int `json:"energyResourceId" db:"EnergyResourceId"`
	SourceId int `json:"sourceId" db:"SourceId"`
	CO2Value float32 `json:"CO2Value" db:"CO2Value"`
	NCVValue float32 `json:"NCVValue" db:"NCVValue"`
	CO2UnitId int `json:"CO2UnitId" db:"CO2UnitId"`
	EnergyResourceName string `json:"energyResourceName" db:"EnergyResourceName"`
	GUSResourceId int `json:"GUSResourceId" db:"GUSResourceId"`
}
// EnergyResourceAttributeForm is the fully denormalized attribute record
// (resource, GUS, source and unit details) backing the attribute form.
type EnergyResourceAttributeForm struct {
	EnergyResourceAttributeId int `json:"energyResourceAttributeId" db:"EnergyResourceAttributeId"`
	EnergyResourceId int `json:"energyResourceId" db:"EnergyResourceId"`
	EnergyResourceName string `json:"energyResourceName" db:"EnergyResourceName"`
	GUSResourceId int `json:"GUSResourceId" db:"GUSResourceId"`
	GUSId int `json:"GUSId" db:"GUSId"`
	SourceId int `json:"sourceId" db:"SourceId"`
	SourceDescription string `json:"sourceDescription" db:"SourceDescription"`
	SourceDate time.Time `json:"sourceDate" db:"SourceDate"`
	CO2Value float32 `json:"CO2Value" db:"CO2Value"`
	NCVValue float32 `json:"NCVValue" db:"NCVValue"`
	CO2UnitId int `json:"CO2UnitId" db:"CO2UnitId"`
	Ratio float32 `json:"ratio" db:"Ratio"`
	UnitShortName string `json:"unitShortName" db:"UnitShortName"`
}
// EnergyResourceEdit is the minimal shape used when editing an energy
// resource itself (name and GUS mapping only).
type EnergyResourceEdit struct {
	EnergyResourceId int `json:"energyResourceId" db:"EnergyResourceId"`
	EnergyResourceName string `json:"energyResourceName" db:"EnergyResourceName"`
	GUSResourceId int `json:"GUSResourceId" db:"GUSResourceId"`
}
|
// Copyright © 2021 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/banzaicloud/helm-s3/internal/awss3"
"github.com/banzaicloud/helm-s3/internal/awsutil"
)
// proxyCmd implements the "proxy" subcommand: it fetches the object at the
// given s3 URI and streams it to stdout.
type proxyCmd struct {
	// uri is the s3:// URI of the object to fetch.
	uri string
}
// indexYaml is the Helm repository index file name.
const indexYaml = "index.yaml"
// Run fetches the object at act.uri from S3 and writes its raw bytes to
// stdout. A missing index.yaml gets a dedicated hint suggesting
// "helm s3 init"; every other fetch failure is wrapped with the URI.
func (act proxyCmd) Run(ctx context.Context) error {
	sess, err := awsutil.Session(
		awsutil.AssumeRoleTokenProvider(awsutil.StderrTokenProvider),
		awsutil.DynamicBucketRegion(act.uri),
	)
	if err != nil {
		return err
	}
	raw, err := awss3.New(sess).FetchRaw(ctx, act.uri)
	if err == nil {
		fmt.Print(string(raw))
		return nil
	}
	if err == awss3.ErrObjectNotFound && strings.HasSuffix(act.uri, indexYaml) {
		repoURI := strings.TrimSuffix(strings.TrimSuffix(act.uri, indexYaml), "/")
		return fmt.Errorf(
			"The index file does not exist by the path %s. "+
				"If you haven't initialized the repository yet, try running \"helm s3 init %s\"",
			act.uri,
			repoURI,
		)
	}
	return errors.WithMessage(err, fmt.Sprintf("fetch from s3 uri=%s", act.uri))
}
|
/**
* Author: hashcode55 (Mehul Ahuja)
* Created: 11.05.2017
**/
package main
// Remove local imports
import (
"flag"
"github.com/HashCode55/GPython"
)
// main parses the -log flag and feeds a fixed expression to the GPython
// parse engine.
func main() {
	logEnabled := flag.Bool("log", true, "Set it to true to log the details.")
	flag.Parse()
	gpython.ParseEngine("hello = 3 * 6 - ( 5 / 2 )", *logEnabled)
}
|
package main
import (
"flag"
"fmt"
"github.com/golang/glog"
"strings"
"sync"
"time"
mvutil "podmove/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
// Global command-line configuration, populated by setFlags.
var (
	masterUrl string
	kubeConfig string
	nameSpace string
	podName string
	nodeName string
	k8sVersion string
	memLimit int
	cpuLimit int
)
// Retry/backoff tuning defaults.
// NOTE(review): these constants are not referenced anywhere in this file —
// confirm they are used elsewhere before removing.
const (
	defaultRetryLess = 2
	defaultSleep = time.Second * 10
	defaultWaitLockTimeOut = time.Second * 100
)
// setFlags registers and parses all command-line flags into the package
// globals, and forces glog output to stderr as well as files.
func setFlags() {
	flag.StringVar(&masterUrl, "masterUrl", "", "master url")
	flag.StringVar(&kubeConfig, "kubeConfig", "", "absolute path to the kubeconfig file")
	flag.StringVar(&nameSpace, "nameSpace", "default", "kubernetes object namespace")
	flag.StringVar(&podName, "podName", "myschedule-cpu-80", "the podNames to be handled, split by ','")
	flag.StringVar(&nodeName, "nodeName", "", "Destination of move")
	flag.StringVar(&k8sVersion, "k8sVersion", "1.6", "the version of Kubenetes cluster, candidates are 1.5 | 1.6")
	flag.IntVar(&memLimit, "memLimit", 0, "the memory limit in MB. 0 means no change")
	flag.IntVar(&cpuLimit, "cpuLimit", 0, "the cpu limit in m. 0 means no change")
	// Error deliberately ignored: "alsologtostderr" is registered by glog.
	flag.Set("alsologtostderr", "true")
	flag.Parse()
}
// movePod relocates one pod to nodeName, optionally patching its resource
// limits with newCapacity. Standalone (bare) pods are moved directly; pods
// owned by a controller go through the controller-aware move path.
// Returns the moved pod on success.
func movePod(client *kclient.Clientset, nameSpace, podName, nodeName string, newCapacity v1.ResourceList) (*v1.Pod, error) {
	podClient := client.CoreV1().Pods(nameSpace)
	id := fmt.Sprintf("%v/%v", nameSpace, podName)
	//1. get original Pod
	getOption := metav1.GetOptions{}
	pod, err := podClient.Get(podName, getOption)
	if err != nil {
		err = fmt.Errorf("move-aborted: get original pod:%v\n%v", id, err.Error())
		glog.Error(err.Error())
		return nil, err
	}
	// Warn but proceed when the pod already sits on the target node.
	if pod.Spec.NodeName == nodeName {
		glog.Warningf("move: pod %v is already on node: %v", id, nodeName)
	}
	glog.V(2).Infof("move-pod: begin to move %v from %v to %v",
		id, pod.Spec.NodeName, nodeName)
	//2. invalidate the schedulerName of parent controller
	parentKind, parentName, err := mvutil.ParseParentInfo(pod)
	if err != nil {
		return nil, fmt.Errorf("move-abort: cannot get pod-%v parent info: %v", id, err.Error())
	}
	//2.1 if pod is barely standalone pod, move it directly
	if parentKind == "" {
		glog.V(2).Infof("Going to move BarePod %v", id)
		return mvutil.MoveBarePod(client, pod, nodeName, newCapacity)
	}
	//2.2 if pod controlled by ReplicationController/ReplicaSet, then need to do more
	glog.V(2).Infof("Going to move pod %v controlled by %v-%v", id, parentKind, parentName)
	return mvutil.MovePod(client, pod, nodeName, newCapacity)
}
// movePods moves each pod of the comma-separated podNames list to nodeName
// concurrently and waits for all moves to finish. Per-pod failures are
// only logged; the function itself always returns nil.
func movePods(client *kclient.Clientset, nameSpace, podNames, nodeName string, newCapaicty v1.ResourceList) error {
	var wg sync.WaitGroup
	for _, pname := range strings.Split(podNames, ",") {
		name := strings.TrimSpace(pname)
		if name == "" {
			continue
		}
		wg.Add(1)
		// Pass the pod name as an argument so each goroutine owns its copy.
		go func(podName string) {
			defer wg.Done()
			rpod, err := movePod(client, nameSpace, podName, nodeName, newCapaicty)
			if err != nil {
				glog.Errorf("move pod[%s] failed: %v", podName, err)
				return
			}
			glog.V(2).Infof("sleep 10 seconds to check the final state")
			time.Sleep(time.Second * 10)
			if err := mvutil.CheckPodMoveHealth(client, nameSpace, rpod.Name, nodeName); err != nil {
				glog.Errorf("move pod[%s] failed: %v", podName, err)
				return
			}
			glog.V(2).Infof("move pod(%v/%v) to node-%v successfully", nameSpace, podName, nodeName)
		}(name)
	}
	wg.Wait()
	return nil
}
// main moves the pods named by -podName to -nodeName, optionally patching
// their cpu/memory limits from -cpuLimit/-memLimit.
func main() {
	setFlags()
	defer glog.Flush()
	kubeClient := mvutil.GetKubeClient(masterUrl, kubeConfig)
	if kubeClient == nil {
		glog.Errorf("failed to get a k8s client for masterUrl=[%v], kubeConfig=[%v]", masterUrl, kubeConfig)
		return
	}
	if nodeName == "" {
		glog.Errorf("nodeName should not be empty.")
		return
	}
	patchCapacity, err := mvutil.ParseInputLimit(cpuLimit, memLimit)
	if err != nil {
		// Fall back to an empty patch when limits cannot be parsed.
		glog.Errorf("Failed to parse input limits: %v", err)
		patchCapacity = make(v1.ResourceList)
	}
	if err := movePods(kubeClient, nameSpace, podName, nodeName, patchCapacity); err != nil {
		glog.Errorf("move pod failed: %v/%v, %v", nameSpace, podName, err.Error())
		return
	}
}
|
package main
import (
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"log"
"os"
"path/filepath"
"strings"
)
// Command-line flags, registered in init.
var (
	debug *bool
	providerPath *string
)
// schemaCheck produces a walker for a named schema; schemaWalker adapts a
// plain function to the ast.Visitor protocol (see Visit below).
type schemaCheck func(string) schemaWalker
type schemaWalker func(ast.Node) ast.Visitor
// Visit satisfies ast.Visitor by delegating to the wrapped function.
func (fn schemaWalker) Visit(node ast.Node) ast.Visitor {
	return fn(node)
}
// debugWalker returns a walker that dumps every visited AST node to stdout
// and keeps descending; intended for ad-hoc debugging only.
func debugWalker() schemaWalker {
	return schemaWalker(func(n ast.Node) ast.Visitor {
		fmt.Printf("%#v\n", n)
		return debugWalker()
	})
}
// checkFn validates one schema attribute: it receives the (still-quoted)
// attribute name, the attribute's composite literal, and the enclosing
// schema node.
type checkFn func(attributeName string, def *ast.CompositeLit, schema ast.Node) error

// checkFnFunc converts a function to checkFn.
// NOTE(review): this is an identity conversion — callers could use the
// plain conversion checkFn(fn) directly.
func checkFnFunc(fn func(attributeName string, def *ast.CompositeLit, schema ast.Node) error) checkFn {
	return checkFn(fn)
}
func checkDescription(attributeName string, def *ast.CompositeLit, schema ast.Node) error {
hasDescription := false
for _, elt := range def.Elts {
name := elt.(*ast.KeyValueExpr).Key.(*ast.Ident).Name
hasDescription = hasDescription || name == "Description"
}
if hasDescription {
return nil
}
return fmt.Errorf("%s: Missing Description attribute", attributeName)
}
// collectAttributeNames walks the schema and returns every attribute name
// found (string-literal map keys, quotes stripped).
func collectAttributeNames(schema ast.Node) []string {
	names := []string{}
	ast.Walk(attributeCollector(&names), schema)
	return names
}
// collectConflicts is a debugging stub: it walks the node with the printing
// debugWalker and always returns an empty slice.
// NOTE(review): nothing is ever appended to conflicts — confirm this
// function is intentionally unused before relying on its result.
func collectConflicts(node ast.Node) []string {
	conflicts := []string{}
	ast.Walk(debugWalker(), node)
	return conflicts
}
// checkConflictsWith validates that every attribute listed in this
// attribute's "ConflictsWith" entry exists in the surrounding schema.
// Returns nil when there is no ConflictsWith entry or all targets exist.
func checkConflictsWith(attributeName string, def *ast.CompositeLit, schema ast.Node) error {
	conflicts := []string{}
	for _, elt := range def.Elts {
		kv := elt.(*ast.KeyValueExpr)
		if kv.Key.(*ast.Ident).Name != "ConflictsWith" {
			continue
		}
		for _, conflict := range kv.Value.(*ast.CompositeLit).Elts {
			value := conflict.(*ast.BasicLit).Value
			// Strip the surrounding quotes of the string literal.
			conflicts = append(conflicts, value[1:len(value)-1])
		}
	}
	if len(conflicts) == 0 {
		return nil
	}
	attributeNames := collectAttributeNames(schema)
	// Removed the dead `_ = conflict` statement and the intermediate
	// []error slice; build the message list directly.
	errorMessages := []string{}
	for _, conflict := range conflicts {
		exists := false
		for _, attribute := range attributeNames {
			if attribute == conflict {
				exists = true
				break
			}
		}
		if !exists {
			errorMessages = append(errorMessages, fmt.Sprintf("conflict target %q does not exist", conflict))
		}
	}
	if len(errorMessages) == 0 {
		return nil
	}
	return fmt.Errorf("%s: %s", attributeName, strings.Join(errorMessages, ", "))
}
// checkAttributeName rejects the reserved attribute name "id".
// BUG FIX: callers pass lit.Value, which is still a quoted Go string
// literal (e.g. `"id"`), so the old comparison against the bare string
// "id" never matched; strip quotes/backticks before comparing.
func checkAttributeName(attributeName string, def *ast.CompositeLit, schema ast.Node) error {
	if strings.Trim(attributeName, "`\"") == "id" {
		return fmt.Errorf("%s: attribute name is reserved", attributeName)
	}
	return nil
}
// checks lists the per-attribute validations applied by attributeChecker.
var checks = []checkFn{
	checkFnFunc(checkDescription),
	checkFnFunc(checkAttributeName),
	checkFnFunc(checkConflictsWith),
}
// attributeCollector returns a walker that appends every string-literal map
// key (quotes stripped) to res. Returning nil stops descent below a matched
// key; other nodes keep descending with a fresh collector.
func attributeCollector(res *[]string) schemaWalker {
	return func(node ast.Node) ast.Visitor {
		if node == nil {
			return nil
		}
		k, ok := node.(*ast.KeyValueExpr)
		if !ok {
			return attributeCollector(res)
		}
		lit, ok := k.Key.(*ast.BasicLit)
		if !ok {
			// Non-literal key (e.g. an identifier): skip this subtree.
			return nil
		}
		// Drop the surrounding quotes of the literal.
		*res = append(*res, lit.Value[1:len(lit.Value)-1])
		return nil
	}
}
// attributeChecker returns a walker that runs every registered check on each
// attribute entry (string-literal key mapped to a composite literal) found
// under the given schema, printing failures with file and line position.
func attributeChecker(fset *token.FileSet, file string, schema ast.Node) schemaWalker {
	return func(node ast.Node) ast.Visitor {
		if node == nil {
			return nil
		}
		k, ok := node.(*ast.KeyValueExpr)
		if !ok {
			return attributeChecker(fset, file, schema)
		}
		lit, ok := k.Key.(*ast.BasicLit)
		if !ok {
			// Non-literal key: nothing to check in this subtree.
			return nil
		}
		vs, ok := k.Value.(*ast.CompositeLit)
		if !ok {
			return attributeChecker(fset, file, schema)
		}
		for _, check := range checks {
			// lit.Value is still quoted (e.g. `"name"`).
			err := check(lit.Value, vs, schema)
			if err != nil {
				fmt.Printf("%s:%#v %s\n", strings.Replace(file, *providerPath, "", -1), fset.Position(node.Pos()).Line, err.Error())
			}
		}
		return attributeChecker(fset, file, schema)
	}
}
// schemaChecker returns a walker that descends until it finds a typed
// composite literal (the schema map) and then switches to attribute-level
// checking rooted at that literal.
func schemaChecker(fset *token.FileSet, file string) schemaWalker {
	return func(node ast.Node) ast.Visitor {
		if node == nil {
			return nil
		}
		c, ok := node.(*ast.CompositeLit)
		if !ok {
			return schemaChecker(fset, file)
		}
		if c.Type == nil {
			// Untyped literal (a nested value); keep looking.
			return schemaChecker(fset, file)
		}
		return attributeChecker(fset, file, c)
	}
}
// schemaResourceChecker returns a walker that looks for a `Schema:` entry
// inside a resource definition and hands its value to schemaChecker.
func schemaResourceChecker(fset *token.FileSet, file string) schemaWalker {
	return func(node ast.Node) ast.Visitor {
		if node == nil {
			return nil
		}
		kv, ok := node.(*ast.KeyValueExpr)
		if !ok {
			return schemaResourceChecker(fset, file)
		}
		if v, ok := kv.Key.(*ast.Ident); !ok || v.Name != "Schema" {
			return schemaResourceChecker(fset, file)
		}
		return schemaChecker(fset, file)
	}
}
// schemaFinder returns a walker that locates functions returning exactly one
// *schema.Resource and descends into them with schemaResourceChecker; all
// other declarations are pruned.
func schemaFinder(fset *token.FileSet, file string) schemaWalker {
	return func(node ast.Node) ast.Visitor {
		if node == nil {
			return nil
		}
		fn, ok := node.(*ast.FuncDecl)
		if !ok {
			return schemaFinder(fset, file)
		}
		if fn.Type.Results == nil {
			return nil
		}
		if len(fn.Type.Results.List) != 1 {
			return nil
		}
		// The single result must be a pointer...
		ret, ok := fn.Type.Results.List[0].Type.(*ast.StarExpr)
		if !ok {
			return nil
		}
		// ...to a selector expression...
		sel, ok := ret.X.(*ast.SelectorExpr)
		if !ok {
			return nil
		}
		// ...spelling exactly schema.Resource.
		if sel.Sel.Name != "Resource" || sel.X.(*ast.Ident).Name != "schema" {
			return nil
		}
		return schemaResourceChecker(fset, file)
	}
}
// checkSchema parses one Go source file and runs the schema checks over it.
// A parse failure aborts the whole program.
func checkSchema(path string) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err.Error())
	}
	ast.Walk(schemaFinder(fset, path), f)
}
// init registers the flags and exits with usage when -provider-path is
// missing, since the tool cannot run without it.
func init() {
	providerPath = flag.String("provider-path", "", "path to the terraform provider to check")
	debug = flag.Bool("debug", false, "enable debug output")
	flag.Parse()
	if providerPath == nil || *providerPath == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}
}
// main walks the provider source tree and runs the schema checks on every
// non-test Go file.
func main() {
	filepath.Walk(*providerPath, func(path string, info os.FileInfo, err error) error {
		// Propagate filesystem errors; the err parameter was silently
		// ignored before.
		if err != nil {
			return err
		}
		// Skip directories, test files, and anything that is not Go source.
		if info.IsDir() || strings.HasSuffix(path, "_test.go") || !strings.HasSuffix(path, ".go") {
			return nil
		}
		checkSchema(path)
		return nil
	})
}
|
package main
import "net"
// listener bundles a TCP listen address with a channel on which newly
// accepted connections are delivered.
type listener struct {
	// newConns carries accepted connections to the consumer.
	// NOTE(review): net.Conn is already an interface; a *net.Conn
	// (pointer to interface) is almost never intended — confirm whether
	// a plain net.Conn channel was meant.
	newConns chan *net.Conn
	addr *net.TCPAddr
}
|
package tool
import "time"
// BeginningOfHour 获取 t 这个时间点所在小时的开始时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfHour(t time.Time, locationOffsetSeconds int) time.Time {
const secondsPerHour = 60 * 60
x := t.Unix()
x += int64(locationOffsetSeconds)
x = (x / secondsPerHour) * secondsPerHour
x -= int64(locationOffsetSeconds)
return time.Unix(x, 0).In(t.Location())
}
// BeginningOfDay 获取 t 这个时间点所在天的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfDay(t time.Time, locationOffsetSeconds int) time.Time {
const secondsPerDay = 24 * 60 * 60
x := t.Unix()
x += int64(locationOffsetSeconds)
x = (x / secondsPerDay) * secondsPerDay
x -= int64(locationOffsetSeconds)
return time.Unix(x, 0).In(t.Location())
}
// MondayOfWeek 获取 t 这个时间点所在星期的星期一(Monday)的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func MondayOfWeek(t time.Time, locationOffsetSeconds int) time.Time {
const secondsPerWeek = 7 * 24 * 60 * 60
const secondsOf3Days = 3 * 24 * 60 * 60 // time.Unix(0, 0) is Thursday
x := t.Unix()
x += int64(locationOffsetSeconds)
x += secondsOf3Days
x = (x / secondsPerWeek) * secondsPerWeek
x -= secondsOf3Days
x -= int64(locationOffsetSeconds)
return time.Unix(x, 0).In(t.Location())
}
// BeginningOfMonth 获取 t 这个时间点所在 month 的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfMonth(t time.Time, loc *time.Location) time.Time {
y, m, _ := t.In(loc).Date()
return time.Date(y, m, 1, 0, 0, 0, 0, loc).In(t.Location())
}
// BeginningOfNextMonth 获取 t 这个时间点的下一个 month 的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfNextMonth(t time.Time, loc *time.Location) time.Time {
y, m, _ := t.In(loc).Date()
return time.Date(y, m+1, 1, 0, 0, 0, 0, loc).In(t.Location())
}
// BeginningOfYear 获取 t 这个时间点所在 year 的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfYear(t time.Time, loc *time.Location) time.Time {
y, _, _ := t.In(loc).Date()
return time.Date(y, time.January, 1, 0, 0, 0, 0, loc).In(t.Location())
}
// BeginningOfNextYear 获取 t 这个时间点的下一个 year 的零点时间.
//
// 返回的 time.Time 和传入的参数 t 的 *time.Location 一致!
func BeginningOfNextYear(t time.Time, loc *time.Location) time.Time {
y, _, _ := t.In(loc).Date()
return time.Date(y+1, time.January, 1, 0, 0, 0, 0, loc).In(t.Location())
}
var (
// ShanghaiLocationOffset 是东八区的 offset
ShanghaiLocationOffset = 8 * 60 * 60
// ShanghaiLocation 表示东八区
ShanghaiLocation = time.FixedZone("Asia/Shanghai", 8*60*60)
)
|
package mock
import (
"math/big"
"sync"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/merkle"
"github.com/qlcchain/go-qlc/common/types"
)
// Lazily-initialized shared coinbase account for mock PoV blocks.
var povCoinbaseOnce sync.Once
var povCoinbaseAcc *types.Account
// GeneratePovCoinbase lazily creates and returns the shared coinbase
// account used by mock PoV blocks. The account is created exactly once.
func GeneratePovCoinbase() *types.Account {
	// BUG FIX: the previous unsynchronized `povCoinbaseAcc == nil` check
	// around Once.Do was a data race; sync.Once already guarantees
	// single initialization, so call Do unconditionally.
	povCoinbaseOnce.Do(func() {
		povCoinbaseAcc = Account()
	})
	return povCoinbaseAcc
}
// GeneratePovBlock builds a mock PoV block on top of prevBlock (or the
// genesis block when prevBlock is nil) containing txNum synthetic state
// blocks, signs it with the shared coinbase account, and returns the block
// together with the next total difficulty (previous TD doubled).
func GeneratePovBlock(prevBlock *types.PovBlock, txNum uint32) (*types.PovBlock, *big.Int) {
	if prevBlock == nil {
		genesis := common.GenesisPovBlock()
		prevBlock = &genesis
	}
	prevTD := prevBlock.Target.ToBigInt()
	// Start from a clone of the parent and advance the chain fields.
	block := prevBlock.Clone()
	block.Timestamp = prevBlock.Timestamp + 1
	block.Previous = prevBlock.GetHash()
	block.Height = prevBlock.GetHeight() + 1
	if txNum > 0 {
		txHashes := make([]*types.Hash, 0, txNum)
		for txIdx := uint32(0); txIdx < txNum; txIdx++ {
			txBlk := StateBlockWithoutWork()
			txHash := txBlk.GetHash()
			txHashes = append(txHashes, &txHash)
			tx := &types.PovTransaction{Hash: txHash, Block: txBlk}
			block.Transactions = append(block.Transactions, tx)
		}
		block.TxNum = txNum
		block.MerkleRoot = merkle.CalcMerkleTreeRootHash(txHashes)
	}
	// Sign the vote hash and the block hash with the coinbase account.
	cb := GeneratePovCoinbase()
	block.Coinbase = cb.Address()
	block.VoteSignature = cb.Sign(block.ComputeVoteHash())
	block.Hash = block.ComputeHash()
	block.Signature = cb.Sign(block.Hash)
	nextTD := new(big.Int).Add(prevTD, prevTD)
	return block, nextTD
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"net"
"os"
"os/signal"
"strings"
"time"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/hashicorp/memberlist"
"github.com/swiftkick-io/xbinary"
)
// Raft support
// - Setup Memberlist
// - If the cluster does not already exist, create a new raft.Node instance with no peers.
// - If the cluster exists, get the metadata from each node and create a new raft.Node from the peer IDs
// - If a node joins, propose a config change (?) to add the peer
// - If a node leaves, propose a config change (?) to remove the peer
// - To send messages, propose a message
// TODO: Write BoltDB storage engine for raft.
import "flag"
// Command-line flags: memberlist (gossip) identity/bind settings plus the
// TCP endpoint used for raft traffic, and an optional peer list to join.
var name = flag.String("name", "", "node name")
var port = flag.Int("port", 7496, "bind port")
var tcpport = flag.Int("tcpport", 9000, "tcp bind port")
var addr = flag.String("addr", "127.0.0.1", "bind addr")
var tcpaddr = flag.String("tcpaddr", "127.0.0.1", "tcp bind addr")
var hosts = flag.String("hosts", "", "address of peer nodes")
// main wires together the gossip membership layer, the raft node, and the
// TCP transport, then blocks until an interrupt signal arrives and leaves
// the cluster cleanly.
func main() {
	flag.Parse()
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	/* Create the initial memberlist from a safe configuration.
	Please reference the godoc for other default config types.
	http://godoc.org/github.com/hashicorp/memberlist#Config
	*/
	evtDelegate := MultiEventDelegate{}
	var delegate = NewDelegate(*name, *tcpport)
	var cfg = memberlist.DefaultLocalConfig()
	cfg.Events = &evtDelegate
	cfg.Delegate = delegate
	cfg.Name = *name
	cfg.BindPort = *port
	cfg.BindAddr = *addr
	cfg.AdvertisePort = *port
	cfg.AdvertiseAddr = *addr
	list, err := memberlist.Create(cfg)
	if err != nil {
		log.Fatalln("Failed to create memberlist: " + err.Error())
		return
	}
	if len(*hosts) > 0 {
		// Join an existing cluster by specifying at least one known member.
		_, err = list.Join(strings.Split(*hosts, ","))
		if err != nil {
			log.Println("Failed to join cluster: " + err.Error())
		}
	}
	// Ask for members of the cluster
	for _, member := range list.Members() {
		fmt.Printf("Member: %s %s\n", member.Name, member.Addr)
	}
	lookup := NewNodeLookup()
	transport := NewTcpTransporter()
	// Setup raft: derive a numeric node id from the hashed node name.
	id := uint32(Jesteress([]byte(cfg.Name)))
	log.Printf("Name: %s ID: %d", cfg.Name, id)
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	log.Println("Node ID: ", c.ID)
	r := NewRaftNode(*name, c, storage, lookup, transport)
	evtDelegate.AddEventDelegate(r)
	// Listen for incoming connections.
	l, err := net.Listen("tcp", fmt.Sprintf(":%d", *tcpport))
	if err != nil {
		log.Println("Error listening:", err.Error())
		return
	}
	// Start TCP server
	server := TCPServer{l, r}
	go server.Start()
	fmt.Printf("Listening on %s:%d\n", *tcpaddr, *tcpport)
	// Close the listener when the application closes.
	defer server.Stop()
	// Start raft server
	go r.Start()
	// Handle signals.
	// NOTE(review): os.Kill (SIGKILL) cannot be trapped; only
	// os.Interrupt will actually be delivered here.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, os.Kill)
	// Wait for signal
	log.Println("Cluster open for business")
	// Block until signal is received
	<-sig
	log.Println("Shutting down node")
	if err = list.Leave(time.Second); err != nil {
		log.Println("Error leaving cluster: " + err.Error())
	}
	if err = list.Shutdown(); err != nil {
		log.Println("Error shutting down node: " + err.Error())
	}
}
// TCPServer accepts raft-protocol connections on listener and applies the
// decoded messages to the local raft node.
type TCPServer struct {
	listener net.Listener
	raft *raftNode
}
// Start runs the accept loop, serving each connection in its own goroutine.
// It blocks until the listener is closed (failed Accepts are skipped).
func (t *TCPServer) Start() {
	for {
		// Listen for an incoming connection.
		conn, err := t.listener.Accept()
		if err != nil {
			fmt.Println("Error accepting: ", err.Error())
			continue
		}
		//logs an incoming message
		fmt.Printf("Received message %s -> %s \n", conn.RemoteAddr(), conn.LocalAddr())
		// Handle connections in a new goroutine.
		go t.handleRequest(conn)
	}
}
// Stop closes the listener, which also terminates the Start accept loop.
func (t *TCPServer) Stop() error {
	return t.listener.Close()
}
// handleRequest serves one TCP connection, reading framed raft messages
// (1 type byte, 4-byte little-endian length, payload) and applying them to
// the local raft node; each applied message is acknowledged with a single
// AckMessageType byte. The connection is closed when the stream ends, on
// any framing/decoding error, or on an unknown message type (without a
// known length the stream can no longer be re-synchronized).
func (t *TCPServer) handleRequest(conn net.Conn) {
	// Close the connection when we're done with it.
	defer conn.Close()
	var b []byte
	buffer := bytes.NewBuffer(b)
	bufReader := bufio.NewReader(conn)
	size := make([]byte, 4)
	ack := []byte{byte(AckMessageType)}
loop:
	for {
		typ, err := bufReader.ReadByte()
		if err != nil {
			log.Printf("Failed to read message type from TCP client")
			break
		}
		switch UserMessageType(typ) {
		case RaftMessageType:
			log.Printf("Received Raft message type")
			// Read the 4-byte payload size.
			// BUG FIX: the plain `break` statements below previously only
			// exited the switch, so the loop kept reading a
			// desynchronized stream after any error; break the loop.
			n, err := bufReader.Read(size)
			if err != nil || n != 4 {
				log.Printf("Failed to read message size from TCP client")
				break loop
			}
			byteCount, _ := xbinary.LittleEndian.Uint32(size, 0)
			if n, err := buffer.ReadFrom(io.LimitReader(bufReader, int64(byteCount))); err != nil || n != int64(byteCount) {
				log.Printf("Failed to read message from TCP client")
				break loop
			}
			m := raftpb.Message{}
			if err := m.Unmarshal(buffer.Bytes()); err != nil {
				log.Printf("Failed to decode message from TCP client")
				break loop
			}
			log.Printf("Applying Raft message")
			t.raft.Step(m)
			// Acknowledge receipt on the connection.
			conn.Write(ack)
			// Reset Buffer for the next message.
			buffer.Reset()
		default:
			// Unknown type: its payload length is unknowable, so the
			// stream cannot be framed safely any more.
			log.Printf("Unknown message type %d from TCP client", typ)
			break loop
		}
	}
}
|
package main
import (
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"strings"
)
// Command-line flags and the decoded JSON configuration (expects a
// top-level "routes" object mapping path prefixes to backend hosts).
var configFile = flag.String("conf", "config.json", "configuration file")
var httpAddress = flag.String("http", ":80", "http address")
var httpsAddress = flag.String("https", ":8080", "https address")
var httpsEnabled = flag.Bool("https-enabled", false, "enable https server")
var verbose = flag.Bool("verbose", false, "explain what is being done")
var config map[string]interface{}
// NewReverseProxy constructs a single-host reverse proxy that rewrites
// incoming requests to the given scheme and host.
func NewReverseProxy(scheme, host string) *httputil.ReverseProxy {
	target := &url.URL{Scheme: scheme, Host: host}
	return httputil.NewSingleHostReverseProxy(target)
}
// Register wraps the given reverse proxy in an http.HandlerFunc, logging each
// request first when -verbose is set.
func Register(p *httputil.ReverseProxy) func(http.ResponseWriter, *http.Request) {
	return func(rw http.ResponseWriter, req *http.Request) {
		if *verbose {
			log.Printf("request %s%s", req.RemoteAddr, req.RequestURI)
		}
		// CORS headers intentionally left disabled:
		// rw.Header().Set("Access-Control-Allow-Origin", "*")
		// rw.Header().Set("Access-Control-Allow-Headers", "X-Requested-With")
		p.ServeHTTP(rw, req)
	}
}
// main loads the JSON configuration, registers a reverse-proxy handler for
// every configured route, and then serves HTTP (and optionally HTTPS)
// forever.
func main() {
	flag.Usage = func() {
		fmt.Printf("usage: %s [options]\n", filepath.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	// The config file path is resolved relative to the executable.
	folder, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatalln(err)
	}
	file, err := os.Open(filepath.Join(folder, *configFile))
	if err != nil {
		log.Fatalln(err)
	}
	// Close the handle as soon as decoding finishes; the original leaked it
	// for the life of the process.
	err = json.NewDecoder(file).Decode(&config)
	file.Close()
	if err != nil {
		log.Fatalln(err)
	}
	// Validate the shape instead of panicking on a bad type assertion.
	routes, ok := config["routes"].(map[string]interface{})
	if !ok {
		log.Fatalln(`config must contain a "routes" object`)
	}
	for path, host := range routes {
		log.Printf("%s -> %s", path, host)
		if strings.HasPrefix(path, "#") {
			// skip comments
			continue
		}
		hostStr, ok := host.(string)
		if !ok {
			// skip non-string hosts rather than panicking
			log.Printf("skipping route %s: host is not a string", path)
			continue
		}
		u, err := url.Parse(hostStr)
		if err != nil {
			// skip invalid hosts
			log.Println(err)
			continue
		}
		if u.Scheme == "https" && !*httpsEnabled {
			log.Println("https scheme detected but server is not enabled, run with -https-enabled")
			continue
		}
		http.HandleFunc(path, Register(NewReverseProxy(u.Scheme, u.Host)))
	}
	if *httpsEnabled {
		go func() {
			// allow you to use self signed certificates
			http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
			// openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout server.key -out server.crt
			log.Printf("start https server on %s", *httpsAddress)
			if err := http.ListenAndServeTLS(*httpsAddress, filepath.Join(folder, "server.crt"), filepath.Join(folder, "server.key"), nil); err != nil {
				log.Fatalln(err)
			}
		}()
	}
	log.Printf("start http server on %s", *httpAddress)
	if err := http.ListenAndServe(*httpAddress, nil); err != nil {
		log.Fatalln(err)
	}
}
|
package main
import(
_ "github.com/denisenkom/go-mssqldb"
"dataFromDB"
)
//Server=120.196.136.235;database=mclCoverSystem_Web_BAGX;User Id=sa;Password=maxt8899MAX
// main connects to a hard-coded SQL Server instance and runs a single query
// against a view.
//
// WARNING(review): server address, username and password are committed in
// plain text here (and in the comment above); rotate these credentials and
// load them from configuration or the environment instead.
//
// NOTE(review): the import block lists "dataFromDB" while this code
// references a package named Database — presumably the package name declared
// by that import path; confirm this compiles.
func main(){
	dbConn := Database.dbConn{
		server: "120.196.136.235",
		user: "sa",
		password: "maxt8899MAXT",
		database: "mclCoverSystem_Web_BAGX",
		encrypt: "disable",
	}
	// Column names are in Chinese (serial number, IMSI code).
	dbConn.Sql("select F_Id, 序列号, IMSI码 from VW_BreakDownBill")
}
package graphql
import (
"context"
"sync"
)
// MessageAdded subscribes the given id to new messages and returns the
// buffered channel on which they will be delivered.
func (r subscriptionResolver) MessageAdded(ctx context.Context, id string) (<-chan Message, error) {
	ls := r.ls.addListener(ctx, id)
	// Guard the channel swap; the pool may be touched concurrently.
	r.ls.mtx.Lock()
	defer r.ls.mtx.Unlock()
	ls.mc = make(chan Message, 1)
	return ls.mc, nil
}
// listenerPool tracks active subscription listeners keyed by subscriber id.
// mtx guards all access to ls.
type listenerPool struct {
	mtx sync.Mutex
	ls  map[string]*listener
}

// listener holds the delivery channel for a single subscriber.
type listener struct {
	mc chan Message
}
// addListener returns the listener registered under key, creating and
// registering a new one if none exists yet. The entry is removed from the
// pool automatically once ctx is cancelled.
//
// NOTE(review): when the same key subscribes more than once, the existing
// listener is shared and an additional cleanup goroutine is spawned; the
// first context to finish deletes the entry for every subscriber of that
// key — confirm this is the intended semantics.
func (p *listenerPool) addListener(ctx context.Context, key string) *listener {
	p.mtx.Lock()
	ls := p.ls[key]
	if ls == nil {
		// defaults the listener and insert one into the map.
		ls = &listener{}
	}
	p.ls[key] = ls
	p.mtx.Unlock()
	go func() {
		// When user ends their connection we remove him from the pool.
		<-ctx.Done()
		p.mtx.Lock()
		delete(p.ls, key)
		p.mtx.Unlock()
	}()
	return ls
}
// sendMessage delivers m to the listener registered for receiverID, silently
// dropping the message when no such listener exists.
//
// NOTE(review): the channel send happens while holding the pool mutex; if the
// receiver is slow and the 1-slot buffer is full, this blocks every other
// pool operation. Consider a non-blocking send or sending after unlocking.
func (p *listenerPool) sendMessage(receiverID string, m Message) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	ls := p.ls[receiverID]
	if ls == nil {
		// listener seems to have gone offline, bail.
		return
	}
	ls.mc <- m
}
|
package main
import (
"log"
"net/http"
"os"
"github.com/go-http/wechat_work"
"github.com/tencentyun/scf-go-lib/cloudfunction"
)
// main sends demo WeChat Work messages, then either hands control to the
// Tencent SCF runtime (when running serverless) or invokes hello directly.
func main() {
	client := wechat.NewAgentClientFromEnv()
	client.SendTextToUsers("中华英豪", "fengjianbo")
	newsMsg := wechat.NewNewsMessage()
	newsMsg.Append("标题标题3", "http://b22aiodu.com", "描sfa描述", "")
	newsMsg.Append("标题标题44", "http://ba22iodu.com", "描述描述s", "")
	log.Println(client.SendNewsMessageToUsers(newsMsg, "fengjianbo"))
	// Inside Tencent Cloud Functions the runtime drives hello for us.
	if _, isServerless := os.LookupEnv("TENCENTCLOUD_RUNENV"); isServerless {
		cloudfunction.Start(hello)
		return
	}
	hello()
}
// hello sends a demo news message, logs the cached access token, then
// downloads a random image, uploads it as media, and sends it to the same
// user. It returns the first error encountered.
func hello() error {
	client := wechat.NewAgentClientFromEnv()
	newsMsg := wechat.NewNewsMessage()
	newsMsg.Append("标题标题", "http://baiodu.com", "描述描述", "")
	newsMsg.Append("标题标题2", "http://baiodu.com", "描述描述s", "")
	client.SendNewsMessageToUsers(newsMsg, "fengjianbo")
	accessToken, err := client.GetAccessTokenFromCache()
	if err != nil {
		return err
	}
	log.Println("AccessToken is ", accessToken)
	imgResp, err := http.Get("https://img.xjh.me/random_img.php?return=302")
	if err != nil {
		return err
	}
	defer imgResp.Body.Close()
	mediaID, expiry, err := client.ImageMediaUpload(imgResp.Body)
	if err != nil {
		return err
	}
	log.Printf("MediaUpload:%#v,%#v", mediaID, expiry)
	invalidUsers, err := client.SendImageToUsers(mediaID, "fengjianbo")
	if err != nil {
		return err
	}
	log.Println("MessageSend", invalidUsers)
	return nil
}
|
package renter
import (
"context"
"fmt"
"sync"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/errors"
)
const (
	// projectDownloadByRootPerformanceDecay defines the amount of decay that is
	// applied to the exponential weighted average used to compute the
	// performance of the download by root projects that have run recently.
	projectDownloadByRootPerformanceDecay = 0.9
)

var (
	// ErrRootNotFound is returned if all workers were unable to recover the
	// root
	ErrRootNotFound = errors.New("workers were unable to recover the data by sector root - all workers failed")

	// ErrProjectTimedOut is returned when the project timed out
	ErrProjectTimedOut = errors.New("project timed out")

	// sectorLookupToDownloadRatio is an arbitrary ratio that resembles the
	// amount of lookups vs downloads. It is used in price gouging checks.
	sectorLookupToDownloadRatio = 16
)
// projectDownloadByRootManager tracks metrics across multiple runs of
// DownloadByRoot projects, and is used by the projects to set expectations for
// performance.
//
// We put downloads into 3 different buckets for performance because the
// performance characteristics are very different depending on which bucket you
// are in. The buckets are <=64 KiB, <=1 MiB, and everything larger.
type projectDownloadByRootManager struct {
	// Aggregate values for download by root projects. These are typically used
	// for research purposes, as opposed to being used in real time.
	totalTime64k     time.Duration
	totalTime1m      time.Duration
	totalTime4m      time.Duration
	totalRequests64k uint64
	totalRequests1m  uint64
	totalRequests4m  uint64

	// Decayed values track the recent performance of jobs in each bucket. These
	// values are generally used to help select workers when scheduling work,
	// because they are more responsive to changing network conditions.
	decayedTime64k     float64
	decayedTime1m      float64
	decayedTime4m      float64
	decayedRequests64k float64
	decayedRequests1m  float64
	decayedRequests4m  float64

	// mu guards every field above.
	mu sync.Mutex
}
// managedRecordProjectTime adds a download to the historic values of the
// project manager. It takes a length so that it knows which bucket to put the
// data in.
func (m *projectDownloadByRootManager) managedRecordProjectTime(length uint64, timeElapsed time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// record folds one sample into a bucket's aggregate and decayed counters.
	record := func(total *time.Duration, count *uint64, decayedTime, decayedCount *float64) {
		*total += timeElapsed
		*count++
		*decayedTime *= projectDownloadByRootPerformanceDecay
		*decayedCount *= projectDownloadByRootPerformanceDecay
		*decayedTime += float64(timeElapsed)
		*decayedCount++
	}

	// Pick the bucket by download size: <=64 KiB, <=1 MiB, else 4 MiB-class.
	switch {
	case length <= 1<<16:
		record(&m.totalTime64k, &m.totalRequests64k, &m.decayedTime64k, &m.decayedRequests64k)
	case length <= 1<<20:
		record(&m.totalTime1m, &m.totalRequests1m, &m.decayedTime1m, &m.decayedRequests1m)
	default:
		record(&m.totalTime4m, &m.totalRequests4m, &m.decayedTime4m, &m.decayedRequests4m)
	}
}
// managedAverageProjectTime will return the average download time that projects
// have had for the given length.
//
// NOTE(review): when no project has been recorded for a bucket yet, the
// decayed request count is zero and the division below produces NaN, whose
// conversion to time.Duration is implementation-defined — confirm the buckets
// are seeded before this is relied upon.
func (m *projectDownloadByRootManager) managedAverageProjectTime(length uint64) time.Duration {
	m.mu.Lock()
	defer m.mu.Unlock()
	var avg time.Duration
	// Same bucket boundaries as managedRecordProjectTime.
	if length <= 1<<16 {
		avg = time.Duration(m.decayedTime64k / m.decayedRequests64k)
	} else if length <= 1<<20 {
		avg = time.Duration(m.decayedTime1m / m.decayedRequests1m)
	} else {
		avg = time.Duration(m.decayedTime4m / m.decayedRequests4m)
	}
	return avg
}
// managedDownloadByRoot will fetch data using the merkle root of that data.
// Unlike the exported version of this function, this function does not request
// memory from the memory manager.
//
// offset and length select the byte range to read out of the sector named by
// root.
func (r *Renter) managedDownloadByRoot(ctx context.Context, root crypto.Hash, offset, length uint64) ([]byte, error) {
	// Check if the merkleroot is blocked
	if r.staticSkynetBlocklist.IsHashBlocked(crypto.HashObject(root)) {
		return nil, ErrSkylinkBlocked
	}

	// Create a context that dies when the function ends, this will cancel all
	// of the worker jobs that get created by this function.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Convenience variable.
	pm := r.staticProjectDownloadByRootManager
	// Track the total duration of the project.
	start := time.Now()

	// Potentially force a timeout via a disrupt for testing.
	if r.deps.Disrupt("timeoutProjectDownloadByRoot") {
		return nil, errors.Compose(ErrProjectTimedOut, ErrRootNotFound)
	}

	// Get the full list of workers and create a channel to receive all of the
	// results from the workers. The channel is buffered with one slot per
	// worker, so that the workers do not have to block when returning the
	// result of the job, even if this thread is not listening.
	workers := r.staticWorkerPool.callWorkers()
	staticResponseChan := make(chan *jobHasSectorResponse, len(workers))

	// Filter out all workers that do not support the new protocol. It has been
	// determined that hosts who do not support the async protocol are not worth
	// supporting in the new download by root code - it'll remove pretty much
	// all of the performance advantages. Skynet is being forced to fully
	// migrate to the async protocol.
	numAsyncWorkers := 0
	for _, worker := range workers {
		cache := worker.staticCache()
		if build.VersionCmp(cache.staticHostVersion, minAsyncVersion) < 0 {
			continue
		}
		// check for price gouging
		pt := worker.staticPriceTable().staticPriceTable
		err := checkPDBRGouging(pt, cache.staticRenterAllowance)
		if err != nil {
			r.log.Debugf("price gouging detected in worker %v, err: %v\n", worker.staticHostPubKeyStr, err)
			continue
		}
		jhs := worker.newJobHasSector(ctx, staticResponseChan, root)
		if !worker.staticJobHasSectorQueue.callAdd(jhs) {
			// This will filter out any workers that are on cooldown or
			// otherwise can't participate in the project.
			continue
		}
		// Keep this worker by compacting it toward the front of the slice.
		workers[numAsyncWorkers] = worker
		numAsyncWorkers++
	}
	// Drop the workers that were filtered out above.
	workers = workers[:numAsyncWorkers]
	// If there are no workers remaining, fail early.
	if len(workers) == 0 {
		return nil, errors.AddContext(modules.ErrNotEnoughWorkersInWorkerPool, "cannot perform DownloadByRoot")
	}

	// Create a timer that is used to determine when the project should stop
	// looking for a better worker, and instead go use the best worker it has
	// found so far.
	//
	// Currently, we track the recent historical performance of projects using
	// an exponential weighted average. Workers also track their recent
	// performance using an exponential weighted average. Using these two
	// values, we can determine whether using a worker is likely to result in
	// better than historic average performance.
	//
	// If a worker does look like it can be used to achieve better than average
	// performance, we will use that worker immediately. Otherwise, we will wait
	// for a better worker to appear.
	//
	// After we have spent half of the whole historic time waiting for better
	// workers to appear, we give up and use the best worker that we have found
	// so far.
	useBestWorkerCtx, useBestWorkerCancel := context.WithTimeout(ctx, pm.managedAverageProjectTime(length)/2)
	defer useBestWorkerCancel()

	// Run a loop to receive responses from the workers as they figure out
	// whether or not they have the sector we are looking for. The loop needs to
	// run until we have tried every worker, which means that the number of
	// responses must be equal to the number of workers, and the length of the
	// usable workers map must be 0.
	//
	// The usable workers map is a map from the iteration that we found the
	// worker to the worker. We use a map because it makes it easy to see the
	// length, is simple enough to implement, and iterating over a whole map
	// with 30 or so elements in it is not too costly. It is also easy to delete
	// elements from a map as workers fail.
	responses := 0
	usableWorkers := make(map[int]*worker)
	useBestWorker := false
	for responses < len(workers) || len(usableWorkers) > 0 {
		// Check for the timeout. This is done separately to ensure the timeout
		// has priority.
		select {
		case <-ctx.Done():
			return nil, errors.Compose(ErrProjectTimedOut, ErrRootNotFound)
		default:
		}

		var resp *jobHasSectorResponse
		if len(usableWorkers) > 0 && responses < numAsyncWorkers {
			// There are usable workers, and there are also workers that have
			// not reported back yet. Because we have usable workers, we want to
			// listen on the useBestWorkerChan.
			select {
			case <-useBestWorkerCtx.Done():
				useBestWorker = true
			case resp = <-staticResponseChan:
				responses++
			case <-ctx.Done():
				return nil, errors.Compose(ErrProjectTimedOut, ErrRootNotFound)
			}
		} else if len(usableWorkers) == 0 {
			// There are no usable workers, which means there's no point
			// listening on the useBestWorkerChan.
			select {
			case resp = <-staticResponseChan:
				responses++
			case <-ctx.Done():
				return nil, errors.Compose(ErrProjectTimedOut, ErrRootNotFound)
			}
		} else {
			// All workers have responded, which means we should now use the
			// best worker that we have to attempt the download. No need to wait
			// for a signal.
			useBestWorker = true
		}

		// Since the program only contained a single instruction, the first
		// element of the slice (if available) is what we're looking for.
		var available bool
		successfulResponse := resp != nil && resp.staticErr == nil
		if successfulResponse && len(resp.staticAvailables) == 0 {
			build.Critical("A successful job should not have a zero length response")
		} else {
			available = successfulResponse && resp.staticAvailables[0]
		}

		// If we received a response from a worker that is not useful for
		// completing the project, go back to blocking. This check is ignored if
		// we are supposed to use the best worker.
		if !available && !useBestWorker {
			continue
		}

		// If there was a positive response, add this worker to the set of
		// usable workers. Check whether or not this worker is expected to
		// finish better than the average project time. If so, set a flag so
		// that the download continues even if we aren't yet ready to use the
		// best known worker.
		goodEnough := false
		if available {
			w := resp.staticWorker
			jq := w.staticJobReadQueue
			usableWorkers[responses] = w
			goodEnough = time.Since(start)+jq.callExpectedJobTime(length) < pm.managedAverageProjectTime(length)
		}

		// Determine whether to move forward with the download or wait for more
		// workers. If the useBestWorker flag is set, we will move forward with
		// the download. If the most recent worker has an average job time that
		// would expect us to complete this job faster than usual, we can move
		// forward with that worker.
		//
		// This conditional is set up as an inverse so that we can continue
		// rather than putting all of the logic inside a big if block.
		if !useBestWorker && !goodEnough {
			continue
		}
		// If there are no usable workers, continue.
		if len(usableWorkers) == 0 {
			continue
		}

		// Scan through the set of workers to find the best worker.
		var bestWorkerIndex int
		var bestWorker *worker
		var bestWorkerTime time.Duration
		for i, w := range usableWorkers {
			wTime := w.staticJobReadQueue.callExpectedJobTime(length)
			if bestWorkerTime == 0 || wTime < bestWorkerTime {
				bestWorkerTime = wTime
				bestWorkerIndex = i
				bestWorker = w
			}
		}
		// Delete this worker from the set of usable workers, because if this
		// download fails, the worker shouldn't be used again.
		delete(usableWorkers, bestWorkerIndex)

		// Queue the job to download the sector root.
		readSectorRespChan := make(chan *jobReadResponse)
		jrs := &jobReadSector{
			jobRead: jobRead{
				staticResponseChan: readSectorRespChan,
				staticLength:       length,

				jobGeneric: newJobGeneric(ctx, bestWorker.staticJobReadQueue, jobReadSectorMetadata{staticSector: root}),
			},
			staticOffset: offset,
			staticSector: root,
		}
		if !bestWorker.staticJobReadQueue.callAdd(jrs) {
			continue
		}

		// Wait for a response from the worker.
		//
		// TODO: This worker is currently a single point of failure, if the
		// worker takes longer to respond than the lookup timeout, the project
		// will fail even though there are potentially more workers to be using.
		// I think the best way to fix this is to switch to the multi-worker
		// paradigm, where we use multiple workers to fetch a single sector
		// root.
		var readSectorResp *jobReadResponse
		select {
		case readSectorResp = <-readSectorRespChan:
		case <-ctx.Done():
			return nil, errors.Compose(ErrProjectTimedOut, ErrRootNotFound)
		}

		// If the read sector job was not successful, move on to the next
		// worker.
		if readSectorResp == nil || readSectorResp.staticErr != nil {
			continue
		}

		// We got a good response! Record the total project time and return the
		// data.
		pm.managedRecordProjectTime(length, time.Since(start))
		return readSectorResp.staticData, nil
	}

	// All workers have failed.
	return nil, ErrRootNotFound
}
// DownloadByRoot will fetch data using the merkle root of that data. This uses
// all of the async worker primitives to improve speed and throughput.
//
// A timeout of zero (or less) leaves the download bounded only by renter
// shutdown.
func (r *Renter) DownloadByRoot(root crypto.Hash, offset, length uint64, timeout time.Duration) ([]byte, error) {
	if err := r.tg.Add(); err != nil {
		return nil, err
	}
	defer r.tg.Done()

	// Block until there is memory available, and then ensure the memory gets
	// returned.
	if !r.memoryManager.Request(length, memoryPriorityHigh) {
		return nil, errors.New("renter shut down before memory could be allocated for the project")
	}
	defer r.memoryManager.Return(length)

	// Create a context. If the timeout is greater than zero, have the context
	// expire when the timeout triggers.
	ctx := r.tg.StopCtx()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(r.tg.StopCtx(), timeout)
		defer cancel()
	}
	data, err := r.managedDownloadByRoot(ctx, root, offset, length)
	// Annotate timeouts with the configured duration to ease debugging.
	if errors.Contains(err, ErrProjectTimedOut) {
		err = errors.AddContext(err, fmt.Sprintf("timed out after %vs", timeout.Seconds()))
	}
	return data, err
}
// checkPDBRGouging verifies the cost of executing the jobs performed by the
// PDBR are reasonable in relation to the user's allowance and the amount of
// data they intend to download.
func checkPDBRGouging(pt modules.RPCPriceTable, allowance modules.Allowance) error {
	// Check whether the download bandwidth price is too high.
	if !allowance.MaxDownloadBandwidthPrice.IsZero() && allowance.MaxDownloadBandwidthPrice.Cmp(pt.DownloadBandwidthCost) < 0 {
		return fmt.Errorf("download bandwidth price of host is %v, which is above the maximum allowed by the allowance: %v - price gouging protection enabled", pt.DownloadBandwidthCost, allowance.MaxDownloadBandwidthPrice)
	}

	// Check whether the upload bandwidth price is too high.
	if !allowance.MaxUploadBandwidthPrice.IsZero() && allowance.MaxUploadBandwidthPrice.Cmp(pt.UploadBandwidthCost) < 0 {
		return fmt.Errorf("upload bandwidth price of host is %v, which is above the maximum allowed by the allowance: %v - price gouging protection enabled", pt.UploadBandwidthCost, allowance.MaxUploadBandwidthPrice)
	}

	// If there is no allowance, price gouging checks have to be disabled,
	// because there is no baseline for understanding what might count as price
	// gouging.
	if allowance.Funds.IsZero() {
		return nil
	}

	// In order to decide whether or not the cost of performing a PDBR is too
	// expensive, we make some assumptions with regards to lookup vs download
	// job ratio and avg download size. The total cost is then compared in
	// relation to the allowance, where we verify that a fraction of the cost
	// (which we'll call reduced cost) to download the amount of data the user
	// intends to download does not exceed its allowance.

	// Calculate the cost of a has sector job.
	builder := modules.NewProgramBuilder(&pt, 0)
	builder.AddHasSectorInstruction(crypto.Hash{})
	cost, _, _ := builder.Cost(true)

	upBW, downBW := hasSectorJobExpectedBandwidth(1)
	bwCost := modules.MDMBandwidthCost(pt, upBW, downBW)
	hasSectorJobCost := cost.Add(bwCost)

	// Calculate the cost of a read sector job, we use StreamDownloadSize as an
	// average download size here which is 64 KiB.
	builder = modules.NewProgramBuilder(&pt, 0)
	builder.AddReadSectorInstruction(modules.StreamDownloadSize, 0, crypto.Hash{}, true)
	cost, _, _ = builder.Cost(true)

	upBW, downBW = readSectorJobExpectedBandwidth(modules.StreamDownloadSize)
	bwCost = modules.MDMBandwidthCost(pt, upBW, downBW)
	readSectorJobCost := cost.Add(bwCost)

	// Calculate the cost of a project: one read plus the assumed ratio of
	// lookups per download.
	projectCost := readSectorJobCost.Add(hasSectorJobCost.Mul64(uint64(sectorLookupToDownloadRatio)))

	// Now that we have the cost of each job, and we estimate a sector lookup to
	// download ratio of 16, all we need to do is calculate the number of
	// projects necessary to download the expected download amount.
	projectCount := allowance.ExpectedDownload / modules.StreamDownloadSize

	// The cost of downloading is considered too expensive if the allowance is
	// insufficient to cover a fraction of the expense to download the amount of
	// data the user intends to download.
	totalCost := projectCost.Mul64(projectCount)
	reducedCost := totalCost.Div64(downloadGougingFractionDenom)
	if reducedCost.Cmp(allowance.Funds) > 0 {
		errStr := fmt.Sprintf("combined PDBR pricing of host yields %v, which is more than the renter is willing to pay for downloads: %v - price gouging protection enabled", reducedCost, allowance.Funds)
		return errors.New(errStr)
	}

	return nil
}
|
package sync
import (
"context"
"github.com/kumahq/kuma/pkg/core/dns/lookup"
core_mesh "github.com/kumahq/kuma/pkg/core/resources/apis/mesh"
"github.com/kumahq/kuma/pkg/core/resources/manager"
core_model "github.com/kumahq/kuma/pkg/core/resources/model"
core_store "github.com/kumahq/kuma/pkg/core/resources/store"
"github.com/kumahq/kuma/pkg/envoy/admin"
xds_context "github.com/kumahq/kuma/pkg/xds/context"
xds_topology "github.com/kumahq/kuma/pkg/xds/topology"
)
// xdsContextBuilder assembles xDS contexts for dataplanes, combining the
// static control-plane context with per-mesh resources fetched via the
// read-only resource manager.
type xdsContextBuilder struct {
	resManager       manager.ReadOnlyResourceManager
	lookupIP         lookup.LookupIPFunc
	envoyAdminClient admin.EnvoyAdminClient

	cpContext *xds_context.ControlPlaneContext
}
// newXDSContextBuilder wires up an xdsContextBuilder from its dependencies.
func newXDSContextBuilder(
	cpContext *xds_context.ControlPlaneContext,
	resManager manager.ReadOnlyResourceManager,
	lookupIP lookup.LookupIPFunc,
	envoyAdminClient admin.EnvoyAdminClient,
) *xdsContextBuilder {
	builder := &xdsContextBuilder{
		cpContext:        cpContext,
		resManager:       resManager,
		lookupIP:         lookupIP,
		envoyAdminClient: envoyAdminClient,
	}
	return builder
}
// buildMeshedContext creates an xDS context enriched with the mesh the given
// dataplane belongs to, plus all dataplanes in that mesh, tagged with the
// provided mesh hash.
func (c *xdsContextBuilder) buildMeshedContext(dpKey core_model.ResourceKey, meshHash string) (*xds_context.Context, error) {
	ctx := context.Background()
	xdsCtx, err := c.buildContext(dpKey)
	if err != nil {
		return nil, err
	}

	mesh := core_mesh.NewMeshResource()
	if err := c.resManager.Get(ctx, mesh, core_store.GetByKey(dpKey.Mesh, core_model.NoMesh)); err != nil {
		return nil, err
	}

	// Reuse ctx rather than creating a second background context as before.
	dataplanes, err := xds_topology.GetDataplanes(syncLog, ctx, c.resManager, c.lookupIP, dpKey.Mesh)
	if err != nil {
		return nil, err
	}
	xdsCtx.Mesh = xds_context.MeshContext{
		Resource:   mesh,
		Dataplanes: dataplanes,
		Hash:       meshHash,
	}
	return xdsCtx, nil
}
// buildContext returns a base xDS context carrying the control-plane context
// and admin client; the mesh portion is left empty for callers to fill in.
func (c *xdsContextBuilder) buildContext(dpKey core_model.ResourceKey) (*xds_context.Context, error) {
	xdsCtx := &xds_context.Context{
		ControlPlane:     c.cpContext,
		Mesh:             xds_context.MeshContext{},
		EnvoyAdminClient: c.envoyAdminClient,
	}
	return xdsCtx, nil
}
|
/*
Cryptocurrencies often have a lot of decimals. For example, the popular cryptocurrency Ethereum has 18 decimals.
When dealing with money, precision is important, you don't want to lose money because a number is losing precision.
However, with JavaScript, normal numbers only can go up to 9007199254740991.
To deal with this, Javascript now has BigInt for integers bigger than that.
However, in order to get back to a decimal number, the number needs to be formatted from a BigInt
to a string with the right number of decimals.
Write a function that takes as arguments a BigInt and the desired amount of decimals and returns a string (not a number,
as it will lose precision) with the correct amount of decimals.
Examples
formatBigInt(1938908490185852058934n, 18) ➞ "1938.908490185852058934"
formatBigInt(987654321987654321n, 6 ) ➞ "987654321987.654321"
formatBigInt(13902183984901849081284n, 12) ➞ "13902183984.901849081284"
Notes
N/A
*/
package main
import (
"fmt"
"math/big"
)
// main runs the kata's example cases plus one extra, panicking on the first
// mismatch.
func main() {
	test("1938908490185852058934", 18, "1938.908490185852058934")
	test("987654321987654321", 6, "987654321987.654321")
	test("13902183984901849081284", 12, "13902183984.901849081284")
	test("923948293849023849082094892384024", 18, "923948293849023.849082094892384024")
}
// test asserts that formatting integer literal s with p decimals yields r.
func test(s string, p int, r string) {
	assert(format(xint(s), p) == r)
}
// assert panics when the given condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// format renders x as a decimal string with p fractional digits, preserving
// full precision.
//
// Fix over the original: values with fewer than p+1 digits are zero-padded so
// e.g. format(5, 6) yields "0.000005" instead of "5", p == 0 no longer
// produces a trailing ".", and negative values keep their sign intact.
func format(x *big.Int, p int) string {
	s := x.String()
	if p <= 0 {
		return s
	}
	neg := s[0] == '-'
	if neg {
		s = s[1:]
	}
	// Zero-pad so at least one digit remains left of the decimal point.
	for len(s) <= p {
		s = "0" + s
	}
	s = fmt.Sprintf("%s.%s", s[:len(s)-p], s[len(s)-p:])
	if neg {
		s = "-" + s
	}
	return s
}

// xint parses a base-10 integer literal into a big.Int. It panics on
// malformed input so bad test data fails loudly instead of silently becoming
// zero (the original ignored SetString's ok result).
func xint(s string) *big.Int {
	x := new(big.Int)
	if _, ok := x.SetString(s, 10); !ok {
		panic("invalid integer literal: " + s)
	}
	return x
}
|
package queue
// IntQueue is a queue that only processes integers.
type IntQueue interface {
	// EnQueue inserts an element into the queue.
	// It returns true if the operation is successful.
	EnQueue(x int) bool

	// DeQueue deletes an element from the queue.
	// It returns true if the operation is successful.
	DeQueue() bool

	// Front gets the front item from the queue.
	// If the queue is empty, it returns -1.
	Front() int

	// IsEmpty checks whether the queue is empty or not.
	IsEmpty() bool
}
// myIntQueue adapts the embedded SliceQueue to the IntQueue interface by
// narrowing EnQueue and Front to int (DeQueue and IsEmpty are inherited).
type myIntQueue struct {
	SliceQueue
}
// EnQueue inserts x into the underlying SliceQueue, reporting success. This
// int-typed wrapper is what satisfies IntQueue.
func (q *myIntQueue) EnQueue(x int) bool {
	return q.SliceQueue.EnQueue(x)
}
// Front returns the front item as an int, or -1 when the queue is empty or
// the front element is not an int.
func (q *myIntQueue) Front() int {
	if v, ok := q.SliceQueue.Front().(int); ok {
		return v
	}
	return -1
}
|
// A native Pulumi package for creating and managing Amazon Web Services (AWS) resources.
package aws
|
// +build !debug

package main
import (
"github.com/blevesearch/bleve/index/store"
)
// printOtherHeader is a no-op in non-debug builds (see the build constraint
// at the top of this file); the debug build provides the real implementation.
func printOtherHeader(s store.KVStore) {
}
// printOther is a no-op in non-debug builds; the debug build provides the
// real implementation.
func printOther(s store.KVStore) {
}
|
package test
import (
"testing"
"github.com/alionurgeven/IBAN/pkg/ibanvalidator"
"github.com/stretchr/testify/assert"
)
// TestValidIBANs verifies that well-formed IBANs, with or without spaces,
// pass validation.
func TestValidIBANs(t *testing.T) {
	valid := []string{
		"AE070331234567890123456",
		"GB82 WEST 1234 5698 7654 32",
	}
	for _, iban := range valid {
		assert.True(t, ibanvalidator.Validate(iban))
	}
}
// TestInvalidIBANs verifies that malformed IBANs are rejected.
func TestInvalidIBANs(t *testing.T) {
	invalid := []string{
		"AE070aas331231234567890123456",
		"GB82 123WEST 1234 12351698 765412 32",
	}
	for _, iban := range invalid {
		assert.False(t, ibanvalidator.Validate(iban))
	}
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command flakybot searches for sponge_log.xml files and publishes them to
// Pub/Sub.
//
// You can run it locally by running:
//
// go build
// ./flakybot -repo=my-org/my-repo -installation_id=123 -project=my-project
package main
import (
"context"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io/fs"
"log"
"os"
"path/filepath"
"strings"
"cloud.google.com/go/pubsub"
"google.golang.org/api/option"
)
// main parses flags, fills in environment-derived defaults, locates all
// sponge_log.xml files under the configured directory, and publishes them to
// Pub/Sub. Any failure exits with status 1.
func main() {
	log.SetFlags(0)
	log.SetPrefix("[FlakyBot] ")
	log.SetOutput(os.Stderr)
	repo := flag.String("repo", "", "The repo this is for. Defaults to auto-detect from Kokoro environment. If that doesn't work, if your repo is github.com/GoogleCloudPlatform/golang-samples, --repo should be GoogleCloudPlatform/golang-samples")
	installationID := flag.String("installation_id", "", "GitHub installation ID. Defaults to auto-detect. If your repo is not part of GoogleCloudPlatform or googleapis set this to the GitHub installation ID for your repo. See https://github.com/googleapis/repo-automation-bots/issues.")
	projectID := flag.String("project", "repo-automation-bots", "Project ID to publish to. Defaults to repo-automation-bots.")
	topicID := flag.String("topic", "passthrough", "Pub/Sub topic to publish to. Defaults to passthrough.")
	logsDir := flag.String("logs_dir", ".", "The directory to look for logs in. Defaults to current directory.")
	commit := flag.String("commit_hash", "", "Long form commit hash this build is being run for. Defaults to the KOKORO_GIT_COMMIT environment variable.")
	serviceAccount := flag.String("service_account", "", "Path to service account to use instead of Trampoline default or client library auto-detection.")
	buildURL := flag.String("build_url", "", "Build URL (markdown OK). Defaults to detect from Kokoro.")
	flag.Parse()

	cfg := &config{
		projectID:      *projectID,
		topicID:        *topicID,
		repo:           *repo,
		installationID: *installationID,
		commit:         *commit,
		logsDir:        *logsDir,
		serviceAccount: *serviceAccount,
		buildURL:       *buildURL,
	}

	// setDefaults logs its own diagnostics before reporting failure.
	if ok := cfg.setDefaults(); !ok {
		os.Exit(1)
	}

	log.Println("Sending logs to Flaky Bot...")
	log.Println("See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.")

	logs, err := findLogs(cfg.logsDir)
	if err != nil {
		log.Printf("Error searching for logs: %v", err)
		os.Exit(1)
	}
	if len(logs) == 0 {
		log.Printf("No sponge_log.xml files found in %s. Did you forget to generate sponge_log.xml?", cfg.logsDir)
		os.Exit(1)
	}
	p, err := pubSubPublisher(context.Background(), cfg)
	if err != nil {
		log.Printf("Could not connect to Pub/Sub: %v", err)
		os.Exit(1)
	}
	if err := publish(context.Background(), cfg, p, logs); err != nil {
		log.Printf("Could not publish: %v", err)
		os.Exit(1)
	}

	log.Println("Done!")
}
// githubInstallation identifies the GitHub App installation that should
// receive the published results.
type githubInstallation struct {
	ID string `json:"id"`
}
// message is the JSON payload published to Pub/Sub for a single log file.
type message struct {
	Name         string             `json:"name"`
	Type         string             `json:"type"`
	Location     string             `json:"location"`
	Installation githubInstallation `json:"installation"`
	Repo         string             `json:"repo"`
	Commit       string             `json:"commit"`
	BuildURL     string             `json:"buildURL"`
	XUnitXML     string             `json:"xunitXML"`
}
// config carries all runtime settings for a single flakybot invocation,
// populated from flags and then environment defaults (see setDefaults).
type config struct {
	projectID      string
	topicID        string
	repo           string
	installationID string
	commit         string
	logsDir        string
	serviceAccount string
	buildURL       string
}
// setDefaults fills in any unset config fields from the Kokoro environment.
// It returns false (after logging guidance) when a required value cannot be
// determined.
func (cfg *config) setDefaults() (ok bool) {
	if cfg.serviceAccount == "" {
		if gfileDir := os.Getenv("KOKORO_GFILE_DIR"); gfileDir != "" {
			// Assume any given service account exists, but check the Trampoline
			// account exists before trying to use it (instead of default
			// credentials).
			path := filepath.Join(gfileDir, "kokoro-trampoline.service-account.json")
			if _, err := os.Stat(path); err == nil {
				cfg.serviceAccount = path
			}
		}
	}
	if cfg.repo == "" {
		cfg.repo = detectRepo()
	}
	if cfg.repo == "" {
		log.Printf(`Unable to detect repo. Please set the --repo flag.
If your repo is github.com/GoogleCloudPlatform/golang-samples, --repo should be GoogleCloudPlatform/golang-samples.
If your repo is not in GoogleCloudPlatform or googleapis, you must also set
--installation_id. See https://github.com/apps/flaky-bot/.`)
		return false
	}
	if cfg.installationID == "" {
		cfg.installationID = detectInstallationID(cfg.repo)
	}
	if cfg.installationID == "" {
		log.Printf(`Unable to detect installation ID from repo=%q. Please set the --installation_id flag.
If your repo is part of GoogleCloudPlatform or googleapis and you see this error,
file an issue at https://github.com/googleapis/repo-automation-bots/issues.
Otherwise, set --installation_id with the numeric installation ID.
See https://github.com/apps/flaky-bot/.`, cfg.repo)
		return false
	}
	if cfg.commit == "" {
		cfg.commit = os.Getenv("KOKORO_GIT_COMMIT")
	}
	if cfg.commit == "" {
		log.Printf(`Unable to detect commit hash (expected the KOKORO_GIT_COMMIT env var).
Please set --commit_hash to the latest git commit hash.
See https://github.com/apps/flaky-bot/.`)
		return false
	}
	if cfg.buildURL == "" {
		buildID := os.Getenv("KOKORO_BUILD_ID")
		if buildID == "" {
			log.Printf(`Unable to build URL (expected the KOKORO_BUILD_ID env var).
Please set --build_url to the URL of the build.
See https://github.com/apps/flaky-bot/.`)
			return false
		}
		// Link both the public results viewer and the internal Sponge UI.
		cfg.buildURL = fmt.Sprintf("[Build Status](https://source.cloud.google.com/results/invocations/%s), [Sponge](http://sponge2/%s)", buildID, buildID)
	}
	return true
}
// pubSubPublisher builds a publisher backed by a Pub/Sub topic client for
// cfg.projectID/cfg.topicID. If a service-account file is configured it is
// used for credentials; otherwise default credentials apply.
func pubSubPublisher(ctx context.Context, cfg *config) (*publisher, error) {
	var opts []option.ClientOption
	if sa := cfg.serviceAccount; sa != "" {
		opts = append(opts, option.WithCredentialsFile(sa))
	}
	client, err := pubsub.NewClient(ctx, cfg.projectID, opts...)
	if err != nil {
		return nil, fmt.Errorf("unable to connect to Pub/Sub: %v", err)
	}
	return &publisher{topic: client.Topic(cfg.topicID)}, nil
}
// findLogs searches dir recursively and returns the paths of all files whose
// name ends in "sponge_log.xml".
func findLogs(dir string) ([]string, error) {
	var paths []string
	walk := func(path string, dirEntry fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Skip directories: only log files should be collected, even if a
		// directory happens to carry a matching name.
		if dirEntry.IsDir() {
			return nil
		}
		if !strings.HasSuffix(dirEntry.Name(), "sponge_log.xml") {
			return nil
		}
		paths = append(paths, path)
		return nil
	}
	if err := filepath.WalkDir(dir, walk); err != nil {
		return nil, err
	}
	return paths, nil
}
// publish publishes the given log files with the given publisher, stopping
// at the first failure.
func publish(ctx context.Context, cfg *config, p messagePublisher, logs []string) error {
	for _, logPath := range logs {
		err := processLog(ctx, cfg, p, logPath)
		if err == nil {
			continue
		}
		return fmt.Errorf("publishing logs: %v", err)
	}
	return nil
}
// detectRepo tries to detect the "owner/name" repo slug from the Kokoro
// environment (commit URL first, then PR URL). It returns "" when neither
// variable is set or the URL has too few path segments.
func detectRepo() string {
	githubURL := os.Getenv("KOKORO_GITHUB_COMMIT_URL")
	if githubURL == "" {
		if githubURL = os.Getenv("KOKORO_GITHUB_PULL_REQUEST_URL"); githubURL != "" {
			log.Printf("Warning! Running on a PR. Double check how you call buildocp before merging.")
		}
	}
	if githubURL == "" {
		return ""
	}
	// https://github.com/<owner>/<repo>/... -> segment 3 is the owner,
	// segment 4 is the repo name.
	parts := strings.Split(githubURL, "/")
	if len(parts) < 5 {
		return ""
	}
	return fmt.Sprintf("%s/%s", parts[3], parts[4])
}
// detectInstallationID tries to detect the GitHub installation ID based on
// the repo slug; unknown repos yield "".
func detectInstallationID(repo string) string {
	switch {
	case strings.Contains(repo, "GoogleCloudPlatform"):
		return "5943459"
	case strings.Contains(repo, "googleapis"):
		return "6370238"
	default:
		return ""
	}
}
// messagePublisher abstracts Pub/Sub publishing so tests can substitute a fake.
type messagePublisher interface {
	publish(context.Context, *pubsub.Message) (serverID string, err error)
}

// publisher is the production messagePublisher backed by a real topic.
type publisher struct {
	topic *pubsub.Topic
}

// publish sends msg to the topic and blocks until the server responds,
// returning the server-assigned message ID.
func (p *publisher) publish(ctx context.Context, msg *pubsub.Message) (serverID string, err error) {
	return p.topic.Publish(ctx, msg).Get(ctx)
}
// processLog is used to process log files and publish them with the given publisher.
// It reads the XML log at path, wraps it (base64-encoded) in a flakybot
// message together with the repo/commit/build metadata from cfg, and
// publishes the JSON-encoded message via p.
func processLog(ctx context.Context, cfg *config, p messagePublisher, path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("os.ReadFile(%q): %v", path, err)
	}
	// Base64-encode the XML so it can travel inside the JSON payload.
	enc := base64.StdEncoding.EncodeToString(data)
	msg := message{
		Name:         "flakybot",
		Type:         "function",
		Location:     "us-central1",
		Installation: githubInstallation{ID: cfg.installationID},
		Repo:         cfg.repo,
		Commit:       cfg.commit,
		BuildURL:     cfg.buildURL,
		XUnitXML:     enc,
	}
	data, err = json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("json.Marshal: %v", err)
	}
	pubsubMsg := &pubsub.Message{
		Data: data,
	}
	id, err := p.publish(ctx, pubsubMsg)
	if err != nil {
		return fmt.Errorf("Pub/Sub Publish.Get: %v", err)
	}
	log.Printf("Published %s (%v)!", path, id)
	return nil
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connectivity
import (
"fmt"
"github.com/pkg/errors"
vpptypes "github.com/calico-vpp/vpplink/api/v0"
"github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/common"
"github.com/projectcalico/vpp-dataplane/v3/vpplink"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)
// IpipProvider implements node-to-node connectivity over IPIP tunnels in VPP.
type IpipProvider struct {
	*ConnectivityProviderData
	// ipipIfs maps a tunnel's destination address (string form) to the tunnel.
	ipipIfs map[string]*vpptypes.IPIPTunnel
	// ipipRoutes maps a tunnel's sw_if_index to the set of destination CIDRs
	// currently routed through that tunnel.
	ipipRoutes map[uint32]map[string]bool
}
// NewIPIPProvider returns an IpipProvider with empty tunnel and route maps.
func NewIPIPProvider(d *ConnectivityProviderData) *IpipProvider {
	return &IpipProvider{
		ConnectivityProviderData: d,
		ipipIfs:                  make(map[string]*vpptypes.IPIPTunnel),
		ipipRoutes:               make(map[uint32]map[string]bool),
	}
}
// EnableDisable is a no-op for IPIP: the provider needs no global toggle.
func (p *IpipProvider) EnableDisable(isEnable bool) {
}

// Enabled reports whether the provider can serve the given connectivity;
// IPIP is always available.
func (p *IpipProvider) Enabled(cn *common.NodeConnectivity) bool {
	return true
}
// RescanState rebuilds the provider's in-memory state (ipipIfs, ipipRoutes)
// from what is currently configured in VPP. Tunnels are adopted when their
// source matches one of this node's addresses; routes are attributed to the
// adopted tunnels by interface index.
func (p *IpipProvider) RescanState() {
	p.log.Infof("Rescanning existing tunnels")
	p.ipipIfs = make(map[string]*vpptypes.IPIPTunnel)
	tunnels, err := p.vpp.ListIPIPTunnels()
	if err != nil {
		p.log.Errorf("Error listing ipip tunnels: %v", err)
	}
	ip4, ip6 := p.server.GetNodeIPs()
	for _, tunnel := range tunnels {
		// Only adopt tunnels that originate from one of this node's addresses.
		if (ip4 != nil && tunnel.Src.Equal(*ip4)) || (ip6 != nil && tunnel.Src.Equal(*ip6)) {
			p.log.Infof("Found existing tunnel: %s", tunnel)
			p.ipipIfs[tunnel.Dst.String()] = tunnel
		}
	}
	// Index adopted tunnels by sw_if_index so routes can be attributed below.
	indexTunnel := make(map[uint32]*vpptypes.IPIPTunnel)
	for _, tunnel := range p.ipipIfs {
		indexTunnel[tunnel.SwIfIndex] = tunnel
	}
	p.log.Infof("Rescanning existing routes")
	p.ipipRoutes = make(map[uint32]map[string]bool)
	routes, err := p.vpp.GetRoutes(0, false)
	if err != nil {
		p.log.Errorf("Error listing routes: %v", err)
	}
	for _, route := range routes {
		for _, routePath := range route.Paths {
			// Record every route whose path egresses via one of our tunnels.
			_, exists := indexTunnel[routePath.SwIfIndex]
			if exists {
				_, found := p.ipipRoutes[routePath.SwIfIndex]
				if !found {
					p.ipipRoutes[routePath.SwIfIndex] = make(map[string]bool)
				}
				p.ipipRoutes[routePath.SwIfIndex][route.Dst.String()] = true
			}
		}
	}
}
// errorCleanup best-effort deletes a partially-configured tunnel after a
// setup step failed; the deletion error itself is only logged.
func (p *IpipProvider) errorCleanup(tunnel *vpptypes.IPIPTunnel) {
	err := p.vpp.DelIPIPTunnel(tunnel)
	if err != nil {
		p.log.Errorf("Error deleting ipip tunnel %s after error: %v", tunnel.String(), err)
	}
}
// AddConnectivity ensures an IPIP tunnel to cn.NextHop exists (creating and
// configuring one on first use) and installs a route to cn.Dst through it.
// Route bookkeeping is mirrored in p.ipipRoutes so DelConnectivity can tear
// the tunnel down once its last route is removed.
func (p *IpipProvider) AddConnectivity(cn *common.NodeConnectivity) error {
	p.log.Debugf("connectivity(add) IPIP Tunnel to VPP")
	tunnel, found := p.ipipIfs[cn.NextHop.String()]
	if !found {
		tunnel = &vpptypes.IPIPTunnel{
			Dst: cn.NextHop,
		}
		// Pick a node source address of the same family as the next hop.
		ip4, ip6 := p.server.GetNodeIPs()
		if vpplink.IsIP6(cn.NextHop) && ip6 != nil {
			tunnel.Src = *ip6
		} else if !vpplink.IsIP6(cn.NextHop) && ip4 != nil {
			tunnel.Src = *ip4
		} else {
			return fmt.Errorf("Missing node address")
		}
		p.log.Infof("connectivity(add) create IPIP tunnel=%s", tunnel.String())
		swIfIndex, err := p.vpp.AddIPIPTunnel(tunnel)
		if err != nil {
			return errors.Wrapf(err, "Error adding ipip tunnel %s", tunnel.String())
		}
		// Any failure from here on deletes the half-configured tunnel.
		err = p.vpp.InterfaceSetUnnumbered(swIfIndex, common.VppManagerInfo.GetMainSwIfIndex())
		if err != nil {
			p.errorCleanup(tunnel)
			return errors.Wrapf(err, "Error setting ipip tunnel unnumbered")
		}
		// Always enable GSO feature on IPIP tunnel, only a tiny negative effect on perf if GSO is not enabled on the taps
		err = p.vpp.EnableGSOFeature(swIfIndex)
		if err != nil {
			p.errorCleanup(tunnel)
			return errors.Wrapf(err, "Error enabling gso for ipip interface")
		}
		err = p.vpp.CnatEnableFeatures(swIfIndex)
		if err != nil {
			p.errorCleanup(tunnel)
			return errors.Wrapf(err, "Error enabling nat for ipip interface")
		}
		err = p.vpp.InterfaceAdminUp(swIfIndex)
		if err != nil {
			p.errorCleanup(tunnel)
			return errors.Wrapf(err, "Error setting ipip interface up")
		}
		p.log.Debugf("Routing pod->node %s traffic into tunnel (swIfIndex %d)", cn.NextHop.String(), swIfIndex)
		err = p.vpp.RouteAdd(&types.Route{
			Dst: common.ToMaxLenCIDR(cn.NextHop),
			Paths: []types.RoutePath{{
				SwIfIndex: swIfIndex,
				Gw:        nil,
			}},
			Table: common.PodVRFIndex,
		})
		if err != nil {
			p.errorCleanup(tunnel)
			return errors.Wrapf(err, "Error adding route to %s in ipip tunnel %d for pods", cn.NextHop.String(), swIfIndex)
		}
		p.ipipIfs[cn.NextHop.String()] = tunnel
		common.SendEvent(common.CalicoVppEvent{
			Type: common.TunnelAdded,
			New:  swIfIndex,
		})
	}
	p.log.Infof("connectivity(add) using IPIP tunnel=%s", tunnel.String())
	p.log.Debugf("connectivity(add) ipip tunnel route dst=%s via tunnel swIfIndex=%d", cn.Dst.IP.String(), tunnel.SwIfIndex)
	route := &types.Route{
		Dst: &cn.Dst,
		Paths: []types.RoutePath{{
			SwIfIndex: tunnel.SwIfIndex,
			Gw:        nil,
		}},
	}
	err := p.vpp.RouteAdd(route)
	if err != nil {
		return errors.Wrapf(err, "Error Adding route to ipip tunnel")
	}
	// Track the route so DelConnectivity knows when the tunnel is unused.
	_, found = p.ipipRoutes[tunnel.SwIfIndex]
	if !found {
		p.ipipRoutes[tunnel.SwIfIndex] = make(map[string]bool)
	}
	p.ipipRoutes[tunnel.SwIfIndex][route.Dst.String()] = true
	return nil
}
// DelConnectivity removes the route to cn through its IPIP tunnel. When the
// last route using the tunnel is gone it also removes the pod-VRF route,
// deletes the tunnel itself, and emits a TunnelDeleted event.
func (p *IpipProvider) DelConnectivity(cn *common.NodeConnectivity) error {
	tunnel, found := p.ipipIfs[cn.NextHop.String()]
	if !found {
		return errors.Errorf("Deleting unknown ipip tunnel cn=%s", cn.String())
	}
	p.log.Infof("connectivity(del) Removed IPIP connectivity cn=%s swIfIndex=%d", cn.String(), tunnel.SwIfIndex)
	routeToDelete := &types.Route{
		Dst: &cn.Dst,
		Paths: []types.RoutePath{{
			SwIfIndex: tunnel.SwIfIndex,
			Gw:        nil,
		}},
	}
	err := p.vpp.RouteDel(routeToDelete)
	if err != nil {
		return errors.Wrapf(err, "Error deleting ipip tunnel route")
	}
	delete(p.ipipRoutes[tunnel.SwIfIndex], routeToDelete.Dst.String())
	remainingRoutes, found := p.ipipRoutes[tunnel.SwIfIndex]
	if !found || len(remainingRoutes) == 0 {
		p.log.Infof("connectivity(del) all gone. Deleting IPIP tunnel swIfIndex=%d", tunnel.SwIfIndex)
		// Remove the pod->node route installed by AddConnectivity.
		err = p.vpp.RouteDel(&types.Route{
			Dst: common.ToMaxLenCIDR(cn.NextHop),
			Paths: []types.RoutePath{{
				SwIfIndex: tunnel.SwIfIndex,
				Gw:        nil,
			}},
			Table: common.PodVRFIndex,
		})
		if err != nil {
			p.log.Errorf("Error deleting ipip route dst=%s via tunnel swIfIndex=%d %s", cn.NextHop.String(), tunnel.SwIfIndex, err)
		}
		p.log.Infof("connectivity(del) IPIP tunnel=%s", tunnel)
		err := p.vpp.DelIPIPTunnel(tunnel)
		if err != nil {
			// This is the normal teardown path, not error cleanup; the
			// previous message ("... after error") was copy-pasted from
			// errorCleanup and was misleading.
			p.log.Errorf("Error deleting ipip tunnel %s: %v", tunnel.String(), err)
		}
		delete(p.ipipIfs, cn.NextHop.String())
		common.SendEvent(common.CalicoVppEvent{
			Type: common.TunnelDeleted,
			Old:  tunnel.SwIfIndex,
		})
	}
	return nil
}
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package operationparser
import (
"encoding/json"
"errors"
"fmt"
"github.com/trustbloc/sidetree-core-go/pkg/api/operation"
"github.com/trustbloc/sidetree-core-go/pkg/docutil"
"github.com/trustbloc/sidetree-core-go/pkg/patch"
"github.com/trustbloc/sidetree-core-go/pkg/versions/0_1/model"
"github.com/trustbloc/sidetree-core-go/pkg/versions/0_1/operationparser/patchvalidator"
)
// ParseCreateOperation will parse create operation.
// Suffix data is always validated; the delta is validated (including a check
// that its multihash matches the suffix data's delta hash) only when anchor
// is false. The DID unique suffix is derived from the suffix data multihash.
func (p *Parser) ParseCreateOperation(request []byte, anchor bool) (*model.Operation, error) {
	schema, err := p.parseCreateRequest(request)
	if err != nil {
		return nil, err
	}
	// create is not valid if suffix data is not valid
	err = p.ValidateSuffixData(schema.SuffixData)
	if err != nil {
		return nil, err
	}
	if !anchor {
		err = p.ValidateDelta(schema.Delta)
		if err != nil {
			return nil, err
		}
		// verify actual delta hash matches expected delta hash
		err = docutil.IsValidModelMultihash(schema.Delta, schema.SuffixData.DeltaHash)
		if err != nil {
			return nil, fmt.Errorf("parse create operation: delta doesn't match suffix data delta hash: %s", err.Error())
		}
	}
	uniqueSuffix, err := docutil.CalculateModelMultihash(schema.SuffixData, p.MultihashAlgorithm)
	if err != nil {
		return nil, err
	}
	return &model.Operation{
		OperationBuffer: request,
		Type:            operation.TypeCreate,
		UniqueSuffix:    uniqueSuffix,
		Delta:           schema.Delta,
		SuffixData:      schema.SuffixData,
	}, nil
}
// parseCreateRequest deserializes a JSON 'create' request payload.
func (p *Parser) parseCreateRequest(payload []byte) (*model.CreateRequest, error) {
	request := &model.CreateRequest{}
	if err := json.Unmarshal(payload, request); err != nil {
		return nil, err
	}
	return request, nil
}
// ValidateDelta validates delta.
// A valid delta is non-nil, contains at least one patch whose action is both
// enabled for this parser and passes patch validation, and carries an update
// commitment computed with the configured multihash algorithm.
func (p *Parser) ValidateDelta(delta *model.DeltaModel) error {
	if delta == nil {
		return errors.New("missing delta")
	}
	if len(delta.Patches) == 0 {
		return errors.New("missing patches")
	}
	for _, ptch := range delta.Patches {
		action, err := ptch.GetAction()
		if err != nil {
			return err
		}
		if !p.isPatchEnabled(action) {
			return fmt.Errorf("%s patch action is not enabled", action)
		}
		if err := patchvalidator.Validate(ptch); err != nil {
			return err
		}
	}
	if !docutil.IsComputedUsingHashAlgorithm(delta.UpdateCommitment, uint64(p.MultihashAlgorithm)) {
		return fmt.Errorf("next update commitment hash is not computed with the required supported hash algorithm: %d", p.MultihashAlgorithm)
	}
	return nil
}
// isPatchEnabled reports whether action is among the parser's allowed
// patch actions.
func (p *Parser) isPatchEnabled(action patch.Action) bool {
	for _, allowed := range p.Patches {
		if action == patch.Action(allowed) {
			return true
		}
	}
	return false
}
// ValidateSuffixData validates suffix data.
// Both the recovery commitment and the delta hash must be computed with the
// parser's configured multihash algorithm.
func (p *Parser) ValidateSuffixData(suffixData *model.SuffixDataModel) error {
	if suffixData == nil {
		return errors.New("missing suffix data")
	}
	if !docutil.IsComputedUsingHashAlgorithm(suffixData.RecoveryCommitment, uint64(p.MultihashAlgorithm)) {
		return fmt.Errorf("next recovery commitment hash is not computed with the required supported hash algorithm: %d", p.MultihashAlgorithm)
	}
	if !docutil.IsComputedUsingHashAlgorithm(suffixData.DeltaHash, uint64(p.MultihashAlgorithm)) {
		return fmt.Errorf("patch data hash is not computed with the required supported hash algorithm: %d", p.MultihashAlgorithm)
	}
	return nil
}
// validateCreateRequest performs a minimal structural check on a create
// request: only the presence of suffix data is verified here (deeper checks
// live in ValidateSuffixData and ValidateDelta).
func (p *Parser) validateCreateRequest(create *model.CreateRequest) error {
	if create.SuffixData == nil {
		return errors.New("missing suffix data")
	}
	return nil
}
|
package components
import (
"github.com/fananchong/go-xserver/common"
"github.com/fananchong/go-xserver/internal/components/gateway"
"github.com/fananchong/gotcp"
)
// Gateway : the gateway server component.
type Gateway struct {
	// ctx is the shared framework context; Gateway registers itself on it.
	ctx *common.Context
}
// NewGateway : constructor. The new Gateway registers itself on ctx.
func NewGateway(ctx *common.Context) *Gateway {
	gw := &Gateway{
		ctx: ctx,
	}
	gw.ctx.Gateway = gw
	return gw
}
// Start : starts the component. When this process runs as the Gateway
// plugin, it attaches the context to the intranet TCP server and registers
// the intranet session type.
func (gw *Gateway) Start() bool {
	if getPluginType(gw.ctx) == common.Gateway {
		gw.ctx.ServerForIntranet.(*gotcp.Server).SetUserData(gw.ctx)
		gw.ctx.ServerForIntranet.RegisterSessType(gateway.IntranetSession{})
	}
	return true
}
// Close : shuts the component down (currently nothing to release).
func (gw *Gateway) Close() {
}
// OnRecvFromClient : hook for a custom client protocol. data must be
// converted to a format the framework understands; returning done == true
// means the framework takes over handling of the message.
func (gw *Gateway) OnRecvFromClient(data []byte) (done bool) {
	return
}

// RegisterSendToClient : hook to customize the client send path.
func (gw *Gateway) RegisterSendToClient(f common.FuncTypeSendToClient) {
}

// RegisterEncodeFunc : hook to customize the encryption algorithm.
func (gw *Gateway) RegisterEncodeFunc(f common.FuncTypeEncode) {
}

// RegisterDecodeFunc : hook to customize the decryption algorithm.
func (gw *Gateway) RegisterDecodeFunc(f common.FuncTypeDecode) {
}
|
package testdata
// SelectExistsWithWhereBlockQuery describes an EXISTS query over the
// test_user table (aliased "u") with a WHERE block filtering on email.
type SelectExistsWithWhereBlockQuery struct {
	// Exists receives the EXISTS result; the rel tag binds the query to
	// table test_user with alias u.
	Exists bool `rel:"test_user:u"`
	// Where holds the columns for the generated WHERE clause. The @lower
	// directive presumably lower-cases the comparison — TODO confirm
	// against the query generator.
	Where struct {
		Email string `sql:"u.email,@lower"`
	}
}
|
// SPDX-FileCopyrightText: (c) 2018 Daniel Czerwonk
//
// SPDX-License-Identifier: MIT
package server
import (
"context"
"fmt"
"testing"
bnet "github.com/bio-routing/bio-rd/net"
"github.com/bio-routing/bio-rd/route"
"github.com/czerwonk/bioject/pkg/api"
"github.com/czerwonk/bioject/pkg/database"
pb "github.com/czerwonk/bioject/proto"
"github.com/stretchr/testify/assert"
)
// bgpMock is a test double for the BGP layer: it records invocations and
// returns preconfigured results.
type bgpMock struct {
	addResult    error
	addCalled    bool
	removeResult bool
	removeCalled bool
}

// addPath records the call and returns the configured add result.
func (m *bgpMock) addPath(ctx context.Context, pfx *bnet.Prefix, p *route.Path) error {
	m.addCalled = true
	return m.addResult
}

// removePath records the call and returns the configured remove result.
func (m *bgpMock) removePath(ctx context.Context, pfx *bnet.Prefix, p *route.Path) bool {
	m.removeCalled = true
	return m.removeResult
}

// dbMock is a test double for the route store; it only records invocations.
type dbMock struct {
	saveCalled   bool
	deleteCalled bool
}

// Save records the call and always succeeds.
func (m *dbMock) Save(ctx context.Context, route *database.Route) error {
	m.saveCalled = true
	return nil
}

// Delete records the call and always succeeds.
func (m *dbMock) Delete(ctx context.Context, route *database.Route) error {
	m.deleteCalled = true
	return nil
}
// TestAddRoute drives apiServer.AddRoute through a table of requests and
// verifies the response code plus whether the BGP and DB layers were called.
func TestAddRoute(t *testing.T) {
	tests := []struct {
		name         string
		req          *pb.AddRouteRequest
		addResult    error
		expectedCode uint32
		wantBGPCall  bool
		wantDBCall   bool
		wantFail     bool
	}{
		{
			name: "valid route IPv4",
			req: &pb.AddRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop:   []byte{192, 168, 2, 1},
					LocalPref: 200,
					Med:       1,
				},
			},
			wantBGPCall:  true,
			wantDBCall:   true,
			expectedCode: api.StatusCodeOK,
		},
		{
			name: "valid route IPv6",
			req: &pb.AddRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{0x20, 0x01, 0x06, 0x78, 0x01, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
						Length: 48,
					},
					NextHop: []byte{0x20, 0x01, 0x06, 0x78, 0x01, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff},
				},
			},
			wantBGPCall:  true,
			wantDBCall:   true,
			expectedCode: api.StatusCodeOK,
		},
		{
			// Prefix bytes too short for the declared length.
			name: "invalid prefix",
			req: &pb.AddRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48},
						Length: 24,
					},
					NextHop:   []byte{192, 168, 2, 1},
					LocalPref: 200,
					Med:       1,
				},
			},
			wantFail:     true,
			expectedCode: api.StatusCodeRequestError,
		},
		{
			name: "invalid next hop",
			req: &pb.AddRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop:   []byte{192, 168},
					LocalPref: 200,
					Med:       1,
				},
			},
			wantFail:     true,
			expectedCode: api.StatusCodeRequestError,
		},
		{
			// BGP add fails, so the DB save must not happen.
			name: "error on add",
			req: &pb.AddRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop:   []byte{192, 168, 2, 1},
					LocalPref: 200,
					Med:       1,
				},
			},
			wantFail:     true,
			wantBGPCall:  true,
			wantDBCall:   false,
			addResult:    fmt.Errorf("test"),
			expectedCode: api.StatusCodeProcessingError,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			b := &bgpMock{
				addResult: test.addResult,
			}
			db := &dbMock{}
			// NOTE(review): this local shadows the imported "api" package
			// inside the closure; the package is only needed for the
			// StatusCode constants above, so this works but is fragile.
			api := &apiServer{
				bgp: b,
				db:  db,
			}
			res, err := api.AddRoute(context.Background(), test.req)
			if err != nil {
				assert.True(t, test.wantFail, "unexpected error:", err)
				return
			}
			assert.Equal(t, test.wantBGPCall, b.addCalled, "add called on BGP")
			assert.Equal(t, test.wantDBCall, db.saveCalled, "save called on DB")
			assert.Equal(t, test.expectedCode, res.Code, "code")
		})
	}
}
// TestWithdrawRoute drives apiServer.WithdrawRoute through a table of
// requests and verifies the response code plus whether the BGP and DB
// layers were called.
func TestWithdrawRoute(t *testing.T) {
	tests := []struct {
		name         string
		req          *pb.WithdrawRouteRequest
		removeResult bool
		expectedCode uint32
		wantBGPCall  bool
		wantDBCall   bool
		wantFail     bool
	}{
		{
			name: "valid route IPv4",
			req: &pb.WithdrawRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop: []byte{192, 168, 2, 1},
				},
			},
			wantBGPCall:  true,
			wantDBCall:   true,
			removeResult: true,
			expectedCode: api.StatusCodeOK,
		},
		{
			name: "valid route IPv6",
			req: &pb.WithdrawRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{0x20, 0x01, 0x06, 0x78, 0x01, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
						Length: 48,
					},
					NextHop: []byte{0x20, 0x01, 0x06, 0x78, 0x01, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff},
				},
			},
			wantBGPCall:  true,
			wantDBCall:   true,
			removeResult: true,
			expectedCode: api.StatusCodeOK,
		},
		{
			name: "invalid prefix",
			req: &pb.WithdrawRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48},
						Length: 24,
					},
					NextHop: []byte{192, 168, 2, 1},
				},
			},
			wantFail:     true,
			expectedCode: api.StatusCodeRequestError,
		},
		{
			name: "invalid next hop",
			req: &pb.WithdrawRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop: []byte{192, 168},
				},
			},
			wantFail:     true,
			expectedCode: api.StatusCodeRequestError,
		},
		{
			// NOTE(review): case name kept as in source, but this exercises
			// a remove failure (removeResult=false), not an add.
			name: "error on add",
			req: &pb.WithdrawRouteRequest{
				Route: &pb.Route{
					Prefix: &pb.Prefix{
						Ip:     []byte{194, 48, 228, 0},
						Length: 24,
					},
					NextHop: []byte{192, 168, 2, 1},
				},
			},
			wantFail:     true,
			wantBGPCall:  true,
			wantDBCall:   false,
			removeResult: false,
			expectedCode: api.StatusCodeProcessingError,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			b := &bgpMock{
				removeResult: test.removeResult,
			}
			db := &dbMock{}
			// This local shadows the imported "api" package within the
			// closure; the package constants are only used above.
			api := &apiServer{
				bgp: b,
				db:  db,
			}
			res, err := api.WithdrawRoute(context.Background(), test.req)
			if err != nil {
				assert.True(t, test.wantFail, "unexpected error:", err)
				return
			}
			assert.Equal(t, test.wantBGPCall, b.removeCalled, "remove called on BGP")
			assert.Equal(t, test.wantDBCall, db.deleteCalled, "delete called on DB")
			assert.Equal(t, test.expectedCode, res.Code, "code")
		})
	}
}
// TestPathForRoute verifies conversion of a protobuf route into a BGP path,
// covering IPv4 and IPv6 next hops and rejecting a malformed one.
func TestPathForRoute(t *testing.T) {
	tests := []struct {
		name     string
		route    *pb.Route
		expected *route.BGPPath
		wantFail bool
	}{
		{
			name: "valid path with IPv4 nexthop",
			route: &pb.Route{
				NextHop:   []byte{192, 168, 2, 1},
				LocalPref: 200,
				Med:       1,
			},
			expected: &route.BGPPath{
				ASPath: emptyASPath(),
				BGPPathA: &route.BGPPathA{
					EBGP:      true,
					LocalPref: 200,
					MED:       1,
					NextHop:   bnet.IPv4FromOctets(192, 168, 2, 1).Ptr(),
					Source:    &bnet.IP{},
				},
			},
		},
		{
			name: "valid path with IPv6 nexthop",
			route: &pb.Route{
				NextHop:   []byte{0x20, 0x01, 0x06, 0x78, 0x01, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
				LocalPref: 200,
				Med:       1,
			},
			expected: &route.BGPPath{
				ASPath: emptyASPath(),
				BGPPathA: &route.BGPPathA{
					EBGP:      true,
					LocalPref: 200,
					MED:       1,
					NextHop:   bnet.IPv6FromBlocks(0x2001, 0x0678, 0x01e0, 0, 0, 0, 0, 1).Ptr(),
					Source:    &bnet.IP{},
				},
			},
		},
		{
			// Three bytes is neither a valid IPv4 nor IPv6 address.
			name: "invalid nexthop",
			route: &pb.Route{
				NextHop:   []byte{65, 66, 67},
				LocalPref: 200,
				Med:       1,
			},
			wantFail: true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			s := &apiServer{}
			path, err := s.pathForRoute(test.route)
			if err != nil {
				assert.True(t, test.wantFail, "unexpected error:", err)
				return
			}
			assert.Equal(t, test.expected, path.BGPPath)
		})
	}
}
|
package flags
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"os"
"strings"
)
// ApplyExtraFlags args parses the flags for a certain command from the environment variables.
// It reads the global DEVSPACE_FLAGS variable plus a per-command variable
// derived from the command path (spaces replaced with underscores,
// upper-cased, suffixed with _FLAGS). Environment flags are parsed first and
// os.Args last, so flags given on the real command line take precedence.
// It returns the environment flags that were applied, or nil when none.
func ApplyExtraFlags(cobraCmd *cobra.Command) ([]string, error) {
	envName := strings.ToUpper(strings.Replace(cobraCmd.CommandPath(), " ", "_", -1) + "_FLAGS")
	flags, err := parseCommandLine(os.Getenv("DEVSPACE_FLAGS"))
	if err != nil {
		return nil, err
	}
	commandFlags, err := parseCommandLine(os.Getenv(envName))
	if err != nil {
		return nil, err
	}
	flags = append(flags, commandFlags...)
	if len(flags) == 0 {
		return nil, nil
	}
	err = cobraCmd.ParseFlags(flags)
	if err != nil {
		return nil, err
	}
	// Re-parse the actual command line so it overrides environment flags.
	err = cobraCmd.ParseFlags(os.Args)
	if err != nil {
		return nil, err
	}
	return flags, nil
}
// parseCommandLine splits a shell-like command line into arguments. It
// supports single- and double-quoted sections and backslash escapes, and
// returns an error when a quoted section is left unclosed.
func parseCommandLine(command string) ([]string, error) {
	var args []string
	// state is "start" (between args), "arg" (inside a bare word) or
	// "quotes" (inside a section delimited by `quote`).
	state := "start"
	current := ""
	quote := "\""
	// Fix: this was previously initialized to true, which made the very
	// first character always literal — a leading quote or backslash was
	// never interpreted (e.g. `"a b"` failed with an unclosed-quote error).
	escapeNext := false
	for i := 0; i < len(command); i++ {
		c := command[i]
		if state == "quotes" {
			if string(c) != quote {
				current += string(c)
			} else {
				args = append(args, current)
				current = ""
				state = "start"
			}
			continue
		}
		if escapeNext {
			current += string(c)
			escapeNext = false
			continue
		}
		if c == '\\' {
			escapeNext = true
			continue
		}
		if c == '"' || c == '\'' {
			state = "quotes"
			quote = string(c)
			continue
		}
		if state == "arg" {
			if c == ' ' || c == '\t' {
				args = append(args, current)
				current = ""
				state = "start"
			} else {
				current += string(c)
			}
			continue
		}
		if c != ' ' && c != '\t' {
			state = "arg"
			current += string(c)
		}
	}
	if state == "quotes" {
		return []string{}, errors.New(fmt.Sprintf("Unclosed quote in command line: %s", command))
	}
	if current != "" {
		args = append(args, current)
	}
	return args, nil
}
|
package modulecreate
import (
"github.com/gobuffalo/packr/v2"
"github.com/tendermint/starport/starport/pkg/cosmosver"
)
// templates maps a Cosmos SDK major version to the packr box holding the
// module scaffolding templates for that version.
//
// These need to be created at compile time, otherwise packr2 won't be able
// to find the boxes.
var templates = map[cosmosver.MajorVersion]*packr.Box{
	cosmosver.Launchpad: packr.New("module/create/templates/launchpad", "./launchpad"),
	cosmosver.Stargate:  packr.New("module/create/templates/stargate", "./stargate"),
}
|
package main
import (
"log"
"runtime"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/glfw/v3.2/glfw"
"./controller"
"math/rand"
)
const (
width = 500
height = 500
)
// main opens a fixed-size window, seeds the RNG deterministically, and runs
// the render loop, forwarding mouse presses to the controller in normalized
// coordinates ([-1, 1] on each axis, y pointing up).
func main() {
	// OpenGL/GLFW calls must all happen on the same OS thread.
	runtime.LockOSThread()
	window := initGlfw()
	defer glfw.Terminate()
	initOpenGL()
	rand.Seed(123456)
	con := controller.NewController()
	mouseCallback := func(window *glfw.Window, button glfw.MouseButton, action glfw.Action, mod glfw.ModifierKey) {
		if action != glfw.Press {
			return
		}
		x, y := window.GetCursorPos()
		w, h := window.GetSize()
		// Shift the origin to the window center, then scale to [-1, 1];
		// y is negated so positive y points up.
		x -= float64(w) / 2.0
		y -= float64(h) / 2.0
		x /= float64(w) / 2.0
		y /= -float64(h) / 2.0
		con.SetSquare(x, y)
	}
	window.SetMouseButtonCallback(mouseCallback)
	for !window.ShouldClose() {
		con.Update()
		draw(con, window)
	}
}
// draw clears the frame, lets the controller render, pumps window events and
// presents the back buffer.
func draw(controller controller.Controllable, window *glfw.Window) {
	gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
	controller.Draw()
	glfw.PollEvents()
	window.SwapBuffers()
}
// initGlfw initializes GLFW and returns a non-resizable window with an
// OpenGL 4.1 core-profile context made current. It panics on failure.
func initGlfw() *glfw.Window {
	if err := glfw.Init(); err != nil {
		panic(err)
	}
	glfw.WindowHint(glfw.Resizable, glfw.False)
	glfw.WindowHint(glfw.ContextVersionMajor, 4)
	glfw.WindowHint(glfw.ContextVersionMinor, 1)
	glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile)
	glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True)
	window, err := glfw.CreateWindow(width, height, "Title", nil, nil)
	if err != nil {
		panic(err)
	}
	window.MakeContextCurrent()
	return window
}
// initOpenGL loads the OpenGL function pointers and logs the driver version.
// It panics if the bindings cannot be initialized.
func initOpenGL() {
	if err := gl.Init(); err != nil {
		panic(err)
	}
	version := gl.GoStr(gl.GetString(gl.VERSION))
	log.Println("OpenGL version", version)
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"io/ioutil"
"net/http"
"strings"
"github.com/golang/glog"
"k8s.io/api/admission/v1alpha1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// only allow pods to pull images from specific registry.
// admit decodes an AdmissionReview, verifies the request is about pods, and
// allows the pod only when every container image references gcr.io. It
// returns nil when the payload cannot be decoded or targets another resource.
func admit(data []byte) *v1alpha1.AdmissionReviewStatus {
	ar := v1alpha1.AdmissionReview{}
	if err := json.Unmarshal(data, &ar); err != nil {
		glog.Error(err)
		return nil
	}
	// The externalAdmissionHookConfiguration registered via selfRegistration
	// asks the kube-apiserver only sends admission request regarding pods.
	podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
	if ar.Spec.Resource != podResource {
		glog.Errorf("expect resource to be %s", podResource)
		return nil
	}
	raw := ar.Spec.Object.Raw
	pod := v1.Pod{}
	if err := json.Unmarshal(raw, &pod); err != nil {
		glog.Error(err)
		return nil
	}
	reviewStatus := v1alpha1.AdmissionReviewStatus{}
	for _, container := range pod.Spec.Containers {
		// gcr.io is just an example.
		if !strings.Contains(container.Image, "gcr.io") {
			reviewStatus.Allowed = false
			// Fix: the rejection reason previously misspelled the registry
			// as "grc.io".
			reviewStatus.Result = &metav1.Status{
				Reason: "can only pull image from gcr.io",
			}
			return &reviewStatus
		}
	}
	reviewStatus.Allowed = true
	return &reviewStatus
}
// serve handles an admission review HTTP request: it reads the body,
// verifies the content type, runs the admission check and writes the
// AdmissionReview response as JSON.
func serve(w http.ResponseWriter, r *http.Request) {
	var body []byte
	if r.Body != nil {
		if data, err := ioutil.ReadAll(r.Body); err == nil {
			body = data
		}
	}
	// verify the content type is accurate
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		glog.Errorf("contentType=%s, expect application/json", contentType)
		return
	}
	reviewStatus := admit(body)
	if reviewStatus == nil {
		// Fix: admit returns nil for undecodable or non-pod requests;
		// dereferencing it below used to panic the handler.
		http.Error(w, "malformed admission review request", http.StatusBadRequest)
		return
	}
	ar := v1alpha1.AdmissionReview{
		Status: *reviewStatus,
	}
	resp, err := json.Marshal(ar)
	if err != nil {
		glog.Error(err)
	}
	if _, err := w.Write(resp); err != nil {
		glog.Error(err)
	}
}
// main registers the webhook handler and the external admission hook, then
// serves TLS on :8000 using certificates from the cluster config.
func main() {
	flag.Parse()
	http.HandleFunc("/", serve)
	clientset := getClient()
	server := &http.Server{
		Addr:      ":8000",
		TLSConfig: configTLS(clientset),
	}
	go selfRegistration(clientset, caCert)
	// Fix: the serve error was silently discarded; surface it instead.
	if err := server.ListenAndServeTLS("", ""); err != nil {
		glog.Fatal(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// main prompts for a name on stdin and echoes a greeting.
func main() {
	fmt.Println("mời bạn nhập tên: ")
	reader := bufio.NewReader(os.Stdin)
	name, _ := reader.ReadString('\n')
	fmt.Println("xin chào: -> " + name)
}
package main
import (
"github.com/hydra13142/webui"
"net/http"
"time"
)
// main serves a tiny web UI on :9999 showing a clock label that a timer
// refreshes every 50 ms with the current time.
func main() {
	w := &webui.Window{
		Width:  200,
		Height: 50,
		Sub: []webui.Object{
			// Timer "clock" pushes the formatted current time into Ans["text"].
			&webui.Timer{Common: webui.Common{Id: "clock", Do: func(c *webui.Context) {
				c.Ans["text"] = time.Now().Format("2006/01/02 15:04:05.00 -0700")
			}}, Ms: 50},
			// Read-only text field with id "text" displays the timer output.
			&webui.Text{Common: webui.Common{"text", "", 5, 5, 190, 40, nil}, Readonly: true},
		},
	}
	http.ListenAndServe(":9999", webui.NewHandler(w, "clock.htm", nil))
}
|
package model
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestDatabase_QueryFileById checks that an existing file row can be fetched by id.
func TestDatabase_QueryFileById(t *testing.T) {
	database := PrepareTestDatabase()
	// Query exists row.
	file, err := database.QueryFileById(1)
	assert.NoError(t, err)
	t.Log(file)
}

// TestDatabase_QueryFileByUk checks the unique-key lookup on an existing row.
func TestDatabase_QueryFileByUk(t *testing.T) {
	database := PrepareTestDatabase()
	// Query exists row.
	file, err := database.QueryFileByUk(1, 1)
	assert.NoError(t, err)
	t.Log(file)
}

// TestDatabase_QueryMaxExpireByHash checks querying the maximum expire time
// for a given hash.
func TestDatabase_QueryMaxExpireByHash(t *testing.T) {
	database := PrepareTestDatabase()
	maxExpireTime, err := database.QueryMaxExpireByHash(1)
	assert.NoError(t, err)
	t.Log(maxExpireTime)
}
// TestInsertFileInfo inserts a new file row inside a transaction and commits.
func TestInsertFileInfo(t *testing.T) {
	database := PrepareTestDatabase()
	session := database.DB.NewSession()
	err := session.Begin()
	assert.NoError(t, err)
	defer session.Close()
	id, err := InsertFileInfo(session, 2, 100, "hello.txt", int(time.Now().Local().Unix()))
	if err != nil {
		// Roll back on failure so later tests see a clean database.
		err1 := session.Rollback()
		assert.NoError(t, err1)
		t.Error(err)
		return
	}
	err = session.Commit()
	assert.NoError(t, err)
	t.Log(id)
}

// TestUpdateBtfsFileId updates the BTFS file id column inside a transaction.
func TestUpdateBtfsFileId(t *testing.T) {
	database := PrepareTestDatabase()
	session := database.DB.NewSession()
	err := session.Begin()
	assert.NoError(t, err)
	defer session.Close()
	err = UpdateBtfsFileId(session, 1, 2, 1)
	if err != nil {
		err1 := session.Rollback()
		assert.NoError(t, err1)
		t.Error(err)
		return
	}
	err = session.Commit()
	assert.NoError(t, err)
}

// TestUpdateFileExpireTime sets a new expire time inside a transaction.
func TestUpdateFileExpireTime(t *testing.T) {
	database := PrepareTestDatabase()
	session := database.DB.NewSession()
	err := session.Begin()
	assert.NoError(t, err)
	defer session.Close()
	err = UpdateFileExpireTime(session, time.Now().Unix(), 1, 1)
	if err != nil {
		err1 := session.Rollback()
		assert.NoError(t, err1)
		t.Error(err)
		return
	}
	err = session.Commit()
	assert.NoError(t, err)
}

// TestDeleteFile removes a file row inside a transaction.
func TestDeleteFile(t *testing.T) {
	database := PrepareTestDatabase()
	session := database.DB.NewSession()
	err := session.Begin()
	assert.NoError(t, err)
	defer session.Close()
	err = DeleteFile(session, 3, 1)
	if err != nil {
		err1 := session.Rollback()
		assert.NoError(t, err1)
		t.Error(err)
		return
	}
	err = session.Commit()
	assert.NoError(t, err)
}

// TestReopenFile re-opens a previously closed file row inside a transaction.
func TestReopenFile(t *testing.T) {
	database := PrepareTestDatabase()
	session := database.DB.NewSession()
	err := session.Begin()
	assert.NoError(t, err)
	defer session.Close()
	err = ReopenFile(session, 4, 1, 1578284537)
	if err != nil {
		err1 := session.Rollback()
		assert.NoError(t, err1)
		t.Error(err)
		return
	}
	err = session.Commit()
	assert.NoError(t, err)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.