text stringlengths 11 4.05M |
|---|
package conf
// Kafaka holds the Kafka connection settings loaded from the "kafaka"
// section of the ini configuration file.
// NOTE(review): "Kafaka" looks like a misspelling of "Kafka", but renaming
// would break callers and the ini section name — confirm before changing.
type Kafaka struct {
	Ipaddr string `ini:"ipaddr"` // broker address
	Topic  string `ini:"topic"`  // topic to produce/consume
}
// Tailf holds the log-file path loaded from the "tailf" section of the ini
// configuration file.
type Tailf struct {
	Path string `ini:"path"` // path of the log file to tail
}
// LogConf is the top-level configuration structure; the embedded types map
// to the "kafaka" and "tailf" ini sections respectively.
type LogConf struct {
	Kafaka `ini:"kafaka"`
	Tailf  `ini:"tailf"`
}
|
package model
import (
"github.com/guregu/null"
)
// Subscription represents user's notification settings.
type Subscription struct {
	ID          int      `json:"id"`
	UserID      null.Int `json:"user_id"` // nullable: subscriptions may exist without a user
	ProjectUUID string   `json:"project_uuid" gorm:"ForeignKey:UUID"` // foreign key to the project's UUID
	Email       string   `json:"email"`
	// DeletedAt marks soft deletion.
	// NOTE(review): `Time` is unqualified — it must be declared elsewhere in
	// this package; confirm it is not meant to be null.Time.
	DeletedAt Time `json:"deleted_at"`
}
|
package workflows
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/codepipeline"
"github.com/olekukonko/tablewriter"
"github.com/stelligent/mu/common"
"io"
)
// NewServiceViewer creates a new workflow that displays a service: its
// pipeline state, environment deployments and running tasks, written to the
// supplied writer.
func NewServiceViewer(ctx *common.Context, serviceName string, writer io.Writer) Executor {
	workflow := new(serviceWorkflow)
	input := workflow.serviceInput(ctx, serviceName)
	viewer := workflow.serviceViewer(ctx.StackManager, ctx.StackManager, ctx.PipelineManager, ctx.TaskManager, ctx.Config, writer)
	return newWorkflow(input, viewer)
}
// serviceViewer returns an Executor that renders the service's pipeline
// state (when a pipeline stack exists), its environment deployments, and its
// running tasks to the given writer.
func (workflow *serviceWorkflow) serviceViewer(stackLister common.StackLister, stackGetter common.StackGetter, pipelineStateLister common.PipelineStateLister, taskManager common.TaskManager, config common.Config, writer io.Writer) Executor {
	return func() error {
		stacks, err := stackLister.ListStacks(common.StackTypeService)
		if err != nil {
			return err
		}
		pipelineStackName := common.CreateStackName(common.StackTypePipeline, workflow.serviceName)
		pipelineStack, err := stackGetter.GetStack(pipelineStackName)
		if err == nil {
			// Pipeline stack exists: show its URL and current state.
			fmt.Fprint(writer, common.NewLine)
			fmt.Fprintf(writer, common.SvcPipelineFormat, common.Bold(common.SvcPipelineURLLabel), pipelineStack.Outputs[common.SvcCodePipelineURLKey])
			states, err := pipelineStateLister.ListState(pipelineStack.Outputs[common.SvcCodePipelineNameKey])
			if err != nil {
				return err
			}
			stateTable := buildPipelineStateTable(writer, states)
			stateTable.Render()
			fmt.Fprint(writer, common.NewLine)
		} else {
			// No pipeline stack for this service: show N/A for the URL.
			fmt.Fprint(writer, common.NewLine)
			fmt.Fprintf(writer, common.SvcPipelineFormat, common.Bold(common.SvcPipelineURLLabel), common.NA)
		}
		fmt.Fprintf(writer, common.SvcDeploymentsFormat, common.Bold(common.SvcDeploymentsLabel))
		table := buildEnvTable(writer, stacks, workflow.serviceName)
		table.Render()
		// BUG FIX: the error returned by viewTasks was previously discarded;
		// propagate it to the caller instead of unconditionally returning nil.
		return viewTasks(taskManager, writer, stacks, workflow.serviceName)
	}
}
// buildPipelineStateTable renders one table row per pipeline action, showing
// the stage, action, revision, colorized status (plus any error message) and
// the local time of the last status change.
func buildPipelineStateTable(writer io.Writer, stages []*codepipeline.StageState) *tablewriter.Table {
	table := common.CreateTableSection(writer, common.SvcPipelineTableHeader)
	for _, stage := range stages {
		for _, action := range stage.ActionStates {
			revision := common.LineChar
			if action.CurrentRevision != nil {
				revision = aws.StringValue(action.CurrentRevision.RevisionId)
			}
			status, message, lastUpdate := common.LineChar, common.Empty, common.LineChar
			if exec := action.LatestExecution; exec != nil {
				lastUpdate = aws.TimeValue(exec.LastStatusChange).Local().Format(common.LastUpdateTime)
				status = aws.StringValue(exec.Status)
				if exec.ErrorDetails != nil {
					message = aws.StringValue(exec.ErrorDetails.Message)
				}
			}
			row := []string{
				common.Bold(aws.StringValue(stage.StageName)),
				aws.StringValue(action.ActionName),
				revision,
				fmt.Sprintf(common.KeyValueFormat, colorizeActionStatus(status), message),
				lastUpdate,
			}
			table.Append(row)
		}
	}
	return table
}
// buildEnvTable renders one table row per environment stack that belongs to
// the given service, showing environment, stack name, image, colorized
// status, last update time and service version.
func buildEnvTable(writer io.Writer, stacks []*common.Stack, serviceName string) *tablewriter.Table {
	table := common.CreateTableSection(writer, common.SvcEnvironmentTableHeader)
	for _, stack := range stacks {
		// Skip stacks that belong to other services.
		if stack.Tags[common.SvcCmd] != serviceName {
			continue
		}
		row := []string{
			common.Bold(stack.Tags[common.EnvCmd]),
			stack.Name,
			stack.Parameters[common.SvcImageURLKey],
			fmt.Sprintf(common.KeyValueFormat, colorizeStackStatus(stack.Status), stack.StatusReason),
			stack.LastUpdateTime.Local().Format(common.LastUpdateTime),
			stack.Tags[common.SvcVersionKey],
		}
		table.Append(row)
	}
	return table
}
// viewTasks writes, for each stack belonging to the service, the list of its
// running containers. When serviceName is empty it is taken from each
// stack's service tag.
func viewTasks(taskManager common.TaskManager, writer io.Writer, stacks []*common.Stack, serviceName string) error {
	for _, stack := range stacks {
		stackSvc := stack.Tags[common.SvcCmd]
		if len(serviceName) != common.Zero && stackSvc != serviceName {
			continue
		}
		if len(serviceName) == common.Zero {
			serviceName = stackSvc
		}
		tasks, err := taskManager.ListTasks(stack.Tags[common.EnvCmd], serviceName)
		if err != nil {
			return err
		}
		fmt.Fprintf(writer, common.SvcContainersFormat, common.Bold(common.SvcContainersLabel), common.Bold(serviceName))
		containersTable := buildTaskTable(tasks, writer)
		containersTable.Render()
	}
	return nil
}
// buildTaskTable renders one table row per container of each task, showing
// the task name, container name, instance and private IP.
func buildTaskTable(tasks []common.Task, writer io.Writer) *tablewriter.Table {
	table := common.CreateTableSection(writer, common.SvcTaskContainerHeader)
	for _, task := range tasks {
		taskName := common.Bold(task.Name)
		for _, container := range task.Containers {
			row := []string{taskName, container.Name, container.Instance, container.PrivateIP}
			table.Append(row)
		}
	}
	return table
}
|
package main
import "fmt"
// distributeCandies returns the maximum number of distinct candy kinds one
// sibling can receive when the candies are split evenly in half: the smaller
// of (number of distinct kinds) and (half the total count).
func distributeCandies(candies []int) int {
	kinds := make(map[int]bool, len(candies))
	for _, kind := range candies {
		kinds[kind] = true
	}
	half := len(candies) / 2
	if len(kinds) < half {
		return len(kinds)
	}
	return half
}
// main prints the answers for two sample candy distributions (expected
// output: 3 then 2).
func main() {
	fmt.Println(distributeCandies([]int{1, 1, 2, 2, 3, 3}))
	fmt.Println(distributeCandies([]int{1, 1, 2, 3}))
}
|
// SPDX-License-Identifier: MIT
package mock
import (
"strconv"
"github.com/caixw/apidoc/v7/internal/ast"
)
// GenOptions holds the callbacks used to generate random mock data.
type GenOptions struct {
	// Number returns a random numeric value.
	//
	// It may be a floating point or an integer type.
	Number func(p *ast.Param) any

	// String returns a random-length string.
	String func(p *ast.Param) string

	// Bool returns a random boolean value.
	Bool func() bool

	// SliceSize returns a random number.
	//
	// The value is used to declare a slice length, so it must be a
	// positive integer.
	SliceSize func() int

	// Index returns a value in the range [0, max].
	//
	// The value is used to pick an element from an array.
	Index func(max int) int
}
// isEnum reports whether the parameter declares one or more enum values.
func isEnum(p *ast.Param) bool {
	return len(p.Enums) != 0
}
// generateBool returns a random boolean via the configured Bool callback.
func (g *GenOptions) generateBool() bool {
	return g.Bool()
}
// generateNumber produces a random numeric value for p. For enum parameters
// one of the declared enum values is chosen and parsed as an integer; a
// non-numeric enum value is a documentation defect and triggers a panic.
func (g *GenOptions) generateNumber(p *ast.Param) any {
	if !isEnum(p) {
		return g.Number(p)
	}
	choice := p.Enums[g.Index(len(p.Enums))]
	v, err := strconv.ParseInt(choice.Value.V(), 10, 32)
	if err != nil {
		// This is a documentation definition error — panic directly.
		panic(err)
	}
	return v
}
// generateString produces a random string for p; for enum parameters one of
// the declared enum values is returned instead.
func (g *GenOptions) generateString(p *ast.Param) string {
	if !isEnum(p) {
		return g.String(p)
	}
	return p.Enums[g.Index(len(p.Enums))].Value.V()
}
// generateSliceSize returns a random slice length via the configured
// SliceSize callback.
func (g *GenOptions) generateSliceSize() int {
	return g.SliceSize()
}
|
package settings
import (
"fmt"
"log"
"os"
"strings"
)
const (
	// HostUrlEnvVar names the env var with the base url of the host, used
	// for constructing callback urls.
	HostUrlEnvVar string = "HOST_URL"
	// HostPortEnvVar names the env var Heroku uses to inject the exposed port.
	HostPortEnvVar string = "PORT"
	// DatabaseHostEnvVar names the env var Heroku uses to inject the
	// attached database host.
	DatabaseHostEnvVar string = "DATABASE_URL"
	// ClientIdEnvVar names the env var with the Twitch App Client
	// Identifier used when communicating with Twitch APIs.
	ClientIdEnvVar string = "TWITCH_CLIENT_ID"
	// UsersEnvVar names the env var with a comma delimited list of Twitch
	// user names to subscribe to go-live events for.
	UsersEnvVar string = "TWITCH_USERS"
	// DiscordWebHookIdEnvVar names the discord web hook id env var.
	DiscordWebHookIdEnvVar string = "DISCORD_WEBHOOK_ID"
	// DiscordWebHookTokenEnvVar names the discord web hook token env var.
	DiscordWebHookTokenEnvVar string = "DISCORD_WEBHOOK_TOKEN"
	// DefaultHostUrl is the default host url.
	DefaultHostUrl string = "http://localhost"
	// DefaultPort is the default port value when running locally.
	DefaultPort string = "3001"
)
// Package-level caches: each getter below looks up its environment variable
// once and memoizes the result here.
var (
	twitchClientId      string
	twitchUserNames     []string
	hostUrl             string
	hostPort            string
	discordWebHookId    string
	discordWebHookToken string
	databaseHost        string
)
// DumpEnvironmentVariables is a Debug Function to Dump All Environment Variables to stdout
func DumpEnvironmentVariables() {
fmt.Println("--- ENV Vars ---")
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
fmt.Println(pair[0] + " = " + os.Getenv(pair[0]))
}
fmt.Println("---------------")
}
// GetHostUrl Gets the Host URL base
func GetHostUrl() string {
host := os.Getenv(HostUrlEnvVar)
if host == "" {
host = DefaultHostUrl
}
return host
}
// GetHostPort Gets the port the web app should be hosted on
func GetHostPort() string {
if "" != hostPort {
return hostPort
}
hostPort = os.Getenv(HostPortEnvVar)
if hostPort == "" {
log.Println("$PORT not set. Defaulting to 3001")
hostPort = DefaultPort
}
return hostPort
}
// GetDatabaseHost Gets the database host url
func GetDatabaseHost() string {
if "" != databaseHost {
return databaseHost
}
databaseHost = os.Getenv(DatabaseHostEnvVar)
if "" == databaseHost {
log.Println("$DATABASE_URL not set.")
}
return databaseHost
}
// GetClientId Gets the Client Identifier Header for HTTP Requests
func GetClientId() string {
if "" != twitchClientId {
return twitchClientId
}
twitchClientId = os.Getenv(ClientIdEnvVar)
return twitchClientId
}
// GetUserNames Gets the name of twitch users to listen for go live events
func GetUserNames() []string {
if nil != twitchUserNames {
return twitchUserNames
}
userNames := os.Getenv(UsersEnvVar)
if "" != userNames {
twitchUserNames = strings.Split(userNames, ",")
} else {
twitchUserNames = []string{}
}
return twitchUserNames
}
// GetDiscordHookId gets the Discord WebHook Id
func GetDiscordHookId() string {
if "" != discordWebHookId {
return discordWebHookId
}
discordWebHookId = os.Getenv(DiscordWebHookIdEnvVar)
return discordWebHookId
}
// GetDiscordHookToken gets the Discord WebHook Token
func GetDiscordHookToken() string {
if "" != discordWebHookToken {
return discordWebHookToken
}
discordWebHookToken = os.Getenv(DiscordWebHookTokenEnvVar)
return discordWebHookToken
}
|
package app
import (
"net/http"
)
// IContext is the interface for request contexts: implementations receive
// the response writer and request for the current HTTP exchange.
type IContext interface {
	Config(w http.ResponseWriter, r *http.Request)
}
// Context stores the response writer and request of the current HTTP
// exchange.
type Context struct {
	w http.ResponseWriter
	r *http.Request
}
// Config stores the ResponseWriter and Request on the context, satisfying
// the IContext interface.
func (c *Context) Config(w http.ResponseWriter, r *http.Request) {
	c.w = w
	c.r = r
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package rpc
import (
"crypto/tls"
"net"
"time"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/common/memory"
)
const (
	// IsDRPC is true if drpc is being used.
	IsDRPC = true
)
// mon is the package-level monkit scope used for instrumentation.
var mon = monkit.Package()

// Error wraps all of the errors returned by this package.
var Error = errs.Class("rpc")
//
// timed conns
//

// timedConn wraps a net.Conn so that all reads and writes get the specified timeout and
// return bytes no faster than the rate. If the timeout or rate are zero, they are
// ignored.
type timedConn struct {
	net.Conn             // embedded connection that actually performs the I/O
	rate     memory.Size // max bytes/second; zero disables rate limiting
}
// now returns the current time when rate limiting is enabled, and the zero
// time otherwise (avoiding a clock read when it would go unused).
func (t *timedConn) now() time.Time {
	if t.rate > 0 {
		return time.Now()
	}
	return time.Time{}
}
// delay sleeps for however long is needed to keep the configured rate for an
// operation of n bytes that began at start. It is a no-op when no rate is
// configured.
func (t *timedConn) delay(start time.Time, n int) {
	if t.rate <= 0 {
		return
	}
	expected := time.Duration(n * int(time.Second) / t.rate.Int())
	if actual := time.Since(start); expected > actual {
		time.Sleep(expected - actual)
	}
}
// Read wraps the underlying connection's read and then sleeps as needed to
// enforce the configured rate.
func (t *timedConn) Read(p []byte) (int, error) {
	begin := t.now()
	n, err := t.Conn.Read(p)
	t.delay(begin, n)
	return n, err
}
// Write wraps the underlying connection's write and then sleeps as needed to
// enforce the configured rate.
func (t *timedConn) Write(p []byte) (int, error) {
	begin := t.now()
	n, err := t.Conn.Write(p)
	t.delay(begin, n)
	return n, err
}
//
// tls conn wrapper
//

// tlsConnWrapper is a wrapper around a *tls.Conn that calls Close on the
// underlying connection when closed rather than trying to send a
// notification to the other side which may block forever.
type tlsConnWrapper struct {
	*tls.Conn          // the TLS session; all other methods delegate here
	underlying net.Conn // raw connection closed directly by Close
}
// Close closes the underlying connection, skipping the TLS close-notify
// handshake that *tls.Conn.Close would attempt.
func (t *tlsConnWrapper) Close() error { return t.underlying.Close() }
|
package connection
import (
"github.com/Iteam1337/go-protobuf-wejay/message"
"github.com/Iteam1337/go-udp-wejay/rooms"
)
// handleQueryRooms answers a QueryRooms request with the list of currently
// available rooms matching the requested name, sending an error response
// when the incoming message cannot be interpreted.
func (c *Connection) handleQueryRooms() {
	res := message.QueryRoomsResponse{Ok: false}
	// BUG FIX: use the comma-ok assertion form. The previous bare assertion
	// panicked on a nil or unexpected message type, so the error branch
	// below was unreachable.
	msg, ok := c.msg.(*message.QueryRooms)
	if !ok || msg == nil {
		res.Error = "could not parse input"
		c.send(&res)
		return
	}
	res.Ok = true
	for _, result := range rooms.Available(msg.Name) {
		res.Room = append(res.Room, &message.RefRoom{
			Id:   result.Name,
			Size: int32(result.Size),
		})
	}
	c.send(&res)
}
|
// Copyright 2018-present The Yumcoder Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Author: yumcoder (omid.jn@gmail.com)
//
package datatype
import (
"fmt"
"testing"
)
// sss is an empty struct used as a test fixture; it deliberately does NOT
// implement fmt.Stringer.
type sss struct {
}
// f1 checks whether f implements fmt.Stringer: it prints the value itself
// when it does, and prints false otherwise.
func f1(f interface{}) {
	name, ok := f.(fmt.Stringer)
	if ok {
		fmt.Println(name)
		return
	}
	fmt.Println(ok)
}
func Test_C1(t *testing.T) {
s := &sss{}
f1(s)
}
|
// Copyright 2014 The DevMine Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"database/sql"
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"time"
"github.com/lib/pq"
"labix.org/v2/mgo/bson"
)
// GitHub entities
// GitHub entities: each constant names the GHTorrent dump folder (and BSON
// collection) for one entity type.
const (
	ghUsers             = "users"
	ghOrgMembers        = "org_members"
	ghRepos             = "repos"
	ghRepoCollaborators = "repo_collaborators"
)
// GHTorrent structures for unmarshalling BSON.
type (
	// ghUser represents a GitHub user.
	ghUser struct {
		ID        int64  `bson:"id"`
		Login     string `bson:"login"`
		AvatarURL string `bson:"avatar_url"`
		HTMLURL   string `bson:"html_url"`
		Type      string `bson:"type"` // User or Organization
		Name      string `bson:"name"` // Real name
		Company   string `bson:"company"`
		Bio       string `bson:"bio"`
		Location  string `bson:"location"`
		Email     string `bson:"email"`
		Hireable  bool   `bson:"hireable"`
		Followers int64  `bson:"followers"`
		Following int64  `bson:"following"`
		CreatedAt string `bson:"created_at"`
		UpdatedAt string `bson:"updated_at"`
	}

	// ghOrgMember is a relation between an organization and a user.
	ghOrgMember struct {
		ID    int64  `bson:"id"`
		Login string `bson:"login"`
		Org   string `bson:"org"`
		Type  string `bson:"type"` // should always be "User"
	}

	// ghRepo represents a GitHub repository.
	ghRepo struct {
		ID               int64  `bson:"id"`
		Name             string `bson:"name"`
		FullName         string `bson:"full_name"`
		Description      string `bson:"description"`
		Homepage         string `bson:"homepage"`
		Language         string `bson:"language"`
		DefaultBranch    string `bson:"default_branch"`
		MasterBranch     string `bson:"master_branch"`
		HTMLURL          string `bson:"html_url"`
		CloneURL         string `bson:"clone_url"`
		Fork             bool   `bson:"fork"`
		ForksCount       int64  `bson:"forks_count"`
		OpenIssuesCount  int64  `bson:"open_issues_count"`
		StargazersCount  int64  `bson:"stargazers_count"`
		SubscribersCount int64  `bson:"subscribers_count"`
		WatchersCount    int64  `bson:"watchers_count"`
		SizeInKb         int64  `bson:"size_in_kb"`
		CreatedAt        string `bson:"created_at"` // dates are kept as strings as found in the dump
		UpdatedAt        string `bson:"updated_at"`
		PushedAt         string `bson:"pushed_at"`
		// Repository owner
		Owner struct {
			Login string `bson:"login"`
		} `bson:"owner"`
	}

	// ghRepoCollaborator is a relation between a user and a repository.
	ghRepoCollaborator struct {
		ID    int64  `bson:"id"`
		Login string `bson:"login"`
		Repo  string `bson:"repo"`
		Owner string `bson:"owner"`
	}
)
// Tables fields: the column lists used to generate the INSERT statements for
// each target table; their order must match the Exec argument order in the
// insert* functions below.
var (
	usersFields   = []string{"username", "name", "email"}
	ghUsersFields = []string{
		"user_id",
		"github_id",
		"login",
		"bio",
		"company",
		"email",
		"hireable",
		"location",
		"avatar_url",
		"html_url",
		"followers_count",
		"following_count",
		"created_at",
		"updated_at",
	}
	ghOrgsFields = []string{
		"login",
		"github_id",
		"avatar_url",
		"html_url",
		"name",
		"company",
		"location",
		"email",
		"created_at",
		"updated_at",
	}
	tmpReposFields = []string{
		"name",
		"primary_language",
		"clone_url",
		"clone_path",
		"vcs",
		"full_name",
		"description",
		"homepage",
		"fork",
		"github_id",
		"default_branch",
		"master_branch",
		"html_url",
		"forks_count",
		"open_issues_count",
		"stargazers_count",
		"subscribers_count",
		"watchers_count",
		"created_at",
		"updated_at",
		"pushed_at",
	}
	reposCollabosFields = []string{"user_id", "repository_id"}
	orgMembersFields    = []string{"gh_user_id", "gh_organization_id"}
)
// config holds ght2dm configuration.
type config struct {
	// BSON files folders. The order is kept while processing them.
	//
	// The name of each folder MUST match the name of the GitHub entity in
	// snake case and pluralized (see defined constants).
	GHTorrentFolder []string `json:"ghtorrent_folders"`

	// DevMineDatabase holds the database connection configuration.
	DevMineDatabase devmineDatabase `json:"devmine_database"`
}
// devmineDatabase holds database login information.
//
// PostgreSQL is only database supported for now.
type devmineDatabase struct {
	Host     string `json:"host"`     // host where the database is running
	Port     int    `json:"port"`     // database port
	User     string `json:"user"`     // database user
	Password string `json:"password"` // user's password
	Database string `json:"database"` // DevMine database
	SSLMode  string `json:"ssl_mode"` // SSL mode (disable, require)
}
// readConfig reads the JSON configuration file at path and parses it into a
// config value.
func readConfig(path string) (*config, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
// dumpReader is a reader for BSON files.
type dumpReader struct {
	r io.Reader // the raw BSON dump stream
}
// newDumpReader creates a new dumpReader that reads from r.
func newDumpReader(r io.Reader) *dumpReader {
	return &dumpReader{r: r}
}
// ReadDoc reads the next BSON document from the underlying reader.
//
// It returns io.EOF when no more documents are available, and
// io.ErrUnexpectedEOF when the stream ends mid-document.
func (dr *dumpReader) ReadDoc() ([]byte, error) {
	// A BSON document starts with its total length as a little-endian int32.
	lenBuf := make([]byte, 4)
	// BUG FIX: use io.ReadFull instead of a bare Read. io.Reader.Read may
	// legitimately return fewer bytes than requested without an error, which
	// the old code misreported as a malformed dump (and, for the document
	// body below, silently truncated the document).
	if _, err := io.ReadFull(dr.r, lenBuf); err != nil {
		return nil, err
	}
	var docLen int32
	if err := binary.Read(bytes.NewReader(lenBuf), binary.LittleEndian, &docLen); err != nil {
		return nil, err
	}
	doc := make([]byte, docLen)
	// We copy the length field into the document buffer because it is part of
	// the document and it is expected by bson.Unmarshal.
	copy(doc, lenBuf)
	if _, err := io.ReadFull(dr.r, doc[4:]); err != nil {
		return nil, err
	}
	return doc, nil
}
// importUsers imports a BSON file containing GitHub users into the DevMine
// database.
//
// Each BSON document is either a "User" (inserted into users + gh_users) or
// an "Organization" (inserted into gh_organizations). Documents that fail to
// parse or insert are logged via fail() and skipped rather than aborting the
// whole import.
func importUsers(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	r := newDumpReader(f)
	// Begin a new transaction; the deferred Rollback is a no-op once the
	// transaction has been committed below.
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	defer txn.Rollback()
	// Disable foreign key constraints during the bulk insert; the constraint
	// is re-created just before committing.
	_, err = txn.Exec("ALTER TABLE ONLY gh_users DROP CONSTRAINT gh_users_fk_users")
	if err != nil {
		return err
	}
	userStmt, err := txn.Prepare(genInsQuery("users", usersFields...) + " RETURNING id")
	if err != nil {
		return err
	}
	ghUserStmt, err := txn.Prepare(genInsQuery("gh_users", ghUsersFields...))
	if err != nil {
		return err
	}
	ghOrgStmt, err := txn.Prepare(genInsQuery("gh_organizations", ghOrgsFields...))
	if err != nil {
		return err
	}
	for {
		bs, err := r.ReadDoc()
		if err == io.EOF {
			break
		} else if err != nil {
			fail(err)
			continue
		}
		ghu := ghUser{}
		if err := bson.Unmarshal(bs, &ghu); err != nil {
			fail(path, ":", err)
			continue
		}
		printVerbose("importing gh_user with login", ghu.Login)
		switch ghu.Type {
		case "User":
			// A regular user becomes a users row plus a gh_users row
			// referencing it.
			userID, err := insertUser(txn, userStmt, ghu)
			if err != nil {
				fail(err)
				continue
			}
			if err := insertGhUser(txn, ghUserStmt, ghu, userID); err != nil {
				fail(err)
				continue
			}
		case "Organization":
			if err := insertGhOrg(txn, ghOrgStmt, ghu); err != nil {
				fail(err)
				continue
			}
		default: // should never happen
			fail(fmt.Errorf("invalid type of user %s", ghu.Type))
			continue
		}
	}
	if err := userStmt.Close(); err != nil {
		return err
	}
	if err := ghUserStmt.Close(); err != nil {
		return err
	}
	if err := ghOrgStmt.Close(); err != nil {
		return err
	}
	// Re-enable foreign key constraints.
	_, err = txn.Exec("ALTER TABLE ONLY gh_users ADD CONSTRAINT gh_users_fk_users FOREIGN KEY (user_id) REFERENCES users(id)")
	if err != nil {
		return err
	}
	if err := txn.Commit(); err != nil {
		return err
	}
	return nil
}
// insertGhOrg inserts a GitHub organization into the database.
//
// Unless the global -nocheck flag is set, an organization that already
// exists is silently skipped. The Exec argument order must match
// ghOrgsFields.
func insertGhOrg(txn *sql.Tx, stmt *sql.Stmt, ghu ghUser) error {
	if !*nocheck {
		// fetchOrgID returns 0 when the org is absent and -1 on query error.
		if id := fetchOrgID(txn, ghu.ID); id != 0 {
			if id == -1 {
				return errors.New("impossible to insert github organization with login = " + ghu.Login)
			}
			return nil
		}
	}
	// Some documents only have a creation date, so for these ones, we set the
	// last modification date to the creation date.
	if ghu.UpdatedAt == "" {
		ghu.UpdatedAt = ghu.CreatedAt
	}
	_, err := stmt.Exec(
		ghu.Login,
		ghu.ID,
		ghu.AvatarURL,
		ghu.HTMLURL,
		ghu.Name,
		ghu.Company,
		ghu.Location,
		ghu.Email,
		ghu.CreatedAt,
		ghu.UpdatedAt)
	if err != nil {
		fail(err)
		return errors.New("impossible to insert github organization with login = " + ghu.Login)
	}
	return nil
}
// insertGhUser inserts a GitHub user into the database, linked to the users
// row identified by userID.
//
// Unless the global -nocheck flag is set, a user that already exists is
// silently skipped. The Exec argument order must match ghUsersFields.
func insertGhUser(txn *sql.Tx, stmt *sql.Stmt, ghu ghUser, userID int64) error {
	if !*nocheck {
		// fetchGhUserID returns 0 when the user is absent and -1 on query error.
		if id := fetchGhUserID(txn, ghu.ID); id != 0 {
			if id == -1 {
				return errors.New("impossible to insert github user with login = " + ghu.Login)
			}
			return nil
		}
	}
	// Some documents only have a creation date, so for these ones, we set the
	// last modification date to the creation date.
	if ghu.UpdatedAt == "" {
		ghu.UpdatedAt = ghu.CreatedAt
	}
	_, err := stmt.Exec(
		userID,
		ghu.ID,
		ghu.Login,
		ghu.Bio,
		ghu.Company,
		ghu.Email,
		ghu.Hireable,
		ghu.Location,
		ghu.AvatarURL,
		ghu.HTMLURL,
		ghu.Followers,
		ghu.Following,
		ghu.CreatedAt,
		ghu.UpdatedAt)
	if err != nil {
		fail(err)
		return errors.New("impossible to insert github user with login = " + ghu.Login)
	}
	return nil
}
// insertUser inserts a user into the database and returns the generated
// users.id.
//
// Unless the global -nocheck flag is set, a user that already exists is
// skipped; in that case (and on error) the returned id is 0.
// NOTE(review): on the "already exists" path this returns (0, nil), so the
// caller then inserts a gh_users row with user_id 0 — confirm this is the
// intended behavior.
func insertUser(txn *sql.Tx, stmt *sql.Stmt, ghu ghUser) (int64, error) {
	if !*nocheck {
		if id := fetchUserID(txn, ghu.ID); id != 0 {
			if id == -1 {
				return 0, errors.New("impossible to insert user with login " + ghu.Login)
			}
			return 0, nil
		}
	}
	var userID int64
	// The prepared statement ends with "RETURNING id", so QueryRow yields
	// the new row's id.
	err := stmt.QueryRow(ghu.Login, ghu.Name, ghu.Email).Scan(&userID)
	if err != nil {
		fail(err)
		return 0, errors.New("impossible to insert user with login " + ghu.Login)
	}
	return userID, nil
}
// fetchUserID fetches the user ID corresponding to a given GitHub user ID.
//
// It returns 0 if the user does not already exist in the database and -1 if
// an error occurred while processing the query; errors are logged here
// before returning -1.
func fetchUserID(txn *sql.Tx, githubID int64) int64 {
	var id int64
	err := txn.QueryRow("SELECT user_id FROM gh_users WHERE github_id=$1", githubID).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fail("failed to fetch user id: ", err)
		return -1
	}
	return id
}
// fetchGhUserID fetches the GitHub user ID corresponding to a given GitHub
// user ID.
// It returns 0 if the GitHub user does not already exist in the database and
// -1 if an error occurred while processing the query.
func fetchGhUserID(txn *sql.Tx, githubID int64) int64 {
	var id int64
	err := txn.QueryRow("SELECT id FROM gh_users WHERE github_id=$1", githubID).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fail("failed to fetch github user id: ", err)
		return -1
	}
	return id
}
// fetchOrgID fetches the organization ID corresponding to a given GitHub
// user ID.
// It returns 0 if the organization does not already exist in the database
// and -1 if an error occurred while processing the query.
func fetchOrgID(txn *sql.Tx, githubID int64) int64 {
	var id int64
	err := txn.QueryRow("SELECT id FROM gh_organizations WHERE github_id=$1", githubID).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fail("failed to fetch organization id: ", err)
		return -1
	}
	return id
}
// importRepos imports a BSON file containing GitHub repositories into the
// DevMine database.
//
// Repositories are bulk-loaded into a temporary table via PostgreSQL COPY
// (pq.CopyIn); documents that fail to parse or insert are logged and skipped.
func importRepos(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	r := newDumpReader(f)
	// Begin a new transaction; the deferred Rollback is a no-op after Commit.
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	defer txn.Rollback()
	tmpRepoStmt, err := txn.Prepare(pq.CopyIn("tmp_gh_repositories", tmpReposFields...))
	if err != nil {
		return err
	}
	for {
		bs, err := r.ReadDoc()
		if err == io.EOF {
			break
		} else if err != nil {
			fail(err)
			continue
		}
		ghr := ghRepo{}
		if err := bson.Unmarshal(bs, &ghr); err != nil {
			fail(err)
			continue
		}
		printVerbose("importing gh_repo with clone url", ghr.HTMLURL+".git")
		if err := insertTmpRepo(txn, tmpRepoStmt, ghr); err != nil {
			fail(err)
			continue
		}
	}
	// An Exec with no arguments flushes the buffered COPY data.
	if _, err := tmpRepoStmt.Exec(); err != nil {
		return err
	}
	if err := tmpRepoStmt.Close(); err != nil {
		return err
	}
	if err := txn.Commit(); err != nil {
		return err
	}
	return nil
}
// buildClonePath builds the lowercase on-disk clone path
// <language>/<owner>/<name> for a repository, substituting placeholder
// values for any missing component.
func buildClonePath(ghr ghRepo) string {
	lang, login, name := ghr.Language, ghr.Owner.Login, ghr.Name
	if lang == "" {
		lang = "unknown"
	}
	if login == "" {
		// should NEVER happen!!
		login = "john_doe"
	}
	if name == "" {
		// should NEVER happen!!
		name = "42"
	}
	return strings.ToLower(filepath.Join(lang, login, name))
}
// removeNullByte removes all NUL (0x00) bytes from s.
//
// Null bytes make PostgreSQL insertions to fail, thus this function must
// must be used on every string that could possibly contain a null byte.
func removeNullByte(s string) string {
	// Operate on the string directly: the previous []byte round trip
	// allocated two extra copies for the same result.
	return strings.Replace(s, "\x00", "", -1)
}
// insertTmpRepo inserts a repository into a temporary table in the database.
//
// The Exec argument order must match tmpReposFields.
func insertTmpRepo(txn *sql.Tx, stmt *sql.Stmt, ghr ghRepo) error {
	clonePath := buildClonePath(ghr)
	// Ensure that the dates are not empty strings "", otherwise PosgtreSQL fails
	// to insert the new entry; a nil pointer is inserted as SQL NULL instead.
	createdAt := &ghr.CreatedAt
	if *createdAt == "" {
		createdAt = nil
	}
	updatedAt := &ghr.UpdatedAt
	if *updatedAt == "" {
		updatedAt = nil
	}
	pushedAt := &ghr.PushedAt
	if *pushedAt == "" {
		pushedAt = nil
	}
	_, err := stmt.Exec(
		removeNullByte(ghr.Name),
		removeNullByte(ghr.Language),
		removeNullByte(ghr.CloneURL),
		removeNullByte(clonePath),
		"git", // VCS type: GHTorrent repositories are always git
		removeNullByte(ghr.FullName),
		removeNullByte(ghr.Description),
		removeNullByte(ghr.Homepage),
		ghr.Fork,
		ghr.ID,
		removeNullByte(ghr.DefaultBranch),
		removeNullByte(ghr.MasterBranch),
		removeNullByte(ghr.HTMLURL),
		ghr.ForksCount,
		ghr.OpenIssuesCount,
		ghr.StargazersCount,
		ghr.SubscribersCount,
		ghr.WatchersCount,
		createdAt,
		updatedAt,
		pushedAt)
	if err != nil {
		fail(err)
		return fmt.Errorf("impossible to insert tmp repository with github_id %d", ghr.ID)
	}
	return nil
}
// fetchRepoID fetches the repository ID corresponding to a given GitHub
// repository ID.
//
// It returns 0 if the repository does not already exists in the database and
// -1 if an error occured while processing the query
//
// When an error occurs, this function takes care of logging it before
// returning -1.
func fetchRepoID(txn *sql.Tx, ghr ghRepo) int64 {
	clonePath := buildClonePath(ghr)
	var id int64
	// Match by GitHub id, clone URL or clone path: any of the three
	// identifies an already-imported repository.
	err := txn.QueryRow(`
        SELECT repositories.id
        FROM gh_repositories
        LEFT JOIN repositories ON repositories.id = gh_repositories.repository_id
        WHERE gh_repositories.github_id=$1
        OR repositories.clone_url=$2
        OR repositories.clone_path=$3`, ghr.ID, ghr.CloneURL, clonePath).Scan(&id)
	switch {
	case err == sql.ErrNoRows:
		return 0
	case err != nil:
		fail("failed to fetch repository id: ", err)
		return -1
	}
	return id
}
// importOrgMembers imports a BSon file containing GitHub organization members
// into the DevMine database.
//
// Foreign key constraints on gh_users_organizations are dropped during the
// bulk insert and re-created before committing. Documents that fail to parse
// or insert are logged and skipped.
func importOrgMembers(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	r := newDumpReader(f)
	// Begin a new transaction; the deferred Rollback is a no-op after Commit.
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	defer txn.Rollback()
	// Disable foreign key constraints.
	_, err = txn.Exec("ALTER TABLE ONLY gh_users_organizations DROP CONSTRAINT gh_users_organizations_fk_organization")
	if err != nil {
		return err
	}
	_, err = txn.Exec("ALTER TABLE ONLY gh_users_organizations DROP CONSTRAINT gh_users_organizations_fk_users")
	if err != nil {
		return err
	}
	orgMemberStmt, err := txn.Prepare(genInsQuery("gh_users_organizations", orgMembersFields...))
	if err != nil {
		return err
	}
	for {
		bs, err := r.ReadDoc()
		if err == io.EOF {
			break
		} else if err != nil {
			fail(err)
			continue
		}
		ghom := ghOrgMember{}
		if err := bson.Unmarshal(bs, &ghom); err != nil {
			fail(err)
			continue
		}
		if err := insertOrgMember(txn, orgMemberStmt, ghom); err != nil {
			fail(err)
			continue
		}
	}
	if err := orgMemberStmt.Close(); err != nil {
		return err
	}
	// Re-enable foreign key constraints.
	_, err = txn.Exec("ALTER TABLE ONLY gh_users_organizations ADD CONSTRAINT gh_users_organizations_fk_organization FOREIGN KEY (gh_organization_id) REFERENCES gh_organizations(id)")
	if err != nil {
		return err
	}
	_, err = txn.Exec("ALTER TABLE ONLY gh_users_organizations ADD CONSTRAINT gh_users_organizations_fk_users FOREIGN KEY (gh_user_id) REFERENCES gh_users(id)")
	if err != nil {
		return err
	}
	if err := txn.Commit(); err != nil {
		return err
	}
	return nil
}
// insertOrgMember inserts a GitHub organization member relation into the
// database, unless it already exists (and the -nocheck flag is unset).
func insertOrgMember(txn *sql.Tx, stmt *sql.Stmt, ghom ghOrgMember) error {
	if !*nocheck {
		rows, err := txn.Query(`
        SELECT gh_users_organizations.gh_user_id, gh_users_organizations.gh_organization_id
        FROM gh_users_organizations
        LEFT JOIN gh_users ON gh_users.id = gh_users_organizations.gh_user_id
        LEFT JOIN gh_organizations ON gh_organizations.id = gh_users_organizations.gh_organization_id
        WHERE gh_users.login = $1 AND gh_organizations.login = $2
    `, ghom.Login, ghom.Org)
		// BUG FIX: check the query error BEFORE touching rows. The previous
		// code deferred rows.Close() first, which panics on the nil rows
		// value returned when the query fails.
		if err != nil {
			fail(err)
			return fmt.Errorf("impossible to fetch member organization with id %d", ghom.ID)
		}
		defer rows.Close()
		if rows.Next() {
			var ghUserID, ghOrgID int64
			if err := rows.Scan(&ghUserID, &ghOrgID); err == nil {
				printVerbose(fmt.Sprintf("the gh_users_organizations relation (%d, %d) already exists", ghUserID, ghOrgID))
			}
			return nil // the relation already exist, no need to create it
		}
		// The relation does not already exist, so we can create it.
	}
	ghUserID := fetchGhUserIDFromLogin(txn, ghom.Login)
	if ghUserID <= 0 {
		return fmt.Errorf("failed to retrieve the id of the github user having the login %s", ghom.Login)
	}
	ghOrgID := fetchGhOrgIDFromLogin(txn, ghom.Org)
	if ghOrgID <= 0 {
		return fmt.Errorf("failed to retrieve the id of the github organization having the login %s", ghom.Org)
	}
	if _, err := stmt.Exec(ghUserID, ghOrgID); err != nil {
		fail(err)
		return fmt.Errorf("impossible to insert member organization with id %d", ghom.ID)
	}
	return nil
}
// fetchGhUserIDFromLogin fetches the GitHub user ID corresponding to a given
// login.
// It returns 0 if the GitHub user does not already exist in the database and
// -1 if an error occurred while processing the query.
func fetchGhUserIDFromLogin(txn *sql.Tx, login string) int64 {
	var id int64
	err := txn.QueryRow("SELECT id FROM gh_users WHERE login=$1", login).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fail(fmt.Sprintf("failed to fetch github user with login %s:", login), err)
		return -1
	}
	return id
}
// fetchGhOrgIDFromLogin fetches the GitHub organization ID corresponding to
// a given login.
// It returns 0 if the GitHub organization does not already exist in the
// database and -1 if an error occurred while processing the query.
func fetchGhOrgIDFromLogin(txn *sql.Tx, login string) int64 {
	var id int64
	err := txn.QueryRow("SELECT id FROM gh_organizations WHERE login=$1", login).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fail(fmt.Sprintf("failed to fetch github organization with login %s:", login), err)
		return -1
	}
	return id
}
// importRepoCollabo imports a BSON file containing GitHub repository
// collaborators into the DevMine database.
//
// Documents that fail to parse or insert are logged and skipped. Note that,
// unlike importOrgMembers, the constraint DROP statements here are commented
// out while the matching ADD statements below are still executed — presumably
// the constraints were dropped out-of-band; confirm before relying on this.
func importRepoCollabo(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	r := newDumpReader(f)
	// Begin a new transaction; the deferred Rollback is a no-op after Commit.
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	defer txn.Rollback()
	// Disable foreign key constraints.
	/*_, err = txn.Exec("ALTER TABLE ONLY users_repositories DROP CONSTRAINT users_repositories_fk_repository")
	if err != nil {
		return err
	}
	_, err = txn.Exec("ALTER TABLE ONLY users_repositories DROP CONSTRAINT users_repositories_fk_users")
	if err != nil {
		return err
	}*/
	repoCollaboStmt, err := txn.Prepare(genInsQuery("users_repositories", reposCollabosFields...))
	if err != nil {
		return err
	}
	for {
		bs, err := r.ReadDoc()
		if err == io.EOF {
			break
		} else if err != nil {
			fail(err)
			continue
		}
		ghrc := ghRepoCollaborator{}
		if err := bson.Unmarshal(bs, &ghrc); err != nil {
			fail(err)
			continue
		}
		printVerbose("importing repo_collaborators with login", ghrc.Login, ", owner", ghrc.Owner, "and repo", ghrc.Repo)
		if err := insertRepoCollabo(txn, repoCollaboStmt, ghrc); err != nil {
			fail(err)
			continue
		}
	}
	if err := repoCollaboStmt.Close(); err != nil {
		return err
	}
	// Re-enable foreign key constraints.
	_, err = txn.Exec("ALTER TABLE ONLY users_repositories ADD CONSTRAINT users_repositories_fk_repository FOREIGN KEY (repository_id) REFERENCES repositories(id)")
	if err != nil {
		return err
	}
	_, err = txn.Exec("ALTER TABLE ONLY users_repositories ADD CONSTRAINT users_repositories_fk_users FOREIGN KEY (user_id) REFERENCES users(id)")
	if err != nil {
		return err
	}
	if err := txn.Commit(); err != nil {
		return err
	}
	return nil
}
// insertRepoCollabo inserts a GitHub repository collaborator into the database.
//
// Unless -nocheck is given, it first queries for an existing
// users_repositories relation and silently succeeds when one is found. The
// query error is checked before touching rows (the original inspected rows
// first, which only worked because a failed Query returns nil rows).
func insertRepoCollabo(txn *sql.Tx, stmt *sql.Stmt, ghrc ghRepoCollaborator) error {
	if !*nocheck {
		// NOTE(review): this joins gh_repositories.id = repositories.id
		// whereas fetchRepoIDFromFullname joins on
		// gh_repositories.repository_id — confirm which column is the FK.
		rows, err := txn.Query(`
SELECT users_repositories.user_id, users_repositories.repository_id
FROM users_repositories
LEFT JOIN users ON users.id = users_repositories.user_id
LEFT JOIN gh_users ON gh_users.user_id = users.id
LEFT JOIN repositories ON repositories.id = users_repositories.repository_id
LEFT JOIN gh_repositories ON gh_repositories.id = repositories.id
WHERE gh_users.login = $1 AND gh_repositories.full_name = $2
`, ghrc.Login, ghrc.Owner+"/"+ghrc.Repo)
		if err != nil {
			fail(err)
			return fmt.Errorf("impossible to fetch repo collaborator with id %d", ghrc.ID)
		}
		defer rows.Close()
		if rows.Next() {
			var userID, repoID int64
			if err := rows.Scan(&userID, &repoID); err == nil {
				printVerbose(fmt.Sprintf("the users_repositories relation (%d, %d) already exists", userID, repoID))
			}
			return nil // the relation already exists, no need to create it
		}
	}
	ghUserID := fetchGhUserIDFromLogin(txn, ghrc.Login)
	if ghUserID <= 0 {
		return fmt.Errorf("failed to retrieve github user id with login %s", ghrc.Login)
	}
	ghRepoID := fetchRepoIDFromFullname(txn, ghrc.Owner+"/"+ghrc.Repo)
	if ghRepoID <= 0 {
		// Report the repository fullname we actually looked up, not the login.
		return fmt.Errorf("failed to retrieve github repository id with fullname %s", ghrc.Owner+"/"+ghrc.Repo)
	}
	if _, err := stmt.Exec(ghUserID, ghRepoID); err != nil {
		fail(err)
		return fmt.Errorf("impossible to insert repository collaborator with id %d", ghrc.ID)
	}
	return nil
}
// fetchRepoIDFromFullname fetches the repository ID corresponding to a
// given GitHub repository fullname.
// It returns 0 if the repository does not already exist in the
// database and -1 if an error occurred while processing the query.
func fetchRepoIDFromFullname(txn *sql.Tx, fullname string) int64 {
	const query = `
SELECT repositories.id AS repo_id
FROM repositories
LEFT JOIN gh_repositories ON gh_repositories.repository_id = repositories.id
WHERE gh_repositories.full_name=$1
`
	var id int64
	switch err := txn.QueryRow(query, fullname).Scan(&id); err {
	case nil:
		return id
	case sql.ErrNoRows:
		// No such repository: a legitimate "not found", not an error.
		return 0
	default:
		fail("failed to fetch repository id: ", err)
		return -1
	}
}
// genInsQuery generates an INSERT statement for tableName with the given
// columns, using PostgreSQL positional placeholders ($1, $2, ...).
// Example: genInsQuery("t", "a", "b") == "INSERT INTO t(a,b)\nVALUES($1,$2)\n".
func genInsQuery(tableName string, fields ...string) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "INSERT INTO %s(%s)\n", tableName, strings.Join(fields, ","))
	buf.WriteString("VALUES(")
	// `for i := range` instead of `for i, _ := range` (go vet / gofmt clean).
	for i := range fields {
		if i > 0 {
			buf.WriteString(",")
		}
		fmt.Fprintf(&buf, "$%d", i+1)
	}
	buf.WriteString(")\n")
	return buf.String()
}
// A fileInfoList is just a wrapper around a slice of os.FileInfo that
// implements the sort.Interface. In other words, it is a sortable list of
// os.FileInfo. They are sorted by the date (the one present in the file name)
// in descending order.
type fileInfoList []os.FileInfo

func (fil fileInfoList) Len() int { return len(fil) }

func (fil fileInfoList) Swap(i, j int) { fil[i], fil[j] = fil[j], fil[i] }

// dumpDate parses the "YYYY-MM-DD.bson" file name of entry k into a time.
func (fil fileInfoList) dumpDate(k int) (time.Time, error) {
	return time.Parse("2006-01-02", strings.TrimSuffix(fil[k].Name(), ".bson"))
}

func (fil fileInfoList) Less(i, j int) bool {
	di, err := fil.dumpDate(i)
	if err != nil {
		// this should never happen since file must have a correct name
		fail(err)
		return false
	}
	dj, err := fil.dumpDate(j)
	if err != nil {
		// this should never happen since file must have a correct name
		fail(err)
		return false
	}
	return di.After(dj)
}
// visit walks the dump files under path, most recent first, and imports each
// one according to entity (which selects the import routine and is also used
// as the log prefix). Files whose name is not a "YYYY-MM-DD.bson" date are
// skipped. Per-file import errors are logged but do not stop the walk.
func visit(path, entity string) error {
	fis, err := ioutil.ReadDir(path)
	if err != nil {
		return err
	}
	fil := fileInfoList(fis)
	sort.Sort(fil)
	// Compile the file-name pattern once instead of once per file; the
	// pattern is a constant so MustCompile cannot panic at runtime.
	dumpName := regexp.MustCompile(`[0-9]{4}-[0-9]{2}-[0-9]{2}\.bson`)
	for _, fi := range fil {
		if !dumpName.MatchString(fi.Name()) {
			fmt.Printf("[%s] skipped '%s'\n", entity, fi.Name())
			continue
		}
		fmt.Printf("[%s] processing '%s'\n", entity, fi.Name())
		fullpath := filepath.Join(path, fi.Name())
		var err error
		// All cases now follow the same shape; the previous inline
		// `if err = ...; err != nil { break }` wrappers were redundant since
		// err is checked right after the switch anyway.
		switch entity {
		case ghUsers:
			err = importUsers(fullpath)
		case ghOrgMembers:
			err = importOrgMembers(fullpath)
		case ghRepos:
			err = importRepos(fullpath)
		case ghRepoCollaborators:
			err = importRepoCollabo(fullpath)
		}
		if err != nil {
			fail(fmt.Sprintf("failed to import bson '%s': %v", fullpath, err))
		}
	}
	return nil
}
// fatal logs an error to stderr and exits with status 1.
// With -d enabled, the message is prefixed with the caller's file:line.
func fatal(a ...interface{}) {
	// XXX: avoid code duplication (mirrors fail below)
	var prefix string
	if *dflag {
		file, line := "???", 0
		if _, f, l, ok := runtime.Caller(1); ok {
			file, line = filepath.Base(f), l
		}
		prefix = fmt.Sprintf("[%s:%d]", file, line)
	}
	fmt.Fprintln(os.Stderr, prefix+fmt.Sprint(a...))
	os.Exit(1)
}
// fail logs an error to stderr without exiting.
// With -d enabled, the message is prefixed with the caller's file:line.
func fail(a ...interface{}) {
	var prefix string
	if *dflag {
		file, line := "???", 0
		if _, f, l, ok := runtime.Caller(1); ok {
			file, line = filepath.Base(f), l
		}
		prefix = fmt.Sprintf("[%s:%d]", file, line)
	}
	fmt.Fprintln(os.Stderr, prefix+fmt.Sprint(a...))
}
// printVerbose only prints messages when the verbose mode is enabled by vflag.
func printVerbose(a ...interface{}) {
	if !*vflag {
		return
	}
	fmt.Println(a...)
}
// db is the database session shared by all import routines.
var db *sql.DB

// setupDB opens the database connection pool and verifies it is reachable.
// This must only be called once and from the main().
func setupDB(cfg devmineDatabase) error {
	dbURL := fmt.Sprintf(
		"user='%s' password='%s' host='%s' port=%d dbname='%s' sslmode='%s'",
		cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Database, cfg.SSLMode)
	var err error
	db, err = sql.Open("postgres", dbURL)
	if err != nil {
		return err
	}
	// sql.Open does not establish a connection by itself; Ping makes
	// configuration errors surface here rather than on the first query.
	return db.Ping()
}
// Command line options.
var (
	// vflag enables progress messages printed by printVerbose.
	vflag = flag.Bool("v", false, "enable verbose mode")
	// dflag prefixes error messages with the caller's file:line.
	dflag = flag.Bool("d", false, "enable debug mode")
	// nocheck skips the duplicate-detection query before each insert.
	nocheck = flag.Bool("nocheck", false, "do not check if an entry is already present in the database (only use when there is no duplicate)")
)
// main parses the command line, connects to the DevMine database and imports
// every configured GHTorrent folder in turn.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [config]\n\n", os.Args[0])
		fmt.Fprintln(os.Stderr, "Available options:")
		flag.PrintDefaults()
		os.Exit(1)
	}
	flag.Parse()
	if flag.NArg() != 1 {
		fmt.Fprintln(os.Stderr, "invalid # of arguments")
		flag.Usage() // exits with status 1
	}
	conf, err := readConfig(flag.Arg(0))
	if err != nil {
		fatal(err)
	}
	if err = setupDB(conf.DevMineDatabase); err != nil {
		fatal(err)
	}
	defer db.Close()
	for _, folder := range conf.GHTorrentFolder {
		if err = visit(folder, filepath.Base(folder)); err != nil {
			fatal(err)
		}
	}
}
|
package model_test
import (
"github.com/Lunchr/luncher-api/db/model"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo specs for model.Token: random generation (NewToken), parsing from
// a UUID-style hex string (TokenFromString) and the inverse formatting
// (String). The hex literals below are the byte-wise decoding of
// "EF4120DA-0302-BCEE-712B-1C258D2FB6D4".
var _ = Describe("RegistrationAccessToken", func() {
	Describe("Token", func() {
		Describe("NewToken", func() {
			It("doesn't return duplicate items", func() {
				// Two consecutive tokens must differ (randomness smoke test).
				t1, err := model.NewToken()
				Expect(err).NotTo(HaveOccurred())
				t2, err := model.NewToken()
				Expect(err).NotTo(HaveOccurred())
				Expect(t1).NotTo(Equal(t2))
			})
		})
		Describe("TokenFromString", func() {
			It("creates a correct token from a known string", func() {
				t, err := model.TokenFromString("EF4120DA-0302-BCEE-712B-1C258D2FB6D4")
				Expect(err).NotTo(HaveOccurred())
				Expect(t).To(Equal(model.Token{0xef, 0x41, 0x20, 0xda, 0x3, 0x2, 0xbc, 0xee, 0x71, 0x2b,
					0x1c, 0x25, 0x8d, 0x2f, 0xb6, 0xd4}))
			})
		})
		Describe("String", func() {
			It("creates a correct string from known token", func() {
				t := model.Token{0xef, 0x41, 0x20, 0xda, 0x3, 0x2, 0xbc, 0xee, 0x71, 0x2b, 0x1c, 0x25, 0x8d,
					0x2f, 0xb6, 0xd4}
				Expect(t.String()).To(Equal("EF4120DA-0302-BCEE-712B-1C258D2FB6D4"))
			})
		})
	})
})
|
package server
import (
"context"
"io"
"testing"
"github.com/dhaifley/dlib"
"github.com/dhaifley/dlib/ptypes"
"github.com/dhaifley/dlib/dauth"
"github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc"
)
// MockUserAccess is a test double for the server's user store: every query
// yields one fixed user (ID 1, "test") and every delete reports one row.
type MockUserAccess struct {
	DBS dlib.SQLExecutor
}

// singleUserResult returns a buffered channel that yields the canonical test
// user once and is then closed.
func singleUserResult() <-chan dlib.Result {
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		u := dauth.User{ID: 1, User: "test"}
		ch <- dlib.Result{Val: &u}
	}()
	return ch
}

func (m *MockUserAccess) GetUsers(opt *dauth.UserFind) <-chan dlib.Result {
	return singleUserResult()
}

func (m *MockUserAccess) GetUserByID(id int64) <-chan dlib.Result {
	return m.GetUsers(nil)
}

func (m *MockUserAccess) DeleteUsers(opt *dauth.UserFind) <-chan dlib.Result {
	ch := make(chan dlib.Result, 256)
	go func() {
		defer close(ch)
		ch <- dlib.Result{Num: 1}
	}()
	return ch
}

func (m *MockUserAccess) DeleteUserByID(id int64) <-chan dlib.Result {
	return m.DeleteUsers(nil)
}

func (m *MockUserAccess) SaveUser(a *dauth.User) <-chan dlib.Result {
	return singleUserResult()
}

func (m *MockUserAccess) SaveUsers(a []dauth.User) <-chan dlib.Result {
	return m.SaveUser(nil)
}
// MockRFAuthGetUsersServer is a fake gRPC server stream that records every
// UserResponse sent through it so tests can inspect the results.
type MockRFAuthGetUsersServer struct {
	grpc.ServerStream
	Results []ptypes.UserResponse
}

// Send appends msg to Results instead of writing it to a real stream.
func (m *MockRFAuthGetUsersServer) Send(msg *ptypes.UserResponse) error {
	m.Results = append(m.Results, *msg)
	return nil
}
// MockRFAuthSaveUsersServer is a fake bidirectional gRPC stream: it records
// sent responses and serves exactly one UserRequest before signalling EOF.
type MockRFAuthSaveUsersServer struct {
	grpc.ServerStream
	Results []ptypes.UserResponse
	Count   int16 // number of requests already handed out by Recv
}

// Send appends msg to Results instead of writing it to a real stream.
func (m *MockRFAuthSaveUsersServer) Send(msg *ptypes.UserResponse) error {
	m.Results = append(m.Results, *msg)
	return nil
}

// Recv returns one fixed request on the first call, then io.EOF, emulating a
// client that streams a single user and closes.
func (m *MockRFAuthSaveUsersServer) Recv() (*ptypes.UserRequest, error) {
	if m.Count < 1 {
		msg := ptypes.UserRequest{
			ID:   1,
			User: "test",
		}
		m.Count++
		return &msg, nil
	}
	return nil, io.EOF
}
func TestServerGetUsers(t *testing.T) {
ma := MockUserAccess{}
lm, _ := test.NewNullLogger()
svr := Server{Users: &ma, Log: lm}
var stream MockRFAuthGetUsersServer
err := svr.GetUsers(&ptypes.UserRequest{User: "test"}, &stream)
if err != nil {
t.Error(err)
}
if stream.Results[0].ID != 1 {
t.Errorf("ID expected: 1, got %v", stream.Results[0].ID)
}
}
func TestServerSaveUsers(t *testing.T) {
ma := MockUserAccess{}
lm, _ := test.NewNullLogger()
svr := Server{Users: &ma, Log: lm}
var stream MockRFAuthSaveUsersServer
err := svr.SaveUsers(&stream)
if err != nil {
t.Error(err)
}
if stream.Results[0].ID != 1 {
t.Errorf("ID expected: 1, got %v", stream.Results[0].ID)
}
}
func TestServerDeleteUsers(t *testing.T) {
ma := MockUserAccess{}
lm, _ := test.NewNullLogger()
svr := Server{Users: &ma, Log: lm}
res, err := svr.DeleteUsers(context.Background(), &ptypes.UserRequest{ID: 1})
if err != nil {
t.Error(err)
}
if res.Num != 1 {
t.Errorf("Num expected: 1, got %v", res.Num)
}
}
|
package main
import (
"bytes"
"html/template"
"os"
"strings"
)
// Template names an HTML template file under ./templates and carries the
// key/value data to render into it.
type Template struct {
	Name string            // file name without the ".html" extension
	Data map[string]string // values injected into the template; "body" doubles as the fallback
}

// Render returns the string markup for the template. Each data value is
// HTML-escaped and newlines are turned into <br> tags. On any failure
// (unknown working directory, missing/broken template file, execution error)
// it falls back to the raw "body" value instead of panicking — the original
// used template.Must, which panicked when the template file was absent.
func (t *Template) Render() string {
	var markup bytes.Buffer
	data := make(map[string]template.HTML, len(t.Data))
	wd, err := os.Getwd()
	if err != nil {
		return t.Data["body"]
	}
	for k, v := range t.Data {
		safe := template.HTMLEscapeString(v)
		data[k] = template.HTML(strings.Replace(safe, "\n", "<br>", -1))
	}
	path := strings.Join([]string{wd, "/templates/", t.Name, ".html"}, "")
	emailTemplate, err := template.ParseFiles(path)
	if err != nil {
		return t.Data["body"]
	}
	if err := emailTemplate.Execute(&markup, data); err != nil {
		return t.Data["body"]
	}
	return markup.String()
}
|
package httpexpect
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestDurationFailed(t *testing.T) {
chain := makeChain(newMockReporter(t))
chain.fail("fail")
ts := time.Second
value := &Duration{chain, &ts}
value.chain.assertFailed(t)
value.Equal(ts)
value.NotEqual(ts)
value.Gt(ts)
value.Ge(ts)
value.Lt(ts)
value.Le(ts)
value.InRange(ts, ts)
}
func TestDurationNil(t *testing.T) {
chain := makeChain(newMockReporter(t))
ts := time.Second
value := &Duration{chain, nil}
value.chain.assertOK(t)
value.Equal(ts)
value.NotEqual(ts)
value.Gt(ts)
value.Ge(ts)
value.Lt(ts)
value.Le(ts)
value.InRange(ts, ts)
}
func TestDurationSet(t *testing.T) {
chain := makeChain(newMockReporter(t))
ts := time.Second
value := &Duration{chain, &ts}
value.IsSet()
value.chain.assertOK(t)
value.chain.reset()
value.NotSet()
value.chain.assertFailed(t)
value.chain.reset()
}
func TestDurationUnset(t *testing.T) {
chain := makeChain(newMockReporter(t))
value := &Duration{chain, nil}
value.IsSet()
value.chain.assertFailed(t)
value.chain.reset()
value.NotSet()
value.chain.assertOK(t)
value.chain.reset()
}
func TestDurationEqual(t *testing.T) {
reporter := newMockReporter(t)
value := NewDuration(reporter, time.Second)
assert.Equal(t, time.Second, value.Raw())
value.Equal(time.Second)
value.chain.assertOK(t)
value.chain.reset()
value.Equal(time.Minute)
value.chain.assertFailed(t)
value.chain.reset()
value.NotEqual(time.Minute)
value.chain.assertOK(t)
value.chain.reset()
value.NotEqual(time.Second)
value.chain.assertFailed(t)
value.chain.reset()
}
func TestDurationGreater(t *testing.T) {
reporter := newMockReporter(t)
value := NewDuration(reporter, time.Second)
value.Gt(time.Second - 1)
value.chain.assertOK(t)
value.chain.reset()
value.Gt(time.Second)
value.chain.assertFailed(t)
value.chain.reset()
value.Ge(time.Second - 1)
value.chain.assertOK(t)
value.chain.reset()
value.Ge(time.Second)
value.chain.assertOK(t)
value.chain.reset()
value.Ge(time.Second + 1)
value.chain.assertFailed(t)
value.chain.reset()
}
func TestDurationLesser(t *testing.T) {
reporter := newMockReporter(t)
value := NewDuration(reporter, time.Second)
value.Lt(time.Second + 1)
value.chain.assertOK(t)
value.chain.reset()
value.Lt(time.Second)
value.chain.assertFailed(t)
value.chain.reset()
value.Le(time.Second + 1)
value.chain.assertOK(t)
value.chain.reset()
value.Le(time.Second)
value.chain.assertOK(t)
value.chain.reset()
value.Le(time.Second - 1)
value.chain.assertFailed(t)
value.chain.reset()
}
func TestDurationInRange(t *testing.T) {
reporter := newMockReporter(t)
value := NewDuration(reporter, time.Second)
value.InRange(time.Second, time.Second)
value.chain.assertOK(t)
value.chain.reset()
value.InRange(time.Second-1, time.Second)
value.chain.assertOK(t)
value.chain.reset()
value.InRange(time.Second, time.Second+1)
value.chain.assertOK(t)
value.chain.reset()
value.InRange(time.Second+1, time.Second+2)
value.chain.assertFailed(t)
value.chain.reset()
value.InRange(time.Second-2, time.Second-1)
value.chain.assertFailed(t)
value.chain.reset()
value.InRange(time.Second+1, time.Second-1)
value.chain.assertFailed(t)
value.chain.reset()
}
|
package utils
import (
"strings"
"unicode"
)
// ToSnake converts a given string to snake case. Word boundaries are upper
// case letters and the '-' and '.' separators, so "FooBar", "foo-bar" and
// "foo.bar" all become "foo_bar". Runs of upper case letters split into
// single-letter words ("HTTPServer" -> "h_t_t_p_server"), matching the
// historical behavior.
//
// The original implementation indexed the byte string s with rune indices
// (rs := []rune(s) but s[lastPos:i]), corrupting words after any multibyte
// rune; all slicing now happens on the rune slice.
func ToSnake(s string) string {
	rs := []rune(s)
	var words []string
	lastPos := 0
	for i := 1; i < len(rs); i++ {
		switch {
		case unicode.IsUpper(rs[i]):
			words = append(words, string(rs[lastPos:i]))
			lastPos = i
		case rs[i] == '-', rs[i] == '.':
			words = append(words, string(rs[lastPos:i]))
			i++ // the separator itself is dropped
			lastPos = i
		}
	}
	// Append the last word, if any.
	if lastPos < len(rs) {
		words = append(words, string(rs[lastPos:]))
	}
	// Lowercase the words and join them once instead of growing a string
	// with += in a loop.
	for k, word := range words {
		words[k] = strings.ToLower(word)
	}
	return strings.Join(words, "_")
}
// ToCamel returns a string converted from snake case to UpperCamelCase:
// every word is capitalized, eg "foo_bar" -> "FooBar".
func ToCamel(s string) string {
	return snakeToCamel(s, true)
}

// ToCamelLower returns a string converted from snake case to lowerCamelCase:
// the first word is left as-is, eg "foo_bar" -> "fooBar".
func ToCamelLower(s string) string {
	return snakeToCamel(s, false)
}
// snakeToCamel converts a snake case string to camel case. When upperCase is
// true the first word is capitalized too (UpperCamelCase); otherwise it is
// kept unchanged (lowerCamelCase). Empty words produced by consecutive
// underscores are dropped, as before.
func snakeToCamel(s string, upperCase bool) string {
	// strings.Builder avoids the quadratic += concatenation of the original.
	var b strings.Builder
	for i, word := range strings.Split(s, "_") {
		if (upperCase || i > 0) && len(word) > 0 {
			w := []rune(word)
			w[0] = unicode.ToUpper(w[0])
			b.WriteString(string(w))
		} else {
			b.WriteString(word)
		}
	}
	return b.String()
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifi
import (
"bytes"
"context"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"chromiumos/tast/errors"
"chromiumos/tast/remote/bundles/cros/wifi/wifiutil"
"chromiumos/tast/remote/network/ip"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/remote/wificell/hostapd"
"chromiumos/tast/remote/wificell/pcap"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the ConnectMBO test with the tast framework. Two parameter
// variants exist: the boolean Val tells the test body whether to also check
// assoc-request IEs, which is skipped on Marvell (fullMAC) devices — see the
// comment near the end of ConnectMBO.
func init() {
	testing.AddTest(&testing.Test{
		Func: ConnectMBO,
		Desc: "Verifies that the MBO IE and other MBO-related capability bits are set",
		Contacts: []string{
			"chromeos-wifi-champs@google.com", // WiFi oncall rotation; or http://b/new?component=893827
		},
		Attr:         []string{"group:wificell", "wificell_func"},
		ServiceDeps:  []string{wificell.TFServiceName},
		Fixture:      "wificellFixtRouterAsPcap",
		SoftwareDeps: []string{"mbo", "rrm_support"},
		Params: []testing.Param{
			{
				// Non-Marvell devices: full IE checks (Val == true).
				ExtraHardwareDeps: hwdep.D(hwdep.WifiNotMarvell()),
				Val:               true,
			},
			{
				// Marvell devices: skip assoc-request IE checks (Val == false).
				Name:              "marvell",
				ExtraHardwareDeps: hwdep.D(hwdep.WifiMarvell()),
				Val:               false,
			},
		},
	})
}
// ConnectMBO connects the DUT to an MBO-enabled AP while capturing traffic,
// then inspects the captured probe requests (and, on softMAC devices, assoc
// requests) for the Extended Capabilities, MBO-OCE and RM Enabled
// Capabilities information elements.
func ConnectMBO(ctx context.Context, s *testing.State) {
	tf := s.FixtValue().(*wificell.TestFixture)
	// MAC randomization is disabled so the pcap can be filtered by the DUT's
	// real transmitter address below.
	ctx, restore, err := tf.WifiClient().DisableMACRandomize(ctx)
	if err != nil {
		s.Fatal("Failed to disable MAC randomization: ", err)
	}
	defer func() {
		if err := restore(); err != nil {
			s.Error("Failed to restore MAC randomization: ", err)
		}
	}()
	// Get the MAC address of WiFi interface.
	iface, err := tf.ClientInterface(ctx)
	if err != nil {
		s.Fatal("Failed to get WiFi interface of DUT: ", err)
	}
	ipr := ip.NewRemoteRunner(s.DUT().Conn())
	mac, err := ipr.MAC(ctx, iface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	// AP configuration with MBO and beacon-report (RRM) support enabled.
	apOps := []hostapd.Option{
		hostapd.MBO(),
		hostapd.RRMBeaconReport(),
		hostapd.Channel(1),
		hostapd.Mode(hostapd.Mode80211acMixed),
		hostapd.HTCaps(hostapd.HTCapHT40),
		hostapd.VHTChWidth(hostapd.VHTChWidth20Or40),
	}
	pcapPath, _, err := wifiutil.ConnectAndCollectPcap(ctx, tf, apOps)
	if err != nil {
		s.Fatal("Failed to collect packet: ", err)
	}
	s.Log("Start analyzing pcap")
	// Only frames sent by the DUT with a valid FCS are considered.
	filters := []pcap.Filter{
		pcap.Dot11FCSValid(),
		pcap.TransmitterAddress(mac),
	}
	probePackets, err := pcap.ReadPackets(pcapPath, append(filters, pcap.TypeFilter(layers.LayerTypeDot11MgmtProbeReq, nil))...)
	if err != nil {
		s.Fatal("Failed to read probe request packets: ", err)
	}
	s.Logf("Total %d probe requests found", len(probePackets))
	assocPackets, err := pcap.ReadPackets(pcapPath, append(filters, pcap.TypeFilter(layers.LayerTypeDot11MgmtAssociationReq, nil))...)
	if err != nil {
		s.Fatal("Failed to read association request packets: ", err)
	}
	s.Logf("Total %d assoc requests found", len(assocPackets))
	// checkIEs scans p's information elements and returns an error unless it
	// finds (a) an Extended Capabilities IE advertising BSS Transition,
	// (b) a Wi-Fi Alliance MBO-OCE vendor IE (OUI 50:6F:9A, type 0x16) with a
	// Cellular Data Capabilities attribute, and (c) — for non-probe frames —
	// an RM Enabled Capabilities IE with passive/active/table measurement
	// bits set.
	checkIEs := func(p gopacket.Packet, isProbe bool) error {
		containsExt := false
		containsMBO := false
		containsRM := false
		for _, l := range p.Layers() {
			element, ok := l.(*layers.Dot11InformationElement)
			if !ok {
				continue
			}
			if element.ID == layers.Dot11InformationElementIDExtCapability {
				containsExt = true
				if int(element.Length) < 3 {
					return errors.New("Extended Capability IE not long enough")
				}
				if (element.Info[2] & 0x08) == 0 {
					return errors.New("Extended Capability IE does not contain BSS Transition capability")
				}
			}
			if element.ID == layers.Dot11InformationElementIDVendor {
				if int(element.Length) < 7 ||
					bytes.Compare(element.OUI[:3], []byte{0x50, 0x6F, 0x9A}) != 0 ||
					element.OUI[3] != 0x16 {
					continue
				}
				// Walk the (attribute ID, length, payload) list.
				// NOTE(review): Info[i+1] and Info[i+2] are read without
				// checking that they are in bounds; a truncated attribute
				// list would panic. Confirm upstream frame validation makes
				// this impossible.
				for i := 0; i < len(element.Info); {
					attrID := element.Info[i]
					attrLen := element.Info[i+1]
					// Check that the Cellular Data Capabilities attribute is present
					if attrID == 0x03 && attrLen == 1 && element.Info[i+2] >= 0x01 && element.Info[i+2] <= 0x03 {
						containsMBO = true
						break
					}
					i += 2 + int(attrLen)
				}
			}
			if element.ID == layers.Dot11InformationElementIDRMEnabledCapabilities {
				containsRM = true
				if int(element.Length) < 1 {
					return errors.New("RM Enabled Capabilities IE not long enough")
				}
				if (element.Info[0] & 0x10) == 0 {
					return errors.New("RM Enabled Capabilities IE missing Passive Measurement support")
				}
				if (element.Info[0] & 0x20) == 0 {
					return errors.New("RM Enabled Capabilities IE missing Active Measurement support")
				}
				if (element.Info[0] & 0x40) == 0 {
					return errors.New("RM Enabled Capabilities IE missing Table Measurement support")
				}
			}
		}
		if !containsExt {
			return errors.New("Extended Capabilities IE missing")
		} else if !containsMBO {
			return errors.New("MBO-OCE IE missing")
		} else if !isProbe && !containsRM {
			return errors.New("RM Enabled Capabilities IE missing")
		}
		return nil
	}
	s.Log("Checking probe request packets")
	for _, p := range probePackets {
		layer := p.Layer(layers.LayerTypeDot11MgmtProbeReq)
		if layer == nil {
			s.Fatal("Found packet without ProbeReq layer")
		}
		req := layer.(*layers.Dot11MgmtProbeReq)
		content := req.LayerContents()
		// Re-parse the probe request body as a list of information elements.
		e := gopacket.NewPacket(content, layers.LayerTypeDot11InformationElement, gopacket.NoCopy)
		if err := e.ErrorLayer(); err != nil {
			s.Log("Error: ", err)
			continue
		}
		if err := checkIEs(e, true); err != nil {
			s.Fatal("Probe request IEs missing: ", err)
		}
	}
	// We skip the assoc request packet check for Marvell devices because
	// they are fullMAC devices, meaning wpa_supplicant can't inject the MBO
	// IEs into the assoc request packet like it does in softMAC devices.
	// We still check to make sure it can associate properly above, but it's
	// less important to check that the IEs are there.
	notMarvell := s.Param().(bool)
	if notMarvell {
		s.Log("Checking assoc request packets")
		for _, p := range assocPackets {
			if err := checkIEs(p, false); err != nil {
				s.Fatal("Assoc request IEs missing: ", err)
			}
		}
	}
}
|
package dataStruct
// Stact is a LIFO stack of ints backed by a LinkedList.
// NOTE(review): "Stact" looks like a typo for "Stack"; the name is kept since
// it is exported and may have callers.
type Stact struct {
	ll *LinkedList
}

// NewStack returns an empty, ready-to-use stack.
func NewStack() *Stact {
	return &Stact{ll: &LinkedList{}}
}

// Push places val on top of the stack.
func (s *Stact) Push(val int) {
	s.ll.AddNode(val)
}

// Pop removes and returns the most recently pushed value.
func (s *Stact) Pop() int {
	back := s.ll.Back()
	s.ll.PopBack()
	return back
}

// Empty reports whether the stack holds no elements.
func (s *Stact) Empty() bool {
	return s.ll.Empty()
}
|
package app
import "html/template"
// Tag is a category node: tags form a tree through ParentID/Children and are
// ordered among siblings by Order.
type Tag struct {
	ID       int    `json:"id"`
	Name     string `json:"name"`
	Level    int    `json:"level"`
	Order    int    `json:"order"`
	Logo     string `json:"logo"`
	ParentID int    `json:"parentid"`
	Children TagArr `json:"children"`
}

// TagArr is a slice of Tag implementing sort.Interface; it sorts ascending
// by the Order field.
type TagArr []Tag

func (s TagArr) Len() int {
	return len(s)
}

func (s TagArr) Less(i, j int) bool {
	return s[i].Order < s[j].Order
}

func (s TagArr) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Article is a blog post. Content is pre-rendered HTML; Top marks pinned
// posts and Readed counts views.
type Article struct {
	ID      int           `json:"id"`
	Title   string        `json:"title"`
	Tags    string        `json:"tags"`
	Readed  int           `json:"readed"`
	Time    string        `json:"time"`
	Cover   string        `json:"cover"`
	Content template.HTML `json:"content"`
	Top     int           `json:"top"`
}

// Tool is an external link entry (name, description, URL and logo).
type Tool struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
	Desc string `json:"desc"`
	Link string `json:"link"`
	Logo string `json:"logo"`
	Top  int    `json:"top"`
}

// SuccRes is the response envelope returned on success.
type SuccRes struct {
	Code int         `json:"code"`
	Msg  string      `json:"msg"`
	Data interface{} `json:"data"`
}

// ErrRes is the response envelope returned on failure.
type ErrRes struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}
|
package main
import (
"fmt"
"github.com/c2technology/tiny-wasteland-tools/vehicle"
)
// Generates a vehicle and prints its chassis, capacity, hit points and
// upgrades to stdout.
func main() {
	// Named "v" so the local does not shadow the imported vehicle package.
	v := vehicle.Generate()
	//TODO: Pull name from input or a name generator
	// fmt.Printf with a trailing newline replaces the redundant
	// fmt.Println(fmt.Sprintf(...)) pattern; output is identical.
	fmt.Printf("Chassis: %s\n", v.Chassis.Name)
	fmt.Printf("Capacity: %d\n", v.Capacity)
	fmt.Printf("Hit Points: %d\n", v.HitPoints)
	fmt.Println("Upgrades:")
	for key, val := range v.Upgrades {
		fmt.Printf(" %s: %s\n", key, val.Description)
	}
}
|
package user
import (
"github.com/gin-gonic/gin"
//"net/http"
"fmt"
"go-antd-admin/utils/result"
"go-antd-admin/utils/e"
"go-antd-admin/models"
"go-antd-admin/middleware/jwt"
)
// Index is a placeholder handler that answers with a fixed greeting.
func Index(c *gin.Context) {
	c.String(200, "Hello World2")
}
// RegistInfo carries the form fields shared by the register and login
// endpoints; both fields are required by the binding.
type RegistInfo struct {
	Name string `form:"name" binding:"required"`
	Pwd  string `form:"pwd" binding:"required"`
}

// userModel is the shared data-access object for user records.
var userModel = new(models.User)
// @Summary 注册用户
// @Produce json
// @Param name query string true "name"
// @Param pwd query string true "password"
// @Success 200 {object} result.Response
// @Failure 500 {object} result.Response
// @Router /register [get]
// UserRes is the payload returned on successful register/login/current-user
// requests: the user record (password blanked by the handler) plus a JWT.
type UserRes struct {
	models.User
	Token string `json:"token"`
}
func Register(c *gin.Context){
var registerInfo RegistInfo
if c.ShouldBind(®isterInfo) == nil {
if _,err:=userModel.GetUserByName(registerInfo.Name);err!=nil{
user:=models.User{Name:registerInfo.Name,Pwd:registerInfo.Pwd}
if user,err=userModel.AddUser(user);err==nil{
user.Pwd=""
jwtTemp:=jwt.NewJWT()
token,_:=jwtTemp.CreateToken(user.Name,int(user.ID))
var userRes UserRes
userRes.User=user
userRes.Token=token
result.SuccessWithData(c,e.REGISTER_SUCCESS,userRes)
}else{
result.Error(c,e.ERROR_CREATE_USER)
}
}else{
result.Error(c,e.ERROR_EXIST_USER )
}
}else{
fmt.Println(registerInfo)
result.Error(c,e.INVALID_PARAMS)
}
}
func Login(c *gin.Context){
var registerInfo RegistInfo
if c.ShouldBind(®isterInfo) == nil {
if user,err:=userModel.GetUserByName(registerInfo.Name);err==nil{
user.Pwd=""
jwtTemp:=jwt.NewJWT()
token,_:=jwtTemp.CreateToken(user.Name,int(user.ID))
var userRes UserRes
userRes.User=user
userRes.Token=token
result.SuccessWithData(c,e.LOGIN_SUCCESS,userRes)
}else{
result.Error(c,e.ERROR_NOT_EXIST_USER)
}
}else{
fmt.Println(registerInfo)
result.Error(c,e.INVALID_PARAMS)
}
}
func CurrentUser(c *gin.Context) {
fmt.Println(c.Get("claims"))
claims,_:=c.MustGet("claims").(*jwt.CustomClaims)
fmt.Println(claims.Name)
if user,err:=userModel.GetUserByName(claims.Name);err==nil{
user.Pwd=""
jwtTemp:=jwt.NewJWT()
token,_:=jwtTemp.CreateToken(user.Name,int(user.ID))
var userRes UserRes
userRes.User=user
userRes.Token=token
result.SuccessWithData(c,e.LOGIN_SUCCESS,userRes)
}else{
result.Error(c,e.ERROR_NOT_EXIST_USER)
}
// currentUser:=map[string]interface{}{
// "address":"美的全球创新中心",
// "avatar":"https://gw.alipayobjects.com/zos/antfincdn/XAosXuNZyF/BiazfanxmamNRoxxVxka.png",
// "country":"China",
// }
// result.SuccessWithData(c,e.LOGIN_SUCCESS,currentUser)
} |
package paxos
import (
"errors"
"fmt"
"github.com/cmu440-F15/paxosapp/rpc/paxosrpc"
"net"
"net/http"
"net/rpc"
"strings"
"sync"
"time"
"encoding/gob"
"bytes"
)
// proposal is an (N, V) pair: a paxos proposal number and its value.
type proposal struct {
	N int
	V interface{}
}
// paxosNode is one participant in the paxos ring. It keeps per-key acceptor
// state (highest seen / highest accepted proposal), the committed key/value
// storage, and RPC clients to every node in the ring.
type paxosNode struct {
	id      int
	nodes   map[int]*rpc.Client    // RPC clients keyed by node ID
	storage map[string]interface{} // committed key/value pairs; is interface{} right?
	replace bool                   // whether or not this is a replacement node
	numNodes int
	clockMap map[string]int // clock value for each key (used to calculate next proposal number)
	maxpropMap map[string]int // N_h for each key (highest proposal number seen)
	acceptMap map[string]*proposal // N_a, V_a for each key (highest proposal number accepted and corresponding value)
	commitMutexes map[string]*sync.Mutex // things to commit
	storageMutex sync.Mutex // guards storage
	clockMutex   sync.Mutex // guards clockMap
	acceptMutex  sync.Mutex // guards acceptMap / maxpropMap
}
// createNode builds a paxosNode with empty state maps. The RPC clients in
// nodes are filled in later by NewPaxosNode once the ring is connected.
func createNode(srvId, numNodes int, replace bool) *paxosNode {
	// Named fields (rather than the positional literal used before) keep
	// this correct if the struct layout ever changes; the mutexes are
	// usable zero values and need no explicit initialization.
	return &paxosNode{
		id:            srvId,
		nodes:         make(map[int]*rpc.Client),
		storage:       make(map[string]interface{}),
		replace:       replace,
		numNodes:      numNodes,
		clockMap:      make(map[string]int),
		maxpropMap:    make(map[string]int),
		acceptMap:     make(map[string]*proposal),
		commitMutexes: make(map[string]*sync.Mutex),
	}
}
// NewPaxosNode creates a new PaxosNode. This function should return only when
// all nodes have joined the ring, and should return a non-nil error if the node
// could not be started in spite of dialing the other nodes numRetries times.
//
// hostMap is a map from node IDs to their hostports, numNodes is the number
// of nodes in the ring, replace is a flag which indicates whether this node
// is a replacement for a node which failed.
func NewPaxosNode(myHostPort string, hostMap map[int]string, numNodes, srvId, numRetries int, replace bool) (PaxosNode, error) {
	fmt.Println("we out here:", myHostPort)
	// register all the other hosts
	otherHosts := make(map[int]*rpc.Client)
	newNode := createNode(srvId, numNodes, replace)
	// NOTE(review): the error from RegisterName is ignored — confirm
	// registration cannot fail here (eg duplicate registration).
	rpc.RegisterName("PaxosNode", paxosrpc.Wrap(newNode))
	rpc.HandleHTTP()
	// Assumes myHostPort has the form "localhost:<port>" — TODO confirm.
	port := strings.Split(myHostPort, "localhost")[1]
	l, e := net.Listen("tcp", port)
	if e != nil {
		return nil, errors.New("RPC Issue Storage Server\n")
	}
	go http.Serve(l, nil)
	for id, hostport := range hostMap {
		// keep tryin until you give up, return fail if a node in ring fails to connect
		allGood := false
		for retryCount := 0; retryCount < numRetries; retryCount++ {
			cli, err := rpc.DialHTTP("tcp", hostport)
			if err == nil {
				allGood = true
				otherHosts[id] = cli
				break
			}
			time.Sleep(time.Duration(1) * time.Second)
		}
		if !allGood {
			fmt.Println(hostport)
			return nil, errors.New("Failed to connect to a client")
		}
	}
	newNode.nodes = otherHosts
	// Replacement-node catch-up: announce ourselves to every other node,
	// then pull the committed storage (and per-key clocks) from one of them.
	if(replace){
		var helper *rpc.Client
		for id, cli := range newNode.nodes {
			if(id != srvId){
				helper = cli
				args_server := paxosrpc.ReplaceServerArgs{srvId,myHostPort}
				var reply_server paxosrpc.ReplaceServerReply
				if err := cli.Call("PaxosNode.RecvReplaceServer", &args_server, &reply_server); err != nil {
					return nil,err
				}
			}
		}
		// NOTE(review): helper stays nil when nodes contains only this node;
		// the Call below would then panic — confirm hostMap always includes
		// other nodes when replace is set.
		args := paxosrpc.ReplaceCatchupArgs{}
		var reply paxosrpc.ReplaceCatchupReply
		if err := helper.Call("PaxosNode.RecvReplaceCatchup", &args, &reply); err != nil {
			return nil,err
		}
		// The catch-up payload is a gob-encoded map of committed key/values.
		dec := gob.NewDecoder(bytes.NewBuffer(reply.Data))
		var data map[string]interface{}
		err := dec.Decode(&data)
		if err != nil {
			return nil,err
		}
		// Rebuild the per-key clock from the helper's next proposal numbers.
		for key,_ := range data {
			args_num := paxosrpc.ProposalNumberArgs{key}
			var reply_num paxosrpc.ProposalNumberReply
			if err := helper.Call("PaxosNode.GetNextProposalNumber", &args_num, &reply_num); err != nil {
				return nil,err
			}
			newNode.clockMap[key] = (reply_num.N)/numNodes
		}
		newNode.storage = data
	}
	return newNode, nil
}
// GetNextProposalNumber generates a proposal number for args.Key that is
// higher than any this node has seen. Numbers are formed as
// clock*numNodes+id so proposals from distinct nodes never collide.
func (pn *paxosNode) GetNextProposalNumber(args *paxosrpc.ProposalNumberArgs, reply *paxosrpc.ProposalNumberReply) error {
	// clockMap/maxpropMap are shared with the RPC handlers; guard the
	// read-modify-write with the clock mutex (the original touched the maps
	// unsynchronized, a data race under concurrent RPCs).
	pn.clockMutex.Lock()
	defer pn.clockMutex.Unlock()
	// The next clock tick must exceed both our own clock and the clock
	// component of the highest proposal number seen for this key.
	myClock, haveClock := pn.clockMap[args.Key]
	maxSeen, haveMax := pn.maxpropMap[args.Key]
	highestTime := 0
	switch {
	case haveClock && haveMax:
		highestTime = maxSeen / pn.numNodes
		if myClock > highestTime {
			highestTime = myClock
		}
	case haveClock:
		highestTime = myClock
	case haveMax:
		highestTime = maxSeen / pn.numNodes
	}
	pn.clockMap[args.Key] = highestTime + 1
	reply.N = pn.clockMap[args.Key]*pn.numNodes + pn.id
	return nil
}
// asyncCallRecvPrepare issues a synchronous RecvPrepare RPC on cli and routes
// the reply: an OK status sends the acceptor's highest accepted proposal
// (N_a, V_a) to good_replies, otherwise N_a goes to bad_replies.
// NOTE(review): *chan is unnecessary — channels are reference types; plain
// chan parameters would read better (kept for signature stability).
func asyncCallRecvPrepare(cli *rpc.Client, prepArgs *paxosrpc.PrepareArgs, good_replies *chan *proposal, bad_replies *chan int) error {
	var nodeReply paxosrpc.PrepareReply
	if err := cli.Call("PaxosNode.RecvPrepare", prepArgs, &nodeReply); err != nil {
		return err
	}
	if nodeReply.Status == paxosrpc.OK {
		p := proposal{nodeReply.N_a, nodeReply.V_a}
		*good_replies <- &p
	} else {
		*bad_replies <- nodeReply.N_a
	}
	return nil
}
// asyncCallRecvAccept issues a synchronous RecvAccept RPC on cli and signals
// the outcome: a token on good_replies for an OK status, on bad_replies
// otherwise.
func asyncCallRecvAccept(cli *rpc.Client, acceptArgs *paxosrpc.AcceptArgs, good_replies, bad_replies *chan int) error {
	var nodeReply paxosrpc.AcceptReply
	if err := cli.Call("PaxosNode.RecvAccept", acceptArgs, &nodeReply); err != nil {
		return err
	}
	if nodeReply.Status == paxosrpc.OK {
		*good_replies <- 1
	} else {
		*bad_replies <- 1
	}
	return nil
}
// asyncCallRecvCommit issues a PaxosNode.RecvCommit RPC on cli and signals
// completion by sending 1 on comm_replies; RPC failures are returned instead.
func asyncCallRecvCommit(cli *rpc.Client, commitArgs *paxosrpc.CommitArgs, comm_replies *chan int) error {
	var reply paxosrpc.CommitReply
	err := cli.Call("PaxosNode.RecvCommit", commitArgs, &reply)
	if err != nil {
		return err
	}
	*comm_replies <- 1
	return nil
}
// asyncTimeout sleeps for 14 seconds and then fans out timeout signals on
// timeoutChan so every phase of an in-flight Propose (prepare, accept,
// commit) can observe the deadline and stop waiting. Twelve signals are sent
// to cover all select loops that may be blocked; the channel is expected to
// be buffered so these sends cannot block forever.
func asyncTimeout(timeoutChan *chan int) {
	time.Sleep(14 * time.Second)
	fmt.Println("timing")
	// Fix: the original repeated the send statement twelve times verbatim;
	// a loop expresses the same fan-out without the copy-paste.
	for i := 0; i < 12; i++ {
		*timeoutChan <- 1
	}
	fmt.Println("outtie")
}
// Propose drives one full Paxos round (prepare -> accept -> commit) for
// args.Key with proposal number args.N and value args.V, with this node
// acting as proposer. On return, reply.V holds the value stored locally for
// the key. A background timer feeds timeoutChan so each phase's collection
// loop can give up after roughly 14 seconds.
func (pn *paxosNode) Propose(args *paxosrpc.ProposeArgs, reply *paxosrpc.ProposeReply) error {
	// Buffered so the timeout goroutine's sends never block even if no phase
	// is listening anymore.
	timeoutChan := make(chan int, 100)
	go asyncTimeout(&timeoutChan)
	key := args.Key
	N := args.N
	V := args.V
	fmt.Println("\nPROPOSE:", key, N, V)
	// cant end until a commit has been made
	//pn.commitMutexes[key] = &sync.Mutex{}
	//pn.commitMutexes[key].Lock()
	// PREPARE //
	fmt.Println("PREPARE")
	// ask each node in the ring to prepare
	prepArgs := paxosrpc.PrepareArgs{key, N}
	prep_good_replies := make(chan *proposal, pn.numNodes)
	prep_bad_replies := make(chan int, pn.numNodes)
	for i, cli := range pn.nodes {
		fmt.Println("asyncp id, i, key, N:", pn.id, i, prepArgs.Key, prepArgs.N)
		go asyncCallRecvPrepare(cli,
			&prepArgs,
			&prep_good_replies,
			&prep_bad_replies)
	}
	// collect replies from nodes until a majority promises (or we time out)
	fmt.Println("Waiting for Prepare replies")
	max_n := -1
	good_count := 0
L:
	for good_count <= pn.numNodes/2+1 {
		select {
		case p := <-prep_good_replies:
			fmt.Println("Received good prepare-reply")
			N_a := p.N
			V_a := p.V
			// An acceptor that already accepted a proposal reports it; adopt
			// the value of the highest-numbered accepted proposal seen so far
			// (N_a == -1 means the acceptor had accepted nothing).
			if N_a != -1 && max_n < N_a {
				max_n = N_a
				V = V_a
			}
			good_count += 1
		case <-prep_bad_replies:
			fmt.Println("Received bad prepare-reply")
			// NOTE(review): this break exits only the select, not the for
			// loop, so it is effectively a no-op; confirm whether `break L`
			// was intended here.
			break
		case <-timeoutChan:
			fmt.Println("timed out")
			break L
		}
	}
	// the proposing node fails to be the leader
	// (this code should be blocking until a commit is made, and then return that value)
	if good_count <= (pn.numNodes/2 + 1) {
		// NOTE(review): unlike the equivalent accept-phase branch below, this
		// branch does not return, so a failed prepare still falls through to
		// the ACCEPT phase — verify whether a `return nil` is missing.
		// pn.commitMutexes[key].Lock()
		reply.V = pn.storage[key]
		// pn.commitMutexes[key].Unlock()
	}
	////////////////////////////////////////////////////////////////////////////////////////
	// ACCEPT //
	fmt.Println("ACCEPT")
	// ask each node in the ring to accept the (possibly adopted) value
	acceptArgs := paxosrpc.AcceptArgs{key, N, V}
	acc_good_replies := make(chan int, pn.numNodes)
	acc_bad_replies := make(chan int, pn.numNodes)
	for i, cli := range pn.nodes {
		fmt.Println("asynca id, i, key, N, V:", pn.id, i, acceptArgs.Key, acceptArgs.N, acceptArgs.V)
		go asyncCallRecvAccept(cli,
			&acceptArgs,
			&acc_good_replies,
			&acc_bad_replies)
	}
	// collect replies from nodes until a majority accepts (or we time out)
	good_count = 0
L1:
	for good_count <= (pn.numNodes/2 + 1) {
		select {
		case <-acc_good_replies:
			good_count += 1
			fmt.Println("Received good accept-reply")
		case <-acc_bad_replies:
			fmt.Println("Received bad accept-reply")
			// NOTE(review): as above, this break exits only the select.
			break
		case <-timeoutChan:
			break L1
		}
	}
	// the proposing node fails to be the leader
	// (this code should be blocking until a commit is made, and then reply with that value)
	if good_count <= (pn.numNodes/2 + 1) {
		// pn.commitMutexes[key].Lock()
		reply.V = pn.storage[key]
		// pn.commitMutexes[key].Unlock()
		return nil
	}
	////////////////////////////////////////////////////////////////////////////////////////
	// COMMIT //
	fmt.Println("COMMIT")
	// tell every node in the ring to commit the chosen value; if this node
	// has itself accepted a proposal for the key, commit that value instead.
	prop, ok := pn.acceptMap[key]
	commitArgs := paxosrpc.CommitArgs{key, V}
	comm_replies := make(chan int, pn.numNodes)
	if ok {
		commitArgs = paxosrpc.CommitArgs{key, prop.V}
	}
	for i, cli := range pn.nodes {
		fmt.Println("asynccomm. id, key, value:", pn.id, i, commitArgs.Key, commitArgs.V)
		go asyncCallRecvCommit(cli,
			&commitArgs,
			&comm_replies)
	}
	// wait until all nodes have committed (or the round times out)
	good_count = 0
L2:
	for good_count < pn.numNodes {
		select {
		case <-comm_replies:
			good_count += 1
			fmt.Println("Received commit-reply")
		case <-timeoutChan:
			break L2
		}
	}
	fmt.Println("DONE")
	// Report whatever value ended up committed locally for the key.
	pn.storageMutex.Lock()
	reply.V = pn.storage[key]
	pn.storageMutex.Unlock()
	return nil
}
// GetValue reports the value this node has committed locally for args.Key,
// setting reply.Status to KeyFound/KeyNotFound accordingly.
// NOTE: this consults only local storage, not a quorum of nodes.
func (pn *paxosNode) GetValue(args *paxosrpc.GetValueArgs, reply *paxosrpc.GetValueReply) error {
	pn.storageMutex.Lock()
	defer pn.storageMutex.Unlock()
	if val, ok := pn.storage[args.Key]; ok {
		reply.V = val
		reply.Status = paxosrpc.KeyFound
		return nil
	}
	reply.Status = paxosrpc.KeyNotFound
	return nil
}
// RecvPrepare handles a Paxos phase-1 prepare request. The node rejects any
// proposal numbered below the highest it has promised for the key; otherwise
// it promises args.N and reports its previously accepted proposal, or
// (-1, nil) if it has accepted none.
func (pn *paxosNode) RecvPrepare(args *paxosrpc.PrepareArgs, reply *paxosrpc.PrepareReply) error {
	highest, seen := pn.maxpropMap[args.Key]
	if seen && args.N < highest {
		reply.Status = paxosrpc.Reject
		return nil
	}
	pn.maxpropMap[args.Key] = args.N
	reply.Status = paxosrpc.OK
	if accepted, ok := pn.acceptMap[args.Key]; ok {
		reply.N_a, reply.V_a = accepted.N, accepted.V
	} else {
		reply.N_a, reply.V_a = -1, nil
	}
	return nil
}
// RecvAccept handles a Paxos phase-2 accept request. Proposals numbered below
// the highest promised number for the key are rejected; otherwise the node
// records (args.N, args.V) as its accepted proposal and raises its promise.
func (pn *paxosNode) RecvAccept(args *paxosrpc.AcceptArgs, reply *paxosrpc.AcceptReply) error {
	if highest, seen := pn.maxpropMap[args.Key]; seen && args.N < highest {
		reply.Status = paxosrpc.Reject
		return nil
	}
	pn.maxpropMap[args.Key] = args.N
	pn.acceptMap[args.Key] = &proposal{args.N, args.V}
	reply.Status = paxosrpc.OK
	return nil
}
// RecvCommit finalizes a Paxos round for args.Key: the chosen value is stored
// and the in-flight accepted proposal for the key is discarded.
// NOTE(review): the acceptMap delete is not guarded by a mutex here — confirm
// whether concurrent access to acceptMap is possible.
func (pn *paxosNode) RecvCommit(args *paxosrpc.CommitArgs, reply *paxosrpc.CommitReply) error {
	pn.storageMutex.Lock()
	pn.storage[args.Key] = args.V
	pn.storageMutex.Unlock()
	delete(pn.acceptMap, args.Key)
	fmt.Println("Committing. id, key, value:", pn.id, args.Key, args.V)
	return nil
}
// RecvReplaceServer swaps the RPC client for the node identified by
// args.SrvID with a fresh connection to args.Hostport (used when a crashed
// node is replaced).
func (pn *paxosNode) RecvReplaceServer(args *paxosrpc.ReplaceServerArgs, reply *paxosrpc.ReplaceServerReply) error {
	conn, err := rpc.DialHTTP("tcp", args.Hostport)
	if err != nil {
		return err
	}
	pn.nodes[args.SrvID] = conn
	return nil
}
// RecvReplaceCatchup serializes this node's committed key/value store with
// gob so a replacement node can catch up to the current state.
func (pn *paxosNode) RecvReplaceCatchup(args *paxosrpc.ReplaceCatchupArgs, reply *paxosrpc.ReplaceCatchupReply) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(pn.storage); err != nil {
		return err
	}
	reply.Data = buf.Bytes()
	return nil
}
|
package sequencing
// NeedlemanWunsch globally aligns the byte sequences x and y, returning the
// two gap-padded sequences z and w (a '-' marks a gap). gap is the gap
// penalty, similarity scores a pair of symbols, and f is an optional
// pre-allocated len(x) x len(y) scoring grid (pass nil to allocate one).
// Example: ABCDEF and ABCCDEF can be aligned as AB-CDEF and ABCCDEF.
// https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm
func NeedlemanWunsch(x []byte, y []byte, gap int, similarity func(byte, byte) int, f [][]int) (z []byte, w []byte) {
	// Lazily allocate the scoring grid when the caller did not supply one.
	if f == nil {
		f = make([][]int, len(x))
		for row := range f {
			f[row] = make([]int, len(y))
		}
	}
	// Boundary row/column: aligning against an empty prefix costs pure gaps.
	for i := range x {
		f[i][0] = i * gap
	}
	for j := range y {
		f[0][j] = j * gap
	}
	// Fill the grid: each cell takes the best of match/mismatch, deletion,
	// or insertion.
	for i := 1; i < len(x); i++ {
		for j := 1; j < len(y); j++ {
			f[i][j] = max3(
				f[i-1][j-1]+similarity(x[i], y[j]),
				f[i-1][j]+gap,
				f[i][j-1]+gap,
			)
		}
	}
	// Trace back from the bottom-right corner, prepending symbols (or gap
	// markers) so the alignments come out in forward order.
	z, w = []byte{}, []byte{}
	i, j := len(x)-1, len(y)-1
	for i > 0 || j > 0 {
		switch {
		case i > 0 && j > 0 && f[i][j] == f[i-1][j-1]+similarity(x[i], y[j]): // match
			z = append([]byte{x[i]}, z...)
			w = append([]byte{y[j]}, w...)
			i, j = i-1, j-1
		case i > 0 && f[i][j] == f[i-1][j]+gap: // deletion
			z = append([]byte{x[i]}, z...)
			w = append([]byte{'-'}, w...)
			i--
		default: // insertion
			z = append([]byte{'-'}, z...)
			w = append([]byte{y[j]}, w...)
			j--
		}
	}
	// Both cursors reached the origin: emit the first symbol pair.
	if i == 0 && j == 0 {
		z = append([]byte{x[i]}, z...)
		w = append([]byte{y[j]}, w...)
	}
	return z, w
}
|
package sgml
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Table-driven check that predefinedAttribute maps each predefined attribute
// name to its rendered replacement text (typically an HTML entity).
var _ = DescribeTable("predefined attributes",
	func(code, rendered string) {
		Expect(predefinedAttribute(code)).To(Equal(rendered))
	},
	Entry("sp", "sp", " "),
	Entry("blank", "blank", ""),
	Entry("empty", "empty", ""),
	Entry("nbsp", "nbsp", "&#160;"),
	Entry("zwsp", "zwsp", "&#8203;"),
	Entry("wj", "wj", "&#8288;"),
	Entry("apos", "apos", "&#39;"),
	Entry("quot", "quot", "&#34;"),
	Entry("lsquo", "lsquo", "&#8216;"),
	Entry("rsquo", "rsquo", "&#8217;"),
	Entry("ldquo", "ldquo", "&#8220;"),
	Entry("rdquo", "rdquo", "&#8221;"),
	Entry("deg", "deg", "&#176;"),
	Entry("plus", "plus", "&#43;"),
	Entry("brvbar", "brvbar", "&#166;"),
	Entry("vbar", "vbar", "|"),
	Entry("amp", "amp", "&amp;"),
	Entry("lt", "lt", "&lt;"),
	Entry("gt", "gt", "&gt;"),
	Entry("startsb", "startsb", "["),
	Entry("endsb", "endsb", "]"),
	Entry("caret", "caret", "^"),
	Entry("asterisk", "asterisk", "*"),
	Entry("tilde", "tilde", "~"),
	Entry("backslash", "backslash", `\`),
	Entry("backtick", "backtick", "`"),
	Entry("two-colons", "two-colons", "::"),
	// NOTE(review): "two-semicolons" maps to a single ";" — confirm this is
	// the intended rendering rather than ";;".
	Entry("two-semicolons", "two-semicolons", ";"),
	Entry("cpp", "cpp", "C++"),
)
|
package types
import (
"github.com/caos/zitadel/internal/config/systemdefaults"
"github.com/caos/zitadel/internal/crypto"
iam_model "github.com/caos/zitadel/internal/iam/model"
"github.com/caos/zitadel/internal/notification/templates"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
view_model "github.com/caos/zitadel/internal/user/repository/view/model"
)
// PhoneVerificationCodeData carries the template arguments used when
// rendering the phone-verification message text.
type PhoneVerificationCodeData struct {
	// UserID identifies the user the verification code is sent to.
	UserID string
}
// SendPhoneVerificationCode decrypts the phone verification code, renders the
// configured message text with the user's notify arguments plus the code, and
// sends the result to the user via SMS. It returns any decryption, template,
// or send error.
func SendPhoneVerificationCode(text *iam_model.MessageTextView, user *view_model.NotifyUser, code *es_model.PhoneCode, systemDefaults systemdefaults.SystemDefaults, alg crypto.EncryptionAlgorithm) error {
	codeString, err := crypto.DecryptString(code.Code, alg)
	if err != nil {
		return err
	}
	var args = mapNotifyUserToArgs(user)
	args["Code"] = codeString
	// First pass: substitute the user/code arguments into the raw text.
	text.Text, err = templates.ParseTemplateText(text.Text, args)
	if err != nil {
		// Bug fix: this error was previously ignored and silently overwritten
		// by the second ParseTemplateText call below.
		return err
	}
	// Second pass: resolve remaining placeholders against the code data.
	codeData := &PhoneVerificationCodeData{UserID: user.ID}
	template, err := templates.ParseTemplateText(text.Text, codeData)
	if err != nil {
		return err
	}
	return generateSms(user, template, systemDefaults.Notifications, true)
}
|
package main
import (
"strconv"
"testing"
)
// TestStringConcat verifies that += appends to a string as expected.
func TestStringConcat(t *testing.T) {
	greeting := "hello"
	greeting += " there"
	if greeting != "hello there" {
		t.Errorf("string concat doesn't behave as you expect, got %v", greeting)
	}
}
// TestStringConversion verifies that strconv.Atoi parses a decimal string.
func TestStringConversion(t *testing.T) {
	conversion, err := strconv.Atoi("39")
	if err != nil {
		// Bug fix: the error was previously discarded with _; a parse failure
		// should fail the test rather than silently compare against zero.
		t.Fatalf("strconv.Atoi failed: %v", err)
	}
	if conversion != 39 {
		t.Errorf("string conversion to int doesn't behave as you expect, got %v", conversion)
	}
}
|
package video
import (
"github.com/pokemium/worldwide/pkg/util"
)
// Renderer is a software scanline renderer for the Game Boy video unit,
// combining the roles of GBVideoRenderer & GBVideoSoftwareRenderer.
type Renderer struct {
	// GBVideoRenderer
	g *Video

	// Debug/visualization toggles: suppress or highlight individual layers.
	disableBG, disableOBJ, disableWIN bool

	highlightBG     bool
	highlightOBJ    [MAX_OBJ]bool
	highlightWIN    bool
	highlightColor  uint16
	highlightAmount byte

	// GBVideoSoftwareRenderer
	// Pixel pipeline: Renderer.row[i] -> Renderer.Lookup[i] -> Renderer.palette[i] -> outputBuffer
	outputBuffer       [256 * 256]Color
	outputBufferStride int

	// row holds one scanline of raw palette-color indices (plus priority
	// bits); each element represents palette color
	// x = 0 or 1 or 2 or 3
	// x -> BGP or BGP0
	// 1*4 + x -> BGP1
	// 2*4 + x -> BGP2
	// ...
	// 7*4 + x -> BGP7
	// 8*4 + x -> OBP0
	// 9*4 + x -> OBP1
	row     [HORIZONTAL_PIXELS + 8]uint16
	Palette [64 * 3]Color
	// Lookup maps a palette color (Renderer.row element) -> Renderer.palette index
	Lookup [64 * 3]byte

	// Window state: configured WY/WX plus the internal counters that track
	// where the window actually resumed mid-frame.
	wy, wx, currentWy, currentWx byte
	lastY, lastX                 int
	hasWindow                    bool

	lastHighlightAmount byte
	Model               util.GBModel

	// Per-scanline sprite list collected by cleanOAM.
	obj    [MAX_LINE_OBJ]Sprite
	objMax int

	// Super Game Boy specific state.
	sgbBorders    bool
	sgbRenderMode int
	sgbAttributes []byte
}
// NewRenderer builds a software renderer bound to the given Video unit, with
// an identity palette lookup preloaded for the first 192 entries.
func NewRenderer(g *Video) *Renderer {
	r := &Renderer{
		g:                  g,
		highlightColor:     0x7fff,
		outputBufferStride: 160,
		lastY:              VERTICAL_PIXELS,
	}
	for i := 0; i < 192; i++ {
		r.Lookup[i] = byte(i)
	}
	return r
}
// GBVideoSoftwareRendererUpdateWindow
// updateWindow adjusts the internal window line counter (currentWy) when the
// window enable state or WY changes mid-frame. before/after are the window
// visibility states before and after the triggering register write; oldWy is
// the WY value prior to the write.
func (r *Renderer) updateWindow(before, after bool, oldWy byte) {
	// Past the visible frame, or the window was and stays disabled: nothing
	// to adjust.
	if r.lastY >= VERTICAL_PIXELS || !(after || before) {
		return
	}
	// No window activity yet this frame and the current line fully rendered.
	if !r.hasWindow && r.lastX == HORIZONTAL_PIXELS {
		return
	}
	if r.lastY >= int(oldWy) {
		if !after {
			// Window switched off: bias currentWy so it resumes correctly if
			// re-enabled later in the frame.
			r.currentWy = byte(int(r.currentWy) - r.lastY)
			r.hasWindow = true
		} else if !before {
			// Window switched on mid-frame.
			if !r.hasWindow {
				r.currentWy = byte(r.lastY - int(r.wy))
				if r.lastY >= int(r.wy) && r.lastX > int(r.wx) {
					r.currentWy++
				}
			} else {
				r.currentWy += byte(r.lastY)
			}
		} else if r.wy != oldWy {
			// WY moved while the window stayed enabled.
			r.currentWy += oldWy - r.wy
			r.hasWindow = true
		}
	}
}
// writeVideoRegister / GBVideoSoftwareRendererWriteVideoRegister
// WriteVideoRegister applies a write to one of the renderer-visible video
// registers (LCDC/WY/WX/BGP/OBP0/OBP1) and returns the written value. This is
// called from GBIOWrite/GBVideoWritePalette/etc...
func (r *Renderer) WriteVideoRegister(offset byte, value byte) byte {
	wasWindow := r.inWindow()
	oldWy := r.wy
	// setPalette unpacks the four 2-bit entries of a DMG palette register
	// into the lookup table at base, mirroring them into the highlight bank.
	setPalette := func(base, highlightBase int) {
		for i := 0; i < 4; i++ {
			entry := (value >> (2 * byte(i))) & 3
			r.Lookup[base+i] = entry
			r.Lookup[highlightBase+i] = PAL_HIGHLIGHT + entry
		}
	}
	switch offset {
	case GB_REG_LCDC:
		r.g.LCDC = value
		r.updateWindow(wasWindow, r.inWindow(), oldWy)
	case GB_REG_WY:
		r.wy = value
		r.updateWindow(wasWindow, r.inWindow(), oldWy)
	case GB_REG_WX:
		r.wx = value
		r.updateWindow(wasWindow, r.inWindow(), oldWy)
	case GB_REG_BGP:
		setPalette(0, PAL_HIGHLIGHT_BG)
	case GB_REG_OBP0:
		setPalette(PAL_OBJ, PAL_HIGHLIGHT_OBJ)
	case GB_REG_OBP1:
		setPalette(PAL_OBJ+4, PAL_HIGHLIGHT_OBJ+4)
	}
	return value
}
// writePalette / GBVideoSoftwareRendererWritePalette
// writePalette stores a color into the palette (GBVideoWritePalette calls
// this), applying SGB quirks — sub-palette color 0 is shared — and mirroring
// non-highlight entries into the highlight bank.
func (r *Renderer) writePalette(index int, value Color) {
	color := value
	if r.Model&util.GB_MODEL_SGB != 0 {
		switch {
		case index < 0x10 && index != 0 && index&3 == 0:
			// BG sub-palettes share their color 0.
			color = r.Palette[0]
		case index >= PAL_SGB_BORDER && index&0xf == 0:
			color = r.Palette[0]
		case index > PAL_HIGHLIGHT && index < PAL_HIGHLIGHT_OBJ && index&3 == 0:
			color = r.Palette[PAL_HIGHLIGHT_BG]
		}
	}
	r.Palette[index] = color
	if index < PAL_SGB_BORDER && (index < PAL_OBJ || index&3 != 0) {
		r.Palette[index+PAL_HIGHLIGHT] = color
	}
}
// drawRange / GBVideoSoftwareRendererDrawRange
// by row
// drawRange renders pixels [startX, endX) of scanline y: it composes the
// BG/window and sprite layers into r.row, then resolves r.row through the
// lookup table and palette into outputBuffer.
func (r *Renderer) drawRange(startX, endX, y int) {
	r.lastY, r.lastX = y, endX
	if startX >= endX {
		return
	}
	mapIdx := GB_BASE_MAP // 0x9800
	if util.Bit(r.g.LCDC, TileMap) {
		mapIdx += GB_SIZE_MAP // 0x9c00
	}
	if r.disableBG {
		for x := startX; x < endX; x++ {
			r.row[x] = 0
		}
	}
	// On CGB the BG layer is always drawn (LCDC bit 0 means priority there,
	// not enable).
	if util.Bit(r.g.LCDC, BgEnable) || r.Model >= util.GB_MODEL_CGB {
		wy, wx := int(r.wy)+int(r.currentWy), int(r.wx)+int(r.currentWx)-7
		if util.Bit(r.g.LCDC, Window) && wy == y && wx <= endX {
			r.hasWindow = true
		}
		scx, scy := int(r.g.io[GB_REG_SCX]), int(r.g.io[GB_REG_SCY])
		if util.Bit(r.g.LCDC, Window) && r.hasWindow && wx <= endX && !r.disableWIN {
			if wx > 0 && !r.disableBG {
				// bg -> window
				r.drawBackground(mapIdx, startX, wx, scx, scy+y, r.highlightBG)
				// fallthrough and draw window
			}
			mapIdx = GB_BASE_MAP
			if util.Bit(r.g.LCDC, WindowTileMap) {
				mapIdx += GB_SIZE_MAP // 0x9c00
			}
			r.drawBackground(mapIdx, wx, endX, -wx, y-wy, r.highlightWIN)
		} else if !r.disableBG {
			r.drawBackground(mapIdx, startX, endX, scx, scy+y, r.highlightBG)
		}
	} else if !r.disableBG {
		// BG disabled on DMG: the line is palette color 0.
		for x := startX; x < endX; x++ {
			r.row[x] = 0
		}
	}
	// Collect this line's sprites once, at the start of the line.
	if startX == 0 {
		r.cleanOAM(y)
	}
	if util.Bit(r.g.LCDC, ObjEnable) && !r.disableOBJ {
		for i := 0; i < r.objMax; i++ {
			r.drawObj(r.obj[i], startX, endX, y)
		}
	}
	// Refresh the highlight palette bank when the highlight amount changed.
	highlightAmount := (r.highlightAmount + 6) >> 4
	if r.lastHighlightAmount != highlightAmount {
		r.lastHighlightAmount = highlightAmount
		for i := 0; i < PAL_SGB_BORDER; i++ {
			if i >= PAL_OBJ && i&3 == 0 {
				continue
			}
			r.Palette[i+PAL_HIGHLIGHT] = r.Palette[i]
		}
	}
	// With SGB borders enabled the visible screen is inset inside the border.
	sgbOffset := 0
	if (r.Model&util.GB_MODEL_SGB != 0) && r.sgbBorders {
		sgbOffset = r.outputBufferStride*40 + 48
	}
	row := r.outputBuffer[r.outputBufferStride*y+sgbOffset:]
	x, p := startX, 0
	switch r.sgbRenderMode {
	case 0:
		// Normal rendering; on SGB, p selects the 4-color sub-palette from
		// the screen's attribute file (one attribute per 8x8 cell).
		if r.Model&util.GB_MODEL_SGB != 0 {
			p = int(r.sgbAttributes[(startX>>5)+5*(y>>3)])
			p >>= 6 - ((x / 4) & 0x6)
			p &= 3
			p <<= 2
		}
		// Head: pixels up to the first 8-pixel boundary.
		for ; x < ((startX+7) & ^7) && x < endX; x++ {
			row[x] = r.Palette[p|int(r.Lookup[r.row[x]&OBJ_PRIO_MASK])]
		}
		// Body: whole 8-pixel groups, unrolled.
		for ; x+7 < (endX & ^7); x += 8 {
			if (r.Model & util.GB_MODEL_SGB) != 0 {
				p = int(r.sgbAttributes[(x>>5)+5*(y>>3)])
				p >>= 6 - ((x / 4) & 0x6)
				p &= 3
				p <<= 2
			}
			row[x+0] = r.Palette[p|int(r.Lookup[r.row[x]&OBJ_PRIO_MASK])]
			row[x+1] = r.Palette[p|int(r.Lookup[r.row[x+1]&OBJ_PRIO_MASK])]
			row[x+2] = r.Palette[p|int(r.Lookup[r.row[x+2]&OBJ_PRIO_MASK])]
			row[x+3] = r.Palette[p|int(r.Lookup[r.row[x+3]&OBJ_PRIO_MASK])]
			row[x+4] = r.Palette[p|int(r.Lookup[r.row[x+4]&OBJ_PRIO_MASK])]
			row[x+5] = r.Palette[p|int(r.Lookup[r.row[x+5]&OBJ_PRIO_MASK])]
			row[x+6] = r.Palette[p|int(r.Lookup[r.row[x+6]&OBJ_PRIO_MASK])]
			row[x+7] = r.Palette[p|int(r.Lookup[r.row[x+7]&OBJ_PRIO_MASK])]
		}
		// Tail: remaining pixels past the last full group.
		if (r.Model & util.GB_MODEL_SGB) != 0 {
			p = int(r.sgbAttributes[(x>>5)+5*(y>>3)])
			p >>= 6 - ((x / 4) & 0x6)
			p &= 3
			p <<= 2
		}
		for ; x < endX; x++ {
			row[x] = r.Palette[p|int(r.Lookup[r.row[x]&OBJ_PRIO_MASK])]
		}
	case 2:
		// SGB mask mode: blank the line to color value 0.
		for ; x < ((startX+7) & ^7) && x < endX; x++ {
			row[x] = 0
		}
		for ; x+7 < (endX & ^7); x += 8 {
			row[x] = 0
			row[x+1] = 0
			row[x+2] = 0
			row[x+3] = 0
			row[x+4] = 0
			row[x+5] = 0
			row[x+6] = 0
			row[x+7] = 0
		}
		for ; x < endX; x++ {
			row[x] = 0
		}
	case 3:
		// SGB mask mode: fill the line with palette color 0.
		for ; x < ((startX+7) & ^7) && x < endX; x++ {
			row[x] = r.Palette[0]
		}
		for ; x+7 < (endX & ^7); x += 8 {
			row[x] = r.Palette[0]
			row[x+1] = r.Palette[0]
			row[x+2] = r.Palette[0]
			row[x+3] = r.Palette[0]
			row[x+4] = r.Palette[0]
			row[x+5] = r.Palette[0]
			row[x+6] = r.Palette[0]
			row[x+7] = r.Palette[0]
		}
		for ; x < endX; x++ {
			row[x] = r.Palette[0]
		}
	}
}
// finishScanline / GBVideoSoftwareRendererFinishScanline
// finishScanline resets the per-scanline cursors at the end of line y.
func (r *Renderer) finishScanline(y int) {
	r.lastX = 0
	r.currentWx = 0
}
// finishFrame / GBVideoSoftwareRendererFinishFrame
// finishFrame resets the per-frame window tracking state and blanks the
// output when the LCD is disabled.
func (r *Renderer) finishFrame() {
	if !util.Bit(r.g.LCDC, Enable) {
		r.clearScreen()
	}
	r.lastY = VERTICAL_PIXELS
	r.lastX = 0
	r.currentWy = 0
	r.currentWx = 0
	r.hasWindow = false
}
// GBVideoSoftwareRendererDrawBackground
// by row
// drawBackground renders one scanline strip [startX, endX) of the BG (or
// window) layer into r.row. mapIdx selects the tile map base, (sx, sy) is
// the scrolled source position, and highlight selects the highlight palette
// bank.
func (r *Renderer) drawBackground(mapIdx, startX, endX, sx, sy int, highlight bool) {
	vramIdx := 0
	attrIdx := mapIdx + GB_SIZE_VRAM_BANK0 // for CGB
	if !util.Bit(r.g.LCDC, TileData) {
		vramIdx += 0x1000
	}
	// topY: row offset into the 32x32 tile map; bottomY: line within a tile.
	topY := ((sy >> 3) & 0x1F) * 0x20
	bottomY := sy & 7
	if startX < 0 {
		startX = 0
	}
	x := 0
	// Leading partial tile: render pixel by pixel until 8-pixel aligned.
	if ((startX + sx) & 7) > 0 {
		startX2 := startX + 8 - ((startX + sx) & 7)
		for x := startX; x < startX2; x++ {
			localData := vramIdx
			localY := bottomY
			topX, bottomX := ((x+sx)>>3)&0x1F, 7-((x+sx)&7)
			bgTile := 0
			if util.Bit(r.g.LCDC, TileData) {
				// 0x8000-0x8800 [0, 255]
				bgTile = int(r.g.VRAM.Buffer[mapIdx+topX+topY])
			} else {
				// 0x8800-0x97ff [-128, 127]
				bgTile = int(int8(r.g.VRAM.Buffer[mapIdx+topX+topY]))
			}
			// NOTE(review): the aligned loop below seeds p with PAL_BG while
			// this one uses 0 — confirm PAL_BG == 0 or that the difference is
			// intentional.
			p := uint16(0)
			if highlight {
				p = PAL_HIGHLIGHT_BG
			}
			if r.Model >= util.GB_MODEL_CGB {
				// CGB: apply the tile's attribute byte (palette, bank, flips,
				// BG-over-OBJ priority).
				attrs := r.g.VRAM.Buffer[attrIdx+topX+topY]
				p |= uint16(attrs&0x7) * 4
				if util.Bit(attrs, ObjAttrPriority) && util.Bit(r.g.LCDC, BgEnable) {
					p |= OBJ_PRIORITY
				}
				if util.Bit(attrs, ObjAttrBank) {
					localData += GB_SIZE_VRAM_BANK0
				}
				if util.Bit(attrs, ObjAttrYFlip) {
					localY = 7 - bottomY
				}
				if util.Bit(attrs, ObjAttrXFlip) {
					bottomX = 7 - bottomX
				}
			}
			tileDataLower := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2]
			tileDataUpper := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2+1]
			tileDataUpper >>= bottomX
			tileDataLower >>= bottomX
			r.row[x] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
		}
		startX = startX2
	}
	// by tile row: the remainder is tile-aligned, so emit 8 pixels per tile.
	for x = startX; x < endX; x += 8 {
		localData := vramIdx
		localY := bottomY
		topX := ((x + sx) >> 3) & 0x1F
		bgTile := 0
		if util.Bit(r.g.LCDC, TileData) {
			// 0x8000-0x8800 [0, 255]
			bgTile = int(r.g.VRAM.Buffer[mapIdx+topX+topY])
		} else {
			// 0x8800-0x97ff [-128, 127]
			bgTile = int(int8(r.g.VRAM.Buffer[mapIdx+topX+topY]))
		}
		p := uint16(PAL_BG)
		if highlight {
			p = PAL_HIGHLIGHT_BG
		}
		if r.Model >= util.GB_MODEL_CGB {
			attrs := r.g.VRAM.Buffer[attrIdx+topX+topY]
			p |= uint16(attrs&0x7) * 4
			if util.Bit(attrs, ObjAttrPriority) && util.Bit(r.g.LCDC, BgEnable) {
				p |= OBJ_PRIORITY
			}
			if util.Bit(attrs, ObjAttrBank) {
				localData += GB_SIZE_VRAM_BANK0
			}
			if util.Bit(attrs, ObjAttrYFlip) {
				localY = 7 - bottomY
			}
			if util.Bit(attrs, ObjAttrXFlip) {
				// Horizontally flipped tile: emit the bits in mirrored order
				// and skip the unflipped emission below.
				tileDataLower := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2]
				tileDataUpper := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2+1]
				r.row[x+0] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
				r.row[x+1] = p | uint16(tileDataUpper&2) | uint16((tileDataLower&2)>>1)
				r.row[x+2] = p | uint16((tileDataUpper&4)>>1) | uint16((tileDataLower&4)>>2)
				r.row[x+3] = p | uint16((tileDataUpper&8)>>2) | uint16((tileDataLower&8)>>3)
				r.row[x+4] = p | uint16((tileDataUpper&16)>>3) | uint16((tileDataLower&16)>>4)
				r.row[x+5] = p | uint16((tileDataUpper&32)>>4) | uint16((tileDataLower&32)>>5)
				r.row[x+6] = p | uint16((tileDataUpper&64)>>5) | uint16((tileDataLower&64)>>6)
				r.row[x+7] = p | uint16((tileDataUpper&128)>>6) | uint16((tileDataLower&128)>>7)
				continue
			}
		}
		// Unflipped tile: bit 0 is the rightmost pixel of the group.
		tileDataLower := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2]
		tileDataUpper := r.g.VRAM.Buffer[localData+(bgTile*8+localY)*2+1]
		r.row[x+7] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
		r.row[x+6] = p | uint16(tileDataUpper&2) | uint16((tileDataLower&2)>>1)
		r.row[x+5] = p | uint16((tileDataUpper&4)>>1) | uint16((tileDataLower&4)>>2)
		r.row[x+4] = p | uint16((tileDataUpper&8)>>2) | uint16((tileDataLower&8)>>3)
		r.row[x+3] = p | uint16((tileDataUpper&16)>>3) | uint16((tileDataLower&16)>>4)
		r.row[x+2] = p | uint16((tileDataUpper&32)>>4) | uint16((tileDataLower&32)>>5)
		r.row[x+1] = p | uint16((tileDataUpper&64)>>5) | uint16((tileDataLower&64)>>6)
		r.row[x+0] = p | uint16((tileDataUpper&128)>>6) | uint16((tileDataLower&128)>>7)
	}
}
// GBVideoSoftwareRendererDrawObj
// drawObj renders the clipped portion of sprite obj on scanline y into r.row.
// A sprite pixel only lands when its 2-bit color is non-zero and the masks
// derived from the sprite's priority attribute allow it to win over what is
// already in r.row.
func (r *Renderer) drawObj(obj Sprite, startX, endX, y int) {
	objX := int(obj.obj.x)
	ix := objX - 8
	// Entirely outside the requested strip: nothing to draw.
	if endX < ix || startX >= ix+8 {
		return
	}
	// Clip the strip to the sprite's 8-pixel horizontal extent.
	if objX < endX {
		endX = objX
	}
	if objX-8 > startX {
		startX = objX - 8
	}
	if startX < 0 {
		startX = 0
	}
	vramIdx := 0x0
	// tileOffset selects the upper/lower half of an 8x16 sprite; bottomY is
	// the line within the 8-pixel tile, flipped for Y-mirrored sprites.
	tileOffset, bottomY := 0, 0
	objY := int(obj.obj.y)
	if util.Bit(obj.obj.attr, ObjAttrYFlip) {
		bottomY = 7 - ((y - objY - 16) & 7)
		if util.Bit(r.g.LCDC, ObjSize) && y-objY < -8 {
			tileOffset++
		}
	} else {
		bottomY = (y - objY - 16) & 7
		if util.Bit(r.g.LCDC, ObjSize) && y-objY >= -8 {
			tileOffset++
		}
	}
	// In 8x16 mode the tile index's low bit is ignored by hardware.
	if util.Bit(r.g.LCDC, ObjSize) && obj.obj.tile&1 == 1 {
		tileOffset--
	}
	// mask/mask2 encode the BG-vs-OBJ priority test applied per pixel below.
	mask, mask2 := uint(0x60), uint(OBJ_PRIORITY/3)
	if util.Bit(obj.obj.attr, ObjAttrPriority) {
		mask, mask2 = 0x63, 0
	}
	p := uint16(PAL_OBJ)
	if r.highlightOBJ[obj.index] {
		p = PAL_HIGHLIGHT_OBJ
	}
	if r.Model >= util.GB_MODEL_CGB {
		// CGB: palette number and tile bank come from the attribute byte.
		p |= uint16(obj.obj.attr&0x07) * 4
		if util.Bit(obj.obj.attr, ObjAttrBank) {
			vramIdx += GB_SIZE_VRAM_BANK0
		}
		if !util.Bit(r.g.LCDC, BgEnable) {
			mask, mask2 = 0x60, OBJ_PRIORITY/3
		}
	} else {
		p |= (uint16((obj.obj.attr>>ObjAttrPalette)&1) + 8) * 4 // 8x4 or 9x4
	}
	bottomX, x, objTile := 0, startX, int(obj.obj.tile)+tileOffset
	if (x-objX)&7 != 0 {
		// Unaligned with the sprite's left edge: draw pixel by pixel.
		for ; x < endX; x++ {
			if util.Bit(obj.obj.attr, ObjAttrXFlip) {
				bottomX = (x - objX) & 7
			} else {
				bottomX = 7 - ((x - objX) & 7)
			}
			tileDataLower := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2]
			tileDataUpper := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2+1]
			tileDataUpper >>= bottomX
			tileDataLower >>= bottomX
			current := r.row[x]
			if ((tileDataUpper|tileDataLower)&1 > 0) && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
				r.row[x] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
			}
		}
	} else if util.Bit(obj.obj.attr, ObjAttrXFlip) {
		// Aligned, X-flipped: emit all 8 pixels with mirrored bit order.
		tileDataLower := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2]
		tileDataUpper := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2+1]
		current := r.row[x]
		if ((tileDataUpper|tileDataLower)&1) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
		}
		current = r.row[x+1]
		if ((tileDataUpper|tileDataLower)&2) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+1] = p | uint16(tileDataUpper&2) | uint16((tileDataLower&2)>>1)
		}
		current = r.row[x+2]
		if ((tileDataUpper|tileDataLower)&4) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+2] = p | uint16((tileDataUpper&4)>>1) | uint16((tileDataLower&4)>>2)
		}
		current = r.row[x+3]
		if ((tileDataUpper|tileDataLower)&8) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+3] = p | uint16((tileDataUpper&8)>>2) | uint16((tileDataLower&8)>>3)
		}
		current = r.row[x+4]
		if ((tileDataUpper|tileDataLower)&16) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+4] = p | uint16((tileDataUpper&16)>>3) | uint16((tileDataLower&16)>>4)
		}
		current = r.row[x+5]
		if ((tileDataUpper|tileDataLower)&32) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+5] = p | uint16((tileDataUpper&32)>>4) | uint16((tileDataLower&32)>>5)
		}
		current = r.row[x+6]
		if ((tileDataUpper|tileDataLower)&64) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+6] = p | uint16((tileDataUpper&64)>>5) | uint16((tileDataLower&64)>>6)
		}
		current = r.row[x+7]
		if ((tileDataUpper|tileDataLower)&128) != 0 && (uint(current)&mask == 0) && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+7] = p | uint16((tileDataUpper&128)>>6) | uint16((tileDataLower&128)>>7)
		}
	} else {
		// Aligned, unflipped: bit 0 is the rightmost pixel of the group.
		tileDataLower := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2]
		tileDataUpper := r.g.VRAM.Buffer[vramIdx+(objTile*8+bottomY)*2+1]
		current := r.row[x+7]
		if ((tileDataUpper|tileDataLower)&1) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+7] = p | uint16((tileDataUpper&1)<<1) | uint16(tileDataLower&1)
		}
		current = r.row[x+6]
		if ((tileDataUpper|tileDataLower)&2) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+6] = p | uint16(tileDataUpper&2) | uint16((tileDataLower&2)>>1)
		}
		current = r.row[x+5]
		if ((tileDataUpper|tileDataLower)&4) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+5] = p | uint16((tileDataUpper&4)>>1) | uint16((tileDataLower&4)>>2)
		}
		current = r.row[x+4]
		if ((tileDataUpper|tileDataLower)&8) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+4] = p | uint16((tileDataUpper&8)>>2) | uint16((tileDataLower&8)>>3)
		}
		current = r.row[x+3]
		if ((tileDataUpper|tileDataLower)&16) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+3] = p | uint16((tileDataUpper&16)>>3) | uint16((tileDataLower&16)>>4)
		}
		current = r.row[x+2]
		if ((tileDataUpper|tileDataLower)&32) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+2] = p | uint16((tileDataUpper&32)>>4) | uint16((tileDataLower&32)>>5)
		}
		current = r.row[x+1]
		if ((tileDataUpper|tileDataLower)&64) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x+1] = p | uint16((tileDataUpper&64)>>5) | uint16((tileDataLower&64)>>6)
		}
		current = r.row[x]
		if ((tileDataUpper|tileDataLower)&128) != 0 && (uint(current)&mask) == 0 && (uint(current)&mask2) <= OBJ_PRIORITY {
			r.row[x] = p | uint16((tileDataUpper&128)>>6) | uint16((tileDataLower&128)>>7)
		}
	}
}
// _cleanOAM
// cleanOAM scans OAM in order and gathers into r.obj the sprites visible on
// line y, honoring the 8x16 mode flag and the 10-sprites-per-line limit;
// r.objMax records how many were collected.
func (r *Renderer) cleanOAM(y int) {
	height := 8
	if util.Bit(r.g.LCDC, ObjSize) {
		height = 16
	}
	count := 0
	for i := 0; i < MAX_OBJ && count < MAX_LINE_OBJ; i++ {
		top := int(r.g.Oam.Objs[i].y) - 16
		if y < top || y >= top+height {
			continue
		}
		r.obj[count].obj = *r.g.Oam.Objs[i]
		r.obj[count].index = int8(i)
		count++
		if count == 10 {
			break
		}
	}
	r.objMax = count
}
// _inWindow
// inWindow reports whether the window layer is enabled and horizontally
// on-screen (wx below HORIZONTAL_PIXELS+7).
func (r *Renderer) inWindow() bool {
	if !util.Bit(r.g.LCDC, Window) {
		return false
	}
	return r.wx < HORIZONTAL_PIXELS+7
}
// _clearScreen
// clearScreen fills the visible output with palette color 0. It is skipped
// on SGB, where the border content must be preserved.
func (r *Renderer) clearScreen() {
	if r.Model&util.GB_MODEL_SGB != 0 {
		return
	}
	fill := r.Palette[0]
	for y := 0; y < VERTICAL_PIXELS; y++ {
		row := r.outputBuffer[r.outputBufferStride*y:]
		for x := 0; x < HORIZONTAL_PIXELS; x++ {
			row[x] = fill
		}
	}
}
|
// Copyright (c) 2018-present, MultiVAC Foundation.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"crypto/sha256"
"fmt"
"github.com/prometheus/common/log"
"math/big"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/multivactech/MultiVAC/base/rlp"
"github.com/multivactech/MultiVAC/interface/isysapi"
"github.com/multivactech/MultiVAC/model/chaincfg/chainhash"
"github.com/multivactech/MultiVAC/model/chaincfg/multivacaddress"
"github.com/multivactech/MultiVAC/model/chaincfg/signature"
"github.com/multivactech/MultiVAC/model/shard"
)
// defaultShardIDForTest is the shard every test transaction is created in.
const defaultShardIDForTest = shard.Index(0)

// privateKeyForTestingTx is a fixed 64-byte private key used to sign test
// transactions deterministically.
var privateKeyForTestingTx = signature.PrivateKey{
	244, 138, 151, 117, 41, 188, 240, 122, 9, 91, 196, 16, 0, 218, 181, 139, 191, 239, 56, 161, 117, 54, 234, 61, 117, 159, 75, 32, 8, 250, 197, 124, 194, 112, 47, 169, 227, 191, 100, 160, 190, 42, 73, 45, 224, 181, 65, 162, 68, 159, 26, 160, 8, 109, 133, 247, 223, 40, 100, 226, 107, 234, 19, 104,
}
// newMsgTxForTest returns an empty MsgTx with the current TxVersion in the
// default test shard.
func newMsgTxForTest() *MsgTx {
	return NewMsgTx(TxVersion, defaultShardIDForTest)
}
// TestTx tests the MsgTx API.
func TestTx(t *testing.T) {
	pver := ProtocolVersion
	// Block 100000 hash.
	// NOTE(review): this hex string is 53 characters, shorter than a full
	// 64-character hash; confirm NewHashFromStr accepts it as intended.
	hashStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
	hash, err := chainhash.NewHashFromStr(hashStr)
	if err != nil {
		t.Errorf("NewHashFromStr: %v", err)
	}
	// Ensure the command is expected value.
	wantCmd := "tx"
	msg := newMsgTxForTest()
	if cmd := msg.Command(); cmd != wantCmd {
		t.Errorf("NewMsgAddr: wrong command - got %v want %v",
			cmd, wantCmd)
	}
	// Ensure max payload is expected value for latest protocol version.
	wantPayload := uint32(1000 * 4000)
	maxPayload := msg.MaxPayloadLength(pver)
	if maxPayload != wantPayload {
		t.Errorf("MaxPayloadLength: wrong max payload length for "+
			"protocol version %d - got %v, want %v", pver,
			maxPayload, wantPayload)
	}
	// Ensure we get the same transaction output point data back out.
	// NOTE: This is a block hash and made up index, but we're only
	// testing package functionality.
	prevOutIndex := 1
	prevOut := newOutPoint(hash, prevOutIndex, chainhash.Hash{}, big.NewInt(10))
	if !prevOut.TxHash.IsEqual(hash) {
		t.Errorf("newOutPoint: wrong hash - got %v, want %v",
			spew.Sprint(&prevOut.TxHash), spew.Sprint(hash))
	}
	if prevOut.Index != prevOutIndex {
		t.Errorf("newOutPoint: wrong index - got %v, want %v",
			prevOut.Index, prevOutIndex)
	}
	prevOutStr := fmt.Sprintf("%s:%d", hash.String(), prevOutIndex)
	if s := prevOut.String(); s != prevOutStr {
		t.Errorf("OutPoint.String: unexpected result - got %v, "+
			"want %v", s, prevOutStr)
	}
	// Ensure a TxIn built from the outpoint carries it verbatim.
	txIn := NewTxIn(prevOut)
	if !reflect.DeepEqual(&txIn.PreviousOutPoint, prevOut) {
		t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
			spew.Sprint(&txIn.PreviousOutPoint),
			spew.Sprint(prevOut))
	}
	// Ensure we get the same transaction output back out.
	// Ensure transaction inputs are added properly.
	msg.AddTxIn(txIn)
	if !reflect.DeepEqual(msg.TxIn[0], txIn) {
		t.Errorf("AddTxIn: wrong transaction input added - got %v, want %v",
			spew.Sprint(msg.TxIn[0]), spew.Sprint(txIn))
	}
	// Ensure transaction outputs are added properly.
}
// TestTxHash tests the ability to generate the hash of a transaction
// accurately: TxHash must equal the hash of the message's own wire encoding.
func TestTxHash(t *testing.T) {
	// From block 23157 in a past version of segnet.
	msgTx := newMsgTxForTest()
	txIn := TxIn{
		PreviousOutPoint: OutPoint{
			TxHash: chainhash.Hash{
				0xa5, 0x33, 0x52, 0xd5, 0x13, 0x57, 0x66, 0xf0,
				0x30, 0x76, 0x59, 0x74, 0x18, 0x26, 0x3d, 0xa2,
				0xd9, 0xc9, 0x58, 0x31, 0x59, 0x68, 0xfe, 0xa8,
				0x23, 0x52, 0x94, 0x67, 0x48, 0x1f, 0xf9, 0xcd,
			},
			Index: 19,
			Data:  []byte{},
		},
	}
	msgTx.AddTxIn(&txIn)
	txHash := msgTx.TxHash()
	// Recompute the expected hash by encoding the message manually.
	var buf bytes.Buffer
	err := msgTx.BtcEncode(&buf, 0, BaseEncoding)
	if err != nil {
		// NOTE(review): an encode failure is only logged, not reported via
		// t.Errorf/t.Fatalf, so the test would continue with an empty buffer;
		// confirm this is intentional.
		log.Errorf("failed to encode message,err:%v", err)
	}
	wantHash := chainhash.HashH(buf.Bytes())
	if !txHash.IsEqual(&wantHash) {
		t.Errorf("TxSha: wrong hash - got %v, want %v",
			spew.Sprint(txHash), spew.Sprint(wantHash))
	}
}
// TestTxWire tests the MsgTx wire encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestTxWire(t *testing.T) {
	// Empty tx message.
	noTx := newMsgTxForTest()
	noTx.Version = 1
	// Expected result of round-tripping the empty transaction; the empty
	// (non-nil) slice fields are spelled out explicitly so the deep
	// comparison below can distinguish them from nil.
	noTxOut := &MsgTx{
		Version:            1,
		TxIn:               []*TxIn{},
		SignatureScript:    []byte{},
		Params:             []byte{},
		ContractAddress:    isysapi.SysAPIAddress,
		PublicKey:          []byte{},
		StorageNodeAddress: multivacaddress.Address{},
	}
	tests := []struct {
		in   *MsgTx          // Message to encode
		out  *MsgTx          // Expected decoded message
		pver uint32          // Protocol version for wire encoding
		enc  MessageEncoding // Message encoding format
	}{
		// Latest protocol version with no transactions.
		{
			noTx,
			noTxOut,
			ProtocolVersion,
			BaseEncoding,
		},
		// Latest protocol version with multiple transactions.
		{
			multiTx,
			multiTx,
			ProtocolVersion,
			BaseEncoding,
		},
	}
	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode the message to wire format.
		var buf bytes.Buffer
		err := test.in.BtcEncode(&buf, test.pver, test.enc)
		if err != nil {
			t.Errorf("BtcEncode #%d error %v", i, err)
			continue
		}
		// Decode the message from wire format and require an exact
		// structural match against the expected message.
		var msg MsgTx
		rbuf := bytes.NewReader(buf.Bytes())
		err = msg.BtcDecode(rbuf, test.pver, test.enc)
		if err != nil {
			t.Errorf("BtcDecode #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&msg, test.out) {
			t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
				spew.Sdump(&msg), spew.Sdump(test.out))
			continue
		}
	}
}
// TestTxSignature checks that a transaction signed with the matching
// private key passes signature verification.
func TestTxSignature(t *testing.T) {
	pub := privateKeyForTestingTx.Public()
	tx := newMsgTxForTest()
	// Attach two identical inputs owned by the test key's address.
	for i := 0; i < 2; i++ {
		tx.AddTxIn(NewTxIn(&OutPoint{
			TxHash:      chainhash.Hash{},
			Index:       1,
			UserAddress: multivacaddress.GenerateAddress(pub, multivacaddress.UserAddress),
			Data:        MtvValueToData(big.NewInt(100)),
		}))
	}
	// Sign the digest that excludes the signature/pubkey fields, then
	// attach both to the transaction.
	digest := tx.doubleSha256WithoutSignatureAndPubKey()
	sig := signature.Sign(privateKeyForTestingTx, digest[:])
	tx.SetSignatureScriptAndPubKey(pub, sig)
	if !tx.VerifySignature() {
		t.Error("Signature verification failed")
	}
}
// TestTxSignature_wrongSignature ensures verification rejects a
// transaction whose attached public key does not correspond to the key
// that produced the signature.
func TestTxSignature_wrongSignature(t *testing.T) {
	// Fabricate an unrelated "public key" from a hash of arbitrary bytes.
	randPubKeyBytes := sha256.Sum256([]byte("blahblah"))
	randPubKey := signature.PublicKey(randPubKeyBytes[:])
	tx := newMsgTxForTest()
	tx.AddTxIn(NewTxIn(&OutPoint{
		TxHash:      chainhash.Hash{},
		Index:       1,
		UserAddress: multivacaddress.GenerateAddress(randPubKey, multivacaddress.UserAddress),
		Data:        MtvValueToData(big.NewInt(100)),
	}))
	tx.AddTxIn(NewTxIn(&OutPoint{
		TxHash:      chainhash.Hash{},
		Index:       1,
		UserAddress: multivacaddress.GenerateAddress(randPubKey, multivacaddress.UserAddress),
		Data:        MtvValueToData(big.NewInt(100)),
	}))
	data := tx.doubleSha256WithoutSignatureAndPubKey()
	// Sign with the real test key but attach the unrelated public key,
	// so VerifySignature must return false.
	sig := signature.Sign(privateKeyForTestingTx, data[:])
	tx.SetSignatureScriptAndPubKey(randPubKey, sig)
	if tx.VerifySignature() {
		// Fixed message: this branch means verification SUCCEEDED when it
		// should have failed; the old text claimed the opposite.
		t.Error("Signature verification unexpectedly succeeded for mismatched public key")
	}
}
// TestVerifyTx_txinIsFromDifferentShard ensures VerifyTransaction
// rejects a transaction whose input belongs to a shard other than the
// test default.
func TestVerifyTx_txinIsFromDifferentShard(t *testing.T) {
	pubKey := privateKeyForTestingTx.Public()
	tx := newMsgTxForTest()
	tx.AddTxIn(NewTxIn(&OutPoint{
		// Deliberately one shard past the default used by newMsgTxForTest.
		Shard:       defaultShardIDForTest + 1,
		TxHash:      chainhash.Hash{},
		Index:       1,
		UserAddress: multivacaddress.GenerateAddress(pubKey, multivacaddress.UserAddress),
		Data:        MtvValueToData(big.NewInt(100)),
	}))
	tx.Sign(&privateKeyForTestingTx)
	if err := tx.VerifyTransaction(); err == nil {
		t.Error("Expecting error, Txin is from a different shard")
	}
}
// TestVerifyTx_sameInputUsedTwice ensures VerifyTransaction rejects a
// transaction that references the same outpoint twice (an in-transaction
// double spend).
func TestVerifyTx_sameInputUsedTwice(t *testing.T) {
	pubKey := privateKeyForTestingTx.Public()
	tx := newMsgTxForTest()
	// Two inputs with identical hash, index, owner, and value.
	tx.AddTxIn(NewTxIn(&OutPoint{
		TxHash:      chainhash.Hash{},
		Index:       1,
		UserAddress: multivacaddress.GenerateAddress(pubKey, multivacaddress.UserAddress),
		Data:        MtvValueToData(big.NewInt(100)),
	}))
	tx.AddTxIn(NewTxIn(&OutPoint{
		TxHash:      chainhash.Hash{},
		Index:       1,
		UserAddress: multivacaddress.GenerateAddress(pubKey, multivacaddress.UserAddress),
		Data:        MtvValueToData(big.NewInt(100)),
	}))
	tx.Sign(&privateKeyForTestingTx)
	if err := tx.VerifyTransaction(); err == nil {
		t.Error("Expecting error, same input is used twice")
	}
}
// it seems unused.
//var multiTxData = struct{ Value *big.Int }{Value: big.NewInt(100000)}
// it seems unused.
//var multiTxDataBytes, _ = rlp.EncodeToBytes(multiTxData)
// multiTx is a MsgTx with an input and output and used in various tests.
var multiTx = &MsgTx{
	Version: 1,
	TxIn: []*TxIn{
		{
			PreviousOutPoint: OutPoint{
				// Zero previous hash with the max-index sentinel value.
				TxHash:          chainhash.Hash{},
				Index:           0xffffffff,
				UserAddress:     multivacaddress.Address{},
				Data:            MtvValueToData(big.NewInt(100000)),
				ContractAddress: isysapi.SysAPIAddress,
			},
		},
	},
	ContractAddress: isysapi.SysAPIAddress,
	// Empty (non-nil) slices so wire round-trip comparisons match the
	// decoded form used elsewhere in these tests.
	SignatureScript:    []byte{},
	Params:             []byte{},
	PublicKey:          []byte{},
	StorageNodeAddress: multivacaddress.Address{},
}
// multiTxEncoded is the wire encoded bytes for multiTx using protocol version
// 60002 and is used in the various tests.
// it seems unused.
//var multiTxEncoded = []byte{
// 0x01, 0x00, 0x00, 0x00, // Version
// 0x01, // Varint for number of input transactions
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
// 0xff, 0xff, 0xff, 0xff, // Prevous output index
// 0x07, // Varint for length of signature script
// 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script
// 0xff, 0xff, 0xff, 0xff, // Sequence
// 0x02, // Varint for number of output transactions
// 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
// 0x43, // Varint for length of pk script
// 0x41, // OP_DATA_65
// 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
// 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
// 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
// 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
// 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
// 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
// 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
// 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
// 0xa6, // 65-byte signature
// 0xac, // OP_CHECKSIG
// 0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount
// 0x43, // Varint for length of pk script
// 0x41, // OP_DATA_65
// 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
// 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
// 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
// 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
// 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
// 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
// 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
// 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
// 0xa6, // 65-byte signature
// 0xac, // OP_CHECKSIG
// 0x00, 0x00, 0x00, 0x00, // Lock time
//}
// Returns a new bitcoin transaction outpoint point with the provided hash and index.
// The pkHash bytes are reinterpreted as a public key to derive the
// owning user address, and value is RLP-encoded into the Data field.
// The outpoint is always placed in shard 0.
func newOutPoint(txHash *chainhash.Hash, index int, pkHash chainhash.Hash, value *big.Int) *OutPoint {
	data := struct{ Value *big.Int }{Value: value}
	dataBytes, _ := rlp.EncodeToBytes(data) // Ignore error: test helper with a fixed, encodable input
	publicKey := signature.PublicKey(pkHash.CloneBytes())
	return &OutPoint{
		TxHash:      *txHash,
		Index:       index,
		Shard:       shard.Index(0),
		UserAddress: multivacaddress.GenerateAddress(publicKey, multivacaddress.UserAddress),
		Data:        dataBytes,
	}
}
|
package cmd
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/Recime/recime-cli/util"
"github.com/howeyc/fsnotify"
"github.com/mitchellh/go-homedir"
)
//WatchForChanges watch file for changes
// On every non-attribute filesystem event under dir, the directory is
// copied to targetDir and rebuilt. The event loop runs in a goroutine
// for the life of the process; the watcher is never closed here.
func WatchForChanges(dir string, targetDir string) {
	watcher, err := fsnotify.NewWatcher()
	check(err)
	// Process events
	go func() {
		for {
			select {
			case ev := <-watcher.Event:
				// Skip attribute-only changes (permissions, timestamps).
				if !ev.IsAttrib() {
					fmt.Println("INFO: File change event.")
					util.CopyDir(dir, targetDir)
					Build(targetDir)
				}
			case err := <-watcher.Error:
				fmt.Println("error:", err)
			}
		}
	}()
	err = watcher.Watch(dir)
	check(err)
}
//Run runs the bot in a local node server.
// options keys used here: "url" (template zip URL), "base" (base URL
// exposed to the bot), "uid" (bot unique id), and "watch" (rebuild on
// file changes). Fatal errors are handled via check().
func Run(options map[string]interface{}) {
	url := options["url"].(string)
	base := options["base"].(string)
	uid := options["uid"].(string)
	watch := options["watch"].(bool)
	// The template version is the downloaded zip's file name without
	// its extension.
	tokens := strings.Split(url, "/")
	fileName := tokens[len(tokens)-1]
	version := strings.TrimSuffix(fileName, filepath.Ext(fileName))
	home, err := homedir.Dir()
	check(err)
	// Downloads and extracted templates live under ~/recime-cli.
	home = filepath.ToSlash(home) + "/recime-cli"
	fileName = fmt.Sprintf("%s/recime-%s.zip", home, version)
	_, err = os.Stat(home)
	if os.IsNotExist(err) {
		err = os.Mkdir(home, os.ModePerm)
		check(err)
	}
	Download(url, fileName)
	target := home
	util.Unzip(fileName, target)
	templateDir := target + "/recime-bot-template-" + version
	wd, err := os.Getwd()
	check(err)
	// The bot's sources are copied into a uid-named folder inside the
	// extracted template.
	botDir := templateDir + "/" + uid
	fmt.Println("INFO: Deploying Bot...")
	Build(wd)
	util.CopyDir(filepath.ToSlash(wd), botDir)
	fmt.Println("INFO: Installing Dependencies...")
	installCmd := []string{"npm", "install"}
	runCmd(installCmd, botDir, nil)
	runCmd(installCmd, templateDir, nil)
	fmt.Println("INFO: Starting...")
	if watch {
		WatchForChanges(filepath.ToSlash(wd), botDir)
	}
	// Environment handed to `npm start`: bot id, base URL, plus any
	// user-defined config read from the working directory.
	config := []Config{Config{Key: "BOT_UNIQUE_ID", Value: uid}}
	config = append(config, Config{Key: "BASE_URL", Value: base})
	_config := Config{}
	// Add config user config
	reader, _ := _config.Open(wd)
	vars := _config.Get(reader)
	for key, value := range vars {
		config = append(config, Config{Key: key, Value: value})
	}
	runCmd([]string{"npm", "start"}, templateDir, config)
}
// runCmd executes args (program followed by its arguments) in working
// directory wd. If config is non-nil, its key/value pairs are appended
// to the inherited environment. Failures are reported, not fatal.
func runCmd(args []string, wd string, config []Config) {
	// Pass ALL arguments after the program name; the original passed
	// only args[1] and silently dropped the rest.
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = wd
	if config != nil {
		env := os.Environ()
		for _, c := range config {
			env = append(env, fmt.Sprintf("%s=%s", c.Key, c.Value))
		}
		cmd.Env = env
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Surface the failure instead of discarding cmd.Run's error.
	if err := cmd.Run(); err != nil {
		fmt.Println("error:", err)
	}
}
// Download downloads url to a file name
// The request is performed before the destination file is created, so
// a failed or non-200 response does not leave an empty file behind.
func Download(url string, fileName string) {
	response, err := http.Get(url)
	if err != nil {
		fmt.Println("Error while downloading", url, "-", err)
		return
	}
	defer response.Body.Close()
	// Reject non-success responses instead of writing an error page to disk.
	if response.StatusCode != http.StatusOK {
		fmt.Println("Error while downloading", url, "- unexpected status:", response.Status)
		return
	}
	output, err := os.Create(fileName)
	if err != nil {
		fmt.Println("Error while creating", fileName, "-", err)
		return
	}
	defer output.Close()
	// Stream the body straight to disk.
	if _, err = io.Copy(output, response.Body); err != nil {
		fmt.Println("Error while downloading", url, "-", err)
		return
	}
}
|
package main
import "fmt"
/*
Let's call any (contiguous) subarray B (of A) a mountain if the following properties hold:
B.length >= 3
There exists some 0 < i < B.length - 1 such that B[0] < B[1] < ... B[i-1] < B[i] > B[i+1] > ... > B[B.length - 1]
(Note that B could be any subarray of A, including the entire array A.)
Given an array A of integers, return the length of the longest mountain.
Return 0 if there is no mountain.
Example 1:
Input: [2,1,4,7,3,2,5]
Output: 5
Explanation: The largest mountain is [1,4,7,3,2] which has length 5.
Example 2:
Input: [2,2,2]
Output: 0
Explanation: There is no mountain.
Note:
0 <= A.length <= 10000
0 <= A[i] <= 10000
*/
// longestMountain returns the length of the longest "mountain" subarray
// of A: a run of length >= 3 that strictly increases to a single peak
// and then strictly decreases. Returns 0 when no mountain exists.
//
// The previous implementation was incomplete (dead branches, debug
// prints) and produced wrong answers; this is the standard O(n) / O(1)
// peak-expansion algorithm.
func longestMountain(A []int) int {
	n := len(A)
	best := 0
	i := 1
	for i < n-1 {
		// A[i] is a peak when it strictly rises in and strictly falls out.
		if A[i-1] < A[i] && A[i] > A[i+1] {
			// Expand left over the strictly increasing slope.
			l := i - 1
			for l > 0 && A[l-1] < A[l] {
				l--
			}
			// Expand right over the strictly decreasing slope.
			r := i + 1
			for r < n-1 && A[r] > A[r+1] {
				r++
			}
			if length := r - l + 1; length > best {
				best = length
			}
			// The next mountain cannot start before the end of this descent.
			i = r
		} else {
			i++
		}
	}
	return best
}
// binarysearch returns the index of t in the sorted slice nums, or, if
// t is absent, the insertion point (index of the first element >= t).
func binarysearch(nums []int, t int) int {
	l, h := 0, len(nums)
	for l < h {
		m := (l + h) / 2
		switch {
		case nums[m] == t:
			// Bug fix: return the INDEX m, not the target value t.
			return m
		case nums[m] < t:
			l = m + 1
		default:
			h = m
		}
	}
	return h
}
// main exercises longestMountain on one sample input and prints the result.
func main() {
	sample := []int{0, 0, 1, 0, 0, 1, 1, 1, 1, 1}
	fmt.Println(longestMountain(sample))
}
|
package main
import (
"net/http"
"fmt"
"encoding/json"
"bytes"
)
// twilText carries the fields of an inbound SMS message, serialized as
// JSON with the capitalized key names shown in the tags. The field
// names match Twilio's SMS webhook parameters — presumably this mirrors
// that payload; confirm against the receiving service's contract.
type twilText struct {
	MessageSid string `json:"MessageSid"`
	SmsSid     string `json:"SmsSid"`
	AccountSid string `json:"AccountSid"`
	From       string `json:"From"`
	To         string `json:"To"`
	Body       string `json:"Body"`
	NumMedia   string `json:"NumMedia"`
}
// main sends a sample twilText message as JSON to the local command
// endpoint. The previous version did not compile: its struct literal
// referenced fields (asid, version, errcode, ...) that do not exist on
// twilText, and it dereferenced res.Body even when client.Do failed.
func main() {
	url := "http://localhost:8000/command"
	// Populate only the fields twilText actually declares, preserving
	// the meaningful sample values from the original (body, from, to).
	text := twilText{
		MessageSid: "12",
		SmsSid:     "1",
		AccountSid: "1",
		From:       "+1234567890",
		To:         "+14254175393",
		Body:       "LeftShark",
		NumMedia:   "6",
	}
	parsedText, err := json.Marshal(text)
	if err != nil {
		fmt.Println("Error sending message")
		return
	}
	req, err := http.NewRequest("GET", url, bytes.NewBuffer(parsedText))
	if err != nil {
		fmt.Println("Error sending message")
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("Error sending message")
		// Return before touching res: on error the response is nil and
		// the deferred Close would panic.
		return
	}
	defer res.Body.Close()
}
|
package main
import (
"log"
"net/http"
_ "github.com/go-sql-driver/mysql"
"github.com/ksbeasle/GoLang/api"
"github.com/ksbeasle/GoLang/application"
"github.com/ksbeasle/GoLang/database"
)
// main opens the MySQL connection, builds the application wiring, and
// serves the HTTP API on :8080 until the server exits.
func main() {
	/* Start a Connection to the Database - mysql */
	db, err := application.StartDB()
	if err != nil {
		log.Fatal(err)
	}
	app := &application.App{
		DBMODEL: &database.GameDB{DB: db},
	}
	// NOTE(review): app is only logged here and api.Routes() does not
	// receive it — the handlers presumably reach the DB some other way;
	// confirm the wiring is complete.
	log.Println(app)
	defer db.Close()
	// if err != nil {
	// 	errorLog.Fatal(err)
	// }
	//defer DB.Close()
	// /* Create a new application struct */
	// app := &application{
	// 	infoLog:  infoLog,
	// 	errorLog: errorLog,
	// 	db:       &mysql.DBModel{DB: DB},
	// 	//gonna check something here later -- db: DB,
	// }
	/* server struct */
	server := &http.Server{
		Addr:    ":8080",
		Handler: api.Routes(),
	}
	log.Println("STARTING SERVER AT PORT ... ", server.Addr)
	err = server.ListenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/getsentry/sentry-go"
"github.com/carlmjohnson/flagext"
"github.com/spotlightpa/almanack/internal/db"
"github.com/spotlightpa/almanack/internal/herokuapi"
"github.com/spotlightpa/almanack/internal/netlifyid"
"github.com/spotlightpa/almanack/internal/slack"
"github.com/spotlightpa/almanack/pkg/almanack"
)
// main configures the environment, reporting a startup failure to
// Slack before panicking, then hands the whitelistEmails handler to
// the Lambda runtime.
func main() {
	if err := globalEnv.parseEnv(); err != nil {
		// Best effort: tell Slack why the function could not start.
		globalEnv.sc.Post(slack.Message{
			Attachments: []slack.Attachment{
				{
					Title: "Could not start identity-signup",
					Text:  err.Error(),
					Color: colorRed,
				}}})
		panic(err)
	}
	globalEnv.logger.Printf("starting identity-signup rev %s", almanack.BuildVersion)
	lambda.Start(whitelistEmails)
}
// appEnv bundles the handler's runtime dependencies: the database
// querier, the Slack client, and a prefixed logger. It is populated by
// parseEnv during startup.
type appEnv struct {
	db     db.Querier
	sc     slack.Client
	logger *log.Logger
}
// parseEnv wires up logging, flag-based configuration (driven by env
// vars — no CLI args in Lambda), Heroku-sourced config, Sentry, Slack,
// and the Postgres connection. The Heroku step runs before MustHave —
// presumably so it can inject the postgres URL; confirm if reordering.
func (app *appEnv) parseEnv() error {
	app.logger = log.New(os.Stdout, "identity-signup ", log.LstdFlags)
	fl := flag.NewFlagSet("identity-signup", flag.ContinueOnError)
	slackHookURL := fl.String("slack-hook-url", "", "Slack hook endpoint `URL`")
	pg := db.FlagVar(fl, "postgres", "PostgreSQL database `URL`")
	heroku := herokuapi.ConfigureFlagSet(fl)
	sentryDSN := fl.String("sentry-dsn", "", "DSN `pseudo-URL` for Sentry")
	// Parse an empty argument list so only environment variables apply.
	if err := fl.Parse([]string{}); err != nil {
		return err
	}
	if err := flagext.ParseEnv(fl, "almanack"); err != nil {
		return err
	}
	if err := heroku.Configure(app.logger, map[string]string{
		"postgres": "DATABASE_URL",
	}); err != nil {
		return err
	}
	// Synchronous Sentry transport with a short timeout so events are
	// flushed before the Lambda invocation ends.
	if err := sentry.Init(sentry.ClientOptions{
		Dsn:       *sentryDSN,
		Release:   almanack.BuildVersion,
		Transport: &sentry.HTTPSyncTransport{Timeout: 1 * time.Second},
	}); err != nil {
		return err
	}
	app.sc = slack.New(*slackHookURL, app.logger)
	// Postgres is mandatory; fail startup when it is still unset.
	if err := flagext.MustHave(fl, "postgres"); err != nil {
		return err
	}
	app.db = *pg
	return nil
}
// globalEnv holds the process-wide dependencies shared by main and the
// Lambda handler; it is populated once in main via parseEnv.
var globalEnv appEnv

// Slack attachment colors: green when roles were assigned, red otherwise.
const (
	colorGreen = "#78bc20"
	colorRed   = "#da291c"
)
// whitelistEmails handles the identity signup event: it looks up roles
// for the new user's email domain, appends them to the user's app
// metadata, posts a summary to Slack, and returns the augmented user
// object in the response body.
func whitelistEmails(ctx context.Context, request events.APIGatewayProxyRequest) (resp events.APIGatewayProxyResponse, err error) {
	// Report any failure to Sentry on the way out.
	defer func() {
		if err != nil {
			sentry.CaptureException(err)
		}
	}()
	var data struct {
		EventType string         `json:"event"`
		User      netlifyid.User `json:"user"`
	}
	if err = json.Unmarshal([]byte(request.Body), &data); err != nil {
		return resp, err
	}
	roles, err := db.GetRolesForEmailDomain(ctx, globalEnv.db, data.User.Email)
	if err != nil {
		return resp, err
	}
	data.User.AppMetadata.Roles = append(data.User.AppMetadata.Roles, roles...)
	body, err := json.Marshal(data.User)
	if err != nil {
		return resp, err
	}
	msg := fmt.Sprintf("%s <%s> with %d role(s)",
		data.User.UserMetadata.FullName,
		data.User.Email,
		len(data.User.AppMetadata.Roles))
	// A red attachment flags a signup that matched no roles.
	color := colorGreen
	if len(data.User.AppMetadata.Roles) < 1 {
		color = colorRed
	}
	globalEnv.sc.Post(
		slack.Message{
			Attachments: []slack.Attachment{
				{
					Title: "New Almanack Registration",
					Text:  msg,
					Color: color,
					Fields: []slack.Field{
						{
							Title: "Roles",
							Value: strings.Join(data.User.AppMetadata.Roles, ", "),
							Short: true,
						}}}}},
	)
	return events.APIGatewayProxyResponse{
		StatusCode: 200,
		Body:       string(body),
	}, nil
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lint
import (
"context"
"path/filepath"
"github.com/moby/buildkit/frontend/dockerfile/command"
"go.lsp.dev/protocol"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/docker"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
)
// for testing
var getDockerDependenciesForEachFromTo = docker.GetDependenciesByDockerCopyFromTo

// dockerfileRules points at the package rule list so tests can swap it out.
var dockerfileRules = &dockerfileLintRules

// DockerfileLinters is the set of linters run against each Dockerfile.
var DockerfileLinters = []Linter{
	&DockerfileCommandLinter{},
}
// dockerfileLintRules flags COPY commands that hurt `skaffold dev`
// performance: copying very large source trees and copying `.git`.
var dockerfileLintRules = []Rule{
	{
		RuleID:   DockerfileCopyOver1000Files,
		RuleType: DockerfileCommandLintRule,
		Severity: protocol.DiagnosticSeverityWarning,
		Filter: DockerCommandFilter{
			DockerCommand:          command.Copy,
			DockerCopySourceRegExp: `.*`,
		},
		ExplanationTemplate: `Found docker 'COPY' command where the source directory "{{index .FieldMap "src"}}" has over 1000 files. This has the potential to dramatically slow 'skaffold dev' down ` +
			`as skaffold watches all sources files referenced in dockerfile COPY directives for changes. ` +
			`If you notice skaffold rebuilding images unnecessarily when non-image-critical files are ` +
			`modified, consider changing this to 'COPY $REQUIRED_SOURCE_FILE(s) {{index .FieldMap "dest"}}' for each required source file instead of ` +
			`or adding a .dockerignore file (https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring non-image-critical files. skaffold respects files ignored via the .dockerignore`,
		ExplanationPopulator: func(params InputParams) (explanationInfo, error) {
			return explanationInfo{
				FieldMap: map[string]interface{}{
					"src":  params.DockerCopyCommandInfo.From,
					"dest": params.DockerCopyCommandInfo.To,
				},
			}, nil
		},
		LintConditions: []func(InputParams) bool{func(params InputParams) bool {
			// Dependencies for this COPY are precomputed per from/to pair;
			// count them directly instead of iterating one by one.
			deps := params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()]
			return len(deps) > 1000
		}},
	},
	{
		RuleID:   DockerfileCopyContainsGitDir,
		RuleType: DockerfileCommandLintRule,
		Severity: protocol.DiagnosticSeverityWarning,
		Filter: DockerCommandFilter{
			DockerCommand:          command.Copy,
			DockerCopySourceRegExp: `.*`,
		},
		// TODO(aaron-prindle) suggest a full .dockerignore sample - .dockerignore:**/.git
		ExplanationTemplate: `Found docker 'COPY' command where the source directory "{{index .FieldMap "src"}}" contains a '.git' directory at {{index .FieldMap "gitDirectoryAbsPath"}}. This has the potential to dramatically slow 'skaffold dev' down ` +
			`as skaffold will watch all of the files in the .git directory as skaffold watches all sources files referenced in dockerfile COPY directives for changes. ` +
			`skaffold will likely rebuild images unnecessarily when non-image-critical files are ` +
			`modified during any git related operation. Consider adding a .dockerignore file (https://docs.docker.com/engine/reference/builder/#dockerignore-file) ignoring the '.git' directory. skaffold respects files ignored via the .dockerignore`,
		ExplanationPopulator: func(params InputParams) (explanationInfo, error) {
			// Locate the first dependency that lives under '.git' so the
			// explanation can point at its absolute path.
			var gitDirectoryAbsPath string
			for _, dep := range params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()] {
				if filepath.Dir(dep) == ".git" {
					gitDirectoryAbsPath = filepath.Join(params.WorkspacePath, filepath.Dir(dep))
					break
				}
			}
			return explanationInfo{
				FieldMap: map[string]interface{}{
					"src":                 params.DockerCopyCommandInfo.From,
					"gitDirectoryAbsPath": gitDirectoryAbsPath,
				},
			}, nil
		},
		// TODO(aaron-prindle) currently the LintCondition runs w/ deps that map to a dockerfile and not a specific COPY command. Can make certain rules infeasible
		LintConditions: []func(InputParams) bool{func(params InputParams) bool {
			for _, dep := range params.DockerfileToFromToToDeps[params.ConfigFile.AbsPath][params.DockerCopyCommandInfo.String()] {
				if filepath.Dir(dep) == ".git" {
					return true
				}
			}
			return false
		}},
	},
}
// GetDockerfilesLintResults runs every registered Dockerfile linter
// against each Docker artifact's Dockerfile in the resolved skaffold
// configs, deduplicating Dockerfiles seen more than once, and returns
// the combined results.
func GetDockerfilesLintResults(ctx context.Context, opts Options, dockerCfg docker.Config) (*[]Result, error) {
	cfgs, err := getConfigSet(ctx, config.SkaffoldOptions{
		ConfigurationFile:   opts.Filename,
		ConfigurationFilter: opts.Modules,
		RepoCacheDir:        opts.RepoCacheDir,
		Profiles:            opts.Profiles,
	})
	if err != nil {
		return nil, err
	}
	l := []Result{}
	// seen dedupes Dockerfiles shared by multiple artifacts/configs.
	seen := map[string]bool{}
	// Maps Dockerfile abs path -> COPY from/to key -> dependency files.
	dockerfileToFromToToDepMap := map[string]map[string][]string{}
	workdir, err := realWorkDir()
	if err != nil {
		return nil, err
	}
	for _, c := range cfgs {
		for _, a := range c.Build.Artifacts {
			if a.DockerArtifact != nil {
				// TODO(aaron-prindle) HACK - multi-module configs use abs path for a.Workspace vs single module which has rel path
				// see if there is a built-in/better way of handling this. This is currently working for multi-module
				ws := a.Workspace
				if !filepath.IsAbs(ws) {
					ws = filepath.Join(workdir, a.Workspace)
				}
				fp := filepath.Join(ws, a.DockerArtifact.DockerfilePath)
				if _, ok := seen[fp]; ok {
					continue
				}
				seen[fp] = true
				b, err := util.ReadFile(fp)
				if err != nil {
					return nil, err
				}
				dockerfile := ConfigFile{
					AbsPath: fp,
					RelPath: filepath.Join(a.Workspace, a.DockerArtifact.DockerfilePath),
					Text:    string(b),
				}
				// TODO(aaron-prindle) currently this dep map is computed twice; here and in skaffoldyamls.go, make a singleton/share-the-info
				// TODO(aaron-prindle) currently copy commands are parsed twice; here and in linters.go
				fromToToDepMap, err := getDockerDependenciesForEachFromTo(context.TODO(),
					docker.NewBuildConfig(ws, a.ImageName, fp, map[string]*string{}), nil)
				if err != nil {
					return nil, err
				}
				dockerfileToFromToToDepMap[fp] = fromToToDepMap
				// Run each linter over this Dockerfile and accumulate results.
				for _, r := range DockerfileLinters {
					recs, err := r.Lint(InputParams{
						ConfigFile:               dockerfile,
						SkaffoldConfig:           c,
						DockerfileToFromToToDeps: dockerfileToFromToToDepMap,
						WorkspacePath:            ws,
						DockerConfig:             dockerCfg,
					}, dockerfileRules)
					if err != nil {
						return nil, err
					}
					l = append(l, *recs...)
				}
			}
		}
	}
	return &l, nil
}
|
package model
import (
"errors"
"shared/utility/number"
"shared/utility/rand"
)
// Drop tracks, per reward ID, which indices have already been handed
// out, using a bit set keyed by reward ID.
type Drop map[int32]*number.BitNumber

// NewDrop returns an empty, ready-to-use Drop.
func NewDrop() *Drop {
	d := Drop{}
	return &d
}
// RandIndex picks a random index in [0, length), skipping indices
// already marked for rewardID; returns an error when length is invalid
// or every index has been marked.
// (Original note: given a length, randomly pick an index, never one
// that has been marked.)
func (d *Drop) RandIndex(rewardID int32, length int) (int, error) {
	if length <= 0 {
		return 0, errors.New("length invalid")
	}
	bn, ok := (*d)[rewardID]
	if !ok {
		// Nothing marked yet for this reward: any index is acceptable.
		return rand.RangeInt(0, length-1), nil
	}
	// Walk a random permutation and return the first unmarked index.
	is := rand.Perm(length)
	for _, i := range is {
		if !bn.IsMarked(i) {
			return i, nil
		}
	}
	return 0, errors.New("all dropped")
}
// MarkIndex marks the given index for rewardID so subsequent RandIndex
// calls will not return it. The bit set is created lazily on first use.
// (Original note: mark the index so it won't be randomly picked again.)
func (d *Drop) MarkIndex(rewardID int32, index int) {
	b, ok := (*d)[rewardID]
	if !ok {
		// Lazily create the bit set for this reward. The original also
		// called Mark here and then marked the same index again below;
		// the single Mark after the if covers both paths.
		b = number.NewBitNumber()
		(*d)[rewardID] = b
	}
	b.Mark(index)
}
// ClearIfAllDropped resets the marks for rewardID once every index in
// [0, length) has been marked, so future draws start over.
// (Original note: if all indices have been drawn, clear the marks.)
func (d *Drop) ClearIfAllDropped(rewardID int32, length int) error {
	if length <= 0 {
		return errors.New("length invalid")
	}
	bn, ok := (*d)[rewardID]
	if !ok {
		// no need refresh
		return nil
	}
	if bn.Counts() >= length {
		// all dropped
		bn.Clear()
	}
	return nil
}
|
// +build js
package math4g
import (
"github.com/gopherjs/gopherjs/js"
)
// JavaScript Math trig functions, looked up once at package init and
// invoked directly (this file is the GopherJS build, per the js tag).
var (
	sin = js.Global.Get("Math").Get("sin")
	cos = js.Global.Get("Math").Get("cos")
	tan = js.Global.Get("Math").Get("tan")
)
// Sin returns the sine of x, delegating to JavaScript's Math.sin.
func Sin(x Scala) Scala {
	return Scala(sin.Invoke(x).Float())
}
// Cos returns the cosine of x, delegating to JavaScript's Math.cos.
func Cos(x Scala) Scala {
	return Scala(cos.Invoke(x).Float())
}
// Tan returns the tangent of x, delegating to JavaScript's Math.tan.
func Tan(x Scala) Scala {
	return Scala(tan.Invoke(x).Float())
}
// Sincos returns both the sine and cosine of x via two separate
// JavaScript Math calls.
func Sincos(x Scala) (s, c Scala) {
	/* slow implementation: 1
	sin, cos := math.Sincos(float64(x))
	return Scala(sin), Scala(cos)
	*/
	return Scala(sin.Invoke(x).Float()), Scala(cos.Invoke(x).Float())
}
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package main
import (
"embed"
"fmt"
"log"
"net/http"
"os"
cloud "github.com/mattermost/mattermost-cloud/model"
"github.com/0xAX/notificator"
)
const (
	// DefaultPort is default listening port for incoming webhooks.
	DefaultPort = "8065"
	// ListenPortEnv is the env var name for overriding the default listen port.
	ListenPortEnv = "CWL_PORT"
)

// profileImageFS embeds the notification icon image into the binary.
//
//go:embed profile.png
var profileImageFS embed.FS

// notify delivers desktop notifications; initialized in main.
var notify *notificator.Notificator

// icon is the temp-dir path the embedded icon is written to at startup.
var icon = fmt.Sprintf("%s/cwl-icon.png", os.TempDir())
// handler parses an incoming cloud webhook payload, logs a short
// "[ TYPE | id ] old -> new" state-transition line, and pushes a
// desktop notification.
func handler(w http.ResponseWriter, r *http.Request) {
	webhook, err := cloud.WebhookPayloadFromReader(r.Body)
	if err != nil {
		log.Printf("Error: failed to parse webhook: %s", err)
		return
	}
	if len(webhook.ID) == 0 {
		return
	}
	// Map the payload type to a fixed-width four-letter tag.
	wType := "UNKN"
	switch webhook.Type {
	case cloud.TypeCluster:
		wType = "CLSR"
	case cloud.TypeInstallation:
		wType = "INST"
	case cloud.TypeClusterInstallation:
		wType = "CLIN"
	}
	// Guard the abbreviation: webhook.ID may be shorter than four
	// characters, and the unconditional webhook.ID[0:4] panicked then.
	shortID := webhook.ID
	if len(shortID) > 4 {
		shortID = shortID[0:4]
	}
	message := fmt.Sprintf("[ %s | %s ] %s -> %s", wType, shortID, webhook.OldState, webhook.NewState)
	log.Print(message)
	notify.Push("Cloud Webhook Listener", message, icon, notificator.UR_NORMAL)
	w.WriteHeader(http.StatusOK)
}
// main writes the embedded profile image to a temp file for use as the
// notification icon, configures desktop notifications, and serves the
// webhook handler on the configured port until the server exits.
func main() {
	profileImg, err := profileImageFS.ReadFile("profile.png")
	if err != nil {
		panic(err)
	}
	// Best effort: notifications still work without the custom icon.
	_ = os.WriteFile(icon, profileImg, 0600)
	notify = notificator.New(notificator.Options{
		DefaultIcon: icon,
		AppName:     "Cloud Webhook Listener",
	})
	// CWL_PORT overrides the default listen port when set.
	port := DefaultPort
	if len(os.Getenv(ListenPortEnv)) != 0 {
		port = os.Getenv(ListenPortEnv)
	}
	log.Printf("Starting cloud webhook listener on port %s", port)
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
/**
序列最长递增子序列及其长度
*/
package dynamicProgramming
import "fmt"
// SubqueryMax computes longest strictly increasing subsequences of a
// sequence via O(n^2) dynamic programming.
type SubqueryMax struct {
	arr []int // the input sequence
	cnt int   // number of elements considered (callers pass len(arr))
}

// newSubqueryMax wraps arr (with its element count cnt) for LIS queries.
func newSubqueryMax(arr []int, cnt int) *SubqueryMax {
	return &SubqueryMax{
		arr: arr,
		cnt: cnt,
	}
}

// getMaxNum returns the length of the longest strictly increasing
// subsequence, where states[i] is the LIS length ending at index i.
func (sm *SubqueryMax) getMaxNum() int {
	// Guard the empty sequence: the original unconditionally wrote
	// states[0] and panicked when cnt == 0. (Debug printing removed.)
	if sm.cnt <= 0 {
		return 0
	}
	states := make([]int, sm.cnt)
	states[0] = 1 // a lone element is an increasing subsequence of length 1
	for i := 1; i < sm.cnt; i++ {
		states[i] = 1
		for j := 0; j < i; j++ {
			// Recurrence: maxlen(i) = 1 + max over j < i with arr[j] < arr[i].
			if sm.arr[j] < sm.arr[i] && states[i] < states[j]+1 {
				states[i] = states[j] + 1
			}
		}
	}
	var max int
	for i := 0; i < sm.cnt; i++ {
		if max < states[i] {
			max = states[i]
		}
	}
	return max
}
// getMaxSub returns one longest strictly increasing subsequence of
// sm.arr, building for each index i the best subsequence ending there.
// NOTE(review): debug Printf/Println calls remain in place, and the
// method indexes sub[0] unconditionally — it panics when cnt == 0.
func (sm *SubqueryMax) getMaxSub() []int {
	// sub[i] holds the best increasing subsequence ending at arr[i].
	// Each slice gets capacity cnt up front so the reslice below
	// (sub[i][:len(sub[j])]) stays within its own backing array.
	sub := make([][]int, sm.cnt)
	for i := 0; i < sm.cnt; i++ {
		sub[i] = make([]int, 0, sm.cnt)
	}
	sub[0] = sub[0][:1]
	sub[0][0] = sm.arr[0]
	for i := 1; i < sm.cnt; i++ {
		for j := 0; j < i; j++ {
			// Adopt a copy of sub[j] whenever extending it beats sub[i].
			if sm.arr[j] < sm.arr[i] && len(sub[i]) < len(sub[j])+1 {
				fmt.Printf("bef=> i: %d, j: %d, sub: %v, %d, subj: %v\n",
					i, j, sub[i], len(sub[i]), sub[j])
				sub[i] = sub[i][:len(sub[j])]
				copy(sub[i], sub[j])
				fmt.Printf("aft=> i: %d, j: %d, sub: %v, %d, subj: %v\n",
					i, j, sub[i], len(sub[i]), sub[j])
			}
		}
		sub[i] = append(sub[i], sm.arr[i])
	}
	fmt.Println(sub)
	// Pick the longest of the per-index subsequences.
	maxSub := sub[0]
	for _, v := range sub {
		if len(v) > len(maxSub) {
			maxSub = v
		}
	}
	return maxSub
}
|
package account
// TransferPointRequest is the JSON payload for transferring an amount
// of points from one user to another within a group. Amount is a
// string — presumably to preserve decimal precision; confirm with the
// API producer.
type TransferPointRequest struct {
	FromUid string `json:"fromUid"`
	ToUid   string `json:"toUid"`
	GroupId int64  `json:"groupId"`
	Amount  string `json:"amount"`
}
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package bitmark_test
import (
"crypto/ed25519"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/bitmark-inc/bitmarkd/account"
"github.com/bitmark-inc/bitmarkd/chain"
"github.com/bitmark-inc/bitmarkd/currency"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/messagebus"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/bitmarkd/pay"
"github.com/bitmark-inc/bitmarkd/reservoir"
"github.com/bitmark-inc/bitmarkd/rpc/bitmark"
"github.com/bitmark-inc/bitmarkd/rpc/fixtures"
"github.com/bitmark-inc/bitmarkd/rpc/mocks"
"github.com/bitmark-inc/bitmarkd/transactionrecord"
"github.com/bitmark-inc/logger"
)
// TestBitmarkTransfer exercises the Transfer RPC: the stored transfer's
// payment info must be echoed into the reply and a "transfer" command
// must be broadcast on the message bus.
func TestBitmarkTransfer(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	// Subscribe to the broadcast bus to observe the outgoing command.
	bus := messagebus.Bus.Broadcast.Chan(5)
	defer messagebus.Bus.Broadcast.Release()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	owner := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	// The RPC argument: a countersigned transfer record.
	transfer := transactionrecord.BitmarkTransferCountersigned{
		Link:             merkle.Digest{},
		Escrow:           nil,
		Owner:            &owner,
		Signature:        nil,
		Countersignature: nil,
	}
	// NOTE(review): the mock below expects StoreTransfer to receive an
	// UNRATIFIED record even though a countersigned one is submitted —
	// presumably Transfer converts it; confirm against the handler.
	unratitifed := transactionrecord.BitmarkTransferUnratified{
		Link:      merkle.Digest{},
		Escrow:    nil,
		Owner:     &owner,
		Signature: nil,
	}
	// Canned reservoir response whose fields are asserted on the reply.
	info := reservoir.TransferInfo{
		Id:        pay.PayId{1, 2},
		TxId:      merkle.Digest{1, 2},
		IssueTxId: merkle.Digest{1, 2},
		Packed:    nil,
		Payments: []transactionrecord.PaymentAlternative{
			[]*transactionrecord.Payment{
				{
					Currency: currency.Litecoin,
					Address:  fixtures.LitecoinAddress,
					Amount:   100,
				},
			},
		},
	}
	r := mocks.NewMockReservoir(ctl)
	r.EXPECT().StoreTransfer(&unratitifed).Return(&info, false, nil).Times(1)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{},
		func(_ mode.Mode) bool { return true },
		func() bool { return true },
		r,
	)
	var reply bitmark.TransferReply
	err := b.Transfer(&transfer, &reply)
	assert.Nil(t, err, "wrong transfer")
	assert.Equal(t, info.Id, reply.PayId, "wrong payID")
	assert.Equal(t, info.TxId, reply.TxId, "wrong txID")
	assert.Equal(t, 1, len(reply.Payments), "wrong payment count")
	assert.Equal(t, fixtures.LitecoinAddress, reply.Payments[currency.Litecoin.String()][0].Address, "wrong litecoin payment address")
	// The handler must broadcast a "transfer" command.
	received := <-bus
	assert.Equal(t, "transfer", received.Command, "wrong message")
}
// TestBitmarkProvenanceWhenBitmarkIssuance exercises the Provenance RPC
// over a two-record chain: a BitmarkIssue followed by its AssetData.
// The transaction, asset, and owner-index pools are all mocked.
func TestBitmarkProvenanceWhenBitmarkIssuance(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	r := mocks.NewMockReservoir(ctl)
	poolT := mocks.NewMockHandle(ctl)
	poolA := mocks.NewMockHandle(ctl)
	poolO := mocks.NewMockHandle(ctl)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{
			Assets:       poolA,
			Transactions: poolT,
			OwnerTxIndex: poolO,
		},
		func(_ mode.Mode) bool { return true },
		func() bool { return true },
		r,
	)
	txID := merkle.Digest{1, 2, 3, 4}
	// Ask for at most two provenance records starting from txID.
	arg := bitmark.ProvenanceArguments{
		TxId:  txID,
		Count: 2,
	}
	acc := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	// Build and sign the issue record: pack once unsigned to obtain the
	// bytes to sign, then pack again with the signature attached.
	tr1 := transactionrecord.BitmarkIssue{
		AssetId:   transactionrecord.AssetIdentifier{},
		Owner:     &acc,
		Nonce:     1,
		Signature: nil,
	}
	packed1, _ := tr1.Pack(&acc)
	tr1.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed1)
	packed1, _ = tr1.Pack(&acc)
	// Same two-pass pack/sign dance for the asset record.
	ass := transactionrecord.AssetData{
		Name:        "test",
		Fingerprint: "fin",
		Metadata:    "owner\x00me",
		Registrant:  &acc,
		Signature:   nil,
	}
	packed2, _ := ass.Pack(&acc)
	ass.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed2)
	packed2, _ = ass.Pack(&acc)
	// Pool expectations: the issue comes from the transaction pool, the
	// ownership check succeeds, and the asset comes from the asset pool.
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(1), packed1).Times(1)
	poolO.EXPECT().Has(gomock.Any()).Return(true).Times(1)
	poolA.EXPECT().GetNB(gomock.Any()).Return(uint64(1), packed2).Times(1)
	var reply bitmark.ProvenanceReply
	err := b.Provenance(&arg, &reply)
	assert.Nil(t, err, "wrong Provenance")
	assert.Equal(t, 2, len(reply.Data), "wrong reply count")
	assert.Equal(t, "BitmarkIssue", reply.Data[0].Record, "wrong record name")
	assert.True(t, reply.Data[0].IsOwner, "wrong is owner")
	assert.Equal(t, txID, reply.Data[0].TxId, "wrong tx ID")
	assert.Equal(t, "AssetData", reply.Data[1].Record, "wrong record name")
	d := reply.Data[1].Data.(*transactionrecord.AssetData)
	assert.Equal(t, ass.Name, d.Name, "wrong asset name")
	assert.Equal(t, ass.Fingerprint, d.Fingerprint, "wrong asset fingerprint")
	assert.Equal(t, ass.Metadata, d.Metadata, "wrong meta data")
	assert.Equal(t, &acc, d.Registrant, "wrong registrant")
}
// TestBitmarkProvenanceWhenOldBaseData verifies that a provenance chain
// ending at a legacy OldBaseData record is reported as a single "BaseData"
// entry owned by the caller.
func TestBitmarkProvenanceWhenOldBaseData(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	// Mocked reservoir and storage handles injected into the RPC handler.
	r := mocks.NewMockReservoir(ctl)
	poolT := mocks.NewMockHandle(ctl)
	poolA := mocks.NewMockHandle(ctl)
	poolO := mocks.NewMockHandle(ctl)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{
			Assets:       poolA,
			Transactions: poolT,
			OwnerTxIndex: poolO,
		},
		func(_ mode.Mode) bool { return true }, // accept every node mode
		func() bool { return true },            // report node as synchronised
		r,
	)
	txID := merkle.Digest{1, 2, 3, 4}
	arg := bitmark.ProvenanceArguments{
		TxId:  txID,
		Count: 2,
	}
	acc := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	tr1 := transactionrecord.OldBaseData{
		Currency:       currency.Litecoin,
		PaymentAddress: fixtures.LitecoinAddress,
		Owner:          &acc,
		Nonce:          1,
		Signature:      nil,
	}
	// Pack to obtain signable bytes, sign, re-pack with the valid signature.
	packed1, _ := tr1.Pack(&acc)
	tr1.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed1)
	packed1, _ = tr1.Pack(&acc)
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(1), packed1).Times(1)
	poolO.EXPECT().Has(gomock.Any()).Return(true).Times(1)
	var reply bitmark.ProvenanceReply
	err := b.Provenance(&arg, &reply)
	assert.Nil(t, err, "wrong Provenance")
	assert.Equal(t, 1, len(reply.Data), "wrong reply count")
	assert.Equal(t, "BaseData", reply.Data[0].Record, "wrong record name")
	assert.True(t, reply.Data[0].IsOwner, "wrong is owner")
	assert.Equal(t, txID, reply.Data[0].TxId, "wrong tx ID")
	assert.Equal(t, &tr1, reply.Data[0].Data, "wrong data")
}
// TestBitmarkProvenanceWhenBlockFoundation verifies that a BlockFoundation
// record terminates the chain with a single entry and that IsOwner is false
// when the owner-transaction index reports no ownership.
func TestBitmarkProvenanceWhenBlockFoundation(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	// Mocked reservoir and storage handles injected into the RPC handler.
	r := mocks.NewMockReservoir(ctl)
	poolT := mocks.NewMockHandle(ctl)
	poolA := mocks.NewMockHandle(ctl)
	poolO := mocks.NewMockHandle(ctl)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{
			Assets:       poolA,
			Transactions: poolT,
			OwnerTxIndex: poolO,
		},
		func(_ mode.Mode) bool { return true }, // accept every node mode
		func() bool { return true },            // report node as synchronised
		r,
	)
	txID := merkle.Digest{1, 2, 3, 4}
	arg := bitmark.ProvenanceArguments{
		TxId:  txID,
		Count: 2,
	}
	acc := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	tr1 := transactionrecord.BlockFoundation{
		Version: uint64(1),
		Payments: map[currency.Currency]string{
			currency.Bitcoin:  fixtures.BitcoinAddress,
			currency.Litecoin: fixtures.LitecoinAddress,
		},
		Owner:     &acc,
		Nonce:     1,
		Signature: nil,
	}
	// Pack to obtain signable bytes, sign, re-pack with the valid signature.
	packed1, _ := tr1.Pack(&acc)
	tr1.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed1)
	packed1, _ = tr1.Pack(&acc)
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(1), packed1).Times(1)
	// Ownership lookup deliberately returns false to drive IsOwner == false.
	poolO.EXPECT().Has(gomock.Any()).Return(false).Times(1)
	var reply bitmark.ProvenanceReply
	err := b.Provenance(&arg, &reply)
	assert.Nil(t, err, "wrong Provenance")
	assert.Equal(t, 1, len(reply.Data), "wrong reply count")
	assert.Equal(t, "BlockFoundation", reply.Data[0].Record, "wrong record name")
	assert.False(t, reply.Data[0].IsOwner, "wrong is owner")
	assert.Equal(t, txID, reply.Data[0].TxId, "wrong tx ID")
	assert.Equal(t, &tr1, reply.Data[0].Data, "wrong data")
}
// TestBitmarkProvenanceWhenTransferUnratified verifies that following a
// BitmarkTransferUnratified record stops when the linked transaction cannot
// be found (the second GetNB returns no bytes), yielding a single entry.
func TestBitmarkProvenanceWhenTransferUnratified(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	// Mocked reservoir and storage handles injected into the RPC handler.
	r := mocks.NewMockReservoir(ctl)
	poolT := mocks.NewMockHandle(ctl)
	poolA := mocks.NewMockHandle(ctl)
	poolO := mocks.NewMockHandle(ctl)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{
			Assets:       poolA,
			Transactions: poolT,
			OwnerTxIndex: poolO,
		},
		func(_ mode.Mode) bool { return true }, // accept every node mode
		func() bool { return true },            // report node as synchronised
		r,
	)
	txID := merkle.Digest{1, 2, 3, 4}
	arg := bitmark.ProvenanceArguments{
		TxId:  txID,
		Count: 2,
	}
	acc := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	tr1 := transactionrecord.BitmarkTransferUnratified{
		Link:      merkle.Digest{},
		Escrow:    nil,
		Owner:     &acc,
		Signature: nil,
	}
	// Pack to obtain signable bytes, sign, re-pack with the valid signature.
	packed1, _ := tr1.Pack(&acc)
	tr1.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed1)
	packed1, _ = tr1.Pack(&acc)
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(1), packed1).Times(1)
	// The linked (zero) digest resolves to nothing, ending the walk.
	poolT.EXPECT().GetNB(merkle.Digest{}.Bytes()).Return(uint64(0), nil).Times(1)
	poolO.EXPECT().Has(gomock.Any()).Return(true).Times(1)
	var reply bitmark.ProvenanceReply
	err := b.Provenance(&arg, &reply)
	assert.Nil(t, err, "wrong Provenance")
	assert.Equal(t, 1, len(reply.Data), "wrong reply count")
	assert.Equal(t, "BitmarkTransferUnratified", reply.Data[0].Record, "wrong record name")
	assert.True(t, reply.Data[0].IsOwner, "wrong is owner")
	assert.Equal(t, txID, reply.Data[0].TxId, "wrong tx ID")
	assert.Equal(t, &tr1, reply.Data[0].Data, "wrong data")
}
// TestBitmarkProvenanceWhenBitmarkShare verifies that a BitmarkShare record
// is reported as a "ShareBalance" entry and that the walk ends when the
// linked transaction cannot be found.
func TestBitmarkProvenanceWhenBitmarkShare(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	mode.Initialise(chain.Testing)
	defer mode.Finalise()
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	// Mocked reservoir and storage handles injected into the RPC handler.
	r := mocks.NewMockReservoir(ctl)
	poolT := mocks.NewMockHandle(ctl)
	poolA := mocks.NewMockHandle(ctl)
	poolO := mocks.NewMockHandle(ctl)
	b := bitmark.New(
		logger.New(fixtures.LogCategory),
		reservoir.Handles{
			Assets:       poolA,
			Transactions: poolT,
			OwnerTxIndex: poolO,
		},
		func(_ mode.Mode) bool { return true }, // accept every node mode
		func() bool { return true },            // report node as synchronised
		r,
	)
	txID := merkle.Digest{1, 2, 3, 4}
	arg := bitmark.ProvenanceArguments{
		TxId:  txID,
		Count: 2,
	}
	acc := account.Account{
		AccountInterface: &account.ED25519Account{
			Test:      true,
			PublicKey: fixtures.IssuerPublicKey,
		},
	}
	// The share links back to its own txID, so the same key is looked up twice.
	tr1 := transactionrecord.BitmarkShare{
		Link:      txID,
		Quantity:  5,
		Signature: nil,
	}
	// Pack to obtain signable bytes, sign, re-pack with the valid signature.
	packed1, _ := tr1.Pack(&acc)
	tr1.Signature = ed25519.Sign(fixtures.IssuerPrivateKey, packed1)
	packed1, _ = tr1.Pack(&acc)
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(1), packed1).Times(1)
	// Second lookup of the linked txID finds nothing, ending the walk.
	poolT.EXPECT().GetNB(txID[:]).Return(uint64(0), nil).Times(1)
	var reply bitmark.ProvenanceReply
	err := b.Provenance(&arg, &reply)
	assert.Nil(t, err, "wrong Provenance")
	assert.Equal(t, 1, len(reply.Data), "wrong reply count")
	assert.Equal(t, "ShareBalance", reply.Data[0].Record, "wrong record name")
	assert.True(t, reply.Data[0].IsOwner, "wrong is owner")
	assert.Equal(t, txID, reply.Data[0].TxId, "wrong tx ID")
	assert.Equal(t, &tr1, reply.Data[0].Data, "wrong data")
}
|
package syncer
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"github.com/baidu/ote-stack/pkg/util"
)
// PodSyncer is responsible for synchronizing pod from apiserver.
type PodSyncer struct {
	ctx      *SyncContext             // shared sync context (storage + edge-node plumbing)
	Informer cache.SharedIndexInformer // pod informer from the default factory
}
// NewPodSyncer builds a Syncer backed by the default informer factory's pod
// informer. It returns nil when the default factory is not registered in ctx.
func NewPodSyncer(ctx *SyncContext) Syncer {
	factory, found := ctx.InformerFactory[DefaultInformerFatory]
	if !found {
		return nil
	}
	syncer := &PodSyncer{ctx: ctx}
	syncer.Informer = factory.Core().V1().Pods().Informer()
	return syncer
}
// startSyncer validates the sync context and registers this syncer's
// informer event handlers.
func (ps *PodSyncer) startSyncer() error {
	if ps.ctx.IsValid() {
		registerInformerHandler(ps)
		return nil
	}
	return fmt.Errorf("start pod syncer failed: SyncContext is invalid")
}
// handleAddEvent puts the added pod into persistent storage, and return to edge node as a watch event.
func (ps *PodSyncer) handleAddEvent(obj interface{}) {
	// Guard the type assertion: a non-pod object must not panic the
	// informer event loop (the unchecked obj.(*corev1.Pod) previously could).
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		klog.Errorf("object is not a pod %v", obj)
		return
	}
	ps.addKindAndVersion(pod)
	klog.V(4).Infof("add pod: %s", pod.Name)
	go syncToNode(watch.Added, util.ResourcePod, pod)
	syncToStorage(ps.ctx, watch.Added, util.ResourcePod, pod)
}
// handleUpdateEvent puts the modified pod into persistent storage, and return to edge node as a watch event.
func (ps *PodSyncer) handleUpdateEvent(old, new interface{}) {
	// Guard both assertions: a non-pod object must not panic the informer
	// event loop (the unchecked casts previously could).
	newPod, ok := new.(*corev1.Pod)
	if !ok {
		klog.Errorf("new object is not a pod %v", new)
		return
	}
	oldPod, ok := old.(*corev1.Pod)
	if !ok {
		klog.Errorf("old object is not a pod %v", old)
		return
	}
	if newPod.ResourceVersion == oldPod.ResourceVersion {
		// Periodic resync will send update events for all known Deployments.
		// Two different versions of the same Deployment will always have different RVs.
		return
	}
	ps.addKindAndVersion(newPod)
	klog.V(4).Infof("update pod: %s", newPod.Name)
	go syncToNode(watch.Modified, util.ResourcePod, newPod)
	syncToStorage(ps.ctx, watch.Modified, util.ResourcePod, newPod)
}
// handleDeleteEvent delete the pod from persistent storage, and return to edge node as a watch event.
func (ps *PodSyncer) handleDeleteEvent(obj interface{}) {
	pod, isPod := obj.(*corev1.Pod)
	if !isPod {
		// A delete that was missed by the watch arrives as a tombstone;
		// unwrap it to recover the final pod state.
		tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			klog.Errorf("Couldn't get object from tombstone %v", obj)
			return
		}
		if pod, isPod = tombstone.Obj.(*corev1.Pod); !isPod {
			klog.Errorf("Tombstone contained object that is not a pod %v", obj)
			return
		}
	}
	ps.addKindAndVersion(pod)
	klog.V(4).Infof("delete pod: %s", pod.Name)
	go syncToNode(watch.Deleted, util.ResourcePod, pod)
	syncToStorage(ps.ctx, watch.Deleted, util.ResourcePod, pod)
}
// getInformer returns informer of this syncer.
// Used by the registration helpers to attach event handlers.
func (ps *PodSyncer) getInformer() cache.SharedIndexInformer {
	return ps.Informer
}
// addKindAndVersion fills in the pod's TypeMeta before the object is sent
// downstream; client-go strips Kind/APIVersion from informer objects, so
// they are restored here for consumers that rely on them.
func (ps *PodSyncer) addKindAndVersion(pod *corev1.Pod) {
	pod.APIVersion = "v1"
	pod.Kind = "Pod"
}
|
package generate
import (
"encoding/xml"
"os"
)
const namespace = "http://www.sitemaps.org/schemas/sitemap/0.9"
// XmlLoc is a single sitemap URL entry, serialized as a <loc> element
// inside a <url> wrapper.
type XmlLoc struct {
	Value string `xml:"loc"`
}
// UrlSet is the root element of a sitemap document: the list of URL
// entries plus the xmlns attribute (filled in by ConvertXml).
type UrlSet struct {
	Urls       []XmlLoc `xml:"url"`
	XNamespace string   `xml:"xmlns,attr"`
}
// ConvertXml writes convert as an indented sitemap XML document to filename,
// stamping the sitemaps.org namespace before encoding. Any create or encode
// failure panics, matching the original contract.
func ConvertXml(convert UrlSet, filename string) {
	f, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	// Close the file in all cases; the original leaked the handle.
	defer f.Close()
	convert.XNamespace = namespace
	enc := xml.NewEncoder(f)
	enc.Indent("", " ")
	if err := enc.Encode(convert); err != nil {
		panic(err)
	}
}
|
/*
Copyright 2021 github.com/moizalicious
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fetcher
import (
"golang.org/x/net/html"
)
// Fetcher provides the interface with a function
// which can be used to fetch and parse HTML content
// from a given source (a URL or a file path, depending
// on the implementation). The returned node is the root
// of the parsed HTML document.
type Fetcher interface {
	Fetch(string) (*html.Node, error)
}
// NewURLFetcher creates and returns an instance of urlFetcher,
// a Fetcher that retrieves HTML over HTTP from a URL.
func NewURLFetcher() Fetcher {
	return urlFetcher{}
}
// NewFileFetcher creates and returns an instance of fileFetcher,
// a Fetcher that reads HTML from the local filesystem.
func NewFileFetcher() Fetcher {
	return fileFetcher{}
}
|
package main
import (
"fmt"
"math"
)
// main contrasts math.Round (half away from zero) with math.RoundToEven
// (banker's rounding) on the two half-way values 42.5 and 43.5.
func main() {
	lower, upper := 42.5, 43.5
	fmt.Println(math.Round(lower), math.Round(upper))
	fmt.Println(math.RoundToEven(lower), math.RoundToEven(upper))
}
|
package router
import (
"log"
"net/http"
"time"
jwt "github.com/appleboy/gin-jwt"
"github.com/filiponegrao/desafio-globo.com/controllers"
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/cookie"
csrf "github.com/utrack/gin-csrf"
"github.com/gin-gonic/gin"
)
// Initialize wires up the whole HTTP surface: templates, session store,
// CSRF protection, security-header interceptor, JWT auth middleware and all
// routes (public first, then JWT-protected).
//
// NOTE(review): the session, CSRF and JWT secrets are hard-coded below —
// confirm they are replaced by configuration before production use.
func Initialize(r *gin.Engine) {
	controllers.Config(r)
	r.LoadHTMLGlob("view/*")
	// Cookie-backed session store, required by the CSRF middleware.
	store := cookie.NewStore([]byte("secret"))
	session := sessions.Sessions("mysession", store)
	r.Use(session)
	r.Use(csrf.Middleware(csrf.Options{
		Secret: "Globo.com.deasfio.secret",
		// IgnoreMethods: []string{"/new-password/:hash"},
		ErrorFunc: func(c *gin.Context) {
			c.String(400, "CSRF token mismatch")
			c.Abort()
		},
	}))
	r.Use(Interceptor())
	// the jwt middleware
	authMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		Realm:           "test zone",
		Key:             []byte("secret key"),
		Timeout:         time.Hour * 24 * 7,
		MaxRefresh:      time.Hour,
		IdentityKey:     "id",
		PayloadFunc:     controllers.AuthorizationPayload,
		IdentityHandler: controllers.IdentityHandler,
		Authenticator:   controllers.UserAuthentication,
		Authorizator:    controllers.UserAuthorization,
		Unauthorized:    controllers.UserUnauthorized,
		// Token is accepted from the header, a query parameter or a cookie.
		TokenLookup:   "header: Authorization, query: token, cookie: jwt",
		TokenHeadName: "Bearer",
		TimeFunc:      time.Now,
		LoginResponse: LoginResponse,
	})
	if err != nil {
		log.Fatal("JWT Error:" + err.Error())
	}
	api := r.Group("")
	// Routes that do not require authentication.
	api.GET("", RedirectToLogin)
	api.GET("/login", controllers.GetLoginPage)
	api.POST("/users", controllers.CreateUser)
	api.GET("/register", controllers.GetRegsisterPage)
	api.GET("/forgot-password", controllers.GetForgotPasswordPage)
	api.GET("/new-password/:hash", controllers.GetNewPasswordPage)
	api.POST("/login", authMiddleware.LoginHandler)
	api.POST("/forgot-password", controllers.ForgotPassword)
	api.POST("/new-password/:hash", controllers.NewPassword)
	// Routes below require a valid JWT.
	api.Use(authMiddleware.MiddlewareFunc())
	{
		api.GET("/bookmarks", controllers.GetBookmarksPage)
		api.GET("/create-bookmark", controllers.GetCreateBookmarkPage)
		api.POST("/bookmarks", controllers.CreateBookmark)
		api.POST("/delete-bookmark/:id", controllers.DeleteBookmark)
	}
}
// RedirectToLogin permanently redirects the site root to the login page.
func RedirectToLogin(c *gin.Context) {
	c.Redirect(http.StatusMovedPermanently, "/login")
}
// LoginResponse is the gin-jwt login hook: it stores the freshly issued JWT
// in a cookie and in the request context, then redirects to the bookmarks
// page. The last parameter was previously named "time", shadowing the time
// package inside the function body; it is renamed (Go parameter names are
// not part of the call interface, so callers are unaffected).
//
// NOTE(review): the cookie domain "localhost:8080" is hard-coded and the
// cookie is neither Secure nor HttpOnly — confirm before deploying beyond
// local development.
func LoginResponse(c *gin.Context, code int, token string, expiry time.Time) {
	tokenString := "Bearer " + token
	c.SetCookie("authorization", tokenString, 3600, "", "localhost:8080", false, false)
	c.Set("authorization", tokenString)
	c.Redirect(303, "/bookmarks")
}
// Interceptor returns middleware that sets basic security headers on every
// response and, when an "authorization" cookie is present, copies its value
// into the Authorization request header so the JWT middleware can see it.
func Interceptor() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Secure actions
		c.Header("X-Frame-Options", "deny")
		c.Header("X-XSS-Protection", "1")
		c.Header("X-Content-Type-Options", "nosniff")
		// // CRSF Token
		// token := csrf.GetToken(c)
		// log.Println("Teste")
		// log.Println(token)
		// c.Request.Header.Set("X-CSRF-TOKEN", token)
		// Missing cookie is expected for unauthenticated requests; just log it.
		resp, err := c.Cookie("authorization")
		if err != nil {
			log.Println(err)
		} else {
			c.Request.Header.Set("Authorization", resp)
		}
		c.Next()
	}
}
|
package main
import (
"fmt"
"os"
"github.com/wenkesj/genexp"
)
// main reads two sequences from the command line, aligns them with a local
// alignment cost and prints every alignment with its score.
func main() {
	if len(os.Args) < 3 {
		panic("Expected 2 sequences as part of the command. \n For example: genexpa ATG GTG")
	}
	seqA, seqB := os.Args[1], os.Args[2]
	alignedAs, alignedBs, score := genexp.Align(seqA, seqB, genexp.LocalAlignmentCost)
	fmt.Println("Alignments: ")
	for i := range alignedAs {
		fmt.Println("A: ", alignedAs[i])
		fmt.Println("B: ", alignedBs[i])
		fmt.Println("Score: ", score)
	}
}
|
package main
import (
"fmt" //package implementing formatted I/O
// "math"
// "strings"
// "myAnonymous"
// "myClosure"
// "myErrorhandling"
// "myMap"
// "myVarargs"
"slice"
// "myOOP"
// "myGoRoutine"
// "myString"
// "myTime"
)
// test_multi_assign demonstrates Go's parallel assignment by swapping two
// strings and printing the value that ends up in the second variable.
func test_multi_assign() {
	first, second := "chenxi", "zhangchong"
	first, second = second, first
	fmt.Printf(" %s\n", second)
}
// test_multi_return demonstrates discarding all but one value of a
// multi-valued return with the blank identifier.
func test_multi_return() {
	_, surname, _ := getName()
	fmt.Printf("lastname is %s\n", surname)
}
// test_operator prints the bitwise complement (unary ^) of 2, which is -3.
func test_operator() {
	value := 2
	fmt.Printf("^2=%v\n", ^value)
}
// test_complex shows complex64 (two float32 components) and three equivalent
// ways to spell the same complex constant: literal, literal via inference,
// and the complex() builtin.
func test_complex() {
	var c1, c2, c3 complex64
	c1, c2, c3 = 3.2+12i, 3.2+12i, complex(3.2, 12)
	fmt.Printf("value1=%v\n", c1)
	fmt.Printf("value2=%v\n", c2)
	fmt.Printf("value3=%v\n", c3)
}
// test_for walks a fixed-size array with an index loop and with range, then
// ranges over a mixed ASCII/Chinese string to show that range yields runes
// together with their starting byte offset (each Chinese character occupies
// three bytes of UTF-8). Always returns nil.
func test_for(array [10]int) error {
	for i, n := 0, len(array); i < n; i++ {
		fmt.Println("Element", i, "of array is", array[i])
	}
	// range is the idiomatic way to iterate a container: index/value pairs.
	for idx, elem := range array {
		fmt.Println("Array element[", idx, "]=", elem)
	}
	for pos, r := range "Go在中国" {
		fmt.Printf("character '%c' type is %T value is %v, and start at byte position %d \n", r, r, r, pos)
		s := string(r) // convert rune to string
		fmt.Printf("string(%v)=>%s \n", r, s)
	}
	return nil
}
// IsEqual reports whether f1 and f2 are equal within the caller-supplied
// precision p (e.g. 0.00001). The original body was a stub that always
// returned true with the real comparison commented out; this implements the
// advertised |f1-f2| < p test without needing the math package.
func IsEqual(f1, f2, p float64) bool {
	diff := f1 - f2
	if diff < 0 {
		diff = -diff
	}
	return diff < p
}
// getName returns a fixed first name, last name and nickname via named
// result parameters, printing "OK" on the way (the original wrapped the
// print in an always-true if; the print itself is preserved).
func getName() (firstName, lastName, nickName string) {
	fmt.Printf("OK\n")
	return "zhang", "chong", "handsome"
}
// test_rune prints the same rune with %v (its integer code point, 101)
// and %c (the character itself, 'e').
func test_rune() {
	ch := 'e'
	fmt.Printf("ch v = %v\n", ch)
	fmt.Printf("ch c = %c\n", ch)
}
// test_string demonstrates concatenation, byte length, and byte-wise
// indexing of a UTF-8 string (indexing yields raw bytes, not runes).
func test_string() {
	str := "Zhongguo"
	str1 := "Hello world" // strings may be initialised at declaration
	// str[0] = 'X' would not compile: string contents are immutable.
	total := str + " " + str1
	fmt.Printf("total = %s,total size is %v\n", total, len(total))
	str = "Hello,世界"
	for i, n := 0, len(str); i < n; i++ {
		b := str[i] // indexing a string gives a byte
		fmt.Println(i, b)
	}
}
// test_banner prints a section header for the demo below. The name is now
// passed as a Printf argument instead of being concatenated into the format
// string, so a '%' inside the name can no longer be misinterpreted as a
// formatting verb (go vet flags the original form).
func test_banner(method_name string) {
	fmt.Printf("\n======= %s ======\n", method_name)
}
// main is a scratch driver for the language demos in this file; all but the
// slice demo are currently commented out. The commented declarations below
// are kept as reference material from the original notes.
func main() {
	// Declaring a pointer to an array:
	// var p2array *[3]int;
	// Declaring an array of pointers:
	// var pointers [3]*int;
	// f := func(x, y int) int {
	// return x + y
	// }//closable
	// var v1 int = 10
	// v1 := 11
	// var v3 [10]int // array
	// var v4 []int // slice
	// var v5 struct {
	// f int
	// }
	// var v6 *int // pointer
	// var v7 map[string]int // map with string keys and int values
	// var v8 func(a int) int
	// fmt.Printf("%v", v1)
	// test_banner("test_multi_assign")
	// test_multi_assign()
	// test_banner("test_multi_return")
	// test_multi_return()
	// test_banner("test_operator")
	// test_operator()
	// test_banner("test_complex")
	// test_complex()
	// test_banner("test_rune")
	// test_rune()
	// test_banner("test_string")
	// test_string()
	// test_banner("test_for")
	// var array [10]int // int array elements default to 0 when uninitialised
	// test_for(array)
	test_banner("test_slice")
	slice.Test_slice()
	// test_banner("test_varargs")
	// myVarargs.Test_varargs()
	// test_banner("Test_anonymous")
	// myAnonymous.Test_anonymous()
	// test_banner("Test_closure")
	// myClosure.Test_closure()
	// test_banner("Test_Errorhandling")
	// myErrorhandling.Test_Errorhandling()
	// test_banner("Test_Map")
	// myMap.Test_Map()
	// test_banner("Test_OOP")
	// myOOP.Test_OOP()
	// test_banner("Test_goroutine")
	// myGoRoutine.Test_goroutine()
	// testCopy()
	// testRandom()
	// test_banner("Test_String")
	// myString.Test_String()
	// test_banner("Test_Time")
	// myTime.Test_Mytime()
	// var f uint16 = 12
	// fmt.Println(f, string(f))
}
// func testRandom() {
// // Ni+1=(A* Ni + B)% M 其中i = 0,1,...,M-1
// last := 23
// fmt.Println(last)
// for i := 0; i < 10000; i++ {
// next := (991*last + 857) % 10000
// fmt.Println(next)
// last = next
// }
// }
// func testCopy() {
// // 0 1 2 3 4 5 6 7
// var values []int = []int{11, 22, 13, 4, 51, 6, 7, 8}
// left := 0
// right := 7
// m := 3
// i := left //[left ... i ... m]
// w := m + 1 //[m+1 ... w ... right]
// s := values[left : right+1] //切片[start:end] end 是指超出末尾 1 个位置的那个地方,也就是实际是 [start,end-1] 的元素
// tmp := make([]int, 0)
// for i <= m && w <= right {
// if values[i] >= values[w] {
// tmp = append(tmp, values[w])
// fmt.Printf("%v,%v,append %v\n", values[i], values[w], values[w])
// w++
// } else {
// tmp = append(tmp, values[i])
// fmt.Printf("%v,%v,append %v\n", values[i], values[w], values[i])
// i++
// }
// }
// if i <= m {
// tmp = append(tmp, s[i:m+1]...)
// fmt.Printf("i <= m append s[%v:%v]\n", i, m+1)
// }
// if w <= right {
// tmp = append(tmp, s[w:right+1]...)
// fmt.Printf("w <= right append s[%v:%v]\n", w, right+1)
// }
// toString(tmp, "tmp")
// // 把临时数组的数据copy到原数组中
// copy(s[left:right+1], tmp[left:right+1])
// toString(s, "s")
// t := values[7:8]
// toString(t, "t")
// }
// func toString(s []int, name string) {
// fmt.Printf("%s:", name)
// for i, v := range s {
// if i > 0 {
// fmt.Printf(",")
// }
// fmt.Printf("%v", v)
// }
// fmt.Printf("\n")
// }
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord
import (
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// TxnCoordSenderFactory implements client.TxnSenderFactory.
type TxnCoordSenderFactory struct {
	log.AmbientContext

	st      *cluster.Settings
	wrapped kv.Sender // underlying sender; returned directly by NonTransactionalSender
	clock   *hlc.Clock
	// heartbeatInterval defaults to base.DefaultTxnHeartbeatInterval when
	// left zero in the config (see NewTxnCoordSenderFactory).
	heartbeatInterval      time.Duration
	linearizable           bool // enables linearizable behavior
	stopper                *stop.Stopper
	metrics                TxnMetrics
	condensedIntentsEveryN log.EveryN // rate limiter, initialised to once per second
	testingKnobs           ClientTestingKnobs
}

// Compile-time assertion that the factory satisfies kv.TxnSenderFactory.
var _ kv.TxnSenderFactory = &TxnCoordSenderFactory{}
// TxnCoordSenderFactoryConfig holds configuration and auxiliary objects that can be passed
// to NewTxnCoordSenderFactory. Settings, HeartbeatInterval and Metrics may be
// left zero; NewTxnCoordSenderFactory fills in testing-friendly defaults.
type TxnCoordSenderFactoryConfig struct {
	AmbientCtx log.AmbientContext

	Settings *cluster.Settings
	Clock    *hlc.Clock
	Stopper  *stop.Stopper

	HeartbeatInterval time.Duration
	Linearizable      bool
	Metrics           TxnMetrics

	TestingKnobs ClientTestingKnobs
}
// NewTxnCoordSenderFactory creates a new TxnCoordSenderFactory. The
// factory creates new instances of TxnCoordSenders. Zero-valued Settings,
// Metrics and HeartbeatInterval in cfg are replaced with testing defaults.
func NewTxnCoordSenderFactory(
	cfg TxnCoordSenderFactoryConfig, wrapped kv.Sender,
) *TxnCoordSenderFactory {
	factory := &TxnCoordSenderFactory{
		AmbientContext:         cfg.AmbientCtx,
		st:                     cfg.Settings,
		wrapped:                wrapped,
		clock:                  cfg.Clock,
		stopper:                cfg.Stopper,
		linearizable:           cfg.Linearizable,
		heartbeatInterval:      cfg.HeartbeatInterval,
		metrics:                cfg.Metrics,
		condensedIntentsEveryN: log.Every(time.Second),
		testingKnobs:           cfg.TestingKnobs,
	}
	// Backfill testing-friendly defaults for anything the caller left zero.
	if factory.st == nil {
		factory.st = cluster.MakeTestingClusterSettings()
	}
	if factory.metrics == (TxnMetrics{}) {
		factory.metrics = MakeTxnMetrics(metric.TestSampleInterval)
	}
	if factory.heartbeatInterval == 0 {
		factory.heartbeatInterval = base.DefaultTxnHeartbeatInterval
	}
	return factory
}
// RootTransactionalSender is part of the TxnSenderFactory interface.
// It creates the coordinating (root) sender for a new transaction.
func (tcf *TxnCoordSenderFactory) RootTransactionalSender(
	txn *roachpb.Transaction, pri roachpb.UserPriority,
) kv.TxnSender {
	return newRootTxnCoordSender(tcf, txn, pri)
}
// LeafTransactionalSender is part of the TxnSenderFactory interface.
// It creates a leaf sender from serialized input state produced by a root.
func (tcf *TxnCoordSenderFactory) LeafTransactionalSender(
	tis *roachpb.LeafTxnInputState,
) kv.TxnSender {
	return newLeafTxnCoordSender(tcf, tis)
}
// NonTransactionalSender is part of the TxnSenderFactory interface.
// Non-transactional requests bypass coordination and go straight to the
// wrapped sender.
func (tcf *TxnCoordSenderFactory) NonTransactionalSender() kv.Sender {
	return tcf.wrapped
}
// Metrics returns the factory's metrics struct.
func (tcf *TxnCoordSenderFactory) Metrics() TxnMetrics {
	return tcf.metrics
}
|
package main
import "fmt"
// People describes someone who can run and read.
type People interface {
	run()
	read()
}
// xhaoge is a concrete People implementation carrying a name,
// a favourite sport and a favourite book.
type xhaoge struct {
	name  string
	sport string
	book  string
}
// run prints a running message for x. The original used fmt.Println with a
// %s verb, which prints the verb literally instead of substituting the name
// (go vet flags this); Printf formats it properly, with Println's newline.
func (x *xhaoge) run() {
	fmt.Printf("%s 开始跑步...\n", x.name)
}
// read prints the book x likes. The original used fmt.Println with %s verbs,
// which prints the verbs literally instead of substituting values (go vet
// flags this); Printf formats properly, with Println's newline restored.
func (x *xhaoge) read() {
	fmt.Printf("%s 喜欢读%s\n", x.name, x.book)
}
// findType prints a coarse classification of its argument: "string" for
// strings, "people" for values implementing People, "unknown" otherwise.
func findType(xxx interface{}) {
	if _, isString := xxx.(string); isString {
		fmt.Println("string")
		return
	}
	if _, isPeople := xxx.(People); isPeople {
		fmt.Println("people")
		return
	}
	fmt.Println("unknown")
}
func main() {
var xx People
xx = &xhaoge{
name: "xhaoge",
sport: "baseketball",
book: "富翁的书....",
}
fmt.Println(xx)
// findType(xx)
val, ok := xx.(*xhaoge)
if !ok {
fmt.Println("not ok")
}
fmt.Println(val)
findType(xx)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package task
import (
"context"
"time"
)
// CancelFunc is a function type that can be used to stop a context.
// It is an alias-style wrapper over context.CancelFunc so callers of this
// package never need to import context directly.
type CancelFunc context.CancelFunc
// WithCancel returns a copy of ctx with a new Done channel.
// See context.WithCancel for more details.
func WithCancel(ctx context.Context) (context.Context, CancelFunc) {
	child, stop := context.WithCancel(ctx)
	return child, CancelFunc(stop)
}
// WithDeadline returns a copy of ctx with the deadline adjusted to be no later than deadline.
// See context.WithDeadline for more details.
func WithDeadline(ctx context.Context, deadline time.Time) (context.Context, CancelFunc) {
	child, stop := context.WithDeadline(ctx, deadline)
	return child, CancelFunc(stop)
}
// WithTimeout is shorthand for WithDeadline(ctx, time.Now().Add(duration)).
// See context.WithTimeout for more details.
func WithTimeout(ctx context.Context, duration time.Duration) (context.Context, CancelFunc) {
	return WithDeadline(ctx, time.Now().Add(duration))
}
// ShouldStop returns a chan that's closed when work done on behalf of this
// context should be stopped.
// See context.Context.Done for more details.
func ShouldStop(ctx context.Context) <-chan struct{} {
	return ctx.Done()
}
// StopReason returns a non-nil error value after Done is closed
// (context.Canceled or context.DeadlineExceeded), and nil before.
// See context.Context.Err for more details.
func StopReason(ctx context.Context) error {
	return ctx.Err()
}
// Stopped is shorthand for StopReason(ctx) != nil because it increases the readability of common use cases.
func Stopped(ctx context.Context) bool {
return ctx.Err() != nil
}
|
package delete
import (
"encoding/json"
"net/http"
"os"
"github.com/ocoscope/face/db"
"github.com/ocoscope/face/utils"
"github.com/ocoscope/face/utils/answer"
"github.com/ocoscope/face/utils/recognition"
)
// UserPhoto handles a request to delete a user's photo: it authenticates the
// user via an access token, removes the face from the recognition collection,
// clears the photo reference in the database and deletes the image file.
// Responds with a localized status message.
func UserPhoto(w http.ResponseWriter, r *http.Request) {
	// Expected JSON request body.
	type tbody struct {
		CompanyID, UserID int64
		AccessToken       string
	}
	var body tbody
	err := json.NewDecoder(r.Body).Decode(&body)
	if err != nil {
		utils.Message(w, answer.WRONG_DATA, 400)
		return
	}
	// Each company has its own database (the helper name contains a typo
	// preserved from the package: "Copmany").
	database, err := db.CopmanyDB(body.CompanyID)
	if err != nil {
		utils.Message(w, answer.NOT_FOUND_COMPANY, 400)
		return
	}
	defer database.Close()
	err = db.CheckUserAccessToken(database, body.UserID, body.AccessToken)
	if err != nil {
		utils.Message(w, answer.UNAUTHORIZED, 401)
		return
	}
	faceID, err := db.GetUserFace(database, body.UserID)
	if err != nil {
		utils.Message(w, answer.F_SERVER, 500)
		return
	}
	collection, err := db.GetCompanyCollection(body.CompanyID)
	if err != nil {
		utils.Message(w, answer.F_SERVER, 500)
		return
	}
	// NOTE(review): the three cleanup steps below ignore their errors, so a
	// partial deletion still reports success — confirm this is intentional.
	recognition.DeleteFaceInCollection(collection, faceID)
	db.UserPhotoUpdate(database, body.UserID, "")
	path := "./images/" + utils.IntToStr(body.CompanyID) + "/" + utils.IntToStr(body.UserID) + ".jpg"
	os.Remove(path)
	utils.Message(w, "Фотография успешно удалено", 200)
}
|
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
)
// configFile is the module list location, relative to $HOME.
const configFile = ".vim/vim-modules.conf"

// cmdFunc is the signature shared by all sub-command handlers.
type cmdFunc func() error

// cmd maps a sub-command name to its handler.
type cmd map[string]cmdFunc

// config holds the cleaned, non-empty lines of the module config file.
type config struct {
	lines []string
}

// commands is the dispatch table consulted by main.
var commands = cmd{
	"install": cmdInstall,
	"clean":   cmdClean,
}

// dryRun, when set via -dry-run, makes commands print instead of executing.
var dryRun bool

// saveModule, when set via -s, appends newly installed modules to the config.
var saveModule bool
// main parses flags, resolves the requested sub-command from the dispatch
// table and runs it, exiting non-zero on unknown commands or failure.
func main() {
	help := flag.Bool("h", false, "This help screen")
	flag.BoolVar(&dryRun, "dry-run", false, "Make no modifications")
	flag.BoolVar(&saveModule, "s", false, "Save the module to the config file")
	flag.Parse()
	if *help {
		showHelp()
		os.Exit(1)
	}
	input := flag.Arg(0)
	handler, known := commands[input]
	if !known {
		fmt.Println("Invalid command:", input)
		showHelp()
		os.Exit(1)
	}
	if err := handler(); err != nil {
		fmt.Printf("Error: %s", err)
		os.Exit(1)
	}
}
// showHelp prints usage information and every registered flag with its
// default value and description.
func showHelp() {
	fmt.Println("Usage:")
	fmt.Printf("%s [options] install | clear\n\nOptions:\n", os.Args[0])
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Printf(" - %s (%s) %s\n", f.Name, f.DefValue, f.Usage)
	})
	fmt.Println("")
}
func cdBundleDir() error {
return os.Chdir(os.Getenv("HOME") + "/.vim/bundle")
}
// cmdClean is a placeholder for the "clean" command: it only reports that
// cleaning is unimplemented and never fails. Println supplies the newlines
// the original Printf calls were missing, so the two messages no longer run
// together on a single unterminated line.
func cmdClean() error {
	fmt.Println("Cleaning modules ...")
	fmt.Println("This is not implemented")
	return nil
}
// cmdInstall installs either the module named on the command line or, when
// none is given, every module listed in the config file. With -s, a newly
// installed module is also appended to the config.
func cmdInstall() error {
	err := os.Chdir(os.Getenv("HOME") + "/.vim/bundle")
	if err != nil {
		return fmt.Errorf("failed to cd into ./vim: %s", err)
	}
	// No module to install? use the config
	newModule := flag.Arg(1)
	if newModule == "" {
		conf, err := getConfig()
		if err != nil {
			return err
		}
		for _, module := range conf.lines {
			if err := installOne(module); err != nil {
				return err
			}
		}
		return nil
	}
	if err := installOne(newModule); err != nil {
		return err
	}
	if saveModule {
		conf, err := getConfig()
		// BUG FIX: the error was previously ignored; a failed read left conf
		// nil and the append below would panic on the nil pointer.
		if err != nil {
			return err
		}
		conf.lines = append(conf.lines, newModule)
		if err := saveConfig(conf); err != nil {
			return err
		}
	}
	return nil
}
// installOne clones module unless a directory with its repository name
// already exists, in which case it is skipped.
func installOne(module string) error {
	name := gitParseRepoName(module)
	if _, statErr := os.Stat(name); statErr != nil {
		return gitClone(module)
	}
	fmt.Printf("Module '%s' already exists. Skipping.\n", name)
	return nil
}
// getConfig reads $HOME/.vim/vim-modules.conf, strips '#' comments and
// leading whitespace, and returns the remaining non-empty lines.
func getConfig() (*config, error) {
	home := os.Getenv("HOME")
	path := home + "/" + configFile
	fd, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open config file '%s': %s", path, err)
	}
	defer fd.Close()
	scanner := bufio.NewScanner(fd)
	reComments := regexp.MustCompile(`#.*$`)
	reLeadingWs := regexp.MustCompile(`^\s*`)
	conf := &config{}
	for scanner.Scan() {
		line := scanner.Text()
		line = reComments.ReplaceAllString(line, "")
		line = reLeadingWs.ReplaceAllString(line, "")
		if line != "" {
			// BUG FIX: append the cleaned line. The original appended the raw
			// scanner.Text(), leaking comments and indentation into the
			// module names used for cloning.
			conf.lines = append(conf.lines, line)
		}
	}
	// Surface scanner read errors, which were previously silently dropped.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return conf, nil
}
// saveConfig atomically rewrites the config file: the lines are written to a
// temp file in the same directory, which is then renamed over the target.
func saveConfig(conf *config) error {
	home := os.Getenv("HOME")
	path := home + "/" + configFile
	fd, err := ioutil.TempFile(filepath.Dir(path), "vim-modules.conf")
	if err != nil {
		return err
	}
	// Don't leave the temp file behind on any failure below. After a
	// successful rename the removal fails harmlessly (file no longer exists);
	// previously error paths leaked the temp file and its open handle.
	defer os.Remove(fd.Name())
	buff := bufio.NewWriter(fd)
	for _, line := range conf.lines {
		buff.Write([]byte(line))
		buff.Write([]byte("\n"))
	}
	// bufio errors are sticky, so Flush reports the first write failure.
	if err = buff.Flush(); err != nil {
		fd.Close()
		return err
	}
	if err = fd.Close(); err != nil {
		return err
	}
	return os.Rename(fd.Name(), path)
}
// gitParseRepoName derives the local directory name for a repository URL or
// path: the base name with a trailing ".git" removed. The original pattern
// `\.git` was unanchored, so it also mangled names that merely contain
// ".git" (e.g. "my.gitops.git" became "myops"); anchoring with `$` removes
// only the suffix.
func gitParseRepoName(repo string) string {
	result := filepath.Base(repo)
	return regexp.MustCompile(`\.git$`).ReplaceAllString(result, "")
}
// gitClone clones repo via the git CLI, or merely echoes the command when
// running in dry-run mode.
func gitClone(repo string) error {
	if !dryRun {
		return runIt("git", "clone", repo)
	}
	fmt.Printf("git clone %s\n", repo)
	return nil
}
// runIt executes cmd with args, wiring the child's output streams to this
// process's stdout and stderr, and waits for it to finish.
func runIt(cmd string, args ...string) error {
	proc := exec.Command(cmd, args...)
	proc.Stdout, proc.Stderr = os.Stdout, os.Stderr
	return proc.Run()
}
|
package main
import "math"
//
//在 x 轴上有一个一维的花园。花园长度为 n,从点 0 开始,到点 n 结束。
//
//花园里总共有 n + 1 个水龙头,分别位于 [0, 1, ..., n] 。
//
//给你一个整数 n 和一个长度为 n + 1 的整数数组 ranges ,其中 ranges[i] (下标从 0 开始)表示:如果打开点 i 处的水龙头,可以灌溉的区域为 [i - ranges[i], i + ranges[i]] 。
//
//请你返回可以灌溉整个花园的 最少水龙头数目 。如果花园始终存在无法灌溉到的地方,请你返回 -1 。
//
//
//
//示例 1:
//输入:n = 5, ranges = [3,4,1,1,0,0]
//输出:1
//解释:
//点 0 处的水龙头可以灌溉区间 [-3,3]
//点 1 处的水龙头可以灌溉区间 [-3,5]
//点 2 处的水龙头可以灌溉区间 [1,3]
//点 3 处的水龙头可以灌溉区间 [2,4]
//点 4 处的水龙头可以灌溉区间 [4,4]
//点 5 处的水龙头可以灌溉区间 [5,5]
//只需要打开点 1 处的水龙头即可灌溉整个花园 [0,5] 。
// 方案 贪婪算法
// 指定一个【0,n] result区间,上面记录区间范围
// 从水龙头【l,r] r值相同时 记录l 最大
// 从 n 尾部开始递减扫result,
// minTaps returns the minimum number of taps needed to water the whole
// garden [0, n], or -1 if some point cannot be covered. Greedy strategy:
// for every left endpoint record the farthest right reach achievable from
// it, then sweep left to right extending coverage one "jump" at a time.
func minTaps(n int, ranges []int) int {
	reach := make([]int, n+1)
	for i := 0; i <= n; i++ {
		left := max(i-ranges[i], 0)
		reach[left] = max(reach[left], min(i+ranges[i], n))
	}
	taps, farthest, covered := 0, math.MinInt32, 0
	for i := 0; i < n; i++ {
		farthest = max(farthest, reach[i])
		if i == covered {
			if farthest <= i {
				// No tap extends past position i: the garden is uncoverable.
				return -1
			}
			covered = farthest
			taps++
		}
	}
	return taps
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
|
// 微信公众号支付参数服务列表
// 1. 支付证书上传
// 2. 支付参数修改
// 3. 通过公众号ID,获取公众号支付参数记录
package models
import (
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/1046102779/common/consts"
"github.com/1046102779/official_account/conf"
. "github.com/1046102779/official_account/logger"
"github.com/pkg/errors"
"github.com/astaxie/beego/orm"
)
// OfficialAccountsPayParams maps one row of the
// `official_accounts_pay_params` table: the WeChat payment credentials
// (merchant id, app key, merchant name) attached to a single official
// account.
type OfficialAccountsPayParams struct {
	// Id is the auto-increment primary key.
	Id int `orm:"column(official_accounts_pay_param_id);auto"`
	// OfficialAccountId references the owning official account.
	OfficialAccountId int `orm:"column(official_account_id);null"`
	// MchId is the WeChat merchant id.
	MchId string `orm:"column(mch_id);size(30);null"`
	// Name is the merchant display name.
	Name string `orm:"column(name);size(100);null"`
	// Appkey is the WeChat payment app key.
	Appkey string `orm:"column(appkey);size(50);null"`
	// Status marks the row valid/invalid (compared against consts.STATUS_VALID elsewhere).
	Status int16 `orm:"column(status);null"`
	UpdatedAt time.Time `orm:"column(updated_at);type(datetime);null"`
	CreatedAt time.Time `orm:"column(created_at);type(datetime);null"`
}
// TableName reports the database table backing OfficialAccountsPayParams.
func (t *OfficialAccountsPayParams) TableName() string {
	const table = "official_accounts_pay_params"
	return table
}
// UpdateOfficialAccountsPayParamsNoLock persists the in-memory record t
// through the supplied ORM handle. As the name says, the caller is
// responsible for any locking.
func (t *OfficialAccountsPayParams) UpdateOfficialAccountsPayParamsNoLock(o *orm.Ormer) (retcode int, err error) {
	Logger.Info("[%v] enter UpdateOfficialAccountsPayParamsNoLock.", t.Id)
	defer Logger.Info("[%v] left UpdateOfficialAccountsPayParamsNoLock.", t.Id)
	if o == nil {
		return consts.ERROR_CODE__DB__UPDATE, errors.New("param `orm.Ormer` ptr empty")
	}
	if _, updateErr := (*o).Update(t); updateErr != nil {
		return consts.ERROR_CODE__DB__UPDATE, errors.Wrap(updateErr, "UpdateOfficialAccountsPayParamsNoLock")
	}
	return 0, nil
}
// InsertOfficialAccountsPayParamsNoLock inserts the record t through the
// supplied ORM handle. The caller is responsible for any locking.
func (t *OfficialAccountsPayParams) InsertOfficialAccountsPayParamsNoLock(o *orm.Ormer) (retcode int, err error) {
	Logger.Info("enter InsertOfficialAccountsPayParamsNoLock.")
	defer Logger.Info("left InsertOfficialAccountsPayParamsNoLock.")
	if o == nil {
		return consts.ERROR_CODE__DB__UPDATE, errors.New("param `orm.Ormer` ptr empty")
	}
	if _, insertErr := (*o).Insert(t); insertErr != nil {
		return consts.ERROR_CODE__DB__INSERT, errors.Wrap(insertErr, "InsertOfficialAccountsPayParamsNoLock")
	}
	return 0, nil
}
// init registers the model with beego's ORM so query/insert calls can
// resolve OfficialAccountsPayParams to its table.
func init() {
	orm.RegisterModel(new(OfficialAccountsPayParams))
}
// UploadCertification stores a WeChat payment certificate file
// (apiclient_key.pem or apiclient_cert.pem) uploaded as the multipart
// form field "certification_file" under <CertificationDir>/<appid>/.
//
// Fixes over the previous version:
//   - the error from req.FormFile is now checked before the handle is
//     used (a failed parse previously reached `defer file.Close()` with
//     a nil handle);
//   - file.Close() is deferred before the filename check, so a rejected
//     filename no longer leaks the upload handle;
//   - newFile.Close() is only deferred after os.OpenFile succeeded
//     (deferring Close on a nil *os.File panics);
//   - the ParseMultipartForm error is no longer silently dropped.
func UploadCertification(id int, req *http.Request) (retcode int, err error) {
	Logger.Info("[%v] enter UploadCertification.", id)
	defer Logger.Info("[%v] left UploadCertification.", id)
	o := orm.NewOrm()
	officialAccount := &OfficialAccounts{
		Id: id,
	}
	if retcode, err = officialAccount.ReadOfficialAccountNoLock(&o); err != nil {
		err = errors.Wrap(err, "UploadCertification")
		return
	}
	// Parse the multipart body with a 32 MB in-memory limit.
	if err = req.ParseMultipartForm(32 << 20); err != nil {
		retcode = consts.ERROR_CODE__SOURCE_DATA__ILLEGAL
		return
	}
	file, header, newErr := req.FormFile("certification_file")
	if newErr != nil || header == nil {
		err = errors.New("parse file empty")
		retcode = consts.ERROR_CODE__SOURCE_DATA__ILLEGAL
		return
	}
	defer file.Close()
	// Only the two fixed certificate filenames are accepted.
	if header.Filename != "apiclient_key.pem" && header.Filename != "apiclient_cert.pem" {
		err = errors.New("form param `filename` error")
		retcode = consts.ERROR_CODE__SOURCE_DATA__ILLEGAL
		return
	}
	// Mkdir may fail when the directory already exists; any real problem
	// surfaces through the subsequent OpenFile.
	os.Mkdir(fmt.Sprintf("%s/%s", conf.CertificationDir, officialAccount.Appid), os.ModePerm)
	newFile, newErr := os.OpenFile(fmt.Sprintf("%s/%s/%s", conf.CertificationDir, officialAccount.Appid, header.Filename), os.O_WRONLY|os.O_CREATE, 0666)
	if newErr != nil {
		err = newErr
		retcode = consts.ERROR_CODE__SOURCE_DATA__ILLEGAL
		return
	}
	defer newFile.Close()
	if _, err = io.Copy(newFile, file); err != nil {
		retcode = consts.ERROR_CODE__SOURCE_DATA__ILLEGAL
		return
	}
	return
}
// ModifyWechatParams creates or updates the WeChat payment parameters
// (app key, merchant id, merchant name) for official account `id`.
// On update, blank/whitespace-only inputs leave the corresponding stored
// field untouched; when no valid record exists one is inserted with the
// values as given.
func ModifyWechatParams(id int, appkey string, mchid string, name string) (retcode int, err error) {
	Logger.Info("[%v] enter ModifyWechatParams.", id)
	defer Logger.Info("[%v] left ModifyWechatParams.", id)
	records := []OfficialAccountsPayParams{}
	o := orm.NewOrm()
	now := time.Now()
	account := &OfficialAccounts{Id: id}
	if retcode, err = account.ReadOfficialAccountNoLock(&o); err != nil {
		err = errors.Wrap(err, "ModifyWechatParams")
		return
	}
	var count int64
	count, err = o.QueryTable((&OfficialAccountsPayParams{}).TableName()).
		Filter("official_account_id", id).
		Filter("status", consts.STATUS_VALID).
		All(&records)
	if err != nil {
		err = errors.Wrap(err, "ModifyWechatParams")
		retcode = consts.ERROR_CODE__DB__READ
		return
	}
	if count <= 0 {
		// No valid record yet: insert a fresh one.
		record := &OfficialAccountsPayParams{
			OfficialAccountId: id,
			MchId:             mchid,
			Name:              name,
			Appkey:            appkey,
			Status:            consts.STATUS_VALID,
			UpdatedAt:         now,
			CreatedAt:         now,
		}
		if retcode, err = record.InsertOfficialAccountsPayParamsNoLock(&o); err != nil {
			err = errors.Wrap(err, "ModifyWechatParams")
		}
		return
	}
	// Update the existing record, skipping blank inputs.
	existing := &records[0]
	if strings.TrimSpace(appkey) != "" {
		existing.Appkey = appkey
	}
	if strings.TrimSpace(mchid) != "" {
		existing.MchId = mchid
	}
	if strings.TrimSpace(name) != "" {
		existing.Name = name
	}
	existing.UpdatedAt = now
	if retcode, err = existing.UpdateOfficialAccountsPayParamsNoLock(&o); err != nil {
		err = errors.Wrap(err, "ModifyWechatParams")
	}
	return
}
// GetOfficialAccountPayParamByOfficialAccountId returns the first valid
// payment-parameter record bound to the given official account id; the
// pointer stays nil when no such record exists.
func GetOfficialAccountPayParamByOfficialAccountId(id int) (officialAccountPayParam *OfficialAccountsPayParams, retcode int, err error) {
	Logger.Info("[%v] enter GetOfficialAccountPayParamByOfficialAccountId.", id)
	defer Logger.Info("[%v] left GetOfficialAccountPayParamByOfficialAccountId.", id)
	records := []*OfficialAccountsPayParams{}
	o := orm.NewOrm()
	var total int64
	total, err = o.QueryTable((&OfficialAccountsPayParams{}).TableName()).
		Filter("official_account_id", id).
		Filter("status", consts.STATUS_VALID).
		All(&records)
	if err != nil {
		return nil, consts.ERROR_CODE__DB__READ, errors.Wrap(err, "GetOfficialAccountPayParamByOfficialAccountId")
	}
	if total > 0 {
		officialAccountPayParam = records[0]
	}
	return
}
|
package function
import (
"bytes"
"encoding/json"
"errors"
"github.com/hecatoncheir/Storage"
"log"
"os"
"text/template"
)
// Storage abstracts the graph database backend: Query sends a raw query
// string and returns the raw JSON response body.
type Storage interface {
	Query(string) ([]byte, error)
}

// Executor runs read operations for page instructions against a Storage
// backend.
type Executor struct {
	Store Storage
}
// ExecutorLogger reports executor-level events to stdout with the call
// site attached.
var ExecutorLogger = log.New(os.Stdout, "Executor: ", log.Lshortfile)

var (
	// ErrPageInstructionCanNotBeWithoutID means that page instruction can't be without id.
	ErrPageInstructionCanNotBeWithoutID = errors.New("page instruction can not be without id")
	// ErrPageInstructionByIDCanNotBeFound means that the instruction can't be found in database.
	ErrPageInstructionByIDCanNotBeFound = errors.New("page instruction by id can not be found")
	// ErrPageInstructionDoesNotExist means that the page instruction does not exist in database.
	ErrPageInstructionDoesNotExist = errors.New("page instruction does not exist")
)
// ReadPageInstructionByID fetches the PageInstruction node with the given
// uid from storage and returns it fully populated.
//
// Errors:
//   - ErrPageInstructionCanNotBeWithoutID when pageInstructionID is empty;
//   - ErrPageInstructionByIDCanNotBeFound when the storage query fails or
//     its response cannot be decoded;
//   - ErrPageInstructionDoesNotExist when the query succeeds but matches
//     no instruction.
//
// NOTE(review): on error paths the returned struct carries only the
// requested ID; callers must rely on the error, not the value.
func (executor *Executor) ReadPageInstructionByID(pageInstructionID string) (storage.PageInstruction, error) {
	if pageInstructionID == "" {
		ExecutorLogger.Printf("Page pageInstruction can't be without ID")
		return storage.PageInstruction{}, ErrPageInstructionCanNotBeWithoutID
	}
	// The uid is spliced into the query text via text/template.
	variables := struct {
		PageInstructionID string
	}{
		PageInstructionID: pageInstructionID}
	queryTemplate, err := template.New("ReadPageInstructionByID").Parse(`{
pageInstructions(func: uid("{{.PageInstructionID}}")) @filter(has(path)) {
uid
path
pageInPaginationSelector
pageParamPath
cityParamPath
itemSelector
nameOfItemSelector
priceOfItemSelector
}
}`)
	pageInstruction := storage.PageInstruction{ID: pageInstructionID}
	if err != nil {
		ExecutorLogger.Println(err)
		return pageInstruction, err
	}
	queryBuf := bytes.Buffer{}
	err = queryTemplate.Execute(&queryBuf, variables)
	if err != nil {
		ExecutorLogger.Println(err)
		return pageInstruction, err
	}
	response, err := executor.Store.Query(queryBuf.String())
	if err != nil {
		ExecutorLogger.Println(err)
		return pageInstruction, ErrPageInstructionByIDCanNotBeFound
	}
	// Response shape: {"pageInstructions": [ ... ]}.
	type PageInstructionsInStorage struct {
		PageInstructions []storage.PageInstruction `json:"pageInstructions"`
	}
	var foundedPageInstructions PageInstructionsInStorage
	err = json.Unmarshal(response, &foundedPageInstructions)
	if err != nil {
		ExecutorLogger.Println(err)
		return pageInstruction, ErrPageInstructionByIDCanNotBeFound
	}
	if len(foundedPageInstructions.PageInstructions) == 0 {
		return pageInstruction, ErrPageInstructionDoesNotExist
	}
	return foundedPageInstructions.PageInstructions[0], nil
}
|
package main
import (
"bufio"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"os"
"strings"
)
// main reads one line from stdin (trailing newline included) and prints
// its MD5 digest as raw bytes, hex and base64.
func main() {
	in := bufio.NewReader(os.Stdin)
	line, _ := in.ReadString('\n')
	fmt.Println(toMd5(strings.NewReader(line)))
}
func toMd5(r io.Reader) (bytes []byte, hexStr string, base64Str string) {
h := md5.New()
io.Copy(h, r)
bytes = h.Sum(nil)
hexStr = hex.EncodeToString(bytes)
base64Str = base64.StdEncoding.EncodeToString(bytes)
return
}
func toSha1(r io.Reader) (bytes []byte, hexStr string, base64Str string) {
h := sha1.New()
io.Copy(h, r)
bytes = h.Sum(nil)
hexStr = hex.EncodeToString(bytes)
base64Str = base64.StdEncoding.EncodeToString(bytes)
return
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colencoding
import (
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// DecodeIndexKeyToCols decodes an index key into the idx'th position of the
// provided slices of colexec.Vecs. The input index key must already have its
// tenant id and first table id / index id prefix removed. If matches is false,
// the key is from a different table, and the returned remainingKey indicates a
// "seek prefix": the next key that might be part of the table being searched
// for. The input key will also be mutated if matches is false. See the analog
// in sqlbase/index_encoding.go.
func DecodeIndexKeyToCols(
	da *rowenc.DatumAlloc,
	vecs []coldata.Vec,
	idx int,
	desc catalog.TableDescriptor,
	index *descpb.IndexDescriptor,
	indexColIdx []int,
	types []*types.T,
	colDirs []descpb.IndexDescriptor_Direction,
	key roachpb.Key,
	invertedColIdx int,
) (remainingKey roachpb.Key, matches bool, foundNull bool, _ error) {
	var decodedTableID descpb.ID
	var decodedIndexID descpb.IndexID
	var err error
	// origKey is kept so seek prefixes can be rebuilt from the already
	// consumed portion of the key on a mismatch.
	origKey := key
	if len(index.Interleave.Ancestors) > 0 {
		// Interleaved index: walk each ancestor's prefix before reaching
		// this index's own columns.
		for i, ancestor := range index.Interleave.Ancestors {
			// Our input key had its first table id / index id chopped off, so
			// don't try to decode those for the first ancestor.
			if i != 0 {
				key, decodedTableID, decodedIndexID, err = rowenc.DecodePartialTableIDIndexID(key)
				if err != nil {
					return nil, false, false, err
				}
				if decodedTableID != ancestor.TableID || decodedIndexID != ancestor.IndexID {
					// We don't match. Return a key with the table ID / index ID we're
					// searching for, so the caller knows what to seek to.
					curPos := len(origKey) - len(key)
					key = rowenc.EncodePartialTableIDIndexID(origKey[:curPos], ancestor.TableID, ancestor.IndexID)
					return key, false, false, nil
				}
			}
			length := int(ancestor.SharedPrefixLen)
			// We don't care about whether this call to DecodeKeyVals found a null or not, because
			// it is a interleaving ancestor.
			var isNull bool
			key, isNull, err = DecodeKeyValsToCols(
				da, vecs, idx, indexColIdx[:length], types[:length],
				colDirs[:length], nil /* unseen */, key, invertedColIdx,
			)
			if err != nil {
				return nil, false, false, err
			}
			// Advance past the columns this ancestor shares with us.
			indexColIdx, types, colDirs = indexColIdx[length:], types[length:], colDirs[length:]
			foundNull = foundNull || isNull
			// Consume the interleaved sentinel.
			var ok bool
			key, ok = encoding.DecodeIfInterleavedSentinel(key)
			if !ok {
				// We're expecting an interleaved sentinel but didn't find one. Append
				// one so the caller can seek to it.
				curPos := len(origKey) - len(key)
				key = encoding.EncodeInterleavedSentinel(origKey[:curPos])
				return key, false, false, nil
			}
		}
		key, decodedTableID, decodedIndexID, err = rowenc.DecodePartialTableIDIndexID(key)
		if err != nil {
			return nil, false, false, err
		}
		if decodedTableID != desc.GetID() || decodedIndexID != index.ID {
			// We don't match. Return a key with the table ID / index ID we're
			// searching for, so the caller knows what to seek to.
			curPos := len(origKey) - len(key)
			key = rowenc.EncodePartialTableIDIndexID(origKey[:curPos], desc.GetID(), index.ID)
			return key, false, false, nil
		}
	}
	// Decode this index's own key columns.
	var isNull bool
	key, isNull, err = DecodeKeyValsToCols(
		da, vecs, idx, indexColIdx, types, colDirs, nil /* unseen */, key, invertedColIdx,
	)
	if err != nil {
		return nil, false, false, err
	}
	foundNull = foundNull || isNull
	// We're expecting a column family id next (a varint). If
	// interleavedSentinel is actually next, then this key is for a child
	// table.
	if _, ok := encoding.DecodeIfInterleavedSentinel(key); ok {
		curPos := len(origKey) - len(key)
		key = encoding.EncodeNullDescending(origKey[:curPos])
		return key, false, false, nil
	}
	return key, true, foundNull, nil
}
// DecodeKeyValsToCols decodes the values that are part of the key, writing
// each result into the idx'th slot of the corresponding coldata.Vec. A nil
// directions slice means every column is encoded ascending.
// When unseen is non-nil, the ordinal of every decoded column is removed
// from it so callers can track which columns have been observed.
// The second return value reports whether any decoded value was NULL.
// See the analog in sqlbase/index_encoding.go.
func DecodeKeyValsToCols(
	da *rowenc.DatumAlloc,
	vecs []coldata.Vec,
	idx int,
	indexColIdx []int,
	types []*types.T,
	directions []descpb.IndexDescriptor_Direction,
	unseen *util.FastIntSet,
	key []byte,
	invertedColIdx int,
) ([]byte, bool, error) {
	foundNull := false
	for ord := range types {
		direction := descpb.IndexDescriptor_ASC
		if directions != nil {
			direction = directions[ord]
		}
		var err error
		vecIdx := indexColIdx[ord]
		if vecIdx == -1 {
			// The caller doesn't want this column's data - skip past it.
			key, err = rowenc.SkipTableKey(key)
		} else {
			if unseen != nil {
				unseen.Remove(vecIdx)
			}
			isInverted := invertedColIdx == vecIdx
			var isNull bool
			key, isNull, err = decodeTableKeyToCol(da, vecs[vecIdx], idx, types[ord], key, direction, isInverted)
			foundNull = foundNull || isNull
		}
		if err != nil {
			return nil, false, err
		}
	}
	return key, foundNull, nil
}
// decodeTableKeyToCol decodes a value encoded by EncodeTableKey, writing the result
// to the idx'th slot of the input colexec.Vec.
// See the analog, DecodeTableKey, in sqlbase/column_type_encoding.go.
// decodeTableKeyToCol also returns whether or not the decoded value was NULL.
func decodeTableKeyToCol(
	da *rowenc.DatumAlloc,
	vec coldata.Vec,
	idx int,
	valType *types.T,
	key []byte,
	dir descpb.IndexDescriptor_Direction,
	isVirtualInverted bool,
) ([]byte, bool, error) {
	if (dir != descpb.IndexDescriptor_ASC) && (dir != descpb.IndexDescriptor_DESC) {
		return nil, false, errors.AssertionFailedf("invalid direction: %d", log.Safe(dir))
	}
	// A NULL marker short-circuits decoding: flag the row and return.
	var isNull bool
	if key, isNull = encoding.DecodeIfNull(key); isNull {
		vec.Nulls().SetNull(idx)
		return key, true, nil
	}
	// We might have read a NULL value in the interleaved child table which
	// would update the nulls vector, so we need to explicitly unset the null
	// value here.
	vec.Nulls().UnsetNull(idx)
	// Virtual inverted columns should not be decoded, but should instead be
	// passed on as a DBytes datum.
	if isVirtualInverted {
		keyLen, err := encoding.PeekLength(key)
		if err != nil {
			return nil, false, err
		}
		vec.Bytes().Set(idx, key[:keyLen])
		return key[keyLen:], false, nil
	}
	// Dispatch on the column's type family; each case consumes the value's
	// bytes from key, leaving the remainder in rkey.
	var rkey []byte
	var err error
	switch valType.Family() {
	case types.BoolFamily:
		// Bools are stored as varints (0/1).
		var i int64
		if dir == descpb.IndexDescriptor_ASC {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		vec.Bool()[idx] = i != 0
	case types.IntFamily, types.DateFamily:
		var i int64
		if dir == descpb.IndexDescriptor_ASC {
			rkey, i, err = encoding.DecodeVarintAscending(key)
		} else {
			rkey, i, err = encoding.DecodeVarintDescending(key)
		}
		// Narrow to the column's declared width.
		switch valType.Width() {
		case 16:
			vec.Int16()[idx] = int16(i)
		case 32:
			vec.Int32()[idx] = int32(i)
		case 0, 64:
			vec.Int64()[idx] = i
		}
	case types.FloatFamily:
		var f float64
		if dir == descpb.IndexDescriptor_ASC {
			rkey, f, err = encoding.DecodeFloatAscending(key)
		} else {
			rkey, f, err = encoding.DecodeFloatDescending(key)
		}
		vec.Float64()[idx] = f
	case types.DecimalFamily:
		var d apd.Decimal
		if dir == descpb.IndexDescriptor_ASC {
			rkey, d, err = encoding.DecodeDecimalAscending(key, nil)
		} else {
			rkey, d, err = encoding.DecodeDecimalDescending(key, nil)
		}
		vec.Decimal()[idx] = d
	case types.BytesFamily, types.StringFamily, types.UuidFamily:
		var r []byte
		if dir == descpb.IndexDescriptor_ASC {
			rkey, r, err = encoding.DecodeBytesAscending(key, nil)
		} else {
			rkey, r, err = encoding.DecodeBytesDescending(key, nil)
		}
		vec.Bytes().Set(idx, r)
	case types.TimestampFamily, types.TimestampTZFamily:
		var t time.Time
		if dir == descpb.IndexDescriptor_ASC {
			rkey, t, err = encoding.DecodeTimeAscending(key)
		} else {
			rkey, t, err = encoding.DecodeTimeDescending(key)
		}
		vec.Timestamp()[idx] = t
	case types.IntervalFamily:
		var d duration.Duration
		if dir == descpb.IndexDescriptor_ASC {
			rkey, d, err = encoding.DecodeDurationAscending(key)
		} else {
			rkey, d, err = encoding.DecodeDurationDescending(key)
		}
		vec.Interval()[idx] = d
	default:
		// Types without a native columnar representation fall back to the
		// row-oriented datum decoder.
		var d tree.Datum
		encDir := encoding.Ascending
		if dir == descpb.IndexDescriptor_DESC {
			encDir = encoding.Descending
		}
		d, rkey, err = rowenc.DecodeTableKey(da, valType, key, encDir)
		vec.Datum().Set(idx, d)
	}
	return rkey, false, err
}
// UnmarshalColumnValueToCol decodes the value from a roachpb.Value using the
// type expected by the column, writing into the input Vec at the given row
// idx. An error is returned if the value's type does not match the column's
// type.
// See the analog, UnmarshalColumnValue, in sqlbase/column_type_encoding.go.
func UnmarshalColumnValueToCol(
	da *rowenc.DatumAlloc, vec coldata.Vec, idx int, typ *types.T, value roachpb.Value,
) error {
	if value.RawBytes == nil {
		vec.Nulls().SetNull(idx)
		// Bug fix: return immediately for NULL values. Previously the code
		// fell through and attempted to decode the nil RawBytes, so a NULL
		// surfaced as a decode error instead of a null row.
		return nil
	}
	var err error
	switch typ.Family() {
	case types.BoolFamily:
		var v bool
		v, err = value.GetBool()
		vec.Bool()[idx] = v
	case types.IntFamily:
		var v int64
		v, err = value.GetInt()
		switch typ.Width() {
		case 16:
			vec.Int16()[idx] = int16(v)
		case 32:
			vec.Int32()[idx] = int32(v)
		default:
			// Pre-2.1 BIT was using INT encoding with arbitrary sizes.
			// We map these to 64-bit INT now. See #34161.
			vec.Int64()[idx] = v
		}
	case types.FloatFamily:
		var v float64
		v, err = value.GetFloat()
		vec.Float64()[idx] = v
	case types.DecimalFamily:
		err = value.GetDecimalInto(&vec.Decimal()[idx])
	case types.BytesFamily, types.StringFamily, types.UuidFamily:
		var v []byte
		v, err = value.GetBytes()
		vec.Bytes().Set(idx, v)
	case types.DateFamily:
		var v int64
		v, err = value.GetInt()
		vec.Int64()[idx] = v
	case types.TimestampFamily, types.TimestampTZFamily:
		var v time.Time
		v, err = value.GetTime()
		vec.Timestamp()[idx] = v
	case types.IntervalFamily:
		var v duration.Duration
		v, err = value.GetDuration()
		vec.Interval()[idx] = v
	// Types backed by tree.Datums.
	default:
		var d tree.Datum
		d, err = rowenc.UnmarshalColumnValue(da, typ, value)
		if err != nil {
			return err
		}
		vec.Datum().Set(idx, d)
	}
	return err
}
|
package main
import (
"fmt"
"time"
)
// currentId is the last Todo id handed out by RepoCreateTodo.
// NOTE(review): package-level state with no synchronization — not safe if
// these Repo* helpers are called from concurrent handlers; confirm usage.
var currentId int

// todos is the in-memory store backing the Repo* helpers.
var todos Todos
// init seeds the in-memory repository with some example Todos so the
// application has data to serve on startup.
func init() {
	RepoCreateTodo(Todo{Name: "Write presentation", Due: time.Now()})
	RepoCreateTodo(Todo{Name: "Host meetup", Due: time.Now()})
}
// RepoFindTodo looks up a Todo by id. A zero-value Todo is returned when
// no entry matches.
func RepoFindTodo(id int) Todo {
	for i := range todos {
		if todos[i].Id == id {
			return todos[i]
		}
	}
	return Todo{}
}
// RepoCreateTodo assigns the next id to todo, appends it to the store,
// and returns the stored copy.
func RepoCreateTodo(todo Todo) Todo {
	currentId++
	todo.Id = currentId
	todos = append(todos, todo)
	return todo
}
// RepoDestroyTodo removes the Todo with the given id from the store.
// It returns an error when no Todo with that id exists.
func RepoDestroyTodo(id int) error {
	for i, todo := range todos {
		if todo.Id == id {
			todos = append(todos[:i], todos[i+1:]...)
			return nil
		}
	}
	// Error strings start lowercase per Go convention (staticcheck ST1005).
	return fmt.Errorf("could not find Todo id of %d to delete", id)
}
|
package main
import (
"os"
"text/scanner"
"github.com/davecgh/go-spew/spew"
)
// lex wraps text/scanner.Scanner for tokenizing input, caching the most
// recently scanned token alongside the embedded scanner.
// (Original note, translated: it is better to define a dedicated struct
// for the scanner and give it fields like these.)
type lex struct {
	scanner.Scanner
	token rune
}
// newLex returns a fresh lexer; callers must Init it with a source before
// scanning.
func newLex() *lex {
	return &lex{}
}
// getToken advances the scanner and caches the scanned token code.
func (l *lex) getToken() {
	l.token = l.Scanner.Scan()
}
// main runs a simple token dump: it reads tokens from stdin, printing each
// token code with its text, until end of input.
//
// Bug fix: the previous version looped forever — once stdin hit EOF,
// Scan kept returning scanner.EOF and the loop spun printing it. The loop
// now terminates on EOF.
func main() {
	lex := newLex()
	lex.Init(os.Stdin)
	for {
		spew.Printf("> ")
		lex.getToken()
		if lex.token == scanner.EOF {
			break
		}
		spew.Println(lex.token, lex.TokenText())
	}
}
|
package publisher
import (
"testing"
vaultapi "github.com/hashicorp/vault/api"
)
// secretsStoreStub is an in-memory secrets source used by the tests; it
// serves key/value pairs straight from a map.
type secretsStoreStub struct {
	data map[string]string
}
// Keys lists every key held by the stub, in map-iteration (random) order.
func (s *secretsStoreStub) Keys() []string {
	out := make([]string, 0, len(s.data))
	for key := range s.data {
		out = append(out, key)
	}
	return out
}
// Get returns the value stored under key. Missing keys yield the empty
// string with a nil error, mirroring a permissive secrets backend.
func (s *secretsStoreStub) Get(key string) (string, error) {
	return s.data[key], nil
}
// TestEncryption pushes a stubbed secret store into Vault and verifies
// the values can be read back.
//
// Improvements: failures now go through t.Fatal/t.Error instead of panic
// (panicking aborts the whole test binary), the Push and Read errors are
// checked, and a nil read result no longer causes a nil pointer
// dereference when indexing result.Data.
//
// NOTE(review): this test requires a reachable Vault server; consider
// gating it behind an environment check or a -short skip.
func TestEncryption(t *testing.T) {
	cfg := vaultapi.DefaultConfig()
	client, err := vaultapi.NewClient(cfg)
	if err != nil {
		t.Fatalf("creating vault client: %v", err)
	}
	v := client.Logical()
	s := &secretsStoreStub{
		data: map[string]string{
			"foo": "bar",
			"baz": "boo",
		},
	}
	p := New(v, "secret/test")
	if err := p.Push(s); err != nil {
		t.Fatalf("pushing secrets: %v", err)
	}
	result, err := v.Read(p.path)
	if err != nil {
		t.Fatalf("reading back secrets: %v", err)
	}
	if result == nil {
		t.Fatalf("no data found at %s", p.path)
	}
	if result.Data["foo"] != "bar" {
		t.Error("foo should be set to bar", result.Data)
	}
	if result.Data["baz"] != "boo" {
		t.Error("baz should be set to boo", result.Data)
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package typecutils
import (
"bufio"
"context"
"crypto/sha256"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
)
// USB represents the information parsed for one connected USB device from
// a line of `lsusb -t` output.
type USB struct {
	// Class represents class that the connected device falls into. (Example: Mass storage, Wireless, etc).
	Class string
	// Driver represents driver that drives the connected device. (Example: hub, btusb, etc).
	Driver string
	// Speed represents the speed of connected device. (Example: 480M, 5000M, etc).
	Speed string
}
// re will parse Class, Driver and Speed of USB devices from 'lsusb -t' command output.
// Capture groups: 1 = Class, 2 = Driver, 3 = Speed.
// Compiled once at package scope so per-line matching is cheap.
// Sample output of 'lsusb -t' command is as below:
/*
/: Bus 04.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/4p, 10000M
/: Bus 03.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/12p, 480M
|__ Port 2: Dev 2, If 0, Class=Mass Storage, Driver=usb-storage, 5000M
|__ Port 2: Dev 2, If 0, Class=Vendor Specific Class, Driver=asix, 480M
|__ Port 5: Dev 3, If 0, Class=Video, Driver=uvcvideo, 480M
|__ Port 5: Dev 3, If 1, Class=Video, Driver=uvcvideo, 480M
|__ Port 10: Dev 4, If 0, Class=Wireless, Driver=btusb, 12M
|__ Port 10: Dev 4, If 1, Class=Wireless, Driver=btusb, 12M
/: Bus 02.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/4p, 10000M
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/1p, 480M
*/
var re = regexp.MustCompile(`.*Class=([a-zA-Z_\s]+).*Driver=([a-zA-Z0-9_\-\/\s]+).*,.([a-zA-Z0-9_\/.]+)`)
// ListDevicesInfo returns the class, driver and speed for all the USB
// devices reported by `lsusb -t`.
//
// Bug fix: the bufio.Scanner's error is now checked after the loop;
// previously a failed scan silently produced a truncated device list.
func ListDevicesInfo(ctx context.Context) ([]USB, error) {
	out, err := testexec.CommandContext(ctx, "lsusb", "-t").Output()
	if err != nil {
		return nil, errors.Wrap(err, "failed to run lsusb command")
	}
	var res []USB
	sc := bufio.NewScanner(strings.NewReader(string(out)))
	for sc.Scan() {
		// Lines that do not describe a device (or fail to parse) are skipped.
		match := re.FindStringSubmatch(sc.Text())
		if match == nil {
			continue
		}
		res = append(res, USB{
			Class:  match[1],
			Driver: match[2],
			Speed:  match[3],
		})
	}
	if err := sc.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to scan lsusb output")
	}
	return res, nil
}
// MassStorageUSBSpeed returns the reported speed of every connected
// mass-storage USB device. It returns an error when no such device
// reports a speed.
func MassStorageUSBSpeed(ctx context.Context) ([]string, error) {
	devices, err := ListDevicesInfo(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get lsusb details")
	}
	var speeds []string
	for _, dev := range devices {
		if dev.Class != "Mass Storage" || dev.Speed == "" {
			continue
		}
		speeds = append(speeds, dev.Speed)
	}
	if len(speeds) == 0 {
		return nil, errors.New("failed to find USB device speed")
	}
	return speeds, nil
}
// CopyFile copies the regular file src to dst.
//
// Fix: the destination's Close error is now checked. For writes, a
// failing Close can mean buffered data never reached disk; the previous
// deferred Close silently discarded that error.
func CopyFile(src, dst string) error {
	info, err := os.Stat(src)
	if err != nil {
		return errors.Wrap(err, "failed to get file info")
	}
	if !info.Mode().IsRegular() {
		return errors.Errorf("%s is not a regular file", src)
	}
	source, err := os.Open(src)
	if err != nil {
		return errors.Wrap(err, "failed to open file")
	}
	defer source.Close()
	destination, err := os.Create(dst)
	if err != nil {
		return errors.Wrap(err, "failed to create file")
	}
	if _, err := io.Copy(destination, source); err != nil {
		destination.Close()
		return errors.Wrap(err, "failed to copy")
	}
	if err := destination.Close(); err != nil {
		return errors.Wrap(err, "failed to close destination file")
	}
	return nil
}
// FileChecksum returns the SHA-256 digest of the file at path.
func FileChecksum(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return []byte{}, errors.Wrap(err, "failed to open files")
	}
	defer f.Close()
	hasher := sha256.New()
	if _, err := io.Copy(hasher, f); err != nil {
		return []byte{}, errors.Wrap(err, "failed to calculate the hash of the files")
	}
	return hasher.Sum(nil), nil
}
// RemovableDirs lists the entry names under mountPath, i.e. the currently
// mounted removable devices.
func RemovableDirs(mountPath string) ([]string, error) {
	entries, err := ioutil.ReadDir(mountPath)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read directory")
	}
	// Keep the nil slice when the directory is empty, matching the
	// original behavior.
	var names []string
	for _, entry := range entries {
		names = append(names, entry.Name())
	}
	return names, nil
}
// TbtMountPath returns the first directory present after plugging that
// was absent before — i.e. the newly attached removable device — or ""
// when every post-plug directory already existed.
func TbtMountPath(dirsAfterPlug, dirsbeforePlug []string) string {
	known := make(map[string]struct{}, len(dirsbeforePlug))
	for _, dir := range dirsbeforePlug {
		known[dir] = struct{}{}
	}
	for _, dir := range dirsAfterPlug {
		if _, ok := known[dir]; !ok {
			return dir
		}
	}
	return ""
}
|
package core_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ww "github.com/wetware/ww/pkg"
"github.com/wetware/ww/pkg/lang/core"
capnp "zombiezen.com/go/capnproto2"
)
// TestEmptyList exercises the zero-length list: construction, counting,
// rendering, the First/Next sequence accessors, growth via Cons/Conj into
// the larger list representations, and iteration.
func TestEmptyList(t *testing.T) {
	t.Parallel()
	t.Run("New", func(t *testing.T) {
		t.Parallel()
		// NewList with no items must yield the empty-list representation.
		l, err := core.NewList(capnp.SingleSegment(nil))
		require.NoError(t, err)
		assert.IsType(t, core.EmptyPersistentList{}, l)
	})
	t.Run("Count", func(t *testing.T) {
		t.Parallel()
		cnt, err := core.EmptyList.Count()
		assert.NoError(t, err)
		assert.Zero(t, cnt)
	})
	t.Run("Render", func(t *testing.T) {
		t.Parallel()
		s, err := core.Render(core.EmptyList)
		require.NoError(t, err)
		assert.Equal(t, "()", s)
	})
	t.Run("First", func(t *testing.T) {
		t.Parallel()
		// First on an empty list is nil, not an error.
		item, err := core.EmptyList.First()
		assert.NoError(t, err)
		assert.Nil(t, item)
	})
	t.Run("Next", func(t *testing.T) {
		t.Parallel()
		tail, err := core.EmptyList.Next()
		assert.NoError(t, err)
		assert.Nil(t, tail)
	})
	t.Run("Cons", func(t *testing.T) {
		t.Parallel()
		// Consing one item upgrades the empty list to a head list.
		l, err := core.EmptyList.Cons(mustInt(0))
		require.NoError(t, err)
		require.IsType(t, core.PersistentHeadList{}, l)
		cnt, err := l.Count()
		require.NoError(t, err)
		assert.Equal(t, 1, cnt)
	})
	t.Run("Conj", func(t *testing.T) {
		t.Parallel()
		// The representation scales with the element count:
		// 1 -> head list, 2 -> packed list, 3+ -> deep list.
		t.Run("One", func(t *testing.T) {
			t.Parallel()
			ctr, err := core.EmptyList.Conj(valueRange(1)...)
			require.NoError(t, err)
			assert.IsType(t, core.PersistentHeadList{}, ctr)
		})
		t.Run("Two", func(t *testing.T) {
			t.Parallel()
			ctr, err := core.EmptyList.Conj(valueRange(2)...)
			require.NoError(t, err)
			assert.IsType(t, core.PackedPersistentList{}, ctr)
		})
		t.Run("Many", func(t *testing.T) {
			t.Parallel()
			ctr, err := core.EmptyList.Conj(valueRange(3)...)
			require.NoError(t, err)
			assert.IsType(t, core.DeepPersistentList{}, ctr)
		})
	})
	t.Run("Iter", func(t *testing.T) {
		t.Parallel()
		s, err := core.ToSlice(core.EmptyList)
		require.NoError(t, err)
		assert.Nil(t, s)
	})
}
// TestPersistentHeadList exercises the single-element list representation:
// construction, counting, rendering, sequence accessors, growth via
// Cons/Conj, and iteration.
func TestPersistentHeadList(t *testing.T) {
	t.Parallel()
	t.Run("New", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		assert.IsType(t, core.PersistentHeadList{}, l)
	})
	t.Run("Count", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		cnt, err := l.Count()
		require.NoError(t, err)
		assert.Equal(t, 1, cnt)
	})
	t.Run("Render", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		s, err := core.Render(l)
		require.NoError(t, err)
		assert.Equal(t, "(0)", s)
	})
	t.Run("First", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		item, err := l.First()
		require.NoError(t, err)
		assertEq(t, mustInt(0), item)
	})
	t.Run("Next", func(t *testing.T) {
		t.Parallel()
		// The tail of a one-element list is nil.
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		seq, err := l.Next()
		require.NoError(t, err)
		assert.Nil(t, seq)
	})
	t.Run("Cons", func(t *testing.T) {
		t.Parallel()
		// Consing a second item upgrades a head list to a packed list.
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		l, err = l.Cons(mustInt(1))
		require.NoError(t, err)
		assert.IsType(t, core.PackedPersistentList{}, l)
		got, err := l.First()
		require.NoError(t, err)
		assertEq(t, mustInt(1), got)
	})
	t.Run("Conj", func(t *testing.T) {
		t.Parallel()
		t.Run("One", func(t *testing.T) {
			l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
			require.NoError(t, err)
			require.IsType(t, core.PersistentHeadList{}, l)
			ctr, err := l.Conj(valueRange(1)...)
			require.NoError(t, err)
			require.IsType(t, core.PackedPersistentList{}, ctr)
			// The original list must be unchanged (persistence).
			got, err := l.First()
			require.NoError(t, err)
			assertEq(t, mustInt(0), got)
		})
		t.Run("Many", func(t *testing.T) {
			l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
			require.NoError(t, err)
			require.IsType(t, core.PersistentHeadList{}, l)
			ctr, err := l.Conj(core.True, core.False)
			require.NoError(t, err)
			require.IsType(t, core.DeepPersistentList{}, ctr)
			// NOTE(review): this reads l.First() — the original list,
			// whose head is mustInt(0) — yet expects core.False, which
			// could only be observed through ctr. Confirm whether
			// ctr.First() was intended here.
			got, err := l.First()
			require.NoError(t, err)
			assertEq(t, core.False, got)
		})
	})
	t.Run("Iter", func(t *testing.T) {
		l, err := core.NewList(capnp.SingleSegment(nil), mustInt(0))
		require.NoError(t, err)
		s, err := core.ToSlice(l)
		require.NoError(t, err)
		assert.Len(t, s, 1)
		assertEq(t, s[0], mustInt(0))
	})
}
// TestPackedPersistentList exercises the two-element (packed) list
// representation: construction, counting, rendering, sequence accessors,
// growth via Cons/Conj into a deep list, and iteration.
func TestPackedPersistentList(t *testing.T) {
	t.Parallel()
	items := valueRange(2)
	t.Run("New", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		assert.IsType(t, core.PackedPersistentList{}, l)
	})
	t.Run("Count", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		cnt, err := l.Count()
		require.NoError(t, err)
		assert.Equal(t, 2, cnt)
	})
	t.Run("Render", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		s, err := core.Render(l)
		require.NoError(t, err)
		assert.Equal(t, "(0 1)", s)
	})
	t.Run("First", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		item, err := l.First()
		require.NoError(t, err)
		eq, err := core.Eq(items[0], item)
		require.NoError(t, err)
		assert.True(t, eq)
	})
	t.Run("Next", func(t *testing.T) {
		t.Parallel()
		// Dropping the head of a packed list leaves a head list.
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		seq, err := l.Next()
		require.NoError(t, err)
		assert.IsType(t, core.PersistentHeadList{}, seq)
	})
	t.Run("Cons", func(t *testing.T) {
		t.Parallel()
		// Consing a third item upgrades a packed list to a deep list.
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		l, err = l.Cons(mustInt(2))
		require.NoError(t, err)
		assert.IsType(t, core.DeepPersistentList{}, l)
		got, err := l.First()
		require.NoError(t, err)
		assertEq(t, mustInt(2), got)
	})
	t.Run("Conj", func(t *testing.T) {
		t.Parallel()
		t.Run("One", func(t *testing.T) {
			t.Parallel()
			l, err := core.NewList(capnp.SingleSegment(nil), items...)
			require.NoError(t, err)
			require.IsType(t, core.PackedPersistentList{}, l)
			ctr, err := l.Conj(core.True)
			require.NoError(t, err)
			require.IsType(t, core.DeepPersistentList{}, ctr)
			// NOTE(review): this reads l.First() — the original list,
			// whose head is items[0] — yet expects core.True, which could
			// only be observed through ctr. Confirm whether ctr.First()
			// was intended here (same pattern in "Many" below).
			got, err := l.First()
			require.NoError(t, err)
			assertEq(t, core.True, got)
		})
		t.Run("Many", func(t *testing.T) {
			t.Parallel()
			l, err := core.NewList(capnp.SingleSegment(nil), items...)
			require.NoError(t, err)
			require.IsType(t, core.PackedPersistentList{}, l)
			ctr, err := l.Conj(core.True, core.False)
			require.NoError(t, err)
			require.IsType(t, core.DeepPersistentList{}, ctr)
			got, err := l.First()
			require.NoError(t, err)
			assertEq(t, core.False, got)
		})
	})
	t.Run("Iter", func(t *testing.T) {
		// ForEach must visit the items in order.
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		var i int
		require.NoError(t, core.ForEach(l, func(item ww.Any) (bool, error) {
			assertEq(t, items[i], item)
			i++
			return false, nil
		}))
	})
}
// TestDeepPersistentList exercises lists with enough items (3) to be backed
// by the DeepPersistentList representation.
func TestDeepPersistentList(t *testing.T) {
	t.Parallel()
	items := valueRange(3)
	t.Run("New", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		assert.IsType(t, core.DeepPersistentList{}, l)
	})
	t.Run("Count", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		cnt, err := l.Count()
		require.NoError(t, err)
		assert.Equal(t, len(items), cnt)
	})
	t.Run("Render", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		s, err := core.Render(l)
		require.NoError(t, err)
		assert.Equal(t, "(0 1 2)", s)
	})
	t.Run("First", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		item, err := l.First()
		require.NoError(t, err)
		assertEq(t, items[0], item)
	})
	t.Run("Next", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		seq, err := l.Next()
		require.NoError(t, err)
		assert.IsType(t, core.PackedPersistentList{}, seq)
	})
	t.Run("Cons", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		require.IsType(t, core.DeepPersistentList{}, l)
		l, err = l.Cons(mustInt(3))
		require.NoError(t, err)
		assert.IsType(t, core.DeepPersistentList{}, l)
		cnt, err := l.Count()
		require.NoError(t, err)
		assert.Equal(t, len(items)+1, cnt)
		got, err := l.First()
		require.NoError(t, err)
		assertEq(t, mustInt(3), got)
	})
	t.Run("Conj", func(t *testing.T) {
		t.Parallel()
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		ctr, err := l.Conj(core.True, core.False)
		require.NoError(t, err)
		require.IsType(t, core.DeepPersistentList{}, ctr)
		cnt, err := ctr.Count()
		require.NoError(t, err)
		assert.Equal(t, len(items)+2, cnt)
		// BUGFIX: read the head of the conjed list ctr, not the original l.
		// The list is persistent, so l still starts with items[0] and the
		// previous l.First() could not satisfy the core.False assertion.
		got, err := ctr.First()
		require.NoError(t, err)
		assertEq(t, core.False, got)
	})
	t.Run("Iter", func(t *testing.T) {
		l, err := core.NewList(capnp.SingleSegment(nil), items...)
		require.NoError(t, err)
		var i int
		require.NoError(t, core.ForEach(l, func(item ww.Any) (bool, error) {
			assertEq(t, items[i], item)
			i++
			return false, nil
		}))
	})
}
// mustList is a test helper that builds a list from vs, panicking if
// construction fails.
func mustList(vs ...ww.Any) core.List {
	list, err := core.NewList(capnp.SingleSegment(nil), vs...)
	if err != nil {
		panic(err)
	}
	return list
}
// assertEq fails the test when want and got are not equal according to
// core.Eq (or when core.Eq itself errors).
func assertEq(t *testing.T, want, got ww.Any) {
	equal, err := core.Eq(want, got)
	require.NoError(t, err, "core.Eq returned an error")
	assert.True(t, equal, "mem values are not equal")
}
|
package application
import (
"github.com/jsm/gode/env"
)
// Env contains stored environment information, initialized once at package
// load via env.Initialize().
var Env = env.Initialize()
|
package main
import "fmt"
import "sync"
func main() {
	// wg lets this parent goroutine wait for its child goroutines to finish.
	var wg sync.WaitGroup
	sayHola := func() {
		// Signal completion to the parent when this child returns.
		defer wg.Done()
		fmt.Println("Hi IN Spanish")
	}
	wg.Add(1)
	go sayHola()
	// Goroutines execute in the same address space they are created in;
	// the example below demonstrates what that means.
	// my_name is a variable in this address space.
	my_name := "prasad"
	// Tell the WaitGroup that one more child goroutine is being added.
	wg.Add(1)
	// Create a goroutine in the same address space as the current goroutine.
	go func() {
		defer wg.Done()
		my_name = "Jon"
	}()
	// NOTE(review): this print races with the write of my_name in the
	// goroutine above (no synchronization before it), so it may show
	// "prasad" or "Jon" — `go run -race` would flag this line.
	fmt.Println(my_name)
	wg.Wait() // join point: all goroutines are finished before the code below runs.
	// After Wait the assignment is guaranteed to have happened, so this
	// always prints "Jon".
	fmt.Println(my_name)
	// Next concept: the Go runtime keeps memory referenced by goroutines
	// alive. The for loop below finishes before the goroutines it spawns
	// run, yet salutation stays reachable for them.
	// NOTE(review): before Go 1.22 every iteration shared one loop
	// variable, so each goroutine would likely print the last value
	// ("good day"); since Go 1.22 each iteration gets its own variable —
	// confirm which toolchain this example targets.
	for _, salutation := range []string{"hello", "greetings", "good day"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(salutation)
		}()
	}
	// Wait for the goroutines created by the loop to finish.
	wg.Wait()
	// To give each goroutine its own value, pass salutation as an argument.
	for _, salutation := range []string{"hello", "greetings", "good day"} {
		wg.Add(1)
		go func(salute string) {
			defer wg.Done()
			fmt.Println(salute)
		}(salutation) // the loop's salutation is passed by value here
	}
	// Again wait for this loop's goroutines to end.
	wg.Wait()
}
|
package cmd
import (
e "github.com/cloudposse/atmos/internal/exec"
u "github.com/cloudposse/atmos/pkg/utils"
"github.com/spf13/cobra"
)
// helmfileCmd represents the base command for all helmfile sub-commands.
var helmfileCmd = &cobra.Command{
	Use:   "helmfile",
	Short: "Execute 'helmfile' commands",
	Long:  `This command runs helmfile commands`,
	// Unknown flags are tolerated so they can be forwarded to helmfile
	// itself instead of being rejected by cobra.
	FParseErrWhitelist: struct{ UnknownFlags bool }{UnknownFlags: true},
	Run: func(cmd *cobra.Command, args []string) {
		err := e.ExecuteHelmfileCmd(cmd, args)
		if err != nil {
			u.LogErrorAndExit(err)
		}
	},
}
// init registers the helmfile command on the root command.
func init() {
	// Disable cobra's own flag parsing so every flag reaches helmfile
	// untouched. See: https://github.com/spf13/cobra/issues/739
	helmfileCmd.DisableFlagParsing = true
	helmfileCmd.PersistentFlags().StringP("stack", "s", "", "atmos helmfile <helmfile_command> <component> -s <stack>")
	RootCmd.AddCommand(helmfileCmd)
}
|
/*
Copyright 2022 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/bmatcuk/doublestar"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/filemon"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
)
// kodataRoot is the directory in the container where ko places static assets.
// See https://github.com/google/ko/blob/2f230b88c4891ee3a71b01c1fa65e85e8d6b5f5b/README.md#static-assets
// and https://github.com/google/ko/blob/2f230b88c4891ee3a71b01c1fa65e85e8d6b5f5b/pkg/build/gobuild.go#L514
const kodataRoot = "/var/run/ko"
// Infer syncs static content in the kodata directory based on matching file name patterns.
// It returns maps of files to be copied and deleted.
func Infer(ctx context.Context, a *latest.Artifact, e filemon.Events) (toCopy map[string][]string, toDelete map[string][]string, err error) {
	// Collect modified and added files into a fresh slice rather than
	// appending e.Added directly onto e.Modified: if e.Modified has spare
	// capacity, append would write into its backing array and clobber data
	// still visible to the caller (slice aliasing).
	changed := make([]string, 0, len(e.Modified)+len(e.Added))
	changed = append(changed, e.Modified...)
	changed = append(changed, e.Added...)
	toCopy, err = inferSync(ctx, a, changed)
	if err != nil {
		return nil, nil, err
	}
	toDelete, err = inferSync(ctx, a, e.Deleted)
	if err != nil {
		return nil, nil, err
	}
	return
}
// inferSync determines if the files match any of the inferred file sync patterns configured for the artifact.
// For files that match at least one pattern, the function determines the destination path.
// The return value is a map of source file location to destination path.
func inferSync(ctx context.Context, a *latest.Artifact, files []string) (map[string][]string, error) {
	basePath, err := findLocalKodataPath(a)
	if err != nil {
		return nil, err
	}
	matched := map[string][]string{}
	for _, file := range files {
		target, err := syncDest(file, a.Workspace, basePath, a.Sync.Infer)
		if err != nil {
			return nil, err
		}
		if target == "" {
			log.Entry(ctx).Debugf("File %q does not match any sync pattern. Skipping sync", file)
			continue
		}
		log.Entry(ctx).Debugf("Syncing %q to %q", file, target)
		matched[file] = []string{target}
	}
	return matched, nil
}
// syncDest returns the destination file path if the input file path matches at least one of the patterns.
// If the file doesn't match any of the patterns, the function returns zero values.
func syncDest(f string, workspace string, localBasePath string, patterns []string) (string, error) {
	relPath, err := filepath.Rel(workspace, f)
	if err != nil {
		return "", err
	}
	for _, pattern := range patterns {
		ok, err := doublestar.PathMatch(filepath.FromSlash(pattern), relPath)
		if err != nil {
			return "", fmt.Errorf("pattern error for file %q and pattern %s: %w", relPath, pattern, err)
		}
		if !ok {
			continue
		}
		// Path of the file relative to the local static content directory.
		localFile, err := filepath.Rel(localBasePath, f)
		if err != nil {
			return "", fmt.Errorf("relative path error for path %q and file %q: %w", localBasePath, f, err)
		}
		// Destination paths always use forward slashes (container-side path).
		return strings.ReplaceAll(filepath.Join(kodataRoot, localFile), "\\", "/"), nil
	}
	return "", nil
}
// findLocalKodataPath returns the local path to static content for ko artifacts.
func findLocalKodataPath(a *latest.Artifact) (string, error) {
	if strings.Contains(a.KoArtifact.Main, "...") {
		// this error should be caught by validation earlier
		return "", fmt.Errorf("unable to infer file sync when ko.main contains the '...' wildcard")
	}
	return filepath.Join(a.Workspace, a.KoArtifact.Dir, a.KoArtifact.Main, "kodata"), nil
}
|
package digitsum
import (
"strconv"
"strings"
)
/*
We define the super digit of an integer
using the following rules:
Given an integer x, we need to find the super digit of the integer.
If x has only one digit, then its super digit is x.
Otherwise, the super digit of x
is equal to the super digit of the sum of the digits of x.
eg
super_digit(9875) 9+8+7+5 = 29
super_digit(29) 2 + 9 = 11
super_digit(11) 1 + 1 = 2
super_digit(2) = 2
*/
// superDigit returns the recursive digit sum ("super digit") of the number
// formed by conceptually concatenating the decimal string n with itself k
// times.
//
// n may have up to 10^5 digits, so it cannot be parsed into any numeric
// type; its digits are summed directly from the string instead. Because a
// digit sum distributes over concatenation, sum(n repeated k times) equals
// sum(n) * k, which always fits in int64 (at most 9*10^5 * 10^5 ≈ 9*10^10 —
// this overflowed int32, hence the int64 arithmetic).
func superDigit(n string, k int32) int32 {
	var sum int64
	for _, s := range strings.Split(n, "") {
		d, err := strconv.Atoi(s)
		if err != nil {
			// Input is expected to be a pure decimal string; skip anything
			// else explicitly instead of silently adding a garbage value.
			continue
		}
		sum += int64(d)
	}
	return int32(super(sum * int64(k)))
}

// super recursively reduces n to a single digit by repeatedly summing its
// digits. n must be non-negative.
func super(n int64) int64 {
	if n < 10 {
		return n
	}
	return super(sumDigits(n))
}

// sumDigits returns the sum of the base-10 digits of n (n >= 0).
func sumDigits(n int64) int64 {
	var sum int64
	for n > 0 {
		sum += n % 10
		n = n / 10
	}
	return sum
}
// The digital root of any base-10 number N is congruent to N mod 9, so the
// recursion above could be replaced by a single modulo. For that to work
// the first digit sum would still have to be done on the string, since
// 10^100000 is far too large for any numeric type.
//
// justModNine is an unimplemented sketch of that approach; it currently
// always returns 0.
func justModNine(n string, k int32) int32 {
	// would be
	// rem := n * k % 9
	// if rem == 0 {
	// 	return 9
	// }
	// return rem
	return 0
}
// slice-of-ints version
// func superSlice(n string, k int32) int32 {
// // turn n into an array of ints
// strSlice := strings.Split(n, "")
// numSlice := make([]int, len(strSlice))
// for i, s := range strSlice {
// numSlice[i], _ = strconv.Atoi(s)
// }
// // Do the first level of digit summing
// // note that sum(n*k) = sum(n) * k
// return int32(sumSlice(numSlice)* int(k))
// }
// func sliceRecurse(digits []int) int {
// if len(digits) == 1 {
// // base case
// return digits[0]
// }
// return sliceRecurse()
// }
// Difficulty here: summing a []int -> int but then
// splitting that int back into digits ([]int)
// func sumSlice(digits []int) []int {
// var sum int
// for _, v := range digits {
// sum += v
// }
// // need to turn this back into a slice of digits
// return sum
// }
|
package connection
import (
"time"
)
// Config represents the configuration used to create a new connection.
type Config struct {
	// Settings.

	// Created is the time at which the connection was created; New rejects
	// a zero value.
	Created time.Time
	// ID uniquely identifies the connection; must be non-empty for New.
	ID string
	// PeerAID and PeerBID identify the two peers joined by this connection;
	// both must be non-empty for New.
	PeerAID string
	PeerBID string
	// Weight is the connection's weight value; zero is accepted by New.
	Weight float64
}
// DefaultConfig provides a default configuration to create a new connection
// by best effort. Only Created receives a non-zero default; the identifiers
// must be filled in by the caller before New will accept the config.
func DefaultConfig() Config {
	var config Config
	config.Created = time.Now()
	return config
}
// New creates a new configured connection. It validates the config before
// constructing anything, returning an invalidConfigError when a required
// field is missing.
func New(config Config) (Connection, error) {
	// Dependencies.
	switch {
	case config.Created.IsZero():
		return nil, maskAnyf(invalidConfigError, "created must not be empty")
	case config.ID == "":
		return nil, maskAnyf(invalidConfigError, "ID must not be empty")
	case config.PeerAID == "":
		return nil, maskAnyf(invalidConfigError, "peerA ID must not be empty")
	case config.PeerBID == "":
		return nil, maskAnyf(invalidConfigError, "peerB ID must not be empty")
	}
	return &connection{
		// Settings.
		created: config.Created,
		id:      config.ID,
		peerAID: config.PeerAID,
		peerBID: config.PeerBID,
		weight:  config.Weight,
	}, nil
}
// connection is the concrete Connection implementation returned by New.
type connection struct {
	// Settings.
	created time.Time
	id      string
	peerAID string
	peerBID string
	weight  float64
}
// Created returns the time at which the connection was created.
func (c *connection) Created() time.Time {
	return c.created
}

// ID returns the connection's unique identifier.
func (c *connection) ID() string {
	return c.id
}

// PeerAID returns the ID of the first peer of the connection.
func (c *connection) PeerAID() string {
	return c.peerAID
}

// PeerBID returns the ID of the second peer of the connection.
func (c *connection) PeerBID() string {
	return c.peerBID
}

// Weight returns the connection's weight value.
func (c *connection) Weight() float64 {
	return c.weight
}
|
// FIXME: Remove unnecessary table-driven tests (and simplify tests)
package dynamicstruct_test
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
. "github.com/goldeneggg/structil/dynamicstruct"
)
type (
	// DynamicTestStruct is the primary test fixture, covering scalar,
	// pointer, slice, map, func, embedded-struct, and struct-slice fields.
	DynamicTestStruct struct {
		Byte        byte
		Bytes       []byte
		Int         int
		Int64       int64
		Uint        uint
		Uint64      uint64
		Float32     float32
		Float64     float64
		String      string
		Stringptr   *string
		Stringslice []string
		Bool        bool
		Map         map[string]interface{}
		Func        func(string) interface{}
		// ChInt chan int // Note: type chan is not supported by mapstructure
		DynamicTestStruct2
		DynamicTestStruct2Ptr      *DynamicTestStruct2
		DynamicTestStruct4Slice    []DynamicTestStruct4
		DynamicTestStruct4PtrSlice []*DynamicTestStruct4
	}
	// DynamicTestStruct2 nests DynamicTestStruct3 as an embedded pointer.
	DynamicTestStruct2 struct {
		String string
		*DynamicTestStruct3
	}
	// DynamicTestStruct3 is a simple leaf struct.
	DynamicTestStruct3 struct {
		String string
		Int    int
	}
	// DynamicTestStruct4 is used as a slice element type in the fixtures.
	DynamicTestStruct4 struct {
		String  string
		String2 string
	}
)
// Struct tags assigned to the "...WithTag" fields added by newTestBuilder.
const (
	stringFieldTag    = `json:"string_field_with_tag"`
	intFieldTag       = `json:"int_field_with_tag"`
	byteFieldTag      = `json:"byte_field_with_tag"`
	float32FieldTag   = `json:"float32_field_with_tag"`
	float64FieldTag   = `json:"float64_field_with_tag"`
	boolFieldTag      = `json:"bool_field_with_tag"`
	mapFieldTag       = `json:"map_field_with_tag"`
	funcFieldTag      = `json:"func_field_with_tag"`
	chanBothFieldTag  = `json:"chan_both_field_with_tag"`
	chanRecvFieldTag  = `json:"chan_recv_field_with_tag"`
	chanSendFieldTag  = `json:"chan_send_field_with_tag"`
	structFieldTag    = `json:"struct_field_with_tag"`
	structPtrFieldTag = `json:"struct_ptr_field_with_tag"`
	sliceFieldTag     = `json:"slice_field_with_tag"`
	interfaceFieldTag = `json:"interface_field_with_tag"`
)
const expectedDefinition = `type DynamicStruct struct {
BoolField bool
BoolFieldWithTag bool ` + "`json:\"bool_field_with_tag\"`" + `
ByteField uint8
ByteFieldWithTag uint8 ` + "`json:\"byte_field_with_tag\"`" + `
ChanBothField chan int
ChanBothFieldWithTag chan int ` + "`json:\"chan_both_field_with_tag\"`" + `
ChanRecvField <-chan int
ChanRecvFieldWithTag <-chan int ` + "`json:\"chan_recv_field_with_tag\"`" + `
ChanSendField chan<- int
ChanSendFieldWithTag chan<- int ` + "`json:\"chan_send_field_with_tag\"`" + `
Float32Field float32
Float32FieldWithTag float32 ` + "`json:\"float32_field_with_tag\"`" + `
Float64Field float64
Float64FieldWithTag float64 ` + "`json:\"float64_field_with_tag\"`" + `
FuncField func(int, int) (bool, *errors.errorString)
FuncFieldWithTag func(int, int) (bool, *errors.errorString) ` + "`json:\"func_field_with_tag\"`" + `
IntField int
IntFieldWithTag int ` + "`json:\"int_field_with_tag\"`" + `
InterfaceField interface {}
InterfaceFieldWithTag interface {} ` + "`json:\"interface_field_with_tag\"`" + `
InterfacePtrField *interface {}
InterfacePtrFieldWithTag *interface {} ` + "`json:\"interface_field_with_tag\"`" + `
MapField map[string]float32
MapFieldWithTag map[string]float32 ` + "`json:\"map_field_with_tag\"`" + `
SliceField []struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
}
SliceFieldWithTag []struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
} ` + "`json:\"slice_field_with_tag\"`" + `
StringField string
StringFieldWithTag string ` + "`json:\"string_field_with_tag\"`" + `
StructField struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
}
StructFieldWithTag struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
} ` + "`json:\"struct_field_with_tag\"`" + `
StructPtrField struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
}
StructPtrFieldWithTag struct {
Bool bool
Byte uint8
Bytes []uint8
DynamicTestStruct2 struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct2Ptr struct {
DynamicTestStruct3 struct {
Int int
String string
}
String string
}
DynamicTestStruct4PtrSlice []struct {
String string
String2 string
}
DynamicTestStruct4Slice []struct {
String string
String2 string
}
Float32 float32
Float64 float64
Func func(string) interface {}
Int int
Int64 int64
Map map[string]interface {}
String string
Stringptr *string
Stringslice []string
Uint uint
Uint64 uint64
} ` + "`json:\"struct_ptr_field_with_tag\"`" + `
}`
// Shared fixture values referenced by newTestDynamicStruct.
var (
	dynamicTestString2 = "test name2"
	dynamicTestFunc    = func(s string) interface{} { return s + "-func" }
	//dynamicTestChan = make(chan int)
)
// newTestDynamicStruct returns a fully populated DynamicTestStruct fixture,
// including nested structs, struct pointers, and slices of structs.
func newTestDynamicStruct() DynamicTestStruct {
	return DynamicTestStruct{
		Byte:        0x61,
		Bytes:       []byte{0x00, 0xFF},
		Int:         int(-2),
		Int64:       int64(-1),
		Uint:        uint(2),
		Uint64:      uint64(1),
		Float32:     float32(-1.23),
		Float64:     float64(-3.45),
		String:      "test name",
		Stringptr:   &dynamicTestString2,
		Stringslice: []string{"strslice1", "strslice2"},
		Bool:        true,
		Map:         map[string]interface{}{"k1": "v1", "k2": 2},
		Func:        dynamicTestFunc,
		// ChInt: dynamicTestChan, // Note: type chan is not supported by mapstructure
		DynamicTestStruct2: DynamicTestStruct2{
			String: "struct2 string",
			DynamicTestStruct3: &DynamicTestStruct3{
				String: "struct3 string",
				Int:    -123,
			},
		},
		DynamicTestStruct2Ptr: &DynamicTestStruct2{
			String: "struct2 string ptr",
			DynamicTestStruct3: &DynamicTestStruct3{
				String: "struct3 string ptr",
				Int:    -456,
			},
		},
		DynamicTestStruct4Slice: []DynamicTestStruct4{
			{
				String:  "key100",
				String2: "value100",
			},
			{
				String:  "key200",
				String2: "value200",
			},
		},
		DynamicTestStruct4PtrSlice: []*DynamicTestStruct4{
			{
				String:  "key991",
				String2: "value991",
			},
			{
				String:  "key992",
				String2: "value992",
			},
		},
	}
}
// newTestDynamicStructPtr returns a pointer to a fresh test fixture struct.
func newTestDynamicStructPtr() *DynamicTestStruct {
	fixture := newTestDynamicStruct()
	return &fixture
}
// newTestBuilder returns a Builder pre-populated with one field of every
// supported kind, each in a plain and a tagged variant (32 fields total).
// See the "expectedDefinition" constant for the Definition this Builder
// produces.
func newTestBuilder() *Builder {
	return NewBuilder().
		AddString("StringField").
		AddStringWithTag("StringFieldWithTag", stringFieldTag).
		AddInt("IntField").
		AddIntWithTag("IntFieldWithTag", intFieldTag).
		AddByte("ByteField").
		AddByteWithTag("ByteFieldWithTag", byteFieldTag).
		AddFloat32("Float32Field").
		AddFloat32WithTag("Float32FieldWithTag", float32FieldTag).
		AddFloat64("Float64Field").
		AddFloat64WithTag("Float64FieldWithTag", float64FieldTag).
		AddBool("BoolField").
		AddBoolWithTag("BoolFieldWithTag", boolFieldTag).
		AddMap("MapField", SampleString, SampleFloat32).
		AddMapWithTag("MapFieldWithTag", SampleString, SampleFloat32, mapFieldTag).
		AddFunc("FuncField", []interface{}{SampleInt, SampleInt}, []interface{}{SampleBool, ErrSample}).
		AddFuncWithTag("FuncFieldWithTag", []interface{}{SampleInt, SampleInt}, []interface{}{SampleBool, ErrSample}, funcFieldTag).
		AddChanBoth("ChanBothField", SampleInt).
		AddChanBothWithTag("ChanBothFieldWithTag", SampleInt, chanBothFieldTag).
		AddChanRecv("ChanRecvField", SampleInt).
		AddChanRecvWithTag("ChanRecvFieldWithTag", SampleInt, chanRecvFieldTag).
		AddChanSend("ChanSendField", SampleInt).
		AddChanSendWithTag("ChanSendFieldWithTag", SampleInt, chanSendFieldTag).
		AddStruct("StructField", newTestDynamicStruct(), false).
		AddStructWithTag("StructFieldWithTag", newTestDynamicStruct(), false, structFieldTag).
		AddStructPtr("StructPtrField", newTestDynamicStructPtr()).
		AddStructPtrWithTag("StructPtrFieldWithTag", newTestDynamicStructPtr(), structPtrFieldTag).
		AddSlice("SliceField", newTestDynamicStructPtr()).
		AddSliceWithTag("SliceFieldWithTag", newTestDynamicStructPtr(), sliceFieldTag).
		AddInterface("InterfaceField", false).
		AddInterfaceWithTag("InterfaceFieldWithTag", false, interfaceFieldTag).
		AddInterface("InterfacePtrField", true).
		AddInterfaceWithTag("InterfacePtrFieldWithTag", true, interfaceFieldTag)
}
// newTestBuilderWithStructName returns the standard test Builder with its
// struct name overridden to name.
func newTestBuilderWithStructName(name string) *Builder {
	builder := newTestBuilder()
	builder.SetStructName(name)
	return builder
}
// TestBuilderAddRemoveExistsNumField verifies Exists, NumField, GetTag and
// GetStructName across Builders with fields added, removed, retagged, and
// renamed.
func TestBuilderAddRemoveExistsNumField(t *testing.T) {
	t.Parallel()
	type args struct {
		builder *Builder
	}
	tests := []struct {
		name                      string
		args                      args
		wantExistsIntField        bool
		wantNumField              int
		wantStructName            string
		wantStringFieldWithTagTag string
	}{
		{
			name:                      "have fields set by newTestBuilder()",
			args:                      args{builder: newTestBuilder()},
			wantExistsIntField:        true,
			wantNumField:              32, // See: newTestBuilder()
			wantStructName:            "DynamicStruct",
			wantStringFieldWithTagTag: stringFieldTag,
		},
		{
			name:                      "have fields set by newTestBuilder() and Remove(IntField)",
			args:                      args{builder: newTestBuilder().Remove("IntField")},
			wantExistsIntField:        false,
			wantNumField:              31,
			wantStructName:            "DynamicStruct",
			wantStringFieldWithTagTag: stringFieldTag,
		},
		{
			name:                      "have fields set by newTestBuilder() and SetTag(StringFieldWithTag)",
			args:                      args{builder: newTestBuilder().SetTag("StringFieldWithTag", "abc")},
			wantExistsIntField:        true,
			wantNumField:              32,
			wantStructName:            "DynamicStruct",
			wantStringFieldWithTagTag: "abc",
		},
		{
			name:                      "have struct name by newTestBuilderWithStructName()",
			args:                      args{builder: newTestBuilderWithStructName("Abc")},
			wantExistsIntField:        true,
			wantNumField:              32,
			wantStructName:            "Abc",
			wantStringFieldWithTagTag: stringFieldTag,
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if tt.args.builder.Exists("IntField") != tt.wantExistsIntField {
				t.Errorf("result Exists(IntField) is unexpected. got: %v, want: %v", tt.args.builder.Exists("IntField"), tt.wantExistsIntField)
				return
			}
			if tt.args.builder.NumField() != tt.wantNumField {
				t.Errorf("result numfield is unexpected. got: %d, want: %d", tt.args.builder.NumField(), tt.wantNumField)
				return
			}
			if tt.args.builder.GetTag("StringFieldWithTag") != tt.wantStringFieldWithTagTag {
				t.Errorf("result GetTag(StringFieldWithTag) is unexpected. got: %s, want: %s", tt.args.builder.GetTag("StringFieldWithTag"), tt.wantStringFieldWithTagTag)
				return
			}
			if tt.args.builder.GetStructName() != tt.wantStructName {
				t.Errorf("result structName is unexpected. got: %s, want: %s", tt.args.builder.GetStructName(), tt.wantStructName)
				return
			}
		})
	}
}
// TestBuilderAddStringWithEmptyName confirms that Build fails when a string
// field is added with an empty name.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddStringWithEmptyName(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddString("").Build()
	if err == nil {
		t.Error("expected Build to fail for an empty field name, but got nil error")
	}
}
// TestBuilderAddMapWithNilKey confirms that Build fails when a map field is
// added with a nil key type.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddMapWithNilKey(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddMap("MapFieldWithNilKey", nil, SampleFloat32).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil map key, but got nil error")
	}
}
// TestBuilderAddMapWithNilValue confirms that a nil map VALUE type is
// accepted: Build must succeed (only a nil key is invalid).
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file; the field name is also fixed from the copy-pasted
// "MapFieldWithNilKey".
func TestBuilderAddMapWithNilValue(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddMap("MapFieldWithNilValue", SampleString, nil).Build()
	// nil map value does NOT cause error
	if err != nil {
		t.Errorf("unexpected error occurred: %v", err)
	}
}
// TestBuilderAddFuncWithNilArgs confirms that a func field with nil args is
// accepted: Build must succeed.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddFuncWithNilArgs(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddFunc("FuncFieldWithNilArgs", nil, []interface{}{SampleBool}).Build()
	if err != nil {
		t.Errorf("unexpected error occurred: %v", err)
	}
}
// TestBuilderAddFuncWithNilReturns confirms that a func field with nil
// returns is accepted: Build must succeed.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddFuncWithNilReturns(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddFunc("FuncFieldWithNilReturns", []interface{}{SampleInt}, nil).Build()
	if err != nil {
		t.Errorf("unexpected error occurred: %v", err)
	}
}
// TestBuilderAddChanBothWithNilElem confirms that Build fails when a
// bidirectional channel field is added with a nil element type.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file; the field name is also fixed from the copy-pasted
// "MapFieldWithNilKey".
func TestBuilderAddChanBothWithNilElem(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddChanBoth("ChanBothFieldWithNilElem", nil).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil channel element, but got nil error")
	}
}
// TestBuilderAddChanRecvWithNilElem confirms that Build fails when a
// receive-only channel field is added with a nil element type.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file; the unused wantError field and the copy-pasted
// "MapFieldWithNilKey" field name are removed as well.
func TestBuilderAddChanRecvWithNilElem(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddChanRecv("ChanRecvFieldWithNilElem", nil).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil channel element, but got nil error")
	}
}
// TestBuilderAddChanSendWithNilElem confirms that Build fails when a
// send-only channel field is added with a nil element type.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file; the field name is also fixed from the copy-pasted
// "MapFieldWithNilKey".
func TestBuilderAddChanSendWithNilElem(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddChanSend("ChanSendFieldWithNilElem", nil).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil channel element, but got nil error")
	}
}
// TestBuilderAddStructWithNil confirms that Build fails when a struct field
// is added with a nil value.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddStructWithNil(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddStruct("StructFieldWithNil", nil, false).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil struct, but got nil error")
	}
}
// TestBuilderAddStructPtrWithNil confirms that Build fails when a struct
// pointer field is added with a nil value.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddStructPtrWithNil(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddStructPtr("StructPtrFieldWithNil", nil).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil struct pointer, but got nil error")
	}
}
// TestBuilderAddSliceWithNil confirms that Build fails when a slice field
// is added with a nil element value.
// Simplified from a single-case table-driven test per the FIXME at the top
// of this file.
func TestBuilderAddSliceWithNil(t *testing.T) {
	t.Parallel()
	_, err := newTestBuilder().AddSlice("SliceFieldWithNil", nil).Build()
	if err == nil {
		t.Error("expected Build to fail for a nil slice element, but got nil error")
	}
}
// buildArgs bundles the Builder under test for the Build/BuildNonPtr tests.
type buildArgs struct {
	builder *Builder
}

// buildTest describes one table-driven case shared by TestBuilderBuild and
// TestBuilderBuildNonPtr.
type buildTest struct {
	name                string
	args                buildArgs
	wantIsPtr           bool   // expected DynamicStruct.IsPtr()
	wantStructName      string // expected DynamicStruct.Name()
	wantNumField        int    // expected number of generated fields
	wantDefinition      string // expected Definition() output; "" skips the check
	camelizeKeys        bool
	tryAddDynamicStruct bool // additionally feed the built struct back via AddDynamicStruct*
}
// TestBuilderBuild exercises Build() on valid Builders and checks the
// resulting DynamicStruct's name, field count, pointer-ness, definition
// string, struct tags, and reuse via the AddDynamicStruct* family.
func TestBuilderBuild(t *testing.T) {
	t.Parallel()
	tests := []buildTest{
		{
			name:                "Build() with valid Builder",
			args:                buildArgs{builder: newTestBuilder()},
			wantIsPtr:           true,
			wantStructName:      "DynamicStruct",
			wantNumField:        32, // See: newTestBuilder()
			wantDefinition:      expectedDefinition,
			camelizeKeys:        true,
			tryAddDynamicStruct: true,
		},
		{
			name:                "Build() with valid Builder",
			args:                buildArgs{builder: newTestBuilder()},
			wantIsPtr:           true,
			wantStructName:      "DynamicStruct",
			wantNumField:        32, // See: newTestBuilder()
			wantDefinition:      expectedDefinition,
			tryAddDynamicStruct: true,
		},
		{
			name:           "Build() with valid Builder with struct name",
			args:           buildArgs{builder: newTestBuilderWithStructName("HogeHuga")},
			wantIsPtr:      true,
			wantStructName: "HogeHuga",
			wantNumField:   32, // See: newTestBuilder()
			camelizeKeys:   true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			ds, err := tt.args.builder.Build()
			if err != nil {
				t.Errorf("unexpected error caused by DynamicStruct Build: %v", err)
			}
			// Each helper reports failure via t.Fatalf; the bool return
			// allows an early stop of the remaining checks.
			if !testBuilderBuildWant(t, ds, tt) {
				return
			}
			if !testBuilderBuildTag(t, ds, tt) {
				return
			}
			if tt.tryAddDynamicStruct {
				if !testBuilderBuildAddDynamicStruct(t, ds, tt) {
					return
				}
			}
		})
	}
}
// TestBuilderBuildNonPtr mirrors TestBuilderBuild but calls BuildNonPtr,
// so the resulting DynamicStruct is expected to report IsPtr() == false.
func TestBuilderBuildNonPtr(t *testing.T) {
	t.Parallel()
	tests := []buildTest{
		{
			name:                "Build() with valid Builder",
			args:                buildArgs{builder: newTestBuilder()},
			wantIsPtr:           false,
			wantStructName:      "DynamicStruct",
			wantNumField:        32, // See: newTestBuilder()
			wantDefinition:      expectedDefinition,
			camelizeKeys:        true,
			tryAddDynamicStruct: true,
		},
		{
			name:                "Build() with valid Builder",
			args:                buildArgs{builder: newTestBuilder()},
			wantIsPtr:           false,
			wantStructName:      "DynamicStruct",
			wantNumField:        32, // See: newTestBuilder()
			wantDefinition:      expectedDefinition,
			tryAddDynamicStruct: true,
		},
		{
			name:           "Build() with valid Builder with struct name",
			args:           buildArgs{builder: newTestBuilderWithStructName("HogeHuga")},
			wantIsPtr:      false,
			wantStructName: "HogeHuga",
			wantNumField:   32, // See: newTestBuilder()
			camelizeKeys:   true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			ds, err := tt.args.builder.BuildNonPtr()
			if err != nil {
				t.Errorf("unexpected error caused by DynamicStruct Build: %v", err)
			}
			if !testBuilderBuildWant(t, ds, tt) {
				return
			}
			if !testBuilderBuildTag(t, ds, tt) {
				return
			}
			if tt.tryAddDynamicStruct {
				if !testBuilderBuildAddDynamicStruct(t, ds, tt) {
					return
				}
			}
		})
	}
}
// testBuilderBuildWant checks the structural expectations of a built
// DynamicStruct: name, kind, field count (via Fields, Field and NumField),
// pointer-ness, and the generated Definition() string (called twice to
// verify that repeated calls are stable).
func testBuilderBuildWant(t *testing.T, ds *DynamicStruct, tt buildTest) bool {
	t.Helper()
	if ds.Name() != tt.wantStructName {
		t.Fatalf("result struct name is unexpected. got: %s, want: %s", ds.Name(), tt.wantStructName)
	}
	k := ds.Type().Kind()
	if k != reflect.Struct {
		t.Fatalf("result struct Type.Kind is unexpected. got: %s, want: Struct", k)
	}
	flds := ds.Fields()
	if len(flds) != tt.wantNumField {
		t.Fatalf("result Fields's length is unexpected. got: %d, want: %d", len(flds), tt.wantNumField)
	}
	if len(flds) > 0 {
		// Field(i) and Fields() must agree on the same underlying data.
		f := ds.Field(0)
		if flds[0].Name != f.Name {
			t.Fatalf("result Field(0) '%s' is unmatch with flds[0] '%s'", flds[0].Name, f.Name)
		}
	}
	if ds.NumField() != tt.wantNumField {
		t.Fatalf("result numfield is unexpected. got: %d, want: %d", ds.NumField(), tt.wantNumField)
	}
	if ds.IsPtr() != tt.wantIsPtr {
		t.Fatalf("unexpected pointer or not result. got: %v, want: %v", ds.IsPtr(), tt.wantIsPtr)
	}
	if tt.wantDefinition != "" {
		if d := cmp.Diff(ds.Definition(), tt.wantDefinition); d != "" {
			t.Fatalf("unexpected mismatch Definition: (-got +want)\n%s", d)
		}
		// 2nd call
		if d := cmp.Diff(ds.Definition(), tt.wantDefinition); d != "" {
			t.Fatalf("unexpected mismatch Definition(2nd call): (-got +want)\n%s", d)
		}
	}
	return true
}
// testBuilderBuildTag verifies, for each field kind added by
// newTestBuilder, that the "<Kind>Field" variant carries an empty struct
// tag and the "<Kind>FieldWithTag" variant carries the expected tag.
func testBuilderBuildTag(t *testing.T, ds *DynamicStruct, tt buildTest) bool {
	t.Helper()
	// Maps each field-name prefix to the tag that was attached to its
	// "WithTag" sibling when the Builder was constructed.
	prefixes := map[string]string{
		"String":    stringFieldTag,
		"Int":       intFieldTag,
		"Byte":      byteFieldTag,
		"Float32":   float32FieldTag,
		"Float64":   float64FieldTag,
		"Bool":      boolFieldTag,
		"Map":       mapFieldTag,
		"Func":      funcFieldTag,
		"ChanBoth":  chanBothFieldTag,
		"ChanRecv":  chanRecvFieldTag,
		"ChanSend":  chanSendFieldTag,
		"Struct":    structFieldTag,
		"StructPtr": structPtrFieldTag,
		"Slice":     sliceFieldTag,
	}
	var fName string
	for prefix, tagWithTag := range prefixes {
		// test without tag fields
		fName = prefix + "Field"
		st, ok := ds.FieldByName(fName)
		if ok {
			if d := cmp.Diff(st.Tag, reflect.StructTag("")); d != "" {
				t.Fatalf("unexpected mismatch Tag: fName: %s, (-got +want)\n%s", fName, d)
			}
		} else {
			t.Fatalf("Field %s does not exist.", fName)
		}
		// test with tag fields
		fName = prefix + "FieldWithTag"
		sft, ok := ds.FieldByName(fName)
		if ok {
			if d := cmp.Diff(sft.Tag, reflect.StructTag(tagWithTag)); d != "" {
				t.Fatalf("unexpected mismatch WithTag.Tag: fName: %s, (-got +want)\n%s", fName, d)
			}
		} else {
			t.Fatalf("Field %s does not exist.", fName)
		}
	}
	return true
}
// testBuilderBuildAddDynamicStruct verifies that a built DynamicStruct can
// itself be added to a fresh Builder through the AddDynamicStruct* family
// (value, pointer and slice forms, each with and without a tag), and that
// the resulting struct exposes all six added fields.
func testBuilderBuildAddDynamicStruct(t *testing.T, ds *DynamicStruct, tt buildTest) bool {
	t.Helper()
	// Exactly six fields are added below; keep this constant in sync.
	const numAdded = 6
	builder := newTestBuilder()
	builder.AddDynamicStruct("AdditionalDynamicStruct", ds, false)
	builder.AddDynamicStructWithTag("AdditionalDynamicStructWithTag", ds, false, "json")
	builder.AddDynamicStructPtr("AdditionalDynamicStructPtr", ds)
	builder.AddDynamicStructPtrWithTag("AdditionalDynamicStructPtrWithTag", ds, "json")
	builder.AddDynamicStructSlice("AdditionalDynamicStructSlice", ds)
	builder.AddDynamicStructSliceWithTag("AdditionalDynamicStructSliceWithTag", ds, "json")
	newds, err := builder.Build()
	if err != nil {
		t.Fatalf("unexpected error occurred from Build: %v", err)
	}
	if newds.NumField() != tt.wantNumField+numAdded {
		// Bug fix: the message previously printed wantNumField+2 while the
		// condition checked wantNumField+6.
		t.Fatalf("result numfield is unexpected. got: %d, want: %d", newds.NumField(), tt.wantNumField+numAdded)
	}
	// Bug fix: the loop replaces six copy-pasted checks, one of which
	// reported the wrong field name ("...WithTag" for the Ptr field).
	for _, fieldName := range []string{
		"AdditionalDynamicStruct",
		"AdditionalDynamicStructWithTag",
		"AdditionalDynamicStructPtr",
		"AdditionalDynamicStructPtrWithTag",
		"AdditionalDynamicStructSlice",
		"AdditionalDynamicStructSliceWithTag",
	} {
		if _, ok := newds.FieldByName(fieldName); !ok {
			t.Fatalf("additional %s field does not exist", fieldName)
		}
	}
	// TODO:
	// Compare newds.Definition() against an expected definition string
	// once the expected output (struct + struct-ptr fields with tags) is
	// finalized; see the original commented-out comparison in VCS history.
	newds.NewInterface()
	return true
}
|
package db
import (
"errors"
"log"
dbConf "github.com/product/internal/config/db"
)
var (
	// errorInvalidDbInstance is returned when the configured adapter is
	// not supported. Bug fix: error strings are lowercase per Go
	// convention (staticcheck ST1005).
	errorInvalidDbInstance = errors.New("invalid db instance")
	// ErrEmptyRequest is returned when a mandatory request payload is missing.
	ErrEmptyRequest = errors.New("request is mandatory")
	// instanceDb caches one DbDriver per configured database name.
	instanceDb = make(map[string]DbDriver)
)
const (
	// MySql is the adapter name expected in configuration for MySQL databases.
	MySql string = "mysql"
)
// DbDriver provides access to the underlying database handle; the concrete
// type returned by Db depends on the configured adapter.
type DbDriver interface {
	Db() interface{}
}

// Transactioner executes fc within a database transaction; the tx argument's
// concrete type depends on the adapter implementation.
type Transactioner interface {
	Transaction(fc func(tx interface{}) error) error
}
// NewInstanceDb is used to create a new instance DB
func NewInstanceDb(config dbConf.Database) (DbDriver, error) {
var err error
var dbName = config.Name
switch config.Adapter {
case MySql:
dbConn, sqlErr := NewGormMySQLDriver(config)
if sqlErr != nil {
err = sqlErr
log.Fatal("Database connection failed.")
}
instanceDb[dbName] = dbConn
default:
err = errorInvalidDbInstance
}
return instanceDb[dbName], err
} |
package channel
import (
"net"
"onlinejudgeForward/slflog"
)
// status encodes a judge server's state (semantics defined elsewhere).
type status uint8

// judgeList holds the judge servers discovered via UDP heartbeats.
var judgeList []JudgeInfo

func init() {
	// Bug fix: make([]JudgeInfo, 5) created five zero-value entries that
	// all later appends kept at the front of the list; allocate with
	// length 0 (capacity 5) instead.
	judgeList = make([]JudgeInfo, 0, 5)
}
// GetJudgeList returns the judge servers collected so far.
// NOTE(review): the returned slice aliases the package-level list and is
// appended to concurrently by catch — confirm callers tolerate that.
func GetJudgeList() []JudgeInfo {
	return judgeList
}
// StartCatch begins listening for judge heartbeats on the fixed local
// UDP address 127.0.0.1:5588. It blocks forever (catch loops).
func StartCatch() {
	catch("127.0.0.1:5588")
}
// JudgeInfo describes one judge server reported over UDP.
type JudgeInfo struct {
	Host          string  // judge host address
	Port          int     // judge port
	Status        status  // current judge status
	ProcessNum    int     // number of judge processes
	OperatingLoad float32 // reported load figure
}
// catch listens on addr for UDP heartbeat packets from judge servers,
// acknowledges each packet with "ack", and appends the parsed JudgeInfo
// to the package-level judgeList. It never returns; fatal errors exit
// via slflog.FatalErr.
func catch(addr string) {
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	slflog.FatalErr(err, "ResolveUDPAddr err")
	udp, err := net.ListenUDP("udp", udpAddr)
	slflog.FatalErr(err, "ListenUDP err")
	defer udp.Close()
	for {
		buf := make([]byte, 1024)
		// Renamed from "len", which shadowed the builtin.
		n, raddr, err := udp.ReadFromUDP(buf)
		slflog.FatalErr(err, "ReadFromUDP err")
		slflog.Debug(string(buf[:n]))
		// Bug fix: check the WriteTo error immediately; the original
		// checked it only after calling parseUdp and appending, so the
		// interleaved checks were confusingly out of order.
		_, err = udp.WriteTo([]byte("ack"), raddr)
		slflog.FatalErr(err, "WriteTo err")
		judge, err := parseUdp(string(buf[:n]))
		slflog.FatalErr(err, "")
		judgeList = append(judgeList, *judge)
	}
}
// parseUdp converts a heartbeat payload into a JudgeInfo.
// NOTE(review): this is a stub — it currently ignores data and returns a
// zero-value JudgeInfo with a nil error; confirm the wire format and
// implement the actual parsing.
func parseUdp(data string) (*JudgeInfo, error) {
	judge := new(JudgeInfo)
	return judge, nil
}
|
package interaction
import (
"fmt"
"github.com/hlandau/xlog"
)
// log is the package logger; Log is the exported handle for configuring it.
var log, Log = xlog.New("acme.interactor")

// NonInteractive, when true, forbids prompting the user (dialog or stdio).
var NonInteractive = false

// autoInteractor tries canned responses first, then falls back to the
// Interceptor, a dialog, and finally stdio.
type autoInteractor struct{}

// Auto is the default Interactor implementing the fallback chain above.
var Auto Interactor = autoInteractor{}

// Interceptor, if non-nil, is consulted before dialog/stdio prompting.
var Interceptor Interactor

// NoDialog, when true, skips GUI dialog prompting and goes straight to stdio.
var NoDialog = false
// Prompt answers the challenge via the canned auto-responder when possible,
// then falls back in order to the Interceptor, a GUI dialog, and stdio.
func (autoInteractor) Prompt(c *Challenge) (*Response, error) {
	r, err := Responder.Prompt(c)
	if err == nil || c.Implicit {
		// Either the auto-responder succeeded, or the challenge is
		// implicit and must never reach the user.
		return r, err
	}
	log.Infoe(err, "interaction auto-responder couldn't give a canned response")
	if NonInteractive {
		return nil, fmt.Errorf("cannot prompt the user: currently non-interactive")
	}
	if Interceptor != nil {
		return Interceptor.Prompt(c)
	}
	if !NoDialog {
		r, err := Dialog.Prompt(c)
		if err == nil {
			return r, nil
		}
		// Dialog failure falls through to the stdio prompt below.
	}
	return Stdio.Prompt(c)
}
// dummySink is a no-op StatusSink used when status cannot be displayed.
type dummySink struct{}

// Close implements StatusSink; it does nothing.
func (dummySink) Close() error {
	return nil
}

// SetProgress implements StatusSink; progress updates are discarded.
func (dummySink) SetProgress(n, ofM int) {
}

// SetStatusLine implements StatusSink; the status line is discarded.
func (dummySink) SetStatusLine(status string) {
}
// Status returns a StatusSink for progress reporting, preferring the
// Interceptor, then a dialog, then stdio; in non-interactive mode a no-op
// sink is returned.
func (autoInteractor) Status(info *StatusInfo) (StatusSink, error) {
	if NonInteractive {
		return dummySink{}, nil
	}
	if Interceptor != nil {
		s, err := Interceptor.Status(info)
		if err != nil {
			// NOTE(review): the Interceptor's error is swallowed and a
			// no-op sink is returned — confirm this best-effort behavior
			// is intentional rather than falling through to dialog/stdio.
			return dummySink{}, nil
		}
		return s, err
	}
	if !NoDialog {
		r, err := Dialog.Status(info)
		if err == nil {
			return r, nil
		}
	}
	return Stdio.Status(info)
}
// © 2015 Hugo Landau <hlandau@devever.net> MIT License
|
package main
import (
"fmt"
"learngo/book/closure/recursionClosure"
)
// main prints the result of recursionClosure.FibonacciPre(8) as a small
// demo of the recursive-closure example package.
func main() {
	//closureBase.Test01()
	fmt.Println(
		recursionClosure.FibonacciPre(8),
	)
}
|
package openrtb_ext
// ExtImpTappx holds the Tappx-specific impression extension parameters
// (deserialized from imp.ext in the bid request, per the json tags).
type ExtImpTappx struct {
	Host     string   `json:"host,omitempty"` //DEPRECATED
	TappxKey string   `json:"tappxkey"`
	Endpoint string   `json:"endpoint"`
	BidFloor float64  `json:"bidfloor,omitempty"`
	Mktag    string   `json:"mktag,omitempty"`
	Bcid     []string `json:"bcid,omitempty"`
	Bcrid    []string `json:"bcrid,omitempty"`
}
|
package core
import (
"net/http"
"time"
"github.com/jzaikovs/core/loggy"
"github.com/jzaikovs/t"
)
// Router is interface for implement specific routing engines,
// for example we have module user which handles users, we can then port
// module across other projects that uses core.
type Router interface {
	// Get/Post/Put/Delete register a RouteFunc for the given pattern and
	// HTTP method, returning the Route for further configuration.
	Get(string, RouteFunc) *Route
	Post(string, RouteFunc) *Route
	Put(string, RouteFunc) *Route
	Delete(string, RouteFunc) *Route
	// main routing function; reports whether a route matched.
	Route(context Context) bool
	// Handle registers a plain http.Handler under the given pattern.
	Handle(pattern string, handler http.Handler)
}
// defaultRouter is the built-in Router: a flat list of routes matched in
// registration order.
type defaultRouter struct {
	routes []*Route
}

// NewRouter is constructor for creating router instance for default core router
func NewRouter() Router {
	return &defaultRouter{routes: make([]*Route, 0)}
}
// Route is the main method for dispatching routes.
// It scans registered routes in order, matching method and URI pattern,
// and invokes the first match. Returns true if a route was found.
func (router *defaultRouter) Route(context Context) bool {
	//loggy.Log("ROUTE", context.RemoteAddr(), context.Method(), context.RequestURI())
	startTime := time.Now()
	// TODO: router can be more optimized, for example dividing in buckets for each method
	// TODO: try use trie (aka prefix-tree) as routing method
	for _, r := range router.routes {
		if !r.handler && context.Method() != r.method {
			continue // skip routes with different method
		}
		matches := r.pattern.FindStringSubmatch(context.RequestURI())
		//loggy.Trace.Println(matches)
		if len(matches) == 0 {
			continue // no match, go to next
		}
		if r.handler {
			// http.Handler-backed routes bypass argument extraction.
			r.callback(context)
			return true
		}
		// create arguments from groups in route pattern
		// each group is next argument in arguments
		matches = matches[1:]
		args := make([]t.T, len(matches))
		for i, match := range matches {
			args[i] = t.T{Value: match}
		}
		// so we found our request
		r.handle(args, startTime, context)
		return true
	}
	return false
}
// addRoute creates a Route for the given method/pattern and appends it to
// the router's list; matching happens in registration order.
func (router *defaultRouter) addRoute(method, pattern string, callback RouteFunc) *Route {
	loggy.Info.Println(method, pattern)
	r := newRoute(method, pattern, callback, router)
	router.routes = append(router.routes, r)
	return r
}

// Get adds router handler for GET request
func (router *defaultRouter) Get(pattern string, callback RouteFunc) *Route {
	return router.addRoute("GET", pattern, callback)
}

// Post adds router for POST request
func (router *defaultRouter) Post(pattern string, callback RouteFunc) *Route {
	return router.addRoute("POST", pattern, callback)
}

// Put adds router for PUT request
func (router *defaultRouter) Put(pattern string, callback RouteFunc) *Route {
	return router.addRoute("PUT", pattern, callback)
}

// Delete adds router for DELETE request
func (router *defaultRouter) Delete(pattern string, callback RouteFunc) *Route {
	return router.addRoute("DELETE", pattern, callback)
}

// Handle implemented to support 3rd party packages that use http.Handler.
// The "?" method plus the handler flag make Route skip method matching.
func (router *defaultRouter) Handle(pattern string, handler http.Handler) {
	r := router.addRoute("?", pattern, func(context Context) {
		context.noFlush()
		handler.ServeHTTP(context.ResponseWriter(), context.Request())
	})
	// mark router as handler
	r.handler = true
}
|
package internal
import (
"fmt"
"io"
"os"
"os/exec"
"time"
)
func createFile(filePath, content string) (err error) {
var f *os.File
if f, err = os.Create(filePath); err != nil {
return
}
defer func() { _ = f.Close() }()
_, err = f.WriteString(content)
return
}
// execCommand is an indirection over exec.Command so tests can stub
// process creation.
var execCommand = exec.Command

// runCmd runs the named command with the given arguments, streaming its
// stdout and stderr to this process's stdout/stderr, and returns an error
// describing any failure.
func runCmd(name string, arg ...string) (err error) {
	cmd := execCommand(name, arg...)
	var (
		stderr io.ReadCloser
		stdout io.ReadCloser
	)
	if stderr, err = cmd.StderrPipe(); err != nil {
		return
	}
	defer func() {
		_ = stderr.Close()
	}()
	go func() { _, _ = io.Copy(os.Stderr, stderr) }()
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return
	}
	defer func() {
		_ = stdout.Close()
	}()
	go func() { _, _ = io.Copy(os.Stdout, stdout) }()
	if err = cmd.Run(); err != nil {
		// Bug fix: wrap with %w instead of discarding the underlying
		// error, so callers can use errors.Is/As and see the real cause.
		err = fmt.Errorf("failed to run %s: %w", cmd.String(), err)
	}
	return
}
func formatLatency(d time.Duration) time.Duration {
switch {
case d > time.Second:
return d.Truncate(time.Second / 100)
case d > time.Millisecond:
return d.Truncate(time.Millisecond / 100)
case d > time.Microsecond:
return d.Truncate(time.Microsecond / 100)
default:
return d
}
}
|
package img
import (
"github.com/urfave/cli/v2"
"os"
"github.com/768bit/promethium/api/client/images"
"github.com/go-openapi/runtime"
)
// PullImageCommand implements `pull`: each positional argument is resolved
// to a local or remote source and, when local, uploaded to the configured
// target storage via the images API.
var PullImageCommand = cli.Command{
	Name:  "pull",
	Usage: "Pull images from remote to local",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:    "target-storage",
			Aliases: []string{"t"},
			Value:   "default-local",
		},
	},
	Action: func(c *cli.Context) error {
		//get args (which is path)
		storageTarget := c.String("target-storage")
		alen := c.Args().Len()
		if alen > 0 {
			//ok lets pull these...
			for i := 0; i < alen; i++ {
				isLocal, path := EstablishPathToSource(c.Args().Get(i))
				if isLocal && path != "" {
					// NOTE(review): the opened file is never closed and a
					// "pull" command calls PushImage — confirm both are
					// intentional.
					brdr, err := os.Open(path)
					if err != nil {
						return err
					}
					params := images.NewPushImageParams()
					params.SetInFileBlob(runtime.NamedReader("inFileBlob", brdr))
					params.SetTargetStorage(&storageTarget)
					resp, err := ApiCli.Images.PushImage(params)
					if err != nil {
						return err
					}
					println(resp.Error())
				} else {
					println("Remote Path", path)
				}
			}
		}
		return nil
	},
}
|
package main
import (
"crypto/hmac"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/cwillia9/ez-ftp/authentication"
"github.com/cwillia9/ez-ftp/datastore"
"github.com/cwillia9/ez-ftp/domain"
)
/*
This whole file needs to be refactored to not have all of the individual handlers have so much
logic. This effectively makes them untestable
*/
// TODO(cwilliams): Make this better. A proper HMAC implementation should use a signing string using
// something like the following
// StringToSign = HTTP-Verb + "\n" +
// Content-MD5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedAmzHeaders +
// CanonicalizedResource;
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
//
// For now the authentication will just use a public key and shared secret
// hmacAuthentication wraps fn with a shared-secret check: the Authorization
// header must be "<public_key>:<signature>", the key must exist in the
// datastore, and the signature must equal ComputeHmac1(key, passhash).
// Failures respond 401 and never invoke fn.
func hmacAuthentication(fn func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		auth, ok := r.Header["Authorization"]
		if ok == false {
			http.Error(w, "Authorization required", http.StatusUnauthorized)
			log.Println("upload rejected: no Authorization given")
			return
		}
		authstring := auth[0]
		// auth should look like <public_key>:<signature>
		split := strings.Split(authstring, ":")
		if len(split) != 2 {
			http.Error(w, "Malformed Authorization", http.StatusUnauthorized)
			log.Println("upload rejected: malformed Authorization. Authorization: " + authstring)
			return
		}
		key, actualEncoding := split[0], split[1]
		passhash, err := datastore.SelectUser(key)
		if err != nil {
			http.Error(w, "No match found for public key "+key, http.StatusUnauthorized)
			log.Println("upload rejected: user not found. user: " + key)
			return
		}
		expectation := authentication.ComputeHmac1(key, passhash)
		// hmac.Equal compares in constant time to avoid timing attacks.
		if match := hmac.Equal([]byte(actualEncoding), []byte(expectation)); match == false {
			http.Error(w, "Authorization didn't match", http.StatusUnauthorized)
			log.Println("upload rejected: supplied mac encoding did not match expected for user " + key)
			return
		}
		fn(w, r)
	}
}
// downloadHandler serves the file registered under the trailing UUID path
// segment of a GET request, as an attachment.
func downloadHandler(w http.ResponseWriter, r *http.Request, system domain.FileSystem) {
	if r.Method != "GET" {
		http.Error(w, "Only GET requests accepted on dl", http.StatusMethodNotAllowed)
		return
	}
	splt := strings.Split(r.URL.Path, "/")
	uuid := splt[len(splt)-1]
	rootDir, path, err := datastore.SelectFile(uuid)
	if err != nil {
		// TODO(cwilliams): Doesn't exist or did we get a db err?
		// Bug fix: the original wrote to w with fmt.Fprintf before calling
		// http.Error, which committed a 200 status and corrupted the 404
		// response; log the detail server-side instead.
		log.Println("Record doesn't exist for uuid: " + uuid)
		http.Error(w, "Invalid request", http.StatusNotFound)
		return
	}
	if rootDir != cfg.RootDir {
		log.Println("Attempting to download from from wrong root dir. Attempted", rootDir, "but running", cfg.RootDir)
		http.Error(w, "Invalid request", http.StatusNotFound)
		return
	}
	f, err := system.Open(path, os.O_RDONLY)
	if err != nil {
		log.Println("Error opening file", err)
		http.Error(w, "Error retreiving file", http.StatusInternalServerError)
		return
	}
	defer f.Close()
	log.Println("Serving file: " + path)
	_, file := filepath.Split(path)
	w.Header().Set("Content-Disposition", "attachment; filename="+file)
	http.ServeFile(w, r, f.Name())
}
// uploadHandler accepts a multipart POST/PUT with an "uploadfile" part and
// a "path" form value, stores the file under cfg.RootDir/<path>/ without
// overwriting existing files, registers it in the datastore, and responds
// with the new download UUID.
func uploadHandler(w http.ResponseWriter, r *http.Request, fs domain.FileSystem) {
	// Must be using a POST/PUT method
	if r.Method != "POST" && r.Method != "PUT" {
		http.Error(w, "Only POST requests accepted on /ul/", http.StatusMethodNotAllowed)
		return
	}
	// We expect the file to be called 'uploadfile'
	file, handler, err := r.FormFile("uploadfile")
	if err != nil {
		log.Println(err)
		http.Error(w, "Expected uploadfile", http.StatusExpectationFailed)
		return
	}
	defer file.Close()
	// TODO(cwilliams): Eventually we want to expose
	// some kind of admin api to view directory structure
	// Note: the call to FormFile above would already have parsed the form
	desiredPaths, ok := r.MultipartForm.Value["path"]
	if !ok {
		log.Println("No path specified")
		http.Error(w, "expected path", http.StatusExpectationFailed)
		return
	}
	// We only support a single path
	desiredPath := desiredPaths[0]
	newfile := path.Join(desiredPath, handler.Filename)
	if err = os.MkdirAll(path.Join(cfg.RootDir, desiredPath), 0777); err != nil {
		http.Error(w, "path failure", http.StatusInternalServerError)
		log.Println(err)
		return
	}
	// O_EXCL ensures that if the file already exists we will not overwrite it
	f, err := fs.Open(newfile, os.O_WRONLY|os.O_CREATE|os.O_EXCL)
	if err != nil {
		if os.IsExist(err) {
			// TODO(cwilliams): Add 'overwrite' flag functionality
			// Bug fix: log.Printf was called with a non-constant string
			// that is not a format string; use Println.
			log.Println("Tried creating file that already exists: " + newfile)
			http.Error(w, "File already exists", http.StatusConflict)
			return
		}
		log.Println(err)
		// This sucks....why couldn't we open a file?
		http.Error(w, "Oops....", http.StatusInternalServerError)
		return
	}
	defer f.Close()
	if _, err = io.Copy(f, file); err != nil && err != io.EOF {
		log.Println(err)
		http.Error(w, "Try again soon", http.StatusInternalServerError)
		return
	}
	// Bug fix: success logs previously reported cfg.RootDir/<filename>,
	// omitting the desired sub-path actually used.
	log.Println("Successfully wrote file to: " + path.Join(cfg.RootDir, newfile))
	randID := randomString(32)
	err = datastore.InsertFile(randID, cfg.RootDir, newfile)
	// TODO(cwilliams): Did we fail because the entry is already there?
	// TODO(cwilliams): Did we fail beause the random string already exists? If so we should try again
	if err != nil {
		log.Println(err)
		http.Error(w, "Try again soon", http.StatusInternalServerError)
		// Bug fix: the original fell through here and still reported
		// success and a UUID to the client after a datastore failure.
		return
	}
	log.Printf("Successfully stored new file %s\n", newfile)
	fmt.Fprint(w, "new uuid: "+randID)
}
// makeFsHandler adapts a handler that requires a domain.FileSystem into a
// plain http.HandlerFunc by capturing fs in a closure.
func makeFsHandler(fn func(http.ResponseWriter, *http.Request, domain.FileSystem), fs domain.FileSystem) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fn(w, r, fs)
	}
}
// randomString returns a random string of length strlen drawn from
// lowercase letters and digits. It is not cryptographically secure.
//
// Bug fix: the original re-seeded the shared global math/rand source on
// every call, mutating global state; use a locally seeded generator.
func randomString(strlen int) string {
	rnd := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
	result := make([]byte, strlen)
	for i := range result {
		result[i] = chars[rnd.Intn(len(chars))]
	}
	return string(result)
}
|
package pkg
import (
"errors"
"github.com/divoc/portal-api/swagger_gen/models"
log "github.com/sirupsen/logrus"
)
// GetFacilityUsers returns the users of the facility encoded in the
// caller's bearer token. It fails with "unauthorized" when the token
// carries no facility code.
func GetFacilityUsers(authHeader string) ([]*models.FacilityUser, error) {
	bearerToken, err := getToken(authHeader)
	if err != nil {
		// Bug fix: this error was previously overwritten by the
		// getClaimBody call without ever being checked.
		log.Errorf("Error while extracting token from auth header")
		return nil, err
	}
	claimBody, err := getClaimBody(bearerToken)
	if err != nil {
		log.Errorf("Error while parsing token : %s", bearerToken)
		return nil, err
	}
	if claimBody.FacilityCode == "" {
		return nil, errors.New("unauthorized")
	}
	return getFacilityUsers(claimBody.FacilityCode)
}
// CreateFacilityUser creates a keycloak user bound to the facility encoded
// in the caller's bearer token and maps the user into the requested group.
func CreateFacilityUser(user *models.FacilityUser, authHeader string) error {
	bearerToken, err := getToken(authHeader)
	if err != nil {
		// Bug fix: this error was previously overwritten by the
		// getClaimBody call without ever being checked.
		log.Errorf("Error while extracting token from auth header")
		return err
	}
	claimBody, err := getClaimBody(bearerToken)
	if err != nil {
		log.Errorf("Error while parsing token : %s", bearerToken)
		return err
	}
	userRequest := KeyCloakUserRequest{
		Username: user.MobileNumber,
		Enabled:  "true",
		Attributes: KeycloakUserAttributes{
			MobileNumber: []string{user.MobileNumber},
			EmployeeID:   user.EmployeeID,
			FullName:     user.Name,
			FacilityCode: claimBody.FacilityCode,
		},
	}
	resp, err := CreateKeycloakUser(userRequest)
	if err != nil || !isUserCreatedOrAlreadyExists(resp) {
		log.Errorf("Error while creating keycloak user : %s", user.MobileNumber)
		return err
	}
	// Bug fix: resp was previously dereferenced for logging before the
	// error check, which could panic on a nil response.
	log.Info("Created keycloak user ", resp.Response().StatusCode, " ", resp.String())
	log.Info("Setting up roles for the user ", user.MobileNumber)
	keycloakUserId := getKeycloakUserId(resp, userRequest)
	if keycloakUserId != "" {
		_ = addUserToGroup(keycloakUserId, user.Groups[0].ID)
	} else {
		log.Error("Unable to map keycloak user id for ", user.MobileNumber)
	}
	return nil
}
// GetFacilityGroups returns the user groups configured for facilities.
func GetFacilityGroups() ([]*models.UserGroup, error) {
	return getUserGroups("facility")
}
|
package ga
import (
"encoding/gob"
"github.com/pasqualesalza/amqpga/util"
)
// Fitness values and vector chromosomes for each supported element type.
// Each *FitnessValue implements Less for comparing against another value of
// the same concrete type (the type assertion panics on mismatch), and each
// *VectorChromosomeInitialization builds a random chromosome of the given
// size with elements drawn from [min, max] via the util.Random*InRange
// helpers.

// ByteFitnessValue is a byte-typed fitness score.
type ByteFitnessValue int

// Less reports whether fitnessValue is smaller than other (which must be a
// ByteFitnessValue).
func (fitnessValue ByteFitnessValue) Less(other FitnessValue) bool {
	return fitnessValue < other.(ByteFitnessValue)
}

// ByteVectorChromosome is a chromosome of bytes.
type ByteVectorChromosome []byte

// ByteVectorChromosomeInitialization returns a random byte chromosome.
func ByteVectorChromosomeInitialization(size int, min, max byte) ByteVectorChromosome {
	chromosome := make(ByteVectorChromosome, size)
	for i := 0; i < size; i++ {
		chromosome[i] = util.RandomByteInRange(min, max)
	}
	return chromosome
}

// IntFitnessValue is an int-typed fitness score.
type IntFitnessValue int

// Less reports whether fitnessValue is smaller than other (an IntFitnessValue).
func (fitnessValue IntFitnessValue) Less(other FitnessValue) bool {
	return fitnessValue < other.(IntFitnessValue)
}

// IntVectorChromosome is a chromosome of ints.
type IntVectorChromosome []int

// IntVectorChromosomeInitialization returns a random int chromosome.
func IntVectorChromosomeInitialization(size int, min, max int) IntVectorChromosome {
	chromosome := make(IntVectorChromosome, size)
	for i := 0; i < size; i++ {
		chromosome[i] = util.RandomIntInRange(min, max)
	}
	return chromosome
}

// Int64FitnessValue is an int64-typed fitness score.
type Int64FitnessValue int64

// Less reports whether fitnessValue is smaller than other (an Int64FitnessValue).
func (fitnessValue Int64FitnessValue) Less(other FitnessValue) bool {
	return fitnessValue < other.(Int64FitnessValue)
}

// Int64VectorChromosome is a chromosome of int64s.
type Int64VectorChromosome []int64

// Int64VectorChromosomeInitialization returns a random int64 chromosome.
func Int64VectorChromosomeInitialization(size int, min, max int64) Int64VectorChromosome {
	chromosome := make(Int64VectorChromosome, size)
	for i := 0; i < size; i++ {
		chromosome[i] = util.RandomInt64InRange(min, max)
	}
	return chromosome
}

// Float32FitnessValue is a float32-typed fitness score.
type Float32FitnessValue float32

// Less reports whether fitnessValue is smaller than other (a Float32FitnessValue).
func (fitnessValue Float32FitnessValue) Less(other FitnessValue) bool {
	return fitnessValue < other.(Float32FitnessValue)
}

// Float32VectorChromosome is a chromosome of float32s.
type Float32VectorChromosome []float32

// Float32VectorChromosomeInitialization returns a random float32 chromosome.
func Float32VectorChromosomeInitialization(size int, min, max float32) Float32VectorChromosome {
	chromosome := make(Float32VectorChromosome, size)
	for i := 0; i < size; i++ {
		chromosome[i] = util.RandomFloat32InRange(min, max)
	}
	return chromosome
}

// Float64FitnessValue is a float64-typed fitness score.
type Float64FitnessValue float64

// Less reports whether fitnessValue is smaller than other (a Float64FitnessValue).
func (fitnessValue Float64FitnessValue) Less(other FitnessValue) bool {
	return fitnessValue < other.(Float64FitnessValue)
}

// Float64VectorChromosome is a chromosome of float64s.
type Float64VectorChromosome []float64

// Float64VectorChromosomeInitialization returns a random float64 chromosome.
func Float64VectorChromosomeInitialization(size int, min float64, max float64) Float64VectorChromosome {
	chromosome := make(Float64VectorChromosome, size)
	for i := 0; i < size; i++ {
		chromosome[i] = util.RandomFloat64InRange(min, max)
	}
	return chromosome
}
// init registers all fitness-value and chromosome types with encoding/gob
// so they can travel inside interface-typed fields over the wire.
func init() {
	gob.Register(ByteFitnessValue(0))
	gob.Register(ByteVectorChromosome{})
	gob.Register(IntFitnessValue(0))
	gob.Register(IntVectorChromosome{})
	gob.Register(Int64FitnessValue(0))
	gob.Register(Int64VectorChromosome{})
	gob.Register(Float32FitnessValue(0.0))
	gob.Register(Float32VectorChromosome{})
	gob.Register(Float64FitnessValue(0.0))
	gob.Register(Float64VectorChromosome{})
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rpc
import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"google.golang.org/grpc"
)
// ContextTestingKnobs provides hooks to aid in testing the system. The testing
// knob functions are called at various points in the Context life cycle if they
// are non-nil.
type ContextTestingKnobs struct {
	// UnaryClientInterceptor if non-nil will be called at dial time to provide
	// the base unary interceptor for client connections.
	// This function may return a nil interceptor to avoid injecting behavior
	// for a given target and class.
	UnaryClientInterceptor func(target string, class ConnectionClass) grpc.UnaryClientInterceptor
	// StreamClientInterceptor if non-nil will be called at dial time to provide
	// the base stream interceptor for client connections.
	// This function may return a nil interceptor to avoid injecting behavior
	// for a given target and class.
	StreamClientInterceptor func(target string, class ConnectionClass) grpc.StreamClientInterceptor
	// ArtificialLatencyMap if non-nil contains a map from target address
	// (server.RPCServingAddr() of a remote node) to artificial latency in
	// milliseconds to inject. Setting this will cause the server to pause for
	// the given amount of milliseconds on every network write.
	ArtificialLatencyMap map[string]int
	// ClusterID initializes the Context's ClusterID container to this value if
	// non-nil at construction time.
	ClusterID *uuid.UUID
}
// NewInsecureTestingContext creates an insecure rpc Context suitable for tests.
// A fresh random cluster ID is generated for it.
func NewInsecureTestingContext(clock *hlc.Clock, stopper *stop.Stopper) *Context {
	clusterID := uuid.MakeV4()
	return NewInsecureTestingContextWithClusterID(clock, stopper, clusterID)
}

// NewInsecureTestingContextWithClusterID creates an insecure rpc Context
// suitable for tests. The context is given the provided cluster ID.
func NewInsecureTestingContextWithClusterID(
	clock *hlc.Clock, stopper *stop.Stopper, clusterID uuid.UUID,
) *Context {
	return NewInsecureTestingContextWithKnobs(clock, stopper, ContextTestingKnobs{
		ClusterID: &clusterID,
	})
}

// NewInsecureTestingContextWithKnobs creates an insecure rpc Context
// suitable for tests configured with the provided knobs. It runs as the
// system tenant with testing cluster settings.
func NewInsecureTestingContextWithKnobs(
	clock *hlc.Clock, stopper *stop.Stopper, knobs ContextTestingKnobs,
) *Context {
	return NewContext(ContextOptions{
		TenantID:   roachpb.SystemTenantID,
		AmbientCtx: log.AmbientContext{Tracer: tracing.NewTracer()},
		Config:     &base.Config{Insecure: true},
		Clock:      clock,
		Stopper:    stopper,
		Settings:   cluster.MakeTestingClusterSettings(),
		Knobs:      knobs,
	})
}
|
package main
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/vanhtuan0409/copperhead"
)
// Config is the demo application's configuration, populated by copperhead
// from the sources described by the field tags.
type Config struct {
	HttpPort int           `mapstructure:"http_port" cli:"port" default:"8080" description:"HTTP binding port"`
	Timeout  time.Duration `mapstructure:"timeout" default:"5s" description:"HTTP request timeout"`
}
// String renders the configuration as tab-indented JSON. Marshalling
// errors are ignored and yield an empty string.
func (c *Config) String() string {
	out, _ := json.MarshalIndent(c, "", "\t")
	return string(out)
}
// main populates a Config via copperhead and prints it.
func main() {
	cfg := Config{}
	// NOTE(review): Unmarshal's return value is discarded — confirm
	// whether it returns an error that should be handled here.
	copperhead.Unmarshal(&cfg, reflect.TypeOf(cfg), copperhead.ConfigOptions{})
	fmt.Println(cfg.String())
}
|
package genconfig
import (
"bytes"
"encoding/base64"
"text/template"
uuid "github.com/satori/go.uuid"
)
// appleConfigTmpl is compiled once at package init so each call to
// GenerateAppleConfig only pays for template execution, not parsing, and an
// invalid template fails fast at startup instead of on first use.
var appleConfigTmpl = template.Must(template.New("mobileconfig").Parse(mobileConfigTemplate))

// GenerateAppleConfig renders the Apple .mobileconfig profile template with
// the given connection details. The private key, CA certificate and server
// certificate are base64-encoded for embedding; six fresh random UUIDs are
// generated for the profile payload identifiers. Returns the rendered
// document, or an error if template execution fails.
func GenerateAppleConfig(ip, name, password, privateKey, caCert, serverCert string) (string, error) {
	// b64 embeds arbitrary PEM/text material as standard base64.
	b64 := func(s string) string {
		return base64.StdEncoding.EncodeToString([]byte(s))
	}
	tmplData := struct {
		IP                 string
		Name               string
		PrivateKey         string
		PrivateKeyPassword string
		CACert             string
		ServerCert         string
		UUID1              string
		UUID2              string
		UUID3              string
		UUID4              string
		UUID5              string
		UUID6              string
	}{
		IP:                 ip,
		Name:               name,
		PrivateKeyPassword: password,
		PrivateKey:         b64(privateKey),
		CACert:             b64(caCert),
		ServerCert:         b64(serverCert),
		UUID1:              uuid.NewV4().String(),
		UUID2:              uuid.NewV4().String(),
		UUID3:              uuid.NewV4().String(),
		UUID4:              uuid.NewV4().String(),
		UUID5:              uuid.NewV4().String(),
		UUID6:              uuid.NewV4().String(),
	}
	var buf bytes.Buffer
	if err := appleConfigTmpl.Execute(&buf, tmplData); err != nil {
		return "", err
	}
	return buf.String(), nil
}
|
// Copyright 2018 The Cacophony Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cptv
import (
"io"
"time"
"github.com/TheCacophonyProject/lepton3"
)
// NewWriter returns a Writer that emits CPTV data to w.
func NewWriter(w io.Writer) *Writer {
	writer := &Writer{
		bldr: NewBuilder(w),
		comp: NewCompressor(),
	}
	return writer
}
// Writer uses a Builder and Compressor to create CPTV files.
type Writer struct {
	bldr *Builder    // serializes header and frame fields to the output
	comp *Compressor // compresses successive frames
	t0   time.Time   // recording start; set by WriteHeader, used for frame offsets
}
// WriteHeader records the recording start time and emits the CPTV header
// fields (timestamp, frame resolution, compression scheme, and — when
// non-empty — the device name).
func (w *Writer) WriteHeader(deviceName string) error {
	w.t0 = time.Now()
	fields := NewFieldWriter()
	fields.Timestamp(Timestamp, w.t0)
	fields.Uint32(XResolution, lepton3.FrameCols)
	fields.Uint32(YResolution, lepton3.FrameRows)
	fields.Uint8(Compression, 1)
	// The device name field is optional.
	if deviceName != "" {
		if err := fields.String(DeviceName, deviceName); err != nil {
			return err
		}
	}
	return w.bldr.WriteHeader(fields)
}
// WriteFrame compresses frame and writes it out together with its field
// metadata (time offset since the header, bit width, compressed size).
func (w *Writer) WriteFrame(frame *lepton3.Frame) error {
	elapsedNs := uint64(time.Since(w.t0)) // nanoseconds since WriteHeader
	bitWidth, compFrame := w.comp.Next(frame)
	fields := NewFieldWriter()
	fields.Uint32(Offset, uint32(elapsedNs/1000)) // stored in microseconds
	fields.Uint8(BitWidth, uint8(bitWidth))
	fields.Uint32(FrameSize, uint32(len(compFrame)))
	return w.bldr.WriteFrame(fields, compFrame)
}
// Close finalizes the CPTV stream by closing the underlying Builder.
func (w *Writer) Close() error {
	return w.bldr.Close()
}
|
package cmd
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/tinyzimmer/k3p/pkg/types"
"github.com/tinyzimmer/k3p/pkg/util"
)
// generatedTokenLength is the length of tokens produced by "k3p token generate"
// (bound to the --length flag in init).
var generatedTokenLength int
// init registers the --length flag and wires the token subcommands into the
// root command.
func init() {
	tokenGenerateCmd.Flags().IntVarP(&generatedTokenLength, "length", "l", 128, "The length of the token to generate")
	tokenCmd.AddCommand(tokenGetCmd)
	tokenCmd.AddCommand(tokenGenerateCmd)
	rootCmd.AddCommand(tokenCmd)
}
// tokenCmd is the parent "token" command grouping the get/generate subcommands.
var tokenCmd = &cobra.Command{
	Use:   "token",
	Short: "Token retrieval and generation commands",
}
// tokenGetCmd retrieves a k3s join token ("agent" or "server") from the
// local filesystem and prints it to stdout.
var tokenGetCmd = &cobra.Command{
	Use:   "get TOKEN_TYPE",
	Short: "Retrieve a k3s token",
	Long: `
Retrieves the token for joining either a new "agent" or "server" to the cluster.
The "agent" token can be retrieved from any of the server instances, while the "server" token
can only be retrieved on the server where "k3p install" was run with "--init-ha".
`,
	Args:      cobra.ExactValidArgs(1),
	ValidArgs: []string{"agent", "server"},
	RunE: func(cmd *cobra.Command, args []string) error {
		// Both token types share the same read/trim/print path; only the
		// file and the "missing file" message differ.
		switch args[0] {
		case "agent":
			return printTokenFile(types.AgentTokenFile, "The K3s server does not appear to be installed to the system")
		case "server":
			return printTokenFile(types.ServerTokenFile, "This system does not appear to have been initialized with --init-ha")
		}
		return nil
	},
}

// printTokenFile reads the token at path and prints it with surrounding
// whitespace trimmed. When the file does not exist, notExistMsg is returned
// as the error text; any other read error is returned as-is.
func printTokenFile(path, notExistMsg string) error {
	token, err := ioutil.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return errors.New(notExistMsg)
		}
		return err
	}
	fmt.Println(strings.TrimSpace(string(token)))
	return nil
}
// tokenGenerateCmd prints a freshly generated random token of
// generatedTokenLength characters, for initializing HA installations.
var tokenGenerateCmd = &cobra.Command{
	Use:   "generate",
	Short: "Generates a token that can be used for initializing HA installations",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println(util.GenerateToken(generatedTokenLength))
	},
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package dlp
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/dlp/clipboard"
"chromiumos/tast/local/bundles/cros/dlp/policy"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the DLP clipboard/omnibox test with the tast framework:
// an Ash variant plus an informational Lacros variant.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DataLeakPreventionRulesListClipboardOmni,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Test behavior of DataLeakPreventionRulesList policy with clipboard blocked restriction with omni box",
		Contacts: []string{
			"ayaelattar@google.com",
			"chromeos-dlp@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline"},
		Data:         []string{"text_1.html", "text_2.html"},
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraAttr:         []string{"informational"},
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}},
	})
}
// DataLeakPreventionRulesListClipboardOmni verifies that pasting copied page
// content into the omnibox is blocked or allowed according to the
// DataLeakPreventionRulesList policy, depending on the source site.
func DataLeakPreventionRulesListClipboardOmni(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fakeDMS := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Reserve ten seconds of the deadline for cleanup work (faillog dump,
	// browser close) which runs on cleanupCtx.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	// Two servers serving the same test data: one whose URL the policy
	// blocks, and one left unrestricted.
	allowedServer := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer allowedServer.Close()
	blockedServer := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer blockedServer.Close()
	if err := policyutil.ServeAndVerify(ctx, fakeDMS, cr, policy.RestrictiveDLPPolicyForClipboard(blockedServer.URL)); err != nil {
		s.Fatal("Failed to serve and verify the DLP policy: ", err)
	}
	// Connect to Test API.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	keyboard, err := input.VirtualKeyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer keyboard.Close()
	s.Log("Waiting for chrome.clipboard API to become available")
	if err := tconn.WaitForExpr(ctx, "chrome.clipboard"); err != nil {
		s.Fatal("Failed to wait for chrome.clipboard API to become available: ", err)
	}
	for _, param := range []struct {
		name        string
		wantAllowed bool
		sourceURL   string
	}{
		{
			name:        "wantDisallowed",
			wantAllowed: false,
			sourceURL:   blockedServer.URL + "/text_1.html",
		},
		{
			name:        "wantAllowed",
			wantAllowed: true,
			sourceURL:   allowedServer.URL + "/text_2.html",
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			if err := cr.ResetState(ctx); err != nil {
				s.Fatal("Failed to reset the Chrome: ", err)
			}
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			conn, err := br.NewConn(ctx, param.sourceURL)
			if err != nil {
				s.Fatalf("Failed to open page %q: %v", param.sourceURL, err)
			}
			defer conn.Close()
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			if err := webutil.WaitForQuiescence(ctx, conn, 10*time.Second); err != nil {
				s.Fatalf("Failed to wait for %q to be loaded and achieve quiescence: %s", param.sourceURL, err)
			}
			// Copy the whole page so the clipboard carries data attributed
			// to the source URL.
			if err := uiauto.Combine("copy all text from source website",
				keyboard.AccelAction("Ctrl+A"),
				keyboard.AccelAction("Ctrl+C"))(ctx); err != nil {
				s.Fatal("Failed to copy text from source browser: ", err)
			}
			if err := rightClickOmnibox(ctx, tconn, param.sourceURL, param.wantAllowed); err != nil {
				s.Fatal("Failed to right click omni box: ", err)
			}
			// Lacros variant doesn't work correctly without dismissing the right click menu first (it doesn't react to "Ctrl+T").
			if err := uiauto.Combine("open a new tab",
				keyboard.AccelAction("Esc"), // Dismiss the right click menu.
				keyboard.AccelAction("Ctrl+T"))(ctx); err != nil {
				s.Fatal("Failed to press Ctrl+T to open a new tab: ", err)
			}
			// NOTE(review): the hostname handed to pasteOmnibox is always the
			// blocked server's, even in the allowed case — presumably only the
			// block-bubble text references it; confirm against pasteOmnibox.
			parsedSourceURL, err := url.Parse(blockedServer.URL)
			if err != nil {
				s.Fatal("Failed to parse blocked server url: ", err)
			}
			if err := pasteOmnibox(ctx, tconn, keyboard, parsedSourceURL.Hostname(), param.wantAllowed); err != nil {
				s.Fatal("Failed to paste content in omni box: ", err)
			}
		})
	}
}
// rightClickOmnibox right-clicks the omnibox (address bar) and checks the
// context menu's Paste entry against the DLP expectation: when wantAllowed
// is false the Paste entry should be greyed out, and no clipboard DLP bubble
// should ever appear on right click.
func rightClickOmnibox(ctx context.Context, tconn *chrome.TestConn, url string, wantAllowed bool) error {
	ui := uiauto.New(tconn)
	addressBar := nodewith.HasClass("OmniboxViewViews")
	if err := ui.RightClick(addressBar)(ctx); err != nil {
		return errors.Wrap(err, "failed to right click omni box")
	}
	// CheckGreyPasteNode appears to return nil when a greyed-out Paste node
	// is found — TODO confirm against the clipboard helper package.
	err := clipboard.CheckGreyPasteNode(ctx, ui)
	if err != nil && !wantAllowed {
		return err
	}
	if err == nil && wantAllowed {
		return errors.New("Paste node found greyed, expected focusable")
	}
	// Clipboard DLP bubble is never expected on right click.
	err = clipboard.CheckClipboardBubble(ctx, ui, url)
	if err == nil {
		return errors.New("Notification found, expected none")
	}
	return nil
}
// pasteOmnibox focuses the omnibox, pastes the clipboard into it, and checks
// for the clipboard DLP bubble: the bubble (referencing url) is expected
// exactly when the paste is disallowed.
func pasteOmnibox(ctx context.Context, tconn *chrome.TestConn, keyboard *input.KeyboardEventWriter, url string, wantAllowed bool) error {
	ui := uiauto.New(tconn)
	// Select the Omnibox & paste in it.
	if err := uiauto.Combine("Paste content in Omnibox",
		keyboard.AccelAction("Ctrl+L"),
		keyboard.AccelAction("Ctrl+V"))(ctx); err != nil {
		return errors.Wrap(err, "failed to paste content in Omnibox")
	}
	// CheckClipboardBubble appears to return nil when the bubble is present —
	// TODO confirm against the clipboard helper package.
	err := clipboard.CheckClipboardBubble(ctx, ui, url)
	if err != nil && !wantAllowed {
		return err
	}
	if err == nil && wantAllowed {
		return errors.New("Notification found, expected none")
	}
	return nil
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testflow
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
"github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1/validation"
"github.com/gardener/test-infra/pkg/testmachinery/locations"
)
// Validate validates a testrun and all its subcomponents.
// In addition to the default validation it also validates the test locations.
// The second return value reports whether the operation can be retried.
// todo: refactor this to use better errors
func Validate(fldPath *field.Path, tf tmv1beta1.TestFlow, locs locations.Locations, ignoreEmptyFlow bool) (field.ErrorList, bool) {
	var (
		usedTestdefinitions = 0
		allErrs             field.ErrorList
		retry               bool
	)
	for i, step := range tf {
		stepPath := fldPath.Index(i)
		testDefinitions, err := locs.GetTestDefinitions(step.Definition)
		if err != nil {
			allErrs = append(allErrs, field.InternalError(stepPath.Child("definition"), err))
			retry = true
			continue
		}
		// Fail this step if no TestDefinitions were found for it.
		if len(testDefinitions) == 0 && !ignoreEmptyFlow {
			allErrs = append(allErrs, field.Required(stepPath.Child("definition"), "no TestDefinitions found for step"))
			retry = true
			continue
		}
		for _, td := range testDefinitions {
			tdPath := stepPath.Child(fmt.Sprintf("Location: %q; File: %q", td.Location.Name(), td.FileName))
			allErrs = append(allErrs, validation.ValidateTestDefinition(tdPath, td.Info)...)
		}
		usedTestdefinitions += len(testDefinitions)
	}
	// Check that at least one testdefinition would be executed; fail if none.
	if !ignoreEmptyFlow && usedTestdefinitions == 0 {
		allErrs = append(allErrs, field.Invalid(fldPath, nil, "no testdefinitions found"))
		retry = true
	}
	return allErrs, retry
}
|
// Copyright 2021 Dataptive SAS.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package recio
import "io"
// BufferedReader implements buffered decoding of records from an io.Reader.
type BufferedReader struct {
	reader   io.Reader // underlying source
	mode     IOMode    // automatic or manual Fill behavior
	buffer   []byte    // internal staging buffer
	buffered int       // number of valid bytes currently in buffer
	offset   int       // position of the next undecoded byte in buffer
	eof      bool      // reader has returned io.EOF on a previous Fill
	mustFill bool      // a Fill is required before the next decode attempt
}
// NewBufferedReader returns a new BufferedReader whose buffer has the
// specified size. In ModeManual, a Read that would require a blocking Fill
// from the underlying io.Reader returns err == ErrMustFill, and the caller
// must invoke Fill before retrying. In ModeAuto the reader fills its
// internal buffer transparently.
func NewBufferedReader(r io.Reader, size int, mode IOMode) *BufferedReader {
	// All counters and flags start at their zero values.
	return &BufferedReader{
		reader: r,
		mode:   mode,
		buffer: make([]byte, size),
	}
}
// Read decodes one record into v. If the reader's internal buffer does not
// contain enough data to decode a complete record, either it is automatically
// filled from the underlying io.Reader in auto mode, or Read returns with
// err == ErrMustFill in manual mode. Once all records have been read from the
// underlying io.Reader, Read fails with err == io.EOF. If EOF has been reached
// but the reader's internal buffer still contains a partial record, Read fails
// with err == io.ErrUnexpectedEOF. If a record cannot be entirely fit in the
// reader's internal buffer, Read fails with err == ErrTooLarge.
func (br *BufferedReader) Read(v Decoder) (n int, err error) {
Retry:
	if br.mustFill {
		// The buffer needs to be filled before trying to decode
		// another record.
		if br.mode == ModeManual {
			return 0, ErrMustFill
		}
		err = br.Fill()
		if err != nil {
			return 0, err
		}
	}
	if br.eof && br.offset == br.buffered {
		// We've reached EOF on a previous Fill attempt and the
		// buffered data has been fully consumed.
		return 0, io.EOF
	}
	n, err = v.Decode(br.buffer[br.offset:br.buffered])
	if err == ErrShortBuffer {
		// Unable to decode a full record.
		if br.offset == 0 && br.buffered == len(br.buffer) {
			// We've tried to decode from the start of a full
			// buffer, so it seems we won't be able to fit this
			// record in our buffer.
			return 0, ErrTooLarge
		}
		if br.eof {
			// We won't be able to read more bytes yet there's
			// a partial record left to decode.
			return 0, io.ErrUnexpectedEOF
		}
		// Fill (in auto mode) and try to decode again.
		br.mustFill = true
		goto Retry
	}
	// Advance past the consumed bytes even when the decoder errored.
	br.offset += n
	if err != nil {
		return n, err
	}
	return n, nil
}
// Fill tries to fill the reader's internal buffer by reading from the
// underlying io.Reader.
func (br *BufferedReader) Fill() (err error) {
	// Nothing can be done if the buffer is full and none of it has been
	// consumed; Read reports ErrTooLarge in that situation.
	if br.offset == 0 && br.buffered == len(br.buffer) {
		return nil
	}
	// Save what's left to consume to the start of the buffer.
	br.buffered = copy(br.buffer, br.buffer[br.offset:br.buffered])
	br.offset = 0
	n, err := br.reader.Read(br.buffer[br.buffered:])
	br.buffered += n
	if err != nil && err != io.EOF {
		return err
	}
	if err == io.EOF {
		// flag EOF in our state so we'll be able to signal it
		// when the buffer is fully consumed.
		br.eof = true
	}
	br.mustFill = false
	return nil
}
// Reset discards any buffered data, clears all decoding state, and points
// the buffered reader at r. The internal buffer is retained.
func (br *BufferedReader) Reset(r io.Reader) {
	br.reader = r
	br.buffered, br.offset = 0, 0
	br.eof, br.mustFill = false, false
}
// BufferedWriter implements buffered encoding of records to an io.Writer.
// After all records have been encoded, the caller should invoke the Flush
// method to guarantee all buffered data has been forwarded to the underlying
// io.Writer.
type BufferedWriter struct {
	writer    io.Writer // underlying sink
	mode      IOMode    // automatic or manual Flush behavior
	buffer    []byte    // internal staging buffer
	buffered  int       // number of encoded bytes pending in buffer
	mustFlush bool      // a Flush is required before the next encode attempt
}
// NewBufferedWriter returns a new BufferedWriter whose buffer has the
// specified size. In ModeManual, a Write that would require a blocking Flush
// to the underlying io.Writer returns err == ErrMustFlush, and the caller
// must then call Flush before retrying. In auto mode the writer flushes its
// internal buffer transparently.
func NewBufferedWriter(w io.Writer, size int, mode IOMode) *BufferedWriter {
	// The counter and flag start at their zero values.
	return &BufferedWriter{
		writer: w,
		mode:   mode,
		buffer: make([]byte, size),
	}
}
// Write encodes one record to the writer's internal buffer. If the buffer does
// not have enough space left to encode the complete record, either it is
// automatically flushed to the underlying io.Writer in Auto mode, or Write
// returns with err == ErrMustFlush in manual mode. If a record cannot be
// entirely fit in the writer's internal buffer, Write fails with
// err == ErrTooLarge.
func (bw *BufferedWriter) Write(v Encoder) (n int, err error) {
Retry:
	if bw.mustFlush {
		// The buffer needs to be flushed before trying to encode
		// another record.
		if bw.mode == ModeManual {
			return 0, ErrMustFlush
		}
		err = bw.Flush()
		if err != nil {
			return 0, err
		}
	}
	n, err = v.Encode(bw.buffer[bw.buffered:])
	if err == ErrShortBuffer {
		// Unable to encode a full record.
		if bw.buffered == 0 {
			// The buffer was empty, so it seems we won't be able
			// to fit this record.
			return 0, ErrTooLarge
		}
		// Flush (in auto mode) and try to encode again.
		bw.mustFlush = true
		goto Retry
	}
	// Account for the freshly encoded bytes even when the encoder errored.
	bw.buffered += n
	if err != nil {
		return n, err
	}
	return n, nil
}
// Flush writes any buffered data to the underlying io.Writer.
func (bw *BufferedWriter) Flush() (err error) {
	if bw.buffered == 0 {
		return nil
	}
	n, err := bw.writer.Write(bw.buffer[:bw.buffered])
	if n < bw.buffered {
		// We were unable to write the whole buffer to the underlying
		// io.Writer. We try to keep the state consistent and return
		// an error: the unwritten tail is moved to the buffer's start.
		copy(bw.buffer, bw.buffer[n:bw.buffered])
		bw.buffered -= n
		if err == nil {
			// This shouldn't happen if the Writer is well-behaved,
			// but we'll prevent this error from being silenced.
			return ErrShortWrite
		}
	}
	if err != nil {
		return err
	}
	bw.buffered = 0
	bw.mustFlush = false
	return nil
}
// Reset discards any unflushed data, clears all encoding state, and points
// the buffered writer at w. The internal buffer is retained.
func (bw *BufferedWriter) Reset(w io.Writer) {
	bw.writer = w
	bw.buffered = 0
	bw.mustFlush = false
}
|
package database
import (
"encoding/json"
"fmt"
"io/ioutil"
"github.com/janithl/paataka/entities"
)
// SQLPublicationRepository is an implementation of Posts repository currently using an in-memory store
// (despite the name, state is kept in a map and persisted to a JSON file).
type SQLPublicationRepository struct {
	version      string                          // repository version string
	publications map[string]entities.Publication // keyed by publication ID
	filename     string                          // JSON file used for persistence
}
// NewSQLPublicationRepository returns a repository tagged with version and
// pre-loaded with any state previously persisted to disk.
func NewSQLPublicationRepository(version string) *SQLPublicationRepository {
	repo := &SQLPublicationRepository{
		version:      version,
		publications: map[string]entities.Publication{},
		filename:     "./database/appstate.json",
	}
	repo.readFromPersistance()
	return repo
}
// GetVersion returns the version string of the repository.
func (s *SQLPublicationRepository) GetVersion() string {
	return s.version
}
// Add stores pub under its own ID and returns that ID.
func (s *SQLPublicationRepository) Add(pub entities.Publication) string {
	id := pub.ID
	s.publications[id] = pub
	return id
}
// ListAll returns all the publications in a Map (the internal map itself,
// not a copy — callers must not mutate it).
func (s *SQLPublicationRepository) ListAll() map[string]entities.Publication {
	return s.publications
}
// Persist writes the repository state to the configured JSON file.
// Errors are reported to stdout (best effort), matching readFromPersistance.
func (s *SQLPublicationRepository) Persist() {
	content, err := json.MarshalIndent(s.publications, "", " ")
	if err != nil {
		// Previously this error was silently discarded; report it and skip
		// the write rather than persisting empty/invalid content.
		fmt.Println("Error: ", err)
		return
	}
	if err := ioutil.WriteFile(s.filename, content, 0644); err != nil {
		fmt.Println("Error: ", err)
	}
}
// readFromPersistance loads repository state from the configured JSON file.
// Any failure (missing file, bad JSON) is reported to stdout and leaves the
// current in-memory state untouched.
func (s *SQLPublicationRepository) readFromPersistance() {
	content, err := ioutil.ReadFile(s.filename)
	if err != nil {
		fmt.Println("Error: ", err)
		return
	}
	var data map[string]entities.Publication
	if err := json.Unmarshal(content, &data); err != nil {
		fmt.Println("Error: ", err)
		return
	}
	s.publications = data
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flags_test
import (
"io/ioutil"
"testing"
"github.com/google/gapid/core/app/flags"
"github.com/google/gapid/core/assert"
)
// MyFlag is a custom flag.Value implementation used to exercise repeated
// user-defined flags in the tests below.
type MyFlag string

// String returns the flag's current value.
func (f *MyFlag) String() string {
	return string(*f)
}

// Set stores v as the flag's value; it never fails.
func (f *MyFlag) Set(v string) error {
	*f = MyFlag(v)
	return nil
}
// MyFlags is the flag-bound target struct for TestRepeatedParsing; each
// slice field accumulates repeated occurrences of its flag.
type MyFlags struct {
	Str    string
	Bools  []bool
	Strs   []string
	Ints   []int
	Uints  []uint
	Floats []float64
	Mine   []MyFlag
}
// args1, args2 and args3 are fixture command lines with one, two and three
// repetitions of each repeated flag, respectively.
var (
	args1 = []string{
		"-str", "foo",
		"-bools", "true",
		"-strs", "one",
		"-ints", "1",
		"-uints", "2",
		"-floats", "3.5",
		"-mine", "yours",
	}
	args2 = []string{
		"-str", "bar",
		"-bools", "true", "-bools", "false",
		"-strs", "one", "-strs", "two",
		"-ints", "1", "-ints", "4",
		"-uints", "2", "-uints", "5",
		"-floats", "3.5", "-floats", "6.7",
		"-mine", "yours", "-mine", "ours",
	}
	args3 = []string{
		"-str", "baz",
		"-bools", "true", "-bools", "false", "-bools", "false",
		"-strs", "one", "-strs", "two", "-strs", "three",
		"-ints", "1", "-ints", "4", "-ints", "7",
		"-uints", "2", "-uints", "5", "-uints", "8",
		"-floats", "3.5", "-floats", "6.7", "-floats", "9.2",
		"-mine", "yours", "-mine", "ours", "-mine", "theirs",
	}
)
// b, s, i, u and f build typed slices from their arguments — terse fixture
// helpers for the expectation tables below.
func b(bools ...bool) []bool { return bools }
func s(strs ...string) []string { return strs }
func i(ints ...int) []int { return ints }
func u(uints ...uint) []uint { return uints }
func f(floats ...float64) []float64 { return floats }
// m converts each argument string to a MyFlag; a call with no arguments
// yields a nil slice (matching the other fixture helpers).
func m(v ...string) (flags []MyFlag) {
	for _, str := range v {
		flags = append(flags, MyFlag(str))
	}
	return flags
}
// TestRepeatedParsing verifies that repeated flag occurrences accumulate
// into slice fields for every supported element type, across one, two and
// three repetitions per flag.
func TestRepeatedParsing(t *testing.T) {
	assert := assert.To(t)
	for _, cs := range []struct {
		args []string
		exp  MyFlags
	}{
		{args1, MyFlags{"foo", b(true), s("one"), i(1), u(2), f(3.5), m("yours")}},
		{args2, MyFlags{"bar", b(true, false), s("one", "two"), i(1, 4), u(2, 5), f(3.5, 6.7), m("yours", "ours")}},
		{args3, MyFlags{"baz", b(true, false, false), s("one", "two", "three"), i(1, 4, 7), u(2, 5, 8), f(3.5, 6.7, 9.2), m("yours", "ours", "theirs")}},
	} {
		// Fresh verb struct and flag set per case; usage output is silenced.
		verb := &struct{ MyFlags }{}
		flags := flags.Set{}
		flags.Raw.Usage = func() {}
		flags.Raw.SetOutput(ioutil.Discard)
		flags.Bind("", verb, "")
		err := flags.Raw.Parse(cs.args)
		assert.For("err").ThatError(err).Succeeded()
		assert.For("str").ThatString(verb.Str).Equals(cs.exp.Str)
		assert.For("bools").ThatSlice(verb.Bools).Equals(cs.exp.Bools)
		assert.For("strs").ThatSlice(verb.Strs).Equals(cs.exp.Strs)
		assert.For("ints").ThatSlice(verb.Ints).Equals(cs.exp.Ints)
		assert.For("uints").ThatSlice(verb.Uints).Equals(cs.exp.Uints)
		assert.For("floats").ThatSlice(verb.Floats).Equals(cs.exp.Floats)
		assert.For("mine").ThatSlice(verb.Mine).Equals(cs.exp.Mine)
	}
}
|
/*
Copyright 2021 The DbunderFS Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package db
import "testing"
import "github.com/kos-v/dbunderfs/internal/db"
// TestDataBlockNode_Add checks DataBlockNode.Add across offsets at, inside
// and past the end of the existing data.
func TestDataBlockNode_Add(t *testing.T) {
	cases := []struct {
		defaultData    []byte
		offset         uint64
		addData        []byte
		expectedData   []byte
		expectedAddLen int
	}{
		{[]byte{}, 0, []byte{}, []byte{}, 0},
		{[]byte{}, 0, []byte{1, 2, 3}, []byte{1, 2, 3}, 3},
		{[]byte{}, 1, []byte{1, 2, 3}, []byte{1, 2, 3}, 3},
		{[]byte{1, 2, 3}, 0, []byte{4, 5, 6}, []byte{4, 5, 6}, 3},
		{[]byte{1, 2, 3}, 1, []byte{4, 5, 6}, []byte{1, 4, 5, 6}, 3},
		{[]byte{1, 2, 3}, 2, []byte{4, 5, 6}, []byte{1, 2, 4, 5, 6}, 3},
		{[]byte{1, 2, 3}, 3, []byte{4, 5, 6}, []byte{1, 2, 3, 4, 5, 6}, 3},
		{[]byte{1, 2, 3}, 4, []byte{4, 5, 6}, []byte{1, 2, 3, 4, 5, 6}, 3},
	}
	for idx, tc := range cases {
		id := idx + 1 // 1-based case number in failure messages
		dataBlock := db.DataBlockNode{Data: tc.defaultData}
		addLen := dataBlock.Add(tc.offset, &tc.addData)
		if addLen != tc.expectedAddLen {
			t.Errorf("Test %v fail: the number of items added is not as expected.\nExpected: %v. Result: %v.\n", id, tc.expectedAddLen, addLen)
		}
		if len(*dataBlock.GetData()) != len(tc.expectedData) {
			t.Errorf("Test %v fail: object contains unexpected number of items.\nExpected: %v. Result: %v.\n", id, len(tc.expectedData), len(*dataBlock.GetData()))
		}
		for i, item := range *dataBlock.GetData() {
			if item != tc.expectedData[i] {
				t.Errorf("Test %v fail: result data is not as expected.\nExpected: %v. Result: %v.\n", id, tc.expectedData, *dataBlock.GetData())
			}
		}
	}
}
|
package support
// JsonResult is a generic API response envelope.
// NOTE(review): the struct tags use the key "jsonp" with empty values; the
// standard encoding/json package ignores these, so presumably a custom
// marshaler consumes them — confirm, since `json:"..."` may have been
// intended.
type JsonResult struct {
	Error           bool              `jsonp:""` // true when the request failed
	Message         string            `jsonp:""` // human-readable status message
	Results         interface{}       `jsonp:""` // list-shaped payload
	Result          interface{}       `jsonp:""` // single-item payload
	Errors          map[string]string `jsonp:""` // field-level error details
	TotalCount      int64             `jsonp:""` // total matching records (for paging)
	CurrentUnixTime int64             `jsonp:""` // server time at response creation
}
// Copyright 2018 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"os"
rabbitmq_terraforming "github.com/GoogleCloudPlatform/terraformer/providers/rabbitmq"
"github.com/GoogleCloudPlatform/terraformer/terraform_utils"
"github.com/spf13/cobra"
)
const (
	// defaultRabbitMQEndpoint is used when RABBITMQ_SERVER_URL is unset.
	defaultRabbitMQEndpoint = "http://localhost:15672"
)
// newCmdRabbitMQImporter builds the cobra command that imports existing
// RabbitMQ state into Terraform configuration. Connection settings come from
// the environment: RABBITMQ_SERVER_URL (falling back to
// defaultRabbitMQEndpoint), RABBITMQ_USERNAME and RABBITMQ_PASSWORD.
func newCmdRabbitMQImporter(options ImportOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "rabbitmq",
		Short: "Import current state to Terraform configuration from RabbitMQ",
		Long:  "Import current state to Terraform configuration from RabbitMQ",
		RunE: func(cmd *cobra.Command, args []string) error {
			endpoint := os.Getenv("RABBITMQ_SERVER_URL")
			if len(endpoint) == 0 {
				endpoint = defaultRabbitMQEndpoint
			}
			username := os.Getenv("RABBITMQ_USERNAME")
			password := os.Getenv("RABBITMQ_PASSWORD")
			provider := newRabbitMQProvider()
			// Return Import's error directly instead of the redundant
			// `if err != nil { return err }; return nil` form.
			return Import(provider, options, []string{endpoint, username, password})
		},
	}
	cmd.AddCommand(listCmd(newRabbitMQProvider()))
	cmd.PersistentFlags().BoolVarP(&options.Connect, "connect", "c", true, "")
	cmd.PersistentFlags().StringSliceVarP(&options.Resources, "resources", "r", []string{}, "vhosts")
	cmd.PersistentFlags().StringVarP(&options.PathPattern, "path-pattern", "p", DefaultPathPattern, "{output}/{provider}/custom/{service}/")
	cmd.PersistentFlags().StringVarP(&options.PathOutput, "path-output", "o", DefaultPathOutput, "")
	cmd.PersistentFlags().StringVarP(&options.State, "state", "s", DefaultState, "local or bucket")
	cmd.PersistentFlags().StringVarP(&options.Bucket, "bucket", "b", "", "gs://terraform-state")
	cmd.PersistentFlags().StringSliceVarP(&options.Filter, "filter", "f", []string{}, "rabbitmq_type=id1:id2:id4")
	return cmd
}
// newRabbitMQProvider returns a fresh RabbitMQ provider generator.
func newRabbitMQProvider() terraform_utils.ProviderGenerator {
	return &rabbitmq_terraforming.RBTProvider{}
}
|
package cherryDataConfig
import jsoniter "github.com/json-iterator/go"
// JsonParser is a config parser backed by jsoniter.
type JsonParser struct {
}
// TypeName returns the identifier under which this parser is registered.
func (j *JsonParser) TypeName() string {
	return "json"
}
// Unmarshal decodes JSON data into v using jsoniter (a drop-in replacement
// for encoding/json).
func (j *JsonParser) Unmarshal(data []byte, v interface{}) error {
	return jsoniter.Unmarshal(data, v)
}
|
package json
import (
"bytes"
"encoding/json"
"fmt"
"github.com/project-flogo/core/data"
"github.com/project-flogo/core/data/expression/function"
)
// init registers numbersToString with the expression function registry.
// The registration error is deliberately ignored.
func init() {
	_ = function.Register(&fnNumbersToString{})
}
// fnNumbersToString is the expression function that converts every numeric
// value inside a JSON object or array to its string representation.
type fnNumbersToString struct {
}
// Name returns the name of the function.
func (fnNumbersToString) Name() string {
	return "numbersToString"
}
// Sig returns the function signature: one argument of any type, non-variadic.
func (fnNumbersToString) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{data.TypeAny}, false
}
// Eval executes the function. It round-trips params[0] through JSON with
// UseNumber so every numeric value (at any depth) can be converted to its
// string representation. Accepts a JSON array or object; any other type
// yields an error.
func (fnNumbersToString) Eval(params ...interface{}) (interface{}, error) {
	inputBytes, err := json.Marshal(params[0])
	if err != nil {
		return nil, err
	}
	reader := bytes.NewReader(inputBytes)
	decoder := json.NewDecoder(reader)
	// UseNumber keeps numbers as json.Number instead of float64, so the
	// original textual representation is preserved.
	decoder.UseNumber()
	switch t := params[0].(type) {
	case []interface{}:
		outputArr := make([]interface{}, len(t))
		err = decoder.Decode(&outputArr)
		if err != nil {
			return nil, err
		}
		return handleArray(outputArr), nil
	case map[string]interface{}:
		outputMap := make(map[string]interface{})
		err = decoder.Decode(&outputMap)
		if err != nil {
			return nil, err
		}
		encodeNumbersToString(outputMap)
		return outputMap, nil
	default:
		return nil, fmt.Errorf("Unsupported json object type [%T]", params[0])
	}
}
func encodeNumbersToString(m map[string]interface{}) {
for k, v := range m {
switch t := v.(type) {
case json.Number:
m[k] = t.String()
case map[string]interface{}:
encodeNumbersToString(t)
case []interface{}:
m[k] = handleArray(t)
default:
fmt.Printf("Unsupported type: %T\n", v)
}
}
}
func handleArray(arr []interface{}) []interface{} {
for i, v := range arr {
switch t := v.(type) {
case json.Number:
arr[i] = t.String()
case map[string]interface{}:
encodeNumbersToString(t)
case []interface{}:
arr[i] = handleArray(t)
default:
fmt.Printf("Unsupported type inside array: %T\n", v)
}
}
return arr
}
|
/**
*
* By So http://sooo.site
* -----
* Don't panic.
* -----
*
*/
package conf
import (
"fmt"
"github.com/Git-So/blog-api/utils/helper"
)
// configYaml is the application's default configuration rendered as YAML;
// the jwt secret is randomized at process start via helper.GetRandomString.
// NOTE(review): the extraction this file passed through stripped leading
// whitespace; the YAML nesting below was reconstructed with two-space
// indents — verify against the original. Also note "status_messgae" looks
// like a typo for "status_message", but it is runtime configuration text;
// confirm what the XMPP consumer expects before changing it.
var configYaml = fmt.Sprintf(`#开发相关
dev:
  run_mode: debug # release debug
#页数配置
page:
  article: 8
  hot_article: 15
  comment: 10
  subject: 8
  tag: 200
  link: 10
#服务配置
server:
  port: 8099
  read_timeout: 60
  write_timeout: 60
#数据库配置
database:
  type: mysql # mysql sqlite3
  host: 127.0.0.1
  port: 3306
  user: root
  passwd: root
  name: blog
  table_prefix: blog_
#缓存配置
cache:
  type: redis
  host: 127.0.0.1
  port: 6379
  expired: 259200 # 秒
  prefix: blog_
  api_cache_state: true
#Jwt配置
jwt:
  secret: %s
  expired: 7200 # 秒
#WeChat配置
wechat:
  app_id:
  app_secret:
  touser:
  template_id:
  color: 000fff
#XMPP配置
xmpp:
  host: xmpp.jp:5222
  touser: sooo.site@xmpp.jp
  user:
  passwd:
  no_tls: true
  session: true
  status: xa
  status_messgae: I for one welcome our new codebot overlords.
`, helper.GetRandomString(50))
|
package rbtree
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_New(t *testing.T) {
// given
tree := New()
// when, then
assert.NotNil(t, tree)
}
func Test_Insert_Inorder(t *testing.T) {
	assert := assert.New(t)

	// given: values inserted in a fixed order that forces rebalancing
	values := []int{5, 2, 1, 4, 7, 22, 33, 11, 12, 13, 14, 15, 34, 35, 36, 37, 38}
	tree := New()
	for _, v := range values {
		tree.Insert(v)
	}

	// when: look every inserted value back up
	n := tree.Find
	n5, n2, n1, n4, n7 := n(5), n(2), n(1), n(4), n(7)
	n22, n33, n11, n12, n13 := n(22), n(33), n(11), n(12), n(13)
	n14, n15, n34, n35 := n(14), n(15), n(34), n(35)
	n36, n37, n38 := n(36), n(37), n(38)

	// then: parent/child links pin the exact shape of the balanced tree
	assert.Equal(n2, n5.left)
	assert.Equal(n1, n2.left)
	assert.Equal(n4, n2.right)
	assert.Equal(n13, n5.right)
	assert.Equal(n11, n13.left)
	assert.Equal(n7, n11.left)
	assert.Equal(n12, n11.right)
	assert.Equal(n34, n13.right)
	assert.Equal(n22, n34.left)
	assert.Equal(n14, n22.left)
	assert.Equal(n15, n14.right)
	assert.Equal(n33, n22.right)
	assert.Equal(n36, n34.right)
	assert.Equal(n35, n36.left)
	assert.Equal(n37, n36.right)
	assert.Equal(n38, n37.right)

	// exactly these five nodes are red; every other node is black
	for _, red := range []bool{!n13.isBlack, !n22.isBlack, !n15.isBlack, !n36.isBlack, !n38.isBlack} {
		assert.True(red)
	}
	for _, black := range []bool{
		n5.isBlack, n2.isBlack, n1.isBlack, n4.isBlack, n11.isBlack, n7.isBlack,
		n12.isBlack, n34.isBlack, n14.isBlack, n33.isBlack, n35.isBlack, n37.isBlack,
	} {
		assert.True(black)
	}

	// an inorder traversal yields the values in sorted order
	assert.EqualValues([]int{1, 2, 4, 5, 7, 11, 12, 13, 14, 15, 22, 33, 34, 35, 36, 37, 38}, tree.Inorder())
}
|
package main
import (
"context"
"fmt"
"sync"
"time"
"cloud.google.com/go/pubsub"
)
// PROJECT_ID is the GCP project that owns the Pub/Sub subscription.
const PROJECT_ID = "chiper-poc"

// dupMu guards the duplication map, which is shared by every concurrent
// call to receive_message. It must live at package level: the original
// per-call `var mu sync.Mutex` gave each receiver its own lock over the
// same map, so the two goroutines started in main raced on it.
var dupMu sync.Mutex

func main() {
	// Payloads already seen, so redeliveries can be reported as duplicates.
	duplication := make(map[string]bool)

	// Run one fast and one slow receiver on the same subscription to
	// observe at-least-once redelivery. main exits when the second call
	// returns; the goroutine is deliberately not waited on in this demo.
	go receive_message("my-first-sub", 1*time.Second, duplication)
	receive_message("my-first-sub", 15*time.Second, duplication)
}

// receive_message pulls messages from subID, sleeping for `sleep` before
// acking each one, and reports payloads that were already present in the
// shared duplication map. Access to the map is serialized by dupMu.
func receive_message(subID string, sleep time.Duration, duplication map[string]bool) {
	fmt.Printf("receiving %s %s %s\n", PROJECT_ID, subID, sleep)
	ctx := context.Background()

	client, err := pubsub.NewClient(ctx, PROJECT_ID)
	if err != nil {
		// The original discarded fmt.Errorf's result and then fell
		// through to a deferred Close on a nil client; report and bail.
		fmt.Printf("pubsub.NewClient: %v\n", err)
		return
	}
	defer client.Close()

	sub := client.Subscription(subID)
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Receive blocks until cctx is canceled or an unrecoverable error occurs.
	err = sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
		data := string(msg.Data)

		dupMu.Lock()
		if duplication[data] {
			fmt.Printf("This message is duplicated! %q", data)
		}
		duplication[data] = true
		dupMu.Unlock()

		fmt.Printf("%s %s %q\n", subID, sleep, data)
		time.Sleep(sleep)
		msg.Ack()
	})
	if err != nil {
		fmt.Printf("Receive: %v\n", err)
	}
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// AnGwAddress - describes the address of the access network gateway control node
type AnGwAddress struct {
	// IPv4 address of the access network gateway, if available.
	AnGwIpv4Addr string `json:"anGwIpv4Addr,omitempty"`
	// IPv6 address of the access network gateway, if available.
	// NOTE(review): Ipv6Addr is a project-declared type; `omitempty`
	// only takes effect if it is a string/scalar alias — confirm
	// against the generated Ipv6Addr definition.
	AnGwIpv6Addr Ipv6Addr `json:"anGwIpv6Addr,omitempty"`
}
|
package main
// colorSets is the fixed palette cycled through by GetColorValue.
var colorSets = [...]string{
	"#2980B9", // blue
	"#C0392B", // red
	"#F39C12", // yellow
	"#8E44AD", // wisteria
	"#16A085", // green
	"#2C3E50", // black
}

// GetColorValue returns the palette entry at index i. Any out-of-range
// index falls back to the first color — including negative indices,
// which previously caused an index-out-of-range panic because only the
// upper bound was checked.
func GetColorValue(i int) string {
	if i < 0 || i >= len(colorSets) {
		return colorSets[0]
	}
	return colorSets[i]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.