text stringlengths 11 4.05M |
|---|
/*
Crie uma slice usando make que possa conter todos os estados do Brasil.
Os estados: "Acre", "Alagoas", "Amapá", "Amazonas", "Bahia", "Ceará", "Espírito Santo", "Goiás",
"Maranhão", "Mato Grosso", "Mato Grosso do Sul", "Minas Gerais", "Pará", "Paraíba", "Paraná",
"Pernambuco", "Piauí", "Rio de Janeiro", "Rio Grande do Norte", "Rio Grande do Sul", "Rondônia",
"Roraima", "Santa Catarina", "São Paulo", "Sergipe", "Tocantins"
Demonstre o len e cap da slice.
Demonstre todos os valores da slice sem utilizar range.
*/
package main
import (
"fmt"
)
// main demonstrates slice creation with make: it allocates capacity for all
// 26 Brazilian states, fills the slice, prints its len and cap, and then
// prints every element without using range (as the exercise requires).
func main() {
	// Allocate once with make, as the exercise asks. Length 0 with capacity
	// 26 lets append fill the backing array with no reallocation. (The
	// original code's make result was immediately discarded by assigning a
	// fresh slice literal over it.)
	estados := make([]string, 0, 26)
	estados = append(estados,
		"Acre", "Alagoas", "Amapá", "Amazonas", "Bahia", "Ceará", "Espírito Santo", "Goiás",
		"Maranhão", "Mato Grosso", "Mato Grosso do Sul", "Minas Gerais", "Pará", "Paraíba", "Paraná",
		"Pernambuco", "Piauí", "Rio de Janeiro", "Rio Grande do Norte", "Rio Grande do Sul", "Rondônia",
		"Roraima", "Santa Catarina", "São Paulo", "Sergipe", "Tocantins",
	)
	fmt.Printf("Len: %v, Cap: %v\n", len(estados), cap(estados))
	// Index-based for loop: prints all values without using range.
	for indice := 0; indice < len(estados); indice++ {
		fmt.Println("Nome do estado: ", estados[indice])
	}
}
|
package bean
import (
"log"
"github.com/astaxie/beego/orm"
"time"
"fmt"
pkgProto "crazyant.com/deadfat/pbd/hero"
)
// ArenaLeaderboard is one row of the arena leaderboard table, keyed by
// player uid. Rank order (see highToLowsort): higher Score first, ties
// broken by earlier Updated, then by smaller Uid.
type ArenaLeaderboard struct {
	Uid     uint32 `orm:"pk;column(uid)"`   // character/player id (primary key)
	Score   uint32 `orm:"column(score)"`    // arena score (workshop wave)
	Updated int64  `orm:"column(updated)"`  // unix time of the last score update
}
// TableName tells the beego ORM which database table backs this struct.
func (self *ArenaLeaderboard) TableName() string {
	return "arenaLeaderboard"
}
// ---------------------------------------------------------------------------

// arenaLeaderboardCacheList is the in-memory, rank-ordered copy of the
// leaderboard, populated once by ArenaLeaBoardLoadAndSort.
// NOTE(review): package-level mutable state with no lock — confirm all
// readers/writers run on a single goroutine.
var arenaLeaderboardCacheList []*ArenaLeaderboard = make([]*ArenaLeaderboard, 0)
// ArenaLeaBoardLoadAndSort loads every ArenaLeaderboard row from the
// database into the in-memory cache and sorts it into rank order,
// preparing the leaderboard service. Panics if the query fails.
func ArenaLeaBoardLoadAndSort(){
	t := time.Now()
	//nowsec := t.Unix()
	var arenaLeaderboards []ArenaLeaderboard
	// Limit(-1) means "no limit": fetch every row.
	num , err := defaultOrm.QueryTable(new(ArenaLeaderboard)).Limit(-1).All(&arenaLeaderboards)
	if err != nil {
		panic(fmt.Sprintf("载入车间战斗排行榜缓存数据时出错: %v", err))
	}
	if(num == 0){
		return
	}
	for i := int64(0); i < num; i++ {
		// item is a fresh copy each iteration, so taking its address is safe.
		item := arenaLeaderboards[i]
		arenaLeaderboardCacheList = append(arenaLeaderboardCacheList, &item)
	}
	// Quicksort the whole cache: high score first, earlier Updated wins ties.
	highToLowsort(arenaLeaderboardCacheList, 0 , len(arenaLeaderboardCacheList) -1)
	long := time.Since(t)
	log.Println("bean.ArenaLeaBoardLoadAndSort 消耗时间:", long, num)
}
// DoUpdateArenaScore persists a player's new arena score (inserting the row
// if the player has none) and adjusts the in-memory cache, returning
// (true, newRank) with a 1-based rank. Rank is computed as 1 + the number
// of rows that sort ahead of this player (higher score; equal score with
// earlier update; equal score and time with smaller uid).
// NOTE(review): the cache fixup below swaps only the entries at the old and
// new rank indexes — a single swap does not re-sort the elements in
// between, so the cache can drift out of order after updates; confirm this
// is intentional. It also assumes cacheList[oldIndex] is this player's
// entry, which only holds while cache order matches the database exactly.
func DoUpdateArenaScore(uid uint32, score uint32) (bool, uint32){
	t := time.Now()
	var oldIndex int
	arenaLeaderboard := ArenaLeaderboard{Uid:uid}
	rErr := defaultOrm.Read(&arenaLeaderboard)
	cond := orm.NewCondition()
	if rErr == nil{
		// Existing player: count rows currently ranked ahead to find the
		// old rank index, then update both cache entry and database row.
		cond1 := cond.And("score__gt", arenaLeaderboard.Score)
		cond2 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__lt", arenaLeaderboard.Updated)
		cond3 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__eq", arenaLeaderboard.Updated).And("uid__lt", arenaLeaderboard.Uid)
		lastCond := cond.AndCond(cond1).OrCond(cond2).OrCond(cond3)
		temIndex, qErr := defaultOrm.QueryTable(new(ArenaLeaderboard)).SetCond(lastCond).Count()
		checkError("UpdateArenaScore_queryOldIndex,错误:", qErr)
		oldIndex = int(temIndex)
		oldItem := arenaLeaderboardCacheList[oldIndex]
		oldItem.Score = score
		oldItem.Updated = time.Now().Unix()
		arenaLeaderboard.Score = score;
		arenaLeaderboard.Updated = time.Now().Unix()
		_, uErr := defaultOrm.Update(&arenaLeaderboard)
		checkError("UpdateArenaScore_update,错误:", uErr)
	}else if(rErr == orm.ErrNoRows){
		// New player: append to the cache tail and insert the row.
		arenaLeaderboard.Score = score
		arenaLeaderboard.Updated = time.Now().Unix()
		oldIndex = len(arenaLeaderboardCacheList)
		arenaLeaderboardCacheList = append(arenaLeaderboardCacheList, &arenaLeaderboard)
		_, iErr := defaultOrm.Insert(&arenaLeaderboard)
		checkError("UpdateArenaScore_insert,错误:", iErr)
	}else{
		panic(fmt.Sprintf("DoUpdateArenaScore 获取玩家当前分数错误: %v", rErr))
	}
	// Recompute the rank against the freshly written score/time.
	cond1 := cond.And("score__gt", arenaLeaderboard.Score)
	cond2 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__lt", arenaLeaderboard.Updated)
	cond3 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__eq", arenaLeaderboard.Updated).And("uid__lt", arenaLeaderboard.Uid)
	lastCond := cond.AndCond(cond1).OrCond(cond2).OrCond(cond3)
	newIndex, qErr := defaultOrm.QueryTable(new(ArenaLeaderboard)).SetCond(lastCond).Count()
	checkError("UpdateArenaScore_queryNewIndex,错误:", qErr)
	if(oldIndex != int(newIndex)){
		arenaLeaderboardCacheList[oldIndex], arenaLeaderboardCacheList[newIndex] = arenaLeaderboardCacheList[newIndex], arenaLeaderboardCacheList[oldIndex]
	}
	long := time.Since(t)
	log.Println("bean.DoUpdateArenaScore 消耗时间:", long)
	return true, uint32(newIndex+1)
}
// GetArenaLeaderboard returns leaderboard entries for 1-based ranks
// [start, end) — start inclusive, end exclusive — looking up each entry's
// display name from the SimRole table.
// NOTE(review): start < 1 would make the slice expression below panic;
// callers are assumed to pass start >= 1. The Read error for SimRole is
// ignored, so a missing role yields an empty name.
func GetArenaLeaderboard(start int, end int) []*pkgProto.LearderboardInfo {
	// lastRank is one past the final occupied rank, matching the
	// exclusive-end convention.
	lastRank := len(arenaLeaderboardCacheList) + 1
	if(end > lastRank){
		end = lastRank
	}
	if(start > end){
		return make([]*pkgProto.LearderboardInfo, 0)
	}
	answerList := make([]*pkgProto.LearderboardInfo, 0, end-start)
	// Ranks are 1-based; the cache slice is 0-based, hence the -1 offsets.
	temArenaLeaderboardList := arenaLeaderboardCacheList[start-1:end-1]
	for i, item := range temArenaLeaderboardList{
		ch := SimRole{Uid: item.Uid}
		defaultOrm.Read(&ch)
		name := ch.Name
		answerList = append(answerList, &pkgProto.LearderboardInfo{int32(item.Score), uint32(start+i), name})
	}
	return answerList
}
// GetRank returns the player's current 1-based rank, computed directly
// from the database as 1 + the number of rows that sort ahead of the
// player (higher score; equal score with earlier update; equal score and
// time with smaller uid). Returns 0 if the player has no leaderboard row;
// panics on any other read error.
func GetRank(uid uint32) uint32 {
	t := time.Now()
	arenaLeaderboard := ArenaLeaderboard{Uid:uid}
	rErr := defaultOrm.Read(&arenaLeaderboard)
	if rErr == nil{
		cond := orm.NewCondition()
		cond1 := cond.And("score__gt", arenaLeaderboard.Score)
		cond2 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__lt", arenaLeaderboard.Updated)
		cond3 := cond.And("score__eq", arenaLeaderboard.Score).And("updated__eq", arenaLeaderboard.Updated).And("uid__lt", arenaLeaderboard.Uid)
		lastCond := cond.AndCond(cond1).OrCond(cond2).OrCond(cond3)
		log.Println("GetRank 读取出来的数据:", arenaLeaderboard.Score, arenaLeaderboard.Updated)
		temIndex, qErr := defaultOrm.QueryTable(new(ArenaLeaderboard)).SetCond(lastCond).Count()
		checkError("UpdateArenaScore_queryOldIndex,错误:", qErr)
		long := time.Since(t)
		log.Println("bean.GetRank 消耗时间:", long)
		return uint32(temIndex+1)
	}else if(rErr == orm.ErrNoRows){
		return 0
	}else{
		panic(fmt.Sprintf("DoUpdateArenaScore 获取玩家当前分数错误: %v", rErr))
	}
}
// GetScore returns the player's stored arena score, or 0 if the player has
// no leaderboard row. Panics on any other read error.
func GetScore(uid uint32) uint32 {
	arenaLeaderboard := ArenaLeaderboard{Uid:uid}
	rErr := defaultOrm.Read(&arenaLeaderboard)
	if rErr == nil{
		return arenaLeaderboard.Score
	}else if(rErr == orm.ErrNoRows){
		return 0
	}else{
		panic(fmt.Sprintf("DoUpdateArenaScore 获取玩家当前分数错误: %v", rErr))
	}
}
//排序:积分从高到底,积分相同,updated小的靠前
func highToLowsort(arr []*ArenaLeaderboard, start int, end int) {
var (
key *ArenaLeaderboard = arr[start]
low int = start
high int = end
)
for {
for low < high {
if arr[high].Score > key.Score ||
(arr[high].Score == key.Score && arr[high].Updated < key.Updated) ||
(arr[high].Score == key.Score && arr[high].Updated < key.Updated && arr[high].Uid < key.Uid){
arr[low] = arr[high]
break
}
high--
}
for low < high {
if arr[low].Score < key.Score ||
(arr[low].Score == key.Score && arr[low].Updated > key.Updated) ||
(arr[low].Score == key.Score && arr[low].Updated > key.Updated && arr[low].Uid > key.Uid){
arr[high] = arr[low]
break
}
low++
}
if low >= high {
arr[low] = key
break
}
}
if low-1 > start {
highToLowsort(arr, start, low-1)
}
if high+1 < end {
highToLowsort(arr, high+1, end)
}
} |
package routes
import (
"net/http"
"github.com/gorilla/mux"
"github.com/sylus/openparl/api"
"github.com/sylus/openparl/auth"
"github.com/urfave/negroni"
)
// NewRoutes builds the HTTP routes for the API: client static files, the
// /api/user signup/login/info endpoints, and the /api/bill endpoints.
// JWT-protected routes run the auth middleware before the handler.
func NewRoutes(api *api.API) *mux.Router {
	// Named `router` rather than `mux` so the local variable does not
	// shadow the imported gorilla/mux package for the rest of the function.
	router := mux.NewRouter()

	// client static files
	router.Handle("/", http.FileServer(http.Dir("./client/dist/"))).Methods("GET")
	router.PathPrefix("/static/js").Handler(http.StripPrefix("/static/js/", http.FileServer(http.Dir("./client/dist/static/js/"))))

	// api
	apiRoute := router.PathPrefix("/api").Subrouter()

	// users
	users := apiRoute.PathPrefix("/user").Subrouter()
	users.HandleFunc("/signup", api.UserSignup).Methods("POST")
	users.HandleFunc("/login", api.UserLogin).Methods("POST")
	// /info requires a valid JWT: the middleware runs before the handler.
	users.Handle("/info", negroni.New(
		negroni.HandlerFunc(auth.JwtMiddleware.HandlerWithNext),
		negroni.Wrap(http.HandlerFunc(api.UserInfo)),
	))

	// bills
	bills := apiRoute.PathPrefix("/bill").Subrouter()
	bills.HandleFunc("/random", api.Bill).Methods("GET")
	bills.Handle("/protected/random", negroni.New(
		negroni.HandlerFunc(auth.JwtMiddleware.HandlerWithNext),
		negroni.Wrap(http.HandlerFunc(api.PrivateBill)),
	))

	return router
}
|
package model
import (
"github.com/ele828/higo/common"
"github.com/ele828/higo/config"
. "github.com/ele828/higo/error"
"github.com/jinzhu/gorm"
"log"
"math"
"strconv"
"time"
)
//------------------- ORM MODEL ---------------------//
// Article is the ORM model for a blog article, with its topic, tags,
// comments, counters, and creation time.
type Article struct {
	ID      int
	Title   string `sql:"size:255; not null;"`
	Content string `sql:"type:text; not null;"`
	Link    string `sql:"size:255; not null;"`
	Topic        Topic // Foreign Key
	TopicId      int   // Name of Foreign Key
	Tag          []Tag `gorm:"many2many:article_tags;"`
	Comment      []Comment
	ReadCount    int `sql:"not null; default:0;"`
	ThumbCount   int `sql:"not null; default:0;"`
	CommentCount int `sql:"not null; default:0;"`
	// Struct tags are space-separated key:"value" pairs. The original tag
	// was `json:"-"; sql:"..."` — the stray semicolon after json:"-" stops
	// reflect.StructTag parsing, silently dropping the sql tag.
	CreateAt time.Time `json:"-" sql:"not null; DEFAULT:current_timestamp;"`
	Time     string    `sql:"-"` // display-formatted time, not persisted
}
//----------------- JSON OUTPUT --------------------//
// ArticleItem is the JSON output shape for one entry in an article list:
// a trimmed view of Article without the full content or comments.
type ArticleItem struct {
	ID         int
	Title      string
	Topic      string // topic display name (resolved from Article.Topic)
	TopicId    int
	ReadCount  int
	ThumbCount int
	Time       string // formatted creation date
}
// Write an article
func (a *Article) Write() error {
db := DB.Create(a)
if db.Error != nil {
return db.Error
}
return nil
}
// Read an article from storage
func (a *Article) Read(id string) error {
// id validator
if id, err := strconv.Atoi(id); err != nil || id < 0 {
return ErrGivenArticleId
}
q := DB.First(a, id)
if q.Error != nil {
// article not found error
if q.Error == gorm.RecordNotFound {
log.Print(ErrArticleNotFound.Error())
return ErrArticleNotFound
}
return q.Error
}
a.Time = common.FormatTime(a.CreateAt)
err := ReadComments(a)
if err != nil {
return err
}
return nil
}
// Insert a comment into an article
func (a *Article) WriteComment(c Comment) error {
a.Comment = append(a.Comment, c)
a.CommentCount = a.CommentCount + 1
q := DB.Save(a)
if q.Error != nil {
return q.Error
}
return nil
}
type List struct{}
// Get a list of articles
// GetList returns one page of ArticleItem summaries, newest first.
// page is a 1-based decimal string; the page size comes from config.
// Returns ErrGivenPageNumber for a non-numeric or non-positive page and
// ErrEmptyList when the page has no articles.
func (l *List) GetList(page string) ([]ArticleItem, error) {
	pageSize := config.PageSize
	p, err := strconv.Atoi(page)
	if err != nil {
		return nil, ErrGivenPageNumber
	}
	// Offset of the first row on this page; negative means page < 1.
	start := (p - 1) * pageSize
	if start < 0 {
		return nil, ErrGivenPageNumber
	}
	var articles = []Article{}
	q := DB.Order("id desc").
		Limit(pageSize).
		Offset(start).
		Find(&articles)
	if q.Error != nil {
		return nil, q.Error
	}
	// if articles not found
	if len(articles) == 0 {
		return nil, ErrEmptyList
	}
	var items []ArticleItem
	for _, v := range articles {
		// v is a per-iteration copy; ReadTopic fills v.Topic on the copy,
		// which is read immediately below, so the copy is sufficient here.
		if err := ReadTopic(&v); err != nil {
			return nil, err
		}
		item := ArticleItem{
			ID:         v.ID,
			Title:      v.Title,
			Topic:      v.Topic.Name,
			TopicId:    v.TopicId,
			ReadCount:  v.ReadCount,
			ThumbCount: v.ThumbCount,
			Time:       common.FormatDate(v.CreateAt),
		}
		items = append(items, item)
	}
	return items, nil
}
// Get total number of article list.
func (l *List) GetListPageCount() (int, error) {
var count int
q := DB.Table("articles").Count(&count)
if q.Error != nil {
return 0, q.Error
}
return int(math.Ceil(float64(count) / float64(config.PageSize))), nil
}
|
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/google/go-github/v32/github"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
// issueCmd represents the issue command: it reads the GitHub Actions event
// payload from GITHUB_EVENT_PATH, applies the comment body as a label on
// the commented issue, and logs the event details.
var issueCmd = &cobra.Command{
	Use:   "issue",
	Short: "github action issue and issue_comment operation",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {
		githubEventPath := os.Getenv("GITHUB_EVENT_PATH")
		githubEventName := os.Getenv("GITHUB_EVENT_NAME")
		githubToken := os.Getenv("GITHUB_TOKEN")
		if githubEventPath == "" {
			fmt.Println("GITHUB_EVENT_PATH is null.")
			return
		}
		var ghIssueComment GithubIssueComment
		content, err := ioutil.ReadFile(githubEventPath)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal(content, &ghIssueComment)
		if err != nil {
			panic(err)
		}
		// Bug fixed: the GitHub API labels endpoint is keyed by the
		// per-repository issue *number*, not the global Issue.ID the
		// original passed (which made the AddLabelsToIssue call 404).
		SetIssueLabel(githubToken,
			ghIssueComment.Repository.Owner.Login,
			ghIssueComment.Repository.Name,
			int(ghIssueComment.Issue.Number),
			ghIssueComment.Comment.Body,
		)
		fmt.Printf("event: %s, file: %s\n", githubEventName, githubEventPath)
		fmt.Printf("action: %s, issue title: %s, issue body: %s, issue user: %s\n",
			ghIssueComment.Action,
			ghIssueComment.Issue.Title,
			ghIssueComment.Issue.Body,
			ghIssueComment.Issue.User.Login,
		)
		fmt.Printf("comment user: %s, comment body: %s\n",
			ghIssueComment.Comment.User.Login,
			ghIssueComment.Comment.Body,
		)
	},
}
// issueCmdInit registers the issue subcommand on the root command.
func issueCmdInit() {
	rootCmd.AddCommand(issueCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// issueCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// issueCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// GithubIssueComment mirrors the GitHub Actions `issue_comment` event
// payload: the action, the comment, the issue it was left on, and the
// organization / repository / sender context. Field names follow the JSON
// keys of the webhook payload.
type GithubIssueComment struct {
	Action  string `json:"action"`
	Comment struct {
		AuthorAssociation string `json:"author_association"`
		Body              string `json:"body"`
		CreatedAt         string `json:"created_at"`
		HTMLURL           string `json:"html_url"`
		ID                int64  `json:"id"`
		IssueURL          string `json:"issue_url"`
		NodeID            string `json:"node_id"`
		UpdatedAt         string `json:"updated_at"`
		URL               string `json:"url"`
		User              struct {
			AvatarURL         string `json:"avatar_url"`
			EventsURL         string `json:"events_url"`
			FollowersURL      string `json:"followers_url"`
			FollowingURL      string `json:"following_url"`
			GistsURL          string `json:"gists_url"`
			GravatarID        string `json:"gravatar_id"`
			HTMLURL           string `json:"html_url"`
			ID                int64  `json:"id"`
			Login             string `json:"login"`
			NodeID            string `json:"node_id"`
			OrganizationsURL  string `json:"organizations_url"`
			ReceivedEventsURL string `json:"received_events_url"`
			ReposURL          string `json:"repos_url"`
			SiteAdmin         bool   `json:"site_admin"`
			StarredURL        string `json:"starred_url"`
			SubscriptionsURL  string `json:"subscriptions_url"`
			Type              string `json:"type"`
			URL               string `json:"url"`
		} `json:"user"`
	} `json:"comment"`
	Issue struct {
		ActiveLockReason interface{}   `json:"active_lock_reason"`
		Assignee         interface{}   `json:"assignee"`
		Assignees        []interface{} `json:"assignees"`
		AuthorAssociation string       `json:"author_association"`
		Body             string        `json:"body"`
		ClosedAt         interface{}   `json:"closed_at"`
		Comments         int64         `json:"comments"`
		CommentsURL      string        `json:"comments_url"`
		CreatedAt        string        `json:"created_at"`
		EventsURL        string        `json:"events_url"`
		HTMLURL          string        `json:"html_url"`
		// NOTE(review): ID is GitHub's global issue id, not the per-repo
		// issue number (that is the Number field below); it is also typed
		// int here while every other id in this payload is int64 — confirm
		// whether the narrower type is intentional.
		ID            int           `json:"id"`
		Labels        []interface{} `json:"labels"`
		LabelsURL     string        `json:"labels_url"`
		Locked        bool          `json:"locked"`
		Milestone     interface{}   `json:"milestone"`
		NodeID        string        `json:"node_id"`
		Number        int64         `json:"number"`
		RepositoryURL string        `json:"repository_url"`
		State         string        `json:"state"`
		Title         string        `json:"title"`
		UpdatedAt     string        `json:"updated_at"`
		URL           string        `json:"url"`
		User          struct {
			AvatarURL         string `json:"avatar_url"`
			EventsURL         string `json:"events_url"`
			FollowersURL      string `json:"followers_url"`
			FollowingURL      string `json:"following_url"`
			GistsURL          string `json:"gists_url"`
			GravatarID        string `json:"gravatar_id"`
			HTMLURL           string `json:"html_url"`
			ID                int64  `json:"id"`
			Login             string `json:"login"`
			NodeID            string `json:"node_id"`
			OrganizationsURL  string `json:"organizations_url"`
			ReceivedEventsURL string `json:"received_events_url"`
			ReposURL          string `json:"repos_url"`
			SiteAdmin         bool   `json:"site_admin"`
			StarredURL        string `json:"starred_url"`
			SubscriptionsURL  string `json:"subscriptions_url"`
			Type              string `json:"type"`
			URL               string `json:"url"`
		} `json:"user"`
	} `json:"issue"`
	Organization struct {
		AvatarURL        string `json:"avatar_url"`
		Description      string `json:"description"`
		EventsURL        string `json:"events_url"`
		HooksURL         string `json:"hooks_url"`
		ID               int64  `json:"id"`
		IssuesURL        string `json:"issues_url"`
		Login            string `json:"login"`
		MembersURL       string `json:"members_url"`
		NodeID           string `json:"node_id"`
		PublicMembersURL string `json:"public_members_url"`
		ReposURL         string `json:"repos_url"`
		URL              string `json:"url"`
	} `json:"organization"`
	Repository struct {
		ArchiveURL       string      `json:"archive_url"`
		Archived         bool        `json:"archived"`
		AssigneesURL     string      `json:"assignees_url"`
		BlobsURL         string      `json:"blobs_url"`
		BranchesURL      string      `json:"branches_url"`
		CloneURL         string      `json:"clone_url"`
		CollaboratorsURL string      `json:"collaborators_url"`
		CommentsURL      string      `json:"comments_url"`
		CommitsURL       string      `json:"commits_url"`
		CompareURL       string      `json:"compare_url"`
		ContentsURL      string      `json:"contents_url"`
		ContributorsURL  string      `json:"contributors_url"`
		CreatedAt        string      `json:"created_at"`
		DefaultBranch    string      `json:"default_branch"`
		DeploymentsURL   string      `json:"deployments_url"`
		Description      interface{} `json:"description"`
		Disabled         bool        `json:"disabled"`
		DownloadsURL     string      `json:"downloads_url"`
		EventsURL        string      `json:"events_url"`
		Fork             bool        `json:"fork"`
		Forks            int64       `json:"forks"`
		ForksCount       int64       `json:"forks_count"`
		ForksURL         string      `json:"forks_url"`
		FullName         string      `json:"full_name"`
		GitCommitsURL    string      `json:"git_commits_url"`
		GitRefsURL       string      `json:"git_refs_url"`
		GitTagsURL       string      `json:"git_tags_url"`
		GitURL           string      `json:"git_url"`
		HasDownloads     bool        `json:"has_downloads"`
		HasIssues        bool        `json:"has_issues"`
		HasPages         bool        `json:"has_pages"`
		HasProjects      bool        `json:"has_projects"`
		HasWiki          bool        `json:"has_wiki"`
		Homepage         interface{} `json:"homepage"`
		HooksURL         string      `json:"hooks_url"`
		HTMLURL          string      `json:"html_url"`
		ID               int64       `json:"id"`
		IssueCommentURL  string      `json:"issue_comment_url"`
		IssueEventsURL   string      `json:"issue_events_url"`
		IssuesURL        string      `json:"issues_url"`
		KeysURL          string      `json:"keys_url"`
		LabelsURL        string      `json:"labels_url"`
		Language         interface{} `json:"language"`
		LanguagesURL     string      `json:"languages_url"`
		License          struct {
			Key    string `json:"key"`
			Name   string `json:"name"`
			NodeID string `json:"node_id"`
			SpdxID string `json:"spdx_id"`
			URL    string `json:"url"`
		} `json:"license"`
		MergesURL        string      `json:"merges_url"`
		MilestonesURL    string      `json:"milestones_url"`
		MirrorURL        interface{} `json:"mirror_url"`
		Name             string      `json:"name"`
		NodeID           string      `json:"node_id"`
		NotificationsURL string      `json:"notifications_url"`
		OpenIssues       int64       `json:"open_issues"`
		OpenIssuesCount  int64       `json:"open_issues_count"`
		Owner            struct {
			AvatarURL         string `json:"avatar_url"`
			EventsURL         string `json:"events_url"`
			FollowersURL      string `json:"followers_url"`
			FollowingURL      string `json:"following_url"`
			GistsURL          string `json:"gists_url"`
			GravatarID        string `json:"gravatar_id"`
			HTMLURL           string `json:"html_url"`
			ID                int64  `json:"id"`
			Login             string `json:"login"`
			NodeID            string `json:"node_id"`
			OrganizationsURL  string `json:"organizations_url"`
			ReceivedEventsURL string `json:"received_events_url"`
			ReposURL          string `json:"repos_url"`
			SiteAdmin         bool   `json:"site_admin"`
			StarredURL        string `json:"starred_url"`
			SubscriptionsURL  string `json:"subscriptions_url"`
			Type              string `json:"type"`
			URL               string `json:"url"`
		} `json:"owner"`
		Private         bool   `json:"private"`
		PullsURL        string `json:"pulls_url"`
		PushedAt        string `json:"pushed_at"`
		ReleasesURL     string `json:"releases_url"`
		Size            int64  `json:"size"`
		SSHURL          string `json:"ssh_url"`
		StargazersCount int64  `json:"stargazers_count"`
		StargazersURL   string `json:"stargazers_url"`
		StatusesURL     string `json:"statuses_url"`
		SubscribersURL  string `json:"subscribers_url"`
		SubscriptionURL string `json:"subscription_url"`
		SvnURL          string `json:"svn_url"`
		TagsURL         string `json:"tags_url"`
		TeamsURL        string `json:"teams_url"`
		TreesURL        string `json:"trees_url"`
		UpdatedAt       string `json:"updated_at"`
		URL             string `json:"url"`
		Watchers        int64  `json:"watchers"`
		WatchersCount   int64  `json:"watchers_count"`
	} `json:"repository"`
	Sender struct {
		AvatarURL         string `json:"avatar_url"`
		EventsURL         string `json:"events_url"`
		FollowersURL      string `json:"followers_url"`
		FollowingURL      string `json:"following_url"`
		GistsURL          string `json:"gists_url"`
		GravatarID        string `json:"gravatar_id"`
		HTMLURL           string `json:"html_url"`
		ID                int64  `json:"id"`
		Login             string `json:"login"`
		NodeID            string `json:"node_id"`
		OrganizationsURL  string `json:"organizations_url"`
		ReceivedEventsURL string `json:"received_events_url"`
		ReposURL          string `json:"repos_url"`
		SiteAdmin         bool   `json:"site_admin"`
		StarredURL        string `json:"starred_url"`
		SubscriptionsURL  string `json:"subscriptions_url"`
		Type              string `json:"type"`
		URL               string `json:"url"`
	} `json:"sender"`
}
// SetIssueLabel attaches label to the given issue via the GitHub API,
// authenticating with the supplied OAuth access token, and prints the
// API response for inspection.
func SetIssueLabel(accessToken, owner, repo string, issueNumber int, label string) {
	ctx := context.Background()
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	httpClient := oauth2.NewClient(ctx, tokenSource)
	gh := github.NewClient(httpClient)
	labels, resp, err := gh.Issues.AddLabelsToIssue(ctx, owner, repo, issueNumber, []string{label})
	fmt.Println("resp", labels, resp, err)
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
)
/*
This application uses some of the previous concepts to calculate the size of a directory or a bunch of directories
given as input.
This version creates a new go routine for each call to walkDir. Since we do not know the number of calls done to
walkDir func the code uses a sync.WaitGroup to check the number of calls to walkDir that are still active and
a closer go routine to close the fileSizes channel when the counter drops to zero.
*/
// sema is a counting semaphore for limiting concurrency in dirents:
// at most 50 directories may be open at once.
var sema = make(chan struct{}, 50)
// main computes the total disk usage of the directories given as
// arguments (default "."), printing periodic progress with -v and a
// final summary plus the elapsed time.
func main() {
	var verbose = flag.Bool("v", false, "show verbose progress messages")
	var start, end time.Time
	// Determine the initial directories.
	flag.Parse()
	roots := flag.Args()
	if len(roots) == 0 {
		roots = []string{"."}
	}
	/*
		Traverse the file tree with the walkDir func in a separate go routine
		per root. fileSizes is a buffered channel (capacity 100) that receives
		one size per file on the way. When the recursion is done the channel
		is closed.
	*/
	fileSizes := make(chan int64, 100)
	// wait group is a way to know when the walkDir has finished its job. When the n is 0 it is the end. n is incremented
	// by Add() method and decremented by Done() method
	var n sync.WaitGroup
	start = time.Now()
	for _, root := range roots {
		n.Add(1)
		go walkDir(root, &n, fileSizes)
	}
	// this go routine waits till the n counter is 0, then close the fileSizes channel
	go func() {
		n.Wait()
		close(fileSizes)
	}()
	// Print the results periodically. tick stays nil without -v, so its
	// select case blocks forever and is effectively disabled.
	var tick <-chan time.Time
	if *verbose {
		tick = time.Tick(500 * time.Millisecond)
	}
	// Print the results.
	var nfiles, nbytes int64
loop:
	for {
		select {
		// fileSizes is drained by the main goroutine until it is closed:
		// each receive adds to the byte total and file count, and the loop
		// exits (via the labeled break) when ok reports the channel closed.
		case size, ok := <-fileSizes:
			if !ok {
				break loop // fileSizes was closed
			}
			nfiles++
			nbytes += size
		case <-tick:
			// print the number of files and the space along the way
			printDiskUsage(nfiles, nbytes)
		}
	}
	end = time.Now()
	// print the number of final files and final space
	printDiskUsage(nfiles, nbytes)
	execTime := end.Sub(start)
	fmt.Printf("execution time %d ms\n", execTime.Milliseconds())
}
// printDiskUsage reports the running totals: number of files seen and
// bytes counted, with the byte total also expressed in gigabytes.
func printDiskUsage(nfiles, nbytes int64) {
	gigabytes := float64(nbytes) / 1e9
	fmt.Printf("%d files %.1f GB (%d bytes)\n", nfiles, gigabytes, nbytes)
}
// walkDir recursively walks the file tree rooted at dir and sends the
// size of each regular file on fileSizes. Each (synchronous) recursive
// call is balanced by an Add/Done pair on n so the caller can wait for
// the whole traversal.
func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
	defer n.Done() // balance the Add performed before this call
	for _, info := range dirents(dir) {
		if !info.IsDir() {
			fileSizes <- info.Size()
			continue
		}
		n.Add(1)
		walkDir(filepath.Join(dir, info.Name()), n, fileSizes)
	}
}
// dirents returns the entries of directory dir, or nil after logging the
// error. The package-level counting semaphore bounds how many directories
// are read concurrently.
func dirents(dir string) []os.FileInfo {
	sema <- struct{}{}        // acquire a semaphore slot
	defer func() { <-sema }() // release it when done
	entries, err := ioutil.ReadDir(dir)
	if err == nil {
		return entries
	}
	fmt.Fprintf(os.Stderr, "du1: %v\n", err)
	return nil
}
|
package loader
import (
"encoding/csv"
"io"
"log"
)
// ExperienceRow is one CSV row of work experience: employer, role, and
// the start/end year and month (all kept as raw strings).
type ExperienceRow struct {
	CompanyName  string `json:"company_name"`
	RoleTaken    string `json:"role_taken"`
	YearStarted  string `json:"year_started"`
	YearEnded    string `json:"year_ended"`
	MonthStarted string `json:"month_started"`
	MonthEnded   string `json:"month_ended"`
}
// ProjectTakenRow is one CSV row describing a project undertaken at a
// company: name, description, tooling, year, and the role held.
type ProjectTakenRow struct {
	CompanyName        string `json:"company_name"`
	ProjectName        string `json:"project_name"`
	ProjectDescription string `json:"project_description"`
	ToolsUsed          string `json:"tools_used"`
	ProjectYear        string `json:"project_year"`
	ProjectRole        string `json:"project_role"`
}
// Technologies is one CSV row of technology categories: cloud platform,
// cloud services, languages, CI/CD, monitoring, and other tech.
type Technologies struct {
	Cloud         string `json:"cloud"`
	CloudServices string `json:"cloud_services"`
	Languages     string `json:"languages"`
	CICD          string `json:"cicd"`
	Monitoring    string `json:"monitoring"`
	Tech          string `json:"technologies"`
}
// All bundles one row of each loaded CSV kind.
// NOTE(review): not referenced anywhere in this chunk — confirm it is used
// elsewhere before relying on it.
type All struct {
	experienceRow   ExperienceRow
	projecttakenRow ProjectTakenRow
	technologies    Technologies
}
// LoadTechnologies reads CSV rows from r and maps each onto a Technologies
// record (columns 0-5: cloud, cloud services, languages, CI/CD,
// monitoring, other tech). Reading stops at EOF or on the first read
// error; a pointer to the accumulated slice is always returned.
func LoadTechnologies(r io.Reader) *[]*Technologies {
	reader := csv.NewReader(r)
	ret := make([]*Technologies, 0)
	for {
		row, err := reader.Read()
		if err == io.EOF {
			log.Println("End of file Technologies")
			break
		}
		if err != nil {
			log.Println(err)
			break
		}
		// Guard against a first row with fewer than six columns, which
		// would otherwise panic on indexing. (The original also had an
		// unreachable `if err != nil { log.Fatalln(err) }` after err had
		// already been handled above; it has been removed.)
		if len(row) < 6 {
			log.Println("skipping short Technologies row")
			continue
		}
		ret = append(ret, &Technologies{
			Cloud:         row[0],
			CloudServices: row[1],
			Languages:     row[2],
			CICD:          row[3],
			Monitoring:    row[4],
			Tech:          row[5],
		})
	}
	return &ret
}
// LoadProjects reads CSV rows from r and maps each onto a ProjectTakenRow
// (columns 0-5: company, project name, description, tools, year, role).
// Reading stops at EOF or on the first read error; a pointer to the
// accumulated slice is always returned.
func LoadProjects(r io.Reader) *[]*ProjectTakenRow {
	reader := csv.NewReader(r)
	ret := make([]*ProjectTakenRow, 0)
	for {
		row, err := reader.Read()
		if err == io.EOF {
			log.Println("End of file for Projects")
			break
		}
		if err != nil {
			log.Println(err)
			break
		}
		// Guard against a first row with fewer than six columns, which
		// would otherwise panic on indexing. (The original's dead
		// `if err != nil { log.Fatalln(err) }` after the struct literal
		// was unreachable and has been removed.)
		if len(row) < 6 {
			log.Println("skipping short Projects row")
			continue
		}
		ret = append(ret, &ProjectTakenRow{
			CompanyName:        row[0],
			ProjectName:        row[1],
			ProjectDescription: row[2],
			ToolsUsed:          row[3],
			ProjectYear:        row[4],
			ProjectRole:        row[5],
		})
	}
	return &ret
}
// LoadExperience reads CSV rows from r and maps each onto an ExperienceRow
// (columns 0-5: company, role, year started/ended, month started/ended).
// Reading stops at EOF or on the first read error; a pointer to the
// accumulated slice is always returned.
func LoadExperience(r io.Reader) *[]*ExperienceRow {
	reader := csv.NewReader(r)
	ret := make([]*ExperienceRow, 0)
	for {
		row, err := reader.Read()
		if err == io.EOF {
			log.Println("End of file Experience")
			break
		}
		if err != nil {
			log.Println(err)
			break
		}
		// Guard against a first row with fewer than six columns, which
		// would otherwise panic on indexing. (The original's dead
		// `if err != nil { log.Fatalln(err) }` after the struct literal
		// was unreachable and has been removed.)
		if len(row) < 6 {
			log.Println("skipping short Experience row")
			continue
		}
		ret = append(ret, &ExperienceRow{
			CompanyName:  row[0],
			RoleTaken:    row[1],
			YearStarted:  row[2],
			YearEnded:    row[3],
			MonthStarted: row[4],
			MonthEnded:   row[5],
		})
	}
	return &ret
}
|
package installer
import (
installertypes "github.com/openshift/installer/pkg/types"
vspheretypes "github.com/openshift/installer/pkg/types/vsphere"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi/config"
)
// NewInstallConfig builds an OpenShift install-config from the Pulumi
// configuration; every value is mandatory (conf.Require). The vSphere
// platform settings are assembled separately and attached at the end.
// Values are read in the same order as before, so any missing-key failure
// surfaces on the same key.
func NewInstallConfig(conf *config.Config) *installertypes.InstallConfig {
	cfg := &installertypes.InstallConfig{}
	cfg.APIVersion = "v1"
	cfg.BaseDomain = conf.Require("basedomain")
	cfg.SSHKey = conf.Require("sshkey")
	cfg.PullSecret = conf.Require("pullsecret")
	cfg.ObjectMeta.Name = conf.Require("clustername")
	platform := vspheretypes.Platform{
		Datacenter:       conf.Require("datacenter"),
		DefaultDatastore: conf.Require("datastore"),
		Folder:           conf.Require("folder"),
		Cluster:          conf.Require("cluster"),
		Network:          conf.Require("network"),
	}
	cfg.Platform.VSphere = &platform
	return cfg
}
|
package xhlog
import (
"fmt"
"testing"
"time"
)
// TestInit exercises logger initialization and concurrent writing: four
// goroutines emit one message per level every 10ms for 30 seconds.
// NOTE(review): the Dir below is a machine-specific absolute path, so this
// test only runs on that machine; the goroutines also loop forever and are
// only abandoned when the test returns — confirm this is acceptable for a
// manual smoke test.
func TestInit(t *testing.T) {
	logConf := LoggerConf{
		Dir:        "Z:\\Goland\\src\\gitee.com\\yongxue\\magicbox\\main\\logs",
		Prefix:     "test",
		Level:      "info",
		RotateSize: 1 * 1024 * 1024,
	}
	if err := Init(&logConf); err != nil {
		fmt.Println("dh log init error: " + err.Error())
		return
	}
	//dhlog.LoggerExp.Error("error error error error error error error error error error error error error")
	go func() {
		for {
			LoggerExp.Error("error error error error error error error error error error error error error")
			time.Sleep(10 * time.Millisecond)
		}
	}()
	go func() {
		for {
			LoggerExp.Warn("warn warn warn warn warn warn warn warn warn warn warn warn warn")
			time.Sleep(10 * time.Millisecond)
		}
	}()
	go func() {
		for {
			LoggerExp.Info("info info info info info info info info info info info info info")
			time.Sleep(10 * time.Millisecond)
		}
	}()
	go func() {
		for {
			LoggerExp.Debug("debug debug debug debug debug debug debug debug debug debug debug debug debug")
			time.Sleep(10 * time.Millisecond)
		}
	}()
	// Let the writers run for 30s so rotation (RotateSize 1MiB) can trigger.
	time.Sleep(30 * time.Second)
}
// BenchmarkLogger_Info measures the throughput of LoggerExp.Info.
// NOTE(review): Dir is a machine-specific absolute path, so the benchmark
// silently returns on other machines after printing the init error.
func BenchmarkLogger_Info(b *testing.B) {
	logConf := LoggerConf{
		Dir:        "/Users/hehui/GolandProjects/src/github.com/cyongxue/magicbox/xhiris/xhlog/logs",
		Prefix:     "test",
		Level:      "info",
		RotateSize: 1 * 1024 * 1024,
	}
	if err := Init(&logConf); err != nil {
		fmt.Println("dh log init error: " + err.Error())
		return
	}
	for i := 0; i < b.N; i++ {
		LoggerExp.Info("info info info info info info info info info info info info infoinfo info info info info info info info info info info info info")
	}
}
|
package websocket
// BroadcastInfo pairs a message payload with the ids of the users it
// should be delivered to.
type BroadcastInfo struct {
	userIds []int       // recipient user ids
	message interface{} // arbitrary payload to broadcast
}
|
package flag
import (
"errors"
goflag "flag"
"strconv"
"strings"
)
// String map

// stringMapValue_t adapts a map[string]string to the flag.Value interface
// so repeated -flag key=value arguments accumulate into the map.
type stringMapValue_t map[string]string
// NewStringMapValue returns a new flag.Value backed by the specified map.
// If the specified map is nil, a new map[string]string is created in place
// so Set can write into it.
func NewStringMapValue(m *map[string]string) goflag.Value {
	if *m == nil {
		*m = make(map[string]string)
	}
	return (*stringMapValue_t)(m)
}
// Set parses s as "key=value" and stores the pair in the map. Both key
// and value must be non-empty; otherwise an error is returned so the
// flag package reports an invalid value.
func (m *stringMapValue_t) Set(s string) error {
	idx := strings.IndexRune(s, '=')
	// idx <= 0 covers "no separator" and "empty key"; the second clause
	// catches an empty value ("key=").
	if idx <= 0 || idx == len(s)-1 {
		// invalid value "{VALUE}" for flag -{FLAG}: {ERROR_MSG}
		return errors.New("Key and value must both be non-empty")
	}
	(*m)[s[:idx]] = s[idx+1:]
	return nil
}
func (m *stringMapValue_t) String() string { return "" }
// Int map

// intMapValue_t adapts a map[string]int to the flag.Value interface so
// repeated -flag key=number arguments accumulate into the map.
type intMapValue_t map[string]int
// NewIntMapValue returns a new flag.Value backed by the specified map.
// If the specified map is nil, a new map[string]int is created in place
// so Set can write into it.
func NewIntMapValue(m *map[string]int) goflag.Value {
	if *m == nil {
		*m = make(map[string]int)
	}
	return (*intMapValue_t)(m)
}
// Set parses s as "key=number" (any base strconv.ParseInt base-0 accepts)
// and stores the pair in the map. Both key and value must be non-empty;
// a parse failure is returned unchanged.
func (m *intMapValue_t) Set(s string) error {
	idx := strings.IndexRune(s, '=')
	// idx <= 0 covers "no separator" and "empty key"; the second clause
	// catches an empty value ("key=").
	if idx <= 0 || idx == len(s)-1 {
		// invalid value "{VALUE}" for flag -{FLAG}: {ERROR_MSG}
		return errors.New("Key and value must both be non-empty")
	}
	parsed, err := strconv.ParseInt(s[idx+1:], 0, 64)
	if err != nil {
		return err
	}
	(*m)[s[:idx]] = int(parsed)
	return nil
}
func (m *intMapValue_t) String() string { return "" }
|
package utils
import (
"fmt"
"net/url"
"strconv"
"strings"
"unicode"
"github.com/valyala/fasthttp"
)
// IsStringAbsURL checks a string can be parsed as a URL and that is IsAbs and if it can't it returns an error
// describing why.
func IsStringAbsURL(input string) (err error) {
	u, parseErr := url.ParseRequestURI(input)
	if parseErr != nil {
		return fmt.Errorf("could not parse '%s' as a URL", input)
	}
	if u.IsAbs() {
		return nil
	}
	return fmt.Errorf("the url '%s' is not absolute because it doesn't start with a scheme like 'http://' or 'https://'", input)
}
// IsStringAlphaNumeric reports whether every rune in input is a letter or
// a number; the empty string is vacuously alphanumeric.
func IsStringAlphaNumeric(input string) bool {
	for _, r := range input {
		if unicode.IsLetter(r) || unicode.IsNumber(r) {
			continue
		}
		return false
	}
	return true
}
// IsStringInSlice reports whether needle occurs (exact match) anywhere in
// haystack.
func IsStringInSlice(needle string, haystack []string) (inSlice bool) {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// IsStringInSliceF reports whether any item of haystack matches needle
// according to the provided isEqual comparison.
func IsStringInSliceF(needle string, haystack []string, isEqual func(needle, item string) bool) (inSlice bool) {
	for i := 0; i < len(haystack); i++ {
		if isEqual(needle, haystack[i]) {
			return true
		}
	}
	return false
}
// IsStringInSliceFold checks if a single string is in a slice of strings but uses strings.EqualFold
// so the comparison is case-insensitive.
func IsStringInSliceFold(needle string, haystack []string) (inSlice bool) {
	for i := range haystack {
		if strings.EqualFold(haystack[i], needle) {
			return true
		}
	}
	return false
}
// IsStringInSliceContains checks if a single string is in an array of strings.
// Note the match is substring-based: it reports whether needle CONTAINS any
// element of haystack (see the strings.Contains argument order).
func IsStringInSliceContains(needle string, haystack []string) (inSlice bool) {
	for _, candidate := range haystack {
		if strings.Contains(needle, candidate) {
			return true
		}
	}
	return false
}
// IsStringSliceContainsAll checks if the haystack contains all strings in the needles.
// An empty needles slice trivially yields true.
func IsStringSliceContainsAll(needles []string, haystack []string) (inSlice bool) {
	for i := range needles {
		if !IsStringInSlice(needles[i], haystack) {
			return false
		}
	}
	return true
}
// IsStringSliceContainsAny checks if the haystack contains any of the strings in the needles,
// delegating to IsStringSliceContainsAnyF with exact-match membership.
func IsStringSliceContainsAny(needles []string, haystack []string) (inSlice bool) {
	inSlice = IsStringSliceContainsAnyF(needles, haystack, IsStringInSlice)
	return inSlice
}
// IsStringSliceContainsAnyF checks if the haystack contains any of the strings in the needles
// using the supplied isInSlice membership func.
func IsStringSliceContainsAnyF(needles []string, haystack []string, isInSlice func(needle string, haystack []string) bool) (inSlice bool) {
	for i := range needles {
		if isInSlice(needles[i], haystack) {
			return true
		}
	}
	return false
}
// SliceString splits a string s into an array with each item being a max of int d
// d = denominator, n = numerator, q = quotient, r = remainder.
func SliceString(s string, d int) (array []string) {
n := len(s)
q := n / d
r := n % d
for i := 0; i < q; i++ {
array = append(array, s[i*d:i*d+d])
if i+1 == q && r != 0 {
array = append(array, s[i*d+d:])
}
}
return
}
// isStringSlicesDifferent reports whether a and b differ: either their lengths
// differ or some element of a is not found in b according to method.
// Note this treats the slices as sets of equal size, not as multisets.
func isStringSlicesDifferent(a, b []string, method func(s string, b []string) bool) (different bool) {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if method(a[i], b) {
			continue
		}
		return true
	}
	return false
}
// IsStringSlicesDifferent checks two slices of strings and on the first occurrence of a string item
// not existing in the other slice returns true, otherwise returns false. Comparison is case-sensitive.
func IsStringSlicesDifferent(a, b []string) (different bool) {
	different = isStringSlicesDifferent(a, b, IsStringInSlice)
	return different
}
// IsStringSlicesDifferentFold checks two slices of strings and on the first occurrence of a string item
// not existing in the other slice (case insensitive) returns true, otherwise returns false.
func IsStringSlicesDifferentFold(a, b []string) (different bool) {
	different = isStringSlicesDifferent(a, b, IsStringInSliceFold)
	return different
}
// IsURLInSlice returns true if the needle url.URL is in the []url.URL haystack.
func IsURLInSlice(needle url.URL, haystack []url.URL) (has bool) {
for i := 0; i < len(haystack); i++ {
if strings.EqualFold(needle.String(), haystack[i].String()) {
return true
}
}
return false
}
// StringSliceFromURLs returns a []string from a []url.URL.
func StringSliceFromURLs(urls []url.URL) []string {
result := make([]string, len(urls))
for i := 0; i < len(urls); i++ {
result[i] = urls[i].String()
}
return result
}
// URLsFromStringSlice returns a []url.URL from a []string.
// Entries that fail to parse are skipped rather than aborting the conversion,
// so the result may be shorter than the input.
func URLsFromStringSlice(urls []string) []url.URL {
	var out []url.URL
	for _, raw := range urls {
		parsed, err := url.Parse(raw)
		if err != nil {
			continue
		}
		out = append(out, *parsed)
	}
	return out
}
// OriginFromURL returns an origin url.URL given another url.URL.
func OriginFromURL(u *url.URL) (origin *url.URL) {
return &url.URL{
Scheme: u.Scheme,
Host: u.Host,
}
}
// StringSlicesDelta takes a before and after []string and compares them,
// returning the strings that were added and the strings that were removed.
func StringSlicesDelta(before, after []string) (added, removed []string) {
	contains := func(needle string, list []string) bool {
		for _, item := range list {
			if item == needle {
				return true
			}
		}
		return false
	}
	// Anything in before but not in after was removed.
	for _, s := range before {
		if !contains(s, after) {
			removed = append(removed, s)
		}
	}
	// Anything in after but not in before was added.
	for _, s := range after {
		if !contains(s, before) {
			added = append(added, s)
		}
	}
	return added, removed
}
// StringHTMLEscape escapes chars for a HTML body using the package-level
// htmlEscaper replacer.
func StringHTMLEscape(input string) (output string) {
	output = htmlEscaper.Replace(input)
	return output
}
// StringJoinDelimitedEscaped joins the values with the delimiter rune after
// backslash-escaping any occurrence of the delimiter inside each value.
// Used with StringSplitDelimitedEscaped.
func StringJoinDelimitedEscaped(value []string, delimiter rune) string {
	sep := string(delimiter)
	parts := make([]string, len(value))
	for i := range value {
		parts[i] = strings.ReplaceAll(value[i], sep, `\`+sep)
	}
	return strings.Join(parts, sep)
}
// StringSplitDelimitedEscaped splits a string with a specified rune delimiter after unescaping any instance of that
// string in the string slice that has been escaped. Used with StringJoinDelimitedEscaped.
func StringSplitDelimitedEscaped(value string, delimiter rune) (out []string) {
	var escape bool
	// The predicate carries backslash-escape state between calls.
	// NOTE(review): this relies on strings.FieldsFunc invoking the predicate
	// on runes in order — its contract does not guarantee call order, so
	// confirm before upgrading the toolchain.
	split := strings.FieldsFunc(value, func(r rune) bool {
		if r == '\\' {
			escape = !escape
		} else if escape && r != delimiter {
			escape = false
		}
		// Only an unescaped delimiter separates fields.
		return !escape && r == delimiter
	})
	// Undo the escaping applied by StringJoinDelimitedEscaped.
	for k, v := range split {
		split[k] = strings.ReplaceAll(v, "\\"+string(delimiter), string(delimiter))
	}
	return split
}
// JoinAndCanonicalizeHeaders joins the header names with the given separator,
// normalizing each name via fasthttp's canonical header-key form.
func JoinAndCanonicalizeHeaders(sep []byte, headers ...string) (joined []byte) {
	for i := range headers {
		if i > 0 {
			joined = append(joined, sep...)
		}
		joined = fasthttp.AppendNormalizedHeaderKey(joined, headers[i])
	}
	return joined
}
// IsURLHostComponent returns true if the provided url.URL that was parsed from a string to a url.URL via url.Parse is
// just a hostname. This is needed because of the way this function parses such strings.
func IsURLHostComponent(u url.URL) (isHostComponent bool) {
return u.Path != "" && u.Scheme == "" && u.Host == "" && u.RawPath == "" && u.Opaque == "" &&
u.RawQuery == "" && u.Fragment == "" && u.RawFragment == ""
}
// IsURLHostComponentWithPort returns true if the provided url.URL that was parsed from a string to a url.URL via
// url.Parse is just a hostname with a port. This is needed because of the way this function parses such strings.
func IsURLHostComponentWithPort(u url.URL) (isHostComponentWithPort bool) {
if u.Opaque != "" && u.Scheme != "" && u.Host == "" && u.Path == "" && u.RawPath == "" &&
u.RawQuery == "" && u.Fragment == "" && u.RawFragment == "" {
_, err := strconv.Atoi(u.Opaque)
return err == nil
}
return false
}
|
package main
// JSONResponse is the top-level payload holding a summoner's recent games.
type JSONResponse struct {
	Games []JSONGameResponse `json:"games"`
	SummonerId uint32 // no tag: encoding/json falls back to case-insensitive field-name matching
}
// JSONGameResponse describes a single played game: the other participants,
// the per-game stats, and identifying/classifying metadata. Fields carry no
// json tags, so they are matched case-insensitively against the payload.
type JSONGameResponse struct {
	FellowPlayers []JSONPlayerResponse // other players in the same game
	Stats JSONGameStatsResponse
	GameId uint64
	CreateDate uint64 // NOTE(review): presumably an epoch timestamp — confirm against the API
	TeamId uint32
	ChampionId uint32
	GameMode string
	GameType string
	GameSubType string
}
// JSONGameStatsResponse mirrors the per-game statistics object of the game
// API. Fields carry no json tags and rely on encoding/json's case-insensitive
// name matching.
type JSONGameStatsResponse struct {
	Assists uint32
	ChampionsKilled uint32
	DamageDealtPlayer uint32
	Gold uint32
	GoldEarned uint32
	GoldSpent uint32
	Level uint32
	MagicDamageDealtPlayer uint32
	MinionsKilled uint32
	MinionsDenied uint32
	NeutralMinionsKilled uint32
	NeutralMinionsKilledEnemyJungle uint32
	NeutralMinionsKilledYourJungle uint32
	NumDeaths uint32
	NumItemsBought uint32
	PhysicalDamageDealtPlayer uint32
	SightWardsBought uint32
	SuperMonstersKilled uint32
	TimePlayed uint32
	TotalDamageDealt uint32
	TotalDamageDealtToChampions uint32
	TotalDamageTaken uint32
	TurretsKilled uint32
	VisionWardsBought uint32
	WardsKilled uint32
	WardPlaced uint32
	Win bool
}
// JSONPlayerResponse identifies one participant of a game: who they are,
// which team they were on, and which champion they played.
type JSONPlayerResponse struct {
	SummonerId uint32
	TeamId uint32
	ChampionId uint32
}
// JSONLeagueResponse describes one ranked league a participant belongs to,
// including its queue, tier and the per-member entries.
type JSONLeagueResponse struct {
	Queue string
	ParticipantId string
	Entries []JSONLeagueEntryResponse
	Tier string
}
// JSONLeagueEntryResponse is a single member's standing within a league:
// their division and current league points.
type JSONLeagueEntryResponse struct {
	LeaguePoints uint32
	Division string
	PlayerOrTeamId string
}
|
// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package handlers
import (
"net/http"
"time"
)
// MakeNotifierWrapper wraps a http.HandlerFunc in an interceptor to pass to HTTPNotifier.
// The interceptor records the response status so each notifier can report it
// along with the elapsed handling time.
func MakeNotifierWrapper(next http.HandlerFunc, notifiers []HTTPNotifier) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		started := time.Now()
		interceptor := newWriteInterceptor(w)
		next(&interceptor, r)

		requestURL := r.URL.String()
		for _, notifier := range notifiers {
			// Each notifier sees the elapsed time measured at its own call.
			notifier.Notify(r.Method, requestURL, requestURL, interceptor.Status(), time.Since(started))
		}
	}
}
func newWriteInterceptor(w http.ResponseWriter) writeInterceptor {
return writeInterceptor{
w: w,
}
}
type writeInterceptor struct {
CapturedStatusCode int
w http.ResponseWriter
}
func (c *writeInterceptor) Status() int {
if c.CapturedStatusCode == 0 {
return http.StatusOK
}
return c.CapturedStatusCode
}
func (c *writeInterceptor) Header() http.Header {
return c.w.Header()
}
func (c *writeInterceptor) Write(data []byte) (int, error) {
return c.w.Write(data)
}
func (c *writeInterceptor) WriteHeader(code int) {
c.CapturedStatusCode = code
c.w.WriteHeader(code)
}
|
package cfg
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewConfig verifies NewConfig records the given version string.
func TestNewConfig(t *testing.T) {
	conf := NewConfig("test", "best-project", "docker-compose", "")
	assert.Equal(t, conf.Version, "test")
}
// TestWriteFailed verifies that Write fails when no destination is given.
func TestWriteFailed(t *testing.T) {
	conf := NewConfig("test", "best-project", "docker-compose", "")
	err := conf.Write("")
	assert.NotNil(t, err)
	// assert.Contains expects the container first and the element second;
	// the arguments were previously reversed, which made the check vacuous
	// (it asked whether the literal contained the full error message).
	assert.Contains(t, err.Error(), "nothing to write to")
}
// TestWriteToPath verifies Write persists the config to the given file path.
func TestWriteToPath(t *testing.T) {
	conf := NewConfig("test", "best-project", "docker-compose", "")
	cwd, err := os.Getwd()
	assert.Nil(t, err)
	target := filepath.Join(cwd, "/test-config.toml")
	defer os.RemoveAll(target)

	assert.Nil(t, conf.Write(target))

	contents, err := ioutil.ReadFile(target)
	assert.Nil(t, err)
	written := string(contents)
	assert.Contains(t, written, "best-project")
	assert.Contains(t, written, "docker-compose")
}
// TestWriteToWritersAndFile verifies Write mirrors output to the file and to
// every additional writer passed alongside it.
func TestWriteToWritersAndFile(t *testing.T) {
	conf := NewConfig("test", "best-project", "docker-compose", "")
	cwd, err := os.Getwd()
	assert.Nil(t, err)
	target := filepath.Join(cwd, "/test-config.toml")
	defer os.RemoveAll(target)

	var out1, out2 bytes.Buffer
	assert.Nil(t, conf.Write(target, &out1, &out2))

	contents, err := ioutil.ReadFile(target)
	assert.Nil(t, err)
	written := string(contents)
	assert.Contains(t, written, "best-project")
	assert.Contains(t, written, "docker-compose")
	assert.Contains(t, out1.String(), "best-project")
	assert.Contains(t, out2.String(), "best-project")
}
// TestConfigGetRemote verifies lookup of a remote by name, including the
// not-found case.
func TestConfigGetRemote(t *testing.T) {
	config := &Config{Remotes: make([]*RemoteVPS, 0)}
	testRemote := &RemoteVPS{
		Name: "test",
		IP: "12343",
		User: "bobheadxi",
		PEM: "/some/pem/file",
		SSHPort: "22",
		Daemon: &DaemonConfig{Port: "8080"},
	}
	config.AddRemote(testRemote)

	remote, found := config.GetRemote("test")
	assert.True(t, found)
	assert.Equal(t, testRemote, remote)

	_, found = config.GetRemote("what")
	assert.False(t, found)
}
// TestConfigRemoveRemote verifies removal of a remote by name leaves the
// other remotes intact and reports false for unknown names.
func TestConfigRemoveRemote(t *testing.T) {
	config := &Config{Remotes: make([]*RemoteVPS, 0)}
	testRemote := &RemoteVPS{
		Name: "test",
		IP: "12343",
		User: "bobheadxi",
		PEM: "/some/pem/file",
		SSHPort: "22",
		Daemon: &DaemonConfig{Port: "8080"},
	}
	secondRemote := &RemoteVPS{
		Name: "test2",
		IP: "12343",
		User: "bobheadxi234",
		PEM: "/some/pem/file234",
		SSHPort: "222",
		Daemon: &DaemonConfig{Port: "80801"},
	}
	config.AddRemote(testRemote)
	config.AddRemote(secondRemote)

	assert.True(t, config.RemoveRemote("test2"))
	assert.False(t, config.RemoveRemote("what"))

	remote, found := config.GetRemote("test")
	assert.True(t, found)
	assert.Equal(t, testRemote, remote)
}
|
package cmd
import (
"fmt"
"strings"
"gopkg.in/kyokomi/emoji.v1"
"github.com/urfave/cli"
)
// AddCmd describes the "add" subcommand, which registers a new emoji/action
// pair for commit messages.
func AddCmd() cli.Command {
	return cli.Command{
		Name: "add",
		Aliases: []string{"a"},
		Usage: "Add emoji commit message",
		Action: add,
	}
}
// add prompts for an emoji and an action, validates the emoji against the
// known code map, and appends the new message pair to the stored list.
// Validation failures are reported to the user but are not errors.
func add(c *cli.Context) error {
	input := scan("emoji", "")
	if input == "" {
		fmt.Println("emoji must be input")
		return nil
	}
	codes := emoji.CodeMap()
	// The code map is keyed by ":name:"; accept either form as input.
	var key string
	switch {
	case strings.HasPrefix(input, ":") && strings.HasSuffix(input, ":"):
		key = input
		input = strings.Trim(input, ":")
	default:
		key = ":" + input + ":"
	}
	if _, known := codes[key]; !known {
		fmt.Println("The given emoji does not exist")
		return nil
	}
	action := scan("action", "")
	if action == "" {
		fmt.Println("action must be input")
		return nil
	}
	msg := NewMsg(input, action)
	msgs, err := getMsgs()
	if err != nil {
		return err
	}
	if err := updateMsgs(append(msgs, msg)); err != nil {
		return err
	}
	fmt.Printf("%s %s added", msg.Emoji, msg.Action)
	return nil
}
|
//~7
//~-1
//~0
//~12
//~+7.000000e+000
//~-1.000000e+000
//~+7.500000e-001
//~+1.200000e+001
//~229
//~-1
//~0
//~13110
//~HelloWorld
//~true
package main
// main exercises the basic operators on each elementary type and prints the
// results; the expected output is encoded in the //~ header comments.
func main() {
	i1, i2 := 3, 4
	f1, f2 := 3.0, 4.0
	r1, r2 := 'r', 's'
	s1, s2 := "Hello", "World"
	b1, b2 := true, false

	// Integer arithmetic (integer division truncates, so 3/4 prints 0).
	println(i1 + i2)
	println(i1 - i2)
	println(i1 / i2)
	println(i1 * i2)

	// Floating-point arithmetic; println renders float64 in exponent form.
	println(f1 + f2)
	println(f1 - f2)
	println(f1 / f2)
	println(f1 * f2)

	// Runes are integer code points ('r' == 114, 's' == 115).
	println(r1 + r2)
	println(r1 - r2)
	println(r1 / r2)
	println(r1 * r2)

	// String concatenation and boolean or.
	println(s1 + s2)
	println(b1 || b2)
}
|
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strings"
"syscall"
)
// main dispatches between i3cat's three modes: the default mode that cats
// command outputs to i3bar, "decode" (extract a field from a click event),
// and "encode" (build an i3bar block JSON payload).
func main() {
	// Flags for the default (cat) mode.
	var debugFile string
	var logFile string
	var cmdsFile string
	var header Header
	stdFlagSet := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
	stdFlagSet.StringVar(&debugFile, "debug-file", "", "Outputs JSON to this file as well; for debugging what is sent to i3bar.")
	stdFlagSet.StringVar(&logFile, "log-file", "", "Logs i3cat events in this file. Defaults to STDERR")
	stdFlagSet.StringVar(&cmdsFile, "cmd-file", "$HOME/.i3/i3cat.conf", "File listing of the commands to run. It will read from STDIN if - is provided")
	stdFlagSet.IntVar(&header.Version, "header-version", 1, "The i3bar header version")
	stdFlagSet.BoolVar(&header.ClickEvents, "header-clickevents", false, "The i3bar header click_events")
	// Flags for the "decode" subcommand.
	decFlagSet := flag.NewFlagSet("decode", flag.ExitOnError)
	var decField string
	// Flags for the "encode" subcommand; each option maps onto a Block field.
	encFlagSet := flag.NewFlagSet("encode", flag.ExitOnError)
	var block Block
	var singleBlock bool
	encFlagSet.BoolVar(&singleBlock, "single", false, "If true, the block will not be in a JSON array. This allows to combine other blocks before sending to i3bar.")
	encFlagSet.StringVar(&block.ShortText, "short-text", "", "the block.short_text field to encode.")
	encFlagSet.StringVar(&block.Color, "color", "", "the block.color field to encode.")
	encFlagSet.IntVar(&block.MinWidth, "min-width", 0, "the block.min_width field to encode.")
	encFlagSet.StringVar(&block.Align, "align", "", "the block.align field to encode.")
	encFlagSet.StringVar(&block.Name, "name", "", "the block.name field to encode.")
	encFlagSet.StringVar(&block.Instance, "instance", "", "the block.instance field to encode.")
	encFlagSet.BoolVar(&block.Urgent, "urgent", false, "the block.urgent field to encode.")
	encFlagSet.BoolVar(&block.Separator, "separator", true, "the block.separator field to encode.")
	encFlagSet.IntVar(&block.SeparatorBlockWidth, "separator-block-width", 0, "the block.separator_block_width field to encode.")
	encFlagSet.StringVar(&block.Background, "background", "", "the block.background field to encode.")
	encFlagSet.StringVar(&block.Border, "border", "", "the block.border field to encode.")
	encFlagSet.StringVar(&block.Markup, "markup", "", "the block.markup field to encode.")
	// usage prints the combined help text of all three flag sets.
	usage := func() {
		fmt.Fprintf(os.Stderr, `Usage: i3cat [COMMAND] [ARGS]
If COMMAND is not specified, i3cat will print i3bar blocks to stdout.
`)
		stdFlagSet.PrintDefaults()
		fmt.Fprintf(os.Stderr, `
decode: FIELD
Reads STDIN and decodes a JSON payload representing a click event; typically sent by i3bar.
It will print the FIELD from the JSON structure to stdout.
Possible fields are name, instance, button, x, y.
`)
		decFlagSet.PrintDefaults()
		fmt.Fprintf(os.Stderr, `
encode: [OPTS] [FULL_TEXT...]
Concats FULL_TEXT arguments, separated with spaces, and encodes it as an i3bar block JSON payload.
If FULL_TEXT is -, it will read from STDIN instead.
The other fields of an i3bar block are optional and specified with the following options:
`)
		encFlagSet.PrintDefaults()
	}
	// Dispatch on the first positional argument.
	switch {
	case len(os.Args) > 1 && os.Args[1] == "decode":
		if err := decFlagSet.Parse(os.Args[2:]); err != nil {
			log.Print(err)
			usage()
			os.Exit(2)
		}
		if decFlagSet.NArg() == 0 {
			usage()
			os.Exit(2)
		}
		decField = decFlagSet.Arg(0)
		if err := DecodeClickEvent(os.Stdout, os.Stdin, decField); err != nil {
			log.Fatal(err)
		}
	case len(os.Args) > 1 && os.Args[1] == "encode":
		if err := encFlagSet.Parse(os.Args[2:]); err != nil {
			log.Print(err)
			usage()
			os.Exit(2)
		}
		// The full text comes from the remaining args, or from STDIN when
		// there are none or the single argument is "-".
		switch {
		case encFlagSet.NArg() == 0:
			fallthrough
		case encFlagSet.NArg() == 1 && encFlagSet.Arg(0) == "-":
			fullText, err := ioutil.ReadAll(os.Stdin)
			if err != nil {
				log.Fatal(err)
			}
			block.FullText = string(fullText)
		case encFlagSet.NArg() > 0:
			block.FullText = strings.Join(encFlagSet.Args(), " ")
		}
		if err := EncodeBlock(os.Stdout, block, singleBlock); err != nil {
			log.Fatal(err)
		}
	default:
		if err := stdFlagSet.Parse(os.Args[1:]); err != nil {
			log.Print(err)
			usage()
			os.Exit(2)
		}
		if stdFlagSet.NArg() > 0 {
			usage()
			os.Exit(2)
		}
		CatBlocksToI3Bar(cmdsFile, header, logFile, debugFile)
	}
}
// EncodeBlock writes block to w as JSON. Unless single is set, the block is
// wrapped in a one-element array, as expected when concatenating blocks for
// i3bar.
func EncodeBlock(w io.Writer, block Block, single bool) error {
	encoder := json.NewEncoder(w)
	if single {
		return encoder.Encode(block)
	}
	return encoder.Encode([]Block{block})
}
// DecodeClickEvent reads one JSON click event from r and writes the requested
// field (name, instance, button, x or y) to w followed by a newline. An
// unrecognized field name yields an error.
func DecodeClickEvent(w io.Writer, r io.Reader, field string) error {
	var ce ClickEvent
	if err := json.NewDecoder(r).Decode(&ce); err != nil {
		return err
	}
	var value interface{}
	switch field {
	case "name":
		value = ce.Name
	case "instance":
		value = ce.Instance
	case "button":
		value = ce.Button
	case "x":
		value = ce.X
	case "y":
		value = ce.Y
	default:
		return fmt.Errorf("unknown property %s", field)
	}
	fmt.Fprintln(w, value)
	return nil
}
// CatBlocksToI3Bar runs every command listed in cmdsFile, aggregates the
// blocks they emit and streams them to stdout in i3bar JSON format until a
// SIGINT/SIGTERM terminates the process. logFile redirects logging from
// stderr and debugFile, when set, mirrors the JSON output for debugging.
func CatBlocksToI3Bar(cmdsFile string, header Header, logFile string, debugFile string) {
	// Read and parse commands to run. "-" means read the list from stdin;
	// otherwise the path may contain environment variables like $HOME.
	var cmdsReader io.ReadCloser
	if cmdsFile == "-" {
		cmdsReader = ioutil.NopCloser(os.Stdin)
	} else {
		f, err := os.Open(os.ExpandEnv(cmdsFile))
		if err != nil {
			log.Fatal(err)
		}
		cmdsReader = f
	}
	// One command per line; blank lines and #-comments are ignored.
	var commands []string
	scanner := bufio.NewScanner(cmdsReader)
	for scanner.Scan() {
		cmd := strings.TrimSpace(scanner.Text())
		if cmd != "" && !strings.HasPrefix(cmd, "#") {
			commands = append(commands, cmd)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	if err := cmdsReader.Close(); err != nil {
		log.Fatal(err)
	}
	// Init log output.
	if logFile != "" {
		f, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			_ = f.Close()
		}()
		log.SetOutput(f)
	}
	// Init where i3cat will print its output.
	var out io.Writer
	if debugFile != "" {
		f, err := os.OpenFile(debugFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			_ = f.Close()
		}()
		out = io.MultiWriter(os.Stdout, f)
	} else {
		out = os.Stdout
	}
	// We print the header of i3bar: the header object followed by the
	// opening bracket of a never-closed JSON array of block arrays.
	hb, err := json.Marshal(header)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintf(out, "%s\n[\n", hb)
	// Listen for click events sent from i3bar
	cel := NewClickEventsListener(os.Stdin)
	go cel.Listen()
	// Create the block aggregator and start the commands
	blocksCh := make(chan *BlockAggregate)
	var cmdios []*CmdIO
	ba := NewBlockAggregator(out)
	for _, c := range commands {
		cmdio, err := NewCmdIO(c)
		if err != nil {
			log.Fatal(err)
		}
		cmdios = append(cmdios, cmdio)
		if err := cmdio.Start(blocksCh); err != nil {
			log.Fatal(err)
		} else {
			log.Printf("Starting command: %s", c)
		}
	}
	ba.CmdIOs = cmdios
	go ba.Aggregate(blocksCh)
	ceCh := cel.Notify()
	go ba.ForwardClickEvents(ceCh)
	// Listen for worthy signals; the main goroutine blocks here for the
	// lifetime of the process.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	for {
		s := <-c
		switch s {
		case syscall.SIGTERM:
			fallthrough
		case os.Interrupt:
			// Kill all processes on interrupt
			log.Println("SIGINT or SIGTERM received: terminating all processes...")
			for _, cmdio := range ba.CmdIOs {
				if err := cmdio.Close(); err != nil {
					log.Println(err)
				}
			}
			os.Exit(0)
		}
	}
}
|
package chain
import (
"fmt"
"os"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/kv/dict"
"github.com/iotaledger/wasp/packages/sctransaction"
"github.com/iotaledger/wasp/packages/vm/core/blob"
"github.com/iotaledger/wasp/tools/wasp-cli/config"
"github.com/iotaledger/wasp/tools/wasp-cli/log"
"github.com/iotaledger/wasp/tools/wasp-cli/util"
)
// storeBlobCmd uploads a blob built from the type/field/value command-line
// arguments to the current chain, without waiting for confirmation.
func storeBlobCmd(args []string) {
	if len(args) == 0 {
		log.Fatal("Usage: %s chain store-blob [type field type value ...]", os.Args[0])
	}
	fieldValues := util.EncodeParams(args)
	uploadBlob(fieldValues, false)
}
// uploadBlob submits fieldValues as a blob inside an SC transaction and
// returns the blob hash. The named result is assigned inside the closure
// before WithSCTransaction returns, which is why bare returns are used here.
func uploadBlob(fieldValues dict.Dict, forceWait bool) (hash hashing.HashValue) {
	util.WithSCTransaction(func() (tx *sctransaction.Transaction, err error) {
		hash, tx, err = Client().UploadBlob(fieldValues, config.CommitteeApi(chainCommittee()), uploadQuorum)
		if err == nil {
			log.Printf("uploaded blob to chain -- hash: %s", hash)
		}
		return
	}, forceWait)
	return
}
// showBlobCmd prints, as JSON, every field of the blob whose hash is given
// (base58-encoded) as the single argument.
func showBlobCmd(args []string) {
	if len(args) != 1 {
		log.Fatal("Usage: %s chain show-blob <hash>", os.Args[0])
	}
	hash := util.ValueFromString("base58", args[0])
	info, err := SCClient(blob.Interface.Hname()).CallView(blob.FuncGetBlobInfo, codec.MakeDict(map[string]interface{}{
		blob.ParamHash: hash,
	}))
	log.Check(err)
	values := dict.New()
	// Fetch each field's content individually and collect it for printing.
	for field := range info {
		content, err := SCClient(blob.Interface.Hname()).CallView(blob.FuncGetBlobField, codec.MakeDict(map[string]interface{}{
			blob.ParamHash: hash,
			blob.ParamField: []byte(field),
		}))
		log.Check(err)
		values.Set(field, content[blob.ParamBytes])
	}
	util.PrintDictAsJson(values)
}
// listBlobsCmd prints a table of all blobs registered in the current chain
// together with their sizes.
func listBlobsCmd(args []string) {
	ret, err := SCClient(blob.Interface.Hname()).CallView(blob.FuncListBlobs, nil)
	log.Check(err)
	blobs, err := blob.DecodeSizesMap(ret)
	log.Check(err)
	// NOTE(review): len(ret) counts raw view-result entries — presumably the
	// same as len(blobs); confirm against DecodeSizesMap.
	log.Printf("Total %d blob(s) in chain %s\n", len(ret), GetCurrentChainID())
	header := []string{"hash", "size"}
	// Size the table by the decoded map, which is what the loop iterates;
	// sizing by len(ret) risked a mismatch (and out-of-range writes) if the
	// raw result ever held entries that do not decode to a blob/size pair.
	rows := make([][]string, 0, len(blobs))
	for k, size := range blobs {
		hash, _, err := codec.DecodeHashValue([]byte(k))
		log.Check(err)
		rows = append(rows, []string{hash.String(), fmt.Sprintf("%d", size)})
	}
	log.PrintTable(header, rows)
}
|
// findMedianSortedArrays returns the median of the two sorted slices by
// locating the middle order statistic(s) with findKth.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	total := len(nums1) + len(nums2)
	if total%2 == 1 {
		return float64(findKth(nums1, nums2, total/2))
	}
	return float64(findKth(nums1, nums2, total/2)+findKth(nums1, nums2, total/2-1)) / 2
}

// findKth returns the element with zero-based rank kth in the merged order of
// the two sorted slices, discarding roughly half of the shorter slice on each
// recursive call.
func findKth(nums1 []int, nums2 []int, kth int) int {
	// Keep nums1 as the shorter slice.
	if len(nums1) > len(nums2) {
		nums1, nums2 = nums2, nums1
	}
	if len(nums1) == 0 {
		return nums2[kth]
	}
	if kth == 0 {
		return min(nums1[0], nums2[0])
	}
	mid := min(len(nums1)-1, (kth-1)/2)
	if kth >= mid {
		// Drop the smaller prefix; it cannot contain the kth element.
		if nums1[mid] < nums2[mid] {
			return findKth(nums1[mid+1:], nums2, kth-mid-1)
		}
		return findKth(nums1, nums2[mid+1:], kth-mid-1)
	}
	// kth < mid: the answer lies before position mid of the larger side.
	if nums1[mid] < nums2[mid] {
		return findKth(nums1, nums2[:mid], kth)
	}
	return findKth(nums1[:mid], nums2, kth)
}

// min returns the smaller of two ints.
func min(n1, n2 int) int {
	if n1 < n2 {
		return n1
	}
	return n2
}
|
package main
// GeoQuadTree used for indexing GeoShapes
type GeoQuadTree struct {
	FeatureCount int64 // number of features indexed in the tree
	Root *GeoQuadTreeNode // top-level node covering the whole indexed area
}
// GeoQuadTreeNode used for holding data: one quadrant of the plane with four
// optional child quadrants and the features stored at this level.
type GeoQuadTreeNode struct {
	Alpha *GeoQuadTreeNode // top right
	Beta *GeoQuadTreeNode // top left
	Gamma *GeoQuadTreeNode // bottom left
	Delta *GeoQuadTreeNode // bottom right
	Featues []Feature // NOTE(review): name is a typo for "Features"; renaming would break callers
	Bounds Bounds // [0] = south-west corner, [1] = north-east corner (see CheckBounds)
}
// Add adds a Feature to a GeoQuadTree.
// NOTE(review): currently a stub — it performs no insertion and always
// returns the root node.
func (q *GeoQuadTree) Add(f *Feature) *GeoQuadTreeNode {
	// TODO write this
	return q.Root
}
// CheckBounds reports whether the feature f lies entirely within q's bounds,
// i.e. both corners of f's bounding box fall inside q's south-west/north-east
// box. (The previous comment referred to a nonexistent parameter `a`.)
func (q *GeoQuadTreeNode) CheckBounds(f *Feature) bool {
	featureSW, featureNE := f.Bounds[0], f.Bounds[1]
	nodeSW, nodeNE := q.Bounds[0], q.Bounds[1]
	swInside := featureSW.X >= nodeSW.X && featureSW.Y >= nodeSW.Y &&
		featureSW.X <= nodeNE.X && featureSW.Y <= nodeNE.Y
	neInside := featureNE.X <= nodeNE.X && featureNE.Y <= nodeNE.Y &&
		featureNE.X >= nodeSW.X && featureNE.Y >= nodeSW.Y
	return swInside && neInside
}
|
package health_check
import (
"github.com/gorilla/mux"
"user-event-store/app/endpoint/health_check/controller"
)
// RegisterRoutes mounts the health-check endpoint on the given router:
// GET /health responds via controller.SendHeartbeat.
func RegisterRoutes(router *mux.Router) {
	health := router.PathPrefix("/health").Subrouter()
	health.Methods("GET").HandlerFunc(controller.SendHeartbeat)
}
|
package candevice
import (
"encoding/json"
"io/ioutil"
"github.com/ghetzel/canibus/api"
"github.com/ghetzel/canibus/logger"
// "strconv"
"time"
)
// Ring-buffer sizing for replayed packets.
const (
	MAX_BUFFER = 10000 // Packets in buffer
	MAX_APPENDS = 1000 // Max packets returned at one time
)
// Simulator replays CAN packets from a JSON capture file into a fixed-size
// ring buffer, implementing the device interface without real hardware.
type Simulator struct {
	PacketFile string // path to the JSON capture to replay
	SimPackets []api.CanData // packets loaded from PacketFile
	Packets [MAX_BUFFER]api.CanData // ring buffer of replayed packets
	HackSession api.HackSession
	id int
	sniffEnabled bool
	packetIdx int // next write position in the ring buffer
	seqNo int // monotonically increasing sequence number
}

// SetPacketFile records the path of the JSON capture to replay.
func (sim *Simulator) SetPacketFile(packets string) {
	sim.PacketFile = packets
}

// Init loads the capture configured via SetPacketFile and reports success.
func (sim *Simulator) Init() bool {
	logger.Log("Loading packets from " + sim.PacketFile)
	return sim.LoadCanDataFromFile(sim.PacketFile) == nil
}

// LoadCanDataFromFile reads the given JSON file and unmarshals it into
// SimPackets.
// Fix: the file parameter was previously ignored in favour of sim.PacketFile.
func (sim *Simulator) LoadCanDataFromFile(file string) error {
	packets, err := ioutil.ReadFile(file)
	if err != nil {
		logger.Log("Could not open Simulator data file")
		return err
	}
	var canPackets []api.CanData
	if err = json.Unmarshal(packets, &canPackets); err != nil {
		logger.Log("Problem with json unmarshal sim data")
		return err
	}
	sim.SimPackets = canPackets
	return nil
}

// DeviceDesc describes the device; for a simulator this is the capture path.
func (sim *Simulator) DeviceDesc() string {
	return sim.PacketFile
}

// DeviceType identifies this device implementation.
func (sim *Simulator) DeviceType() string {
	return "Simulator"
}

// GetHackSession returns the session currently attached to this device.
func (sim *Simulator) GetHackSession() api.HackSession {
	return sim.HackSession
}

// SetHackSession attaches a session to this device.
func (sim *Simulator) SetHackSession(hsession api.HackSession) {
	sim.HackSession = hsession
}

// GetId returns the device id.
func (sim *Simulator) GetId() int {
	return sim.id
}

// SetId assigns the device id.
func (sim *Simulator) SetId(id int) {
	sim.id = id
}

// GetYear returns the vehicle year; a simulator has none.
func (sim *Simulator) GetYear() string {
	return ""
}

// GetMake returns the vehicle make; a simulator has none.
func (sim *Simulator) GetMake() string {
	return ""
}

// GetModel returns the vehicle model; a simulator has none.
func (sim *Simulator) GetModel() string {
	return ""
}

// StartSniffing resets the replay state and starts feeding packets into the
// ring buffer on a background goroutine.
func (sim *Simulator) StartSniffing() {
	sim.sniffEnabled = true
	sim.packetIdx = 0
	sim.seqNo = 0
	go sim.processPackets()
}

// StopSniffing signals the replay goroutine to stop.
func (sim *Simulator) StopSniffing() {
	sim.sniffEnabled = false
}

// addPacket copies a capture packet into the ring buffer, stamping it with a
// fresh sequence number and the current wall-clock time.
func (sim *Simulator) addPacket(simPkt api.CanData) {
	pkt := api.CanData{}
	pkt.SeqNo = sim.seqNo
	sim.seqNo += 1
	pkt.Src = simPkt.Src
	// Fix: time layouts must use Go's reference time (Jan 2 15:04:05 2006).
	// The previous literal "10:00:00pm (EST)" was not a valid layout and
	// produced garbled timestamps.
	pkt.AbsTime = time.Now().Format("3:04:05pm (MST)")
	pkt.RelTime = simPkt.RelTime
	pkt.Status = simPkt.Status
	pkt.Error = simPkt.Error
	pkt.Transmit = simPkt.Transmit
	pkt.Desc = simPkt.Desc
	pkt.Network = simPkt.Network
	pkt.Node = simPkt.Node
	pkt.ArbID = simPkt.ArbID
	pkt.Remote = simPkt.Remote
	pkt.Extended = simPkt.Extended
	pkt.B1 = simPkt.B1
	pkt.B2 = simPkt.B2
	pkt.B3 = simPkt.B3
	pkt.B4 = simPkt.B4
	pkt.B5 = simPkt.B5
	pkt.B6 = simPkt.B6
	pkt.B7 = simPkt.B7
	pkt.B8 = simPkt.B8
	pkt.Value = simPkt.Value
	pkt.Trigger = simPkt.Trigger
	pkt.Signals = simPkt.Signals
	sim.Packets[sim.packetIdx] = pkt
	sim.packetIdx += 1
	if sim.packetIdx >= MAX_BUFFER {
		sim.packetIdx = 0
	}
}

// processPackets loops over SimPackets (wrapping around at the end) and
// feeds one packet into the ring buffer every 100ms until StopSniffing.
func (sim *Simulator) processPackets() {
	total := len(sim.SimPackets)
	simIdx := 0
	//startedAt := time.Now()
	// ported from old canibusd
	//relDiff := float64(startedAt.Second() + (startedAt.Nanosecond() / 1000000000.0))
	for sim.sniffEnabled == true {
		if simIdx >= total || simIdx < 0 {
			simIdx = 0
		}
		/*
		pktRelTime, cerr := strconv.ParseFloat(sim.SimPackets[simIdx].RelTime, 32)
		if cerr == nil {
			if sim.SimPackets[simIdx].SeqNo != 0 && relDiff > pktRelTime {
				relDiff -= pktRelTime
				sim.addPacket(sim.SimPackets[simIdx])
				simIdx += 1
			}
		}
		*/
		// FIXME Just temp code to test interface
		sim.SimPackets[simIdx].Src = "Sim"
		sim.addPacket(sim.SimPackets[simIdx])
		simIdx += 1
		time.Sleep(100 * time.Millisecond)
	}
}

// GetPacketsFrom returns up to MAX_APPENDS packets starting at ring index
// idx, together with the current write index to resume from next call.
func (sim *Simulator) GetPacketsFrom(idx int) ([]api.CanData, int) {
	var pkts []api.CanData
	appends := 0
	for {
		if idx >= MAX_BUFFER || idx < 0 {
			idx = 0
		}
		if idx == sim.packetIdx {
			// Caught up with the writer.
			return pkts, sim.packetIdx
		}
		pkts = append(pkts, sim.Packets[idx])
		idx += 1
		appends += 1
		// Fix: the previous `>` comparison returned MAX_APPENDS+1 packets.
		if appends >= MAX_APPENDS {
			return pkts, sim.packetIdx
		}
	}
}

// GetPacketIdx returns the current write position in the ring buffer.
func (sim *Simulator) GetPacketIdx() int {
	return sim.packetIdx
}

// InjectPacket places a caller-supplied packet directly into the ring buffer.
func (sim *Simulator) InjectPacket(pkt api.CanData) error {
	sim.addPacket(pkt)
	return nil
}
|
// This utility decrypts the passwords that Windows EC2 instances generate.
//
// When starting a Windows VM on EC2, after some time an encrypted password is
// written to the VM's log. The password is encrypted using the SSH public key
// configured for that VM. The Amazon web interface can decrypt the password -
// if you paste in your private key. Given that that's insane, this utility
// exists to decrypt the base64-encoded password given an SSH private key. It
// can handle encrypted private keys.
package main
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"code.google.com/p/go.crypto/ssh/terminal"
)
// main decrypts an EC2 Windows password: it base64-decodes the second
// argument and RSA-decrypts it (PKCS#1 v1.5) with the private key read from
// the PEM file named by the first argument, prompting for a passphrase when
// the key is encrypted.
func main() {
	if len(os.Args) != 3 {
		fmt.Fprintf(os.Stderr, "Usage: %s <path to private key> <encrypted password>\n", os.Args[0])
		os.Exit(1)
	}
	pemPath := os.Args[1]
	encryptedPasswdB64 := os.Args[2]
	encryptedPasswd, err := base64.StdEncoding.DecodeString(encryptedPasswdB64)
	if err != nil {
		panic(err)
	}
	pemBytes, err := ioutil.ReadFile(pemPath)
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		// Fix: pem.Decode returns nil when no PEM block is found, which
		// previously caused an opaque nil-pointer panic below.
		fmt.Fprintf(os.Stderr, "no PEM block found in %s\n", pemPath)
		os.Exit(1)
	}
	var asn1Bytes []byte
	if _, ok := block.Headers["DEK-Info"]; ok {
		// Encrypted key: ask for the passphrase without echoing it.
		fmt.Printf("Encrypted private key. Please enter passphrase: ")
		password, err := terminal.ReadPassword(0)
		fmt.Printf("\n")
		if err != nil {
			panic(err)
		}
		asn1Bytes, err = x509.DecryptPEMBlock(block, password)
		if err != nil {
			panic(err)
		}
	} else {
		asn1Bytes = block.Bytes
	}
	key, err := x509.ParsePKCS1PrivateKey(asn1Bytes)
	if err != nil {
		panic(err)
	}
	// A random source is not needed for decryption, hence the nil reader.
	out, err := rsa.DecryptPKCS1v15(nil, key, encryptedPasswd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Decrypted password: %s\n", string(out))
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-14 09:13
# @File : base.go
# @Description :
链表核心知识点:
1. nil退出条件处理
2. dummy node 哑巴节点
3. 快慢指针
3.1 找到链表的中间节点
4. 链表插入
5. 链表删除
6. 反转链表
7. 合并链表
# @Attention :
*/
package list
// ListNode is a singly linked list node holding an int value.
type ListNode struct {
	Val int
	Next *ListNode
}
// deleteDuplicateNode removes consecutive duplicates from a sorted list so
// that every value appears exactly once (the list is modified in place).
func deleteDuplicateNode(root *ListNode) {
	walker := root
	for walker != nil {
		// Unlink successors carrying the same value.
		// Fix: the original advanced the walker (walker = walker.Next.Next)
		// instead of relinking, so no node was ever removed.
		for walker.Next != nil && walker.Val == walker.Next.Val {
			walker.Next = walker.Next.Next
		}
		walker = walker.Next
	}
}
// deleteDuplicate2 removes every value that occurs more than once in the
// sorted list, keeping only values that appear exactly once, and returns the
// new head.
func deleteDuplicate2(root *ListNode) *ListNode {
	if root == nil {
		return nil
	}
	// Dummy head so the real head can be deleted uniformly.
	dummy := &ListNode{Next: root}
	cur := dummy
	for cur.Next != nil && cur.Next.Next != nil {
		if cur.Next.Val == cur.Next.Next.Val {
			// The next value is duplicated: drop every node carrying it.
			rmVal := cur.Next.Val
			for cur.Next != nil && cur.Next.Val == rmVal {
				cur.Next = cur.Next.Next
			}
		} else {
			cur = cur.Next
		}
	}
	// Fix: return the node after the dummy head; the original returned the
	// moving cursor, which by then pointed into the middle/end of the list.
	return dummy.Next
}
// reverseList reverses the list in place and returns the new head.
func reverseList(head *ListNode) *ListNode {
	var reversed *ListNode
	for node := head; node != nil; {
		// Detach node and push it onto the front of the reversed list.
		next := node.Next
		node.Next = reversed
		reversed = node
		node = next
	}
	return reversed
}
// reverseBetween reverses the nodes at positions m..n (1-indexed, m <= n,
// both within the list) in a single pass and returns the list head.
// Fix: the original never positioned the predecessor correctly (it assigned
// prev = head.Next on every iteration) and called the reverse helper with a
// negative count (m-n), which could dereference nil.
func reverseBetween(head *ListNode, m int, n int) *ListNode {
	dummy := &ListNode{Next: head}
	// Walk prev to the node just before position m.
	prev := dummy
	for i := 0; i < m-1; i++ {
		prev = prev.Next
	}
	// Head insertion: repeatedly move the node after cur to the front of
	// the segment being reversed.
	cur := prev.Next
	for i := 0; i < n-m; i++ {
		moved := cur.Next
		cur.Next = moved.Next
		moved.Next = prev.Next
		prev.Next = moved
	}
	return dummy.Next
}
// reverse reverses up to count nodes starting at head and returns the head of
// the reversed segment (nil when count <= 0 or head is nil).
// Fix: the original read prev.Next while prev was still nil, panicking on the
// first iteration, and never advanced through the list correctly.
func reverse(head *ListNode, count int) *ListNode {
	var prev *ListNode
	for i := 0; i < count && head != nil; i++ {
		next := head.Next
		head.Next = prev
		prev = head
		head = next
	}
	return prev
}
// mergeTwoLists splices two ascending lists into one ascending list, reusing
// the existing nodes (driven by a dummy head).
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
	sentinel := &ListNode{}
	tail := sentinel
	for l1 != nil && l2 != nil {
		// On ties the l2 node is taken first, matching the original order.
		if l1.Val < l2.Val {
			tail.Next = l1
			l1 = l1.Next
		} else {
			tail.Next = l2
			l2 = l2.Next
		}
		tail = tail.Next
	}
	// At most one list still has nodes; append whatever remains of each.
	for l1 != nil {
		tail.Next = l1
		l1 = l1.Next
		tail = tail.Next
	}
	for l2 != nil {
		tail.Next = l2
		l2 = l2.Next
		tail = tail.Next
	}
	return sentinel.Next
}
// partition rearranges the list so that every node with Val < x precedes
// every node with Val >= x, preserving the relative order inside each group
// (LeetCode 86 semantics), and returns the new head.
//
// Fix: the original special-cased Val == x into a single concreteNode,
// dereferenced walkerNode.Next after the loop (walkerNode can be nil),
// dereferenced concreteNode when no node equals x, and could leave dangling
// links. This version builds two chains behind sentinel nodes and joins them.
func partition(head *ListNode, x int) *ListNode {
	lessHead := &ListNode{}  // sentinel for the < x chain
	moreHead := &ListNode{}  // sentinel for the >= x chain
	lessTail, moreTail := lessHead, moreHead
	for node := head; node != nil; node = node.Next {
		if node.Val < x {
			lessTail.Next = node
			lessTail = node
		} else {
			moreTail.Next = node
			moreTail = node
		}
	}
	// terminate the >= chain so no stale link survives from the input order
	moreTail.Next = nil
	lessTail.Next = moreHead.Next
	return lessHead.Next
}
// Sort a linked list in O(n log n) time and constant extra space.
// Approach: merge sort driven by a fast/slow-pointer midpoint split.
// Key points:
//  1. fast/slow pointers locate the middle element
//  2. the middle link is cut before each recursive sort (lists, unlike
//     arrays, stay physically connected until the link is broken)
//  3. recursion bottoms out on an empty or single-node list
func sortList(head *ListNode) *ListNode {
	return qSortLinkedList(head)
}
// qSortLinkedList merge-sorts the list: split at the middle, sort each half
// recursively, then merge the sorted halves.
func qSortLinkedList(node *ListNode) *ListNode {
	// a nil or single-node list is already sorted
	if node == nil || node.Next == nil {
		return node
	}
	mid := paration(node)
	// cut the middle link before recursing; list halves stay physically
	// connected until the link is broken
	back := mid.Next
	mid.Next = nil
	return mergeLists(qSortLinkedList(node), qSortLinkedList(back))
}
// mergeLists merges two sorted lists into one sorted list by relinking the
// existing nodes behind a sentinel head.
func mergeLists(left, right *ListNode) *ListNode {
	sentinel := &ListNode{}
	tail := sentinel
	for left != nil && right != nil {
		if left.Val < right.Val {
			tail.Next, left = left, left.Next
		} else {
			tail.Next, right = right, right.Next
		}
		tail = tail.Next
	}
	// exactly one side may remain; splice it on in a single step
	if left != nil {
		tail.Next = left
	} else {
		tail.Next = right
	}
	return sentinel.Next
}
// paration returns the middle node of a non-empty list using the classic
// fast/slow-pointer walk: when the fast pointer reaches the tail, the slow
// pointer sits at the midpoint. (Misspelled name kept — callers rely on it.)
func paration(node *ListNode) *ListNode {
	slow, fast := node, node.Next
	for fast != nil && fast.Next != nil {
		slow, fast = slow.Next, fast.Next.Next
	}
	return slow
}
// hasCycle reports whether the list contains a cycle, using Floyd's
// tortoise-and-hare: if a cycle exists, the fast pointer closes the gap on
// the slow pointer by one node per step, so the two must eventually meet.
//
// Fix: the original returned true whenever head.Next == nil or
// head.Next.Next == nil, misreporting every acyclic 1- or 2-node list as
// cyclic.
func hasCycle(head *ListNode) bool {
	slow, fast := head, head
	for fast != nil && fast.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
		if slow == fast {
			return true
		}
	}
	// fast ran off the end: no cycle
	return false
}
// detectCycleNode returns the node at which the cycle begins, or nil when
// the list is acyclic (LeetCode 142).
//
// Floyd's algorithm: advance slow by one and fast by two from the SAME
// starting node; after they meet inside the cycle, a pointer restarted at
// head and the slow pointer, advanced in lockstep, meet exactly at the
// cycle entry.
//
// Fix: the original started fast at head.Next, which breaks the distance
// invariant the second phase relies on — for e.g. a 3-node list whose tail
// points back to the head, the inner loop never terminated.
func detectCycleNode(head *ListNode) *ListNode {
	slow, fast := head, head
	for fast != nil && fast.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
		if slow == fast {
			// phase 2: walk from head and from the meeting point in lockstep
			walker := head
			for walker != slow {
				walker = walker.Next
				slow = slow.Next
			}
			return walker
		}
	}
	return nil
}
// isPalindrome reports whether the list reads the same forwards and
// backwards. It locates the middle with fast/slow pointers, reverses the
// back half via revSe, then compares the halves pairwise.
// NOTE: the list is modified in place (cut at the middle, tail reversed).
func isPalindrome(head *ListNode) bool {
	if head == nil {
		return true
	}
	// fast/slow walk leaves slow on the last node of the front half
	slow, fast := head, head.Next
	for fast != nil && fast.Next != nil {
		slow, fast = slow.Next, fast.Next.Next
	}
	// detach and reverse the back half
	back := slow.Next
	slow.Next = nil
	back = revSe(back)
	// compare in lockstep; the back half may be one node shorter (odd length)
	for front := head; front != nil && back != nil; front, back = front.Next, back.Next {
		if front.Val != back.Val {
			return false
		}
	}
	return true
}
// revSe reverses a singly linked list in place and returns the new head.
//
// Fix: the original assigned tempNode.Next = prev instead of
// node.Next = prev, which dereferences nil when node is the last element
// and never actually relinked the list.
func revSe(node *ListNode) *ListNode {
	var prev *ListNode
	for node != nil {
		next := node.Next
		node.Next = prev
		prev = node
		node = next
	}
	return prev
}
// 给定一个链表,每个节点包含一个额外增加的随机指针,该指针可以指向链表中的任何节点或空节点。 要求返回这个链表的 深拷贝
|
package main
import (
"github.com/joho/godotenv"
"github.com/sharpvik/log-go/v2"
"github.com/sharpvik/ava/configs"
"github.com/sharpvik/ava/server"
)
// init configures debug-level logging and loads environment variables from
// the optional .env file; a missing file is logged but not fatal.
func init() {
	log.SetLevel(log.LevelDebug)
	err := godotenv.Load()
	if err != nil {
		log.Error(err)
	}
}
// main wires the application together: it builds the configuration, starts
// the server in a goroutine, and blocks until the server signals a graceful
// shutdown on the done channel.
//
// Fix: corrected the misspelled log message "init successfull".
func main() {
	config := configs.MustInit()
	log.Debug("init successful")
	serv := server.NewServer(config.Server)
	done := make(chan bool, 1)
	go serv.ServeWithGrace(done)
	<-done
	log.Debug("server stopped")
}
|
package main
import (
"fmt"
"log"
"os"
"strings"
)
// fatalln logs the arguments prefixed with "ERROR:" and terminates the
// process (log.Fatalln exits after printing).
func fatalln(args ...interface{}) {
	prefixed := make([]interface{}, 0, len(args)+1)
	prefixed = append(prefixed, "ERROR:")
	prefixed = append(prefixed, args...)
	log.Fatalln(prefixed...)
}
// check terminates the process via fatalln when err is non-nil; it is a
// no-op for a nil error.
func check(err error) {
	if err == nil {
		return
	}
	fatalln(err)
}
func usage() string {
lines := []string{
"Missing command, use one of:",
fmt.Sprintf("\t%s diff - Compare gedcom files", os.Args[0]),
fmt.Sprintf("\t%s publish - Publish as HTML", os.Args[0]),
fmt.Sprintf("\t%s query - Query with gedcomq", os.Args[0]),
fmt.Sprintf("\t%s tune - Used to calculate ideal weights and similarities", os.Args[0]),
fmt.Sprintf("\t%s version - Show version and exit", os.Args[0]),
fmt.Sprintf("\t%s warnings - Show warnings for a gedcom file", os.Args[0]),
}
return strings.Join(lines, "\n")
}
// main dispatches os.Args[1] to the matching subcommand, exiting with the
// usage text when no command is given and with an error for unknown ones.
func main() {
	if len(os.Args) < 2 {
		fatalln(usage())
	}
	commands := map[string]func(){
		"diff":     runDiffCommand,
		"publish":  runPublishCommand,
		"query":    runQueryCommand,
		"tune":     runTuneCommand,
		"version":  runVersionCommand,
		"warnings": runWarningsCommand,
	}
	run, ok := commands[os.Args[1]]
	if !ok {
		fatalln("unknown command:", os.Args[1])
	}
	run()
}
|
package kubekit
import (
"time"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
)
// ResyncPeriod is the delay between resync actions from the controller. This
// can be overwritten at package level to define the ResyncPeriod for the
// controller.
var ResyncPeriod = 5 * time.Second

// Watcher represents a CRD Watcher Object. It knows enough details about a CRD
// to be able to create a controller and watch for changes.
type Watcher struct {
	cg        cache.Getter               // client getter used to build the list/watch source
	namespace string                     // namespace to watch; "" watches all namespaces
	resource  *CustomResource            // CRD description (plural name, runtime object)
	handler   cache.ResourceEventHandler // callbacks invoked on resource events
}
// NewWatcher returns a new watcher that can be used to watch in a given
// namespace. If namespace is an empty string, all namespaces will be watched.
func NewWatcher(cg cache.Getter, namespace string, resource *CustomResource, handler cache.ResourceEventHandler) *Watcher {
	w := Watcher{
		cg:        cg,
		namespace: namespace,
		resource:  resource,
		handler:   handler,
	}
	return &w
}
// Run starts watching the CRDs associated with the Watcher through a
// Kubernetes CacheController. The controller runs in its own goroutine and
// stops when done is closed; Run itself returns immediately.
func (w *Watcher) Run(done <-chan struct{}) {
	// list/watch source scoped to the watcher's resource plural and namespace
	source := cache.NewListWatchFromClient(
		w.cg,
		w.resource.Plural,
		w.namespace,
		fields.Everything(),
	)
	// the informer's store is discarded; only the event handler is used
	_, controller := cache.NewInformer(
		source,
		w.resource.Object,
		ResyncPeriod,
		w.handler,
	)
	go controller.Run(done)
}
|
// problem 10.5
package chapter10
import (
"container/heap"
"fmt"
)
// KThLargestElement streams the values through a min-heap capped at k
// elements and prints, after each value, the current heap minimum (the kth
// largest seen so far once the heap is full). Requires an IntHeap type
// implementing container/heap.Interface.
func KThLargestElement(stream []int, k int) {
	h := IntHeap{}
	heap.Init(&h)
	for i, value := range stream {
		if i < k {
			// heap not yet full: keep every value
			heap.Push(&h, value)
		} else if value > h[0] {
			// value beats the current minimum: replace it
			heap.Pop(&h)
			heap.Push(&h, value)
		}
		fmt.Printf("kth largest after %d cycles: %d\n", i+1, h[0])
	}
}
|
package trello
// Board models a Trello board as returned by the Trello REST API.
//
// Fix: the memberships field was unexported while carrying a json tag;
// encoding/json ignores unexported fields, so membership data was silently
// never marshaled or unmarshaled. It is now exported as Memberships.
type Board struct {
	Id       string `json:"id"`
	Name     string `json:"name"`
	Desc     string `json:"desc"`
	DescData struct {
		Emoji struct {
		} `json:"emoji"`
	} `json:"descData"`
	Closed         bool   `json:"closed"`
	IdOrganization string `json:"idOrganization"`
	Invited        bool   `json:"invited"`
	Pinned         bool   `json:"pinned"`
	Starred        bool   `json:"starred"`
	Url            string `json:"url"`
	Prefs          struct {
		PermissionLevel       string `json:"permissionLevel"`
		Voting                string `json:"voting"`
		Comments              string `json:"comments"`
		Invitations           string `json:"invitations"`
		SelfJoin              bool   `json:"selfJoin"`
		CardCovers            bool   `json:"cardCovers"`
		CardAging             string `json:"cardAging"`
		CalendarFeedEnabled   bool   `json:"calendarFeedEnabled"`
		Background            string `json:"background"`
		BackgroundColor       string `json:"backgroundColor"`
		BackgroundImage       string `json:"backgroundImage"`
		BackgroundImageScaled bool   `json:"backgroundImageScaled"`
		BackgroundTile        bool   `json:"backgroundTile"`
		BackgroundBrightness  string `json:"backgroundBrightness"`
		CanBePublic           bool   `json:"canBePublic"`
		CanBeOrg              bool   `json:"canBeOrg"`
		CanBePrivate          bool   `json:"canBePrivate"`
		CanInvite             bool   `json:"canInvite"`
	} `json:"prefs"`
	Invitations []interface{} `json:"invitations"`
	Memberships []struct {
		Id          string `json:"id"`
		IdMember    string `json:"idMember"`
		MemberType  string `json:"memberType"`
		Unconfirmed bool   `json:"unconfirmed"`
		Deactivated bool   `json:"deactivated"`
	} `json:"memberships"`
	ShortLink  string `json:"shortLink"`
	Subscribed bool   `json:"subscribed"`
	LabelNames struct {
		Green  string `json:"green"`
		Yellow string `json:"yellow"`
		Orange string `json:"orange"`
		Red    string `json:"red"`
		Purple string `json:"purple"`
		Blue   string `json:"blue"`
		Sky    string `json:"sky"`
		Lime   string `json:"lime"`
		Pink   string `json:"pink"`
		Black  string `json:"black"`
	} `json:"labelNames"`
	PowerUps         []string `json:"powerUps"`
	DateLastActivity string   `json:"dateLastActivity"`
	DateLastView     string   `json:"dateLastView"`
	ShortUrl         string   `json:"shortUrl"`
	Lists            []List   `json:"lists,omitempty"`
	Cards            []Card   `json:"cards,omitempty"`
	Labels           []Label  `json:"labels,omitempty"`
	Actions          []Action `json:"actions,omitempty"`
}
|
package validator
import (
"testing"
"github.com/stretchr/testify/suite"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// Theme is a testify suite validating the ValidateTheme configuration rule.
type Theme struct {
	suite.Suite
	config    *schema.Configuration   // configuration under validation
	validator *schema.StructValidator // collects warnings/errors produced by validation
}

// SetupTest resets the validator and a valid baseline configuration before
// each test.
func (suite *Theme) SetupTest() {
	suite.validator = schema.NewStructValidator()
	suite.config = &schema.Configuration{
		Theme: "light",
	}
}

// TestShouldValidateCompleteConfiguration asserts that a valid theme
// produces no warnings and no errors.
func (suite *Theme) TestShouldValidateCompleteConfiguration() {
	ValidateTheme(suite.config, suite.validator)
	suite.Assert().Len(suite.validator.Warnings(), 0)
	suite.Assert().Len(suite.validator.Errors(), 0)
}

// TestShouldRaiseErrorWhenInvalidThemeProvided asserts that an invalid
// theme yields exactly one descriptive error and no warnings.
func (suite *Theme) TestShouldRaiseErrorWhenInvalidThemeProvided() {
	suite.config.Theme = testInvalid
	ValidateTheme(suite.config, suite.validator)
	suite.Assert().Len(suite.validator.Warnings(), 0)
	suite.Require().Len(suite.validator.Errors(), 1)
	suite.Assert().EqualError(suite.validator.Errors()[0], "option 'theme' must be one of 'light', 'dark', 'grey', or 'auto' but it's configured as 'invalid'")
}

// TestThemes runs the Theme suite.
func TestThemes(t *testing.T) {
	suite.Run(t, new(Theme))
}
|
package uixt
// Drag locates the UI element via pathname and drags it to the integer
// screen coordinates (toX, toY).
func (dExt *DriverExt) Drag(pathname string, toX, toY int, pressForDuration ...float64) (err error) {
	return dExt.DragFloat(pathname, float64(toX), float64(toY), pressForDuration...)
}

// DragFloat is Drag with float64 destination coordinates.
func (dExt *DriverExt) DragFloat(pathname string, toX, toY float64, pressForDuration ...float64) (err error) {
	return dExt.DragOffsetFloat(pathname, toX, toY, 0, 0, pressForDuration...)
}

// DragOffset is DragOffsetFloat with integer destination coordinates.
func (dExt *DriverExt) DragOffset(pathname string, toX, toY int, xOffset, yOffset float64, pressForDuration ...float64) (err error) {
	return dExt.DragOffsetFloat(pathname, float64(toX), float64(toY), xOffset, yOffset, pressForDuration...)
}

// DragOffsetFloat finds the on-screen point matching pathname, applies
// (xOffset, yOffset) to it, and drags from there to (toX, toY).
// pressForDuration optionally overrides the press duration; it defaults
// to 1.0 (presumably seconds — confirm against the driver).
func (dExt *DriverExt) DragOffsetFloat(pathname string, toX, toY, xOffset, yOffset float64, pressForDuration ...float64) (err error) {
	// default press duration when the caller supplies none
	if len(pressForDuration) == 0 {
		pressForDuration = []float64{1.0}
	}
	point, err := dExt.FindUIRectInUIKit(pathname)
	if err != nil {
		return err
	}
	return dExt.Driver.DragFloat(point.X+xOffset, point.Y+yOffset, toX, toY,
		WithPressDuration(pressForDuration[0]))
}
|
package mocks
import (
"bou.ke/monkey"
"reflect"
)
// InstanceMethod monkey-patches methodName on target's type with
// replacement; the patch affects all instances of that type.
func InstanceMethod(target interface{}, methodName string, replacement interface{}) {
	monkey.PatchInstanceMethod(reflect.TypeOf(target), methodName, replacement)
}

// ResetMethod removes a patch previously installed by InstanceMethod.
func ResetMethod(target interface{}, method string) {
	monkey.UnpatchInstanceMethod(reflect.TypeOf(target), method)
}

// MockFunc replaces the function target with replaceFunc.
func MockFunc(target interface{}, replaceFunc interface{}) {
	monkey.Patch(target, replaceFunc)
}
|
package odbcstream
import (
"database/sql"
"fmt"
_"github.com/alexbrainman/odbc"
)
// A pointer to an instance of the database
var DBClient *sql.DB
// Initializing variables for data source credentials
var server, user, password, database string
// Table holds a single table name scanned from the database.
type Table struct {
	name string // table name, populated via rows.Scan in List
}
// Initializes a connection to the database of choice
func InitialiseDBConnection(driverType, dataSourceName string) error{
dataSourceName = fmt.Sprintf("server=%s;user id=%s;password=%s;database=%s;",
server, user, password, database)
db, err := sql.Open(driverType, dataSourceName)
if err != nil {
panic(err.Error())
}
// Checking if database is connected
err = db.Ping()
if err != nil {
panic(err.Error())
}
DBClient = db
return db.Close()
}
// Getting list of tables in database
func List(t *Table) {
var tables []Table
ta, err := DBClient.Query("SHOW TABLES")
if err != nil {
panic(err.Error())
}
for ta.Next() {
var table Table
if err := ta.Scan(&table.name); err != nil {
fmt.Println("An error scanning for tables", err)
return
}
tables = append(tables, table )
}
} |
// kthSmallest returns the kth smallest element (1-indexed) of an n x n
// matrix whose rows are sorted ascending.
func kthSmallest(matrix [][]int, k int) int {
	return sol1(matrix, k)
}
// sol1 performs a k-way merge over the sorted rows: cursors[row] is the
// next unconsumed column in that row, and each of the k rounds consumes the
// smallest value among all row cursors. Assumes a square matrix.
// O(k*n) time, O(n) extra space.
func sol1(matrix [][]int, k int) int {
	rows := len(matrix)
	cursors := make([]int, rows)
	result := -1
	for step := 0; step < k; step++ {
		bestRow := 0
		// bottom-right value is an upper bound for everything remaining
		smallest := matrix[rows-1][rows-1]
		for row := 0; row < rows; row++ {
			col := cursors[row]
			if col >= rows {
				continue // this row is exhausted
			}
			if smallest > matrix[row][col] {
				smallest = matrix[row][col]
				bestRow = row
			}
		}
		result = smallest
		cursors[bestRow]++
	}
	return result
}
|
package client
import (
"context"
rn "roman/proto/roman"
"time"
)
// Repository coordinates the text-analysis pipeline: it holds the raw
// input text, the analyzer that turns it into a token analysis, and the
// gRPC client that processes that analysis remotely.
type Repository struct {
	inputText    string        // raw text supplied by the caller
	textAnalyzer *TextAnalyzer // produces a TokenAnalysis from inputText
	client       *GrpcClient   // remote service that processes the analysis
}
// SetInputText stores the text to be analyzed.
func (r *Repository) SetInputText(inputText string) {
	r.inputText = inputText
}

// GetInputText returns the currently stored input text.
func (r *Repository) GetInputText() string {
	return r.inputText
}

// GetTokenAnalysis feeds the stored input text to the analyzer and returns
// the resulting token analysis.
func (r *Repository) GetTokenAnalysis() *rn.TokenAnalysis {
	textAnalyzer := r.textAnalyzer
	textAnalyzer.SetInputText(r.inputText)
	return textAnalyzer.Analysis()
}
// ProcessAnalysis sends the token analysis to the remote service under a
// 10-second deadline and returns the service's response.
// NOTE(review): the context is stored on the client struct rather than
// passed per call — an anti-pattern worth refactoring inside GrpcClient.
func (r *Repository) ProcessAnalysis(tokenAnalysis *rn.TokenAnalysis) *rn.Response {
	var response *rn.Response
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	r.client.ctx = ctx
	response = r.client.ProcessAnalysis(tokenAnalysis)
	return response
}

// MappingResponseAnalysis extracts the displayable outcome from a response:
// the result string when Status == 1, otherwise the error text.
func (r *Repository) MappingResponseAnalysis(response *rn.Response) string {
	var result string
	if response.Status == 1 {
		result = response.Response.Result
	} else {
		result = response.Error.ErrorText
	}
	return result
}

// Handle runs the full pipeline — analyze, process remotely, map the
// response — and returns the final string.
func (r *Repository) Handle() string {
	tokenAnalysis := r.GetTokenAnalysis()
	return r.MappingResponseAnalysis(r.ProcessAnalysis(tokenAnalysis))
}
// NewRepository builds a Repository wired with a fresh analyzer and gRPC
// client; the input text starts empty.
func NewRepository() *Repository {
	return &Repository{
		textAnalyzer: NewTextAnalyzer(),
		client:       NewGrpcClient(),
	}
}
|
package leetcode
// solvePreorderTraversal writes the preorder (root, left, right) values of
// the subtree rooted at current into preorder, starting at index *pos and
// advancing it. The caller must ensure preorder is long enough for every
// visited node.
func solvePreorderTraversal(current *TreeNode, preorder []int, pos *int) {
	if current == nil {
		return
	}
	preorder[*pos] = current.Val
	*pos++
	solvePreorderTraversal(current.Left, preorder, pos)
	solvePreorderTraversal(current.Right, preorder, pos)
}
func preorderTraversal(root *TreeNode) []int {
var ret [10000]int
var n = 0
solvePreorderTraversal(root, ret[: ], &n)
return ret[: n]
} |
package library2
import (
"github.com/lingdor/midlog"
)
//var LogModule midlog.Module = "library2"
//var Logger midlog.New(LogModule)
var Logger midlog.Midlog = midlog.New("library2")
// DumpLog writes msg to the package logger at info level, prefixed with the
// library name.
func DumpLog(msg string) {
	Logger.Info("library2:", msg)
}

// DumpError writes msg to the package logger at error level, prefixed with
// the library name.
func DumpError(msg string) {
	Logger.Error1("library2:", msg)
}
|
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Test application of the specified definition version", func() {
ctx := context.Background()
var namespace string
var ns corev1.Namespace
BeforeEach(func() {
namespace = randomNamespaceName("defrev-e2e-test")
ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
Eventually(func() error {
return k8sClient.Create(ctx, &ns)
}, time.Second*3, time.Microsecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
labelV1 := labelWithNoTemplate.DeepCopy()
labelV1.Spec.Schematic.CUE.Template = labelV1Template
labelV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, labelV1)).Should(Succeed())
labelV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "label-v1", Namespace: namespace}, labelV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
labelV2 := new(v1beta1.TraitDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "label", Namespace: namespace}, labelV2)
if err != nil {
return err
}
labelV2.Spec.Schematic.CUE.Template = labelV2Template
return k8sClient.Update(ctx, labelV2)
}, 15*time.Second, time.Second).Should(BeNil())
labelV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "label-v2", Namespace: namespace}, labelV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV1 := webServiceWithNoTemplate.DeepCopy()
webserviceV1.Spec.Schematic.CUE.Template = webServiceV1Template
webserviceV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, webserviceV1)).Should(Succeed())
webserviceV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v1", Namespace: namespace}, webserviceV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2 := new(v1beta1.ComponentDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "webservice", Namespace: namespace}, webserviceV2)
if err != nil {
return err
}
webserviceV2.Spec.Schematic.CUE.Template = webServiceV2Template
return k8sClient.Update(ctx, webserviceV2)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v2", Namespace: namespace}, webserviceV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
jobV1 := jobComponentDef.DeepCopy()
jobV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, jobV1)).Should(Succeed())
jobV1Rev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "job-v1.2.1", Namespace: namespace}, jobV1Rev)
}, 15*time.Second, time.Second).Should(BeNil())
})
AfterEach(func() {
By("Clean up resources after a test")
k8sClient.DeleteAllOf(ctx, &v1beta1.Application{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.ComponentDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.WorkloadDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.TraitDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.DefinitionRevision{}, client.InNamespace(namespace))
By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
})
It("Test deploy application which containing cue rendering module", func() {
var (
appName = "test-website-app"
comp1Name = "front"
comp2Name = "backend"
)
workerV1 := workerWithNoTemplate.DeepCopy()
workerV1.Spec.Workload = common.WorkloadTypeDescriptor{
Definition: common.WorkloadGVK{
APIVersion: "batch/v1",
Kind: "Job",
},
}
workerV1.Spec.Schematic.CUE.Template = workerV1Template
workerV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, workerV1)).Should(Succeed())
workerV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "worker-v1", Namespace: namespace}, workerV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
workerV2 := new(v1beta1.ComponentDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "worker", Namespace: namespace}, workerV2)
if err != nil {
return err
}
workerV1.Spec.Workload = common.WorkloadTypeDescriptor{
Definition: common.WorkloadGVK{
APIVersion: "apps/v1",
Kind: "Deployment",
},
}
workerV2.Spec.Schematic.CUE.Template = workerV2Template
return k8sClient.Update(ctx, workerV2)
}, 15*time.Second, time.Second).Should(BeNil())
workerV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "worker-v2", Namespace: namespace}, workerV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: comp1Name,
Type: "webservice",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "nginx",
}),
Traits: []common.ApplicationTrait{
{
Type: "label",
Properties: util.Object2RawExtension(map[string]interface{}{
"labels": map[string]string{
"hello": "world",
},
}),
},
},
},
{
Name: comp2Name,
Type: "worker",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"cmd": []string{"sleep", "1000"},
}),
},
},
},
}
By("Create application")
Eventually(func() error {
return k8sClient.Create(ctx, app.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
webServiceDeploy := &appsv1.Deployment{}
deployName := comp1Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
workerDeploy := &appsv1.Deployment{}
deployName = comp2Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, workerDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify trait is applied to the workload")
webserviceLabels := webServiceDeploy.GetLabels()
Expect(webserviceLabels["hello"]).Should(Equal("world"))
By("Update Application and Specify the Definition version in Application")
app = v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: comp1Name,
Type: "webservice@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "nginx",
}),
Traits: []common.ApplicationTrait{
{
Type: "label@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"labels": map[string]string{
"hello": "kubevela",
},
}),
},
},
},
{
Name: comp2Name,
Type: "worker@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"cmd": []string{"sleep", "1000"},
}),
},
},
},
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Wait for dispatching v2 resources successfully")
Eventually(func() error {
RequestReconcileNow(ctx, &app)
rt := &v1beta1.ResourceTracker{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: fmt.Sprintf("%s-v2-%s", appName, namespace)}, rt); err != nil {
return err
}
if len(rt.Spec.ManagedResources) != 0 {
return nil
}
return errors.New("v2 resources have not been dispatched")
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
webServiceV1Deploy := &appsv1.Deployment{}
deployName = comp1Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceV1Deploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the workload(job) is created successfully")
workerJob := &batchv1.Job{}
jobName := comp2Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: jobName, Namespace: namespace}, workerJob)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify trait is applied to the workload")
webserviceV1Labels := webServiceV1Deploy.GetLabels()
Expect(webserviceV1Labels["hello"]).Should(Equal("kubevela"))
By("Check Application is rendered by the specified version of the Definition")
Expect(webServiceV1Deploy.Labels["componentdefinition.oam.dev/version"]).Should(Equal("v1"))
Expect(webServiceV1Deploy.Labels["traitdefinition.oam.dev/version"]).Should(Equal("v1"))
By("Application specifies the wrong version of the Definition, it will raise an error")
app = v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: comp1Name,
Type: "webservice@v10",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "nginx",
"cmd": []string{"sleep", "1000"},
}),
},
},
},
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(HaveOccurred())
})
It("Test deploy application which specify the name of component", func() {
compName := "job"
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-defrevision-app-with-job",
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: compName,
Type: "job@v1.2.1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"cmd": []string{"sleep", "1000"},
}),
},
},
},
}
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
By("Verify the workload(job) is created successfully")
busyBoxJob := &batchv1.Job{}
jobName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: jobName, Namespace: namespace}, busyBoxJob)
}, 30*time.Second, 3*time.Second).Should(Succeed())
})
// refer to https://github.com/oam-dev/kubevela/discussions/1810#discussioncomment-914295
It("Test k8s resources created by application whether with correct label", func() {
var (
appName = "test-resources-labels"
compName = "web"
)
exposeV1 := exposeWithNoTemplate.DeepCopy()
exposeV1.Spec.Schematic.CUE.Template = exposeV1Template
exposeV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, exposeV1)).Should(Succeed())
exposeV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v1", Namespace: namespace}, exposeV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2 := new(v1beta1.TraitDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "expose", Namespace: namespace}, exposeV2)
if err != nil {
return err
}
exposeV2.Spec.Schematic.CUE.Template = exposeV2Template
return k8sClient.Update(ctx, exposeV2)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v2", Namespace: namespace}, exposeV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: compName,
Type: "webservice@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "crccheck/hello-world",
"port": 8000,
}),
Traits: []common.ApplicationTrait{
{
Type: "expose@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"port": []int{8000},
}),
},
},
},
},
},
}
By("Create application")
Eventually(func() error {
return k8sClient.Create(ctx, app.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
webServiceDeploy := &appsv1.Deployment{}
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the workload label generated by KubeVela")
workloadLabel := webServiceDeploy.GetLabels()[oam.WorkloadTypeLabel]
Expect(workloadLabel).Should(Equal("webservice-v1"))
By("Verify the traPIt(service) is created successfully")
exposeSVC := &corev1.Service{}
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: compName, Namespace: namespace}, exposeSVC)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the trait label generated by KubeVela")
traitLabel := exposeSVC.GetLabels()[oam.TraitTypeLabel]
Expect(traitLabel).Should(Equal("expose-v1"))
})
})
// webServiceWithNoTemplate is a "webservice" ComponentDefinition fixture
// with a Deployment workload and an empty CUE template; tests fill the
// template in before creating it.
var webServiceWithNoTemplate = &v1beta1.ComponentDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "ComponentDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "webservice",
	},
	Spec: v1beta1.ComponentDefinitionSpec{
		Workload: common.WorkloadTypeDescriptor{
			Definition: common.WorkloadGVK{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
		},
		Schematic: &common.Schematic{
			CUE: &common.CUE{
				Template: "",
			},
		},
	},
}

// workerWithNoTemplate is a "worker" ComponentDefinition fixture with no
// workload descriptor and an empty CUE template; tests set both.
var workerWithNoTemplate = &v1beta1.ComponentDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "ComponentDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "worker",
	},
	Spec: v1beta1.ComponentDefinitionSpec{
		Schematic: &common.Schematic{
			CUE: &common.CUE{
				Template: "",
			},
		},
	},
}

// jobComponentDef is a "job" ComponentDefinition fixture pinned to
// definition revision 1.2.1 via the revision-name annotation; it reuses the
// worker v1 CUE template and a batch/v1 Job workload.
var jobComponentDef = &v1beta1.ComponentDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "ComponentDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "job",
		Annotations: map[string]string{
			oam.AnnotationDefinitionRevisionName: "1.2.1",
		},
	},
	Spec: v1beta1.ComponentDefinitionSpec{
		Workload: common.WorkloadTypeDescriptor{
			Definition: common.WorkloadGVK{
				APIVersion: "batch/v1",
				Kind:       "Job",
			},
		},
		Schematic: &common.Schematic{
			CUE: &common.CUE{
				Template: workerV1Template,
			},
		},
	},
}

// KUBEWorker is a bare ComponentDefinition fixture named "kube-worker"
// (metadata only; no spec).
var KUBEWorker = &v1beta1.ComponentDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "ComponentDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "kube-worker",
	},
}

// HELMWorker is a bare ComponentDefinition fixture named "helm-worker"
// (metadata only; no spec).
var HELMWorker = &v1beta1.ComponentDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "ComponentDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "helm-worker",
	},
}

// labelWithNoTemplate is a "label" TraitDefinition fixture with an empty
// CUE template; tests fill the template in before creating it.
var labelWithNoTemplate = &v1beta1.TraitDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "TraitDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "label",
	},
	Spec: v1beta1.TraitDefinitionSpec{
		Schematic: &common.Schematic{
			CUE: &common.CUE{
				Template: "",
			},
		},
	},
}

// exposeWithNoTemplate is an "expose" TraitDefinition fixture with an empty
// CUE template; tests fill the template in before creating it.
var exposeWithNoTemplate = &v1beta1.TraitDefinition{
	TypeMeta: metav1.TypeMeta{
		Kind:       "TraitDefinition",
		APIVersion: "core.oam.dev/v1beta1",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name: "expose",
	},
	Spec: v1beta1.TraitDefinitionSpec{
		Schematic: &common.Schematic{
			CUE: &common.CUE{
				Template: "",
			},
		},
	},
}
var webServiceV1Template = `output: {
apiVersion: "apps/v1"
kind: "Deployment"
metadata: labels: {
"componentdefinition.oam.dev/version": "v1"
}
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
if parameter["env"] != _|_ {
env: parameter.env
}
if context["config"] != _|_ {
env: context.config
}
ports: [{
containerPort: parameter.port
}]
if parameter["cpu"] != _|_ {
resources: {
limits:
cpu: parameter.cpu
requests:
cpu: parameter.cpu
}
}
}]
}
}
}
}
parameter: {
image: string
cmd?: [...string]
port: *80 | int
env?: [...{
name: string
value?: string
valueFrom?: {
secretKeyRef: {
name: string
key: string
}
}
}]
cpu?: string
}
`
// webServiceV2Template is the CUE template for v2 of the "webservice"
// component; it adds the optional addRevisionLabel parameter which, when
// true, stamps the pod template with the application revision.
var webServiceV2Template = `output: {
	apiVersion: "apps/v1"
	kind: "Deployment"
	spec: {
		selector: matchLabels: {
			"app.oam.dev/component": context.name
		}
		template: {
			metadata: labels: {
				"app.oam.dev/component": context.name
				if parameter.addRevisionLabel {
					"app.oam.dev/appRevision": context.appRevision
				}
			}
			spec: {
				containers: [{
					name: context.name
					image: parameter.image
					if parameter["cmd"] != _|_ {
						command: parameter.cmd
					}
					if parameter["env"] != _|_ {
						env: parameter.env
					}
					if context["config"] != _|_ {
						env: context.config
					}
					ports: [{
						containerPort: parameter.port
					}]
					if parameter["cpu"] != _|_ {
						resources: {
							limits:
								cpu: parameter.cpu
							requests:
								cpu: parameter.cpu
						}
					}
				}]
			}
		}
	}
}
parameter: {
	image: string
	cmd?: [...string]
	port: *80 | int
	env?: [...{
		name: string
		value?: string
		valueFrom?: {
			secretKeyRef: {
				name: string
				key: string
			}
		}
	}]
	cpu?: string
	addRevisionLabel: *false | bool
}
`
// workerV1Template is the CUE template for v1 of the "worker" component,
// rendered as a batch Job with configurable parallelism/restart policy.
var workerV1Template = `output: {
	apiVersion: "batch/v1"
	kind: "Job"
	spec: {
		parallelism: parameter.count
		completions: parameter.count
		template: spec: {
			restartPolicy : parameter.restart
			containers: [{
				name: context.name
				image: parameter.image
				if parameter["cmd"] != _|_ {
					command: parameter.cmd
				}
			}]
		}
	}
}
parameter: {
	count: *1 | int
	image: string
	restart: *"Never" | string
	cmd?: [...string]
}
`

// workerV2Template is the CUE template for v2 of the "worker" component;
// v2 switches the workload kind from Job to Deployment.
var workerV2Template = `output: {
	apiVersion: "apps/v1"
	kind: "Deployment"
	spec: {
		selector: matchLabels: {
			"app.oam.dev/component": context.name
		}
		template: {
			metadata: labels: {
				"app.oam.dev/component": context.name
			}
			spec: {
				containers: [{
					name: context.name
					image: parameter.image
					if parameter["cmd"] != _|_ {
						command: parameter.cmd
					}
				}]
			}
		}
	}
}
parameter: {
	image: string
	cmd?: [...string]
}
`
// labelV1Template is the CUE patch template for v1 of the "label" trait;
// it adds the user-supplied labels plus a version marker label.
var labelV1Template = `patch: {
	metadata: labels: {
		for k, v in parameter.labels {
			"\(k)": v
		}
		"traitdefinition.oam.dev/version": "v1"
	}
}
parameter: {
	labels: [string]: string
}
`

// labelV2Template is v2 of the "label" trait patch; identical to v1 but
// without the version marker label.
var labelV2Template = `patch: {
	metadata: labels: {
		for k, v in parameter.labels {
			"\(k)": v
		}
	}
}
parameter: {
	labels: [string]: string
}
`
// KUBEWorkerV1Template is the raw-YAML workload for v1 of the kube-worker
// component: an nginx Deployment.
var KUBEWorkerV1Template = `apiVersion: apps/v1
kind: Deployment
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          ports:
            - containerPort: 80
`

// KUBEWorkerV2Template is v2 of the kube-worker raw-YAML workload; the
// kind changes from Deployment to a one-shot busybox Job.
var KUBEWorkerV2Template = `apiVersion: "batch/v1"
kind: "Job"
spec:
  parallelism: 1
  completions: 1
  template:
    spec:
      restartPolicy: "Never"
      containers:
        - name: "job"
          image: "busybox"
          command:
            - "sleep"
            - "1000"
`
// exposeV1Template is the CUE template for v1 of the "expose" trait: it
// emits a Service forwarding each requested port to the component.
var exposeV1Template = `
outputs: service: {
	apiVersion: "v1"
	kind: "Service"
	metadata:
		name: context.name
	spec: {
		selector:
			"app.oam.dev/component": context.name
		ports: [
			for p in parameter.port {
				port: p
				targetPort: p
			},
		]
	}
}
parameter: {
	// +usage=Specify the exposion ports
	port: [...int]
}
`

// exposeV2Template is v2 of the "expose" trait: it emits both a Service
// (one port per parameter.http entry) and an Ingress routing each path to
// the matching service port.
var exposeV2Template = `
outputs: service: {
	apiVersion: "v1"
	kind: "Service"
	metadata:
		name: context.name
	spec: {
		selector: {
			"app.oam.dev/component": context.name
		}
		ports: [
			for k, v in parameter.http {
				port: v
				targetPort: v
			},
		]
	}
}
outputs: ingress: {
	apiVersion: "networking.k8s.io/v1beta1"
	kind: "Ingress"
	metadata:
		name: context.name
	spec: {
		rules: [{
			host: parameter.domain
			http: {
				paths: [
					for k, v in parameter.http {
						path: k
						backend: {
							serviceName: context.name
							servicePort: v
						}
					},
				]
			}
		}]
	}
}
parameter: {
	domain: string
	http: [string]: int
}
`
|
package main
import (
"encoding/json"
"net/http"
"strings"
)
// respondWithError writes a JSON payload of the form {"error": message}
// with the given HTTP status code.
func respondWithError(w http.ResponseWriter, code int, message string) {
	respondWithJSON(w, code, map[string]string{"error": message})
}
// respondWithJSON serializes payload and writes it with the given status
// code and an application/json content type.
// Fixed: a json.Marshal failure was silently ignored, which wrote an
// empty body with the caller's status code; it now yields a 500.
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
	response, err := json.Marshal(payload)
	if err != nil {
		http.Error(w, `{"error": "failed to encode response"}`, http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write(response)
}
// isAuthorized reports whether the request carries an Authorization
// header shaped like "Bearer <token>". The token itself is not validated.
func isAuthorized(r http.Request) bool {
	parts := strings.Split(r.Header.Get("Authorization"), "Bearer ")
	// TODO: implement OAuth
	// the raw token would be parts[1]
	return len(parts) == 2
}
// findBooking returns the index of the booking whose ID matches id, or
// -1 when no booking matches.
func findBooking(bookings []Spot, id string) int {
	for idx := range bookings {
		if bookings[idx].ID == id {
			return idx
		}
	}
	return -1
}
|
package controllers
import (
"app/base/database"
"net/http"
"github.com/gin-gonic/gin"
)
// HealthHandler responds 200 "OK"; used as a liveness probe.
// Fixed: dropped the redundant bare return at the end of the function.
func HealthHandler(c *gin.Context) {
	c.String(http.StatusOK, "OK")
}
// HealthDBHandler pings the database and responds 200 "OK" when it is
// reachable, or 500 when the ping fails.
// Fixed: the handler previously fell through after writing the 500
// response and appended "OK" to the error body as well; it now returns
// immediately on error. The redundant trailing bare return was dropped.
func HealthDBHandler(c *gin.Context) {
	if err := database.Db.DB().Ping(); err != nil {
		c.String(http.StatusInternalServerError, "unable to ping database")
		return
	}
	c.String(http.StatusOK, "OK")
}
|
// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package genericactuator
import (
"context"
"errors"
"fmt"
"github.com/gardener/gardener/extensions/pkg/controller/worker"
mockclient "github.com/gardener/gardener/pkg/mock/controller-runtime/client"
"github.com/golang/mock/gomock"
"github.com/hashicorp/go-multierror"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Ginkgo specs for the generic worker actuator helpers: machine-class
// secret listing, leaked ClusterRole cleanup, and filtering of machine
// deployments without persisted state.
var _ = Describe("Actuator", func() {
	// listMachineClassSecrets must return secrets labeled with either the
	// new (gardener.cloud/purpose) or the deprecated
	// (garden.sapcloud.io/purpose) machineclass label.
	Describe("#listMachineClassSecrets", func() {
		const (
			ns = "test-ns"
			purpose = "machineclass"
		)
		var (
			existing *corev1.Secret
			expected corev1.Secret
			allExisting []runtime.Object
			allExpected []interface{}
		)
		// The It blocks below only prepare fixtures; the actual call and
		// assertions are shared in AfterEach.
		BeforeEach(func() {
			existing = &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "new",
					Namespace: ns,
					Labels: map[string]string{},
				},
			}
			allExisting = []runtime.Object{}
			allExpected = []interface{}{}
			expected = *existing.DeepCopy()
		})
		AfterEach(func() {
			a := &genericActuator{client: fake.NewFakeClient(allExisting...)}
			sl, err := a.listMachineClassSecrets(context.TODO(), ns)
			Expect(err).ToNot(HaveOccurred())
			Expect(sl).ToNot(BeNil())
			Expect(sl.Items).To(ConsistOf(allExpected...))
		})
		It("only classes with new label exists", func() {
			existing.Labels["gardener.cloud/purpose"] = purpose
			expected = *existing.DeepCopy()
			allExisting = append(allExisting, existing)
			allExpected = append(allExpected, expected)
		})
		// NOTE(review): the following cases shadow `expected` with :=
		// (unlike the case above, which assigns the outer variable); both
		// work because the appended local copy is what AfterEach consumes.
		It("only classes with old label exists", func() {
			existing.Labels["garden.sapcloud.io/purpose"] = purpose
			expected := *existing.DeepCopy()
			allExisting = append(allExisting, existing)
			allExpected = append(allExpected, expected)
		})
		It("secret is labeled with both labels", func() {
			existing.Labels["garden.sapcloud.io/purpose"] = purpose
			existing.Labels["gardener.cloud/purpose"] = purpose
			expected := *existing.DeepCopy()
			allExisting = append(allExisting, existing)
			allExpected = append(allExpected, expected)
		})
		It("one old and one new secret exists", func() {
			oldExisting := existing.DeepCopy()
			oldExisting.Name = "old-deprecated"
			oldExisting.Labels["garden.sapcloud.io/purpose"] = purpose
			existing.Labels["gardener.cloud/purpose"] = purpose
			expected := *existing.DeepCopy()
			expectedOld := *oldExisting.DeepCopy()
			allExisting = append(allExisting, existing, oldExisting)
			allExpected = append(allExpected, expected, expectedOld)
		})
	})
	// CleanupLeakedClusterRoles must delete machine-controller-manager
	// ClusterRoles whose namespace segment no longer exists, and must
	// leave non-matching or still-referenced roles alone.
	Describe("#CleanupLeakedClusterRoles", func() {
		var (
			ctrl *gomock.Controller
			ctx = context.TODO()
			c *mockclient.MockClient
			providerName = "provider-foo"
			fakeErr = errors.New("fake")
			namespace1 = "abcd"
			namespace2 = "efgh"
			namespace3 = "ijkl"
			// Roles that must never be considered leaked: wrong name shape,
			// wrong provider, or missing the machine-controller-manager suffix.
			nonMatchingClusterRoles = []rbacv1.ClusterRole{
				{ObjectMeta: metav1.ObjectMeta{Name: "doesnotmatch"}},
				{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:provider-bar:%s:machine-controller-manager", namespace1)}},
				{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s", providerName, namespace1)}},
				{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:bar", providerName, namespace1)}},
				{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:machine-controller-manager", providerName)}},
			}
		)
		BeforeEach(func() {
			ctrl = gomock.NewController(GinkgoT())
			c = mockclient.NewMockClient(ctrl)
		})
		AfterEach(func() {
			ctrl.Finish()
		})
		It("should return an error while listing the clusterroles", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{})).Return(fakeErr)
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Equal(fakeErr))
		})
		It("should return an error while listing the namespaces", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{}))
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{})).Return(fakeErr)
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Equal(fakeErr))
		})
		It("should do nothing because clusterrole list is empty", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{}))
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{}))
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Succeed())
		})
		It("should do nothing because clusterrole list doesn't contain matches", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{})).DoAndReturn(func(_ context.Context, list *rbacv1.ClusterRoleList, _ ...client.ListOption) error {
				*list = rbacv1.ClusterRoleList{Items: nonMatchingClusterRoles}
				return nil
			})
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{}))
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Succeed())
		})
		It("should do nothing because no orphaned clusterroles found", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{})).DoAndReturn(func(_ context.Context, list *rbacv1.ClusterRoleList, _ ...client.ListOption) error {
				*list = rbacv1.ClusterRoleList{
					Items: append(nonMatchingClusterRoles, rbacv1.ClusterRole{
						ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace1)},
					}),
				}
				return nil
			})
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{})).DoAndReturn(func(_ context.Context, list *corev1.NamespaceList, _ ...client.ListOption) error {
				*list = corev1.NamespaceList{
					Items: []corev1.Namespace{
						{ObjectMeta: metav1.ObjectMeta{Name: namespace1}},
					},
				}
				return nil
			})
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Succeed())
		})
		It("should delete the orphaned clusterroles", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{})).DoAndReturn(func(_ context.Context, list *rbacv1.ClusterRoleList, _ ...client.ListOption) error {
				*list = rbacv1.ClusterRoleList{
					Items: append(
						nonMatchingClusterRoles,
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace1)}},
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace2)}},
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace3)}},
					),
				}
				return nil
			})
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{})).DoAndReturn(func(_ context.Context, list *corev1.NamespaceList, _ ...client.ListOption) error {
				*list = corev1.NamespaceList{
					Items: []corev1.Namespace{
						{ObjectMeta: metav1.ObjectMeta{Name: namespace1}},
					},
				}
				return nil
			})
			c.EXPECT().Delete(ctx, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace2)}})
			c.EXPECT().Delete(ctx, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace3)}})
			Expect(CleanupLeakedClusterRoles(ctx, c, providerName)).To(Succeed())
		})
		It("should return the error occurred during orphaned clusterrole deletion", func() {
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&rbacv1.ClusterRoleList{})).DoAndReturn(func(_ context.Context, list *rbacv1.ClusterRoleList, _ ...client.ListOption) error {
				*list = rbacv1.ClusterRoleList{
					Items: append(
						nonMatchingClusterRoles,
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace1)}},
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace2)}},
						rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace3)}},
					),
				}
				return nil
			})
			c.EXPECT().List(ctx, gomock.AssignableToTypeOf(&corev1.NamespaceList{})).DoAndReturn(func(_ context.Context, list *corev1.NamespaceList, _ ...client.ListOption) error {
				*list = corev1.NamespaceList{
					Items: []corev1.Namespace{
						{ObjectMeta: metav1.ObjectMeta{Name: namespace1}},
					},
				}
				return nil
			})
			c.EXPECT().Delete(ctx, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace2)}})
			c.EXPECT().Delete(ctx, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("extensions.gardener.cloud:%s:%s:machine-controller-manager", providerName, namespace3)}}).Return(fakeErr)
			err := CleanupLeakedClusterRoles(ctx, c, providerName)
			Expect(err).To(HaveOccurred())
			Expect(err).To(BeAssignableToTypeOf(&multierror.Error{}))
			Expect(err.(*multierror.Error).Errors).To(Equal([]error{fakeErr}))
		})
	})
	// removeWantedDeploymentWithoutState must tolerate nil/empty input and
	// keep only deployments that carry persisted state.
	Describe("#removeWantedDeploymentWithoutState", func() {
		var (
			mdWithoutState = worker.MachineDeployment{ClassName: "gcp", Name: "md-without-state"}
			mdWithState = worker.MachineDeployment{ClassName: "gcp", Name: "md-with-state", State: &worker.MachineDeploymentState{Replicas: 3}}
		)
		It("should not panic for MachineDeployments without state", func() {
			removeWantedDeploymentWithoutState(worker.MachineDeployments{mdWithoutState})
		})
		It("should not panic for empty slice of MachineDeployments", func() {
			removeWantedDeploymentWithoutState(make(worker.MachineDeployments, 0))
		})
		It("should not panic MachineDeployments is nil", func() {
			removeWantedDeploymentWithoutState(nil)
		})
		It("should not return nil if MachineDeployments are reduced to zero", func() {
			Expect(removeWantedDeploymentWithoutState(worker.MachineDeployments{mdWithoutState})).NotTo(BeNil())
		})
		It("should return only MachineDeployments with states", func() {
			reducedMDs := removeWantedDeploymentWithoutState(worker.MachineDeployments{mdWithoutState, mdWithState})
			Expect(len(reducedMDs)).To(Equal(1))
			Expect(reducedMDs[0]).To(Equal(mdWithState))
		})
	})
})
|
package main
import (
"fmt"
"os"
)
// main performs a toy credential check of the first two command-line
// arguments against hard-coded values.
// Fixed: the old guard (len(os.Args) <= 1) allowed a run with exactly
// one argument to reach os.Args[2] and panic with an index out of
// range; both arguments are now required up front.
func main() {
	if username, password := "musa", "abu"; len(os.Args) < 3 {
		fmt.Println("Usage: [username] [password]")
	} else if os.Args[1] != username {
		fmt.Println("access denied for username :", os.Args[1])
	} else if os.Args[2] != password {
		fmt.Println("access denied for password:", os.Args[2])
	} else {
		fmt.Println("access granted to:", os.Args[1])
	}
}
|
package main
import (
"fmt"
"os"
"bufio"
"strconv"
)
// main reads one integer jump offset per line from path.txt and prints
// the number of steps followPath takes to escape the list.
// Fixed: the os.Open error was ignored (a missing file silently produced
// an empty path) and the file handle was never closed; malformed lines
// previously became offset 0 via the ignored Atoi error.
func main() {
	f, err := os.Open("path.txt")
	if err != nil {
		fmt.Println("cannot open path.txt:", err)
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	path := make([]int, 0)
	for scanner.Scan() {
		num, err := strconv.Atoi(scanner.Text())
		if err != nil {
			fmt.Println("bad offset line:", err)
			return
		}
		path = append(path, num)
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("error reading path.txt:", err)
		return
	}
	fmt.Println(followPath(path))
	// a := []int{0,3,0,1,-3}
	// fmt.Println(followPath(a))
}
// followPath walks the jump-offset list (mutating it in place) and
// returns how many steps are taken before the index leaves the list:
// each step jumps by the current offset, then that offset is decreased
// by one if it was three or more, otherwise increased by one.
// Fixed: the old code mutated the offset first and jumped by the
// mutated value minus one, so the threshold test used the wrong value
// and offsets that started at 3 or more jumped short (e.g. an initial
// offset of 4 jumped only 2). The jump now uses the pre-mutation offset
// and the threshold is >= 3.
func followPath(path []int) (steps int) {
	for i := 0; i < len(path); steps++ {
		jump := path[i]
		if jump >= 3 {
			path[i]--
		} else {
			path[i]++
		}
		i += jump
	}
	return
}
package db
import (
"log"
"gopkg.in/mgo.v2"
)
const (
	// localHostDB is the local development MongoDB URI.
	localHostDB = "mongodb://localhost:27017"
	// SECURITY(review): this connection string embeds credentials in
	// source control — move them to configuration/environment.
	mlabHost = "mongodb://archiver:!2017Dlab@ds155737.mlab.com:55737/draglabsdev"
	// dbName and the *C constants below name the database and its collections.
	dbName = "dsound"
	userC = "users"
	jamC = "jams"
	recordings = "recordings"
	//mongodb://marlon:4803marlon@ds035856.mlab.com:35856/draglabs
)
// DataStore struct holds our DB interaction
type DataStore struct {
	session *mgo.Session
}

// Collection returns the named collection from the application database.
func (ds *DataStore) Collection(name string) *mgo.Collection {
	return ds.session.DB(dbName).C(name)
}

// UserCollection returns the users collection.
func (ds *DataStore) UserCollection() *mgo.Collection {
	return ds.session.DB(dbName).C(userC)
}

// JamCollection returns the jams collection; it is a convenience for
// `session.DB(dbName).C(cName)`.
func (ds *DataStore) JamCollection() *mgo.Collection {
	return ds.session.DB(dbName).C(jamC)
}

// RecordingsCollection returns the recordings collection.
func (ds *DataStore) RecordingsCollection() *mgo.Collection {
	return ds.session.DB(dbName).C(recordings)
}

// Close func closes our session on DB
func (ds *DataStore) Close() {
	ds.session.Close()
}

// NewDataStore dials MongoDB and returns a ready DataStore; on dial
// failure log.Fatal terminates the process.
// Fixed: the store previously kept session.Copy() and dropped the
// original dialed session, which was never closed (a session leak);
// the dialed session is now stored directly.
// SECURITY(review): the address and credentials below are hard-coded —
// move them to configuration.
func NewDataStore() *DataStore {
	info := mgo.DialInfo{
		Addrs: []string{"54.183.100.139:27017"},
		Database: dbName,
		Username: "soundBoy",
		Password: "soundBoy",
	}
	session, err := mgo.DialWithInfo(&info)
	if err != nil {
		log.Fatal(err)
		return nil // unreachable: log.Fatal exits, kept for clarity
	}
	session.SetMode(mgo.Monotonic, true)
	return &DataStore{session: session}
}
|
package main
import (
"blog/bootstrap"
"blog/config"
"flag"
)
// configFile is the path to the YAML configuration file, overridable
// via the -config flag.
var configFile = flag.String("config", "./blog.yaml", "配置文件路径")

// init parses command-line flags and loads the configuration before
// main runs.
// NOTE(review): flag.Parse in init is fragile if other packages also
// register flags after this package is initialized — consider moving
// the parse into main.
func init() {
	flag.Parse()
	config.InitConfig(*configFile)
}
// main registers and runs the application, panicking on a fatal
// startup error.
func main() {
	application := bootstrap.Register()
	if err := bootstrap.Run(application); err != nil {
		panic(err)
	}
}
|
package main
import (
controllers "github.com/vlasove/proj/controllers" // new
"github.com/vlasove/proj/models" // new
"github.com/gin-gonic/gin"
)
// main wires the database handle into every request context and
// registers the book CRUD routes before serving on :8080.
func main() {
	router := gin.Default()
	db := models.SetupModels() // new

	// Expose the db handle to handlers through the request context.
	router.Use(func(c *gin.Context) {
		c.Set("db", db)
		c.Next()
	})

	router.GET("/books", controllers.FindBooks)
	router.POST("/books", controllers.CreateBook)       // create
	router.GET("/books/:id", controllers.FindBook)      // find by id
	router.PATCH("/books/:id", controllers.UpdateBook)  // update by id
	router.DELETE("/books/:id", controllers.DeleteBook) // delete by id
	router.Run(":8080")
}
|
package model
// SmProjectFileT links an uploaded file to a project (xorm-mapped row).
type SmProjectFileT struct {
	FileId int64 `xorm:"not null comment('文件ID') BIGINT(20)"`
	ProjectId int64 `xorm:"not null comment('项目ID') BIGINT(20)"`
}

// ProjectFileTToRespDesc reshapes a database row into the JSON field
// layout expected by API responses.
func (tmProjectFileT *SmProjectFileT) ProjectFileTToRespDesc() interface{} {
	return map[string]interface{}{
		"projectId": tmProjectFileT.ProjectId,
		"fileId": tmProjectFileT.FileId,
	}
}
|
package main
import "fmt"
// main reads a count t followed by t integers from stdin and prints the
// largest prime factor of each.
func main() {
	var t int
	fmt.Scan(&t)
	values := make([]int64, 0, t)
	for i := 0; i < t; i++ {
		var n int64
		fmt.Scan(&n)
		values = append(values, n)
	}
	for _, v := range values {
		fmt.Printf("%d\n", highestPrimeFactor(v))
	}
}
// highestPrimeFactor returns the largest prime factor of n by trial
// division. For n < 2 (which has no prime factors) it returns n itself.
// Fixed: the old version collected factors into a slice and indexed its
// last element, which panicked on an empty slice for n < 2; the slice
// is also gone — only the largest factor found so far is tracked.
func highestPrimeFactor(n int64) int64 {
	if n < 2 {
		return n
	}
	largest := int64(1)
	for d := int64(2); d*d <= n; d++ {
		for n%d == 0 {
			largest = d
			n /= d
		}
	}
	// whatever remains above sqrt of the original value is prime
	if n > 1 {
		largest = n
	}
	return largest
}
|
package betypes
const (
	// CertPath is the filesystem location of the PEM-encoded TLS certificate.
	CertPath = "./certs/cert.pem"
	// KeyPath is the filesystem location of the matching TLS private key.
	KeyPath = "./certs/cert.key"
)
|
package relation
// generate returns the first numRows rows of Pascal's triangle.
func generate(numRows int) [][]int {
	triangle := make([][]int, numRows)
	for row := range triangle {
		current := make([]int, row+1)
		current[0], current[row] = 1, 1
		// interior entries are the sum of the two entries above
		for col := 1; col < row; col++ {
			current[col] = triangle[row-1][col-1] + triangle[row-1][col]
		}
		triangle[row] = current
	}
	return triangle
}
// getRow0 returns row rowIndex (0-based) of Pascal's triangle by
// building the full triangle and keeping only its last row.
func getRow0(rowIndex int) []int {
	return generate(rowIndex + 1)[rowIndex]
}
// getRow computes row rowIndex of Pascal's triangle using O(rowIndex)
// space, accumulating each row in place from right to left.
func getRow(rowIndex int) []int {
	row := make([]int, rowIndex+1)
	for i := range row {
		row[i] = 1
		for j := i - 1; j > 0; j-- {
			row[j] += row[j-1]
		}
	}
	return row
}
|
package Core
// ModuleMgr is a registry mapping module names to module instances.
type ModuleMgr struct {
	mgrs map[string]interface{}
}

// mgr is the lazily created singleton registry.
var mgr *ModuleMgr

// GetModuleMgr returns the process-wide module registry, creating it on
// first use.
// NOTE(review): the lazy initialization is not goroutine-safe — confirm
// all callers run on a single goroutine during startup.
func GetModuleMgr() *ModuleMgr {
	if mgr == nil {
		mgr = &ModuleMgr{mgrs: make(map[string]interface{})}
	}
	return mgr
}

// RegistModule stores modulePointer under moduleName; an existing
// registration for the same name is kept and the new one ignored.
// Fixed: the methods now use pointer receivers, consistent with the
// *ModuleMgr singleton and avoiding both a struct copy per call and
// shadowing the package-level mgr variable with the receiver name.
func (m *ModuleMgr) RegistModule(moduleName string, modulePointer interface{}) {
	if _, ok := m.mgrs[moduleName]; ok {
		return
	}
	m.mgrs[moduleName] = modulePointer
}

// GetModule returns the module registered under moduleName, or nil when
// no such module exists.
func (m *ModuleMgr) GetModule(moduleName string) interface{} {
	if module, ok := m.mgrs[moduleName]; ok {
		return module
	}
	return nil
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package tcrypto
import (
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/pairing"
"go.dedis.ch/kyber/v3/util/key"
)
// Suite bundles the kyber group, pairing, and key-generation
// capabilities this package requires into a single interface.
type Suite interface {
	kyber.Group
	pairing.Suite
	key.Suite
}
|
package remote
import (
"sync"
"time"
)
// value is the counter shared by every SequenceImpl instance
// (package-level mutable state).
var value = 0

// mockValue is the counter shared by every SequenceMock instance.
var mockValue = 0

// Sequence is a resettable, monotonically increasing counter.
type Sequence interface {
	GetNext() int
	Reset()
	GetValue() int
}
// SequenceImpl implements Sequence over the shared package-level
// counter, adding an artificial delay to every operation.
type SequenceImpl struct {
	wait time.Duration
	mutex *sync.Mutex
}

// NewSequenceImpl returns a SequenceImpl with a 100ms delay.
func NewSequenceImpl() *SequenceImpl {
	return &SequenceImpl{100 * time.Millisecond, &sync.Mutex{}}
}

// GetValue returns the current counter after the configured delay.
// Fixed: the read of the shared counter is now mutex-guarded like
// GetNext/Reset; the previous unguarded read was a data race.
func (seq *SequenceImpl) GetValue() int {
	<-time.After(seq.wait)
	seq.mutex.Lock()
	defer seq.mutex.Unlock()
	return value
}

// GetNext increments the counter under the lock and returns the new
// value (the delay elapses while the lock is held, as before).
func (seq *SequenceImpl) GetNext() int {
	seq.mutex.Lock()
	defer seq.mutex.Unlock()
	value++
	<-time.After(seq.wait)
	return value
}

// Reset sets the counter back to zero under the lock.
func (seq *SequenceImpl) Reset() {
	seq.mutex.Lock()
	defer seq.mutex.Unlock()
	value = 0
	<-time.After(seq.wait)
}
// SequenceMock is a delay-free Sequence implementation for tests, backed
// by the shared mockValue counter.
type SequenceMock struct {
	mutex *sync.Mutex
}

// NewSequenceMock returns a ready-to-use SequenceMock.
func NewSequenceMock() *SequenceMock {
	return &SequenceMock{&sync.Mutex{}}
}

// GetValue returns the current mock counter.
// NOTE(review): unlike GetNext/Reset this read is not mutex-guarded.
func (seq *SequenceMock) GetValue() int {
	return mockValue
}

// GetNext increments the mock counter under the lock and returns it.
func (seq *SequenceMock) GetNext() int {
	seq.mutex.Lock()
	defer seq.mutex.Unlock()
	mockValue++
	return mockValue
}

// Reset zeroes the mock counter under the lock.
func (seq *SequenceMock) Reset() {
	seq.mutex.Lock()
	defer seq.mutex.Unlock()
	mockValue = 0
}
|
package main
import (
"encoding/json"
"github.com/gorilla/mux"
"log"
"math/rand"
"net/http"
"strconv"
)
// Book Struct (Model)
type Book struct {
	ID string `json:"id"`
	Isbn string `json:"isbn"`
	Title string `json:"title"`
	Author *Author `json:"author"`
}

// Author Struct
type Author struct {
	Firstname string `json:"firstname"`
	Lastname string `json:"lastname"`
}

// books is the in-memory store (no DB yet; seeded in main).
var books []Book

// ternaryForString returns a when it is non-empty, otherwise b.
func ternaryForString(a, b string) (chosen string) {
	if a == "" {
		return b
	}
	return a
}

// mergeBooks overlays newBook onto oldBook: the ID is always kept from
// oldBook, and every empty field of newBook falls back to oldBook's
// value. The merged book is returned.
// Fixed: a nil Author pointer no longer panics — a nil newBook.Author
// falls back to oldBook's author (aliased, not copied), and a nil
// oldBook.Author leaves newBook's author untouched.
func mergeBooks(oldBook Book, newBook Book) (mergedBook Book) {
	newBook.ID = oldBook.ID
	newBook.Isbn = ternaryForString(newBook.Isbn, oldBook.Isbn)
	newBook.Title = ternaryForString(newBook.Title, oldBook.Title)
	switch {
	case newBook.Author == nil:
		newBook.Author = oldBook.Author
	case oldBook.Author != nil:
		newBook.Author.Firstname = ternaryForString(newBook.Author.Firstname, oldBook.Author.Firstname)
		newBook.Author.Lastname = ternaryForString(newBook.Author.Lastname, oldBook.Author.Lastname)
	}
	return newBook
}
// getBooks writes the entire in-memory book list as JSON.
func getBooks(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	encoder := json.NewEncoder(w)
	encoder.Encode(books)
}
// getBook writes the book whose ID matches the {id} path parameter, or
// an empty Book when no book matches.
func getBook(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	id := mux.Vars(r)["id"]
	for _, b := range books {
		if b.ID != id {
			continue
		}
		json.NewEncoder(w).Encode(b)
		return
	}
	json.NewEncoder(w).Encode(&Book{})
}
// createBook decodes a Book from the request body, assigns it a random
// mock ID, appends it to the store, and echoes the stored book back.
// Fixed: a body-decode failure was silently ignored and created an
// empty book; it now returns 400 without touching the store.
func createBook(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var book Book
	if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
	book.ID = strconv.Itoa(rand.Intn(1000000)) // Mock ID not safe
	books = append(books, book)
	json.NewEncoder(w).Encode(book)
}
// updateBook replaces the book whose ID matches the {id} path parameter
// with the request body merged over the stored record; absent fields
// keep their old values. When no book matches, the full list is echoed.
// Fixed: the stored book was removed from the slice *before* the body
// was decoded, so a malformed body lost the record; decoding errors were
// also silently ignored (now 400). A nil Author in the body is replaced
// with an empty one so the merge cannot dereference nil.
func updateBook(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	params := mux.Vars(r)
	for index, item := range books {
		if item.ID == params["id"] {
			oldBook := books[index]
			var newBook Book
			if err := json.NewDecoder(r.Body).Decode(&newBook); err != nil {
				http.Error(w, "invalid request body", http.StatusBadRequest)
				return
			}
			if newBook.Author == nil {
				newBook.Author = &Author{}
			}
			newBook = mergeBooks(oldBook, newBook)
			books = append(books[:index], books[index+1:]...)
			books = append(books, newBook)
			json.NewEncoder(w).Encode(newBook)
			return
		}
	}
	json.NewEncoder(w).Encode(books)
}
// deleteBook removes the book whose ID matches the {id} path parameter
// (if any) and writes the resulting list as JSON.
func deleteBook(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	id := mux.Vars(r)["id"]
	for i := range books {
		if books[i].ID == id {
			books = append(books[:i], books[i+1:]...)
			break
		}
	}
	json.NewEncoder(w).Encode(books)
}
// main seeds two mock books and serves the book REST API on :8000.
func main() {
	router := mux.NewRouter()

	// Mock Data - @todo - implement DB
	books = append(books,
		Book{ID: "1", Isbn: "448743", Title: "Book One", Author: &Author{Firstname: "John", Lastname: "Doe"}},
		Book{ID: "2", Isbn: "847564", Title: "Book Two", Author: &Author{Firstname: "Steve", Lastname: "Smith"}},
	)

	// Route Handlers / Endpoints
	router.HandleFunc("/api/books", getBooks).Methods("GET")
	router.HandleFunc("/api/books/{id}", getBook).Methods("GET")
	router.HandleFunc("/api/books", createBook).Methods("POST")
	router.HandleFunc("/api/books/{id}", updateBook).Methods("PUT")
	router.HandleFunc("/api/books/{id}", deleteBook).Methods("DELETE")
	log.Fatal(http.ListenAndServe(":8000", router))
}
|
// Implement binary search without using standard library.
// If there are duplicate values of the key you are searching for,
// return the first occurance in the slice.
// If the search key is not present, SearchInts returns the index of
// the first value greater than the search key.
// If the key is greater than all values in the slice, SearchInts
// returns the length of the slice.
package binarysearch
import (
"fmt"
)
// testVersion identifies the exercise test-suite revision this solution targets.
const testVersion = 1
// SearchInts returns the index of the first occurrence of key in the
// sorted slice. If key is absent, it returns the index of the first
// value greater than key; if key is greater than all values, it
// returns len(slice). An empty slice yields 0.
// Fixed: the first-occurrence step previously walked left one element
// at a time — O(n) for long runs of duplicates; this lower-bound form
// is O(log n) in all cases. The old post-loop `slice[end] > key`
// branch was unreachable and has been removed.
func SearchInts(slice []int, key int) int {
	lo, hi := 0, len(slice)
	for lo < hi {
		mid := lo + (hi-lo)/2
		if slice[mid] < key {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	// lo is the smallest index with slice[lo] >= key (or len(slice))
	return lo
}
// k found at beginning of slice.
// k found at end of slice.
// k found at index fx.
// k < all values.
// k > all n values.
// k > lv at lx, < gv at gx.
// slice has no values.
// Message describes where key falls in the sorted slice using one of
// the seven phrasings listed above, delegating the lookup to SearchInts.
func Message(slice []int, key int) string {
	if len(slice) == 0 {
		return "slice has no values"
	}
	ix := SearchInts(slice, key) // ix: [0,len]
	if ix == len(slice) {
		return fmt.Sprintf("%d > all %d values", key, len(slice))
	}
	val := slice[ix]
	if val == key {
		if ix == 0 {
			return fmt.Sprintf("%d found at beginning of slice", key)
		} else if ix == len(slice)-1 {
			return fmt.Sprintf("%d found at end of slice", key)
		} else {
			return fmt.Sprintf("%d found at index %d", key, ix)
		}
	} else if val < key {
		// NOTE(review): SearchInts returns the first index whose value is
		// >= key (or len, handled above), so this branch looks
		// unreachable — confirm before extending it.
		return fmt.Sprintf("%d > %d at index %d, < %d at index %d",
			key, val, ix, slice[ix+1], ix+1)
	} else { // val > key
		if ix == 0 {
			return fmt.Sprintf("%d < all values", key)
		} else {
			return fmt.Sprintf("%d > %d at index %d, < %d at index %d",
				key, slice[ix-1], ix-1, val, ix)
		}
	}
}
|
package main
import (
"fmt"
"os"
)
// main echoes its command-line arguments on one line, space-separated.
func main() {
	// os.Args[0] is the path of the compiled binary, so echoing starts
	// at index 1.
	var output, separator string
	for _, argument := range os.Args[1:] {
		output += separator + argument
		separator = " "
	}
	fmt.Println(output)
}
|
package dushengchen
import "fmt"
/*
Submission:
https://leetcode.com/submissions/detail/356983445/
*/
// fourSum solves LeetCode 18: return all unique quadruplets in nums
// summing to target. Index pairs are bucketed by their pair-sum, then
// complementary buckets are joined; HashInt (defined elsewhere in this
// package) deduplicates value-quadruplets.
// NOTE(review): the fmt.Println below looks like leftover debug output;
// removing it would also require dropping the fmt import at the top of
// this file.
func fourSum(nums []int, target int) [][]int {
	//nums = SortInt(nums)
	// numsMap: pair-sum -> list of [i, j] index pairs producing it
	numsMap := map[int][][]int{}
	var res [][]int
	for i:=0; i<len(nums); i++ {
		for j:=i+1; j<len(nums); j++ {
			numsMap[nums[i]+nums[j]] = append(numsMap[nums[i]+nums[j]], []int{i, j})
		}
	}
	// resMap marks value-quadruplets already emitted
	resMap := map[string]bool{}
	for midNum, arr1 := range numsMap {
		other := target - midNum
		// visit each unordered (other, midNum) bucket pair only once
		if other > midNum {
			continue
		}
		if arr2, ok := numsMap[other]; ok && len(arr2) > 0 {
			for i,item1 := range arr1 {
				for j,item2 := range arr2 {
					// same bucket on both sides: keep i >= j so each
					// combination is considered once
					if other == midNum && i<j {
						continue
					}
					// the two pairs must not reuse an index
					if item1[0] == item2[0] || item1[1] == item2[1] ||
						item1[0] == item2[1] || item1[1] == item2[0] {
						continue
					}
					h := HashInt(nums[item1[0]], nums[item1[1]], nums[item2[0]], nums[item2[1]])
					if resMap[h] {
						continue
					} else {
						res = append(res, []int{nums[item1[0]], nums[item1[1]], nums[item2[0]], nums[item2[1]]})
						fmt.Println(item1[0], item1[1], item2[0], item2[1], midNum)
						resMap[h] = true
					}
				}
			}
		}
	}
	return res
}
|
package main
import (
"fmt"
"reflect"
)
// One use of reflect is inspecting the structure of our code while the
// application is running / after it is compiled.
// Reflection is very useful when building general-purpose libraries that
// are easy to use.
type Sample struct {
	// Name must be non-empty per the `required` tag checked by IsValid;
	// the `max` tag is read in main but not enforced by IsValid.
	Name string `required:"true" max:"10"`
}
// IsValid reports whether every struct field tagged `required:"true"`
// holds a non-empty value.
// NOTE(review): assumes data is a struct; other kinds would panic in
// NumField — confirm callers only pass structs.
func IsValid(data interface{}) bool {
	dataType := reflect.TypeOf(data)
	dataValue := reflect.ValueOf(data)
	for i := 0; i < dataType.NumField(); i++ {
		if dataType.Field(i).Tag.Get("required") != "true" {
			continue
		}
		if dataValue.Field(i).Interface() == "" {
			return false
		}
	}
	return true
}
// main demonstrates reading struct metadata via reflection and running
// the tag-based IsValid check before and after clearing the field.
func main() {
	sample := Sample{"Eko"}
	sampleType := reflect.TypeOf(sample)
	fmt.Println(sampleType.NumField())
	firstField := sampleType.Field(0)
	fmt.Println(firstField.Name)
	fmt.Println(firstField.Tag.Get("max"))
	fmt.Println(IsValid(sample))
	sample.Name = ""
	fmt.Println(IsValid(sample))
}
|
package graphql
// Directive locations recognized by the GraphQL specification, split
// between executable operations and schema (type-system) definitions.
const (
	// Operations
	DirectiveLocationQuery = "QUERY"
	DirectiveLocationMutation = "MUTATION"
	DirectiveLocationSubscription = "SUBSCRIPTION"
	DirectiveLocationField = "FIELD"
	DirectiveLocationFragmentDefinition = "FRAGMENT_DEFINITION"
	DirectiveLocationFragmentSpread = "FRAGMENT_SPREAD"
	DirectiveLocationInlineFragment = "INLINE_FRAGMENT"
	// Schema Definitions
	DirectiveLocationSchema = "SCHEMA"
	DirectiveLocationScalar = "SCALAR"
	DirectiveLocationObject = "OBJECT"
	DirectiveLocationFieldDefinition = "FIELD_DEFINITION"
	DirectiveLocationArgumentDefinition = "ARGUMENT_DEFINITION"
	DirectiveLocationInterface = "INTERFACE"
	DirectiveLocationUnion = "UNION"
	DirectiveLocationEnum = "ENUM"
	DirectiveLocationEnumValue = "ENUM_VALUE"
	DirectiveLocationInputObject = "INPUT_OBJECT"
	DirectiveLocationInputFieldDefinition = "INPUT_FIELD_DEFINITION"
)
// DefaultDeprecationReason Constant string used for default reason for a deprecation.
const DefaultDeprecationReason = "No longer supported"
// SpecifiedDirectives The full list of directives defined by the GraphQL
// specification. (The previous comment misnamed this "SpecifiedRules".)
var SpecifiedDirectives = []*Directive{
	IncludeDirective,
	SkipDirective,
	DeprecatedDirective,
}
// Directive structs are used by the GraphQL runtime as a way of modifying execution
// behavior. Type system creators will usually not create these directly.
type Directive struct {
	Name string `json:"name"`
	Description string `json:"description"`
	Locations []string `json:"locations"`
	Args []*Argument `json:"args"`
	// err records any validation failure detected by NewDirective.
	err error
}
// DirectiveConfig options for creating a new GraphQLDirective
type DirectiveConfig struct {
	Name string `json:"name"`
	Description string `json:"description"`
	Locations []string `json:"locations"`
	Args FieldConfigArgument `json:"args"`
}
// NewDirective validates config and builds a Directive. Validation
// failures are not returned as errors; they are recorded on the
// unexported err field of the (otherwise incomplete) Directive.
func NewDirective(config DirectiveConfig) *Directive {
	dir := &Directive{}
	// Ensure directive is named
	if dir.err = invariant(config.Name != "", "Directive must be named."); dir.err != nil {
		return dir
	}
	// Ensure directive name is valid
	if dir.err = assertValidName(config.Name); dir.err != nil {
		return dir
	}
	// Ensure locations are provided for directive
	if dir.err = invariant(len(config.Locations) > 0, "Must provide locations for directive."); dir.err != nil {
		return dir
	}
	// Convert the config's argument map into the runtime argument list,
	// validating each argument name along the way.
	args := []*Argument{}
	for argName, argConfig := range config.Args {
		if dir.err = assertValidName(argName); dir.err != nil {
			return dir
		}
		args = append(args, &Argument{
			PrivateName: argName,
			PrivateDescription: argConfig.Description,
			Type: argConfig.Type,
			DefaultValue: argConfig.DefaultValue,
		})
	}
	dir.Name = config.Name
	dir.Description = config.Description
	dir.Locations = config.Locations
	dir.Args = args
	return dir
}
// IncludeDirective is used to conditionally include fields or fragments.
var IncludeDirective = NewDirective(DirectiveConfig{
Name: "include",
Description: "Directs the executor to include this field or fragment only when " +
"the `if` argument is true.",
Locations: []string{
DirectiveLocationField,
DirectiveLocationFragmentSpread,
DirectiveLocationInlineFragment,
},
Args: FieldConfigArgument{
"if": &ArgumentConfig{
Type: NewNonNull(Boolean),
Description: "Included when true.",
},
},
})
// SkipDirective Used to conditionally skip (exclude) fields or fragments.
// Mirrors the GraphQL spec's built-in @skip(if: Boolean!) directive.
var SkipDirective = NewDirective(DirectiveConfig{
	Name: "skip",
	Description: "Directs the executor to skip this field or fragment when the `if` " +
		"argument is true.",
	Args: FieldConfigArgument{
		"if": &ArgumentConfig{
			Type:        NewNonNull(Boolean),
			Description: "Skipped when true.",
		},
	},
	// Valid only in executable documents: fields, fragment spreads and
	// inline fragments.
	Locations: []string{
		DirectiveLocationField,
		DirectiveLocationFragmentSpread,
		DirectiveLocationInlineFragment,
	},
})
// DeprecatedDirective Used to declare element of a GraphQL schema as deprecated.
// Mirrors the GraphQL spec's built-in @deprecated(reason: String) directive.
var DeprecatedDirective = NewDirective(DirectiveConfig{
	Name:        "deprecated",
	Description: "Marks an element of a GraphQL schema as no longer supported.",
	Args: FieldConfigArgument{
		"reason": &ArgumentConfig{
			Type: String,
			// Fixed: the original literals joined as "Formattedin [Markdown]"
			// because the space was missing at the concatenation boundary.
			Description: "Explains why this element was deprecated, usually also including a " +
				"suggestion for how to access supported similar data. Formatted " +
				"in [Markdown](https://daringfireball.net/projects/markdown/).",
			DefaultValue: DefaultDeprecationReason,
		},
	},
	// Valid only in type-system positions: field and enum value definitions.
	Locations: []string{
		DirectiveLocationFieldDefinition,
		DirectiveLocationEnumValue,
	},
})
|
package main
import (
"fmt"
)
// charMorseArray maps the letters 'a'..'z' (by index) to their Morse code
// representations.
var charMorseArray = [...]string{".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."}

// getWordNum encodes the Morse translation of word as an integer: starting
// from a leading 1 bit (so leading dots are preserved), every Morse symbol
// appends one bit — dash = 1, dot = 0. Two words collide iff their Morse
// transformations are identical. Input is assumed to be lowercase a-z.
func getWordNum(word string) uint64 {
	var encoded uint64 = 1
	for _, letter := range word {
		for _, symbol := range charMorseArray[letter-'a'] {
			encoded <<= 1
			if symbol == '-' {
				encoded |= 1
			}
		}
	}
	return encoded
}
// uniqueMorseRepresentations returns the number of distinct Morse code
// transformations among the given words (LeetCode 804).
//
// The original implementation de-duplicated encodings with an O(n^2)
// pairwise scan (zeroing duplicates in place); a hash set gives the same
// count in O(n) and reads more directly.
func uniqueMorseRepresentations(words []string) int {
	seen := make(map[uint64]struct{}, len(words))
	for _, word := range words {
		seen[getWordNum(word)] = struct{}{}
	}
	return len(seen)
}
// main runs the LeetCode 804 sample input and prints the expected answer (2).
func main() {
	words := []string{"gin", "zen", "gig", "msg"}
	fmt.Println(uniqueMorseRepresentations(words))
}
|
package ghapp
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/dollarshaveclub/acyl/pkg/eventlogger"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/google/go-github/github"
"github.com/google/uuid"
"github.com/palantir/go-githubapp/githubapp"
"github.com/pkg/errors"
)
// prEventHandler is a ClientCreator that handles PR webhook events
type prEventHandler struct {
	githubapp.ClientCreator                     // embedded factory for installation-scoped GitHub clients
	dl                      persistence.DataLayer // persistence backend handed to event loggers
	supportedPRActions      map[string]struct{}   // set of PR action names this handler acts on; others get a 200 no-op
	RRDCallback             PRCallback            // invoked with the parsed repo/revision data for supported actions
}
// Handles specifies the type of events handled.
// Only "pull_request" webhooks are routed to this handler.
func (prh *prEventHandler) Handles() []string {
	return []string{"pull_request"}
}
// Handle is called by the handler when an event is received
// The PR event handler validates the webhook, sets up the context with appropriate eventlogger and GH client factory
// and then executes the callback.
// Non-default HTTP responses are produced via githubapp.SetResponder; a
// returned error additionally makes the callback failure visible to the
// framework.
func (prh *prEventHandler) Handle(ctx context.Context, eventType, deliveryID string, payload []byte) error {
	// response is used when a non-default response is needed
	response := func(status int, msg string, ctype string) {
		githubapp.SetResponder(ctx, func(w http.ResponseWriter, r *http.Request) {
			if ctype != "" {
				w.Header().Add("Content-Type", ctype)
			}
			w.WriteHeader(status)
			w.Write([]byte(msg))
		})
	}
	if eventType != "pull_request" {
		return errors.New("not a pull request event")
	}
	var event github.PullRequestEvent
	if err := json.Unmarshal(payload, &event); err != nil {
		response(http.StatusBadRequest, fmt.Sprintf("error unmarshaling event: %v", err), "")
		return errors.Wrap(err, "error unmarshaling event")
	}
	did, err := uuid.Parse(deliveryID)
	if err != nil {
		response(http.StatusBadRequest, fmt.Sprintf("malformed delivery id: %v", err), "")
		return errors.Wrap(err, "malformed delivery id")
	}
	// Collect the repo/revision details the callback needs from the PR
	// payload. SourceBranch and SourceRef are both the head ref here.
	rrd := models.RepoRevisionData{
		BaseBranch:   event.GetPullRequest().GetBase().GetRef(),
		BaseSHA:      event.GetPullRequest().GetBase().GetSHA(),
		PullRequest:  uint(event.GetPullRequest().GetNumber()),
		Repo:         event.GetPullRequest().GetBase().GetRepo().GetFullName(),
		SourceBranch: event.GetPullRequest().GetHead().GetRef(),
		SourceRef:    event.GetPullRequest().GetHead().GetRef(),
		SourceSHA:    event.GetPullRequest().GetHead().GetSHA(),
		User:         event.GetPullRequest().GetUser().GetLogin(),
		IsFork:       event.GetPullRequest().GetHead().GetRepo().GetFork(),
	}
	action := event.GetAction()
	// Events whose head repo is a fork are acknowledged (200) but not processed.
	if rrd.IsFork {
		response(http.StatusOK, "ignoring event from forked HEAD repo", "")
		return nil
	}
	// Unsupported PR actions are likewise acknowledged without running the callback.
	_, ok := prh.supportedPRActions[action]
	if !ok {
		response(http.StatusOK, "action not relevant: "+action, "")
		return nil
	}
	elogger, err := prh.getlogger(payload, did, rrd.Repo, rrd.PullRequest)
	if err != nil {
		return errors.Wrap(err, "error getting event logger")
	}
	// set up context: the event logger and a GitHub client factory bound to
	// this installation are made available to the callback via ctx.
	ctx = eventlogger.NewEventLoggerContext(ctx, elogger)
	ctx = NewGitHubClientContext(ctx, event.GetInstallation().GetID(), prh)
	err = prh.RRDCallback(ctx, action, rrd)
	if err != nil {
		response(http.StatusInternalServerError, fmt.Sprintf(`{"error_details":"%v"}`, err), "application/json")
	} else {
		response(http.StatusAccepted, fmt.Sprintf(`{"event_log_id": "%v"}`, eventlogger.GetLogger(ctx).ID.String()), "application/json")
	}
	return err
}
// getlogger builds and initializes an eventlogger.Logger for one webhook
// delivery. The logger gets a fresh random ID, writes to stdout, and persists
// through the handler's DataLayer.
func (prh *prEventHandler) getlogger(body []byte, deliveryID uuid.UUID, repo string, pr uint) (*eventlogger.Logger, error) {
	logID, err := uuid.NewRandom()
	if err != nil {
		return nil, errors.Wrap(err, "error getting random UUID")
	}
	elog := &eventlogger.Logger{
		ID:         logID,
		DeliveryID: deliveryID,
		DL:         prh.dl,
		Sink:       os.Stdout,
	}
	if err = elog.Init(body, repo, pr); err != nil {
		return nil, errors.Wrap(err, "error initializing event logger")
	}
	return elog, nil
}
|
package sshkeymanager
import (
"fmt"
"golang.org/x/crypto/ssh"
kh "golang.org/x/crypto/ssh/knownhosts"
"io/ioutil"
"os"
"path"
"time"
)
var (
	// Home caches the value of $HOME as read by defaultKeyPath.
	Home string
	// HostKeyCallback verifies server host keys against the user's
	// known_hosts file; populated by configSSH.
	HostKeyCallback ssh.HostKeyCallback
)
// defaultKeyPath returns the conventional private-key location
// ($HOME/.ssh/id_rsa), caching $HOME in the package-level Home variable as a
// side effect. It returns "" when $HOME is unset or empty.
func defaultKeyPath() string {
	Home = os.Getenv("HOME")
	if Home == "" {
		return ""
	}
	return path.Join(Home, ".ssh/id_rsa")
}
// configSSH loads the user's default RSA private key and known_hosts file,
// then dials the SSH endpoint described by the Client, storing the live
// connection in c.Cl. Any failure is returned unchanged to the caller.
func (c *Client) configSSH() error {
	keyBytes, err := ioutil.ReadFile(defaultKeyPath())
	if err != nil {
		return err
	}

	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		return err
	}

	// defaultKeyPath has just populated Home, so known_hosts is resolved
	// relative to the same home directory.
	if HostKeyCallback, err = kh.New(path.Join(Home, ".ssh/known_hosts")); err != nil {
		return err
	}

	cfg := &ssh.ClientConfig{
		User:            c.User,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: HostKeyCallback,
		Timeout:         10 * time.Second,
	}

	addr := fmt.Sprintf("%s:%s", c.Host, c.Port)
	c.Cl, err = ssh.Dial("tcp", addr, cfg)
	return err
}
|
package leetcode
// last tracks the most recently visited node during doFlattern's preorder
// traversal. NOTE(review): this is package-level state that persists across
// calls — it must be reset before flattening a second tree, or the new tree
// gets linked onto the previous one's tail.
var last *TreeNode

// doFlattern walks the tree in preorder and threads the visited nodes into a
// chain through their Left pointers (each node is hung off the previously
// visited node's Left). Right pointers are not modified here; reverseTree
// later moves the Left chain onto Right.
func doFlattern(cur *TreeNode) {
	if cur == nil {
		return
	}
	// Append cur after the previously visited node.
	if last != nil {
		last.Left = cur
	}
	last = cur
	doFlattern(cur.Left)
	doFlattern(cur.Right)
}
// reverseTree walks the Left-pointer chain produced by doFlattern and, for
// each node, moves the chain link from Left onto Right, clearing Left.
// Written iteratively; equivalent to the tail-recursive original.
func reverseTree(cur *TreeNode) {
	for cur != nil {
		next := cur.Left
		cur.Right = next
		cur.Left = nil
		cur = next
	}
}
// flatten rearranges the tree into a right-skewed linked list in preorder
// (LeetCode 114): doFlattern first chains the nodes via Left, then
// reverseTree moves that chain onto Right and nils out every Left pointer.
func flatten(root *TreeNode) {
	// Reset the package-level traversal cursor. Without this, a second call
	// to flatten would link the new tree's root onto the tail left over from
	// the previous call, corrupting both results.
	last = nil
	doFlattern(root)
	reverseTree(root)
}
|
package main
import (
"github.com/LukeJoeDavis/moql/discovery"
"github.com/LukeJoeDavis/moql/generate"
"fmt"
)
// main discovers every table in the target database and generates INSERT
// statements for each, logging per-table progress to stdout.
func main() {
	tables := discover.GetTables()
	inserts := make([]string, 0)
	for _, table := range tables {
		// The original wrapped this body in an immediately-invoked closure,
		// which added nothing (the work is fully synchronous); inlined here.
		fmt.Println(table + " starting")
		discoveredTable := discover.GetColumns(table)
		discoveredTable.Name = table
		inserts = append(inserts, generate.CreateInserts(discoveredTable)...)
		fmt.Println(table + " complete")
	}
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
// Resources:
// - https://dev.mysql.com/doc/refman/8.0/en/caching-sha2-pluggable-authentication.html
// - https://dev.mysql.com/doc/dev/mysql-server/latest/page_caching_sha2_authentication_exchanges.html
// - https://dev.mysql.com/doc/dev/mysql-server/latest/namespacesha2__password.html
// - https://www.akkadia.org/drepper/SHA-crypt.txt
// - https://dev.mysql.com/worklog/task/?id=9591
//
// CREATE USER 'foo'@'%' IDENTIFIED BY 'foobar';
// SELECT HEX(authentication_string) FROM mysql.user WHERE user='foo';
// 24412430303524031A69251C34295C4B35167C7F1E5A7B63091349503974624D34504B5A424679354856336868686F52485A736E4A733368786E427575516C73446469496537
//
// Format:
// Split on '$':
// - digest type ("A")
// - iterations (divided by ITERATION_MULTIPLIER)
// - salt+hash
//
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"strconv"
"github.com/pingcap/tidb/parser/mysql"
)
const (
	// MIXCHARS is the number of characters to use in the mix
	// (one digest's worth of bytes; equal to sha256.Size).
	MIXCHARS = 32
	// SALT_LENGTH is the length of the salt
	SALT_LENGTH = 20 //nolint: revive
	// ITERATION_MULTIPLIER is the number of iterations to use
	// (the stored rounds value is iterations divided by this factor)
	ITERATION_MULTIPLIER = 1000 //nolint: revive
)
func b64From24bit(b []byte, n int, buf *bytes.Buffer) {
b64t := []byte("./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
w := (int64(b[0]) << 16) | (int64(b[1]) << 8) | int64(b[2])
for n > 0 {
n--
buf.WriteByte(b64t[w&0x3f])
w >>= 6
}
}
// Sha256Hash is an util function to calculate sha256 hash.
func Sha256Hash(input []byte) []byte {
res := sha256.Sum256(input)
return res[:]
}
// hashCrypt implements the SHA-crypt style key-stretching scheme used by
// MySQL's caching_sha2_password (and TiDB's SM3 variant), returning the full
// authentication string "$A$<rounds>$<salt><encoded digest>".
// 'hash' function should return an array with 32 bytes, the same as SHA-256
func hashCrypt(plaintext string, salt []byte, iterations int, hash func([]byte) []byte) string {
	// Numbers in the comments refer to the description of the algorithm on https://www.akkadia.org/drepper/SHA-crypt.txt
	// 1, 2, 3
	bufA := bytes.NewBuffer(make([]byte, 0, 4096))
	bufA.WriteString(plaintext)
	bufA.Write(salt)
	// 4, 5, 6, 7, 8
	bufB := bytes.NewBuffer(make([]byte, 0, 4096))
	bufB.WriteString(plaintext)
	bufB.Write(salt)
	bufB.WriteString(plaintext)
	sumB := hash(bufB.Bytes())
	bufB.Reset()
	// 9, 10: append digest B for each full MIXCHARS block of the password
	// length, then the remaining i bytes.
	var i int
	for i = len(plaintext); i > MIXCHARS; i -= MIXCHARS {
		bufA.Write(sumB[:MIXCHARS])
	}
	bufA.Write(sumB[:i])
	// 11: for each bit of len(plaintext), append either the password or
	// digest B depending on the bit's value.
	for i = len(plaintext); i > 0; i >>= 1 {
		if i%2 == 0 {
			bufA.WriteString(plaintext)
		} else {
			bufA.Write(sumB[:])
		}
	}
	// 12
	sumA := hash(bufA.Bytes())
	bufA.Reset()
	// 13, 14, 15
	// bufDP aliases bufA (which was just Reset) to reuse its backing storage.
	bufDP := bufA
	for range []byte(plaintext) {
		bufDP.WriteString(plaintext)
	}
	sumDP := hash(bufDP.Bytes())
	bufDP.Reset()
	// 16: byte sequence P, built from digest DP to the password's length.
	p := make([]byte, 0, sha256.Size)
	for i = len(plaintext); i > 0; i -= MIXCHARS {
		if i > MIXCHARS {
			p = append(p, sumDP[:]...)
		} else {
			p = append(p, sumDP[0:i]...)
		}
	}
	// 17, 18, 19: digest DS over the salt repeated 16 + sumA[0] times
	// (bufDS also aliases bufA).
	bufDS := bufA
	for i = 0; i < 16+int(sumA[0]); i++ {
		bufDS.Write(salt)
	}
	sumDS := hash(bufDS.Bytes())
	bufDS.Reset()
	// 20: byte sequence S, built from digest DS to the salt's length.
	s := make([]byte, 0, 32)
	for i = len(salt); i > 0; i -= MIXCHARS {
		if i > MIXCHARS {
			s = append(s, sumDS[:]...)
		} else {
			s = append(s, sumDS[0:i]...)
		}
	}
	// 21: the expensive stretching loop; each round mixes p, s and the
	// running digest according to the round number's parity and its
	// divisibility by 3 and 7 (bufC aliases bufA as well).
	bufC := bufA
	var sumC []byte
	for i = 0; i < iterations; i++ {
		bufC.Reset()
		if i&1 != 0 {
			bufC.Write(p)
		} else {
			bufC.Write(sumA[:])
		}
		if i%3 != 0 {
			bufC.Write(s)
		}
		if i%7 != 0 {
			bufC.Write(p)
		}
		if i&1 != 0 {
			bufC.Write(sumA[:])
		} else {
			bufC.Write(p)
		}
		sumC = hash(bufC.Bytes())
		sumA = sumC
	}
	// 22: assemble "$A$<rounds hex>$<salt>" followed by the crypt-base64
	// encoding of sumC in the fixed permuted byte order below.
	buf := bytes.NewBuffer(make([]byte, 0, 100))
	buf.Write([]byte{'$', 'A', '$'})
	rounds := fmt.Sprintf("%03X", iterations/ITERATION_MULTIPLIER)
	buf.WriteString(rounds)
	buf.Write([]byte{'$'})
	buf.Write(salt)
	b64From24bit([]byte{sumC[0], sumC[10], sumC[20]}, 4, buf)
	b64From24bit([]byte{sumC[21], sumC[1], sumC[11]}, 4, buf)
	b64From24bit([]byte{sumC[12], sumC[22], sumC[2]}, 4, buf)
	b64From24bit([]byte{sumC[3], sumC[13], sumC[23]}, 4, buf)
	b64From24bit([]byte{sumC[24], sumC[4], sumC[14]}, 4, buf)
	b64From24bit([]byte{sumC[15], sumC[25], sumC[5]}, 4, buf)
	b64From24bit([]byte{sumC[6], sumC[16], sumC[26]}, 4, buf)
	b64From24bit([]byte{sumC[27], sumC[7], sumC[17]}, 4, buf)
	b64From24bit([]byte{sumC[18], sumC[28], sumC[8]}, 4, buf)
	b64From24bit([]byte{sumC[9], sumC[19], sumC[29]}, 4, buf)
	b64From24bit([]byte{0, sumC[31], sumC[30]}, 3, buf)
	return buf.String()
}
// CheckHashingPassword checks if a caching_sha2_password or tidb_sm3_password
// authentication string matches a password. pwhash is the stored hash in the
// "$A$rounds$salt+digest" format; hash selects the digest plugin.
func CheckHashingPassword(pwhash []byte, password string, hash string) (bool, error) {
	pwhashParts := bytes.Split(pwhash, []byte("$"))
	if len(pwhashParts) != 4 {
		return false, errors.New("failed to decode hash parts")
	}

	hashType := string(pwhashParts[1])
	if hashType != "A" {
		return false, errors.New("digest type is incompatible")
	}

	iterations, err := strconv.ParseInt(string(pwhashParts[2]), 16, 64)
	if err != nil {
		return false, errors.New("failed to decode iterations")
	}
	iterations = iterations * ITERATION_MULTIPLIER

	// A malformed hash whose salt segment is shorter than SALT_LENGTH would
	// previously panic on the slice below; reject it explicitly.
	if len(pwhashParts[3]) < SALT_LENGTH {
		return false, errors.New("failed to decode salt")
	}
	salt := pwhashParts[3][:SALT_LENGTH]

	var newHash string
	switch hash {
	case mysql.AuthCachingSha2Password:
		newHash = hashCrypt(password, salt, int(iterations), Sha256Hash)
	case mysql.AuthTiDBSM3Password:
		newHash = hashCrypt(password, salt, int(iterations), Sm3Hash)
	default:
		// Previously an unknown plugin fell through and compared against the
		// empty string, silently returning (false, nil); report it instead.
		return false, errors.New("unsupported authentication plugin")
	}

	return bytes.Equal(pwhash, []byte(newHash)), nil
}
// NewHashPassword creates a new password for caching_sha2_password or
// tidb_sm3_password, using a freshly generated SALT_LENGTH-byte salt and
// 5*ITERATION_MULTIPLIER rounds. It returns "" for an unsupported plugin.
func NewHashPassword(pwd string, hash string) string {
	salt := make([]byte, SALT_LENGTH)
	// The original ignored crypto/rand errors; a failing entropy source must
	// not silently produce a predictable salt, so fail hard instead.
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Restrict to 7-bit to avoid multi-byte UTF-8, and re-roll any byte that
	// collides with the '$' field separator or NUL.
	for i := range salt {
		salt[i] = salt[i] &^ 128
		for salt[i] == 36 || salt[i] == 0 { // '$' or NUL
			newval := make([]byte, 1)
			if _, err := rand.Read(newval); err != nil {
				panic(err)
			}
			salt[i] = newval[0] &^ 128
		}
	}

	switch hash {
	case mysql.AuthCachingSha2Password:
		return hashCrypt(pwd, salt, 5*ITERATION_MULTIPLIER, Sha256Hash)
	case mysql.AuthTiDBSM3Password:
		return hashCrypt(pwd, salt, 5*ITERATION_MULTIPLIER, Sm3Hash)
	default:
		return ""
	}
}
|
package e2e
import "testing"
// TestE2E is the single entry point `go test` uses to run the e2e suite;
// all actual test logic lives in RunE2ETests.
func TestE2E(t *testing.T) {
	RunE2ETests(t)
}
|
package dao
import (
"fmt"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"mix/test/codes"
entity "mix/test/entity/core/transaction"
mapper "mix/test/mapper/core/transaction"
"mix/test/utils/status"
)
// CreateHotWithdraw inserts a HotWithdraw row via the mapper and returns the
// generated primary key. Failures are logged and returned.
func (p *Dao) CreateHotWithdraw(logger *zap.Logger, session *xorm.Session, item *entity.HotWithdraw) (id int64, err error) {
	res, err := mapper.CreateHotWithdraw(session, item)
	if err != nil {
		logger.Error("Call mapper.CreateHotWithdraw error", zap.Error(err))
		return 0, err
	}
	if id, err = res.LastInsertId(); err != nil {
		logger.Error("Get id error", zap.Error(err))
		return id, err
	}
	return id, nil
}
// GetHotWithdraw loads a HotWithdraw row by id; a missing row yields a nil
// item with a nil error.
func (p *Dao) GetHotWithdraw(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.HotWithdraw, err error) {
	if item, err = mapper.GetHotWithdraw(session, id); err != nil {
		logger.Error("Call mapper.GetHotWithdraw error", zap.Error(err))
	}
	return item, err
}
// MustGetHotWithdraw loads a HotWithdraw by id and converts a missing row
// into a codes.HotWithdrawNotFound error.
func (p *Dao) MustGetHotWithdraw(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.HotWithdraw, err error) {
	item, err = p.GetHotWithdraw(logger, session, id)
	if err != nil || item != nil {
		return item, err
	}
	err = status.Code(codes.HotWithdrawNotFound)
	logger.Error(
		"Get hotWithdraw error",
		zap.Error(err),
		zap.Int64("id", id),
	)
	return nil, err
}
// GetHotWithdrawList returns every HotWithdraw row, logging mapper failures.
func (p *Dao) GetHotWithdrawList(logger *zap.Logger, session *xorm.Session) (items []*entity.HotWithdraw, err error) {
	if items, err = mapper.GetHotWithdrawList(session); err != nil {
		logger.Error("Call mapper.GetHotWithdrawList error", zap.Error(err))
	}
	return items, err
}
// RemoveHotWithdraw deletes a HotWithdraw row by id without checking how
// many rows were affected.
func (p *Dao) RemoveHotWithdraw(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	if _, err = mapper.RemoveHotWithdraw(session, id); err != nil {
		logger.Error("Call mapper.RemoveHotWithdraw error", zap.Error(err))
	}
	return err
}
// MustRemoveHotWithdraw deletes a HotWithdraw row by id and requires that
// exactly one row was affected, returning an error otherwise.
func (p *Dao) MustRemoveHotWithdraw(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	res, err := mapper.RemoveHotWithdraw(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveHotWithdraw error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		// Fixed: the message used to say "update affected error" — a
		// copy/paste from the update helpers; this function removes rows.
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveHotWithdraw error",
			zap.Int64("affected", affected),
			zap.Int64("id", id),
			zap.Error(err))
		return
	}
	return
}
// UpdateHotWithdraw persists changes to a HotWithdraw row without checking
// how many rows were affected.
func (p *Dao) UpdateHotWithdraw(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.HotWithdraw) (err error) {
	if _, err = mapper.UpdateHotWithdraw(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateHotWithdraw error", zap.Error(err))
	}
	return err
}
// MustUpdateHotWithdraw persists changes to a HotWithdraw row and requires
// that exactly one row was affected, returning an error otherwise.
func (p *Dao) MustUpdateHotWithdraw(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.HotWithdraw) (err error) {
	res, err := mapper.UpdateHotWithdraw(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateHotWithdraw error", zap.Error(err))
		return err
	}
	var affected int64
	if affected, err = res.RowsAffected(); err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return err
	}
	if affected == 1 {
		return nil
	}
	err = fmt.Errorf("update affected error")
	logger.Error("Call mapper.UpdateHotWithdraw error",
		zap.Int64("affected", affected),
		zap.Int64("item.Id", item.Id),
		zap.Error(err))
	return err
}
// GetHotWithdrawByMerchantId loads a HotWithdraw row by the
// (merchantId, serialId) pair; a missing row yields nil, nil.
func (p *Dao) GetHotWithdrawByMerchantId(logger *zap.Logger, session *xorm.Session, merchantId int64, serialId string) (item *entity.HotWithdraw, err error) {
	if item, err = mapper.GetHotWithdrawByMerchantId(session, merchantId, serialId); err != nil {
		logger.Error("Call mapper.GetHotWithdrawByMerchantId error", zap.Error(err))
	}
	return item, err
}
// MustGetHotWithdrawByMerchantId loads a HotWithdraw by (merchantId,
// serialId) and converts a missing row into codes.HotWithdrawNotFound.
func (p *Dao) MustGetHotWithdrawByMerchantId(logger *zap.Logger, session *xorm.Session, merchantId int64, serialId string) (item *entity.HotWithdraw, err error) {
	item, err = p.GetHotWithdrawByMerchantId(logger, session, merchantId, serialId)
	if err != nil || item != nil {
		return item, err
	}
	err = status.Code(codes.HotWithdrawNotFound)
	logger.Error(
		"Get hotWithdraw error",
		zap.Error(err),
		zap.Int64("merchantId", merchantId),
		zap.String("serialId", serialId),
	)
	return nil, err
}
// UpdateHotWithdrawByMerchantId persists changes addressed by the merchant
// key without checking how many rows were affected.
func (p *Dao) UpdateHotWithdrawByMerchantId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.HotWithdraw) (err error) {
	if _, err = mapper.UpdateHotWithdrawByMerchantId(engine, session, item); err != nil {
		logger.Error("Call mapper.UpdateHotWithdrawByMerchantId error", zap.Error(err))
	}
	return err
}
// MustUpdateHotWithdrawByMerchantId persists changes addressed by the
// merchant key and requires that exactly one row was affected.
func (p *Dao) MustUpdateHotWithdrawByMerchantId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.HotWithdraw) (err error) {
	res, err := mapper.UpdateHotWithdrawByMerchantId(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateHotWithdrawByMerchantId error", zap.Error(err))
		return err
	}
	var affected int64
	if affected, err = res.RowsAffected(); err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return err
	}
	if affected == 1 {
		return nil
	}
	err = fmt.Errorf("update affected error")
	logger.Error("Call mapper.UpdateHotWithdrawByMerchantId error",
		zap.Int64("affected", affected),
		zap.Int64("item.MerchantId", item.MerchantId),
		zap.String("item.SerialId", item.SerialId),
		zap.Error(err))
	return err
}
// RemoveHotWithdrawByMerchantId deletes a HotWithdraw row by the merchant
// key without checking how many rows were affected.
func (p *Dao) RemoveHotWithdrawByMerchantId(logger *zap.Logger, session *xorm.Session, merchantId int64, serialId string) (err error) {
	if _, err = mapper.RemoveHotWithdrawByMerchantId(session, merchantId, serialId); err != nil {
		logger.Error("Call mapper.RemoveHotWithdrawByMerchantId error", zap.Error(err))
	}
	return err
}
// MustRemoveHotWithdrawByMerchantId deletes a HotWithdraw row by the
// merchant key and requires that exactly one row was affected.
func (p *Dao) MustRemoveHotWithdrawByMerchantId(logger *zap.Logger, session *xorm.Session, merchantId int64, serialId string) (err error) {
	res, err := mapper.RemoveHotWithdrawByMerchantId(session, merchantId, serialId)
	if err != nil {
		logger.Error("Call mapper.RemoveHotWithdrawByMerchantId error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		// Fixed: the message used to say "update affected error" — a
		// copy/paste from the update helpers; this function removes rows.
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveHotWithdrawByMerchantId error",
			zap.Int64("affected", affected),
			zap.Int64("merchantId", merchantId),
			zap.String("serialId", serialId),
			zap.Error(err))
		return
	}
	return
}
|
package mongodb
import (
"context"
"errors"
"testing"
"github.com/brigadecore/brigade/v2/apiserver/internal/api"
"github.com/brigadecore/brigade/v2/apiserver/internal/lib/mongodb"
mongoTesting "github.com/brigadecore/brigade/v2/apiserver/internal/lib/mongodb/testing" // nolint: lll
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// TestProjectsStoreCreate exercises projectsStore.Create against a mocked
// Mongo collection: a duplicate-key write must surface as *meta.ErrConflict,
// an arbitrary driver error must be wrapped, and a clean insert succeeds.
func TestProjectsStoreCreate(t *testing.T) {
	testProject := api.Project{
		ObjectMeta: meta.ObjectMeta{
			ID: "blue-book",
		},
	}
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(err error)
	}{
		{
			name: "id already exists",
			collection: &mongoTesting.MockCollection{
				InsertOneFn: func(
					ctx context.Context,
					document interface{},
					opts ...*options.InsertOneOptions,
				) (*mongo.InsertOneResult, error) {
					// Simulates Mongo's duplicate-key write exception.
					return nil, mongoTesting.MockWriteException
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				ec, ok := err.(*meta.ErrConflict)
				require.True(t, ok)
				require.Equal(t, api.ProjectKind, ec.Type)
				require.Equal(t, testProject.ID, ec.ID)
				require.Contains(t, ec.Reason, "already exists")
			},
		},
		{
			name: "unanticipated error",
			collection: &mongoTesting.MockCollection{
				InsertOneFn: func(
					ctx context.Context,
					document interface{},
					opts ...*options.InsertOneOptions,
				) (*mongo.InsertOneResult, error) {
					return nil, errors.New("something went wrong")
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error inserting new project")
			},
		},
		{
			name: "successful creation",
			collection: &mongoTesting.MockCollection{
				InsertOneFn: func(
					ctx context.Context,
					document interface{},
					opts ...*options.InsertOneOptions,
				) (*mongo.InsertOneResult, error) {
					return nil, nil
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			err := store.Create(context.Background(), testProject)
			testCase.assertions(err)
		})
	}
}
// TestProjectsStoreList exercises projectsStore.List: a driver error is
// wrapped; a result page sets Continue/RemainingItemCount only when the
// mocked CountDocuments reports more matching documents beyond the page.
func TestProjectsStoreList(t *testing.T) {
	testProject := api.Project{
		ObjectMeta: meta.ObjectMeta{
			ID: "blue-book",
		},
	}
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(projects meta.List[api.Project], err error)
	}{
		{
			name: "error finding projects",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					return nil, errors.New("something went wrong")
				},
			},
			assertions: func(projects meta.List[api.Project], err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error finding projects")
			},
		},
		{
			name: "projects found; no more pages of results exist",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					cursor, err := mongoTesting.MockCursor(testProject)
					require.NoError(t, err)
					return cursor, nil
				},
				// Zero remaining documents -> no continuation token.
				CountDocumentsFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.CountOptions,
				) (int64, error) {
					return 0, nil
				},
			},
			assertions: func(projects meta.List[api.Project], err error) {
				require.NoError(t, err)
				require.Empty(t, projects.Continue)
				require.Zero(t, projects.RemainingItemCount)
			},
		},
		{
			name: "projects found; more pages of results exist",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					cursor, err := mongoTesting.MockCursor(testProject)
					require.NoError(t, err)
					return cursor, nil
				},
				// Five remaining documents -> Continue points at the last
				// returned project's ID.
				CountDocumentsFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.CountOptions,
				) (int64, error) {
					return 5, nil
				},
			},
			assertions: func(projects meta.List[api.Project], err error) {
				require.NoError(t, err)
				require.Equal(t, testProject.ID, projects.Continue)
				require.Equal(t, int64(5), projects.RemainingItemCount)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			projects, err := store.List(
				context.Background(),
				meta.ListOptions{
					Limit:    1,
					Continue: "blue-book",
				},
			)
			testCase.assertions(projects, err)
		})
	}
}
// TestProjectsStoreListSubscribers exercises projectsStore.ListSubscribers
// for an event: driver errors are wrapped, and the result list mirrors
// whatever the mocked cursor yields (zero or multiple subscribed projects).
func TestProjectsStoreListSubscribers(t *testing.T) {
	testProject1 := api.Project{
		ObjectMeta: meta.ObjectMeta{
			ID: "project1",
		},
	}
	testProject2 := api.Project{
		ObjectMeta: meta.ObjectMeta{
			ID: "project2",
		},
	}
	testEvent := api.Event{
		Source: "github.com/krancour/fake-gateway",
		Type:   "push",
		Qualifiers: api.Qualifiers{
			"foo": "bar",
			"bat": "baz",
		},
	}
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(subscribers meta.List[api.Project], err error)
	}{
		{
			name: "error finding subscribers",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					return nil, errors.New("something went wrong")
				},
			},
			assertions: func(subscribers meta.List[api.Project], err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error finding projects")
			},
		},
		{
			name: "found no subscribers",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					// Empty cursor: no projects subscribe to the event.
					cursor, err := mongoTesting.MockCursor()
					require.NoError(t, err)
					return cursor, nil
				},
			},
			assertions: func(subscribers meta.List[api.Project], err error) {
				require.NoError(t, err)
				require.Empty(t, subscribers.Items)
			},
		},
		{
			name: "found subscribers",
			collection: &mongoTesting.MockCollection{
				FindFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOptions,
				) (*mongo.Cursor, error) {
					cursor, err := mongoTesting.MockCursor(testProject1, testProject2)
					require.NoError(t, err)
					return cursor, nil
				},
			},
			assertions: func(subscribers meta.List[api.Project], err error) {
				require.NoError(t, err)
				require.Len(t, subscribers.Items, 2)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			subscribers, err :=
				store.ListSubscribers(context.Background(), testEvent)
			testCase.assertions(subscribers, err)
		})
	}
}
// TestProjectsStoreGet exercises projectsStore.Get: a no-documents result
// maps to *meta.ErrNotFound, arbitrary driver errors are wrapped, and a
// found document is decoded and returned.
func TestProjectsStoreGet(t *testing.T) {
	const testProjectID = "blue-book"
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(project api.Project, err error)
	}{
		{
			name: "project not found",
			collection: &mongoTesting.MockCollection{
				FindOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOneOptions,
				) *mongo.SingleResult {
					res, err := mongoTesting.MockSingleResult(mongo.ErrNoDocuments)
					require.NoError(t, err)
					return res
				},
			},
			assertions: func(project api.Project, err error) {
				require.Error(t, err)
				enf, ok := err.(*meta.ErrNotFound)
				require.True(t, ok)
				require.Equal(t, api.ProjectKind, enf.Type)
				require.Equal(t, testProjectID, enf.ID)
			},
		},
		{
			name: "unanticipated error",
			collection: &mongoTesting.MockCollection{
				FindOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOneOptions,
				) *mongo.SingleResult {
					res, err := mongoTesting.MockSingleResult(
						errors.New("something went wrong"),
					)
					require.NoError(t, err)
					return res
				},
			},
			assertions: func(project api.Project, err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error finding/decoding project")
			},
		},
		{
			// Renamed from "user found" (and the local below from `user`):
			// that wording was copy-pasted from the users store tests; this
			// suite deals with projects.
			name: "project found",
			collection: &mongoTesting.MockCollection{
				FindOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.FindOneOptions,
				) *mongo.SingleResult {
					res, err := mongoTesting.MockSingleResult(
						api.Project{
							ObjectMeta: meta.ObjectMeta{
								ID: testProjectID,
							},
						},
					)
					require.NoError(t, err)
					return res
				},
			},
			assertions: func(project api.Project, err error) {
				require.NoError(t, err)
				require.Equal(t, testProjectID, project.ID)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			project, err := store.Get(context.Background(), testProjectID)
			testCase.assertions(project, err)
		})
	}
}
// TestProjectsStoreUpdate exercises projectsStore.Update: zero matched
// documents maps to *meta.ErrNotFound, arbitrary driver errors are wrapped,
// and a single-match update succeeds.
func TestProjectsStoreUpdate(t *testing.T) {
	testProject := api.Project{
		ObjectMeta: meta.ObjectMeta{
			ID: "blue-book",
		},
	}
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(err error)
	}{
		{
			name: "project not found",
			collection: &mongoTesting.MockCollection{
				UpdateOneFn: func(
					ctx context.Context,
					filter interface{},
					update interface{},
					opts ...*options.UpdateOptions,
				) (*mongo.UpdateResult, error) {
					// MatchedCount of zero signals the project ID is absent.
					return &mongo.UpdateResult{
						MatchedCount: 0,
					}, nil
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				enf, ok := err.(*meta.ErrNotFound)
				require.True(t, ok)
				require.Equal(t, api.ProjectKind, enf.Type)
				require.Equal(t, testProject.ID, enf.ID)
			},
		},
		{
			name: "unanticipated error",
			collection: &mongoTesting.MockCollection{
				UpdateOneFn: func(
					ctx context.Context,
					filter interface{},
					update interface{},
					opts ...*options.UpdateOptions,
				) (*mongo.UpdateResult, error) {
					return nil, errors.New("something went wrong")
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error updating project")
			},
		},
		{
			name: "project found",
			collection: &mongoTesting.MockCollection{
				UpdateOneFn: func(
					ctx context.Context,
					filter interface{},
					update interface{},
					opts ...*options.UpdateOptions,
				) (*mongo.UpdateResult, error) {
					return &mongo.UpdateResult{
						MatchedCount: 1,
					}, nil
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			err := store.Update(context.Background(), testProject)
			testCase.assertions(err)
		})
	}
}
// TestProjectsStoreDelete exercises projectsStore.Delete: zero deleted
// documents maps to *meta.ErrNotFound, arbitrary driver errors are wrapped,
// and a single-document delete succeeds.
func TestProjectsStoreDelete(t *testing.T) {
	const testProjectID = "blue-book"
	testCases := []struct {
		name       string
		collection mongodb.Collection
		assertions func(err error)
	}{
		{
			name: "project not found",
			collection: &mongoTesting.MockCollection{
				DeleteOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.DeleteOptions,
				) (*mongo.DeleteResult, error) {
					// DeletedCount of zero signals the project ID is absent.
					return &mongo.DeleteResult{
						DeletedCount: 0,
					}, nil
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				enf, ok := err.(*meta.ErrNotFound)
				require.True(t, ok)
				require.Equal(t, api.ProjectKind, enf.Type)
				require.Equal(t, testProjectID, enf.ID)
			},
		},
		{
			name: "unanticipated error",
			collection: &mongoTesting.MockCollection{
				DeleteOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.DeleteOptions,
				) (*mongo.DeleteResult, error) {
					return nil, errors.New("something went wrong")
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error deleting project")
			},
		},
		{
			name: "project found",
			collection: &mongoTesting.MockCollection{
				DeleteOneFn: func(
					ctx context.Context,
					filter interface{},
					opts ...*options.DeleteOptions,
				) (*mongo.DeleteResult, error) {
					return &mongo.DeleteResult{
						DeletedCount: 1,
					}, nil
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			store := &projectsStore{
				collection: testCase.collection,
			}
			err := store.Delete(context.Background(), testProjectID)
			testCase.assertions(err)
		})
	}
}
|
package resource
import (
"errors"
"github.com/bhops/goapi/model"
"github.com/bhops/goapi/storage"
"github.com/manyminds/api2go"
"net/http"
)
// UserResource holds UserStorage and implements the api2go.DataSource
// interface (FindAll/FindOne/Create/Update/Delete) for the User model.
type UserResource struct {
	UserStorage *storage.UserStorage
}
// FindAll to satisfy `api2go.DataSource` interface.
// Returns every stored user, dereferenced into a flat value slice.
func (s UserResource) FindAll(r api2go.Request) (api2go.Responder, error) {
	users, err := s.UserStorage.GetAll()
	if err != nil {
		return &Response{}, err
	}
	// Pre-size the copy to avoid repeated slice growth, but keep the
	// original nil-when-empty behavior (a nil slice JSON-encodes as null,
	// an empty one as []).
	var result []model.User
	if len(users) > 0 {
		result = make([]model.User, 0, len(users))
	}
	for _, user := range users {
		result = append(result, *user)
	}
	return &Response{Res: result}, nil
}
// PaginatedFindAll NYI
// NOTE(review): stub — always reports a total of 1 with a nil responder and
// no error; confirm api2go tolerates the nil responder before relying on it.
func (s UserResource) PaginatedFindAll(r api2go.Request) (uint, api2go.Responder, error) {
	return 1, nil, nil
}
// FindOne to satisfy `api2go.DataSource` interface.
// Unknown IDs surface as HTTP 404 errors.
func (s UserResource) FindOne(ID string, r api2go.Request) (api2go.Responder, error) {
	user, err := s.UserStorage.GetOne(ID)
	if err == nil {
		return &Response{Res: user}, nil
	}
	return &Response{}, api2go.NewHTTPError(err, err.Error(), http.StatusNotFound)
}
// Create to satisfy `api2go.DataSource` interface.
// It validates the payload type, persists the user, assigns the
// storage-generated ID, and responds with 201 Created.
func (s UserResource) Create(obj interface{}, r api2go.Request) (api2go.Responder, error) {
	user, ok := obj.(model.User)
	if !ok {
		return &Response{}, api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
	}
	id, err := s.UserStorage.Insert(user)
	if err != nil {
		// Wrap the actual storage error instead of discarding it with a
		// fresh errors.New; the public message stays unchanged.
		return &Response{}, api2go.NewHTTPError(err, "Failed to create user", http.StatusInternalServerError)
	}
	if err := user.SetID(id); err != nil {
		// Same fix: preserve the underlying cause from SetID.
		return &Response{}, api2go.NewHTTPError(err, "Non-integer ID given", http.StatusInternalServerError)
	}
	return &Response{Res: user, Code: http.StatusCreated}, nil
}
// Delete to satisfy `api2go.DataSource` interface.
// Responds 204 No Content; any storage error is passed through to api2go.
func (s UserResource) Delete(id string, r api2go.Request) (api2go.Responder, error) {
	err := s.UserStorage.Delete(id)
	return &Response{Code: http.StatusNoContent}, err
}
// Update to satisfy `api2go.DataSource` interface.
// Rejects payloads that are not a model.User, then persists the update and
// responds 204 No Content with the updated user.
func (s UserResource) Update(obj interface{}, r api2go.Request) (api2go.Responder, error) {
	user, ok := obj.(model.User)
	if !ok {
		badReq := api2go.NewHTTPError(errors.New("Invalid instance given"), "Invalid instance given", http.StatusBadRequest)
		return &Response{}, badReq
	}
	updateErr := s.UserStorage.Update(user)
	return &Response{Res: user, Code: http.StatusNoContent}, updateErr
}
|
package third_part_pay
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"sub_account_service/finance/blockchain"
"sub_account_service/finance/config"
"sub_account_service/finance/db"
"sub_account_service/finance/lib"
"sub_account_service/finance/models"
"sub_account_service/finance/payment"
"sub_account_service/finance/payment/alipayModels"
"sub_account_service/finance/utils"
svcModels "sub_account_service/number_server/models"
)
// TardingStreamInfo (sic: "Trading") groups the methods that fetch and
// persist third-party payment trading streams. It carries no state.
type TardingStreamInfo struct {
}
// SaveTradingStreamInfo persists the third-party trading stream for one order.
//
// Dispatch:
//  1. config.Opts().IsQueryTradeDetails == 0: store without querying Alipay.
//  2. OrderType 0 / PaymentType 0: Alipay payment — query details and store.
//  3. OrderType 0 / PaymentType 1: Alipay transfer — store transfer details.
//  4. Other order types are not supported yet.
func (this *TardingStreamInfo) SaveTradingStreamInfo(order svcModels.Orders) error {
	// okflag marks successful processing; the deferred handler pushes the
	// order onto the retry queue whenever we return without setting it.
	// NOTE(review): the OrderType != 0 branches return nil without setting
	// okflag, so unsupported orders are re-queued on every pass, while
	// OrderType 0 with an unknown PaymentType is silently marked ok — confirm
	// both behaviors are intended.
	okflag := false
	defer func() {
		if !okflag {
			if err := AddOrderToFailedQueue(order); err != nil {
				glog.Errorln(lib.Log("add order to failed to queue err, trade num: ", order.ThirdTradeNo, "AddOrderToFailedQueue"), "", err)
			}
		}
	}()
	if config.Opts().IsQueryTradeDetails == 0 {
		// Details lookup disabled: store from the order data alone.
		if err := this.SaveTradingStreamInfoOfNoQueryDetails(order); err != nil {
			glog.Errorln(lib.Log("query ali trade err, trade num: ", order.ThirdTradeNo, "SaveTradingStreamInfoOfNoQueryDetails"), "", err)
			return err
		}
		okflag = true
		return nil
	}
	if order.OrderType == 0 {
		if order.PaymentType == 0 { // Alipay trade type: payment
			if err := this.SaveTradingStreamInfoOfAliPay(order); err != nil {
				glog.Errorln(lib.Log("query ali trade err, trade num: ", order.ThirdTradeNo, "SaveTradingStreamInfoOfAliPay"), "", err)
				return err
			}
		} else if order.PaymentType == 1 { // Alipay trade type: transfer
			if err := this.SaveTradingStreamInfoOfAliPayTransfer(order); err != nil {
				glog.Errorln(lib.Log("query ali trade err, trade num: ", order.ThirdTradeNo, "QueryAliTrade"), "", err)
				return err
			}
		}
	} else if order.OrderType == 1 {
		// Other payment providers are not supported yet.
		return nil
	} else {
		// Other payment providers are not supported yet.
		return nil
	}
	okflag = true
	return nil
}
// SaveTradingStreamInfoOfAliPay stores the trading stream for an Alipay
// payment order (PaymentType 0). It queries Alipay for the trade details,
// computes the fee, and persists the income statement to Redis together with
// a "-chainnotok" marker so it gets written to the blockchain later.
//
// Fix: the error returned by payment.QueryAliTrade was previously overwritten
// (or never checked) before aliTrade was dereferenced, causing a nil-pointer
// panic whenever the Alipay query failed. Each query result is now checked.
func (this *TardingStreamInfo) SaveTradingStreamInfoOfAliPay(order svcModels.Orders) error {
	var bPayAmount float64
	var iAmount float64
	var pAmount float64
	var rAmount float64
	var tAmount float64
	var Fees float64
	var err error
	var aliTrade *alipayModels.QueryAliTradeResp
	// Shared-vehicle service orders embed a suffix after "_"; only the part
	// before the underscore is the Alipay trade number.
	if strings.Contains(order.ThirdTradeNo, "_") {
		aliTrade, err = payment.QueryAliTrade(strings.Split(order.ThirdTradeNo, "_")[0])
		if err != nil {
			return err
		}
		// For these orders the total comes from the order's transfer amount.
		tAmount, err = utils.KeepTwoDecimalsOfStr(fmt.Sprintf("%f", order.TransferAmount))
		if err != nil {
			return err
		}
	} else {
		aliTrade, err = payment.QueryAliTrade(order.ThirdTradeNo)
		if err != nil {
			return err
		}
		if aliTrade.TotalAmount != "" {
			tAmount, err = utils.KeepTwoDecimalsOfStr(aliTrade.TotalAmount)
			if err != nil {
				return err
			}
		}
	}
	if Fees, err = GetTradingStreamFees(tAmount, order.OrderType); err != nil {
		return err
	}
	if aliTrade.BuyerPayAmount != "" {
		bPayAmount, err = strconv.ParseFloat(aliTrade.BuyerPayAmount, 64)
		if err != nil {
			return err
		}
	}
	if aliTrade.InvoiceAmount != "" {
		iAmount, err = strconv.ParseFloat(aliTrade.InvoiceAmount, 64)
		if err != nil {
			return err
		}
	}
	if aliTrade.PointAmount != "" {
		pAmount, err = strconv.ParseFloat(aliTrade.PointAmount, 64)
		if err != nil {
			return err
		}
	}
	// Parse the Alipay pay timestamp in the local time zone; parse failures
	// deliberately fall back to the zero time (epoch), as before.
	loc, _ := time.LoadLocation("Local")
	theTime, _ := time.ParseInLocation("2006-01-02 15:04:05", aliTrade.SendPayDate, loc)
	sPayDate := theTime.Unix()
	if aliTrade.ReceiptAmount != "" {
		rAmount, err = strconv.ParseFloat(aliTrade.ReceiptAmount, 64)
		if err != nil {
			return err
		}
	}
	// When the Alipay response carries a total amount, it overrides the
	// order-derived value and the fee is recomputed from it.
	if aliTrade.TotalAmount != "" {
		tAmount, err = utils.KeepTwoDecimalsOfStr(aliTrade.TotalAmount)
		if err != nil {
			return err
		}
		if Fees, err = GetTradingStreamFees(tAmount, order.OrderType); err != nil {
			return err
		}
	}
	iStatement := models.IncomeStatement{
		OrderType:      int64(order.OrderType),
		AlipaySeller:   config.ZfbConfig().AlipaySeller,
		BuyerLogonID:   aliTrade.BuyerLogonId,
		BuyerPayAmount: bPayAmount,
		BuyerUserID:    aliTrade.BuyerUserId,
		BuyerUserType:  aliTrade.BuyerUserType,
		InvoiceAmount:  iAmount,
		OutTradeNo:     aliTrade.OutTradeNo,
		PointAmount:    pAmount,
		ReceiptAmount:  rAmount,
		SendPayDate:    sPayDate,
		TotalAmount:    tAmount,
		Fees:           Fees,
		PayerName:      order.Company,
		TradeNo:        order.ThirdTradeNo,
		SubAccountNo:   order.SubAccountNo,
		AutoTransfer:   uint8(utils.BoolToInt(!(tAmount > config.Opts().AutoTransferLimit))),
		OnchainNum:     0,
	}
	// Skip if this stream was already saved.
	if res, err := db.RedisClient.Exists(iStatement.TradeNo).Result(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	} else if res != 0 {
		return nil
	}
	out, err := json.Marshal(iStatement)
	if err != nil {
		glog.Errorln(lib.Log("json marshal tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
		// Do not persist a nil payload if marshaling failed.
		return err
	}
	if err := db.RedisClient.Set(iStatement.TradeNo, out, 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	// Mark the stream as "not yet on chain" for TradingStreamWriteToBlockchain.
	if err := db.RedisClient.Set(iStatement.TradeNo+"-chainnotok"+config.Opts().FinanceOrderSvcAppId, "true", 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	return nil
}
// SaveTradingStreamInfoOfAliPayTransfer stores the trading stream for an
// Alipay transfer order (PaymentType 1). Transfer details are decoded from
// order.TransferInfo (JSON) rather than queried from Alipay.
func (this *TardingStreamInfo) SaveTradingStreamInfoOfAliPayTransfer(order svcModels.Orders) error {
	// Buyer/invoice/point/receipt amounts are not assigned anywhere below and
	// stay at their zero values in the statement.
	var bPayAmount float64
	var iAmount float64
	var pAmount float64
	var rAmount float64
	var tAmount float64
	var Fees float64
	var err error
	// NOTE(review): aliTransfer is decoded but never read afterwards —
	// confirm whether any of its fields should feed the statement.
	aliTransfer := alipayModels.QueryTransferToAccountResp{}
	err = json.Unmarshal([]byte(order.TransferInfo), &aliTransfer)
	if err != nil {
		glog.Errorln(lib.Log("query ali trade err, trade num: ", order.ThirdTradeNo, "QueryAliTrade"), "", err)
		return err
	}
	// Pay date comes straight from the order time (a previous revision parsed
	// it from a string in the local time zone).
	sPayDate := order.OrderTime.Unix()
	// Total amount: the order's transfer amount rounded to two decimals.
	tAmount, err = utils.KeepTwoDecimalsOfStr(fmt.Sprintf("%f", order.TransferAmount))
	if err != nil {
		return err
	}
	if Fees, err = GetTradingStreamFees(tAmount, order.OrderType); err != nil {
		return err
	}
	iStatement := models.IncomeStatement{
		OrderType:      int64(order.OrderType),
		AlipaySeller:   config.ZfbConfig().AlipaySeller,
		BuyerPayAmount: bPayAmount,
		InvoiceAmount:  iAmount,
		PointAmount:    pAmount,
		ReceiptAmount:  rAmount,
		SendPayDate:    sPayDate,
		TotalAmount:    tAmount,
		Fees:           Fees,
		PayerName:      order.Company,
		TradeNo:        order.ThirdTradeNo,
		SubAccountNo:   order.SubAccountNo,
		AutoTransfer:   uint8(utils.BoolToInt(!(utils.Uint8ToBool(order.AutoTransfer) && tAmount > config.Opts().AutoTransferLimit))),
		OnchainNum:     0,
	}
	// Skip if this stream was already saved.
	if res, err := db.RedisClient.Exists(iStatement.TradeNo).Result(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	} else if res != 0 {
		return nil
	}
	out, err := json.Marshal(iStatement)
	if err != nil {
		// NOTE(review): on marshal failure `out` is nil but the Set below
		// still runs — confirm that is intended.
		glog.Errorln(lib.Log("json marshal tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	if err := db.RedisClient.Set(iStatement.TradeNo, out, 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	// Mark the stream as "not yet on chain" for the blockchain writer pass.
	if err := db.RedisClient.Set(iStatement.TradeNo+"-chainnotok"+config.Opts().FinanceOrderSvcAppId, "true", 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	return nil
}
// SaveTradingStreamInfoOfNoQueryDetails stores the trading stream using only
// the order data, without fetching trade details from the payment provider.
// Used when config.Opts().IsQueryTradeDetails == 0.
func (this *TardingStreamInfo) SaveTradingStreamInfoOfNoQueryDetails(order svcModels.Orders) error {
	// Detail amounts are unavailable without querying the provider and stay
	// at their zero values.
	var bPayAmount float64
	var iAmount float64
	var pAmount float64
	var rAmount float64
	var tAmount float64
	var Fees float64
	var err error
	// Pay date comes straight from the order time (a previous revision parsed
	// it from a string in the local time zone).
	sPayDate := order.OrderTime.Unix()
	// Total amount and fee derive from the order's transfer amount.
	tAmount, err = utils.KeepTwoDecimalsOfStr(fmt.Sprintf("%f", order.TransferAmount))
	if err != nil {
		return err
	}
	if Fees, err = GetTradingStreamFees(tAmount, order.OrderType); err != nil {
		return err
	}
	iStatement := models.IncomeStatement{
		OrderType:      int64(order.OrderType),
		AlipaySeller:   config.ZfbConfig().AlipaySeller,
		BuyerPayAmount: bPayAmount,
		InvoiceAmount:  iAmount,
		PointAmount:    pAmount,
		ReceiptAmount:  rAmount,
		SendPayDate:    sPayDate,
		TotalAmount:    tAmount,
		Fees:           Fees,
		PayerName:      order.Company,
		TradeNo:        order.ThirdTradeNo,
		SubAccountNo:   order.SubAccountNo,
		AutoTransfer:   uint8(utils.BoolToInt(!(utils.Uint8ToBool(order.AutoTransfer) && tAmount > config.Opts().AutoTransferLimit))),
		OnchainNum:     0,
	}
	// Skip if this stream was already saved.
	if res, err := db.RedisClient.Exists(iStatement.TradeNo).Result(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	} else if res != 0 {
		return nil
	}
	out, err := json.Marshal(iStatement)
	if err != nil {
		// NOTE(review): on marshal failure `out` is nil but the Set below
		// still runs — confirm that is intended.
		glog.Errorln(lib.Log("json marshal tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	if err := db.RedisClient.Set(iStatement.TradeNo, out, 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	// Mark the stream as "not yet on chain" for the blockchain writer pass.
	if err := db.RedisClient.Set(iStatement.TradeNo+"-chainnotok"+config.Opts().FinanceOrderSvcAppId, "true", 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", iStatement.TradeNo, "SaveTradingStreamInfo"), "", err)
	}
	return nil
}
// AddOrderToFailedQueue stores an order whose processing failed in Redis
// (key "<tradeNo>-ordernotok<appId>") so a later retry pass can pick it up.
//
// Fix: errors were previously swallowed and the function always returned nil;
// a marshal failure would even write a nil payload into the queue. Failures
// are now both logged and propagated to the caller.
func AddOrderToFailedQueue(order svcModels.Orders) error {
	out, err := json.Marshal(order)
	if err != nil {
		glog.Errorln(lib.Log("json marshal tarding stream err", order.ThirdTradeNo, "SaveTradingStreamInfo"), "", err)
		return err
	}
	if err := db.RedisClient.Set(order.ThirdTradeNo+"-ordernotok"+config.Opts().FinanceOrderSvcAppId, out, 0).Err(); err != nil {
		glog.Errorln(lib.Log("save tarding stream err", order.ThirdTradeNo, "SaveTradingStreamInfo"), "", err)
		return err
	}
	return nil
}
// GetTradingStreamFees returns the processing fee for a trade of the given
// amount. orderType 0 uses the Alipay rate (0.6%), orderType 1 the WeChat
// rate (0.8%); either way the fee has a floor of 0.1. When FeesSwitch is
// disabled, or the order type is unknown, the fee is 0.
//
// Cleanup: removed the empty trailing else branch and the dead err variable;
// behavior is unchanged.
func GetTradingStreamFees(amount float64, orderType uint8) (float64, error) {
	if config.Opts().FeesSwitch != 1 {
		return 0, nil
	}
	var rate float64
	switch orderType {
	case 0: // Alipay transfer fee rate
		rate = 0.006
	case 1: // WeChat transfer fee rate
		rate = 0.008
	default: // unknown order types carry no fee
		return 0, nil
	}
	fee := amount * rate
	if fee <= 0.1 {
		// Minimum fee.
		return 0.1, nil
	}
	// Round to two decimals, matching the original Sprintf("%.2f") behavior.
	return strconv.ParseFloat(fmt.Sprintf("%.2f", fee), 64)
}
// GetTradingStreamFromThirdPartPay pulls the latest order batch from the
// order service and stores each entry: state 1 orders as trading streams,
// state 2 orders as customer refunds. Processing stops at the first error.
func (this *TardingStreamInfo) GetTradingStreamFromThirdPartPay() error {
	orders, err := GetOrderListFromServer()
	if err != nil {
		glog.Errorln(lib.Log("get order list err", "", "GetTradingStreamFromThirdPartPay"), "", err)
		return err
	}
	if len(orders) != 0 {
		glog.Infoln("GetTradingStreamFromThirdPartPay===========================交易流水数量=", len(orders))
	}
	for _, order := range orders {
		switch order.OrderState {
		case 1: // regular trading stream
			if err := this.SaveTradingStreamInfo(order); err != nil {
				glog.Errorln(lib.Log("get order list err", "", "GetTradingStreamFromThirdPartPay"), "", err)
				return err
			}
		case 2: // refund stream
			if err := CustomerRefundTrade(order); err != nil {
				glog.Errorln(lib.Log("get order list err", "", "GetTradingStreamFromThirdPartPay"), "", err)
				return err
			}
		}
	}
	return nil
}
// GetTradingStreamDetailsFromThirdPartPay re-processes orders that previously
// failed (stored under "*ordernotok<appId>" keys by AddOrderToFailedQueue)
// and deletes each key once its order has been handled. Individual failures
// are logged and skipped so one bad entry cannot block the rest.
func (this *TardingStreamInfo) GetTradingStreamDetailsFromThirdPartPay() error {
	var keys []string
	var err error
	keys, err = db.RedisClient.Keys("*ordernotok" + config.Opts().FinanceOrderSvcAppId).Result()
	if err != nil {
		glog.Errorln(lib.Log("get data from redis err", "", "TradingStreamWriteToBlockchain"), "", err)
		return err
	}
	for _, key := range keys {
		val, err := db.RedisClient.Get(key).Result()
		if err != nil {
			glog.Errorln(lib.Log("get data from redis err", fmt.Sprintf("%v", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		order := svcModels.Orders{}
		if err := json.Unmarshal([]byte(val), &order); err != nil {
			glog.Errorln(lib.Log("unmarshal order info err", fmt.Sprintf("%v", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		// Re-run the same dispatch as the main ingestion pass.
		if order.OrderState == 1 { // 1 = regular trading stream
			if err := this.SaveTradingStreamInfo(order); err != nil {
				glog.Errorln(lib.Log("get order list err", "", "GetTradingStreamFromThirdPartPay"), "", err)
				continue
			}
		} else if order.OrderState == 2 { // 2 = refund stream
			if err := CustomerRefundTrade(order); err != nil {
				glog.Errorln(lib.Log("get order list err", "", "GetTradingStreamFromThirdPartPay"), "", err)
				continue
			}
		}
		// Success (or unknown state): drop the retry marker.
		if err := db.RedisClient.Del(key).Err(); err != nil {
			glog.Errorln(lib.Log("del redis record err", fmt.Sprintf("%v", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
	}
	return nil
}
// TradingStreamWriteToBlockchain finds saved streams that have not yet been
// written to the blockchain ("*chainnotok<appId>" keys), writes each payload
// to the chain, then swaps the marker to "-chainok".
//
// NOTE(review): the unconditional `break` at the end of the loop means at
// most ONE stream is processed per invocation — confirm this throttling is
// intentional and not a leftover from debugging.
func (this *TardingStreamInfo) TradingStreamWriteToBlockchain() error {
	var keys []string
	var key, val string
	var err error
	keys, err = db.RedisClient.Keys("*chainnotok" + config.Opts().FinanceOrderSvcAppId).Result()
	if err != nil {
		glog.Errorln(lib.Log("get tarding stream without write chaincode err", "", "TradingStreamWriteToBlockchain"), "", err)
		return err
	}
	for _, v := range keys {
		// The payload lives under the trade number, i.e. the marker key with
		// its "-chainnotok<appId>" suffix stripped.
		key = strings.Split(v, "-chainnotok")[0]
		val, err = db.RedisClient.Get(key).Result()
		if err != nil {
			glog.Errorln(lib.Log("get data from redis err", fmt.Sprintf("%v", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		if err := blockchain.WriteBlockchain(val); err != nil {
			glog.Errorln(lib.Log("write blockchain err", key, "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		// Swap the "not ok" marker for an "ok" marker.
		if err := db.RedisClient.Del(v).Err(); err != nil {
			glog.Errorln(lib.Log("del tarding stream writing block no ok err", fmt.Sprintf("%v", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		if err := db.RedisClient.Set(key+"-chainok"+config.Opts().FinanceOrderSvcAppId, "true", 0).Err(); err != nil {
			glog.Errorln(lib.Log("add tarding stream writing block ok err", fmt.Sprintf("%v-chainok", key), "TradingStreamWriteToBlockchain"), "", err)
			continue
		}
		break
	}
	return nil
}
// GetOrderListFromServer fetches the latest batch version from the finance
// order service, then downloads and decodes the orders for that version.
//
// Fixes: several failure paths previously returned a nil error (the local
// `err` was nil at those points), so callers could not detect the failure;
// the unchecked type assertions on the JSON payload could panic on an
// unexpected response shape. Both are handled explicitly now.
func GetOrderListFromServer() ([]svcModels.Orders, error) {
	// Step 1: ask for the latest batch version for our app id.
	vUrl := fmt.Sprintf("http://%s:%s/getLatestVersion?appId=%v", config.Opts().FinanceOrderSvcAddress, config.Opts().FinanceOrderSvcPort, config.Opts().FinanceOrderSvcAppId)
	body, err := utils.SendHttpRequest("GET", vUrl, nil, nil, nil)
	if err != nil {
		glog.Errorln(lib.Log("get order version err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	var vOut map[string]interface{}
	if err := json.Unmarshal(body, &vOut); err != nil {
		glog.Errorln(lib.Log("unmarshal order version err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	code, ok := vOut["code"].(float64)
	if !ok {
		err := fmt.Errorf("order version response missing numeric code")
		glog.Errorln(lib.Log("order version code is nil", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	if code != 0 {
		err := fmt.Errorf("code: %v", code)
		glog.Errorln(lib.Log("get order version code err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	data, ok := vOut["data"].(map[string]interface{})
	if !ok {
		return []svcModels.Orders{}, fmt.Errorf("order version response missing data object")
	}
	// An empty version means there is no new batch to fetch.
	latestVer, _ := data["latestVersion"].(string)
	if latestVer == "" {
		glog.Infoln("[GetOrderListFromServer]: latest version is empty")
		return []svcModels.Orders{}, nil
	}
	// Step 2: fetch the order batch for that version.
	orderUrl := fmt.Sprintf("http://%s:%s/orders/batch?appId=%v&version=%s", config.Opts().FinanceOrderSvcAddress, config.Opts().FinanceOrderSvcPort, config.Opts().FinanceOrderSvcAppId, latestVer)
	body, err = utils.SendHttpRequest("GET", orderUrl, nil, nil, nil)
	if err != nil {
		glog.Errorln(lib.Log("get order list err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	var oOut map[string]interface{}
	if err := json.Unmarshal(body, &oOut); err != nil {
		glog.Errorln(lib.Log("unmarshal order list err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	oCode, ok := oOut["code"].(float64)
	if !ok || oCode != 0 {
		err := fmt.Errorf("get order status code=%v", oOut["code"])
		glog.Errorln(lib.Log("get order list code err", "", "GetOrderListFromServer"), "", err)
		return []svcModels.Orders{}, err
	}
	oData, ok := oOut["data"].(map[string]interface{})
	if !ok {
		return []svcModels.Orders{}, fmt.Errorf("order list response missing data object")
	}
	rawOrders, ok := oData["orders"].([]interface{})
	if !ok {
		return []svcModels.Orders{}, fmt.Errorf("order list response missing orders array")
	}
	// Step 3: re-marshal each raw entry into the typed Orders struct;
	// malformed entries are logged and skipped (best-effort, as before).
	res := make([]svcModels.Orders, 0, len(rawOrders))
	for _, v := range rawOrders {
		out, err := json.Marshal(v)
		if err != nil {
			glog.Errorln(lib.Log("marshal order entry err", "", "GetOrderListFromServer"), "", err)
			continue
		}
		order := svcModels.Orders{}
		if err := json.Unmarshal(out, &order); err != nil {
			glog.Errorln(lib.Log("unmarshal order entry err", "", "GetOrderListFromServer"), "", err)
			continue
		}
		res = append(res, order)
	}
	return res, nil
}
|
package heap
import "fmt"
// Heap is a max-heap of ints. The backing slice is 1-indexed: data[0] is a
// sentinel and the root lives at data[1].
type Heap struct {
	data []int
}

// CreateHeap builds a max-heap containing the given values.
func CreateHeap(data []int) *Heap {
	h := &Heap{data: append([]int{0}, data...)}
	n := len(h.data)
	// Sift down every internal node, from the last parent up to the root.
	for idx := n / 2; idx >= 1; idx-- {
		h.heapify(h.data, n, idx)
	}
	return h
}

// heapify sifts the element at pos down until the max-heap property holds
// for its subtree. length is the number of used slots (sentinel included).
func (h *Heap) heapify(arr []int, length int, pos int) {
	for {
		largest := pos
		left, right := pos*2, pos*2+1
		if left < length && arr[largest] < arr[left] {
			largest = left
		}
		if right < length && arr[largest] < arr[right] {
			largest = right
		}
		if largest == pos {
			return
		}
		swap(arr, pos, largest)
		pos = largest
	}
}

// Pop removes and returns the largest element: swap the root with the last
// leaf, drop the leaf, then sift the new root down. Panics on an empty heap.
func (h *Heap) Pop() int {
	last := len(h.data) - 1
	swap(h.data, 1, last)
	top := h.data[last]
	h.data = h.data[:last]
	h.heapify(h.data, len(h.data), 1)
	return top
}

// swap exchanges two elements of arr in place.
func swap(arr []int, i, j int) {
	arr[i], arr[j] = arr[j], arr[i]
}

// Empty reports whether the heap holds no elements (only the sentinel).
func (h *Heap) Empty() bool {
	return len(h.data) <= 1
}

// String renders the backing slice, sentinel included, for debugging.
func (h *Heap) String() string {
	return fmt.Sprintf("data: %v", h.data)
}
|
package main
import (
"fmt"
"os"
"github.com/kingzbauer/jsonparser"
)
// main parses the JSON file named by the first command-line argument and
// reports any parse error; it exits non-zero on bad usage or failure.
func main() {
	args := os.Args
	if len(args) < 2 {
		fmt.Println("Expected filename")
		os.Exit(1)
	}
	src, readErr := os.ReadFile(args[1])
	if readErr != nil {
		fmt.Printf("Error: %s\n", readErr)
		os.Exit(1)
	}
	if parseErr := jsonparser.Parse(src); parseErr != nil {
		fmt.Println(parseErr)
		os.Exit(1)
	}
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"context"
"github.com/rcrowley/go-metrics"
"github.com/streamsets/datacollector-edge/api/validation"
)
// StageContext exposes runtime services — record creation, metrics, error
// reporting, and expression evaluation — to a pipeline stage.
type StageContext interface {
	// If we plan to support ELs later, we should remove and provide in build support for this
	GetResolvedValue(configValue interface{}) (interface{}, error)
	// CreateRecord builds a new Record for the given source id and value.
	CreateRecord(recordSourceId string, value interface{}) (Record, error)
	// GetMetrics returns the metrics registry for this stage.
	GetMetrics() metrics.Registry
	// ToError associates the given error with a record.
	ToError(err error, record Record)
	// ReportError reports a stage-level (non-record) error.
	ReportError(err error)
	// GetOutputLanes lists the stage's output lane names.
	GetOutputLanes() []string
	// Evaluate resolves the given expression for the named config.
	Evaluate(value string, configName string, ctx context.Context) (interface{}, error)
	// IsErrorStage reports whether this stage is an error stage.
	IsErrorStage() bool
	// CreateConfigIssue records a configuration validation issue.
	CreateConfigIssue(error string, optional ...interface{}) validation.Issue
}
|
package main
import (
"fmt"
"log"
"time"
)
// timeZone returns the current time converted to the Australia/Sydney time
// zone. It panics (via log.Panic) if the zone database cannot be loaded.
//
// Cleanup: removed the unused named result `loctime` and the intermediate
// variable; the signature's return type is unchanged.
func timeZone() time.Time {
	loc, err := time.LoadLocation("Australia/Sydney")
	if err != nil {
		log.Panic(err)
	}
	return time.Now().In(loc)
}
// main prints the current time in the Australia/Sydney time zone.
func main(){
	fmt.Println(timeZone())
}
|
// Copyright 2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package generate
import (
"context"
"crypto"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/cilium/certgen/internal/logging"
"github.com/cilium/certgen/internal/logging/logfields"
"github.com/cloudflare/cfssl/cli/genkey"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/csr"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/initca"
"github.com/cloudflare/cfssl/signer"
"github.com/cloudflare/cfssl/signer/local"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "generate")
)
// Cert contains the data and metadata of a certificate and its keyfile.
// (The previous comment said "CA", which belongs to the CA type below.)
type Cert struct {
	CommonName       string
	ValidityDuration time.Duration
	Usage            []string
	Name             string        // name of the K8s secret the cert is stored in
	Namespace        string        // namespace of that secret
	Hosts            []string      // SANs; defaults to CommonName (see NewCert)
	CA               *CA           // the CA that signed this cert (set by Generate)
	CertBytes        []byte        // PEM-encoded certificate (set by Generate)
	KeyBytes         []byte        // PEM-encoded private key (set by Generate)
}
// NewCert creates a new certificate blueprint.
// Hosts defaults to the common name; override it with WithHosts.
func NewCert(
	commonName string,
	validityDuration time.Duration,
	usage []string,
	name string,
	namespace string,
) *Cert {
	cert := &Cert{
		CommonName:       commonName,
		Hosts:            []string{commonName},
		ValidityDuration: validityDuration,
		Usage:            usage,
		Name:             name,
		Namespace:        namespace,
	}
	return cert
}
// WithHosts modifies to use the given hosts instead of the default
// (CommonName). Returns the receiver for chaining.
func (c *Cert) WithHosts(hosts []string) *Cert {
	c.Hosts = hosts
	return c
}
// Generate the certificate and keyfile and populate c.CertBytes and c.CertKey.
// It builds a CSR from the blueprint, has the given CA sign it with the
// blueprint's usage and validity, and records the CA on the cert.
func (c *Cert) Generate(ca *CA) error {
	log.WithFields(logrus.Fields{
		logfields.CertCommonName:       c.CommonName,
		logfields.CertValidityDuration: c.ValidityDuration,
		logfields.CertUsage:            c.Usage,
	}).Info("Creating CSR for certificate")
	certRequest := &csr.CertificateRequest{
		CN:         c.CommonName,
		Hosts:      c.Hosts,
		KeyRequest: csr.NewKeyRequest(),
	}
	// Produce the private key and CSR bytes.
	g := &csr.Generator{Validator: genkey.Validator}
	csrBytes, keyBytes, err := g.ProcessRequest(certRequest)
	if err != nil {
		return err
	}
	// Signing policy: one default profile carrying this cert's usage/expiry.
	policy := &config.Signing{
		Default: &config.SigningProfile{
			Usage:  c.Usage,
			Expiry: c.ValidityDuration,
		},
	}
	caCert, caSigner := ca.CACert, ca.CAKey
	s, err := local.NewSigner(caSigner, caCert, signer.DefaultSigAlgo(caSigner), policy)
	if err != nil {
		return err
	}
	signReq := signer.SignRequest{Request: string(csrBytes)}
	certBytes, err := s.Sign(signReq)
	if err != nil {
		return err
	}
	c.CA = ca
	c.CertBytes = certBytes
	c.KeyBytes = keyBytes
	return nil
}
// StoreAsSecret creates or updates the certificate and keyfile in a K8s
// secret of type kubernetes.io/tls (keys: ca.crt, tls.crt, tls.key).
// The cert must have been generated first.
func (c *Cert) StoreAsSecret(ctx context.Context, k8sClient *kubernetes.Clientset) error {
	if c.CertBytes == nil || c.KeyBytes == nil {
		return fmt.Errorf("cannot create secret %s/%s from empty certificate",
			c.Namespace, c.Name)
	}
	scopedLog := log.WithFields(logrus.Fields{
		logfields.K8sSecretNamespace: c.Namespace,
		logfields.K8sSecretName:      c.Name,
	})
	scopedLog.Info("Creating K8s Secret")
	secret := &v1.Secret{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      c.Name,
			Namespace: c.Namespace,
		},
		Data: map[string][]byte{
			"ca.crt":  c.CA.CACertBytes,
			"tls.crt": c.CertBytes,
			"tls.key": c.KeyBytes,
		},
		Type: v1.SecretTypeTLS,
	}
	k8sSecrets := k8sClient.CoreV1().Secrets(c.Namespace)
	// Create first; fall back to an update when the secret already exists.
	_, err := k8sSecrets.Create(ctx, secret, meta_v1.CreateOptions{})
	if k8sErrors.IsAlreadyExists(err) {
		scopedLog.Info("Secret already exists, updating it instead")
		_, err = k8sSecrets.Update(ctx, secret, meta_v1.UpdateOptions{})
	}
	return err
}
// CA contains the data and metadata of the certificate authority
type CA struct {
	SecretName         string // K8s secret the CA key pair is stored in
	SecretNamespace    string
	ConfigMapName      string // K8s configmap the public CA cert is stored in
	ConfigMapNamespace string
	CACertBytes        []byte            // PEM-encoded CA certificate
	CAKeyBytes         []byte            // PEM-encoded CA private key
	CACert             *x509.Certificate // parsed form of CACertBytes (see loadKeyPair)
	CAKey              crypto.Signer     // parsed form of CAKeyBytes (see loadKeyPair)
	loadedFromSecret   bool              // true when populated via LoadFromSecret
}
// NewCA creates a new root CA blueprint.
func NewCA(secretName, secretNamespace string) *CA {
	ca := &CA{
		SecretName:      secretName,
		SecretNamespace: secretNamespace,
	}
	return ca
}
// loadKeyPair populates c.CACert/c.CAKey from c.CACertBytes/c.CAKeyBytes.
// The struct is only mutated once both PEM blobs parse successfully.
func (c *CA) loadKeyPair() error {
	cert, err := helpers.ParseCertificatePEM(c.CACertBytes)
	if err != nil {
		return fmt.Errorf("failed to parse CA cert PEM: %w", err)
	}
	key, err := helpers.ParsePrivateKeyPEM(c.CAKeyBytes)
	if err != nil {
		return fmt.Errorf("failed to parse CA key PEM: %w", err)
	}
	c.CACert, c.CAKey = cert, key
	return nil
}
// LoadedFromSecret returns true if this CA was loaded from a K8s secret
// (as opposed to being generated or read from files).
func (c *CA) LoadedFromSecret() bool {
	return c.loadedFromSecret
}
// Generate the root certificate and keyfile. Populates c.CACertBytes and
// c.CAKeyBytes, then parses them into c.CACert/c.CAKey via loadKeyPair.
func (c *CA) Generate(commonName string, validityDuration time.Duration) error {
	log.WithFields(logrus.Fields{
		logfields.CertCommonName:       commonName,
		logfields.CertValidityDuration: validityDuration,
	}).Info("Creating CSR for certificate authority")
	caCSR := &csr.CertificateRequest{
		CN: commonName,
		CA: &csr.CAConfig{
			Expiry: validityDuration.String(),
		},
		KeyRequest: csr.NewKeyRequest(),
	}
	// initca.New self-signs the root; the middle return (CSR) is unused.
	caCertBytes, _, caKeyBytes, err := initca.New(caCSR)
	if err != nil {
		return err
	}
	c.CACertBytes = caCertBytes
	c.CAKeyBytes = caKeyBytes
	c.loadedFromSecret = false
	return c.loadKeyPair()
}
// LoadFromFile populates c.CACertBytes and c.CAKeyBytes by reading them from
// file. Both paths must be provided; the bytes are then parsed via
// loadKeyPair.
func (c *CA) LoadFromFile(caCertFile, caKeyFile string) error {
	if caCertFile == "" || caKeyFile == "" {
		return errors.New("path for CA key and cert file must both be provided if CA is not generated")
	}
	caCertBytes, err := ioutil.ReadFile(caCertFile)
	if err != nil {
		return fmt.Errorf("failed to load CA cert file: %w", err)
	}
	caKeyBytes, err := ioutil.ReadFile(caKeyFile)
	if err != nil {
		// Fixed message: this loader is generic; the old text said
		// "Hubble CA key file", inconsistent with the cert message above.
		return fmt.Errorf("failed to load CA key file: %w", err)
	}
	c.CACertBytes = caCertBytes
	c.CAKeyBytes = caKeyBytes
	c.loadedFromSecret = false
	return c.loadKeyPair()
}
// StoreAsConfigMap creates or updates the CA certificate in a K8s configmap.
// Only the public cert (ca.crt) is written; the key never leaves the secret.
func (c *CA) StoreAsConfigMap(ctx context.Context, k8sClient *kubernetes.Clientset) error {
	// NOTE(review): CAKeyBytes is required to be non-nil even though only the
	// cert is stored below — confirm that precondition is intentional.
	if c.CACertBytes == nil || c.CAKeyBytes == nil {
		return fmt.Errorf("cannot create configmap %s/%s from empty certificate",
			c.ConfigMapNamespace, c.ConfigMapName)
	}
	scopedLog := log.WithFields(logrus.Fields{
		logfields.K8sConfigMapNamespace: c.ConfigMapNamespace,
		logfields.K8sConfigMapName:      c.ConfigMapName,
	})
	scopedLog.Info("Creating K8s ConfigMap")
	configMap := &v1.ConfigMap{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      c.ConfigMapName,
			Namespace: c.ConfigMapNamespace,
		},
		BinaryData: map[string][]byte{
			"ca.crt": c.CACertBytes,
		},
	}
	k8sConfigMaps := k8sClient.CoreV1().ConfigMaps(c.ConfigMapNamespace)
	// Create first; fall back to an update when it already exists.
	_, err := k8sConfigMaps.Create(ctx, configMap, meta_v1.CreateOptions{})
	if k8sErrors.IsAlreadyExists(err) {
		scopedLog.Info("ConfigMap already exists, updating it instead")
		_, err = k8sConfigMaps.Update(ctx, configMap, meta_v1.UpdateOptions{})
	}
	return err
}
// StoreAsSecret creates or updates the CA certificate in a K8s secret
// (keys: ca.crt and ca.key).
func (c *CA) StoreAsSecret(ctx context.Context, k8sClient *kubernetes.Clientset) error {
	if c.CACertBytes == nil || c.CAKeyBytes == nil {
		return fmt.Errorf("cannot create secret %s/%s from empty certificate",
			c.SecretNamespace, c.SecretName)
	}
	scopedLog := log.WithFields(logrus.Fields{
		logfields.K8sSecretNamespace: c.SecretNamespace,
		logfields.K8sSecretName:      c.SecretName,
	})
	scopedLog.Info("Creating K8s Secret")
	secret := &v1.Secret{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      c.SecretName,
			Namespace: c.SecretNamespace,
		},
		Data: map[string][]byte{
			"ca.crt": c.CACertBytes,
			"ca.key": c.CAKeyBytes,
		},
	}
	k8sSecrets := k8sClient.CoreV1().Secrets(c.SecretNamespace)
	// Create first; fall back to an update when the secret already exists.
	_, err := k8sSecrets.Create(ctx, secret, meta_v1.CreateOptions{})
	if k8sErrors.IsAlreadyExists(err) {
		scopedLog.Info("Secret already exists, updating it instead")
		_, err = k8sSecrets.Update(ctx, secret, meta_v1.UpdateOptions{})
	}
	return err
}
// LoadFromSecret populates c.CACertBytes and c.CAKeyBytes by reading them
// from a secret, validates that both keys are present, parses them via
// loadKeyPair, and marks the CA as secret-loaded.
func (c *CA) LoadFromSecret(ctx context.Context, k8sClient *kubernetes.Clientset) error {
	k8sSecrets := k8sClient.CoreV1().Secrets(c.SecretNamespace)
	secret, err := k8sSecrets.Get(ctx, c.SecretName, meta_v1.GetOptions{})
	if err != nil {
		return err
	}
	if len(secret.Data["ca.crt"]) == 0 {
		return fmt.Errorf("Secret %s/%s has no CA cert", c.SecretNamespace, c.SecretName)
	}
	if len(secret.Data["ca.key"]) == 0 {
		return fmt.Errorf("Secret %s/%s has no CA key", c.SecretNamespace, c.SecretName)
	}
	c.CACertBytes = secret.Data["ca.crt"]
	c.CAKeyBytes = secret.Data["ca.key"]
	if err := c.loadKeyPair(); err != nil {
		return err
	}
	c.loadedFromSecret = true
	return nil
}
|
package main
import (
"github.com/yuyistudio/ecs-go/entitas"
"fmt"
"time"
"math/rand"
)
// PosCom is a position component holding 2D coordinates.
type PosCom struct {
	x float64
	y float64
}

// Type identifies this component as ComType_pos.
func (p *PosCom) Type() entitas.ComponentType {
	return ComType_pos
}
// RendererCom is a rendering component carrying a screen id.
type RendererCom struct {
	screen int64
}

// Type identifies this component as ComType_renderer.
func (p *RendererCom) Type() entitas.ComponentType {
	return ComType_renderer
}
// PosMatcher matches entities that carry a position component.
type PosMatcher struct {
	hash entitas.MatcherHash
}

// Hash returns the matcher's hash.
func (b *PosMatcher) Hash() entitas.MatcherHash {
	return b.hash
}

// ComponentTypes returns nil; filtering happens in Matches instead.
func (b *PosMatcher) ComponentTypes() []entitas.ComponentType {
	return nil
}

// NewPosMatch builds a PosMatcher with a fixed hash.
// NOTE(review): the hash is hard-coded to 123 — confirm it cannot collide
// with other matchers registered on the same context.
func NewPosMatch() *PosMatcher {
	p := new(PosMatcher)
	p.hash = 123
	return p
}

// Matches reports whether the entity has a position component.
func (m *PosMatcher) Matches(entity entitas.Entity) bool {
	return entity.HasComponent(ComType_pos)
}

// Equals compares by identity, not by structural equality.
func (m *PosMatcher) Equals(another entitas.Matcher) bool {
	return m == another
}

// String renders the matcher and its hash for debugging.
func (m *PosMatcher) String() string {
	return fmt.Sprintf("PosMatcher(%v)", m.Hash())
}
// System is the per-frame lifecycle contract driven by World:
// OnInit once at startup, then OnUpdate followed by OnCleanup each frame.
type System interface {
	OnInit()
	OnUpdate()
	OnCleanup()
}
// World owns an entitas context and drives the registered systems.
type World struct {
	context entitas.Context
	systems []System
}

// OnInit initializes every registered system once, in registration order.
func (w *World) OnInit() {
	for _, sys := range w.systems {
		sys.OnInit()
	}
}

// OnUpdate runs one frame: every system updates, then every system cleans up.
func (w *World) OnUpdate() {
	for _, sys := range w.systems {
		sys.OnUpdate()
	}
	for _, sys := range w.systems {
		sys.OnCleanup()
	}
}

// AddSystem registers a system to be driven by this world.
func (w *World) AddSystem(system System) {
	w.systems = append(w.systems, system)
}
// MovementSystem moves every positioned entity each frame.
type MovementSystem struct {
	g       entitas.Group
	context entitas.Context
}

// NewMovementSystem creates a movement system bound to the given context.
func NewMovementSystem(context entitas.Context) System {
	m := new(MovementSystem)
	m.context = context
	return m
}

// OnInit caches the group of entities that have a position component.
func (m *MovementSystem) OnInit() {
	posMatcher := NewPosMatch()
	m.g = m.context.Group(posMatcher)
}

// OnUpdate increments each matched entity's y position by 1 and logs it.
func (m *MovementSystem) OnUpdate() {
	for _, entity := range m.g.Entities() {
		posCom := entity.GetComponent(ComType_pos).(*PosCom)
		posCom.y += 1
		fmt.Printf("entity[%d].pos.y = %v\n", entity.ID(), posCom.y)
	}
}

// OnCleanup runs after every frame; currently it only logs.
func (m *MovementSystem) OnCleanup() {
	fmt.Printf("cleaning frame data\n")
}
// TestWorld wires two entities and a MovementSystem into a World and then
// steps it at two frames per second.
// NOTE: the loop below never terminates; this function only returns if the
// process is killed.
func TestWorld(){
context := entitas.NewContext(888)
context.CreateEntity(&PosCom{x:1, y:2})
context.CreateEntity(&RendererCom{screen:888}, &PosCom{x:3, y:4})
world := new(World)
world.context = context
world.AddSystem(NewMovementSystem(context))
world.OnInit()
for {
// frame
world.OnUpdate()
time.Sleep(500 * time.Millisecond)
}
}
// testGetCom benchmarks three component-lookup strategies (hash index,
// binary search, and the default GetComponent) by replaying the same
// pseudo-random lookup sequence loopCount times for each strategy.
func testGetCom() {
fmt.Printf("com count %d\n", ComTypeCount)
context := entitas.NewContext(888)
// e1: an entity with PosCom plus Com0..Com99 (the "many components" case).
e1 := context.CreateEntity(&PosCom{x:1, y:2})
/*
*/
e1.AddComponent(&Com0{x:0})
e1.AddComponent(&Com1{x:1})
e1.AddComponent(&Com2{x:2})
e1.AddComponent(&Com3{x:3})
e1.AddComponent(&Com4{x:4})
e1.AddComponent(&Com5{x:5})
e1.AddComponent(&Com6{x:6})
e1.AddComponent(&Com7{x:7})
e1.AddComponent(&Com8{x:8})
e1.AddComponent(&Com9{x:9})
e1.AddComponent(&Com10{x:10})
e1.AddComponent(&Com11{x:11})
e1.AddComponent(&Com12{x:12})
e1.AddComponent(&Com13{x:13})
e1.AddComponent(&Com14{x:14})
e1.AddComponent(&Com15{x:15})
e1.AddComponent(&Com16{x:16})
e1.AddComponent(&Com17{x:17})
e1.AddComponent(&Com18{x:18})
e1.AddComponent(&Com19{x:19})
e1.AddComponent(&Com20{x:20})
e1.AddComponent(&Com21{x:21})
e1.AddComponent(&Com22{x:22})
e1.AddComponent(&Com23{x:23})
e1.AddComponent(&Com24{x:24})
e1.AddComponent(&Com25{x:25})
e1.AddComponent(&Com26{x:26})
e1.AddComponent(&Com27{x:27})
e1.AddComponent(&Com28{x:28})
e1.AddComponent(&Com29{x:29})
e1.AddComponent(&Com30{x:30})
e1.AddComponent(&Com31{x:31})
e1.AddComponent(&Com32{x:32})
e1.AddComponent(&Com33{x:33})
e1.AddComponent(&Com34{x:34})
e1.AddComponent(&Com35{x:35})
e1.AddComponent(&Com36{x:36})
e1.AddComponent(&Com37{x:37})
e1.AddComponent(&Com38{x:38})
e1.AddComponent(&Com39{x:39})
e1.AddComponent(&Com40{x:40})
e1.AddComponent(&Com41{x:41})
e1.AddComponent(&Com42{x:42})
e1.AddComponent(&Com43{x:43})
e1.AddComponent(&Com44{x:44})
e1.AddComponent(&Com45{x:45})
e1.AddComponent(&Com46{x:46})
e1.AddComponent(&Com47{x:47})
e1.AddComponent(&Com48{x:48})
e1.AddComponent(&Com49{x:49})
e1.AddComponent(&Com50{x:50})
e1.AddComponent(&Com51{x:51})
e1.AddComponent(&Com52{x:52})
e1.AddComponent(&Com53{x:53})
e1.AddComponent(&Com54{x:54})
e1.AddComponent(&Com55{x:55})
e1.AddComponent(&Com56{x:56})
e1.AddComponent(&Com57{x:57})
e1.AddComponent(&Com58{x:58})
e1.AddComponent(&Com59{x:59})
e1.AddComponent(&Com60{x:60})
e1.AddComponent(&Com61{x:61})
e1.AddComponent(&Com62{x:62})
e1.AddComponent(&Com63{x:63})
e1.AddComponent(&Com64{x:64})
e1.AddComponent(&Com65{x:65})
e1.AddComponent(&Com66{x:66})
e1.AddComponent(&Com67{x:67})
e1.AddComponent(&Com68{x:68})
e1.AddComponent(&Com69{x:69})
e1.AddComponent(&Com70{x:70})
e1.AddComponent(&Com71{x:71})
e1.AddComponent(&Com72{x:72})
e1.AddComponent(&Com73{x:73})
e1.AddComponent(&Com74{x:74})
e1.AddComponent(&Com75{x:75})
e1.AddComponent(&Com76{x:76})
e1.AddComponent(&Com77{x:77})
e1.AddComponent(&Com78{x:78})
e1.AddComponent(&Com79{x:79})
e1.AddComponent(&Com80{x:80})
e1.AddComponent(&Com81{x:81})
e1.AddComponent(&Com82{x:82})
e1.AddComponent(&Com83{x:83})
e1.AddComponent(&Com84{x:84})
e1.AddComponent(&Com85{x:85})
e1.AddComponent(&Com86{x:86})
e1.AddComponent(&Com87{x:87})
e1.AddComponent(&Com88{x:88})
e1.AddComponent(&Com89{x:89})
e1.AddComponent(&Com90{x:90})
e1.AddComponent(&Com91{x:91})
e1.AddComponent(&Com92{x:92})
e1.AddComponent(&Com93{x:93})
e1.AddComponent(&Com94{x:94})
e1.AddComponent(&Com95{x:95})
e1.AddComponent(&Com96{x:96})
e1.AddComponent(&Com97{x:97})
e1.AddComponent(&Com98{x:98})
e1.AddComponent(&Com99{x:99})
e1.RebuildComponentIndex()
// e2: a medium entity with PosCom plus Com0..Com7.
e2 := context.CreateEntity(&PosCom{x:1, y:2})
e2.AddComponent(&Com0{x:0})
e2.AddComponent(&Com1{x:1})
e2.AddComponent(&Com2{x:2})
e2.AddComponent(&Com3{x:3})
e2.AddComponent(&Com4{x:4})
e2.AddComponent(&Com5{x:5})
e2.AddComponent(&Com6{x:6})
e2.AddComponent(&Com7{x:7})
// e3: a small entity with PosCom plus Com0; the benchmarks below run on it.
e3 := context.CreateEntity(&PosCom{x:1, y:2})
e3.AddComponent(&Com0{x:0})
e := e3
// NOTE(review): e3 holds only PosCom and Com0, so both sides of each check
// below presumably resolve to "not found" and the checks pass vacuously —
// these sanity checks look like they were meant to run against e1. Confirm.
if e.BinarySearchComponent(ComType_com99) != e.GetComponent(ComType_com99) {
panic("! com-99")
}
if e.LinearSearchComponent(ComType_com44) != e.GetComponent(ComType_com44) {
panic("! com-44")
}
const loopCount = 40000000
const seed = 10324329
// Each timed block reseeds the PRNG so all three strategies replay the
// identical sequence of component-type lookups.
{
rand.Seed(seed)
st := time.Now()
for i := 0; i < loopCount; i++ {
e.DictGetComponent(entitas.ComponentType(rand.Intn(int(ComTypeCount))))
}
et := time.Now()
fmt.Printf("hash-index: %v\n", et.Sub(st))
}
{
rand.Seed(seed)
st := time.Now()
for i := 0; i < loopCount; i++ {
e.BinarySearchComponent(entitas.ComponentType(rand.Intn(int(ComTypeCount))))
}
et := time.Now()
fmt.Printf("binary-search: %v\n", et.Sub(st))
}
{
rand.Seed(seed)
st := time.Now()
for i := 0; i < loopCount; i++ {
e.GetComponent(entitas.ComponentType(rand.Intn(int(ComTypeCount))))
}
et := time.Now()
fmt.Printf("get-com: %v\n", et.Sub(st))
}
}
// main runs the component-lookup benchmark.
// The ECS demo loop (TestWorld) is not invoked here.
func main() {
testGetCom()
}
|
package helper
// workCount is a worker count.
// NOTE(review): neither workCount nor work is referenced in this file;
// confirm they are used elsewhere in the package.
const workCount = 10
// work is a stub: it currently ignores fn and does nothing.
func work(fn func()) {
}
package agent
import (
"net"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/mock"
"github.com/openshift/installer/pkg/ipnet"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/types/baremetal"
"github.com/openshift/installer/pkg/types/none"
"github.com/openshift/installer/pkg/types/vsphere"
)
// TestInstallConfigLoad is a table-driven test for OptionalInstallConfig.Load.
// Each case feeds an install-config YAML document through a mocked file
// fetcher and asserts either a specific validation error or the fully
// defaulted/converted InstallConfig (including deprecated-field migration
// for baremetal and vsphere platforms).
func TestInstallConfigLoad(t *testing.T) {
cases := []struct {
name string
data string
fetchError error
expectedFound bool
expectedError string
expectedConfig *types.InstallConfig
}{
{
name: "unsupported platform",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
platform:
aws:
region: us-east-1
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: `invalid install-config configuration: Platform: Unsupported value: "aws": supported values: "baremetal", "vsphere", "none"`,
},
{
name: "apiVips not set for baremetal Compact platform",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
networkType: OpenShiftSDN
machineNetwork:
- cidr: 192.168.122.0/23
serviceNetwork:
- 172.30.0.0/16
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 3
platform:
baremetal:
externalMACAddress: "52:54:00:f6:b4:02"
provisioningMACAddress: "52:54:00:6e:3b:02"
ingressVIPs:
- 192.168.122.11
hosts:
- name: host1
bootMACAddress: 52:54:01:aa:aa:a1
- name: host2
bootMACAddress: 52:54:01:bb:bb:b1
- name: host3
bootMACAddress: 52:54:01:cc:cc:c1
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: "invalid install-config configuration: [platform.baremetal.apiVIPs: Required value: must specify at least one VIP for the API, platform.baremetal.apiVIPs: Required value: must specify VIP for API, when VIP for ingress is set]",
},
{
name: "Required values not set for vsphere platform",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
platform:
vsphere:
apiVips:
- 192.168.122.10
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: `invalid install-config configuration: platform.vsphere.ingressVIPs: Required value: must specify VIP for ingress, when VIP for API is set`,
},
{
name: "no compute.replicas set for SNO",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OVNKubernetes
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 1
platform:
none : {}
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: "invalid install-config configuration: Compute.Replicas: Required value: Total number of Compute.Replicas must be 0 when ControlPlane.Replicas is 1 for none platform. Found 3",
},
{
name: "invalid networkType for SNO cluster",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OpenShiftSDN
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 1
platform:
none : {}
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: "invalid install-config configuration: Networking.NetworkType: Invalid value: \"OpenShiftSDN\": Only OVNKubernetes network type is allowed for Single Node OpenShift (SNO) cluster",
},
{
name: "invalid platform for SNO cluster",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OpenShiftSDN
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 1
platform:
aws:
region: us-east-1
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: "invalid install-config configuration: [Platform: Unsupported value: \"aws\": supported values: \"baremetal\", \"vsphere\", \"none\", Platform: Invalid value: \"aws\": Only platform none supports 1 ControlPlane and 0 Compute nodes]",
},
{
name: "invalid architecture for SNO cluster",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OVNKubernetes
compute:
- architecture: s390x
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: s390x
hyperthreading: Enabled
name: master
platform: {}
replicas: 1
platform:
none : {}
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: false,
expectedError: "invalid install-config configuration: [ControlPlane.Architecture: Unsupported value: \"s390x\": supported values: \"amd64\", \"arm64\", Compute[0].Architecture: Unsupported value: \"s390x\": supported values: \"amd64\", \"arm64\"]",
},
{
name: "valid configuration for none platform for sno",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OVNKubernetes
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 1
platform:
none : {}
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: true,
expectedConfig: &types.InstallConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: types.InstallConfigVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
AdditionalTrustBundlePolicy: types.PolicyProxyOnly,
BaseDomain: "test-domain",
Networking: &types.Networking{
MachineNetwork: []types.MachineNetworkEntry{
{CIDR: *ipnet.MustParseCIDR("10.0.0.0/16")},
},
NetworkType: "OVNKubernetes",
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
ClusterNetwork: []types.ClusterNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
HostPrefix: 23,
},
},
},
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64(1),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
Compute: []types.MachinePool{
{
Name: "worker",
Replicas: pointer.Int64(0),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
},
Platform: types.Platform{None: &none.Platform{}},
PullSecret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
Publish: types.ExternalPublishingStrategy,
},
},
{
name: "valid configuration for none platform for HA cluster",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
networkType: OVNKubernetes
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 2
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 3
platform:
none : {}
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: true,
expectedConfig: &types.InstallConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: types.InstallConfigVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
AdditionalTrustBundlePolicy: types.PolicyProxyOnly,
BaseDomain: "test-domain",
Networking: &types.Networking{
MachineNetwork: []types.MachineNetworkEntry{
{CIDR: *ipnet.MustParseCIDR("10.0.0.0/16")},
},
NetworkType: "OVNKubernetes",
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
ClusterNetwork: []types.ClusterNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
HostPrefix: 23,
},
},
},
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64(3),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
Compute: []types.MachinePool{
{
Name: "worker",
Replicas: pointer.Int64(2),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
},
Platform: types.Platform{None: &none.Platform{}},
PullSecret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
Publish: types.ExternalPublishingStrategy,
},
},
{
name: "valid configuration for baremetal platform for HA cluster - deprecated and unused fields",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
networkType: OpenShiftSDN
machineNetwork:
- cidr: 192.168.122.0/23
serviceNetwork:
- 172.30.0.0/16
compute:
- architecture: amd64
hyperthreading: Disabled
name: worker
platform: {}
replicas: 2
controlPlane:
architecture: amd64
hyperthreading: Disabled
name: master
platform: {}
replicas: 3
platform:
baremetal:
libvirtURI: qemu+ssh://root@52.116.73.24/system
clusterProvisioningIP: "192.168.122.90"
bootstrapProvisioningIP: "192.168.122.91"
externalBridge: "somevalue"
externalMACAddress: "52:54:00:f6:b4:02"
provisioningNetwork: "Disabled"
provisioningBridge: br0
provisioningMACAddress: "52:54:00:6e:3b:02"
provisioningNetworkInterface: "eth11"
provisioningDHCPExternal: true
provisioningDHCPRange: 172.22.0.10,172.22.0.254
apiVIP: 192.168.122.10
ingressVIP: 192.168.122.11
bootstrapOSImage: https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd
clusterOSImage: https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8
bootstrapExternalStaticIP: 192.1168.122.50
bootstrapExternalStaticGateway: gateway
hosts:
- name: host1
bootMACAddress: 52:54:01:aa:aa:a1
bmc:
address: addr
- name: host2
bootMACAddress: 52:54:01:bb:bb:b1
- name: host3
bootMACAddress: 52:54:01:cc:cc:c1
- name: host4
bootMACAddress: 52:54:01:dd:dd:d1
- name: host5
bootMACAddress: 52:54:01:ee:ee:e1
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: true,
expectedConfig: &types.InstallConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: types.InstallConfigVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
AdditionalTrustBundlePolicy: types.PolicyProxyOnly,
BaseDomain: "test-domain",
Networking: &types.Networking{
MachineNetwork: []types.MachineNetworkEntry{
{CIDR: *ipnet.MustParseCIDR("192.168.122.0/23")},
},
NetworkType: "OpenShiftSDN",
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
ClusterNetwork: []types.ClusterNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
HostPrefix: 23,
},
},
},
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64(3),
Hyperthreading: types.HyperthreadingDisabled,
Architecture: types.ArchitectureAMD64,
},
Compute: []types.MachinePool{
{
Name: "worker",
Replicas: pointer.Int64(2),
Hyperthreading: types.HyperthreadingDisabled,
Architecture: types.ArchitectureAMD64,
},
},
Platform: types.Platform{
BareMetal: &baremetal.Platform{
LibvirtURI: "qemu+ssh://root@52.116.73.24/system",
ClusterProvisioningIP: "192.168.122.90",
BootstrapProvisioningIP: "192.168.122.91",
ExternalBridge: "somevalue",
ExternalMACAddress: "52:54:00:f6:b4:02",
ProvisioningNetwork: "Disabled",
ProvisioningBridge: "br0",
ProvisioningMACAddress: "52:54:00:6e:3b:02",
ProvisioningDHCPRange: "172.22.0.10,172.22.0.254",
DeprecatedProvisioningDHCPExternal: true,
ProvisioningNetworkCIDR: &ipnet.IPNet{
IPNet: net.IPNet{
IP: []byte("\xc0\xa8\x7a\x00"),
Mask: []byte("\xff\xff\xfe\x00"),
},
},
ProvisioningNetworkInterface: "eth11",
Hosts: []*baremetal.Host{
{
Name: "host1",
BootMACAddress: "52:54:01:aa:aa:a1",
BootMode: "UEFI",
HardwareProfile: "default",
BMC: baremetal.BMC{Address: "addr"},
},
{
Name: "host2",
BootMACAddress: "52:54:01:bb:bb:b1",
BootMode: "UEFI",
HardwareProfile: "default",
},
{
Name: "host3",
BootMACAddress: "52:54:01:cc:cc:c1",
BootMode: "UEFI",
HardwareProfile: "default",
},
{
Name: "host4",
BootMACAddress: "52:54:01:dd:dd:d1",
BootMode: "UEFI",
HardwareProfile: "default",
},
{
Name: "host5",
BootMACAddress: "52:54:01:ee:ee:e1",
BootMode: "UEFI",
HardwareProfile: "default",
}},
DeprecatedAPIVIP: "192.168.122.10",
APIVIPs: []string{"192.168.122.10"},
DeprecatedIngressVIP: "192.168.122.11",
IngressVIPs: []string{"192.168.122.11"},
BootstrapOSImage: "https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd",
ClusterOSImage: "https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8",
BootstrapExternalStaticIP: "192.1168.122.50",
BootstrapExternalStaticGateway: "gateway",
},
},
PullSecret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
Publish: types.ExternalPublishingStrategy,
},
},
{
name: "valid configuration for vsphere platform for compact cluster - deprecated field apiVip",
data: `
apiVersion: v1
metadata:
name: test-cluster
baseDomain: test-domain
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
networkType: OpenShiftSDN
machineNetwork:
- cidr: 192.168.122.0/23
serviceNetwork:
- 172.30.0.0/16
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
platform: {}
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
platform: {}
replicas: 3
platform:
vsphere :
vcenter: 192.168.122.30
username: testUsername
password: testPassword
datacenter: testDataCenter
defaultDataStore: testDefaultDataStore
cluster: testCluster
apiVIP: 192.168.122.10
ingressVIPs:
- 192.168.122.11
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
`,
expectedFound: true,
expectedConfig: &types.InstallConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: types.InstallConfigVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
AdditionalTrustBundlePolicy: types.PolicyProxyOnly,
BaseDomain: "test-domain",
Networking: &types.Networking{
MachineNetwork: []types.MachineNetworkEntry{
{CIDR: *ipnet.MustParseCIDR("192.168.122.0/23")},
},
NetworkType: "OpenShiftSDN",
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
ClusterNetwork: []types.ClusterNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
HostPrefix: 23,
},
},
},
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64(3),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
Compute: []types.MachinePool{
{
Name: "worker",
Replicas: pointer.Int64(0),
Hyperthreading: types.HyperthreadingEnabled,
Architecture: types.ArchitectureAMD64,
},
},
Platform: types.Platform{
VSphere: &vsphere.Platform{
DeprecatedVCenter: "192.168.122.30",
DeprecatedUsername: "testUsername",
DeprecatedPassword: "testPassword",
DeprecatedDatacenter: "testDataCenter",
DeprecatedCluster: "testCluster",
DeprecatedDefaultDatastore: "testDefaultDataStore",
DeprecatedAPIVIP: "192.168.122.10",
APIVIPs: []string{"192.168.122.10"},
IngressVIPs: []string{"192.168.122.11"},
VCenters: []vsphere.VCenter{{
Server: "192.168.122.30",
Port: 443,
Username: "testUsername",
Password: "testPassword",
Datacenters: []string{"testDataCenter"},
}},
FailureDomains: []vsphere.FailureDomain{{
Name: "generated-failure-domain",
Region: "generated-region",
Zone: "generated-zone",
Server: "192.168.122.30",
Topology: vsphere.Topology{
Datacenter: "testDataCenter",
ComputeCluster: "/testDataCenter/host/testCluster",
Networks: []string{""},
Datastore: "/testDataCenter/datastore/testDefaultDataStore",
ResourcePool: "/testDataCenter/host/testCluster//Resources",
Folder: "",
},
}},
},
},
PullSecret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
Publish: types.ExternalPublishingStrategy,
},
},
}
// Run every case against a mocked file fetcher that serves tc.data (or
// tc.fetchError) for the install-config filename.
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
// Load may fetch the file up to twice (e.g. once for detection, once
// for parsing), hence MaxTimes(2).
fileFetcher.EXPECT().FetchByName(installConfigFilename).
Return(
&asset.File{
Filename: installConfigFilename,
Data: []byte(tc.data)},
tc.fetchError,
).MaxTimes(2)
asset := &OptionalInstallConfig{}
found, err := asset.Load(fileFetcher)
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
if tc.expectedError != "" {
assert.Equal(t, tc.expectedError, err.Error())
} else {
assert.NoError(t, err)
}
if tc.expectedFound {
assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in InstallConfig")
}
})
}
}
|
package queue
import (
"errors"
"fmt"
)
// MyQueue is a simple FIFO queue of ints backed by a slice.
type MyQueue struct {
a []int
}
// NOTE(review): this package-level queue is never referenced in this file;
// confirm it is used elsewhere in the package or remove it.
var q MyQueue
// NewQueue returns an empty queue ready for use.
func NewQueue() *MyQueue {
	queue := MyQueue{a: []int{}}
	return &queue
}
// Enqueue appends val to the back of the queue.
func (q *MyQueue) Enqueue(val int) {
q.a = append(q.a, val)
}
// Dequeue removes the element at the front of the queue.
// It returns an error when the queue is empty.
//
// The previous implementation special-cased a single-element queue with a
// fresh allocation; re-slicing off the head handles every non-empty length.
// Note: re-slicing keeps the old backing array reachable until the queue
// grows past it, which is acceptable for this small int queue.
func (q *MyQueue) Dequeue() error {
	if len(q.a) == 0 {
		return errors.New("The queue is empty")
	}
	q.a = q.a[1:]
	return nil
}
// isEmpty reports whether the queue contains no elements.
// Returns the comparison directly instead of an if/else pair that returns
// literal true/false (staticcheck S1008).
func (q *MyQueue) isEmpty() bool {
	return len(q.a) == 0
}
// Display prints the queue's current contents (front first) to stdout.
func (q *MyQueue) Display() {
fmt.Println(q.a)
}
|
package mainMenu
import (
"github.com/myProj/scaner/new/include/config/extensions"
"github.com/myProj/scaner/new/include/config/settings"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/widgets"
)
// TabCheckBoxSettings collects the check boxes created for the extensions
// tab (ExtcheckBoxes) and the additional-settings tab (SetcheckBoxes) so
// callers can read their states later.
type TabCheckBoxSettings struct {
ExtcheckBoxes []*widgets.QCheckBox
SetcheckBoxes []*widgets.QCheckBox
}
// NewExtensionTab builds the dialog window holding two tabs — the extension
// list and the additional settings — and returns the widget together with
// the check boxes created for both tabs.
func NewExtensionTab()(*widgets.QWidget,*TabCheckBoxSettings){
exclWidget := widgets.NewQWidget(nil,1)
vbox := widgets.NewQVBoxLayout()
exclWidget.SetLayout(vbox)
exclWidget.SetWindowFlags(core.Qt__Dialog)
infoTab := widgets.NewQTabWidget(nil)
eT,checkBoxesExt := extensionTab()
sT,checkBoxesSet := settingsTab(exclWidget)
// Tab titles are user-facing Russian strings: "Extension list" and
// "Additional settings".
infoTab.AddTab(eT, "Список расширений")
infoTab.AddTab(sT, "Дополнительные настройки")
vbox.Layout().AddWidget(infoTab)
TabCheckBoxS := &TabCheckBoxSettings{
ExtcheckBoxes: checkBoxesExt,
SetcheckBoxes: checkBoxesSet,
}
return exclWidget,TabCheckBoxS
}
// extensionTab builds the widget listing every known file extension as a
// check box (checked when scanning is allowed for it), laid out in a grid
// filled left-to-right with getNewPosition(limit 3, i.e. columns 0-3).
func extensionTab()(*widgets.QWidget,[]*widgets.QCheckBox){
extList := widgets.NewQWidget(nil,1)
vbox := widgets.NewQGridLayout(nil)
//
extList.SetLayout(vbox)
exts := extensions.GetAllowList()
var checkBoxes []*widgets.QCheckBox
var i,j int
for _,ext := range exts {
chbx := widgets.NewQCheckBox2(ext.Ext,nil)
chbx.SetChecked(false)
if ext.AllowScanning {
chbx.SetChecked(true)
}
checkBoxes = append(checkBoxes,chbx)
vbox.AddWidget2(chbx,i,j,0)
i,j = getNewPosition(i,j,3)
}
return extList,checkBoxes
}
// getNewPosition advances a grid cursor one cell to the right; once the
// column passes limit, it wraps to column 0 of the next row.
// Note that columns 0..limit inclusive are used (limit+1 columns per row).
func getNewPosition(row, column, limit int) (int, int) {
	if column >= limit {
		return row + 1, 0
	}
	return row, column + 1
}
// settingsTab builds the "additional settings" widget: an archive free-space
// spin box followed by one check box per configured setting. When the
// enclosing dialog is closed, the current spin-box value and the
// "show unknown extensions" check box state are written back to the config.
func settingsTab(exclWidget *widgets.QWidget)(*widgets.QWidget,[]*widgets.QCheckBox){
settingsList := widgets.NewQWidget(nil,0)
vbox := widgets.NewQVBoxLayout()
//vbox.SetContentsMargins(0,0,0,0)
//vbox.SetSpacing(0)
//vbox.SetAlignment2(vbox,core.Qt__AlignTop)
settingsList.SetLayout(vbox)
sets := settings.ReadSettingsFromConfig()
spinLimit := limitArchSetting()
spinName := widgets.NewQLabel2("Ограничение на минимальное свободное место при распаковке архивов в ГБ",spinLimit,0)
vbox.AddWidget(spinName,0,0)
vbox.AddWidget(spinLimit,0,0)
var checkBoxes []*widgets.QCheckBox
//m := newSetMap()
// The archive-limit setting is rendered by the spin box above, so it is
// skipped here rather than shown as a check box.
for _,set := range sets {
if set.Setting == "Ограничение на минимальное свободное место при распаковке архивов в ГБ"{
continue
}
chbx := widgets.NewQCheckBox2(set.Setting,nil)
chbx.SetChecked(false)
if set.IsAllowSetting {
chbx.SetChecked(true)
}
checkBoxes = append(checkBoxes,chbx)
vbox.AddWidget(chbx,0,0)
//i,j = getNewPosition(i,j,3)
}
exclWidget.ConnectCloseEvent(func(event *gui.QCloseEvent) {
settings.SetArchiveLimit(spinLimit.Value())
sets := settings.ReadSettingsFromConfig()
// NOTE(review): checkBoxes excludes the skipped archive-limit entry, so
// index i into sets may point at a different setting than c when the
// skipped entry precedes the matched one — confirm sets[i] is the
// intended target here.
for i,c := range checkBoxes {
if c.Text() == "Отображать файлы в таблице с неизвестным расширением"{
sets[i].IsAllowSetting = c.IsChecked()
}
}
settings.SetNewConfig(sets)
})
vbox.AddStretch(0)
return settingsList,checkBoxes
}
// limitArchSetting builds the spin box (range 0-1000) used to configure the
// minimum free space (GB) required when unpacking archives, preloaded with
// the value stored in the config.
func limitArchSetting() *widgets.QSpinBox{
spinBox := widgets.NewQSpinBox(nil)
spinBox.SetMinimum(0)
spinBox.SetMaximum(1000)
spinBox.SetValue(settings.GetArchiveLimit())
return spinBox
}
// newSetMap maps internal setting keys to their human-readable (Russian)
// labels. Currently not referenced by the tab builders in this file.
func newSetMap() map[string]string {
	// Candidates that were considered but are not wired up yet:
	//   restrictCountOfLinesInTable -> "Ограничить число строк в таблиц"
	//   restrictCountOfLinesInTable -> "Записывать неизвестные ошибки в файл"
	return map[string]string{
		"allowUnknownExtension": "Отображать файлы в таблице с неизвестным расширением",
	}
}
|
package dataconv
import "fmt"
// ShowConv demonstrates some type conversion: widening an int to float64
// for arithmetic, and formatting a float into a string with fmt.Sprintf.
func ShowConv() {
	a := 24  // int
	b := 2.0 // float64
	// Mixed-type arithmetic requires an explicit conversion in Go.
	product := float64(a) * b
	fmt.Println(product)
	// fmt.Sprintf is a good way to convert values to strings.
	formatted := fmt.Sprintf("%.2f", b)
	// Print the value and its type.
	fmt.Printf("%s - %T\n", formatted, formatted)
}
|
package synapse
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// errorData holds the parsed "error_responses" fixture, keyed by case name.
var errorData map[string]interface{}
/********** METHODS **********/
// init loads the shared error-response fixture once for every test in this
// file. A read failure aborts the test binary immediately via panic.
func init() {
data, err := readFile("error_responses")
if err != nil {
panic(err)
}
errorData = data
}
/********** TESTS **********/
// Test_HandleHTTPError checks that handleHTTPError turns every fixture
// response into an error whose message combines the HTTP code, the API
// error code, and the original English message.
func Test_HandleHTTPError(t *testing.T) {
assert := assert.New(t)
for k := range errorData {
testErrRes, _ := json.Marshal(errorData[k])
testErr := handleHTTPError(testErrRes)
errData := errorData[k].(map[string]interface{})
httpCode := errData["http_code"].(string)
errCode := errData["error_code"].(string)
msg := errData["error"].(map[string]interface{})["en"].(string)
responseMsg := "http_code " + httpCode + " error_code " + errCode + " " + msg
// error message should be an error and print error code plus original API message
assert.EqualError(testErr, responseMsg)
}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/6/4 9:33 上午
# @File : matrix_test.go.go
# @Description :
# @Attention :
*/
package v2
import (
"fmt"
"testing"
)
// Test_updateMatrix prints updateMatrix's output for a 3x3 grid with a
// single 1 in the center; it has no assertions and only verifies the call
// does not panic.
func Test_updateMatrix(t *testing.T) {
fmt.Println(updateMatrix([][]int{
{0,0,0},
{0,1,0},
{0,0,0},
}))
}
|
package repository
import (
"github.com/jmoiron/sqlx"
)
// HotelEntity is one row returned by the hotel room-availability queries.
type HotelEntity struct {
ID int `json:"id" db:"id"`
HotelName string `json:"hotel_name" db:"hotel_name"`
Address string `json:"address" db:"address"`
// NOTE(review): sqlx `db` tags are plain column names — confirm the
// ",omitempty" suffix here still maps to the "room_availability" column.
RoomAvailability int `json:"room_availability,omitempty" db:"room_availability,omitempty"`
}
// IHotelRepo describes the hotel availability queries: a paged search
// (limit, offset, check-in date, check-out date, hotel-id SQL fragment)
// and a matching total count.
type IHotelRepo interface {
SearchHotelRoomAvailabilities(int, int, string, string, string) []HotelEntity
CountHotelRoomAvailabilities(string, string, string) int
}
// HotelRepo is the sqlx-backed implementation of IHotelRepo.
type HotelRepo struct {
db *sqlx.DB
}
// NewHotelRepo creates the MySQL-backed hotel repository over the given
// sqlx connection.
func NewHotelRepo(db *sqlx.DB) *HotelRepo {
return &HotelRepo{db}
}
// SearchHotelRoomAvailabilities returns one page (LIMIT offset,limit) of
// hotels together with their computed room availability for stays between
// checkinDate and checkoutDate.
//
// SECURITY(review): hotelIdsCondition is concatenated directly into the SQL
// text (twice). It must never carry untrusted input — keep it restricted to
// server-built fragments or parameterize it upstream.
// NOTE(review): query errors are silently swallowed and reported as an empty
// result; confirm callers do not need to distinguish "no rows" from failure.
func (hotelRepo *HotelRepo) SearchHotelRoomAvailabilities(limit int, offset int, checkinDate string, checkoutDate string, hotelIdsCondition string) []HotelEntity {
he := []HotelEntity{}
query := `
SELECT
rc.id, rc.hotel_name, rc.address,
CASE
WHEN rc.id = rg.id THEN rc.total_rooms - rg.max_guest
ELSE rc.total_rooms
END as room_availability
FROM
(
SELECT
h.*, COUNT(r.id) as total_rooms
FROM
hotel h
JOIN
room r ON h.id = r.hotel_id
WHERE
r.room_status = 'available' ` + hotelIdsCondition + `
GROUP BY
h.id
) as rc
LEFT OUTER JOIN
(
SELECT id, MAX(guests.total_guest) as max_guest
FROM
(
SELECT
h.id, sr.date, COUNT(sr.id) as total_guest
FROM
hotel h
JOIN
room r ON r.hotel_id = h.id
JOIN
stay_room sr ON r.id = sr.room_id
WHERE
sr.date BETWEEN ? AND ? ` + hotelIdsCondition + `
GROUP BY
sr.date, h.id
) as guests
GROUP BY id
) as rg ON rc.id = rg.id AND rc.total_rooms > rg.max_guest
LIMIT ?,?
`
err := hotelRepo.db.Select(&he, query, checkinDate, checkoutDate, offset, limit)
if err != nil {
return []HotelEntity{}
}
return he
}
// CountHotelRoomAvailabilities returns the total number of hotels matched by
// the same availability query used in SearchHotelRoomAvailabilities, for use
// in pagination.
//
// SECURITY(review): hotelIdsCondition is concatenated directly into the SQL
// text (twice) — same caveat as SearchHotelRoomAvailabilities.
// NOTE(review): query errors are swallowed and reported as a count of 0.
func (hotelRepo *HotelRepo) CountHotelRoomAvailabilities(checkinDate string, checkoutDate string, hotelIdsCondition string) int {
var count int
query := `
SELECT
count(rc.id)
FROM
(
SELECT
h.*, COUNT(r.id) as total_rooms
FROM
hotel h
JOIN
room r ON h.id = r.hotel_id
WHERE
r.room_status = 'available' ` + hotelIdsCondition + `
GROUP BY
h.id
) as rc
LEFT OUTER JOIN
(
SELECT id, MAX(guests.total_guest) as max_guest
FROM
(
SELECT
h.id, sr.date, COUNT(sr.id) as total_guest
FROM
hotel h
JOIN
room r ON r.hotel_id = h.id
JOIN
stay_room sr ON r.id = sr.room_id
WHERE
sr.date BETWEEN ? AND ? ` + hotelIdsCondition + `
GROUP BY
sr.date, h.id
) as guests
GROUP BY id
) as rg ON rc.id = rg.id AND rc.total_rooms > rg.max_guest
`
err := hotelRepo.db.Get(&count, query, checkinDate, checkoutDate)
if err != nil {
return 0
}
return count
}
|
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netconfig
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestBasic exercises the RemoteProvider built from external remotes:
// lookup by address, login presence, the immutability of WithUpdatedRemote
// and WithoutRemote (the source provider is unchanged), and the sorted
// output of ToExternalRemotes.
func TestBasic(t *testing.T) {
externalRemotes := []ExternalRemote{
{
Address: "goo",
},
{
Address: "foo",
Login: ExternalLogin{
Token: "baz",
},
},
}
remoteProvider, err := NewRemoteProvider(externalRemotes)
require.NoError(t, err)
remote, ok := remoteProvider.GetRemote("foo")
require.True(t, ok)
require.Equal(t, "foo", remote.Address())
login, ok := remote.Login()
require.True(t, ok)
require.Equal(t, "baz", login.Token())
remote, ok = remoteProvider.GetRemote("goo")
require.True(t, ok)
require.Equal(t, "goo", remote.Address())
_, ok = remote.Login()
require.False(t, ok)
_, ok = remoteProvider.GetRemote("hoo")
require.False(t, ok)
updatedRemoteProvider, err := remoteProvider.WithUpdatedRemote(
"foo",
"ban",
)
require.NoError(t, err)
// The original provider must still report the old token after the update.
remote, ok = remoteProvider.GetRemote("foo")
require.True(t, ok)
require.Equal(t, "foo", remote.Address())
login, ok = remote.Login()
require.True(t, ok)
require.Equal(t, "baz", login.Token())
remote, ok = updatedRemoteProvider.GetRemote("foo")
require.True(t, ok)
require.Equal(t, "foo", remote.Address())
login, ok = remote.Login()
require.True(t, ok)
require.Equal(t, "ban", login.Token())
deleteRemoteProvider, ok := remoteProvider.WithoutRemote("foo")
require.True(t, ok)
_, ok = deleteRemoteProvider.GetRemote("foo")
require.False(t, ok)
_, ok = deleteRemoteProvider.GetRemote("goo")
require.True(t, ok)
// Removing an already-removed remote reports false and leaves the
// provider unchanged.
deleteRemoteProvider, ok = deleteRemoteProvider.WithoutRemote("foo")
require.False(t, ok)
_, ok = deleteRemoteProvider.GetRemote("foo")
require.False(t, ok)
_, ok = deleteRemoteProvider.GetRemote("goo")
require.True(t, ok)
// ToExternalRemotes emits remotes sorted by address ("foo" before "goo"),
// regardless of the construction order.
outputExternalRemotes := remoteProvider.ToExternalRemotes()
require.Equal(
t,
[]ExternalRemote{
{
Address: "foo",
Login: ExternalLogin{
Token: "baz",
},
},
{
Address: "goo",
},
},
outputExternalRemotes,
)
}
|
package ebby
import (
"github.com/conest/ebby/game"
)
// Ebby : the top-level game container, holding the scene map, lifecycle
// hooks, and shared public data.
type Ebby struct {
sceneMap game.SceneMap
fn game.ExFunctions
publicData interface{}
}
// New : creates a new Ebby instance for the given scene map.
func New(sceneMap game.SceneMap) *Ebby {
return &Ebby{
sceneMap: sceneMap,
fn: game.ExFunctions{},
}
}
// SetInitialFunc : sets the game initialization callback, invoked when the
// whole game is loaded.
func (e *Ebby) SetInitialFunc(fn func(*game.Game)) {
e.fn.Ini = fn
}
// SetBeforeExitFunc sets the shutdown hook; it is triggered just before
// the game exits.
func (e *Ebby) SetBeforeExitFunc(fn func(*game.Game)) {
	e.fn.Exi = fn
}
// SetPublicData stores arbitrary user data that serves as the game's
// global shared state.
func (e *Ebby) SetPublicData(i interface{}) {
	e.publicData = i
}
// Run starts the game, substituting the package defaults for any
// lifecycle hook that was left unset.
func (e *Ebby) Run() {
	// The two fallbacks are independent, so order does not matter.
	if e.fn.Exi == nil {
		e.fn.Exi = defaultBeforeExitFunc
	}
	if e.fn.Ini == nil {
		e.fn.Ini = defaultInitialFunc
	}
	game.Enter(e.sceneMap, &e.fn, e.publicData)
}
|
package parser
import (
"fmt"
"os"
"encoding/xml"
"nighthawk/elastic"
nhconfig "nighthawk/config"
nhs "nighthawk/nhstruct"
nhlog "nighthawk/log"
nhc "nighthawk/common"
)
func ParseDriverModules(caseinfo nhs.CaseInformation, auditinfo nhs.AuditType, auditfile string) {
MAX_RECORD := nhconfig.BulkPostSize()
xmlFile, err := os.Open(auditfile)
if err != nil {
nhlog.LogMessage("ParseDriverModules", "ERROR", fmt.Sprintf("Failed to read audit file %s", err.Error()))
os.Exit(nhc.ERROR_READING_AUDIT_FILE)
}
defer xmlFile.Close()
decoder := xml.NewDecoder(xmlFile)
count := 0
total := 0
var inElement string
var esrecords []nhs.RlRecord
for {
if count == MAX_RECORD {
elastic.ProcessOutput(caseinfo, auditinfo, esrecords)
esrecords = esrecords[:0]
count = 0
}
t,_ := decoder.Token()
if t == nil {
elastic.ProcessOutput(caseinfo, auditinfo, esrecords)
esrecords = esrecords[:0]
count++
break
}
switch se := t.(type) {
case xml.StartElement:
inElement = se.Name.Local
if inElement == "ModuleItem" {
var item nhs.ModuleItem
decoder.DecodeElement(&item, &se)
item.IsWhitelisted = false
var rlrec nhs.RlRecord
rlrec.ComputerName = caseinfo.ComputerName
rlrec.CaseInfo = caseinfo
rlrec.AuditType = auditinfo
rlrec.Record = item
esrecords = append(esrecords, rlrec)
count++
total++
}
} // __end_switch__
} // __end_for__
cmsg := fmt.Sprintf("Total DriverModules %d", total)
nhlog.LogMessage("ParseDriverModules", "INFO", cmsg)
} |
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacy
import (
"context"
"io"
"k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
)
// DockerLegacyService interface is used throughout `pkg/kubelet`.
// It used to live in the `pkg/kubelet/dockershim` package. While we
// would eventually like to remove it entirely, we need to give users some form
// of warning.
//
// By including the interface in
// `pkg/kubelet/legacy/logs.go`, we ensure the interface is
// available to `pkg/kubelet`, even when we are building with the `dockerless`
// tag (i.e. not compiling the dockershim).
// While the interface always exists, there will be no implementations of the
// interface when building with the `dockerless` tag. The lack of
// implementations should not be an issue, as we only expect `pkg/kubelet` code
// to need an implementation of the `DockerLegacyService` when we are using
// docker. If we are using docker, but building with the `dockerless` tag, than
// this will be just one of many things that breaks.
type DockerLegacyService interface {
	// GetContainerLogs gets logs for a specific container, writing them to
	// the supplied stdout/stderr writers.
	GetContainerLogs(context.Context, *v1.Pod, kubecontainer.ContainerID, *v1.PodLogOptions, io.Writer, io.Writer) error

	// IsCRISupportedLogDriver checks whether the logging driver used by docker is
	// supported by native CRI integration.
	// TODO(resouer): remove this when deprecating unsupported log driver
	IsCRISupportedLogDriver() (bool, error)

	// LegacyLogProvider is embedded so implementations also satisfy the
	// kuberuntime legacy log interface.
	kuberuntime.LegacyLogProvider
}
|
// ˅
package main
// ˄
// Director drives a Builder through the fixed sequence of calls needed
// to construct a document (GoF Builder pattern).
type Director struct {
	// ˅
	// ˄

	builder Builder // concrete builder that receives the construction calls

	// ˅
	// ˄
}
// NewDirector returns a Director that will drive the supplied Builder.
func NewDirector(builder Builder) *Director {
	// ˅
	d := Director{builder: builder}
	return &d
	// ˄
}
// Build constructs a document by driving the builder through the
// canonical sequence: title, two sections with their items, then Close.
func (d *Director) Build() {
	// ˅
	d.builder.CreateTitle("Greeting")                                              // Title
	d.builder.CreateSection("Morning and Afternoon")                               // Section
	d.builder.CreateItems([]string{"Good morning.", "Hello."})                     // Items
	d.builder.CreateSection("Evening")                                             // Other section
	d.builder.CreateItems([]string{"Good evening.", "Good night.", "Goodbye."})    // Other items
	d.builder.Close()
	// ˄
}
// ˅
// ˄
|
package main
import (
"fmt"
"sync"
"time"
)
// wg tracks the worker goroutines spawned by main.
var wg sync.WaitGroup
// main launches three workers and blocks until all of them report done.
func main() {
	const workers = 3
	for id := 0; id < workers; id++ {
		wg.Add(1)
		go worker(id)
	}
	wg.Wait()
	fmt.Println("all done")
}
// worker prints its id, simulates one second of work, and marks the
// shared WaitGroup done. Done is deferred so the counter is released
// even if the body panics.
func worker(i int) {
	defer wg.Done()
	fmt.Println(i)
	time.Sleep(time.Second * time.Duration(1))
}
|
package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/hashicorp/errwrap"
)
// SystemTimeglassPath returns the system path where all
// timeglass related data is stored for this machine: PROGRAMDATA
// (falling back to ALLUSERSPROFILE) on Windows, a fixed /Library
// location on darwin, /var/lib on linux; other platforms error.
func SystemTimeglassPath() (string, error) {
	if runtime.GOOS == "windows" {
		//@see http://blogs.msdn.com/b/patricka/archive/2010/03/18/where-should-i-store-my-data-and-configuration-files-if-i-target-multiple-os-versions.aspx
		//win 7/vista
		if path := os.Getenv("PROGRAMDATA"); path != "" {
			return filepath.Join(path, "Timeglass"), nil
		} else if path = os.Getenv("ALLUSERSPROFILE"); path != "" {
			return filepath.Join(path, "Timeglass"), nil
		}

		// Fixed two typos in this message: "environmnet" -> "environment"
		// and 'ALLUSERPROFILE' -> 'ALLUSERSPROFILE' (the variable actually
		// checked above).
		return "", fmt.Errorf("Expected environment variable 'PROGRAMDATA' or 'ALLUSERSPROFILE'")
	} else if runtime.GOOS == "darwin" {
		//osx we can actually create user specific services, and as such, store data for the user specifically
		return filepath.Join("/", "Library", "Timeglass"), nil
	} else if runtime.GOOS == "linux" {
		return filepath.Join("/var/lib", "timeglass"), nil
	}

	return "", fmt.Errorf("Operating system is not yet supported")
}
// SystemTimeglassPathCreateIfNotExist resolves the system data path and
// ensures the directory exists, creating it with mode 0755 if necessary.
func SystemTimeglassPathCreateIfNotExist() (string, error) {
	path, err := SystemTimeglassPath()
	if err != nil {
		return "", err
	}

	if mkErr := os.MkdirAll(path, 0755); mkErr != nil {
		return "", errwrap.Wrapf(fmt.Sprintf("Failed to create Timeglass system dir '%s': {{err}}", path), mkErr)
	}
	return path, nil
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// Readfile opens filename and returns its first line (up to and
// including the '\n' delimiter, when present).
func Readfile(filename string) (string, error) {
	// Open the file; propagate the error if that fails.
	f, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	// Close the handle when this function returns.
	defer f.Close()

	// Wrap the file in a buffered reader and read up to the first newline.
	line, _ := bufio.NewReader(f).ReadString('\n')
	return line, nil
}
// Writefile creates (or truncates) filename and writes line followed by
// a trailing newline, returning any creation or write error.
func Writefile(filename string, line string) error {
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	// Fprintln appends the '\n' for us.
	_, err = fmt.Fprintln(f, line)
	return err
}
// filename is the data file shared by the read/write demo in main.
const filename string = "data.txt"
// main tries to read data.txt; if that fails it creates the file with a
// default line, reads it back, and prints the resulting content.
func main() {
	line, err := Readfile(filename) // first read attempt
	if err != nil {
		// The file is missing — create it with a default line.
		if err := Writefile(filename, "This is Writefile"); err != nil {
			fmt.Println("Failed to create a file", err)
			return
		}
		// Retry the read now that the file exists.
		if line, err = Readfile(filename); err != nil {
			fmt.Println("Failed to read a file")
			return
		}
	}
	fmt.Println("file context:", line)
}
|
package plumber
import (
"context"
"fmt"
"strings"
"github.com/batchcorp/plumber-schemas/build/go/protos"
"github.com/batchcorp/plumber-schemas/build/go/protos/common"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/mcuadros/go-lookup"
"github.com/pkg/errors"
"github.com/posthog/posthog-go"
)
// HandleGetConnectionCmd records a "get_connection" analytics event, asks
// the plumber server for the connection identified by the CLI
// connection id, and prints the result. Request errors are rendered as
// JSON and swallowed (nil is returned); only display failures propagate.
func (p *Plumber) HandleGetConnectionCmd(ctx context.Context, client protos.PlumberServerClient) error {
	p.EnqueueManage(posthog.Capture{
		Event:      "command_manage",
		DistinctId: p.PersistentConfig.PlumberID,
		Properties: map[string]interface{}{
			"connection_id": p.CLIOptions.Manage.Get.Connection.Id,
			"method":        "get_connection",
		},
	})

	resp, err := client.GetConnection(ctx, &protos.GetConnectionRequest{
		Auth: &common.Auth{
			Token: p.CLIOptions.Manage.GlobalOptions.ManageToken,
		},
		ConnectionId: p.CLIOptions.Manage.Get.Connection.Id,
	})
	if err != nil {
		p.displayJSON(map[string]string{"error": err.Error()})
		return nil
	}

	if err := p.displayProtobuf(resp); err != nil {
		return errors.Wrap(err, "failed to display response")
	}

	return nil
}
// HandleGetAllConnectionsCmd records a "get_all_connections" analytics
// event, fetches every connection from the plumber server, and prints
// the result. Request errors are rendered as JSON and swallowed; only
// display failures propagate.
func (p *Plumber) HandleGetAllConnectionsCmd(ctx context.Context, client protos.PlumberServerClient) error {
	p.EnqueueManage(posthog.Capture{
		Event:      "command_manage",
		DistinctId: p.PersistentConfig.PlumberID,
		Properties: map[string]interface{}{
			"method": "get_all_connections",
		},
	})

	resp, err := client.GetAllConnections(ctx, &protos.GetAllConnectionsRequest{
		Auth: &common.Auth{
			Token: p.CLIOptions.Manage.GlobalOptions.ManageToken,
		},
	})
	if err != nil {
		p.displayJSON(map[string]string{"error": err.Error()})
		return nil
	}

	if err := p.displayProtobuf(resp); err != nil {
		return errors.Wrap(err, "failed to display response")
	}

	return nil
}
// HandleDeleteConnectionCmd records a "delete_connection" analytics
// event, deletes the connection identified by the CLI connection id, and
// prints the result. Request errors are rendered as JSON and swallowed;
// only display failures propagate.
func (p *Plumber) HandleDeleteConnectionCmd(ctx context.Context, client protos.PlumberServerClient) error {
	p.EnqueueManage(posthog.Capture{
		Event:      "command_manage",
		DistinctId: p.PersistentConfig.PlumberID,
		Properties: map[string]interface{}{
			"method":        "delete_connection",
			"connection_id": p.CLIOptions.Manage.Delete.Connection.Id,
		},
	})

	resp, err := client.DeleteConnection(ctx, &protos.DeleteConnectionRequest{
		Auth: &common.Auth{
			Token: p.CLIOptions.Manage.GlobalOptions.ManageToken,
		},
		ConnectionId: p.CLIOptions.Manage.Delete.Connection.Id,
	})
	if err != nil {
		p.displayJSON(map[string]string{"error": err.Error()})
		// Bail out like the other manage handlers do: without this return
		// the code below called displayProtobuf with a nil response.
		return nil
	}

	if err := p.displayProtobuf(resp); err != nil {
		return errors.Wrap(err, "failed to display response")
	}

	return nil
}
// HandleCreateConnectionCmd builds connection options from the CLI
// arguments, records a "create_connection" analytics event, creates the
// connection on the plumber server, and prints the result. Request
// errors are rendered as JSON and swallowed; option-generation and
// display failures propagate.
func (p *Plumber) HandleCreateConnectionCmd(ctx context.Context, client protos.PlumberServerClient) error {
	// Create conn from CLI options
	connOpts, err := generateConnOptionsForManageCreate(p.CLIOptions)
	if err != nil {
		return errors.Wrap(err, "failed to generate connection options")
	}

	p.EnqueueManage(posthog.Capture{
		Event:      "command_manage",
		DistinctId: p.PersistentConfig.PlumberID,
		Properties: map[string]interface{}{
			"backend": p.CLIOptions.Global.XBackend,
			"method":  "create_connection",
		},
	})

	resp, err := client.CreateConnection(ctx, &protos.CreateConnectionRequest{
		Auth: &common.Auth{
			Token: p.CLIOptions.Manage.GlobalOptions.ManageToken,
		},
		Options: connOpts,
	})
	if err != nil {
		p.displayJSON(map[string]string{"error": err.Error()})
		return nil
	}

	// Check the display error like the sibling handlers do; it was
	// previously ignored.
	if err := p.displayProtobuf(resp); err != nil {
		return errors.Wrap(err, "failed to display response")
	}

	return nil
}
// generateConnOptionsForManageCreate builds a *opts.ConnectionOptions for
// "manage create connection" from the parsed CLI options, using a
// reflection-based lookup to locate the backend-specific connection
// arguments by the (dash-stripped) backend name.
func generateConnOptionsForManageCreate(cliOpts *opts.CLIOptions) (*opts.ConnectionOptions, error) {
	connOpts := &opts.ConnectionOptions{}

	if cliOpts.Manage.Create.Connection.Name != "" {
		connOpts.Name = cliOpts.Manage.Create.Connection.Name
	}

	if cliOpts.Manage.Create.Connection.Notes != "" {
		connOpts.Notes = cliOpts.Manage.Create.Connection.Notes
	}

	// We need to be able to generate a ConnectionOptions from the CLI options.
	// We have backend-specific arguments in options, but we do not have them
	// in the form of *ConnectionOptions.
	//
	// The following "cleverness" is done to dynamically generate such options
	// from the CLI args.

	// Some backends have a dash, remove it
	backendName := strings.Replace(cliOpts.Global.XBackend, "-", "", -1)

	// We are looking for the individual conn located at: cfg.$action.$backendName.XConn
	lookupStrings := []string{"manage", "create", "connection", backendName}

	backendInterface, err := lookup.LookupI(cliOpts, lookupStrings...)
	if err != nil {
		return nil, fmt.Errorf("unable to lookup connection info for backendName '%s': %s",
			cliOpts.Global.XBackend, err)
	}

	conn, ok := opts.GenerateConnOpts(backendName, backendInterface.Interface())
	if !ok {
		return nil, errors.New("unable to generate connection options via proto func")
	}

	connOpts.Conn = conn

	return connOpts, nil
}
|
package service
import (
"strings"
"tesou.io/platform/brush-parent/brush-api/common/base"
"tesou.io/platform/brush-parent/brush-api/module/odds/pojo"
"tesou.io/platform/brush-parent/brush-core/common/base/service/mysql"
)
// AsiaTrackService provides persistence helpers for Asian-handicap odds
// rows (pojo.AsiaTrack) on top of the shared mysql BaseService.
type AsiaTrackService struct {
	mysql.BaseService
}
// Exist checks whether a record with the same MatchId, CompId and
// OddDate already exists; it returns that record's id (empty if absent)
// and the existence flag. Lookup errors are logged and reported as
// "not found".
func (this *AsiaTrackService) Exist(v *pojo.AsiaTrack) (string, bool) {
	temp := &pojo.AsiaTrack{MatchId: v.MatchId, CompId: v.CompId, OddDate: v.OddDate}
	var id string
	exist, err := mysql.GetEngine().Get(temp)
	if err != nil {
		base.Log.Error("Exist:", err)
	}
	if exist {
		id = temp.Id
	}
	return id, exist
}
// FindByMatchId returns all Asian-handicap odds rows for the given match
// id. Query errors are logged and an empty slice is returned.
func (this *AsiaTrackService) FindByMatchId(matchId string) []*pojo.AsiaTrack {
	dataList := make([]*pojo.AsiaTrack, 0)
	// Find needs a pointer to the slice so it can append results; the old
	// call passed the slice by value (compare FindByMatchIdCompId below,
	// which already uses &dataList), so results were silently dropped.
	err := mysql.GetEngine().Where(" MatchId = ? ", matchId).Find(&dataList)
	if err != nil {
		base.Log.Error("FindByMatchId:", err)
	}
	return dataList
}
// FindByMatchIdCompId returns the Asian-handicap odds rows for the given
// match id, limited to the given bookmaker (company) ids. The constant
// '0' entry keeps the IN clause non-empty when no compIds are supplied.
// Query errors are logged and an empty slice is returned.
func (this *AsiaTrackService) FindByMatchIdCompId(matchId string, compIds ...string) []*pojo.AsiaTrack {
	dataList := make([]*pojo.AsiaTrack, 0)

	// Build a parameterized condition instead of concatenating the values
	// into the SQL string (the old code was injectable via matchId/compIds).
	sql_build := strings.Builder{}
	sql_build.WriteString(" MatchId = ? AND CompId in ( '0' ")
	args := make([]interface{}, 0, len(compIds)+1)
	args = append(args, matchId)
	for _, v := range compIds {
		sql_build.WriteString(" ,?")
		args = append(args, v)
	}
	sql_build.WriteString(")")

	err := mysql.GetEngine().Where(sql_build.String(), args...).Find(&dataList)
	if err != nil {
		base.Log.Error("FindByMatchIdCompId:", err)
	}
	return dataList
}
|
package dcrlibwallet
import (
"context"
"net"
"strings"
"sync"
"github.com/decred/dcrd/addrmgr"
"github.com/decred/dcrd/rpcclient"
"github.com/decred/dcrwallet/chain"
"github.com/decred/dcrwallet/errors"
"github.com/decred/dcrwallet/p2p"
"github.com/decred/dcrwallet/spv"
"github.com/decred/dcrwallet/wallet"
)
// syncData carries the wallet-sync state shared between the SPV/RPC sync
// goroutines and the public LibWallet API. mu guards rpcClient (see
// connectToRpcClient); the remaining fields are written by the sync code.
type syncData struct {
	mu        sync.Mutex
	rpcClient *chain.RPCClient

	syncProgressListeners map[string]SyncProgressListener
	showLogs              bool // verbose sync logging toggle

	syncing    bool
	cancelSync context.CancelFunc

	rescanning   bool
	cancelRescan context.CancelFunc

	connectedPeers int32
	peersWG        sync.WaitGroup // CancelSync can wait on this for peers to drop

	*activeSyncData // estimate bookkeeping; non-nil only during a sync session
}
// activeSyncData accumulates timing and progress numbers used to compute
// sync estimates. A fresh instance is created by initActiveSyncData at
// the start of each sync session.
type activeSyncData struct {
	targetTimePerBlock int32 // expected block interval for the active network

	syncStage int32 // one of the *SyncStage constants below

	headersFetchProgress     HeadersFetchProgressReport
	addressDiscoveryProgress AddressDiscoveryProgressReport
	headersRescanProgress    HeadersRescanProgressReport

	beginFetchTimeStamp      int64
	totalFetchedHeadersCount int32
	startHeaderHeight        int32
	headersFetchTimeSpent    int64

	addressDiscoveryStartTime int64
	totalDiscoveryTimeSpent   int64
	addressDiscoveryCompleted chan bool

	rescanStartTime int64

	// accumulated via SyncInactiveForPeriod while the app is inactive
	totalInactiveSeconds int64
}
// SyncErrorCode classifies the errors reported to sync progress listeners.
type SyncErrorCode int32

const (
	ErrorCodeUnexpectedError SyncErrorCode = iota
	ErrorCodeDeadlineExceeded
)

// Sync stages, tracked in activeSyncData.syncStage.
const (
	InvalidSyncStage          = -1
	HeadersFetchSyncStage     = 0
	AddressDiscoverySyncStage = 1
	HeadersRescanSyncStage    = 2
)
// initActiveSyncData resets syncData.activeSyncData to a fresh estimate
// tracker for a new sync session, choosing the target block interval for
// the active network (mainnet vs testnet).
func (lw *LibWallet) initActiveSyncData() {
	headersFetchProgress := HeadersFetchProgressReport{}
	headersFetchProgress.GeneralSyncProgress = &GeneralSyncProgress{}

	addressDiscoveryProgress := AddressDiscoveryProgressReport{}
	addressDiscoveryProgress.GeneralSyncProgress = &GeneralSyncProgress{}

	headersRescanProgress := HeadersRescanProgressReport{}
	headersRescanProgress.GeneralSyncProgress = &GeneralSyncProgress{}

	var targetTimePerBlock int32
	if lw.activeNet.Name == "mainnet" {
		targetTimePerBlock = MainNetTargetTimePerBlock
	} else {
		targetTimePerBlock = TestNetTargetTimePerBlock
	}

	lw.syncData.activeSyncData = &activeSyncData{
		targetTimePerBlock: targetTimePerBlock,

		syncStage: InvalidSyncStage,

		headersFetchProgress:     headersFetchProgress,
		addressDiscoveryProgress: addressDiscoveryProgress,
		headersRescanProgress:    headersRescanProgress,

		// -1 marks "not started yet" for the timing fields.
		beginFetchTimeStamp:     -1,
		headersFetchTimeSpent:   -1,
		totalDiscoveryTimeSpent: -1,
	}
}
// AddSyncProgressListener registers a listener under uniqueIdentifier.
// Registering the same identifier twice is an error.
func (lw *LibWallet) AddSyncProgressListener(syncProgressListener SyncProgressListener, uniqueIdentifier string) error {
	if _, exists := lw.syncProgressListeners[uniqueIdentifier]; exists {
		return errors.New(ErrListenerAlreadyExist)
	}

	lw.syncProgressListeners[uniqueIdentifier] = syncProgressListener
	return nil
}
// RemoveSyncProgressListener deregisters the listener registered under
// uniqueIdentifier, if any.
func (lw *LibWallet) RemoveSyncProgressListener(uniqueIdentifier string) {
	// delete is a no-op for a missing key, so no existence check is needed.
	delete(lw.syncProgressListeners, uniqueIdentifier)
}
// EnableSyncLogs turns on verbose logging of sync progress reports.
func (lw *LibWallet) EnableSyncLogs() {
	lw.syncData.showLogs = true
}
// SyncInactiveForPeriod tells the sync estimator that the app was
// inactive for totalInactiveSeconds so the span can be excluded from the
// timing bookkeeping. A no-op when the wallet is not syncing.
func (lw *LibWallet) SyncInactiveForPeriod(totalInactiveSeconds int64) {
	if !lw.syncing || lw.activeSyncData == nil {
		log.Debug("Not accounting for inactive time, wallet is not syncing.")
		return
	}

	lw.syncData.totalInactiveSeconds += totalInactiveSeconds
	if lw.syncData.connectedPeers == 0 {
		// assume it would take another 60 seconds to reconnect to peers
		lw.syncData.totalInactiveSeconds += 60
	}
}
// SpvSync starts SPV synchronization for the loaded wallet. peerAddresses
// is an optional semicolon-separated list of peers to connect to; when
// supplied, at least one address must normalize successfully. The sync
// itself runs in a background goroutine, which notifies the registered
// progress listeners on cancellation or error. Errors immediately if no
// wallet is loaded or a sync is already in progress.
func (lw *LibWallet) SpvSync(peerAddresses string) error {
	loadedWallet, walletLoaded := lw.walletLoader.LoadedWallet()
	if !walletLoaded {
		return errors.New(ErrWalletNotLoaded)
	}

	// Error if the wallet is already syncing with the network.
	currentNetworkBackend, _ := loadedWallet.NetworkBackend()
	if currentNetworkBackend != nil {
		return errors.New(ErrSyncAlreadyInProgress)
	}

	// Local peer bound to an ephemeral port on the loopback address.
	addr := &net.TCPAddr{IP: net.ParseIP("::1"), Port: 0}
	addrManager := addrmgr.New(lw.walletDataDir, net.LookupIP) // TODO: be mindful of tor
	lp := p2p.NewLocalPeer(loadedWallet.ChainParams(), addr, addrManager)

	// Normalize the caller-supplied peer list; invalid entries are logged
	// and skipped, but an entirely invalid list is an error.
	var validPeerAddresses []string
	if peerAddresses != "" {
		addresses := strings.Split(peerAddresses, ";")
		for _, address := range addresses {
			peerAddress, err := NormalizeAddress(address, lw.activeNet.Params.DefaultPort)
			if err != nil {
				log.Errorf("SPV peer address invalid: %v", err)
			} else {
				validPeerAddresses = append(validPeerAddresses, peerAddress)
			}
		}

		if len(validPeerAddresses) == 0 {
			return errors.New(ErrInvalidPeers)
		}
	}

	// init activeSyncData to be used to hold data used
	// to calculate sync estimates only during sync
	lw.initActiveSyncData()

	syncer := spv.NewSyncer(loadedWallet, lp)
	syncer.SetNotifications(lw.spvSyncNotificationCallbacks())
	if len(validPeerAddresses) > 0 {
		syncer.SetPersistantPeers(validPeerAddresses)
	}

	loadedWallet.SetNetworkBackend(syncer)
	lw.walletLoader.SetNetworkBackend(syncer)

	ctx, cancel := contextWithShutdownCancel(context.Background())
	lw.cancelSync = cancel

	// syncer.Run uses a wait group to block the thread until sync completes or an error occurs
	go func() {
		lw.syncing = true
		defer func() {
			lw.syncing = false
		}()
		err := syncer.Run(ctx)
		if err != nil {
			if err == context.Canceled {
				lw.notifySyncCanceled()
			} else if err == context.DeadlineExceeded {
				lw.notifySyncError(ErrorCodeDeadlineExceeded, errors.E("SPV synchronization deadline exceeded: %v", err))
			} else {
				lw.notifySyncError(ErrorCodeUnexpectedError, err)
			}
		}
	}()
	return nil
}
// RpcSync starts RPC-backed synchronization for the loaded wallet against
// the dcrd node at networkAddress, authenticating with username/password
// and the optional TLS cert. The sync runs in a background goroutine,
// which notifies the registered progress listeners on cancellation or
// error. Errors immediately if no wallet is loaded, a sync is already in
// progress, or the RPC connection cannot be established.
func (lw *LibWallet) RpcSync(networkAddress string, username string, password string, cert []byte) error {
	loadedWallet, walletLoaded := lw.walletLoader.LoadedWallet()
	if !walletLoaded {
		return errors.New(ErrWalletNotLoaded)
	}

	// Error if the wallet is already syncing with the network.
	currentNetworkBackend, _ := loadedWallet.NetworkBackend()
	if currentNetworkBackend != nil {
		return errors.New(ErrSyncAlreadyInProgress)
	}

	ctx, cancel := contextWithShutdownCancel(context.Background())
	lw.cancelSync = cancel

	chainClient, err := lw.connectToRpcClient(ctx, networkAddress, username, password, cert)
	if err != nil {
		return err
	}

	// init activeSyncData to be used to hold data used
	// to calculate sync estimates only during sync
	lw.initActiveSyncData()

	syncer := chain.NewRPCSyncer(loadedWallet, chainClient)
	syncer.SetNotifications(lw.generalSyncNotificationCallbacks())

	networkBackend := chain.BackendFromRPCClient(chainClient.Client)
	lw.walletLoader.SetNetworkBackend(networkBackend)
	loadedWallet.SetNetworkBackend(networkBackend)

	// notify sync progress listeners that connected peer count will not be reported because we're using rpc
	for _, syncProgressListener := range lw.syncProgressListeners {
		syncProgressListener.OnPeerConnectedOrDisconnected(-1)
	}

	// syncer.Run uses a wait group to block the thread until sync completes or an error occurs
	go func() {
		lw.syncing = true
		defer func() {
			lw.syncing = false
		}()
		err := syncer.Run(ctx, true)
		if err != nil {
			if err == context.Canceled {
				lw.notifySyncCanceled()
			} else if err == context.DeadlineExceeded {
				lw.notifySyncError(ErrorCodeDeadlineExceeded, errors.E("RPC synchronization deadline exceeded: %v", err))
			} else {
				lw.notifySyncError(ErrorCodeUnexpectedError, err)
			}
		}
	}()
	return nil
}
// connectToRpcClient returns the cached RPC client when one exists;
// otherwise it normalizes networkAddress, dials a new client with the
// given credentials, starts it, and caches it for reuse. lw.mu guards
// the cached rpcClient field.
func (lw *LibWallet) connectToRpcClient(ctx context.Context, networkAddress string, username string, password string,
	cert []byte) (chainClient *chain.RPCClient, err error) {

	lw.mu.Lock()
	chainClient = lw.rpcClient
	lw.mu.Unlock()

	// If the rpcClient is already set, you can just use that instead of attempting a new connection.
	if chainClient != nil {
		return
	}

	// rpcClient is not already set, attempt a new connection.
	networkAddress, err = NormalizeAddress(networkAddress, lw.activeNet.JSONRPCClientPort)
	if err != nil {
		return nil, errors.New(ErrInvalidAddress)
	}
	// An empty cert disables TLS for the RPC connection.
	chainClient, err = chain.NewRPCClient(lw.activeNet.Params, networkAddress, username, password, cert, len(cert) == 0)
	if err != nil {
		return nil, translateError(err)
	}

	err = chainClient.Start(ctx, false)
	if err != nil {
		if err == rpcclient.ErrInvalidAuth {
			return nil, errors.New(ErrInvalid)
		}
		if errors.Match(errors.E(context.Canceled), err) {
			return nil, errors.New(ErrContextCanceled)
		}
		return nil, errors.New(ErrUnavailable)
	}

	// Set rpcClient so it can be used subsequently without re-connecting to the rpc server.
	lw.mu.Lock()
	lw.rpcClient = chainClient
	lw.mu.Unlock()

	return
}
// CancelSync stops an in-progress sync by cancelling its context and
// detaching the network backend. When losePeers is true it blocks until
// all peer connections have dropped — required when the wallet database
// will be closed right after cancelling.
func (lw *LibWallet) CancelSync(losePeers bool) {
	if lw.cancelSync != nil {
		lw.cancelSync() // will trigger context canceled in rpcSync or spvSync
		lw.cancelSync = nil
	}

	loadedWallet, walletLoaded := lw.walletLoader.LoadedWallet()
	if !walletLoaded {
		return
	}

	lw.walletLoader.SetNetworkBackend(nil)
	loadedWallet.SetNetworkBackend(nil)

	// It's important to wait to lose all peers when canceling sync
	// if the wallet database would be closed after canceling sync.
	if losePeers {
		log.Info("Waiting to lose all peers")
		lw.syncData.peersWG.Wait()
		log.Info("All peers are gone")
	}
}
// IsSyncing reports whether a sync (SPV or RPC) is currently running.
func (lw *LibWallet) IsSyncing() bool {
	return lw.syncData.syncing
}
// RescanBlocks starts an asynchronous rescan of the whole chain from
// height 0, forwarding progress reports to the registered sync progress
// listeners and sending a final report when done. Errors if no network
// backend is attached or a rescan is already running.
//
// NOTE(review): rescanning is only set inside the goroutine, so two
// near-simultaneous calls could both pass the lw.rescanning guard —
// confirm whether callers serialize access.
func (lw *LibWallet) RescanBlocks() error {
	netBackend, err := lw.wallet.NetworkBackend()
	if err != nil {
		return errors.E(ErrNotConnected)
	}

	if lw.rescanning {
		return errors.E(ErrInvalid)
	}

	go func() {
		defer func() {
			lw.rescanning = false
		}()

		lw.rescanning = true
		progress := make(chan wallet.RescanProgress, 1)
		ctx, cancel := contextWithShutdownCancel(context.Background())
		lw.syncData.cancelRescan = cancel

		var totalHeightRescanned int32
		go lw.wallet.RescanProgressFromHeight(ctx, netBackend, 0, progress)

		// Relay each progress update to the listeners, bailing out early if
		// the rescan reports an error or the context is cancelled.
		for p := range progress {
			if p.Err != nil {
				log.Error(p.Err)
				return
			}
			totalHeightRescanned += p.ScannedThrough

			report := &HeadersRescanProgressReport{
				CurrentRescanHeight: totalHeightRescanned,
				TotalHeadersToScan:  lw.GetBestBlock(),
			}
			for _, syncProgressListener := range lw.syncProgressListeners {
				syncProgressListener.OnHeadersRescanProgress(report)
			}

			select {
			case <-ctx.Done():
				log.Info("Rescan cancelled through context")
				lw.syncData.cancelRescan = nil
				return
			default:
				continue
			}
		}

		// set this to nil, it no longer need since rescan has completed
		lw.syncData.cancelRescan = nil

		// Send final report after rescan has completed.
		report := &HeadersRescanProgressReport{
			CurrentRescanHeight: totalHeightRescanned,
			TotalHeadersToScan:  lw.GetBestBlock(),
		}
		for _, syncProgressListener := range lw.syncProgressListeners {
			syncProgressListener.OnHeadersRescanProgress(report)
		}
	}()

	return nil
}
// CancelRescan cancels an in-progress block rescan, if any.
func (lw *LibWallet) CancelRescan() {
	if lw.syncData.cancelRescan != nil {
		lw.syncData.cancelRescan()
		lw.syncData.cancelRescan = nil
	}
}
// IsScanning reports whether a block rescan is currently running.
func (lw *LibWallet) IsScanning() bool {
	return lw.syncData.rescanning
}
// PublishLastSyncProgress re-emits the most recent progress report for
// the current sync stage; a no-op when no sync session is active.
func (lw *LibWallet) PublishLastSyncProgress() {
	if lw.activeSyncData == nil {
		return
	}

	switch lw.activeSyncData.syncStage {
	case HeadersFetchSyncStage:
		lw.publishFetchHeadersProgress()
	case AddressDiscoverySyncStage:
		lw.publishAddressDiscoveryProgress()
	case HeadersRescanSyncStage:
		lw.publishHeadersRescanProgress()
	}
}
// GetBestBlock returns the height of the wallet's main chain tip.
func (lw *LibWallet) GetBestBlock() int32 {
	_, height := lw.wallet.MainChainTip()
	return height
}
// GetBestBlockTimeStamp returns the timestamp of the main chain tip
// block, or 0 if the block info lookup fails (the error is logged).
func (lw *LibWallet) GetBestBlockTimeStamp() int64 {
	_, height := lw.wallet.MainChainTip()
	identifier := wallet.NewBlockIdentifierFromHeight(height)
	info, err := lw.wallet.BlockInfo(identifier)
	if err != nil {
		log.Error(err)
		return 0
	}
	return info.Timestamp
}
|
package controller
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
"walletApi/src/model"
"github.com/astaxie/beego"
)
// QuestionController serves the admin pages and JSON endpoints for
// managing user questions (list, detail, delete).
type QuestionController struct {
	beego.Controller
}
// InitList renders the question list page.
// @router /Question/InitList/ [get]
func (c *QuestionController) InitList() {
	c.TplName = "questionlist.html"
}
// InitAdd renders the question detail page. When an id query parameter is
// supplied, the question is loaded and its comma-separated attachment
// names are expanded into a display-name -> full-URL map.
// @router /Question/InitAdd/ [get]
func (c *QuestionController) InitAdd() {
	idStr := c.GetString("id")
	if idStr != "" {
		id, _ := strconv.ParseInt(idStr, 10, 64)
		question, _ := model.GetQuestionInfo(id)
		if question != nil {
			c.Data["question"] = question
			if question.ImgName != "" {
				arr := strings.Split(question.ImgName, ",")
				fileMap := make(map[string]string, len(arr))

				// Join the base URL and file name with exactly one slash.
				// The old code shadowed the builtin len and byte-sliced the
				// string at its rune count, which is wrong for multi-byte
				// base URLs and panicked on an empty BaseUrl; inspecting the
				// last rune handles both cases.
				base := question.BaseUrl
				if r, _ := utf8.DecodeLastRuneInString(base); r != '/' {
					base += "/"
				}
				for i, v := range arr {
					fileMap[fmt.Sprintf("附件%d", i+1)] = base + v
				}
				c.Data["fileMap"] = fileMap
			}
		}
	}
	c.TplName = "questioninfo.html"
}
// List returns one page of questions as JSON for the grid view. It
// accepts keyWord (filter), current (1-based page number, defaulting to
// 1) and rowCount (page size).
// @router /Question/List/ [post]
func (c *QuestionController) List() {
	keyWord := c.GetString("keyWord")
	pageNo, _ := c.GetInt("current")
	rowCount, _ := c.GetInt("rowCount")
	if pageNo == 0 {
		pageNo = 1
	}
	resultMap := model.SearchQuestions(rowCount, pageNo, keyWord)
	c.Data["json"] = map[string]interface{}{"rows": resultMap["data"], "rowCount": rowCount, "current": pageNo, "total": resultMap["total"]}
	c.ServeJSON()
}
// DeleteQuestion deletes the question identified by the :id path
// parameter and returns a JSON Result (Status 0 = success, 1 = failure
// with the error text in Msg).
// @router /Question/DeleteQuestion/:id [get]
func (c *QuestionController) DeleteQuestion() {
	result := new(model.Result)
	id, _ := strconv.ParseInt(c.Ctx.Input.Param(":id"), 10, 64)
	err := model.DeleteQuestion(id)
	if err != nil {
		result.Status = 1
		result.Msg = fmt.Sprintf("删除失败,%s", err.Error())
	} else {
		result.Status = 0
	}
	c.Data["json"] = result
	c.ServeJSON()
}
|
package main
import (
"bufio"
"fmt"
"net"
"os"
)
// main connects to the local TCP server on port 20000 and forwards stdin
// to it line by line until a read or write fails.
func main() {
	conn, err := net.Dial("tcp", "localhost:20000")
	if err != nil {
		fmt.Println("tcpClient connect failed, err:", err)
		// Without this return, conn is nil and both the deferred Close and
		// the Write below would panic.
		return
	}
	defer conn.Close()

	reader := bufio.NewReader(os.Stdin)
	for {
		msg, err := reader.ReadString('\n')
		if err != nil {
			fmt.Println("tcpClient reader failed, err:", err)
			break
		}
		_, err = conn.Write([]byte(msg))
		if err != nil {
			fmt.Println("tcpClient Write failed, err:", err)
			break
		}
	}
}
|
package mockingjay
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
)
// debugModeOff is a convenience constant for constructing a quiet Server.
const debugModeOff = false

// mjLogger is the minimal logging interface the server needs.
type mjLogger interface {
	Println(...interface{})
}

// Server allows you to configure a HTTP server for a slice of fake endpoints
type Server struct {
	Endpoints      []FakeEndpoint
	requests       []Request // every request seen so far, exposed at /requests
	requestMatcher func(a, b Request, endpointName string) bool
	logger         mjLogger
}
// NewServer creates a new Server instance
func NewServer(endpoints []FakeEndpoint, debugMode bool) *Server {
	// Log to stdout when debugging, otherwise swallow all log output.
	logSink := ioutil.Discard
	if debugMode {
		logSink = os.Stdout
	}

	s := &Server{
		Endpoints: endpoints,
		requests:  make([]Request, 0),
		logger:    log.New(logSink, "mocking-jay", log.Ldate|log.Ltime),
	}
	s.requestMatcher = func(a, b Request, endpointName string) bool {
		return requestMatches(a, b, endpointName, s.logger)
	}
	return s
}
// Meta-endpoints served by the fake server itself.
const requestsURL = "/requests"           // lists requests received so far
const endpointsURL = "/mj-endpoints"      // lists the configured endpoints
const newEndpointURL = "/mj-new-endpoint" // accepts a new endpoint definition
// ServeHTTP dispatches the meta-endpoints to their handlers; any other
// URL is recorded and matched against the configured fake endpoints.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch r.URL.String() {
	case endpointsURL:
		s.serveEndpoints(w)
	case newEndpointURL:
		s.createEndpoint(w, r)
	case requestsURL:
		s.listAvailableRequests(w)
	default:
		mjRequest := NewRequest(r)
		s.logger.Println("Trying to match a request")
		s.logger.Println(mjRequest.String())
		// Remember the request so /requests can report it later.
		s.requests = append(s.requests, mjRequest)
		writeToHTTP(s.getResponse(mjRequest), w)
	}
}
// listAvailableRequests writes all recorded requests as JSON, or a 500
// if they cannot be marshalled.
func (s *Server) listAvailableRequests(w http.ResponseWriter) {
	payload, err := json.Marshal(s.requests)
	if err != nil {
		log.Println(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
	} else {
		w.WriteHeader(http.StatusOK)
		w.Write(payload)
	}
}
// getResponse returns the response of the first configured endpoint whose
// request matches r (note: a pointer into the loop's copy of the
// endpoint, not into s.Endpoints), or a canned not-found response.
func (s *Server) getResponse(r Request) *response {
	for _, endpoint := range s.Endpoints {
		if s.requestMatcher(endpoint.Request, r, endpoint.Name) {
			return &endpoint.Response
		}
	}
	return newNotFound(r, s.Endpoints)
}
// serveEndpoints writes the configured endpoints as a JSON document.
func (s *Server) serveEndpoints(w http.ResponseWriter) {
	endpointsBody, err := json.Marshal(s.Endpoints)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return // previously fell through and wrote a body after the 500
	}

	// Headers must be set before the first Write; the old code set
	// Content-Type afterwards, which had no effect.
	w.Header().Set("Content-Type", "application/json")
	w.Write(endpointsBody)
}
// createEndpoint decodes a FakeEndpoint from the request body, appends it
// to the server's configuration and replies 201 on success; malformed
// bodies get a 400.
func (s *Server) createEndpoint(w http.ResponseWriter, r *http.Request) {
	var newEndpoint FakeEndpoint
	defer r.Body.Close()

	endpointBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return // previously continued decoding an empty body
	}

	if err := json.Unmarshal(endpointBody, &newEndpoint); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		// Previously fell through, registering a zero-valued endpoint and
		// writing a 201 after the 400.
		return
	}

	s.Endpoints = append(s.Endpoints, newEndpoint)
	w.WriteHeader(http.StatusCreated)
}
|
package benchmark
import (
"context"
"database/sql"
"fmt"
"testing"
"github.com/go-gorp/gorp"
"github.com/jinzhu/gorm"
"github.com/jmoiron/sqlx"
"github.com/ulule/loukoum/v3"
"xorm.io/xorm"
"github.com/ulule/makroud"
"github.com/ulule/makroud-benchmarks/mimic"
)
// BenchmarkMakroud_SelectAll measures makroud executing a loukoum
// "SELECT * FROM jets" query against the mimic driver.
func BenchmarkMakroud_SelectAll(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	node, err := makroud.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	driver, err := makroud.New(makroud.WithNode(node))
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	query := loukoum.Select("*").From("jets")

	b.Run("makroud", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetMakroud
			err = makroud.Exec(ctx, driver, query, &store)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkSQLX_SelectAll measures sqlx preparing and executing
// "SELECT * FROM jets" against the mimic driver on every iteration.
func BenchmarkSQLX_SelectAll(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	dbx, err := sqlx.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	query := "SELECT * FROM jets"
	args := []interface{}{}

	b.Run("sqlx", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetSQLX

			stmt, err := dbx.PreparexContext(ctx, query)
			if err != nil {
				b.Fatal(err)
			}

			err = stmt.SelectContext(ctx, &store, args...)
			// Close each statement as soon as it is done: the old deferred
			// Close accumulated b.N open statements until the sub-benchmark
			// function returned.
			stmt.Close()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORM_SelectAll measures gorm loading all rows of "jets"
// against the mimic driver.
func BenchmarkGORM_SelectAll(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	gormdb, err := gorm.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("gorm", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetGorm
			err := gormdb.Find(&store).Error
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORP_SelectAll measures gorp loading all rows of "jets"
// against the mimic driver.
func BenchmarkGORP_SelectAll(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	db, err := sql.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	gorpdb := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	// (A duplicated `if err != nil` check was removed here — err could not
	// have changed since the sql.Open check above.)

	b.Run("gorp", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetGorp
			_, err = gorpdb.Select(&store, "SELECT * FROM jets")
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkXORM_SelectAll measures xorm loading all rows of "jets"
// against the mimic driver.
func BenchmarkXORM_SelectAll(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	xormdb, err := xorm.NewEngine("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("xorm", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetXorm
			err = xormdb.Find(&store)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkMakroud_SelectSubset measures makroud selecting a fixed
// subset of columns from "jets" against the mimic driver.
func BenchmarkMakroud_SelectSubset(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	node, err := makroud.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	driver, err := makroud.New(makroud.WithNode(node))
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	query := loukoum.Select("id", "name", "color", "uuid", "identifier", "cargo", "manifest").From("jets")

	b.Run("makroud", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetMakroud
			err = makroud.Exec(ctx, driver, query, &store)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkSQLX_SelectSubset measures sqlx preparing and executing a
// column-subset query into a slice of structs on every iteration.
func BenchmarkSQLX_SelectSubset(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	dbx, err := sqlx.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	query := "SELECT id, name, color, uuid, identifier, cargo, manifest FROM jets"
	args := []interface{}{}

	b.Run("sqlx", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetSQLX

			stmt, err := dbx.PreparexContext(ctx, query)
			if err != nil {
				b.Fatal(err)
			}

			err = stmt.SelectContext(ctx, &store, args...)
			if err != nil {
				b.Fatal(err)
			}

			// Close immediately: a defer here would pile up b.N pending
			// closes (leaked prepared statements) until the enclosing
			// benchmark function returns.
			if err := stmt.Close(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
func BenchmarkGORM_SelectSubset(b *testing.B) {
exec := jetExecSelect()
dsn := mimic.NewQuery(exec)
gormdb, err := gorm.Open("mimic", dsn)
if err != nil {
b.Fatal(err)
}
b.Run("gorm", func(b *testing.B) {
for i := 0; i < b.N; i++ {
var store []JetGorm
err := gormdb.Select("id, name, color, uuid, identifier, cargo, manifest").Find(&store).Error
if err != nil {
b.Fatal(err)
}
}
})
}
// BenchmarkGORP_SelectSubset measures gorp selecting a subset of columns
// from the jets table into a slice of structs.
func BenchmarkGORP_SelectSubset(b *testing.B) {
	exec := jetExecSelect()
	dsn := mimic.NewQuery(exec)

	db, err := sql.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	// gorp wraps an existing *sql.DB; building the DbMap cannot fail, so
	// the duplicated error check that used to follow here was dead code.
	gorpdb := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}

	b.Run("gorp", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetGorp
			_, err = gorpdb.Select(&store, "SELECT id, name, color, uuid, identifier, cargo, manifest FROM jets")
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
func BenchmarkXORM_SelectSubset(b *testing.B) {
exec := jetExecSelect()
dsn := mimic.NewQuery(exec)
xormdb, err := xorm.NewEngine("mimic", dsn)
if err != nil {
b.Fatal(err)
}
b.Run("xorm", func(b *testing.B) {
for i := 0; i < b.N; i++ {
var store []JetXorm
err = xormdb.Select("id, name, color, uuid, identifier, cargo, manifest").Find(&store)
if err != nil {
b.Fatal(err)
}
}
})
}
// BenchmarkMakroud_SelectComplex measures makroud executing a loukoum
// query with conditions, grouping, limit and offset.
func BenchmarkMakroud_SelectComplex(b *testing.B) {
	exec := jetExecSelect()
	exec.NumInput = -1
	dsn := mimic.NewQuery(exec)

	node, err := makroud.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	driver, err := makroud.New(makroud.WithNode(node))
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	stmt := loukoum.
		Select("id", "name", "color", "uuid", "identifier", "cargo", "manifest").
		From("jets").
		Where(loukoum.Condition("id").GreaterThan(1)).
		Where(loukoum.Condition("name").NotEqual("thing")).
		Limit(1).
		Offset(1).
		GroupBy("id")

	b.Run("makroud", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var rows []JetMakroud
			if err := makroud.Exec(ctx, driver, stmt, &rows); err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkSQLX_SelectComplex measures sqlx preparing and executing a
// query with conditions, grouping, limit and offset on every iteration.
func BenchmarkSQLX_SelectComplex(b *testing.B) {
	exec := jetExecSelect()
	exec.NumInput = -1
	dsn := mimic.NewQuery(exec)

	dbx, err := sqlx.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	query := fmt.Sprint(
		"SELECT id, name, color, uuid, identifier, cargo, manifest FROM jets ",
		"WHERE id > :id AND name != :name GROUP BY id OFFSET 1 LIMIT 1",
	)
	args := []interface{}{1, "thing"}

	b.Run("sqlx", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetSQLX

			stmt, err := dbx.PreparexContext(ctx, query)
			if err != nil {
				b.Fatal(err)
			}

			err = stmt.SelectContext(ctx, &store, args...)
			if err != nil {
				b.Fatal(err)
			}

			// Close immediately: a defer here would pile up b.N pending
			// closes (leaked prepared statements) until the enclosing
			// benchmark function returns.
			if err := stmt.Close(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORM_SelectComplex measures gorm building and running a query
// with conditions, grouping, limit and offset on every iteration.
func BenchmarkGORM_SelectComplex(b *testing.B) {
	exec := jetExecSelect()
	exec.NumInput = -1
	dsn := mimic.NewQuery(exec)

	gormdb, err := gorm.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("gorm", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var rows []JetGorm
			// The builder chain is kept in the original order so the
			// generated statement is identical.
			scope := gormdb.Where("id > ?", 1).
				Where("name <> ?", "thing").
				Limit(1).
				Group("id").
				Offset(1).
				Select("id, name, color, uuid, identifier, cargo, manifest")
			if err := scope.Find(&rows).Error; err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORP_SelectComplex measures gorp running a parameterized query
// with conditions, grouping, limit and offset.
func BenchmarkGORP_SelectComplex(b *testing.B) {
	exec := jetExecSelect()
	exec.NumInput = -1
	dsn := mimic.NewQuery(exec)

	db, err := sql.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	// gorp wraps an existing *sql.DB; building the DbMap cannot fail, so
	// the duplicated error check that used to follow here was dead code.
	gorpdb := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}

	query := fmt.Sprint(
		"SELECT id, name, color, uuid, identifier, cargo, manifest FROM jets ",
		"WHERE id > $1 AND name != $2 GROUP BY id OFFSET $3 LIMIT $4",
	)

	b.Run("gorp", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var store []JetGorp
			_, err = gorpdb.Select(&store, query, 1, "thing", 1, 1)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkXORM_SelectComplex measures xorm building and running a query
// with conditions, grouping, limit and offset on every iteration.
func BenchmarkXORM_SelectComplex(b *testing.B) {
	exec := jetExecSelect()
	exec.NumInput = -1
	dsn := mimic.NewQuery(exec)

	engine, err := xorm.NewEngine("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("xorm", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var rows []JetXorm
			// The builder chain is kept in the original order so the
			// generated statement is identical.
			session := engine.
				Select("id, name, color, uuid, identifier, cargo, manifest").
				Where("id > ?", 1).
				Where("name <> ?", "thing").
				Limit(1, 1).
				GroupBy("id")
			if err := session.Find(&rows); err != nil {
				b.Fatal(err)
			}
		}
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.