text stringlengths 11 4.05M |
|---|
package model
// Order models a customer order together with the payment-gateway
// response fields stored alongside it (NonceStr/Openid/TradeType —
// presumably a WeChat-Pay-style gateway; confirm with the payment code).
type Order struct {
	UserId     int64       `json:"user_id"`
	OrderGoods []CartGoods `json:"goods"`
	TotalPrice float64     `json:"total_price"`
	Consignee  string      `json:"consignee"` // delivery recipient name
	// NOTE(review): int64 drops leading zeros and "+" prefixes; a string
	// is usually safer for phone numbers — confirm before changing.
	Mobile   int64  `json:"mobile"`
	Province string `json:"province"`
	City     string `json:"city"`
	District string `json:"district"`
	Detail   string `json:"detail"` // street-level address detail
	OrderId  int64  `json:"order_id"`
	PayStatus int   `json:"pay_status"`
	PayPrice float64 `json:"pay_price"`
	// NOTE(review): tag "create_at" is inconsistent with "updated_at";
	// confirm whether API clients rely on it before renaming.
	CreatedAt string `json:"create_at"`
	UpdatedAt string `json:"updated_at"`
	NonceStr  string `json:"nonce_str"`
	SignType  string `json:"sign_type"`
	Openid    string `json:"openid"`
	IsSubscribe   string `json:"is_subscribe"`
	TradeType     string `json:"trade_type"`
	BankType      string `json:"bank_type"`
	TransactionId string `json:"transaction_id"`
	PayTimeEnd    string `json:"pay_time_end"`
	OrderStatus   int    `json:"order_status"`
}
// CreateOrder is the request payload for creating an order from a cart
// entry and a shipping address.
type CreateOrder struct {
	CartId    int64 `json:"cart_id"`
	AddressId int64 `json:"address_id"`
}
// PayOrder identifies the order a given user wants to pay.
type PayOrder struct {
	OrderId int64 `json:"order_id"`
	UserId  int64 `json:"user_id"`
}
// OrderList is the query payload for listing a user's orders, filtered
// by payment and order status, with pagination.
type OrderList struct {
	UserId      int64 `json:"user_id"`
	PayStatus   int   `json:"pay_status"`
	OrderStatus int   `json:"order_status"`
	// NOTE(review): no json tag — serializes as "Page"; confirm clients
	// expect that casing before adding a tag.
	Page Page
}
// Page carries common pagination parameters.
type Page struct {
	PageSize int `json:"page_size"`
	PageNum  int `json:"page_num"`
}
|
package url
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestFnPort checks that fnPort extracts the port component from a URL
// containing a subdomain host, path, query and fragment (with spaces).
func TestFnPort(t *testing.T) {
	evaluator := &fnPort{}
	rawURL := "https://subdomain.example.com:8080/path?q=hello world#fragment with space"
	got, err := evaluator.Eval(rawURL)
	assert.Nil(t, err)
	assert.Equal(t, "8080", got)
}
|
package connection
import (
jsoniter "github.com/json-iterator/go"
)
var json = jsoniter.ConfigFastest
|
package mailchimp_test
import (
"context"
"net/url"
"os"
"testing"
"github.com/spotlightpa/almanack/internal/mailchimp"
)
// TestV3 exercises ListCampaigns against the live MailChimp API; it is
// skipped unless test credentials are present in the environment.
func TestV3(t *testing.T) {
	var (
		apiKey = os.Getenv("ALMANACK_MC_TEST_API_KEY")
		listID = os.Getenv("ALMANACK_MC_TEST_LISTID")
	)
	if apiKey == "" || listID == "" {
		t.Skip("Missing MailChimp ENV vars")
	}
	client := mailchimp.NewV3(apiKey, listID, nil)
	res, err := client.ListCampaigns(context.Background())
	if err != nil {
		t.Fatalf("err != nil: %v", err)
	}
	if len(res.Campaigns) == 0 {
		t.Fatal("no campaigns found")
	}
	for _, campaign := range res.Campaigns {
		_, parseErr := url.Parse(campaign.ArchiveURL)
		if campaign.ArchiveURL == "" || parseErr != nil {
			t.Errorf("received bad archive URL: %q", campaign.ArchiveURL)
		}
		if campaign.SentAt.IsZero() {
			t.Errorf("missing send time for %q", campaign.ArchiveURL)
		}
		if campaign.Settings.Subject == "" {
			t.Errorf("missing subject line for %q", campaign.ArchiveURL)
		}
		if campaign.Settings.Title == "" {
			t.Errorf("missing title for %q", campaign.ArchiveURL)
		}
	}
}
|
package server
import (
"fmt"
"log"
"net/http"
"runtime/debug"
"text/template"
"github.com/Gigamons/common/logger"
"github.com/gorilla/mux"
)
func errHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
fmt.Println("------------ERROR------------")
fmt.Println(err)
fmt.Println("---------ERROR TRACE---------")
fmt.Println(string(debug.Stack()))
fmt.Println("----------END ERROR----------")
}
}()
next.ServeHTTP(w, r)
})
}
// StartServer to start the HTTP Server.
// It installs the panic-recovery middleware on a fresh gorilla/mux
// router and blocks in ListenAndServe; log.Fatal exits the process if
// the listener cannot be started.
//
// NOTE(review): port is int16, which cannot represent ports above 32767
// (valid ports go up to 65535). Widening to int/uint16 would change the
// exported signature — confirm callers before fixing.
func StartServer(host string, port int16) {
	r := mux.NewRouter()
	r.Use(errHandler)
	logger.Info(" Serverlist is listening on port %v", port)
	log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%v", host, port), r))
}
func render(w http.ResponseWriter, file string) {
tmpl, err := template.ParseFiles("templates/" + file)
if err != nil {
panic(err)
}
tmpl.Execute(w, tmpl)
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package sync2
import (
"context"
"time"
"storj.io/common/time2"
)
// Sleep implements sleeping with cancellation.
// The waiting is delegated to time2.Sleep; the bool presumably reports
// whether the full duration elapsed (false on early cancellation) —
// confirm against the storj.io/common/time2 documentation.
func Sleep(ctx context.Context, duration time.Duration) bool {
	return time2.Sleep(ctx, duration)
}
|
package database
import (
"log"
"math/rand"
"strconv"
utils "banners.utils"
_ "github.com/denisenkom/go-mssqldb"
"github.com/jmoiron/sqlx"
)
// Seed opens the SQL Server database identified by ConnectionString and
// (re)creates and populates all tables. The process exits via log.Fatal
// if the connection cannot be opened.
func Seed() {
	db, err := sqlx.Open("sqlserver", ConnectionString)
	if err != nil {
		// log.Fatal never returns, so nothing below runs on failure;
		// the redundant else branch is gone (early-exit idiom).
		log.Fatal(err)
	}
	defer db.Close()
	InitializeTables(*db)
}
// InitializeTables drops and recreates the banners and zones tables
// (plus the stored procedure) and seeds them with demo rows.
//
// NOTE(review): connection is passed by value, copying the sqlx.DB
// struct on every call; *sqlx.DB would be conventional, but changing it
// alters the exported signature — confirm callers first.
func InitializeTables(connection sqlx.DB) {
	utils.ExecuteQuery(DestroyBannerTable, connection)
	utils.ExecuteQuery(DestroyZonesTable, connection)
	utils.ExecuteQuery(BannersSchema, connection)
	utils.ExecuteQuery(ZonesSchema, connection)
	utils.ExecuteQuery(StoredProcedureQuery, connection)
	PopulateZones(connection)
	PopulateBanners(connection)
}
// PopulateBanners inserts 20 demo banner rows, each assigned to a
// random zone id in [0,6).
func PopulateBanners(connection sqlx.DB) {
	for i := 0; i < 20; i++ {
		query := "INSERT INTO [dbo].[banners]([zoneid], [id],[date],[background_color],[background_image],[button_text],[button_color],[button_background],[title],[description],[text_align],[link],[link_isExternal])VALUES(" +
			strconv.Itoa(rand.Intn(6)) + ", " + strconv.Itoa(i) +
			",'2019-09-13T08:00:00Z','bg_col', 'bg_img', 'btn_txt', 'btn_col', 'btn_bg', 'title', 'description', 'left', 'link', 'true')"
		utils.ExecuteQuery(query, connection)
	}
}
// PopulateZones seeds the zones table with a fixed set of demo zones.
// The five copy-pasted INSERT statements are replaced by a data-driven
// loop; the generated SQL (and execution order) is identical to before.
func PopulateZones(connection sqlx.DB) {
	zones := []struct {
		id, deviceID, pageID int
		language             string
	}{
		{1, 1, 1, "UK"},
		{2, 1, 1, "IT"},
		{3, 2, 1, "IT"},
		{4, 1, 2, "UK"},
		{5, 1, 2, "IT"},
	}
	for _, z := range zones {
		s := "INSERT INTO [dbo].[zones]([id],[deviceId],[pageId],[languageCode],[width],[height])VALUES(" +
			strconv.Itoa(z.id) + ", " + strconv.Itoa(z.deviceID) + ", " + strconv.Itoa(z.pageID) +
			", '" + z.language + "', 1920, 400)"
		utils.ExecuteQuery(s, connection)
	}
}
|
package generator
import (
"fmt"
"os"
"time"
"github.com/RomanosTrechlis/blog-generator/config"
"github.com/beevik/etree"
)
// rssGenerator object
// It renders the site's posts into an RSS 2.0 feed written to
// <destination>/index.xml.
type rssGenerator struct {
	posts       []*post
	destination string
	siteInfo    *config.SiteInformation
}

// rssDateFormat is the pubDate/lastBuildDate layout (RFC 822-style date
// with a numeric zone), expressed against Go's reference time.
const rssDateFormat = "02 Jan 2006 15:04 -0700"
// Generate creates an RSS feed
// It builds an RSS 2.0 document whose channel comes from the site
// configuration, adds one <item> per post, and writes the result to
// <destination>/index.xml.
func (g *rssGenerator) Generate() (err error) {
	fmt.Println("\tGenerating RSS...")
	posts := g.posts
	destination := g.destination
	doc := etree.NewDocument()
	doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
	rss := doc.CreateElement("rss")
	rss.CreateAttr("xmlns:atom", "http://www.w3.org/2005/Atom")
	rss.CreateAttr("version", "2.0")
	channel := rss.CreateElement("channel")
	siteInfo := g.siteInfo
	channel.CreateElement("title").SetText(siteInfo.BlogTitle)
	channel.CreateElement("link").SetText(siteInfo.BlogURL)
	channel.CreateElement("language").SetText(siteInfo.BlogLanguage)
	channel.CreateElement("description").SetText(siteInfo.BlogDescription)
	channel.CreateElement("lastBuildDate").SetText(time.Now().Format(rssDateFormat))
	// Self-referencing atom:link, as recommended for RSS feeds.
	atomLink := channel.CreateElement("atom:link")
	atomLink.CreateAttr("href", fmt.Sprintf("%s/index.xml", siteInfo.BlogURL))
	atomLink.CreateAttr("rel", "self")
	atomLink.CreateAttr("type", "application/rss+xml")
	for _, post := range posts {
		err := g.addItem(channel, post)
		if err != nil {
			return err
		}
	}
	filePath := fmt.Sprintf("%s/index.xml", destination)
	// NOTE(review): this Create/Close pair looks redundant —
	// doc.WriteToFile below creates the file itself; presumably kept as
	// an early writability check. Confirm before removing (it is also the
	// only use of the os import in this view of the file).
	f, err := os.Create(filePath)
	if err != nil {
		return fmt.Errorf("error creating file %s: %v", filePath, err)
	}
	f.Close()
	err = doc.WriteToFile(filePath)
	if err != nil {
		return fmt.Errorf("error writing to file %s: %v", filePath, err)
	}
	fmt.Println("\tFinished generating RSS...")
	return nil
}
// addItem appends one <item> element for post under element, using the
// post's metadata for title/date and its rendered HTML as description.
func (g *rssGenerator) addItem(element *etree.Element, post *post) (err error) {
	link := fmt.Sprintf("%s/%s/", g.siteInfo.BlogURL, post.name[1:])
	item := element.CreateElement("item")
	item.CreateElement("title").SetText(post.meta.Title)
	item.CreateElement("link").SetText(link)
	item.CreateElement("guid").SetText(link)
	pubDate, parseErr := time.Parse(g.siteInfo.DateFormat, post.meta.Date)
	if parseErr != nil {
		return fmt.Errorf("error parsing date %s: %v", post.meta.Date, parseErr)
	}
	item.CreateElement("pubDate").SetText(pubDate.Format(rssDateFormat))
	item.CreateElement("description").SetText(string(post.html))
	return nil
}
|
package receiver
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/mariusler/filesender/config"
"github.com/mariusler/filesender/messages"
"github.com/mariusler/filesender/progressBar"
"github.com/mariusler/filesender/utility"
)
// Receiver connects to the sending side, reads the transfer manifest,
// asks the user to confirm, and then receives the announced files.
// (Comment typos "calles"/"receivging" fixed.)
func Receiver() {
	var msg messages.TransferInfo
	conn := connectToServer()
	defer conn.Close()
	fmt.Println("Waiting for server")
	// The sender first transmits the manifest size as ASCII digits in a
	// 32-byte frame, then the JSON manifest itself.
	sizeBuf := make([]byte, 32)
	n, err := conn.Read(sizeBuf)
	if err != nil {
		// Check the read error before trusting sizeBuf (previously the
		// size was parsed before the error was examined).
		fmt.Println(err)
	}
	msgSize, _ := strconv.Atoi(string(sizeBuf[:n]))
	buf := make([]byte, msgSize)
	_, copyErr := io.ReadFull(conn, buf)
	if copyErr != nil {
		fmt.Println(copyErr) // was printing the wrong variable (err)
	}
	jsonErr := json.Unmarshal(buf, &msg)
	if jsonErr != nil {
		fmt.Println(jsonErr)
	}
	printTransferInfo(msg)
	fmt.Println("Enter y or yes to receive these files")
	var input string
	fmt.Scanln(&input)
	input = strings.ToLower(input)
	if input != "y" && input != "yes" {
		fmt.Println("You decided not to receive the files")
		return
	}
	createFolders(msg.Files)
	conn.Write([]byte(input))
	receiveFiles(msg, conn)
}
// receiveFiles streams each announced file from conn to disk, refreshing
// a progress bar on a ticker. Sizes from the manifest bound how many
// bytes are read per file.
//
// NOTE(review): progressInfo.Progresses is indexed at [0]/[1] without
// initialization — presumably an array type in the messages package;
// confirm (a slice here would panic).
func receiveFiles(msg messages.TransferInfo, conn net.Conn) {
	var progressInfo messages.ProgressInfo
	var totalReceivedBytes int64
	ticker := time.NewTicker(time.Millisecond * config.ProgressBarRefreshTime)
	defer ticker.Stop() // previously leaked: the ticker was never stopped
	for ind, file := range msg.Files {
		var receivedBytes int64
		fileSize := msg.Sizes[ind]
		f, err := os.Create(file)
		if err != nil {
			// (Removed the f.Close() here: f is nil when Create fails.)
			fmt.Println(err)
			break
		}
		var n int64
		var copyErr error
		for {
			select {
			case <-ticker.C:
				progressInfo.Progresses[0] = float32(totalReceivedBytes) / float32(msg.TotalSize) * 100
				progressInfo.Progresses[1] = float32(receivedBytes) / float32(fileSize) * 100
				progressInfo.Currentfile = msg.Files[ind]
				go progressBar.PrintProgressBar(progressInfo)
			default: // Skip if ticker is not out
			}
			if (fileSize - receivedBytes) < config.ChunkSize {
				// Only read the remaining bytes, nothing more.
				n, copyErr = io.CopyN(f, conn, (fileSize - receivedBytes))
				receivedBytes += n
				totalReceivedBytes += n
				if copyErr != nil {
					fmt.Println(copyErr) // was printing the stale Create error (err)
				}
				break
			}
			n, copyErr = io.CopyN(f, conn, config.ChunkSize)
			receivedBytes += n
			totalReceivedBytes += n
			if copyErr != nil {
				fmt.Println(copyErr)
				break
			}
		}
		f.Close()
	}
	time.Sleep(time.Millisecond)
	progressInfo.Progresses[0] = float32(totalReceivedBytes) / float32(msg.TotalSize) * 100
	progressInfo.Progresses[1] = float32(100)
	progressBar.PrintProgressBar(progressInfo)
	fmt.Println()
	fmt.Println("Done")
}
// connectToServer prompts the user for an IP address and dials the
// configured port, retrying once per second up to ten times before
// prompting for an address again.
func connectToServer() net.Conn {
	for {
		var ip string
		fmt.Println("Type in the ip address")
		fmt.Scanln(&ip)
		addr := net.TCPAddr{IP: net.ParseIP(ip), Port: config.Port}
		for attempt := 1; attempt <= 10; attempt++ {
			conn, err := net.DialTCP("tcp", nil, &addr)
			if err == nil {
				return conn
			}
			fmt.Println("Error connecting to server", err)
			if attempt == 10 {
				fmt.Println("Timed out connecting")
				break
			}
			time.Sleep(time.Second)
		}
	}
}
// printTransferInfo lists every file in the manifest with its
// human-readable size, followed by the total transfer size.
func printTransferInfo(msg messages.TransferInfo) {
	fmt.Println("Listing all files to be received")
	for i, name := range msg.Files {
		fmt.Print("File: ", name, " Size: ")
		utility.PrintBytesPrefix(msg.Sizes[i])
		fmt.Println()
	}
	fmt.Print("Total size: ")
	utility.PrintBytesPrefix(msg.TotalSize)
	fmt.Println()
}
func createFolders(files []string) {
for _, file := range files {
hierarchy := strings.Split(file, string(filepath.Separator))
if len(hierarchy) > 0 {
pathLength := len(file) - len(hierarchy[len(hierarchy)-1])
os.MkdirAll(file[:pathLength], os.ModePerm)
}
}
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package ownership provides utilities to run ownership API related tests.
package ownership
import (
"chromiumos/policy/chromium/policy/enterprise_management_proto"
lm "chromiumos/system_api/login_manager_proto"
)
// BuildTestSettings returns the ChromeDeviceSettingsProto instance which
// can be used for testing settings.
//
// The returned policy disables guest mode and new-user creation, enables
// username display and data roaming, and whitelists the given user plus
// a fixed dummy account.
func BuildTestSettings(user string) *enterprise_management_proto.ChromeDeviceSettingsProto {
	enabled, disabled := true, false
	settings := &enterprise_management_proto.ChromeDeviceSettingsProto{}
	settings.GuestModeEnabled = &enterprise_management_proto.GuestModeEnabledProto{GuestModeEnabled: &disabled}
	settings.ShowUserNames = &enterprise_management_proto.ShowUserNamesOnSigninProto{ShowUserNames: &enabled}
	settings.DataRoamingEnabled = &enterprise_management_proto.DataRoamingEnabledProto{DataRoamingEnabled: &enabled}
	settings.AllowNewUsers = &enterprise_management_proto.AllowNewUsersProto{AllowNewUsers: &disabled}
	settings.UserWhitelist = &enterprise_management_proto.UserWhitelistProto{UserWhitelist: []string{user, "a@b.c"}}
	return settings
}
// UserPolicyDescriptor creates a PolicyDescriptor suitable for storing and
// retrieving user policy using session_manager's policy storage interface.
func UserPolicyDescriptor(accountID string) *lm.PolicyDescriptor {
	var (
		accountType = lm.PolicyAccountType_ACCOUNT_TYPE_USER
		domain      = lm.PolicyDomain_POLICY_DOMAIN_CHROME
	)
	return &lm.PolicyDescriptor{
		AccountType: &accountType,
		AccountId:   &accountID,
		Domain:      &domain,
	}
}
|
package examples
import "context"
/*
 * Custom request paths are supported; every declared path is matched.
 */
// @HttpGet("/user")
// @HttpGet("/user/more")
// @HttpPost("/user")
// @HttpPost("/user/info")
func User5() {}
/*
 * Fuzzy (pattern) path matching is also supported. Matched parameters
 * are stored in the context until fetched.
 * This method does NOT match /user/
 *
 * Note: if an exact path is registered, the exact path takes priority.
 * For example, if another method UserAdmin registers the path `/user/admin`:
 * 1. A request for `/user/admin` is routed to the UserAdmin method.
 * 2. A request for `/user/zhangsan` is routed to the UserBlurry method.
 */
// @HttpPost("/user/{id}")
func UserBlurry(ctx context.Context) {
	// id := ctx.Value("id") // id is a string
}
/*
 * Besides storing matched parameters in the context, fuzzy matching can
 * also bind them to method parameters automatically.
 * Note: once a parameter is auto-bound, it is no longer placed in the context.
 */
// @HttpPost("/user/{id}")
func UserBlurry1(ctx context.Context, id string) {
	// id1 := ctx.Value("id") // id1 will be nil; the value is not in the context
}
/*
 * Fuzzy matching can also honor the parameter types declared by the
 * method and convert automatically.
 * If the conversion fails, an error is returned and the method is not invoked.
 */
// @HttpPost("/user/{id}")
func UserBlurry2(ctx context.Context, id int64) {}
/*
 * Routes can also be registered automatically from file paths.
 * Every non-special public function under the scanned directory is
 * registered; the name is lower-cased and split with underscores.
 * @NotRouter excludes a function from route-registration scanning.
 *
 * For example, with a project layout like:
 * | ui/
 * | servelet/
 * | v1/
 * | user/
 * | create.go
 *
 * where create.go contains a function AutoRegist,
 *
 * and chero scans the directory:
 * chero.Scan("./ui/servelet")
 *
 * the following routes are registered automatically:
 * GET /v1/user/auto_regist
 * POST /v1/user/auto_regist
 * PUT /v1/user/auto_regist
 * PATCH /v1/user/auto_regist
 * DELETE /v1/user/auto_regist
 * OPTIONS /v1/user/auto_regist
 */
func AutoRegist() {}
|
package filesystem
import (
"io/ioutil"
"os"
"reflect"
"github.com/juntaki/transparent"
"github.com/juntaki/transparent/simple"
"github.com/pkg/errors"
)
// simpleStorage store file at directory, filename is key
type simpleStorage struct {
	directory string // base directory, stored with a trailing "/"
}
// NewSimpleStorage returns SimpleStorage
// SimpleStorage only accepts string key and []byte value.
func NewSimpleStorage(directory string) transparent.BackendStorage {
	storage := &simpleStorage{directory: directory + "/"}
	return storage
}
// NewStorage returns Storage
// The raw backend is wrapped in a simple.StorageWrapper.
func NewStorage(directory string) transparent.BackendStorage {
	backend := &simpleStorage{directory: directory + "/"}
	return &simple.StorageWrapper{BackendStorage: backend}
}
// Get is file read
// A missing file maps to transparent.KeyNotFoundError; other read
// failures are wrapped with the filename.
func (f *simpleStorage) Get(k interface{}) (interface{}, error) {
	filename, err := f.validateKey(k)
	if err != nil {
		return nil, err
	}
	data, readErr := ioutil.ReadFile(f.directory + filename)
	switch {
	case readErr == nil:
		return data, nil
	case os.IsNotExist(readErr):
		return nil, &transparent.KeyNotFoundError{Key: filename}
	default:
		return nil, errors.Wrapf(readErr, "failed to read file. filename = %s", filename)
	}
}
// Add is file write
// The key must be a string and the value a []byte; the file is written
// with mode 0600.
func (f *simpleStorage) Add(k interface{}, v interface{}) error {
	filename, err := f.validateKey(k)
	if err != nil {
		return err
	}
	data, err := f.validateValue(v)
	if err != nil {
		return err
	}
	if writeErr := ioutil.WriteFile(f.directory+filename, data, 0600); writeErr != nil {
		return errors.Wrapf(writeErr, "failed to write file. filename = %s", filename)
	}
	return nil
}
// Remove is file unlink
func (f *simpleStorage) Remove(k interface{}) error {
	filename, err := f.validateKey(k)
	if err != nil {
		return err
	}
	if rmErr := os.Remove(f.directory + filename); rmErr != nil {
		return errors.Wrapf(rmErr, "failed to remove file. filename = %s", filename)
	}
	return nil
}
// validateKey asserts that k is a string and returns it; otherwise it
// reports the expected vs. actual type.
func (f *simpleStorage) validateKey(k interface{}) (string, error) {
	if key, ok := k.(string); ok {
		return key, nil
	}
	return "", &simple.StorageInvalidKeyError{
		Valid:   reflect.TypeOf((string)("")),
		Invalid: reflect.TypeOf(k),
	}
}
// validateValue asserts that v is a []byte and returns it; otherwise it
// reports the expected vs. actual type.
func (f *simpleStorage) validateValue(v interface{}) ([]byte, error) {
	if value, ok := v.([]byte); ok {
		return value, nil
	}
	return []byte{}, &simple.StorageInvalidValueError{
		Valid:   reflect.TypeOf(([]byte)("")),
		Invalid: reflect.TypeOf(v),
	}
}
|
package model
import (
"github.com/jinzhu/gorm"
)
// Author is the GORM model for a blog author.
//
// NOTE(review): gorm.Model already provides an auto-increment ID field;
// the extra string Id here overlaps it in intent — confirm which one the
// queries actually use. Go convention would also name it "ID", but
// renaming changes the column/API, so confirm callers first.
type Author struct{
	gorm.Model
	Id        string
	Name      string
	Introduce string
	Love      int // presumably a like counter — confirm
	View      int // presumably a view counter — confirm
}
|
package main
import "C"
//export _TestPlugin_Test_GoPlugin
func _TestPlugin_Test_GoPlugin() {
	// p is a package-level plugin instance declared elsewhere in this
	// package (not visible in this chunk).
	p.Test()
}
//export _Type
func _Type() uint16 {
	// example_plugin is defined elsewhere in this package; presumably a
	// plugin-kind enum value — confirm.
	return uint16(example_plugin)
}
func main() {}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package feedback
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/apps"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/feedbackapp"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the SearchHelpContent Tast test with ash and lacros
// variants; the browser type is carried in Val and read back via
// s.Param() in the test body.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SearchHelpContent,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Suggested help content is updated as user enters issue description",
		Contacts: []string{
			"wangdanny@google.com",
			"zhangwenyu@google.com",
			"xiangdongkong@google.com",
			"cros-feedback-app@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      2 * time.Minute,
		Params: []testing.Param{{
			Name:    "ash",
			Fixture: "chromeLoggedInWithOsFeedback",
			Val:     browser.TypeAsh,
		}, {
			Name:    "lacros",
			Fixture: "lacrosOsFeedback",
			Val:     browser.TypeLacros,
		}},
	})
}
// SearchHelpContent verifies the suggested help content will be updated as user
// enters issue description.
// Flow: launch the Feedback app, confirm five default help links, type a
// description, confirm the suggestions refresh (still five links), open
// the first link and check a browser window appears in the shelf.
func SearchHelpContent(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	// Keep an unshortened context for cleanup that must still run after
	// ctx's (shortened) deadline is exhausted.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to Test API: ", err)
	}
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr,
		"ui_dump")
	ui := uiauto.New(tconn)
	// Set up keyboard.
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to find keyboard: ", err)
	}
	defer kb.Close()
	// Launch feedback app.
	feedbackRootNode, err := feedbackapp.Launch(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to launch feedback app: ", err)
	}
	// Verify help content title exists.
	title := nodewith.Name("Top help content").Role(role.StaticText).Ancestor(feedbackRootNode)
	if err := ui.WaitUntilExists(title)(ctx); err != nil {
		s.Fatal("Failed to find the help content title: ", err)
	}
	// Verify there are five default help links.
	_, err = verifyLinks(ctx, tconn, 5)
	if err != nil {
		s.Fatal("Failed to find five help links: ", err)
	}
	// Find the issue description text input.
	issueDescriptionInput := nodewith.Role(role.TextField).Ancestor(feedbackRootNode)
	if err := ui.EnsureFocused(issueDescriptionInput)(ctx); err != nil {
		s.Fatal("Failed to find the issue description text input: ", err)
	}
	// Type issue description.
	if err := kb.Type(ctx, "I am not able to connect to Bluetooth"); err != nil {
		s.Fatal("Failed to type issue description: ", err)
	}
	// Verify help content title has changed.
	updatedTitle := nodewith.Name("Suggested help content").Role(role.StaticText).Ancestor(
		feedbackRootNode)
	if err := ui.WaitUntilExists(updatedTitle)(ctx); err != nil {
		s.Fatal("Failed to find the updated help content title: ", err)
	}
	// Verify there are five help content link.
	updatedHelpLink, err := verifyLinks(ctx, tconn, 5)
	if err != nil {
		s.Fatal("Failed to find five help links: ", err)
	}
	// Verify the link can be opened.
	if err := ui.LeftClick(updatedHelpLink.First())(ctx); err != nil {
		s.Fatal("Failed to open help link: ", err)
	}
	bt := s.Param().(browser.Type)
	// Verify browser is opened.
	id := apps.Chrome.ID
	if bt != browser.TypeAsh {
		id = apps.LacrosID
	}
	if err = ash.WaitForApp(ctx, tconn, id, time.Minute); err != nil {
		s.Fatal("Could not find browser in shelf after launch: ", err)
	}
}
// verifyLinks function verifies there are n links existed.
// It waits (up to 20s each) for n link nodes inside the help-content
// iframe and returns the finder matching all of them.
func verifyLinks(ctx context.Context, tconn *chrome.TestConn, n int) (
	*nodewith.Finder, error) {
	ui := uiauto.New(tconn)
	links := nodewith.Role(role.Link).Ancestor(nodewith.Role(role.Iframe))
	for i := 0; i < n; i++ {
		if err := ui.WithTimeout(20 * time.Second).WaitUntilExists(links.Nth(i))(ctx); err != nil {
			return nil, errors.Wrap(err, "failed to find help links")
		}
	}
	return links, nil
}
|
package logs
import "testing"
// testConsole emits one message at each severity level so console
// formatting can be verified against the captured output in TestConsole.
func testConsole(bl *BeeLogger) {
	bl.Emergency("emergency")
	// NOTE(review): "alter" looks like a typo for "alert"; the expected
	// output comments in TestConsole include it, so fix both together.
	bl.Alert("alter")
	bl.Critical("critical")
	bl.Error("error")
	bl.Warn("warning")
	bl.Notice("notice")
	bl.Info("informational")
	bl.Debug("debug")
}
// TestConsole exercises the console logger twice: once with caller
// file/line reporting enabled and default config, and once with a JSON
// config `{"level":3}` — per the captured output below, the second run
// emits only emergency through error.
func TestConsole(t *testing.T) {
	log1 := NewLogger(10000)
	log1.EnableFuncCallDepth(true)
	log1.SetLogger("console", "")
	testConsole(log1)
	log2 := NewLogger(100)
	log2.SetLogger("console", `{"level":3}`)
	testConsole(log2)
	//output
	//2019-01-22 19:46:27.3856002 +0800 CST m=+0.024000001[console_test.go:6] emergency
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:7] alter
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:8] critical
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:9] error
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:10] warning
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:11] notice
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:12] informational
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001[console_test.go:13] debug
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001 emergency
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001 alter
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001 critical
	//2019-01-22 19:46:27.4146002 +0800 CST m=+0.053000001 error
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package heapprofiler
import (
"context"
"os"
"runtime/pprof"
"github.com/cockroachdb/cockroach/pkg/server/dumpstore"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// HeapProfiler is used to take Go heap profiles.
//
// MaybeTakeProfile() is supposed to be called periodically. A profile is taken
// every time Go heap allocated bytes exceeds the previous high-water mark. The
// recorded high-water mark is also reset periodically, so that we take some
// profiles periodically.
// Profiles are also GCed periodically. The latest is always kept, and a couple
// of the ones with the largest heap are also kept.
type HeapProfiler struct {
	// profiler supplies the shared high-water-mark tracking and dump-store
	// plumbing; only the dump function (takeHeapProfile) is heap-specific.
	profiler
}
// HeapFileNamePrefix is the prefix of files containing pprof data.
const HeapFileNamePrefix = "memprof"

// HeapFileNameSuffix is the suffix of files containing pprof data.
const HeapFileNameSuffix = ".pprof"
// NewHeapProfiler creates a HeapProfiler. dir is the directory in which
// profiles are to be stored; it must be non-empty.
func NewHeapProfiler(ctx context.Context, dir string, st *cluster.Settings) (*HeapProfiler, error) {
	if dir == "" {
		return nil, errors.AssertionFailedf("need to specify dir for NewHeapProfiler")
	}
	log.Infof(ctx, "writing go heap profiles to %s at least every %s", dir, resetHighWaterMarkInterval)
	store := newProfileStore(dumpstore.NewStore(dir, maxCombinedFileSize, st), HeapFileNamePrefix, HeapFileNameSuffix, st)
	return &HeapProfiler{profiler{store: store}}, nil
}
// MaybeTakeProfile takes a heap profile if the heap is big enough.
// curHeap is the current Go heap size in bytes; the decision logic lives
// in the embedded profiler, with takeHeapProfile as the dump callback.
func (o *HeapProfiler) MaybeTakeProfile(ctx context.Context, curHeap int64) {
	o.maybeTakeProfile(ctx, curHeap, takeHeapProfile)
}
// takeHeapProfile returns true if and only if the profile dump was
// taken successfully. Failures are logged as warnings, not returned.
func takeHeapProfile(ctx context.Context, path string) (success bool) {
	// Try writing a go heap profile.
	f, err := os.Create(path)
	if err != nil {
		log.Warningf(ctx, "error creating go heap profile %s: %v", path, err)
		return false
	}
	defer f.Close()
	writeErr := pprof.WriteHeapProfile(f)
	if writeErr != nil {
		log.Warningf(ctx, "error writing go heap profile %s: %v", path, writeErr)
		return false
	}
	return true
}
|
package main
return
|
/*
*Author: Eddie_Ivan
*Blog: http://nemesisly.xyz
*Github: https://github.com/eddieivan01
*
*爬虫代理IP池
*抓取
* http://www.xicidaili.com
* http://www.66ip.cn
* https://list.proxylistplus.com
*三家代理IP,存入本地Sqlite3数据库中,并在本地开启Http Server监听,提供json API服务
*
*程序架构:
*+ 判断本地是否已存在数据库
* + =>True: 返回数据库句柄,pass
* + =>False: 建立数据库,进行创建表等初始化工作,请求代理网站并筛选,将IP插入数据库,返回句柄
*+ 开启监听,提供API
* + /proxy?act=get: 查询数据库,并返回数据库中全部代理
* + /proxy?act=reflush: 检查数据库已存在代理的可用性;请求代理网站获取IP,将可用IP插入数据库
*
*验证策略:
*请求http://ip.chinaz.com/getip.aspx
*http代理: 代理IP与响应IP相同 && 请求成功 && 请求响应 <= 5s
*https代理: 请求成功 && 请求响应 <= 5s
*
*API状态码:
* 200:正常
* 201:数据库为空
* 202:请求参数错误
*
*/
package main
import "fmt"
// main prints an ASCII-art startup banner and hands control to initHTTP
// (defined elsewhere in this package), which serves the proxy-pool API.
func main() {
	fmt.Println(`
 ___ ___ __
 / _ \ ____ ___ __ __ __ __ / _ \ ___ ___ / /
 / ___/ / __// _ \ \ \ / / // / / ___// _ \/ _ \ / /
 /_/ /_/ \___//_\_\ \_, / /_/ \___/\___//_/
 /___/
 `)
	initHTTP()
}
|
package gosys
import (
"testing"
"fmt"
)
// str-form address checks.

// Test_GetneiIp_str prints the intranet (internal) address in string
// form for manual inspection. (Reformatted to be gofmt-clean.)
func Test_GetneiIp_str(t *testing.T) {
	a := NewAddr(4440)
	a.IntranetAddr()
	fmt.Println(a.GetIPstr())
}
// Test_GetwaiIp_str prints the external address in string form for
// manual inspection. (Reformatted to be gofmt-clean.)
func Test_GetwaiIp_str(t *testing.T) {
	a := NewAddr(4441)
	a.ExternalAddr()
	fmt.Println(a.GetIPstr())
}
// Test_Getlocal_str prints the local address in string form for manual
// inspection. (Reformatted to be gofmt-clean; the debug marker string is
// preserved verbatim.)
func Test_Getlocal_str(t *testing.T) {
	a := NewAddr(4002)
	a.LocalAddr()
	fmt.Println("aaaaaaaaaaaaaaaaaaaaaaaaa", a.GetIPstr())
}
// tcp-form address checks.

// Test_GetneiIp_tcp prints the intranet address in its TCP form for
// manual inspection. (Reformatted to be gofmt-clean.)
func Test_GetneiIp_tcp(t *testing.T) {
	a := NewAddr(4440)
	a.IntranetAddr()
	fmt.Println(a.GetTCPAddr())
}
// Test_GetwaiIp_tcp prints the external address in its TCP form for
// manual inspection. (Reformatted to be gofmt-clean.)
func Test_GetwaiIp_tcp(t *testing.T) {
	a := NewAddr(4441)
	a.ExternalAddr()
	fmt.Println(a.GetTCPAddr())
}
// udp-form address checks.

// Test_GetneiIp_udp prints the intranet address in its UDP form for
// manual inspection. (Reformatted to be gofmt-clean.)
func Test_GetneiIp_udp(t *testing.T) {
	a := NewAddr(4440)
	a.IntranetAddr()
	fmt.Println(a.GetUDPAddr())
}
func Test_GetwaiIp_udp(t *testing.T) {
a:=NewAddr(4441)
a.ExternalAddr()
fmt.Println(a.GetUDPAddr())
} |
// Copyright 2016 The Lucas Alves Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"github.com/luk4z7/pagarme-go/auth"
"github.com/luk4z7/pagarme-go/lib/recipient"
"net/url"
"os"
)
var recipientRecord recipient.Recipient
// main walks through the pagar.me recipient API examples: creating
// recipients (existing bank account, automatic anticipation, new bank
// account), fetching one recipient and the full list, and applying three
// kinds of updates. Each call's outcome is printed as indented JSON on
// stdout. The seven copy-pasted if/else print blocks are factored into
// the report helper; behavior and output are unchanged.
func main() {
	// Creating a recipient with an existing bank account.
	data := []byte(`{
	"transfer_interval":"monthly",
	"transfer_day": 8,
	"transfer_enabled": true,
	"bank_account_id": 15897336
	}`)
	create, err, errorsApi := recipientRecord.Create(data, url.Values{}, auth.Headers{})
	report(err, errorsApi, create)
	// Creating a recipient with automatic anticipation.
	data2 := []byte(`{
	"transfer_interval":"monthly",
	"transfer_day": 8,
	"transfer_enabled": true,
	"bank_account_id": 15897336,
	"automatic_anticipation_enabled": true,
	"anticipatable_volume_percentage": 88
	}`)
	create2, err, errorsApi := recipientRecord.Create(data2, url.Values{}, auth.Headers{})
	report(err, errorsApi, create2)
	// Creating a recipient with a new bank account.
	data3 := []byte(`{
	"transfer_interval":"weekly",
	"transfer_day": 1,
	"transfer_enabled": true,
	"bank_account": {
	"bank_code":"184",
	"agencia":"8",
	"conta":"08808",
	"conta_dv":"8",
	"document_number":"80802694594",
	"legal_name":"Lucas Alves"
	}
	}`)
	create3, err, errorsApi := recipientRecord.Create(data3, url.Values{}, auth.Headers{})
	report(err, errorsApi, create3)
	// Fetching a recipient's record by id.
	get, err, errorsApi := recipientRecord.Get(url.Values{"id": {"re_cishgtigt012zv86e859ajse4"}}, auth.Headers{})
	report(err, errorsApi, get)
	// Listing recipients with pagination headers.
	getall, err, errorsApi := recipientRecord.GetAll(url.Values{}, auth.Headers{
		"page":  "1",
		"count": "10",
	})
	report(err, errorsApi, getall)
	// Updating a recipient to use another existing bank account.
	data4 := []byte(`{
	"bank_account_id": 6626431
	}`)
	update, err, errorsApi := recipientRecord.Update(data4, url.Values{"id": {"re_ciflm3dq9008r116ds3o8afvt"}}, auth.Headers{})
	report(err, errorsApi, update)
	// Updating a recipient to enable automatic anticipation.
	data5 := []byte(`{
	"automatic_anticipation_enabled": true,
	"anticipatable_volume_percentage": 40
	}`)
	update2, err, errorsApi := recipientRecord.Update(data5, url.Values{"id": {"re_ciflm3dq9008r116ds3o8afvt"}}, auth.Headers{})
	report(err, errorsApi, update2)
	// Updating a recipient with a new transfer day/interval.
	data6 := []byte(`{
	"transfer_day": 5,
	"transfer_interval": "weekly"
	}`)
	update3, err, errorsApi := recipientRecord.Update(data6, url.Values{"id": {"re_ciflm3dq9008r116ds3o8afvt"}}, auth.Headers{})
	report(err, errorsApi, update3)
}

// report prints errorsApi as indented JSON when err is non-nil,
// otherwise the successful response — mirroring the original per-call
// if/else blocks (marshal errors are ignored, as before).
func report(err error, errorsApi interface{}, response interface{}) {
	v := response
	if err != nil {
		v = errorsApi
	}
	out, _ := json.MarshalIndent(v, "", " ")
	os.Stdout.Write(out)
}
|
package orders
import (
"app/utils"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"unsafe"
)
var URL = utils.GetStateStoreUrl()
// GetHandler proxies GET /order to the state store and relays the stored
// order JSON back to the caller; upstream failures become a 500.
func GetHandler(w http.ResponseWriter, r *http.Request) {
	url := URL + "/order"
	resp, e := http.Get(url)
	if resp != nil {
		// Close even on non-2xx so the transport can reuse the connection.
		defer resp.Body.Close()
	}
	if e != nil {
		fmt.Println(e.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if resp.StatusCode/100 != 2 {
		fmt.Println("Could not get state. " + resp.Status)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	buff, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		// Previously this error was discarded, which could relay a
		// truncated body under an implicit 200.
		fmt.Println(readErr.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Write(buff)
	// Zero-copy []byte→string view for logging only; safe because buff is
	// not modified afterwards, though plain string(buff) would be clearer.
	str := *(*string)(unsafe.Pointer(&buff))
	fmt.Println("Got a order: ", str)
}
// PostHandler accepts an order event, extracts the order id from the
// JSON body and persists it to the state store under the "order" key.
// Responses: 400 for an unreadable or invalid body, 500 for persistence
// failures, implicit 200 on success.
func PostHandler(w http.ResponseWriter, r *http.Request) {
	// Local wire formats: incoming event body and outgoing state payload.
	type Data struct {
		OrderId int `json:"orderId"`
	}
	type Body struct {
		Data Data `json:"data"`
	}
	type State struct {
		Key   string `json:"key"`
		Value Data   `json:"value"`
	}
	bodyBuff, e := ioutil.ReadAll(r.Body)
	if e != nil {
		fmt.Println(e.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	body := Body{}
	e = json.Unmarshal(bodyBuff, &body)
	if e != nil {
		fmt.Println(e.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	fmt.Println("Got a new order! Order ID: ", body.Data.OrderId)
	// The state store expects a JSON array of key/value entries.
	state := State{"order", body.Data}
	payload, e := json.Marshal([]State{state})
	if e != nil {
		fmt.Println(e.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	resp, e := http.Post(URL, "application/json", bytes.NewBuffer(payload))
	if resp != nil {
		defer resp.Body.Close()
	}
	if e != nil {
		fmt.Println(e.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if resp.StatusCode/100 != 2 {
		fmt.Println("Could not set state. " + resp.Status)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	fmt.Println("Successfully persisted state.")
}
|
package user
import (
"log"
"net/http"
"github.com/lazhari/web-jwt/models"
"github.com/lazhari/web-jwt/utils"
"golang.org/x/crypto/bcrypt"
)
// userService implements the user Service interface, delegating all
// persistence to the injected Repository.
type userService struct {
	authRepo Repository // user storage backend (login lookup, sign-up insert)
}
// NewAuthService creates a new auth service backed by the given repository.
func NewAuthService(userRepo Repository) Service {
	svc := &userService{authRepo: userRepo}
	return svc
}
// Login authenticates the given user against the repository. On success it
// returns a JWT for the user; otherwise a *models.RequestError describing
// the failure (bad request, unauthorized, or internal error).
func (authSrv *userService) Login(user *models.User) (*models.JWT, error) {
	if user.Email == "" {
		return nil, &models.RequestError{
			Message:    "Email is missing.",
			StatusCode: http.StatusBadRequest,
		}
	}
	if user.Password == "" {
		return nil, &models.RequestError{
			Message:    "Password is missing",
			StatusCode: http.StatusBadRequest,
		}
	}
	// Keep the plaintext around: the repository lookup overwrites
	// user.Password with the stored hash.
	plaintext := user.Password
	user, err := authSrv.authRepo.Login(user)
	if err != nil {
		return nil, err
	}
	if !utils.ComparePasswords(user.Password, []byte(plaintext)) {
		return nil, &models.RequestError{
			Message:    "Invalid credentials!",
			StatusCode: http.StatusUnauthorized,
		}
	}
	token, err := utils.GenerateToken(*user)
	if err != nil {
		return nil, &models.RequestError{
			Message:    err.Error(),
			StatusCode: http.StatusInternalServerError,
		}
	}
	return &models.JWT{Token: token}, nil
}
// SignUp validates the new user, hashes its password with bcrypt, and
// stores it via the repository. Returns the stored user, or a
// *models.RequestError for validation/hashing failures.
func (authSrv *userService) SignUp(user *models.User) (*models.User, error) {
	if validationErr := user.Validate(); validationErr != nil {
		return nil, &models.RequestError{
			StatusCode:       http.StatusBadRequest,
			Message:          "Invalid request",
			ValidationErrors: validationErr,
		}
	}
	if !utils.IsEmailValid(user.Email) {
		return nil, &models.RequestError{
			Message:    "Email is not valid.",
			StatusCode: http.StatusBadRequest,
		}
	}
	// bcrypt.DefaultCost (10) matches the previously hard-coded cost factor,
	// but names the intent instead of a magic number.
	hash, errHash := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
	if errHash != nil {
		log.Printf("Error while hashing the password: %v\n", errHash)
		return nil, &models.RequestError{
			Message:    "Internal Server Error",
			StatusCode: http.StatusInternalServerError,
		}
	}
	user.Password = string(hash)
	return authSrv.authRepo.SignUp(user)
}
|
package preprocess
import (
"testing"
"github.com/jagandecapri/vision/tree"
"github.com/stretchr/testify/assert"
)
// TestNormalize checks per-dimension scaling of point vectors: min maps to
// 0, max to 0.9999999, and intermediate values proportionally.
func TestNormalize(t *testing.T) {
	input := []tree.Point{
		{Id: 1, Vec_map: map[string]float64{"first": 5, "second": 10}},
		{Id: 2, Vec_map: map[string]float64{"first": 2, "second": 6}},
		{Id: 3, Vec_map: map[string]float64{"first": 3, "second": 7}},
	}
	want := []tree.Point{
		{Id: 1, Vec_map: map[string]float64{"first": 0.9999999, "second": 0.9999999}},
		{Id: 2, Vec_map: map[string]float64{"first": 0, "second": 0}},
		{Id: 3, Vec_map: map[string]float64{"first": 0.3333333333333333, "second": 0.25}},
	}
	got := Normalize(input, []string{"first", "second"})
	assert.Equal(t, want, got)
}
// TestNormalize2 repeats the normalization check with a zero minimum in the
// "first" dimension.
func TestNormalize2(t *testing.T) {
	input := []tree.Point{
		{Id: 1, Vec_map: map[string]float64{"first": 5, "second": 10}},
		{Id: 2, Vec_map: map[string]float64{"first": 0, "second": 6}},
		{Id: 3, Vec_map: map[string]float64{"first": 3, "second": 7}},
	}
	want := []tree.Point{
		{Id: 1, Vec_map: map[string]float64{"first": 0.9999999, "second": 0.9999999}},
		{Id: 2, Vec_map: map[string]float64{"first": 0, "second": 0}},
		{Id: 3, Vec_map: map[string]float64{"first": 0.6, "second": 0.25}},
	}
	got := Normalize(input, []string{"first", "second"})
	assert.Equal(t, want, got)
}
func TestNormalize3(t *testing.T) {
points := []tree.Point{{Id: 1, Vec_map: map[string]float64{
"first": 1,
"second": 1,
}},
{Id: 2, Vec_map: map[string]float64{
"first": 1,
"second": 1,
}},
}
normalized_points := []tree.Point{{Id: 1, Vec_map: map[string]float64{
"first": 0,
"second": 0,
}},
{Id: 2, Vec_map: map[string]float64{
"first": 0,
"second": 0,
}},
}
sorter := []string{"first", "second"}
norm_points := Normalize(points, sorter)
assert.Equal(t, normalized_points, norm_points)
} |
package broker
import (
"context"
"github.com/LiveRamp/gazette/v2/pkg/allocator"
pb "github.com/LiveRamp/gazette/v2/pkg/protocol"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/trace"
)
// Service is the top-level runtime concern of a Gazette Broker process. It
// drives local journal handling in response to allocator.State, powers
// journal resolution, and is also an implementation of protocol.JournalServer.
type Service struct {
	jc       pb.JournalClient // client used to reach peer brokers
	etcd     clientv3.KV      // Etcd key/value store handle
	resolver *resolver        // maps journals to local replicas and routes
}
// NewService constructs a new broker Service, driven by allocator.State.
func NewService(state *allocator.State, jc pb.JournalClient, etcd clientv3.KV) *Service {
	svc := &Service{jc: jc, etcd: etcd}
	// The replica factory closes over svc so each replica's maintenance loop
	// can route requests through the service's resolver.
	svc.resolver = newResolver(state, func(journal pb.Journal) *replica {
		rep := newReplica(journal)
		go maintenanceLoop(rep, state.KS, pb.NewRoutedJournalClient(jc, svc), etcd)
		return rep
	})
	return svc
}
// addTrace lazily logs a formatted event to the request trace attached to
// ctx; it is a no-op when ctx carries no trace.
func addTrace(ctx context.Context, format string, args ...interface{}) {
	tr, ok := trace.FromContext(ctx)
	if !ok {
		return
	}
	tr.LazyPrintf(format, args...)
}
// Route an item using the Service resolver. Route implements the
// protocol.DispatchRouter interface, and enables usages of
// protocol.WithDispatchItemRoute (eg, `client` & `http_gateway` packages) to
// resolve items via the Service resolver.
func (svc *Service) Route(ctx context.Context, item string) pb.Route {
	args := resolveArgs{
		ctx:      ctx,
		journal:  pb.Journal(item),
		mayProxy: true,
	}
	res, err := svc.resolver.resolve(args)
	if err != nil {
		// Cannot err because we use neither minEtcdRevision nor proxyHeader.
		panic(err)
	}
	// If Status != OK, Route will be zero-valued, which directs dispatcher
	// to use the default service address (localhost), which will then re-run
	// resolution and generate a proper error message for the client.
	return res.Route
}
// UpdateRoute is a no-op implementation of protocol.DispatchRouter: the
// Service refreshes routes through resolution itself, so pushed updates
// are ignored.
func (svc *Service) UpdateRoute(string, *pb.Route) {} // No-op.
// IsNoopRouter returns false: Route performs real item routing.
func (svc *Service) IsNoopRouter() bool { return false }
|
package go_image
import (
"fmt"
"testing"
)
// TestImage scales an image file into new files, then detects and corrects
// a mismatched file extension. (Comments translated from Chinese.)
func TestImage(t *testing.T) {
	// Print the directory containing this file.
	fmt.Printf("本文件文件夹位置:%s\n", CurDir())
	// Source image location.
	filename := "./testdata/gopher.png"
	// Target width and height.
	width := 500
	height := 800
	// Output locations.
	save1 := "./testdata/gopher500.jpg"
	save2 := "./testdata/gopher500_800.png"
	// Proportional scaling by width only.
	err := ScaleF2F(filename, save1, width)
	if err != nil {
		fmt.Printf("生成按宽度缩放图失败:%s\n", err.Error())
	} else {
		fmt.Printf("生成按宽度缩放图:%s\n", save1)
	}
	// Proportional scaling bounded by both width and height.
	err = ThumbnailF2F(filename, save2, width, height)
	if err != nil {
		fmt.Printf("生成按宽度高度缩放图:%s\n", err.Error())
	} else {
		fmt.Printf("生成按宽度高度缩放图:%s\n", save2)
	}
	// Determine the image file's true name:
	// e.g. ./testdata/gopher500.jpg is actually PNG data, so the extension
	// is wrong and needs correcting.
	realfilename, err := RealImageName(save1)
	if err != nil {
		fmt.Printf("真正的文件名: %s->? err:%s\n", save1, err.Error())
	} else {
		fmt.Printf("真正的文件名:%s->%s\n", save1, realfilename)
	}
	// Rename the file, forced.
	err = ChangeImageName(save1, realfilename, true)
	if err != nil {
		fmt.Printf("文件改名失败:%s->%s,%s\n", save1, realfilename, err.Error())
	} else {
		fmt.Println("改名成功")
	}
	// Rename the file, not forced.
	err = ChangeImageName(save1, realfilename, false)
	if err != nil {
		fmt.Printf("文件改名失败:%s->%s,%s\n", save1, realfilename, err.Error())
	}
}
|
/*
* @lc app=leetcode.cn id=124 lang=golang
*
* [124] 二叉树中的最大路径和
*/
// @lc code=start
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// maxPathSum returns the maximum path sum in the tree rooted at root. A
// path is any chain of connected nodes and need not pass through the root.
// A nil tree yields 0.
func maxPathSum(root *TreeNode) int {
	if root == nil {
		return 0
	}
	best := math.MinInt
	maxNode(root, &best)
	return best
}
// maxNode returns the best downward path sum starting at node, and folds
// the best complete path seen within node's subtree into *max.
// NOTE: it overwrites node.Val with the best downward sum, mutating the
// input tree (as the original implementation did).
func maxNode(node *TreeNode, max *int) int {
	if node == nil {
		return 0
	}
	val := node.Val
	if node.Left == nil && node.Right == nil {
		// Leaf: the only path through it is the node itself.
		*max = maxArray(val, *max)
		return node.Val
	}
	left := maxNode(node.Left, max)
	right := maxNode(node.Right, max)
	// The best downward path through this node extends at most one child.
	node.Val = maxArray(val, val+left, val+right)
	// A complete path may bend here, combining both children.
	*max = maxArray(node.Val, val+left+right, *max)
	return node.Val
}
// maxArray returns the maximum of its arguments, or math.MinInt when
// called with none.
func maxArray(l ...int) int {
	best := math.MinInt
	for _, v := range l {
		if v > best {
			best = v
		}
	}
	return best
}
// @lc code=end
|
package main
import (
"context"
"encoding/json"
"fmt"
"runtime"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
// Handler responds to an API Gateway request with a greeting that includes
// the Go runtime version, echoing the incoming event back in the body.
func Handler(_ context.Context, r events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	data, err := json.Marshal(map[string]interface{}{
		"message": fmt.Sprintf("Hello World from Lambda with %s", runtime.Version()),
		"event":   r,
	})
	if err != nil {
		// Previously the marshal error was discarded, silently returning a
		// 200 with an empty body. Surface the failure instead.
		return events.APIGatewayProxyResponse{StatusCode: 500}, err
	}
	return events.APIGatewayProxyResponse{
		StatusCode: 200,
		Body:       string(data),
	}, nil
}
// main wires the Lambda runtime to Handler.
func main() {
	lambda.Start(Handler)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package inputs
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/inputs/fixture"
"chromiumos/tast/local/bundles/cros/inputs/pre"
"chromiumos/tast/local/bundles/cros/inputs/testserver"
"chromiumos/tast/local/bundles/cros/inputs/util"
"chromiumos/tast/local/chrome/ime"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/useractions"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the Zhuyin physical-keyboard typing test, with variants
// for the default (Ash) and Lacros clamshell fixtures.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PhysicalKeyboardZhuyinTyping,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks that Zhuyin physical keyboard works",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools"},
		SoftwareDeps: []string{"chrome", "chrome_internal"},
		HardwareDeps: hwdep.D(pre.InputsStableModels),
		Timeout:      5 * time.Minute,
		SearchFlags:  util.IMESearchFlags([]ime.InputMethod{ime.ChineseZhuyin}),
		Params: []testing.Param{
			{
				Fixture: fixture.ClamshellNonVK,
			},
			{
				Name:              "lacros",
				Fixture:           fixture.LacrosClamshellNonVK,
				ExtraSoftwareDeps: []string{"lacros_stable"},
				ExtraAttr:         []string{"informational"},
			},
		},
	})
}
// PhysicalKeyboardZhuyinTyping exercises Zhuyin input over the physical
// keyboard: composition to Chinese characters, tone keys, backspace
// behavior, and candidate-window navigation.
func PhysicalKeyboardZhuyinTyping(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree")
	im := ime.ChineseZhuyin
	s.Log("Set current input method to: ", im)
	if err := im.InstallAndActivateUserAction(uc)(ctx); err != nil {
		s.Fatalf("Failed to set input method to %v: %v: ", im, err)
	}
	uc.SetAttribute(useractions.AttributeInputMethod, im.Name)
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()
	its, err := testserver.LaunchBrowser(ctx, s.FixtValue().(fixture.FixtData).BrowserType, cr, tconn)
	if err != nil {
		s.Fatal("Failed to launch inputs test server: ", err)
	}
	defer its.CloseAll(cleanupCtx)
	inputField := testserver.TextAreaInputField
	ui := uiauto.New(tconn)
	subtests := []struct {
		name     string
		scenario string
		action   uiauto.Action
	}{
		{
			// Type something and check that the symbols automatically form Chinese characters.
			name:     "TypeZhuyinConvertsToChineseCharacters",
			scenario: "verify Zhuyin symbols automatically convert to chinese characters",
			action:   its.ValidateInputOnField(inputField, kb.TypeAction("z06wu35j/ jp6"), "繁體中文"),
		},
		{
			// Type symbols without tone should show the symbols.
			name:     "TypeZhuyinWithoutToneShowsSymbols",
			scenario: "Type symbols without tone should show symbols",
			action:   its.ValidateInputOnField(inputField, kb.TypeAction("5j/"), "ㄓㄨㄥ"),
		},
		{
			// Type Zhuyin replaces corresponding initial/medial/final.
			name:     "TypeZhuyinReplacesCorrespondingInitialMedialFinal",
			scenario: "Type Zhuyin without tones to replace the corresponding initial/medial/final",
			action:   its.ValidateInputOnField(inputField, kb.TypeAction("5j/125qwertyasdfghzxcvbnujm8ik,9ol.0p;/-"), "ㄙㄩㄦ"),
		},
		{
			// Type various tone keys to convert to Chinese characters.
			name:     "TypeZhuyinTonesConvertsToChinese",
			scenario: "Type various tone keys to convert to Chinese characters",
			// Test each character separately so that the IME doesn't adjust the character based on previous characters.
			// Esc will clear the composition for the next character.
			action: uiauto.Combine("type Zhuyin with tones and verify the character",
				its.ClearThenClickFieldAndWaitForActive(inputField),
				kb.TypeAction("g3"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "使"),
				kb.AccelAction("Esc"),
				kb.TypeAction("su4"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "逆"),
				kb.AccelAction("Esc"),
				kb.TypeAction("5j/ "),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "中"),
				kb.AccelAction("Esc"),
				kb.TypeAction("586"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "紮"),
				kb.AccelAction("Esc"),
				kb.TypeAction("m "),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "瘀"),
				kb.AccelAction("Esc"),
				kb.TypeAction("up3"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "尹"),
				kb.AccelAction("Esc"),
				kb.TypeAction(",4"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "誒"),
			),
		},
		{
			// Type backspace to delete symbols one by one.
			name:     "TypeBackspaceDeletes",
			scenario: "Type backspace to delete symbols one by one",
			action: uiauto.Combine("type some text and press backspace repeatedly",
				its.ClearThenClickFieldAndWaitForActive(inputField),
				kb.TypeAction("5j/ 5j/"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "中ㄓㄨㄥ"),
				kb.AccelAction("Backspace"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "中ㄓㄨ"),
				kb.AccelAction("Backspace"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "中ㄓ"),
				kb.AccelAction("Backspace"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), "中"),
				kb.AccelAction("Backspace"),
				util.WaitForFieldTextToBe(tconn, inputField.Finder(), ""),
			),
		},
		{
			// Press SPACE to show candidates window after tone key.
			name:     "TypeSpaceShowsCandidates",
			scenario: "Press SPACE to show candidates window after tone key",
			action: uiauto.Combine("type SPACE to show candidates window",
				its.ClearThenClickFieldAndWaitForActive(inputField),
				kb.TypeAction("5j/ "),
				kb.AccelAction("Space"),
				ui.WaitUntilExists(util.PKCandidatesFinder.First()),
			),
		},
		{
			// Press arrow keys and down arrow to select alternate candidates.
			// Renamed from the duplicated "TypeSpaceShowsCandidates": the
			// duplicate name made the two subtests indistinguishable and the
			// second faillog dump overwrote the first.
			name:     "TypeArrowKeysSelectAlternateCandidates",
			scenario: "Press arrow keys and down arrow to select alternate candidates",
			action: uiauto.Combine("type something, press arrow keys, and down arrow to show candidates window",
				its.ClearThenClickFieldAndWaitForActive(inputField),
				kb.TypeAction("z06wu35j/ jp6"),
				kb.AccelAction("Left"),
				kb.AccelAction("Left"),
				kb.AccelAction("Down"),
				util.GetNthCandidateTextAndThen(tconn, 1, func(text string) uiauto.Action {
					return uiauto.Combine("select another candidate and press enter to confirm it",
						kb.AccelAction("Down"),
						kb.AccelAction("Enter"),
						ui.WaitUntilGone(util.PKCandidatesFinder),
						util.WaitForFieldTextToBe(tconn, inputField.Finder(), "繁體鍾文"),
					)
				}),
			),
		},
	}
	for _, subtest := range subtests {
		s.Run(ctx, subtest.name, func(ctx context.Context, s *testing.State) {
			// subtest.name is already a string; the redundant conversion is gone.
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+subtest.name)
			if err := uiauto.UserAction(
				"Zhuyin PK input",
				subtest.action,
				uc, &useractions.UserActionCfg{
					Attributes: map[string]string{
						useractions.AttributeTestScenario: subtest.scenario,
						useractions.AttributeInputField:   string(inputField),
						useractions.AttributeFeature:      useractions.FeaturePKTyping,
					},
				},
			)(ctx); err != nil {
				s.Fatalf("Failed to validate keys input in %s: %v", inputField, err)
			}
		})
	}
}
|
// Package recode rewrites json-interface objects ( with golang upper case keys )
// to Json dictionaries ( and lowercase keys. )
package recode
import "strings"
type jsonSlice []interface{}
type jsonMap map[string]interface{}
func Dict(src jsonMap) jsonMap {
dst := map[string]interface{}{}
for k, v := range src {
lowerKey := strings.ToLower(k[0:1]) + k[1:]
dst[lowerKey] = Value(v)
}
return dst
}
func Array(src jsonSlice) jsonSlice {
for i, v := range src {
src[i] = Value(v)
}
return src
}
func Value(v interface{}) interface{} {
switch v := v.(type) {
case []interface{}:
return Array(v)
case map[string]interface{}:
return Dict(v)
}
return v
}
|
package ircmsg
import (
"testing"
)
// TestParseMessage verifies that a full IRC PRIVMSG line is parsed into its
// command, trailing text, and parameters.
func TestParseMessage(t *testing.T) {
	msg_string := ":kyle!~kyle@localhost PRIVMSG #tenyks :tenyks: messages are awesome"
	msg := ParseMessage(msg_string)
	if msg == nil {
		// Fatal, not Error: the dereferences below would panic on nil.
		t.Fatal("ParseMessage returned nil for", msg_string)
	}
	if msg.Command != "PRIVMSG" {
		// The original message claimed the expectation was Message{}, which
		// was misleading; report the actual expected value.
		t.Error("Expected", "PRIVMSG", "got", msg.Command)
	}
	if msg.Trail != "tenyks: messages are awesome" {
		t.Error("Expected", "tenyks: messages are awesome", "got", msg.Trail)
	}
	if len(msg.Params) == 0 || msg.Params[0] != "#tenyks" {
		t.Error("Expected", "#tenyks", "got", msg.Params)
	}
}
|
package model
import (
"crypto/rand"
"encoding/base64"
"sync"
"time"
)
// News is an in-memory news article record. ID, CreatedAt, and UpdatedAt
// are assigned by CreateNews; callers supply Title, Image, and Detail.
type News struct {
	ID        string
	Title     string
	Image     string
	Detail    string
	CreatedAt time.Time
	UpdatedAt time.Time
}
var (
	// newsStorage is the package-level in-memory store; every access must
	// hold mutexNews.
	newsStorage []News
	mutexNews   sync.RWMutex
)
// generateID returns a random 128-bit identifier, base64-encoded.
// A crypto/rand failure is fatal: the previous version ignored the error,
// which could silently produce all-zero (colliding) IDs.
func generateID() string {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		panic("model: cannot generate random ID: " + err.Error())
	}
	return base64.StdEncoding.EncodeToString(buf)
}
// CreateNews assigns a fresh ID and creation/update timestamps to news and
// appends it to the in-memory store.
func CreateNews(news News) {
	now := time.Now()
	news.ID = generateID()
	news.CreatedAt = now
	news.UpdatedAt = now
	mutexNews.Lock()
	defer mutexNews.Unlock()
	newsStorage = append(newsStorage, news)
}
// ListNews returns pointers to independent copies of every stored item, so
// callers cannot mutate the store through the results.
func ListNews() []*News {
	mutexNews.RLock()
	defer mutexNews.RUnlock()
	out := make([]*News, 0, len(newsStorage))
	for i := range newsStorage {
		c := newsStorage[i]
		out = append(out, &c)
	}
	return out
}
// GetNews returns a pointer to a copy of the item with the given ID, or nil
// when no such item exists.
func GetNews(id string) *News {
	mutexNews.RLock()
	defer mutexNews.RUnlock()
	for i := range newsStorage {
		if newsStorage[i].ID == id {
			c := newsStorage[i]
			return &c
		}
	}
	return nil
}
// DeleteNews removes the first stored item whose ID matches; it is a no-op
// when the ID is unknown.
func DeleteNews(id string) {
	mutexNews.Lock()
	defer mutexNews.Unlock()
	for i := range newsStorage {
		if newsStorage[i].ID == id {
			newsStorage = append(newsStorage[:i], newsStorage[i+1:]...)
			return
		}
	}
}
|
package main
import (
"fmt"
)
// gopher models a gopher with a name, an age, and an adulthood flag.
type gopher struct {
	name    string
	age     int
	isAdult bool
}

// jump describes the gopher's jumping ability based on its age.
func (g gopher) jump() string {
	if g.age >= 64 {
		return g.name + " can still jump"
	}
	return g.name + " can jump HIGH"
}
// This will pass it in as a copy
// func validateAge(g gopher) {
// g.isAdult = g.age >= 21
// }
// validateAge marks g as an adult when it is at least 21. The receiver is a
// pointer so the update is visible to the caller — the commented-out
// value-receiver version above only mutated a copy.
func validateAge(g *gopher){
	g.isAdult = g.age >= 21
}
// main demonstrates pointer basics: taking an address, dereferencing, and
// observing writes through the pointer.
func main() {
	a := 30
	aPtr := &a
	// Bug fix: the original used fmt.Println with %p/%d verbs, which does no
	// formatting — the verbs were printed literally. Printf formats them.
	fmt.Printf("address of aPtr %p\n", &aPtr)
	fmt.Printf("value of aPtr %p\n", aPtr)
	fmt.Printf("address of a %p\n", &a)
	fmt.Printf("value of aPtr dereference %d\n", *aPtr)
	a = 20
	fmt.Printf("value of aPtr dereference after a change %d\n", *aPtr)
	*aPtr = 500
	fmt.Printf("value of a after aPtr change %d\n", a)
}
|
package main
import "Craftorio/game"
// main constructs the game and starts it.
func main() {
	// Use a local name that does not shadow the imported package.
	g := game.New()
	g.Init()
}
|
package main
//1450. 在既定时间做作业的学生人数
//给你两个整数数组 startTime(开始时间)和 endTime(结束时间),并指定一个整数 queryTime 作为查询时间。
//
//已知,第 i 名学生在 startTime[i] 时开始写作业并于 endTime[i] 时完成作业。
//
//请返回在查询时间 queryTime 时正在做作业的学生人数。形式上,返回能够使 queryTime 处于区间 [startTime[i], endTime[i]](含)的学生人数。
//
//
//
//示例 1:
//
//输入:startTime = [1,2,3], endTime = [3,2,7], queryTime = 4
//输出:1
//解释:一共有 3 名学生。
//第一名学生在时间 1 开始写作业,并于时间 3 完成作业,在时间 4 没有处于做作业的状态。
//第二名学生在时间 2 开始写作业,并于时间 2 完成作业,在时间 4 没有处于做作业的状态。
//第三名学生在时间 3 开始写作业,预计于时间 7 完成作业,这是是唯一一名在时间 4 时正在做作业的学生。
//示例 2:
//
//输入:startTime = [4], endTime = [4], queryTime = 4
//输出:1
//解释:在查询时间只有一名学生在做作业。
//示例 3:
//
//输入:startTime = [4], endTime = [4], queryTime = 5
//输出:0
//示例 4:
//
//输入:startTime = [1,1,1,1], endTime = [1,3,2,4], queryTime = 7
//输出:0
//示例 5:
//
//输入:startTime = [9,8,7,6,5,4,3,2,1], endTime = [10,10,10,10,10,10,10,10,10], queryTime = 5
//输出:5
//
//
//提示:
//
//startTime.length == endTime.length
//1 <= startTime.length <= 100
//1 <= startTime[i] <= endTime[i] <= 1000
//1 <=queryTime <= 1000
// busyStudent returns how many students are doing homework at queryTime,
// i.e. those with startTime[i] <= queryTime <= endTime[i] (inclusive).
func busyStudent(startTime []int, endTime []int, queryTime int) int {
	count := 0
	for i, start := range startTime {
		if start <= queryTime && queryTime <= endTime[i] {
			count++
		}
	}
	return count
}
|
package Pigeon
import "github.com/gorilla/websocket"
// MessagesRepo is an in-memory, append-only collection of messages.
// NOTE(review): access is not synchronized — confirm callers are
// single-goroutine before relying on concurrent use.
type MessagesRepo struct {
	messages []*Message
}
// WebSocketsRepo holds the currently tracked websocket connections.
type WebSocketsRepo struct {
	connections []*websocket.Conn
}
// Package-level singleton repositories shared by the application.
var MessagesRepository = MessagesRepo{}
var WebSocketsRepository = WebSocketsRepo{}
func (r *MessagesRepo) Add(m *Message) {
if r.messages == nil {
r.messages = []*Message{}
}
r.messages = append(r.messages, m)
} |
package standard
import (
. "github.com/ionous/sashimi/script"
)
//
// init registers the default "player" actor with the script system.
func init() {
	AddScript(func(s *Script) {
		// FIX: the player should really be a global variable; not an actor instance.
		// ( or, possibly a game object type of which there is one, with a relation of an actor. )
		s.The("actor",
			Called("player"),
			Is("scenery"),
		)
	})
}
|
package googlecloud
import (
"context"
"fmt"
"net"
"strconv"
"sync"
"testing"
"time"
vkit "cloud.google.com/go/logging/apiv2"
"github.com/golang/protobuf/ptypes"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"github.com/observiq/stanza/entry"
"github.com/observiq/stanza/operator/buffer"
"github.com/observiq/stanza/operator/helper"
"github.com/observiq/stanza/testutil"
"github.com/stretchr/testify/require"
"google.golang.org/api/option"
"google.golang.org/genproto/googleapis/api/monitoredres"
sev "google.golang.org/genproto/googleapis/logging/type"
logpb "google.golang.org/genproto/googleapis/logging/v2"
"google.golang.org/grpc"
)
// googleCloudTestCase pairs an output config and an input entry with the
// WriteLogEntriesRequest the output is expected to send.
type googleCloudTestCase struct {
	name           string
	config         *GoogleCloudOutputConfig
	input          *entry.Entry
	expectedOutput *logpb.WriteLogEntriesRequest
}
// googleCloudBasicConfig returns a minimal output config backed by a memory
// buffer with a short flush delay so tests complete quickly.
func googleCloudBasicConfig() *GoogleCloudOutputConfig {
	memBuffer := buffer.NewMemoryBufferConfig()
	memBuffer.MaxChunkDelay = helper.NewDuration(50 * time.Millisecond)
	cfg := NewGoogleCloudOutputConfig("test_id")
	cfg.ProjectID = "test_project_id"
	cfg.BufferConfig = buffer.Config{Builder: memBuffer}
	return cfg
}
// googleCloudBasicWriteEntriesRequest returns the skeleton request every
// test case expects: the default log name and a global monitored resource
// for the test project. Callers fill in Entries.
func googleCloudBasicWriteEntriesRequest() *logpb.WriteLogEntriesRequest {
	return &logpb.WriteLogEntriesRequest{
		LogName: "projects/test_project_id/logs/default",
		Resource: &monitoredres.MonitoredResource{
			Type: "global",
			Labels: map[string]string{
				"project_id": "test_project_id",
			},
		},
	}
}
// googleCloudTimes returns a fixed, deterministic timestamp as both a
// time.Time and its protobuf form.
//
// The previous implementation called time.Parse(time.RFC3339, time.RFC3339),
// which always fails (the layout string is not itself a valid RFC3339
// value), discarded the error, and silently produced the zero time. An
// explicit constant keeps the determinism while making the intent visible.
func googleCloudTimes() (time.Time, *tspb.Timestamp) {
	now := time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)
	// TimestampProto cannot fail for an in-range time like this one.
	protoTs, _ := ptypes.TimestampProto(now)
	return now, protoTs
}
// TestGoogleCloudOutput runs each case's entry through a built output
// against a stub gRPC logging server and compares the request it emits
// against the expected WriteLogEntriesRequest.
func TestGoogleCloudOutput(t *testing.T) {
	now, protoTs := googleCloudTimes()
	cases := []googleCloudTestCase{
		{
			"Basic",
			googleCloudBasicConfig(),
			&entry.Entry{
				Timestamp: now,
				Record: map[string]interface{}{
					"message": "test message",
				},
			},
			func() *logpb.WriteLogEntriesRequest {
				req := googleCloudBasicWriteEntriesRequest()
				req.Entries = []*logpb.LogEntry{
					{
						Timestamp: protoTs,
						Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jsonMapToProtoStruct(map[string]interface{}{
							"message": "test message",
						})},
					},
				}
				return req
			}(),
		},
		{
			"LogNameField",
			func() *GoogleCloudOutputConfig {
				c := googleCloudBasicConfig()
				f := entry.NewRecordField("log_name")
				c.LogNameField = &f
				return c
			}(),
			&entry.Entry{
				Timestamp: now,
				Record: map[string]interface{}{
					"message":  "test message",
					"log_name": "mylogname",
				},
			},
			func() *logpb.WriteLogEntriesRequest {
				req := googleCloudBasicWriteEntriesRequest()
				req.Entries = []*logpb.LogEntry{
					{
						LogName:   "projects/test_project_id/logs/mylogname",
						Timestamp: protoTs,
						Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jsonMapToProtoStruct(map[string]interface{}{
							"message": "test message",
						})},
					},
				}
				return req
			}(),
		},
		{
			"Labels",
			googleCloudBasicConfig(),
			&entry.Entry{
				Timestamp: now,
				Labels: map[string]string{
					"label1": "value1",
				},
				Record: map[string]interface{}{
					"message": "test message",
				},
			},
			func() *logpb.WriteLogEntriesRequest {
				req := googleCloudBasicWriteEntriesRequest()
				req.Entries = []*logpb.LogEntry{
					{
						Labels: map[string]string{
							"label1": "value1",
						},
						Timestamp: protoTs,
						Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jsonMapToProtoStruct(map[string]interface{}{
							"message": "test message",
						})},
					},
				}
				return req
			}(),
		},
		googleCloudSeverityTestCase(entry.Catastrophe, sev.LogSeverity_EMERGENCY),
		googleCloudSeverityTestCase(entry.Severity(95), sev.LogSeverity_EMERGENCY),
		googleCloudSeverityTestCase(entry.Emergency, sev.LogSeverity_EMERGENCY),
		googleCloudSeverityTestCase(entry.Severity(85), sev.LogSeverity_ALERT),
		googleCloudSeverityTestCase(entry.Alert, sev.LogSeverity_ALERT),
		googleCloudSeverityTestCase(entry.Severity(75), sev.LogSeverity_CRITICAL),
		googleCloudSeverityTestCase(entry.Critical, sev.LogSeverity_CRITICAL),
		googleCloudSeverityTestCase(entry.Severity(65), sev.LogSeverity_ERROR),
		googleCloudSeverityTestCase(entry.Error, sev.LogSeverity_ERROR),
		googleCloudSeverityTestCase(entry.Severity(55), sev.LogSeverity_WARNING),
		googleCloudSeverityTestCase(entry.Warning, sev.LogSeverity_WARNING),
		googleCloudSeverityTestCase(entry.Severity(45), sev.LogSeverity_NOTICE),
		googleCloudSeverityTestCase(entry.Notice, sev.LogSeverity_NOTICE),
		googleCloudSeverityTestCase(entry.Severity(35), sev.LogSeverity_INFO),
		googleCloudSeverityTestCase(entry.Info, sev.LogSeverity_INFO),
		googleCloudSeverityTestCase(entry.Severity(25), sev.LogSeverity_DEBUG),
		googleCloudSeverityTestCase(entry.Debug, sev.LogSeverity_DEBUG),
		googleCloudSeverityTestCase(entry.Severity(15), sev.LogSeverity_DEBUG),
		googleCloudSeverityTestCase(entry.Trace, sev.LogSeverity_DEBUG),
		googleCloudSeverityTestCase(entry.Severity(5), sev.LogSeverity_DEBUG),
		googleCloudSeverityTestCase(entry.Default, sev.LogSeverity_DEFAULT),
		{
			"TraceAndSpanFields",
			func() *GoogleCloudOutputConfig {
				c := googleCloudBasicConfig()
				traceField := entry.NewRecordField("trace")
				spanIDField := entry.NewRecordField("span_id")
				c.TraceField = &traceField
				c.SpanIDField = &spanIDField
				return c
			}(),
			&entry.Entry{
				Timestamp: now,
				Record: map[string]interface{}{
					"message": "test message",
					"trace":   "projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824",
					"span_id": "000000000000004a",
				},
			},
			func() *logpb.WriteLogEntriesRequest {
				req := googleCloudBasicWriteEntriesRequest()
				req.Entries = []*logpb.LogEntry{
					{
						Trace:     "projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824",
						SpanId:    "000000000000004a",
						Timestamp: protoTs,
						Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jsonMapToProtoStruct(map[string]interface{}{
							"message": "test message",
						})},
					},
				}
				return req
			}(),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			buildContext := testutil.NewBuildContext(t)
			ops, err := tc.config.Build(buildContext)
			// Bug fix: check the build error BEFORE indexing ops. The
			// original read ops[0] first, which panics on a failed build
			// instead of reporting the error.
			require.NoError(t, err)
			op := ops[0]
			conn, received, stop, err := startServer()
			require.NoError(t, err)
			defer stop()
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			client, err := vkit.NewClient(ctx, option.WithGRPCConn(conn))
			require.NoError(t, err)
			op.(*GoogleCloudOutput).client = client
			op.(*GoogleCloudOutput).startFlushing()
			defer op.Stop()
			err = op.Process(context.Background(), tc.input)
			require.NoError(t, err)
			select {
			case req := <-received:
				// Apparently there is occasionally an infinite loop in req.stat
				// and testify freezes up trying to infinitely unpack it
				// So instead, just compare the meaningful portions
				require.Equal(t, tc.expectedOutput.LogName, req.LogName)
				require.Equal(t, tc.expectedOutput.Labels, req.Labels)
				require.Equal(t, tc.expectedOutput.Resource, req.Resource)
				require.Equal(t, tc.expectedOutput.Entries, req.Entries)
			case <-time.After(time.Second):
				require.FailNow(t, "Timed out waiting for writeLogEntries request")
			}
		})
	}
}
// googleCloudSeverityTestCase builds a case asserting that stanza severity
// s maps to the given Cloud Logging severity.
func googleCloudSeverityTestCase(s entry.Severity, expected sev.LogSeverity) googleCloudTestCase {
	now, protoTs := googleCloudTimes()
	req := googleCloudBasicWriteEntriesRequest()
	req.Entries = []*logpb.LogEntry{
		{
			Severity:  expected,
			Timestamp: protoTs,
			Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jsonMapToProtoStruct(map[string]interface{}{
				"message": "test message",
			})},
		},
	}
	return googleCloudTestCase{
		fmt.Sprintf("Severity%s", s),
		googleCloudBasicConfig(),
		&entry.Entry{
			Timestamp: now,
			Severity:  s,
			Record: map[string]interface{}{
				"message": "test message",
			},
		},
		req,
	}
}
// googleCloudProtobufTest names a record value that setPayload must accept.
type googleCloudProtobufTest struct {
	name   string
	record interface{}
}

// Run asserts that setPayload converts g.record into a LogEntry payload
// without error.
func (g *googleCloudProtobufTest) Run(t *testing.T) {
	t.Run(g.name, func(t *testing.T) {
		e := &logpb.LogEntry{}
		err := setPayload(e, g.record)
		require.NoError(t, err)
	})
}
// TestGoogleCloudSetPayload checks that setPayload accepts a wide variety
// of record shapes: scalars, byte slices, typed maps, nested containers,
// and (anonymous or named) structs.
func TestGoogleCloudSetPayload(t *testing.T) {
	cases := []googleCloudProtobufTest{
		{
			"string",
			"test",
		},
		{
			"[]byte",
			[]byte("test"),
		},
		{
			"map[string]string",
			map[string]string{"test": "val"},
		},
		{
			"Nested_[]string",
			map[string]interface{}{
				"sub": []string{"1", "2"},
			},
		},
		{
			"Nested_[]int",
			map[string]interface{}{
				"sub": []int{1, 2},
			},
		},
		{
			"Nested_uint32",
			map[string]interface{}{
				"sub": uint32(32),
			},
		},
		{
			"DeepNested_map",
			map[string]interface{}{
				"0": map[string]map[string]map[string]string{
					"1": {"2": {"3": "test"}},
				},
			},
		},
		{
			"DeepNested_slice",
			map[string]interface{}{
				"0": [][][]string{{{"0", "1"}}},
			},
		},
		{
			"AnonymousStruct",
			map[string]interface{}{
				"0": struct{ Field string }{Field: "test"},
			},
		},
		{
			"NamedStruct",
			map[string]interface{}{
				"0": time.Now(),
			},
		},
	}
	for _, tc := range cases {
		tc.Run(t)
	}
}
// Adapted from https://github.com/googleapis/google-cloud-go/blob/master/internal/testutil/server.go
//
// loggingHandler is a stub LoggingServiceV2 server that forwards every
// WriteLogEntries request onto a channel for test inspection.
type loggingHandler struct {
	logpb.LoggingServiceV2Server
	received chan *logpb.WriteLogEntriesRequest
}

// WriteLogEntries records the request and acknowledges with an empty response.
func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) {
	h.received <- req
	return &logpb.WriteLogEntriesResponse{}, nil
}
// startServer runs an in-process gRPC logging service on an ephemeral port
// and returns a client connection to it, the channel of received
// WriteLogEntries requests, and a function that stops the server.
func startServer() (*grpc.ClientConn, chan *logpb.WriteLogEntriesRequest, func(), error) {
	reqCh := make(chan *logpb.WriteLogEntriesRequest, 10)
	srv := grpc.NewServer()
	logpb.RegisterLoggingServiceV2Server(srv, &loggingHandler{received: reqCh})
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, nil, nil, err
	}
	go srv.Serve(lis)
	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		return nil, nil, nil, err
	}
	return conn, reqCh, srv.Stop, nil
}
// googleCloudOutputBenchmark describes one benchmark case: the entry to
// send repeatedly and an optional config mutation applied before building.
type googleCloudOutputBenchmark struct {
	name      string
	entry     *entry.Entry
	configMod func(*GoogleCloudOutputConfig)
}
// Run drives the output with b.N copies of g.entry against an in-process
// gRPC server, and waits until the stub server has received them all
// before stopping the operator.
func (g *googleCloudOutputBenchmark) Run(b *testing.B) {
	conn, received, stop, err := startServer()
	require.NoError(b, err)
	defer stop()
	client, err := vkit.NewClient(context.Background(), option.WithGRPCConn(conn))
	require.NoError(b, err)
	cfg := NewGoogleCloudOutputConfig(g.name)
	cfg.ProjectID = "test_project_id"
	if g.configMod != nil {
		g.configMod(cfg)
	}
	ops, err := cfg.Build(testutil.NewBuildContext(b))
	require.NoError(b, err)
	op := ops[0]
	op.(*GoogleCloudOutput).client = client
	op.(*GoogleCloudOutput).timeout = 30 * time.Second
	defer op.(*GoogleCloudOutput).flusher.Stop()
	b.ResetTimer()
	var wg sync.WaitGroup
	// Producer: feed b.N entries through the operator.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < b.N; i++ {
			op.Process(context.Background(), g.entry)
		}
	}()
	// Consumer: count entries as the stub server receives them, so timing
	// includes the full flush path.
	wg.Add(1)
	go func() {
		defer wg.Done()
		i := 0
		for i < b.N {
			req := <-received
			i += len(req.Entries)
		}
	}()
	wg.Wait()
	err = op.Stop()
	require.NoError(b, err)
}
// BenchmarkGoogleCloudOutput measures operator throughput across a range of
// record shapes (scalar, flat/large/deep maps, labeled entries) plus one
// case with request compression disabled.
func BenchmarkGoogleCloudOutput(b *testing.B) {
	t := time.Date(2007, 01, 01, 10, 15, 32, 0, time.UTC)
	cases := []googleCloudOutputBenchmark{
		{
			"Simple",
			&entry.Entry{
				Timestamp: t,
				Record:    "test",
			},
			nil,
		},
		{
			"MapRecord",
			&entry.Entry{
				Timestamp: t,
				Record:    mapOfSize(1, 0),
			},
			nil,
		},
		{
			"LargeMapRecord",
			&entry.Entry{
				Timestamp: t,
				Record:    mapOfSize(30, 0),
			},
			nil,
		},
		{
			"DeepMapRecord",
			&entry.Entry{
				Timestamp: t,
				Record:    mapOfSize(1, 10),
			},
			nil,
		},
		{
			"Labels",
			&entry.Entry{
				Timestamp: t,
				Record:    "test",
				Labels: map[string]string{
					"test": "val",
				},
			},
			nil,
		},
		{
			"NoCompression",
			&entry.Entry{
				Timestamp: t,
				Record:    "test",
			},
			func(cfg *GoogleCloudOutputConfig) {
				cfg.UseCompression = false
			},
		},
	}
	for _, tc := range cases {
		b.Run(tc.name, tc.Run)
	}
}
// mapOfSize builds a nested test record with the given number of keys per
// level. depth == 0 produces string leaf values ("k<i>" -> "v<i>");
// otherwise each key maps to another map one level shallower.
func mapOfSize(keys, depth int) map[string]interface{} {
	result := make(map[string]interface{}, keys)
	for n := 0; n < keys; n++ {
		key := "k" + strconv.Itoa(n)
		if depth == 0 {
			result[key] = "v" + strconv.Itoa(n)
		} else {
			result[key] = mapOfSize(keys, depth-1)
		}
	}
	return result
}
|
package manager
import (
"github.com/liasece/micchaos/ccmd"
"github.com/liasece/micchaos/mongodb"
"github.com/liasece/micchaos/playermodule/boxes"
"github.com/liasece/micserver/log"
"github.com/liasece/micserver/module"
"github.com/liasece/micserver/roc"
"go.mongodb.org/mongo-driver/bson"
)
// PlayerDocManager caches player documents in the module's ROC (remote
// object cache) and persists them to the MongoDB user-info collection.
type PlayerDocManager struct {
	*log.Logger
	mod             *module.BaseModule
	mongo_userinfos *mongodb.UserInfos
	playerRoc       *roc.ROC
}
// Init wires the manager to its owning module and the MongoDB user-info
// collection, and fetches the player ROC from the module.
func (this *PlayerDocManager) Init(mod *module.BaseModule,
	userinfos *mongodb.UserInfos) {
	this.mod = mod
	this.mongo_userinfos = userinfos
	this.playerRoc = this.mod.GetROC(ccmd.ROCTypePlayer)
}
// getPlayerDoc returns the cached player object for uuid, or nil when it is
// not registered in the ROC (or is registered with an unexpected type).
func (this *PlayerDocManager) getPlayerDoc(uuid string) *boxes.Player {
	if vi, ok := this.playerRoc.GetObj(uuid); ok {
		if p, ok := vi.(*boxes.Player); ok {
			return p
		}
	}
	return nil
}

// GetPlayerDoc is the exported accessor for the cached player document;
// it does not fall back to the database.
func (this *PlayerDocManager) GetPlayerDoc(uuid string) *boxes.Player {
	return this.getPlayerDoc(uuid)
}
// loadOrStore registers p in the ROC under uuid and returns the player to use.
// NOTE(review): when the object was already registered (isLoad == true) this
// returns the argument p rather than the stored object — verify GetOrRegObj's
// semantics; if isLoad means "existing object returned", two distinct player
// instances could coexist for one uuid.
func (this *PlayerDocManager) loadOrStore(
	uuid string, p *boxes.Player) *boxes.Player {
	if vi, isLoad := this.playerRoc.GetOrRegObj(uuid, p); !isLoad {
		if p, ok := vi.(*boxes.Player); ok {
			this.Info("mod.ROCManager.RegObj OK")
			return p
		}
	}
	return p
}
// getPlayerFromDB loads the player document for uuid from MongoDB and
// initializes it (logger clone, module binding, post-load hook).
// Returns nil when the lookup fails.
func (this *PlayerDocManager) getPlayerFromDB(uuid string) *boxes.Player {
	readPlayer := &boxes.Player{}
	err := this.mongo_userinfos.SelectOneByKey(bson.M{
		"account.uuid": uuid,
	}, readPlayer)
	if err != nil {
		this.Error("mongo_userinfos.SelectOneByKey err:%s", err.Error())
		return nil
	}
	readPlayer.Logger = this.Logger.Clone()
	readPlayer.Init(this.mod)
	readPlayer.AfterLoad()
	return readPlayer
}
// GetPlayerDocMust returns the player document for uuid, loading it from the
// database (and registering it in the ROC) when it is not already cached.
// Returns nil when the player cannot be loaded from the database either.
func (this *PlayerDocManager) GetPlayerDocMust(uuid string) *boxes.Player {
	p := this.getPlayerDoc(uuid)
	if p == nil {
		p = this.getPlayerFromDB(uuid)
		if p == nil {
			// DB load failed; don't register a nil object in the ROC.
			return nil
		}
		p = this.loadOrStore(uuid, p)
	}
	return p
}
// SavePlayerDocNow immediately persists the player document to MongoDB;
// failures are logged but not returned.
func (this *PlayerDocManager) SavePlayerDocNow(player *boxes.Player) {
	_, err := this.mongo_userinfos.Update(player)
	if err != nil {
		this.Error("mongo_userinfos.Update err:%s", err.Error())
	}
}

// InsertPlayerDocNow inserts (or updates, via upsert) the player document in
// MongoDB; failures are logged but not returned.
func (this *PlayerDocManager) InsertPlayerDocNow(player *boxes.Player) {
	_, err := this.mongo_userinfos.Upsert(player)
	if err != nil {
		this.Error("mongo_userinfos.Upsert err:%s", err.Error())
	}
}
|
package main
// task is a single TODO item exposed over the JSON API.
// NOTE(review): the capitalized JSON keys ("ID", "Name", "Content") are part
// of the wire format — confirm clients before renaming them.
type task struct {
	ID      int    `json:"ID"`
	Name    string `json:"Name"`
	Content string `json:"Content"`
}
// tasks is the in-memory task store, seeded with one sample entry.
var tasks = allTasks{
	{
		ID:      1,
		Name:    "Test Task",
		Content: "Some first content for test",
	},
}

// allTasks is the collection type backing the task store.
type allTasks []task
|
package multierror_test
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/socialpoint-labs/bsk/multierror"
)
// TestAppend covers multierror.Append's contract: nil for no/only-nil
// inputs, accumulation of real errors, flattening of nested multierrors,
// and skipping of interleaved nils.
func TestAppend(t *testing.T) {
	t.Parallel()
	t.Run("it returns nil if no errors", func(t *testing.T) {
		assert.Nil(t, multierror.Append(multierror.Append()))
		assert.Nil(t, multierror.Append(nil))
		var e error
		assert.Nil(t, multierror.Append(e))
	})
	t.Run("it appends multiple errors", func(t *testing.T) {
		e1 := errors.New("e1")
		e2 := errors.New("e2")
		e3 := errors.New("e3")
		assert.Error(t, multierror.Append(e1, e2, e3))
		assert.Len(t, multierror.Append(e1, e2, e3), 3)
	})
	t.Run("it flattens multiple errors", func(t *testing.T) {
		e1 := errors.New("e1")
		e2 := errors.New("e2")
		e3 := errors.New("e3")
		me := multierror.Append(e1, e2, e3)
		// me contributes its three wrapped errors, not one element.
		assert.Len(t, multierror.Append(e1, e2, me, e3), 6)
	})
	t.Run("it does not append nil errors", func(t *testing.T) {
		e1 := errors.New("e1")
		e2 := errors.New("e2")
		e3 := errors.New("e3")
		assert.Len(t, multierror.Append(nil, e1, e2, nil, e3, nil), 3)
	})
}
// TestWalk checks that Walk visits every wrapped error in order, and that a
// plain (non-multi) error is walked as a single element.
func TestWalk(t *testing.T) {
	t.Parallel()
	first := errors.New("e1")
	second := errors.New("e2")
	third := errors.New("e3")
	// Concatenate the message of every visited error so order is observable.
	var visited string
	collect := func(err error) { visited += err.Error() }
	multierror.Walk(multierror.Append(first, second, third), collect)
	assert.Equal(t, "e1e2e3", visited)
	visited = ""
	multierror.Walk(errors.New("test error"), collect)
	assert.Equal(t, "test error", visited)
}
|
package lc
import "math"
// Time: O(n*m)
// Benchmark: 4ms 6mb | 99%
//
// coinChange returns the fewest coins needed to make up amount, or -1 when
// no combination of the given denominations can reach it (bottom-up DP).
func coinChange(coins []int, amount int) int {
	const unreachable = math.MaxInt32
	// dp[v] holds the fewest coins summing to v; dp[0] == 0.
	dp := make([]int, amount+1)
	for target := 1; target <= amount; target++ {
		best := unreachable
		for _, coin := range coins {
			if coin <= target && dp[target-coin]+1 < best {
				best = dp[target-coin] + 1
			}
		}
		dp[target] = best
	}
	if dp[amount] == unreachable {
		return -1
	}
	return dp[amount]
}
|
package strategy02
import (
"finantial/ema"
"finantial/rsi"
"fmt"
"log"
. "markets/exchange"
"markets/generic"
"markets/poloniex"
"time"
tgbotapi "gopkg.in/telegram-bot-api.v4"
)
// Tri-state values tracking whether the fast EMA is above the slow EMA.
var UNDEF = int32(-1)
var TRUE = int32(1)
var FALSE = int32(0)

// exchange is the market backend used for price quotes.
var exchange Exchange = poloniex.Poloniex{}

// bot is the Telegram client used for trade notifications.
var bot *tgbotapi.BotAPI
// Start runs the EMA-crossover + RSI trading loop for the given coin pair.
// It polls the exchange every `period` seconds, spends `training_iters`
// ticks warming up the indicators, then buys on an upward fast/slow EMA
// cross (confirmed by the RSI buy level) and sells on a downward cross
// (confirmed by the RSI sell level). Runs forever.
// NOTE(review): the Telegram bot token is hard-coded below — it is exposed
// in source control and should be rotated and moved to configuration.
func Start(buycoin string, sellcoin string, invest float64, fee float64, period int, training_iters int, fast int, slow int, rsi_win_len int, rsi_buy_level float64, rsi_sell_level float64) {
	var ema_fast ema.TFinantial_EMA
	var ema_slow ema.TFinantial_EMA
	var rsi rsi.TFinantial_RSI
	var ema_vol ema.TFinantial_EMA
	var err error
	bot, err = tgbotapi.NewBotAPI("574939541:AAEvcnRyxRK1ICJ_y6bwvWyqimqqVcHJQow")
	if err != nil {
		log.Panic(err)
	}
	ema_fast.Reset(fast)
	ema_slow.Reset(slow)
	ema_vol.Reset(10)
	rsi.Reset(rsi_win_len, rsi_buy_level, rsi_sell_level)
	var market generic.TMarket
	market.Reset(buycoin, sellcoin, invest, fee)
	// UNDEF until the training window completes.
	var fast_gt_slow = int32(UNDEF)
	iter := 0
	pair := exchange.FormatCoinPair(buycoin, sellcoin)
	notify("Start running " + pair)
	fmt.Println(pair)
	for {
		time.Sleep(time.Duration(period) * time.Second)
		price, err := exchange.DoGet(pair)
		if err != nil {
			fmt.Println("Error en el doget")
			continue
		}
		// Feed the new quote to every indicator.
		ema_fast.NewPrice(price)
		ema_slow.NewPrice(price)
		rsi.NewPrice(price)
		fmt.Println("price: ", fmt.Sprintf("%.8f", price),
			"\tema_fast: ", fmt.Sprintf("%.8f", ema_fast.Ema()),
			"\tema_slow: ", fmt.Sprintf("%.8f", ema_slow.Ema()),
			"\trsi: ", fmt.Sprintf("%.8f", rsi.RSI()),
			"\ttime: ", time.Now())
		if iter < training_iters {
			iter++
			continue
		}
		// End of training, start trading
		// Initialize fast_gt_slow only once after training
		if fast_gt_slow == UNDEF {
			if ema_fast.Ema() > ema_slow.Ema() {
				fast_gt_slow = TRUE
			} else {
				fast_gt_slow = FALSE
			}
			fmt.Println("Training ready. Starting trade now...")
			continue
		}
		// fast_gt_slow already defined
		/*
			if (market.InsideMarket()) {
				if (price < market.LastBuyPrice()) {
					market.DoSell(price)
					fmt.Println("********************************** Activated: CONTROL1")
					fmt.Println("********************************** VENDE a: ", market.LastSellPrice())
					fmt.Println("********************************** FIAT: ", market.Fiat())
				} else {
					fmt.Println("===> He comprado y esta subiendo, GOOD SIGNAL")
				}
			} */
		if fast_gt_slow == FALSE {
			if ema_fast.Ema() < ema_slow.Ema() {
				fmt.Println("ema_fast < ema_slow... Se mantiene la tendencia de bajada")
				// tendency is maintained (falling price)
				continue
			} else {
				// Upward cross: candidate buy signal, gated by RSI.
				fmt.Println("ema_fast > ema_slow... Cambio de tendencia, comprobemos si se puede comprar")
				if market.InsideMarket() == false {
					fmt.Println("InsideMarket = false")
					if rsi.Buy() {
						market.DoBuy(price)
						fmt.Printf("********************************** Buy at: %.8f\n", market.LastBuyPrice())
						fmt.Printf("********************************** CRYPTO: %.8f\n", market.Crypto())
						notify(fmt.Sprintf(" Buy at: %.8f\n CRYPTO: %.8f\n", market.LastBuyPrice(), market.Crypto()))
					} else {
						fmt.Println("Improper RSI to buy: ", rsi.RSI(), "rsi.BuyLevel = ", rsi.BuyLevel())
						continue
					}
				} else {
					fmt.Println("===> Tocaba comprar pero ya estoy dentro")
				}
				fast_gt_slow = TRUE
			}
		} else {
			if ema_fast.Ema() > ema_slow.Ema() {
				fmt.Println("ema_fast > ema_slow... Se mantiene la tendencia de subida")
				// tendency is maintained (climbing price)
				continue
			} else {
				// Downward cross: candidate sell signal, gated by RSI.
				fmt.Println("ema_fast < ema_slow... Cambio de tendencia, comprobemos si se puede vender")
				if market.InsideMarket() == true {
					fmt.Println("InsideMarket = true")
					if rsi.Sell() {
						market.DoSell(price)
						fmt.Printf("********************************** Sell at: %.8f\n", market.LastSellPrice())
						fmt.Printf("********************************** FIAT: %.8f\n", market.Fiat())
						notify(fmt.Sprintf(" Sell at: %.8f\n FIAT: %.8f\n", market.LastSellPrice(), market.Fiat()))
					} else {
						fmt.Println("Improper RSI to sell: ", rsi.RSI(), "rsi.SellLevel = ", rsi.SellLevel())
						continue
					}
				} else {
					fmt.Println("===> Tocaba vender pero estoy fuera")
				}
				fast_gt_slow = FALSE
			}
		}
	}
}
// notify pushes a status message to the configured Telegram chat.
// Send failures are logged instead of silently discarded.
func notify(message string) {
	msg := tgbotapi.NewMessage(-211932566, message)
	if _, err := bot.Send(msg); err != nil {
		log.Println("notify: telegram send failed:", err)
	}
}
|
package db
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
)
// Game is one stored match: participating line-ups plus goal and
// substitution events, keyed by the source page URL as _id.
type Game struct {
	ID            string              `bson:"_id"`
	Competition   string              `bson:"competition"`
	Home          []string            `bson:"home"`
	Away          []string            `bson:"away"`
	Goals         []Goal              `bson:"goals"`
	Substitutions []Substitution      `bson:"subs"`
	Date          primitive.Timestamp `bson:"date"`
}

// Goal records the minute of a goal and whether the home side scored it.
type Goal struct {
	Home bool
	Min  int
}

// Substitution records a player swap: minute, side, and who came in/out.
type Substitution struct {
	Home bool
	Min  int
	In   string
	Out  string
}

// ColGames is the MongoDB collection name for games.
var ColGames string = "games"
// InsertGameInfo stores one game document, using the source URL as the _id
// so re-inserting the same page fails with a duplicate-key error.
// NOTE(review): the Game.Date field is never populated here — confirm
// whether the match date is meant to be stored on insert.
func (c *MongoDBClient) InsertGameInfo(competition, url string, home, away []string,
	goals []Goal, subs []Substitution) error {
	_, err := c.Client.Database(c.Database).Collection(ColGames).InsertOne(
		context.TODO(),
		&Game{
			ID:            url,
			Competition:   competition,
			Home:          home,
			Away:          away,
			Goals:         goals,
			Substitutions: subs,
		},
	)
	return err
}
// GameExists reports whether a game document with the given id exists.
// A missing document is not an error: it yields (false, nil). Any other
// lookup failure is returned to the caller.
func (c *MongoDBClient) GameExists(id string) (bool, error) {
	// ctx, cancel := context.WithTimeout(context.Background(), ctxDeadline)
	// defer cancel()
	err := c.Client.Database(c.Database).Collection(ColGames).FindOne(
		context.TODO(),
		bson.M{"_id": id},
	).Err()
	if err == mongo.ErrNoDocuments {
		// Not found is the expected negative answer, not a failure.
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
|
package models
import (
"bytes"
"encoding/json"
"strings"
"time"
)
// StringArray stores a []string column encoded as "{a,b,c}" in the database.
type StringArray []string

// FromDB decodes the database representation "{a,b,c}" into the slice.
// An empty input leaves the value untouched; "{}" yields an empty array.
// NOTE(review): values are split on "," with no quote/escape handling —
// elements containing commas are not supported; confirm against the schema.
func (s *StringArray) FromDB(bts []byte) error {
	if len(bts) == 0 {
		return nil
	}
	str := string(bts)
	str = strings.TrimPrefix(str, "{")
	str = strings.TrimSuffix(str, "}")
	if str == "" {
		// "{}" previously decoded to a single empty element; an empty
		// container must decode to an empty array instead.
		*s = StringArray{}
		return nil
	}
	*s = StringArray(strings.Split(str, ","))
	return nil
}
// ToDB encodes the array in the "{a,b,c}" database format (values are not
// quoted or escaped).
func (s *StringArray) ToDB() ([]byte, error) {
	return serializeBigIntArray(*s, "{", "}"), nil
}
// MarshalJSON renders the array as a JSON string array. json.Marshal is used
// so that quotes, backslashes and control characters inside values are
// escaped correctly (the previous hand-built output produced invalid JSON
// for such values); a nil array still renders as [] for compatibility.
func (arr StringArray) MarshalJSON() ([]byte, error) {
	if arr == nil {
		return []byte("[]"), nil
	}
	return json.Marshal([]string(arr))
}
// UnmarshalJSON decodes a JSON string array into the value.
func (arr *StringArray) UnmarshalJSON(b []byte) error {
	var strarr []string
	if err := json.Unmarshal(b, &strarr); err != nil {
		return err
	}
	// Direct conversion; the previous element-by-element append was a
	// redundant copy of the decoded slice.
	*arr = StringArray(strarr)
	return nil
}
// serializeBigIntArray joins the values with commas and wraps them in the
// given prefix/suffix, e.g. ("{", "}") -> "{a,b,c}". Values are emitted
// verbatim, without quoting.
func serializeBigIntArray(s []string, prefix string, suffix string) []byte {
	return []byte(prefix + strings.Join(s, ",") + suffix)
}
// serializeBigIntArrayAsString is like serializeBigIntArray but wraps every
// value in double quotes, e.g. ("[", "]") -> ["a","b"]. Values themselves
// are not escaped.
func serializeBigIntArrayAsString(s []string, prefix string, suffix string) []byte {
	quoted := make([]string, len(s))
	for i, val := range s {
		quoted[i] = `"` + val + `"`
	}
	return []byte(prefix + strings.Join(quoted, ",") + suffix)
}
// Cover demonstrates storing a whole JSON object in a single database column.
type Cover struct {
	Id   int64  `json:"id,omitempty"`
	Fid  string `json:"fid,omitempty"`
	Type int8   `json:"type,omitempty"`
	Url  string `json:"url,omitempty"`
}

// FromDB decodes the stored JSON document into the struct.
func (c *Cover) FromDB(data []byte) error {
	return json.Unmarshal(data, c)
}

// ToDB encodes the struct as a JSON document for storage.
func (c *Cover) ToDB() ([]byte, error) {
	return json.Marshal(c)
}
// Course is the persisted course record. JSON tags shape the API payload,
// form tags bind HTML form fields, and xorm Text columns store serialized
// values (Cover as JSON, Categories via StringArray's FromDB/ToDB).
type Course struct {
	Id            int64       `json:"id,string" form:"id"`
	Name          string      `json:"name" form:"name"`
	Brief         string      `json:"brief" form:"brief"`
	Description   string      `json:"description" form:"description"`
	Cover         Cover       `xorm:"Text" json:"cover" form:"cover"`
	Categories    StringArray `xorm:"Text" json:"categories" form:"categories[]"`
	Tags          StringArray `json:"tags" form:"tags[]"`
	Difficulty    float64     `json:"difficulty" form:"difficulty"`
	Price         float64     `json:"price" form:"price"`
	Markets       StringArray `json:"markets" form:"markets[]"`
	StudentAmount int64       `json:"studentAmount" form:"studentAmount"`
	SubjectAmount int64       `json:"subjectAmount" form:"subjectAmount"`
	Crt           time.Time   `json:"crt"`
	Lut           time.Time   `json:"lut"`
	Status        int16       `json:"status" form:"status"`
} |
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"github.com/aws/aws-sdk-go/service/route53"
"yunion.io/x/pkg/errors"
)
// STrafficPolicy mirrors an AWS Route53 traffic policy.
type STrafficPolicy struct {
	client *SAwsClient

	Comment  string `json:"Comment"`
	Document string `json:"Document"` // raw policy document; caller must still decode it
	Id       string `json:"Id"`
	Name     string `json:"Name"`
	DNSType  string `json:"Type"`
	Version  int64  `json:"Version"`
}
// GetSTrafficPolicyById fetches a Route53 traffic policy by its id.
// NOTE(review): the version is hard-coded to 1 below, so only the first
// version of a policy is ever returned — confirm this is intentional.
func (client *SAwsClient) GetSTrafficPolicyById(TrafficPolicyInstanceId string) (*STrafficPolicy, error) {
	s, err := client.getAwsRoute53Session()
	if err != nil {
		return nil, errors.Wrap(err, "region.getAwsRoute53Session()")
	}
	route53Client := route53.New(s)
	params := route53.GetTrafficPolicyInput{}
	params.Id = &TrafficPolicyInstanceId
	var Version int64 = 1
	params.Version = &Version
	ret, err := route53Client.GetTrafficPolicy(&params)
	if err != nil {
		return nil, errors.Wrap(err, "route53Client.GetTrafficPolicy")
	}
	result := STrafficPolicy{}
	err = unmarshalAwsOutput(ret, "TrafficPolicy", &result)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshalAwsOutput(TrafficPolicy)")
	}
	return &result, nil
}
|
package adding
import (
"github.com/elhamza90/lifelog/internal/domain"
)
// NewActivity validates the activity and calls the repo to store it.
// It does the following checks:
// - Check primitive fields are valid
// - Check Tags exist in DB
func (srv Service) NewActivity(act domain.Activity) (domain.ActivityID, error) {
	// Reject activities whose primitive fields are invalid.
	if err := act.Validate(); err != nil {
		return 0, err
	}
	// Replace the provided tags with their canonical DB versions, failing
	// if any referenced tag does not exist.
	tags := make([]domain.Tag, 0, len(act.Tags))
	for _, tag := range act.Tags {
		stored, err := srv.repo.FindTagByID(tag.ID)
		if err != nil {
			return 0, err
		}
		tags = append(tags, stored)
	}
	act.Tags = tags
	return srv.repo.SaveActivity(act)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"fmt"
"net/url"
"strings"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
)
// SRedisAcl is an Azure Redis firewall rule (a start/end IP range) exposed
// through the common elastic-cache ACL interface.
type SRedisAcl struct {
	redis *SRedisCache
	multicloud.SResourceBase
	multicloud.AzureTags

	ID         string `json:"id"`
	Name       string `json:"name"`
	Type       string `json:"type"`
	Properties struct {
		Startip string `json:"startIP"`
		Endip   string `json:"endIP"`
	} `json:"properties"`
}
// GetId returns the Azure resource id of the firewall rule.
func (self *SRedisAcl) GetId() string {
	return self.ID
}

// GetGlobalId returns the lower-cased resource id, used as the
// cross-provider unique identifier.
func (self *SRedisAcl) GetGlobalId() string {
	return strings.ToLower(self.ID)
}

// GetName returns the rule name.
func (self *SRedisAcl) GetName() string {
	return self.Name
}

// GetStatus always reports the ACL as available; Azure firewall rules carry
// no lifecycle state of their own.
func (self *SRedisAcl) GetStatus() string {
	return api.ELASTIC_CACHE_ACL_STATUS_AVAILABLE
}
// Refresh re-reads the firewall rule from Azure and updates self in place.
func (self *SRedisAcl) Refresh() error {
	acl, err := self.redis.region.GetRedisAcl(self.ID)
	if err != nil {
		return err
	}
	return jsonutils.Update(self, acl)
}

// GetIpList renders the rule as a "start-end" IP range string.
func (self *SRedisAcl) GetIpList() string {
	return fmt.Sprintf("%s-%s", self.Properties.Startip, self.Properties.Endip)
}

// Delete removes the firewall rule from Azure.
func (self *SRedisAcl) Delete() error {
	return self.redis.region.Delete(self.ID)
}

// UpdateAcl is not supported for Azure Redis firewall rules.
func (self *SRedisAcl) UpdateAcl(securityIps string) error {
	return errors.Wrapf(cloudprovider.ErrNotImplemented, "UpdateAcl")
}
// GetRedisAcls lists all firewall rules under the given Redis instance id.
func (self *SRegion) GetRedisAcls(id string) ([]SRedisAcl, error) {
	result := struct {
		Value []SRedisAcl
	}{}
	return result.Value, self.get(id+"/firewallRules", url.Values{}, &result)
}

// GetRedisAcl fetches one firewall rule by its full resource id.
func (self *SRegion) GetRedisAcl(id string) (*SRedisAcl, error) {
	acl := &SRedisAcl{}
	return acl, self.get(id, url.Values{}, acl)
}
|
package scheduler_test
import (
"testing"
"time"
"github.com/waybeams/waybeams/pkg/clock"
"github.com/waybeams/waybeams/pkg/ctrl"
"github.com/waybeams/waybeams/pkg/spec"
"github.com/waybeams/assert"
"github.com/waybeams/waybeams/pkg/env/fake"
"github.com/waybeams/waybeams/pkg/scheduler"
)
// TestScheduler verifies that advancing the fake clock drives the scheduler
// to invoke the application factory.
func TestScheduler(t *testing.T) {
	t.Run("Surface", func(t *testing.T) {
		factoryCalled := false
		fakeWindow := fake.NewWindow()
		fakeSurface := fake.NewSurface()
		fakeAppFactory := func() spec.ReadWriter {
			factoryCalled = true
			return ctrl.VBox()
		}
		fakeClock := clock.NewFake()
		b := scheduler.New(fakeWindow, fakeSurface, fakeAppFactory, fakeClock)
		// Ensure we close the blocked goroutine.
		defer b.Close()
		// Listen in a goroutine.
		go b.Listen()
		// Move time forward and ensure our factory was called.
		// NOTE(review): factoryCalled is written on the Listen goroutine and
		// read here; this presumably relies on fakeClock.Add synchronizing
		// with the scheduler — confirm under -race.
		fakeClock.Add(100 * time.Millisecond)
		assert.True(factoryCalled)
	})
}
|
// Package display provides controllers that update the status fields on several resources.
package display
import (
"context"
"fmt"
"strings"
"github.com/rancher/fleet/internal/cmd/controller/summary"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/rancher/wrangler/pkg/genericcondition"
)
// handler carries the status-display update callbacks; it is stateless.
type handler struct {
}
// Register wires the display handler's status-update callbacks into the
// cluster, cluster-group, git-repo and bundle-deployment controllers.
func Register(ctx context.Context,
	clusters fleetcontrollers.ClusterController,
	clustergroups fleetcontrollers.ClusterGroupController,
	gitrepos fleetcontrollers.GitRepoController,
	bundledeployments fleetcontrollers.BundleDeploymentController) {
	h := &handler{}
	// NOTE these handlers have an empty "condition", so they won't update lastUpdateTime in the status
	fleetcontrollers.RegisterClusterStatusHandler(ctx, clusters, "", "cluster-display", h.OnClusterChange)
	fleetcontrollers.RegisterClusterGroupStatusHandler(ctx, clustergroups, "", "clustergroup-display", h.OnClusterGroupChange)
	fleetcontrollers.RegisterGitRepoStatusHandler(ctx, gitrepos, "", "gitrepo-display", h.OnRepoChange)
	fleetcontrollers.RegisterBundleDeploymentStatusHandler(ctx, bundledeployments, "", "bundledeployment-display", h.OnBundleDeploymentChange)
}
// OnClusterChange recomputes the cluster's display fields: ready bundle and
// node counts, a sample node name, and the worst non-ready bundle state
// (overridden with "WaitCheckIn" when the agent has never reported).
func (h *handler) OnClusterChange(cluster *fleet.Cluster, status fleet.ClusterStatus) (fleet.ClusterStatus, error) {
	logrus.Debugf("OnClusterChange: cluster '%s' changed, updating its status.Display", cluster.Name)
	status.Display.ReadyBundles = fmt.Sprintf("%d/%d",
		cluster.Status.Summary.Ready,
		cluster.Status.Summary.DesiredReady)
	status.Display.ReadyNodes = fmt.Sprintf("%d/%d",
		cluster.Status.Agent.ReadyNodes,
		cluster.Status.Agent.NonReadyNodes+cluster.Status.Agent.ReadyNodes)
	status.Display.SampleNode = sampleNode(status)
	// Surface the highest-ranked (worst) state among non-ready resources.
	var state fleet.BundleState
	for _, nonReady := range status.Summary.NonReadyResources {
		if fleet.StateRank[nonReady.State] > fleet.StateRank[state] {
			state = nonReady.State
		}
	}
	status.Display.State = string(state)
	if status.Agent.LastSeen.IsZero() {
		status.Display.State = "WaitCheckIn"
	}
	return status, nil
}
// OnClusterGroupChange recomputes the group's display fields: ready bundle
// and cluster counts (with non-ready cluster names appended) and the worst
// non-ready bundle state.
func (h *handler) OnClusterGroupChange(cluster *fleet.ClusterGroup, status fleet.ClusterGroupStatus) (fleet.ClusterGroupStatus, error) {
	logrus.Debugf("OnClusterGroupChange: cluster group '%s' changed, updating its status.Display", cluster.Name)
	status.Display.ReadyBundles = fmt.Sprintf("%d/%d",
		cluster.Status.Summary.Ready,
		cluster.Status.Summary.DesiredReady)
	status.Display.ReadyClusters = fmt.Sprintf("%d/%d",
		cluster.Status.ClusterCount-cluster.Status.NonReadyClusterCount,
		cluster.Status.ClusterCount)
	if len(cluster.Status.NonReadyClusters) > 0 {
		status.Display.ReadyClusters += " (" + strings.Join(cluster.Status.NonReadyClusters, ",") + ")"
	}
	// Surface the highest-ranked (worst) state among non-ready resources.
	var state fleet.BundleState
	for _, nonReady := range status.Summary.NonReadyResources {
		if fleet.StateRank[nonReady.State] > fleet.StateRank[state] {
			state = nonReady.State
		}
	}
	status.Display.State = string(state)
	return status, nil
}
// OnRepoChange updates the git repo's displayed ready/desired bundle
// deployment counts.
func (h *handler) OnRepoChange(gitrepo *fleet.GitRepo, status fleet.GitRepoStatus) (fleet.GitRepoStatus, error) {
	logrus.Debugf("OnRepoChange: git repo '%s' changed, updating its status.Display", gitrepo.Name)
	status.Display.ReadyBundleDeployments = fmt.Sprintf("%d/%d",
		gitrepo.Status.Summary.Ready,
		gitrepo.Status.Summary.DesiredReady)
	return status, nil
}
// OnBundleDeploymentChange rebuilds the bundle deployment's display block
// from its "Deployed"/"Monitored" conditions and overall deployment state.
func (h *handler) OnBundleDeploymentChange(bundleDeployment *fleet.BundleDeployment, status fleet.BundleDeploymentStatus) (fleet.BundleDeploymentStatus, error) {
	var (
		deployed, monitored string
	)
	logrus.Debugf("OnBundleDeploymentChange: bundle deployment '%s' changed, updating its status.Display", bundleDeployment.Name)
	for _, cond := range status.Conditions {
		switch cond.Type {
		case "Deployed":
			deployed = conditionToMessage(cond)
		case "Monitored":
			monitored = conditionToMessage(cond)
		}
	}
	status.Display = fleet.BundleDeploymentDisplay{
		Deployed:  deployed,
		Monitored: monitored,
		State:     string(summary.GetDeploymentState(bundleDeployment)),
	}
	return status, nil
}
// conditionToMessage renders a condition for display: error conditions show
// their message, everything else shows the condition's status value.
func conditionToMessage(cond genericcondition.GenericCondition) string {
	if cond.Reason != "Error" {
		return string(cond.Status)
	}
	return "Error: " + cond.Message
}
// sampleNode picks one representative node name from the cluster status,
// preferring ready nodes over non-ready ones; empty when there are none.
func sampleNode(status fleet.ClusterStatus) string {
	for _, names := range [][]string{
		status.Agent.ReadyNodeNames,
		status.Agent.NonReadyNodeNames,
	} {
		if len(names) > 0 {
			return names[0]
		}
	}
	return ""
}
|
package service
import (
	"bytes"
	"html/template"
	"net/http"

	"github.com/NYTimes/gizmo/server"
)
// Demo will serve an HTML page that demonstrates how to use the 'stream'
// endpoint.
func (s *StreamService) Demo(w http.ResponseWriter, r *http.Request) {
	vals := struct {
		Port     int
		StreamID int64
	}{
		s.port,
		server.GetInt64Var(r, "stream_id"),
	}
	// Render into a buffer first: executing the template directly into the
	// ResponseWriter can emit a partial page (and an implicit 200) before a
	// template error is detected, making the later http.Error ineffective.
	var buf bytes.Buffer
	if err := demoTempl.Execute(&buf, &vals); err != nil {
		server.Log.Error("template error ", err)
		http.Error(w, "problems loading HTML", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if _, err := buf.WriteTo(w); err != nil {
		server.Log.Error("demo write error ", err)
	}
}
// demoTempl renders demoHTML with the service port and requested stream id.
var demoTempl = template.Must(template.New("demo").Parse(demoHTML))

// demoHTML is a self-contained page that opens a websocket to the stream
// endpoint, prepends every received message to the page, and publishes a
// random crossword cell update once per second.
const demoHTML = `<!DOCTYPE html>
<html lang="en">
<head>
<title>StreamService Demo</title>
</head>
<body>
<h1>Welcome to the stream for {{ .StreamID }}!</h1>
<p>Open multiple tabs to see messages broadcast across all views</p>
<div id="consumed" style="float:left; width:50%">
</div>
<div id="published" style="float:left">
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script type="text/javascript">
(function()
{
var conn = new WebSocket(
"ws://localhost:{{ .Port }}/svc/v1/stream/{{ .StreamID }}"
);
// consume from websocket/Kafka
conn.onmessage = function(evt)
{
var evts = $("#consumed");
evts.prepend("<p> Received: " + evt.data + "</p>");
}
// publish to websocket/Kafka
setInterval(publishMessage(conn), 1000);
function publishMessage(conn) {
return function() {
var alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
var msg = '{"game":"crossword","user_id":12345,"time":'+new Date().getTime()+',"cell":' + Math.floor(
(Math.random() * 10) + 1) +
',"value":"' + alpha.charAt(Math.floor(
Math.random() * alpha.length)) + '"}'
conn.send(msg);
var evts = $("#published");
evts.prepend("<p> Sent: " + msg + "</p>");
}
}
})();
</script>
</body>
</html>`
|
package br_test
import (
"testing"
"time"
"github.com/olebedev/when"
"github.com/olebedev/when/rules"
"github.com/olebedev/when/rules/br"
)
// TestPastTime checks the Brazilian-Portuguese past-time rule against
// fixtures of (input, match offset, matched text, expected negative delta).
func TestPastTime(t *testing.T) {
	fixt := []Fixture{
		{"meia hora atrás", 0, "meia hora atrás", -(time.Hour / 2)},
		{"1 hora atrás", 0, "1 hora atrás", -(time.Hour)},
		{"5 minutos atrás", 0, "5 minutos atrás", -(time.Minute * 5)},
		{"5 minutos atrás eu fui ao zoológico", 0, "5 minutos atrás", -(time.Minute * 5)},
		{"nós fizemos algo 10 dias atrás.", 18, "10 dias atrás", -(10 * 24 * time.Hour)},
		{"nós fizemos algo cinco dias atrás.", 18, "cinco dias atrás", -(5 * 24 * time.Hour)},
		{"fizemos algo 5 dias atrás.", 13, "5 dias atrás", -(5 * 24 * time.Hour)},
		{"5 segundos atrás, um carro foi movido", 0, "5 segundos atrás", -(5 * time.Second)},
		{"duas semanas atrás", 0, "duas semanas atrás", -(14 * 24 * time.Hour)},
		{"um mês atrás", 0, "um mês atrás", -(31 * 24 * time.Hour)},
		{"uns meses atrás", 0, "uns meses atrás", -(92 * 24 * time.Hour)},
		{"há um ano", 4, "um ano", -(365 * 24 * time.Hour)},
		{"há duas semanas", 4, "duas semanas", -(2 * 7 * 24 * time.Hour)},
		{"poucas semanas atrás", 0, "poucas semanas atrás", -(3 * 7 * 24 * time.Hour)},
		{"há poucas semanas", 4, "poucas semanas", -(3 * 7 * 24 * time.Hour)},
		{"alguns dias atrás", 0, "alguns dias atrás", -(3 * 24 * time.Hour)},
		{"há alguns dias", 4, "alguns dias", -(3 * 24 * time.Hour)},
	}
	w := when.New(nil)
	w.Add(br.PastTime(rules.Skip))
	ApplyFixtures(t, "br.PastTime", w, fixt)
}
|
package text
import "strings"
// ReleaseNotes generates the output mentioned in the expected-output.md
func ReleaseNotes(sections Sections) string {
	var b strings.Builder
	// Extra lines at the start to make sure formatting starts correctly
	b.WriteString("\n\n")
	// Categories are rendered in a fixed order; empty ones are skipped.
	groups := []struct {
		category string
		commits  []Commit
	}{
		{"features", sections.Features},
		{"bugs", sections.Bugs},
		{"chores", sections.Chores},
		{"others", sections.Others},
	}
	for _, g := range groups {
		if len(g.commits) > 0 {
			b.WriteString(buildSection(g.category, g.commits))
		}
	}
	return b.String()
}
// buildSection renders one category heading followed by its commit log.
func buildSection(category string, commits []Commit) string {
	return buildHeading(category) + buildCommitLog(commits)
}
// buildHeading renders the markdown H2 heading for a known category,
// including its emoticon, followed by a blank line.
func buildHeading(category string) string {
	var b strings.Builder
	b.WriteString("## ")
	switch category {
	case "features":
		b.WriteString("Features ")
		b.WriteString(EmoticonFeature)
	case "bugs":
		b.WriteString("Bug fixes ")
		b.WriteString(EmoticonBug)
	case "chores":
		b.WriteString("Chores and Improvements ")
		b.WriteString(EmoticonChores)
	case "others":
		b.WriteString("Other ")
		b.WriteString(EmoticonOthers)
	}
	b.WriteString("\n\n")
	return b.String()
}
// buildCommitLog renders one "- <short-hash> <heading>" bullet per commit,
// terminated by a blank line.
func buildCommitLog(commits []Commit) string {
	var b strings.Builder
	for _, c := range commits {
		// Short (7-char) hash form usable on Github.
		b.WriteString("- " + c.Hash.String()[:7] + " " + c.Heading + "\n")
	}
	b.WriteString("\n")
	return b.String()
}
|
package geo
import (
"bytes"
"fmt"
"io"
"math"
)
// Path represents a set of points to be thought of as a polyline.
type Path struct {
	points []Point // ordered vertices of the polyline
}
// NewPath creates a new empty path with capacity for 1000 points
// preallocated.
func NewPath() *Path {
	return &Path{points: make([]Point, 0, 1000)}
}
// SetPoints allows you to set the complete pointset yourself.
// Note that the input is an array of Points (not pointers to points).
// The slice is stored without copying, so later mutations alias the path.
func (p *Path) SetPoints(points []Point) *Path {
	p.points = points
	return p
}

// GetPoints returns the raw points stored with the path.
// Note the output is an array of Points (not pointers to points);
// the returned slice aliases the path's internal storage.
func (p *Path) GetPoints() []Point {
	return p.points
}
// Transform applies a given projection or inverse projection to all
// the points in the path. Points are modified in place.
func (p *Path) Transform(projection func(*Point)) *Path {
	for i := range p.points {
		projection(&p.points[i])
	}
	return p
}
// Reduce the path using Douglas-Peucker to the given threshold.
// Modifies the existing path.
func (p *Path) Reduce(threshold float64) *Path {
	if p.Length() <= 2 {
		return p
	}
	// mask[i] == 1 marks points to keep; both endpoints always survive.
	mask := make([]byte, p.Length())
	mask[0] = 1
	mask[p.Length()-1] = 1
	p.workerReduce(0, p.Length()-1, threshold, mask)
	// Compact the kept points to the front of the slice in place.
	count := 0
	for i, v := range mask {
		if v == 1 {
			p.points[count] = p.points[i]
			count++
		}
	}
	p.points = p.points[:count]
	return p
}
// workerReduce performs the Douglas-Peucker subdivision: within (start, end)
// it finds the point farthest from the start-end chord and, when that
// distance exceeds threshold, keeps it and recurses on both halves.
func (p *Path) workerReduce(start, end int, threshold float64, mask []byte) {
	if end-start <= 1 {
		return
	}
	l := Line{p.points[start], p.points[end]}
	maxDist := 0.0
	maxIndex := start + 1
	for i := start + 1; i < end; i++ {
		dist := l.DistanceFrom(&p.points[i])
		if dist >= maxDist {
			maxDist = dist
			maxIndex = i
		}
	}
	if maxDist > threshold {
		mask[maxIndex] = 1
		p.workerReduce(start, maxIndex, threshold, mask)
		p.workerReduce(maxIndex, end, threshold, mask)
	}
}
// Resample converts the path into count-1 evenly spaced segments.
// NOTE(review): totalPoints == 1 divides by zero below, producing +Inf
// target distances; the result degenerates to just the start point —
// confirm that is the intended behavior.
func (p *Path) Resample(totalPoints int) *Path {
	// degenerate case
	if len(p.points) <= 1 {
		return p
	}
	if totalPoints <= 0 {
		p.points = make([]Point, 0)
		return p
	}
	points := make([]Point, 1, totalPoints)
	points[0] = p.points[0] // start stays the same
	// location on the original line
	prevIndex := 0
	prevDistance := 0.0
	// first distance we're looking for
	step := 1
	totalDistance := p.Distance()
	currentDistance := totalDistance * float64(step) / float64(totalPoints-1)
	for {
		// Walk the original segments, emitting interpolated points every
		// time a target distance falls within the current segment.
		currentLine := NewLine(&p.points[prevIndex], &p.points[prevIndex+1])
		currentLineDistance := currentLine.Distance()
		nextDistance := prevDistance + currentLineDistance
		for currentDistance <= nextDistance {
			// need to add a point
			percent := (currentDistance - prevDistance) / currentLineDistance
			points = append(points, *currentLine.Interpolate(percent))
			// move to the next distance we want
			step++
			currentDistance = totalDistance * float64(step) / float64(totalPoints-1)
			if step == totalPoints-1 { // weird round off error on my machine
				currentDistance = totalDistance
			}
		}
		// past the current point in the original line, so move to the next one
		prevIndex++
		prevDistance = nextDistance
		if prevIndex == len(p.points)-1 {
			break
		}
	}
	// end stays the same, to handle round off errors
	if totalPoints != 1 { // for 1, we want the first point
		points[totalPoints-1] = p.points[len(p.points)-1]
	}
	p.points = points
	return p
}
// Decode is the inverse of Encode. It takes a string encoding of path
// and returns the actual path it represents. Factor defaults to 1.0e5,
// the same used by Google for polyline encoding.
func Decode(encoded string, factor ...int) *Path {
	var count, index int
	f := 1.0e5
	if len(factor) != 0 {
		f = float64(factor[0])
	}
	p := NewPath()
	// Running lat/lng totals: each decoded value is a delta from the
	// previous point.
	tempLatLng := [2]int{0, 0}
	for index < len(encoded) {
		var result int
		var b int = 0x20
		var shift uint
		// Accumulate 5-bit chunks (char - 63) until one without the 0x20
		// continuation bit is seen.
		for b >= 0x20 {
			b = int(encoded[index]) - 63
			index++
			result |= (b & 0x1f) << shift
			shift += 5
		}
		// sign detection: odd values encode negative numbers
		if result&1 != 0 {
			result = ^(result >> 1)
		} else {
			result = result >> 1
		}
		// Values alternate latitude then longitude; push a point after
		// each completed pair.
		if count%2 == 0 {
			result += tempLatLng[0]
			tempLatLng[0] = result
		} else {
			result += tempLatLng[1]
			tempLatLng[1] = result
			p.Push(&Point{float64(tempLatLng[1]) / f, float64(tempLatLng[0]) / f})
		}
		count++
	}
	return p
}
// Encode converts the path into a Google polyline encoded string.
// Factor defaults to 1.0e5, the same used by Google for polyline encoding.
func (p *Path) Encode(factor ...int) string {
	precision := 1.0e5
	if len(factor) != 0 {
		precision = float64(factor[0])
	}
	var (
		lastLat int
		lastLng int
		out     bytes.Buffer
	)
	// Each coordinate is written as a signed delta from the previous one.
	for _, pt := range p.points {
		lat := int(pt.Lat() * precision)
		lng := int(pt.Lng() * precision)
		out.WriteString(encodeSignedNumber(lat - lastLat))
		out.WriteString(encodeSignedNumber(lng - lastLng))
		lastLat, lastLng = lat, lng
	}
	return out.String()
}
// encodeSignedNumber zig-zag encodes num (negatives become odd values)
// and defers to encodeNumber for the 5-bit chunk encoding.
func encodeSignedNumber(num int) string {
	value := num << 1
	if num < 0 {
		value = ^value
	}
	return encodeNumber(value)
}
// encodeNumber emits num as a Google polyline chunk sequence: 5 bits per
// byte, each offset by 63, with 0x20 set on every byte except the last.
//
// The original built the string with `result += string(num + 63)`, which
// both triggers go vet's int-to-string conversion warning and reallocates
// on every iteration; a byte slice avoids both.
func encodeNumber(num int) string {
	var buf []byte
	for num >= 0x20 {
		buf = append(buf, byte((0x20|(num&0x1f))+63))
		num >>= 5
	}
	buf = append(buf, byte(num+63))
	return string(buf)
}
// Distance computes the total distance in the units of the points.
func (p *Path) Distance() float64 {
	var total float64
	for i := 1; i < len(p.points); i++ {
		total += p.points[i-1].DistanceFrom(&p.points[i])
	}
	return total
}
// GeoDistance computes the total distance using spherical geometry.
func (p *Path) GeoDistance(haversine ...bool) float64 {
	useHaversine := yesHaversine(haversine)
	var total float64
	for i := 1; i < len(p.points); i++ {
		total += p.points[i-1].GeoDistanceFrom(&p.points[i], useHaversine)
	}
	return total
}
// DistanceFrom computes an O(n) distance from the path. Loops over every
// subline to find the minimum distance.
func (p *Path) DistanceFrom(point *Point) float64 {
	best := math.Inf(1)
	for i := 1; i < len(p.points); i++ {
		segment := &Line{p.points[i-1], p.points[i]}
		best = math.Min(best, segment.DistanceFrom(point))
	}
	return best
}
// Measure computes the distance along this path to the point nearest the given point.
func (p *Path) Measure(point *Point) float64 {
	minDistance := math.Inf(1)
	measure := math.Inf(-1) // stays -Inf when the path has fewer than 2 points
	sum := 0.0              // distance along the path up to the current segment
	for i := 0; i < len(p.points)-1; i++ {
		seg := NewLine(&p.points[i], &p.points[i+1])
		distanceToLine := seg.DistanceFrom(point)
		if distanceToLine < minDistance {
			minDistance = distanceToLine
			// distance to this segment's start plus the measure within it
			measure = sum + seg.Measure(point)
		}
		sum += seg.Distance()
	}
	return measure
}
// Project computes the measure along this path closest to the given point,
// normalized to the length of the path.
func (p *Path) Project(point *Point) float64 {
	total := p.Distance()
	return p.Measure(point) / total
}
// Intersection calls IntersectionPath or IntersectionLine depending on the
// type of the provided geometry. It panics for any other geometry type.
//
// The trailing `return nil, nil` was unreachable (every switch branch
// terminates, so the switch is a terminating statement) and has been
// removed; go vet's unreachable check flagged it.
func (p *Path) Intersection(geometry interface{}) ([]*Point, [][2]int) {
	switch g := geometry.(type) {
	case Line:
		return p.IntersectionLine(&g)
	case *Line:
		return p.IntersectionLine(g)
	case Path:
		return p.IntersectionPath(&g)
	case *Path:
		return p.IntersectionPath(g)
	default:
		panic("can only determine intersection with lines and paths")
	}
}
// IntersectionPath returns a slice of points and a slice of tuples [i, j] where i is the segment
// in the parent path and j is the segment in the given path that intersect to form the given point.
// Slices will be empty if there is no intersection.
func (p *Path) IntersectionPath(path *Path) ([]*Point, [][2]int) {
	// TODO: do some sort of line sweep here if p.Length() is big enough
	points := make([]*Point, 0)
	indexes := make([][2]int, 0)
	for i := 0; i+1 < len(p.points); i++ {
		segment := NewLine(&p.points[i], &p.points[i+1])
		for j := 0; j+1 < len(path.points); j++ {
			other := NewLine(&path.points[j], &path.points[j+1])
			if hit := segment.Intersection(other); hit != nil {
				points = append(points, hit)
				indexes = append(indexes, [2]int{i, j})
			}
		}
	}
	return points, indexes
}
// IntersectionLine returns a slice of points and a slice of tuples [i, 0] where i is the segment
// in path that intersects with the line at the given point.
// Slices will be empty if there is no intersection.
func (p *Path) IntersectionLine(line *Line) ([]*Point, [][2]int) {
	points := make([]*Point, 0)
	indexes := make([][2]int, 0)
	for i := 0; i+1 < len(p.points); i++ {
		segment := NewLine(&p.points[i], &p.points[i+1])
		if hit := segment.Intersection(line); hit != nil {
			points = append(points, hit)
			indexes = append(indexes, [2]int{i, 0})
		}
	}
	return points, indexes
}
// Intersects can take a line or a path to determine if there is an intersection.
// It panics for any other geometry type.
//
// The trailing `return false` was unreachable (every switch branch
// terminates) and has been removed; go vet's unreachable check flagged it.
func (p *Path) Intersects(geometry interface{}) bool {
	switch g := geometry.(type) {
	case Line:
		return p.IntersectsLine(&g)
	case *Line:
		return p.IntersectsLine(g)
	case Path:
		return p.IntersectsPath(&g)
	case *Path:
		return p.IntersectsPath(g)
	default:
		panic("can only determine intersection with lines and paths")
	}
}
// IntersectsPath takes a Path and checks if it intersects with the path.
func (p *Path) IntersectsPath(path *Path) bool {
	// TODO: do some sort of line sweep here if p.Length() is big enough
	for i := 0; i+1 < len(p.points); i++ {
		segment := NewLine(&p.points[i], &p.points[i+1])
		for j := 0; j+1 < len(path.points); j++ {
			other := NewLine(&path.points[j], &path.points[j+1])
			if segment.Intersects(other) {
				return true
			}
		}
	}
	return false
}
// IntersectsLine takes a Line and checks if it intersects with the path.
func (p *Path) IntersectsLine(line *Line) bool {
	for i := 0; i+1 < len(p.points); i++ {
		segment := NewLine(&p.points[i], &p.points[i+1])
		if segment.Intersects(line) {
			return true
		}
	}
	return false
}
// Bound returns a bound around the path. Simply uses rectangular coordinates.
// An empty path yields the zero bound.
func (p *Path) Bound() *Bound {
	if len(p.points) == 0 {
		return NewBound(0, 0, 0, 0)
	}
	west, south := math.Inf(1), math.Inf(1)
	east, north := math.Inf(-1), math.Inf(-1)
	for i := range p.points {
		x, y := p.points[i].X(), p.points[i].Y()
		west = math.Min(west, x)
		south = math.Min(south, y)
		east = math.Max(east, x)
		north = math.Max(north, y)
	}
	return NewBound(east, west, north, south)
}
// SetAt updates a position at i along the path.
// Panics if index is out of range.
func (p *Path) SetAt(index int, point *Point) *Path {
	if index < 0 || index >= len(p.points) {
		panic(fmt.Sprintf("geo: set index out of range, requested: %d, length: %d", index, len(p.points)))
	}
	p.points[index] = *point
	return p
}
// GetAt returns the pointer to the Point in the path.
// This function is good for modifying values in place.
// Returns nil if index is out of range.
func (p *Path) GetAt(i int) *Point {
	if i < 0 || i >= len(p.points) {
		return nil
	}
	return &p.points[i]
}
// InsertAt inserts a Point at i along the path.
// Inserting at len(points) appends.
// Panics if index is out of range.
func (p *Path) InsertAt(index int, point *Point) *Path {
	if index < 0 || index > len(p.points) {
		panic(fmt.Sprintf("geo: insert index out of range, requested: %d, length: %d", index, len(p.points)))
	}
	// Grow by one, shift the tail right, then write the new point. When
	// index == len(points) the copy is a no-op and this is a plain append.
	p.points = append(p.points, Point{})
	copy(p.points[index+1:], p.points[index:])
	p.points[index] = *point
	return p
}
// RemoveAt removes a Point at i along the path.
// Panics if index is out of range.
func (p *Path) RemoveAt(index int) *Path {
	if index < 0 || index >= len(p.points) {
		panic(fmt.Sprintf("geo: remove index out of range, requested: %d, length: %d", index, len(p.points)))
	}
	copy(p.points[index:], p.points[index+1:])
	p.points = p.points[:len(p.points)-1]
	return p
}
// Push appends a point to the end of the path and returns the path,
// allowing calls to be chained.
func (p *Path) Push(point *Point) *Path {
	p.points = append(p.points, *point)
	return p
}
// Pop removes and returns the last point. Returns nil on an empty path.
func (p *Path) Pop() *Point {
	if len(p.points) == 0 {
		return nil
	}
	last := len(p.points) - 1
	point := p.points[last]
	p.points = p.points[:last]
	return &point
}
// Length returns the number of points in the path.
// Note this is a point count, not a geometric length; see Distance for that.
func (p *Path) Length() int {
	return len(p.points)
}
// Equals compares two paths. Returns true if lengths are the same
// and all points are Equal.
func (p *Path) Equals(path *Path) bool {
	if p.Length() != path.Length() {
		return false
	}
	for i := range p.points {
		if !p.points[i].Equals(&path.points[i]) {
			return false
		}
	}
	return true
}
// Clone returns a new path holding a copy of this path's points.
func (p *Path) Clone() *Path {
	n := NewPath()
	n.points = append(n.points, p.points...)
	return n
}
// WriteOffFile writes an Object File Format representation of
// the points of the path to the writer provided. This is for viewing
// in MeshLab or something like that. You should close the
// writer yourself after this function returns.
// An optional [3]int sets the face color; the default is gray (170,170,170).
// Write errors are ignored, keeping the original best-effort contract.
// http://segeval.cs.princeton.edu/public/off_format.html
func (p *Path) WriteOffFile(w io.Writer, rgb ...[3]int) {
	r, g, b := 170, 170, 170
	if len(rgb) != 0 {
		r, g, b = rgb[0][0], rgb[0][1], rgb[0][2]
	}
	// fmt.Fprintf writes straight to w; the original round-tripped every
	// line through Sprintf + []byte + Write.
	fmt.Fprint(w, "OFF\n")
	fmt.Fprintf(w, "%d %d 0\n", p.Length(), p.Length()-2)
	for i := range p.points {
		fmt.Fprintf(w, "%f %f 0\n", p.points[i][0], p.points[i][1])
	}
	for i := 0; i < len(p.points)-2; i++ {
		fmt.Fprintf(w, "3 %d %d %d %d %d %d\n", i, i+1, i+2, r, g, b)
	}
}
|
// support {} pattern like
// - /api/user/{userid}/info
// - /api/user/user-{userid}/info
// not support
// - /api/user/{userid}-user/info
package main
import (
"fmt"
)
// Byte markers used when parsing route templates.
const (
	patternStart = '{' // opens a placeholder segment
	patternEnd = '}' // closes a placeholder segment
	separator = '/' // path segment separator
)
// main exercises the routing trie: exact routes, prefix patterns and full
// placeholder segments. Insertion order is kept as-is because Find tries
// children in insertion order.
func main() {
	router := &tree{}
	router.Insert("/api/user", func() {
		fmt.Println("certain 1")
	})
	router.Insert("/api/user/123/info", func() {
		fmt.Println("certain 2")
	})
	router.Insert("/api/user-{id}/info", func() {
		fmt.Println("prefix pattern 1")
	})
	router.Insert("/api/user/user-{id}/info", func() {
		fmt.Println("prefix pattern 2")
	})
	router.Insert("/api/user/{userid}/info", func() {
		fmt.Println("pattern")
	})
	router.Insert("/api/user/456/info", func() {
		fmt.Println("certain 3")
	})
	router.Insert("/api/unit", func() {
		fmt.Println("certain 4")
	})
	print(router)
	fmt.Println()
	fmt.Println(router.Find("/api/unit/") == nil)
	fmt.Println()
	router.Find("/api/user")()
	router.Find("/api/user/123/info")()
	router.Find("/api/user-123/info")()
	router.Find("/api/user/user-1234/info")()
	router.Find("/api/user/1234/info")()
	router.Find("/api/user/456/info")()
	router.Find("/api/unit")()
}
// tree is a byte-wise routing trie. A node matches either a single byte
// (char) or, when isPattern is set, an arbitrary run of bytes up to the
// next separator.
type tree struct {
	children []*tree
	char byte
	isPattern bool
	handler func() // non-nil only on nodes that terminate a registered route
}
// hasPattern reports whether a "{...}" placeholder starts at index in
// path. It returns the index of the closing brace, or -1 when index does
// not start a placeholder or the placeholder is unterminated.
func hasPattern(path string, index int) int {
	if path[index] != patternStart {
		return -1
	}
	for end := index + 1; end < len(path); end++ {
		if path[end] == patternEnd {
			return end
		}
	}
	return -1
}
// Insert registers handler for path, extending the byte-wise trie as
// needed. Each "{...}" placeholder becomes a single pattern node at its
// level; plain bytes become one node per byte.
func (t *tree) Insert(path string, handler func()) {
	index := 0
	for index < len(path) {
		var nextTree *tree
		if patternIndex := hasPattern(path, index); patternIndex != -1 {
			// Reuse this level's pattern child if one exists.
			// Fix: the break previously sat OUTSIDE the if, so only the
			// first child was ever inspected; when the pattern child was
			// not first, a duplicate pattern node was created.
			for _, child := range t.children {
				if child.isPattern {
					nextTree = child
					break
				}
			}
			if nextTree == nil {
				nextTree = &tree{isPattern: true}
				t.children = append(t.children, nextTree)
			}
			index = patternIndex + 1
		} else {
			char := path[index]
			for _, child := range t.children {
				if child.char == char {
					nextTree = child
					break
				}
			}
			if nextTree == nil {
				nextTree = &tree{char: char}
				t.children = append(t.children, nextTree)
			}
			index++
		}
		t = nextTree
	}
	t.handler = handler
}
// Find returns the handler registered for path, or nil if nothing matches.
func (t *tree) Find(path string) func() {
	const rootIndex = -1 // sentinel: the root consumes no characters
	return t.find(path, rootIndex)
}
// find recursively matches path against the trie. index is the position
// in path that this node must match; the root is invoked with -1 so its
// children examine the first character.
func (t *tree) find(path string, index int) func() {
	if t.isPattern {
		// A pattern node consumes every character up to (not including)
		// the next separator.
		for index+1 < len(path) && path[index+1] != separator {
			index++
		}
	} else {
		// index == -1 is the root sentinel: the root itself matches nothing.
		if index != -1 && index < len(path) {
			if path[index] != t.char {
				return nil
			}
		}
	}
	// Whole path consumed: this node's handler (possibly nil) is the result.
	if index == len(path)-1 {
		return t.handler
	}
	// Otherwise try children in insertion order; the first match wins.
	for _, child := range t.children {
		if handler := child.find(path, index+1); handler != nil {
			return handler
		}
	}
	return nil
}
// print writes every registered route in t to stdout.
// NOTE(review): this shadows the built-in print within this package;
// consider renaming if the call sites can be updated.
func print(t *tree) {
	helper(t, "")
}
// helper walks the trie rooted at t, accumulating the route string built
// so far and printing it at every node carrying a handler.
func helper(t *tree, path string) {
	if t == nil {
		return
	}
	var label string
	if t.isPattern {
		label = string([]byte{patternStart, patternEnd})
	} else {
		label = string(t.char)
	}
	current := path + label
	if t.handler != nil {
		fmt.Println(current)
	}
	for _, child := range t.children {
		helper(child, current)
	}
}
|
package main
// main briefly exercises the deque: capacity 3, two front inserts and one
// rear insert.
func main() {
	deque := new(MyCircularDeque).Constructor(3)
	deque.InsertFront(1)
	deque.InsertFront(2)
	deque.InsertLast(3)
}
// DoubleListNode is a node of the doubly linked list backing MyCircularDeque.
type DoubleListNode struct {
	pre *DoubleListNode
	next *DoubleListNode
	val int
}
// MyCircularDeque is a capacity-bounded deque backed by a doubly linked
// list with two sentinel nodes; see Constructor for the link layout.
type MyCircularDeque struct {
	size int // current number of elements
	k int // capacity
	head *DoubleListNode
	tail *DoubleListNode
}
/** Initialize your data structure here. Set the size of the deque to be k. */
// Two sentinels are linked so that head.pre reaches the front element and
// tail.next reaches the rear element. Their val of -1 doubles as the
// "empty" return value for GetFront/GetRear.
func (this *MyCircularDeque) Constructor(k int) *MyCircularDeque {
	this.head = &DoubleListNode{val: -1}
	this.tail = &DoubleListNode{val: -1}
	this.head.pre = this.tail
	this.tail.next = this.head
	this.size = 0
	this.k = k
	return this
}
/** Adds an item at the front of Deque. Return true if the operation is successful. */
func (this *MyCircularDeque) InsertFront(value int) bool {
	// Reject when at capacity.
	if this.size == this.k {
		return false
	}
	// Splice the new node in directly before the head sentinel.
	node := &DoubleListNode{val: value}
	node.next = this.head
	node.pre = this.head.pre
	this.head.pre.next = node
	this.head.pre = node
	this.size++
	return true
}
/** Adds an item at the rear of Deque. Return true if the operation is successful. */
func (this *MyCircularDeque) InsertLast(value int) bool {
	// Reject when at capacity.
	if this.size == this.k {
		return false
	}
	// Splice the new node in directly after the tail sentinel.
	node := &DoubleListNode{val: value}
	node.next = this.tail.next
	this.tail.next.pre = node
	this.tail.next = node
	node.pre = this.tail
	this.size++
	return true
}
/** Deletes an item from the front of Deque. Return true if the operation is successful. */
func (this *MyCircularDeque) DeleteFront() bool {
	if this.size == 0 {
		return false
	}
	// Unlink the node just before the head sentinel (the front element).
	this.head.pre.pre.next = this.head
	this.head.pre = this.head.pre.pre
	this.size--
	return true
}
/** Deletes an item from the rear of Deque. Return true if the operation is successful. */
func (this *MyCircularDeque) DeleteLast() bool {
	if this.size == 0 {
		return false
	}
	// Unlink the node just after the tail sentinel (the rear element).
	this.tail.next.next.pre = this.tail
	this.tail.next = this.tail.next.next
	this.size--
	return true
}
/** Get the front item from the deque. */
// When empty, head.pre is the tail sentinel (val -1), so -1 is returned
// without a special case.
func (this *MyCircularDeque) GetFront() int {
	return this.head.pre.val
}
/** Get the last item from the deque. */
// When empty, tail.next is the head sentinel (val -1), so -1 is returned
// without a special case.
func (this *MyCircularDeque) GetRear() int {
	return this.tail.next.val
}
/** Checks whether the circular deque is empty or not. */
func (this *MyCircularDeque) IsEmpty() bool {
	return this.size == 0
}
/** Checks whether the circular deque is full or not. */
func (this *MyCircularDeque) IsFull() bool {
	return this.size == this.k
}
|
package neo4j
import (
"encoding/json"
"fmt"
"github.com/go-ginger/helpers"
"github.com/go-ginger/models"
"github.com/neo4j/neo4j-go-driver/neo4j"
"math"
"strings"
)
// countDocuments executes the given count query in a read-mode session and
// accumulates the first value of every result row into *count. It always
// signals done exactly once so callers blocked on <-done cannot hang.
//
// Fix: the early return on session-open failure previously skipped the
// deferred `done <- true`, deadlocking Paginate's `<-done`.
//
// NOTE(review): query errors are still swallowed here — the caller only
// observes completion, never failure; confirm that is acceptable.
func (handler *DbHandler) countDocuments(query string, params map[string]interface{},
	done chan bool, count *uint64) {
	session, err := handler.DB.Driver.Session(neo4j.AccessModeRead)
	if err != nil {
		done <- true // done is buffered by the caller; this cannot block
		return
	}
	defer func() {
		done <- true
		e := session.Close()
		if e != nil && err == nil {
			err = e
			return
		}
	}()
	queryResult, err := session.Run(query, params)
	if err != nil {
		return
	}
	for queryResult.Next() {
		totalCount := uint64(queryResult.Record().Values()[0].(int64))
		*count += totalCount
	}
}
// populateMap converts a flat property map into a nested one. A key
// containing "$" (e.g. "parent$child") marks a flattened nested object:
// all keys sharing the "parent" prefix are grouped into a sub-map and
// recursed on. A leading "<nodeName>." prefix is stripped from result keys.
func (handler *DbHandler) populateMap(nodeName string, source map[string]interface{}) (result map[string]interface{}, err error) {
	result = map[string]interface{}{}
	for key, value := range source {
		di := strings.Index(key, "$")
		if di > 0 {
			actualKey := key[:di]
			nestedSource := make(map[string]interface{}, 0)
			actualKeyLen := len(actualKey)
			// Collect every key sharing this prefix; deleting during range
			// is legal in Go and prevents reprocessing grouped keys.
			for k, v := range source {
				if strings.Index(k, actualKey) == 0 {
					nestedSource[k[actualKeyLen+1:]] = v
					delete(source, k)
				}
			}
			nestedObj, e := handler.populateMap(nodeName, nestedSource)
			if e != nil {
				err = e
				return
			}
			// Strip the node-name prefix when present.
			nodeIndex := strings.Index(actualKey, nodeName+".")
			if nodeIndex == 0 {
				actualKey = actualKey[len(nodeName)+1:]
			}
			result[actualKey] = nestedObj
		} else {
			nodeIndex := strings.Index(key, nodeName+".")
			if nodeIndex == 0 {
				key = key[len(nodeName)+1:]
			}
			result[key] = value
		}
	}
	return
}
// Paginate runs a paginated node query and returns the matched models plus
// pagination metadata. The total count is computed concurrently with the
// page query itself and joined via the done channel.
func (handler *DbHandler) Paginate(request models.IRequest) (result *models.PaginateResult, err error) {
	req := request.GetBaseRequest()
	model := handler.GetModelInstance()
	nodeName := handler.DB.Config.NodeNamer.GetName(model)
	// The Cypher node key defaults to "n" unless the request overrides it.
	iNodeKey := req.GetTemp("node_key")
	var nodeKey string
	if iNodeKey != nil {
		nodeKey = iNodeKey.(string)
	} else {
		nodeKey = "n"
	}
	keyPrefix := nodeKey + "."
	parseResult := handler.QueryParser.Parse(request, keyPrefix)
	// Queries and parameters may be supplied wholesale through ExtraQuery.
	var query string
	var countQuery string
	var params map[string]interface{}
	var countParams map[string]interface{}
	if req.ExtraQuery != nil {
		if iQuery, ok := req.ExtraQuery["query"]; ok {
			query = iQuery.(string)
		}
		if iQuery, ok := req.ExtraQuery["count_query"]; ok {
			countQuery = iQuery.(string)
		}
		if iParams, ok := req.ExtraQuery["params"]; ok {
			params = iParams.(map[string]interface{})
		}
		// NOTE(review): this key is "countParams" while the query key above
		// is snake_case "count_query" — confirm the intended convention.
		if iParams, ok := req.ExtraQuery["countParams"]; ok {
			countParams = iParams.(map[string]interface{})
		}
	}
	// Fall back to generated MATCH queries built from the parsed filters.
	if query == "" {
		query = fmt.Sprintf("MATCH (%s:%s) "+
			"WHERE %v "+
			"RETURN %s",
			nodeKey, nodeName, parseResult.GetQuery(), nodeKey)
	}
	if countQuery == "" {
		countQuery = fmt.Sprintf("MATCH (%s:%s) "+
			"WHERE %v "+
			"RETURN COUNT(%s)",
			nodeKey, nodeName, parseResult.GetQuery(), nodeKey)
	}
	if params == nil {
		params = parseResult.GetParams().(map[string]interface{})
	}
	if countParams == nil {
		countParams = params
	}
	// Kick off the count concurrently; joined at <-done below.
	var totalCount uint64
	done := make(chan bool, 1)
	go handler.countDocuments(countQuery, countParams, done, &totalCount)
	// Append ORDER BY clauses for any requested sorts.
	if req.Sort != nil {
		sorts := make([]string, 0)
		for _, s := range *req.Sort {
			sort := s.Name
			if !s.Ascending {
				sort += " DESC"
			}
			sort = fmt.Sprintf("%s.%s", nodeKey, sort)
			sorts = append(sorts, sort)
		}
		if len(sorts) > 0 {
			query += " ORDER BY " + strings.Join(sorts, ",")
		}
	}
	// Page is 1-based; SKIP/LIMIT implement the pagination window.
	query += fmt.Sprintf(" SKIP %v LIMIT %v",
		(req.Page-1)*req.PerPage, req.PerPage)
	err = handler.NormalizeFilter(req.Filters)
	if err != nil {
		return
	}
	session, err := handler.DB.Driver.Session(neo4j.AccessModeRead)
	if err != nil {
		return
	}
	defer func() {
		// Surface a Close error only if nothing else failed first.
		e := session.Close()
		if e != nil && err == nil {
			err = e
			return
		}
	}()
	queryResult, err := session.Run(query, params)
	if err != nil {
		return
	}
	items := handler.GetModelsInstance()
	var count uint64 = 0
	for queryResult.Next() {
		count++
		record := queryResult.Record()
		iNode, _ := record.Get(nodeKey)
		node := iNode.(neo4j.Node)
		properties := node.Props()
		// Node properties -> nested map -> JSON -> typed model instance.
		obj, e := handler.populateMap(nodeKey, properties)
		if e != nil {
			err = e
			return
		}
		bytes, e := json.Marshal(obj)
		if e != nil {
			err = e
			return
		}
		model := handler.GetModelInstance()
		err = json.Unmarshal(bytes, model)
		if err != nil {
			return
		}
		items = helpers.AppendToSlice(items, model)
	}
	if err = queryResult.Err(); err != nil {
		return
	}
	// Wait for the concurrent count before computing page metadata.
	<-done
	pageCount := uint64(math.Ceil(float64(totalCount) / float64(req.PerPage)))
	result = &models.PaginateResult{
		Items: items,
		Pagination: models.PaginationInfo{
			Page: req.Page,
			PerPage: req.PerPage,
			PageCount: pageCount,
			TotalCount: totalCount,
			HasNext: req.Page < pageCount,
		},
	}
	return
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ring0
import (
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
)
// Init initializes a new kernel.
//
//go:nosplit
func (k *Kernel) Init(maxCPUs int) {
	k.init(maxCPUs) // delegates to the unexported, architecture-specific init
}
// Halt halts execution.
//
// It has no Go body; the implementation is supplied externally
// (per Go's convention, presumably in assembly — confirm in the package).
func Halt()
// defaultHooks implements hooks.
//
// It is installed by CPU.Init when no hooks are provided; both of its
// methods simply halt.
type defaultHooks struct{}
// KernelSyscall implements Hooks.KernelSyscall.
//
// The default behavior for a kernel syscall is to halt.
//
// +checkescape:all
//
//go:nosplit
func (defaultHooks) KernelSyscall() {
	Halt()
}
// KernelException implements Hooks.KernelException.
//
// The default behavior for a kernel exception is to halt; the vector is
// ignored.
//
// +checkescape:all
//
//go:nosplit
func (defaultHooks) KernelException(Vector) {
	Halt()
}
// kernelSyscall is a trampoline.
//
// When in amd64, it is called with %rip on the upper half, so it can
// NOT access to any global data which is not mapped on upper and must
// call to function pointers or interfaces to switch to the lower half
// so that callee can access to global data.
//
// +checkescape:hard,stack
//
//go:nosplit
func kernelSyscall(c *CPU) {
	c.hooks.KernelSyscall() // indirect (interface) call, per the note above
}
// kernelException is a trampoline.
//
// When in amd64, it is called with %rip on the upper half, so it can
// NOT access to any global data which is not mapped on upper and must
// call to function pointers or interfaces to switch to the lower half
// so that callee can access to global data.
//
// +checkescape:hard,stack
//
//go:nosplit
func kernelException(c *CPU, vector Vector) {
	c.hooks.KernelException(vector) // indirect (interface) call, per the note above
}
// Init initializes a new CPU.
//
// Init allows embedding in other objects.
func (c *CPU) Init(k *Kernel, cpuID int, hooks Hooks) {
	c.self = c   // Set self reference.
	c.kernel = k // Set kernel reference.
	c.init(cpuID) // Perform architectural init.
	c.floatingPointState = fpu.NewState()
	// Install hooks, falling back to defaultHooks (which halts) when the
	// caller provides none.
	if hooks != nil {
		c.hooks = hooks
	} else {
		c.hooks = defaultHooks{}
	}
}
|
package server
import (
"net/http"
"strconv"
"github.com/go-chi/chi"
"github.com/pkg/errors"
"nidavellir/services/store"
)
// IAccountStore is the persistence surface required by the account handlers.
type IAccountStore interface {
	GetAccount(name string) (*store.Account, error)
	AddAccount(account *store.Account) (*store.Account, error)
	UpdateAccount(account *store.Account) (*store.Account, error)
	RemoveAccount(id int) error
	// IsLastAdmin reports whether id refers to the sole remaining admin.
	IsLastAdmin(id int) (bool, error)
}
// AccountHandler bundles the HTTP handlers for account management.
type AccountHandler struct {
	DB IAccountStore
}
// AddAccount decodes an account from the request body, persists it, and
// writes the stored account — with sensitive fields masked — as JSON.
func (a *AccountHandler) AddAccount() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var account *store.Account
		if err := readJson(r, &account); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		created, err := a.DB.AddAccount(account)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		created.MaskSensitiveData()
		toJson(w, created)
	}
}
// UpdateAccount decodes an account payload, applies the update, and writes
// the updated account as JSON.
func (a *AccountHandler) UpdateAccount() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var payload *store.Account
		err := readJson(r, &payload)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		account, err := a.DB.UpdateAccount(payload)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		// Mask credentials before echoing the account back — consistent with
		// AddAccount and ValidateAccount, which both mask; previously this
		// handler returned the account unmasked.
		account.MaskSensitiveData()
		toJson(w, account)
	}
}
// RemoveAccount deletes the account identified by the "id" URL parameter,
// refusing to remove the last remaining admin account.
func (a *AccountHandler) RemoveAccount() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id, err := strconv.Atoi(chi.URLParam(r, "id"))
		if err != nil {
			http.Error(w, errors.Wrapf(err, "invalid id: %d", id).Error(), 400)
			return
		}
		isLastAdmin, err := a.DB.IsLastAdmin(id)
		if err != nil {
			// Fix: this error was previously ignored, so a failing lookup
			// silently fell through to the delete.
			http.Error(w, err.Error(), 400)
			return
		}
		if isLastAdmin {
			http.Error(w, "Cannot remove last admin account", 400)
			return
		}
		err = a.DB.RemoveAccount(id)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		ok(w)
	}
}
// ValidateAccount checks the posted username/password pair and, on
// success, writes the matching account (sensitive data masked) as JSON.
//
// NOTE(review): the password check is a plain string comparison, implying
// directly comparable stored credentials and allowing timing analysis —
// confirm store.Account holds hashed passwords and compare with a
// constant-time function if applicable.
// NOTE(review): both "user not found" and "invalid credentials" return
// status 400; consider 404/401 if clients should distinguish them.
func (a *AccountHandler) ValidateAccount() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var payload *store.Account
		err := readJson(r, &payload)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		account, err := a.DB.GetAccount(payload.Username)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		} else if account == nil {
			http.Error(w, "user not found", 400)
			return
		} else if account.Password != payload.Password {
			http.Error(w, "invalid credentials", 400)
			return
		}
		account.MaskSensitiveData()
		toJson(w, account)
	}
}
|
package test
import (
"testing"
"dwc.com/lumiere/utils"
)
// Test_GeneratorGeneratesCodeOfLength verifies Generate returns a code of
// the requested length without error.
func Test_GeneratorGeneratesCodeOfLength(t *testing.T) {
	const expectedLength = 10
	generator := utils.CodeGenerator{}
	code, err := generator.Generate(expectedLength)
	if err != nil {
		t.Errorf("Did not expect error: %v", err)
	}
	if len(code) != expectedLength {
		t.Errorf("Expected code of length %d", expectedLength)
	}
}
|
//
// Illustration of the behavior of appengine/search with stemming in queries :
// stemming seems to be ignored by "goapp serve" on localhost,
// but works well in prod at http://gae-go-stemming.appspot.com/ .
//
// Official doc is https://cloud.google.com/appengine/docs/go/search/query_strings#Go_Stemming
//
package stemming
import (
"appengine"
"appengine/search"
"fmt"
"net/http"
"strconv"
)
// init registers the single request handler for every path.
func init() {
	http.HandleFunc("/", handle)
}
// handle indexes four sample sentences and then runs four queries — plain
// and stemmed ("~" prefix) — against the index, writing both phases'
// results to the response as plain text.
func handle(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	//
	// Put some items in the index.
	//
	fmt.Fprintln(w, "Indexing :")
	for i, sentence := range []string{
		"I have a dog.",
		"I like dogs.",
		"I have a cat.",
		"I like cats.",
	} {
		// Document IDs are just the sequential index, stringified.
		docId := strconv.Itoa(i)
		err := writeToIndex(c, docId, sentence)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintf(w, "%q \n", sentence)
	}
	fmt.Fprintln(w)
	//
	// Search the index.
	//
	// The stemmed queries (with ~) are expected to hit more results.
	//
	fmt.Fprintln(w, "Searching :")
	for _, queryValue := range []string{
		"dog",
		"~dog",
		"cat",
		"~cat",
	} {
		hits, err := searchInIndex(c, queryValue)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintf(w, "%q => %q \n", queryValue, hits)
	}
}
// searchableDoc is the indexed document type.
// It contains only one string field, Bulk, which the search service
// tokenizes automatically.
type searchableDoc struct {
	Bulk string
}
// writeToIndex stores text under docId in the "my-index" search index.
func writeToIndex(c appengine.Context, docId, text string) error {
	index, err := search.Open("my-index")
	if err != nil {
		return err
	}
	_, err = index.Put(c, docId, &searchableDoc{Bulk: text})
	return err
}
// searchInIndex runs query against "my-index" and returns the Bulk field
// of every hit.
func searchInIndex(c appengine.Context, query string) ([]string, error) {
	index, err := search.Open("my-index")
	if err != nil {
		return nil, err
	}
	results := make([]string, 0)
	iter := index.Search(c, query, nil)
	for {
		var doc searchableDoc
		_, nextErr := iter.Next(&doc)
		if nextErr == search.Done {
			break
		}
		if nextErr != nil {
			return nil, nextErr
		}
		results = append(results, doc.Bulk)
	}
	return results, nil
}
|
package jumphelper
import (
"fmt"
"strconv"
)
// ServerOption is a server option.
// NOTE(review): the setters below return func(*Server) error directly,
// which is the identical type; they could be declared to return ServerOption.
type ServerOption func(*Server) error
// SetServerAddressBookPath sets the Server's address book path.
// (The previous comment was copy-pasted from the host setter.)
func SetServerAddressBookPath(s string) func(*Server) error {
	return func(c *Server) error {
		c.addressBookPath = s
		return nil
	}
}
// SetServerBase32 sets the Server's base32 address string.
// (The previous comment was copy-pasted from the host setter.)
func SetServerBase32(s string) func(*Server) error {
	return func(c *Server) error {
		c.base32 = s
		return nil
	}
}
// SetServerBase64 sets the Server's base64 address string.
// (The previous comment was copy-pasted from the host setter.)
func SetServerBase64(s string) func(*Server) error {
	return func(c *Server) error {
		c.base64 = s
		return nil
	}
}
// SetServerHost sets the host of the Server client's SAM bridge.
func SetServerHost(s string) func(*Server) error {
	return func(c *Server) error {
		c.host = s
		return nil
	}
}
// SetServerPort sets the port of the Server client's SAM bridge.
// The string must parse as an integer in [0, 65535].
//
// Error strings are lowercased per Go convention (staticcheck ST1005);
// the validation itself is unchanged.
func SetServerPort(s string) func(*Server) error {
	return func(c *Server) error {
		port, err := strconv.Atoi(s)
		if err != nil {
			return fmt.Errorf("invalid port; non-number")
		}
		if port < 0 || port > 65535 {
			return fmt.Errorf("invalid port")
		}
		c.port = s
		return nil
	}
}
// SetServerPortInt sets the port of the Server client's SAM bridge with an
// int in [0, 65535].
//
// Error string lowercased per Go convention (staticcheck ST1005); the
// range check is unchanged, restated as a guard clause.
func SetServerPortInt(s int) func(*Server) error {
	return func(c *Server) error {
		if s < 0 || s > 65535 {
			return fmt.Errorf("invalid port")
		}
		c.port = strconv.Itoa(s)
		return nil
	}
}
// SetServerRate sets the Server's rate value (presumably a rate limit —
// confirm against the Server definition; the previous comment was
// copy-pasted from the host setter).
func SetServerRate(s int) func(*Server) error {
	return func(c *Server) error {
		c.rate = s
		return nil
	}
}
// SetServerBurst sets the Server's burst value (presumably a burst limit —
// confirm against the Server definition; the previous comment was
// copy-pasted from the host setter).
func SetServerBurst(s int) func(*Server) error {
	return func(c *Server) error {
		c.burst = s
		return nil
	}
}
// SetServerUseHelper sets the Server's ext flag (named "UseHelper" here but
// stored in a field called ext — confirm the field's meaning; the previous
// comment was copy-pasted from the host setter).
func SetServerUseHelper(s bool) func(*Server) error {
	return func(c *Server) error {
		c.ext = s
		return nil
	}
}
// SetServerSubscription sets the subscription list slice contents.
// A nil slice installs a single default subscription URL instead; note a
// non-nil empty slice appends nothing.
func SetServerSubscription(s []string) func(*Server) error {
	return func(c *Server) error {
		if s != nil {
			// Idiom: append the whole slice at once instead of element-by-element.
			c.subscriptionURLs = append(c.subscriptionURLs, s...)
			return nil
		}
		c.subscriptionURLs = append(c.subscriptionURLs, "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt")
		return nil
	}
}
// SetServerEnableListing allows the server to export a list of addresses
// to others.
func SetServerEnableListing(s bool) func(*Server) error {
	return func(c *Server) error {
		c.listing = s
		return nil
	}
}
// SetServerDifficulty sets the Server's proof-of-work difficulty, which
// must lie strictly between 0 and 20.
// (The previous comment was copy-pasted from the host setter, and the
// error string is lowercased per Go convention, staticcheck ST1005.)
func SetServerDifficulty(s int) func(*Server) error {
	return func(c *Server) error {
		if s <= 0 || s >= 20 {
			return fmt.Errorf("invalid proof-of-work difficulty")
		}
		c.difficulty = s
		return nil
	}
}
|
package main
/**
BMI계산기
키, 몸무게를 입력받아 체질량 지수를 계산하는 프로그램을 작성하라
bmi = (weight / (height * height))
bmi값이 18.5~25 사이로 나타나면 정상적인 몸무게라고 출력하고
그렇지 않는 경우는 과체중이나 저체중으로 나타낸다음
의사와 상의하라는 문구도 출력해보자
*/
import (
"fmt"
"bufio"
"os"
"strconv"
)
// lowerBound and higherBound delimit the BMI range reported as normal ("OK").
const lowerBound = 18.5
const higherBound = 25
// inFloat prints the prompt txt and reads lines from stdin until one
// parses as a float64, which it returns. Parse errors are printed and the
// prompt is repeated.
func inFloat(txt string) float64 {
	reader := bufio.NewScanner(os.Stdin)
	for {
		fmt.Print(txt)
		reader.Scan()
		value, err := strconv.ParseFloat(reader.Text(), 64)
		if err != nil {
			fmt.Println(err)
			continue
		}
		return value
	}
}
// main reads height (cm) and weight, computes BMI, and classifies it
// against lowerBound/higherBound. The switch mirrors the original
// if/else chain exactly (same order of comparisons).
func main() {
	height := inFloat("키? ")
	weight := inFloat("몸무게? ")
	bmi := weight / (height * 0.01 * height * 0.01)
	fmt.Printf("%.2f bmi\n", bmi)
	switch {
	case bmi >= lowerBound && bmi <= higherBound:
		fmt.Println("OK")
	case bmi < lowerBound:
		fmt.Println("저체중이야 병원가야 될것 같은데??")
	default:
		fmt.Println("과체중이야 병원가야 될것 같은데??")
	}
}
|
package main
import (
"testing"
)
// testData is the sample input shared by both tests below.
const testData = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
// TestTask1 checks the metadata sum of the sample tree.
func TestTask1(t *testing.T) {
	expected := 138
	rootNode := getRootNode(testData)
	if actual := getMetadataSum(rootNode); actual != expected {
		t.Error("Expected ", expected, ", got ", actual)
	}
}
// TestTask2 checks the root node's computed value for the sample tree.
func TestTask2(t *testing.T) {
	expected := 66
	rootNode := getRootNode(testData)
	if actual := rootNode.Value; actual != expected {
		t.Error("Expected ", expected, ", got ", actual)
	}
}
|
// Copyright 2017 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemonset
import (
"log"
appCore "alauda.io/app-core/pkg/app"
"alauda.io/diablo/src/backend/api"
"alauda.io/diablo/src/backend/errors"
metricapi "alauda.io/diablo/src/backend/integration/metric/api"
"alauda.io/diablo/src/backend/resource/common"
"alauda.io/diablo/src/backend/resource/dataselect"
"alauda.io/diablo/src/backend/resource/ingress"
"alauda.io/diablo/src/backend/resource/network"
"alauda.io/diablo/src/backend/resource/service"
"k8s.io/client-go/kubernetes"
)
// DaemonSetList contains a list of Daemon Sets in the cluster.
type DaemonSetList struct {
	ListMeta api.ListMeta `json:"listMeta"`
	DaemonSets []DaemonSet `json:"daemonSets"`
	CumulativeMetrics []metricapi.Metric `json:"cumulativeMetrics"`
	// Basic information about resources status on the list.
	Status common.ResourceStatus `json:"status"`
	// List of non-critical errors, that occurred during resource retrieval.
	Errors []error `json:"errors"`
}
// GetItems returns the daemon sets as generic resources; a nil receiver
// yields an empty (non-nil) slice.
func (list *DaemonSetList) GetItems() (res []common.Resource) {
	if list == nil {
		return []common.Resource{}
	}
	res = make([]common.Resource, len(list.DaemonSets))
	for i := range list.DaemonSets {
		res[i] = list.DaemonSets[i]
	}
	return res
}
// DaemonSet plus zero or more Kubernetes services that target the Daemon Set.
type DaemonSet struct {
	ObjectMeta api.ObjectMeta `json:"objectMeta"`
	TypeMeta api.TypeMeta `json:"typeMeta"`
	// Aggregate pod information for pods controlled by this Daemon Set.
	PodInfo common.PodControllerInfo `json:"podInfo"`
	Status common.ControllerStatus `json:"status"`
	// Container images of the Daemon Set.
	ContainerImages []string `json:"containerImages"`
	// InitContainer images of the Daemon Set.
	InitContainerImages []string `json:"initContainerImages"`
	// Addresses through which the Daemon Set can be visited.
	VisitAddresses network.VisitAddress `json:"visitAddresses"`
}
// DaemonSetSlice implements sort.Interface, ordering by ObjectMeta.Name.
type DaemonSetSlice []DaemonSet

func (s DaemonSetSlice) Len() int { return len(s) }
func (s DaemonSetSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s DaemonSetSlice) Less(i, j int) bool { return s[i].ObjectMeta.Name < s[j].ObjectMeta.Name }
// GetObjectMeta returns the object metadata of the daemon set; it makes
// DaemonSet satisfy the common.Resource interface (see GetItems).
// The receiver was renamed from "ing" (apparently copied from the ingress
// resource) to "d" for clarity; behavior is unchanged.
func (d DaemonSet) GetObjectMeta() api.ObjectMeta {
	return d.ObjectMeta
}
// GetDaemonSetList returns a list of all Daemon Set in the cluster.
// It fans out list requests for daemon sets, pods, events, services and
// ingresses through resource channels and delegates the assembly to
// GetDaemonSetListFromChannels.
func GetDaemonSetList(client kubernetes.Interface, nsQuery *common.NamespaceQuery, dsQuery *dataselect.DataSelectQuery,
	metricClient metricapi.MetricClient) (*DaemonSetList, error) {
	log.Print("Getting list of all daemonSets in the cluster")
	channels := &common.ResourceChannels{
		DaemonSetList: common.GetDaemonSetListChannel(client, nsQuery, 1),
		PodList: common.GetPodListChannel(client, nsQuery, 1),
		EventList: common.GetEventListChannel(client, nsQuery, 1),
		// List and error channels to Services.
		ServiceList: common.GetServiceListChannel(client, nsQuery, 1),
		// List and error channels to Ingresses.
		IngressList: common.GetIngressListChannel(client, nsQuery, 1),
	}
	return GetDaemonSetListFromChannels(channels, dsQuery, metricClient)
}
// GetDaemonSetListFromChannels returns a list of all Daemon Set in the cluster
// reading required resource list once from the channels.
// Errors from each channel are classified: a critical error aborts the call,
// non-critical ones are accumulated and attached to the resulting list.
func GetDaemonSetListFromChannels(channels *common.ResourceChannels, dsQuery *dataselect.DataSelectQuery,
	metricClient metricapi.MetricClient) (*DaemonSetList, error) {
	daemonSets := <-channels.DaemonSetList.List
	err := <-channels.DaemonSetList.Error
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}
	// The same receive-then-classify pattern repeats for each dependent
	// resource; each receive may extend the non-critical error list.
	pods := <-channels.PodList.List
	err = <-channels.PodList.Error
	nonCriticalErrors, criticalError = errors.AppendError(err, nonCriticalErrors)
	if criticalError != nil {
		return nil, criticalError
	}
	events := <-channels.EventList.List
	err = <-channels.EventList.Error
	nonCriticalErrors, criticalError = errors.AppendError(err, nonCriticalErrors)
	if criticalError != nil {
		return nil, criticalError
	}
	ss := <-channels.ServiceList.List
	err = <-channels.ServiceList.Error
	nonCriticalErrors, criticalError = errors.AppendError(err, nonCriticalErrors)
	if criticalError != nil {
		return nil, criticalError
	}
	is := <-channels.IngressList.List
	err = <-channels.IngressList.Error
	nonCriticalErrors, criticalError = errors.AppendError(err, nonCriticalErrors)
	if criticalError != nil {
		return nil, criticalError
	}
	// Bundle the raw items; toDaemonSetList performs filtering, pagination
	// and metric lookup.
	rc := &common.ResourceCollection{
		DaemonSets: daemonSets.Items,
		Pods: pods.Items,
		Events: events.Items,
		Services: ss.Items,
		Ingresses: is.Items,
	}
	dsList := toDaemonSetList(rc, nonCriticalErrors, dsQuery, metricClient)
	return dsList, nil
}
// toDaemonSetList converts the raw resource collection into a DaemonSetList:
// it applies the data-select query (filter/sort/paginate), resolves pod info,
// network visit addresses and controller status for each daemon set, and
// finally attaches cumulative metrics.
func toDaemonSetList(rc *common.ResourceCollection, nonCriticalErrors []error,
	dsQuery *dataselect.DataSelectQuery, metricClient metricapi.MetricClient) *DaemonSetList {
	daemonSetList := &DaemonSetList{
		DaemonSets: make([]DaemonSet, 0),
		ListMeta: api.ListMeta{TotalItems: len(rc.DaemonSets)},
		Errors: nonCriticalErrors,
	}
	if len(rc.DaemonSets) == 0 {
		return daemonSetList
	}
	cachedResources := &metricapi.CachedResources{
		Pods: rc.Pods,
	}
	dsCells, metricPromises, filteredTotal := dataselect.GenericDataSelectWithFilterAndMetrics(ToCells(rc.DaemonSets),
		dsQuery, cachedResources, metricClient)
	daemonSets := FromCells(dsCells)
	// TotalItems is overwritten with the filtered count.
	daemonSetList.ListMeta = api.ListMeta{TotalItems: filteredTotal}
	for _, daemonSet := range daemonSets {
		matchingPods := common.FilterPodsByControllerRef(&daemonSet, rc.Pods)
		_, visitAddresses := network.GetNetworkInfo(daemonSet.Spec.Template.Spec.Containers, rc.Ingresses, rc.Services, daemonSet.Namespace, daemonSet.Spec.Template.Labels)
		podInfo := common.GetPodControllerInfo(daemonSet.Status.CurrentNumberScheduled, &daemonSet.Status.DesiredNumberScheduled, daemonSet.GetObjectMeta(), matchingPods, rc.Events)
		daemonSetList.DaemonSets = append(daemonSetList.DaemonSets, DaemonSet{
			ObjectMeta: api.NewObjectMeta(daemonSet.ObjectMeta),
			TypeMeta: api.NewTypeMeta(api.ResourceKindDaemonSet),
			ContainerImages: common.GetContainerImages(&daemonSet.Spec.Template.Spec),
			InitContainerImages: common.GetInitContainerImages(&daemonSet.Spec.Template.Spec),
			VisitAddresses: visitAddresses,
			Status: common.GetControllerStatus(&podInfo),
			PodInfo: podInfo,
		})
	}
	// Metric retrieval failures are non-fatal: fall back to an empty slice.
	cumulativeMetrics, err := metricPromises.GetMetrics()
	daemonSetList.CumulativeMetrics = cumulativeMetrics
	if err != nil {
		daemonSetList.CumulativeMetrics = make([]metricapi.Metric, 0)
	}
	return daemonSetList
}
// ToDaemonSetList filters a generic resource slice down to the DaemonSet
// elements it contains, preserving order and skipping all other types.
func ToDaemonSetList(res []common.Resource) (list []DaemonSet) {
	list = make([]DaemonSet, 0, len(res))
	for _, r := range res {
		if ds, ok := r.(DaemonSet); ok {
			list = append(list, ds)
		}
	}
	return list
}
// GenerateFromCore builds a DaemonSetList from an app-core Application:
// it extracts daemon sets, ingresses and services from the application,
// stores them into the resource collection and converts the result with
// no data selection applied.
func GenerateFromCore(app appCore.Application, rc *common.ResourceCollection, metricClient metricapi.MetricClient) (*DaemonSetList, error) {
	daemonSets, err := GetFormCore(app)
	if err != nil {
		return nil, err
	}
	rc.DaemonSets = daemonSets
	ingresses, err := ingress.GetFormCore(app)
	if err != nil {
		return nil, err
	}
	rc.Ingresses = ingresses
	services, err := service.GetFormCore(app)
	if err != nil {
		return nil, err
	}
	rc.Services = services
	nonCriticalErrors := make([]error, 0)
	return toDaemonSetList(rc, nonCriticalErrors, dataselect.NoDataSelect, metricClient), nil
}
|
package gb32100
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseCode exercises parsing, validation and neighbor generation for
// GB 32100-2015 unified social credit codes.
func TestParseCode(t *testing.T) {
	u, err := ParseCode("91350100M000100Y43")
	assert.NoError(t, err)
	// The 18-character code decomposes into registry, org type, division,
	// organization code and check digit.
	assert.Equal(t, &Code{
		RegAdminCode: "9",
		OrgTypeCode: "1",
		DivisionCode: "350100",
		OrgCode: "M000100Y4",
		Sum: "3",
	}, u)
	assert.Equal(t, "工商", u.RegAdminName())
	assert.Equal(t, "企业", u.OrgTypeName())
	assert.True(t, u.IsValid())
	assert.True(t, IsValid(u.String()))
	assert.True(t, IsValid("91350100M000100Y43"))
	{
		// Next yields the successor code, check digit recomputed.
		next, err := Next("91310230MA1K314J7C")
		assert.NoError(t, err)
		assert.Equal(t, "91310230MA1K314K57", next)
	}
	{
		// Prev is the inverse of Next.
		next, err := Prev("91310230MA1K314K57")
		assert.NoError(t, err)
		assert.Equal(t, "91310230MA1K314J7C", next)
	}
}
|
package handlers
import (
"fmt"
"strconv"
"strings"
"d7y.io/dragonfly/v2/manager/service"
"github.com/gin-gonic/gin"
)
// Handlers bundles the REST service and exposes it to the HTTP handlers.
type Handlers struct {
	// Service implements the manager's REST business logic.
	Service service.REST
}
// New builds a Handlers instance backed by the given REST service.
func New(service service.REST) *Handlers {
	h := Handlers{Service: service}
	return &h
}
// setPaginationDefault replaces zero-valued pagination parameters with
// their defaults: page 1, ten items per page.
func (h *Handlers) setPaginationDefault(page, perPage *int) {
	const (
		defaultPage    = 1
		defaultPerPage = 10
	)
	if *page == 0 {
		*page = defaultPage
	}
	if *perPage == 0 {
		*perPage = defaultPerPage
	}
}
// setPaginationLinkHeader writes an RFC 8288 style "Link" response header
// with prev/next/first/last page URLs derived from the request URL.
// It assumes perPage > 0 (guaranteed when setPaginationDefault ran first).
//
// Fixes over the previous version:
//   - totalPage used floor division, so a partially filled last page was
//     dropped (e.g. 25 items / 10 per page reported 2 pages, not 3);
//   - the loop mutated ctx.Request.URL through its shared pointer; it now
//     works on a copy.
func (h *Handlers) setPaginationLinkHeader(ctx *gin.Context, page, perPage, totalCount int) {
	// Ceiling division: a partially filled final page still counts.
	totalPage := (totalCount + perPage - 1) / perPage
	if totalPage == 0 {
		totalPage = 1
	}
	// Clamp prev/next to the valid [1, totalPage] range.
	prevPage := page - 1
	if page <= 1 {
		prevPage = 1
	}
	nextPage := page + 1
	if page >= totalPage {
		nextPage = page
	}
	links := make([]string, 0, 4)
	for _, v := range []struct {
		Name string
		Page int
	}{
		{Name: "prev", Page: prevPage},
		{Name: "next", Page: nextPage},
		{Name: "first", Page: 1},
		{Name: "last", Page: totalPage},
	} {
		// Work on a shallow copy so the request's URL is not modified.
		u := *ctx.Request.URL
		query := u.Query()
		query.Set("page", strconv.Itoa(v.Page))
		query.Set("per_page", strconv.Itoa(perPage))
		u.RawQuery = query.Encode()
		links = append(links, fmt.Sprintf("<%s>;rel=%s", u.String(), v.Name))
	}
	ctx.Header("Link", strings.Join(links, ","))
}
|
// SPDX-License-Identifier: MIT
//go:build ignore
// +build ignore
package main
import (
"io/ioutil"
"os"
"github.com/caixw/apidoc/v7/core"
"github.com/caixw/apidoc/v7/internal/ast/asttest"
"github.com/caixw/apidoc/v7/internal/xmlenc"
)
// main regenerates the XML test fixture consumed by the asttest package:
// it encodes the canonical AST with tab indentation and writes the result
// to asttest.Filename.
func main() {
	data, err := xmlenc.Encode("\t", asttest.Get(), core.XMLNamespace, "aa")
	if err != nil {
		panic(err)
	}
	// NOTE(review): ioutil.WriteFile is deprecated since Go 1.16; os.WriteFile
	// is the modern equivalent (left unchanged here to avoid touching imports).
	err = ioutil.WriteFile(asttest.Filename, data, os.ModePerm)
	if err != nil {
		panic(err)
	}
}
|
package clusterregistration
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/service_account_cache_mock.go -package=mocks github.com/rancher/wrangler/pkg/generated/controllers/core/v1 ServiceAccountCache
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/secret_cache_mock.go -package=mocks github.com/rancher/wrangler/pkg/generated/controllers/core/v1 SecretCache
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/secret_controller_mock.go -package=mocks github.com/rancher/wrangler/pkg/generated/controllers/core/v1 SecretController
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/cluster_client_mock.go -package=mocks github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1 ClusterClient
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/cluster_cache_mock.go -package=mocks github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1 ClusterCache
//go:generate mockgen --build_flags=--mod=mod -destination=../../mocks/cluster_registration_controller_mock.go -package=mocks github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1 ClusterRegistrationController
import (
"fmt"
"github.com/golang/mock/gomock"
"github.com/rancher/fleet/internal/cmd/controller/mocks"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/wrangler/pkg/generic"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Ginkgo spec for the ClusterRegistration OnChange handler: each Context
// covers one stage of the registration flow (already granted, cluster
// missing, cluster present with/without service account and secret).
var _ = Describe("ClusterRegistration OnChange", func() {
	var (
		request *fleet.ClusterRegistration
		status fleet.ClusterRegistrationStatus
		cluster *fleet.Cluster
		sa *corev1.ServiceAccount
		saCache *mocks.MockServiceAccountCache
		secretCache *mocks.MockSecretCache
		secretController *mocks.MockSecretController
		clusterClient *mocks.MockClusterClient
		clusterRegistrationController *mocks.MockClusterRegistrationController
		clusterCache *mocks.MockClusterCache
		h *handler
		// reusable canned results for the mocked caches/clients
		notFound = errors.NewNotFound(schema.GroupResource{}, "")
		anError = fmt.Errorf("an error occurred")
	)
	BeforeEach(func() {
		// Fresh mocks and handler per spec so expectations don't leak.
		ctrl := gomock.NewController(GinkgoT())
		saCache = mocks.NewMockServiceAccountCache(ctrl)
		secretCache = mocks.NewMockSecretCache(ctrl)
		secretController = mocks.NewMockSecretController(ctrl)
		clusterClient = mocks.NewMockClusterClient(ctrl)
		clusterRegistrationController = mocks.NewMockClusterRegistrationController(ctrl)
		clusterCache = mocks.NewMockClusterCache(ctrl)
		h = &handler{
			systemNamespace: "fleet-system",
			systemRegistrationNamespace: "fleet-clusters-system",
			clusterRegistration: clusterRegistrationController,
			clusterCache: clusterCache,
			clusters: clusterClient,
			secretsCache: secretCache,
			secrets: secretController,
			serviceAccountCache: saCache,
		}
	})
	Context("ClusterRegistration already granted", func() {
		BeforeEach(func() {
			status = fleet.ClusterRegistrationStatus{
				Granted: true,
			}
		})
		It("does nothing", func() {
			objs, newStatus, err := h.OnChange(request, status)
			Expect(err).To(Equal(generic.ErrSkip))
			Expect(objs).To(BeEmpty())
			Expect(newStatus.Granted).To(BeTrue())
		})
	})
	Context("Cluster is missing", func() {
		BeforeEach(func() {
			request = &fleet.ClusterRegistration{
				Spec: fleet.ClusterRegistrationSpec{
					ClientID: "client-id",
				},
			}
			status = fleet.ClusterRegistrationStatus{}
			clusterCache.EXPECT().GetByIndex(gomock.Any(), gomock.Any()).Return(nil, nil)
			// code panics if cache.Get returns an error or nil
			// NOTE(review): chained .Return calls — the second one
			// ((nil, notFound)) presumably overrides the first; confirm
			// against gomock's Call.Return semantics.
			clusterCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, nil).Return(nil, notFound)
		})
		When("cluster creation works", func() {
			BeforeEach(func() {
				clusterClient.EXPECT().Create(gomock.Any()).Return(nil, nil).Do(func(obj interface{}) {
					switch cluster := obj.(type) {
					case *fleet.Cluster:
						Expect(cluster.Spec.ClientID).To(Equal("client-id"))
					default:
						Fail("unexpected type")
					}
				})
			})
			It("creates the missing cluster", func() {
				objs, newStatus, err := h.OnChange(request, status)
				Expect(err).ToNot(HaveOccurred())
				Expect(objs).To(BeEmpty())
				Expect(newStatus.Granted).To(BeFalse())
			})
		})
		When("cluster creation fails", func() {
			BeforeEach(func() {
				clusterClient.EXPECT().Create(gomock.Any()).Return(nil, anError)
			})
			It("returns an error", func() {
				objs, newStatus, err := h.OnChange(request, status)
				Expect(err).To(HaveOccurred())
				Expect(objs).To(BeEmpty())
				Expect(newStatus.Granted).To(BeFalse())
			})
		})
	})
	Context("Cluster exists", func() {
		BeforeEach(func() {
			request = &fleet.ClusterRegistration{
				ObjectMeta: metav1.ObjectMeta{
					Name: "request-1",
					Namespace: "fleet-default",
				},
				Spec: fleet.ClusterRegistrationSpec{
					ClientID: "client-id",
				},
			}
			cluster = &fleet.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cluster",
					Namespace: "fleet-default",
				},
				Spec: fleet.ClusterSpec{
					ClientID: "client-id",
				},
			}
			status = fleet.ClusterRegistrationStatus{}
			clusterCache.EXPECT().GetByIndex(gomock.Any(), gomock.Any()).Return(nil, nil)
			clusterCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, nil).Return(cluster, nil)
		})
		When("cluster status has no namespace", func() {
			It("sets the cluster name into the registrations status", func() {
				objs, newStatus, err := h.OnChange(request, status)
				Expect(err).ToNot(HaveOccurred())
				Expect(objs).To(BeEmpty())
				Expect(newStatus.Granted).To(BeFalse())
				Expect(newStatus.ClusterName).To(Equal("cluster"))
			})
		})
		When("service account does not exist", func() {
			BeforeEach(func() {
				cluster.Status = fleet.ClusterStatus{Namespace: "fleet-default"}
				saCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, notFound)
			})
			It("creates a new service account", func() {
				objs, newStatus, err := h.OnChange(request, status)
				Expect(err).ToNot(HaveOccurred())
				Expect(objs).To(HaveLen(1))
				Expect(newStatus.Granted).To(BeFalse())
				Expect(newStatus.ClusterName).To(Equal("cluster"))
			})
		})
		When("service account secret is missing", func() {
			BeforeEach(func() {
				cluster.Status = fleet.ClusterStatus{Namespace: "fleet-default"}
				// post k8s 1.24 service account without sa.Secrets list
				sa = &corev1.ServiceAccount{}
				saCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(sa, nil)
			})
			Context("cannot create secret", func() {
				BeforeEach(func() {
					secretController.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, notFound)
					secretController.EXPECT().Create(gomock.Any()).Return(nil, anError)
				})
				It("creates a new service account and errors", func() {
					objs, _, err := h.OnChange(request, status)
					Expect(err).To(HaveOccurred())
					Expect(err.Error()).To(ContainSubstring("failed to authorize cluster"))
					Expect(objs).To(BeEmpty())
				})
			})
			Context("authorizeCluster returns nil,nil", func() {
				BeforeEach(func() {
					// pre k8s 1.24 service account has sa.Secrets list
					sa.Secrets = []corev1.ObjectReference{{Name: "tokensecret"}}
					secretCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, notFound)
					secretController.EXPECT().Get(gomock.Any(), "tokensecret", gomock.Any()).Return(nil, nil)
				})
				It("returns early", func() {
					objs, newStatus, err := h.OnChange(request, status)
					Expect(err).ToNot(HaveOccurred())
					Expect(objs).To(BeEmpty())
					Expect(newStatus.ClusterName).To(Equal("cluster"))
					Expect(newStatus.Granted).To(BeFalse())
				})
			})
		})
		When("service account secret exists", func() {
			BeforeEach(func() {
				cluster.Status = fleet.ClusterStatus{Namespace: "fleet-default"}
				sa = &corev1.ServiceAccount{}
				saCache.EXPECT().Get(gomock.Any(), gomock.Any()).Return(sa, nil)
				// needs token here, otherwise controller will sleep to wait for it
				secret := &corev1.Secret{
					Data: map[string][]byte{"token": []byte("secrettoken")},
				}
				secretController.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(secret, nil)
				clusterRegistrationController.EXPECT().List(gomock.Any(), gomock.Any()).Return(&fleet.ClusterRegistrationList{}, nil)
			})
			Context("grants registration, cleans up and creates objects", func() {
				BeforeEach(func() {
				})
				It("creates a new secret", func() {
					objs, newStatus, err := h.OnChange(request, status)
					Expect(err).ToNot(HaveOccurred())
					Expect(objs).To(HaveLen(6))
					Expect(newStatus.Granted).To(BeTrue())
				})
			})
		})
	})
})
|
package controller
import (
"fmt"
"github.com/go-lib-utils-master/time"
"io"
"log"
"mime/multipart"
"net/http"
"os"
"strconv"
"strings"
"text/template"
time2 "time"
"web/Cinema/modle"
"web/Cinema/utils"
)
// PATH is the absolute directory where uploaded poster images are stored.
// NOTE(review): hard-coded Windows path; consider making it configurable.
const PATH = "D:\\gogo\\src\\web\\Cinema\\view\\static\\img\\"

// UpLoadImg stores an uploaded image under PATH and returns the relative URL
// ("static/img/<name>") used by the templates. It returns "" when the upload
// or the destination file creation failed.
//
// Fixes over the previous version: the redundant second err check is gone,
// both file handles are closed (they previously leaked), and a create
// failure now returns "" instead of a dangling path.
//
// NOTE(review): h.Filename is client-controlled; it should be sanitized
// (e.g. filepath.Base) to prevent path traversal — confirm with callers.
func UpLoadImg(f multipart.File, h *multipart.FileHeader, err error) string {
	if err != nil {
		log.Println(err)
		return ""
	}
	defer f.Close()
	fileName := h.Filename
	fmt.Println(fileName)
	t, err := os.Create(PATH + fileName)
	if err != nil {
		log.Println(err)
		return ""
	}
	defer t.Close()
	if _, err := io.Copy(t, f); err != nil {
		fmt.Println(err)
	}
	return "static/img/" + fileName
}
// checkIsManager verifies the current user is logged in and has manager
// rights. When not logged in it renders the login page; when logged in but
// not a manager it falls back to the movie listing page. It returns true
// only when the request may proceed to a manager-only handler.
func checkIsManager(w http.ResponseWriter, r *http.Request) bool {
	is,see:=IsLogin(r)
	if !is {
		t := template.Must(template.ParseFiles("view/pages/users/login.html"))
		t.Execute(w, r)
		return false
	}
	// The session only proves login; the user record decides manager state.
	is=modle.CheckUserState(see.UserName)
	if !is {
		GetMoviePage(w,r)
		return false
	}
	return true
}
// AddInfo renders the "add movie" form for managers. The optional "flag"
// query parameter carries a status message from a previous submission.
func AddInfo(w http.ResponseWriter, r *http.Request) {
	if !checkIsManager(w,r){
		return
	}
	t := template.Must(template.ParseFiles("view/pages/manager/addMovie.html"))
	flag := r.FormValue("flag")
	t.Execute(w, flag)
}
// AddMovie handles the "add movie" form submission: it uploads the poster
// image, assembles a Movie record (grade = integer part + decimal digit/10)
// and stores it, then redirects back to the form with a success flag.
func AddMovie(w http.ResponseWriter, r *http.Request) {
	if !checkIsManager(w,r){
		return
	}
	movieName := r.PostFormValue("movieName")
	// The grade arrives as two fields: integer part "ge" and decimal "point".
	ge, _ := strconv.ParseFloat(r.PostFormValue("ge"), 0)
	point, _ := strconv.ParseFloat(r.PostFormValue("point"), 0)
	f, h, err := r.FormFile("test")
	ImgPath := UpLoadImg(f, h, err)
	flag, _ := strconv.ParseInt(r.PostFormValue("flag"), 10, 0)
	release := r.PostFormValue("release")
	timeLong := r.PostFormValue("timelong")
	offTime := r.PostFormValue("offtime")
	fmt.Println(movieName, ge, point, ImgPath, err, flag, release, timeLong, offTime)
	movie := modle.Movie{
		MovieName: movieName,
		MovieGrade: ge + point/10,
		BoxOffice: 0,
		MovieFlag: int(flag),
		TimeLong: timeLong,
		ImgPath: ImgPath,
		ReleaseTime: release,
		OffTime: offTime,
	}
	modle.AddMovie(movie)
	// NOTE(review): 303 See Other is the conventional status for a redirect
	// after POST; 301 may be cached by browsers — confirm before changing.
	http.Redirect(w, r, "http://localhost:1122/addInfo?flag="+"1", http.StatusMovedPermanently)
}
func AddPlan(w http.ResponseWriter, r *http.Request) {
t := template.Must(template.ParseFiles("view/pages/manager/addPlan.html"))
movies := modle.GetAllMovies()
flag := r.FormValue("flag")
type temp struct {
Movies []string
Flag string
}
t.Execute(w, temp{movies, flag})
}
// AddMyPlan handles the screening-plan form submission: it normalizes the
// HTML datetime-local value, computes the end time from the movie length,
// checks the hall for conflicts and, if free, persists the plan and its
// seat map before redirecting back with a status flag.
func AddMyPlan(w http.ResponseWriter, r *http.Request) {
	if !checkIsManager(w,r){
		return
	}
	movieName := r.PostFormValue("movieName")
	dateStart := r.PostFormValue("date")
	// Browser sends "2006-01-02T15:04"; convert to "2006-01-02 15:04:00".
	dateStart = strings.Replace(dateStart, "T", " ", -1)
	dateStart = dateStart + ":00"
	end := GetEndDate(movieName, dateStart)
	dateEnd := time.ParseDataTimeToStr(end)
	start, _ := time.ParseDateTime(dateStart)
	hall := r.PostFormValue("hall")
	money := r.PostFormValue("money")
	elseSpe := r.PostFormValue("elseSpe")
	re := CheckDate(start, end, hall)
	if re {
		plan := modle.PerfPlan{
			PerfID: utils.CreateUUID(),
			MovieName: movieName,
			StartTime: dateStart,
			EndTime: dateEnd,
			Halls: hall,
			Money: money,
			ElseSpe: elseSpe,
		}
		modle.AddPlan(plan)
		// Seats are pre-created for the hall so tickets can be sold.
		modle.AddSit(plan.PerfID, hall)
		http.Redirect(w, r, "http://localhost:1122/addPlan?flag="+"增加成功", http.StatusMovedPermanently)
	} else {
		http.Redirect(w, r, "http://localhost:1122/addPlan?flag="+"请检查时间是否冲突", http.StatusMovedPermanently)
	}
}
// GetEndDate computes a screening's end time: the parsed start time plus the
// movie's running length in minutes (looked up from the movie record).
func GetEndDate(movieName, dateStart string) time2.Time {
	timeLong := modle.GetTimeLongAndImgPath(movieName)
	fmt.Println(dateStart)
	start, err := time.ParseDateTime(dateStart)
	if err != nil {
		log.Println(err)
	}
	// timeLong is a minute count stored as a string, e.g. "120" -> "120m".
	mm, err := time2.ParseDuration(timeLong + "m")
	if err != nil {
		log.Println(err)
	}
	fmt.Println(mm)
	end := start.Add(mm)
	return end
}
// CheckDate reports whether a new screening [dateStart, dateEnd] in the given
// hall does not conflict with already scheduled plans.
// NOTE(review): only the start times returned by GetPlanByHall are compared
// against the new interval; an existing screening that starts before
// dateStart but ends after it would not be detected — verify the data
// GetPlanByHall returns before relying on this check.
func CheckDate(dateStart, dateEnd time2.Time, hall string) bool {
	dates := modle.GetPlanByHall(hall)
	for _, v := range dates {
		t, err := time.ParseDateTime(v)
		if err != nil {
			log.Println("??", err)
		}
		if t.After(dateStart) && t.Before(dateEnd) {
			return false
		}
	}
	return true
}
// DeletePlan deletes a screening plan by its perfID, but only when no
// tickets have been sold for it; either way it redirects back to the
// deletion page with an appropriate status flag.
func DeletePlan(w http.ResponseWriter, r *http.Request) {
	if !checkIsManager(w,r){
		return
	}
	perfID := r.FormValue("perfID")
	// QueryHas reports whether the plan is still deletable (no sales).
	flag := modle.QueryHas(perfID)
	if flag {
		modle.DeletePlan(perfID)
		http.Redirect(w, r, "http://localhost:1122/deleteInfo", http.StatusMovedPermanently)
	} else {
		http.Redirect(w, r, "http://localhost:1122/deleteInfo?flag="+"该影片存在售出,不可删除", http.StatusMovedPermanently)
	}
}
// LoadAllPlan renders the plan-deletion page listing every screening plan.
// The optional "flag" query parameter carries a status message.
func LoadAllPlan(w http.ResponseWriter, r *http.Request) {
	if !checkIsManager(w,r){
		return
	}
	t := template.Must(template.ParseFiles("view/pages/manager/deletePlan.html"))
	plans := modle.GetAllPlans()
	flag := r.FormValue("flag")
	// temp carries both the plan list and the status flag to the template.
	type temp struct {
		Plans []modle.PerfPlan
		Flag string
	}
	t.Execute(w, temp{plans,flag})
}
|
package main
import "fmt"
// sendx pushes an endless increasing sequence of integers into ch.
// WARNING: it never returns on its own; once the receiver closes ch (see
// recvx) the next send panics with "send on closed channel" — which is how
// this demo program terminates.
func sendx(ch chan int) {
	i := 0
	for {
		i++
		ch <- i
	}
}
// recvx reads and prints two values from ch, then closes the channel.
// NOTE(review): closing a channel from the receiver side violates the
// "only the sender closes" convention; with sendx still sending, this
// close makes the program panic.
func recvx(ch chan int) {
	value := <- ch
	fmt.Println(value)
	value = <- ch
	fmt.Println(value)
	close(ch)
}
// main starts a receiving goroutine and sends on a buffered channel from
// the main goroutine. Because recvx closes the channel while sendx keeps
// sending, this demo ends in a "send on closed channel" panic.
func main() {
	var ch = make(chan int, 4)
	go recvx(ch)
	sendx(ch)
}
|
package limiter
import (
"time"
"github.com/gin-gonic/gin"
"github.com/juju/ratelimit"
)
// LimitInterface abstracts a rate limiter: it derives a bucket key from the
// request, looks up the token bucket for a key, and registers bucket rules.
type LimitInterface interface {
	// Key extracts the limiter key for the incoming request.
	Key(c *gin.Context) string
	// GetBucket returns the bucket for key and whether it exists.
	GetBucket(key string) (*ratelimit.Bucket, bool)
	// AddBucket registers one or more rules and returns the limiter for chaining.
	AddBucket(rules ...BucketRule) LimitInterface
}

// Limiter holds token buckets indexed by key.
type Limiter struct {
	limiterBuckets map[string]*ratelimit.Bucket
}

// BucketRule describes one token bucket: the key it applies to, the refill
// interval, the bucket capacity and the tokens added per interval.
type BucketRule struct {
	Key string
	Interval time.Duration
	Capacity int64
	Quantum int64
}
|
package main
// initializeRoutes registers every review-service endpoint on the router.
// NOTE(review): the router appears to be gorilla/mux (HandleFunc + Methods);
// such routers match in registration order, so the static "/reviews/status"
// route must stay registered before "/reviews/{reviewId}" — confirm before
// reordering.
func (a *App) initializeRoutes() {
	// endpoints
	a.Router.HandleFunc("/reviews/status", a.getStatus).Methods("GET")
	a.Router.HandleFunc("/reviews", a.getAllMyReviews).Methods("GET")
	a.Router.HandleFunc("/reviews", a.createReview).Methods("POST")
	a.Router.HandleFunc("/reviews/{reviewId}", a.getReview).Methods("GET")
	a.Router.HandleFunc("/reviews/{reviewId}", a.deleteReview).Methods("DELETE")
	a.Router.HandleFunc("/reviews/auction/{auctionId}",
		a.getAllReviewsByAuction).Methods("GET")
	a.Router.HandleFunc("/reviews/item/{itemId}",
		a.getReviewByItem).Methods("GET")
	a.Router.HandleFunc("/reviews/of/user/{publicId}",
		a.getAllReviewsAboutUser).Methods("GET")
	a.Router.HandleFunc("/reviews/by/user/{publicId}",
		a.getAllReviewsByUser).Methods("GET")
	a.Router.HandleFunc("/reviews/user/{publicId}",
		a.getMetadataOfUser).Methods("GET")
}
|
/*
* Licensed to Echogogo under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Echogogo licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package main
import (
"bytes"
"fmt"
"github.com/buger/jsonparser"
"github.com/quoeamaster/echogogo_plugin"
"io/ioutil"
"math/rand"
"net/http"
"strings"
"time"
)
// MockModuleModel is the response model for the Mock module.
type MockModuleModel struct {
	ResponseBody   string
	MockMethodName string
	Timestamp      time.Time
	TimestampEpoch int64
	// isJsonResponse is an internal rendering hint, not serialized.
	isJsonResponse    bool
	ResponseBodyArray []map[string]interface{}
}

// String returns a JSON-style representation of the model.
// Bug fix: the original wrote the two literal characters `\n`
// (backslash + n) between fields, producing malformed output; a real
// newline is now emitted instead.
func (m *MockModuleModel) String() string {
	var bBufPtr = new(bytes.Buffer)
	bBufPtr.WriteString("{")
	bBufPtr.WriteString("\"ResponseBody\": \"")
	bBufPtr.WriteString(m.ResponseBody)
	bBufPtr.WriteString("\"")
	bBufPtr.WriteString(",\n\"MockMethodName\": \"")
	bBufPtr.WriteString(m.MockMethodName)
	bBufPtr.WriteString("\"")
	bBufPtr.WriteString(",\n\"Timestamp\": \"")
	bBufPtr.WriteString(m.Timestamp.String())
	bBufPtr.WriteString("\"")
	bBufPtr.WriteString(",\n\"TimestampEpoch\": ")
	bBufPtr.WriteString(fmt.Sprintf("%v", m.TimestampEpoch))
	bBufPtr.WriteString("\n}")
	return bBufPtr.String()
}
// extractPathParamMock strips an optional "/json" or "/xml" format prefix
// from the request path. It returns the remaining method path (keeping its
// leading slash) and whether the response should be rendered as JSON
// (true by default and for "/json/...", false for "/xml/...").
func extractPathParamMock(method string) (methodName string, isJsonResponse bool) {
	switch {
	case strings.HasPrefix(method, "/json/"):
		return strings.TrimPrefix(method, "/json"), true
	case strings.HasPrefix(method, "/xml/"):
		return strings.TrimPrefix(method, "/xml"), false
	default:
		return method, true
	}
}
/* (m *MockModule) */
/* (m *MockModule) */
// GetRestConfig returns the plugin's REST configuration: consumed/produced
// formats, the base path "/mock", and the supported endpoint patterns
// (each verb with plain, /json and /xml variants plus the configuration
// upload endpoint).
func GetRestConfig() map[string]interface{} {
	/* TODO: either read from a file or simply overwrite it programmatically.... */
	mapModelPtr := make(map[string]interface{})
	mapModelPtr["consumeFormat"] = echogogo.FORMAT_JSON
	mapModelPtr["produceFormat"] = echogogo.FORMAT_XML_JSON
	mapModelPtr["path"] = "/mock"
	mapModelPtr["endPoints"] = []string {
		"GET::/{method}", "GET::/json/{method}", "GET::/xml/{method}",
		"POST::/{method}", "POST::/json/{method}", "POST::/xml/{method}",
		"PUT::/{method}", "PUT::/json/{method}", "PUT::/xml/{method}",
		"DELETE::/{method}", "DELETE::/json/{method}", "DELETE::/xml/{method}",
		"POST::/configMockEndPoints",
	}
	return mapModelPtr
}
/* ====================================== */
/* = DoAction - request handling = */
/* ====================================== */
// randomMessagesMock holds the canned fallback responses returned when no
// mock instructions have been configured (see randomMessageGeneratorMock).
var randomMessagesMock = [3]string{
	"Life is soooo Good.",
	"With great power comes great responsibility",
	"I shall shed my light over dark evil",
}

// mockInstructionsLoader caches the instructions uploaded through the
// POST /configMockEndPoints endpoint (see loadMockConfigMock).
// NOTE(review): package-level mutable state; concurrent requests would
// race on it — confirm the plugin host serializes requests.
var mockInstructionsLoader mockLoader
/* (m *MockModule) */
/* (m *MockModule) */
// DoAction is the plugin's request dispatcher.
// "/configMockEndPoints" loads new mock instructions from the request body;
// any other method is answered from the loaded instructions when available,
// otherwise with a canned random message.
func DoAction(request http.Request, endPoint string, optionalMap ...map[string]interface{}) interface{} {
	modelPtr := new(MockModuleModel)
	method, isJsonResponse := extractPathParamMock(echogogo.ExtractPathParameterFromUrl(request.URL.Path, endPoint))
	switch method {
	case "/configMockEndPoints":
		err := loadMockConfigMock(request)
		if err != nil {
			return err
		} else {
			// prepare the responseBody message
			prepareMockModuleModel(modelPtr, method, "mock instructions loaded", isJsonResponse, time.Now())
		}
	default:
		// add logics to check whether a mock instruction is available or not...
		if len(mockInstructionsLoader.mockInstructionsMap) > 0 {
			// looking for mock instructions
			methodNameWOSlash := method[1:]
			mockInModel := mockInstructionsLoader.GetMockInstructionByMethodNVerb(methodNameWOSlash, request.Method)
			// check validity... example an empty struct is NOT valid...
			if mockInModel.Method == "" && len(mockInModel.Conditions) == 0 {
				prepareMockModuleModel(modelPtr, method, "no such mock API to simulate~", isJsonResponse, time.Now())
			} else {
				bodyMsg, err := getMockResultMock(&mockInModel, isJsonResponse, request)
				if err != nil {
					return err
				}
				prepareMockModuleModel(modelPtr, method, bodyMsg, isJsonResponse, time.Now())
			}
		} else {
			// no instructions loaded yet: answer with a random canned message
			prepareMockModuleModel(modelPtr, method, randomMessageGeneratorMock(), isJsonResponse, time.Now())
		}
	}
	// return by value so the caller receives a snapshot of the model
	return *modelPtr
}
// verify the conditions and check if any mocked result should be returned instead
func getMockResultMock(model *mockInstructionModel, isJsonResponse bool, request http.Request) (result string, err error) {
contentInBytes, err := ioutil.ReadAll(request.Body)
for _, cond := range model.Conditions {
paramsMatched := false
// either 0 or 1 set of params ONLY
if len(cond.Params) == 0 {
paramsMatched = true
// return the response, assume additional params are ignored
if isJsonResponse {
result = prepareJsonForDisplayMock(cond.ReturnJson)
} else {
result = cond.ReturnXml
}
break
} else {
paramsMap := cond.Params[0]
// assume all matched and set to false when unmatch case occurs
paramsMatched = true
for param, paramVal := range paramsMap {
valBytes, _, _, err1 := jsonparser.Get(contentInBytes, param)
if err1 != nil {
if strings.Index(err1.Error(), "Key path not found") != -1 {
paramsMatched = false
break
} else {
err = err1
return
}
} // end -- if (err1 valid, check if it was the case of unmatched key -> continue)
if paramVal != string(valBytes) {
paramsMatched = false
break
}
} // end -- for (per param match check)
if paramsMatched {
if isJsonResponse {
result = prepareJsonForDisplayMock(cond.ReturnJson)
} else {
result = cond.ReturnXml
}
break
} else {
result = "mock api found, but non-matchable params found, hence no results~"
} // end -- if (paramsMatch - all match scenario)
}
} // end -- for (conditions)
return
}
// method to "pretty" the json string a bit
func prepareJsonForDisplayMock(val string) (value string) {
var contentInBytes bytes.Buffer
for _, char := range val {
if char != '\n' && char != ' ' {
contentInBytes.WriteRune(char)
}
}
value = contentInBytes.String()
return
}
// DoAction routing method - handles /configMockEndPoints
func loadMockConfigMock(request http.Request) (err error) {
contentInBytes, err := ioutil.ReadAll(request.Body)
if err != nil {
return
}
// try to load and parse it...
mockInstructionsLoader = *newMockLoader(contentInBytes)
err = mockInstructionsLoader.Load(nil)
return
}
// prepareMockModuleModel fills modelPtr in place (and returns it for
// chaining): method name, timestamps, and the response body. A JSON body
// that looks like an array is expanded into ResponseBodyArray; any other
// JSON body is wrapped into a single {"message": ...} map; non-JSON bodies
// are stored verbatim in ResponseBody.
func prepareMockModuleModel(modelPtr *MockModuleModel, method, responseBody string, isJsonResponse bool, timestamp time.Time) *MockModuleModel {
	modelPtr.MockMethodName = method
	if timestamp.Nanosecond() > 0 {
		modelPtr.Timestamp = timestamp.UTC()
		modelPtr.TimestampEpoch = modelPtr.Timestamp.UnixNano()
	}
	if responseBody != "" {
		if isJsonResponse == true {
			if isValidJsonString(responseBody) {
				// handle the array of map
				modelPtr.ResponseBodyArray = make([]map[string]interface{}, 0)
				jsonparser.ArrayEach([]byte(responseBody), func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
					if err == nil {
						// iterate the "keys" and "values" of the Object
						valueMap := make(map[string]interface{})
						jsonparser.ObjectEach(value, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
							valueMap[string(key)] = string(value)
							return nil
						})
						modelPtr.ResponseBodyArray = append(modelPtr.ResponseBodyArray, valueMap)
					} else {
						fmt.Printf("%v: %v\n", responseBody, err)
					} // end - if (err is nil)
				})
			} else {
				// non-array JSON: wrap the text into a one-entry message map
				modelPtr.ResponseBodyArray = make([]map[string]interface{}, 0)
				valueMap := make(map[string]interface{})
				valueMap["message"] = responseBody
				modelPtr.ResponseBodyArray = append(modelPtr.ResponseBodyArray, valueMap)
			}
		} else {
			modelPtr.ResponseBody = responseBody
		}
	}
	modelPtr.isJsonResponse = isJsonResponse
	return modelPtr
}
// isValidJsonString reports whether the payload looks like a JSON array:
// it merely checks that, after trimming spaces, the text starts with "[".
// Tabs and other whitespace are intentionally NOT trimmed, matching the
// behavior relied on by prepareMockModuleModel.
func isValidJsonString(jsonString string) (validJson bool) {
	validJson = strings.HasPrefix(strings.Trim(jsonString, " "), "[")
	return
}
// default "mock" result if no other mocking instructions are available
func randomMessageGeneratorMock() string {
random := rand.New(rand.NewSource(time.Now().UnixNano()))
idx := random.Intn(len(randomMessagesMock))
return randomMessagesMock[idx]
}
|
package main
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"log"
)
func GetAllProcessingHashes() []string {
processingHashSearchKey := fmt.Sprintf("ProcessingHash:%s:%s", "*", "*")
processingHashes := RedisSearchKeys(processingHashSearchKey)
return processingHashes
}
// GetAllProcessingItems loads the Request objects currently being processed
// under the given hash key ("ProcessingHash:<company>:<tenant>"). Entries
// whose request object is missing or empty trigger promotion of the next
// queued item (SetNextProcessingItem) instead of being returned.
//
// Robustness fixes: a malformed key no longer panics on the index accesses,
// and json.Unmarshal failures are logged instead of being silently ignored
// (an unmarshal failure leaves SessionId empty and is treated like a
// missing request, as before).
func GetAllProcessingItems(_processingHashKey string) []Request {
	fmt.Println(_processingHashKey)
	keyItems := strings.Split(_processingHashKey, ":")
	if len(keyItems) < 3 {
		log.Println("GetAllProcessingItems: unexpected key format:", _processingHashKey)
		return make([]Request, 0)
	}
	company := keyItems[1]
	tenant := keyItems[2]
	strHash := RedisHashGetAll(_processingHashKey)
	processingReqObjs := make([]Request, 0)
	for queueId, sessionId := range strHash {
		fmt.Println("queueId:", queueId, "sessionId:", sessionId)
		requestKey := fmt.Sprintf("Request:%s:%s:%s", tenant, company, sessionId)
		strReqObj := RedisGet(requestKey)
		fmt.Println(strReqObj)
		if strReqObj == "" {
			// No request stored for this session: promote the next item.
			fmt.Println("Start SetNextProcessingItem")
			tenantInt, _ := strconv.Atoi(tenant)
			companyInt, _ := strconv.Atoi(company)
			SetNextProcessingItem(tenantInt, companyInt, _processingHashKey, queueId, sessionId, "")
			continue
		}
		var reqObj Request
		if err := json.Unmarshal([]byte(strReqObj), &reqObj); err != nil {
			log.Println("GetAllProcessingItems: unmarshal failed:", err)
		}
		if reqObj.SessionId == "" {
			fmt.Println("Critical issue request object found empty ---> set next item "+ queueId + "value " + sessionId)
			tenantInt, _ := strconv.Atoi(tenant)
			companyInt, _ := strconv.Atoi(company)
			SetNextProcessingItem(tenantInt, companyInt, _processingHashKey, queueId, sessionId, "")
		} else {
			processingReqObjs = AppendIfMissingReq(processingReqObjs, reqObj)
		}
	}
	return processingReqObjs
}
// GetRejectedQueueId derives the rejected-items queue id for a queue by
// appending the ":REJECTED" suffix.
func GetRejectedQueueId(_queueId string) string {
	return _queueId + ":REJECTED"
}
// SetNextProcessingItem replaces the processing-hash entry for _queueId
// with the next queued item, but only when the session currently stored
// in the hash still equals currentSession (an optimistic concurrency
// check — a mismatch means another writer already advanced this queue).
//
// Replacement priority: the rejected queue (":REJECTED") is drained
// first, then the regular queue; when both are empty the hash field is
// removed entirely.
func SetNextProcessingItem(tenant, company int, _processingHash, _queueId, currentSession, requestState string) {
	eSession := RedisHashGetValue(_processingHash, _queueId)
	fmt.Println("Item in " + _processingHash + "set next processing item in queue " + _queueId + " with session " + currentSession + " has now in hash " + eSession)
	if eSession != "" && eSession == currentSession {
		rejectedQueueId := GetRejectedQueueId(_queueId)
		nextRejectedQueueItem := RedisListLpop(rejectedQueueId)
		if nextRejectedQueueItem == "" {
			nextQueueItem := RedisListLpop(_queueId)
			if nextQueueItem == "" {
				// Both queues drained: drop the hash field entirely.
				removeHResult := RedisRemoveHashField(_processingHash, _queueId)
				if removeHResult {
					fmt.Println("Remove HashField Success.." + _processingHash + "::" + _queueId)
				} else {
					fmt.Println("Remove HashField Failed.." + _processingHash + "::" + _queueId)
				}
			} else {
				setHResult := RedisHashSetField(_processingHash, _queueId, nextQueueItem)
				if setHResult {
					fmt.Println("Set HashField Success.." + _processingHash + "::" + _queueId + "::" + nextQueueItem)
				} else {
					fmt.Println("Set HashField Failed.." + _processingHash + "::" + _queueId + "::" + nextQueueItem)
				}
			}
		} else {
			setHResult := RedisHashSetField(_processingHash, _queueId, nextRejectedQueueItem)
			if setHResult {
				fmt.Println("Set HashField Success.." + _processingHash + "::" + _queueId + "::" + nextRejectedQueueItem)
			} else {
				fmt.Println("Set HashField Failed.." + _processingHash + "::" + _queueId + "::" + nextRejectedQueueItem)
			}
		}
	} else {
		fmt.Println("session Mismatched, " + requestState + " ignore setNextItem")
		/*there is a new session added to the hash,
		now the item should route on next processing
		process next item will run through status and remove if the status is not queued
		there is a possibility to lost the item if status changes has failed.
		recheck all queue status set methods for concurrency and async operations.
		*/
	}
	// Removed a dead `defer func() {}()` whose body contained only a
	// commented-out lock release; it had no runtime effect.
}
// ContinueArdsProcess posts the request to the ARDS "continueprocess"
// endpoint so processing resumes. Only requests using the QUEUE handling
// algorithm that actually matched a resource are forwarded; anything else
// returns false without calling out. Returns true when the POST succeeds.
func ContinueArdsProcess(_request Request) bool {
	// Guard clauses replace the original nested if/else.
	if _request.ReqHandlingAlgo != "QUEUE" || _request.HandlingResource == "No matching resources at the moment" {
		return false
	}
	// Bug fix: the Marshal error used to be discarded, which would have
	// posted an empty body on failure.
	req, err := json.Marshal(_request)
	if err != nil {
		fmt.Println("Continue Ards Process Failed")
		return false
	}
	authToken := fmt.Sprintf("Bearer %s", accessToken)
	internalAuthToken := fmt.Sprintf("%d:%d", _request.Tenant, _request.Company)
	ardsUrl := fmt.Sprintf("http://%s/DVP/API/1.0.0.0/ARDS/continueprocess", CreateHost(_request.LbIp, _request.LbPort))
	// string(req) is equivalent to the original string(req[:]).
	if Post(ardsUrl, string(req), authToken, internalAuthToken) {
		fmt.Println("Continue Ards Process Success")
		return true
	}
	fmt.Println("Continue Ards Process Failed")
	return false
}
// GetRequestState reads the stored state string for a session from Redis,
// keyed by "RequestState:<tenant>:<company>:<sessionId>".
func GetRequestState(_company, _tenant int, _sessionId string) string {
	key := fmt.Sprintf("RequestState:%d:%d:%s", _tenant, _company, _sessionId)
	return RedisGet(key)
}
// SetRequestState writes _newState for a session into Redis, keyed by
// "RequestState:<tenant>:<company>:<sessionId>", and returns the Redis
// reply string.
func SetRequestState(_company, _tenant int, _sessionId, _newState string) string {
	key := fmt.Sprintf("RequestState:%d:%d:%s", _tenant, _company, _sessionId)
	return RedisSet(key, _newState)
}
// ContinueProcessing resolves the handling resources for the request and
// then asks ARDS to continue it. Returns the ARDS call result and the
// list of resources selected for this request.
func ContinueProcessing(_request Request, _selectedResources SelectedResource) (continueProcessingResult bool, handlingResource []string) {
	fmt.Println("ReqOtherInfo:", _request.OtherInfo)
	resolved, resources := HandlingResources(_request.Company, _request.Tenant, _request.ResourceCount, _request.LbIp, _request.LbPort, _request.SessionId, _request.ServerType, _request.RequestType, _request.HandlingAlgo, _request.OtherInfo, _selectedResources)
	// The resolved resource string rides along on the request payload.
	_request.HandlingResource = resolved
	return ContinueArdsProcess(_request), resources
}
// AcquireProcessingHashLock tries to take a 60-second Redis lock for the
// given processing hash, tagged with uuid so only the owner can release
// it (see ReleasetLock). Returns true when the lock was acquired.
func AcquireProcessingHashLock(hashId, uuid string) bool {
	lockKey := fmt.Sprintf("ProcessingHashLock:%s", hashId)
	// Idiom fix: compare booleans directly, not with `== true`.
	if RedisSetNx(lockKey, uuid, 60) {
		fmt.Println("lockKey: ", lockKey)
		return true
	}
	return false
}
// ReleasetLock releases the processing-hash lock previously acquired via
// AcquireProcessingHashLock; uuid must match the value the lock was taken
// with.
//
// NOTE(review): the name carries a typo ("Releaset"); kept as-is because
// it is exported and referenced by callers in this package.
func ReleasetLock(hashId, uuid string) {
	lockKey := fmt.Sprintf("ProcessingHashLock:%s", hashId)
	// Idiom fixes: dropped `== true` and the redundant trailing `return`.
	if RedisRemoveRLock(lockKey, uuid) {
		fmt.Println("Release lock ", lockKey, "success.")
	} else {
		fmt.Println("Release lock ", lockKey, "failed.")
	}
}
// ExecuteRequestHash processes one processing hash: all items are loaded,
// sorted by priority, resources are selected for the whole batch, and
// each item still in QUEUED state is continued through ARDS. Items whose
// state is no longer QUEUED are advanced to the next queue entry via
// SetNextProcessingItem.
//
// The caller must already hold the processing-hash lock identified by
// uuid; it is released when this function returns.
func ExecuteRequestHash(_processingHashKey, uuid string) {
	defer func() {
		ReleasetLock(_processingHashKey, uuid)
	}()
	if RedisCheckKeyExist(_processingHashKey) {
		processingItems := GetAllProcessingItems(_processingHashKey)
		if len(processingItems) > 0 {
			// The first item (captured before sorting) supplies the
			// resource-selection algorithm for the whole hash.
			defaultRequest := processingItems[0]
			sort.Sort(ByReqPriority(processingItems))
			selectedResourcesForHash := SelectResources(processingItems, defaultRequest.SelectionAlgo)
			// Resources already handed out in this pass, so one resource is
			// not assigned to two requests.
			pickedResources := make([]string, 0)
			for _, longestWItem := range processingItems {
				fmt.Println("Execute processing hash item::", longestWItem.Priority)
				if longestWItem.SessionId != "" {
					requestState := GetRequestState(longestWItem.Company, longestWItem.Tenant, longestWItem.SessionId)
					if requestState == "QUEUED" {
						log.Println("pickedResources: ", pickedResources)
						resourceForRequest, isExist := GetSelectedResourceForRequest(selectedResourcesForHash, longestWItem.SessionId, pickedResources)
						log.Println("resourceForRequest: ", resourceForRequest)
						if isExist {
							continueProcessingResult, handlingResource := ContinueProcessing(longestWItem, resourceForRequest)
							if continueProcessingResult {
								log.Println("handlingResource: ", handlingResource)
								// Record the consumed resources for later iterations.
								pickedResources = append(pickedResources, handlingResource...)
								fmt.Println("Continue ARDS Process Success")
							}
						} else {
							fmt.Println("Request not found in Selected Resource Data")
						}
					} else {
						// Item is no longer queued (e.g. cancelled/answered):
						// move the queue forward instead of processing it.
						fmt.Println("State of the queue item" + longestWItem.SessionId + "is not queued ->" + requestState)
						SetNextProcessingItem(longestWItem.Tenant, longestWItem.Company, _processingHashKey, longestWItem.QueueId, longestWItem.SessionId, requestState)
					}
				} else {
					fmt.Println("No Session Found")
				}
			}
		} else {
			fmt.Println("No Processing Items Found")
		}
	} else {
		fmt.Println("No Processing Hash Found")
	}
}
// ExecuteRequestHashWithMsgQueue is the looping variant of
// ExecuteRequestHash: it keeps re-reading and processing the hash for as
// long as the hash key exists in Redis, instead of making a single pass.
// The caller must hold the processing-hash lock identified by uuid; it is
// released on return.
//
// NOTE(review): if the key keeps existing while GetAllProcessingItems
// returns an empty slice, this `for` busy-loops printing "No Processing
// Items Found" — confirm the hash key is deleted once emptied.
func ExecuteRequestHashWithMsgQueue(_processingHashKey, uuid string) {
	defer func() {
		ReleasetLock(_processingHashKey, uuid)
	}()
	for RedisCheckKeyExist(_processingHashKey) {
		processingItems := GetAllProcessingItems(_processingHashKey)
		if len(processingItems) > 0 {
			// The first item (captured before sorting) supplies the
			// resource-selection algorithm for the whole hash.
			defaultRequest := processingItems[0]
			sort.Sort(ByReqPriority(processingItems))
			selectedResourcesForHash := SelectResources(processingItems, defaultRequest.SelectionAlgo)
			// Resources already handed out in this pass, so one resource is
			// not assigned to two requests.
			pickedResources := make([]string, 0)
			for _, longestWItem := range processingItems {
				fmt.Println("Execute processing hash item::", longestWItem.Priority)
				if longestWItem.SessionId != "" {
					requestState := GetRequestState(longestWItem.Company, longestWItem.Tenant, longestWItem.SessionId)
					if requestState == "QUEUED" {
						resourceForRequest, isExist := GetSelectedResourceForRequest(selectedResourcesForHash, longestWItem.SessionId, pickedResources)
						if isExist {
							continueProcessingResult, handlingResource := ContinueProcessing(longestWItem, resourceForRequest)
							if continueProcessingResult {
								pickedResources = append(pickedResources, handlingResource...)
								fmt.Println("Continue ARDS Process Success")
							}
						} else {
							fmt.Println("Request not found in Selected Resource Data")
						}
					} else {
						// Item is no longer queued: advance the queue.
						SetNextProcessingItem(longestWItem.Tenant, longestWItem.Company, _processingHashKey, longestWItem.QueueId, longestWItem.SessionId, requestState)
					}
				} else {
					fmt.Println("No Session Found")
				}
			}
		} else {
			fmt.Println("No Processing Items Found")
		}
	}
}
|
package controllers
import (
"context"
"encoding/json"
"fmt"
"math"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/MakeNowJust/heredoc"
"github.com/go-logr/logr"
getter "github.com/hashicorp/go-getter"
tfv1alpha2 "github.com/isaaguilar/terraform-operator/pkg/apis/tf/v1alpha2"
"github.com/isaaguilar/terraform-operator/pkg/utils"
localcache "github.com/patrickmn/go-cache"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
runtimecontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// ReconcileTerraform reconciles a Terraform object
type ReconcileTerraform struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	Client client.Client
	// Scheme is used when setting owner references on generated objects.
	Scheme *runtime.Scheme
	// Recorder emits Kubernetes Events for reconciled resources.
	Recorder record.EventRecorder
	Log      logr.Logger
	// MaxConcurrentReconciles bounds the controller's worker parallelism
	// (see SetupWithManager).
	MaxConcurrentReconciles int
	// Cache holds per-request reconcile locks and the inherited scheduling
	// config entries referenced by the *CacheKey fields below.
	Cache *localcache.Cache
	// GlobalEnvFromConfigmapData / GlobalEnvFromSecretData hold
	// controller-wide env data rendered into a per-resource ConfigMap and
	// Secret (see createEnvFromSources). GlobalEnvSuffix is appended to
	// the resource name to form those object names.
	GlobalEnvFromConfigmapData map[string]string
	GlobalEnvFromSecretData    map[string][]byte
	GlobalEnvSuffix            string
	// InheritNodeSelector to use the controller's nodeSelectors for every task created by the controller.
	// Value of this field will come from the owning deployment and cached.
	InheritNodeSelector  bool
	NodeSelectorCacheKey string
	// InheritAffinity to use the controller's affinity rules for every task created by the controller
	// Value of this field will come from the owning deployment and cached.
	InheritAffinity  bool
	AffinityCacheKey string
	// InheritTolerations to use the controller's tolerations for every task created by the controller
	// Value of this field will come from the owning deployment and cached.
	InheritTolerations  bool
	TolerationsCacheKey string
}
// createEnvFromSources adds any of the global environment vars defined at the controller scope
// and generates a configmap or secret that will be loaded into the resource Task pods.
//
// TODO Each time a new generation is created of the tfo resource, this "global" env from vars should
// generate a new configmap and secret. The reason for this is to prevent a generation from producing a
// different plan when it was the controller that changed options. A new generation should be forced
// if the plan needs to change.
func (r ReconcileTerraform) createEnvFromSources(ctx context.Context, tf *tfv1alpha2.Terraform) error {
	resourceName := tf.Name
	resourceNamespace := tf.Namespace
	// Object name must match what listEnvFromSources expects.
	name := fmt.Sprintf("%s-%s", resourceName, r.GlobalEnvSuffix)
	if len(r.GlobalEnvFromConfigmapData) > 0 {
		configMap := corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: resourceNamespace,
			},
			Data: r.GlobalEnvFromConfigmapData,
		}
		// Bug fix: the SetControllerReference error was previously ignored;
		// a failure would leave the ConfigMap un-owned and exempt from
		// garbage collection with the tf resource.
		if err := controllerutil.SetControllerReference(tf, &configMap, r.Scheme); err != nil {
			return err
		}
		errOnCreate := r.Client.Create(ctx, &configMap)
		if errOnCreate != nil {
			if errors.IsAlreadyExists(errOnCreate) {
				// Already present from a previous reconcile: refresh the data.
				if errOnUpdate := r.Client.Update(ctx, &configMap); errOnUpdate != nil {
					return errOnUpdate
				}
			} else {
				return errOnCreate
			}
		}
	}
	if len(r.GlobalEnvFromSecretData) > 0 {
		secret := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: resourceNamespace,
			},
			Data: r.GlobalEnvFromSecretData,
		}
		// Bug fix: error checked here as well (was ignored).
		if err := controllerutil.SetControllerReference(tf, &secret, r.Scheme); err != nil {
			return err
		}
		errOnCreate := r.Client.Create(ctx, &secret)
		if errOnCreate != nil {
			if errors.IsAlreadyExists(errOnCreate) {
				if errOnUpdate := r.Client.Update(ctx, &secret); errOnUpdate != nil {
					return errOnUpdate
				}
			} else {
				return errOnCreate
			}
		}
	}
	return nil
}
// listEnvFromSources makes an assumption that if global envs are defined in the controller, the
// configmap and secrets for the envs have been created or updated when initializing the workflow.
//
// This function will return the envFrom of the resources that should exist but does not validate that
// they do exist. If the configmap or secret is missing, force the generation of the tfo resource to update
// and the controller will recreate the missing resources.
func (r ReconcileTerraform) listEnvFromSources(tf *tfv1alpha2.Terraform) []corev1.EnvFromSource {
	// Both sources share the same object name: <resource>-<suffix>.
	objectRef := corev1.LocalObjectReference{
		Name: fmt.Sprintf("%s-%s", tf.Name, r.GlobalEnvSuffix),
	}
	sources := []corev1.EnvFromSource{}
	if len(r.GlobalEnvFromConfigmapData) > 0 {
		// ConfigMap that should exist
		sources = append(sources, corev1.EnvFromSource{
			ConfigMapRef: &corev1.ConfigMapEnvSource{
				LocalObjectReference: objectRef,
			},
		})
	}
	if len(r.GlobalEnvFromSecretData) > 0 {
		// Secret that should exist
		sources = append(sources, corev1.EnvFromSource{
			SecretRef: &corev1.SecretEnvSource{
				LocalObjectReference: objectRef,
			},
		})
	}
	return sources
}
// SetupWithManager sets up the controller with the Manager. The
// controller reconciles v1alpha2 Terraform resources and also watches
// pods owned by a Terraform resource so task pod status changes trigger
// reconciles.
func (r *ReconcileTerraform) SetupWithManager(mgr ctrl.Manager) error {
	controllerOptions := runtimecontroller.Options{
		MaxConcurrentReconciles: r.MaxConcurrentReconciles,
	}
	// only listen to v1alpha2
	// Idiom fix: return the builder result directly instead of
	// `if err != nil { return err }; return nil`.
	return ctrl.NewControllerManagedBy(mgr).
		For(&tfv1alpha2.Terraform{}).
		Watches(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
			IsController: true,
			OwnerType:    &tfv1alpha2.Terraform{},
		}).
		WithOptions(controllerOptions).
		Complete(r)
}
// ParsedAddress uses go-getter's detect mechanism to get the parsed url
// TODO ParsedAddress can be moved into its own package
type ParsedAddress struct {
	// DetectedScheme is the name of the bin or protocol to use to fetch. For
	// example, git will be used to fetch git repos (over https or ssh
	// "protocol").
	DetectedScheme string `json:"detect"`
	// Path is the target path for the downloaded file or directory.
	Path string `json:"path"`
	// UseAsVar: the files downloaded get called out in the terraform plan
	// as -var-file.
	UseAsVar bool `json:"useAsVar"`
	// Url is the raw address + query
	Url string `json:"url"`
	// Files are the files to find within a repo.
	Files []string `json:"files"`
	// Hash is also known as the `ref` query argument. For git this is the
	// commit-sha or branch-name to checkout.
	Hash string `json:"hash"`
	// UrlScheme is the protocol of the URL
	UrlScheme string `json:"protocol"`
	// Uri is the path of the URL after the proto://host.
	Uri string `json:"uri"`
	// Host is the host of the URL.
	Host string `json:"host"`
	// Port is the port to use when fetching the URL.
	Port string `json:"port"`
	// User is the user to use when fetching the URL.
	User string `json:"user"`
	// Repo when using a SCM is the URL of the repo which is the same as the
	// URL and omitting the query args.
	Repo string `json:"repo"`
}
// TaskOptions collects everything needed to render a single task pod for
// a Terraform resource: pod metadata, image selection, env plumbing,
// RBAC policy rules, and outputs-secret handling. Instances are built by
// newTaskOptions.
type TaskOptions struct {
	annotations map[string]string
	// configMapSourceName/Key point at a user-supplied script for this task
	// (taskOption.Script.ConfigMapSelector), when set.
	configMapSourceName string
	configMapSourceKey  string
	credentials         []tfv1alpha2.Credentials
	env                 []corev1.EnvVar
	envFrom             []corev1.EnvFromSource
	generation          int64
	image               string
	imagePullPolicy     corev1.PullPolicy
	// inherited* carry scheduling config propagated from the controller's
	// own deployment (see the Inherit* fields on ReconcileTerraform).
	inheritedAffinity     *corev1.Affinity
	inheritedNodeSelector map[string]string
	inheritedTolerations  []corev1.Toleration
	labels                map[string]string
	mainModulePluginData  map[string]string
	namespace             string
	// outputsSecretName is where terraform outputs get stored; defaults to
	// "<versionedName>-outputs" unless spec.outputsSecret overrides it.
	outputsSecretName string
	outputsToInclude  []string
	outputsToOmit     []string
	policyRules       []rbacv1.PolicyRule
	prefixedName      string
	resourceLabels    map[string]string
	resourceName      string
	resourceUUID      string
	task              tfv1alpha2.TaskName
	saveOutputs       bool
	secretData        map[string][]byte
	serviceAccount    string
	cleanupDisk       bool
	// stripGenerationLabelOnOutputsSecret is true when the user supplied a
	// custom outputs secret name (spec.outputsSecret).
	stripGenerationLabelOnOutputsSecret bool
	terraformModuleParsed               ParsedAddress
	terraformVersion                    string
	// urlSource is the script URL run by the task pod; defaults per task
	// type when no taskOption supplies one.
	urlSource       string
	versionedName   string
	requireApproval bool
	restartPolicy   corev1.RestartPolicy
}
// newTaskOptions flattens the Terraform resource spec into the
// TaskOptions used to render one task pod for the given task and
// generation. It merges per-task spec.taskOptions, resolves the image and
// script URL for the task type, and computes naming, service account,
// outputs-secret, and label metadata.
//
// globalEnvFrom, affinity, nodeSelector and tolerations are
// controller-scope values passed through into the options.
func newTaskOptions(tf *tfv1alpha2.Terraform, task tfv1alpha2.TaskName, generation int64, globalEnvFrom []corev1.EnvFromSource, affinity *corev1.Affinity, nodeSelector map[string]string, tolerations []corev1.Toleration) TaskOptions {
	// TODO Read the tfstate and decide IF_NEW_RESOURCE based on that
	// applyAction := false
	resourceName := tf.Name
	resourceUUID := string(tf.UID)
	prefixedName := tf.Status.PodNamePrefix
	// versionedName ties generated objects to this resource generation.
	versionedName := prefixedName + "-v" + fmt.Sprint(tf.Generation)
	terraformVersion := tf.Spec.TerraformVersion
	if terraformVersion == "" {
		terraformVersion = "latest"
	}
	image := ""
	imagePullPolicy := corev1.PullAlways
	policyRules := []rbacv1.PolicyRule{}
	labels := make(map[string]string)
	annotations := make(map[string]string)
	env := []corev1.EnvVar{}
	envFrom := globalEnvFrom
	cleanupDisk := false
	urlSource := ""
	configMapSourceName := ""
	configMapSourceKey := ""
	restartPolicy := corev1.RestartPolicyNever
	// TaskOptions have data for all the tasks but since we're only interested
	// in the ones for this taskType, extract and add them to RunOptions
	for _, taskOption := range tf.Spec.TaskOptions {
		// Options matching this task or the "*" wildcard are merged in.
		if tfv1alpha2.ListContainsTask(taskOption.For, task) ||
			tfv1alpha2.ListContainsTask(taskOption.For, "*") {
			policyRules = append(policyRules, taskOption.PolicyRules...)
			for key, value := range taskOption.Annotations {
				annotations[key] = value
			}
			for key, value := range taskOption.Labels {
				labels[key] = value
			}
			env = append(env, taskOption.Env...)
			envFrom = append(envFrom, taskOption.EnvFrom...)
			if taskOption.RestartPolicy != "" {
				restartPolicy = taskOption.RestartPolicy
			}
		}
		// Script sources apply only on an exact task match, not "*".
		if tfv1alpha2.ListContainsTask(taskOption.For, task) {
			urlSource = taskOption.Script.Source
			if configMapSelector := taskOption.Script.ConfigMapSelector; configMapSelector != nil {
				configMapSourceName = configMapSelector.Name
				configMapSourceKey = configMapSelector.Key
			}
		}
	}
	images := tf.Spec.Images
	if images == nil {
		// setup default images
		images = &tfv1alpha2.Images{}
	}
	if images.Terraform == nil {
		images.Terraform = &tfv1alpha2.ImageConfig{
			ImagePullPolicy: corev1.PullIfNotPresent,
		}
	}
	if images.Terraform.Image == "" {
		images.Terraform.Image = fmt.Sprintf("%s:%s", tfv1alpha2.TerraformTaskImageRepoDefault, terraformVersion)
	} else {
		// A user-supplied terraform image gets its tag replaced with
		// terraformVersion (everything after the last ':' is dropped).
		terraformImage := images.Terraform.Image
		splitImage := strings.Split(images.Terraform.Image, ":")
		if length := len(splitImage); length > 1 {
			terraformImage = strings.Join(splitImage[:length-1], ":")
		}
		images.Terraform.Image = fmt.Sprintf("%s:%s", terraformImage, terraformVersion)
	}
	if images.Setup == nil {
		images.Setup = &tfv1alpha2.ImageConfig{
			ImagePullPolicy: corev1.PullIfNotPresent,
		}
	}
	if images.Setup.Image == "" {
		images.Setup.Image = fmt.Sprintf("%s:%s", tfv1alpha2.SetupTaskImageRepoDefault, tfv1alpha2.SetupTaskImageTagDefault)
	}
	if images.Script == nil {
		images.Script = &tfv1alpha2.ImageConfig{
			ImagePullPolicy: corev1.PullIfNotPresent,
		}
	}
	if images.Script.Image == "" {
		images.Script.Image = fmt.Sprintf("%s:%s", tfv1alpha2.ScriptTaskImageRepoDefault, tfv1alpha2.ScriptTaskImageTagDefault)
	}
	// Task-type groupings used below to pick image + default script URL.
	terraformTasks := []tfv1alpha2.TaskName{
		tfv1alpha2.RunInit,
		tfv1alpha2.RunInitDelete,
		tfv1alpha2.RunPlan,
		tfv1alpha2.RunPlanDelete,
		tfv1alpha2.RunApply,
		tfv1alpha2.RunApplyDelete,
	}
	scriptTasks := []tfv1alpha2.TaskName{
		tfv1alpha2.RunPreInit,
		tfv1alpha2.RunPreInitDelete,
		tfv1alpha2.RunPostInit,
		tfv1alpha2.RunPostInitDelete,
		tfv1alpha2.RunPrePlan,
		tfv1alpha2.RunPrePlanDelete,
		tfv1alpha2.RunPostPlan,
		tfv1alpha2.RunPostPlanDelete,
		tfv1alpha2.RunPreApply,
		tfv1alpha2.RunPreApplyDelete,
		tfv1alpha2.RunPostApply,
		tfv1alpha2.RunPostApplyDelete,
	}
	setupTasks := []tfv1alpha2.TaskName{
		tfv1alpha2.RunSetup,
		tfv1alpha2.RunSetupDelete,
	}
	if tfv1alpha2.ListContainsTask(terraformTasks, task) {
		image = images.Terraform.Image
		imagePullPolicy = images.Terraform.ImagePullPolicy
		if urlSource == "" {
			urlSource = "https://raw.githubusercontent.com/GalleyBytes/terraform-operator-tasks/master/tf.sh"
		}
	} else if tfv1alpha2.ListContainsTask(scriptTasks, task) {
		image = images.Script.Image
		imagePullPolicy = images.Script.ImagePullPolicy
		if urlSource == "" {
			urlSource = "https://raw.githubusercontent.com/GalleyBytes/terraform-operator-tasks/master/noop.sh"
		}
	} else if tfv1alpha2.ListContainsTask(setupTasks, task) {
		image = images.Setup.Image
		imagePullPolicy = images.Setup.ImagePullPolicy
		if urlSource == "" {
			urlSource = "https://raw.githubusercontent.com/GalleyBytes/terraform-operator-tasks/master/setup.sh"
		}
	}
	// sshConfig := utils.TruncateResourceName(tf.Name, 242) + "-ssh-config"
	serviceAccount := tf.Spec.ServiceAccount
	if serviceAccount == "" {
		// By prefixing the service account with "tf-", IRSA roles can use wildcard
		// "tf-*" service account for AWS credentials.
		serviceAccount = "tf-" + versionedName
	}
	credentials := tf.Spec.Credentials
	// Outputs will be saved as a secret that will have the same lifecycle
	// as the Terraform CustomResource by adding the ownership metadata
	outputsSecretName := versionedName + "-outputs"
	saveOutputs := false
	stripGenerationLabelOnOutputsSecret := false
	if tf.Spec.OutputsSecret != "" {
		outputsSecretName = tf.Spec.OutputsSecret
		saveOutputs = true
		stripGenerationLabelOnOutputsSecret = true
	} else if tf.Spec.WriteOutputsToStatus {
		saveOutputs = true
	}
	outputsToInclude := tf.Spec.OutputsToInclude
	outputsToOmit := tf.Spec.OutputsToOmit
	if tf.Spec.Setup != nil {
		cleanupDisk = tf.Spec.Setup.CleanupDisk
	}
	resourceLabels := map[string]string{
		"terraforms.tf.isaaguilar.com/generation":       fmt.Sprintf("%d", generation),
		"terraforms.tf.isaaguilar.com/resourceName":     resourceName,
		"terraforms.tf.isaaguilar.com/podPrefix":        prefixedName,
		"terraforms.tf.isaaguilar.com/terraformVersion": terraformVersion,
		"app.kubernetes.io/name":                        "terraform-operator",
		"app.kubernetes.io/component":                   "terraform-operator-runner",
		"app.kubernetes.io/created-by":                  "controller",
	}
	requireApproval := tf.Spec.RequireApproval
	if task.ID() == -2 {
		// This is not one of the main tasks so it's probably a plugin
		resourceLabels["terraforms.tf.isaaguilar.com/isPlugin"] = "true"
	}
	return TaskOptions{
		env:                                 env,
		generation:                          generation,
		configMapSourceName:                 configMapSourceName,
		configMapSourceKey:                  configMapSourceKey,
		envFrom:                             envFrom,
		policyRules:                         policyRules,
		annotations:                         annotations,
		labels:                              labels,
		imagePullPolicy:                     imagePullPolicy,
		inheritedAffinity:                   affinity,
		inheritedNodeSelector:               nodeSelector,
		inheritedTolerations:                tolerations,
		namespace:                           tf.Namespace,
		resourceName:                        resourceName,
		prefixedName:                        prefixedName,
		versionedName:                       versionedName,
		credentials:                         credentials,
		terraformVersion:                    terraformVersion,
		image:                               image,
		task:                                task,
		resourceLabels:                      resourceLabels,
		resourceUUID:                        resourceUUID,
		serviceAccount:                      serviceAccount,
		mainModulePluginData:                make(map[string]string),
		secretData:                          make(map[string][]byte),
		cleanupDisk:                         cleanupDisk,
		outputsSecretName:                   outputsSecretName,
		saveOutputs:                         saveOutputs,
		stripGenerationLabelOnOutputsSecret: stripGenerationLabelOnOutputsSecret,
		outputsToInclude:                    outputsToInclude,
		outputsToOmit:                       outputsToOmit,
		urlSource:                           urlSource,
		requireApproval:                     requireApproval,
		restartPolicy:                       restartPolicy,
	}
}
// terraformFinalizer is the finalizer key managed on Terraform resources
// (added/removed via updateFinalizer) so deletion is intercepted until
// the delete workflow completes.
const terraformFinalizer = "finalizer.tf.isaaguilar.com"
// Reconcile reads that state of the cluster for a Terraform object and makes changes based on the state read
// and what is in the Terraform.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileTerraform) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
reconcilerID := string(uuid.NewUUID())
reqLogger := r.Log.WithValues("Terraform", request.NamespacedName, "id", reconcilerID)
err := r.cacheNodeSelectors(ctx, reqLogger)
if err != nil {
panic(err)
}
lockKey := request.String() + "-reconcile-lock"
lockOwner, lockFound := r.Cache.Get(lockKey)
if lockFound {
reqLogger.Info(fmt.Sprintf("Request is locked by '%s'", lockOwner.(string)))
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}
r.Cache.Set(lockKey, reconcilerID, -1)
defer r.Cache.Delete(lockKey)
defer reqLogger.V(6).Info("Request has released reconcile lock")
reqLogger.V(6).Info("Request has acquired reconcile lock")
tf, err := r.getTerraformResource(ctx, request.NamespacedName, 3, reqLogger)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
// reqLogger.Info(fmt.Sprintf("Not found, instance is defined as: %+v", instance))
reqLogger.V(1).Info("Terraform resource not found. Ignoring since object must be deleted")
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
reqLogger.Error(err, "Failed to get Terraform")
return reconcile.Result{}, err
}
// Final delete by removing finalizers
if tf.Status.Phase == tfv1alpha2.PhaseDeleted {
reqLogger.Info("Remove finalizers")
_ = updateFinalizer(tf)
err := r.update(ctx, tf)
if err != nil {
r.Recorder.Event(tf, "Warning", "ProcessingError", err.Error())
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Finalizers
if updateFinalizer(tf) {
err := r.update(ctx, tf)
if err != nil {
return reconcile.Result{}, err
}
reqLogger.V(1).Info("Updated finalizer")
return reconcile.Result{}, nil
}
// Initialize resource
if tf.Status.PodNamePrefix == "" {
// Generate a unique name for everything related to this tf resource
// Must trucate at 220 chars of original name to ensure room for the
// suffixes that will be added (and possible future suffix expansion)
tf.Status.PodNamePrefix = fmt.Sprintf("%s-%s",
utils.TruncateResourceName(tf.Name, 220),
utils.StringWithCharset(8, utils.AlphaNum),
)
tf.Status.Stages = []tfv1alpha2.Stage{}
tf.Status.LastCompletedGeneration = 0
tf.Status.Phase = tfv1alpha2.PhaseInitializing
err := r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
}
return reconcile.Result{}, nil
}
// Add the first stage
if tf.Status.Stage.Generation == 0 {
task := tfv1alpha2.RunSetup
stageState := tfv1alpha2.StateInitializing
interruptible := tfv1alpha2.CanNotBeInterrupt
stage := newStage(tf, task, "TF_RESOURCE_CREATED", interruptible, stageState)
if stage == nil {
return reconcile.Result{}, fmt.Errorf("failed to create a new stage")
}
tf.Status.Stage = *stage
tf.Status.Plugins = []tfv1alpha2.TaskName{}
err := r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
deletePhases := []string{
string(tfv1alpha2.PhaseDeleting),
string(tfv1alpha2.PhaseInitDelete),
string(tfv1alpha2.PhaseDeleted),
}
// Check if the resource is marked to be deleted which is
// indicated by the deletion timestamp being set.
if tf.GetDeletionTimestamp() != nil && !utils.ListContainsStr(deletePhases, string(tf.Status.Phase)) {
tf.Status.Phase = tfv1alpha2.PhaseInitDelete
}
// // TODO Check the status on stages that have not completed
// for _, stage := range tf.Status.Stages {
// if stage.State == tfv1alpha1.StateInProgress {
//
// }
// }
stage := r.checkSetNewStage(ctx, tf)
if stage != nil {
tf.Status.Stage = *stage
if stage.Reason == "RESTARTED_WORKFLOW" || stage.Reason == "RESTARTED_DELETE_WORKFLOW" {
_ = r.removeOldPlan(tf)
// TODO what to do if the remove old plan function fails
}
reqLogger.V(2).Info(fmt.Sprintf("Stage moving from '%s' -> '%s'", tf.Status.Stage.TaskType, stage.TaskType))
desiredStatus := tf.Status
err := r.updateStatusWithRetry(ctx, tf, &desiredStatus, reqLogger)
if err != nil {
reqLogger.V(1).Info(fmt.Sprintf("Error adding stage '%s': %s", stage.TaskType, err.Error()))
}
if tf.Spec.KeepLatestPodsOnly {
go r.backgroundReapOldGenerationPods(tf, 0)
}
return reconcile.Result{}, nil
}
globalEnvFrom := r.listEnvFromSources(tf)
if err != nil {
return reconcile.Result{}, err
}
currentStage := tf.Status.Stage
podType := currentStage.TaskType
generation := currentStage.Generation
affinity, nodeSelector, tolerations := r.getNodeSelectorsFromCache()
runOpts := newTaskOptions(tf, currentStage.TaskType, generation, globalEnvFrom, affinity, nodeSelector, tolerations)
if podType == tfv1alpha2.RunNil {
// podType is blank when the terraform workflow has completed for
// either create or delete.
if tf.Status.Phase == tfv1alpha2.PhaseRunning {
// Updates the status as "completed" on the resource
tf.Status.Phase = tfv1alpha2.PhaseCompleted
if tf.Spec.WriteOutputsToStatus {
// runOpts.outputsSecetName
secret, err := r.loadSecret(ctx, runOpts.outputsSecretName, runOpts.namespace)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("failed to load secret '%s'", runOpts.outputsSecretName))
}
// Get a list of outputs to clean up any removed outputs
keysInOutputs := []string{}
for key := range secret.Data {
keysInOutputs = append(keysInOutputs, key)
}
for key := range tf.Status.Outputs {
if !utils.ListContainsStr(keysInOutputs, key) {
// remove the key if its not in the new list of outputs
delete(tf.Status.Outputs, key)
}
}
for key, value := range secret.Data {
if tf.Status.Outputs == nil {
tf.Status.Outputs = make(map[string]string)
}
tf.Status.Outputs[key] = string(value)
}
}
err := r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{}, err
}
} else if tf.Status.Phase == tfv1alpha2.PhaseDeleting {
// Updates the status as "deleted" which will be used to tell the
// controller to remove any finalizers).
tf.Status.Phase = tfv1alpha2.PhaseDeleted
err := r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{}, err
}
}
return reconcile.Result{Requeue: false}, nil
}
// Check for the current stage pod
inNamespace := client.InNamespace(tf.Namespace)
f := fields.Set{
"metadata.generateName": fmt.Sprintf("%s-%s-", tf.Status.PodNamePrefix+"-v"+fmt.Sprint(generation), podType),
}
labelSelector := map[string]string{
"terraforms.tf.isaaguilar.com/generation": fmt.Sprintf("%d", generation),
}
matchingFields := client.MatchingFields(f)
matchingLabels := client.MatchingLabels(labelSelector)
pods := &corev1.PodList{}
err = r.Client.List(ctx, pods, inNamespace, matchingFields, matchingLabels)
if err != nil {
reqLogger.Error(err, "")
return reconcile.Result{}, nil
}
if len(pods.Items) == 0 && tf.Status.Stage.State == tfv1alpha2.StateInProgress {
// This condition is generally met when the user deletes the pod.
// Force the state to transition away from in-progress and then
// requeue.
tf.Status.Stage.State = tfv1alpha2.StateInitializing
err = r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, nil
}
if len(pods.Items) == 0 {
// Trigger a new pod when no pods are found for current stage
reqLogger.V(1).Info(fmt.Sprintf("Setting up the '%s' pod", podType))
err := r.setupAndRun(ctx, tf, runOpts)
if err != nil {
reqLogger.Error(err, "")
return reconcile.Result{}, err
}
if tf.Status.Phase == tfv1alpha2.PhaseInitializing {
tf.Status.Phase = tfv1alpha2.PhaseRunning
} else if tf.Status.Phase == tfv1alpha2.PhaseInitDelete {
tf.Status.Phase = tfv1alpha2.PhaseDeleting
}
tf.Status.Stage.State = tfv1alpha2.StateInProgress
// TODO because the pod is already running, is it critical that the
// phase and state be updated. The updateStatus function needs to retry
// if it fails to update.
err = r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{Requeue: true}, nil
}
// When the pod is created, don't requeue. The pod's status changes
// will trigger tfo to reconcile.
return reconcile.Result{}, nil
}
// By now, the task pod exists and the controller has to check and update on the status of the pod.
for pluginTaskName, pluginConfig := range tf.Spec.Plugins {
if tfv1alpha2.ListContainsTask(tf.Status.Plugins, pluginTaskName) {
continue
}
when := pluginConfig.When
whenTask := pluginConfig.Task
switch when {
case "After":
if whenTask.ID() < podType.ID() {
return r.createPluginPod(ctx, reqLogger, tf, pluginTaskName, pluginConfig, globalEnvFrom)
}
case "At":
if whenTask.ID() == podType.ID() {
return r.createPluginPod(ctx, reqLogger, tf, pluginTaskName, pluginConfig, globalEnvFrom)
}
}
}
// At this point, a pod is found for the current stage. We can check the
// pod status to find out more info about the pod.
realPod := pods.Items[0]
podName := realPod.ObjectMeta.Name
podPhase := realPod.Status.Phase
msg := fmt.Sprintf("Pod '%s' %s", podName, podPhase)
// if tf.Status.Stage.PodName != podName {
// if tf.Status.Stage.PodName == "" {
// // This is the first time this pod is found. Set the rerun attempt to 0
// tf.Status.Stage.RerunAttempt = 0
// } else {
// tf.Status.Stage.RerunAttempt++
// }
// }
tf.Status.Stage.PodUID = string(realPod.UID)
tf.Status.Stage.PodName = podName
if tf.Status.Stage.Message != msg {
tf.Status.Stage.Message = msg
reqLogger.Info(msg)
}
// TODO Does the user need reason and message?
// reason := realPod.Status.Reason
// message := realPod.Status.Message
// if reason != "" {
// msg = fmt.Sprintf("%s %s", msg, reason)
// }
// if message != "" {
// msg = fmt.Sprintf("%s %s", msg, message)
// }
if realPod.Status.Phase == corev1.PodFailed {
tf.Status.Stage.State = tfv1alpha2.StateFailed
tf.Status.Stage.StopTime = metav1.NewTime(time.Now())
err = r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
if realPod.Status.Phase == corev1.PodSucceeded {
tf.Status.Stage.State = tfv1alpha2.StateComplete
tf.Status.Stage.StopTime = metav1.NewTime(time.Now())
err = r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{}, err
}
if !tf.Spec.KeepCompletedPods && !tf.Spec.KeepLatestPodsOnly {
err := r.Client.Delete(ctx, &realPod)
if err != nil {
reqLogger.V(1).Info(err.Error())
}
}
return reconcile.Result{}, nil
}
tf.Status.Stage.State = tfv1alpha2.StageState(realPod.Status.Phase)
// Finally, update any statuses that have been changed if not already saved. This is probablye
// for pending condition that does not require anything to be done.
err = r.updateStatusWithRetry(ctx, tf, &tf.Status, reqLogger)
if err != nil {
reqLogger.V(1).Info(err.Error())
return reconcile.Result{}, err
}
// TODO should tf operator "auto" reconciliate (eg plan+apply)?
// TODO how should we handle manually triggering apply
return reconcile.Result{}, nil
}
// getTerraformResource fetches the terraform resource, retrying transient
// errors up to maxRetry times with a short sleep between attempts. A
// not-found error is returned immediately and is never retried.
func (r ReconcileTerraform) getTerraformResource(ctx context.Context, namespacedName types.NamespacedName, maxRetry int, reqLogger logr.Logger) (*tfv1alpha2.Terraform, error) {
	tf := &tfv1alpha2.Terraform{}
	for attempt := 1; attempt <= maxRetry; attempt++ {
		err := r.Client.Get(ctx, namespacedName, tf)
		if err == nil {
			break
		}
		// Not-found is terminal; so is exhausting the retry budget.
		if errors.IsNotFound(err) || attempt >= maxRetry {
			return tf, err
		}
		time.Sleep(100 * time.Millisecond)
	}
	return tf, nil
}
// newStage builds a Stage record for the given task. As a side effect, a
// "GENERATION_CHANGE" reason resets the resource's plugin bookkeeping and
// moves the phase back to initializing, since the workflow restarts.
func newStage(tf *tfv1alpha2.Terraform, taskType tfv1alpha2.TaskName, reason string, interruptible tfv1alpha2.Interruptible, stageState tfv1alpha2.StageState) *tfv1alpha2.Stage {
	if reason == "GENERATION_CHANGE" {
		tf.Status.Plugins = []tfv1alpha2.TaskName{}
		tf.Status.Phase = tfv1alpha2.PhaseInitializing
	}
	now := metav1.NewTime(time.Now())
	// A stage that is born complete stops the instant it starts; otherwise
	// the stop time is left at the epoch as a "not stopped yet" marker.
	stop := metav1.NewTime(time.Unix(0, 0))
	if stageState == tfv1alpha2.StateComplete {
		stop = now
	}
	return &tfv1alpha2.Stage{
		Generation:    tf.Generation,
		Interruptible: interruptible,
		Reason:        reason,
		State:         stageState,
		TaskType:      taskType,
		StartTime:     now,
		StopTime:      stop,
	}
}
// getConfiguredTasks returns the built-in workflow tasks plus any extra
// task names referenced by the supplied task options (deduplicated).
func getConfiguredTasks(taskOptions *[]tfv1alpha2.TaskOption) []tfv1alpha2.TaskName {
	tasks := []tfv1alpha2.TaskName{
		tfv1alpha2.RunSetup,
		tfv1alpha2.RunInit,
		tfv1alpha2.RunPlan,
		tfv1alpha2.RunApply,
		tfv1alpha2.RunSetupDelete,
		tfv1alpha2.RunInitDelete,
		tfv1alpha2.RunPlanDelete,
		tfv1alpha2.RunApplyDelete,
	}
	if taskOptions == nil {
		return tasks
	}
	for _, opt := range *taskOptions {
		for _, taskName := range opt.For {
			// "*" targets every task; it never introduces a new one.
			if taskName == "*" {
				continue
			}
			if !tfv1alpha2.ListContainsTask(tasks, taskName) {
				tasks = append(tasks, taskName)
			}
		}
	}
	return tasks
}
// checkSetNewStage uses the tf resource's `.status.stage` state to find the next stage of the terraform run.
// The following set of rules are used:
//
// 1. Generation - Check that the resource's generation matches the stage's generation. When the generation
// changes the old generation can no longer add a new stage.
//
// 2. Check that the current stage is completed. If it is not, this function returns false and the pod status
// will be determined which will update the stage for the next iteration.
//
// 3. Scripts defined in the tf resource manifest will trigger the script runner podTypes.
//
// When a stage has already triggered a pod, the only way for the pod to transition to the next stage is for
// the pod to complete successfully. Any other pod phase will keep the pod in the current stage, or in the
// case of the apply task, the workflow will be restarted.
//
// Returns nil when no new stage should be started.
func (r ReconcileTerraform) checkSetNewStage(ctx context.Context, tf *tfv1alpha2.Terraform) *tfv1alpha2.Stage {
	var isNewStage bool
	var podType tfv1alpha2.TaskName
	var reason string
	configuredTasks := getConfiguredTasks(&tf.Spec.TaskOptions)
	// Phases that belong to the destroy workflow. While the resource is in
	// any of these phases a generation change must not restart the normal
	// (create/apply) workflow.
	// BUGFIX: this list previously contained PhaseDeleted twice instead of
	// PhaseDeleting, so a generation change during an in-progress delete
	// could incorrectly restart the normal workflow.
	deletePhases := []string{
		string(tfv1alpha2.PhaseDeleted),
		string(tfv1alpha2.PhaseInitDelete),
		string(tfv1alpha2.PhaseDeleting),
	}
	tfIsFinalizing := utils.ListContainsStr(deletePhases, string(tf.Status.Phase))
	tfIsNotFinalizing := !tfIsFinalizing
	initDelete := tf.Status.Phase == tfv1alpha2.PhaseInitDelete
	stageState := tfv1alpha2.StateInitializing
	interruptible := tfv1alpha2.CanBeInterrupt
	// resource status
	currentStage := tf.Status.Stage
	currentStagePodType := currentStage.TaskType
	currentStageCanNotBeInterrupted := currentStage.Interruptible == tfv1alpha2.CanNotBeInterrupt
	currentStageIsRunning := currentStage.State == tfv1alpha2.StateInProgress
	isNewGeneration := currentStage.Generation != tf.Generation
	if currentStageCanNotBeInterrupted && currentStageIsRunning {
		// Cannot change to the next stage because the current stage cannot be
		// interrupted and is currently running
		isNewStage = false
	} else if isNewGeneration && tfIsNotFinalizing {
		// The current generation has changed and this is the first pod in the
		// normal terraform workflow
		isNewStage = true
		reason = "GENERATION_CHANGE"
		podType = tfv1alpha2.RunSetup
		// } else if initDelete && !utils.ListContainsStr(deletePodTypes, string(currentStagePodType)) {
	} else if initDelete && isNewGeneration {
		// The tf resource is marked for deletion and this is the first pod
		// in the terraform destroy workflow.
		isNewStage = true
		reason = "TF_RESOURCE_DELETED"
		podType = tfv1alpha2.RunSetupDelete
		interruptible = tfv1alpha2.CanNotBeInterrupt
	} else if currentStage.State == tfv1alpha2.StateComplete {
		// The current stage finished; advance to the next configured task.
		isNewStage = true
		reason = fmt.Sprintf("COMPLETED_%s", strings.ToUpper(currentStage.TaskType.String()))
		switch currentStagePodType {
		case tfv1alpha2.RunNil:
			// RunNil marks the end of the workflow; there is nothing after it.
			isNewStage = false
		default:
			podType = nextTask(currentStagePodType, configuredTasks)
			interruptible = isTaskInterruptable(podType)
			if podType == tfv1alpha2.RunNil {
				stageState = tfv1alpha2.StateComplete
			}
		}
	} else if currentStage.State == tfv1alpha2.StateFailed {
		if currentStage.TaskType == tfv1alpha2.RunApply {
			err := r.Client.Get(ctx, types.NamespacedName{Namespace: tf.Namespace, Name: tf.Status.Stage.PodName}, &corev1.Pod{})
			if err != nil && errors.IsNotFound(err) {
				// If the task failed, is of type "apply", and the pod does not exist, restart the workflow.
				isNewStage = true
				reason = "RESTARTED_WORKFLOW"
				podType = nextTask(tfv1alpha2.RunPostInit, configuredTasks)
				interruptible = isTaskInterruptable(podType)
			}
		} else if currentStage.TaskType == tfv1alpha2.RunApplyDelete {
			pod := corev1.Pod{}
			err := r.Client.Get(ctx, types.NamespacedName{Namespace: tf.Namespace, Name: tf.Status.Stage.PodName}, &pod)
			if err != nil && errors.IsNotFound(err) {
				// If the task failed, is of type "apply-delete", and the pod does not exist, restart the delete workflow.
				isNewStage = true
				reason = "RESTARTED_DELETE_WORKFLOW"
				podType = nextTask(tfv1alpha2.RunPostInitDelete, configuredTasks)
				interruptible = isTaskInterruptable(podType)
			}
		}
	}
	if !isNewStage {
		return nil
	}
	return newStage(tf, podType, reason, interruptible, stageState)
}
// removeOldPlan deletes non-running task pods of the current generation.
// When the stage was restarted, the pods belonging to the setup/init part
// of the (delete) workflow are excluded from deletion.
func (r ReconcileTerraform) removeOldPlan(tf *tfv1alpha2.Terraform) error {
	selectors := []string{
		fmt.Sprintf("terraforms.tf.isaaguilar.com/generation==%d", tf.Generation),
		fmt.Sprintf("terraforms.tf.isaaguilar.com/resourceName=%s", tf.Name),
		"app.kubernetes.io/instance",
	}
	// Decide which task pods survive the restart.
	var keep []tfv1alpha2.TaskName
	switch tf.Status.Stage.Reason {
	case "RESTARTED_WORKFLOW":
		keep = []tfv1alpha2.TaskName{
			tfv1alpha2.RunSetup,
			tfv1alpha2.RunPreInit,
			tfv1alpha2.RunInit,
			tfv1alpha2.RunPostInit,
		}
	case "RESTARTED_DELETE_WORKFLOW":
		keep = []tfv1alpha2.TaskName{
			tfv1alpha2.RunSetupDelete,
			tfv1alpha2.RunPreInitDelete,
			tfv1alpha2.RunInitDelete,
			tfv1alpha2.RunPostInitDelete,
		}
	}
	for _, task := range keep {
		selectors = append(selectors, fmt.Sprintf("app.kubernetes.io/instance!=%s", task))
	}
	labelSelector, err := labels.Parse(strings.Join(selectors, ","))
	if err != nil {
		return err
	}
	// Running pods are never reaped here.
	fieldSelector, err := fields.ParseSelector("status.phase!=Running")
	if err != nil {
		return err
	}
	return r.Client.DeleteAllOf(context.TODO(), &corev1.Pod{}, &client.DeleteAllOfOptions{
		ListOptions: client.ListOptions{
			LabelSelector: labelSelector,
			Namespace:     tf.Namespace,
			FieldSelector: fieldSelector,
		},
	})
}
// isTaskInterruptable reports whether a task may be safely interrupted.
// Tasks that are known to cause issues with terraform state when not run
// to completion are marked as not interruptible.
func isTaskInterruptable(task tfv1alpha2.TaskName) tfv1alpha2.Interruptible {
	switch task {
	case tfv1alpha2.RunInit,
		tfv1alpha2.RunPlan,
		tfv1alpha2.RunApply,
		tfv1alpha2.RunInitDelete,
		tfv1alpha2.RunPlanDelete,
		tfv1alpha2.RunApplyDelete:
		return tfv1alpha2.CanNotBeInterrupt
	}
	return tfv1alpha2.CanBeInterrupt
}
// nextTask returns the configured task that follows currentTask in its
// workflow ordering (normal or delete), or RunNil when currentTask is the
// last configured task or is not part of either workflow.
func nextTask(currentTask tfv1alpha2.TaskName, configuredTasks []tfv1alpha2.TaskName) tfv1alpha2.TaskName {
	tasksInOrder := []tfv1alpha2.TaskName{
		tfv1alpha2.RunSetup,
		tfv1alpha2.RunPreInit,
		tfv1alpha2.RunInit,
		tfv1alpha2.RunPostInit,
		tfv1alpha2.RunPrePlan,
		tfv1alpha2.RunPlan,
		tfv1alpha2.RunPostPlan,
		tfv1alpha2.RunPreApply,
		tfv1alpha2.RunApply,
		tfv1alpha2.RunPostApply,
	}
	deleteTasksInOrder := []tfv1alpha2.TaskName{
		tfv1alpha2.RunSetupDelete,
		tfv1alpha2.RunPreInitDelete,
		tfv1alpha2.RunInitDelete,
		tfv1alpha2.RunPostInitDelete,
		tfv1alpha2.RunPrePlanDelete,
		tfv1alpha2.RunPlanDelete,
		tfv1alpha2.RunPostPlanDelete,
		tfv1alpha2.RunPreApplyDelete,
		tfv1alpha2.RunApplyDelete,
		tfv1alpha2.RunPostApplyDelete,
	}
	// pick scans an ordered workflow for currentTask and returns the first
	// configured task that appears after it.
	pick := func(ordered []tfv1alpha2.TaskName) tfv1alpha2.TaskName {
		passedCurrent := false
		for _, task := range ordered {
			if task == currentTask {
				passedCurrent = true
				continue
			}
			if passedCurrent && tfv1alpha2.ListContainsTask(configuredTasks, task) {
				return task
			}
		}
		return tfv1alpha2.RunNil
	}
	if tfv1alpha2.ListContainsTask(tasksInOrder, currentTask) {
		return pick(tasksInOrder)
	}
	if tfv1alpha2.ListContainsTask(deleteTasksInOrder, currentTask) {
		return pick(deleteTasksInOrder)
	}
	return tfv1alpha2.RunNil
}
// backgroundReapOldGenerationPods removes workflow resources that were
// created by previous generations of the tf resource. It first deletes
// old-generation pods; only once all old pods are gone does it delete the
// associated configmaps, secrets, roles, rolebindings, and serviceaccounts.
// It is designed to run as a goroutine and reschedules itself (after a
// 30 second sleep) while old pods linger, giving up after 20 attempts.
func (r ReconcileTerraform) backgroundReapOldGenerationPods(tf *tfv1alpha2.Terraform, attempt int) {
	logger := r.Log.WithName("Reaper").WithValues("Terraform", fmt.Sprintf("%s/%s", tf.Namespace, tf.Name))
	if attempt > 20 {
		// TODO explain what and way resources cannot be reaped
		logger.Info("Could not reap resources: Max attempts to reap old-generation resources")
		return
	}
	// Before running a deletion, make sure we've got the most up-to-date resource in case a background
	// process takes longer than normal to complete.
	ctx := context.TODO()
	namespacedName := types.NamespacedName{Namespace: tf.Namespace, Name: tf.Name}
	tf, err := r.getTerraformResource(ctx, namespacedName, 3, logger)
	if err != nil {
		if errors.IsNotFound(err) {
			logger.V(1).Info("Terraform resource not found. Ignoring since object must be deleted")
			return
		}
		// Error reading the object - requeue the request.
		logger.Error(err, "Failed to get Terraform")
		return
	}
	// The labels required are read as:
	// 1. The terraforms.tf.isaaguilar.com/generation key MUST exist
	// 2. The terraforms.tf.isaaguilar.com/generation value MUST match the current resource generation
	// 3. The terraforms.tf.isaaguilar.com/resourceName key MUST exist
	// 4. The terraforms.tf.isaaguilar.com/resourceName value MUST match the resource name
	labelSelector, err := labels.Parse(fmt.Sprintf("terraforms.tf.isaaguilar.com/generation,terraforms.tf.isaaguilar.com/generation!=%d,terraforms.tf.isaaguilar.com/resourceName,terraforms.tf.isaaguilar.com/resourceName=%s", tf.Generation, tf.Name))
	if err != nil {
		// NOTE(review): on a parse failure execution continues with a nil
		// selector, which the DeleteAllOf below would treat as "match all"
		// within the namespace — consider returning here instead.
		logger.Error(err, "Could not parse labels")
	}
	fieldSelector, err := fields.ParseSelector("status.phase!=Running")
	if err != nil {
		logger.Error(err, "Could not parse fields")
	}
	// Reap only pods that are not currently running; running pods are given
	// time to finish and are retried on a later attempt.
	err = r.Client.DeleteAllOf(context.TODO(), &corev1.Pod{}, &client.DeleteAllOfOptions{
		ListOptions: client.ListOptions{
			LabelSelector: labelSelector,
			Namespace:     tf.Namespace,
			FieldSelector: fieldSelector,
		},
	})
	if err != nil {
		logger.Error(err, "Could not reap old generation pods")
	}
	// Wait for all the pods of the previous generations to be gone. Only after
	// the pods are cleaned up, clean up other associated resources like roles
	// and rolebindings.
	podList := corev1.PodList{}
	err = r.Client.List(context.TODO(), &podList, &client.ListOptions{
		LabelSelector: labelSelector,
		Namespace:     tf.Namespace,
	})
	if err != nil {
		logger.Error(err, "Could not list pods to reap")
	}
	if len(podList.Items) > 0 {
		// There are still some pods from a previous generation hanging around
		// for some reason. Wait some time and try to reap again later.
		time.Sleep(30 * time.Second)
		attempt++
		go r.backgroundReapOldGenerationPods(tf, attempt)
	} else {
		// All old pods are gone and the other resouces will now be removed.
		// Each delete below is best-effort: a failure is logged but does not
		// stop the remaining kinds from being reaped.
		err = r.Client.DeleteAllOf(context.TODO(), &corev1.ConfigMap{}, &client.DeleteAllOfOptions{
			ListOptions: client.ListOptions{
				LabelSelector: labelSelector,
				Namespace:     tf.Namespace,
			},
		})
		if err != nil {
			logger.Error(err, "Could not reap old generation configmaps")
		}
		err = r.Client.DeleteAllOf(context.TODO(), &corev1.Secret{}, &client.DeleteAllOfOptions{
			ListOptions: client.ListOptions{
				LabelSelector: labelSelector,
				Namespace:     tf.Namespace,
			},
		})
		if err != nil {
			logger.Error(err, "Could not reap old generation secrets")
		}
		err = r.Client.DeleteAllOf(context.TODO(), &rbacv1.Role{}, &client.DeleteAllOfOptions{
			ListOptions: client.ListOptions{
				LabelSelector: labelSelector,
				Namespace:     tf.Namespace,
			},
		})
		if err != nil {
			logger.Error(err, "Could not reap old generation roles")
		}
		err = r.Client.DeleteAllOf(context.TODO(), &rbacv1.RoleBinding{}, &client.DeleteAllOfOptions{
			ListOptions: client.ListOptions{
				LabelSelector: labelSelector,
				Namespace:     tf.Namespace,
			},
		})
		if err != nil {
			logger.Error(err, "Could not reap old generation roleBindings")
		}
		err = r.Client.DeleteAllOf(context.TODO(), &corev1.ServiceAccount{}, &client.DeleteAllOfOptions{
			ListOptions: client.ListOptions{
				LabelSelector: labelSelector,
				Namespace:     tf.Namespace,
			},
		})
		if err != nil {
			logger.Error(err, "Could not reap old generation serviceAccounts")
		}
	}
}
// reapPlugins deletes plugin jobs and pods belonging to previous
// generations of the tf resource, regardless of pod phase. Like
// backgroundReapOldGenerationPods it is designed to run as a goroutine
// and reschedules itself while old pods linger, giving up after 20
// attempts.
func (r ReconcileTerraform) reapPlugins(tf *tfv1alpha2.Terraform, attempt int) {
	logger := r.Log.WithName("ReaperPlugins").WithValues("Terraform", fmt.Sprintf("%s/%s", tf.Namespace, tf.Name))
	if attempt > 20 {
		// TODO explain what and way resources cannot be reaped
		logger.Info("Could not reap resources: Max attempts to reap old-generation resources")
		return
	}
	// Before running a deletion, make sure we've got the most up-to-date resource in case a background
	// process takes longer than normal to complete.
	ctx := context.TODO()
	namespacedName := types.NamespacedName{Namespace: tf.Namespace, Name: tf.Name}
	tf, err := r.getTerraformResource(ctx, namespacedName, 3, logger)
	if err != nil {
		if errors.IsNotFound(err) {
			logger.V(1).Info("Terraform resource not found. Ignoring since object must be deleted")
			return
		}
		// Error reading the object - requeue the request.
		logger.Error(err, "Failed to get Terraform")
		return
	}
	// Delete old plugins regardless of pod phase. Selection requires the
	// isPlugin marker plus a generation label that differs from the current
	// generation and a matching resourceName label.
	labelSelectorForPlugins, err := labels.Parse(fmt.Sprintf("terraforms.tf.isaaguilar.com/isPlugin=true,terraforms.tf.isaaguilar.com/generation,terraforms.tf.isaaguilar.com/generation!=%d,terraforms.tf.isaaguilar.com/resourceName,terraforms.tf.isaaguilar.com/resourceName=%s", tf.Generation, tf.Name))
	if err != nil {
		// NOTE(review): execution continues with a nil selector on parse
		// failure — consider returning here instead.
		logger.Error(err, "Could not parse labels")
	}
	// Background propagation lets the jobs' pods be garbage-collected
	// asynchronously by Kubernetes.
	deleteProppagationBackground := metav1.DeletePropagationBackground
	err = r.Client.DeleteAllOf(context.TODO(), &batchv1.Job{}, &client.DeleteAllOfOptions{
		ListOptions: client.ListOptions{
			LabelSelector: labelSelectorForPlugins,
			Namespace:     tf.Namespace,
		},
		DeleteOptions: client.DeleteOptions{
			PropagationPolicy: &deleteProppagationBackground,
		},
	})
	if err != nil {
		logger.Error(err, "Could not reap old generation jobs")
	}
	err = r.Client.DeleteAllOf(context.TODO(), &corev1.Pod{}, &client.DeleteAllOfOptions{
		ListOptions: client.ListOptions{
			LabelSelector: labelSelectorForPlugins,
			Namespace:     tf.Namespace,
		},
	})
	if err != nil {
		logger.Error(err, "Could not reap old generation pods")
	}
	// Wait for all the pods of the previous generations to be gone. Only after
	// the pods are cleaned up, clean up other associated resources like roles
	// and rolebindings.
	podList := corev1.PodList{}
	err = r.Client.List(context.TODO(), &podList, &client.ListOptions{
		LabelSelector: labelSelectorForPlugins,
		Namespace:     tf.Namespace,
	})
	if err != nil {
		logger.Error(err, "Could not list pods to reap")
	}
	if len(podList.Items) > 0 {
		// There are still some pods from a previous generation hanging around
		// for some reason. Wait some time and try to reap again later.
		time.Sleep(30 * time.Second)
		attempt++
		go r.reapPlugins(tf, attempt)
	}
}
// getNodeSelectorsFromCache returns the controller's cached affinity,
// nodeSelector, and tolerations for inheritance by task pods. Each value
// is looked up only when the corresponding Inherit* flag is enabled; a
// cache miss leaves the zero value.
func (r ReconcileTerraform) getNodeSelectorsFromCache() (*corev1.Affinity, map[string]string, []corev1.Toleration) {
	var (
		affinity     *corev1.Affinity
		nodeSelector map[string]string
		tolerations  []corev1.Toleration
	)
	if r.InheritAffinity {
		if cached, ok := r.Cache.Get(r.AffinityCacheKey); ok {
			affinity = cached.(*corev1.Affinity)
		}
	}
	if r.InheritNodeSelector {
		if cached, ok := r.Cache.Get(r.NodeSelectorCacheKey); ok {
			nodeSelector = cached.(map[string]string)
		}
	}
	if r.InheritTolerations {
		if cached, ok := r.Cache.Get(r.TolerationsCacheKey); ok {
			tolerations = cached.([]corev1.Toleration)
		}
	}
	return affinity, nodeSelector, tolerations
}
// createPluginPod will attempt to create the plugin pod and mark it as added in the resource's status.
// No logic is used to determine if the plugin was successful. If the createPod function errors, a log event
// is recorded in the controller.
func (r ReconcileTerraform) createPluginPod(ctx context.Context, logger logr.Logger, tf *tfv1alpha2.Terraform, pluginTaskName tfv1alpha2.TaskName, pluginConfig tfv1alpha2.Plugin, globalEnvFrom []corev1.EnvFromSource) (reconcile.Result, error) {
	// Inherit scheduling constraints cached from the controller's own pod.
	affinity, nodeSelector, tolerations := r.getNodeSelectorsFromCache()
	pluginRunOpts := newTaskOptions(tf, pluginTaskName, tf.Generation, globalEnvFrom, affinity, nodeSelector, tolerations)
	pluginRunOpts.image = pluginConfig.Image
	pluginRunOpts.imagePullPolicy = pluginConfig.ImagePullPolicy
	// Fire-and-forget job creation: only the creation outcome is logged.
	// NOTE(review): this goroutine reads tf while the status update below
	// runs concurrently — confirm createJob does not race with it.
	go func() {
		err := r.createJob(ctx, tf, pluginRunOpts)
		if err != nil {
			logger.Error(err, fmt.Sprintf("Failed creating plugin job %s", pluginTaskName))
		} else {
			logger.Info(fmt.Sprintf("Starting the plugin job '%s'", pluginTaskName.String()))
		}
	}()
	// Record the plugin as handled so it is not re-triggered on the next
	// reconcile, even before the job's outcome is known.
	tf.Status.Plugins = append(tf.Status.Plugins, pluginTaskName)
	err := r.updateStatusWithRetry(ctx, tf, &tf.Status, logger)
	if err != nil {
		logger.V(1).Info(err.Error())
	}
	return reconcile.Result{}, err
}
// updateFinalizer sets and unsets the finalizer on the tf resource. When
// IgnoreDelete is true, the finalizer is removed. When IgnoreDelete is
// false, the finalizer is added. A resource that has reached the deleted
// phase always has the finalizer removed.
//
// The finalizer will be responsible for starting the destroy-workflow.
// Returns true when the finalizer list was modified.
func updateFinalizer(tf *tfv1alpha2.Terraform) bool {
	finalizers := tf.GetFinalizers()
	hasFinalizer := utils.ListContainsStr(finalizers, terraformFinalizer)
	// The destroy workflow is done; release the finalizer so deletion can
	// complete.
	if tf.Status.Phase == tfv1alpha2.PhaseDeleted && hasFinalizer {
		tf.SetFinalizers(utils.ListRemoveStr(finalizers, terraformFinalizer))
		return true
	}
	if tf.Spec.IgnoreDelete {
		if len(finalizers) > 0 && hasFinalizer {
			tf.SetFinalizers(utils.ListRemoveStr(finalizers, terraformFinalizer))
			return true
		}
		return false
	}
	if !hasFinalizer {
		tf.SetFinalizers(append(finalizers, terraformFinalizer))
		return true
	}
	return false
}
// update writes the tf resource back to the cluster.
// The client error is wrapped with %w (previously %s) so callers can still
// inspect it with errors.Is/errors.As (e.g. apierrors.IsConflict).
func (r ReconcileTerraform) update(ctx context.Context, tf *tfv1alpha2.Terraform) error {
	if err := r.Client.Update(ctx, tf); err != nil {
		return fmt.Errorf("failed to update tf resource: %w", err)
	}
	return nil
}
// updateStatus writes the tf resource's status subresource to the cluster.
// The client error is wrapped with %w (previously %s) so callers can still
// inspect it with errors.Is/errors.As (e.g. apierrors.IsConflict).
func (r ReconcileTerraform) updateStatus(ctx context.Context, tf *tfv1alpha2.Terraform) error {
	if err := r.Client.Status().Update(ctx, tf); err != nil {
		return fmt.Errorf("failed to update tf status: %w", err)
	}
	return nil
}
// updateStatusWithRetry writes desiredStatus to the tf resource's status
// subresource, retrying up to 10 times with exponential backoff on update
// failures. After each successful write it re-reads the resource (also up
// to 10 times) to confirm the plugins, stage, phase, and podNamePrefix
// fields actually landed, and retries the whole write if the confirmation
// fails. Returns the last update error if the write never succeeded.
func (r ReconcileTerraform) updateStatusWithRetry(ctx context.Context, tf *tfv1alpha2.Terraform, desiredStatus *tfv1alpha2.TerraformStatus, logger logr.Logger) error {
	resourceNamespacedName := types.NamespacedName{Namespace: tf.Namespace, Name: tf.Name}
	var getResourceErr error
	var updateErr error
	for i := 0; i < 10; i++ {
		if i > 0 {
			// Exponential backoff: ceil(.5*(2^(i+3)-1)) milliseconds.
			n := math.Pow(2, float64(i+3))
			backoffTime := math.Ceil(.5 * (n - 1))
			time.Sleep(time.Duration(backoffTime) * time.Millisecond)
			// Re-fetch so the retry carries a fresh resourceVersion.
			tf, getResourceErr = r.getTerraformResource(ctx, resourceNamespacedName, 10, logger)
			if getResourceErr != nil {
				return fmt.Errorf("failed to get latest terraform while updating status: %s", getResourceErr)
			}
			// NOTE(review): desiredStatus is nil-checked here but dereferenced
			// unconditionally in the confirmation loop below — confirm callers
			// never pass nil.
			if desiredStatus != nil {
				tf.Status = *desiredStatus
			}
		}
		updateErr = r.Client.Status().Update(ctx, tf)
		if updateErr != nil {
			logger.V(7).Info(fmt.Sprintf("Retrying to update status because an error has occurred while updating: %s", updateErr))
			continue
		}
		// Confirm the status is up to date
		isUpdateConfirmed := false
		for j := 0; j < 10; j++ {
			// NOTE(review): ':=' shadows the outer tf for this iteration;
			// intentional here since only the freshly fetched copy is
			// compared, but easy to trip over when editing.
			tf, updatedResourceErr := r.getTerraformResource(ctx, resourceNamespacedName, 10, logger)
			if updatedResourceErr != nil {
				return fmt.Errorf("failed to get latest terraform while validating status: %s", updatedResourceErr)
			}
			if !tfv1alpha2.TaskListsAreEqual(tf.Status.Plugins, desiredStatus.Plugins) {
				logger.V(7).Info(fmt.Sprintf("Failed to confirm the status update because plugins did not equal. Have %s and Want %s", tf.Status.Plugins, desiredStatus.Plugins))
			} else if stageItem := tf.Status.Stage.IsEqual(desiredStatus.Stage); stageItem != "" {
				logger.V(7).Info(fmt.Sprintf("Failed to confirm the status update because stage item %s did not equal", stageItem))
			} else if tf.Status.Phase != desiredStatus.Phase {
				logger.V(7).Info("Failed to confirm the status update because phase did not equal")
			} else if tf.Status.PodNamePrefix != desiredStatus.PodNamePrefix {
				logger.V(7).Info("Failed to confirm the status update because podNamePrefix did not equal")
			} else {
				isUpdateConfirmed = true
			}
			if isUpdateConfirmed {
				break
			}
			logger.V(7).Info("Retrying to confirm the status update")
			// Same backoff curve as the outer loop.
			n := math.Pow(2, float64(j+3))
			backoffTime := math.Ceil(.5 * (n - 1))
			time.Sleep(time.Duration(backoffTime) * time.Millisecond)
		}
		if isUpdateConfirmed {
			break
		}
		logger.V(7).Info("Retrying to update status because the update was not confirmed")
	}
	if updateErr != nil {
		return fmt.Errorf("failed to update tf status: %s", updateErr)
	}
	return nil
}
// IsJobFinished returns true if the job has completed — either it recorded
// a completion time, or it has no active pods left and its failure count
// reached the configured backoff limit.
func IsJobFinished(job *batchv1.Job) bool {
	if job.Status.CompletionTime != nil {
		return true
	}
	backoffLimit := job.Spec.BackoffLimit
	return job.Status.Active == 0 && backoffLimit != nil && job.Status.Failed >= *backoffLimit
}
// formatJobSSHConfig assembles the contents of the SSH config secret used
// by task pods: an ssh "config" file entry for the optional SSH tunnel
// proxy and for each SCM host, plus the private key material loaded from
// the referenced secrets. Keys are stored under "proxy_key" for the tunnel
// and under the SCM host name for each git host. Returns the data as a
// map of file name to bytes.
func formatJobSSHConfig(ctx context.Context, reqLogger logr.Logger, tf *tfv1alpha2.Terraform, k8sclient client.Client) (map[string][]byte, error) {
	data := make(map[string]string)
	dataAsByte := make(map[string][]byte)
	if tf.Spec.SSHTunnel != nil {
		// The tunnel host is written first as "Host proxy" so SCM entries
		// below can reference it via ProxyJump.
		data["config"] = fmt.Sprintf("Host proxy\n"+
			"\tStrictHostKeyChecking no\n"+
			"\tUserKnownHostsFile=/dev/null\n"+
			"\tUser %s\n"+
			"\tHostname %s\n"+
			"\tIdentityFile ~/.ssh/proxy_key\n",
			tf.Spec.SSHTunnel.User,
			tf.Spec.SSHTunnel.Host)
		// Secret key name defaults to "id_rsa"; namespace defaults to the
		// tf resource's namespace.
		k := tf.Spec.SSHTunnel.SSHKeySecretRef.Key
		if k == "" {
			k = "id_rsa"
		}
		ns := tf.Spec.SSHTunnel.SSHKeySecretRef.Namespace
		if ns == "" {
			ns = tf.Namespace
		}
		key, err := loadPassword(ctx, k8sclient, k, tf.Spec.SSHTunnel.SSHKeySecretRef.Name, ns)
		if err != nil {
			return dataAsByte, err
		}
		data["proxy_key"] = key
	}
	for _, m := range tf.Spec.SCMAuthMethods {
		// TODO validate SSH in resource manifest
		// NOTE(review): m.Git is dereferenced without a nil check here —
		// confirm Git is always set for SCMAuthMethods entries.
		if m.Git.SSH != nil {
			// Each SCM host gets its own Host entry; the identity file name
			// matches the host so it pairs with the data[m.Host] key below.
			if m.Git.SSH.RequireProxy {
				data["config"] += fmt.Sprintf("\nHost %s\n"+
					"\tStrictHostKeyChecking no\n"+
					"\tUserKnownHostsFile=/dev/null\n"+
					"\tHostname %s\n"+
					"\tIdentityFile ~/.ssh/%s\n"+
					"\tProxyJump proxy",
					m.Host,
					m.Host,
					m.Host)
			} else {
				data["config"] += fmt.Sprintf("\nHost %s\n"+
					"\tStrictHostKeyChecking no\n"+
					"\tUserKnownHostsFile=/dev/null\n"+
					"\tHostname %s\n"+
					"\tIdentityFile ~/.ssh/%s\n",
					m.Host,
					m.Host,
					m.Host)
			}
			k := m.Git.SSH.SSHKeySecretRef.Key
			if k == "" {
				k = "id_rsa"
			}
			ns := m.Git.SSH.SSHKeySecretRef.Namespace
			if ns == "" {
				ns = tf.Namespace
			}
			key, err := loadPassword(ctx, k8sclient, k, m.Git.SSH.SSHKeySecretRef.Name, ns)
			if err != nil {
				return dataAsByte, err
			}
			// NOTE(review): keyed by host, so a duplicate host in
			// SCMAuthMethods silently overwrites the earlier key.
			data[m.Host] = key
		}
	}
	for k, v := range data {
		dataAsByte[k] = []byte(v)
	}
	return dataAsByte, nil
}
// setupAndRun prepares everything a task pod needs — the main module
// source (inline, configmap, or remote address), inline task scripts, SCM
// credentials (HTTPS token and SSH keys), resource downloads, and the
// backend override — and then starts the run for the current stage.
// Credential and script setup is only performed when the stage reason
// indicates a new generation or a first install.
func (r *ReconcileTerraform) setupAndRun(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	reqLogger := r.Log.WithValues("Terraform", types.NamespacedName{Name: tf.Name, Namespace: tf.Namespace}.String())
	var err error
	reason := tf.Status.Stage.Reason
	isNewGeneration := reason == "GENERATION_CHANGE" || reason == "TF_RESOURCE_DELETED"
	isFirstInstall := reason == "TF_RESOURCE_CREATED"
	isChanged := isNewGeneration || isFirstInstall
	// r.Recorder.Event(tf, "Normal", "InitializeJobCreate", fmt.Sprintf("Setting up a Job"))
	// TODO(user): Add the cleanup steps that the operator
	// needs to do before the CR can be deleted. Examples
	// of finalizers include performing backups and deleting
	// resources that are not owned by this CR, like a PVC.
	// Map of SCM host -> type; currently only git hosts are recognized.
	scmMap := make(map[string]scmType)
	for _, v := range tf.Spec.SCMAuthMethods {
		if v.Git != nil {
			scmMap[v.Host] = gitScmType
		}
	}
	// Exactly one module source must be provided: inline text, a configmap
	// selector, or a remote source address.
	if tf.Spec.TerraformModule.Inline != "" {
		// Add add inline to configmap and instruct the pod to fetch the
		// configmap as the main module
		runOpts.mainModulePluginData["inline-module.tf"] = tf.Spec.TerraformModule.Inline
	} else if tf.Spec.TerraformModule.ConfigMapSelector != nil {
		// Instruct the setup pod to fetch the configmap as the main module
		b, err := json.Marshal(tf.Spec.TerraformModule.ConfigMapSelector)
		if err != nil {
			return err
		}
		runOpts.mainModulePluginData[".__TFO__ConfigMapModule.json"] = string(b)
	} else if tf.Spec.TerraformModule.Source != "" {
		runOpts.terraformModuleParsed, err = getParsedAddress(tf.Spec.TerraformModule.Source, "", false, scmMap)
		if err != nil {
			return err
		}
	} else {
		return fmt.Errorf("no terraform module detected")
	}
	if isChanged {
		// Old-generation plugin jobs/pods are reaped in the background.
		go r.reapPlugins(tf, 0)
		// Inline scripts become per-task files in the main module plugin data.
		for _, taskOption := range tf.Spec.TaskOptions {
			if inlineScript := taskOption.Script.Inline; inlineScript != "" {
				for _, affected := range taskOption.For {
					// "*" targets all tasks; no per-task file is written for it.
					if affected.String() == "*" {
						continue
					}
					runOpts.mainModulePluginData[fmt.Sprintf("inline-%s.sh", affected)] = inlineScript
				}
			}
		}
		// Set up the HTTPS token to use if defined
		for _, m := range tf.Spec.SCMAuthMethods {
			// This loop is used to find the first HTTPS token-based
			// authentication which gets added to all runners' "GIT_ASKPASS"
			// script/env var.
			// TODO
			// Is there a way to allow multiple tokens for HTTPS access
			// to git scm?
			// NOTE(review): m.Git is dereferenced without a nil check here
			// (the scmMap loop above does check) — confirm Git is always set.
			if m.Git.HTTPS != nil {
				if _, found := runOpts.secretData["gitAskpass"]; found {
					continue
				}
				tokenSecret := *m.Git.HTTPS.TokenSecretRef
				if tokenSecret.Key == "" {
					tokenSecret.Key = "token"
				}
				gitAskpass, err := r.createGitAskpass(ctx, tokenSecret)
				if err != nil {
					return err
				}
				runOpts.secretData["gitAskpass"] = gitAskpass
			}
		}
		// Set up the SSH keys to use if defined
		sshConfigData, err := formatJobSSHConfig(ctx, reqLogger, tf, r.Client)
		if err != nil {
			r.Recorder.Event(tf, "Warning", "SSHConfigError", fmt.Errorf("%v", err).Error())
			return fmt.Errorf("error setting up sshconfig: %v", err)
		}
		for k, v := range sshConfigData {
			runOpts.secretData[k] = v
		}
		resourceDownloadItems := []ParsedAddress{}
		// Configure the resourceDownloads in JSON that the setupRunner will
		// use to download the resources into the main module directory
		// ConfigMap Data only needs to be updated when generation changes
		if tf.Spec.Setup != nil {
			for _, s := range tf.Spec.Setup.ResourceDownloads {
				address := strings.TrimSpace(s.Address)
				parsedAddress, err := getParsedAddress(address, s.Path, s.UseAsVar, scmMap)
				if err != nil {
					return err
				}
				// b, err := json.Marshal(parsedAddress)
				// if err != nil {
				// 	return err
				// }
				resourceDownloadItems = append(resourceDownloadItems, parsedAddress)
			}
		}
		b, err := json.Marshal(resourceDownloadItems)
		if err != nil {
			return err
		}
		resourceDownloads := string(b)
		runOpts.mainModulePluginData[".__TFO__ResourceDownloads.json"] = resourceDownloads
		// Override the backend.tf by inserting a custom backend
		runOpts.mainModulePluginData["backend_override.tf"] = tf.Spec.Backend
		/*
			All the tasks will perform external fetching of the scripts to
			execute. The downloader has yet to be determined... working on it
			:)
		*/
		// if tf.Spec.PreInitScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPreInit)] = tf.Spec.PreInitScript
		// }
		// if tf.Spec.PostInitScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostInit)] = tf.Spec.PostInitScript
		// }
		// if tf.Spec.PrePlanScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPrePlan)] = tf.Spec.PrePlanScript
		// }
		// if tf.Spec.PostPlanScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostPlan)] = tf.Spec.PostPlanScript
		// }
		// if tf.Spec.PreApplyScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPreApply)] = tf.Spec.PreApplyScript
		// }
		// if tf.Spec.PostApplyScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostApply)] = tf.Spec.PostApplyScript
		// }
		// if tf.Spec.PreInitDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPreInitDelete)] = tf.Spec.PreInitDeleteScript
		// }
		// if tf.Spec.PostInitDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostInitDelete)] = tf.Spec.PostInitDeleteScript
		// }
		// if tf.Spec.PrePlanDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPrePlanDelete)] = tf.Spec.PrePlanDeleteScript
		// }
		// if tf.Spec.PostPlanDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostPlanDelete)] = tf.Spec.PostPlanDeleteScript
		// }
		// if tf.Spec.PreApplyDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPreApplyDelete)] = tf.Spec.PostApplyScript
		// }
		// if tf.Spec.PostApplyDeleteScript != "" {
		// 	runOpts.mainModuleAddonData[string(tfv1alpha1.PodPostApplyDelete)] = tf.Spec.PostApplyDeleteScript
		// }
	}
	// RUN
	err = r.run(ctx, reqLogger, tf, runOpts, isNewGeneration, isFirstInstall)
	if err != nil {
		return err
	}
	return nil
}
// checkPersistentVolumeClaimExists looks up a PVC by namespaced name and
// returns the (possibly empty) resource, whether it exists, and any lookup
// error other than not-found.
func (r ReconcileTerraform) checkPersistentVolumeClaimExists(ctx context.Context, lookupKey types.NamespacedName) (*corev1.PersistentVolumeClaim, bool, error) {
	pvc := &corev1.PersistentVolumeClaim{}
	switch err := r.Client.Get(ctx, lookupKey, pvc); {
	case err == nil:
		return pvc, true, nil
	case errors.IsNotFound(err):
		// A missing PVC is not an error; the caller decides what to do.
		return pvc, false, nil
	default:
		return pvc, false, err
	}
}
// createPVC creates the PersistentVolumeClaim that persists the terraform
// runner's home directory. If the claim already exists this is a no-op.
// The claim size defaults to 2Gi unless tf.Spec.PersistentVolumeSize is set.
// Creation failures are both recorded as a Warning event and returned.
func (r ReconcileTerraform) createPVC(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "PersistentVolumeClaim"
	_, found, err := r.checkPersistentVolumeClaimExists(ctx, types.NamespacedName{
		Name:      runOpts.prefixedName,
		Namespace: runOpts.namespace,
	})
	if err != nil {
		// Bug fix: this previously returned nil, silently swallowing lookup
		// errors and falling through to a blind Create.
		return err
	} else if found {
		return nil
	}
	persistentVolumeSize := resource.MustParse("2Gi")
	if tf.Spec.PersistentVolumeSize != nil {
		persistentVolumeSize = *tf.Spec.PersistentVolumeSize
	}
	// Named pvc (not "resource") so the imported resource package used by
	// MustParse above is not shadowed.
	pvc := runOpts.generatePVC(persistentVolumeSize, tf.Spec.StorageClassName)
	controllerutil.SetControllerReference(tf, pvc, r.Scheme)
	err = r.Client.Create(ctx, pvc)
	if err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, pvc.Name))
	return nil
}
// checkConfigMapExists looks up a ConfigMap by key. It returns the object,
// whether it exists, and any lookup error other than NotFound (which is
// reported as found=false with a nil error).
func (r ReconcileTerraform) checkConfigMapExists(ctx context.Context, lookupKey types.NamespacedName) (*corev1.ConfigMap, bool, error) {
	cm := &corev1.ConfigMap{}
	if err := r.Client.Get(ctx, lookupKey, cm); err != nil {
		if errors.IsNotFound(err) {
			return cm, false, nil
		}
		return cm, false, err
	}
	return cm, true, nil
}
// deleteConfigMapIfExists removes the named ConfigMap when present. A
// missing ConfigMap is not an error.
func (r ReconcileTerraform) deleteConfigMapIfExists(ctx context.Context, name, namespace string) error {
	cm, found, err := r.checkConfigMapExists(ctx, types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		return err
	}
	if !found {
		return nil
	}
	return r.Client.Delete(ctx, cm)
}
// createConfigMap (re)creates the versioned ConfigMap for this task run,
// deleting any stale copy first. Outcomes are recorded as events on the
// Terraform resource.
func (r ReconcileTerraform) createConfigMap(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "ConfigMap"
	cm := runOpts.generateConfigMap()
	controllerutil.SetControllerReference(tf, cm, r.Scheme)
	if err := r.deleteConfigMapIfExists(ctx, cm.Name, cm.Namespace); err != nil {
		return err
	}
	if err := r.Client.Create(ctx, cm); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, cm.Name))
	return nil
}
// checkSecretExists looks up a Secret by key. It returns the object, whether
// it exists, and any lookup error other than NotFound (reported as
// found=false with a nil error).
func (r ReconcileTerraform) checkSecretExists(ctx context.Context, lookupKey types.NamespacedName) (*corev1.Secret, bool, error) {
	secret := &corev1.Secret{}
	if err := r.Client.Get(ctx, lookupKey, secret); err != nil {
		if errors.IsNotFound(err) {
			return secret, false, nil
		}
		return secret, false, err
	}
	return secret, true, nil
}
// deleteSecretIfExists removes the named Secret when present. A missing
// Secret is not an error.
func (r ReconcileTerraform) deleteSecretIfExists(ctx context.Context, name, namespace string) error {
	secret, found, err := r.checkSecretExists(ctx, types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		return err
	}
	if !found {
		return nil
	}
	return r.Client.Delete(ctx, secret)
}
// createSecret creates a Secret owned by the Terraform resource. When
// recreate is true an existing secret with the same name is deleted first;
// otherwise an AlreadyExists response from the API is tolerated as success.
// labelsToOmit lists label keys to exclude from the generated secret.
func (r ReconcileTerraform) createSecret(ctx context.Context, tf *tfv1alpha2.Terraform, name, namespace string, data map[string][]byte, recreate bool, labelsToOmit []string, runOpts TaskOptions) error {
	kind := "Secret"
	// Copy the labels into a fresh map; runOpts.resourceLabels is shared by
	// the whole TaskOptions struct and must not be mutated here.
	labels := make(map[string]string, len(runOpts.resourceLabels))
	for k, v := range runOpts.resourceLabels {
		labels[k] = v
	}
	for _, omit := range labelsToOmit {
		delete(labels, omit)
	}
	secret := runOpts.generateSecret(name, namespace, data, labels)
	controllerutil.SetControllerReference(tf, secret, r.Scheme)
	if recreate {
		if err := r.deleteSecretIfExists(ctx, secret.Name, secret.Namespace); err != nil {
			return err
		}
	}
	err := r.Client.Create(ctx, secret)
	if err == nil {
		r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, secret.Name))
		return nil
	}
	if !recreate && errors.IsAlreadyExists(err) {
		// The secret already exists and we were not asked to replace it;
		// that is acceptable.
		return nil
	}
	r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
	return err
}
// checkServiceAccountExists looks up a ServiceAccount by key. It returns the
// object, whether it exists, and any lookup error other than NotFound
// (reported as found=false with a nil error).
func (r ReconcileTerraform) checkServiceAccountExists(ctx context.Context, lookupKey types.NamespacedName) (*corev1.ServiceAccount, bool, error) {
	sa := &corev1.ServiceAccount{}
	if err := r.Client.Get(ctx, lookupKey, sa); err != nil {
		if errors.IsNotFound(err) {
			return sa, false, nil
		}
		return sa, false, err
	}
	return sa, true, nil
}
// deleteServiceAccountIfExists removes the named ServiceAccount when
// present. A missing ServiceAccount is not an error.
func (r ReconcileTerraform) deleteServiceAccountIfExists(ctx context.Context, name, namespace string) error {
	sa, found, err := r.checkServiceAccountExists(ctx, types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		return err
	}
	if !found {
		return nil
	}
	return r.Client.Delete(ctx, sa)
}
// createServiceAccount (re)creates the task's ServiceAccount, deleting any
// stale copy first. Outcomes are recorded as events on the Terraform
// resource.
func (r ReconcileTerraform) createServiceAccount(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "ServiceAccount"
	sa := runOpts.generateServiceAccount()
	controllerutil.SetControllerReference(tf, sa, r.Scheme)
	if err := r.deleteServiceAccountIfExists(ctx, sa.Name, sa.Namespace); err != nil {
		return err
	}
	if err := r.Client.Create(ctx, sa); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, sa.Name))
	return nil
}
// checkRoleExists looks up a Role by key. It returns the object, whether it
// exists, and any lookup error other than NotFound (reported as found=false
// with a nil error).
func (r ReconcileTerraform) checkRoleExists(ctx context.Context, lookupKey types.NamespacedName) (*rbacv1.Role, bool, error) {
	role := &rbacv1.Role{}
	if err := r.Client.Get(ctx, lookupKey, role); err != nil {
		if errors.IsNotFound(err) {
			return role, false, nil
		}
		return role, false, err
	}
	return role, true, nil
}
// deleteRoleIfExists removes the named Role when present. A missing Role is
// not an error.
func (r ReconcileTerraform) deleteRoleIfExists(ctx context.Context, name, namespace string) error {
	role, found, err := r.checkRoleExists(ctx, types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		return err
	}
	if !found {
		return nil
	}
	return r.Client.Delete(ctx, role)
}
// createRole (re)creates the task's Role, deleting any stale copy first.
// Outcomes are recorded as events on the Terraform resource.
func (r ReconcileTerraform) createRole(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "Role"
	role := runOpts.generateRole()
	controllerutil.SetControllerReference(tf, role, r.Scheme)
	if err := r.deleteRoleIfExists(ctx, role.Name, role.Namespace); err != nil {
		return err
	}
	if err := r.Client.Create(ctx, role); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, role.Name))
	return nil
}
// checkRoleBindingExists looks up a RoleBinding by key. It returns the
// object, whether it exists, and any lookup error other than NotFound
// (reported as found=false with a nil error).
func (r ReconcileTerraform) checkRoleBindingExists(ctx context.Context, lookupKey types.NamespacedName) (*rbacv1.RoleBinding, bool, error) {
	rb := &rbacv1.RoleBinding{}
	if err := r.Client.Get(ctx, lookupKey, rb); err != nil {
		if errors.IsNotFound(err) {
			return rb, false, nil
		}
		return rb, false, err
	}
	return rb, true, nil
}
// deleteRoleBindingIfExists removes the named RoleBinding when present. A
// missing RoleBinding is not an error.
func (r ReconcileTerraform) deleteRoleBindingIfExists(ctx context.Context, name, namespace string) error {
	rb, found, err := r.checkRoleBindingExists(ctx, types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		return err
	}
	if !found {
		return nil
	}
	return r.Client.Delete(ctx, rb)
}
// createRoleBinding (re)creates the task's RoleBinding, deleting any stale
// copy first. Outcomes are recorded as events on the Terraform resource.
func (r ReconcileTerraform) createRoleBinding(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "RoleBinding"
	rb := runOpts.generateRoleBinding()
	controllerutil.SetControllerReference(tf, rb, r.Scheme)
	if err := r.deleteRoleBindingIfExists(ctx, rb.Name, rb.Namespace); err != nil {
		return err
	}
	if err := r.Client.Create(ctx, rb); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, rb.Name))
	return nil
}
// createPod creates the task Pod owned by the Terraform resource and emits
// an event recording the outcome. Unlike the other create* helpers it never
// deletes first: pods use GenerateName, so each create yields a new pod.
func (r ReconcileTerraform) createPod(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "Pod"
	pod := runOpts.generatePod()
	controllerutil.SetControllerReference(tf, pod, r.Scheme)
	if err := r.Client.Create(ctx, pod); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, pod.Name))
	return nil
}
// int32p returns a pointer to a copy of i, handy for populating optional
// *int32 fields in Kubernetes specs.
func int32p(i int32) *int32 {
	v := i
	return &v
}
// createJob creates a Job owned by the Terraform resource and emits an
// event recording the outcome.
func (r ReconcileTerraform) createJob(ctx context.Context, tf *tfv1alpha2.Terraform, runOpts TaskOptions) error {
	kind := "Job"
	job := runOpts.generateJob()
	controllerutil.SetControllerReference(tf, job, r.Scheme)
	if err := r.Client.Create(ctx, job); err != nil {
		r.Recorder.Event(tf, "Warning", fmt.Sprintf("%sCreateError", kind), fmt.Sprintf("Could not create %s %v", kind, err))
		return err
	}
	r.Recorder.Event(tf, "Normal", "SuccessfulCreate", fmt.Sprintf("Created %s: '%s'", kind, job.Name))
	return nil
}
// generateJob wraps the generated task pod spec in a batchv1.Job. Job pods
// may only use the OnFailure or Never restart policies, so Always is mapped
// to OnFailure and anything else (besides OnFailure itself) to Never.
func (r TaskOptions) generateJob() *batchv1.Job {
	pod := r.generatePod()
	switch pod.Spec.RestartPolicy {
	case corev1.RestartPolicyAlways, corev1.RestartPolicyOnFailure:
		pod.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
	default:
		pod.Spec.RestartPolicy = corev1.RestartPolicyNever
	}
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:         pod.Name,
			GenerateName: pod.GenerateName,
			Labels:       pod.Labels,
			Annotations:  pod.Annotations,
			Namespace:    pod.Namespace,
		},
		Spec: batchv1.JobSpec{
			// Effectively "retry forever"; the task itself decides when to
			// stop failing.
			BackoffLimit: int32p(1000000),
			Template: corev1.PodTemplateSpec{
				Spec: pod.Spec,
			},
		},
	}
	return job
}
// generateConfigMap renders the versioned ConfigMap that carries the
// main-module addon files for this task run.
func (r TaskOptions) generateConfigMap() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.versionedName,
			Namespace: r.namespace,
			Labels:    r.resourceLabels,
		},
		Data: r.mainModulePluginData,
	}
}
// generateServiceAccount renders the task's ServiceAccount, merging in any
// annotations requested by the configured credentials. An AWS IRSA role ARN
// is exposed via the standard EKS annotation.
func (r TaskOptions) generateServiceAccount() *corev1.ServiceAccount {
	annotations := map[string]string{}
	for _, cred := range r.credentials {
		for k, v := range cred.ServiceAccountAnnotations {
			annotations[k] = v
		}
		if irsa := cred.AWSCredentials.IRSA; irsa != "" {
			annotations["eks.amazonaws.com/role-arn"] = irsa
		}
	}
	return &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:        r.serviceAccount, // "tf-" + r.versionedName
			Namespace:   r.namespace,
			Annotations: annotations,
			Labels:      r.resourceLabels,
		},
	}
}
// generateRole builds the Role for the task pod: full access to configmaps,
// read access to its own Terraform resource, and — when a "kubernetes"
// backend override is configured — full access to secrets and coordination
// leases so terraform can keep state and locks in-cluster. Any extra
// policyRules from the TaskOptions are appended last.
func (r TaskOptions) generateRole() *rbacv1.Role {
	// TODO tighten up default rbac security since all the cm and secret names
	// can be predicted.
	rules := []rbacv1.PolicyRule{
		{
			Verbs:     []string{"*"},
			APIGroups: []string{""},
			Resources: []string{"configmaps"},
		},
		{
			Verbs:         []string{"get"},
			APIGroups:     []string{"tf.isaaguilar.com"},
			Resources:     []string{"terraforms"},
			ResourceNames: []string{r.resourceName},
		},
	}
	if backend := r.mainModulePluginData["backend_override.tf"]; backend != "" {
		// Scan the backend stanza the way it is typically written:
		//
		//   terraform {
		//     backend "kubernetes" {
		//       ...
		//     }
		//   }
		//
		// The trailing space in "backend " is intentional since that is
		// generally how it is formatted.
		for _, line := range strings.Split(backend, "\n") {
			if strings.Contains(line, "backend ") && strings.Contains(line, "kubernetes") {
				rules = append(rules,
					rbacv1.PolicyRule{
						Verbs:     []string{"*"},
						APIGroups: []string{""},
						Resources: []string{"secrets"},
					},
					rbacv1.PolicyRule{
						Verbs:     []string{"*"},
						APIGroups: []string{"coordination.k8s.io"},
						Resources: []string{"leases"},
					},
				)
				break
			}
		}
	}
	rules = append(rules, r.policyRules...)
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.versionedName,
			Namespace: r.namespace,
			Labels:    r.resourceLabels,
		},
		Rules: rules,
	}
}
// generateRoleBinding binds the task's ServiceAccount to the Role of the
// same versioned name in the task's namespace.
func (r TaskOptions) generateRoleBinding() *rbacv1.RoleBinding {
	meta := metav1.ObjectMeta{
		Name:      r.versionedName,
		Namespace: r.namespace,
		Labels:    r.resourceLabels,
	}
	subject := rbacv1.Subject{
		Kind:      "ServiceAccount",
		Name:      r.serviceAccount,
		Namespace: r.namespace,
	}
	roleRef := rbacv1.RoleRef{
		Kind:     "Role",
		Name:     r.versionedName,
		APIGroup: "rbac.authorization.k8s.io",
	}
	return &rbacv1.RoleBinding{
		ObjectMeta: meta,
		Subjects:   []rbacv1.Subject{subject},
		RoleRef:    roleRef,
	}
}
// generatePVC renders the ReadWriteOnce PersistentVolumeClaim that backs
// the runner's home directory, requesting the given size and (optionally)
// storage class.
func (r TaskOptions) generatePVC(size resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim {
	spec := corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{
			corev1.ReadWriteOnce,
		},
		StorageClassName: storageClassName,
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: size,
			},
		},
	}
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      r.prefixedName,
			Namespace: r.namespace,
			Labels:    r.resourceLabels,
		},
		Spec: spec,
	}
}
// generatePod assembles the Pod that executes the current task. It wires up,
// in order:
//   - TFO_* environment variables describing the task, resource, generation
//     paths, terraform version, and output-handling flags;
//   - the "tfohome" volume (backed by the resource's PVC) mounted at
//     /home/tfo-runner;
//   - optional repo/subdir env vars, a configmap-source volume, the
//     main-module addons configmap volume, a git-askpass secret volume, and
//     an ssh-keys secret volume;
//   - credential envFrom sources and KIAM annotations;
//   - a non-root security context (uid/gid 2000, fsGroup 2000) plus
//     inherited affinity/nodeSelector/tolerations.
func (r TaskOptions) generatePod() *corev1.Pod {
	home := "/home/tfo-runner"
	generateName := r.versionedName + "-" + r.task.String() + "-"
	generationPath := fmt.Sprintf("%s/generations/%d", home, r.generation)
	// NOTE(review): runnerLabels and annotations alias the maps stored on
	// TaskOptions; the writes further down mutate those shared maps — TODO
	// confirm that is intended.
	runnerLabels := r.labels
	annotations := r.annotations
	envFrom := r.envFrom
	envs := r.env
	envs = append(envs, []corev1.EnvVar{
		{
			// Expose the pod's own UID via the downward API.
			Name: "POD_UID",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "metadata.uid",
				},
			},
		},
		{
			/*
				What is the significance of having an env about the TFO_RUNNER?
				Only used to idenify the taskType for the log.out file. This
				should simply be the taskType name.
			*/
			Name:  "TFO_TASK",
			Value: r.task.String(),
		},
		{
			Name:  "TFO_TASK_EXEC_URL_SOURCE",
			Value: r.urlSource,
		},
		{
			Name:  "TFO_TASK_EXEC_CONFIGMAP_SOURCE_NAME",
			Value: r.configMapSourceName,
		},
		{
			Name:  "TFO_TASK_EXEC_CONFIGMAP_SOURCE_KEY",
			Value: r.configMapSourceKey,
		},
		{
			Name:  "TFO_RESOURCE",
			Value: r.resourceName,
		},
		{
			Name:  "TFO_RESOURCE_UUID",
			Value: r.resourceUUID,
		},
		{
			Name:  "TFO_NAMESPACE",
			Value: r.namespace,
		},
		{
			Name:  "TFO_GENERATION",
			Value: fmt.Sprintf("%d", r.generation),
		},
		{
			Name:  "TFO_GENERATION_PATH",
			Value: generationPath,
		},
		{
			Name:  "TFO_MAIN_MODULE",
			Value: generationPath + "/main",
		},
		{
			Name:  "TFO_TERRAFORM_VERSION",
			Value: r.terraformVersion,
		},
		{
			Name:  "TFO_SAVE_OUTPUTS",
			Value: strconv.FormatBool(r.saveOutputs),
		},
		{
			Name:  "TFO_OUTPUTS_SECRET_NAME",
			Value: r.outputsSecretName,
		},
		{
			Name:  "TFO_OUTPUTS_TO_INCLUDE",
			Value: strings.Join(r.outputsToInclude, ","),
		},
		{
			Name:  "TFO_OUTPUTS_TO_OMIT",
			Value: strings.Join(r.outputsToOmit, ","),
		},
		{
			Name:  "TFO_REQUIRE_APPROVAL",
			Value: strconv.FormatBool(r.requireApproval),
		},
	}...)
	// Only set when enabled: the runner treats presence of the var as the
	// signal.
	if r.cleanupDisk {
		envs = append(envs, corev1.EnvVar{
			Name:  "TFO_CLEANUP_DISK",
			Value: "true",
		})
	}
	volumes := []corev1.Volume{
		{
			Name: "tfohome",
			VolumeSource: corev1.VolumeSource{
				//
				// TODO add an option to the tf to use host or pvc
				// for the plan.
				//
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: r.prefixedName,
					ReadOnly:  false,
				},
				//
				// TODO if host is used, develop a cleanup plan so
				// so the volume does not fill up with old data
				//
				// TODO if host is used, affinity rules must be placed
				// that will ensure all the pods use the same host
				//
				// HostPath: &corev1.HostPathVolumeSource{
				// 	Path: "/mnt",
				// },
			},
		},
	}
	volumeMounts := []corev1.VolumeMount{
		{
			Name:      "tfohome",
			MountPath: home,
			ReadOnly:  false,
		},
	}
	envs = append(envs, corev1.EnvVar{
		Name:  "TFO_ROOT_PATH",
		Value: home,
	})
	// Git-sourced main module: tell the runner the repo, ref, and subdir.
	if r.terraformModuleParsed.Repo != "" {
		envs = append(envs, []corev1.EnvVar{
			{
				Name:  "TFO_MAIN_MODULE_REPO",
				Value: r.terraformModuleParsed.Repo,
			},
			{
				Name:  "TFO_MAIN_MODULE_REPO_REF",
				Value: r.terraformModuleParsed.Hash,
			},
		}...)
		if len(r.terraformModuleParsed.Files) > 0 {
			// The terraform module may be in a sub-directory of the repo
			// Add this subdir value to envs so the pod can properly fetch it
			value := r.terraformModuleParsed.Files[0]
			if value == "" {
				value = "."
			}
			envs = append(envs, []corev1.EnvVar{
				{
					Name:  "TFO_MAIN_MODULE_REPO_SUBDIR",
					Value: value,
				},
			}...)
		} else {
			// TODO maybe set a default in r.stack.subdirs[0] so we can get rid
			// of this if statement
			envs = append(envs, []corev1.EnvVar{
				{
					Name:  "TFO_MAIN_MODULE_REPO_SUBDIR",
					Value: ".",
				},
			}...)
		}
	}
	// Optional configmap-sourced task script, mounted read-only under /tmp.
	configMapSourceVolumeName := "config-map-source"
	configMapSourcePath := "/tmp/config-map-source"
	if r.configMapSourceName != "" && r.configMapSourceKey != "" {
		volumes = append(volumes, corev1.Volume{
			Name: configMapSourceVolumeName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: r.configMapSourceName,
					},
				},
			},
		})
		volumeMounts = append(volumeMounts, corev1.VolumeMount{
			Name:      configMapSourceVolumeName,
			MountPath: configMapSourcePath,
		})
	}
	// The path env is always set, even when the volume above is absent.
	envs = append(envs, []corev1.EnvVar{
		{
			Name:  "TFO_TASK_EXEC_CONFIGMAP_SOURCE_PATH",
			Value: configMapSourcePath,
		},
	}...)
	// Main-module addons configmap (generated per version) is always mounted.
	mainModulePluginsConfigMapName := "main-module-addons"
	mainModulePluginsConfigMapPath := "/tmp/main-module-addons"
	volumes = append(volumes, []corev1.Volume{
		{
			Name: mainModulePluginsConfigMapName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: r.versionedName,
					},
				},
			},
		},
	}...)
	volumeMounts = append(volumeMounts, []corev1.VolumeMount{
		{
			Name:      mainModulePluginsConfigMapName,
			MountPath: mainModulePluginsConfigMapPath,
		},
	}...)
	envs = append(envs, []corev1.EnvVar{
		{
			Name:  "TFO_MAIN_MODULE_ADDONS",
			Value: mainModulePluginsConfigMapPath,
		},
	}...)
	// Git askpass script from the versioned secret; optional and executable.
	optional := true
	xmode := int32(0775)
	volumes = append(volumes, corev1.Volume{
		Name: "gitaskpass",
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{
				SecretName: r.versionedName,
				Optional:   &optional,
				Items: []corev1.KeyToPath{
					{
						Key:  "gitAskpass",
						Path: "GIT_ASKPASS",
						Mode: &xmode,
					},
				},
			},
		},
	})
	volumeMounts = append(volumeMounts, []corev1.VolumeMount{
		{
			Name:      "gitaskpass",
			MountPath: "/git/askpass",
		},
	}...)
	envs = append(envs, []corev1.EnvVar{
		{
			Name:  "GIT_ASKPASS",
			Value: "/git/askpass/GIT_ASKPASS",
		},
	}...)
	// SSH keys: every key in the versioned secret except gitAskpass is
	// projected into /tmp/ssh.
	sshMountName := "ssh"
	sshMountPath := "/tmp/ssh"
	mode := int32(0775)
	sshConfigItems := []corev1.KeyToPath{}
	keysToIgnore := []string{"gitAskpass"}
	for key := range r.secretData {
		if utils.ListContainsStr(keysToIgnore, key) {
			continue
		}
		sshConfigItems = append(sshConfigItems, corev1.KeyToPath{
			Key:  key,
			Path: key,
			Mode: &mode,
		})
	}
	volumes = append(volumes, []corev1.Volume{
		{
			Name: sshMountName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName:  r.versionedName,
					DefaultMode: &mode,
					Optional:    &optional,
					Items:       sshConfigItems,
				},
			},
		},
	}...)
	volumeMounts = append(volumeMounts, []corev1.VolumeMount{
		{
			Name:      sshMountName,
			MountPath: sshMountPath,
		},
	}...)
	envs = append(envs, []corev1.EnvVar{
		{
			Name:  "TFO_SSH",
			Value: sshMountPath,
		},
	}...)
	// KIAM: request the AWS role via pod annotation.
	for _, c := range r.credentials {
		if c.AWSCredentials.KIAM != "" {
			annotations["iam.amazonaws.com/role"] = c.AWSCredentials.KIAM
		}
	}
	// Credential secrets become envFrom sources on the task container.
	for _, c := range r.credentials {
		if (tfv1alpha2.SecretNameRef{}) != c.SecretNameRef {
			envFrom = append(envFrom, []corev1.EnvFromSource{
				{
					SecretRef: &corev1.SecretEnvSource{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: c.SecretNameRef.Name,
						},
					},
				},
			}...)
		}
	}
	// labels for all resources for use in queries
	for key, value := range r.resourceLabels {
		runnerLabels[key] = value
	}
	runnerLabels["app.kubernetes.io/instance"] = r.task.String()
	// Make sure to use the same uid for containers so the dir in the
	// PersistentVolume have the correct permissions for the user
	user := int64(2000)
	group := int64(2000)
	runAsNonRoot := true
	securityContext := &corev1.SecurityContext{
		RunAsUser:    &user,
		RunAsGroup:   &group,
		RunAsNonRoot: &runAsNonRoot,
	}
	restartPolicy := r.restartPolicy
	containers := []corev1.Container{}
	containers = append(containers, corev1.Container{
		Name:            "task",
		SecurityContext: securityContext,
		Image:           r.image,
		ImagePullPolicy: r.imagePullPolicy,
		EnvFrom:         envFrom,
		Env:             envs,
		VolumeMounts:    volumeMounts,
	})
	podSecurityContext := corev1.PodSecurityContext{
		FSGroup: &user,
	}
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: generateName,
			Namespace:    r.namespace,
			Labels:       runnerLabels,
			Annotations:  annotations,
		},
		Spec: corev1.PodSpec{
			Affinity:           r.inheritedAffinity,
			NodeSelector:       r.inheritedNodeSelector,
			Tolerations:        r.inheritedTolerations,
			SecurityContext:    &podSecurityContext,
			ServiceAccountName: r.serviceAccount,
			RestartPolicy:      restartPolicy,
			Containers:         containers,
			Volumes:            volumes,
		},
	}
	return pod
}
// run ensures every supporting resource for the task pod exists, then
// creates the pod itself. On first install or a new generation all
// dependents are (re)created: envFrom sources, PVC, the versioned secret and
// configmap, RBAC, an optional ServiceAccount, and the outputs secret. On
// subsequent runs of the same generation it only verifies each dependent
// still exists, failing fast with a descriptive error when one is missing.
func (r ReconcileTerraform) run(ctx context.Context, reqLogger logr.Logger, tf *tfv1alpha2.Terraform, runOpts TaskOptions, isNewGeneration, isFirstInstall bool) (err error) {
	if isFirstInstall || isNewGeneration {
		if err := r.createEnvFromSources(ctx, tf); err != nil {
			return err
		}
		if err := r.createPVC(ctx, tf, runOpts); err != nil {
			return err
		}
		// recreate=true: the versioned secret is always replaced on a new
		// generation.
		if err := r.createSecret(ctx, tf, runOpts.versionedName, runOpts.namespace, runOpts.secretData, true, []string{}, runOpts); err != nil {
			return err
		}
		if err := r.createConfigMap(ctx, tf, runOpts); err != nil {
			return err
		}
		if err := r.createRoleBinding(ctx, tf, runOpts); err != nil {
			return err
		}
		if err := r.createRole(ctx, tf, runOpts); err != nil {
			return err
		}
		if tf.Spec.ServiceAccount == "" {
			// since sa is not defined in the resource spec, it must be created
			if err := r.createServiceAccount(ctx, tf, runOpts); err != nil {
				return err
			}
		}
		labelsToOmit := []string{}
		if runOpts.stripGenerationLabelOnOutputsSecret {
			labelsToOmit = append(labelsToOmit, "terraforms.tf.isaaguilar.com/generation")
		}
		// recreate=false: an existing outputs secret is kept as-is.
		if err := r.createSecret(ctx, tf, runOpts.outputsSecretName, runOpts.namespace, map[string][]byte{}, false, labelsToOmit, runOpts); err != nil {
			return err
		}
	} else {
		// check resources exists
		lookupKey := types.NamespacedName{
			Name:      runOpts.prefixedName,
			Namespace: runOpts.namespace,
		}
		if _, found, err := r.checkPersistentVolumeClaimExists(ctx, lookupKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find PersistentVolumeClaim '%s'", lookupKey)
		}
		lookupVersionedKey := types.NamespacedName{
			Name:      runOpts.versionedName,
			Namespace: runOpts.namespace,
		}
		if _, found, err := r.checkConfigMapExists(ctx, lookupVersionedKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find ConfigMap '%s'", lookupVersionedKey)
		}
		if _, found, err := r.checkSecretExists(ctx, lookupVersionedKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find Secret '%s'", lookupVersionedKey)
		}
		if _, found, err := r.checkRoleBindingExists(ctx, lookupVersionedKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find RoleBinding '%s'", lookupVersionedKey)
		}
		if _, found, err := r.checkRoleExists(ctx, lookupVersionedKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find Role '%s'", lookupVersionedKey)
		}
		serviceAccountLookupKey := types.NamespacedName{
			Name:      runOpts.serviceAccount,
			Namespace: runOpts.namespace,
		}
		if _, found, err := r.checkServiceAccountExists(ctx, serviceAccountLookupKey); err != nil {
			return err
		} else if !found {
			return fmt.Errorf("could not find ServiceAccount '%s'", serviceAccountLookupKey)
		}
	}
	if err := r.createPod(ctx, tf, runOpts); err != nil {
		return err
	}
	return nil
}
// createGitAskpass renders a GIT_ASKPASS shell script that echoes the token
// stored under tokenSecret.Key in the referenced secret. Returns an error
// when the secret cannot be loaded or the key is absent.
func (r ReconcileTerraform) createGitAskpass(ctx context.Context, tokenSecret tfv1alpha2.TokenSecretRef) ([]byte, error) {
	secret, err := r.loadSecret(ctx, tokenSecret.Name, tokenSecret.Namespace)
	if err != nil {
		return []byte{}, err
	}
	token, ok := secret.Data[tokenSecret.Key]
	if !ok {
		// Bug fix: report the missing key NAME. The original formatted the
		// (zero-value) data bytes, producing "did not contain ''".
		return []byte{}, fmt.Errorf("secret '%s' did not contain '%s'", secret.Name, tokenSecret.Key)
	}
	s := heredoc.Docf(`
		#!/bin/sh
		exec echo "%s"
	`, token)
	gitAskpass := []byte(s)
	return gitAskpass, nil
}
// loadSecret fetches a Secret by name, defaulting the namespace to
// "default" when none is given.
func (r ReconcileTerraform) loadSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) {
	if namespace == "" {
		namespace = "default"
	}
	secret := &corev1.Secret{}
	err := r.Client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret)
	return secret, err
}
// cacheNodeSelectors discovers the operator's own Deployment (by walking the
// pod -> ReplicaSet -> Deployment owner chain) and caches its affinity,
// nodeSelector, and tolerations so task pods can inherit them. Failures are
// logged and swallowed (nil is returned) on purpose: inheriting scheduling
// config is best-effort and must never block reconciliation.
func (r ReconcileTerraform) cacheNodeSelectors(ctx context.Context, logger logr.Logger) error {
	var affinity *corev1.Affinity
	var tolerations []corev1.Toleration
	var nodeSelector map[string]string
	// Nothing to do when no inheritance is enabled.
	if !r.InheritAffinity && !r.InheritNodeSelector && !r.InheritTolerations {
		return nil
	}
	// Skip the API walk when every enabled key is already cached.
	foundAll := true
	_, found := r.Cache.Get(r.AffinityCacheKey)
	if r.InheritAffinity && !found {
		foundAll = false
	}
	_, found = r.Cache.Get(r.NodeSelectorCacheKey)
	if r.InheritNodeSelector && !found {
		foundAll = false
	}
	_, found = r.Cache.Get(r.TolerationsCacheKey)
	if r.InheritTolerations && !found {
		foundAll = false
	}
	if foundAll {
		return nil
	}
	// POD_NAMESPACE and POD_NAME are expected to be injected via the
	// downward API on the operator's own pod.
	podNamespace := os.Getenv("POD_NAMESPACE")
	if podNamespace == "" {
		logger.Info("POD_NAMESPACE not found but required to get node selectors configs")
		return nil
	}
	podName := os.Getenv("POD_NAME")
	if podName == "" {
		logger.Info("POD_NAME not found but required to get node selectors configs")
		return nil
	}
	podNamespacedName := types.NamespacedName{Namespace: podNamespace, Name: podName}
	pod := corev1.Pod{}
	err := r.Client.Get(ctx, podNamespacedName, &pod)
	if err != nil {
		logger.Info(fmt.Sprintf("Could not get pod '%s'", podNamespacedName.String()))
		return nil
	}
	// Expect exactly one ReplicaSet owner (the standard Deployment layout).
	if len(pod.ObjectMeta.OwnerReferences) != 1 {
		logger.Info(fmt.Sprintf("unexpected ownership for pod '%s'", podNamespacedName.String()))
		return nil
	}
	if pod.ObjectMeta.OwnerReferences[0].Kind != "ReplicaSet" {
		logger.Info(fmt.Sprintf("unexpected ownership kind for pod '%s'", podNamespacedName.String()))
		return nil
	}
	replicaSetName := pod.ObjectMeta.OwnerReferences[0].Name
	replicaSetNamespacedName := types.NamespacedName{Namespace: podNamespace, Name: replicaSetName}
	replicaSet := appsv1.ReplicaSet{}
	err = r.Client.Get(ctx, replicaSetNamespacedName, &replicaSet)
	if err != nil {
		logger.Info(fmt.Sprintf("Could not get replicaset '%s'", replicaSetNamespacedName.String()))
		return nil
	}
	// And expect the ReplicaSet to be owned by exactly one Deployment.
	if len(replicaSet.ObjectMeta.OwnerReferences) != 1 {
		logger.Info(fmt.Sprintf("unexpected ownership for replicaSet '%s'", replicaSetNamespacedName.String()))
		return nil
	}
	if replicaSet.ObjectMeta.OwnerReferences[0].Kind != "Deployment" {
		logger.Info(fmt.Sprintf("unexpected ownership kind for replicaSet '%s'", replicaSetNamespacedName.String()))
		return nil
	}
	deploymentName := replicaSet.ObjectMeta.OwnerReferences[0].Name
	deploymentNamespacedName := types.NamespacedName{Namespace: podNamespace, Name: deploymentName}
	deployment := appsv1.Deployment{}
	err = r.Client.Get(ctx, deploymentNamespacedName, &deployment)
	if err != nil {
		logger.Info(fmt.Sprintf("Could not get deployment '%s'", deploymentNamespacedName.String()))
		return nil
	}
	// Cache whichever scheduling fields inheritance is enabled for; entries
	// never expire.
	affinity = deployment.Spec.Template.Spec.Affinity
	tolerations = deployment.Spec.Template.Spec.Tolerations
	nodeSelector = deployment.Spec.Template.Spec.NodeSelector
	if r.InheritAffinity {
		r.Cache.Set(r.AffinityCacheKey, affinity, localcache.NoExpiration)
	}
	if r.InheritNodeSelector {
		r.Cache.Set(r.NodeSelectorCacheKey, nodeSelector, localcache.NoExpiration)
	}
	if r.InheritTolerations {
		r.Cache.Set(r.TolerationsCacheKey, tolerations, localcache.NoExpiration)
	}
	return nil
}
// generateSecret renders an Opaque Secret with the given name, namespace,
// data, and labels.
func (r TaskOptions) generateSecret(name, namespace string, data map[string][]byte, labels map[string]string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Type: corev1.SecretTypeOpaque,
		Data: data,
	}
}
// loadPassword fetches the named Secret and returns the string value stored
// under key. A missing key and an empty value are both reported as errors.
func loadPassword(ctx context.Context, k8sclient client.Client, key, name, namespace string) (string, error) {
	secret := &corev1.Secret{}
	namespacedName := types.NamespacedName{Namespace: namespace, Name: name}
	if err := k8sclient.Get(ctx, namespacedName, secret); err != nil {
		return "", fmt.Errorf("could not get secret: %v", err)
	}
	// Direct map lookup instead of scanning every entry.
	password := secret.Data[key]
	if len(password) == 0 {
		// Bug fix: the original formatted "%v" with err, which is always nil
		// here, yielding "... in secret: <nil>". Name the secret instead.
		return "", fmt.Errorf("unable to locate '%s' in secret '%s'", key, name)
	}
	return string(password), nil
}
// forcedRegexp is the regular expression that finds forced getters. This
// syntax is schema::url, example: git::https://foo.com
var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)

// getForcedGetter splits a source address into its forced-getter prefix and
// the bare URL. When no force syntax is present, the forced getter is ""
// and the source is returned unchanged.
func getForcedGetter(src string) (string, string) {
	ms := forcedRegexp.FindStringSubmatch(src)
	if ms == nil {
		return "", src
	}
	return ms[1], ms[2]
}
// sshPattern matches scp-like git addresses of the form [user@]host:path.
var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$")

// sshDetector rewrites scp-like addresses ([user@]host:path[?query]) into
// canonical ssh:// URLs. It reports false when src does not match.
type sshDetector struct{}

func (s *sshDetector) Detect(src, _ string) (string, bool, error) {
	m := sshPattern.FindStringSubmatch(src)
	if m == nil {
		return "", false, nil
	}
	pathPart, query, hasQuery := strings.Cut(m[3], "?")
	u := url.URL{
		Scheme: "ssh",
		User:   url.User(m[1]),
		Host:   m[2],
		Path:   pathPart,
	}
	if hasQuery {
		q, err := url.ParseQuery(query)
		if err != nil {
			return "", false, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
		}
		// Re-encode so the query is normalized.
		u.RawQuery = q.Encode()
	}
	return u.String(), true, nil
}
// scmType names a source-control scheme (e.g. "git") used in scm host maps.
type scmType string

var gitScmType scmType = "git"

// getParsedAddress normalizes a module/source address into a ParsedAddress.
// It runs go-getter detection (with the scp-style sshDetector first),
// separates any forced getter ("scheme::url"), resolves the effective
// scheme (github.com and scmMap hosts override the URL scheme; a forced
// getter overrides everything), extracts the ref (default "master"),
// splits "//"-separated subdirs, and fills in default ports for ssh/https.
func getParsedAddress(address, path string, useAsVar bool, scmMap map[string]scmType) (ParsedAddress, error) {
	detectors := []getter.Detector{
		new(sshDetector),
	}
	detectors = append(detectors, getter.Detectors...)
	output, err := getter.Detect(address, "moduleDir", detectors)
	if err != nil {
		return ParsedAddress{}, err
	}
	forcedDetect, result := getForcedGetter(output)
	urlSource, filesSource := getter.SourceDirSubdir(result)
	parsedURL, err := url.Parse(urlSource)
	if err != nil {
		return ParsedAddress{}, err
	}
	scheme := parsedURL.Scheme
	// TODO URL parse rules: github.com should check the url is 'host/user/repo'
	// Currently the below is just a host check which isn't 100% correct
	if utils.ListContainsStr([]string{"github.com"}, parsedURL.Host) {
		scheme = "git"
	}
	// Check scm configuration for hosts and what scheme to map them as
	// Use the scheme of the scm configuration.
	// If git && another scm is defined in the scm configuration, select git.
	// If the user needs another scheme, the user must use forceDetect
	// (ie scheme::url://host...)
	hosts := []string{}
	for host := range scmMap {
		hosts = append(hosts, host)
	}
	if utils.ListContainsStr(hosts, parsedURL.Host) {
		scheme = string(scmMap[parsedURL.Host])
	}
	// forceDetect shall override all other schemes
	if forcedDetect != "" {
		scheme = forcedDetect
	}
	y, err := url.ParseQuery(parsedURL.RawQuery)
	if err != nil {
		return ParsedAddress{}, err
	}
	// The "ref" query parameter selects the revision; default to master.
	hash := y.Get("ref")
	if hash == "" {
		hash = "master"
	}
	// subdir can contain a list separated by double slashes
	files := strings.Split(filesSource, "//")
	if len(files) == 1 && files[0] == "" {
		files = []string{"."}
	}
	// Assign default ports for common protos
	port := parsedURL.Port()
	if port == "" {
		if parsedURL.Scheme == "ssh" {
			port = "22"
		} else if parsedURL.Scheme == "https" {
			port = "443"
		}
	}
	p := ParsedAddress{
		DetectedScheme: scheme,
		Path:           path,
		UseAsVar:       useAsVar,
		Url:            parsedURL.String(),
		Files:          files,
		Hash:           hash,
		UrlScheme:      parsedURL.Scheme,
		Host:           parsedURL.Host,
		// Uri and Repo drop the query string from the request URI / full URL.
		Uri:  strings.Split(parsedURL.RequestURI(), "?")[0],
		Port: port,
		User: parsedURL.User.Username(),
		Repo: strings.Split(parsedURL.String(), "?")[0],
	}
	return p, nil
}
|
package main
import (
"fmt"
"os"
"strings"
)
// directories lists the filesystem paths whose permissions this tool audits.
// /home/continuum and /storage have dedicated expected modes; every other
// entry is expected to be 0700 (see checkDirPerms).
var directories []string = []string{
	"/home/continuum",
	"/storage",
	"/storage01/replication",
	"/storage02/replication",
	"/storage03/replication",
	"/storage04/replication",
	"/storage05/replication",
	"/storage06/replication",
	"/storage07/replication",
	"/storage08/replication",
	"/storage09/replication",
}
// checkDirPerms compares the permission bits of dirname against the mode
// expected for that path and prints a GOOD/BAD line including the hostname.
//
// Expected modes:
//   - /home/continuum: -rwxr-xr-x (0755)
//   - /storage:        -rwxrwxrwx (0777)
//   - anything else:   -rwx------ (0700)
func checkDirPerms(dirname string) {
	hostname, _ := os.Hostname() // best effort; empty string on error
	fileInfo, err := os.Lstat(dirname)
	if err != nil {
		fmt.Printf("Error getting directory info for dir: %s\n", dirname)
		return
	}
	// Perm() keeps only the permission bits (the leading '-' in the string
	// form); Trim is defensive and preserves the original output format.
	mode := strings.Trim(fileInfo.Mode().Perm().String(), "\n")
	// A switch is clearer than the previous if/else chain.
	var wantedMode string
	switch dirname {
	case "/home/continuum":
		wantedMode = "-rwxr-xr-x"
	case "/storage":
		wantedMode = "-rwxrwxrwx"
	default:
		wantedMode = "-rwx------"
	}
	// Direct == replaces strings.Compare: the built-in operators are the
	// idiomatic (and faster) way to test string equality in Go.
	if mode == wantedMode {
		fmt.Println("GOOD " + hostname + " " + dirname + " perms: \t\t", mode)
	} else {
		fmt.Println("BAD " + hostname + " " + dirname + " perms: \t\t", mode)
	}
}
// main audits the permissions of every configured directory in order.
func main() {
	for _, dir := range directories {
		checkDirPerms(dir)
	}
}
|
package enums
// MsgTypes identifies a message type as a 16-bit unsigned value.
type MsgTypes uint16
|
package main
import (
"github.com/gorilla/mux"
"io"
"log"
"net/http"
)
// HealthCheckHandler reports liveness as a JSON payload.
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
	// A very simple health check: if this HTTP endpoint responds
	// successfully, the application is considered healthy.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// Later we could also run PING checks against the DB/cache and include
	// their results in this response.
	io.WriteString(w, `{"alive": true}`)
}
// main wires the /health endpoint into a gorilla/mux router and serves it
// on localhost:8080, exiting fatally if the server stops.
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/health", HealthCheckHandler)
	log.Fatal(http.ListenAndServe("localhost:8080", router))
}
|
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"os"
"github.com/bazelbuild/rules_docker/contrib/go/pkg/metadata"
)
var (
	// outFile is the path of the merged YAML file this program writes.
	outFile = flag.String("outFile", "", "Output merged YAML file to generate.")
)
// strArgList is a flag.Value that accumulates every occurrence of a
// repeatable command line flag into a list.
type strArgList struct {
	// Args holds one entry per occurrence of the flag.
	Args []string
}

// String renders the collected values for help and error output.
func (l *strArgList) String() string {
	return fmt.Sprint(l.Args)
}

// Set records one more occurrence of the flag. It never fails.
func (l *strArgList) Set(value string) error {
	l.Args = append(l.Args, value)
	return nil
}

// Get exposes the collected values; the result may be type-asserted to
// []string.
func (l *strArgList) Get() interface{} {
	return l.Args
}
// metadataYAML stores the contents of one or more YAML file with the following
// top level keys:
// 1. "tags" (list of strings).
// 2. "packages" (list of YAML objects with keys "name" & "version" which are
//    strings).
type metadataYAML struct {
	// Tags is the list of tags read from YAML files with a top level "tags"
	// key.
	Tags []string `yaml:"tags"`
	// Packages is the list of software package entries read from YAML files
	// with a top level "packages" key.
	Packages []metadata.PackageMetadata `yaml:"packages"`
	// tagsLookup maintains a map of tags in the "Tags" field. It is used by
	// merge to deduplicate tags and is not serialized.
	tagsLookup map[string]bool
}
// merge folds the contents of the metadataYAML 'from' into 'm':
//  1. Every tag of 'from' that 'm' does not already have is appended to
//     m.Tags (deduplicated via m.tagsLookup).
//  2. Every package of 'from' is appended to m.Packages; duplicates are
//     deliberately preserved.
//
// The error result is always nil today; it is kept for interface stability.
func (m *metadataYAML) merge(from *metadataYAML) error {
	// Lazily initialize the lookup so merge is safe on a zero-value
	// metadataYAML (the previous code panicked on a nil map write).
	if m.tagsLookup == nil {
		m.tagsLookup = make(map[string]bool)
	}
	for _, t := range from.Tags {
		if m.tagsLookup[t] {
			// This tag has been added already.
			continue
		}
		m.tagsLookup[t] = true
		m.Tags = append(m.Tags, t)
	}
	// Variadic append replaces the element-by-element loop.
	m.Packages = append(m.Packages, from.Packages...)
	return nil
}
// main merges the YAML metadata files named by the repeatable -yamlFile flag
// and writes the combined result to -outFile, exiting fatally on any error.
func main() {
	var inputFiles strArgList
	flag.Var(&inputFiles, "yamlFile", "Path to an input YAML file to process. Can be specified multiple times to process more than one file.")
	flag.Parse()
	log.Println("Running the YAML Metadata merger.")
	for _, name := range inputFiles.Args {
		log.Println("-yamlFile", name)
	}
	log.Println("-outFile", *outFile)
	if len(inputFiles.Args) == 0 {
		log.Fatalf("No input YAML files provided. Use the -yamlFile flag to provide at least 1 YAML file.")
	}
	if *outFile == "" {
		log.Fatalf("-outFile was not specified.")
	}
	// Accumulate every input into a single document.
	merged := metadataYAML{tagsLookup: make(map[string]bool)}
	for _, name := range inputFiles.Args {
		log.Println("Loading metadata from", name)
		content, err := ioutil.ReadFile(name)
		if err != nil {
			log.Fatalf("Unable to read data from %s: %v", name, err)
		}
		parsed := new(metadataYAML)
		if err := yaml.UnmarshalStrict(content, parsed); err != nil {
			log.Fatalf("Unable to parse data read from %s as metadata YAML: %v", name, err)
		}
		if err := merged.merge(parsed); err != nil {
			log.Fatalf("Unable to merge metadata read from %s into a single merged YAML: %v", name, err)
		}
	}
	log.Printf("Merged YAML has %d tags and %d packages.", len(merged.Tags), len(merged.Packages))
	out, err := yaml.Marshal(&merged)
	if err != nil {
		log.Fatalf("Unable to generate a merged YAML blob for the output merged YAML file: %v", err)
	}
	if err := ioutil.WriteFile(*outFile, out, os.FileMode(0644)); err != nil {
		log.Fatalf("Unable to write %d bytes of content to output YAML file %s: %v", len(out), *outFile, err)
	}
	log.Printf("Successfully generated output %s that merged %d YAML files.", *outFile, len(inputFiles.Args))
}
|
package xhtml5_test
import (
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Rendering specs for AsciiDoc unordered lists: each spec feeds an AsciiDoc
// snippet through RenderXHTML and compares the result with the exact XHTML5
// markup expected.
var _ = Describe("unordered lists", func() {

	It("simple unordered list with no title", func() {
		source := `* item 1
* item 2
* item 3`
		expected := `<div class="ulist">
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
<li>
<p>item 3</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("simple unordered list with no title then a paragraph", func() {
		source := `* item 1
* item 2
* item 3
and a standalone paragraph`
		expected := `<div class="ulist">
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
<li>
<p>item 3</p>
</li>
</ul>
</div>
<div class="paragraph">
<p>and a standalone paragraph</p>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("simple unordered list with id, title and role", func() {
		source := `.mytitle
[#foo]
[.myrole]
* item 1
* item 2`
		expected := `<div id="foo" class="ulist myrole">
<div class="title">mytitle</div>
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// NOTE(review): this spec duplicates the previous one (same name, source
	// and expectation) — consider removing or renaming it.
	It("simple unordered list with id, title and role", func() {
		source := `.mytitle
[#foo]
[.myrole]
* item 1
* item 2`
		expected := `<div id="foo" class="ulist myrole">
<div class="title">mytitle</div>
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("simple unordered list with style id, title and role", func() {
		source := `.mytitle
[#foo]
[disc.myrole]
* item 1
* item 2`
		expected := `<div id="foo" class="ulist disc myrole">
<div class="title">mytitle</div>
<ul class="disc">
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("simple unordered list with continuation", func() {
		source := `* item 1
+
foo
* item 2`
		expected := `<div class="ulist">
<ul>
<li>
<p>item 1</p>
<div class="paragraph">
<p>foo</p>
</div>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("nested unordered lists without a title", func() {
		source := `* item 1
** item 1.1
** item 1.2
* item 2`
		expected := `<div class="ulist">
<ul>
<li>
<p>item 1</p>
<div class="ulist">
<ul>
<li>
<p>item 1.1</p>
</li>
<li>
<p>item 1.2</p>
</li>
</ul>
</div>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// NOTE(review): despite the name, the source only sets an id ([#listID]),
	// not a title — confirm the intended coverage.
	It("nested unordered lists with a title", func() {
		source := `[#listID]
* item 1
** item 1.1
** item 1.2
* item 2`
		expected := `<div id="listID" class="ulist">
<ul>
<li>
<p>item 1</p>
<div class="ulist">
<ul>
<li>
<p>item 1.1</p>
</li>
<li>
<p>item 1.2</p>
</li>
</ul>
</div>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("unordered list with item continuation", func() {
		source := `* foo
+
----
a delimited block
----
+
----
another delimited block
----
* bar
`
		expected := `<div class="ulist">
<ul>
<li>
<p>foo</p>
<div class="listingblock">
<div class="content">
<pre>a delimited block</pre>
</div>
</div>
<div class="listingblock">
<div class="content">
<pre>another delimited block</pre>
</div>
</div>
</li>
<li>
<p>bar</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("unordered list without item continuation", func() {
		source := `* foo
----
a delimited block
----
* bar
----
another delimited block
----`
		expected := `<div class="ulist">
<ul>
<li>
<p>foo</p>
</li>
</ul>
</div>
<div class="listingblock">
<div class="content">
<pre>a delimited block</pre>
</div>
</div>
<div class="ulist">
<ul>
<li>
<p>bar</p>
</li>
</ul>
</div>
<div class="listingblock">
<div class="content">
<pre>another delimited block</pre>
</div>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})
})
// Rendering specs for AsciiDoc checklists: unordered lists whose items carry
// [*], [x] or [ ] markers, rendered to XHTML5 with check/unchecked glyphs.
var _ = Describe("checklists", func() {

	It("checklist with title and dashes", func() {
		source := `.Checklist
- [*] checked
- [x] also checked
- [ ] not checked
- normal list item`
		expected := `<div class="ulist checklist">
<div class="title">Checklist</div>
<ul class="checklist">
<li>
<p>✓ checked</p>
</li>
<li>
<p>✓ also checked</p>
</li>
<li>
<p>❏ not checked</p>
</li>
<li>
<p>normal list item</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("simple checklist with style id, title and role", func() {
		// style is overridden to checklist on ul, but div keeps it (asciidoctor compat)
		source := `.mytitle
[#foo]
[disc.myrole]
* [x] item 1
* [x] item 2`
		expected := `<div id="foo" class="ulist checklist disc myrole">
<div class="title">mytitle</div>
<ul class="checklist">
<li>
<p>✓ item 1</p>
</li>
<li>
<p>✓ item 2</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("parent checklist with title and nested checklist", func() {
		source := `.Checklist
* [ ] parent not checked
** [*] checked
** [x] also checked
** [ ] not checked
* normal list item`
		expected := `<div class="ulist checklist">
<div class="title">Checklist</div>
<ul class="checklist">
<li>
<p>❏ parent not checked</p>
<div class="ulist checklist">
<ul class="checklist">
<li>
<p>✓ checked</p>
</li>
<li>
<p>✓ also checked</p>
</li>
<li>
<p>❏ not checked</p>
</li>
</ul>
</div>
</li>
<li>
<p>normal list item</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	It("parent checklist with role and nested normal list", func() {
		source := `[.Checklist]
* [ ] parent not checked
** a normal list item
** another normal list item
* normal list item`
		expected := `<div class="ulist checklist Checklist">
<ul class="checklist">
<li>
<p>❏ parent not checked</p>
<div class="ulist">
<ul>
<li>
<p>a normal list item</p>
</li>
<li>
<p>another normal list item</p>
</li>
</ul>
</div>
</li>
<li>
<p>normal list item</p>
</li>
</ul>
</div>
`
		Expect(RenderXHTML(source)).To(MatchHTML(expected))
	})

	// Specs verifying which ancestor list item a "+"-continued paragraph is
	// attached to.
	// NOTE(review): the three sources below look identical apart from the
	// paragraph text; the differing attachments suggest significant blank
	// lines may have been lost — verify against the upstream test file.
	Context("attach to unordered list item ancestor", func() {

		It("attach to grandparent unordered list item", func() {
			source := `* grandparent list item
** parent list item
*** child list item
+
paragraph attached to grandparent list item`
			expected := `<div class="ulist">
<ul>
<li>
<p>grandparent list item</p>
<div class="ulist">
<ul>
<li>
<p>parent list item</p>
<div class="ulist">
<ul>
<li>
<p>child list item</p>
</li>
</ul>
</div>
</li>
</ul>
</div>
<div class="paragraph">
<p>paragraph attached to grandparent list item</p>
</div>
</li>
</ul>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})

		It("attach to parent unordered list item", func() {
			source := `* grandparent list item
** parent list item
*** child list item
+
paragraph attached to parent list item`
			expected := `<div class="ulist">
<ul>
<li>
<p>grandparent list item</p>
<div class="ulist">
<ul>
<li>
<p>parent list item</p>
<div class="ulist">
<ul>
<li>
<p>child list item</p>
</li>
</ul>
</div>
<div class="paragraph">
<p>paragraph attached to parent list item</p>
</div>
</li>
</ul>
</div>
</li>
</ul>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})

		It("attach to child unordered list item", func() {
			source := `* grandparent list item
** parent list item
*** child list item
+
paragraph attached to child list item`
			expected := `<div class="ulist">
<ul>
<li>
<p>grandparent list item</p>
<div class="ulist">
<ul>
<li>
<p>parent list item</p>
<div class="ulist">
<ul>
<li>
<p>child list item</p>
<div class="paragraph">
<p>paragraph attached to child list item</p>
</div>
</li>
</ul>
</div>
</li>
</ul>
</div>
</li>
</ul>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
	})
})
|
package cmd
import (
"fmt"
"regexp"
"strings"
)
// formatHelp formats the help text for Cmd or Group.
//
// The output is assembled, in order, from the usage text, the optional
// summary, each non-empty definition list and the optional details, all
// wrapped to the terminal width and separated by blank lines.
func formatHelp(usage, summary, details string, defs []*definitionList) string {
	columns := terminalColumns()
	var sections []string
	sections = append(sections, wrapParagraphs(usage, columns))
	if summary != "" {
		sections = append(sections, wrapParagraphs(summary, columns))
	}
	for _, d := range defs {
		if len(d.definitions) > 0 {
			sections = append(sections, d.format(columns))
		}
	}
	if details != "" {
		sections = append(sections, wrapParagraphs(details, columns))
	}
	return strings.Join(sections, "\n")
}
// whitespaceRe matches any run of whitespace; it is used to split text into
// words before wrapping.
var whitespaceRe = regexp.MustCompile(`\s+`)

// wrapParagraphs wraps text to fit the given number of columns, preserving
// paragraphs separated by a blank line.
func wrapParagraphs(text string, columns int) string {
	paragraphs := strings.Split(text, "\n\n")
	output := make([]string, 0, len(paragraphs))
	for _, p := range paragraphs {
		b := new(strings.Builder)
		for _, line := range wrapText(p, columns) {
			fmt.Fprintln(b, line)
		}
		output = append(output, b.String())
	}
	return strings.Join(output, "\n")
}

// wrapText splits text into lines that fit the given number of columns.
// A single word longer than columns is emitted on its own (overlong) line
// rather than being broken mid-word.
func wrapText(text string, columns int) []string {
	text = strings.TrimSpace(text)
	// split text into words and convert to []rune so widths are measured in
	// runes, not bytes
	words := whitespaceRe.Split(text, -1)
	runeWords := make([][]rune, len(words))
	for i, word := range words {
		runeWords[i] = []rune(word)
	}
	// join words into lines
	lines := []string{}
	current := []rune{}
	for _, word := range runeWords {
		if len(current) == 0 {
			// First word of a line is always placed, even if overlong.
			// (The previous version emitted a spurious empty line and then
			// omitted the following separator space when the very first
			// word exceeded columns.)
			current = append(current, word...)
			continue
		}
		if len(current)+1+len(word) > columns {
			// word does not fit: flush the current line and start a new one
			lines = append(lines, string(current))
			current = append(current[:0:0], word...)
			continue
		}
		current = append(current, ' ')
		current = append(current, word...)
	}
	lines = append(lines, string(current))
	return lines
}
// A definitionList is used to represent a list of flags, commands, or groups.
type definitionList struct {
	// title is printed as the section heading, followed by ":".
	title string
	// definitions holds one entry per flag/command/group.
	definitions []*definition
}
// format prints the definitionList in a nicely-formatted way that fits in the given number of
// columns.
//
// Layout: terms that fit are placed in a left column (capped via
// formatTerms), the definition text is wrapped into the remaining width;
// terms too wide for the left column are printed on their own lines above
// their definition text.
func (d *definitionList) format(columns int) string {
	b := new(strings.Builder)
	// set a maximum for left column
	maxLeftCols := (columns - 4) / 2
	if maxLeftCols > 25 {
		maxLeftCols = 25
	}
	// get text for left column
	terms := []termLines{}
	for _, def := range d.definitions {
		terms = append(terms, def.formatTerms(maxLeftCols))
	}
	// find out size of left and right column
	leftCols := 0
	for _, t := range terms {
		if t.inline == "" {
			continue
		}
		// width in runes, not bytes
		cols := len([]rune(t.inline))
		if cols > leftCols {
			leftCols = cols
		}
	}
	rightCols := columns - 4 - leftCols
	if rightCols > 80 {
		rightCols = 80
	}
	// print text
	fmt.Fprintf(b, "%s:\n", d.title)
	for i, def := range d.definitions {
		flagDef := terms[i]
		// terms too wide for the left column get their own lines first
		for _, line := range flagDef.separate {
			fmt.Fprintf(b, " %s\n", line)
		}
		usageLines := wrapText(def.text, rightCols)
		// first wrapped line shares the row with the padded inline terms
		fmt.Fprintf(b, " %-*s %s\n", leftCols, flagDef.inline, usageLines[0])
		// continuation lines are indented past the left column
		for _, line := range usageLines[1:] {
			fmt.Fprintf(b, "%*s%s\n", 2+leftCols+2, "", line)
		}
	}
	return b.String()
}
// A definition describes one definition.
type definition struct {
	// terms are the names/aliases being defined (e.g. a flag's spellings).
	terms []string
	// text is the description shown in the right-hand column.
	text string
}

// termLines defines the text printed in the left column for a definition.
type termLines struct {
	// separate holds terms printed on their own lines (too wide to inline).
	separate []string
	// inline holds the joined terms that fit in the left column.
	inline string
}
// formatTerms formats the left columns for a definition.
//
// If even the last term alone exceeds maxCols, every term goes on its own
// line. If only the joined list is too wide, all terms but the last go on
// separate lines and the last one is inlined. Otherwise the joined list is
// inlined.
func (d *definition) formatTerms(maxCols int) termLines {
	last := len(d.terms) - 1
	if len([]rune(d.terms[last])) > maxCols {
		return termLines{separate: d.terms}
	}
	joined := strings.Join(d.terms, ", ")
	if len([]rune(joined)) <= maxCols {
		return termLines{inline: joined}
	}
	return termLines{
		separate: d.terms[:last],
		inline:   d.terms[last],
	}
}
|
package runner
import (
"strings"
"testing"
)
// TestConvertEnvMapToList checks that a map entry becomes a "KEY=value"
// string in the resulting list.
func TestConvertEnvMapToList(t *testing.T) {
	t.Run("should convert map to list of key=val", func(t *testing.T) {
		env := map[string]string{"ONE": "1"}
		envList := convertEnvMapToList(env)
		exp := "ONE=1"
		if envList[0] != exp {
			t.Errorf("failed to convert env map to list. \nexp: %s\ngot: %s", exp, envList[0])
		}
	})
}
// TestConvertChecksumMapToEnvList checks that checksum map keys are exported
// as LETS_CHECKSUM_* entries with upper-cased names and "-"/"_" normalized.
func TestConvertChecksumMapToEnvList(t *testing.T) {
	// findEnv reports whether key occurs in list.
	findEnv := func(key string, list []string) bool {
		for _, item := range list {
			if item == key {
				return true
			}
		}
		return false
	}
	t.Run("should convert map to list of key=val", func(t *testing.T) {
		env := map[string]string{
			"one":         "111",
			"two-two":     "222",
			"three_three": "333",
		}
		envList := convertChecksumMapToEnvForCmd(env)
		joinedEnv := strings.Join(envList, ";")
		if !findEnv("LETS_CHECKSUM_ONE=111", envList) {
			// fixed failure message: previously printed "...ONE=1" while the
			// actual expectation is "...ONE=111"
			t.Errorf("failed to convert env map to list. \nexp: %s\ngot: %s", "LETS_CHECKSUM_ONE=111", joinedEnv)
		}
		if !findEnv("LETS_CHECKSUM_TWO_TWO=222", envList) {
			t.Errorf("failed to convert env map to list. \nexp: %s\ngot: %s", "LETS_CHECKSUM_TWO_TWO=222", joinedEnv)
		}
		if !findEnv("LETS_CHECKSUM_THREE_THREE=333", envList) {
			t.Errorf("failed to convert env map to list. \nexp: %s\ngot: %s", "LETS_CHECKSUM_THREE_THREE=333", joinedEnv)
		}
	})
}
// TestComposeEnv checks that composeEnvs concatenates the given env lists in
// order.
func TestComposeEnv(t *testing.T) {
	t.Run("should compose env", func(t *testing.T) {
		first := []string{"A=1"}
		second := []string{"B=2"}
		total := len(first) + len(second)
		env := composeEnvs(first, second)
		if len(env) != total {
			t.Errorf("composed env len different from expected: exp: %d, got: %d", total, len(env))
		}
		if env[0] != "A=1" {
			t.Errorf("first element from composed env different from expected: exp: %s, got: %s", first[0], env[0])
		}
		if env[1] != "B=2" {
			t.Errorf("first element from composed env different from expected: exp: %s, got: %s", second[0], env[1])
		}
	})
}
|
package cmd
import (
"fmt"
"github.com/fsnotify/fsnotify"
"github.com/jweny/pocassist/api/routers"
conf2 "github.com/jweny/pocassist/pkg/conf"
"github.com/jweny/pocassist/pkg/db"
"github.com/jweny/pocassist/pkg/logging"
"github.com/jweny/pocassist/pkg/util"
"github.com/jweny/pocassist/poc/rule"
"github.com/spf13/viper"
"github.com/urfave/cli/v2"
"log"
"os"
"path"
"path/filepath"
"sort"
)
// Values for the pocassist CLI options.
// NOTE(review): presumably bound to flags by the cli/server sub-commands
// defined elsewhere in this package — confirm there.
var (
	url       string
	urlFile   string
	rawFile   string
	loadPoc   string
	condition string
)
// init prints the pocassist ASCII-art banner on startup.
func init() {
	// Raw banner string; written to stdout byte-for-byte.
	welcome := `
_ _
_ __ ___ ___ __ _ ___ ___(_)___| |_
| '_ \ / _ \ / __/ _' / __/ __| / __| __|
| |_) | (_) | (_| (_| \__ \__ \ \__ \ |_
| .__/ \___/ \___\__,_|___/___/_|___/\__|
|_|
`
	fmt.Println(welcome)
}
// InitAll initializes every subsystem in dependency order.
func InitAll() {
	// config must be loaded before everything else
	conf2.Setup()
	logging.Setup()
	db.Setup()
	routers.Setup()
	util.Setup()
	rule.Setup()
}
// HotConf hot-reloads the configuration via viper: when config.yaml (next to
// the executable) changes, all subsystems are re-initialized.
func HotConf() {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatalf("conf.Setup, fail to get current path: %v", err)
	}
	// Config file location: <executable dir> + config.yaml.
	// NOTE(review): path.Join is meant for slash-separated paths;
	// filepath.Join would be the OS-correct choice here — confirm before
	// changing (the "path" import would become unused).
	configFile := path.Join(dir, "config.yaml")
	viper.SetConfigType("yaml")
	viper.SetConfigFile(configFile)
	// Watch the config file for changes.
	viper.WatchConfig()
	viper.OnConfigChange(func(e fsnotify.Event) {
		// Callback invoked after the config file has changed.
		log.Println("Config file changed:", e.Name)
		InitAll()
	})
}
// RunApp configures and starts the pocassist command line application with
// its "cli" and "server" sub-commands, exiting fatally if the app fails.
func RunApp() {
	app := cli.NewApp()
	app.Name = "pocassist"
	app.Usage = "New POC Framework Without Writing Code"
	app.Version = "0.3.0"
	// Sub-commands.
	app.Commands = []*cli.Command{
		&subCommandCli,
		&subCommandServer,
	}
	sort.Sort(cli.FlagsByName(app.Flags))
	sort.Sort(cli.CommandsByName(app.Commands))
	// log.Fatalf exits the process, so no code may follow it; the previous
	// unreachable `return` has been removed.
	if err := app.Run(os.Args); err != nil {
		log.Fatalf("cli.RunApp err: %v", err)
	}
}
|
package cmd
import (
"testing"
"net/http/httptest"
"net/http"
"io/ioutil"
"github.com/HotelsDotCom/flyte/flytepath"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"github.com/HotelsDotCom/flyte/httputil"
"fmt"
)
// TestUploadDs_ShouldUploadDsFromFile checks that "upload ds -f <file>" PUTs
// the file as a multipart form field named "value" to the datastore endpoint
// and prints the resulting location.
func TestUploadDs_ShouldUploadDsFromFile(t *testing.T) {
	//given
	// rec captures what the fake server received.
	rec := struct {
		reqURL          string
		reqMethod       string
		reqContentType  string
		fileBody        []byte
		fileContentType string
	}{}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec.reqURL = r.URL.String()
		rec.reqMethod = r.Method
		rec.reqContentType = r.Header.Get(httputil.HeaderContentType)
		// "value" is the multipart field carrying the datastore payload.
		f, h, err := r.FormFile("value")
		if err != nil {
			panic(err)
		}
		defer f.Close()
		rec.fileBody, err = ioutil.ReadAll(f)
		if err != nil {
			panic(err)
		}
		rec.fileContentType = h.Header.Get(httputil.HeaderContentType)
		w.WriteHeader(http.StatusCreated)
	}))
	defer ts.Close()
	dsFile := "./testdata/env.json"
	//when
	output, err := executeCommand("upload", "ds", "-f", dsFile, "--url", ts.URL)
	require.NoError(t, err)
	//then
	// the item name defaults to the file base name ("env")
	assert.Equal(t, flytepath.DatastorePath+"/env", rec.reqURL)
	assert.Equal(t, http.MethodPut, rec.reqMethod)
	assert.Contains(t, rec.reqContentType, "multipart/form-data; boundary=")
	wantContent, err := ioutil.ReadFile(dsFile)
	require.NoError(t, err)
	assert.Equal(t, wantContent, rec.fileBody)
	assert.Equal(t, httputil.MediaTypeJson, rec.fileContentType)
	l := fmt.Sprintf("Location: %s%s/%s", ts.URL, flytepath.DatastorePath, "env")
	assert.Contains(t, output, l)
}
// TestUploadDs_ShouldUploadDsFromFileWithDefaultsOverriddenByFlags checks
// that the --name, --description and --content-type flags override the
// defaults derived from the file.
func TestUploadDs_ShouldUploadDsFromFileWithDefaultsOverriddenByFlags(t *testing.T) {
	//given
	// rec captures what the fake server received.
	rec := struct {
		reqURL      string
		description string
		contentType string
	}{}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec.reqURL = r.URL.String()
		f, h, err := r.FormFile("value")
		if err != nil {
			panic(err)
		}
		defer f.Close()
		rec.description = r.Form.Get("description")
		rec.contentType = h.Header.Get(httputil.HeaderContentType)
		w.WriteHeader(http.StatusCreated)
	}))
	defer ts.Close()
	file := "./testdata/env.json"
	name := "my-data"
	description := "This is my data"
	contentType := "text/plain; charset=us-ascii"
	//when
	output, err := executeCommand("upload", "ds",
		"-f", file,
		"--name", name,
		"--content-type", contentType,
		"--description", description,
		"--url", ts.URL)
	require.NoError(t, err)
	//then
	// the flag-provided name (not the file base name) forms the URL
	assert.Equal(t, flytepath.DatastorePath+"/"+name, rec.reqURL)
	assert.Equal(t, description, rec.description)
	assert.Equal(t, contentType, rec.contentType)
	l := fmt.Sprintf("Location: %s%s/%s", ts.URL, flytepath.DatastorePath, name)
	assert.Contains(t, output, l)
}
// TestUploadDs_ShouldCreateResource verifies that a 201 Created response is
// reported by printing the new resource's location.
func TestUploadDs_ShouldCreateResource(t *testing.T) {
	//given a server that always answers 201 Created
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
	}))
	defer server.Close()
	//when
	output, err := executeCommand("upload", "ds", "-f", "./testdata/env.json", "--url", server.URL)
	require.NoError(t, err)
	//then the printed location points at the datastore item
	expectedLocation := fmt.Sprintf("Location: %s%s/%s", server.URL, flytepath.DatastorePath, "env")
	assert.Contains(t, output, expectedLocation)
}
// TestUploadDs_ShouldUpdateResource verifies that a 204 No Content response
// (update of an existing item) is also reported with the item's location.
func TestUploadDs_ShouldUpdateResource(t *testing.T) {
	//given a server that always answers 204 No Content
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	defer server.Close()
	//when
	output, err := executeCommand("upload", "ds", "-f", "./testdata/env.json", "--url", server.URL)
	require.NoError(t, err)
	//then the printed location points at the datastore item
	expectedLocation := fmt.Sprintf("Location: %s%s/%s", server.URL, flytepath.DatastorePath, "env")
	assert.Contains(t, output, expectedLocation)
}
// TestUploadDs_ShouldErrorForNon201Or204Response verifies that any status
// other than 201/204 surfaces as an error mentioning the HTTP status.
func TestUploadDs_ShouldErrorForNon201Or204Response(t *testing.T) {
	//given a server that always answers 404 Not Found
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	}))
	defer server.Close()
	//when
	_, err := executeCommand("upload", "ds", "-f", "./testdata/env.json", "--url", server.URL)
	//then
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "404 Not Found")
}
|
package gb11714
// Prev returns the code immediately before s in the sequence, with its check
// character appended. The error comes from decoding s's content portion.
func Prev(s string) (string, error) {
	serial, err := dec(s[:ContentLen])
	return build(serial, -1), err
}

// Next returns the code immediately after s in the sequence, with its check
// character appended. The error comes from decoding s's content portion.
func Next(s string) (string, error) {
	serial, err := dec(s[:ContentLen])
	return build(serial, +1), err
}
// build steps n by d (±1) until the resulting code's letter is allowed,
// encodes it, and appends the check character.
//
// n is the decoded serial, d the direction (+1 for Next, -1 for Prev).
func build(n int, d int) string {
	v := n
	for {
		v += d
		// GB 32100-2015 forbids the letters I, O, Z, S and V.
		// The previous version listed each letter as its own empty case,
		// which (since Go never falls through) behaved identically but read
		// like a C-style fallthrough mistake; one combined case is clearer.
		switch (v % 36) - 10 + 'A' {
		case 'I', 'O', 'Z', 'S', 'V':
			// forbidden letter: keep stepping
		default:
			goto done
		}
	}
done:
	o := enc(v)
	c, _ := Sum(o)
	return o + c
}
|
//go:build e2e
package cloudnativeproxy
import (
"context"
"path"
"testing"
"github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
"github.com/Dynatrace/dynatrace-operator/test/dynakube"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/daemonset"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/deployment"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/manifests"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/namespace"
"github.com/Dynatrace/dynatrace-operator/test/oneagent"
"github.com/Dynatrace/dynatrace-operator/test/project"
"github.com/Dynatrace/dynatrace-operator/test/proxy"
"github.com/Dynatrace/dynatrace-operator/test/sampleapps"
"github.com/Dynatrace/dynatrace-operator/test/secrets"
"github.com/Dynatrace/dynatrace-operator/test/setup"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// httpsProxy is the env var name checked on OneAgent containers.
	httpsProxy = "https_proxy"
	// sampleNamespace is the namespace the sample application is deployed to.
	sampleNamespace = "test-namespace-1"
	// dtProxy is the env var name checked on injected app containers.
	dtProxy = "DT_PROXY"
)
var (
	// sampleAppDeployment is the manifest for the sample application.
	sampleAppDeployment = path.Join(project.TestDataDir(), "cloudnative/sample-deployment.yaml")
	// secretPath holds the single-tenant credentials used by the DynaKube.
	secretPath = path.Join(project.TestDataDir(), "secrets/single-tenant.yaml")
	// kubernetesAllPath is the all-in-one operator deployment manifest.
	kubernetesAllPath = path.Join(project.RootDir(), "config/deploy/kubernetes/kubernetes-all.yaml")
	// curlPodPath is a curl pod that queries the webhook through the proxy.
	curlPodPath = path.Join(project.TestDataDir(), "activegate/curl-pod-webhook-via-proxy.yaml")
	// proxyPath is the manifest of the proxy deployment itself.
	proxyPath = path.Join(project.TestDataDir(), "proxy/proxy.yaml")
	// injectionLabel marks a namespace for DynaKube injection.
	injectionLabel = map[string]string{
		"inject": "dynakube",
	}
)
// WithProxy builds the "cloudNative with proxy" e2e feature: it installs the
// operator and (optionally) a proxy, deploys a DynaKube with CloudNative
// full stack and the given proxy spec, then verifies the proxy env vars in
// the OneAgent and sample app pods.
func WithProxy(t *testing.T, proxySpec *v1beta1.DynaKubeProxy) features.Feature {
	secretConfig, err := secrets.NewFromConfig(afero.NewOsFs(), secretPath)
	require.NoError(t, err)
	cloudNativeWithProxy := features.New("cloudNative with proxy")
	// Create the sample namespace with the injection label.
	cloudNativeWithProxy.Setup(namespace.Create(
		namespace.NewBuilder(sampleNamespace).
			WithLabels(injectionLabel).
			Build()),
	)
	cloudNativeWithProxy.Setup(secrets.ApplyDefault(secretConfig))
	cloudNativeWithProxy.Setup(manifests.InstallFromFile(kubernetesAllPath))
	setup.AssessOperatorDeployment(cloudNativeWithProxy)
	// Proxy installation steps are added only when proxySpec is non-nil.
	assesProxy(cloudNativeWithProxy, proxySpec)
	// Deploy a DynaKube with CloudNative full stack and the given proxy.
	cloudNativeWithProxy.Assess("install dynakube", dynakube.Apply(
		dynakube.NewBuilder().
			WithDefaultObjectMeta().
			WithDynakubeNamespaceSelector().
			ApiUrl(secretConfig.ApiUrl).
			CloudNative(&v1beta1.CloudNativeFullStackSpec{}).
			Proxy(proxySpec).
			Build()),
	)
	setup.AssessDynakubeStartup(cloudNativeWithProxy)
	cloudNativeWithProxy.Assess("osAgent can connect", oneagent.OSAgentCanConnect())
	// Cut off the dynatrace and sample namespaces (see the proxy package) so
	// traffic has to go through the proxy.
	proxy.CutOffDynatraceNamespace(cloudNativeWithProxy, proxySpec)
	proxy.CutOffSampleNamespace(cloudNativeWithProxy, proxySpec)
	cloudNativeWithProxy.Assess("check env variables of oneagent pods", checkOneAgentEnvVars)
	cloudNativeWithProxy.Assess("install deployment", manifests.InstallFromFile(sampleAppDeployment))
	cloudNativeWithProxy.Assess("check existing init container and env var", checkSampleInitContainerEnvVars)
	return cloudNativeWithProxy.Feature()
}
// assesProxy registers the proxy installation and verification steps on the
// feature builder; it is a no-op when no proxy is configured.
func assesProxy(builder *features.FeatureBuilder, proxySpec *v1beta1.DynaKubeProxy) {
	if proxySpec == nil {
		return
	}
	builder.Assess("install proxy", manifests.InstallFromFile(proxyPath))
	builder.Assess("proxy started", deployment.WaitFor(proxy.ProxyDeployment, proxy.ProxyNamespace))
	builder.Assess("query webhook via proxy", manifests.InstallFromFile(curlPodPath))
	builder.Assess("query is completed", proxy.WaitForCurlProxyPod(proxy.CurlPodProxy, dynakube.Namespace))
	builder.Assess("proxy is running", proxy.CheckProxyService())
}
// checkOneAgentEnvVars asserts that every pod of the "dynakube-oneagent"
// daemonset (namespace "dynatrace") carries the https_proxy env var in its
// "dynakube-oneagent" container.
func checkOneAgentEnvVars(ctx context.Context, t *testing.T, environmentConfig *envconf.Config) context.Context {
	resources := environmentConfig.Client().Resources()
	err := daemonset.NewQuery(ctx, resources, client.ObjectKey{
		Name:      "dynakube-oneagent",
		Namespace: "dynatrace",
	}).ForEachPod(func(podItem v1.Pod) {
		require.NotNil(t, podItem)
		require.NotNil(t, podItem.Spec)
		checkEnvVarsInContainer(t, podItem, "dynakube-oneagent", httpsProxy)
	})
	require.NoError(t, err)
	return ctx
}
// checkSampleInitContainerEnvVars asserts that every sample app pod was
// injected (has init containers) and carries the DT_PROXY env var in its
// main container.
func checkSampleInitContainerEnvVars(ctx context.Context, t *testing.T, environmentConfig *envconf.Config) context.Context {
	resources := environmentConfig.Client().Resources()
	pods := sampleapps.Get(ctx, t, resources)
	for _, podItem := range pods.Items {
		require.NotNil(t, podItem)
		require.NotNil(t, podItem.Spec)
		require.NotNil(t, podItem.Spec.InitContainers)
		checkEnvVarsInContainer(t, podItem, sampleapps.Name, dtProxy)
	}
	return ctx
}
// checkEnvVarsInContainer asserts that the named container of podItem defines
// envVar and that its value is set.
func checkEnvVarsInContainer(t *testing.T, podItem v1.Pod, containerName string, envVar string) {
	for _, container := range podItem.Spec.Containers {
		if container.Name != containerName {
			continue
		}
		require.NotNil(t, container.Env)
		require.True(t, kubeobjects.EnvVarIsIn(container.Env, envVar))
		for _, env := range container.Env {
			if env.Name == envVar {
				require.NotNil(t, env.Value)
			}
		}
	}
}
|
package array
import (
"testing"
"github.com/stretchr/testify/assert"
)
// d is the shared Delete instance under test.
var d = &Delete{}

// TestStaticDelete verifies that Eval removes the element at the given index.
func TestStaticDelete(t *testing.T) {
	// input was previously misnamed "expectedResult"; it is the list being
	// edited, not the expectation.
	input := []string{"Cat", "Dog", "Snake"}
	final, err := d.Eval(input, 2)
	assert.Nil(t, err)
	assert.Equal(t, []string{"Cat", "Dog"}, final)
}
|
// WeChat pay parameter service endpoints:
// 1. Create official account pay development parameters
// 2. Get official account pay development parameters
// 3. Upload certificates (apiclient_key.pem and apiclient_cert.pem)
package controllers
import (
"encoding/json"
"github.com/1046102779/common/consts"
. "github.com/1046102779/official_account/logger"
"github.com/1046102779/official_account/models"
"github.com/astaxie/beego"
"github.com/pkg/errors"
)
// OfficialAccountsPayParamsController handles HTTP operations for
// OfficialAccountsPayParams.
type OfficialAccountsPayParamsController struct {
	beego.Controller
}
// UploadCertification uploads the WeChat pay certificates
// (apiclient_key.pem and apiclient_cert.pem) for one official account and
// responds with a JSON body carrying err_code/err_msg.
// @params :id the internal official account ID
// @router /:id/certification [POST]
func (t *OfficialAccountsPayParamsController) UploadCertification() {
	id, _ := t.GetInt(":id")
	if id <= 0 {
		err := errors.New("param `:id` empty")
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": consts.ERROR_CODE__SOURCE_DATA__ILLEGAL,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	// The model layer parses the multipart request and stores the files.
	if retcode, err := models.UploadCertification(id, t.Ctx.Request); err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": retcode,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	// Success response; the redundant trailing `return` has been removed.
	t.Data["json"] = map[string]interface{}{
		"err_code": 0,
		"err_msg":  "",
	}
	t.ServeJSON()
}
// PayParamInfo is the JSON payload used when creating and returning an
// official account's WeChat pay development parameters.
type PayParamInfo struct {
	Appkey string `json:"appkey"`
	MchId  string `json:"mch_id"`
	Name   string `json:"name"`
}
// ModifyWechatParams creates/updates the official account's WeChat pay
// development parameters (appkey, mch_id, name) from the JSON request body.
// @router /:id/payparams [POST]
func (t *OfficialAccountsPayParamsController) ModifyWechatParams() {
	// Simplified from the previous one-entry `var (...)` block.
	payParamInfo := new(PayParamInfo)
	id, _ := t.GetInt(":id")
	if id <= 0 {
		err := errors.New("param `:id` empty")
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": consts.ERROR_CODE__SOURCE_DATA__ILLEGAL,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	if err := json.Unmarshal(t.Ctx.Input.RequestBody, payParamInfo); err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": consts.ERROR_CODE__JSON__PARSE_FAILED,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	if retcode, err := models.ModifyWechatParams(id, payParamInfo.Appkey, payParamInfo.MchId, payParamInfo.Name); err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": retcode,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	// Success response; the redundant trailing `return` has been removed.
	t.Data["json"] = map[string]interface{}{
		"err_code": 0,
		"err_msg":  "",
	}
	t.ServeJSON()
}
// GetOfficialAccountPayParamByOfficialAccountId returns the WeChat pay
// development parameters for official account :id under the
// "wechat_pay_params" key; err_code 0 = success.
// @router /:id/payparams [GET]
func (t *OfficialAccountsPayParamsController) GetOfficialAccountPayParamByOfficialAccountId() {
	id, _ := t.GetInt(":id")
	if id <= 0 {
		err := errors.New("param `:id` empty")
		// Log the bad request, consistent with the other handlers in this
		// controller (the original silently skipped logging here).
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": consts.ERROR_CODE__SOURCE_DATA__ILLEGAL,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	payParam, retcode, err := models.GetOfficialAccountPayParamByOfficialAccountId(id)
	if err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": retcode,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	// Use the package-level PayParamInfo; the original redeclared an
	// identical local type that shadowed it.
	payParamInfo := new(PayParamInfo)
	if payParam != nil {
		payParamInfo = &PayParamInfo{
			Appkey: payParam.Appkey,
			MchId:  payParam.MchId,
			Name:   payParam.Name,
		}
	}
	t.Data["json"] = map[string]interface{}{
		"err_code":          0,
		"err_msg":           "",
		"wechat_pay_params": *payParamInfo,
	}
	t.ServeJSON()
}
|
package heap
import (
"fmt"
)
// ExampleHeap exercises Add, Remove and Traverse on a Heap; the expected
// traversal after each mutation is pinned by the // Output block below
// (go test compares it against what the example prints).
func ExampleHeap() {
	h := &Heap{}
	// Seed the heap with five values.
	for _, v := range []int{8, 19, 12, 23, 78} {
		h.Add(v)
	}
	for v := range h.Traverse() {
		fmt.Printf(" %d", v)
	}
	fmt.Println()
	// Remove a value, then traverse again.
	h.Remove(8)
	for v := range h.Traverse() {
		fmt.Printf(" %d", v)
	}
	fmt.Println()
	// Re-add the removed value.
	h.Add(8)
	for v := range h.Traverse() {
		fmt.Printf(" %d", v)
	}
	fmt.Println()
	// Add two more values.
	h.Add(15)
	h.Add(11)
	for v := range h.Traverse() {
		fmt.Printf(" %d", v)
	}
	fmt.Println()
	// Output:
	// 8 19 12 23 78
	// 12 19 78 23
	// 8 12 78 23 19
	// 8 12 11 23 19 78 15
}
|
package bkz
import (
"fmt"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
// Book describes a library book record as stored in MongoDB.
type Book struct {
	Title  string // book title
	Author string // author name
	ISBN   string // ISBN identifier
	Genre  string // genre label
	Id     string // unique record id used for lookups
}
// CreateBook inserts book into the database unless a record with the
// same Id already exists. It returns true when the book is present
// afterwards (pre-existing or newly inserted) and false on any error.
func CreateBook(book *Book) bool {
	session, err := mgo.Dial("127.0.0.1:27017/")
	if err != nil {
		fmt.Println(err)
		return false
	}
	// The original never closed the session, leaking a connection per call.
	defer session.Close()

	c := session.DB("library").C("users")
	result := Book{}
	err = c.Find(bson.M{"id": book.Id}).One(&result)
	// The original ignored this error entirely; "not found" is expected,
	// anything else is a real failure.
	if err != nil && err != mgo.ErrNotFound {
		fmt.Println(err)
		return false
	}
	if result.Id != "" {
		// Book is already in the database; report success without
		// inserting a duplicate.
		return true
	}
	if err = c.Insert(*book); err != nil {
		return false
	}
	return true
}
// FindBook looks up a book by its id and returns it, or nil when the
// connection fails or no matching record exists.
func FindBook(bookid string) (book *Book) {
	session, err := mgo.Dial("127.0.0.1:27017/")
	if err != nil {
		fmt.Println(err)
		return nil
	}
	// The original leaked the session; close it when done.
	defer session.Close()

	// NOTE(review): the collection is named "users" but stores books —
	// consider renaming to "books" (kept as-is to preserve behavior).
	c := session.DB("library").C("users")
	if err = c.Find(bson.M{"id": bookid}).One(&book); err != nil {
		// Includes mgo.ErrNotFound; the original silently ignored the error.
		fmt.Println(err)
		return nil
	}
	return book
}
|
package endpoint
import (
"fmt"
)
// Endpoint abstracts a publishing destination to which a local folder
// can be uploaded using the given credentials and URL.
type Endpoint interface {
	Upload(destFolder, endpointUsername, endpointPassword, endpointURL string) error
}
// New constructs the Endpoint implementation matching endpointType.
// Only "git" is currently supported; any other value yields an error.
func New(endpointType string) (Endpoint, error) {
	switch endpointType {
	case "git":
		return newGitEndpoint(), nil
	default:
		return nil, fmt.Errorf("no endpoint information found in the config file")
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"sync"
"github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
"time"
)
// DbConf holds the MySQL connection settings loaded from JSON config.
type DbConf struct {
	User     string `json:"user"`     // database user
	PassWord string `json:"passWord"` // database password
	Host     string `json:"host"`     // host:port of the server
	Name     string `json:"name"`     // database (schema) name
}
// DataUpdater bundles the plumbing for a periodic update loop.
// NOTE(review): only the constructor is visible in this chunk; the loop
// that consumes these fields is defined elsewhere.
type DataUpdater struct {
	endKeeper chan bool        // buffered stop signal (capacity 1)
	ticker    *time.Ticker     // fires every updatePeriod seconds
	mt        *sync.Mutex      // mutex for update bookkeeping (usage not visible here)
	wg        *sync.WaitGroup  // wait group for in-flight work (usage not visible here)
}
const (
	// updatePeriod is the refresh interval in seconds (5 minutes).
	updatePeriod = 5 * 60 // 5min
)
// NewDataUpdateTimer builds a DataUpdater with a buffered stop channel
// and a ticker firing every updatePeriod seconds.
func NewDataUpdateTimer() *DataUpdater {
	return &DataUpdater{
		endKeeper: make(chan bool, 1),
		ticker:    time.NewTicker(time.Second * time.Duration(updatePeriod)),
		mt:        &sync.Mutex{},
		wg:        &sync.WaitGroup{},
	}
}
// main connects to the database and prints the distinct country/city
// pairs from the DnReGion table.
func main() {
	dbConf := &DbConf{}
	var err error
	DbPool, err = newDb(dbConf)
	if err != nil {
		// Report the failure instead of exiting silently as the original did.
		fmt.Println("newDb:", err)
		return
	}
	dnR := new(DnReGion)
	// Query() returns []map[string][]byte keyed by column name.
	datas, err := DbPool.Table(dnR.TableName()).Cols("country", "city").Distinct("country").Query()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, data := range datas {
		fmt.Println(string(data["country"]))
		fmt.Println(string(data["city"]))
	}
}
// get loads the DnReGion row with id 1234 from DbPool and reports
// whether a row matched. The two %p prints are leftover debug output
// demonstrating that Get fills the struct in place (same pointer
// before and after).
func get() (*DnReGion, bool, error) {
	dnR := new(DnReGion)
	fmt.Printf("%p\n", dnR)
	has, err := DbPool.Where("id = ?", 1234).Get(dnR)
	fmt.Printf("%p\n", dnR)
	return dnR, has, err
}
// Employee maps a row of the employee table via xorm tags.
type Employee struct {
	Id        int64  `xorm:"id autoincr"`  // auto-increment primary key
	Name      string `xorm:"name"`         // employee name
	Salary    string `xorm:"salary"`       // salary (stored as text)
	ManagerId int64  `xorm:"ManagerId"`    // id of the employee's manager
}
// getdata loads the first UserTest row matching the non-zero fields of
// user; xorm's Get fills user in place and reports whether a row matched.
func getdata(user *UserTest) (bool, error) {
	return DbPool.Get(user)
}
// UserTest maps a test table using xorm's soft-delete ("deleted") and
// auto-update-timestamp ("updated") column features.
type UserTest struct {
	Id       int64
	Name     string
	Desc     string `xorm:"desc"`    // description column
	DeleteAt int64  `xorm:"deleted"` // soft-delete timestamp managed by xorm
	UpdateAt int64  `xorm:"updated"` // last-update timestamp managed by xorm
}
// Conf is the top-level JSON configuration; currently just the
// embedded database settings.
type Conf struct {
	DbConf `json:"dbConf"` // database connection settings
}

// DbPool is the shared xorm engine, initialized in main via newDb.
var DbPool *xorm.Engine
// loadConfig reads the JSON configuration file at path and parses it
// into a Conf.
func loadConfig(path string) (*Conf, error) {
	confBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	// The original printed the raw config to stdout, leaking the DB
	// password into logs; removed.
	opts := new(Conf)
	if err := json.Unmarshal(confBytes, opts); err != nil {
		return nil, err
	}
	return opts, nil
}
// newDb builds a MySQL DSN from dbConf, opens an xorm engine with
// 60-second dial/read/write timeouts, enables SQL logging, and verifies
// connectivity with a ping.
func newDb(dbConf *DbConf) (*xorm.Engine, error) {
	driverName := "mysql"
	dsnConfig := &mysql.Config{
		User:                 dbConf.User,
		Passwd:               dbConf.PassWord,
		Addr:                 dbConf.Host,
		Net:                  "tcp",
		DBName:               dbConf.Name,
		AllowNativePasswords: true,
		ReadTimeout:          time.Duration(60) * time.Second, // I/O read timeout
		WriteTimeout:         time.Duration(60) * time.Second, // I/O write timeout
		Timeout:              time.Duration(60) * time.Second, // Dial timeout
	}
	dataSourceName := dsnConfig.FormatDSN()
	fmt.Printf("dataSourceName %s\n", dataSourceName)
	dB, err := xorm.NewEngine(driverName, dataSourceName)
	if err != nil {
		// The original used fmt.Println with an unexpanded %s verb;
		// Printf actually formats it.
		fmt.Printf("xorm new engine err: %s\n", err.Error())
		return nil, err
	}
	dB.ShowSQL(true)
	if err = dB.Ping(); err != nil {
		// The original used fmt.Println with a bogus %a verb.
		fmt.Printf("db ping err: %s\n", err.Error())
		return nil, err
	}
	return dB, nil
}
|
package main
import (
"fmt"
"github.com/duego/mongotool/storage"
"labix.org/v2/mgo"
"net/url"
"os"
"strings"
)
// mongoSession gives a session or dies trying.
// It forces a direct connection (no replica-set discovery) by appending
// "?connect=direct" to addr, and calls exit() on any dial failure.
func mongoSession(addr string) *mgo.Session {
	fmt.Fprintln(os.Stderr, "Connecting to", addr)
	s, err := mgo.Dial(addr + "?connect=direct")
	if err != nil {
		errorf("Error connecting to %s: %v", addr, err)
		exit()
	}
	return s
}
// selectStorage will figure out what kind of storage we're looking for in specified target.
// "-" is reserved for stdin (not implemented yet and aborts); an http(s)
// URL selects S3-backed storage with root taken from the URL path; any
// other value is treated as a local filesystem directory. When
// compression is true the chosen store is wrapped in a gzip layer.
func selectStorage(target string, compression bool) (root string, store storage.SaveFetcher) {
	if target == "-" {
		errorf("%s", "TODO: Set stdin storage here")
		exit()
	}
	if strings.HasPrefix(target, "http") {
		if u, err := url.Parse(target); err != nil {
			errorf("%v", err)
			exit()
		} else {
			// Scheme+host identify the S3 endpoint; the path becomes the root prefix.
			store = storage.NewS3(fmt.Sprintf("%s://%s", u.Scheme, u.Host))
			root = u.Path
		}
	} else {
		store = storage.Filesystem{target}
		root = ""
	}
	// Apply compression
	if compression {
		store = storage.NewGzipSaveFetcher(store)
	}
	return
}
|
package main
//989. 数组形式的整数加法
//对于非负整数X而言,X的数组形式是每位数字按从左到右的顺序形成的数组。例如,如果X = 1231,那么其数组形式为[1,2,3,1]。
//
//给定非负整数 X 的数组形式A,返回整数X+K的数组形式。
//
//
//
//示例 1:
//
//输入:A = [1,2,0,0], K = 34
//输出:[1,2,3,4]
//解释:1200 + 34 = 1234
//示例 2:
//
//输入:A = [2,7,4], K = 181
//输出:[4,5,5]
//解释:274 + 181 = 455
//示例 3:
//
//输入:A = [2,1,5], K = 806
//输出:[1,0,2,1]
//解释:215 + 806 = 1021
//示例 4:
//
//输入:A = [9,9,9,9,9,9,9,9,9,9], K = 1
//输出:[1,0,0,0,0,0,0,0,0,0,0]
//解释:9999999999 + 1 = 10000000000
//
//
//提示:
//
//1 <= A.length <= 10000
//0 <= A[i] <= 9
//0 <= K <= 10000
//如果A.length > 1,那么A[0] != 0
// addToArrayForm returns the decimal-digit array form of the integer
// represented by A (most-significant digit first) plus K.
// It adds K into a running carry while walking A from its least
// significant digit, then drains any remaining carry, and finally
// reverses the collected digits.
func addToArrayForm(A []int, K int) []int {
	out := make([]int, 0, len(A)+1)
	carry := K
	for i := len(A) - 1; i >= 0; i-- {
		carry += A[i]
		out = append(out, carry%10)
		carry /= 10
	}
	// Emit any digits left over from the carry (e.g. 999 + 1 -> 1000).
	for carry > 0 {
		out = append(out, carry%10)
		carry /= 10
	}
	// Digits were collected least-significant first; reverse in place.
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}
|
package mapping
import (
"github.com/omniscale/imposm3/element"
"github.com/omniscale/imposm3/geom"
)
// init registers the "echo_hello_world" field type so it can be
// referenced from imposm mapping configuration.
func init() {
	RegisterFieldTypes(
		FieldType{
			Name:     "echo_hello_world",
			GoType:   "string",
			Func:     getField_Echo_parameters,
			MakeFunc: nil,
		},
	)
}
// getField_Echo_parameters ignores all of its inputs and always yields
// the constant "Hello World!" — a minimal example of a field-type Func.
func getField_Echo_parameters(val string, elem *element.OSMElem, geom *geom.Geometry, match Match) interface{} {
	return "Hello World!"
}
|
package math
// TODO: implement this package.
|
package main
//Invalid
// Checks if every case inside switch has a return of type func f return type as f itself does not have a return statement
// NOTE(review): this function is intentionally invalid Go (declared to
// return int but no path returns) — it appears to be a fixture for a
// static-analysis check; do not "fix" it by adding a return.
func f () int {
	var a int = 10;
	switch a {
	case 1 : { }
	default : { }
	}
}
package cwsharp
import (
"unicode"
"bufio"
"fmt"
"io"
)
// bufReader is a line-buffered rune reader: fill loads one line of the
// underlying source into buf, and offset tracks the read position
// within that line.
type bufReader struct {
	offset int           // index of the next rune to read from buf
	buf    []rune        // runes of the most recently read line
	src    *bufio.Reader // underlying buffered byte source
}
// init wraps src in a bufio.Reader, resets the read position, and
// preloads the first line into the buffer.
func (b *bufReader) init(src io.Reader) {
	b.src = bufio.NewReader(src)
	b.offset = 0
	b.fill()
}
// NewReader returns a Reader that reads normalized runes from src.
func NewReader(src io.Reader) Reader {
	r := new(bufReader)
	r.init(src)
	return r
}
// ReadRule consumes and returns the next rune, normalized (ASCII
// lowercasing and full-width→half-width folding), skipping any UTF-8
// BOM (U+FEFF) runes, or EOF when the input is exhausted.
//
// Fix: the original handled a BOM by recursing (`r = b.ReadRule()`)
// and then returning normalize(r), applying normalize twice to the
// rune after a BOM. normalize is not idempotent (a full-width capital
// folds to an ASCII capital, which a second pass lowercases), so runes
// following a BOM were normalized differently from all others. The
// loop below applies normalize exactly once per returned rune.
func (b *bufReader) ReadRule() rune {
	for {
		if b.offset == len(b.buf) && !b.fill() {
			return EOF
		}
		r := b.buf[b.offset]
		b.offset++
		if r == 0xFEFF { // skip UTF-8 BOM (65279)
			continue
		}
		return normalize(r)
	}
}
// Peek returns the rune at the current position without consuming it
// (a leading BOM is consumed and skipped first), or EOF when exhausted.
//
// NOTE(review): Peek lowercases via unicode.ToLower while ReadRule uses
// normalize (ASCII + full-width folding only) — the two can disagree
// for non-ASCII letters; confirm which behavior is intended.
func (b *bufReader) Peek() rune {
	if b.offset == len(b.buf) && !b.fill() {
		return EOF
	}
	r := b.buf[b.offset]
	if r == 65279 {
		b.ReadRule() //ignore next rune
		r = b.Peek()
	}
	return unicode.ToLower(r)
}
// Seek moves the read position to offset within the current line buffer.
func (b *bufReader) Seek(offset int) {
	b.offset = offset
}
// Pos reports the current read position within the line buffer.
func (b *bufReader) Pos() int {
	return b.offset
}
// fill replaces buf with the next line (including its '\n') from src
// and resets offset. It returns false when no more input is available.
// The ReadString error is deliberately ignored: a final unterminated
// line is still returned, and plain EOF yields an empty string, which
// the length check below treats as end of input.
func (b *bufReader) fill() bool {
	line, _ := b.src.ReadString('\n')
	if len(line) == 0 {
		return false
	}
	b.offset = 0
	b.buf = []rune(line)
	// NOTE(review): leftover debug print — it dumps every line read as a
	// rune slice. Consider removing it (the fmt import would then be
	// unused and must be dropped too).
	fmt.Println(b.buf)
	return true
}
// normalize folds a rune to its canonical form: ASCII capitals A-Z are
// lowercased, and full-width forms (U+FF01..U+FF5D) are mapped to their
// half-width ASCII equivalents. All other runes pass through unchanged.
func normalize(r rune) rune {
	switch {
	case 'A' <= r && r <= 'Z':
		return r + ('a' - 'A')
	case r >= 0xFF01 && r <= 0xFF5D:
		return r - 0xFEE0
	default:
		return r
	}
}
// Copyright (C) 2017 Michał Matczuk
// Use of this source code is governed by an AGPL-style
// license that can be found in the LICENSE file.
package server
import (
"reflect"
"testing"
)
// TestNewAuth checks that NewAuth maps an empty string to nil and a
// non-empty token to an Auth carrying that token.
func TestNewAuth(t *testing.T) {
	cases := map[string]*Auth{
		"":      nil,
		"token": {Token: "token"},
	}
	for input, want := range cases {
		if got := NewAuth(input); !reflect.DeepEqual(got, want) {
			t.Errorf("Invalid auth for %s", input)
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.