text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"regexp"
"strconv"
)
// ok, _ := regexp.Match(pat, []byte(searchIn))
// ok, _ := regexp.MatchString(pat, searchIn)
// main demonstrates regexp matching, replacement, and replacement via a
// callback on a target string.
func main() {
	// Target string to search.
	searchIn := "John: 2578.34 William: 4567.23 Steve: 5632.18"
	// Pattern for a decimal number. BUG FIX: the dot must be escaped — an
	// unescaped "." matches any character, so e.g. "2578x34" would match.
	pattern := "[0-9]+\\.[0-9]+"
	// f reformats a matched number to one decimal place (32-bit float).
	f := func(s string) string {
		v, _ := strconv.ParseFloat(s, 32)
		return strconv.FormatFloat(v, 'f', 1, 32)
	}
	if ok, _ := regexp.Match(pattern, []byte(searchIn)); ok {
		fmt.Println("Match Found!")
	}
	// Check the compile error instead of silently ignoring it.
	reg, err := regexp.Compile(pattern)
	if err != nil {
		fmt.Println("invalid pattern:", err)
		return
	}
	// Replace each matched number with "##.#".
	str := reg.ReplaceAllString(searchIn, "##.#")
	fmt.Println(str)
	// Replace each matched number with the result of f.
	str2 := reg.ReplaceAllStringFunc(searchIn, f)
	fmt.Println(str2)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cca provides utilities to interact with Chrome Camera App.
package cca
import (
"context"
"fmt"
"math"
"regexp"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/camera/testutil"
mediacpu "chromiumos/tast/local/media/cpu"
"chromiumos/tast/testing"
)
const (
	// cleanupTime is the time reserved for cleanup.
	cleanupTime = 10 * time.Second
	// stabilizationDuration is how long to wait for CPU usage to stabilize
	// before measuring.
	stabilizationDuration time.Duration = 5 * time.Second
	// measureDuration is the length of the interval during which CPU usage
	// is measured for streaming.
	measureDuration = 20 * time.Second
)
// metricValuePair couples a perf metric definition with its measured value.
type metricValuePair struct {
	metric perf.Metric
	value  float64
}

// PerfData saves performance record collected by performance test.
type PerfData struct {
	// metricValues saves metric-value pair.
	metricValues []metricValuePair
	// durations maps from event name to a list of durations for each
	// time the event happened aggregated from different app instance.
	durations map[string][]float64
}

// NewPerfData creates new PerfData instance with an initialized durations map.
func NewPerfData() *PerfData {
	return &PerfData{durations: make(map[string][]float64)}
}

// SetMetricValue sets the metric and the corresponding value.
func (p *PerfData) SetMetricValue(m perf.Metric, value float64) {
	p.metricValues = append(p.metricValues, metricValuePair{m, value})
}
// SetDuration records one occurrence of the named perf event, appending the
// duration to that event's history.
func (p *PerfData) SetDuration(name string, duration float64) {
	// append on a missing (nil) map entry allocates a new slice, so the
	// original copy-out through a temporary was unnecessary.
	p.durations[name] = append(p.durations[name], duration)
}
// averageDurations returns the arithmetic mean of durations.
// An empty slice yields NaN (0/0); callers only pass non-empty slices.
func averageDurations(durations []float64) float64 {
	var total float64
	for _, d := range durations {
		total += d
	}
	return total / float64(len(durations))
}
// Save saves perf data into output directory.
func (p *PerfData) Save(outDir string) error {
	pv := perf.NewValues()
	for _, pair := range p.metricValues {
		pv.Set(pair.metric, pair.value)
	}
	// Each event is reported as the average duration over all occurrences.
	for name, values := range p.durations {
		pv.Set(perf.Metric{
			Name:      name,
			Unit:      "milliseconds",
			Direction: perf.SmallerIsBetter,
		}, averageDurations(values))
	}
	return pv.Save(outDir)
}

// measureStablizedUsage measures the CPU and power usage after it's cooled down for stabilizationDuration.
// NOTE(review): "Stablized" is a typo for "Stabilized"; renaming would touch all callers.
func measureStablizedUsage(ctx context.Context) (map[string]float64, error) {
	testing.ContextLog(ctx, "Sleeping to wait for CPU usage to stabilize for ", stabilizationDuration)
	if err := testing.Sleep(ctx, stabilizationDuration); err != nil {
		return nil, errors.Wrap(err, "failed to wait for CPU usage to stabilize")
	}
	testing.ContextLog(ctx, "Measuring CPU usage for ", measureDuration)
	return mediacpu.MeasureUsage(ctx, measureDuration)
}
// MeasurePreviewPerformance measures the performance of preview with QR code detection on and off.
func MeasurePreviewPerformance(ctx context.Context, app *App, perfData *PerfData, facing Facing) error {
	testing.ContextLog(ctx, "Switching to photo mode")
	if err := app.SwitchMode(ctx, Photo); err != nil {
		return errors.Wrap(err, "failed to switch to photo mode")
	}
	scanBarcode, err := app.State(ctx, "enable-scan-barcode")
	if err != nil {
		return errors.Wrap(err, "failed to check barcode state")
	}
	if scanBarcode {
		return errors.New("QR code detection should be off by default")
	}
	usage, err := measureStablizedUsage(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to measure CPU and power usage")
	}
	// BUG FIX: the original re-declared cpuUsage/powerUsage with := inside
	// the if statements, shadowing the outer variables. The outer values
	// therefore stayed 0 and the overhead calculations below never used the
	// measured baseline.
	var cpuUsage, powerUsage float64
	if v, exist := usage["cpu"]; exist {
		cpuUsage = v
		testing.ContextLogf(ctx, "Measured preview CPU usage: %.1f%%", cpuUsage)
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("cpu_usage_preview-facing-%s", facing),
			Unit:      "percent",
			Direction: perf.SmallerIsBetter,
		}, cpuUsage)
	} else {
		testing.ContextLog(ctx, "Failed to measure preview CPU usage")
	}
	if v, exist := usage["power"]; exist {
		powerUsage = v
		testing.ContextLogf(ctx, "Measured preview power usage: %.1f Watts", powerUsage)
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("power_usage_preview-facing-%s", facing),
			Unit:      "Watts",
			Direction: perf.SmallerIsBetter,
		}, powerUsage)
	} else {
		testing.ContextLog(ctx, "Failed to measure preview power usage")
	}
	// Enable QR code detection and measure the performance again.
	if err := app.EnableQRCodeDetection(ctx); err != nil {
		return errors.Wrap(err, "failed to ensure QR code detection is enabled")
	}
	usageQR, err := measureStablizedUsage(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to measure CPU and power usage with QR code detection")
	}
	if err := app.DisableQRCodeDetection(ctx); err != nil {
		return errors.Wrap(err, "failed to ensure QR code detection is disabled")
	}
	if cpuUsageQR, exist := usageQR["cpu"]; exist {
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("cpu_usage_qrcode-facing-%s", facing),
			Unit:      "percent",
			Direction: perf.SmallerIsBetter,
		}, cpuUsageQR)
		// Report detection overhead only when a baseline was measured.
		if cpuUsage != 0 {
			overhead := math.Max(0, cpuUsageQR-cpuUsage)
			testing.ContextLogf(ctx, "Measured QR code detection CPU usage: %.1f%%, overhead = %.1f%%", cpuUsageQR, overhead)
			perfData.SetMetricValue(perf.Metric{
				Name:      fmt.Sprintf("cpu_overhead_qrcode-facing-%s", facing),
				Unit:      "percent",
				Direction: perf.SmallerIsBetter,
			}, overhead)
		}
	} else {
		testing.ContextLog(ctx, "Failed to measure preview CPU usage with QR code detection")
	}
	if powerUsageQR, exist := usageQR["power"]; exist {
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("power_usage_qrcode-facing-%s", facing),
			Unit:      "Watts",
			Direction: perf.SmallerIsBetter,
		}, powerUsageQR)
		if powerUsage != 0 {
			overhead := math.Max(0, powerUsageQR-powerUsage)
			testing.ContextLogf(ctx, "Measured QR code detection power usage: %.1f Watts, overhead = %.1f Watts", powerUsageQR, overhead)
			perfData.SetMetricValue(perf.Metric{
				Name:      fmt.Sprintf("power_overhead_qrcode-facing-%s", facing),
				Unit:      "Watts",
				Direction: perf.SmallerIsBetter,
			}, overhead)
		}
	} else {
		testing.ContextLog(ctx, "Failed to measure preview power usage with QR code detection")
	}
	return nil
}
// MeasureRecordingPerformance measures the performance of video recording.
func MeasureRecordingPerformance(ctx context.Context, app *App, perfData *PerfData, facing Facing) error {
	testing.ContextLog(ctx, "Switching to video mode")
	if err := app.SwitchMode(ctx, Video); err != nil {
		return errors.Wrap(err, "failed to switch to video mode")
	}
	recordingStartTime, err := app.StartRecording(ctx, TimerOff)
	if err != nil {
		return errors.Wrap(err, "failed to start recording for performance measurement")
	}
	// Usage is sampled while the recording is still in progress.
	usage, err := measureStablizedUsage(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to measure CPU and power usage")
	}
	if _, _, err := app.StopRecording(ctx, false, recordingStartTime); err != nil {
		return errors.Wrap(err, "failed to stop recording for performance measurement")
	}
	if cpuUsage, exist := usage["cpu"]; exist {
		testing.ContextLogf(ctx, "Measured recording CPU usage: %.1f%%", cpuUsage)
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("cpu_usage_recording-facing-%s", facing),
			Unit:      "percent",
			Direction: perf.SmallerIsBetter,
		}, cpuUsage)
	} else {
		testing.ContextLog(ctx, "Failed to measure recording CPU usage")
	}
	if powerUsage, exist := usage["power"]; exist {
		testing.ContextLogf(ctx, "Measured recording power usage: %.1f Watts", powerUsage)
		perfData.SetMetricValue(perf.Metric{
			Name:      fmt.Sprintf("power_usage_recording-facing-%s", facing),
			Unit:      "Watts",
			Direction: perf.SmallerIsBetter,
		}, powerUsage)
	} else {
		testing.ContextLog(ctx, "Failed to measure recording power usage")
	}
	return nil
}
// MeasureTakingPicturePerformance takes a picture and measure the performance of UI operations.
func MeasureTakingPicturePerformance(ctx context.Context, app *App) error {
	// The preview must be running before any UI interaction.
	if err := app.WaitForVideoActive(ctx); err != nil {
		return err
	}
	testing.ContextLog(ctx, "Switching to photo mode")
	if err := app.SwitchMode(ctx, Photo); err != nil {
		return err
	}
	// The photo result itself is not needed here, only the timing events.
	_, err := app.TakeSinglePhoto(ctx, TimerOff)
	return err
}
// MeasureGifRecordingPerformance records a gif and measure the performance of UI operations.
func MeasureGifRecordingPerformance(ctx context.Context, app *App) error {
	// The preview must be running before any UI interaction.
	if err := app.WaitForVideoActive(ctx); err != nil {
		return err
	}
	testing.ContextLog(ctx, "Switching to video mode")
	if err := app.SwitchMode(ctx, Video); err != nil {
		return err
	}
	// The gif result itself is not needed here, only the timing events.
	_, err := app.RecordGif(ctx, true)
	return err
}
// nonAlphanumericRE matches runs of characters that are not allowed in metric
// names. Compiled once at package scope instead of once per perf entry.
var nonAlphanumericRE = regexp.MustCompile("[^a-zA-Z0-9]+")

// CollectPerfEvents aggregates all perf events collected since app launch
// into perfData, recording one duration per occurrence of each event.
// (The original doc comment claimed a map is returned; the method actually
// returns only an error and stores results via perfData.SetDuration.)
func (a *App) CollectPerfEvents(ctx context.Context, perfData *PerfData) error {
	entries, err := a.appWindow.Perfs(ctx)
	if err != nil {
		return err
	}
	// informativeEventName appends the camera facing, reduced to
	// alphanumerics (e.g. "(not-set)" -> "notset"), to the event name so the
	// metric name stays valid.
	informativeEventName := func(entry testutil.PerfEntry) string {
		if len(entry.PerfInfo.Facing) > 0 {
			validFacingString := nonAlphanumericRE.ReplaceAllString(entry.PerfInfo.Facing, "")
			return fmt.Sprintf(`%s-facing-%s`, entry.Event, validFacingString)
		}
		return entry.Event
	}
	durationMap := make(map[string][]float64)
	for _, entry := range entries {
		name := informativeEventName(entry)
		perfData.SetDuration(name, entry.Duration)
		// append handles the missing-key case; no copy-out needed.
		durationMap[name] = append(durationMap[name], entry.Duration)
	}
	for name, values := range durationMap {
		testing.ContextLogf(ctx, "Perf event: %s => %f ms", name, averageDurations(values))
	}
	return nil
}
|
package main
import (
	"fmt"
	"sort"
)
// main prints a topologically sorted course list, one per line.
func main() {
	// BUG FIX: the original referenced an undefined package-level `prereqs`.
	// Define sample prerequisite data locally: each key depends on the
	// listed courses, which must appear earlier in the output.
	prereqs := map[string][]string{
		"calculus":  {"linear algebra"},
		"compilers": {"data structures", "programming languages"},
	}
	for i, course := range topoSort(prereqs) {
		fmt.Printf("%d:\t%s\n", i+1, course)
	}
}
// topoSort returns a topological ordering of the keys of m, where m maps an
// item to the items it depends on. Keys are visited in sorted order so the
// output is deterministic.
//
// BUG FIXES vs. the original: "sring" typo in the parameter type; the inner
// loop ranged over m instead of the items argument; visitAll was called with
// no argument; and nothing was returned, so the caller's range over the
// result could not compile.
func topoSort(m map[string][]string) []string {
	var order []string
	seen := make(map[string]bool)
	var visitAll func(items []string)
	visitAll = func(items []string) {
		for _, item := range items {
			if !seen[item] {
				seen[item] = true
				// Emit all prerequisites before the item itself.
				visitAll(m[item])
				order = append(order, item)
			}
		}
	}
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	visitAll(keys)
	return order
}
package main
import (
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// main demonstrates zerolog: structured fields plus one message per level.
func main() {
	// Emit timestamps as Unix seconds instead of the default RFC3339 string.
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	log.Info().
		Str("Scale", "833 cents").
		Float64("Interval", 833.09).
		Msg("Fibonacci is everywhere")
	log.Print("Print")
	log.Trace().Msg("Trace")
	log.Debug().Msg("Debug")
	log.Info().Msg("Info")
	log.Warn().Msg("Warn")
	log.Error().Msg("Error")
	// NOTE(review): Fatal logs and then calls os.Exit(1), so the Panic line
	// below never executes.
	log.Fatal().Msg("Fatal")
	log.Panic().Msg("Panic")
}
|
package main
import (
"bytes"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"github.com/gorilla/mux"
)
// groupID is a Telegram group chat identifier.
// NOTE(review): currently unused in this file — confirm before removing.
var groupID = -1001288115081

// reqBody is the JSON payload accepted by HandlerSendMessage.
type reqBody struct {
	ChatID int64  `json:"chat_id"`
	Text   string `json:"text"`
}

// sendMessageReqBody is the JSON payload sent to the Telegram sendMessage API.
// NOTE(review): structurally identical to reqBody; consider consolidating.
type sendMessageReqBody struct {
	ChatID int64  `json:"chat_id"`
	Text   string `json:"text"`
}
// HandlerSendMessage decodes a {chat_id, text} JSON body and relays it to
// Telegram. On failure it now reports an HTTP error status to the caller
// instead of silently returning 200.
func HandlerSendMessage(res http.ResponseWriter, req *http.Request) {
	body := &reqBody{}
	if err := json.NewDecoder(req.Body).Decode(body); err != nil {
		fmt.Println("could not decode request body", err)
		http.Error(res, "invalid request body", http.StatusBadRequest)
		return
	}
	if err := sendMessage(body.ChatID, body.Text); err != nil {
		fmt.Println("error in sending reply:", err)
		http.Error(res, "failed to send message", http.StatusInternalServerError)
		return
	}
}
// HandlerUpdateMembers refreshes the stored member list from Telegram
// updates. On failure it now reports 500 instead of silently returning 200.
func HandlerUpdateMembers(res http.ResponseWriter, req *http.Request) {
	if err := updateMembers(); err != nil {
		fmt.Println("error in sending reply:", err)
		http.Error(res, "failed to update members", http.StatusInternalServerError)
		return
	}
}
// sendMessage posts text to the given Telegram chat via the Bot API.
// SECURITY(review): the bot token is hard-coded in the URL; it should be
// loaded from configuration/environment and the leaked token revoked.
func sendMessage(chatID int64, text string) error {
	reqBody := &sendMessageReqBody{
		ChatID: chatID,
		Text:   text,
	}
	reqBytes, err := json.Marshal(reqBody)
	if err != nil {
		return err
	}
	res, err := http.Post("https://api.telegram.org/bot1880447222:AAFJkmhX4V9u7mK5mUtTQoJTeQr4YjSTWPg/sendMessage", "application/json", bytes.NewBuffer(reqBytes))
	if err != nil {
		return err
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		// BUG FIX: missing space after "status" in the error message.
		return errors.New("unexpected status " + res.Status)
	}
	return nil
}
func updateMembers() error {
res, err := http.Get("https://api.telegram.org/bot1880447222:AAFJkmhX4V9u7mK5mUtTQoJTeQr4YjSTWPg/getUpdates")
if err != nil {
return err
}
var data map[string][]map[string]map[string]map[string]int64
if res.StatusCode == http.StatusOK {
bodyBytes, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatal(err)
}
json.Unmarshal([]byte(bodyBytes), &data)
listUser := getListUser()
var left_chat_participant_id, new_chat_participant_id int64
left_chat_participant_id = data["result"][len(data["result"])-1]["message"]["left_chat_participant"]["id"]
new_chat_participant_id = data["result"][len(data["result"])-2]["message"]["new_chat_participant"]["id"]
new_chat_participant_id = new_chat_participant_id + 2
left_chat_participant_id += 1
if new_chat_participant_id > 0 {
index := find(listUser, new_chat_participant_id)
if index == -1 {
listUser = append(listUser, new_chat_participant_id)
}
}
if left_chat_participant_id > 0 {
index := find(listUser, left_chat_participant_id)
if index != -1 {
listUser = removeIndex(listUser, index)
}
}
updateListUser(listUser)
} else {
return errors.New("unexpected status" + res.Status)
}
return nil
}
// handleRequests registers the HTTP routes and blocks serving on port 3000.
func handleRequests() {
	myRouter := mux.NewRouter().StrictSlash(true)
	myRouter.HandleFunc("/sendMessage", HandlerSendMessage)
	myRouter.HandleFunc("/updateMembers", HandlerUpdateMembers)
	// ListenAndServe only returns on failure; log.Fatal then exits.
	log.Fatal(http.ListenAndServe(":3000", myRouter))
}
// removeIndex returns arr with the element at index removed.
// NOTE: this mutates arr's backing array; callers must use the returned
// slice and must not rely on the original slice contents afterwards.
func removeIndex(arr []int64, index int) []int64 {
	return append(arr[:index], arr[index+1:]...)
}
// find returns the index of elementToFind in arr, or -1 when absent.
func find(arr []int64, elementToFind int64) int {
	for idx := range arr {
		if arr[idx] == elementToFind {
			return idx
		}
	}
	return -1
}
// getListUser reads the persisted user IDs from the first row of
// list_users.csv. An empty or unreadable file yields an empty list.
func getListUser() []int64 {
	csvFile, err := os.Open("list_users.csv")
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the file is now closed on every return path.
	defer csvFile.Close()
	r := csv.NewReader(csvFile)
	record, err := r.Read()
	if err != nil {
		// BUG FIX: the read error (e.g. io.EOF on an empty file) was
		// silently ignored; treat it as an empty user list.
		return []int64{}
	}
	fmt.Println(record)
	new_record := []int64{}
	for i := range record {
		// Unparseable entries become 0, matching the original behavior.
		number, _ := strconv.ParseInt(record[i], 10, 64)
		new_record = append(new_record, number)
	}
	return new_record
}
// updateListUser overwrites list_users.csv with the given user IDs as a
// single CSV row.
func updateListUser(listUser []int64) {
	record_to_update := []string{}
	for i := range listUser {
		// BUG FIX: use FormatInt instead of Itoa(int(...)), which would
		// truncate 64-bit IDs on 32-bit platforms.
		record_to_update = append(record_to_update, strconv.FormatInt(listUser[i], 10))
	}
	fmt.Println(record_to_update)
	csvFile, err := os.Create("list_users.csv")
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: close the file on every path.
	defer csvFile.Close()
	w := csv.NewWriter(csvFile)
	if err := w.Write(record_to_update); err != nil {
		log.Fatal(err)
	}
	w.Flush()
	// Surface any buffered write error instead of dropping it.
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
}
// Finally, the main function starts our server on port 3000.
func main() {
	handleRequests()
}
|
package parser
import (
"testing"
"github.com/amsa/doop/common"
"github.com/stretchr/testify/assert"
)
// TestSelect exercises SELECT parsing: op, table name, columns, and that any
// trailing clause (WHERE/ORDER BY/GROUP BY) lands verbatim in Tail.
func TestSelect(t *testing.T) {
	parser := MakeSqlParser()
	sql, err := parser.Parse(`SELECT * FROM users;`)
	common.HandleError(err)
	assert.Equal(t, "SELECT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "*", sql.Columns)
	//fmt.Printf("%#v\n", sql)
	sql, err = parser.Parse(`SELECT * FROM users WHERE id=2 AND username='amsa'`)
	common.HandleError(err)
	assert.Equal(t, "SELECT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "*", sql.Columns)
	assert.Equal(t, "WHERE id=2 AND username='amsa'", sql.Tail)
	//fmt.Printf("%#v\n", sql)
	sql, err = parser.Parse(`SELECT * FROM users ORDER BY fname`)
	common.HandleError(err)
	assert.Equal(t, "SELECT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "*", sql.Columns)
	assert.Equal(t, "ORDER BY fname", sql.Tail)
	sql, err = parser.Parse(`SELECT * FROM users WHERE id=2 AND username='amsa' ORDER BY id GROUP BY test`)
	common.HandleError(err)
	assert.Equal(t, "SELECT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "*", sql.Columns)
	assert.Equal(t, "WHERE id=2 AND username='amsa' ORDER BY id GROUP BY test", sql.Tail)
}

// TestInsert exercises INSERT parsing with and without an explicit column list.
func TestInsert(t *testing.T) {
	parser := MakeSqlParser()
	sql, err := parser.Parse(`INSERT INTO users VALUES ('Amin', 'Saeidi');`)
	common.HandleError(err)
	assert.Equal(t, "INSERT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "'Amin', 'Saeidi'", sql.Values)
	sql, err = parser.Parse(`INSERT INTO users (fname, lname) VALUES ('Amin', 'Saeidi');`)
	common.HandleError(err)
	assert.Equal(t, "INSERT", sql.Op)
	assert.Equal(t, "users", sql.TblName)
	assert.Equal(t, "fname, lname", sql.Columns)
	assert.Equal(t, "'Amin', 'Saeidi'", sql.Values)
}
|
package mint
import "fmt"
// MaskString6P4 masks a string exposing first 6 and 4 last symbols, like: YeAHCqTJk4aFnHXGV4zaaf3dTqJkdjQzg8TJENmP3zxDMpa97 => YeAHCq***pa97
// Strings of 10 runes or fewer are returned unchanged.
func MaskString6P4(s string) string {
	runes := []rune(s)
	// Too short to mask meaningfully.
	if len(runes) <= 10 {
		return s
	}
	head := string(runes[:6])
	tail := string(runes[len(runes)-4:])
	return fmt.Sprintf("%s***%s", head, tail)
}
|
package log
import (
"os"
"github.com/op/go-logging"
)
// Log is the shared logger instance for the application.
var Log = logging.MustGetLogger("FrankLog")

// format defines the log line layout: colored level, time, caller, message.
var format = logging.MustStringFormatter(
	`%{color}%{level:.4s} %{time:15:04:05.000} %{shortfunc} ▶%{color:reset} %{message}`,
)

// InitLogger wires a stderr backend with the formatter above; call once at
// startup before using Log.
func InitLogger() {
	backend := logging.NewLogBackend(os.Stderr, "", 0)
	formatter := logging.NewBackendFormatter(backend, format)
	//formatter.SetLevel(logging.DEBUG, "")
	// Set the backends to be used.
	logging.SetBackend(formatter)
}
|
package bd
import (
"context"
"time"
"github.com/paolapesantez/avatweet-server/models"
)
/*EliminarRelacion deletes the given relation from the database.
Returns true on success, or false plus the error on failure. */
func EliminarRelacion(relacion models.Relacion) (bool, error) {
	// Bound the delete with a 15-second timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	db := MongoCN.Database("microblogging")
	col := db.Collection("relaciones")
	_, err := col.DeleteOne(ctx, relacion)
	if err != nil {
		return false, err
	}
	return true, nil
}
|
package logger
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
)
// TestUnmarshalText verifies log-level name parsing, including
// case-insensitive names and the invalid-level error.
func TestUnmarshalText(t *testing.T) {
	cases := map[string]struct {
		input  string
		output Level
		err    error
	}{
		"select log level Not_A_Level": {"Not_A_Level", 0, ErrInvalidLogLevel},
		"select log level Bad_Input":   {"Bad_Input", 0, ErrInvalidLogLevel},
		"select log level debug":       {"debug", Debug, nil},
		"select log level DEBUG":       {"DEBUG", Debug, nil},
		"select log level info":        {"info", Info, nil},
		"select log level INFO":        {"INFO", Info, nil},
		"select log level warn":        {"warn", Warn, nil},
		"select log level WARN":        {"WARN", Warn, nil},
		"select log level Error":       {"Error", Error, nil},
		"select log level ERROR":       {"ERROR", Error, nil},
	}
	for desc, tc := range cases {
		var logLevel Level
		err := logLevel.UnmarshalText(tc.input)
		// BUG FIX: the original messages used %s on Level values and %d on
		// errors; %v renders both correctly.
		assert.Equal(t, tc.output, logLevel, fmt.Sprintf("%s: expected %v got %v", desc, tc.output, logLevel))
		assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %v got %v", desc, tc.err, err))
	}
}
// TestLevelIsAllowed verifies a message is emitted only when its level is at
// or above the configured level (ordering: Debug < Info < Warn < Error).
func TestLevelIsAllowed(t *testing.T) {
	cases := map[string]struct {
		requestedLevel Level
		allowedLevel   Level
		output         bool
	}{
		"log debug when level debug": {Debug, Debug, true},
		"log info when level debug":  {Info, Debug, true},
		"log warn when level debug":  {Warn, Debug, true},
		"log error when level debug": {Error, Debug, true},
		"log warn when level info":   {Warn, Info, true},
		"log error when level warn":  {Error, Warn, true},
		"log error when level error": {Error, Error, true},
		"log debug when level error": {Debug, Error, false},
		"log info when level error":  {Info, Error, false},
		"log warn when level error":  {Warn, Error, false},
		"log debug when level warn":  {Debug, Warn, false},
		"log info when level warn":   {Info, Warn, false},
		"log debug when level info":  {Debug, Info, false},
	}
	for desc, tc := range cases {
		result := tc.requestedLevel.isAllowed(tc.allowedLevel)
		assert.Equal(t, tc.output, result, fmt.Sprintf("%s: expected %t got %t", desc, tc.output, result))
	}
}
|
package models
// TgAdminUserMoneyRecharge is the xorm model for the admin money-recharge
// table; column types/constraints are declared in the xorm struct tags.
type TgAdminUserMoneyRecharge struct {
	Id           int     `xorm:"primary_key autoincr comment('') INT(11)" json:"id"`
	Uid          int     `xorm:"int(10)" json:"uid"`
	Cid          int     `xorm:"int(10)" json:"cid"`
	Way          string  `xorm:"varchar(200)" json:"way"`
	Money        float64 `xorm:"decimal(10,2)" json:"money"`
	CreateBy     int     `xorm:"int(10)" json:"create_by"`
	CreateTime   string  `xorm:"datetime" json:"create_time"`
	RechargeFrom uint8   `xorm:"tinyint(1)" json:"recharge_from"`
	ToAccountNum string  `xorm:"varchar(255)" json:"to_account_num"`
	OrderSn      string  `xorm:"varchar(255)" json:"order_sn"`
	Status       uint8   `xorm:"tinyint(1)" json:"status"`
}
package shlex
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tilt-dev/tilt/internal/tiltfile/starkit"
)
// TestQuote verifies that shlex.quote escapes a string containing single
// quotes using the POSIX '"'"' idiom, and that print echoes the result with a
// trailing newline.
func TestQuote(t *testing.T) {
	f := starkit.NewFixture(t, NewPlugin())
	f.File("Tiltfile", `
s = shlex.quote("foo '$FOO'")
print(shlex.quote("foo '$FOO'"))
`)
	_, err := f.ExecFile("Tiltfile")
	require.NoError(t, err)
	require.Equal(t, `'foo '"'"'$FOO'"'"''
`, f.PrintOutput())
}
|
package main
import (
"fmt"
"os"
"encoding/csv"
"io/ioutil"
"database/sql"
_ "github.com/mattn/go-sqlite3"
"strings"
"flag"
"github.com/pkg/errors"
"path"
)
// GTFSFile describes one GTFS feed file and its expected fields.
// NOTE(review): appears unused in this file — confirm before removing.
type GTFSFile struct {
	name   string
	fields []string
}
func main() {
var gtfsDir string
var dbPath string
var batchSize int
flag.StringVar(>fsDir, "source", "", "Directory path for gtfs files is passed in source flag")
flag.StringVar(&dbPath, "db", "", "target db path goes in db flag")
flag.IntVar(&batchSize, "size", 100000, "batch size can be passed default: 100000")
flag.Parse()
if gtfsDir == "" || dbPath == "" {
panic(errors.New("source and db are params are required"))
}
db, err := sql.Open("sqlite3", dbPath)
checkError(err)
defer db.Close()
files, err := getFileNames(gtfsDir)
checkError(err)
for _,file := range files {
filePath := fmt.Sprintf(path.Join(gtfsDir, file))
data, headers, err := readCSV(filePath, true)
checkError(err)
name := strings.Replace(file, ".txt", "", 1)
db.Exec(getCreateTableQuery(name, headers))
fmt.Printf("%s created\n", name)
if batchSize > 0 {
batchSliceTask(data, batchSize, getMultiInsertTask(db, name))
} else {
db.Exec(getMultiInsertQuery(name, data))
}
}
}
// getFileNames lists the names of the regular files directly inside dirPath.
func getFileNames(dirPath string) (fileNames []string, err error) {
	entries, readErr := ioutil.ReadDir(dirPath)
	if readErr != nil {
		return nil, readErr
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		fileNames = append(fileNames, entry.Name())
	}
	return fileNames, nil
}
// readCSV loads filePath as CSV. When hasHeader is true the first row is
// split off and returned separately and all rows must share its width;
// otherwise rows may vary in length.
func readCSV(filePath string, hasHeader bool) (fileData [][]string, header []string, err error) {
	f, openErr := os.Open(filePath)
	if openErr != nil {
		err = openErr
		return
	}
	defer f.Close()
	reader := csv.NewReader(f)
	if hasHeader {
		// 0 means: require every record to match the first row's width.
		reader.FieldsPerRecord = 0
	} else {
		// -1 disables the per-record field-count check.
		reader.FieldsPerRecord = -1
	}
	fileData, err = reader.ReadAll()
	if err != nil {
		return
	}
	if hasHeader && len(fileData) > 0 {
		header = fileData[0]
		deleteFromSlice(&fileData, 0)
	}
	return
}
// deleteFromSlice removes the element at index i from *s in place.
func deleteFromSlice(s *[][]string, i int) {
	rows := *s
	copy(rows[i:], rows[i+1:])
	*s = rows[:len(rows)-1]
}
// checkError panics on any non-nil error; used for fail-fast CLI flow.
func checkError(err error) {
	if err != nil {
		panic(err)
	}
}
// getMultiInsertTask returns a batch callback that inserts the given rows
// into table name and logs progress.
func getMultiInsertTask(db *sql.DB, name string) func(data [][]string) {
	return func(data [][]string) {
		query := getMultiInsertQuery(name, data)
		// BUG FIX: the Exec error was silently discarded; fail fast like
		// the rest of this program does via checkError.
		_, err := db.Exec(query)
		checkError(err)
		fmt.Printf("%d records added to %s\n", len(data), name)
	}
}
// getCreateTableQuery builds a CREATE TABLE statement declaring every header
// column with SQLite type "text".
// NOTE(review): name/headers are interpolated directly into SQL; acceptable
// only because they come from local GTFS files, never untrusted input.
func getCreateTableQuery(name string, headers []string) (finalQuery string) {
	// BUG FIX: the joiner was " test, ", which declared every column but
	// the last with the bogus type "test" instead of "text".
	innerQuery := strings.Join(headers, " text, ") + " text"
	finalQuery = fmt.Sprintf("Create table %s (%s);", name, innerQuery)
	return
}
// getMultiInsertQuery builds one multi-row INSERT statement for table name,
// wrapping every value in single quotes.
// NOTE: values are not escaped; trusted local input only.
func getMultiInsertQuery(name string, data [][]string) (finalQuery string) {
	rows := make([]string, 0, len(data))
	for _, record := range data {
		rows = append(rows, "('"+strings.Join(record, "', '")+"')")
	}
	finalQuery = fmt.Sprintf("Insert into %s values %s;", name, strings.Join(rows, ", "))
	return
}
// batchSliceTask feeds s to task in chunks of at most size rows; the final
// (possibly short) chunk is passed as-is. As in the original, a non-positive
// size with a non-empty slice never terminates; callers guard size > 0.
func batchSliceTask(s [][]string, size int, task func(data [][]string)) {
	for len(s) > 0 {
		if size >= len(s) {
			// Remaining rows fit in one final chunk.
			task(s)
			return
		}
		task(s[:size])
		s = s[size:]
	}
}
|
package controllers
import "edwardhey.com/football/wx/models"
// AsyncschedulerController exposes async-scheduler endpoints for toggling an
// activity's status.
type AsyncschedulerController struct {
	BaseController
}

// DeactiveActivity marks the activity identified by the "ID" request
// parameter as complete. Responds as JSON.
func (c *AsyncschedulerController) DeactiveActivity() {
	c.IsJSON = true
	// safe := &io.LimitedReader{R: c.Ctx..Context.Request.Body, N: 100000000}
	_id, err := c.GetInt64("ID")
	if err != nil {
		// "ID 不合法" = the ID parameter is invalid.
		c.ThrowErr("ID 不合法")
	}
	id := uint64(_id)
	activity := models.Get(models.TActivity, id).(*models.Activity)
	// NOTE(review): an unsaved/missing record is skipped silently here but
	// treated as an error in ActiveActivity — confirm the asymmetry.
	if activity.IsNew() {
		return
	}
	activity.Status = models.ActivityStatusComplete
	models.Save(activity)
}

// ActiveActivity re-activates the activity identified by the "ID" request
// parameter. Responds as JSON; throws when the activity is missing.
func (c *AsyncschedulerController) ActiveActivity() {
	c.IsJSON = true
	// safe := &io.LimitedReader{R: c.Ctx..Context.Request.Body, N: 100000000}
	_id, err := c.GetInt64("ID")
	if err != nil {
		// "ID 不合法" = the ID parameter is invalid.
		c.ThrowErr("ID 不合法")
	}
	id := uint64(_id)
	activity := models.Get(models.TActivity, id).(*models.Activity)
	if activity.IsNew() {
		// "活动已下线或不存在" = the activity is offline or does not exist.
		c.ThrowErr("活动已下线或不存在")
	}
	activity.Status = models.ActivityStatusActivated
	// fmt.Println(activity)
	models.Save(activity)
	// fmt.Println(id, aa, string(c.Ctx.Input.RequestBody))
	// // c.JsonData["data"] = "123"
	// c.StartSession()
	// fmt.Println(c.Data, c.GetSession("openID"))
	// c.Data["Website"] = "fb.edwardhey.com"
	// c.Data["Email"] = "fb@edwardhey.com"
	// c.TplName = "index.tpl"
}
|
package main
import (
"fmt"
"os"
"github.com/kavirajk/gojek/battleship/game"
)
// main runs a battleship game fed from stdin, then prints both grids,
// per-player scores, and the final result.
func main() {
	g := game.New(os.Stdin)
	g.Play()
	fmt.Println("Player1")
	fmt.Println(g.Grid1)
	fmt.Println("Player2")
	fmt.Println(g.Grid2)
	p1Score := g.P1Score()
	p2Score := g.P2Score()
	fmt.Println("P1:", p1Score)
	fmt.Println("P2:", p2Score)
	fmt.Println(g.Result())
}
|
package ravendb
// queryFieldUtilEscapeIfNecessary wraps name in single quotes when it is not
// a plain identifier (or contains an unbalanced quote) so it can be used as a
// query field. Empty names and well-known index field names pass through
// untouched.
func queryFieldUtilEscapeIfNecessary(name string) string {
	if stringIsEmpty(name) ||
		IndexingFieldNameDocumentID == name ||
		IndexingFieldNameReduceKeyHash == name ||
		IndexingFieldNameReduceKeyValue == name ||
		IndexingFieldsNameSpatialShare == name {
		return name
	}
	escape := false
	insideEscaped := false
	for i, c := range name {
		// Quote characters toggle the "inside quoted section" state and are
		// themselves exempt from the identifier checks below.
		if c == '\'' || c == '"' {
			insideEscaped = !insideEscaped
			continue
		}
		if i == 0 {
			// First rune must start like an identifier unless quoted.
			if !isLetter(c) && c != '_' && c != '@' && !insideEscaped {
				escape = true
				break
			}
		} else {
			// Later runes: letters/digits plus a few allowed separators.
			if !isLetterOrDigit(c) && c != '_' && c != '-' && c != '@' && c != '.' && c != '[' && c != ']' && !insideEscaped {
				escape = true
				break
			}
		}
	}
	// An unterminated quote (insideEscaped still true) also forces escaping.
	if escape || insideEscaped {
		return "'" + name + "'"
	}
	return name
}
|
package cmd
import (
"fmt"
"runtime"
"github.com/spf13/cobra"
)
// NewVersionCmd returns the cobra "version" command, which prints build
// metadata (version, commit, build time and author) plus the Go runtime
// version.
func NewVersionCmd(version, commit, date, builtBy string) *cobra.Command {
	// versionCmd represents the version command
	return &cobra.Command{
		Use:   "version",
		Short: "Prints the version of chekr.",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("Version: %v \n", version)
			fmt.Printf("Commit: %v \n", commit)
			// BUG FIX: "Buit" -> "Built" in the two lines below.
			fmt.Printf("Built at: %v \n", date)
			fmt.Printf("Built by: %v \n", builtBy)
			fmt.Printf("Go Version: %v \n", runtime.Version())
		},
	}
}
|
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/xindong/frontd/aes256cbc"
)
var (
	// _echoServerAddr is where the local echo server listens.
	_echoServerAddr = []byte("127.0.0.1:62863")
	// _expectAESCiphertext is _echoServerAddr encrypted with _secret.
	_expectAESCiphertext = []byte("U2FsdGVkX19KIJ9OQJKT/yHGMrS+5SsBAAjetomptQ0=")
	// _secret is the shared AES passphrase used throughout the tests.
	_secret = []byte("p0S8rX680*48")
)

// servEcho runs a TCP echo server on _echoServerAddr until process exit.
func servEcho() {
	l, err := net.Listen("tcp", string(_echoServerAddr))
	if err != nil {
		fmt.Println("Error listening:", err.Error())
		os.Exit(1)
	}
	// Close the listener when the application closes.
	defer l.Close()
	fmt.Println("Listening on " + string(_echoServerAddr))
	for {
		// Listen for an incoming connection.
		c, err := l.Accept()
		if err != nil {
			fmt.Println("Error accepting: ", err.Error())
			os.Exit(1)
		}
		// Handle connections in a new goroutine.
		go func(c net.Conn) {
			defer c.Close()
			// Echo everything back; EOF and clean completion are normal.
			_, err := io.Copy(c, c)
			switch err {
			case io.EOF:
				err = nil
				return
			case nil:
				return
			}
			panic(err)
		}(c)
	}
}

// TestMain boots the echo server and the proxy under test, then runs the
// whole suite.
func TestMain(m *testing.M) {
	// start echo server
	go servEcho()
	// start listen
	os.Setenv("SECRET", string(_secret))
	go main()
	rand.Seed(time.Now().UnixNano())
	// TODO: better way to wait for server to start
	time.Sleep(time.Second)
	os.Exit(m.Run())
}

// TestTextDecryptAES checks the fixture ciphertext decrypts back to the echo
// server address.
func TestTextDecryptAES(t *testing.T) {
	o := aes256cbc.New()
	dec, err := o.Decrypt(_secret, _expectAESCiphertext)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, _echoServerAddr) {
		panic(errors.New("not match"))
	}
}

// encryptText encrypts plaintext with the given passphrase (AES-256-CBC).
func encryptText(plaintext, passphrase []byte) ([]byte, error) {
	o := aes256cbc.New()
	return o.Encrypt(passphrase, plaintext)
}
// randomBytes returns n pseudo-random bytes from math/rand (test data only;
// not cryptographically secure).
func randomBytes(n int) []byte {
	b := make([]byte, n)
	for i := range b {
		b[i] = byte(rand.Int())
	}
	return b
}
// testEchoRound writes a random payload and asserts the echoed reply matches.
func testEchoRound(conn net.Conn) {
	conn.SetDeadline(time.Now().Add(time.Second * 10))
	// Random payload between 10 and 2057 bytes.
	n := rand.Int()%2048 + 10
	out := randomBytes(n)
	n0, err := conn.Write(out)
	if err != nil {
		panic(err)
	}
	rcv := make([]byte, n)
	n1, err := io.ReadFull(conn, rcv)
	if err != nil && err != io.EOF {
		panic(err)
	}
	if !bytes.Equal(out[:n0], rcv[:n1]) {
		fmt.Println("out: ", n0, "in:", n1)
		fmt.Println("out: ", hex.EncodeToString(out), "in:", hex.EncodeToString(rcv))
		panic(errors.New("echo server reply is not match"))
	}
}

// TestEchoServer runs a random number (0-9) of echo round-trips directly
// against the echo server, bypassing the proxy.
func TestEchoServer(t *testing.T) {
	conn, err := net.Dial("tcp", string(_echoServerAddr))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	n := rand.Int() % 10
	for i := 0; i < n; i++ {
		testEchoRound(conn)
	}
}

// testProtocol connects to the proxy, sends the encrypted backend address
// followed by a newline, then verifies echo traffic flows end to end.
func testProtocol(cipherAddr []byte) {
	// * test decryption
	conn, err := net.Dial("tcp", "127.0.0.1:"+_DefaultPort)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	_, err = conn.Write(cipherAddr)
	if err != nil {
		panic(err)
	}
	_, err = conn.Write([]byte("\n"))
	if err != nil {
		panic(err)
	}
	for i := 0; i < 5; i++ {
		testEchoRound(conn)
	}
}

// TestProtocolDecrypt encrypts the echo address afresh and runs the protocol
// round-trip test through the proxy.
func TestProtocolDecrypt(*testing.T) {
	b, err := encryptText(_echoServerAddr, _secret)
	if err != nil {
		panic(err)
	}
	testProtocol(b)
}

// TODO: test decryption with extra bytes in packet and check data
// TODO: test decryption with seperated packet simulate loss connection and check data
// TODO: benchmark 100, 1000 connect with 1k 10k 100k 1m data
// BenchmarkEncryptText measures encrypting a fixed 255-byte payload with a
// 32-byte passphrase.
func BenchmarkEncryptText(b *testing.B) {
	s1 := randomBytes(255)
	s2 := randomBytes(32)
	for i := 0; i < b.N; i++ {
		_, err := encryptText(s1, s2)
		if err != nil {
			panic(err)
		}
	}
}
// BenchmarkDecryptText measures AES-256-CBC decryption of the fixture
// ciphertext.
func BenchmarkDecryptText(b *testing.B) {
	// Hoisted out of the loop: constructing the codec is setup, not the
	// operation under measurement.
	// NOTE(review): assumes aes256cbc.New() is reusable across Decrypt
	// calls — TestTextDecryptAES uses the same single-instance pattern.
	o := aes256cbc.New()
	for i := 0; i < b.N; i++ {
		_, err := o.Decrypt(_secret, _expectAESCiphertext)
		if err != nil {
			panic(err)
		}
	}
}
// BenchmarkEcho measures raw echo round-trips without the proxy.
func BenchmarkEcho(b *testing.B) {
	for i := 0; i < b.N; i++ {
		TestEchoServer(&testing.T{})
	}
}

// BenchmarkLatency measures proxied round-trips reusing one pre-encrypted
// backend address (the cache-hit path).
func BenchmarkLatency(b *testing.B) {
	cipherAddr, err := encryptText(_echoServerAddr, _secret)
	if err != nil {
		panic(err)
	}
	for i := 0; i < b.N; i++ {
		testProtocol(cipherAddr)
	}
}

// BenchmarkNoHitLatency re-encrypts the address every iteration, so each
// request presents a fresh ciphertext (the cache-miss path).
func BenchmarkNoHitLatency(b *testing.B) {
	for i := 0; i < b.N; i++ {
		TestProtocolDecrypt(&testing.T{})
	}
}
// with echo server with random hanging
// * benchmark latency
// * benchmark throughput
// * benchmark copy-on-write performance BackendAddrCache
// * benchmark memory footprint
|
package responses
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
func createTestServer(f http.HandlerFunc) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(f))
}
func callTestServer(ts *httptest.Server) (string, int, error) {
res, err := http.Get(ts.URL)
if err != nil {
return "", -1, err
}
body, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
return "", -1, err
}
return string(body), res.StatusCode, nil
}
func TestCheckCodes(t *testing.T) {
assert := assert.New(t)
tests := []struct {
Function http.HandlerFunc
Code int
}{
{
func(w http.ResponseWriter, r *http.Request) {
RespondSuccess(w, nil)
},
200,
},
{
func(w http.ResponseWriter, r *http.Request) {
RespondNotFound(w)
},
404,
},
{
func(w http.ResponseWriter, r *http.Request) {
RespondUnauthorizedBearerJWT(w)
},
401,
},
{
func(w http.ResponseWriter, r *http.Request) {
RespondInternalError(w, "")
},
500,
},
{
func(w http.ResponseWriter, r *http.Request) {
RespondBadRequest(w, "")
},
400,
},
}
for _, test := range tests {
func() {
ts := createTestServer(test.Function)
defer ts.Close()
_, code, err := callTestServer(ts)
if !assert.Nil(err) {
return
}
assert.Equal(test.Code, code)
}()
}
}
// TestNoBody checks that RespondSuccess with a nil payload writes an
// empty response body.
func TestNoBody(t *testing.T) {
	assert := assert.New(t)
	ts := createTestServer(func(w http.ResponseWriter, r *http.Request) {
		RespondSuccess(w, nil)
	})
	defer ts.Close()
	body, _, err := callTestServer(ts)
	if !assert.Nil(err) {
		return
	}
	// body is already a string; compare directly.
	assert.Equal("", body)
}
// Unsure how to check JSON equivalence
// TestJsonMarshaling verifies the exact JSON bodies produced by the
// Respond* helpers for representative payloads.
func TestJsonMarshaling(t *testing.T) {
	assert := assert.New(t)
	tests := []struct {
		Function http.HandlerFunc
		JSON string
	}{
		{
			Function: func(w http.ResponseWriter, r *http.Request) { RespondSuccess(w, nil) },
			JSON: "",
		},
		{
			Function: func(w http.ResponseWriter, r *http.Request) {
				payload := struct {
					S string `json:"string"`
					I int `json:"int"`
				}{"Test", 34}
				RespondSuccess(w, payload)
			},
			JSON: "{\"string\":\"Test\",\"int\":34}\n",
		},
		{
			Function: func(w http.ResponseWriter, r *http.Request) { RespondBadRequest(w, "Bad request") },
			JSON: "{\"error\":\"Bad request\"}\n",
		},
	}
	for _, tc := range tests {
		// Closure so each server is closed before the next case runs.
		func() {
			ts := createTestServer(tc.Function)
			defer ts.Close()
			body, _, err := callTestServer(ts)
			if !assert.Nil(err) {
				return
			}
			assert.Equal(tc.JSON, body)
		}()
	}
}
// TestHeaderForUnauthorized checks that the 401 helper sets the
// WWW-Authenticate challenge header expected by bearer-JWT clients.
func TestHeaderForUnauthorized(t *testing.T) {
	assert := assert.New(t)
	ts := createTestServer(func(w http.ResponseWriter, r *http.Request) {
		RespondUnauthorizedBearerJWT(w)
	})
	defer ts.Close()
	res, err := http.Get(ts.URL)
	if !assert.Nil(err) {
		return
	}
	// Close the body (previously leaked) so the connection is released.
	defer res.Body.Close()
	assert.Equal(`Bearer token_type="JWT"`, res.Header.Get("WWW-Authenticate"))
}
|
package main
// Package-level state for an evaluation-order experiment: x is a two
// element slice and i is the index that f mutates as a side effect.
var x, i = []int{1, 2}, 0
// f sets i to 1 and returns 9; calling it inside an index expression
// (as the commented-out main does) exposes when the index operand is
// evaluated relative to the function call on the right-hand side.
func f() int { i = 1; return 9 }
//func main() {
//	x[i] = f()
//	println(x[0], x[1])
//}
|
package utils
import (
"io"
"log"
"strings"
)
type loggerWriter struct {
logger *log.Logger
}
func (lw loggerWriter) Write(p []byte) (int, error) {
str := strings.Trim(string(p), "\x00")
l := len(str)
lw.logger.Printf("%s", strings.Trim(string(p), "\x00"))
return l, nil
}
// LogWriter turns a *log.Logger into an io.Writer
func LogWriter(logger *log.Logger) io.Writer {
return loggerWriter{
logger: logger,
}
}
|
/*
* @lc app=leetcode.cn id=1352 lang=golang
*
* [1352] 最后 K 个数的乘积
*
* https://leetcode.cn/problems/product-of-the-last-k-numbers/description/
*
* algorithms
* Medium (47.16%)
* Likes: 89
* Dislikes: 0
* Total Accepted: 10.5K
* Total Submissions: 22.3K
* Testcase Example: '["ProductOfNumbers","add","add","add","add","add","getProduct","getProduct","getProduct","add","getProduct"]\n' +
'[[],[3],[0],[2],[5],[4],[2],[3],[4],[8],[2]]'
*
* 请你实现一个「数字乘积类」ProductOfNumbers,要求支持下述两种方法:
*
* 1. add(int num)
*
*
* 将数字 num 添加到当前数字列表的最后面。
*
*
* 2. getProduct(int k)
*
*
* 返回当前数字列表中,最后 k 个数字的乘积。
* 你可以假设当前列表中始终 至少 包含 k 个数字。
*
*
* 题目数据保证:任何时候,任一连续数字序列的乘积都在 32-bit 整数范围内,不会溢出。
*
*
*
* 示例:
*
* 输入:
*
* ["ProductOfNumbers","add","add","add","add","add","getProduct","getProduct","getProduct","add","getProduct"]
* [[],[3],[0],[2],[5],[4],[2],[3],[4],[8],[2]]
*
* 输出:
* [null,null,null,null,null,null,20,40,0,null,32]
*
* 解释:
* ProductOfNumbers productOfNumbers = new ProductOfNumbers();
* productOfNumbers.add(3); // [3]
* productOfNumbers.add(0); // [3,0]
* productOfNumbers.add(2); // [3,0,2]
* productOfNumbers.add(5); // [3,0,2,5]
* productOfNumbers.add(4); // [3,0,2,5,4]
* productOfNumbers.getProduct(2); // 返回 20 。最后 2 个数字的乘积是 5 * 4 = 20
* productOfNumbers.getProduct(3); // 返回 40 。最后 3 个数字的乘积是 2 * 5 * 4 = 40
* productOfNumbers.getProduct(4); // 返回 0 。最后 4 个数字的乘积是 0 * 2 * 5 * 4 = 0
* productOfNumbers.add(8); // [3,0,2,5,4,8]
* productOfNumbers.getProduct(2); // 返回 32 。最后 2 个数字的乘积是 4 * 8 = 32
*
*
*
*
* 提示:
*
*
* add 和 getProduct 两种操作加起来总共不会超过 40000 次。
* 0 <= num <= 100
* 1 <= k <= 40000
*
*
*/
package leetcode
// @lc code=start
// ProductOfNumbers keeps running prefix products of the numbers added
// so far. pre[0] is a sentinel 1 and pre[j] is the product of the last
// j numbers added since the most recent zero; a zero resets the list
// because any window reaching past it has product zero.
type ProductOfNumbers struct {
	pre []int
}

// Constructor returns an empty list holding only the sentinel prefix.
func Constructor() ProductOfNumbers {
	return ProductOfNumbers{pre: []int{1}}
}

// Add appends num in O(1) by extending the prefix products.
func (p *ProductOfNumbers) Add(num int) {
	if num == 0 {
		// A zero wipes out every product crossing it; restart at the
		// sentinel.
		p.pre = []int{1}
		return
	}
	// Note: pre always holds at least the sentinel, so the previous
	// "empty pre" branch (which would have dropped the sentinel and
	// corrupted GetProduct) was dead code and has been removed.
	p.pre = append(p.pre, p.pre[len(p.pre)-1]*num)
}

// GetProduct returns the product of the last k numbers in O(1).
func (p *ProductOfNumbers) GetProduct(k int) int {
	n := len(p.pre)
	// Fewer than k numbers since the last zero means the window of the
	// last k numbers contains that zero.
	if n <= k {
		return 0
	}
	return p.pre[n-1] / p.pre[n-1-k]
}
/**
* Your ProductOfNumbers object will be instantiated and called as such:
* obj := Constructor();
* obj.Add(num);
* param_2 := obj.GetProduct(k);
*/
// @lc code=end
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geomfn
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/stretchr/testify/require"
)
// TestNode exercises Node, which splits the input linework at every
// self- and cross-intersection and returns the resulting
// MultiLineString. Cases cover self-intersecting LineStrings,
// MultiLineStrings, SRID propagation, EMPTY inputs, and the
// unsupported Polygon/GeometryCollection inputs (which must error).
func TestNode(t *testing.T) {
	tests := []struct {
		name string
		arg geo.Geometry
		want geo.Geometry
		wantErr bool
	}{
		{
			"LineString, 3 nodes",
			geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0)"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,5 5),(5 5,10 10,0 10,5 5),(5 5,10 0))"),
			false,
		},
		{
			"LineString, 4 nodes",
			geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0, 10 10)"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,5 5),(5 5,10 10),(10 10,0 10,5 5),(5 5,10 0,10 10))"),
			false,
		},
		{
			"LineString, two lines intersection in one point",
			geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0, 10 10, 10 5, 0 5)"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,5 5),(5 5,0 5),(10 5,5 5),(5 5,10 10),(10 10,0 10,5 5),(5 5,10 0,10 5),(10 5,10 10))"),
			false,
		},
		{
			"LineString consisting of duplicated points",
			geo.MustParseGeometry("LINESTRING(0 0, 10 10, 0 10, 10 0, 0 10, 0 0)"),
			geo.MustParseGeometry("MULTILINESTRING((5 5,10 10,0 10),(0 10,0 0),(0 0,5 5),(0 10,5 5),(5 5,10 0))"),
			false,
		},
		{
			"MultiLineString",
			geo.MustParseGeometry("MULTILINESTRING((1 1, 4 4), (1 3, 4 2))"),
			geo.MustParseGeometry("MULTILINESTRING((1 1, 2.5 2.5), (1 3, 2.5 2.5), (2.5 2.5, 4 4), (2.5 2.5, 4 2))"),
			false,
		},
		{
			"MultiLineString, one line intersected twice by the other",
			geo.MustParseGeometry("MULTILINESTRING((5 0, 5 5), (0 0, 10 2, 0 4))"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,5 1),(5 3,0 4),(5 0,5 1),(5 1,10 2,5 3),(5 1,5 3),(5 3,5 5))"),
			false,
		},
		{
			"MultiLineString with one LineString included in the other",
			geo.MustParseGeometry("MULTILINESTRING((0 0, 10 10, 0 10, 10 0, 0 10), (1 1, 4 4), (1 3, 4 2), (0 0, 10 10, 0 10, 10 0))"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,1 1),(1 1,2.5 2.5),(1 3,2.5 2.5),(2.5 2.5,4 4),(4 4,5 5),(2.5 2.5,4 2),(5 5,10 10,0 10),(0 10,5 5),(5 5,10 0))"),
			false,
		},
		{
			"MultiLineString with no nodes",
			geo.MustParseGeometry("MULTILINESTRING((1 1, 4 4), (0 0, -2 2))"),
			geo.MustParseGeometry("MULTILINESTRING((0 0,-2 2),(1 1,4 4))"),
			false,
		},
		{
			"LineString with no nodes",
			geo.MustParseGeometry("LINESTRING(0 0, -10 10, 0 10)"),
			geo.MustParseGeometry("MULTILINESTRING((0 0, -10 10, 0 10))"),
			false,
		},
		{
			"LineString with specified SRID",
			geo.MustParseGeometry("SRID=4269;LINESTRING(0 0, 10 10, 0 10, 10 0)"),
			geo.MustParseGeometry("SRID=4269;MULTILINESTRING((0 0,5 5),(5 5,10 10,0 10,5 5),(5 5,10 0))"),
			false,
		},
		{
			"unsupported type: Polygon",
			geo.MustParseGeometry("SRID=4269;POLYGON((-71.1776585052917 42.3902909739571,-71.1776820268866 42.3903701743239,-71.1776063012595 42.3903825660754,-71.1775826583081 42.3903033653531,-71.1776585052917 42.3902909739571))"),
			geo.Geometry{},
			true,
		},
		{
			"unsupported type: GeometryCollection",
			geo.MustParseGeometry("GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)))"),
			geo.Geometry{},
			true,
		},
		{
			"EMPTY LineString",
			geo.MustParseGeometry("SRID=4326;LINESTRING EMPTY"),
			geo.MustParseGeometry("SRID=4326;GEOMETRYCOLLECTION EMPTY"),
			false,
		},
		{
			"EMPTY MultiLineString",
			geo.MustParseGeometry("SRID=4326;MULTILINESTRING EMPTY"),
			geo.MustParseGeometry("SRID=4326;GEOMETRYCOLLECTION EMPTY"),
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Node(tt.arg)
			// An error is acceptable only when the case expects one.
			if (err != nil) != tt.wantErr {
				t.Errorf("Node() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			require.Equal(t, tt.want, got)
		})
	}
}
|
package main
import (
"flag"
"fmt"
"os"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/pschlump/godebug"
"gitlab.com/pschlump/PureImaginationServer/ReadConfig"
)
// EthAccount identifies the Ethereum account used for signing: its
// address plus the keystore file and the password that unlocks it.
type EthAccount struct {
	Address string
	KeyFile string
	KeyFilePassword string
}
// ConfigType is the program configuration read from the --cfg JSON
// file, plus runtime handles that are populated after startup.
type ConfigType struct {
	URL string `json:"URL" default:"http://127.0.0.1:8545/"` // Address of server, http://localhost:8545?
	ContractAddr map[string]string `json:"ContractAddr"`
	Account EthAccount
	LogAddress string `json:"LogAddress"`
	// Global Data — filled in at runtime (Connect/NewInsLogEvent), not
	// from the JSON file.
	Client *ethclient.Client
	LogIt *InsLogEventControl
	AccountKey *keystore.Key
}
// Version, when set, makes the program print the git commit and exit.
var Version = flag.Bool("version", false, "Report version of code and exit")
// Cfg is the path of the JSON configuration file to load.
var Cfg = flag.String("cfg", "cfg.json", "config file for this call")
// GitCommit is the commit hash baked into the binary; presumably
// overridden at link time via -ldflags — confirm in the build script.
var GitCommit string
// DbOn maps debug-flag names to whether they are enabled.
var DbOn map[string]bool
// init gives the debug map and commit hash usable defaults before
// main runs.
func init() {
	DbOn = make(map[string]bool)
	GitCommit = "Unknown"
}
// gCfg holds the parsed configuration shared across the program.
var gCfg ConfigType
// main parses flags, loads the JSON configuration, connects to the
// Ethereum node, and emits a single test IndexedEvent log entry.
func main() {
	flag.Parse() // Parse CLI arguments to this, --cfg <name>.json
	fns := flag.Args()
	if len(fns) != 0 {
		fmt.Printf("Extra arguments are not supported [%s]\n", fns)
		os.Exit(1)
	}
	if *Version {
		fmt.Printf("Version (Git Commit): %s\n", GitCommit)
		os.Exit(0)
	}
	// ------------------------------------------------------------------------------
	// Read in Configuration. (flag.String never returns nil, so the
	// previous `Cfg == nil` guard was dead code and has been removed.)
	// ------------------------------------------------------------------------------
	err := ReadConfig.ReadFile(*Cfg, &gCfg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read configuration: %s error %s\n", *Cfg, err)
		os.Exit(1)
	}
	err = Connect(&gCfg)
	if err != nil {
		fmt.Printf("Unable to connect to %s: error:%s\n", gCfg.URL, err)
		os.Exit(1)
	}
	ni, err := NewInsLogEvent(&gCfg)
	if err != nil {
		fmt.Printf("Unable to instantiate the contract: %s\n", err)
		os.Exit(1)
	}
	// addr := common.HexToAddress(gCfg.LogAddress)
	tx, err := ni.IndexedEvent(gCfg.LogAddress, "log-test")
	if err != nil {
		// This error was previously swallowed by an empty TODO branch;
		// report it and exit non-zero.
		fmt.Fprintf(os.Stderr, "IndexedEvent failed: %s\n", err)
		os.Exit(1)
	}
	fmt.Printf("Success Tx: %s\n", godebug.SVarI(tx))
}
|
package main
import (
pb "hello-grpc/hello"
"log"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
const (
	// listen is the TCP address the gRPC server binds to.
	listen = ":50051"
)
// server implements the HelloService gRPC server.
type server struct{}
// Echo implements hello.HelloServiceServer: it logs the incoming value
// and returns the request message unchanged.
func (s *server) Echo(ctx context.Context, in *pb.StringMessage) (*pb.StringMessage, error) {
	log.Printf("server: %s", in.Value)
	return in, nil
}
// main binds the configured TCP address, registers the hello service
// plus server reflection on a fresh gRPC server, and blocks serving
// requests until a fatal error occurs.
func main() {
	log.Printf("listen on %s", listen)
	lis, err := net.Listen("tcp", listen)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer()
	pb.RegisterHelloServiceServer(srv, &server{})
	// Register reflection service on gRPC server.
	reflection.Register(srv)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"context"
"fmt"
"net/http"
"testing"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
// TestRegisterHTTP verifies that an HTTP function can be registered and
// then looked up by name.
func TestRegisterHTTP(t *testing.T) {
	// RegisterHTTP returns an error (see the duplicate-name test
	// below); previously it was silently ignored here.
	if err := RegisterHTTP("httpfn", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "Hello World!")
	}); err != nil {
		t.Fatalf("RegisterHTTP() = %v, want nil", err)
	}
	fn, ok := GetRegisteredFunction("httpfn")
	if !ok {
		t.Fatalf("Expected function to be registered")
	}
	if fn.Name != "httpfn" {
		t.Errorf("Expected function name to be 'httpfn', got %s", fn.Name)
	}
}
// TestRegisterCE verifies that a CloudEvent function can be registered
// and then looked up by name.
func TestRegisterCE(t *testing.T) {
	// RegisterCloudEvent returns an error (see the multi-function
	// test); previously it was silently ignored here.
	if err := RegisterCloudEvent("cefn", func(context.Context, cloudevents.Event) error {
		return nil
	}); err != nil {
		t.Fatalf("RegisterCloudEvent() = %v, want nil", err)
	}
	fn, ok := GetRegisteredFunction("cefn")
	if !ok {
		t.Fatalf("Expected function to be registered")
	}
	if fn.Name != "cefn" {
		t.Errorf("Expected function name to be 'cefn', got %s", fn.Name)
	}
}
// TestRegisterMultipleFunctions checks that several distinct function
// names (HTTP and CloudEvent) can all be registered without error.
func TestRegisterMultipleFunctions(t *testing.T) {
	if err := RegisterHTTP("multifn1", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "Hello World!")
	}); err != nil {
		t.Error(`Expected "multifn1" function to be registered`)
	}
	if err := RegisterHTTP("multifn2", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "Hello World 2!")
	}); err != nil {
		t.Error(`Expected "multifn2" function to be registered`)
	}
	if err := RegisterCloudEvent("multifn3", func(context.Context, cloudevents.Event) error {
		return nil
	}); err != nil {
		t.Error(`Expected "multifn3" function to be registered`)
	}
}
// TestRegisterMultipleFunctionsError checks that registering two
// functions under the same name fails on the second registration.
func TestRegisterMultipleFunctionsError(t *testing.T) {
	first := func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "Hello World!") }
	second := func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "Hello World 2!") }
	if err := RegisterHTTP("samename", first); err != nil {
		t.Error("Expected no error registering function")
	}
	if err := RegisterHTTP("samename", second); err == nil {
		t.Error("Expected error registering function with same name")
	}
}
|
package testutils
import (
"encoding/json"
. "github.com/onsi/gomega"
)
// CheckJSONPrettyPrint asserts that a equals b rendered as
// pretty-printed JSON (4-space indentation). b must itself be valid
// JSON.
func CheckJSONPrettyPrint(a string, b string) {
	var parsed interface{}
	// Previously both errors below were silently discarded, so a bad
	// fixture compared against "null" instead of failing loudly.
	Expect(json.Unmarshal([]byte(b), &parsed)).To(Succeed())
	jsonBytes, err := json.MarshalIndent(parsed, "", "    ")
	Expect(err).NotTo(HaveOccurred())
	Expect(a).To(Equal(string(jsonBytes)))
}
|
import (
"fmt"
"math"
)
// getSum returns the contribution of prime factor p appearing a times,
// i.e. the geometric series (p^(a+1) - 1) / (p - 1), computed in
// floating point and truncated to int. It also logs the factor found.
func getSum(p, a float64) int {
	fmt.Println("Factor", p, "is", a, "times")
	series := (math.Pow(p, a+1) - 1) / (p - 1)
	return int(series)
}

// primeSummation factors n by trial division and accumulates, for each
// prime power p^a in the factorization, the series value produced by
// getSum. The factor 2 is always accounted for first (contributing 1
// when n is odd), then odd candidates are tried in increasing order.
// An earlier sieve-based implementation was previously left commented
// out here.
func primeSummation(n int) int {
	sum := 0
	count := 0
	// Strip out all factors of two first.
	for n%2 == 0 {
		n /= 2
		count++
	}
	sum += getSum(2, float64(count))
	// Only odd candidates remain; loop until n is fully factored.
	for i := 3; n != 1; i += 2 {
		count = 0
		for n%i == 0 {
			n /= i
			count++
		}
		if count > 0 {
			sum += getSum(float64(i), float64(count))
		}
	}
	return sum
}
|
package number
import "testing"
// TestBitNumberBase smoke-tests the basic mark/query cycle of a
// BitNumber and logs its resulting state.
func TestBitNumberBase(t *testing.T) {
	num := NewBitNumber()
	num.Mark(1)
	num.IsMarked(1)
	t.Logf("%v", num)
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package vm
import (
"bufio"
"bytes"
"context"
"net"
"os"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/dbusutil"
"chromiumos/tast/timing"
)
const (
	// TerminaComponentName is the name of the Chrome component for the VM kernel and rootfs.
	TerminaComponentName = "cros-termina"
	// TerminaMountDir is a path to the location where we will mount the termina component.
	TerminaMountDir = "/run/imageloader/cros-termina/99999.0.0"
	// ImageServerURLComponentName is the name of the Chrome component for the image server URL.
	ImageServerURLComponentName = "cros-crostini-image-server-url"
	// lsbReleasePath is the KEY=VALUE file describing the OS build.
	lsbReleasePath = "/etc/lsb-release"
	// milestoneKey is the lsb-release key holding the Chrome milestone.
	milestoneKey = "CHROMEOS_RELEASE_CHROME_MILESTONE"
)
// ComponentType represents the VM component type.
type ComponentType int
const (
	// ComponentUpdater indicates that the live component should be fetched from the component updater service.
	ComponentUpdater ComponentType = iota
	// StagingComponent indicates that the current staging component should be fetched from the GS component testing bucket.
	StagingComponent
)
// MountComponent loop-mounts the component image at image onto
// TerminaMountDir, creating the mount point and unmounting any
// previous component first.
func MountComponent(ctx context.Context, image string) error {
	ctx, st := timing.Start(ctx, "mount_component")
	defer st.End()
	if err := os.MkdirAll(TerminaMountDir, 0755); err != nil {
		return err
	}
	// Unmount any existing component, ignoring errors.
	_ = testexec.CommandContext(ctx, "umount", TerminaMountDir).Run()
	// Let mount(8) manage the loop device rather than calling losetup
	// and the mount syscall by hand.
	cmd := testexec.CommandContext(ctx, "mount", image, "-o", "loop", TerminaMountDir)
	if err := cmd.Run(); err != nil {
		cmd.DumpLog(ctx)
		return errors.Wrap(err, "failed to mount component")
	}
	return nil
}
// UnmountComponent unmounts any active VM component and removes its
// mount directory.
func UnmountComponent(ctx context.Context) error {
	ctx, st := timing.Start(ctx, "umount_component")
	defer st.End()
	umount := testexec.CommandContext(ctx, "umount", TerminaMountDir)
	if err := umount.Run(testexec.DumpLogOnError); err != nil {
		return errors.Wrap(err, "failed to unmount component")
	}
	if err := os.Remove(TerminaMountDir); err != nil {
		return errors.Wrap(err, "failed to remove component mount directory")
	}
	return nil
}
// getMilestone returns the ChromeOS milestone for this build by
// parsing the CHROMEOS_RELEASE_CHROME_MILESTONE key out of
// /etc/lsb-release.
func getMilestone() (int, error) {
	f, err := os.Open(lsbReleasePath)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Lines look like KEY=VALUE; skip anything else.
		s := strings.Split(scanner.Text(), "=")
		if len(s) != 2 {
			continue
		}
		if s[0] == milestoneKey {
			val, err := strconv.Atoi(s[1])
			if err != nil {
				return 0, errors.Wrapf(err, "%q is not a valid milestone number", s[1])
			}
			return val, nil
		}
	}
	// A read error was previously misreported as a missing key;
	// bufio.Scanner requires checking Err after the scan loop.
	if err := scanner.Err(); err != nil {
		return 0, errors.Wrap(err, "failed reading lsb-release file")
	}
	return 0, errors.New("no milestone key in lsb-release file")
}
// EnableCrostini sets the preference for Crostini being enabled as this is required for
// some of the Chrome integration tests to function properly.
func EnableCrostini(ctx context.Context, tconn *chrome.TestConn) error {
	err := tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.setCrostiniEnabled)`, true)
	if err != nil {
		return errors.Wrap(err, "running autotestPrivate.setCrostiniEnabled failed")
	}
	return nil
}
// waitForDBusSignal waits on a SignalWatcher and returns the unmarshaled signal. optSpec matches a subset of the watching signals if watcher
// listens on multiple signals. Pass nil if we want to wait for any signal matches by watcher.
// The result is decoded into sigResult, which must be the protobuf
// type carried by the matched signal's first body element.
func waitForDBusSignal(ctx context.Context, watcher *dbusutil.SignalWatcher, optSpec *dbusutil.MatchSpec, sigResult proto.Message) error {
	for {
		select {
		case sig := <-watcher.Signals:
			// Non-matching signals are dropped and we keep waiting.
			if optSpec == nil || optSpec.MatchesSignal(sig) {
				if len(sig.Body) == 0 {
					return errors.New("signal lacked a body")
				}
				// The payload is a serialized protobuf in the first
				// body element.
				buf, ok := sig.Body[0].([]byte)
				if !ok {
					return errors.New("signal body is not a byte slice")
				}
				if err := proto.Unmarshal(buf, sigResult); err != nil {
					return errors.Wrap(err, "failed unmarshaling signal body")
				}
				return nil
			}
		case <-ctx.Done():
			// Context cancellation/deadline bounds the wait.
			return errors.Wrap(ctx.Err(), "didn't get D-Bus signal")
		}
	}
}
// findIPv4 returns the first IPv4 address found in a space separated list of IPs.
func findIPv4(ips string) (string, error) {
	for _, field := range strings.Fields(ips) {
		parsed := net.ParseIP(field)
		// Skip unparsable entries and IPv6-only addresses.
		if parsed == nil || parsed.To4() == nil {
			continue
		}
		return parsed.String(), nil
	}
	return "", errors.Errorf("could not find IPv4 address in %q", ips)
}
// RestartDefaultVMContainer restarts a VM and container that were previously shut down.
func RestartDefaultVMContainer(ctx context.Context, dir string, container *Container) error {
	if err := container.VM.Start(ctx); err != nil {
		return err
	}
	// The container is started only after its VM is up.
	return container.StartAndWait(ctx, dir)
}
// CreateVSHCommand creates a command to be run in a VM over vsh. The
// command parameter is required, followed by an optional variadic list
// of argument strings. The command object is returned.
func CreateVSHCommand(ctx context.Context, cid int, command string, args ...string) *testexec.Cmd {
	argv := []string{"--cid=" + strconv.Itoa(cid), "--", command}
	argv = append(argv, args...)
	cmd := testexec.CommandContext(ctx, "vsh", argv...)
	// vsh uses epoll internally and warns (EPERM) when stdin is
	// /dev/null, so hand it an empty buffer to force a pipe.
	cmd.Stdin = &bytes.Buffer{}
	return cmd
}
|
package cache
import (
"fmt"
"github.com/Skipor/memcached/recycle"
)
// Item is a cache entry: its metadata plus the recycled data buffer
// holding the value bytes.
type Item struct {
	ItemMeta
	Data *recycle.Data
}
// ItemMeta holds the memcached-visible attributes of an entry.
type ItemMeta struct {
	Key string
	Flags uint32
	Exptime int64 // absolute expiry time; 0 means the entry never expires
	Bytes int
}
// expired reports whether the entry's expiry time has passed at the
// given moment now (an Exptime of zero never expires).
func (m ItemMeta) expired(now int64) bool {
	return m.Exptime != 0 && m.Exptime < now
}
// NewView returns a read view of the item: the same metadata plus a
// fresh reader over the underlying data.
func (i Item) NewView() ItemView {
	return ItemView{
		i.ItemMeta,
		i.Data.NewReader(),
	}
}
// ItemView pairs an item's metadata with an independent data reader.
type ItemView struct {
	ItemMeta
	Reader *recycle.DataReader
}
// GoString implements fmt.GoStringer for debug output.
// NOTE(review): the format string has an unbalanced trailing "}" —
// looks like a typo; confirm intended output before changing.
func (i Item) GoString() string {
	return fmt.Sprintf("%#v, Data:%#v}", i.ItemMeta, i.Data)
}
|
package oauth
import (
"errors"
"time"
. "github.com/jsl0820/wechat"
)
// TICKET_URL is the jsapi ticket endpoint; the {{TOKEN}} placeholder is
// presumably substituted with the current access token by Url — confirm.
const TICKET_URL = "/cgi-bin/ticket/getticket?type=jsapi&access_token={{TOKEN}}"
// ticketInstance is the process-wide cached ticket; Clear runs in a
// background goroutine (see init) to expire it periodically.
var ticketInstance = &Ticket{Expires: GetConfig().Expires}
// Ticket caches a jsapi ticket together with its lifetime in seconds.
type Ticket struct {
	Expires uint
	Ticket string
}
// ticketRefresh fetches a fresh jsapi ticket from the remote endpoint
// and stores it on ti.
// NOTE(review): panics on transport or API errors and callers do not
// recover — confirm this is intended for this package.
func (ti *Ticket) ticketRefresh() {
	url := Url(TICKET_URL)
	// Resp mirrors the JSON payload of the getticket API.
	type Resp struct {
		Errcode int
		Errmsg string
		Ticket string
		ExpiresIn uint
	}
	var resp Resp
	if err := NewRequest().Get(url).JsonResp(&resp); err != nil {
		panic(err)
	}
	// A non-zero Errcode signals an API-level failure.
	if resp.Errcode != 0 {
		panic(errors.New("errmsg:" + resp.Errmsg))
	}
	ti.Ticket = resp.Ticket
}
// Clear wipes the cached ticket every Expires seconds, forcing the
// next GetTicket call to refresh it. It loops forever and is launched
// from init.
// NOTE(review): ti.Ticket is read and written from other goroutines
// without synchronization — data race; confirm and add a mutex.
func (ti *Ticket) Clear() {
	d := time.Duration(ti.Expires) * time.Second
	for {
		<-time.After(d)
		if ti.Ticket != "" {
			ti.Ticket = ""
		}
	}
}
// GetTicket returns the cached jsapi ticket, refreshing it first when
// the cache is empty (or was cleared by the background Clear loop).
func (ti *Ticket) GetTicket() string {
	if ti.Ticket == "" {
		ti.ticketRefresh()
	}
	return ti.Ticket
}
// SdkConfig builds the js-sdk signature configuration for the given
// page url: it assembles the signed parameter set (url, timestamp,
// nonce, jsapi ticket) and returns timestamp, noncestr and the SHA-1
// signature derived from them.
func SdkConfig(url string) map[string]string {
	params := map[string]string{
		"url":          url,
		"timestamp":    StampString(),
		"noncestr":     NonceStringGenerator(32),
		"jsapi_ticket": ticketInstance.GetTicket(),
	}
	sign := map[string]string{
		"timestamp": params["timestamp"],
		"noncestr":  params["noncestr"],
	}
	sign["signature"] = Sha1Sign(StringSign(params))
	return sign
}
// init starts the background goroutine that periodically clears the
// cached ticket (see Ticket.Clear).
func init() {
	go ticketInstance.Clear()
}
|
package comparisons
import (
"jean/instructions/base"
"jean/instructions/factory"
"jean/rtda/heap"
"jean/rtda/jvmstack"
)
// IF_ACMPEQ branches when the two reference operands on top of the
// operand stack refer to the same object.
type IF_ACMPEQ struct {
	base.BranchInstruction
}

// Execute pops two references and branches if they are equal.
func (ifAcmp *IF_ACMPEQ) Execute(frame *jvmstack.Frame) {
	sameRef := func(r1, r2 *heap.Object) bool { return r1 == r2 }
	_ifAcmp(frame, sameRef, ifAcmp.Offset)
}
// IF_ACMPNE branches when the two reference operands on top of the
// operand stack refer to different objects.
type IF_ACMPNE struct {
	base.BranchInstruction
}

// Execute pops two references and branches if they differ.
func (ifAcmp *IF_ACMPNE) Execute(frame *jvmstack.Frame) {
	differentRef := func(r1, r2 *heap.Object) bool { return r1 != r2 }
	_ifAcmp(frame, differentRef, ifAcmp.Offset)
}
// _ifAcmp pops two references from frame's operand stack (the second
// operand comes off first) and branches by offset when cond holds.
func _ifAcmp(frame *jvmstack.Frame, cond func(r1, r2 *heap.Object) bool, offset int) {
	stack := frame.OperandStack()
	second := stack.PopRef()
	first := stack.PopRef()
	if !cond(first, second) {
		return
	}
	base.Branch(frame, offset)
}
// init registers the two reference-comparison opcodes with the
// instruction factory: 0xa5 (if_acmpeq) and 0xa6 (if_acmpne).
func init() {
	factory.Factory.AddInstruction(0xa5, func() base.Instruction {
		return &IF_ACMPEQ{}
	})
	factory.Factory.AddInstruction(0xa6, func() base.Instruction {
		return &IF_ACMPNE{}
	})
}
|
package main
import (
"fmt"
"net"
"strconv"
"strings"
"time"
"github.com/BurntSushi/toml"
log "github.com/Sirupsen/logrus"
)
// PortScannerConfig mirrors the TOML keys in config.properties.
type PortScannerConfig struct {
	Portrange string // e.g. "1-1024", parsed as "start-end"
	Ipaddress string
	Protocol string
}
// PortScannerResult aggregates scan state across workers.
// NOTE(review): running and portScannerResult are mutated by many
// goroutines without synchronization (see CheckPort/check) — data
// race; needs a mutex or sync/atomic.
type PortScannerResult struct {
	portScannerResult portScannerResultMap
	running int
	timeOut int
}
// portScannerResultMap maps "ip:port" to whether the port was open.
type portScannerResultMap map[string]bool
// portScannerTuple is the shared scan state for the whole program.
var portScannerTuple PortScannerResult
// main loads config.properties, scans the configured port range, and
// logs every port found open along with the total elapsed time.
func main() {
	log.SetLevel(log.DebugLevel)
	log.Infoln("*******************************************")
	log.Infoln("Port Scanner")
	log.Infoln("*******************************************")
	t := time.Now()
	defer func() {
		// Keep a panic from crashing silently; log whatever recovered.
		if e := recover(); e != nil {
			log.Debugln(e)
		}
	}()
	log.Debugln(loadConfig("config.properties"))
	log.Debugln("Parsed input data ", len(portScannerTuple.portScannerResult))
	CheckPort(&portScannerTuple)
	for key, value := range portScannerTuple.portScannerResult {
		if value {
			log.Debugln("Port Scanner Result", key, " port is open :", value)
		}
	}
	// Bug fix: the message contains printf verbs, so it must go
	// through Debugf — Debugln printed "%s"/"%d" literally.
	log.Debugf("Total time taken %s to scan %d ports", time.Since(t), len(portScannerTuple.portScannerResult))
}
// CheckPort launches one goroutine per ip:port entry in the result
// map, throttled to at most 2000 in flight, then polls once a second
// until every worker has finished.
// NOTE(review): the running counter and the result map are accessed
// from many goroutines without locks (see check) — data race; confirm
// and guard with sync/atomic or a mutex.
func CheckPort(portScannerTuple *PortScannerResult) {
	for record := range portScannerTuple.portScannerResult {
		// Throttle: block while the worker count is at the cap.
		for portScannerTuple.running >= 2000 {
			log.Debugln("Maximum threads spawned", portScannerTuple.running, " waiting ...", 1*time.Second)
			time.Sleep(1 * time.Second)
		}
		// record has the form "ip:port".
		r := strings.Split(record, ":")
		port, _ := strconv.Atoi(r[1])
		portScannerTuple.running++
		go check(portScannerTuple, r[0], uint16(port))
	}
	// Busy-wait until all workers have decremented running.
	for portScannerTuple.running != 0 {
		time.Sleep(1 * time.Second)
	}
}
// check dials ip:port over TCP with the configured timeout and records
// in the shared map whether the connection succeeded, then decrements
// the worker counter.
// NOTE(review): many check goroutines write to the same map
// concurrently — concurrent map writes can fault at runtime; guard
// with a mutex.
func check(portScannerTuple *PortScannerResult, ip string, port uint16) {
	connection, err := net.DialTimeout("tcp", ip+":"+fmt.Sprintf("%d", port), time.Duration(portScannerTuple.timeOut)*time.Second)
	if err == nil {
		portScannerTuple.portScannerResult[fmt.Sprintf("%s:%d", ip, port)] = true
		//log.Debugln(fmt.Sprintf("%s:%d - true", ip, port))
		connection.Close()
	} else {
		portScannerTuple.portScannerResult[fmt.Sprintf("%s:%d", ip, port)] = false
		//log.Debugln(fmt.Sprintf("%s:%d - %s", ip, port, err))
	}
	portScannerTuple.running--
}
// loadConfig reads the TOML file, validates the "start-end" port
// range, and seeds the shared result map with every ip:port in the
// range (all initially closed) plus a 5-second dial timeout.
func loadConfig(file string) PortScannerConfig {
	var readConfigStruct PortScannerConfig
	if metaData, err := toml.DecodeFile(file, &readConfigStruct); err != nil {
		log.Debugln("Error occurred reading file", err, metaData)
	}
	ports := strings.Split(readConfigStruct.Portrange, "-")
	// Robustness: a malformed or missing Portrange used to panic with
	// an index out of range below; fail soft with an empty scan set.
	if len(ports) != 2 {
		log.Errorln("Portrange must have the form start-end, got:", readConfigStruct.Portrange)
		portScannerTuple.portScannerResult = make(portScannerResultMap)
		return readConfigStruct
	}
	p1, err := strconv.Atoi(ports[0])
	if err != nil {
		log.Errorln(err)
	}
	log.Debugln("p1", p1)
	p2, err := strconv.Atoi(ports[1])
	if err != nil {
		log.Errorln(err)
	}
	log.Debugln("p2", p2)
	portScannerTuple.portScannerResult = make(portScannerResultMap)
	portScannerTuple.timeOut = 5 // hoisted: was re-assigned on every loop iteration
	for port := p1; port <= p2; port++ {
		portScannerTuple.portScannerResult[readConfigStruct.Ipaddress+fmt.Sprintf(":%d", port)] = false
	}
	return readConfigStruct
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package header_test
import (
"testing"
"github.com/google/go-cmp/cmp"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/prependable"
)
// TestIPv4OptionsSerializer checks that each IPv4 serializable option
// (and a combination) serializes to the expected wire bytes, including
// zero padding up to the 4-byte boundary.
func TestIPv4OptionsSerializer(t *testing.T) {
	optCases := []struct {
		name string
		option []header.IPv4SerializableOption
		expect []byte
	}{
		{
			name: "NOP",
			option: []header.IPv4SerializableOption{
				&header.IPv4SerializableNOPOption{},
			},
			expect: []byte{1, 0, 0, 0},
		},
		{
			name: "ListEnd",
			option: []header.IPv4SerializableOption{
				&header.IPv4SerializableListEndOption{},
			},
			expect: []byte{0, 0, 0, 0},
		},
		{
			name: "RouterAlert",
			option: []header.IPv4SerializableOption{
				&header.IPv4SerializableRouterAlertOption{},
			},
			expect: []byte{148, 4, 0, 0},
		}, {
			name: "NOP and RouterAlert",
			option: []header.IPv4SerializableOption{
				&header.IPv4SerializableNOPOption{},
				&header.IPv4SerializableRouterAlertOption{},
			},
			expect: []byte{1, 148, 4, 0, 0, 0, 0, 0},
		},
	}
	for _, opt := range optCases {
		t.Run(opt.name, func(t *testing.T) {
			s := header.IPv4OptionsSerializer(opt.option)
			l := s.Length()
			if got := len(opt.expect); got != int(l) {
				t.Fatalf("s.Length() = %d, want = %d", got, l)
			}
			b := make([]byte, l)
			for i := range b {
				// Fill the buffer with full bytes to ensure padding is being set
				// correctly.
				b[i] = 0xFF
			}
			if serializedLength := s.Serialize(b); serializedLength != l {
				t.Fatalf("s.Serialize(_) = %d, want %d", serializedLength, l)
			}
			if diff := cmp.Diff(opt.expect, b); diff != "" {
				t.Errorf("mismatched serialized option (-want +got):\n%s", diff)
			}
		})
	}
}
// TestIPv4Encode checks that ipv4.Encode correctly fills out the requested
// fields when options are supplied: the serialized option bytes (with
// NOP padding rounded to 4-byte multiples) and the resulting IHL.
func TestIPv4EncodeOptions(t *testing.T) {
	tests := []struct {
		name string
		numberOfNops int
		encodedOptions header.IPv4Options // reply should look like this
		wantIHL int
	}{
		{
			name: "valid no options",
			wantIHL: header.IPv4MinimumSize,
		},
		{
			name: "one byte options",
			numberOfNops: 1,
			encodedOptions: header.IPv4Options{1, 0, 0, 0},
			wantIHL: header.IPv4MinimumSize + 4,
		},
		{
			name: "two byte options",
			numberOfNops: 2,
			encodedOptions: header.IPv4Options{1, 1, 0, 0},
			wantIHL: header.IPv4MinimumSize + 4,
		},
		{
			name: "three byte options",
			numberOfNops: 3,
			encodedOptions: header.IPv4Options{1, 1, 1, 0},
			wantIHL: header.IPv4MinimumSize + 4,
		},
		{
			name: "four byte options",
			numberOfNops: 4,
			encodedOptions: header.IPv4Options{1, 1, 1, 1},
			wantIHL: header.IPv4MinimumSize + 4,
		},
		{
			name: "five byte options",
			numberOfNops: 5,
			encodedOptions: header.IPv4Options{1, 1, 1, 1, 1, 0, 0, 0},
			wantIHL: header.IPv4MinimumSize + 8,
		},
		{
			name: "thirty nine byte options",
			numberOfNops: 39,
			encodedOptions: header.IPv4Options{
				1, 1, 1, 1, 1, 1, 1, 1,
				1, 1, 1, 1, 1, 1, 1, 1,
				1, 1, 1, 1, 1, 1, 1, 1,
				1, 1, 1, 1, 1, 1, 1, 1,
				1, 1, 1, 1, 1, 1, 1, 0,
			},
			wantIHL: header.IPv4MinimumSize + 40,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			serializeOpts := header.IPv4OptionsSerializer(make([]header.IPv4SerializableOption, test.numberOfNops))
			for i := range serializeOpts {
				serializeOpts[i] = &header.IPv4SerializableNOPOption{}
			}
			paddedOptionLength := serializeOpts.Length()
			ipHeaderLength := int(header.IPv4MinimumSize + paddedOptionLength)
			if ipHeaderLength > header.IPv4MaximumHeaderSize {
				t.Fatalf("IP header length too large: got = %d, want <= %d ", ipHeaderLength, header.IPv4MaximumHeaderSize)
			}
			totalLen := uint16(ipHeaderLength)
			hdr := prependable.New(int(totalLen))
			ip := header.IPv4(hdr.Prepend(ipHeaderLength))
			// To check the padding works, poison the last byte of the options space.
			if paddedOptionLength != serializeOpts.Length() {
				ip.SetHeaderLength(uint8(ipHeaderLength))
				ip.Options()[paddedOptionLength-1] = 0xff
				ip.SetHeaderLength(0)
			}
			ip.Encode(&header.IPv4Fields{
				Options: serializeOpts,
			})
			options := ip.Options()
			wantOptions := test.encodedOptions
			if got, want := int(ip.HeaderLength()), test.wantIHL; got != want {
				t.Errorf("got IHL of %d, want %d", got, want)
			}
			// cmp.Diff does not consider nil slices equal to empty slices, but we do.
			if len(wantOptions) == 0 && len(options) == 0 {
				return
			}
			if diff := cmp.Diff(wantOptions, options); diff != "" {
				t.Errorf("options mismatch (-want +got):\n%s", diff)
			}
		})
	}
}
// TestIsV4LinkLocalUnicastAddress checks classification of addresses
// against the IPv4 link-local unicast subnet 169.254.0.0/16, probing
// both subnet boundaries and their immediate neighbours. Addresses are
// written as raw 4-byte strings (e.g. "\xa9\xfe..." is 169.254.x.x).
func TestIsV4LinkLocalUnicastAddress(t *testing.T) {
	tests := []struct {
		name string
		addr string
		expected bool
	}{
		{
			name: "Valid (lowest)",
			addr: "\xa9\xfe\x00\x00",
			expected: true,
		},
		{
			name: "Valid (highest)",
			addr: "\xa9\xfe\xff\xff",
			expected: true,
		},
		{
			name: "Invalid (before subnet)",
			addr: "\xa9\xfd\xff\xff",
			expected: false,
		},
		{
			name: "Invalid (after subnet)",
			addr: "\xa9\xff\x00\x00",
			expected: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := header.IsV4LinkLocalUnicastAddress(tcpip.AddrFromSlice([]byte(test.addr))); got != test.expected {
				t.Errorf("got header.IsV4LinkLocalUnicastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
			}
		})
	}
}
// TestIsV4LinkLocalMulticastAddress checks classification of addresses
// against the IPv4 link-local multicast subnet 224.0.0.0/24, probing
// both subnet boundaries and their immediate neighbours. Addresses are
// written as raw 4-byte strings (e.g. "\xe0\x00\x00\x00" is 224.0.0.0).
func TestIsV4LinkLocalMulticastAddress(t *testing.T) {
	tests := []struct {
		name string
		addr string
		expected bool
	}{
		{
			name: "Valid (lowest)",
			addr: "\xe0\x00\x00\x00",
			expected: true,
		},
		{
			name: "Valid (highest)",
			addr: "\xe0\x00\x00\xff",
			expected: true,
		},
		{
			name: "Invalid (before subnet)",
			addr: "\xdf\xff\xff\xff",
			expected: false,
		},
		{
			name: "Invalid (after subnet)",
			addr: "\xe0\x00\x01\x00",
			expected: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := header.IsV4LinkLocalMulticastAddress(tcpip.AddrFrom4Slice([]byte(test.addr))); got != test.expected {
				t.Errorf("got header.IsV4LinkLocalMulticastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
			}
		})
	}
}
|
package modifiers
import (
"path/filepath"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/consts"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/statefulset/builder"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
// Compile-time assertions that CertificatesModifier satisfies the builder
// interfaces it is registered under.
var _ volumeModifier = CertificatesModifier{}
var _ volumeMountModifier = CertificatesModifier{}
var _ builder.Modifier = CertificatesModifier{}
const (
	// jettyCerts names the volume backed by the user-supplied TLS secret.
	jettyCerts = "server-certs"
	// secretsRootDir is the base directory where secrets are mounted in the
	// ActiveGate container.
	secretsRootDir = "/var/lib/dynatrace/secrets/"
)
// NewCertificatesModifier builds a CertificatesModifier bound to the given
// DynaKube custom resource.
func NewCertificatesModifier(dynakube dynatracev1beta1.DynaKube) CertificatesModifier {
	mod := CertificatesModifier{}
	mod.dynakube = dynakube
	return mod
}
// CertificatesModifier wires the ActiveGate TLS certificate secret and an
// SSL scratch volume into the ActiveGate statefulset.
type CertificatesModifier struct {
	dynakube dynatracev1beta1.DynaKube
}
// Enabled reports whether the DynaKube declares a custom ActiveGate CA
// certificate; the modifier only applies in that case.
func (mod CertificatesModifier) Enabled() bool {
	dynakube := mod.dynakube
	return dynakube.HasActiveGateCaCert()
}
// Modify appends the certificate volumes to the pod spec and mounts them
// into the ActiveGate container. Always returns nil.
func (mod CertificatesModifier) Modify(sts *appsv1.StatefulSet) error {
	// NOTE(review): assumes FindContainerInPodSpec always finds the
	// ActiveGate container; a nil return would panic below — confirm the
	// base builder adds the container before modifiers run.
	baseContainer := kubeobjects.FindContainerInPodSpec(&sts.Spec.Template.Spec, consts.ActiveGateContainerName)
	sts.Spec.Template.Spec.Volumes = append(sts.Spec.Template.Spec.Volumes, mod.getVolumes()...)
	baseContainer.VolumeMounts = append(baseContainer.VolumeMounts, mod.getVolumeMounts()...)
	return nil
}
// getVolumes returns the two volumes needed for ActiveGate TLS: an empty
// scratch dir for the gateway's SSL material, and the secret holding the
// user-supplied certificate.
func (mod CertificatesModifier) getVolumes() []corev1.Volume {
	sslVolume := corev1.Volume{
		Name: consts.GatewaySslVolumeName,
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{},
		},
	}
	certVolume := corev1.Volume{
		Name: jettyCerts,
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{
				SecretName: mod.dynakube.Spec.ActiveGate.TlsSecretName,
			},
		},
	}
	return []corev1.Volume{sslVolume, certVolume}
}
// getVolumeMounts returns the mounts matching getVolumes: the writable SSL
// scratch dir and the read-only certificate secret under .../secrets/tls.
func (mod CertificatesModifier) getVolumeMounts() []corev1.VolumeMount {
	mounts := make([]corev1.VolumeMount, 0, 2)
	mounts = append(mounts, corev1.VolumeMount{
		ReadOnly:  false,
		Name:      consts.GatewaySslVolumeName,
		MountPath: consts.GatewaySslMountPoint,
	})
	mounts = append(mounts, corev1.VolumeMount{
		ReadOnly:  true,
		Name:      jettyCerts,
		MountPath: filepath.Join(secretsRootDir, "tls"),
	})
	return mounts
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asm
import (
"context"
"fmt"
"github.com/google/gapid/core/data/binary"
"github.com/google/gapid/gapis/replay/opcode"
"github.com/google/gapid/gapis/replay/protocol"
"github.com/google/gapid/gapis/replay/value"
)
const (
	// Various bit-masks used by encodePush below.
	// Many opcodes can fit values into the opcode itself.
	// These masks are used to determine which values fit.
	mask19 = uint64(0x7ffff)
	mask20 = uint64(0xfffff)
	mask26 = uint64(0x3ffffff)
	mask45 = uint64(0x1fffffffffff)
	mask46 = uint64(0x3fffffffffff)
	mask52 = uint64(0xfffffffffffff)
	//            ▏60       ▏50       ▏40       ▏30       ▏20       ▏10
	// ○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●● mask19
	// ○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●●● mask20
	// ○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●●●●●●●●● mask26
	// ○○○○○○○○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●● mask45
	// ○○○○○○○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●● mask46
	// ○○○○○○○○○○○○●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●● mask52
	// ▕         PUSHI 20         ▕
	// ▕        EXTEND 26           ▕
)
// Instruction is the interface of all instruction types.
//
// Encode writes the instruction's opcodes to the binary writer w, translating
// all pointers to their final, resolved addresses using the PointerResolver r.
// An instruction can produce zero, one or many opcodes.
// Every instruction type declared in this file implements it.
type Instruction interface {
	Encode(r value.PointerResolver, w binary.Writer) error
}
// encodePush emits opcodes that push the immediate value v of type t onto the
// VM stack. A PUSHI opcode carries up to 20 bits of payload; wider values are
// completed with one or two EXTEND opcodes of 26 bits each (see the mask
// diagram in the const block above). Returns an error for unsupported types
// or on any write failure.
func encodePush(t protocol.Type, v uint64, w binary.Writer) error {
	switch t {
	case protocol.Type_Float:
		// 32-bit float: the top 9 bits (sign + exponent) travel in the PUSHI;
		// the 23-bit mantissa follows in an EXTEND only when non-zero.
		push := opcode.PushI{DataType: t, Value: uint32(v >> 23)}
		if err := push.Encode(w); err != nil {
			return err
		}
		if v&0x7fffff != 0 {
			return opcode.Extend{Value: uint32(v & 0x7fffff)}.Encode(w)
		}
		return nil
	case protocol.Type_Double:
		// 64-bit double: top 12 bits in the PUSHI, then the 52-bit remainder
		// split across two EXTENDs only when non-zero.
		push := opcode.PushI{DataType: t, Value: uint32(v >> 52)}
		if err := push.Encode(w); err != nil {
			return err
		}
		v &= mask52
		if v != 0 {
			ext := opcode.Extend{Value: uint32(v >> 26)}
			if err := ext.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		}
		return nil
	case protocol.Type_Int8, protocol.Type_Int16, protocol.Type_Int32, protocol.Type_Int64:
		// Signed PUSHI types are sign-extended, so a value fits in a PUSHI
		// when all bits above bit 18 agree with the sign bit (mask19 cases),
		// and in PUSHI+EXTEND when bits above bit 44 agree (mask45 cases).
		switch {
		case v&^mask19 == 0:
			// ○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕
			return opcode.PushI{DataType: t, Value: uint32(v)}.Encode(w)
		case v&^mask19 == ^mask19:
			// Negative value whose magnitude fits: truncate to 20 bits.
			// ●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●●◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕
			return opcode.PushI{DataType: t, Value: uint32(v & mask20)}.Encode(w)
		case v&^mask45 == 0:
			// ○○○○○○○○○○○○○○○○○○○◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕ EXTEND 26 ▕
			push := opcode.PushI{DataType: t, Value: uint32(v >> 26)}
			if err := push.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		case v&^mask45 == ^mask45:
			// Negative value needing one EXTEND.
			// ●●●●●●●●●●●●●●●●●●●◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕ EXTEND 26 ▕
			push := opcode.PushI{DataType: t, Value: uint32((v >> 26) & mask20)}
			if err := push.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		default:
			// Full 64-bit payload: 12 bits in the PUSHI, two EXTENDs.
			// ◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			//▕ PUSHI 12 ▕ EXTEND 26 ▕ EXTEND 26 ▕
			push := opcode.PushI{DataType: t, Value: uint32(v >> 52)}
			if err := push.Encode(w); err != nil {
				return err
			}
			ext := opcode.Extend{Value: uint32((v >> 26) & mask26)}
			if err := ext.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		}
	case protocol.Type_Bool,
		protocol.Type_Uint8, protocol.Type_Uint16, protocol.Type_Uint32, protocol.Type_Uint64,
		protocol.Type_AbsolutePointer, protocol.Type_ConstantPointer, protocol.Type_VolatilePointer:
		// Unsigned types are zero-extended: simple prefix-of-zeros checks.
		switch {
		case v&^mask20 == 0:
			// ○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○○◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕
			return opcode.PushI{DataType: t, Value: uint32(v)}.Encode(w)
		case v&^mask46 == 0:
			// ○○○○○○○○○○○○○○○○○○◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			// ▕ PUSHI 20 ▕ EXTEND 26 ▕
			push := opcode.PushI{DataType: t, Value: uint32(v >> 26)}
			if err := push.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		default:
			// ◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒◒
			//▕ PUSHI 12 ▕ EXTEND 26 ▕ EXTEND 26 ▕
			push := opcode.PushI{DataType: t, Value: uint32(v >> 52)}
			if err := push.Encode(w); err != nil {
				return err
			}
			ext := opcode.Extend{Value: uint32((v >> 26) & mask26)}
			if err := ext.Encode(w); err != nil {
				return err
			}
			return opcode.Extend{Value: uint32(v & mask26)}.Encode(w)
		}
	}
	return fmt.Errorf("Cannot push value type %s", t)
}
// Nop is a no-operation Instruction. Instructions of this type do nothing.
type Nop struct{}

// Encode emits no opcodes: a Nop has no runtime effect.
func (Nop) Encode(value.PointerResolver, binary.Writer) error {
	return nil
}
// Call is an Instruction to call a VM registered function.
// This instruction will pop the parameters from the VM stack starting with the
// first parameter. If PushReturn is true, then the return value of the function
// call will be pushed to the top of the VM stack.
type Call struct {
	PushReturn bool   // If true, the return value is pushed to the VM stack.
	ApiIndex   uint8  // The index of the API this call belongs to
	FunctionID uint16 // The function id registered with the VM to invoke.
}

// Encode emits a single CALL opcode mirroring this instruction's fields.
func (a Call) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Call{
		PushReturn: a.PushReturn,
		ApiIndex:   a.ApiIndex,
		FunctionID: a.FunctionID,
	}
	return op.Encode(w)
}
// Push is an Instruction to push Value to the top of the VM stack.
type Push struct {
	Value value.Value // The value to push on to the VM stack.
}

// Encode emits the opcodes that place Value on the VM stack. If the resolved
// value already lives on the stack, nothing needs to be emitted.
//
// (Rewritten from an if/else with an unused binding on the taken branch to
// the idiomatic early-return form.)
func (a Push) Encode(r value.PointerResolver, w binary.Writer) error {
	ty, val, onStack := a.Value.Get(r)
	if onStack {
		return nil
	}
	return encodePush(ty, val, w)
}
// Pop is an Instruction that discards Count values from the top of the VM
// stack.
type Pop struct {
	Count uint32 // Number of values to discard from the top of the VM stack.
}

// Encode emits a single POP opcode.
func (a Pop) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Pop{Count: a.Count}
	return op.Encode(w)
}
// Copy is an Instruction that pops the target address and then the source
// address from the top of the VM stack, and then copies Count bytes from
// source to target.
type Copy struct {
	Count uint64 // Number of bytes to copy.
}

// Encode emits a single COPY opcode.
// NOTE(review): Count is narrowed from uint64 to uint32 here, so copies of
// 4GiB or more would silently truncate — confirm callers never exceed that.
func (a Copy) Encode(r value.PointerResolver, w binary.Writer) error {
	return opcode.Copy{Count: uint32(a.Count)}.Encode(w)
}
// Clone is an Instruction that makes a copy of the n-th element from the
// top of the VM stack and pushes the copy to the top of the VM stack.
type Clone struct {
	Index int
}

// Encode emits a single CLONE opcode.
func (a Clone) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Clone{Index: uint32(a.Index)}
	return op.Encode(w)
}
// Load is an Instruction that loads the value of type DataType from pointer
// Source and pushes the loaded value to the top of the VM stack.
type Load struct {
	DataType protocol.Type
	Source   value.Pointer
}

// Encode emits opcodes that load from Source and push the result.
// Small (20-bit) constant/volatile addresses use the compact LOAD_C/LOAD_V
// forms; otherwise the address is pushed first and a plain LOAD follows.
// If the address is already on the VM stack, only the LOAD is emitted.
func (a Load) Encode(r value.PointerResolver, w binary.Writer) error {
	ty, addr, onStack := a.Source.Get(r)
	if !onStack {
		switch ty {
		case protocol.Type_ConstantPointer:
			if addr&^mask20 == 0 {
				return opcode.LoadC{DataType: a.DataType, Address: uint32(addr)}.Encode(w)
			}
		case protocol.Type_VolatilePointer:
			if addr&^mask20 == 0 {
				return opcode.LoadV{DataType: a.DataType, Address: uint32(addr)}.Encode(w)
			}
		default:
			return fmt.Errorf("Unsupported load source type %T", a.Source)
		}
		// Address too wide for the compact forms: push it explicitly.
		if err := encodePush(ty, addr, w); err != nil {
			return err
		}
	}
	return opcode.Load{DataType: a.DataType}.Encode(w)
}
// Store is an Instruction that pops the value from the top of the VM stack and
// writes the value to Destination.
type Store struct {
	Destination value.Pointer
}

// Encode emits opcodes that write the value on top of the VM stack to the
// destination pointer. Addresses that fit in 26 bits use the compact STORE_V
// form; otherwise the resolved address is pushed before a plain STORE.
func (a Store) Encode(r value.PointerResolver, w binary.Writer) error {
	ty, addr, onStack := a.Destination.Get(r)
	if !onStack {
		if addr&^mask26 == 0 {
			return opcode.StoreV{Address: uint32(addr)}.Encode(w)
		}
		if err := encodePush(ty, addr, w); err != nil {
			return err
		}
	}
	return opcode.Store{}.Encode(w)
}
// Strcpy is an Instruction that pops the target address then the source address
// from the top of the VM stack, and then copies at most MaxCount-1 bytes from
// source to target. If the MaxCount is greater than the source string length,
// then the target will be padded with 0s. The destination buffer will always be
// 0-terminated.
type Strcpy struct {
	MaxCount uint64
}

// Encode emits a single STRCPY opcode.
func (a Strcpy) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Strcpy{MaxSize: uint32(a.MaxCount)}
	return op.Encode(w)
}
// Resource is an Instruction that loads the resource with index Index of Size
// bytes and writes the resource to Destination.
type Resource struct {
	Index       uint32
	Destination value.Pointer
}

// Encode pushes the destination address (unless it is already on the VM
// stack) and then emits a RESOURCE opcode carrying the resource index.
func (a Resource) Encode(r value.PointerResolver, w binary.Writer) error {
	dstTy, dstVal, dstOnStack := a.Destination.Get(r)
	if !dstOnStack {
		if err := encodePush(dstTy, dstVal, w); err != nil {
			return err
		}
	}
	op := opcode.Resource{ID: a.Index}
	return op.Encode(w)
}
// InlineResource is an Instruction that loads the resource with index Index of Size
// bytes and writes the resource to Destination. Unlike the regular Resource instruction
// InlineResource packs the resource into the bytes following the initial 32 bit instruction.
// In turn, this "inline" resource is followed by a pair of patch up tables. First some
// addresses to overwrite with constant values, then some pairs of addresses where the
// first address is an address to load from and the second is an address to store the loaded
// value.

// InlineResourceValuePatchUp overwrites Destination with the constant Value.
type InlineResourceValuePatchUp struct {
	Destination value.Pointer
	Value value.Value
}

// InlineResourcePointerPatchUp loads from Source and stores the loaded value
// at Destination.
type InlineResourcePointerPatchUp struct {
	Destination value.Pointer
	Source value.Pointer
}

// InlineResource carries the raw resource bytes plus the two patch-up tables
// described above.
type InlineResource struct {
	Data []byte
	Destination value.Pointer
	ValuePatchUps []InlineResourceValuePatchUp
	PointerPatchUps []InlineResourcePointerPatchUp
	Ctx context.Context
}
// Encode pushes the destination address (unless already on the VM stack) and
// then emits the INLINE_RESOURCE opcode. The raw resource bytes are packed
// little-endian into ceil(len/4) 32-bit words, with the final word
// zero-padded when len(Data) is not a multiple of four.
//
// (Rewritten from repeated *256 multiplications to shift-based packing and
// pre-sized slices; the produced opcode stream is unchanged.)
func (a InlineResource) Encode(r value.PointerResolver, w binary.Writer) error {
	ty, val, onStack := a.Destination.Get(r)
	if !onStack {
		if err := encodePush(ty, val, w); err != nil {
			return err
		}
	}

	// Translate the patch-up descriptors into their opcode-level equivalents.
	valuePatchUps := make([]opcode.InlineResourceValuePatchUp, 0, len(a.ValuePatchUps))
	for _, p := range a.ValuePatchUps {
		valuePatchUps = append(valuePatchUps, opcode.InlineResourceValuePatchUp{Destination: p.Destination, Value: p.Value})
	}
	pointerPatchUps := make([]opcode.InlineResourcePointerPatchUp, 0, len(a.PointerPatchUps))
	for _, p := range a.PointerPatchUps {
		pointerPatchUps = append(pointerPatchUps, opcode.InlineResourcePointerPatchUp{Destination: p.Destination, Source: p.Source})
	}

	// Pack the payload into little-endian 32-bit words.
	wordCount := (len(a.Data) + 3) / 4
	data := make([]uint32, wordCount)
	for i := range data {
		var word uint32
		for b := 0; b < 4 && i*4+b < len(a.Data); b++ {
			word |= uint32(a.Data[i*4+b]) << (8 * uint(b))
		}
		data[i] = word
	}

	return opcode.InlineResource{
		Data:            data,
		DataSize:        uint32(len(a.Data)),
		ValuePatchUps:   valuePatchUps,
		PointerPatchUps: pointerPatchUps,
		Resolver:        r,
		Ctx:             a.Ctx,
	}.Encode(w)
}
// Post is an Instruction that posts Size bytes from Source to the server.
type Post struct {
	Source value.Pointer
	Size   uint64
}

// Encode pushes the source address (unless already on the VM stack) and the
// byte count, then emits a POST opcode.
func (a Post) Encode(r value.PointerResolver, w binary.Writer) error {
	srcTy, srcVal, srcOnStack := a.Source.Get(r)
	if !srcOnStack {
		if err := encodePush(srcTy, srcVal, w); err != nil {
			return err
		}
	}
	if err := encodePush(protocol.Type_Uint32, a.Size, w); err != nil {
		return err
	}
	return opcode.Post{}.Encode(w)
}
// Add is an Instruction that pops and sums the top N stack values, pushing the
// result to the top of the stack. Each summed value must have the same type.
type Add struct {
	Count uint32
}

// Encode emits a single ADD opcode.
func (a Add) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Add{Count: a.Count}
	return op.Encode(w)
}
// Label is an Instruction that holds a marker value, used for debugging.
type Label struct {
	Value uint32
}

// Encode emits a single LABEL opcode.
func (a Label) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Label{Value: a.Value}
	return op.Encode(w)
}
// SwitchThread is an Instruction that changes execution to a different thread.
type SwitchThread struct {
	Index uint32
}

// Encode emits a single SWITCH_THREAD opcode.
func (a SwitchThread) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.SwitchThread{Index: a.Index}
	return op.Encode(w)
}
// JumpLabel marks a position in the opcode stream that JumpNZ/JumpZ can
// target.
type JumpLabel struct {
	Label uint32
}

// Encode emits a single JUMP_LABEL opcode.
func (a JumpLabel) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.JumpLabel{Label: a.Label}
	return op.Encode(w)
}
// JumpNZ is a conditional jump to the matching JumpLabel.
type JumpNZ struct {
	Label uint32
}

// Encode emits a single JUMP_NZ opcode.
func (a JumpNZ) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.JumpNZ{Label: a.Label}
	return op.Encode(w)
}
// JumpZ is a conditional jump to the matching JumpLabel.
type JumpZ struct {
	Label uint32
}

// Encode emits a single JUMP_Z opcode.
func (a JumpZ) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.JumpZ{Label: a.Label}
	return op.Encode(w)
}
// Notification is an Instruction that sends Size bytes from Source to the server, with the ID returned as well.
type Notification struct {
	ID     uint64
	Source value.Pointer
	Size   uint64
}

// Encode pushes the source address (unless already on the VM stack), then the
// ID and Size, then emits a NOTIFICATION opcode.
// NOTE(review): ID and Size are uint64 but are pushed under the Uint32 type
// tag; encodePush still emits all 64 bits via EXTENDs — confirm the
// interpreter pops them with the expected width.
func (a Notification) Encode(r value.PointerResolver, w binary.Writer) error {
	ty, val, onStack := a.Source.Get(r)
	if !onStack {
		if err := encodePush(ty, val, w); err != nil {
			return err
		}
	}
	if err := encodePush(protocol.Type_Uint32, a.ID, w); err != nil {
		return err
	}
	if err := encodePush(protocol.Type_Uint32, a.Size, w); err != nil {
		return err
	}
	return opcode.Notification{}.Encode(w)
}
// Wait blocks replay until the event identified by ID fires.
type Wait struct {
	ID uint32
}

// Encode emits a single WAIT opcode.
func (a Wait) Encode(r value.PointerResolver, w binary.Writer) error {
	op := opcode.Wait{ID: a.ID}
	return op.Encode(w)
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql_test
import (
"context"
"fmt"
"net/url"
"os"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/jackc/pgx"
"github.com/jackc/pgx/pgtype"
)
// TestGetUserHashedPasswordTimeout verifies that user login attempts
// fail with a suitable timeout when some system range(s) are
// unavailable.
//
// To achieve this it creates a 2-node cluster, moves all ranges
// from node 1 to node 2, then stops node 2, then attempts
// to connect to node 1.
func TestGetUserHashedPasswordTimeout(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// We want to use a low timeout below to prevent
	// this test from taking forever, however
	// race builds are so slow as to trigger this timeout spuriously.
	skip.UnderRace(t)
	ctx := context.Background()
	// unavailableCh is used by the replica command filter
	// to conditionally block requests and simulate unavailability.
	var unavailableCh atomic.Value
	closedCh := make(chan struct{})
	close(closedCh)
	unavailableCh.Store(closedCh)
	knobs := &kvserver.StoreTestingKnobs{
		TestingRequestFilter: func(ctx context.Context, _ roachpb.BatchRequest) *roachpb.Error {
			select {
			case <-unavailableCh.Load().(chan struct{}):
			case <-ctx.Done():
			}
			return nil
		},
	}
	params := base.TestServerArgs{Knobs: base.TestingKnobs{Store: knobs}}
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(ctx)
	// Make a user that must use a password to authenticate.
	// Default privileges on defaultdb are needed to run simple queries.
	if _, err := db.Exec(`
CREATE USER foo WITH PASSWORD 'testabc';
GRANT ALL ON DATABASE defaultdb TO foo`); err != nil {
		t.Fatal(err)
	}
	// We'll attempt connections on gateway node 0.
	userURL, cleanupFn := sqlutils.PGUrlWithOptionalClientCerts(t,
		s.ServingSQLAddr(), t.Name(), url.UserPassword("foo", "testabc"), false /* withClientCerts */)
	defer cleanupFn()
	rootURL, rootCleanupFn := sqlutils.PGUrl(t,
		s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
	defer rootCleanupFn()
	// Override the timeout built into pgx so we are only subject to
	// what the server thinks.
	userURL.RawQuery += "&connect_timeout=0"
	rootURL.RawQuery += "&connect_timeout=0"
	fmt.Fprintln(os.Stderr, "-- sanity checks --")
	// We use a closure here and below to ensure the defers are run
	// before the rest of the test.
	func() {
		// Sanity check: verify that secure mode is enabled: password is
		// required. If this part fails, this means the test cluster is
		// not properly configured, and the remainder of the test below
		// would report false positives.
		unauthURL := userURL
		unauthURL.User = url.User("foo")
		dbSQL, err := pgxConn(t, unauthURL)
		if err == nil {
			defer func() { _ = dbSQL.Close() }()
		}
		if !testutils.IsError(err, "password authentication failed for user foo") {
			t.Fatalf("expected password error, got %v", err)
		}
	}()
	func() {
		// Sanity check: verify that the new user is able to log in with password.
		dbSQL, err := pgxConn(t, userURL)
		if err != nil {
			t.Fatal(err)
		}
		defer func() { _ = dbSQL.Close() }()
		row := dbSQL.QueryRow("SELECT current_user")
		var username string
		if err := row.Scan(&username); err != nil {
			t.Fatal(err)
		}
		if username != "foo" {
			t.Fatalf("invalid username: expected foo, got %q", username)
		}
	}()
	// Lower the login timeout to 200ms so the unavailability phase
	// below fails quickly.
	if _, err := db.Exec(`SET CLUSTER SETTING server.user_login.timeout = '200ms'`); err != nil {
		t.Fatal(err)
	}
	fmt.Fprintln(os.Stderr, "-- make ranges unavailable --")
	ch := make(chan struct{})
	unavailableCh.Store(ch)
	defer close(ch)
	fmt.Fprintln(os.Stderr, "-- expect timeout --")
	func() {
		// Now attempt to connect again. We're expecting a timeout within 5 seconds.
		start := timeutil.Now()
		dbSQL, err := pgxConn(t, userURL)
		if err == nil {
			defer func() { _ = dbSQL.Close() }()
		}
		if !testutils.IsError(err, "internal error while retrieving user account") {
			t.Fatalf("expected error during connection, got %v", err)
		}
		timeoutDur := timeutil.Now().Sub(start)
		if timeoutDur > 5*time.Second {
			t.Fatalf("timeout lasted for more than 5 second (%s)", timeoutDur)
		}
	}()
	fmt.Fprintln(os.Stderr, "-- no timeout for root --")
	func() {
		dbSQL, err := pgxConn(t, rootURL)
		if err != nil {
			t.Fatal(err)
		}
		defer func() { _ = dbSQL.Close() }()
		// A simple query must work for 'root' even without a system range available.
		if _, err := dbSQL.Exec("SELECT 1"); err != nil {
			t.Fatal(err)
		}
	}()
}
// pgxConn dials connURL via pgx, failing the test immediately if the URL
// cannot be parsed.
func pgxConn(t *testing.T, connURL url.URL) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConnectionString(connURL.String())
	if err != nil {
		t.Fatal(err)
	}
	// Reuse the connection's built-in type map to skip pgx's pg_catalog
	// introspection queries during connection setup.
	cfg.CustomConnInfo = func(c *pgx.Conn) (*pgtype.ConnInfo, error) {
		return c.ConnInfo, nil
	}
	return pgx.Connect(cfg)
}
|
package core
import (
"github.com/Peakchen/xgameCommon/akLog"
)
/*
by stefan 2572915286@qq.com
Based upon https://github.com/qiao/PathFinding.js
*/
// TGrid is a rectangular grid of path-finding nodes, addressed row-major as
// nodes[y][x].
type TGrid struct {
	width int // number of columns
	height int // number of rows
	nodes DoubleNode
}
const (
	// allWalked, when true, would force every node to a single walkability
	// value; it is fixed to false, so walkability comes from the matrix
	// passed to Grid.
	allWalked = bool(false)
)
// Grid constructs a TGrid with the given dimensions. matrix is an optional
// 0-1 layout where 0 marks a walkable cell; passing nil makes every node
// walkable.
func Grid(width, height int, matrix DoubleInt32) *TGrid {
	g := &TGrid{width: width, height: height}
	g.nodes = buildNodes(width, height, matrix)
	return g
}
/**
 * Build and return the nodes.
 * @private
 * @param {number} width
 * @param {number} height
 * @param {DoubleInt32} [matrix] - A 0-1 matrix representing
 * the Walkable status of the nodes (0 means walkable).
 * @see Grid
 */
func buildNodes(width, height int, matrix DoubleInt32) DoubleNode {
	var nodes = make(DoubleNode, height)
	for i := 0; i < height; i++ {
		nodes[i] = make(ArrayNode, width)
		for j := 0; j < width; j++ {
			// Default every node to walkable; the matrix pass below may
			// override this.
			nodes[i][j] = Node(int32(j), int32(i), true)
		}
	}
	if matrix == nil {
		return nodes
	}
	// A mismatched matrix is logged and ignored rather than treated as fatal.
	if len(matrix) != height || len(matrix[0]) != width {
		akLog.Error("Matrix size does not fit")
		return nodes
	}
	for i := 0; i < height; i++ {
		for j := 0; j < width; j++ {
			// 0, false, null will be Walkable
			// while others will be un-walkable
			if !allWalked {
				// allWalked is a const false, so this branch always runs;
				// the else arm below is currently unreachable.
				nodes[i][j].Walkable = matrix[i][j] == 0
			} else {
				nodes[i][j].Walkable = allWalked
			}
		}
	}
	return nodes
}
// GetNodeAt returns the node at column x, row y. No bounds checking is
// performed: out-of-range coordinates panic via the slice index.
func (this *TGrid) GetNodeAt(x, y int) *TNode {
	return this.nodes[y][x]
}
// IsWalkableAt reports whether (x, y) lies inside the grid and the node
// there is walkable; coordinates outside the grid are never walkable.
func (this *TGrid) IsWalkableAt(x, y int) bool {
	if !this.isInside(x, y) {
		return false
	}
	return this.nodes[y][x].Walkable
}
// isInside reports whether the coordinate (x, y) falls within the grid's
// bounds.
func (this *TGrid) isInside(x, y int) bool {
	xOK := x >= 0 && x < this.width
	yOK := y >= 0 && y < this.height
	return xOK && yOK
}
/**
 * Set whether the node on the given position is Walkable.
 * NOTE: panics (index out of range) if the coordinate is outside the grid.
 * NOTE(review): takes int32 coordinates while the other accessors take int —
 * presumably historical; confirm before unifying.
 * @param {number} x - The x coordinate of the node.
 * @param {number} y - The y coordinate of the node.
 * @param {boolean} Walkable - Whether the position is Walkable.
 */
func (this *TGrid) setWalkableAt(x, y int32, Walkable bool) {
	this.nodes[y][x].Walkable = Walkable
}
/**
 * Get the neighbors of the given node.
 *
 *     offsets      diagonalOffsets:
 *  +---+---+---+    +---+---+---+
 *  |   | 0 |   |    | 0 |   | 1 |
 *  +---+---+---+    +---+---+---+
 *  | 3 |   | 1 |    |   |   |   |
 *  +---+---+---+    +---+---+---+
 *  |   | 2 |   |    | 3 |   | 2 |
 *  +---+---+---+    +---+---+---+
 *
 *  When allowDiagonal is true, if offsets[i] is valid, then
 *  diagonalOffsets[i] and
 *  diagonalOffsets[(i + 1) % 4] is valid.
 * @param {Node} node
 * @param {DiagonalMovement} diagonalMovement
 */
func (this *TGrid) GetNeighbors(node *TNode, move DiagonalMovement) ArrayNode {
	var x = int(node.X)
	var y = int(node.Y)
	var neighbors = ArrayNode{}
	var (
		// s0..s3: whether each straight (orthogonal) neighbor is walkable,
		// clockwise from up. d0..d3: whether each diagonal is permitted,
		// clockwise from up-left; derived from the straights below.
		s0 = false
		d0 = false
		s1 = false
		d1 = false
		s2 = false
		d2 = false
		s3 = false
		d3 = false
		nodes = this.nodes
	)
	// ↑
	if this.IsWalkableAt(x, y-1) {
		neighbors = append(neighbors, nodes[y-1][x])
		s0 = true
	}
	// →
	if this.IsWalkableAt(x+1, y) {
		neighbors = append(neighbors, nodes[y][x+1])
		s1 = true
	}
	// ↓
	if this.IsWalkableAt(x, y+1) {
		neighbors = append(neighbors, nodes[y+1][x])
		s2 = true
	}
	// ←
	if this.IsWalkableAt(x-1, y) {
		neighbors = append(neighbors, nodes[y][x-1])
		s3 = true
	}
	if move == Never {
		return neighbors
	}
	// Decide which diagonals are allowed based on the movement policy.
	if move == OnlyWhenNoObstacles {
		d0 = s3 && s0
		d1 = s0 && s1
		d2 = s1 && s2
		d3 = s2 && s3
	} else if move == IfAtMostOneObstacle {
		d0 = s3 || s0
		d1 = s0 || s1
		d2 = s1 || s2
		d3 = s2 || s3
	} else if move == Always {
		d0 = true
		d1 = true
		d2 = true
		d3 = true
	} else {
		panic("Incorrect value of diagonalMovement")
	}
	// ↖
	if d0 && this.IsWalkableAt(x-1, y-1) {
		neighbors = append(neighbors, nodes[y-1][x-1])
	}
	// ↗
	if d1 && this.IsWalkableAt(x+1, y-1) {
		neighbors = append(neighbors, nodes[y-1][x+1])
	}
	// ↘
	if d2 && this.IsWalkableAt(x+1, y+1) {
		neighbors = append(neighbors, nodes[y+1][x+1])
	}
	// ↙
	if d3 && this.IsWalkableAt(x-1, y+1) {
		neighbors = append(neighbors, nodes[y+1][x-1])
	}
	return neighbors
}
// clone returns a deep copy of this grid: same dimensions, fresh nodes with
// the same walkability.
//
// (The original called Grid(width, height, nil), which built a complete node
// grid only to immediately discard and rebuild it; this version allocates
// the nodes exactly once.)
func (this *TGrid) clone() *TGrid {
	width := this.width
	height := this.height
	newNodes := make(DoubleNode, height)
	for i := 0; i < height; i++ {
		newNodes[i] = make(ArrayNode, width)
		for j := 0; j < width; j++ {
			newNodes[i][j] = Node(int32(j), int32(i), this.nodes[i][j].Walkable)
		}
	}
	return &TGrid{
		width:  width,
		height: height,
		nodes:  newNodes,
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package taskmanager
import (
"context"
"math/rand"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/uiauto/taskmanager"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the EndProcess test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         EndProcess,
		LacrosStatus: testing.LacrosVariantNeeded,
		Desc:         "Verify the 'End process' button works on plugin, non-plugin and grouped tabs",
		Contacts: []string{
			"sun.tsai@cienet.com",
			"cienet-development@googlegroups.com",
			"chromeos-sw-engprod@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "chromeLoggedIn",
		Timeout:      10 * time.Minute,
	})
}
// endProcessTestResources bundles the shared handles every sub-test needs.
type endProcessTestResources struct {
	tconn       *chrome.TestConn           // Test API connection
	ui          *uiauto.Context            // UI automation helper
	kb          *input.KeyboardEventWriter // virtual keyboard
	taskManager *taskmanager.TaskManager   // Task Manager page object
}
// EndProcess verifies the "End process" button works on plugin, non-plugin and grouped tabs.
func EndProcess(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect Test API: ", err)
	}
	kb, err := input.Keyboard(ctx)
	if err != nil {
		// Fixed: the original built a wrapped error with errors.Wrap and
		// discarded it, then continued with a nil keyboard writer.
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()
	resources := &endProcessTestResources{
		tconn:       tconn,
		ui:          uiauto.New(tconn),
		kb:          kb,
		taskManager: taskmanager.New(tconn, kb),
	}
	for _, test := range []endProcessTest{
		newNonPluginTest(),
		newPluginTest(),
		newGroupedTabsTest(),
	} {
		f := func(ctx context.Context, s *testing.State) {
			cleanupCtx := ctx
			// Reserve time for the deferred cleanups below.
			ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
			defer cancel()
			for _, process := range test.getProcesses() {
				if err := process.Open(ctx, cr, tconn, kb); err != nil {
					s.Fatal("Failed to open the process: ", err)
				}
				defer process.Close(cleanupCtx)
				defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, test.getDescription()+"_before_closing_tab")
				if tab, ok := process.(*pluginTab); ok {
					// Under some slow network connections or DUTs, the plugin node might not be loaded instantly.
					// Therefore, give some time to wait until the target node exists.
					if err := resources.ui.WithTimeout(time.Minute).WaitUntilExists(tab.pluginNode)(ctx); err != nil {
						s.Fatal("Failed to find the plugin node: ", err)
					}
				}
			}
			if err := resources.taskManager.Open(ctx); err != nil {
				s.Fatal("Failed to open the task manager: ", err)
			}
			defer resources.taskManager.Close(cleanupCtx, tconn)
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, test.getDescription()+"_before_closing_tm")
			if err := resources.taskManager.WaitUntilStable(ctx); err != nil {
				s.Fatal("Failed to wait until the Task Manager becomes stable: ", err)
			}
			if err := test.terminateAndVerify(ctx, resources); err != nil {
				s.Fatal("Failed to terminate the process: ", err)
			}
		}
		if !s.Run(ctx, test.getDescription(), f) {
			s.Error("Failed to run ", test.getDescription())
		}
	}
}
// endProcessTest is the per-scenario contract: supply processes to open,
// terminate one of them through the Task Manager, and verify the result.
type endProcessTest interface {
	// terminateAndVerify ends one of the test's processes and checks it died.
	terminateAndVerify(ctx context.Context, res *endProcessTestResources) error
	// getDescription returns a short name used for sub-test logging.
	getDescription() string
	// getProcesses returns the processes the test operates on.
	getProcesses() []taskmanager.Process
}
// nonPluginTest exercises "End process" on ordinary (plugin-free) tabs.
type nonPluginTest struct {
	description string
	processes   []taskmanager.Process
}

// newNonPluginTest creates the scenario with a fixed set of plain web pages.
func newNonPluginTest() *nonPluginTest {
	processes := []taskmanager.Process{
		taskmanager.NewChromeTabProcess("https://translate.google.com/?hl=en"),
		taskmanager.NewChromeTabProcess("https://news.ycombinator.com/news"),
		taskmanager.NewChromeTabProcess("http://lite.cnn.com/en"),
		taskmanager.NewChromeTabProcess("https://help.netflix.com/en"),
		taskmanager.NewChromeTabProcess("https://www.cbc.ca/lite/trending-news"),
	}
	return &nonPluginTest{"non_plugin_test", processes}
}

// terminateAndVerify delegates to the shared random-tab implementation.
func (npt *nonPluginTest) terminateAndVerify(ctx context.Context, res *endProcessTestResources) error {
	return terminateAndVerify(ctx, npt, res)
}

// getDescription returns the sub-test name.
func (npt *nonPluginTest) getDescription() string {
	return npt.description
}

// getProcesses returns the tabs this scenario opens.
func (npt *nonPluginTest) getProcesses() []taskmanager.Process {
	return npt.processes
}
// pluginTab is a Chrome tab hosting a third-party subframe ("plugin"),
// identified in the Task Manager by the subframe's origin.
type pluginTab struct {
	*taskmanager.ChromeTab
	pluginName string           // subframe origin shown in the Task Manager
	pluginNode *nodewith.Finder // UI node proving the subframe loaded
}

// newPluginTab wraps a tab URL with its expected plugin name and UI node.
func newPluginTab(url, pluginName string, pluginNode *nodewith.Finder) *pluginTab {
	return &pluginTab{
		ChromeTab:  taskmanager.NewChromeTabProcess(url),
		pluginName: pluginName,
		pluginNode: pluginNode,
	}
}

// NameInTaskManager returns the row title for this tab's subframe process.
func (pTab *pluginTab) NameInTaskManager(ctx context.Context, tconn *chrome.TestConn) (string, error) {
	// Plugin name is not changed dynamically. Just return its name directly.
	return "Subframe: " + pTab.pluginName, nil
}
// pluginTest exercises "End process" on tabs that embed third-party
// subframes.
type pluginTest struct {
	description string
	processes   []taskmanager.Process
}

// newPluginTest creates the scenario with pages known to embed subframes.
func newPluginTest() *pluginTest {
	processes := []taskmanager.Process{
		newPluginTab("https://twitter.com/i/flow/signup",
			"https://accounts.google.com/", nodewith.Name("Sign up with Google").Role(role.Button),
		),
		newPluginTab("https://www.oreilly.com",
			"https://driftt.com/", nodewith.NameStartingWith("Chat message from O'Reilly Bot:").Role(role.Button),
		),
	}
	return &pluginTest{"plugin_test", processes}
}
// terminateAndVerify kills the plugin subframe process of one randomly
// chosen tab via the task manager, then verifies the plugin's UI node is
// gone while the hosting tab survives.
func (pt *pluginTest) terminateAndVerify(ctx context.Context, res *endProcessTestResources) error {
	rand.Seed(time.Now().UnixNano())
	p := pt.processes[rand.Intn(len(pt.processes))]
	tab, ok := p.(*pluginTab)
	if !ok {
		return errors.New("unexpected process")
	}
	name, err := p.NameInTaskManager(ctx, res.tconn)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the process name in task manager")
	}
	testing.ContextLogf(ctx, "Terminate plugin process %q", name)
	if err := res.taskManager.TerminateProcess(name)(ctx); err != nil {
		return errors.Wrap(err, "failed to verify 'End process' button works")
	}
	// Bring the tab to the foreground so the plugin node's absence can be
	// observed in the UI tree.
	if err := res.tconn.Call(ctx, nil, "async (id) => tast.promisify(chrome.tabs.update)(id, {active: true})", tab.ID); err != nil {
		return errors.Wrap(err, "failed to focus on the target tab")
	}
	return res.ui.WaitUntilGone(tab.pluginNode)(ctx)
}
// getDescription returns the test-case name.
func (pt *pluginTest) getDescription() string {
	return pt.description
}
// getProcesses returns the plugin-hosting tab processes for this test.
func (pt *pluginTest) getProcesses() []taskmanager.Process {
	return pt.processes
}
// groupedTabsTest is an endProcessTest whose tabs all share a single
// renderer process, so ending one kills them all.
type groupedTabsTest struct {
	// description names the test case in logs and results.
	description string
	// processes are the identical new-tab pages opened for the test.
	processes []taskmanager.Process
}
// newGroupedTabsTest returns a test case of five identical new-tab pages,
// which Chrome groups into a single renderer process.
func newGroupedTabsTest() *groupedTabsTest {
	const groupedTabsAmount = 5
	processes := make([]taskmanager.Process, 0, groupedTabsAmount)
	for i := 0; i < groupedTabsAmount; i++ {
		processes = append(processes, taskmanager.NewChromeTabProcess(chrome.NewTabURL))
	}
	return &groupedTabsTest{description: "grouped_tabs_test", processes: processes}
}
// terminateAndVerify delegates to the package-level terminateAndVerify
// helper; for grouped tabs, all tabs are expected to die together.
func (gtt *groupedTabsTest) terminateAndVerify(ctx context.Context, res *endProcessTestResources) error {
	return terminateAndVerify(ctx, gtt, res)
}
// getDescription returns the test-case name.
func (gtt *groupedTabsTest) getDescription() string {
	return gtt.description
}
// getProcesses returns the grouped tab processes for this test.
func (gtt *groupedTabsTest) getProcesses() []taskmanager.Process {
	return gtt.processes
}
// terminateAndVerify kills one randomly chosen process of the given test via
// the task manager's "End process" button and verifies the expected set of
// processes dies. For nonPluginTest only the chosen tab must die; for
// groupedTabsTest all tabs share one process, so every tab must die.
func terminateAndVerify(ctx context.Context, test endProcessTest, res *endProcessTestResources) error {
	rand.Seed(time.Now().UnixNano())
	n := rand.Intn(len(test.getProcesses()))
	p := test.getProcesses()[n]

	var processesToBeVerified []taskmanager.Process
	switch test.(type) {
	case *nonPluginTest:
		processesToBeVerified = append(processesToBeVerified, p)
	case *groupedTabsTest:
		// Grouped tabs terminate together, so verify every one of them.
		processesToBeVerified = append(processesToBeVerified, test.getProcesses()...)
	default:
		return errors.New("unexpected test type")
	}

	// Sanity-check that everything is alive before terminating.
	for _, process := range processesToBeVerified {
		if status, err := process.Status(ctx, res.tconn); err != nil {
			return err
		} else if status != taskmanager.ProcessAlive {
			return errors.Errorf("expecting the tab process to be alive, but got %q", status)
		}
	}
	name, err := p.NameInTaskManager(ctx, res.tconn)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the process name in task manager")
	}
	testing.ContextLogf(ctx, "Terminate process %q", name)
	if err := res.taskManager.TerminateProcess(name)(ctx); err != nil {
		return errors.Wrap(err, "failed to verify 'End process' button works")
	}
	// Termination is asynchronous; poll until each process reports dead.
	for _, process := range processesToBeVerified {
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			if status, err := process.Status(ctx, res.tconn); err != nil {
				return err
			} else if status != taskmanager.ProcessDead {
				return errors.Errorf("expecting the tab process to be dead, but got %q", status)
			}
			return nil
		}, &testing.PollOptions{Timeout: 5 * time.Second}); err != nil {
			return errors.Wrapf(err, "failed to verify the process %q is terminated", name)
		}
	}
	return nil
}
|
package ymdRedisServer
import "github.com/orestonce/ymd/ymdRedis/ymdRedisProtocol"
// Echo replies with the given message verbatim, mirroring the redis ECHO
// command. It never fails, so errMsg is always empty.
// (Receiver renamed from non-idiomatic `this` to `c`.)
func (c *RedisCore) Echo(message string) (reply ymdRedisProtocol.BulkReply, errMsg string) {
	reply.Value = []byte(message)
	return reply, ``
}
// Ping mirrors the redis PING command by echoing the message back.
// (Receiver renamed from non-idiomatic `this` to `c`.)
func (c *RedisCore) Ping(message string) (reply ymdRedisProtocol.BulkReply, errMsg string) {
	return c.Echo(message)
}
// Quit implements the redis QUIT command; it always succeeds.
// (Receiver renamed from non-idiomatic `this` to `c`.)
func (c *RedisCore) Quit() (errMsg string) {
	return ``
}
// TODO
// select, swapdb
|
package aws
import (
"encoding/base64"
"errors"
"sync/atomic"
"time"
"github.com/NYTimes/gizmo/pubsub"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sns/snsiface"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
)
// publisher will accept AWS credentials and an SNS topic name
// and it will emit any publish events to it.
// publisher will accept AWS credentials and an SNS topic name
// and it will emit any publish events to it.
type publisher struct {
	// sns is the SNS client (interface type so it can be faked in tests).
	sns snsiface.SNSAPI
	// topic is the SNS topic ARN to publish to.
	topic string
}
// NewPublisher will initiate the SNS client.
// If no credentials are passed in with the config,
// the publisher is instantiated with the AWS_ACCESS_KEY
// and the AWS_SECRET_KEY environment variables.
// NewPublisher will initiate the SNS client.
// If no credentials are passed in with the config,
// the publisher is instantiated with the AWS_ACCESS_KEY
// and the AWS_SECRET_KEY environment variables.
func NewPublisher(cfg SNSConfig) (pubsub.Publisher, error) {
	p := &publisher{}

	if cfg.Topic == "" {
		return p, errors.New("SNS topic name is required")
	}
	p.topic = cfg.Topic

	if cfg.Region == "" {
		return p, errors.New("SNS region is required")
	}

	sess, err := session.NewSession()
	if err != nil {
		return p, err
	}

	// Credential precedence: static keys, then assumed role, then environment.
	var creds *credentials.Credentials
	if cfg.AccessKey != "" {
		creds = credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, cfg.SessionToken)
	} else if cfg.RoleARN != "" {
		var err error
		creds, err = requestRoleCredentials(sess, cfg.RoleARN, cfg.MFASerialNumber)
		if err != nil {
			return p, err
		}
	} else {
		creds = credentials.NewEnvCredentials()
	}

	p.sns = sns.New(sess, &aws.Config{
		Credentials: creds,
		Region:      &cfg.Region,
		Endpoint:    cfg.EndpointURL,
	})
	return p, nil
}
// Publish will marshal the proto message and emit it to the SNS topic.
// The key will be used as the SNS message subject.
// Publish marshals the proto message and emits it to the SNS topic.
// The key is used as the SNS message subject.
func (p *publisher) Publish(ctx context.Context, key string, m proto.Message) error {
	payload, err := proto.Marshal(m)
	if err != nil {
		return err
	}
	return p.PublishRaw(ctx, key, payload)
}
// PublishRaw will emit the byte array to the SNS topic.
// The key will be used as the SNS message subject.
// PublishRaw base64-encodes the byte payload and emits it to the SNS topic.
// The key is used as the SNS message subject.
func (p *publisher) PublishRaw(_ context.Context, key string, m []byte) error {
	encoded := base64.StdEncoding.EncodeToString(m)
	_, err := p.sns.Publish(&sns.PublishInput{
		TopicArn: &p.topic,
		Subject:  &key,
		Message:  &encoded,
	})
	return err
}
var (
	// defaultSQSMaxMessages is the default number of bulk messages
	// the subscriber will attempt to fetch on each
	// receive.
	defaultSQSMaxMessages int64 = 10
	// defaultSQSTimeoutSeconds is the default number of seconds the
	// SQS client will wait before timing out.
	defaultSQSTimeoutSeconds int64 = 2
	// defaultSQSSleepInterval is the default time.Duration the
	// subscriber will wait if it sees no messages
	// on the queue.
	defaultSQSSleepInterval = 2 * time.Second
	// defaultSQSDeleteBufferSize is the default limit of messages
	// allowed in the delete buffer before
	// executing a 'delete batch' request.
	defaultSQSDeleteBufferSize = 0
	// defaultSQSConsumeBase64 makes the subscriber base64-decode
	// message bodies by default (matching PublishRaw's encoding).
	defaultSQSConsumeBase64 = true
)
// defaultSQSConfig fills any unset (nil) SQSConfig fields with the
// package-level defaults declared above.
func defaultSQSConfig(cfg *SQSConfig) {
	if cfg.MaxMessages == nil {
		cfg.MaxMessages = &defaultSQSMaxMessages
	}
	if cfg.TimeoutSeconds == nil {
		cfg.TimeoutSeconds = &defaultSQSTimeoutSeconds
	}
	if cfg.SleepInterval == nil {
		cfg.SleepInterval = &defaultSQSSleepInterval
	}
	if cfg.DeleteBufferSize == nil {
		cfg.DeleteBufferSize = &defaultSQSDeleteBufferSize
	}
	if cfg.ConsumeBase64 == nil {
		cfg.ConsumeBase64 = &defaultSQSConsumeBase64
	}
}
type (
	// subscriber is an SQS client that allows a user to
	// consume messages via the pubsub.Subscriber interface.
	subscriber struct {
		sqs      sqsiface.SQSAPI
		cfg      SQSConfig
		queueURL *string

		// toDelete feeds acknowledged messages to handleDeletes.
		toDelete chan *deleteRequest
		// inFlight and stopped are signals to manage delete requests
		// at shutdown.
		inFlight uint64
		stopped  uint32

		stop chan chan error
		// sqsErr holds the fatal receive error exposed via Err().
		sqsErr error
	}

	// subscriberMessage is the SQS implementation of `SubscriberMessage`.
	subscriberMessage struct {
		sub     *subscriber
		message *sqs.Message
	}

	// deleteRequest pairs a batch-delete entry with a channel that
	// receives the outcome of the delete call.
	deleteRequest struct {
		entry   *sqs.DeleteMessageBatchRequestEntry
		receipt chan error
	}
)
// incrementInFlight will atomically increment the in-flight message count.
func (s *subscriber) incrementInFlight() {
	atomic.AddUint64(&s.inFlight, 1)
}
// decrementInFlight will atomically decrement the in-flight message count.
// Adding ^uint64(0) (i.e. -1 in two's complement) is the documented way to
// subtract with atomic.AddUint64.
func (s *subscriber) decrementInFlight() {
	atomic.AddUint64(&s.inFlight, ^uint64(0))
}
// inFlightCount returns the number of in-flight requests currently
// running on this server.
func (s *subscriber) inFlightCount() uint64 {
	return atomic.LoadUint64(&s.inFlight)
}
// NewSubscriber will initiate a new Decrypter for the subscriber
// if a key file is provided. It will also fetch the SQS Queue Url
// and set up the SQS client.
// NewSubscriber will initiate a new Decrypter for the subscriber
// if a key file is provided. It will also fetch the SQS Queue Url
// and set up the SQS client.
func NewSubscriber(cfg SQSConfig) (pubsub.Subscriber, error) {
	var err error

	defaultSQSConfig(&cfg)
	s := &subscriber{
		cfg:      cfg,
		toDelete: make(chan *deleteRequest),
		stop:     make(chan chan error, 1),
	}

	if (len(cfg.QueueName) == 0) && (len(cfg.QueueURL) == 0) {
		return s, errors.New("sqs queue name or url is required")
	}

	sess, err := session.NewSession()
	if err != nil {
		return s, err
	}

	// Credential precedence: static keys, then assumed role, then environment
	// (mirrors NewPublisher).
	var creds *credentials.Credentials
	if cfg.AccessKey != "" {
		creds = credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, cfg.SessionToken)
	} else if cfg.RoleARN != "" {
		var err error
		creds, err = requestRoleCredentials(sess, cfg.RoleARN, cfg.MFASerialNumber)
		if err != nil {
			return s, err
		}
	} else {
		creds = credentials.NewEnvCredentials()
	}

	s.sqs = sqs.New(sess, &aws.Config{
		Credentials: creds,
		Region:      &cfg.Region,
		Endpoint:    cfg.EndpointURL,
	})

	// Prefer an explicitly configured queue URL; otherwise resolve it from
	// the queue name via the SQS API.
	if len(cfg.QueueURL) == 0 {
		var urlResp *sqs.GetQueueUrlOutput
		urlResp, err = s.sqs.GetQueueUrl(&sqs.GetQueueUrlInput{
			QueueName:              &cfg.QueueName,
			QueueOwnerAWSAccountId: &cfg.QueueOwnerAccountID,
		})
		if err != nil {
			return s, err
		}
		s.queueURL = urlResp.QueueUrl
	} else {
		s.queueURL = &cfg.QueueURL
	}
	return s, nil
}
// Message returns the message body. When ConsumeBase64 is set (the default)
// the body is base64-decoded first, matching PublishRaw's encoding;
// otherwise the raw body bytes are returned.
// On a decode failure the error is logged and a nil/partial slice returned.
func (m *subscriberMessage) Message() []byte {
	if !*m.sub.cfg.ConsumeBase64 {
		return []byte(*m.message.Body)
	}

	msgBody, err := base64.StdEncoding.DecodeString(*m.message.Body)
	if err != nil {
		pubsub.Log.Warnf("unable to parse message body: %s", err)
	}
	return msgBody
}
// ExtendDoneDeadline changes the visibility timeout of the underlying SQS
// message. It will set the visibility timeout of the message to the given
// duration (truncated to whole seconds).
func (m *subscriberMessage) ExtendDoneDeadline(d time.Duration) error {
	_, err := m.sub.sqs.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
		QueueUrl:          m.sub.queueURL,
		ReceiptHandle:     m.message.ReceiptHandle,
		VisibilityTimeout: aws.Int64(int64(d.Seconds())),
	})
	return err
}
// Done will queue up a message to be deleted. By default,
// the `SQSDeleteBufferSize` will be 0, so this will block until the
// message has been deleted, and the in-flight count is decremented on return.
func (m *subscriberMessage) Done() error {
	defer m.sub.decrementInFlight()
	receipt := make(chan error)
	// handleDeletes replies on the receipt channel once the delete batch
	// containing this entry has been flushed.
	m.sub.toDelete <- &deleteRequest{
		entry: &sqs.DeleteMessageBatchRequestEntry{
			Id:            m.message.MessageId,
			ReceiptHandle: m.message.ReceiptHandle,
		},
		receipt: receipt,
	}
	return <-receipt
}
// Start will start consuming messages on the SQS queue
// and emit any messages to the returned channel.
// If it encounters any issues, it will populate the Err() error
// and close the returned channel.
func (s *subscriber) Start() <-chan pubsub.SubscriberMessage {
	output := make(chan pubsub.SubscriberMessage)
	// handleDeletes drains the toDelete channel for the lifetime of the
	// subscriber.
	go s.handleDeletes()
	go func(s *subscriber, output chan pubsub.SubscriberMessage) {
		defer close(output)
		var (
			resp *sqs.ReceiveMessageOutput
			err  error
		)
		for {
			select {
			case exit := <-s.stop:
				exit <- nil
				return
			default:
				// get messages
				pubsub.Log.Debugf("receiving messages")
				resp, err = s.sqs.ReceiveMessage(&sqs.ReceiveMessageInput{
					MaxNumberOfMessages: s.cfg.MaxMessages,
					QueueUrl:            s.queueURL,
					WaitTimeSeconds:     s.cfg.TimeoutSeconds,
				})
				if err != nil {
					// we've encountered a major error
					// this will set the error value and close the channel
					// so the user will stop iterating and check the err
					s.sqsErr = err
					// Stop blocks until this loop observes s.stop, hence
					// the separate goroutine.
					go s.Stop()
					continue
				}

				// if we didn't get any messages, lets chill out for a sec
				if len(resp.Messages) == 0 {
					pubsub.Log.Debugf("no messages found. sleeping for %s", s.cfg.SleepInterval)
					time.Sleep(*s.cfg.SleepInterval)
					continue
				}

				pubsub.Log.Debugf("found %d messages", len(resp.Messages))

				// for each message, pass to output
				for _, msg := range resp.Messages {
					output <- &subscriberMessage{
						sub:     s,
						message: msg,
					}
					s.incrementInFlight()
				}
			}
		}
	}(s, output)
	return output
}
// handleDeletes batches acknowledged messages from the toDelete channel and
// issues DeleteMessageBatch calls once the buffer exceeds DeleteBufferSize.
// Each sender receives the outcome of the most recent batch call on its
// receipt channel.
// NOTE(review): s.toDelete is never closed in this file, so the loop is
// normally left via the stopped/in-flight break below — verify against the
// rest of the package.
func (s *subscriber) handleDeletes() {
	batchInput := &sqs.DeleteMessageBatchInput{
		QueueUrl: s.queueURL,
	}
	var (
		err           error
		entriesBuffer []*sqs.DeleteMessageBatchRequestEntry
		delRequest    *deleteRequest
	)
	for delRequest = range s.toDelete {
		entriesBuffer = append(entriesBuffer, delRequest.entry)
		// if the subber is stopped and this is the last request,
		// flush quit!
		if s.isStopped() && s.inFlightCount() == 1 {
			break
		}
		// if buffer is full, send the request
		if len(entriesBuffer) > *s.cfg.DeleteBufferSize {
			batchInput.Entries = entriesBuffer
			_, err = s.sqs.DeleteMessageBatch(batchInput)
			// clear buffer
			entriesBuffer = []*sqs.DeleteMessageBatchRequestEntry{}
		}
		delRequest.receipt <- err
	}
	// clear any remainders before shutdown; delRequest is the last request
	// received above and gets the final batch's result.
	if len(entriesBuffer) > 0 {
		batchInput.Entries = entriesBuffer
		_, err = s.sqs.DeleteMessageBatch(batchInput)
		delRequest.receipt <- err
	}
}
// isStopped reports whether Stop has been invoked (atomically reads the
// stopped flag).
func (s *subscriber) isStopped() bool {
	return atomic.LoadUint32(&s.stopped) == 1
}
// Stop will block until the consumer has stopped consuming
// messages.
// NOTE(review): the stopped flag is swapped only after the exit channel is
// queued, so two concurrent Stop calls could both pass the isStopped check —
// confirm callers serialize Stop.
func (s *subscriber) Stop() error {
	if s.isStopped() {
		return errors.New("sqs subscriber is already stopped")
	}
	exit := make(chan error)
	s.stop <- exit
	atomic.SwapUint32(&s.stopped, uint32(1))
	return <-exit
}
// Err will contain any errors that occurred during
// consumption. This method should be checked after
// a user encounters a closed channel.
func (s *subscriber) Err() error {
	return s.sqsErr
}
// requestRoleCredentials returns the credentials from AssumeRoleProvider to assume the role
// referenced by the roleARN. If MFASerialNumber is specified, prompt for MFA token from stdin.
func requestRoleCredentials(sess *session.Session, roleARN string, MFASerialNumber string) (*credentials.Credentials, error) {
	if roleARN == "" {
		return nil, errors.New("role ARN is required")
	}

	return stscreds.NewCredentials(sess, roleARN, func(provider *stscreds.AssumeRoleProvider) {
		if MFASerialNumber != "" {
			// MFA token is read interactively from stdin on each refresh.
			provider.SerialNumber = &MFASerialNumber
			provider.TokenProvider = stscreds.StdinTokenProvider
		}
	}), nil
}
|
package main
import (
"context"
"github.com/aws/aws-lambda-go/lambda"
"github.com/iarlyy/golang-multicloud-function/function"
)
// HandleRequest is the Lambda entry point: it runs the payload's Process
// step and returns the processed payload. It never returns an error itself.
// (Local renamed from snake_case res_msg to Go-style resMsg.)
func HandleRequest(ctx context.Context, msg function.MsgPayload) (function.MsgPayload, error) {
	resMsg := msg.Process()
	return resMsg, nil
}
// main wires HandleRequest into the AWS Lambda runtime.
func main() {
	lambda.Start(HandleRequest)
}
|
package fourway
// Geometry constants for rendering the four-way intersection.
const (
	// PictureLength is the side length of the full picture.
	PictureLength = 600.0 // pixels
	// IntersectionLength is the side length of the central intersection.
	IntersectionLength = 200.0 // px
)
package entities
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCreatesCorrectly verifies that a new registration gets a non-zero id
// and creation date and stores the given event name and callback URL.
func TestCreatesCorrectly(t *testing.T) {
	r := CreateNewRegistration("myevent", "mycallback")

	assert.NotZero(t, r.Id)
	assert.NotZero(t, r.CreationDate)
	assert.Equal(t, "myevent", r.EventName)
	assert.Equal(t, "mycallback", r.CallbackUrl)
}
|
package fridge
import (
"github.com/shomali11/fridge/item"
"time"
)
// Event types published on the fridge's event bus.
const (
	// Fresh is when an item has not passed its "Best By" duration
	Fresh = "FRESH"

	// Cold is when an item has passed its "Best By" duration but not its "Use By" one
	Cold = "COLD"

	// Expired is when an item has passed its "Use By" duration
	Expired = "EXPIRED"

	// NotFound is when an item was not found due to being removed or was never stored before
	NotFound = "NOT_FOUND"

	// Refresh is when an item was restocked with a fresher one
	Refresh = "REFRESH"

	// OutOfStock is when an item needs restocking, but no restocking function was provided
	OutOfStock = "OUT_OF_STOCK"

	// Unchanged is when the restocked item is not different from the version in the cache
	Unchanged = "UNCHANGED"
)

const (
	// empty is the zero-value string returned on errors and misses.
	empty = ""
)
// NewClient returns a client using an xredis client
// NewClient builds a fridge client from the given config options, wiring
// up the item registry, the redis-backed DAO and a fresh event bus.
func NewClient(options ...ConfigOption) *Client {
	cfg := NewConfig(options...)
	return &Client{
		itemRegistry: item.NewRegistry(cfg.defaultBestBy, cfg.defaultUseBy),
		itemDao:      item.NewDao(cfg.xredisClient),
		eventBus:     NewEventBus(),
	}
}
// Client fridge client
type Client struct {
itemRegistry *item.Registry
itemDao *item.Dao
eventBus *EventBus
}
// Register an item
func (c *Client) Register(key string, options ...item.ConfigOption) {
itemConfig := item.NewConfig(key, options...)
c.itemRegistry.Set(itemConfig)
}
// Deregister an item
func (c *Client) Deregister(key string) {
c.itemRegistry.Remove(key)
}
// Put an item
func (c *Client) Put(key string, value string) error {
itemConfig := c.itemRegistry.Get(key)
err := c.itemDao.Set(key, value, itemConfig.GetUseByInSeconds())
if err != nil {
return err
}
return nil
}
// Get an item
func (c *Client) Get(key string) (string, bool, error) {
itemConfig := c.itemRegistry.Get(key)
cachedValue, found, stockTimestamp, err := c.itemDao.Get(key)
if err != nil {
return empty, false, err
}
if !found {
if stockTimestamp.IsZero() {
go c.publish(key, NotFound)
} else {
go c.publish(key, Expired)
}
return c.restock(itemConfig)
}
now := time.Now().UTC()
if now.Before(stockTimestamp.Add(itemConfig.BestBy)) {
go c.publish(key, Fresh)
return cachedValue, true, nil
}
if now.Before(stockTimestamp.Add(itemConfig.UseBy)) {
go c.publish(key, Cold)
go c.restockAndCompare(cachedValue, itemConfig)
return cachedValue, true, nil
}
go c.publish(key, Expired)
return c.restockAndCompare(cachedValue, itemConfig)
}
// Remove an item
func (c *Client) Remove(key string) error {
return c.itemDao.Remove(key)
}
// Ping pings redis
func (c *Client) Ping() error {
return c.itemDao.Ping()
}
// Close closes resources
func (c *Client) Close() error {
return c.itemDao.Close()
}
// HandleEvent overrides the default handleEvent callback
func (c *Client) HandleEvent(handleEvent func(event *Event)) {
c.eventBus.HandleEvent(handleEvent)
}
// publish forwards a freshness event for key onto the event bus.
func (c *Client) publish(key string, eventType string) {
	c.eventBus.Publish(key, eventType)
}
// restockAndCompare restocks the item and publishes an Unchanged event when
// the freshly fetched value equals the currently cached one.
func (c *Client) restockAndCompare(cachedValue string, itemConfig *item.Config) (string, bool, error) {
	newValue, found, err := c.restock(itemConfig)
	if err != nil {
		return empty, found, err
	}
	if !found {
		return empty, false, nil
	}
	if newValue == cachedValue {
		go c.publish(itemConfig.Key, Unchanged)
	}
	return newValue, true, nil
}
// restock fetches a fresh value via the item's Restock callback, stores it,
// and publishes Refresh. Publishes OutOfStock and reports not-found when no
// Restock callback is configured.
func (c *Client) restock(itemConfig *item.Config) (string, bool, error) {
	if itemConfig.Restock == nil {
		go c.publish(itemConfig.Key, OutOfStock)
		return empty, false, nil
	}

	result, err := itemConfig.Restock()
	if err != nil {
		return empty, false, err
	}

	go c.publish(itemConfig.Key, Refresh)
	// Persist the fresh value so subsequent Gets hit the cache.
	err = c.Put(itemConfig.Key, result)
	if err != nil {
		return empty, false, err
	}
	return result, true, nil
}
|
package model
import (
"time"
"github.com/caos/zitadel/internal/crypto"
"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/model"
)
// KeyView is the read-model projection of a stored signing/encryption key.
type KeyView struct {
	ID        string
	Private   bool
	Expiry    time.Time
	Algorithm string
	Usage     KeyUsage
	// Key holds the encrypted key material.
	Key      *crypto.CryptoValue
	Sequence uint64
}

// SigningKey is a decrypted private key ready for signing.
type SigningKey struct {
	ID        string
	Algorithm string
	Key       interface{}
}

// PublicKey is a decrypted public key for signature verification.
type PublicKey struct {
	ID        string
	Algorithm string
	Usage     KeyUsage
	Key       interface{}
}

// KeySearchRequest describes a paged, sorted, filtered key query.
type KeySearchRequest struct {
	Offset        uint64
	Limit         uint64
	SortingColumn KeySearchKey
	Asc           bool
	Queries       []*KeySearchQuery
}

// KeySearchKey enumerates the columns a key search can filter/sort on.
type KeySearchKey int32

const (
	KeySearchKeyUnspecified KeySearchKey = iota
	KeySearchKeyID
	KeySearchKeyPrivate
	KeySearchKeyExpiry
	KeySearchKeyUsage
)

// KeySearchQuery is a single filter predicate of a key search.
type KeySearchQuery struct {
	Key    KeySearchKey
	Method model.SearchMethod
	Value  interface{}
}

// KeySearchResponse is a page of key search results.
type KeySearchResponse struct {
	Offset      uint64
	Limit       uint64
	TotalResult uint64
	Result      []*KeyView
}
// EnsureLimit caps the request's page size: an unset (zero) or too-large
// limit is replaced by the given maximum.
func (r *KeySearchRequest) EnsureLimit(limit uint64) {
	if r.Limit != 0 && r.Limit <= limit {
		return
	}
	r.Limit = limit
}
// SigningKeyFromKeyView decrypts a private signing KeyView with alg and
// parses the material into a usable private key. It rejects keys that are
// public or not designated for signing.
func SigningKeyFromKeyView(key *KeyView, alg crypto.EncryptionAlgorithm) (*SigningKey, error) {
	if key.Usage != KeyUsageSigning || !key.Private {
		return nil, errors.ThrowInvalidArgument(nil, "MODEL-5HBdh", "key must be private signing key")
	}
	keyData, err := crypto.Decrypt(key.Key, alg)
	if err != nil {
		return nil, err
	}
	privateKey, err := crypto.BytesToPrivateKey(keyData)
	if err != nil {
		return nil, err
	}
	return &SigningKey{
		ID:        key.ID,
		Algorithm: key.Algorithm,
		Key:       privateKey,
	}, nil
}
// PublicKeysFromKeyView converts every KeyView into a PublicKey, decrypting
// each with alg. It fails fast on the first key that cannot be converted.
func PublicKeysFromKeyView(keys []*KeyView, alg crypto.EncryptionAlgorithm) ([]*PublicKey, error) {
	converted := make([]*PublicKey, len(keys))
	for i, key := range keys {
		pk, convErr := PublicKeyFromKeyView(key, alg)
		if convErr != nil {
			return nil, convErr
		}
		converted[i] = pk
	}
	return converted, nil
}
// PublicKeyFromKeyView decrypts a public KeyView with alg and parses the
// material into a usable public key. Private keys are rejected.
func PublicKeyFromKeyView(key *KeyView, alg crypto.EncryptionAlgorithm) (*PublicKey, error) {
	if key.Private {
		return nil, errors.ThrowInvalidArgument(nil, "MODEL-dTZa2", "key must be public")
	}
	keyData, err := crypto.Decrypt(key.Key, alg)
	if err != nil {
		return nil, err
	}
	publicKey, err := crypto.BytesToPublicKey(keyData)
	if err != nil {
		return nil, err
	}
	return &PublicKey{
		ID:        key.ID,
		Algorithm: key.Algorithm,
		Usage:     key.Usage,
		Key:       publicKey,
	}, nil
}
|
package main
import "sort"
//42 接雨水
//1-D的接雨水问题有一种解法是从左右两边的边界往中间不断进行收缩,收缩的过程中,对每个坐标(一维坐标)能接的雨水进行求解
// trap solves LeetCode 42 (trapping rain water, 1-D): it returns how much
// water the elevation map can hold, scanning with two pointers from both
// ends and tracking the running left/right maxima.
//
// Bug fix: right must start at the last valid index (len(height)-1), not
// len(height), which previously indexed out of range on any non-empty input.
func trap(height []int) int {
	if len(height) == 0 {
		return 0
	}
	left, right := 0, len(height)-1
	lmax, rmax := 0, 0
	sum := 0
	for left < right {
		// Advance the side with the lower wall; the opposite side
		// guarantees the water level at the current cell.
		if height[left] <= height[right] {
			if height[left] > lmax {
				lmax = height[left]
			} else {
				sum += lmax - height[left]
			}
			left++
		} else {
			if height[right] > rmax {
				rmax = height[right]
			} else {
				sum += rmax - height[right]
			}
			right--
		}
	}
	return sum
}
//2-D的接雨水问题的边界不再是线段的两个端点,而是矩形的一周,所以我们用优先队列维护所有边界点,收缩时,也不仅仅只有左右两个方向,而是上下左右四个方向,并且维护一个visit的数组,记录哪些坐标已经被访问过,不然会造成重复求解。
// The 2-D version (LeetCode 407) works from the rectangle's border inward:
// all boundary cells go into a min-ordered queue; we repeatedly take the
// lowest boundary cell and flood its unvisited neighbors, so each cell's
// water level is fixed by the lowest "wall" separating it from the outside.
//
// Bug fixes versus the previous version:
//   - Pop returned element 0 but truncated the LAST element, corrupting
//     the queue; it now removes the element it returns.
//   - the neighbor bounds check used x < m && y < n with row/column limits
//     swapped; x indexes rows (< n) and y indexes columns (< m).
//   - neighbors were pushed with their raw height; the effective boundary
//     is max(current water level, neighbor height).

// Rain is a queued cell: grid coordinates plus its effective water level.
type Rain struct {
	x int
	y int
	v int
}

// PriorityQueue keeps Rain cells ordered by v via sort.Interface; the
// caller re-sorts after each Push so Pop always yields the minimum.
type PriorityQueue []*Rain

func (q PriorityQueue) Less(i, j int) bool {
	return q[i].v < q[j].v
}

func (q PriorityQueue) Len() int {
	return len(q)
}

func (q PriorityQueue) Swap(i, j int) {
	q[i], q[j] = q[j], q[i]
}

// Push appends a cell; the queue must be re-sorted before the next Pop.
func (q *PriorityQueue) Push(r *Rain) {
	*q = append(*q, r)
}

// Pop removes and returns the minimum (first) cell, or nil when empty.
func (q *PriorityQueue) Pop() *Rain {
	if q.Len() > 0 {
		item := (*q)[0]
		*q = (*q)[1:]
		return item
	}
	return nil
}

// trapRainWater returns the volume of water trapped by the 2-D height map.
func trapRainWater(heightMap [][]int) int {
	// Fewer than 3 rows or columns cannot enclose any water.
	if len(heightMap) < 3 || len(heightMap[0]) < 3 {
		return 0
	}
	n := len(heightMap)    // number of rows (x range)
	m := len(heightMap[0]) // number of columns (y range)
	sum := 0
	bucket := make(PriorityQueue, 0, 2*(n+m))
	visited := make([][]bool, n)
	for i := 0; i < n; i++ {
		visited[i] = make([]bool, m)
		for j := 0; j < m; j++ {
			// Seed the queue with the rectangle's border cells.
			if i == 0 || i == n-1 || j == 0 || j == m-1 {
				visited[i][j] = true
				bucket.Push(&Rain{x: i, y: j, v: heightMap[i][j]})
			}
		}
	}
	sort.Sort(bucket)
	// Four directions packed into one array: (dir[k], dir[k+1]) pairs.
	dir := []int{-1, 0, 1, 0, -1}
	for bucket.Len() > 0 {
		head := bucket.Pop()
		for k := 0; k < 4; k++ {
			x := head.x + dir[k]
			y := head.y + dir[k+1]
			if x >= 0 && y >= 0 && x < n && y < m && !visited[x][y] {
				if heightMap[x][y] < head.v {
					sum += head.v - heightMap[x][y]
				}
				// The neighbor's effective boundary is the higher of
				// its own height and the current water level.
				level := head.v
				if heightMap[x][y] > level {
					level = heightMap[x][y]
				}
				bucket.Push(&Rain{x: x, y: y, v: level})
				sort.Sort(bucket)
				visited[x][y] = true
			}
		}
	}
	return sum
}
// User is declared but not used anywhere in this file.
type User struct {
}
// test demonstrates the classic Go loop-variable capture pitfall: before
// Go 1.22 both closures share the single loop variable i, so calling them
// after the loop prints the same address and the final value twice; from
// Go 1.22 each iteration gets a fresh i and the outputs differ.
// NOTE(review): output therefore depends on the module's Go version.
func test() []func() {
	var funs []func()
	for i := 0; i < 2; i++ {
		funs = append(funs, func() {
			println(&i, i)
		})
	}
	return funs
}
// main runs the closure-capture demo and prints the 2-D rain water result
// for a sample grid.
func main() {
	funs := test()
	for _, f := range funs {
		f()
	}
	//println(trap([]int{}))
	println(trapRainWater([][]int{
		{1, 4, 3, 1, 3, 2},
		{3, 2, 1, 3, 2, 4},
		{2, 3, 3, 2, 3, 1},
	}))
}
|
/**
Code for parsing Kustomize YAML and analyzing dependencies.
Adapted from
https://github.com/GoogleContainerTools/skaffold/blob/511c77f1736b657415500eb9b820ae7e4f753347/pkg/skaffold/deploy/kustomize.go
Copyright 2018 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kustomize
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/tilt-dev/tilt/internal/ospath"
yaml "gopkg.in/yaml.v2"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/types"
)
// Mostly taken from the [kustomize source code](https://github.com/kubernetes-sigs/kustomize/blob/ee68a9c450bc884b0d657fb7e3d62eb1ac59d14f/pkg/target/kusttarget.go#L97) itself.
// loadKustFile reads the kustomization file in dir. Exactly one of the
// recognized kustomization file names must exist; its content and path are
// returned.
//
// Mostly taken from the [kustomize source code](https://github.com/kubernetes-sigs/kustomize/blob/ee68a9c450bc884b0d657fb7e3d62eb1ac59d14f/pkg/target/kusttarget.go#L97) itself.
//
// Cleanups: `match += 1` -> `match++`; the multiple-files error string is
// now lowercase without a trailing newline, per Go error conventions.
func loadKustFile(dir string) ([]byte, string, error) {
	var content []byte
	var path string
	match := 0
	for _, kf := range konfig.RecognizedKustomizationFileNames() {
		p := filepath.Join(dir, kf)
		c, err := os.ReadFile(p)
		if err == nil {
			path = p
			match++
			content = c
		}
	}
	switch match {
	case 0:
		return nil, "", fmt.Errorf(
			"unable to find one of %v in directory '%s'",
			konfig.RecognizedKustomizationFileNames(), dir)
	case 1:
		return content, path, nil
	default:
		return nil, "", fmt.Errorf(
			"found multiple kustomization files under: %s", dir)
	}
}
// Code for parsing Kustomize adapted from Kustomize
// https://github.com/kubernetes-sigs/kustomize/blob/ee68a9c450bc884b0d657fb7e3d62eb1ac59d14f/pkg/target/kusttarget.go#L97
//
// Code for parsing out dependencies copied from Skaffold
// https://github.com/GoogleContainerTools/skaffold/blob/511c77f1736b657415500eb9b820ae7e4f753347/pkg/skaffold/deploy/kustomize.go
func dependenciesForKustomization(dir string) ([]string, error) {
var deps []string
buf, path, err := loadKustFile(dir)
if err != nil {
return nil, err
}
content := types.Kustomization{}
if err := yaml.Unmarshal(buf, &content); err != nil {
return nil, err
}
errs := content.EnforceFields()
if len(errs) > 0 {
return nil, fmt.Errorf("Failed to read kustomization file under %s:\n"+strings.Join(errs, "\n"), dir)
}
paths := append([]string{}, content.Bases...)
paths = append(paths, content.Resources...)
for _, p := range paths {
abs := filepath.Join(dir, p)
if ospath.IsDir(abs) {
curDeps, err := dependenciesForKustomization(filepath.Join(dir, p))
if err != nil {
return nil, err
}
deps = append(deps, curDeps...)
} else {
deps = append(deps, abs)
}
}
deps = append(deps, path)
for _, patch := range content.Patches {
if patch.Path != "" {
deps = append(deps, filepath.Join(dir, patch.Path))
}
}
for _, patch := range content.PatchesStrategicMerge {
deps = append(deps, filepath.Join(dir, string(patch)))
}
deps = append(deps, joinPaths(dir, content.Crds)...)
for _, patch := range content.PatchesJson6902 {
deps = append(deps, filepath.Join(dir, patch.Path))
}
for _, generator := range content.ConfigMapGenerator {
deps = append(deps, joinPaths(dir, generator.FileSources)...)
}
return deps, nil
}
// Deps returns the deduplicated list of files that the kustomization rooted
// at baseDir depends on, recursing through bases and resource directories.
func Deps(baseDir string) ([]string, error) {
	d, err := dependenciesForKustomization(baseDir)
	if err != nil {
		return nil, err
	}
	return uniqDependencies(d), nil
}
func joinPaths(root string, paths []string) []string {
var list []string
for _, path := range paths {
list = append(list, filepath.Join(root, path))
}
return list
}
// uniqDependencies removes duplicates from deps in place, keeping the first
// occurrence of each path and preserving order. The input's backing array
// is reused.
func uniqDependencies(deps []string) []string {
	seen := make(map[string]struct{}, len(deps))
	out := deps[:0]
	for _, d := range deps {
		if _, dup := seen[d]; !dup {
			seen[d] = struct{}{}
			out = append(out, d)
		}
	}
	return out
}
|
package main
import (
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
)
// Command-line flags configuring the static file server.
var (
	listenAddr  = flag.String("addr", ":8080", "Address to listen on")
	cacheMaxAge = flag.Int("max-age", 60, "Seconds to allow caching of resources on the client side")
	cert        = flag.String("cert", "", "Certificate file for TLS. The concatenation of the certificates of the server all the way up to the CA's.")
	key         = flag.String("key", "", "Key file for TLS.")
)
// init parses the command-line flags before main runs.
// NOTE(review): flag.Parse in init is generally discouraged; harmless here
// because main dereferences the flags immediately.
func init() {
	flag.Parse()
}
// printAddresses logs an http:// URL for every address of every interface
// that is up, so the user can reach the server when it listens on all
// interfaces.
//
// Fixes: unknown address types are now skipped instead of printing
// "http://<nil>/...", and the deep nesting was flattened with early
// continue/return.
func printAddresses(port string) {
	// Print this machine's interface addresses
	ifaces, err := net.Interfaces()
	if err != nil {
		log.Println("Error getting interfaces: ", err)
		return
	}
	for i, iface := range ifaces {
		if (iface.Flags & net.FlagUp) == 0 {
			continue // interface is down
		}
		log.Printf("%02d: %s (%s)", i, iface.Name, iface.Flags)
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}
		for _, a := range addrs {
			var ip net.IP
			switch v := a.(type) {
			case *net.IPNet:
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			default:
				// Unknown address type; nothing sensible to print.
				continue
			}
			log.Printf(" http://%s/", net.JoinHostPort(ip.String(), port))
		}
	}
}
// main serves the current working directory over HTTP (or HTTPS when both
// -cert and -key are given), logging each request and attaching a
// Cache-Control header from -max-age.
func main() {
	maxAge := fmt.Sprintf("max-age=%d", *cacheMaxAge)
	h := http.FileServer(http.Dir("./"))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s -> %s", r.RemoteAddr, r.URL.String())
		w.Header().Set("Cache-Control", maxAge)
		h.ServeHTTP(w, r)
	})

	listenHost, listenPort, err := net.SplitHostPort(*listenAddr)
	if err != nil {
		log.Fatal(err)
	}
	// With no explicit host we may be reachable on several interfaces;
	// list them all so the user can pick one.
	if len(listenHost) == 0 {
		printAddresses(listenPort)
	} else {
		log.Printf("Listening on http://%s/", *listenAddr)
	}

	// whereami?
	cwd, err := os.Getwd()
	if err != nil {
		log.Println("Could not get current working directory:", err)
		cwd = ""
	}

	// start serving
	log.Printf("Serving in %s", cwd)
	if *key != "" && *cert != "" {
		log.Fatal(http.ListenAndServeTLS(*listenAddr, *cert, *key, nil))
	} else {
		log.Fatal(http.ListenAndServe(*listenAddr, nil))
	}
}
|
package util
import (
"math/rand"
"time"
"fmt"
"strconv"
)
func CheckErr(err error) {
if err != nil {
panic(err)
}
}
// GetRandomCode returns a string of n random decimal digits.
//
// Cleanups: digits are written into a preallocated byte buffer instead of
// repeated string += fmt.Sprintf (quadratic and allocation-heavy), and the
// non-gofmt spacing was fixed. The per-call reseed is kept for
// compatibility with existing behavior.
func GetRandomCode(n int8) string {
	rand.Seed(int64(time.Now().Nanosecond()))
	buf := make([]byte, n)
	for i := range buf {
		buf[i] = byte('0' + rand.Intn(10))
	}
	return string(buf)
}
// GenerateKey derives an opaque identifier by hashing the given key
// concatenated with a 10-digit random code and the current unix timestamp.
// NOTE(review): MD5 is defined elsewhere in this package.
func GenerateKey(key string) string {
	o_id := key + GetRandomCode(10) + strconv.FormatInt(time.Now().Unix(), 10)
	return MD5(o_id)
}
package config
import (
stdlog "log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
"github.com/skidder/streammarker-writer/db"
)
// Defaults used when the corresponding environment variables are unset.
const (
	defaultQueueName        = "streammarker-collector-messages"
	defaultInfluxDBUsername = "streammarker"
	defaultInfluxDBAddress  = "http://127.0.0.1:8086"
	defaultInfluxDBName     = "streammarker_measurements"
)

// Configuration holds application configuration details
type Configuration struct {
	// HealthCheckAddress is the listen address for the health endpoint.
	HealthCheckAddress string
	SQSService         sqsiface.SQSAPI
	DynamoDBService    dynamodbiface.DynamoDBAPI
	QueueName          string
	QueueURL           string
	MeasurementWriter  db.MeasurementWriter
	DeviceManager      db.DeviceManager
}
// LoadConfiguration loads the app config
func LoadConfiguration() (*Configuration, error) {
queueName := os.Getenv("STREAMMARKER_QUEUE_NAME")
if queueName == "" {
queueName = defaultQueueName
}
influxDBUsername := os.Getenv("STREAMMARKER_INFLUXDB_USERNAME")
if influxDBUsername == "" {
influxDBUsername = defaultInfluxDBUsername
}
influxDBPassword := os.Getenv("STREAMMARKER_INFLUXDB_PASSWORD")
influxDBAddress := os.Getenv("STREAMMARKER_INFLUXDB_ADDRESS")
if influxDBAddress == "" {
influxDBAddress = defaultInfluxDBAddress
}
influxDBName := os.Getenv("STREAMMARKER_INFLUXDB_NAME")
if influxDBName == "" {
influxDBName = defaultInfluxDBName
}
// Create external service connections
s := session.New()
sqsService := createSQSConnection(s)
dynamoDBService := createDynamoDBConnection(s)
queueURL := findQueueURL(sqsService, queueName)
deviceManager := db.NewDynamoDAO(dynamoDBService)
measurementWriter, err := db.NewInfluxDAO(influxDBAddress, influxDBUsername, influxDBPassword, influxDBName, deviceManager)
return &Configuration{
QueueName: queueName,
QueueURL: queueURL,
SQSService: sqsService,
DynamoDBService: dynamoDBService,
HealthCheckAddress: ":3100",
MeasurementWriter: measurementWriter,
DeviceManager: deviceManager,
}, err
}
// createSQSConnection builds an SQS client on the shared session, honoring
// an optional endpoint override from STREAMMARKER_SQS_ENDPOINT.
func createSQSConnection(s *session.Session) *sqs.SQS {
	cfg := &aws.Config{}
	endpoint := os.Getenv("STREAMMARKER_SQS_ENDPOINT")
	if endpoint != "" {
		cfg.Endpoint = &endpoint
	}
	return sqs.New(s, cfg)
}
// findQueueURL resolves the SQS queue URL: the STREAMMARKER_SQS_QUEUE_URL
// environment variable wins; otherwise the URL is looked up by queue name.
// It panics (via log.Panicf) when the lookup fails, since the app cannot
// run without a queue.
func findQueueURL(sqsService *sqs.SQS, queueName string) string {
	// check the environment variable first
	var queueURL string
	if queueURL = os.Getenv("STREAMMARKER_SQS_QUEUE_URL"); queueURL != "" {
		return queueURL
	}

	// otherwise, query SQS for the queue URL
	params := &sqs.GetQueueUrlInput{
		QueueName: aws.String(queueName),
	}
	if resp, err := sqsService.GetQueueUrl(params); err == nil {
		queueURL = *resp.QueueUrl
	} else {
		stdlog.Panicf("Unable to retrieve queue URL: %s", err.Error())
	}
	return queueURL
}
// createDynamoDBConnection builds a DynamoDB client on the shared session,
// honoring the optional STREAMMARKER_DYNAMO_ENDPOINT override.
func createDynamoDBConnection(s *session.Session) *dynamodb.DynamoDB {
	var config aws.Config
	if endpoint := os.Getenv("STREAMMARKER_DYNAMO_ENDPOINT"); endpoint != "" {
		config.Endpoint = &endpoint
	}
	return dynamodb.New(s, &config)
}
|
package models
import (
"github.com/dgrijalva/jwt-go"
"github.com/jinzhu/gorm"
)
// User is the gorm-backed account record. Email is unique at the DB level.
type User struct {
	gorm.Model
	Name string
	Email string `gorm:"type:varchar(100);unique_index"`
	Gender string `json:"Gender"`
	Password string `json:"Password"` // NOTE(review): appears to be stored/serialized as-is — confirm it is hashed upstream
}
// Token is the JWT claims payload: user identity plus the embedded
// standard claims (expiry, issuer, ...).
type Token struct {
	UserID uint
	Name string
	Email string
	*jwt.StandardClaims
}
// Exception is the JSON error envelope returned to API clients.
type Exception struct {
	Message string `json:"message"`
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
// "log"
)
// main fetches the Nordstrom shop homepage and prints the raw HTML to stdout.
func main() {
	resp, err := http.Get("http://shop.nordstrom.com")
	if err != nil {
		// Without this check a failed request leaves resp nil and the
		// Body access below panics.
		fmt.Println("request failed:", err)
		return
	}
	page, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		fmt.Println("reading response body failed:", err)
		return
	}
	fmt.Println(string(page))
}
// Here _ is called the blank identifier.
// Basically we specify this if we don't want to assign the value to a variable and use it.
// Here the return value is an error which we don't want to use.
package examples
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/openshift/origin/pkg/api/latest"
configapi "github.com/openshift/origin/pkg/config/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
imageapi "github.com/openshift/origin/pkg/image/api"
projectapi "github.com/openshift/origin/pkg/project/api"
routeapi "github.com/openshift/origin/pkg/route/api"
templateapi "github.com/openshift/origin/pkg/template/api"
)
// TestExamples walks the example directories, checks that every expected
// JSON fixture exists on disk, and decodes each discovered JSON file into
// its expected API object type.
func TestExamples(t *testing.T) {
	// Map of JSON file path -> the object it must decode into; nil skips decoding.
	expected := map[string]runtime.Object{
		"guestbook/template.json": &templateapi.Template{},
		"hello-openshift/hello-pod.json": &kapi.Pod{},
		"hello-openshift/hello-project.json": &projectapi.Project{},
		"sample-app/github-webhook-example.json": nil, // Skip.
		"sample-app/docker-registry-config.json": &configapi.Config{},
		"sample-app/application-template-stibuild.json": &templateapi.Template{},
		"sample-app/application-template-dockerbuild.json": &templateapi.Template{},
		"jenkins/jenkins-config.json": &configapi.Config{},
		"jenkins/docker-registry-config.json": &configapi.Config{},
		"jenkins/application-template.json": &templateapi.Template{},
		"../test/integration/fixtures/test-deployment-config.json": &deployapi.DeploymentConfig{},
		"../test/integration/fixtures/test-image-repository.json": &imageapi.ImageRepository{},
		"../test/integration/fixtures/test-image.json": &imageapi.Image{},
		"../test/integration/fixtures/test-mapping.json": &imageapi.ImageRepositoryMapping{},
		"../test/integration/fixtures/test-route.json": &routeapi.Route{},
		"../test/integration/fixtures/test-service.json": &kapi.Service{},
	}
	// Add the root directory to search for files you want to test, if is not in the list below.
	rootDirs := []string{".", "../test/integration/fixtures"}
	files := []string{}
	for _, rootDir := range rootDirs {
		err := filepath.Walk(rootDir, func(path string, f os.FileInfo, err error) error {
			if filepath.Ext(path) == ".json" {
				files = append(files, path)
			}
			return err
		})
		if err != nil {
			t.Errorf("%v", err)
		}
	}
	// Check all files which are expected to be validated, each file should have
	// an existent corresponding JSON file on disk.
	for fileName := range expected {
		if exists := func() bool {
			for _, file := range files {
				if fileName == file {
					return true
				}
			}
			return false
		}(); !exists {
			t.Errorf("No JSON file was found for the expected file: '%v'", fileName)
			continue
		}
	}
	for _, file := range files {
		expectedObject, ok := expected[file]
		if !ok {
			t.Errorf("No test case defined for example JSON file '%v'", file)
			continue
		}
		if expectedObject == nil {
			continue
		}
		// The read error was previously discarded with _, which would turn an
		// unreadable fixture into a confusing decode failure; report it directly.
		jsonData, err := ioutil.ReadFile(file)
		if err != nil {
			t.Errorf("Unexpected error while reading example JSON file '%v': %v", file, err)
			continue
		}
		if err := latest.Codec.DecodeInto(jsonData, expectedObject); err != nil {
			t.Errorf("Unexpected error while decoding example JSON file '%v': %v", file, err)
		}
	}
}
// TestReadme verifies that the repository README exists and is readable.
func TestReadme(t *testing.T) {
	const path = "../README.md"
	if _, err := ioutil.ReadFile(path); err != nil {
		t.Fatalf("Unable to read file: %v", err)
	}
}
|
package ghcapi
import (
"fmt"
"github.com/go-openapi/runtime/middleware"
"github.com/gobuffalo/validate/v3"
"go.uber.org/zap"
"github.com/gofrs/uuid"
mtoshipmentops "github.com/transcom/mymove/pkg/gen/ghcapi/ghcoperations/mto_shipment"
"github.com/transcom/mymove/pkg/gen/ghcmessages"
"github.com/transcom/mymove/pkg/handlers"
"github.com/transcom/mymove/pkg/handlers/ghcapi/internal/payloads"
"github.com/transcom/mymove/pkg/models"
"github.com/transcom/mymove/pkg/services"
"github.com/transcom/mymove/pkg/services/event"
mtoshipment "github.com/transcom/mymove/pkg/services/mto_shipment"
"github.com/transcom/mymove/pkg/services/query"
)
// ListMTOShipmentsHandler returns a list of MTO Shipments
type ListMTOShipmentsHandler struct {
	handlers.HandlerContext
	services.ListFetcher // fetches the shipment list with associations
	services.Fetcher // fetches the parent move task order record
}
// Handle listing mto shipments for the move task order.
// Responses: 422 on a malformed MoveTaskOrderID, 404 when the move does not
// exist, 500 on a fetch failure, otherwise 200 with the shipment payloads.
func (h ListMTOShipmentsHandler) Handle(params mtoshipmentops.ListMTOShipmentsParams) middleware.Responder {
	logger := h.LoggerFromRequest(params.HTTPRequest)
	moveTaskOrderID, err := uuid.FromString(params.MoveTaskOrderID.String())
	// return any parsing error
	if err != nil {
		parsingError := fmt.Errorf("UUID Parsing for %s: %w", "MoveTaskOrderID", err).Error()
		logger.Error(parsingError)
		payload := payloadForValidationError("UUID(s) parsing error", parsingError, h.GetTraceID(), validate.NewErrors())
		return mtoshipmentops.NewListMTOShipmentsUnprocessableEntity().WithPayload(payload)
	}
	// check if move task order exists first
	queryFilters := []services.QueryFilter{
		query.NewQueryFilter("id", "=", moveTaskOrderID.String()),
	}
	moveTaskOrder := &models.Move{}
	err = h.Fetcher.FetchRecord(moveTaskOrder, queryFilters)
	if err != nil {
		// Any fetch failure is reported as 404 here (not only "no rows").
		logger.Error("Error fetching move task order: ", zap.Error(fmt.Errorf("Move Task Order ID: %s", moveTaskOrder.ID)), zap.Error(err))
		return mtoshipmentops.NewListMTOShipmentsNotFound()
	}
	// Reuse the filter slice to select the move's shipments.
	queryFilters = []services.QueryFilter{
		query.NewQueryFilter("move_id", "=", moveTaskOrderID.String()),
	}
	// Eager-load the associations the payload serializer needs.
	queryAssociations := query.NewQueryAssociations([]services.QueryAssociation{
		query.NewQueryAssociation("MTOServiceItems.ReService"),
		query.NewQueryAssociation("MTOAgents"),
		query.NewQueryAssociation("PickupAddress"),
		query.NewQueryAssociation("DestinationAddress"),
	})
	var shipments models.MTOShipments
	err = h.ListFetcher.FetchRecordList(&shipments, queryFilters, queryAssociations, nil, nil)
	// return any errors
	if err != nil {
		logger.Error("Error fetching mto shipments : ", zap.Error(err))
		return mtoshipmentops.NewListMTOShipmentsInternalServerError()
	}
	payload := payloads.MTOShipments(&shipments)
	return mtoshipmentops.NewListMTOShipmentsOK().WithPayload(*payload)
}
// PatchShipmentHandler patches shipments
type PatchShipmentHandler struct {
	handlers.HandlerContext
	services.Fetcher
	services.MTOShipmentStatusUpdater // performs the status transition with eTag checking
}
// Handle patches a shipment's status, mapping service-layer errors onto the
// appropriate HTTP responses (404/422/412/409/500), then fires an update
// event on success (event failure is logged, not fatal).
func (h PatchShipmentHandler) Handle(params mtoshipmentops.PatchMTOShipmentStatusParams) middleware.Responder {
	logger := h.LoggerFromRequest(params.HTTPRequest)
	shipmentID := uuid.FromStringOrNil(params.ShipmentID.String())
	status := models.MTOShipmentStatus(params.Body.Status)
	rejectionReason := params.Body.RejectionReason
	eTag := params.IfMatch
	shipment, err := h.UpdateMTOShipmentStatus(shipmentID, status, rejectionReason, eTag)
	if err != nil {
		logger.Error("UpdateMTOShipmentStatus error: ", zap.Error(err))
		switch e := err.(type) {
		case services.NotFoundError:
			return mtoshipmentops.NewPatchMTOShipmentStatusNotFound()
		case services.InvalidInputError:
			payload := payloadForValidationError("Validation errors", "UpdateShipmentMTOStatus", h.GetTraceID(), e.ValidationErrors)
			return mtoshipmentops.NewPatchMTOShipmentStatusUnprocessableEntity().WithPayload(payload)
		case services.PreconditionFailedError:
			return mtoshipmentops.NewPatchMTOShipmentStatusPreconditionFailed().WithPayload(&ghcmessages.Error{Message: handlers.FmtString(err.Error())})
		case mtoshipment.ConflictStatusError:
			return mtoshipmentops.NewPatchMTOShipmentStatusConflict().WithPayload(&ghcmessages.Error{Message: handlers.FmtString(err.Error())})
		default:
			return mtoshipmentops.NewPatchMTOShipmentStatusInternalServerError()
		}
	}
	_, err = event.TriggerEvent(event.Event{
		EndpointKey: event.GhcPatchMTOShipmentStatusEndpointKey,
		// Endpoint that is being handled
		EventKey: event.MTOShipmentUpdateEventKey, // Event that you want to trigger
		UpdatedObjectID: shipment.ID, // ID of the updated logical object
		MtoID: shipment.MoveTaskOrderID, // ID of the associated Move
		Request: params.HTTPRequest, // Pass on the http.Request
		DBConnection: h.DB(), // Pass on the pop.Connection
		HandlerContext: h, // Pass on the handlerContext
	})
	// If the event trigger fails, just log the error.
	if err != nil {
		// Include the error itself, consistent with the other log calls above
		// (previously the cause was silently dropped).
		logger.Error("ghcapi.PatchShipmentHandler could not generate the event", zap.Error(err))
	}
	payload := payloads.MTOShipment(shipment)
	return mtoshipmentops.NewPatchMTOShipmentStatusOK().WithPayload(payload)
}
|
package configfile
import (
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestAttachWatcher exercises the config-file watcher: it counts change
// callbacks across writes, then deletes the file and expects a watcher error.
func TestAttachWatcher(t *testing.T) {
	t.Run("AttachWatcher", func(t *testing.T) {
		// Mock the log.Fatal function to intercept fatal errors for inspection
		var watcherErr error
		logFatal = func(v ...interface{}) {
			watcherErr = v[0].(error)
		}
		// Create a listener that will track the number of times the watcher detects a file change
		changeCount := 0
		// Allow a 10ms delay for the watcher to detect the file change
		delay := time.Millisecond * 10
		onChange := func() {
			changeCount++
		}
		checkChangeCount := func(t *testing.T, expectedValue int) {
			// Wait the specified amount, then assert that the change count is as expected
			time.Sleep(delay)
			assert.Equal(t, expectedValue, changeCount)
		}
		// Create a temp file to watch. The TempDir error was previously
		// discarded, which would hide setup failures behind watcher errors.
		tempDir, err := ioutil.TempDir("", "configfile_watcher")
		assert.NoError(t, err)
		defer os.RemoveAll(tempDir)
		file, err := os.CreateTemp(tempDir, "configfile")
		assert.NoError(t, err)
		AttachWatcher(file.Name(), onChange)
		checkChangeCount(t, 0)
		assert.NoError(t, watcherErr)
		// Write to the file and check that the listener is called
		_, err = file.WriteString("test")
		assert.NoError(t, err)
		checkChangeCount(t, 1)
		assert.NoError(t, watcherErr)
		_, err = file.WriteString("test again")
		file.Close()
		assert.NoError(t, err)
		checkChangeCount(t, 2)
		assert.NoError(t, watcherErr)
		// Delete the file and check that the listener is called and an error occurs
		err = os.Remove(file.Name())
		assert.NoError(t, err)
		// Wait longer because the watcher tries to reattach to deleted files in case they're recreated
		time.Sleep(1 * time.Second)
		checkChangeCount(t, 3)
		assert.Error(t, watcherErr)
	})
}
|
// protect runs g, recovering from any panic it raises so the caller keeps
// running; the panic value is logged. The original did not compile:
// log.PrintLn, a comma instead of ';' in the if-initializer, and the
// deferred closure invoked with {} instead of ().
func protect(g func()) {
	defer func() {
		log.Println("done")
		if x := recover(); x != nil {
			log.Printf("run time panic: %v", x)
		}
	}()
	log.Println("start")
	g()
}
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ui
import (
"context"
"time"
"chromiumos/tast/errors"
uiperf "chromiumos/tast/local/bundles/cros/ui/perf"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/input"
"chromiumos/tast/local/perfutil"
"chromiumos/tast/local/power"
"chromiumos/tast/local/ui"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the OverviewScrollPerf test with the tast framework,
// restricted to devices with an internal display and a logged-in Chrome.
func init() {
	testing.AddTest(&testing.Test{
		Func: OverviewScrollPerf,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Measures the presentation time of scrolling the overview grid in tablet mode",
		Contacts: []string{"sammiequon@chromium.org", "chromeos-wmp@google.com"},
		Attr: []string{"group:crosbolt", "crosbolt_perbuild"},
		SoftwareDeps: []string{"chrome"},
		HardwareDeps: hwdep.D(hwdep.InternalDisplay()),
		Fixture: "chromeLoggedIn",
	})
}
// OverviewScrollPerf opens 16 browser windows, enters tablet-mode overview,
// and measures presentation latency while swiping the overview grid right-to-
// left and back, reporting the Ash.Overview.Scroll.PresentationTime metric.
func OverviewScrollPerf(ctx context.Context, s *testing.State) {
	// Ensure display on to record ui performance correctly.
	if err := power.TurnOnDisplay(ctx); err != nil {
		s.Fatal("Failed to turn on display: ", err)
	}
	cr := s.FixtValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	orientation, err := display.GetOrientation(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to obtain the display rotation: ", err)
	}
	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, true)
	if err != nil {
		s.Fatal("Failed to ensure in tablet mode: ", err)
	}
	defer cleanup(ctx)
	// Prepare the touch screen as this test requires touch scroll events.
	tsew, err := input.Touchscreen(ctx)
	if err != nil {
		s.Fatal("Failed to create touch screen event writer: ", err)
	}
	defer tsew.Close()
	// Counter-rotate touch coordinates so swipes land where intended.
	if err = tsew.SetRotation(-orientation.Angle); err != nil {
		s.Fatal("Failed to set rotation: ", err)
	}
	stw, err := tsew.NewSingleTouchWriter()
	if err != nil {
		s.Fatal("Failed to create single touch writer: ", err)
	}
	defer stw.Close()
	// Use a total of 16 windows for this test, so that scrolling can happen.
	const numWindows = 16
	if err := ash.CreateWindows(ctx, tconn, cr, ui.PerftestURL, numWindows); err != nil {
		s.Fatal("Failed to open browser windows: ", err)
	}
	if err := ash.SetOverviewModeAndWait(ctx, tconn, true); err != nil {
		s.Fatal("Failed to enter overview mode: ", err)
	}
	// NOTE(review): s.Fatal inside a deferred func runs after the test body;
	// confirm tast permits Fatal here rather than s.Error.
	defer func() {
		if err := ash.SetOverviewModeAndWait(ctx, tconn, false); err != nil {
			s.Fatal("Failed to exit overview mode: ", err)
		}
	}()
	if err := perfutil.RunMultipleAndSave(ctx, s.OutDir(), cr.Browser(), uiperf.Run(s, perfutil.RunAndWaitAll(tconn, func(ctx context.Context) error {
		// Scroll from the top right of the screen to the top middle (1/4 of the
		// screen width). The destination position should match with the next swipe
		// to make the same amount of scrolling.
		if err := stw.Swipe(ctx, tsew.Width()-10, 10, tsew.Width()/4, 10, 500*time.Millisecond); err != nil {
			return errors.Wrap(err, "failed to execute a swipe gesture")
		}
		if err := stw.End(); err != nil {
			return errors.Wrap(err, "failed to finish the swipe gesture")
		}
		// Scroll back from the top middle to the top right so that the test returns
		// back to the original status. Note that this can't be starting from the
		// top left, since it can be recognized as another gesture (back gesture).
		if err := stw.Swipe(ctx, tsew.Width()/4, 10, tsew.Width()-10, 10, 500*time.Millisecond); err != nil {
			return errors.Wrap(err, "failed to execute a swipe gesture")
		}
		if err := stw.End(); err != nil {
			return errors.Wrap(err, "failed to finish the swipe gesture")
		}
		return nil
	}, "Ash.Overview.Scroll.PresentationTime.TabletMode")), perfutil.StoreLatency); err != nil {
		s.Fatal("Failed to run or save: ", err)
	}
}
|
package model
import (
"gorm.io/datatypes"
)
// CDNCluster is the gorm model for a CDN cluster: a uniquely named group of
// CDN nodes with a JSON config blob, linked many-to-many to scheduler
// clusters and optionally to a security group.
type CDNCluster struct {
	Model
	Name string `gorm:"column:name;size:256;uniqueIndex;not null" json:"name"`
	BIO string `gorm:"column:bio;size:1024" json:"bio"`
	Config datatypes.JSONMap `gorm:"column:config;not null" json:"config"`
	SchedulerClusters []SchedulerCluster `gorm:"many2many:cdn_cluster_scheduler_cluster;" json:"-"`
	CDNs []CDN `json:"-"`
	SecurityGroupID *uint
	SecurityGroup SecurityGroup `json:"-"`
}
|
package controller
import (
"context"
"fmt"
"sort"
"time"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/coll"
"github.com/mylxsw/glacier/infra"
"github.com/mylxsw/glacier/web"
"github.com/mylxsw/go-utils/str"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// StatisticsController serves the statistics endpoints (daily/user/rule
// group counts and aggregate-key breakdowns).
type StatisticsController struct {
	cc infra.Resolver // dependency resolver used by the web framework for injection
}
// NewStatisticsController create a new StatisticsController
func NewStatisticsController(cc infra.Resolver) web.Controller {
	ctrl := StatisticsController{cc: cc}
	return ctrl
}
// Register wires the statistics routes under the /statistics prefix.
func (s StatisticsController) Register(router web.Router) {
	router.Group("/statistics", func(router web.Router) {
		router.Get("/daily-group-counts/", s.DailyGroupCounts).Name("statistics:daily-group-counts")
		router.Get("/user-group-counts/", s.UserGroupCounts).Name("statistics:user-group-counts")
		router.Get("/rule-group-counts/", s.RuleGroupCounts).Name("statistics:rule-group-counts")
		router.Get("/group-agg-period-counts/", s.EventGroupAggInPeriod).Name("statistics:group-agg-period-counts")
		router.Get("/group-agg-counts/", s.EventGroupAggCounts).Name("statistics:group-agg-counts")
		router.Group("/events/", func(router web.Router) {
			router.Get("/period-counts/", s.EventCountInPeriod).Name("statistics:events:period-counts")
		})
	})
}
// MessageGroupByDatetimeCount is one bucket of the per-period event-group
// count: the number of groups and the messages they contain for a date.
type MessageGroupByDatetimeCount struct {
	Datetime string `json:"datetime"` // bucket label, formatted "2006-01-02"
	Total int64 `json:"total"` // number of event groups in the bucket
	TotalMessages int64 `json:"total_messages"` // number of messages across those groups
}
// extractDateRange pulls the optional start_at/end_at range from the
// request, defaulting to the last defaultDays days ending now. A bare date
// (10 characters) is expanded to the start or end of that day; unparseable
// values fall back to the defaults.
func extractDateRange(webCtx web.Context, defaultDays int) (time.Time, time.Time) {
	parse := func(raw, timeSuffix string, fallback time.Time) time.Time {
		if raw == "" {
			return fallback
		}
		if len(raw) == 10 {
			raw = raw + timeSuffix
		}
		parsed, err := time.Parse("2006-01-02 15:04:05", raw)
		if err != nil {
			return fallback
		}
		return parsed
	}
	startTime := parse(webCtx.Input("start_at"), " 00:00:00", time.Now().Add(-time.Duration(defaultDays*24)*time.Hour))
	endTime := parse(webCtx.Input("end_at"), " 23:59:59", time.Now())
	return startTime, endTime
}
// DailyGroupCounts summarizes alert-group counts per day in the requested
// range, filling any missing days between the first and last bucket with
// zero entries so the series is continuous.
func (s StatisticsController) DailyGroupCounts(ctx web.Context, groupRepo repository.EventGroupRepo) ([]MessageGroupByDatetimeCount, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx.Context(), 15*time.Second)
	defer cancel()
	startTime, endTime := extractDateRange(ctx, ctx.IntInput("days", 30))
	// 24-hour buckets.
	dailyCounts, err := groupRepo.StatByDatetimeCount(timeoutCtx, groupFilter(ctx), startTime, endTime, 24)
	if err != nil {
		return nil, err
	}
	if len(dailyCounts) == 0 {
		return make([]MessageGroupByDatetimeCount, 0), nil
	}
	// Index the buckets by local-date label for the gap-filling pass below.
	dailyCountsByDate := make(map[string]MessageGroupByDatetimeCount)
	for _, d := range dailyCounts {
		datetime := d.Datetime.In(time.Local).Format("2006-01-02")
		dailyCountsByDate[datetime] = MessageGroupByDatetimeCount{
			Datetime: datetime,
			Total: d.Total,
			TotalMessages: d.TotalMessages,
		}
	}
	startDate := dailyCounts[0].Datetime
	endDate := dailyCounts[len(dailyCounts)-1].Datetime
	if log.DebugEnabled() {
		log.Debugf("%v: %v", startDate, endDate)
	}
	// Walk day by day from the first to the last bucket, emitting zeros for
	// days with no data.
	results := make([]MessageGroupByDatetimeCount, 0)
	for startDate.Before(endDate) || startDate.Equal(endDate) {
		startDateF := startDate.Format("2006-01-02")
		if d, ok := dailyCountsByDate[startDateF]; ok {
			results = append(results, d)
		} else {
			results = append(results, MessageGroupByDatetimeCount{
				Datetime: startDateF,
				Total: 0,
				TotalMessages: 0,
			})
		}
		startDate = startDate.Add(24 * time.Hour)
	}
	return results, nil
}
// EventGroupByUserCounts implements sort.Interface over per-user counts,
// ordering ascending by Total (wrapped in sort.Reverse for top-N use).
type EventGroupByUserCounts []repository.EventGroupByUserCount

// Len implements sort.Interface.
func (e EventGroupByUserCounts) Len() int {
	return len(e)
}

// Less implements sort.Interface, comparing by Total.
func (e EventGroupByUserCounts) Less(i, j int) bool {
	return e[i].Total < e[j].Total
}

// Swap implements sort.Interface.
func (e EventGroupByUserCounts) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}
// UserGroupCounts summarizes alert-group counts per user in the requested
// range, keeping the top 10 users and folding the remainder into "Others".
func (s StatisticsController) UserGroupCounts(ctx web.Context, groupRepo repository.EventGroupRepo) ([]repository.EventGroupByUserCount, error) {
	// The CancelFunc was previously discarded (go vet lostcancel), leaking
	// the context's resources until the timeout fired; release it on return,
	// consistent with the other handlers in this controller.
	timeoutCtx, cancel := context.WithTimeout(ctx.Context(), 5*time.Second)
	defer cancel()
	startTime, endTime := extractDateRange(ctx, ctx.IntInput("days", 30))
	res, err := groupRepo.StatByUserCount(timeoutCtx, startTime, endTime)
	if err != nil {
		return nil, err
	}
	// Descending by Total.
	sort.Sort(sort.Reverse(EventGroupByUserCounts(res)))
	if len(res) > 10 {
		other := repository.EventGroupByUserCount{
			UserName: "Others",
			Total: 0,
			TotalMessages: 0,
		}
		for _, v := range res[10:] {
			other.Total += v.Total
			other.TotalMessages += v.TotalMessages
		}
		res = append(res[:10], other)
	}
	return res, nil
}
// EventGroupByRuleCounts implements sort.Interface over per-rule counts,
// ordering ascending by Total (wrapped in sort.Reverse for top-N use).
type EventGroupByRuleCounts []repository.EventGroupByRuleCount

// Len implements sort.Interface.
func (e EventGroupByRuleCounts) Len() int {
	return len(e)
}

// Less implements sort.Interface, comparing by Total.
func (e EventGroupByRuleCounts) Less(i, j int) bool {
	return e[i].Total < e[j].Total
}

// Swap implements sort.Interface.
func (e EventGroupByRuleCounts) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}
// RuleGroupCounts summarizes alert-group counts per rule in the requested
// range, keeping the top 10 rules and folding the remainder into "Others".
func (s StatisticsController) RuleGroupCounts(ctx web.Context, groupRepo repository.EventGroupRepo) ([]repository.EventGroupByRuleCount, error) {
	// The CancelFunc was previously discarded (go vet lostcancel), leaking
	// the context's resources until the timeout fired; release it on return,
	// consistent with the other handlers in this controller.
	timeoutCtx, cancel := context.WithTimeout(ctx.Context(), 5*time.Second)
	defer cancel()
	startTime, endTime := extractDateRange(ctx, ctx.IntInput("days", 30))
	res, err := groupRepo.StatByRuleCount(timeoutCtx, startTime, endTime)
	if err != nil {
		return nil, err
	}
	// Descending by Total.
	sort.Sort(sort.Reverse(EventGroupByRuleCounts(res)))
	if len(res) > 10 {
		other := repository.EventGroupByRuleCount{
			RuleName: "Others",
			Total: 0,
			TotalMessages: 0,
		}
		for _, v := range res[10:] {
			other.Total += v.Total
			other.TotalMessages += v.TotalMessages
		}
		res = append(res[:10], other)
	}
	return res, nil
}
// EventByDatetimeCount is one bucket of the per-period event count response.
type EventByDatetimeCount struct {
	Datetime string `json:"datetime"` // bucket label in the requested "format"
	Total int64 `json:"total"` // number of events in the bucket
}
// EventCountInPeriod counts events per interval within the requested period,
// filling empty intervals with zero buckets.
// Supported params: days/step/format/meta/tags/origin/status/relation_id/group_id/event_id
func (s StatisticsController) EventCountInPeriod(webCtx web.Context, evtRepo repository.EventRepo) ([]EventByDatetimeCount, error) {
	ctx, cancel := context.WithTimeout(webCtx.Context(), 15*time.Second)
	defer cancel()
	dateTimeFormat := webCtx.InputWithDefault("format", "01-02 15:00")
	// Bucket width in hours; fixed at 1 here despite the "step" param named
	// in the doc comment above.
	var step int64 = 1
	startDate, endDate := extractDateRange(webCtx, webCtx.IntInput("days", 7))
	if log.DebugEnabled() {
		log.Debugf("%v: %v", startDate, endDate)
	}
	filter := eventsFilter(webCtx)
	dailyCounts, err := evtRepo.CountByDatetime(ctx, filter, startDate, endDate, step)
	if err != nil {
		return nil, err
	}
	if len(dailyCounts) == 0 {
		return make([]EventByDatetimeCount, 0), nil
	}
	// Index the buckets by formatted local time for the gap-filling pass.
	dailyCountsByDate := make(map[string]repository.EventByDatetimeCount)
	for _, d := range dailyCounts {
		datetime := d.Datetime.In(time.Local).Format(dateTimeFormat)
		dailyCountsByDate[datetime] = d
	}
	// Walk the whole period step by step, emitting zero buckets for gaps.
	results := make([]EventByDatetimeCount, 0)
	startDateTmp := startDate.Add(time.Duration(step) * time.Hour)
	for startDateTmp.Before(endDate) || startDateTmp.Equal(endDate) {
		startDateF := startDateTmp.Format(dateTimeFormat)
		if d, ok := dailyCountsByDate[startDateF]; ok {
			results = append(results, EventByDatetimeCount{
				Datetime: startDateF,
				Total: d.Total,
			})
		} else {
			results = append(results, EventByDatetimeCount{
				Datetime: startDateF,
				Total: 0,
			})
		}
		startDateTmp = startDateTmp.Add(time.Duration(step) * time.Hour)
	}
	return results, nil
}
// AggCount is the number of event groups (and their messages) under one
// aggregate key.
type AggCount struct {
	AggregateKey string `json:"aggregate_key"`
	Total int64 `json:"total"`
	TotalMessages int64 `json:"total_messages"`
}

// EventGroupAggByDatetimeCount is one day's worth of per-aggregate-key counts.
type EventGroupAggByDatetimeCount struct {
	Datetime string `json:"datetime"` // bucket label, formatted "2006-01-02"
	AggCount []AggCount `json:"agg_count"`
}

// EventGroupAggByDatetimeCountResp is the response envelope: the day-by-day
// series plus the distinct aggregate keys it covers.
type EventGroupAggByDatetimeCountResp struct {
	Data []EventGroupAggByDatetimeCount `json:"data"`
	AggregateKeys []string `json:"aggregate_keys"`
}
// EventGroupAggInPeriod counts event groups in the period for one rule,
// bucketed by day and broken down by aggregate key; days or keys with no
// data are filled with zero entries so every day lists every key.
// Supported params: rule_id (required), days/start_at/end_at.
// (The comment previously misnamed this method EventCountInPeriod.)
func (s StatisticsController) EventGroupAggInPeriod(webCtx web.Context, evtGrpRepo repository.EventGroupRepo) (EventGroupAggByDatetimeCountResp, error) {
	ruleID, err := primitive.ObjectIDFromHex(webCtx.Input("rule_id"))
	if err != nil {
		return EventGroupAggByDatetimeCountResp{}, fmt.Errorf("invalid rule_id: %v", err)
	}
	startTime, endTime := extractDateRange(webCtx, webCtx.IntInput("days", 30))
	timeoutCtx, cancel := context.WithTimeout(webCtx.Context(), 15*time.Second)
	defer cancel()
	// 24-hour buckets.
	dailyCounts, err := evtGrpRepo.StatByAggCountInPeriod(timeoutCtx, ruleID, startTime, endTime, 24)
	if err != nil {
		return EventGroupAggByDatetimeCountResp{}, err
	}
	if len(dailyCounts) == 0 {
		return EventGroupAggByDatetimeCountResp{Data: []EventGroupAggByDatetimeCount{}, AggregateKeys: []string{}}, nil
	}
	// Group the flat (date, key, counts) rows by local date.
	aggregateKeys := make([]string, 0)
	dailyCountsByDate := make(map[string]EventGroupAggByDatetimeCount)
	for _, d := range dailyCounts {
		datetime := d.Datetime.In(time.Local).Format("2006-01-02")
		if _, ok := dailyCountsByDate[datetime]; !ok {
			dailyCountsByDate[datetime] = EventGroupAggByDatetimeCount{
				Datetime: datetime,
				AggCount: make([]AggCount, 0),
			}
		}
		dailyCountsByDate[datetime] = EventGroupAggByDatetimeCount{
			Datetime: dailyCountsByDate[datetime].Datetime,
			AggCount: append(dailyCountsByDate[datetime].AggCount, AggCount{
				AggregateKey: d.AggregateKey,
				Total: d.Total,
				TotalMessages: d.TotalMessages,
			}),
		}
		aggregateKeys = append(aggregateKeys, d.AggregateKey)
	}
	aggregateKeys = str.Distinct(aggregateKeys)
	// A zero-count template row for every known key, used to pad gaps.
	var defaultAggCounts []AggCount
	_ = coll.MustNew(aggregateKeys).Map(func(k string) AggCount {
		return AggCount{AggregateKey: k}
	}).All(&defaultAggCounts)
	startDate := dailyCounts[0].Datetime
	endDate := dailyCounts[len(dailyCounts)-1].Datetime
	if log.DebugEnabled() {
		log.Debugf("%v: %v", startDate, endDate)
	}
	// Walk day by day; for days with data, merge in zero rows for missing
	// keys, and for days without data emit the all-zero template.
	results := make([]EventGroupAggByDatetimeCount, 0)
	for startDate.Before(endDate) || startDate.Equal(endDate) {
		startDateF := startDate.Format("2006-01-02")
		if d, ok := dailyCountsByDate[startDateF]; ok {
			aggMap := make(map[string]AggCount)
			for _, v := range d.AggCount {
				aggMap[v.AggregateKey] = v
			}
			fullAgg := make([]AggCount, 0)
			for _, v := range defaultAggCounts {
				if ex, ok := aggMap[v.AggregateKey]; ok {
					fullAgg = append(fullAgg, ex)
				} else {
					fullAgg = append(fullAgg, v)
				}
			}
			d.AggCount = fullAgg
			results = append(results, d)
		} else {
			results = append(results, EventGroupAggByDatetimeCount{
				Datetime: startDateF,
				AggCount: defaultAggCounts,
			})
		}
		startDate = startDate.Add(24 * time.Hour)
	}
	return EventGroupAggByDatetimeCountResp{
		Data: results,
		AggregateKeys: aggregateKeys,
	}, nil
}
// EventGroupAggCounts returns per-aggregate-key event-group counts for one
// rule over the requested date range.
// Supported params: rule_id (required), days/start_at/end_at.
func (s StatisticsController) EventGroupAggCounts(webCtx web.Context, evtGrpRepo repository.EventGroupRepo) ([]repository.EventGroupAggCount, error) {
	ruleID, err := primitive.ObjectIDFromHex(webCtx.Input("rule_id"))
	if err != nil {
		return nil, fmt.Errorf("invalid rule_id: %v", err)
	}
	timeoutCtx, cancel := context.WithTimeout(webCtx.Context(), 15*time.Second)
	defer cancel()
	startTime, endTime := extractDateRange(webCtx, webCtx.IntInput("days", 30))
	return evtGrpRepo.StatByAggCount(timeoutCtx, ruleID, startTime, endTime)
}
|
// Copyright 2013 (c) Freek Kalter. All rights reserved.
// Use of this source code is governed by the "Revised BSD License"
// that can be found in the LICENSE file.
package main
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/gorilla/mux"
"launchpad.net/goyaml"
)
// Config mirrors config.yml. The underscore field names match goyaml's
// default lowercased key mapping; renaming them would break config parsing.
type Config struct {
	Api_key string // sabnzbd API key
	Api_url string // base URL of the sabnzbd API (used as a fmt prefix, so presumably ends with '/')
	Max_speed int // full download speed used to compute absolute limits — units per sabnzbd config, TODO confirm
	Port int // HTTP listen port
	Times []int // preset durations (minutes) offered to the UI via /state
}

// slog is the application-wide logger, initialized in main.
var slog *log.Logger

// config holds the parsed config.yml contents, populated in main.
var config Config

// countDown tracks the single currently active pause/limit timer.
type countDown struct {
	SetAt time.Time // when the timer started; time.Unix(0, 0) is the "not running" sentinel
	Duration time.Duration // how long the limit stays in effect
	Limit, LimitPercentage int64 // absolute speed limit and the requested percentage to block
	ReturnState ReturnState // sabnzbd state captured before limiting, restored afterwards
}

// ReturnState is the subset of sabnzbd queue status saved and restored
// around a temporary limit.
type ReturnState struct {
	Speedlimit string
	Paused bool
}

// cDown is the global countdown state; SetAt at the epoch sentinel means idle.
var cDown countDown = countDown{
	SetAt: time.Unix(0, 0),
	Duration: 0,
	Limit: 0,
}

// compiledTemplates holds the 404 error page template (parsed at startup).
var compiledTemplates = template.Must(template.ParseFiles("404.html"))

// sabNzbFunctions maps action names to ready-made sabnzbd API URLs,
// built by initSabnzbFunctions after the config is loaded.
var sabNzbFunctions map[string]string

// ignore invalid certificates (todo: make it accept a valid cert)
var client = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
// ExpiresAt reports when the countdown ends. err is non-nil when no timer
// is running (SetAt still at the epoch sentinel); the returned time is then
// meaningless but still computed, matching the original contract.
func (c countDown) ExpiresAt() (expire time.Time, err error) {
	expire = c.SetAt.Add(c.Duration)
	if c.SetAt.Equal(time.Unix(0, 0)) {
		err = errors.New("timer not running")
	}
	return
}
// SecondsLeft returns the whole seconds remaining on the countdown. err is
// non-nil when no timer is running; the seconds value is still computed
// (callers treat <= 0 as "no timer").
func (c countDown) SecondsLeft() (secs int64, err error) {
	expires, err := c.ExpiresAt()
	if err != nil {
		err = errors.New("timer not running")
	}
	// time.Until is the idiomatic spelling of expires.Sub(time.Now()).
	secs = int64(time.Until(expires).Seconds())
	return
}
// homeHandler serves index.html from disk on every request.
func homeHandler(w http.ResponseWriter, r *http.Request) {
	indexContent, err := ioutil.ReadFile("index.html")
	if err != nil {
		slog.Panic(err)
	}
	// Write the bytes directly: the previous fmt.Fprintf used the page
	// content as a format string, so any '%' verb in the HTML was mangled.
	w.Write(indexContent)
}
// resumeHandler cancels the active limit immediately and restores the
// sabnzbd state captured when the limit was set.
func resumeHandler(w http.ResponseWriter, r *http.Request) {
	cDown.Duration = 0
	cDown.Limit = 0
	resumeDownload(cDown.ReturnState)
}
// formHandler starts a timed limit: it validates the {time}/{limit} route
// vars (1-3 digit integers), snapshots the current sabnzbd state, arms a
// timer that restores it, and asks sabnzbd to pause (limit==100) or throttle.
// NOTE(review): the regexp is recompiled on every request — could be a
// package-level regexp.MustCompile. cDown is also mutated here and from the
// AfterFunc goroutine without synchronization — confirm this race is acceptable.
func formHandler(w http.ResponseWriter, r *http.Request) {
	formVars := mux.Vars(r)
	valid_integer_regex := regexp.MustCompile("^[0-9]{1,3}$")
	if !valid_integer_regex.MatchString(strings.TrimSpace(formVars["time"])) ||
		!valid_integer_regex.MatchString(strings.TrimSpace(formVars["limit"])) {
		cDown.Duration = 0
		return // TODO: proper error message maybe
	}
	// Parse errors are impossible after the regexp check above.
	timer_value, _ := strconv.ParseInt(formVars["time"], 10, 32) //base 10, 32bit integer
	cDown.LimitPercentage, _ = strconv.ParseInt(formVars["limit"], 10, 32) //base 10, 32bit integer
	cDown.Duration = time.Minute * time.Duration(timer_value)
	cDown.Limit = int64(config.Max_speed) - ((int64(config.Max_speed) / 100) * cDown.LimitPercentage) // percentage give is how much to block, so inverse that to get how much to let through
	cDown.SetAt = time.Now()
	cDown.ReturnState = getCurrentState()
	slog.Printf("timer started: %+v\n", cDown)
	// When the timer fires, clear the countdown and restore the saved state.
	time.AfterFunc(cDown.Duration, func() {
		slog.Printf("timer done: %+v\n", cDown)
		cDown.Duration = 0
		cDown.SetAt = time.Unix(0, 0)
		resumeDownload(cDown.ReturnState)
	})
	if cDown.LimitPercentage == 100 {
		go callSabnzbd(fmt.Sprintf(sabNzbFunctions["pause_time"], timer_value))
	} else {
		go callSabnzbd(fmt.Sprintf(sabNzbFunctions["limit"], cDown.Limit))
	}
}
// resumeDownload restores sabnzbd to the state captured before the limit was
// applied: re-pause if it was already paused, otherwise resume, and in either
// case restore the previous speed limit.
func resumeDownload(cs ReturnState) {
	slog.Println("Resuming downloads")
	action := sabNzbFunctions["resume_download"]
	if cs.Paused {
		action = sabNzbFunctions["pause"]
	}
	go callSabnzbd(action)
	go callSabnzbd(fmt.Sprintf(sabNzbFunctions["limit"], cs.Speedlimit))
}
// getCurrentState queries sabnzbd's queue endpoint and returns the current
// pause flag and speed limit so they can be restored later. Any failure is
// fatal via slog.Panic (the original mixed bare panic with slog.Panic, so
// some failures bypassed the application log).
func getCurrentState() ReturnState {
	resp, err := client.Get(sabNzbFunctions["status"])
	if err != nil {
		slog.Panic(err)
	}
	defer resp.Body.Close()
	text, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		slog.Panic(err)
	}
	// Local type mirroring the {"queue": {...}} JSON envelope.
	type Queue struct {
		Queue ReturnState
	}
	var q Queue
	if err = json.Unmarshal(text, &q); err != nil {
		slog.Panic(err)
	}
	slog.Printf("%+v", q.Queue)
	return q.Queue
}
// currentStateHandler reports the countdown state as JSON: seconds left,
// the limit percentage, the timer length in minutes, and the configured
// preset durations. All numbers are zero when no timer is running.
func currentStateHandler(w http.ResponseWriter, r *http.Request) {
	var limit, dur int64
	secs, err := cDown.SecondsLeft()
	if err != nil || secs <= 0 {
		limit, dur = 0, 0
	} else {
		dur = int64(cDown.Duration.Minutes())
		limit = cDown.LimitPercentage
	}
	state := map[string]interface{}{"secondsLeft": secs, "limit": limit, "time": dur, "times": config.Times}
	// The encode error was previously dropped; a failed write is worth a log line.
	if err := json.NewEncoder(w).Encode(state); err != nil {
		slog.Println("encoding state:", err)
	}
}
// notFound renders the custom 404 page for unmatched routes.
func notFound(w http.ResponseWriter, r *http.Request) {
	// http.StatusNotFound instead of the magic 404 literal.
	w.WriteHeader(http.StatusNotFound)
	err := compiledTemplates.ExecuteTemplate(w, "404.html", r.URL)
	if err != nil {
		slog.Panic(err)
	}
}
// callSabnzbd fires a GET at the given sabnzbd API URL; the response content
// is not interpreted. Request failures are fatal via slog.Panic.
func callSabnzbd(url string) {
	resp, err := client.Get(url)
	if err != nil {
		slog.Panic(err)
	}
	defer resp.Body.Close()
	// Drain the body so the keep-alive connection can be reused; the
	// payload itself is irrelevant here.
	ioutil.ReadAll(resp.Body)
}
// initSabnzbFunctions builds the sabnzbd API URL table from the loaded
// config. "pause_time" and "limit" keep a literal %v (escaped %%v) so the
// caller can fmt.Sprintf the value in later; must run after config is loaded.
func initSabnzbFunctions() {
	sabNzbFunctions = map[string]string{
		"reset_limit": fmt.Sprintf("%sapi?mode=config&name=speedlimit&value=0&apikey=%v", config.Api_url, config.Api_key),
		"resume_download": fmt.Sprintf("%vapi?mode=resume&apikey=%v", config.Api_url, config.Api_key),
		"pause_time": fmt.Sprintf("%vapi?mode=config&name=set_pause&value=%%v&apikey=%v", config.Api_url, config.Api_key),
		"pause": fmt.Sprintf("%vapi?mode=pause&apikey=%v", config.Api_url, config.Api_key),
		"limit": fmt.Sprintf("%vapi?mode=config&name=speedlimit&value=%%v&apikey=%v", config.Api_url, config.Api_key),
		"status": fmt.Sprintf("%vapi?mode=queue&start=START&limit=LIMIT&apikey=%v&output=json", config.Api_url, config.Api_key),
	}
}
func cacheHandler(dur time.Duration, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Cache-Control", fmt.Sprintf("max-age=%d, public, must-revalidate, proxy-revalidate", int64(dur.Seconds())))
h.ServeHTTP(w, r)
})
}
// main wires up logging, graceful shutdown, config loading and the HTTP
// routes, then serves on the configured port.
func main() {
	// Set up logging (the original declared an err here and checked it even
	// though log.New cannot fail — dead code, removed).
	slog = log.New(os.Stdout, "pauzer: ", log.LstdFlags)
	// Set up graceful termination.
	// NOTE(review): os.Kill (SIGKILL) cannot actually be trapped; it is kept
	// in the list for documentation value only.
	killChannel := make(chan os.Signal, 1)
	signal.Notify(killChannel, os.Interrupt, os.Kill, syscall.SIGTERM)
	go func(c chan os.Signal, l *log.Logger) {
		<-c
		l.Println("shutting down")
		os.Exit(0)
	}(killChannel, slog)
	// Load config
	configFile, err := ioutil.ReadFile("config.yml")
	if err != nil {
		slog.Panic(err)
	}
	if err = goyaml.Unmarshal(configFile, &config); err != nil {
		slog.Panic(err)
	}
	initSabnzbFunctions()
	// Static assets are cached for 31 days.
	const staticCacheAge = 2678400 * time.Second
	// set up gorilla/mux handlers
	r := mux.NewRouter()
	r.HandleFunc("/", homeHandler)
	r.HandleFunc("/action/{time:[0-9]+}/{limit:[0-9]+}", formHandler)
	r.HandleFunc("/resume", resumeHandler)
	r.HandleFunc("/state", currentStateHandler)
	// static files get served directly
	r.PathPrefix("/js/").Handler(http.StripPrefix("/js/", cacheHandler(staticCacheAge, http.FileServer(http.Dir("js/")))))
	r.PathPrefix("/img/").Handler(http.StripPrefix("/img/", cacheHandler(staticCacheAge, http.FileServer(http.Dir("img/")))))
	r.PathPrefix("/css/").Handler(http.StripPrefix("/css/", cacheHandler(staticCacheAge, http.FileServer(http.Dir("css/")))))
	r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
		// Bug fix: the previous code formatted the raw Duration (nanoseconds)
		// into max-age; Cache-Control expects seconds.
		w.Header().Add("Cache-Control", fmt.Sprintf("max-age=%d, public, must-revalidate, proxy-revalidate", int64(staticCacheAge.Seconds())))
		http.ServeFile(w, r, "favicon.ico")
	})
	r.NotFoundHandler = http.HandlerFunc(notFound)
	http.Handle("/", r)
	// Bug fix: the server previously hard-coded ":4000" while logging
	// config.Port; listen on the configured port and surface listen errors.
	slog.Println("started on port", config.Port)
	if err := http.ListenAndServe(fmt.Sprintf(":%d", config.Port), r); err != nil {
		slog.Fatal(err)
	}
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// FamilyMemberHistory is documented here http://hl7.org/fhir/StructureDefinition/FamilyMemberHistory
// Generated code (see the file header) — fields mirror the FHIR resource.
// The born[x], age[x] and deceased[x] groups are FHIR choice types, so at
// most one field of each group is expected to be populated.
type FamilyMemberHistory struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Meta *Meta `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules *string `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language *string `bson:"language,omitempty" json:"language,omitempty"`
	Text *Narrative `bson:"text,omitempty" json:"text,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Identifier []Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
	InstantiatesCanonical []string `bson:"instantiatesCanonical,omitempty" json:"instantiatesCanonical,omitempty"`
	InstantiatesUri []string `bson:"instantiatesUri,omitempty" json:"instantiatesUri,omitempty"`
	Status FamilyHistoryStatus `bson:"status" json:"status"`
	DataAbsentReason *CodeableConcept `bson:"dataAbsentReason,omitempty" json:"dataAbsentReason,omitempty"`
	Patient Reference `bson:"patient" json:"patient"`
	Date *string `bson:"date,omitempty" json:"date,omitempty"`
	Name *string `bson:"name,omitempty" json:"name,omitempty"`
	Relationship CodeableConcept `bson:"relationship" json:"relationship"`
	Sex *CodeableConcept `bson:"sex,omitempty" json:"sex,omitempty"`
	// born[x] choice type: period, date or string.
	BornPeriod *Period `bson:"bornPeriod,omitempty" json:"bornPeriod,omitempty"`
	BornDate *string `bson:"bornDate,omitempty" json:"bornDate,omitempty"`
	BornString *string `bson:"bornString,omitempty" json:"bornString,omitempty"`
	// age[x] choice type: age, range or string.
	AgeAge *Age `bson:"ageAge,omitempty" json:"ageAge,omitempty"`
	AgeRange *Range `bson:"ageRange,omitempty" json:"ageRange,omitempty"`
	AgeString *string `bson:"ageString,omitempty" json:"ageString,omitempty"`
	EstimatedAge *bool `bson:"estimatedAge,omitempty" json:"estimatedAge,omitempty"`
	// deceased[x] choice type: boolean, age, range, date or string.
	DeceasedBoolean *bool `bson:"deceasedBoolean,omitempty" json:"deceasedBoolean,omitempty"`
	DeceasedAge *Age `bson:"deceasedAge,omitempty" json:"deceasedAge,omitempty"`
	DeceasedRange *Range `bson:"deceasedRange,omitempty" json:"deceasedRange,omitempty"`
	DeceasedDate *string `bson:"deceasedDate,omitempty" json:"deceasedDate,omitempty"`
	DeceasedString *string `bson:"deceasedString,omitempty" json:"deceasedString,omitempty"`
	ReasonCode []CodeableConcept `bson:"reasonCode,omitempty" json:"reasonCode,omitempty"`
	ReasonReference []Reference `bson:"reasonReference,omitempty" json:"reasonReference,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
	Condition []FamilyMemberHistoryCondition `bson:"condition,omitempty" json:"condition,omitempty"`
}
// FamilyMemberHistoryCondition records one condition of the family
// member. The onset[x] group is a FHIR choice type (age, range, period
// or string); at most one is expected to be populated.
type FamilyMemberHistoryCondition struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Code CodeableConcept `bson:"code" json:"code"`
	Outcome *CodeableConcept `bson:"outcome,omitempty" json:"outcome,omitempty"`
	ContributedToDeath *bool `bson:"contributedToDeath,omitempty" json:"contributedToDeath,omitempty"`
	OnsetAge *Age `bson:"onsetAge,omitempty" json:"onsetAge,omitempty"`
	OnsetRange *Range `bson:"onsetRange,omitempty" json:"onsetRange,omitempty"`
	OnsetPeriod *Period `bson:"onsetPeriod,omitempty" json:"onsetPeriod,omitempty"`
	OnsetString *string `bson:"onsetString,omitempty" json:"onsetString,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
}

// OtherFamilyMemberHistory is a shadow type with the same fields but
// without FamilyMemberHistory's methods; marshalling through it avoids
// infinite recursion inside MarshalJSON below.
type OtherFamilyMemberHistory FamilyMemberHistory

// MarshalJSON marshals the given FamilyMemberHistory as JSON into a byte slice
func (r FamilyMemberHistory) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		OtherFamilyMemberHistory
		// resourceType is fixed for this resource kind.
		ResourceType string `json:"resourceType"`
	}{
		OtherFamilyMemberHistory: OtherFamilyMemberHistory(r),
		ResourceType: "FamilyMemberHistory",
	})
}
// UnmarshalFamilyMemberHistory decodes b into a FamilyMemberHistory.
// On failure the zero resource is returned together with the error.
func UnmarshalFamilyMemberHistory(b []byte) (FamilyMemberHistory, error) {
	var result FamilyMemberHistory
	err := json.Unmarshal(b, &result)
	return result, err
}
|
/*Package helpers containts helper functions used within project */
package helpers
import (
"errors"
"fmt"
"os"
logic "github.com/xDarkicex/Logic"
)
// vars for use of logic ops
var (
	Equal = logic.Eq
	And = logic.And
	// argOne and argTwo are package-level scratch flags written by Manual.
	// NOTE(review): mutable package state; function-local variables would
	// be safer if Manual is ever called concurrently.
	argOne bool
	argTwo bool
)
/*
Manual prints the available flags for new project generation and then
terminates the process via os.Exit(0). The only input that does NOT
print is a == false && b == false, which returns an error instead.

NOTE(review): the original doc described this as an exclusive-or
(printing only when a != b), but the code prints for every input except
both-false — confirm which behavior is intended. The error message
("can not set both -m and -man") also fires when NEITHER flag is set,
which looks inverted; verify against the callers before changing it.
*/
func Manual(a, b bool) error {
	if Equal(a, b) {
		// a == b here; record whether each is false.
		argOne = (a == false)
		argTwo = (b == false)
		if And(argOne, argTwo) {
			// Both arguments are false.
			return errors.New("can not set both -m and -man")
		}
	}
	fmt.Printf("getGoing: options require an argument\nUsage: [-new application name] [-template defines templates options: basic, mvc]\n")
	os.Exit(0)
	return nil // unreachable: os.Exit does not return
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// main solves both parts of Advent of Code 2020 day 1: it prints the
// product of every pair, then of every triple, of entries that sum to
// 2020 (each unordered match is printed once per ordering, exactly as
// the original did).
func main() {
	file, err := os.Open("input")
	if err != nil {
		// Report the problem instead of exiting silently.
		fmt.Fprintln(os.Stderr, err)
		return
	}
	scanner := bufio.NewScanner(file)
	// Parse each line exactly once up front instead of re-running
	// strconv.Atoi inside the O(n^2)/O(n^3) loops below. Malformed lines
	// contribute 0, matching the original's ignored Atoi error.
	var nums []int
	for scanner.Scan() {
		v, _ := strconv.Atoi(scanner.Text())
		nums = append(nums, v)
	}
	file.Close()
	// Part 1: pairs summing to 2020.
	for _, a := range nums {
		for _, b := range nums {
			if a+b == 2020 {
				fmt.Println(a * b)
			}
		}
	}
	// Part 2: triples summing to 2020.
	for _, a := range nums {
		for _, b := range nums {
			for _, c := range nums {
				if a+b+c == 2020 {
					fmt.Println(a * b * c)
				}
			}
		}
	}
}
|
package jit
import "fmt"
// FoldConst returns a new expression where all constant subexpressions have been replaced by numbers.
// E.g.:
// 1+1 -> 2
func FoldConst(e expr) expr {
	switch e := e.(type) {
	default:
		// Leaves (constants, variables, anything else) fold to themselves.
		return e
	case binexpr:
		return foldBinexpr(e)
	case callexpr:
		return foldCallexpr(e)
	}
}

// isConst reports whether e is a constant node. Note that only the
// pointer form *constant is recognized here.
func isConst(e expr) bool {
	_, ok := e.(*constant)
	return ok
}
// foldBinexpr folds a binary expression: both operands are folded first,
// and if both came out constant the operation is evaluated immediately.
// Unknown operators are a programmer error and panic.
func foldBinexpr(e binexpr) expr {
	x := FoldConst(e.x)
	y := FoldConst(e.y)
	if isConst(x) && isConst(y) {
		x := x.(*constant).value
		y := y.(*constant).value
		var v float64
		switch e.op {
		default:
			panic(fmt.Sprintf("foldBinexpr %v", e.op))
		case "+":
			v = x + y
		case "-":
			v = x - y
		case "*":
			v = x * y
		case "/":
			v = x / y
		}
		// Return a *constant rather than a constant value: isConst (and
		// the extraction above) only recognize the pointer form, so the
		// original `constant{v}` meant a folded result could never take
		// part in further folding (e.g. (1+2)+3 stopped after the inner
		// fold).
		return &constant{v}
	}
	return binexpr{op: e.op, x: x, y: y}
}
// foldCallexpr folds a function call: if the (folded) argument is
// constant, the C function is invoked now and replaced by its result.
func foldCallexpr(e callexpr) expr {
	arg := FoldConst(e.arg)
	if isConst(arg) {
		a := arg.(*constant).value
		f := funcs[e.fun]
		v := callCFunc(f, a)
		// Return *constant so isConst recognizes the folded result in an
		// enclosing expression (see foldBinexpr for the rationale).
		return &constant{v}
	}
	return callexpr{fun: e.fun, arg: arg}
}
|
package util
import "container/heap"
// Element is a heap item holding a single integer priority value.
type Element struct {
	Val int
}

// PQ is a min-heap of *Element ordered by Val. It implements
// heap.Interface; use it through container/heap, not directly.
type PQ []*Element

// Len returns the number of queued elements.
func (q *PQ) Len() int {
	return len(*q)
}

// Less orders elements by ascending Val (min-heap).
func (q *PQ) Less(i, j int) bool {
	return (*q)[i].Val < (*q)[j].Val
}

// Swap exchanges the elements at indices i and j.
func (q *PQ) Swap(i, j int) {
	(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}

// Push appends v; heap.Push calls this and then restores heap order.
func (q *PQ) Push(v interface{}) {
	*q = append(*q, v.(*Element))
}
// Pop removes and returns the last element of the slice; heap.Pop has
// already moved the minimum there. The vacated slot is nilled out so
// the backing array does not keep the popped *Element alive (the same
// pattern used in the container/heap package example).
func (q *PQ) Pop() interface{} {
	old := *q
	n := len(old)
	last := old[n-1]
	old[n-1] = nil // avoid memory leak via the retained backing array
	*q = old[:n-1]
	return last
}
// PriorityQueue is the typed facade over the raw heap operations.
type PriorityQueue interface {
	Len() int
	Push(ele *Element)
	Pop() *Element
}

// priorityQueueImpl adapts *PQ to PriorityQueue via container/heap.
type priorityQueueImpl struct {
	*PQ
}
// NewPriorityQueue returns an empty min-priority queue.
func NewPriorityQueue() PriorityQueue {
	pq := &PQ{}
	return &priorityQueueImpl{pq}
}
// Len reports the number of queued elements.
func (pq *priorityQueueImpl) Len() int {
	return pq.PQ.Len()
}

// Push inserts e, keeping the heap invariant.
func (pq *priorityQueueImpl) Push(e *Element) {
	heap.Push(pq.PQ, e)
}

// Pop removes and returns the minimum element.
func (pq *priorityQueueImpl) Pop() *Element {
	return heap.Pop(pq.PQ).(*Element)
}
|
package backoff
import (
"context"
"math/rand"
"time"
)
// An Option configures a BackOff.
type Option interface {
	apply(b Policy) Policy
}

// optionFunc wraps a func so it satisfies the Option interface.
type optionFunc func(Policy) Policy

// apply delegates straight to the wrapped function.
func (f optionFunc) apply(p Policy) Policy {
	return f(p)
}

// statelessOptionFunc wraps a function modifying the duration so it satisfies the Option interface.
type statelessOptionFunc func(time.Duration) time.Duration

// apply wraps p so every NextBackOff result is filtered through f.
func (f statelessOptionFunc) apply(p Policy) Policy {
	return &statelessOption{
		delegate: p,
		f: f,
	}
}
// MaxRetries configures a backoff policy to return Stop once
// NextBackOff() has been called max times.
func MaxRetries(max int) Option {
	return optionFunc(func(p Policy) Policy {
		// numTries starts at its zero value.
		o := &maxRetriesOption{delegate: p, maxTries: max}
		return o
	})
}
// MaxInterval caps every interval produced by NextBackOff() at
// maxInterval.
func MaxInterval(maxInterval time.Duration) Option {
	return statelessOptionFunc(func(d time.Duration) time.Duration {
		if d <= maxInterval {
			return d
		}
		return maxInterval
	})
}
// Timeout configures a backoff policy to stop once the wall clock
// passes the given deadline; the final interval is clipped so it never
// overshoots the deadline.
func Timeout(timeout time.Time) Option {
	return statelessOptionFunc(func(d time.Duration) time.Duration {
		if d == Stop {
			return Stop
		}
		now := time.Now()
		switch {
		case now.After(timeout):
			// Deadline already passed.
			return Stop
		case now.Add(d).After(timeout):
			// Clip the last wait to end exactly at the deadline.
			return timeout.Sub(now)
		default:
			return d
		}
	})
}
// Cancel configures a backoff policy to stop as soon as the given
// context is done.
func Cancel(ctx context.Context) Option {
	return statelessOptionFunc(func(d time.Duration) time.Duration {
		// ctx.Err() is non-nil exactly when ctx.Done() is closed.
		if ctx.Err() != nil {
			return Stop
		}
		return d
	})
}
// Jitter configures a backoff policy to randomly modify the duration by the given factor.
// The modified duration is a uniform random value in the interval
// ((1-randomFactor)*duration, duration] — duration minus a random
// fraction of randomFactor*duration. (The original comment claimed
// [randomFactor*duration, duration), which does not match the code.)
func Jitter(randomFactor float64) Option {
	return statelessOptionFunc(func(duration time.Duration) time.Duration {
		if duration == Stop {
			// Stop is a sentinel and must never be jittered.
			return Stop
		}
		if randomFactor <= 0 {
			// A non-positive factor disables jitter entirely.
			return duration
		}
		delta := randomFactor * float64(duration)
		//nolint:gosec // we do not care about weak random numbers here
		return time.Duration(float64(duration) - rand.Float64()*delta)
	})
}
// statelessOption decorates a Policy with a pure duration-transforming
// function; it carries no per-retry state of its own.
type statelessOption struct {
	delegate Policy
	f func(time.Duration) time.Duration
}

// apply re-wraps a new delegate with the same transform.
func (o *statelessOption) apply(p Policy) Policy {
	return &statelessOption{
		delegate: p,
		f: o.f,
	}
}

// NextBackOff filters the delegate's next interval through f.
func (o *statelessOption) NextBackOff() time.Duration {
	return o.f(o.delegate.NextBackOff())
}

// New clones the option around a fresh copy of the delegate policy.
func (o *statelessOption) New() Policy {
	return &statelessOption{
		delegate: o.delegate.New(),
		f: o.f,
	}
}

// maxRetriesOption stops the backoff after a fixed number of attempts.
type maxRetriesOption struct {
	delegate Policy
	maxTries int
	numTries int
}

// NextBackOff returns Stop once maxTries attempts have been consumed.
// maxTries == 0 stops immediately; a negative maxTries never stops
// (the counter is not even incremented in that case).
func (b *maxRetriesOption) NextBackOff() time.Duration {
	if b.maxTries == 0 {
		return Stop
	}
	if b.maxTries > 0 {
		if b.maxTries <= b.numTries {
			return Stop
		}
		b.numTries++
	}
	return b.delegate.NextBackOff()
}

// New returns a fresh instance with the retry counter reset to zero.
func (b *maxRetriesOption) New() Policy {
	return &maxRetriesOption{
		delegate: b.delegate.New(),
		maxTries: b.maxTries,
		numTries: 0,
	}
}
|
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/swag"
strfmt "github.com/go-openapi/strfmt"
)
// NewCreateGetInputControlsInitialValuesViaPostParams creates a new CreateGetInputControlsInitialValuesViaPostParams object
// with the default values initialized.
func NewCreateGetInputControlsInitialValuesViaPostParams() *CreateGetInputControlsInitialValuesViaPostParams {
	var (
		// freshData defaults to false: serve cached values unless asked.
		freshDataDefault bool = bool(false)
	)
	return &CreateGetInputControlsInitialValuesViaPostParams{
		FreshData: &freshDataDefault,
		timeout: cr.DefaultTimeout,
	}
}

// NewCreateGetInputControlsInitialValuesViaPostParamsWithTimeout creates a new CreateGetInputControlsInitialValuesViaPostParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateGetInputControlsInitialValuesViaPostParamsWithTimeout(timeout time.Duration) *CreateGetInputControlsInitialValuesViaPostParams {
	var (
		freshDataDefault bool = bool(false)
	)
	return &CreateGetInputControlsInitialValuesViaPostParams{
		FreshData: &freshDataDefault,
		timeout: timeout,
	}
}
/*CreateGetInputControlsInitialValuesViaPostParams contains all the parameters to send to the API endpoint
for the create get input controls initial values via post operation typically these are written to a http.Request
*/
type CreateGetInputControlsInitialValuesViaPostParams struct {
	/*FreshData*/
	FreshData *bool
	/*ReportUnitURI*/
	ReportUnitURI *string
	// timeout bounds the HTTP request; set via the constructors.
	timeout time.Duration
}

// WithFreshData adds the freshData to the create get input controls initial values via post params
// and returns the receiver for chaining.
func (o *CreateGetInputControlsInitialValuesViaPostParams) WithFreshData(FreshData *bool) *CreateGetInputControlsInitialValuesViaPostParams {
	o.FreshData = FreshData
	return o
}

// WithReportUnitURI adds the reportUnitUri to the create get input controls initial values via post params
// and returns the receiver for chaining.
func (o *CreateGetInputControlsInitialValuesViaPostParams) WithReportUnitURI(ReportUnitURI *string) *CreateGetInputControlsInitialValuesViaPostParams {
	o.ReportUnitURI = ReportUnitURI
	return o
}
// WriteToRequest writes these params to a swagger request
func (o *CreateGetInputControlsInitialValuesViaPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	r.SetTimeout(o.timeout)
	// res collects validation errors; in this generated operation nothing
	// ever appends to it, so the final check is effectively dead code
	// kept for template uniformity.
	var res []error
	if o.FreshData != nil {
		// query param freshData
		var qrFreshData bool
		if o.FreshData != nil {
			qrFreshData = *o.FreshData
		}
		qFreshData := swag.FormatBool(qrFreshData)
		if qFreshData != "" {
			if err := r.SetQueryParam("freshData", qFreshData); err != nil {
				return err
			}
		}
	}
	if o.ReportUnitURI != nil {
		// query param reportUnitURI
		var qrReportUnitURI string
		if o.ReportUnitURI != nil {
			qrReportUnitURI = *o.ReportUnitURI
		}
		qReportUnitURI := qrReportUnitURI
		if qReportUnitURI != "" {
			if err := r.SetQueryParam("reportUnitURI", qReportUnitURI); err != nil {
				return err
			}
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
|
package config
import (
"context"
"errors"
"flag"
"fmt"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
var (
	Conf *conf // static configuration (fixed after startup)
	DynamicConf *dynamicConf // dynamic configuration (replaceable at runtime)
	_path string
	_etcd string
	_client *clientv3.Client
	_clientOnce sync.Once
)

const SERVER_NAME = "iris_web"

// conf holds the static configuration: parameters that cannot be
// changed once the program has started.
type conf struct {
	ServerPort int `yaml:"server_port"`
	LogPath string `yaml:"log_path"`
	MysqlDsn string `yaml:"mysql_dsn"`
	MysqlMaxIdle int `yaml:"mysql_max_idle"`
	MysqlMaxOpen int `yaml:"mysql_max_open"`
	RedisAddr string `yaml:"redis_addr"`
	RedisDB int `yaml:"redis_db"`
	RedisMaxIdle int `yaml:"redis_max_idle"`
	RedisMaxOpen int `yaml:"redis_max_open"`
}

// dynamicConf holds parameters that may be changed while the program
// is running (updated via the etcd watch below).
type dynamicConf struct {
	UserDefaultName string `yaml:"user_default_name"`
	UserDefaultAge int `yaml:"user_default_age"`
}

// init registers the command-line flags: -c for the config file path
// and -etcd for the etcd endpoints (defaulting to the ETCD env var).
func init() {
	flag.StringVar(&_path, "c", SERVER_NAME+".yml", "default config path")
	flag.StringVar(&_etcd, "etcd", os.Getenv("ETCD"), "default etcd address")
}
// InitConfig loads configuration, preferring etcd (when -etcd/ETCD is
// set) and falling back to the local YAML file. The same content
// initializes both the static Conf and the dynamic DynamicConf.
func InitConfig() error {
	var err error
	var content []byte
	if _etcd != "" {
		content, err = fetchConfig("/config/"+SERVER_NAME, watchDynamicConfig)
	} else {
		content, err = ioutil.ReadFile(_path)
	}
	if err != nil {
		return err
	}
	if len(content) == 0 {
		// Fixed wording of the original broken message
		// ("not found nothing config").
		return errors.New("no config content found")
	}
	Conf = &conf{}
	if err := yaml.Unmarshal(content, Conf); err != nil {
		return err
	}
	fmt.Printf("static config => [%#v]\n", Conf)
	DynamicConf = &dynamicConf{}
	if err := yaml.Unmarshal(content, DynamicConf); err != nil {
		return err
	}
	fmt.Printf("dynamic config => [%#v]\n", DynamicConf)
	return nil
}
// fetchConfig loads all keys below nodePath from etcd and renders them
// as "key: value" YAML lines (one per key basename). When watchFn is
// non-nil a goroutine keeps watching the prefix and reports every
// change; deletions are reported with an empty value.
func fetchConfig(nodePath string, watchFn func(k, v string)) ([]byte, error) {
	var err error
	var result string
	var resp *clientv3.GetResponse
	_clientOnce.Do(func() {
		c := clientv3.Config{Endpoints: strings.Split(_etcd, ";"), DialTimeout: 5 * time.Second}
		_client, err = clientv3.New(c)
	})
	if err != nil {
		return []byte(""), err
	}
	// sync.Once never re-runs: if the first initialization failed, err is
	// nil on every later call but _client is still nil. Guard explicitly
	// instead of dereferencing a nil client below.
	if _client == nil {
		return []byte(""), errors.New("etcd client is not initialized")
	}
	if resp, err = _client.Get(context.Background(), nodePath, clientv3.WithPrefix()); err != nil {
		return []byte(""), err
	}
	if resp == nil || resp.Kvs == nil {
		return []byte(""), errors.New("no response data")
	}
	for _, kvs := range resp.Kvs {
		if kvs != nil {
			// Render each key's basename as one YAML "key: value" line.
			result += fmt.Sprintf("%s: %s\n", filepath.Base(string(kvs.Key)), string(kvs.Value))
		}
	}
	if watchFn != nil {
		go func() {
			// Forward PUT and DELETE events on the whole prefix.
			rch := _client.Watch(context.Background(), nodePath, clientv3.WithPrefix())
			for wResp := range rch {
				for _, ev := range wResp.Events {
					switch ev.Type {
					case mvccpb.PUT:
						watchFn(filepath.Base(string(ev.Kv.Key)), string(ev.Kv.Value))
					case mvccpb.DELETE:
						// An empty value signals deletion to the callback.
						watchFn(filepath.Base(string(ev.Kv.Key)), "")
					}
				}
			}
		}()
	}
	return []byte(result), nil
}
// watchDynamicConfig applies one changed etcd key to the dynamic config
// by copying the current value, patching the copy and swapping the
// package pointer (copy-then-replace so readers never see a partially
// updated struct).
// NOTE(review): the pointer swap itself is not synchronized against
// concurrent readers of DynamicConf — confirm whether that matters here.
func watchDynamicConfig(key, val string) {
	dc := new(dynamicConf)
	*dc = *DynamicConf
	// The original ignored this error, silently keeping a half-valid
	// update; reject malformed values instead.
	if err := yaml.Unmarshal([]byte(key+": "+val), dc); err != nil {
		fmt.Printf("invalid dynamic config %s=%s: %v\n", key, val, err)
		return
	}
	DynamicConf = dc
	fmt.Printf("Latest dynamic config => [%#v]\n", DynamicConf)
}
|
package main
import (
"net/http"
"github.com/SuperTikuwa/mission-techdojo/handler"
)
// main registers the HTTP routes and starts the API server on :8080.
func main() {
	http.HandleFunc("/user/create", handler.CreateHandler)
	http.HandleFunc("/user/get", handler.GetHandler)
	http.HandleFunc("/user/update", handler.UpdateHandler)
	http.HandleFunc("/gacha/draw", handler.DrawHandler)
	http.HandleFunc("/character/list", handler.ListHandler)
	http.HandleFunc("/emission/rate", handler.EmissionRateHandler)
	// http.HandleFunc("/query", handler.Query)
	// ListenAndServe only returns on failure; surface that instead of
	// letting main exit silently with status 0.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
|
package oneagent_mutation
import (
"strconv"
"github.com/Dynatrace/dynatrace-operator/src/config"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
dtwebhook "github.com/Dynatrace/dynatrace-operator/src/webhook"
corev1 "k8s.io/api/core/v1"
)
// configureInitContainer seeds the install init container with the
// installer env vars and the init volume mounts.
func (mutator *OneAgentPodMutator) configureInitContainer(request *dtwebhook.MutationRequest, installer installerInfo) {
	addInstallerInitEnvs(request.InstallContainer, installer, mutator.getVolumeMode(request.DynaKube))
	addInitVolumeMounts(request.InstallContainer)
}

// setContainerCount records the number of app containers in the init
// container's env so the installer knows how many entries to process.
func (mutator *OneAgentPodMutator) setContainerCount(initContainer *corev1.Container, containerCount int) {
	desiredContainerCountEnvVarValue := strconv.Itoa(containerCount)
	initContainer.Env = kubeobjects.AddOrUpdate(initContainer.Env, corev1.EnvVar{Name: config.AgentContainerCountEnv, Value: desiredContainerCountEnvVarValue})
}

// mutateUserContainers injects OneAgent into every app container of the
// pod; the per-container env vars on the install container use 1-based
// indices (i+1).
func (mutator *OneAgentPodMutator) mutateUserContainers(request *dtwebhook.MutationRequest) {
	for i := range request.Pod.Spec.Containers {
		container := &request.Pod.Spec.Containers[i]
		addContainerInfoInitEnv(request.InstallContainer, i+1, container.Name, container.Image)
		mutator.addOneAgentToContainer(request.ToReinvocationRequest(), container)
	}
}
// reinvokeUserContainers mutates each user container that hasn't been injected yet.
// It makes sure that the new containers will have an environment variable in the install-container
// that doesn't conflict with the previous environment variables of the originally injected containers.
// It returns true when at least one container was newly injected.
func (mutator *OneAgentPodMutator) reinvokeUserContainers(request *dtwebhook.ReinvocationRequest) bool {
	pod := request.Pod
	// NOTE(review): findOneAgentInstallContainer returns nil when the
	// install container is missing, and the calls below would then panic.
	// Presumably reinvocation only occurs on pods this webhook already
	// mutated — confirm that invariant.
	oneAgentInstallContainer := findOneAgentInstallContainer(pod.Spec.InitContainers)
	newContainers := []*corev1.Container{}
	for i := range pod.Spec.Containers {
		currentContainer := &pod.Spec.Containers[i]
		if containerIsInjected(currentContainer) {
			// Already handled in a previous invocation.
			continue
		}
		newContainers = append(newContainers, currentContainer)
	}
	// Continue the 1-based numbering after the already-injected containers.
	oldContainersLen := len(pod.Spec.Containers) - len(newContainers)
	for i := range newContainers {
		currentContainer := newContainers[i]
		addContainerInfoInitEnv(oneAgentInstallContainer, oldContainersLen+i+1, currentContainer.Name, currentContainer.Image)
		mutator.addOneAgentToContainer(request, currentContainer)
	}
	if len(newContainers) == 0 {
		return false
	}
	mutator.setContainerCount(oneAgentInstallContainer, len(request.Pod.Spec.Containers))
	return true
}
// addOneAgentToContainer wires a single app container up for OneAgent:
// volume mounts, preload and deployment-metadata env vars, plus the
// optional cert, curl, proxy, network-zone and version-detection
// settings depending on the DynaKube feature flags.
func (mutator *OneAgentPodMutator) addOneAgentToContainer(request *dtwebhook.ReinvocationRequest, container *corev1.Container) {
	log.Info("adding OneAgent to container", "name", container.Name)
	// The install path can be overridden per-pod via annotation.
	installPath := kubeobjects.GetField(request.Pod.Annotations, dtwebhook.AnnotationInstallPath, dtwebhook.DefaultInstallPath)
	dynakube := request.DynaKube
	addOneAgentVolumeMounts(container, installPath)
	addDeploymentMetadataEnv(container, dynakube, mutator.clusterID)
	addPreloadEnv(container, installPath)
	if dynakube.HasActiveGateCaCert() {
		addCertVolumeMounts(container)
	}
	if dynakube.FeatureAgentInitialConnectRetry() > 0 {
		addCurlOptionsVolumeMount(container)
	}
	if dynakube.NeedsOneAgentProxy() {
		addProxyEnv(container)
	}
	if dynakube.Spec.NetworkZone != "" {
		addNetworkZoneEnv(container, dynakube.Spec.NetworkZone)
	}
	if dynakube.FeatureLabelVersionDetection() {
		addVersionDetectionEnvs(container, newVersionLabelMapping(request.Namespace))
	}
}
// findOneAgentInstallContainer returns a pointer to the OneAgent
// install init container, or nil if the pod does not have one.
func findOneAgentInstallContainer(initContainers []corev1.Container) *corev1.Container {
	for i := range initContainers {
		if initContainers[i].Name == dtwebhook.InstallContainerName {
			return &initContainers[i]
		}
	}
	return nil
}
|
package schedule
// Job describes one schedulable unit of work: the container image to
// run and the payload handed to it.
type Job struct {
	//CodeName string `json:"code_name"`
	Image string `json:"image"`
	Payload string `json:"payload"`
}

// ReqData is the request body wrapping a batch of jobs.
type ReqData struct {
	Jobs []*Job `json:"jobs"`
}
|
// Copyright 2018 Satoshi Konno. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package transport
import (
"testing"
)
const (
	// testUnicastTCPSocketPort is an arbitrary high port used only by
	// the bind/close test below.
	testUnicastTCPSocketPort = 32001
)
// TestUnicastTCPSocketOpenClose binds a unicast TCP socket on the first
// available interface and verifies it closes cleanly.
func TestUnicastTCPSocketOpenClose(t *testing.T) {
	sock := NewUnicastTCPSocket()
	ifs, err := GetAvailableInterfaces()
	if err != nil {
		t.Error(err)
		return
	}
	// Guard against hosts with no usable interface: the original indexed
	// ifs[0] unconditionally and panicked in that case.
	if len(ifs) == 0 {
		t.Skip("no available network interfaces")
	}
	err = sock.Bind(ifs[0], testUnicastTCPSocketPort)
	if err != nil {
		t.Error(err)
		return
	}
	err = sock.Close()
	if err != nil {
		t.Error(err)
	}
}
|
/*
Factorial.
The smallest valid input is 0.
*/
package main
import "fmt"
// main prints 0!, the smallest valid factorial input.
func main() {
	fmt.Println(factorial(0))
}
// factorial returns n! for n >= 0. Inputs below 1 — including negative
// values, which previously caused unbounded recursion and a stack
// overflow — return 1.
func factorial(n int) int {
	if n <= 1 {
		return 1
	}
	return n * factorial(n-1)
}
|
package panic_recover
import "fmt"
// ProductCode demonstrates panic/recover: the inner closure always
// panics, the deferred recover resets z to 0, and the function then
// prints the result.
// NOTE(review): the unconditional panic makes `z = x / y` unreachable,
// so the printed value is always 0 — presumably intentional demo code.
func ProductCode(x, y int) {
	var z int
	func() {
		defer func() {
			// recover returns non-nil only while panicking; swallow the
			// panic and fall back to z = 0.
			if recover() != nil {
				z = 0
			}
		}()
		panic("test panic")
		z = x / y
		return
	}()
	fmt.Printf("x/y = %d\n", z)
}
|
package dynamic_programming
import (
"sort"
"testing"
)
// Russian-doll envelopes: longest strictly increasing subsequence in
// two dimensions, solved with the quadratic DP formulation.
func maxEnvelopes1(envelopes [][]int) int {
	if len(envelopes) <= 1 {
		return len(envelopes)
	}
	sort.Slice(envelopes, func(i, j int) bool {
		// Equal widths: order by height ascending. (Same-width pairs can
		// never chain anyway, since the DP below requires strictly
		// smaller width AND height.)
		if envelopes[i][0] == envelopes[j][0] {
			return envelopes[i][1] < envelopes[j][1]
		}
		return envelopes[i][0] < envelopes[j][0]
	})
	// max tracks the longest chain found so far.
	max := 1
	dp := make([]int, len(envelopes))
	dp[0] = 1
	for i := 0; i < len(envelopes); i++ {
		dp[i] = 1
		for j := i - 1; j >= 0; j-- {
			if envelopes[j][0] < envelopes[i][0] && envelopes[j][1] < envelopes[i][1] {
				if dp[i] < dp[j]+1 {
					dp[i] = dp[j] + 1
				}
				if dp[i] > max {
					// Early exit: dp[i] can never exceed max+1 (every chain
					// extends one ending at some j with dp[j] <= max), so
					// once dp[i] passes max it is already exact and the
					// rest of the j scan is redundant.
					max = dp[i]
					break
				}
			}
		}
	}
	return max
}
// Russian-doll envelopes: longest strictly increasing subsequence in
// two dimensions, with the inner search replaced by binary search
// (patience-sorting formulation, O(n log n)).
func maxEnvelopes(envelopes [][]int) int {
	if len(envelopes) <= 1 {
		return len(envelopes)
	}
	// Sort by width ascending; for equal widths sort height DESCENDING so
	// envelopes of the same width can never chain with each other.
	sort.Slice(envelopes, func(i, j int) bool {
		if envelopes[i][0] != envelopes[j][0] {
			return envelopes[i][0] < envelopes[j][0]
		}
		return envelopes[i][1] > envelopes[j][1]
	})
	// tails[k] is the smallest possible tail height of an increasing
	// chain of length k+1; only heights matter after the sort above.
	tails := make([]int, 0)
	for _, env := range envelopes {
		h := env[1]
		// Lower bound: first index whose tail is >= h.
		lo, hi := 0, len(tails)
		for lo < hi {
			mid := lo + (hi-lo)/2
			if tails[mid] < h {
				lo = mid + 1
			} else {
				hi = mid
			}
		}
		if lo == len(tails) {
			tails = append(tails, h)
		} else {
			tails[lo] = h
		}
	}
	return len(tails)
}
// Test_354 exercises maxEnvelopes (LeetCode 354); the expected answer
// for the active case is 3. Results are only logged, not asserted.
func Test_354(t *testing.T) {
	//t.Log(maxEnvelopes([][]int{{1, 2}, {3, 4}, {5, 6}, {7, 8}})) //4
	//t.Log(maxEnvelopes([][]int{{7, 8}, {1, 2}, {5, 6}, {3, 4}})) //4
	t.Log(maxEnvelopes([][]int{{5, 4}, {6, 4}, {6, 7}, {2, 3}})) //3
}
|
package rating
import (
"math"
"log"
)
// some constants copied from https://github.com/golang/go/blob/master/src/math/bits.go
const (
	shift = 64 - 11 - 1 // float64 mantissa width (11 exponent bits, 1 sign bit)
	bias = 1023 // float64 exponent bias
	mask = 0x7FF // 11-bit exponent mask
)

// Round returns the nearest integer, rounding half away from zero.
// This function is available natively in Go 1.10
//
// Special cases are:
// Round(±0) = ±0
// Round(±Inf) = ±Inf
// Round(NaN) = NaN
func Round(x float64) float64 {
	// Round is a faster implementation of:
	//
	// func Round(x float64) float64 {
	// t := Trunc(x)
	// if Abs(x-t) >= 0.5 {
	// return t + Copysign(1, x)
	// }
	// return t
	// }
	const (
		signMask = 1 << 63
		fracMask = 1<<shift - 1
		half = 1 << (shift - 1)
		one = bias << shift
	)
	bits := math.Float64bits(x)
	e := uint(bits>>shift) & mask
	if e < bias {
		// Round abs(x) < 1 including denormals.
		bits &= signMask // +-0
		if e == bias-1 {
			bits |= one // +-1
		}
	} else if e < bias+shift {
		// Round any abs(x) >= 1 containing a fractional component [0,1).
		//
		// Numbers with larger exponents are returned unchanged since they
		// must be either an integer, infinity, or NaN.
		e -= bias
		bits += half >> e
		bits &^= fracMask >> e
	}
	return math.Float64frombits(bits)
}
// CalculateNewRating returns the updated Elo ratings of two players
// with K-factor 32. winner is 0 for a draw, 1 or 2 for the winning
// player; any other value is logged and scores of 0/0 are applied,
// matching the original behavior.
func CalculateNewRating(old1 int, old2 int, winner int) (int, int) {
	const k = 32.0
	expected1 := 1.0 / (1.0 + math.Pow(10.0, float64(old2-old1)/400))
	expected2 := 1.0 / (1.0 + math.Pow(10.0, float64(old1-old2)/400))
	var score1, score2 float64
	switch winner {
	case 0:
		score1, score2 = 0.5, 0.5
	case 1:
		score1, score2 = 1.0, 0.0
	case 2:
		score1, score2 = 0.0, 1.0
	default:
		// Scores stay 0/0 here, exactly as before.
		log.Printf("invalid winner %v", winner)
	}
	rating1 := float64(old1) + k*(score1-expected1)
	rating2 := float64(old2) + k*(score2-expected2)
	return int(Round(rating1)), int(Round(rating2))
}
|
package main
import (
"bytes"
)
//49. Group Anagrams
//Given an array of strings, group the anagrams together. Anagrams are
//strings made of the same letters arranged in a different order.
//
//Example:
//
//Input: ["eat", "tea", "tan", "ate", "nat", "bat"]
//Output:
//[
//["ate","eat","tea"],
//["nat","tan"],
//["bat"]
//]
//Notes:
//
//All inputs consist of lowercase letters only.
//The order of the output groups does not matter.
//Approach: count the letters of each word and use the counts as a fingerprint key.
// groupAnagrams buckets the input strings by a 26-letter count
// fingerprint, so words made of the same letters end up together.
// Inputs are assumed to be lowercase a-z.
func groupAnagrams(strs []string) [][]string {
	groups := make([][]string, 0)
	// fingerprint -> group index + 1 (0 means "not seen yet").
	index := make(map[string]int)
	for _, s := range strs {
		var counts [26]int
		for _, r := range s {
			counts[r-'a']++
		}
		key := comToFinger(counts, ",")
		if pos := index[key]; pos == 0 {
			index[key] = len(groups) + 1
			groups = append(groups, []string{s})
		} else {
			groups[pos-1] = append(groups[pos-1], s)
		}
	}
	return groups
}
func comToFinger(a [26]int, sep string) string {
var buff bytes.Buffer
n := len(a)
for i := 0; i < n; i++ {
buff.WriteByte(byte(a[i]))
buff.WriteByte(byte(a[i] >> 8))
buff.WriteByte(byte(a[i] >> 16))
buff.WriteByte(byte(a[i] >> 24))
if i < n-1 {
buff.WriteString(sep)
}
}
return buff.String()
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package camera
import (
"context"
"chromiumos/tast/autocaps"
"chromiumos/tast/local/camera/testutil"
"chromiumos/tast/testing"
)
// init registers the Capability test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: Capability,
		Desc: "Compare capabilities defined in autocaps package with ones detected by platform camera tools",
		Contacts: []string{"kamesan@chromium.org", "chromeos-camera-eng@google.com"},
		Attr: []string{"group:mainline", "informational"},
	})
}

// Capability compares the static capabilities versus those detected in the DUT.
// Any capability that is statically set but not detected (or vice versa)
// is reported as a test error.
func Capability(ctx context.Context, s *testing.State) {
	// Get capabilities defined in autocaps package.
	staticCaps, err := autocaps.Read(autocaps.DefaultCapabilityDir, nil)
	if err != nil {
		s.Fatal("Failed to read statically-set capabilities: ", err)
	}
	// Detect USB cameras.
	usbCams, err := testutil.USBCamerasFromV4L2Test(ctx)
	if err != nil {
		s.Fatal("Failed to get USB cameras: ", err)
	}
	hasUSB := len(usbCams) > 0
	// Detect MIPI cameras.
	mipiCams, err := testutil.MIPICamerasFromCrOSCameraTool(ctx)
	if err != nil {
		s.Fatal("Failed to get MIPI cameras: ", err)
	}
	hasMIPI := len(mipiCams) > 0
	hasVivid := testutil.IsVividDriverLoaded(ctx)
	// Map each static capability name to whether it was actually detected.
	capsToVerify := map[string]bool{
		"builtin_usb_camera": hasUSB,
		"builtin_mipi_camera": hasMIPI,
		"vivid_camera": hasVivid,
		"builtin_camera": hasUSB || hasMIPI,
		"builtin_or_vivid_camera": hasUSB || hasMIPI || hasVivid,
	}
	for c, detected := range capsToVerify {
		if staticCaps[c] == autocaps.Yes && !detected {
			s.Errorf("%q statically set but not detected", c)
		} else if staticCaps[c] != autocaps.Yes && detected {
			s.Errorf("%q detected but not statically set", c)
		}
	}
}
|
package log
import (
"testing"
)
// TestLogger exercises every level of the facade logger purely for
// code coverage; no output assertions are made.
func TestLogger(t *testing.T) {
	log := New("trace")
	// this is a facade to get code coverage up
	t.Run("Testing PluggableLogger : should pass", func(t *testing.T) {
		log.Info("Test %s ", "log")
		log.Warn("Test %s ", "log")
		log.Debug("Test %s ", "log")
		log.Trace("Test %s ", "log")
		log.Error("Test %s ", "log")
	})
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validator
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestStandalonePodsSelector verifies that Select returns only the pods in
// the requested namespace that have no controller owner reference.
func TestStandalonePodsSelector(t *testing.T) {
	tests := []struct {
		description  string
		allPods      []v1.Pod
		expectedPods []v1.Pod
	}{
		{
			description: "pod don't exist in test namespace",
			allPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo",
					Namespace: "foo-ns",
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"}},
			},
			expectedPods: nil,
		},
		{
			description: "only deployment pods",
			allPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "foo",
					Namespace:       "test",
					OwnerReferences: []metav1.OwnerReference{{UID: "", Controller: truePtr()}},
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}},
			expectedPods: nil,
		},
		{
			description: "only standalone pods",
			allPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo",
					Namespace: "test",
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}},
			expectedPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo",
					Namespace: "test",
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}},
		},
		{
			description: "standalone pods and deployment pods",
			allPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo1",
					Namespace: "test",
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}, {
				ObjectMeta: metav1.ObjectMeta{
					Name:            "foo2",
					Namespace:       "test",
					OwnerReferences: []metav1.OwnerReference{{UID: "", Controller: truePtr()}},
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}},
			expectedPods: []v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo1",
					Namespace: "test",
				},
				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
			}},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Copy each pod so we take the address of a fresh, loop-local value.
			objects := make([]runtime.Object, 0, len(test.allPods))
			for i := range test.allPods {
				pod := test.allPods[i]
				objects = append(objects, &pod)
			}
			client := fakekubeclientset.NewSimpleClientset(objects...)
			selector := NewStandalonePodsSelector(client)
			actualPods, err := selector.Select(context.Background(), "test", metav1.ListOptions{})
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expectedPods, actualPods)
		})
	}
}
|
package repo
import (
"github.com/emicklei/go-restful"
)
// RepoResourceOptions carries the configuration used to build a RepoResource.
// NOTE(review): "Url" should be "URL" per Go naming convention, but renaming
// an exported field would break callers — confirm before changing.
type RepoResourceOptions struct {
	RepoDir string // local directory holding the chart repository
	RepoUrl string // remote URL of the chart repository
}
// RepoResource is the REST resource that serves and accepts chart files.
type RepoResource struct {
	RepoDir string // local directory holding the chart repository
	RepoUrl string // remote URL of the chart repository
}
// NewRepoResource builds a RepoResource from the supplied options.
func NewRepoResource(options *RepoResourceOptions) *RepoResource {
	resource := new(RepoResource)
	resource.RepoDir = options.RepoDir
	resource.RepoUrl = options.RepoUrl
	return resource
}
// Register mounts the chart endpoints on the given restful container under
// /envs/{env}/charts.
func (pr *RepoResource) Register(container *restful.Container) {
	ws := new(restful.WebService)
	ws.Path("/envs/{env}/charts").
		Doc("Manage charts").
		Consumes(restful.MIME_XML, restful.MIME_JSON, restful.MIME_OCTET).
		Produces(restful.MIME_JSON, restful.MIME_XML)

	// GET /envs/{env}/charts/{chart} — download a chart file.
	getRoute := ws.GET("/{chart}").To(pr.chartCtrl).
		Doc("get a chart").
		Operation("chartCtrl").
		Param(ws.PathParameter("chart", "identifier of the chart file").DataType("string"))
	ws.Route(getRoute)

	// PUT /envs/{env}/charts/upload/{chart} — upload a chart file.
	putRoute := ws.PUT("/upload/{chart}").To(pr.uploadChartCtrl).
		Doc("upload a chart").
		Operation("uploadChartCtrl").
		Param(ws.PathParameter("chart", "identifier of the chart file").DataType("string"))
	ws.Route(putRoute)

	container.Add(ws)
}
|
/*
MIT License
Copyright (c) 2020-2021 Kazuhito Suda
This file is part of NGSI Go
https://github.com/lets-fiware/ngsi-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package ngsilib
import (
"bytes"
"errors"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// TestRequestTokenWSO2 verifies that a WSO2 token request succeeds and that
// the returned token info carries the WSO2 type and the access token.
func TestRequestTokenWSO2(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// Stub the HTTP layer with a successful WSO2 token response.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ResBody = []byte(`{"scope":"default","token_type":"Bearer","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","access_token":"cba95432f1f8227f5bc6cf4a20633cb3"}`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	actual, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.NoError(t, err) {
		assert.Equal(t, CWSO2, actual.Type)
		assert.Equal(t, "cba95432f1f8227f5bc6cf4a20633cb3", actual.WSO2.AccessToken)
	}
}
// TestRequestTokenWSO2Refresh verifies that a token request with an existing
// refresh token also succeeds and returns the WSO2 access token.
func TestRequestTokenWSO2Refresh(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// Stub the HTTP layer with a successful WSO2 token response.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ResBody = []byte(`{"scope":"default","token_type":"Bearer","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","access_token":"cba95432f1f8227f5bc6cf4a20633cb3"}`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	actual, err := idm.requestToken(ngsi, client, &TokenInfo{RefreshToken: "refresh"})
	if assert.NoError(t, err) {
		assert.Equal(t, CWSO2, actual.Type)
		assert.Equal(t, "cba95432f1f8227f5bc6cf4a20633cb3", actual.WSO2.AccessToken)
	}
}
// TestRequestTokenWSO2ErrorUser verifies that a token request without a
// password is rejected before any HTTP traffic with ErrNo 1.
func TestRequestTokenWSO2ErrorUser(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ResBody = []byte(`{"scope":"default","token_type":"Bearer","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","access_token":"cba95432f1f8227f5bc6cf4a20633cb3"}`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	// Note: Password is intentionally omitted from the server config.
	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	_, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 1, ngsiErr.ErrNo)
		assert.Equal(t, "password is required", ngsiErr.Message)
	}
}
// TestRequestTokenWSO2ErrorHTTP verifies that a transport-level failure is
// surfaced as a LibError with ErrNo 2.
func TestRequestTokenWSO2ErrorHTTP(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// Stub the HTTP layer to fail at the transport level.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ResBody = []byte(`{"scope":"default","token_type":"Bearer","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","access_token":"cba95432f1f8227f5bc6cf4a20633cb3"}`)
	rr.Err = errors.New("http error")
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	_, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 2, ngsiErr.ErrNo)
		assert.Equal(t, "http error", ngsiErr.Message)
	}
}
// TestRequestTokenWSO2ErrorUnmarshal verifies that a malformed JSON response
// body (missing opening brace) produces a LibError with ErrNo 3.
func TestRequestTokenWSO2ErrorUnmarshal(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// The response body deliberately lacks its opening brace.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ResBody = []byte(`"scope":"default","token_type":"Bearer","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","access_token":"cba95432f1f8227f5bc6cf4a20633cb3"}`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	_, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 3, ngsiErr.ErrNo)
		expected := "json: cannot unmarshal string into Go value of type ngsilib.WSO2Token Field: (7) \"scope\":\"default\",\"tok"
		assert.Equal(t, expected, ngsiErr.Message)
	}
}
// TestRequestTokenWSO2ErrorHTTPStatus verifies that a 400 response produces
// a LibError with ErrNo 4 and the response body in the message.
func TestRequestTokenWSO2ErrorHTTPStatus(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// Stub the HTTP layer to return a client error.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusBadRequest
	rr.ResBody = []byte(`bad request`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	_, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 4, ngsiErr.ErrNo)
		assert.Equal(t, "error bad request", ngsiErr.Message)
	}
}
// TestRequestTokenWSO2ErrorHTTPStatusUnauthorized verifies that a 401
// response produces a LibError with ErrNo 4 and the body in the message.
func TestRequestTokenWSO2ErrorHTTPStatusUnauthorized(t *testing.T) {
	ngsi := testNgsiLibInit()
	cacheName := ""
	ngsi.CacheFile = &MockIoLib{filename: &cacheName}
	ngsi.LogWriter = &bytes.Buffer{}
	// Pre-populate the token cache with unrelated entries.
	ngsi.tokenList = tokenInfoList{
		"token1": TokenInfo{},
		"token2": TokenInfo{},
	}

	// Stub the HTTP layer to return an authentication failure.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusUnauthorized
	rr.ResBody = []byte(`Unauthorized`)
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CWSO2, IdmHost: "http://idm", Username: "fiware", Password: "1234", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}

	_, err := idm.requestToken(ngsi, client, &TokenInfo{})
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 4, ngsiErr.ErrNo)
		assert.Equal(t, "error Unauthorized", ngsiErr.Message)
	}
}
// TestRevokeTokenWSO2 verifies that revokeToken posts the refresh token to
// the IdM without error.
func TestRevokeTokenWSO2(t *testing.T) {
	ngsi := testNgsiLibInit()

	// The mock asserts the exact form body sent by revokeToken.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ReqData = []byte("token=1a8346b8df2881c8b3407b0f39c80d1374204b93&token_type_hint=refresh_token")
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CBasic, IdmHost: "http://idm", Username: "fiware", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}
	info := &TokenInfo{RefreshToken: "1a8346b8df2881c8b3407b0f39c80d1374204b93"}

	assert.NoError(t, idm.revokeToken(ngsi, client, info))
}
// TestRevokeTokenWSO2ErrorHTTP verifies that a transport-level failure in
// revokeToken is surfaced as a LibError with ErrNo 1.
func TestRevokeTokenWSO2ErrorHTTP(t *testing.T) {
	ngsi := testNgsiLibInit()

	// Stub the HTTP layer to fail at the transport level.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusOK
	rr.ReqData = []byte("token=1a8346b8df2881c8b3407b0f39c80d1374204b93&token_type_hint=refresh_token")
	rr.Err = errors.New("http error")
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CBasic, IdmHost: "http://idm", Username: "fiware", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}
	info := &TokenInfo{RefreshToken: "1a8346b8df2881c8b3407b0f39c80d1374204b93"}

	err := idm.revokeToken(ngsi, client, info)
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 1, ngsiErr.ErrNo)
		assert.Equal(t, "http error", ngsiErr.Message)
	}
}
// TestRevokeTokenWSO2ErrorHTTPStatus verifies that a 400 response from the
// revoke endpoint is surfaced as a LibError with ErrNo 2.
func TestRevokeTokenWSO2ErrorHTTPStatus(t *testing.T) {
	ngsi := testNgsiLibInit()

	// Stub the HTTP layer to return a client error.
	rr := MockHTTPReqRes{}
	rr.Res.StatusCode = http.StatusBadRequest
	rr.ReqData = []byte("token=1a8346b8df2881c8b3407b0f39c80d1374204b93&token_type_hint=refresh_token")
	rr.ResBody = []byte("bad request")
	httpMock := NewMockHTTP()
	httpMock.ReqRes = append(httpMock.ReqRes, rr)
	ngsi.HTTP = httpMock

	client := &Client{Server: &Server{ServerHost: "http://orion/", IdmType: CBasic, IdmHost: "http://idm", Username: "fiware", ClientID: "0000", ClientSecret: "1111"}}
	idm := &idmWSO2{}
	info := &TokenInfo{RefreshToken: "1a8346b8df2881c8b3407b0f39c80d1374204b93"}

	err := idm.revokeToken(ngsi, client, info)
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 2, ngsiErr.ErrNo)
		assert.Equal(t, "error bad request", ngsiErr.Message)
	}
}
// TestGetAuthHeaderWSO2 verifies that getAuthHeader produces a standard
// Bearer Authorization header.
func TestGetAuthHeaderWSO2(t *testing.T) {
	idm := &idmWSO2{}
	header, token := idm.getAuthHeader("9e7067026d0aac494e8fedf66b1f585e79f52935")
	assert.Equal(t, "Authorization", header)
	assert.Equal(t, "Bearer 9e7067026d0aac494e8fedf66b1f585e79f52935", token)
}
// TestGetTokenInfoWSO2 verifies that getTokenInfo serializes the WSO2 token
// back to its JSON wire form.
func TestGetTokenInfoWSO2(t *testing.T) {
	testNgsiLibInit()
	idm := &idmWSO2{}
	info := &TokenInfo{
		WSO2: &WSO2Token{
			Scope:        "default",
			TokenType:    "Bearer",
			ExpiresIn:    3600,
			RefreshToken: "a7d6bae2b1d36c041787e9c9e2d6cbf8",
			AccessToken:  "cba95432f1f8227f5bc6cf4a20633cb3",
		},
	}

	actual, err := idm.getTokenInfo(info)
	if assert.NoError(t, err) {
		expected := `{"access_token":"cba95432f1f8227f5bc6cf4a20633cb3","expires_in":3600,"refresh_token":"a7d6bae2b1d36c041787e9c9e2d6cbf8","scope":"default","token_type":"Bearer"}`
		assert.Equal(t, expected, string(actual))
	}
}
// TestGetTokenInfoWSO2Error verifies that a JSON encoding failure is
// surfaced as a LibError with ErrNo 1.
func TestGetTokenInfoWSO2Error(t *testing.T) {
	testNgsiLibInit()
	idm := &idmWSO2{}
	info := &TokenInfo{
		WSO2: &WSO2Token{
			Scope:        "default",
			TokenType:    "Bearer",
			ExpiresIn:    3600,
			RefreshToken: "a7d6bae2b1d36c041787e9c9e2d6cbf8",
			AccessToken:  "cba95432f1f8227f5bc6cf4a20633cb3",
		},
	}

	// Force the JSON encoder to fail.
	gNGSI.JSONConverter = &MockJSONLib{EncodeErr: errors.New("json error")}

	_, err := idm.getTokenInfo(info)
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 1, ngsiErr.ErrNo)
		assert.Equal(t, "json error", ngsiErr.Message)
	}
}
// TestCheckIdmParamsWSO2 verifies that a complete WSO2 parameter set passes
// validation.
func TestCheckIdmParamsWSO2(t *testing.T) {
	idm := &idmWSO2{}
	params := &IdmParams{
		IdmHost:      "http://wso2am:8243/token",
		Username:     "fiware",
		Password:     "1234",
		ClientID:     "0000000000000000000000_A_ZZZ",
		ClientSecret: "00000000-1111-2222-3333-444444444444",
	}
	assert.NoError(t, idm.checkIdmParams(params))
}
// TestCheckIdmParamsWSO2Error verifies that a parameter set missing the
// client secret is rejected with ErrNo 1 and the expected message.
func TestCheckIdmParamsWSO2Error(t *testing.T) {
	idm := &idmWSO2{}
	// ClientSecret is intentionally omitted.
	params := &IdmParams{
		IdmHost:  "http://wso2am:8243/token",
		Username: "fiware",
		Password: "1234",
		ClientID: "0000000000000000000000_A_ZZZ",
	}
	err := idm.checkIdmParams(params)
	if assert.Error(t, err) {
		ngsiErr := err.(*LibError)
		assert.Equal(t, 1, ngsiErr.ErrNo)
		assert.Equal(t, "idmHost, username, password, clientID and clientSecret are needed", ngsiErr.Message)
	}
}
|
package mongodb
import (
"context"
"fmt"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// Repository wraps a single MongoDB collection with generic CRUD helpers.
type Repository struct {
	collection *mongo.Collection
}
// NewRepository returns a Repository backed by the named collection of db.
//
// Bug fix: the collName parameter was previously ignored and the "urls"
// collection was always used, so every Repository silently shared one
// collection regardless of the name the caller asked for.
func NewRepository(db *Client, collName string) *Repository {
	collection := db.Database.Collection(collName)
	return &Repository{collection}
}
// FindAll runs filter against the collection and decodes every matching
// document into v, which is also returned.
func (m *Repository) FindAll(ctx context.Context, filter interface{}, v []interface{}) ([]interface{}, error) {
	cursor, err := m.collection.Find(ctx, filter)
	if err != nil {
		return nil, err
	}
	if err := cursor.All(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}
// CreateDocument inserts d into the collection and returns the generated
// ObjectID. A nil document is rejected; a nil insert result yields the nil
// ObjectID.
func (m *Repository) CreateDocument(ctx context.Context, d interface{}) (primitive.ObjectID, error) {
	if d == nil {
		return primitive.NilObjectID, fmt.Errorf("document cannot be nil")
	}
	res, err := m.collection.InsertOne(ctx, d)
	switch {
	case err != nil:
		return primitive.NilObjectID, err
	case res == nil:
		return primitive.NilObjectID, nil
	default:
		return res.InsertedID.(primitive.ObjectID), nil
	}
}
// Count returns the number of documents matching filter f.
func (m *Repository) Count(ctx context.Context, f interface{}) (int64, error) {
	return m.collection.CountDocuments(ctx, f)
}
// FindOne decodes the first document matching filter f into v.
func (m *Repository) FindOne(ctx context.Context, f interface{}, v interface{}) error {
	return m.collection.FindOne(ctx, f).Decode(v)
}
// Delete removes every document matching filter f (not just the first).
func (m *Repository) Delete(ctx context.Context, f interface{}) error {
	_, err := m.collection.DeleteMany(ctx, f)
	return err
}
// Update applies d as a $set update to the document with the given ObjectID
// and decodes the post-update document back into d.
//
// Errors are wrapped with %w (previously %v) so callers can inspect the
// underlying driver error with errors.Is/errors.As; the message text is
// unchanged.
func (m *Repository) Update(ctx context.Context, oid primitive.ObjectID, d interface{}) error {
	res := m.collection.FindOneAndUpdate(
		ctx,
		primitive.M{"_id": oid},
		primitive.M{"$set": d},
		// Return the document as it is after the update, not before.
		options.FindOneAndUpdate().SetReturnDocument(options.After))
	if err := res.Err(); err != nil {
		return fmt.Errorf("failed to perform find one and update: %w", err)
	}
	if err := res.Decode(d); err != nil {
		return fmt.Errorf("failed to decode: %w", err)
	}
	return nil
}
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"sync"
chatgpt "github.com/golang-infrastructure/go-ChatGPT"
"github.com/line/line-bot-sdk-go/linebot"
)
// mu is declared for guarding shared state, but no use is visible in this
// file chunk — NOTE(review): confirm it is used elsewhere before removing.
var mu sync.Mutex

// User holds a display name plus check-in/check-out times as hour/minute
// pairs — NOTE(review): not referenced by the visible code; confirm usage.
type User struct {
	DisplayName string
	InHour int
	InMin int
	OutHour int
	OutMin int
}

// bot is the shared LINE bot client, initialized once in main.
var bot *linebot.Client

// chatGptURL is the OpenAI completions endpoint — NOTE(review): unused by
// the visible code, which talks to ChatGPT through the chatgpt package.
const chatGptURL = "https://api.openai.com/v1/completions"
// main starts an HTTP server that receives LINE webhook callbacks on
// /callback and relays each text message to ChatGPT, replying with the
// generated answer.
func main() {
	client, err := linebot.New(
		os.Getenv("CHANNEL_SECRET"),
		os.Getenv("CHANNEL_TOKEN"),
	)
	// Check the error before publishing the client to the package-level var
	// (previously the assignment happened first).
	if err != nil {
		log.Fatal(err)
	}
	bot = client
	http.HandleFunc("/callback", func(w http.ResponseWriter, req *http.Request) {
		events, err := bot.ParseRequest(req)
		if err != nil {
			// A bad signature is the caller's fault; anything else is ours.
			if err == linebot.ErrInvalidSignature {
				w.WriteHeader(http.StatusBadRequest)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
			return
		}
		for _, event := range events {
			if event.Type != linebot.EventTypeMessage {
				continue
			}
			switch message := event.Message.(type) {
			case *linebot.TextMessage:
				// Ask ChatGPT for a response to the user's text.
				response, err := callChatGptAPI(message.Text)
				if err != nil {
					fmt.Println(err)
					continue
				}
				// Reply to the user with the generated answer.
				if _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(response)).Do(); err != nil {
					fmt.Println(err)
				}
			}
		}
	})
	// This is just sample code.
	// For actual use, you must support HTTPS by using `ListenAndServeTLS`, a reverse proxy or something else.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}
// callChatGptAPI sends input to ChatGPT and returns the first part of the
// reply.
//
// Security fix: the session JWT was previously hard-coded in the source,
// leaking the credential to anyone with repository access (the embedded
// token had also expired). It is now read from the CHATGPT_JWT environment
// variable, consistent with how main reads CHANNEL_SECRET/CHANNEL_TOKEN.
func callChatGptAPI(input string) (string, error) {
	jwt := os.Getenv("CHATGPT_JWT")
	if jwt == "" {
		return "", fmt.Errorf("CHATGPT_JWT environment variable is not set")
	}
	chat := chatgpt.NewChatGPT(jwt)
	talk, err := chat.Talk(input)
	if err != nil {
		fmt.Println(err.Error())
		return "", err
	}
	return talk.Message.Content.Parts[0], nil
}
|
package wax
import (
"fmt"
"github.com/pgavlin/warp/wasm"
"github.com/pgavlin/warp/wasm/code"
"github.com/willf/bitset"
)
// ValueTypeBool is an extra value type used for boolean pseudo-expressions.
// NOTE(review): confirm it does not collide with a real wasm.ValueType value.
const ValueTypeBool = 1
// Flags describes the observable effects of an expression: which kinds of
// storage it reads or writes, whether it may trap, and whether it is a
// pseudo-instruction.
type Flags int32

const (
	FlagsLoadLocal = 1 << iota
	FlagsLoadGlobal
	FlagsLoadMem
	FlagsStoreLocal
	FlagsStoreGlobal
	FlagsStoreMem
	FlagsMayTrap
	FlagsPseudo
	FlagsBackend = 1 << 16

	// FlagsLoadMask covers the three load bits; FlagsStoreMask extends the
	// mask through the store bits.
	FlagsLoadMask  = (FlagsLoadMem << 1) - 1
	FlagsStoreMask = (FlagsStoreMem << 1) - 1
)

// CanMoveAfter reports whether an expression with flags f may be reordered
// past one with flags g. Shifting a store bit right by 3 yields the matching
// load bit, so the condition is: neither expression stores a location that
// the other loads or stores.
func (f Flags) CanMoveAfter(g Flags) bool {
	fStoresWhatGTouches := ((f & FlagsStoreMask) >> 3) & g
	gStoresWhatFTouches := ((g & FlagsStoreMask) >> 3) & f
	return fStoresWhatGTouches == 0 && gStoresWhatFTouches == 0
}
// Expression is one node of a function's expression tree: a (possibly
// pseudo) instruction together with its operands and effect flags.
type Expression struct {
	Function *Function // enclosing function
	IP int // instruction offset within the function body
	Instr code.Instruction // the underlying (or pseudo) instruction
	Uses Uses // operands consumed by this expression
	Flags Flags // observable effects of this expression
}
// Pseudo-instruction opcodes. Opcodes at or above PseudoBackend are reserved
// for backend-specific pseudo-instructions.
const (
	PseudoBoolConst = 0 + iota
	PseudoI32ConvertBool
	PseudoBackend = 128
)
// Pseudo builds a pseudo-instruction expression for f with the given opcode,
// immediate, flags, and operands. FlagsPseudo is always set on the result.
func Pseudo(f *Function, opcode byte, immediate uint64, flags Flags, uses ...*Use) *Expression {
	instr := code.Instruction{Opcode: opcode, Immediate: immediate}
	x := &Expression{Function: f, Instr: instr, Uses: uses}
	x.Flags = flags | FlagsPseudo
	return x
}
// BoolConst returns the boolean value of a PseudoBoolConst expression: any
// nonzero immediate is true.
func BoolConst(x *Expression) bool {
	return x.Instr.Immediate != 0
}

// IsPseudo reports whether x is a pseudo-instruction rather than a real
// wasm instruction.
func (x *Expression) IsPseudo() bool {
	return x.Flags&FlagsPseudo != 0
}
// Format implements fmt.Formatter by delegating to the function's Formatter.
func (x *Expression) Format(f fmt.State, verb rune) {
	x.Function.Formatter.FormatExpression(f, verb, x)
}
// Use is one operand of an expression: either a reference to the expression
// that produces the value (X != nil) or a materialized temporary (X == nil).
type Use struct {
	Function *Function // enclosing function
	Type wasm.ValueType // value type of the operand
	AllFlags Flags // union of X's flags with those of X's own operands
	Locals bitset.BitSet // locals this use depends on — NOTE(review): not populated in this chunk; confirm semantics
	Temp int // temporary slot when X == nil
	X *Expression // producing expression, or nil for a temp
}
// UseExpression wraps x as an operand of the given type. AllFlags is the
// union of x's flags with the accumulated flags of x's own operands.
func UseExpression(type_ wasm.ValueType, x *Expression) *Use {
	combined := x.Flags
	for _, operand := range x.Uses {
		combined |= operand.AllFlags
	}
	u := &Use{Function: x.Function, Type: type_, X: x}
	u.AllFlags = combined
	return u
}
// Format implements fmt.Formatter by delegating to the function's Formatter.
func (u *Use) Format(f fmt.State, verb rune) {
	u.Function.Formatter.FormatUse(f, verb, u)
}

// IsTemp reports whether the use refers to a materialized temporary rather
// than a producing expression.
func (u *Use) IsTemp() bool {
	return u.X == nil
}
// CanMoveAfter reports whether this use may be reordered past code with the
// given effect flags and set of stored-to locals.
func (u *Use) CanMoveAfter(flags Flags, localStores bitset.BitSet) bool {
	// A temporary is already materialized, so reordering cannot change it.
	if u.IsTemp() {
		return true
	}
	// If there is global or memory interference, no move is possible.
	if !(u.AllFlags &^ FlagsLoadLocal).CanMoveAfter(flags &^ (FlagsLoadLocal | FlagsStoreLocal)) {
		return false
	}
	// Otherwise, the use can be moved if the local sets do not intersect.
	return u.Locals.IntersectionCardinality(&localStores) == 0
}
// IsConst reports whether the use refers to a numeric constant instruction.
func (u *Use) IsConst() bool {
	if u.IsTemp() {
		return false
	}
	switch u.X.Instr.Opcode {
	case code.OpI32Const, code.OpI64Const, code.OpF32Const, code.OpF64Const:
		return true
	default:
		return false
	}
}
// IsZeroIConst reports whether the use is an integer constant equal to zero.
func (u *Use) IsZeroIConst() bool {
	if u.IsTemp() {
		return false
	}
	switch u.X.Instr.Opcode {
	case code.OpI32Const:
		return u.X.Instr.I32() == 0
	case code.OpI64Const:
		return u.X.Instr.I64() == 0
	default:
		return false
	}
}
// Uses is the operand list of an expression.
type Uses []*Use

// Format implements fmt.Formatter; an empty list prints nothing.
func (us Uses) Format(f fmt.State, verb rune) {
	if len(us) != 0 {
		us[0].Function.Formatter.FormatUses(f, verb, us)
	}
}
// Block describes a structured control block during translation.
// NOTE(review): field semantics below are inferred from names where this
// chunk shows no uses — confirm against the translator.
type Block struct {
	Entry *Expression // expression that opens the block
	Else *Expression // else marker, for if blocks
	End *Expression // expression that closes the block
	Label int
	StackHeight int
	BranchTarget bool // some branch targets this block
	Unreachable bool
	NeverReachable bool
	Ins []wasm.ValueType // block parameter types
	Outs []wasm.ValueType // block result types
	InTemp int
	OutTemp int
}
// Def is a definition: an expression together with the block it belongs to,
// the blocks it may branch to, its result types, and its temporary slot.
type Def struct {
	*Expression
	Block *Block
	BranchTargets []*Block
	Types []wasm.ValueType
	Temp int
}
// Format implements fmt.Formatter by delegating to the function's Formatter.
func (d *Def) Format(f fmt.State, verb rune) {
	d.Function.Formatter.FormatDef(f, verb, d)
}
// Formatter renders expressions, uses, and defs; implementations provide
// backend-specific formatting.
type Formatter interface {
	FormatExpression(f fmt.State, verb rune, x *Expression)
	FormatUse(f fmt.State, verb rune, u *Use)
	FormatUses(f fmt.State, verb rune, us Uses)
	FormatDef(f fmt.State, verb rune, d *Def)
}
|
package tests_test
import (
"sigs.k8s.io/kustomize/k8sdeps/kunstruct"
"sigs.k8s.io/kustomize/k8sdeps/transformer"
"sigs.k8s.io/kustomize/pkg/fs"
"sigs.k8s.io/kustomize/pkg/loader"
"sigs.k8s.io/kustomize/pkg/resmap"
"sigs.k8s.io/kustomize/pkg/resource"
"sigs.k8s.io/kustomize/pkg/target"
"testing"
)
func writeKatibBase(th *KustTestHarness) {
th.writeF("/manifests/katib/base/katib-db-pvc.yaml", `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: katib-mysql
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
`)
th.writeF("/manifests/katib/base/katib-ui-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: katib-ui
labels:
component: ui
spec:
replicas: 1
template:
metadata:
name: katib-ui
labels:
component: ui
spec:
containers:
- name: katib-ui
image: gcr.io/kubeflow-images-public/katib/katib-ui:v0.1.2-alpha-156-g4ab3dbd
command:
- './katib-ui'
ports:
- name: ui
containerPort: 80
serviceAccountName: katib-ui
`)
th.writeF("/manifests/katib/base/katib-ui-rbac.yaml", `
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: katib-ui
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- "*"
- apiGroups:
- kubeflow.org
resources:
- studyjobs
verbs:
- "*"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: katib-ui
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: katib-ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: katib-ui
subjects:
- kind: ServiceAccount
name: katib-ui
`)
th.writeF("/manifests/katib/base/katib-ui-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: katib-ui
labels:
component: ui
spec:
type: ClusterIP
ports:
- port: 80
protocol: TCP
name: ui
selector:
component: ui
`)
th.writeF("/manifests/katib/base/katib-ui-virtual-service.yaml", `
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: katib-ui
spec:
gateways:
- kubeflow-gateway
hosts:
- '*'
http:
- match:
- uri:
prefix: /katib/
rewrite:
uri: /katib/
route:
- destination:
host: katib-ui.$(namespace).svc.$(clusterDomain)
port:
number: 80
`)
th.writeF("/manifests/katib/base/metrics-collector-rbac.yaml", `
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: metrics-collector
rules:
- apiGroups:
- ""
resources:
- pods
- pods/log
- pods/status
verbs:
- "*"
- apiGroups:
- batch
resources:
- jobs
verbs:
- "*"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-collector
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: metrics-collector
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metrics-collector
subjects:
- kind: ServiceAccount
name: metrics-collector
`)
th.writeF("/manifests/katib/base/metrics-collector-template-configmap.yaml", `
apiVersion: v1
kind: ConfigMap
metadata:
name: metricscollector-template
data:
defaultMetricsCollectorTemplate.yaml : |-
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{.WorkerID}}
namespace: {{.NameSpace}}
spec:
schedule: "*/1 * * * *"
successfulJobsHistoryLimit: 0
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
spec:
serviceAccountName: metrics-collector
containers:
- name: {{.WorkerID}}
image: gcr.io/kubeflow-images-public/katib/metrics-collector:v0.1.2-alpha-156-g4ab3dbd
args:
- "./metricscollector"
- "-s"
- "{{.StudyID}}"
- "-t"
- "{{.TrialID}}"
- "-w"
- "{{.WorkerID}}"
- "-k"
- "{{.WorkerKind}}"
- "-n"
- "{{.NameSpace}}"
- "-m"
- "{{.ManagerSerivce}}"
restartPolicy: Never
`)
th.writeF("/manifests/katib/base/studyjob-controller-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: studyjob-controller
labels:
app: studyjob-controller
spec:
replicas: 1
selector:
matchLabels:
app: studyjob-controller
template:
metadata:
labels:
app: studyjob-controller
spec:
serviceAccountName: studyjob-controller
containers:
- name: studyjob-controller
image: gcr.io/kubeflow-images-public/katib/studyjob-controller:v0.1.2-alpha-156-g4ab3dbd
imagePullPolicy: Always
ports:
- containerPort: 443
name: validating
protocol: TCP
env:
- name: VIZIER_CORE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
`)
th.writeF("/manifests/katib/base/studyjob-crd.yaml", `
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: studyjobs.kubeflow.org
spec:
group: kubeflow.org
version: v1alpha1
scope: Namespaced
names:
kind: StudyJob
singular: studyjob
plural: studyjobs
additionalPrinterColumns:
- JSONPath: .status.condition
name: Condition
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
`)
th.writeF("/manifests/katib/base/studyjob-rbac.yaml", `
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: studyjob-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
- serviceaccounts
- services
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- pods/log
- pods/status
verbs:
- "*"
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- "*"
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- '*'
- apiGroups:
- kubeflow.org
resources:
- studyjobs
verbs:
- "*"
- apiGroups:
- kubeflow.org
resources:
- tfjobs
- pytorchjobs
verbs:
- "*"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: studyjob-controller
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: studyjob-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: studyjob-controller
subjects:
- kind: ServiceAccount
name: studyjob-controller
`)
th.writeF("/manifests/katib/base/studyjob-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: studyjob-controller
spec:
ports:
- port: 443
protocol: TCP
targetPort: 443
selector:
app: studyjob-controller
`)
th.writeF("/manifests/katib/base/suggestion-bayesianoptimization-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-suggestion-bayesianoptimization
labels:
component: suggestion-bayesianoptimization
spec:
replicas: 1
template:
metadata:
name: vizier-suggestion-bayesianoptimization
labels:
component: suggestion-bayesianoptimization
spec:
containers:
- name: vizier-suggestion-bayesianoptimization
image: gcr.io/kubeflow-images-public/katib/suggestion-bayesianoptimization:v0.1.2-alpha-156-g4ab3dbd
ports:
- name: api
containerPort: 6789
`)
th.writeF("/manifests/katib/base/suggestion-bayesianoptimization-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-suggestion-bayesianoptimization
labels:
component: suggestion-bayesianoptimization
spec:
type: ClusterIP
ports:
- port: 6789
protocol: TCP
name: api
selector:
component: suggestion-bayesianoptimization
`)
th.writeF("/manifests/katib/base/suggestion-grid-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-suggestion-grid
labels:
component: suggestion-grid
spec:
replicas: 1
template:
metadata:
name: vizier-suggestion-grid
labels:
component: suggestion-grid
spec:
containers:
- name: vizier-suggestion-grid
image: gcr.io/kubeflow-images-public/katib/suggestion-grid:v0.1.2-alpha-156-g4ab3dbd
ports:
- name: api
containerPort: 6789
`)
th.writeF("/manifests/katib/base/suggestion-grid-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-suggestion-grid
labels:
component: suggestion-grid
spec:
type: ClusterIP
ports:
- port: 6789
protocol: TCP
name: api
selector:
component: suggestion-grid
`)
th.writeF("/manifests/katib/base/suggestion-hyperband-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-suggestion-hyperband
labels:
component: suggestion-hyperband
spec:
replicas: 1
template:
metadata:
name: vizier-suggestion-hyperband
labels:
component: suggestion-hyperband
spec:
containers:
- name: vizier-suggestion-hyperband
image: gcr.io/kubeflow-images-public/katib/suggestion-hyperband:v0.1.2-alpha-156-g4ab3dbd
ports:
- name: api
containerPort: 6789
`)
th.writeF("/manifests/katib/base/suggestion-hyperband-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-suggestion-hyperband
labels:
component: suggestion-hyperband
spec:
type: ClusterIP
ports:
- port: 6789
protocol: TCP
name: api
selector:
component: suggestion-hyperband
`)
th.writeF("/manifests/katib/base/suggestion-nasrl-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-suggestion-nasrl
labels:
component: suggestion-nasrl
spec:
replicas: 1
template:
metadata:
name: vizier-suggestion-nasrl
labels:
component: suggestion-nasrl
spec:
containers:
- name: vizier-suggestion-nasrl
image: gcr.io/kubeflow-images-public/katib/suggestion-nasrl:v0.1.2-alpha-156-g4ab3dbd
ports:
- name: api
containerPort: 6789
`)
th.writeF("/manifests/katib/base/suggestion-nasrl-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-suggestion-nasrl
labels:
component: suggestion-nasrl
spec:
type: ClusterIP
ports:
- port: 6789
protocol: TCP
name: api
selector:
component: suggestion-nasrl
`)
th.writeF("/manifests/katib/base/suggestion-random-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-suggestion-random
labels:
component: suggestion-random
spec:
replicas: 1
template:
metadata:
name: vizier-suggestion-random
labels:
component: suggestion-random
spec:
containers:
- name: vizier-suggestion-random
image: gcr.io/kubeflow-images-public/katib/suggestion-random:v0.1.2-alpha-156-g4ab3dbd
ports:
- name: api
containerPort: 6789
`)
th.writeF("/manifests/katib/base/suggestion-random-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-suggestion-random
labels:
component: suggestion-random
spec:
type: ClusterIP
ports:
- port: 6789
protocol: TCP
name: api
selector:
component: suggestion-random
`)
th.writeF("/manifests/katib/base/vizier-core-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-core
labels:
component: core
spec:
replicas: 1
template:
metadata:
name: vizier-core
labels:
component: core
spec:
serviceAccountName: vizier-core
containers:
- name: vizier-core
image: gcr.io/kubeflow-images-public/katib/vizier-core:v0.1.2-alpha-156-g4ab3dbd
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: vizier-db-secrets
key: MYSQL_ROOT_PASSWORD
command:
- './vizier-manager'
ports:
- name: api
containerPort: 6789
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:6789"]
initialDelaySeconds: 5
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:6789"]
initialDelaySeconds: 10
`)
th.writeF("/manifests/katib/base/vizier-core-rbac.yaml", `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: vizier-core
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vizier-core
subjects:
- kind: ServiceAccount
name: vizier-core
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: vizier-core
rules:
- apiGroups: [""]
resources: ["pods", "nodes", "nodes/*", "pods/log", "pods/status", "services", "persistentvolumes", "persistentvolumes/status","persistentvolumeclaims","persistentvolumeclaims/status"]
verbs: ["*"]
- apiGroups: ["batch"]
resources: ["jobs", "jobs/status"]
verbs: ["*"]
- apiGroups: ["extensions"]
verbs: ["*"]
resources: ["ingresses","ingresses/status","deployments","deployments/status"]
- apiGroups: [""]
verbs: ["*"]
resources: ["services"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vizier-core
`)
th.writeF("/manifests/katib/base/vizier-core-rest-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-core-rest
labels:
component: core-rest
spec:
replicas: 1
template:
metadata:
name: vizier-core-rest
labels:
component: core-rest
spec:
containers:
- name: vizier-core-rest
image: gcr.io/kubeflow-images-public/katib/vizier-core-rest:v0.1.2-alpha-156-g4ab3dbd
command:
- './vizier-manager-rest'
ports:
- name: api
containerPort: 80
`)
th.writeF("/manifests/katib/base/vizier-core-rest-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-core-rest
labels:
component: core-rest
spec:
type: ClusterIP
ports:
- port: 80
protocol: TCP
name: api
selector:
component: core-rest
`)
th.writeF("/manifests/katib/base/vizier-core-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-core
labels:
component: core
spec:
type: NodePort
ports:
- port: 6789
protocol: TCP
nodePort: 30678
name: api
selector:
component: core
`)
th.writeF("/manifests/katib/base/vizier-db-deployment.yaml", `
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vizier-db
labels:
component: db
spec:
replicas: 1
template:
metadata:
name: vizier-db
labels:
component: db
spec:
containers:
- name: vizier-db
image: mysql:8.0.3
args:
- --datadir
- /var/lib/mysql/datadir
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: vizier-db-secrets
key: MYSQL_ROOT_PASSWORD
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "true"
- name: MYSQL_DATABASE
value: "vizier"
ports:
- name: dbapi
containerPort: 3306
readinessProbe:
exec:
command:
- "/bin/bash"
- "-c"
- "mysql -D $$MYSQL_DATABASE -p$$MYSQL_ROOT_PASSWORD -e 'SELECT 1'"
initialDelaySeconds: 5
periodSeconds: 2
timeoutSeconds: 1
volumeMounts:
- name: katib-mysql
mountPath: /var/lib/mysql
volumes:
- name: katib-mysql
persistentVolumeClaim:
claimName: katib-mysql
`)
th.writeF("/manifests/katib/base/vizier-db-secret.yaml", `
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: vizier-db-secrets
data:
MYSQL_ROOT_PASSWORD: dGVzdA== # "test"
`)
th.writeF("/manifests/katib/base/vizier-db-service.yaml", `
apiVersion: v1
kind: Service
metadata:
name: vizier-db
labels:
component: db
spec:
type: ClusterIP
ports:
- port: 3306
protocol: TCP
name: dbapi
selector:
component: db
`)
th.writeF("/manifests/katib/base/worker-template.yaml", `
apiVersion: v1
kind: ConfigMap
metadata:
name: worker-template
data:
defaultWorkerTemplate.yaml : |-
apiVersion: batch/v1
kind: Job
metadata:
name: {{.WorkerID}}
namespace: {{.NameSpace}}
spec:
template:
spec:
containers:
- name: {{.WorkerID}}
image: alpine
restartPolicy: Never
`)
th.writeF("/manifests/katib/base/params.yaml", `
varReference:
- path: spec/http/route/destination/host
kind: VirtualService
`)
th.writeF("/manifests/katib/base/params.env", `
clusterDomain=cluster.local
`)
th.writeK("/manifests/katib/base", `
namespace: kubeflow
resources:
- katib-db-pvc.yaml
- katib-ui-deployment.yaml
- katib-ui-rbac.yaml
- katib-ui-service.yaml
- katib-ui-virtual-service.yaml
- metrics-collector-rbac.yaml
- metrics-collector-template-configmap.yaml
- studyjob-controller-deployment.yaml
- studyjob-crd.yaml
- studyjob-rbac.yaml
- studyjob-service.yaml
- suggestion-bayesianoptimization-deployment.yaml
- suggestion-bayesianoptimization-service.yaml
- suggestion-grid-deployment.yaml
- suggestion-grid-service.yaml
- suggestion-hyperband-deployment.yaml
- suggestion-hyperband-service.yaml
- suggestion-nasrl-deployment.yaml
- suggestion-nasrl-service.yaml
- suggestion-random-deployment.yaml
- suggestion-random-service.yaml
- vizier-core-deployment.yaml
- vizier-core-rbac.yaml
- vizier-core-rest-deployment.yaml
- vizier-core-rest-service.yaml
- vizier-core-service.yaml
- vizier-db-deployment.yaml
- vizier-db-secret.yaml
- vizier-db-service.yaml
- worker-template.yaml
configMapGenerator:
- name: parameters
env: params.env
generatorOptions:
disableNameSuffixHash: true
images:
- name: gcr.io/kubeflow-images-public/katib/vizier-core
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/suggestion-hyperband
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/katib-ui
newTag: v0.1.2-alpha-157-g3d4cd04
- name: mysql
newTag: 8.0.3
- name: gcr.io/kubeflow-images-public/katib/suggestion-bayesianoptimization
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/suggestion-grid
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/vizier-core-rest
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/metrics-collector
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/studyjob-controller
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/suggestion-random
newTag: v0.1.2-alpha-157-g3d4cd04
- name: gcr.io/kubeflow-images-public/katib/suggestion-nasrl
newTag: v0.1.2-alpha-157-g3d4cd04
vars:
- name: clusterDomain
objref:
kind: ConfigMap
name: parameters
apiVersion: v1
fieldref:
fieldpath: data.clusterDomain
- name: namespace
objref:
kind: Service
name: katib-ui
apiVersion: v1
fieldref:
fieldpath: metadata.namespace
configurations:
- params.yaml
`)
}
// TestKatibBase renders the katib base kustomization twice — once from
// the in-memory harness files written by writeKatibBase, once from the
// real ../katib/base directory — and asserts both produce the same YAML.
func TestKatibBase(t *testing.T) {
	th := NewKustTestHarness(t, "/manifests/katib/base")
	writeKatibBase(th)
	m, err := th.makeKustTarget().MakeCustomizedResMap()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	targetPath := "../katib/base"
	fsys := fs.MakeRealFS()
	_loader, loaderErr := loader.NewLoader(targetPath, fsys)
	if loaderErr != nil {
		t.Fatalf("could not load kustomize loader: %v", loaderErr)
	}
	rf := resmap.NewFactory(resource.NewFactory(kunstruct.NewKunstructuredFactoryImpl()))
	kt, err := target.NewKustTarget(_loader, rf, transformer.NewFactoryImpl())
	if err != nil {
		t.Fatalf("Unexpected construction error %v", err)
	}
	n, err := kt.MakeCustomizedResMap()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	expected, err := n.EncodeAsYaml()
	// BUG FIX: the error from EncodeAsYaml was previously ignored, so a
	// failed encoding would have been compared as an empty string.
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	th.assertActualEqualsExpected(m, string(expected))
}
|
package code
import (
"github.com/spf13/viper"
"os"
"strings"
)
/*
*@Author Administrator
*@Date 9/4/2021 12:17
*@desc
*/
// RouterFile generates a router source file for the given table name.
// It renders routerTemp, substituting the {{model}}, {{path}},
// {{table}} and {{router}} placeholders, and writes the result to
// <temp.path>/<temp.routerpath>/<table>.go.
//
// It now returns an error on write failure; existing callers that
// invoke it as a bare statement are unaffected.
func RouterFile(SelectTableName string) error {
	f2 := new(FileNameChange)
	model := f2.Case2Camel(SelectTableName)
	router := f2.Lcfirst(model)
	all := strings.ReplaceAll(routerTemp, "{{model}}", model)
	path := viper.GetString("temp.path")
	modepath := viper.GetString("temp.modepath")
	routerpath := viper.GetString("temp.routerpath")
	routerpath = path + "/" + routerpath
	CreateMutiDir(routerpath)
	modelFile := routerpath + "/" + SelectTableName + ".go"
	all = strings.ReplaceAll(all, "{{path}}", modepath+"/"+path)
	all = strings.ReplaceAll(all, "{{table}}", SelectTableName)
	all = strings.ReplaceAll(all, "{{router}}", router)
	// BUG FIX: the error from os.Create was discarded and the file
	// handle was never closed (resource leak); Write's error was also
	// silently ignored.
	f, err := os.Create(modelFile)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write([]byte(all))
	return err
}
|
package ssss
import (
"fmt"
)
import ()
// Result is the standard response envelope returned to API clients.
type Result struct {
	Code int `json:"code" xml:"code"` // 0 on success; any other value is an error code.
	Message string `json:"message,omitempty" xml:"message,omitempty"` // Optional human-readable message.
	Info interface{} `json:"info,omitempty" xml:"info,omitempty"` // Result payload; only set when Code is 0.
}
// NewErrorResult builds a Result carrying the given error code. Any
// extra values are rendered into the Message field via fmt.Sprint.
func NewErrorResult(code int, msgs ...interface{}) *Result {
	r := &Result{Code: code}
	if len(msgs) > 0 {
		r.Message = fmt.Sprint(msgs...)
	}
	return r
}
// NewSucceedResult wraps a successful payload in a Result with Code 0.
func NewSucceedResult(info interface{}) *Result {
	result := new(Result)
	result.Info = info
	return result
}
// convertErrorResult normalizes an arbitrary recovered value into a
// *Result: Result values pass through, errors become runtime-error
// Results, and anything else is stringified.
func convertErrorResult(err interface{}) *Result {
	if r, ok := err.(*Result); ok {
		return r
	}
	if r, ok := err.(Result); ok {
		return &r
	}
	if e, ok := err.(error); ok {
		return NewErrorResult(ERROR_CODE_RUNTIME, e.Error())
	}
	if err != nil {
		return NewErrorResult(ERROR_CODE_RUNTIME, fmt.Sprint(err))
	}
	return NewErrorResult(ERROR_CODE_RUNTIME, "运行时异常")
}
|
package service
import (
"fmt"
"github.com/google/uuid"
"io/ioutil"
"net/http"
"os"
"time"
"verification-service/dto"
"verification-service/model"
"verification-service/repository"
)
type VerificationService struct {
VerificationRepository *repository.VerificationRepository
}
// AddVerificationRequest stamps the request with the submission time,
// an unanswered status and a fresh UUID, then persists it via the
// repository, returning any storage error.
func (handler *VerificationService) AddVerificationRequest(request *model.VerificationRequest) error {
	request.DateSubmitted = time.Now()
	request.IsAnswered = false
	request.Id = uuid.New()
	return handler.VerificationRepository.AddRequest(request)
}
// GetAllUnAnsweredRequests loads every pending verification request and
// attaches each one's image bytes read from its file path. Image reads
// are best-effort: a failed read is printed and leaves Image nil,
// matching the repository data otherwise untouched.
func (handler *VerificationService) GetAllUnAnsweredRequests() ([]model.VerificationRequest, error) {
	requests, err := handler.VerificationRepository.GetAllUnAnsweredRequests()
	if err != nil {
		return nil, err
	}
	for i := range requests {
		data, readErr := ioutil.ReadFile(requests[i].Path)
		if readErr != nil {
			fmt.Print(readErr)
		}
		requests[i].Image = data
	}
	return requests, nil
}
// AnswerRequest records a moderator's verdict on a verification
// request. On approval it additionally notifies the user service over
// HTTP (authorized with the caller's token) before persisting the
// updated request; on rejection it stores the textual answer instead.
func (handler *VerificationService) AnswerRequest(requestDTO *dto.VerificationAnswerDTO, token string) error {
	uid, err := uuid.Parse(requestDTO.Id)
	if err != nil {
		// BUG FIX: previously returned nil here, silently swallowing
		// malformed request ids.
		return err
	}
	request, err := handler.VerificationRepository.GetVerificationRequestById(uid)
	if err != nil {
		return err
	}
	// TODO send notification?
	request.DateAnswered = time.Now()
	request.IsAnswered = true
	request.VerificationAnswer = requestDTO.VerificationAnswer
	if requestDTO.VerificationAnswer {
		client := &http.Client{}
		// TODO put env
		requestUrl := fmt.Sprintf("http://%s:%s/verify/", os.Getenv("USER_SERVICE_DOMAIN"), os.Getenv("USER_SERVICE_PORT"))
		req, err := http.NewRequest(http.MethodPut, requestUrl+request.Username, nil)
		if err != nil {
			// BUG FIX: was panic(err); a bad request is a recoverable
			// error, not a programmer invariant violation.
			return err
		}
		req.Header.Set("Host", "http://verification-service:8080")
		req.Header.Set("Authorization", token)
		resp, err := client.Do(req)
		if err != nil {
			return err
		}
		// BUG FIX: the response was discarded without closing its body,
		// leaking the connection.
		resp.Body.Close()
	} else {
		request.Answer = requestDTO.Answer
	}
	return handler.VerificationRepository.UpdateVerificationRequest(request)
}
|
// Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controlplane
import (
"errors"
"io/ioutil"
"os"
"github.com/banzaicloud/banzai-cli/internal/cli/utils"
"github.com/goph/emperror"
log "github.com/sirupsen/logrus"
)
// valuesDefault is the default file name for values overrides —
// presumably consumed elsewhere in this package; not referenced in the
// visible code.
const valuesDefault = "values.yaml"
// copyKubeconfig copies the current kubeconfig to the named file, a
// location where it is more likely mountable into Docker for Mac. The
// source is $KUBECONFIG, falling back to $HOME/.kube/config; the file
// is parsed first so we can log the current context and fail early on
// malformed content.
func copyKubeconfig(kubeconfigName string) error {
	source := os.Getenv("KUBECONFIG")
	if source == "" {
		source = os.Getenv("HOME") + "/.kube/config"
	}
	content, err := ioutil.ReadFile(source)
	if err != nil {
		return emperror.With(emperror.Wrapf(err, "failed to read kubeconfig %q", source), "path", source)
	}
	parsed := map[string]interface{}{}
	if err := utils.Unmarshal(content, &parsed); err != nil {
		return emperror.Wrapf(err, "failed to parse kubeconfig %q", source)
	}
	current := parsed["current-context"]
	if current == nil {
		return errors.New("can't find current context in kubeconfig")
	}
	log.Infof("Current Kubernetes context: %s", current)
	// 0600: the kubeconfig carries credentials, keep it private.
	if err := ioutil.WriteFile(kubeconfigName, content, 0600); err != nil {
		return emperror.With(emperror.Wrapf(err, "failed to write temporary file %q", kubeconfigName), "path", kubeconfigName)
	}
	return nil
}
|
package constants
// Version is the version string of this build, surfaced to users by
// whatever consumes the constants package.
const Version = "0.0.1"
|
package grpcx_test
import (
"context"
"errors"
"fmt"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/test/bufconn"
"github.com/socialpoint-labs/bsk/grpcx"
)
// ExampleDaemon_Run wires a gRPC server, an in-memory listener and an
// application into a grpcx.Daemon, serves a single RPC, then shuts the
// daemon down by cancelling its context. The Output block below is
// asserted by `go test`.
func ExampleDaemon_Run() {
	server := exampleServer()
	// bufconn keeps the whole exchange in memory; no ports are opened.
	lis := bufconn.Listen(1024 * 1024)
	defer lis.Close()
	ctx, cancel := context.WithCancel(context.Background())
	app := exampleApplication{}
	dae := grpcx.NewDaemon(server, lis, grpcx.WithApplications(app))
	go dae.Run(ctx)
	exampleCall(ctx, lis)
	cancel()
	// Output: example rpcs registered
	// example application run
	// /test.service/test.call rpc handled!
	// got response from server: bye
}
// exampleServer builds a gRPC server whose unknown-service handler
// catches every incoming RPC, with the no-op codec registered so raw
// byte payloads pass through unmodified.
func exampleServer() *grpc.Server {
	encoding.RegisterCodec(noopCodec{})
	catchAll := grpc.UnknownServiceHandler(exampleServiceHandler)
	return grpc.NewServer(catchAll)
}
// exampleServiceHandler announces the method being handled and replies
// with a fixed "bye" payload on the stream.
func exampleServiceHandler(srv interface{}, stream grpc.ServerStream) error {
	method, ok := grpc.MethodFromServerStream(stream)
	if !ok {
		return errors.New("could not determine method from server stream")
	}
	fmt.Println(method, "rpc handled!")
	return stream.SendMsg([]byte("bye"))
}
// exampleCall dials the in-memory listener and invokes a single RPC,
// printing the server's reply. Panics on failure, which is acceptable
// in example code only.
func exampleCall(ctx context.Context, lis *bufconn.Listener) {
	cc, err := grpc.DialContext(
		ctx, "",
		// Route the "connection" through the bufconn listener rather
		// than a real network dial.
		grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
			return lis.Dial()
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// NOTE(review): CustomCodecCallOption is deprecated in newer
		// grpc-go releases; ForceCodec is the suggested replacement.
		grpc.WithDefaultCallOptions(grpc.CustomCodecCallOption{Codec: noopCodec{}}),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
	var out []byte
	err = cc.Invoke(ctx, "/test.service/test.call", nil, &out)
	if err != nil {
		panic(err)
	}
	fmt.Println("got response from server:", string(out))
}
// exampleApplication is a minimal grpcx application used by the example.
type exampleApplication struct {
}

// Run prints a marker so the example output shows the application ran.
func (e exampleApplication) Run(ctx context.Context) {
	fmt.Println("example application run")
}

// RegisterGRPC prints a marker instead of registering real services.
func (e exampleApplication) RegisterGRPC(registrar grpc.ServiceRegistrar) {
	fmt.Println("example rpcs registered")
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grid
import "github.com/google/gapid/test/robot/web/client/dom"
// Icons holds the characters to use to draw the icons using the icons font.
type Icons struct {
	Succeeded rune // Glyph drawn for tasks that succeeded.
	Failed rune // Glyph drawn for tasks that failed.
	Unknown rune // Glyph drawn for tasks in an unknown state.
}
// Style holds parameters to style the grid: geometry, fonts, and the
// background/foreground color pairs used for every task-state
// combination rendered by statsStyle.
type Style struct {
	GridPadding float64 // Padding in pixels from the top-left of the canvas.
	CellSize float64 // Width and height in pixels of each cell.
	CellShadowColor dom.Color // The color of the shadow of a raised cell / header.
	HeaderFont dom.Font // The header font.
	HeaderFontColor dom.Color // The header font color.
	GridLineColor dom.Color // The line color for the grid.
	GridLineWidth float64 // The line width for the grid.
	BackgroundColor dom.Color // The regular background color of cells and headers.
	CurrentSucceededBackgroundColor dom.Color // The background color used for tasks that have succeeded and are current.
	CurrentSucceededForegroundColor dom.Color // The foreground color used for tasks that have succeeded and are current.
	StaleSucceededBackgroundColor dom.Color // The background color used for tasks that have succeeded and are stale.
	StaleSucceededForegroundColor dom.Color // The foreground color used for tasks that have succeeded and are stale.
	CurrentFailedBackgroundColor dom.Color // The background color used for tasks that have failed and are current.
	CurrentFailedForegroundColor dom.Color // The foreground color used for tasks that have failed and are current.
	StaleFailedBackgroundColor dom.Color // The background color used for tasks that have failed and are stale.
	StaleFailedForegroundColor dom.Color // The foreground color used for tasks that have failed and are stale.
	InProgressForegroundColor dom.Color // The foreground color used for tasks that last failed and are currently in progress.
	RegressedForegroundColor dom.Color // The foreground color used for tasks that last succeeded and now are failing.
	FixedForegroundColor dom.Color // The foreground color used for tasks that last failed and now are succeeding.
	UnknownBackgroundColor dom.Color // The background color used for tasks that are in an unknown state.
	UnknownForegroundColor dom.Color // The foreground color used for tasks that are in an unknown state.
	StaleUnknownForegroundColor dom.Color // The foreground color used for tasks that are in an unknown state and are stale.
	SelectedBackgroundColor dom.Color // The background color used for cells and headers when selected.
	IconsFont dom.Font // The font to use for icon drawing.
	Icons Icons // The character table used for icon drawing.
}
// statsStyle maps aggregated task statistics to the icon and color pair
// to draw for a cell. Cases are ordered by display priority: regressed
// failures first, then current failures, fixes, stale results, current
// successes, and finally unknown states.
func (s *Style) statsStyle(stats taskStats) (icon rune, backgroundColor, foregroundColor dom.Color) {
	switch {
	case stats.numFailedWasSucceeded > 0:
		return s.Icons.Failed, s.CurrentFailedBackgroundColor, s.RegressedForegroundColor
	case stats.numCurrentFailed > 0:
		return s.Icons.Failed, s.CurrentFailedBackgroundColor, s.CurrentFailedForegroundColor
	case stats.numSucceededWasFailed > 0:
		return s.Icons.Succeeded, s.CurrentSucceededBackgroundColor, s.FixedForegroundColor
	case stats.numInProgressWasFailed+stats.numStaleFailed > 0:
		return s.Icons.Failed, s.StaleFailedBackgroundColor, s.StaleFailedForegroundColor
	case stats.numInProgressWasSucceeded+stats.numStaleSucceeded > 0:
		return s.Icons.Succeeded, s.StaleSucceededBackgroundColor, s.StaleSucceededForegroundColor
	case stats.numCurrentSucceeded > 0:
		return s.Icons.Succeeded, s.CurrentSucceededBackgroundColor, s.CurrentSucceededForegroundColor
	// BUG FIX: the original combined numInProgressWasUnknown and
	// numStaleUnknown in one case, which made the dedicated
	// stale-unknown case below it unreachable — the
	// StaleUnknownForegroundColor was never applied. Split the cases
	// so stale unknown tasks get their own foreground color.
	case stats.numInProgressWasUnknown > 0:
		return s.Icons.Unknown, s.UnknownBackgroundColor, s.UnknownForegroundColor
	case stats.numStaleUnknown > 0:
		return s.Icons.Unknown, s.UnknownBackgroundColor, s.StaleUnknownForegroundColor
	default:
		return s.Icons.Unknown, s.BackgroundColor, s.UnknownForegroundColor
	}
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// CompartmentType is documented here http://hl7.org/fhir/ValueSet/compartment-type
type CompartmentType int

const (
	// Constants follow the order of the FHIR value set; the zero value
	// is CompartmentTypePatient.
	CompartmentTypePatient CompartmentType = iota
	CompartmentTypeEncounter
	CompartmentTypeRelatedPerson
	CompartmentTypePractitioner
	CompartmentTypeDevice
)
// MarshalJSON encodes the code as its canonical JSON string, e.g. "Patient".
func (code CompartmentType) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}
// UnmarshalJSON decodes a (possibly quoted) JSON string into the
// matching CompartmentType constant, returning an error for codes not
// in the value set.
//
// FIX: the parameter was previously named `json`, shadowing the
// encoding/json package inside this method; renamed to `data`.
func (code *CompartmentType) UnmarshalJSON(data []byte) error {
	s := strings.Trim(string(data), "\"")
	switch s {
	case "Patient":
		*code = CompartmentTypePatient
	case "Encounter":
		*code = CompartmentTypeEncounter
	case "RelatedPerson":
		*code = CompartmentTypeRelatedPerson
	case "Practitioner":
		*code = CompartmentTypePractitioner
	case "Device":
		*code = CompartmentTypeDevice
	default:
		return fmt.Errorf("unknown CompartmentType code `%s`", s)
	}
	return nil
}
// String returns the canonical code, making CompartmentType satisfy fmt.Stringer.
func (code CompartmentType) String() string {
	return code.Code()
}
// Code returns the machine-readable code defined by the FHIR value
// set, or "<unknown>" for out-of-range values.
func (code CompartmentType) Code() string {
	switch code {
	case CompartmentTypePatient:
		return "Patient"
	case CompartmentTypeEncounter:
		return "Encounter"
	case CompartmentTypeRelatedPerson:
		return "RelatedPerson"
	case CompartmentTypePractitioner:
		return "Practitioner"
	case CompartmentTypeDevice:
		return "Device"
	}
	return "<unknown>"
}
// Display returns the human-readable display string for the code
// (identical to Code for this value set), or "<unknown>".
func (code CompartmentType) Display() string {
	switch code {
	case CompartmentTypePatient:
		return "Patient"
	case CompartmentTypeEncounter:
		return "Encounter"
	case CompartmentTypeRelatedPerson:
		return "RelatedPerson"
	case CompartmentTypePractitioner:
		return "Practitioner"
	case CompartmentTypeDevice:
		return "Device"
	}
	return "<unknown>"
}
// Definition returns the formal definition sentence for the code as
// given by the FHIR value set, or "<unknown>".
func (code CompartmentType) Definition() string {
	switch code {
	case CompartmentTypePatient:
		return "The compartment definition is for the patient compartment."
	case CompartmentTypeEncounter:
		return "The compartment definition is for the encounter compartment."
	case CompartmentTypeRelatedPerson:
		return "The compartment definition is for the related-person compartment."
	case CompartmentTypePractitioner:
		return "The compartment definition is for the practitioner compartment."
	case CompartmentTypeDevice:
		return "The compartment definition is for the device compartment."
	}
	return "<unknown>"
}
|
package utils
import (
"fmt"
"testing"
)
// TestDoubleAverage checks that DoubleAverage never returns a grab
// larger than the expected upper bound for each case.
func TestDoubleAverage(t *testing.T) {
	// Demo run kept from the original: prints a sample distribution.
	testDoubleAverage(10, 10000)
	type args struct {
		count  int64
		amount int64
	}
	tests := []struct {
		name string
		args args
		want int64
	}{
		{
			name: "1",
			args: args{10, 10000},
			want: 10000,
		},
		{
			name: "2",
			args: args{10, 10000},
			want: 10000,
		},
		{
			name: "3",
			args: args{10, 10000},
			want: 10000,
		},
		{
			name: "4",
			args: args{10, 10000},
			want: 10000,
		},
		{
			// BUG FIX: this case was also named "1", colliding with the
			// first subtest and making failure reports ambiguous.
			name: "5",
			args: args{1, 10000},
			want: 10001,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := DoubleAverage(tt.args.count, tt.args.amount); got > tt.want {
				t.Errorf("DoubleAverage() = %v, want %v", got, tt.want)
			}
		})
	}
}
func testDoubleAverage(count, amount int64) {
sum := int64(0)
for i := int64(0); i < count; i++ {
x := DoubleAverage(count - i, amount)
fmt.Printf("用户%d获得了红包为%f元\n", i + 1, float64(x)/float64(100))
sum += x
amount -= x
}
fmt.Printf("合计发红包为%d分",sum)
} |
package viz
import (
"errors"
)
// DB is the storage backend abstraction for the visualizer.
type DB interface {
	Init(Config) error // Configure the backend from the given Config.
	Get(int, int) (string, error) // Fetch one value by a pair of indices — presumably (line, field); confirm with the CSV implementation.
	Lines() (int, error) // Number of lines available.
	Update() error // Refresh the backend's state from its source.
	Watch(chan bool, chan bool) error // Watch for changes; channel semantics are not visible here — confirm with the implementation.
}
// db is the package-wide backend instance; nil until InitDB selects one.
var db DB
// InitDB selects and initializes the database backend named by
// config.DB. Only the "csv" backend is currently supported; an
// unrecognized value leaves db nil and returns nil (behavior kept from
// the original).
func InitDB() error {
	switch config.DB {
	case "csv":
		if config.CSV.File == "" {
			return errors.New("Field \"csv\" must be path to CSV file, but it is not defined")
		}
		config.Format = config.CSV.Format
		db = &CSV{}
		// BUG FIX: the error returned by Init was previously discarded,
		// so a failed backend setup looked like success to the caller.
		if err := db.Init(config); err != nil {
			return err
		}
	}
	return nil
}
|
package rtq
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// processMessage parses rawData as JSON, attaches it to msg as the
// payload, and writes the message to the queue. Pmx.MsgError is
// incremented on any failure; ProcessingTime observes the total
// duration of the call.
func (rt *rtQ) processMessage(msg Message, rawData []byte) error {
	start := time.Now()
	// BUG FIX: defer evaluates its call arguments immediately, so the
	// original `defer ...Observe(time.Since(start)...)` always recorded
	// ~0s. Wrapping in a closure measures the duration at return time.
	defer func() {
		rt.cfg.Pmx.ProcessingTime.Observe(time.Since(start).Seconds())
	}()
	// all data is json
	payload := make(map[string]interface{})
	err := json.Unmarshal(rawData, &payload)
	if err != nil {
		// increment metric msg_errors
		rt.cfg.Pmx.MsgError.Inc()
		return errors.New(fmt.Sprintf("could not unmarshal json: %s", rawData))
	}
	msg.Payload = payload
	// write the message
	err = rt.QWrite(msg)
	if err != nil {
		// increment metric msg_errors
		rt.cfg.Pmx.MsgError.Inc()
		return errors.New(fmt.Sprintf("failed to write message: %s", err.Error()))
	}
	return nil
}
// RxRouteHandler handles the http route for inbound data: it reads the
// raw body, processes it synchronously, and responds 200 on success or
// 500 with the error message on failure.
func (rt *rtQ) RxRouteHandler(c *gin.Context) {
	start := time.Now()
	// BUG FIX: defer evaluates its arguments immediately, so the
	// original observed ~0s; the closure defers the measurement.
	defer func() {
		rt.cfg.Pmx.ResponseTime.Observe(time.Since(start).Seconds())
	}()
	rawData, err := c.GetRawData()
	if err != nil {
		rt.cfg.Logger.Error("Payload error", zap.Error(err))
		c.JSON(500, gin.H{
			"status":  "FAIL",
			"message": err.Error(),
		})
		return
	}
	err = rt.processMessage(Message{
		Producer: c.Param("producer"),
		Key:      c.Param("key"),
		Label:    c.Param("label"),
	}, rawData)
	if err != nil {
		// BUG FIX: log message typo ("Message processing er, or").
		rt.cfg.Logger.Error("Message processing error", zap.Error(err))
		c.JSON(500, gin.H{
			"status":  "FAIL",
			"message": err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"status": "OK",
	})
}
// RxRouteHandlerAsync accepts inbound data, responds 200 immediately,
// and processes the message in a background goroutine. Background
// failures are logged, not reported to the client.
func (rt *rtQ) RxRouteHandlerAsync(c *gin.Context) {
	start := time.Now()
	rawData, err := c.GetRawData()
	if err != nil {
		rt.cfg.Logger.Error("Payload error", zap.Error(err))
		c.JSON(500, gin.H{
			"status":  "FAIL",
			"message": err.Error(),
		})
		// BUG FIX: the original fell through after the payload error,
		// processing the bad request and writing a second response.
		return
	}
	msg := Message{
		Producer: c.Param("producer"),
		Key:      c.Param("key"),
		Label:    c.Param("label"),
	}
	go func(msg Message, rawData []byte) {
		// BUG FIX: the goroutine previously called c.JSON after the
		// handler had already written its 200 response; writing to the
		// gin context from another goroutine after the handler returns
		// is unsafe and would have double-written the response. The
		// background failure is now only logged.
		if err := rt.processMessage(msg, rawData); err != nil {
			rt.cfg.Logger.Error("Message processing error", zap.Error(err))
		}
	}(msg, rawData)
	rt.cfg.Pmx.ResponseTimeAsync.Observe(time.Since(start).Seconds())
	c.JSON(200, gin.H{
		"status": "OK",
	})
}
|
package main
import (
"database/sql"
)
// connect opens a database handle for the given postgres connection
// URL. Note that sql.Open only validates its arguments; it does not
// establish a connection.
func connect(url string) (*sql.DB, error) {
	return sql.Open("postgres", url)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.