text stringlengths 11 4.05M |
|---|
package main
import (
"crypto/sha256"
b64 "encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"sort"
"time"
)
//Item contains a news item decoded from a single RSS <item> element.
type Item struct {
	Title string `xml:"title"`
	Link string `xml:"link"`
	Desc string `xml:"description"`
	// PubDate holds the publication date as the raw feed string;
	// it is parsed (two layouts attempted) in dateOrderedItems.
	PubDate string `xml:"pubDate"`
	// Key is the feed-supplied guid; hashItemKey later replaces it with a
	// URL-safe base64-encoded SHA-256 digest.
	Key string `xml:"guid"`
}
//Items contains a slice of pointers to a news item.
type Items []*Item
//Channel contains an RSS feed in its entirety.
type Channel struct {
	Title string `xml:"title"`
	Link string `xml:"link"`
	Desc string `xml:"description"`
	Items []Item `xml:"item"`
}
//Rss is the root of an RSS document; XML decoding targets its channel.
type Rss struct {
	Channel Channel `xml:"channel"`
}
//ChannelReader visits source website and returns a list of news items.
type ChannelReader func(s string) Items
//Reader aggregates news items fetched from a set of source URLs.
type Reader struct {
	sources []string // feed URLs to fetch
	channelReader ChannelReader // strategy used to fetch and decode one source
	items Items // accumulated items across all sources
}
// NewReader builds a Reader over the given source URLs using the supplied
// fetch strategy. The item list starts empty and is populated by Read.
func NewReader(sources []string, channelReader ChannelReader) Reader {
	reader := Reader{items: Items{}}
	reader.sources = sources
	reader.channelReader = channelReader
	return reader
}
// Read fetches every configured source, appends all discovered items, then
// sorts them newest-first and rewrites each item key as a hash.
func (r *Reader) Read() {
	for _, source := range r.sources {
		fetched := r.channelReader(source)
		r.items = append(r.items, fetched...)
	}
	// additional processing to ensure date order, and a unique retrievable reference.
	r.items.dateOrderedItems()
	r.items.hashItemKey()
}
// RSSReader (the default implementation) will visit the provided URL and decode XML into a RSS Channel Struct.
// then convert to a slice of actual news items.
// It returns nil when the fetch, the HTTP status, or the XML decode fails;
// errors are reported to stdout because the ChannelReader signature has no
// error result.
func RSSReader(address string) Items {
	rss := Rss{}
	resp, err := http.Get(address)
	if err != nil {
		fmt.Printf("Error GET: %v\n", err)
		return nil
	}
	defer resp.Body.Close()
	// A non-2xx response body is unlikely to be a valid feed; bail out early
	// instead of attempting (and failing) the XML decode on an error page.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		fmt.Printf("Error GET: unexpected status %s for %s\n", resp.Status, address)
		return nil
	}
	decoder := xml.NewDecoder(resp.Body)
	err = decoder.Decode(&rss)
	if err != nil {
		fmt.Printf("Error Decode: %v\n", err)
		return nil
	}
	// Preallocate: exactly one pointer per decoded item.
	allItems := make(Items, 0, len(rss.Channel.Items))
	for k := range rss.Channel.Items {
		allItems = append(allItems, &rss.Channel.Items[k])
	}
	return allItems
}
// dateOrderedItems sorts the items in place, newest first (the previous
// comment said "returns", but nothing is returned). Each PubDate is parsed
// with the primary "GMT" layout and, failing that, a numeric-zone layout;
// unparseable dates collapse to the zero time and therefore sort last.
func (items Items) dateOrderedItems() {
	const (
		dateFormat        = "Mon, 02 Jan 2006 15:04:05 GMT"
		alternativeFormat = "Mon, 02 Jan 2006 15:04:05 Z0700"
	)
	// parse centralizes the two-layout fallback so the comparator below does
	// not duplicate the same logic for both operands, as the original did.
	parse := func(s string) time.Time {
		t, err := time.Parse(dateFormat, s)
		if err != nil {
			t, _ = time.Parse(alternativeFormat, s)
		}
		return t
	}
	sort.Slice(items, func(i, j int) bool {
		return parse(items[i].PubDate).After(parse(items[j].PubDate))
	})
}
// hashItemKey replaces each item's unique reference with a URL-safe,
// base64-encoded SHA-256 digest of the original key.
func (items Items) hashItemKey() {
	for _, item := range items {
		sum := sha256.Sum256([]byte(item.Key))
		item.Key = b64.URLEncoding.EncodeToString(sum[:])
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wilco
import (
"context"
"regexp"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/common/servo"
"chromiumos/tast/errors"
"chromiumos/tast/rpc"
pb "chromiumos/tast/services/cros/wilco"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the DrallionTabletPower test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: DrallionTabletPower,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Verifies power button behavior on Drallion 360 devices in tablet mode",
		Contacts: []string{
			"mwiitala@google.com", // Author
			"tast-owners@google.com",
		},
		SoftwareDeps: []string{"wilco", "chrome"},
		ServiceDeps: []string{"tast.cros.wilco.PowerMenuService"},
		// On Drallion360, the power button is on the keyboard rather than the side
		// of the device. To account for this, the power button behaves differently
		// on drallion360 devices when in tablet mode and requires a separate test.
		HardwareDeps: hwdep.D(hwdep.Model("drallion360")),
		// TODO(mwiitala): Restore attributes after fixing http://b/149035007
		// Attr: []string{ "group:mainline", "informational"},
		Vars: []string{"servo"},
	})
}
// DrallionTabletPower verifies power-button behavior on Drallion 360
// convertibles: in clamshell mode the power menu appears for any press
// length, in tablet mode only for presses > 1.5 seconds, and presses
// > 8 seconds power the DUT down in either mode. Code is unchanged from
// the original apart from two typo fixes in failure messages.
func DrallionTabletPower(ctx context.Context, s *testing.State) {
	d := s.DUT()
	readBootID := func(ctx context.Context) (string, error) {
		out, err := d.Conn().CommandContext(ctx, "cat", "/proc/sys/kernel/random/boot_id").Output()
		if err != nil {
			return "", errors.Wrap(err, "error reading boot id")
		}
		return strings.TrimSpace(string(out)), nil
	}
	// This is expected to fail in VMs, since Servo is unusable there and the "servo" var won't
	// be supplied. https://crbug.com/967901 tracks finding a way to skip tests when needed.
	servoSpec, _ := s.Var("servo")
	pxy, err := servo.NewProxy(ctx, servoSpec, d.KeyFile(), d.KeyDir())
	if err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	defer pxy.Close(ctx)
	// Connect to the gRPC server on the DUT.
	cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC: ", err)
	}
	defer cl.Close(ctx)
	powerMenuService := pb.NewPowerMenuServiceClient(cl.Conn)
	// Get initial boot ID
	initID, err := readBootID(ctx)
	if err != nil {
		s.Fatal("Failed to read boot ID: ", err)
	}
	s.Logf("Initial boot ID: %s", initID)
	// Get the initial tablet_mode_angle settings to set back at end of test
	re := regexp.MustCompile(`tablet_mode_angle=(\d+) hys=(\d+)`)
	out, err := d.Conn().CommandContext(ctx, "ectool", "--name=cros_ish", "motionsense", "tablet_mode_angle").Output()
	if err != nil {
		// Typo fixed: was "retreive".
		s.Fatal("Failed to retrieve tablet_mode_angle settings: ", err)
	}
	initLidAngle := re.FindStringSubmatch(string(out))
	if len(initLidAngle) != 3 {
		s.Fatal("Failed to get initial tablet_mode_angle settings")
	}
	s.Logf("Initial settings: lid_angle=%s hys=%s", initLidAngle[1], initLidAngle[2])
	// Restore tablet_mode_angle settings before returning
	defer d.Conn().CommandContext(ctx, "ectool", "--name=cros_ish", "motionsense", "tablet_mode_angle", initLidAngle[1], initLidAngle[2]).Run()
	setTabletMode := func(tabletMode bool) error {
		// Setting tabletModeAngle to 0 will force the DUT into tablet mode
		tabletModeAngle := "0"
		mode := "tablet"
		if !tabletMode {
			// Setting tabletModeAngle to 360 will force the DUT into clamshell mode
			tabletModeAngle = "360"
			mode = "clamshell"
		}
		// Use servo to set tablet_mode_angle
		out, err = d.Conn().CommandContext(ctx, "ectool", "--name=cros_ish", "motionsense", "tablet_mode_angle", tabletModeAngle, "0").Output()
		if err != nil {
			return errors.Wrap(err, "failed to set tablet_mode_angle")
		}
		s.Logf("Put DUT into %s mode", mode)
		return nil
	}
	// Press power key for pressDuration seconds and verify DUT reboots as expected
	testCaseReboot := func(pressDuration string) error {
		// Restarting Chrome clears the power down menu if already present
		if _, err = powerMenuService.NewChrome(ctx, &empty.Empty{}); err != nil {
			return errors.Wrap(err, "failed to create new chrome instance")
		}
		// Close chrome instance before rebooting
		if _, err = powerMenuService.CloseChrome(ctx, &empty.Empty{}); err != nil {
			return errors.Wrap(err, "failed to close chrome instance")
		}
		// Use servo to hold down power button
		s.Logf("Pressing power key for %s seconds", pressDuration)
		if err = pxy.Servo().SetString(ctx, "power_key", pressDuration); err != nil {
			return errors.Wrap(err, "error pressing the power button")
		}
		waitUnreachableCtx, cancelUnreachable := context.WithTimeout(ctx, 30*time.Second)
		defer cancelUnreachable()
		// Wait for DUT to power off as expected
		s.Log("Waiting for DUT to power OFF")
		if err = d.WaitUnreachable(waitUnreachableCtx); err != nil {
			return errors.New("DUT did not power down after power key press > 8 seconds")
		}
		// Use servo to power DUT on
		s.Log("Sending power key press to turn DUT back on")
		if err = pxy.Servo().SetString(ctx, "power_state", "on"); err != nil {
			return errors.Wrap(err, "failed to send power key press")
		}
		s.Log("Waiting for DUT to power ON")
		waitConnectCtx, cancelWaitConnect := context.WithTimeout(ctx, 30*time.Second)
		defer cancelWaitConnect()
		// Wait for DUT to reboot and reconnect
		if err = d.WaitConnect(waitConnectCtx); err != nil {
			return errors.Wrap(err, "failed to reconnect to DUT")
		}
		// Verify that DUT rebooted
		curID, err := readBootID(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to read boot ID")
		}
		if curID == initID {
			return errors.Errorf("DUT failed to reboot after power key press of %s seconds", pressDuration)
		}
		// Update initID for following test cases
		initID = curID
		// Reconnect to the gRPC server on the DUT for following test cases
		cl, err = rpc.Dial(ctx, s.DUT(), s.RPCHint())
		if err != nil {
			return errors.Wrap(err, "failed to connect to the RPC")
		}
		powerMenuService = pb.NewPowerMenuServiceClient(cl.Conn)
		return nil
	}
	// Press power key for pressDuration seconds, check that power menu only appears if expected, confirm DUT did not reboot
	testCaseNoReboot := func(pressDuration string, menuExpected bool) error {
		// Chrome instance is necessary to check for the presence of the power menu
		if _, err = powerMenuService.NewChrome(ctx, &empty.Empty{}); err != nil {
			return errors.Wrap(err, "failed to create new chrome instance")
		}
		defer powerMenuService.CloseChrome(ctx, &empty.Empty{})
		// Use servo to hold down power button
		s.Logf("Pressing power key for %s seconds", pressDuration)
		if err = pxy.Servo().SetString(ctx, "power_key", pressDuration); err != nil {
			return errors.Wrap(err, "error pressing the power button")
		}
		// Verify that power down menu is only present when expected
		res, err := powerMenuService.IsPowerMenuPresent(ctx, &empty.Empty{})
		if err != nil {
			return errors.Wrap(err, "RPC call failed")
		}
		if res.IsMenuPresent != menuExpected {
			return errors.Errorf("Power menu did not behave as expected after pressing power key for %s seconds", pressDuration)
		}
		// Verify that DUT did not reboot
		curID, err := readBootID(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to read boot ID")
		}
		if curID != initID {
			return errors.Errorf("DUT rebooted after power key press of %s seconds", pressDuration)
		}
		return nil
	}
	/* Iterate over test cases and verify expected behavior:
	Clamshell mode: Power menu appears after power key press of any duration
	Tablet mode: Power menu only appears after power key press > 1.5 seconds
	DUT powers down after power key press > 8.0 seconds in both modes
	*/
	for _, tc := range []struct {
		tabletMode bool
		pressDuration string
		menuExpected bool
		rebootExpected bool
	}{
		{false, "0.5", true, false},
		{true, "0.5", false, false},
		{false, "2.0", true, false},
		{true, "2.0", true, false},
		{false, "8.5", true, true},
		{true, "8.5", true, true},
	} {
		// Use servo to force DUT into tablet or clamshell mode
		if err := setTabletMode(tc.tabletMode); err != nil {
			// Typo fixed: was "angel".
			s.Fatal("Failed to set tablet mode angle: ", err)
		}
		// Verify test case expectations
		if !tc.rebootExpected {
			err = testCaseNoReboot(tc.pressDuration, tc.menuExpected)
		} else {
			err = testCaseReboot(tc.pressDuration)
		}
		if err != nil {
			s.Fatalf("Failed on test case with tabletMode=%t, pressDuration=%s, menuExpected=%t, rebootExpected=%t: %v",
				tc.tabletMode, tc.pressDuration, tc.menuExpected, tc.rebootExpected, err)
		}
	}
}
|
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"encoding/json"
"expvar"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
)
var (
	// dnsMu guards dnsCache.
	dnsMu sync.Mutex
	// dnsCache maps each bootstrap hostname to its most recently resolved IPs.
	dnsCache = map[string][]net.IP{}
)
// bootstrapDNSRequests counts hits to the bootstrap DNS endpoint, exported via expvar.
var bootstrapDNSRequests = expvar.NewInt("counter_bootstrap_dns_requests")
// refreshBootstrapDNSLoop refreshes the bootstrap DNS cache immediately and
// then once every ten minutes, forever. It returns at once when the
// bootstrap-DNS flag is unset.
func refreshBootstrapDNSLoop() {
	if *bootstrapDNS == "" {
		return
	}
	for ; ; time.Sleep(10 * time.Minute) {
		refreshBootstrapDNS()
	}
}
// refreshBootstrapDNS resolves every comma-separated name in *bootstrapDNS
// (one-minute overall deadline) and stores the results in dnsCache. Lookup
// failures are logged and leave the previous cache entry untouched.
func refreshBootstrapDNS() {
	if *bootstrapDNS == "" {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	var resolver net.Resolver
	for _, host := range strings.Split(*bootstrapDNS, ",") {
		ips, err := resolver.LookupIP(ctx, "ip", host)
		if err != nil {
			log.Printf("bootstrap DNS lookup %q: %v", host, err)
			continue
		}
		dnsMu.Lock()
		dnsCache[host] = ips
		dnsMu.Unlock()
	}
}
// handleBootstrapDNS serves the current DNS cache as indented JSON.
// Marshaling happens while holding dnsMu so a concurrent refresh cannot
// mutate the map mid-encode.
func handleBootstrapDNS(w http.ResponseWriter, r *http.Request) {
	bootstrapDNSRequests.Add(1)
	dnsMu.Lock()
	j, err := json.MarshalIndent(dnsCache, "", "\t")
	dnsMu.Unlock()
	if err != nil {
		log.Printf("bootstrap DNS JSON: %v", err)
		// Named constant instead of the magic number 500.
		http.Error(w, "JSON marshal error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(j)
}
|
package tournament
/*
TE DEBO EL TESTING
func TestTournamentAdd(t *testing.T) {
c := NewTournament()
e0 := c.GetTeam(0)
if e0 != nil {
t.Error("El equipo con Id 0 ya existe")
}
c.add(NewTeam("test", 0))
e0 = c.GetTeam(0)
if e0 == nil {
t.Error("El equipo con ID 0 no fue agregado")
}
if e0.name != "test" {
t.Error("El equipo con ID 0 no tiene el equipo correcto")
}
}
*/
|
package main
import (
"github.com/polluxx/yard/search"
"fmt"
"net/http"
"time"
"encoding/json"
"github.com/polluxx/yard/encoding/csv"
"log"
"os"
"regexp"
//"log/syslog"
"sort"
)
// Report is a named collection of records, rendered to a CSV file whose
// basename is Title.
type Report struct {
	Title string
	Body []Record
}
// Record is one aggregated data point. All fields are unexported, so this
// struct cannot be marshaled directly; handlers copy it into
// map[string]string before JSON/CSV encoding.
type Record struct {
	time string // timestamp key of the aggregation bucket
	rank string
	count string
	rangeit string // value of the source "rangeitem" field
}
// main wires up the HTTP routes and serves forever on :8090.
func main() {
	routes := map[string]http.HandlerFunc{
		"/links/":    mainHandler(linksHandler),
		"/report/":   mainHandler(reportHandler),
		"/aggregate": queryHandler(aggregateHandler),
		"/list":      queryHandler(listHandler),
		"/counter":   queryHandler(counterHandler),
	}
	for pattern, handler := range routes {
		http.HandleFunc(pattern, handler)
	}
	server := &http.Server{
		Addr:           ":8090",
		ReadTimeout:    120 * time.Second,
		WriteTimeout:   120 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	log.Fatal(server.ListenAndServe())
}
func mainHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// change valid path for main queries
var validPath = regexp.MustCompile("^/(links|report|relinks)/([a-zA-Z0-9-]+)$")
mess := validPath.FindStringSubmatch(r.URL.Path)
if mess == nil {
http.NotFound(w,r);
return
}
fn(w, r, mess[2])
}
}
func queryHandler (fn func(http.ResponseWriter, *http.Request, map[string]string)) http.HandlerFunc {
var validPath = regexp.MustCompile("^/(list|aggregate|counter)")
return func(w http.ResponseWriter, r *http.Request) {
mess := validPath.FindStringSubmatch(r.URL.Path)
if mess == nil {
http.NotFound(w,r);
return
}
r.ParseForm();
queryParams := make(map[string]string)
for index, value := range r.Form {
queryParams[index] = value[0];
}
fn(w, r, queryParams)
}
}
// linksHandler maps a project alias from the URL to its numeric project id
// and serves that project's links; unknown aliases get a 404.
func linksHandler(w http.ResponseWriter, r *http.Request, param string) {
	aliases := map[string]int{
		"auto":   1,
		"ria":    2,
		"dom":    3,
		"market": 5,
	}
	id, ok := aliases[param]
	if !ok {
		http.NotFound(w, r)
		return
	}
	GetProjectLinks(w, r, id)
}
// aggregateHandler builds (or reuses) an aggregation report for a project.
// Required query param: "project". Optional: "to" (defaults to today),
// "from" (defaults to one month before "to"), "group" (defaults to "all"),
// and "raw" ("true" returns the raw JSON data instead of a report link).
func aggregateHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	//sl, _ := syslog.New(syslog.LOG_INFO, "info")
	// Reject the request when any required parameter is missing.
	expected := []string{"project"}
	for _, val := range expected {
		_, exist := params[val]
		if (!exist) {
			http.Error(w, fmt.Sprintf("Param '%s' expected, but not provided", val), 403)
			return
		}
	}
	// Reference layout for all date parameters, e.g. "2006-Jan-02".
	const longForm = "2006-Jan-02"
	//t, _ := time.Parse(longForm, dateto)
	if (params["to"] == "") {
		curr := time.Now()
		//params["to"] = time.Parse(longForm, curr.Date())
		params["to"] = curr.Format(longForm)
	}
	if (params["from"] == "") {
		curr, parseErr := time.Parse(longForm, params["to"])
		if parseErr != nil {
			http.Error(w, fmt.Sprintf("%s", parseErr), 501)
			return
		}
		// Default window: one month back from "to".
		timeFrom := curr.AddDate(0, -1, 0)
		params["from"] = timeFrom.Format(longForm)
	}
	if (params["group"] == "") {
		params["group"] = "all"
	}
	/*var validDate = regexp.MustCompile("^d{4}-([A-Z])w+-d{2}$")
	isValid := validDate.FindStringSubmatch(params["to"])
	if isValid == nil {
		http.Error(w, fmt.Sprintf("date '%s' isn't correct", params["to"]), 203);
		return
	}*/
	// Report files are keyed by the full parameter tuple.
	filename := fmt.Sprintf("%s-%s-%s-%s", params["from"], params["to"], params["project"], params["group"])
	// checking if needed raw data or only report link
	isRaw := true
	if(params["raw"] != "true") {
		isRaw = false
		// A cached report short-circuits the whole aggregation.
		link, errorRead := findReport(filename, r)
		if errorRead == nil {
			response := map[string]string{"resource":link, "error":"null"}
			makeResp(w, r, response)
			return
		}
	}
	// end
	fromT, _ := time.Parse(longForm, params["from"])
	toT, _ := time.Parse(longForm, params["to"])
	// NOTE(review): duration is divided by 24 and then .Hours() is taken,
	// which yields the number of whole days — confirm this is intended.
	duration := toT.Sub(fromT)/24
	params["duration"] = fmt.Sprintf("%d", int(duration.Hours()))
	//project, _ := strconv.ParseInt(params["project"], 10, 64)
	itemsResp := search.LogSearch(params["project"], params["from"], int(duration.Hours()), 10000);
	if (!isRaw) {
		if (len(itemsResp) == 0) {
			response := map[string]string{"resource":"null", "error":"no data for report"}
			makeResp(w, r, response)
			return
		}
		// Persist the data as a CSV report and answer with its URL.
		formed := ReparseToCSV(itemsResp)
		filedata := &Report{Title: filename, Body: formed}
		errorWrite := filedata.saveReport(formed)
		if errorWrite != nil {
			http.Error(w, errorWrite.Error(), http.StatusInternalServerError)
		}
		link := fmt.Sprintf("%s/report/%s", r.Host, filename)
		response := map[string]string{"resource":link, "error":"null"}
		makeResp(w, r, response)
		return
	}
	ResponseRawData(w, itemsResp)
}
// ResponseRawData flattens the aggregation result into a date-sorted JSON
// array of {timedate, rank, range, count} objects and writes it with
// permissive CORS headers.
func ResponseRawData(w http.ResponseWriter, itemsResp map[string]map[string]string) {
	reports := make([]Record, 0, len(itemsResp))
	for stamp, fields := range itemsResp {
		reports = append(reports, Record{
			time:    stamp,
			rank:    fields["rank"],
			count:   fields["count"],
			rangeit: fields["rangeitem"],
		})
	}
	reports = MakeSort(reports)
	flat := make([]map[string]string, 0, len(reports))
	for _, rec := range reports {
		flat = append(flat, map[string]string{
			"timedate": rec.time,
			"rank":     rec.rank,
			"range":    rec.rangeit,
			"count":    rec.count,
		})
	}
	jsn, err := json.Marshal(flat)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Credentials", "true")
	w.Header().Set("Access-Control-Allow-Headers", "authorization")
	w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
	w.Write(jsn)
}
// ReparseToCSV converts the raw aggregation map into a slice of Records
// ready for CSV rendering. Map iteration order is random, so the result is
// unsorted; callers sort it if they need a stable order.
func ReparseToCSV(data map[string]map[string]string) []Record {
	formed := make([]Record, 0, len(data))
	// Loop variable renamed from "time", which shadowed the imported
	// time package inside this function.
	for stamp, value := range data {
		formed = append(formed, Record{
			time:    stamp,
			rank:    value["rank"],
			count:   value["count"],
			rangeit: value["rangeitem"],
		})
	}
	// log.Printf formats directly; the original wrapped fmt.Sprintf in
	// log.Print for no benefit.
	log.Printf("%v", formed)
	return formed
}
// GetProjectLinks fetches the link set for the given numeric project id and
// writes it as a JSON response.
func GetProjectLinks(w http.ResponseWriter, r *http.Request, project int) {
	data := search.Links(project)
	makeResp(w, r, data)
}
func makeResp(w http.ResponseWriter, r *http.Request, data map[string]string) {
jsn, err := json.Marshal(data)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", "authorization")
w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
w.Write(jsn);
}
// findReport checks whether a cached CSV report named link exists under
// /var/spool/reports, creating the directory on first use. It always builds
// the report URL; err is non-nil when the report file is absent.
func findReport(link string, r *http.Request) (string, error) {
	if err := os.Chdir("/var/spool/reports"); err != nil {
		// First run: the reports directory may not exist yet. Create it and
		// retry. The original called log.Fatal here even after successful
		// recovery, killing the whole server on every cold start.
		if errCrt := os.Mkdir("/var/spool/reports", os.ModePerm); errCrt != nil {
			log.Print(errCrt)
			return "", errCrt
		}
		if err := os.Chdir("/var/spool/reports"); err != nil {
			return "", err
		}
	}
	// Only existence matters; close the handle instead of leaking it.
	file, err := os.Open(fmt.Sprintf("%s.csv", link))
	if err == nil {
		file.Close()
	}
	return fmt.Sprintf("%s/report/%s", r.Host, link), err
}
// loadReport opens the cached CSV report named link from /var/spool/reports.
func loadReport(link string) (file *os.File, err error) {
	if err = os.Chdir("/var/spool/reports"); err != nil {
		return nil, err
	}
	return os.Open(fmt.Sprintf("%s.csv", link))
}
// saveReport renders datafile as CSV into /var/spool/reports/<Title>.csv.
// Errors are returned to the caller instead of aborting the process (the
// original called log.Fatal from a request path) and the created file is
// closed instead of leaked.
func (rep *Report) saveReport(datafile []Record) error {
	if err := os.Chdir("/var/spool/reports"); err != nil {
		// Previously ignored; surface it so the handler can report a 500.
		return err
	}
	filename := rep.Title + ".csv"
	newFile, err := os.Create(filename)
	if err != nil {
		log.Print(err)
		return err
	}
	defer newFile.Close()
	writer := csv.NewWriter(newFile)
	return writer.WriteAllCsv(datafile)
}
// reportHandler serves a previously generated CSV report; a missing report
// yields a 501 carrying the underlying error text.
func reportHandler(w http.ResponseWriter, r *http.Request, param string) {
	if _, err := loadReport(param); err != nil {
		http.Error(w, err.Error(), 501)
		return
	}
	http.ServeFile(w, r, fmt.Sprintf("/var/spool/reports/%s.csv", param))
}
// listHandler is a stub for the /list endpoint; it intentionally writes
// nothing, so clients receive an empty 200 response. TODO: implement.
func listHandler(w http.ResponseWriter, r *http.Request, param map[string]string) {
}
// counterHandler returns the global search counters as JSON with permissive
// CORS headers; a marshal failure turns into a 500.
func counterHandler(w http.ResponseWriter, r *http.Request, params map[string]string) {
	body, err := json.Marshal(search.Count())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for key, value := range map[string]string{
		"Content-Type":                     "application/json",
		"Access-Control-Allow-Origin":      "*",
		"Access-Control-Allow-Credentials": "true",
		"Access-Control-Allow-Headers":     "authorization",
		"Access-Control-Allow-Methods":     "POST, GET, OPTIONS",
	} {
		w.Header().Set(key, value)
	}
	w.Write(body)
}
// By is a comparison function used to order Records.
type By func(a1, a2 *Record) bool
// Sort orders items in place according to the receiver comparison.
func (by By) Sort(items []Record) {
	isor := &itemsSorter {
		items: items,
		by : by,
	}
	sort.Sort(isor)
}
// itemsSorter adapts a Record slice plus a By comparison to sort.Interface.
type itemsSorter struct {
	items []Record
	by func(a1, a2 *Record) bool
}
// Len returns the number of records.
func (s *itemsSorter) Len() int {
	return len(s.items)
}
// Swap exchanges records i and j.
func (s *itemsSorter) Swap(i,j int) {
	s.items[i], s.items[j] = s.items[j], s.items[i]
}
// Less reports whether record i orders before record j.
func (s *itemsSorter) Less(i,j int) bool {
	return s.by(&s.items[i], &s.items[j])
}
// MakeSort orders the records ascending by their time field and returns the
// same (now sorted) slice.
func MakeSort(items []Record) []Record {
	byTime := func(left, right *Record) bool {
		return left.time < right.time
	}
	By(byTime).Sort(items)
	return items
}
|
package krc
import "github.com/jbvmio/kafkactl"
// Storage keys and replication-kind labels used when persisting check results.
const (
	ErrorStatusKey = "ERR_TOPICS_7777"
	TopicListKey = "TOPIC_LIST"
	LocalKind = "localDC"
	RemoteKind = "crossDC"
	StopCount = 5
)
// StopChannel signals long-running check loops to stop.
// NOTE(review): declared but never initialized here — confirm it is made
// elsewhere before use, since operations on a nil channel block forever.
var StopChannel chan bool
// ErrorStatus records the per-topic error state, stored under ErrorStatusKey.
type ErrorStatus struct {
	ErrorStatusKey string `json:"errStatusKey"` //key
	ErrorState map[string]int16 `json:"errorState"` //value
}
// TopicList records the known topic names, stored under TopicListKey.
type TopicList struct {
	TopicListKey string `json:"topicListKey"` //key
	TopicList []string `json:"topicList"` //value
}
// Currently Not Using
// PeerCheck pairs a topic with its serialized sources value.
type PeerCheck struct {
	Topic string `json:"topic"` //key
	Sources string `json:"sources"` //value
}
// TopicCheck pairs a topic with its serialized check details.
type TopicCheck struct {
	Topic string `json:"topic"` //key
	Details string `json:"details"` //value
}
// Detail is the full check result for one topic: a summary status plus the
// per-partition replication records behind it.
type Detail struct {
	Summary Status `json:"summary"`
	ReplicatedPartitions []*ReplicatedPartition `json:"replicatedPartitions"`
}
// Status is the summarized state of a single topic check.
type Status struct {
	Status int16 `json:"status"`
	Topic string `json:"topic"`
	State string `json:"state"`
}
// ReplicatedPartition captures one end-to-end replication probe: the value
// written locally, the value read remotely, whether they matched, plus
// offsets, timestamps, latency and any metadata gathered on the way.
type ReplicatedPartition struct {
	Kind string `json:"kind"`
	DateTimeSecs int64 `json:"dateTimeSecs"`
	LastSuccessDateTimeSecs int64 `json:"lastSuccessDateTimeSecs"`
	SetKey string `json:"setKey"`
	SetValue string `json:"setValue"`
	GetValue string `json:"getValue"`
	Result string `json:"result"`
	Match bool `json:"match"`
	Status int16 `json:"status"`
	Topic string `json:"topic"`
	LocalBroker string `json:"localBroker"`
	LocalPartition int32 `json:"localPartition"`
	LocalOffset int64 `json:"localOffset"`
	LocalDateTimeNano int64 `json:"localDateTimeNano"`
	RemoteBroker string `json:"remoteBroker"`
	RemotePartition int32 `json:"remotePartition"`
	RemoteOffset int64 `json:"remoteOffset"`
	RemoteDateTimeNano int64 `json:"remoteDateTimeNano"`
	PreviousResult string `json:"previousResult"`
	PreviousStatus int16 `json:"previousStatus"`
	Metadata kafkactl.TopicMeta `json:"metadata,omitempty"`
	RemoteMetadata kafkactl.TopicOffsetMap `json:"remoteMetadata,omitempty"`
	LatencyNano int64 `json:"latencyNano"`
	LatencyMS float32 `json:"latencyMS"`
	LocalNeedPRE uint8 `json:"localNeedPRE"`
	RemoteNeedPRE uint8 `json:"remoteNeedPRE"`
}
// PeerData groups the replication sources observed for one topic.
type PeerData struct {
	Topic string `json:"topic"`
	Sources []Source `json:"sources"`
}
// Source identifies one local producer of replication probes for a topic.
type Source struct {
	Kind string `json:"kind"`
	DateTimeSecs int64 `json:"dateTimeSecs"`
	SetKey string `json:"setKey"`
	Topic string `json:"topic"`
	LocalBroker string `json:"localBroker"`
	LocalPartition int32 `json:"localPartition"`
}
// PeerStatus is the summarized health of replication from one peer source.
type PeerStatus struct {
	Kind string `json:"kind"`
	Topic string `json:"topic"`
	LocalBroker string `json:"localBroker"`
	Partitions int `json:"partitions"`
	State string `json:"state"`
	Status int16 `json:"status"`
	SyncDifferenceSecs int64 `json:"syncDifferenceSecs"`
	LastSeenSecsAgo int64 `json:"lastSeenSecsAgo"`
}
// ErrorState Descriptions
// Status codes written into ErrorState / ReplicatedPartition.Status, each
// paired with its human-readable string form.
const (
	NeedLocalPRE uint8 = 1
	// NOTE(review): NeedRemotePRE shares the value 1 with NeedLocalPRE —
	// confirm this is intentional and not a copy/paste slip.
	NeedRemotePRE uint8 = 1
	ErrInitializing int16 = 1
	ErrInitializingString string = "Initializing"
	ErrNone int16 = 2
	ErrNoneString string = "Success"
	ErrTopicMeta int16 = 3
	ErrTopicMetaString string = "ErrTopicMetadata"
	ErrSendMsg int16 = 4
	ErrSendMsgString string = "ErrSendMsg"
	ErrTopicOrPartNotFound int16 = 5
	ErrTopicOrPartNotFoundString string = "ErrTopicOrPartNotFound"
	ErrTimedOut int16 = 6
	ErrTimedOutString string = "ErrTimedOut"
	ErrPartitionOrOffset int16 = 7777
	ErrPartitionOrOffsetString string = "ErrPartitionOrOffset"
	ErrCreateClient int16 = -1001
	ErrCreateClientString string = "ErrCreateClient"
)
|
package leetcode
/*
* @lc app=leetcode id=4 lang=golang
*
* [4] Median of Two Sorted Arrays
*/
// @lc code=start
// findMedianSortedArrays returns the median of two sorted slices in
// O(log(min(m, n))) time by binary-searching a cut of the shorter slice
// such that every element left of the combined cut is <= every element
// right of it.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	shortLen, longLen := len(nums1), len(nums2)
	// Always binary-search over the shorter slice.
	if shortLen > longLen {
		return findMedianSortedArrays(nums2, nums1)
	}
	total := shortLen + longLen
	half := (total + 1) / 2
	lo, hi := 0, shortLen
	for lo <= hi {
		cut1 := (lo + hi) / 2
		cut2 := half - cut1
		switch {
		case cut1 < hi && nums2[cut2-1] > nums1[cut1]:
			// Too few elements taken from nums1; move the cut right.
			lo = cut1 + 1
		case cut1 > lo && nums1[cut1-1] > nums2[cut2]:
			// Too many elements taken from nums1; move the cut left.
			hi = cut1 - 1
		default:
			// Valid partition: compute the border values around the cut.
			var leftMax int
			if cut1 == 0 {
				leftMax = nums2[cut2-1]
			} else if cut2 == 0 {
				leftMax = nums1[cut1-1]
			} else {
				leftMax = max(nums1[cut1-1], nums2[cut2-1])
			}
			if total%2 == 1 {
				return float64(leftMax)
			}
			var rightMin int
			if cut1 == shortLen {
				rightMin = nums2[cut2]
			} else if cut2 == longLen {
				rightMin = nums1[cut1]
			} else {
				rightMin = min(nums1[cut1], nums2[cut2])
			}
			return float64(leftMax + rightMin) / 2
		}
	}
	return 0.0
}
// min reports the smaller of a and b.
func min(a int, b int) int {
	if a > b {
		return b
	}
	return a
}
// max reports the larger of a and b.
func max(a int, b int) int {
	if a < b {
		return b
	}
	return a
}
// @lc code=end
|
package namegen
import (
"context"
"fmt"
"github.com/docker/docker/client"
"github.com/hinshun/pls/pkg/failsafe"
)
// GetUnusedContainerName generates "<prefix>-<random>" names until it finds
// one no existing container uses, retrying under the failsafe policy. The
// chosen name is returned; err is non-nil when inspection fails for a reason
// other than "not found" or the retry budget is exhausted.
func GetUnusedContainerName(ctx context.Context, cli client.APIClient, prefix string) (string, error) {
	var (
		containerName string
		retryPolicy = failsafe.NewRetryPolicy()
	)
	err := failsafe.New(retryPolicy).Run(ctx, func() error {
		containerName = fmt.Sprintf("%s-%s", prefix, GetRandomName())
		_, err := cli.ContainerInspect(ctx, containerName)
		if err != nil {
			if client.IsErrNotFound(err) {
				// Not found means the name is free — stop retrying and
				// report success.
				retryPolicy.Cancel()
				return nil
			}
			return err
		}
		// Inspect succeeded, so a container with this name exists; returning
		// an error triggers another attempt with a fresh random name.
		return fmt.Errorf("container name '%s' already in use", containerName)
	})
	if err != nil {
		return containerName, err
	}
	return containerName, nil
}
|
package myreplication
import (
"fmt"
"net"
"strconv"
)
type (
	// connection wraps a raw MySQL client connection together with its
	// packet reader/writer and replication bookkeeping.
	connection struct {
		conn net.Conn
		packReader *packReader
		packWriter *packWriter
		currentDb string // last schema selected via connectDb
		masterPosition uint64
		fileName string
	}
)
// _DEFAULT_DB is the schema selected before querying server variables.
const (
	_DEFAULT_DB = "mysql"
)
// NewConnection returns a connection that has not yet been dialed; call
// ConnectAndAuth before using it.
func NewConnection() *connection {
	return new(connection)
}
// Connection exposes the underlying net.Conn; nil until ConnectAndAuth succeeds.
func (c *connection) Connection() net.Conn {
	return c.conn
}
// ConnectAndAuth dials the MySQL server at host:port and performs the
// handshake/authentication exchange. On authentication failure the TCP
// connection is closed rather than leaked (the original left it open).
func (c *connection) ConnectAndAuth(host string, port int, username, password string) error {
	conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", host, port))
	if err != nil {
		return err
	}
	c.conn = conn
	c.packReader = newPackReader(conn)
	c.packWriter = newPackWriter(conn)
	if err = c.init(username, password); err != nil {
		conn.Close()
		return err
	}
	return nil
}
// init performs the MySQL handshake: it reads the server greeting, sends the
// authentication response, and interprets the server's verdict packet.
func (c *connection) init(username, password string) (err error) {
	pack, err := c.packReader.readNextPack()
	if err != nil {
		return err
	}
	//receive handshake
	//get handshake data and parse
	handshake := &pkgHandshake{}
	err = handshake.readServer(pack)
	if err != nil {
		return
	}
	//prepare and buff handshake auth response
	// Sequence 1: the auth response directly follows the server greeting.
	pack = handshake.writeServer(username, password)
	pack.setSequence(byte(1))
	err = c.packWriter.flush(pack)
	if err != nil {
		return
	}
	// The next packet is OK or ERR; isError converts ERR into a Go error.
	pack, err = c.packReader.readNextPack()
	if err != nil {
		return err
	}
	return pack.isError()
}
// GetMasterStatus runs SHOW MASTER STATUS and returns the current binlog
// position and file name.
func (c *connection) GetMasterStatus() (pos uint32, filename string, err error) {
	rs, err := c.query("SHOW MASTER STATUS")
	if err != nil {
		return
	}
	pack, err := rs.nextRow()
	if err != nil {
		return
	}
	// First two columns of the row: binlog file name and position.
	_fileName, _ := pack.readStringLength()
	_pos, _ := pack.readStringLength()
	filename = string(_fileName)
	pos64, err := strconv.ParseUint(string(_pos), 10, 32)
	if err != nil {
		return
	}
	pos = uint32(pos64)
	// Drain the result set (consumes the terminating packet). The original
	// also assigned rs = nil afterwards, which had no effect and is removed.
	rs.nextRow()
	return
}
// ChecksumCompatibility reports whether the server has binlog checksums
// enabled. When it does, the session variable master_binlog_checksum is set
// so the server annotates replication events accordingly.
func (c *connection) ChecksumCompatibility() (ok bool, err error) {
	err = c.initDb(_DEFAULT_DB)
	if err != nil {
		return
	}
	rs, err := c.query("SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'")
	if err != nil {
		return
	}
	pack, err := rs.nextRow()
	if err != nil {
		// No row at all means the variable does not exist on this server
		// version, i.e. checksums are unsupported — not an error.
		if err == EOF_ERR {
			return false, nil
		}
		return
	}
	// Skip the variable name column; the second column is the checksum type.
	pack.readStringLength()
	_type, _ := pack.readStringLength()
	rs.nextRow()
	if len(_type) == 0 {
		return
	}
	ok = true
	_, err = c.query("set @master_binlog_checksum = @@global.binlog_checksum")
	return
}
// initDb issues a COM_INIT_DB command selecting schemaName and returns the
// server's error response, if any.
func (c *connection) initDb(schemaName string) error {
	cmd := &initDb{}
	if err := c.packWriter.flush(cmd.writeServer(schemaName)); err != nil {
		return err
	}
	reply, err := c.packReader.readNextPack()
	if err != nil {
		return err
	}
	return reply.isError()
}
// query sends a COM_QUERY for command and returns the initialized result set.
func (c *connection) query(command string) (*resultSet, error) {
	cmd := &query{}
	if err := c.packWriter.flush(cmd.writeServer(command)); err != nil {
		return nil, err
	}
	rs := &resultSet{}
	rs.setReader(c.packReader)
	if err := rs.init(); err != nil {
		return nil, err
	}
	return rs, nil
}
// connectDb switches the server-side default schema to db and returns the
// server's error response, if any.
func (c *connection) connectDb(db string) error {
	cmd := &connectDb{}
	if err := c.packWriter.flush(cmd.writeServer(db)); err != nil {
		return err
	}
	reply, err := c.packReader.readNextPack()
	if err != nil {
		return err
	}
	return reply.isError()
}
// fieldList requests the column list of table, switching the connection to
// db first when it differs from the currently selected schema.
func (c *connection) fieldList(db, table string) (*resultSet, error) {
	if c.currentDb != db {
		err := c.connectDb(db)
		if err != nil {
			// Propagate the failure; the original returned (nil, nil) here,
			// handing callers a nil result set with no error to check.
			return nil, err
		}
		// Remember the selected schema so subsequent calls skip the switch;
		// currentDb was never updated before, defeating the check above.
		c.currentDb = db
	}
	q := &fieldList{}
	pack := q.writeServer(table)
	err := c.packWriter.flush(pack)
	if err != nil {
		return nil, err
	}
	rs := &resultSet{}
	rs.setReader(c.packReader)
	err = rs.initFieldList()
	if err != nil {
		return nil, err
	}
	return rs, nil
}
func (c *connection) StartBinlogDump(position uint32, fileName string, serverId uint32) (el *eventLog, err error) {
ok, err := c.ChecksumCompatibility()
if err != nil {
return
}
register := ®isterSlave{}
pack := register.writeServer(serverId)
err = c.packWriter.flush(pack)
if err != nil {
return nil, err
}
pack, err = c.packReader.readNextPack()
if err != nil {
return nil, err
}
err = pack.isError()
if err != nil {
return nil, err
}
startBinLog := &binlogDump{}
pack = startBinLog.writeServer(position, fileName, serverId)
err = c.packWriter.flush(pack)
if err != nil {
return nil, err
}
var additionalLength int
if ok {
additionalLength = 4
}
el = newEventLog(c, additionalLength)
return el, nil
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
)
// User models one person record as stored in data/users.json.
type User struct {
	FirstName string
	LastName string
	Sex string
	Age int
	Attr []bool // per-attribute flags; semantics defined by tmpl/attrs.html
}
// templates holds the parsed page template; Must panics at startup if the
// template file is missing or invalid.
var templates = template.Must(template.ParseFiles("tmpl/attrs.html"))
func handleFunc (path string, fn func(http.ResponseWriter, *http.Request, string)) {
lenPath := len(path)
handler := func(w http.ResponseWriter, r *http.Request) {
title := r.URL.Path[lenPath:]
/*if !titleValidator.MatchString(title) {
http.NotFound(w, r)
return
}*/
fn(w, r, title)
}
http.HandleFunc(path, handler)
}
// saveHandler persists the posted "json" form value to data/users.json and
// redirects to /load/. Empty payloads skip the write but still redirect.
func saveHandler(w http.ResponseWriter, r *http.Request) {
	j := r.FormValue("json")
	if len(j) > 0 {
		if _, err := os.Stat("data"); os.IsNotExist(err) {
			// 0755, not 0644: a directory needs the execute bit to be
			// traversable; with 0644 the write below always failed.
			os.Mkdir("data", 0755)
		}
		err := ioutil.WriteFile("data/users.json", []byte(j), 0644)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			// Stop here: without this return the handler also emitted a
			// redirect on top of the error response.
			return
		}
	}
	http.Redirect(w, r, "/load/", http.StatusFound)
}
// loadHandler reads data/users.json, decodes it into []User (printed for
// inspection), and redirects to the index page. Read or decode failures now
// return a 500 immediately instead of falling through to the redirect.
func loadHandler(w http.ResponseWriter, r *http.Request) {
	var users []User
	j, err := ioutil.ReadFile("data/users.json")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// The original continued here and tried to unmarshal nil data.
		return
	}
	if err = json.Unmarshal(j, &users); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Println(users)
	http.Redirect(w, r, "/", http.StatusFound)
}
// rootHandler renders attrs.html into a buffer first so a render failure
// yields a clean 500 instead of a partial page appended to the error.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	var output bytes.Buffer
	if err := templates.ExecuteTemplate(&output, "attrs.html", nil); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// The original fell through and wrote the buffer anyway.
		return
	}
	w.Write(output.Bytes())
}
// main registers the dynamic and static routes and serves on :8080.
func main() {
	// start the server
	fmt.Println("starting")
	// dynamic content
	http.HandleFunc("/", rootHandler)
	http.HandleFunc("/save/", saveHandler)
	http.HandleFunc("/load/", loadHandler)
	// static content
	http.Handle("/css/", http.StripPrefix("/css/", http.FileServer(http.Dir("css"))))
	http.Handle("/js/", http.StripPrefix("/js/", http.FileServer(http.Dir("js"))))
	http.Handle("/data/", http.StripPrefix("/data/", http.FileServer(http.Dir("data"))))
	// ListenAndServe always returns a non-nil error; report it instead of
	// discarding it silently as the original did.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
		os.Exit(1)
	}
}
|
package resource
import (
"fmt"
"io"
"os"
"strconv"
"github.com/mebyus/ffd/cmn"
"github.com/mebyus/ffd/resource/fiction"
"github.com/mebyus/ffd/setting"
"github.com/mebyus/ffd/track/fic"
)
// Download fetches a fic from a given target. An appropriate target is either
// a fic page URL or the fic's position number in the tracked list.
// SaveSource flag indicates whether responses will be saved before parsing.
// Format argument determines format of the resulting output file.
func Download(target string, saveSource bool, format fiction.RenderFormat) error {
	if ficNumber, parseErr := strconv.ParseInt(target, 10, 64); parseErr == nil {
		// target is the number of a fic in the list
		return downloadFromList(int(ficNumber), saveSource, format)
	}
	// target is not a number, thus treat it as URL
	return downloadFromURL(target, saveSource, format)
}
// saveHistory appends target on its own line to the download history file,
// creating the file if it does not yet exist.
func saveHistory(target string) (err error) {
	// O_WRONLY is required: O_CREATE|O_APPEND alone leaves the access mode at
	// its zero value (O_RDONLY), so the WriteString below would fail.
	file, err := os.OpenFile(setting.HistoryPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
	if err != nil {
		return
	}
	defer cmn.SmartClose(file)
	_, err = io.WriteString(file, target+"\n")
	return
}
// downloadFromURL picks the tool matching the target URL, records the URL in
// the download history (best-effort), downloads the fic and saves it to the
// configured output directory in the requested format.
func downloadFromURL(target string, saveSource bool, format fiction.RenderFormat) error {
	tool, err := ChooseByTarget(target)
	if err != nil {
		return fmt.Errorf("choosing tool for %s: %v", target, err)
	}
	// A history failure is reported but never aborts the download.
	if histErr := saveHistory(target); histErr != nil {
		fmt.Printf("Saving history: %v\n", histErr)
	}
	book, err := tool.Download(target, saveSource)
	if err != nil {
		return err
	}
	return book.Save(setting.OutDir, format)
}
// downloadFromList resolves a fic by its position in the tracked list and
// downloads it via the fic's base URL.
func downloadFromList(ficNumber int, saveSource bool, format fiction.RenderFormat) error {
	entry, err := fic.Get(ficNumber)
	if err != nil {
		return err
	}
	return downloadFromURL(entry.BaseURL, saveSource, format)
}
|
package deployer
import (
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"io"
)
// Interface defines the common interface used for the deployment methods
type Interface interface {
// Status reports the current state of the deployment.
Status(ctx devspacecontext.Context) (*StatusResult, error)
// Deploy applies the deployment; forceDeploy requests a redeploy even when
// nothing changed. NOTE(review): the bool result presumably reports whether
// a deploy actually happened — confirm against implementations.
Deploy(ctx devspacecontext.Context, forceDeploy bool) (bool, error)
// Render writes the manifests that would be deployed to out, without applying them.
Render(ctx devspacecontext.Context, out io.Writer) error
}
// StatusResult holds the status of a deployment
type StatusResult struct {
Name string // deployment name
Type string // deployment method/type
Target string // what the deployment points at
Status string // human-readable status
}
|
package bindings
import (
validation "github.com/go-ozzo/ozzo-validation"
"github.com/go-ozzo/ozzo-validation/is"
)
// HelpRequest - this is the format an help would be sent to this app
type HelpRequest struct {
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Age int `json:"age"`
PhoneNumber string `json:"phone_number"`
Email string `json:"email"`
ContactAddress string `json:"contact_address"`
City string `json:"city"`
MOI string `json:"means_of_identification"` // means_of_identification
EL string `json:"education_level"` // education_level
// job details
Service string `json:"service"`
Meals string `json:"meals"` // can be null
Experience bool `json:"experience"`
YearsOfExperience int `json:"years_of_experience"`
ExperienceLocations string `json:"experience_locations"` // separated by commas
// NOTE(review): the wire name is "have_training_experience" while the field
// is named HaveTrainingCertificate — confirm which is intended before
// changing anything, since clients depend on the JSON name.
HaveTrainingCertificate bool `json:"have_training_experience"`
SchoolNames string `json:"school_names"`
SuitableStory string `json:"suitable_story"`
HoursOfWork int `json:"hours_of_work"`
// guarantor
GuarantorFirstName string `json:"guarantor_first_name"`
GuarantorLastName string `json:"guarantor_last_name"`
GuarantorPhoneNumber string `json:"guarantor_phone_number"`
GuarantorEmail string `json:"guarantor_email"`
GuarantorContactAddress string `json:"guarantor_contact_address"`
GuarantorCity string `json:"guarantor_city"`
GuarantorIdentification string `json:"guarantor_moi"`
}
// Validate - this is the validator class for the request.
// Phone numbers are constrained to exactly 11 characters; ages to 1-100;
// weekly hours of work to at most 120.
func (h HelpRequest) Validate() error {
return validation.ValidateStruct(&h,
validation.Field(&h.FirstName, validation.Required, validation.Length(3, 50)),
validation.Field(&h.LastName, validation.Required, validation.Length(3, 50)),
validation.Field(&h.Age, validation.Required, validation.Min(1), validation.Max(100)),
validation.Field(&h.Email, validation.Required, is.Email),
validation.Field(&h.PhoneNumber, validation.Required, validation.Length(11, 11)),
validation.Field(&h.ContactAddress, validation.Required, validation.Length(2, 500)),
validation.Field(&h.City, validation.Required, validation.Length(2, 50)),
validation.Field(&h.MOI, validation.Required),
validation.Field(&h.EL, validation.Required),
validation.Field(&h.Service, validation.Required),
validation.Field(&h.Meals, validation.Length(0, 500)),
// job details
// NOTE(review): Experience, YearsOfExperience and SchoolNames carry no
// rules here — confirm whether that is intentional.
validation.Field(&h.SuitableStory, validation.Required),
validation.Field(&h.HoursOfWork, validation.Required, validation.Max(120)),
// guarantor
validation.Field(&h.GuarantorFirstName, validation.Required, validation.Length(2, 50)),
validation.Field(&h.GuarantorLastName, validation.Required, validation.Length(2, 50)),
validation.Field(&h.GuarantorEmail, validation.Required, is.Email),
validation.Field(&h.GuarantorPhoneNumber, validation.Required, validation.Length(11, 11)),
validation.Field(&h.GuarantorContactAddress, validation.Required, validation.Length(5, 500)),
validation.Field(&h.GuarantorCity, validation.Required, validation.Length(2, 50)),
validation.Field(&h.GuarantorIdentification, validation.Required),
)
}
|
/*
* traPCollection API
*
* traPCollectionのAPI
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// Maintainers - the list of administrators (maintainers) of a game.
type Maintainers struct {
Maintainers []string `json:"maintainers"`
}
|
package main
import "fmt"
import "github.com/contactless/wb-mqtt-noolite/noolite"
import "github.com/evgeny-boger/wbgo"
import "math/rand"
import "sync"
import "time"
import "unsafe"
// callback handles a single noolite.Response; a non-nil error signals the
// response could not be processed.
type callback func(*noolite.Response) error

// MTRF64 drives an MTRF-64 NooLite/NooLite-F adapter: requests are queued on
// inBuff and written to the serial connection by the background loop (run).
type MTRF64 struct {
inBuff chan *noolite.Request // outbound request queue, drained by run()
nlCallbacks [64]callback // per-channel handlers, classic NooLite
nlfUsedChannels [64]bool // channels already bound (NooLite-F)
nlUsedChannels [64]bool // channels already bound (classic NooLite)
nlfCallbacks map[uint32]callback // per-address handlers, NooLite-F
conn *noolite.Connection // serial link to the adapter
Devices []*Device // devices discovered via bind responses
mu sync.Mutex // guards conn reads/writes
}
// SbytesToUint32 reinterprets 4 bytes as a uint32 via an unsafe cast (no
// copy, host byte order). NOTE(review): the result is endianness-dependent —
// confirm this matches the NooLite address wire format on target platforms.
func SbytesToUint32(in [4]byte) uint32 {
return *((*uint32)(unsafe.Pointer(&in)))
}
// Uint32To4Bytes is the inverse of SbytesToUint32: it reinterprets a uint32
// as 4 bytes in host byte order via an unsafe cast. Round-trips with
// SbytesToUint32 on any platform; the byte order itself is platform-defined.
func Uint32To4Bytes(in uint32) [4]byte {
return *((*[4]byte)(unsafe.Pointer(&in)))
}
// NewMTRF64 opens the serial connection at addr, allocates the request queue
// (64 entries) and starts the background processing loop.
func NewMTRF64(addr string) *MTRF64 {
m := new(MTRF64)
m.conn, _ = noolite.NewConnection(addr) //TODO: connection errors are silently dropped here — surface them to the caller
m.inBuff = make(chan *noolite.Request, 64)
m.nlfCallbacks = make(map[uint32]callback)
go m.run()
return m
}
// AddNooliteFDevice queues a NooLite-F bind command on a random channel
// (0-62); the background loop forwards it to the adapter.
func (m *MTRF64) AddNooliteFDevice() {
	bind := &noolite.Request{
		Ch:   byte(rand.Intn(63)),
		Mode: noolite.NooLiteFTX,
		Cmd:  noolite.BindCmd,
	}
	m.inBuff <- bind
}
// AddNooliteSensor queues a classic NooLite sensor bind (Ctr=3) on a random
// channel (0-62); the background loop forwards it to the adapter.
func (m *MTRF64) AddNooliteSensor() {
	bind := &noolite.Request{
		Mode: noolite.NooLiteRx,
		Ctr:  3,
		Ch:   byte(rand.Intn(63)),
	}
	m.inBuff <- bind
}
// Device is a single bound NooLite/NooLite-F endpoint discovered through a
// bind response; it sends commands back through its owning MTRF64.
type Device struct {
IsNewProtocol bool //false - NooLite, true - NooLite-F
IsTx bool //sensor (false) or device (true)
Channel byte
Addr uint32 //Address if NooLite-F
Type byte //Device type, if available
Status [4]byte //Status, if available
m *MTRF64 // owning adapter; carries the outbound queue
}
// Switch queues a toggle command (Cmd 4) for this device. Sensors
// (IsTx == false) are silently ignored. For NooLite-F devices the 4-byte
// address is attached and the F transmit mode is used.
func (d *Device) Switch() {
	if !d.IsTx {
		return
	}
	req := &noolite.Request{Ch: d.Channel, Cmd: 4}
	if d.IsNewProtocol {
		addr := Uint32To4Bytes(d.Addr)
		req.ID0, req.ID1, req.ID2, req.ID3 = addr[0], addr[1], addr[2], addr[3]
		req.Mode = noolite.NooLiteFTX
	} else {
		req.Mode = noolite.NooLiteTX
	}
	d.m.inBuff <- req
}
// run starts the background loop that serializes adapter access: queued
// requests from inBuff are written out, and responses are read back; a bind
// confirmation (Ctr == 3) creates and registers a new Device.
func (m *MTRF64) run() {
go func() {
var readCount int32 // responses still expected from the adapter
needRead := make(chan struct{})
for {
select {
case in := <-m.inBuff:
println("in buf")
m.mu.Lock()
_ = m.conn.Write(in) // write errors intentionally ignored — TODO confirm
m.mu.Unlock()
readCount++
case <-needRead:
m.mu.Lock()
resp, err := m.conn.Read()
m.mu.Unlock()
if err != nil {
continue
}
// Togl carries the number of pending responses, per this code's use.
if resp.Togl > 0 {
readCount = int32(resp.Togl)
}
if resp.Ctr == 3 {
// Bind confirmation: materialize a Device from the response.
d := new(Device)
d.Channel = resp.Ch
d.IsNewProtocol = resp.Mode > noolite.NooLiteRx
d.IsTx = resp.Mode != noolite.NooLiteRx
d.Type = resp.D0
d.m = m
if d.IsNewProtocol {
addr := [4]byte{resp.ID0, resp.ID1, resp.ID2, resp.ID3}
d.Addr = SbytesToUint32(addr)
}
m.Devices = append(m.Devices, d)
fmt.Printf("%+v\n", d)
fmt.Printf("%+v\n", m.Devices)
}
default:
// NOTE(review): needRead is unbuffered and both its sender and its
// receiver are this same goroutine, so this send can never be
// matched — once readCount > 0 the loop blocks here permanently.
// Consider a buffered channel or a plain boolean flag; confirm.
if readCount > 0 {
needRead <- struct{}{}
}
}
}
}()
}
// main binds a NooLite-F device through the adapter on /dev/ttyUSB0, then
// toggles every discovered device once per second, dumping the device list.
func main() {
	wbgo.SetDebuggingEnabled(true)
	bridge := NewMTRF64("/dev/ttyUSB0")
	bridge.AddNooliteFDevice()
	// bridge.AddNooliteSensor()
	for {
		time.Sleep(time.Second)
		fmt.Printf("%+v\n", bridge.Devices)
		for i := range bridge.Devices {
			bridge.Devices[i].Switch()
		}
	}
}
|
package task
import (
"net/http"
"github.com/synoday/gateway/web/router"
)
// routes list all task domain routes.
var routes = []*router.Route{
{
// GET /task/{period}: list tasks for the given period.
Method: http.MethodGet,
Path: "/task/{period}",
Handler: List,
},
{
// POST /task: create a new task.
Method: http.MethodPost,
Path: "/task",
Handler: Add,
},
{
// DELETE /task/{id}: remove a task by id.
Method: http.MethodDelete,
Path: "/task/{id}",
Handler: Remove,
},
}
|
package main
import (
"time"
"github.com/typical-go/typical-go/pkg/typgo"
"github.com/typical-go/typical-go/pkg/typmock"
)
// descriptor declares the typical-go build pipeline for this sample project:
// mock generation, tests (30s timeout, internal/* minus generated code),
// compilation, and finally running the produced binary.
var descriptor = typgo.Descriptor{
ProjectName: "typmock-sample",
ProjectVersion: "1.0.0",
Tasks: []typgo.Tasker{
// mock
&typmock.GoMock{},
// test
&typgo.GoTest{
Timeout: 30 * time.Second,
Includes: []string{"internal/*"},
Excludes: []string{"internal/generated"},
},
// compile
&typgo.GoBuild{},
// run
&typgo.RunBinary{Before: typgo.TaskNames{"build"}},
},
}
// main hands control to typical-go, which dispatches the tasks declared in
// descriptor based on the command line.
func main() {
typgo.Start(&descriptor)
}
|
package adyoulike
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/buger/jsonparser"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
)
// Builder builds a new instance of the AdYouLike adapter for the given bidder
// configuration. Only the endpoint is used; server config is ignored here.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
return &adapter{
endpoint: config.Endpoint,
}, nil
}
// adapter is the AdYouLike bidder adapter; endpoint is the bid server URL.
type adapter struct {
endpoint string
}
// MakeRequests produces at most one server request for the incoming bid
// request: every imp's bid floor is normalized to USD, its TagID is filled
// from the bidder ext "placement", and the shallow-copied request is
// marshalled as the POST body. Per-imp ext errors are collected and abort
// the request only after the loop.
func (a *adapter) MakeRequests(
	openRTBRequest *openrtb2.BidRequest,
	reqInfo *adapters.ExtraRequestInfo,
) (
	requestsToBidder []*adapters.RequestData,
	errs []error,
) {
	var err error
	var tagID string

	reqCopy := *openRTBRequest
	reqCopy.Imp = []openrtb2.Imp{}
	// All floors below are converted to USD, so the request currency is set
	// once up front (hoisted out of the loop — it is loop-invariant).
	reqCopy.Cur = []string{"USD"}
	for ind, imp := range openRTBRequest.Imp {
		// Check if imp comes with bid floor amount defined in a foreign currency
		if imp.BidFloor > 0 && imp.BidFloorCur != "" && strings.ToUpper(imp.BidFloorCur) != "USD" {
			// Convert to US dollars
			convertedValue, err := reqInfo.ConvertCurrency(imp.BidFloor, imp.BidFloorCur, "USD")
			if err != nil {
				return nil, []error{err}
			}

			// Update after conversion. All imp elements inside request.Imp are shallow copies
			// therefore, their non-pointer values are not shared memory and are safe to modify.
			imp.BidFloorCur = "USD"
			imp.BidFloor = convertedValue
		}

		reqCopy.Imp = append(reqCopy.Imp, imp)
		tagID, err = jsonparser.GetString(reqCopy.Imp[ind].Ext, "bidder", "placement")
		if err != nil {
			errs = append(errs, err)
			continue
		}
		reqCopy.Imp[ind].TagID = tagID
	}

	openRTBRequestJSON, err := json.Marshal(reqCopy)
	if err != nil {
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		return nil, errs
	}

	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	headers.Add("x-openrtb-version", "2.5")

	requestToBidder := &adapters.RequestData{
		Method:  "POST",
		Uri:     a.endpoint,
		Body:    openRTBRequestJSON,
		Headers: headers,
	}
	requestsToBidder = append(requestsToBidder, requestToBidder)

	return requestsToBidder, errs
}
// unexpectedStatusCodeFormat is the error message used for any bidder
// response status that MakeBids does not handle explicitly.
const unexpectedStatusCodeFormat = "" +
"Unexpected status code: %d. Run with request.debug = 1 for more info"
// MakeBids parses the bidder server's response into typed prebid bids.
// 204 means "no bid" (nil, nil); 400 is reported as a client (BadInput)
// error; any other non-200 status is a server error. On 200 the body is
// decoded as an OpenRTB BidResponse and each bid is typed from its imp.
func (a *adapter) MakeBids(
	openRTBRequest *openrtb2.BidRequest,
	requestToBidder *adapters.RequestData,
	bidderRawResponse *adapters.ResponseData,
) (
	bidderResponse *adapters.BidderResponse,
	errs []error,
) {
	switch bidderRawResponse.StatusCode {
	case http.StatusOK:
		// Proceed to parsing below. (Go switch cases never fall through, so
		// the previous explicit break was redundant.)
	case http.StatusNoContent:
		return nil, nil
	case http.StatusBadRequest:
		err := &errortypes.BadInput{
			Message: fmt.Sprintf(unexpectedStatusCodeFormat, bidderRawResponse.StatusCode),
		}
		return nil, []error{err}
	default:
		err := &errortypes.BadServerResponse{
			Message: fmt.Sprintf(unexpectedStatusCodeFormat, bidderRawResponse.StatusCode),
		}
		return nil, []error{err}
	}

	var openRTBBidderResponse openrtb2.BidResponse
	if err := json.Unmarshal(bidderRawResponse.Body, &openRTBBidderResponse); err != nil {
		return nil, []error{err}
	}

	bidResponse := adapters.NewBidderResponseWithBidsCapacity(len(openRTBRequest.Imp))
	bidResponse.Currency = "USD" // floors are normalized to USD in MakeRequests
	for _, seatBid := range openRTBBidderResponse.SeatBid {
		for idx := range seatBid.Bid {
			b := &adapters.TypedBid{
				// Index into the slice (not a range-value copy) so the
				// pointer refers to the response's own bid.
				Bid:     &seatBid.Bid[idx],
				BidType: getMediaTypeForImp(seatBid.Bid[idx].ImpID, openRTBRequest.Imp),
			}
			bidResponse.Bids = append(bidResponse.Bids, b)
		}
	}

	return bidResponse, nil
}
// getMediaTypeForImp determines the media type of a bid from the imp it
// answers: video or native when banner is absent and that media is present;
// banner otherwise (also the default when no imp matches). Imp IDs are
// unique within an OpenRTB request, so the scan stops at the first match.
func getMediaTypeForImp(impID string, imps []openrtb2.Imp) openrtb_ext.BidType {
	for _, imp := range imps {
		if imp.ID != impID {
			continue
		}
		if imp.Banner == nil && imp.Video != nil {
			return openrtb_ext.BidTypeVideo
		}
		if imp.Banner == nil && imp.Native != nil {
			return openrtb_ext.BidTypeNative
		}
		return openrtb_ext.BidTypeBanner
	}
	return openrtb_ext.BidTypeBanner
}
|
package common
// SMSSend is the form payload for requesting an SMS code; genre restricts
// the purpose to registration or password editing.
type SMSSend struct {
Phone string `validate:"required" form:"phone"`
Genre string `validate:"required,oneof=registered edit_password" form:"genre"`
}
// QueryAreaForm is the form payload for listing sub-areas of a parent area.
type QueryAreaForm struct {
SuperiorId *int `validate:"required" form:"superior_id" json:"superior_id" error_message:"上级地区编号~required:此为必填;"`
}
|
package main
import "fmt"
// z is a package-level variable; outside a function only var (not :=) works.
var z = "car"
// use var when it's outside function
// use := inside the function
// main demonstrates short variable declaration, reassignment, arithmetic
// operators, and reading a package-level variable.
func main() {
	count := 32 // := declares and initializes a new local (first use only)
	fmt.Println("Hello World", count)

	count = 99 // once declared, plain = reassigns
	fmt.Println("Hello World", count)

	sum := 100 + 24 // example of operators
	fmt.Println("Hello World", sum)
	fmt.Println("Hello World", z) // z comes from the package scope
}
|
import (
"strings"
)
/*
* @lc app=leetcode id=6 lang=golang
*
* [6] ZigZag Conversion
*
* https://leetcode.com/problems/zigzag-conversion/description/
*
* algorithms
* Medium (35.79%)
* Likes: 1749
* Dislikes: 4733
* Total Accepted: 472.7K
* Total Submissions: 1.3M
* Testcase Example: '"PAYPALISHIRING"\n3'
*
* The string "PAYPALISHIRING" is written in a zigzag pattern on a given number
* of rows like this: (you may want to display this pattern in a fixed font for
* better legibility)
*
*
* P A H N
* A P L S I I G
* Y I R
*
*
* And then read line by line: "PAHNAPLSIIGYIR"
*
* Write the code that will take a string and make this conversion given a
* number of rows:
*
*
* string convert(string s, int numRows);
*
* Example 1:
*
*
* Input: s = "PAYPALISHIRING", numRows = 3
* Output: "PAHNAPLSIIGYIR"
*
*
* Example 2:
*
*
* Input: s = "PAYPALISHIRING", numRows = 4
* Output: "PINALSIGYAHRPI"
* Explanation:
*
* P I N
* A L S I G
* Y A H R
* P I
*
*/
// @lc code=start
// convert dispatches to convert1 (the row-walk variant); convert2 is an
// equivalent arithmetic-index variant kept for reference.
func convert(s string, numRows int) string {
return convert1(s, numRows)
}
// line 0: indexK = 2 * numRows -2
// numRows -1, indexK = k(2*numRows-2) + numRows -1
// line i: k, 2 * numRows -2 + i or (k+1)(2*numRows-2)
/*n=numRows
Δ=2n-2 1 2n-1 4n-3
Δ= 2 2n-2 2n 4n-4 4n-2
Δ= 3 2n-3 2n+1 4n-5 .
Δ= . . . . .
Δ= . n+2 . 3n .
Δ= n-1 n+1 3n-3 3n-1 5n-5
Δ=2n-2 n 3n-2 5n-4
*/
// convert2 computes the zigzag conversion arithmetically: positions repeat
// with cycle 2*(numRows-1); offsets past the last row reflect back upward.
func convert2(s string, numRows int) string {
	if numRows == 1 || len(s) <= numRows {
		return s
	}
	cycle := 2 * (numRows - 1)
	rows := make([]string, numRows)
	for pos, ch := range s {
		r := pos % cycle
		if r >= numRows {
			// reflect: row numRows-2 down to 1
			r = cycle - r
		}
		rows[r] += string(ch)
	}
	return strings.Join(rows, "")
}
// convert1 builds the zigzag by walking the string once and appending each
// byte to its current row, bouncing direction at the top and bottom rows.
//
// Degenerate cases (empty string, 0 or 1 rows) return s unchanged.
func convert1(s string, numRows int) string {
	if len(s) == 0 || numRows == 0 || numRows == 1 {
		return s
	}
	rows := numRows
	if len(s) < rows {
		rows = len(s)
	}
	retVals := make([]string, rows)
	curRow, goingDown := 0, false
	for i := range s {
		// string(s[i]) is the single-byte equivalent of s[i:i+1].
		retVals[curRow] += string(s[i])
		// Reverse direction whenever we touch the first or last row.
		if curRow == 0 || curRow == rows-1 {
			goingDown = !goingDown
		}
		if goingDown {
			curRow++
		} else {
			curRow--
		}
	}
	// One Join instead of repeated += avoids quadratic string copying.
	return strings.Join(retVals, "")
}
// min returns the smaller of the two ints.
func min(val1, val2 int) int {
	if val2 < val1 {
		return val2
	}
	return val1
}
// @lc code=end |
package taller
import (
"testing"
"os"
"path"
"bytes"
)
const (
// TESTDIR is the directory (relative to the package) holding the template
// fixtures used by these tests.
TESTDIR string = "test"
)
// update_environment points the taller template search path (via the
// TALLER_ENV_VARIABLE environment variable) at the test fixture directory
// under the current working directory. (The old comment claimed it returned
// a path; it returns nothing.)
func update_environment() {
	current_dir, _ := os.Getwd()
	err := os.Setenv(TALLER_ENV_VARIABLE, path.Join(current_dir, TESTDIR))
	if err != nil {
		// err.String() predates Go 1; the error interface exposes Error().
		panic("Failed to set environment variable" + err.Error())
	}
}
//test if we can open a file and read the contents out of it
func TestTemplateFile(t *testing.T) {
update_environment()
template := NewTemplateFile("base.html")
content := template.Content()
// base.html is expected to contain exactly one opening <html> tag.
if bytes.Count(content, []byte("<html>")) != 1 {
t.Error("Cannot read the template")
}
}
//test if the TemplateBytes works
func TestTemplateBytes(t *testing.T) {
// An in-memory template needs no environment setup.
template := NewTemplateBytes([]byte("<html></html>"))
content := template.Content()
if bytes.Count(content, []byte("<html>")) != 1 {
t.Error("Cannot read the template")
}
}
//rendering base.html
// NOTE(review): the whole body is commented out, so this test currently
// asserts nothing — either re-enable it or delete it.
func TestBaseRender(t *testing.T) {
/*
update_environment()
template := NewTemplateFile("base.html")
rendered_content := Compile(template)
expected_content := ReadTemplateFile("results/base.html")
if string(rendered_content) != string(expected_content) {
t.Errorf("Got: \n%s\nExpected: \n%s", rendered_content,
expected_content)
}
*/
}
//test a simple [expand "template"] and [include "include.html"]
func TestExpandInclude(t *testing.T) {
update_environment()
template := NewTemplateFile("expand_include.html")
rendered_content := Render(template, Context{})
// The golden file under results/ holds the expected expansion.
expected_content := ReadTemplateFile("results/expand_include.html")
if string(rendered_content) != string(expected_content) {
t.Errorf("Got: \n%s\nExpected: \n%s", rendered_content,
expected_content)
}
}
//utils
//Check if split of paths works the way it's suppose to
func TestGetTallerPaths(t *testing.T) {
// The search path is colon-separated, like PATH.
os.Setenv(TALLER_ENV_VARIABLE, "aaa/b:xxx/yy")
paths := GetTallerPaths()
if len(paths) != 2 {
t.Errorf("Got: %d\nExpected: 2\n", len(paths))
}
if paths[0] != "aaa/b" {
t.Errorf("Got: %s\nExpected: %s\n", paths[0], "aaa/b")
}
}
//testing order resolution when reading the template.
func TestReadTemplateFile(t *testing.T) {
current_dir, _ := os.Getwd()
// results/ is listed first, so its base.html must win over test/base.html.
os.Setenv(TALLER_ENV_VARIABLE, path.Join(current_dir, TESTDIR, "results")+":"+path.Join(current_dir, TESTDIR))
content := ReadTemplateFile("base.html")
expected := []byte(`<body>
content`)
if bytes.Count(content, expected) != 1 {
t.Errorf("Cannot find:\n %s\n in:\n %s\n", expected, content)
}
}
// TestJoinBytes checks that JoinBytes concatenates its string arguments into
// one byte slice.
func TestJoinBytes(t *testing.T) {
	if !bytes.Equal(JoinBytes("a"), []byte("a")) {
		t.Errorf("Got: %s\nExpected: %s\n", JoinBytes("a"), "a")
	}
	if !bytes.Equal(JoinBytes("a", "b"), []byte("ab")) {
		// Report the value actually under test (was JoinBytes("ab"), which
		// is a different call and would mask the real failure).
		t.Errorf("Got: %s\nExpected: %s\n", JoinBytes("a", "b"), "ab")
	}
}
// TestSplitToLines checks that SplitToLines breaks input on newlines.
func TestSplitToLines(t *testing.T) {
splited := SplitToLines([]byte("a\nb"))
if !bytes.Equal(splited[0], []byte("a")) {
t.Errorf("Got: %s\nExpected: %s\n", splited[0], []byte("a"))
}
}
|
package _1_addTwoDigits
func main() {}
// addTwoDigits returns the sum of the decimal digits of a two-digit
// non-negative integer n (e.g. 29 -> 2+9 = 11).
func addTwoDigits(n int) int {
	// n is already an int, so the previous int(...) conversions were no-ops.
	return n/10 + n%10
}
package keeper
import (
"bytes"
"encoding/json"
"time"
gogotypes "github.com/gogo/protobuf/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/irismod/service/types"
)
// AddServiceBinding creates a new service binding
//
// Validation order: the service definition must exist, no duplicate binding,
// the provider must not already belong to a different owner, the deposit and
// qos must be valid, options and pricing must parse/validate, and the deposit
// must cover the pricing-derived minimum. Only then are coins moved and the
// binding written to state.
func (k Keeper) AddServiceBinding(
ctx sdk.Context,
serviceName string,
provider sdk.AccAddress,
deposit sdk.Coins,
pricing string,
qos uint64,
options string,
owner sdk.AccAddress,
) error {
if _, found := k.GetServiceDefinition(ctx, serviceName); !found {
return sdkerrors.Wrap(types.ErrUnknownServiceDefinition, serviceName)
}
if _, found := k.GetServiceBinding(ctx, serviceName, provider); found {
return sdkerrors.Wrap(types.ErrServiceBindingExists, "")
}
// A provider may be bound to at most one owner.
currentOwner, found := k.GetOwner(ctx, provider)
if found && !owner.Equals(currentOwner) {
return sdkerrors.Wrap(types.ErrNotAuthorized, "owner not matching")
}
if err := k.validateDeposit(ctx, deposit); err != nil {
return err
}
// qos (response-time promise) cannot exceed the module-wide request timeout.
maxReqTimeout := k.MaxRequestTimeout(ctx)
if qos > uint64(maxReqTimeout) {
return sdkerrors.Wrapf(
types.ErrInvalidQoS,
"qos [%d] must not be greater than maximum request timeout [%d]",
qos, maxReqTimeout,
)
}
if err := types.ValidateOptions(options); err != nil {
return err
}
parsedPricing, err := k.ParsePricing(ctx, pricing)
if err != nil {
return err
}
if err := types.ValidatePricing(parsedPricing); err != nil {
return err
}
minDeposit := k.getMinDeposit(ctx, parsedPricing)
if !deposit.IsAllGTE(minDeposit) {
return sdkerrors.Wrapf(
types.ErrInvalidDeposit,
"insufficient deposit: minimum deposit %s, %s got",
minDeposit, deposit,
)
}
// Send coins from owner's account to the deposit module account
if err := k.bankKeeper.SendCoinsFromAccountToModule(ctx, owner, types.DepositAccName, deposit); err != nil {
return err
}
// New bindings start available with no disabled timestamp.
available := true
disabledTime := time.Time{}
svcBinding := types.NewServiceBinding(serviceName, provider, deposit, pricing, qos, options, available, disabledTime, owner)
k.SetServiceBinding(ctx, svcBinding)
k.SetOwnerServiceBinding(ctx, svcBinding)
k.SetPricing(ctx, serviceName, provider, parsedPricing)
// First binding for this provider: record the owner<->provider relation.
if currentOwner.Empty() {
k.SetOwner(ctx, provider, owner)
k.SetOwnerProvider(ctx, owner, provider)
}
return nil
}
// SetServiceBindingForGenesis sets the service binding for genesis.
// The pricing is parsed before any state is written so that a malformed
// binding fails cleanly instead of leaving partially-initialized state.
func (k Keeper) SetServiceBindingForGenesis(
	ctx sdk.Context,
	svcBinding types.ServiceBinding,
) error {
	pricing, err := k.ParsePricing(ctx, svcBinding.Pricing)
	if err != nil {
		return err
	}

	k.SetServiceBinding(ctx, svcBinding)
	k.SetOwnerServiceBinding(ctx, svcBinding)
	k.SetOwner(ctx, svcBinding.Provider, svcBinding.Owner)
	k.SetOwnerProvider(ctx, svcBinding.Owner, svcBinding.Provider)
	k.SetPricing(ctx, svcBinding.ServiceName, svcBinding.Provider, pricing)

	return nil
}
// UpdateServiceBinding updates the specified service binding. Zero/empty
// arguments (qos == 0, empty deposit, empty pricing) mean "leave unchanged".
// When the binding is available and anything changed, the deposit is checked
// against the minimum derived from the effective (possibly updated) pricing.
func (k Keeper) UpdateServiceBinding(
	ctx sdk.Context,
	serviceName string,
	provider sdk.AccAddress,
	deposit sdk.Coins,
	pricing string,
	qos uint64,
	options string,
	owner sdk.AccAddress,
) error {
	binding, found := k.GetServiceBinding(ctx, serviceName, provider)
	if !found {
		return sdkerrors.Wrap(types.ErrUnknownServiceBinding, "")
	}

	if !owner.Equals(binding.Owner) {
		return sdkerrors.Wrap(types.ErrNotAuthorized, "owner not matching")
	}

	updated := false

	if qos != 0 {
		maxReqTimeout := k.MaxRequestTimeout(ctx)
		if qos > uint64(maxReqTimeout) {
			return sdkerrors.Wrapf(
				types.ErrInvalidQoS,
				"qos [%d] must not be greater than maximum request timeout [%d]",
				qos, maxReqTimeout,
			)
		}
		binding.QoS = qos
		updated = true
	}

	// add the deposit
	if !deposit.Empty() {
		if err := k.validateDeposit(ctx, deposit); err != nil {
			return err
		}
		binding.Deposit = binding.Deposit.Add(deposit...)
		updated = true
	}

	parsedPricing := k.GetPricing(ctx, serviceName, provider)

	// update the pricing
	if len(pricing) != 0 {
		newPricing, err := k.ParsePricing(ctx, pricing)
		if err != nil {
			return err
		}
		if err := types.ValidatePricing(newPricing); err != nil {
			return err
		}
		// Bug fix: the parsed pricing was previously declared with := inside
		// this block, shadowing the outer parsedPricing, so the min-deposit
		// check below ran against the stale (pre-update) pricing.
		parsedPricing = newPricing
		binding.Pricing = pricing
		k.SetPricing(ctx, serviceName, provider, newPricing)
		updated = true
	}

	// only check deposit when the binding is available and updated
	if binding.Available && updated {
		minDeposit := k.getMinDeposit(ctx, parsedPricing)
		if !binding.Deposit.IsAllGTE(minDeposit) {
			return sdkerrors.Wrapf(
				types.ErrInvalidDeposit,
				"insufficient deposit: minimum deposit %s, %s got",
				minDeposit, binding.Deposit,
			)
		}
	}

	if !deposit.Empty() {
		// Send coins from owner's account to the deposit module account
		if err := k.bankKeeper.SendCoinsFromAccountToModule(ctx, owner, types.DepositAccName, deposit); err != nil {
			return err
		}
	}

	if updated {
		k.SetServiceBinding(ctx, binding)
	}

	return nil
}
// DisableServiceBinding disables the specified service binding.
// Only the binding's owner may disable it; the disabled timestamp starts the
// refund waiting period used by RefundDeposit.
func (k Keeper) DisableServiceBinding(
ctx sdk.Context,
serviceName string,
provider,
owner sdk.AccAddress,
) error {
binding, found := k.GetServiceBinding(ctx, serviceName, provider)
if !found {
return sdkerrors.Wrap(types.ErrUnknownServiceBinding, "")
}
if !owner.Equals(binding.Owner) {
return sdkerrors.Wrap(types.ErrNotAuthorized, "owner not matching")
}
if !binding.Available {
return sdkerrors.Wrap(types.ErrServiceBindingUnavailable, "")
}
binding.Available = false
binding.DisabledTime = ctx.BlockHeader().Time
k.SetServiceBinding(ctx, binding)
return nil
}
// EnableServiceBinding enables the specified service binding.
// An optional extra deposit may be supplied; after adding it, the total
// deposit must cover the minimum derived from the current pricing.
func (k Keeper) EnableServiceBinding(
ctx sdk.Context,
serviceName string,
provider sdk.AccAddress,
deposit sdk.Coins,
owner sdk.AccAddress,
) error {
binding, found := k.GetServiceBinding(ctx, serviceName, provider)
if !found {
return sdkerrors.Wrap(types.ErrUnknownServiceBinding, "")
}
if !owner.Equals(binding.Owner) {
return sdkerrors.Wrap(types.ErrNotAuthorized, "owner not matching")
}
if binding.Available {
return sdkerrors.Wrap(types.ErrServiceBindingAvailable, "")
}
// add the deposit
if !deposit.Empty() {
if err := k.validateDeposit(ctx, deposit); err != nil {
return err
}
binding.Deposit = binding.Deposit.Add(deposit...)
}
minDeposit := k.getMinDeposit(ctx, k.GetPricing(ctx, serviceName, provider))
if !binding.Deposit.IsAllGTE(minDeposit) {
return sdkerrors.Wrapf(
types.ErrInvalidDeposit,
"insufficient deposit: minimum deposit %s, %s got",
minDeposit, binding.Deposit,
)
}
if !deposit.Empty() {
// Send coins from owner's account to the deposit module account
if err := k.bankKeeper.SendCoinsFromAccountToModule(
ctx, owner, types.DepositAccName, deposit,
); err != nil {
return err
}
}
// Re-enabling clears the disabled timestamp.
binding.Available = true
binding.DisabledTime = time.Time{}
k.SetServiceBinding(ctx, binding)
return nil
}
// RefundDeposit refunds the deposit from the specified service binding.
// The binding must be disabled, hold a non-zero deposit, and the waiting
// period (arbitration time limit + complaint retrospect, counted from the
// disabled time) must have elapsed.
func (k Keeper) RefundDeposit(ctx sdk.Context, serviceName string, provider, owner sdk.AccAddress) error {
binding, found := k.GetServiceBinding(ctx, serviceName, provider)
if !found {
return sdkerrors.Wrap(types.ErrUnknownServiceBinding, "")
}
if !owner.Equals(binding.Owner) {
return sdkerrors.Wrap(types.ErrNotAuthorized, "owner not matching")
}
if binding.Available {
return sdkerrors.Wrap(types.ErrServiceBindingAvailable, "")
}
if binding.Deposit.IsZero() {
return sdkerrors.Wrap(types.ErrInvalidDeposit, "the deposit of the service binding is zero")
}
refundableTime := binding.DisabledTime.Add(k.ArbitrationTimeLimit(ctx)).Add(k.ComplaintRetrospect(ctx))
currentTime := ctx.BlockHeader().Time
if currentTime.Before(refundableTime) {
return sdkerrors.Wrapf(types.ErrIncorrectRefundTime, "%v", refundableTime)
}
// Send coins from the deposit module account to the owner's account
if err := k.bankKeeper.SendCoinsFromModuleToAccount(
ctx, types.DepositAccName, binding.Owner, binding.Deposit,
); err != nil {
return err
}
// Zero the deposit so it cannot be refunded twice.
binding.Deposit = sdk.Coins{}
k.SetServiceBinding(ctx, binding)
return nil
}
// RefundDeposits refunds the deposits of all the service bindings.
// NOTE(review): unlike RefundDeposit, this skips the availability/time checks
// and does not zero the refunded deposits in state — presumably only used
// during module shutdown/export; confirm before calling elsewhere.
func (k Keeper) RefundDeposits(ctx sdk.Context) error {
iterator := k.AllServiceBindingsIterator(ctx)
defer iterator.Close()
for ; iterator.Valid(); iterator.Next() {
var binding types.ServiceBinding
k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &binding)
if err := k.bankKeeper.SendCoinsFromModuleToAccount(
ctx, types.DepositAccName, binding.Owner, binding.Deposit,
); err != nil {
return err
}
}
return nil
}
// SetServiceBinding sets the service binding, keyed by (serviceName, provider).
func (k Keeper) SetServiceBinding(ctx sdk.Context, svcBinding types.ServiceBinding) {
store := ctx.KVStore(k.storeKey)
bz := k.cdc.MustMarshalBinaryBare(&svcBinding)
store.Set(types.GetServiceBindingKey(svcBinding.ServiceName, svcBinding.Provider), bz)
}
// GetServiceBinding retrieves the specified service binding; found is false
// when no binding exists for (serviceName, provider).
func (k Keeper) GetServiceBinding(
ctx sdk.Context, serviceName string, provider sdk.AccAddress,
) (
svcBinding types.ServiceBinding, found bool,
) {
store := ctx.KVStore(k.storeKey)
bz := store.Get(types.GetServiceBindingKey(serviceName, provider))
if bz == nil {
return svcBinding, false
}
k.cdc.MustUnmarshalBinaryBare(bz, &svcBinding)
return svcBinding, true
}
// SetOwnerServiceBinding sets the owner service binding index entry; the
// value is empty because all information lives in the key.
func (k Keeper) SetOwnerServiceBinding(ctx sdk.Context, svcBinding types.ServiceBinding) {
store := ctx.KVStore(k.storeKey)
store.Set(types.GetOwnerServiceBindingKey(svcBinding.Owner, svcBinding.ServiceName, svcBinding.Provider), []byte{})
}
// GetOwnerServiceBindings retrieves the service bindings with the specified
// service name and owner by scanning the owner index and resolving each
// entry through GetServiceBinding.
func (k Keeper) GetOwnerServiceBindings(ctx sdk.Context, owner sdk.AccAddress, serviceName string) []*types.ServiceBinding {
store := ctx.KVStore(k.storeKey)
bindings := make([]*types.ServiceBinding, 0)
iterator := sdk.KVStorePrefixIterator(store, types.GetOwnerBindingsSubspace(owner, serviceName))
defer iterator.Close()
for ; iterator.Valid(); iterator.Next() {
// Strip the 1-byte prefix and owner address; the remainder is
// serviceName || EmptyByte || provider — confirm against the key builder.
bindingKey := iterator.Key()[sdk.AddrLen+1:]
sepIndex := bytes.Index(bindingKey, types.EmptyByte)
serviceName := string(bindingKey[0:sepIndex])
provider := sdk.AccAddress(bindingKey[sepIndex+1:])
if binding, found := k.GetServiceBinding(ctx, serviceName, provider); found {
bindings = append(bindings, &binding)
}
}
return bindings
}
// SetOwner sets an owner for the specified provider. The address is wrapped
// in a BytesValue so the codec can marshal it.
func (k Keeper) SetOwner(ctx sdk.Context, provider, owner sdk.AccAddress) {
store := ctx.KVStore(k.storeKey)
bz := k.cdc.MustMarshalBinaryBare(&gogotypes.BytesValue{Value: owner})
store.Set(types.GetOwnerKey(provider), bz)
}
// GetOwner gets the owner for the specified provider; the bool reports
// whether an owner record exists.
func (k Keeper) GetOwner(ctx sdk.Context, provider sdk.AccAddress) (sdk.AccAddress, bool) {
store := ctx.KVStore(k.storeKey)
bz := store.Get(types.GetOwnerKey(provider))
if bz == nil {
return nil, false
}
addr := gogotypes.BytesValue{}
k.cdc.MustUnmarshalBinaryBare(bz, &addr)
return addr.GetValue(), true
}
// SetOwnerProvider sets the provider with the owner (reverse index; the
// value is empty because the key carries all information).
func (k Keeper) SetOwnerProvider(ctx sdk.Context, owner, provider sdk.AccAddress) {
store := ctx.KVStore(k.storeKey)
store.Set(types.GetOwnerProviderKey(owner, provider), []byte{})
}
// OwnerProvidersIterator returns an iterator for all providers of the
// specified owner. The caller is responsible for closing it.
func (k Keeper) OwnerProvidersIterator(ctx sdk.Context, owner sdk.AccAddress) sdk.Iterator {
store := ctx.KVStore(k.storeKey)
return sdk.KVStorePrefixIterator(store, types.GetOwnerProvidersSubspace(owner))
}
// ParsePricing parses the given JSON string to Pricing. The price is accepted
// either as a decimal coin or a plain coin, then converted to the token's
// minimum denomination; a zero converted price is preserved as an explicit
// zero coin (sdk.NewCoins would drop it).
func (k Keeper) ParsePricing(ctx sdk.Context, pricing string) (p types.Pricing, err error) {
var rawPricing types.RawPricing
if err := json.Unmarshal([]byte(pricing), &rawPricing); err != nil {
return p, sdkerrors.Wrapf(types.ErrInvalidPricing, "failed to unmarshal the pricing: %s", err.Error())
}
// Try decimal-coin syntax first, then fall back to integer-coin syntax.
token, err := sdk.ParseDecCoin(rawPricing.Price)
if err != nil {
tokenPrice, err := sdk.ParseCoin(rawPricing.Price)
if err != nil {
return p, sdkerrors.Wrapf(types.ErrInvalidPricing, "invalid price: %s", err.Error())
}
token = sdk.NewDecCoinFromCoin(tokenPrice)
}
ft, err := k.tokenKeeper.GetToken(ctx, token.Denom)
if err != nil {
return p, sdkerrors.Wrapf(types.ErrInvalidPricing, "invalid price: %s", err.Error())
}
priceCoin, err := ft.ToMinCoin(token)
if err != nil {
return p, sdkerrors.Wrapf(types.ErrInvalidPricing, "invalid price: %s", err.Error())
}
if priceCoin.IsZero() {
p.Price = sdk.Coins{sdk.NewCoin(priceCoin.Denom, sdk.NewInt(0))}
} else {
p.Price = sdk.NewCoins(priceCoin)
}
p.PromotionsByTime = rawPricing.PromotionsByTime
p.PromotionsByVolume = rawPricing.PromotionsByVolume
return p, nil
}
// SetPricing sets the (already parsed) pricing for the specified service binding.
func (k Keeper) SetPricing(
ctx sdk.Context,
serviceName string,
provider sdk.AccAddress,
pricing types.Pricing,
) {
store := ctx.KVStore(k.storeKey)
bz := k.cdc.MustMarshalBinaryBare(&pricing)
store.Set(types.GetPricingKey(serviceName, provider), bz)
}
// GetPricing retrieves the pricing of the specified service binding.
// A zero-valued Pricing is returned when no entry exists.
func (k Keeper) GetPricing(ctx sdk.Context, serviceName string, provider sdk.AccAddress) (pricing types.Pricing) {
	bz := ctx.KVStore(k.storeKey).Get(types.GetPricingKey(serviceName, provider))
	if bz != nil {
		k.cdc.MustUnmarshalBinaryBare(bz, &pricing)
	}
	return pricing
}
// SetWithdrawAddress stores the withdrawal address for the given owner.
func (k Keeper) SetWithdrawAddress(ctx sdk.Context, owner, withdrawAddr sdk.AccAddress) {
	ctx.KVStore(k.storeKey).Set(types.GetWithdrawAddrKey(owner), withdrawAddr.Bytes())
}
// GetWithdrawAddress returns the withdrawal address configured for owner,
// falling back to the owner address itself when none is set.
func (k Keeper) GetWithdrawAddress(ctx sdk.Context, owner sdk.AccAddress) sdk.AccAddress {
	if bz := ctx.KVStore(k.storeKey).Get(types.GetWithdrawAddrKey(owner)); bz != nil {
		return sdk.AccAddress(bz)
	}
	return owner
}
// IterateWithdrawAddresses invokes op for every stored (owner, withdraw
// address) pair, stopping early when op returns true.
func (k Keeper) IterateWithdrawAddresses(
	ctx sdk.Context,
	op func(owner sdk.AccAddress, withdrawAddress sdk.AccAddress) (stop bool),
) {
	it := sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.WithdrawAddrKey)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		// The first key byte is the prefix; the rest is the owner address.
		owner := sdk.AccAddress(it.Key()[1:])
		withdraw := sdk.AccAddress(it.Value())
		if op(owner, withdraw) {
			return
		}
	}
}
// ServiceBindingsIterator returns an iterator over every binding of the
// named service definition. The caller must close it.
func (k Keeper) ServiceBindingsIterator(ctx sdk.Context, serviceName string) sdk.Iterator {
	return sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.GetBindingsSubspace(serviceName))
}
// AllServiceBindingsIterator returns an iterator over every stored service
// binding, regardless of service definition. The caller must close it.
func (k Keeper) AllServiceBindingsIterator(ctx sdk.Context) sdk.Iterator {
	return sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.ServiceBindingKey)
}
// IterateServiceBindings invokes op for every stored service binding,
// stopping early when op returns true.
func (k Keeper) IterateServiceBindings(
	ctx sdk.Context,
	op func(binding types.ServiceBinding) (stop bool),
) {
	it := sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.ServiceBindingKey)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		var binding types.ServiceBinding
		k.cdc.MustUnmarshalBinaryBare(it.Value(), &binding)
		if op(binding) {
			return
		}
	}
}
// getMinDeposit computes the minimum deposit required for a service
// binding: max(price * MinDepositMultiple, MinDeposit parameter).
func (k Keeper) getMinDeposit(ctx sdk.Context, pricing types.Pricing) sdk.Coins {
	baseDenom := k.BaseDenom(ctx)
	multiple := sdk.NewInt(k.MinDepositMultiple(ctx))
	floor := k.MinDeposit(ctx)

	price := pricing.Price.AmountOf(baseDenom)
	minDeposit := sdk.NewCoins(sdk.NewCoin(baseDenom, price.Mul(multiple)))
	if minDeposit.IsAllLT(floor) {
		minDeposit = floor
	}
	return minDeposit
}
// validateDeposit checks that the deposit consists of exactly one coin
// whose token's minimum unit equals the chain's base denom.
//
// Bug fix: the original dereferenced deposit[0] before checking
// len(deposit), panicking on an empty deposit. The length check now
// comes first.
func (k Keeper) validateDeposit(ctx sdk.Context, deposit sdk.Coins) error {
	baseDenom := k.BaseDenom(ctx)

	if len(deposit) != 1 {
		return sdkerrors.Wrapf(types.ErrInvalidDeposit, "deposit only accepts %s", baseDenom)
	}

	token, err := k.tokenKeeper.GetToken(ctx, deposit[0].Denom)
	if err != nil {
		return sdkerrors.Wrap(types.ErrInvalidPricing, err.Error())
	}

	if token.GetMinUnit() != baseDenom {
		return sdkerrors.Wrapf(types.ErrInvalidDeposit, "deposit only accepts %s", baseDenom)
	}
	return nil
}
|
package logrusOVH
import (
"fmt"
"github.com/sirupsen/logrus"
)
// Protocol defines the available transfer protocols.
type Protocol uint8

// Endpoint is the OVH logs endpoint; it is assigned globally by newOvhHook.
var Endpoint string

const (
	// GELFUDP for Gelf + UDP
	GELFUDP Protocol = 1 + iota
	// GELFTCP for Gelf + TCP
	GELFTCP
	// GELFTLS for Gelf + TLS
	GELFTLS
	// CAPNPROTOUDP for Cap'n proto + UDP
	CAPNPROTOUDP
	// CAPNPROTOTCP for Cap'n proto + TCP
	CAPNPROTOTCP
	// CAPNPROTOTLS for Cap'n proto + TLS
	CAPNPROTOTLS
)
// String returns the human-readable name of the protocol (reverse map).
func (p Protocol) String() string {
	switch p {
	case GELFTCP:
		return "GELFTCP"
	case GELFUDP:
		return "GELFUDP"
	case GELFTLS:
		return "GELFTLS"
	case CAPNPROTOUDP:
		return "CAPNPROTOUDP"
	case CAPNPROTOTCP:
		return "CAPNPROTOTCP"
	case CAPNPROTOTLS:
		return "CAPNPROTOTLS"
	default:
		// Fixed typo: was "UNKNOW".
		return "UNKNOWN"
	}
}
// CompressAlgo identifies the compression algorithm used when shipping entries.
type CompressAlgo uint8

// NOTE(review): these constants are untyped ints (no CompressAlgo type on
// the first one), so they convert implicitly wherever used — confirm
// whether typing them was intended.
const (
	// COMPRESSNONE No compression
	COMPRESSNONE = 1 + iota
	// COMPRESSGZIP GZIP compression for GELF
	COMPRESSGZIP
	// COMPRESSZLIB ZLIB compression for GELF
	COMPRESSZLIB
	// COMPRESSPACKNPPACKED compression for cap'n proto
	COMPRESSPACKNPPACKED
	// COMPRESSDEFLATE DEFLATE compression for GELF
	COMPRESSDEFLATE
)
// String returns a human-readable name for the compression algorithm.
// NOTE(review): the names are inconsistent ("GZIP" vs "COMPRESSZLIB");
// kept as-is because callers may depend on the exact text.
func (c CompressAlgo) String() string {
	switch c {
	case COMPRESSNONE:
		return "no compression"
	case COMPRESSGZIP:
		return "GZIP"
	case COMPRESSZLIB:
		return "COMPRESSZLIB"
	case COMPRESSPACKNPPACKED:
		return "COMPRESSPACKNPPACKED"
	case COMPRESSDEFLATE:
		return "COMPRESSDEFLATE"
	default:
		// Fixed typo: was "unknow".
		return "unknown"
	}
}
const (
	// UDPCHUNKMAXSIZE max chunk size (fragmented)
	// UDPCHUNKMAXSIZE = 8192
	// UDP_CHUNK_MAX_SIZE_NOFRAG not fragmented
	// UDP_CHUNK_MAX_SIZE_NOFRAG = 1472

	// UDP_CHUNK_MAX_SIZE_FRAG max chunk size (fragmented)
	UDP_CHUNK_MAX_SIZE_FRAG = 8192

	//UDP_CHUNK_MAX_SIZE = 8164 // 8192 - (IP header) - (UDP header)
	//UDP_CHUNK_MAX_DATA_SIZE = 8144 // UDP_CHUNK_MAX_SIZE - ( 2 + 8 + 1 + 1)
	//UDP_CHUNK_MAX_SIZE = 1420

	// UDP_CHUNK_MAX_SIZE chunk max size
	UDP_CHUNK_MAX_SIZE = 1420
	// UDP_CHUNK_MAX_DATA_SIZE chunk data max size.
	// NOTE(review): the stated formula (1420 - (2+8+1+1) = 1408) does not
	// produce 1348 — confirm which value/formula is correct.
	UDP_CHUNK_MAX_DATA_SIZE = 1348 // UDP_CHUNK_MAX_SIZE - ( 2 + 8 + 1 + 1)
)

var (
	// GELF_CHUNK_MAGIC_BYTES "magic bytes" that prefix every GELF chunk header.
	GELF_CHUNK_MAGIC_BYTES = []byte{0x1e, 0x0f}
)
// OvhHook represents an OVH PAAS Log hook for logrus.
type OvhHook struct {
	async       bool   // when true, Fire sends in a goroutine and discards the error
	endpoint    string // NOTE(review): never assigned here; the package-level Endpoint is used instead — confirm
	token       string // OVH token attached to each entry
	levels      []logrus.Level
	proto       Protocol
	compression CompressAlgo
}
// NewOvhHook returns a synchronous hook: Fire blocks until the entry is sent.
func NewOvhHook(endpoint, ovhToken string, proto Protocol) (*OvhHook, error) {
	return newOvhHook(endpoint, ovhToken, proto, false)
}

// NewAsyncOvhHook returns an asynchronous hook: Fire sends in a goroutine
// and always reports success.
func NewAsyncOvhHook(endpoint, ovhToken string, proto Protocol) (*OvhHook, error) {
	return newOvhHook(endpoint, ovhToken, proto, true)
}
// newOvhHook builds a hook for the given endpoint/token/protocol. The hook
// starts with all logrus levels enabled and no compression.
func newOvhHook(endpoint, ovhToken string, proto Protocol, async bool) (*OvhHook, error) {
	// The endpoint is stored in the package-level Endpoint variable; the
	// struct's endpoint field is left at its zero value.
	Endpoint = endpoint
	hook := OvhHook{
		async:       async,
		token:       ovhToken,
		proto:       proto,
		levels:      logrus.AllLevels,
		compression: COMPRESSNONE,
	}
	return &hook, nil
}
// SetCompression selects the compression algorithm for outgoing entries.
// Any algorithm other than COMPRESSNONE is rejected for the stream-based
// GELF transports (TCP and TLS).
func (hook *OvhHook) SetCompression(algo CompressAlgo) error {
	streamGelf := hook.proto == GELFTCP || hook.proto == GELFTLS
	if streamGelf && algo != COMPRESSNONE {
		return fmt.Errorf("compression is not available with %v", hook.proto)
	}
	hook.compression = algo
	return nil
}
// Fire is called when a log event is fired (logrus.Hook interface).
// It clones the entry, rewrites the logrus level to its syslog value and
// hands the result to Entry.send. In async mode the send runs in a
// goroutine and its error is discarded.
func (hook *OvhHook) Fire(logrusEntry *logrus.Entry) error {
	/* Convert Logrus log level to Syslog levels
	we need to clone entry
	Logrus    |  Syslog
	0 - panic |  0 - emergency
	1 - fatal |  2 - critical
	2 - error |  3 - error
	3 - warn  |  4 - warn
	4 - info  |  6 - info
	5 - debug |  7 - debug
	*/
	// Clone so the caller's entry is never mutated.
	lentry := logrus.Entry{
		Logger:  logrusEntry.Logger,
		Data:    logrusEntry.Data,
		Time:    logrusEntry.Time,
		Level:   logrusEntry.Level,
		Message: logrusEntry.Message,
		Buffer:  logrusEntry.Buffer,
	}

	// PanicLevel (0) already matches syslog emergency, so it has no case.
	switch lentry.Level {
	case logrus.FatalLevel:
		lentry.Level = 2
	case logrus.ErrorLevel:
		lentry.Level = 3
	case logrus.WarnLevel:
		lentry.Level = 4
	case logrus.InfoLevel:
		lentry.Level = 6
	case logrus.DebugLevel:
		lentry.Level = 7
	}

	e := Entry{
		entry:    &lentry,
		ovhToken: hook.token,
	}

	if hook.async {
		// Fire-and-forget: the send error cannot be reported to the caller.
		go e.send(hook.proto, hook.compression)
		return nil
	}
	return e.send(hook.proto, hook.compression)
}
// Levels returns the logging levels this hook is registered for
// (logrus.Hook interface implementation).
func (hook *OvhHook) Levels() []logrus.Level {
	return hook.levels
}
|
package main
func mergeTwoLists2(l1 *ListNode, l2 *ListNode) *ListNode {
dummy := &ListNode{}
cur := dummy
for l1 != nil && l2 != nil {
if l1.Val < l2.Val {
cur.Next = l1
cur = cur.Next
l1 = l1.Next
} else {
cur.Next = l2
cur = cur.Next
l2 = l2.Next
}
}
if l1 == nil {
cur.Next = l2
} else {
cur.Next = l1
}
return dummy.Next
} |
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package aws
import (
"context"
"github.com/aws/aws-sdk-go-v2/service/acm"
"github.com/aws/aws-sdk-go-v2/service/acm/types"
"github.com/mattermost/mattermost-cloud/model"
)
// ACMAPI represents the series of calls we require from the AWS SDK v2 ACM
// client — a consumer-side interface covering only the two operations this
// package uses.
type ACMAPI interface {
	ListCertificates(ctx context.Context, params *acm.ListCertificatesInput, optFns ...func(*acm.Options)) (*acm.ListCertificatesOutput, error)
	ListTagsForCertificate(ctx context.Context, params *acm.ListTagsForCertificateInput, optFns ...func(*acm.Options)) (*acm.ListTagsForCertificateOutput, error)
}
// newCertificateFromACMCertificateSummary converts an ACM certificate
// summary into our own certificate type. Only the ARN is carried over.
func newCertificateFromACMCertificateSummary(c types.CertificateSummary) *model.Certificate {
	return &model.Certificate{
		ARN: c.CertificateArn,
	}
}
|
package set3
import (
"bufio"
"bytes"
"cryptopals/utils"
"encoding/base64"
"math"
"os"
"testing"
)
// TestBreakFixedNonceCTRStatistically encrypts the base64 lines of 20.txt
// under CTR with a fixed nonce, breaks them statistically, and compares
// the result against the expected plaintext in 20.out.
//
// Bug fix: setup failures (missing fixture, bad base64, key generation)
// previously used t.Error and let the test continue into meaningless
// follow-up work; they now use t.Fatal. Per-line comparison failures stay
// non-fatal so every mismatch is reported.
func TestBreakFixedNonceCTRStatistically(t *testing.T) {
	file, err := os.Open("./20.txt")
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	var minLength int
	var plainTexts [][]byte
	minLength = math.MaxInt32
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		plainText, err := base64.StdEncoding.DecodeString(scanner.Text())
		if err != nil {
			t.Fatal("Failed to base64 decode string", err)
		}
		// Track the shortest plaintext; ciphertexts are later truncated to it.
		curLength := len(plainText)
		if curLength < minLength {
			minLength = curLength
		}
		plainTexts = append(plainTexts, plainText)
	}
	if err := scanner.Err(); err != nil {
		t.Fatal(err)
	}

	key, err := utils.GenerateRandomAesKey()
	if err != nil {
		t.Fatal("Failed to generate random AES key:", err)
	}
	nonce := []byte{0, 0, 0, 0, 0, 0, 0, 0}
	cipherTexts := getCTRCipherTexts(plainTexts, key, nonce)
	truncatedCipherTexts := truncateCipherTextsToCommonLength(cipherTexts, minLength)
	decrypted := breakFixedNonceCTRStatistically(truncatedCipherTexts, minLength)

	expected, err := os.Open("./20.out")
	if err != nil {
		t.Fatal(err)
	}
	defer expected.Close()

	scanner = bufio.NewScanner(expected)
	i := 0
	for scanner.Scan() {
		expectedText, err := base64.StdEncoding.DecodeString(scanner.Text())
		if err != nil {
			t.Fatal("Failed to base64 decode expected string", err)
		}
		if !bytes.Equal(expectedText, decrypted[i]) {
			t.Error("Failed to decrypt line ", i)
		}
		i++
	}
	if err := scanner.Err(); err != nil {
		t.Error(err)
	}
}
|
package main
import (
"sort"
"fmt"
)
// main sorts only the first five elements of the slice in place and prints
// the whole slice, demonstrating that sort.Ints on a sub-slice reorders the
// shared backing array.
func main() {
	nums := []int{2, 4, 3, 5, 6, 1, 2, 3}
	prefix := nums[:5]
	sort.Ints(prefix)
	fmt.Println(nums)
}
|
package bank
import (
"fmt"
"time"
)
// ProcessPayment logs a bank transfer of amount from fromAccount to
// toAccount. It currently only logs and always succeeds.
//
// Bug fix: "Transfered" -> "Transferred" in the log line, and a trailing
// newline so consecutive log lines do not run together.
func ProcessPayment(fromAccount int, toAccount int, amount int) error {
	fmt.Printf("Transferred %d from %d to %d at %v via bank transfer\n", amount, fromAccount, toAccount, time.Now().String())
	return nil
}
package logger
// glog is the package-level logger all the free functions below delegate to.
var glog *logger

// init wires up the package-level logger: stdout sink, short-file + time
// flags, DEBUG level. depth is set to 3 — presumably to skip the wrapper
// frames below when resolving caller file/line; confirm against the
// logger implementation.
func init() {
	glog = NewStdOut("", Lshortfile|Ltime, DEBUG)
	glog.depth = 3
}
// SetPrefix sets the output prefix on the package-level logger.
func SetPrefix(prefix string) {
	glog.SetPrefix(prefix)
}

// SetFlags sets the formatting flags on the package-level logger.
func SetFlags(flags int) {
	glog.SetFlags(flags)
}

// SetLevel sets the minimum level on the package-level logger.
func SetLevel(level Level) {
	glog.SetLevel(level)
}
// Trace logs v at trace level via the package-level logger.
func Trace(v ...interface{}) {
	glog.Trace(v...)
}

// Debug logs v at debug level via the package-level logger.
func Debug(v ...interface{}) {
	glog.Debug(v...)
}

// Info logs v at info level via the package-level logger.
func Info(v ...interface{}) {
	glog.Info(v...)
}

// Warn logs v at warn level and, unlike the lower levels, flushes the
// logger immediately afterwards.
func Warn(v ...interface{}) {
	glog.Warn(v...)
	glog.Flush()
}
// TODO: use panic more gracefully.
// Panic logs v, flushes all buffered output, then panics with v.
// NOTE(review): it logs via glog.Warn — possibly a copy-paste from Warn;
// confirm whether a higher severity was intended.
func Panic(v ...interface{}) {
	glog.Warn(v...)
	glog.FlushAll()
	panic(v)
}
// Flush flushes the package-level logger's buffered output.
func Flush() {
	glog.Flush()
}

// FlushAll flushes everything the package-level logger has buffered.
func FlushAll() {
	glog.FlushAll()
}
|
package core
import (
"encoding/base64"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"github.com/google/uuid"
)
// IsLinux reports whether the program is running on a Linux system.
func IsLinux() bool {
	const linuxOS = "linux"
	return runtime.GOOS == linuxOS
}
// ExecCmd runs the named command with the given arguments and returns its
// combined stdout/stderr. The output is returned even when the command
// fails, so callers can surface it alongside the error.
//
// Simplified: the original error branch returned exactly the same values
// as the success path, so the function is a direct delegation.
func ExecCmd(name string, arg ...string) ([]byte, error) {
	return exec.Command(name, arg...).CombinedOutput()
}
// Failed prints the command output (if any) and terminates the process via
// log.Fatal when err is non-nil. It is a no-op on a nil error.
// Note: log.Fatal calls os.Exit, so deferred functions will not run.
func Failed(err error, out []byte) {
	if err != nil {
		fmt.Println(string(out))
		log.Fatal(err)
	}
}
// CheckServiceExists reports whether the named system service is known to
// the `service` command, judging by its status output.
func CheckServiceExists(name string) bool {
	out, _ := ExecCmd("service", name, "status")
	status := string(out)
	if strings.Contains(status, "could not be found.") {
		return false
	}
	return !strings.Contains(status, "unrecognized service")
}
// IsServiceRunning reports whether the named service's status output shows
// it as actively running (systemd-style "Active: active (running)").
func IsServiceRunning(name string) bool {
	out, _ := ExecCmd("service", name, "status")
	return strings.Contains(string(out), "Active: active (running)")
}
// StartService starts the named system service, terminating the process
// with the command's output on failure.
//
// Fix: the command output is now forwarded to Failed (it was previously
// discarded by passing nil), so failures show what the command printed.
func StartService(name string) {
	out, err := ExecCmd("service", name, "start")
	Failed(err, out)
}
// RestartService restarts the named system service, terminating the
// process with the command's output on failure.
//
// Fix: the command output is now forwarded to Failed (it was previously
// discarded by passing nil), so failures show what the command printed.
func RestartService(name string) {
	out, err := ExecCmd("service", name, "restart")
	Failed(err, out)
}
// End prints the message and exits the process with status 0.
// Deferred functions will not run (os.Exit).
func End(message string) {
	fmt.Println(message)
	os.Exit(0)
}
// GeneratePassword returns a password of l characters taken from a fresh
// UUID string (so it may contain '-'). The length is capped at the UUID's
// 36 characters: asking for more returns the full UUID.
func GeneratePassword(l uint) string {
	id := uuid.New().String()
	if l >= uint(len(id)) {
		return id
	}
	return id[:l]
}
// GetPublicIp queries httpbin.org for the caller's public IP address.
// On any failure it returns "0.0.0.0" together with the error.
// NOTE(review): http.Get uses the default client with no timeout — a hung
// request blocks forever; consider an http.Client with Timeout set.
func GetPublicIp() (string, error) {
	defaultIp := "0.0.0.0"
	resp, err := http.Get("http://httpbin.org/ip")
	if err != nil {
		return defaultIp, err
	}
	defer resp.Body.Close()
	var res IpRes
	err = json.NewDecoder(resp.Body).Decode(&res)
	if err != nil {
		return defaultIp, err
	}
	// Keep only the first entry — presumably the origin field can be a
	// comma-separated list when proxies are involved; confirm.
	tmpArr := strings.Split(res.Origin, ",")
	return tmpArr[0], nil
}
// GenerateSSLink builds an ss:// share link (base64 "method:password"
// userinfo) for a shadowsocks inbound. Any other protocol yields "".
func GenerateSSLink(inbound Inbound, ip string) string {
	if inbound.Protocol != "shadowsocks" {
		return ""
	}
	userInfo := fmt.Sprintf("%s:%s", inbound.Settings.Method, inbound.Settings.Password)
	auth := base64.StdEncoding.EncodeToString([]byte(userInfo))
	return fmt.Sprintf("ss://%s@%s:%d", auth, ip, inbound.Port)
}
|
package preload
import (
"bytes"
"fmt"
"net/http"
)
// Source describes where to fetch the preload list and how to verify it.
type Source struct {
	// HTTP(S) URI of the list blob.
	ListURI string

	// HTTP(S) URI of the ASCII-armored PGP signature that is valid for data
	// fetched from ListURI.
	SigURI string

	// ASCII-armored PGP key to use when verifying the signature fetched
	// from SigURI.
	SigKey string
}
// PGPError is returned when Download fails due to a problem with PGP
// signature verification.
type PGPError struct {
	Err error
}

// Error implements the error interface.
func (err PGPError) Error() string {
	return "mtasts: cannot verify the PGP signature: " + err.Err.Error()
}

// Unwrap exposes the underlying error for errors.Is/errors.As.
func (err PGPError) Unwrap() error {
	return err.Err
}
// Download downloads the list and verifies the PGP signature for it using
// source URIs provided in the Source structure.
//
// SigURI can be set to an empty string to disable PGP verification.
//
// Fixes: (1) the signature status-code error previously reported
// resp.StatusCode (the list response) instead of sigResp.StatusCode;
// (2) both response bodies are now closed before their status checks, so
// non-200 responses no longer leak connections.
func Download(h *http.Client, s Source) (*List, error) {
	resp, err := h.Get(s.ListURI)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("mtasts: unexpected HTTP status: %d", resp.StatusCode)
	}
	if resp.Header.Get("Content-Type") != "application/json" {
		return nil, fmt.Errorf("mtasts: unexpected Content-Type: %s", resp.Header.Get("Content-Type"))
	}

	// Dump the body into RAM, we need it multiple times.
	buf := bytes.NewBuffer(make([]byte, 0, resp.ContentLength))
	if _, err := buf.ReadFrom(resp.Body); err != nil {
		return nil, err
	}

	if s.SigURI != "" {
		if s.SigKey == "" {
			return nil, PGPError{Err: fmt.Errorf("empty SigKey")}
		}

		sigResp, err := h.Get(s.SigURI)
		if err != nil {
			return nil, PGPError{Err: err}
		}
		defer sigResp.Body.Close()
		if sigResp.StatusCode != 200 {
			return nil, PGPError{Err: fmt.Errorf("unexpected HTTP status: %d", sigResp.StatusCode)}
		}

		if err := verifyPGP(s.SigKey, sigResp.Body, bytes.NewReader(buf.Bytes())); err != nil {
			return nil, err
		}
	}

	return Read(bytes.NewReader(buf.Bytes()))
}
|
package gbc
// Indices of the 8-bit registers inside Register.R.
const (
	A = iota
	B
	C
	D
	E
	H
	L
	F
)

// Identifiers of the 16-bit register pairs for R16/setR16. HLI and HLD
// read HL with a post-increment / post-decrement side effect.
const (
	AF = iota
	BC
	DE
	HL
	HLI
	HLD
	SP
	PC
)

// Bit positions of the Z, N, H and C flags within the F register.
const (
	flagZ, flagN, flagH, flagC = 7, 6, 5, 4
)
// Register Z80 — the CPU register file.
type Register struct {
	R   [8]byte // 8-bit registers, indexed by the A..F constants
	SP  uint16  // stack pointer
	PC  uint16  // program counter
	IME bool    // interrupt master enable (name-based; not used in this chunk)
}
// R16 returns the value of the 16-bit register pair i.
// Reading HLI/HLD returns HL and then increments/decrements HL as a side
// effect (post-inc/post-dec addressing). Panics on an unknown index.
func (r *Register) R16(i int) uint16 {
	switch i {
	case AF:
		return r.AF()
	case BC:
		return r.BC()
	case DE:
		return r.DE()
	case HL:
		return r.HL()
	case HLD:
		// post-decrement: return HL, then HL--
		hl := r.HL()
		r.setHL(hl - 1)
		return hl
	case HLI:
		// post-increment: return HL, then HL++
		hl := r.HL()
		r.setHL(hl + 1)
		return hl
	case SP:
		return r.SP
	case PC:
		return r.PC
	}
	panic("invalid register16")
}
// setR16 stores val into the 16-bit register pair i.
// NOTE(review): unlike R16 there are no HLI/HLD cases and no panic on an
// unknown index — such writes are silently dropped; confirm this
// asymmetry is intentional.
func (r *Register) setR16(i int, val uint16) {
	switch i {
	case AF:
		r.setAF(val)
	case BC:
		r.setBC(val)
	case DE:
		r.setDE(val)
	case HL:
		r.setHL(val)
	case SP:
		r.SP = val
	case PC:
		r.PC = val
	}
}
// AF returns the 16-bit pair A (high byte) | F (low byte).
func (r *Register) AF() uint16 {
	return (uint16(r.R[A]) << 8) | uint16(r.R[F])
}

// setAF splits value into A (high byte) and F (low byte).
func (r *Register) setAF(value uint16) {
	r.R[A], r.R[F] = byte(value>>8), byte(value)
}

// BC returns the 16-bit pair B (high byte) | C (low byte).
func (r *Register) BC() uint16 {
	return (uint16(r.R[B]) << 8) | uint16(r.R[C])
}

// setBC splits value into B (high byte) and C (low byte).
func (r *Register) setBC(value uint16) {
	r.R[B], r.R[C] = byte(value>>8), byte(value)
}

// DE returns the 16-bit pair D (high byte) | E (low byte).
func (r *Register) DE() uint16 {
	return (uint16(r.R[D]) << 8) | uint16(r.R[E])
}

// setDE splits value into D (high byte) and E (low byte).
func (r *Register) setDE(value uint16) {
	r.R[D], r.R[E] = byte(value>>8), byte(value)
}

// HL returns the 16-bit pair H (high byte) | L (low byte).
func (r *Register) HL() uint16 {
	return (uint16(r.R[H]) << 8) | uint16(r.R[L])
}

// setHL splits value into H (high byte) and L (low byte).
func (r *Register) setHL(value uint16) {
	r.R[H], r.R[L] = byte(value>>8), byte(value)
}
// flag

// subC reports whether computing dst - src borrows, i.e. dst < src.
// Simplified from the equivalent but obfuscated `dst < uint8(dst-src)`:
// with wrap-around, dst-src exceeds dst exactly when src > dst.
func subC(dst, src byte) bool { return dst < src }
// f reports whether flag bit idx of the F register is set.
func (g *GBC) f(idx int) bool {
	return g.Reg.R[F]&(1<<idx) != 0
}

// setF sets (flag=true) or clears (flag=false) flag bit idx of F.
func (g *GBC) setF(idx int, flag bool) {
	if flag {
		g.Reg.R[F] |= (1 << idx)
		return
	}
	g.Reg.R[F] &= ^(1 << idx)
}
// setNH sets the N and H flags.
func (g *GBC) setNH(n, h bool) {
	g.setF(flagN, n)
	g.setF(flagH, h)
}

// setZNH sets the Z, N and H flags.
func (g *GBC) setZNH(z, n, h bool) {
	g.setF(flagZ, z)
	g.setNH(n, h)
}

// setNHC sets the N, H and C flags.
func (g *GBC) setNHC(n, h, c bool) {
	g.setNH(n, h)
	g.setF(flagC, c)
}

// setZNHC sets all four flags: Z, N, H and C.
func (g *GBC) setZNHC(z, n, h, c bool) {
	g.setZNH(z, n, h)
	g.setF(flagC, c)
}
|
package main
// numIslands2 counts the 4-connected groups of '1' cells in grid.
// The grid is consumed: every visited land cell is flipped to '0'.
func numIslands2(grid [][]byte) int {
	if len(grid) == 0 {
		return 0
	}
	rows, cols := len(grid), len(grid[0])
	count := 0
	for x := 0; x < rows; x++ {
		for y := 0; y < cols; y++ {
			if grid[x][y] == '1' {
				count++
				dfs(x, y, grid)
			}
		}
	}
	return count
}

// dfs flood-fills the island containing (x, y), flipping its cells to '0'.
// Out-of-range coordinates and water cells terminate the recursion.
func dfs(x, y int, grid [][]byte) {
	if x < 0 || y < 0 || x >= len(grid) || y >= len(grid[0]) || grid[x][y] == '0' {
		return
	}
	grid[x][y] = '0'
	dfs(x-1, y, grid)
	dfs(x+1, y, grid)
	dfs(x, y-1, grid)
	dfs(x, y+1, grid)
}
// numIslands counts the 4-connected groups of '1' cells in grid, flipping
// visited cells to '0' as it marks them.
//
// Bug fix: the flood-fill bounds were `x > 0` / `y > 0`, so cells in
// row 0 and column 0 were never marked and any island touching them was
// counted once per unmarked cell. The lower bounds now include 0.
func numIslands(grid [][]byte) int {
	if len(grid) < 1 {
		return 0
	}

	n, m := len(grid), len(grid[0])
	ret := 0

	var dfsMarking func(x, y int)
	dfsMarking = func(x, y int) {
		if x >= 0 && x < n && y >= 0 && y < m && grid[x][y] == '1' {
			grid[x][y] = '0'
			dfsMarking(x, y+1)
			dfsMarking(x+1, y)
			dfsMarking(x, y-1)
			dfsMarking(x-1, y)
		}
	}

	for i := 0; i < n; i++ {
		for j := 0; j < m; j++ {
			if grid[i][j] == '1' {
				ret += 1
				dfsMarking(i, j)
			}
		}
	}
	return ret
}
|
package Activity
import (
"encoding/json"
"fmt"
"os"
)
// User is one locker record as stored in the JSON strings of the loker slice.
type User struct {
	TipeID string // ID type (e.g. category of identification)
	NoID   string // ID number
}
// CariLoker ("search locker") asks the user which key to search by and
// dispatches to the matching search routine. Any other answer exits the
// program with status 1.
func CariLoker(loker []string) {
	var prompt int
	fmt.Printf("Cari loker berdasarkan? [1] Tipe ID, [2] No ID : ")
	fmt.Scan(&prompt)
	if prompt == 1 {
		CariTipeID(loker)
	} else if prompt == 2 {
		CariNoID(loker)
	} else {
		os.Exit(1)
	}
}
// CariTipeID prompts for an ID type and reports the first locker whose
// record matches it, then shows the status overview via CekStatus.
//
// Bug fixes: (1) the old found-flag (`data`) was zeroed by every
// non-matching entry, so a match after the first element still printed
// "not found"; a boolean set only on a match replaces it. (2) a failed
// json.Unmarshal previously left the stale previous record in place and
// could false-match; malformed entries are now skipped.
func CariTipeID(loker []string) {
	var prompt string
	fmt.Printf("Cari loker berdasarkan Tipe ID : ")
	fmt.Scan(&prompt)

	found := false
	for i := 0; i < len(loker); i++ {
		var dataID User
		if err := json.Unmarshal([]byte(loker[i]), &dataID); err != nil {
			continue
		}
		if prompt == dataID.TipeID {
			fmt.Println(">>", prompt, "anda ada di dalam loker nomor", i+1)
			found = true
			break
		}
	}
	if !found {
		fmt.Println(">> Data tidak di temukan")
	}
	CekStatus(loker)
}
func CariNoID (loker []string){
var prompt string
var dataID User
var data int = 1
fmt.Printf("Cari loker berdasarkan No ID : ")
fmt.Scan(&prompt)
for i := 0; i <= len(loker)-1; i++ {
var jsonData = []byte(loker[i])
json.Unmarshal(jsonData, &dataID)
if prompt == dataID.NoID {
fmt.Println(">>", dataID.TipeID, "anda ada di dalam loker nomor", i+1)
break
}else {
data = 0
}
}
if data == 0{
fmt.Println(">> Data tidak di temukan")
}
CekStatus(loker)
} |
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package liveness
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
)
// TestShouldReplaceLiveness exercises shouldReplaceLiveness with pairs of
// liveness records, checking which transitions (epoch bumps, expiration
// moves, membership/draining changes, raw-encoding changes) require the
// cached record to be replaced.
func TestShouldReplaceLiveness(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// toMembershipStatus maps the test-table shorthand to the proto enum;
	// an unknown string is a programming error in the table.
	toMembershipStatus := func(membership string) livenesspb.MembershipStatus {
		switch membership {
		case "active":
			return livenesspb.MembershipStatus_ACTIVE
		case "decommissioning":
			return livenesspb.MembershipStatus_DECOMMISSIONING
		case "decommissioned":
			return livenesspb.MembershipStatus_DECOMMISSIONED
		default:
			err := fmt.Sprintf("unexpected membership: %s", membership)
			panic(err)
		}
	}

	// l builds a Record whose raw bytes are the marshaled liveness proto.
	l := func(epo int64, expiration hlc.Timestamp, draining bool, membership string) Record {
		liveness := livenesspb.Liveness{
			Epoch:      epo,
			Expiration: expiration.ToLegacyTimestamp(),
			Draining:   draining,
			Membership: toMembershipStatus(membership),
		}
		raw, err := protoutil.Marshal(&liveness)
		if err != nil {
			t.Fatal(err)
		}
		return Record{
			Liveness: liveness,
			raw:      raw,
		}
	}

	const (
		no  = false
		yes = true
	)

	now := hlc.Timestamp{WallTime: 12345}

	for _, test := range []struct {
		old, new Record
		exp      bool
	}{
		{
			// Epoch update only.
			l(1, hlc.Timestamp{}, false, "active"),
			l(2, hlc.Timestamp{}, false, "active"),
			yes,
		},
		{
			// No Epoch update, but Expiration update.
			l(1, now, false, "active"),
			l(1, now.Add(0, 1), false, "active"),
			yes,
		},
		{
			// No update.
			l(1, now, false, "active"),
			l(1, now, false, "active"),
			no,
		},
		{
			// Only Decommissioning changes.
			l(1, now, false, "active"),
			l(1, now, false, "decommissioning"),
			yes,
		},
		{
			// Only Decommissioning changes.
			l(1, now, false, "decommissioned"),
			l(1, now, false, "decommissioning"),
			yes,
		},
		{
			// Only Draining changes.
			l(1, now, false, "active"),
			l(1, now, true, "active"),
			yes,
		},
		{
			// Decommissioning changes, but Epoch moves backwards.
			l(10, now, true, "decommissioning"),
			l(9, now, true, "active"),
			no,
		},
		{
			// Draining changes, but Expiration moves backwards.
			l(10, now, false, "active"),
			l(10, now.Add(-1, 0), true, "active"),
			no,
		},
		{
			// Only raw encoding changes.
			l(1, now, false, "active"),
			func() Record {
				r := l(1, now, false, "active")
				r.raw = append(r.raw, []byte("different")...)
				return r
			}(),
			yes,
		},
	} {
		t.Run("", func(t *testing.T) {
			if act := shouldReplaceLiveness(context.Background(), test.old, test.new); act != test.exp {
				t.Errorf("unexpected update: %+v", test)
			}
		})
	}
}
|
package des
import (
"crypto/des"
"CryptoHashCodeClass3/utils"
"crypto/cipher"
)
/**
 * DESEnCrypt encrypts plaintext data with the secret key using DES
 * (Data Encryption Standard) in CBC mode with PKCS#5 end padding.
 * NOTE(review): the key doubles as the CBC IV, which is cryptographically
 * weak — confirm callers depend on this format before changing it.
 */
func DESEnCrypt(data []byte, key []byte) ([]byte, error) {
	// Three inputs are needed: key, data, and a block mode.
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	// Pad the plaintext up to a whole number of blocks.
	originText := utils.PKCS5EndPadding(data, block.BlockSize())
	// CBC mode; the IV is the key itself (see the note above).
	blockMode := cipher.NewCBCEncrypter(block, key)
	cipherText := make([]byte, len(originText))
	blockMode.CryptBlocks(cipherText, originText)
	return cipherText, nil
}
/**
 * DESDeCrypt decrypts the ciphertext with the DES algorithm and secret key,
 * then strips the PKCS#5 end padding. It mirrors DESEnCrypt (CBC, IV = key).
 */
func DESDeCrypt(data []byte, key []byte) ([]byte, error) {
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	// Instantiate the CBC mode; the IV is the key itself.
	mode := cipher.NewCBCDecrypter(block, key)
	originalText := make([]byte, len(data))
	mode.CryptBlocks(originalText, data)
	// Remove the padding appended at encryption time.
	originalText = utils.ClearPKCS5Padding(originalText, block.BlockSize())
	return originalText, nil
}
|
/*
Package bootstrap implements the capability to connect to an existing and online
Tinzenite peer network.
TODO: add encryption bootstrap capabilities
*/
package bootstrap
import (
"github.com/tinzenite/channel"
"github.com/tinzenite/shared"
)
/*
Create returns a struct that will allow to bootstrap to an existing Tinzenite
network. To actually start bootstrapping call Bootstrap.Start(address).
Path: the absolute path to the directory. localPeerName: the user defined name
of this peer. trusted: whether this should be a trusted peer or an encrypted
one. f: the callback to call once the bootstrap has successfully run.
*/
func Create(path, localPeerName string, trusted bool, f Success) (*Bootstrap, error) {
	// Refuse to bootstrap into an already-initialized directory.
	if shared.IsTinzenite(path) {
		return nil, shared.ErrIsTinzenite
	}
	// Build the on-disk structure matching the peer kind.
	var err error
	if trusted {
		err = shared.MakeTinzeniteDir(path)
	} else {
		err = shared.MakeEncryptedDir(path)
	}
	// creation of structure error
	if err != nil {
		return nil, err
	}
	// Create the bootstrap object and its channel callback interface.
	boot := &Bootstrap{
		path:   path,
		onDone: f}
	boot.cInterface = createChanInterface(boot)
	channel, err := channel.Create(localPeerName, nil, boot.cInterface)
	if err != nil {
		return nil, err
	}
	boot.channel = channel
	// The channel supplies the address under which this peer is reachable.
	address, err := boot.channel.Address()
	if err != nil {
		return nil, err
	}
	// make peer (at correct location!)
	peer, err := shared.CreatePeer(localPeerName, address, trusted)
	if err != nil {
		return nil, err
	}
	boot.peer = peer
	// Start the background loop; stop is buffered so Stop never blocks the sender.
	boot.wg.Add(1)
	boot.stop = make(chan bool, 1)
	go boot.run()
	return boot, nil
}
/*
Load tries to load the given directory as a bootstrap object, allowing it to
connect to an existing network. To actually start bootstrapping call
Bootstrap.Start(address). NOTE: will fail if already connected to other peers!
*/
func Load(path string, f Success) (*Bootstrap, error) {
	// Determine whether the directory holds a trusted or an encrypted peer;
	// this decides where the tox dump is loaded from below.
	trusted, err := isLoadable(path)
	if err != nil {
		return nil, err
	}
	// Create the bootstrap object and its channel callback interface.
	boot := &Bootstrap{
		path:   path,
		onDone: f}
	boot.cInterface = createChanInterface(boot)
	// Load the self peer from the layout-specific location.
	var toxPeerDump *shared.ToxPeerDump
	if trusted {
		toxPeerDump, err = shared.LoadToxDumpFrom(path + "/" + shared.STORETOXDUMPDIR)
	} else {
		toxPeerDump, err = shared.LoadToxDumpFrom(path + "/" + shared.LOCALDIR)
	}
	if err != nil {
		return nil, err
	}
	boot.peer = toxPeerDump.SelfPeer
	// Recreate the channel from the stored tox state.
	channel, err := channel.Create(boot.peer.Name, toxPeerDump.ToxData, boot.cInterface)
	if err != nil {
		return nil, err
	}
	boot.channel = channel
	// Start the background loop; stop is buffered so Stop never blocks the sender.
	boot.wg.Add(1)
	boot.stop = make(chan bool, 1)
	go boot.run()
	return boot, nil
}
/*
isLoadable checks whether the directory at path contains loadable peer
state. The flag reports whether it belongs to a TRUSTED peer (false means
encrypted); an error is returned when neither layout is present.
*/
func isLoadable(path string) (bool, error) {
	// Detection is purely path based. The trusted layout must be probed
	// first: an encrypted directory's visible folders could otherwise
	// trigger a false positive.
	trustedPath := path + "/" + shared.TINZENITEDIR + "/" + shared.LOCALDIR
	if ok, _ := shared.DirectoryExists(trustedPath); ok {
		return true, nil
	}
	encryptedPath := path + "/" + shared.LOCALDIR
	if ok, _ := shared.DirectoryExists(encryptedPath); ok {
		return false, nil
	}
	// Neither layout found: report "encrypted" alongside the error.
	return false, shared.ErrNotTinzenite
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"fmt"
"strings"
xprogress "github.com/docker/buildx/util/progress"
cgo "github.com/compose-spec/compose-go/cli"
"github.com/compose-spec/compose-go/loader"
"github.com/compose-spec/compose-go/types"
"github.com/docker/cli/opts"
"github.com/mattn/go-shellwords"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/docker/cli/cli"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
)
// runOptions collects every flag and positional argument accepted by
// `compose run`.
type runOptions struct {
	*composeOptions
	Service       string   // service to run (first positional argument)
	Command       []string // command override (remaining positional arguments)
	environment   []string
	Detach        bool
	Remove        bool
	noTty         bool // --no-TTY; reconciled with --tty in PreRunE
	tty           bool // hidden --tty flag, kept for docker run compatibility
	interactive   bool
	user          string
	workdir       string
	entrypoint    string   // raw --entrypoint value as typed
	entrypointCmd []string // entrypoint after shellwords parsing
	capAdd        opts.ListOpts
	capDrop       opts.ListOpts
	labels        []string
	volumes       []string
	publish       []string
	useAliases    bool
	servicePorts  bool
	name          string
	noDeps        bool
	ignoreOrphans bool
	quietPull     bool
}
// apply rewrites the project's definition of the target service according
// to the run options: TTY/stdin settings, port publishing, extra volumes,
// and (optionally) stripping service dependencies.
func (options runOptions) apply(project *types.Project) error {
	if options.noDeps {
		// Restrict the project to the single target service.
		err := project.ForServices([]string{options.Service}, types.IgnoreDependencies)
		if err != nil {
			return err
		}
	}

	target, err := project.GetService(options.Service)
	if err != nil {
		return err
	}

	target.Tty = !options.noTty
	target.StdinOpen = options.interactive

	// Ports from the compose file are dropped unless --service-ports is set.
	if !options.servicePorts {
		target.Ports = []types.ServicePortConfig{}
	}
	// --publish replaces, not extends, the service's port list.
	if len(options.publish) > 0 {
		target.Ports = []types.ServicePortConfig{}
		for _, p := range options.publish {
			config, err := types.ParsePortConfig(p)
			if err != nil {
				return err
			}
			target.Ports = append(target.Ports, config...)
		}
	}

	// Extra --volume mounts are appended to those from the compose file.
	if len(options.volumes) > 0 {
		for _, v := range options.volumes {
			volume, err := loader.ParseVolume(v)
			if err != nil {
				return err
			}
			target.Volumes = append(target.Volumes, volume)
		}
	}

	// Write the modified service definition back into the project.
	for i, s := range project.Services {
		if s.Name == options.Service {
			project.Services[i] = target
			break
		}
	}
	return nil
}
// runCommand builds the cobra command for `compose run`: it validates and
// normalizes the flags in PreRunE, then loads the project and delegates to
// runRun.
func runCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *cobra.Command {
	options := runOptions{
		composeOptions: &composeOptions{
			ProjectOptions: p,
		},
		capAdd:  opts.NewListOpts(nil),
		capDrop: opts.NewListOpts(nil),
	}
	createOpts := createOptions{}
	buildOpts := buildOptions{
		ProjectOptions: p,
	}
	cmd := &cobra.Command{
		Use:   "run [OPTIONS] SERVICE [COMMAND] [ARGS...]",
		Short: "Run a one-off command on a service.",
		Args:  cobra.MinimumNArgs(1),
		PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
			// First positional arg is the service; the rest override the command.
			options.Service = args[0]
			if len(args) > 1 {
				options.Command = args[1:]
			}
			if len(options.publish) > 0 && options.servicePorts {
				return fmt.Errorf("--service-ports and --publish are incompatible")
			}
			// --entrypoint is parsed shell-style into an argv slice.
			if cmd.Flags().Changed("entrypoint") {
				command, err := shellwords.Parse(options.entrypoint)
				if err != nil {
					return err
				}
				options.entrypointCmd = command
			}
			// Reconcile the hidden --tty flag with --no-TTY.
			if cmd.Flags().Changed("tty") {
				if cmd.Flags().Changed("no-TTY") {
					return fmt.Errorf("--tty and --no-TTY can't be used together")
				} else {
					options.noTty = !options.tty
				}
			}
			return nil
		}),
		RunE: Adapt(func(ctx context.Context, args []string) error {
			project, err := p.ToProject([]string{options.Service}, cgo.WithResolvedPaths(true), cgo.WithDiscardEnvFile)
			if err != nil {
				return err
			}

			if createOpts.quietPull {
				buildOpts.Progress = xprogress.PrinterModeQuiet
			}

			options.ignoreOrphans = utils.StringToBool(project.Environment[ComposeIgnoreOrphans])
			return runRun(ctx, backend, project, options, createOpts, buildOpts, streams)
		}),
		ValidArgsFunction: completeServiceNames(p),
	}
	flags := cmd.Flags()
	flags.BoolVarP(&options.Detach, "detach", "d", false, "Run container in background and print container ID")
	flags.StringArrayVarP(&options.environment, "env", "e", []string{}, "Set environment variables")
	flags.StringArrayVarP(&options.labels, "label", "l", []string{}, "Add or override a label")
	flags.BoolVar(&options.Remove, "rm", false, "Automatically remove the container when it exits")
	flags.BoolVarP(&options.noTty, "no-TTY", "T", !streams.Out().IsTerminal(), "Disable pseudo-TTY allocation (default: auto-detected).")
	flags.StringVar(&options.name, "name", "", "Assign a name to the container")
	flags.StringVarP(&options.user, "user", "u", "", "Run as specified username or uid")
	flags.StringVarP(&options.workdir, "workdir", "w", "", "Working directory inside the container")
	flags.StringVar(&options.entrypoint, "entrypoint", "", "Override the entrypoint of the image")
	flags.Var(&options.capAdd, "cap-add", "Add Linux capabilities")
	flags.Var(&options.capDrop, "cap-drop", "Drop Linux capabilities")
	flags.BoolVar(&options.noDeps, "no-deps", false, "Don't start linked services.")
	flags.StringArrayVarP(&options.volumes, "volume", "v", []string{}, "Bind mount a volume.")
	flags.StringArrayVarP(&options.publish, "publish", "p", []string{}, "Publish a container's port(s) to the host.")
	flags.BoolVar(&options.useAliases, "use-aliases", false, "Use the service's network useAliases in the network(s) the container connects to.")
	flags.BoolVar(&options.servicePorts, "service-ports", false, "Run command with the service's ports enabled and mapped to the host.")
	flags.BoolVar(&options.quietPull, "quiet-pull", false, "Pull without printing progress information.")
	flags.BoolVar(&createOpts.Build, "build", false, "Build image before starting container.")
	flags.BoolVar(&createOpts.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file.")

	// --interactive and --tty mirror `docker run` defaults; --tty stays hidden.
	cmd.Flags().BoolVarP(&options.interactive, "interactive", "i", true, "Keep STDIN open even if not attached.")
	cmd.Flags().BoolVarP(&options.tty, "tty", "t", true, "Allocate a pseudo-TTY.")
	cmd.Flags().MarkHidden("tty") //nolint:errcheck

	flags.SetNormalizeFunc(normalizeRunFlags)
	flags.SetInterspersed(false)
	return cmd
}
// normalizeRunFlags maps legacy plural flag spellings ("volumes", "labels")
// onto the canonical singular flag names so older invocations keep working.
func normalizeRunFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
	if name == "volumes" {
		return pflag.NormalizedName("volume")
	}
	if name == "labels" {
		return pflag.NormalizedName("label")
	}
	return pflag.NormalizedName(name)
}
// runRun implements `compose run SERVICE [COMMAND...]`: it applies the CLI
// options to the project, starts the service's dependencies, then creates a
// one-off container for the requested service and mirrors its exit code.
func runRun(ctx context.Context, backend api.Service, project *types.Project, options runOptions, createOpts createOptions, buildOpts buildOptions, streams api.Streams) error {
	err := options.apply(project)
	if err != nil {
		return err
	}
	err = createOpts.Apply(project)
	if err != nil {
		return err
	}
	// Start dependency services first, wrapped in progress.Run writing to
	// streams.Err() so progress output goes to stderr.
	err = progress.Run(ctx, func(ctx context.Context) error {
		var buildForDeps *api.BuildOptions
		if !createOpts.noBuild {
			// allow dependencies needing build to be implicitly selected
			bo, err := buildOpts.toAPIBuildOptions(nil)
			if err != nil {
				return err
			}
			buildForDeps = &bo
		}
		return startDependencies(ctx, backend, *project, buildForDeps, options.Service, options.ignoreOrphans)
	}, streams.Err())
	if err != nil {
		return err
	}
	// Parse each --label argument as a KEY=VALUE pair.
	labels := types.Labels{}
	for _, s := range options.labels {
		parts := strings.SplitN(s, "=", 2)
		if len(parts) != 2 {
			return fmt.Errorf("label must be set as KEY=VALUE")
		}
		labels[parts[0]] = parts[1]
	}
	var buildForRun *api.BuildOptions
	if !createOpts.noBuild {
		// dependencies have already been started above, so only the service
		// being run might need to be built at this point
		bo, err := buildOpts.toAPIBuildOptions([]string{options.Service})
		if err != nil {
			return err
		}
		buildForRun = &bo
	}
	// start container and attach to container streams
	runOpts := api.RunOptions{
		Build:             buildForRun,
		Name:              options.name,
		Service:           options.Service,
		Command:           options.Command,
		Detach:            options.Detach,
		AutoRemove:        options.Remove,
		Tty:               !options.noTty,
		Interactive:       options.interactive,
		WorkingDir:        options.workdir,
		User:              options.user,
		CapAdd:            options.capAdd.GetAll(),
		CapDrop:           options.capDrop.GetAll(),
		Environment:       options.environment,
		Entrypoint:        options.entrypointCmd,
		Labels:            labels,
		UseNetworkAliases: options.useAliases,
		NoDeps:            options.noDeps,
		Index:             0,
		QuietPull:         options.quietPull,
	}
	// Propagate -i/--interactive into the service definition so the created
	// container keeps STDIN open when requested.
	for i, service := range project.Services {
		if service.Name == options.Service {
			service.StdinOpen = options.interactive
			project.Services[i] = service
		}
	}
	exitCode, err := backend.RunOneOffContainer(ctx, project, runOpts)
	// A non-zero container exit code is surfaced as cli.StatusError so the
	// calling process exits with the same status.
	if exitCode != 0 {
		errMsg := ""
		if err != nil {
			errMsg = err.Error()
		}
		return cli.StatusError{StatusCode: exitCode, Status: errMsg}
	}
	return err
}
// startDependencies creates and starts every service in the project except
// requestedServiceName, which is moved to the disabled list so the backend
// does not start it alongside its dependencies.
func startDependencies(ctx context.Context, backend api.Service, project types.Project, buildOpts *api.BuildOptions, requestedServiceName string, ignoreOrphans bool) error {
	deps := types.Services{}
	var requested types.ServiceConfig
	// Partition services: the requested one is set aside, the rest are deps.
	for _, svc := range project.Services {
		if svc.Name == requestedServiceName {
			requested = svc
			continue
		}
		deps = append(deps, svc)
	}
	project.Services = deps
	project.DisabledServices = append(project.DisabledServices, requested)
	createOptions := api.CreateOptions{
		Build:         buildOpts,
		IgnoreOrphans: ignoreOrphans,
	}
	if err := backend.Create(ctx, &project, createOptions); err != nil {
		return err
	}
	if len(deps) == 0 {
		return nil
	}
	return backend.Start(ctx, project.Name, api.StartOptions{
		Project: &project,
	})
}
|
package main
//2383. 赢得比赛需要的最少训练时长
//你正在参加一场比赛,给你两个 正 整数 initialEnergy 和 initialExperience 分别表示你的初始精力和初始经验。
//
//另给你两个下标从 0 开始的整数数组 energy 和 experience,长度均为 n 。
//
//你将会 依次 对上 n 个对手。第 i 个对手的精力和经验分别用 energy[i] 和 experience[i] 表示。当你对上对手时,需要在经验和精力上都 严格 超过对手才能击败他们,然后在可能的情况下继续对上下一个对手。
//
//击败第 i 个对手会使你的经验 增加 experience[i],但会将你的精力 减少 energy[i] 。
//
//在开始比赛前,你可以训练几个小时。每训练一个小时,你可以选择将经验增加 1 或者将精力增加 1 。
//
//返回击败全部 n 个对手需要训练的 最少 小时数目。
//
//
//
//示例 1:
//
//输入:initialEnergy = 5, initialExperience = 3, energy = [1,4,3,2], experience = [2,6,3,1]
//输出:8
//解释:在 6 小时训练后,你可以将精力提高到 11 ,并且再训练 2 个小时将经验提高到 5 。
//按以下顺序与对手比赛:
//- 你的精力与经验都超过第 0 个对手,所以获胜。
//精力变为:11 - 1 = 10 ,经验变为:5 + 2 = 7 。
//- 你的精力与经验都超过第 1 个对手,所以获胜。
//精力变为:10 - 4 = 6 ,经验变为:7 + 6 = 13 。
//- 你的精力与经验都超过第 2 个对手,所以获胜。
//精力变为:6 - 3 = 3 ,经验变为:13 + 3 = 16 。
//- 你的精力与经验都超过第 3 个对手,所以获胜。
//精力变为:3 - 2 = 1 ,经验变为:16 + 1 = 17 。
//在比赛前进行了 8 小时训练,所以返回 8 。
//可以证明不存在更小的答案。
//示例 2:
//
//输入:initialEnergy = 2, initialExperience = 4, energy = [1], experience = [3]
//输出:0
//解释:你不需要额外的精力和经验就可以赢得比赛,所以返回 0 。
//
//
//提示:
//
//n == energy.length == experience.length
//1 <= n <= 100
//1 <= initialEnergy, initialExperience, energy[i], experience[i] <= 100
// minNumberOfHours returns the minimum number of training hours needed so
// that both energy and experience strictly exceed every opponent's at the
// moment of each fight. Each training hour adds 1 to either stat.
func minNumberOfHours(initialEnergy int, initialExperience int, energy []int, experience []int) int {
	totalEnergy := 0
	for _, cost := range energy {
		totalEnergy += cost
	}
	hours := 0
	// Energy only ever decreases, so it suffices to start with strictly
	// more than the sum of all opponents' energy.
	if deficit := totalEnergy - initialEnergy; deficit >= 0 {
		hours += deficit + 1
	}
	exp := initialExperience
	for _, opponent := range experience {
		if exp <= opponent {
			// Train just enough to reach opponent+1 before this fight.
			hours += opponent + 1 - exp
			exp = opponent + 1
		}
		// Winning the fight adds the opponent's experience to ours.
		exp += opponent
	}
	return hours
}
|
package main
import (
"fmt"
"github.com/fsetiawan29/design-pattern/structural/adapter"
)
// main demonstrates the adapter pattern: square pegs cannot be checked
// against a round hole directly, so they are wrapped in an adapter first.
func main() {
	hole := adapter.NewRoundHole(5)
	peg := adapter.NewRoundPeg(5)
	fmt.Printf("%+v\n", hole.Fits(peg))

	smallPeg := adapter.NewSquarePeg(5)
	largePeg := adapter.NewSquarePeg(10)
	// hole.Fits(smallPeg) would not compile; adapt the pegs instead.
	fmt.Printf("%+v\n", hole.Fits(adapter.NewSquarePegAdapter(smallPeg)))
	fmt.Printf("%+v\n", hole.Fits(adapter.NewSquarePegAdapter(largePeg)))
}
|
package schema
// CliConfiguration structure represents schema for `atmos.yaml` CLI config
type CliConfiguration struct {
	BasePath     string       `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
	Components   Components   `yaml:"components" json:"components" mapstructure:"components"`
	Stacks       Stacks       `yaml:"stacks" json:"stacks" mapstructure:"stacks"`
	Workflows    Workflows    `yaml:"workflows" json:"workflows" mapstructure:"workflows"`
	Logs         Logs         `yaml:"logs" json:"logs" mapstructure:"logs"`
	Commands     []Command    `yaml:"commands" json:"commands" mapstructure:"commands"`
	Integrations Integrations `yaml:"integrations" json:"integrations" mapstructure:"integrations"`
	Schemas      Schemas      `yaml:"schemas" json:"schemas" mapstructure:"schemas"`
	Initialized  bool         `yaml:"initialized" json:"initialized" mapstructure:"initialized"`
	// NOTE(review): the fields below carry no mapstructure tag, so they are
	// not decoded from `atmos.yaml`; they appear to be derived/absolute
	// paths populated at runtime — confirm against the config loader.
	StacksBaseAbsolutePath        string   `yaml:"stacksBaseAbsolutePath" json:"stacksBaseAbsolutePath"`
	IncludeStackAbsolutePaths     []string `yaml:"includeStackAbsolutePaths" json:"includeStackAbsolutePaths"`
	ExcludeStackAbsolutePaths     []string `yaml:"excludeStackAbsolutePaths" json:"excludeStackAbsolutePaths"`
	TerraformDirAbsolutePath      string   `yaml:"terraformDirAbsolutePath" json:"terraformDirAbsolutePath"`
	HelmfileDirAbsolutePath       string   `yaml:"helmfileDirAbsolutePath" json:"helmfileDirAbsolutePath"`
	StackConfigFilesRelativePaths []string `yaml:"stackConfigFilesRelativePaths" json:"stackConfigFilesRelativePaths"`
	StackConfigFilesAbsolutePaths []string `yaml:"stackConfigFilesAbsolutePaths" json:"stackConfigFilesAbsolutePaths"`
	StackType                     string   `yaml:"stackType" json:"StackType"`
}
// Terraform defines the terraform component settings of the CLI config.
type Terraform struct {
	BasePath                string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
	ApplyAutoApprove        bool   `yaml:"apply_auto_approve" json:"apply_auto_approve" mapstructure:"apply_auto_approve"`
	DeployRunInit           bool   `yaml:"deploy_run_init" json:"deploy_run_init" mapstructure:"deploy_run_init"`
	InitRunReconfigure      bool   `yaml:"init_run_reconfigure" json:"init_run_reconfigure" mapstructure:"init_run_reconfigure"`
	AutoGenerateBackendFile bool   `yaml:"auto_generate_backend_file" json:"auto_generate_backend_file" mapstructure:"auto_generate_backend_file"`
}

// Helmfile defines the helmfile component settings of the CLI config.
type Helmfile struct {
	BasePath              string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
	UseEKS                bool   `yaml:"use_eks" json:"use_eks" mapstructure:"use_eks"`
	KubeconfigPath        string `yaml:"kubeconfig_path" json:"kubeconfig_path" mapstructure:"kubeconfig_path"`
	HelmAwsProfilePattern string `yaml:"helm_aws_profile_pattern" json:"helm_aws_profile_pattern" mapstructure:"helm_aws_profile_pattern"`
	ClusterNamePattern    string `yaml:"cluster_name_pattern" json:"cluster_name_pattern" mapstructure:"cluster_name_pattern"`
}

// Components groups the per-tool component configuration sections.
type Components struct {
	Terraform Terraform `yaml:"terraform" json:"terraform" mapstructure:"terraform"`
	Helmfile  Helmfile  `yaml:"helmfile" json:"helmfile" mapstructure:"helmfile"`
}

// Stacks defines where stack config files live and how they are matched.
type Stacks struct {
	BasePath      string   `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
	IncludedPaths []string `yaml:"included_paths" json:"included_paths" mapstructure:"included_paths"`
	ExcludedPaths []string `yaml:"excluded_paths" json:"excluded_paths" mapstructure:"excluded_paths"`
	NamePattern   string   `yaml:"name_pattern" json:"name_pattern" mapstructure:"name_pattern"`
}

// Workflows defines where workflow definition files live.
type Workflows struct {
	BasePath string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
}

// Logs defines log output destination and verbosity.
type Logs struct {
	File  string `yaml:"file" json:"file" mapstructure:"file"`
	Level string `yaml:"level" json:"level" mapstructure:"level"`
}

// Context holds the naming-context variables (namespace/tenant/environment/
// stage/region etc.) used to identify a component within a stack.
type Context struct {
	Namespace     string   `yaml:"namespace" json:"namespace" mapstructure:"namespace"`
	Tenant        string   `yaml:"tenant" json:"tenant" mapstructure:"tenant"`
	Environment   string   `yaml:"environment" json:"environment" mapstructure:"environment"`
	Stage         string   `yaml:"stage" json:"stage" mapstructure:"stage"`
	Region        string   `yaml:"region" json:"region" mapstructure:"region"`
	Component     string   `yaml:"component" json:"component" mapstructure:"component"`
	BaseComponent string   `yaml:"base_component" json:"base_component" mapstructure:"base_component"`
	ComponentPath string   `yaml:"component_path" json:"component_path" mapstructure:"component_path"`
	Workspace     string   `yaml:"workspace" json:"workspace" mapstructure:"workspace"`
	Attributes    []string `yaml:"attributes" json:"attributes" mapstructure:"attributes"`
	File          string   `yaml:"file" json:"file" mapstructure:"file"`
	Folder        string   `yaml:"folder" json:"folder" mapstructure:"folder"`
}
// ArgsAndFlagsInfo holds the raw arguments and flags parsed from the
// command line before they are merged with configuration.
type ArgsAndFlagsInfo struct {
	AdditionalArgsAndFlags  []string
	SubCommand              string
	SubCommand2             string
	ComponentFromArg        string
	GlobalOptions           []string
	TerraformDir            string
	HelmfileDir             string
	ConfigDir               string
	StacksDir               string
	WorkflowsDir            string
	BasePath                string
	DeployRunInit           string
	InitRunReconfigure      string
	AutoGenerateBackendFile string
	UseTerraformPlan        bool
	PlanFile                string
	DryRun                  bool
	SkipInit                bool
	NeedHelp                bool
	JsonSchemaDir           string
	OpaDir                  string
	CueDir                  string
	RedirectStdErr          string
}

// ConfigAndStacksInfo aggregates the resolved configuration, stack and
// component information for a single command invocation.
type ConfigAndStacksInfo struct {
	StackFromArg                  string
	Stack                         string
	StackFile                     string
	ComponentType                 string
	ComponentFromArg              string
	Component                     string
	ComponentFolderPrefix         string
	ComponentFolderPrefixReplaced string
	BaseComponentPath             string
	BaseComponent                 string
	FinalComponent                string
	Command                       string
	SubCommand                    string
	SubCommand2                   string
	ComponentSection              map[string]any
	ComponentVarsSection          map[any]any
	ComponentSettingsSection      map[any]any
	ComponentEnvSection           map[any]any
	ComponentEnvList              []string
	ComponentBackendSection       map[any]any
	ComponentBackendType          string
	AdditionalArgsAndFlags        []string
	GlobalOptions                 []string
	BasePath                      string
	TerraformDir                  string
	HelmfileDir                   string
	ConfigDir                     string
	StacksDir                     string
	WorkflowsDir                  string
	Context                       Context
	ContextPrefix                 string
	DeployRunInit                 string
	InitRunReconfigure            string
	AutoGenerateBackendFile       string
	UseTerraformPlan              bool
	PlanFile                      string
	DryRun                        bool
	SkipInit                      bool
	ComponentInheritanceChain     []string
	ComponentImportsSection       []string
	NeedHelp                      bool
	ComponentIsAbstract           bool
	ComponentMetadataSection      map[any]any
	TerraformWorkspace            string
	JsonSchemaDir                 string
	OpaDir                        string
	CueDir                        string
	AtmosCliConfigPath            string
	AtmosBasePath                 string
	RedirectStdErr                string
}
// Workflows

// WorkflowStep is a single step of a workflow: a command, optionally bound
// to a stack and a step type.
type WorkflowStep struct {
	Name    string `yaml:"name" json:"name" mapstructure:"name"`
	Command string `yaml:"command" json:"command" mapstructure:"command"`
	Stack   string `yaml:"stack,omitempty" json:"stack,omitempty" mapstructure:"stack"`
	Type    string `yaml:"type,omitempty" json:"type,omitempty" mapstructure:"type"`
}

// WorkflowDefinition describes one workflow: its steps, an optional
// description, and an optional default stack applied to the steps.
type WorkflowDefinition struct {
	Description string         `yaml:"description,omitempty" json:"description,omitempty" mapstructure:"description"`
	Steps       []WorkflowStep `yaml:"steps" json:"steps" mapstructure:"steps"`
	Stack       string         `yaml:"stack,omitempty" json:"stack,omitempty" mapstructure:"stack"`
}

// WorkflowConfig maps workflow names to their definitions.
type WorkflowConfig map[string]WorkflowDefinition

// WorkflowFile maps a top-level key to a workflow config in a workflow file.
type WorkflowFile map[string]WorkflowConfig

// EKS update-kubeconfig

// AwsEksUpdateKubeconfigContext carries the parameters for running
// `aws eks update-kubeconfig` for a component in a stack.
type AwsEksUpdateKubeconfigContext struct {
	Component   string
	Stack       string
	Profile     string
	ClusterName string
	Kubeconfig  string
	RoleArn     string
	DryRun      bool
	Verbose     bool
	Alias       string
	Namespace   string
	Tenant      string
	Environment string
	Stage       string
	Region      string
}
// Component vendoring (`component.yaml` file)

// VendorComponentSource describes where a vendored component is pulled from.
type VendorComponentSource struct {
	Type          string   `yaml:"type" json:"type" mapstructure:"type"`
	Uri           string   `yaml:"uri" json:"uri" mapstructure:"uri"`
	Version       string   `yaml:"version" json:"version" mapstructure:"version"`
	IncludedPaths []string `yaml:"included_paths" json:"included_paths" mapstructure:"included_paths"`
	ExcludedPaths []string `yaml:"excluded_paths" json:"excluded_paths" mapstructure:"excluded_paths"`
}

// VendorComponentMixins describes an additional file mixed into a vendored
// component.
type VendorComponentMixins struct {
	Type     string `yaml:"type" json:"type" mapstructure:"type"`
	Uri      string `yaml:"uri" json:"uri" mapstructure:"uri"`
	Version  string `yaml:"version" json:"version" mapstructure:"version"`
	Filename string `yaml:"filename" json:"filename" mapstructure:"filename"`
}

// VendorComponentSpec is the `spec` section of a `component.yaml` file.
type VendorComponentSpec struct {
	Source VendorComponentSource
	Mixins []VendorComponentMixins
}

// VendorComponentMetadata is the `metadata` section of a `component.yaml` file.
type VendorComponentMetadata struct {
	Name        string `yaml:"name" json:"name" mapstructure:"name"`
	Description string `yaml:"description" json:"description" mapstructure:"description"`
}

// VendorComponentConfig is the full schema of a `component.yaml` file.
type VendorComponentConfig struct {
	ApiVersion string `yaml:"apiVersion" json:"apiVersion" mapstructure:"apiVersion"`
	Kind       string `yaml:"kind" json:"kind" mapstructure:"kind"`
	Metadata   VendorComponentMetadata
	Spec       VendorComponentSpec `yaml:"spec" json:"spec" mapstructure:"spec"`
}

// Custom CLI commands

// Command describes a custom CLI command, possibly with nested subcommands.
type Command struct {
	Name            string                 `yaml:"name" json:"name" mapstructure:"name"`
	Description     string                 `yaml:"description" json:"description" mapstructure:"description"`
	Env             []CommandEnv           `yaml:"env" json:"env" mapstructure:"env"`
	Arguments       []CommandArgument      `yaml:"arguments" json:"arguments" mapstructure:"arguments"`
	Flags           []CommandFlag          `yaml:"flags" json:"flags" mapstructure:"flags"`
	ComponentConfig CommandComponentConfig `yaml:"component_config" json:"component_config" mapstructure:"component_config"`
	Steps           []string               `yaml:"steps" json:"steps" mapstructure:"steps"`
	Commands        []Command              `yaml:"commands" json:"commands" mapstructure:"commands"`
	Verbose         bool                   `yaml:"verbose" json:"verbose" mapstructure:"verbose"`
}

// CommandArgument describes a positional argument of a custom command.
type CommandArgument struct {
	Name        string `yaml:"name" json:"name" mapstructure:"name"`
	Description string `yaml:"description" json:"description" mapstructure:"description"`
}

// CommandFlag describes a flag of a custom command.
type CommandFlag struct {
	Name        string `yaml:"name" json:"name" mapstructure:"name"`
	Shorthand   string `yaml:"shorthand" json:"shorthand" mapstructure:"shorthand"`
	Type        string `yaml:"type" json:"type" mapstructure:"type"`
	Description string `yaml:"description" json:"description" mapstructure:"description"`
	Usage       string `yaml:"usage" json:"usage" mapstructure:"usage"`
	Required    bool   `yaml:"required" json:"required" mapstructure:"required"`
}

// CommandEnv describes an environment variable set for a custom command;
// the value can be given literally or produced by a command.
type CommandEnv struct {
	Key          string `yaml:"key" json:"key" mapstructure:"key"`
	Value        string `yaml:"value" json:"value" mapstructure:"value"`
	ValueCommand string `yaml:"valueCommand" json:"valueCommand" mapstructure:"valueCommand"`
}

// CommandComponentConfig binds a custom command to a component and stack.
type CommandComponentConfig struct {
	Component string `yaml:"component" json:"component" mapstructure:"component"`
	Stack     string `yaml:"stack" json:"stack" mapstructure:"stack"`
}
// Integrations

// Integrations groups third-party integration settings.
type Integrations struct {
	Atlantis Atlantis `yaml:"atlantis" json:"atlantis" mapstructure:"atlantis"`
}

// Atlantis integration

// Atlantis holds the templates used to generate an Atlantis repo config.
type Atlantis struct {
	Path              string                           `yaml:"path" json:"path" mapstructure:"path"`
	ConfigTemplates   map[string]AtlantisRepoConfig    `yaml:"config_templates" json:"config_templates" mapstructure:"config_templates"`
	ProjectTemplates  map[string]AtlantisProjectConfig `yaml:"project_templates" json:"project_templates" mapstructure:"project_templates"`
	WorkflowTemplates map[string]any                   `yaml:"workflow_templates" json:"workflow_templates" mapstructure:"workflow_templates"`
}

// AtlantisRepoConfig is the repo-level template for an Atlantis config.
type AtlantisRepoConfig struct {
	Version                   int      `yaml:"version" json:"version" mapstructure:"version"`
	Automerge                 bool     `yaml:"automerge" json:"automerge" mapstructure:"automerge"`
	DeleteSourceBranchOnMerge bool     `yaml:"delete_source_branch_on_merge" json:"delete_source_branch_on_merge" mapstructure:"delete_source_branch_on_merge"`
	ParallelPlan              bool     `yaml:"parallel_plan" json:"parallel_plan" mapstructure:"parallel_plan"`
	ParallelApply             bool     `yaml:"parallel_apply" json:"parallel_apply" mapstructure:"parallel_apply"`
	AllowedRegexpPrefixes     []string `yaml:"allowed_regexp_prefixes" json:"allowed_regexp_prefixes" mapstructure:"allowed_regexp_prefixes"`
}

// AtlantisProjectConfig is the per-project template for an Atlantis config.
type AtlantisProjectConfig struct {
	Name                      string                        `yaml:"name" json:"name" mapstructure:"name"`
	Workspace                 string                        `yaml:"workspace" json:"workspace" mapstructure:"workspace"`
	Workflow                  string                        `yaml:"workflow,omitempty" json:"workflow,omitempty" mapstructure:"workflow"`
	Dir                       string                        `yaml:"dir" json:"dir" mapstructure:"dir"`
	TerraformVersion          string                        `yaml:"terraform_version" json:"terraform_version" mapstructure:"terraform_version"`
	DeleteSourceBranchOnMerge bool                          `yaml:"delete_source_branch_on_merge" json:"delete_source_branch_on_merge" mapstructure:"delete_source_branch_on_merge"`
	Autoplan                  AtlantisProjectAutoplanConfig `yaml:"autoplan" json:"autoplan" mapstructure:"autoplan"`
	ApplyRequirements         []string                      `yaml:"apply_requirements" json:"apply_requirements" mapstructure:"apply_requirements"`
}

// AtlantisProjectAutoplanConfig configures autoplan for an Atlantis project.
type AtlantisProjectAutoplanConfig struct {
	Enabled      bool     `yaml:"enabled" json:"enabled" mapstructure:"enabled"`
	WhenModified []string `yaml:"when_modified" json:"when_modified" mapstructure:"when_modified"`
}

// AtlantisConfigOutput is the generated Atlantis repo config document.
type AtlantisConfigOutput struct {
	Version                   int                     `yaml:"version" json:"version" mapstructure:"version"`
	Automerge                 bool                    `yaml:"automerge" json:"automerge" mapstructure:"automerge"`
	DeleteSourceBranchOnMerge bool                    `yaml:"delete_source_branch_on_merge" json:"delete_source_branch_on_merge" mapstructure:"delete_source_branch_on_merge"`
	ParallelPlan              bool                    `yaml:"parallel_plan" json:"parallel_plan" mapstructure:"parallel_plan"`
	ParallelApply             bool                    `yaml:"parallel_apply" json:"parallel_apply" mapstructure:"parallel_apply"`
	AllowedRegexpPrefixes     []string                `yaml:"allowed_regexp_prefixes" json:"allowed_regexp_prefixes" mapstructure:"allowed_regexp_prefixes"`
	Projects                  []AtlantisProjectConfig `yaml:"projects" json:"projects" mapstructure:"projects"`
	Workflows                 map[string]any          `yaml:"workflows,omitempty" json:"workflows,omitempty" mapstructure:"workflows"`
}
// Validation schemas

// JsonSchema holds the base path of JSON Schema validation files.
type JsonSchema struct {
	BasePath string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
}

// Cue holds the base path of CUE validation files.
type Cue struct {
	BasePath string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
}

// Opa holds the base path of OPA policy files.
type Opa struct {
	BasePath string `yaml:"base_path" json:"base_path" mapstructure:"base_path"`
}

// Schemas groups the base paths for all supported validation schema types.
type Schemas struct {
	JsonSchema JsonSchema `yaml:"jsonschema" json:"jsonschema" mapstructure:"jsonschema"`
	Cue        Cue        `yaml:"cue" json:"cue" mapstructure:"cue"`
	Opa        Opa        `yaml:"opa" json:"opa" mapstructure:"opa"`
}

// ValidationItem configures a single validation: which schema to apply and
// to what, with an optional timeout and disable switch.
type ValidationItem struct {
	SchemaType  string   `yaml:"schema_type" json:"schema_type" mapstructure:"schema_type"`
	SchemaPath  string   `yaml:"schema_path" json:"schema_path" mapstructure:"schema_path"`
	ModulePaths []string `yaml:"module_paths" json:"module_paths" mapstructure:"module_paths"`
	Description string   `yaml:"description" json:"description" mapstructure:"description"`
	Disabled    bool     `yaml:"disabled" json:"disabled" mapstructure:"disabled"`
	Timeout     int      `yaml:"timeout" json:"timeout" mapstructure:"timeout"`
}

// Validation maps validation names to their configuration.
type Validation map[string]ValidationItem
// Affected Atmos components and stacks given two Git commits

// Affected describes one component/stack pair affected by changes between
// two Git commits, including what kind of change affected it.
type Affected struct {
	Component       string `yaml:"component" json:"component" mapstructure:"component"`
	ComponentType   string `yaml:"component_type" json:"component_type" mapstructure:"component_type"`
	ComponentPath   string `yaml:"component_path" json:"component_path" mapstructure:"component_path"`
	Namespace       string `yaml:"namespace,omitempty" json:"namespace,omitempty" mapstructure:"namespace"`
	Tenant          string `yaml:"tenant,omitempty" json:"tenant,omitempty" mapstructure:"tenant"`
	Environment     string `yaml:"environment,omitempty" json:"environment,omitempty" mapstructure:"environment"`
	Stage           string `yaml:"stage,omitempty" json:"stage,omitempty" mapstructure:"stage"`
	Stack           string `yaml:"stack" json:"stack" mapstructure:"stack"`
	StackSlug       string `yaml:"stack_slug" json:"stack_slug" mapstructure:"stack_slug"`
	SpaceliftStack  string `yaml:"spacelift_stack,omitempty" json:"spacelift_stack,omitempty" mapstructure:"spacelift_stack"`
	AtlantisProject string `yaml:"atlantis_project,omitempty" json:"atlantis_project,omitempty" mapstructure:"atlantis_project"`
	Affected        string `yaml:"affected" json:"affected" mapstructure:"affected"`
	File            string `yaml:"file,omitempty" json:"file,omitempty" mapstructure:"file"`
	Folder          string `yaml:"folder,omitempty" json:"folder,omitempty" mapstructure:"folder"`
}

// BaseComponentConfig accumulates the configuration inherited from a base
// component while processing a component's inheritance chain.
type BaseComponentConfig struct {
	BaseComponentVars                      map[any]any
	BaseComponentSettings                  map[any]any
	BaseComponentEnv                       map[any]any
	FinalBaseComponentName                 string
	BaseComponentCommand                   string
	BaseComponentBackendType               string
	BaseComponentBackendSection            map[any]any
	BaseComponentRemoteStateBackendType    string
	BaseComponentRemoteStateBackendSection map[any]any
	ComponentInheritanceChain              []string
}

// Stack imports (`import` section)

// StackImport is one entry of a stack's `import` section: a path plus an
// optional templating context.
type StackImport struct {
	Path    string         `yaml:"path" json:"path" mapstructure:"path"`
	Context map[string]any `yaml:"context" json:"context" mapstructure:"context"`
}

// Dependencies

// DependsOn maps dependency keys to the context of the depended-on component.
type DependsOn map[any]Context

// Dependent describes a component/stack that depends on the current component.
type Dependent struct {
	Component       string `yaml:"component" json:"component" mapstructure:"component"`
	ComponentType   string `yaml:"component_type" json:"component_type" mapstructure:"component_type"`
	ComponentPath   string `yaml:"component_path" json:"component_path" mapstructure:"component_path"`
	Namespace       string `yaml:"namespace,omitempty" json:"namespace,omitempty" mapstructure:"namespace"`
	Tenant          string `yaml:"tenant,omitempty" json:"tenant,omitempty" mapstructure:"tenant"`
	Environment     string `yaml:"environment,omitempty" json:"environment,omitempty" mapstructure:"environment"`
	Stage           string `yaml:"stage,omitempty" json:"stage,omitempty" mapstructure:"stage"`
	Stack           string `yaml:"stack" json:"stack" mapstructure:"stack"`
	StackSlug       string `yaml:"stack_slug" json:"stack_slug" mapstructure:"stack_slug"`
	SpaceliftStack  string `yaml:"spacelift_stack,omitempty" json:"spacelift_stack,omitempty" mapstructure:"spacelift_stack"`
	AtlantisProject string `yaml:"atlantis_project,omitempty" json:"atlantis_project,omitempty" mapstructure:"atlantis_project"`
}

// Settings

// SettingsSpacelift is the free-form `settings.spacelift` section.
type SettingsSpacelift map[any]any

// Settings is a component's `settings` section.
type Settings struct {
	DependsOn DependsOn         `yaml:"depends_on" json:"depends_on" mapstructure:"depends_on"`
	Spacelift SettingsSpacelift `yaml:"spacelift" json:"spacelift" mapstructure:"spacelift"`
}

// ConfigSourcesStackDependency defines schema for sources of config sections
type ConfigSourcesStackDependency struct {
	StackFile        string `yaml:"stack_file" json:"stack_file" mapstructure:"stack_file"`
	StackFileSection string `yaml:"stack_file_section" json:"stack_file_section" mapstructure:"stack_file_section"`
	DependencyType   string `yaml:"dependency_type" json:"dependency_type" mapstructure:"dependency_type"`
	VariableValue    any    `yaml:"variable_value" json:"variable_value" mapstructure:"variable_value"`
}

// ConfigSourcesStackDependencies is the ordered list of stack files a value
// came from.
type ConfigSourcesStackDependencies []ConfigSourcesStackDependency

// ConfigSourcesItem records the final value of one config entry and the
// stack files that contributed to it.
type ConfigSourcesItem struct {
	FinalValue        any                            `yaml:"final_value" json:"final_value" mapstructure:"final_value"`
	Name              string                         `yaml:"name" json:"name" mapstructure:"name"`
	StackDependencies ConfigSourcesStackDependencies `yaml:"stack_dependencies" json:"stack_dependencies" mapstructure:"stack_dependencies"`
}

// ConfigSources maps section name -> entry name -> its sources.
type ConfigSources map[string]map[string]ConfigSourcesItem
|
package obj
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// ObjParser is the callback interface driven by Read: each directive found
// in a Wavefront OBJ file is dispatched to the corresponding method.
type ObjParser interface {
	// Filename returns the path of the OBJ file to read.
	Filename() string
	// Comment receives the text of a `#` comment line.
	Comment(s string)
	// Vertex receives the components of a `v` (geometric vertex) line.
	Vertex(components []float64)
	// TextureVertex receives the components of a `vt` line.
	TextureVertex(components []float64)
	// Normal receives the components of a `vn` line.
	Normal(components []float64)
	// Group receives the names from a `g` (group) line.
	Group(names []string)
	// Face receives parallel id slices parsed from an `f` line; ids missing
	// in the file (e.g. "v//vn") are zero.
	Face(vertexIds, textureVertexIds, normalIds []int)
	// MaterialLibrary receives the filename from a `mtllib` line.
	MaterialLibrary(filename string)
	// UseMaterial receives the material name from a `usemtl` line.
	UseMaterial(materialName string)
}
// Read opens the file named by objParser.Filename() and streams it line by
// line, dispatching each non-empty line to the appropriate ObjParser
// callback. It returns an error if the file cannot be opened or if a read
// error occurs while scanning.
func Read(objParser ObjParser) error {
	file, err := os.Open(objParser.Filename())
	if err != nil {
		return err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lineTokens := strings.Fields(scanner.Text())
		if len(lineTokens) > 0 {
			parseObjLine(objParser, lineTokens)
		}
	}
	// bufio.Scanner swallows read errors during Scan; surface them here
	// instead of silently reporting a truncated file as loaded.
	if err := scanner.Err(); err != nil {
		return err
	}
	fmt.Printf(" Loaded: %v\n", objParser.Filename())
	return nil
}
// parseObjLine dispatches a single tokenized OBJ-file line to the matching
// ObjParser callback. Assumes at least one token. Directives that require a
// filename/name argument ("mtllib", "usemtl") are reported and skipped when
// the argument is missing, instead of panicking on tokens[1].
func parseObjLine(parser ObjParser, tokens []string) {
	switch tokens[0] {
	case "#":
		parser.Comment(strings.Join(tokens[1:], " "))
	case "v":
		parser.Vertex(parseFloats(tokens[1:]))
	case "vt":
		parser.TextureVertex(parseFloats(tokens[1:]))
	case "vn":
		parser.Normal(parseFloats(tokens[1:]))
	case "g":
		parser.Group(tokens[1:])
	case "f":
		parser.Face(parseFaces(tokens[1:]))
	case "mtllib":
		// Guard: a bare "mtllib" line would otherwise index past tokens.
		if len(tokens) < 2 {
			fmt.Printf("Missing argument for: %v\n", tokens[0])
			return
		}
		parser.MaterialLibrary(tokens[1])
	case "usemtl":
		if len(tokens) < 2 {
			fmt.Printf("Missing argument for: %v\n", tokens[0])
			return
		}
		parser.UseMaterial(tokens[1])
	default:
		fmt.Printf("Unknown obj parameter: %v\n", tokens[0])
	}
}
// parseFaces parses face element tokens of the form "v", "v/vt", "v/vt/vn"
// or "v//vn" into three parallel id slices. Components that are absent or
// malformed are left as zero.
func parseFaces(tokens []string) (vertexIds, textureVertexIds, normalIds []int) {
	n := len(tokens)
	vertexIds = make([]int, n)
	textureVertexIds = make([]int, n)
	normalIds = make([]int, n)
	for i, token := range tokens {
		// Slot j of each token goes into the j-th destination slice.
		targets := [3][]int{vertexIds, textureVertexIds, normalIds}
		for j, part := range strings.Split(token, "/") {
			if j > 2 {
				break
			}
			id, _ := strconv.ParseInt(part, 10, 32)
			targets[j][i] = int(id)
		}
	}
	return vertexIds, textureVertexIds, normalIds
}
// parseFloat converts token to a float64, yielding 0 on malformed input.
func parseFloat(token string) float64 {
	f, _ := strconv.ParseFloat(token, 64)
	return f
}
// parseFloats converts each token to a float64; malformed tokens become 0.
func parseFloats(tokens []string) []float64 {
	values := make([]float64, 0, len(tokens))
	for _, token := range tokens {
		f, _ := strconv.ParseFloat(token, 64)
		values = append(values, f)
	}
	return values
}
// ObjMaterialParser is the callback interface driven by ReadMaterial: each
// directive found in an MTL material file is dispatched to a method.
type ObjMaterialParser interface {
	// Filename returns the path of the MTL file to read.
	Filename() string
	// Comment receives the text of a `#` comment line.
	Comment(s string)
	// NewMaterial receives the name from a `newmtl` line.
	NewMaterial(materialName string)
	// Specular receives the `Ns` (specular exponent) value.
	Specular(value float64)
	// IndexOfRefraction receives the `Ni` value.
	IndexOfRefraction(value float64)
	// Transparency receives the `Tr` value.
	Transparency(value float64)
	// AmbientColour receives the `Ka` components.
	AmbientColour(values []float64)
	// DiffuseColour receives the `Kd` components.
	DiffuseColour(values []float64)
	// SpecularColour receives the `Ks` components.
	SpecularColour(values []float64)
	// EmissiveColour receives the `Ke` components.
	EmissiveColour(values []float64)
	// DiffuseTexture receives the filename from a `map_Kd` line.
	DiffuseTexture(filename string)
}
// ReadMaterial opens the MTL file named by parser.Filename() and streams it
// line by line, dispatching each non-empty line to the appropriate
// ObjMaterialParser callback. It returns an error if the file cannot be
// opened or if a read error occurs while scanning.
func ReadMaterial(parser ObjMaterialParser) error {
	file, err := os.Open(parser.Filename())
	if err != nil {
		return err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lineTokens := strings.Fields(strings.TrimSpace(scanner.Text()))
		if len(lineTokens) > 0 {
			parseMaterialLine(parser, lineTokens)
		}
	}
	// bufio.Scanner swallows read errors during Scan; surface them here
	// instead of silently reporting a truncated file as loaded.
	if err := scanner.Err(); err != nil {
		return err
	}
	fmt.Printf(" Loaded: %v\n", parser.Filename())
	return nil
}
// parseMaterialLine dispatches a single tokenized MTL-file line to the
// matching ObjMaterialParser callback. Assumes at least one token.
// Directives that require a single argument are reported and skipped when
// the argument is missing, instead of panicking on tokens[1].
func parseMaterialLine(parser ObjMaterialParser, tokens []string) {
	// Directives below that read tokens[1] directly need a second token.
	switch tokens[0] {
	case "newmtl", "Ns", "Ni", "Tr", "map_Kd":
		if len(tokens) < 2 {
			fmt.Printf("Missing argument for: %v\n", tokens[0])
			return
		}
	}
	switch tokens[0] {
	case "#":
		parser.Comment(strings.Join(tokens[1:], " "))
	case "newmtl":
		parser.NewMaterial(tokens[1])
	case "Ns":
		parser.Specular(parseFloat(tokens[1]))
	case "Ni":
		parser.IndexOfRefraction(parseFloat(tokens[1]))
	case "Tr":
		parser.Transparency(parseFloat(tokens[1]))
	case "Ka":
		parser.AmbientColour(parseFloats(tokens[1:]))
	case "Kd":
		parser.DiffuseColour(parseFloats(tokens[1:]))
	case "Ks":
		parser.SpecularColour(parseFloats(tokens[1:]))
	case "Ke":
		parser.EmissiveColour(parseFloats(tokens[1:]))
	case "map_Kd":
		parser.DiffuseTexture(tokens[1])
	default:
		fmt.Printf("Unknown material parameter: %v\n", tokens[0])
	}
}
|
package ravendb
import (
"fmt"
"reflect"
"strings"
)
// functionality related to reflection
func isPtrStruct(t reflect.Type) (reflect.Type, bool) {
if t.Kind() == reflect.Ptr && t.Elem() != nil && t.Elem().Kind() == reflect.Struct {
return t, true
}
return nil, false
}
// isPtrMapStringToPtrStruct reports whether tp is *map[string]*SomeStruct.
// On success it returns the map's value type (the *SomeStruct) and true.
func isPtrMapStringToPtrStruct(tp reflect.Type) (reflect.Type, bool) {
	if tp.Kind() != reflect.Ptr {
		return nil, false
	}
	m := tp.Elem()
	if m.Kind() != reflect.Map || m.Key().Kind() != reflect.String {
		return nil, false
	}
	return isPtrStruct(m.Elem())
}
// isMapStringToPtrStruct reports whether tp is map[string]*SomeStruct.
// On success it returns the map's value type (the *SomeStruct) and true.
func isMapStringToPtrStruct(tp reflect.Type) (reflect.Type, bool) {
	if tp.Kind() != reflect.Map || tp.Key().Kind() != reflect.String {
		return nil, false
	}
	return isPtrStruct(tp.Elem())
}
// Go port of com.google.common.base.Defaults to make porting Java easier
func getDefaultValueForType(clazz reflect.Type) interface{} {
rv := reflect.Zero(clazz)
return rv.Interface()
}
// GetFullTypeName returns fully qualified (including package) name of the type,
// after traversing pointers.
// e.g. for struct Foo in main package, the type of Foo and *Foo is main.Foo
func getFullTypeName(v interface{}) string {
rv := reflect.ValueOf(v)
for rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
typ := rv.Type()
return typ.String()
}
// getShortTypeNameForEntityOrType returns a short (not including package)
// name for v, which may be either a reflect.Type or an entity value.
// Note: this emulates Java's operator over-loading to support
// DefaultGetCollectionName.
func getShortTypeNameForEntityOrType(v interface{}) string {
	switch t := v.(type) {
	case reflect.Type:
		return getShortTypeNameForType(t)
	default:
		return getShortTypeNameForEntity(v)
	}
}
// getShortTypeNameForEntity returns the short (package-less) type name of
// the entity value v after dereferencing any pointers.
func getShortTypeNameForEntity(v interface{}) string {
	value := reflect.ValueOf(v)
	for value.Kind() == reflect.Ptr {
		value = value.Elem()
	}
	return getShortTypeNameForType(value.Type())
}
func getShortTypeNameForType(typ reflect.Type) string {
// for *Foo and **Foo the name we want to return is Foo
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
return typ.Name()
}
// identity property is field of type string with name ID
func getIdentityProperty(typ reflect.Type) string {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return ""
}
field, ok := typ.FieldByName("ID")
if !ok || field.Type.Kind() != reflect.String {
return ""
}
return "ID"
}
func isTypePrimitive(t reflect.Type) bool {
kind := t.Kind()
switch kind {
case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16,
reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8,
reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64, reflect.String:
return true
case reflect.Ptr:
return false
// TODO: not all of those we should support
case reflect.Array, reflect.Interface, reflect.Map, reflect.Slice, reflect.Struct:
panic("NYI")
}
return false
}
func getStructTypeOfReflectValue(rv reflect.Value) (reflect.Type, bool) {
if rv.Type().Kind() == reflect.Ptr {
rv = rv.Elem()
}
typ := rv.Type()
if typ.Kind() == reflect.Struct {
return typ, true
}
return typ, false
}
// getStructTypeOfValue returns v's dynamic type after dereferencing one
// level of pointer, together with whether that type is a struct.
func getStructTypeOfValue(v interface{}) (reflect.Type, bool) {
	return getStructTypeOfReflectValue(reflect.ValueOf(v))
}
// if typ is ptr-to-struct, return as is
// if typ is ptr-to-ptr-to-struct, returns ptr-to-struct
// otherwise returns nil
func fixUpStructType(typ reflect.Type) reflect.Type {
if typ.Kind() != reflect.Ptr {
return nil
}
subtype := typ.Elem()
if subtype.Kind() == reflect.Struct {
return typ
}
if subtype.Kind() != reflect.Ptr {
return nil
}
if subtype.Elem().Kind() == reflect.Struct {
return subtype
}
return nil
}
// convertFloat64ToType narrows v to the numeric Go type described by typ
// (JSON numbers decode as float64, so this restores the target kind).
// Unsupported kinds trigger panicIf and fall back to int(0).
func convertFloat64ToType(v float64, typ reflect.Type) interface{} {
	switch typ.Kind() {
	case reflect.Float64:
		return v
	case reflect.Float32:
		return float32(v)
	case reflect.Int:
		return int(v)
	case reflect.Int8:
		return int8(v)
	case reflect.Int16:
		return int16(v)
	case reflect.Int32:
		return int32(v)
	case reflect.Int64:
		return int64(v)
	case reflect.Uint:
		return uint(v)
	case reflect.Uint8:
		return uint8(v)
	case reflect.Uint16:
		return uint16(v)
	case reflect.Uint32:
		return uint32(v)
	case reflect.Uint64:
		return uint64(v)
	}
	panicIf(true, "don't know how to convert value of type %T to reflect type %s", v, typ.Name())
	return int(0)
}
// treeToValue converts a decoded JSON value js (string, float64, bool,
// []interface{} or map[string]interface{}) into a value of reflect type
// typ. Currently only strings, numbers and objects (-> structs) are
// supported; everything else panics via panicIf.
func treeToValue(typ reflect.Type, js interface{}) (interface{}, error) {
	// TODO: should also handle primitive types
	switch v := js.(type) {
	case string:
		if typ.Kind() == reflect.String {
			return js, nil
		}
		panicIf(true, "don't know how to convert value of type %T to reflect type %s", js, typ.Name())
	case float64:
		// JSON numbers always decode as float64; narrow to the target kind
		return convertFloat64ToType(v, typ), nil
	case bool:
		panicIf(true, "don't know how to convert value of type %T to reflect type %s", js, typ.Name())
	case []interface{}:
		panicIf(true, "don't know how to convert value of type %T to reflect type %s", js, typ.Name())
	case map[string]interface{}:
		// JSON object -> struct of type typ
		return makeStructFromJSONMap(typ, v)
	}
	// unreachable in practice: panicIf above always panics
	panicIf(true, "don't know how to convert value of type %v to reflect type %s", js, typ.Name())
	return nil, fmt.Errorf("don't know how to convert value of type %v to reflect type %s", js, typ.Name())
}
// get name of struct field for json serialization
// empty string means we should skip this field
func getJSONFieldName(field reflect.StructField) string {
// skip unexported fields
if field.PkgPath != "" {
return ""
}
tag := field.Tag.Get("json")
// if no tag, use field name
if tag == "" {
return field.Name
}
// skip if explicitly marked as non-json serializable
// TODO: write tests for this
if tag == "-" {
return ""
}
// this could be "name,omitempty" etc.; extract just the name
if idx := strings.IndexByte(tag, ','); idx != -1 {
name := tag[:idx-1]
// if it's sth. like ",omitempty", use field name
// TODO: write tests for this
if name == "" {
return field.Name
}
return name
}
return tag
}
// FieldsFor returns names of all JSON-serializable fields for the value
// of a struct type. They can be used in e.g. DocumentQuery.SelectFields:
//
//	fields := ravendb.FieldsFor(&MyType{})
//	q = q.SelectFields(fields...)
func FieldsFor(s interface{}) []string {
	v := reflect.ValueOf(s)
	// if pointer (possibly multi-level), get the underlying element
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	panicIf(v.Kind() != reflect.Struct, "argument must be struct, we got %T", s)
	structType := v.Type()
	var res []string
	for i := 0; i < structType.NumField(); i++ {
		name := getJSONFieldName(structType.Field(i))
		if name != "" {
			res = append(res, name)
		}
	}
	return res
}
// decodeJSONAsStruct decodes js (most likely a map[string]interface{})
// into res by round-tripping through a JSON encoding.
func decodeJSONAsStruct(js interface{}, res interface{}) error {
	data, err := jsonMarshal(js)
	if err != nil {
		return err
	}
	return jsonUnmarshal(data, res)
}
// makeStructFromJSONMap builds a struct value of (pointer) type typ from
// a JSON document represented as a map, by round-tripping through JSON.
// typ must be ptr-to-struct or ptr-to-ptr-to-struct; the returned value
// is a ptr-to-struct. As a special case, when typ is
// map[string]interface{} the input map is returned unchanged.
func makeStructFromJSONMap(typ reflect.Type, js map[string]interface{}) (interface{}, error) {
	if typ == reflect.TypeOf(map[string]interface{}{}) {
		return js, nil
	}
	typ2 := fixUpStructType(typ)
	if typ2 == nil {
		// BUG FIX: message used to read "*(*<type>" — an unbalanced,
		// garbled spelling of the ptr-to-ptr form
		return nil, newIllegalArgumentError("typ should be *<type> or **<type> but is %s", typ.String())
	}
	typ = typ2
	// reflect.New() creates a pointer to type. if typ is already a pointer,
	// we undo one level
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	rvNew := reflect.New(typ)
	d, err := jsonMarshal(js)
	if err != nil {
		return nil, err
	}
	v := rvNew.Interface()
	err = jsonUnmarshal(d, v)
	if err != nil {
		return nil, err
	}
	return v, nil
}
// dbglog formats the message, echoes it to stdout, and returns the
// formatted string so callers can reuse it (e.g. inside a panic message).
func dbglog(format string, args ...interface{}) string {
	msg := fmt.Sprintf(format, args...)
	fmt.Println(msg)
	return msg
}
// convertValue corresponds to ObjectMapper.convertValue().
// val is coming from JSON, so it can be string, bool, float64, []interface{}
// or map[string]interface{}
// TODO: not sure about nil
// for simple types (int, bool, string) it should be just pass-through
// for structs decode map[string]interface{} => struct using makeStructFromJSONMap
func convertValue(val interface{}, clazz reflect.Type) (interface{}, error) {
	// TODO: implement every possible type. Need more comprehensive tests
	// to exercise those code paths
	switch clazz.Kind() {
	case reflect.String:
		switch v := val.(type) {
		case string:
			return v, nil
		default:
			panicIf(true, "%s", dbglog("converting of type %T to string NYI", val))
		}
	case reflect.Int:
		switch v := val.(type) {
		case int:
			return v, nil
		case float64:
			// JSON numbers arrive as float64; truncate to int
			res := int(v)
			return res, nil
		default:
			panicIf(true, "%s", dbglog("converting of type %T to reflect.Int NYI", val))
		}
	case reflect.Ptr:
		clazz2 := clazz.Elem()
		switch clazz2.Kind() {
		case reflect.Struct:
			valIn, ok := val.(map[string]interface{})
			if !ok {
				// BUG FIX: message promises the value's *type* but used %s,
				// which prints the value (or %!s noise); %T prints the type
				return nil, newRavenError("can't convert value of type '%T' to a struct", val)
			}
			v, err := makeStructFromJSONMap(clazz, valIn)
			return v, err
		default:
			panicIf(true, "%s", dbglog("converting to pointer of '%s' NYI", clazz.Kind().String()))
		}
	default:
		panicIf(true, "%s", dbglog("converting to %s NYI", clazz.Kind().String()))
	}
	return nil, newNotImplementedError("convertValue: NYI")
}
// getSingleMapValue extracts the single value from a one-element
// map[string]*struct. It returns (nil, nil) for an empty map and an
// error when results has the wrong shape or more than one entry.
func getSingleMapValue(results interface{}) (interface{}, error) {
	m := reflect.ValueOf(results)
	badShape := func() error {
		return fmt.Errorf("results should be a map[string]*struct, is %s. tp: %s", m.Type().String(), m.Type().String())
	}
	if m.Type().Kind() != reflect.Map {
		return nil, badShape()
	}
	if m.Type().Key() != stringType {
		return nil, badShape()
	}
	elemPtrType := m.Type().Elem()
	if elemPtrType.Kind() != reflect.Ptr {
		return nil, badShape()
	}
	if elemPtrType.Elem().Kind() != reflect.Struct {
		return nil, badShape()
	}
	keys := m.MapKeys()
	switch len(keys) {
	case 0:
		return nil, nil
	case 1:
		return m.MapIndex(keys[0]).Interface(), nil
	default:
		return nil, fmt.Errorf("expected results to have only one element, has %d", len(keys))
	}
}
// checkIsPtrSlice validates that v is a non-nil pointer to a slice
// (i.e. *[]<type>), returning a descriptive error otherwise.
func checkIsPtrSlice(v interface{}, argName string) error {
	if v == nil {
		return newIllegalArgumentError("%s can't be nil", argName)
	}
	tp := reflect.TypeOf(v)
	if tp.Kind() == reflect.Slice {
		// more specific error message for common error of passing
		// []<type> instead of *[]<type>
		return newIllegalArgumentError("%s can't be of type %T, try *%T", argName, v, v)
	}
	if tp.Kind() != reflect.Ptr || tp.Elem().Kind() != reflect.Slice {
		return newIllegalArgumentError("%s can't be of type %T", argName, v)
	}
	return nil
}
// checkIsPtrPtrStruct validates that v is a non-nil **struct value,
// returning a descriptive error (with a hint for the two most common
// mistakes: passing struct or *struct) otherwise.
func checkIsPtrPtrStruct(v interface{}, argName string) error {
	if v == nil {
		return newIllegalArgumentError("%s can't be nil", argName)
	}
	tp := reflect.TypeOf(v)
	if tp.Kind() == reflect.Struct {
		// possibly a common mistake, so try to provide a helpful error message
		typeGot := fmt.Sprintf("%T", v)
		return newIllegalArgumentError("%s can't be of type %s, try passing %s", argName, typeGot, "**"+typeGot)
	}
	if tp.Kind() != reflect.Ptr {
		return newIllegalArgumentError("%s can't be of type %T", argName, v)
	}
	if tp.Elem().Kind() == reflect.Struct {
		// *struct passed where **struct was expected
		typeGot := fmt.Sprintf("%T", v)
		return newIllegalArgumentError("%s can't be of type %s, try passing %s", argName, typeGot, "*"+typeGot)
	}
	// we only allow pointer to pointer to struct
	if tp.Elem().Kind() != reflect.Ptr || tp.Elem().Elem().Kind() != reflect.Struct {
		return newIllegalArgumentError("%s can't be of type %T", argName, v)
	}
	return nil
}
|
package mr
import (
"sync"
"time"
)
// Master coordinates a MapReduce job: it tracks map and reduce tasks,
// their completion counts, and the queues of work still to hand out.
type Master struct {
	// Your definitions here.
	sync.Mutex                          // guards all fields below; Master is shared between handlers and timers
	nMap              int               // total number of map tasks (one per input file)
	nReduce           int               // total number of reduce tasks
	finishedMap       int               // count of completed map tasks
	finishedReduce    int               // count of completed reduce tasks
	mapTask           map[string]int    // input file -> map task number; presumably set to -1 when finished (see checkMapFinished) — TODO confirm
	reduceTask        map[int]bool      // reduce task number -> finished?
	inputFiles        []string          // queue of input files still awaiting a map task
	intermediateFiles []int             // queue of reduce task numbers still awaiting a worker (name is historical)
}
//
// main/mrmaster.go calls Done() periodically to find out
// if the entire job has finished.
//
func (m *Master) Done() bool {
	// Your code here.
	// BUG FIX: take the mutex — finishedReduce is mutated concurrently
	// (other methods update state under m.Lock()), so an unlocked read
	// here is a data race.
	m.Lock()
	defer m.Unlock()
	return m.finishedReduce == m.nReduce
}
// checkMapFinished sleeps 10 seconds after the map task for file was
// handed out; if the task still isn't marked finished by then
// (mapTask[file] is presumably set to -1 on completion — TODO confirm
// against the RPC handlers), the file is re-queued so another worker
// can retry it.
func (m *Master) checkMapFinished(file string) {
	time.Sleep(time.Second * 10)
	m.Lock()
	defer m.Unlock()
	if m.mapTask[file] != -1 {
		m.inputFiles = append(m.inputFiles, file)
	}
}
// checkReduceFinished sleeps 10 seconds after reduce task reduceNum was
// handed out; if it hasn't been marked finished (reduceTask[reduceNum]
// still false), the task number is re-queued for another worker.
func (m *Master) checkReduceFinished(reduceNum int) {
	time.Sleep(time.Second * 10)
	m.Lock()
	defer m.Unlock()
	if !m.reduceTask[reduceNum] {
		m.intermediateFiles = append(m.intermediateFiles, reduceNum)
	}
}
//
// create a Master.
// main/mrmaster.go calls this function.
//
func MakeMaster(files []string, nReduce int) *Master {
	// Your code here.
	m := &Master{
		nMap:       len(files),
		nReduce:    nReduce,
		mapTask:    make(map[string]int),
		reduceTask: make(map[int]bool),
	}
	// every input file starts out as a pending map task
	for idx, file := range files {
		m.mapTask[file] = idx
		m.inputFiles = append(m.inputFiles, file)
	}
	// every reduce bucket starts out as a pending reduce task
	for i := 0; i < nReduce; i++ {
		m.reduceTask[i] = false
		m.intermediateFiles = append(m.intermediateFiles, i)
	}
	m.server()
	return m
}
|
package cmd
import (
"fmt"
"io"
"github.com/brainicorn/skelp/generator"
"github.com/brainicorn/skelp/skelputil"
"github.com/mgutz/ansi"
"github.com/spf13/cobra"
)
// Flags that are to be added to commands.
var (
quietFlag bool
noColorFlag bool
homedirFlag string
skelpdirFlag string
)
// NewSkelpCommand builds the root "skelp" cobra command, registers its
// persistent flags, and attaches every sub-command.
func NewSkelpCommand() *cobra.Command {
	root := &cobra.Command{
		Use:   "skelp",
		Short: "A commandline tool for generating skeleton projects",
		Long: `skelp is a commandline tool for applying templates to a directory.
Skelp can be used to generate full project skeletons and/or apply templates to
an existing project.`,
		SilenceErrors:     true,
		SilenceUsage:      true,
		PersistentPreRunE: validateRootFlags,
	}
	flags := root.PersistentFlags()
	flags.BoolVar(&quietFlag, "quiet", false, "run in 'quiet mode'")
	flags.BoolVar(&noColorFlag, "no-color", false, "turn off terminal colors")
	flags.StringVar(&homedirFlag, "homedir", "", "path to override user's home directory where skelp stores data")
	flags.StringVar(&skelpdirFlag, "skelpdir", "", "override name of skelp folder within the user's home directory")
	addCommandsToRoot(root)
	return root
}
// validateRootFlags runs before every sub-command: it applies the global
// --no-color switch and rejects a --homedir that does not exist.
func validateRootFlags(cmd *cobra.Command, args []string) error {
	if noColorFlag {
		ansi.DisableColors(true)
	}
	// a blank homedir means "use the default", which is always fine
	if skelputil.IsBlank(homedirFlag) || skelputil.PathExists(homedirFlag) {
		return nil
	}
	return newUserError(fmt.Sprintf("%s is not a valid path for --homedir flag", homedirFlag))
}
// addCommandsToRoot attaches every skelp sub-command (apply, alias,
// bashme) to the given root command.
func addCommandsToRoot(cmd *cobra.Command) {
	cmd.AddCommand(newApplyCommand())
	cmd.AddCommand(newAliasCommand())
	cmd.AddCommand(newBashmeCommand())
}
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute(args []string, out io.Writer) int {
var cmd *cobra.Command
var err error
exitcode := 0
skelpCmd := NewSkelpCommand()
skelpCmd.SetArgs(args)
if out != nil {
skelpCmd.SetOutput(out)
}
if cmd, err = skelpCmd.ExecuteC(); err != nil {
exitcode = 1
if isUserError(err) {
cmd.Println(colorError(err.Error()))
cmd.Println(cmd.UsageString())
} else {
cmd.Println(colorError(err.Error()))
}
}
return exitcode
}
// getBaseOptions builds the generator options, applying any --homedir /
// --skelpdir overrides the user supplied on the command line.
func getBaseOptions() generator.SkelpOptions {
	opts := generator.DefaultOptions()
	if !skelputil.IsBlank(homedirFlag) {
		opts.HomeDirOverride = homedirFlag
	}
	if !skelputil.IsBlank(skelpdirFlag) {
		opts.SkelpDirOverride = skelpdirFlag
	}
	return opts
}
// colorError renders s in bold red for terminal output.
func colorError(s string) string {
	return ansi.Color(s, "red+b")
}
|
package credhub_test
import (
"errors"
"fmt"
"io"
"log"
"code.cloudfoundry.org/credhub-cli/credhub/credentials"
"code.cloudfoundry.org/credhub-cli/credhub/credentials/values"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/broker"
"github.com/pivotal-cf/on-demand-service-broker/credhub"
"github.com/pivotal-cf/on-demand-service-broker/credhub/fakes"
)
// Ginkgo spec for credhub.Store: exercises BulkGet/Set/Delete/AddPermission/
// BulkSet/FindNameLike/BulkDelete against a fake CredhubClient.
var _ = Describe("CredStore", func() {
	var (
		fakeCredhubClient *fakes.FakeCredhubClient
		store             *credhub.Store
	)
	BeforeEach(func() {
		fakeCredhubClient = new(fakes.FakeCredhubClient)
		store = credhub.New(fakeCredhubClient)
	})
	Describe("Bulk Get", func() {
		var (
			logBuffer *gbytes.Buffer
			logger    *log.Logger
		)
		BeforeEach(func() {
			logBuffer = gbytes.NewBuffer()
			logger = log.New(io.Writer(logBuffer), "my-app", log.LstdFlags)
		})
		// exampleCertificate is a self-signed throwaway certificate fixture
		// used to represent a "struct" (multi-field) credhub credential.
		var (
			exampleCertificate = map[string]interface{}{
				"ca": "-----BEGIN CERTIFICATE-----\nMIIDSjCCAjKgAwIBAgIUIwnRYqjEnzeMzNYuoctat+bi818wDQYJKoZIhvcNAQEL\nBQAwGTEXMBUGA1UEAxMOdG9tLmRpY2suaGFycnkwHhcNMTgwNzE2MTU0MzQwWhcN\nMTkwNzE2MTU0MzQwWjAZMRcwFQYDVQQDEw50b20uZGljay5oYXJyeTCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBALzyeXfpTM0ek6FVzTuOjpBYGLk2Kdl3\nAJ2gKx1FDqyeXS2Hn9nEEWAWYAQ4xvZzI1gnYm/2EXmZ1t4fY4fL6XXwjirNtOyF\n+R5UvG6uVdyfQU+FNnqnE2TQ37wNr8oWCfpoVr0T1Z9n7fPnZZg0+DRXv6x/1bzG\nqfl029bxxJMl64psR8Ew8UfrZ7zT+/URE7ex1XznwWM68rfllGaB7myPjXG6Io6I\nn7fptsCFqI7/EwofjNARIqoRwmbdpOOVz53kR0WeppfiafPsKEC0KT4hvJqgdVr7\nt4YDD4JDdCNTX/NL4BOl3pp9iBpCnz2Rk9E3tEd8JUkcjTc86KsQLYUCAwEAAaOB\niTCBhjAdBgNVHQ4EFgQU8RxuIlg9XT6/S+HDOWfUayaOvWUwVAYDVR0jBE0wS4AU\n8RxuIlg9XT6/S+HDOWfUayaOvWWhHaQbMBkxFzAVBgNVBAMTDnRvbS5kaWNrLmhh\ncnJ5ghQjCdFiqMSfN4zM1i6hy1q35uLzXzAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\nSIb3DQEBCwUAA4IBAQCu50sl64yo8n8/JRDEVibFwjmJj8h+ajcFGcFK9/iBq1Do\n4q8wibMH35sP9kDTGPJqu0IPxKUBaxkzZgIFjf7ujmyv5zEVQIqj9TdJiZs1QwkA\nKUaSBsFLSH9pweZhLVOgYab/ywc3xaKiQCuLAFovFKgqhfW5K6z3XpTEwknfP2Sj\n3An9KN9ZTp+x0f85oCuB8MXHyRTBF+js1pAMdfBGD6VnAfxn3QFx72x3x7YgG2zh\nyGNByRONHukFlzraQQ986237DXdhcAedkMA+OIZl+drLbEXDuPJT/dWp255FasZ4\n+pjdblNisoHZhV3W36NWxoQycjES2siEm8xHO43f\n-----END CERTIFICATE-----\n",
				"ca_name": "Henry",
				"certificate": "-----BEGIN CERTIFICATE-----\nMIIDSjCCAjKgAwIBAgIUIwnRYqjEnzeMzNYuoctat+bi818wDQYJKoZIhvcNAQEL\nBQAwGTEXMBUGA1UEAxMOdG9tLmRpY2suaGFycnkwHhcNMTgwNzE2MTU0MzQwWhcN\nMTkwNzE2MTU0MzQwWjAZMRcwFQYDVQQDEw50b20uZGljay5oYXJyeTCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBALzyeXfpTM0ek6FVzTuOjpBYGLk2Kdl3\nAJ2gKx1FDqyeXS2Hn9nEEWAWYAQ4xvZzI1gnYm/2EXmZ1t4fY4fL6XXwjirNtOyF\n+R5UvG6uVdyfQU+FNnqnE2TQ37wNr8oWCfpoVr0T1Z9n7fPnZZg0+DRXv6x/1bzG\nqfl029bxxJMl64psR8Ew8UfrZ7zT+/URE7ex1XznwWM68rfllGaB7myPjXG6Io6I\nn7fptsCFqI7/EwofjNARIqoRwmbdpOOVz53kR0WeppfiafPsKEC0KT4hvJqgdVr7\nt4YDD4JDdCNTX/NL4BOl3pp9iBpCnz2Rk9E3tEd8JUkcjTc86KsQLYUCAwEAAaOB\niTCBhjAdBgNVHQ4EFgQU8RxuIlg9XT6/S+HDOWfUayaOvWUwVAYDVR0jBE0wS4AU\n8RxuIlg9XT6/S+HDOWfUayaOvWWhHaQbMBkxFzAVBgNVBAMTDnRvbS5kaWNrLmhh\ncnJ5ghQjCdFiqMSfN4zM1i6hy1q35uLzXzAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\nSIb3DQEBCwUAA4IBAQCu50sl64yo8n8/JRDEVibFwjmJj8h+ajcFGcFK9/iBq1Do\n4q8wibMH35sP9kDTGPJqu0IPxKUBaxkzZgIFjf7ujmyv5zEVQIqj9TdJiZs1QwkA\nKUaSBsFLSH9pweZhLVOgYab/ywc3xaKiQCuLAFovFKgqhfW5K6z3XpTEwknfP2Sj\n3An9KN9ZTp+x0f85oCuB8MXHyRTBF+js1pAMdfBGD6VnAfxn3QFx72x3x7YgG2zh\nyGNByRONHukFlzraQQ986237DXdhcAedkMA+OIZl+drLbEXDuPJT/dWp255FasZ4\n+pjdblNisoHZhV3W36NWxoQycjES2siEm8xHO43f\n-----END CERTIFICATE-----\n",
				"private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
			}
		)
		// "((name))" refs without an ID resolve via GetLatestVersion; an
		// optional ".subkey" selects one field of a map-valued credential.
		DescribeTable("reading all the different credential types with optional subkeys", func(subkey, resolvedSecret string, credhubSecretValue interface{}) {
			ref := "someName"
			if subkey != "" {
				ref += "." + subkey
			}
			ref = "((" + ref + "))"
			expectedSecrets := map[string]string{
				ref: resolvedSecret,
			}
			secretsToFetch := map[string]boshdirector.Variable{
				ref: {
					Path: "/path/to/someName",
				},
			}
			fakeCredhubClient.GetLatestVersionReturns(credentials.Credential{
				Value: credhubSecretValue,
			}, nil)
			secrets, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(secrets).To(Equal(expectedSecrets))
		},
			Entry("scalar secret", "", "my-secret", "my-secret"),
			Entry("struct secret", "", toJson(exampleCertificate), exampleCertificate),
			Entry("struct with subkey", "private_key", exampleCertificate["private_key"], exampleCertificate),
		)
		// refs that carry an ID resolve via GetById instead.
		DescribeTable("reading all the different credential types when an ID is known", func(resolvedSecret string, credhubSecretValue interface{}) {
			ref := "((someName))"
			expectedSecrets := map[string]string{
				ref: resolvedSecret,
			}
			secretsToFetch := map[string]boshdirector.Variable{
				ref: {
					ID:   "1311",
					Path: "/path/to/someName",
				},
			}
			fakeCredhubClient.GetByIdReturns(credentials.Credential{
				Value: credhubSecretValue,
			}, nil)
			secrets, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(secrets).To(Equal(expectedSecrets))
		},
			Entry("scalar secret", "my-secret", "my-secret"),
			Entry("struct secret", toJson(exampleCertificate), exampleCertificate),
		)
		// bad subkeys are logged and the ref is silently omitted from the
		// result rather than failing the whole bulk get.
		DescribeTable("sub keys errors when sub key is not defined on particular credhub value object", func(credType string, credhubSecretValue interface{}) {
			ref := "((someName.badsubkey))"
			secretsToFetch := map[string]boshdirector.Variable{
				ref: {
					Path: "/path/to/someName",
				},
			}
			fakeCredhubClient.GetLatestVersionReturns(credentials.Credential{
				Value: credhubSecretValue,
			}, nil)
			result, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			_, ok := result[ref]
			Expect(ok).To(BeFalse())
			if credType == "map" {
				Expect(logBuffer).To(gbytes.Say("credential does not contain key 'badsubkey'"))
			} else {
				Expect(logBuffer).To(gbytes.Say("string type credential cannot have key 'badsubkey'"))
			}
		},
			Entry("certificate bad subkey", "map", exampleCertificate),
			Entry("scalar any subkey", "Value", "arnold"),
		)
		It("returns multiple values in secrets mapped if asked for more than one secret", func() {
			secretsToFetch := map[string]boshdirector.Variable{
				"((one))": {Path: "/foo"},
				"((two))": {Path: "/bar"},
			}
			fakeCredhubClient.GetLatestVersionStub = func(name string) (credentials.Credential, error) {
				if name == "/foo" {
					return credentials.Credential{Value: "foo-val"}, nil
				}
				if name == "/bar" {
					return credentials.Credential{Value: "bar-val"}, nil
				}
				return credentials.Credential{}, nil
			}
			result, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(result).To(HaveLen(2))
			Expect(result["((one))"]).To(Equal("foo-val"))
			Expect(result["((two))"]).To(Equal("bar-val"))
		})
		It("errors when retrieving a subkey for an unknown credential type", func() {
			ref := "((someName.subkey))"
			secretsToFetch := map[string]boshdirector.Variable{
				ref: {
					Path: "/path/to/someName",
				},
			}
			// an int is neither a string nor a map credential
			fakeCredhubClient.GetLatestVersionReturns(credentials.Credential{
				Value: 5,
			}, nil)
			result, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			_, ok := result[ref]
			Expect(ok).To(BeFalse())
			Expect(logBuffer).To(gbytes.Say(fmt.Sprintf("unknown credential type")))
		})
		It("logs problem and doesn't include secret when no ID and path does not exist in credhub", func() {
			fakeCredhubClient.GetLatestVersionReturns(credentials.Credential{}, errors.New("oops"))
			secretsToFetch := map[string]boshdirector.Variable{
				"((somePath))": {Path: "/path/to/somePath"},
			}
			secrets, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			_, ok := secrets["((somePath))"]
			Expect(ok).To(BeFalse(), "somePath should not be returned in secrets")
			Expect(logBuffer).To(gbytes.Say(`Could not resolve \(\(somePath\)\): oops`))
		})
		It("logs problem and doesn't include secret when ID is known but does not exist in credhub", func() {
			fakeCredhubClient.GetByIdReturns(credentials.Credential{}, errors.New("oops"))
			secretsToFetch := map[string]boshdirector.Variable{
				"((somePath))": {Path: "/path/to/somePath", ID: "31313"},
			}
			secrets, err := store.BulkGet(secretsToFetch, logger)
			Expect(err).NotTo(HaveOccurred())
			_, ok := secrets["((somePath))"]
			Expect(ok).To(BeFalse(), "somePath should not be returned in secrets")
			Expect(logBuffer).To(gbytes.Say(`Could not resolve \(\(somePath\)\): oops`))
		})
	})
	// Set dispatches on the secret's Go type: maps go through SetJSON,
	// strings through SetValue, anything else is rejected.
	Describe("Set", func() {
		It("can set a json secret", func() {
			secret := map[string]interface{}{}
			secret["foo"] = "bar"
			err := store.Set("/path/to/secret", secret)
			Expect(err).NotTo(HaveOccurred())
			Expect(fakeCredhubClient.SetJSONCallCount()).To(Equal(1))
			path, val, _ := fakeCredhubClient.SetJSONArgsForCall(0)
			Expect(path).To(Equal("/path/to/secret"))
			Expect(val).To(Equal(values.JSON(secret)))
		})
		It("can set a string secret", func() {
			err := store.Set("/path/to/secret", "caravan")
			Expect(err).NotTo(HaveOccurred())
			Expect(fakeCredhubClient.SetValueCallCount()).To(Equal(1))
			path, val, _ := fakeCredhubClient.SetValueArgsForCall(0)
			Expect(path).To(Equal("/path/to/secret"))
			Expect(val).To(Equal(values.Value("caravan")))
		})
		It("errors if not a JSON or string secret", func() {
			err := store.Set("/path/to/secret", make(chan int))
			Expect(err).To(MatchError("Unknown credential type"))
		})
	})
	Describe("Delete", func() {
		It("can delete a credhub secret at path p", func() {
			p := "/some/path"
			store.Delete(p)
			Expect(fakeCredhubClient.DeleteCallCount()).To(Equal(1))
			Expect(fakeCredhubClient.DeleteArgsForCall(0)).To(Equal(p))
		})
		It("returns an error if the underlying call fails", func() {
			fakeCredhubClient.DeleteReturns(errors.New("you what?"))
			err := store.Delete("something")
			Expect(err).To(MatchError("you what?"))
		})
	})
	Describe("Add Permission", func() {
		It("can add permissions to a path", func() {
			p := "/some/path"
			expectedActor := "jim"
			expectedOps := []string{"read", "corrupt"}
			_, err := store.AddPermission(p, expectedActor, expectedOps)
			Expect(err).NotTo(HaveOccurred())
			Expect(fakeCredhubClient.AddPermissionCallCount()).To(Equal(1))
			actualName, actualActor, actualOps := fakeCredhubClient.AddPermissionArgsForCall(0)
			Expect(actualName).To(Equal(p))
			Expect(actualActor).To(Equal(expectedActor))
			Expect(actualOps).To(Equal(expectedOps))
		})
		It("returns an error if the underlying call fails", func() {
			p := "/some/path"
			expectedActor := "jim"
			expectedOps := []string{"read", "corrupt"}
			fakeCredhubClient.AddPermissionReturns(nil, errors.New("you're joking, right?"))
			_, err := store.AddPermission(p, expectedActor, expectedOps)
			Expect(err).To(MatchError("you're joking, right?"))
		})
	})
	Describe("BulkSet", func() {
		It("does not set anything when called with an empty secrets map", func() {
			secretsToSet := []broker.ManifestSecret{}
			err := store.BulkSet(secretsToSet)
			Expect(err).NotTo(HaveOccurred())
			Expect(fakeCredhubClient.SetJSONCallCount()).To(Equal(0), "SetJSON was called")
			Expect(fakeCredhubClient.SetValueCallCount()).To(Equal(0), "SetValue was called")
		})
		It("stores all secrets", func() {
			secretsToSet := []broker.ManifestSecret{
				{Name: "foo", Path: "/foo/foo", Value: "123"},
				{Name: "bar", Path: "/foo/bar", Value: map[string]interface{}{"key": "value"}},
			}
			err := store.BulkSet(secretsToSet)
			Expect(err).NotTo(HaveOccurred())
			By("calling SetJSON for JSON values")
			Expect(fakeCredhubClient.SetJSONCallCount()).To(Equal(1), "SetJSON wasn't called")
			jsonPath, jsonValue, _ := fakeCredhubClient.SetJSONArgsForCall(0)
			Expect(jsonPath).To(Equal("/foo/bar"))
			Expect(jsonValue).To(Equal(values.JSON(map[string]interface{}{"key": "value"})))
			By("calling SetValue for string values")
			Expect(fakeCredhubClient.SetValueCallCount()).To(Equal(1), "SetValue wasn't called")
			strPath, strValue, _ := fakeCredhubClient.SetValueArgsForCall(0)
			Expect(strPath).To(Equal("/foo/foo"))
			Expect(strValue).To(Equal(values.Value("123")))
		})
		It("errors when one of the credentials is of an unsupported type", func() {
			secretsToSet := []broker.ManifestSecret{
				{Name: "bar", Path: "/foo/bar", Value: map[string]interface{}{"key": "value"}},
				{Name: "foo", Path: "/foo/foo", Value: make(chan bool)},
			}
			err := store.BulkSet(secretsToSet)
			Expect(err).To(MatchError("Unknown credential type"))
		})
		It("errors when fail to store json secrets", func() {
			secretsToSet := []broker.ManifestSecret{
				{Name: "bar", Path: "/foo/bar", Value: map[string]interface{}{"key": "value"}},
			}
			fakeCredhubClient.SetJSONReturns(credentials.JSON{}, errors.New("can't do it right now"))
			err := store.BulkSet(secretsToSet)
			Expect(err).To(MatchError("can't do it right now"))
		})
		It("errors when fail to store string secrets", func() {
			secretsToSet := []broker.ManifestSecret{
				{Name: "bar", Path: "/foo/bar", Value: "value"},
			}
			fakeCredhubClient.SetValueReturns(credentials.Value{}, errors.New("too busy, sorry"))
			err := store.BulkSet(secretsToSet)
			Expect(err).To(MatchError("too busy, sorry"))
		})
	})
	Describe("FindNameLike", func() {
		It("can find all secrets containing a portion of a path in their path", func() {
			fakeCredhubClient.FindByPartialNameReturns(credentials.FindResults{
				Credentials: []struct {
					Name             string `json:"name" yaml:"name"`
					VersionCreatedAt string `json:"version_created_at" yaml:"version_created_at"`
				}{
					{Name: "/tofu/path"},
					{Name: "/not-real-cheese/tofu/other/path"},
				},
			}, nil)
			actualPaths, err := store.FindNameLike("tofu", nil)
			Expect(err).NotTo(HaveOccurred())
			Expect(actualPaths).To(ConsistOf([]string{
				"/tofu/path",
				"/not-real-cheese/tofu/other/path",
			}))
		})
		It("returns an error when there is an error with the credhub client", func() {
			fakeCredhubClient.FindByPartialNameReturns(credentials.FindResults{}, errors.New("couldn't do it"))
			_, err := store.FindNameLike("tofu", nil)
			Expect(err).To(MatchError("couldn't do it"))
		})
	})
	Describe("BulkDelete", func() {
		var (
			logBuffer *gbytes.Buffer
			logger    *log.Logger
		)
		BeforeEach(func() {
			logBuffer = gbytes.NewBuffer()
			logger = log.New(io.Writer(logBuffer), "my-app", log.LstdFlags)
		})
		It("deletes all the secrets for the path provided", func() {
			secretsToDelete := []string{"/some/path/secret", "/some/path/another_secret"}
			fakeCredhubClient.DeleteReturns(nil)
			err := store.BulkDelete(secretsToDelete, logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(fakeCredhubClient.DeleteCallCount()).To(Equal(2))
			Expect(fakeCredhubClient.DeleteArgsForCall(0)).To(Equal("/some/path/secret"))
			Expect(fakeCredhubClient.DeleteArgsForCall(1)).To(Equal("/some/path/another_secret"))
		})
		It("logs an error if a call to delete fails", func() {
			secretsToDelete := []string{"/some/path/secret", "/some/path/another_secret"}
			fakeCredhubClient.DeleteReturns(errors.New("too difficult to delete"))
			err := store.BulkDelete(secretsToDelete, logger)
			Expect(err).To(MatchError("too difficult to delete"))
			Expect(logBuffer).To(gbytes.Say("could not delete secret '/some/path/secret': too difficult to delete"))
		})
	})
})
|
package jaegerMiddleware
import (
"github.com/gin-gonic/gin"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
// OpenTracingMiddleware returns a gin middleware that, per request:
//  1. tries to extract an upstream span context from the incoming HTTP
//     headers,
//  2. starts a server-side span named "app-server-backend" as its RPC
//     child (finished after the handler chain returns),
//  3. stores the span-carrying context both on the request and in gin's
//     key/value map under "SpanContext" for downstream handlers.
func OpenTracingMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		carrier := opentracing.HTTPHeadersCarrier(c.Request.Header)
		// Extract's error is discarded; presumably a failed extract just
		// leaves wireSpanCtx nil so the span starts a fresh trace —
		// the FIXME below flags this for review.
		wireSpanCtx, _ := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, carrier)
		// FIXME: handle err?
		serverSpan := opentracing.GlobalTracer().StartSpan("app-server-backend", ext.RPCServerOption(wireSpanCtx))
		// finishes once c.Next() (the rest of the handler chain) returns
		defer serverSpan.Finish()
		c.Request = c.Request.WithContext(
			opentracing.ContextWithSpan(c.Request.Context(), serverSpan))
		// if we bring the c.Request.Context() which contains the serverSpan already
		// to the Gin's internal map by below code:
		// c.Set("SpanContext", c.Request.Context())
		// then we don't have to Inject the server span's context to the
		// carrier as below line of code does
		// opentracing.GlobalTracer().Inject(serverSpan.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(c.Request.Header))
		c.Set("SpanContext", c.Request.Context())
		c.Next()
	}
}
|
// Copyright (c) 2018 soren yang
//
// Licensed under the MIT License
// you may not use this file except in complicance with the License.
// You may obtain a copy of the License at
//
// https://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"fmt"
"testing"
"github.com/stretchr/testify/suite"
)
// ipTestSuite groups the IP <-> uint32 conversion tests (testify suite).
type ipTestSuite struct {
	suite.Suite
}
// TestIp2Uint32Ok verifies IP2Uint32 against known IPv4 -> uint32 pairs
// spanning the whole address range.
func (p *ipTestSuite) TestIp2Uint32Ok() {
	values := map[string]uint32{
		"0.0.0.0":         0,
		"0.255.255.255":   16777215,
		"1.0.0.255":       16777471,
		"1.0.127.255":     16809983,
		"1.1.128.0":       16875520,
		"1.8.1.255":       17302015,
		"1.11.0.0":        17498112,
		"36.16.214.255":   605083391,
		"36.43.28.0":      606804992,
		"58.35.247.255":   975435775,
		"61.155.235.67":   1033628483,
		"116.113.210.138": 1953616522,
		"185.60.104.0":    3107743744,
		"202.147.6.0":     3398632960,
		"223.255.255.0":   3758096128,
		"255.255.255.255": 4294967295,
	}
	for k, v := range values {
		actual, err := IP2Uint32(k)
		// BUG FIX: was p.Equal(err, nil) — testify's Equal takes
		// (expected, actual), so the arguments were reversed; NoError
		// also yields a clearer failure message
		p.NoError(err)
		p.Equal(v, actual)
	}
}
// TestIp2Uint32FailInvalid verifies that malformed addresses (bad
// octets, stray dots, truncated IPv6 strings) are rejected with the
// "Invalid IP address" error.
func (p *ipTestSuite) TestIp2Uint32FailInvalid() {
	values := []string{"ip",
		"0.0.0.-1",
		"-1.0.0.0",
		"255.255.255.-1",
		"255.255.255.256",
		"256.255.255.255",
		"0.0.0.0.",
		"0.0.0.0.0",
		"0..0.0.0",
		".0.0.0",
		"0.0.0.",
		"CDCD:910A:2222:5498:8475:1111:3900:",
		"CDCD:910A:2222:5498:8475:1111:",
		":ffff:192.168.89.9",
		":192.168.89.9",
	}
	for _, value := range values {
		_, err := IP2Uint32(value)
		p.EqualError(err, fmt.Sprintf("Invalid IP address: %s", value))
	}
}
// TestIp2Uint32FailIpv6 verifies that syntactically valid IPv6 addresses
// are rejected with the "Only Support IPv4 address" error.
func (p *ipTestSuite) TestIp2Uint32FailIpv6() {
	values := []string{"CDCD:910A:2222:5498:8475:1111:3900:2020",
		"1030::C9B4:FF12:48AA:1A2B",
		"2000:0:0:0:0:0:0:1",
		// the IPv6 addresses below get transformed to IPv4, so they are
		// excluded from this failure list
		// "::ffff:192.168.89.9",
		// "::192.168.89.9",
	}
	for _, value := range values {
		_, err := IP2Uint32(value)
		p.EqualError(err, fmt.Sprintf("Only Support IPv4 address: %s", value))
	}
}
// TestUint322IpOK verifies the reverse conversion Uint322Ip against the
// same known uint32 -> dotted-quad pairs.
func (p *ipTestSuite) TestUint322IpOK() {
	values := map[string]uint32{
		"0.0.0.0":         0,
		"0.255.255.255":   16777215,
		"1.0.0.255":       16777471,
		"1.0.127.255":     16809983,
		"1.1.128.0":       16875520,
		"1.8.1.255":       17302015,
		"1.11.0.0":        17498112,
		"36.16.214.255":   605083391,
		"36.43.28.0":      606804992,
		"58.35.247.255":   975435775,
		"61.155.235.67":   1033628483,
		"116.113.210.138": 1953616522,
		"185.60.104.0":    3107743744,
		"202.147.6.0":     3398632960,
		"223.255.255.0":   3758096128,
		"255.255.255.255": 4294967295,
	}
	for k, v := range values {
		actual := Uint322Ip(v)
		p.Equal(k, actual)
	}
}
func TestIpTestSuite(t *testing.T) {
p := &ipTestSuite{}
suite.Run(t, p)
}
|
package mvt
import (
"github.com/paulmach/orb"
"github.com/paulmach/orb/geojson"
"reflect"
"testing"
)
// TestLayersClip checks that Layers.Clip clips each feature's geometry
// (polygon rings and line strings) in place to the given bound.
func TestLayersClip(t *testing.T) {
	cases := []struct {
		name   string
		bound  orb.Bound
		input  Layers
		output Layers // expected state of input after clipping
	}{
		{
			name: "clips polygon and line",
			input: Layers{&Layer{
				Features: []*geojson.Feature{
					geojson.NewFeature(orb.Polygon([]orb.Ring{
						{
							{-10, 10}, {0, 10}, {10, 10}, {10, 5}, {10, -5},
							{10, -10}, {20, -10}, {20, 10}, {40, 10}, {40, 20},
							{20, 20}, {20, 40}, {10, 40}, {10, 20}, {5, 20},
							{-10, 20},
						},
					})),
					geojson.NewFeature(orb.LineString{{-15, 0}, {66, 0}}),
				},
			}},
			output: Layers{&Layer{
				Features: []*geojson.Feature{
					geojson.NewFeature(orb.Polygon([]orb.Ring{
						{
							{0, 10}, {0, 10}, {10, 10}, {10, 5}, {10, 0},
							{20, 0}, {20, 10}, {30, 10}, {30, 20}, {20, 20},
							{20, 30}, {10, 30}, {10, 20}, {5, 20}, {0, 20},
						},
					})),
					geojson.NewFeature(orb.LineString{{0, 0}, {30, 0}}),
				},
			}},
			bound: orb.Bound{Min: orb.Point{0, 0}, Max: orb.Point{30, 30}},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Clip mutates tc.input in place
			tc.input.Clip(tc.bound)
			if !reflect.DeepEqual(tc.input, tc.output) {
				t.Errorf("incorrect clip")
				t.Logf("%v", tc.input)
				t.Logf("%v", tc.output)
			}
		})
	}
}
|
/*
* traPCollection API
*
* traPCollectionのAPI
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// GameMeta - a game's name and ID.
type GameMeta struct {
	// UUID of the added game.
	Id string `json:"id"`
	// Name of the added game.
	Name string `json:"name"`
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package feedback
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/feedbackapp"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// descriptionWithoutSuggestedContent is a deliberately meaningless description
// used so that no suggested help content can match it, forcing the app to fall
// back to the top help content.
const descriptionWithoutSuggestedContent = "$$$$$$$$$$$$$$$$$$$$$$$$$"

// init registers the test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ShowTopHelpContentIfNoSuggestedContent,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "User can get top help content if no suggested content shows",
		Contacts: []string{
			"wangdanny@google.com",
			"zhangwenyu@google.com",
			"xiangdongkong@google.com",
			"cros-feedback-app@google.com",
		},
		Fixture:      "chromeLoggedInWithOsFeedback",
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      2 * time.Minute,
	})
}
// ShowTopHelpContentIfNoSuggestedContent verifies the user enter long description
// or anything with no possible help content will show top help content.
func ShowTopHelpContentIfNoSuggestedContent(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*chrome.Chrome)
	// Reserve 5 seconds of the deadline so the faillog dump below can run
	// with the original (cleanup) context even if the test times out.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to Test API: ", err)
	}
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr,
		"ui_dump")
	ui := uiauto.New(tconn).WithTimeout(20 * time.Second)
	// Set up keyboard.
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to find keyboard: ", err)
	}
	defer kb.Close()
	// Launch feedback app.
	feedbackRootNode, err := feedbackapp.Launch(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to launch feedback app: ", err)
	}
	// Find the issue description text input.
	issueDescriptionInput := nodewith.Role(role.TextField).Ancestor(feedbackRootNode)
	if err := ui.EnsureFocused(issueDescriptionInput)(ctx); err != nil {
		s.Fatal("Failed to find the issue description text input: ", err)
	}
	// NOTE(review): kb.Type runs inside the polled function, so every retry
	// types the description into the field again — confirm the repeated
	// input is intended (it still contains no matchable content either way).
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		// Type issue description.
		if err := kb.Type(ctx, descriptionWithoutSuggestedContent); err != nil {
			return errors.Wrap(err, "failed to type issue description")
		}
		// Verify top help content title exists.
		title := nodewith.Name("No suggested content. See top help content.").Role(
			role.StaticText).Ancestor(feedbackRootNode)
		if err := ui.WaitUntilExists(title)(ctx); err != nil {
			return errors.Wrap(err, "failed to find the no suggested content title")
		}
		return nil
	}, &testing.PollOptions{Timeout: 20 * time.Second}); err != nil {
		s.Fatal("Failed to show top help content: ", err)
	}
}
|
package generator
import (
"fmt"
"math/rand"
"proto-benchmark-value-vs-pointers/proto"
)
// randString returns a random lowercase-hex string of exactly l characters.
//
// The original allocated an l-byte buffer but only ever randomized (and, via
// the truncated hex encoding, only ever used) the first ceil(l/2) bytes; this
// version sizes the buffer correctly and lets math/rand fill it in one call.
func randString(l int) string {
	// ceil(l/2) bytes hex-encode to l (or l+1) characters.
	buf := make([]byte, (l+1)/2)
	rand.Read(buf) // math/rand's Read is documented to never return an error
	return fmt.Sprintf("%x", buf)[:l]
}
// GenerateMessageValue builds n randomly populated MessageValue protos, each
// with a fully filled Address -> Random -> NestedRandom tree. Every message
// gets its own independently generated values (contrast GenerateMessageOptional,
// where all messages alias one shared set of values).
func GenerateMessageValue(n int) []*proto.MessageValue {
	out := make([]*proto.MessageValue, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, &proto.MessageValue{
			Name:     randString(10),
			BirthDay: rand.Int63n(5),
			Phone:    randString(10),
			Siblings: rand.Int31n(10),
			Spouse:   rand.Intn(2) == 1,
			Money:    rand.Float64(),
			Type:     proto.TypeValue(rand.Intn(4)),
			Address: &proto.AddressValue{
				Street:   randString(10),
				Number:   rand.Int31n(10),
				PostCode: rand.Int31n(10),
				Floor:    rand.Int31n(10),
				Random: &proto.RandomValue{
					FieldA: randString(10),
					FieldB: randString(10),
					FieldC: randString(10),
					FieldD: randString(10),
					FieldE: randString(10),
					FieldF: randString(10),
					FieldG: randString(10),
					FieldH: randString(10),
					FieldI: randString(10),
					FieldJ: randString(10),
					FieldK: rand.Int63n(5),
					FieldL: rand.Int63n(5),
					FieldM: rand.Int63n(5),
					FieldN: rand.Int63n(5),
					FieldO: randString(10),
					FieldP: randString(10),
					FieldQ: rand.Int31n(10),
					FieldR: randString(10),
					FieldS: randString(10),
					FieldT: randString(10),
					FieldU: rand.Int31n(10),
					FieldV: rand.Int31n(10),
					FieldW: rand.Int31n(10),
					FieldX: rand.Int31n(10),
					FieldY: randString(10),
					FieldZ: rand.Intn(2) == 1,
					NestedRandom: &proto.NestedRandomValue{
						FieldA: randString(10),
						FieldB: randString(10),
						FieldC: randString(10),
						FieldD: randString(10),
						FieldE: randString(10),
						FieldF: randString(10),
						FieldG: rand.Float64(),
						FieldH: rand.Float64(),
						FieldI: rand.Float64(),
						FieldJ: rand.Float64(),
						FieldK: rand.Float64(),
						FieldL: rand.Float64(),
						FieldM: randString(10),
						FieldN: randString(10),
						FieldO: randString(10),
						FieldP: randString(10),
						FieldQ: rand.Int63n(5),
						FieldR: rand.Int63n(5),
						FieldS: rand.Int63n(5),
						FieldT: rand.Int63n(5),
						FieldU: randString(10),
						FieldV: randString(10),
						FieldW: randString(10),
						FieldX: randString(10),
						FieldY: rand.Intn(2) == 1,
						FieldZ: rand.Intn(2) == 1,
					},
				},
			},
		})
	}
	return out
}
// GenerateMessageOptional builds n MessageOptional protos whose optional
// (pointer) fields are all populated. Note the values below are generated
// exactly once, outside the loop: every one of the n messages points at the
// same underlying variables, so mutating a field through one message would be
// visible through all of them.
func GenerateMessageOptional(n int) []*proto.MessageOptional {
	var (
		Name     = randString(10)
		BirthDay = rand.Int63n(5)
		Phone    = randString(10)
		Siblings = rand.Int31n(10)
		Spouse   = rand.Intn(2) == 1
		Money    = rand.Float64()
		Type     = proto.TypeOptional(rand.Intn(4))
		Street   = randString(10)
		Number   = rand.Int31n(10)
		PostCode = rand.Int31n(10)
		Floor    = rand.Int31n(10)
		FieldA   = randString(10)
		FieldB   = randString(10)
		FieldC   = randString(10)
		FieldD   = randString(10)
		FieldE   = randString(10)
		FieldF   = randString(10)
		FieldG   = randString(10)
		FieldH   = randString(10)
		FieldI   = randString(10)
		FieldJ   = randString(10)
		FieldK   = rand.Int63n(5)
		FieldL   = rand.Int63n(5)
		FieldM   = rand.Int63n(5)
		FieldN   = rand.Int63n(5)
		FieldO   = randString(10)
		FieldP   = randString(10)
		FieldQ   = rand.Int31n(10)
		FieldR   = randString(10)
		FieldS   = randString(10)
		FieldT   = randString(10)
		FieldU   = rand.Int31n(10)
		FieldV   = rand.Int31n(10)
		FieldW   = rand.Int31n(10)
		FieldX   = rand.Int31n(10)
		FieldY   = randString(10)
		FieldZ   = rand.Intn(2) == 1
		// The *1 variants feed the NestedRandomOptional level.
		FieldA1 = randString(10)
		FieldB1 = randString(10)
		FieldC1 = randString(10)
		FieldD1 = randString(10)
		FieldE1 = randString(10)
		FieldF1 = randString(10)
		FieldG1 = rand.Float64()
		FieldH1 = rand.Float64()
		FieldI1 = rand.Float64()
		FieldJ1 = rand.Float64()
		FieldK1 = rand.Float64()
		FieldL1 = rand.Float64()
		FieldM1 = randString(10)
		FieldN1 = randString(10)
		FieldO1 = randString(10)
		FieldP1 = randString(10)
		FieldQ1 = rand.Int63n(5)
		FieldR1 = rand.Int63n(5)
		FieldS1 = rand.Int63n(5)
		FieldT1 = rand.Int63n(5)
		FieldU1 = randString(10)
		FieldV1 = randString(10)
		FieldW1 = randString(10)
		FieldX1 = randString(10)
		FieldY1 = rand.Intn(2) == 1
		FieldZ1 = rand.Intn(2) == 1
	)
	out := make([]*proto.MessageOptional, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, &proto.MessageOptional{
			Name:     &Name,
			BirthDay: &BirthDay,
			Phone:    &Phone,
			Siblings: &Siblings,
			Spouse:   &Spouse,
			Money:    &Money,
			Type:     &Type,
			Address: &proto.AddressOptional{
				Street:   &Street,
				Number:   &Number,
				PostCode: &PostCode,
				Floor:    &Floor,
				Random: &proto.RandomOptional{
					FieldA: &FieldA,
					FieldB: &FieldB,
					FieldC: &FieldC,
					FieldD: &FieldD,
					FieldE: &FieldE,
					FieldF: &FieldF,
					FieldG: &FieldG,
					FieldH: &FieldH,
					FieldI: &FieldI,
					FieldJ: &FieldJ,
					FieldK: &FieldK,
					FieldL: &FieldL,
					FieldM: &FieldM,
					FieldN: &FieldN,
					FieldO: &FieldO,
					FieldP: &FieldP,
					FieldQ: &FieldQ,
					FieldR: &FieldR,
					FieldS: &FieldS,
					FieldT: &FieldT,
					FieldU: &FieldU,
					FieldV: &FieldV,
					FieldW: &FieldW,
					FieldX: &FieldX,
					FieldY: &FieldY,
					FieldZ: &FieldZ,
					NestedRandom: &proto.NestedRandomOptional{
						FieldA: &FieldA1,
						FieldB: &FieldB1,
						FieldC: &FieldC1,
						FieldD: &FieldD1,
						FieldE: &FieldE1,
						FieldF: &FieldF1,
						FieldG: &FieldG1,
						FieldH: &FieldH1,
						FieldI: &FieldI1,
						FieldJ: &FieldJ1,
						FieldK: &FieldK1,
						FieldL: &FieldL1,
						FieldM: &FieldM1,
						FieldN: &FieldN1,
						FieldO: &FieldO1,
						FieldP: &FieldP1,
						FieldQ: &FieldQ1,
						FieldR: &FieldR1,
						FieldS: &FieldS1,
						FieldT: &FieldT1,
						FieldU: &FieldU1,
						FieldV: &FieldV1,
						FieldW: &FieldW1,
						FieldX: &FieldX1,
						FieldY: &FieldY1,
						FieldZ: &FieldZ1,
					},
				},
			},
		})
	}
	return out
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package audio
import (
"context"
"os"
"path/filepath"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/audio"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/bundles/cros/audio/audionode"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/filesapp"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// volumeControlTier selects how much of the volume-control flow a test
// variant exercises.
type volumeControlTier int

const (
	// volumeOnly test tier will just increase/decrease the audio node volume.
	volumeOnly volumeControlTier = iota
	// withAudio test tier will generate an audio file and while playing the audio, it increases/decreases the audio node volume.
	withAudio
)

// volumeControlParam carries the per-variant parameters for VolumeControl.
type volumeControlParam struct {
	tier volumeControlTier
	// expectedAudioNode is the audio node type the output should route
	// through (e.g. "INTERNAL_SPEAKER", "HEADPHONE").
	expectedAudioNode string
}
// init registers the VolumeControl test and its three parameterized variants.
func init() {
	testing.AddTest(&testing.Test{
		Func:         VolumeControl,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Change the volume using keyboard keys",
		Contacts:     []string{"ambalavanan.m.m@intel.com", "intel-chrome-system-automation-team@intel.com"},
		SoftwareDeps: []string{"chrome"},
		Pre:          chrome.LoggedIn(),
		Params: []testing.Param{{
			// Only exercises the volume keys, no audio playback.
			Name:      "volume_only",
			ExtraAttr: []string{"group:mainline", "informational"},
			Val: volumeControlParam{
				tier:              volumeOnly,
				expectedAudioNode: "INTERNAL_SPEAKER",
			},
		}, {
			// Plays a generated tone through the internal speaker while
			// changing the volume.
			Name:      "with_audio",
			ExtraAttr: []string{"group:intel-gating"},
			Val: volumeControlParam{
				tier:              withAudio,
				expectedAudioNode: "INTERNAL_SPEAKER",
			},
		}, {
			// Same as with_audio but expects routing through headphones.
			Name: "with_audio_headphone",
			Val: volumeControlParam{
				tier:              withAudio,
				expectedAudioNode: "HEADPHONE",
			},
		}},
	})
}
// VolumeControl will increase/decrease volume using keyboard keys. For the
// withAudio tiers it first generates and plays a 30-second sine tone and
// verifies the audio routes through the expected output node.
func VolumeControl(ctx context.Context, s *testing.State) {
	const (
		audioRate    = 48000
		audioChannel = 2
		duration     = 30
	)
	param := s.Param().(volumeControlParam)
	cr := s.PreValue().(*chrome.Chrome)
	kb, err := input.VirtualKeyboard(ctx)
	if err != nil {
		s.Fatal("Failed to open the keyboard: ", err)
	}
	defer kb.Close()
	if param.tier == withAudio {
		s.Log("Generate sine raw input file that lasts 30 seconds")
		rawFileName := "30SEC.raw"
		downloadsPath, err := cryptohome.DownloadsPath(ctx, cr.NormalizedUser())
		if err != nil {
			s.Fatal("Failed to get user's Download path: ", err)
		}
		rawFilePath := filepath.Join(downloadsPath, rawFileName)
		rawFile := audio.TestRawData{
			Path:          rawFilePath,
			BitsPerSample: 16,
			Channels:      audioChannel,
			Rate:          audioRate,
			Frequencies:   []int{440, 440},
			Volume:        100,
			Duration:      duration,
		}
		if err := audio.GenerateTestRawData(ctx, rawFile); err != nil {
			s.Fatal("Failed to generate audio test data: ", err)
		}
		defer os.Remove(rawFile.Path)
		wavFileName := "30SEC.wav"
		wavFile := filepath.Join(downloadsPath, wavFileName)
		if err := audio.ConvertRawToWav(ctx, rawFilePath, wavFile, audioRate, audioChannel); err != nil {
			s.Fatal("Failed to convert raw to wav: ", err)
		}
		defer os.Remove(wavFile)
		// Open the test API.
		tconn, err := cr.TestAPIConn(ctx)
		if err != nil {
			s.Fatal("Failed to create test API connection: ", err)
		}
		defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)
		files, err := filesapp.Launch(ctx, tconn)
		if err != nil {
			s.Fatal("Failed to launch the Files App: ", err)
		}
		defer files.Close(ctx)
		if err := files.OpenDownloads()(ctx); err != nil {
			s.Fatal("Failed to open Downloads folder in files app: ", err)
		}
		if err := files.OpenFile(wavFileName)(ctx); err != nil {
			s.Fatalf("Failed to open the audio file %q: %v", wavFileName, err)
		}
		// Closing the audio player.
		// Bug fix: the original `if kb.Accel(ctx, "Ctrl+W"); err != nil`
		// discarded Accel's return value and tested a stale outer err, so a
		// failure to close the player was never detected.
		defer func() {
			if err := kb.Accel(ctx, "Ctrl+W"); err != nil {
				s.Error("Failed to close Audio player: ", err)
			}
		}()
		s.Log("Play the audio file for 5 seconds")
		// Sample time for the audio to play for 5 seconds.
		if err := testing.Sleep(ctx, 5*time.Second); err != nil {
			s.Fatal("Error while waiting during sample time: ", err)
		}
		audioDeviceName, err := audionode.SetAudioNode(ctx, param.expectedAudioNode)
		if err != nil {
			s.Fatal("Failed to set the Audio node: ", err)
		}
		s.Logf("Selected audio device name: %q", audioDeviceName)
		devName, err := crastestclient.FirstRunningDevice(ctx, audio.OutputStream)
		if err != nil {
			s.Fatal("Failed to detect running output device: ", err)
		}
		if audioDeviceName != devName {
			s.Fatalf("Failed to route the audio through expected audio node: got %q; want %q", devName, audioDeviceName)
		}
	}
	vh, err := audionode.NewVolumeHelper(ctx)
	if err != nil {
		s.Fatal("Failed to create the volumeHelper: ", err)
	}
	originalVolume, err := vh.ActiveNodeVolume(ctx)
	// Bug fix: the original ignored this error and could defer restoring a
	// bogus zero volume.
	if err != nil {
		s.Fatal("Failed to get the original volume: ", err)
	}
	defer vh.SetVolume(ctx, originalVolume)
	topRow, err := input.KeyboardTopRowLayout(ctx, kb)
	if err != nil {
		s.Fatal("Failed to obtain the top-row layout: ", err)
	}
	// isMuted reports the system mute state as seen by CRAS.
	isMuted := func() bool {
		dump, err := testexec.CommandContext(ctx, "sh", "-c", "cras_test_client --dump_server_info | grep muted").Output()
		if err != nil {
			// Bug fix: was s.Errorf, which continued and compared a
			// meaningless empty dump.
			s.Fatalf("Failed to dump server info: %s", err)
		}
		muted := strings.TrimSpace(string(dump[strings.LastIndex(string(dump), ":")+1:]))
		return muted == "Muted"
	}
	s.Log("Press mute key and unmute by pressing Volume up key")
	if err = kb.Accel(ctx, topRow.VolumeMute); err != nil {
		s.Fatal(`Failed to press "Mute": `, err)
	}
	if !isMuted() {
		s.Fatal("Failed to mute the audio")
	}
	if err = kb.Accel(ctx, topRow.VolumeUp); err != nil {
		s.Fatal(`Failed to press "VolumeUp": `, err)
	}
	if isMuted() {
		s.Fatal("Failed to unmute the audio")
	}
	s.Log("Decrease volume to 0 and verify for every key press")
	for {
		volume, err := vh.ActiveNodeVolume(ctx)
		if err != nil {
			s.Fatal("Failed to get volume: ", err)
		}
		if volume == 0 {
			break
		}
		if err := vh.VerifyVolumeChanged(ctx, func() error {
			return kb.Accel(ctx, topRow.VolumeDown)
		}); err != nil {
			s.Fatal(`Failed to change volume after pressing "VolumeDown": `, err)
		}
	}
	s.Log("Increase volume to 100 and verify for every key press")
	for {
		volume, err := vh.ActiveNodeVolume(ctx)
		if err != nil {
			s.Fatal("Failed to get volume: ", err)
		}
		if volume == 100 {
			break
		}
		if err := vh.VerifyVolumeChanged(ctx, func() error {
			return kb.Accel(ctx, topRow.VolumeUp)
		}); err != nil {
			s.Fatal(`Failed to change volume after pressing "VolumeUp": `, err)
		}
	}
}
|
/*
Copyright 2022 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudrun
import (
"context"
"fmt"
"io"
"net/http"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/run/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8syaml "sigs.k8s.io/yaml"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/access"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/debug"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/deploy/label"
sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/gcp"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/hooks"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/instrumentation"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubernetes/manifest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/status"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/sync"
"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
)
// Config contains the config options the Cloud Run deployer needs.
type Config interface {
	// PortForwardResources returns the resources requested for port forwarding.
	PortForwardResources() []*latest.PortForwardResource
	// PortForwardOptions returns the command-line port-forward options.
	PortForwardOptions() config.PortForwardOptions
	// Mode returns the current Skaffold run mode.
	Mode() config.RunMode
	// Tail reports whether log tailing was requested.
	Tail() bool
}
// Deployer deploys code to Google Cloud Run.
type Deployer struct {
	// configName identifies which Skaffold config this deployer serves.
	configName string
	*latest.CloudRunDeploy
	// logger aggregates logs for the deployed resources.
	logger *LogAggregator
	// accessor handles port forwarding to deployed services.
	accessor *RunAccessor
	// monitor tracks deployment status; lazily created by getMonitor.
	monitor *Monitor
	labeller *label.DefaultLabeller
	// hookRunner executes pre/post-deploy lifecycle hooks.
	hookRunner hooks.Runner
	// Project and Region locate the target Cloud Run environment.
	Project string
	Region string
	// additional client options for connecting to Cloud Run, used for tests
	clientOptions []option.ClientOption
	// useGcpOptions controls whether standard GCP auth/endpoint options are added.
	useGcpOptions bool
}
// NewDeployer creates a new Deployer for Cloud Run from the Skaffold deploy config.
func NewDeployer(cfg Config, labeller *label.DefaultLabeller, crDeploy *latest.CloudRunDeploy, configName string) (*Deployer, error) {
	runID := labeller.GetRunID()
	d := &Deployer{
		configName:     configName,
		CloudRunDeploy: crDeploy,
		Project:        crDeploy.ProjectID,
		Region:         crDeploy.Region,
		// TODO: implement logger for Cloud Run.
		logger:        NewLoggerAggregator(cfg, runID),
		accessor:      NewAccessor(cfg, runID),
		labeller:      labeller,
		hookRunner:    hooks.NewCloudRunDeployRunner(crDeploy.LifecycleHooks, hooks.NewDeployEnvOpts(runID, "", []string{})),
		useGcpOptions: true,
	}
	return d, nil
}
// Deploy creates or updates the Cloud Run resources described by the manifests
// belonging to this deployer's config, stopping at the first failure.
func (d *Deployer) Deploy(ctx context.Context, out io.Writer, artifacts []graph.Artifact, manifestsByConfig manifest.ManifestListByConfig) error {
	manifests := manifestsByConfig.GetForConfig(d.ConfigName())
	for i := range manifests {
		if err := d.deployToCloudRun(ctx, out, manifests[i]); err != nil {
			return err
		}
	}
	return nil
}
// ConfigName returns the name of the Skaffold config this deployer was created for.
func (d *Deployer) ConfigName() string {
	return d.configName
}
// Dependencies list the files that would trigger a redeploy.
// The Cloud Run deployer has none, so this always returns an empty list.
func (d *Deployer) Dependencies() ([]string, error) {
	return []string{}, nil
}
// Cleanup deletes the created Cloud Run services and jobs for this config.
// With dryRun set, it only prints what would be deleted.
func (d *Deployer) Cleanup(ctx context.Context, out io.Writer, dryRun bool, byConfig manifest.ManifestListByConfig) error {
	return d.cleanupRun(ctx, out, dryRun, byConfig.GetForConfig(d.configName))
}
// GetDebugger Get the Debugger for Cloud Run. Debugging is not supported by
// this deployer, so a no-op implementation is returned.
func (d *Deployer) GetDebugger() debug.Debugger {
	return &debug.NoopDebugger{}
}
// GetLogger Get the logger for the Cloud Run deploy.
func (d *Deployer) GetLogger() log.Logger {
	return d.logger
}
// GetAccessor gets the accessor used for port forwarding to deployed Cloud Run
// services. (The previous comment claimed this was a no-op; it returns the
// real RunAccessor created in NewDeployer.)
func (d *Deployer) GetAccessor() access.Accessor {
	return d.accessor
}
// GetSyncer gets the file syncer for Cloud Run. File sync is not supported by
// this deployer, so a no-op implementation is returned.
func (d *Deployer) GetSyncer() sync.Syncer {
	return &sync.NoopSyncer{}
}
// TrackBuildArtifacts is not supported by this deployer; it is a no-op kept
// to satisfy the deployer interface.
func (d *Deployer) TrackBuildArtifacts(_, _ []graph.Artifact) {
}
// RegisterLocalImages is not supported by this deployer; it is a no-op kept
// to satisfy the deployer interface.
func (d *Deployer) RegisterLocalImages([]graph.Artifact) {
}
// GetStatusMonitor gets the resource that will monitor deployment status.
// The monitor is created lazily on first use.
func (d *Deployer) GetStatusMonitor() status.Monitor {
	return d.getMonitor()
}
// HasRunnableHooks reports whether any pre- or post-deploy lifecycle hooks
// are configured for this deploy.
func (d *Deployer) HasRunnableHooks() bool {
	return len(d.CloudRunDeploy.LifecycleHooks.PreHooks) > 0 || len(d.CloudRunDeploy.LifecycleHooks.PostHooks) > 0
}
// PreDeployHooks runs the configured pre-deploy lifecycle hooks inside a
// "Deploy_PreHooks" trace span.
func (d *Deployer) PreDeployHooks(ctx context.Context, out io.Writer) error {
	hookCtx, endTrace := instrumentation.StartTrace(ctx, "Deploy_PreHooks")
	err := d.hookRunner.RunPreHooks(hookCtx, out)
	if err != nil {
		endTrace(instrumentation.TraceEndError(err))
	} else {
		endTrace()
	}
	return err
}
// PostDeployHooks runs the configured post-deploy lifecycle hooks inside a
// "Deploy_PostHooks" trace span.
func (d *Deployer) PostDeployHooks(ctx context.Context, out io.Writer) error {
	hookCtx, endTrace := instrumentation.StartTrace(ctx, "Deploy_PostHooks")
	err := d.hookRunner.RunPostHooks(hookCtx, out)
	if err != nil {
		endTrace(instrumentation.TraceEndError(err))
	} else {
		endTrace()
	}
	return err
}
// getMonitor lazily creates and caches the status monitor for this deployer.
func (d *Deployer) getMonitor() *Monitor {
	if d.monitor != nil {
		return d.monitor
	}
	d.monitor = NewMonitor(d.labeller, d.clientOptions)
	return d.monitor
}
// deployToCloudRun creates a Cloud Run client, determines whether the given
// manifest describes a knative Service or a Cloud Run Job, dispatches to the
// matching deploy helper, and registers the resulting resource with the
// accessor (services only) and the status monitor.
func (d *Deployer) deployToCloudRun(ctx context.Context, out io.Writer, manifest []byte) error {
	cOptions := d.clientOptions
	if d.useGcpOptions {
		// Target the regional Cloud Run endpoint and add GCP auth options.
		cOptions = append(cOptions, option.WithEndpoint(fmt.Sprintf("%s-run.googleapis.com", d.Region)))
		cOptions = append(gcp.ClientOptions(ctx), cOptions...)
	}
	crclient, err := run.NewService(ctx, cOptions...)
	if err != nil {
		return sErrors.NewError(fmt.Errorf("unable to create Cloud Run Client"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_GET_CLOUD_RUN_CLIENT_ERR,
		})
	}
	// figure out which type we have:
	resource := &unstructured.Unstructured{}
	if err = k8syaml.Unmarshal(manifest, resource); err != nil {
		return sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Service config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	var resName *RunResourceName
	switch {
	case resource.GetAPIVersion() == "serving.knative.dev/v1" && resource.GetKind() == "Service":
		resName, err = d.deployService(crclient, manifest, out)
		// the accessor only supports services. Jobs don't run by themselves so port forwarding doesn't make sense.
		if resName != nil {
			d.accessor.AddResource(*resName)
		}
	case resource.GetAPIVersion() == "run.googleapis.com/v1" && resource.GetKind() == "Job":
		resName, err = d.deployJob(crclient, manifest, out)
	default:
		err = sErrors.NewError(fmt.Errorf("unsupported Kind for Cloud Run Deployer: %s/%s", resource.GetAPIVersion(), resource.GetKind()),
			&proto.ActionableErr{
				Message: "Kind is not supported",
				ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
			})
	}
	if err != nil {
		return err
	}
	// resName is non-nil here: both deploy helpers return a name on success.
	d.getMonitor().Resources = append(d.getMonitor().Resources, *resName)
	return nil
}
// deployService creates or replaces a knative-style Cloud Run Service from
// the given manifest. The project comes from the Skaffold config when set,
// otherwise from the manifest's namespace. Returns the deployed resource's
// name so callers can register it for access and monitoring.
func (d *Deployer) deployService(crclient *run.APIService, manifest []byte, out io.Writer) (*RunResourceName, error) {
	service := &run.Service{}
	if err := k8syaml.Unmarshal(manifest, service); err != nil {
		return nil, sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Service config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	// Cloud Run encodes the project as the metadata namespace.
	if d.Project != "" {
		service.Metadata.Namespace = d.Project
	} else if service.Metadata.Namespace == "" {
		return nil, sErrors.NewError(fmt.Errorf("unable to detect project for Cloud Run"), &proto.ActionableErr{
			Message: "No Google Cloud project found in Cloud Run Manifest or Skaffold Config",
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	// we need to strip "skaffold.dev" from the run-id label because gcp labels don't support domains
	runID, foundID := service.Metadata.Labels["skaffold.dev/run-id"]
	if foundID {
		delete(service.Metadata.Labels, "skaffold.dev/run-id")
		service.Metadata.Labels["run-id"] = runID
	}
	// Do the same rewrite on the revision template's labels, if present.
	if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Metadata != nil {
		runID, foundID = service.Spec.Template.Metadata.Labels["skaffold.dev/run-id"]
		if foundID {
			delete(service.Spec.Template.Metadata.Labels, "skaffold.dev/run-id")
			service.Spec.Template.Metadata.Labels["run-id"] = runID
		}
	}
	resName := RunResourceName{
		Project: service.Metadata.Namespace,
		Region:  d.Region,
		Service: service.Metadata.Name,
	}
	output.Default.Fprintln(out, "Deploying Cloud Run service:\n\t", service.Metadata.Name)
	parent := fmt.Sprintf("projects/%s/locations/%s", service.Metadata.Namespace, d.Region)
	sName := resName.String()
	d.logger.AddResource(resName)
	// Probe for an existing service to decide between Create and Replace.
	getCall := crclient.Projects.Locations.Services.Get(sName)
	_, err := getCall.Do()
	if err != nil {
		gErr, ok := err.(*googleapi.Error)
		if !ok || gErr.Code != http.StatusNotFound {
			return nil, sErrors.NewError(fmt.Errorf("error checking Cloud Run State: %w", err), &proto.ActionableErr{
				Message: err.Error(),
				ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_GET_SERVICE_ERR,
			})
		}
		// This is a new service, we need to create it
		createCall := crclient.Projects.Locations.Services.Create(parent, service)
		_, err = createCall.Do()
	} else {
		replaceCall := crclient.Projects.Locations.Services.ReplaceService(sName, service)
		_, err = replaceCall.Do()
	}
	if err != nil {
		return nil, sErrors.NewError(fmt.Errorf("error deploying Cloud Run Service: %s", err), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_UPDATE_SERVICE_ERR,
		})
	}
	return &resName, nil
}
// deployJob creates or replaces a Cloud Run Job from the given manifest. The
// project comes from the Skaffold config when set, otherwise from the
// manifest's namespace. Returns the deployed resource's name.
//
// Fixes two copy-paste messages from the service path: the unmarshal error
// and the progress output both said "service" while handling a Job.
func (d *Deployer) deployJob(crclient *run.APIService, manifest []byte, out io.Writer) (*RunResourceName, error) {
	job := &run.Job{}
	if err := k8syaml.Unmarshal(manifest, job); err != nil {
		return nil, sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Job config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	// Cloud Run encodes the project as the metadata namespace.
	if d.Project != "" {
		job.Metadata.Namespace = d.Project
	} else if job.Metadata.Namespace == "" {
		return nil, sErrors.NewError(fmt.Errorf("unable to detect project for Cloud Run"), &proto.ActionableErr{
			Message: "No Google Cloud project found in Cloud Run Manifest or Skaffold Config",
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	// we need to strip "skaffold.dev" from the run-id label because gcp labels don't support domains
	runID, foundID := job.Metadata.Labels["skaffold.dev/run-id"]
	if foundID {
		delete(job.Metadata.Labels, "skaffold.dev/run-id")
		job.Metadata.Labels["run-id"] = runID
	}
	// Do the same rewrite on the task template's labels, if present.
	if job.Spec != nil && job.Spec.Template != nil && job.Spec.Template.Metadata != nil {
		runID, foundID = job.Spec.Template.Metadata.Labels["skaffold.dev/run-id"]
		if foundID {
			delete(job.Spec.Template.Metadata.Labels, "skaffold.dev/run-id")
			job.Spec.Template.Metadata.Labels["run-id"] = runID
		}
	}
	resName := RunResourceName{
		Project: job.Metadata.Namespace,
		Region:  d.Region,
		Job:     job.Metadata.Name,
	}
	output.Default.Fprintln(out, "Deploying Cloud Run job:\n\t", job.Metadata.Name)
	parent := fmt.Sprintf("namespaces/%s", job.Metadata.Namespace)
	sName := resName.String()
	// Probe for an existing job to decide between Create and Replace.
	getCall := crclient.Namespaces.Jobs.Get(sName)
	_, err := getCall.Do()
	if err != nil {
		gErr, ok := err.(*googleapi.Error)
		if !ok || gErr.Code != http.StatusNotFound {
			return nil, sErrors.NewError(fmt.Errorf("error checking Cloud Run State: %w", err), &proto.ActionableErr{
				Message: err.Error(),
				ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_GET_SERVICE_ERR,
			})
		}
		// This is a new job, we need to create it
		createCall := crclient.Namespaces.Jobs.Create(parent, job)
		_, err = createCall.Do()
	} else {
		replaceCall := crclient.Namespaces.Jobs.ReplaceJob(sName, job)
		_, err = replaceCall.Do()
	}
	if err != nil {
		return nil, sErrors.NewError(fmt.Errorf("error deploying Cloud Run Job: %s", err), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_UPDATE_SERVICE_ERR,
		})
	}
	return &resName, nil
}
// cleanupRun deletes every Cloud Run service and job described by the given
// manifests (or, with dryRun, prints what would be deleted). Deletion
// continues past individual failures; the first error is returned at the end.
func (d *Deployer) cleanupRun(ctx context.Context, out io.Writer, dryRun bool, manifests manifest.ManifestList) error {
	opts := d.clientOptions
	if d.useGcpOptions {
		opts = append(opts, option.WithEndpoint(fmt.Sprintf("%s-run.googleapis.com", d.Region)))
		opts = append(gcp.ClientOptions(ctx), opts...)
	}
	crclient, err := run.NewService(ctx, opts...)
	if err != nil {
		return sErrors.NewError(fmt.Errorf("unable to create Cloud Run Client"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_GET_CLOUD_RUN_CLIENT_ERR,
		})
	}
	var errs []error
	for _, m := range manifests {
		tpe, err := getTypeFromManifest(m)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		switch tpe {
		case typeService:
			if err := d.deleteRunService(crclient, out, dryRun, m); err != nil {
				errs = append(errs, err)
			}
		case typeJob:
			if err := d.deleteRunJob(crclient, out, dryRun, m); err != nil {
				errs = append(errs, err)
			}
		}
	}
	if len(errs) != 0 {
		// TODO: is there a good way to report all of the errors?
		return errs[0]
	}
	return nil
}
// deleteRunService deletes the Cloud Run Service named by the manifest, using
// the Skaffold-configured project when set, otherwise the manifest namespace.
// With dryRun, it prints the fully-qualified name instead of deleting.
func (d *Deployer) deleteRunService(crclient *run.APIService, out io.Writer, dryRun bool, manifest []byte) error {
	service := &run.Service{}
	if err := k8syaml.Unmarshal(manifest, service); err != nil {
		return sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Service config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	var projectID string
	switch {
	case d.Project != "":
		projectID = d.Project
	case service.Metadata.Namespace != "":
		projectID = service.Metadata.Namespace
	default:
		// no project specified, we don't know what to delete.
		return sErrors.NewError(fmt.Errorf("unable to determine Google Cloud Project"), &proto.ActionableErr{
			Message: "No Google Cloud Project found in Cloud Run manifest or Skaffold Manifest.",
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	parent := fmt.Sprintf("projects/%s/locations/%s", projectID, d.Region)
	sName := fmt.Sprintf("%s/services/%s", parent, service.Metadata.Name)
	if dryRun {
		output.Yellow.Fprintln(out, sName)
		return nil
	}
	delCall := crclient.Projects.Locations.Services.Delete(sName)
	_, err := delCall.Do()
	if err != nil {
		return sErrors.NewError(fmt.Errorf("unable to delete Cloud Run Service"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_DELETE_SERVICE_ERR,
		})
	}
	return nil
}
// deleteRunJob deletes the Cloud Run Job named by the manifest, using the
// Skaffold-configured project when set, otherwise the manifest namespace.
// With dryRun, it prints the fully-qualified name instead of deleting.
//
// Fixes a copy-paste unmarshal message that said "Service config" while
// handling a Job.
func (d *Deployer) deleteRunJob(crclient *run.APIService, out io.Writer, dryRun bool, manifest []byte) error {
	job := &run.Job{}
	if err := k8syaml.Unmarshal(manifest, job); err != nil {
		return sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Job config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	var projectID string
	switch {
	case d.Project != "":
		projectID = d.Project
	case job.Metadata.Namespace != "":
		projectID = job.Metadata.Namespace
	default:
		// no project specified, we don't know what to delete.
		return sErrors.NewError(fmt.Errorf("unable to determine Google Cloud Project"), &proto.ActionableErr{
			Message: "No Google Cloud Project found in Cloud Run manifest or Skaffold Manifest.",
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	parent := fmt.Sprintf("namespaces/%s", projectID)
	sName := fmt.Sprintf("%s/jobs/%s", parent, job.Metadata.Name)
	if dryRun {
		output.Yellow.Fprintln(out, sName)
		return nil
	}
	delCall := crclient.Namespaces.Jobs.Delete(sName)
	_, err := delCall.Do()
	if err != nil {
		return sErrors.NewError(fmt.Errorf("unable to delete Cloud Run Job"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_CLOUD_RUN_DELETE_SERVICE_ERR,
		})
	}
	return nil
}
// getTypeFromManifest inspects a raw manifest and reports whether it
// describes a Cloud Run Service (knative serving) or a Cloud Run Job.
func getTypeFromManifest(manifest []byte) (string, error) {
	resource := &unstructured.Unstructured{}
	if err := k8syaml.Unmarshal(manifest, resource); err != nil {
		return "", sErrors.NewError(fmt.Errorf("unable to unmarshal Cloud Run Service config"), &proto.ActionableErr{
			Message: err.Error(),
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
	}
	apiVersion, kind := resource.GetAPIVersion(), resource.GetKind()
	if apiVersion == "serving.knative.dev/v1" && kind == "Service" {
		return typeService, nil
	}
	if apiVersion == "run.googleapis.com/v1" && kind == "Job" {
		return typeJob, nil
	}
	return "", sErrors.NewError(fmt.Errorf("unsupported Kind for Cloud Run Deployer: %s/%s", apiVersion, kind),
		&proto.ActionableErr{
			Message: "Kind is not supported",
			ErrCode: proto.StatusCode_DEPLOY_READ_MANIFEST_ERR,
		})
}
|
package main
// ConfigAWS holds AWS-related configuration. Currently an empty placeholder;
// see the TODO below.
type ConfigAWS struct {
}

// TODO: add AWS proxy configuration.
|
package main
import "fmt"
// main demonstrates that shifting 2 left multiplies it by powers of two,
// while shifting right performs truncating division toward zero.
func main() {
	two := 2
	a, b, c := two<<1, two<<2, two<<3 // 4, 8, 16
	d, e, f := two>>1, two>>2, two>>3 // 1, 0, 0
	fmt.Println(a, b, c, d, e, f)
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package build
import (
"fmt"
"testing"
"google.golang.org/protobuf/testing/protocmp"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/constants"
sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors"
"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
func TestMakeAuthSuggestionsForRepo(t *testing.T) {
testutil.CheckDeepEqual(t, &proto.Suggestion{
SuggestionCode: proto.SuggestionCode_DOCKER_AUTH_CONFIGURE,
Action: "try `docker login`",
}, makeAuthSuggestionsForRepo(""), protocmp.Transform())
testutil.CheckDeepEqual(t, &proto.Suggestion{
SuggestionCode: proto.SuggestionCode_GCLOUD_DOCKER_AUTH_CONFIGURE,
Action: "try `gcloud auth configure-docker gcr.io`",
}, makeAuthSuggestionsForRepo("gcr.io/test"), protocmp.Transform())
testutil.CheckDeepEqual(t, &proto.Suggestion{
SuggestionCode: proto.SuggestionCode_GCLOUD_DOCKER_AUTH_CONFIGURE,
Action: "try `gcloud auth configure-docker eu.gcr.io`",
}, makeAuthSuggestionsForRepo("eu.gcr.io/test"), protocmp.Transform())
testutil.CheckDeepEqual(t, &proto.Suggestion{
SuggestionCode: proto.SuggestionCode_GCLOUD_DOCKER_AUTH_CONFIGURE,
Action: "try `gcloud auth configure-docker us-docker.pkg.dev`",
}, makeAuthSuggestionsForRepo("us-docker.pkg.dev/k8s-skaffold/skaffold"), protocmp.Transform())
}
// TestBuildProblems exercises the build-phase error matchers: each case
// feeds a raw build error through ShowAIError / ActionableErr and checks
// both the humanized message and the structured ActionableErr (status code
// plus suggestions) that skaffold surfaces to callers.
func TestBuildProblems(t *testing.T) {
	tests := []struct {
		description string
		context     config.ContextConfig
		mode        config.RunMode
		optRepo     string
		err         error
		expected    string
		expectedAE  *proto.ActionableErr
	}{
		{
			description: "Push access denied when neither default repo or global config is defined in `build` command",
			mode:        config.RunModes.Build,
			err:         fmt.Errorf("skaffold build failed: could not push image: denied: push access to resource"),
			expected:    "Build Failed. No push access to specified image repository. Try running with `--default-repo` flag.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_PUSH_ACCESS_DENIED,
				Message: "skaffold build failed: could not push image: denied: push access to resource",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_ADD_DEFAULT_REPO,
					Action:         "Try running with `--default-repo` flag",
				},
				}},
		},
		{
			description: "Push access denied when neither default repo or global config is defined in `dev` command",
			mode:        config.RunModes.Dev,
			err:         fmt.Errorf("skaffold build failed: could not push image: denied: push access to resource"),
			expected:    "Build Failed. No push access to specified image repository. Try running with `--default-repo` flag. Otherwise start a local kubernetes cluster like `minikube`.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_PUSH_ACCESS_DENIED,
				Message: "skaffold build failed: could not push image: denied: push access to resource",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_ADD_DEFAULT_REPO,
					Action:         "Try running with `--default-repo` flag. Otherwise start a local kubernetes cluster like `minikube`",
				},
				}},
		},
		{
			description: "Push access denied when default repo is defined",
			optRepo:     "gcr.io/test",
			err:         fmt.Errorf("skaffold build failed: could not push image image1 : denied: push access to resource"),
			expected:    "Build Failed. No push access to specified image repository. Check your `--default-repo` value or try `gcloud auth configure-docker gcr.io`.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_PUSH_ACCESS_DENIED,
				Message: "skaffold build failed: could not push image image1 : denied: push access to resource",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_DEFAULT_REPO,
					Action:         "Check your `--default-repo` value",
				}, {
					SuggestionCode: proto.SuggestionCode_GCLOUD_DOCKER_AUTH_CONFIGURE,
					Action:         "try `gcloud auth configure-docker gcr.io`",
				},
				},
			},
		},
		{
			description: "Push access denied when global repo is defined",
			context:     config.ContextConfig{DefaultRepo: "docker.io/global"},
			err:         fmt.Errorf("skaffold build failed: could not push image: denied: push access to resource"),
			expected:    "Build Failed. No push access to specified image repository. Check your default-repo setting in skaffold config or try `docker login docker.io`.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_PUSH_ACCESS_DENIED,
				Message: "skaffold build failed: could not push image: denied: push access to resource",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_DEFAULT_REPO_GLOBAL_CONFIG,
					Action:         "Check your default-repo setting in skaffold config",
				}, {
					SuggestionCode: proto.SuggestionCode_DOCKER_AUTH_CONFIGURE,
					Action:         "try `docker login docker.io`",
				},
				},
			},
		},
		{
			description: "unknown project error",
			err:         fmt.Errorf("build failed: could not push image: unknown: Project test"),
			expected:    "Build Failed. could not push image: unknown: Project test. Check your GCR project.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_PROJECT_NOT_FOUND,
				Message: "build failed: could not push image: unknown: Project test",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_GCLOUD_PROJECT,
					Action:         "Check your GCR project",
				},
				},
			},
		},
		{
			description: "build error when docker is not running with minikube local cluster",
			err: fmt.Errorf(`creating runner: creating builder: getting docker client: getting minikube env: running [/Users/tejaldesai/Downloads/google-cloud-sdk2/bin/minikube docker-env --shell none -p minikube]
 - stdout: "\n\n"
 - stderr: "! Executing \"docker container inspect minikube --format={{.State.Status}}\" took an unusually long time: 7.36540945s\n* Restarting the docker service may improve performance.\nX Exiting due to GUEST_STATUS: state: unknown state \"minikube\": docker container inspect minikube --format=: exit status 1\nstdout:\n\n\nstderr:\nCannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?\n\n* \n* If the above advice does not help, please let us know: \n - https://github.com/kubernetes/minikube/issues/new/choose\n"
 - cause: exit status 80`),
			expected: "Build Failed. Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Check if docker is running.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_DOCKER_DAEMON_NOT_RUNNING,
				Message: "creating runner: creating builder: getting docker client: getting minikube env: running [/Users/tejaldesai/Downloads/google-cloud-sdk2/bin/minikube docker-env --shell none -p minikube]\n - stdout: \"\\n\\n\"\n - stderr: \"! Executing \\\"docker container inspect minikube --format={{.State.Status}}\\\" took an unusually long time: 7.36540945s\\n* Restarting the docker service may improve performance.\\nX Exiting due to GUEST_STATUS: state: unknown state \\\"minikube\\\": docker container inspect minikube --format=: exit status 1\\nstdout:\\n\\n\\nstderr:\\nCannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?\\n\\n* \\n* If the above advice does not help, please let us know: \\n - https://github.com/kubernetes/minikube/issues/new/choose\\n\"\n - cause: exit status 80",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_DOCKER_RUNNING,
					Action:         "Check if docker is running",
				},
				},
			},
		},
		{
			description: "build error when docker is not running and deploying to GKE",
			err:         fmt.Errorf(`exiting dev mode because first build failed: docker build: Cannot connect to the Docker daemon at tcp://127.0.0.1:32770. Is the docker daemon running?`),
			expected:    "Build Failed. Cannot connect to the Docker daemon at tcp://127.0.0.1:32770. Check if docker is running.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_DOCKER_DAEMON_NOT_RUNNING,
				Message: "exiting dev mode because first build failed: docker build: Cannot connect to the Docker daemon at tcp://127.0.0.1:32770. Is the docker daemon running?",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_DOCKER_RUNNING,
					Action:         "Check if docker is running",
				},
				},
			},
		},
		{
			description: "build error when docker is not and no host information",
			// See https://github.com/moby/moby/blob/master/client/errors.go#L20
			err:      fmt.Errorf(`exiting dev mode because first build failed: docker build: Cannot connect to the Docker daemon. Is the docker daemon running on this host?`),
			expected: "Build Failed. Cannot connect to the Docker daemon. Check if docker is running.",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_DOCKER_DAEMON_NOT_RUNNING,
				Message: "exiting dev mode because first build failed: docker build: Cannot connect to the Docker daemon. Is the docker daemon running on this host?",
				Suggestions: []*proto.Suggestion{{
					SuggestionCode: proto.SuggestionCode_CHECK_DOCKER_RUNNING,
					Action:         "Check if docker is running",
				},
				},
			},
		},
		{
			description: "build cancelled",
			// See https://github.com/moby/moby/blob/master/client/errors.go#L20
			err:      fmt.Errorf(`docker build: error during connect: Post \"https://127.0.0.1:32770/v1.24/build?buildargs=: context canceled`),
			expected: "Build Cancelled",
			expectedAE: &proto.ActionableErr{
				ErrCode: proto.StatusCode_BUILD_CANCELLED,
				Message: `docker build: error during connect: Post \"https://127.0.0.1:32770/v1.24/build?buildargs=: context canceled`,
			},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Stub out the global-config lookup and problem catalog so the
			// test is hermetic.
			t.Override(&getConfigForCurrentContext, func(string) (*config.ContextConfig, error) {
				return &test.context, nil
			})
			t.Override(&sErrors.GetProblemCatalogCopy, func() sErrors.ProblemCatalog {
				pc := sErrors.NewProblemCatalog()
				pc.AddPhaseProblems(constants.Build, problems)
				return pc
			})
			cfg := mockConfig{optRepo: test.optRepo, mode: test.mode}
			actual := sErrors.ShowAIError(&cfg, test.err)
			t.CheckDeepEqual(test.expected, actual.Error())
			actualAE := sErrors.ActionableErr(&cfg, constants.Build, test.err)
			t.CheckDeepEqual(test.expectedAE, actualAE, protocmp.Transform())
		})
	}
}
|
package problem15
// Solve runs both parts of the puzzle against the fixed starting sequence.
func Solve() (int, int, error) {
	ints := []int{1, 0, 18, 10, 19, 6}
	return SolveBoth(ints)
}
// SolveBoth plays the "memory game": after the starting numbers, each turn
// speaks 0 if the previous number had never been spoken before, otherwise
// the gap since it was last spoken. It returns the number spoken on turn
// 2020 and the number spoken on turn 30,000,000.
//
// Positions are 0-based throughout: at the top of each loop iteration,
// lastB is the number at position i-1 and next is the number at position i.
func SolveBoth(ints []int) (int, int, error) {
	// Seed the "last seen at position" table with every starting number
	// except the final two, which are tracked in lastB/next below.
	latest := map[int]int{}
	for i, v := range ints[:len(ints)-2] {
		latest[v] = i
	}
	var lastA int
	lastB := ints[len(ints)-2]
	next := ints[len(ints)-1]
	i := len(ints) - 1
	for ; i < 30000000; i++ {
		if i == 2020 {
			// Part A: lastB sits at 0-based position 2019, i.e. it is
			// the 2020th number spoken.
			lastA = lastB
		}
		// Record lastB's position lazily, one turn late, so the lookup
		// below never sees the number that was just spoken.
		latest[lastB] = i - 1
		lastB = next
		if v, exists := latest[next]; exists {
			next = i - v // seen before: speak its age
		} else {
			next = 0 // first occurrence: speak 0
		}
	}
	// After the final iteration lastB holds the 30,000,000th number (part B).
	return lastA, lastB, nil
}
|
package main
import (
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/webdevops/go-common/prometheus/collector"
"go.uber.org/zap"
devopsClient "github.com/webdevops/azure-devops-exporter/azure-devops-client"
)
// MetricsCollectorProject exports project-level info metrics from Azure DevOps.
type MetricsCollectorProject struct {
	collector.Processor

	prometheus struct {
		project *prometheus.GaugeVec
		// NOTE(review): repository is declared but never initialized or
		// registered in this file — confirm it is still needed.
		repository *prometheus.GaugeVec
	}
}
// Setup registers the azure_devops_project_info gauge with the parent collector.
func (m *MetricsCollectorProject) Setup(collector *collector.Collector) {
	m.Processor.Setup(collector)

	m.prometheus.project = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "azure_devops_project_info",
			Help: "Azure DevOps project",
		},
		[]string{
			"projectID",
			"projectName",
		},
	)
	// Third argument presumably marks the list for automatic reset per
	// scrape — TODO confirm against collector.RegisterMetricList.
	m.Collector.RegisterMetricList("project", m.prometheus.project, true)
}
// Reset is a no-op; metric lifecycle is handled by the embedded Processor.
func (m *MetricsCollectorProject) Reset() {}

// Collect iterates all discovered Azure DevOps projects and records one
// info metric per project.
func (m *MetricsCollectorProject) Collect(callback chan<- func()) {
	ctx := m.Context()
	logger := m.Logger()

	for _, project := range AzureDevopsServiceDiscovery.ProjectList() {
		projectLogger := logger.With(zap.String("project", project.Name))
		m.collectProject(ctx, projectLogger, callback, project)
	}
}

// collectProject emits the project info gauge for a single project,
// labelled with its ID and name. ctx, logger and callback are currently
// unused here but keep the signature uniform with other collectors.
func (m *MetricsCollectorProject) collectProject(ctx context.Context, logger *zap.SugaredLogger, callback chan<- func(), project devopsClient.Project) {
	projectMetric := m.Collector.GetMetricList("project")

	projectMetric.AddInfo(prometheus.Labels{
		"projectID":   project.Id,
		"projectName": project.Name,
	})
}
|
package main
import (
"fmt"
"unsafe"
)
// Constants derived from a string literal: len and unsafe.Sizeof of a
// constant expression are themselves compile-time constants.
const (
	a = "abc"
	b = len(a)           // 3
	c = unsafe.Sizeof(a) // size of the string header (16 on 64-bit — confirm for target arch)
)

// iota shift table: iota counts 0,1,2,3 on successive lines, and a line
// with no expression repeats the previous expression with the new iota.
const (
	i = 1 << iota // 1 << 0 == 1
	j = 3 << iota // 3 << 1 == 6
	k             // 3 << 2 == 12
	l             // 3 << 3 == 24
)
// main runs each constant demo in turn, printing a separator after every one.
func main() {
	for _, demo := range []func(){sample1, sample2, sample3} {
		demo()
		fmt.Println("=====================")
	}
}
// sample1 computes a rectangle area from typed constants and demonstrates a
// multi-value constant declaration that shadows the package-level a, b, c.
func sample1() {
	const LENGTH int = 10
	const WIDTH int = 5
	var area int
	const a, b, c = 1, false, "str" // multiple constants in one declaration
	area = LENGTH * WIDTH
	fmt.Printf("面积为 : %d\r\n", area)
	fmt.Println(a, b, c)
}
// sample2 demonstrates iota inside a const block: iota advances on every
// line of the block even when the line does not reference it.
func sample2() {
	const (
		a = iota //0
		b        //1
		c        //2
		d = "ha" // independent value; iota still advances
		e        // "ha" repeated; iota still advances
		f = 100  // iota advances
		g        // 100 repeated; iota advances
		h = iota // 7: back on the running counter
		i        //8
	)
	fmt.Println(a, b, c, d, e, f, g, h, i)
	fmt.Println()
}
// sample3 prints the package-level shift constants i, j, k and l.
func sample3() {
	entries := []struct {
		name  string
		value int
	}{{"i", i}, {"j", j}, {"k", k}, {"l", l}}
	for _, e := range entries {
		fmt.Println(e.name+"=", e.value)
	}
}
|
package arithmetic
// Add returns the sum of its integer arguments; with no arguments it
// returns 0.
func Add(num ...int) int {
	sum := 0
	for _, n := range num {
		sum += n
	}
	return sum
}
|
package types
import (
"math/big"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/tharsis/ethermint/types"
)
// newAccessListTx converts a go-ethereum transaction into its proto
// AccessListTx representation. Optional fields (recipient, value, gas
// price, access list) are copied only when present on the source tx.
func newAccessListTx(tx *ethtypes.Transaction) *AccessListTx {
	txData := &AccessListTx{
		Nonce:    tx.Nonce(),
		Data:     tx.Data(),
		GasLimit: tx.Gas(),
	}

	v, r, s := tx.RawSignatureValues()
	if tx.To() != nil {
		txData.To = tx.To().Hex()
	}

	if tx.Value() != nil {
		amountInt := sdk.NewIntFromBigInt(tx.Value())
		txData.Amount = &amountInt
	}

	if tx.GasPrice() != nil {
		gasPriceInt := sdk.NewIntFromBigInt(tx.GasPrice())
		txData.GasPrice = &gasPriceInt
	}

	if tx.AccessList() != nil {
		al := tx.AccessList()
		txData.Accesses = NewAccessList(&al)
	}

	// Chain ID and signature are applied together via the shared setter.
	txData.SetSignatureValues(tx.ChainId(), v, r, s)
	return txData
}
// TxType returns the tx type (the EIP-2930 access-list type byte).
func (tx *AccessListTx) TxType() uint8 {
	return ethtypes.AccessListTxType
}

// Copy returns an instance with the same field values. The byte slices
// (Data, V, R, S) are deep-copied; the pointer fields (ChainID, GasPrice,
// Amount, Accesses) are shared with the receiver — NOTE(review): confirm
// callers never mutate those through a copy.
func (tx *AccessListTx) Copy() TxData {
	return &AccessListTx{
		ChainID:  tx.ChainID,
		Nonce:    tx.Nonce,
		GasPrice: tx.GasPrice,
		GasLimit: tx.GasLimit,
		To:       tx.To,
		Amount:   tx.Amount,
		Data:     common.CopyBytes(tx.Data),
		Accesses: tx.Accesses,
		V:        common.CopyBytes(tx.V),
		R:        common.CopyBytes(tx.R),
		S:        common.CopyBytes(tx.S),
	}
}
// GetChainID returns the chain id field from the AccessListTx as a big.Int,
// or nil when unset.
func (tx *AccessListTx) GetChainID() *big.Int {
	if tx.ChainID == nil {
		return nil
	}

	return tx.ChainID.BigInt()
}

// GetAccessList returns the AccessList field converted to the go-ethereum
// representation, or nil when unset.
func (tx *AccessListTx) GetAccessList() ethtypes.AccessList {
	if tx.Accesses == nil {
		return nil
	}
	return *tx.Accesses.ToEthAccessList()
}

// GetData returns a copy of the input data bytes.
func (tx *AccessListTx) GetData() []byte {
	return common.CopyBytes(tx.Data)
}

// GetGas returns the gas limit.
func (tx *AccessListTx) GetGas() uint64 {
	return tx.GasLimit
}

// GetGasPrice returns the gas price field as a big.Int, or nil when unset.
func (tx *AccessListTx) GetGasPrice() *big.Int {
	if tx.GasPrice == nil {
		return nil
	}
	return tx.GasPrice.BigInt()
}

// GetGasTipCap returns the gas price field: access-list transactions carry
// a single gas price, so tip cap and fee cap both alias it.
func (tx *AccessListTx) GetGasTipCap() *big.Int {
	return tx.GetGasPrice()
}

// GetGasFeeCap returns the gas price field (see GetGasTipCap).
func (tx *AccessListTx) GetGasFeeCap() *big.Int {
	return tx.GetGasPrice()
}

// GetValue returns the tx amount as a big.Int, or nil when unset.
func (tx *AccessListTx) GetValue() *big.Int {
	if tx.Amount == nil {
		return nil
	}

	return tx.Amount.BigInt()
}

// GetNonce returns the account sequence for the transaction.
func (tx *AccessListTx) GetNonce() uint64 { return tx.Nonce }
// GetTo returns the pointer to the recipient address, or nil when the To
// field is empty (contract creation).
func (tx *AccessListTx) GetTo() *common.Address {
	if tx.To == "" {
		return nil
	}
	to := common.HexToAddress(tx.To)
	return &to
}

// AsEthereumData returns an AccessListTx transaction tx from the proto-formatted
// TxData defined on the Cosmos EVM.
func (tx *AccessListTx) AsEthereumData() ethtypes.TxData {
	v, r, s := tx.GetRawSignatureValues()
	// Fixed: the composite-literal head was corrupted by an encoding
	// artifact ("ðtypes", i.e. "&eth" collapsed into the ð entity).
	return &ethtypes.AccessListTx{
		ChainID:    tx.GetChainID(),
		Nonce:      tx.GetNonce(),
		GasPrice:   tx.GetGasPrice(),
		Gas:        tx.GetGas(),
		To:         tx.GetTo(),
		Value:      tx.GetValue(),
		Data:       tx.GetData(),
		AccessList: tx.GetAccessList(),
		V:          v,
		R:          r,
		S:          s,
	}
}

// GetRawSignatureValues returns the V, R, S signature values of the transaction.
// The return values should not be modified by the caller.
func (tx *AccessListTx) GetRawSignatureValues() (v, r, s *big.Int) {
	return rawSignatureValues(tx.V, tx.R, tx.S)
}
// SetSignatureValues sets the signature (and chain id) values on the
// transaction. Nil inputs leave the corresponding field untouched.
func (tx *AccessListTx) SetSignatureValues(chainID, v, r, s *big.Int) {
	if v != nil {
		tx.V = v.Bytes()
	}
	if r != nil {
		tx.R = r.Bytes()
	}
	if s != nil {
		tx.S = s.Bytes()
	}
	if chainID != nil {
		chainIDInt := sdk.NewIntFromBigInt(chainID)
		tx.ChainID = &chainIDInt
	}
}
// Validate performs a stateless validation of the tx fields: gas price must
// be present and non-negative, the amount (when set) non-negative, the
// recipient address (when set) well-formed, and the chain ID present.
func (tx AccessListTx) Validate() error {
	gasPrice := tx.GetGasPrice()
	if gasPrice == nil {
		return sdkerrors.Wrap(ErrInvalidGasPrice, "cannot be nil")
	}

	if gasPrice.Sign() == -1 {
		return sdkerrors.Wrapf(ErrInvalidGasPrice, "gas price cannot be negative %s", gasPrice)
	}

	amount := tx.GetValue()
	// Amount can be 0
	if amount != nil && amount.Sign() == -1 {
		return sdkerrors.Wrapf(ErrInvalidAmount, "amount cannot be negative %s", amount)
	}

	if tx.To != "" {
		if err := types.ValidateAddress(tx.To); err != nil {
			return sdkerrors.Wrap(err, "invalid to address")
		}
	}

	if tx.GetChainID() == nil {
		return sdkerrors.Wrap(
			sdkerrors.ErrInvalidChainID,
			"chain ID must be present on AccessList txs",
		)
	}

	return nil
}
// Fee returns gasprice * gaslimit, i.e. the maximum fee this tx can incur.
func (tx AccessListTx) Fee() *big.Int {
	return fee(tx.GetGasPrice(), tx.GetGas())
}

// Cost returns amount + gasprice * gaslimit, the total funds the sender
// must be able to cover.
func (tx AccessListTx) Cost() *big.Int {
	return cost(tx.Fee(), tx.GetValue())
}
|
// code for linux
// +build linux darwin
// +build 386
package godebug
var ColorRed = "\033[31;40m"
var ColorYellow = "\033[33;40m"
var ColorGreen = "\033[32;40m"
var ColorCyan = "\033[36;40m"
var ColorReset = "\033[0m"
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execgen
import (
"strings"
"testing"
"github.com/cockroachdb/datadriven"
"github.com/dave/dst/decorator"
)
// Walk walks path for datadriven files and calls RunTest on them.
func TestExecgen(t *testing.T) {
datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
f, err := decorator.Parse(d.Input)
if err != nil {
t.Fatal(err)
}
switch d.Cmd {
case "inline":
inlineFuncs(f)
case "template":
expandTemplates(f)
default:
t.Fatalf("unknown command: %s", d.Cmd)
return ""
}
var sb strings.Builder
_ = decorator.Fprint(&sb, f)
return sb.String()
})
})
}
|
package main
import "testing"
// The tests below pin Roman-numeral conversion for representative values,
// including the subtractive forms (IV, IX, XC, CD).
func Test_should_write_1(t *testing.T) {
	assertEquals(Roman(1), "I", t)
}
func Test_should_write_3(t *testing.T) {
	assertEquals(Roman(3), "III", t)
}
func Test_should_write_4(t *testing.T) {
	assertEquals(Roman(4), "IV", t)
}
func Test_should_write_5(t *testing.T) {
	assertEquals(Roman(5), "V", t)
}
func Test_should_write_6(t *testing.T) {
	assertEquals(Roman(6), "VI", t)
}
func Test_should_write_9(t *testing.T) {
	assertEquals(Roman(9), "IX", t)
}
func Test_should_write_10(t *testing.T) {
	assertEquals(Roman(10), "X", t)
}
func Test_should_write_13(t *testing.T) {
	assertEquals(Roman(13), "XIII", t)
}
func Test_should_write_16(t *testing.T) {
	assertEquals(Roman(16), "XVI", t)
}
func Test_should_write_20(t *testing.T) {
	assertEquals(Roman(20), "XX", t)
}
func Test_should_write_50(t *testing.T) {
	assertEquals(Roman(50), "L", t)
}
func Test_should_write_100(t *testing.T) {
	assertEquals(Roman(100), "C", t)
}
func Test_should_write_490(t *testing.T) {
	assertEquals(Roman(490), "CDXC", t)
}
func Test_should_write_501(t *testing.T) {
	assertEquals(Roman(501), "DI", t)
}
func Test_should_write_96(t *testing.T) {
	assertEquals(Roman(96), "XCVI", t)
}
// The Roman numeral for 1000 is "M". The previous expectation of "DI"
// (which is 501) was a copy-paste mistake from the test above.
func Test_should_write_1000(t *testing.T) {
	assertEquals(Roman(1000), "M", t)
}
// assertEquals fails the test when the two strings differ.
func assertEquals(s1 string, s2 string, t *testing.T) {
	t.Helper()
	if s1 != s2 {
		// Use a format string: the old code concatenated the inputs into
		// Fatalf's format argument, so a '%' in either string would be
		// misinterpreted as a formatting verb.
		t.Fatalf("Assertion failed: '%s' != '%s'", s1, s2)
	}
}
|
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
)
func write(w io.Writer, list ...interface{}) {
fmt.Println(list...)
_, _ = fmt.Fprintln(w, list...)
}
// greet is an HTTP handler that echoes the incoming request back to the
// client: protocol, method, URL, remote address, headers, content length
// and body (write also mirrors everything to stdout).
func greet(w http.ResponseWriter, r *http.Request) {
	// Emit a trailing blank line once the handler returns.
	defer write(w)

	if r.URL.Scheme == "" {
		r.URL.Scheme = "http"
	}
	write(w, r.Proto, r.Method, "at", r.URL.Scheme+"://"+r.Host+r.RequestURI)
	write(w, "Remote:", r.RemoteAddr)

	write(w, "Headers:")
	for key, val := range r.Header {
		write(w, "\t", key, "-", val)
	}
	write(w, "Content Length:", r.ContentLength)

	if r.Body == nil {
		return
	}
	write(w, "Body")
	body, _ := ioutil.ReadAll(r.Body)
	write(w, string(body))
}
// main starts the HTTP echo server. The listen port defaults to 80 and can
// be overridden with -p / -port; every request is echoed back by greet.
func main() {
	port := 80
	flag.IntVar(&port, "p", port, "define port - shorthand")
	flag.IntVar(&port, "port", port, "define port")
	flag.Parse()

	http.HandleFunc("/", greet)
	portStr := fmt.Sprintf(":%v", port)
	// ioutil.Discard because write already mirrors everything to stdout.
	write(ioutil.Discard, "Starting server at http://localhost"+portStr)
	if err := http.ListenAndServe(portStr, nil); err != nil {
		write(ioutil.Discard, "Error starting:", err)
		os.Exit(1)
	}
}
|
package lang
import (
"testing"
)
// TestPairString checks the textual rendering of a pair expression:
// elements are printed space-separated inside square brackets, with string
// elements quoted.
func TestPairString(t *testing.T) {
	result := MakePair(MakeString("a"), MakeNumber(2)).String()
	expected := "[\"a\" 2]"
	if result != expected {
		t.Errorf("Wrong result, expected '%v', got '%v'", expected, result)
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"bytes"
"context"
"io/ioutil"
"path"
"time"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/storage"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/testing"
)
// init registers the MyFiles test with the tast framework, parameterized
// separately for the ARC container (android_p) and ARCVM (android_vm).
func init() {
	testing.AddTest(&testing.Test{
		Func:         MyFiles,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks whether the MyFiles directory is properly shared from ChromeOS to ARC",
		Contacts: []string{
			"youkichihosoi@chromium.org", "arc-storage@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "arcBooted",
		Data:         []string{"capybara.jpg"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
		Timeout: 6 * time.Minute,
	})
}
// MyFiles verifies two-way sharing of the ChromeOS MyFiles directory with
// ARC: a file written on the Android side must appear in ChromeOS, and a
// file written in ChromeOS must be readable by an Android app.
func MyFiles(ctx context.Context, s *testing.State) {
	a := s.FixtValue().(*arc.PreData).ARC
	cr := s.FixtValue().(*arc.PreData).Chrome
	d := s.FixtValue().(*arc.PreData).UIDevice

	if err := arc.WaitForARCMyFilesVolumeMount(ctx, a); err != nil {
		s.Fatal("Failed to wait for MyFiles to be mounted in ARC: ", err)
	}

	cryptohomeUserPath, err := cryptohome.UserPath(ctx, cr.NormalizedUser())
	if err != nil {
		s.Fatalf("Failed to get the cryptohome user path for %s: %v", cr.NormalizedUser(), err)
	}
	myFilesPath := cryptohomeUserPath + "/MyFiles"

	testARCToCros(ctx, s, a, myFilesPath)
	testCrosToARC(ctx, s, a, cr, d, myFilesPath)
}
// testARCToCros checks whether a file put in the Android MyFiles directory
// appears in the ChromeOS MyFiles directory. Fails the test on mismatch.
func testARCToCros(ctx context.Context, s *testing.State, a *arc.ARC, myFilesPath string) {
	const (
		filename    = "capybara.jpg"
		androidPath = "/storage/" + arc.MyFilesUUID + "/" + filename
	)
	crosPath := myFilesPath + "/" + filename

	testing.ContextLog(ctx, "Testing Android -> CrOS")
	if err := testPushToARCAndReadFromCros(ctx, a, s.DataPath(filename), androidPath, crosPath); err != nil {
		s.Fatal("Android -> CrOS failed: ", err)
	}
}
// testPushToARCAndReadFromCros pushes the content of sourcePath (in ChromeOS)
// to androidPath (in Android) using adb, and then checks whether the file can
// be accessed under crosPath (in ChromeOS). The Android-side file is removed
// on exit; retErr is named so that deferred cleanup can surface its own
// failure when the test body otherwise succeeded.
func testPushToARCAndReadFromCros(ctx context.Context, a *arc.ARC, sourcePath, androidPath, crosPath string) (retErr error) {
	// Shorten the context to make room for cleanup jobs.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()

	expected, err := ioutil.ReadFile(sourcePath)
	if err != nil {
		return errors.Wrapf(err, "failed to read from %s in ChromeOS", sourcePath)
	}

	if err := a.WriteFile(ctx, androidPath, expected); err != nil {
		return errors.Wrapf(err, "failed to write to %s in Android", androidPath)
	}
	defer func(ctx context.Context) {
		// Run cleanup on the original (unshortened) context.
		if err := a.RemoveAll(ctx, androidPath); err != nil {
			if retErr == nil {
				retErr = errors.Wrapf(err, "failed remove %s in Android", androidPath)
			} else {
				testing.ContextLogf(ctx, "Failed to remove %s in Android: %v", androidPath, err)
			}
		}
	}(cleanupCtx)

	actual, err := ioutil.ReadFile(crosPath)
	if err != nil {
		return errors.Wrapf(err, "failed to read from %s in ChromeOS", crosPath)
	}
	if !bytes.Equal(actual, expected) {
		return errors.Errorf("content mismatch between %s in Android and %s in ChromeOS", androidPath, crosPath)
	}
	return nil
}
// testCrosToARC checks whether a file put in the ChromeOS MyFiles directory
// can be read by Android apps: it creates storage.txt via the Files app UI
// flow and verifies an Android app sees the expected URI and content.
func testCrosToARC(ctx context.Context, s *testing.State, a *arc.ARC, cr *chrome.Chrome, d *ui.Device, myFilesPath string) {
	config := storage.TestConfig{DirPath: myFilesPath, DirName: "My files", DirTitle: "Files - My files",
		CreateTestFile: true, FileName: "storage.txt"}
	testFileURI := arc.VolumeProviderContentURIPrefix + path.Join(arc.MyFilesUUID, config.FileName)

	testing.ContextLog(ctx, "Testing CrOS -> Android")
	expectations := []storage.Expectation{
		{LabelID: storage.ActionID, Value: storage.ExpectedAction},
		{LabelID: storage.URIID, Value: testFileURI},
		{LabelID: storage.FileContentID, Value: storage.ExpectedFileContent}}
	storage.TestOpenWithAndroidApp(ctx, s, a, cr, d, config, expectations)
}
|
package main
import (
"flag"
"log"
"net"
"os"
"strings"
tcpip "github.com/brewlin/net-protocol/protocol"
"github.com/brewlin/net-protocol/protocol/link/fdbased"
"github.com/brewlin/net-protocol/protocol/link/tuntap"
"github.com/brewlin/net-protocol/protocol/network/arp"
"github.com/brewlin/net-protocol/protocol/network/ipv4"
"github.com/brewlin/net-protocol/protocol/network/ipv6"
"github.com/brewlin/net-protocol/stack"
)
// main brings up a TAP-device-backed userspace network stack: it parses the
// CLI arguments (<tap-device> <local-address/mask>), creates and configures
// the TAP interface, then registers IPv4/IPv6 and ARP handling plus a
// default route on a freshly built protocol stack, and blocks forever.
func main() {
	// Parse command-line arguments.
	flag.Parse()
	if len(flag.Args()) < 2 {
		log.Fatal("Usage: ", os.Args[0], " <tap-device> <local-address/mask")
	}
	log.SetFlags(log.Lshortfile)
	tapName := flag.Arg(0)
	cidrName := flag.Arg(1)
	log.Printf("tap :%v,cidrName :%v", tapName, cidrName)
	parseAddr, cidr, err := net.ParseCIDR(cidrName)
	if err != nil {
		log.Fatalf("Bad cidr:%v", cidrName)
	}
	// Resolve the IP address; both IPv4 and IPv6 are supported.
	var addr tcpip.Address
	var proto tcpip.NetworkProtocolNumber
	if parseAddr.To4() != nil {
		addr = tcpip.Address(parseAddr.To4())
		proto = ipv4.ProtocolNumber
	} else if parseAddr.To16() != nil {
		addr = tcpip.Address(parseAddr.To16())
		proto = ipv6.ProtocolNumber
	} else {
		log.Fatalf("Unknown IP type:%v", parseAddr)
	}
	// Virtual NIC configuration.
	conf := &tuntap.Config{
		Name: tapName,
		Mode: tuntap.TAP,
	}
	var fd int
	// Create the virtual NIC.
	fd, err = tuntap.NewNetDev(conf)
	if err != nil {
		log.Fatal(err)
	}
	// Bring the tap interface up.
	tuntap.SetLinkUp(tapName)
	// Install the route for the tap network.
	tuntap.SetRoute(tapName, cidr.String())
	// Fetch the interface MAC address.
	mac, err := tuntap.GetHardwareAddr(tapName)
	// NOTE(review): mac is logged before err is checked; on failure this
	// logs an empty/garbage address just before the panic below.
	log.Println("get mac addr:", string(mac))
	if err != nil {
		panic(err)
	}
	// Wrap the file descriptor as a link-layer endpoint.
	linkID := fdbased.New(&fdbased.Options{
		FD:      fd,
		MTU:     1500,
		Address: tcpip.LinkAddress(mac),
	})
	// Build the protocol stack with IPv4 and ARP support.
	s := stack.New([]string{ipv4.ProtocolName, arp.ProtocolName}, []string{}, stack.Options{})
	// Create the named NIC on the stack.
	// if err := s.CreateNamedNIC(1, "vnic1", linkID); err != nil {
	if err := s.CreateNamedNIC(1, "vnic1", linkID); err != nil {
		log.Fatal(err)
	}
	// Register the network-layer address on the NIC.
	if err := s.AddAddress(1, proto, addr); err != nil {
		log.Fatal(err)
	}
	// Register the ARP protocol address on the NIC.
	if err := s.AddAddress(1, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
		log.Fatal(err)
	}
	// Install an all-zeros (default) route through NIC 1.
	s.SetRouteTable([]tcpip.Route{
		{
			Destination: tcpip.Address(strings.Repeat("\x00", len(addr))),
			Mask:        tcpip.AddressMask(strings.Repeat("\x00", len(addr))),
			Gateway:     "",
			NIC:         1,
		},
	})
	// Block forever; the stack does its work on background goroutines.
	select {}
}
|
package field
import (
"github.com/payfazz/ditto/structure/component"
)
// List is a list form component; it embeds Field and inherits its behavior.
type List struct {
	*Field
}

// NewList builds a List wrapping a freshly initialized Field.
func NewList() component.Interface {
	return &List{
		Field: NewField(),
	}
}
|
package http
import (
"net/http"
"strconv"
"robot/common/logger"
)
// log is the package-wide logger shared by the HTTP server and handlers.
var log = logger.NewLog()
// Start wires up the route table and serves HTTP on the given port.
// It blocks until the server exits. The previous version discarded the
// error from ListenAndServe (which always returns a non-nil error on bind
// failure or shutdown), so a failed startup was completely silent.
func Start(port int) {
	router := &Router{}
	router.RegRoutes(InitRouter())
	server := http.Server{
		Addr:    ":" + strconv.Itoa(port),
		Handler: router,
	}
	log.Infof("http server listen on %d", port)
	if err := server.ListenAndServe(); err != nil {
		log.Infof("http server stopped: %v", err)
	}
}
// InitRouter declares the HTTP route table: GPIO pin switching, cron-job
// registration, and key/value status reads and writes.
func InitRouter() []*Route {
	return []*Route{
		NewRoute("/api/switch", "get", PinHandler, []string{"pos", "status"}),
		NewRoute("/api/job/add", "get", CronJob, []string{"name", "pos", "status", "time"}),
		NewRoute("/api/status", "get", GetFromDb, []string{"key"}),
		NewRoute("/api/status", "post", SetToDb, []string{"key", "value"}),
	}
}
|
package main;
import "fmt";
// main demonstrates nested conditionals. The original was not valid Go:
// `else` sat on its own line (Go requires `} else {`), the variables c, d
// and e were never declared, fmt was imported but unused, and statements
// carried redundant semicolons. The logic is preserved; the variables are
// declared zero-valued and printed at the end so the program compiles and
// its effect is observable.
func main() {
	var c, d, e int // zero-valued; TODO: wire up real inputs
	if c == 1 {
		if d == 1 {
			d++
		}
	} else {
		// Note: & binds tighter than ==, so this tests (d & e) == 1.
		if d&e == 1 {
			e++
		} else {
			d--
		}
	}
	fmt.Println(c, d, e)
}
|
package migrations
// Product is the database schema for a placed order.
// NOTE(review): Password is stored alongside order data and nothing here
// indicates it is hashed — confirm handling upstream.
type Product struct {
	Username    string `json:"username"`
	UserID      int    `json:"user_id"`
	Price       int    `json:"price"`
	PhoneNo     string `json:"phone_no"`
	OrderPlaced string `json:"order_placed"`
	Password    string `json:"password"`
	// Declare carries raw bytes; its purpose is not evident from this
	// file — TODO confirm with callers.
	Declare []byte
}
|
// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package engineccl
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)
// TestVerifyBatchRepr checks VerifyBatchRepr against a hand-built RocksDB
// write batch: correct key stats for an in-span key, rejection of keys
// falling outside the request span on either side, and detection of a
// corrupted value checksum.
func TestVerifyBatchRepr(t *testing.T) {
	defer leaktest.AfterTest(t)()

	keyA := storage.MVCCKey{Key: []byte("a")}
	keyB := storage.MVCCKey{Key: []byte("b")}
	keyC := storage.MVCCKey{Key: []byte("c")}
	keyD := storage.MVCCKey{Key: []byte("d")}
	keyE := storage.MVCCKey{Key: []byte("e")}

	var batch storage.RocksDBBatchBuilder
	key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 1}}
	batch.Put(key, roachpb.MakeValueFromString("1").RawBytes)
	data := batch.Finish()

	// "bb" lies inside [keyB, keyC), so verification succeeds with one key.
	ms, err := VerifyBatchRepr(data, keyB, keyC, 0)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if ms.KeyCount != 1 {
		t.Fatalf("got %d expected 1", ms.KeyCount)
	}

	// Key is before the range in the request span.
	if _, err := VerifyBatchRepr(data, keyD, keyE, 0); !testutils.IsError(err, "request range") {
		t.Fatalf("expected request range error got: %+v", err)
	}

	// Key is after the range in the request span.
	if _, err := VerifyBatchRepr(data, keyA, keyB, 0); !testutils.IsError(err, "request range") {
		t.Fatalf("expected request range error got: %+v", err)
	}

	// Invalid key/value entry checksum.
	{
		var batch storage.RocksDBBatchBuilder
		key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 1}}
		value := roachpb.MakeValueFromString("1")
		// Compute the checksum against the wrong key to corrupt the entry.
		value.InitChecksum([]byte("foo"))
		batch.Put(key, value.RawBytes)
		data := batch.Finish()

		if _, err := VerifyBatchRepr(data, keyB, keyC, 0); !testutils.IsError(err, "invalid checksum") {
			t.Fatalf("expected 'invalid checksum' error got: %+v", err)
		}
	}
}
|
package lang
import (
"fmt"
)
// symbol is a named expression; two symbols are equal exactly when their
// names match.
type symbol struct {
	value string
}

// MakeSymbol wraps name in a symbol expression.
func MakeSymbol(name string) Expr {
	return &symbol{value: name}
}

// String renders the symbol in keyword form, e.g. ":foo".
func (s *symbol) String() string {
	return fmt.Sprintf(":%v", s.value)
}

// Value returns the symbol's underlying name.
func (s *symbol) Value() string {
	return s.value
}

// Equal reports whether o is also a symbol carrying the same name.
func (s *symbol) Equal(o Expr) bool {
	other, ok := o.(*symbol)
	return ok && s.value == other.value
}
|
// Package log defines the contract for the xds-relay logger.
// It also contains an implementation of the contract using the Zap logging
// framework.
package log
import (
"context"
)
// Logger is the contract for xds-relay's logging implementation.
//
// A self-contained usage example looks as follows:
//
//	Log.Named("foo-component").With(
//	  "field1", "value1",
//	  "field2", "value2",
//	).Error("my error message")
type Logger interface {
	// Named adds a sub-scope to the logger.
	Named(name string) Logger
	// With adds a variadic number of fields to the logging context.
	// When processing pairs, the first element of the pair is used as the
	// field key and the second as the field value.
	//
	// For example,
	//
	//	Log.With(
	//	  "hello", "world",
	//	  "failure", errors.New("oh no"),
	//	  "count", 42,
	//	  "user", User{Name: "alice"},
	//	).Info("this is an info message")
	With(args ...interface{}) Logger
	// Debug logs a message at level Debug, annotated with fields provided through With().
	Debug(ctx context.Context, msg ...interface{})
	// Info logs a message at level Info, annotated with fields provided through With().
	Info(ctx context.Context, msg ...interface{})
	// Warn logs a message at level Warn, annotated with fields provided through With().
	Warn(ctx context.Context, msg ...interface{})
	// Error logs a message at level Error, annotated with fields provided through With().
	Error(ctx context.Context, msg ...interface{})
	// Panic logs a message at level Panic, annotated with fields provided through With(),
	// and immediately panics.
	Panic(ctx context.Context, msg ...interface{})
	// Fatal logs a message at level Fatal, annotated with fields provided through With(),
	// and immediately calls os.Exit.
	Fatal(ctx context.Context, msg ...interface{})
	// Sync flushes any buffered log entries.
	Sync() error
}
|
package controllers
import (
"api/models"
"encoding/json"
"io/ioutil"
"net/http"
"regexp"
"strconv"
)
// GetUsers lists all users as JSON.
//
// Fixes: the original kept executing after writing a model error (so an
// error body could be followed by a user list) and never set an error
// status code.
func GetUsers(w http.ResponseWriter, r *http.Request) {
	accounts, err := models.GetUsers()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(err.Error()))
		return
	}
	if len(accounts) > 0 {
		w.Header().Set("Content-Type", "application/json")
		// Marshalling a slice of model values; failure is not expected here.
		response, _ := json.Marshal(accounts)
		w.Write(response)
	}
}
// CreateUser decodes an Account from the request body, persists it, and
// echoes the created record as JSON.
//
// Fix: a malformed body now yields 400 Bad Request instead of an implicit
// 200 OK.
func CreateUser(w http.ResponseWriter, r *http.Request) {
	postBody, _ := ioutil.ReadAll(r.Body)
	var account models.Account
	if err := json.Unmarshal(postBody, &account); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	res := account.Create()
	response, _ := json.Marshal(res)
	w.Write(response)
}
// GetUser returns the single user whose numeric id appears in the URL path
// (/users/{id}).
//
// Fixes: a lookup failure now carries a 404 status instead of an implicit
// 200, and a URL that does not match the pattern answers 404 instead of an
// empty 200 response.
func GetUser(w http.ResponseWriter, r *http.Request) {
	pattern, _ := regexp.Compile(`/users/(\d+)`)
	matches := pattern.FindStringSubmatch(r.URL.Path)
	if len(matches) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	ids, _ := strconv.Atoi(matches[1])
	id := uint(ids)
	w.Header().Set("Content-Type", "application/json")
	account, err := models.GetUser(id)
	if err != nil {
		// NOTE(review): assumes a lookup error means "no such user" — confirm
		// models.GetUser cannot fail for other reasons.
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte(err.Error()))
		return
	}
	response, _ := json.Marshal(account)
	w.Write(response)
}
// UpdateUser applies the JSON body to the user addressed by /users/{id} and
// returns the updated record.
//
// Fixes: the original fell through after writing an update error (the error
// body could be followed by the re-fetched user), set Content-Type twice,
// and answered an unmatched URL or a no-op update with an empty 200.
func UpdateUser(w http.ResponseWriter, r *http.Request) {
	pattern, _ := regexp.Compile(`/users/(\d+)`)
	matches := pattern.FindStringSubmatch(r.URL.Path)
	if len(matches) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	ids, _ := strconv.Atoi(matches[1])
	id := uint(ids)
	w.Header().Set("Content-Type", "application/json")
	postBody, _ := ioutil.ReadAll(r.Body)
	account := models.Account{}
	if err := json.Unmarshal(postBody, &account); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	row, err := models.UpdateUser(id, account)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(err.Error()))
		return
	}
	if row == 0 {
		// Nothing was updated: the id does not exist.
		w.WriteHeader(http.StatusNotFound)
		return
	}
	acc, _ := models.GetUser(id)
	response, _ := json.Marshal(acc)
	w.Write(response)
}
// DeleteUser removes the user addressed by /users/{id} and echoes the
// model's result as JSON.
//
// Fixes: a deletion error now carries a 500 status, and an unmatched URL
// answers 404 instead of an empty 200.
func DeleteUser(w http.ResponseWriter, r *http.Request) {
	pattern, _ := regexp.Compile(`/users/(\d+)`)
	matches := pattern.FindStringSubmatch(r.URL.Path)
	if len(matches) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	ids, _ := strconv.Atoi(matches[1])
	id := uint(ids)
	w.Header().Set("Content-Type", "application/json")
	account, err := models.DeleteUser(id)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(err.Error()))
		return
	}
	response, _ := json.Marshal(account)
	w.Write(response)
}
|
/*Package rpterr provides a place to report internal program errors
To begin with, we simply dump to stderr
*/
package rpterr
|
package library
import (
"encoding/json"
"log"
"net/http"
"gopkg.in/mgo.v2"
)
// Disk is a library record for a single disk. All fields except Mp3 are
// marked required for the validator.
type Disk struct {
	Id      string   `json:"id" validate:"required"`
	Title   string   `json:"title" validate:"required"`
	Authors []string `json:"authors" validate:"required"`
	Genre   string   `json:"genre" validate:"required"`
	Mp3     string   `json:"mp3"`
}
func allDisks(w http.ResponseWriter, r *http.Request) {
var disks []Disk
disks, err := SearchDisks()
if err != nil {
ErrorWithJSON(w, "Database error", http.StatusInternalServerError)
log.Println("Failed get all books: ", err)
return
}
respBody, err := json.MarshalIndent(disks, "", " ")
if err != nil {
log.Fatal(err)
}
ResponseWithJSON(w, respBody, http.StatusOK)
}
// addDisk decodes a Disk from the request body, validates it, resolves its
// storage path, and registers it, answering with one of the canned
// responses.
//
// Fix: the error returned by getPath was silently discarded (err was
// overwritten by the very next assignment); it is now checked.
func addDisk(w http.ResponseWriter, r *http.Request) {
	var disk Disk
	err := DecodeJson(r.Body, &disk)
	if err != nil {
		w.Write([]byte(responses["bad-json"]))
		return
	}
	err = validate.Struct(disk)
	if err != nil {
		w.Write([]byte(responses["bad"]))
		return
	}
	disk, err = getPath(disk, w)
	if err != nil {
		// NOTE(review): assuming a getPath failure is a server-side problem —
		// confirm it does not already write its own response to w.
		w.Write([]byte(responses["server"]))
		return
	}
	err = RegisterDisk(disk)
	if err != nil {
		if mgo.IsDup(err) {
			w.Write([]byte(responses["dup"]))
			return
		}
		w.Write([]byte(responses["server"]))
		return
	}
	w.Write([]byte(responses["bacana"]))
}
func diskById(w http.ResponseWriter, r *http.Request) {
var disk Disk
disk, err := SearchDiskById(r)
if err != nil {
switch err {
default:
w.Write([]byte(responses["server"]))
return
case mgo.ErrNotFound:
w.Write([]byte(responses["notfound"]))
return
}
}
respBody, err := json.MarshalIndent(disk, "", " ")
if err != nil {
log.Fatal(err)
}
ResponseWithJSON(w, respBody, http.StatusOK)
return
}
// updateDisk decodes a Disk from the request body, validates it, and
// applies the change, answering with one of the canned responses.
func updateDisk(w http.ResponseWriter, r *http.Request) {
	var d Disk
	if err := DecodeJson(r.Body, &d); err != nil {
		w.Write([]byte(responses["bad-json"]))
		return
	}
	if err := validate.Struct(d); err != nil {
		w.Write([]byte(responses["bad"]))
		return
	}
	if err := ChangeDisk(d, r); err != nil {
		if err == mgo.ErrNotFound {
			w.Write([]byte(responses["notfound"]))
		} else {
			w.Write([]byte(responses["server"]))
		}
		return
	}
	w.Write([]byte(responses["bacana"]))
}
// deleteDisk removes the disk addressed by the request, answering with one
// of the canned responses.
func deleteDisk(w http.ResponseWriter, r *http.Request) {
	if err := RemoveDisk(r); err != nil {
		if err == mgo.ErrNotFound {
			w.Write([]byte(responses["notfound"]))
		} else {
			w.Write([]byte(responses["server"]))
		}
		return
	}
	w.Write([]byte(responses["bacana"]))
}
|
package Routes
import (
"log"
"net/http"
)
// SetupRoutes registers the file-management handlers and serves HTTP on
// port 8080, logging any listen error.
func SetupRoutes() {
	http.HandleFunc("/upload", upload)
	http.HandleFunc("/remove", remove)
	http.HandleFunc("/rename", rename)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Println(err)
	}
}
|
package controllers
import (
"fmt"
"net/http"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
utility "github.com/go-ignite/ignite-admin/utils"
"github.com/go-ignite/ignite/models"
)
// PanelIndexHandler serves the admin panel's index page.
func (router *MainRouter) PanelIndexHandler(c *gin.Context) {
	c.HTML(http.StatusOK, "index.html", nil)
}
// PanelLoginHandler authenticates the admin user and, on success, issues a
// JWT that expires after one hour.
//
// Fixes: the submitted password was printed to stdout (credentials must
// never be logged), and the failure message read "Username of password"
// instead of "Username or password".
func (router *MainRouter) PanelLoginHandler(c *gin.Context) {
	loginEntity := struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}{}
	if err := c.BindJSON(&loginEntity); err != nil {
		resp := models.Response{Success: false, Message: "Could not parse username & password..."}
		c.JSON(http.StatusInternalServerError, &resp)
		return
	}
	// Log the attempted username only — never the password.
	fmt.Println("login attempt for username:", loginEntity.Username)
	if loginEntity.Username == utility.Auth_Username && loginEntity.Password == utility.Auth_Password {
		// Create a token whose only claim is a one-hour expiry.
		token := jwt.New(jwt.GetSigningMethod("HS256"))
		token.Claims = jwt.MapClaims{
			"exp": time.Now().Add(time.Hour * 1).Unix(),
		}
		// Sign and get the complete encoded token as a string.
		tokenString, err := token.SignedString([]byte(utility.Auth_Secret))
		resp := models.Response{}
		if err != nil {
			resp.Success = false
			resp.Message = "Could not generate token"
			c.JSON(http.StatusInternalServerError, &resp)
			return
		}
		resp.Success = true
		resp.Message = "success"
		resp.Data = tokenString
		c.JSON(http.StatusOK, &resp)
		return
	}
	resp := models.Response{Success: false, Message: "Username or password is wrong!"}
	c.JSON(http.StatusOK, &resp)
}
|
package response
import (
"bytes"
"io"
"net/http"
"os"
"sync"
"time"
"github.com/webnice/transport/v3/charmap"
"github.com/webnice/transport/v3/content"
"github.com/webnice/transport/v3/data"
"github.com/webnice/transport/v3/header"
)
const (
	// maxDataSizeLoadedInMemory is the maximum amount of response data held
	// in memory: 250MB.
	maxDataSizeLoadedInMemory = uint64(250 * 1024 * 1024)
)

// Pool is an interface of package
type Pool interface {
	// ResponseGet takes a fresh Response element from the pool.
	ResponseGet() Interface
	// ResponsePut returns a used Response element to the sync.Pool.
	ResponsePut(req Interface)
}
// Interface is an interface of package
type Interface interface {
	// DebugFunc sets the debug func and enables or disables debug mode.
	// If fn is not nil, debug mode is enabled; if fn is nil, it is disabled.
	DebugFunc(fn DebugFunc) Interface
	// Do performs the request and receives the response.
	Do(client *http.Client, request *http.Request) error
	// Load all response data.
	Load() error
	// Error returns the latest error.
	Error() error
	// Response returns the http.Response as is.
	Response() *http.Response
	// ContentLength records the length of the associated content.
	ContentLength() int64
	// Cookies parses and returns the cookies set in the Set-Cookie headers.
	Cookies() []*http.Cookie
	// Latency is the request latency for reading the body of the response,
	// excluding the time spent reading the response header.
	Latency() time.Duration
	// StatusCode is the http status code of the response.
	StatusCode() int
	// Status is the http status string of the response, for known HTTP codes.
	Status() string
	// Header maps header keys to values. If the response had multiple headers
	// with the same key, they may be concatenated, with comma delimiters.
	Header() header.Interface
	// Charmap interface.
	Charmap() charmap.Charmap
	// Content returns the interface for working with response content.
	Content() content.Interface
}
// impl is an implementation of package
type impl struct {
	responsePool *sync.Pool // Pool of reusable Response objects.
}

// DebugFunc is a function for debugging request/response data.
type DebugFunc func(data []byte)

// Response is a Response implementation.
type Response struct {
	err                   error                          // Latest error.
	response              *http.Response                 // http.Response object.
	debugFunc             DebugFunc                      // Debug hook for request/response data. Non-nil enables debug mode; nil disables it.
	timeBegin             time.Time                      // Time at which loading of the request's result began.
	timeLatency           time.Duration                  // Time spent loading the request's result.
	contentInMemory       bool                           // =true - the result is held in memory, =false - the result is in a temporary file.
	contentData           *bytes.Buffer                  // Request result buffered in memory.
	contentFilename       string                         // Name of the temporary file holding the request result.
	contentFh             *os.File                       // File descriptor of the temporary file.
	contentTemporaryFiles []string                       // Names of the temporary files.
	contentWriteCloser    io.WriteCloser                 // io.WriteCloser interface to the in-memory request result.
	contentLength         int64                          // Number of bytes loaded.
	contentReader         data.ReadAtSeekerWriteToCloser // Interface to the loaded result data.
	charmap               charmap.Charmap                // charmap interface.
	// Shared scratch variables.
	tmpOk     bool      // Scratch variable.
	tmpTm     time.Time // Scratch variable.
	tmpString string    // Scratch variable.
	tmpI      int       // Scratch variable.
}
|
// +build never
package examples
import (
"net/http"
"os"
"testing"
"github.com/gavv/httpexpect"
"google.golang.org/appengine/aetest"
)
// These tests require installed Google Appengine SDK.
// https://cloud.google.com/appengine/downloads
// init is used by GAE to start serving the app; it is added here for
// illustration purposes.
func init() {
	http.Handle("/", GaeHandler())
}
// gaeInstance is our global dev_appserver instance.
var gaeInstance aetest.Instance

// TestMain boots the shared dev_appserver instance, runs every test, then
// shuts the instance down and exits with the tests' status code.
func TestMain(m *testing.M) {
	instance, err := aetest.NewInstance(nil)
	if err != nil {
		panic(err)
	}
	gaeInstance = instance
	code := m.Run() // call all actual tests
	gaeInstance.Close()
	os.Exit(code)
}
// gaeTester returns a new Expect instance to test GaeHandler().
func gaeTester(t *testing.T) *httpexpect.Expect {
	return httpexpect.WithConfig(httpexpect.Config{
		// Use gaeInstance to create requests.
		// aetest.Instance is compatible with httpexpect.RequestFactory.
		RequestFactory: gaeInstance,
		// Pass requests directly to GaeHandler, bypassing the network.
		Client: &http.Client{
			Transport: httpexpect.NewBinder(GaeHandler()),
			Jar:       httpexpect.NewJar(),
		},
		// Report errors using testify.
		Reporter: httpexpect.NewAssertReporter(t),
	})
}
// TestGae checks that the app answers GET /ping with a 200 "pong".
func TestGae(t *testing.T) {
	resp := gaeTester(t).GET("/ping").Expect()
	resp.Status(200)
	resp.Text().Equal("pong")
}
|
package main
import (
	"context"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"

	"github.com/desdic/godmarcparser/dmarc"
	"github.com/desdic/godmarcparser/input"
	log "github.com/sirupsen/logrus"
)
// ScanDirectory scans path for DMARC reports (gzip, zip, or plain XML),
// feeding parsed content to queue and any failures to errors. It stops
// early between files when ctx is cancelled.
func ScanDirectory(ctx context.Context, queue chan<- dmarc.Content, errors chan<- error, path string) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		errors <- fmt.Errorf("Unable to list files: %v", err)
		return
	}
	var i input.Handler
DONE:
	for _, f := range files {
		// Pick a reader implementation from the file extension.
		switch {
		case strings.HasSuffix(f.Name(), ".xml.gz"):
			i = input.GzipInput{}
		case strings.HasSuffix(f.Name(), ".zip"):
			i = input.ZipInput{}
		case strings.HasSuffix(f.Name(), ".xml"):
			i = input.XmlInput{}
		default:
			errors <- fmt.Errorf("Unknown filetype %s, skipping", f.Name())
			continue
		}
		// Bail out between files if the caller cancelled.
		select {
		case <-ctx.Done():
			break DONE
		default:
		}
		// filepath.Join builds an OS-correct path (the previous string
		// concatenation hard-coded a '/' separator).
		fname := filepath.Join(path, f.Name())
		log.Debugf("Found %s", fname)
		if err := i.Read(ctx, fname, queue); err != nil {
			errors <- err
		}
	}
}
|
package gcppubsub
import (
"context"
"encoding/json"
"fmt"
"sync"
"cloud.google.com/go/pubsub"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/api/option"
"github.com/brocaar/lora-app-server/internal/handler"
"github.com/brocaar/lorawan"
)
// Config holds the GCP Pub/Sub integration configuration.
type Config struct {
	CredentialsFile string `mapstructure:"credentials_file"`
	ProjectID       string `mapstructure:"project_id"`
	TopicName       string `mapstructure:"topic_name"`
}

// Handler implements a Google Cloud Pub/Sub handler.
type Handler struct {
	sync.RWMutex
	// ctx/cancel bound the lifetime of publish calls; cancel is invoked by
	// Close.
	ctx    context.Context
	cancel context.CancelFunc
	client *pubsub.Client // Pub/Sub API client.
	topic  *pubsub.Topic  // Topic that all events are published to.
}
// NewHandler creates a new Pub/Sub handler and verifies that the configured
// topic exists.
//
// Fix: on the topic-check error paths the client and the cancellable
// context previously leaked; they are now released before returning.
func NewHandler(conf Config) (handler.Handler, error) {
	h := Handler{}
	h.ctx, h.cancel = context.WithCancel(context.Background())

	var o []option.ClientOption
	if conf.CredentialsFile != "" {
		o = append(o, option.WithCredentialsFile(conf.CredentialsFile))
	}

	log.Info("handler/gcp_pub_sub: setting up client")
	var err error
	h.client, err = pubsub.NewClient(h.ctx, conf.ProjectID, o...)
	if err != nil {
		h.cancel()
		return nil, errors.Wrap(err, "new pubsub client error")
	}

	log.WithField("topic", conf.TopicName).Info("handler/gcp_pub_sub: setup topic")
	h.topic = h.client.Topic(conf.TopicName)
	ok, err := h.topic.Exists(h.ctx)
	if err != nil {
		h.client.Close()
		h.cancel()
		return nil, errors.Wrap(err, "topic exists error")
	}
	if !ok {
		h.client.Close()
		h.cancel()
		return nil, fmt.Errorf("topic %s does not exist", conf.TopicName)
	}
	return &h, nil
}
// Close closes the handler.
// It cancels in-flight publishes and releases the Pub/Sub client.
func (h *Handler) Close() error {
	log.Info("handler/gcp_pub_sub: closing handler")
	h.cancel()
	return h.client.Close()
}

// SendDataUp sends an uplink data payload.
func (h *Handler) SendDataUp(pl handler.DataUpPayload) error {
	return h.publish("up", pl.DevEUI, pl)
}

// SendJoinNotification sends a join notification.
func (h *Handler) SendJoinNotification(pl handler.JoinNotification) error {
	return h.publish("join", pl.DevEUI, pl)
}

// SendACKNotification sends an ack notification.
func (h *Handler) SendACKNotification(pl handler.ACKNotification) error {
	return h.publish("ack", pl.DevEUI, pl)
}

// SendErrorNotification sends an error notification.
func (h *Handler) SendErrorNotification(pl handler.ErrorNotification) error {
	return h.publish("error", pl.DevEUI, pl)
}

// SendStatusNotification sends a status notification.
func (h *Handler) SendStatusNotification(pl handler.StatusNotification) error {
	return h.publish("status", pl.DevEUI, pl)
}

// SendLocationNotification sends a location notification.
func (h *Handler) SendLocationNotification(pl handler.LocationNotification) error {
	return h.publish("location", pl.DevEUI, pl)
}

// DataDownChan returns nil: this integration is publish-only and offers no
// downlink channel.
func (h *Handler) DataDownChan() chan handler.DataDownPayload {
	return nil
}
// publish JSON-encodes v and publishes it to the configured topic, tagging
// the message with the event name and device EUI as attributes.
func (h *Handler) publish(event string, devEUI lorawan.EUI64, v interface{}) error {
	jsonB, err := json.Marshal(v)
	if err != nil {
		return errors.Wrap(err, "marshal json error")
	}
	res := h.topic.Publish(h.ctx, &pubsub.Message{
		Data: jsonB,
		Attributes: map[string]string{
			"event":  event,
			"devEUI": devEUI.String(),
		},
	})
	// Get blocks until the publish result is available (or h.ctx ends).
	if _, err := res.Get(h.ctx); err != nil {
		return errors.Wrap(err, "get publish result error")
	}
	log.WithFields(log.Fields{
		"dev_eui": devEUI,
		"event":   event,
	}).Info("handler/gcp_pub_sub: event published")
	return nil
}
|
package main
import (
"fmt"
"log"
"strings"
"time"
"github.com/Debian/debiman/internal/archive"
"github.com/Debian/debiman/internal/manpage"
"pault.ag/go/debian/control"
)
// mostPopularArchitecture is used as preferred architecture when we
// need to pick an arbitrary architecture. The rationale is that
// downloading the package for the most popular architecture has the
// least bad influence on the mirror server’s caches.
const mostPopularArchitecture = "amd64"

// stats aggregates counters collected over a whole run.
type stats struct {
	PackagesExtracted uint64
	PackagesDeleted   uint64
	ManpagesRendered  uint64
	ManpageBytes      uint64
	HtmlBytes         uint64
	IndexBytes        uint64
}

// globalView is the merged, cross-suite view of packages and manpages
// produced by buildGlobalView.
type globalView struct {
	pkgs          []*pkgEntry                // Package download work units across all suites.
	suites        map[string]bool            // Set of canonical suite names.
	idxSuites     map[string]string          // Maps suite, codename and flag value to the canonical suite name.
	contentByPath map[string][]*contentEntry // Content entries grouped by file path.
	xref          map[string][]*manpage.Meta // Manpage cross-reference index, keyed by manpage name.
	stats         *stats
	start         time.Time
}
// distributionIdentifier records how a distribution was named on the
// command line: by codename or by suite.
type distributionIdentifier int

const (
	fromCodename = iota
	fromSuite
)

// distribution is one release to process, tagged with how it was named.
type distribution struct {
	name       string
	identifier distributionIdentifier
}

// distributions returns a list of all distributions (either codenames
// [e.g. wheezy, jessie] or suites [e.g. testing, unstable]) from the
// -sync_codenames and -sync_suites flags. Blank entries are skipped and
// surrounding whitespace is trimmed.
func distributions(codenames []string, suites []string) []distribution {
	result := make([]distribution, 0, len(codenames)+len(suites))
	collect := func(names []string, id distributionIdentifier) {
		for _, raw := range names {
			trimmed := strings.TrimSpace(raw)
			if trimmed == "" {
				continue
			}
			result = append(result, distribution{
				name:       trimmed,
				identifier: id,
			})
		}
	}
	collect(codenames, fromCodename)
	collect(suites, fromSuite)
	return result
}
// buildGlobalView downloads the Release, Contents and Packages metadata for
// every requested distribution and merges them into a single globalView:
// canonical suite names, content entries grouped by path, package download
// work units, and a manpage cross-reference index.
func buildGlobalView(ar *archive.Getter, dists []distribution, start time.Time) (globalView, error) {
	var stats stats
	res := globalView{
		suites:        make(map[string]bool, len(dists)),
		idxSuites:     make(map[string]string, len(dists)),
		contentByPath: make(map[string][]*contentEntry),
		xref:          make(map[string][]*manpage.Meta),
		stats:         &stats,
		start:         start,
	}
	for _, dist := range dists {
		release, err := ar.GetRelease(dist.name)
		if err != nil {
			return res, err
		}
		// The canonical suite name depends on how the distribution was
		// specified: codenames resolve to the release codename, suites to
		// the release suite.
		var suite string
		if dist.identifier == fromCodename {
			suite = release.Codename
		} else {
			suite = release.Suite
		}
		res.suites[suite] = true
		// Map every alias (suite, codename, flag value) to the canonical name.
		res.idxSuites[release.Suite] = suite
		res.idxSuites[release.Codename] = suite
		res.idxSuites[dist.name] = suite
		hashByFilename := make(map[string]*control.SHA256FileHash, len(release.SHA256))
		for idx, fh := range release.SHA256 {
			// fh.Filename contains e.g. “non-free/source/Sources”
			hashByFilename[fh.Filename] = &(release.SHA256[idx])
		}
		content, err := getAllContents(ar, suite, release, hashByFilename)
		if err != nil {
			return res, err
		}
		for _, c := range content {
			res.contentByPath[c.filename] = append(res.contentByPath[c.filename], c)
		}
		var latestVersion map[string]*manpage.PkgMeta
		{
			// Collect package download work units
			var pkgs []*pkgEntry
			var err error
			pkgs, latestVersion, err = getAllPackages(ar, suite, release, hashByFilename, buildContainsMains(content))
			if err != nil {
				return res, err
			}
			log.Printf("Adding %d packages from suite %q", len(pkgs), suite)
			res.pkgs = append(res.pkgs, pkgs...)
		}
		knownIssues := make(map[string][]error)
		// Build a global view of all the manpages (required for cross-referencing).
		// TODO(issue): edge case: packages which got renamed between releases
		for _, c := range content {
			if _, ok := latestVersion[c.suite+"/"+c.binarypkg]; !ok {
				key := c.suite + "/" + c.binarypkg
				knownIssues[key] = append(knownIssues[key],
					fmt.Errorf("Could not determine latest version"))
				continue
			}
			m, err := manpage.FromManPath(strings.TrimPrefix(c.filename, "usr/share/man/"), latestVersion[c.suite+"/"+c.binarypkg])
			if err != nil {
				key := c.suite + "/" + c.binarypkg
				knownIssues[key] = append(knownIssues[key],
					fmt.Errorf("Trying to interpret path %q: %v", c.filename, err))
				continue
			}
			// NOTE(stapelberg): this additional verification step
			// is necessary because manpages such as the French
			// manpage for qelectrotech(1) are present in multiple
			// encodings. manpageFromManPath ignores encodings, so
			// if we didn’t filter, we would end up with what
			// looks like duplicates.
			present := false
			for _, x := range res.xref[m.Name] {
				if x.ServingPath() == m.ServingPath() {
					present = true
					break
				}
			}
			if !present {
				res.xref[m.Name] = append(res.xref[m.Name], m)
			}
		}
		for key, errors := range knownIssues {
			// TODO: write these to a known-issues file, parse bug numbers from an auxilliary file
			log.Printf("package %q has errors: %v", key, errors)
		}
	}
	return res, nil
}
|
package models
import (
"github.com/astaxie/beego/orm"
"time"
)
// init registers the calendar models with beego's ORM so they can be
// queried and persisted.
func init() {
	orm.RegisterModel(&Calendar{})
	orm.RegisterModel(&CalendarEvent{})
}
// Calendar groups CalendarEvent records; Public marks whether the calendar
// is visible to everyone.
type Calendar struct {
	Id     int              `json:"id"`
	Name   string           `json:"name" orm:"size(128)" form:"name"`
	Public bool             `json:"public" form:"public"`
	Events []*CalendarEvent `json:"events" orm:"reverse(many)"`
}

// CalendarEvent is a single event belonging to one Calendar (foreign key).
type CalendarEvent struct {
	Id          int       `json:"id"`
	Title       string    `json:"title" orm:"size(128)" form:"title"`
	Description string    `json:"description" orm:"null;type(text)" form:"description"`
	Location    string    `json:"location" orm:"size(128)" form:"location"`
	Begin       time.Time `json:"begin" form:"begin"`
	End         time.Time `json:"end" form:"end"`
	Calendar    *Calendar `json:"calendar" orm:"rel(fk)"`
}
|
package library
import "errors"
// MusicEntry describes a single track in the library.
type MusicEntry struct {
	Id     string
	Name   string
	Artist string
	Source string
	Type   string
}

// MusicManager maintains an ordered, in-memory collection of MusicEntry
// values. It is not safe for concurrent use.
type MusicManager struct {
	musics []MusicEntry
}

// NewMusicManager returns an empty manager.
func NewMusicManager() *MusicManager {
	return &MusicManager{make([]MusicEntry, 0)}
}

// Len reports the number of stored entries.
func (m *MusicManager) Len() int {
	return len(m.musics)
}

// Get returns a pointer to the entry at index, or an error when index is
// out of range.
func (m *MusicManager) Get(index int) (musics *MusicEntry, err error) {
	if index < 0 || index >= len(m.musics) {
		return nil, errors.New("index out of range")
	}
	return &m.musics[index], nil
}

// Find returns a pointer to the first entry named name, or nil when absent.
//
// Fix: the original returned the address of the loop-local copy, so
// mutations through the returned pointer were silently lost; it now aliases
// the stored entry, matching Get.
func (m *MusicManager) Find(name string) *MusicEntry {
	for i := range m.musics {
		if m.musics[i].Name == name {
			return &m.musics[i]
		}
	}
	return nil
}

// Add appends a copy of music to the collection.
func (m *MusicManager) Add(music *MusicEntry) {
	m.musics = append(m.musics, *music)
}

// Remove deletes the entry at index and returns a copy of it, or nil when
// index is out of range.
//
// Fixes two bugs: removing a middle element used m.musics[0:index-1] and
// therefore also dropped the element before index, and the returned pointer
// referenced the live backing array, which the deletion had just shifted
// over.
func (m *MusicManager) Remove(index int) *MusicEntry {
	if index < 0 || index >= len(m.musics) {
		return nil
	}
	removed := m.musics[index] // copy before the backing array shifts left
	m.musics = append(m.musics[:index], m.musics[index+1:]...)
	return &removed
}

// RemoveByName deletes every entry named name and returns the removed
// entries in their original order.
func (m *MusicManager) RemoveByName(name string) (removed []MusicEntry) {
	var left []MusicEntry
	for i, v := range m.musics {
		if v.Name == name {
			removed = append(removed, m.musics[i])
		} else {
			left = append(left, m.musics[i])
		}
	}
	m.musics = left
	return
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
"github.com/zhuangalbert/boilerplate/src/api/databases"
"github.com/zhuangalbert/boilerplate/src/api/v1/controllers"
)
// init loads environment variables from the local .env file and aborts when
// it is missing, since later configuration (APP_ENV, API_PORT) depends on it.
func init() {
	if godotenv.Load() != nil {
		log.Fatal("Error loading .env file")
	}
}
// main dispatches on the first CLI argument (defaulting to "serve"):
// "serve" starts the API (with production setup first when
// APP_ENV=production), "seed" is a placeholder for database seeding, and
// "migrate" runs the migrations.
func main() {
	cmd := command()
	if cmd == "" {
		cmd = "serve"
	}
	switch cmd {
	case "serve":
		if os.Getenv("APP_ENV") == "production" {
			startProd()
		}
		startApp()
	case "seed":
		//database.Seed()
	case "migrate":
		databases.Migrate()
	}
}
// startProd is a placeholder for production-only startup work.
func startProd() {
}

// startApp wires the user controller routes into a gin router and serves on
// the port named by the API_PORT environment variable.
func startApp() {
	serverPort := os.Getenv("API_PORT")
	router := gin.Default()
	controllers.UserControllerHandler(router)
	serverString := fmt.Sprintf(":%s", serverPort)
	fmt.Println(serverString)
	router.Run(serverString)
}
func command() string {
args := os.Args[1:]
if len(args) > 0 {
return args[0]
}
return ""
}
|
// This program demonstrates how to attach an eBPF program to a uretprobe.
// The program will be attached to the 'readline' symbol in the binary '/bin/bash' and print out
// the line which 'readline' functions returns to the caller.
package main
import (
"bytes"
"debug/elf"
"encoding/binary"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"unsafe"
ringbuffer "github.com/cilium/ebpf/perf"
goperf "github.com/elastic/go-perf"
"golang.org/x/sys/unix"
)
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang-11 UProbeExample ./bpf/uprobe_example.c -- -I../headers -O2
// bashPath and symbolName identify the uprobe attach target: the readline
// function inside /bin/bash.
const bashPath = "/bin/bash"
const symbolName = "readline"

// Event is the record read back from the perf ring buffer: the calling PID
// and the captured input line (NUL-terminated, at most 80 bytes).
type Event struct {
	PID  uint32
	Line [80]byte
}
// main attaches the generated eBPF uprobe program to the readline symbol in
// /bin/bash, then streams captured lines from the perf ring buffer until
// interrupted by SIGINT/SIGTERM.
func main() {
	stopper := make(chan os.Signal, 1)
	signal.Notify(stopper, os.Interrupt, syscall.SIGTERM)
	// Increase rlimit so the eBPF map and program can be loaded.
	if err := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &unix.Rlimit{
		Cur: unix.RLIM_INFINITY,
		Max: unix.RLIM_INFINITY,
	}); err != nil {
		log.Fatalf("failed to set temporary rlimit: %v", err)
	}
	// Load the bpf2go-generated program spec and its objects.
	specs, err := NewUProbeExampleSpecs()
	if err != nil {
		log.Fatalf("error while loading specs: %v", err)
	}
	objs, err := specs.Load(nil)
	if err != nil {
		log.Fatalf("error while loading objects: %v", err)
	}
	// Resolve the symbol address and attach the program as a uretprobe.
	symbolAddress, err := getSymbolAddress(bashPath, symbolName)
	if err != nil {
		log.Fatalf("error while getting symbol address: %v", err)
	}
	efd, err := openUProbe(bashPath, symbolAddress, true, uint32(objs.ProgramUprobeBashReadline.FD()))
	if err != nil {
		log.Fatalf("create and attach UProbe: %v", err)
	}
	defer unix.Close(efd)
	rd, err := ringbuffer.NewReader(objs.MapEvents, os.Getpagesize())
	if err != nil {
		log.Fatalf("error while creating ringbuffer reader: %v", err)
	}
	// NOTE(review): this defer blocks on a signal before closing the
	// reader, so it only completes teardown once a signal has arrived.
	defer func() {
		<-stopper
		_ = rd.Close()
	}()
	var event Event
	for {
		// Exit promptly if a signal already arrived.
		select {
		case <-stopper:
			return
		default:
		}
		record, err := rd.Read()
		if err != nil {
			if ringbuffer.IsClosed(err) {
				return
			}
			log.Printf("failed to read from ringbuffer: %+v\n", err)
		}
		if record.LostSamples != 0 {
			log.Printf("lost samples due to ringbuffer full: %+v\n", err)
			continue
		}
		// Decode the raw sample into Event and trim the line at its first
		// NUL byte.
		binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event)
		line := string(event.Line[:bytes.IndexByte(event.Line[:], 0)])
		log.Printf("%s from /bin/bash called with %s\n", symbolName, line)
	}
}
// openUProbe creates a perf uprobe (a uretprobe when isReturn is set) at
// symbolAddress inside binaryPath, attaches the eBPF program fd to it, and
// enables the event. It returns the perf event fd, which the caller must
// close.
func openUProbe(binaryPath string, symbolAddress uint64, isReturn bool, fd uint32) (int, error) {
	et, err := goperf.LookupEventType("uprobe")
	if err != nil {
		return 0, fmt.Errorf("read PMU type: %v", err)
	}
	// Config1 carries a pointer to the NUL-terminated binary path;
	// Config2 carries the symbol offset.
	config1ptr := newStringPointer(binaryPath)
	attr := goperf.Attr{
		Type:    et,
		Config1: uint64(uintptr(config1ptr)),
		Config2: symbolAddress,
	}
	if isReturn {
		// set uretprobe bit
		attr.Config |= 1 << 0
	}
	ev, err := goperf.Open(&attr, goperf.AllThreads, 0, nil)
	if err != nil {
		return 0, fmt.Errorf("perf event open: %v", err)
	}
	efd, err := ev.FD()
	if err != nil {
		return 0, fmt.Errorf("get perf event fd: %v", err)
	}
	// Ensure config1ptr is not finalized until goperf.Open returns.
	runtime.KeepAlive(config1ptr)
	if err := ev.Enable(); err != nil {
		_ = unix.Close(efd)
		return 0, fmt.Errorf("perf event enable: %v", err)
	}
	if err := ev.SetBPF(fd); err != nil {
		unix.Close(efd)
		return 0, fmt.Errorf("perf event set bpf: %v", err)
	}
	return efd, nil
}
func newStringPointer(str string) unsafe.Pointer {
// The kernel expects strings to be zero terminated
buf := make([]byte, len(str)+1)
copy(buf, str)
return unsafe.Pointer(&buf[0])
}
// getSymbolAddress returns the value of the dynamic symbol named symbolName
// in the ELF binary at elfPath.
func getSymbolAddress(elfPath, symbolName string) (uint64, error) {
	f, err := elf.Open(elfPath)
	if err != nil {
		return 0, fmt.Errorf("failed to open ELF: %+v", err)
	}
	defer func() {
		_ = f.Close()
	}()
	symbols, err := f.DynamicSymbols()
	if err != nil {
		return 0, fmt.Errorf("failed to list symbols: %+v", err)
	}
	for _, s := range symbols {
		if s.Name != symbolName {
			continue
		}
		return s.Value, nil
	}
	return 0, fmt.Errorf("failed to find symbol %s", symbolName)
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package boshdirector_test
import (
"errors"
boshdir "github.com/cloudfoundry/bosh-cli/v7/director"
boshuaa "github.com/cloudfoundry/bosh-cli/v7/uaa"
. "github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector/fakes"
"github.com/pivotal-cf/on-demand-service-broker/config"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// This suite covers boshdirector.New end to end: New first builds an
// unauthenticated director to fetch /info, appends the configured CA
// certificate to the system cert pool, and returns a client whose UAA (or
// basic-auth) configuration is exercised via VerifyAuth.
//
// NOTE(review): boshAuthConfig, fakeDNSRetrieverFactory, fakeBoshHTTPFactory
// and logger are declared elsewhere in this test package (suite-level setup);
// their exact values cannot be confirmed from this file alone.
var _ = Describe("New", func() {
	var (
		fakeCertAppender                          *fakes.FakeCertAppender
		fakeDirector, fakeDirectorUnauthenticated *fakes.FakeDirector
		fakeDirectorFactory                       *fakes.FakeDirectorFactory
		fakeUAAFactory                            *fakes.FakeUAAFactory
	)

	BeforeEach(func() {
		fakeCertAppender = new(fakes.FakeCertAppender)
		fakeDirectorFactory = new(fakes.FakeDirectorFactory)
		fakeDirectorUnauthenticated = new(fakes.FakeDirector)
		fakeDirector = new(fakes.FakeDirector)
		fakeUAAFactory = new(fakes.FakeUAAFactory)
		fakeCertAppender.AppendCertsFromPEMReturns(true)
		// Factory call 0 is the unauthenticated director used only for /info;
		// call 1 is the authenticated director handed back to the caller.
		fakeDirectorFactory.NewReturnsOnCall(0, fakeDirectorUnauthenticated, nil)
		fakeDirectorFactory.NewReturnsOnCall(1, fakeDirector, nil)
		fakeDirector.IsAuthenticatedReturns(true, nil)
	})

	Context("when UAA is configured", func() {
		BeforeEach(func() {
			// Both directors advertise UAA auth so the UAA code path is taken.
			fakeDirectorUnauthenticated.InfoReturns(boshdir.Info{
				Version: "1.3262.0.0 (00000000)",
				Auth: boshdir.UserAuthentication{
					Type: "uaa",
					Options: map[string]interface{}{
						"url": "uaa.url.example.com:12345",
					},
				},
			}, nil)
			fakeDirector.InfoReturns(boshdir.Info{
				Version: "1.3262.0.0 (00000000)",
				User:    "bosh-username",
				Auth: boshdir.UserAuthentication{
					Type: "uaa",
					Options: map[string]interface{}{
						"url": "uaa.url.example.com:12345",
					},
				},
			}, nil)
		})

		It("returns a bosh client that works", func() {
			client, err := New(
				"http://example.org:25666",
				[]byte("a totally trustworthy cert"),
				fakeCertAppender,
				fakeDirectorFactory,
				fakeUAAFactory,
				boshAuthConfig,
				fakeDNSRetrieverFactory.Spy,
				fakeBoshHTTPFactory.Spy,
				logger,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(client).NotTo(BeNil())

			By("getting bosh info from the non-authenticated director")
			directorConfig, taskReporter, fileReporter := fakeDirectorFactory.NewArgsForCall(0)
			Expect(directorConfig).To(Equal(boshdir.FactoryConfig{
				Host:   "example.org",
				Port:   25666,
				CACert: "a totally trustworthy cert",
			}))
			Expect(directorConfig.TokenFunc).To(BeNil())
			Expect(taskReporter).To(Equal(boshdir.NoopTaskReporter{}))
			Expect(fileReporter).To(Equal(boshdir.NoopFileReporter{}))
			Expect(fakeDirectorUnauthenticated.InfoCallCount()).To(Equal(1))

			By("appending the trusted certificate to the system cert pool")
			Expect(fakeCertAppender.AppendCertsFromPEMCallCount()).To(Equal(1))
			Expect(fakeCertAppender.AppendCertsFromPEMArgsForCall(0)).To(Equal([]byte("a totally trustworthy cert")))

			By("finally returning a client with a sensible PollingInterval that we can use for a working GetInfo call")
			Expect(client.PollingInterval).To(BeEquivalentTo(5))

			By("ensuring that the client works")
			err = client.VerifyAuth(logger)
			Expect(err).NotTo(HaveOccurred())

			By("having configured uaa")
			Expect(fakeUAAFactory.NewCallCount()).To(Equal(1))
			uaaConfig := fakeUAAFactory.NewArgsForCall(0)
			Expect(uaaConfig).To(Equal(boshuaa.Config{
				Host:         "uaa.url.example.com",
				Port:         12345,
				CACert:       "a totally trustworthy cert",
				Client:       boshAuthConfig.UAA.ClientCredentials.ID,
				ClientSecret: boshAuthConfig.UAA.ClientCredentials.Secret,
			}))
		})

		Describe("but New fails", func() {
			It("errors when bosh url is not valid", func() {
				_, err := New(
					"https://not a valid url",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).To(MatchError(ContainSubstring("Failed to build director config from url")))
			})

			It("errors when the director factory errors", func() {
				fakeDirectorFactory.NewReturnsOnCall(0, new(fakes.FakeDirector), errors.New("could not build director"))
				_, err := New(
					"https://example.org:25666",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).To(MatchError(ContainSubstring("Failed to build director: could not build director")))
			})

			It("errors when the director fails to GetInfo", func() {
				fakeDirectorUnauthenticated.InfoReturns(boshdir.Info{}, errors.New("could not get info"))
				_, err := New(
					"https://example.org:25666",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).To(MatchError(ContainSubstring("error fetching BOSH director information: could not get info")))
			})

			// Note: UAA configuration is lazy — New succeeds and the error
			// only surfaces from VerifyAuth, in the next three tests.
			It("errors when uaa url is not valid", func() {
				fakeDirectorUnauthenticated.InfoReturns(boshdir.Info{
					Version: "1.3262.0.0 (00000000)",
					Auth: boshdir.UserAuthentication{
						Type: "uaa",
						Options: map[string]interface{}{
							"url": "http://what is this",
						},
					},
				}, nil)
				client, err := New(
					"https://example.org:25666",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).NotTo(HaveOccurred())
				err = client.VerifyAuth(logger)
				Expect(err).To(MatchError(ContainSubstring("Failed to build UAA config from url")))
			})

			It("errors when uaa is not deployed", func() {
				fakeDirectorUnauthenticated.InfoReturns(boshdir.Info{
					Version: "1.3262.0.0 (00000000)",
					Auth: boshdir.UserAuthentication{
						Type: "basic",
					},
				}, nil)
				client, err := New(
					"https://example.org:25666",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).NotTo(HaveOccurred())
				err = client.VerifyAuth(logger)
				Expect(err).To(MatchError(ContainSubstring("Failed to build UAA config from url: Expected non-empty UAA URL")))
			})

			It("errors when uaa factory returns an error", func() {
				fakeUAAFactory.NewReturns(new(fakes.FakeUAA), errors.New("failed to build uaa"))
				client, err := New(
					"https://example.org:25666",
					[]byte("a totally trustworthy cert"),
					fakeCertAppender,
					fakeDirectorFactory,
					fakeUAAFactory,
					boshAuthConfig,
					fakeDNSRetrieverFactory.Spy,
					fakeBoshHTTPFactory.Spy,
					logger,
				)
				Expect(err).NotTo(HaveOccurred())
				err = client.VerifyAuth(logger)
				Expect(err).To(MatchError(ContainSubstring("Failed to build UAA client: failed to build uaa")))
			})
		})
	})

	Context("when UAA is not configured (a.k.a. Basic auth)", func() {
		BeforeEach(func() {
			fakeDirectorUnauthenticated.InfoReturns(boshdir.Info{
				Version: "1.3262.0.0 (00000000)",
				Auth: boshdir.UserAuthentication{
					Type: "basic",
				},
			}, nil)
			fakeDirector.InfoReturns(boshdir.Info{
				Version: "1.3262.0.0 (00000000)",
				User:    "bosh-username",
				Auth: boshdir.UserAuthentication{
					Type: "basic",
				},
			}, nil)
		})

		It("returns a bosh client that works", func() {
			basicAuthConfig := config.Authentication{
				Basic: config.UserCredentials{Username: "example-username", Password: "example-password"},
			}
			client, err := New(
				"http://example.org:25666",
				[]byte("a totally trustworthy cert"),
				fakeCertAppender,
				fakeDirectorFactory,
				fakeUAAFactory,
				basicAuthConfig,
				fakeDNSRetrieverFactory.Spy,
				fakeBoshHTTPFactory.Spy,
				logger,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(client).NotTo(BeNil())

			By("getting bosh info from the non-authenticated director")
			directorConfig, taskReporter, fileReporter := fakeDirectorFactory.NewArgsForCall(0)
			Expect(directorConfig).To(Equal(boshdir.FactoryConfig{
				Host:   "example.org",
				Port:   25666,
				CACert: "a totally trustworthy cert",
			}))
			Expect(directorConfig.TokenFunc).To(BeNil())
			Expect(taskReporter).To(Equal(boshdir.NoopTaskReporter{}))
			Expect(fileReporter).To(Equal(boshdir.NoopFileReporter{}))
			Expect(fakeDirectorUnauthenticated.InfoCallCount()).To(Equal(1))

			By("not configuring uaa")
			Expect(fakeUAAFactory.NewCallCount()).To(Equal(0))

			By("appending the trusted certificate to the system cert pool")
			Expect(fakeCertAppender.AppendCertsFromPEMCallCount()).To(Equal(1))
			Expect(fakeCertAppender.AppendCertsFromPEMArgsForCall(0)).To(Equal([]byte("a totally trustworthy cert")))

			By("finally returning a client with a sensible PollingInterval that we can use for a working GetInfo call")
			Expect(client.PollingInterval).To(BeEquivalentTo(5))

			By("ensuring that the client works")
			err = client.VerifyAuth(logger)
			Expect(err).NotTo(HaveOccurred())
		})
	})
})
|
package convert
import (
"encoding/json"
"fmt"
tfv1alpha1 "github.com/isaaguilar/terraform-operator/pkg/apis/tf/v1alpha1"
tfv1alpha2 "github.com/isaaguilar/terraform-operator/pkg/apis/tf/v1alpha2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// ConvertV1alpha1ToV1alpha2 converts a raw JSON-encoded v1alpha1 Terraform
// resource into its v1alpha2 equivalent. It returns the marshalled v1alpha2
// bytes, the typed object, and a non-nil error only when the input cannot be
// unmarshalled. The complete original v1alpha1 resource is preserved in an
// annotation so it can be round-tripped back to the old version.
func ConvertV1alpha1ToV1alpha2(rawRequest []byte) ([]byte, runtime.Object, error) {
	want := tfv1alpha2.Terraform{}
	have := tfv1alpha1.Terraform{}
	err := json.Unmarshal(rawRequest, &have)
	if err != nil {
		return []byte{}, &want, err
	}
	fmt.Printf("Should convert %s/%s from %s to %s\n", have.Namespace, have.Name, tfv1alpha1.SchemeGroupVersion, tfv1alpha2.SchemeGroupVersion)
	want.TypeMeta = metav1.TypeMeta{
		Kind:       "Terraform",
		APIVersion: tfv1alpha2.SchemeGroupVersion.String(),
	}
	want.ObjectMeta = have.ObjectMeta

	// Straight field-for-field copies; the module-related fields collapse
	// into the structured TerraformModule in v1alpha2.
	want.Spec.TerraformVersion = have.Spec.TerraformVersion
	want.Spec.TerraformModule.Source = have.Spec.TerraformModule
	want.Spec.TerraformModule.Inline = have.Spec.TerraformModuleInline
	want.Spec.TerraformModule.ConfigMapSelector = (*tfv1alpha2.ConfigMapSelector)(have.Spec.TerraformModuleConfigMap)
	want.Spec.Backend = have.Spec.CustomBackend
	want.Spec.IgnoreDelete = have.Spec.IgnoreDelete
	want.Spec.KeepCompletedPods = have.Spec.KeepCompletedPods
	want.Spec.KeepLatestPodsOnly = have.Spec.KeepLatestPodsOnly
	want.Spec.WriteOutputsToStatus = have.Spec.WriteOutputsToStatus
	want.Spec.OutputsSecret = have.Spec.OutputsSecret
	want.Spec.PersistentVolumeSize = have.Spec.PersistentVolumeSize
	want.Spec.OutputsToInclude = have.Spec.OutputsToInclude
	want.Spec.OutputsToOmit = have.Spec.OutputsToOmit
	want.Spec.ServiceAccount = have.Spec.ServiceAccount

	// v1alpha1 had no storageClassName field; it could only be supplied via
	// this forward-compatibility annotation.
	if storageClassName, ok := have.Annotations["v1alpha2.tf.isaaguilar.com/storageClassName"]; ok {
		want.Spec.StorageClassName = &storageClassName
	}

	// Convert the three per-runner image settings; Images is only set when at
	// least one runner was customized (nil for each is "use the default").
	scriptImageConfig := convertImageConfig("script", have.Spec.ScriptRunner, have.Spec.ScriptRunnerVersion, have.Spec.ScriptRunnerPullPolicy)
	terraformImageConfig := convertImageConfig("terraform", have.Spec.TerraformRunner, "", have.Spec.TerraformRunnerPullPolicy)
	setupImageConfig := convertImageConfig("setup", have.Spec.SetupRunner, have.Spec.SetupRunnerVersion, have.Spec.SetupRunnerPullPolicy)
	images := tfv1alpha2.Images{
		Terraform: terraformImageConfig,
		Setup:     setupImageConfig,
		Script:    scriptImageConfig,
	}
	if scriptImageConfig != nil || terraformImageConfig != nil || setupImageConfig != nil {
		want.Spec.Images = &images
	}

	for _, credentials := range have.Spec.Credentials {
		want.Spec.Credentials = append(want.Spec.Credentials, tfv1alpha2.Credentials{
			SecretNameRef:             tfv1alpha2.SecretNameRef(credentials.SecretNameRef),
			AWSCredentials:            tfv1alpha2.AWSCredentials(credentials.AWSCredentials),
			ServiceAccountAnnotations: credentials.ServiceAccountAnnotations,
		})
	}
	if have.Spec.SSHTunnel != nil {
		want.Spec.SSHTunnel = &tfv1alpha2.ProxyOpts{
			Host:            have.Spec.SSHTunnel.Host,
			User:            have.Spec.SSHTunnel.User,
			SSHKeySecretRef: tfv1alpha2.SSHKeySecretRef(have.Spec.SSHTunnel.SSHKeySecretRef),
		}
	}
	for _, scmAuthMethod := range have.Spec.SCMAuthMethods {
		var gitScmAuthMethod *tfv1alpha2.GitSCM
		if scmAuthMethod.Git != nil {
			var gitSSH *tfv1alpha2.GitSSH
			if scmAuthMethod.Git.SSH != nil {
				gitSSH = &tfv1alpha2.GitSSH{
					RequireProxy:    scmAuthMethod.Git.SSH.RequireProxy,
					SSHKeySecretRef: (*tfv1alpha2.SSHKeySecretRef)(scmAuthMethod.Git.SSH.SSHKeySecretRef),
				}
			}
			var gitHTTPS *tfv1alpha2.GitHTTPS
			if scmAuthMethod.Git.HTTPS != nil {
				gitHTTPS = &tfv1alpha2.GitHTTPS{
					RequireProxy:   scmAuthMethod.Git.HTTPS.RequireProxy,
					TokenSecretRef: (*tfv1alpha2.TokenSecretRef)(scmAuthMethod.Git.HTTPS.TokenSecretRef),
				}
			}
			gitScmAuthMethod = &tfv1alpha2.GitSCM{
				SSH:   gitSSH,
				HTTPS: gitHTTPS,
			}
		}
		want.Spec.SCMAuthMethods = append(want.Spec.SCMAuthMethods, tfv1alpha2.SCMAuthMethod{
			Host: scmAuthMethod.Host,
			Git:  gitScmAuthMethod,
		})
	}

	// v1alpha1's global env/rules/annotations/labels become one wildcard
	// TaskOption applying to every task ("*").
	want.Spec.TaskOptions = []tfv1alpha2.TaskOption{}
	if len(have.Spec.Env) > 0 ||
		len(have.Spec.RunnerRules) > 0 ||
		len(have.Spec.RunnerAnnotations) > 0 ||
		len(have.Spec.RunnerLabels) > 0 {
		taskOption := tfv1alpha2.TaskOption{
			For: []tfv1alpha2.TaskName{"*"},
		}
		if len(have.Spec.Env) > 0 {
			taskOption.Env = have.Spec.Env
		}
		if len(have.Spec.RunnerRules) > 0 {
			taskOption.PolicyRules = have.Spec.RunnerRules
		}
		if len(have.Spec.RunnerAnnotations) > 0 {
			taskOption.Annotations = have.Spec.RunnerAnnotations
		}
		if len(have.Spec.RunnerLabels) > 0 {
			taskOption.Labels = have.Spec.RunnerLabels
		}
		want.Spec.TaskOptions = append(want.Spec.TaskOptions, taskOption)
	}

	// NOTICE: ScriptRunnerExecutionScriptConfigMap is not supported. Instead,
	// use a different Container Image to change the ENTRYPOINT. Changing the
	// ENTRYPOINT was the intended behaviour of *ExecutionScriptConfigMap
	// options.
	//
	// Note that both
	// - terraformRunnerExecutionScriptConfigMap &
	// - setupRunnerExecutionScriptConfigMap
	// will be used as as scripts that their respective Containers execute.
	//
	// In practice, the use of the *ExecutionScriptConfigMap was not adopted and
	// will lose support in favor of TaskOptions Script.
	if have.Spec.TerraformRunnerExecutionScriptConfigMap != nil {
		terraformRunnerExecutionScriptConfigMap := &tfv1alpha1.ConfigMapSelector{
			Name: have.Spec.TerraformRunnerExecutionScriptConfigMap.Name,
			Key:  have.Spec.TerraformRunnerExecutionScriptConfigMap.Key,
		}
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunInit, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunPlan, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunApply, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunInitDelete, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunPlanDelete, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(terraformRunnerExecutionScriptConfigMap, tfv1alpha2.RunApplyDelete, &want.Spec.TaskOptions)
	}
	if have.Spec.SetupRunnerExecutionScriptConfigMap != nil {
		setupRunnerExecutionScriptConfigMap := &tfv1alpha1.ConfigMapSelector{
			Name: have.Spec.SetupRunnerExecutionScriptConfigMap.Name,
			Key:  have.Spec.SetupRunnerExecutionScriptConfigMap.Key,
		}
		convertRunScriptsToTaskConfigMapSelector(setupRunnerExecutionScriptConfigMap, tfv1alpha2.RunSetup, &want.Spec.TaskOptions)
		convertRunScriptsToTaskConfigMapSelector(setupRunnerExecutionScriptConfigMap, tfv1alpha2.RunSetupDelete, &want.Spec.TaskOptions)
	}

	// Each pre/post hook script becomes a TaskOption scoped to its task.
	convertRunScriptsToTaskInlineScripts(have.Spec.PreInitScript, tfv1alpha2.RunPreInit, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostInitScript, tfv1alpha2.RunPostInit, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PrePlanScript, tfv1alpha2.RunPrePlan, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostPlanScript, tfv1alpha2.RunPostPlan, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PreApplyScript, tfv1alpha2.RunPreApply, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostApplyScript, tfv1alpha2.RunPostApply, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PreInitDeleteScript, tfv1alpha2.RunPreInitDelete, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostInitDeleteScript, tfv1alpha2.RunPostInitDelete, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PrePlanDeleteScript, tfv1alpha2.RunPrePlanDelete, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostPlanDeleteScript, tfv1alpha2.RunPostPlanDelete, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PreApplyDeleteScript, tfv1alpha2.RunPreApplyDelete, &want.Spec.TaskOptions)
	convertRunScriptsToTaskInlineScripts(have.Spec.PostApplyDeleteScript, tfv1alpha2.RunPostApplyDelete, &want.Spec.TaskOptions)

	if want.Spec.Setup == nil {
		want.Spec.Setup = &tfv1alpha2.Setup{}
	}
	want.Spec.Setup.CleanupDisk = have.Spec.CleanupDisk
	for _, resourceDownload := range have.Spec.ResourceDownloads {
		if resourceDownload != nil {
			want.Spec.Setup.ResourceDownloads = append(want.Spec.Setup.ResourceDownloads, tfv1alpha2.ResourceDownload(*resourceDownload))
		}
	}

	// Status is very important so TFO can continue where it left from last version
	want.Status.PodNamePrefix = have.Status.PodNamePrefix
	want.Status.Stages = []tfv1alpha2.Stage{}
	for _, stage := range have.Status.Stages {
		want.Status.Stages = append(want.Status.Stages, tfv1alpha2.Stage{
			Generation:    stage.Generation,
			State:         tfv1alpha2.StageState(stage.State),
			TaskType:      TaskNameFromPodType(stage.PodType),
			Interruptible: tfv1alpha2.Interruptible(stage.Interruptible),
			Reason:        stage.Reason,
			StartTime:     stage.StartTime,
			StopTime:      stage.StopTime,
		})
	}
	// v1alpha2 tracks the single current stage explicitly; derive it from the
	// last entry of the v1alpha1 stage list.
	if len(have.Status.Stages) > 0 {
		lastStage := have.Status.Stages[len(have.Status.Stages)-1]
		want.Status.Stage = tfv1alpha2.Stage{
			Generation:    lastStage.Generation,
			State:         tfv1alpha2.StageState(lastStage.State),
			TaskType:      TaskNameFromPodType(lastStage.PodType),
			Interruptible: tfv1alpha2.Interruptible(lastStage.Interruptible),
			Reason:        lastStage.Reason,
			StartTime:     lastStage.StartTime,
			StopTime:      lastStage.StopTime,
			PodName:       "",
			Message:       "",
		}
	}
	want.Status.Phase = tfv1alpha2.StatusPhase(have.Status.Phase)
	want.Status.Plugins = []tfv1alpha2.TaskName{}
	want.Status.LastCompletedGeneration = have.Status.LastCompletedGeneration

	// Finally store the original v1alpha1 contents in its entirety into an annotation that can be used to
	// load into the old resource to continue to work with v1alpha1
	if want.ObjectMeta.Annotations == nil {
		want.ObjectMeta.Annotations = map[string]string{}
	}
	// Marshal errors are ignored here: both values were just produced from a
	// successful unmarshal / plain struct assembly. NOTE(review): confirm this
	// best-effort behavior is intended before tightening it.
	v1alpha1TerraformJSON, _ := json.Marshal(have)
	want.ObjectMeta.Annotations["tf.isaaguilar.com/v1alpha1_terraforms"] = string(v1alpha1TerraformJSON)
	rawResponse, _ := json.Marshal(want)
	return rawResponse, &want, nil
}
// podTypeToTaskName maps each v1alpha1 pod type to the equivalent v1alpha2
// task name. Declared at package level so the table is built once instead of
// being allocated on every call (the original rebuilt this map per lookup).
var podTypeToTaskName = map[tfv1alpha1.PodType]tfv1alpha2.TaskName{
	tfv1alpha1.PodSetupDelete:     tfv1alpha2.RunSetupDelete,
	tfv1alpha1.PodPreInitDelete:   tfv1alpha2.RunPreInitDelete,
	tfv1alpha1.PodInitDelete:      tfv1alpha2.RunInitDelete,
	tfv1alpha1.PodPostInitDelete:  tfv1alpha2.RunPostInitDelete,
	tfv1alpha1.PodPrePlanDelete:   tfv1alpha2.RunPrePlanDelete,
	tfv1alpha1.PodPlanDelete:      tfv1alpha2.RunPlanDelete,
	tfv1alpha1.PodPostPlanDelete:  tfv1alpha2.RunPostPlanDelete,
	tfv1alpha1.PodPreApplyDelete:  tfv1alpha2.RunPreApplyDelete,
	tfv1alpha1.PodApplyDelete:     tfv1alpha2.RunApplyDelete,
	tfv1alpha1.PodPostApplyDelete: tfv1alpha2.RunPostApplyDelete,
	tfv1alpha1.PodSetup:           tfv1alpha2.RunSetup,
	tfv1alpha1.PodPreInit:         tfv1alpha2.RunPreInit,
	tfv1alpha1.PodInit:            tfv1alpha2.RunInit,
	tfv1alpha1.PodPostInit:        tfv1alpha2.RunPostInit,
	tfv1alpha1.PodPrePlan:         tfv1alpha2.RunPrePlan,
	tfv1alpha1.PodPlan:            tfv1alpha2.RunPlan,
	tfv1alpha1.PodPostPlan:        tfv1alpha2.RunPostPlan,
	tfv1alpha1.PodPreApply:        tfv1alpha2.RunPreApply,
	tfv1alpha1.PodApply:           tfv1alpha2.RunApply,
	tfv1alpha1.PodPostApply:       tfv1alpha2.RunPostApply,
	tfv1alpha1.PodNil:             tfv1alpha2.RunNil,
}

// TaskNameFromPodType converts a v1alpha1 PodType into the equivalent
// v1alpha2 TaskName. Unknown pod types yield the TaskName zero value, as in
// the original map-literal implementation.
func TaskNameFromPodType(podType tfv1alpha1.PodType) tfv1alpha2.TaskName {
	return podTypeToTaskName[podType]
}
// convertRunScriptsToTaskConfigMapSelector appends a TaskOption for the given
// task whose script is sourced from configMapSelector. A nil selector is a
// no-op.
func convertRunScriptsToTaskConfigMapSelector(configMapSelector *tfv1alpha1.ConfigMapSelector, task tfv1alpha2.TaskName, taskOptions *[]tfv1alpha2.TaskOption) {
	if configMapSelector == nil {
		return
	}
	option := tfv1alpha2.TaskOption{
		For: []tfv1alpha2.TaskName{task},
		Script: tfv1alpha2.StageScript{
			ConfigMapSelector: (*tfv1alpha2.ConfigMapSelector)(configMapSelector),
		},
	}
	*taskOptions = append(*taskOptions, option)
}
// convertRunScriptsToTaskInlineScripts appends a TaskOption for the given
// task whose script is the provided inline text. An empty script is a no-op.
func convertRunScriptsToTaskInlineScripts(inlineScript string, task tfv1alpha2.TaskName, taskOptions *[]tfv1alpha2.TaskOption) {
	if inlineScript == "" {
		return
	}
	option := tfv1alpha2.TaskOption{
		For: []tfv1alpha2.TaskName{task},
		Script: tfv1alpha2.StageScript{
			Inline: inlineScript,
		},
	}
	*taskOptions = append(*taskOptions, option)
}
// convertImageConfig builds a v1alpha2 ImageConfig from the v1alpha1 runner
// fields (repo, version, pull policy) for the given imageType ("script",
// "terraform" or "setup"). It returns nil when none of the three fields were
// customized, so v1alpha2 defaulting applies instead.
//
// BUG FIX: the original declared `var imagePullPolicy corev1.PullPolicy`
// inside the if-block, shadowing the parameter; a caller-supplied pull policy
// was therefore always discarded and replaced with "Always". The shadowing
// declaration has been removed so the parameter value is honored.
func convertImageConfig(imageType, repo, version string, imagePullPolicy corev1.PullPolicy) *tfv1alpha2.ImageConfig {
	// Default repos/tags per image type, used when the v1alpha1 spec left the
	// corresponding field empty.
	defaults := map[string]map[string]string{
		"script": {
			"tag":   tfv1alpha2.ScriptTaskImageTagDefault,
			"image": tfv1alpha2.ScriptTaskImageRepoDefault,
		},
		"terraform": {
			"image": tfv1alpha2.TerraformTaskImageRepoDefault,
		},
		"setup": {
			"image": tfv1alpha2.SetupTaskImageRepoDefault,
			"tag":   tfv1alpha2.SetupTaskImageTagDefault,
		},
	}
	if repo == "" && version == "" && imagePullPolicy == "" {
		// Nothing was customized in v1alpha1.
		return nil
	}
	image := repo
	if image == "" {
		if value, ok := defaults[imageType]["image"]; ok {
			image = value
		}
	}
	if version != "" {
		image += ":" + version
	} else if value, ok := defaults[imageType]["tag"]; ok {
		image += ":" + value
	}
	if imagePullPolicy == "" {
		imagePullPolicy = "Always"
	}
	return &tfv1alpha2.ImageConfig{
		Image:           image,
		ImagePullPolicy: imagePullPolicy,
	}
}
|
package singleton
import (
"fmt"
"log"
"sort"
"strings"
"sync"
"time"
"github.com/naiba/nezha/model"
pb "github.com/naiba/nezha/proto"
"github.com/nicksnyder/go-i18n/v2/i18n"
)
const (
	// _CurrentStatusSize is the number of most recent task results kept per
	// monitor; roughly the last 15 minutes of data form the "current" status.
	_CurrentStatusSize = 30
)

// ServiceSentinelShared is the package-wide singleton service monitor.
var ServiceSentinelShared *ServiceSentinel

// ReportData is a single service-check result together with the ID of the
// server (agent) that reported it.
type ReportData struct {
	Data     *pb.TaskResult
	Reporter uint64
}
// _TodayStatsOfMonitor holds today's aggregated statistics for one monitor.
type _TodayStatsOfMonitor struct {
	Up    int     // count of successful checks today
	Down  int     // count of failed checks today
	Delay float32 // average delay of today's successful checks
}
// NewServiceSentinel creates the shared service monitor singleton, restores
// historical and today's statistics from the database, starts the worker
// goroutine, and schedules the daily roll-over of the 30-day window.
// It panics if the cron registration fails.
func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Monitor) {
	ServiceSentinelShared = &ServiceSentinel{
		serviceReportChannel:                    make(chan ReportData, 200),
		serviceStatusToday:                      make(map[uint64]*_TodayStatsOfMonitor),
		serviceCurrentStatusIndex:               make(map[uint64]int),
		serviceCurrentStatusData:                make(map[uint64][]*pb.TaskResult),
		lastStatus:                              make(map[uint64]int),
		serviceResponseDataStoreCurrentUp:       make(map[uint64]uint64),
		serviceResponseDataStoreCurrentDown:     make(map[uint64]uint64),
		serviceResponseDataStoreCurrentAvgDelay: make(map[uint64]float32),
		monitors:                                make(map[uint64]*model.Monitor),
		sslCertCache:                            make(map[uint64]string),
		// 30-day data cache
		monthlyStatus: make(map[uint64]*model.ServiceItemResponse),
		dispatchBus:   serviceSentinelDispatchBus,
	}
	// Load historical records (also seeds the per-monitor map entries).
	ServiceSentinelShared.loadMonitorHistory()

	year, month, day := time.Now().Date()
	today := time.Date(year, month, day, 0, 0, 0, 0, Loc)

	var mhs []model.MonitorHistory
	// Load today's records.
	DB.Where("created_at >= ?", today).Find(&mhs)
	totalDelay := make(map[uint64]float32)
	totalDelayCount := make(map[uint64]float32)
	for i := 0; i < len(mhs); i++ {
		monitorID := mhs[i].MonitorID
		// Robustness fix: history rows may reference a monitor that has since
		// been deleted; loadMonitorHistory only seeds map entries for monitors
		// still in the DB, so the original code nil-dereferenced here and
		// panicked on startup.
		if ServiceSentinelShared.serviceStatusToday[monitorID] == nil ||
			ServiceSentinelShared.monthlyStatus[monitorID] == nil {
			continue
		}
		totalDelay[monitorID] += mhs[i].AvgDelay
		totalDelayCount[monitorID]++
		ServiceSentinelShared.serviceStatusToday[monitorID].Up += int(mhs[i].Up)
		ServiceSentinelShared.monthlyStatus[monitorID].TotalUp += mhs[i].Up
		ServiceSentinelShared.serviceStatusToday[monitorID].Down += int(mhs[i].Down)
		ServiceSentinelShared.monthlyStatus[monitorID].TotalDown += mhs[i].Down
	}
	for id, delay := range totalDelay {
		ServiceSentinelShared.serviceStatusToday[id].Delay = delay / float32(totalDelayCount[id])
	}

	// Start the service monitor worker.
	go ServiceSentinelShared.worker()

	// Roll the 30-day window forward by one day at midnight.
	_, err := Cron.AddFunc("0 0 0 * * *", ServiceSentinelShared.refreshMonthlyServiceStatus)
	if err != nil {
		panic(err)
	}
}
/*
ServiceSentinel consumes service-check results reported through a buffered
channel, updates the cached status, and decides whether alerts need to fire.
It keeps each monitor's previous status for change detection.

Lock acquisition order: serviceResponseDataStoreLock > monthlyStatusLock > monitorsLock
*/
type ServiceSentinel struct {
	// Channel on which agents' service-check results are reported.
	serviceReportChannel chan ReportData
	// Channel used to dispatch monitoring tasks to agents.
	dispatchBus chan<- model.Monitor

	serviceResponseDataStoreLock            sync.RWMutex
	serviceStatusToday                      map[uint64]*_TodayStatsOfMonitor // [monitor_id] -> today's aggregated stats
	serviceCurrentStatusIndex               map[uint64]int                   // [monitor_id] -> latest write index into serviceCurrentStatusData
	serviceCurrentStatusData                map[uint64][]*pb.TaskResult      // [monitor_id] -> ring buffer of the latest results
	serviceResponseDataStoreCurrentUp       map[uint64]uint64                // [monitor_id] -> current up count
	serviceResponseDataStoreCurrentDown     map[uint64]uint64                // [monitor_id] -> current down count
	serviceResponseDataStoreCurrentAvgDelay map[uint64]float32               // [monitor_id] -> current average delay (original comment mistakenly said "down count")
	lastStatus                              map[uint64]int                   // [monitor_id] -> last computed status code
	sslCertCache                            map[uint64]string

	monitorsLock sync.RWMutex
	monitors     map[uint64]*model.Monitor // [monitor_id] -> model.Monitor

	// 30-day data cache.
	monthlyStatusLock sync.Mutex
	monthlyStatus     map[uint64]*model.ServiceItemResponse // [monitor_id] -> model.ServiceItemResponse
}
// refreshMonthlyServiceStatus rolls every monitor's 30-day window forward by
// one day (dropping the oldest day) and resets the per-day counters. It is
// scheduled to run once at midnight.
func (ss *ServiceSentinel) refreshMonthlyServiceStatus() {
	// Refresh the cached stats first so the data is current even if nobody
	// has visited recently.
	ss.LoadStats()

	// Shift the data forward by one day.
	ss.serviceResponseDataStoreLock.Lock()
	defer ss.serviceResponseDataStoreLock.Unlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	for monitorID, status := range ss.monthlyStatus {
		// Remove the day that just fell out of the 30-day uptime window.
		status.TotalDown -= uint64(status.Down[0])
		status.TotalUp -= uint64(status.Up[0])
		// Shift indices 1..29 down to 0..28 (same as the original
		// element-by-element loop).
		copy(status.Up[:], status.Up[1:])
		copy(status.Down[:], status.Down[1:])
		copy(status.Delay[:], status.Delay[1:])
		status.Up[29] = 0
		status.Down[29] = 0
		status.Delay[29] = 0
		// Clear yesterday's "current" counters.
		ss.serviceResponseDataStoreCurrentUp[monitorID] = 0
		ss.serviceResponseDataStoreCurrentDown[monitorID] = 0
		ss.serviceResponseDataStoreCurrentAvgDelay[monitorID] = 0
		ss.serviceStatusToday[monitorID].Delay = 0
		ss.serviceStatusToday[monitorID].Up = 0
		ss.serviceStatusToday[monitorID].Down = 0
	}
}
// Dispatch forwards the given ReportData to the service report channel, where
// the worker goroutine consumes it. Blocks when the channel buffer is full.
func (ss *ServiceSentinel) Dispatch(r ReportData) {
	ss.serviceReportChannel <- r
}
// Monitors returns all registered monitors sorted by ascending ID.
// The returned slice is nil when no monitors are registered (same as the
// original behavior).
func (ss *ServiceSentinel) Monitors() []*model.Monitor {
	ss.monitorsLock.RLock()
	defer ss.monitorsLock.RUnlock()
	var result []*model.Monitor
	for _, monitor := range ss.monitors {
		result = append(result, monitor)
	}
	sort.SliceStable(result, func(a, b int) bool {
		return result[a].ID < result[b].ID
	})
	return result
}
// loadMonitorHistory restores monitor definitions and the previous 29 days of
// check history from the database into the in-memory caches, and schedules
// each monitor's periodic check via cron.
// (NOTE(review): the original header comment mis-named this "LoadStats".)
func (ss *ServiceSentinel) loadMonitorHistory() {
	var monitors []*model.Monitor
	err := DB.Find(&monitors).Error
	if err != nil {
		panic(err)
	}
	ss.serviceResponseDataStoreLock.Lock()
	defer ss.serviceResponseDataStoreLock.Unlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	for i := 0; i < len(monitors); i++ {
		// Older versions may have no notification group; assign the default.
		if monitors[i].NotificationTag == "" {
			monitors[i].NotificationTag = "default"
			DB.Save(monitors[i])
		}
		// Copy the monitor so the cron closure captures a stable value rather
		// than the loop's shared pointer target.
		task := *monitors[i]
		// Periodically push the monitoring task onto the dispatch bus.
		monitors[i].CronJobID, err = Cron.AddFunc(task.CronSpec(), func() {
			ss.dispatchBus <- task
		})
		if err != nil {
			panic(err)
		}
		ss.monitors[monitors[i].ID] = monitors[i]
		ss.serviceCurrentStatusData[monitors[i].ID] = make([]*pb.TaskResult, _CurrentStatusSize)
		ss.serviceStatusToday[monitors[i].ID] = &_TodayStatsOfMonitor{}
	}

	year, month, day := time.Now().Date()
	today := time.Date(year, month, day, 0, 0, 0, 0, Loc)

	for i := 0; i < len(monitors); i++ {
		ServiceSentinelShared.monthlyStatus[monitors[i].ID] = &model.ServiceItemResponse{
			Monitor: monitors[i],
			Delay:   &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			Up:      &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			Down:    &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
		}
	}

	// Load the service monitoring history for the previous 29 full days;
	// index 29 of each 30-slot array is reserved for today's live data.
	var mhs []model.MonitorHistory
	DB.Where("created_at > ? AND created_at < ?", today.AddDate(0, 0, -29), today).Find(&mhs)
	var delayCount = make(map[int]int)
	for i := 0; i < len(mhs); i++ {
		// Map a record's age in whole days to slots 0..28 (oldest..yesterday).
		dayIndex := 28 - (int(today.Sub(mhs[i].CreatedAt).Hours()) / 24)
		if dayIndex < 0 {
			continue
		}
		// Incrementally average the day's delay across its history rows.
		ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex] = (ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mhs[i].AvgDelay) / float32(delayCount[dayIndex]+1)
		delayCount[dayIndex]++
		ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Up[dayIndex] += int(mhs[i].Up)
		ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalUp += mhs[i].Up
		ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Down[dayIndex] += int(mhs[i].Down)
		ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalDown += mhs[i].Down
	}
}
// OnMonitorUpdate registers or replaces a monitor: it schedules the new cron
// job, stops the old one if the monitor already existed, and initializes the
// cached state for brand-new monitors. Returns an error only when the cron
// registration fails.
func (ss *ServiceSentinel) OnMonitorUpdate(m model.Monitor) error {
	ss.serviceResponseDataStoreLock.Lock()
	defer ss.serviceResponseDataStoreLock.Unlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	var err error
	// Schedule the new task.
	m.CronJobID, err = Cron.AddFunc(m.CronSpec(), func() {
		ss.dispatchBus <- m
	})
	if err != nil {
		return err
	}
	if ss.monitors[m.ID] != nil {
		// Stop the old task.
		Cron.Remove(ss.monitors[m.ID].CronJobID)
	} else {
		// Initialize cached data for a brand-new task.
		ss.monthlyStatus[m.ID] = &model.ServiceItemResponse{
			Monitor: &m,
			Delay:   &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			Up:      &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			Down:    &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
		}
		ss.serviceCurrentStatusData[m.ID] = make([]*pb.TaskResult, _CurrentStatusSize)
		ss.serviceStatusToday[m.ID] = &_TodayStatsOfMonitor{}
	}
	// Store (or replace) the monitor.
	ss.monitors[m.ID] = &m
	return nil
}
// OnMonitorDelete removes all cached state for the monitor with the given ID
// and stops its scheduled cron job. Unknown IDs are handled gracefully.
func (ss *ServiceSentinel) OnMonitorDelete(id uint64) {
	ss.serviceResponseDataStoreLock.Lock()
	defer ss.serviceResponseDataStoreLock.Unlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	delete(ss.serviceCurrentStatusIndex, id)
	delete(ss.serviceCurrentStatusData, id)
	delete(ss.lastStatus, id)
	delete(ss.serviceResponseDataStoreCurrentUp, id)
	delete(ss.serviceResponseDataStoreCurrentDown, id)
	delete(ss.serviceResponseDataStoreCurrentAvgDelay, id)
	delete(ss.sslCertCache, id)
	delete(ss.serviceStatusToday, id)
	// Robustness fix: guard against an unknown ID or a double delete, which
	// nil-dereferenced ss.monitors[id] in the original code.
	if monitor := ss.monitors[id]; monitor != nil {
		// Stop the scheduled check for this monitor.
		Cron.Remove(monitor.CronJobID)
	}
	delete(ss.monitors, id)
	delete(ss.monthlyStatus, id)
}
// LoadStats refreshes the latest-day slot (index 29) of every monitor's
// 30-day window from today's counters and returns the monthly status cache.
//
// NOTE(review): the returned map is the internal cache itself, not a copy;
// callers should treat it as read-only.
func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceItemResponse {
	ss.serviceResponseDataStoreLock.RLock()
	defer ss.serviceResponseDataStoreLock.RUnlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	// Refresh the latest day's data.
	for k := range ss.monitors {
		ss.monthlyStatus[k].Monitor = ss.monitors[k]
		v := ss.serviceStatusToday[k]
		// 30-day uptime:
		// |- subtract the previously-added copy of today's data to avoid
		//    double counting,
		ss.monthlyStatus[k].TotalUp -= uint64(ss.monthlyStatus[k].Up[29])
		ss.monthlyStatus[k].TotalDown -= uint64(ss.monthlyStatus[k].Down[29])
		// |- then add today's current numbers.
		ss.monthlyStatus[k].TotalUp += uint64(v.Up)
		ss.monthlyStatus[k].TotalDown += uint64(v.Down)
		ss.monthlyStatus[k].Up[29] = v.Up
		ss.monthlyStatus[k].Down[29] = v.Down
		ss.monthlyStatus[k].Delay[29] = v.Delay
	}
	// Fill in the current (most recent window) up/down counts.
	for k, v := range ss.serviceResponseDataStoreCurrentDown {
		ss.monthlyStatus[k].CurrentDown = v
	}
	for k, v := range ss.serviceResponseDataStoreCurrentUp {
		ss.monthlyStatus[k].CurrentUp = v
	}
	return ss.monthlyStatus
}
// worker 服务监控的实际工作流程
func (ss *ServiceSentinel) worker() {
// 从服务状态汇报管道获取汇报的服务数据
for r := range ss.serviceReportChannel {
if ss.monitors[r.Data.GetId()] == nil || ss.monitors[r.Data.GetId()].ID == 0 {
log.Printf("NEZAH>> 错误的服务监控上报 %+v", r)
continue
}
mh := r.Data
ss.serviceResponseDataStoreLock.Lock()
// 写入当天状态
if mh.Successful {
ss.serviceStatusToday[mh.GetId()].Delay = (ss.serviceStatusToday[mh.
GetId()].Delay*float32(ss.serviceStatusToday[mh.GetId()].Up) +
mh.Delay) / float32(ss.serviceStatusToday[mh.GetId()].Up+1)
ss.serviceStatusToday[mh.GetId()].Up++
} else {
ss.serviceStatusToday[mh.GetId()].Down++
}
// 写入当前数据
ss.serviceCurrentStatusData[mh.GetId()][ss.serviceCurrentStatusIndex[mh.GetId()]] = mh
ss.serviceCurrentStatusIndex[mh.GetId()]++
// 更新当前状态
ss.serviceResponseDataStoreCurrentUp[mh.GetId()] = 0
ss.serviceResponseDataStoreCurrentDown[mh.GetId()] = 0
ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()] = 0
// 永远是最新的 30 个数据的状态 [01:00, 02:00, 03:00] -> [04:00, 02:00, 03: 00]
for i := 0; i < len(ss.serviceCurrentStatusData[mh.GetId()]); i++ {
if ss.serviceCurrentStatusData[mh.GetId()][i].GetId() > 0 {
if ss.serviceCurrentStatusData[mh.GetId()][i].Successful {
ss.serviceResponseDataStoreCurrentUp[mh.GetId()]++
ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()] = (ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()]*float32(ss.serviceResponseDataStoreCurrentUp[mh.GetId()]-1) + ss.serviceCurrentStatusData[mh.GetId()][i].Delay) / float32(ss.serviceResponseDataStoreCurrentUp[mh.GetId()])
} else {
ss.serviceResponseDataStoreCurrentDown[mh.GetId()]++
}
}
}
// 计算在线率,
var upPercent uint64 = 0
if ss.serviceResponseDataStoreCurrentDown[mh.GetId()]+ss.serviceResponseDataStoreCurrentUp[mh.GetId()] > 0 {
upPercent = ss.serviceResponseDataStoreCurrentUp[mh.GetId()] * 100 / (ss.serviceResponseDataStoreCurrentDown[mh.GetId()] + ss.serviceResponseDataStoreCurrentUp[mh.GetId()])
}
stateCode := GetStatusCode(upPercent)
// 数据持久化
if ss.serviceCurrentStatusIndex[mh.GetId()] == _CurrentStatusSize {
ss.serviceCurrentStatusIndex[mh.GetId()] = 0
if err := DB.Create(&model.MonitorHistory{
MonitorID: mh.GetId(),
AvgDelay: ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()],
Data: mh.Data,
Up: ss.serviceResponseDataStoreCurrentUp[mh.GetId()],
Down: ss.serviceResponseDataStoreCurrentDown[mh.GetId()],
}).Error; err != nil {
log.Println("NEZHA>> 服务监控数据持久化失败:", err)
}
}
// 延迟报警
if mh.Delay > 0 {
ss.monitorsLock.RLock()
if ss.monitors[mh.GetId()].LatencyNotify {
notificationTag := ss.monitors[mh.GetId()].NotificationTag
minMuteLabel := NotificationMuteLabel.ServiceLatencyMin(mh.GetId())
maxMuteLabel := NotificationMuteLabel.ServiceLatencyMax(mh.GetId())
if mh.Delay > ss.monitors[mh.GetId()].MaxLatency {
// 延迟超过最大值
ServerLock.RLock()
reporterServer := ServerList[r.Reporter]
msg := fmt.Sprintf("[Latency] %s %2f > %2f, Reporter: %s", ss.monitors[mh.GetId()].Name, mh.Delay, ss.monitors[mh.GetId()].MaxLatency, reporterServer.Name)
go SendNotification(notificationTag, msg, minMuteLabel)
ServerLock.RUnlock()
} else if mh.Delay < ss.monitors[mh.GetId()].MinLatency {
// 延迟低于最小值
ServerLock.RLock()
reporterServer := ServerList[r.Reporter]
msg := fmt.Sprintf("[Latency] %s %2f < %2f, Reporter: %s", ss.monitors[mh.GetId()].Name, mh.Delay, ss.monitors[mh.GetId()].MinLatency, reporterServer.Name)
go SendNotification(notificationTag, msg, maxMuteLabel)
ServerLock.RUnlock()
} else {
// 正常延迟, 清除静音缓存
UnMuteNotification(notificationTag, minMuteLabel)
UnMuteNotification(notificationTag, maxMuteLabel)
}
}
ss.monitorsLock.RUnlock()
}
// 状态变更报警+触发任务执行
if stateCode == StatusDown || stateCode != ss.lastStatus[mh.GetId()] {
ss.monitorsLock.Lock()
lastStatus := ss.lastStatus[mh.GetId()]
// 存储新的状态值
ss.lastStatus[mh.GetId()] = stateCode
// 判断是否需要发送通知
isNeedSendNotification := ss.monitors[mh.GetId()].Notify && (lastStatus != 0 || stateCode == StatusDown)
if isNeedSendNotification {
ServerLock.RLock()
reporterServer := ServerList[r.Reporter]
notificationTag := ss.monitors[mh.GetId()].NotificationTag
notificationMsg := fmt.Sprintf("[%s] %s Reporter: %s, Error: %s", StatusCodeToString(stateCode), ss.monitors[mh.GetId()].Name, reporterServer.Name, mh.Data)
muteLabel := NotificationMuteLabel.ServiceStateChanged(mh.GetId())
// 状态变更时,清除静音缓存
if stateCode != lastStatus {
UnMuteNotification(notificationTag, muteLabel)
}
go SendNotification(notificationTag, notificationMsg, muteLabel)
ServerLock.RUnlock()
}
// 判断是否需要触发任务
isNeedTriggerTask := ss.monitors[mh.GetId()].EnableTriggerTask && lastStatus != 0
if isNeedTriggerTask {
ServerLock.RLock()
reporterServer := ServerList[r.Reporter]
ServerLock.RUnlock()
if stateCode == StatusGood && lastStatus != stateCode {
// 当前状态正常 前序状态非正常时 触发恢复任务
go SendTriggerTasks(ss.monitors[mh.GetId()].RecoverTriggerTasks, reporterServer.ID)
} else if lastStatus == StatusGood && lastStatus != stateCode {
// 前序状态正常 当前状态非正常时 触发失败任务
go SendTriggerTasks(ss.monitors[mh.GetId()].FailTriggerTasks, reporterServer.ID)
}
}
ss.monitorsLock.Unlock()
}
ss.serviceResponseDataStoreLock.Unlock()
// SSL 证书报警
var errMsg string
if strings.HasPrefix(mh.Data, "SSL证书错误:") {
// i/o timeout、connection timeout、EOF 错误
if !strings.HasSuffix(mh.Data, "timeout") &&
!strings.HasSuffix(mh.Data, "EOF") &&
!strings.HasSuffix(mh.Data, "timed out") {
errMsg = mh.Data
ss.monitorsLock.RLock()
if ss.monitors[mh.GetId()].Notify {
muteLabel := NotificationMuteLabel.ServiceSSL(mh.GetId(), "network")
go SendNotification(ss.monitors[mh.GetId()].NotificationTag, fmt.Sprintf("[SSL] Fetch cert info failed, %s %s", ss.monitors[mh.GetId()].Name, errMsg), muteLabel)
}
ss.monitorsLock.RUnlock()
}
} else {
// 清除网络错误静音缓存
UnMuteNotification(ss.monitors[mh.GetId()].NotificationTag, NotificationMuteLabel.ServiceSSL(mh.GetId(), "network"))
var newCert = strings.Split(mh.Data, "|")
if len(newCert) > 1 {
ss.monitorsLock.Lock()
enableNotify := ss.monitors[mh.GetId()].Notify
// 首次获取证书信息时,缓存证书信息
if ss.sslCertCache[mh.GetId()] == "" {
ss.sslCertCache[mh.GetId()] = mh.Data
}
oldCert := strings.Split(ss.sslCertCache[mh.GetId()], "|")
isCertChanged := false
expiresOld, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", oldCert[1])
expiresNew, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", newCert[1])
// 证书变更时,更新缓存
if oldCert[0] != newCert[0] && !expiresNew.Equal(expiresOld) {
isCertChanged = true
ss.sslCertCache[mh.GetId()] = mh.Data
}
notificationTag := ss.monitors[mh.GetId()].NotificationTag
serviceName := ss.monitors[mh.GetId()].Name
ss.monitorsLock.Unlock()
// 需要发送提醒
if enableNotify {
// 证书过期提醒
if expiresNew.Before(time.Now().AddDate(0, 0, 7)) {
expiresTimeStr := expiresNew.Format("2006-01-02 15:04:05")
errMsg = fmt.Sprintf(
"The SSL certificate will expire within seven days. Expiration time: %s",
expiresTimeStr,
)
// 静音规则: 服务id+证书过期时间
// 用于避免多个监测点对相同证书同时报警
muteLabel := NotificationMuteLabel.ServiceSSL(mh.GetId(), fmt.Sprintf("expire_%s", expiresTimeStr))
go SendNotification(notificationTag, fmt.Sprintf("[SSL] %s %s", serviceName, errMsg), muteLabel)
}
// 证书变更提醒
if isCertChanged {
errMsg = fmt.Sprintf(
"SSL certificate changed, old: %s, %s expired; new: %s, %s expired.",
oldCert[0], expiresOld.Format("2006-01-02 15:04:05"), newCert[0], expiresNew.Format("2006-01-02 15:04:05"))
// 证书变更后会自动更新缓存,所以不需要静音
go SendNotification(notificationTag, fmt.Sprintf("[SSL] %s %s", serviceName, errMsg), nil)
}
}
}
}
}
}
// Service status codes, ordered by increasing severity. The blank first
// iota slot starts the enumeration at 1, so the zero value can mean
// "no status recorded yet" (see the lastStatus handling in worker).
const (
	_ = iota
	StatusNoData
	StatusGood
	StatusLowAvailability
	StatusDown
)
// GetStatusCode maps an availability percentage to a service status code:
// 0 means no data, above 95 is good, above 80 is low availability, and
// everything else counts as down.
func GetStatusCode[T float32 | uint64](percent T) int {
	switch {
	case percent == 0:
		return StatusNoData
	case percent > 95:
		return StatusGood
	case percent > 80:
		return StatusLowAvailability
	default:
		return StatusDown
	}
}
// StatusCodeToString returns the localized, human-readable name of a
// status code, or the empty string for an unknown code.
func StatusCodeToString(statusCode int) string {
	var messageID string
	switch statusCode {
	case StatusNoData:
		messageID = "StatusNoData"
	case StatusGood:
		messageID = "StatusGood"
	case StatusLowAvailability:
		messageID = "StatusLowAvailability"
	case StatusDown:
		messageID = "StatusDown"
	default:
		return ""
	}
	return Localizer.MustLocalize(&i18n.LocalizeConfig{MessageID: messageID})
}
|
package material
import "github.com/galaco/gosigl"
// getGLTextureFormat swap vtf format to openGL format
func GLTextureFormatFromVtfFormat(vtfFormat uint32) gosigl.PixelFormat {
switch vtfFormat {
case 0:
return gosigl.RGBA
case 2:
return gosigl.RGB
case 3:
return gosigl.BGR
case 12:
return gosigl.BGRA
case 13:
return gosigl.DXT1
case 14:
return gosigl.DXT3
case 15:
return gosigl.DXT5
default:
return gosigl.RGB
}
}
|
package lxn
import (
"io/ioutil"
schema "github.com/liblxn/lxn/schema/golang"
"github.com/liblxn/lxnc/internal/locale"
)
// input is a single compilation source: the raw contents plus the file it
// came from. filename is empty for in-memory data (see Compile).
type input struct {
	filename string
	bytes []byte
}
// Compile parses the given input and determines the locale information which is need
// for formatting data.
func Compile(loc locale.Locale, data ...[]byte) (schema.Catalog, error) {
inputs := make([]input, 0, len(data))
for _, bytes := range data {
inputs = append(inputs, input{bytes: bytes})
}
return compile(loc, inputs)
}
// CompileFile parses the given file and determines the locale information which is need
// for formatting data.
func CompileFile(loc locale.Locale, filenames ...string) (schema.Catalog, error) {
inputs := make([]input, 0, len(filenames))
for _, filename := range filenames {
bytes, err := ioutil.ReadFile(filename)
if err != nil {
return schema.Catalog{}, err
}
inputs = append(inputs, input{filename: filename, bytes: bytes})
}
return compile(loc, inputs)
}
// compile parses every input in order and assembles the resulting catalog
// for the given locale. The first parse error aborts compilation.
func compile(loc locale.Locale, inputs []input) (schema.Catalog, error) {
	var (
		p        parser
		messages []schema.Message
	)
	for _, in := range inputs {
		parsed, err := p.Parse(in.filename, in.bytes)
		if err != nil {
			return schema.Catalog{}, err
		}
		messages = append(messages, parsed...)
	}
	// The locale block carries every format the catalog may need:
	// numbers, money, percentages and both plural categories.
	catalog := schema.Catalog{Messages: messages}
	catalog.Locale = schema.Locale{
		ID:              loc.String(),
		DecimalFormat:   newNumberFormat(locale.DecimalFormat(loc)),
		MoneyFormat:     newNumberFormat(locale.MoneyFormat(loc)),
		PercentFormat:   newNumberFormat(locale.PercentFormat(loc)),
		CardinalPlurals: newPlurals(locale.CardinalPlural(loc)),
		OrdinalPlurals:  newPlurals(locale.OrdinalPlural(loc)),
	}
	return catalog, nil
}
// newNumberFormat converts a locale number format into its schema
// representation, flattening symbols, affixes, digit limits and grouping
// sizes into one struct.
func newNumberFormat(nf locale.NumberFormat) schema.NumberFormat {
	symbols := nf.Symbols()
	posAffixes := nf.PositiveAffixes()
	negAffixes := nf.NegativeAffixes()
	intGrouping := nf.IntegerGrouping()
	fracGrouping := nf.FractionGrouping()
	return schema.NumberFormat{
		Symbols: schema.Symbols{
			Decimal: symbols.Decimal,
			Group: symbols.Group,
			Percent: symbols.Percent,
			Minus: symbols.Minus,
			Inf: symbols.Inf,
			Nan: symbols.NaN,
			Zero: uint32(symbols.Zero),
		},
		PositivePrefix: posAffixes.Prefix,
		PositiveSuffix: posAffixes.Suffix,
		NegativePrefix: negAffixes.Prefix,
		NegativeSuffix: negAffixes.Suffix,
		MinIntegerDigits: nf.MinIntegerDigits(),
		MinFractionDigits: nf.MinFractionDigits(),
		MaxFractionDigits: nf.MaxFractionDigits(),
		PrimaryIntegerGrouping: intGrouping.Primary,
		SecondaryIntegerGrouping: intGrouping.Secondary,
		// Only the primary fraction grouping is carried over; the schema
		// has a single FractionGrouping field.
		FractionGrouping: fracGrouping.Primary,
	}
}
// newPlurals converts locale plural data into schema plurals. Categories
// are collected in order up to (but excluding) the "other" tag, which
// carries no explicit rules.
func newPlurals(p locale.Plural) []schema.Plural {
	var res []schema.Plural
	for i := 0; i < len(p) && p[i].Tag != locale.Other; i++ {
		rules := schema.Plural{Tag: schema.PluralTag(p[i].Tag)}
		p[i].Iter(func(r locale.PluralRule) bool {
			rules.Rules = append(rules.Rules, newPluralRule(r))
			return true
		})
		// Bug fix: the assembled plural was previously discarded, so the
		// function always returned an empty slice.
		res = append(res, rules)
	}
	return res
}
// newPluralRule converts a single locale plural rule into its schema form.
func newPluralRule(r locale.PluralRule) schema.PluralRule {
	count := r.Ranges.Len()
	ranges := make([]schema.Range, count)
	for i := range ranges {
		ranges[i] = schema.Range(r.Ranges.At(i))
	}
	// A modulo exponent of k means "operand mod 10^k"; zero means the rule
	// applies no modulo at all.
	mod := 0
	if r.ModuloExp > 0 {
		mod = 1
		for i := 0; i < r.ModuloExp; i++ {
			mod *= 10
		}
	}
	return schema.PluralRule{
		Operand:    schema.Operand(r.Operand),
		Modulo:     mod,
		Negate:     r.Operator == locale.NotEqual,
		Ranges:     ranges,
		Connective: schema.Connective(r.Connective),
	}
}
|
package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/handler/transport"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/go-chi/chi"
"github.com/gorilla/websocket"
"github.com/padulkemid/pingpos/config"
"github.com/padulkemid/pingpos/graph"
"github.com/padulkemid/pingpos/graph/generated"
"github.com/padulkemid/pingpos/auth"
"github.com/rs/cors"
)
// defaultPort is the HTTP listen port used when $PORT is unset.
const defaultPort = "4000"
// main wires up the GraphQL server: database connection, CORS and auth
// middleware, the websocket transport for subscriptions, the playground,
// and finally the HTTP listener.
func main() {
	config.Connection()

	// Resolve the listen port, falling back to the default.
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}

	router := chi.NewRouter()

	// CORS: only allow the local frontend origin, with credentials.
	routerOptions := cors.Options{
		AllowedOrigins:   []string{"http://localhost:" + port},
		AllowCredentials: true,
	}
	newRouter := cors.New(routerOptions).Handler

	// apply middleware
	router.Use(auth.Middleware())
	router.Use(newRouter)

	srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))

	// Websocket transport for GraphQL subscriptions.
	websocketTransport := &transport.Websocket{
		Upgrader: websocket.Upgrader{
			CheckOrigin: func(r *http.Request) bool {
				// add domains here
				return r.Host == "herokuapp.com"
			},
			ReadBufferSize:  1024,
			WriteBufferSize: 1024,
		},
	}
	// Add the transport request
	srv.AddTransport(websocketTransport)

	router.Handle("/", playground.Handler("GraphQL playground", "/query"))
	router.Handle("/query", srv)

	// Bug fix: log before serving — http.ListenAndServe blocks until the
	// server fails, so a log statement placed after it never runs while
	// the server is actually up.
	log.Printf("server is started on http://localhost:%s", port)
	err := http.ListenAndServe(":"+port, router)
	if err != nil {
		panic(err)
	}
}
|
package module
import (
"fmt"
"io"
"github.com/dnaeon/gru/graph"
)
// ImportGraph creates a DAG graph of the
// module imports for a given module.
// The resulting DAG graph can be used to determine the
// proper ordering of modules and also to detect whether
// we have circular imports in our modules.
// ImportGraph creates a DAG graph of the
// module imports for a given module.
// The resulting DAG graph can be used to determine the
// proper ordering of modules and also to detect whether
// we have circular imports in our modules.
func ImportGraph(main, path string) (*graph.Graph, error) {
	g := graph.New()

	config := &Config{
		Path: path,
	}

	modules, err := DiscoverAndLoad(config)
	if err != nil {
		return g, err
	}

	if _, ok := modules[main]; !ok {
		return g, fmt.Errorf("Module %s not found in module path", main)
	}

	// A map containing the modules as graph nodes
	// The graph can be used to determine if we have
	// circular module imports and also to provide the
	// proper ordering of loading modules after a
	// topological sort of the graph nodes
	nodes := make(map[string]*graph.Node)
	for n := range modules {
		node := graph.NewNode(n)
		nodes[n] = node
	}

	// Recursively find all imports that the main module has
	var buildImportGraphFunc func(m *Module) error
	buildImportGraphFunc = func(m *Module) error {
		// Add the node to the graph if it is not present already;
		// an already-visited module also terminates the recursion.
		if _, ok := g.GetNode(m.Name); !ok {
			g.AddNode(nodes[m.Name])
		} else {
			return nil
		}

		// Build the import graph for each imported module
		for _, mi := range m.Imports {
			if _, ok := modules[mi.Name]; !ok {
				return fmt.Errorf("Module %s imports %s, which is not in the module path", m.Name, mi.Name)
			}

			// Bug fix: propagate errors from the recursive call — previously
			// a missing transitive import was silently ignored.
			if err := buildImportGraphFunc(modules[mi.Name]); err != nil {
				return err
			}

			// Finally connect the nodes in the graph
			g.AddEdge(nodes[m.Name], nodes[mi.Name])
		}

		return nil
	}

	if err := buildImportGraphFunc(modules[main]); err != nil {
		return g, err
	}

	return g, nil
}
// ImportGraphAsDot creates a DOT representation of the module imports
func ImportGraphAsDot(main, path string, w io.Writer) error {
g, err := ImportGraph(main, path)
if err != nil {
return err
}
g.AsDot("modules", w)
// Try a topological sort of the graph
// In case of circular dependencies in the graph
// generate a DOT for the remaining nodes in the graph,
// which would give us the modules causing circular dependencies
if nodes, err := g.Sort(); err == graph.ErrCircularDependency {
circular := graph.New()
circular.AddNode(nodes...)
circular.AsDot("modules_circular", w)
}
return nil
}
|
package main
import "fmt"
// main demonstrates Go array comparability: arrays with identical element
// type AND length compare element-wise with ==, while arrays of different
// lengths are distinct types and cannot be compared at all.
func main() {
	blue := [3]int{6, 9, 3}
	red := [3]int{6, 9, 3}
	fmt.Println("Are they equal...", blue == red)

	arr1 := [...]int{1, 2, 3}    //size 3
	arr2 := [...]int{1, 2, 3, 4} //size 4
	_, _ = arr1, arr2
	// fmt.Println("Are they equal...", arr1==arr2); // this comparison throws exception as type of array is not equal
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.