text stringlengths 11 4.05M |
|---|
package util
import (
"regexp"
"strings"
)
// StringMatcher reports whether needle matches haystack under a
// particular matching policy.
type StringMatcher func(haystack, needle string) bool

// All matches any haystack, regardless of needle.
var All StringMatcher = func(haystack, needle string) bool {
	return true
}

// Equal matches when haystack and needle are identical.
var Equal StringMatcher = func(haystack, needle string) bool {
	return haystack == needle
}

// Contains matches when needle is a substring of haystack.
var Contains StringMatcher = func(haystack, needle string) bool {
	return strings.Contains(haystack, needle)
}

// Prefix matches when haystack starts with needle.
var Prefix StringMatcher = func(haystack, needle string) bool {
	return strings.HasPrefix(haystack, needle)
}

// Suffix matches when haystack ends with needle.
var Suffix StringMatcher = func(haystack, needle string) bool {
	return strings.HasSuffix(haystack, needle)
}

// Regex matches when needle, interpreted as a regular expression,
// matches haystack. An invalid pattern is treated as a non-match.
var Regex StringMatcher = func(haystack, needle string) bool {
	// Early-return form instead of the original if/else-after-return.
	found, err := regexp.MatchString(needle, haystack)
	return err == nil && found
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package tcp
import (
"executors"
)
// QueryState holds the result of the most recently executed query for a
// connection.
type QueryState struct {
	result *executors.Result // nil when no result is stored
}

// SetExecutorResult stores r as the current query result.
func (state *QueryState) SetExecutorResult(r *executors.Result) {
	state.result = r
}

// Empty reports whether no result is currently stored.
func (state *QueryState) Empty() bool {
	return state.result == nil
}

// Reset clears the stored result so the state can be reused.
func (state *QueryState) Reset() {
	state.result = nil
}
|
package main
import (
"database-manager/api"
"database-manager/collections"
"database-manager/configuration"
"fmt"
"log"
"net/http"
"time"
)
// main wires up the HTTP router, loads configuration, initializes the
// database, and serves HTTP until ListenAndServe fails.
func main() {
	httpRouter := api.NewRouter()
	configuration.InitConfig()
	srv := &http.Server{
		Handler: httpRouter,
		Addr:    ":" + fmt.Sprint(configuration.Config.HTTPPort),
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	collections.Database()
	// Log before blocking in ListenAndServe: log.Fatal never returns, so
	// the original post-Fatal Println was unreachable.
	log.Println("listening")
	log.Fatal(srv.ListenAndServe())
}
|
package gitlabClient
import (
"github.com/xanzy/go-gitlab"
"regexp"
"strconv"
)
// weightRE extracts the first bracketed token from an issue title,
// e.g. "[3] Fix bug" -> "3". Compiled once at package scope instead of
// on every ParseWeight call.
var weightRE = regexp.MustCompile(`\[(.*?)\]`)

// ParseWeight returns the integer weight embedded in title as "[N]".
// It returns 0 when no bracketed group is present or when the group is
// not a valid integer.
func (git *GitLab) ParseWeight(title string) int {
	rs := weightRE.FindStringSubmatch(title)
	if len(rs) == 0 {
		return 0
	}
	weight, err := strconv.Atoi(rs[1])
	if err != nil {
		return 0
	}
	return weight
}
// ListIssue returns all issues for the given milestone (scope "all"),
// following pagination until the last page has been fetched.
func (git *GitLab) ListIssue(milestone *string) ([]*gitlab.Issue, error) {
	scope := "all"
	opt := &gitlab.ListIssuesOptions{
		Milestone: milestone,
		Scope:     &scope,
		ListOptions: gitlab.ListOptions{
			PerPage: 100,
			Page:    1,
		},
	}
	// Accumulate issues across pages: the original version returned only
	// the final page's slice and silently dropped all earlier pages.
	var all []*gitlab.Issue
	for {
		issues, resp, err := git.Client.Issues.ListIssues(opt)
		if err != nil {
			return all, err
		}
		all = append(all, issues...)
		// Exit the loop when we've seen all pages.
		if resp.CurrentPage >= resp.TotalPages {
			return all, nil
		}
		// Update the page number to get the next page.
		opt.Page = resp.NextPage
	}
}
|
package p1
// func TestSum(t *testing.T) {
// tests := []struct {
// Input string
// Result int
// }{
// {
// Input: "1122",
// Result: 3,
// },
// {
// Input: "1111",
// Result: 4,
// },
// {
// Input: "1234",
// Result: 0,
// },
// {
// Input: "91212129",
// Result: 9,
// },
// {
// Input: "@a.txt",
// Result: 995,
// },
// }
// for i, test := range tests {
// n := Sum(test.Input)
// if n != test.Result {
// t.Fatalf("a.%d: %s: %d (should be %d)", i, test.Input, n, test.Result)
// }
// }
// }
// func TestSumHalf(t *testing.T) {
// tests := []struct {
// Input string
// Result int
// }{
// {
// Input: "1212",
// Result: 6,
// },
// {
// Input: "1221",
// Result: 0,
// },
// {
// Input: "123425",
// Result: 4,
// },
// {
// Input: "123123",
// Result: 12,
// },
// {
// Input: "12131415",
// Result: 4,
// },
// {
// Input: "@a.txt",
// Result: 1130,
// },
// }
// for i, test := range tests {
// n := Half(test.Input)
// if n != test.Result {
// t.Fatalf("b.%d: %s: %d (should be %d)", i, test.Input, n, test.Result)
// }
// }
// }
|
package LeetCode
import (
"fmt"
)
// Code300 runs the LeetCode problem 300 sample: it prints the length of
// the longest increasing subsequence of a fixed example slice.
func Code300() {
	result := lengthOfLIS([]int{1, 3, 6, 7, 9, 4, 10, 5, 6})
	fmt.Println(result)
}
/*
lengthOfLIS returns the length of the longest strictly increasing
subsequence of an unordered integer slice (LeetCode 300).

Example:

	input:  [10,9,2,5,3,7,101,18]
	output: 4  (one longest subsequence is [2,3,7,101])

Dynamic programming, O(n^2): dp[i] is the length of the longest
increasing subsequence ending at index i. Each dp[i] starts at 1 (the
element by itself) and is extended by any smaller, earlier element.
*/
func lengthOfLIS(nums []int) int {
	// Handle the empty slice before allocating the dp table.
	if len(nums) == 0 {
		return 0
	}
	dp := make([]int, len(nums))
	for i := range nums {
		dp[i] = 1
		for j := 0; j < i; j++ {
			// nums[i] can extend a subsequence ending at j only if it is
			// strictly larger; keep the best extension seen so far.
			if nums[i] > nums[j] && dp[j]+1 > dp[i] {
				dp[i] = dp[j] + 1
			}
		}
	}
	// The answer is the best ending position, not necessarily the last.
	best := dp[0]
	for _, length := range dp {
		if length > best {
			best = length
		}
	}
	return best
}
|
package main
import (
"errors"
"fmt"
"github.com/afex/hystrix-go/hystrix"
"time"
)
/*
main demonstrates hystrix circuit breaking: with a request-volume
threshold of 100 and an error-percent threshold of 10, the breaker opens
once at least 100 requests have been observed and 10% of them failed —
around the 100th request here, since exactly every 10th request errors.
*/
func main() {
	hystrix.ConfigureCommand("my_command", hystrix.CommandConfig{
		RequestVolumeThreshold: 100,
		SleepWindow:            1000,
		ErrorPercentThreshold:  10, // trips when >=100 requests in the window and >=10% of them are errors
	})
	errorCount := 0
	for count := 1; count <= 1000; count++ {
		time.Sleep(time.Millisecond * 10)
		run := func() error {
			if count%10 != 0 {
				fmt.Printf("正常处理, count: %d, time: %s\n", count, time.Now().Format("2006-01-02 15:04:05.000"))
				return nil
			}
			// Every 10th request fails: a steady 10% error rate.
			errorCount++
			return errors.New(fmt.Sprintf("出现业务异常,count:%d, errorCount: %d, errorCount/count: %d, time: %s\n", count, errorCount, int(((float64(errorCount)/float64(count))*100)+0.5), time.Now().Format("2006-01-02 15:04:05.000")))
		}
		fallback := func(e error) error {
			if e == hystrix.ErrCircuitOpen {
				fmt.Printf("出现熔断异常, count: %d, time: %s\n", count, time.Now().Format("2006-01-02 15:04:05.000"))
				return nil
			}
			fmt.Printf("%v", e)
			return nil
		}
		_ = hystrix.Do("my_command", run, fallback)
	}
}
|
package kustomize
import (
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/summary"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
)
// init registers KStatusSummarizer so kstatus-derived state is folded
// into every summary computation.
func init() {
	summary.Summarizers = append(summary.Summarizers, KStatusSummarizer)
}
// KStatusSummarizer folds kstatus's computed status for obj into the
// given summary: in-progress and terminating map to Transitioning,
// failure maps to Error, and any status message is appended. On a
// compute error the summary is returned unchanged.
func KStatusSummarizer(obj data.Object, conditions []summary.Condition, sum summary.Summary) summary.Summary {
	// The third parameter was previously named "summary", which shadowed
	// the summary package inside the function body; renamed to sum.
	result, err := status.Compute(&unstructured.Unstructured{Object: obj})
	if err != nil {
		return sum
	}
	switch result.Status {
	case status.InProgressStatus, status.TerminatingStatus:
		sum.Transitioning = true
	case status.FailedStatus:
		sum.Error = true
	case status.CurrentStatus, status.UnknownStatus:
		// No flag changes for current/unknown.
	}
	if result.Message != "" {
		sum.Message = append(sum.Message, result.Message)
	}
	return sum
}
|
package types
// ImageNameTag bundles the identifiers used for one container image:
// its name in the image configuration, its plain name, its name in the
// local registry, and its tag.
type ImageNameTag struct {
	ImageConfigName string
	ImageName string
	LocalRegistryImageName string
	ImageTag string
}
|
package main
import "sync"
// main66 demonstrates correct WaitGroup usage: Add is called before the
// goroutine is started and Done is deferred inside it. The original
// version incremented the counter again inside the goroutine (too late
// to be observed reliably) and never called Done, so wg.Wait blocked
// forever.
func main66() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		println("hi!")
	}()
	wg.Wait()
	println("exit.")
}
|
package watcher
import "fmt"
// QaBlinkStatusCode enumerates the health states a job can report.
type QaBlinkStatusCode int

const (
	STABLE QaBlinkStatusCode = iota
	UNSTABLE
	FAILED
	UNKNOWN
	DISABLED
)

// QaBlinkState is a job's current status code plus a score and a flag
// marking whether the job is still pending.
type QaBlinkState struct {
	StatusCode QaBlinkStatusCode
	Score      uint8
	Pending    bool
}

// QaBlinkJob is a pollable job: Update refreshes it, State returns its
// latest state, and Id identifies it.
type QaBlinkJob interface {
	Update()
	State() QaBlinkState
	Id() string
}

// String renders the status code as a fixed-width, ANSI-colored label.
// It panics on a value outside the defined constants.
func (code QaBlinkStatusCode) String() string {
	switch code {
	case STABLE:
		return "\033[1;32m STABLE \033[0m"
	case UNSTABLE:
		return "\033[1;33mUNSTABLE\033[0m"
	case FAILED:
		return "\033[1;31m FAILED \033[0m"
	case UNKNOWN:
		return "\033[1;31mUNKNOWN \033[0m"
	case DISABLED:
		return "\033[1;33mDISABLED\033[0m"
	}
	// Include the offending value so an out-of-range code is diagnosable.
	panic(fmt.Sprintf("Unknown status code %d", int(code)))
}

// String renders the state; a pending state gets an extra colored
// background wrapped around its status code.
func (s QaBlinkState) String() string {
	if s.Pending {
		return fmt.Sprintf("\033[1;46m%v\033[0m", s.StatusCode)
	}
	return s.StatusCode.String()
}
|
// Package hamming is a package to implement the calculation of dna
package hamming
import "errors"
// Distance calculates the Hamming distance between two equal-length
// strings: the number of positions at which their bytes differ. It
// returns an error when the lengths differ, since the distance is then
// undefined.
func Distance(a, b string) (dis int, err error) {
	if len(a) != len(b) {
		// Error strings are lowercase per Go convention.
		return 0, errors.New("strands must be of equal length")
	}
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			dis++
		}
	}
	return
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package kernelmeter provides a mechanism for collecting kernel-related
// measurements in parallel with the execution of a test.
//
// Several kernel quantities (e.g page faults, swaps) are exposed via sysfs or
// procfs in the form of counters. We are generally interested in the absolute
// increments of these values over a period of time, and their rate of change.
// A kernelmeter.Meter instance keeps track of the initial values of the
// counters so that deltas can be computed. It also calculates the peak rate
// over an interval. Additionally, various methods are available for reading
// snapshots of other exported kernel quantities.
package kernelmeter
import (
"context"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
// Meter collects kernel performance statistics.
//
// A Meter owns a background goroutine (see start) that samples
// /proc/vmstat once per samplePeriod until Close is called.
type Meter struct {
	isClosed bool // true after the meter has been closed
	stop chan struct{} // closed (by client) to request stop
	stopped chan struct{} // closed by collection goroutine when it exits
	vmsm *vmStatsMeter // tracks various memory manager counters
}
// vmField is an index into vmSample.fields. Each vmstat of interest is
// assigned a fixed vmField.
type vmField int

// The /proc/vmstat fields to be collected (see vmFieldIndices for the
// corresponding /proc/vmstat names).
const (
	pageFaultField vmField = iota
	swapInField
	swapOutField
	oomKillField
	vmFieldsLastField // sentinel: count of tracked fields, not a field itself
)

const vmFieldsLength = int(vmFieldsLastField)

// vmSample contains a snapshot (time + values) from /proc/vmstat.
type vmSample struct {
	time time.Time
	fields [vmFieldsLength]uint64
}

// vmFieldIndices maps the name of a vmstat field to a vmField, which is an
// index into a vmSample.fields vector.
var vmFieldIndices = map[string]vmField{
	"pgmajfault": pageFaultField,
	"pswpin": swapInField,
	"pswpout": swapOutField,
	"oom_kill": oomKillField,
}

const (
	// Length of window for moving averages as a multiple of the sampling period.
	vmCountWindowLength = 10
	// Number of samples in circular buffer. The window has samples at
	// both ends, so for instance a window of length 1 requires 2 samples.
	sampleBufferLength = vmCountWindowLength + 1
)

// vmStatsMeter collects vm counter statistics.
type vmStatsMeter struct {
	startSample vmSample // initial values at collection start
	samples [sampleBufferLength]vmSample // circular buffer of recent samples
	sampleIndex int // index of most recent sample in buffer
	sampleCount int // count of valid samples in buffer (for startup)
	maxRates [vmFieldsLength]float64 // max seen counter rates (delta per second)
	mutex sync.Mutex // for safe access of all variables
}
// reset resets a vmStatsMeter. Should be called immediately after
// acquireSample, so that the latest sample is up to date. Note that this
// resets the start time and max rates seen, but does not modify the
// circular buffer used to compute the moving average.
//
// NOTE(review): this method does not take v.mutex; callers that can run
// concurrently with stats() or the sampling goroutine must hold the
// mutex themselves — confirm all call sites.
func (v *vmStatsMeter) reset() {
	// The most recent sample becomes the new baseline for deltas.
	v.startSample = v.samples[v.sampleIndex]
	for i := range v.maxRates {
		v.maxRates[i] = 0.0
	}
}
// updateMaxRates updates the max rate of increase seen for each counter.
// The rate for each field is computed between the two most recent
// samples in the circular buffer and folded into maxRates.
func (v *vmStatsMeter) updateMaxRates() {
	currentTime := v.samples[v.sampleIndex].time
	// Step back one slot, wrapping around the circular buffer.
	previousIndex := (v.sampleIndex - 1 + sampleBufferLength) % sampleBufferLength
	previousTime := v.samples[previousIndex].time
	for i := 0; i < vmFieldsLength; i++ {
		currentCount := v.samples[v.sampleIndex].fields[i]
		previousCount := v.samples[previousIndex].fields[i]
		// Counter delta per second over the last sampling interval.
		rate := float64(currentCount-previousCount) / currentTime.Sub(previousTime).Seconds()
		if rate > v.maxRates[i] {
			v.maxRates[i] = rate
		}
	}
}
// acquireSample adds a new sample to the circular buffer, and tracks the
// number of valid entries in the buffer.
func (v *vmStatsMeter) acquireSample() {
	// sampleCount saturates at the buffer length once the buffer is full.
	if v.sampleCount < sampleBufferLength {
		v.sampleCount++
	}
	// Advance the write position (wrapping) and snapshot /proc/vmstat into it.
	v.sampleIndex = (v.sampleIndex + 1) % sampleBufferLength
	v.samples[v.sampleIndex].read()
}
// counterData produces a VMCounterData for field.
//
// Count and AverageRate are relative to the start sample (last reset);
// RecentRate spans the valid samples currently in the circular buffer;
// MaxRate is the running maximum maintained by updateMaxRates.
func (v *vmStatsMeter) counterData(field vmField) VMCounterData {
	current := v.samples[v.sampleIndex].fields[field]
	currentTime := v.samples[v.sampleIndex].time
	delta := current - v.startSample.fields[field]
	// Use the most recent and least recent samples in the circular buffer.
	old := (v.sampleIndex - (v.sampleCount - 1) + sampleBufferLength) % sampleBufferLength
	oldTime := v.samples[old].time
	recentDelta := current - v.samples[old].fields[field]
	return VMCounterData{
		Count: delta,
		AverageRate: float64(delta) / currentTime.Sub(v.startSample.time).Seconds(),
		MaxRate: v.maxRates[field],
		RecentRate: float64(recentDelta) / currentTime.Sub(oldTime).Seconds(),
	}
}
// VMCounterData contains statistics for a memory manager event counter, such
// as the page fault counter (pgmajfault in /proc/vmstat).
type VMCounterData struct {
	// Count is the number of events since the last reset.
	Count uint64
	// AverageRate is the average rate (increase/second) for the duration
	// of the sampling.
	AverageRate float64
	// MaxRate is the maximum rate seen during the sampling
	// (increase/second over samplePeriod intervals).
	MaxRate float64
	// RecentRate is the average rate in the most recent window with size
	// vmCountWindowLength periods (or slightly more), or however many
	// periods are available since the most recent reset, including the
	// most recent sample.
	RecentRate float64
}

// VMStatsData contains statistics for various memory manager counters.
// The fields of VMStatsData must match the names and indices above.
type VMStatsData struct {
	// PageFault reports major page fault count and rates.
	PageFault VMCounterData
	// SwapIn reports swapin count and rates.
	SwapIn VMCounterData
	// SwapOut reports swapout count and rates.
	SwapOut VMCounterData
	// OOM reports out-of-memory kill count and rates.
	OOM VMCounterData
}

const samplePeriod = 1 * time.Second // length of sample period for max rate calculation
// New creates a Meter and starts the sampling goroutine.
func New(ctx context.Context) *Meter {
	meter := &Meter{
		vmsm:    newVMStatsMeter(),
		stop:    make(chan struct{}),
		stopped: make(chan struct{}),
	}
	go meter.start(ctx)
	return meter
}
// Close stops the sampling goroutine and releases other resources.
//
// Close must be called at most once; a second call panics. If ctx is
// done before the goroutine acknowledges the stop request, Close
// returns without waiting further.
func (m *Meter) Close(ctx context.Context) {
	if m.isClosed {
		panic("Closing already closed kernelmeter")
	}
	// Send stop request to the goroutine.
	close(m.stop)
	// Wait for the goroutine to finish.
	select {
	case <-m.stopped:
	case <-ctx.Done():
	}
	m.isClosed = true
}
// Reset resets a Meter so that it is ready for a new set of measurements.
func (m *Meter) Reset() {
	// Take the vmStatsMeter mutex: reset rewrites startSample and
	// maxRates, which stats() reads under the same mutex. The original
	// call was unlocked and could race with a concurrent stats() call.
	m.vmsm.mutex.Lock()
	defer m.vmsm.mutex.Unlock()
	m.vmsm.reset()
}
// newVMStatsMeter returns a vmStatsMeter primed with an initial sample
// so that deltas can be computed from the moment of creation.
func newVMStatsMeter() *vmStatsMeter {
	meter := &vmStatsMeter{}
	meter.acquireSample()
	meter.reset()
	return meter
}
// stats returns the vm counter stats since the last reset.
//
// It takes a fresh sample under the mutex and reports deltas and rates
// relative to the start sample recorded at the last reset. An error is
// returned if no measurable time has elapsed since the reset.
func (v *vmStatsMeter) stats() (*VMStatsData, error) {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	// time.Since is the idiomatic form of time.Now().Sub(t).
	interval := time.Since(v.startSample.time)
	if interval.Seconds() == 0.0 {
		// Message fixed: the original referred to a nonexistent
		// "VMCounterStats" function.
		return nil, errors.New("calling stats too soon")
	}
	v.acquireSample()
	return &VMStatsData{
		PageFault: v.counterData(pageFaultField),
		SwapIn:    v.counterData(swapInField),
		SwapOut:   v.counterData(swapOutField),
		OOM:       v.counterData(oomKillField),
	}, nil
}
// read stores the current time and current values of selected fields of
// /proc/vmstat into s. Panics if any error occurs, since we expect the kernel
// to function properly. The values of fields that are not found in
// /proc/vmstat are left unchanged.
func (s *vmSample) read() {
	s.time = time.Now()
	b, err := ioutil.ReadFile("/proc/vmstat")
	if err != nil {
		panic(fmt.Sprint("Cannot read /proc/vmstat: ", err))
	}
	// Track which fields of interest have been stored so the scan can
	// stop early. The original code never inserted into this map, so the
	// early-exit check below could never fire and the whole file was
	// always scanned.
	seen := make(map[string]struct{})
	for _, line := range strings.Split(strings.TrimSuffix(string(b), "\n"), "\n") {
		nameValue := strings.Split(line, " ")
		if len(nameValue) != 2 {
			panic(fmt.Sprintf("Unexpected vmstat line %q", line))
		}
		name := nameValue[0]
		value := nameValue[1]
		i, present := vmFieldIndices[name]
		if !present {
			continue
		}
		// The fields are uint64; ParseUint avoids a spurious failure on
		// values above MaxInt64.
		count, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			panic(fmt.Sprintf("Cannot parse %q value %q: %v", name, value, err))
		}
		s.fields[i] = count
		seen[name] = struct{}{}
		if len(seen) == vmFieldsLength {
			break
		}
	}
}
// start starts the kernel meter, which periodically samples various memory
// manager quantities (such as page fault counts) and tracks the max values of
// their rate of change. It runs until the stop channel is closed or ctx
// is done, then closes m.stopped.
func (m *Meter) start(ctx context.Context) {
	defer close(m.stopped)
	for {
		select {
		case <-time.After(samplePeriod):
		case <-m.stop:
			return
		case <-ctx.Done():
			return
		}
		// Hold the mutex while mutating the sample buffer: stats() reads
		// and advances the same buffer under this mutex, and the original
		// unlocked calls raced with it.
		m.vmsm.mutex.Lock()
		m.vmsm.acquireSample()
		m.vmsm.updateMaxRates()
		m.vmsm.mutex.Unlock()
	}
}
// VMStats returns the total number of events, and the average and
// max rates, for various memory manager events.
//
// It delegates to the meter's vmStatsMeter, which takes a fresh sample.
func (m *Meter) VMStats() (*VMStatsData, error) {
	return m.vmsm.stats()
}
// MemSize represents an amount of RAM in bytes.
type MemSize uint64

// String converts a MemSize to a string for printing. The value is printed in
// MiB, since MiB resolution is more than sufficient for this application. For
// values smaller than 2 MiB, a few decimals are printed.
func (m MemSize) String() string {
	const mib = MemSize(1024 * 1024)
	if m < 2*mib {
		// Small values get three decimal places of MiB.
		return fmt.Sprintf("%.3f", float64(m)/float64(mib))
	}
	return fmt.Sprintf("%d", m/mib)
}
// watermarkData contains the sums of per-zone watermarks, plus the total
// memory reserve from the kernel.
type watermarkData struct {
	min, low, high, totalReserve MemSize
}

// watermarks returns the MM watermarks and mimics the calculation of
// totalreserve_pages, which is not exported, in
// calculate_totalreserve_pages(). The latter number is a reasonable
// approximation (and an upper bound) of the minimum amount of RAM which the
// kernel tries to keep free by reclaiming.
//
// The parsing itself is delegated to stringToWatermarks for testability.
func watermarks() (*watermarkData, error) {
	b, err := ioutil.ReadFile("/proc/zoneinfo")
	if err != nil {
		return nil, err
	}
	return stringToWatermarks(string(b))
}
// NewMemSizePages converts a number of pages to its memory size in bytes.
func NewMemSizePages(pages int) MemSize {
	return MemSize(os.Getpagesize()) * MemSize(pages)
}

// NewMemSizeKiB converts an amount in KiB to a memory size in bytes.
func NewMemSizeKiB(kib int) MemSize {
	return MemSize(kib) << 10
}

// NewMemSizeMiB converts an amount in MiB to a memory size in bytes.
func NewMemSizeMiB(mib int) MemSize {
	return MemSize(mib) << 20
}
// stringToWatermarks is the internal version of watermarks, for unit testing.
// s is the content of /proc/zoneinfo.
//
// The parser walks zoneinfo line by line as a small state machine: for
// each zone it expects the three watermarks (min/low/high, in any
// order), then the "managed" page count, then the "protection" array,
// and folds the resulting per-zone reserve into the running total.
func stringToWatermarks(s string) (*watermarkData, error) {
	watermarkRE := regexp.MustCompile(`(high|low|min)\s+(\d+)`)
	managedRE := regexp.MustCompile(`managed\s+(\d+)`)
	reserveRE := regexp.MustCompile(`protection: \((.*)\)`)
	w := &watermarkData{}
	type parseState int
	const (
		lookingForWatermarks parseState = iota
		lookingForManaged
		lookingForProtection
		foundAll
	)
	// All quantities in /proc/zoneinfo are in pages. They are converted
	// to MemSize (bytes).
	state := lookingForWatermarks // initial parsing state
	var managed MemSize           // per-zone managed memory
	var highWM MemSize            // high watermark in a zone
	var maxReserve MemSize        // highest value in "protection" array for a zone
	var totalReserve MemSize      // total reserve, based on max per-zone reserves
	wm := map[string]MemSize{}    // values of min, low, high in a zone
	for _, line := range strings.Split(strings.TrimSuffix(string(s), "\n"), "\n") {
		if groups := watermarkRE.FindStringSubmatch(line); groups != nil {
			if state != lookingForWatermarks {
				return nil, errors.New("field out of order in zoneinfo")
			}
			var v int
			var err error
			if v, err = strconv.Atoi(groups[2]); err != nil {
				return nil, errors.Wrapf(err, "bad value %q for zoneinfo field %q", groups[2], groups[1])
			}
			wm[groups[1]] = NewMemSizePages(v)
			// Once all three watermarks of this zone have been seen,
			// accumulate them and move on to the "managed" field.
			if len(wm) == 3 {
				w.min += wm["min"]
				w.low += wm["low"]
				w.high += wm["high"]
				// Keep this zone's own high watermark for the reserve
				// calculation below. The original code assigned the
				// cumulative w.high here, which grows with every zone
				// and overstates later zones' reserves (the kernel's
				// calculate_totalreserve_pages is per zone).
				highWM = wm["high"]
				state = lookingForManaged
				wm = map[string]MemSize{} // clear watermarks map
			}
			continue
		}
		if groups := managedRE.FindStringSubmatch(line); groups != nil {
			if state != lookingForManaged {
				return nil, errors.New("field 'managed' out of order in zoneinfo")
			}
			var m int
			var err error
			if m, err = strconv.Atoi(groups[1]); err != nil {
				return nil, errors.Wrapf(err, "bad zoneinfo 'managed' field %q", groups[1])
			}
			managed = NewMemSizePages(m)
			state = lookingForProtection
			continue
		}
		if groups := reserveRE.FindStringSubmatch(line); groups != nil {
			maxReserve = 0
			if state != lookingForProtection {
				return nil, errors.New("field 'protection' out of order in zoneinfo")
			}
			// The protection array holds one reserve per zone index; only
			// the maximum matters for this zone's total reserve.
			for _, field := range strings.Split(groups[1], ", ") {
				r, err := strconv.Atoi(field)
				if err != nil {
					return nil, errors.Wrapf(err, "bad reserve %q", groups[1])
				}
				reserve := NewMemSizePages(r)
				if maxReserve < reserve {
					maxReserve = reserve
				}
			}
			state = foundAll
		}
		if state == foundAll {
			// Per-zone reserve is high watermark plus protection, capped
			// at the zone's managed memory.
			zoneReserve := highWM + maxReserve
			if zoneReserve > managed {
				zoneReserve = managed
			}
			totalReserve += zoneReserve
			state = lookingForWatermarks
		}
	}
	if state != lookingForWatermarks {
		return nil, errors.New("zoneinfo ended prematurely")
	}
	w.totalReserve = totalReserve
	return w, nil
}
// memInfoRE matches a /proc/meminfo line such as "MemTotal: 16384 kB".
// Compiled once at package scope rather than on every ReadMemInfo call.
var memInfoRE = regexp.MustCompile(`(\S+):\s+(\d+) kB\n`)

// ReadMemInfo returns all name-value pairs from /proc/meminfo. The values
// returned are in bytes.
func ReadMemInfo() (map[string]MemSize, error) {
	b, err := ioutil.ReadFile("/proc/meminfo")
	if err != nil {
		return nil, err
	}
	info := make(map[string]MemSize)
	for _, groups := range memInfoRE.FindAllStringSubmatch(string(b), -1) {
		v, err := strconv.Atoi(groups[2])
		if err != nil {
			return nil, errors.Wrapf(err, "bad meminfo value: %q", groups[2])
		}
		info[groups[1]] = NewMemSizeKiB(v)
	}
	return info, nil
}
// MemInfoFields holds selected fields of /proc/meminfo. All values are
// in bytes (see MemInfo for how each field is derived).
type MemInfoFields struct {
	Total, Free, Anon, File, SwapTotal, SwapUsed MemSize
}
// MemInfo returns selected /proc/meminfo fields. Anon and File are the
// sums of the corresponding active and inactive entries, and SwapUsed
// is derived as SwapTotal - SwapFree.
func MemInfo() (data *MemInfoFields, err error) {
	info, err := ReadMemInfo()
	if err != nil {
		return nil, err
	}
	fields := &MemInfoFields{
		Total:     info["MemTotal"],
		Free:      info["MemFree"],
		Anon:      info["Active(anon)"] + info["Inactive(anon)"],
		File:      info["Active(file)"] + info["Inactive(file)"],
		SwapTotal: info["SwapTotal"],
		SwapUsed:  info["SwapTotal"] - info["SwapFree"],
	}
	return fields, nil
}
// readIntFromFile returns the numeric value of the content of filename, which
// is typically a sysfs or procfs entry.
func readIntFromFile(filename string) (int, error) {
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		return 0, err
	}
	value, err := strconv.Atoi(strings.TrimSpace(string(contents)))
	if err != nil {
		return 0, errors.Wrapf(err, "bad integer: %q", contents)
	}
	return value, nil
}
// readFirstIntFromFile assumes filename contains one or more space-separated
// items, and returns the value of the first item which must be an integer.
func readFirstIntFromFile(filename string) (int, error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return 0, err
	}
	f := strings.Fields(string(b))
	if len(f) == 0 {
		// err is nil at this point, so wrapping it is wrong (with
		// pkg/errors-style semantics, Wrapf(nil, ...) returns nil and the
		// error would vanish). Report a fresh error instead.
		return 0, errors.Errorf("no fields in file %v", filename)
	}
	x, err := strconv.Atoi(f[0])
	if err != nil {
		return 0, errors.Wrapf(err, "bad integer: %q", f[0])
	}
	return x, nil
}
// ChromeosLowMem returns sysfs information from the chromeos low-mem module:
// the available amount and critical margin (both converted from MiB to
// bytes) and the RAM-vs-swap weight.
func ChromeosLowMem() (available, criticalMargin MemSize, ramWeight int, err error) {
	const sysdir = "/sys/kernel/mm/chromeos-low_mem/"
	availableMiB, err := readIntFromFile(sysdir + "available")
	if err != nil {
		return 0, 0, 0, err
	}
	marginMiB, err := readFirstIntFromFile(sysdir + "margin")
	if err != nil {
		return 0, 0, 0, err
	}
	weight, err := readIntFromFile(sysdir + "ram_vs_swap_weight")
	if err != nil {
		return 0, 0, 0, err
	}
	return NewMemSizeMiB(availableMiB), NewMemSizeMiB(marginMiB), weight, nil
}
// ProcessMemory returns the approximate amount of virtual memory (swapped or
// not) currently allocated by processes, computed from /proc/meminfo as
// anonymous memory plus used swap.
func ProcessMemory() (allocated MemSize, err error) {
	info, err := MemInfo()
	if err != nil {
		return 0, err
	}
	return info.Anon + info.SwapUsed, nil
}
// HasZram returns true when the system uses swap on a zram device,
// and no other device.
func HasZram() bool {
	contents, err := ioutil.ReadFile("/proc/swaps")
	if err != nil {
		return false
	}
	// The first line of /proc/swaps is a header; the first swap entry,
	// if any, is on the second line.
	lines := strings.Split(string(contents), "\n")
	if len(lines) < 2 {
		return false
	}
	return strings.HasPrefix(lines[1], "/dev/zram")
}
// LogMemoryParameters logs various kernel parameters as well as some
// calculated quantities to help understand the memory manager behavior.
// ratio is the assumed zram compression ratio; it is treated as 0 when
// the device does not swap to zram.
func LogMemoryParameters(ctx context.Context, ratio float64) error {
	available, margin, ramWeight, err := ChromeosLowMem()
	if err != nil {
		return errors.Wrap(err, "cannot obtain low-mem info")
	}
	hasZram := HasZram()
	if !hasZram {
		// Swap to disk is the same as if the compression ratio was 0.
		ratio = 0.0
		testing.ContextLog(ctx, "Device is not using zram")
	}
	memInfo, err := MemInfo()
	if err != nil {
		return errors.Wrap(err, "cannot obtain memory info")
	}
	total := memInfo.Total
	totalSwap := memInfo.SwapTotal
	usedSwap := memInfo.SwapUsed
	// process is how much memory is in use by processes at this time.
	// Best effort: on error it stays 0 and only affects the logged values.
	process, err := ProcessMemory()
	if err != nil {
		testing.ContextLog(ctx, "Cannot compute process footprint: ", err)
	}
	wm, err := watermarks()
	if err != nil {
		// The original code only logged this error and then dereferenced
		// the nil wm below, which panics. Fail explicitly instead.
		return errors.Wrap(err, "cannot compute watermarks")
	}
	// swapReduction is the amount to be taken out of swapTotal because we
	// start discarding before swap is full. If ramWeight is large, free
	// swap has little or no influence on available, and we assume all swap
	// space can be used.
	var swapReduction MemSize
	if margin > wm.totalReserve {
		swapReduction = (margin - wm.totalReserve) * MemSize(ramWeight)
		if swapReduction > totalSwap {
			swapReduction = 0
		}
	}
	usableSwap := totalSwap - swapReduction
	// maxProcess is the amount of allocated process memory at which the
	// low-mem device triggers.
	maxProcess := total - wm.totalReserve + MemSize(float64(usableSwap)*(1-ratio))
	if maxProcess < process {
		return errors.Errorf("bad process size calculation: max %v , current %v ", maxProcess, process)
	}
	testing.ContextLog(ctx, "Metrics: all memory sizes (RAM, swap, process) are in MiB")
	testing.ContextLogf(ctx, "Metrics: meminfo: total %v, has zram %v", total, hasZram)
	testing.ContextLogf(ctx, "Metrics: swap: total %v, used %d, usable %v", totalSwap, usedSwap, usableSwap)
	testing.ContextLogf(ctx, "Metrics: low-mem: available %v, margin %v, RAM weight %v", available, margin, ramWeight)
	testing.ContextLogf(ctx, "Metrics: watermarks %v %v %v, total reserve %v", wm.min, wm.low, wm.high, wm.totalReserve)
	testing.ContextLogf(ctx, "Metrics: process allocation: current %v, max %v, compression ratio %v", process, maxProcess, ratio)
	return nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"context"
gosql "database/sql"
"fmt"
"time"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
)
// registerSchemaChangeDuringKV registers a roachtest that restores a
// scale-factor-10 TPCH fixture, runs a kv workload against every node,
// and then executes schema changes concurrently with that load.
func registerSchemaChangeDuringKV(r *testRegistry) {
	r.Add(testSpec{
		Name:    `schemachange/during/kv`,
		Owner:   OwnerSQLSchema,
		Cluster: makeClusterSpec(5),
		Run: func(ctx context.Context, t *test, c *cluster) {
			const fixturePath = `gs://cockroach-fixtures/workload/tpch/scalefactor=10/backup`
			c.Put(ctx, cockroach, "./cockroach")
			c.Put(ctx, workload, "./workload")
			c.Start(ctx, t, c.All())
			db := c.Conn(ctx, 1)
			defer db.Close()
			// Restore the fixture under a monitor so node deaths fail the test.
			m := newMonitor(ctx, c, c.All())
			m.Go(func(ctx context.Context) error {
				t.Status("loading fixture")
				if _, err := db.Exec(`RESTORE DATABASE tpch FROM $1`, fixturePath); err != nil {
					t.Fatal(err)
				}
				return nil
			})
			m.Wait()
			c.Run(ctx, c.Node(1), `./workload init kv --drop --db=test`)
			// Start one background kv load goroutine per node.
			for node := 1; node <= c.spec.NodeCount; node++ {
				node := node
				// TODO(dan): Ideally, the test would fail if this queryload failed,
				// but we can't put it in monitor as-is because the test deadlocks.
				go func() {
					const cmd = `./workload run kv --tolerate-errors --min-block-bytes=8 --max-block-bytes=127 --db=test`
					l, err := t.l.ChildLogger(fmt.Sprintf(`kv-%d`, node))
					if err != nil {
						// NOTE(review): t.Fatal is called from a non-test
						// goroutine here — confirm the roachtest harness
						// tolerates that.
						t.Fatal(err)
					}
					defer l.close()
					_ = execCmd(ctx, t.l, roachprod, "ssh", c.makeNodes(c.Node(node)), "--", cmd)
				}()
			}
			m = newMonitor(ctx, c, c.All())
			m.Go(func(ctx context.Context) error {
				t.Status("running schema change tests")
				return waitForSchemaChanges(ctx, t.l, db)
			})
			m.Wait()
		},
	})
}
// waitForSchemaChanges runs two rounds of schema changes — first over
// the static tpch.customer table, then over the actively-updated
// test.kv table — validating row counts after each round with
// AS OF SYSTEM TIME queries.
func waitForSchemaChanges(ctx context.Context, l *logger, db *gosql.DB) error {
	start := timeutil.Now()
	// These schema changes are over a table that is not actively
	// being updated.
	l.Printf("running schema changes over tpch.customer\n")
	schemaChanges := []string{
		"ALTER TABLE tpch.customer ADD COLUMN newcol INT DEFAULT 23456",
		"CREATE INDEX foo ON tpch.customer (c_name)",
	}
	if err := runSchemaChanges(ctx, l, db, schemaChanges); err != nil {
		return err
	}
	// TODO(vivek): Fix #21544.
	// if err := sqlutils.RunScrub(db, `test`, `kv`); err != nil {
	// return err
	// }
	// All these return the same result.
	validationQueries := []string{
		"SELECT count(*) FROM tpch.customer AS OF SYSTEM TIME %s",
		"SELECT count(newcol) FROM tpch.customer AS OF SYSTEM TIME %s",
		"SELECT count(c_name) FROM tpch.customer@foo AS OF SYSTEM TIME %s",
	}
	if err := runValidationQueries(ctx, l, db, start, validationQueries, nil); err != nil {
		return err
	}
	// These schema changes are run later because the above schema
	// changes run for a decent amount of time giving kv.kv
	// an opportunity to get populate through the load generator. These
	// schema changes are acting upon a decent sized table that is also
	// being updated.
	l.Printf("running schema changes over test.kv\n")
	schemaChanges = []string{
		"ALTER TABLE test.kv ADD COLUMN created_at TIMESTAMP DEFAULT now()",
		"CREATE INDEX foo ON test.kv (v)",
	}
	if err := runSchemaChanges(ctx, l, db, schemaChanges); err != nil {
		return err
	}
	// TODO(vivek): Fix #21544.
	// if err := sqlutils.RunScrub(db, `test`, `kv`); err != nil {
	// return err
	// }
	// All these return the same result.
	validationQueries = []string{
		"SELECT count(*) FROM test.kv AS OF SYSTEM TIME %s",
		"SELECT count(v) FROM test.kv AS OF SYSTEM TIME %s",
		"SELECT count(v) FROM test.kv@foo AS OF SYSTEM TIME %s",
	}
	// Queries to hone in on index validation problems.
	indexValidationQueries := []string{
		"SELECT count(k) FROM test.kv@primary AS OF SYSTEM TIME %s WHERE created_at > $1 AND created_at <= $2",
		"SELECT count(v) FROM test.kv@foo AS OF SYSTEM TIME %s WHERE created_at > $1 AND created_at <= $2",
	}
	return runValidationQueries(ctx, l, db, start, validationQueries, indexValidationQueries)
}
// runSchemaChanges executes each schema change statement in order,
// logging the duration of each one; it stops and returns on the first
// error.
func runSchemaChanges(ctx context.Context, l *logger, db *gosql.DB, schemaChanges []string) error {
	for _, stmt := range schemaChanges {
		begin := timeutil.Now()
		l.Printf("starting schema change: %s\n", stmt)
		_, err := db.Exec(stmt)
		if err != nil {
			l.Errorf("hit schema change error: %s, for %s, in %s\n", err, stmt, timeutil.Since(begin))
			return err
		}
		l.Printf("completed schema change: %s, in %s\n", stmt, timeutil.Since(begin))
		// TODO(vivek): Monitor progress of schema changes and log progress.
	}
	return nil
}
// runValidationQueries runs each validation query at the cluster's
// current logical timestamp and requires all of them to report the same
// nonzero row count. When indexValidationQueries is non-nil, the
// [start, now] interval is additionally bisected (via findIndexProblem)
// to locate index inconsistencies.
// The validationQueries all return the same result.
func runValidationQueries(
	ctx context.Context,
	l *logger,
	db *gosql.DB,
	start time.Time,
	validationQueries []string,
	indexValidationQueries []string,
) error {
	// Sleep for a bit before validating the schema changes to
	// accommodate for time differences between nodes. Some of the
	// schema change backfill transactions might use a timestamp a bit
	// into the future. This is not a problem normally because a read
	// of schema data written into the impending future gets pushed,
	// but the reads being done here are at a specific timestamp through
	// AS OF SYSTEM TIME.
	time.Sleep(5 * time.Second)
	var nowString string
	if err := db.QueryRow("SELECT cluster_logical_timestamp()").Scan(&nowString); err != nil {
		return err
	}
	var nowInNanos int64
	if _, err := fmt.Sscanf(nowString, "%d", &nowInNanos); err != nil {
		return err
	}
	now := timeutil.Unix(0, nowInNanos)
	// Validate the different schema changes
	var eCount int64
	for i := range validationQueries {
		var count int64
		q := fmt.Sprintf(validationQueries[i], nowString)
		if err := db.QueryRow(q).Scan(&count); err != nil {
			return err
		}
		l.Printf("query: %s, found %d rows\n", q, count)
		if count == 0 {
			return errors.Errorf("%s: %d rows found", q, count)
		}
		if eCount == 0 {
			// First query's count becomes the expected count for the rest.
			eCount = count
			// Investigate index creation problems. Always run this so we know
			// it works.
			if indexValidationQueries != nil {
				sp := timeSpan{start: start, end: now}
				if err := findIndexProblem(
					ctx, l, db, sp, nowString, indexValidationQueries,
				); err != nil {
					return err
				}
			}
		} else if count != eCount {
			return errors.Errorf("%s: %d rows found, expected %d rows", q, count, eCount)
		}
	}
	return nil
}
// timeSpan is a wall-clock interval used when bisecting index
// inconsistencies; its endpoints are bound as the (start, end] query
// parameters of the index validation queries.
type timeSpan struct {
	start, end time.Time
}
// checkIndexOverTimeSpan runs both index-validation queries over the span s
// (bounded by s.start/s.end, at the timestamp nowString) and reports true
// when their row counts disagree, i.e. an inconsistency was observed.
func checkIndexOverTimeSpan(
	ctx context.Context,
	l *logger,
	db *gosql.DB,
	s timeSpan,
	nowString string,
	indexValidationQueries []string,
) (bool, error) {
	var expected, actual int64
	expectedQ := fmt.Sprintf(indexValidationQueries[0], nowString)
	if err := db.QueryRow(expectedQ, s.start, s.end).Scan(&expected); err != nil {
		return false, err
	}
	actualQ := fmt.Sprintf(indexValidationQueries[1], nowString)
	if err := db.QueryRow(actualQ, s.start, s.end).Scan(&actual); err != nil {
		return false, err
	}
	l.Printf("counts seen %d, %d, over [%s, %s]\n", actual, expected, s.start, s.end)
	return actual != expected, nil
}
// findIndexProblem bisects the time span s to localize index
// inconsistencies. It is an iterative binary search: any half-span that
// still shows a count mismatch is pushed back onto the work list and split
// again, until spans narrow below 50ms, at which point they are logged as
// the problem location.
func findIndexProblem(
	ctx context.Context,
	l *logger,
	db *gosql.DB,
	s timeSpan,
	nowString string,
	indexValidationQueries []string,
) error {
	spans := []timeSpan{s}
	// process all the outstanding time spans.
	for len(spans) > 0 {
		s := spans[0]
		spans = spans[1:]
		// split span into two time ranges.
		leftSpan, rightSpan := s, s
		d := s.end.Sub(s.start) / 2
		if d < 50*time.Millisecond {
			// Narrow enough to report; stop splitting this span.
			l.Printf("problem seen over [%s, %s]\n", s.start, s.end)
			continue
		}
		m := s.start.Add(d)
		leftSpan.end = m
		rightSpan.start = m
		leftState, err := checkIndexOverTimeSpan(
			ctx, l, db, leftSpan, nowString, indexValidationQueries)
		if err != nil {
			return err
		}
		rightState, err := checkIndexOverTimeSpan(
			ctx, l, db, rightSpan, nowString, indexValidationQueries)
		if err != nil {
			return err
		}
		if leftState {
			spans = append(spans, leftSpan)
		}
		if rightState {
			spans = append(spans, rightSpan)
		}
		if !(leftState || rightState) {
			// The parent span showed a problem but neither half does now.
			l.Printf("no problem seen over [%s, %s]\n", s.start, s.end)
		}
	}
	return nil
}
// registerSchemaChangeIndexTPCC1000 registers the 1000-warehouse index-add
// test on a 5-node, 16-CPU cluster with a 2-hour workload.
func registerSchemaChangeIndexTPCC1000(r *testRegistry) {
	r.Add(makeIndexAddTpccTest(makeClusterSpec(5, cpu(16)), 1000, time.Hour*2))
}
// registerSchemaChangeIndexTPCC100 registers the smaller 100-warehouse
// index-add test on a default 5-node cluster with a 15-minute workload.
func registerSchemaChangeIndexTPCC100(r *testRegistry) {
	r.Add(makeIndexAddTpccTest(makeClusterSpec(5), 100, time.Minute*15))
}
// makeIndexAddTpccTest builds a test that runs TPCC for `length` at the
// given warehouse count while concurrently creating three indexes on the
// order and customer tables mid-run.
func makeIndexAddTpccTest(spec clusterSpec, warehouses int, length time.Duration) testSpec {
	return testSpec{
		Name:    fmt.Sprintf("schemachange/index/tpcc/w=%d", warehouses),
		Owner:   OwnerSQLSchema,
		Cluster: spec,
		Timeout: length * 3,
		Run: func(ctx context.Context, t *test, c *cluster) {
			runTPCC(ctx, t, c, tpccOptions{
				Warehouses: warehouses,
				// We limit the number of workers because the default results in a lot
				// of connections which can lead to OOM issues (see #40566).
				ExtraRunArgs: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses),
				During: func(ctx context.Context) error {
					return runAndLogStmts(ctx, t, c, "addindex", []string{
						`CREATE UNIQUE INDEX ON tpcc.order (o_entry_d, o_w_id, o_d_id, o_carrier_id, o_id);`,
						`CREATE INDEX ON tpcc.order (o_carrier_id);`,
						`CREATE INDEX ON tpcc.customer (c_last, c_first);`,
					})
				},
				Duration:  length,
				SetupType: usingImport,
			})
		},
		MinVersion: "v19.1.0",
	}
}
// registerSchemaChangeBulkIngest registers the bulk-ingest index-backfill
// test: 5 nodes, 100M rows, 20-minute foreground workload.
func registerSchemaChangeBulkIngest(r *testRegistry) {
	r.Add(makeSchemaChangeBulkIngestTest(5, 100000000, time.Minute*20))
}
// makeSchemaChangeBulkIngestTest builds a test that imports a bulkingest
// fixture (numRows rows) onto numNodes-1 cockroach nodes, then creates a
// secondary index while a bulkingest read/write workload runs from the last
// node. The workload goroutine and the index-creation goroutine run under a
// monitor and both must finish.
func makeSchemaChangeBulkIngestTest(numNodes, numRows int, length time.Duration) testSpec {
	return testSpec{
		Name:    "schemachange/bulkingest",
		Owner:   OwnerSQLSchema,
		Cluster: makeClusterSpec(numNodes),
		Timeout: length * 2,
		// `fixtures import` (with the workload paths) is not supported in 2.1
		MinVersion: "v19.1.0",
		Run: func(ctx context.Context, t *test, c *cluster) {
			// Configure column a to have sequential ascending values, and columns b and c to be constant.
			// The payload column will be randomized and thus uncorrelated with the primary key (a, b, c).
			aNum := numRows
			if c.isLocal() {
				// Keep local (dev-machine) runs small.
				aNum = 100000
			}
			bNum := 1
			cNum := 1
			payloadBytes := 4
			// Last node is reserved for the workload generator.
			crdbNodes := c.Range(1, c.spec.NodeCount-1)
			workloadNode := c.Node(c.spec.NodeCount)
			c.Put(ctx, cockroach, "./cockroach")
			c.Put(ctx, workload, "./workload", workloadNode)
			// TODO (lucy): Remove flag once the faster import is enabled by default
			c.Start(ctx, t, crdbNodes, startArgs("--env=COCKROACH_IMPORT_WORKLOAD_FASTER=true"))
			// Don't add another index when importing.
			cmdWrite := fmt.Sprintf(
				// For fixtures import, use the version built into the cockroach binary
				// so the tpcc workload-versions match on release branches.
				"./cockroach workload fixtures import bulkingest {pgurl:1} --a %d --b %d --c %d --payload-bytes %d --index-b-c-a=false",
				aNum, bNum, cNum, payloadBytes,
			)
			c.Run(ctx, workloadNode, cmdWrite)
			m := newMonitor(ctx, c, crdbNodes)
			indexDuration := length
			if c.isLocal() {
				indexDuration = time.Second * 30
			}
			cmdWriteAndRead := fmt.Sprintf(
				"./workload run bulkingest --duration %s {pgurl:1-%d} --a %d --b %d --c %d --payload-bytes %d",
				indexDuration.String(), c.spec.NodeCount-1, aNum, bNum, cNum, payloadBytes,
			)
			m.Go(func(ctx context.Context) error {
				c.Run(ctx, workloadNode, cmdWriteAndRead)
				return nil
			})
			m.Go(func(ctx context.Context) error {
				db := c.Conn(ctx, 1)
				defer db.Close()
				if !c.isLocal() {
					// Wait for the load generator to run for a few minutes before creating the index.
					sleepInterval := time.Minute * 5
					maxSleep := length / 2
					if sleepInterval > maxSleep {
						sleepInterval = maxSleep
					}
					time.Sleep(sleepInterval)
				}
				c.l.Printf("Creating index")
				before := timeutil.Now()
				if _, err := db.Exec(`CREATE INDEX payload_a ON bulkingest.bulkingest (payload, a)`); err != nil {
					t.Fatal(err)
				}
				c.l.Printf("CREATE INDEX took %v\n", timeutil.Since(before))
				return nil
			})
			m.Wait()
		},
	}
}
// registerSchemaChangeDuringTPCC1000 registers the mixed schema-change test
// under a 1000-warehouse TPCC load on a 5-node, 16-CPU cluster for 3 hours.
func registerSchemaChangeDuringTPCC1000(r *testRegistry) {
	r.Add(makeSchemaChangeDuringTPCC(makeClusterSpec(5, cpu(16)), 1000, time.Hour*3))
}
// makeSchemaChangeDuringTPCC builds a test that runs a battery of schema
// changes (CTAS/inserts, index creation, FK addition and validation, column
// addition, check constraints, rename, truncate, drop) while TPCC traffic is
// running. The table-population step is version-gated because CREATE TABLE
// AS with an explicit primary key was added in 19.2.
func makeSchemaChangeDuringTPCC(spec clusterSpec, warehouses int, length time.Duration) testSpec {
	return testSpec{
		Name:    "schemachange/during/tpcc",
		Owner:   OwnerSQLSchema,
		Cluster: spec,
		Timeout: length * 3,
		Run: func(ctx context.Context, t *test, c *cluster) {
			runTPCC(ctx, t, c, tpccOptions{
				Warehouses: warehouses,
				// We limit the number of workers because the default results in a lot
				// of connections which can lead to OOM issues (see #40566).
				ExtraRunArgs: fmt.Sprintf("--wait=false --tolerate-errors --workers=%d", warehouses),
				During: func(ctx context.Context) error {
					if t.IsBuildVersion(`v19.2.0`) {
						if err := runAndLogStmts(ctx, t, c, "during-schema-changes-19.2", []string{
							// CREATE TABLE AS with a specified primary key was added in 19.2.
							`CREATE TABLE tpcc.orderpks (o_w_id, o_d_id, o_id, PRIMARY KEY(o_w_id, o_d_id, o_id)) AS select o_w_id, o_d_id, o_id FROM tpcc.order;`,
						}); err != nil {
							return err
						}
					} else {
						if err := runAndLogStmts(ctx, t, c, "during-schema-changes-19.1", []string{
							`CREATE TABLE tpcc.orderpks (o_w_id INT, o_d_id INT, o_id INT, PRIMARY KEY(o_w_id, o_d_id, o_id));`,
							// We can't populate the table with CREATE TABLE AS, so just
							// insert the rows. The limit exists to reduce contention.
							`INSERT INTO tpcc.orderpks SELECT o_w_id, o_d_id, o_id FROM tpcc.order LIMIT 10000;`,
						}); err != nil {
							return err
						}
					}
					return runAndLogStmts(ctx, t, c, "during-schema-changes", []string{
						`CREATE INDEX ON tpcc.order (o_carrier_id);`,
						`CREATE TABLE tpcc.customerpks (c_w_id INT, c_d_id INT, c_id INT, FOREIGN KEY (c_w_id, c_d_id, c_id) REFERENCES tpcc.customer (c_w_id, c_d_id, c_id));`,
						`ALTER TABLE tpcc.order ADD COLUMN orderdiscount INT DEFAULT 0;`,
						`ALTER TABLE tpcc.order ADD CONSTRAINT nodiscount CHECK (orderdiscount = 0);`,
						`ALTER TABLE tpcc.orderpks ADD CONSTRAINT warehouse_id FOREIGN KEY (o_w_id) REFERENCES tpcc.warehouse (w_id);`,
						// The FK constraint on tpcc.district referencing tpcc.warehouse is
						// unvalidated, thus this operation will not be a noop.
						`ALTER TABLE tpcc.district VALIDATE CONSTRAINT fk_d_w_id_ref_warehouse;`,
						`ALTER TABLE tpcc.orderpks RENAME TO tpcc.readytodrop;`,
						`TRUNCATE TABLE tpcc.readytodrop CASCADE;`,
						`DROP TABLE tpcc.readytodrop CASCADE;`,
					})
				},
				Duration:  length,
				SetupType: usingImport,
			})
		},
		MinVersion: "v19.1.0",
	}
}
// runAndLogStmts executes stmts sequentially against node 1, sleeping one
// minute before each so foreground traffic runs in between, and logs
// per-statement and total timings. A statement failure is fatal to the test.
func runAndLogStmts(ctx context.Context, t *test, c *cluster, prefix string, stmts []string) error {
	conn := c.Conn(ctx, 1)
	defer conn.Close()
	c.l.Printf("%s: running %d statements\n", prefix, len(stmts))
	begin := timeutil.Now()
	for i, stmt := range stmts {
		// Let some traffic run before the schema change.
		time.Sleep(time.Minute)
		c.l.Printf("%s: running statement %d...\n", prefix, i+1)
		stmtStart := timeutil.Now()
		if _, err := conn.Exec(stmt); err != nil {
			t.Fatal(err)
		}
		c.l.Printf("%s: statement %d: %q took %v\n", prefix, i+1, stmt, timeutil.Since(stmtStart))
	}
	c.l.Printf("%s: ran %d statements in %v\n", prefix, len(stmts), timeutil.Since(begin))
	return nil
}
|
package solutions
// maxProfit returns the maximum profit from buying and selling a stock with
// a one-day cooldown after each sale (LeetCode 309 "Best Time to Buy and
// Sell Stock with Cooldown"). Three DP states are tracked per day:
//
//	rest — no position and free to buy (either never traded or past cooldown)
//	hold — currently holding a share
//	sold — sold today (must rest tomorrow)
//
// Replaces the original's hand-rolled MinInt sentinel (-int(^uint(0)>>1)-1)
// with an explicit empty-input guard and first-price initialization; the
// recurrence is unchanged.
func maxProfit(prices []int) int {
	if len(prices) == 0 {
		return 0
	}
	rest := 0          // best profit with no position, able to buy
	hold := -prices[0] // best profit while holding a share
	sold := 0          // best profit having just sold
	for _, p := range prices[1:] {
		prevRest := rest
		// Resting today: either keep resting or finish cooldown after a sale.
		rest = max(rest, sold)
		// Holding today: keep holding, or buy using yesterday's rest state.
		hold = max(hold, prevRest-p)
		// Selling today: liquidate the best holding state at today's price.
		sold = hold + p
	}
	// The answer ends with no open position: resting or just sold.
	return max(rest, sold)
}
|
package Reorganize_String
// reorganizeString rearranges S so that no two adjacent characters are
// equal, returning "" when that is impossible (some letter occurs more than
// (len+1)/2 times). Letters are processed alphabetically: a letter whose
// remaining count fits in the odd slots is placed at indices 1, 3, 5, ...;
// anything left over (notably a majority letter) fills even indices 0, 2, ...
func reorganizeString(S string) string {
	length := len(S)
	if length <= 1 {
		return S
	}
	var freq [26]int
	highest := 0
	for _, ch := range S {
		freq[ch-'a']++
		if freq[ch-'a'] > highest {
			highest = freq[ch-'a']
		}
	}
	// A letter occurring more than ceil(n/2) times cannot be separated.
	if highest > (length+1)/2 {
		return ""
	}
	out := make([]byte, length)
	odd, even, half := 1, 0, length/2
	for letter, remaining := range freq {
		ch := byte('a' + letter)
		// Odd slots first, while the remaining count fits there.
		for remaining > 0 && remaining <= half && odd < length {
			out[odd] = ch
			remaining--
			odd += 2
		}
		// Whatever is left takes the even slots.
		for remaining > 0 {
			out[even] = ch
			remaining--
			even += 2
		}
	}
	return string(out)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package fingerprint
// FirmwareFilePath is the directory that holds fingerprint MCU firmware files.
const FirmwareFilePath = "/opt/google/biod/fw"
// FirmwareFilePattern returns a glob pattern matching the rootfs firmware
// for the given board, e.g. "/opt/google/biod/fw/<board>*.bin". Note, some
// devices might contain multiple firmware files for different fingerprint
// variants, but this pattern only yields files for the active FPMCU board.
func FirmwareFilePattern(board BoardName) string {
	glob := string(board) + "*.bin"
	return FirmwareFilePath + "/" + glob
}
|
package main
import (
	"fmt"
	"log"
	"math/rand"
	"net/http"
	"time"
)
const RandomUpperThreshold int = 5
// concurrentHandler fans out three mock backend searches in parallel and
// writes their results, comma-separated, in arrival order.
func concurrentHandler(w http.ResponseWriter, r *http.Request) {
	receiver := make(chan string)
	// NOTE(review): per-request reseeding is unnecessary on Go 1.20+ (the
	// global source is auto-seeded) and rand.Seed is deprecated; kept to
	// preserve behavior on older toolchains — consider removing.
	rand.Seed(time.Now().UnixNano())
	go searchOnFacebook(receiver)
	go searchOnGoogle(receiver)
	go searchOnTwitter(receiver)
	response := <-receiver + ", " + <-receiver + ", " + <-receiver
	// Fixed: use Fprint, not Fprintf — response is data, not a format
	// string, so a literal '%' must not be interpreted as a verb (go vet
	// flags non-constant format strings).
	fmt.Fprint(w, response)
}
// searchOnFacebook logs a pipe-delimited access record, sleeps a random
// 0 to RandomUpperThreshold-1 seconds to simulate latency, then delivers
// its result on ch.
func searchOnFacebook(ch chan string) {
	fmt.Printf("%d|%s|%s|%d|%d|1|FF0000\n", time.Now().Unix(), "127.0.0.1", "/facebook", 200, 2048)
	// Fixed: multiply the Duration, not raw ints — rand.Intn(n)*int(time.Second)
	// overflows on 32-bit platforms (int(time.Second) is 1e9).
	time.Sleep(time.Duration(rand.Intn(RandomUpperThreshold)) * time.Second)
	ch <- "from facebook"
}
// searchOnGoogle logs a pipe-delimited access record, sleeps a random
// 0 to RandomUpperThreshold-1 seconds to simulate latency, then delivers
// its result on ch.
func searchOnGoogle(ch chan string) {
	fmt.Printf("%d|%s|%s|%d|%d|1|00FF00\n", time.Now().Unix(), "127.0.0.1", "/google", 200, 2048)
	// Fixed: multiply the Duration, not raw ints — rand.Intn(n)*int(time.Second)
	// overflows on 32-bit platforms (int(time.Second) is 1e9).
	time.Sleep(time.Duration(rand.Intn(RandomUpperThreshold)) * time.Second)
	ch <- "from google"
}
// searchOnTwitter logs a pipe-delimited access record, sleeps a random
// 0 to RandomUpperThreshold-1 seconds to simulate latency, then delivers
// its result on ch.
func searchOnTwitter(ch chan string) {
	fmt.Printf("%d|%s|%s|%d|%d|1|0000FF\n", time.Now().Unix(), "127.0.0.1", "/twitter", 200, 2048)
	// Fixed: multiply the Duration, not raw ints — rand.Intn(n)*int(time.Second)
	// overflows on 32-bit platforms (int(time.Second) is 1e9).
	time.Sleep(time.Duration(rand.Intn(RandomUpperThreshold)) * time.Second)
	ch <- "from twitter"
}
// main registers the demo handler and serves on :8082 until the server
// fails.
func main() {
	http.HandleFunc("/concurrent", concurrentHandler)
	// Fixed: ListenAndServe always returns a non-nil error; the original
	// silently discarded it, so bind failures went unnoticed.
	if err := http.ListenAndServe(":8082", nil); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package roachpb
import "sort"
type sortedSpans []Span
// Less orders spans first by start key, then by end key. A nil EndKey has
// length zero and therefore sorts before any non-empty EndKey, which the
// merge logic relies on.
func (s sortedSpans) Less(i, j int) bool {
	if byStart := s[i].Key.Compare(s[j].Key); byStart != 0 {
		return byStart < 0
	}
	return s[i].EndKey.Compare(s[j].EndKey) < 0
}
// Swap exchanges the spans at indexes i and j.
func (s sortedSpans) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Len returns the number of spans.
func (s sortedSpans) Len() int {
	return len(s)
}
// MergeSpans sorts the incoming spans and merges overlapping spans. Returns
// true iff all of the spans are distinct. Note that even if it returns true,
// adjacent spans might have been merged (i.e. [a, b) is distinct from [b,c),
// but the two are still merged.
//
// A span with an empty/nil EndKey is treated as a point key throughout.
//
// The input spans are not safe for re-use.
func MergeSpans(spans []Span) ([]Span, bool) {
	if len(spans) == 0 {
		return spans, true
	}
	sort.Sort(sortedSpans(spans))
	// We build up the resulting slice of merged spans in place. This is safe
	// because "r" grows by at most 1 element on each iteration, staying abreast
	// or behind the iteration over "spans".
	r := spans[:1]
	distinct := true
	for _, cur := range spans[1:] {
		prev := &r[len(r)-1]
		if len(cur.EndKey) == 0 && len(prev.EndKey) == 0 {
			if cur.Key.Compare(prev.Key) != 0 {
				// [a, nil] merge [b, nil]
				r = append(r, cur)
			} else {
				// [a, nil] merge [a, nil]
				distinct = false
			}
			continue
		}
		if len(prev.EndKey) == 0 {
			if cur.Key.Compare(prev.Key) == 0 {
				// [a, nil] merge [a, b]
				prev.EndKey = cur.EndKey
				distinct = false
			} else {
				// [a, nil] merge [b, c]
				r = append(r, cur)
			}
			continue
		}
		if c := prev.EndKey.Compare(cur.Key); c >= 0 {
			if cur.EndKey != nil {
				if prev.EndKey.Compare(cur.EndKey) < 0 {
					// [a, c] merge [b, d]
					prev.EndKey = cur.EndKey
					if c > 0 {
						// Strict overlap (not merely adjacency).
						distinct = false
					}
				} else {
					// [a, c] merge [b, c]
					distinct = false
				}
			} else if c == 0 {
				// [a, b] merge [b, nil]
				// The point key sits exactly at prev's boundary: extend prev
				// just past it. Adjacency only, so distinct is preserved.
				prev.EndKey = cur.Key.Next()
			} else {
				// [a, c] merge [b, nil]
				distinct = false
			}
			continue
		}
		// No overlap with prev: start a new output span.
		r = append(r, cur)
	}
	return r, distinct
}
// SubtractSpans subtracts the subspans covered by a set of non-overlapping
// spans from another set of non-overlapping spans.
//
// Specifically, it returns a non-overlapping set of spans that cover the spans
// covered by the minuend but not the subtrahend. For example, given a single
// minuend span [0, 10) and a subspan subtrahend [4,5) yields: [0, 4), [5, 10).
//
// The todo input is mutated during execution and is not safe for reuse after.
// The done input is left as-is and is safe for later reuse.
//
// Internally the minuend and subtrahend are labeled as "todo" and "done", i.e.
// conceptually it discusses them as a set of spans "to do" with a subset that
// has been "done" and need to be removed from the set "todo".
func SubtractSpans(todo, done Spans) Spans {
	if len(done) == 0 {
		return todo
	}
	// Sorting both sets lets a single forward sweep with two cursors (t, d)
	// process every overlap.
	sort.Sort(todo)
	sort.Sort(done)
	remaining := make(Spans, 0, len(todo))
	// appendRemaining adds s to the output, coalescing it with the previous
	// output span when the two abut exactly.
	appendRemaining := func(s Span) {
		if len(remaining) > 0 && remaining[len(remaining)-1].EndKey.Equal(s.Key) {
			remaining[len(remaining)-1].EndKey = s.EndKey
		} else {
			remaining = append(remaining, s)
		}
	}
	var d int
	var t int
	for t < len(todo) && d < len(done) {
		tStart, tEnd := todo[t].Key, todo[t].EndKey
		dStart, dEnd := done[d].Key, done[d].EndKey
		if tStart.Equal(tEnd) {
			// We've shrunk the todo span to nothing: pop it off and move on.
			t++
			continue
		}
		if dStart.Compare(tEnd) >= 0 {
			// Done span starts after todo span: todo is kept in its entirety.
			appendRemaining(todo[t])
			t++
			continue
		}
		if dEnd.Compare(tStart) <= 0 {
			// Done span isn't in todo at all, so pop it off and move on.
			d++
			continue
		}
		// At this point, we know that the two spans overlap.
		endCmp := dEnd.Compare(tEnd)
		if dStart.Compare(tStart) <= 0 {
			// The done span starts at or before the todo span starts.
			if endCmp < 0 {
				// Covers strict prefix of todo: pop done and shrink remaining todo.
				todo[t].Key = dEnd
				d++
			} else if endCmp > 0 {
				// Covers all of todo and more: pop todo, keep consuming done.
				t++
			} else {
				// cmp == 0 means exactly matches: pop both.
				t++
				d++
			}
		} else {
			// The beginning of todo is uncovered: split it to remaining.
			appendRemaining(Span{Key: tStart, EndKey: dStart})
			if endCmp < 0 {
				// There is todo uncovered after done: Pop done, shrink and keep todo.
				todo[t].Key = dEnd
				d++
			} else if endCmp > 0 {
				// Done covers beyond todo: pop todo, keep consuming done.
				t++
			} else {
				// cmp == 0: covers to end, uncovered prefix already copied: pop both.
				t++
				d++
			}
		}
	}
	// Just append anything that's left.
	if t < len(todo) {
		remaining = append(remaining, todo[t:]...)
	}
	return remaining
}
|
package main
import (
"io"
"net/url"
"strings"
"golang.org/x/net/html"
)
// getAllTagAttr scans the HTML document in content and collects the values
// of the requested attribute for every tag listed in tagAttr. For example,
// getAllTagAttr(map[string]string{"a": "href"}, f) gathers all href values
// of anchor tags. Values are whitespace-trimmed. Scanning stops at the first
// tokenizer error, which includes normal end-of-input.
// TODO: This parser implementation uses a html tokenizer. Alternatively, we could
// use regex expressions. Need benchmarks to compare results.
// TODO: It is potentially more efficient to return only new links. The design
// of the code is less clear that way. Need benchmarks to evaluate benefits.
func getAllTagAttr(tagAttr map[string]string, content io.Reader) []string {
	var values []string
	tokenizer := html.NewTokenizer(content)
	for tokenizer.Next() != html.ErrorToken {
		tok := tokenizer.Token()
		wanted, ok := tagAttr[tok.Data]
		if !ok {
			continue
		}
		for _, attr := range tok.Attr {
			if attr.Key == wanted {
				values = append(values, strings.TrimSpace(attr.Val))
			}
		}
	}
	return values
}
func getCanonicalURLString(urlString string, parentURL *url.URL) (string, error) {
//TODO: transform parameters to path segments; order arguments by index lex. order
//Full path
//TODO: process dots in paths...
//Remove Anchors #
lastIndexOf := strings.LastIndex(urlString, "#")
for lastIndexOf > 0 {
urlString = urlString[:len(urlString)-(len(urlString)-lastIndexOf)]
lastIndexOf = strings.LastIndex(urlString, "#")
}
if strings.Index(urlString, "http://") == 0 || strings.Index(urlString,
"https://") == 0 {
return urlString, nil
}
if strings.Index(urlString, "/") == 0 {
return parentURL.Scheme + "://" + parentURL.Hostname() + urlString, nil
}
//Relative path
return parentURL.Scheme + "://" + parentURL.Hostname() + "/" + urlString, nil
}
// toURL parses urlString into a *url.URL.
//TODO: after transforming urlParams in getCanonicalURLString,
// transform them back to URL.
func toURL(urlString string) (*url.URL, error) {
	parsed, err := url.Parse(urlString)
	return parsed, err
}
|
package taskqueue
import (
"github.com/RichardKnop/machinery/v1"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"machineryDemo/controller"
)
var configPath = "./machinery_config.yml"
// loadConfig reads machinery configuration from the YAML file named by
// configPath, falling back to environment variables when configPath is empty.
func loadConfig() (*config.Config, error) {
	if configPath == "" {
		return config.NewFromEnvironment(false)
	}
	return config.NewFromYaml(configPath, false)
}
// startServer loads configuration, constructs a machinery server, and
// registers the student CRUD handlers under the task names "post", "delete",
// "put", and "get".
func startServer() (*machinery.Server, error) {
	cnf, err := loadConfig()
	if err != nil {
		return nil, err
	}
	queueServer, err := machinery.NewServer(cnf)
	if err != nil {
		return nil, err
	}
	// Renamed from "tasks": that name shadowed the imported machinery
	// "tasks" package within this function.
	taskHandlers := map[string]interface{}{
		"post":   controller.PostStudent,
		"delete": controller.DeleteStudent,
		"put":    controller.UpdateStudent,
		"get":    controller.GetStudent,
	}
	return queueServer, queueServer.RegisterTasks(taskHandlers)
}
// Worker starts a machinery worker tagged "machinery_worker" that consumes
// the registered tasks; it blocks in Launch until the worker exits.
func Worker() error {
	server, err := startServer()
	if err != nil {
		return err
	}
	const consumerTag = "machinery_worker"
	worker := server.NewWorker(consumerTag, 0)
	return worker.Launch()
}
// Send dispatches the given task signature to the machinery server, logging
// progress; it returns any setup or send error. The original re-checked a
// stale, already-handled err after startServer behind a misleading
// "generating batch id" message — that dead branch is removed.
func Send(signature tasks.Signature) error {
	server, err := startServer()
	if err != nil {
		return err
	}
	log.INFO.Println("InitTasks successful")
	UUID := signature.UUID
	log.INFO.Println("Starting batch:", UUID)
	if _, err = server.SendTask(&signature); err != nil {
		log.ERROR.Printf("Could not send task: %s", err.Error())
		return err
	}
	//results, err := asyncResult.Get(time.Duration(time.Second * 5))
	//if err != nil {
	//	log.ERROR.Printf("Getting task result failed with error:%s", err.Error())
	//	return err
	//}
	//log.INFO.Printf("%v\n", tasks.HumanReadableResults(results))
	return nil
}
|
package cpu
import (
"encoding/binary"
"fmt"
"log"
"github.com/alexflk/nes/mem"
)
// CPU models a 6502-style processor core: a 256-entry opcode dispatch
// table, an embedded memory interface for bus access, and the register
// state in cpuState.
type CPU struct {
	table [256]Instruction
	mem.MemoryInterface
	cpuState
}
// cpuState holds the register file and the running cycle count.
type cpuState struct {
	pc uint16 // program counter
	sp byte // stack pointer
	a byte // accumulator
	x byte // index register x
	y byte // index register y
	p byte // flags
	cycles int // number of cycles
}
// Step executes one CPU step: fetch the instruction bytes at pc, decode the
// operand address, advance pc, execute, and account cycles (including any
// page-crossing penalty from decoding).
func (cpu *CPU) Step() {
	// Read bytes of instruction and find size
	insBytes := cpu.ReadInsBytes(cpu.pc)
	// Translate insBytes into an instruction
	ins := cpu.table[insBytes[0]]
	// Find the address associated with an instruction and any
	// extra cycles associated with crossing a page boundary
	address, extraCycles := cpu.DecodeAddress(ins)
	// Log
	cpu.Log(ins, insBytes)
	// Increase the program counter (do it before executing to
	// prevent jmps from being messed up)
	cpu.pc += uint16(len(insBytes))
	// execute an instruction and return the number of cycles used
	cycles := cpu.Execute(ins, address)
	// increase cycles
	cpu.cycles += cycles + extraCycles
}
// NewCPU builds a CPU with a populated instruction table, attaches the
// given memory implementation, and resets registers to their initial state.
func NewCPU(mmu mem.MemoryInterface) *CPU {
	c := &CPU{}
	c.buildTable()
	c.LinkMMU(mmu)
	c.Reset()
	return c
}
// Reset restores the power-on register state: sp=0xfd, p=0x24, and all
// index/accumulator registers zeroed.
// NOTE(review): pc is forced to 0xc000, which matches the nestest ROM's
// automation entry point rather than reading the reset vector at
// 0xfffc/0xfffd — confirm this is intended outside test-ROM runs.
func (cpu *CPU) Reset() {
	cpu.sp = 0xfd
	cpu.pc = 0xc000
	cpu.p = 0x24
	cpu.a = 0
	cpu.x = 0
	cpu.y = 0
}
// LinkMMU attaches the given memory implementation as the CPU's bus.
func (cpu *CPU) LinkMMU(mmu mem.MemoryInterface) {
	cpu.MemoryInterface = mmu
}
// ReadInsBytes reads three bytes starting at addr and truncates the slice to
// the instruction's actual size per the OpSize table (indexed by opcode).
// Memory errors are fatal.
// NOTE(review): three bytes are always fetched even for shorter
// instructions, so an instruction near the top of addressable memory reads
// past its own end — confirm the memory implementation tolerates this.
func (cpu *CPU) ReadInsBytes(addr uint16) []byte {
	b := make([]byte, 3)
	if _, err := cpu.MemoryInterface.ReadAtAddr(b, addr); err != nil {
		log.Fatalf("Memory access error: %v", err)
	}
	// Cut off extra arguments
	b = b[0:OpSize[b[0]]]
	return b
}
// DecodeAddress resolves the operand "address" for ins based on its
// addressing mode. The second return value is reserved for extra cycles on
// page crossings but is currently always 0.
//
// NOTE(review): several modes deviate from stock 6502 semantics and should
// be confirmed against the instruction implementations:
//   - modeImmediate returns the operand byte itself as the "address".
//   - modeZeroPage/X/Y dereference the zero-page location and return the
//     byte stored there (plus the index), instead of returning the
//     zero-page location itself; the X/Y add also happens after the read
//     and without zero-page wraparound.
func (cpu *CPU) DecodeAddress(ins Instruction) (uint16, int) {
	switch ins.AddressMode {
	case modeImplicit:
		return 0, 0
	case modeAccumulator:
		return 0, 0
	case modeImmediate:
		addr := cpu.ReadMemByte(cpu.pc + 1)
		return uint16(addr), 0
	case modeZeroPage:
		ind := uint16(cpu.ReadMemByte(cpu.pc + 1))
		addr := cpu.ReadMemByte(ind)
		return uint16(addr), 0
	case modeZeroPageX:
		ind := uint16(cpu.ReadMemByte(cpu.pc + 1))
		addr := cpu.ReadMemByte(ind)
		return uint16(addr + cpu.x), 0
	case modeZeroPageY:
		ind := uint16(cpu.ReadMemByte(cpu.pc + 1))
		addr := cpu.ReadMemByte(ind)
		return uint16(addr + cpu.y), 0
	case modeRelative:
		addr := cpu.ReadMemByte(cpu.pc + 1)
		// Sign-extend the offset so backward branches wrap correctly.
		offset := uint16(int8(addr))
		return cpu.pc + 2 + offset, 0
	case modeAbsolute:
		addr := cpu.ReadMemWord(cpu.pc + 1)
		return addr, 0
	case modeAbsoluteX:
		addr := cpu.ReadMemWord(cpu.pc + 1)
		return addr + uint16(cpu.x), 0
	case modeAbsoluteY:
		addr := cpu.ReadMemWord(cpu.pc + 1)
		return addr + uint16(cpu.y), 0
	case modeIndirect:
		ind := cpu.ReadMemWord(cpu.pc + 1)
		addr := cpu.ReadMemWord(ind)
		return addr, 0
	case modeIndexedIndirect:
		// Byte addition wraps within the zero page, as on real hardware.
		ind := cpu.ReadMemByte(cpu.pc + 1)
		addr := cpu.ReadMemWord(uint16(ind + cpu.x))
		return addr, 0
	case modeIndirectIndex:
		ind := cpu.ReadMemByte(cpu.pc + 1)
		addr := cpu.ReadMemWord(uint16(ind))
		return addr + uint16(cpu.y), 0
	default:
		log.Fatalf("Unknown address mode: %v", ins.AddressMode)
		return 0, 0
	}
}
// Log writes a nestest-style trace line: pc, raw instruction bytes, the
// mnemonic, the formatted operand, and the register state.
func (cpu *CPU) Log(ins Instruction, b []byte) {
	aString := AddressString(b, ins.AddressMode)
	rString := cpu.RegisterString()
	s := fmt.Sprintf("%04X % -8X %v %-26s %s", cpu.pc, b, ins.Description, aString, rString)
	log.Print(s)
}
// RegisterString formats the register file and cycle count for trace logs.
func (cpu *CPU) RegisterString() string {
	return fmt.Sprintf("A:%02X X:%02X Y:%02X P:%02X SP:%02X CYC:%3d", cpu.a, cpu.x, cpu.y,
		cpu.p, cpu.sp, cpu.cycles)
}
// ReadMemWord reads two bytes at addr and decodes them as a little-endian
// 16-bit word. Memory errors are fatal.
func (cpu *CPU) ReadMemWord(addr uint16) uint16 {
	buf := make([]byte, 2)
	if _, err := cpu.MemoryInterface.ReadAtAddr(buf, addr); err != nil {
		log.Fatalf("Memory access error: %v", err)
	}
	return binary.LittleEndian.Uint16(buf)
}
// ReadMemByte reads the single byte stored at addr. Memory errors are fatal.
func (cpu *CPU) ReadMemByte(addr uint16) byte {
	buf := []byte{0}
	if _, err := cpu.MemoryInterface.ReadAtAddr(buf, addr); err != nil {
		log.Fatalf("Memory access error: %v", err)
	}
	return buf[0]
}
// WriteMemWord stores val at addr as two little-endian bytes. Memory errors
// are fatal.
// Fixed: the original serialized addr instead of val, so the value argument
// was silently ignored and the target address's own bits were written to
// memory.
func (cpu *CPU) WriteMemWord(val uint16, addr uint16) {
	writeBuffer := make([]byte, 2)
	binary.LittleEndian.PutUint16(writeBuffer, val)
	if _, err := cpu.WriteAtAddr(writeBuffer, addr); err != nil {
		log.Fatal(err)
	}
}
// WriteMemByte stores the single byte val at addr. Memory errors are fatal.
func (cpu *CPU) WriteMemByte(val byte, addr uint16) {
	if _, err := cpu.WriteAtAddr([]byte{val}, addr); err != nil {
		log.Fatal(err)
	}
}
// Execute performs a single instruction and returns the number of cycles used.
// Any page-crossing penalty is accounted separately by the caller (Step).
func (cpu *CPU) Execute(ins Instruction, addr uint16) int {
	ins.f(addr)
	return ins.Cycles
}
// getFlagBit reports whether bit pos of the status register p is set.
// Simplified: the original tested the masked value with an if/else that
// returned literal booleans; the comparison already is the boolean.
func (cpu *CPU) getFlagBit(pos uint) bool {
	return cpu.p&(byte(1)<<pos) != 0
}
// setFlagBit sets (b == true) or clears (b == false) bit pos of the status
// register p.
func (cpu *CPU) setFlagBit(b bool, pos uint) {
	mask := byte(1) << pos
	if !b {
		cpu.p &^= mask
		return
	}
	cpu.p |= mask
}
// Status-register accessors. Bit assignments used below (6502 P register):
// 0 carry, 1 zero, 2 interrupt disable, 3 decimal, 6 overflow, 7 negative.
func (cpu *CPU) carry() bool {
	return cpu.getFlagBit(0)
}
func (cpu *CPU) setCarry(b bool) {
	cpu.setFlagBit(b, 0)
}
func (cpu *CPU) zero() bool {
	return cpu.getFlagBit(1)
}
func (cpu *CPU) setZero(b bool) {
	cpu.setFlagBit(b, 1)
}
func (cpu *CPU) interrupt() bool {
	return cpu.getFlagBit(2)
}
func (cpu *CPU) setInterrupt(b bool) {
	cpu.setFlagBit(b, 2)
}
func (cpu *CPU) decimal() bool {
	return cpu.getFlagBit(3)
}
func (cpu *CPU) setDecimal(b bool) {
	cpu.setFlagBit(b, 3)
}
func (cpu *CPU) overflow() bool {
	return cpu.getFlagBit(6)
}
func (cpu *CPU) setOverflow(b bool) {
	cpu.setFlagBit(b, 6)
}
func (cpu *CPU) negative() bool {
	return cpu.getFlagBit(7)
}
func (cpu *CPU) setNegative(b bool) {
	cpu.setFlagBit(b, 7)
}
// AddressString renders the instruction bytes b (opcode plus operands) for
// addressing mode m in conventional 6502 assembly notation, e.g. "#10",
// "$FF,X", "($1234),Y".
// NOTE(review): the indexed-indirect and indirect-indexed cases decode
// b[1:3] as a 16-bit operand, but on a stock 6502 those modes carry a
// single operand byte; if OpSize sizes them at 2, this slices into the
// buffer's spare capacity and may print a stale byte — confirm against the
// OpSize table.
func AddressString(b []byte, m AddrMode) string {
	switch m {
	case modeImplicit:
		return ""
	case modeAccumulator:
		return "A"
	case modeImmediate:
		return fmt.Sprintf("#%02X", b[1])
	case modeZeroPage:
		return fmt.Sprintf("$%02X", b[1])
	case modeZeroPageX:
		return fmt.Sprintf("$%02X,X", b[1])
	case modeZeroPageY:
		return fmt.Sprintf("$%02X,Y", b[1])
	case modeRelative:
		// Branch offsets are printed as signed displacements.
		return fmt.Sprintf("*%+d", b[1])
	case modeAbsolute:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("$%X", i)
	case modeAbsoluteX:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("$%X,X", i)
	case modeAbsoluteY:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("$%X,Y", i)
	case modeIndirect:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("($%X)", i)
	case modeIndexedIndirect:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("($%X,X)", i)
	case modeIndirectIndex:
		i := binary.LittleEndian.Uint16(b[1:3])
		return fmt.Sprintf("($%X),Y", i)
	default:
		log.Fatalf("Unknown address mode: %v", m)
		return ""
	}
}
|
// Copyright 2021 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package document
import (
"encoding/json"
"encoding/xml"
"github.com/moov-io/iso20022/pkg/utils"
"io/ioutil"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestJsonXmlWithDocumentCamt05500109 verifies a camt.055.001.09 document
// round-trips losslessly: each fixture (XML and JSON) unmarshals into a
// valid document that re-marshals to both fixture texts (modulo CRLF).
// The previously duplicated XML-path/JSON-path bodies are folded into one
// closure.
func TestJsonXmlWithDocumentCamt05500109(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_camt_v09.xml"))
	assert.Equal(t, nil, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_camt_v09.json"))
	assert.Equal(t, nil, err)
	// Fixtures may be checked out with CRLF endings; marshaling emits LF.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	// roundTrip decodes input into a fresh document via unmarshal, validates
	// it, and asserts both serializations match the fixtures.
	roundTrip := func(unmarshal func([]byte, interface{}) error, input []byte) {
		doc, err := NewDocument(utils.DocumentCamt05500109NameSpace)
		assert.Equal(t, nil, err)
		assert.Equal(t, nil, unmarshal(input, doc))
		assert.Equal(t, nil, doc.Validate())
		buf, err := xml.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectXml, string(buf))
		buf, err = json.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectJson, string(buf))
	}
	roundTrip(xml.Unmarshal, inputXml)
	roundTrip(json.Unmarshal, inputJson)
}
// TestJsonXmlWithDocumentAcmt00700103 verifies an acmt.007.001.03 document
// round-trips losslessly from both its XML and JSON fixtures (modulo CRLF).
// The previously duplicated XML-path/JSON-path bodies are folded into one
// closure.
func TestJsonXmlWithDocumentAcmt00700103(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_acmt_v03.xml"))
	assert.Equal(t, nil, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_acmt_v03.json"))
	assert.Equal(t, nil, err)
	// Fixtures may be checked out with CRLF endings; marshaling emits LF.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	// roundTrip decodes input into a fresh document via unmarshal, validates
	// it, and asserts both serializations match the fixtures.
	roundTrip := func(unmarshal func([]byte, interface{}) error, input []byte) {
		doc, err := NewDocument(utils.DocumentAcmt00700103NameSpace)
		assert.Equal(t, nil, err)
		assert.Equal(t, nil, unmarshal(input, doc))
		assert.Equal(t, nil, doc.Validate())
		buf, err := xml.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectXml, string(buf))
		buf, err = json.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectJson, string(buf))
	}
	roundTrip(xml.Unmarshal, inputXml)
	roundTrip(json.Unmarshal, inputJson)
}
// TestJsonXmlWithDocumentAuth01800102 verifies an auth.018.001.02 document
// round-trips losslessly from both its XML and JSON fixtures (modulo CRLF).
// The previously duplicated XML-path/JSON-path bodies are folded into one
// closure.
func TestJsonXmlWithDocumentAuth01800102(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_auth_v02.xml"))
	assert.Equal(t, nil, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_auth_v02.json"))
	assert.Equal(t, nil, err)
	// Fixtures may be checked out with CRLF endings; marshaling emits LF.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	// roundTrip decodes input into a fresh document via unmarshal, validates
	// it, and asserts both serializations match the fixtures.
	roundTrip := func(unmarshal func([]byte, interface{}) error, input []byte) {
		doc, err := NewDocument(utils.DocumentAuth01800102NameSpace)
		assert.Equal(t, nil, err)
		assert.Equal(t, nil, unmarshal(input, doc))
		assert.Equal(t, nil, doc.Validate())
		buf, err := xml.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectXml, string(buf))
		buf, err = json.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectJson, string(buf))
	}
	roundTrip(xml.Unmarshal, inputXml)
	roundTrip(json.Unmarshal, inputJson)
}
// TestJsonXmlWithDocumentPacs00200111 verifies a pacs.002.001.11 document
// round-trips losslessly from both its XML and JSON fixtures (modulo CRLF).
// The previously duplicated XML-path/JSON-path bodies are folded into one
// closure.
func TestJsonXmlWithDocumentPacs00200111(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_pacs_v11.xml"))
	assert.Equal(t, nil, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_pacs_v11.json"))
	assert.Equal(t, nil, err)
	// Fixtures may be checked out with CRLF endings; marshaling emits LF.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	// roundTrip decodes input into a fresh document via unmarshal, validates
	// it, and asserts both serializations match the fixtures.
	roundTrip := func(unmarshal func([]byte, interface{}) error, input []byte) {
		doc, err := NewDocument(utils.DocumentPacs00200111NameSpace)
		assert.Equal(t, nil, err)
		assert.Equal(t, nil, unmarshal(input, doc))
		assert.Equal(t, nil, doc.Validate())
		buf, err := xml.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectXml, string(buf))
		buf, err = json.MarshalIndent(doc, "", "\t")
		assert.Equal(t, nil, err)
		assert.Equal(t, expectJson, string(buf))
	}
	roundTrip(xml.Unmarshal, inputXml)
	roundTrip(json.Unmarshal, inputJson)
}
// TestJsonXmlWithDocumentPain00200111 round-trips the valid pain.002.001.11
// fixtures through XML and JSON; re-marshaled output must match the fixtures.
// Nil checks use assert.Nil for consistency with the other tests in this file.
func TestJsonXmlWithDocumentPain00200111(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_pain_v11.xml"))
	assert.Nil(t, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_pain_v11.json"))
	assert.Nil(t, err)

	// XML -> document -> XML/JSON.
	doc, err := NewDocument(utils.DocumentPain00200111NameSpace)
	assert.Nil(t, err)
	err = xml.Unmarshal(inputXml, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())

	// Normalize CRLF so the comparison is independent of checkout settings.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	buf, err := xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))

	// JSON -> document -> XML/JSON.
	doc, err = NewDocument(utils.DocumentPain00200111NameSpace)
	assert.Nil(t, err)
	err = json.Unmarshal(inputJson, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())
	buf, err = xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))
}
// TestJsonXmlWithDocumentReda06600101 round-trips the valid reda.066.001.01
// fixtures through XML and JSON; re-marshaled output must match the fixtures.
// Nil checks use assert.Nil for consistency with the other tests in this file.
func TestJsonXmlWithDocumentReda06600101(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_reda_v01.xml"))
	assert.Nil(t, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_reda_v01.json"))
	assert.Nil(t, err)

	// XML -> document -> XML/JSON.
	doc, err := NewDocument(utils.DocumentReda06600101NameSpace)
	assert.Nil(t, err)
	err = xml.Unmarshal(inputXml, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())

	// Normalize CRLF so the comparison is independent of checkout settings.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	buf, err := xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))

	// JSON -> document -> XML/JSON.
	doc, err = NewDocument(utils.DocumentReda06600101NameSpace)
	assert.Nil(t, err)
	err = json.Unmarshal(inputJson, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())
	buf, err = xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))
}
// TestJsonXmlWithDocumentRemt00100104 round-trips the valid remt.001.001.04
// fixtures through XML and JSON; re-marshaled output must match the fixtures.
// The previous version mixed assert.Nil with assert.Equal(t, nil, ...);
// normalized here to assert.Nil throughout.
func TestJsonXmlWithDocumentRemt00100104(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_remt_v04.xml"))
	assert.Nil(t, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_remt_v04.json"))
	assert.Nil(t, err)

	// XML -> document -> XML/JSON.
	doc, err := NewDocument(utils.DocumentRemt00100104NameSpace)
	assert.Nil(t, err)
	err = xml.Unmarshal(inputXml, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())

	// Normalize CRLF so the comparison is independent of checkout settings.
	expectXml := strings.ReplaceAll(string(inputXml), "\r\n", "\n")
	expectJson := strings.ReplaceAll(string(inputJson), "\r\n", "\n")
	buf, err := xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))

	// JSON -> document -> XML/JSON.
	doc, err = NewDocument(utils.DocumentRemt00100104NameSpace)
	assert.Nil(t, err)
	err = json.Unmarshal(inputJson, doc)
	assert.Nil(t, err)
	assert.Nil(t, doc.Validate())
	buf, err = xml.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectXml, string(buf))
	buf, err = json.MarshalIndent(doc, "", "\t")
	assert.Nil(t, err)
	assert.Equal(t, expectJson, string(buf))
}
// TestJsonXmlWithDummy parses the remt.001.001.04 fixtures through the generic
// ParseIso20022Document entry point for both JSON and XML input and validates
// the resulting documents. Nil checks normalized to assert.Nil.
func TestJsonXmlWithDummy(t *testing.T) {
	inputXml, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_remt_v04.xml"))
	assert.Nil(t, err)
	inputJson, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", "valid_remt_v04.json"))
	assert.Nil(t, err)

	var docInterface Iso20022Document
	docInterface, err = ParseIso20022Document(inputJson)
	assert.Nil(t, err)
	assert.NotNil(t, docInterface)
	assert.Nil(t, docInterface.Validate())

	docInterface, err = ParseIso20022Document(inputXml)
	assert.Nil(t, err)
	assert.Nil(t, docInterface.Validate())
}
// TestJsonXmlWithFiles drives ParseIso20022Document over three fixture
// categories: supported documents (must parse and validate), documents with
// an unsupported namespace, and inputs with a missing namespace or invalid
// file type (each must fail with its specific error message).
// Nil checks normalized to assert.Nil throughout.
func TestJsonXmlWithFiles(t *testing.T) {
	// Supported documents: parse and validate cleanly.
	validFileList := []string{
		"valid_acmt_v03.xml",
		"valid_auth_v02.xml",
		"valid_camt_v09.xml",
		"valid_pacs_v11.xml",
		"valid_pain_v11.xml",
		"valid_reda_v01.xml",
		"valid_remt_v04.xml",
		"valid_acmt_v03.json",
		"valid_auth_v02.json",
		"valid_camt_v09.json",
		"valid_pacs_v11.json",
		"valid_pain_v11.json",
		"valid_reda_v01.json",
		"valid_remt_v04.json",
	}
	for _, fileName := range validFileList {
		input, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", fileName))
		assert.Nil(t, err)
		var docInterface Iso20022Document
		docInterface, err = ParseIso20022Document(input)
		assert.Nil(t, err)
		assert.Nil(t, docInterface.Validate())
	}
	// Well-formed documents whose namespace this package does not handle.
	unsupportedFileList := []string{
		"FI_camt_052_sample.xml.xml",
		"FI_camt_053_sample.xml.xml",
		"FI_camt_054_sample.xml.xml",
		"200519_camt.054-Debit_P_CH2909000000250094239_1110092692_0_2019042401501580.xml",
		"200519_camt.054-Credit_P_CH2909000000250094239_1110092691_0_2019042421291293.xml",
		"200519_camt.052_P_CH2909000000250094239_1110092686_0_2019042416072347.xml",
		"200519_camt.054_P_CH2909000000250094239_1111091335_0_2020061900081727.xml",
		"200924_camt.054_P_CH2909000000250094239_1111091335_0_2020061900081727.xml",
		"200924_camt.054_P_CH2909000000250094239_1110092703_0_2019042423412214.xml",
		"200519_camt054-ESR-ASR_P_CH2909000000250094239_1110092704_0_2019042500372179.xml",
		"camt.053_P_CH2909000000250094239_1110092698_0_2020112503071366.xml",
		"200519_camt054-chdd_p_ch2909000000250094239_1110097484_0_20190520700381159.xml",
		"pain002-chdd-cor1_p_ch2909000000250094239_1110097483_0_2018031317221082.xml",
		"pain008-musterfile.xml",
		"camt054-returns_p_ch5109000000250092291_1110097606_0_2020112500512470.xml",
		"200519_camt054-returns_p_ch2909000000250094239_1109800798_0_2019052023472022.xml",
		"200519_camt054-epo_p_ch5109000000250092291_1110097605_0_2019052003522556.xml",
		"200519_camt054-epo_p_ch5109000000250092291_1110097605_0_2019052103322231.xml",
		"pain002-epo_p_0_0_0_2018032614401842.xml",
		"pain002-epo_p_0_0_0_2018031510491259.xml",
		"pain002-epo_p_ch5109000000250092291_1110097605_0_2018031511252307.xml",
		"pain002-epo_p_ch2909000000250094239_1109800799_0_2018032612092784.xml",
		"musterfile_pain.001_Nov2020.xml",
		"gistfile1.xml",
		"statement_1.xml",
		"International_payment_RUB_naujas_1.xml",
		"International_payment_USD_naujas_1.xml",
		"sepa_payment_naujas_1.xml",
	}
	for _, fileName := range unsupportedFileList {
		input, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", fileName))
		assert.Nil(t, err)
		_, err = ParseIso20022Document(input)
		assert.NotNil(t, err)
		assert.Equal(t, "The namespace of document is unsupported", err.Error())
	}
	// Documents that do not declare a namespace at all.
	fileListWithoutNamespace := []string{
		"invalid_pain_v11.json",
		"invalid_pain_v11.xml",
	}
	for _, fileName := range fileListWithoutNamespace {
		input, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", fileName))
		assert.Nil(t, err)
		_, err = ParseIso20022Document(input)
		assert.NotNil(t, err)
		assert.Equal(t, "The namespace of document is omitted", err.Error())
	}
	// Inputs that are neither valid XML nor valid JSON.
	invalidFileList := []string{
		"invalid_file1",
		"invalid_file2",
	}
	for _, fileName := range invalidFileList {
		input, err := ioutil.ReadFile(filepath.Join("..", "..", "test", "testdata", fileName))
		assert.Nil(t, err)
		_, err = ParseIso20022Document(input)
		assert.NotNil(t, err)
		assert.Equal(t, "The type of file is invalid", err.Error())
	}
}
|
package problem0114
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// flatten rearranges the tree rooted at root into a right-leaning "linked
// list" in place: every Left pointer becomes nil and the Right pointers
// follow the preorder traversal of the original tree.
func flatten(root *TreeNode) {
	for node := root; node != nil; node = node.Right {
		if node.Left == nil {
			continue
		}
		// Splice the left subtree between node and its right subtree: the
		// preorder successor of node is the head of its left subtree, and the
		// old right subtree follows the left subtree's rightmost node.
		tail := rootRightTail(node.Left)
		tail.Right = node.Right
		node.Right = node.Left
		node.Left = nil
	}
}

// rootRightTail returns the right-most node reachable from root by following
// Right pointers. root must be non-nil.
func rootRightTail(root *TreeNode) *TreeNode {
	node := root
	for node.Right != nil {
		node = node.Right
	}
	return node
}
|
package innerSortImpl
/**
* @author liujun
* @version 1.0
* @date 2021-06-20 13:09
* @author—Email liujunfirst@outlook.com
* @blogURL https://blog.csdn.net/ljfirst
 * @description Insertion sort
*/
// InsertSort sorts integer slices with the insertion sort algorithm.
type InsertSort struct {
}

// SortMethod sorts array in ascending order, in place, and returns the same
// slice. A nil or single-element slice is returned unchanged.
func (s *InsertSort) SortMethod(array []int) []int {
	if array == nil || len(array) <= 1 {
		return array
	}
	// Grow a sorted prefix one element at a time, shifting larger elements
	// right to open a slot for the current value.
	for i := 1; i < len(array); i++ {
		value := array[i]
		slot := i
		for slot > 0 && value < array[slot-1] {
			array[slot] = array[slot-1]
			slot--
		}
		if slot != i {
			array[slot] = value
		}
	}
	return array
}
|
package spanner
import (
"cloud.google.com/go/spanner"
"context"
"fmt"
"github.com/chidakiyo/benkyo/spanner/models"
"github.com/google/uuid"
"github.com/greymd/ojichat/generator"
"log"
"sync"
"testing"
"time"
)
// Test_Yoしてみる reads every User row inside a read-only transaction and logs
// each one.
func Test_Yoしてみる(t *testing.T) {
	ctx := context.Background()
	tx := con.ReadOnlyTransaction()
	defer tx.Close()

	users, err := models.ReadUser(ctx, tx, spanner.AllKeys())
	if err != nil {
		t.Errorf("%v", err)
	}
	for _, u := range users {
		t.Logf("%v", u)
	}
}
// Test_tweet投稿してみる inserts a single tweet inside a read/write transaction,
// logging elapsed time at each step.
func Test_tweet投稿してみる(t *testing.T) {
	ctx := context.Background()
	start := time.Now()
	tt, err := con.ReadWriteTransaction(ctx, func(ctx context.Context, transaction *spanner.ReadWriteTransaction) error {
		log.Printf("■ %s", time.Since(start))
		text := message_gen()
		log.Printf("■ %s", time.Since(start))
		uuid, _ := uuid.NewRandom()
		log.Printf("■ %s", time.Since(start))
		tw := models.Tweet{
			ID:         "T_" + uuid.String(),
			UserID:     "U_74514912-1b78-4afb-8119-034f8e241e2b",
			Text:       text,
			CreatedAt:  time.Now(),
			ModifiedAt: time.Now(),
		}
		mut := tw.Insert(ctx)
		// Propagate the buffering error instead of discarding it, so a failed
		// BufferWrite does not silently commit an empty transaction.
		return transaction.BufferWrite([]*spanner.Mutation{mut})
	})
	log.Printf("■ %s", time.Since(start))
	t.Logf("%v, %v", tt, err)
}
// message_gen generates a random "ojichat" message with a fixed prefix.
// Generation errors are ignored; on error the prefix is returned with an
// empty body.
func message_gen() string {
	msg, _ := generator.Start(generator.Config{})
	return "[ojichat] " + msg
}
// tweet_uuid_gen returns a fresh Tweet primary key of the form "T_<uuid>".
// The UUID generation error is ignored.
func tweet_uuid_gen() string {
	id, _ := uuid.NewRandom()
	return "T_" + id.String()
}
// user_uuid_gen returns a fresh User primary key of the form "U_<uuid>".
// The UUID generation error is ignored.
func user_uuid_gen() string {
	id, _ := uuid.NewRandom()
	return "U_" + id.String()
}
// Test_データ大量に投入する bulk-loads tweets: for every existing user it produces
// 10000 mutations, funnels them through a channel, and a consumer goroutine
// commits them in batches of ~100 per transaction.
// FIXME: this pipeline is rough (consumer never exits, wg.Add(-n) from the
// consumer, channel closed before draining); kept as-is pending a rewrite.
func Test_データ大量に投入する(t *testing.T) {
	ctx := context.Background()
	tr := con.ReadOnlyTransaction()
	// Look up all existing accounts.
	users, err := models.ReadUser(ctx, tr, spanner.AllKeys())
	if err != nil {
		t.Errorf("%v", err)
	}
	tr.Close()
	var muChan = make(chan *spanner.Mutation, 10000)
	var wg sync.WaitGroup
	go func() {
		for {
			var tmp []*spanner.Mutation
		BRK:
			for {
				select {
				case m := <-muChan:
					tmp = append(tmp, m)
					if len(tmp) > 100 {
						break BRK
					}
				}
			}
			_, err := con.ReadWriteTransaction(ctx, func(c context.Context, tr *spanner.ReadWriteTransaction) error {
				return tr.BufferWrite(tmp)
			})
			if err != nil {
				t.Logf("commit fail %v", err)
			} else {
				wg.Add(len(tmp) * -1)
				t.Logf("commit success count:%d", len(tmp))
			}
		}
	}()
	for _, u := range users {
		for i := 0; i < 10000; i++ {
			user := u
			ctx := ctx
			wg.Add(1)
			func() {
				start := time.Now()
				text := message_gen()
				uuid := tweet_uuid_gen()
				now := time.Now()
				tw := models.Tweet{
					ID:         uuid,
					UserID:     user.ID,
					Text:       text,
					CreatedAt:  now,
					ModifiedAt: now,
				}
				mut := tw.Insert(ctx)
				muChan <- mut
				// Was "%P", an invalid fmt verb that printed "%!P(...)".
				// NOTE(review): err here is the stale ReadUser error from
				// above, not a per-insert error — confirm intent.
				t.Logf("time : %s [%v]", time.Since(start), err)
			}()
		}
	}
	close(muChan)
	wg.Wait()
}
// Benchmark_Spanner投入 measures inserting one generated tweet per iteration,
// each committed in its own read/write transaction.
func Benchmark_Spanner投入(b *testing.B) {
	ctx := context.Background()
	for i := 0; i < b.N; i++ {
		text := message_gen()
		id := tweet_uuid_gen()
		now := time.Now()
		tw := models.Tweet{
			ID:         id,
			UserID:     "U_74514912-1b78-4afb-8119-034f8e241e2b",
			Text:       text,
			CreatedAt:  now,
			ModifiedAt: now,
		}
		mut := tw.Insert(ctx)
		if _, err := con.ReadWriteTransaction(ctx, func(c context.Context, tr *spanner.ReadWriteTransaction) error {
			b.Logf("Insert %v", tw)
			return tr.BufferWrite([]*spanner.Mutation{mut})
		}); err != nil {
			b.Logf("%v", err)
		}
	}
}
// Benchmark_文字列生成パフォーマンス measures ojichat message generation throughput.
func Benchmark_文字列生成パフォーマンス(b *testing.B) {
	for n := 0; n < b.N; n++ {
		message_gen()
	}
}
// Benchmark_UUID生成パフォーマンス measures tweet-ID (UUID) generation throughput.
func Benchmark_UUID生成パフォーマンス(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tweet_uuid_gen()
	}
}
// insert_tweet writes one generated tweet for user in its own read/write
// transaction, logging the elapsed time and any commit error.
// NOTE: ctx is not the first parameter; signature kept for existing callers.
func insert_tweet(t *testing.T, ctx context.Context, user models.User) {
	t.Logf("insert : %s", user.ID)
	start := time.Now()
	text := message_gen()
	id := tweet_uuid_gen()
	now := time.Now()
	tw := models.Tweet{
		ID:         id,
		UserID:     user.ID,
		Text:       text,
		CreatedAt:  now,
		ModifiedAt: now,
	}
	mut := tw.Insert(ctx)
	_, err := con.ReadWriteTransaction(ctx, func(c context.Context, tr *spanner.ReadWriteTransaction) error {
		t.Logf("Insert %v", tw)
		return tr.BufferWrite([]*spanner.Mutation{mut})
	})
	// Was "%P", an invalid fmt verb that printed "%!P(...)" instead of the error.
	t.Logf("time : %s [%v]", time.Since(start), err)
}
// Test_アカウントを大量に投入する creates 10 users and commits them all in a single
// read/write transaction, logging per-user and total build times.
func Test_アカウントを大量に投入する(t *testing.T) {
	ctx := context.Background()
	var mutations []*spanner.Mutation
	start0 := time.Now()
	for i := 0; i < 10; i++ {
		start := time.Now()
		u := models.User{
			ID:         user_uuid_gen(),
			UserID:     fmt.Sprintf("U%d", i),
			Email:      fmt.Sprintf("U%d@example.com", i),
			Password:   "password",
			CreatedAt:  time.Now(),
			ModifiedAt: time.Now(),
		}
		mutations = append(mutations, u.Insert(ctx))
		t.Logf("time : %s", time.Since(start))
	}
	_, err := con.ReadWriteTransaction(ctx, func(c context.Context, tr *spanner.ReadWriteTransaction) error {
		return tr.BufferWrite(mutations)
	})
	if err != nil {
		t.Errorf("%v", err)
	}
	t.Logf("time : %s", time.Since(start0))
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
)
// GetFeatureFlagCourses Get the feature flag that applies to a given Account, Course, or User.
// The flag may be defined on the object, or it may be inherited from a parent
// account. You can look at the context_id and context_type of the returned object
// to determine which is the case. If these fields are missing, then the object
// is the global Canvas default.
// https://canvas.instructure.com/doc/api/feature_flags.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
// # Path.Feature (Required) ID
//
// GetFeatureFlagCourses carries the path parameters of the request.
type GetFeatureFlagCourses struct {
	Path struct {
		CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
		Feature  string `json:"feature" url:"feature,omitempty"`     // (Required)
	} `json:"path"`
}

// GetMethod returns the HTTP method used by this request.
func (t *GetFeatureFlagCourses) GetMethod() string {
	return "GET"
}

// GetURLPath interpolates the path parameters into the endpoint path.
func (t *GetFeatureFlagCourses) GetURLPath() string {
	path := "courses/{course_id}/features/flags/{feature}"
	path = strings.ReplaceAll(path, "{course_id}", fmt.Sprintf("%v", t.Path.CourseID))
	path = strings.ReplaceAll(path, "{feature}", fmt.Sprintf("%v", t.Path.Feature))
	return path
}

// GetQuery returns the query string; this request has none.
func (t *GetFeatureFlagCourses) GetQuery() (string, error) {
	return "", nil
}

// GetBody returns the form body; this request has none.
func (t *GetFeatureFlagCourses) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns the JSON body; this request has none.
func (t *GetFeatureFlagCourses) GetJSON() ([]byte, error) {
	return nil, nil
}

// HasErrors validates the required path parameters, returning a single error
// listing every missing one, or nil when the request is complete.
func (t *GetFeatureFlagCourses) HasErrors() error {
	errs := []string{}
	if t.Path.CourseID == "" {
		errs = append(errs, "'Path.CourseID' is required")
	}
	if t.Path.Feature == "" {
		errs = append(errs, "'Path.Feature' is required")
	}
	if len(errs) > 0 {
		// Explicit %s format: passing the joined string directly to fmt.Errorf
		// is a non-constant format string (flagged by go vet) and would mangle
		// any '%' contained in the message.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends the request through the given Canvas client and decodes the
// response body into a FeatureFlag.
func (t *GetFeatureFlagCourses) Do(c *canvasapi.Canvas) (*models.FeatureFlag, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}

	ret := models.FeatureFlag{}
	if err := json.Unmarshal(body, &ret); err != nil {
		return nil, err
	}
	return &ret, nil
}
|
// Activity service.
package elemeOpenApi
// CreateShippingFeeActivity creates a delivery-fee reduction activity.
// createInfo_: activity creation payload; shopId_: shop ID.
func (shippingfee *Shippingfee) CreateShippingFeeActivity(createInfo_ interface{}, shopId_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"createInfo": createInfo_,
		"shopId":     shopId_,
	}
	return APIInterface(shippingfee.config, "eleme.activity.shippingFee.createShippingFeeActivity", params)
}
// InvalidShippingFeeActivity voids a delivery-fee reduction activity.
// activityId_: activity ID; shopId_: shop ID; comment_: reason for voiding.
func (shippingfee *Shippingfee) InvalidShippingFeeActivity(activityId_ int64, shopId_ int64, comment_ string) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
		"comment":    comment_,
	}
	return APIInterface(shippingfee.config, "eleme.activity.shippingFee.invalidShippingFeeActivity", params)
}
// QueryInvitedFoodActivities queries the food activities a shop has been
// invited to, by shop ID.
func (food *Food) QueryInvitedFoodActivities(shopId_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"shopId": shopId_,
	}
	return APIInterface(food.config, "eleme.activity.food.queryInvitedFoodActivities", params)
}
// ApplyFoodActivity signs a shop up for a food activity.
// activityId_: activity ID; activityApplyInfo_: sign-up details.
func (food *Food) ApplyFoodActivity(activityId_ int64, activityApplyInfo_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"activityId":        activityId_,
		"activityApplyInfo": activityApplyInfo_,
	}
	return APIInterface(food.config, "eleme.activity.food.applyFoodActivity", params)
}
// QueryFoodActivities pages through the food activities a shop has signed up
// for, by activity ID and shop ID.
// pageNo_: page number; pageSize_: items per page.
func (food *Food) QueryFoodActivities(activityId_ int64, shopId_ int64, pageNo_ int64, pageSize_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
		"pageNo":     pageNo_,
		"pageSize":   pageSize_,
	}
	return APIInterface(food.config, "eleme.activity.food.queryFoodActivities", params)
}
// UpdateFoodActivityItemStock updates the stock of a dish participating in a
// food activity.
// activityId_: activity ID; shopId_: shop ID; itemId_: dish ID; stock_: stock.
func (food *Food) UpdateFoodActivityItemStock(activityId_ int64, shopId_ int64, itemId_ int64, stock_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
		"itemId":     itemId_,
		"stock":      stock_,
	}
	return APIInterface(food.config, "eleme.activity.food.updateFoodActivityItemStock", params)
}
// OfflineFoodActivityItem withdraws a dish from a food activity.
// activityId_: activity ID; shopId_: shop ID; itemId_: dish ID.
func (food *Food) OfflineFoodActivityItem(activityId_ int64, shopId_ int64, itemId_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
		"itemId":     itemId_,
	}
	return APIInterface(food.config, "eleme.activity.food.offlineFoodActivityItem", params)
}
// UnbindFoodActivity voids the association between a shop and a food activity.
// activityId_: activity ID; shopId_: shop ID.
func (food *Food) UnbindFoodActivity(activityId_ int64, shopId_ int64) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
	}
	return APIInterface(food.config, "eleme.activity.food.unbindFoodActivity", params)
}
// PresentCoupon presents a targeted coupon to a user.
// shopId_: shop ID; mobile_: mobile number of the recipient;
// couponTemplate_: template of the targeted coupon.
func (coupon *Coupon) PresentCoupon(shopId_ int64, mobile_ string, couponTemplate_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"shopId":         shopId_,
		"mobile":         mobile_,
		"couponTemplate": couponTemplate_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentCoupon", params)
}
// HostShops enables the hosted single-shop coupon service.
// shopIds_: restaurant IDs, at most 20; hostedType_: service type, currently
// only "SUPER_VIP"; discounts_: deduction amount in [4,15] yuan, at most one
// decimal place.
func (coupon *Coupon) HostShops(shopIds_ interface{}, hostedType_ interface{}, discounts_ float64) (interface{}, error) {
	params := map[string]interface{}{
		"shopIds":    shopIds_,
		"hostedType": hostedType_,
		"discounts":  discounts_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.hostShops", params)
}
// QueryHostInfo queries the hosting status of the coupon service.
// shopIds_: restaurant IDs, at most 20; hostedType_: currently only "SUPER_VIP".
func (coupon *Coupon) QueryHostInfo(shopIds_ interface{}, hostedType_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"shopIds":    shopIds_,
		"hostedType": hostedType_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.queryHostInfo", params)
}
// UnhostShops cancels the hosted single-shop coupon service.
// shopIds_: restaurant IDs, at most 20; hostedType_: currently only "SUPER_VIP".
func (coupon *Coupon) UnhostShops(shopIds_ interface{}, hostedType_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"shopIds":    shopIds_,
		"hostedType": hostedType_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.unhostShops", params)
}
// RehostShop changes how a shop's coupon service is hosted.
// shopId_: shop ID; hostedType_: currently only "SUPER_VIP";
// oActivityServiceDetails_: service details.
func (coupon *Coupon) RehostShop(shopId_ int64, hostedType_ interface{}, oActivityServiceDetails_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"shopId":                  shopId_,
		"hostedType":              hostedType_,
		"oActivityServiceDetails": oActivityServiceDetails_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.rehostShop", params)
}
// PresentTargetCoupons presents targeted coupons (single-shop coupons).
// shopId_: shop ID; targetList_: recipients; targetListType_: recipient type;
// targetCouponDetail_: targeted coupon template details.
func (coupon *Coupon) PresentTargetCoupons(shopId_ int64, targetList_ interface{}, targetListType_ interface{}, targetCouponDetail_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"shopId":             shopId_,
		"targetList":         targetList_,
		"targetListType":     targetListType_,
		"targetCouponDetail": targetCouponDetail_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentTargetCoupons", params)
}
// PresentCommonTargetCoupons presents targeted generic coupons.
// chainId_: chain ID; targetList_: recipients; targetListType_: recipient
// type; commonTargetCouponDetail_: generic targeted coupon template details.
func (coupon *Coupon) PresentCommonTargetCoupons(chainId_ int64, targetList_ interface{}, targetListType_ interface{}, commonTargetCouponDetail_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"chainId":                  chainId_,
		"targetList":               targetList_,
		"targetListType":           targetListType_,
		"commonTargetCouponDetail": commonTargetCouponDetail_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentCommonTargetCoupons", params)
}
// QueryTargetCouponInfo pages through a shop's targeted-coupon records.
// targetCouponQueryRequest_: query parameters object.
func (coupon *Coupon) QueryTargetCouponInfo(targetCouponQueryRequest_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"targetCouponQueryRequest": targetCouponQueryRequest_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.queryTargetCouponInfo", params)
}
// PresentCommonTargetSkuCoupons presents targeted generic SKU coupons.
// chainId_: chain ID; targetList_: recipients; targetListType_: recipient
// type; commonTargetSkuCouponDetail_: generic targeted chain SKU coupon
// template details.
func (coupon *Coupon) PresentCommonTargetSkuCoupons(chainId_ int64, targetList_ interface{}, targetListType_ interface{}, commonTargetSkuCouponDetail_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"chainId":                     chainId_,
		"targetList":                  targetList_,
		"targetListType":              targetListType_,
		"commonTargetSkuCouponDetail": commonTargetSkuCouponDetail_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentCommonTargetSkuCoupons", params)
}
// PresentChainSkuCoupons presents targeted chain SKU coupons.
// chainId_: chain ID; targetList_: recipients; targetListType_: recipient
// type; chainSkuCouponDetail_: chain SKU coupon template details.
func (coupon *Coupon) PresentChainSkuCoupons(chainId_ int64, targetList_ interface{}, targetListType_ interface{}, chainSkuCouponDetail_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"chainId":              chainId_,
		"targetList":           targetList_,
		"targetListType":       targetListType_,
		"chainSkuCouponDetail": chainSkuCouponDetail_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentChainSkuCoupons", params)
}
// PresentSkuCoupons presents coupons for specific SKUs.
// targetList_: recipients; targetListType_: recipient type;
// skuCouponDetail_: SKU coupon template details.
func (coupon *Coupon) PresentSkuCoupons(targetList_ interface{}, targetListType_ interface{}, skuCouponDetail_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"targetList":      targetList_,
		"targetListType":  targetListType_,
		"skuCouponDetail": skuCouponDetail_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentSkuCoupons", params)
}
// UpdateCouponStatus changes coupon status.
// criteria_: coupon status update descriptor; type_: operation type.
func (coupon *Coupon) UpdateCouponStatus(criteria_ interface{}, type_ int) (interface{}, error) {
	params := map[string]interface{}{
		"criteria": criteria_,
		"type":     type_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.updateCouponStatus", params)
}
// CreateActivityTemplateV2 creates a coupon template.
// activityTemplate_: template creation object.
func (coupon *Coupon) CreateActivityTemplateV2(activityTemplate_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"activityTemplate": activityTemplate_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.createActivityTemplateV2", params)
}
// ModifyActivityTemplateV2 modifies a coupon template.
// updateModel_: template update object.
func (coupon *Coupon) ModifyActivityTemplateV2(updateModel_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"updateModel": updateModel_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.modifyActivityTemplateV2", params)
}
// InvalidActivityTemplateV2 voids a coupon template.
// invalidModel_: template voiding object.
func (coupon *Coupon) InvalidActivityTemplateV2(invalidModel_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"invalidModel": invalidModel_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.invalidActivityTemplateV2", params)
}
// QueryByTemplateIdV2 queries a coupon template.
// queryModel_: template query object.
func (coupon *Coupon) QueryByTemplateIdV2(queryModel_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"queryModel": queryModel_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.queryByTemplateIdV2", params)
}
// PresentCouponWithTemplateIdV2 issues coupons from an existing template.
// presentWithTemplateModel_: issuing object.
func (coupon *Coupon) PresentCouponWithTemplateIdV2(presentWithTemplateModel_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"presentWithTemplateModel": presentWithTemplateModel_,
	}
	return APIInterface(coupon.config, "eleme.activity.coupon.presentCouponWithTemplateIdV2", params)
}
// QueryOrderSubsidy queries the marketing-related data of an order.
// orderId_: ele.me order ID.
func (marketing *Marketing) QueryOrderSubsidy(orderId_ string) (interface{}, error) {
	params := map[string]interface{}{
		"orderId": orderId_,
	}
	return APIInterface(marketing.config, "eleme.activity.marketing.queryOrderSubsidy", params)
}
// CreateAndParticipateChainPriceActivity creates a chain special-price
// activity and binds the shop's items to it.
// activity_: activity creation info; chainId_: chain ID;
// shopApplyInfo_: item binding info.
func (skuchain *Skuchain) CreateAndParticipateChainPriceActivity(activity_ interface{}, chainId_ int64, shopApplyInfo_ interface{}) (interface{}, error) {
	params := map[string]interface{}{
		"activity":      activity_,
		"chainId":       chainId_,
		"shopApplyInfo": shopApplyInfo_,
	}
	return APIInterface(skuchain.config, "eleme.activity.skuchain.createAndParticipateChainPriceActivity", params)
}
// InValidSkuActivityById voids a participation by activity ID, shop ID, and
// item spec ID.
// activityId_: activity ID; shopId_: shop ID; specId_: item spec ID;
// comment_: reason for voiding.
func (skuchain *Skuchain) InValidSkuActivityById(activityId_ int64, shopId_ int64, specId_ int64, comment_ string) (interface{}, error) {
	params := map[string]interface{}{
		"activityId": activityId_,
		"shopId":     shopId_,
		"specId":     specId_,
		"comment":    comment_,
	}
	return APIInterface(skuchain.config, "eleme.activity.skuchain.inValidSkuActivityById", params)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shell
import (
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/google/gapid/core/log"
)
// Cmd holds the configuration to run an external command.
//
// A Cmd can be run any number of times, and new commands may be derived from existing ones.
type Cmd struct {
	// Name is the name of the command to run.
	Name string
	// Args is the arguments handed to the command, it should not include the command itself.
	Args []string
	// Target is the target to execute the command on.
	// If left as nil, this will default to LocalTarget.
	Target Target
	// Verbosity makes the command echo its stdout and stderr to the supplied logging context.
	// It will also log the command itself as it starts.
	Verbosity bool
	// Dir sets the working directory for the command.
	Dir string
	// Stdout is the writer to which the command will write its standard output if set.
	Stdout io.Writer
	// Stderr is the writer to which the command will write its standard error if set.
	Stderr io.Writer
	// Stdin is the reader from which the command will read its standard input if set.
	Stdin io.Reader
	// Environment is the process's environment, if set.
	Environment *Env
}
// Command returns a Cmd that will run name with the given arguments.
func Command(name string, args ...string) Cmd {
	cmd := Cmd{Name: name}
	cmd.Args = args
	return cmd
}
// On returns a copy of the Cmd with the Target set to target.
// The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) On(target Target) Cmd {
	cmd.Target = target
	return cmd
}
// Verbose returns a copy of the Cmd with the Verbosity flag set to true.
// The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) Verbose() Cmd {
	cmd.Verbosity = true
	return cmd
}
// In returns a copy of the Cmd with the working directory (Dir) set to dir.
// The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) In(dir string) Cmd {
	cmd.Dir = dir
	return cmd
}
// Capture returns a copy of the Cmd with Stdout and Stderr set to the given
// writers. The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) Capture(stdout, stderr io.Writer) Cmd {
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	return cmd
}
// Read returns a copy of the Cmd with Stdin set to the given reader.
// The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) Read(stdin io.Reader) Cmd {
	cmd.Stdin = stdin
	return cmd
}
// Env returns a copy of the Cmd with the Environment set to env.
// The receiver is a value, so the original Cmd is left unmodified.
func (cmd Cmd) Env(env *Env) Cmd {
	cmd.Environment = env
	return cmd
}
// With returns a copy of the Cmd whose Args is the original argument list
// followed by args. A fresh slice is built so the returned Cmd never aliases
// the receiver's arguments.
func (cmd Cmd) With(args ...string) Cmd {
	combined := make([]string, 0, len(cmd.Args)+len(args))
	combined = append(combined, cmd.Args...)
	combined = append(combined, args...)
	cmd.Args = combined
	return cmd
}
// Start executes the command and returns immediately.
// Before launching it resolves the Target (defaulting to LocalTarget) and, in
// verbose mode, tees stdout/stderr into the logging context.
func (cmd Cmd) Start(ctx context.Context) (Process, error) {
	// Deliberately a value receiver so the cmd object can be updated prior to execution
	if cmd.Target == nil {
		cmd.Target = LocalTarget
	} else if cmd.Target != LocalTarget {
		// Annotate log lines with the remote target.
		ctx = log.V{"On": cmd.Target}.Bind(ctx)
	}
	if cmd.Dir != "" {
		ctx = log.V{"Dir": cmd.Dir}.Bind(ctx)
	}
	// build our stdout and stderr handling
	var logStdout, logStderr io.WriteCloser
	if cmd.Verbosity {
		// Shadowed ctx: the process-name annotation applies only to the log writers.
		ctx := log.PutProcess(ctx, filepath.Base(cmd.Name))
		logStdout = log.From(ctx).Writer(log.Info)
		// NOTE(review): these log writers are closed when Start returns, while
		// the process may still be running — confirm the log writers tolerate
		// writes after Close, or that Close only detaches.
		defer logStdout.Close()
		if cmd.Stdout != nil {
			// Tee output to both the caller's writer and the log.
			cmd.Stdout = io.MultiWriter(cmd.Stdout, logStdout)
		} else {
			cmd.Stdout = logStdout
		}
		logStderr = log.From(ctx).Writer(log.Error)
		defer logStderr.Close()
		if cmd.Stderr != nil {
			cmd.Stderr = io.MultiWriter(cmd.Stderr, logStderr)
		} else {
			cmd.Stderr = logStderr
		}
	}
	// Ready to start
	if cmd.Verbosity {
		extra := ""
		if cmd.Dir != "" {
			extra = fmt.Sprintf(" In %v", cmd.Dir)
		}
		log.I(ctx, "Exec: %v%s", cmd, extra)
	}
	return cmd.Target.Start(cmd)
}
// Run executes the command, and blocks until it completes or the context is cancelled.
func (cmd Cmd) Run(ctx context.Context) error {
	process, err := cmd.Start(ctx)
	if err != nil {
		return log.From(ctx).Err(err, "Failed to start process")
	}
	if err := process.Wait(ctx); err != nil {
		return log.From(ctx).Err(err, "Process returned error")
	}
	return nil
}
// muxedBuffer is a bytes.Buffer whose writes are serialized by a mutex, so it
// can safely be used as a sink from multiple goroutines.
type muxedBuffer struct {
	buf   bytes.Buffer
	mutex sync.Mutex
}

// Write appends b to the buffer under the lock, satisfying io.Writer.
func (m *muxedBuffer) Write(b []byte) (int, error) {
	m.mutex.Lock()
	n, err := m.buf.Write(b)
	m.mutex.Unlock()
	return n, err
}
// Call executes the command, capturing its output.
// This is a helper for the common case where you want to run a command,
// capture all its output into a string and see if it succeeded.
func (cmd Cmd) Call(ctx context.Context) (string, error) {
	out := &muxedBuffer{}
	err := cmd.Capture(out, out).Run(ctx)
	return strings.TrimSpace(out.buf.String()), err
}
// Format implements fmt.Formatter, printing the command name followed by its
// arguments; any argument containing a space is wrapped in double quotes.
func (cmd Cmd) Format(f fmt.State, c rune) {
	fmt.Fprint(f, cmd.Name)
	for _, arg := range cmd.Args {
		if strings.ContainsRune(arg, ' ') {
			fmt.Fprint(f, ` "`, arg, `"`)
		} else {
			fmt.Fprint(f, " ", arg)
		}
	}
}
// SplitEnv splits the given environment variable string into key and values.
// The input has the form "key=val1<sep>val2" where <sep> is the OS path list
// separator. Only the FIRST '=' separates key from value, so values may
// themselves contain '=' (common in real environments). A string with no '='
// yields an empty key and nil values.
func SplitEnv(s string) (key string, vals []string) {
	// SplitN(…, 2) splits at the first '=' only; the previous Split+len==2
	// check wrongly rejected values containing '='.
	parts := strings.SplitN(s, "=", 2)
	if len(parts) != 2 {
		return "", nil
	}
	return parts[0], strings.Split(parts[1], string(os.PathListSeparator))
}
// JoinEnv combines the given key and values into an environment variable string.
func JoinEnv(key string, vals []string) string {
	var b strings.Builder
	b.WriteString(key)
	b.WriteByte('=')
	b.WriteString(strings.Join(vals, string(os.PathListSeparator)))
	return b.String()
}
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"errors"
)
// errFUSENotSupported is returned by every FUSE entry point on this platform.
var errFUSENotSupported = errors.New("FUSE is not supported on OpenBSD")

// SupportsFUSE is false on OpenBSD.
// It always returns errFUSENotSupported so callers can report why FUSE
// cannot be used.
func SupportsFUSE() error {
	return errFUSENotSupported
}

// fuseMount is the FUSE mount state; it is a placeholder on OpenBSD.
type fuseMount struct {
	// fuseDir is always an empty string on OpenBSD.
	fuseDir string
}

// The stubs below satisfy the FUSE-related Client API on OpenBSD: operations
// that would require FUSE report errFUSENotSupported, and the remaining
// lifecycle hooks are no-ops.
func configureFUSE(c *Client, conf *Config) (*Client, error) { return nil, errFUSENotSupported }
func (c *Client) fuseMounts() []*socketMount { return nil }
func (c *Client) serveFuse(ctx context.Context, notify func()) error { return errFUSENotSupported }
func (c *Client) unmountFUSE() error { return nil }
func (c *Client) waitForFUSEMounts() {}
|
package backup
import (
"testing"
"time"
"os"
. "github.com/bborbe/assert"
backup_host "github.com/bborbe/backup/host"
backup_rootdir "github.com/bborbe/backup/rootdir"
backup_testutil "github.com/bborbe/backup/testutil"
"github.com/golang/glog"
)
// TestMain runs the suite, flushes any buffered glog output, then exits with
// the suite's status code.
func TestMain(m *testing.M) {
	exit := m.Run()
	glog.Flush()
	os.Exit(exit)
}
// TestByTime checks that a backup created from a time.Time is named using
// the "2006-01-02T15:04:05" layout.
func TestByTime(t *testing.T) {
	rootdirName := backup_testutil.BACKUP_ROOT_DIR
	hostName := "hostname"
	h := backup_host.ByName(backup_rootdir.ByName(rootdirName), hostName)
	ti, err := time.Parse("2006-01-02T15:04:05", "2010-12-24T10:11:12")
	if err != nil {
		t.Fatal(err)
	}
	backup := ByTime(h, ti)
	err = AssertThat(backup.Name(), Is("2010-12-24T10:11:12"))
	if err != nil {
		t.Fatal(err)
	}
}
// TestImplementsBackup checks that the value returned by ByName satisfies
// the Backup interface.
func TestImplementsBackup(t *testing.T) {
	backup := ByName(backup_host.ByName(backup_rootdir.ByName("/rootdir"), "hostname"), "backupname")
	var expected *Backup
	err := AssertThat(backup, Implements(expected))
	if err != nil {
		t.Fatal(err)
	}
}

// TestName checks that a backup reports the name it was created with.
func TestName(t *testing.T) {
	backup := ByName(backup_host.ByName(backup_rootdir.ByName("/rootdir"), "hostname"), "backupname")
	err := AssertThat(backup.Name(), Is("backupname"))
	if err != nil {
		t.Fatal(err)
	}
}

// TestPath checks that a backup's path is <rootdir>/<host>/<backup>.
func TestPath(t *testing.T) {
	backup := ByName(backup_host.ByName(backup_rootdir.ByName("/rootdir"), "hostname"), "backupname")
	err := AssertThat(backup.Path(), Is("/rootdir/hostname/backupname"))
	if err != nil {
		t.Fatal(err)
	}
}
// TestValidBackupName checks validName: arbitrary strings are rejected and
// timestamp-shaped names are accepted.
// NOTE(review): "24" is not a valid hour, yet the name is accepted —
// validName presumably checks shape only, not calendar validity; confirm.
func TestValidBackupName(t *testing.T) {
	var err error
	err = AssertThat(validName("foo"), Is(false))
	if err != nil {
		t.Fatal(err)
	}
	err = AssertThat(validName("2013-12-12T24:15:59"), Is(true))
	if err != nil {
		t.Fatal(err)
	}
}
// TestResume exercises Resume on a host that has two complete backups and a
// "current" symlink: existsIncomplete must be false before Resume and true
// afterwards (Resume apparently creates an incomplete working directory —
// confirm against the implementation).
func TestResume(t *testing.T) {
	rootdirName := backup_testutil.BACKUP_ROOT_DIR
	hostName := "hostname"
	h := backup_host.ByName(backup_rootdir.ByName(rootdirName), hostName)
	backupNameA := "2014-12-24T13:14:15"
	backupNameB := ByTime(h, time.Now()).Name()
	var err error
	// Build a fresh root dir containing two backups and a "current" symlink
	// pointing at the newer one.
	err = backup_testutil.ClearRootDir(rootdirName)
	if err != nil {
		t.Fatal(err)
	}
	err = backup_testutil.CreateRootDir(rootdirName)
	if err != nil {
		t.Fatal(err)
	}
	err = backup_testutil.CreateHostDir(rootdirName, hostName)
	if err != nil {
		t.Fatal(err)
	}
	err = backup_testutil.CreateBackupDir(rootdirName, hostName, backupNameA)
	if err != nil {
		t.Fatal(err)
	}
	err = backup_testutil.CreateBackupDir(rootdirName, hostName, backupNameB)
	if err != nil {
		t.Fatal(err)
	}
	err = backup_testutil.CreateBackupCurrentSymlink(rootdirName, hostName, backupNameB)
	if err != nil {
		t.Fatal(err)
	}
	// Precondition: nothing incomplete exists yet.
	exists, err := existsIncomplete(h)
	if err != nil {
		t.Fatal(err)
	}
	err = AssertThat(exists, Is(false))
	if err != nil {
		t.Fatal(err)
	}
	err = Resume(h)
	err = AssertThat(err, NilValue())
	if err != nil {
		t.Fatal(err)
	}
	// Postcondition: Resume left an incomplete backup in place.
	exists, err = existsIncomplete(h)
	if err != nil {
		t.Fatal(err)
	}
	err = AssertThat(exists, Is(true))
	if err != nil {
		t.Fatal(err)
	}
}
|
package udwIpPacket
import (
"github.com/tachyon-protocol/udw/udwNet/udwDns/udwDnsPacket"
)
// TcpRstSameWay builds a TCP RST packet flowing in the SAME direction as the
// given packet (same src/dst addresses, ports and sequence numbers).
// The result reuses tmpBuf's storage when it has capacity.
func TcpRstSameWay(ipPacket IpPacket, tmpBuf []byte) IpPacket {
	ipLen := ipPacket.GetIpHeaderLen()
	// TCP header fields of the request, located right after its IP header.
	srcPortBuf := ipPacket.buf[ipLen+0 : ipLen+2]
	dstPortBuf := ipPacket.buf[ipLen+2 : ipLen+4]
	reqTcpSeqNumberBuf := ipPacket.buf[ipLen+4 : ipLen+8]
	reqTcpAckNumberBuf := ipPacket.buf[ipLen+8 : ipLen+12]
	// Start from the canned RST template, then patch addresses, ports and
	// sequence numbers. Offsets assume the template's 20-byte IPv4 header.
	tmpBuf = append(tmpBuf[:0], gTcpRstTemplate...)
	copy(tmpBuf[12:16], ipPacket.GetSrcIp())
	copy(tmpBuf[16:20], ipPacket.GetDstIp())
	copy(tmpBuf[20:22], srcPortBuf)
	copy(tmpBuf[22:24], dstPortBuf)
	copy(tmpBuf[24:28], reqTcpSeqNumberBuf)
	copy(tmpBuf[28:32], reqTcpAckNumberBuf)
	// NOTE(review): IP/TCP checksums stay as the template's zeros here —
	// presumably recomputed elsewhere before sending; confirm.
	return IpPacket{
		buf: tmpBuf,
	}
}
// TcpRstAnotherWay builds a TCP RST flowing in the OPPOSITE direction of the
// given packet (addresses, ports and seq/ack swapped), i.e. a reply that
// resets the sender's connection. The result reuses tmpBuf's storage when it
// has capacity.
func TcpRstAnotherWay(ipPacket IpPacket, tmpBuf []byte) IpPacket {
	ipLen := ipPacket.GetIpHeaderLen()
	srcPortBuf := ipPacket.buf[ipLen+0 : ipLen+2]
	dstPortBuf := ipPacket.buf[ipLen+2 : ipLen+4]
	reqTcpSeqNumberBuf := ipPacket.buf[ipLen+4 : ipLen+8]
	reqTcpAckNumberBuf := ipPacket.buf[ipLen+8 : ipLen+12]
	if ipPacket.GetTcpFlagSyn() && ipPacket.GetTcpFlagAck() == false {
		// A RST answering a bare SYN must acknowledge ISN+1: increment the
		// 32-bit big-endian sequence number in place, propagating the carry.
		// NOTE(review): this mutates ipPacket's own buffer (the slice
		// aliases it) — confirm callers do not reuse the packet afterwards.
		for i := 3; i >= 0; i-- {
			reqTcpSeqNumberBuf[i]++
			if reqTcpSeqNumberBuf[i] != 0 {
				break
			}
		}
	}
	tmpBuf = append(tmpBuf[:0], gTcpRstTemplate...)
	// Swap direction: the reply's source is the request's destination.
	copy(tmpBuf[12:16], ipPacket.GetDstIp())
	copy(tmpBuf[16:20], ipPacket.GetSrcIp())
	copy(tmpBuf[20:22], dstPortBuf)
	copy(tmpBuf[22:24], srcPortBuf)
	copy(tmpBuf[24:28], reqTcpAckNumberBuf)
	copy(tmpBuf[28:32], reqTcpSeqNumberBuf)
	outIpPacket := IpPacket{
		buf: tmpBuf,
	}
	return outIpPacket
}
// gTcpRstTemplate is a prebuilt 40-byte IPv4+TCP packet used as the base for
// RST replies: a 20-byte IPv4 header (0x45 = version 4, IHL 5; protocol 0x06
// = TCP) followed by a 20-byte TCP header with flags 0x14 (RST|ACK) at byte
// 33. The IP checksum (bytes 10-11) and TCP checksum (bytes 36-37) are left
// zero, and the address/port/sequence fields are placeholders patched in by
// the callers.
var gTcpRstTemplate = []byte{
	0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06, 0, 0, 0xac, 0x15, 0x00, 0x01,
	0x68, 0xc7, 0xe7, 0x82, 0xc5, 0xb0, 0x4e, 0x3c, 0x4f, 0x59, 0x37, 0xd2, 0x00, 0x00, 0x00, 0x00,
	0x50, 0x14, 0x00, 0x00, 0, 0, 0x00, 0x00,
}
// NotifyBlockIpPacketToClient builds the reply that tells the sender its
// packet was blocked:
//   - for a DNS request, a DNS response with rcode REFUSED and no answers;
//   - for a TCP packet that is not itself a RST, a TCP RST;
//   - anything else gets no reply (ok == false).
func NotifyBlockIpPacketToClient(ipPacket IpPacket) (outIpPacket IpPacket, ok bool) {
	if ipPacket.IsDnsRequest() {
		dnsPacket, errMsg := udwDnsPacket.PacketReadFromByteSlice(ipPacket.GetUdpBody())
		if errMsg != "" {
			return outIpPacket, false
		}
		// Turn the query into a REFUSED response with no answer records.
		dnsPacket.AnswerList = nil
		dnsPacket.SetIsResponse(true)
		dnsPacket.SetRcode(udwDnsPacket.RcodeREFUSED)
		outBuf, errMsg := udwDnsPacket.PacketWriteToByteSlice(dnsPacket, nil)
		if errMsg != "" {
			return outIpPacket, false
		}
		// Swap src/dst so the response flows back to the requester.
		ipPacket2 := NewUdpIpPacket(NewUdpIpPacketRequest{
			SrcIp:   ipPacket.GetDstIp(),
			SrcPort: ipPacket.GetDstPort(),
			DstIp:   ipPacket.GetSrcIp(),
			DstPort: ipPacket.GetSrcPort(),
			Body:    outBuf,
		})
		return ipPacket2, true
	}
	if ipPacket.IsTcp() && ipPacket.GetTcpFlagRst() == false {
		ipPacket2 := TcpRstAnotherWay(ipPacket, nil)
		return ipPacket2, true
	}
	return outIpPacket, false
}
|
package pkg
import "github.com/irfansharif/log"
// Log emits a demonstration info message through the supplied logger.
func Log(logger *log.Logger) {
	logger.Info("from pkg!")
}
|
package model
import (
"fmt"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"golang.org/x/crypto/bcrypt"
"rain/library/go-str"
"rain/library/helper"
"strings"
)
// Auth groups the login/registration handlers; it carries no state.
type Auth struct {
}
// Login authenticates an admin user from the JSON request body.
// It loads the row matching "username" from the admin table and verifies the
// bcrypt password hash. Returns the loaded user and true on success.
func (m *Auth) Login(ctx *gin.Context) (user Admin, status bool) {
	db := helper.Db()
	requestMap := helper.GetRequestJson(ctx)
	result := db.Table("admin").
		Where("username = ?", requestMap["username"]).
		First(&user)
	if result.Error != nil {
		fmt.Println(result.Error)
		return user, false
	}
	// Guard the type assertion: a missing or non-string "password" field in
	// the request would otherwise panic.
	password, ok := requestMap["password"].(string)
	if !ok {
		return user, false
	}
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
		fmt.Println(err)
		return user, false
	}
	fmt.Println("登录成功")
	return user, true
}
// CreateUser inserts an admin row and its role links in one transaction.
// username/password are stored as-is (the caller is expected to hash the
// password first). rolesId is a comma-separated list of role IDs.
// When isAdmin is true the listed roles are linked; otherwise the default
// "front_user" role is looked up (created on first use) and linked.
// NOTE(review): with isAdmin == false the rolesId argument is ignored —
// confirm that is intended.
func CreateUser(username, password, rolesId string, isAdmin bool) error {
	db := helper.Db()
	return db.Transaction(func(tx *gorm.DB) error {
		admin := Admin{Username: username, Password: password}
		if err := tx.Table("admin").Create(&admin).Error; err != nil {
			return err
		}
		rolesIdList := strings.Split(rolesId, ",")
		if isAdmin {
			// Link every requested role to the new admin.
			for _, v := range rolesIdList {
				adminRole := AdminRole{AdminId: uint64(admin.ID), RoleId: uint64(str.ToUint(v))}
				if err := tx.Table("admin_role").Create(&adminRole).Error; err != nil {
					return err
				}
			}
		} else {
			// Front-end users always get the shared "front_user" role.
			role := Role{}
			if err := tx.Table("role").FirstOrCreate(&role, Role{Name: "front_user", Memo: "前端用户", Sequence: 5}).Error; err != nil {
				return err
			}
			adminRole := AdminRole{AdminId: uint64(admin.ID), RoleId: uint64(role.ID)}
			if err := tx.Table("admin_role").Create(&adminRole).Error; err != nil {
				return err
			}
		}
		return nil
	})
}
// Register creates a new user from the JSON request body.
// The plaintext password is bcrypt-hashed before storage. When isAdmin is
// true the comma-separated "rolesId" field assigns roles; otherwise the
// default front-end role is used (see CreateUser). Returns true on success.
func (m *Auth) Register(ctx *gin.Context, isAdmin bool) (status bool) {
	requestMap := helper.GetRequestJson(ctx)
	// Guard the type assertions: missing or non-string fields in the
	// request would otherwise panic.
	plain, ok := requestMap["password"].(string)
	if !ok {
		return false
	}
	username, ok := requestMap["username"].(string)
	if !ok {
		return false
	}
	hash, err := bcrypt.GenerateFromPassword([]byte(plain), bcrypt.DefaultCost)
	if err != nil {
		return false
	}
	rolesId, ok := requestMap["rolesId"].(string)
	if !ok {
		rolesId = ""
	}
	if err := CreateUser(username, string(hash), rolesId, isAdmin); err != nil {
		fmt.Println(err)
		return false
	}
	return true
}
|
package main
// Orientation describes the kind of track piece at a grid cell.
type Orientation int

const (
	Vertical     Orientation = 1
	Horizontal   Orientation = 2
	LeftTurn     Orientation = 3 // when moving vertically
	RightTurn    Orientation = 4 // when moving vertically
	Intersection Orientation = 7
)

// Direction is the cart's current heading.
type Direction int

const (
	Up    Direction = 0
	Left  Direction = 1
	Down  Direction = 2
	Right Direction = 3
)

// Choice is the action a cart takes at an intersection.
type Choice int

const (
	TurnLeft   Choice = 0
	GoStraight Choice = 1
	TurnRight  Choice = 2
)

// Cart is a single cart moving across the track grid.
type Cart struct {
	Position  *Point    // current grid cell
	Direction Direction // current heading
	Choice    Choice    // choice taken at the last intersection
	Dead      bool      // presumably set on collision elsewhere — confirm
}
// move advances the cart one step in its current direction, then applies the
// track piece it landed on: corners rotate the cart, intersections cycle
// through its turn choices.
func (cart *Cart) move(grid *Grid) {
	cart.Position.move(cart.Direction)
	movingVertically := cart.Direction == Up || cart.Direction == Down
	switch (*grid)[*cart.Position] {
	case LeftTurn:
		if movingVertically {
			cart.rotateLeft()
		} else {
			cart.rotateRight()
		}
	case RightTurn:
		if movingVertically {
			cart.rotateRight()
		} else {
			cart.rotateLeft()
		}
	case Intersection:
		cart.makeChoice()
	}
}
// rotateLeft turns the cart 90 degrees counter-clockwise
// (Up -> Left -> Down -> Right -> Up).
func (cart *Cart) rotateLeft() {
	next := (int(cart.Direction) + 1) % 4
	cart.Direction = Direction(next)
}

// rotateRight turns the cart 90 degrees clockwise
// (Up -> Right -> Down -> Left -> Up).
func (cart *Cart) rotateRight() {
	next := (int(cart.Direction) + 3) % 4
	cart.Direction = Direction(next)
}
// makeChoice applies the cart's next intersection behaviour, cycling
// left -> straight -> right -> left ...
func (cart *Cart) makeChoice() {
	cart.Choice = Choice((int(cart.Choice) + 1) % 3)
	switch cart.Choice {
	case TurnLeft:
		cart.rotateLeft()
	case TurnRight:
		cart.rotateRight()
	}
}
// GetDirection returns the display glyph for the cart's heading
// ("^", "v", "<", ">"), or "X" for an unknown direction.
func (cart *Cart) GetDirection() string {
	switch cart.Direction {
	case Up:
		return "^"
	case Left:
		return "<"
	case Down:
		return "v"
	case Right:
		return ">"
	default:
		return "X"
	}
}
// GetChoice returns the display glyph for the cart's next intersection
// action (`\` left, `|` straight, `/` right), or "X" if unknown.
func (cart *Cart) GetChoice() string {
	switch cart.Choice {
	case TurnLeft:
		return `\`
	case GoStraight:
		return "|"
	case TurnRight:
		return "/"
	default:
		return "X"
	}
}
|
package portal
import (
"github.com/Cepave/fe/http/base"
"github.com/Cepave/fe/model/event"
)
// PortalController serves the event-portal HTTP endpoints, inheriting the
// session/response helpers from base.BaseController.
type PortalController struct {
	base.BaseController
}
// EventGet responds with events filtered by the optional startTime, endTime,
// prioprity and status query parameters (status defaults to "PROBLEM").
// The request must carry a valid session.
// NOTE(review): "prioprity" and "ResposeError" are existing public
// spellings (query parameter / base API); renaming would break callers.
func (this *PortalController) EventGet() {
	baseResp := this.BasicRespGen()
	_, err := this.SessionCheck()
	if err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	startTime, _ := this.GetInt64("startTime", 0)
	endTime, _ := this.GetInt64("endTime", 0)
	prioprity, _ := this.GetInt("prioprity", -1)
	status := this.GetString("status", "PROBLEM")
	events, err := event.GetEvent(startTime, endTime, prioprity, status)
	if err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	baseResp.Data["events"] = events
	this.ServeApiJson(baseResp)
	return
}
// ColseCase closes the event identified by the "id" query parameter on
// behalf of the "cName" user. The request must carry a valid session.
// NOTE(review): "ColseCase" (close) and the "dosen't" message are existing
// public spellings/outputs; changing them could affect clients.
func (this *PortalController) ColseCase() {
	baseResp := this.BasicRespGen()
	_, err := this.SessionCheck()
	if err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	username := this.GetString("cName", "")
	// "xxx" is a sentinel default used to detect a missing id parameter.
	id := this.GetString("id", "xxx")
	if id == "xxx" {
		this.ResposeError(baseResp, "You dosen't pick any event id")
		return
	}
	err = event.CloseEvent(username, id)
	if err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	this.ServeApiJson(baseResp)
	return
}
// CountNumOfTlp responds with the number of teams, as reported by
// event.CountNumOfTlp, in the "count" field of the standard API response.
// The request must carry a valid session.
func (this *PortalController) CountNumOfTlp() {
	baseResp := this.BasicRespGen()
	// Early returns instead of the previous else-after-return nesting.
	if _, err := this.SessionCheck(); err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	numberOfteam, err := event.CountNumOfTlp()
	if err != nil {
		this.ResposeError(baseResp, err.Error())
		return
	}
	baseResp.Data["count"] = numberOfteam
	this.ServeApiJson(baseResp)
}
|
package changelog
import (
	"log"
	"os/exec"
	"strings"
)
// runGit logs and executes a git subcommand in the current working
// directory, returning any error from the command. It centralizes the
// log-then-run pattern previously duplicated by every helper below.
func runGit(args ...string) error {
	log.Printf("Executing `git %s`", strings.Join(args, " "))
	return exec.Command("git", args...).Run()
}

// gitAddAll stages everything by running `git add *`.
// (The "*" is passed through to git as a pathspec; exec does no shell
// globbing.)
func gitAddAll() error {
	return runGit("add", "*")
}

// gitStash saves the working tree with `git stash`.
func gitStash() error {
	return runGit("stash")
}

// gitStashPop restores the most recent stash with `git stash pop`.
func gitStashPop() error {
	return runGit("stash", "pop")
}

// gitResetHead unstages everything with `git reset HEAD`.
func gitResetHead() error {
	return runGit("reset", "HEAD")
}
|
package hrtree
import (
"fmt"
h "github.com/jtejido/hilbert"
"testing"
)
var hf, _ = h.New(uint32(5), 2)
// LowerLeft returns the rectangle's lower-left corner.
func (r *rectangle) LowerLeft() Point {
	return r.lowerLeft
}

// UpperRight returns the rectangle's upper-right corner.
func (r *rectangle) UpperRight() Point {
	return r.upperRight
}

// rect is a test helper building a *rectangle from two corners.
// NOTE(review): on error it only prints and still returns the (zero-value)
// rectangle — acceptable for tests with valid fixtures, but silent otherwise.
func rect(lower, upper Point) *rectangle {
	r, err := newRect(lower, upper)
	if err != nil {
		fmt.Println(err)
	}
	return &r
}
// index returns the position of obj in objs, or -1 when absent.
func index(objs []Rectangle, obj Rectangle) int {
	for i, candidate := range objs {
		if candidate == obj {
			return i
		}
	}
	return -1
}
// TestChooseNode checks chooseNode: on an empty tree it returns the root,
// and on a non-leaf it picks the child appropriate for the target Hilbert
// value. NOTE(review): l1..l3 are built with h2..h4 rather than h1..h3 —
// confirm whether that offset is intended by the LHV ordering under test.
func TestChooseNode(t *testing.T) {
	rt, _ := NewTree(DefaultMinNodeEntries, DefaultMaxNodeEntries, 5)
	rect1 := rect(Point{2, 1}, Point{2, 1})
	h1 := hf.Encode(getCenter(rect1)...)
	rect2 := rect(Point{2, 2}, Point{2, 2})
	h2 := hf.Encode(getCenter(rect2)...)
	rect3 := rect(Point{2, 3}, Point{2, 3})
	h3 := hf.Encode(getCenter(rect3)...)
	rect4 := rect(Point{2, 4}, Point{2, 4})
	h4 := hf.Encode(getCenter(rect4)...)
	l1 := entry{bb: rect1, obj: rect1, h: h2, leaf: true}
	l2 := entry{bb: rect2, obj: rect2, h: h3, leaf: true}
	l3 := entry{bb: rect3, obj: rect3, h: h4, leaf: true}
	leaf := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	leaf.leaf = true
	// Empty tree: chooseNode must land on the root itself.
	if leaf := rt.chooseNode(rt.root, h1); leaf != rt.root {
		t.Errorf("expected chooseNode of empty tree to return root")
	}
	// Build three single-entry children wrapped in non-leaf entries.
	childNode1 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	childNode1.leaf = true
	childNode1.insertLeaf(l1)
	childNode1.adjustLHV()
	childNode1.adjustMBR()
	entry1 := entry{node: childNode1}
	childNode2 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	childNode2.leaf = true
	childNode2.insertLeaf(l2)
	childNode2.adjustLHV()
	childNode2.adjustMBR()
	entry2 := entry{node: childNode2}
	childNode3 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	childNode3.leaf = true
	childNode3.insertLeaf(l3)
	childNode3.adjustLHV()
	childNode3.adjustMBR()
	entry3 := entry{node: childNode3}
	nonLeaf := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	nonLeaf.insertNonLeaf(entry3)
	nonLeaf.insertNonLeaf(entry2)
	nonLeaf.insertNonLeaf(entry1)
	if childNode3 != rt.chooseNode(nonLeaf, h2) {
		t.Errorf("incorrect chooseNode")
	}
}
// TestInsertNonLeafEntry checks that insertNonLeaf adds the entry and wires
// the child's parent pointer.
func TestInsertNonLeafEntry(t *testing.T) {
	n := newNode(2, 4)
	nonLeafNode := newNode(2, 4)
	nonLeafEntry := entry{node: n}
	nonLeafNode.insertNonLeaf(nonLeafEntry)
	if nonLeafNode.entries.len() != 1 {
		t.Errorf("no entry added.")
	}
	if nonLeafNode != nonLeafEntry.node.parent {
		t.Errorf("incorrect parent.")
	}
}

// TestInsertNonLeafEntrySiblings checks that inserting a lone child sets its
// parent but leaves both sibling links nil.
func TestInsertNonLeafEntrySiblings(t *testing.T) {
	childNode := newNode(2, 4)
	childNode.leaf = true
	rect := rect(Point{2, 2}, Point{2, 4})
	h := hf.Encode(getCenter(rect)...)
	leafEntry := entry{bb: rect, obj: rect, h: h, leaf: true}
	childNode.insertLeaf(leafEntry)
	nonLeafEntry := entry{node: childNode}
	parent := newNode(2, 4)
	parent.insertNonLeaf(nonLeafEntry)
	if parent != childNode.parent {
		t.Errorf("incorrect parent.")
	}
	if parent != nonLeafEntry.node.parent {
		t.Errorf("incorrect parent.")
	}
	if childNode.right != nil {
		t.Errorf("incorrect right sibling.")
	}
	if childNode.left != nil {
		t.Errorf("incorrect left sibling.")
	}
}
// TestNodeOverflowing checks isOverflowing: a node with capacity 2 is not
// overflowing while empty, and is once filled to capacity.
func TestNodeOverflowing(t *testing.T) {
	rect := rect(Point{2, 2}, Point{2, 4})
	h := hf.Encode(getCenter(rect)...)
	leafEntry := entry{bb: rect, obj: rect, h: h, leaf: true}
	leafEntry2 := entry{bb: rect, obj: rect, h: h, leaf: true}
	n := newNode(1, 2)
	n.leaf = true
	if n.isOverflowing() {
		t.Errorf("should not be overflowing")
	}
	n.insertLeaf(leafEntry)
	n.insertLeaf(leafEntry2)
	if !n.isOverflowing() {
		t.Errorf("should be overflowing")
	}
}
// TestNodeUnderflowing checks isUnderflowing: an empty node with minimum 1
// is underflowing, and a node filled to capacity is not.
// (Messages fixed: they previously said "overflowing" and had the
// should/should-not polarity inverted.)
func TestNodeUnderflowing(t *testing.T) {
	rect := rect(Point{2, 2}, Point{2, 4})
	h := hf.Encode(getCenter(rect)...)
	leafEntry := entry{bb: rect, obj: rect, h: h, leaf: true}
	leafEntry2 := entry{bb: rect, obj: rect, h: h, leaf: true}
	n := newNode(1, 2)
	n.leaf = true
	if !n.isUnderflowing() {
		t.Errorf("should be underflowing")
	}
	n.insertLeaf(leafEntry)
	n.insertLeaf(leafEntry2)
	if n.isUnderflowing() {
		t.Errorf("should not be underflowing")
	}
}
// TestAdjustMBR checks that adjustMBR computes the bounding box of a node's
// entries, for both overlapping and disjoint rectangles.
func TestAdjustMBR(t *testing.T) {
	rect1 := rect(Point{2, 0}, Point{2, 4})
	h1 := hf.Encode(getCenter(rect1)...)
	leafEntry1 := entry{bb: rect1, obj: rect1, h: h1, leaf: true}
	rect2 := rect(Point{2, 1}, Point{2, 5})
	h2 := hf.Encode(getCenter(rect2)...)
	leafEntry2 := entry{bb: rect2, obj: rect2, h: h2, leaf: true}
	rect3 := rect(Point{2, 5}, Point{2, 10})
	h3 := hf.Encode(getCenter(rect3)...)
	leafEntry3 := entry{bb: rect3, obj: rect3, h: h3, leaf: true}
	// Overlapping rectangles: union is [2,0] .. [2,5].
	n := newNode(2, 4)
	n.leaf = true
	n.insertLeaf(leafEntry1)
	n.insertLeaf(leafEntry2)
	n.adjustMBR()
	r := n.getMBR()
	if 2 != r.lowerLeft[0] {
		t.Errorf("incorrect lower[x]")
	}
	if 0 != r.lowerLeft[1] {
		t.Errorf("incorrect lower[y]")
	}
	if 2 != r.upperRight[0] {
		t.Errorf("incorrect upper[x]")
	}
	if 5 != r.upperRight[1] {
		t.Errorf("incorrect upper[y]")
	}
	// Disjoint rectangles: union is [2,0] .. [2,10].
	n1 := newNode(2, 4)
	n1.leaf = true
	n1.insertLeaf(leafEntry1)
	n1.insertLeaf(leafEntry3)
	n1.adjustMBR()
	r1 := n1.getMBR()
	if 2 != r1.lowerLeft[0] {
		t.Errorf("incorrect lower[x]")
	}
	if 0 != r1.lowerLeft[1] {
		t.Errorf("incorrect lower[y]")
	}
	if 2 != r1.upperRight[0] {
		t.Errorf("incorrect upper[x]")
	}
	if 10 != r1.upperRight[1] {
		t.Errorf("incorrect upper[y]")
	}
}
// TestAdjustMBR2 checks adjustMBR with one degenerate (point) rectangle:
// the union of [2,2]..[2,3] and the point [2,8] is [2,2]..[2,8].
// (Messages fixed: all four previously said "incorrect upper[y]".)
func TestAdjustMBR2(t *testing.T) {
	rect1 := rect(Point{2, 2}, Point{2, 3})
	h1 := hf.Encode(getCenter(rect1)...)
	leafEntry1 := entry{bb: rect1, obj: rect1, h: h1, leaf: true}
	rect2 := rect(Point{2, 8}, Point{2, 8})
	h2 := hf.Encode(getCenter(rect2)...)
	leafEntry2 := entry{bb: rect2, obj: rect2, h: h2, leaf: true}
	n := newNode(2, 4)
	n.leaf = true
	n.insertLeaf(leafEntry1)
	n.insertLeaf(leafEntry2)
	n.adjustMBR()
	r := n.getMBR()
	if 2 != r.lowerLeft[0] {
		t.Errorf("incorrect lower[x]")
	}
	if 2 != r.lowerLeft[1] {
		t.Errorf("incorrect lower[y]")
	}
	if 2 != r.upperRight[0] {
		t.Errorf("incorrect upper[x]")
	}
	if 8 != r.upperRight[1] {
		t.Errorf("incorrect upper[y]")
	}
}
// TestAdjustLHV checks that adjustLHV sets the node's largest Hilbert value
// (LHV) to the maximum entry value (h2 here, since h1 < h2).
func TestAdjustLHV(t *testing.T) {
	rect1 := rect(Point{2, 0}, Point{2, 0})
	h1 := hf.Encode(getCenter(rect1)...)
	leafEntry1 := entry{bb: rect1, obj: rect1, h: h1, leaf: true}
	rect2 := rect(Point{2, 0}, Point{2, 2})
	h2 := hf.Encode(getCenter(rect2)...)
	leafEntry2 := entry{bb: rect2, obj: rect2, h: h2, leaf: true}
	n := newNode(2, 4)
	n.leaf = true
	n.insertLeaf(leafEntry1)
	n.insertLeaf(leafEntry2)
	n.adjustLHV()
	if h1.Cmp(h2) >= 0 {
		t.Errorf("incorrect hilbert value")
	}
	if h2.Cmp(n.lhv) != 0 {
		t.Errorf("incorrect hilbert value")
	}
}
// TestSiblings checks getSiblings(n): it collects up to n nodes starting at
// the receiver and walking right; a lone node yields just itself, and the
// left neighbour is apparently not included — confirm intended semantics.
func TestSiblings(t *testing.T) {
	right := newNode(2, 4)
	right.leaf = true
	left := newNode(2, 4)
	left.leaf = true
	main := newNode(2, 4)
	main.leaf = true
	if 1 != len(main.getSiblings(2)) {
		t.Errorf("incorrect number of siblings")
	}
	main.right = right
	right.left = main
	if 2 != len(main.getSiblings(2)) {
		t.Errorf("incorrect number of siblings")
	}
	main.left = left
	left.right = main
	if 2 != len(main.getSiblings(2)) {
		t.Errorf("incorrect number of siblings")
	}
	if 1 != len(main.getSiblings(1)) {
		t.Errorf("incorrect number of siblings")
	}
	siblings := main.getSiblings(3)
	if siblings[0] != main {
		t.Errorf("incorrect sibling")
	}
	if siblings[1] != right {
		t.Errorf("incorrect sibling")
	}
}
// TestHandleOverflow checks that handleOverflow on a full node with no
// siblings splits it: entries are halved across the old node and a new left
// sibling, with the sibling links wired both ways.
// NOTE(review): the locals rect/h/entry shadow the package helpers of the
// same names — legal, but easy to misread.
func TestHandleOverflow(t *testing.T) {
	node1 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	node1.leaf = true
	siblings := make([]*node, 0)
	hf2, _ := h.New(uint32(5), 32)
	for i := 0; i < DefaultMaxNodeEntries; i++ {
		rect := rect(Point{2, uint64(i)}, Point{2, uint64(i)})
		h := hf2.Encode(getCenter(rect)...)
		entry := entry{bb: rect, obj: rect, h: h, leaf: true}
		node1.insertLeaf(entry)
	}
	rect2 := rect(Point{2, 0}, Point{2, 0})
	h2 := hf2.Encode(getCenter(rect2)...)
	entry2 := entry{bb: rect2, obj: rect2, h: h2, leaf: true}
	node2, _ := handleOverflow(node1, entry2, siblings)
	if DefaultMaxNodeEntries/2 != node1.entries.len() {
		t.Errorf("incorrect number of entries at node1")
	}
	if DefaultMaxNodeEntries/2+1 != node2.entries.len() {
		t.Errorf("incorrect number of entries at node2")
	}
	if node1 != node2.right {
		t.Errorf("incorrect right sibling at node2")
	}
	if nil != node2.left {
		t.Errorf("incorrect left sibling at node2")
	}
	if nil != node1.right {
		t.Errorf("incorrect right sibling at node1")
	}
	if node2 != node1.left {
		t.Errorf("incorrect left sibling at node1")
	}
}
// TestSearchIntersect inserts ten rectangles and checks that a query box
// returns exactly the eight that intersect it.
func TestSearchIntersect(t *testing.T) {
	rt, _ := NewTree(3, 3, 12)
	things := []Rectangle{
		rect(Point{0, 0}, Point{2, 1}),
		rect(Point{3, 1}, Point{4, 3}),
		rect(Point{1, 2}, Point{3, 4}),
		rect(Point{8, 6}, Point{9, 7}),
		rect(Point{10, 3}, Point{11, 5}),
		rect(Point{11, 7}, Point{12, 8}),
		rect(Point{2, 6}, Point{3, 8}),
		rect(Point{3, 6}, Point{4, 8}),
		rect(Point{2, 8}, Point{3, 10}),
		rect(Point{3, 8}, Point{4, 10}),
	}
	for _, thing := range things {
		rt.Insert(thing)
	}
	bb := rect(Point{2, 1}, Point{12, 7})
	q := rt.SearchIntersect(bb)
	// Indices of the fixtures expected to intersect bb.
	expected := []int{0, 1, 2, 3, 4, 5, 6, 7}
	if len(q) != len(expected) {
		t.Errorf("SearchIntersect failed to find all objects")
	}
	for _, ind := range expected {
		if index(q, things[ind]) < 0 {
			t.Errorf("SearchIntersect failed to find things[%d]", ind)
		}
	}
}
// TestDelete checks Delete on a single-entry tree: deleting rectangles that
// merely overlap (but do not equal) the stored one leaves it in place, and
// deleting the stored rectangle itself empties the tree.
// (Final messages fixed: they previously claimed "1 entry"/"1 result" where
// the assertions expect 0.)
func TestDelete(t *testing.T) {
	rt, _ := NewTree(DefaultMinNodeEntries, DefaultMaxNodeEntries, 5)
	rect0 := rect(Point{2, 4}, Point{2, 8})
	rt.Insert(rect0)
	if !rt.root.leaf {
		t.Errorf("Root should be leaf")
	}
	if 1 != rt.root.entries.len() {
		t.Errorf("Root should have 1 entry")
	}
	if 1 != len(rt.SearchIntersect(rect0)) {
		t.Errorf("tree should have 1 result")
	}
	// A contained rectangle is not the stored one; nothing is deleted.
	rect1 := rect(Point{2, 5}, Point{2, 7})
	rt.Delete(rect1)
	if !rt.root.leaf {
		t.Errorf("Root should be leaf")
	}
	if 1 != rt.root.entries.len() {
		t.Errorf("Root should have 1 entry")
	}
	if 1 != len(rt.SearchIntersect(rect0)) {
		t.Errorf("tree should have 1 result")
	}
	// A containing rectangle is not the stored one either.
	rect2 := rect(Point{2, 2}, Point{2, 10})
	rt.Delete(rect2)
	if !rt.root.leaf {
		t.Errorf("Root should be leaf")
	}
	if 1 != rt.root.entries.len() {
		t.Errorf("Root should have 1 entry")
	}
	if 1 != len(rt.SearchIntersect(rect0)) {
		t.Errorf("tree should have 1 result")
	}
	// Deleting the stored rectangle empties the tree.
	rt.Delete(rect0)
	if !rt.root.leaf {
		t.Errorf("Root should be leaf")
	}
	if 0 != rt.root.entries.len() {
		t.Errorf("Root should have 0 entries")
	}
	if 0 != len(rt.SearchIntersect(rect0)) {
		t.Errorf("tree should have 0 results")
	}
}
// TestDeleteAtMax fills a single node to capacity, deletes down to the
// minimum, and checks the root stays a leaf with the minimum entry count.
func TestDeleteAtMax(t *testing.T) {
	rt, _ := NewTree(DefaultMinNodeEntries, DefaultMaxNodeEntries, 12)
	for i := 0; i < DefaultMaxNodeEntries; i++ {
		r := rect(Point{2, uint64(i)}, Point{2, uint64(i)})
		rt.Insert(r)
	}
	for i := 0; i < (DefaultMaxNodeEntries - DefaultMinNodeEntries); i++ {
		r2 := rect(Point{2, uint64(i)}, Point{2, uint64(i)})
		rt.Delete(r2)
	}
	if !rt.root.leaf {
		t.Errorf("root should be leaf")
	}
	if DefaultMinNodeEntries != rt.root.entries.len() {
		t.Errorf("incorrect number of entries left")
	}
}

// TestDeleteAtMax2 inserts enough entries to force splits, deletes them all,
// and checks the tree collapses back to an empty leaf root.
func TestDeleteAtMax2(t *testing.T) {
	nodeNo := DefaultMaxNodeEntries * 4
	rt, _ := NewTree(DefaultMinNodeEntries, DefaultMaxNodeEntries, 12)
	for i := 0; i < nodeNo; i++ {
		r := rect(Point{2, uint64(i)}, Point{2, uint64(i)})
		rt.Insert(r)
	}
	for i := 0; i < nodeNo; i++ {
		r2 := rect(Point{2, uint64(i)}, Point{2, uint64(i)})
		rt.Delete(r2)
	}
	if !rt.root.leaf {
		t.Errorf("root should be leaf")
	}
	if 0 != rt.root.entries.len() {
		t.Errorf("incorrect number of entries left")
	}
}
// TestRedistributeEntries checks that redistributeEntries spreads an odd
// number of entries over two nodes, filling the first to capacity.
func TestRedistributeEntries(t *testing.T) {
	entries := newListUncapped()
	nodes := make([]*node, 0)
	node1 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	node1.leaf = true
	nodes = append(nodes, node1)
	node2 := newNode(DefaultMinNodeEntries, DefaultMaxNodeEntries)
	node2.leaf = true
	nodes = append(nodes, node2)
	for i := 0; i < DefaultMaxNodeEntries*2-1; i++ {
		rect := rect(Point{2, 1}, Point{2, 1})
		h := hf.Encode(getCenter(rect)...)
		leafEntry := entry{bb: rect, obj: rect, h: h, leaf: true}
		entries.insert(leafEntry)
	}
	redistributeEntries(entries, nodes)
	if DefaultMaxNodeEntries != node1.entries.len() {
		t.Errorf("incorrect number of entries")
	}
	if DefaultMaxNodeEntries-1 != node2.entries.len() {
		t.Errorf("incorrect number of entries")
	}
}
// TestSearchIntersectNoResult checks that a query box far away from every
// stored rectangle yields an empty result.
func TestSearchIntersectNoResult(t *testing.T) {
	rt, _ := NewTree(3, 3, 12)
	things := []Rectangle{
		rect(Point{0, 0}, Point{2, 1}),
		rect(Point{3, 1}, Point{4, 3}),
		rect(Point{1, 2}, Point{3, 4}),
		rect(Point{8, 6}, Point{9, 7}),
		rect(Point{10, 3}, Point{11, 5}),
		rect(Point{11, 7}, Point{12, 8}),
		rect(Point{2, 6}, Point{3, 8}),
		rect(Point{3, 6}, Point{4, 8}),
		rect(Point{2, 8}, Point{3, 10}),
		rect(Point{3, 8}, Point{4, 10}),
	}
	for _, thing := range things {
		rt.Insert(thing)
	}
	bb := rect(Point{99, 99}, Point{109, 94})
	q := rt.SearchIntersect(bb)
	if len(q) != 0 {
		t.Errorf("SearchIntersect failed to return nil slice on failing query")
	}
}
// BenchmarkGetIntersect measures SearchIntersect on a small fixed tree;
// the timer is stopped while the fixture is built.
func BenchmarkGetIntersect(b *testing.B) {
	b.StopTimer()
	rt, _ := NewTree(3, 3, 12)
	things := []Rectangle{
		rect(Point{0, 0}, Point{2, 1}),
		rect(Point{3, 1}, Point{4, 3}),
		rect(Point{1, 2}, Point{3, 4}),
		rect(Point{8, 6}, Point{9, 7}),
		rect(Point{10, 3}, Point{11, 5}),
		rect(Point{11, 7}, Point{12, 8}),
		rect(Point{2, 6}, Point{3, 8}),
		rect(Point{3, 6}, Point{4, 8}),
		rect(Point{2, 8}, Point{3, 10}),
		rect(Point{3, 8}, Point{4, 10}),
	}
	for _, thing := range things {
		rt.Insert(thing)
	}
	bb := rect(Point{2, 1}, Point{12, 7})
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		rt.SearchIntersect(bb)
	}
}

// BenchmarkInsert measures building the whole ten-rectangle tree per
// iteration (fixture construction is included in the timed region — the
// rect/Encode cost is measured along with Insert).
func BenchmarkInsert(b *testing.B) {
	for i := 0; i < b.N; i++ {
		rt, _ := NewTree(3, DefaultMaxNodeEntries, 5)
		things := []Rectangle{
			rect(Point{0, 0}, Point{2, 1}),
			rect(Point{3, 1}, Point{4, 3}),
			rect(Point{1, 2}, Point{3, 4}),
			rect(Point{8, 6}, Point{9, 7}),
			rect(Point{10, 3}, Point{11, 5}),
			rect(Point{11, 7}, Point{12, 8}),
			rect(Point{2, 6}, Point{3, 8}),
			rect(Point{3, 6}, Point{4, 8}),
			rect(Point{2, 8}, Point{3, 10}),
			rect(Point{3, 8}, Point{4, 10}),
		}
		for _, thing := range things {
			rt.Insert(thing)
		}
	}
}
|
package mongodb
import (
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"golang.org/x/net/context"
)
// Setting is a configuration entry stored in the "Setting" collection.
// The *Time fields are Unix nanosecond timestamps; a non-zero DeleteTime
// marks a soft-deleted document (see Trash/Restore).
type Setting struct {
	ID          primitive.ObjectID `bson:"_id,omitempty" json:"id"`
	Name        string             `bson:"name" json:"name"`               // display name
	Description string             `bson:"description" json:"description"` // description
	Regex       string             `bson:"regex" json:"regex"`             // regex constraint on Value
	Value       string             `bson:"value" json:"value"`             // the setting's value
	Roles       []string           `bson:"roles" json:"roles"`             // roles allowed to use it
	CreateTime  int64              `bson:"create_time" json:"create_time"`
	UpdateTime  int64              `bson:"update_time" json:"update_time"`
	DeleteTime  int64              `bson:"delete_time" json:"delete_time"`
}

// table returns the backing collection name.
func (m *Setting) table() string {
	return "Setting"
}
// Save inserts the model as a new document, stamping CreateTime (UnixNano).
// On success the generated ObjectID is stored back into m.ID.
func (m *Setting) Save() bool {
	m.CreateTime = time.Now().UnixNano()
	result, err := InsertOne(m.table(), m)
	if err != nil {
		// result may be nil on error; dereferencing it unguarded (as
		// before) would panic.
		return false
	}
	if id, ok := result.InsertedID.(primitive.ObjectID); ok {
		m.ID = id
	}
	return true
}
// Modify persists the current field values, stamping UpdateTime (UnixNano).
func (m *Setting) Modify() bool {
	m.UpdateTime = time.Now().UnixNano()
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Trash soft-deletes the document by stamping DeleteTime (UnixNano).
func (m *Setting) Trash() bool {
	m.DeleteTime = time.Now().UnixNano()
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Restore clears DeleteTime, undoing a soft delete.
func (m *Setting) Restore() bool {
	m.DeleteTime = 0
	err := UpdateOneByID(m.table(), m.ID, *m, m)
	return err == nil
}

// Remove permanently deletes the document identified by m.ID.
func (m *Setting) Remove() bool {
	err := DeleteOneByID(m.table(), m.ID, m)
	return err == nil
}
// Get loads the document identified by m.ID into m.
func (m *Setting) Get() bool {
	err := FindOneByID(m.table(), m.ID, m)
	return err == nil
}
// GetByID loads the document whose _id matches the given hex string into m.
// Returns false when the hex string is not a valid ObjectID (the parse error
// was previously ignored, silently querying the zero ObjectID) or the lookup
// fails.
func (m *Setting) GetByID(hex string) bool {
	id, err := primitive.ObjectIDFromHex(hex)
	if err != nil {
		return false
	}
	err = FindOneByID(m.table(), id, m)
	return err == nil
}
// FindOne loads the first document matching filter into m.
func (m *Setting) FindOne(filter interface{}) bool {
	err := FindOne(m.table(), filter, m)
	return err == nil
}

// InsertMany inserts the given documents and returns their generated IDs,
// or nil when the insert fails.
func (m *Setting) InsertMany(list []interface{}) []interface{} {
	if result, err := InsertMany(m.table(), list); err == nil {
		return result.InsertedIDs
	}
	return nil
}
// FindMany returns up to limit documents matching filter in the given sort
// order, or nil when the query fails.
// NOTE(review): a decode failure panics, and cursor.Err() is not checked
// after iteration — confirm that is acceptable for callers.
func (m *Setting) FindMany(filter interface{}, limit int64, sort interface{}) *[]Setting {
	if cursor, err := FindMany(m.table(), filter, limit, sort); err == nil {
		var list []Setting
		for cursor.Next(context.Background()) {
			var item Setting
			if err := cursor.Decode(&item); err != nil {
				panic(err)
			}
			list = append(list, item)
		}
		_ = cursor.Close(context.Background())
		return &list
	}
	return nil
}

// FindManySkip is FindMany with an additional skip offset for pagination.
// Same caveats as FindMany regarding decode panics and cursor.Err().
func (m *Setting) FindManySkip(filter interface{}, skip int64, limit int64, sort interface{}) *[]Setting {
	if cursor, err := FindManySkip(m.table(), filter, skip, limit, sort); err == nil {
		var list []Setting
		for cursor.Next(context.Background()) {
			var item Setting
			if err := cursor.Decode(&item); err != nil {
				panic(err)
			}
			list = append(list, item)
		}
		_ = cursor.Close(context.Background())
		return &list
	}
	return nil
}
// UpdateOne applies update to the first document matching filter and returns
// the modified count (0 on error).
// NOTE(review): mutates the caller's update map by injecting "update_time".
func (m *Setting) UpdateOne(filter interface{}, update bson.M) int64 {
	update["update_time"] = time.Now().UnixNano()
	if result, err := UpdateOne(m.table(), filter, update); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// UpdateMany applies update to every document matching filter and returns
// the modified count (0 on error). Same caveat as UpdateOne.
func (m *Setting) UpdateMany(filter interface{}, update bson.M) int64 {
	update["update_time"] = time.Now().UnixNano()
	if result, err := UpdateMany(m.table(), filter, update); err == nil {
		return result.ModifiedCount
	}
	return 0
}
// TrashOne soft-deletes the first document matching filter by setting its
// delete_time; returns the modified count (0 on error).
func (m *Setting) TrashOne(filter interface{}) int64 {
	if result, err := UpdateOne(m.table(), filter, bson.M{"delete_time": time.Now().UnixNano()}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// RestoreOne clears delete_time on the first document matching filter;
// returns the modified count (0 on error).
func (m *Setting) RestoreOne(filter interface{}) int64 {
	if result, err := UpdateOne(m.table(), filter, bson.M{"delete_time": 0}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// TrashMany soft-deletes every document matching filter; returns the
// modified count (0 on error).
func (m *Setting) TrashMany(filter interface{}) int64 {
	if result, err := UpdateMany(m.table(), filter, bson.M{"delete_time": time.Now().UnixNano()}); err == nil {
		return result.ModifiedCount
	}
	return 0
}

// RestoreMany clears delete_time on every document matching filter; returns
// the modified count (0 on error).
func (m *Setting) RestoreMany(filter interface{}) int64 {
	if result, err := UpdateMany(m.table(), filter, bson.M{"delete_time": 0}); err == nil {
		return result.ModifiedCount
	}
	return 0
}
// DeleteOne permanently deletes the first document matching filter; returns
// the deleted count (0 on error).
func (m *Setting) DeleteOne(filter interface{}) int64 {
	if result, err := DeleteOne(m.table(), filter); err == nil {
		return result.DeletedCount
	}
	return 0
}

// DeleteMany permanently deletes every document matching filter; returns the
// deleted count (0 on error).
func (m *Setting) DeleteMany(filter interface{}) int64 {
	if result, err := DeleteMany(m.table(), filter); err == nil {
		return result.DeletedCount
	}
	return 0
}

// Count returns the number of documents matching filter (0 on error).
func (m *Setting) Count(filter interface{}) int64 {
	if result, err := Count(m.table(), filter); err == nil {
		return result
	}
	return 0
}
|
package main
import (
"fmt"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"unsafe"
"github.com/yamnikov-oleg/go-gtk/gdk"
"github.com/yamnikov-oleg/go-gtk/gio"
"github.com/yamnikov-oleg/go-gtk/glib"
"github.com/yamnikov-oleg/go-gtk/gtk"
"github.com/yamnikov-oleg/go-gtk/pango"
)
// Ui aggregates all widgets of the launcher window so event handlers can
// reach them without threading arguments through every callback.
var Ui struct {
	Window      UiWindow
	RootBox     *gtk.VBox
	SearchEntry UiEntry
	ScrollWin   *gtk.ScrolledWindow
	ListStore   *gtk.ListStore
	TreeView    UiTreeView
	Pointer     UiPointer
}
// UiWindow wraps *gtk.Window to hang launcher-specific signal handlers on it.
type UiWindow struct {
	*gtk.Window
}
// OnKeyPress implements keyboard control of the result list: Up/Down cycle
// the selection, Return launches the selected entry, Escape quits, and Tab
// completes the search text with the selected entry's tab name (caret placed
// at the end via SelectRegion). Returns true (event consumed) only for Tab,
// so the other keys still propagate to GTK's default handling.
func (UiWindow) OnKeyPress(e *gdk.EventKey) bool {
	switch e.Keyval {
	case gdk.KEY_Down:
		Ui.TreeView.Selected().IncCycle().Select()
	case gdk.KEY_Up:
		Ui.TreeView.Selected().DecCycle().Select()
	case gdk.KEY_Return:
		Ui.TreeView.Selected().Execute()
	case gdk.KEY_Escape:
		gtk.MainQuit()
	case gdk.KEY_Tab:
		selected := Ui.TreeView.Selected()
		if !selected.None() {
			tabname := selected.TabName()
			Ui.SearchEntry.SetText(tabname)
			Ui.SearchEntry.SelectRegion(len(tabname), -1)
		}
		return true
	}
	return false
}
// OnButtonPress quits the launcher when a button press lands outside the
// window bounds — "click-away" dismissal, enabled by the pointer grab below.
func (UiWindow) OnButtonPress(e *gdk.EventButton) {
	var wid, hei, clickX, clickY int
	Ui.Window.GetSize(&wid, &hei)
	clickX, clickY = int(e.X), int(e.Y)
	if clickX < 0 || clickX > wid || clickY < 0 || clickY > hei {
		gtk.MainQuit()
	}
}
// OnFocusIn grabs the pointer as soon as the window gains focus so that
// clicks outside the window are still delivered to OnButtonPress.
func (UiWindow) OnFocusIn() {
	Ui.Pointer.Grab()
}
// UiEntry wraps *gtk.Entry for the search box.
type UiEntry struct {
	*gtk.Entry
}
// OnChanged re-runs the search on every edit of the entry text.
func (UiEntry) OnChanged() {
	UpdateSearchResults()
}
// UiTreeIter wraps *gtk.TreeIter; a nil wrapper doubles as the "no row"
// sentinel (see NilTreeIter / None).
type UiTreeIter struct {
	*gtk.TreeIter
}
// NilTreeIter is the sentinel returned when no row is available.
var NilTreeIter = UiTreeIter{nil}
// NewTreeIter allocates a fresh, unpositioned iterator.
func NewTreeIter() UiTreeIter {
	return UiTreeIter{new(gtk.TreeIter)}
}
// None reports whether this iterator is the nil sentinel.
func (iter UiTreeIter) None() bool {
	return iter.TreeIter == nil
}
// GetStr reads column i of this row as a string ("" for the nil iterator).
// Column layout: 0=GIcon, 1=markup name, 2=command line, 3=tab name,
// 4=plain name, 5=icon name, 6=category markup.
func (iter UiTreeIter) GetStr(i int) string {
	if iter.None() {
		return ""
	}
	var val glib.GValue
	Ui.ListStore.GetValue(iter.TreeIter, i, &val)
	return val.GetString()
}
// MarkupName returns the pango-markup display name (column 1).
func (iter UiTreeIter) MarkupName() string {
	return iter.GetStr(1)
}
// Cmdline returns the command line to execute (column 2).
func (iter UiTreeIter) Cmdline() string {
	return iter.GetStr(2)
}
// TabName returns the tab-completion text (column 3).
func (iter UiTreeIter) TabName() string {
	return iter.GetStr(3)
}
// Name returns the plain entry name (column 4).
func (iter UiTreeIter) Name() string {
	return iter.GetStr(4)
}
// IconName returns the icon name string (column 5).
func (iter UiTreeIter) IconName() string {
	return iter.GetStr(5)
}
// Execute launches the command stored in this row, records the launch in
// history, and quits the main loop. No-op for the nil iterator or an empty
// command line.
func (iter UiTreeIter) Execute() {
	if iter.None() {
		return
	}
	var val glib.GValue
	Ui.ListStore.GetValue(iter.TreeIter, 2, &val)
	cmdline := val.GetString()
	if cmdline == "" {
		return
	}
	cmd := SplitCommandline(cmdline)
	// Fix: report a failed launch instead of silently discarding the error;
	// a failed launch is not recorded in history, but the launcher still
	// quits (matching the original flow).
	if err := exec.Command(cmd[0], cmd[1:]...).Start(); err != nil {
		errduring("command execution", err, "")
		gtk.MainQuit()
		return
	}
	MakeHistRecord(HistRecord{
		Name: iter.Name(),
		TabName: iter.TabName(),
		Icon: iter.IconName(),
		Cmdline: iter.Cmdline(),
	})
	gtk.MainQuit()
}
// Select highlights this row and scrolls the tree view until it is visible.
// No-op for the nil iterator.
func (iter UiTreeIter) Select() {
	if iter.None() {
		return
	}
	Ui.TreeView.GetSelection().SelectIter(iter.TreeIter)
	Ui.TreeView.ScrollToCell(Ui.ListStore.GetPath(iter.TreeIter), nil, false, 0, 0)
}
// Inc advances to the next row, returning the nil iterator at the end.
// Note: the underlying gtk iterator is advanced in place.
func (iter UiTreeIter) Inc() UiTreeIter {
	if iter.None() || !Ui.ListStore.IterNext(iter.TreeIter) {
		return NilTreeIter
	}
	return iter
}
// Dec moves to the previous row, returning the nil iterator at the start.
func (iter UiTreeIter) Dec() UiTreeIter {
	if iter.None() || !Ui.ListStore.IterPrev(iter.TreeIter) {
		return NilTreeIter
	}
	return iter
}
// IncCycle advances to the next row, wrapping around to the first row when
// the end is reached (or when this is the nil iterator).
func (iter UiTreeIter) IncCycle() UiTreeIter {
	iter = iter.Inc()
	if iter.None() {
		return Ui.TreeView.First()
	}
	return iter
}
// DecCycle moves to the previous row, wrapping around to the last row when
// the start is reached (or when this is the nil iterator).
func (iter UiTreeIter) DecCycle() UiTreeIter {
	iter = iter.Dec()
	if iter.None() {
		return Ui.TreeView.Last()
	}
	return iter
}
// UiTreeView wraps *gtk.TreeView with row-navigation helpers.
type UiTreeView struct {
	*gtk.TreeView
}
// OnRowActivated launches the entry that was activated (double-clicked).
func (UiTreeView) OnRowActivated() {
	Ui.TreeView.Selected().Execute()
}
// Selected returns an iterator positioned at the selected row, or the nil
// iterator when nothing is selected.
func (UiTreeView) Selected() UiTreeIter {
	selection := Ui.TreeView.GetSelection()
	if selection.CountSelectedRows() == 0 {
		return NilTreeIter
	}
	iter := NewTreeIter()
	selection.GetSelected(iter.TreeIter)
	return iter
}
// First returns an iterator at the first row, or the nil iterator when the
// list store is empty.
func (UiTreeView) First() UiTreeIter {
	iter := NewTreeIter()
	if !Ui.ListStore.GetIterFirst(iter.TreeIter) {
		return NilTreeIter
	}
	return iter
}
// Last returns an iterator at the final row of the list store, or the nil
// iterator when the store is empty.
func (UiTreeView) Last() UiTreeIter {
	count := Ui.TreeView.Count()
	if count == 0 {
		return NilTreeIter
	}
	iter := NewTreeIter()
	// Fix: reuse the count computed above instead of walking the model a
	// second time with another Count() call.
	Ui.ListStore.IterNthChild(iter.TreeIter, nil, count-1)
	return iter
}
// Count reports the number of top-level rows in the backing list store.
func (UiTreeView) Count() int {
	return Ui.ListStore.IterNChildren(nil)
}
// Clear removes every row from the backing list store.
func (UiTreeView) Clear() {
	Ui.ListStore.Clear()
}
// AppendLaunchEntry appends entry as a new result row. category labels the
// leftmost column; pass "" for follow-up rows of the same category.
func (UiTreeView) AppendLaunchEntry(entry *LaunchEntry, category string) {
	// Fix: resolve the icon BEFORE touching the model — the original appended
	// the row first, so an unparsable icon left an empty row in the ListStore.
	gicon, err := gio.NewIconForString(entry.Icon)
	if err != nil {
		errduring("appending entry to ListStore", err, "Skipping it")
		return
	}
	iter := NewTreeIter()
	Ui.ListStore.Append(iter.TreeIter)
	Ui.ListStore.Set(iter.TreeIter,
		0, gicon.GIcon,
		1, entry.MarkupName,
		2, entry.Cmdline,
		3, entry.TabName,
		4, entry.Name,
		5, entry.Icon,
		6, fmt.Sprintf("<small><i>%v</i></small>", category),
	)
}
// UiPointer wraps the gdk pointer device used for click-away detection.
type UiPointer struct {
	*gdk.Device
}
// Grab captures the pointer for the launcher window so button presses made
// outside the window still reach OnButtonPress. A failed grab is only
// logged — the launcher keeps working, minus click-away dismissal.
func (UiPointer) Grab() {
	status := Ui.Pointer.Device.Grab(Ui.Window.GetWindow(), gdk.OWNERSHIP_APPLICATION, true, gdk.BUTTON_PRESS_MASK, nil, gdk.CURRENT_TIME)
	if status != gdk.GRAB_SUCCESS {
		errduring("pointer grabbing, grab status %v", nil, "", status)
	}
}
// Category pairs a display name with the search function that produces its
// launch entries.
type Category struct {
	Name string
	Search EntrySearchFunc
}
// EnabledCategories returns the categories switched on in Config, in their
// fixed display order.
func EnabledCategories() []Category {
	candidates := []struct {
		on  bool
		cat Category
	}{
		{Config.EnabledCategories.Calc, Category{"Calc", PerformCalc}},
		{Config.EnabledCategories.History, Category{"History", SearchHistEntries}},
		{Config.EnabledCategories.Apps, Category{"Apps", SearchAppEntries}},
		{Config.EnabledCategories.URL, Category{"URL", SearchUrlEntries}},
		{Config.EnabledCategories.Commands, Category{"Commands", SearchCmdEntries}},
		{Config.EnabledCategories.Files, Category{"Files", SearchFileEntries}},
		{Config.EnabledCategories.WebSearch, Category{"Web Search", MakeWebSearchEntry}},
	}
	cats := []Category{}
	for _, c := range candidates {
		if c.on {
			cats = append(cats, c.cat)
		}
	}
	return cats
}
// UpdateSearchResults repopulates the result list from the current search
// text: every enabled category is queried in order, the first hit of each
// category carries the category label (subsequent hits get a blank label),
// and the first row overall is pre-selected.
func UpdateSearchResults() {
	Ui.TreeView.Clear()
	text := strings.TrimSpace(Ui.SearchEntry.GetText())
	cats := EnabledCategories()
	for _, s := range cats {
		list := s.Search(text)
		for i, entry := range list {
			if i == 0 {
				Ui.TreeView.AppendLaunchEntry(entry, s.Name)
			} else {
				Ui.TreeView.AppendLaunchEntry(entry, "")
			}
		}
	}
	Ui.TreeView.First().Select()
}
// GTK requires all UI calls to happen on one OS thread; pin the main
// goroutine to its thread before anything else runs.
func init() {
	runtime.LockOSThread()
}
// SetupUi builds the launcher UI, wires up signals, loads the stylesheet,
// performs the initial (empty-query) search, and enters the GTK main loop.
// When dry is true the window is never shown and an idle callback quits the
// main loop immediately — a startup smoke-test mode.
func SetupUi(dry bool) {
	logf("gtk.Init()\n")
	gtk.Init(&os.Args)
	//
	// Constructors
	//
	logf("Constructors\n")
	Ui.Window = UiWindow{gtk.NewWindow(gtk.WINDOW_TOPLEVEL)}
	Ui.RootBox = gtk.NewVBox(false, 6)
	Ui.SearchEntry = UiEntry{gtk.NewSearchEntry()}
	Ui.ScrollWin = gtk.NewScrolledWindow(nil, nil)
	Ui.TreeView = UiTreeView{gtk.NewTreeView()}
	Ui.ListStore = gtk.NewListStore(
		gio.GetIconType(), // Icon
		glib.G_TYPE_STRING, // MarkupName
		glib.G_TYPE_STRING, // Cmdline
		glib.G_TYPE_STRING, // TabName
		glib.G_TYPE_STRING, // Name
		glib.G_TYPE_STRING, // IconName
		glib.G_TYPE_STRING, // Category
	)
	Ui.Pointer = UiPointer{gdk.GetDefaultDisplay().GetDeviceManager().GetClientPointer()}
	//
	// Window
	//
	logf("Window\n")
	Ui.Window.SetPosition(gtk.WIN_POS_CENTER)
	Ui.Window.SetGravity(gdk.GRAVITY_SOUTH)
	// NOTE(review): Atoi errors are discarded — a malformed config silently
	// yields a 0x0 size request; consider validating Config.UI upstream.
	w, _ := strconv.Atoi(Config.UI.Width)
	h, _ := strconv.Atoi(Config.UI.Height)
	Ui.Window.SetSizeRequest(w, h)
	Ui.Window.SetDecorated(false)
	Ui.Window.SetSkipTaskbarHint(true)
	Ui.Window.SetBorderWidth(6)
	// The raw GdkEvent arrives as an opaque callback argument; cast it back
	// to the concrete event struct before dispatching.
	Ui.Window.Connect("key-press-event", func(ctx *glib.CallbackContext) bool {
		arg := ctx.Args(0)
		e := *(**gdk.EventKey)(unsafe.Pointer(&arg))
		return Ui.Window.OnKeyPress(e)
	})
	Ui.Window.Connect("button-press-event", func(ctx *glib.CallbackContext) {
		arg := ctx.Args(0)
		e := *(**gdk.EventButton)(unsafe.Pointer(&arg))
		Ui.Window.OnButtonPress(e)
	})
	Ui.Window.Connect("focus-in-event", Ui.Window.OnFocusIn)
	Ui.Window.Connect("destroy", gtk.MainQuit)
	//
	// SearchEntry
	//
	logf("SearchEntry\n")
	Ui.SearchEntry.Connect("changed", Ui.SearchEntry.OnChanged)
	//
	// TreeView
	//
	logf("TreeView\n")
	Ui.TreeView.SetHeadersVisible(false)
	// Column 1: category label (markup, model column 6), top-left aligned.
	crtCat := gtk.NewCellRendererText()
	glib.ObjectFromNative(unsafe.Pointer(crtCat.ToCellRenderer())).Set("xalign", 0.0)
	glib.ObjectFromNative(unsafe.Pointer(crtCat.ToCellRenderer())).Set("yalign", 0.0)
	clnCat := gtk.NewTreeViewColumnWithAttributes("Cat", crtCat, "markup", 6)
	clnCat.SetFixedWidth(80)
	Ui.TreeView.AppendColumn(clnCat)
	// Column 2: entry icon (GIcon, model column 0).
	crp := gtk.NewCellRendererPixbuf()
	glib.ObjectFromNative(unsafe.Pointer(crp.ToCellRenderer())).Set("stock-size", int(gtk.ICON_SIZE_DND))
	Ui.TreeView.AppendColumn(gtk.NewTreeViewColumnWithAttributes2("Icon", crp, "gicon", 0))
	// Column 3: entry name (markup, model column 1), ellipsized at the start.
	crt := gtk.NewCellRendererText()
	glib.ObjectFromNative(unsafe.Pointer(crt.ToCellRenderer())).Set("ellipsize", int(pango.ELLIPSIZE_START))
	Ui.TreeView.AppendColumn(gtk.NewTreeViewColumnWithAttributes("Id", crt, "markup", 1))
	Ui.TreeView.SetModel(Ui.ListStore)
	Ui.TreeView.Connect("row-activated", Ui.TreeView.OnRowActivated)
	//
	// Focus setup
	//
	// Keep keyboard focus pinned to the search entry; list navigation is
	// handled by OnKeyPress instead.
	logf("Focus setup\n")
	Ui.RootBox.SetCanFocus(false)
	Ui.ScrollWin.SetCanFocus(false)
	Ui.TreeView.SetCanFocus(false)
	Ui.SearchEntry.GrabFocus()
	//
	// Packing
	//
	logf("Packing\n")
	Ui.ScrollWin.Add(Ui.TreeView.TreeView)
	Ui.RootBox.PackStart(Ui.SearchEntry.Entry, false, false, 0)
	Ui.RootBox.PackEnd(Ui.ScrollWin, true, true, 0)
	Ui.Window.Add(Ui.RootBox)
	//
	// Stylesheet loading
	//
	logf("Stylesheet\n")
	provider := gtk.NewCssProvider()
	screen := gdk.GetDefaultDisplay().GetDefaultScreen()
	gtk.StyleContextAddProviderForScreen(screen, provider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
	err := provider.LoadFromData(AppStyle())
	if err != nil {
		errduring("CSS loading", err, "")
	}
	logf("UpdateSearchResults()\n")
	UpdateSearchResults()
	if dry {
		logf("Defer MainQuit()\n")
		glib.IdleAdd(func() {
			gtk.MainQuit()
		})
	} else {
		logf("ShowAll()\n")
		Ui.Window.ShowAll()
	}
	logf("gtk.Main()\n")
	gtk.Main()
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package terraform
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/mattermost/mattermost-cloud/internal/tools/exechelper"
log "github.com/sirupsen/logrus"
)
// arg renders one terraform CLI argument. A key without a leading dash gets
// one prepended; with no values the bare flag is returned, otherwise the
// values are concatenated (no separator) into "-key=value".
func arg(key string, values ...string) string {
	flag := key
	if !strings.HasPrefix(flag, "-") {
		flag = "-" + flag
	}
	if len(values) == 0 {
		return flag
	}
	return flag + "=" + strings.Join(values, "")
}
// outputLogger forwards one line of terraform output to the logger with a
// "[terraform]" prefix. Whitespace-only lines are dropped.
func outputLogger(line string, logger log.FieldLogger) {
	trimmed := strings.TrimSpace(line)
	if trimmed != "" {
		logger.Infof("[terraform] %s", trimmed)
	}
}
// run executes the terraform binary in c.dir with the given arguments plus
// "-no-color", setting TF_IN_AUTOMATION=1 to suppress interactive prompts.
// It returns whatever exechelper.Run returns (stdout, stderr, error), with
// each output line also streamed through outputLogger.
func (c *Cmd) run(arg ...string) ([]byte, []byte, error) {
	cmd := exec.Command(c.terraformPath, append(arg, "-no-color")...)
	cmd.Dir = c.dir
	cmd.Env = append(os.Environ(), "TF_IN_AUTOMATION=1")
	return exechelper.Run(cmd, c.logger, outputLogger)
}
|
package main
import (
"context"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
"myRPC/demo/trace/base"
"time"
)
// main demonstrates opentracing: it installs a global tracer, opens a root
// span, attaches it to a context, and runs two traced child operations.
func main() {
	tracer, closer := base.Init("serName")
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)
	span := tracer.StartSpan("spanName")
	span.SetTag("main_tag_key", "main_tag_value")
	span.LogFields(log.String("main_fields_key", "main_fields_value"))
	span.LogKV("main_kv_key", "main_kv_value")
	// Carry the root span in the context so the children become its sub-spans.
	ctx := opentracing.ContextWithSpan(context.Background(), span)
	formatString(ctx)
	printHello(ctx)
	span.Finish()
}
// formatString starts a child span from ctx, sleeps 2s to simulate work,
// and annotates the span with a tag, fields, and key/value logs.
func formatString(ctx context.Context) {
	span, _ := opentracing.StartSpanFromContext(ctx, "formatString")
	defer span.Finish()
	time.Sleep(2*time.Second)
	span.SetTag("formatString_tag_key", "formatString_tag_value")
	span.LogFields(log.String("formatString_fields_key", "formatString_fields_value"))
	span.LogKV("formatString_kv_key", "formatString_kv_value")
}
// printHello starts a child span from ctx, sleeps 3s to simulate work, and
// annotates the span with a tag, fields, and key/value logs.
func printHello(ctx context.Context) {
	// Fix: the span was misnamed "formatString" (copy-paste); it must carry
	// this function's name so traces attribute the 3s of work correctly.
	span, _ := opentracing.StartSpanFromContext(ctx, "printHello")
	defer span.Finish()
	time.Sleep(3*time.Second)
	span.SetTag("printHello_tag_key", "printHello_tag_value")
	span.LogFields(log.String("printHello_fields_key", "printHello_fields_value"))
	span.LogKV("printHello_kv_key", "printHello_kv_value")
}
|
package main
import (
"fmt"
"strings"
)
// main demonstrates strings.Builder: a paragraph, then a reused builder
// producing a list. Both results are printed.
func main() {
	hello := "Hello Builder"
	sb := strings.Builder{}
	sb.WriteString("<p>")
	sb.WriteString(hello)
	sb.WriteString("</p>")
	fmt.Println(sb.String())
	words := []string{"Hello", "world"}
	// Reset clears the builder (dropping its buffer) so it can be reused.
	sb.Reset()
	sb.WriteString("<ul>")
	for _, v := range words {
		sb.WriteString("<li>")
		sb.WriteString(v)
		sb.WriteString("</li>")
	}
	sb.WriteString("</ul>")
	// Fix: the list markup was built but never emitted.
	fmt.Println(sb.String())
}
|
//+build wireinject
package server_tools
import (
"context"
"net/http"
"github.com/google/wire"
"github.com/YaroslavChirko/architecture-lab2/server/dbs"
"database/sql"
"github.com/YaroslavChirko/architecture-lab2/server/mhttp"
)
// APIServer bundles the database handle with the HTTP server that serves it.
type APIServer struct {
	db *sql.DB
	serv *http.Server
}
// Start runs the HTTP server and blocks until it stops.
func (api *APIServer) Start() error {
	return api.serv.ListenAndServe()
}
// Stop gracefully shuts the server down with a background context (i.e. no
// shutdown deadline).
// NOTE(review): value receiver here vs pointer receiver on Start — it works
// because only the serv pointer is read, but consider unifying on *APIServer.
func (api APIServer) Stop() error {
	return api.serv.Shutdown(context.Background())
}
// createHttpServer builds the HTTP server on :8080 with the db-backed
// handler mounted at "/". The handler is registered on http.DefaultServeMux
// (srv.Handler stays nil, so the server falls back to the default mux);
// because of that global registration, calling this twice would panic on a
// duplicate "/" pattern.
func createHttpServer(db *sql.DB) *http.Server {
	srv := &http.Server{Addr: ":8080"}
	http.HandleFunc("/", mhttp.Handler(db))
	return srv
}
// makeAPIServer assembles an APIServer from its two dependencies.
func makeAPIServer(db *sql.DB, serv *http.Server) *APIServer {
	return &APIServer{
		db:   db,
		serv: serv,
	}
}
// CreateAPIServer is a google/wire injector (note the //+build wireinject
// tag on this file): the panic(wire.Build(...)) body is never executed —
// `wire` generates the real implementation, equivalent to the commented-out
// code below, in a sibling _gen file.
func CreateAPIServer() *APIServer {
	panic(wire.Build(
		dbs.DbOpen,
		createHttpServer,
		makeAPIServer,
	))/*
	dbo:=dbs.DbOpen()
	hs:=createHttpServer(dbo)
	return makeAPIServer(dbo, hs)*/
}
|
/*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/gravitational/trace"
"golang.org/x/sync/errgroup"
"k8s.io/cli-runtime/pkg/genericclioptions"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/component-base/cli"
"k8s.io/kubectl/pkg/cmd"
"k8s.io/kubectl/pkg/cmd/plugin"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
)
var (
	// podForbiddenRe matches kubectl's "pods ... is forbidden" RBAC error and
	// captures the pod name (group 1) and namespace (group 2).
	podForbiddenRe = regexp.MustCompile(`(?m)Error from server \(Forbidden\): pods "(.*)" is forbidden: User ".*" cannot get resource "pods" in API group "" in the namespace "(.*)"`)
	// clusterForbidden is the marker Teleport prints when access to the
	// cluster itself is denied.
	clusterForbidden = "[00] access denied"
	// clusterObjectDiscoveryFailed is printed when kubectl tries to do API discovery
	// - calling /apis endpoint - but Teleport denies the request. Since it cannot
	// discover the resources available in the cluster, it prints this message saying
	// that the cluster does not have pod(s). Since every Kubernetes cluster supports
	// pods, it's safe to create a resource access request.
	clusterObjectDiscoveryFailed = regexp.MustCompile(`(?m)the server doesn't have a resource type "pods?"`)
)
// resourceKind identifies a Kubernetes resource whose access was denied.
type resourceKind struct {
	// kind is the Teleport resource kind (e.g. pod or kube_cluster).
	kind string
	// subResourceName is "namespace/name" for namespaced resources; empty for
	// cluster-level denials.
	subResourceName string
}
// onKubectlCommand handles `tsh kubectl ...`. Behavior depends on whether
// the tshKubectlReexecEnvVar environment variable is set:
//
//   - Unset (the user's original invocation): tsh re-execs itself with the
//     exact same arguments, mirroring the child's stderr through an io.Pipe
//     for analysis. This indirection is required because the embedded
//     kubectl code calls os.Exit() in every code path, so tsh must intercept
//     the exit status to detect access-denied failures.
//   - Set (the re-exec'd child): only the embedded kubectl code runs.
//
// After the child finishes, tsh inspects the captured stderr and, when a
// denial is detected, creates a resource access request, waits for approval,
// and runs the command again.
func onKubectlCommand(cf *CLIConf, args []string) error {
	if os.Getenv(tshKubectlReexecEnvVar) == "" {
		err := runKubectlAndCollectRun(cf, args)
		return trace.Wrap(err)
	}
	runKubectlCode(args)
	return nil
}
const (
	// tshKubectlReexecEnvVar marks a re-exec'd child process: when set, tsh
	// executes the embedded kubectl code instead of re-execing again.
	tshKubectlReexecEnvVar = "TSH_KUBE_REEXEC"
)
// runKubectlReexec reexecs itself and copies the `stderr` output into
// the provided collector.
// It also sets tshKubectlReexec for the command to prevent
// an exec loop
func runKubectlReexec(selfExec string, args []string, collector io.Writer) error {
cmd := exec.Command(selfExec, args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, collector)
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=yes", tshKubectlReexecEnvVar))
return trace.Wrap(cmd.Run())
}
// runKubectlCode runs the actual kubectl package code with the default
// options and never returns: on success it calls os.Exit(0); on failure
// cmdutil.CheckErr prints the error and exits non-zero. It is only reached
// in a child process marked with tshKubectlReexecEnvVar, because kubectl
// calls os.Exit in multiple paths and tsh must be able to retry calls.
func runKubectlCode(args []string) {
	// These values are the defaults used by kubectl and can be found here:
	// https://github.com/kubernetes/kubectl/blob/3612c18ed86fc0a2f4467ca355b3e21569fabe0a/pkg/cmd/cmd.go#L94
	defaultConfigFlags := genericclioptions.NewConfigFlags(true).
		WithDeprecatedPasswordFlag().
		WithDiscoveryBurst(300).
		WithDiscoveryQPS(50.0)
	command := cmd.NewDefaultKubectlCommandWithArgs(
		cmd.KubectlOptions{
			// init the default plugin handler.
			PluginHandler: cmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes),
			Arguments: args,
			ConfigFlags: defaultConfigFlags,
			// init the IOStreams from the process's standard streams.
			IOStreams: genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
		},
	)
	// override args without kubectl to avoid errors.
	command.SetArgs(args[1:])
	// run command until it finishes.
	if err := cli.RunNoErrOutput(command); err != nil {
		// Pretty-print the error and exit with an error.
		cmdutil.CheckErr(err)
	}
	os.Exit(0)
}
// runKubectlAndCollectRun re-execs tsh as kubectl while scanning the child's
// stderr for Teleport/Kubernetes RBAC denials. On a denial it creates a
// resource access request, waits for approval, and retries the command once.
// When the child ultimately fails, tsh exits with the child's exit code.
func runKubectlAndCollectRun(cf *CLIConf, args []string) error {
	var (
		alreadyRequestedAccess bool
		err error
		exitErr *exec.ExitError
	)
	for {
		// missingKubeResources collects the Kubernetes resources whose access
		// was rejected by this kubectl call.
		missingKubeResources := make([]resourceKind, 0, 50)
		reader, writer := io.Pipe()
		group, _ := errgroup.WithContext(cf.Context)
		group.Go(
			func() error {
				// Scan each stderr line emitted by kubectl and classify
				// access denials so the proper access request can be created.
				// Currently supported resources:
				// - pod
				// - kube_cluster
				scanner := bufio.NewScanner(reader)
				scanner.Split(bufio.ScanLines)
				for scanner.Scan() {
					line := scanner.Text()
					// A pod-level denial (Teleport pod RBAC or Kubernetes
					// RBAC) warrants a pod resource access request...
					if podForbiddenRe.MatchString(line) {
						results := podForbiddenRe.FindStringSubmatch(line)
						missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubePod, subResourceName: filepath.Join(results[2], results[1])})
						// ...while a cluster-level denial warrants a request
						// for the cluster itself, not a pod.
					} else if strings.Contains(line, clusterForbidden) || clusterObjectDiscoveryFailed.MatchString(line) {
						missingKubeResources = append(missingKubeResources, resourceKind{kind: types.KindKubernetesCluster})
					}
				}
				return trace.Wrap(scanner.Err())
			},
		)
		// Fix: this was `err := ...`, shadowing the outer err — after the
		// loop the outer err was always nil, so the exit-code propagation
		// below never fired and tsh exited 0 even when kubectl failed.
		err = runKubectlReexec(cf.executablePath, args, writer)
		writer.CloseWithError(io.EOF)
		if scanErr := group.Wait(); scanErr != nil {
			log.WithError(scanErr).Warn("unable to scan stderr payload")
		}
		if err == nil {
			break
		} else if !errors.As(err, &exitErr) {
			return trace.Wrap(err)
		} else if exitErr.ExitCode() != cmdutil.DefaultErrorExitCode {
			// Exit codes other than 1 are emitted by pod exec and mean the
			// user was allowed to run the command in the pod; nothing to do.
			break
		}
		if len(missingKubeResources) > 0 && !alreadyRequestedAccess {
			// Create the access requests for the user and wait for approval.
			if err := createKubeAccessRequest(cf, missingKubeResources, args); err != nil {
				return trace.Wrap(err)
			}
			alreadyRequestedAccess = true
			continue
		}
		break
	}
	// exit with the kubectl exit code to keep compatibility.
	if errors.As(err, &exitErr) {
		os.Exit(exitErr.ExitCode())
	}
	return nil
}
// createKubeAccessRequest creates an access request for the denied resources
// (effective only if the user's roles allow search_as_role) and waits for it
// to be resolved via executeAccessRequest.
func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {
	tc, err := makeClient(cf, true)
	if err != nil {
		return trace.Wrap(err)
	}
	kubeName, err := getKubeClusterName(args, tc.SiteName)
	if err != nil {
		return trace.Wrap(err)
	}
	// Build one resource ID per denial:
	// /<teleport-cluster>/<kind>/<kube-cluster>[/<namespace>/<name>].
	for _, rec := range resources {
		cf.RequestedResourceIDs = append(
			cf.RequestedResourceIDs,
			filepath.Join("/", tc.SiteName, rec.kind, kubeName, rec.subResourceName),
		)
	}
	cf.Reason = fmt.Sprintf("Resource request automatically created for %v", args)
	if err := executeAccessRequest(cf, tc); err != nil {
		// TODO(tigrato): intercept the error to validate the origin
		return trace.Wrap(err)
	}
	return nil
}
// extractKubeConfigAndContext parses the args and extracts - if present -
// the --kubeconfig flag that overrides the default kubeconfig location
// and the --context flag that overrides the default context to use.
// Both results are empty when args is too short (fewer than 3 elements,
// i.e. nothing beyond "tsh kubectl") or when flag parsing fails.
func extractKubeConfigAndContext(args []string) (kubeconfig string, context string) {
	if len(args) <= 2 {
		return
	}
	// Reuse kubectl's own command definition so its flag set (including
	// --kubeconfig/--context) is parsed exactly as kubectl would.
	command := cmd.NewDefaultKubectlCommandWithArgs(
		cmd.KubectlOptions{
			Arguments: args[2:],
		},
	)
	if err := command.ParseFlags(args[2:]); err != nil {
		return
	}
	kubeconfig = command.Flag("kubeconfig").Value.String()
	context = command.Flag("context").Value.String()
	return
}
// getKubeClusterName extracts the Kubernetes Cluster name if the Kube belongs to
// the teleportClusterName cluster. It parses the args to extract the `--kubeconfig`
// and `--context` flag values and uses them if either was overridden;
// otherwise it falls back to the kubeconfig's currently selected context.
func getKubeClusterName(args []string, teleportClusterName string) (string, error) {
	kubeconfigLocation, selectedContext := extractKubeConfigAndContext(args)
	if selectedContext == "" {
		kubeName, err := kubeconfig.SelectedKubeCluster(kubeconfigLocation, teleportClusterName)
		return kubeName, trace.Wrap(err)
	}
	kubeName := kubeconfig.KubeClusterFromContext(selectedContext, teleportClusterName)
	if kubeName == "" {
		return "", trace.BadParameter("selected context %q does not belong to Teleport cluster %q", selectedContext, teleportClusterName)
	}
	return kubeName, nil
}
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// main implements a toy password gate: it prompts for a password on stdin
// and prints the flag when the hard-coded password matches.
func main() {
	welcome := `
====================================================
TOP SECRET Super HAxx0r DATABASE
WELCOME ANON 47
ENTER YOUR PASSWORD TO CONTINUE
====================================================
`
	password := "5up32_53cu23_p455w02d_f81"
	flag := "YCEP{1_1uv_5721n95}"
	fmt.Println(welcome)
	in := bufio.NewReader(os.Stdin)
	fmt.Print("Enter Password: ")
	input, _ := in.ReadString('\n')
	// Strip surrounding spaces plus the CR/LF line ending (CRLF -> LF safe).
	input = strings.Trim(input, " \r\n")
	if input == password {
		fmt.Printf("ACCESS GRANTED\nFlag: %s\n", flag)
	} else {
		fmt.Println("ACCESS DENIED")
	}
}
|
package job
import (
"fmt"
"reflect"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestBacktrace checks that Backtrace reports this test function, the
// testing runner, and runtime.goexit as the top frames.
//
// NOTE(review): the expected strings hard-code line numbers (37, 1193, 1371)
// of this file, testing.tRunner, and the runtime's asm file — they are tied
// to a specific Go toolchain version and will break on upgrades; consider
// matching with a regexp on function names instead.
func TestBacktrace(t *testing.T) {
	type args struct {
		size int
	}
	// Resolve the file paths of this test, the testing package, and the
	// runtime assembly via the current call stack.
	_, filename, _, _ := runtime.Caller(0)
	_, testingFile, _, _ := runtime.Caller(1)
	_, asmFile, _, _ := runtime.Caller(2)
	trace1 := fmt.Sprintf("in %s:37 github.com/thesoulless/boxer/v2/job.TestBacktrace.func1", filename)
	trace2 := fmt.Sprintf("in %s:1193 testing.tRunner", testingFile)
	trace3 := fmt.Sprintf("in %s:1371 runtime.goexit", asmFile)
	tests := []struct {
		name string
		args args
		want []string
	}{
		{
			name: "this caller trace",
			args: args{size: 10},
			want: []string{trace1, trace2, trace3},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Only frames 0 and 2 are compared; frame 1 (tRunner) is skipped.
			if got := Backtrace(tt.args.size); !reflect.DeepEqual(got[0], tt.want[0]) || !reflect.DeepEqual(got[2], tt.want[2]) {
				t.Errorf("Backtrace() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestNew verifies that New copies the queue, type, and args into the Job.
func TestNew(t *testing.T) {
	type args struct {
		jobType string
		queue string
		maxRetries int
		delay time.Duration
		args []interface{}
	}
	var a1 []interface{}
	a1 = append(a1, 3)
	tests := []struct {
		name string
		args args
		want *Job
	}{
		{
			name: "new",
			args: args{
				jobType: "SomeJob",
				queue: "default",
				maxRetries: 3,
				delay: time.Second * 3,
				args: a1,
			},
			want: &Job{
				Queue: "default",
				Type: "SomeJob",
				Args: a1,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := New(tt.args.jobType, tt.args.queue, tt.args.maxRetries, tt.args.delay, tt.args.args...)
			// Fix: the original passed want.Args/got.Args as trailing
			// arguments of assert.Equal, where testify treats them as the
			// failure message (msgAndArgs) — Args was never actually compared.
			if !assert.Equal(t, tt.want.Queue, got.Queue) ||
				!assert.Equal(t, tt.want.Type, got.Type) ||
				!assert.Equal(t, tt.want.Args, got.Args) {
				t.Errorf("New() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package frame256x288
import (
"github.com/reiver/go-rgba32"
"image"
"image/color"
"image/draw"
)
// Slice is a raw frame buffer of Width×Height pixels stored row-major with
// rgba32.ByteSize bytes per pixel.
type Slice []uint8
// at returns the pixel at (x,y) as an rgba32.Slice view into the buffer, or
// nil when the receiver is nil or (x,y) is out of bounds. Mutating the
// returned slice mutates the frame.
func (receiver Slice) at(x, y int) []uint8 {
	if nil == receiver {
		return nil
	}
	if x < 0 || Width <= x {
		return nil
	}
	if y < 0 || Height <= y {
		return nil
	}
	offset := receiver.PixOffset(x,y)
	low := offset
	high := low + rgba32.ByteSize
	if rgba32.ByteSize != (high-low) {
		return nil
	}
	p := receiver[low:high]
	return rgba32.Slice(p)
}
// At implements image.Image, returning the pixel at (x,y).
// NOTE(review): for out-of-bounds coordinates at() yields nil, so this
// returns a typed-nil rgba32.Slice inside the color.Color interface — it
// compares non-nil to callers. Verify callers stay in bounds.
func (receiver Slice) At(x, y int) color.Color {
	p := receiver.at(x,y)
	return rgba32.Slice(p)
}
// Bounds implements image.Image, reporting the frame rectangle anchored at
// the origin: x in [0,Width), y in [0,Height).
func (receiver Slice) Bounds() image.Rectangle {
	return image.Rect(0, 0, Width, Height)
}
// ColorModel implements image.Image, reporting NRGBA (non-premultiplied).
// NOTE(review): Set/Dye convert incoming colors via RGBA() — premultiplied
// 16-bit channels — without un-premultiplying; confirm this matches the
// intended NRGBA model for translucent colors.
func (receiver Slice) ColorModel() color.Model {
	return color.NRGBAModel
}
// Draw composites img onto this frame with source-over blending, using img's
// own bounds as both the destination rectangle and source origin.
func (receiver Slice) Draw(img image.Image) error {
	if nil == receiver {
		return errNilReceiver
	}
	rect := img.Bounds()
	draw.Draw(receiver, rect, img, rect.Min, draw.Over)
	return nil
}
// Dye changes the color of all the pixels / pels in this frame to ‘c’.
// An rgba32.Slice is read channel-by-channel; any other color.Color goes
// through RGBA() and its 16-bit channels are scaled down to 8 bits.
func (receiver Slice) Dye(c color.Color) error {
	if nil == receiver {
		return errNilReceiver
	}
	var r,g,b,a uint8
	{
		switch casted := c.(type) {
		case rgba32.Slice:
			r = casted[rgba32.OffsetRed]
			g = casted[rgba32.OffsetGreen]
			b = casted[rgba32.OffsetBlue]
			a = casted[rgba32.OffsetAlpha]
		default:
			// Scale 16-bit channels ([0,0xffff]) down to 8-bit ([0,0xff]).
			rr,gg,bb,aa := casted.RGBA()
			r = uint8((rr*0xff)/0xffff)
			g = uint8((gg*0xff)/0xffff)
			b = uint8((bb*0xff)/0xffff)
			a = uint8((aa*0xff)/0xffff)
		}
	}
	for y:=0; y<Height; y++ {
		for x:=0; x<Width; x++ {
			receiver.set(x,y, r,g,b,a)
		}
	}
	return nil
}
// PixOffset returns the byte index of the first channel of pixel (x,y):
// row-major layout, Depth bytes per pixel. No bounds checking is performed.
func (receiver Slice) PixOffset(x int, y int) int {
	return y*(Width*Depth) + x*Depth
}
// set writes one pixel's channels at (x,y). Nil receivers and out-of-bounds
// coordinates are silently ignored.
func (receiver Slice) set(x, y int, r, g, b, a uint8) {
	if nil == receiver {
		return
	}
	p := receiver.at(x,y)
	// Fix: at() returns nil for out-of-range (x,y) — reachable through the
	// exported Set, whose coordinates are unchecked — and indexing a nil
	// slice below would panic.
	if nil == p {
		return
	}
	p[rgba32.OffsetRed] = r
	p[rgba32.OffsetGreen] = g
	p[rgba32.OffsetBlue] = b
	p[rgba32.OffsetAlpha] = a
}
// Set helps Slice fit the Go built-in draw.Image interface.
//
// Set will change the color of the pixel / pel, in this frame,
// at (‘x’,‘y’) to ‘color’. Nil receivers and nil colors are ignored; the
// color's 16-bit RGBA() channels are scaled down to 8 bits before storing.
func (receiver Slice) Set(x, y int, c color.Color) {
	if nil == receiver {
		return
	}
	if nil == c {
		return
	}
	u32r, u32g, u32b, u32a := c.RGBA()
	u8r := uint8((u32r*0xff)/0xffff)
	u8g := uint8((u32g*0xff)/0xffff)
	u8b := uint8((u32b*0xff)/0xffff)
	u8a := uint8((u32a*0xff)/0xffff)
	receiver.set(x,y, u8r, u8g, u8b, u8a)
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// ResearchStudy is documented here http://hl7.org/fhir/StructureDefinition/ResearchStudy
// Status is the only required field; every other field is optional
// (pointer or slice, omitted from JSON/BSON when empty).
type ResearchStudy struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Meta *Meta `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules *string `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language *string `bson:"language,omitempty" json:"language,omitempty"`
	Text *Narrative `bson:"text,omitempty" json:"text,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Identifier []Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
	Title *string `bson:"title,omitempty" json:"title,omitempty"`
	Protocol []Reference `bson:"protocol,omitempty" json:"protocol,omitempty"`
	PartOf []Reference `bson:"partOf,omitempty" json:"partOf,omitempty"`
	Status ResearchStudyStatus `bson:"status" json:"status"`
	PrimaryPurposeType *CodeableConcept `bson:"primaryPurposeType,omitempty" json:"primaryPurposeType,omitempty"`
	Phase *CodeableConcept `bson:"phase,omitempty" json:"phase,omitempty"`
	Category []CodeableConcept `bson:"category,omitempty" json:"category,omitempty"`
	Focus []CodeableConcept `bson:"focus,omitempty" json:"focus,omitempty"`
	Condition []CodeableConcept `bson:"condition,omitempty" json:"condition,omitempty"`
	Contact []ContactDetail `bson:"contact,omitempty" json:"contact,omitempty"`
	RelatedArtifact []RelatedArtifact `bson:"relatedArtifact,omitempty" json:"relatedArtifact,omitempty"`
	Keyword []CodeableConcept `bson:"keyword,omitempty" json:"keyword,omitempty"`
	Location []CodeableConcept `bson:"location,omitempty" json:"location,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
	Enrollment []Reference `bson:"enrollment,omitempty" json:"enrollment,omitempty"`
	Period *Period `bson:"period,omitempty" json:"period,omitempty"`
	Sponsor *Reference `bson:"sponsor,omitempty" json:"sponsor,omitempty"`
	PrincipalInvestigator *Reference `bson:"principalInvestigator,omitempty" json:"principalInvestigator,omitempty"`
	Site []Reference `bson:"site,omitempty" json:"site,omitempty"`
	ReasonStopped *CodeableConcept `bson:"reasonStopped,omitempty" json:"reasonStopped,omitempty"`
	Note []Annotation `bson:"note,omitempty" json:"note,omitempty"`
	Arm []ResearchStudyArm `bson:"arm,omitempty" json:"arm,omitempty"`
	Objective []ResearchStudyObjective `bson:"objective,omitempty" json:"objective,omitempty"`
}
// ResearchStudyArm is the backbone element ResearchStudy.arm; Name is its
// only required field.
type ResearchStudyArm struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Name string `bson:"name" json:"name"`
	Type *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
	Description *string `bson:"description,omitempty" json:"description,omitempty"`
}
// ResearchStudyObjective is the backbone element ResearchStudy.objective;
// all fields are optional.
type ResearchStudyObjective struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Name *string `bson:"name,omitempty" json:"name,omitempty"`
	Type *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
}
// OtherResearchStudy is a plain alias of ResearchStudy: converting to it
// sheds the MarshalJSON method, preventing infinite recursion below.
type OtherResearchStudy ResearchStudy
// MarshalJSON marshals the given ResearchStudy as JSON into a byte slice.
// Embedding the OtherResearchStudy alias (which has no MarshalJSON of its
// own) lets the fixed "resourceType" discriminator be injected without
// recursing back into this method.
func (r ResearchStudy) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		OtherResearchStudy
		ResourceType string `json:"resourceType"`
	}{
		OtherResearchStudy: OtherResearchStudy(r),
		ResourceType: "ResearchStudy",
	})
}
// UnmarshalResearchStudy unmarshals a ResearchStudy from its JSON bytes.
// On error the partially-decoded value is returned alongside the error.
func UnmarshalResearchStudy(b []byte) (ResearchStudy, error) {
	var researchStudy ResearchStudy
	if err := json.Unmarshal(b, &researchStudy); err != nil {
		return researchStudy, err
	}
	return researchStudy, nil
}
|
package editorapi
import (
"editorApi/controller/servers"
"editorApi/init/mgdb"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/tealeg/xlsx"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
var tblImageSys string = "image_sys"
var tblImageSysTags string = "image_sys_tags"
type imageDownloadParam struct {
	TagKey string `json:"tagKey"` // tag whose images are exported
}

// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片信息下载
// @Security ApiKeyAuth
// @accept mpfd
// @Produce application/json
// @Param tagKey formData string true "图片标签"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/download [get]
// ImageDownload streams an xlsx attachment listing the description and URL
// of every image carrying the requested tag, fetched in pages of 500.
func ImageDownload(ctx *gin.Context) {
	tagKey := ctx.Query("tagKey")

	excelFile := xlsx.NewFile()
	sheet, _ := excelFile.AddSheet(tagKey)
	header := sheet.AddRow()
	header.AddCell().Value = "图片描述"
	header.AddCell().Value = "图片URL地址"

	var offset int64 = 0
	var pageSize int64 = 500
	where := bson.M{
		"tagKeys": tagKey,
	}
	for {
		// Re-declare the page buffer each iteration so a page can never
		// accidentally carry rows over from the previous query.
		var images []*struct {
			ID       primitive.ObjectID `bson:"_id" json:"_id"`
			ImageID  string             `json:"image_id"`
			ImageUrl string             `bson:"image_url" json:"image_url"`
			Desc     string             `bson:"desc" json:"desc"`
			TagKeys  []string           `bson:"tagKeys" json:"tagKeys"`
		}
		mgdb.Find(
			mgdb.EnvEditor,
			mgdb.DbEditor,
			tblImageSys,
			where,
			map[string]int{
				"desc": 1,
			},
			nil,
			offset,
			pageSize,
			&images,
		)
		for _, img := range images {
			row := sheet.AddRow()
			row.AddCell().Value = img.Desc
			row.AddCell().Value = img.ImageUrl
		}
		offset += pageSize
		// A short (or empty) page means the last batch was read. The old
		// condition (offset == 10000 && len < pageSize) looped forever once
		// more than 10000 rows existed and issued useless queries below that.
		if int64(len(images)) < pageSize {
			break
		}
	}

	ctx.Header("Content-Type", "application/octet-stream")
	ctx.Header("Content-Disposition", "attachment; filename="+time.Now().Format("2006-01-02")+".xlsx")
	ctx.Header("Content-Transfer-Encoding", "binary")
	excelFile.Write(ctx.Writer)
}
type imageSearchParam struct {
SearchType int `json:"searchType"` // 0模糊搜索,1精确搜索
TagKey string `json:"tagKey"`
Words string `json:"words"`
PageSize int64 `json:"pageSize"`
Page int64 `json:"page"`
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片搜索
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageSearchParam true "图片搜索参数"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/search [post]
func ImageSearch(ctx *gin.Context) {
var param imageSearchParam
ctx.BindJSON(¶m)
if param.PageSize == 0 {
param.PageSize = 100
}
if param.Page == 0 {
param.Page = 1
}
offset := (param.Page - 1) * param.PageSize
var images []*struct {
ID primitive.ObjectID `bson:"_id" json:"_id"`
ImageID string `json:"image_id"`
ImageUrl string `bson:"image_url" json:"image_url"`
Desc string `bson:"desc" json:"desc"`
TagKeys []string `bson:"tagKeys" json:"tagKeys"`
}
where := bson.M{}
if param.SearchType == 0 {
if param.Words != "" {
where["desc"] = primitive.Regex{
Pattern: param.Words,
Options: "i",
}
}
if param.TagKey != "" {
where["tagKeys"] = param.TagKey
}
} else {
if param.Words != "" {
where["desc"] = param.Words
}
if param.TagKey != "" {
where["tagKeys"] = param.TagKey
}
//where["desc"] = param.Words
}
mgdb.Find(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSys,
where,
nil,
nil,
offset,
param.PageSize,
&images,
)
for k, _ := range images {
images[k].ImageID = images[k].ID.Hex()
}
servers.ReportFormat(ctx, true, "图片列表", gin.H{
"images": images,
})
}
type imageAddParam struct {
TagKeys []string `json:"tagKeys"`
ImageUrl string `json:"image_url"`
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片添加
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageAddParam true "图片添加"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/add [post]
func ImageAdd(ctx *gin.Context) {
var param imageAddParam
ctx.BindJSON(¶m)
collection := mgdb.MongoClient.Database(EDITOR_DB).Collection(tblImageSys)
collection.InsertOne(ctx, bson.M{
"desc": strings.Join(param.TagKeys, "/"),
"image_url": param.ImageUrl,
"tagKeys": param.TagKeys,
})
//更新图片tag
mgdb.UpdateMany(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSysTags,
bson.M{
"tagKey": bson.M{"$in": param.TagKeys},
},
bson.M{
"$set": bson.M{
"conNum": bson.M{"$inc": 1},
"del": false,
},
},
true,
)
servers.ReportFormat(ctx, true, "添加成功", gin.H{})
}
type imageDelParam struct {
ImageID string `json:"image_id"`
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片删除
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageDelParam true "图片删除"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/del [post]
func ImageDel(ctx *gin.Context) {
var param imageDelParam
ctx.BindJSON(¶m)
_id, _ := primitive.ObjectIDFromHex(param.ImageID)
mgdb.MongoClient.Database(EDITOR_DB).Collection(tblImageSys).DeleteOne(ctx, bson.M{
"_id": _id,
})
servers.ReportFormat(ctx, true, "成功", gin.H{})
}
type imageAddMoreParam struct {
TagKeys []string `json:"tagKeys"`
ImageUrls []string `json:"image_urls"`
Names []string `json:"names"`
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片添加
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageAddMoreParam true "图片添加"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/add/more [post]
func ImageAddMore(ctx *gin.Context) {
var param imageAddMoreParam
ctx.BindJSON(¶m)
collection := mgdb.MongoClient.Database(EDITOR_DB).Collection(tblImageSys)
images := make([]interface{}, len(param.ImageUrls))
for k, ig := range param.ImageUrls {
images[k] = bson.M{
"desc": param.Names[k] + "/" + strings.Join(param.TagKeys, "/"),
"image_url": ig,
"tagKeys": param.TagKeys,
}
}
collection.InsertMany(ctx, images)
//更新图片tag
mgdb.UpdateMany(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSysTags,
bson.M{
"tagKey": bson.M{"$in": param.TagKeys},
},
bson.M{
"$set": bson.M{
"conNum": bson.M{"$inc": 1},
"del": false,
},
},
true,
)
servers.ReportFormat(ctx, true, "添加成功", gin.H{})
}
type imageEditParam struct {
ImageID string `json:"image_id"`
TagKeys []string `json:"tagKeys"`
Desc string `json:"desc"`
ImageUrl string `json:"image_url"`
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片编辑
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageEditParam true "图片添加"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/edit [post]
func ImageEdit(ctx *gin.Context) {
var param imageEditParam
ctx.BindJSON(¶m)
_id, _ := primitive.ObjectIDFromHex(param.ImageID)
mgdb.UpdateOne(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSys,
bson.M{
"_id": _id,
},
bson.M{
"$set": bson.M{
"desc": param.Desc + "/" + strings.Join(param.TagKeys, "/"),
"image_url": param.ImageUrl,
"tagKeys": param.TagKeys,
},
},
false,
)
//更新图片tag
mgdb.UpdateMany(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSysTags,
bson.M{
"tagKey": bson.M{"$in": param.TagKeys},
},
bson.M{
"$set": bson.M{
"conNum": bson.M{"$inc": 1},
"del": false,
},
},
true,
)
servers.ReportFormat(ctx, true, "添加成功", gin.H{})
}
type imageTagParam struct {
	TagKey string `bson:"tagKey"`
}

// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片标签列表
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/tags [post]
// ImageTags reports up to 500 tags that are not marked as deleted.
func ImageTags(ctx *gin.Context) {
	var tags []imageTagParam
	filter := bson.M{"del": false}
	mgdb.Find(mgdb.EnvEditor, mgdb.DbEditor, tblImageSysTags, filter, nil, nil, 0, 500, &tags)
	servers.ReportFormat(ctx, true, "图片标签列表", gin.H{"tags": tags})
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片标签添加
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageTagParam true "图片标签添加"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/tag/add [post]
func ImageTagAdd(ctx *gin.Context) {
var param imageTagParam
ctx.BindJSON(¶m)
//更新图片tag
mgdb.UpdateOne(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSysTags,
bson.M{
"tagKey": param.TagKey,
},
bson.M{
"$set": bson.M{
"del": false,
"createdOn": time.Now(),
},
},
true,
)
servers.ReportFormat(ctx, true, "添加成功", gin.H{})
}
// @Tags EditorImageAPI (图片系统接口)
// @Summary 图片标签删除
// @Security ApiKeyAuth
// @accept application/json
// @Produce application/json
// @Param data body editorapi.imageTagParam true "图片标签添加"
// @Success 200 {string} string "{"success":true,"data":{},"msg":"成功"}"
// @Router /editor/image/tag/del [post]
func ImageTagDel(ctx *gin.Context) {
var param imageTagParam
ctx.BindJSON(¶m)
//更新图片tag
mgdb.UpdateOne(
mgdb.EnvEditor,
mgdb.DbEditor,
tblImageSysTags,
bson.M{
"tagKey": param.TagKey,
},
bson.M{
"$set": bson.M{
"del": true,
},
},
false,
)
servers.ReportFormat(ctx, true, "删除成功", gin.H{})
}
|
package handle
import (
. "base"
)
// C12002Down is the downstream message for protocol number 12002, carrying
// the SID of a player who went offline.
type C12002Down struct {
	SID string // String: SID of the disconnected player
}

// PackInTo serializes the message into p: protocol number, then the SID.
func (s *C12002Down) PackInTo(p *Pack) {
	p.WriteUInt16(12002) // write the protocol number
	p.WriteString(s.SID) // SID of the disconnected player
}

// ToBytes serializes the message into a fresh byte slice via a new Pack.
func (s *C12002Down) ToBytes() []byte {
	pack := NewPackEmpty()
	s.PackInTo(pack)
	return pack.Data()
}
|
package props
import "os"
var (
Port, ApiKey, Secret string
)
func Setup() {
ApiKey = os.Getenv("API_KEY_CONVERTER")
Port = os.Getenv("PORT")
Secret = os.Getenv("SECRET")
}
|
/*
* @lc app=leetcode id=542 lang=golang
*
* [542] 01 Matrix
*
* https://leetcode.com/problems/01-matrix/description/
*
* algorithms
* Medium (39.36%)
* Likes: 1486
* Dislikes: 107
* Total Accepted: 87.7K
* Total Submissions: 220.9K
* Testcase Example: '[[0,0,0],[0,1,0],[0,0,0]]'
*
* Given a matrix consists of 0 and 1, find the distance of the nearest 0 for
* each cell.
*
* The distance between two adjacent cells is 1.
*
*
*
* Example 1:
*
*
* Input:
* [[0,0,0],
* [0,1,0],
* [0,0,0]]
*
* Output:
* [[0,0,0],
* [0,1,0],
* [0,0,0]]
*
*
* Example 2:
*
*
* Input:
* [[0,0,0],
* [0,1,0],
* [1,1,1]]
*
* Output:
* [[0,0,0],
* [0,1,0],
* [1,2,1]]
*
*
*
*
* Note:
*
*
* The number of elements of the given matrix will not exceed 10,000.
* There are at least one 0 in the given matrix.
* The cells are adjacent in only four directions: up, down, left and right.
*
*
*/
// @lc code=start
// updateMatrix returns, for each cell of the 0/1 matrix, the distance to the
// nearest 0; it delegates to the multi-source BFS implementation below.
func updateMatrix(matrix [][]int) [][]int {
	return updateMatrix1(matrix)
}
// using bfs
// updateMatrix1 computes, for every cell, the Manhattan distance to the
// nearest 0 using a multi-source BFS: all zero cells start in the queue at
// distance 0 and distances grow outward one layer at a time.
func updateMatrix1(matrix [][]int) [][]int {
	res := make([][]int, len(matrix))
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return res
	}
	var queue [][]int
	for r := range matrix {
		res[r] = make([]int, len(matrix[r]))
		for c := range matrix[r] {
			if matrix[r][c] == 0 {
				queue = append(queue, []int{r, c})
			} else {
				res[r][c] = -1 // -1 doubles as the "not yet visited" marker
			}
		}
	}
	rows, cols := len(matrix), len(matrix[0])
	steps := [][]int{{-1, 0}, {0, 1}, {0, -1}, {1, 0}}
	for len(queue) > 0 {
		cell := queue[0]
		queue = queue[1:]
		r, c := cell[0], cell[1]
		for _, d := range steps {
			nr, nc := r+d[0], c+d[1]
			if nr < 0 || nr >= rows || nc < 0 || nc >= cols || res[nr][nc] != -1 {
				continue
			}
			res[nr][nc] = res[r][c] + 1
			queue = append(queue, []int{nr, nc})
		}
	}
	return res
}
// @lc code=end |
package block
import (
"encoding/json"
"github.com/transmutate-io/cryptocore/types"
)
var (
	// Compile-time assertions that *BlockDCR implements the block interfaces.
	_ Block                  = (*BlockDCR)(nil)
	_ TransactionsLister     = (*BlockDCR)(nil)
	_ ConfirmationCounter    = (*BlockDCR)(nil)
	_ ForwardBlockNavigator  = (*BlockDCR)(nil)
	_ BackwardBlockNavigator = (*BlockDCR)(nil)
)

// BlockDCR is a DCR block that reuses the shared BTC-style block layout and
// exposes the embedded fields through the accessor methods below.
type BlockDCR struct{ baseBTCBlock }

func (blk *BlockDCR) Hash() types.Bytes                 { return blk.baseBTCBlock.Hash }
func (blk *BlockDCR) Confirmations() int                { return blk.baseBTCBlock.Confirmations }
func (blk *BlockDCR) Height() int                       { return blk.baseBTCBlock.Height }
func (blk *BlockDCR) TransactionsHashes() []types.Bytes { return blk.baseBTCBlock.Transactions }
func (blk *BlockDCR) Time() types.UnixTime              { return blk.baseBTCBlock.Time }
func (blk *BlockDCR) PreviousBlockHash() types.Bytes    { return blk.baseBTCBlock.PreviousBlockHash }
func (blk *BlockDCR) NextBlockHash() types.Bytes        { return blk.baseBTCBlock.NextBlockHash }

// UnmarshalJSON decodes directly into the embedded baseBTCBlock.
func (blk *BlockDCR) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &blk.baseBTCBlock) }
|
//
// Sa labs
//
// API Docs for sa labs
// Schemes: http
// Version: 0.0.1
// Contact: Andriy Tymkiv <a.tymkiv99@gmail.com>
// Host: localhost
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
//
// swagger:meta
package main
import (
"flag"
gormsqlS "github.com/atymkiv/sa/cmd/api/shop/platform/gormsql"
ss "github.com/atymkiv/sa/cmd/api/shop/service"
st "github.com/atymkiv/sa/cmd/api/shop/transport"
"github.com/atymkiv/sa/pkg/utl/config"
"github.com/atymkiv/sa/pkg/utl/gorm"
"github.com/atymkiv/sa/pkg/utl/messages"
"github.com/atymkiv/sa/pkg/utl/nats"
"github.com/atymkiv/sa/pkg/utl/server"
)
// main loads the configuration from the -p flag's path and starts the API
// server; any setup failure panics via checkErr.
func main() {
	cfgPath := flag.String("p", "./config/api.yaml", "Path to config file")
	flag.Parse()
	cfg, err := config.Load(*cfgPath)
	checkErr(err)
	checkErr(Start(cfg))
}
// Start wires up the API: database, NATS-backed message service and the shop
// routes, then runs the HTTP server with the configured port and timeouts.
func Start(cfg *config.Configuration) error {
	db, err := gorm.GetDbInstance(&cfg.DB)
	if err != nil {
		return err
	}

	e := server.New()

	natsClient, err := nats.New(cfg.Nats)
	if err != nil {
		// Previously this error was silently ignored, which could hand a
		// broken client to the message service.
		return err
	}
	messageService := messages.Create(natsClient)

	shopDB := gormsqlS.NewShop(db)
	shopGr := e.Group("/shop")
	st.NewHTTP(ss.New(shopDB, messageService), shopGr)

	server.Start(e, &server.Config{
		Port:                cfg.Server.Port,
		ReadTimeoutSeconds:  cfg.Server.ReadTimeout,
		WriteTimeoutSeconds: cfg.Server.WriteTimeout,
		Debug:               cfg.Server.Debug,
	})
	return nil
}
func checkErr(err error) {
if err != nil {
panic(err.Error())
}
}
|
package common
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"github.com/bitly/go-simplejson"
"strconv"
"strings"
"time"
)
// StrFirstToUpper upper-cases the first rune of str when it is an ASCII
// lowercase letter ('a'-'z'); every other input is returned unchanged, and
// the empty string yields the empty string.
func StrFirstToUpper(str string) string {
	if str == "" {
		return ""
	}
	runes := []rune(str)
	if r := runes[0]; r >= 'a' && r <= 'z' {
		runes[0] = r - ('a' - 'A')
	}
	return string(runes)
}
// JsonEncode marshals data to a JSON string without escaping HTML characters
// (<, >, & stay literal). Note that json.Encoder.Encode appends a trailing
// newline, which is preserved in the result; encoding errors are ignored.
func JsonEncode(data interface{}) string {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	enc.Encode(data) // best-effort: errors yield whatever was written so far
	return buf.String()
}
// JsonDecode parses the given JSON bytes into a simplejson document.
// Parse errors are ignored; callers receive the (possibly nil/empty) result.
func JsonDecode(json []byte) *simplejson.Json {
	js, _ := simplejson.NewJson(json)
	return js
}
// TimeStamp converts a "2006-01-02 15:04:05" formatted string, interpreted
// in the local time zone, into a Unix timestamp (seconds). Malformed input
// is silently mapped to the zero time's Unix value.
func TimeStamp(timeString string) int64 {
	const layout = "2006-01-02 15:04:05" // reference layout
	loc, _ := time.LoadLocation("Local") // resolve the local zone
	parsed, _ := time.ParseInLocation(layout, timeString, loc)
	return parsed.Unix()
}
func JsonAtoi(str interface{}) int {
str = str.(json.Number).String()
str,_ = strconv.Atoi(str.(string))
return str.(int)
}
// LSJPrice parses a price string of the form "<number>万" (ten-thousands) and
// returns the numeric part; the placeholder values "暂无" and "-" map to 0.
// Any string without the "万" unit or with an unparsable number also yields 0
// (the previous version sliced with index -1 and panicked in that case, and
// passed an invalid bitSize of 10 to ParseFloat).
func LSJPrice(str string) float64 {
	if str == "暂无" || str == "-" {
		return 0.00
	}
	strIndex := strings.Index(str, "万")
	if strIndex < 0 {
		return 0.00
	}
	num, err := strconv.ParseFloat(str[:strIndex], 64)
	if err != nil {
		return 0.00
	}
	return num
}
func RsaEncrypt(origData,publicKey []byte) ([]byte, error) {
//解密pem格式的公钥
block, _ := pem.Decode(publicKey)
if block == nil {
return nil, fmt.Errorf("加密失败")
}
// 解析公钥
pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return nil, err
}
// 类型断言
pub := pubInterface.(*rsa.PublicKey)
//加密
return rsa.EncryptPKCS1v15(rand.Reader, pub, origData)
} |
package rds
import (
"testing"
"github.com/pmacik/k8s-rds/pkg/crd"
"github.com/stretchr/testify/assert"
)
// TestConvertSpecToInput verifies the field-by-field mapping from a Database
// spec to the RDS create-instance input, including subnet, security groups
// and the externally-resolved password.
// NOTE(review): StorageType "bad" is asserted to pass through unchanged,
// which implies convertSpecToInput performs no validation of that field.
func TestConvertSpecToInput(t *testing.T) {
	db := &crd.Database{
		Spec: crd.DatabaseSpec{
			DBName:             "mydb",
			Engine:             "postgres",
			Username:           "myuser",
			Class:              "db.t2.micro",
			Size:               100,
			MultiAZ:            true,
			PubliclyAccessible: true,
			StorageEncrypted:   true,
			StorageType:        "bad",
			Iops:               1000,
			Password:           crd.PasswordSecret{Name: "password", Key: "mypassword"},
		},
	}
	i := convertSpecToInput(db, "mysubnet", []string{"sg-1234", "sg-4321"}, "mypassword")
	assert.Equal(t, "mydb", *i.DBName)
	assert.Equal(t, "postgres", *i.Engine)
	assert.Equal(t, "mypassword", *i.MasterUserPassword)
	assert.Equal(t, "myuser", *i.MasterUsername)
	assert.Equal(t, "db.t2.micro", *i.DBInstanceClass)
	assert.Equal(t, int64(100), *i.AllocatedStorage)
	assert.Equal(t, true, *i.PubliclyAccessible)
	assert.Equal(t, true, *i.MultiAZ)
	assert.Equal(t, true, *i.StorageEncrypted)
	assert.Equal(t, 2, len(i.VpcSecurityGroupIds))
	assert.Equal(t, "bad", *i.StorageType)
	assert.Equal(t, int64(1000), *i.Iops)
}
// TestGetIDFromProvider checks that the instance id is extracted from the
// last path segment of an aws:///<zone>/<instance-id> provider URI.
func TestGetIDFromProvider(t *testing.T) {
	x := getIDFromProvider("aws:///eu-west-1a/i-02ab67f4da79c3caa")
	assert.Equal(t, "i-02ab67f4da79c3caa", x)
}
|
package main
import (
"time"
)
// Trend is one mined trend term plus its word-count breakdown.
// The Mined/Posted timestamps presumably record collection time vs. source
// posting time — confirm against the producer of this data.
type Trend struct {
	Term       string      `json:"term"`
	SourceURI  string      `json:"source_uri"`
	Mined      time.Time   `json:"mined"`
	Posted     time.Time   `json:"posted"`
	WordCounts []WordCount `json:"word_counts"` // WordCount is declared elsewhere in this package
}

// Trends is a list of Trend values.
type Trends []Trend
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"github.com/dherbst/sentry-web-api"
)
// funcMap maps the first CLI argument to the handler that implements it.
var funcMap map[string]func(context.Context)

// init registers every supported sub-command; main looks commands up here.
func init() {
	funcMap = map[string]func(context.Context){
		"help":     Usage,
		"version":  Version,
		"orgs":     Organizations,
		"projects": Projects,
		"events":   Events,
	}
}
// Version prints the version from the sentry.GitHash out and exits.
func Version(ctx context.Context) {
	fmt.Printf("Version: %v %v\n", sentry.Version, sentry.GitHash)
}
// Usage prints how to invoke `sentry` from the command line.
// The raw string deliberately keeps its own indentation for terminal output.
func Usage(ctx context.Context) {
	fmt.Printf(`
Usage:
sentry version ; prints the commit version
sentry orgs ; prints the Organizations you can access
sentry projects ; prints the Projects you can access
sentry events org_slug project_slug ; prints the events in the project
`)
}
// Organizations prints the orgs you have access to, as indented JSON.
// Any client or marshalling error is reported to stdout instead.
func Organizations(ctx context.Context) {
	client := sentry.NewClient("", 0, "")
	orgs, err := client.OrganizationList(false, "")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	pretty, err := json.MarshalIndent(orgs, "", " ")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	fmt.Println(string(pretty))
}
// Projects prints the projects you have access to, as indented JSON.
// Any client or marshalling error is reported to stdout instead.
func Projects(ctx context.Context) {
	client := sentry.NewClient("", 0, "")
	projects, err := client.ProjectList("")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	pretty, err := json.MarshalIndent(projects, "", " ")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	fmt.Println(string(pretty))
}
// Events prints the events in the project. Requires org_slug and project_slug
// as the second and third CLI arguments; output is indented JSON.
func Events(ctx context.Context) {
	orgSlug, projectSlug := flag.Arg(1), flag.Arg(2)
	client := sentry.NewClient("", 0, "")
	events, err := client.EventList(orgSlug, projectSlug, false, "")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	pretty, err := json.MarshalIndent(events, "", " ")
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	fmt.Println(string(pretty))
}
// main dispatches the first CLI argument through funcMap; a missing or
// unknown command falls back to the usage text.
func main() {
	flag.Parse()
	ctx := context.Background()

	cmd := flag.Arg(0)
	if cmd == "" || cmd == "help" {
		Usage(ctx)
		return
	}
	handler, ok := funcMap[cmd]
	if !ok || handler == nil {
		fmt.Println("Unknown command")
		Usage(ctx)
		return
	}
	handler(ctx)
}
|
package logger
import (
"context"
"io"
"log"
)
type Logger struct {
Trace *log.Logger
Info *log.Logger
Warning *log.Logger
Error *log.Logger
}
func New(_ context.Context, traceHandle, infoHandle, warningHandle, errorHandle io.Writer) *Logger {
var Logger Logger
Logger.Trace = log.New(traceHandle, "TRACE: ", log.Ldate|log.Ltime|log.Lshortfile)
Logger.Info = log.New(infoHandle, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile)
Logger.Warning = log.New(warningHandle, "WARNING: ", log.Ldate|log.Ltime|log.Lshortfile)
Logger.Error = log.New(errorHandle, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile)
return &Logger
}
|
package main
import (
"context"
"github.com/Fish-pro/grpc-server/helper"
"github.com/Fish-pro/grpc-server/services"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/grpc"
"log"
"net/http"
"os"
)
// main stands up a grpc-gateway HTTP front on :8080 that proxies the Prod
// and Order services exposed by the gRPC server on localhost:8081.
func main() {
	gwmux := runtime.NewServeMux()
	const gRPCEndpoint = "localhost:8081"
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(helper.GetClientCred())}

	// Register both service handlers against the same endpoint; exit on the
	// first registration failure.
	register := func(fn func(context.Context, *runtime.ServeMux, string, []grpc.DialOption) error) {
		if err := fn(context.Background(), gwmux, gRPCEndpoint, dialOpts); err != nil {
			log.Println(">>>", err.Error())
			os.Exit(1)
		}
	}
	register(services.RegisterProdServiceHandlerFromEndpoint)
	register(services.RegisterOrderServiceHandlerFromEndpoint)

	httpServer := &http.Server{
		Addr:    ":8080",
		Handler: gwmux,
	}
	if err := httpServer.ListenAndServe(); err != nil {
		log.Println(">>>", err.Error())
		os.Exit(1)
	}
}
|
// GENERATED FILE -- DO NOT EDIT
//
package metadata
var (
	// Default is the name of snapshot default
	Default = "default"
	// LocalAnalysis is the name of snapshot localAnalysis
	LocalAnalysis = "localAnalysis"
	// SyntheticServiceEntry is the name of snapshot syntheticServiceEntry
	SyntheticServiceEntry = "syntheticServiceEntry"
)

// SnapshotNames returns the snapshot names declared in this package.
// The order matches the declaration order of the variables above.
func SnapshotNames() []string {
	return []string{
		Default,
		LocalAnalysis,
		SyntheticServiceEntry,
	}
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The head object has all methods to work with the header of a packet.
package head
import (
"encoding/binary"
"fmt"
"github.com/dirkjabl/bricker/net/errors"
"io"
)
// Header of the IP packet for the connection.
//
// Bit layout, as established by the accessor methods below:
//   - SequenceAndOptions: bits 4-7 carry the sequence number, bit 3 the
//     response-expected flag; the remaining bits hold other options.
//   - ErrorCodeAndFutureUse: bits 6-7 carry the error code; the low bits are
//     reserved for future use.
type Head struct {
	Uid                   uint32
	Length                uint8
	FunctionID            uint8
	SequenceAndOptions    uint8
	ErrorCodeAndFutureUse uint8
}

// New builds a Head from its raw field values; no validation is performed.
func New(uid uint32, length, functionID, seqAndOpt, errCode uint8) *Head {
	return &Head{Uid: uid, Length: length, FunctionID: functionID, SequenceAndOptions: seqAndOpt, ErrorCodeAndFutureUse: errCode}
}
// Copy makes a real copy of a header; a nil receiver yields nil.
func (h *Head) Copy() *Head {
	if h == nil { // nothing to copy
		return nil
	}
	c := *h // value copy duplicates every (scalar) field
	return &c
}
// Sequence read out the actual sequence of the header
// (the upper four bits of SequenceAndOptions).
func (h *Head) Sequence() uint8 {
	return (h.SequenceAndOptions >> 4) & 0x0f
}
// SetSequence sets a new sequence to the header.
// The sequence has to be between 1 and 15; out-of-range values panic.
func (h *Head) SetSequence(seq uint8) {
	if seq < 1 || seq > 15 {
		panic(fmt.Sprintf("Sequence (%d) is out of range (1-15)", seq))
	}
	// Clear the old sequence bits before writing the new ones. The previous
	// OR-only assignment let bits from an earlier sequence leak into the new
	// one (e.g. setting 5 and then 3 produced 7).
	h.SequenceAndOptions = (h.SequenceAndOptions & 0x0f) | ((seq << 4) & 0xf0)
}
// OptionResponseExpected read out, if this header is configured to expect a
// response after sending (bit 3 of SequenceAndOptions).
func (h *Head) OptionResponseExpected() bool {
	return (((h.SequenceAndOptions >> 3) & 1) == 1)
}
// SetOptionResponseExpected writes into the head, if or if not a response is
// expected (bit 3 of SequenceAndOptions).
func (h *Head) SetOptionResponseExpected(exp bool) {
	// Clear bit 3 first so exp == false actually resets the flag; the
	// previous OR-only version could set the bit but never clear it.
	h.SequenceAndOptions &^= 1 << 3
	if exp {
		h.SequenceAndOptions |= 1 << 3
	}
}
// OptionOther read out the other options from the header.
// NOTE(review): the mask 192 (0xc0) selects bits 6-7, which overlap the
// sequence bits (4-7) read by Sequence(). If "other options" are meant to
// live in the low bits, the mask is likely wrong — confirm against the wire
// protocol specification before changing.
func (h *Head) OptionOther() uint8 {
	return h.SequenceAndOptions & 192
}
// ErrorCodeNbr read the error code number from the header
// (the top two bits, 6-7, of ErrorCodeAndFutureUse).
func (h *Head) ErrorCodeNbr() uint8 {
	return (h.ErrorCodeAndFutureUse >> 6) & 3
}

// ErrorCode gets the actual error code number in a go error,
// constructed via the package's errors helper.
func (h *Head) ErrorCode() *errors.Error {
	return errors.New(h.ErrorCodeNbr())
}
// FutureUse reads the flags which reserved for future use.
// NOTE(review): the mask 31 covers bits 0-4 only; with the error code in
// bits 6-7 (see ErrorCodeNbr), bit 5 is unaccounted for — confirm whether
// the mask should be 63.
func (h *Head) FutureUse() uint8 {
	return h.ErrorCodeAndFutureUse & 31
}
// Write writes the binary representation of the header in a given writer,
// field by field in little-endian order.
func (h *Head) Write(w io.Writer) error {
	return binary.Write(w, binary.LittleEndian, h)
}

// Read reads the binary representation of the header in the acutal header
// from the given reader (little-endian, same layout as Write).
func (h *Head) Read(r io.Reader) error {
	return binary.Read(r, binary.LittleEndian, h)
}
// String gives a representation of the single entries, decoding the packed
// option/error fields through the accessor methods.
func (h *Head) String() string {
	return fmt.Sprintf(
		"Header: [Uid: %d, Length: %d, Function-ID: %d, Sequence: %d, Response Expected: %v, Other Options: 0x%02x, Error Code: %v, Future Use: 0x%02x]",
		h.Uid, h.Length, h.FunctionID, h.Sequence(),
		h.OptionResponseExpected(), h.OptionOther(), h.ErrorCode(), h.FutureUse())
}
|
/*-------------------------------------------------------------------------
*
* io_test.go
* test cgroup io metrics
*
*
* Copyright (c) 2021, Alibaba Group Holding Limited
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* IDENTIFICATION
* common/cgroup/io_test.go
*-------------------------------------------------------------------------
*/
package cgroup
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"testing"
)
func setup(t *testing.T) {
ioServiced := `252:64 Read 10698689
252:64 Write 3868807128
252:64 Sync 0
252:64 Async 3879505817
252:64 Total 3879505817
253:1 Read 21366158
253:1 Write 7304062679
253:1 Sync 0
253:1 Async 7325428837
253:1 Total 7325428837
252:0 Read 10705125
252:0 Write 4211279610
252:0 Sync 0
252:0 Async 4221984735
252:0 Total 4221984735
253:0 Read 37656
253:0 Write 776025055
253:0 Sync 0
253:0 Async 776062711
253:0 Total 776062711
Total 16202982100`
mountinfo := `26 21 253:0 / /u01 rw,noatime,nodiratime - ext4 /dev/mapper/vgdata-volume1 rw,barrier=0,nodelalloc,stripe=64,data=ordered
27 21 253:1 / /u02 rw,noatime,nodiratime - ext4 /dev/mapper/vgdata-volume2 rw,barrier=0,nodelalloc,stripe=64,data=ordered`
f, err := os.Create("blkio.test")
if err != nil {
t.Fatal("file create fail")
return
}
_, err = io.WriteString(f, ioServiced)
if err != nil {
t.Fatal("write error", err)
return
}
ioutil.WriteFile("mountinfo.test", []byte(mountinfo), 777)
}
func teardown() {
os.Remove("blkio.test")
os.Remove("mountinfo.test")
}
// TestGetIoFound exercises initIo/getBlkio against an absolute cgroup path.
// NOTE(review): the path is machine-specific and the real assertions are
// commented out, so this test only verifies that initIo succeeds on a host
// that happens to expose /cgroup/rds/rule3003; consider re-enabling the
// fixture-based setup()/teardown() flow instead.
func TestGetIoFound(t *testing.T) {
	//t.Skip("")
	//setup(t)
	//defer teardown()
	var buf bytes.Buffer
	cio := NewIo(&buf)
	if err := cio.initIo("/cgroup/rds/rule3003/blkio.throttle.io_serviced"); err != nil {
		t.Fatal(err.Error())
	}
	path := "blkio.test"
	// The local fixture path is immediately overwritten with the live path.
	path = "/cgroup/rds/rule3003/blkio.throttle.io_serviced"
	dataIo, err := cio.getBlkio(path)
	fmt.Println(err)
	fmt.Printf("%+v\n", dataIo)
	//assert.Nil(t, err)
	//assert.Equal(t, uint64(776062711), dataIo)
	//assert.Equal(t, uint64(7325428837), logIo)
}
// TestGetIO smoke-tests InitPath against a hard-coded kubepods cgroup path.
// NOTE(review): the path only exists on one specific host and errors are
// merely printed, so the test cannot fail meaningfully; it should either be
// skipped or rewritten against local fixtures.
func TestGetIO(t *testing.T) {
	//t.Skip()
	//t.Skip("")
	var buf bytes.Buffer
	cio := NewIo(&buf)
	err := cio.InitPath("/sys/fs/cgroup/blkio/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6c5c7c9_4e76_11ea_9283_00163e0a97ec.slice/docker-e282b4ececbfd0492d2e1d45317a2f88251168046066b6f8ddba08cdb2f5e618.scope", true)
	if err != nil {
		fmt.Println(err)
	}
	//fmt.Println("dev",cio.dataDevId,cio.dataDev,cio.logDevId,cio.logDev)
	//data,log,err:=cio.GetIo()
	//fmt.Println(data,log,err)
}
// TestGetIO1 smoke-tests InitPathWithMp with explicit data/log mount points.
// It is permanently skipped (t.Skip) because the cgroup path and CSI volume
// mounts are machine-specific; the prints below aid manual debugging only.
func TestGetIO1(t *testing.T) {
	t.Skip()
	var buf bytes.Buffer
	cio := NewIo(&buf)
	err := cio.InitPathWithMp("/sys/fs/cgroup/blkio/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40b29278_8b30_4990_9409_978ddcc52362.slice/docker-da82ea78f35b6a92379f96ae9a6d1adb38b488821fac19aa6c61c28879d90982.scope", true, "/var/lib/kubelet/pods/40b29278-8b30-4990-9409-978ddcc52362/volumes/kubernetes.io~csi/xdbcloudautotestvxbg52a167-e1f1e8ab-log-7e7ce883/mount", "/var/lib/kubelet/pods/40b29278-8b30-4990-9409-978ddcc52362/volumes/kubernetes.io~csi/xdbcloudautotestvxbg52a167-e1f1e8ab-log-7e7ce883/mount")
	fmt.Println(err)
	fmt.Println(cio.logDev, cio.dataDev, cio.logDevId, cio.dataDevId)
}
|
package main
import (
"fmt"
"log"
"os"
"time"
"io/ioutil"
"math/rand"
"github.com/grossamos/jam0001/evaluator"
"github.com/grossamos/jam0001/lexer"
"github.com/grossamos/jam0001/parser"
"github.com/grossamos/jam0001/shared"
)
// help prints the CLI usage text and terminates the process with status 0.
func help() {
	fmt.Println(
		`smile - an esoteric programming language made for langjam
Usage: smile [ file ] [ flags ]
--help, -h: show this help and exit
--debug, -d: show info for debugging the interpretor (ast, tokens)`)
	os.Exit(0)
}
// main parses the CLI arguments, loads the source file, then runs the
// lexer → parser → evaluator pipeline, dumping intermediate output in
// debug mode.
func main() {
	rand.Seed(time.Now().Unix())

	debug := false
	srcPath := ""

	// do initial checks
	if len(os.Args) == 1 {
		help()
	}
	for _, arg := range os.Args[1:] {
		switch arg {
		case "--version":
			fmt.Println("〈( ^.^)ノ\tv0.1")
			return
		case "--debug", "-d":
			debug = true
		case "--help", "-h":
			help()
		default:
			// The first non-flag argument is the source file; a second one
			// is an error.
			if srcPath != "" {
				fmt.Println("Incorrect flags.")
				return
			}
			srcPath = arg
		}
	}

	// load test code from disk
	dat, err := ioutil.ReadFile(srcPath)
	if err != nil {
		log.Fatal(err)
	}
	code := string(dat)
	if debug {
		fmt.Println(code)
	}

	// run lexer
	toks := lexer.RunLexer(code)
	if debug {
		fmt.Println(toks)
	}

	// run parser (debug AST dump happens before the error check, as before)
	nodes, comments, err := parser.GenerateAst(toks)
	if debug {
		shared.Node{IsExpression: true, Children: nodes}.Print("")
	}
	if err != nil {
		log.Fatal(err)
	}

	// run evaluator
	if err = evaluator.RunEvaluator(nodes, comments); err != nil {
		log.Fatal(err)
	}
}
|
package main
import "fmt"
/*
Quiz topics: defer, closures, pointer variables.
*/
type Person struct {
	age int
}
// main demonstrates how defer interacts with argument evaluation, pointer
// arguments and closures. Deferred calls run LIFO, so the printed ages are
// 27 (closure), 27 (pointer arg), 25 (value captured at defer time).
func main() {
	// p is a pointer variable.
	p := &Person{25}
	// This defer evaluates p.age (25) immediately and caches it as the
	// argument, so it prints 25 no matter what happens later.
	defer fmt.Println(p.age)
	// This defer receives the pointer; the pointee's age is 27 by the time
	// it runs, so it prints 27.
	defer func(p *Person) {
		fmt.Println(p.age)
	}(p)
	// Closure: references the outer variable p directly and therefore sees
	// the final state (27).
	defer func() {
		fmt.Println(p.age)
	}()
	p.age = 27
	fmt.Println(p)
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package fmtsafe
import (
"strings"
"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
)
// requireConstMsg records functions for which the last string
// argument must be a constant string.
// Keys are fully qualified function names; method entries use the
// "(*pkg.Type).Method" form seen below.
var requireConstMsg = map[string]bool{
	"errors.New":                 true,
	"github.com/pkg/errors.New":  true,
	"github.com/pkg/errors.Wrap": true,

	"github.com/cockroachdb/errors.New":                         true,
	"github.com/cockroachdb/errors.Error":                       true,
	"github.com/cockroachdb/errors.NewWithDepth":                true,
	"github.com/cockroachdb/errors.WithMessage":                 true,
	"github.com/cockroachdb/errors.Wrap":                        true,
	"github.com/cockroachdb/errors.WrapWithDepth":               true,
	"github.com/cockroachdb/errors.AssertionFailed":             true,
	"github.com/cockroachdb/errors.HandledWithMessage":          true,
	"github.com/cockroachdb/errors.HandledInDomainWithMessage":  true,

	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.New": true,

	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.New":                   true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.NewWithIssue":          true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.NewWithIssueDetail":    true,

	"github.com/cockroachdb/cockroach/pkg/sql/pgwire.newAdminShutdownErr": true,

	"(*github.com/cockroachdb/cockroach/pkg/parser/lexer).Error": true,

	"github.com/cockroachdb/cockroach/pkg/util/log.Shout":     true,
	"github.com/cockroachdb/cockroach/pkg/util/log.Event":     true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VEvent":    true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VErrEvent": true,

	"(*github.com/cockroachdb/cockroach/pkg/sql.optPlanningCtx).log": true,
}
// requireConstFmt records functions for which the string arg
// before the final ellipsis must be a constant string.
// Keys are fully-qualified function names (methods are written as
// "(pkg.Type).Method"); the linter consults this set when checking
// printf-style call sites. More entries are added dynamically by the
// init() function below.
var requireConstFmt = map[string]bool{
	// Logging things.
	"log.Printf":           true,
	"log.Fatalf":           true,
	"log.Panicf":           true,
	"(*log.Logger).Fatalf": true,
	"(*log.Logger).Panicf": true,
	"(*log.Logger).Printf": true,

	"github.com/cockroachdb/cockroach/pkg/util/log.Shoutf":          true,
	"github.com/cockroachdb/cockroach/pkg/util/log.Eventf":          true,
	"github.com/cockroachdb/cockroach/pkg/util/log.vEventf":         true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VEventf":         true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VErrEventf":      true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VEventfDepth":    true,
	"github.com/cockroachdb/cockroach/pkg/util/log.VErrEventfDepth": true,

	// Note: More of the logging functions are populated here via the
	// init() function below.

	"github.com/cockroachdb/cockroach/pkg/util/log.MakeLegacyEntry":        true,
	"github.com/cockroachdb/cockroach/pkg/util/log.makeUnstructuredEntry":  true,
	"github.com/cockroachdb/cockroach/pkg/util/log.FormatWithContextTags":  true,
	"github.com/cockroachdb/cockroach/pkg/util/log.renderArgsAsRedactable": true,
	"github.com/cockroachdb/cockroach/pkg/util/log.formatArgs":             true,
	"github.com/cockroachdb/cockroach/pkg/util/log.logfDepth":              true,
	"github.com/cockroachdb/cockroach/pkg/util/log.shoutfDepth":            true,
	"github.com/cockroachdb/cockroach/pkg/util/log.makeStartLine":          true,

	"github.com/cockroachdb/cockroach/pkg/util/log/logcrash.ReportOrPanic": true,

	"(*github.com/cockroachdb/cockroach/pkg/util/tracing.Span).Recordf": true,

	"(github.com/cockroachdb/cockroach/pkg/rpc.breakerLogger).Debugf": true,
	"(github.com/cockroachdb/cockroach/pkg/rpc.breakerLogger).Infof":  true,

	"(*github.com/cockroachdb/cockroach/pkg/internal/rsg/yacc.Tree).errorf": true,

	"(github.com/cockroachdb/cockroach/pkg/storage.pebbleLogger).Infof":  true,
	"(github.com/cockroachdb/cockroach/pkg/storage.pebbleLogger).Fatalf": true,

	"(*github.com/cockroachdb/cockroach/pkg/util/grpcutil.grpcLogger).Infof":    true,
	"(*github.com/cockroachdb/cockroach/pkg/util/grpcutil.grpcLogger).Warningf": true,
	"(*github.com/cockroachdb/cockroach/pkg/util/grpcutil.grpcLogger).Errorf":   true,
	"(*github.com/cockroachdb/cockroach/pkg/util/grpcutil.grpcLogger).Fatalf":   true,

	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Debugf":   true,
	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Infof":    true,
	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Warningf": true,
	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Errorf":   true,
	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Fatalf":   true,
	"(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Panicf":   true,

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver.makeNonDeterministicFailure":     true,
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver.wrapWithNonDeterministicFailure": true,

	// Third-party logger interfaces implemented or consumed by cockroach.
	"(go.etcd.io/etcd/raft/v3.Logger).Debugf":   true,
	"(go.etcd.io/etcd/raft/v3.Logger).Infof":    true,
	"(go.etcd.io/etcd/raft/v3.Logger).Warningf": true,
	"(go.etcd.io/etcd/raft/v3.Logger).Errorf":   true,
	"(go.etcd.io/etcd/raft/v3.Logger).Fatalf":   true,
	"(go.etcd.io/etcd/raft/v3.Logger).Panicf":   true,

	"(google.golang.org/grpc/grpclog.Logger).Infof":    true,
	"(google.golang.org/grpc/grpclog.Logger).Warningf": true,
	"(google.golang.org/grpc/grpclog.Logger).Errorf":   true,

	"(github.com/cockroachdb/pebble.Logger).Infof":  true,
	"(github.com/cockroachdb/pebble.Logger).Fatalf": true,

	"(github.com/cockroachdb/circuitbreaker.Logger).Infof":  true,
	"(github.com/cockroachdb/circuitbreaker.Logger).Debugf": true,

	"github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/exprgen.errorf": true,
	"github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/exprgen.wrapf":  true,

	"(*github.com/cockroachdb/cockroach/pkg/sql.connExecutor).sessionEventf": true,

	"(*github.com/cockroachdb/cockroach/pkg/sql/logictest.logicTest).outf":   true,
	"(*github.com/cockroachdb/cockroach/pkg/sql/logictest.logicTest).Errorf": true,
	"(*github.com/cockroachdb/cockroach/pkg/sql/logictest.logicTest).Fatalf": true,

	"(*github.com/cockroachdb/cockroach/pkg/server.adminServer).serverErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/server.guaranteedExitFatal": true,

	"(*github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl.kafkaLogAdapter).Printf": true,

	// Error things.
	"fmt.Errorf": true,

	"github.com/pkg/errors.Errorf": true,
	"github.com/pkg/errors.Wrapf":  true,

	"github.com/cockroachdb/errors.Newf":                            true,
	"github.com/cockroachdb/errors.Errorf":                          true,
	"github.com/cockroachdb/errors.NewWithDepthf":                   true,
	"github.com/cockroachdb/errors.WithMessagef":                    true,
	"github.com/cockroachdb/errors.Wrapf":                           true,
	"github.com/cockroachdb/errors.WrapWithDepthf":                  true,
	"github.com/cockroachdb/errors.AssertionFailedf":                true,
	"github.com/cockroachdb/errors.AssertionFailedWithDepthf":       true,
	"github.com/cockroachdb/errors.NewAssertionErrorWithWrappedErrf": true,
	"github.com/cockroachdb/errors.WithSafeDetails":                 true,

	"github.com/cockroachdb/redact.Sprintf":                true,
	"github.com/cockroachdb/redact.Fprintf":                true,
	"(github.com/cockroachdb/redact.SafePrinter).Printf":   true,
	"(github.com/cockroachdb/redact.SafeWriter).Printf":    true,
	"(*github.com/cockroachdb/redact.printer).Printf":      true,

	"github.com/cockroachdb/cockroach/pkg/roachpb.NewErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/ccl/importccl.makeRowErr": true,
	"github.com/cockroachdb/cockroach/pkg/ccl/importccl.wrapRowErr": true,

	"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors.NewSyntaxErrorf":          true,
	"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors.NewDependentObjectErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree.newSourceNotFoundError":  true,
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree.decorateTypeCheckError": true,

	"github.com/cockroachdb/cockroach/pkg/sql/opt/optbuilder.unimplementedWithIssueDetailf": true,

	"(*github.com/cockroachdb/cockroach/pkg/sql/pgwire.authPipe).Logf": true,

	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.Newf":                true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.NewWithDepthf":       true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.DangerousStatementf": true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.Wrapf":               true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.WrapWithDepthf":      true,

	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice.Newf":             true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice.NewWithSeverityf": true,

	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase.NewProtocolViolationErrorf":            true,
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase.NewInvalidBinaryRepresentationErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/util/errorutil.UnexpectedWithIssueErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.Newf":                   true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.NewWithDepthf":          true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.NewWithIssuef":          true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.NewWithIssueDetailf":    true,
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented.unimplementedInternal": true,

	"github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate.inputErrorf": true,

	"github.com/cockroachdb/cockroach/pkg/ccl/sqlproxyccl.NewErrorf": true,
}
// init augments requireConstFmt and requireConstMsg with the logging
// helpers that are generated per severity (Infof, Warningf, ...) and per
// channel (log.Ops, log.Dev, ...) from the logpb enum descriptors.
func init() {
	const logPkg = "github.com/cockroachdb/cockroach/pkg/util/log."
	for _, sevName := range logpb.Severity_name {
		sev := strings.Title(strings.ToLower(sevName))
		// log.Infof, log.Warningf etc.
		requireConstFmt[logPkg+sev+"f"] = true
		// log.VInfof, log.VWarningf etc.
		requireConstFmt[logPkg+"V"+sev+"f"] = true
		// log.InfofDepth, log.WarningfDepth, etc.
		requireConstFmt[logPkg+sev+"fDepth"] = true
		// log.Info, log.Warning, etc.
		requireConstMsg[logPkg+sev] = true
		for _, chName := range logpb.Channel_name {
			// Channel enum values look like "SQL_EXEC"; turn them into
			// CamelCase ("SqlExec") to form the logger type name.
			ch := strings.ReplaceAll(strings.Title(strings.ReplaceAll(strings.ToLower(chName), "_", " ")), " ", "")
			logger := "(" + logPkg + "logger" + ch + ")."
			// log.Ops.Infof, log.Ops.Warningf, etc.
			requireConstFmt[logger+sev+"f"] = true
			// log.Ops.VInfof, log.Ops.VWarningf, etc.
			requireConstFmt[logger+"V"+sev+"f"] = true
			// log.Ops.InfofDepth, log.Ops.WarningfDepth, etc.
			requireConstFmt[logger+sev+"fDepth"] = true
			// log.Ops.Info, logs.Ops.Warning, etc.
			requireConstMsg[logger+sev] = true
		}
	}
	for _, chName := range logpb.Channel_name {
		ch := strings.ReplaceAll(strings.Title(strings.ReplaceAll(strings.ToLower(chName), "_", " ")), " ", "")
		logger := "(" + logPkg + "logger" + ch + ")."
		// log.Ops.Shoutf, log.Dev.Shoutf, etc.
		requireConstFmt[logger+"Shoutf"] = true
		// log.Ops.Shout, log.Dev.Shout, etc.
		requireConstMsg[logger+"Shout"] = true
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"fmt"
"path/filepath"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/remote/dutfs"
"chromiumos/tast/remote/firmware/fingerprint"
"chromiumos/tast/remote/firmware/fingerprint/rpcdut"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the FpROCanUpdateRW test with the tast framework,
// declaring its dependencies (fingerprint hardware, biod), required
// services, runtime variables and data files.
func init() {
	testing.AddTest(&testing.Test{
		Func: FpROCanUpdateRW,
		Desc: "Verify that RO can update RW",
		Contacts: []string{
			"josienordrum@google.com", // Test author
			"tomhughes@chromium.org",
			"chromeos-fingerprint@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		Timeout:      8 * time.Minute,
		SoftwareDeps: []string{"biometrics_daemon"},
		HardwareDeps: hwdep.D(hwdep.Fingerprint()),
		ServiceDeps:  []string{"tast.cros.platform.UpstartService", dutfs.ServiceName},
		Vars:         []string{"servo"},
		// Data files: futility binary plus the dev signing key for each
		// supported fingerprint MCU board.
		Data: []string{
			fingerprint.Futility,
			fingerprint.BloonchipperDevKey,
			fingerprint.DartmonkeyDevKey,
			fingerprint.NamiFPDevKey,
			fingerprint.NocturneFPDevKey,
		},
	})
}
// testRWFlashParams bundles the inputs and expectations for one
// flash-RW-then-verify cycle performed by testFlashingRWFirmware.
type testRWFlashParams struct {
	firmwarePath      string // image file to flash into the RW slot
	expectedROVersion string // RO version expected to remain after flashing
	expectedRWVersion string // RW version expected after flashing
	// expectedRunningFirmwareCopy is the firmware copy (RO/RW) expected
	// to be active after reboot.
	expectedRunningFirmwareCopy fingerprint.FWImageType
}
// testFlashingRWFirmware flashes the given RW image and then verifies the
// reported RO/RW versions, the active firmware copy, and the rollback state.
func testFlashingRWFirmware(ctx context.Context, d *rpcdut.RPCDUT, p *testRWFlashParams) error {
	testing.ContextLog(ctx, "Flashing RW firmware: ", p.firmwarePath)
	if err := fingerprint.FlashRWFirmware(ctx, d, p.firmwarePath); err != nil {
		return errors.Wrapf(err, "failed to flash firmware: %q", p.firmwarePath)
	}

	testing.ContextLog(ctx, "Checking for versions: RO: ", p.expectedROVersion, ", RW: ", p.expectedRWVersion)
	if err := fingerprint.CheckRunningFirmwareVersionMatches(ctx, d, p.expectedROVersion, p.expectedRWVersion); err != nil {
		return errors.Wrap(err, "unexpected firmware version")
	}

	testing.ContextLog(ctx, "Checking that ", p.expectedRunningFirmwareCopy, " firmware is running")
	if err := fingerprint.CheckRunningFirmwareCopy(ctx, d.DUT(), p.expectedRunningFirmwareCopy); err != nil {
		return errors.Wrap(err, "running unexpected firmware copy")
	}

	testing.ContextLog(ctx, "Checking that rollback meets expected values")
	if err := fingerprint.CheckRollbackSetToInitialValue(ctx, d); err != nil {
		return errors.Wrap(err, "rollback not set to initial value")
	}
	return nil
}
// FpROCanUpdateRW flashes RW firmware with a version string that ends in '.rb0'
// (has rollback ID '0') and validates that it is running. Then flashes RW
// firmware with version string that ends in '.dev' (also has rollback ID '0')
// and validates that it is running.
func FpROCanUpdateRW(ctx context.Context, s *testing.State) {
	d, err := rpcdut.NewRPCDUT(ctx, s.DUT(), s.RPCHint(), "cros")
	if err != nil {
		s.Fatal("Failed to connect RPCDUT: ", err)
	}
	defer d.Close(ctx)

	servoSpec, ok := s.Var("servo")
	if !ok {
		servoSpec = ""
	}
	fpBoard, err := fingerprint.Board(ctx, d)
	if err != nil {
		s.Fatal("Failed to get fingerprint board: ", err)
	}
	buildFwFile, err := fingerprint.FirmwarePath(ctx, d, fpBoard)
	if err != nil {
		s.Fatal("Failed to get build firmware file path: ", err)
	}

	// Use a timestamped directory on the stateful partition so runs do not
	// collide with leftovers of earlier, aborted runs.
	folderpath := filepath.Join("/", "mnt", "stateful_partition", fmt.Sprintf("fpimages_%d", time.Now().Unix()))
	if err := dutfs.NewClient(d.RPC().Conn).MkDir(ctx, folderpath, 0755); err != nil {
		s.Fatal("Failed to create remote working directory: ", err)
	}
	testing.ContextLog(ctx, "Created non-temporary fptast directory")

	// Generate test images to flash to RW.
	testImages, err := fingerprint.GenerateTestFirmwareImages(ctx, d, s.DataPath(fingerprint.Futility), s.DataPath(fingerprint.DevKeyForFPBoard(fpBoard)), fpBoard, buildFwFile, folderpath)
	if err != nil {
		s.Fatal("Failed to generate test images: ", err)
	}
	firmwareFile := fingerprint.NewFirmwareFile(testImages[fingerprint.TestImageTypeDev].Path, fingerprint.KeyTypeDev, testImages[fingerprint.TestImageTypeDev].ROVersion, testImages[fingerprint.TestImageTypeDev].RWVersion)
	// Set both HW write protect and SW write protect true.
	t, err := fingerprint.NewFirmwareTest(ctx, d, servoSpec, s.OutDir(), firmwareFile, true /*HW protect*/, true /*SW protect*/)
	if err != nil {
		s.Fatal("Failed to create new firmware test: ", err)
	}
	cleanupCtx := ctx
	defer func() {
		s.Log("Delete fptast directory and contained files from DUT")
		// Bug fix: the original checked a stale outer 'err' here, so failures
		// of RemoveAll were never detected. Capture its actual return value,
		// and report with s.Error (not s.Fatal) so that t.Close still runs.
		// Use cleanupCtx since ctx's deadline was shortened for cleanup.
		if err := dutfs.NewClient(d.RPC().Conn).RemoveAll(cleanupCtx, folderpath); err != nil {
			s.Error("Failed to delete dir: ", folderpath, err)
		}
		if err := t.Close(cleanupCtx); err != nil {
			s.Fatal("Failed to clean up: ", err)
		}
	}()
	ctx, cancel := ctxutil.Shorten(ctx, t.CleanupTime())
	defer cancel()

	testing.ContextLog(ctx, "Flashing RW firmware with rollback ID of '0'")
	if err := testFlashingRWFirmware(ctx, d,
		&testRWFlashParams{
			firmwarePath: testImages[fingerprint.TestImageTypeDevRollbackZero].Path,
			// RO version should remain unchanged.
			expectedROVersion: testImages[fingerprint.TestImageTypeDev].ROVersion,
			// RW version should match what we requested to be flashed.
			expectedRWVersion: testImages[fingerprint.TestImageTypeDevRollbackZero].RWVersion,
			// Signature check will pass, so we should be running RW.
			expectedRunningFirmwareCopy: fingerprint.ImageTypeRW,
		}); err != nil {
		s.Fatal("Rollback ID 0 test failed: ", err)
	}

	testing.ContextLog(ctx, "Flashing RW with dev firmware")
	if err := testFlashingRWFirmware(ctx, d,
		&testRWFlashParams{
			firmwarePath: testImages[fingerprint.TestImageTypeDev].Path,
			// RO version should remain unchanged.
			expectedROVersion: testImages[fingerprint.TestImageTypeDev].ROVersion,
			// RW version should match what we requested to be flashed.
			expectedRWVersion: testImages[fingerprint.TestImageTypeDev].RWVersion,
			// Signature check will pass, so we should be running RW.
			expectedRunningFirmwareCopy: fingerprint.ImageTypeRW,
		}); err != nil {
		s.Fatal("Dev firmware test failed: ", err)
	}
}
|
package mailgun
import (
"net/http"
"github.com/gorilla/mux"
)
// addIPRoutes wires up the mock IP-pool endpoints, both account-wide
// (/ips) and per-domain (/domains/{domain}/ips).
func (ms *mockServer) addIPRoutes(r *mux.Router) {
	r.HandleFunc("/ips", ms.listIPS).Methods(http.MethodGet)
	r.HandleFunc("/ips/{ip}", ms.getIPAddress).Methods(http.MethodGet)

	sub := r.PathPrefix("/domains/{domain}/ips").Subrouter()
	sub.HandleFunc("", ms.listDomainIPS).Methods(http.MethodGet)
	sub.HandleFunc("/{ip}", ms.getIPAddress).Methods(http.MethodGet)
	sub.HandleFunc("", ms.postDomainIPS).Methods(http.MethodPost)
	sub.HandleFunc("/{ip}", ms.deleteDomainIPS).Methods(http.MethodDelete)
}
// listIPS responds with a fixed list of account IP addresses.
func (ms *mockServer) listIPS(w http.ResponseWriter, _ *http.Request) {
	resp := ipAddressListResponse{
		TotalCount: 2,
		Items:      []string{"172.0.0.1", "192.168.1.1"},
	}
	toJSON(w, resp)
}
// getIPAddress echoes back the requested IP with canned RDNS/dedicated info.
func (ms *mockServer) getIPAddress(w http.ResponseWriter, r *http.Request) {
	ip := mux.Vars(r)["ip"]
	toJSON(w, IPAddress{
		IP:        ip,
		RDNS:      "luna.mailgun.net",
		Dedicated: true,
	})
}
// listDomainIPS responds with the domain's current IP pool.
// Idiom fix: acquire the lock before deferring the unlock; the original
// deferred Unlock on the line before Lock, which is fragile and confusing.
func (ms *mockServer) listDomainIPS(w http.ResponseWriter, _ *http.Request) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	toJSON(w, ipAddressListResponse{
		TotalCount: 2,
		Items:      ms.domainIPS,
	})
}
// postDomainIPS appends the submitted IP (form field "ip") to the domain's
// pool. Idiom fix: Lock before the deferred Unlock, not after.
func (ms *mockServer) postDomainIPS(w http.ResponseWriter, r *http.Request) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	ms.domainIPS = append(ms.domainIPS, r.FormValue("ip"))
	toJSON(w, okResp{Message: "success"})
}
// deleteDomainIPS removes the given IP from the domain's pool, replying 404
// when the IP was not present. Idiom fix: Lock before the deferred Unlock.
func (ms *mockServer) deleteDomainIPS(w http.ResponseWriter, r *http.Request) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()

	// In-place filter: reuse the backing array of domainIPS.
	result := ms.domainIPS[:0]
	for _, ip := range ms.domainIPS {
		if ip == mux.Vars(r)["ip"] {
			continue
		}
		result = append(result, ip)
	}
	if len(result) != len(ms.domainIPS) {
		toJSON(w, okResp{Message: "success"})
		ms.domainIPS = result
		return
	}
	// Not the actual error returned by the mailgun API
	w.WriteHeader(http.StatusNotFound)
	toJSON(w, okResp{Message: "ip not found"})
}
|
// Rect
package GridSearch
import (
"strconv"
)
// rect is an axis-aligned rectangle in int32 coordinates. Note the
// vertical axis grows upward: Top >= Bottom for a legal rect (see legal()).
type rect struct {
	Left, Top, Right, Bottom int32
}
// NewRectBy4String parses the first four strings of v as the Left, Top,
// Right and Bottom coordinates of a rect. It returns (nil, false) when v
// has fewer than four elements or any element is not an integer; otherwise
// it returns the rect and whether it is legal (Left<=Right, Top>=Bottom).
//
// Fix: the original indexed v[0..3] without checking len(v) and would
// panic on short input.
func NewRectBy4String(v []string) (*rect, bool) {
	if len(v) < 4 {
		return nil, false
	}
	var vals [4]int32
	for i := 0; i < 4; i++ {
		n, err := strconv.Atoi(v[i])
		if err != nil {
			return nil, false
		}
		vals[i] = int32(n)
	}
	r := &rect{
		Left:   vals[0],
		Top:    vals[1],
		Right:  vals[2],
		Bottom: vals[3],
	}
	return r, r.legal()
}
// getCenter returns the midpoint of the rectangle (integer division).
func (r *rect) getCenter() point {
	return point{
		lo: (r.Left + r.Right) / 2,
		la: (r.Top + r.Bottom) / 2,
	}
}
// legal reports whether the rect is well-formed: Left must not exceed
// Right, and Top must not be below Bottom (y grows upward).
func (r *rect) legal() (ok bool) {
	return r.Left <= r.Right && r.Top >= r.Bottom
}
// width returns the horizontal extent (Right - Left).
func (r *rect) width() int32 {
	return r.Right - r.Left
}
// height returns the vertical extent (Top - Bottom; y grows upward).
func (r *rect) height() int32 {
	return r.Top - r.Bottom
}
// equal reports whether r and other have identical coordinates.
// Idiom fix: return the boolean expression directly instead of
// "if cond { return true }; return false".
func (r *rect) equal(other *rect) bool {
	return r.Left == other.Left &&
		r.Top == other.Top &&
		r.Right == other.Right &&
		r.Bottom == other.Bottom
}
// intersection computes the overlap of r and other and reports whether the
// result is a legal (non-inverted) rectangle, i.e. whether they intersect.
// NOTE(review): relies on max/min helpers not visible in this chunk —
// presumably package-local (or Go 1.21 builtins); confirm.
func (r *rect) intersection(other *rect) (nr rect, ok bool) {
	nr.Left = max(r.Left, other.Left)
	nr.Top = min(r.Top, other.Top)
	nr.Right = min(r.Right, other.Right)
	nr.Bottom = max(r.Bottom, other.Bottom)
	ok = nr.legal()
	return
}
// contain reports whether r fully contains other: the intersection of the
// two must exist and be exactly other.
func (r *rect) contain(other *rect) (nr rect, ok bool) {
	nr, ok = r.intersection(other)
	ok = ok && nr.equal(other)
	return
}
// getQD returns the quadrant index (0-3) of point p relative to the rect's
// center: bit 0 set when p is right of center, bit 1 set when p is below it.
func (r *rect) getQD(p *point) int32 {
	c := r.getCenter()
	qd := int32(0)
	if p.lo > c.lo {
		qd |= 1
	}
	if p.la < c.la {
		qd |= 2
	}
	return qd
}
// getQDRect returns the sub-rectangle for quadrant qd (0: top-left,
// 1: top-right, 2: bottom-left, 3: bottom-right, matching getQD). For any
// other qd the zero rect is returned, as in the original if-chain.
// Idiom fix: use a switch instead of four sequential ifs.
func (r *rect) getQDRect(qd int32) (nr rect) {
	c := r.getCenter()
	switch qd {
	case 0:
		nr = rect{Left: r.Left, Top: r.Top, Right: c.lo, Bottom: c.la}
	case 1:
		nr = rect{Left: c.lo, Top: r.Top, Right: r.Right, Bottom: c.la}
	case 2:
		nr = rect{Left: r.Left, Top: c.la, Right: c.lo, Bottom: r.Bottom}
	case 3:
		nr = rect{Left: c.lo, Top: c.la, Right: r.Right, Bottom: r.Bottom}
	}
	return
}
// gridColNum returns the number of top-level grid columns covering
// CHINA_RECT horizontally (the +1 covers the partial trailing cell).
func gridColNum() int32 {
	return (CHINA_RECT.Right-CHINA_RECT.Left)/GRID_TOP_WIDTH + 1
}
// gridRowNum returns the number of top-level grid rows covering
// CHINA_RECT vertically (the +1 covers the partial trailing cell).
func gridRowNum() int32 {
	return (CHINA_RECT.Top-CHINA_RECT.Bottom)/GRID_TOP_HEIGHT + 1
}
// getGridRowCol maps a (lo, la) coordinate to its top-level grid row and
// column. ok is false when the coordinate falls outside CHINA_RECT's grid;
// row/col are still returned for the caller to inspect.
func getGridRowCol(lo, la int32) (row, col int32, ok bool) {
	row = (CHINA_RECT.Top - la) / GRID_TOP_HEIGHT
	col = (lo - CHINA_RECT.Left) / GRID_TOP_WIDTH
	ok = row >= 0 && row < GRID_ROW_NUM && col >= 0 && col < GRID_COL_NUM
	return
}
// getGridTopIndexKey converts a coordinate into the flat row-major index of
// its top-level grid cell, or -1 when the coordinate is out of range.
func getGridTopIndexKey(lo, la int32) int32 {
	if row, col, ok := getGridRowCol(lo, la); ok {
		return row*GRID_COL_NUM + col
	}
	return -1
}
|
package mysql
import (
"context"
"database/sql"
"reflect"
"sync"
_ "github.com/go-sql-driver/mysql"
"shared/utility/naming"
)
// Package-level cache of reflected table metadata, shared by all Handlers.
// TableStruct values are reused across Modules; the sync.Map key is Table.Name.
// mutex serializes the (rare) first-time reflection of a table.
var (
	tables = &sync.Map{}
	mutex  sync.Mutex
)
// Handler wraps a *sql.DB and provides Create/Load/Save persistence for
// Module values, lazily reflecting each module's table layout on first use.
type Handler struct {
	db *sql.DB
}
// NewHandler returns a Handler bound to the given, already-opened database
// handle. The caller is responsible for configuring the pool (idle/open
// connection limits, lifetime) before passing it in.
func NewHandler(db *sql.DB) *Handler {
	h := &Handler{db: db}
	return h
}
// func NewHandler(config *Config) (*Handler, error) {
// db, err := sql.Open("mysql", config.Addr)
// if err != nil {
// return nil, err
// }
//
// db.SetMaxIdleConns(config.MaxIdleConn)
// db.SetMaxOpenConns(config.MaxOpenConn)
// db.SetConnMaxLifetime(config.ConnMaxLifetime)
//
// return &Handler{
// db: db,
// }, db.Ping()
// }
// type initOpts struct {
// Table string
// Data Module
// }
// init lazily resolves the TableStruct for data's table and binds it (plus
// the db handle) to the Module. Table metadata is reflected at most once per
// table name and cached in the package-level 'tables' sync.Map, using
// double-checked locking: the second Load under the mutex prevents two
// goroutines from reflecting the same table concurrently.
func (h *Handler) init(data Module) error {
	if data.needInit() {
		// Table name is derived from the struct type name, underscored
		// (e.g. UserItem -> user_item).
		name := naming.UnderlineNaming(reflect.TypeOf(data).Elem().Name())
		var table *TableStruct
		iTable, ok := tables.Load(name)
		if !ok {
			mutex.Lock()
			// Load again under the lock to avoid duplicate initialization.
			iTable, ok = tables.Load(name)
			if !ok {
				table = NewTableStruct(name)
				err := table.ReflectTable(data)
				if err != nil {
					// Unlock before the error return; no table is cached.
					mutex.Unlock()
					return err
				}
				tables.Store(table.Name, table)
			} else {
				table, _ = iTable.(*TableStruct)
			}
			mutex.Unlock()
		} else {
			table, _ = iTable.(*TableStruct)
		}
		data.init(h.db, table)
	}
	return nil
}
//
// func (m *Handler) BatchLoad(data []Module) error {
// rows, err := m.db.Query("SELECT * FROM user", 1)
// if err != nil {
// return err
// }
// defer rows.Close()
//
// for rows.Next() {
// rows.Scan()
// }
//
// err := m.checkInit(data)
// if err != nil {
// return err
// }
//
// return nil
// }
// Preload reflects and caches the TableStruct for data's table without
// binding it to the Module, so later init calls hit the cache.
//
// Fix: the original returned the ReflectTable error while still holding
// 'mutex' (the Unlock sat after the if-block), permanently deadlocking every
// subsequent Preload/init. Restructured with defer so the lock is always
// released.
func (h *Handler) Preload(data Module) error {
	name := data.Table()
	if name == "" {
		name = naming.UnderlineNaming(reflect.TypeOf(data).Elem().Name())
	}
	if _, ok := tables.Load(name); ok {
		return nil
	}

	mutex.Lock()
	defer mutex.Unlock()
	// Load again under the lock to avoid duplicate initialization.
	if _, ok := tables.Load(name); !ok {
		table := NewTableStruct(name)
		if err := table.ReflectTable(data); err != nil {
			return err
		}
		tables.Store(table.Name, table)
	}
	return nil
}
// Init eagerly performs the lazy per-module initialization (table
// reflection and binding). Idiom fix: return h.init's error directly
// instead of the redundant if/return nil dance.
func (h *Handler) Init(data Module) error {
	return h.init(data)
}
// Create ensures the module is initialized, then inserts it.
func (h *Handler) Create(ctx context.Context, data Module) error {
	if err := h.init(data); err != nil {
		return err
	}
	return data.create(ctx, data)
}
// Load ensures the module is initialized, then reads it from the database.
func (h *Handler) Load(ctx context.Context, data Module) error {
	if err := h.init(data); err != nil {
		return err
	}
	return data.load(ctx, data)
}
// Save ensures the module is initialized, then persists its current state.
func (h *Handler) Save(ctx context.Context, data Module) error {
	if err := h.init(data); err != nil {
		return err
	}
	return data.save(ctx, data)
}
// Close closes the underlying database handle.
func (h *Handler) Close() error {
	return h.db.Close()
}
|
package main
import "github.com/sanderkvale/IS105/ICA03/Oppg2/fileinfo"
// main delegates to the fileinfo package's demo entry point.
func main() {
	fileinfo.FilInformasjon()
}
|
package admin
import (
"github.com/caos/logging"
view_model "github.com/caos/zitadel/internal/view/model"
"github.com/caos/zitadel/pkg/grpc/admin"
"github.com/golang/protobuf/ptypes"
)
// viewsFromModel converts a slice of view models into their admin API
// representations, preserving order.
func viewsFromModel(views []*view_model.View) []*admin.View {
	converted := make([]*admin.View, len(views))
	for idx := range views {
		converted[idx] = viewFromModel(views[idx])
	}
	return converted
}
// failedEventsFromModel converts a slice of failed-event models into their
// admin API representations, preserving order.
func failedEventsFromModel(failedEvents []*view_model.FailedEvent) []*admin.FailedEvent {
	converted := make([]*admin.FailedEvent, len(failedEvents))
	for idx := range failedEvents {
		converted[idx] = failedEventFromModel(failedEvents[idx])
	}
	return converted
}
// viewFromModel converts a single view model into its admin API form.
// A timestamp conversion failure is logged and leaves ViewTimestamp nil.
func viewFromModel(view *view_model.View) *admin.View {
	ts, err := ptypes.TimestampProto(view.CurrentTimestamp)
	logging.Log("GRPC-KSo03").OnError(err).Debug("unable to parse timestamp")

	result := &admin.View{
		Database:          view.Database,
		ViewName:          view.ViewName,
		ProcessedSequence: view.CurrentSequence,
		ViewTimestamp:     ts,
	}
	return result
}
// failedEventFromModel converts a single failed-event model into its admin
// API representation (a field-for-field copy).
func failedEventFromModel(failedEvent *view_model.FailedEvent) *admin.FailedEvent {
	return &admin.FailedEvent{
		Database:       failedEvent.Database,
		ViewName:       failedEvent.ViewName,
		FailedSequence: failedEvent.FailedSequence,
		FailureCount:   failedEvent.FailureCount,
		ErrorMessage:   failedEvent.ErrMsg,
	}
}
|
package radareutil
import (
"bytes"
"fmt"
"net/http"
"net/url"
"os/exec"
"path/filepath"
"strconv"
"sync"
"time"
)
const (
cmdSubPath = "/cmd"
)
// Deprecated: Use 'NewCustomHttpServerApi()' instead.
//
// HttpApiOptions configures the legacy HTTP API client.
type HttpApiOptions struct {
	// Timeout bounds each HTTP request; a zero value defaults to 10s
	// in NewHttpApi.
	Timeout time.Duration
	// DoNotTrimWhiteSpace preserves surrounding whitespace in command output.
	DoNotTrimWhiteSpace bool
}
// Deprecated: Use 'NewCustomHttpServerApi()' instead.
//
// HttpApi executes a single r2 command over the HTTP interface and returns
// its textual output.
type HttpApi interface {
	Exec(command string) (string, error)
}
// Deprecated: Use 'HttpServerApi' instead.
//
// defaultHttpApi is the legacy HttpApi implementation backed by a plain
// http.Client hitting radare2's built-in web server.
type defaultHttpApi struct {
	httpClient *http.Client
	address    *url.URL
	options    *HttpApiOptions
}
// Exec sends the command to the r2 HTTP endpoint and returns its output.
// Idiom fix: both branches of the original if/else returned the same pair,
// so the error check was dead code — return the pair directly.
func (o defaultHttpApi) Exec(command string) (string, error) {
	content, err := executeHttpCall(command, o.address, o.httpClient, !o.options.DoNotTrimWhiteSpace)
	return string(content), err
}
// Deprecated: Use 'NewCustomHttpServerApi()' instead.
//
// NewHttpApi builds the legacy HTTP client for radare2's web interface.
// A zero options.Timeout is replaced with a 10 second default (the options
// struct is mutated in place, matching the original behavior).
func NewHttpApi(address *url.URL, options *HttpApiOptions) (HttpApi, error) {
	if options.Timeout == 0 {
		options.Timeout = 10 * time.Second
	}
	client := &http.Client{Timeout: options.Timeout}
	api := defaultHttpApi{
		httpClient: client,
		address:    address,
		options:    options,
	}
	return api, nil
}
// Deprecated: Use 'HttpServerApi' instead.
//
// HttpServer manages the lifecycle of a radare2 process running its
// built-in HTTP server, and proxies commands to it.
type HttpServer interface {
	Options() *HttpServerOptions
	Start() error
	Stop()
	// OnStopped returns the channel that receives process-exit info.
	OnStopped() chan StoppedInfo
	Restart() error
	Status() Status
	Execute(command string) (string, error)
}
// Deprecated: Use 'Radare2Options' instead.
//
// HttpServerOptions configures the legacy radare2 HTTP server wrapper.
type HttpServerOptions struct {
	// DisableSandbox passes '-e http.sandbox=false' to r2.
	DisableSandbox bool
	// DebugPid, when > 0, attaches r2 to that process ('-d <pid>').
	DebugPid int
	// Port for r2's built-in web server.
	Port int
	// DetachOnStop requires that HttpApi be set.
	DetachOnStop bool
	HttpApi      HttpApi
}
// Deprecated: This type represents legacy implementation of HTTP server.
//
// deprecatedHttpServer supervises a radare2 child process. All mutable
// fields (server, state) are guarded by mutex; 'stopped' receives exit
// info from the monitor goroutine.
type deprecatedHttpServer struct {
	exePath string
	mutex   *sync.Mutex
	server  *exec.Cmd
	options *HttpServerOptions
	state   State
	stopped chan StoppedInfo
}
// Start launches the radare2 process under the lock.
func (o *deprecatedHttpServer) Start() error {
	o.mutex.Lock()
	defer o.mutex.Unlock()
	return o.startUnsafe()
}
// startUnsafe starts the server without use of the lock. It builds the r2
// argument list from the options, spawns the process with stdout/stderr
// captured into one buffer, and hands that buffer to the monitor goroutine.
// Fix: wrap the start error with %w so callers can errors.Is/As into it.
func (o *deprecatedHttpServer) startUnsafe() error {
	if o.state == Running {
		return fmt.Errorf("server is already running")
	}

	var args []string
	if o.options.Port > 0 {
		args = append(args, httpServerArg+strconv.Itoa(o.options.Port))
	} else {
		args = append(args, httpServerArg)
	}
	if o.options.DisableSandbox {
		args = append(args, "-e", "http.sandbox=false")
	}
	if o.options.DebugPid > 0 {
		args = append(args, "-d", strconv.Itoa(o.options.DebugPid))
	} else {
		args = append(args, "--")
	}

	radare := exec.Command(o.exePath, args...)
	radare.Dir = filepath.Dir(o.exePath)

	// Capture both streams into one buffer for StoppedInfo.
	output := bytes.NewBuffer(nil)
	radare.Stderr = output
	radare.Stdout = output

	err := radare.Start()
	if err != nil {
		return fmt.Errorf("failed to start radare - %w", err)
	}

	o.state = Running
	o.server = radare

	go o.monitor(output)

	return nil
}
// monitor runs in its own goroutine: it waits for the radare2 process to
// exit, records the captured output, and publishes a StoppedInfo.
func (o *deprecatedHttpServer) monitor(output *bytes.Buffer) {
	err := o.server.Wait()
	o.mutex.Lock()
	info := StoppedInfo{
		out: output.String(),
	}
	// If Stop() was not the cause, the process died on its own; mark it
	// Dead and attach the Wait error.
	if o.state != Stopped {
		o.state = Dead
		info.err = err
	}
	// Non-blocking send: drop the info when nobody is listening on the
	// unbuffered 'stopped' channel.
	select {
	case o.stopped <- info:
	default:
	}
	o.server = nil
	o.mutex.Unlock()
}
// Stop kills the radare2 process under the lock.
func (o *deprecatedHttpServer) Stop() {
	o.mutex.Lock()
	defer o.mutex.Unlock()
	o.stopUnsafe()
}
// stopUnsafe stops the server without use of the lock. Setting state to
// Stopped first tells monitor() that this exit is intentional. When
// DetachOnStop is set, debugged processes are detached via 'dp-' before the
// kill; both the Exec and Kill errors are deliberately ignored (best-effort
// shutdown).
func (o *deprecatedHttpServer) stopUnsafe() {
	if o.state != Running {
		return
	}
	o.state = Stopped
	if o.options.DetachOnStop && o.options.HttpApi != nil {
		o.options.HttpApi.Exec("dp-")
	}
	o.server.Process.Kill()
}
// TODO: This can race with the 'monitor()' thread.
// Needs to be improved.
//
// Restart stops and then restarts the radare2 process. The lock is
// intentionally released between the two phases so monitor() can observe
// the stop; merging the critical sections would let monitor() clobber the
// freshly started server. Idiom fix: return the start error directly
// instead of the redundant if/return nil tail.
func (o *deprecatedHttpServer) Restart() error {
	o.mutex.Lock()
	o.stopUnsafe()
	o.mutex.Unlock()

	o.mutex.Lock()
	err := o.startUnsafe()
	o.mutex.Unlock()
	return err
}
// Options returns the configuration this server was created with.
func (o *deprecatedHttpServer) Options() *HttpServerOptions {
	return o.options
}
// Status returns a snapshot of the current process state under the lock.
func (o *deprecatedHttpServer) Status() Status {
	o.mutex.Lock()
	defer o.mutex.Unlock()
	return Status{
		State: o.state,
	}
}
// OnStopped returns the channel on which monitor() publishes exit info.
func (o *deprecatedHttpServer) OnStopped() chan StoppedInfo {
	return o.stopped
}
// Execute forwards the command to the configured HttpApi client.
func (o *deprecatedHttpServer) Execute(command string) (string, error) {
	return o.options.HttpApi.Exec(command)
}
// Deprecated: Use 'NewHttpServerApi()' instead.
//
// NewHttpServer builds the legacy server wrapper for the r2 binary at
// exePath. When options.HttpApi is unset, a default localhost client for
// options.Port with a 5 second timeout is installed.
func NewHttpServer(exePath string, options *HttpServerOptions) (HttpServer, error) {
	finalExePath, err := fullyQualifiedBinaryPath(exePath)
	if err != nil {
		return nil, err
	}

	if options.HttpApi == nil {
		addr, parseErr := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", options.Port))
		if parseErr != nil {
			return nil, parseErr
		}
		api, apiErr := NewHttpApi(addr, &HttpApiOptions{
			Timeout: 5 * time.Second,
		})
		if apiErr != nil {
			return nil, apiErr
		}
		options.HttpApi = api
	}

	server := &deprecatedHttpServer{
		exePath: finalExePath,
		options: options,
		mutex:   &sync.Mutex{},
		state:   Stopped,
		stopped: make(chan StoppedInfo),
	}
	return server, nil
}
|
package adservertargeting
import (
"encoding/json"
"net/url"
"strings"
"github.com/buger/jsonparser"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/openrtb_ext"
)
// DataSource names where a targeting value is taken from.
type DataSource string

const (
	// SourceBidRequest: the value is a path into the (resolved) bid request.
	SourceBidRequest DataSource = "bidrequest"
	// SourceStatic: the value is used verbatim.
	SourceStatic DataSource = "static"
	// SourceBidResponse: the value is a path into seatbid[].bid[] objects.
	SourceBidResponse DataSource = "bidresponse"
)

const (
	// bidderMacro in a key is replaced with the seat/bidder name.
	bidderMacro   = "{{BIDDER}}"
	pathDelimiter = "."
)

var (
	// allowedTypes are the JSON value types accepted for targeting values.
	allowedTypes = []jsonparser.ValueType{jsonparser.String, jsonparser.Number}
)

// RequestTargetingData struct to hold pre-processed ad server targeting keys and values.
// Exactly one of the two fields is expected to be populated: SingleVal for a
// request-global value, TargetingValueByImpId for per-impression values.
type RequestTargetingData struct {
	SingleVal             json.RawMessage
	TargetingValueByImpId map[string][]byte
}

// ResponseTargetingData describes a key whose value is pulled from each bid.
type ResponseTargetingData struct {
	Key      string
	HasMacro bool // true when Key contains bidderMacro
	Path     string
}

// adServerTargetingData is the pre-processed targeting config: request-side
// values resolved once, response-side paths resolved per bid.
type adServerTargetingData struct {
	RequestTargetingData  map[string]RequestTargetingData
	ResponseTargetingData []ResponseTargetingData
}
// Apply collects ad server targeting config from the request, resolves it
// against the bid response, and attaches the resulting key/values to each
// bid's ext. Warnings from both phases are appended to
// bidResponseExt.Warnings under BidderReservedGeneral.
// NOTE(review): assumes bidResponseExt.Warnings is a non-nil map when
// warnings occur — confirm with callers; appending into a nil map would panic.
func Apply(
	reqWrapper *openrtb_ext.RequestWrapper,
	resolvedRequest json.RawMessage,
	response *openrtb2.BidResponse,
	queryParams url.Values,
	bidResponseExt *openrtb_ext.ExtBidResponse,
	truncateTargetAttribute *int) *openrtb2.BidResponse {
	adServerTargeting, warnings := collect(reqWrapper, resolvedRequest, queryParams)
	response, warnings = resolve(adServerTargeting, response, warnings, truncateTargetAttribute)
	if len(warnings) > 0 {
		bidResponseExt.Warnings[openrtb_ext.BidderReservedGeneral] = append(bidResponseExt.Warnings[openrtb_ext.BidderReservedGeneral], warnings...)
	}
	return response
}
// collect gathers targeting keys and values from request based on initial config
// and optimizes future key and value that should be collected from response.
// Request-sourced and static values are resolved immediately into
// RequestTargetingData; response-sourced entries are only recorded (key,
// path, macro flag) for later per-bid resolution. Returns nil data when the
// config itself cannot be extracted.
func collect(
	reqWrapper *openrtb_ext.RequestWrapper, resolvedRequest json.RawMessage,
	queryParams url.Values) (*adServerTargetingData, []openrtb_ext.ExtBidderMessage) {
	var warnings []openrtb_ext.ExtBidderMessage
	adServerTargeting, err := getAdServerTargeting(reqWrapper)
	if err != nil {
		warnings = append(warnings, createWarning("unable to extract adServerTargeting from request"))
		return nil, warnings
	}
	// Invalid entries are dropped; each produces a warning.
	adServerTargeting, validationWarnings := validateAdServerTargeting(adServerTargeting)
	if len(validationWarnings) > 0 {
		warnings = append(warnings, validationWarnings...)
	}

	requestTargetingData := map[string]RequestTargetingData{}
	responseTargetingData := []ResponseTargetingData{}

	// impsCache memoizes impression lookups against the resolved request.
	impsCache := requestCache{resolvedReq: resolvedRequest}

	for _, targetingObj := range adServerTargeting {
		source := strings.ToLower(targetingObj.Source)
		switch DataSource(source) {
		case SourceBidRequest:
			//causes PBS to treat 'value' as a path to pull from the request object
			value, err := getValueFromBidRequest(&impsCache, targetingObj.Value, queryParams)
			if err != nil {
				warnings = append(warnings, createWarning(err.Error()))
			} else {
				requestTargetingData[targetingObj.Key] = value
			}
		case SourceStatic:
			// causes PBS to just use the 'value' provided
			staticValue := RequestTargetingData{SingleVal: json.RawMessage(targetingObj.Value)}
			requestTargetingData[targetingObj.Key] = staticValue
		case SourceBidResponse:
			//causes PBS to treat 'value' as a path to pull from the bidder's response object, specifically seatbid[j].bid[k]
			bidResponseTargeting := ResponseTargetingData{}
			bidResponseTargeting.Key = targetingObj.Key
			bidResponseTargeting.Path = targetingObj.Value
			// Macro detection is done once here so per-bid resolution can
			// substitute the bidder name cheaply.
			bidResponseTargeting.HasMacro = strings.Contains(strings.ToUpper(targetingObj.Key), bidderMacro)
			responseTargetingData = append(responseTargetingData, bidResponseTargeting)
		}
	}

	adServerTargetingData := &adServerTargetingData{
		RequestTargetingData:  requestTargetingData,
		ResponseTargetingData: responseTargetingData,
	}
	return adServerTargetingData, warnings
}
// resolve walks every bid in the response, merges the pre-resolved
// request-side targeting with per-bid response-side targeting, and writes
// the combined key/values into each bid's ext (truncating keys per
// truncateTargetAttribute). Accumulated warnings are returned alongside the
// (mutated) response.
func resolve(
	adServerTargetingData *adServerTargetingData,
	response *openrtb2.BidResponse,
	warnings []openrtb_ext.ExtBidderMessage,
	truncateTargetAttribute *int) (*openrtb2.BidResponse, []openrtb_ext.ExtBidderMessage) {
	// bidCache memoizes marshaled bids keyed by bidder, shared across keys.
	bidCache := bidsCache{bids: make(map[string]map[string][]byte)}

	for _, seat := range response.SeatBid {
		bidderName := seat.Seat
		for i, bid := range seat.Bid {
			targetingData := make(map[string]string)
			processRequestTargetingData(adServerTargetingData, targetingData, bid.ImpID)
			respWarnings := processResponseTargetingData(adServerTargetingData, targetingData, bidderName, bid, bidCache, response, seat.Ext)
			if len(respWarnings) > 0 {
				warnings = append(warnings, respWarnings...)
			}
			// Index into seat.Bid (not the loop copy) so the new ext sticks.
			seat.Bid[i].Ext = buildBidExt(targetingData, bid, warnings, truncateTargetAttribute)
		}
	}
	return response, warnings
}
|
/* ######################################################################
# Author: (zfly1207@126.com)
# Created Time: 2018-11-14 12:50:43
# File Name: main.go
# Description:
####################################################################### */
package main
import (
"flag"
"fmt"
"os"
"strings"
"ant-coder/coder"
"ant-coder/coder/config"
)
// pass through when build project, go build -ldflags "main.__version__ 1.2.1" app
// coders maps the value accepted by the -s flag to the code generator
// that handles that scene.
var coders = map[string]coder.Coder{
	"go_model":          coder.NewGoModelCoder(),
	"go_ui":             coder.NewGoUiCoder(),
	"go_loop_worker":    coder.NewGoLoopWorkerCoder(),
	"go_crontab_worker": coder.NewGoCrontabWorkerCoder(),
	"go_rpcx_server":    coder.NewGoRpcxServerCoder(),
}
var (
	// __version__ is injected at build time via -ldflags (see comment above).
	__version__ string
	pwd         = flag.String("d", "", "work directory")
	verbose     = flag.String("v", "false", "enable verbose logging [false]")
	// scene holds the -s flag value and selects an entry in coders.
	scene string
)
// init wires up the command-line flags, applies defaults, exports the
// process-wide settings as environment variables, and loads the user config.
// It exits the process on a missing -s flag or a config load failure.
func init() {
	// Collect the known scene names for the -s usage string. Use a fresh
	// local name here: the original `for scene, _ := range` shadowed the
	// package-level `scene` variable and used a redundant blank identifier.
	var scenes []string
	for s := range coders {
		scenes = append(scenes, s)
	}
	flag.StringVar(&scene, "s", "", fmt.Sprintf("coder scene (options: %s)", strings.Join(scenes, "|")))
	flag.Parse()
	if len(*pwd) == 0 {
		*pwd, _ = os.Getwd()
	}
	os.Setenv("VERSION", __version__)
	os.Setenv("WORKDIR", *pwd)
	os.Setenv("VERBOSE", *verbose)
	if len(scene) == 0 {
		fmt.Println("you must specify `-s` option")
		os.Exit(-1)
	}
	if err := config.SetPathAndLoad(os.Getenv("HOME")); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
// main looks up the coder selected via -s and runs it, exiting non-zero on
// an unknown scene or an execution error.
func main() {
	c, ok := coders[scene]
	if !ok {
		// Fixed message: previously read "you specify coder sense not support."
		fmt.Printf("coder scene %q is not supported\n", scene)
		os.Exit(-1)
	}
	if err := coder.NewExecutor(c).Do(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package controllers
import (
"fmt"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/DataDog/datadog-operator/controllers/datadogagent"
componentagent "github.com/DataDog/datadog-operator/controllers/datadogagent/component/agent"
"github.com/DataDog/datadog-operator/pkg/config"
"github.com/DataDog/datadog-operator/pkg/datadogclient"
"github.com/DataDog/datadog-operator/pkg/kubernetes"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
)
// Controller names; also used as event-recorder / logger component names.
const (
	agentControllerName   = "DatadogAgent"
	monitorControllerName = "DatadogMonitor"
)
// SetupOptions defines options for setting up controllers to ease testing.
type SetupOptions struct {
	SupportExtendedDaemonset ExtendedDaemonsetOptions // ExtendedDaemonset support and tuning
	SupportCilium            bool                     // enable Cilium support in the agent reconciler
	Creds                    config.Creds             // Datadog API credentials (used by the monitor client)
	DatadogAgentEnabled      bool                     // start the DatadogAgent controller
	DatadogMonitorEnabled    bool                     // start the DatadogMonitor controller
	OperatorMetricsEnabled   bool
	V2APIEnabled             bool
}
// ExtendedDaemonsetOptions defines ExtendedDaemonset options.
//
// The canary restart counts are plain ints here and are narrowed to int32
// when passed down to the component options.
type ExtendedDaemonsetOptions struct {
	Enabled                    bool
	MaxPodUnavailable          string
	MaxPodSchedulerFailure     string
	CanaryDuration             time.Duration
	CanaryReplicas             string
	CanaryAutoPauseEnabled     bool
	CanaryAutoPauseMaxRestarts int
	CanaryAutoFailEnabled      bool
	CanaryAutoFailMaxRestarts  int
}
// starterFunc is the signature shared by all controller start helpers.
type starterFunc func(logr.Logger, manager.Manager, *version.Info, kubernetes.PlatformInfo, SetupOptions) error

// controllerStarters maps a controller name to the function that starts it.
var controllerStarters = map[string]starterFunc{
	agentControllerName:   startDatadogAgent,
	monitorControllerName: startDatadogMonitor,
}
// SetupControllers starts all controllers (also used by e2e tests).
//
// A controller that fails to start is logged but does not abort the setup,
// so one misbehaving controller does not prevent the others from running.
func SetupControllers(logger logr.Logger, mgr manager.Manager, options SetupOptions) error {
	// Get some information about Kubernetes version.
	// Never use original mgr.GetConfig(), always copy as clients might modify the configuration.
	discoveryConfig := rest.CopyConfig(mgr.GetConfig())
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(discoveryConfig)
	if err != nil {
		return fmt.Errorf("unable to get discovery client: %w", err)
	}
	versionInfo, err := discoveryClient.ServerVersion()
	if err != nil {
		return fmt.Errorf("unable to get APIServer version: %w", err)
	}
	groups, resources, err := getServerGroupsAndResources(logger, discoveryClient)
	if err != nil {
		return fmt.Errorf("unable to get API resource versions: %w", err)
	}
	platformInfo := kubernetes.NewPlatformInfo(versionInfo, groups, resources)
	for controller, starter := range controllerStarters {
		if err := starter(logger, mgr, versionInfo, platformInfo, options); err != nil {
			logger.Error(err, "Couldn't start controller", "controller", controller)
		}
	}
	return nil
}
// getServerGroupsAndResources returns the API groups and resource lists known
// to the API server. A partial discovery failure (some group unavailable) is
// tolerated and the partial result is returned without an error; any other
// discovery error is logged and returned.
func getServerGroupsAndResources(log logr.Logger, discoveryClient *discovery.DiscoveryClient) ([]*v1.APIGroup, []*v1.APIResourceList, error) {
	groups, resources, err := discoveryClient.ServerGroupsAndResources()
	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
		log.Info("GetServerGroupsAndResources ERROR", "err", err)
		return nil, nil, err
	}
	return groups, resources, nil
}
// startDatadogAgent registers the DatadogAgent reconciler with the manager.
// It is a no-op when the feature is disabled in options.
func startDatadogAgent(logger logr.Logger, mgr manager.Manager, vInfo *version.Info, pInfo kubernetes.PlatformInfo, options SetupOptions) error {
	if !options.DatadogAgentEnabled {
		logger.Info("Feature disabled, not starting the controller", "controller", agentControllerName)
		return nil
	}
	return (&DatadogAgentReconciler{
		Client:       mgr.GetClient(),
		VersionInfo:  vInfo,
		PlatformInfo: pInfo,
		Log:          ctrl.Log.WithName("controllers").WithName(agentControllerName),
		Scheme:       mgr.GetScheme(),
		Recorder:     mgr.GetEventRecorderFor(agentControllerName),
		Options: datadogagent.ReconcilerOptions{
			// Translate the setup-level options to the component-level ones;
			// the restart counts are narrowed from int to int32 here.
			ExtendedDaemonsetOptions: componentagent.ExtendedDaemonsetOptions{
				Enabled:                    options.SupportExtendedDaemonset.Enabled,
				MaxPodUnavailable:          options.SupportExtendedDaemonset.MaxPodUnavailable,
				MaxPodSchedulerFailure:     options.SupportExtendedDaemonset.MaxPodSchedulerFailure,
				CanaryDuration:             options.SupportExtendedDaemonset.CanaryDuration,
				CanaryReplicas:             options.SupportExtendedDaemonset.CanaryReplicas,
				CanaryAutoPauseEnabled:     options.SupportExtendedDaemonset.CanaryAutoPauseEnabled,
				CanaryAutoPauseMaxRestarts: int32(options.SupportExtendedDaemonset.CanaryAutoPauseMaxRestarts),
				CanaryAutoFailEnabled:      options.SupportExtendedDaemonset.CanaryAutoFailEnabled,
				CanaryAutoFailMaxRestarts:  int32(options.SupportExtendedDaemonset.CanaryAutoFailMaxRestarts),
			},
			SupportCilium:          options.SupportCilium,
			OperatorMetricsEnabled: options.OperatorMetricsEnabled,
			V2Enabled:              options.V2APIEnabled,
		},
	}).SetupWithManager(mgr)
}
// startDatadogMonitor registers the DatadogMonitor reconciler with the
// manager. It is a no-op when the feature is disabled in options; otherwise
// it first builds the Datadog API client from the configured credentials.
func startDatadogMonitor(logger logr.Logger, mgr manager.Manager, vInfo *version.Info, pInfo kubernetes.PlatformInfo, options SetupOptions) error {
	if !options.DatadogMonitorEnabled {
		logger.Info("Feature disabled, not starting the controller", "controller", monitorControllerName)
		return nil
	}
	ddClient, err := datadogclient.InitDatadogMonitorClient(logger, options.Creds)
	if err != nil {
		return fmt.Errorf("unable to create Datadog API Client: %w", err)
	}
	return (&DatadogMonitorReconciler{
		Client:      mgr.GetClient(),
		DDClient:    ddClient,
		VersionInfo: vInfo,
		Log:         ctrl.Log.WithName("controllers").WithName(monitorControllerName),
		Scheme:      mgr.GetScheme(),
		Recorder:    mgr.GetEventRecorderFor(monitorControllerName),
	}).SetupWithManager(mgr)
}
|
// Copyright © 2016 Zhang Peihao <zhangpeihao@gmail.com>
package main
import "github.com/zhangpeihao/zim/cmd"
// main is the CLI entry point; all command wiring lives in the cmd package.
func main() {
	cmd.Execute()
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// main fills a fixed-size array with random values below 100, prints it,
// reverses it in place, and prints the result.
func main() {
	rand.Seed(time.Now().UnixNano())
	var a = [5]int{}
	for i := 0; i < len(a); i++ {
		a[i] = rand.Intn(100)
	}
	fmt.Println(a)
	reverse(a[:])
	fmt.Println(a)
}

// reverse reverses s in place using Go's parallel assignment instead of a
// temporary variable.
func reverse(s []int) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
|
package fcgi
// Header is the fixed-size header that precedes every FastCGI record.
// Field names and widths mirror the record header layout in the FastCGI
// specification.
type Header struct {
	Version       byte   // protocol version
	Type          byte   // record type
	RequestId     uint16 // id tying the record to a request
	ContentLength uint16 // number of content bytes following the header
	PaddingLength byte   // number of padding bytes after the content
	Reserved      byte   // reserved by the protocol; unused
	//ContentData []byte
	//PaddingData []byte
}
|
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// ModifyConnectionReader is a Reader for the ModifyConnection structure.
type ModifyConnectionReader struct {
	// formats is the registry used to validate/parse typed response fields.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ModifyConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewModifyConnectionOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		// Any non-200 status is surfaced as a generic API error.
		return nil, runtime.NewAPIError("unknown error", response, response.Code())
	}
}
// NewModifyConnectionOK creates a ModifyConnectionOK with default headers values.
func NewModifyConnectionOK() *ModifyConnectionOK {
	result := new(ModifyConnectionOK)
	return result
}
/*ModifyConnectionOK handles this case with default header values.

ModifyConnectionOK modify connection o k
*/
// The 200 response carries no payload, so the type has no fields.
type ModifyConnectionOK struct {
}
// Error renders the response as "[PUT /{uuid}][200] modifyConnectionOK ".
func (o *ModifyConnectionOK) Error() string {
	const statusCode = 200
	return fmt.Sprintf("[PUT /{uuid}][%d] modifyConnectionOK ", statusCode)
}
// readResponse consumes the wire response; a 200 carries no payload, so
// there is nothing to decode.
func (o *ModifyConnectionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
|
// Package record implements functions for marshaling and unmarshaling individual Kafka records.
package record
import "github.com/mkocikowski/libkafka/varint"
// Unmarshal decodes a single Kafka record from b. Fields are read in wire
// order: length, attributes, timestamp delta, offset delta, key length, key,
// value length, value. Non-positive key/value lengths leave Key/Value nil.
//
// Fix over the previous version: the value copy advanced `n` instead of
// `offset`, and the early return for an absent value skipped the
// `offset += n` bookkeeping — harmless today because offset was no longer
// used, but a trap for the planned header parsing (see TODO below).
func Unmarshal(b []byte) (*Record, error) { // TODO: errors
	r := &Record{}
	var offset, n int
	r.Len, offset = varint.DecodeZigZag64(b)
	r.Attributes = int8(b[offset])
	offset++
	r.TimestampDelta, n = varint.DecodeZigZag64(b[offset:])
	offset += n
	r.OffsetDelta, n = varint.DecodeZigZag64(b[offset:])
	offset += n
	r.KeyLen, n = varint.DecodeZigZag64(b[offset:])
	offset += n
	// TODO: remove copy
	if r.KeyLen > 0 {
		r.Key = make([]byte, r.KeyLen)
	}
	offset += copy(r.Key, b[offset:])
	r.ValueLen, n = varint.DecodeZigZag64(b[offset:])
	offset += n
	if r.ValueLen < 1 {
		return r, nil
	}
	r.Value = make([]byte, r.ValueLen)
	offset += copy(r.Value, b[offset:])
	_ = offset // will be needed when headers are parsed; TODO: headers
	return r, nil // TODO: errors
}
// New returns a Record wrapping the given key and value, with the length
// fields populated to match. The slices are referenced, not copied.
func New(key, value []byte) *Record {
	r := &Record{Key: key, Value: value}
	r.KeyLen = int64(len(key))
	r.ValueLen = int64(len(value))
	return r
}
// Record is an individual Kafka record as carried inside a record batch.
type Record struct {
	Len            int64 // total record length prefix as encoded/decoded on the wire
	Attributes     int8
	TimestampDelta int64 // delta relative to the enclosing batch (per the Kafka record format)
	OffsetDelta    int64 // delta relative to the enclosing batch (per the Kafka record format)
	KeyLen         int64 // length of Key; non-positive means no key
	Key            []byte
	ValueLen       int64 // length of Value; non-positive means no value
	Value          []byte
	// TODO: headers
}
// Marshal encodes the record body (attributes, deltas, key, value, and an
// empty header count) and prefixes it with its own zig-zag varint length.
func (r *Record) Marshal() []byte {
	var body []byte
	body = append(body, varint.EncodeZigZag64(int64(r.Attributes))...)
	body = append(body, varint.EncodeZigZag64(r.TimestampDelta)...)
	body = append(body, varint.EncodeZigZag64(r.OffsetDelta)...)
	body = append(body, varint.EncodeZigZag64(r.KeyLen)...)
	body = append(body, r.Key...)
	body = append(body, varint.EncodeZigZag64(r.ValueLen)...)
	body = append(body, r.Value...)
	body = append(body, varint.EncodeZigZag64(0)...) // no headers
	out := varint.EncodeZigZag64(int64(len(body)))
	return append(out, body...)
}
|
/*
* @lc app=leetcode id=11 lang=golang
*
* [11] Container With Most Water
*
* https://leetcode.com/problems/container-with-most-water/description/
*
* algorithms
* Medium (50.24%)
* Likes: 6372
* Dislikes: 595
* Total Accepted: 678.1K
* Total Submissions: 1.3M
* Testcase Example: '[1,8,6,2,5,4,8,3,7]'
*
* Given n non-negative integers a1, a2, ..., an , where each represents a
* point at coordinate (i, ai). n vertical lines are drawn such that the two
* endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together
* with x-axis forms a container, such that the container contains the most
* water.
*
* Note: You may not slant the container and n is at least 2.
*
*
*
*
*
* The above vertical lines are represented by array [1,8,6,2,5,4,8,3,7]. In
* this case, the max area of water (blue section) the container can contain is
* 49.
*
*
*
* Example:
*
*
* Input: [1,8,6,2,5,4,8,3,7]
* Output: 49
*/
// @lc code=start
// maxArea is the LeetCode entry point; it delegates to the O(n) two-pointer
// implementation (maxArea2). maxArea1 is kept as the brute-force reference.
func maxArea(height []int) int {
	return maxArea2(height)
}
// maxArea2 solves the problem with the classic two-pointer sweep: start with
// the widest window and repeatedly move the shorter side inward — moving the
// taller side can never increase the area. O(n) time, O(1) space.
func maxArea2(height []int) int {
	best := 0
	for lo, hi := 0, len(height)-1; lo < hi; {
		h := height[lo]
		if height[hi] < h {
			h = height[hi]
		}
		if area := h * (hi - lo); area > best {
			best = area
		}
		if height[lo] < height[hi] {
			lo++
		} else {
			hi--
		}
	}
	return best
}
// maxArea1 is the O(n^2) brute force kept for reference: try every pair of
// lines and keep the largest area seen.
func maxArea1(height []int) int {
	best := 0
	for i, left := range height {
		for j := i + 1; j < len(height); j++ {
			h := left
			if height[j] < h {
				h = height[j]
			}
			if area := h * (j - i); area > best {
				best = area
			}
		}
	}
	return best
}
// @lc code=end |
package config
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
"os"
"path/filepath"
)
// InitViper points viper at the config file named filename+extension under
// configLocation, creating an empty file first if none exists, then reads it.
// It panics if viper cannot read/parse the file.
func InitViper(configLocation, filename, extension string) {
	CheckConfigFile(configLocation, filename+extension)
	// Set up viper config library
	viper.SetConfigName(filename)
	viper.AddConfigPath(configLocation)
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
}
// CheckConfigFile ensures a config file named filename exists under path,
// creating an empty one when missing. (Despite the original comment, nothing
// is downloaded — CreateFile only creates an empty file.)
func CheckConfigFile(path, filename string) {
	// Check to see if the config file exists; if not, create it.
	fullPath := filepath.Join(path, filename)
	if !fileExists(fullPath) {
		CreateFile(fullPath)
	}
}
// fileExists reports whether filename exists and is a regular file (not a
// directory).
//
// Fix: the previous version only handled os.IsNotExist and then called
// info.IsDir() unconditionally — any other stat error (e.g. a permission
// failure) left info nil and panicked. Any stat error now counts as
// "does not exist".
func fileExists(filename string) bool {
	info, err := os.Stat(filename)
	if err != nil {
		log.Info().Msg("File not found...")
		return false
	}
	log.Info().Msg("File found...")
	return !info.IsDir()
}
// CreateFile creates (or truncates) filename, logging on failure. The handle
// is closed before returning; callers only rely on the file existing.
//
// Fix: the previous version registered `defer f.Close()` before checking the
// error, closing a nil/invalid handle on failure. Check the error first,
// then defer the close on the valid handle.
func CreateFile(filename string) {
	f, err := os.Create(filename)
	if err != nil {
		log.Error().Err(err).Msg(fmt.Sprintf("There was an error while creating the file. %s", filename))
		return
	}
	defer f.Close()
}
|
// Package operator contains main implementation of Flatcar Linux Update Operator.
package operator
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"github.com/coreos/locksmith/pkg/timeutil"
"github.com/kinvolk/flatcar-linux-update-operator/pkg/constants"
"github.com/kinvolk/flatcar-linux-update-operator/pkg/k8sutil"
)
const (
	leaderElectionEventSourceComponent = "update-operator-leader-election"
	// maxRebootingNodes caps how many nodes may be rebooting (or running
	// before/after-reboot checks) at the same time.
	maxRebootingNodes = 1

	leaderElectionResourceName = "flatcar-linux-update-operator-lock"

	// Arbitrarily copied from KVO.
	defaultLeaderElectionLease = 90 * time.Second
	// Default period between reconciliation runs.
	defaultReconciliationPeriod = 30 * time.Second
)
var (
	// justRebootedSelector is a selector for combination of annotations
	// expected to be on a node after it has completed a reboot.
	//
	// The update-operator sets constants.AnnotationOkToReboot to true to
	// trigger a reboot, and the update-agent sets
	// constants.AnnotationRebootNeeded and
	// constants.AnnotationRebootInProgress to false when it has finished.
	justRebootedSelector = fields.Set(map[string]string{
		constants.AnnotationOkToReboot:       constants.True,
		constants.AnnotationRebootNeeded:     constants.False,
		constants.AnnotationRebootInProgress: constants.False,
	}).AsSelector()

	// rebootableSelector is a selector for the annotation expected to be on a node when it can be rebooted.
	//
	// The update-agent sets constants.AnnotationRebootNeeded to true when
	// it would like to reboot, and false when it starts up.
	//
	// If constants.AnnotationRebootPaused is set to "true", the update-agent will not consider it for rebooting.
	rebootableSelector = fields.ParseSelectorOrDie(constants.AnnotationRebootNeeded + "==" + constants.True +
		"," + constants.AnnotationRebootPaused + "!=" + constants.True +
		"," + constants.AnnotationOkToReboot + "!=" + constants.True +
		"," + constants.AnnotationRebootInProgress + "!=" + constants.True)

	// stillRebootingSelector is a selector for the annotation set expected to be
	// on a node when it's in the process of rebooting.
	stillRebootingSelector = fields.Set(map[string]string{
		constants.AnnotationOkToReboot:   constants.True,
		constants.AnnotationRebootNeeded: constants.True,
	}).AsSelector()

	// beforeRebootReq requires a node to be waiting for before reboot checks to complete.
	beforeRebootReq = k8sutil.NewRequirementOrDie(constants.LabelBeforeReboot, selection.In, []string{constants.True})

	// afterRebootReq requires a node to be waiting for after reboot checks to complete.
	afterRebootReq = k8sutil.NewRequirementOrDie(constants.LabelAfterReboot, selection.In, []string{constants.True})

	// notBeforeRebootReq is the inverse of beforeRebootReq, selecting nodes
	// not currently labeled for before-reboot checks.
	//
	//nolint:lll
	notBeforeRebootReq = k8sutil.NewRequirementOrDie(constants.LabelBeforeReboot, selection.NotIn, []string{constants.True})
)
// Kontroller implements the operator part of FLUO.
type Kontroller struct {
	kc kubernetes.Interface
	nc corev1client.NodeInterface

	// Annotations to look for before and after reboots.
	beforeRebootAnnotations []string
	afterRebootAnnotations  []string

	// Recorder used to emit leader-election events.
	leaderElectionEventRecorder record.EventRecorder

	// Namespace is the kubernetes namespace any resources (e.g. locks,
	// configmaps, agents) should be created and read under.
	// It will be set to the namespace the operator is running in automatically.
	namespace string

	// Auto-label Flatcar Container Linux nodes for migration compatibility.
	autoLabelContainerLinux bool

	// Reboot window; nil means reboots are allowed at any time.
	rebootWindow *timeutil.Periodic

	maxRebootingNodes int

	reconciliationPeriod time.Duration
	leaderElectionLease  time.Duration

	// lockID identifies this instance in the leader-election lock.
	lockID string
}
// Config configures a Kontroller.
type Config struct {
	// Kubernetes client.
	Client kubernetes.Interface
	// Migration compatibility.
	AutoLabelContainerLinux bool
	// Annotations to look for before and after reboots.
	BeforeRebootAnnotations []string
	AfterRebootAnnotations  []string
	// Reboot window. Both fields must be set for a window to take effect.
	RebootWindowStart  string
	RebootWindowLength string
	// Namespace to create/read operator resources under.
	Namespace string
	// LockID identifies this instance in the leader-election lock.
	LockID string
}
// New initializes a new Kontroller.
//
// Client, Namespace and LockID are required; RebootWindowStart/Length are
// optional but must be given together and parse as a valid periodic window.
func New(config Config) (*Kontroller, error) {
	// Kubernetes client.
	if config.Client == nil {
		return nil, fmt.Errorf("kubernetes client must not be nil")
	}
	if config.Namespace == "" {
		return nil, fmt.Errorf("namespace must not be empty")
	}
	if config.LockID == "" {
		return nil, fmt.Errorf("lockID must not be empty")
	}
	var rebootWindow *timeutil.Periodic
	if config.RebootWindowStart != "" && config.RebootWindowLength != "" {
		rw, err := timeutil.ParsePeriodic(config.RebootWindowStart, config.RebootWindowLength)
		if err != nil {
			return nil, fmt.Errorf("parsing reboot window: %w", err)
		}
		rebootWindow = rw
	}
	kc := config.Client
	// Create event emitter.
	// NOTE(review): this broadcaster is started but no recorder is created
	// from it here — confirm whether it is still needed.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: kc.CoreV1().Events("")})
	// Separate broadcaster/recorder pair dedicated to leader-election events.
	leaderElectionBroadcaster := record.NewBroadcaster()
	leaderElectionBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{
		Interface: corev1client.New(config.Client.CoreV1().RESTClient()).Events(""),
	})
	leaderElectionEventRecorder := leaderElectionBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{
		Component: leaderElectionEventSourceComponent,
	})
	return &Kontroller{
		kc:                          kc,
		nc:                          kc.CoreV1().Nodes(),
		beforeRebootAnnotations:     config.BeforeRebootAnnotations,
		afterRebootAnnotations:      config.AfterRebootAnnotations,
		leaderElectionEventRecorder: leaderElectionEventRecorder,
		namespace:                   config.Namespace,
		autoLabelContainerLinux:     config.AutoLabelContainerLinux,
		rebootWindow:                rebootWindow,
		maxRebootingNodes:           maxRebootingNodes,
		reconciliationPeriod:        defaultReconciliationPeriod,
		leaderElectionLease:         defaultLeaderElectionLease,
		lockID:                      config.LockID,
	}, nil
}
// Run starts the operator reconcilitation process and runs until the stop
// channel is closed. The returned error is nil on a clean stop, or the
// reason leadership was lost.
func (k *Kontroller) Run(stop <-chan struct{}) error {
	err := make(chan error, 1)
	// Leader election is responsible for shutting down the controller, so when leader election
	// is lost, controller is immediately stopped, as shared context will be cancelled.
	ctx := k.withLeaderElection(stop, err)

	// Start Flatcar Container Linux node auto-labeler.
	if k.autoLabelContainerLinux {
		go wait.Until(func() { k.legacyLabeler(ctx) }, k.reconciliationPeriod, ctx.Done())
	}

	klog.V(5).Info("starting controller")

	// Call the process loop each period, until stop is closed.
	wait.Until(func() { k.process(ctx) }, k.reconciliationPeriod, ctx.Done())

	klog.V(5).Info("stopping controller")

	return <-err
}
// withLeaderElection creates a new context which is cancelled when this
// operator does not hold a lock to operate on the cluster.
//
// It blocks until leadership is acquired. The err channel receives nil on a
// user-requested stop, or a non-nil error when leadership is lost.
func (k *Kontroller) withLeaderElection(stop <-chan struct{}, err chan<- error) context.Context {
	resLock := &resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Namespace: k.namespace,
			Name:      leaderElectionResourceName,
		},
		Client: k.kc.CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      k.lockID,
			EventRecorder: k.leaderElectionEventRecorder,
		},
	}

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		// When user requests to stop the controller, cancel context to interrupt any ongoing operation.
		<-stop
		err <- nil
		cancel()
	}()

	waitLeading := make(chan struct{})
	go func() {
		// Lease values inspired by a combination of
		// https://github.com/kubernetes/kubernetes/blob/f7c07a121d2afadde7aa15b12a9d02858b30a0a9/pkg/apis/componentconfig/v1alpha1/defaults.go#L163-L174
		// and the KVO values
		// See also
		// https://github.com/kubernetes/kubernetes/blob/fc31dae165f406026142f0dd9a98cada8474682a/pkg/client/leaderelection/leaderelection.go#L17
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:          resLock,
			LeaseDuration: k.leaderElectionLease,
			//nolint:gomnd // Set renew deadline to 2/3rd of the lease duration to give
			//             // controller enough time to renew the lease.
			RenewDeadline: k.leaderElectionLease * 2 / 3,
			//nolint:gomnd // Retry duration is usually around 1/10th of lease duration,
			//             // but given low dynamics of FLUO, 1/3rd should also be fine.
			RetryPeriod: k.leaderElectionLease / 3,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: func(ctx context.Context) { // was: func(stop <-chan struct{
					klog.V(5).Info("started leading")
					waitLeading <- struct{}{}
				},
				OnStoppedLeading: func() {
					err <- fmt.Errorf("leaderelection lost")
					cancel()
				},
			},
		})
	}()

	// Block until this instance becomes the leader.
	<-waitLeading

	return ctx
}
// process performs the reconcilitation to coordinate reboots.
//
// Each step aborts the cycle on error; the next periodic invocation retries
// from the top. The steps run in reverse pipeline order (after-reboot handling
// first) so a node progresses at most one phase per cycle.
func (k *Kontroller) process(ctx context.Context) {
	klog.V(4).Info("Going through a loop cycle")

	// First make sure that all of our nodes are in a well-defined state with
	// respect to our annotations and labels, and if they are not, then try to
	// fix them.
	klog.V(4).Info("Cleaning up node state")
	if err := k.cleanupState(ctx); err != nil {
		klog.Errorf("Failed to cleanup node state: %v", err)
		return
	}

	// Find nodes with the after-reboot=true label and check if all provided
	// annotations are set. if all annotations are set to true then remove the
	// after-reboot=true label and set reboot-ok=false, telling the agent that
	// the reboot has completed.
	klog.V(4).Info("Checking if configured after-reboot annotations are set to true")
	if err := k.checkAfterReboot(ctx); err != nil {
		klog.Errorf("Failed to check after reboot: %v", err)
		return
	}

	// Find nodes which just rebooted but haven't run after-reboot checks.
	// remove after-reboot annotations and add the after-reboot=true label.
	klog.V(4).Info("Labeling rebooted nodes with after-reboot label")
	if err := k.markAfterReboot(ctx); err != nil {
		klog.Errorf("Failed to update recently rebooted nodes: %v", err)
		return
	}

	// Find nodes with the before-reboot=true label and check if all provided
	// annotations are set. if all annotations are set to true then remove the
	// before-reboot=true label and set reboot=ok=true, telling the agent it's
	// time to reboot.
	klog.V(4).Info("Checking if configured before-reboot annotations are set to true")
	if err := k.checkBeforeReboot(ctx); err != nil {
		klog.Errorf("Failed to check before reboot: %v", err)
		return
	}

	// Take some number of the rebootable nodes. remove before-reboot
	// annotations and add the before-reboot=true label.
	klog.V(4).Info("Labeling rebootable nodes with before-reboot label")
	if err := k.markBeforeReboot(ctx); err != nil {
		klog.Errorf("Failed to update rebootable nodes: %v", err)
		return
	}
}
// cleanupState attempts to make sure nodes are in a well-defined state before
// performing state changes on them.
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) cleanupState(ctx context.Context) error {
	nodelist, err := k.nc.List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("listing nodes: %w", err)
	}

	for _, n := range nodelist.Items {
		err = k8sutil.UpdateNodeRetry(ctx, k.nc, n.Name, func(node *corev1.Node) {
			// Make sure that nodes with the before-reboot label actually
			// still want to reboot.
			if _, exists := node.Labels[constants.LabelBeforeReboot]; !exists {
				return
			}
			if rebootableSelector.Matches(fields.Set(node.Annotations)) {
				return
			}
			// The node dropped its reboot request mid-flight: roll back the
			// label and any before-reboot annotations we may have cleared.
			klog.Warningf("Node %q no longer wanted to reboot while we were trying to label it so: %v",
				node.Name, node.Annotations)
			delete(node.Labels, constants.LabelBeforeReboot)
			for _, annotation := range k.beforeRebootAnnotations {
				delete(node.Annotations, annotation)
			}
		})
		if err != nil {
			return fmt.Errorf("cleaning up node %q: %w", n.Name, err)
		}
	}

	return nil
}
// checkRebootOptions bundles the parameters for checkReboot.
type checkRebootOptions struct {
	// req selects nodes carrying the relevant reboot-phase label.
	req *labels.Requirement
	// annotations must all be set to "true" on a node before it is processed.
	annotations []string
	// label is removed from the node once processed.
	label string
	// okToReboot is the value written to the ok-to-reboot annotation.
	okToReboot string
}
// checkReboot gets all nodes with a given requirement and checks if all of the given annotations are set to true.
//
// If they are, it deletes given annotations and label, then sets ok-to-reboot annotation to either true or false,
// depending on the given parameter.
//
// If ok-to-reboot is set to true, it gives node agent a signal that it is OK to proceed with rebooting.
//
// If ok-to-reboot is set to false, it means node has finished rebooting successfully.
//
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) checkReboot(ctx context.Context, opt checkRebootOptions) error {
	nodelist, err := k.nc.List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("listing nodes: %w", err)
	}

	nodes := k8sutil.FilterNodesByRequirement(nodelist.Items, opt.req)

	for _, n := range nodes {
		// Skip nodes whose check annotations are not yet all "true".
		if !hasAllAnnotations(n, opt.annotations) {
			continue
		}

		klog.V(4).Infof("Deleting label %q for %q", opt.label, n.Name)
		klog.V(4).Infof("Setting annotation %q to %q for %q",
			constants.AnnotationOkToReboot, opt.okToReboot, n.Name)

		if err := k8sutil.UpdateNodeRetry(ctx, k.nc, n.Name, func(node *corev1.Node) {
			delete(node.Labels, opt.label)

			// Cleanup the annotations.
			for _, annotation := range opt.annotations {
				klog.V(4).Infof("Deleting annotation %q from node %q", annotation, node.Name)

				delete(node.Annotations, annotation)
			}

			node.Annotations[constants.AnnotationOkToReboot] = opt.okToReboot
		}); err != nil {
			return fmt.Errorf("updating node %q: %w", n.Name, err)
		}
	}

	return nil
}
// checkBeforeReboot gets all nodes with the before-reboot=true label and checks
// if all of the configured before-reboot annotations are set to true. If they
// are, it deletes the before-reboot=true label and sets reboot-ok=true to tell
// the agent that it is ready to start the actual reboot process.
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) checkBeforeReboot(ctx context.Context) error {
	opt := checkRebootOptions{
		req:         beforeRebootReq,
		annotations: k.beforeRebootAnnotations,
		label:       constants.LabelBeforeReboot,
		okToReboot:  constants.True,
	}

	return k.checkReboot(ctx, opt)
}
// checkAfterReboot gets all nodes with the after-reboot=true label and checks
// if all of the configured after-reboot annotations are set to true. If they
// are, it deletes the after-reboot=true label and sets reboot-ok=false to tell
// the agent that it has completed its reboot successfully.
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) checkAfterReboot(ctx context.Context) error {
	opt := checkRebootOptions{
		req:         afterRebootReq,
		annotations: k.afterRebootAnnotations,
		label:       constants.LabelAfterReboot,
		okToReboot:  constants.False,
	}

	return k.checkReboot(ctx, opt)
}
// insideRebootWindow checks if process is inside reboot window at the time
// of calling this function.
//
// If reboot window is not configured, true is always returned.
//
// Fix: capture time.Now() once so the "previous window" lookup and the
// comparison agree on the same instant (previously two separate calls
// could straddle a window boundary).
func (k *Kontroller) insideRebootWindow() bool {
	if k.rebootWindow == nil {
		return true
	}

	now := time.Now()

	// Most recent reboot window might still be open.
	mostRecentRebootWindow := k.rebootWindow.Previous(now)

	return now.Before(mostRecentRebootWindow.End)
}
// remainingRebootingCapacity calculates how many more nodes can be rebooted
// at a time based on a given list of nodes.
//
// Nodes running before- and after-reboot checks are still considered to be
// "rebooting". When capacity is exhausted the rebooting nodes are logged.
// The result can be negative when more nodes than the maximum are in flight;
// callers must treat any non-positive value as "no capacity".
func (k *Kontroller) remainingRebootingCapacity(nodelist *corev1.NodeList) int {
	rebootingNodes := k8sutil.FilterNodesByAnnotation(nodelist.Items, stillRebootingSelector)

	// Nodes running before and after reboot checks are still considered to be "rebooting" to us.
	beforeRebootNodes := k8sutil.FilterNodesByRequirement(nodelist.Items, beforeRebootReq)
	afterRebootNodes := k8sutil.FilterNodesByRequirement(nodelist.Items, afterRebootReq)

	rebootingNodes = append(append(rebootingNodes, beforeRebootNodes...), afterRebootNodes...)

	remainingCapacity := maxRebootingNodes - len(rebootingNodes)

	// Use <= so the "waiting" diagnostics also fire when we are over
	// capacity (previously only the exact == 0 case was logged).
	if remainingCapacity <= 0 {
		for _, n := range rebootingNodes {
			klog.Infof("Found node %q still rebooting, waiting", n.Name)
		}

		klog.Infof("Found %d (of max %d) rebooting nodes; waiting for completion", len(rebootingNodes), maxRebootingNodes)
	}

	return remainingCapacity
}
// nodesRequiringReboot filters given list of nodes and returns ones which
// require a reboot: nodes matching rebootableSelector that are not already
// labeled for before-reboot checks.
func (k *Kontroller) nodesRequiringReboot(nodelist *corev1.NodeList) []corev1.Node {
	rebootableNodes := k8sutil.FilterNodesByAnnotation(nodelist.Items, rebootableSelector)

	return k8sutil.FilterNodesByRequirement(rebootableNodes, notBeforeRebootReq)
}
// rebootableNodes returns list of nodes which can be marked for rebooting
// based on remaining capacity.
//
// Fix: remainingRebootingCapacity can return a negative value when more
// nodes than the maximum are already in flight; passing that negative value
// to make() as a capacity panics. Clamp it to zero first.
func (k *Kontroller) rebootableNodes(nodelist *corev1.NodeList) []*corev1.Node {
	remainingCapacity := k.remainingRebootingCapacity(nodelist)
	if remainingCapacity < 0 {
		remainingCapacity = 0
	}

	nodesRequiringReboot := k.nodesRequiringReboot(nodelist)

	chosenNodes := make([]*corev1.Node, 0, remainingCapacity)
	for i := 0; i < remainingCapacity && i < len(nodesRequiringReboot); i++ {
		chosenNodes = append(chosenNodes, &nodesRequiringReboot[i])
	}

	klog.Infof("Found %d nodes that need a reboot", len(chosenNodes))

	return chosenNodes
}
// markBeforeReboot gets nodes which want to reboot and marks them with the
// before-reboot=true label. This is considered the beginning of the reboot
// process from the perspective of the update-operator. It will only mark
// nodes with this label up to the maximum number of concurrently rebootable
// nodes as configured with the maxRebootingNodes constant. It also checks if
// we are inside the reboot window.
// It cleans up the before-reboot annotations before it applies the label, in
// case there are any left over from the last reboot.
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) markBeforeReboot(ctx context.Context) error {
	nodelist, err := k.nc.List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("listing nodes: %w", err)
	}

	if !k.insideRebootWindow() {
		klog.V(4).Info("We are outside the reboot window; not labeling rebootable nodes for now")

		return nil
	}

	// Set before-reboot=true for the chosen nodes.
	for _, n := range k.rebootableNodes(nodelist) {
		err = k.mark(ctx, n.Name, constants.LabelBeforeReboot, "before-reboot", k.beforeRebootAnnotations)
		if err != nil {
			return fmt.Errorf("labeling node for before reboot checks: %w", err)
		}
	}

	return nil
}
// markAfterReboot gets nodes which have completed rebooting and marks them with
// the after-reboot=true label. A node with the after-reboot=true label is still
// considered to be rebooting from the perspective of the update-operator, even
// though it has completed rebooting from the machines perspective.
// It cleans up the after-reboot annotations before it applies the label, in
// case there are any left over from the last reboot.
// If there is an error getting the list of nodes or updating any of them, an
// error is immediately returned.
func (k *Kontroller) markAfterReboot(ctx context.Context) error {
	nodelist, err := k.nc.List(ctx, metav1.ListOptions{
		// Filter out any nodes that are already labeled with after-reboot=true.
		LabelSelector: fmt.Sprintf("%s!=%s", constants.LabelAfterReboot, constants.True),
	})
	if err != nil {
		return fmt.Errorf("listing nodes: %w", err)
	}

	// Find nodes which just rebooted.
	justRebootedNodes := k8sutil.FilterNodesByAnnotation(nodelist.Items, justRebootedSelector)

	klog.Infof("Found %d rebooted nodes", len(justRebootedNodes))

	// For all the nodes which just rebooted, remove any old annotations and add the after-reboot=true label.
	for _, n := range justRebootedNodes {
		err = k.mark(ctx, n.Name, constants.LabelAfterReboot, "after-reboot", k.afterRebootAnnotations)
		if err != nil {
			return fmt.Errorf("labeling node for after reboot checks: %w", err)
		}
	}

	return nil
}
// mark deletes the given annotations from the named node and sets the given
// label to constants.True, retrying the node update on conflict.
// annotationsType is used purely for the log message ("before-reboot" or
// "after-reboot"). A non-nil error means the node update ultimately failed.
func (k *Kontroller) mark(ctx context.Context, nodeName, label, annotationsType string, annotations []string) error {
	klog.V(4).Infof("Deleting annotations %v for %q", annotations, nodeName)
	klog.V(4).Infof("Setting label %q to %q for node %q", label, constants.True, nodeName)

	// Remove stale annotations and apply the label in a single retried update.
	mutate := func(n *corev1.Node) {
		for _, key := range annotations {
			delete(n.Annotations, key)
		}
		n.Labels[label] = constants.True
	}
	if err := k8sutil.UpdateNodeRetry(ctx, k.nc, nodeName, mutate); err != nil {
		return fmt.Errorf("setting label %q to %q on node %q: %w", label, constants.True, nodeName, err)
	}

	if len(annotations) > 0 {
		klog.Infof("Waiting for %s annotations on node %q: %v", annotationsType, nodeName, annotations)
	}
	return nil
}
// hasAllAnnotations reports whether every annotation key in annotations is
// present on node with the value constants.True.
func hasAllAnnotations(node corev1.Node, annotations []string) bool {
	current := node.GetAnnotations()
	for _, key := range annotations {
		if value, ok := current[key]; !ok || value != constants.True {
			return false
		}
	}
	return true
}
|
package bench
import (
"bytes"
"compress/gzip"
"io/ioutil"
"os"
"testing"
)
// codeJSON holds the decompressed contents of testdata/code.json.gz,
// loaded lazily by codeInit.
var codeJSON []byte

// codeStruct is the decoded form of codeJSON; it is the input for the
// encoder benchmark and the target for the round-trip check in codeInit.
var codeStruct = &codeResponse{}
// codeInit loads testdata/code.json.gz into codeJSON, decodes it into
// codeStruct, and verifies that re-encoding codeStruct reproduces the
// original bytes. Any failure aborts the benchmark run via panic.
func codeInit() {
	f, err := os.Open("testdata/code.json.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(gz)
	if err != nil {
		panic(err)
	}
	codeJSON = raw

	if err := NewcodeResponseJSONDecoder(bytes.NewBuffer(codeJSON)).Decode(&codeStruct); err != nil {
		panic("decode code.json: " + err.Error())
	}

	var out bytes.Buffer
	if err := NewcodeResponseJSONEncoder(&out).Encode(codeStruct); err != nil {
		panic("encode code.json: " + err.Error())
	}

	// Round-trip check: the re-encoded bytes must match the original exactly.
	reencoded := out.Bytes()
	if bytes.Equal(reencoded, codeJSON) {
		return
	}
	println("different lengths", len(reencoded), len(codeJSON))
	for i := 0; i < len(reencoded) && i < len(codeJSON); i++ {
		if reencoded[i] != codeJSON[i] {
			println("re-marshal: changed at byte", i)
			println("orig: ", string(codeJSON[i-10:i+10]))
			println("new: ", string(reencoded[i-10:i+10]))
			break
		}
	}
	panic("re-marshal code.json: different result")
}
// BenchmarkCodeEncoder measures encoding codeStruct with the generated
// encoder; throughput is reported relative to the size of codeJSON.
func BenchmarkCodeEncoder(b *testing.B) {
	if codeJSON == nil {
		// One-time fixture load, excluded from the timed region.
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	enc := NewcodeResponseJSONEncoder(ioutil.Discard)
	for n := 0; n < b.N; n++ {
		if err := enc.Encode(codeStruct); err != nil {
			b.Fatal("Encode:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}
// BenchmarkCodeDecoder measures decoding codeJSON with the generated
// decoder; throughput is reported relative to the size of codeJSON.
func BenchmarkCodeDecoder(b *testing.B) {
	if codeJSON == nil {
		// One-time fixture load, excluded from the timed region.
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	var stream bytes.Buffer
	dec := NewcodeResponseJSONDecoder(&stream)
	out := &codeResponse{}
	for n := 0; n < b.N; n++ {
		stream.Write(codeJSON)
		// hide EOF
		stream.WriteByte('\n')
		stream.WriteByte('\n')
		stream.WriteByte('\n')
		if err := dec.Decode(&out); err != nil {
			b.Fatal("Decode:", err)
		}
	}
	b.SetBytes(int64(len(codeJSON)))
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opbench
import "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat"
// TODO(justin): pull schema definitions like this into a file that can be
// imported here as well as in the data-driven tests.

// MakeTPCHCatalog returns a test catalog loaded with the TPCH schema
// and statistics.
//
// The DDL below creates the eight TPC-H tables (region, nation, supplier,
// part, partsupp, customer, orders, lineitem) with their indexes and foreign
// keys, and injects fixed per-column statistics via
// ALTER TABLE ... INJECT STATISTICS so that benchmark plans are deterministic.
// NOTE: the statement text is a raw string literal; do not reformat it, since
// its bytes are passed verbatim to ExecuteMultipleDDL.
func MakeTPCHCatalog() *testcat.Catalog {
	cat := testcat.New()
	if err := cat.ExecuteMultipleDDL(`
CREATE TABLE public.region (
r_regionkey INT8 PRIMARY KEY,
r_name CHAR(25) NOT NULL,
r_comment VARCHAR(152)
);
ALTER TABLE region INJECT STATISTICS '[
{
"columns": [
"r_regionkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 5
},
{
"columns": [
"r_name"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 5
},
{
"columns": [
"r_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 5
}
]';
CREATE TABLE public.nation (
n_nationkey INT8 PRIMARY KEY,
n_name CHAR(25) NOT NULL,
n_regionkey INT8 NOT NULL,
n_comment VARCHAR(152),
INDEX n_rk (n_regionkey ASC),
CONSTRAINT nation_fkey_region FOREIGN KEY (n_regionkey) REFERENCES public.region (r_regionkey)
);
ALTER TABLE nation INJECT STATISTICS '[
{
"columns": [
"n_nationkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 25
},
{
"columns": [
"n_regionkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 25
},
{
"columns": [
"n_name"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 25
},
{
"columns": [
"n_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 25
}
]';
CREATE TABLE public.supplier (
s_suppkey INT8 PRIMARY KEY,
s_name CHAR(25) NOT NULL,
s_address VARCHAR(40) NOT NULL,
s_nationkey INT8 NOT NULL,
s_phone CHAR(15) NOT NULL,
s_acctbal FLOAT8 NOT NULL,
s_comment VARCHAR(101) NOT NULL,
INDEX s_nk (s_nationkey ASC),
CONSTRAINT supplier_fkey_nation FOREIGN KEY (s_nationkey) REFERENCES public.nation (n_nationkey)
);
ALTER TABLE supplier INJECT STATISTICS '[
{
"columns": [
"s_suppkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10034,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_nationkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_name"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 9990,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_address"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10027,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_phone"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10021,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_acctbal"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 9967,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 10000
},
{
"columns": [
"s_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 9934,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 10000
}
]';
CREATE TABLE public.part (
p_partkey INT8 PRIMARY KEY,
p_name VARCHAR(55) NOT NULL,
p_mfgr CHAR(25) NOT NULL,
p_brand CHAR(10) NOT NULL,
p_type VARCHAR(25) NOT NULL,
p_size INT8 NOT NULL,
p_container CHAR(10) NOT NULL,
p_retailprice FLOAT8 NOT NULL,
p_comment VARCHAR(23) NOT NULL
);
ALTER TABLE part INJECT STATISTICS '[
{
"columns": [
"p_partkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 199810,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_name"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 198131,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_mfgr"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_brand"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_type"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 150,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_size"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 50,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_container"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 40,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_retailprice"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 20831,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 200000
},
{
"columns": [
"p_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 132344,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 200000
}
]';
CREATE TABLE public.partsupp (
ps_partkey INT8 NOT NULL,
ps_suppkey INT8 NOT NULL,
ps_availqty INT8 NOT NULL,
ps_supplycost FLOAT8 NOT NULL,
ps_comment VARCHAR(199) NOT NULL,
PRIMARY KEY (ps_partkey, ps_suppkey),
INDEX ps_sk (ps_suppkey ASC),
CONSTRAINT partsupp_fkey_part FOREIGN KEY (ps_partkey) REFERENCES public.part (p_partkey),
CONSTRAINT partsupp_fkey_supplier FOREIGN KEY (ps_suppkey) REFERENCES public.supplier (s_suppkey)
);
ALTER TABLE partsupp INJECT STATISTICS '[
{
"columns": [
"ps_partkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 199810,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 800000
},
{
"columns": [
"ps_suppkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10034,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 800000
},
{
"columns": [
"ps_availqty"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10032,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 800000
},
{
"columns": [
"ps_supplycost"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 100379,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 800000
},
{
"columns": [
"ps_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 799641,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 800000
}
]';
CREATE TABLE public.customer (
c_custkey INT8 PRIMARY KEY,
c_name VARCHAR(25) NOT NULL,
c_address VARCHAR(40) NOT NULL,
c_nationkey INT8 NOT NULL,
c_phone CHAR(15) NOT NULL,
c_acctbal FLOAT8 NOT NULL,
c_mktsegment CHAR(10) NOT NULL,
c_comment VARCHAR(117) NOT NULL,
INDEX c_nk (c_nationkey ASC),
CONSTRAINT customer_fkey_nation FOREIGN KEY (c_nationkey) REFERENCES public.nation (n_nationkey)
);
ALTER TABLE customer INJECT STATISTICS '[
{
"columns": [
"c_custkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 150097,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_nationkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 25,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_name"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 151126,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_address"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 149937,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_phone"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 150872,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_acctbal"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 140628,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_mktsegment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 150000
},
{
"columns": [
"c_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 149323,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 150000
}
]';
CREATE TABLE public.orders (
o_orderkey INT8 PRIMARY KEY,
o_custkey INT8 NOT NULL,
o_orderstatus CHAR NOT NULL,
o_totalprice FLOAT8 NOT NULL,
o_orderdate DATE NOT NULL,
o_orderpriority CHAR(15) NOT NULL,
o_clerk CHAR(15) NOT NULL,
o_shippriority INT8 NOT NULL,
o_comment VARCHAR(79) NOT NULL,
INDEX o_ck (o_custkey ASC),
INDEX o_od (o_orderdate ASC),
CONSTRAINT orders_fkey_customer FOREIGN KEY (o_custkey) REFERENCES public.customer (c_custkey)
);
ALTER TABLE orders INJECT STATISTICS '[
{
"columns": [
"o_orderkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1508717,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_custkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 99837,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_orderdate"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 2406,
"histo_col_type": "date",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_orderstatus"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 3,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_totalprice"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1459167,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_orderpriority"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 5,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_clerk"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1000,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_shippriority"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 1500000
},
{
"columns": [
"o_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1469402,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 1500000
}
]';
CREATE TABLE public.lineitem (
l_orderkey INT8 NOT NULL,
l_partkey INT8 NOT NULL,
l_suppkey INT8 NOT NULL,
l_linenumber INT8 NOT NULL,
l_quantity FLOAT8 NOT NULL,
l_extendedprice FLOAT8 NOT NULL,
l_discount FLOAT8 NOT NULL,
l_tax FLOAT8 NOT NULL,
l_returnflag CHAR NOT NULL,
l_linestatus CHAR NOT NULL,
l_shipdate DATE NOT NULL,
l_commitdate DATE NOT NULL,
l_receiptdate DATE NOT NULL,
l_shipinstruct CHAR(25) NOT NULL,
l_shipmode CHAR(10) NOT NULL,
l_comment VARCHAR(44) NOT NULL,
PRIMARY KEY (l_orderkey, l_linenumber),
INDEX l_ok (l_orderkey ASC),
INDEX l_pk (l_partkey ASC),
INDEX l_sk (l_suppkey ASC),
INDEX l_sd (l_shipdate ASC),
INDEX l_cd (l_commitdate ASC),
INDEX l_rd (l_receiptdate ASC),
INDEX l_pk_sk (l_partkey ASC, l_suppkey ASC),
INDEX l_sk_pk (l_suppkey ASC, l_partkey ASC),
CONSTRAINT lineitem_fkey_orders FOREIGN KEY (l_orderkey) REFERENCES public.orders (o_orderkey),
CONSTRAINT lineitem_fkey_part FOREIGN KEY (l_partkey) REFERENCES public.part (p_partkey),
CONSTRAINT lineitem_fkey_supplier FOREIGN KEY (l_suppkey) REFERENCES public.supplier (s_suppkey),
CONSTRAINT lineitem_fkey_partsupp FOREIGN KEY (l_partkey, l_suppkey) REFERENCES public.partsupp (ps_partkey, ps_suppkey)
);
ALTER TABLE lineitem INJECT STATISTICS '[
{
"columns": [
"l_orderkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 1508717,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_partkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 199810,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_suppkey"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 10034,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_shipdate"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 2526,
"histo_col_type": "date",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_commitdate"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 2466,
"histo_col_type": "date",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_receiptdate"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 2554,
"histo_col_type": "date",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_linenumber"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 7,
"histo_col_type": "int",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_quantity"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 50,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_extendedprice"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 925955,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_discount"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 11,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_tax"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 9,
"histo_col_type": "float",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_returnflag"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 3,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_linestatus"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 2,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_shipinstruct"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 4,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_shipmode"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 7,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 6001215
},
{
"columns": [
"l_comment"
],
"created_at": "2018-01-01 1:00:00.00000+00:00",
"distinct_count": 4643303,
"histo_col_type": "string",
"name": "_",
"null_count": 0,
"row_count": 6001215
}
]';
`); err != nil {
		// The schema text is a compile-time constant, so any failure here is
		// a bug in this file rather than a runtime condition.
		panic(err)
	}
	return cat
}
|
package main
import "fmt"
// main demonstrates pointer basics: writing through a pointer mutates the
// pointed-to variable, and passing a pointer lets a callee modify a
// caller-local variable, while a plain value argument is only a copy.
func main() {
	var a1 int
	p1 := &a1
	*p1 = 1000
	fmt.Println(a1)

	x1 := 123
	x2 := 123
	hoge(x1, &x2)
	fmt.Println(x1, x2)
}
// hoge contrasts pass-by-value with pass-by-pointer: assigning to b1 only
// changes the local copy, while assigning through b2 updates the variable
// the caller passed a pointer to.
func hoge(b1 int, b2 *int) {
	b1 = 456
	*b2 = 456
}
|
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// TraceValue represents an InitializeParams Trace mode.
type TraceValue string
// list of TraceValue.
const (
// TraceOff disable tracing.
TraceOff TraceValue = "off"
// TraceMessage normal tracing mode.
TraceMessage TraceValue = "message"
// TraceVerbose verbose tracing mode.
TraceVerbose TraceValue = "verbose"
)
// ClientInfo information about the client.
//
// @since 3.15.0.
type ClientInfo struct {
// Name is the name of the client as defined by the client.
Name string `json:"name"`
// Version is the client's version as defined by the client.
Version string `json:"version,omitempty"`
}
// InitializeParams params of Initialize request.
type InitializeParams struct {
WorkDoneProgressParams
// ProcessID is the process Id of the parent process that started
// the server. Is null if the process has not been started by another process.
// If the parent process is not alive then the server should exit (see exit notification) its process.
ProcessID int32 `json:"processId"`
// ClientInfo is the information about the client.
//
// @since 3.15.0
ClientInfo *ClientInfo `json:"clientInfo,omitempty"`
// Locale is the locale the client is currently showing the user interface
// in. This must not necessarily be the locale of the operating
// system.
//
// Uses IETF language tags as the value's syntax
// (See https://en.wikipedia.org/wiki/IETF_language_tag)
//
// @since 3.16.0.
Locale string `json:"locale,omitempty"`
// RootPath is the rootPath of the workspace. Is null
// if no folder is open.
//
// Deprecated: Use RootURI instead.
RootPath string `json:"rootPath,omitempty"`
// RootURI is the rootUri of the workspace. Is null if no
// folder is open. If both `rootPath` and "rootUri" are set
// "rootUri" wins.
//
// Deprecated: Use WorkspaceFolders instead.
RootURI DocumentURI `json:"rootUri,omitempty"`
// InitializationOptions user provided initialization options.
InitializationOptions interface{} `json:"initializationOptions,omitempty"`
// Capabilities is the capabilities provided by the client (editor or tool)
Capabilities ClientCapabilities `json:"capabilities"`
// Trace is the initial trace setting. If omitted trace is disabled ('off').
Trace TraceValue `json:"trace,omitempty"`
// WorkspaceFolders is the workspace folders configured in the client when the server starts.
// This property is only available if the client supports workspace folders.
// It can be `null` if the client supports workspace folders but none are
// configured.
//
// @since 3.6.0.
WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"`
}
// InitializeResult is the result of the Initialize request.
type InitializeResult struct {
// Capabilities is the capabilities the language server provides.
Capabilities ServerCapabilities `json:"capabilities"`
// ServerInfo Information about the server.
//
// @since 3.15.0.
ServerInfo *ServerInfo `json:"serverInfo,omitempty"`
}
// LogTraceParams params of LogTrace notification.
//
// @since 3.16.0.
type LogTraceParams struct {
// Message is the message to be logged.
Message string `json:"message"`
// Verbose is the additional information that can be computed if the "trace" configuration
// is set to "verbose".
Verbose TraceValue `json:"verbose,omitempty"`
}
// SetTraceParams params of SetTrace notification.
//
// @since 3.16.0.
type SetTraceParams struct {
// Value is the new value that should be assigned to the trace setting.
Value TraceValue `json:"value"`
}
// FileOperationPatternKind is a pattern kind describing if a glob pattern matches a file a folder or
// both.
//
// @since 3.16.0.
type FileOperationPatternKind string
// list of FileOperationPatternKind.
const (
// FileOperationPatternKindFile is the pattern matches a file only.
FileOperationPatternKindFile FileOperationPatternKind = "file"
// FileOperationPatternKindFolder is the pattern matches a folder only.
FileOperationPatternKindFolder FileOperationPatternKind = "folder"
)
// FileOperationPatternOptions matching options for the file operation pattern.
//
// @since 3.16.0.
type FileOperationPatternOptions struct {
// IgnoreCase indicates that the pattern should be matched ignoring casing.
IgnoreCase bool `json:"ignoreCase,omitempty"`
}
// FileOperationPattern a pattern to describe in which file operation requests or notifications
// the server is interested in.
//
// @since 3.16.0.
type FileOperationPattern struct {
// The glob pattern to match. Glob patterns can have the following syntax:
// - `*` to match one or more characters in a path segment
// - `?` to match on one character in a path segment
// - `**` to match any number of path segments, including none
// - `{}` to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript
// and JavaScript files)
// - `[]` to declare a range of characters to match in a path segment
// (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
// - `[!...]` to negate a range of characters to match in a path segment
// (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but
// not `example.0`)
Glob string `json:"glob"`
// Matches whether to match files or folders with this pattern.
//
// Matches both if undefined.
Matches FileOperationPatternKind `json:"matches,omitempty"`
// Options additional options used during matching.
Options FileOperationPatternOptions `json:"options,omitempty"`
}
// FileOperationFilter is a filter to describe in which file operation requests or notifications
// the server is interested in.
//
// @since 3.16.0.
type FileOperationFilter struct {
// Scheme is a URI like "file" or "untitled".
Scheme string `json:"scheme,omitempty"`
// Pattern is the actual file operation pattern.
Pattern FileOperationPattern `json:"pattern"`
}
// CreateFilesParams is the parameters sent in notifications/requests for user-initiated creation
// of files.
//
// @since 3.16.0.
type CreateFilesParams struct {
// Files an array of all files/folders created in this operation.
Files []FileCreate `json:"files"`
}
// FileCreate represents information on a file/folder create.
//
// @since 3.16.0.
type FileCreate struct {
// URI is a file:// URI for the location of the file/folder being created.
URI string `json:"uri"`
}
// RenameFilesParams is the parameters sent in notifications/requests for user-initiated renames
// of files.
//
// @since 3.16.0.
type RenameFilesParams struct {
// Files an array of all files/folders renamed in this operation. When a folder
// is renamed, only the folder will be included, and not its children.
Files []FileRename `json:"files"`
}
// FileRename represents information on a file/folder rename.
//
// @since 3.16.0.
type FileRename struct {
// OldURI is a file:// URI for the original location of the file/folder being renamed.
OldURI string `json:"oldUri"`
// NewURI is a file:// URI for the new location of the file/folder being renamed.
NewURI string `json:"newUri"`
}
// DeleteFilesParams is the parameters sent in notifications/requests for user-initiated deletes
// of files.
//
// @since 3.16.0.
type DeleteFilesParams struct {
// Files an array of all files/folders deleted in this operation.
Files []FileDelete `json:"files"`
}
// FileDelete represents information on a file/folder delete.
//
// @since 3.16.0.
type FileDelete struct {
// URI is a file:// URI for the location of the file/folder being deleted.
URI string `json:"uri"`
}
// DocumentHighlightParams params of DocumentHighlight request.
//
// @since 3.15.0.
type DocumentHighlightParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// DeclarationParams params of Declaration request.
//
// @since 3.15.0.
type DeclarationParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// DefinitionParams params of Definition request.
//
// @since 3.15.0.
type DefinitionParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// TypeDefinitionParams params of TypeDefinition request.
//
// @since 3.15.0.
type TypeDefinitionParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// ImplementationParams params of Implementation request.
//
// @since 3.15.0.
type ImplementationParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// ShowDocumentParams params to show a document.
//
// @since 3.16.0.
type ShowDocumentParams struct {
// URI is the document uri to show.
URI URI `json:"uri"`
// External indicates to show the resource in an external program.
// To show for example `https://code.visualstudio.com/`
// in the default WEB browser set `external` to `true`.
External bool `json:"external,omitempty"`
// TakeFocus an optional property to indicate whether the editor
// showing the document should take focus or not.
// Clients might ignore this property if an external
// program is started.
TakeFocus bool `json:"takeFocus,omitempty"`
// Selection an optional selection range if the document is a text
// document. Clients might ignore the property if an
// external program is started or the file is not a text
// file.
Selection *Range `json:"selection,omitempty"`
}
// ShowDocumentResult is the result of a show document request.
//
// @since 3.16.0.
type ShowDocumentResult struct {
// Success a boolean indicating if the show was successful.
Success bool `json:"success"`
}
// ServerInfo Information about the server.
//
// @since 3.15.0.
type ServerInfo struct {
// Name is the name of the server as defined by the server.
Name string `json:"name"`
// Version is the server's version as defined by the server.
Version string `json:"version,omitempty"`
}
// InitializeError known error codes for an "InitializeError".
type InitializeError struct {
// Retry indicates whether the client execute the following retry logic:
// (1) show the message provided by the ResponseError to the user
// (2) user selects retry or cancel
// (3) if user selected retry the initialize method is sent again.
Retry bool `json:"retry,omitempty"`
}
// ReferencesOptions ReferencesProvider options.
//
// @since 3.15.0.
type ReferencesOptions struct {
WorkDoneProgressOptions
}
// WorkDoneProgressOptions WorkDoneProgress options.
//
// @since 3.15.0.
type WorkDoneProgressOptions struct {
WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
}
// LinkedEditingRangeParams params for the LinkedEditingRange request.
//
// @since 3.16.0.
type LinkedEditingRangeParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
}
// LinkedEditingRanges result of LinkedEditingRange request.
//
// @since 3.16.0.
type LinkedEditingRanges struct {
// Ranges a list of ranges that can be renamed together.
//
// The ranges must have identical length and contain identical text content.
//
// The ranges cannot overlap.
Ranges []Range `json:"ranges"`
// WordPattern an optional word pattern (regular expression) that describes valid contents for
// the given ranges.
//
// If no pattern is provided, the client configuration's word pattern will be used.
WordPattern string `json:"wordPattern,omitempty"`
}
// MonikerParams params for the Moniker request.
//
// @since 3.16.0.
type MonikerParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// UniquenessLevel is the Moniker uniqueness level to define scope of the moniker.
//
// @since 3.16.0.
type UniquenessLevel string

// list of UniquenessLevel.
const (
	// UniquenessLevelDocument is the moniker is only unique inside a document.
	UniquenessLevelDocument UniquenessLevel = "document"

	// UniquenessLevelProject is the moniker is unique inside a project for which a dump got created.
	UniquenessLevelProject UniquenessLevel = "project"

	// UniquenessLevelGroup is the moniker is unique inside the group to which a project belongs.
	UniquenessLevelGroup UniquenessLevel = "group"

	// UniquenessLevelScheme is the moniker is unique inside the moniker scheme.
	UniquenessLevelScheme UniquenessLevel = "scheme"

	// UniquenessLevelGlobal is the moniker is globally unique.
	UniquenessLevelGlobal UniquenessLevel = "global"
)

// MonikerKind is the moniker kind.
//
// @since 3.16.0.
type MonikerKind string

// list of MonikerKind.
const (
	// MonikerKindImport is the moniker represent a symbol that is imported into a project.
	MonikerKindImport MonikerKind = "import"

	// MonikerKindExport is the moniker represents a symbol that is exported from a project.
	MonikerKindExport MonikerKind = "export"

	// MonikerKindLocal is the moniker represents a symbol that is local to a project (e.g. a local
	// variable of a function, a class not visible outside the project, ...).
	MonikerKindLocal MonikerKind = "local"
)

// Moniker definition to match LSIF 0.5 moniker definition.
//
// @since 3.16.0.
type Moniker struct {
	// Scheme is the scheme of the moniker. For example tsc or .Net.
	Scheme string `json:"scheme"`

	// Identifier is the identifier of the moniker.
	//
	// The value is opaque in LSIF however schema owners are allowed to define the structure if they want.
	Identifier string `json:"identifier"`

	// Unique is the scope in which the moniker is unique.
	Unique UniquenessLevel `json:"unique"`

	// Kind is the moniker kind if known.
	Kind MonikerKind `json:"kind,omitempty"`
}
// StaticRegistrationOptions staticRegistration options to be returned in the initialize request.
type StaticRegistrationOptions struct {
	// ID is the id used to register the request. The id can be used to deregister
	// the request again. See also Registration#id.
	ID string `json:"id,omitempty"`
}

// DocumentLinkRegistrationOptions DocumentLinkRegistration options.
type DocumentLinkRegistrationOptions struct {
	TextDocumentRegistrationOptions

	// ResolveProvider document links have a resolve provider as well.
	ResolveProvider bool `json:"resolveProvider,omitempty"`
}

// InitializedParams params of Initialized notification.
//
// The notification carries no payload; the type exists only so the method
// has a named parameter struct.
type InitializedParams struct{}

// WorkspaceFolders represents a slice of WorkspaceFolder.
type WorkspaceFolders []WorkspaceFolder
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package conference
// RoomType defines room size for conference CUJ.
//
// The iota order is part of the type's contract: NoRoom is the zero value,
// and sizes grow monotonically from TwoRoomSize to ClassRoomSize.
type RoomType int

const (
	// NoRoom means not joining google meet when running the test.
	NoRoom RoomType = iota
	// TwoRoomSize creates a conference room with 2 participants.
	TwoRoomSize
	// SmallRoomSize creates a conference room with 5 participants.
	SmallRoomSize
	// LargeRoomSize creates a conference room with 16 participants.
	LargeRoomSize
	// ClassRoomSize creates a conference room with 38/49 participants.
	ClassRoomSize
)
// GoogleMeetRoomParticipants defines room size for Google meet cuj.
//
// NOTE(review): SmallRoomSize maps to 6 here while the RoomType comment says
// 5 participants — confirm whether the extra participant is intentional
// (e.g. the device under test counting itself).
var GoogleMeetRoomParticipants = map[RoomType]int{
	NoRoom:        0,
	TwoRoomSize:   2,
	SmallRoomSize: 6,
	LargeRoomSize: 16,
	ClassRoomSize: 49,
}

// ZoomRoomParticipants defines room size for Zoom cuj.
//
// NoRoom has no entry; a lookup for it yields the zero value 0.
var ZoomRoomParticipants = map[RoomType]int{
	TwoRoomSize:   2,
	SmallRoomSize: 5,
	LargeRoomSize: 16,
	ClassRoomSize: 38,
}
|
package main;
import "fmt";
// parent[i] is the parent of element i in the disjoint-set forest.
var parent []int

// seen marks set representatives that have already been reported.
var seen []bool

// initSets prepares n+1 singleton sets, one per element 0..n.
//
// Fix: the original declared `p := make(...)` / `seen := make(bool, ...)` at
// package level (illegal `:=` and an invalid `make` of a non-slice), used a
// C-style ternary, and named this function `init` with a parameter, which Go
// forbids. The whole program is rewritten as valid Go with identical I/O
// behavior.
func initSets(n int) {
	parent = make([]int, n+1)
	seen = make([]bool, n+1)
	for i := range parent {
		parent[i] = i
	}
}

// findSet returns the representative of x's set, compressing paths as it goes.
func findSet(x int) int {
	if parent[x] != x {
		parent[x] = findSet(parent[x])
	}
	return parent[x]
}

// unionSet merges the sets containing x and y (no-op if already merged).
func unionSet(x, y int) {
	px, py := findSet(x), findSet(y)
	if px != py {
		parent[px] = py
	}
}

// main reads n edges over elements 0..n-1 and prints, for each distinct set,
// the first element encountered that belongs to it.
func main() {
	var n, u, v int
	fmt.Scanf("%d", &n)
	initSets(n)
	for i := 0; i < n; i++ {
		fmt.Scanf("%d %d", &u, &v)
		unionSet(u, v)
	}
	fmt.Println("Parents:")
	for i := 0; i < n; i++ {
		if r := findSet(i); !seen[r] {
			seen[r] = true
			fmt.Printf("%d", i)
		}
	}
	fmt.Println()
}
/*
Command genblog generates a static blog.
Create an article:
genblog article <article-url-slug>
Run a local server:
genblog serve
Build static HTML to `public/`:
genblog build
*/
package main
import (
"fmt"
"os"
)
// main dispatches the genblog subcommands: article, serve, and build.
// It exits with status 2 (via usage) on any malformed invocation.
//
// Fix: the serve and build cases shadowed `blog` with a second
// currentBlog() call, loading the configuration twice for no benefit;
// the outer value is reused instead.
func main() {
	if len(os.Args) < 2 {
		usage()
	}
	blog := currentBlog()
	switch os.Args[1] {
	case "article":
		if len(os.Args) != 3 {
			usage()
		}
		CreateArticle(os.Args[2], blog)
		blog.writeConfig()
		fmt.Println("genblog: Created article at ./articles/" + os.Args[2] + ".md")
	case "serve":
		fmt.Println("genblog: Serving blog at http://localhost:2000")
		blog.Serve("2000")
	case "build":
		blog.Build()
		fmt.Println("genblog: Built blog at ./public")
	default:
		usage()
	}
}
// currentBlog builds a Blog rooted at the current working directory and
// loads its configuration. It panics (via check) if the directory cannot
// be determined.
func currentBlog() *Blog {
	wd, err := os.Getwd()
	check(err)
	b := &Blog{RootDir: wd}
	b.loadConfig()
	return b
}
// usage prints the command synopsis to stderr and terminates the process
// with exit status 2 (conventional for usage errors).
func usage() {
	const s = `usage:
  genblog article <article-url-slug>
  genblog serve
  genblog build
`
	fmt.Fprint(os.Stderr, s)
	os.Exit(2)
}
// check panics on a non-nil error; used for unrecoverable setup failures.
func check(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package tree
import (
"errors"
"log"
)
// Properties of a k-ary tree:
// node count n = n0 + n1 + n2 + ... + nk (nk = number of nodes with exactly k children)
// n = n1 + 2n2 + 3n3 + ... + knk
// hence n0 = n2 + 2n3 + ... + (k-1)nk
// TNode is a tree node: a value holder combined with the Tree behaviour.
// A tree root is simply a TNode with no parent link.
type TNode interface {
	// SetVal replaces the value stored in the node.
	SetVal(val interface{})
	// GetVal returns the value stored in the node.
	GetVal() interface{}
	Tree
}

// Tree is the behaviour every subtree supports: child management, navigation,
// traversal, and structural conversions.
type Tree interface {
	AddChild(TNode) error
	WhichChild(TNode) int
	DeleteChild(i int)
	Child(i int) (TNode, bool)
	Children() []TNode
	LevelChildren(lev int) []TNode
	LeafGenerations() []TNode
	Degree() int
	Parent() TNode // parent node; nil for a root
	SetParent(p TNode)
	Root() TNode
	WhichChildOfParent() int
	Depth() int
	GenerationsNum() int
	LeftSibling() (TNode, bool)
	RightSibling() (TNode, bool)
	LeftChild() (TNode, bool)
	RightChild() (TNode, bool)
	DLRVisit(fn func(TNode) error) error
	LDRVisit(fn func(n TNode) error) error
	LRDVisit(fn func(n TNode) error) error
	Map() map[string]interface{}
	BinaryTree() TNode
	Copy() TNode
}
// Node is the concrete tree node implementation of TNode.
type Node struct {
	val      interface{} // payload value
	children []TNode     // ordered child list, left to right
	parent   TNode       // nil for a root node
}

// NewNode creates a detached node (no parent, no children) holding val.
func NewNode(val interface{}) *Node {
	return &Node{
		val: val,
	}
}

// GetVal returns the node's stored value.
func (n *Node) GetVal() interface{} {
	return n.val
}

// SetVal replaces the node's stored value.
func (n *Node) SetVal(val interface{}) {
	n.val = val
}
// AddChild appends child as the last child of n and sets its parent link.
// A child that is nil or already attached to a parent is rejected.
//
// Fix: the original guard (`child != nil && child.Parent() != nil`) let a
// nil child fall through to child.SetParent(n), panicking on a nil
// interface method call; a nil child now returns an error instead.
func (n *Node) AddChild(child TNode) error {
	if child == nil {
		return errors.New("child is nil")
	}
	if child.Parent() != nil {
		return errors.New("child has parent")
	}
	child.SetParent(n)
	n.children = append(n.children, child)
	return nil
}

// AddChildren appends the given children in order, stopping at the first
// child that cannot be attached.
func (n *Node) AddChildren(children ...TNode) error {
	for _, child := range children {
		if err := n.AddChild(child); err != nil {
			return err
		}
	}
	return nil
}
// DeleteChild detaches the i-th child, clearing its parent link.
// Out-of-range indexes are ignored.
func (n *Node) DeleteChild(i int) {
	if i < 0 || i >= len(n.children) {
		return
	}
	n.children[i].SetParent(nil)
	n.children = append(n.children[:i], n.children[i+1:]...)
}

// WhichChild reports the index of child among n's children, or -1 when
// child is not a direct child of n.
func (n *Node) WhichChild(child TNode) int {
	for idx := range n.children {
		if n.children[idx] == child {
			return idx
		}
	}
	return -1
}
// Child returns the i-th child and true, or (nil, false) when i is out of
// range.
//
// Fix: the original bounds check read `0 < i`, which rejected every index
// greater than zero (so only Child(0) ever succeeded, and RightSibling —
// which calls Child(index+1) — always failed). The intended test is `i < 0`.
func (n *Node) Child(i int) (TNode, bool) {
	if i < 0 || i >= len(n.children) {
		return nil, false
	}
	return n.children[i], true
}
// Children returns the node's child slice (shared, not a copy — callers
// must not mutate it).
func (n *Node) Children() []TNode {
	return n.children
}

// Degree returns the number of direct children.
func (n *Node) Degree() int {
	return len(n.children)
}

// Parent returns the parent node, or nil for a root.
func (n *Node) Parent() TNode {
	return n.parent
}

// SetParent sets the parent link; it does not update the parent's child list.
func (n *Node) SetParent(p TNode) {
	n.parent = p
}
// Root walks parent links upward and returns the top-most ancestor;
// a node without a parent is its own root.
func (n *Node) Root() TNode {
	var cur TNode = n
	for cur.Parent() != nil {
		cur = cur.Parent()
	}
	return cur
}

// WhichChildOfParent reports n's index among its parent's children,
// or -1 when n has no parent.
func (n *Node) WhichChildOfParent() int {
	if n.parent == nil {
		return -1
	}
	return n.parent.WhichChild(n)
}
// LevelChildren returns every descendant exactly lev levels below n
// (lev == 1 means the direct children). A non-positive lev yields an
// empty slice.
func (n *Node) LevelChildren(lev int) []TNode {
	ret := make([]TNode, 0)
	if lev == 1 {
		ret = append(ret, n.children...)
	} else if lev > 1 {
		// Recurse one level down with a decremented distance.
		for _, c := range n.children {
			ret = append(ret, c.LevelChildren(lev-1)...)
		}
	}
	return ret
}

// Depth returns the depth of the subtree rooted at n (a leaf has depth 1).
// It probes successive levels until one is empty, which revisits upper
// levels repeatedly — fine for small trees.
func (n *Node) Depth() int {
	lev := 1
	for len(n.LevelChildren(lev)) != 0 {
		lev++
	}
	return lev
}

// Level reports how many ancestors n has (a root is at level 0).
func (n *Node) Level() int {
	lev := 0
	var p = n.Parent()
	for p != nil {
		lev++
		p = p.Parent()
	}
	return lev
}
// GenerationsNum counts all descendants of n (children, grandchildren, ...),
// excluding n itself.
func (n *Node) GenerationsNum() int {
	num := len(n.children)
	for _, c := range n.children {
		num += c.GenerationsNum()
	}
	return num
}

// LeftSibling returns the sibling immediately to the left of n,
// or false when n is a root or the first child.
func (n *Node) LeftSibling() (TNode, bool) {
	index := n.WhichChildOfParent()
	if index <= 0 {
		return nil, false
	}
	return n.parent.Child(index - 1)
}

// RightSibling returns the sibling immediately to the right of n,
// or false when n is a root or the last child.
func (n *Node) RightSibling() (TNode, bool) {
	index := n.WhichChildOfParent()
	if index == -1 {
		return nil, false
	}
	// Degree < index+2 means there is no child at index+1.
	if n.parent.Degree() < index+2 {
		return nil, false
	}
	return n.parent.Child(index + 1)
}
// LeftChild returns the first (left-most) child, if any.
func (n *Node) LeftChild() (TNode, bool) {
	if len(n.children) == 0 {
		return nil, false
	}
	return n.children[0], true
}

// RightChild returns the last (right-most) child, if any.
func (n *Node) RightChild() (TNode, bool) {
	if c := len(n.children); c > 0 {
		return n.children[c-1], true
	}
	return nil, false
}

// LeafGenerations collects every leaf descendant of n, left to right.
func (n *Node) LeafGenerations() []TNode {
	out := make([]TNode, 0)
	for _, child := range n.children {
		if child.Degree() == 0 {
			out = append(out, child)
			continue
		}
		out = append(out, child.LeafGenerations()...)
	}
	return out
}
// Map renders the subtree rooted at n as nested maps with the keys
// "val" (the node's value) and "children" (recursively rendered children).
func (n *Node) Map() map[string]interface{} {
	rendered := make([]map[string]interface{}, len(n.children))
	for i := range n.children {
		rendered[i] = n.children[i].Map()
	}
	return map[string]interface{}{
		"val":      n.val,
		"children": rendered,
	}
}
// DLRVisit performs a pre-order traversal: node first, then left, then right.
// Traversal stops and returns the first error fn produces.
//
// NOTE: all three traversals use LeftChild/RightChild, i.e. the FIRST and
// LAST child only — for a node with three or more children the middle
// children are never visited. This appears to assume a binary tree; verify
// before using on wider trees.
func (n *Node) DLRVisit(fn func(TNode) error) error {
	err := fn(n)
	if err != nil {
		return err
	}
	lnode, has := n.LeftChild()
	if has {
		err = lnode.DLRVisit(fn)
		if err != nil {
			return err
		}
	}
	rnode, has := n.RightChild()
	if has {
		err = rnode.DLRVisit(fn)
		if err != nil {
			return err
		}
	}
	return nil
}

// LDRVisit performs an in-order traversal: left, then node, then right.
// See the binary-tree caveat on DLRVisit.
func (n *Node) LDRVisit(fn func(TNode) error) error {
	var err error
	lnode, has := n.LeftChild()
	if has {
		err = lnode.LDRVisit(fn)
		if err != nil {
			return err
		}
	}
	err = fn(n)
	if err != nil {
		return err
	}
	rnode, has := n.RightChild()
	if has {
		err = rnode.LDRVisit(fn)
		if err != nil {
			return err
		}
	}
	return err
}

// LRDVisit performs a post-order traversal: left, then right, then node.
// See the binary-tree caveat on DLRVisit.
func (n *Node) LRDVisit(fn func(TNode) error) error {
	var err error
	lnode, has := n.LeftChild()
	if has {
		err = lnode.LRDVisit(fn)
		if err != nil {
			return err
		}
	}
	rnode, has := n.RightChild()
	if has {
		err = rnode.LRDVisit(fn)
		if err != nil {
			return err
		}
	}
	err = fn(n)
	if err != nil {
		return err
	}
	return err
}
// BinaryTree converts the general tree rooted at n into a binary tree:
// each child subtree is converted recursively and the resulting forest is
// folded into a single chain via bforest2BTree, attached as n's child.
func (n *Node) BinaryTree() TNode {
	nn := NewNode(n.val)
	dg := n.Degree()
	if dg > 0 {
		cls := make([]TNode, dg)
		for i := 0; i < dg; i++ {
			cp := n.children[i]
			// NOTE(review): leftover debug output — prints every subtree
			// during conversion; consider removing (it is the only use of
			// the log import).
			log.Println(cp.Map())
			cls[i] = cp.BinaryTree()
		}
		nn.AddChild(bforest2BTree(cls))
	}
	return nn
}

// Copy returns a deep copy of the subtree rooted at n.
func (n *Node) Copy() TNode {
	var nn = NewNode(n.val)
	for i := range n.children {
		nn.AddChild(n.children[i].Copy())
	}
	return nn
}
// bforest2BTree folds a forest of binary trees into one binary tree by
// repeatedly attaching the folded tail as a child of the head.
//
// Fix: an empty forest made the original fall through to nodes[1:], which
// panics on a zero-length slice; it now returns nil.
func bforest2BTree(nodes []TNode) TNode {
	switch len(nodes) {
	case 0:
		return nil
	case 1:
		return nodes[0]
	case 2:
		fnode := nodes[0]
		enode := nodes[1]
		fnode.AddChild(enode)
		return fnode
	}
	fnod := bforest2BTree(nodes[1:])
	return bforest2BTree([]TNode{nodes[0], fnod})
}
// LevelVist performs a breadth-first (level-order) traversal starting at n,
// calling fn on each node; it stops and returns the first error fn yields.
//
// Fix: the original body only described the queue algorithm in comments and
// returned nil without visiting anything — this implements it. All children
// of each dequeued node are enqueued (not just first/last as in the
// depth-first visitors).
func (n *Node) LevelVist(fn func(TNode) error) error {
	queue := []TNode{n}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if err := fn(cur); err != nil {
			return err
		}
		queue = append(queue, cur.Children()...)
	}
	return nil
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gles
import (
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"github.com/google/gapid/core/image"
"github.com/google/gapid/core/image/astc"
"github.com/google/gapid/core/stream"
"github.com/google/gapid/core/stream/fmts"
"github.com/google/gapid/gapis/api"
)
// isUnsizedFormat reports whether fmt is one of the unsized (base) GL
// internal formats.
func isUnsizedFormat(fmt GLenum) bool {
	switch fmt {
	case GLenum_GL_RGB,
		GLenum_GL_RGBA,
		GLenum_GL_LUMINANCE_ALPHA,
		GLenum_GL_LUMINANCE,
		GLenum_GL_ALPHA,
		GLenum_GL_DEPTH_COMPONENT,
		GLenum_GL_DEPTH_STENCIL:
		return true
	}
	return false
}
// getCorrectInternalFormat returns the correct format to use for texture
// data upload calls (e.g. glTexImage2d). Sized luminance/alpha variants are
// collapsed back to their unsized equivalents; everything else uses the
// sized format unchanged.
// See image_format.api:GetSizedFormatFromTuple.
func (img Imageʳ) getCorrectInternalFormat() GLenum {
	if internal := img.InternalFormat(); isUnsizedFormat(internal) {
		return internal
	}
	switch sized := img.SizedFormat(); sized {
	case GLenum_GL_LUMINANCE8_ALPHA8_EXT, GLenum_GL_LUMINANCE_ALPHA16F_EXT, GLenum_GL_LUMINANCE_ALPHA32F_EXT:
		return GLenum_GL_LUMINANCE_ALPHA
	case GLenum_GL_LUMINANCE8_EXT, GLenum_GL_LUMINANCE16F_EXT, GLenum_GL_LUMINANCE32F_EXT:
		return GLenum_GL_LUMINANCE
	case GLenum_GL_ALPHA8_EXT, GLenum_GL_ALPHA16F_EXT, GLenum_GL_ALPHA32F_EXT:
		return GLenum_GL_ALPHA
	default:
		return sized
	}
}
// getUnsizedFormatAndType returns the image's unsized format and component
// type, deriving both from the sized format when no explicit data
// format/type was recorded.
func (i Imageʳ) getUnsizedFormatAndType() (unsizedFormat, ty GLenum) {
	if i.DataFormat() == 0 && i.DataType() == 0 {
		return getUnsizedFormatAndType(i.SizedFormat())
	}
	dataType := i.DataType()
	// NOTE(review): float depth data is remapped to GL_UNSIGNED_INT here —
	// presumably to match the representation used for depth reads; confirm.
	if i.InternalFormat() == GLenum_GL_DEPTH_COMPONENT && dataType == GLenum_GL_FLOAT {
		dataType = GLenum_GL_UNSIGNED_INT
	}
	return i.DataFormat(), dataType
}
// cubemapFaceToLayer maps a cubemap face enum to its layer index by calling
// the generated API subroutine with a throwaway state.
func cubemapFaceToLayer(target GLenum) GLint {
	layer, _ := subCubemapFaceToLayer(nil, nil, api.CmdNoID, nil, &api.GlobalState{}, nil, 0, nil, nil, target)
	return layer
}

// getSizedFormatFromTuple returns sized format from unsized format and component type.
// It panics when the tuple does not name a known sized format.
func getSizedFormatFromTuple(unsizedFormat, ty GLenum) (sizedFormat GLenum) {
	sf, _ := subGetSizedFormatFromTuple(nil, nil, api.CmdNoID, nil, &api.GlobalState{}, nil, 0, nil, nil, unsizedFormat, ty)
	if sf == GLenum_GL_NONE {
		panic(fmt.Errorf("Unknown unsized format: %v, %v", unsizedFormat, ty))
	}
	return sf
}

// getUnsizedFormatAndType returns unsized format and component type from sized format.
func getUnsizedFormatAndType(sizedFormat GLenum) (unsizedFormat, ty GLenum) {
	info := GetSizedFormatInfoOrPanic(sizedFormat)
	return info.UnsizedFormat(), info.DataType()
}

// GetSizedFormatInfoOrPanic is wrapper for the 'GetSizedFormatInfo' api subroutine.
// It panics when sizedFormat is not a known sized format.
func GetSizedFormatInfoOrPanic(sizedFormat GLenum) SizedFormatInfo {
	info, _ := subGetSizedFormatInfo(nil, nil, api.CmdNoID, nil, &api.GlobalState{}, nil, 0, nil, nil, sizedFormat)
	if info.SizedFormat() == GLenum_GL_NONE {
		panic(fmt.Errorf("Unknown sized format: %v", sizedFormat))
	}
	return info
}
// getImageFormat returns the image.Format for the given format-type tuple.
// The tuple must be in one of the following two forms:
//   (unsizedFormat, ty) - Uncompressed data.
//   (sizedFormat, NONE) - Compressed data.
//   (NONE, NONE)        - Uninitialized content.
// Sized uncompressed format (e.g. GL_RGB565) is not a valid input.
func getImageFormat(format, ty GLenum) (*image.Format, error) {
	switch {
	case format == GLenum_GL_NONE:
		// Uninitialized content: an empty uncompressed format.
		return image.NewUncompressed("<uninitialized>", &stream.Format{}), nil
	case ty != GLenum_GL_NONE:
		// Uncompressed data.
		if imgfmt, _ := getUncompressedStreamFormat(format, ty); imgfmt != nil {
			return image.NewUncompressed(fmt.Sprintf("%v, %v", format, ty), imgfmt), nil
		}
	default:
		// Compressed data.
		if imgfmt, _ := getCompressedImageFormat(format); imgfmt != nil {
			return imgfmt, nil
		}
	}
	return nil, fmt.Errorf("Unsupported input format-type pair: (%s, %s)", format, ty)
}
// filterUncompressedImageFormat returns a copy of f with only the components
// that have channels that pass the predicate p. The returned format's name
// records which channels were kept. Panics if f is not uncompressed.
func filterUncompressedImageFormat(f *image.Format, p func(stream.Channel) bool) *image.Format {
	u := f.GetUncompressed()
	if u == nil {
		panic(fmt.Errorf("Format %v is not uncompressed", f))
	}
	// Deep-copy, then rebuild the component list in place on the copy.
	out := proto.Clone(f).(*image.Format)
	filtered := out.GetUncompressed().Format
	filtered.Components = filtered.Components[:0]
	names := []string{}
	for _, c := range u.Format.Components {
		if p(c.Channel) {
			filtered.Components = append(filtered.Components, c)
			names = append(names, c.Channel.String())
		}
	}
	out.Name = fmt.Sprintf("%v from %v", strings.Join(names, ", "), f.Name)
	return out
}
// glChannelToStreamChannel maps a GL channel enum to the corresponding
// stream.Channel used when building decode formats.
var glChannelToStreamChannel = map[GLenum]stream.Channel{
	GLenum_GL_RED:             stream.Channel_Red,
	GLenum_GL_GREEN:           stream.Channel_Green,
	GLenum_GL_BLUE:            stream.Channel_Blue,
	GLenum_GL_ALPHA:           stream.Channel_Alpha,
	GLenum_GL_LUMINANCE:       stream.Channel_Luminance,
	GLenum_GL_DEPTH_COMPONENT: stream.Channel_Depth,
	GLenum_GL_STENCIL_INDEX:   stream.Channel_Stencil,
}
// getUncompressedStreamFormat returns the decoding format which can be used to read single pixel.
// The components are emitted in increasing memory order assuming a
// little-endian architecture, which is generally the reverse of the order
// in the (big-endian-named) GL type enums.
func getUncompressedStreamFormat(unsizedFormat, ty GLenum) (format *stream.Format, err error) {
	info, _ := subGetUnsizedFormatInfo(nil, nil, api.CmdNoID, nil, &api.GlobalState{}, nil, 0, nil, nil, unsizedFormat)
	if info.Count() == 0 {
		return nil, fmt.Errorf("Unknown unsized format: %v", unsizedFormat)
	}
	// Resolve each GL channel of the unsized format to a stream channel.
	glChannels := []GLenum{info.Channel0(), info.Channel1(), info.Channel2(), info.Channel3()}
	channels := make(stream.Channels, info.Count())
	for i := range channels {
		channel, ok := glChannelToStreamChannel[glChannels[i]]
		if !ok {
			return nil, fmt.Errorf("Unknown GL channel: %v", glChannels[i])
		}
		channels[i] = channel
	}

	// Helper method to build the format.
	format = &stream.Format{}
	addComponent := func(channelIndex int, datatype *stream.DataType) {
		channel := stream.Channel_Undefined // Padding field
		if 0 <= channelIndex && channelIndex < len(channels) {
			channel = channels[channelIndex]
		}
		// Depth samples as float, stencil as integer; colour channels
		// follow the format's integer flag.
		sampling := stream.Linear
		var sampleAsFloat bool
		if channel == stream.Channel_Depth {
			sampleAsFloat = true
		} else if channel == stream.Channel_Stencil {
			sampleAsFloat = false
		} else /* colour */ {
			sampleAsFloat = !info.Integer()
		}
		if datatype.IsInteger() && sampleAsFloat {
			sampling = stream.LinearNormalized // Convert int to float
		}
		format.Components = append(format.Components, &stream.Component{
			DataType: datatype,
			Sampling: sampling,
			Channel:  channel,
		})
	}

	// Read the components in increasing memory order (assuming little-endian architecture).
	// Note that the GL names are based on big-endian, so the order is generally backwards.
	switch ty {
	case GLenum_GL_UNSIGNED_BYTE:
		for i := range channels {
			addComponent(i, &stream.U8)
		}
	case GLenum_GL_BYTE:
		for i := range channels {
			addComponent(i, &stream.S8)
		}
	case GLenum_GL_UNSIGNED_SHORT:
		for i := range channels {
			addComponent(i, &stream.U16)
		}
	case GLenum_GL_SHORT:
		for i := range channels {
			addComponent(i, &stream.S16)
		}
	case GLenum_GL_UNSIGNED_INT:
		for i := range channels {
			addComponent(i, &stream.U32)
		}
	case GLenum_GL_INT:
		for i := range channels {
			addComponent(i, &stream.S32)
		}
	case GLenum_GL_HALF_FLOAT, GLenum_GL_HALF_FLOAT_OES:
		for i := range channels {
			addComponent(i, &stream.F16)
		}
	case GLenum_GL_FLOAT:
		for i := range channels {
			addComponent(i, &stream.F32)
		}
	case GLenum_GL_UNSIGNED_SHORT_5_6_5:
		addComponent(2, &stream.U5)
		addComponent(1, &stream.U6)
		addComponent(0, &stream.U5)
	case GLenum_GL_UNSIGNED_SHORT_4_4_4_4:
		addComponent(3, &stream.U4)
		addComponent(2, &stream.U4)
		addComponent(1, &stream.U4)
		addComponent(0, &stream.U4)
	case GLenum_GL_UNSIGNED_SHORT_5_5_5_1:
		addComponent(3, &stream.U1)
		addComponent(2, &stream.U5)
		addComponent(1, &stream.U5)
		addComponent(0, &stream.U5)
	case GLenum_GL_UNSIGNED_INT_2_10_10_10_REV:
		addComponent(0, &stream.U10)
		addComponent(1, &stream.U10)
		addComponent(2, &stream.U10)
		addComponent(3, &stream.U2)
	case GLenum_GL_UNSIGNED_INT_24_8:
		addComponent(1, &stream.U8)
		addComponent(0, &stream.U24)
	case GLenum_GL_UNSIGNED_INT_10F_11F_11F_REV:
		addComponent(0, &stream.F11)
		addComponent(1, &stream.F11)
		addComponent(2, &stream.F10)
	case GLenum_GL_UNSIGNED_INT_5_9_9_9_REV:
		// Shared-exponent RGBE format has a dedicated stream format.
		return fmts.RGBE_U9U9U9U5, nil
	case GLenum_GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
		addComponent(0, &stream.F32)
		addComponent(1, &stream.U8)
		addComponent(-1, &stream.U24) // padding
	default:
		return nil, fmt.Errorf("Unsupported data type: %v", ty)
	}
	return format, nil
}
// getCompressedImageFormat returns a pointer to an image.Format for the given
// compressed format. Supported families: ETC1, ASTC, ATC, ETC2/EAC and S3TC;
// any other enum yields an error.
func getCompressedImageFormat(format GLenum) (*image.Format, error) {
	switch format {
	// ETC1
	case GLenum_GL_ETC1_RGB8_OES:
		return image.NewETC1_RGB_U8_NORM("GL_ETC1_RGB8_OES"), nil

	// ASTC
	case GLenum_GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
		return astc.NewRGBA_4x4("GL_COMPRESSED_RGBA_ASTC_4x4_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
		return astc.NewRGBA_5x4("GL_COMPRESSED_RGBA_ASTC_5x4_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
		return astc.NewRGBA_5x5("GL_COMPRESSED_RGBA_ASTC_5x5_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
		return astc.NewRGBA_6x5("GL_COMPRESSED_RGBA_ASTC_6x5_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
		return astc.NewRGBA_6x6("GL_COMPRESSED_RGBA_ASTC_6x6_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
		return astc.NewRGBA_8x5("GL_COMPRESSED_RGBA_ASTC_8x5_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
		return astc.NewRGBA_8x6("GL_COMPRESSED_RGBA_ASTC_8x6_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
		return astc.NewRGBA_8x8("GL_COMPRESSED_RGBA_ASTC_8x8_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
		return astc.NewRGBA_10x5("GL_COMPRESSED_RGBA_ASTC_10x5_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
		return astc.NewRGBA_10x6("GL_COMPRESSED_RGBA_ASTC_10x6_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
		return astc.NewRGBA_10x8("GL_COMPRESSED_RGBA_ASTC_10x8_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
		return astc.NewRGBA_10x10("GL_COMPRESSED_RGBA_ASTC_10x10_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
		return astc.NewRGBA_12x10("GL_COMPRESSED_RGBA_ASTC_12x10_KHR"), nil
	case GLenum_GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
		return astc.NewRGBA_12x12("GL_COMPRESSED_RGBA_ASTC_12x12_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
		return astc.NewSRGB8_ALPHA8_4x4("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
		return astc.NewSRGB8_ALPHA8_5x4("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
		return astc.NewSRGB8_ALPHA8_5x5("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
		return astc.NewSRGB8_ALPHA8_6x5("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
		return astc.NewSRGB8_ALPHA8_6x6("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
		return astc.NewSRGB8_ALPHA8_8x5("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
		return astc.NewSRGB8_ALPHA8_8x6("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
		return astc.NewSRGB8_ALPHA8_8x8("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
		return astc.NewSRGB8_ALPHA8_10x5("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
		return astc.NewSRGB8_ALPHA8_10x6("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
		return astc.NewSRGB8_ALPHA8_10x8("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
		return astc.NewSRGB8_ALPHA8_10x10("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
		return astc.NewSRGB8_ALPHA8_12x10("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR:
		return astc.NewSRGB8_ALPHA8_12x12("GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR"), nil

	// ATC
	case GLenum_GL_ATC_RGB_AMD:
		return image.NewATC_RGB_AMD("GL_ATC_RGB_AMD"), nil
	case GLenum_GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
		return image.NewATC_RGBA_EXPLICIT_ALPHA_AMD("GL_ATC_RGBA_EXPLICIT_ALPHA_AMD"), nil
	case GLenum_GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
		return image.NewATC_RGBA_INTERPOLATED_ALPHA_AMD("GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD"), nil

	// ETC
	case GLenum_GL_COMPRESSED_R11_EAC:
		return image.NewETC2_R_U11_NORM("GL_COMPRESSED_R11_EAC"), nil
	case GLenum_GL_COMPRESSED_SIGNED_R11_EAC:
		return image.NewETC2_R_S11_NORM("GL_COMPRESSED_SIGNED_R11_EAC"), nil
	case GLenum_GL_COMPRESSED_RG11_EAC:
		return image.NewETC2_RG_U11_NORM("GL_COMPRESSED_RG11_EAC"), nil
	case GLenum_GL_COMPRESSED_SIGNED_RG11_EAC:
		return image.NewETC2_RG_S11_NORM("GL_COMPRESSED_SIGNED_RG11_EAC"), nil
	case GLenum_GL_COMPRESSED_RGB8_ETC2:
		return image.NewETC2_RGB_U8_NORM("GL_COMPRESSED_RGB8_ETC2"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ETC2:
		return image.NewETC2_SRGB_U8_NORM("GL_COMPRESSED_SRGB8_ETC2"), nil
	case GLenum_GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
		return image.NewETC2_RGBA_U8U8U8U1_NORM("GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2"), nil
	case GLenum_GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
		return image.NewETC2_SRGBA_U8U8U8U1_NORM("GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2"), nil
	case GLenum_GL_COMPRESSED_RGBA8_ETC2_EAC:
		return image.NewETC2_RGBA_U8_NORM("GL_COMPRESSED_RGBA8_ETC2_EAC"), nil
	case GLenum_GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
		return image.NewETC2_SRGBA_U8_NORM("GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC"), nil

	// S3TC
	case GLenum_GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return image.NewS3_DXT1_RGB("GL_COMPRESSED_RGB_S3TC_DXT1_EXT"), nil
	case GLenum_GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return image.NewS3_DXT1_RGBA("GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"), nil
	case GLenum_GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return image.NewS3_DXT3_RGBA("GL_COMPRESSED_RGBA_S3TC_DXT3_EXT"), nil
	case GLenum_GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return image.NewS3_DXT5_RGBA("GL_COMPRESSED_RGBA_S3TC_DXT5_EXT"), nil
	}
	return nil, fmt.Errorf("Unsupported compressed format: %s", format)
}
|
package challenge_3
import (
"cryptopals/set_1/challenge_1"
"unicode/utf8"
)
// BuildCorpus computes the relative frequency of each rune in text:
// occurrences divided by the total rune count. An empty text yields an
// empty map.
func BuildCorpus(text string) map[rune]float64 {
	freq := make(map[rune]float64)
	for _, r := range text {
		freq[r]++
	}
	n := float64(utf8.RuneCountInString(text))
	for r := range freq {
		freq[r] /= n
	}
	return freq
}
// ScoreText rates text by summing corpus frequencies of its runes and
// normalising by the rune count; higher scores indicate text whose rune
// distribution better matches the corpus.
func ScoreText(text string, corpus map[rune]float64) float64 {
	var sum float64
	for _, r := range text {
		sum += corpus[r]
	}
	return sum / float64(utf8.RuneCountInString(text))
}
// SingleByteXor XORs every byte of str with c, mutating str in place,
// and returns the same slice for convenience.
func SingleByteXor(str []byte, c byte) []byte {
	for i := range str {
		str[i] ^= c
	}
	return str
}
// FindSingleXorKey brute-forces every single-byte XOR key over the
// hex-encoded input and returns the best corpus score together with the
// corresponding decoded plaintext. The input is re-decoded per key because
// SingleByteXor mutates its argument in place.
func FindSingleXorKey(str []byte, corpus map[rune]float64) (float64, string) {
	var bestScore float64
	var bestText string
	for key := 0; key < 256; key++ {
		plain := SingleByteXor(challenge_1.DecodeHex(str), byte(key))
		if score := ScoreText(string(plain), corpus); score > bestScore {
			bestScore = score
			bestText = string(plain)
		}
	}
	return bestScore, bestText
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernel
import (
"testing"
"gvisor.dev/gvisor/pkg/sentry/kernel/sched"
)
// TestTaskCPU verifies that assignCPU maps a thread ID onto a CPU that is
// allowed by the affinity mask, cycling through the set bits of the mask.
func TestTaskCPU(t *testing.T) {
	for _, test := range []struct {
		mask sched.CPUSet // allowed-CPU bitmask, one bit per CPU
		tid  ThreadID
		cpu  int32 // expected assignment
	}{
		{
			mask: []byte{0xff},
			tid:  1,
			cpu:  0,
		},
		{
			mask: []byte{0xff},
			tid:  10,
			cpu:  1,
		},
		{
			// more than 8 cpus.
			mask: []byte{0xff, 0xff},
			tid:  10,
			cpu:  9,
		},
		{
			// missing the first cpu.
			mask: []byte{0xfe},
			tid:  1,
			cpu:  1,
		},
		{
			mask: []byte{0xfe},
			tid:  10,
			cpu:  3,
		},
		{
			// missing the fifth cpu.
			mask: []byte{0xef},
			tid:  10,
			cpu:  2,
		},
	} {
		assigned := assignCPU(test.mask, test.tid)
		if test.cpu != assigned {
			t.Errorf("assignCPU(%v, %v) got %v, want %v", test.mask, test.tid, assigned, test.cpu)
		}
	}
}
|
// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
// I am using the Golang driver. Documentation can be found
// at https://godoc.org/github.com/mattermost/platform/model#Client
package main
import (
"fmt"
"os"
"os/signal"
"regexp"
"strconv"
"strings"
"github.com/loic-fejoz/platform/model"
)
// Default values used when the corresponding MATTERMOST_BOT_* environment
// variable is not set (see MattermostBotFromOsEnv).
const (
	DFLT_BOT_HOST      = "localhost:8065"
	DFLT_BOT_LOGIN     = "yakafokon"
	DFLT_BOT_FIRSTNAME = "Yakafokon"
	DFLT_BOT_LASTNAME  = "Bot"
	DFLT_BOT_TEAM      = "myteam"
	DFLT_CHANNEL_NAME  = "debugging-for-sample-bot"
)

// AnswerHandler computes a reply for a matched post; a nil handler means
// the Entry's static Answer string is used instead.
type AnswerHandler func(*model.WebSocketEvent, *model.Post) string

// Entry is one bot rule: a regular expression (source and compiled form),
// a canned Answer, and an optional dynamic handler.
type Entry struct {
	Expr   string         // regexp source, kept for display and persistence
	RegExp *regexp.Regexp // compiled form of Expr
	Answer string         // static reply, used when Hdler is nil
	Hdler  AnswerHandler  // non-nil marks a built-in (protected) command
}
// MattermostBot bundles the connection settings, rule table, and live
// Mattermost client/websocket state for one bot instance.
type MattermostBot struct {
	host             string // host:port of the Mattermost server
	login            string // bot account login
	password         string
	firstName        string
	lastName         string
	fullName         string
	teamName         string
	channelName      string // channel used for debug logging
	entries          []*Entry
	client           *model.Client
	webSocketClient  *model.WebSocketClient
	mUser            *model.User
	mTeam            *model.Team
	initialLoad      *model.InitialLoad
	debuggingChannel *model.Channel
}
// GetFromEnv returns the value of the environment variable key, falling
// back to dfltValue when the variable is unset or empty.
func GetFromEnv(key string, dfltValue string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return dfltValue
}
// MattermostBotFromOsEnv builds a bot from MATTERMOST_BOT_* environment
// variables, applying the DFLT_* defaults. It returns nil (after printing a
// message) when the mandatory password variable is missing.
func MattermostBotFromOsEnv() *MattermostBot {
	var bot MattermostBot
	bot.host = GetFromEnv("MATTERMOST_BOT_HOST", DFLT_BOT_HOST)
	bot.login = GetFromEnv("MATTERMOST_BOT_LOGIN", DFLT_BOT_LOGIN)
	bot.password = os.Getenv("MATTERMOST_BOT_PASSWORD")
	if bot.password == "" {
		println("It is mandatory to set MATTERMOST_BOT_PASSWORD environment variable.")
		return nil
	}
	bot.firstName = GetFromEnv("MATTERMOST_BOT_FIRSTNAME", DFLT_BOT_FIRSTNAME)
	bot.lastName = GetFromEnv("MATTERMOST_BOT_LASTNAME", DFLT_BOT_LASTNAME)
	// NOTE(review): the default full name concatenates first and last name
	// without a separator ("YakafokonBot") — confirm a space is not intended.
	bot.fullName = GetFromEnv("MATTERMOST_BOT_NAME", bot.firstName + bot.lastName)
	bot.teamName = GetFromEnv("MATTERMOST_BOT_TEAM", DFLT_BOT_TEAM)
	bot.channelName = GetFromEnv("MATTERMOST_BOT_CHANNEL", DFLT_CHANNEL_NAME)
	return &bot
}
// initEntries installs the built-in rule table (admin commands first, then
// canned French/English replies) and compiles each rule's regexp. Rules
// whose pattern fails to compile keep a nil RegExp and are reported on
// stderr-less builtin println.
func (bot *MattermostBot) initEntries() {
	bot.entries = []*Entry{
		// Admin commands: non-nil handlers mark them as protected built-ins.
		&Entry{`(?:^|\W)list_entries(?:$|\W)`, nil, ".", bot.listEntriesHdler},
		&Entry{`(?:^|\W)entries_delete(?:$|\W)`, nil, ".", bot.delEntriesHdler},
		&Entry{`(?:^|\W)entries_add(?:$|\W)`, nil, ".", bot.addEntriesHdler},
		&Entry{`(?:^|\W)(` + bot.login + `)|(` + bot.firstName + `)|(` + bot.fullName + `).+(alive)|(vivant)|(mort)(?:$|\W)`, nil, "Yes I'm alive", nil},
		&Entry{`(?:^|\W)((H|h)ello)|((S|s)alut)|((B|b)onjour)|((B|b)onsoir)(?:$|\W)`, nil, "Bonjour", nil},
		&Entry{`(?:^|\W)perdu(?:$|\W)`, nil, "Êtes-vous perdu ? http://perdu.com/", nil},
		&Entry{`(?:^|\W)Il faudrait que(?:$|\W)`, nil, "Pourquoi pas ? Mais surtout pourquoi ne pas le [faire toi-même](http://yakafokon.detected.fr/) ?", nil},
		&Entry{`(?:^|\W)Tu devrais(?:$|\W)`, nil, "Ben tiens, ça tombe bien, j'avais que ça à faire.\nhttp://yakafokon.detected.fr/ ", nil},
		&Entry{`(?:^|\W)Il n'y a qu'à(?:$|\W)`, nil, "T'as raison, Mmmhh, tu t'en occupe ? \nhttp://yakafokon.detected.fr/", nil},
		&Entry{`(?:^|\W)Il faut qu'on(?:$|\W)`, nil, "C'est celui-qui dit [qui fait](http://yakafokon.detected.fr/) ?", nil},
	}
	for _, e := range bot.entries {
		r, err := regexp.Compile(e.Expr)
		if err == nil {
			e.RegExp = r
		} else {
			println("We failed to compile ", e.Expr)
		}
	}
}
// listEntriesHdler renders the bot's rule table as a Markdown answer.
//
// Fix: regexp sources are displayed inside a Markdown table, so literal '|'
// characters must be escaped as "\|"; the original replaced "|" with itself
// (a no-op), so any pattern containing alternation broke the table layout.
// String accumulation also switched to strings.Builder.
func (bot *MattermostBot) listEntriesHdler(event *model.WebSocketEvent, post *model.Post) string {
	var b strings.Builder
	fmt.Fprintf(&b, "I know %v rules\n\n", len(bot.entries))
	b.WriteString("| id | RegExp | Answer |\n")
	b.WriteString("| :----: |:----------:|:----------:|\n")
	for k, e := range bot.entries {
		expr := strings.Replace(e.Expr, "|", "\\|", -1)
		fmt.Fprintf(&b, "| %v | %s | %s |\n", k, expr, e.Answer)
	}
	return b.String()
}
// delEntriesHdler handles "entries_delete <id>": removes a user-defined rule
// by index. Only team administrators may delete, and built-in rules (those
// with a handler) are protected.
//
// Fix: the original indexed the message split and the entries slice without
// bounds checks, so "entries_delete" with no argument or an out-of-range id
// crashed the bot with an index-out-of-range panic.
func (bot *MattermostBot) delEntriesHdler(event *model.WebSocketEvent, post *model.Post) string {
	if !bot.isTeamAdmin(event.UserId) {
		return "No never, you are not a team administrator."
	}
	fields := strings.Split(post.Message, " ")
	if len(fields) < 2 {
		return "I do not understand which entry you wanted to delete."
	}
	entryIdStr := fields[1]
	entryId, err := strconv.Atoi(entryIdStr)
	if err != nil || entryId < 0 || entryId >= len(bot.entries) {
		return "I do not understand which entry you wanted to delete: " + entryIdStr
	}
	if bot.entries[entryId].Hdler != nil {
		return "Sorry, I cannot delete an internal command!"
	}
	bot.entries = append(bot.entries[:entryId], bot.entries[entryId+1:]...)
	return "Done. I have deleted " + entryIdStr
}
// addEntriesHdler inserts a new rule at a given index. The expected message
// shape is "entries_add <index> When <regexp> answer <text>". Only team
// administrators may add rules.
func (bot *MattermostBot) addEntriesHdler(event *model.WebSocketEvent, post *model.Post) string {
	if !bot.isTeamAdmin(event.UserId) {
		return "No never, you are not a team administrator."
	}
	pattern := `entries_add\s+([0-9]+)\s+When\s+(.+)\s+answer(.+)`
	re := regexp.MustCompile(pattern)
	parts := re.FindStringSubmatch(post.Message)
	if parts == nil {
		return "No comprendo. Must be something along: " + pattern
	}
	entryId, err := strconv.Atoi(parts[1])
	if err != nil || entryId < 0 || entryId > len(bot.entries) {
		// len(bot.entries) is a valid insertion point (append at the end);
		// anything beyond it would panic in the slice expression below.
		return "I do not understand which index you wanted to insert: " + parts[1]
	}
	newEntry := &Entry{parts[2], nil, parts[3], nil}
	r, err := regexp.Compile(newEntry.Expr)
	if err != nil {
		return "This is not a valid regular expression: " + newEntry.Expr
	}
	newEntry.RegExp = r
	// Insert newEntry at entryId, shifting the tail right.
	bot.entries = append(bot.entries[:entryId], append([]*Entry{newEntry}, bot.entries[entryId:]...)...)
	return bot.listEntriesHdler(event, post)
}
func main() {
var bot *MattermostBot = MattermostBotFromOsEnv()
if bot == nil {
return
}
bot.start()
// You can block forever with
select {}
}
// start wires the bot up: it compiles the answer entries, installs the
// shutdown handler, logs into the Mattermost server, selects the team and
// debugging channel, and finally starts the websocket event loop.
func (bot *MattermostBot) start() {
	println(bot.fullName)
	bot.initEntries()
	bot.SetupGracefulShutdown()
	bot.client = model.NewClient("http://" + bot.host)
	// Lets test to see if the mattermost server is up and running
	bot.MakeSureServerIsRunning()
	// lets attempt to login to the Mattermost server as the bot user
	// This will set the token required for all future calls
	// You can get this token with client.AuthToken
	bot.LoginAsTheBotUser()
	// If the bot user doesn't have the correct information lets update his profile
	bot.UpdateTheBotUserIfNeeded()
	// Lets load all the stuff we might need
	bot.InitialLoad()
	// Lets find our bot team
	bot.FindBotTeam()
	// This is an important step. Lets make sure we use the botTeam
	// for all future web service requests that require a team.
	bot.client.SetTeamId(bot.mTeam.Id)
	// Lets create a bot channel for logging debug messages into
	bot.CreateBotDebuggingChannelIfNeeded()
	bot.SendMsgToDebuggingChannel("_"+bot.fullName+" has **started** running_", "")
	// Lets start listening to some channels via the websocket!
	webSocketClient, err := model.NewWebSocketClient("ws://"+bot.host, bot.client.AuthToken)
	if err != nil {
		println("We failed to connect to the web socket")
		PrintError(err)
		// Previously execution fell through and called Listen on a nil
		// client; bail out instead since no events can be received.
		return
	}
	// Store the client so SetupGracefulShutdown can close the connection;
	// the original kept it only in a local and the shutdown Close never ran.
	bot.webSocketClient = webSocketClient
	webSocketClient.Listen()
	go func() {
		// Drain events until the channel is closed (single-case select
		// replaced by range so a closed channel ends the loop instead of
		// spinning on nil events).
		for resp := range webSocketClient.EventChannel {
			bot.HandleWebSocketResponse(resp)
		}
	}()
}
// MakeSureServerIsRunning pings the Mattermost server and aborts the
// process when it cannot be reached.
func (bot *MattermostBot) MakeSureServerIsRunning() {
	props, err := bot.client.GetPing()
	if err != nil {
		println("There was a problem pinging the Mattermost server. Are you sure it's running?")
		PrintError(err)
		os.Exit(1)
	}
	println("Server detected and is running version " + props["version"])
}
// LoginAsTheBotUser authenticates with the bot credentials and caches the
// resulting user on the bot; the process exits on failure.
func (bot *MattermostBot) LoginAsTheBotUser() {
	loginResult, err := bot.client.Login(bot.login, bot.password)
	if err != nil {
		println("There was a problem logging into the Mattermost server. Are you sure ran the setup steps from the README.md?")
		PrintError(err)
		os.Exit(1)
	}
	bot.mUser = loginResult.Data.(*model.User)
	println("I am user with id " + bot.mUser.Id)
}
// UpdateTheBotUserIfNeeded pushes the configured first/last/user names to
// the server when the stored profile differs; exits on update failure.
func (bot *MattermostBot) UpdateTheBotUserIfNeeded() {
	botUser := bot.mUser
	upToDate := botUser.FirstName == bot.firstName &&
		botUser.LastName == bot.lastName &&
		botUser.Username == bot.fullName
	if upToDate {
		return
	}
	botUser.FirstName = bot.firstName
	botUser.LastName = bot.lastName
	botUser.Username = bot.fullName
	updateUserResult, err := bot.client.UpdateUser(botUser)
	if err != nil {
		println("We failed to update the Yakafokon Bot user")
		PrintError(err)
		os.Exit(1)
	}
	bot.mUser = updateUserResult.Data.(*model.User)
	println("Looks like this might be the first run so we've updated the bots account settings")
}
// InitialLoad fetches the server's initial payload (teams and related
// data) and caches it on the bot; the process exits on failure.
func (bot *MattermostBot) InitialLoad() {
	initialLoadResults, err := bot.client.GetInitialLoad()
	if err != nil {
		println("We failed to get the initial load")
		PrintError(err)
		os.Exit(1)
	}
	bot.initialLoad = initialLoadResults.Data.(*model.InitialLoad)
}
// FindBotTeam looks up the configured team name in the initial-load data
// and exits the process when the bot is not a member of it.
func (bot *MattermostBot) FindBotTeam() {
	for _, team := range bot.initialLoad.Teams {
		if team.Name != bot.teamName {
			continue
		}
		bot.mTeam = team
		break
	}
	if bot.mTeam == nil {
		println("We do not appear to be a member of the team '" + bot.teamName + "'")
		os.Exit(1)
	}
}
// CreateBotDebuggingChannelIfNeeded looks for the configured debugging
// channel among the channels visible to the bot and reuses it when found;
// otherwise it attempts to create an open channel with that name.
// Failures are only logged and do not abort the process.
func (bot *MattermostBot) CreateBotDebuggingChannelIfNeeded() {
	if channelsResult, err := bot.client.GetChannels(""); err != nil {
		println("We failed to get the channels")
		PrintError(err)
	} else {
		channelList := channelsResult.Data.(*model.ChannelList)
		for _, channel := range channelList.Channels {
			// The logging channel has alredy been created, lets just use it
			println("chan name: " + channel.Name)
			if channel.Name == bot.channelName {
				bot.debuggingChannel = channel
				return
			}
		}
	}
	// Looks like we need to create the logging channel
	// TODO this will fails if the chan already exists but the bot is not member of it already.
	channel := &model.Channel{}
	channel.Name = bot.channelName
	channel.DisplayName = "Debugging For Sample Bot"
	channel.Purpose = "This is used as a test channel for logging bot debug messages"
	channel.Type = model.CHANNEL_OPEN
	if channelResult, err := bot.client.CreateChannel(channel); err != nil {
		println("We failed to create the channel " + channel.Name)
		PrintError(err)
	} else {
		bot.debuggingChannel = channelResult.Data.(*model.Channel)
		println("Looks like this might be the first run so we've created the channel " + channel.Name)
	}
}
// SendMsgToDebuggingChannel posts msg to the debugging channel, optionally
// threading it under replyToId; a send failure is only logged.
func (bot *MattermostBot) SendMsgToDebuggingChannel(msg string, replyToId string) {
	post := &model.Post{}
	post.ChannelId = bot.debuggingChannel.Id
	post.RootId = replyToId
	post.Message = msg
	_, err := bot.client.CreatePost(post)
	if err != nil {
		println("We failed to send a message to the logging channel")
		PrintError(err)
	}
}
// HandleWebSocketResponse dispatches every websocket event to the
// debugging-channel handler, which performs all filtering itself.
func (bot *MattermostBot) HandleWebSocketResponse(event *model.WebSocketEvent) {
	bot.HandleMsgFromDebuggingChannel(event)
}
// isTeamAdmin reports whether the given user is an administrator of the
// bot's team. Lookup failures are treated as "not an admin" (fail closed).
func (bot *MattermostBot) isTeamAdmin(userId string) bool {
	teamMembersAnswer, err := bot.client.GetTeamMembers(bot.mTeam.Id)
	if err != nil {
		// The original discarded this error and would dereference a nil
		// result on failure; log it and deny instead.
		PrintError(err)
		return false
	}
	for _, member := range teamMembersAnswer.Data.([]*model.TeamMember) {
		if member.UserId == userId {
			return member.IsTeamAdmin()
		}
	}
	return false
}
// HandleMsgFromDebuggingChannel answers a posted message in the debugging
// channel when it matches one of the bot's compiled entries. Events from
// other channels, non-post events and the bot's own posts are ignored.
func (bot *MattermostBot) HandleMsgFromDebuggingChannel(event *model.WebSocketEvent) {
	// If this isn't the debugging channel then lets ingore it
	if event.ChannelId != bot.debuggingChannel.Id {
		return
	}
	// Lets only reponded to messaged posted events
	if event.Event != model.WEBSOCKET_EVENT_POSTED {
		return
	}
	// Lets ignore if it's my own events just in case
	if event.UserId == bot.mUser.Id {
		return
	}
	println("responding to debugging channel msg")
	post := model.PostFromJson(strings.NewReader(event.Data["post"].(string)))
	if post != nil {
		// First matching entry wins: reply with its static answer or with
		// the result of its handler, threaded under the triggering post.
		for _, e := range bot.entries {
			if e.RegExp.MatchString(post.Message) {
				var answer string
				if e.Hdler == nil {
					answer = e.Answer
				} else {
					answer = e.Hdler(event, post)
				}
				bot.SendMsgToDebuggingChannel(answer, post.Id)
				return
			}
		}
	}
	// bot.SendMsgToDebuggingChannel("I did not understand you!", post.Id)
}
// PrintError dumps the message, id and detailed text of a Mattermost
// AppError, indented for readability.
func PrintError(err *model.AppError) {
	println("\tError Details:")
	for _, detail := range []string{err.Message, err.Id, err.DetailedError} {
		println("\t\t" + detail)
	}
}
// SetupGracefulShutdown installs a SIGINT handler that closes the
// websocket connection (when one was opened), announces the stop in the
// debugging channel, and terminates the process.
func (bot *MattermostBot) SetupGracefulShutdown() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		// Idiomatic "for range" replaces the vetted-against "for _ = range".
		for range c {
			if bot.webSocketClient != nil {
				bot.webSocketClient.Close()
			}
			bot.SendMsgToDebuggingChannel("_"+bot.fullName+" has **stopped** running_", "")
			os.Exit(0)
		}
	}()
}
|
package gflObject
import "github.com/garclak/gflgo/gflConst"
type Runway struct {
id : int
Designation : string
Surface : gflConst.SurfaceC
Length : int
LocFreq : float32
LocHeading : int
LocAltFreq : float32
LocAltHeading : int
Remarks : string
}
/*
func (l *LogonC) Const(ref string) int {
if ret, ok := l.elements[ref]; ok {
return ret
} else {
return -1
}
}
*/
// NewRunway allocates a Runway and records the given id on it.
func NewRunway(inId int) *Runway {
	lt := new(Runway)
	// The original wrote "id := inId", declaring an unused local (a compile
	// error in Go) and never storing the id; assign it to the struct field.
	lt.id = inId
	return lt
}
|
package storage_test
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
. "devicedb/storage"
. "devicedb/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"fmt"
)
// newStorageDriver builds a LevelDB driver rooted at a unique temp path so
// every test case operates on a fresh database.
func newStorageDriver() StorageDriver {
	path := "/tmp/testdevicedb-" + RandomString()
	return NewLevelDBStorageDriver(path, nil)
}
// StorageEngine spec: exercises the LevelDB-backed StorageDriver through
// its open/close lifecycle, batched put/get/delete, prefix and range
// iteration, and snapshot/restore behavior.
var _ = Describe("StorageEngine", func() {
	Describe("#Open", func() {
		It("Open should not return any error", func() {
			storageDriver := newStorageDriver()
			defer storageDriver.Close()
			Expect(storageDriver.Open()).Should(Succeed())
		})
		It("Calling open twice should implictly close then open a new database", func() {
			storageDriver := newStorageDriver()
			defer storageDriver.Close()
			Expect(storageDriver.Open()).Should(Succeed())
			Expect(storageDriver.Open()).Should(Succeed())
		})
	})
	Describe("#Close", func() {
		It("Close should not return any error", func() {
			storageDriver := newStorageDriver()
			Expect(storageDriver.Open()).Should(Succeed())
			Expect(storageDriver.Close()).Should(Succeed())
		})
		It("Calling close twice should work", func() {
			storageDriver := newStorageDriver()
			Expect(storageDriver.Open()).Should(Succeed())
			Expect(storageDriver.Close()).Should(Succeed())
			Expect(storageDriver.Close()).Should(Succeed())
		})
	})
	Describe("Driver", func() {
		It("Get should return an array of values corresponding to the input keys. If an input key is nil then the corresponding value should be nil. A nil input array results in an empty value array", func() {
			keyCount := 10000
			storageDriver := newStorageDriver()
			defer storageDriver.Close()
			storageDriver.Open()
			// Seed keyCount sequential zero-padded key/value pairs.
			batch := NewBatch()
			for i := 0; i < keyCount; i += 1 {
				batch.Put([]byte(fmt.Sprintf("key%05d", i)), []byte(fmt.Sprintf("value%05d", i)))
			}
			Expect(storageDriver.Batch(batch)).Should(Succeed())
			// Existing keys resolve to their stored values.
			Expect(storageDriver.Get([][]byte{
				[]byte("key00000"),
				[]byte("key00001"),
				[]byte("key00002"),
			})).Should(Equal([][]byte{
				[]byte("value00000"),
				[]byte("value00001"),
				[]byte("value00002"),
			}))
			// Missing keys resolve to nil values.
			Expect(storageDriver.Get([][]byte{
				[]byte("keyD"),
				[]byte("keyE"),
				[]byte("keyF"),
			})).Should(Equal([][]byte{
				nil,
				nil,
				nil,
			}))
			// A nil key inside the request yields a nil value in that slot.
			Expect(storageDriver.Get([][]byte{
				[]byte("key00000"),
				[]byte("key00001"),
				nil,
			})).Should(Equal([][]byte{
				[]byte("value00000"),
				[]byte("value00001"),
				nil,
			}))
			Expect(storageDriver.Get(nil)).Should(Equal([][]byte{ }))
			// Prefix iteration over "key" visits every pair in order.
			iterator, err := storageDriver.GetMatches([][]byte{
				[]byte("key"),
			})
			Expect(err).Should(Succeed())
			for i := 0; i < keyCount; i += 1 {
				Expect(iterator.Next()).Should(BeTrue())
				Expect(iterator.Prefix()).Should(Equal([]byte("key")))
				Expect(iterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(iterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			// An exhausted iterator keeps returning zero values, before and
			// after Release.
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Prefix()).Should(BeNil())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			iterator.Release()
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Prefix()).Should(BeNil())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			// Multiple prefixes are visited one after another, each tagged
			// with the prefix it matched.
			iterator, err = storageDriver.GetMatches([][]byte{
				[]byte("key000"),
				[]byte("key022"),
				[]byte("key044"),
			})
			Expect(err).Should(Succeed())
			for i := 0; i <= 99; i += 1 {
				Expect(iterator.Next()).Should(BeTrue())
				Expect(iterator.Prefix()).Should(Equal([]byte("key000")))
				Expect(iterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(iterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			for i := 2200; i <= 2299; i += 1 {
				Expect(iterator.Next()).Should(BeTrue())
				Expect(iterator.Prefix()).Should(Equal([]byte("key022")))
				Expect(iterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(iterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			for i := 4400; i <= 4499; i += 1 {
				Expect(iterator.Next()).Should(BeTrue())
				Expect(iterator.Prefix()).Should(Equal([]byte("key044")))
				Expect(iterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(iterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Prefix()).Should(BeNil())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			iterator.Release()
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Prefix()).Should(BeNil())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			// Range iteration, forward and backward over the same span.
			iterator, err = storageDriver.GetRange([]byte("key055"), []byte("key099"))
			Expect(err).Should(Succeed())
			reverseIterator, err := storageDriver.GetRanges([][2][]byte{ [2][]byte{ []byte("key055"), []byte("key099") } }, BACKWARD)
			for i := 5500; i <= 9899; i += 1 {
				Expect(iterator.Next()).Should(BeTrue())
				Expect(iterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(iterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			for i := 9899; i >= 5500; i -= 1 {
				Expect(reverseIterator.Next()).Should(BeTrue())
				Expect(reverseIterator.Key()).Should(Equal([]byte(fmt.Sprintf("key%05d", i))))
				Expect(reverseIterator.Value()).Should(Equal([]byte(fmt.Sprintf("value%05d", i))))
			}
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			iterator.Release()
			Expect(iterator.Next()).Should(BeFalse())
			Expect(iterator.Key()).Should(BeNil())
			Expect(iterator.Value()).Should(BeNil())
			Expect(iterator.Error()).Should(BeNil())
			// Delete everything and verify every key now resolves to nil.
			batch = NewBatch()
			for i := 0; i < keyCount; i += 1 {
				batch.Delete([]byte(fmt.Sprintf("key%05d", i)))
			}
			Expect(storageDriver.Batch(batch)).Should(Succeed())
			for i := 0; i < keyCount; i += 1 {
				Expect(storageDriver.Get([][]byte{ []byte(fmt.Sprintf("key%05d", i)) })).Should(Equal([][]byte{ nil }))
			}
		})
	})
	Describe("Snapshot", func() {
		Specify("Should create snapshot", func() {
			keyCount := 10000
			storageDriver := newStorageDriver()
			defer storageDriver.Close()
			storageDriver.Open()
			batch := NewBatch()
			for i := 0; i < keyCount; i += 1 {
				batch.Put([]byte(fmt.Sprintf("key%05d", i)), []byte(fmt.Sprintf("value%05d", i)))
			}
			// Values sized at CopyBatchMaxBytes/2 force the snapshot copy to
			// split across multiple copy batches.
			largeValue1 := make([]byte, CopyBatchMaxBytes / 2)
			largeValue2 := make([]byte, CopyBatchMaxBytes / 2)
			largeValue3 := make([]byte, CopyBatchMaxBytes / 2)
			batch.Put([]byte("large1"), largeValue1)
			batch.Put([]byte("large2"), largeValue2)
			batch.Put([]byte("large3"), largeValue3)
			Expect(storageDriver.Batch(batch)).Should(Succeed())
			snapshotDirectory := "/tmp/testsnapshot-"+RandomString()
			snapshotMetaPrefix := []byte("metadata")
			snapshotMeta := map[string]string{
				"ID": "AAA",
			}
			Expect(storageDriver.Snapshot(snapshotDirectory, snapshotMetaPrefix, snapshotMeta)).Should(Succeed())
			// Open the snapshot directory as a standalone database and
			// verify all data plus the prefixed metadata made it across.
			snapshot := NewLevelDBStorageDriver(snapshotDirectory, nil)
			defer snapshot.Close()
			Expect(snapshot.Open()).Should(Succeed())
			for i := 0; i < keyCount; i += 1 {
				Expect(snapshot.Get([][]byte{ []byte(fmt.Sprintf("key%05d", i)) })).Should(Equal([][]byte{ []byte(fmt.Sprintf("value%05d", i)) }))
			}
			Expect(snapshot.Get([][]byte{ []byte("large1"), []byte("large2"), []byte("large3") })).Should(Equal([][]byte{
				largeValue1,
				largeValue2,
				largeValue3,
			}))
			Expect(snapshot.Get([][]byte{ []byte("metadataID") })).Should(Equal([][]byte{
				[]byte("AAA"),
			}))
			// Restoring from the snapshot into a fresh driver reproduces
			// the same contents.
			restore := newStorageDriver()
			defer restore.Close()
			Expect(restore.Open()).Should(Succeed())
			Expect(restore.Restore(snapshot)).Should(Succeed())
			for i := 0; i < keyCount; i += 1 {
				Expect(restore.Get([][]byte{ []byte(fmt.Sprintf("key%05d", i)) })).Should(Equal([][]byte{ []byte(fmt.Sprintf("value%05d", i)) }))
			}
			Expect(restore.Get([][]byte{ []byte("large1"), []byte("large2"), []byte("large3") })).Should(Equal([][]byte{
				largeValue1,
				largeValue2,
				largeValue3,
			}))
			Expect(restore.Get([][]byte{ []byte("metadataID") })).Should(Equal([][]byte{
				[]byte("AAA"),
			}))
		})
	})
})
|
// Copyright 2022 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"bytes"
"io"
"mime"
"net/http"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/kube/proxy/responsewriters"
)
// newPodFilterer creates a wrapper function that once executed creates
// a runtime filter for Pods.
// The filter exclusion criteria is:
// - deniedPods: excluded if (namespace,name) matches an entry even if it matches
// the allowedPod's list.
// - allowedPods: excluded if (namespace,name) not match a single entry.
func newPodFilterer(allowedPods, deniedPods []types.KubernetesResource, log logrus.FieldLogger) responsewriters.FilterWrapper {
	// A wildcard allow-list combined with an empty deny-list lets every
	// pod through, so no filter is required at all.
	if len(deniedPods) == 0 && containsWildcard(allowedPods) {
		return nil
	}
	return func(contentType string, responseCode int) (responsewriters.Filter, error) {
		negotiator := newClientNegotiator()
		enc, dec, err := newEncoderAndDecoderForContentType(contentType, negotiator)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		filter := &podFilterer{
			encoder:          enc,
			decoder:          dec,
			contentType:      contentType,
			responseCode:     responseCode,
			negotiator:       negotiator,
			allowedResources: allowedPods,
			deniedResources:  deniedPods,
			log:              log,
		}
		return filter, nil
	}
}
// wildcardFilter is a filter that matches all pods.
var wildcardFilter = types.KubernetesResource{
	Kind:      types.KindKubePod,
	Namespace: types.Wildcard,
	Name:      types.Wildcard,
}

// containsWildcard returns true if the list of resources contains a wildcard filter.
func containsWildcard(resources []types.KubernetesResource) bool {
	for _, resource := range resources {
		isWildcard := resource.Kind == wildcardFilter.Kind &&
			resource.Name == wildcardFilter.Name &&
			resource.Namespace == wildcardFilter.Namespace
		if isWildcard {
			return true
		}
	}
	return false
}
// podFilterer is a pod filterer instance. It decodes Kubernetes API
// responses, drops pods the user may not see, and re-encodes the result.
type podFilterer struct {
	// encoder serializes the filtered object back into the response body.
	encoder runtime.Encoder
	// decoder parses response payloads into Kubernetes API objects.
	decoder runtime.Decoder
	// contentType is the response "Content-Type" header.
	contentType string
	// responseCode is the response status code.
	responseCode int
	// negotiator is an instance of a client negotiator.
	negotiator runtime.ClientNegotiator
	// allowedResources is the list of kubernetes resources the user has access to.
	allowedResources []types.KubernetesResource
	// deniedResources is the list of kubernetes resources the user must not access.
	deniedResources []types.KubernetesResource
	// log is the logger.
	log logrus.FieldLogger
}
// FilterBuffer receives a byte array, decodes the response into the appropriate
// type and filters the resources based on allowed and denied rules configured.
// After filtering them, it serializes the response and dumps it into output buffer.
// If any error occurs, the call returns an error.
func (d *podFilterer) FilterBuffer(buf []byte, output io.Writer) error {
	// decode the response into the appropriate Kubernetes API type.
	obj, bf, err := d.decode(buf)
	if err != nil {
		return trace.Wrap(err)
	}
	// if bf is not empty, it means that response does not contain any valid response
	// and it should be safe to write it back into the buffer.
	if len(bf) > 0 {
		_, err = output.Write(buf)
		return trace.Wrap(err)
	}
	var filtered runtime.Object
	switch o := obj.(type) {
	case *metav1.Status:
		// Status object is returned when the Kubernetes API returns an error and
		// should be forwarded to the user.
		filtered = obj
	case *corev1.Pod:
		// filterPod filters a single corev1.Pod and returns an error if access to
		// pod was denied.
		if err := filterPod(o, d.allowedResources, d.deniedResources); err != nil {
			if !trace.IsAccessDenied(err) {
				d.log.WithError(err).Warn("Unable to compile role kubernetes_resources.")
			}
			return trace.Wrap(err)
		}
		filtered = obj
	case *corev1.PodList:
		// List responses are filtered item by item; denied pods are dropped.
		filtered = filterCoreV1PodList(o, d.allowedResources, d.deniedResources, d.log)
	case *metav1.Table:
		// Server-side printed tables carry partial metadata per row and are
		// filtered row by row.
		filtered, err = d.filterMetaV1Table(o, d.allowedResources, d.deniedResources)
		if err != nil {
			return trace.Wrap(err)
		}
	default:
		// It's important default types are never blindly forwarded or protocol
		// extensions could result in information disclosures.
		return trace.BadParameter("unexpected type received; got %T", obj)
	}
	// encode the filterer response back to the user.
	return d.encode(filtered, output)
}
// FilterObj receives a runtime.Object type and filters the resources on it
// based on allowed and denied rules.
// After filtering them, the obj is manipulated to hold the filtered information.
// The boolean returned indicates if the client is allowed to receive the event
// that originated this check.
func (d *podFilterer) FilterObj(obj runtime.Object) (bool, error) {
	switch o := obj.(type) {
	case *corev1.Pod:
		err := filterPod(o, d.allowedResources, d.deniedResources)
		if err != nil && !trace.IsAccessDenied(err) {
			d.log.WithError(err).Warn("Unable to compile role kubernetes_resources.")
		}
		// if err is not nil we should not include it.
		return err == nil, nil
	case *corev1.PodList:
		// The list is filtered in place; forward the event only when at
		// least one pod survived filtering.
		_ = filterCoreV1PodList(o, d.allowedResources, d.deniedResources, d.log)
		return len(o.Items) > 0, nil
	case *metav1.Table:
		// The table is filtered in place; forward only when rows remain.
		_, err := d.filterMetaV1Table(o, d.allowedResources, d.deniedResources)
		if err != nil {
			return false, trace.Wrap(err)
		}
		return len(o.Rows) > 0, nil
	default:
		// It's important default types are never blindly forwarded or protocol
		// extensions could result in information disclosures.
		return false, trace.BadParameter("unexpected type received; got %T", obj)
	}
}
// decode decodes the buffer into the appropriate type if the responseCode
// belongs to the range 200(OK)-206(PartialContent).
// If it does not belong, it returns the buffer unchanged since it contains
// an error message from the Kubernetes API server and it's safe to return
// it back to the user.
func (d *podFilterer) decode(buffer []byte) (runtime.Object, []byte, error) {
	if d.responseCode == http.StatusSwitchingProtocols {
		// no-op, we've been upgraded
		return nil, buffer, nil
	}
	if d.responseCode < http.StatusOK /* 200 */ || d.responseCode > http.StatusPartialContent /* 206 */ {
		// calculate an unstructured error from the response which the Result object may use if the caller
		// did not return a structured error.
		// Logic from: https://github.com/kubernetes/client-go/blob/58ff029093df37cad9fa28778a37f11fa495d9cf/rest/request.go#L1040
		return nil, buffer, nil
	}
	out, err := decodeAndSetGVK(d.decoder, buffer)
	return out, nil, trace.Wrap(err)
}
// decodePartialObjectMetadata decodes the metav1.PartialObjectMetadata present
// in the metav1.TableRow entry. This information comes from server side and
// includes the resource name and namespace as a structured object.
func (d *podFilterer) decodePartialObjectMetadata(row *metav1.TableRow) error {
	// Skip rows whose structured object was already decoded earlier.
	if row.Object.Object != nil {
		return nil
	}
	obj, err := decodeAndSetGVK(d.decoder, row.Object.Raw)
	row.Object.Object = obj
	return trace.Wrap(err)
}
// encode encodes the filtered object into the io.Writer using the same
// content-type.
func (d *podFilterer) encode(obj runtime.Object, w io.Writer) error {
	err := d.encoder.Encode(obj, w)
	return trace.Wrap(err)
}
// filterCoreV1PodList excludes pods the user should not have access to.
func filterCoreV1PodList(list *corev1.PodList, allowed, denied []types.KubernetesResource, log logrus.FieldLogger) *corev1.PodList {
	kept := make([]corev1.Pod, 0, len(list.Items))
	for i := range list.Items {
		pod := list.Items[i]
		err := filterPod(&pod, allowed, denied)
		switch {
		case err == nil:
			kept = append(kept, pod)
		case !trace.IsAccessDenied(err):
			// Unexpected (non-RBAC) failure: log it and drop the pod.
			log.WithError(err).Warnf("Unable to compile role kubernetes_resources.")
		}
	}
	list.Items = kept
	return list
}
// filterPod validates if the user should access the current resource.
func filterPod(pod *corev1.Pod, allowed, denied []types.KubernetesResource) error {
	resource := types.KubernetesResource{
		Kind:      types.KindKubePod,
		Namespace: pod.Namespace,
		Name:      pod.Name,
	}
	return trace.Wrap(matchKubernetesResource(resource, allowed, denied))
}
// filterMetaV1Table filters the serverside printed table to exclude pods
// that the user must not have access to. Rows are filtered in place and
// the (mutated) table is returned.
func (d *podFilterer) filterMetaV1Table(table *metav1.Table, allowedPods, deniedPods []types.KubernetesResource) (*metav1.Table, error) {
	pods := make([]metav1.TableRow, 0, len(table.Rows))
	for i := range table.Rows {
		row := &(table.Rows[i])
		// Ensure the row's PartialObjectMetadata is decoded so we can read
		// the pod's name and namespace.
		if err := d.decodePartialObjectMetadata(row); err != nil {
			return nil, trace.Wrap(err)
		}
		resource, err := getKubeResourcePartialMetadataObject(row.Object.Object)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		if err := matchKubernetesResource(resource, allowedPods, deniedPods); err == nil {
			pods = append(pods, *row)
		} else if !trace.IsAccessDenied(err) {
			// Unexpected (non-RBAC) failure: log it and drop the row.
			d.log.WithError(err).Warn("Unable to compile regex expression.")
		}
	}
	table.Rows = pods
	return table, nil
}
// getKubeResourcePartialMetadataObject checks if obj is of type *metav1.PartialObjectMetadata
// otherwise returns an error.
func getKubeResourcePartialMetadataObject(obj runtime.Object) (types.KubernetesResource, error) {
	meta, ok := obj.(*metav1.PartialObjectMetadata)
	if !ok {
		return types.KubernetesResource{}, trace.BadParameter("expected *metav1.PartialObjectMetadata object, got %T", obj)
	}
	return types.KubernetesResource{
		Namespace: meta.Namespace,
		Name:      meta.Name,
		Kind:      types.KindKubePod,
	}, nil
}
// newEncoderAndDecoderForContentType creates a new encoder and decoder instances
// for the given contentType.
// If the contentType is invalid or not supported this function returns an error.
// Supported content types:
// - "application/json"
// - "application/yaml"
// - "application/vnd.kubernetes.protobuf"
func newEncoderAndDecoderForContentType(contentType string, negotiator runtime.ClientNegotiator) (runtime.Encoder, runtime.Decoder, error) {
	mediaType, params, err := mime.ParseMediaType(contentType)
	if err != nil {
		return nil, nil, trace.WrapWithMessage(err, "unable to parse %q header %q", responsewriters.ContentTypeHeader, contentType)
	}
	decoder, decErr := negotiator.Decoder(mediaType, params)
	if decErr != nil {
		return nil, nil, trace.Wrap(decErr)
	}
	encoder, encErr := negotiator.Encoder(mediaType, params)
	if encErr != nil {
		return nil, nil, trace.Wrap(encErr)
	}
	return encoder, decoder, nil
}
// decodeAndSetGVK decodes the payload into the appropriate type using the decoder
// provider and sets the GVK if available.
func decodeAndSetGVK(decoder runtime.Decoder, payload []byte) (runtime.Object, error) {
	obj, gvk, err := decoder.Decode(payload, nil, nil)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if gvk == nil {
		return obj, nil
	}
	// objects from decode do not contain GroupVersionKind.
	// We force it to be present for later encoding.
	obj.GetObjectKind().SetGroupVersionKind(*gvk)
	return obj, nil
}
// filterBuffer filters the response buffer before writing it into the original
// MemoryResponseWriter. It decompresses the captured body, runs the pod
// filter over it, and re-compresses the filtered payload in place.
// A nil filterWrapper means filtering is disabled and the buffer is left
// untouched.
func filterBuffer(filterWrapper responsewriters.FilterWrapper, src *responsewriters.MemoryResponseWriter) error {
	if filterWrapper == nil {
		return nil
	}
	filter, err := filterWrapper(responsewriters.GetContentHeader(src.Header()), src.Status())
	if err != nil {
		return trace.Wrap(err)
	}
	// copy body into another slice so we can manipulate it.
	b := bytes.NewBuffer(make([]byte, 0, src.Buffer().Len()))
	// get the compressor and decompressor for the response based on the content type.
	compressor, decompressor, err := getResponseCompressorDecompressor(src.Header())
	if err != nil {
		return trace.Wrap(err)
	}
	// decompress the response body into b.
	if err := decompressor(b, src.Buffer()); err != nil {
		return trace.Wrap(err)
	}
	// filter.FilterBuffer encodes the filtered payload into src.Buffer, so we need to
	// reset it to discard the old payload.
	src.Buffer().Reset()
	// creates a compressor that writes the filtered payload into src.Buffer.
	comp := compressor(src.Buffer())
	// Close is a no-op operation into src but it's required to put the gzip writer
	// into the sync.Pool.
	defer comp.Close()
	return trace.Wrap(filter.FilterBuffer(b.Bytes(), comp))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.