text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// main demonstrates closures: a fixed "+2" adder and a parameterized
// adder produced by plusX.
func main() {
	addTwo := plusTwo()
	fmt.Printf("%v\n", addTwo(2))
	fmt.Printf("%v\n", plusX(2)(2))
	fmt.Printf("%v\n", plusX(8)(2))
}
// plusTwo returns a function that adds two to its argument.
func plusTwo() func(int) int {
	add := func(n int) int {
		return n + 2
	}
	return add
}
// plusX returns a closure that adds x to its argument.
func plusX(x int) func(int) int {
	return func(n int) int {
		return n + x
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
// main plays a number-guessing game: it picks a random target in [1,100]
// and gives the player 10 attempts, reading guesses from stdin. Invalid
// input or a read failure terminates the program via log.Fatal.
func main() {
	// Seed the RNG from the wall clock and pick the target.
	seconds := time.Now().Unix()
	rand.Seed(seconds)
	target := rand.Intn(100) + 1
	fmt.Println("1에서 100사이의 난수 생성 완료.")
	fmt.Println("뭐게?")
	reader := bufio.NewReader(os.Stdin)
	success := false
	for t := 0; t < 10; t++ {
		fmt.Printf("%d번 남음\n", 10-t)
		// Read one line of input.
		fmt.Println("입력 해봐")
		input, err := reader.ReadString('\n')
		if err != nil {
			log.Fatal(err)
		}
		// Convert the trimmed input string to an int.
		input = strings.TrimSpace(input)
		guess, err := strconv.Atoi(input)
		if err != nil {
			log.Fatal(err)
		}
		// Compare the guess against the target.
		if guess < target {
			fmt.Println("너무 작다")
		} else if guess > target {
			fmt.Println("너무 크다")
		} else {
			success = true
			fmt.Println("정답!")
			fmt.Printf("%d = %d", target, guess)
			break
		}
		// Only reached on a wrong guess (the success branch breaks),
		// so this prints once per failed attempt.
		if !success {
			fmt.Println("어렵구만")
		}
	}
	// All 10 attempts used without a correct guess: reveal the answer.
	if !success {
		fmt.Printf("몰름? %d임", target)
	}
}
|
package main
// async.go runs portions of the task asynchronously
// Job describes a task for the worker.
// The job is to generate a Diff containing []int of width b-a containing
// values k.
type Job struct {
	t Transform // transform to expand: bounds a,b and fill value k
}
// Diff is a fragment of a State slice affected by this transform.
type Diff struct {
	t Transform // the transform that produced this fragment
	s State     // produced values; not full width, only l elements
	l int       // element count of s (b-a+1 with inclusive bounds)
}
// Worker is spawned by a goroutine to help main.
type Worker struct {
	input  <-chan Job  // jobs to process (receive-only)
	output chan<- Diff // computed diffs (send-only)
}
// Work loops forever over the input channel. For each Job it builds a
// Diff whose slice holds in.t.k repeated b-a+1 times and sends it on the
// output channel. Run it in a goroutine; it never returns.
//
// Fix: the previous doubling copy advanced both ends of its source
// window (copy(scratch[i+z:j+z], scratch[i:j])), so it only wrote
// indices 0, 1, 3, 7, ... and left zero gaps for any length >= 3. The
// fill below copies the whole populated prefix each round, which is the
// correct O(log l) doubling pattern, and no longer needs a 2*l scratch.
func (w *Worker) Work() {
	for {
		// wait for job:
		in := <-w.input
		// calculate how long the diff is:
		l := in.t.b - in.t.a + 1 // a,b one indexed and inclusive
		scratch := make([]int, l)
		// populate scratch with k's by doubling the filled prefix;
		// copy caps itself at len(scratch), so the last round may be
		// a partial copy:
		scratch[0] = in.t.k
		for filled := 1; filled < l; {
			filled += copy(scratch[filled:], scratch[:filled])
		}
		// return diff:
		w.output <- Diff{
			s: scratch,
			t: in.t,
			l: l,
		}
	}
}
|
package main
import (
"fmt"
)
// main prints a distance in kilometres stored in a uint8 (range 0-255).
func main() {
	const quilometros uint8 = 150
	fmt.Println(quilometros)
}
|
package main
import (
"log"
"encoding/json"
"io/ioutil"
"github.com/Shopify/sarama"
)
// Entity is a row-change event decoded from data.json; main derives the
// Kafka topic from Database and Table, and Entity doubles as the message
// value via Encode/Length.
type Entity struct {
	Database      string  `json:"database"`      // source database name
	Table         string  `json:"table"`         // source table name
	BeforeColumns Columns `json:"beforeColumns"` // column values before the change
	AfterColumns  Columns `json:"afterColumns"`  // column values after the change
	EventType     string  `json:"eventType"`     // change kind — exact vocabulary comes from the upstream producer
	ExecuteTime   int64   `json:"executeTime"`   // event timestamp; units not shown here — verify with producer
}
// Columns is the list of column entries carried by a change event.
type Columns []*Column

// Column describes one column of a row-change event as delivered by the
// upstream source.
type Column struct {
	Name    string `json:"name"`    // column name
	Value   string `json:"value"`   // value rendered as a string
	SqlType int    `json:"sqlType"` // numeric SQL type code — presumably java.sql.Types; confirm upstream
	IsKey   bool   `json:"isKey"`   // marked as key column by the source
	IsNull  bool   `json:"isNull"`  // marked as NULL by the source
	Updated bool   `json:"updated"` // marked as updated by the source
}
// Encode serializes the entity to JSON; together with Length it lets
// *Entity be used directly as a sarama message value.
//
// Fix: renamed the receiver from the non-idiomatic `this` to `e`.
func (e *Entity) Encode() ([]byte, error) {
	return json.Marshal(e)
}
// Length reports the byte length of the JSON encoding. An encode failure
// yields 0; the error is intentionally dropped because this interface
// method returns only an int.
//
// Fix: renamed the receiver from the non-idiomatic `this` to `e`.
func (e *Entity) Length() int {
	data, _ := e.Encode()
	return len(data)
}
// main reads a row-change event from data.json and publishes it to a
// local Kafka broker, using "<database>___<table>" as the topic. The
// Entity value is passed directly as the message value (it implements
// Encode/Length).
func main() {
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	// Close the producer on exit; a close failure is fatal too.
	defer func() {
		if err := producer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()
	data, err := ioutil.ReadFile("data.json")
	if err != nil {
		log.Panic(err)
	}
	entity := Entity{}
	err = json.Unmarshal(data, &entity)
	if err != nil {
		log.Panic(err)
	}
	msg := &sarama.ProducerMessage{Topic: entity.Database + "___" + entity.Table, Value: &entity}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Printf("FAILED to send message: %s\n", err)
	} else {
		log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
	}
}
|
package prediction
import (
pbcodec "github.com/streamingfast/sparkle/pb/dfuse/ethereum/codec/v1"
)
// HandlePredictionUnpauseEvent marks the market entity with id "1" as
// unpaused and records the epoch carried by the event. The whole handler
// is skipped while the subgraph is below step 2.
func (s *Subgraph) HandlePredictionUnpauseEvent(trace *pbcodec.TransactionTrace, ev *PredictionUnpauseEvent) error {
	if s.StepBelow(2) {
		return nil
	}
	market := NewMarket("1")
	if err := s.Load(market); err != nil {
		return err
	}
	// if it didn't exist, well.. it'll be created.
	market.Epoch = S(ev.Epoch.String())
	market.Paused = false
	if err := s.Save(market); err != nil {
		return err
	}
	return nil
}
|
package work
import (
"syscall"
"time"
)
// Disk samples root-filesystem usage into the embedded MonitorData.
type Disk struct {
	MonitorData
}
// SetMonitorData samples the filesystem mounted at "/" and stores the
// used fraction of blocks (0..1) in d.Data together with the sample time
// in d.MonitorTime. On a Statfs error the previous sample is left
// untouched.
func (d *Disk) SetMonitorData() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/", &stat); err != nil {
		return
	}
	total := float64(stat.Blocks)
	used := total - float64(stat.Bfree)
	d.Data = used / total
	d.MonitorTime = time.Now()
}
|
package main
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"mime/multipart"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
// CreateRandomNumber returns a 15-character, zero-padded decimal string
// whose value is a pseudo-random int in [0, 1000000), freshly seeded
// from the wall clock on every call.
func CreateRandomNumber() string {
	src := rand.NewSource(time.Now().UnixNano())
	n := rand.New(src).Int31n(1000000)
	return fmt.Sprintf("%015v", n)
}
// CreateRandomDataExchangeId builds a 32-character exchange id from the
// wall-clock time ("YYYYMMDDhhmmss", 14 chars), the last 3 digits of the
// current Unix time in milliseconds, and a 15-digit random suffix.
//
// Fix: removed the leftover debug fmt.Println of the millisecond tail.
func CreateRandomDataExchangeId() string {
	now := time.Now()
	stamp := now.Format("20060102150405")
	msStr := fmt.Sprintf("%d", now.UnixNano()/1e6)
	tail := msStr[len(msStr)-3:]
	return stamp + tail + CreateRandomNumber()
}
// CreateRandomDataExchangeId_1 builds a 32-character exchange id from a
// millisecond-resolution timestamp ("20060102150405000", 17 chars)
// followed by a 15-digit random suffix.
func CreateRandomDataExchangeId_1() string {
	stamp := time.Now().Format("20060102150405000")
	return stamp + CreateRandomNumber()
}
// Base64Encode returns the standard-alphabet base64 encoding of input.
func Base64Encode(input string) string {
	raw := []byte(input)
	return base64.StdEncoding.EncodeToString(raw)
}
// Base64Decode decodes a standard-alphabet base64 string. On failure it
// returns "" and the decode error.
//
// Fix: the success path now returns a literal nil instead of re-returning
// the (always nil at that point) err variable.
func Base64Decode(input string) (string, error) {
	data, err := base64.StdEncoding.DecodeString(input)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
//func ConvertInvoiceTypeToFpdam( invoiceType string) string{
// //to do
//
//}
// ConvertPdfTojpg shells out to ImageMagick ("magick convert -density 300")
// to rasterize pdfFile into jpgFile and returns jpgFile. The process
// exits via log.Fatal when the conversion fails or magick is missing.
func ConvertPdfTojpg(pdfFile string, jpgFile string) string {
	log.Print("ConvertPdfTojpg ", pdfFile, "\n", "ConvertPdfTojpg ", jpgFile)
	command := exec.Command("magick", "convert", "-density", "300", pdfFile, jpgFile)
	err := command.Run()
	if err != nil {
		log.Fatal(err)
	}
	return jpgFile
}
// CheckInputFileType normalizes an input document to an image file:
// names ending in .jpg/.png/.jpeg are returned as-is, a .pdf is converted
// to a sibling .jpg via ConvertPdfTojpg, and anything else yields "".
//
// Fixes: the previous 4-byte slice panicked on filenames shorter than
// four characters, and the "jpeg" case compared only the last four bytes
// (so any name merely ending in "jpeg" matched). Proper suffix checks
// are used instead.
func CheckInputFileType(filename string) string {
	switch {
	case strings.HasSuffix(filename, ".jpg"),
		strings.HasSuffix(filename, ".png"),
		strings.HasSuffix(filename, ".jpeg"):
		return filename
	case strings.HasSuffix(filename, ".pdf"):
		jpgfile := strings.TrimSuffix(filename, ".pdf") + ".jpg"
		ConvertPdfTojpg(filename, jpgfile)
		return jpgfile
	}
	return ""
}
// ConvertFileToInvoiceJson OCRs an invoice image via Baidu AI and maps
// the recognized fields into a SingleInvoiceCheckPostData: amount (Fpje),
// invoice code (Fpdm), issue date (Kprq), invoice number (Fphm), invoice
// type (Fpzl) and, for electronic invoices, a 6-digit check code (Jym).
// Exits via log.Fatal when the OCR response cannot be parsed.
func ConvertFileToInvoiceJson(filename string) SingleInvoiceCheckPostData {
	InvoiceInfoJson := GetInvoiceInfoByBaiduai(filename)
	//fmt.Println("Invoice Info Json\n", string(InvoiceInfoJson))
	//
	var invoiceInfoBaiduai InvociceInfoBaiduai
	err := json.Unmarshal(InvoiceInfoJson, &invoiceInfoBaiduai)
	if err != nil {
		log.Fatal(err)
	}
	singleInvoiceCheckPostData := SingleInvoiceCheckPostData{}
	singleInvoiceCheckPostData.Fpje = invoiceInfoBaiduai.Words_result.TotalAmount
	singleInvoiceCheckPostData.Fpdm = invoiceInfoBaiduai.Words_result.InvoiceCode
	// Strip the CJK year/month/day markers so the date becomes purely
	// numeric, e.g. "2020年01月02日" -> "20200102".
	st := invoiceInfoBaiduai.Words_result.InvoiceDate
	st = strings.Replace(st, "年", "", -1)
	st = strings.Replace(st, "月", "", -1)
	st = strings.Replace(st, "日", "", -1)
	//singleInvoiceCheckPostData.Kprq = st+"haha"
	singleInvoiceCheckPostData.Kprq = st
	singleInvoiceCheckPostData.Fphm = invoiceInfoBaiduai.Words_result.InvoiceNum
	// Map the OCR'd invoice-type text to a numeric code:
	// "电子普通发票" (electronic general invoice) -> "10",
	// "专用发票" (special invoice) -> "01"; anything else leaves Fpzl "".
	if invoiceInfoBaiduai.Words_result.InvoiceType == "电子普通发票" {
		singleInvoiceCheckPostData.Fpzl = "10"
	} else {
		if invoiceInfoBaiduai.Words_result.InvoiceType == "专用发票" {
			singleInvoiceCheckPostData.Fpzl = "01"
		}
	}
	// Electronic invoices carry the last 6 characters of the check code.
	// NOTE(review): this slice panics if CheckCode has fewer than 6
	// bytes — confirm the OCR service guarantees the length.
	if singleInvoiceCheckPostData.Fpzl == "10" {
		singleInvoiceCheckPostData.Jym = invoiceInfoBaiduai.Words_result.CheckCode[len(invoiceInfoBaiduai.Words_result.CheckCode)-6:]
	}
	return singleInvoiceCheckPostData
}
// PrepareJsonForHttpRequest wraps an already-serialized payload into the
// gateway's CommonPostData envelope: the payload is base64-encoded, a
// fresh data-exchange id is attached, and ZipCode/EncryptCode are "0"
// (presumably: no compression/encryption — confirm against the gateway
// spec). NOTE(review): the final json.Marshal error is discarded; the
// struct is all strings so it cannot realistically fail, but consider
// handling it.
func PrepareJsonForHttpRequest(jsonData []byte) []byte {
	jsonDataEncoded := Base64Encode(string(jsonData))
	dataExchangeId := CreateRandomDataExchangeId_1()
	commonPostData := CommonPostData{
		ZipCode:        "0",
		EncryptCode:    "0",
		DataExchangeId: dataExchangeId,
		EntCode:        "",
		Content:        jsonDataEncoded,
	}
	commonPostDataJson, _ := json.Marshal(commonPostData)
	return commonPostDataJson
}
// GetUrlFromFactory maps a logical request type onto its sandbox endpoint
// and appends the current access_token as a query parameter. An unknown
// RequestType prints "default" and returns "" (original behavior kept);
// a non-string token is logged and the bare endpoint is returned.
//
// Fix: the three switch cases repeated identical token-appending code;
// the endpoint lookup and the token handling are now separated.
func GetUrlFromFactory(RequestType string) string {
	token := GetTokenData()
	endpoints := map[string]string{
		"MultiInvoiceCheck":       "https://sandbox.ele-cloud.com/api/open-recipt/V1/MultilCheckInvoice",
		"MultiInvoiceResultQuery": "https://sandbox.ele-cloud.com/api/open-recipt/V1/BatchGetInvoice",
		"SingleInvoiceCheck":      "https://sandbox.ele-cloud.com/api/open-recipt/V1/CheckInvoiceSingle",
	}
	Url, ok := endpoints[RequestType]
	if !ok {
		fmt.Println("default")
		return ""
	}
	if v, ok := token["access_token"].(string); ok {
		Url += "?" + "access_token=" + v
	} else {
		log.Println("access_token is not string")
	}
	return Url
}
// SentHttpequestByPost POSTs the wrapped JSON payload to url, decodes the
// CommonPostData response envelope and returns its base64-decoded
// Content.
//
// Fixes: the request-build, HTTP, read and decode errors were previously
// ignored, so any transport failure dereferenced a nil *http.Response;
// the response body was also never closed. Errors now terminate via
// log.Fatal, matching this file's error style.
func SentHttpequestByPost(url string, commonPostDataJson []byte) string {
	fmt.Println("Json data", string(commonPostDataJson))
	client := &http.Client{}
	request, err := http.NewRequest("POST", url, bytes.NewBuffer(commonPostDataJson))
	if err != nil {
		log.Fatal(err)
	}
	request.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(request)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("In Sent Http body", string(body))
	resp_result := CommonPostData{}
	if err := json.Unmarshal(body, &resp_result); err != nil {
		log.Fatal(err)
	}
	result, err := Base64Decode(resp_result.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("result", result)
	return result
}
// CopyHttpfilesToLocalFiles saves each uploaded multipart file under
// ./cache/<original name> and returns the local paths in upload order.
//
// Fixes: file.Close was deferred before the Open error was checked (a
// failed Open would Close a nil handle), all defers in the loop piled up
// until the function returned, and the os.Create / io.Copy errors were
// ignored. Each file is now copied in a helper so handles close
// per-iteration and every error is handled.
func CopyHttpfilesToLocalFiles(files []*multipart.FileHeader) []string {
	var InvoiceFilenames []string
	for i := 0; i < len(files); i++ {
		dst := "./cache/" + files[i].Filename
		copyOneHTTPFile(files[i], dst)
		InvoiceFilenames = append(InvoiceFilenames, dst)
	}
	return InvoiceFilenames
}

// copyOneHTTPFile copies a single uploaded part to dst, closing both
// handles before returning; any failure terminates via log.Fatal.
func copyOneHTTPFile(fh *multipart.FileHeader, dst string) {
	src, err := fh.Open()
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	out, err := os.Create(dst)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, src); err != nil {
		log.Fatal(err)
	}
}
// GeneratePchNumber returns the current "YYYYMMDDhhmmss" timestamp
// left-padded with zeros to exactly 32 characters.
//
// Fix: replaced the character-at-a-time padding loop with strings.Repeat.
func GeneratePchNumber() string {
	stamp := time.Now().Format("20060102150405")
	return strings.Repeat("0", 32-len(stamp)) + stamp
}
func AppendContentToFile(filePath string, Content string){
file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
fmt.Println("文件打开失败", err)
}
defer file.Close()
write:= bufio.NewWriter(file)
write.WriteString(Content+"\n")
write.Flush()
} |
package consumergroup
import (
"encoding/json"
"fmt"
"path"
"sort"
"strconv"
"time"
"github.com/samuel/go-zookeeper/zk"
)
// ZK wraps a zookeeper connection, embedding *zk.Conn so the low-level
// client API remains available alongside the helpers below.
type ZK struct {
	*zk.Conn
}
// NewZK creates a new connection instance against the given server list,
// using recvTimeout as the session timeout.
func NewZK(servers []string, recvTimeout time.Duration) (*ZK, error) {
	conn, _, err := zk.Connect(servers, recvTimeout)
	if err != nil {
		return nil, err
	}
	return &ZK{conn}, nil
}
/*******************************************************************
* HIGH LEVEL API
*******************************************************************/
// Brokers returns the "host:port" addresses of all brokers currently
// registered under /brokers/ids.
//
// Fix: the brokerEntry struct tags were missing their quotes
// (`json:host`), which is a malformed tag, so decoding only worked by
// falling back to case-insensitive field-name matching. Valid tags are
// used now.
func (z *ZK) Brokers() ([]string, error) {
	root := "/brokers/ids"
	children, _, childrenErr := z.Children(root)
	if childrenErr != nil {
		return nil, childrenErr
	}
	// brokerEntry mirrors the JSON stored in each broker znode.
	type brokerEntry struct {
		Host string `json:"host"`
		Port int    `json:"port"`
	}
	result := make([]string, len(children))
	for index, child := range children {
		value, _, childErr := z.Get(path.Join(root, child))
		if childErr != nil {
			return nil, childErr
		}
		var brokerNode brokerEntry
		if err := json.Unmarshal(value, &brokerNode); err != nil {
			return nil, err
		}
		result[index] = fmt.Sprintf("%s:%d", brokerNode.Host, brokerNode.Port)
	}
	return result, nil
}
// Consumers returns the sorted ids of all active consumers within a
// group, plus a watch channel that fires when the set of children
// changes. The ids directory is created first when missing.
func (z *ZK) Consumers(group string) ([]string, <-chan zk.Event, error) {
	root := fmt.Sprintf("/consumers/%s/ids", group)
	err := z.MkdirAll(root)
	if err != nil {
		return nil, nil, err
	}
	strs, _, ch, err := z.ChildrenW(root)
	if err != nil {
		return nil, nil, err
	}
	sort.Strings(strs)
	return strs, ch, nil
}
// Claim claims a topic/partition ownership for a consumer ID within a
// group. When the partition node already exists (still owned by a
// previous claimant whose ephemeral node has not yet expired), the
// create is retried every 200ms until the retry counter exceeds 100;
// any other error, or exhausting the retries, is returned.
func (z *ZK) Claim(group, topic string, partition int32, id string) (err error) {
	root := fmt.Sprintf("/consumers/%s/owners/%s", group, topic)
	if err = z.MkdirAll(root); err != nil {
		return err
	}
	node := fmt.Sprintf("%s/%d", root, partition)
	tries := 0
	for {
		// Ephemeral node: the claim disappears when this session dies.
		if err = z.Create(node, []byte(id), true); err == nil {
			break
		} else if tries++; err != zk.ErrNodeExists || tries > 100 {
			return err
		}
		time.Sleep(200 * time.Millisecond)
	}
	return nil
}
// Release releases a topic/partition claim held by consumer id. A node
// that is already gone is a no-op; a claim owned by a different consumer
// yields zk.ErrNotLocked.
//
// Fix: a Get failure other than ErrNoNode previously fell through to the
// ownership comparison with an empty value and surfaced as the
// misleading ErrNotLocked; such errors are now returned directly.
func (z *ZK) Release(group, topic string, partition int32, id string) error {
	node := fmt.Sprintf("/consumers/%s/owners/%s/%d", group, topic, partition)
	val, _, err := z.Get(node)
	// Already deleted
	if err == zk.ErrNoNode {
		return nil
	}
	if err != nil {
		return err
	}
	// Locked by someone else?
	if string(val) != id {
		return zk.ErrNotLocked
	}
	return z.DeleteAll(node)
}
// Commit commits an offset to a group/topic/partition. The node is
// created when absent; otherwise it is overwritten with a version check,
// so a concurrent modification surfaces as a zk version error.
func (z *ZK) Commit(group, topic string, partition int32, offset int64) (err error) {
	root := fmt.Sprintf("/consumers/%s/offsets/%s", group, topic)
	if err = z.MkdirAll(root); err != nil {
		return err
	}
	node := fmt.Sprintf("%s/%d", root, partition)
	data := []byte(fmt.Sprintf("%d", offset))
	_, stat, err := z.Get(node)
	// Try to create new node
	if err == zk.ErrNoNode {
		return z.Create(node, data, false)
	} else if err != nil {
		return err
	}
	_, err = z.Set(node, data, stat.Version)
	return
}
// Offset retrieves the committed offset for a group/topic/partition. A
// missing node yields (0, nil); any other read error yields (-1, err).
func (z *ZK) Offset(group, topic string, partition int32) (int64, error) {
	node := fmt.Sprintf("/consumers/%s/offsets/%s/%d", group, topic, partition)
	val, _, err := z.Get(node)
	if err == zk.ErrNoNode {
		return 0, nil
	} else if err != nil {
		return -1, err
	}
	return strconv.ParseInt(string(val), 10, 64)
}
// RegisterGroup creates/updates a group's ids directory so consumers can
// register themselves under it.
func (z *ZK) RegisterGroup(group string) error {
	return z.MkdirAll("/consumers/" + group + "/ids")
}
// RegisterConsumer registers a new consumer within a group by creating
// an ephemeral node whose value is a JSON blob describing the
// single-topic subscription (pattern/subscription/timestamp/version).
// Fails with zk.ErrNodeExists when the id is already registered.
func (z *ZK) RegisterConsumer(group, id, topic string) error {
	data, err := json.Marshal(map[string]interface{}{
		"pattern":      "white_list",
		"subscription": map[string]int{topic: 1},
		"timestamp":    fmt.Sprintf("%d", time.Now().Unix()),
		"version":      1,
	})
	if err != nil {
		return err
	}
	return z.Create("/consumers/"+group+"/ids/"+id, data, true)
}
/*******************************************************************
* LOW LEVEL API
*******************************************************************/
// Exists checks existence of a node, discarding the zk.Stat the
// underlying call also returns.
func (z *ZK) Exists(node string) (ok bool, err error) {
	ok, _, err = z.Conn.Exists(node)
	return
}
// DeleteAll deletes a node and all of its children recursively; a node
// that is already gone is not an error. The final delete uses the
// version observed while listing children, so a concurrent change
// surfaces as a zk version error.
func (z *ZK) DeleteAll(node string) (err error) {
	children, stat, err := z.Children(node)
	if err == zk.ErrNoNode {
		return nil
	} else if err != nil {
		return
	}
	for _, child := range children {
		if err = z.DeleteAll(path.Join(node, child)); err != nil {
			return
		}
	}
	return z.Delete(node, stat.Version)
}
// MkdirAll creates a permanent, world-ACL directory node and any missing
// parents (recursively); nodes that already exist are not an error.
func (z *ZK) MkdirAll(node string) (err error) {
	parent := path.Dir(node)
	if parent != "/" {
		if err = z.MkdirAll(parent); err != nil {
			return
		}
	}
	_, err = z.Conn.Create(node, nil, 0, zk.WorldACL(zk.PermAll))
	if err == zk.ErrNodeExists {
		err = nil
	}
	return
}
// Create stores a new value at node, creating missing parent directories
// first. When ephemeral is true the node is tied to this session's
// lifetime. Fails (with zk.ErrNodeExists) if the node is already set.
func (z *ZK) Create(node string, value []byte, ephemeral bool) (err error) {
	if err = z.MkdirAll(path.Dir(node)); err != nil {
		return
	}
	flags := int32(0)
	if ephemeral {
		flags = zk.FlagEphemeral
	}
	_, err = z.Conn.Create(node, value, flags, zk.WorldACL(zk.PermAll))
	return
}
|
// +build ignore
package main
import (
"fmt"
"log"
"net/http"
"os"
"syscall"
shutdown "github.com/klauspost/shutdown2"
)
// This example shows a server that has logging to a file
//
// When the webserver is closed, it will close the file when all requests have
// been finished.
//
// In a real world, you would not want multiple goroutines writing to the same file
//
// To execute, use 'go run simple-func.go'
// This is the function we would like to execute at shutdown.
func closeFile(f *os.File) func() {
return func() {
log.Println("Closing", f.Name()+"...")
f.Close()
}
}
// main wires the shutdown package into a small web server: Ctrl+C or
// SIGTERM initiates shutdown, and the log file is only closed once
// in-flight requests have released their shutdown locks.
func main() {
	// Make shutdown catch Ctrl+c and system terminate
	shutdown.OnSignal(0, os.Interrupt, syscall.SIGTERM)
	// Create a log file.
	// NOTE(review): the Create error is discarded; on failure logFile is
	// nil and every request handler write would then fail/panic.
	var logFile *os.File
	logFile, _ = os.Create("log.txt")
	// When shutdown is initiated, close the file
	shutdown.FirstFn(closeFile(logFile))
	// Start a webserver
	http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		// Get a lock, and write to the file if we get it.
		// While we have the lock the file will not be closed.
		l := shutdown.Lock()
		if l != nil {
			_, _ = logFile.WriteString(req.URL.String() + "\n")
			l()
		}
	})
	fmt.Println("Starting server. Press Ctrl+c to initiate shutdown")
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package core
// NewAtom wraps value in a new Type node whose Atom field points at a
// freshly allocated copy. Atom holds a **Type — the extra indirection is
// what lets SetAtom swap the target for every alias of the atom.
func NewAtom(value Type) *Type {
	ptr := &value
	return &Type{Atom: &ptr}
}
// IsAtom reports whether this node holds an atom.
func (node *Type) IsAtom() bool {
	return node.Atom != nil
}
// AsAtom returns the atom's current value, or the Nil value when this
// node is not an atom.
func (node *Type) AsAtom() Type {
	if node.IsAtom() {
		return **node.Atom
	}
	return *NewNil()
}
// SetAtom repoints the atom at a copy of value, so every reference
// sharing this atom observes the new value. NOTE(review): panics when
// called on a non-atom node (node.Atom == nil) — callers should check
// IsAtom first.
func (node *Type) SetAtom(value Type) {
	*node.Atom = &value
}
|
package blocking
import (
	"context"
	"fmt"
	"strings"

	users "github.com/ivansukach/bets/internal/repositories/blocked-users"
	log "github.com/sirupsen/logrus"
)
// Service implements user blocking on top of the blocked-users
// repository.
type Service struct {
	blockedUsersRps users.Repository
}
// New builds a Service backed by the given blocked-users repository.
func New(blockedUsersRps users.Repository) *Service {
	return &Service{blockedUsersRps: blockedUsersRps}
}
// BlockUsers inserts a BlockedUser row for each id. Individual failures
// are logged and skipped, and the method always returns nil — i.e.
// best-effort semantics; callers cannot detect partial failure.
func (s *Service) BlockUsers(ctx context.Context, ids []int64) error {
	for i := range ids {
		if err := s.blockedUsersRps.Create(ctx, &users.BlockedUser{Id: ids[i]}); err != nil {
			log.Error(err)
		}
	}
	return nil
}
// DeleteUser unblocks a single user by deleting its repository row.
func (s *Service) DeleteUser(ctx context.Context, id int64) error {
	return s.blockedUsersRps.Delete(ctx, id)
}
// BlockedReport renders the list of blocked users as a newline-separated
// byte slice of user ids, one id per line.
//
// Fix: the previous string += in a loop is quadratic; a strings.Builder
// accumulates the content in linear time. (Requires the "strings" import
// added to this file's import block.)
func (s *Service) BlockedReport(ctx context.Context) ([]byte, error) {
	users, err := s.blockedUsersRps.Listing(ctx)
	if err != nil {
		return nil, err
	}
	var report strings.Builder
	for i := range users {
		fmt.Fprintf(&report, "%d\n", users[i].Id)
	}
	return []byte(report.String()), nil
}
|
package main
import (
"fmt"
"github.com/gocrazygh/luhn"
)
// main runs the Luhn check on two account-number strings and prints the
// boolean results in order.
func main() {
	for _, number := range []string{"79927398713", "1111"} {
		fmt.Println(luhn.Check(number))
	}
}
|
package 链表
// reverseBetween reverses the sublist between positions left and right
// (1-indexed, inclusive) via head insertion and returns the list head.
func reverseBetween(head *ListNode, left int, right int) *ListNode {
	// 1. Dummy head so the left == 1 case needs no special handling.
	dummyHead := &ListNode{
		Next: head,
	}
	pre := dummyHead
	// 2. Walk pre to the node just before the reversal range.
	for i := 1; i < left; i++ {
		pre = pre.Next
	}
	// 3. Reverse by repeatedly unlinking cur's successor and re-inserting
	// it at the front of the range (head insertion), right-left times.
	cur := pre.Next
	for i := 1; i <= right-left; i++ {
		next := cur.Next
		cur.Next = next.Next
		next.Next = pre.Next
		pre.Next = next
	}
	// 4. Done; the dummy's successor is the (possibly new) head.
	return dummyHead.Next
}
|
package goSolution
// reverseBetween reverses the nodes between positions left and right and
// returns the (possibly new) head. Positions are counted with the
// prepended dummy node at index 0, so the original head sits at index 1.
// Strategy: walk the list once, pointing every node inside [left, right]
// backwards while remembering the four boundary nodes, then stitch the
// reversed segment back in between them.
func reverseBetween(head *ListNode, left int, right int) *ListNode {
	p := &ListNode{Next: head}
	index := 0
	// last is the previously visited node (reversal target); next saves
	// the original successor so iteration survives the re-linking.
	var last, next *ListNode
	// leftNode/rightNode delimit the segment from the outside,
	// leftInnerNode/rightInnerNode from the inside.
	var leftNode, rightNode *ListNode
	var leftInnerNode, rightInnerNode *ListNode
	for c := p; c != nil; c, index = next, index + 1 {
		next = c.Next
		if index >= left && index <= right {
			// Inside the segment: reverse this node's link.
			c.Next = last
			if index == left {
				leftInnerNode = c
			} else {
				if index == right {
					rightInnerNode = c
				}
			}
		} else {
			if index == left-1 {
				leftNode = c
			} else {
				if index == right+1 {
					rightNode = c
				}
			}
		}
		last = c
	}
	// Stitch: predecessor -> reversed head, reversed tail -> successor.
	if leftNode != nil && rightInnerNode != nil {
		leftNode.Next = rightInnerNode
	}
	if leftInnerNode != nil {
		leftInnerNode.Next = rightNode
	}
	return p.Next
}
|
package stateful
import (
"context"
"fmt"
aliceapi "github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/alice/api"
"github.com/yandex-cloud/examples/serverless/alice-shareable-todolist/app/errors"
)
// listAllListsFromScratch handles the "list my lists" intent: it replies
// with one line per list alias, or a dedicated "no lists" response. A
// (nil, nil) return means the request is not this handler's to process
// (wrong request type or no matching intent).
func (h *Handler) listAllListsFromScratch(ctx context.Context, req *aliceapi.Request) (*aliceapi.Response, errors.Err) {
	if req.Request.Type != aliceapi.RequestTypeSimple {
		return nil, nil
	}
	intnt := req.Request.NLU.Intents.ListLists
	if intnt == nil {
		return nil, nil
	}
	acl, err := h.getUserListsCached(ctx)
	if err != nil {
		return nil, err
	}
	if len(acl) == 0 {
		return respondNoLists("У вас пока нет списков"), nil
	}
	// Build the reply text: a header followed by one alias per line.
	text := "Ваши списки:\n"
	for _, entry := range acl {
		text = text + fmt.Sprintf("%s\n", entry.Alias)
	}
	return &aliceapi.Response{Response: &aliceapi.Resp{Text: text}}, nil
}
|
package taxjar
type Rate struct {
Zip string `json:"zip"`
State string `json:"state`
StateRate float64 `json:"state_rate,string"`
County string `json:"county"`
CountyRate float64 `json:"county_rate,string"`
City string `json:"city"`
CityRate float64 `json:"city_rate,string"`
CombinedDistrictRate float64 `json:"combined_district_rate,string"`
CombinedRate float64 `json:"combined_rate,string"`
Country string `json:"country"`
Name string `json:"name"`
StandardRate float64 `json:"standard_rate,string"`
ReducedRate float64 `json:"reduced_rate,string"`
SuperReducedRate float64 `json:"super_reduced_rate,string"`
ParkingRate float64 `json:"parking_rate,string"`
DistanceSaleThreshold float64 `json:"distance_sale_threshold,string"`
FreightTaxable *bool `json:"freight_taxable"`
}
// RateList is the API response envelope wrapping a single Rate under the
// "rate" key.
type RateList struct {
	Rate Rate `json:"rate"`
}
// rateParams carries the parameters of a rate lookup. Zip is tagged "-"
// so it is excluded from query-string encoding — presumably it travels
// in the URL path instead; confirm against the repository implementation.
type rateParams struct {
	Country string `url:"country,omitempty"`
	Zip     string `url:"-"`
	City    string `url:"city,omitempty"`
	Street  string `url:"street,omitempty"`
}

// RateCountry returns a functional option that sets the country filter.
func RateCountry(country string) func(*rateParams) error {
	return func(p *rateParams) error {
		p.Country = country
		return nil
	}
}

// RateCity returns a functional option that sets the city filter.
func RateCity(city string) func(*rateParams) error {
	return func(p *rateParams) error {
		p.City = city
		return nil
	}
}
// RateService exposes rate lookups backed by a RateRepository.
type RateService struct {
	Repository RateRepository
}
// Get a Rate
func (s *RateService) Get(zip string, options ...func(*rateParams) error) (Rate, error) {
params := rateParams{Zip: zip}
for _, option := range options {
if err := option(¶ms); nil != err {
return Rate{}, err
}
}
return s.Repository.get(params)
}
|
package cms
import "github.com/yueyongyue/aliyungo/common"
// Test credentials and region for the CMS client; the placeholders must
// be replaced with real values before integration tests can talk to the
// API.
const (
	TestAccessKeyId     = "YOUR_ACCESS_KEY_ID"
	TestAccessKeySecret = "YOUR_ACCESS_KEY_SECRET"
	TestRegionID        = common.Hangzhou
)
// testClient caches the shared test client across calls.
var testClient *CMSClient

// NewTestClient lazily builds a singleton CMSClient using the test
// credentials. NOTE(review): the nil check is not goroutine-safe — fine
// for serial tests; confirm before any parallel use.
func NewTestClient() *CMSClient {
	if testClient == nil {
		testClient = NewCMSClient(TestAccessKeyId, TestAccessKeySecret)
	}
	return testClient
}
// testDebugClient caches the shared debug-mode test client.
var testDebugClient *CMSClient

// NewTestClientForDebug lazily builds a singleton CMSClient with debug
// output enabled. NOTE(review): like NewTestClient, the nil check is not
// goroutine-safe.
func NewTestClientForDebug() *CMSClient {
	if testDebugClient == nil {
		testDebugClient = NewCMSClient(TestAccessKeyId, TestAccessKeySecret)
		testDebugClient.SetDebug(true)
	}
	return testDebugClient
}
|
// Package config parses command-line/environment/config file arguments
// and make available to other packages.
package config
import (
"io/ioutil"
"path"
"runtime"
"gopkg.in/yaml.v2"
"github.com/Akagi201/utilgo/conflag"
flags "github.com/jessevdk/go-flags"
"github.com/tengattack/tgo/log"
)
// Opts holds all esalert configuration, populated from command-line
// flags and, optionally, a YAML or flag-style config file (see the init
// below); the struct tags double as flag and YAML definitions.
var Opts struct {
	Conf string `long:"conf" description:"esalert config file"`
	AlertFileDir string `yaml:"alerts" long:"alerts" short:"a" required:"true" description:"A yaml file, or directory with yaml files, containing alert definitions"`
	ElasticSearchAddr string `yaml:"es-addr" long:"es-addr" default:"127.0.0.1:9200" description:"Address to find an elasticsearch instance on"`
	ElasticSearchUser string `yaml:"es-user" long:"es-user" default:"elastic" description:"Username for the elasticsearch"`
	ElasticSearchPass string `yaml:"es-pass" long:"es-pass" default:"changeme" description:"Password for the elasticsearch"`
	LuaInit string `yaml:"lua-init" long:"lua-init" description:"If set the given lua script file will be executed at the initialization of every lua vm"`
	LuaVMs int `yaml:"lua-vms" long:"lua-vms" default:"1" description:"How many lua vms should be used. Each vm is completely independent of the other, and requests are executed on whatever vm is available at that moment. Allows lua scripts to not all be blocked on the same os thread"`
	SlackWebhook string `yaml:"slack-webhook" long:"slack-webhook" description:"Slack webhook url, required if using any Slack actions"`
	ForceRun string `yaml:"force-run" long:"force-run" description:"If set with the name of an alert, will immediately run that alert and exit. Useful for testing changes to alert definitions"`
	Log log.Config `yaml:"log" long:"log" description:"logging options"`
}
// init pins GOMAXPROCS to the CPU count. NOTE(review): this has been the
// Go runtime default since Go 1.5, so the call is likely redundant.
func init() {
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// init loads configuration in layers: command-line flags first; then, if
// --conf was given, either a YAML file or (for any other extension) a
// flag-style file translated by conflag into pseudo command-line args;
// finally logging defaults are applied and the logger initialized. Any
// parse or I/O failure panics, aborting program start-up.
func init() {
	parser := flags.NewParser(&Opts, flags.Default|flags.IgnoreUnknown)
	parser.Parse()
	if Opts.Conf != "" {
		switch path.Ext(Opts.Conf) {
		case ".yaml", ".yml":
			f, err := ioutil.ReadFile(Opts.Conf)
			if err != nil {
				panic(err)
			}
			err = yaml.Unmarshal(f, &Opts)
			if err != nil {
				panic(err)
			}
		default:
			// Non-YAML config: convert file entries into args and
			// re-run the flag parser over them.
			conflag.LongHyphen = true
			conflag.BoolValue = false
			args, err := conflag.ArgsFrom(Opts.Conf)
			if err != nil {
				panic(err)
			}
			parser.ParseArgs(args)
		}
	}
	if Opts.Log.AccessLevel == "" || Opts.Log.ErrorLevel == "" {
		// Load default logging configuration
		Opts.Log = *log.DefaultConfig
	}
	err := log.InitLog(&Opts.Log)
	if err != nil {
		panic(err)
	}
	log.LogAccess.Debugf("esalert opts: %+v", Opts)
}
|
package main
import (
"context"
"fmt"
"time"
)
// exitChan is declared but never used in this example; ctx cancellation
// does the signaling instead.
var exitChan = make(chan bool, 1)
func f2(ctx context.Context) {
LOOP:
for {
fmt.Println("保德路")
time.Sleep(time.Millisecond * 500)
select {
case <-ctx.Done():
break LOOP
default:
}
}
}
// f starts f2 in the background and then prints "周林" every 500ms itself
// until ctx is cancelled.
func f(ctx context.Context) {
	go f2(ctx)
	for {
		fmt.Println("周林")
		time.Sleep(time.Millisecond * 500)
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
}
// main runs f (which itself spawns f2) for five seconds, then cancels
// the shared context to stop both goroutines.
// NOTE(review): main returns immediately after cancel(), so the workers
// may be torn down before they observe Done; add a brief sleep or a
// WaitGroup to watch them exit.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go f(ctx)
	time.Sleep(time.Second * 5)
	// Signal the child goroutines to exit.
	cancel()
}
|
package cmd
import (
"os"
"strings"
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/authelia/authelia/v4/internal/utils"
)
// newBuildCmd constructs the cobra "build" command (no positional args)
// that delegates to cmdBuildRun; its help text comes from the cmdBuild*
// constants defined elsewhere in this package.
func newBuildCmd() (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               "build",
		Short:             cmdBuildShort,
		Long:              cmdBuildLong,
		Example:           cmdBuildExample,
		Args:              cobra.NoArgs,
		Run:               cmdBuildRun,
		DisableAutoGenTag: true,
	}
	return cmd
}
// cmdBuildRun is the build entrypoint. On renovate/* branches it only
// builds the frontend and exits 0 (dependency-update CI shortcut). The
// full path cleans, prepares OutputDir, builds the web frontend and the
// swagger-ui assets, compiles the Go binary — via gox cross-compilation
// when --buildkite is set — and finally restores the asset skeleton.
func cmdBuildRun(cobraCmd *cobra.Command, args []string) {
	branch := os.Getenv("BUILDKITE_BRANCH")
	if strings.HasPrefix(branch, "renovate/") {
		buildFrontend(branch)
		log.Info("Skip building Authelia for deps...")
		os.Exit(0)
	}
	log.Info("Building Authelia...")
	cmdCleanRun(cobraCmd, args)
	buildMetaData, err := getBuild(branch, os.Getenv("BUILDKITE_BUILD_NUMBER"), "")
	if err != nil {
		log.Fatal(err)
	}
	log.Debug("Creating `" + OutputDir + "` directory")
	if err = os.MkdirAll(OutputDir, os.ModePerm); err != nil {
		log.Fatal(err)
	}
	log.Debug("Building Authelia frontend...")
	buildFrontend(branch)
	log.Debug("Building swagger-ui frontend...")
	buildSwagger()
	buildkite, _ := cobraCmd.Flags().GetBool("buildkite")
	if buildkite {
		log.Info("Building Authelia Go binaries with gox...")
		buildAutheliaBinaryGOX(buildMetaData.XFlags())
	} else {
		log.Info("Building Authelia Go binary...")
		buildAutheliaBinaryGO(buildMetaData.XFlags())
	}
	cleanAssets()
}
// buildAutheliaBinaryGOX cross-compiles Authelia with gox in two
// parallel legs: musl-linked linux binaries built on the host, and
// glibc/freebsd binaries built inside the authelia/crossbuild Docker
// image. It waits for both and logs the elapsed time; either leg
// failing is fatal.
func buildAutheliaBinaryGOX(xflags []string) {
	var wg sync.WaitGroup
	s := time.Now()
	wg.Add(2)
	// Leg 1: host gox build against musl cross-compilers.
	go func() {
		defer wg.Done()
		cmd := utils.CommandWithStdout("gox", "-output={{.Dir}}-{{.OS}}-{{.Arch}}-musl", "-buildmode=pie", "-trimpath", "-cgo", "-ldflags=-linkmode=external -s -w "+strings.Join(xflags, " "), "-osarch=linux/amd64 linux/arm linux/arm64", "./cmd/authelia/")
		cmd.Env = append(os.Environ(),
			"CGO_CPPFLAGS=-D_FORTIFY_SOURCE=2 -fstack-protector-strong", "CGO_LDFLAGS=-Wl,-z,relro,-z,now",
			"GOX_LINUX_ARM_CC=arm-linux-musleabihf-gcc", "GOX_LINUX_ARM64_CC=aarch64-linux-musl-gcc")
		err := cmd.Run()
		if err != nil {
			log.Fatal(err)
		}
	}()
	// Leg 2: dockerized gox build for glibc linux targets and freebsd.
	go func() {
		defer wg.Done()
		cmd := utils.CommandWithStdout("bash", "-c", "docker run --rm -e GOX_LINUX_ARM_CC=arm-linux-gnueabihf-gcc -e GOX_LINUX_ARM64_CC=aarch64-linux-gnu-gcc -e GOX_FREEBSD_AMD64_CC=x86_64-pc-freebsd13-gcc -v ${PWD}:/workdir -v /buildkite/.go:/root/go authelia/crossbuild "+
			"gox -output={{.Dir}}-{{.OS}}-{{.Arch}} -buildmode=pie -trimpath -cgo -ldflags=\"-linkmode=external -s -w "+strings.Join(xflags, " ")+"\" -osarch=\"linux/amd64 linux/arm linux/arm64 freebsd/amd64\" ./cmd/authelia/")
		err := cmd.Run()
		if err != nil {
			log.Fatal(err)
		}
	}()
	wg.Wait()
	e := time.Since(s)
	log.Debugf("Binary compilation completed in %s.", e)
}
// buildAutheliaBinaryGO compiles a single hardened (PIE, relro/now,
// fortified) Authelia binary for the host platform into OutputDir,
// passing the version xflags through the linker.
func buildAutheliaBinaryGO(xflags []string) {
	cmd := utils.CommandWithStdout("go", "build", "-buildmode=pie", "-trimpath", "-o", OutputDir+"/authelia", "-ldflags", "-linkmode=external -s -w "+strings.Join(xflags, " "), "./cmd/authelia/")
	cmd.Env = append(os.Environ(),
		"CGO_CPPFLAGS=-D_FORTIFY_SOURCE=2 -fstack-protector-strong", "CGO_LDFLAGS=-Wl,-z,relro,-z,now")
	err := cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
}
// buildFrontend installs the web frontend's pnpm dependencies and, for
// all branches except renovate/*, runs the production build in the web
// directory. Any command failure is fatal.
func buildFrontend(branch string) {
	cmd := utils.CommandWithStdout("pnpm", "install")
	cmd.Dir = webDirectory
	err := cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
	if !strings.HasPrefix(branch, "renovate/") {
		cmd = utils.CommandWithStdout("pnpm", "build")
		cmd.Dir = webDirectory
		err = cmd.Run()
		if err != nil {
			log.Fatal(err)
		}
	}
}
// buildSwagger downloads the pinned swagger-ui release tarball, copies
// the api spec into the embedded public_html tree, extracts the
// swagger-ui dist files over it (excluding its index.html), and removes
// the tarball. Each step is fatal on failure.
func buildSwagger() {
	cmd := utils.CommandWithStdout("bash", "-c", "wget -q https://github.com/swagger-api/swagger-ui/archive/v"+versionSwaggerUI+".tar.gz -O ./v"+versionSwaggerUI+".tar.gz")
	err := cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
	cmd = utils.CommandWithStdout("cp", "-r", "api", "internal/server/public_html")
	err = cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
	cmd = utils.CommandWithStdout("tar", "-C", "internal/server/public_html/api", "--exclude=index.html", "--strip-components=2", "-xf", "v"+versionSwaggerUI+".tar.gz", "swagger-ui-"+versionSwaggerUI+"/dist")
	err = cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
	cmd = utils.CommandWithStdout("rm", "./v"+versionSwaggerUI+".tar.gz")
	err = cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
}
// cleanAssets moves the built public_html assets into OutputDir and then
// recreates an empty skeleton (index.html, api/index.html,
// api/openapi.yml) so the embedded-assets directory stays valid for the
// next build. Any failure is fatal.
//
// Fix: the directory and placeholder files were previously created by
// shelling out to mkdir and bash+touch (relying on brace expansion);
// plain os calls do the same work without spawning subprocesses.
func cleanAssets() {
	if err := os.Rename("internal/server/public_html", OutputDir+"/public_html"); err != nil {
		log.Fatal(err)
	}
	if err := os.MkdirAll("internal/server/public_html/api", 0755); err != nil {
		log.Fatal(err)
	}
	for _, name := range []string{
		"internal/server/public_html/index.html",
		"internal/server/public_html/api/index.html",
		"internal/server/public_html/api/openapi.yml",
	} {
		// Equivalent of `touch` on a freshly created directory: create
		// the file empty.
		if err := os.WriteFile(name, nil, 0644); err != nil {
			log.Fatal(err)
		}
	}
}
|
package rest
// Options holds the REST server's external base-URL configuration.
type Options struct {
	APIBase string // API base URL
	WxCallbackServerBase string // WeChat callback server base URL
	WxH5ServerBase string // WeChat H5 page base URL
}
|
package handler
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
gwApi "github.com/aws/aws-sdk-go/service/apigatewaymanagementapi"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// Package-level handles read once at start-up: a shared DynamoDB client
// and the table names taken from the environment (the Env* constant
// names are defined elsewhere in this package).
var (
	Dynamo = NewDynamo()
	DynamoDbTableConnections = os.Getenv(EnvDynamoDbTableConnections)
	DynamoDbTableRooms = os.Getenv(EnvDynamoDbTableRooms)
	DynamoDbTableGames = os.Getenv(EnvDynamoDbTableGameStates)
)
// NewDynamo builds a DynamoDB client from a default AWS session.
// NOTE(review): the session.NewSession error is discarded — a bad AWS
// configuration would only surface at the first API call; confirm this
// is acceptable.
func NewDynamo() *dynamodb.DynamoDB {
	sess, _ := session.NewSession()
	return dynamodb.New(sess)
}
// NewGwApi builds an API Gateway Management API client whose endpoint is
// pinned to "<domainName>/<stage>".
func NewGwApi(domainName string, stage string) (*gwApi.ApiGatewayManagementApi, error) {
	endpoint := fmt.Sprintf("%s/%s", domainName, stage)
	sess, err := session.NewSession(&aws.Config{
		Endpoint: &endpoint,
	})
	if err != nil {
		return nil, err
	}
	return gwApi.New(sess), nil
}
|
package controller
import (
"context"
"path"
"time"
"github.com/Masterminds/semver"
"github.com/kyma-project/helm-broker/internal"
"github.com/kyma-project/helm-broker/internal/controller/addons"
"github.com/kyma-project/helm-broker/internal/storage"
addonsv1alpha1 "github.com/kyma-project/helm-broker/pkg/apis/addons/v1alpha1"
exerr "github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/helm/pkg/proto/hapi/chart"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// ClusterAddonsConfigurationController holds controller logic
type ClusterAddonsConfigurationController struct {
	reconciler reconcile.Reconciler // reconcile implementation registered in Start
}
// NewClusterAddonsConfigurationController creates new controller with a given reconciler.
func NewClusterAddonsConfigurationController(reconciler reconcile.Reconciler) *ClusterAddonsConfigurationController {
	return &ClusterAddonsConfigurationController{reconciler: reconciler}
}
// Start registers the controller with the manager and sets up a watch on
// ClusterAddonsConfiguration objects so every change enqueues a
// reconcile request.
func (cacc *ClusterAddonsConfigurationController) Start(mgr manager.Manager) error {
	// Create a new controller
	c, err := controller.New("clusteraddonsconfiguration-controller", mgr, controller.Options{Reconciler: cacc.reconciler})
	if err != nil {
		return err
	}
	// Watch for changes to ClusterAddonsConfiguration
	err = c.Watch(&source.Kind{Type: &addonsv1alpha1.ClusterAddonsConfiguration{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	return nil
}
// Compile-time check that the struct satisfies reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileClusterAddonsConfiguration{}
// ReconcileClusterAddonsConfiguration reconciles a ClusterAddonsConfiguration object
type ReconcileClusterAddonsConfiguration struct {
	log logrus.FieldLogger
	client.Client
	scheme *runtime.Scheme
	chartStorage chartStorage
	addonStorage addonStorage
	docsProvider clusterDocsProvider
	brokerFacade clusterBrokerFacade
	brokerSyncer clusterBrokerSyncer
	addonLoader *addonLoader
	protection protection
	// syncBroker informs ServiceBroker should be resync, it should be true if
	// operation insert/delete was made on storage
	syncBroker bool
}
// NewReconcileClusterAddonsConfiguration returns a new reconcile.Reconciler
func NewReconcileClusterAddonsConfiguration(mgr manager.Manager, addonGetterFactory addonGetterFactory, chartStorage chartStorage, addonStorage addonStorage, brokerFacade clusterBrokerFacade, docsProvider clusterDocsProvider, brokerSyncer clusterBrokerSyncer, tmpDir string, log logrus.FieldLogger) reconcile.Reconciler {
	// The loader downloads addon bundles into its own scratch directory.
	ldr := &addonLoader{
		addonGetterFactory: addonGetterFactory,
		log:                log.WithField("service", "cluster::addons::configuration::addon-creator"),
		dstPath:            path.Join(tmpDir, "cluster-addon-loader-dst"),
	}
	return &ReconcileClusterAddonsConfiguration{
		log:          log.WithField("controller", "cluster-addons-configuration"),
		Client:       mgr.GetClient(),
		scheme:       mgr.GetScheme(),
		addonStorage: addonStorage,
		chartStorage: chartStorage,
		brokerFacade: brokerFacade,
		docsProvider: docsProvider,
		brokerSyncer: brokerSyncer,
		addonLoader:  ldr,
		protection:   protection{},
		syncBroker:   false,
	}
}
// Reconcile reads that state of the cluster for a ClusterAddonsConfiguration object and makes changes based on the state read
// and what is in the ClusterAddonsConfiguration.Spec
func (r *ReconcileClusterAddonsConfiguration) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	addon := &addonsv1alpha1.ClusterAddonsConfiguration{}
	err := r.Get(context.TODO(), request.NamespacedName, addon)
	if err != nil {
		// NOTE(review): a NotFound error is requeued here as well — confirm
		// whether deleted objects should be ignored instead.
		return reconcile.Result{}, err
	}
	r.syncBroker = false
	// Object is being deleted: run cleanup, retry after 15s on failure.
	if addon.DeletionTimestamp != nil {
		if err := r.deleteAddonsProcess(addon); err != nil {
			r.log.Errorf("while deleting ClusterAddonsConfiguration process: %v", err)
			return reconcile.Result{RequeueAfter: time.Second * 15}, exerr.Wrapf(err, "while deleting ClusterAddonConfiguration %q", request.NamespacedName)
		}
		return reconcile.Result{}, nil
	}
	// ObservedGeneration == 0 means the object was never processed: add flow.
	if addon.Status.ObservedGeneration == 0 {
		r.log.Infof("Start add ClusterAddonsConfiguration %s process", addon.Name)
		preAddon, err := r.prepareForProcessing(addon)
		if err != nil {
			r.log.Errorf("while preparing for processing: %v", err)
			return reconcile.Result{Requeue: true}, exerr.Wrapf(err, "while adding a finalizer to AddonsConfiguration %q", request.NamespacedName)
		}
		err = r.addAddonsProcess(preAddon, preAddon.Status)
		if err != nil {
			r.log.Errorf("while adding ClusterAddonsConfiguration process: %v", err)
			return reconcile.Result{}, exerr.Wrapf(err, "while creating ClusterAddonsConfiguration %q", request.NamespacedName)
		}
		r.log.Infof("Add ClusterAddonsConfiguration process completed")
	} else if addon.Generation > addon.Status.ObservedGeneration {
		// Spec changed since last processing: update flow, preserving the
		// previous status for orphan-addon cleanup.
		r.log.Infof("Start update ClusterAddonsConfiguration %s process", addon.Name)
		lastAddon := addon.DeepCopy()
		addon.Status = addonsv1alpha1.ClusterAddonsConfigurationStatus{}
		err = r.addAddonsProcess(addon, lastAddon.Status)
		if err != nil {
			r.log.Errorf("while updating ClusterAddonsConfiguration process: %v", err)
			return reconcile.Result{}, exerr.Wrapf(err, "while updating ClusterAddonsConfiguration %q", request.NamespacedName)
		}
		r.log.Infof("Update ClusterAddonsConfiguration %s process completed", addon.Name)
	}
	return reconcile.Result{}, nil
}
// addAddonsProcess loads all addons for the configuration, revises ID
// duplications, persists ready addons, updates the status, and reprocesses
// any sibling configurations that were previously in conflict. lastStatus is
// the status before this run and drives cleanup of previously-stored addons.
func (r *ReconcileClusterAddonsConfiguration) addAddonsProcess(addon *addonsv1alpha1.ClusterAddonsConfiguration, lastStatus addonsv1alpha1.ClusterAddonsConfigurationStatus) error {
	r.log.Infof("- load addons and charts for each addon")
	repositories := r.addonLoader.Load(addon.Spec.Repositories)
	r.log.Info("- check duplicate ID addons alongside repositories")
	repositories.ReviseAddonDuplicationInRepository()
	r.log.Info("- check duplicates ID addons in existing ClusterAddonsConfigurations")
	list, err := r.existingAddonsConfigurations(addon.Name)
	if err != nil {
		return exerr.Wrap(err, "while fetching ClusterAddonsConfigurations list")
	}
	repositories.ReviseAddonDuplicationInClusterStorage(list)
	// Any failed repository marks the whole configuration as Failed.
	if repositories.IsRepositoriesFailed() {
		addon.Status.Phase = addonsv1alpha1.AddonsConfigurationFailed
	} else {
		addon.Status.Phase = addonsv1alpha1.AddonsConfigurationReady
	}
	r.log.Infof("- status: %s", addon.Status.Phase)
	var deletedAddons []string
	switch addon.Status.Phase {
	case addonsv1alpha1.AddonsConfigurationFailed:
		if _, err = r.updateAddonStatus(r.statusSnapshot(addon, repositories)); err != nil {
			return exerr.Wrap(err, "while update ClusterAddonsConfiguration status")
		}
		// Ready -> Failed transition: remove addons stored by the last run.
		if lastStatus.Phase == addonsv1alpha1.AddonsConfigurationReady {
			deletedAddons, err = r.deleteAddonsFromRepository(lastStatus.Repositories)
			if err != nil {
				return exerr.Wrap(err, "while deleting addons from repository")
			}
		}
	case addonsv1alpha1.AddonsConfigurationReady:
		r.log.Info("- save ready addons and charts in storage")
		if err := r.saveAddon(repositories); err != nil {
			return exerr.Wrap(err, "while saving ready addons and charts in storage")
		}
		if _, err = r.updateAddonStatus(r.statusSnapshot(addon, repositories)); err != nil {
			return exerr.Wrap(err, "while update ClusterAddonsConfiguration status")
		}
		// Ready -> Ready: drop addons that disappeared from the new status.
		if lastStatus.Phase == addonsv1alpha1.AddonsConfigurationReady {
			deletedAddons, err = r.deleteOrphanAddons(addon.Status.Repositories, lastStatus.Repositories)
			if err != nil {
				return exerr.Wrap(err, "while deleting orphan addons from storage")
			}
		}
	}
	// syncBroker is set by saveAddon/removeAddon when storage changed.
	if r.syncBroker {
		r.log.Info("- ensure ClusterServiceBroker")
		if err = r.ensureBroker(addon); err != nil {
			return exerr.Wrap(err, "while ensuring ClusterServiceBroker")
		}
	}
	if len(deletedAddons) > 0 {
		r.log.Info("- reprocessing conflicting addons configurations")
		for _, key := range deletedAddons {
			// reprocess ClusterAddonsConfiguration again if it contains a conflicting addons
			if err := r.reprocessConflictingAddonsConfiguration(key, list); err != nil {
				return exerr.Wrap(err, "while requesting processing of conflicting ClusterAddonsConfigurations")
			}
		}
	}
	return nil
}
// deleteAddonsProcess cleans up after a ClusterAddonsConfiguration is
// deleted: it reprocesses failed siblings, deletes the ClusterServiceBroker
// when no ready configuration remains, removes this configuration's addons
// and charts from storage, and finally drops the protection finalizer.
func (r *ReconcileClusterAddonsConfiguration) deleteAddonsProcess(addon *addonsv1alpha1.ClusterAddonsConfiguration) error {
	r.log.Infof("Start delete ClusterAddonsConfiguration %s", addon.Name)
	if addon.Status.Phase == addonsv1alpha1.AddonsConfigurationReady {
		adds, err := r.existingAddonsConfigurations(addon.Name)
		if err != nil {
			return exerr.Wrap(err, "while listing ClusterAddonsConfigurations")
		}
		deleteBroker := true
		// NOTE(review): this loop variable shadows the outer `addon`
		// parameter; the outer object is inaccessible inside the loop.
		for _, addon := range adds.Items {
			if addon.Status.Phase != addonsv1alpha1.AddonsConfigurationReady {
				// reprocess ClusterAddonsConfiguration again if was failed
				if err := r.reprocessAddonsConfiguration(&addon); err != nil {
					return exerr.Wrapf(err, "while requesting reprocess for ClusterAddonsConfiguration %s", addon.Name)
				}
			} else {
				// At least one other configuration is still Ready, so the
				// broker must survive.
				deleteBroker = false
			}
		}
		if deleteBroker {
			r.log.Info("- delete ClusterServiceBroker")
			if err := r.brokerFacade.Delete(); err != nil {
				return exerr.Wrap(err, "while deleting ClusterServiceBroker")
			}
		}
		// Remove every addon (and its charts) recorded in this
		// configuration's status; missing entries are tolerated.
		for _, repo := range addon.Status.Repositories {
			for _, a := range repo.Addons {
				err := r.removeAddon(a)
				if err != nil && !storage.IsNotFoundError(err) {
					return exerr.Wrapf(err, "while deleting addon with charts for addon %s", a.Name)
				}
			}
		}
		if !deleteBroker && r.syncBroker {
			if err := r.brokerSyncer.Sync(); err != nil {
				return exerr.Wrapf(err, "while syncing ClusterServiceBroker for addon %s", addon.Name)
			}
		}
	}
	if err := r.deleteFinalizer(addon); err != nil {
		return exerr.Wrapf(err, "while deleting finalizer from ClusterAddonsConfiguration %s", addon.Name)
	}
	r.log.Info("Delete ClusterAddonsConfiguration process completed")
	return nil
}
// ensureBroker guarantees the ClusterServiceBroker exists, creating it when
// missing and otherwise resyncing it if storage changed during this run.
func (r *ReconcileClusterAddonsConfiguration) ensureBroker(addon *addonsv1alpha1.ClusterAddonsConfiguration) error {
	exist, err := r.brokerFacade.Exist()
	if err != nil {
		return exerr.Wrap(err, "while checking if ClusterServiceBroker exists")
	}
	switch {
	case !exist:
		r.log.Info("- creating ClusterServiceBroker")
		if err := r.brokerFacade.Create(); err != nil {
			return exerr.Wrapf(err, "while creating ClusterServiceBroker for addon %s", addon.Name)
		}
	case r.syncBroker:
		if err := r.brokerSyncer.Sync(); err != nil {
			return exerr.Wrapf(err, "while syncing ClusterServiceBroker for addon %s", addon.Name)
		}
	}
	return nil
}
// existingAddonsConfigurations lists every ClusterAddonsConfiguration in the
// cluster except the one with the given name.
func (r *ReconcileClusterAddonsConfiguration) existingAddonsConfigurations(addonName string) (*addonsv1alpha1.ClusterAddonsConfigurationList, error) {
	all, err := r.addonsConfigurationList()
	if err != nil {
		return nil, exerr.Wrap(err, "while listing ClusterAddonsConfigurations")
	}
	others := &addonsv1alpha1.ClusterAddonsConfigurationList{}
	for _, item := range all.Items {
		if item.Name == addonName {
			continue
		}
		others.Items = append(others.Items, item)
	}
	return others, nil
}
// addonsConfigurationList fetches all ClusterAddonsConfigurations via the
// controller-runtime client.
func (r *ReconcileClusterAddonsConfiguration) addonsConfigurationList() (*addonsv1alpha1.ClusterAddonsConfigurationList, error) {
	list := &addonsv1alpha1.ClusterAddonsConfigurationList{}
	if err := r.Client.List(context.TODO(), &client.ListOptions{}, list); err != nil {
		return list, exerr.Wrap(err, "during fetching ClusterAddonConfiguration list by client")
	}
	return list, nil
}
// deleteOrphanAddons removes addons present in the previous status
// (lastRepos) but absent from the current one (repos), returning the keys of
// every addon it removed. Already-missing addons are tolerated.
func (r *ReconcileClusterAddonsConfiguration) deleteOrphanAddons(repos []addonsv1alpha1.StatusRepository, lastRepos []addonsv1alpha1.StatusRepository) ([]string, error) {
	keep := map[string]addonsv1alpha1.Addon{}
	for _, repo := range repos {
		for _, ad := range repo.Addons {
			keep[ad.Key()] = ad
		}
	}
	var removedKeys []string
	for _, repo := range lastRepos {
		for _, ad := range repo.Addons {
			if _, stillUsed := keep[ad.Key()]; stillUsed {
				continue
			}
			if err := r.removeAddon(ad); err != nil && !storage.IsNotFoundError(err) {
				return nil, exerr.Wrapf(err, "while deleting addons and charts for addon %s", ad.Name)
			}
			removedKeys = append(removedKeys, ad.Key())
		}
	}
	return removedKeys, nil
}
// deleteAddonsFromRepository removes every addon referenced by the given
// status repositories and returns their keys. Missing addons are tolerated.
func (r *ReconcileClusterAddonsConfiguration) deleteAddonsFromRepository(repos []addonsv1alpha1.StatusRepository) ([]string, error) {
	var removedKeys []string
	for _, repo := range repos {
		for _, ad := range repo.Addons {
			err := r.removeAddon(ad)
			if err != nil && !storage.IsNotFoundError(err) {
				return nil, exerr.Wrapf(err, "while deleting addons and charts for addon %s", ad.Name)
			}
			removedKeys = append(removedKeys, ad.Key())
		}
	}
	return removedKeys, nil
}
// removeAddon deletes the addon and its per-plan charts from storage and
// removes its ClusterDocsTopic, marking the broker for resync on success.
// Note: semver.MustParse panics on a malformed version (pre-existing behaviour).
func (r *ReconcileClusterAddonsConfiguration) removeAddon(ad addonsv1alpha1.Addon) error {
	r.log.Infof("- delete addon %s from storage", ad.Name)
	// Parse the version once instead of once per storage call.
	ver := *semver.MustParse(ad.Version)
	addon, err := r.addonStorage.Get(internal.ClusterWide, internal.AddonName(ad.Name), ver)
	if err != nil {
		return err
	}
	err = r.addonStorage.Remove(internal.ClusterWide, internal.AddonName(ad.Name), ver)
	if err != nil {
		return err
	}
	r.syncBroker = true
	r.log.Infof("- delete ClusterDocsTopic for addon %s", addon.Name)
	if err := r.docsProvider.EnsureClusterDocsTopicRemoved(string(addon.ID)); err != nil {
		return exerr.Wrapf(err, "while ensuring ClusterDocsTopic for addon %s is removed", addon.ID)
	}
	// Charts are stored per plan; remove each referenced chart as well.
	for _, plan := range addon.Plans {
		err = r.chartStorage.Remove(internal.ClusterWide, plan.ChartRef.Name, plan.ChartRef.Version)
		if err != nil {
			return err
		}
	}
	return nil
}
// reprocessConflictingAddonsConfiguration requests reprocessing of the first
// non-ready configuration whose non-ready repositories reference the given
// addon key.
func (r *ReconcileClusterAddonsConfiguration) reprocessConflictingAddonsConfiguration(key string, list *addonsv1alpha1.ClusterAddonsConfigurationList) error {
	for i := range list.Items {
		cfg := &list.Items[i]
		if cfg.Status.Phase == addonsv1alpha1.AddonsConfigurationReady {
			continue
		}
		for _, repo := range cfg.Status.Repositories {
			if repo.Status == addonsv1alpha1.RepositoryStatusReady {
				continue
			}
			for _, a := range repo.Addons {
				if a.Key() == key {
					return r.reprocessAddonsConfiguration(cfg)
				}
			}
		}
	}
	return nil
}
// reprocessAddonsConfiguration bumps Spec.ReprocessRequest on a freshly
// fetched copy of the configuration, which triggers another reconciliation.
func (r *ReconcileClusterAddonsConfiguration) reprocessAddonsConfiguration(addon *addonsv1alpha1.ClusterAddonsConfiguration) error {
	fresh := &addonsv1alpha1.ClusterAddonsConfiguration{}
	key := types.NamespacedName{Name: addon.Name}
	if err := r.Client.Get(context.Background(), key, fresh); err != nil {
		return exerr.Wrapf(err, "while getting ClusterAddonsConfiguration %s", addon.Name)
	}
	fresh.Spec.ReprocessRequest++
	if err := r.Client.Update(context.Background(), fresh); err != nil {
		return exerr.Wrapf(err, "while incrementing a reprocess requests for ClusterAddonsConfiguration %s", addon.Name)
	}
	return nil
}
// TODO: fix the error handling. Now it has two different behaviour.
// Move logging `if exists` to the end.

// saveAddon upserts every ready addon and its charts into storage, ensuring
// a ClusterDocsTopic for addons that ship documentation. Upsert failures are
// recorded on the addon and skipped; a docs failure aborts the whole run.
func (r *ReconcileClusterAddonsConfiguration) saveAddon(repositories *addons.RepositoryCollection) error {
	for _, addon := range repositories.ReadyAddons() {
		if len(addon.CompleteAddon.Docs) == 1 {
			r.log.Infof("- ensure ClusterDocsTopic for addon %s", addon.CompleteAddon.ID)
			if err := r.docsProvider.EnsureClusterDocsTopic(addon.CompleteAddon); err != nil {
				// Fixed: the previous Wrapf embedded the error twice
				// ("...: %v" plus the wrap) and capitalized the message.
				return exerr.Wrapf(err, "while ensuring ClusterDocsTopic for addon %s", addon.CompleteAddon.ID)
			}
		}
		exist, err := r.addonStorage.Upsert(internal.ClusterWide, addon.CompleteAddon)
		if err != nil {
			addon.RegisteringError(err)
			r.log.Errorf("cannot upsert addon %v:%v into storage", addon.CompleteAddon.Name, addon.CompleteAddon.Version)
			continue
		}
		if exist {
			r.log.Infof("addon %v:%v already existed in storage, addon was replaced", addon.CompleteAddon.Name, addon.CompleteAddon.Version)
		}
		err = r.saveCharts(addon.Charts)
		if err != nil {
			addon.RegisteringError(err)
			r.log.Errorf("cannot upsert charts of %v:%v addon", addon.CompleteAddon.Name, addon.CompleteAddon.Version)
			continue
		}
		// Storage changed, so the broker must be resynced by the caller.
		r.syncBroker = true
	}
	return nil
}
// saveCharts upserts every chart of an addon into chart storage, logging
// when an existing chart was replaced.
func (r *ReconcileClusterAddonsConfiguration) saveCharts(charts []*chart.Chart) error {
	for _, c := range charts {
		replaced, err := r.chartStorage.Upsert(internal.ClusterWide, c)
		if err != nil {
			return err
		}
		if replaced {
			r.log.Infof("chart %s already existed in storage, chart was replaced", c.Metadata.Name)
		}
	}
	return nil
}
// statusSnapshot rebuilds addon.Status.Repositories from the processed
// repository collection and returns the mutated object.
func (r *ReconcileClusterAddonsConfiguration) statusSnapshot(addon *addonsv1alpha1.ClusterAddonsConfiguration, repositories *addons.RepositoryCollection) *addonsv1alpha1.ClusterAddonsConfiguration {
	addon.Status.Repositories = nil
	for _, repo := range repositories.Repositories {
		statusRepo := repo.Repository
		statusRepo.Addons = []addonsv1alpha1.Addon{}
		for _, a := range repo.Addons {
			statusRepo.Addons = append(statusRepo.Addons, a.Addon)
		}
		addon.Status.Repositories = append(addon.Status.Repositories, statusRepo)
	}
	return addon
}
// updateAddonStatus stamps the observed generation and processing time, then
// persists the status subresource.
func (r *ReconcileClusterAddonsConfiguration) updateAddonStatus(addon *addonsv1alpha1.ClusterAddonsConfiguration) (*addonsv1alpha1.ClusterAddonsConfiguration, error) {
	addon.Status.ObservedGeneration = addon.Generation
	addon.Status.LastProcessedTime = &v1.Time{Time: time.Now()}
	r.log.Infof("- update ClusterAddonsConfiguration %s status", addon.Name)
	if err := r.Status().Update(context.TODO(), addon); err != nil {
		return nil, exerr.Wrap(err, "while update ClusterAddonsConfiguration")
	}
	return addon, nil
}
// prepareForProcessing marks the configuration as Pending and makes sure the
// protection finalizer is attached before any addon processing starts.
func (r *ReconcileClusterAddonsConfiguration) prepareForProcessing(addon *addonsv1alpha1.ClusterAddonsConfiguration) (*addonsv1alpha1.ClusterAddonsConfiguration, error) {
	obj := addon.DeepCopy()
	obj.Status.Phase = addonsv1alpha1.AddonsConfigurationPending
	pendingInstance, err := r.updateAddonStatus(obj)
	if err != nil {
		return nil, exerr.Wrap(err, "while updating addons status")
	}
	if r.protection.hasFinalizer(pendingInstance.Finalizers) {
		return pendingInstance, nil
	}
	r.log.Info("- add a finalizer")
	pendingInstance.Finalizers = r.protection.addFinalizer(pendingInstance.Finalizers)
	err = r.Client.Update(context.Background(), pendingInstance)
	if err != nil {
		// Fixed: this update attaches the finalizer; the previous message
		// wrongly claimed it was another status update.
		return nil, exerr.Wrap(err, "while adding finalizer to addons configuration")
	}
	return pendingInstance, nil
}
// deleteFinalizer strips the protection finalizer (when present) so the API
// server can garbage-collect the object.
func (r *ReconcileClusterAddonsConfiguration) deleteFinalizer(addon *addonsv1alpha1.ClusterAddonsConfiguration) error {
	cp := addon.DeepCopy()
	if !r.protection.hasFinalizer(cp.Finalizers) {
		// Nothing to remove.
		return nil
	}
	r.log.Info("- delete a finalizer")
	cp.Finalizers = r.protection.removeFinalizer(cp.Finalizers)
	return r.Client.Update(context.Background(), cp)
}
|
package pascaltriangle
import (
"fmt"
"testing"
)
// TestBasic prints a Pascal triangle of height 30 using the naive
// combination function; it is a visual check and asserts nothing.
func TestBasic(t *testing.T) {
	const height = 30
	for row := 0; row < height; row++ {
		// Left padding so the output forms a centered triangle.
		for pad := height - row; pad > 0; pad-- {
			fmt.Printf(" ")
		}
		for col := 0; col <= row; col++ {
			fmt.Printf("%d ", combination(row, col))
		}
		fmt.Printf("\n")
	}
}
// TestDP prints a Pascal triangle of height 15 using the dynamic-programming
// combination function; it is a visual check and asserts nothing.
func TestDP(t *testing.T) {
	const height = 15
	for row := 0; row < height; row++ {
		// Left padding so the output forms a centered triangle.
		for pad := height - row; pad > 0; pad-- {
			fmt.Printf(" ")
		}
		for col := 0; col <= row; col++ {
			fmt.Printf("%d ", combinationDP(row, col))
		}
		fmt.Printf("\n")
	}
}
// BenchmarkBasic measures the naive combination over all cells of a
// height-15 triangle.
func BenchmarkBasic(b *testing.B) {
	const height = 15
	for n := 0; n < b.N; n++ {
		for row := 0; row < height; row++ {
			for col := 0; col <= row; col++ {
				combination(row, col)
			}
		}
	}
}
// BenchmarkDP measures the dynamic-programming combination over all cells of
// a height-15 triangle.
func BenchmarkDP(b *testing.B) {
	const height = 15
	for n := 0; n < b.N; n++ {
		for row := 0; row < height; row++ {
			for col := 0; col <= row; col++ {
				combinationDP(row, col)
			}
		}
	}
}
// TestGenerate logs the result of generate2 for 100 rows; it asserts nothing.
func TestGenerate(t *testing.T) {
	rows := generate2(100)
	t.Log(rows)
}
|
// Copyright 2018 Andreas Pannewitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"fmt"
)
// ===========================================================================
// ExampleID_Unit shows that the unit of a zero ID prints as the no-name marker.
func ExampleID_Unit() {
	var id ID
	fmt.Println(id.Unit())
	// Output:
	// <noName>
}
// ExampleIndex_Unit shows that the unit of a zero Index prints as 1.
func ExampleIndex_Unit() {
	var idx Index
	fmt.Println(idx.Unit())
	// Output:
	// 1
}
// ExampleCardinality_Unit shows that the unit of a zero Cardinality prints as 0.
func ExampleCardinality_Unit() {
	var card Cardinality
	fmt.Println(card.Unit())
	// Output:
	// 0
}
// ===========================================================================
// ExampleKindOfPair prints the kind of an ID, an Index and a Cardinality.
func ExampleKindOfPair() {
	fmt.Println(KindOfPair(ID("TestName")))
	fmt.Println(KindOfPair(Index{}))
	fmt.Println(KindOfPair(Cardinality{}))
	// Output:
	// { name | core.name }
	// { ordinalNumber | core.ordinalNumber }
	// { cardinalNumber | core.cardinalNumber }
}
// ===========================================================================
// ExampleStringOfPair prints the string form of a named ID.
func ExampleStringOfPair() {
	fmt.Println(StringOfPair(ID("TestName")))
	// Output:
	// TestName
}
// ===========================================================================
// ExampleStringOfPair_nil compares String() with StringOfPair for the zero
// value of each pair-like type; only the Pair interface differs (its zero
// value is a nil interface).
func ExampleStringOfPair_nil() {
	var namE name
	var indx Index
	var card Cardinality
	var kinD kind
	var head Head
	var tail Tail
	var nesT nest
	var pair Pair
	fmt.Println("name:", namE, "==", StringOfPair(namE))
	fmt.Println("indx:", indx, "==", StringOfPair(indx))
	fmt.Println("card:", card, "==", StringOfPair(card))
	fmt.Println("kind:", kinD, "==", StringOfPair(kinD))
	fmt.Println("head:", head, "==", StringOfPair(head))
	fmt.Println("tail:", tail, "==", StringOfPair(tail))
	fmt.Println("nest:", nesT, "==", StringOfPair(nesT))
	fmt.Println()
	fmt.Println("pair:", pair, "!=", StringOfPair(pair))
	// Output:
	// name: <noName> == <noName>
	// indx: <nil> == <nil>
	// card: <nil> == <nil>
	// kind: { <noName> | <nil> } == { <noName> | <nil> }
	// head: (<nilHead>) == (<nilHead>)
	// tail: [<nilTail>] == [<nilTail>]
	// nest: { <nilPair> | <nilPair> } == { <nilPair> | <nilPair> }
	//
	// pair: <nil> != <nilPair>
}
// ExampleKindOfPair_nil prints the kind (ID and Type) of the zero value of
// several pair-like types.
func ExampleKindOfPair_nil() {
	var name ID
	var index Index
	var cardinality Cardinality
	var kinD kind
	var head Head
	var tail Tail
	// var pair Pair
	{
		k := KindOfPair(name)
		fmt.Println("name.Kind(): ", k.ID, k.Type)
	}
	{
		k := KindOfPair(index)
		fmt.Println("index.Kind(): ", k.ID, k.Type)
	}
	{
		k := KindOfPair(cardinality)
		fmt.Println("card.Kind(): ", k.ID, k.Type)
	}
	fmt.Println()
	{
		k := KindOfPair(kinD)
		fmt.Println("kinD.Kind(): ", k.ID, k.Type)
	}
	{
		k := KindOfPair(head)
		fmt.Println("head.Kind(): ", k.ID, k.Type)
	}
	{
		k := KindOfPair(tail)
		fmt.Println("tail.Kind(): ", k.ID, k.Type)
	}
	// Output:
	// name.Kind():  name core.name
	// index.Kind():  ordinalNumber core.ordinalNumber
	// card.Kind():  cardinalNumber core.cardinalNumber
	//
	// kinD.Kind():  <noName> core.kind
	// head.Kind():  <noName> core.Head
	// tail.Kind():  <noName> core.Tail
}
// ExampleNilTail shows that a nil tail prints as [] through every printing
// path, and that invoking it yields a nil head and another nil tail.
func ExampleNilTail() {
	tail := NilTail() // Note: Any tail implements Pair
	fmt.Println(tail)
	fmt.Println(tail.String())
	fmt.Println(StringOfPair(tail))
	head, tail := tail()
	fmt.Println(head)
	fmt.Println(head.String())
	fmt.Println(StringOfPair(head))
	fmt.Println(tail)
	fmt.Println(tail.String())
	fmt.Println(StringOfPair(tail))
	// Output:
	// []
	// []
	// []
	// (<nilHead>)
	// (<nilHead>)
	// (<nilHead>)
	// []
	// []
	// []
}
|
package main
import (
"fmt"
"os"
"os/user"
"compiler/evaluate"
)
// main greets the current OS user and hands control to the REPL reading from
// stdin and writing to stdout.
func main() {
	// Renamed from `user` so the local no longer shadows the os/user package.
	currentUser, err := user.Current()
	if err != nil {
		panic(err)
	}
	fmt.Printf("Hello %s!This is my IDK language\n ", currentUser.Username)
	fmt.Printf("You can type something in command line,but first check ../grammar/toke.go\n")
	// NOTE(review): `repl` is not among the imports above ("compiler/evaluate"
	// is imported instead) — confirm the intended package path.
	repl.Start(os.Stdin, os.Stdout)
}
|
package main
import (
"fmt"
"os"
"github.com/gin-gonic/gin"
"gopkg.in/urfave/cli.v1"
)
// helpAction is the CLI handler for the help command; it only prints a
// greeting and always succeeds.
func helpAction(c *cli.Context) error {
	fmt.Println("Coucou")
	return nil
}
// listenAction starts the Kafka consumer in the background and serves the
// HTTP status API on :8080. All Kafka settings come from the environment and
// the command fails fast (exit code 11) when any of them is missing.
func listenAction(c *cli.Context) error {
	kafkaHost := os.Getenv("kafka_host")
	topic := os.Getenv("kafka_topic")
	group := os.Getenv("kafka_group")
	username := os.Getenv("kafka_username")
	password := os.Getenv("kafka_password")
	if kafkaHost == "" || topic == "" || group == "" || username == "" || password == "" {
		return cli.NewExitError("Missing env variable", 11)
	}
	// NOTE(review): mapPb is read by the handlers below while this goroutine
	// may be writing it — confirm access is synchronized.
	go consumeFromKafka(kafkaHost, topic, group, username, password)
	r := gin.New()
	// Liveness probe.
	r.GET("/mon/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	// Dump the whole in-memory queue state.
	r.GET("/queue", func(c *gin.Context) {
		c.JSON(200, mapPb)
	})
	// Status of a single queue entry; 410 when the key is unknown.
	r.GET("/queue/:key", func(c *gin.Context) {
		key := c.Param("key")
		if _, ok := mapPb[key]; !ok {
			c.JSON(410, "")
			return
		}
		c.JSON(200, gin.H{"status": mapPb[key]})
	})
	// Fixed: propagate the server error instead of discarding it — Run blocks
	// until the listener fails, so the old `return nil` hid startup errors.
	return r.Run("0.0.0.0:8080")
}
|
package eventbus
import (
"github.com/asaskevich/EventBus"
)
// TaniaEventBus is the event-bus abstraction used by the application:
// publish an event under a name, or subscribe a handler to a name.
type TaniaEventBus interface {
	Publish(eventName string, event interface{})
	Subscribe(eventName string, handlerFunc interface{})
}
// SimpleEventBus adapts an EventBus.Bus to the TaniaEventBus interface.
type SimpleEventBus struct {
	bus EventBus.Bus
}
// NewSimpleEventBus wraps the given EventBus.Bus in a SimpleEventBus.
func NewSimpleEventBus(bus EventBus.Bus) *SimpleEventBus {
	e := new(SimpleEventBus)
	e.bus = bus
	return e
}
// Publish forwards the event to the underlying bus under eventName.
func (e *SimpleEventBus) Publish(eventName string, event interface{}) {
	e.bus.Publish(eventName, event)
}
// Subscribe registers handler for eventName on the underlying bus.
// NOTE(review): the underlying Subscribe returns an error that is silently
// dropped here — confirm this is intentional.
func (e *SimpleEventBus) Subscribe(eventName string, handler interface{}) {
	e.bus.Subscribe(eventName, handler)
}
|
package util
import (
"io/ioutil"
"path/filepath"
yaml "gopkg.in/yaml.v1"
)
// Config is the global application configuration loaded from the YAML file.
type Config struct {
	Debug bool `yaml:"debug"`
	APP string `yaml:"app"`
	Auto bool `yaml:"automatic"`
	Device string `yaml:"device"`
	OCR string `yaml:"ocr"`
	AdbAddress string `yaml:"adb_address"`
	WdaAddress string `yaml:"wda_address"`
	BaiduAPIKey string `yaml:"Baidu_API_Key"`
	BaiduSecretKey string `yaml:"Baidu_Secret_Key"`
	// Xigua Video: question screenshot region (x, y, width, height)
	XgQx int `yaml:"xg_q_x"`
	XgQy int `yaml:"xg_q_y"`
	XgQw int `yaml:"xg_q_w"`
	XgQh int `yaml:"xg_q_h"`
	// Xigua Video: answer capture region
	XgAx int `yaml:"xg_a_x"`
	XgAy int `yaml:"xg_a_y"`
	XgAw int `yaml:"xg_a_w"`
	XgAh int `yaml:"xg_a_h"`
	// Chongding Dahui: question screenshot region
	CdQx int `yaml:"cd_q_x"`
	CdQy int `yaml:"cd_q_y"`
	CdQw int `yaml:"cd_q_w"`
	CdQh int `yaml:"cd_q_h"`
	// Chongding Dahui: answer capture region
	CdAx int `yaml:"cd_a_x"`
	CdAy int `yaml:"cd_a_y"`
	CdAw int `yaml:"cd_a_w"`
	CdAh int `yaml:"cd_a_h"`
	// Huajiao Live: question screenshot region
	HjQx int `yaml:"hj_q_x"`
	HjQy int `yaml:"hj_q_y"`
	HjQw int `yaml:"hj_q_w"`
	HjQh int `yaml:"hj_q_h"`
	// Huajiao Live: answer capture region
	HjAx int `yaml:"hj_a_x"`
	HjAy int `yaml:"hj_a_y"`
	HjAw int `yaml:"hj_a_w"`
	HjAh int `yaml:"hj_a_h"`
	// Zhishi Chaoren: question screenshot region
	ZsQx int `yaml:"zs_q_x"`
	ZsQy int `yaml:"zs_q_y"`
	ZsQw int `yaml:"zs_q_w"`
	ZsQh int `yaml:"zs_q_h"`
	// Zhishi Chaoren: answer capture region
	ZsAx int `yaml:"zs_a_x"`
	ZsAy int `yaml:"zs_a_y"`
	ZsAw int `yaml:"zs_a_w"`
	ZsAh int `yaml:"zs_a_h"`
}
// cfg caches the parsed configuration after the first GetConfig call.
var cfg *Config

// cfgFilename is the configuration file path; override with SetConfigFile.
var cfgFilename = "./config.yml"
// SetConfigFile overrides the path of the YAML configuration file.
func SetConfigFile(path string) {
	cfgFilename = path
}
// GetConfig parses the configuration file on first use and caches the
// result; later calls return the cached value. It panics when the path
// cannot be resolved, or when the file cannot be read or parsed.
func GetConfig() *Config {
	if cfg != nil {
		return cfg
	}
	filename, err := filepath.Abs(cfgFilename)
	if err != nil {
		// Fixed: this error was previously discarded, which surfaced later
		// as a confusing read error on an empty path.
		panic(err)
	}
	yamlFile, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	var c *Config
	if err := yaml.Unmarshal(yamlFile, &c); err != nil {
		panic(err)
	}
	cfg = c
	return cfg
}
|
package main
import (
"io/ioutil"
"log"
"net"
"net/http"
"text/template"
)
// networkHandler serves an HTML page describing the host's network setup.
type networkHandler struct{}

// networkTemplate renders the interface table and /etc/resolv.conf contents.
// Fixed: the interface table was never closed (`<table>` instead of
// `</table>` after the row loop).
// NOTE(review): rendered via text/template, so values are not HTML-escaped —
// acceptable for trusted host data, but html/template would be safer.
var networkTemplate = `
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="css/bootstrap.min.css">
</head>
<div class="container">
<body>
<h1>Network Info</h1>
<h3>Interfaces</h3>
<table class="table table-striped">
<tr>
<th>Name</th><th>HardwareAddr</th><th>IP</th>
</tr>
{{range $iface := .Interfaces}}<tr>
<td>{{$iface.Name}}</td>
<td>{{$iface.HardwareAddr}}</td>
<td><ul class="list-unstyled">{{range $addr := $iface.Addrs}}
<li>{{$addr.String}}</li>{{end}}
</ul>
</td>
</tr>{{end}}
</table>
<h3>/etc/resolv.conf</h3>
<pre>{{.ResolvConf}}</pre>
</body>
</div>
</html>
`
// ServeHTTP renders the network-info page. Failures to gather data are now
// reported to the client as 500s instead of silently producing an empty 200.
func (h networkHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	type Data struct {
		ResolvConf string
		Interfaces []net.Interface
	}
	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		log.Println(err)
		http.Error(w, "cannot read /etc/resolv.conf", http.StatusInternalServerError)
		return
	}
	ifaces, err := net.Interfaces()
	if err != nil {
		log.Println(err)
		http.Error(w, "cannot list network interfaces", http.StatusInternalServerError)
		return
	}
	data := Data{
		ResolvConf: string(resolvConf),
		Interfaces: ifaces,
	}
	t := template.Must(template.New("env").Parse(networkTemplate))
	if err := t.Execute(w, data); err != nil {
		// Headers may already have been written, so only log the failure.
		log.Println("executing template:", err)
	}
}
|
package main
import (
"fmt"
"testing"
)
// TestTwoSum runs TwoSum on a fixed input and prints the result.
// NOTE(review): this test asserts nothing — consider pinning the expected
// answer once the return contract (indices vs. values) is confirmed.
func TestTwoSum(t *testing.T) {
	got := TwoSum([]int{1, 3, 5, 6}, 7)
	fmt.Println(got)
}
|
package dao
import (
"git.dustess.com/mk-base/mongo-driver/mongo"
"git.dustess.com/mk-training/mk-blog-svc/pkg/blogstatistics/model"
"go.mongodb.org/mongo-driver/bson"
)
// AsynStaticsByBlog aggregates view counts grouped by (blogId, type) for the
// given filter (non-realtime statistics).
// NOTE(review): aggregation/decode errors are swallowed and reported as a
// nil result — callers cannot distinguish "no data" from "query failed".
func (m *BlogStatDao) AsynStaticsByBlog(filter interface{}) (result []model.StatBlog) {
	pipe := []bson.M{
		{"$match": filter},
		{"$group": bson.M{"_id": bson.M{"blogID": "$blogId", "type": "$type"}, "view": bson.M{"$sum": "$view"}}},
		{"$project": bson.M{"blogId": "$_id.blogID", "typ": "$_id.type", "view": 1}},
	}
	cursor, err := m.dao.Aggregate(m.ctx, pipe)
	if err != nil {
		return nil
	}
	err = cursor.All(m.ctx, &result)
	if err != nil {
		return nil
	}
	return
}
// BlogStatQuery builds a bson filter for blog statistics: an optional blog-ID
// set, an optional type, and an optional [start, end] window on createdAt.
// Zero/empty arguments omit the corresponding clause.
func (m *BlogStatDao) BlogStatQuery(blogID []string, start, end int64, typ string) interface{} {
	query := bson.M{}
	if len(blogID) > 0 {
		query["blogId"] = bson.M{"$in": blogID}
	}
	if typ != "" {
		query["type"] = typ
	}
	window := bson.M{}
	if start > 0 {
		window["$gte"] = start
	}
	if end > 0 {
		window["$lte"] = end
	}
	if len(window) > 0 {
		query["createdAt"] = window
	}
	return query
}
// FindLast returns the most recent statistics document of the given type,
// i.e. the first document when sorted by createdAt descending.
func (m *BlogStatDao) FindLast(typ string) (stat model.Statistics, err error) {
	filter := bson.M{"type": typ}
	op := &mongo.FindOneOptions{
		Sort: bson.M{"createdAt": -1},
	}
	err = m.dao.FindOne(m.ctx, filter, op).Decode(&stat)
	return
}
// StatisticsByBlog aggregates per-blog statistics for the given filter,
// sorted by pv or uv (depending on typ) and limited to `limit` rows.
// NOTE(review): both "uv" and "pv" are computed as $sum of $view — confirm
// whether uv should instead be a distinct-user count.
func (m *BlogStatDao) StatisticsByBlog(filter interface{}, typ string, limit int64) (result []model.StatOverview) {
	// Sort by the requested metric, tie-broken by _id for stable paging.
	sort := bson.M{}
	if typ == model.PV {
		sort["stat.pv"] = -1
	} else {
		sort["stat.uv"] = -1
	}
	sort["_id"] = 1
	pipe := []bson.M{
		{"$match": filter},
		{"$group": bson.M{"_id": bson.M{"blogID": "$blogId", "type": "$type"}, "uv": bson.M{"$sum": "$view"}, "pv": bson.M{"$sum": "$view"}}},
		{"$project": bson.M{"blogId": "$_id.blogID", "typ": "$_id.type", "uv": 1, "pv": 1, "_id": 0}},
		{"$group": bson.M{"_id": "$blogId", "stat": bson.M{"$push": bson.M{"uv": "$uv", "pv":"$pv","typ": "$typ"}}}},
		{"$sort": sort},
		{"$limit": limit},
	}
	cursor, err := m.dao.Aggregate(m.ctx, pipe)
	if err != nil {
		return nil
	}
	err = cursor.All(m.ctx, &result)
	if err != nil {
		return nil
	}
	return
}
// StaticsDetail returns per-day statistics for one blog within [start, end).
// Each row groups uv/pv sums by (date, type), sorted by date ascending.
func (m *BlogStatDao) StaticsDetail(blogID string, start,end int64) (result []model.StatBlogDetail) {
	filter := bson.M{"blogId": blogID, "createdAt": bson.M{"$gte": start, "$lt": end}}
	// createdAt (seconds) -> ms, shifted by 28800000 ms (8 hours — presumably
	// UTC+8 local time; confirm), then formatted as YYYY-MM-DD.
	dateBson := bson.M{"$dateToString": bson.M{"format": "%Y-%m-%d", "date": bson.M{"$toDate": bson.M{"$add": bson.A{28800000, bson.M{"$multiply": bson.A{"$createdAt", 1000}}}}}}}
	pipe := []bson.M{
		{"$match": filter},
		{"$project": bson.M{"date": dateBson, "blogId": 1, "type":1,"view":1}},
		{"$group": bson.M{"_id": bson.M{"date": "$date", "type": "$type"}, "uv": bson.M{"$sum": "$view"}, "pv": bson.M{"$sum": "$view"}}},
		{"$project": bson.M{"date": "$_id.date", "typ": "$_id.type", "uv": 1, "pv": 1, "_id": 0}},
		{"$group": bson.M{"_id": "$date", "stat": bson.M{"$push": bson.M{"uv": "$uv", "pv":"$pv","typ": "$typ"}}}},
		{"$sort": bson.M{"_id": 1}},
	}
	cursor, err := m.dao.Aggregate(m.ctx, pipe)
	if err != nil {
		return nil
	}
	err = cursor.All(m.ctx, &result)
	if err != nil {
		return nil
	}
	return
}
// FindLogsByTime returns all view logs created at or after the given unix
// timestamp. It delegates to FindLogs so the cursor handling lives in one
// place instead of being duplicated.
func (m *BlogLogDao) FindLogsByTime(at int64) (result []model.ViewLogs, err error) {
	return m.FindLogs(bson.M{"createdAt": bson.M{"$gte": at}})
}
// FindLogs returns every view log matching the given bson filter.
func (m *BlogLogDao) FindLogs(filter interface{}) (result []model.ViewLogs, err error) {
	cursor, err := m.dao.Find(m.ctx, filter)
	if err != nil {
		return nil, err
	}
	err = cursor.All(m.ctx, &result)
	return
}
// FindLogsByUserIDs returns view logs for the given blog created before `at`
// by any of the listed users. An empty user list yields an empty result.
func (m *BlogLogDao) FindLogsByUserIDs(userIDs []string, blogID string, at int64) (result []model.ViewLogs, err error) {
	if len(userIDs) == 0 {
		return
	}
	filter := bson.M{
		"userId":    bson.M{"$in": userIDs},
		"blogId":    blogID,
		"createdAt": bson.M{"$lt": at},
	}
	return m.FindLogs(filter)
}
// FindLogsByIps returns view logs for the given blog created before `at`
// from any of the listed client IPs. An empty IP list yields an empty result.
func (m *BlogLogDao) FindLogsByIps(ips []string, blogID string, at int64) (result []model.ViewLogs, err error) {
	if len(ips) == 0 {
		return
	}
	filter := bson.M{
		"clientIp":  bson.M{"$in": ips},
		"blogId":    blogID,
		"createdAt": bson.M{"$lt": at},
	}
	return m.FindLogs(filter)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-18 14:52
# @File : of_剑指_Offer_21_调整数组顺序使奇数位于偶数前面.go
# @Description : 双指针
# @Attention :
*/
package offer
// exchange reorders nums in place so every odd number precedes every even
// number, returning the same slice. The relative order of the odd values is
// preserved; the order of the even values is not.
func exchange(nums []int) []int {
	next := 0 // index where the next odd value will be placed
	for i, v := range nums {
		if v%2 != 0 {
			nums[i], nums[next] = nums[next], nums[i]
			next++
		}
	}
	return nums
}
|
package envoy
import "github.com/pivotal-cf-experimental/envoy/domain"
// Broker defines the interface that makes up a Service Broker for CloudFoundry.
// The Broker interface is the combined interface including all of the expected
// functionality of a service broker.
type Broker interface {
	Cataloger
	Credentialer
	Provisioner
	Binder
	Unbinder
	Deprovisioner
}
// Cataloger defines the interface for a broker component providing the catalog
// information.
type Cataloger interface {
	Catalog() domain.Catalog
}
// Credentialer defines the interface for the Basic Auth credentials required to
// interact with the service broker.
type Credentialer interface {
	Credentials() (username, password string)
}
// Provisioner defines the interface for a request to provision a service.
type Provisioner interface {
	Provision(domain.ProvisionRequest) (domain.ProvisionResponse, error)
}
// Deprovisioner defines the interface for a request to deprovision a service.
type Deprovisioner interface {
	Deprovision(domain.DeprovisionRequest) error
}
// Binder defines the interface for a request to bind a service.
type Binder interface {
	Bind(domain.BindRequest) (domain.BindResponse, error)
}
// Unbinder defines the interface for a request to unbind a service.
type Unbinder interface {
	Unbind(domain.UnbindRequest) error
}
|
package main
import (
"fmt"
"github.com/nattaponra/my-go/interface/geometry"
)
// Geometry is the interface satisfied by any shape that can report its
// area and perimeter.
type Geometry interface {
	Area() float64
	Perim() float64
}
// Measure prints the shape value itself followed by its area and perimeter.
func Measure(g Geometry) {
	fmt.Println(g)
	fmt.Println("Area:", g.Area())
	fmt.Println("Perim:", g.Perim())
}
// main demonstrates Measure with two concrete Geometry implementations
// from the geometry package.
func main() {
	Measure(geometry.Rect{Height: 10, Width: 10})
	Measure(geometry.Circle{Radius: 50})
}
|
package command_helpers
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
helm_v3 "helm.sh/helm/v3/cmd/helm"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"github.com/werf/logboek"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
)
// BuildChartDependenciesOptions configures BuildChartDependenciesInDir.
type BuildChartDependenciesOptions struct {
	// Keyring is the path to the keyring used for chart verification.
	Keyring    string
	// SkipUpdate skips refreshing the repository cache before building.
	SkipUpdate bool
	Verify     downloader.VerificationStrategy
	// LoadOptions is installed as loader.GlobalLoadOptions for the duration
	// of the build and restored afterwards.
	LoadOptions *loader.LoadOptions
}
// BuildChartDependenciesInDir writes the given Chart.yaml / Chart.lock buffers
// into targetDir and runs the helm downloader.Manager to fetch the chart's
// dependencies into that directory. opts.LoadOptions is temporarily installed
// as loader.GlobalLoadOptions while the build runs.
//
// Errors are wrapped with %w so callers can still match the underlying error
// (e.g. downloader.ErrRepoNotFound) with errors.Is/errors.As.
func BuildChartDependenciesInDir(ctx context.Context, chartFile, chartLockFile *chart.ChartExtenderBufferedFile, targetDir string, helmEnvSettings *cli.EnvSettings, registryClientHandle *helm_v3.RegistryClientHandle, opts BuildChartDependenciesOptions) error {
	logboek.Context(ctx).Debug().LogF("-- BuildChartDependenciesInDir\n")
	if err := os.MkdirAll(targetDir, os.ModePerm); err != nil {
		return fmt.Errorf("error creating dir %q: %w", targetDir, err)
	}
	// Materialize the buffered chart metadata files; nil entries are optional.
	files := []*chart.ChartExtenderBufferedFile{chartFile, chartLockFile}
	for _, file := range files {
		if file == nil {
			continue
		}
		path := filepath.Join(targetDir, file.Name)
		if err := ioutil.WriteFile(path, file.Data, 0644); err != nil {
			return fmt.Errorf("error writing %q: %w", path, err)
		}
	}
	man := &downloader.Manager{
		Out:              logboek.Context(ctx).OutStream(),
		ChartPath:        targetDir,
		Keyring:          opts.Keyring,
		SkipUpdate:       opts.SkipUpdate,
		Verify:           opts.Verify,
		Getters:          getter.All(helmEnvSettings),
		RegistryClient:   registryClientHandle.RegistryClient,
		RepositoryConfig: helmEnvSettings.RepositoryConfig,
		RepositoryCache:  helmEnvSettings.RepositoryCache,
		Debug:            helmEnvSettings.Debug,
	}
	// Swap in the caller-provided load options for the duration of the build;
	// restore the previous global value even if Build fails.
	currentLoaderOptions := loader.GlobalLoadOptions
	loader.GlobalLoadOptions = opts.LoadOptions
	defer func() {
		loader.GlobalLoadOptions = currentLoaderOptions
	}()
	err := man.Build()
	if e, ok := err.(downloader.ErrRepoNotFound); ok {
		// Wrap (not flatten) the typed error so errors.As still finds it.
		return fmt.Errorf("%w. Please add the missing repos via 'helm repo add'", e)
	}
	return err
}
|
package main
import (
"strings"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/sql"
)
// CustomSqlModel embeds QSqlQueryModel to customize how query results are
// rendered (see the data method).
type CustomSqlModel struct {
	*sql.QSqlQueryModel
}
// newCustomSqlModel builds a CustomSqlModel parented to p and wires the
// custom data callback into Qt's model/view machinery.
func newCustomSqlModel(p *core.QObject) *CustomSqlModel {
	var model = &CustomSqlModel{sql.NewQSqlQueryModel(p)}
	model.ConnectData(model.data)
	return model
}
// data overrides the default model data: for the display role it prefixes
// column 0 values with '#' and upper-cases column 2; for the text-color role
// it renders column 1 in blue. All other cells fall through to the default.
func (m *CustomSqlModel) data(index *core.QModelIndex, role int) *core.QVariant {
	var value = m.DataDefault(index, role)
	if value.IsValid() && role == int(core.Qt__DisplayRole) {
		if index.Column() == 0 {
			// presumably an ID column — TODO confirm against the query
			return core.NewQVariant1("#" + value.ToString())
		} else if index.Column() == 2 {
			return core.NewQVariant1(strings.ToUpper(value.ToString()))
		}
	}
	if role == int(core.Qt__TextColorRole) && index.Column() == 1 {
		return gui.NewQColor2(core.Qt__blue).ToVariant()
	}
	return value
}
|
package session
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestSessionStore exercises Store semantics: GetOrNew reuses an existing
// session, New replaces one (firing its OnDelete hook), and Delete fires the
// hook of whatever session is current.
func TestSessionStore(t *testing.T) {
	Convey(`Testing the Session Store`, t, func() {
		s := NewStore()
		// Deleting a non-existent name must be a no-op.
		s.Delete("foo")
		s1, existed := s.GetOrNew("foo")
		So(existed, ShouldBeFalse)
		So(s1.Name(), ShouldEqual, "foo")
		var s1Deleted bool
		s1.SetOnDelete(func() { s1Deleted = true })
		// Second GetOrNew returns the same session instance.
		s2, existed := s.GetOrNew("foo")
		So(existed, ShouldBeTrue)
		So(s2, ShouldEqual, s1)
		So(s1Deleted, ShouldBeFalse)
		// New replaces the stored session and triggers s1's delete hook.
		s3 := s.New("foo")
		So(s3.Name(), ShouldEqual, "foo")
		So(s1Deleted, ShouldBeTrue)
		var s3Deleted bool
		s3.SetOnDelete(func() { s3Deleted = true })
		s.Delete("foo")
		So(s3Deleted, ShouldBeTrue)
	})
}
|
package main
import "fmt"
// main demonstrates basic map usage: construction, lookup, and printing.
// fmt prints maps with keys in sorted order, so output is deterministic.
func main() {
	scores := map[string]int{
		"k1": 7,
		"k2": 71,
	}
	fmt.Println("map:", scores)
	fmt.Println("k1 value:", scores["k1"])
}
|
package lru
// Cell is a single key/value entry stored in the LRU cache.
type Cell struct {
	Key   interface{}
	Value interface{}
}
// ILRU is the contract for an LRU cache implementation.
type ILRU interface {
	// Get returns the value for key, or nil-equivalent when absent —
	// TODO confirm the miss behavior against the implementation.
	Get(key interface{}) interface{}
	// Set inserts or refreshes the given cell.
	Set(c Cell)
	// QueueLen reports the number of cached entries.
	QueueLen() int
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"gollum/core"
)
// StreamName formatter
//
// This formatter prefixes data with the name of the current or previous stream.
//
// Parameters
//
// - UsePrevious: Set to true to use the name of the previous stream.
// By default this parameter is set to false.
//
// - Separator: Defines the separator string used between stream name and data.
// By default this parameter is set to ":".
//
// Examples
//
// This example prefixes the message with the most recent routing history.
//
// exampleProducer:
// Type: producer.Console
// Streams: "*"
// Modulators:
// - format.StreamName:
// Separator: ", "
// UsePrevious: true
// - format.StreamName:
// Separator: ": "
type StreamName struct {
	core.SimpleFormatter `gollumdoc:"embed_type"`
	// separator is inserted between the stream name and the payload.
	separator            []byte `config:"Separator" default:":"`
	// usePrevious selects the previous stream's name instead of the current one.
	usePrevious          bool   `config:"UsePrevious"`
}
// init registers the StreamName formatter with the plugin type registry.
func init() {
	core.TypeRegistry.Register(StreamName{})
}
// Configure initializes this formatter with values from a plugin config.
// The body is intentionally empty: fields appear to be populated from the
// struct tags by the framework — TODO confirm against core.SimpleFormatter.
func (format *StreamName) Configure(conf core.PluginConfigReader) {
}
// ApplyFormatter rewrites the message payload as
// "<stream name><separator><original payload>".
func (format *StreamName) ApplyFormatter(msg *core.Message) error {
	name := format.getStreamName(msg)
	content := format.GetSourceDataAsBytes(msg)
	payload := make([]byte, 0, len(name)+len(format.separator)+len(content))
	payload = append(payload, name...)
	payload = append(payload, format.separator...)
	payload = append(payload, content...)
	format.SetTargetData(msg, payload)
	return nil
}
// getStreamName resolves the stream name to prefix with: the message's
// previous stream when usePrevious is set, otherwise its current stream.
func (format *StreamName) getStreamName(msg *core.Message) string {
	if format.usePrevious {
		return core.StreamRegistry.GetStreamName(msg.GetPrevStreamID())
	}
	return core.StreamRegistry.GetStreamName(msg.GetStreamID())
}
|
package main
import (
"fmt"
"github.com/zdq0394/algorithm/base/queue"
)
// main demonstrates the queue package: enqueue three values, peek before and
// after one removal, then report the remaining length.
func main() {
	q := queue.NewQueue()
	q.Add(1)
	q.Add(2)
	q.Add(3)
	var v int
	var e error
	v, e = q.Peek()
	fmt.Println(v, e)
	q.Remove()
	v, e = q.Peek()
	fmt.Println(v, e)
	fmt.Println("Length:", q.Length())
}
|
package main
import (
"math/rand"
"os"
"time"
"github.com/xmwilldo/edge-health/cmd/app-health/app"
"github.com/xmwilldo/edge-health/pkg/app-health-daemon/util"
"k8s.io/component-base/logs"
)
// main seeds the RNG, wires signal-based cancellation into a context, and
// runs the app-health command, exiting non-zero on failure.
func main() {
	rand.Seed(time.Now().UnixNano())
	ctx, _ := util.SignalWatch()
	command := app.NewAppHealthCommand(ctx)
	logs.InitLogs()
	defer logs.FlushLogs()
	if err := command.Execute(); err != nil {
		// NOTE(review): the error itself is discarded; presumably the command
		// prints it before returning — confirm against app.NewAppHealthCommand.
		os.Exit(1)
	}
}
|
package cloud
import (
"testing"
client "github.com/devspace-cloud/devspace/pkg/devspace/cloud/client/testing"
config "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/testing"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/util/hash"
log "github.com/devspace-cloud/devspace/pkg/util/log/testing"
"gotest.tools/assert"
)
// getClusterKeyTestCase describes one GetClusterKey scenario: survey answers
// to feed the fake logger, pre-seeded local and remote cluster keys, the
// cluster to query, and the expected outcome.
type getClusterKeyTestCase struct {
	name                   string
	answers                []string
	localClusterKeys       map[int]string
	clientClusterKeys      map[int]string
	clusterID              int
	expectedErr            string
	expectedKey            string
	// keyExpectedInClusterID asserts the key was cached under clusterID.
	keyExpectedInClusterID bool
}
// TestGetClusterKey drives provider.GetClusterKey through three scenarios:
// interactive retry until the key matches, a stale local key corrected by
// asking the user, and a valid local key used directly.
func TestGetClusterKey(t *testing.T) {
	hash345678, err := hash.Password("345678")
	assert.NilError(t, err, "Error getting hash")
	hash456789, err := hash.Password("456789")
	assert.NilError(t, err, "Error getting hash")
	hash567890, err := hash.Password("567890")
	assert.NilError(t, err, "Error getting hash")
	testCases := []getClusterKeyTestCase{
		getClusterKeyTestCase{
			name:    "Ask for encryption key and succeed on secound try",
			answers: []string{"234567", "345678"},
			clientClusterKeys: map[int]string{
				3: hash345678,
			},
			clusterID:              3,
			expectedKey:            hash345678,
			keyExpectedInClusterID: true,
		},
		getClusterKeyTestCase{
			name:    "Get wrong clusterkey from local config, then get the right by asking",
			answers: []string{"456789"},
			localClusterKeys: map[int]string{
				1: "345678",
			},
			clientClusterKeys: map[int]string{
				5: hash456789,
			},
			clusterID:              5,
			expectedKey:            hash456789,
			keyExpectedInClusterID: true,
		},
		getClusterKeyTestCase{
			name: "Get correct clusterkey from local config",
			localClusterKeys: map[int]string{
				2: hash567890,
			},
			clientClusterKeys: map[int]string{
				6: hash567890,
			},
			clusterID:              6,
			expectedKey:            hash567890,
			keyExpectedInClusterID: true,
		},
	}
	for _, testCase := range testCases {
		// Queue canned survey answers for any interactive prompts.
		logger := log.NewFakeLogger()
		for _, answer := range testCase.answers {
			logger.Survey.SetNextAnswer(answer)
		}
		if testCase.localClusterKeys == nil {
			testCase.localClusterKeys = map[int]string{}
		}
		provider := &provider{
			Provider: latest.Provider{
				ClusterKey: testCase.localClusterKeys,
			},
			log: logger,
			client: &client.CloudClient{
				ClusterKeys: testCase.clientClusterKeys,
			},
			loader: config.NewLoader(&latest.Config{}),
		}
		key, err := provider.GetClusterKey(&latest.Cluster{ClusterID: testCase.clusterID, EncryptToken: true})
		if testCase.expectedErr == "" {
			assert.NilError(t, err, "Error in testCase: %s", testCase.name)
		} else {
			assert.Error(t, err, testCase.expectedErr, "Wrong or no error when trying in testCase %s", testCase.name)
		}
		assert.Equal(t, testCase.expectedKey, key, "Wrong key returned in testCase %s", testCase.name)
		if testCase.keyExpectedInClusterID {
			assert.Equal(t, testCase.expectedKey, provider.ClusterKey[testCase.clusterID], "Wrong key returned in clusterKey with clusterID %s", testCase.name)
		} else {
			_, ok := provider.ClusterKey[testCase.clusterID]
			assert.Equal(t, false, ok, "ClusterKey with clusterID unexpectedly set. TestCase: %s", testCase.name)
		}
	}
}
|
// Copyright 2021-present Open Networking Foundation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mastership
import (
"context"
"math/rand"
"time"
"github.com/onosproject/onos-lib-go/pkg/errors"
topoapi "github.com/onosproject/onos-api/go/onos/topo"
"github.com/onosproject/onos-config/pkg/store/topo"
"github.com/onosproject/onos-lib-go/pkg/controller"
"github.com/onosproject/onos-lib-go/pkg/logging"
)
const defaultTimeout = 30 * time.Second
var log = logging.GetLogger("controller", "mastership")
// NewController returns a new mastership controller that watches topology
// events and reconciles mastership state for gNMI targets.
func NewController(topo topo.Store) *controller.Controller {
	c := controller.NewController("mastership")
	c.Watch(&TopoWatcher{
		topo: topo,
	})
	c.Reconcile(&Reconciler{
		topo: topo,
	})
	return c
}
// Reconciler reconciles mastership elections for gNMI targets using the
// topology store as the source of truth.
type Reconciler struct {
	topo topo.Store
}
// Reconcile reconciles the mastership state for a gnmi target: it verifies
// the current master still has a CONTROLS relation to the target and, if not,
// either clears the mastership (no candidates) or elects a random candidate,
// bumping the term, and writes the updated aspect back to the topo store.
func (r *Reconciler) Reconcile(id controller.ID) (controller.Result, error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
	defer cancel()
	targetID := id.Value.(topoapi.ID)
	log.Infof("Reconciling mastership election for the gNMI target %s", targetID)
	targetEntity, err := r.topo.Get(ctx, targetID)
	if err != nil {
		if errors.IsNotFound(err) {
			// Target no longer exists; nothing to reconcile.
			return controller.Result{}, nil
		}
		// BUG FIX: log targetID rather than targetEntity.ID — targetEntity
		// may be nil when Get fails, and dereferencing it would panic.
		log.Warnf("Failed to reconcile mastership election for the gNMI target with ID %s: %s", targetID, err)
		return controller.Result{}, err
	}
	// List the objects in the topo store
	objects, err := r.topo.List(ctx, &topoapi.Filters{
		KindFilter: &topoapi.Filter{
			Filter: &topoapi.Filter_Equal_{
				Equal_: &topoapi.EqualFilter{
					Value: topoapi.CONTROLS,
				},
			},
		},
	})
	if err != nil {
		log.Warnf("Updating MastershipState for target '%s' failed: %v", targetEntity.GetID(), err)
		return controller.Result{}, err
	}
	// Collect only the CONTROLS relations that point at this target.
	targetRelations := make(map[topoapi.ID]topoapi.Object)
	for _, object := range objects {
		if object.GetRelation().TgtEntityID == targetID {
			targetRelations[object.ID] = object
		}
	}
	mastership := &topoapi.MastershipState{}
	_ = targetEntity.GetAspect(mastership)
	// Re-elect only when the recorded master no longer controls the target.
	if _, ok := targetRelations[topoapi.ID(mastership.NodeId)]; !ok {
		if len(targetRelations) == 0 {
			if mastership.NodeId == "" {
				// No master and no candidates: steady state.
				return controller.Result{}, nil
			}
			log.Infof("Master in term %d resigned for the gNMI target '%s'", mastership.Term, targetEntity.GetID())
			mastership.NodeId = ""
		} else {
			// Select a random master to assign to the gnmi target
			relations := make([]topoapi.Object, 0, len(targetRelations))
			for _, targetRelation := range targetRelations {
				relations = append(relations, targetRelation)
			}
			relation := relations[rand.Intn(len(relations))]
			// Increment the mastership term and assign the selected master
			mastership.Term++
			mastership.NodeId = string(relation.ID)
			log.Infof("Elected new master '%s' in term %d for the gNMI target '%s'", mastership.NodeId, mastership.Term, targetEntity.GetID())
		}
		err = targetEntity.SetAspect(mastership)
		if err != nil {
			log.Warnf("Updating MastershipState for gNMI target '%s' failed: %v", targetEntity.GetID(), err)
			return controller.Result{}, err
		}
		// Update the gNMI target entity
		err = r.topo.Update(ctx, targetEntity)
		if err != nil {
			// NotFound/Conflict are benign races with other controllers.
			if !errors.IsNotFound(err) && !errors.IsConflict(err) {
				log.Warnf("Updating MastershipState for gNMI target '%s' failed: %v", targetEntity.GetID(), err)
				return controller.Result{}, err
			}
			return controller.Result{}, nil
		}
		return controller.Result{}, nil
	}
	return controller.Result{}, nil
}
|
// Package main contains the main() function of the server and is the entry point of the program.
package main
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//658. Find K Closest Elements
//Given a sorted array, two integers k and x, find the k closest elements to x in the array. The result should also be sorted in ascending order. If there is a tie, the smaller elements are always preferred.
//Example 1:
//Input: [1,2,3,4,5], k=4, x=3
//Output: [1,2,3,4]
//Example 2:
//Input: [1,2,3,4,5], k=4, x=-1
//Output: [1,2,3,4]
//Note:
//The value k is positive and will always be smaller than the length of the sorted array.
//Length of the given array is positive and will not exceed 104
//Absolute value of elements in the array and x will not exceed 104
//UPDATE (2017/9/19):
//The arr parameter had been changed to an array of integers (instead of a list of integers). Please reload the code definition to get the latest changes.
//func findClosestElements(arr []int, k int, x int) []int {
//}
// Time Is Money |
package pdu
import (
"fmt"
"strconv"
"time"
)
// Time wraps time.Time to (de)serialize SMPP absolute time strings.
// See SMPP v5, section 4.7.23.4 (132p).
type Time struct{ time.Time }
// From parses an SMPP absolute time string into t. Empty input resets t to
// the zero Time with no error; a trailing symbol other than '+' or '-'
// yields ErrUnparseableTime.
func (t *Time) From(input string) (err error) {
	t.Time = time.Time{}
	if len(input) == 0 {
		return
	}
	parts, symbol := fromTimeString(input)
	// Absolute times must carry a signed timezone marker.
	if !(symbol == '+' || symbol == '-') {
		err = ErrUnparseableTime
		return
	}
	t.Time = time.Date(
		int(2000+parts[0]),                    // year (two digits, offset from 2000)
		time.Month(parts[1]),                  // month
		int(parts[2]),                         // day
		int(parts[3]),                         // hour
		int(parts[4]),                         // minute
		int(parts[5]),                         // second
		int(parts[6])*1e8,                     // tenths of second, as nanoseconds
		time.FixedZone("", int(parts[7]*900)), // timezone offset (quarter-hour units)
	)
	return
}
// String renders t in the SMPP absolute time format, the inverse of From.
// The zero Time renders as the empty string.
func (t Time) String() string {
	if t.Time.IsZero() {
		return ""
	}
	_, offset := t.Zone()
	// Emit the offset magnitude and a separate sign symbol.
	symbol := '+'
	if offset < 0 {
		offset = -offset
		symbol = '-'
	}
	return fmt.Sprintf(
		"%02d%02d%02d%02d%02d%02d%d%02d%c",
		t.Year()-2000,      // year
		int(t.Month()),     // month
		t.Day(),            // day
		t.Hour(),           // hour
		t.Minute(),         // minute
		t.Second(),         // second
		t.Nanosecond()/1e8, // tenths of second
		offset/900,         // offset in quarter-hour units
		symbol,             // time-zone symbol
	)
}
// Duration wraps time.Duration to (de)serialize SMPP relative time strings.
// See SMPP v5, section 4.7.23.5 (132p).
type Duration struct{ time.Duration }
// From parses an SMPP relative time string (terminated by 'R') into p.
// Empty input resets p to zero with no error; any other terminator yields
// ErrUnparseableTime.
func (p *Duration) From(input string) (err error) {
	p.Duration = 0
	if len(input) == 0 {
		return
	}
	parts, symbol := fromTimeString(input)
	if symbol != 'R' {
		err = ErrUnparseableTime
		return
	}
	// Unit per field: years (365d), months (30d), days, hours, minutes,
	// seconds, tenths of a second; the eighth field is unused (weight 0).
	bases := []time.Duration{
		time.Hour * 8760, time.Hour * 720, time.Hour * 24,
		time.Hour, time.Minute, time.Second, 1e8, 0,
	}
	for i, part := range parts {
		p.Duration += bases[i] * time.Duration(part)
	}
	return
}
// String renders p in the SMPP relative time format ("...R"), the inverse
// of From. Durations below one second render as the empty string.
func (p Duration) String() string {
	if p.Duration < time.Second {
		return ""
	}
	ts := p.Duration
	// parts is reused in place: each slot becomes the quotient for its unit
	// while ts keeps the remainder for the next, finer unit.
	parts := []time.Duration{
		time.Hour * 8760, time.Hour * 720, time.Hour * 24,
		time.Hour, time.Minute, time.Second,
	}
	for i, part := range parts {
		parts[i] = ts / part
		ts %= part
	}
	return fmt.Sprintf(
		"%02d%02d%02d%02d%02d%02d%d00R",
		parts[0], parts[1], parts[2],
		parts[3], parts[4], parts[5],
		int(ts.Nanoseconds()/1e8), // remaining tenths of a second
	)
}
// fromTimeString splits a 16-character SMPP time string into its numeric
// fields and the trailing symbol byte. Inputs of any other length return
// zero values. Fields 0-5 are two-digit year/month/day/hour/minute/second,
// field 6 is a single digit (tenths of a second) and field 7 the two-digit
// timezone offset, negated when the symbol is '-'.
func fromTimeString(input string) (parts [8]int64, symbol byte) {
	if len(input) != 16 {
		return
	}
	for j := 0; j < 6; j++ {
		parts[j], _ = strconv.ParseInt(input[2*j:2*j+2], 10, 16)
	}
	parts[6], _ = strconv.ParseInt(input[12:13], 10, 16)
	parts[7], _ = strconv.ParseInt(input[13:15], 10, 16)
	symbol = input[15]
	if symbol == '-' {
		parts[7] = -parts[7]
	}
	return
}
|
package handlers
import (
"github.com/EgorLyutov/Inventor/models"
"github.com/EgorLyutov/Inventor/tools"
"gopkg.in/mgo.v2"
"html/template"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/gorilla/context"
)
// HandleServer renders a single server page. When the server id is not found
// it falls back to the edit form (creation); a URL starting with /edit/
// renders the edit form for an existing server.
func HandleServer(args map[string]interface{}, id string, w http.ResponseWriter, r *http.Request) {
	session := context.Get(r, KEY_DB).(*mgo.Session)
	w.Header().Set("Content-type", "text/html")
	// NOTE(review): "templates/server/editserver.html" is listed twice —
	// likely a copy/paste slip; confirm whether another template was intended.
	t, err := template.ParseFiles("templates/server/server.html", "templates/server/editserver.html", "templates/server/editserver.html", "templates/navbar.html", "templates/header.html", "templates/footer.html")
	if err != nil {
		log.Fatal(err.Error())
	}
	args["listLocation"], err = models.Location{}.GetAll(session)
	if err != nil {
		log.Println("Error get locations: ", err.Error())
	}
	server, err := models.Server{}.Get(session, id)
	if err != nil {
		if err.Error() == "not found" {
			// Unknown id: show an empty edit form instead of an error page.
			t.ExecuteTemplate(w, "editserver", args)
			return
		} else {
			http.Error(w, "Ошибка подключения к базе данных", 500)
		}
		return
	}
	args[models.SERVER] = server
	if strings.Split(r.URL.String(), "/")[1] == "edit" { // Если в пути есть /edit/ то открывается редактирование
		t.ExecuteTemplate(w, "editserver", args)
	} else {
		t.ExecuteTemplate(w, "server", args)
	}
}
// HandleSaveOrUpdateServer creates or updates a server record from a POST
// form. Non-POST requests from an authorized IP get the edit form; requests
// from unauthorized IPs get a 500 "Not auth".
//
// BUG FIX: the original set RemovalMark to true when the new location was
// "Списать" and then unconditionally reset it to false further down, so the
// removal mark could never persist. The mark is now computed exactly once.
func HandleSaveOrUpdateServer(w http.ResponseWriter, r *http.Request) {
	session := context.Get(r, KEY_DB).(*mgo.Session)
	if authByIP(r) {
		if r.Method == "POST" {
			isNew := false
			server, err := models.Server{}.Get(session, r.FormValue(ID))
			if err != nil {
				if err == mgo.ErrNotFound {
					isNew = true
				} else {
					log.Printf("Ошибка базы данных: %s", err.Error())
					return
				}
			}
			// Copy numeric form fields; each parse failure aborts the save.
			server.CPU = r.FormValue(CPU)
			amPosCPU, err := strconv.ParseUint(r.FormValue(AMOUNT_POSSIBLE_CPU), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.AmountPossibleCPU = amPosCPU
			memory, err := strconv.ParseUint(r.FormValue(MEMORY), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.Memory = memory
			countMemory, err := strconv.ParseUint(r.FormValue(COUNT_MEMORY), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.CountMemory = countMemory
			possibleMemory, err := strconv.ParseUint(r.FormValue(AMOUNT_POSSIBLE_MEMORY), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.AmountPossibleMemory = possibleMemory
			server.OperationSystem = r.FormValue(OPERATION_SYSTEM)
			server.HardDrivesDesc = r.FormValue(HARD_DRIVE_DESC)
			amPowerUnits, err := strconv.ParseUint(r.FormValue(AMOUNT_POWER_UNIT), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.AmountPowerUnits = amPowerUnits
			nomPower, err := strconv.ParseUint(r.FormValue(NOMINAL_POWER), 10, 64)
			if err != nil {
				log.Println(err.Error())
				return
			}
			server.NominalPower = nomPower
			server.Description = r.FormValue(DESCRIPTION)
			currLocation := server.CurrentLocation
			server.CurrentLocation = r.FormValue(CURRENT_LOCATION)
			// The removal mark tracks whether the server was moved to the
			// write-off location ("Списать").
			server.RemovalMark = server.CurrentLocation == "Списать"
			server.InWork = r.FormValue(IN_WORK) == "on"
			if isNew {
				server.ID = server.GetNextID(session)
				server.Barcode = "/barcodes/" + server.ID + ".png"
				err = server.Save(session)
				if err != nil {
					log.Printf("Ошибка сохранения записи: %s", err.Error())
				}
				tools.GenerateBarcode(server.ID)
			} else {
				if currLocation != server.CurrentLocation {
					// Append a movement record with the requester's IP.
					server.HistoryLocations = append(server.HistoryLocations,
						"Перемещено из "+currLocation+" в "+server.CurrentLocation+"\n"+
							r.Header.Get("X-Real-Ip")+"\n"+time.Now().String())
				}
				server.Barcode = "/barcodes/" + server.ID + ".png"
				err = server.Update(session)
				if err != nil {
					log.Printf("Ошибка обновления записи: %s", err.Error())
				}
			}
			urlRedirect := "/search/?ID=" + server.ID
			http.Redirect(w, r, urlRedirect, 302)
		} else {
			// Non-POST: render the edit form for manual entry.
			args := make(map[string]interface{})
			args["auth"] = authByIP(r)
			w.Header().Set("Content-type", "text/html")
			t, err := template.ParseFiles("templates/server/editserver.html", "templates/navbar.html", "templates/header.html", "templates/footer.html")
			if err != nil {
				log.Fatal(err.Error())
			}
			args["listLocation"], err = models.Location{}.GetAll(session)
			if err != nil {
				log.Println("Error get locations: ", err.Error())
			}
			t.ExecuteTemplate(w, "editserver", args)
			return
		}
	}
	http.Error(w, "Not auth", 500)
}
// HandleGetAllServer renders the full server list with its count.
func HandleGetAllServer(w http.ResponseWriter, r *http.Request) {
	args := make(map[string]interface{})
	session := context.Get(r, KEY_DB).(*mgo.Session)
	w.Header().Set("Content-type", "text/html")
	t, err := template.ParseFiles("templates/server/listserver.html", "templates/navbar.html", "templates/header.html", "templates/footer.html")
	if err != nil {
		log.Fatal(err.Error())
	}
	listServer, err := models.Server{}.GetAll(session)
	if err != nil {
		http.Error(w, "Server error", 500)
		return
	}
	args["count"] = len(listServer)
	args["listSR"] = listServer
	t.ExecuteTemplate(w, "listserver", args)
}
|
package routers
import (
"key-value/lib/ws"
"encoding/json"
"log"
"fmt"
)
// createRequestHandler adapts a RequestStrategy into a requestHandler:
// the strategy's value and error are folded into a Response whose Success
// flag reflects whether the strategy returned an error.
func createRequestHandler(strategy RequestStrategy) requestHandler {
	return func(request Request) Response {
		value, err := strategy(request)
		if err != nil {
			return Response{
				Success: false,
				Error:   err.Error(),
				Result:  value,
			}
		}
		return Response{
			Success: true,
			Result:  value,
		}
	}
}
// createMessageHandler wraps a requestHandler in JSON transport plumbing:
// it decodes the incoming message into a Request, runs the handler, and
// encodes the Response. Decode/encode failures are logged and the error
// text itself is returned as the reply payload.
func createMessageHandler(handler requestHandler) ws.RequestHandler {
	return func(message []byte) []byte {
		var req Request
		if err := json.Unmarshal(message, &req); err != nil {
			msg := fmt.Sprintf(`Message: '%s' parse failed: %s`, message, err.Error())
			log.Println(msg)
			return []byte(msg)
		}
		encoded, err := json.Marshal(handler(req))
		if err != nil {
			msg := fmt.Sprintf(`Message: '%s' encode response failed: %s`, message, err.Error())
			log.Println(msg)
			return []byte(msg)
		}
		return encoded
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/yaml"
"github.com/kubevela/workflow/pkg/cue/packages"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
// Shared suite state, populated in BeforeSuite and torn down in AfterSuite.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var testScheme = runtime.NewScheme()
var decoder *admission.Decoder
var pd *packages.PackageDiscover
var ctx = context.Background()
var handler *ValidatingHandler
// TestAPIs is the Go test entry point that runs the Ginkgo suite.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Controller Suite")
}
// BeforeSuite starts an envtest control plane with the project CRDs, builds
// the client/scheme/package-discover/decoder shared by the specs, and seeds
// the vela-system namespace with a ComponentDefinition and TraitDefinition.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
	By("bootstrapping test environment")
	// CRD location differs when running the compatibility test matrix.
	var yamlPath string
	if _, set := os.LookupEnv("COMPATIBILITY_TEST"); set {
		yamlPath = "../../../../../test/compatibility-test/testdata"
	} else {
		yamlPath = filepath.Join("../../../../..", "charts", "vela-core", "crds")
	}
	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute,
		ControlPlaneStopTimeout:  time.Minute,
		CRDDirectoryPaths:        []string{yamlPath},
	}
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	err = v1beta1.SchemeBuilder.AddToScheme(testScheme)
	Expect(err).NotTo(HaveOccurred())
	err = scheme.AddToScheme(testScheme)
	Expect(err).NotTo(HaveOccurred())
	// +kubebuilder:scaffold:scheme
	k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
	Expect(err).ToNot(HaveOccurred())
	Expect(k8sClient).ToNot(BeNil())
	pd, err = packages.NewPackageDiscover(cfg)
	Expect(err).ToNot(HaveOccurred())
	Expect(pd).ToNot(BeNil())
	handler = &ValidatingHandler{pd: pd}
	decoder, err = admission.NewDecoder(testScheme)
	Expect(err).Should(BeNil())
	Expect(decoder).ToNot(BeNil())
	ctx := context.Background()
	ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "vela-system"}}
	Expect(k8sClient.Create(ctx, &ns)).Should(BeNil())
	// Seed the definitions the validating handler resolves against.
	wd := &v1beta1.ComponentDefinition{}
	wDDefJson, _ := yaml.YAMLToJSON([]byte(cDDefYaml))
	Expect(json.Unmarshal(wDDefJson, wd)).Should(BeNil())
	Expect(k8sClient.Create(ctx, wd)).Should(BeNil())
	td := &v1beta1.TraitDefinition{}
	tDDefJson, _ := yaml.YAMLToJSON([]byte(tDDefYaml))
	Expect(json.Unmarshal(tDDefJson, td)).Should(BeNil())
	Expect(k8sClient.Create(ctx, td)).Should(BeNil())
})
// AfterSuite stops the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
const (
cDDefYaml = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: worker
namespace: vela-system
annotations:
definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
extension:
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
}]
}
}
selector:
matchLabels:
"app.oam.dev/component": context.name
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
cmd?: [...string]
}`
tDDefYaml = `
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
# Definition source cue file: vela-templates/definitions/internal/scaler.cue
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: Manually scale K8s pod for your workload which follows the pod spec in path 'spec.template'.
name: scaler
namespace: vela-system
spec:
appliesToWorkloads:
- deployments.apps
- statefulsets.apps
podDisruptive: false
schematic:
cue:
template: |
parameter: {
// +usage=Specify the number of workload
replicas: *1 | int
}
// +patchStrategy=retainKeys
patch: spec: replicas: parameter.replicas
`
)
|
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package grpc provides vald gRPC client functions
package grpc
import (
"context"
"github.com/vdaas/vald/apis/grpc/gateway/vald"
"github.com/vdaas/vald/internal/client"
"github.com/vdaas/vald/internal/config"
igrpc "github.com/vdaas/vald/internal/net/grpc"
"google.golang.org/grpc"
)
// Client represents gateway client interface. It combines the base client
// with metadata/object reading and upsert capabilities.
type Client interface {
	client.Client
	client.MetaObjectReader
	client.Upserter
}
// gatewayClient implements Client over a single gateway address using the
// embedded internal gRPC client for connection management.
type gatewayClient struct {
	addr string
	cfg  *config.GRPCClient
	igrpc.Client
}
// New returns Client implementation if no error occurs. Options are applied
// on top of defaultOptions, then the underlying gRPC client connects to the
// configured address.
func New(ctx context.Context, opts ...Option) (Client, error) {
	c := new(gatewayClient)
	for _, opt := range append(defaultOptions, opts...) {
		opt(c)
	}
	c.Client = igrpc.New(c.cfg.Opts()...)
	if err := c.Client.Connect(ctx, c.addr); err != nil {
		return nil, err
	}
	return c, nil
}
// Exists asks the gateway whether the object ID is indexed, returning the
// resolved ID on success.
func (c *gatewayClient) Exists(
	ctx context.Context,
	req *client.ObjectID,
) (*client.ObjectID, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			return vald.NewValdClient(conn).Exists(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.ObjectID), nil
}
// Search performs a vector similarity search via the gateway.
func (c *gatewayClient) Search(
	ctx context.Context,
	req *client.SearchRequest,
) (*client.SearchResponse, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			return vald.NewValdClient(conn).Search(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.SearchResponse), nil
}
// SearchByID performs a similarity search keyed by a stored object ID.
func (c *gatewayClient) SearchByID(
	ctx context.Context,
	req *client.SearchIDRequest,
) (*client.SearchResponse, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			return vald.NewValdClient(conn).SearchByID(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*client.SearchResponse), nil
}
// StreamSearch opens a bidirectional search stream: dataProvider supplies
// requests until it returns nil, and f receives each response or error.
func (c *gatewayClient) StreamSearch(
	ctx context.Context,
	dataProvider func() *client.SearchRequest,
	f func(*client.SearchResponse, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) {
			var st vald.Vald_StreamSearchClient
			st, err = vald.NewValdClient(conn).StreamSearch(ctx, copts...)
			if err != nil {
				return nil, err
			}
			// nil from the provider signals end of input to streamSearch.
			return nil, streamSearch(st,
				func() interface{} {
					if d := dataProvider(); d != nil {
						return d
					}
					return nil
				}, f,
			)
		},
	)
	return err
}
// StreamSearchByID is StreamSearch keyed by stored object IDs rather than
// raw vectors.
func (c *gatewayClient) StreamSearchByID(
	ctx context.Context,
	dataProvider func() *client.SearchIDRequest,
	f func(*client.SearchResponse, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) {
			var st vald.Vald_StreamSearchByIDClient
			st, err = vald.NewValdClient(conn).StreamSearchByID(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, streamSearch(st,
				func() interface{} {
					if d := dataProvider(); d != nil {
						return d
					}
					return nil
				}, f,
			)
		},
	)
	return err
}
// Insert registers the given vector with the gateway.
func (c *gatewayClient) Insert(
	ctx context.Context,
	req *client.ObjectVector,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).Insert(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// StreamInsert streams insert requests produced by dataProvider to the
// gateway; the per-request result error is passed to f.
func (c *gatewayClient) StreamInsert(
	ctx context.Context,
	dataProvider func() *client.ObjectVector,
	f func(error),
) error {
	// Box the provider result only when non-nil so a nil vector ends the stream.
	provide := func() interface{} {
		if d := dataProvider(); d != nil {
			return d
		}
		return nil
	}
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			st, err := vald.NewValdClient(conn).StreamInsert(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, provide, f)
		},
	)
	return err
}
// MultiInsert registers a batch of vectors with the gateway in one call.
func (c *gatewayClient) MultiInsert(
	ctx context.Context,
	req *client.ObjectVectors,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).MultiInsert(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// Update replaces the stored vector identified by the request.
func (c *gatewayClient) Update(
	ctx context.Context,
	req *client.ObjectVector,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).Update(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// StreamUpdate streams update requests produced by dataProvider to the
// gateway; the per-request result error is passed to f.
func (c *gatewayClient) StreamUpdate(
	ctx context.Context,
	dataProvider func() *client.ObjectVector,
	f func(error),
) error {
	// Box the provider result only when non-nil so a nil vector ends the stream.
	provide := func() interface{} {
		if d := dataProvider(); d != nil {
			return d
		}
		return nil
	}
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			st, err := vald.NewValdClient(conn).StreamUpdate(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, provide, f)
		},
	)
	return err
}
// MultiUpdate replaces a batch of stored vectors in one call.
func (c *gatewayClient) MultiUpdate(
	ctx context.Context,
	req *client.ObjectVectors,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).MultiUpdate(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// Upsert inserts the vector, or updates it if it already exists.
func (c *gatewayClient) Upsert(
	ctx context.Context,
	req *client.ObjectVector,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).Upsert(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// MultiUpsert inserts or updates a batch of vectors in one call.
func (c *gatewayClient) MultiUpsert(
	ctx context.Context,
	req *client.ObjectVectors,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).MultiUpsert(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// StreamUpsert streams upsert requests produced by dataProvider to the
// gateway; the per-request result error is passed to f.
func (c *gatewayClient) StreamUpsert(
	ctx context.Context,
	dataProvider func() *client.ObjectVector,
	f func(error),
) error {
	// Box the provider result only when non-nil so a nil vector ends the stream.
	provide := func() interface{} {
		if d := dataProvider(); d != nil {
			return d
		}
		return nil
	}
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			st, err := vald.NewValdClient(conn).StreamUpsert(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st, provide, f)
		},
	)
	return err
}
// Remove deletes the stored object identified by the given ID.
func (c *gatewayClient) Remove(
	ctx context.Context,
	req *client.ObjectID,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).Remove(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// StreamRemove streams removal requests produced by dataProvider to the
// gateway; the per-request result error is passed to f.
//
// The provider result is nil-checked before being boxed into interface{}:
// returning a typed nil *client.ObjectID directly would produce a non-nil
// interface value (Go's typed-nil trap), so the end-of-stream sentinel
// would never be seen. This matches every other Stream* method here.
func (c *gatewayClient) StreamRemove(
	ctx context.Context,
	dataProvider func() *client.ObjectID,
	f func(error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) {
			var st vald.Vald_StreamRemoveClient
			st, err = vald.NewValdClient(conn).StreamRemove(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, stream(st,
				func() interface{} {
					if d := dataProvider(); d != nil {
						return d
					}
					return nil
				}, f,
			)
		},
	)
	return err
}
// MultiRemove deletes a batch of stored objects in one call.
func (c *gatewayClient) MultiRemove(
	ctx context.Context,
	req *client.ObjectIDs,
) error {
	call := func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
		return vald.NewValdClient(conn).MultiRemove(ctx, req, copts...)
	}
	_, err := c.Client.Do(ctx, c.addr, call)
	return err
}
// GetObject fetches the stored meta object for the given ID through the gateway.
func (c *gatewayClient) GetObject(
	ctx context.Context,
	req *client.ObjectID,
) (*client.MetaObject, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) {
			return vald.NewValdClient(conn).GetObject(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	// Return an explicit nil on success instead of the stale err variable,
	// matching Search/SearchByID in this file.
	return res.(*client.MetaObject), nil
}
// StreamGetObject streams object-ID requests produced by dataProvider and
// forwards each fetched meta object (with any error) to f.
func (c *gatewayClient) StreamGetObject(
	ctx context.Context,
	dataProvider func() *client.ObjectID,
	f func(*client.MetaObject, error),
) error {
	_, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) {
			var st vald.Vald_StreamGetObjectClient
			st, err = vald.NewValdClient(conn).StreamGetObject(ctx, copts...)
			if err != nil {
				return nil, err
			}
			return nil, igrpc.BidirectionalStreamClient(st,
				func() interface{} {
					// Nil-check before boxing: a typed nil *client.ObjectID
					// wrapped in interface{} would not compare equal to nil,
					// so the stream would never terminate.
					if d := dataProvider(); d != nil {
						return d
					}
					return nil
				}, func() interface{} {
					// BUG FIX: the received messages are MetaObjects; the old
					// new(client.BackupMetaVector) made the type assertion in
					// the callback below panic on every response.
					return new(client.MetaObject)
				}, func(res interface{}, err error) {
					f(res.(*client.MetaObject), err)
				})
		},
	)
	return err
}
// streamSearch drives a bidirectional search stream: requests come from
// dataProvider, and every decoded SearchResponse (with any error) goes to f.
func streamSearch(
	st grpc.ClientStream,
	dataProvider func() interface{},
	f func(*client.SearchResponse, error),
) error {
	newMsg := func() interface{} { return new(client.SearchResponse) }
	deliver := func(res interface{}, err error) {
		f(res.(*client.SearchResponse), err)
	}
	return igrpc.BidirectionalStreamClient(st, dataProvider, newMsg, deliver)
}
// stream drives a bidirectional stream whose responses carry no payload:
// requests come from dataProvider and only the per-message error reaches f.
func stream(
	st grpc.ClientStream,
	dataProvider func() interface{},
	f func(error),
) error {
	newMsg := func() interface{} { return new(client.Empty) }
	deliver := func(_ interface{}, err error) {
		f(err)
	}
	return igrpc.BidirectionalStreamClient(st, dataProvider, newMsg, deliver)
}
|
package models
// Entry is a database row joining an entry with its owning User
// (embedded, so User's columns are promoted into this struct).
type Entry struct {
	ID int `db:"entry_id"`
	User
	Slug string `db:"slug"`
	DisplayName string `db:"display_name"`
}
|
package config
// NodeEvents configures whether node event emission is active.
type NodeEvents struct {
	Enabled *bool `json:"enabled,omitempty"`
}

// IsEnabled reports whether node events are active. An unset (nil)
// Enabled field defaults to enabled.
func (e NodeEvents) IsEnabled() bool {
	if e.Enabled == nil {
		return true
	}
	return *e.Enabled
}
|
package main
import (
"io"
"os"
"path/filepath"
)
type teeFileReader struct {
r io.ReadCloser
f *os.File
}
// TeeReader returns a Reader that writes to the named file what it reads from
// r. All reads from r performed through it are matched with corresponding
// writes. There is no internal buffering - the write must complete before the
// read completes. Any error encountered while writing is reported as a read
// error. The file is truncated before the first write and removed if an error
// occurs.
func TeeFileReader(r io.ReadCloser, fname string) (io.ReadCloser, error) {
if err := os.MkdirAll(filepath.Dir(fname), 0755); err != nil {
return nil, err
}
f, err := os.Create(fname)
if err != nil {
return nil, err
}
return &teeFileReader{r, f}, nil
}
func (t *teeFileReader) Read(p []byte) (n int, err error) {
n, err = t.r.Read(p)
if n > 0 {
if n, err := t.f.Write(p[:n]); err != nil {
os.Remove(t.f.Name())
return n, err
}
}
return
}
func (t *teeFileReader) Close() error {
e1 := t.r.Close()
e2 := t.f.Close()
if e1 != nil {
return e1
}
return e2
}
|
package abclientstate
import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/volatiletech/authboss/v3"
)
// TestGetCookieState checks that CookieState.Get returns a stored value.
func TestGetCookieState(t *testing.T) {
	t.Parallel()
	var c CookieState = map[string]string{"hello": "world"}
	val, ok := c.Get("hello")
	if !ok {
		t.Error("could not get cookie state")
	}
	if val != "world" {
		// BUG FIX: the old message said `expected true` although the
		// expected value is the string "world".
		t.Errorf("expected %q, got %s", "world", val)
	}
}
// writeInitialState verifies r carries no "hello" state, writes a single
// put event ("hello" -> "World") through s, and returns the state re-read
// from r.
func writeInitialState(s CookieStorer, w *httptest.ResponseRecorder, r *http.Request) (authboss.ClientState, error) {
	empty, err := s.ReadState(r)
	if err != nil {
		return nil, err
	}
	if val, ok := empty.Get("hello"); ok || len(val) != 0 {
		return nil, fmt.Errorf("it should not have had a value: %s", val)
	}
	events := []authboss.ClientStateEvent{
		{Kind: authboss.ClientStateEventPut, Key: "hello", Value: "World"},
	}
	if err = s.WriteState(w, empty, events); err != nil {
		return nil, err
	}
	return s.ReadState(r)
}
// TestCookieExpiry checks that a cookie written with MaxAge=1 carries the
// Max-Age attribute and yields no value when replayed after it expires.
func TestCookieExpiry(t *testing.T) {
	t.Parallel()
	storer := NewCookieStorer([]byte("key"), nil)
	storer.MaxAge = 1
	req := httptest.NewRequest("GET", "/", nil)
	rec := httptest.NewRecorder()
	if _, err := writeInitialState(storer, rec, req); err != nil {
		t.Error(err)
	}
	rec.Flush()
	cookie := rec.Header().Get("Set-Cookie")
	if !strings.Contains(cookie, "Max-Age=1") {
		t.Error("max age should have been set")
	}
	// Let the one-second cookie lapse before replaying it.
	time.Sleep(2 * time.Second)
	req = httptest.NewRequest("GET", "/", nil)
	req.Header.Set("Cookie", cookie)
	state, err := storer.ReadState(req)
	if err != nil {
		t.Fatal(err)
	}
	if val, ok := state.Get("hello"); ok || len(val) != 0 {
		t.Error("it should not have had a value:", val)
	}
}
// TestCookiePutAndDelete verifies that a put event produces a cookie with
// the expected attributes and that a delete event clears the value.
func TestCookiePutAndDelete(t *testing.T) {
	t.Parallel()
	storer := NewCookieStorer([]byte("key"), nil)
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	state, err := writeInitialState(storer, w, r)
	if err != nil {
		t.Fatal(err)
	}
	w.Flush()
	// First Set-Cookie header, produced by the put inside writeInitialState.
	cookie := w.Header().Get("Set-Cookie")
	want := []string{"Path=/;", "Max-Age=2628000;", "HttpOnly", "Expires=", "hello=", "Secure"}
	for _, w := range want {
		if !strings.Contains(cookie, w) {
			t.Errorf("cookie did not include: %s", w)
		}
	}
	del := []authboss.ClientStateEvent{{Kind: authboss.ClientStateEventDel, Key: "hello", Value: "World"}}
	if err := storer.WriteState(w, state, del); err != nil {
		t.Fatal(err)
	}
	// NOTE(review): the request below replays the cookie captured from the
	// put, not from the delete response — confirm this actually exercises the
	// delete path rather than relying on ReadState failing to decode the raw
	// Set-Cookie string.
	r = httptest.NewRequest("GET", "/", nil)
	r.Header.Set("Cookie", cookie)
	state, err = storer.ReadState(r)
	if err != nil {
		t.Fatal(err)
	}
	if val, ok := state.Get("hello"); ok || len(val) != 0 {
		t.Error("it should not have had a value:", val)
	}
}
|
package http
// -> username string
import (
"github.com/gin-gonic/gin"
"github.com/hokora/bank/util"
"net/http"
)
// createAccount creates a bank account for the username stored on the gin
// context, forwarding the request to the main DB service and mapping its
// reply to an HTTP status.
func (s *Server) createAccount(ctx *gin.Context) {
	username := ctx.GetString("username")
	pw := util.NewPacketWriterNoLen(len(username) + 1)
	pw.AppendString(username)
	success, _, err := s.mainDBClient.CallReply(PROTO_OUT_NEW, pw.Pack(), DEFAULT_RESP_TIMEOUT)
	switch {
	case err != nil:
		ctx.JSON(http.StatusInternalServerError, Resp{"message": "server error"})
	case !success:
		ctx.JSON(http.StatusBadRequest, Resp{"message": "username unavailable"})
	default:
		ctx.JSON(http.StatusOK, Resp{})
	}
}
|
package structs
// AppUserUpdate is the JSON payload for updating an app user's avatar;
// UpdatedAt carries the modification timestamp as a string.
type AppUserUpdate struct {
	Avatar string `json:"avatar"`
	UpdatedAt string `json:"updated_at"`
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package coretypes
import (
"bytes"
"errors"
"fmt"
"io"
"strings"
"github.com/mr-tron/base58"
)
// ContractIDLength is the size of a ContractID in bytes: a chain ID
// followed by an hname.
const ContractIDLength = ChainIDLength + HnameLength

// ContractID is the global identifier of a smart contract. It is the
// concatenation of the chain ID and the hname of the contract on that chain.
type ContractID [ContractIDLength]byte
// NewContractID creates a new ContractID from a chain ID and a contract hname.
func NewContractID(chid ChainID, contractHn Hname) (ret ContractID) {
	n := copy(ret[:], chid[:])
	copy(ret[n:], contractHn.Bytes())
	return ret
}
// NewContractIDFromBytes creates a contract ID from its binary representation.
func NewContractIDFromBytes(data []byte) (ContractID, error) {
	var cid ContractID
	err := cid.Read(bytes.NewReader(data))
	return cid, err
}
// NewContractIDFromBase58 decodes a contract ID from a base58 string.
func NewContractIDFromBase58(base58string string) (ContractID, error) {
	data, err := base58.Decode(base58string)
	if err != nil {
		return ContractID{}, err
	}
	return NewContractIDFromBytes(data)
}
// NewContractIDFromString parses the human-readable "<chainID>::<hname>"
// representation of a contract ID.
func NewContractIDFromString(s string) (ContractID, error) {
	parts := strings.Split(s, "::")
	if len(parts) != 2 {
		return ContractID{}, errors.New("invalid ContractID")
	}
	chid, err := NewChainIDFromBase58(parts[0])
	if err != nil {
		return ContractID{}, err
	}
	hn, err := HnameFromString(parts[1])
	if err != nil {
		return ContractID{}, err
	}
	return NewContractID(chid, hn), nil
}
// ChainID returns the ID of the native chain of the contract.
func (scid ContractID) ChainID() ChainID {
	var chid ChainID
	copy(chid[:], scid[:ChainIDLength])
	return chid
}
// Hname returns the hashed name of the contract, its local ID on the chain.
// The trailing bytes are always HnameLength long, so the decode error is
// deliberately discarded.
func (scid ContractID) Hname() Hname {
	hn, _ := NewHnameFromBytes(scid[ChainIDLength:])
	return hn
}
// Base58 returns the base58 encoding of the binary representation.
func (scid ContractID) Base58() string {
	return base58.Encode(scid[:])
}
// Format strings for the long and short human-readable forms of a ContractID.
// Renamed from long_format/short_format: Go constants use mixedCaps, not
// underscores. Both constants are unexported and used only by String/Short.
const (
	longFormat  = "%s::%s"
	shortFormat = "%s..::%s"
)

// Bytes returns the contract ID as a byte slice.
func (scid ContractID) Bytes() []byte {
	return scid[:]
}

// String returns the human-readable representation of the contract ID:
// <chainID>::<hname>
func (scid ContractID) String() string {
	return fmt.Sprintf(longFormat, scid.ChainID().String(), scid.Hname().String())
}

// Short returns the human-readable representation in short form, with the
// chain ID truncated to its first 8 characters.
func (scid ContractID) Short() string {
	return fmt.Sprintf(shortFormat, scid.ChainID().String()[:8], scid.Hname().String())
}
// Read fills the contract ID from the reader, failing with
// ErrWrongDataLength when fewer than ContractIDLength bytes are available.
func (scid *ContractID) Read(r io.Reader) error {
	// BUG FIX: a single r.Read may legally return fewer bytes than requested
	// (e.g. from a bufio.Reader or network stream), which the old code
	// misreported as ErrWrongDataLength. io.ReadFull retries until the
	// buffer is full or the stream ends.
	_, err := io.ReadFull(r, scid[:])
	if err == io.ErrUnexpectedEOF || err == io.EOF {
		return ErrWrongDataLength
	}
	return err
}
// Write serializes the contract ID to the writer in its binary form.
func (scid *ContractID) Write(w io.Writer) error {
	_, err := w.Write(scid[:])
	return err
}
|
package day5
import (
"testing"
"github.com/kdeberk/advent-of-code/2019/internal/utils"
)
// Known-good answers for the two puzzle parts, used as regression baselines.
const part1Answer = 13978427
const part2Answer = 11189491
// TestPart1 runs part1 against the puzzle input and compares the result
// with the known-good answer.
func TestPart1(t *testing.T) {
	// BUG FIX: errors were silently discarded, so a missing input file or a
	// failing machine produced a confusing wrong-answer report (or worse)
	// instead of the actual cause.
	program, err := utils.ReadProgram("../../input/5.txt")
	if err != nil {
		t.Fatalf("reading program: %v", err)
	}
	machine := utils.MakeMachine("day5", program)
	answer, err := part1(machine)
	if err != nil {
		t.Fatalf("part1: %v", err)
	}
	if part1Answer != answer {
		t.Errorf("part1(input) == %d, want %d", answer, part1Answer)
	}
}
// TestPart2 runs part2 against the puzzle input and compares the result
// with the known-good answer.
func TestPart2(t *testing.T) {
	// BUG FIX: errors were silently discarded; fail fast with the real cause.
	program, err := utils.ReadProgram("../../input/5.txt")
	if err != nil {
		t.Fatalf("reading program: %v", err)
	}
	machine := utils.MakeMachine("day5", program)
	answer, err := part2(machine)
	if err != nil {
		t.Fatalf("part2: %v", err)
	}
	if part2Answer != answer {
		t.Errorf("part2(input) == %d, want %d", answer, part2Answer)
	}
}
|
// Copyright 2014-2015 The DevMine authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package repotool is able to fetch information from a source code repository.
// Typically, it can get all commits, their authors and commiters and so on
// and return this information in a JSON object. Alternatively, it is able
// to populate the information into a PostgreSQL database.
// Currently, on the Git VCS is supported.
package main
import (
"bufio"
"bytes"
"database/sql"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
_ "github.com/lib/pq"
mmh3 "github.com/spaolacci/murmur3"
"github.com/DevMine/srcanlzr/src"
"github.com/DevMine/repotool/config"
"github.com/DevMine/repotool/model"
"github.com/DevMine/repotool/repo"
)
const version = "0.1.0"
// main parses flags, fetches the commits of the repository given as the
// single positional argument, and then - depending on flags - prints the
// repository as JSON, merges it into a srctool JSON document, and/or
// inserts it into a PostgreSQL database.
func main() {
	// Custom usage text; note it exits with status 0.
	flag.Usage = func() {
		fmt.Printf("usage: %s [OPTION(S)] [REPOSITORY PATH]\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(0)
	}
	configPath := flag.String("c", "", "configuration file")
	vflag := flag.Bool("v", false, "print version.")
	jsonflag := flag.Bool("json", true, "json output")
	dbflag := flag.Bool("db", false, "import data into the database")
	srctoolflag := flag.String("srctool", "", "read json file produced by srctool (give stdin to read from stdin)")
	flag.Parse()
	if *vflag {
		fmt.Printf("%s - %s\n", filepath.Base(os.Args[0]), version)
		os.Exit(0)
	}
	if len(flag.Args()) != 1 {
		fmt.Fprintln(os.Stderr, "invalid # of arguments")
		// NOTE(review): flag.Usage exits with status 0 even on this error
		// path - confirm that is intended.
		flag.Usage()
	}
	if *dbflag && len(*configPath) == 0 {
		fatal(errors.New("a configuration file must be specified when using db option"))
	}
	if !*jsonflag && (len(*srctoolflag) > 0) {
		fatal(errors.New("srctool flag may be used only in conjonction with json flag"))
	}
	cfg, err := config.ReadConfig(*configPath)
	if err != nil {
		fatal(err)
	}
	repoPath := flag.Arg(0)
	repository, err := repo.New(cfg.Data, repoPath)
	if err != nil {
		fatal(err)
	}
	// Fetch all commits up front; every output mode below consumes them.
	fmt.Fprintln(os.Stderr, "fetching repository commits...")
	tic := time.Now()
	err = repository.FetchCommits()
	if err != nil {
		fatal(err)
	}
	toc := time.Now()
	fmt.Fprintln(os.Stderr, "done in ", toc.Sub(tic))
	// Plain JSON dump of the repository (default mode).
	if *jsonflag && (len(*srctoolflag) == 0) {
		bs, err := json.Marshal(repository)
		if err != nil {
			fatal(err)
		}
		fmt.Println(string(bs))
	}
	// Merge mode: read a srctool JSON document (from stdin or a file),
	// attach this repository to it, and print the combined document.
	if *jsonflag && (len(*srctoolflag)) > 0 {
		var bs []byte
		var r *bufio.Reader
		buf := new(bytes.Buffer)
		if *srctoolflag == strings.ToLower("stdin") {
			// read from stdin
			r = bufio.NewReader(os.Stdin)
		} else {
			// read from srctool json file
			var f *os.File
			if f, err = os.Open(*srctoolflag); err != nil {
				fatal(err)
			}
			r = bufio.NewReader(f)
		}
		if _, err = io.Copy(buf, r); err != nil {
			fatal(err)
		}
		bs = buf.Bytes()
		p, err := src.Unmarshal(bs)
		if err != nil {
			fatal(err)
		}
		p.Repo = repository.GetRepository()
		bs, err = src.Marshal(p)
		if err != nil {
			fatal(err)
		}
		fmt.Println(string(bs))
	}
	// Database mode: push every fetched commit into PostgreSQL.
	if *dbflag {
		db, err := openDBSession(cfg.Database)
		if err != nil {
			fatal(err)
		}
		defer db.Close()
		fmt.Fprintf(os.Stderr,
			"inserting %d commits from %s repository into the database...\n",
			len(repository.GetCommits()), repository.GetName())
		tic := time.Now()
		insertRepoData(db, repository)
		toc := time.Now()
		fmt.Fprintln(os.Stderr, "done in ", toc.Sub(tic))
	}
}
// fatal prints an error on standard error stream and exits.
// fatal prints its arguments on the standard error stream and exits the
// process with status 1.
func fatal(a ...interface{}) {
	fmt.Fprintln(os.Stderr, a...)
	os.Exit(1)
}
// openDBSession opens a PostgreSQL session described by the configuration.
func openDBSession(cfg config.DatabaseConfig) (*sql.DB, error) {
	const format = "user='%s' password='%s' host='%s' port=%d dbname='%s' sslmode='%s'"
	dsn := fmt.Sprintf(format,
		cfg.UserName, cfg.Password, cfg.HostName, cfg.Port, cfg.DBName, cfg.SSLMode)
	return sql.Open("postgres", dsn)
}
// insertRepoData inserts repository data into the database, or updates it
// if it is already there. The repository must already exist in the
// repositories table; otherwise the process aborts.
func insertRepoData(db *sql.DB, r repo.Repo) {
	if db == nil {
		fatal(errors.New("nil database given"))
	}
	id := getRepoID(db, r)
	if id == 0 {
		fatal(errors.New("no corresponding repository found in the database: impossible to insert data"))
	}
	commits := r.GetCommits()
	for i := range commits {
		insertCommit(db, id, commits[i])
	}
}
// insertCommit inserts one commit row plus all of its diff deltas,
// resolving the author and committer user IDs by email.
func insertCommit(db *sql.DB, repoID int, c model.Commit) {
	fields := []string{
		"repository_id",
		"author_id",
		"committer_id",
		"hash",
		"vcs_id",
		"message",
		"author_date",
		"commit_date",
		"file_changed_count",
		"insertions_count",
		"deletions_count"}
	authorID := getUserID(db, c.Author.Email)
	committerID := getUserID(db, c.Committer.Email)
	hash := genCommitHash(c)
	query := genInsQuery("commits", fields...) + " RETURNING id"
	var commitID int64
	if err := db.QueryRow(query,
		repoID, authorID, committerID, hash,
		c.VCSID, c.Message, c.AuthorDate, c.CommitDate,
		c.FileChangedCount, c.InsertionsCount, c.DeletionsCount).Scan(&commitID); err != nil {
		fatal(err)
	}
	for _, d := range c.DiffDelta {
		insertDiffDelta(db, commitID, d)
	}
}
// insertDiffDelta inserts a single diff delta row for the given commit.
func insertDiffDelta(db *sql.DB, commitID int64, d model.DiffDelta) {
	fields := []string{
		"commit_id",
		"file_status",
		"is_file_binary",
		"similarity",
		"old_file_path",
		"new_file_path"}
	if _, err := db.Exec(genInsQuery("commit_diff_deltas", fields...),
		commitID, d.Status, d.Binary, d.Similarity, d.OldFilePath, d.NewFilePath); err != nil {
		fatal(err)
	}
}
// genCommitHash generates an mmh3 hash from the identifying fields of a
// commit, giving a stable unique key for it.
func genCommitHash(c model.Commit) string {
	h := mmh3.New128()
	for _, s := range []string{
		c.VCSID,
		c.Message,
		c.Author.Name,
		c.Author.Email,
		c.Committer.Name,
		c.Committer.Email,
		c.AuthorDate.String(),
		c.CommitDate.String(),
		strconv.FormatInt(int64(c.FileChangedCount), 10),
		strconv.FormatInt(int64(c.InsertionsCount), 10),
		strconv.FormatInt(int64(c.DeletionsCount), 10),
	} {
		io.WriteString(h, s)
	}
	return hex.EncodeToString(h.Sum(nil))
}
// getRepoID returns the id of the repository in the repositories table,
// or 0 when it is not present.
func getRepoID(db *sql.DB, r repo.Repo) int {
	if db == nil {
		fatal(errors.New("nil database given"))
	}
	var id int
	// The clone URL is unique, so at most one row matches.
	err := db.QueryRow("SELECT id FROM repositories WHERE clone_url=$1", r.GetCloneURL()).Scan(&id)
	if err == sql.ErrNoRows {
		return 0
	}
	if err != nil {
		fatal(err)
	}
	return id
}
// getUserID looks up a user ID by email address. Email addresses are
// unique but may be absent; nil is returned when the email is empty or no
// matching user exists.
func getUserID(db *sql.DB, email string) *int {
	if db == nil {
		fatal(errors.New("nil database given"))
	}
	if email == "" {
		return nil
	}
	var id *int
	err := db.QueryRow("SELECT id FROM users WHERE email=$1", email).Scan(&id)
	if err == sql.ErrNoRows {
		return nil
	}
	if err != nil {
		fatal(err)
	}
	return id
}
// genInsQuery generates a query string for an insertion in the database.
func genInsQuery(tableName string, fields ...string) string {
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("INSERT INTO %s(%s)\n",
tableName, strings.Join(fields, ",")))
buf.WriteString("VALUES(")
for ind := range fields {
if ind > 0 {
buf.WriteString(",")
}
buf.WriteString(fmt.Sprintf("$%d", ind+1))
}
buf.WriteString(")\n")
return buf.String()
}
|
package lib
import (
"github.com/dproject21/di_container_test/sampleinterface"
"github.com/fgrosse/goldi"
)
// container holds the process-wide dependency-injection container,
// initialized by CreateContainer.
var container *goldi.Container
// CreateContainer builds the global DI container from the registered types.
// Call it once when the application loads, before GetPrinter.
func CreateContainer() {
	registry := goldi.NewTypeRegistry()
	RegisterTypes(registry)
	// No runtime configuration is needed; all types are registered statically.
	container = goldi.NewContainer(registry, map[string]interface{}{})
}
// GetPrinter resolves the "printer" service from the container; it panics
// (via MustGet) when CreateContainer has not been called first.
func GetPrinter() sampleinterface.SamplePrinter {
	return container.MustGet("printer").(sampleinterface.SamplePrinter)
}
|
package v5
import (
"encoding/json"
"net/http"
"reflect"
"strings"
)
// ByID is the "id" constant to use as the `by` property in methods.
const ByID = "id"

// ByExternalID is the "externalId" constant to use as the `by` property in methods.
const ByExternalID = "externalId"
// Client is the API client: endpoint URL, API key, a debug toggle, and the
// underlying HTTP client used for requests.
type Client struct {
	URL string
	Key string
	Debug bool
	httpClient *http.Client
}
// Pagination describes the paging metadata returned by list endpoints.
type Pagination struct {
	Limit int `json:"limit,omitempty"`
	TotalCount int `json:"totalCount,omitempty"`
	CurrentPage int `json:"currentPage,omitempty"`
	TotalPageCount int `json:"totalPageCount,omitempty"`
}
// Address is a postal address, both as structured components and as the
// free-form Text field.
type Address struct {
	Index string `json:"index,omitempty"`
	CountryIso string `json:"countryIso,omitempty"`
	Region string `json:"region,omitempty"`
	RegionID int `json:"regionId,omitempty"`
	City string `json:"city,omitempty"`
	CityID int `json:"cityId,omitempty"`
	CityType string `json:"cityType,omitempty"`
	Street string `json:"street,omitempty"`
	StreetID int `json:"streetId,omitempty"`
	StreetType string `json:"streetType,omitempty"`
	Building string `json:"building,omitempty"`
	Flat string `json:"flat,omitempty"`
	Floor int `json:"floor,omitempty"`
	Block int `json:"block,omitempty"`
	House string `json:"house,omitempty"`
	Metro string `json:"metro,omitempty"`
	Notes string `json:"notes,omitempty"`
	Text string `json:"text,omitempty"`
}
// GeoHierarchyRow is one row of the country/region/city geo hierarchy.
type GeoHierarchyRow struct {
	Country string `json:"country,omitempty"`
	Region string `json:"region,omitempty"`
	RegionID int `json:"regionId,omitempty"`
	City string `json:"city,omitempty"`
	CityID int `json:"cityId,omitempty"`
}
// Source carries marketing attribution (UTM-style) data.
type Source struct {
	Source string `json:"source,omitempty"`
	Medium string `json:"medium,omitempty"`
	Campaign string `json:"campaign,omitempty"`
	Keyword string `json:"keyword,omitempty"`
	Content string `json:"content,omitempty"`
}
// Contragent holds the legal-entity details of a counterparty
// (registration numbers and bank requisites).
type Contragent struct {
	ContragentType string `json:"contragentType,omitempty"`
	LegalName string `json:"legalName,omitempty"`
	LegalAddress string `json:"legalAddress,omitempty"`
	INN string `json:"INN,omitempty"`
	OKPO string `json:"OKPO,omitempty"`
	KPP string `json:"KPP,omitempty"`
	OGRN string `json:"OGRN,omitempty"`
	OGRNIP string `json:"OGRNIP,omitempty"`
	CertificateNumber string `json:"certificateNumber,omitempty"`
	CertificateDate string `json:"certificateDate,omitempty"`
	BIK string `json:"BIK,omitempty"`
	Bank string `json:"bank,omitempty"`
	BankAddress string `json:"bankAddress,omitempty"`
	CorrAccount string `json:"corrAccount,omitempty"`
	BankAccount string `json:"bankAccount,omitempty"`
}
// APIKey identifies the API key used in a history record; Current marks
// the key performing the request.
type APIKey struct {
	Current bool `json:"current,omitempty"`
}
// Property is a named code/value pair scoped to a set of sites.
type Property struct {
	Code string `json:"code,omitempty"`
	Name string `json:"name,omitempty"`
	Value string `json:"value,omitempty"`
	// NOTE(review): the "Sites" JSON key is capitalized unlike every other
	// tag in this file — confirm against the API schema before changing.
	Sites []string `json:"Sites,omitempty,brackets"`
}
// IdentifiersPair references an entity by internal ID and/or external ID.
type IdentifiersPair struct {
	ID int `json:"id,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
}
// DeliveryTime is a delivery window, either as a From/To range or a
// custom free-form description.
type DeliveryTime struct {
	From string `json:"from,omitempty"`
	To string `json:"to,omitempty"`
	Custom string `json:"custom,omitempty"`
}
/**
Customer related types
*/
// Customer is an individual customer record with contact data, marketing
// attribution, discount settings, and aggregated order statistics.
type Customer struct {
	ID int `json:"id,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
	FirstName string `json:"firstName,omitempty"`
	LastName string `json:"lastName,omitempty"`
	Patronymic string `json:"patronymic,omitempty"`
	Sex string `json:"sex,omitempty"`
	Email string `json:"email,omitempty"`
	Phones []Phone `json:"phones,brackets,omitempty"`
	Address *Address `json:"address,omitempty"`
	CreatedAt string `json:"createdAt,omitempty"`
	Birthday string `json:"birthday,omitempty"`
	ManagerID int `json:"managerId,omitempty"`
	Vip bool `json:"vip,omitempty"`
	Bad bool `json:"bad,omitempty"`
	Site string `json:"site,omitempty"`
	Source *Source `json:"source,omitempty"`
	Contragent *Contragent `json:"contragent,omitempty"`
	PersonalDiscount float32 `json:"personalDiscount,omitempty"`
	CumulativeDiscount float32 `json:"cumulativeDiscount,omitempty"`
	DiscountCardNumber string `json:"discountCardNumber,omitempty"`
	EmailMarketingUnsubscribedAt string `json:"emailMarketingUnsubscribedAt,omitempty"`
	AvgMarginSumm float32 `json:"avgMarginSumm,omitempty"`
	MarginSumm float32 `json:"marginSumm,omitempty"`
	TotalSumm float32 `json:"totalSumm,omitempty"`
	AverageSumm float32 `json:"averageSumm,omitempty"`
	OrdersCount int `json:"ordersCount,omitempty"`
	CostSumm float32 `json:"costSumm,omitempty"`
	MaturationTime int `json:"maturationTime,omitempty"`
	FirstClientID string `json:"firstClientId,omitempty"`
	LastClientID string `json:"lastClientId,omitempty"`
	BrowserID string `json:"browserId,omitempty"`
	MgCustomerID string `json:"mgCustomerId,omitempty"`
	PhotoURL string `json:"photoUrl,omitempty"`
	CustomFields map[string]string `json:"customFields,omitempty,brackets"`
	Tags []Tag `json:"tags,brackets,omitempty"`
}
// CorporateCustomer is a company-level customer with its contacts,
// companies, and addresses.
type CorporateCustomer struct {
	ID int `json:"id,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
	Nickname string `json:"nickName,omitempty"`
	CreatedAt string `json:"createdAt,omitempty"`
	Vip bool `json:"vip,omitempty"`
	Bad bool `json:"bad,omitempty"`
	CustomFields map[string]string `json:"customFields,omitempty,brackets"`
	PersonalDiscount float32 `json:"personalDiscount,omitempty"`
	DiscountCardNumber string `json:"discountCardNumber,omitempty"`
	ManagerID int `json:"managerId,omitempty"`
	Source *Source `json:"source,omitempty"`
	CustomerContacts []CorporateCustomerContact `json:"customerContacts,omitempty"`
	Companies []Company `json:"companies,omitempty"`
	Addresses []CorporateCustomerAddress `json:"addresses,omitempty"`
}
// CorporateCustomerContact links an individual contact person to a
// corporate customer and the companies they represent.
type CorporateCustomerContact struct {
	IsMain bool `json:"isMain,omitempty"`
	Customer CorporateCustomerContactCustomer `json:"customer,omitempty"`
	Companies []IdentifiersPair `json:"companies,omitempty"`
}
// CorporateCustomerAddress is a corporate customer's address. Address is
// intentionally not embedded, to keep the declaration simple.
type CorporateCustomerAddress struct {
	ID int `json:"id,omitempty"`
	Index string `json:"index,omitempty"`
	CountryISO string `json:"countryIso,omitempty"`
	Region string `json:"region,omitempty"`
	RegionID int `json:"regionId,omitempty"`
	City string `json:"city,omitempty"`
	CityID int `json:"cityId,omitempty"`
	CityType string `json:"cityType,omitempty"`
	Street string `json:"street,omitempty"`
	StreetID int `json:"streetId,omitempty"`
	StreetType string `json:"streetType,omitempty"`
	Building string `json:"building,omitempty"`
	Flat string `json:"flat,omitempty"`
	IntercomCode string `json:"intercomCode,omitempty"`
	Floor int `json:"floor,omitempty"`
	Block int `json:"block,omitempty"`
	House string `json:"house,omitempty"`
	Housing string `json:"housing,omitempty"`
	Metro string `json:"metro,omitempty"`
	Notes string `json:"notes,omitempty"`
	Text string `json:"text,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
	Name string `json:"name,omitempty"`
}
// CorporateCustomerContactCustomer references the individual customer
// behind a corporate contact.
type CorporateCustomerContactCustomer struct {
	ID int `json:"id,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
	BrowserID string `json:"browserId,omitempty"`
	Site string `json:"site,omitempty"`
}
// Company is a company attached to a corporate customer.
type Company struct {
	ID int `json:"id,omitempty"`
	IsMain bool `json:"isMain,omitempty"`
	ExternalID string `json:"externalId,omitempty"`
	Active bool `json:"active,omitempty"`
	Name string `json:"name,omitempty"`
	Brand string `json:"brand,omitempty"`
	Site string `json:"site,omitempty"`
	CreatedAt string `json:"createdAt,omitempty"`
	Contragent *Contragent `json:"contragent,omitempty"`
	Address *IdentifiersPair `json:"address,omitempty"`
	CustomFields map[string]string `json:"customFields,omitempty,brackets"`
}
// CorporateCustomerNote is a manager note attached to a corporate customer.
type CorporateCustomerNote struct {
	ManagerID int `json:"managerId,omitempty"`
	Text string `json:"text,omitempty"`
	Customer *IdentifiersPair `json:"customer,omitempty"`
}
// Phone is a single phone number entry.
type Phone struct {
	Number string `json:"number,omitempty"`
}
// CustomerHistoryRecord is one change-history entry for a customer,
// identifying what changed and which user or API key made the change.
type CustomerHistoryRecord struct {
	ID int `json:"id,omitempty"`
	CreatedAt string `json:"createdAt,omitempty"`
	Created bool `json:"created,omitempty"`
	Deleted bool `json:"deleted,omitempty"`
	Source string `json:"source,omitempty"`
	Field string `json:"field,omitempty"`
	User *User `json:"user,omitempty,brackets"`
	APIKey *APIKey `json:"apiKey,omitempty,brackets"`
	Customer *Customer `json:"customer,omitempty,brackets"`
}
// CorporateCustomerHistoryRecord is one change-history entry for a
// corporate customer, identifying what changed and who made the change.
type CorporateCustomerHistoryRecord struct {
	ID int `json:"id,omitempty"`
	CreatedAt string `json:"createdAt,omitempty"`
	Created bool `json:"created,omitempty"`
	Deleted bool `json:"deleted,omitempty"`
	Source string `json:"source,omitempty"`
	Field string `json:"field,omitempty"`
	User *User `json:"user,omitempty,brackets"`
	APIKey *APIKey `json:"apiKey,omitempty,brackets"`
	CorporateCustomer *CorporateCustomer `json:"corporateCustomer,omitempty,brackets"`
}
/**
Order related types
*/
// Order is the central order entity: customer identity, lifecycle status,
// monetary totals, physical dimensions, delivery, items and payments.
type Order struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 Number string `json:"number,omitempty"`
 FirstName string `json:"firstName,omitempty"`
 LastName string `json:"lastName,omitempty"`
 Patronymic string `json:"patronymic,omitempty"`
 Email string `json:"email,omitempty"`
 Phone string `json:"phone,omitempty"`
 AdditionalPhone string `json:"additionalPhone,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 StatusUpdatedAt string `json:"statusUpdatedAt,omitempty"`
 ManagerID int `json:"managerId,omitempty"`
 Mark int `json:"mark,omitempty"`
 Call bool `json:"call,omitempty"`
 Expired bool `json:"expired,omitempty"`
 FromAPI bool `json:"fromApi,omitempty"`
 MarkDatetime string `json:"markDatetime,omitempty"`
 CustomerComment string `json:"customerComment,omitempty"`
 ManagerComment string `json:"managerComment,omitempty"`
 Status string `json:"status,omitempty"`
 StatusComment string `json:"statusComment,omitempty"`
 FullPaidAt string `json:"fullPaidAt,omitempty"`
 Site string `json:"site,omitempty"`
 OrderType string `json:"orderType,omitempty"`
 OrderMethod string `json:"orderMethod,omitempty"`
 CountryIso string `json:"countryIso,omitempty"`
 Summ float32 `json:"summ,omitempty"`
 TotalSumm float32 `json:"totalSumm,omitempty"`
 PrepaySum float32 `json:"prepaySum,omitempty"`
 PurchaseSumm float32 `json:"purchaseSumm,omitempty"`
 DiscountManualAmount float32 `json:"discountManualAmount,omitempty"`
 DiscountManualPercent float32 `json:"discountManualPercent,omitempty"`
 Weight float32 `json:"weight,omitempty"`
 Length int `json:"length,omitempty"`
 Width int `json:"width,omitempty"`
 Height int `json:"height,omitempty"`
 ShipmentStore string `json:"shipmentStore,omitempty"`
 ShipmentDate string `json:"shipmentDate,omitempty"`
 ClientID string `json:"clientId,omitempty"`
 Shipped bool `json:"shipped,omitempty"`
 UploadedToExternalStoreSystem bool `json:"uploadedToExternalStoreSystem,omitempty"`
 Source *Source `json:"source,omitempty"`
 Contragent *Contragent `json:"contragent,omitempty"`
 Customer *Customer `json:"customer,omitempty"`
 Delivery *OrderDelivery `json:"delivery,omitempty"`
 Marketplace *OrderMarketplace `json:"marketplace,omitempty"`
 Items []OrderItem `json:"items,omitempty,brackets"`
 CustomFields map[string]string `json:"customFields,omitempty,brackets"`
 Payments map[string]OrderPayment `json:"payments,omitempty,brackets"`
}
// OrdersStatus is a compact status projection of an order; ID, Status and
// Group have no omitempty and are always serialized.
type OrdersStatus struct {
 ID int `json:"id"`
 ExternalID string `json:"externalId,omitempty"`
 Status string `json:"status"`
 Group string `json:"group"`
}
// OrderDelivery describes how and when an order is delivered, including
// costs, time window, address and integration-specific data.
type OrderDelivery struct {
 Code string `json:"code,omitempty"`
 IntegrationCode string `json:"integrationCode,omitempty"`
 Cost float32 `json:"cost,omitempty"`
 NetCost float32 `json:"netCost,omitempty"`
 VatRate string `json:"vatRate,omitempty"`
 Date string `json:"date,omitempty"`
 Time *OrderDeliveryTime `json:"time,omitempty"`
 Address *Address `json:"address,omitempty"`
 Service *OrderDeliveryService `json:"service,omitempty"`
 Data *OrderDeliveryData `json:"data,omitempty"`
}
// OrderDeliveryTime is a delivery time window; Custom carries a free-form
// description when From/To do not apply.
type OrderDeliveryTime struct {
 From string `json:"from,omitempty"`
 To string `json:"to,omitempty"`
 Custom string `json:"custom,omitempty"`
}
// OrderDeliveryService identifies the concrete courier/service of a delivery type.
type OrderDeliveryService struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
}
// OrderDeliveryDataBasic holds the delivery-data fields common to all
// delivery integrations; unknown keys go to OrderDeliveryData.AdditionalFields.
type OrderDeliveryDataBasic struct {
 TrackNumber string `json:"trackNumber,omitempty"`
 Status string `json:"status,omitempty"`
 PickuppointAddress string `json:"pickuppointAddress,omitempty"`
 PayerType string `json:"payerType,omitempty"`
}
// OrderDeliveryData is OrderDeliveryDataBasic plus any extra
// integration-specific keys, kept in AdditionalFields (see the custom
// UnmarshalJSON/MarshalJSON below).
type OrderDeliveryData struct {
 OrderDeliveryDataBasic
 AdditionalFields map[string]interface{}
}
// UnmarshalJSON decodes delivery data, splitting the keys recognized by
// OrderDeliveryDataBasic (via their json tags) from any extra
// integration-specific keys, which are collected into AdditionalFields.
// Fix: the two json.Unmarshal errors were previously discarded, so the
// method reported success even on malformed input; the tag variable also
// shadowed the loop index i.
func (v *OrderDeliveryData) UnmarshalJSON(b []byte) error {
 var additionalData map[string]interface{}
 if err := json.Unmarshal(b, &additionalData); err != nil {
  return err
 }
 if err := json.Unmarshal(b, &v.OrderDeliveryDataBasic); err != nil {
  return err
 }
 // Drop every key captured by OrderDeliveryDataBasic so that
 // AdditionalFields holds only the unknown ones.
 object := reflect.TypeOf(v.OrderDeliveryDataBasic)
 for i := 0; i < object.NumField(); i++ {
  field := object.Field(i)
  if tag, ok := field.Tag.Lookup("json"); ok {
   name := strings.Split(tag, ",")[0]
   delete(additionalData, strings.TrimSpace(name))
  } else {
   // No json tag: encoding/json would use the Go field name.
   delete(additionalData, field.Name)
  }
 }
 v.AdditionalFields = additionalData
 return nil
}
// MarshalJSON flattens the basic fields and AdditionalFields into a single
// JSON object; AdditionalFields win on key collision.
// Fix: the intermediate Marshal/Unmarshal errors were previously discarded.
func (v OrderDeliveryData) MarshalJSON() ([]byte, error) {
 result := map[string]interface{}{}
 data, err := json.Marshal(v.OrderDeliveryDataBasic)
 if err != nil {
  return nil, err
 }
 if err := json.Unmarshal(data, &result); err != nil {
  return nil, err
 }
 for key, value := range v.AdditionalFields {
  result[key] = value
 }
 return json.Marshal(result)
}
// OrderMarketplace identifies the marketplace an order originated from.
type OrderMarketplace struct {
 Code string `json:"code,omitempty"`
 OrderID string `json:"orderId,omitempty"`
}
// OrderPayment is a single payment attached to an order.
type OrderPayment struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 Type string `json:"type,omitempty"`
 Status string `json:"status,omitempty"`
 PaidAt string `json:"paidAt,omitempty"`
 Amount float32 `json:"amount,omitempty"`
 Comment string `json:"comment,omitempty"`
}
// OrderItem is a single line item of an order: pricing, discounts, quantity
// and the offer it refers to.
type OrderItem struct {
 ID int `json:"id,omitempty"`
 InitialPrice float32 `json:"initialPrice,omitempty"`
 PurchasePrice float32 `json:"purchasePrice,omitempty"`
 DiscountTotal float32 `json:"discountTotal,omitempty"`
 DiscountManualAmount float32 `json:"discountManualAmount,omitempty"`
 DiscountManualPercent float32 `json:"discountManualPercent,omitempty"`
 ProductName string `json:"productName,omitempty"`
 VatRate string `json:"vatRate,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Quantity float32 `json:"quantity,omitempty"`
 Status string `json:"status,omitempty"`
 Comment string `json:"comment,omitempty"`
 IsCanceled bool `json:"isCanceled,omitempty"`
 Offer Offer `json:"offer,omitempty"`
 Properties map[string]Property `json:"properties,omitempty,brackets"`
 PriceType *PriceType `json:"priceType,omitempty"`
}
// OrdersHistoryRecord is one change-log entry for an order field.
type OrdersHistoryRecord struct {
 ID int `json:"id,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Created bool `json:"created,omitempty"`
 Deleted bool `json:"deleted,omitempty"`
 Source string `json:"source,omitempty"`
 Field string `json:"field,omitempty"`
 User *User `json:"user,omitempty,brackets"`
 APIKey *APIKey `json:"apiKey,omitempty,brackets"`
 Order *Order `json:"order,omitempty,brackets"`
}
// Pack is a shipment package of an order item (quantity taken from a store).
type Pack struct {
 ID int `json:"id,omitempty"`
 PurchasePrice float32 `json:"purchasePrice,omitempty"`
 Quantity float32 `json:"quantity,omitempty"`
 Store string `json:"store,omitempty"`
 ShipmentDate string `json:"shipmentDate,omitempty"`
 InvoiceNumber string `json:"invoiceNumber,omitempty"`
 DeliveryNoteNumber string `json:"deliveryNoteNumber,omitempty"`
 Item *PackItem `json:"item,omitempty"`
 ItemID int `json:"itemId,omitempty"`
 Unit *Unit `json:"unit,omitempty"`
}
// PackItem points a pack back to its order and offer.
type PackItem struct {
 ID int `json:"id,omitempty"`
 Order *Order `json:"order,omitempty"`
 Offer *Offer `json:"offer,omitempty"`
}
// PacksHistoryRecord is one change-log entry for a pack field.
type PacksHistoryRecord struct {
 ID int `json:"id,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Created bool `json:"created,omitempty"`
 Deleted bool `json:"deleted,omitempty"`
 Source string `json:"source,omitempty"`
 Field string `json:"field,omitempty"`
 User *User `json:"user,omitempty,brackets"`
 Pack *Pack `json:"pack,omitempty,brackets"`
}
// Offer is a sellable SKU of a product: identifiers, price, stock and
// physical dimensions.
type Offer struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 Name string `json:"name,omitempty"`
 XMLID string `json:"xmlId,omitempty"`
 Article string `json:"article,omitempty"`
 VatRate string `json:"vatRate,omitempty"`
 Price float32 `json:"price,omitempty"`
 PurchasePrice float32 `json:"purchasePrice,omitempty"`
 Quantity float32 `json:"quantity,omitempty"`
 Height float32 `json:"height,omitempty"`
 Width float32 `json:"width,omitempty"`
 Length float32 `json:"length,omitempty"`
 Weight float32 `json:"weight,omitempty"`
 Stores []Inventory `json:"stores,omitempty,brackets"`
 Properties map[string]string `json:"properties,omitempty,brackets"`
 Prices []OfferPrice `json:"prices,omitempty,brackets"`
 Images []string `json:"images,omitempty,brackets"`
 Unit *Unit `json:"unit,omitempty,brackets"`
}
// Inventory is the stock of an offer in one store.
type Inventory struct {
 PurchasePrice float32 `json:"purchasePrice,omitempty"`
 Quantity float32 `json:"quantity,omitempty"`
 Store string `json:"store,omitempty"`
}
// InventoryUpload is the payload for pushing stock levels of one offer.
type InventoryUpload struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 XMLID string `json:"xmlId,omitempty"`
 Stores []InventoryUploadStore `json:"stores,omitempty"`
}
// InventoryUploadStore is the per-store stock entry of an InventoryUpload.
type InventoryUploadStore struct {
 PurchasePrice float32 `json:"purchasePrice,omitempty"`
 Available float32 `json:"available,omitempty"`
 Code string `json:"code,omitempty"`
}
// OfferPrice is one price of an offer under a named price type.
type OfferPrice struct {
 Price float32 `json:"price,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 PriceType string `json:"priceType,omitempty"`
}
// OfferPriceUpload is the payload for pushing prices of one offer.
type OfferPriceUpload struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 XMLID string `json:"xmlId,omitempty"`
 Site string `json:"site,omitempty"`
 Prices []PriceUpload `json:"prices,omitempty"`
}
// PriceUpload is a (price-type code, price) pair used in OfferPriceUpload.
type PriceUpload struct {
 Code string `json:"code,omitempty"`
 Price float32 `json:"price,omitempty"`
}
// Unit is a measurement unit; Code, Name and Sym are always serialized.
type Unit struct {
 Code string `json:"code"`
 Name string `json:"name"`
 Sym string `json:"sym"`
 Default bool `json:"default,omitempty"`
 Active bool `json:"active,omitempty"`
}
/**
User related types
*/
// User is a CRM system user (manager/admin) account.
type User struct {
 ID int `json:"id,omitempty"`
 FirstName string `json:"firstName,omitempty"`
 LastName string `json:"lastName,omitempty"`
 Patronymic string `json:"patronymic,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Active bool `json:"active,omitempty"`
 Online bool `json:"online,omitempty"`
 IsAdmin bool `json:"isAdmin,omitempty"`
 IsManager bool `json:"isManager,omitempty"`
 Email string `json:"email,omitempty"`
 Phone string `json:"phone,omitempty"`
 Status string `json:"status,omitempty"`
 Groups []UserGroup `json:"groups,omitempty,brackets"`
 MgUserId uint64 `json:"mgUserId,omitempty"`
}
// UserGroup is a permission/role group with its breakdown and grant scopes.
type UserGroup struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 SignatureTemplate string `json:"signatureTemplate,omitempty"`
 IsManager bool `json:"isManager,omitempty"`
 IsDeliveryMen bool `json:"isDeliveryMen,omitempty"`
 DeliveryTypes []string `json:"deliveryTypes,omitempty,brackets"`
 BreakdownOrderTypes []string `json:"breakdownOrderTypes,omitempty,brackets"`
 BreakdownSites []string `json:"breakdownSites,omitempty,brackets"`
 BreakdownOrderMethods []string `json:"breakdownOrderMethods,omitempty,brackets"`
 GrantedOrderTypes []string `json:"grantedOrderTypes,omitempty,brackets"`
 GrantedSites []string `json:"grantedSites,omitempty,brackets"`
}
/**
Task related types
*/
// Task is a to-do item assigned to a user, optionally bound to a customer or order.
type Task struct {
 ID int `json:"id,omitempty"`
 PerformerID int `json:"performerId,omitempty"`
 Text string `json:"text,omitempty"`
 Commentary string `json:"commentary,omitempty"`
 Datetime string `json:"datetime,omitempty"`
 Complete bool `json:"complete,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Creator int `json:"creator,omitempty"`
 Performer int `json:"performer,omitempty"`
 Phone string `json:"phone,omitempty"`
 PhoneSite string `json:"phoneSite,omitempty"`
 Customer *Customer `json:"customer,omitempty"`
 Order *Order `json:"order,omitempty"`
}
/*
Notes related types
*/
// Note is a manager note attached to a customer.
type Note struct {
 ID int `json:"id,omitempty"`
 ManagerID int `json:"managerId,omitempty"`
 Text string `json:"text,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Customer *Customer `json:"customer,omitempty"`
}
/*
Payments related types
*/
// Payment is a standalone payment entity linked back to its order.
type Payment struct {
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 PaidAt string `json:"paidAt,omitempty"`
 Amount float32 `json:"amount,omitempty"`
 Comment string `json:"comment,omitempty"`
 Status string `json:"status,omitempty"`
 Type string `json:"type,omitempty"`
 Order *Order `json:"order,omitempty"`
}
/*
Segment related types
*/
// Segment is a customer segment; IsDynamic marks rule-based membership.
type Segment struct {
 ID int `json:"id,omitempty"`
 Code string `json:"code,omitempty"`
 Name string `json:"name,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 CustomersCount int `json:"customersCount,omitempty"`
 IsDynamic bool `json:"isDynamic,omitempty"`
 Active bool `json:"active,omitempty"`
}
/*
 * Settings related types
 */
// SettingsNode represents an item in settings. All settings nodes contain only
// a string value and its update time for now. Note the snake_case tags: this
// endpoint uses a different naming convention than the rest of the API.
type SettingsNode struct {
 Value string `json:"value"`
 UpdatedAt string `json:"updated_at"`
}
// Settings contains the retailCRM instance configuration.
type Settings struct {
 DefaultCurrency SettingsNode `json:"default_currency"`
 SystemLanguage SettingsNode `json:"system_language"`
 Timezone SettingsNode `json:"timezone"`
}
/**
Reference related types
*/
// CostGroup is a reference-book group of cost items.
type CostGroup struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Color string `json:"color,omitempty"`
 Active bool `json:"active,omitempty"`
 Ordering int `json:"ordering,omitempty"`
}
// CostItem is a reference-book cost item within a CostGroup.
type CostItem struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Group string `json:"group,omitempty"`
 Type string `json:"type,omitempty"`
 Active bool `json:"active,omitempty"`
 AppliesToOrders bool `json:"appliesToOrders,omitempty"`
 AppliesToUsers bool `json:"appliesToUsers,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 Source *Source `json:"source,omitempty"`
}
// Courier is a delivery person record.
type Courier struct {
 ID int `json:"id,omitempty"`
 FirstName string `json:"firstName,omitempty"`
 LastName string `json:"lastName,omitempty"`
 Patronymic string `json:"patronymic,omitempty"`
 Email string `json:"email,omitempty"`
 Description string `json:"description,omitempty"`
 Active bool `json:"active,omitempty"`
 Phone *Phone `json:"phone,omitempty"`
}
// DeliveryService is a concrete service offered under a delivery type.
type DeliveryService struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
}
// DeliveryType is a reference-book delivery method with default costs and
// its allowed services and payment types.
type DeliveryType struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 DefaultCost float32 `json:"defaultCost,omitempty"`
 DefaultNetCost float32 `json:"defaultNetCost,omitempty"`
 Description string `json:"description,omitempty"`
 IntegrationCode string `json:"integrationCode,omitempty"`
 VatRate string `json:"vatRate,omitempty"`
 DefaultForCrm bool `json:"defaultForCrm,omitempty"`
 DeliveryServices []string `json:"deliveryServices,omitempty"`
 PaymentTypes []string `json:"paymentTypes,omitempty"`
}
// LegalEntity holds legal/banking requisites of a company (the uppercase
// field tags INN/OKPO/KPP/OGRN/... mirror the API's own field names).
type LegalEntity struct {
 Code string `json:"code,omitempty"`
 VatRate string `json:"vatRate,omitempty"`
 CountryIso string `json:"countryIso,omitempty"`
 ContragentType string `json:"contragentType,omitempty"`
 LegalName string `json:"legalName,omitempty"`
 LegalAddress string `json:"legalAddress,omitempty"`
 INN string `json:"INN,omitempty"`
 OKPO string `json:"OKPO,omitempty"`
 KPP string `json:"KPP,omitempty"`
 OGRN string `json:"OGRN,omitempty"`
 OGRNIP string `json:"OGRNIP,omitempty"`
 CertificateNumber string `json:"certificateNumber,omitempty"`
 CertificateDate string `json:"certificateDate,omitempty"`
 BIK string `json:"BIK,omitempty"`
 Bank string `json:"bank,omitempty"`
 BankAddress string `json:"bankAddress,omitempty"`
 CorrAccount string `json:"corrAccount,omitempty"`
 BankAccount string `json:"bankAccount,omitempty"`
}
// OrderMethod is a reference-book order placement method.
type OrderMethod struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 DefaultForCRM bool `json:"defaultForCrm,omitempty"`
 DefaultForAPI bool `json:"defaultForApi,omitempty"`
}
// OrderType is a reference-book order type.
type OrderType struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 DefaultForCRM bool `json:"defaultForCrm,omitempty"`
 DefaultForAPI bool `json:"defaultForApi,omitempty"`
}
// PaymentStatus is a reference-book payment status; PaymentComplete marks
// statuses that count as fully paid.
type PaymentStatus struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 DefaultForCRM bool `json:"defaultForCrm,omitempty"`
 DefaultForAPI bool `json:"defaultForApi,omitempty"`
 PaymentComplete bool `json:"paymentComplete,omitempty"`
 Description string `json:"description,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 PaymentTypes []string `json:"paymentTypes,omitempty,brackets"`
}
// PaymentType is a reference-book payment method.
// NOTE(review): the "PaymentStatuses" tag is capitalized unlike every other
// tag in this file — presumably it mirrors the API; confirm before changing,
// since altering it would change the wire format.
type PaymentType struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 DefaultForCRM bool `json:"defaultForCrm,omitempty"`
 DefaultForAPI bool `json:"defaultForApi,omitempty"`
 Description string `json:"description,omitempty"`
 DeliveryTypes []string `json:"deliveryTypes,omitempty,brackets"`
 PaymentStatuses []string `json:"PaymentStatuses,omitempty,brackets"`
}
// PriceType is a reference-book price kind, optionally restricted by user
// groups and geography.
type PriceType struct {
 ID int `json:"id,omitempty"`
 Code string `json:"code,omitempty"`
 Name string `json:"name,omitempty"`
 Active bool `json:"active,omitempty"`
 Default bool `json:"default,omitempty"`
 Description string `json:"description,omitempty"`
 FilterExpression string `json:"filterExpression,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 Groups []string `json:"groups,omitempty,brackets"`
 Geo []GeoHierarchyRow `json:"geo,omitempty,brackets"`
}
// ProductStatus is a reference-book status of a product inside an order.
type ProductStatus struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 CancelStatus bool `json:"cancelStatus,omitempty"`
 OrderStatusByProductStatus string `json:"orderStatusByProductStatus,omitempty"`
 OrderStatusForProductStatus string `json:"orderStatusForProductStatus,omitempty"`
}
// Status is a reference-book order status belonging to a status group.
type Status struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 Group string `json:"group,omitempty"`
}
// StatusGroup is a reference-book group of order statuses.
type StatusGroup struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Active bool `json:"active,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 Process bool `json:"process,omitempty"`
 Statuses []string `json:"statuses,omitempty,brackets"`
}
// Site is a sales channel (shop/site) with its contact data and YML catalog settings.
type Site struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 URL string `json:"url,omitempty"`
 Description string `json:"description,omitempty"`
 Phones string `json:"phones,omitempty"`
 Zip string `json:"zip,omitempty"`
 Address string `json:"address,omitempty"`
 CountryIso string `json:"countryIso,omitempty"`
 YmlURL string `json:"ymlUrl,omitempty"`
 LoadFromYml bool `json:"loadFromYml,omitempty"`
 CatalogUpdatedAt string `json:"catalogUpdatedAt,omitempty"`
 CatalogLoadingAt string `json:"catalogLoadingAt,omitempty"`
 Contragent *LegalEntity `json:"contragent,omitempty"`
}
// Store is a warehouse/outlet from which orders can be shipped.
type Store struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 Description string `json:"description,omitempty"`
 XMLID string `json:"xmlId,omitempty"`
 Email string `json:"email,omitempty"`
 Type string `json:"type,omitempty"`
 InventoryType string `json:"inventoryType,omitempty"`
 Active bool `json:"active,omitempty"`
 Phone *Phone `json:"phone,omitempty"`
 Address *Address `json:"address,omitempty"`
}
// ProductGroup is a node in the product catalog hierarchy (ParentID = 0 or
// absent for roots).
type ProductGroup struct {
 ID int `json:"id,omitempty"`
 ParentID int `json:"parentId,omitempty"`
 Name string `json:"name,omitempty"`
 Site string `json:"site,omitempty"`
 Active bool `json:"active,omitempty"`
}
// Product is a catalog product aggregating its offers, groups and properties.
type Product struct {
 ID int `json:"id,omitempty"`
 MaxPrice float32 `json:"maxPrice,omitempty"`
 MinPrice float32 `json:"minPrice,omitempty"`
 Name string `json:"name,omitempty"`
 URL string `json:"url,omitempty"`
 Article string `json:"article,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 Manufacturer string `json:"manufacturer,omitempty"`
 ImageURL string `json:"imageUrl,omitempty"`
 Description string `json:"description,omitempty"`
 Popular bool `json:"popular,omitempty"`
 Stock bool `json:"stock,omitempty"`
 Novelty bool `json:"novelty,omitempty"`
 Recommended bool `json:"recommended,omitempty"`
 Active bool `json:"active,omitempty"`
 Quantity float32 `json:"quantity,omitempty"`
 Offers []Offer `json:"offers,omitempty,brackets"`
 Groups []ProductGroup `json:"groups,omitempty,brackets"`
 Properties map[string]string `json:"properties,omitempty,brackets"`
}
// DeliveryHistoryRecord is one status-change entry of a delivery.
type DeliveryHistoryRecord struct {
 Code string `json:"code,omitempty"`
 UpdatedAt string `json:"updatedAt,omitempty"`
 Comment string `json:"comment,omitempty"`
}
// DeliveryShipment groups several orders into one shipment from a store.
type DeliveryShipment struct {
 IntegrationCode string `json:"integrationCode,omitempty"`
 ID int `json:"id,omitempty"`
 ExternalID string `json:"externalId,omitempty"`
 DeliveryType string `json:"deliveryType,omitempty"`
 Store string `json:"store,omitempty"`
 ManagerID int `json:"managerId,omitempty"`
 Status string `json:"status,omitempty"`
 Date string `json:"date,omitempty"`
 Time *DeliveryTime `json:"time,omitempty"`
 LunchTime string `json:"lunchTime,omitempty"`
 Comment string `json:"comment,omitempty"`
 Orders []Order `json:"orders,omitempty,brackets"`
 ExtraData map[string]string `json:"extraData,omitempty,brackets"`
}
// IntegrationModule is the registration record of an external integration
// (logo, URLs and which capabilities it provides via Integrations).
type IntegrationModule struct {
 Code string `json:"code,omitempty"`
 IntegrationCode string `json:"integrationCode,omitempty"`
 Active bool `json:"active,omitempty"`
 Freeze bool `json:"freeze,omitempty"`
 Native bool `json:"native,omitempty"`
 Name string `json:"name,omitempty"`
 Logo string `json:"logo,omitempty"`
 ClientID string `json:"clientId,omitempty"`
 BaseURL string `json:"baseUrl,omitempty"`
 AccountURL string `json:"accountUrl,omitempty"`
 AvailableCountries []string `json:"availableCountries,omitempty"`
 Actions map[string]string `json:"actions,omitempty"`
 Integrations *Integrations `json:"integrations,omitempty"`
}
// Integrations enumerates the capability blocks a module may implement;
// only the implemented ones are non-nil.
type Integrations struct {
 Telephony *Telephony `json:"telephony,omitempty"`
 Delivery *Delivery `json:"delivery,omitempty"`
 Store *Warehouse `json:"store,omitempty"`
 MgTransport *MgTransport `json:"mgTransport,omitempty"`
 MgBot *MgBot `json:"mgBot,omitempty"`
}
// Delivery describes a delivery integration's capabilities: supported
// actions, statuses, plates and configurable data fields.
type Delivery struct {
 Description string `json:"description,omitempty"`
 Actions map[string]string `json:"actions,omitempty,brackets"`
 PayerType []string `json:"payerType,omitempty,brackets"`
 PlatePrintLimit int `json:"platePrintLimit,omitempty"`
 RateDeliveryCost bool `json:"rateDeliveryCost,omitempty"`
 AllowPackages bool `json:"allowPackages,omitempty"`
 CodAvailable bool `json:"codAvailable,omitempty"`
 SelfShipmentAvailable bool `json:"selfShipmentAvailable,omitempty"`
 AllowTrackNumber bool `json:"allowTrackNumber,omitempty"`
 AvailableCountries []string `json:"availableCountries,omitempty"`
 RequiredFields []string `json:"requiredFields,omitempty"`
 StatusList []DeliveryStatus `json:"statusList,omitempty"`
 PlateList []Plate `json:"plateList,omitempty"`
 DeliveryDataFieldList []DeliveryDataField `json:"deliveryDataFieldList,omitempty"`
 ShipmentDataFieldList []DeliveryDataField `json:"shipmentDataFieldList,omitempty"`
}
// DeliveryStatus is one status a delivery integration can report.
type DeliveryStatus struct {
 Code string `json:"code,omitempty"`
 Name string `json:"name,omitempty"`
 IsEditable bool `json:"isEditable,omitempty"`
}
// Plate is a printable document (label/waybill) offered by a delivery integration.
type Plate struct {
 Code string `json:"code,omitempty"`
 Label string `json:"label,omitempty"`
}
// DeliveryDataField describes one configurable form field of a delivery
// integration (type, hints and validation flags).
type DeliveryDataField struct {
 Code string `json:"code,omitempty"`
 Label string `json:"label,omitempty"`
 Hint string `json:"hint,omitempty"`
 Type string `json:"type,omitempty"`
 AutocompleteURL string `json:"autocompleteUrl,omitempty"`
 Multiple bool `json:"multiple,omitempty"`
 Required bool `json:"required,omitempty"`
 AffectsCost bool `json:"affectsCost,omitempty"`
 Editable bool `json:"editable,omitempty"`
}
// Telephony describes a telephony integration's capabilities and endpoints.
type Telephony struct {
 MakeCallURL string `json:"makeCallUrl,omitempty"`
 AllowEdit bool `json:"allowEdit,omitempty"`
 InputEventSupported bool `json:"inputEventSupported,omitempty"`
 OutputEventSupported bool `json:"outputEventSupported,omitempty"`
 HangupEventSupported bool `json:"hangupEventSupported,omitempty"`
 ChangeUserStatusURL string `json:"changeUserStatusUrl,omitempty"`
 AdditionalCodes []AdditionalCode `json:"additionalCodes,omitempty,brackets"`
 ExternalPhones []ExternalPhone `json:"externalPhones,omitempty,brackets"`
}
// AdditionalCode maps a telephony extension code to a CRM user.
type AdditionalCode struct {
 Code string `json:"code,omitempty"`
 UserID string `json:"userId,omitempty"`
}
// ExternalPhone maps an external phone number to a site.
type ExternalPhone struct {
 SiteCode string `json:"siteCode,omitempty"`
 ExternalPhone string `json:"externalPhone,omitempty"`
}
// Warehouse describes a store integration's supported actions.
type Warehouse struct {
 Actions []Action `json:"actions,omitempty,brackets"`
}
// Action is one callable endpoint of a store integration.
type Action struct {
 Code string `json:"code,omitempty"`
 URL string `json:"url,omitempty"`
 CallPoints []string `json:"callPoints,omitempty"`
}
// MgTransport holds the webhook endpoint of an MG transport integration.
type MgTransport struct {
 WebhookUrl string `json:"webhookUrl,omitempty"`
}
// MgBot marks an MG bot integration; it currently carries no configuration.
type MgBot struct{}
/**
Cost related types
*/
// CostRecord is the payload for creating/editing a cost entry.
type CostRecord struct {
 Source *Source `json:"source,omitempty"`
 Comment string `json:"comment,omitempty"`
 DateFrom string `json:"dateFrom,omitempty"`
 DateTo string `json:"dateTo,omitempty"`
 Summ float32 `json:"summ,omitempty"`
 CostItem string `json:"costItem,omitempty"`
 UserId int `json:"userId,omitempty"`
 Order *Order `json:"order,omitempty"`
 Sites []string `json:"sites,omitempty,brackets"`
}
// Cost is a stored cost entry as returned by the API (CostRecord plus
// identity/audit fields).
type Cost struct {
 Source *Source `json:"source,omitempty"`
 ID int `json:"id,omitempty"`
 DateFrom string `json:"dateFrom,omitempty"`
 DateTo string `json:"dateTo,omitempty"`
 Summ float32 `json:"summ,omitempty"`
 CostItem string `json:"costItem,omitempty"`
 Comment string `json:"comment,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 CreatedBy string `json:"createdBy,omitempty"`
 Order *Order `json:"order,omitempty"`
 UserId int `json:"userId,omitempty"`
 Sites []string `json:"sites,omitempty,brackets"`
}
// File is an uploaded file with the entities it is attached to.
type File struct {
 ID int `json:"id,omitempty"`
 Filename string `json:"filename,omitempty"`
 Type string `json:"type,omitempty"`
 CreatedAt string `json:"createdAt,omitempty"`
 Size int `json:"size,omitempty"`
 Attachment []Attachment `json:"attachment,omitempty"`
}
// Attachment links a file to a customer and/or an order.
type Attachment struct {
 Customer *Customer `json:"customer,omitempty"`
 Order *Order `json:"order,omitempty"`
}
// CustomFields describes a custom field definition: where it appears
// (filter/list/group actions), its entity, type and optional dictionary.
type CustomFields struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Required bool `json:"required,omitempty"`
 InFilter bool `json:"inFilter,omitempty"`
 InList bool `json:"inList,omitempty"`
 InGroupActions bool `json:"inGroupActions,omitempty"`
 Type string `json:"type,omitempty"`
 Entity string `json:"entity,omitempty"`
 Default string `json:"default,omitempty"`
 Ordering int `json:"ordering,omitempty"`
 DisplayArea string `json:"displayArea,omitempty"`
 ViewMode string `json:"viewMode,omitempty"`
 Dictionary string `json:"dictionary,omitempty"`
}
/**
CustomDictionaries related types
*/
// CustomDictionary is a named set of selectable elements for custom fields.
type CustomDictionary struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Elements []Element `json:"elements,omitempty,brackets"`
}
// Element is one entry of a CustomDictionary.
type Element struct {
 Name string `json:"name,omitempty"`
 Code string `json:"code,omitempty"`
 Ordering int `json:"ordering,omitempty"`
}
// Activity reports a module's activity state; both fields are always serialized.
type Activity struct {
 Active bool `json:"active"`
 Freeze bool `json:"freeze"`
}
// Tag is a customer tag; Attached reports whether it is set on the queried entity.
type Tag struct {
 Name string `json:"name,omitempty"`
 Color string `json:"color,omitempty"`
 Attached bool `json:"attached,omitempty"`
}
|
/*
Copyright IBM Corporation 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collection_test
import (
"testing"
"github.com/konveyor/move2kube/types"
"github.com/konveyor/move2kube/types/collection"
)
// TestNewImageInfo verifies that a freshly constructed ImageInfo carries the
// expected Kind and APIVersion metadata.
func TestNewImageInfo(t *testing.T) {
 img := collection.NewImageInfo()
 kindOK := img.Kind == string(collection.ImageMetadataKind)
 versionOK := img.APIVersion == types.SchemeGroupVersion.String()
 if !(kindOK && versionOK) {
  t.Fatal("Failed to initialize ImageInfo properly.")
 }
}
|
// +build windows
package main
const (
 // isUnixCli identifies if the test suite is running on a unix platform.
 // This file is compiled only on Windows (see the build tag above), so the
 // value here is always false; a sibling file presumably defines the unix
 // counterpart.
 isUnixCli = false
)
|
package main
import (
"fmt"
"io"
"log"
"os"
"strings"
"github.com/biogo/hts/bam"
"github.com/biogo/hts/sam"
)
// DecodeQual converts raw BAM base qualities to an ASCII (Phred+33) quality
// string as used in FASTQ. Assumes each quality value is <= 93 so that
// value+33 stays in single-byte ASCII (true for valid Phred scores) —
// TODO confirm inputs never exceed that range.
// Fix: the original built a []string of one-character strings and joined
// them (one allocation per base); this writes the bytes directly.
func DecodeQual(qual []byte) string {
 out := make([]byte, len(qual))
 for i, q := range qual {
  out[i] = q + 33
 }
 return string(out)
}
// main converts a 10x-style BAM file (given as the first argument) to FASTQ
// on stdout, folding the cell barcode (CB) and molecular barcode (UB) aux
// tags into the read name when present.
// Fixes: the two log.Fatalf calls were missing format verbs for err
// (go vet printf errors), and os.Args[1] was read without a length check.
func main() {
 if len(os.Args) < 2 {
  log.Fatal("usage: program <file.bam>")
 }
 bamFile := os.Args[1]
 f, err := os.Open(bamFile)
 if err != nil {
  log.Fatalf("could not open file %q: %v", bamFile, err)
 }
 defer f.Close()
 b, err := bam.NewReader(f, 1)
 if err != nil {
  log.Fatalf("could not read bam: %v", err)
 }
 defer b.Close()
 for {
  rec, err := b.Read()
  if err == io.EOF {
   break
  }
  if err != nil {
   log.Fatalf("error reading bam: %v", err)
  }
  if rec == nil {
   continue
  }
  // Read name: @<name>[_<cell barcode>][_<molecular barcode>]
  readName := "@" + rec.Name
  if cb := rec.AuxFields.Get(sam.NewTag("CB")); cb != nil {
   readName = readName + "_" + cb.String()
  }
  if ub := rec.AuxFields.Get(sam.NewTag("UB")); ub != nil {
   readName = readName + "_" + ub.String()
  }
  // FASTQ record: name, sequence, separator, qualities.
  read := []string{readName, string(rec.Seq.Expand()), "+", DecodeQual(rec.Qual)}
  fmt.Println(strings.Join(read, "\n"))
 }
}
|
package x
import (
"errors"
"fmt"
"ms/sun/shared/base"
)
//TODO: WE MUST separate int from string to not let empty string "" from preloading or loading and inserting into caches
// Action - PRIMARY
// Action - ActorUserId
// Blocked - PRIMARY
// Comment - PRIMARY
//field//field//field
///// Generated from index 'PostId'.
// Comment_ByPostId returns the comment for PostId, first consulting the row
// cache and falling back to a DB lookup that also warms the cache. The bool
// result reports whether a row was found. (Generated code — kept as emitted.)
func (c _StoreImpl) Comment_ByPostId(PostId int) (*Comment, bool) {
 o, ok := RowCacheIndex.Get("Comment_PostId:" + fmt.Sprintf("%v", PostId))
 if ok {
  if obj, ok := o.(*Comment); ok {
   return obj, true
  }
 }
 // Cache miss: load from DB and cache under the same key; the final 0 is
 // presumably a no-expiry TTL — confirm RowCacheIndex.Set semantics.
 row, err := NewComment_Selector().PostId_Eq(PostId).GetRow(base.DB)
 if err == nil {
  RowCacheIndex.Set("Comment_PostId:"+fmt.Sprintf("%v", row.PostId), row, 0)
  return row, true
 }
 XOLogErr(err)
 return nil, false
}
// Comment_ByPostId_JustCache is like Comment_ByPostId but never touches the
// DB: it returns the cached row, or (nil, false) after logging the miss.
func (c _StoreImpl) Comment_ByPostId_JustCache(PostId int) (*Comment, bool) {
 o, ok := RowCacheIndex.Get("Comment_PostId:" + fmt.Sprintf("%v", PostId))
 if ok {
  if obj, ok := o.(*Comment); ok {
   return obj, true
  }
 }
 // "secondry" [sic] — typo in the generated message, left unchanged so log
 // greps stay consistent across generated files.
 XOLogErr(errors.New("_JustCache is empty for secondry index " + "Comment_PostId:" + fmt.Sprintf("%v", PostId)))
 return nil, false
}
// PreLoadComment_ByPostIds warms the row cache for the given PostIds,
// fetching only the ones not already cached via a single IN query.
// DB errors are deliberately ignored: callers fall back to per-row loads.
func (c _StoreImpl) PreLoadComment_ByPostIds(PostIds []int) {
 not_cached := make([]int, 0, len(PostIds))
 for _, id := range PostIds {
  _, ok := RowCacheIndex.Get("Comment_PostId:" + fmt.Sprintf("%v", id))
  if !ok {
   not_cached = append(not_cached, id)
  }
 }
 if len(not_cached) > 0 {
  rows, err := NewComment_Selector().PostId_In(not_cached).GetRows(base.DB)
  if err == nil {
   for _, row := range rows {
    RowCacheIndex.Set("Comment_PostId:"+fmt.Sprintf("%v", row.PostId), row, 0)
   }
  }
 }
}
// CommentDeleted - PRIMARY
// Event - PRIMARY
// Followed - PRIMARY
// Likes - PRIMARY
// Likes - PostId
//field//field//field
///// Generated from index 'Id'.
// Likes_ById returns the Likes row for Id. The row cache is consulted first;
// on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Likes_ById(Id int) (*Likes, bool) {
	o, ok := RowCacheIndex.Get("Likes_Id:" + fmt.Sprintf("%v", Id))
	if ok {
		if obj, ok := o.(*Likes); ok {
			return obj, true
		}
	}
	row, err := NewLikes_Selector().Id_Eq(Id).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Likes_Id:"+fmt.Sprintf("%v", row.Id), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Likes_ById_JustCache is the cache-only variant of Likes_ById: a miss is
// logged and reported as not found, without touching the DB.
func (c _StoreImpl) Likes_ById_JustCache(Id int) (*Likes, bool) {
	o, ok := RowCacheIndex.Get("Likes_Id:" + fmt.Sprintf("%v", Id))
	if ok {
		if obj, ok := o.(*Likes); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Likes_Id:" + fmt.Sprintf("%v", Id)))
	return nil, false
}

// PreLoadLikes_ByIds warms the row cache for the given Ids, querying the DB
// only for values that are not already cached.
func (c _StoreImpl) PreLoadLikes_ByIds(Ids []int) {
	not_cached := make([]int, 0, len(Ids))
	for _, id := range Ids {
		_, ok := RowCacheIndex.Get("Likes_Id:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewLikes_Selector().Id_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Likes_Id:"+fmt.Sprintf("%v", row.Id), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'PostId_2'.
// Likes_ByPostId returns the Likes row for PostId (index 'PostId_2'). The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Likes_ByPostId(PostId int) (*Likes, bool) {
	o, ok := RowCacheIndex.Get("Likes_PostId_2:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*Likes); ok {
			return obj, true
		}
	}
	row, err := NewLikes_Selector().PostId_Eq(PostId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Likes_PostId_2:"+fmt.Sprintf("%v", row.PostId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Likes_ByPostId_JustCache is the cache-only variant of Likes_ByPostId: a miss
// is logged and reported as not found, without touching the DB.
func (c _StoreImpl) Likes_ByPostId_JustCache(PostId int) (*Likes, bool) {
	o, ok := RowCacheIndex.Get("Likes_PostId_2:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*Likes); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Likes_PostId_2:" + fmt.Sprintf("%v", PostId)))
	return nil, false
}

// PreLoadLikes_ByPostIds warms the row cache for the given PostIds, querying
// the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadLikes_ByPostIds(PostIds []int) {
	not_cached := make([]int, 0, len(PostIds))
	for _, id := range PostIds {
		_, ok := RowCacheIndex.Get("Likes_PostId_2:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewLikes_Selector().PostId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Likes_PostId_2:"+fmt.Sprintf("%v", row.PostId), row, 0)
			}
		}
	}
}
// Notify - PRIMARY
// Notify - ForUserId
// NotifyRemoved - PRIMARY
// PhoneContacts - PRIMARY
// Post - PRIMARY
//field//field//field
///// Generated from index 'UserId'.
// Post_ByUserId returns the Post row for UserId. The row cache is consulted
// first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Post_ByUserId(UserId int) (*Post, bool) {
	o, ok := RowCacheIndex.Get("Post_UserId:" + fmt.Sprintf("%v", UserId))
	if ok {
		if obj, ok := o.(*Post); ok {
			return obj, true
		}
	}
	row, err := NewPost_Selector().UserId_Eq(UserId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Post_UserId:"+fmt.Sprintf("%v", row.UserId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Post_ByUserId_JustCache is the cache-only variant of Post_ByUserId: a miss
// is logged and reported as not found, without touching the DB.
func (c _StoreImpl) Post_ByUserId_JustCache(UserId int) (*Post, bool) {
	o, ok := RowCacheIndex.Get("Post_UserId:" + fmt.Sprintf("%v", UserId))
	if ok {
		if obj, ok := o.(*Post); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Post_UserId:" + fmt.Sprintf("%v", UserId)))
	return nil, false
}

// PreLoadPost_ByUserIds warms the row cache for the given UserIds, querying
// the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPost_ByUserIds(UserIds []int) {
	not_cached := make([]int, 0, len(UserIds))
	for _, id := range UserIds {
		_, ok := RowCacheIndex.Get("Post_UserId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPost_Selector().UserId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Post_UserId:"+fmt.Sprintf("%v", row.UserId), row, 0)
			}
		}
	}
}
// PostCount - PRIMARY
// PostDeleted - PRIMARY
// PostKeys - PRIMARY
//field//field//field
///// Generated from index 'PostKey'.
// PostKeys_ByPostKeyStr returns the PostKeys row for PostKeyStr (index
// 'PostKey'). The row cache is consulted first; on a miss the row is fetched
// from the DB and cached.
func (c _StoreImpl) PostKeys_ByPostKeyStr(PostKeyStr string) (*PostKeys, bool) {
	o, ok := RowCacheIndex.Get("PostKeys_PostKey:" + fmt.Sprintf("%v", PostKeyStr))
	if ok {
		if obj, ok := o.(*PostKeys); ok {
			return obj, true
		}
	}
	row, err := NewPostKeys_Selector().PostKeyStr_Eq(PostKeyStr).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostKeys_PostKey:"+fmt.Sprintf("%v", row.PostKeyStr), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostKeys_ByPostKeyStr_JustCache is the cache-only variant of
// PostKeys_ByPostKeyStr: a miss is logged and reported as not found,
// without touching the DB.
func (c _StoreImpl) PostKeys_ByPostKeyStr_JustCache(PostKeyStr string) (*PostKeys, bool) {
	o, ok := RowCacheIndex.Get("PostKeys_PostKey:" + fmt.Sprintf("%v", PostKeyStr))
	if ok {
		if obj, ok := o.(*PostKeys); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostKeys_PostKey:" + fmt.Sprintf("%v", PostKeyStr)))
	return nil, false
}

// PreLoadPostKeys_ByPostKeyStrs warms the row cache for the given PostKeyStrs,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostKeys_ByPostKeyStrs(PostKeyStrs []string) {
	not_cached := make([]string, 0, len(PostKeyStrs))
	for _, id := range PostKeyStrs {
		_, ok := RowCacheIndex.Get("PostKeys_PostKey:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostKeys_Selector().PostKeyStr_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostKeys_PostKey:"+fmt.Sprintf("%v", row.PostKeyStr), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'Used'.
// PostKeys_ByUsed returns the PostKeys row for Used. The row cache is
// consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) PostKeys_ByUsed(Used int) (*PostKeys, bool) {
	o, ok := RowCacheIndex.Get("PostKeys_Used:" + fmt.Sprintf("%v", Used))
	if ok {
		if obj, ok := o.(*PostKeys); ok {
			return obj, true
		}
	}
	row, err := NewPostKeys_Selector().Used_Eq(Used).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostKeys_Used:"+fmt.Sprintf("%v", row.Used), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostKeys_ByUsed_JustCache is the cache-only variant of PostKeys_ByUsed: a
// miss is logged and reported as not found, without touching the DB.
func (c _StoreImpl) PostKeys_ByUsed_JustCache(Used int) (*PostKeys, bool) {
	o, ok := RowCacheIndex.Get("PostKeys_Used:" + fmt.Sprintf("%v", Used))
	if ok {
		if obj, ok := o.(*PostKeys); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostKeys_Used:" + fmt.Sprintf("%v", Used)))
	return nil, false
}

// PreLoadPostKeys_ByUseds warms the row cache for the given Useds, querying
// the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostKeys_ByUseds(Useds []int) {
	not_cached := make([]int, 0, len(Useds))
	for _, id := range Useds {
		_, ok := RowCacheIndex.Get("PostKeys_Used:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostKeys_Selector().Used_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostKeys_Used:"+fmt.Sprintf("%v", row.Used), row, 0)
			}
		}
	}
}
// PostLink - PRIMARY
// PostMedia - PRIMARY
//field//field//field
///// Generated from index 'HashMd5'.
// PostMedia_ByMd5Hash returns the PostMedia row for Md5Hash (index 'HashMd5').
// The row cache is consulted first; on a miss the row is fetched from the DB
// and cached.
func (c _StoreImpl) PostMedia_ByMd5Hash(Md5Hash string) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_HashMd5:" + fmt.Sprintf("%v", Md5Hash))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	row, err := NewPostMedia_Selector().Md5Hash_Eq(Md5Hash).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostMedia_HashMd5:"+fmt.Sprintf("%v", row.Md5Hash), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostMedia_ByMd5Hash_JustCache is the cache-only variant of
// PostMedia_ByMd5Hash: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) PostMedia_ByMd5Hash_JustCache(Md5Hash string) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_HashMd5:" + fmt.Sprintf("%v", Md5Hash))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostMedia_HashMd5:" + fmt.Sprintf("%v", Md5Hash)))
	return nil, false
}

// PreLoadPostMedia_ByMd5Hashs warms the row cache for the given Md5Hashs,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostMedia_ByMd5Hashs(Md5Hashs []string) {
	not_cached := make([]string, 0, len(Md5Hashs))
	for _, id := range Md5Hashs {
		_, ok := RowCacheIndex.Get("PostMedia_HashMd5:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostMedia_Selector().Md5Hash_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostMedia_HashMd5:"+fmt.Sprintf("%v", row.Md5Hash), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'CreatedTime'.
// PostMedia_ByCreatedTime returns the PostMedia row for CreatedTime. The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) PostMedia_ByCreatedTime(CreatedTime int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_CreatedTime:" + fmt.Sprintf("%v", CreatedTime))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	row, err := NewPostMedia_Selector().CreatedTime_Eq(CreatedTime).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostMedia_CreatedTime:"+fmt.Sprintf("%v", row.CreatedTime), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostMedia_ByCreatedTime_JustCache is the cache-only variant of
// PostMedia_ByCreatedTime: a miss is logged and reported as not found,
// without touching the DB.
func (c _StoreImpl) PostMedia_ByCreatedTime_JustCache(CreatedTime int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_CreatedTime:" + fmt.Sprintf("%v", CreatedTime))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostMedia_CreatedTime:" + fmt.Sprintf("%v", CreatedTime)))
	return nil, false
}

// PreLoadPostMedia_ByCreatedTimes warms the row cache for the given
// CreatedTimes, querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostMedia_ByCreatedTimes(CreatedTimes []int) {
	not_cached := make([]int, 0, len(CreatedTimes))
	for _, id := range CreatedTimes {
		_, ok := RowCacheIndex.Get("PostMedia_CreatedTime:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostMedia_Selector().CreatedTime_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostMedia_CreatedTime:"+fmt.Sprintf("%v", row.CreatedTime), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'AlbumId'.
// PostMedia_ByAlbumId returns the PostMedia row for AlbumId. The row cache is
// consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) PostMedia_ByAlbumId(AlbumId int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_AlbumId:" + fmt.Sprintf("%v", AlbumId))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	row, err := NewPostMedia_Selector().AlbumId_Eq(AlbumId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostMedia_AlbumId:"+fmt.Sprintf("%v", row.AlbumId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostMedia_ByAlbumId_JustCache is the cache-only variant of
// PostMedia_ByAlbumId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) PostMedia_ByAlbumId_JustCache(AlbumId int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_AlbumId:" + fmt.Sprintf("%v", AlbumId))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostMedia_AlbumId:" + fmt.Sprintf("%v", AlbumId)))
	return nil, false
}

// PreLoadPostMedia_ByAlbumIds warms the row cache for the given AlbumIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostMedia_ByAlbumIds(AlbumIds []int) {
	not_cached := make([]int, 0, len(AlbumIds))
	for _, id := range AlbumIds {
		_, ok := RowCacheIndex.Get("PostMedia_AlbumId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostMedia_Selector().AlbumId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostMedia_AlbumId:"+fmt.Sprintf("%v", row.AlbumId), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'PostId2'.
// PostMedia_ByPostId returns the PostMedia row for PostId (index 'PostId2').
// The row cache is consulted first; on a miss the row is fetched from the DB
// and cached.
func (c _StoreImpl) PostMedia_ByPostId(PostId int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_PostId2:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	row, err := NewPostMedia_Selector().PostId_Eq(PostId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PostMedia_PostId2:"+fmt.Sprintf("%v", row.PostId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PostMedia_ByPostId_JustCache is the cache-only variant of
// PostMedia_ByPostId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) PostMedia_ByPostId_JustCache(PostId int) (*PostMedia, bool) {
	o, ok := RowCacheIndex.Get("PostMedia_PostId2:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*PostMedia); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "PostMedia_PostId2:" + fmt.Sprintf("%v", PostId)))
	return nil, false
}

// PreLoadPostMedia_ByPostIds warms the row cache for the given PostIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadPostMedia_ByPostIds(PostIds []int) {
	not_cached := make([]int, 0, len(PostIds))
	for _, id := range PostIds {
		_, ok := RowCacheIndex.Get("PostMedia_PostId2:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPostMedia_Selector().PostId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PostMedia_PostId2:"+fmt.Sprintf("%v", row.PostId), row, 0)
			}
		}
	}
}
// PostPromoted - PRIMARY
// PostReshared - PRIMARY
// ProfileAll - PRIMARY
// ProfileMedia - PRIMARY
// ProfileMentioned - PRIMARY
// Session - PRIMARY
// Session - SessionUuid_2
//field//field//field
///// Generated from index 'UserId'.
// Session_ByUserId returns the Session row for UserId. The row cache is
// consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Session_ByUserId(UserId int) (*Session, bool) {
	o, ok := RowCacheIndex.Get("Session_UserId:" + fmt.Sprintf("%v", UserId))
	if ok {
		if obj, ok := o.(*Session); ok {
			return obj, true
		}
	}
	row, err := NewSession_Selector().UserId_Eq(UserId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Session_UserId:"+fmt.Sprintf("%v", row.UserId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Session_ByUserId_JustCache is the cache-only variant of Session_ByUserId: a
// miss is logged and reported as not found, without touching the DB.
func (c _StoreImpl) Session_ByUserId_JustCache(UserId int) (*Session, bool) {
	o, ok := RowCacheIndex.Get("Session_UserId:" + fmt.Sprintf("%v", UserId))
	if ok {
		if obj, ok := o.(*Session); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Session_UserId:" + fmt.Sprintf("%v", UserId)))
	return nil, false
}

// PreLoadSession_ByUserIds warms the row cache for the given UserIds, querying
// the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadSession_ByUserIds(UserIds []int) {
	not_cached := make([]int, 0, len(UserIds))
	for _, id := range UserIds {
		_, ok := RowCacheIndex.Get("Session_UserId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewSession_Selector().UserId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Session_UserId:"+fmt.Sprintf("%v", row.UserId), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'SessionUuid'.
// Session_BySessionUuid returns the Session row for SessionUuid. The row cache
// is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Session_BySessionUuid(SessionUuid string) (*Session, bool) {
	o, ok := RowCacheIndex.Get("Session_SessionUuid:" + fmt.Sprintf("%v", SessionUuid))
	if ok {
		if obj, ok := o.(*Session); ok {
			return obj, true
		}
	}
	row, err := NewSession_Selector().SessionUuid_Eq(SessionUuid).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Session_SessionUuid:"+fmt.Sprintf("%v", row.SessionUuid), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Session_BySessionUuid_JustCache is the cache-only variant of
// Session_BySessionUuid: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) Session_BySessionUuid_JustCache(SessionUuid string) (*Session, bool) {
	o, ok := RowCacheIndex.Get("Session_SessionUuid:" + fmt.Sprintf("%v", SessionUuid))
	if ok {
		if obj, ok := o.(*Session); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Session_SessionUuid:" + fmt.Sprintf("%v", SessionUuid)))
	return nil, false
}

// PreLoadSession_BySessionUuids warms the row cache for the given
// SessionUuids, querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadSession_BySessionUuids(SessionUuids []string) {
	not_cached := make([]string, 0, len(SessionUuids))
	for _, id := range SessionUuids {
		_, ok := RowCacheIndex.Get("Session_SessionUuid:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewSession_Selector().SessionUuid_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Session_SessionUuid:"+fmt.Sprintf("%v", row.SessionUuid), row, 0)
			}
		}
	}
}
// SettingNotifications - PRIMARY
// Sms - PRIMARY
// Tag - PRIMARY
//field//field//field
///// Generated from index 'Name'.
// Tag_ByName returns the Tag row for Name. The row cache is consulted first;
// on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Tag_ByName(Name string) (*Tag, bool) {
	o, ok := RowCacheIndex.Get("Tag_Name:" + fmt.Sprintf("%v", Name))
	if ok {
		if obj, ok := o.(*Tag); ok {
			return obj, true
		}
	}
	row, err := NewTag_Selector().Name_Eq(Name).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Tag_Name:"+fmt.Sprintf("%v", row.Name), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Tag_ByName_JustCache is the cache-only variant of Tag_ByName: a miss is
// logged and reported as not found, without touching the DB.
func (c _StoreImpl) Tag_ByName_JustCache(Name string) (*Tag, bool) {
	o, ok := RowCacheIndex.Get("Tag_Name:" + fmt.Sprintf("%v", Name))
	if ok {
		if obj, ok := o.(*Tag); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Tag_Name:" + fmt.Sprintf("%v", Name)))
	return nil, false
}

// PreLoadTag_ByNames warms the row cache for the given Names, querying the DB
// only for values that are not already cached.
func (c _StoreImpl) PreLoadTag_ByNames(Names []string) {
	not_cached := make([]string, 0, len(Names))
	for _, id := range Names {
		_, ok := RowCacheIndex.Get("Tag_Name:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewTag_Selector().Name_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Tag_Name:"+fmt.Sprintf("%v", row.Name), row, 0)
			}
		}
	}
}
// TagPost - PRIMARY
// TagPost - TagId
// TriggerLog - PRIMARY
//field//field//field
///// Generated from index 'CreatedSe'.
// TriggerLog_ByCreatedSe returns the TriggerLog row for CreatedSe. The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) TriggerLog_ByCreatedSe(CreatedSe int) (*TriggerLog, bool) {
	o, ok := RowCacheIndex.Get("TriggerLog_CreatedSe:" + fmt.Sprintf("%v", CreatedSe))
	if ok {
		if obj, ok := o.(*TriggerLog); ok {
			return obj, true
		}
	}
	row, err := NewTriggerLog_Selector().CreatedSe_Eq(CreatedSe).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("TriggerLog_CreatedSe:"+fmt.Sprintf("%v", row.CreatedSe), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// TriggerLog_ByCreatedSe_JustCache is the cache-only variant of
// TriggerLog_ByCreatedSe: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) TriggerLog_ByCreatedSe_JustCache(CreatedSe int) (*TriggerLog, bool) {
	o, ok := RowCacheIndex.Get("TriggerLog_CreatedSe:" + fmt.Sprintf("%v", CreatedSe))
	if ok {
		if obj, ok := o.(*TriggerLog); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "TriggerLog_CreatedSe:" + fmt.Sprintf("%v", CreatedSe)))
	return nil, false
}

// PreLoadTriggerLog_ByCreatedSes warms the row cache for the given CreatedSes,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadTriggerLog_ByCreatedSes(CreatedSes []int) {
	not_cached := make([]int, 0, len(CreatedSes))
	for _, id := range CreatedSes {
		_, ok := RowCacheIndex.Get("TriggerLog_CreatedSe:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewTriggerLog_Selector().CreatedSe_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("TriggerLog_CreatedSe:"+fmt.Sprintf("%v", row.CreatedSe), row, 0)
			}
		}
	}
}
// User - PRIMARY
//field//field//field
///// Generated from index 'UserName'.
// User_ByUserName returns the User row for UserName. The row cache is
// consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) User_ByUserName(UserName string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_UserName:" + fmt.Sprintf("%v", UserName))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	row, err := NewUser_Selector().UserName_Eq(UserName).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("User_UserName:"+fmt.Sprintf("%v", row.UserName), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// User_ByUserName_JustCache is the cache-only variant of User_ByUserName: a
// miss is logged and reported as not found, without touching the DB.
func (c _StoreImpl) User_ByUserName_JustCache(UserName string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_UserName:" + fmt.Sprintf("%v", UserName))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "User_UserName:" + fmt.Sprintf("%v", UserName)))
	return nil, false
}

// PreLoadUser_ByUserNames warms the row cache for the given UserNames,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadUser_ByUserNames(UserNames []string) {
	not_cached := make([]string, 0, len(UserNames))
	for _, id := range UserNames {
		_, ok := RowCacheIndex.Get("User_UserName:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewUser_Selector().UserName_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("User_UserName:"+fmt.Sprintf("%v", row.UserName), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'Phone'.
// User_ByPhone returns the User row for Phone. The row cache is consulted
// first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) User_ByPhone(Phone string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_Phone:" + fmt.Sprintf("%v", Phone))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	row, err := NewUser_Selector().Phone_Eq(Phone).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("User_Phone:"+fmt.Sprintf("%v", row.Phone), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// User_ByPhone_JustCache is the cache-only variant of User_ByPhone: a miss is
// logged and reported as not found, without touching the DB.
func (c _StoreImpl) User_ByPhone_JustCache(Phone string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_Phone:" + fmt.Sprintf("%v", Phone))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "User_Phone:" + fmt.Sprintf("%v", Phone)))
	return nil, false
}

// PreLoadUser_ByPhones warms the row cache for the given Phones, querying the
// DB only for values that are not already cached.
func (c _StoreImpl) PreLoadUser_ByPhones(Phones []string) {
	not_cached := make([]string, 0, len(Phones))
	for _, id := range Phones {
		_, ok := RowCacheIndex.Get("User_Phone:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewUser_Selector().Phone_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("User_Phone:"+fmt.Sprintf("%v", row.Phone), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'UserNameLower'.
// User_ByUserNameLower returns the User row for UserNameLower. The row cache
// is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) User_ByUserNameLower(UserNameLower string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_UserNameLower:" + fmt.Sprintf("%v", UserNameLower))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	row, err := NewUser_Selector().UserNameLower_Eq(UserNameLower).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("User_UserNameLower:"+fmt.Sprintf("%v", row.UserNameLower), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// User_ByUserNameLower_JustCache is the cache-only variant of
// User_ByUserNameLower: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) User_ByUserNameLower_JustCache(UserNameLower string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_UserNameLower:" + fmt.Sprintf("%v", UserNameLower))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "User_UserNameLower:" + fmt.Sprintf("%v", UserNameLower)))
	return nil, false
}

// PreLoadUser_ByUserNameLowers warms the row cache for the given
// UserNameLowers, querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadUser_ByUserNameLowers(UserNameLowers []string) {
	not_cached := make([]string, 0, len(UserNameLowers))
	for _, id := range UserNameLowers {
		_, ok := RowCacheIndex.Get("User_UserNameLower:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewUser_Selector().UserNameLower_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("User_UserNameLower:"+fmt.Sprintf("%v", row.UserNameLower), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'Email'.
// User_ByEmail returns the User row for Email. The row cache is consulted
// first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) User_ByEmail(Email string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_Email:" + fmt.Sprintf("%v", Email))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	row, err := NewUser_Selector().Email_Eq(Email).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("User_Email:"+fmt.Sprintf("%v", row.Email), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// User_ByEmail_JustCache is the cache-only variant of User_ByEmail: a miss is
// logged and reported as not found, without touching the DB.
func (c _StoreImpl) User_ByEmail_JustCache(Email string) (*User, bool) {
	o, ok := RowCacheIndex.Get("User_Email:" + fmt.Sprintf("%v", Email))
	if ok {
		if obj, ok := o.(*User); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "User_Email:" + fmt.Sprintf("%v", Email)))
	return nil, false
}

// PreLoadUser_ByEmails warms the row cache for the given Emails, querying the
// DB only for values that are not already cached.
func (c _StoreImpl) PreLoadUser_ByEmails(Emails []string) {
	not_cached := make([]string, 0, len(Emails))
	for _, id := range Emails {
		_, ok := RowCacheIndex.Get("User_Email:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewUser_Selector().Email_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("User_Email:"+fmt.Sprintf("%v", row.Email), row, 0)
			}
		}
	}
}
// UserRelation - PRIMARY
// Chat - PRIMARY
//field//field//field
///// Generated from index 'RoomKey'.
// Chat_ByRoomKey returns the Chat row for RoomKey. The row cache is consulted
// first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) Chat_ByRoomKey(RoomKey string) (*Chat, bool) {
	o, ok := RowCacheIndex.Get("Chat_RoomKey:" + fmt.Sprintf("%v", RoomKey))
	if ok {
		if obj, ok := o.(*Chat); ok {
			return obj, true
		}
	}
	row, err := NewChat_Selector().RoomKey_Eq(RoomKey).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("Chat_RoomKey:"+fmt.Sprintf("%v", row.RoomKey), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// Chat_ByRoomKey_JustCache is the cache-only variant of Chat_ByRoomKey: a miss
// is logged and reported as not found, without touching the DB.
func (c _StoreImpl) Chat_ByRoomKey_JustCache(RoomKey string) (*Chat, bool) {
	o, ok := RowCacheIndex.Get("Chat_RoomKey:" + fmt.Sprintf("%v", RoomKey))
	if ok {
		if obj, ok := o.(*Chat); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "Chat_RoomKey:" + fmt.Sprintf("%v", RoomKey)))
	return nil, false
}

// PreLoadChat_ByRoomKeys warms the row cache for the given RoomKeys, querying
// the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadChat_ByRoomKeys(RoomKeys []string) {
	not_cached := make([]string, 0, len(RoomKeys))
	for _, id := range RoomKeys {
		_, ok := RowCacheIndex.Get("Chat_RoomKey:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewChat_Selector().RoomKey_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("Chat_RoomKey:"+fmt.Sprintf("%v", row.RoomKey), row, 0)
			}
		}
	}
}
// ChatDeleted - PRIMARY
// ChatDeleted - ChatId
// ChatLastMessage - PRIMARY
// ChatUserVersion - PRIMARY
// Group - PRIMARY
// GroupMember - PRIMARY
//field//field//field
///// Generated from index 'Id'.
// GroupMember_ByOrderId returns the GroupMember row for OrderId (index 'Id';
// note the cache key uses the index name, not the Go field name). The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) GroupMember_ByOrderId(OrderId int) (*GroupMember, bool) {
	o, ok := RowCacheIndex.Get("GroupMember_Id:" + fmt.Sprintf("%v", OrderId))
	if ok {
		if obj, ok := o.(*GroupMember); ok {
			return obj, true
		}
	}
	row, err := NewGroupMember_Selector().OrderId_Eq(OrderId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("GroupMember_Id:"+fmt.Sprintf("%v", row.OrderId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// GroupMember_ByOrderId_JustCache is the cache-only variant of
// GroupMember_ByOrderId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) GroupMember_ByOrderId_JustCache(OrderId int) (*GroupMember, bool) {
	o, ok := RowCacheIndex.Get("GroupMember_Id:" + fmt.Sprintf("%v", OrderId))
	if ok {
		if obj, ok := o.(*GroupMember); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "GroupMember_Id:" + fmt.Sprintf("%v", OrderId)))
	return nil, false
}

// PreLoadGroupMember_ByOrderIds warms the row cache for the given OrderIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadGroupMember_ByOrderIds(OrderIds []int) {
	not_cached := make([]int, 0, len(OrderIds))
	for _, id := range OrderIds {
		_, ok := RowCacheIndex.Get("GroupMember_Id:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewGroupMember_Selector().OrderId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("GroupMember_Id:"+fmt.Sprintf("%v", row.OrderId), row, 0)
			}
		}
	}
}
// GroupOrderdUser - PRIMARY
//field//field//field
///// Generated from index 'GroupId'.
// GroupOrderdUser_ByGroupId returns the GroupOrderdUser row for GroupId. The
// row cache is consulted first; on a miss the row is fetched from the DB and
// cached.
func (c _StoreImpl) GroupOrderdUser_ByGroupId(GroupId int) (*GroupOrderdUser, bool) {
	o, ok := RowCacheIndex.Get("GroupOrderdUser_GroupId:" + fmt.Sprintf("%v", GroupId))
	if ok {
		if obj, ok := o.(*GroupOrderdUser); ok {
			return obj, true
		}
	}
	row, err := NewGroupOrderdUser_Selector().GroupId_Eq(GroupId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("GroupOrderdUser_GroupId:"+fmt.Sprintf("%v", row.GroupId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// GroupOrderdUser_ByGroupId_JustCache is the cache-only variant of
// GroupOrderdUser_ByGroupId: a miss is logged and reported as not found,
// without touching the DB.
func (c _StoreImpl) GroupOrderdUser_ByGroupId_JustCache(GroupId int) (*GroupOrderdUser, bool) {
	o, ok := RowCacheIndex.Get("GroupOrderdUser_GroupId:" + fmt.Sprintf("%v", GroupId))
	if ok {
		if obj, ok := o.(*GroupOrderdUser); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "GroupOrderdUser_GroupId:" + fmt.Sprintf("%v", GroupId)))
	return nil, false
}

// PreLoadGroupOrderdUser_ByGroupIds warms the row cache for the given
// GroupIds, querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadGroupOrderdUser_ByGroupIds(GroupIds []int) {
	not_cached := make([]int, 0, len(GroupIds))
	for _, id := range GroupIds {
		_, ok := RowCacheIndex.Get("GroupOrderdUser_GroupId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewGroupOrderdUser_Selector().GroupId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("GroupOrderdUser_GroupId:"+fmt.Sprintf("%v", row.GroupId), row, 0)
			}
		}
	}
}
// GroupPinedMsg - PRIMARY
// FileMsg - PRIMARY
// FilePost - PRIMARY
// ActionFanout - PRIMARY
// ActionFanout - ForUserId
//field//field//field
///// Generated from index 'ActionId'.
// ActionFanout_ByActionId returns the ActionFanout row for ActionId. The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) ActionFanout_ByActionId(ActionId int) (*ActionFanout, bool) {
	o, ok := RowCacheIndex.Get("ActionFanout_ActionId:" + fmt.Sprintf("%v", ActionId))
	if ok {
		if obj, ok := o.(*ActionFanout); ok {
			return obj, true
		}
	}
	row, err := NewActionFanout_Selector().ActionId_Eq(ActionId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("ActionFanout_ActionId:"+fmt.Sprintf("%v", row.ActionId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// ActionFanout_ByActionId_JustCache is the cache-only variant of
// ActionFanout_ByActionId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) ActionFanout_ByActionId_JustCache(ActionId int) (*ActionFanout, bool) {
	o, ok := RowCacheIndex.Get("ActionFanout_ActionId:" + fmt.Sprintf("%v", ActionId))
	if ok {
		if obj, ok := o.(*ActionFanout); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "ActionFanout_ActionId:" + fmt.Sprintf("%v", ActionId)))
	return nil, false
}

// PreLoadActionFanout_ByActionIds warms the row cache for the given ActionIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadActionFanout_ByActionIds(ActionIds []int) {
	not_cached := make([]int, 0, len(ActionIds))
	for _, id := range ActionIds {
		_, ok := RowCacheIndex.Get("ActionFanout_ActionId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewActionFanout_Selector().ActionId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("ActionFanout_ActionId:"+fmt.Sprintf("%v", row.ActionId), row, 0)
			}
		}
	}
}
// ActionFanout - ForUserId_2
// HomeFanout - PRIMARY
// HomeFanout - ForUserId_2
//field//field//field
///// Generated from index 'PostId'.
// HomeFanout_ByPostId returns the HomeFanout row for PostId. The row cache is
// consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) HomeFanout_ByPostId(PostId int) (*HomeFanout, bool) {
	o, ok := RowCacheIndex.Get("HomeFanout_PostId:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*HomeFanout); ok {
			return obj, true
		}
	}
	row, err := NewHomeFanout_Selector().PostId_Eq(PostId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("HomeFanout_PostId:"+fmt.Sprintf("%v", row.PostId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// HomeFanout_ByPostId_JustCache is the cache-only variant of
// HomeFanout_ByPostId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) HomeFanout_ByPostId_JustCache(PostId int) (*HomeFanout, bool) {
	o, ok := RowCacheIndex.Get("HomeFanout_PostId:" + fmt.Sprintf("%v", PostId))
	if ok {
		if obj, ok := o.(*HomeFanout); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "HomeFanout_PostId:" + fmt.Sprintf("%v", PostId)))
	return nil, false
}

// PreLoadHomeFanout_ByPostIds warms the row cache for the given PostIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadHomeFanout_ByPostIds(PostIds []int) {
	not_cached := make([]int, 0, len(PostIds))
	for _, id := range PostIds {
		_, ok := RowCacheIndex.Get("HomeFanout_PostId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewHomeFanout_Selector().PostId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("HomeFanout_PostId:"+fmt.Sprintf("%v", row.PostId), row, 0)
			}
		}
	}
}
//field//field//field
///// Generated from index 'ForUserId'.
// HomeFanout_ByForUserId returns the HomeFanout row for ForUserId. The row
// cache is consulted first; on a miss the row is fetched from the DB and cached.
func (c _StoreImpl) HomeFanout_ByForUserId(ForUserId int) (*HomeFanout, bool) {
	o, ok := RowCacheIndex.Get("HomeFanout_ForUserId:" + fmt.Sprintf("%v", ForUserId))
	if ok {
		if obj, ok := o.(*HomeFanout); ok {
			return obj, true
		}
	}
	row, err := NewHomeFanout_Selector().ForUserId_Eq(ForUserId).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("HomeFanout_ForUserId:"+fmt.Sprintf("%v", row.ForUserId), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// HomeFanout_ByForUserId_JustCache is the cache-only variant of
// HomeFanout_ByForUserId: a miss is logged and reported as not found, without
// touching the DB.
func (c _StoreImpl) HomeFanout_ByForUserId_JustCache(ForUserId int) (*HomeFanout, bool) {
	o, ok := RowCacheIndex.Get("HomeFanout_ForUserId:" + fmt.Sprintf("%v", ForUserId))
	if ok {
		if obj, ok := o.(*HomeFanout); ok {
			return obj, true
		}
	}
	// Spelling fixed in logged message: "secondry" -> "secondary".
	XOLogErr(errors.New("_JustCache is empty for secondary index " + "HomeFanout_ForUserId:" + fmt.Sprintf("%v", ForUserId)))
	return nil, false
}

// PreLoadHomeFanout_ByForUserIds warms the row cache for the given ForUserIds,
// querying the DB only for values that are not already cached.
func (c _StoreImpl) PreLoadHomeFanout_ByForUserIds(ForUserIds []int) {
	not_cached := make([]int, 0, len(ForUserIds))
	for _, id := range ForUserIds {
		_, ok := RowCacheIndex.Get("HomeFanout_ForUserId:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewHomeFanout_Selector().ForUserId_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("HomeFanout_ForUserId:"+fmt.Sprintf("%v", row.ForUserId), row, 0)
			}
		}
	}
}
// SuggestedTopPosts - PRIMARY
// SuggestedUser - PRIMARY
// PushChat - PRIMARY
//field//field//field
///// Generated from index 'Seq'.
// PushChat_BySeq returns the PushChat row for the secondary index 'Seq',
// consulting the row cache first and falling back to a DB query whose result
// is then cached. ok is false when the row cannot be found anywhere.
func (c _StoreImpl) PushChat_BySeq(Seq int) (*PushChat, bool) {
	o, ok := RowCacheIndex.Get("PushChat_Seq:" + fmt.Sprintf("%v", Seq))
	if ok {
		if obj, ok := o.(*PushChat); ok {
			return obj, true
		}
	}
	row, err := NewPushChat_Selector().Seq_Eq(Seq).GetRow(base.DB)
	if err == nil {
		RowCacheIndex.Set("PushChat_Seq:"+fmt.Sprintf("%v", row.Seq), row, 0)
		return row, true
	}
	XOLogErr(err)
	return nil, false
}

// PushChat_BySeq_JustCache is the cache-only variant: it never queries the
// DB and logs an error when the key is absent from the cache.
func (c _StoreImpl) PushChat_BySeq_JustCache(Seq int) (*PushChat, bool) {
	o, ok := RowCacheIndex.Get("PushChat_Seq:" + fmt.Sprintf("%v", Seq))
	if ok {
		if obj, ok := o.(*PushChat); ok {
			return obj, true
		}
	}
	// "secondry" typo comes from the code generator; kept unchanged.
	XOLogErr(errors.New("_JustCache is empty for secondry index " + "PushChat_Seq:" + fmt.Sprintf("%v", Seq)))
	return nil, false
}
// PreLoadPushChat_BySeqs warms the row cache for the 'Seq' index: already
// cached ids are skipped, the rest are fetched in one IN query and cached.
// Query errors are ignored (best-effort preload).
func (c _StoreImpl) PreLoadPushChat_BySeqs(Seqs []int) {
	not_cached := make([]int, 0, len(Seqs))
	for _, id := range Seqs {
		_, ok := RowCacheIndex.Get("PushChat_Seq:" + fmt.Sprintf("%v", id))
		if !ok {
			not_cached = append(not_cached, id)
		}
	}
	if len(not_cached) > 0 {
		rows, err := NewPushChat_Selector().Seq_In(not_cached).GetRows(base.DB)
		if err == nil {
			for _, row := range rows {
				RowCacheIndex.Set("PushChat_Seq:"+fmt.Sprintf("%v", row.Seq), row, 0)
			}
		}
	}
}
// HTTPRPCLog - PRIMARY
// MetricLog - PRIMARY
// XfileServiceInfoLog - PRIMARY
// XfileServiceMetricLog - PRIMARY
// XfileServiceRequestLog - PRIMARY
// InvalidateCache - PRIMARY
|
/*
Fast Random Generator for use from single threaded data injection code. It is
especially useful if you need to load test a service while generating random
data. It that case, the random generator locks can become a bottleneck.
*/
package random // import "fluux.io/random"
import (
"math/rand"
"strconv"
"strings"
"time"
"unsafe"
"github.com/golang/protobuf/ptypes/wrappers"
)
//=============================================================================
// Unsafe but faster random generator
// RandomUnsafe is a structure wrapping non-thread safe random generator
// to use from a single go routine.
// It is more efficient than the default generator as it avoid using the mutex
// locks used as default for thread safety.
// It is intended to be used in part of code that use random value heavily.
// RandomUnsafe wraps a non-thread-safe random source for use from a single
// goroutine (see the comments above for the rationale).
type RandomUnsafe struct {
	src *rand.Rand
	// prealloc holds stringSeedSize pre-generated random letters;
	// FixedLenString serves strings as windows into this buffer instead of
	// generating fresh bytes on each call.
	prealloc []byte
	// boolcache holds up to 63 random bits and boolcount how many remain;
	// Bool consumes one bit per call so src.Int63 is hit only every 63 calls.
	boolcache int64
	boolcount int
}

// letters and numbers are the alphabets used by the string generators;
// stringSeedSize is the size of the preallocated random-letter buffer.
const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
const numbers = "0123456789"
const stringSeedSize = 10000
// NewRandomUnsafe creates an initialized random generator to use from a
// single go routine. The letter pool used by FixedLenString is filled once
// here.
func NewRandomUnsafe() RandomUnsafe {
	// Seed with nanosecond resolution: the previous Unix() seed had only
	// second granularity, so two generators created within the same second
	// produced identical sequences.
	src := rand.New(rand.NewSource(time.Now().UnixNano()))
	prealloc := make([]byte, stringSeedSize)
	for i := range prealloc {
		prealloc[i] = letters[src.Int63()%int64(len(letters))]
	}
	return RandomUnsafe{src: src, prealloc: prealloc}
}
// NumString returns a random string of n decimal digits.
func (r *RandomUnsafe) NumString(n int) string {
	digits := make([]byte, n)
	for i := 0; i < n; i++ {
		digits[i] = numbers[r.src.Int63()%int64(len(numbers))]
	}
	return ByteSliceToString(digits)
}
// Length generates an integer between min and max. Note that max itself is
// only produced when min == max; otherwise the result lies in [min, max).
// A reversed range (min > max) yields 0.
func (r *RandomUnsafe) Length(min, max int) int {
	switch {
	case min > max:
		return 0
	case min == max:
		return min
	default:
		return min + r.src.Intn(max-min)
	}
}
// String returns a random string whose length is chosen between min and max
// via Length.
func (r *RandomUnsafe) String(min, max int) string {
	return r.FixedLenString(r.Length(min, max))
}
// FixedLenString returns a random string of n chars served from the
// preallocated letter pool.
//
// Fix: the previous version panicked for n >= stringSeedSize (rand.Intn
// requires a strictly positive argument) and for negative n; both cases are
// now handled.
func (r *RandomUnsafe) FixedLenString(n int) string {
	if n <= 0 {
		return ""
	}
	if n < stringSeedSize {
		// Serve the string as a random window into the preallocated pool.
		pos := r.src.Intn(stringSeedSize - n)
		return ByteSliceToString(r.prealloc[pos : pos+n])
	}
	// Request is at least as large as the pool: stitch copies of the pool
	// together. Randomness quality is bounded by the pool contents, which
	// matches the class of guarantees this generator already makes.
	b := make([]byte, 0, n)
	for len(b) < n {
		take := n - len(b)
		if take > stringSeedSize {
			take = stringSeedSize
		}
		b = append(b, r.prealloc[:take]...)
	}
	return ByteSliceToString(b)
}
// Bool returns a random boolean. A single 63-bit random value is drawn and
// handed out one bit at a time, so the underlying generator is only hit once
// every 63 calls.
func (r *RandomUnsafe) Bool() bool {
	if r.boolcount == 0 {
		// Bit cache exhausted: refill with a fresh 63-bit value.
		r.boolcache = r.src.Int63()
		r.boolcount = 63
	}
	bit := r.boolcache & 1
	r.boolcache >>= 1
	r.boolcount--
	return bit == 1
}
// OptBool return an optional random boolean.
// Roughly half the time it returns nil; otherwise the wrapped value is itself
// random. Note this consumes up to two bits from Bool's cache.
func (r *RandomUnsafe) OptBool() *wrappers.BoolValue {
	if !r.Bool() {
		return nil
	}
	return &wrappers.BoolValue{Value: r.Bool()}
}

// Int returns a random int32.
// n is the exclusive upper bound, as for rand.Intn (n <= 0 panics).
func (r *RandomUnsafe) Int(n int) int32 {
	return int32(r.src.Intn(n))
}

// OptInt32 returns a optional random int32.
// nil roughly half the time, otherwise a random value in [0, n).
func (r *RandomUnsafe) OptInt32(n int) *wrappers.Int32Value {
	if !r.Bool() {
		return nil
	}
	return &wrappers.Int32Value{Value: r.Int(n)}
}

// OptInt64 returns a optional random int64.
// nil roughly half the time; note the value is still bounded by int32 range
// because it is produced by Int.
func (r *RandomUnsafe) OptInt64(n int) *wrappers.Int64Value {
	if !r.Bool() {
		return nil
	}
	return &wrappers.Int64Value{Value: int64(r.Int(n))}
}
// Date returns a random recent date formatted as string: an instant drawn
// uniformly from the last five days, rendered in RFC 3339 form.
func (r *RandomUnsafe) Date() string {
	now := time.Now()
	lower := now.AddDate(0, 0, -5).Unix() // 5 days ago
	span := now.Unix() - lower
	return time.Unix(lower+r.src.Int63n(span), 0).Format(time.RFC3339)
}
// OptString returns an optional random string of random length between min and
// max. nil is returned roughly half the time.
func (r *RandomUnsafe) OptString(min, max int) *wrappers.StringValue {
	if !r.Bool() {
		return nil
	}
	return &wrappers.StringValue{Value: r.String(min, max)}
}

// Size returns a physical measure for an object using a normal distribution.
// nil roughly half the time; otherwise a value drawn from N(3000, 2500),
// redrawn until strictly positive.
func (r *RandomUnsafe) Size() *wrappers.Int32Value {
	if !r.Bool() {
		return nil
	}
	var size int32
	// Rejection-sample: keep drawing until the normal variate is positive.
	for ; size <= 0; size = int32(r.src.NormFloat64()*2500 + 3000) {
	}
	return &wrappers.Int32Value{Value: size}
}
// RandomId returns a random string to use as id, starting with prefix.
// Layout: <prefix>_<random 10-20 chars>_<UTC nanosecond timestamp>.
func (r *RandomUnsafe) RandomId(prefix string) string {
	ts := strconv.FormatInt(time.Now().UTC().UnixNano(), 10)
	return strings.Join([]string{prefix, r.String(10, 20), ts}, "_")
}

// Code builds a code string of the form <prefix>_<i>_<random 10-32 chars>.
func (r *RandomUnsafe) Code(prefix string, i int) string {
	return strings.Join([]string{prefix, strconv.Itoa(i), r.String(10, 32)}, "_")
}
// ByteSliceToString converts a slice of bytes to a string without incurring
// the usual copy overhead.
// It is only safe to use if the byte slice is guaranteed not to change for
// the lifetime of the returned string (the runtime assumes strings are
// immutable).
// NOTE(review): this predates unsafe.String/unsafe.SliceData; consider
// migrating if the module targets a Go version that provides them.
func ByteSliceToString(bs []byte) string {
	// This is copied from runtime. It relies on the string
	// header being a prefix of the slice header!
	return *(*string)(unsafe.Pointer(&bs))
}
|
package igapi
import (
"github.com/lemkova/instagramapi-go/signature"
"github.com/lemkova/instagramapi-go/igreq"
u "net/url"
"strings"
"fmt"
"log"
)
const (
	// uagent impersonates the official Instagram Android client.
	uagent = "Instagram 9.2.0 Android (18/4.3; 320dpi; 720x1280; Xiaomi; HM 1SW; armani; qcom; en_US)"
	// challange (sic — identifier kept to avoid breaking references) is the
	// API path used to fetch the CSRF cookie before login.
	challange = "si/fetch_headers/?challenge_type=signup&guid="
)
// InstagramClient holds credentials and per-device identifiers for talking
// to the private Instagram API.
type InstagramClient struct {
	user string
	pass string
	// csrftoken is filled in by Login from the challenge response cookie.
	csrftoken string
	isLogged bool
	deviceid string
	agent *igreq.Agent
	uuid string
}

// Stage2 is a placeholder for the second stage of the login flow.
// NOTE(review): currently empty and unused in this chunk — confirm intent.
type Stage2 struct {
}
// NewClient builds an InstagramClient for the given credentials with a fresh
// device id, user agent and UUID. The client starts logged out with no CSRF
// token.
func NewClient(user string, pass string) *InstagramClient {
	return &InstagramClient{
		user:      user,
		pass:      pass,
		csrftoken: "",
		isLogged:  false,
		deviceid:  signature.GenerateDeviceID(user + pass),
		agent:     igreq.NewAgent(uagent),
		uuid:      signature.GenerateUUID(true),
	}
}
func (i *InstagramClient) Login() bool {
if i.isLogged != true {
constant := &signature.Constants{}
call := fmt.Sprintf("%s%s%s", constant.GetApiEndpoint(), challange, signature.GenerateUUID(false))
err, res := i.agent.SendRequest(call, nil)
if err != nil {
log.Fatal(err)
return false
}
cookies := res.Cookies()[0].String()
tok := strings.Split(cookies, ";")
csrf := strings.Replace(tok[0], "csrftoken=", "", -1)
i.csrftoken = csrf
//fmt.Println(csrf) //DEBUG
//Stage 2
data := u.Values{}
data.Set("data", "test");
return true
}
return false
} |
package ui
import (
"fmt"
"strings"
"github.com/chzyer/readline"
"github.com/kr/text"
"github.com/manifoldco/torus-cli/prefs"
)
// enableProgress is whether progress events should be displayed
var enableProgress = false

// enableHints is whether hints should be displayed
var enableHints = false

// Init prepares the ui preferences by copying the progress/hint toggles out
// of the user's preference file into the package-level flags above.
func Init(preferences *prefs.Preferences) {
	enableProgress = preferences.Core.EnableProgress
	enableHints = preferences.Core.EnableHints
}
// Progress handles the ui output for progress events, when enabled.
func Progress(str string) {
	if enableProgress {
		fmt.Println(str)
	}
}
// Hint handles the ui output for hint/onboarding messages, when enabled.
// Unless noPadding is set, a blank line is printed before the message.
func Hint(str string, noPadding bool) {
	if !enableHints {
		return
	}
	if !noPadding {
		fmt.Println("")
	}
	printWrapLabeled("Protip:", str)
}
// printWrapLabeled prints message prefixed by label, word-wrapped to the
// terminal width, with continuation lines indented past the label.
//
// Robustness fix: readline.GetScreenWidth can report a non-positive width
// (e.g. when stdout is not a terminal), which previously sent a negative
// wrap width into text.Wrap; we now fall back to a conventional 80 columns
// whenever the reported width leaves no room for the message.
func printWrapLabeled(label, message string) {
	cols := readline.GetScreenWidth() - 2
	longest := len(label)
	if cols <= 2+longest {
		cols = 80 - 2
	}
	fmt.Printf("%s ", label)
	wrapped := text.Wrap(message, cols-(2+longest))
	fmt.Println(indentOthers(wrapped, 2+longest))
}
// indentOthers indents every line of str except the first by indent spaces.
func indentOthers(str string, indent int) string {
	first, rest := str, ""
	if nl := strings.IndexRune(str, '\n'); nl >= 0 {
		first, rest = str[:nl], str[nl:]
	}
	return first + text.Indent(rest, fmt.Sprintf("%*s", indent, ""))
}
|
package main
import (
"bytes"
"fmt"
"unicode/utf8"
)
// tokenClass identifies the kind of a lexed token.
type tokenClass string

// Token classes. Single-character classes use the character itself as the
// class value; literals and EOF use symbolic names.
const (
	tkObjStart tokenClass = "{"
	tkObjEnd tokenClass = "}"
	tkArrStart tokenClass = "["
	tkArrEnd tokenClass = "]"
	tkDot tokenClass = "."
	tkLiteral tokenClass = "LITERAL"
	tkRawLiteral tokenClass = ":LITERAL"
	tkAssign tokenClass = "="
	tkEOF tokenClass = "$"
)

// token is a single lexeme: its class, its (possibly unescaped) text, and
// the byte offset in the input where it started.
type token struct {
	class tokenClass
	value string
	pos int
}

// lexer scans an input string. start marks the beginning of the token being
// built, pos the current read position, and width the byte width of the most
// recently popped rune (used by push to back up exactly one rune).
type lexer struct {
	input string
	start int
	pos int
	width int
}
// newLexer returns a lexer positioned at the beginning of input.
// start, pos and width all begin at their zero values.
func newLexer(input string) *lexer {
	return &lexer{input: input}
}
// next scans and returns the next token, skipping leading spaces.
// Single-character tokens are emitted directly; '"' starts a quoted string,
// ':' starts a raw literal (the ':' itself is dropped from the value), and
// anything else begins a plain literal.
//
// Cleanup: the body used to be wrapped in an outer for{} loop, but every
// switch branch returns, so the loop could never repeat and has been removed.
func (lx *lexer) next() token {
	// Skip spaces. Only ' ' is skipped here; other whitespace such as '\t'
	// falls through to the literal rules below.
	for lx.peek() == ' ' {
		lx.pop()
	}
	lx.drop()
	r := lx.pop()
	switch r {
	case eof:
		return lx.emit(tkEOF)
	case '{':
		return lx.emit(tkObjStart)
	case '}':
		return lx.emit(tkObjEnd)
	case '[':
		return lx.emit(tkArrStart)
	case ']':
		return lx.emit(tkArrEnd)
	case '.':
		return lx.emit(tkDot)
	case '=':
		return lx.emit(tkAssign)
	case '"':
		return lx.lexString()
	case ':':
		// Drop the ':' so it is not part of the raw literal's value.
		lx.drop()
		return lx.lexRawLiteral()
	default:
		return lx.lexLiteral()
	}
}
// notOkInLiteral lists the runes that terminate a plain literal.
var notOkInLiteral = []rune{eof, ' ', '\t', '{', '}', '[', ']', '=', '\n', '.'}

// lexLiteral consumes runes until a terminator and emits a LITERAL token.
// The rune that triggered the call was already consumed by next() and is
// included in the token value via the start position.
func (lx *lexer) lexLiteral() token {
	for notIn(lx.peek(), notOkInLiteral) {
		lx.pop()
	}
	return lx.emit(tkLiteral)
}

// notOkInRawLiteral is the terminator set for raw literals; unlike plain
// literals, '.' is allowed inside a raw literal.
var notOkInRawLiteral = []rune{eof, ' ', '\t', '{', '}', '[', ']', '=', '\n'}

// lexRawLiteral consumes runes until a terminator and emits a :LITERAL token.
func (lx *lexer) lexRawLiteral() token {
	for notIn(lx.peek(), notOkInRawLiteral) {
		lx.pop()
	}
	return lx.emit(tkRawLiteral)
}
// lexString scans a double-quoted string (the opening quote was already
// consumed by next) and emits a LITERAL token whose value has the escape
// sequence \" resolved. Panics on EOF before the closing quote.
func (lx *lexer) lexString() token {
	lx.drop() // get rid of the opening quotes "
	var buffer bytes.Buffer
	for {
		r := lx.pop()
		switch r {
		case eof:
			panic("unclosed string")
		case '\\':
			// Only \" is treated as an escape; any other backslash is kept
			// verbatim.
			// NOTE(review): a backslash immediately before the closing quote
			// (input like `...\"` meaning "backslash then end") is read as an
			// escaped quote, so a string cannot end with a literal backslash —
			// confirm this limitation is intended.
			if lx.peek() == '"' {
				buffer.WriteRune(lx.pop())
			} else {
				buffer.WriteRune('\\')
			}
		case '"':
			return lx.emitV(tkLiteral, buffer.String())
		default:
			buffer.WriteRune(r)
		}
	}
}
// notIn reports whether needle does not occur anywhere in haystack.
func notIn(needle rune, haystack []rune) bool {
	for i := 0; i < len(haystack); i++ {
		if haystack[i] == needle {
			return false
		}
	}
	return true
}
// eof is the sentinel rune returned once the input is exhausted.
const eof = -1

// pop consumes and returns the next rune, recording its byte width so push
// can undo exactly one pop.
func (l *lexer) pop() rune {
	r, w := l.nextRune()
	l.width = w
	l.pos += w
	return r
}

// peek returns the next rune without consuming it.
func (l *lexer) peek() rune {
	r, _ := l.nextRune()
	return r
}

// nextRune decodes the rune at the current position, returning eof with
// width 0 at end of input.
func (l *lexer) nextRune() (rune, int) {
	if l.pos >= len(l.input) {
		return eof, 0
	}
	return utf8.DecodeRuneInString(l.input[l.pos:])
}
// push backs the read position up by the width of the last popped rune.
// It can only undo a single pop (width is not a stack).
func (l *lexer) push() {
	l.pos -= l.width
	l.width = 0
}

// drop discards pending text by moving the token start up to the current
// read position.
func (l *lexer) drop() {
	l.start = l.pos
}

// matched returns the raw input accumulated for the token being built.
func (l *lexer) matched() string {
	return l.input[l.start:l.pos]
}

// errorf panics with a formatted scanning error.
func (l *lexer) errorf(format string, args ...interface{}) {
	panic(fmt.Sprintf(format, args...))
}
// emit produces a token of the given class whose value is the raw input
// matched since the last emit/drop, and resets the token start.
// Consistency: delegates to emitV instead of duplicating its body; emitV
// performs the identical start-reset bookkeeping.
func (l *lexer) emit(class tokenClass) token {
	return l.emitV(class, l.input[l.start:l.pos])
}
// emitV produces a token of the given class with an explicit value (used when
// the token text differs from the raw input, e.g. unescaped strings), then
// resets the token start to the current position.
func (l *lexer) emitV(class tokenClass, v string) token {
	tok := token{class: class, value: v, pos: l.start}
	l.start = l.pos
	return tok
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/cloudidentity/beta/cloudidentity_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta"
)
// MembershipServer implements the gRPC interface for Membership.
// It is stateless: each RPC builds a fresh DCL client from the request's
// service account file (see createConfigMembership).
type MembershipServer struct{}
// ProtoToMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum converts a MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum enum from its proto representation.
// The proto zero value (unspecified) and unknown values map to nil.
func ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(e betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum) *beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum_name[int32(e)]; ok {
		// Strip the proto name prefix to recover the bare enum string.
		e := beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(n[len("CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum"):])
		return &e
	}
	return nil
}

// ProtoToMembershipTypeEnum converts a MembershipTypeEnum enum from its proto representation.
// Zero (unspecified) and unknown values map to nil.
func ProtoToCloudidentityBetaMembershipTypeEnum(e betapb.CloudidentityBetaMembershipTypeEnum) *beta.MembershipTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudidentityBetaMembershipTypeEnum_name[int32(e)]; ok {
		e := beta.MembershipTypeEnum(n[len("CloudidentityBetaMembershipTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToMembershipDeliverySettingEnum converts a MembershipDeliverySettingEnum enum from its proto representation.
// Zero (unspecified) and unknown values map to nil.
func ProtoToCloudidentityBetaMembershipDeliverySettingEnum(e betapb.CloudidentityBetaMembershipDeliverySettingEnum) *beta.MembershipDeliverySettingEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudidentityBetaMembershipDeliverySettingEnum_name[int32(e)]; ok {
		e := beta.MembershipDeliverySettingEnum(n[len("CloudidentityBetaMembershipDeliverySettingEnum"):])
		return &e
	}
	return nil
}
// ProtoToMembershipPreferredMemberKey converts a MembershipPreferredMemberKey object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipPreferredMemberKey(p *betapb.CloudidentityBetaMembershipPreferredMemberKey) *beta.MembershipPreferredMemberKey {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipPreferredMemberKey{
		Id: dcl.StringOrNil(p.GetId()),
		Namespace: dcl.StringOrNil(p.GetNamespace()),
	}
	return obj
}

// ProtoToMembershipRoles converts a MembershipRoles object from its proto representation.
// nil input yields nil output; nested objects are converted recursively.
func ProtoToCloudidentityBetaMembershipRoles(p *betapb.CloudidentityBetaMembershipRoles) *beta.MembershipRoles {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipRoles{
		Name: dcl.StringOrNil(p.GetName()),
		ExpiryDetail: ProtoToCloudidentityBetaMembershipRolesExpiryDetail(p.GetExpiryDetail()),
		RestrictionEvaluations: ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluations(p.GetRestrictionEvaluations()),
	}
	return obj
}

// ProtoToMembershipRolesExpiryDetail converts a MembershipRolesExpiryDetail object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipRolesExpiryDetail(p *betapb.CloudidentityBetaMembershipRolesExpiryDetail) *beta.MembershipRolesExpiryDetail {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipRolesExpiryDetail{
		ExpireTime: dcl.StringOrNil(p.GetExpireTime()),
	}
	return obj
}

// ProtoToMembershipRolesRestrictionEvaluations converts a MembershipRolesRestrictionEvaluations object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluations(p *betapb.CloudidentityBetaMembershipRolesRestrictionEvaluations) *beta.MembershipRolesRestrictionEvaluations {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipRolesRestrictionEvaluations{
		MemberRestrictionEvaluation: ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation(p.GetMemberRestrictionEvaluation()),
	}
	return obj
}

// ProtoToMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation converts a MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation(p *betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation) *beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation{
		State: ProtoToCloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(p.GetState()),
	}
	return obj
}

// ProtoToMembershipDisplayName converts a MembershipDisplayName object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipDisplayName(p *betapb.CloudidentityBetaMembershipDisplayName) *beta.MembershipDisplayName {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipDisplayName{
		GivenName: dcl.StringOrNil(p.GetGivenName()),
		FamilyName: dcl.StringOrNil(p.GetFamilyName()),
		FullName: dcl.StringOrNil(p.GetFullName()),
	}
	return obj
}

// ProtoToMembershipMemberKey converts a MembershipMemberKey object from its proto representation.
// nil input yields nil output.
func ProtoToCloudidentityBetaMembershipMemberKey(p *betapb.CloudidentityBetaMembershipMemberKey) *beta.MembershipMemberKey {
	if p == nil {
		return nil
	}
	obj := &beta.MembershipMemberKey{
		Id: dcl.StringOrNil(p.GetId()),
		Namespace: dcl.StringOrNil(p.GetNamespace()),
	}
	return obj
}
// ProtoToMembership converts a Membership resource from its proto representation.
// Unlike the nested-object helpers there is no explicit nil check here;
// presumably the generated proto getters are nil-safe — confirm against the
// generator's conventions.
func ProtoToMembership(p *betapb.CloudidentityBetaMembership) *beta.Membership {
	obj := &beta.Membership{
		Name: dcl.StringOrNil(p.GetName()),
		PreferredMemberKey: ProtoToCloudidentityBetaMembershipPreferredMemberKey(p.GetPreferredMemberKey()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime: dcl.StringOrNil(p.GetUpdateTime()),
		Type: ProtoToCloudidentityBetaMembershipTypeEnum(p.GetType()),
		DeliverySetting: ProtoToCloudidentityBetaMembershipDeliverySettingEnum(p.GetDeliverySetting()),
		DisplayName: ProtoToCloudidentityBetaMembershipDisplayName(p.GetDisplayName()),
		MemberKey: ProtoToCloudidentityBetaMembershipMemberKey(p.GetMemberKey()),
		Group: dcl.StringOrNil(p.GetGroup()),
	}
	// Roles is a repeated field: convert and flatten each entry.
	for _, r := range p.GetRoles() {
		obj.Roles = append(obj.Roles, *ProtoToCloudidentityBetaMembershipRoles(r))
	}
	return obj
}
// MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnumToProto converts a MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum enum to its proto representation.
// nil and unknown values map to the proto zero value (unspecified).
func CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnumToProto(e *beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum) betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum {
	if e == nil {
		return betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(0)
	}
	if v, ok := betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum_value["MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum"+string(*e)]; ok {
		return betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(v)
	}
	return betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnum(0)
}

// MembershipTypeEnumToProto converts a MembershipTypeEnum enum to its proto representation.
// nil and unknown values map to the proto zero value (unspecified).
func CloudidentityBetaMembershipTypeEnumToProto(e *beta.MembershipTypeEnum) betapb.CloudidentityBetaMembershipTypeEnum {
	if e == nil {
		return betapb.CloudidentityBetaMembershipTypeEnum(0)
	}
	if v, ok := betapb.CloudidentityBetaMembershipTypeEnum_value["MembershipTypeEnum"+string(*e)]; ok {
		return betapb.CloudidentityBetaMembershipTypeEnum(v)
	}
	return betapb.CloudidentityBetaMembershipTypeEnum(0)
}

// MembershipDeliverySettingEnumToProto converts a MembershipDeliverySettingEnum enum to its proto representation.
// nil and unknown values map to the proto zero value (unspecified).
func CloudidentityBetaMembershipDeliverySettingEnumToProto(e *beta.MembershipDeliverySettingEnum) betapb.CloudidentityBetaMembershipDeliverySettingEnum {
	if e == nil {
		return betapb.CloudidentityBetaMembershipDeliverySettingEnum(0)
	}
	if v, ok := betapb.CloudidentityBetaMembershipDeliverySettingEnum_value["MembershipDeliverySettingEnum"+string(*e)]; ok {
		return betapb.CloudidentityBetaMembershipDeliverySettingEnum(v)
	}
	return betapb.CloudidentityBetaMembershipDeliverySettingEnum(0)
}
// MembershipPreferredMemberKeyToProto converts a MembershipPreferredMemberKey object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipPreferredMemberKeyToProto(o *beta.MembershipPreferredMemberKey) *betapb.CloudidentityBetaMembershipPreferredMemberKey {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipPreferredMemberKey{}
	p.SetId(dcl.ValueOrEmptyString(o.Id))
	p.SetNamespace(dcl.ValueOrEmptyString(o.Namespace))
	return p
}

// MembershipRolesToProto converts a MembershipRoles object to its proto representation.
// nil input yields nil output; nested objects are converted recursively.
func CloudidentityBetaMembershipRolesToProto(o *beta.MembershipRoles) *betapb.CloudidentityBetaMembershipRoles {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipRoles{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetExpiryDetail(CloudidentityBetaMembershipRolesExpiryDetailToProto(o.ExpiryDetail))
	p.SetRestrictionEvaluations(CloudidentityBetaMembershipRolesRestrictionEvaluationsToProto(o.RestrictionEvaluations))
	return p
}

// MembershipRolesExpiryDetailToProto converts a MembershipRolesExpiryDetail object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipRolesExpiryDetailToProto(o *beta.MembershipRolesExpiryDetail) *betapb.CloudidentityBetaMembershipRolesExpiryDetail {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipRolesExpiryDetail{}
	p.SetExpireTime(dcl.ValueOrEmptyString(o.ExpireTime))
	return p
}

// MembershipRolesRestrictionEvaluationsToProto converts a MembershipRolesRestrictionEvaluations object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipRolesRestrictionEvaluationsToProto(o *beta.MembershipRolesRestrictionEvaluations) *betapb.CloudidentityBetaMembershipRolesRestrictionEvaluations {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipRolesRestrictionEvaluations{}
	p.SetMemberRestrictionEvaluation(CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationToProto(o.MemberRestrictionEvaluation))
	return p
}

// MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationToProto converts a MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationToProto(o *beta.MembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation) *betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluation{}
	p.SetState(CloudidentityBetaMembershipRolesRestrictionEvaluationsMemberRestrictionEvaluationStateEnumToProto(o.State))
	return p
}

// MembershipDisplayNameToProto converts a MembershipDisplayName object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipDisplayNameToProto(o *beta.MembershipDisplayName) *betapb.CloudidentityBetaMembershipDisplayName {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipDisplayName{}
	p.SetGivenName(dcl.ValueOrEmptyString(o.GivenName))
	p.SetFamilyName(dcl.ValueOrEmptyString(o.FamilyName))
	p.SetFullName(dcl.ValueOrEmptyString(o.FullName))
	return p
}

// MembershipMemberKeyToProto converts a MembershipMemberKey object to its proto representation.
// nil input yields nil output.
func CloudidentityBetaMembershipMemberKeyToProto(o *beta.MembershipMemberKey) *betapb.CloudidentityBetaMembershipMemberKey {
	if o == nil {
		return nil
	}
	p := &betapb.CloudidentityBetaMembershipMemberKey{}
	p.SetId(dcl.ValueOrEmptyString(o.Id))
	p.SetNamespace(dcl.ValueOrEmptyString(o.Namespace))
	return p
}
// MembershipToProto converts a Membership resource to its proto representation.
func MembershipToProto(resource *beta.Membership) *betapb.CloudidentityBetaMembership {
	p := &betapb.CloudidentityBetaMembership{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetPreferredMemberKey(CloudidentityBetaMembershipPreferredMemberKeyToProto(resource.PreferredMemberKey))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetType(CloudidentityBetaMembershipTypeEnumToProto(resource.Type))
	p.SetDeliverySetting(CloudidentityBetaMembershipDeliverySettingEnumToProto(resource.DeliverySetting))
	p.SetDisplayName(CloudidentityBetaMembershipDisplayNameToProto(resource.DisplayName))
	p.SetMemberKey(CloudidentityBetaMembershipMemberKeyToProto(resource.MemberKey))
	p.SetGroup(dcl.ValueOrEmptyString(resource.Group))
	sRoles := make([]*betapb.CloudidentityBetaMembershipRoles, len(resource.Roles))
	for i, r := range resource.Roles {
		// Taking &r is safe here: the pointee is fully converted into a new
		// proto message before the next iteration reuses the loop variable.
		sRoles[i] = CloudidentityBetaMembershipRolesToProto(&r)
	}
	p.SetRoles(sRoles)
	return p
}
// applyMembership handles the gRPC request by passing it to the underlying Membership Apply() method.
// It converts the request resource proto -> DCL, applies it, and converts the
// result back to proto.
func (s *MembershipServer) applyMembership(ctx context.Context, c *beta.Client, request *betapb.ApplyCloudidentityBetaMembershipRequest) (*betapb.CloudidentityBetaMembership, error) {
	p := ProtoToMembership(request.GetResource())
	res, err := c.ApplyMembership(ctx, p)
	if err != nil {
		return nil, err
	}
	r := MembershipToProto(res)
	return r, nil
}

// ApplyCloudidentityBetaMembership handles the gRPC request by passing it to the underlying Membership Apply() method.
// A client is created per request from the supplied service account file.
func (s *MembershipServer) ApplyCloudidentityBetaMembership(ctx context.Context, request *betapb.ApplyCloudidentityBetaMembershipRequest) (*betapb.CloudidentityBetaMembership, error) {
	cl, err := createConfigMembership(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyMembership(ctx, cl, request)
}
// DeleteCloudidentityBetaMembership handles the gRPC request by passing it to the underlying Membership Delete() method.
// Always returns an empty message; the delete error (if any) is the RPC error.
func (s *MembershipServer) DeleteCloudidentityBetaMembership(ctx context.Context, request *betapb.DeleteCloudidentityBetaMembershipRequest) (*emptypb.Empty, error) {
	cl, err := createConfigMembership(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteMembership(ctx, ProtoToMembership(request.GetResource()))
}
// ListCloudidentityBetaMembership handles the gRPC request by passing it to the underlying MembershipList() method.
// Results are scoped to the request's group and converted to protos.
func (s *MembershipServer) ListCloudidentityBetaMembership(ctx context.Context, request *betapb.ListCloudidentityBetaMembershipRequest) (*betapb.ListCloudidentityBetaMembershipResponse, error) {
	cl, err := createConfigMembership(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListMembership(ctx, request.GetGroup())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.CloudidentityBetaMembership
	for _, r := range resources.Items {
		rp := MembershipToProto(r)
		protos = append(protos, rp)
	}
	p := &betapb.ListCloudidentityBetaMembershipResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigMembership builds a DCL beta client authenticated with the
// given service account file. It never returns a non-nil error today; the
// error return exists to match the per-RPC call pattern.
func createConfigMembership(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
|
package main
import (
"fmt"
"os"
"strings"
"github.com/giantswarm/conair/btrfs"
"github.com/giantswarm/conair/nspawn"
)
var (
	// flagBind collects repeated -bind=<host>:<container> mount specs.
	flagBind stringSlice
	// flagSnapshot collects repeated -snapshot=<name>:<path> snapshot specs.
	flagSnapshot stringSlice
	// cmdRun is the "run" subcommand definition.
	cmdRun = &Command{
		Name: "run",
		Summary: "Run a container",
		Usage: "[-bind=S] [-snapshot=S] <image> [<container>]",
		Run: runRun,
		Description: `Run a new container
Example:
conair run base test
You can either bind mount a directory into the container or take a snapshot of a volume that will be deleted with the container.
conair run -bind=/var/data:/data base test
conair run -snapshot=mysnapshot:/data base test
`,
	}
)

// init registers the run command's repeatable flags.
func init() {
	cmdRun.Flags.Var(&flagBind, "bind", "Bind mount a directory into the container")
	cmdRun.Flags.Var(&flagSnapshot, "snapshot", "Add a snapshot into the container")
}
// runRun implements "conair run": it snapshots the image filesystem into a
// fresh container path, wires up the requested binds and snapshots, then
// enables and starts the container. The return value is a process exit code
// (0 on success, 1 on any failure).
//
// Fix: the error from btrfs.Init was silently discarded (`fs, _ :=`) and the
// possibly-invalid handle used anyway; a failed init is now reported.
func runRun(args []string) (exit int) {
	if len(args) < 1 {
		fmt.Fprintln(os.Stderr, "Image name missing.")
		return 1
	}
	imagePath := args[0]
	var container string
	if len(args) < 2 {
		// add some hashing here
		container = imagePath
	} else {
		container = args[1]
	}
	containerPath := fmt.Sprintf(".#%s", container)
	fs, err := btrfs.Init(home)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't initialize filesystem handle.", err)
		return 1
	}
	if err := fs.Snapshot(imagePath, containerPath, false); err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't create filesystem for container.", err)
		return 1
	}
	c := nspawn.Init(container, fmt.Sprintf("%s/%s", home, containerPath))
	if len(flagBind) > 0 {
		c.SetBinds(flagBind)
	}
	if len(flagSnapshot) > 0 {
		c.SetSnapshots(flagSnapshot)
	}
	for _, snap := range c.Snapshots {
		// Each snapshot spec is "<name>:<target path inside the container>".
		paths := strings.Split(snap, ":")
		if len(paths) < 2 {
			fmt.Fprintln(os.Stderr, "Couldn't create snapshot for container.")
			return 1
		}
		from := fmt.Sprintf(".cnr-snapshot-%s", paths[0])
		to := fmt.Sprintf("%s/%s", containerPath, paths[1])
		if fs.Exists(to) {
			// NOTE(review): os.Remove fails on non-empty directories —
			// presumably the target is an empty mount point; confirm.
			if err := os.Remove(fmt.Sprintf("%s/%s", home, to)); err != nil {
				fmt.Fprintln(os.Stderr, "Couldn't remove existing directory for snapshot.")
				return 1
			}
		}
		if err := fs.Snapshot(from, to, false); err != nil {
			fmt.Fprintln(os.Stderr, "Couldn't create snapshot for container.", err)
			return 1
		}
	}
	if err := c.Enable(); err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't enable container.", err)
		return 1
	}
	if err := c.Start(); err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't start container.", err)
		return 1
	}
	return 0
}
|
package todolist
import (
"bufio"
"fmt"
"io"
"os"
"os/user"
"strconv"
"strings"
)
// ConfigStore knows where the .todorc configuration file lives and whether it
// has been loaded.
type ConfigStore struct {
	FileLocation string
	Loaded bool
}

// Config holds all settings parsed from .todorc: command aliases, report and
// view definitions, sync settings, and the "open" helpers for notes, URLs and
// files.
type Config struct {
	Aliases map[string]string
	Reports map[string]map[string]string
	Views map[string][]string
	CurrentView string
	SyncFilepath string
	SyncEncryptionPassphrase string
	OpenNotesFolder string
	OpenNotesExt string
	OpenNotesRegex string
	OpenNotesCmd string
	OpenCustomRegex map[string]string
	OpenCustomCmd map[string]string
}

//Declare Priority global because need access in filter and sorter
// Maps a priority label (e.g. "H") to a numeric rank; presumably lower rank
// means higher priority — confirm in the sorter.
var (
	Priority map[string]int
)

// NewConfigStore returns a store pointing at the default ".todorc" file,
// marked as not yet loaded.
func NewConfigStore() *ConfigStore {
	return &ConfigStore{FileLocation: ".todorc", Loaded: false}
}
// Load reads the .todorc configuration (working directory first, then the
// user's home directory, per getConfigLocation) and returns a Config whose
// defaults have been overridden by any values found in the file. It also
// (re)initializes the package-level Priority map. A missing or unreadable
// config file is not an error: the built-in defaults are returned.
func (f *ConfigStore) Load() (*Config, error) {
	f.FileLocation = getConfigLocation()
	usr, _ := user.Current()
	notesDir := fmt.Sprintf("%s/.todo_notes", usr.HomeDir)
	// Defaults; each may be overridden by a matching key in the file.
	config := Config{
		Aliases:                  map[string]string{"alias.report": "list"},
		Reports:                  map[string]map[string]string{},
		Views:                    map[string][]string{},
		CurrentView:              "",
		SyncFilepath:             "",
		SyncEncryptionPassphrase: "",
		OpenNotesFolder:          notesDir,
		OpenNotesExt:             ".txt",
		OpenNotesRegex:           "notes",
		OpenNotesCmd:             "",
		OpenCustomRegex:          map[string]string{},
		OpenCustomCmd:            map[string]string{},
	}
	// Default regex for web URLs.
	config.OpenCustomRegex["browser"] = "((((https?://)?(www.))|(https?://))\\S+)"
	// Default regex for files.
	slash := string(os.PathSeparator)
	config.OpenCustomRegex["file"] = "((\\/|\\.\\/|~\\/|\\w:\\" + slash + ").+)"
	// Default priority ranks; lower rank sorts first.
	Priority = map[string]int{"H": 1, "M": 2, "L": 3}
	if len(f.FileLocation) == 0 {
		return &config, nil
	}
	file, err := os.Open(f.FileLocation)
	if err != nil {
		// No config file: silently fall back to the defaults above.
		return &config, nil
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	for {
		line, err := reader.ReadString('\n')
		if strings.HasPrefix(line, "#") {
			continue
		}
		// Only "key=value" lines are processed; everything else is ignored.
		if equal := strings.Index(line, "="); equal >= 0 {
			if key := strings.TrimSpace(line[:equal]); len(key) > 0 {
				value := ""
				if len(line) > equal {
					value = strings.TrimSpace(line[equal+1:])
				}
				// Route the key to the matching config section.
				if strings.HasPrefix(key, "alias") {
					keys := strings.Split(key, ".")
					if len(keys) > 1 {
						config.Aliases[keys[1]] = value
					}
				} else if strings.HasPrefix(key, "report") {
					// report.<name>.<field>=<value>
					keys := strings.Split(key, ".")
					if len(keys) > 2 {
						rep, ok := config.Reports[keys[1]]
						if !ok {
							rep = map[string]string{}
							config.Reports[keys[1]] = rep
						}
						rep[keys[2]] = value
					}
				} else if strings.HasPrefix(key, "view") {
					keys := strings.Split(key, ".")
					if len(keys) > 2 {
						if keys[2] == "filter" {
							config.Views[keys[1]] = strings.Split(value, " ")
						}
					} else if len(keys) > 1 {
						// len(keys) guard added: a bare "view=" key previously
						// panicked with index out of range on keys[1].
						if keys[1] == "current" {
							config.CurrentView = value
						}
					}
				} else if strings.HasPrefix(key, "priority") {
					// Custom priorities replace the defaults entirely.
					// NOTE(review): ranks start at 0 here while the defaults
					// start at 1 — confirm sorting relies only on relative order.
					Priority = map[string]int{}
					v := strings.Split(strings.TrimSpace(value), ",")
					for i, p := range v {
						Priority[p] = i
					}
				} else if strings.HasPrefix(key, "sync.filepath") {
					config.SyncFilepath = strings.TrimSpace(value)
				} else if strings.HasPrefix(key, "sync.encrypt.passphrase") {
					config.SyncEncryptionPassphrase = strings.TrimSpace(value)
				} else if strings.HasPrefix(key, "open") {
					// open.notes.<field> or open.<name>.(regex|cmd)
					keys := strings.Split(key, ".")
					if len(keys) < 3 {
						continue
					}
					if keys[1] == "notes" {
						switch keys[2] {
						case "ext":
							config.OpenNotesExt = strings.TrimSpace(value)
						case "folder":
							config.OpenNotesFolder = strings.TrimSpace(value)
						case "cmd":
							config.OpenNotesCmd = strings.TrimSpace(value)
						case "regex":
							config.OpenNotesRegex = strings.TrimSpace(value)
						}
					} else {
						switch keys[2] {
						case "regex":
							config.OpenCustomRegex[strings.TrimSpace(keys[1])] = strings.TrimSpace(value)
						case "cmd":
							config.OpenCustomCmd[strings.TrimSpace(keys[1])] = strings.TrimSpace(value)
						}
					}
				}
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			// Read error mid-file: return whatever was parsed so far.
			return &config, nil
		}
	}
	f.Loaded = true
	return &config, nil
}
// SetConfigValue sets attr=attrValue in the .todorc file, replacing an
// existing assignment or appending a new one, while preserving comments and
// unrelated lines. A missing config file is treated as a no-op (matching the
// original behavior); write failures are returned to the caller.
func (f *ConfigStore) SetConfigValue(attr string, attrValue string) error {
	f.FileLocation = getConfigLocation()
	if len(f.FileLocation) == 0 {
		return nil
	}
	file, err := os.Open(f.FileLocation)
	if err != nil {
		// Nothing to modify.
		return nil
	}
	reader := bufio.NewReader(file)
	modified := false
	todorc := []string{}
	for {
		line, _ := reader.ReadString('\n')
		if len(line) < 1 {
			break
		}
		// Only modify non-comment lines; comments pass through untouched.
		if !strings.HasPrefix(line, "#") {
			// Reset per line: previously the key persisted across
			// iterations, so a line without "=" could match a stale key
			// and be overwritten by mistake.
			key := ""
			if equal := strings.Index(line, "="); equal >= 0 {
				key = strings.TrimSpace(line[:equal])
			}
			if key == attr {
				modified = true
				line = attr + "=" + attrValue + "\n"
			}
		}
		todorc = append(todorc, line)
	}
	// If no existing attribute was modified, append it as a new one.
	if !modified {
		todorc = append(todorc, attr+"="+attrValue+"\n")
	}
	file.Close()
	// O_TRUNC is required: without it, rewriting shorter content would
	// leave stale bytes from the previous file contents at the end.
	file, err = os.OpenFile(f.FileLocation, os.O_RDWR|os.O_TRUNC, os.FileMode(int(0777)))
	if err != nil {
		return err
	}
	defer file.Close()
	writer := bufio.NewWriter(file)
	for _, l := range todorc {
		if _, err := writer.WriteString(l); err != nil {
			return err
		}
	}
	return writer.Flush()
}
// CreateDefaultConfig writes a fully commented default .todorc file to the
// current directory, overwriting any existing file. Unlike the original,
// create/write/flush errors are actually returned (the old code returned nil
// on every path and had an unreachable trailing return).
func CreateDefaultConfig() error {
	repoLoc := ".todorc"
	file, err := os.Create(repoLoc)
	if err != nil {
		println("Error writing .todorc file")
		return err
	}
	defer file.Close()
	// Default file content, one entry per output line.
	lines := []string{
		"## Notes on reports and commands. Type 'todolist help' for details and examles.\n",
		"## Columns: 'id' 'completed' 'age' 'due' 'context' 'project' 'ord:all' 'ord:pro' 'ord:ctx'\n",
		"## Headers: Labels for the columns.\n",
		"## Sort: '+/-' plus 'id' 'age' 'due' 'context' 'project' 'ord:all' 'ord:pro' 'ord:ctx'\n",
		"## Filter: Show results matching projects, contexts, due dates, etc.\n",
		"###### Exclusion: Prefix the filter with '-' to include todos that do NOT match that filter.\n",
		"## Group: Show results grouped by 'project' or 'context'\n",
		"## Notes: Show notes if true.\n",
		"\n",
		"## Define a default report format used to print tasks to terminal\n",
		"report.default.description='Default report of pending todos'\n",
		"report.default.columns=id,completed,age,due,context,project,subject\n",
		"report.default.headers=Id,Status,Age,Due,Context,Project,Subject\n",
		"report.default.sort=+project,+due\n",
		"report.default.filter=\n",
		"report.default.group=project\n",
		"#report.default.notes=true\n",
		"\n",
		"## Define custom priorities. Default is H,M,L.\n",
		"#priority=H,M,L\n",
		"\n",
		"## Define sync file path and encryption passphrase.\n",
		"###### encrypt.passphrase options: actual passphrase, *=prompt, <blank>=do not encrypt.\n",
		"###### filepath includes filename. Directory must exist.\n",
		"sync.encrypt.passphrase=*\n",
		"sync.filepath=./backup/todo_sync.json\n",
		"\n",
		"## Define aliases to save typing on common commands\n",
		"#alias.top2=top:pro:2 list sort:+project,+due\n",
		"\n",
		"## Define named view filters that can be applied by default\n",
		"#view.work.filter=@Work\n",
		"#view.home.filter=@Home\n",
		"\n",
		"## Set the currently applied view filter\n",
		"#view.current=home\n",
		"\n",
		"## Configure the open command. Below are all defaults. Uncomment and change to override.\n",
		"## Notes folder. If you sync, use a location available to other computers. E.g. a cloud drive\n",
		"#open.notes.folder=~/.todo_notes\n",
		"# Extension for notes\n",
		"#open.notes.ext=.txt\n",
		"# Command that opens notes\n",
		"#open.notes.cmd=mousepad\n",
		"# Regular expression if matched opens a notes file for a todo\n",
		"#open.notes.regex=notes\n",
		"## Define regex and (optionally) commands for open command to open differnt URI types.\n",
		"# Web URLs (www|http)\n",
		"#open.browser.regex=((((https?://)?(www.))|(https?://))\\S+)\n",
		"#open.browser.cmd=netsurf\n",
		"# File paths\n",
		"#open.file.regex=((\\/|\\.\\/|~\\/|\\w:\\/\\w)\\S+)\n",
	}
	writer := bufio.NewWriter(file)
	for _, l := range lines {
		if _, err := writer.WriteString(l); err != nil {
			return err
		}
	}
	return writer.Flush()
}
// getConfigLocation prefers a .todorc in the working directory and falls
// back to the one in the user's home directory.
func getConfigLocation() string {
	const localrepo = ".todorc"
	if _, ferr := os.Stat(localrepo); ferr == nil {
		return localrepo
	}
	usr, _ := user.Current()
	return fmt.Sprintf("%s/.todorc", usr.HomeDir)
}
// Report describes how a todo list is rendered: which columns, their
// headers, sorting, optional grouping, and whether notes are printed.
type Report struct {
	Description string      // human-readable summary of the report
	Filters     []string    // filter terms applied before rendering
	Columns     []string    // column identifiers, e.g. "id", "due"
	Headers     []string    // printed labels, parallel to Columns
	Sorter      *TodoSorter // sort order built from the "sort" config entry
	Group       string      // "project", "context", or "" for no grouping
	PrintNotes  bool        // include todo notes in the output
}
// Init populates the report from its raw key/value configuration, e.g.:
//
//	report.default.description='Default report of pending todos'
//	report.default.columns=id,completed,due,context,project,subject
//	report.default.headers=Id,Status,Due,Context,Project,Subject
//	report.default.sort=+project,-due
//	report.default.filter=
//	report.default.notes=false
//
// An unparseable boolean in the "notes" entry terminates the program.
func (r *Report) Init(rc map[string]string) {
	r.Description = rc["description"]
	r.Filters = strings.Split(rc["filter"], ",")
	// Only "project" and "context" are valid groupings; anything else is ignored.
	if g := strings.ToLower(rc["group"]); g == "project" || g == "context" {
		r.Group = g
	}
	sorts := strings.Split(rc["sort"], ",")
	// When grouping, make sure the group key leads the sort order so the
	// ScreenPrinter can emit contiguous groups.
	if r.Group != "" && !strings.Contains(sorts[0], r.Group) {
		sorts = append([]string{r.Group}, sorts...)
	}
	r.Sorter = NewTodoSorter(sorts...)
	r.Headers = strings.Split(rc["headers"], ",")
	r.Columns = strings.Split(rc["columns"], ",")
	// Include/exclude notes; the key is optional.
	if raw, ok := rc["notes"]; ok {
		show, err := strconv.ParseBool(raw)
		if err != nil {
			fmt.Println("Error parsing bool from report configuration: ", rc["notes"])
			os.Exit(1)
		}
		r.PrintNotes = show
	}
}
// GetAlias looks up the command bound to alias; the bool reports whether
// the alias exists.
func (c *Config) GetAlias(alias string) (string, bool) {
	if cmd, found := c.Aliases[alias]; found {
		return cmd, true
	}
	return "", false
}
// GetReport builds a Report from the named report configuration stored in
// c.Reports; the bool reports whether such a configuration exists.
func (c *Config) GetReport(report string) (*Report, bool) {
	rc, found := c.Reports[report]
	if !found {
		return nil, false
	}
	rep := &Report{}
	rep.Init(rc)
	return rep, true
}
|
package cmd
import (
"github.com/bitrise-io/go-utils/log"
"github.com/fehersanyi/microtis-cli/stargate"
"github.com/spf13/cobra"
)
// jumpCmd represents the jump command: it invokes stargate.Jump with the
// first positional argument. With no argument it silently does nothing.
var jumpCmd = &cobra.Command{
	Use:   "jump",
	Short: "jump will hehe, jump to a given directory for you",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {
		// Only the first argument is used; extras are ignored.
		if len(args) > 0 {
			if err := stargate.Jump(args[0]); err != nil {
				log.Errorf("Tilk, something happened, we can't jump: \n%s", err)
			}
		}
	},
}
// init registers the jump command on the root command at program start.
func init() {
	rootCmd.AddCommand(jumpCmd)
}
|
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"fmt"
"strconv"
_ "github.com/go-sql-driver/mysql"
)
// dropDB connects to the MySQL server described by t.ServiceConf and drops
// the service database. It returns false if opening the connection or
// executing the DROP fails (the original printed the error but always
// returned true), true on success.
func (t *Tester) dropDB() bool {
	// DSN format: user:password@tcp(host:port)/dbname?charset=utf8
	dsn := t.ServiceConf.DbUser + ":" + t.ServiceConf.DbPassword +
		"@tcp(" + t.ServiceConf.DbHost + ":" + strconv.Itoa(t.ServiceConf.DbPort) + ")/" +
		t.ServiceConf.DbName + "?charset=utf8"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		fmt.Println(err)
		return false
	}
	defer db.Close()
	q := "DROP DATABASE " + t.ServiceConf.DbName + ";"
	if _, err = db.Exec(q); err != nil {
		fmt.Println(err)
		return false
	}
	fmt.Println("------ Dropped Cameradar Database -------")
	return true
}
|
/*
Auther :chenglinguang
date: 2019-01-11
*/
package main
import (
"os/exec"
//"log"
"io/ioutil"
"fmt"
"strings"
)
// main scans /etc/fluent for fluentd config files and starts a fluentd
// process for each one found.
func main() {
	myFolder := "/etc/fluent"
	fileConf := listFile(myFolder)
	// Start the fluentd process for each discovered config file.
	for _, file := range fileConf {
		// Extract the instance number from names like "fluent-01.conf".
		// TrimPrefix/TrimSuffix replaces the original fixed slice file[7:9],
		// which panicked on shorter file names.
		conf := strings.TrimSuffix(strings.TrimPrefix(file, "fluent-"), ".conf")
		startFluent(conf)
	}
}
// listFile returns the fluentd config files found in myFolder, keyed by
// discovery index, e.g. {0: "fluent-01.conf"}. Directory read errors yield
// an empty map (matching the original, which ignored the error).
func listFile(myFolder string) map[int]string {
	fileConf := map[int]string{}
	files, _ := ioutil.ReadDir(myFolder)
	i := 0
	for _, file := range files {
		name := file.Name()
		// HasPrefix implies Contains, so the original's extra
		// strings.Contains check was redundant and has been dropped.
		if strings.HasPrefix(name, "fluent") && strings.HasSuffix(name, "conf") {
			fileConf[i] = name
			i++
		}
	}
	return fileConf
}
// startFluent launches ./init_env.sh for the given fluentd config number.
func startFluent(conf string) {
	// Interpolate conf into the command line. The original used the backtick
	// literal `./init_env.sh conf`, which passed the literal word "conf" to
	// the script instead of the config number.
	command := "./init_env.sh " + conf
	cmd := exec.Command("/bin/bash", "-c", command)
	output, err := cmd.Output()
	if err != nil {
		fmt.Printf("the process %s has been there!\n", conf)
	} else {
		fmt.Printf("Execute Shell:%s finished with output:\n%s", command, string(output))
	}
}
|
package main
import (
"bytes"
"fmt"
"github.com/gorilla/websocket"
"log"
"net/http"
"time"
)
// Client couples one websocket connection with the hub it belongs to.
type Client struct {
	hub  *Hub            // hub this client is registered with
	conn *websocket.Conn // underlying websocket connection
	id   string          // room id, taken from the "roomid" query parameter in serveWs
	send chan []byte     // buffered channel of outbound messages
}
const (
	// writeWait is the time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// pongWait is the time allowed to read the next pong message from the peer.
	pongWait = 60 * time.Second
	// pingPeriod is how often pings are sent. Must be less than pongWait so
	// a pong can arrive before the read deadline expires.
	pingPeriod = (pongWait * 9) / 10
	// maxMessageSize is the maximum message size (bytes) allowed from the peer.
	maxMessageSize = 512
)
var (
	// newline is collapsed into a space in inbound payloads (see readPump)
	// and used as the separator when coalescing outbound frames (writePump).
	newline = []byte{'\n'}
	space   = []byte{' '}
)
// upgrader promotes HTTP requests to websocket connections.
// NOTE(review): no CheckOrigin is set, so the library's default origin
// policy applies — confirm that is intended for this deployment.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}
// readPump pumps messages from the websocket connection to the hub.
// It runs in a per-connection goroutine; on exit (peer close, read error,
// or deadline) the client is unregistered and the connection closed.
func (c *Client) readPump() {
	defer func() {
		c.hub.unregister <- c
		_ = c.conn.Close()
	}()
	c.conn.SetReadLimit(maxMessageSize)
	_ = c.conn.SetReadDeadline(time.Now().Add(pongWait))
	// Every pong from the peer pushes the read deadline forward.
	c.conn.SetPongHandler(func(appData string) error {
		_ = c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil
	})
	for {
		_, message, err := c.conn.ReadMessage()
		if err != nil {
			// Expected close codes (going away / abnormal) are not logged.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure){
				log.Printf("error: %v", err)
			}
			break
		}
		// Collapse embedded newlines so one payload stays one logical message.
		message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1))
		c.hub.broadcast <- Message{id: c.id, message: message}
	}
}
// writePump pumps messages from the hub to the websocket connection and
// sends periodic pings. It runs in a per-connection goroutine and returns
// when the send channel is closed or a write fails.
func (c *Client) writePump() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		_ = c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			_ = c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if !ok {
				// The hub closed the channel; tell the peer we are done.
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			w, err := c.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			// Drain any queued messages into the same frame, newline-separated.
			// NOTE(review): the errors from these Writes are assigned but
			// never checked; only the final Close error aborts the loop.
			_, err = w.Write(message)
			n := len(c.send)
			for i := 0; i < n; i++ {
				_, err = w.Write(newline)
				_, err = w.Write(<-c.send)
			}
			if err := w.Close(); err != nil {
				return
			}
		case <- ticker.C:
			// Keep-alive ping; a failed write means the peer is gone.
			_ = c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
// serveWs upgrades an HTTP request to a websocket, registers the client in
// the room named by the "roomid" query parameter, broadcasts a system
// message with the updated member count, and starts the read/write pumps.
func serveWs(hub *Hub, w http.ResponseWriter, r *http.Request){
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	id := r.URL.Query().Get("roomid")
	client := &Client{hub: hub, conn: conn, send: make(chan []byte, 256), id: id}
	client.hub.register <- SingleClient{client: client, id: id}
	// System announcement carrying the new participant count for the room.
	s := fmt.Sprintf("{\"Username\":\"System\",\"Roomid\":\"%s\",\"Message\":\"%d\",\"Piece\":null}", client.id, len(client.hub.clients[client.id]) + 1)
	client.hub.broadcast <- Message{id: client.id, message: []byte(s)}
	log.Println(r.URL)
	go client.writePump()
	go client.readPump()
}
|
// Copyright © 2018 Sunface <CTO@188.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"io/ioutil"
"log"
"gopkg.in/yaml.v2"
)
// Config is the service configuration decoded from the YAML file passed to
// initConfig. Field names map to YAML keys by yaml.v2's default lowercasing
// unless tagged explicitly.
type Config struct {
	// Common holds process-wide settings.
	Common struct {
		Version string
		IsDebug bool `yaml:"debug"`
		LogPath string
		LogLevel string
	}
	// Broker configures the message broker listeners.
	Broker struct {
		Host string
		TcpPort string
		WsPort string
		Token string
		ServerID int64
	}
	// Admin configures the admin endpoint.
	Admin struct {
		Port string
	}
	// Store selects and configures the storage engine.
	Store struct {
		Engine string
		// FDB options (presumably FoundationDB — verify against the store code).
		FDB struct {
			Namespace string
			Threads int
		}
		// Mem is the in-memory engine; it takes no options.
		Mem struct {
		}
	}
	// Cluster configures peer addressing and seeding.
	Cluster struct {
		HwAddr string
		Port string
		SeedPeers []string
	}
}
// initConfig loads and parses the YAML config at path, terminating the
// process on any read or decode failure.
func initConfig(path string) *Config {
	conf := &Config{}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatal("read config error :", err)
	}
	// conf is already a pointer; the original passed &conf (a **Config),
	// which only worked through yaml.v2's extra indirection.
	err = yaml.Unmarshal(data, conf)
	if err != nil {
		log.Fatal("yaml decode error :", err)
	}
	return conf
}
|
package plan
import (
"github.com/kainosnoema/terracost-cli/prices"
"github.com/kainosnoema/terracost-cli/terraform"
)
// Resource maps a Terraform resource to AWS pricing, holding the priced
// line items before and after the planned change.
type Resource struct {
	Address string // Terraform resource address, e.g. "aws_instance.web"
	Action  string // plan action: "create", "delete", "update", etc.
	Before  prices.ByID // price entries for the pre-change state
	After   prices.ByID // price entries for the post-change state
}
// Calculate takes a TF plan, fetches AWS prices, and returns priced Resources.
// Read-only changes are skipped; multi-action changes (e.g. replace) are
// collapsed into "update". Price IDs are registered during the loop and
// resolved afterwards by a single priceLookup.Perform call.
func Calculate(tfPlan *terraform.PlanJSON) ([]Resource, error) {
	resources := []Resource{}
	priceLookup := prices.NewLookup()
	for i := len(tfPlan.ResourceChanges) - 1; i >= 0; i-- { // changes are in reverse order
		res := tfPlan.ResourceChanges[i]
		action := res.Change.Actions[0]
		if action == "read" {
			// read actions do not change resources; nothing to price
			continue
		}
		if len(res.Change.Actions) > 1 {
			action = "update" // we don't care about update vs. replace
		}
		resource := Resource{
			Address: res.Address,
			Action: action,
			Before: prices.ByID{},
			After: prices.ByID{},
		}
		// Register every before/after price ID; the entries are filled in
		// when priceLookup.Perform runs below.
		changesPriceIDs := prices.ResourceChangesPriceIDs(tfPlan.Region(), res)
		for _, beforePriceID := range changesPriceIDs.Before {
			resource.Before[beforePriceID] = priceLookup.Add(beforePriceID)
		}
		for _, afterPriceID := range changesPriceIDs.After {
			resource.After[afterPriceID] = priceLookup.Add(afterPriceID)
		}
		resources = append(resources, resource)
	}
	err := priceLookup.Perform()
	if err != nil {
		return nil, err
	}
	return resources, nil
}
|
package main
import "fmt"
// main demonstrates variable declaration, zero values, and pointers.
func main() {
	// declare and initialize in one statement
	a := "RUNOOB"
	fmt.Println(a)
	// an uninitialized int holds its zero value, 0
	var b int
	fmt.Println(b)
	// the bool zero value is false
	var c bool
	fmt.Println(c)
	// take the address of d and mutate it through the pointer
	d := "1"
	p := &d
	fmt.Println(d)
	fmt.Println(*p)
	*p = "3"
	fmt.Println(*p)
}
package qa
import (
"context"
"github.com/go-kit/kit/endpoint"
"qa/pkg"
)
// Endpoints collects the go-kit endpoint for every Q&A service operation,
// one field per Make*Endpoint constructor below.
type Endpoints struct {
	ReadQuestionEndpoint endpoint.Endpoint
	ReadAllQuestionsEndpoint endpoint.Endpoint
	CreateQuestionEndpoint endpoint.Endpoint
	UpdateQuestionEndpoint endpoint.Endpoint
	DeleteQuestionEndpoint endpoint.Endpoint
	DeleteAllQuestionsEndpoint endpoint.Endpoint
	ReadQuestionsOfUserEndpoint endpoint.Endpoint
	ReadAnswersOfUserEndpoint endpoint.Endpoint
}
// MakeReadQuestionEndpoint wraps Service.ReadQuestion in a go-kit endpoint.
// Service errors are reported in the response Err field; the transport
// error is always nil.
func MakeReadQuestionEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		id := request.(IDRequest).Id
		question, err := srv.ReadQuestion(ctx, id)
		if err != nil {
			return ReadQuestionResponse{qa.QA{}, err.Error()}, nil
		}
		return ReadQuestionResponse{question, ""}, nil
	}
}
// MakeReadAllQuestionsEndpoint wraps Service.ReadAllQuestions in a go-kit
// endpoint. Service errors are reported in the response Err field.
func MakeReadAllQuestionsEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		_ = request.(EmptyRequest) // assert the expected request type
		questions, err := srv.ReadAllQuestions(ctx)
		if err != nil {
			return ReadQuestionsResponse{nil, err.Error()}, nil
		}
		return ReadQuestionsResponse{questions, ""}, nil
	}
}
// MakeCreateQuestionEndpoint wraps Service.CreateQuestion in a go-kit
// endpoint. Service errors are reported in the response Err field.
func MakeCreateQuestionEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		payload := request.(QuestionRequest)
		created, err := srv.CreateQuestion(ctx, payload.QA)
		if err != nil {
			return CreateQuestionResponse{qa.QA{}, err.Error()}, nil
		}
		return CreateQuestionResponse{created, ""}, nil
	}
}
// MakeUpdateQuestionEndpoint wraps Service.UpdateQuestion in a go-kit
// endpoint. Service errors are reported in the response Err field; the
// transport error is always nil.
func MakeUpdateQuestionEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(QuestionRequest)
		if err := srv.UpdateQuestion(ctx, req.QA); err != nil {
			return EmptyResponse{err.Error()}, nil
		}
		// err is nil here; the original's `return EmptyResponse{}, err` was
		// equivalent but misleading (and hid behind an else-after-return).
		return EmptyResponse{}, nil
	}
}
// MakeDeleteQuestionEndpoint wraps Service.DeleteQuestion in a go-kit
// endpoint. Service errors are reported in the response Err field; the
// transport error is always nil.
func MakeDeleteQuestionEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(IDRequest)
		if err := srv.DeleteQuestion(ctx, req.Id); err != nil {
			return EmptyResponse{err.Error()}, nil
		}
		// err is nil on this path; return nil explicitly instead of the
		// original's misleading `return EmptyResponse{}, err`.
		return EmptyResponse{}, nil
	}
}
// MakeDeleteAllQuestionsEndpoint wraps Service.DeleteAllQuestions in a
// go-kit endpoint. Service errors are reported in the response Err field;
// the transport error is always nil.
func MakeDeleteAllQuestionsEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		_ = request.(EmptyRequest)
		if err := srv.DeleteAllQuestions(ctx); err != nil {
			return EmptyResponse{err.Error()}, nil
		}
		// err is nil on this path; return nil explicitly instead of the
		// original's misleading `return EmptyResponse{}, err`.
		return EmptyResponse{}, nil
	}
}
// MakeReadQuestionsOfUserEndpoint wraps Service.ReadQuestionsOfUser in a
// go-kit endpoint. Service errors are reported in the response Err field.
func MakeReadQuestionsOfUserEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		who := request.(UserRequest).User
		questions, err := srv.ReadQuestionsOfUser(ctx, who)
		if err != nil {
			return ReadQuestionsResponse{nil, err.Error()}, nil
		}
		return ReadQuestionsResponse{questions, ""}, nil
	}
}
// MakeReadAnswersOfUserEndpoint wraps Service.ReadAnswersOfUser in a go-kit
// endpoint. Service errors are reported in the response Err field.
func MakeReadAnswersOfUserEndpoint(srv Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		who := request.(UserRequest).User
		answers, err := srv.ReadAnswersOfUser(ctx, who)
		if err != nil {
			return ReadQuestionsResponse{nil, err.Error()}, nil
		}
		return ReadQuestionsResponse{answers, ""}, nil
	}
}
|
package gravity
import (
"github.com/althea-net/cosmos-gravity-bridge/gravity/x/gravity/keeper"
"github.com/althea-net/cosmos-gravity-bridge/gravity/x/gravity/types"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// handleMsgCreateOrchestratorAddress processes a MsgCreateOrchestratorAddress
// by delegating to the keeper and returning the ABCI events emitted while
// handling the message. It never returns an error.
func handleMsgCreateOrchestratorAddress(ctx sdk.Context, k keeper.Keeper, msg *types.MsgCreateOrchestratorAddress) (*sdk.Result, error) {
	k.CreateOrchestratorAddress(ctx, *msg)
	return &sdk.Result{Events: ctx.EventManager().ABCIEvents()}, nil
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io"
)
// mp4Reader handles all things related to the buffer.
// It receives an io.ReadSeeker from invoker and reads un-parsed
// data from the latter.
type mp4Reader struct {
	readSeeker io.ReadSeeker // source stream supplied by the caller
	b []byte // processing bytes
	a *atom // processing atom
	startPos int64 // file offset of the current atom's header start
	endPos int64 // file offset one past the current atom's body
}
// newMp4Reader wraps the given io.ReadSeeker in an mp4Reader.
func newMp4Reader(i io.ReadSeeker) *mp4Reader {
	return &mp4Reader{
		readSeeker: i,
	}
}
// GetAtomPosition returns the file offset at which the current atom starts,
// including its header bytes.
func (p *mp4Reader) GetAtomPosition() int64 {
	return p.startPos
}
// CheckAtomParseEnd reports whether the current read position has reached
// (or passed) the end of the current atom's body.
func (p *mp4Reader) CheckAtomParseEnd() bool {
	// Seek(0, io.SeekCurrent) just queries the position; the error is
	// ignored here, as elsewhere in this type.
	n, _ := p.readSeeker.Seek(0, io.SeekCurrent)
	// Collapsed the original if/return-true/return-false into one expression.
	return n >= p.endPos
}
// PeekAtomHeader reads the next atom's header and then restores the file
// position, so the header can be inspected without consuming it.
func (p *mp4Reader) PeekAtomHeader() (a *atom, err error) {
	startPos, _ := p.readSeeker.Seek(0, io.SeekCurrent)
	a, err = p.ReadAtomHeader()
	p.readSeeker.Seek(startPos, io.SeekStart)
	// Propagate the read error. The original returned (a, nil), so a caller
	// could receive a nil atom with a nil error and dereference it.
	return a, err
}
// ReadAtomHeader reads the next atom's header (32-bit size + fourcc type),
// handling the 64-bit "largesize" form used when the size field equals 1.
// If it returns without error, the new atom replaces the previous one and
// the previous atomReader becomes invalid.
func (p *mp4Reader) ReadAtomHeader() (a *atom, err error) {
	// Big-endian uint32 from the first four bytes of b.
	readInt := func(b []byte) uint32 {
		return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	}
	// (The original checked `err != nil` here, but err is the named return
	// and is always nil at this point — dead code, removed.)
	header := make([]byte, 8)
	n, e := p.Read(header)
	if n != 8 {
		return nil, e
	}
	a = new(atom)
	a.bodySize = int64(readInt(header[:4]))
	a.atomType = readInt(header[4:8])
	if a.bodySize == 1 {
		// 64-bit size: the next 8 bytes hold the full atom size, which
		// includes the 16 header bytes consumed so far.
		realSize := make([]byte, 8)
		n, e = p.Read(realSize)
		if n != 8 {
			return nil, e
		}
		a.headerSize = 16
		a.bodySize = int64(readInt(realSize[:4]))<<32 | int64(readInt(realSize[4:8])) - 16
		header = append(header, realSize...)
	} else {
		// 32-bit size includes the 8 header bytes; bodySize excludes them.
		a.bodySize -= 8
		a.headerSize = 8
	}
	p.a = a
	p.b = nil
	// Record the atom's span: startPos at the header, endPos past the body.
	p.startPos, _ = p.readSeeker.Seek(0, io.SeekCurrent)
	p.endPos = p.startPos + a.bodySize
	p.startPos -= int64(a.headerSize)
	return a, nil
}
// ReadAtomData reads the current atom's body into the internal buffer p.b.
// On a short read the position is restored and ErrNoEnoughData is returned.
func (p *mp4Reader) ReadAtomData() error {
	p.b = make([]byte, p.a.bodySize)
	// io.ReadFull retries partial reads: a bare Read may legally return
	// fewer bytes than requested without being at EOF, which previously
	// caused spurious ErrNoEnoughData results.
	n, err := io.ReadFull(p.readSeeker, p.b)
	if err != nil {
		p.readSeeker.Seek(int64(-n), io.SeekCurrent)
		return ErrNoEnoughData
	}
	return nil
}
/*
NOTE(review): incomplete draft note. It appears to enumerate the reader
states GetAtom must handle (1. first read of the atom: p.a == nil && p.b ==
nil; 2. a previously failed read being retried; 3. unfinished). Confirm and
complete this list, or remove it.
*/
// GetAtom returns an atomReader for the next atom if no error is
// encountered. If the header or body cannot be read, the read pointer is
// restored (by the callees) and the error is returned.
func (p *mp4Reader) GetAtom() (*atomReader, error) {
	if _, err := p.ReadAtomHeader(); err != nil {
		// Return the error. The original returned (nil, nil) here, so a
		// caller checking only err would dereference a nil atomReader.
		return nil, err
	}
	if err := p.ReadAtomData(); err != nil {
		return nil, err
	}
	return newAtomReader(p.b, p.a), nil
}
// SkipCurrentAtom will skip the following atom. It must be called
// in the boundary of the atoms. On seek failure the read position is
// restored and ErrOperationWithDraw is returned.
// NOTE(review): this seeks forward by p.a.Size() from the current position;
// confirm callers invoke it positioned before the atom being skipped.
func (p *mp4Reader) SkipCurrentAtom() (err error) {
	currentPos, _ := p.readSeeker.Seek(0, io.SeekCurrent)
	_, err = p.readSeeker.Seek(p.a.Size(), io.SeekCurrent)
	if err == nil {
		return nil
	}
	_, _ = p.readSeeker.Seek(currentPos, io.SeekStart) // restore the reader
	return ErrOperationWithDraw
}
// Read reads raw bytes from the underlying stream; used to read samples
// from the "mdat" atom. It is a direct passthrough, so the usual io.Reader
// short-read semantics apply.
func (p *mp4Reader) Read(b []byte) (n int, err error) {
	return p.readSeeker.Read(b)
}
// getReaderPosition returns the current file offset of the underlying
// stream (seek errors are ignored, as elsewhere in this type).
func (p *mp4Reader) getReaderPosition() int64 {
	n, _ := p.readSeeker.Seek(0, io.SeekCurrent)
	return n
}
// Peek will read at most len(b) bytes without moving the reading pointer.
// Note that the no-nil error should be processed.
// NOTE(review): a single Read may legally return fewer than len(b) bytes,
// so callers must honor the returned n rather than assume a full peek.
func (p *mp4Reader) Peek(b []byte) (n int, err error) {
	if len(b) == 0 {
		return 0, nil
	}
	n, err = p.readSeeker.Read(b)
	p.readSeeker.Seek(int64(-n), io.SeekCurrent)
	return n, err
}
// bitReader wraps an io.Reader and provides bit-granular reads. Instead of
// returning an error from every call, the first failure is latched into the
// err field; callers check Err() once after a sequence of reads.
// Adapted from https://golang.org/src/compress/bzip2/bit_reader.go
type bitReader struct {
	r    io.ByteReader
	n    uint64 // bit accumulator; most recently read byte in the low bits
	bits uint   // number of valid bits currently held in n
	err  error  // first error encountered, if any
}

// newBitReader wraps r, adding buffering when it is not already a ByteReader.
func newBitReader(r io.Reader) bitReader {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReader(r)
	}
	return bitReader{r: br}
}

// newBitReaderFromSlice builds a bitReader over an in-memory byte slice.
func newBitReaderFromSlice(src []byte) bitReader {
	return newBitReader(bytes.NewReader(src))
}

// ReadBitsLE64 returns the next `bits` (<= 64) bits as an unsigned value.
// On a read failure it records the error (EOF mid-value becomes
// io.ErrUnexpectedEOF) and returns 0.
func (br *bitReader) ReadBitsLE64(bits uint) (n uint64) {
	for br.bits < bits {
		b, err := br.r.ReadByte()
		if err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			br.err = err
			return 0
		}
		br.n = br.n<<8 | uint64(b)
		br.bits += 8
	}
	// Extract the requested bits from the top of the buffered window.
	n = (br.n >> (br.bits - bits)) & ((1 << bits) - 1)
	br.bits -= bits
	return n
}

// ReadBitsLE32 reads at most 32 bits and narrows the result.
func (br *bitReader) ReadBitsLE32(bits uint) (n uint32) {
	return uint32(br.ReadBitsLE64(bits))
}

// ReadBitsLE8 reads at most 8 bits and narrows the result.
func (br *bitReader) ReadBitsLE8(bits uint) (n uint8) {
	return uint8(br.ReadBitsLE64(bits))
}

// ReadBitsLE16 reads at most 16 bits and narrows the result.
func (br *bitReader) ReadBitsLE16(bits uint) (n uint16) {
	return uint16(br.ReadBitsLE64(bits))
}

// ReadBool reads a single bit and reports whether it is set.
func (br *bitReader) ReadBool() bool {
	return br.ReadBitsLE32(1) != 0
}

// Err returns the first error recorded by any Read* call, or nil.
func (br *bitReader) Err() error {
	return br.err
}
// int2String renders a four-character code (fourcc) packed in a uint32 as
// text, e.g. 0x6d6f6f76 -> "moov". Each byte is formatted with %c, so
// non-ASCII bytes become their Unicode code point.
func int2String(n uint32) string {
	b0, b1, b2, b3 := uint8(n>>24), uint8(n>>16), uint8(n>>8), uint8(n)
	return fmt.Sprintf("%c%c%c%c", b0, b1, b2, b3)
}
// string2int packs a 4-character code into a big-endian uint32. Strings of
// the wrong length are logged; strings shorter than 4 bytes now return 0
// instead of panicking on the b[0:4] slice (longer strings still use their
// first four bytes, as before).
func string2int(s string) uint32 {
	if len(s) != 4 {
		logE.Printf("string2int, the length of %s is not 4", s)
		if len(s) < 4 {
			return 0
		}
	}
	b := []byte(s)
	b = b[0:4]
	return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
}
|
// Package user contains protobuf types for users.
package user
import (
context "context"
"google.golang.org/protobuf/types/known/structpb"
"github.com/pomerium/pomerium/internal/identity"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/slices"
)
// Get gets a user from the databroker by user ID. The returned *User is
// always non-nil; callers must check the error before trusting its fields.
func Get(ctx context.Context, client databroker.DataBrokerServiceClient, userID string) (*User, error) {
	u := &User{Id: userID}
	return u, databroker.Get(ctx, client, u)
}
// GetServiceAccount gets a service account from the databroker by ID. The
// returned value is always non-nil; check the error before using it.
func GetServiceAccount(ctx context.Context, client databroker.DataBrokerServiceClient, serviceAccountID string) (*ServiceAccount, error) {
	sa := &ServiceAccount{Id: serviceAccountID}
	return sa, databroker.Get(ctx, client, sa)
}
// PutServiceAccount saves a service account to the databroker.
func PutServiceAccount(ctx context.Context, client databroker.DataBrokerServiceClient, serviceAccount *ServiceAccount) (*databroker.PutResponse, error) {
	return databroker.Put(ctx, client, serviceAccount)
}
// AddClaims adds the flattened claims to the user, overwriting any existing
// entry that shares a key.
func (x *User) AddClaims(claims identity.FlattenedClaims) {
	// Lazily create the map so this works on a zero-value User.
	if x.Claims == nil {
		x.Claims = make(map[string]*structpb.ListValue)
	}
	for k, svs := range claims.ToPB() {
		x.Claims[k] = svs
	}
}
// GetClaim returns the values recorded for a single claim; a missing claim
// yields a nil slice.
//
// This method is used by the dashboard template HTML to display claim data.
func (x *User) GetClaim(claim string) []interface{} {
	var values []interface{}
	for _, item := range x.GetClaims()[claim].GetValues() {
		values = append(values, item.AsInterface())
	}
	return values
}
// AddDeviceCredentialID adds a device credential id to the list of device
// credential ids, de-duplicating the result.
func (x *User) AddDeviceCredentialID(deviceCredentialID string) {
	x.DeviceCredentialIds = slices.Unique(append(x.DeviceCredentialIds, deviceCredentialID))
}
// HasDeviceCredentialID returns true if the user has the device credential id.
func (x *User) HasDeviceCredentialID(deviceCredentialID string) bool {
	return slices.Contains(x.DeviceCredentialIds, deviceCredentialID)
}
// RemoveDeviceCredentialID removes the device credential id from the list
// of device credential ids; absent ids are a no-op.
func (x *User) RemoveDeviceCredentialID(deviceCredentialID string) {
	x.DeviceCredentialIds = slices.Remove(x.DeviceCredentialIds, deviceCredentialID)
}
|
// Copyright 2020 Torben Schinke
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package appbar
import (
. "github.com/golangee/gotrino"
. "github.com/golangee/gotrino-html"
. "github.com/golangee/gotrino-html/svg"
"github.com/golangee/property"
)
// The AppBar provides a drawer area (side menu) at the left and a toolbar area at the top. It has only limited
// capabilities for customization. If you are sure that you really need a custom AppBar, feel free to
// copy-paste to start a new component for your specific project.
type AppBar struct {
	isOpen property.Bool // whether the drawer (side menu) is open
	toolbarArea Renderable // right-hand side of the top bar
	icon Renderable // app logo, right of the hamburger button
	title Renderable // app title, right of the icon
	drawerHeader Renderable // top section of the drawer
	drawerMain Renderable // main section of the drawer
	drawerFooter Renderable // bottom section of the drawer
	View
}
// NewAppBar allocates a new AppBar instance with all areas unset and the
// drawer closed (the property.Bool zero value).
func NewAppBar() *AppBar {
	c := &AppBar{}
	return c
}
// SetToolbarArea updates the node for right side of the AppBar. Consider mobile devices and only offer a context
// menu for small screens. Returns the receiver for call chaining.
func (c *AppBar) SetToolbarArea(node Renderable) *AppBar {
	c.toolbarArea = node
	c.Invalidate()
	return c
}
// SetIcon sets a Node as the first entry right of the hamburger menu.
// Returns the receiver for call chaining.
func (c *AppBar) SetIcon(node Renderable) *AppBar {
	c.icon = node
	c.Invalidate()
	return c
}
// Self assigns the receiver to the given reference, allowing the caller to
// capture the instance in the middle of a builder chain.
func (c *AppBar) Self(ref **AppBar) *AppBar {
	*ref = c
	return c
}
// SetTitle sets a Node as the entry right of the Icon.
// Returns the receiver for call chaining.
func (c *AppBar) SetTitle(node Renderable) *AppBar {
	c.title = node
	c.Invalidate()
	return c
}
// SetDrawerHeader sets a Node into the header section of the drawer. At least this should be the app icon.
// It invalidates the view and returns the receiver for chaining.
func (c *AppBar) SetDrawerHeader(node Renderable) *AppBar {
	c.drawerHeader = node
	c.Invalidate()
	return c
}
// SetDrawerMain sets a Node as the drawers main content.
// It invalidates the view and returns the receiver for chaining.
func (c *AppBar) SetDrawerMain(node Renderable) *AppBar {
	c.drawerMain = node
	c.Invalidate()
	return c
}
// SetDrawerFooter sets a Node into the bottom of the drawer.
// It invalidates the view and returns the receiver for chaining.
func (c *AppBar) SetDrawerFooter(node Renderable) *AppBar {
	c.drawerFooter = node
	c.Invalidate()
	return c
}
// Close closes the side menu (also known as drawer).
// It only clears the isOpen property; the modifiers bound in Render
// apply the visual change reactively.
func (c *AppBar) Close() *AppBar {
	c.isOpen.Set(false)
	return c
}
// Render builds the AppBar DOM: a fixed top bar (burger button, icon,
// title, toolbar area), a click-to-close translucent backdrop, and the
// sliding drawer (header, main, footer). Backdrop and drawer visibility
// are driven reactively by the isOpen property via IfCond modifiers.
func (c *AppBar) Render() Node {
	return Div(
		Nav(Class("flex fixed w-full items-center justify-between px-6 h-12 bg-primary text-on-primary shadow z-10"),
			// menu and logo
			Div(Class("flex items-center"),
				// burger menu button
				Button(Class("focus:outline-none"), AriaLabel("Open Menu"),
					AddClickListener(c.isOpen.Toggle),
					Svg(
						Class("w-8 h-8"),
						Fill("none"),
						Stroke("currentColor"),
						StrokeLinecap("round"),
						StrokeLinejoin("round"),
						StrokeWidth("2"),
						ViewBox("0 0 24 24"),
						Path(D("M4 6h16M4 12h16M4 18h16")),
					),
				),
				// app logo in app bar
				c.icon,
				// app title
				c.title,
			),
			// button section in app bar
			Div(Class("flex items-center"),
				c.toolbarArea,
			),
			// semi-transparent content blocking layer
			Div(
				Class(" z-10 fixed ease-in-out inset-0 bg-black opacity-0 transition-all duration-500"),
				IfCond(&c.isOpen,
					Modifiers(
						Style("visibility", "visible"),
						AddClass("opacity-50"),
					),
					Modifiers(
						Style("visibility", "hidden"),
						RemoveClass("opacity-50"),
					),
				),
				// clicking the backdrop toggles (closes) the drawer
				Div(
					Class("absolute inset-0"),
					AddClickListener(c.isOpen.Toggle),
				),
			),
			// Side menu
			Aside(
				Class("transform top-0 left-0 w-64 bg-white fixed h-full overflow-auto ease-in-out transition-all duration-500 z-30"),
				IfCond(&c.isOpen,
					Modifiers(
						AddClass("translate-x-0"),
						RemoveClass("-translate-x-full"),
					),
					Modifiers(
						RemoveClass("translate-x-0"),
						AddClass("-translate-x-full"),
					),
				),
				// keep the logo in the menu
				Span(
					Class("flex w-full items-center p-4 border-b"),
					c.drawerHeader,
				),
				Div(
					c.drawerMain,
				),
				// button at the bottom in the side menu
				Div(
					c.drawerFooter,
				),
			),
		),
	)
}
|
package two_sum
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestTwoSum checks twoSum against two fixed input/target pairs.
func TestTwoSum(t *testing.T) {
	cases := []struct {
		nums   []int
		target int
		want   []int
	}{
		{[]int{2, 7, 11, 15}, 9, []int{0, 1}},
		{[]int{2, 11, 15, 5}, 7, []int{0, 3}},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, twoSum(tc.nums, tc.target))
	}
}
|
package main
import (
"container/heap"
"fmt"
"io/ioutil"
"log"
"math"
"math/rand"
"runtime"
"sync"
"time"
)
/*
n person
2 person dorm rooms
match room-mates to satisfy most requests
p1, p2, ..., pn
slot1, slot2, ..., slotn
slot1 and slot2 -> room1
*/
// roommates is a pair of person indices sharing one room; pref keys are
// stored with the smaller index first (see getRoommateCost).
type roommates [2]int
// selectorFunc is a room-assignment strategy: given the number of people
// and an iteration budget, it returns the achieved cost and the pairing.
type selectorFunc func(numPerson int, numIterations int) (int, []roommates)
// pref maps a roommate pair (smaller index first) to its preference
// weight; a lower total weight is better and missing pairs count as 0.
// initPref extends this seed set programmatically.
var pref = map[roommates]int{
	roommates{2, 3}: 1,
	roommates{5, 7}: -1,
	roommates{0, 1}: -1,
	roommates{0, 2}: -1,
	roommates{0, 3}: -1,
	roommates{0, 4}: -1,
	roommates{0, 5}: -1,
	roommates{0, 6}: -1,
	roommates{0, 7}: -1,
	roommates{0, 8}: 1,
	roommates{0, 9}: 1,
}
// logger discards by default; point it at os.Stderr to trace the selectors.
var logger = log.New(ioutil.Discard, "Log: ", log.Ltime|log.Lshortfile)
// initPref fills pref with synthetic weights for pairs of count people:
// index sums ending in 8 or 9 get negative (favorable) weights, sums
// ending in 4 get a heavy penalty, and all other pairs stay at 0.
// NOTE(review): the GOMAXPROCS and rand.Seed calls are process-wide
// setup unrelated to the preference table; they would normally live in main.
func initPref(count int) {
	runtime.GOMAXPROCS(4)
	fmt.Println("GOMAXPROCS is ", runtime.GOMAXPROCS(0))
	rand.Seed(time.Now().Unix() * 123)
	var weight int
	for i := 0; i < count; i++ {
		for j := i + 1; j < count; j++ {
			// classify the pair by the last digit of i+j
			lastDigit := (i + j) % 10
			if lastDigit == 8 {
				weight = -100
			} else if lastDigit == 9 {
				weight = -50
			} else if lastDigit == 4 {
				weight = 1000
			} else {
				weight = 0
			}
			if weight != 0 {
				pref[roommates{i, j}] = weight
			}
		}
	}
}
// main builds a synthetic preference table for 100 people and runs the
// three selector strategies concurrently, each with its own iteration
// budget, waiting for all of them to finish.
func main() {
	numPeople := 100
	initPref(numPeople)
	var selectors = []selectorFunc{simpleSelector, iterateRoommateChoicesSelector, geneticSelector}
	// per-selector iteration budgets, index-aligned with selectors
	var iterations = []int{100000, 1, 1000}
	waitGroup := new(sync.WaitGroup)
	fmt.Println("Running simulated with ", numPeople, " people with iteration count of ", iterations)
	for i, selector := range selectors {
		waitGroup.Add(1)
		go selectorJob(i, selector, numPeople, iterations[i], waitGroup)
	}
	waitGroup.Wait()
}
// selectorJob runs one selector, prints its index, cost, elapsed time and
// resulting assignment, and signals the wait group when done.
func selectorJob(index int, selector selectorFunc, numPeople int, numIterations int, waitGroup *sync.WaitGroup) {
	defer waitGroup.Done()
	start := time.Now()
	cost, assignment := selector(numPeople, numIterations)
	fmt.Println(index, ": ", cost, "in", time.Since(start), "\n", assignment)
}
// getCostAndAssignment decodes a slot assignment into room pairs and
// scores the pairing against the preference table.
func getCostAndAssignment(slotAssignment []int) (int, []roommates) {
	roomAssignment := slotToRoom(slotAssignment)
	cost := getRoommateCost(roomAssignment)
	return cost, roomAssignment
}
// simpleSelector performs pure random search: it draws numIterations
// random slot assignments and keeps the cheapest one seen.
func simpleSelector(numPerson int, numIterations int) (int, []roommates) {
	best := 1000000
	var bestAssignment []roommates
	for iter := 0; iter < numIterations; iter++ {
		cost, assignment := getCostAndAssignment(getRandomChoice(numPerson))
		if cost < best {
			best = cost
			bestAssignment = assignment
		}
		// periodic progress trace (discarded unless logger is redirected)
		if iter%100 == 0 {
			logger.Println("cost: ", iter, cost, best)
		}
	}
	return best, bestAssignment
}
// geneticSelector runs a simple genetic algorithm: a population of random
// slot assignments evolves for numIterations generations through elitism,
// crossover and point mutation, and the best survivor is returned.
func geneticSelector(numPerson int, numIterations int) (int, []roommates) {
	numVariations := numPerson
	if numVariations < 100 {
		numVariations = 100
	}
	// NOTE(review): this overrides the clamp above and pins the
	// population size to 100 regardless of numPerson — confirm intent.
	numVariations = 100
	choices := make([][]int, numVariations)
	for i := 0; i < numVariations; i++ {
		choices[i] = getRandomChoice(numPerson)
	}
	bestScore, numTopSolutions, selectedChoices := evaluateGeneration(numVariations, choices)
	for i := 1; i < numIterations; i++ {
		// fmt.Println("Generation ", i)
		choices = getNextGeneration(numPerson, numTopSolutions, numVariations, selectedChoices)
		// fmt.Println(choices[0])
		bestScore, numTopSolutions, selectedChoices = evaluateGeneration(numVariations, choices)
	}
	// selectedChoices is filled by popping the priority queue, so the
	// last entry is presumably the best-ranked survivor — see evaluateGeneration
	bestSolution := slotToRoom(selectedChoices[numTopSolutions-1])
	// fmt.Println("Genetic: \n", bestScore, bestSolution)
	return bestScore, bestSolution
}
// getNextGeneration builds a new population of numVariations candidates:
// the numTopSolutions elites are copied to the tail (with a mutation
// applied to all but the first two), every ordered elite pair is crossed
// over at a random cut point, and any remaining slots are filled by
// crossing two random previous-generation members. Each non-elite child
// also receives one point mutation.
// NOTE(review): the filler loop indexes selectedChoices (length
// numTopSolutions) with rand.Intn(numVariations), which would panic if
// ever reached; with the current 100/10 sizing curIndex is already -1 by
// then — confirm for other sizings.
func getNextGeneration(numPerson, numTopSolutions int, numVariations int, selectedChoices [][]int) [][]int {
	// fmt.Println("GetNextGeneration: ", numTopSolutions, numVariations)
	result := make([][]int, numVariations)
	curIndex := numVariations - 1
	for i := 0; i < numTopSolutions; i++ {
		result[curIndex] = make([]int, numPerson)
		copy(result[curIndex], selectedChoices[numTopSolutions-i-1])
		// result[curIndex] = selectedChoices[numTopSolutions-i-1]
		// apply a mutation here
		if i > 1 {
			randomBit := rand.Intn(numPerson)
			// fmt.Print(i, curIndex, randomBit, result[curIndex])
			result[curIndex][randomBit] = rand.Intn(numPerson - randomBit)
			// fmt.Println(result[curIndex])
		}
		curIndex--
	}
	for i := 0; i < numTopSolutions; i++ {
		for j := 0; j < numTopSolutions; j++ {
			if i == j {
				continue
			} else {
				cutLocation := rand.Intn(numPerson-4) + 1
				// fmt.Println("Joining: ", cutLocation, i, j)
				// fmt.Println(selectedChoices[i], selectedChoices[j])
				result[curIndex] = make([]int, numPerson)
				copy(result[curIndex], selectedChoices[i])
				result[curIndex] = append(result[curIndex][:cutLocation], selectedChoices[j][cutLocation:]...)
				// apply point mutation here
				pointMutation := rand.Intn(numPerson)
				result[curIndex][pointMutation] = rand.Intn(numPerson - pointMutation)
				// fmt.Println(result[curIndex])
				curIndex--
			}
		}
	}
	for i := curIndex; i >= 0; i-- {
		cutLocation := rand.Intn(numPerson-4) + 2
		result[i] = make([]int, numPerson)
		copy(result[i], selectedChoices[rand.Intn(numVariations)])
		result[i] = append(result[i][:cutLocation+1], selectedChoices[rand.Intn(numVariations)][cutLocation+1:]...)
		// apply point mutation here
		pointMutation := rand.Intn(numPerson)
		result[i][pointMutation] = rand.Intn(numPerson - pointMutation)
	}
	// for i := 0; i < numVariations; i++ {
	// 	fmt.Println(i, result[i])
	// }
	return result
}
// evaluateGeneration scores every candidate and keeps the best
// numTopSolutions (sqrt of the population, at least 10) in a capped
// priority queue. It returns the priority of the last popped survivor,
// the survivor count, and the survivors in pop order.
// NOTE(review): the drain loop relies on heap.Pop returning nil once the
// queue is empty — confirm CappedPriorityQueue.Pop (defined elsewhere)
// supports that, since container/heap on a bare slice would panic.
func evaluateGeneration(numVariations int, choices [][]int) (int, int, [][]int) {
	numTopSolutions := int(math.Sqrt(float64(numVariations)))
	if numTopSolutions < 10 {
		numTopSolutions = 10
	}
	pq := make(CappedPriorityQueue, 0, numTopSolutions+1)
	heap.Init(&pq)
	lowestCost := 9999999
	var cost int
	//var solution []roommates
	// fmt.Println("EvaluateGeneration")
	for i := 0; i < numVariations; i++ {
		choice := choices[i]
		cost, _ = getCostAndAssignment(choice)
		// fmt.Println(i, cost, choice)
		if cost < lowestCost {
			lowestCost = cost
		}
		item := &Item{
			value: choice,
			priority: cost,
		}
		heap.Push(&pq, item)
		// keep only the top numTopSolutions entries; evict the worst
		if len(pq) > numTopSolutions {
			heap.Pop(&pq)
		}
	}
	topChoices := make([][]int, numTopSolutions)
	index := 0
	for {
		result := heap.Pop(&pq)
		if result == nil {
			break
		} else {
			data := result.(*Item)
			cost = data.priority
			topChoices[index] = data.value.([]int)
			// fmt.Println(index, cost, topChoices[index])
		}
		index++
	}
	// solution = slotToRoom(topChoices[numTopSolutions-1])
	// fmt.Println("genetic: ", cost, lowestCost)
	//fmt.Println("topChoice: ", topChoices[numTopSolutions-1])
	return cost, numTopSolutions, topChoices
}
// iterateRoommateChoicesSelector starts from one random assignment and
// greedily improves it: for every seat it trials swapping its occupant
// with every later seat in a different room, committing the last swap
// that did not increase the total cost.
func iterateRoommateChoicesSelector(numPerson int, numIterations int) (int, []roommates) {
	// numIterations is ignored since we will iterate through implicitly
	slotAssignment := getRandomChoice(numPerson)
	bestCost, bestRoommateAssignment := getCostAndAssignment(slotAssignment)
	workingRoommateAssignment := make([]roommates, len(bestRoommateAssignment))
	copy(workingRoommateAssignment, bestRoommateAssignment)
	logger.Println("starting iteration: ", workingRoommateAssignment)
	for i := 1; i < len(slotAssignment); i++ {
		room1 := i / 2
		order1 := i % 2
		person := workingRoommateAssignment[room1][order1]
		switchedRoom := -1
		switchedOrder := -1
		for j := i + 1; j < len(slotAssignment); j++ {
			room2 := j / 2
			if room1 == room2 {
				continue
			}
			order2 := j % 2
			candidate := workingRoommateAssignment[room2][order2]
			// trial swap: move person to (room2, order2)
			workingRoommateAssignment[room1][order1] = workingRoommateAssignment[room2][order2]
			workingRoommateAssignment[room2][order2] = person
			curCost := getRoommateCost(workingRoommateAssignment)
			if curCost <= bestCost {
				switchedRoom = room2
				switchedOrder = order2
				bestCost = curCost
			}
			// undo the far side; the near side is overwritten by the
			// next trial or restored after the loop
			workingRoommateAssignment[room2][order2] = candidate
		}
		if switchedRoom >= 0 {
			// commit the best swap found for seat i, in both copies
			tmp := workingRoommateAssignment[switchedRoom][switchedOrder]
			workingRoommateAssignment[switchedRoom][switchedOrder] = person
			workingRoommateAssignment[room1][order1] = tmp
			bestRoommateAssignment[switchedRoom][switchedOrder] = person
			bestRoommateAssignment[room1][order1] = tmp
		} else {
			// no improving swap: restore the original occupant
			workingRoommateAssignment[room1][order1] = person
		}
	}
	return bestCost, bestRoommateAssignment
}
// getRoommateCost totals the preference weights of every room pair,
// normalizing each pair to (smaller, larger) to match the pref map keys.
// Pairs absent from pref contribute 0.
func getRoommateCost(roomAssignment []roommates) int {
	total := 0
	for _, pair := range roomAssignment {
		a, b := pair[0], pair[1]
		if b < a {
			a, b = b, a
		}
		total += pref[roommates{a, b}]
	}
	return total
}
// slotToRoom decodes a "factorial base" slot assignment into room pairs:
// element i selects, among the seats still free, the seat that person i
// occupies; seats 2r and 2r+1 belong to room r.
// NOTE(review): the `v == len(slots)` branch looks unreachable (slots[v]
// above would already have panicked); v == len(slots)-1 is handled
// correctly by the final else branch — confirm intent.
func slotToRoom(slotAssignment []int) []roommates {
	length := len(slotAssignment)
	result := make([]roommates, (length+1)/2)
	// slots holds the indices of still-unassigned seats
	slots := make([]int, length)
	for i := 0; i < length; i++ {
		slots[i] = i
	}
	for i, v := range slotAssignment {
		curSlot := slots[v]
		result[curSlot/2][(curSlot+1)%2] = i
		if v == 0 {
			slots = slots[1:]
		} else if v == len(slots) {
			slots = slots[0 : len(slots)-1]
		} else {
			// remove the consumed seat from the free list
			slots = append(slots[0:v], slots[v+1:]...)
		}
	}
	return result
}
func getRandomChoice(count int) []int {
choice := make([]int, count)
for i := 0; i < count; i++ {
tmp := rand.Int() % (count - i)
choice[i] = tmp
}
return choice
}
|
// HELPER FUNCTION - Using bytes.Buffer for efficient string concatenation in Go
package helper
import (
"bytes"
)
// Concat joins all elements of values into a single string using a
// bytes.Buffer, which grows amortized instead of reallocating per piece.
func Concat(values []string) string {
	var out bytes.Buffer
	for i := 0; i < len(values); i++ {
		out.WriteString(values[i])
	}
	return out.String()
}
|
package main
import "fmt"
// main demonstrates byte and rune basics:
// 1. byte's zero value is 0 and its maximum is 255;
// 2. single-quoted characters are numeric and support arithmetic
//    ('a' - 32 prints as 'A'); backslash introduces escape characters.
func main() {
	var ch byte
	fmt.Println("ch =", ch)
	fmt.Printf("ch = %c, %T\n", ch, ch)
	ch = 255
	fmt.Println("ch =", ch)
	fmt.Printf("ch = %c\n", ch)
	var a = 'a'
	fmt.Println("a = ", a)
	fmt.Printf("a = %c\n", a)
	fmt.Printf("a = %c\n", a - 32)
}
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//637. Average of Levels in Binary Tree
//Given a non-empty binary tree, return the average value of the nodes on each level in the form of an array.
//Example 1:
//Input:
// 3
// / \
// 9 20
// / \
// 15 7
//Output: [3, 14.5, 11]
//Explanation:
//The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].
//Note:
//The range of node's value is in the range of 32-bit signed integer.
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func averageOfLevels(root *TreeNode) []float64 {
//}
// Time Is Money |
package _429_N_ary_Tree_Level_Order_Traversal
type Node struct {
Val int
Children []*Node
}
func levelOrder(root *Node) [][]int {
var (
ret [][]int
q, tq []*Node
)
if root == nil {
return ret
}
q = append(q, root)
for len(q) > 0 {
tr := []int{}
for _, n := range q {
if n == nil {
continue
}
tr = append(tr, n.Val)
tq = append(tq, n.Children...)
}
q = tq
tq = []*Node{}
ret = append(ret, tr)
}
return ret
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// RangeTableSample represents a TABLESAMPLE clause in a FROM item: the
// sampled relation, the sampling method and its arguments, and the
// optional REPEATABLE seed expression.
type RangeTableSample struct {
	Relation ast.Node
	Method *ast.List
	Args *ast.List
	Repeatable ast.Node
	Location int // source position reported by Pos — presumably a byte offset; confirm upstream
}
// Pos returns the node's location in the source SQL.
func (n *RangeTableSample) Pos() int {
	return n.Location
}
|
package network
import "github.com/meidoworks/nekoq-api/errorutil"
// _ERROR_CHANNEL_CLOSED is the shared sentinel returned by ErrChannelClosed.
// NOTE(review): Go convention would name these errChannelClosed etc.
// (MixedCaps); left unchanged because other files in this package may
// reference them.
var _ERROR_CHANNEL_CLOSED = errorutil.New("channel closed <- network <- nekoq-api")
// ErrChannelClosed returns the sentinel error for a closed network channel.
func ErrChannelClosed() error {
	return _ERROR_CHANNEL_CLOSED
}
// _ERROR_CHANNEL_QUEUE_NOT_READY is the sentinel returned by ErrChannelQueueNotReady.
var _ERROR_CHANNEL_QUEUE_NOT_READY = errorutil.New("write queue not ready <- network <- nekoq-api")
// ErrChannelQueueNotReady returns the sentinel error for a write queue that is not ready.
func ErrChannelQueueNotReady() error {
	return _ERROR_CHANNEL_QUEUE_NOT_READY
}
// _ERROR_UNKNOWN is the sentinel returned by ErrUnknown.
var _ERROR_UNKNOWN = errorutil.New("unknown <- network <- nekoq-api")
// ErrUnknown returns the sentinel error for an unspecified network failure.
func ErrUnknown() error {
	return _ERROR_UNKNOWN
}
|
package controllers
import (
"fmt"
"github.com/cmsvault/api/logging"
"net/http"
)
// Index returns an unprotected handler: it prints the request-scoped
// "params" context value, writes a plain-text response, and emits a log
// entry via logging.LogZap.
func Index() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// NOTE(review): a string context key trips go vet; presumably
		// some middleware stores "params" — a typed key would be safer.
		fmt.Printf("Params %+v", ctx.Value("params"))
		_, err := fmt.Fprint(w, "Not protected!\n")
		if err != nil {
			fmt.Println(err)
		}
		logging.LogZap()
	})
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// highestProduct returns the largest product of `size` consecutive digits
// in the decimal string num; a window shorter than the string yields 0.
// Non-digit characters contribute 0 (their Atoi error is ignored).
func highestProduct(num string, size int) int {
	digits := strings.Split(num, "")
	best := 0
	for start := 0; start+size <= len(digits); start++ {
		product := 1
		for offset := 0; offset < size; offset++ {
			d, _ := strconv.Atoi(digits[start+offset])
			product *= d
		}
		if product > best {
			best = product
		}
	}
	return best
}
func main() {
times, num, size := 0, "", 0
fmt.Scanf("%d", ×)
for i := 0; i < times; i++ {
for j := 0; j < 2; j++ {
fmt.Scanf("%d", &size)
}
fmt.Scanf("%v", &num)
fmt.Println(highestProduct(num, size))
}
}
|
package service
import (
	"fmt"
	"io"
	"net/http"
	"os"

	humanize "github.com/dustin/go-humanize"
)
// WriteCounter counts bytes written through it and reports the running
// total to a progress callback; io.TeeReader uses it as a side channel.
type WriteCounter struct {
	Total uint64 // cumulative number of bytes seen so far
	onProgress func(string) // receives a human-readable byte count on every write
}
// Write records len(p) into the running total, pushes a human-readable
// progress string to the callback, and never fails, satisfying io.Writer.
func (wc *WriteCounter) Write(p []byte) (int, error) {
	written := len(p)
	wc.Total += uint64(written)
	wc.onProgress(humanize.Bytes(wc.Total))
	return written, nil
}
// onProgress receives a human-readable byte count after each write.
type onProgress func(string)
// onSuccess is invoked once the download completes successfully.
type onSuccess func()
// onFailure is invoked when copying the response body fails.
type onFailure func()
// DownloadFile streams the resource at url into filepath, reporting
// human-readable progress through the progress callback. fail is invoked
// (and an error returned) when the server answers with a non-2xx status
// or the body copy fails; success is invoked on completion.
func DownloadFile(filepath string, url string, progress onProgress, success onSuccess, fail onFailure) error {
	out, err := os.Create(filepath)
	if err != nil {
		return err
	}
	defer out.Close()
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Reject non-success responses: without this check an error page
	// (404, 500, ...) would be saved to disk as if it were the file.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		fail()
		return fmt.Errorf("downloading %s: unexpected status %s", url, resp.Status)
	}
	counter := &WriteCounter{onProgress: progress}
	if _, err = io.Copy(out, io.TeeReader(resp.Body, counter)); err != nil {
		fail()
		return err
	}
	success()
	return nil
}
|
package wifi
import (
"bylib/bylog"
"bylib/byutils"
"encoding/json"
"fmt"
"github.com/go-cmd/cmd"
"runtime"
"strconv"
"strings"
"time"
)
// ConnResult mirrors the JSON emitted by /usr/sbin/checkwifi: the raw
// result/message strings and client IP, plus a Connect flag derived from
// Result == "success" in GetCurrApStatus.
type ConnResult struct{
	Result string `json:"result"`
	Message string `json:"message"`
	IP string `json:"ip"`
	Connect bool `json:"connect"`
}
// ApManager manages the saved multi-connect AP list: scanning for
// signals, selecting a candidate and switching the client wifi connection.
type ApManager struct{
	Config ApConfig // persisted configuration (saved AP list, retry interval)
}
// UciGetString runs `/sbin/uci get <key>` and returns the first line of
// its output; it returns an error when the command fails or prints nothing.
func (self *ApManager)UciGetString(key string)(str string,err error){
	aps:=cmd.NewCmd("/sbin/uci","get",key)
	// wait for the command to complete
	status := <-aps.Start()
	if status.Error!=nil{
		bylog.Error("UciGetString %s err=%v",key,status.Error)
		return "",status.Error
		//return ctx.Json(400,status.Error)
	}
	if len(status.Stdout) <= 0{
		return "",fmt.Errorf("UciGetString %s empty",key)
	}
	return status.Stdout[0],nil
}
// ListAp returns the configured multi-connect AP list after refreshing
// each entry's volatile state; the currently configured AP (if any) is
// marked as selected and annotated with its connection status and IP.
func (self *ApManager)ListAp()[]*ApInfo{
	//apcli=`ifconfig | grep apcli0`
	//ip=`ifconfig apcli0 | grep 'inet addr:'| cut -d: -f2 | awk '{ print $1}'`
	//LogDebug("ListAp")
	self.ClearApState()
	ap,err:=self.GetCurrAp()
	if err!=nil{
		bylog.Error("GetCurAp err=%v",err)
	}else{
		// mark the matching configured entry as the current selection
		if c:=self.FindAp(ap.SSID);c!=nil{
			c.Connected = ap.Connected
			c.Selected = true
			c.IP = ap.IP
		}
	}
	return self.Config.ApList
}
// GetCurrApStatus runs `/usr/sbin/checkwifi 1` and decodes its first
// output line as JSON into a ConnResult, deriving Connect from the
// "result" field. It returns nil when the command fails, prints nothing,
// or its output cannot be parsed.
func (self *ApManager)GetCurrApStatus()(*ConnResult){
	aps:=cmd.NewCmd("/usr/sbin/checkwifi","1")
	result:=&ConnResult{}
	// wait for the command to complete
	status := <-aps.Start()
	if status.Error!=nil{
		bylog.Error("checkwifi err=%v",status.Error)
		return nil
		//return ctx.Json(400,status.Error)
	}
	if len(status.Stdout) > 0{
		//for _, line := range status.Stdout {
		//	fmt.Println(line)
		//}
		if err:=json.Unmarshal([]byte(status.Stdout[0]),&result);err!=nil{
			bylog.Error("checkwifi err=%v",err)
			return nil
		}else{
			//LogDebug("result=%v",result)
			if result.Result=="success"{
				result.Connect = true
			}else{
				result.Connect = false
			}
			return result
		}
	}
	return nil
}
//func (self *ApManager)UciGetInt(key string)(int,err error){
//	return 0,nil
//}
// GetCurrAp reads the currently configured AP (SSID and password) from
// the UCI wireless configuration and, when available, augments it with
// live connection status from GetCurrApStatus.
func (self *ApManager)GetCurrAp()(ap *ApInfo,err error){
	//uci get wireless.@wifi-iface[0].ApCliSsid
	ap=&ApInfo{}
	ap.SSID,err = self.UciGetString("wireless.@wifi-iface[0].ApCliSsid")
	if err!=nil{
		return
	}
	//LogDebug("ssid=%s",ap.SSID)
	ap.PassWord,err = self.UciGetString("wireless.@wifi-iface[0].ApCliPassWord")
	if err!=nil{
		return
	}
	//LogDebug("password=%s",ap.PassWord)
	//var result *ConnResult
	result:=self.GetCurrApStatus()
	if result !=nil{
		ap.IP = result.IP
		ap.Connected = result.Connect
		ap.Selected = true
		//LogDebug("ip=%s ,conn=%v",ap.IP,ap.Connected)
	}
	return
}
// FindAp looks up a configured AP entry by SSID, returning nil when no
// entry in the saved list matches.
func (self *ApManager) FindAp(ssid string) *ApInfo {
	for i := range self.Config.ApList {
		if self.Config.ApList[i].SSID == ssid {
			return self.Config.ApList[i]
		}
	}
	return nil
}
// ClearApState resets the volatile connection state (selection flag,
// connectivity flag and IP) of every configured AP entry in place.
func (self *ApManager) ClearApState() {
	for i := range self.Config.ApList {
		entry := self.Config.ApList[i]
		entry.Selected = false
		entry.Connected = false
		entry.IP = ""
	}
}
// ScanApList scans for visible wifi signals by triggering a site survey
// via `iwpriv ra0` and parsing the result table. On windows it returns
// five fake "testN" entries for development. Survey lines that do not
// split into exactly 8 fields, or whose channel/signal columns are not
// numeric, are skipped.
// NOTE(review): the import path is "bylib/byutils" while calls use
// byutil.FormatError — presumably the package declares itself "byutil";
// confirm against the library.
func (self *ApManager)ScanApList()(aplist []ApSignal,err error){
	if runtime.GOOS == "windows" {
		// development stub: no wireless tooling on windows
		for i:=0 ; i < 5; i++{
			aplist = append(aplist,ApSignal{
				SSID:fmt.Sprintf("test%d",i+1),
			})
		}
		return aplist,nil
	}
	//iwpriv ra0 set SiteSurvey=1
	aps:=cmd.NewCmd("iwpriv","ra0","set","SiteSurvey=1")
	// wait for the survey trigger to complete
	status := <-aps.Start()
	if status.Error!=nil{
		return nil,byutil.FormatError("iwpriv ra0 set SiteSurvey=1",status.Error)
	}
	aps=cmd.NewCmd("sleep","5")
	// give the survey time to finish before reading results
	status = <-aps.Start()
	if status.Error!=nil{
		return nil,byutil.FormatError("sleep err",status.Error)
	}
	aps=cmd.NewCmd("iwpriv","ra0","get_site_survey")
	// wait for the result dump
	status = <-aps.Start()
	if status.Error!=nil{
		return nil,byutil.FormatError("iwpriv ra0 get_site_survey",status.Error)
	}
	//iwpriv ra0 get_site_survey
	for i, line := range status.Stdout {
		// the first two lines are table headers
		if i < 2{
			continue
		}
		//line = strings.TrimSpace(line)
		apstr := strings.Fields(line)
		//apstr:=strings.Split(line," ") // splitting on a single space is unreliable here
		if len(apstr) != 8{
			bylog.Error("%s len not 8",apstr)
			continue
		}
		//fmt.Printf("index=%d line=%s cont=%d\n",i,line,len(apstr))
		//fmt.Printf("1=%s\n",apstr[0])
		//fmt.Printf("2=%s\n",apstr[1])
		//fmt.Printf("3=%s\n",apstr[2])
		//fmt.Printf("4=%s\n",apstr[3])
		//fmt.Printf("5=%s\n",apstr[4])
		//fmt.Println(apstr)
		ap:=ApSignal{}
		var err error
		ap.Channle,err =strconv.Atoi(apstr[0])
		if err!=nil{
			bylog.Error("channel err=%s",err)
			continue
		}
		ap.SSID = apstr[1]
		ap.BSSID = apstr[2]
		ap.Security = apstr[3]
		ap.Signal,err =strconv.Atoi(apstr[4])
		if err!=nil{
			bylog.Error("Signal err=%s",err)
			continue
		}
		aplist=append(aplist,ap)
	}
	//bylog.Debug("aplist=%+v",aplist)
	return aplist,nil
}
// ConnectAp invokes `/usr/sbin/setwifi <ssid> <passwd>` to switch the
// wifi client to the given network and returns the command error, if any.
func (self *ApManager)ConnectAp(ssid,passwd string)error{
	aps:=cmd.NewCmd("/usr/sbin/setwifi",ssid,passwd)
	// wait for the command to complete
	status := <-aps.Start()
	if status.Error!=nil{
		bylog.Error("set wifi err=%v",status.Error)
	}
	return status.Error
}
// AddAp appends a new AP entry to the configuration and persists the list.
func (self *ApManager)AddAp(ap ApInfo)error{
	self.Config.AddAp(&ap)
	return self.Config.SaveApList()
}
// RemoveAp deletes the AP entry with the given SSID from the
// configuration and persists the list.
func (self *ApManager)RemoveAp(ap ApInfo)error {
	self.Config.RemoveAp(ap.SSID)
	return self.Config.SaveApList()
}
// ping is intended to test internet reachability for url.
// NOTE(review): currently a stub that always reports success, so
// runMultiConn treats every established link as having connectivity.
func (self *ApManager)ping(url string)bool{
	return true
}
// findConnectAps filters the configured AP entries down to those whose
// SSID is currently visible in the scanned signal list.
func (s *ApManager) findConnectAps(api []*ApInfo, aps ApSignalList) (apis []*ApInfo) {
	for _, candidate := range api {
		_, visible := aps.Find(candidate.SSID)
		if visible {
			apis = append(apis, candidate)
		}
	}
	return
}
// findMinTimeAp returns the entry with the smallest (oldest) connection
// timestamp, or nil when the slice is empty.
func (s *ApManager) findMinTimeAp(apis []*ApInfo) (ai *ApInfo) {
	for _, candidate := range apis {
		if ai == nil || candidate.ConnStamp < ai.ConnStamp {
			ai = candidate
		}
	}
	return
}
// findNextAp picks the next AP to try: among the configured entries whose
// signal is currently visible, it returns the one with the oldest
// connection timestamp, or nil when none is reachable.
func (s *ApManager) findNextAp(api []*ApInfo, aps ApSignalList) *ApInfo {
	candidates := s.findConnectAps(api, aps)
	if len(candidates) == 0 {
		return nil
	}
	return s.findMinTimeAp(candidates)
}
/*
runMultiConn is the automatic multi-connect loop:
1. Every RetryTime seconds read the current AP connection state; wifi may
   still be restarting after a reload, in which case we simply wait.
2. Check apcli0 for an established connection; when connected, skip.
3. Otherwise scan for visible APs and pick, from the saved list, the
   visible entry with the oldest connection attempt; if none is found,
   retry on the next round.
4. Switch to that AP (bumping its attempt timestamp) and reload wifi.
*/
func (self *ApManager)runMultiConn(){
	for{
		// check the AP connection state once per RetryTime interval
		bylog.Debug("retry=%d",self.Config.RetryTime)
		time.Sleep(time.Duration(self.Config.RetryTime) * time.Second)
		ap:=GetApConnState("apcli0")
		bylog.Debug("%+v",ap)
		if ap.Connected && self.ping(""){
			// already connected; ping could gate on internet access,
			// though it is currently a stub that always passes
			bylog.Debug("wifi has connected to %s",ap.Ap.SSID)
			continue
		}
		// not connected: scan for reachable signals and pick the next AP
		var aps ApSignalList
		var err error
		if aps,err=self.ScanApList();err!=nil{
			// nothing visible; try again next round
			bylog.Error("can not find ap list %s",err)
			continue
		}
		// choose a saved entry whose signal is visible
		api:=self.findNextAp(self.Config.ApList,aps)
		if api==nil{
			// no connectable candidate
			bylog.Error("can not find nextAp")
			continue
		}
		bylog.Debug("find next ap=%s ready connect",api.SSID)
		// the candidate may already be the configured AP, in which case
		// reloading the network is sufficient
		curAp,err:=self.GetCurrAp()
		if err!=nil{
			bylog.Error("GetCurrAp err=%s ",err)
			// no AP configured yet: switch to the candidate
			if err:=self.ConnectAp(api.SSID,api.PassWord);err!=nil{
				bylog.Error("ConnectAp err=%s",err)
			}
		}else{
			// an AP is configured; switch only when it differs
			bylog.Debug("curAp=%s connect ap=%s",curAp.SSID,api.SSID)
			if curAp.SSID != api.SSID{
				// different network: update ssid/password
				if err:=self.ConnectAp(api.SSID,api.PassWord);err!=nil{
					bylog.Error("ConnectAp err=%s",err)
				}
			}
		}
		// record the attempt time so rotation favors other entries next
		api.ConnStamp = time.Now().Unix()
		// reload the network to apply the change
		bylog.Debug("WifiReload")
		WifiReload()
	}
}
// Start loads the AP configuration, adds the currently configured AP to
// the saved list when it is missing, optionally starts the HTTP admin
// API, and launches the background multi-connect loop. It always
// returns nil.
func (self *ApManager)Start(enHttp bool)error{
	bylog.Debug("ApManager Start------")
	if err:=self.Config.LoadConfig();err!=nil{
		bylog.Error("LoadConfig failed %v",err)
	}
	// fetch the currently configured AP
	ap,err:=self.GetCurrAp()
	bylog.Debug("Find current Ap = %v",ap)
	if err == nil && ap!=nil{
		if self.FindAp(ap.SSID) == nil{
			bylog.Debug("Save Current Ap ")
			// current AP is missing from the saved list: add and persist it
			self.Config.AddAp(ap)
			self.Config.SaveApList()
		}
	}
	if enHttp{
		ApHttpInit()
	}
	go self.runMultiConn()
	return nil
}
// apm is the package-level singleton handed out by DefaultApAdmin.
var apm ApManager
// DefaultApAdmin returns the shared ApManager instance.
func DefaultApAdmin()*ApManager{
	return &apm
}
package main
import (
"io/ioutil"
"os"
"strings"
"github.com/UlisseMini/leetlog"
)
// main renames every "license_*" file in the current directory by
// stripping the "license_" prefix, logging each rename with the old name
// padded into aligned columns.
func main() {
	files, err := ioutil.ReadDir(".")
	if err != nil {
		leetlog.Fatal(err)
	}
	// column width for aligning the "old --> new" log output
	padnum := 23
	for _, file := range files {
		fname := file.Name()
		if !strings.HasPrefix(fname, "license_") {
			continue
		}
		newName := strings.TrimPrefix(fname, "license_")
		// Guard the pad width: strings.Repeat panics on a negative
		// count, which the original hit for names longer than padnum.
		pad := padnum - len(fname)
		if pad < 0 {
			pad = 0
		}
		leetlog.Infof("%s%s --> %s", fname, strings.Repeat(" ", pad), newName)
		if err := os.Rename(fname, newName); err != nil {
			leetlog.Error(err)
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.