text stringlengths 11 4.05M |
|---|
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// GetReportExecutionReader is a Reader for the GetReportExecution structure.
type GetReportExecutionReader struct {
	// formats carries the strfmt registry used to validate/parse
	// formatted fields while decoding response payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetReportExecutionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// 200 is the only status documented for this operation.
		result := NewGetReportExecutionOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		// Any undocumented status code is surfaced as a generic API error.
		return nil, runtime.NewAPIError("unknown error", response, response.Code())
	}
}
// NewGetReportExecutionOK creates a GetReportExecutionOK with default headers values
func NewGetReportExecutionOK() *GetReportExecutionOK {
	return &GetReportExecutionOK{}
}

/*GetReportExecutionOK handles this case with default header values.

GetReportExecutionOK get report execution o k
*/
type GetReportExecutionOK struct {
}

// Error implements the error interface so the response value can be
// returned through error-typed code paths by the generated client.
func (o *GetReportExecutionOK) Error() string {
	return fmt.Sprintf("[GET /{executionId}][%d] getReportExecutionOK ", 200)
}

// readResponse decodes the response body into o. The 200 response carries
// no payload, so there is nothing to read.
func (o *GetReportExecutionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
|
package siprocket
import (
"bytes"
"fmt"
"strings"
)
// Package-level parsing knobs.
// NOTE(review): neither variable is referenced in this chunk — presumably
// consumed by the per-header parsers elsewhere in the package; confirm
// before removing.
var sip_type = 0
var keep_src = true

// SipMsg is a fully parsed SIP message: request line, the common headers,
// and (when present) the parsed SDP body.
// All []byte fields alias the original input buffer passed to Parse.
type SipMsg struct {
	Req      sipReq     // Request / status line
	From     sipFrom    // From header
	To       sipTo      // To header
	Contact  sipContact // Contact header
	Via      []sipVia   // Via headers (may repeat)
	Cseq     sipCseq    // CSeq header
	Ua       sipVal     // User-Agent
	Exp      sipVal     // Expires
	MaxFwd   sipVal     // Max-Forwards
	CallId   sipVal     // Call-ID
	ContType sipVal     // Content-Type
	ContLen  sipVal     // Content-Length
	Sdp      SdpMsg     // Parsed SDP body
}

// SdpMsg holds the parsed SDP body fields.
type SdpMsg struct {
	MediaDesc sdpMediaDesc // m= media description
	Attrib    []sdpAttrib  // a= attributes (may repeat)
	ConnData  sdpConnData  // c= connection data
}

// sipVal is a simple header value plus, optionally, its full source bytes.
type sipVal struct {
	Value []byte // Sip Value
	Src   []byte // Full source if needed
}
// Parse parses a raw SIP packet (headers plus optional SDP body) into a
// SipMsg. The input is split on CRLF; line 0 is the request/status line,
// each following line is either a SIP header ("name: value") or an SDP
// field ("x=value"). The SipMsg is returned by value.
//
// NOTE(review): the returned struct's []byte fields alias sub-slices of the
// input v (no copies are made) — callers must not mutate v afterwards.
func Parse(v []byte) (output SipMsg) {
	// Pre-size for repeatable Via headers and SDP attributes.
	via_idx := 0
	output.Via = make([]sipVia, 0, 8)
	attr_idx := 0
	output.Sdp.Attrib = make([]sdpAttrib, 0, 8)

	lines := bytes.Split(v, []byte("\r\n"))

	for i, line := range lines {
		line = bytes.TrimSpace(line)
		if i == 0 {
			// For the first line parse the request/status line.
			parseSipReq(line, &output.Req)
		} else {
			// For subsequent lines split on the first separator:
			// ':' marks a SIP header, '=' marks an SDP field.
			spos, stype := indexSep(line)
			if spos > 0 && stype == ':' {
				// SIP: break up into lower-cased header name and trimmed value.
				lhdr := strings.ToLower(string(line[0:spos]))
				lval := bytes.TrimSpace(line[spos+1:])

				// Dispatch on the full or RFC 3261 compact header name.
				switch {
				case lhdr == "f" || lhdr == "from":
					parseSipFrom(lval, &output.From)
				case lhdr == "t" || lhdr == "to":
					parseSipTo(lval, &output.To)
				case lhdr == "m" || lhdr == "contact":
					// "m" is the compact form of Contact.
					parseSipContact(lval, &output.Contact)
				case lhdr == "v" || lhdr == "via":
					// Via can repeat: append an empty slot, parse into it.
					var tmpVia sipVia
					output.Via = append(output.Via, tmpVia)
					parseSipVia(lval, &output.Via[via_idx])
					via_idx++
				case lhdr == "i" || lhdr == "call-id":
					output.CallId.Value = lval
				case lhdr == "c" || lhdr == "content-type":
					output.ContType.Value = lval
				case lhdr == "content-length":
					output.ContLen.Value = lval
				case lhdr == "user-agent":
					output.Ua.Value = lval
				case lhdr == "expires":
					output.Exp.Value = lval
				case lhdr == "max-forwards":
					output.MaxFwd.Value = lval
				case lhdr == "cseq":
					parseSipCseq(lval, &output.Cseq)
				} // End of Switch
			}
			if spos == 1 && stype == '=' {
				// SDP: single-letter type at position 0, value after "x=".
				lhdr := strings.ToLower(string(line[0]))
				lval := bytes.TrimSpace(line[2:])
				switch {
				case lhdr == "m":
					parseSdpMediaDesc(lval, &output.Sdp.MediaDesc)
				case lhdr == "c":
					parseSdpConnectionData(lval, &output.Sdp.ConnData)
				case lhdr == "a":
					// a= can repeat: append an empty slot, parse into it.
					var tmpAttrib sdpAttrib
					output.Sdp.Attrib = append(output.Sdp.Attrib, tmpAttrib)
					parseSdpAttrib(lval, &output.Sdp.Attrib[attr_idx])
					attr_idx++
				} // End of Switch
			}
		}
	}
	return
}
// indexSep locates the first ':' or '=' in s and reports its index together
// with the separator character found. When neither occurs it returns (-1, ' ').
func indexSep(s []byte) (int, byte) {
	for pos, c := range s {
		switch c {
		case ':', '=':
			return pos, c
		}
	}
	return -1, ' '
}
// getString converts sl[from:to] to a string with defensive bounds
// handling: negative bounds are clamped to zero, a start past the end or
// past the stop yields "", and a stop past the end is truncated to len(sl).
func getString(sl []byte, from, to int) string {
	if from < 0 {
		from = 0
	}
	if to < 0 {
		to = 0
	}
	switch {
	case from > len(sl) || from > to:
		return ""
	case to > len(sl):
		return string(sl[from:])
	default:
		return string(sl[from:to])
	}
}
// getBytes returns the sub-slice sl[from:to] with defensive bounds
// handling: negative bounds are clamped to zero, a start past the end or
// past the stop yields nil, and a stop past the end is truncated to len(sl).
// The result aliases sl; it is not a copy.
func getBytes(sl []byte, from, to int) []byte {
	if from < 0 {
		from = 0
	}
	if to < 0 {
		to = 0
	}
	switch {
	case from > len(sl) || from > to:
		return nil
	case to > len(sl):
		return sl[from:]
	default:
		return sl[from:to]
	}
}
// PrintSipStruct dumps every parsed SIP and SDP field of data to stdout in
// an indented, human-readable layout. Debugging aid only.
//
// NOTE(review): the "[Tansport]" and "[Recevied]" labels are typos in the
// runtime output; they are preserved here byte-for-byte since tooling may
// already grep for them.
func PrintSipStruct(data *SipMsg) {
	fmt.Println("-SIP --------------------------------")
	fmt.Println("  [REQ]")
	fmt.Println("    [UriType] =>", data.Req.UriType)
	fmt.Println("    [Method] =>", string(data.Req.Method))
	fmt.Println("    [StatusCode] =>", string(data.Req.StatusCode))
	fmt.Println("    [User] =>", string(data.Req.User))
	fmt.Println("    [Host] =>", string(data.Req.Host))
	fmt.Println("    [Port] =>", string(data.Req.Port))
	fmt.Println("    [UserType] =>", string(data.Req.UserType))
	fmt.Println("    [Src] =>", string(data.Req.Src))
	// FROM
	fmt.Println("  [FROM]")
	fmt.Println("    [UriType] =>", data.From.UriType)
	fmt.Println("    [Name] =>", string(data.From.Name))
	fmt.Println("    [User] =>", string(data.From.User))
	fmt.Println("    [Host] =>", string(data.From.Host))
	fmt.Println("    [Port] =>", string(data.From.Port))
	fmt.Println("    [Tag] =>", string(data.From.Tag))
	fmt.Println("    [Src] =>", string(data.From.Src))
	// TO
	fmt.Println("  [TO]")
	fmt.Println("    [UriType] =>", data.To.UriType)
	fmt.Println("    [Name] =>", string(data.To.Name))
	fmt.Println("    [User] =>", string(data.To.User))
	fmt.Println("    [Host] =>", string(data.To.Host))
	fmt.Println("    [Port] =>", string(data.To.Port))
	fmt.Println("    [Tag] =>", string(data.To.Tag))
	fmt.Println("    [UserType] =>", string(data.To.UserType))
	fmt.Println("    [Src] =>", string(data.To.Src))
	// CONTACT
	fmt.Println("  [Contact]")
	fmt.Println("    [UriType] =>", data.Contact.UriType)
	fmt.Println("    [Name] =>", string(data.Contact.Name))
	fmt.Println("    [User] =>", string(data.Contact.User))
	fmt.Println("    [Host] =>", string(data.Contact.Host))
	fmt.Println("    [Port] =>", string(data.Contact.Port))
	fmt.Println("    [Transport] =>", string(data.Contact.Tran))
	fmt.Println("    [Q] =>", string(data.Contact.Qval))
	fmt.Println("    [Expires] =>", string(data.Contact.Expires))
	fmt.Println("    [Src] =>", string(data.Contact.Src))
	// CSEQ
	fmt.Println("  [Cseq]")
	fmt.Println("    [Id] =>", string(data.Cseq.Id))
	fmt.Println("    [Method] =>", string(data.Cseq.Method))
	fmt.Println("    [Src] =>", string(data.Cseq.Src))
	// UA
	fmt.Println("  [User Agent]")
	fmt.Println("    [Value] =>", string(data.Ua.Value))
	fmt.Println("    [Src] =>", string(data.Ua.Src))
	// Exp
	fmt.Println("  [Expires]")
	fmt.Println("    [Value] =>", string(data.Exp.Value))
	fmt.Println("    [Src] =>", string(data.Exp.Src))
	// MaxFwd
	fmt.Println("  [Max Forwards]")
	fmt.Println("    [Value] =>", string(data.MaxFwd.Value))
	fmt.Println("    [Src] =>", string(data.MaxFwd.Src))
	// CallId
	fmt.Println("  [Call-ID]")
	fmt.Println("    [Value] =>", string(data.CallId.Value))
	fmt.Println("    [Src] =>", string(data.CallId.Src))
	// Content-Type
	fmt.Println("  [Content-Type]")
	fmt.Println("    [Value] =>", string(data.ContType.Value))
	fmt.Println("    [Src] =>", string(data.ContType.Src))
	// Via - Multiple
	fmt.Println("  [Via]")
	for i, via := range data.Via {
		fmt.Println("    [", i, "]")
		fmt.Println("      [Tansport] =>", via.Trans)
		fmt.Println("      [Host] =>", string(via.Host))
		fmt.Println("      [Port] =>", string(via.Port))
		fmt.Println("      [Branch] =>", string(via.Branch))
		fmt.Println("      [Rport] =>", string(via.Rport))
		fmt.Println("      [Maddr] =>", string(via.Maddr))
		fmt.Println("      [ttl] =>", string(via.Ttl))
		fmt.Println("      [Recevied] =>", string(via.Rcvd))
		fmt.Println("      [Src] =>", string(via.Src))
	}
	fmt.Println("-SDP --------------------------------")
	// Media Desc
	fmt.Println("  [MediaDesc]")
	fmt.Println("    [MediaType] =>", string(data.Sdp.MediaDesc.MediaType))
	fmt.Println("    [Port] =>", string(data.Sdp.MediaDesc.Port))
	fmt.Println("    [Proto] =>", string(data.Sdp.MediaDesc.Proto))
	fmt.Println("    [Fmt] =>", string(data.Sdp.MediaDesc.Fmt))
	fmt.Println("    [Src] =>", string(data.Sdp.MediaDesc.Src))
	// Connection Data
	fmt.Println("  [ConnData]")
	fmt.Println("    [AddrType] =>", string(data.Sdp.ConnData.AddrType))
	fmt.Println("    [ConnAddr] =>", string(data.Sdp.ConnData.ConnAddr))
	fmt.Println("    [Src] =>", string(data.Sdp.ConnData.Src))
	// Attribs - Multiple
	fmt.Println("  [Attrib]")
	for i, attr := range data.Sdp.Attrib {
		fmt.Println("    [", i, "]")
		fmt.Println("      [Cat] =>", string(attr.Cat))
		fmt.Println("      [Val] =>", string(attr.Val))
		fmt.Println("      [Src] =>", string(attr.Src))
	}
	fmt.Println("-------------------------------------")
}
// Field identifiers shared by the per-header parser state machines.
// SIP-related field ids occupy 0-21, SDP-related ids start at 40, and
// FIELD_IGNORE is a sentinel for content to be skipped.
const (
	FIELD_NULL       = 0
	FIELD_BASE       = 1
	FIELD_VALUE      = 2
	FIELD_NAME       = 3
	FIELD_NAMEQ      = 4
	FIELD_USER       = 5
	FIELD_HOST       = 6
	FIELD_PORT       = 7
	FIELD_TAG        = 8
	FIELD_ID         = 9
	FIELD_METHOD     = 10
	FIELD_TRAN       = 11
	FIELD_BRANCH     = 12
	FIELD_RPORT      = 13
	FIELD_MADDR      = 14
	FIELD_TTL        = 15
	FIELD_REC        = 16
	FIELD_EXPIRES    = 17
	FIELD_Q          = 18
	FIELD_USERTYPE   = 19
	FIELD_STATUS     = 20
	FIELD_STATUSDESC = 21

	FIELD_ADDRTYPE = 40
	FIELD_CONNADDR = 41
	FIELD_MEDIA    = 42
	FIELD_PROTO    = 43
	FIELD_FMT      = 44
	FIELD_CAT      = 45

	FIELD_IGNORE = 255
)
|
package taxes
import (
"fmt"
"github.com/julienschmidt/httprouter"
"github.com/mpdroog/invoiced/config"
"github.com/mpdroog/invoiced/db"
"github.com/mpdroog/invoiced/hour"
"github.com/mpdroog/invoiced/invoice"
"github.com/mpdroog/invoiced/writer"
"github.com/shopspring/decimal"
"github.com/xuri/excelize/v2"
"log"
"net/http"
"strconv"
"strings"
)
// InvoiceLine is one ledger line of the accountant summary, derived from a
// single invoice line.
type InvoiceLine struct {
	Description  string // Line description as it appears on the invoice
	InvoiceId    string // Owning invoice id (format like "2021Q1-0152")
	Debet        string // Line amount excluding VAT (copied from line.Total)
	Credit       string // VAT amount (computed at 21%)
	Total        string // Debet + Credit
	Issuedate    string // Invoice issue date
	CustomerName string // Customer the invoice was billed to
}

// Overview is the yearly summary returned by Summary. All amounts are kept
// as decimal strings (accumulated via addValue).
type Overview struct {
	Sum   string // Grand total including VAT
	Ex    string // NL revenue excluding VAT
	Tax   string // Total VAT
	EUEx  string // EU reverse-charged revenue excluding VAT // TODO per the original author
	Hours string // Total booked hours

	NLCompany    map[string]string      // "name-vat" => revenue (NL customers)
	EUCompany    map[string]string      // "name-vat" => revenue (EU reverse charge)
	Invoices     map[string]string      // invoice id => total including VAT
	InvoiceLines map[string]InvoiceLine // "invoiceid-lineidx" => ledger line
}
// Summary creates a yearly summary for the accountant: grand totals,
// per-company revenue and per-invoice ledger lines for the given entity and
// year. The result is rendered as JSON by default, or as an Excel workbook
// when requested via "Accept: application/vnd.ms-excel" or ?excel=.
func Summary(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	entity := ps.ByName("entity")
	year, e := strconv.Atoi(ps.ByName("year"))
	if e != nil {
		// Constant format string: go vet's printf check flags
		// log.Printf(e.Error()), and error text could contain verbs.
		log.Printf("taxes.Summary: %s", e.Error())
		http.Error(w, "taxes.Summary failed reading year-arg", 400)
		return
	}
	// TODO: hardcoded customer-name => accounting relation code mapping;
	// should come from configuration.
	relationCodes := map[string]string{
		"XSNews B.V.":          "3",
		"ITS HOSTED":           "4",
		"Money Factory B.V.":   "5",
		"Omniga GmbH & Co. KG": "6",
		"NIMA":                 "7",
	}

	sum := &Overview{}
	sum.NLCompany = make(map[string]string)
	sum.EUCompany = make(map[string]string)
	sum.Invoices = make(map[string]string)
	sum.InvoiceLines = make(map[string]InvoiceLine)
	sum.Hours = "0"
	sum.EUEx = "0.00"

	e = db.View(func(t *db.Txn) error {
		// Accumulate all booked hours for the year.
		paths := []string{fmt.Sprintf("%s/%d/{all}/hours", entity, year)}
		h := new(hour.Hour)
		_, e := t.List(paths, db.Pagination{From: 0, Count: 0}, h, func(filename, filepath, path string) error {
			hours := "0.00"
			var e error
			for n := 0; n < len(h.Lines); n++ {
				raw := strconv.FormatFloat(h.Lines[n].Hours, 'f', 0, 64)
				hours, e = addValue(hours, raw, 0)
				if e != nil {
					return e
				}
			}
			if config.Verbose {
				log.Printf("hours=%s", hours)
			}
			sum.Hours, e = addValue(sum.Hours, hours, 0)
			if e != nil {
				return e
			}
			// Reset so the next listed file starts from a clean slate.
			h.Lines = nil
			return nil
		})
		if e != nil {
			return e
		}

		// Accumulate paid and unpaid sales invoices.
		paths = []string{
			fmt.Sprintf("%s/%d/{all}/sales-invoices-paid", entity, year),
			fmt.Sprintf("%s/%d/{all}/sales-invoices-unpaid", entity, year),
		}
		u := new(invoice.Invoice)
		// NOTE(review): &u passes a **invoice.Invoice while the hours call
		// above passes h (a *hour.Hour) directly — confirm which form
		// t.List actually expects.
		_, e = t.List(paths, db.Pagination{From: 0, Count: 0}, &u, func(filename, filepath, path string) error {
			var e error
			if config.Verbose {
				log.Printf("Invoice(%s) total=%s ex=%s", u.Meta.Invoiceid, u.Total.Total, u.Total.Ex)
			}
			sum.Sum, e = addValue(sum.Sum, u.Total.Total, 2)
			if e != nil {
				return e
			}
			idname := u.Customer.Name + "-" + u.Customer.Vat
			sum.Invoices[u.Meta.Invoiceid] = u.Total.Total

			if strings.Contains(u.Notes, "VAT Reverse charge") {
				// EU reverse-charged invoice: bucket under EU totals.
				sum.EUEx, e = addValue(sum.EUEx, u.Total.Ex, 2)
				if e != nil {
					// Check immediately — the addValue below would
					// otherwise silently overwrite this error.
					return e
				}
				custvat, ok := sum.EUCompany[idname]
				if !ok {
					custvat = "0.00"
				}
				sum.EUCompany[idname], e = addValue(custvat, u.Total.Total, 2)
			} else {
				sum.Ex, e = addValue(sum.Ex, u.Total.Ex, 2)
				if e != nil {
					// Same dropped-error fix as the EU branch above.
					return e
				}
				custvat, ok := sum.NLCompany[idname]
				if !ok {
					custvat = "0.00"
				}
				sum.NLCompany[idname], e = addValue(custvat, u.Total.Total, 2)
			}
			if e != nil {
				return e
			}

			// Per-line ledger breakdown; VAT computed at 21%.
			for idx, line := range u.Lines {
				// e.g. 4647,75/100*21
				extotal, e := decimal.NewFromString(line.Total)
				if e != nil {
					return e
				}
				tax := decimal.NewFromFloat(0)
				// TODO: if debtor.TAX == "NL21" {
				tax = extotal.Div(decimal.NewFromFloat(100)).Mul(decimal.NewFromFloat(21))
				total := extotal.Add(tax)
				sum.InvoiceLines[fmt.Sprintf("%s-%d", u.Meta.Invoiceid, idx)] = InvoiceLine{
					Description:  line.Description,
					InvoiceId:    u.Meta.Invoiceid,
					Debet:        line.Total,
					Credit:       tax.StringFixed(2),
					Total:        total.StringFixed(2),
					Issuedate:    u.Meta.Issuedate,
					CustomerName: u.Customer.Name,
				}
			}
			sum.Tax, e = addValue(sum.Tax, u.Total.Tax, 2)
			return e
		})
		return e
	})
	if e != nil {
		// NOTE(review): panicking from a handler relies on an upstream
		// recover middleware; consider an explicit 500 response instead.
		panic(e)
	}

	isExcel := false
	if accept := r.Header.Get("Accept"); accept == "application/vnd.ms-excel" {
		isExcel = true
	}
	if r.URL.Query().Get("excel") != "" {
		isExcel = true
	}
	if isExcel {
		// Return Excel-sheet for accountant.
		f := excelize.NewFile()
		// Overview sheet (the default "Sheet1").
		{
			sheet := "Sheet1"
			f.SetCellValue(sheet, "A1", "Revenue")
			f.SetCellValue(sheet, "B1", "RevenueExTax")
			f.SetCellValue(sheet, "C1", "Tax")
			f.SetCellValue(sheet, "D1", "Hours")
			//
			f.SetCellValue(sheet, "A2", sum.Sum)
			f.SetCellValue(sheet, "B2", sum.Ex)
			f.SetCellValue(sheet, "C2", sum.Tax)
			f.SetCellValue(sheet, "D2", sum.Hours)
		}
		// Per-company revenue, EU first then NL.
		{
			sheet := "Companies"
			f.NewSheet(sheet)
			f.SetCellValue(sheet, "A1", "Company-VAT")
			f.SetCellValue(sheet, "B1", "Revenue")
			pos := 1
			for idname, total := range sum.EUCompany {
				pos++
				f.SetCellValue(sheet, fmt.Sprintf("A%d", pos), idname)
				f.SetCellValue(sheet, fmt.Sprintf("B%d", pos), total)
			}
			for idname, total := range sum.NLCompany {
				pos++
				f.SetCellValue(sheet, fmt.Sprintf("A%d", pos), idname)
				f.SetCellValue(sheet, fmt.Sprintf("B%d", pos), total)
			}
		}
		// Invoice id => accounting id => revenue.
		{
			sheet := "Invoices"
			f.NewSheet(sheet)
			f.SetCellValue(sheet, "A1", "InvoiceID")
			f.SetCellValue(sheet, "B1", "AccountingID")
			f.SetCellValue(sheet, "C1", "Revenue")
			pos := 1
			for id, total := range sum.Invoices {
				pos++
				// Assumes ids look like "2021Q1-0152" — TODO confirm; an id
				// without "-" would panic here.
				acctId := fmt.Sprintf("%d%s", year, strings.Split(id, "-")[1])
				f.SetCellValue(sheet, fmt.Sprintf("A%d", pos), id)
				f.SetCellValue(sheet, fmt.Sprintf("B%d", pos), acctId)
				f.SetCellValue(sheet, fmt.Sprintf("C%d", pos), total)
			}
		}
		// Journal entries: one debtor/VAT/revenue row triple per line.
		{
			sheet := "AccountingSales"
			f.NewSheet(sheet)
			f.SetCellValue(sheet, "A1", "fldDagboek")
			f.SetCellValue(sheet, "B1", "fldBoekingcode")
			f.SetCellValue(sheet, "C1", "Datum")
			f.SetCellValue(sheet, "D1", "Grootboeknummer")
			f.SetCellValue(sheet, "E1", "Debet")
			f.SetCellValue(sheet, "F1", "Credit")
			f.SetCellValue(sheet, "G1", "ImportBoekingID")
			f.SetCellValue(sheet, "H1", "Volgnummer")
			f.SetCellValue(sheet, "I1", "Boekstuk")
			f.SetCellValue(sheet, "J1", "Omschrijving")
			f.SetCellValue(sheet, "K1", "Relatiecode")
			f.SetCellValue(sheet, "L1", "Factuurnummer")
			f.SetCellValue(sheet, "M1", "Kostenplaatsnummer")
			pos := 1
			for _, line := range sum.InvoiceLines {
				// "2021Q1-0152"
				acctId := fmt.Sprintf("%d%s", year, strings.Split(line.InvoiceId, "-")[1])
				// TODO: hardcoded ledgers (1300=debtors, 1671=VAT, 8000=revenue)
				for _, ledger := range []string{"1300", "1671", "8000"} {
					pos++
					f.SetCellValue(sheet, fmt.Sprintf("A%d", pos), "1300")
					f.SetCellValue(sheet, fmt.Sprintf("B%d", pos), acctId)
					f.SetCellValue(sheet, fmt.Sprintf("C%d", pos), line.Issuedate)
					f.SetCellValue(sheet, fmt.Sprintf("D%d", pos), ledger)
					debet := ""
					if ledger == "1300" {
						debet = line.Total
					}
					f.SetCellValue(sheet, fmt.Sprintf("E%d", pos), debet)
					credit := ""
					if ledger == "1671" {
						credit = line.Credit
					} else if ledger == "8000" {
						credit = line.Debet
					}
					f.SetCellValue(sheet, fmt.Sprintf("F%d", pos), credit)
					f.SetCellValue(sheet, fmt.Sprintf("G%d", pos), "")
					f.SetCellValue(sheet, fmt.Sprintf("H%d", pos), "")
					f.SetCellValue(sheet, fmt.Sprintf("I%d", pos), acctId)
					f.SetCellValue(sheet, fmt.Sprintf("J%d", pos), line.Description)
					debtorCode := "0"
					if val, ok := relationCodes[line.CustomerName]; ok {
						debtorCode = val
					}
					f.SetCellValue(sheet, fmt.Sprintf("K%d", pos), debtorCode)
					f.SetCellValue(sheet, fmt.Sprintf("L%d", pos), line.InvoiceId)
					f.SetCellValue(sheet, fmt.Sprintf("M%d", pos), "")
				}
			}
		}
		{
			sheet := "AccountingPurchases"
			f.NewSheet(sheet)
			// TODO: Read from Voorbelasting.txt?
		}

		fname := fmt.Sprintf("%s-%d.xlsx", entity, year)
		w.Header().Set("Content-Type", "application/vnd.ms-excel")
		w.Header().Set("Content-Disposition", "attachment; filename="+fname)
		if _, e := f.WriteTo(w); e != nil {
			log.Printf("summary.excel.WriteTo %s", e.Error())
		}
		return
	}

	if e := writer.Encode(w, r, sum); e != nil {
		log.Printf("summary.Summary %s", e.Error())
	}
}
|
package main
import (
"testing"
)
// Test configuration loading and parsing
// using the default config.
func TestLoadConfigs(t *testing.T) {
	config, err := loadConfig("../etc/alicelg/alice.conf")
	if err != nil {
		// Fatal, not Error: the assertions below dereference the config,
		// so continuing after a failed load only produces confusing
		// follow-on failures.
		t.Fatal("Could not load test config:", err)
	}
	if config.Server.Listen == "" {
		t.Error("Listen string not present.")
	}
	if len(config.Ui.RoutesColumns) == 0 {
		t.Error("Route columns settings missing")
	}
	if len(config.Ui.RoutesRejections.Reasons) == 0 {
		t.Error("Rejection reasons missing")
	}
}
|
package history
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"reflect"
"sync"
"time"
"github.com/oklog/ulid/v2"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/jinzhu/copier"
)
const (
	pluginName   = "gorm-history"
	createCbName = pluginName + ":after_create"
	updateCbName = pluginName + ":after_update"
	// disabledOptionKey doubles as a typed context key and — via string
	// conversion — as a gorm instance-setting key; see Disable/IsDisabled.
	disabledOptionKey disabledOptionCtxKey = pluginName + ":disabled"
)

var (
	// Compile-time assertion that *Plugin satisfies gorm.Plugin.
	_ gorm.Plugin = (*Plugin)(nil)

	// ErrUnsupportedOperation is wrapped into errors returned when a
	// record's primary key cannot be determined.
	ErrUnsupportedOperation = errors.New("history is not supported for this operation")
)
type (
	// disabledOptionCtxKey is the private key type used for the
	// disable flag stored in the context.
	disabledOptionCtxKey string

	// VersionFunc produces the version string stored on a history entry.
	VersionFunc func(ctx *Context) (Version, error)
	// CopyFunc copies the recorded object's fields onto the history value h.
	CopyFunc func(r Recordable, h interface{}) error

	// callback is the shape of a gorm lifecycle callback.
	callback func(db *gorm.DB)

	// Context carries everything a VersionFunc may need about the event
	// being recorded; exposed read-only through the getter methods.
	Context struct {
		object   Recordable
		history  History
		objectID interface{}
		action   Action
		db       *gorm.DB
	}

	Option struct{}

	// Config holds the plugin's configurable behavior.
	Config struct {
		VersionFunc VersionFunc
		CopyFunc    CopyFunc
	}

	// ConfigFunc mutates a Config while New assembles the plugin.
	ConfigFunc func(c *Config)

	// ULIDVersion generates ULID-based versions; mu serializes access to
	// the shared monotonic entropy reader.
	ULIDVersion struct {
		entropy io.Reader
		mu      sync.Mutex
	}

	// IsZeroer lets primary-key values report whether they are unset.
	IsZeroer interface {
		IsZero() bool
	}

	// primaryKeyField is a snapshot of a model's primary-key field.
	primaryKeyField struct {
		name   string
		value  interface{}
		isZero bool
	}

	// Plugin implements gorm.Plugin and records history rows after
	// create and update operations.
	Plugin struct {
		versionFunc VersionFunc
		copyFunc    CopyFunc
		createCb    callback
		updateCb    callback
	}
)
// New builds the history plugin, applying any supplied configuration
// functions on top of the defaults (ULID-based versions and the
// copier-backed DefaultCopyFunc).
func New(configFuncs ...ConfigFunc) *Plugin {
	cfg := &Config{
		VersionFunc: NewULIDVersion().Version,
		CopyFunc:    DefaultCopyFunc,
	}
	for _, apply := range configFuncs {
		apply(cfg)
	}
	return &Plugin{
		versionFunc: cfg.VersionFunc,
		copyFunc:    cfg.CopyFunc,
	}
}
// WithVersionFunc overrides the function used to generate history versions.
func WithVersionFunc(fn VersionFunc) ConfigFunc {
	return func(cfg *Config) { cfg.VersionFunc = fn }
}
// WithCopyFunc overrides the function used to copy record fields onto the
// history value.
func WithCopyFunc(fn CopyFunc) ConfigFunc {
	return func(cfg *Config) { cfg.CopyFunc = fn }
}
// NewULIDVersion returns a ULID version generator whose monotonic entropy
// source is seeded from the current wall clock.
func NewULIDVersion() *ULIDVersion {
	seed := time.Now().UnixNano()
	return &ULIDVersion{
		entropy: ulid.Monotonic(rand.New(rand.NewSource(seed)), 0),
	}
}
// Disable returns a session with history recording switched off. The flag
// is stored twice — as a context value and as a gorm instance setting — so
// IsDisabled can find it via either path.
func Disable(db *gorm.DB) *gorm.DB {
	ctx := context.WithValue(db.Statement.Context, disabledOptionKey, true)
	return db.WithContext(ctx).Set(string(disabledOptionKey), true)
}
// IsDisabled reports whether history recording was switched off for this
// session, either through the gorm instance setting or the context value
// that Disable installs.
func IsDisabled(db *gorm.DB) bool {
	if _, found := db.Get(string(disabledOptionKey)); found {
		return true
	}
	return db.Statement.Context.Value(disabledOptionKey) != nil
}
// Name returns the plugin name registered with gorm.
func (p *Plugin) Name() string {
	return pluginName
}
// Initialize registers the plugin's callbacks with gorm: one running after
// gorm:create and one after gorm:update, so history is written once the
// row itself (including any generated primary key) exists.
func (p *Plugin) Initialize(db *gorm.DB) error {
	p.createCb = p.callback(ActionCreate)
	p.updateCb = p.callback(ActionUpdate)
	err := db.
		Callback().
		Create().
		After("gorm:create").
		Register(createCbName, p.createCb)
	if err != nil {
		return err
	}
	return db.
		Callback().
		Update().
		After("gorm:update").
		Register(updateCbName, p.updateCb)
}
// callback builds the gorm callback recording history for the given action.
// It inspects the statement's reflected value and handles both single-struct
// and slice saves; any failure is attached to the *gorm.DB via AddError.
func (p Plugin) callback(action Action) func(db *gorm.DB) {
	return func(db *gorm.DB) {
		// No parsed schema (e.g. raw SQL): nothing to record.
		if db.Statement.Schema == nil {
			return
		}
		if IsDisabled(db) {
			return
		}
		v := db.Statement.ReflectValue
		switch v.Kind() {
		case reflect.Struct:
			h, isRecordable, err := p.processStruct(v, action, db)
			if err != nil {
				db.AddError(err)
				return
			}
			if !isRecordable {
				return
			}
			if err := p.saveHistory(db, h); err != nil {
				db.AddError(err)
				return
			}
		case reflect.Slice:
			hs, err := p.processSlice(v, action, db)
			if err != nil {
				db.AddError(err)
				return
			}
			if len(hs) == 0 {
				return
			}
			if err := p.saveHistory(db, hs...); err != nil {
				db.AddError(err)
				return
			}
		}
	}
}
// saveHistory persists the given history entries one by one on a fresh
// session (NewDB) so the history INSERTs do not inherit statement state —
// or re-trigger the callbacks — of the original operation. Associations
// are omitted; only the history row itself is written.
func (p *Plugin) saveHistory(db *gorm.DB, hs ...History) error {
	if len(hs) == 0 {
		return nil
	}
	session := db.Session(&gorm.Session{
		NewDB: true,
	})
	for _, record := range hs {
		if err := session.Omit(clause.Associations).Create(record).Error; err != nil {
			return err
		}
	}
	return nil
}
// processStruct builds one history entry from the reflected struct v.
// The bool result reports whether v implements Recordable; when it is
// false (and err is nil) the caller should simply skip the value.
func (p *Plugin) processStruct(v reflect.Value, action Action, db *gorm.DB) (History, bool, error) {
	vi := v.Interface()
	r, ok := vi.(Recordable)
	if !ok {
		return nil, false, nil
	}
	pk, err := getPrimaryKeyValue(db, v)
	if err != nil {
		return nil, false, err
	}
	// Without a populated primary key the history entry cannot be linked
	// back to its source row.
	if pk.isZero {
		return nil, false, fmt.Errorf("not able to determine record primary key value: %w", ErrUnsupportedOperation)
	}
	h, err := p.newHistory(r, action, db, pk)
	if err != nil {
		return nil, true, err
	}
	return h, true, nil
}
// processSlice builds a history entry for every Recordable element of the
// reflected slice v; elements that are not Recordable are skipped.
func (p *Plugin) processSlice(v reflect.Value, action Action, db *gorm.DB) ([]History, error) {
	var histories []History
	for i := 0; i < v.Len(); i++ {
		entry, recordable, err := p.processStruct(v.Index(i), action, db)
		if err != nil {
			return nil, err
		}
		if !recordable {
			continue
		}
		histories = append(histories, entry)
	}
	return histories, nil
}
// newHistory materializes a history value for r: it copies r's fields onto
// the history struct, clears the copied primary key, stamps action /
// version / object id, and fills any optional metadata interfaces
// (timestamps, user, source) the history type implements.
func (p *Plugin) newHistory(r Recordable, action Action, db *gorm.DB, pk *primaryKeyField) (History, error) {
	hist := r.CreateHistory()
	// makePtr (defined elsewhere in the package) presumably yields an
	// addressable pointer for copyFunc to write into — confirm there.
	ihist := makePtr(hist)
	if err := p.copyFunc(r, ihist); err != nil {
		return nil, err
	}
	// The history row must receive its own key, not the source row's.
	if err := unsetStructField(hist, pk.name); err != nil {
		return nil, err
	}
	// Parse the history model so its schema is known before saving.
	if err := db.Statement.Parse(hist); err != nil {
		return nil, err
	}
	ctx := &Context{
		object:   r,
		objectID: pk.value,
		history:  hist.(History),
		action:   action,
		db:       db,
	}
	version, err := p.versionFunc(ctx)
	if err != nil {
		return nil, fmt.Errorf("error generating history version: %w", err)
	}
	hist.SetHistoryAction(action)
	hist.SetHistoryVersion(version)
	hist.SetHistoryObjectID(pk.value)
	// Optional capabilities, applied only when the history type opts in.
	if th, ok := hist.(TimestampableHistory); ok {
		th.SetHistoryCreatedAt(db.NowFunc())
	}
	if bh, ok := hist.(BlameableHistory); ok {
		if user, ok := GetUser(db); ok {
			bh.SetHistoryUserID(user.ID)
			bh.SetHistoryUserEmail(user.Email)
		}
	}
	if bh, ok := hist.(SourceableHistory); ok {
		if source, ok := GetSource(db); ok {
			bh.SetHistorySourceID(source.ID)
			bh.SetHistorySourceType(source.Type)
		}
	}
	return hist.(History), nil
}
// Object returns the record the history entry is being generated for.
func (c *Context) Object() Recordable {
	return c.object
}

// History returns the history value under construction.
func (c *Context) History() History {
	return c.history
}

// ObjectID returns the recorded object's primary-key value.
func (c *Context) ObjectID() interface{} {
	return c.objectID
}

// Action returns the triggering action (create or update).
func (c *Context) Action() Action {
	return c.action
}

// DB returns the *gorm.DB the triggering operation ran on.
func (c *Context) DB() *gorm.DB {
	return c.db
}
// Version returns a fresh ULID for update actions. For creates it returns
// the empty version — presumably marking the initial state; confirm against
// consumers of History versions.
func (v *ULIDVersion) Version(ctx *Context) (Version, error) {
	if ctx.Action() == ActionCreate {
		return "", nil
	}
	// The shared monotonic entropy reader is not safe for concurrent use;
	// the mutex serializes ULID generation.
	v.mu.Lock()
	defer v.mu.Unlock()
	uid, err := ulid.New(ulid.Timestamp(time.Now()), v.entropy)
	if err != nil {
		return "", err
	}
	return Version(uid.String()), nil
}
// DefaultCopyFunc copies the record's fields onto the history value using
// the copier library. h must be a pointer so the copy has a writable target.
func DefaultCopyFunc(r Recordable, h interface{}) error {
	if kind := reflect.ValueOf(h).Kind(); kind != reflect.Ptr {
		return fmt.Errorf("pointer expected but got %T", h)
	}
	return copier.Copy(h, r)
}
|
package pod_mutator
import (
"testing"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects/address"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
// TestCreateInstallInitContainerBase verifies how the generated init
// container's SecurityContext combines webhook defaults with the pod-level
// and container-level security contexts of the mutated pod.
func TestCreateInstallInitContainerBase(t *testing.T) {
	t.Run("should create the init container with set container sec ctx but without user and group", func(t *testing.T) {
		dynakube := getTestDynakube()
		pod := getTestPod()
		pod.Spec.Containers[0].SecurityContext.RunAsUser = nil
		pod.Spec.Containers[0].SecurityContext.RunAsGroup = nil
		webhookImage := "test-image"
		clusterID := "id"
		initContainer := createInstallInitContainerBase(webhookImage, clusterID, pod, *dynakube)
		require.NotNil(t, initContainer)
		assert.Equal(t, initContainer.Image, webhookImage)
		assert.Equal(t, initContainer.Resources, testResourceRequirements)
		require.NotNil(t, initContainer.SecurityContext.AllowPrivilegeEscalation)
		assert.False(t, *initContainer.SecurityContext.AllowPrivilegeEscalation)
		require.NotNil(t, initContainer.SecurityContext.Privileged)
		assert.False(t, *initContainer.SecurityContext.Privileged)
		require.NotNil(t, initContainer.SecurityContext.ReadOnlyRootFilesystem)
		assert.True(t, *initContainer.SecurityContext.ReadOnlyRootFilesystem)
		require.NotNil(t, initContainer.SecurityContext.RunAsNonRoot)
		assert.True(t, *initContainer.SecurityContext.RunAsNonRoot)
		require.NotNil(t, initContainer.SecurityContext.RunAsUser)
		assert.Equal(t, *initContainer.SecurityContext.RunAsUser, defaultUser)
		require.NotNil(t, initContainer.SecurityContext.RunAsGroup)
		assert.Equal(t, *initContainer.SecurityContext.RunAsGroup, defaultGroup)
	})
	t.Run("should overwrite partially", func(t *testing.T) {
		dynakube := getTestDynakube()
		pod := getTestPod()
		testUser := address.Of(int64(420))
		pod.Spec.Containers[0].SecurityContext.RunAsUser = nil
		pod.Spec.Containers[0].SecurityContext.RunAsGroup = testUser
		webhookImage := "test-image"
		clusterID := "id"
		initContainer := createInstallInitContainerBase(webhookImage, clusterID, pod, *dynakube)
		require.NotNil(t, initContainer.SecurityContext.RunAsNonRoot)
		assert.True(t, *initContainer.SecurityContext.RunAsNonRoot)
		// NotNil must check the pointer, not its dereference: dereferencing
		// a nil pointer would panic before the assertion could fail.
		require.NotNil(t, initContainer.SecurityContext.RunAsUser)
		assert.Equal(t, *initContainer.SecurityContext.RunAsUser, defaultUser)
		require.NotNil(t, initContainer.SecurityContext.RunAsGroup)
		assert.Equal(t, *initContainer.SecurityContext.RunAsGroup, *testUser)
	})
	t.Run("container SecurityContext overrules defaults", func(t *testing.T) {
		dynakube := getTestDynakube()
		pod := getTestPod()
		// Use distinct values so the assertions can actually tell whether
		// the container-level or the pod-level security context won.
		overruledUser := address.Of(int64(430))
		testUser := address.Of(int64(420))
		pod.Spec.SecurityContext = &corev1.PodSecurityContext{}
		pod.Spec.SecurityContext.RunAsUser = overruledUser
		pod.Spec.SecurityContext.RunAsGroup = overruledUser
		pod.Spec.Containers[0].SecurityContext.RunAsUser = testUser
		pod.Spec.Containers[0].SecurityContext.RunAsGroup = testUser
		webhookImage := "test-image"
		clusterID := "id"
		initContainer := createInstallInitContainerBase(webhookImage, clusterID, pod, *dynakube)
		require.NotNil(t, initContainer.SecurityContext.RunAsNonRoot)
		assert.True(t, *initContainer.SecurityContext.RunAsNonRoot)
		require.NotNil(t, initContainer.SecurityContext.RunAsUser)
		assert.Equal(t, *initContainer.SecurityContext.RunAsUser, *testUser)
		require.NotNil(t, initContainer.SecurityContext.RunAsGroup)
		assert.Equal(t, *initContainer.SecurityContext.RunAsGroup, *testUser)
	})
	t.Run("PodSecurityContext overrules defaults", func(t *testing.T) {
		dynakube := getTestDynakube()
		testUser := address.Of(int64(420))
		pod := getTestPod()
		pod.Spec.Containers[0].SecurityContext = nil
		pod.Spec.SecurityContext = &corev1.PodSecurityContext{}
		pod.Spec.SecurityContext.RunAsUser = testUser
		pod.Spec.SecurityContext.RunAsGroup = testUser
		webhookImage := "test-image"
		clusterID := "id"
		initContainer := createInstallInitContainerBase(webhookImage, clusterID, pod, *dynakube)
		require.NotNil(t, initContainer.SecurityContext.RunAsNonRoot)
		assert.True(t, *initContainer.SecurityContext.RunAsNonRoot)
		require.NotNil(t, initContainer.SecurityContext.RunAsUser)
		assert.Equal(t, *testUser, *initContainer.SecurityContext.RunAsUser)
		require.NotNil(t, initContainer.SecurityContext.RunAsGroup)
		assert.Equal(t, *testUser, *initContainer.SecurityContext.RunAsGroup)
	})
	t.Run("should not set RunAsNonRoot if root user is used", func(t *testing.T) {
		dynakube := getTestDynakube()
		pod := getTestPod()
		pod.Spec.Containers[0].SecurityContext.RunAsUser = address.Of(rootUserGroup)
		pod.Spec.Containers[0].SecurityContext.RunAsGroup = address.Of(rootUserGroup)
		webhookImage := "test-image"
		clusterID := "id"
		initContainer := createInstallInitContainerBase(webhookImage, clusterID, pod, *dynakube)
		assert.Nil(t, initContainer.SecurityContext.RunAsNonRoot)
		require.NotNil(t, initContainer.SecurityContext.RunAsUser)
		assert.Equal(t, *initContainer.SecurityContext.RunAsUser, rootUserGroup)
		require.NotNil(t, initContainer.SecurityContext.RunAsGroup)
		assert.Equal(t, *initContainer.SecurityContext.RunAsGroup, rootUserGroup)
	})
}
|
package main
import (
"errors"
"fmt"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/node/bindnode"
)
const (
	// UndefTime marks an unset timestamp (the zero value of Time).
	UndefTime = Time(0)
)

// Status encodes a task's lifecycle state as an int64 (schema-defined values).
type Status int64

// Time is an int64 timestamp carried through the IPLD schema.
// NOTE(review): the epoch/unit (seconds vs milliseconds) is not visible
// here — confirm against the producers of these values.
type Time int64
// PopTask is the request payload used when a worker pops a task.
type PopTask struct {
	Status   Status   // Status to assign to the popped task
	WorkedBy string   // Worker claiming the task
	Tag      []string // Tags to filter candidate tasks by
}

// RetrievalTask describes a retrieval deal to execute. Pointer fields
// correspond to optional values in the IPLD schema.
type RetrievalTask struct {
	Miner           string
	PayloadCID      string
	CARExport       bool
	Schedule        *string
	ScheduleLimit   *string
	Tag             *string
	MaxPriceAttoFIL *int
}

// AuthenticatedRecord pairs a record link with its signature bytes.
type AuthenticatedRecord struct {
	Record    ipld.Link
	Signature []byte
}

// RecordUpdate is a batch of authenticated records chained to a previous
// update via SigPrev/Previous.
type RecordUpdate struct {
	Records  []*AuthenticatedRecord
	SigPrev  []byte
	Previous *ipld.Link
}
// StorageTask describes a storage deal to execute, including optional
// scheduling and follow-up retrieval parameters (pointer fields are
// optional in the IPLD schema).
type StorageTask struct {
	Miner                    string
	MaxPriceAttoFIL          int64
	Size                     int64
	StartOffset              int64
	FastRetrieval            bool
	Verified                 bool
	Schedule                 *string
	ScheduleLimit            *string
	Tag                      *string
	RetrievalSchedule        *string
	RetrievalScheduleLimit   *string
	RetrievalMaxPriceAttoFIL *int64
}

// Logs is a single timestamped log line attached to a stage.
type Logs struct {
	Log       string
	UpdatedAt Time
}

// StageDetails captures the state of one execution stage: description,
// expected duration, accumulated logs and last-update time.
type StageDetails struct {
	Description      string
	ExpectedDuration string
	Logs             []*Logs
	UpdatedAt        Time
}
// UpdateTask is the payload used to update a task's progress in place.
type UpdateTask struct {
	Status              Status
	ErrorMessage        string
	Stage               string
	CurrentStageDetails *StageDetails
	WorkedBy            string
	RunCount            int64
}

// StageDetailsList is an ordered history of completed stage states.
type StageDetailsList []*StageDetails

// Task is the full record of a single deal task. Exactly one of
// RetrievalTask / StorageTask is expected to be set — presumably enforced
// by the IPLD schema; confirm there.
type Task struct {
	UUID                string
	Status              Status
	WorkedBy            string
	Stage               string
	CurrentStageDetails *StageDetails
	PastStageDetails    StageDetailsList
	StartedAt           Time
	RunCount            int64
	ErrorMessage        string
	RetrievalTask       *RetrievalTask
	StorageTask         *StorageTask
}

// Tasks is a list of Task records.
type Tasks []Task
// FinishedTask is the archived result of a completed task, including deal
// identifiers, timing metrics and version metadata. Pointer fields are
// optional in the IPLD schema.
type FinishedTask struct {
	Status             Status
	StartedAt          Time
	ErrorMessage       *string
	RetrievalTask      *RetrievalTask
	StorageTask        *StorageTask
	DealID             int64
	MinerMultiAddr     string
	ClientApparentAddr string
	MinerLatencyMS     *int64
	TimeToFirstByteMS  *int64
	TimeToLastByteMS   *int64
	Events             ipld.Link
	MinerVersion       *string
	ClientVersion      *string
	Size               *int64
	PayloadCID         *string
	ProposalCID        *string
	DealIDString       *string
	MinerPeerID        *string
}

// FinishedTasks is a list of FinishedTask records.
type FinishedTasks []FinishedTask
// ToNode wraps the task as an IPLD node in representation form.
func (t *Task) ToNode() (n ipld.Node, err error) {
	// TODO: remove the panic recovery once IPLD bindnode is stabilized.
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass t itself: bindnode.Wrap expects a pointer to the Go struct the
	// prototype was built from (*Task). The previous &t handed it a
	// **Task, which cannot match the schema type.
	n = bindnode.Wrap(t, TaskPrototype.Type()).Representation()
	return
}
// UnwrapTask unwraps the given node as a Task.
//
// Note that the node is reassigned to TaskPrototype if its prototype is different.
// Therefore, it is recommended to load the node using the correct prototype initially
// to avoid unnecessary node reassignment.
func UnwrapTask(node ipld.Node) (*Task, error) {
	// When an IPLD node is loaded using `Prototype.Any` unwrap with bindnode will not work.
	// Here we defensively check the prototype and wrap if needed, since:
	// - linksystem in sti is passed into other libraries, like go-legs, and
	// - for whatever reason clients of this package may load nodes using Prototype.Any.
	//
	// The code in this repo, however should load nodes with appropriate prototype and never trigger
	// this if statement.
	if node.Prototype() != TaskPrototype {
		tsBuilder := TaskPrototype.NewBuilder()
		err := tsBuilder.AssignNode(node)
		if err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*Task)
	if !ok || t == nil {
		return nil, fmt.Errorf("unwrapped node does not match schema.Task")
	}
	return t, nil
}
// ToNode encodes f as an IPLD node using its schema representation.
func (f *FinishedTask) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*FinishedTask); &f would be a
	// **FinishedTask and make bindnode panic.
	n = bindnode.Wrap(f, FinishedTaskPrototype.Type()).Representation()
	return
}
// UnwrapFinishedTask unwraps the given node as a FinishedTask.
//
// The node is defensively rebuilt with FinishedTaskPrototype when it was
// loaded with a different prototype; load with the correct prototype to
// avoid the extra conversion.
func UnwrapFinishedTask(node ipld.Node) (*FinishedTask, error) {
	if node.Prototype() != FinishedTaskPrototype {
		tsBuilder := FinishedTaskPrototype.NewBuilder()
		if err := tsBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*FinishedTask)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.FinishedTask")
	}
	return t, nil
}
// ToNode encodes s as an IPLD node using its schema representation.
func (s *StorageTask) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*StorageTask); &s would be a
	// **StorageTask and make bindnode panic.
	n = bindnode.Wrap(s, StorageTaskPrototype.Type()).Representation()
	return
}
// UnwrapStorageTask unwraps the given node as a StorageTask.
//
// The node is defensively rebuilt with StorageTaskPrototype when it was
// loaded with a different prototype.
func UnwrapStorageTask(node ipld.Node) (*StorageTask, error) {
	if node.Prototype() != StorageTaskPrototype {
		tsBuilder := StorageTaskPrototype.NewBuilder()
		if err := tsBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*StorageTask)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.StorageTask")
	}
	return t, nil
}
// ToNode encodes u as an IPLD node using its schema representation.
func (u *UpdateTask) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*UpdateTask); &u would be a
	// **UpdateTask and make bindnode panic.
	n = bindnode.Wrap(u, UpdatedTaskPrototype.Type()).Representation()
	return
}
// UnwrapUpdateTask unwraps the given node as an UpdateTask.
//
// The node is defensively rebuilt with UpdatedTaskPrototype when it was
// loaded with a different prototype.
func UnwrapUpdateTask(node ipld.Node) (*UpdateTask, error) {
	if node.Prototype() != UpdatedTaskPrototype {
		tsBuilder := UpdatedTaskPrototype.NewBuilder()
		if err := tsBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*UpdateTask)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.UpdateTask")
	}
	return t, nil
}
// ToNode encodes r as an IPLD node using its schema representation.
func (r *RetrievalTask) ToNode() (n ipld.Node, err error) {
	defer func() {
		// Named rec rather than r so it does not shadow the receiver.
		if rec := recover(); rec != nil {
			err = toError(rec)
		}
	}()
	// Pass the receiver itself (*RetrievalTask); &r would be a
	// **RetrievalTask and make bindnode panic.
	n = bindnode.Wrap(r, RetrievalTaskPrototype.Type()).Representation()
	return
}
// UnwrapRetrievalTask unwraps the given node as a RetrievalTask.
//
// The node is defensively rebuilt with RetrievalTaskPrototype when it was
// loaded with a different prototype.
func UnwrapRetrievalTask(node ipld.Node) (*RetrievalTask, error) {
	if node.Prototype() != RetrievalTaskPrototype {
		tsBuilder := RetrievalTaskPrototype.NewBuilder()
		if err := tsBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*RetrievalTask)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.RetrievalTask")
	}
	return t, nil
}
// NewTasks collects the given tasks into a Tasks list, copying each
// element by value.
func NewTasks(ts []*Task) *Tasks {
	// Pre-size to the known length to avoid repeated slice growth.
	t := make(Tasks, 0, len(ts))
	for _, c := range ts {
		t = append(t, *c)
	}
	return &t
}
// ToNode encodes ts as an IPLD node using its schema representation.
func (ts *Tasks) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*Tasks); &ts would be a **Tasks and make
	// bindnode panic.
	n = bindnode.Wrap(ts, TasksPrototype.Type()).Representation()
	return
}
// UnwrapTasks unwraps the given node as a Tasks list.
//
// The node is defensively rebuilt with TasksPrototype when it was loaded
// with a different prototype.
func UnwrapTasks(node ipld.Node) (*Tasks, error) {
	if node.Prototype() != TasksPrototype {
		tsBuilder := TasksPrototype.NewBuilder()
		if err := tsBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = tsBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*Tasks)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.Tasks")
	}
	return t, nil
}
// ToNode encodes pt as an IPLD node using its schema representation.
func (pt *PopTask) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*PopTask); &pt would be a **PopTask and
	// make bindnode panic.
	n = bindnode.Wrap(pt, PopTaskPrototype.Type()).Representation()
	return
}
// UnwrapPopTask unwraps the given node as a PopTask.
//
// The node is defensively rebuilt with PopTaskPrototype when it was
// loaded with a different prototype.
func UnwrapPopTask(node ipld.Node) (*PopTask, error) {
	if node.Prototype() != PopTaskPrototype {
		ptBuilder := PopTaskPrototype.NewBuilder()
		if err := ptBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = ptBuilder.Build()
	}
	t, ok := bindnode.Unwrap(node).(*PopTask)
	if !ok || t == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.PopTask")
	}
	return t, nil
}
// ToNode encodes sdl as an IPLD node using its schema representation.
func (sdl *StageDetailsList) ToNode() (n ipld.Node, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = toError(r)
		}
	}()
	// Pass the receiver itself (*StageDetailsList); &sdl would be a
	// **StageDetailsList and make bindnode panic.
	n = bindnode.Wrap(sdl, StageDetailsListPrototype.Type()).Representation()
	return
}
// UnwrapStageDetailsList unwraps the given node as a StageDetailsList.
//
// The node is defensively rebuilt with StageDetailsListPrototype when it
// was loaded with a different prototype.
func UnwrapStageDetailsList(node ipld.Node) (*StageDetailsList, error) {
	if node.Prototype() != StageDetailsListPrototype {
		sdlBuilder := StageDetailsListPrototype.NewBuilder()
		if err := sdlBuilder.AssignNode(node); err != nil {
			return nil, fmt.Errorf("failed to convert node prototype: %w", err)
		}
		node = sdlBuilder.Build()
	}
	s, ok := bindnode.Unwrap(node).(*StageDetailsList)
	if !ok || s == nil {
		// Name the actual expected schema type (was "schema.Task").
		return nil, fmt.Errorf("unwrapped node does not match schema.StageDetailsList")
	}
	return s, nil
}
// toError normalizes a recovered panic value into an error: strings are
// wrapped with errors.New, errors pass through unchanged, and anything
// else is formatted into a generic message.
func toError(r interface{}) error {
	if s, ok := r.(string); ok {
		return errors.New(s)
	}
	if e, ok := r.(error); ok {
		return e
	}
	return fmt.Errorf("unknown panic: %v", r)
}
|
// +build !linux
package kernel
import (
"github.com/lavaorg/telex"
"github.com/lavaorg/telex/plugins/inputs"
)
// Kernel is the non-Linux stand-in for the kernel input plugin (selected
// by the `!linux` build constraint at the top of this file). It keeps the
// plugin registered but gathers nothing, since /proc/stat is unavailable.
type Kernel struct {
}

// Description returns a one-line summary of the plugin.
func (k *Kernel) Description() string {
	return "Get kernel statistics from /proc/stat"
}

// SampleConfig returns an example configuration snippet (none is needed).
func (k *Kernel) SampleConfig() string { return "" }

// Gather is a no-op on non-Linux platforms.
func (k *Kernel) Gather(acc telex.Accumulator) error {
	return nil
}

// init registers the plugin under the "kernel" name.
func init() {
	inputs.Add("kernel", func() telex.Input {
		return &Kernel{}
	})
}
|
package cmd
import(
"rediproxy/cmd/strings"
)
// Cmd aggregates the command handler groups supported by the proxy;
// currently it embeds the string-command handlers.
type Cmd struct {
	strings.Strings
}
|
package bd
import (
"github.com/Estiven9644/twittor-backend/models"
"golang.org/x/crypto/bcrypt"
)
// IntentoLogin attempts a login: it looks up the user by email and
// compares the supplied plaintext password against the bcrypt hash
// stored in the database. It returns the user record and whether the
// login succeeded.
func IntentoLogin(email, password string) (models.Usuario, bool) {
	usu, encontrado, _ := ChequeoYaExisteUsuario(email)
	if !encontrado {
		// Unknown email: fail without touching bcrypt.
		return usu, false
	}
	// usu.Password holds the bcrypt hash; password is the plaintext attempt.
	err := bcrypt.CompareHashAndPassword([]byte(usu.Password), []byte(password))
	return usu, err == nil
}
|
package cmd
import (
"errors"
"github.com/geospace/sac"
"github.com/spf13/cobra"
)
// createReplacesCmd builds the `replaces` subcommand, which records the
// given module names under the "dessert.replaces" key of package.json.
func createReplacesCmd(sacJSON *sac.Sac) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "replaces",
		Short: "Indicate which module you want to replace",
		// Require at least one module argument.
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("replaces requires at least one module to replace")
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// package.json must already exist; this command only updates it.
			if err := sacJSON.ReadConfig("package.json"); err != nil {
				return errors.New(errPackageJSONNeeded)
			}
			sacJSON.Set("dessert.replaces", args)
			Logger.Infof("replacing %v", args)
			return sacJSON.WriteConfig()
		},
	}
	return cmd
}
|
package main
import (
"fmt"
)
// main demonstrates that Go strings are immutable: to change one byte we
// convert to []byte, mutate, and convert back.
func main() {
	s := "hello"
	// Convert the string to a mutable []byte.
	c := []byte(s)
	c[0] = 'c'
	fmt.Println(c) // [99 101 108 108 111] ('h'=104 was replaced by 'c'=99)
	// Convert back to a string.
	s2 := string(c)
	fmt.Printf("%s\n", s2) // cello
}
// 公众号服务列表
// 1. 获取已托管的公众号列表
// 2. 获取公众号基本信息
package controllers
import (
"github.com/1046102779/common/utils"
. "github.com/1046102779/official_account/logger"
"github.com/1046102779/official_account/models"
"github.com/astaxie/beego"
"github.com/pkg/errors"
)
// OfficialAccountsController handles HTTP operations for hosted
// official (WeChat) accounts.
type OfficialAccountsController struct {
	beego.Controller
}
// GetOfficialAccounts returns the list of hosted official accounts for
// the company identified in the request header. Errors are reported as
// a JSON envelope with err_code/err_msg.
// @router /official_accounts [GET]
func (t *OfficialAccountsController) GetOfficialAccounts() {
	companyId, retcode, err := utils.GetCompanyIdFromHeader(t.Ctx.Request)
	if err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": retcode,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	officialAccounts, retcode, err := models.GetOfficialAccounts(companyId)
	if err != nil {
		Logger.Error(err.Error())
		t.Data["json"] = map[string]interface{}{
			"err_code": retcode,
			"err_msg":  errors.Cause(err).Error(),
		}
		t.ServeJSON()
		return
	}
	t.Data["json"] = map[string]interface{}{
		"err_code":          0,
		"err_msg":           "",
		"official_accounts": officialAccounts,
	}
	t.ServeJSON()
	// The redundant trailing `return` was removed (staticcheck S1023).
}
|
package router
import (
"fmt"
"github.com/futurehomeno/fimpgo"
"github.com/futurehomeno/fimpgo/edgeapp"
log "github.com/sirupsen/logrus"
"github.com/thingsplex/thingsplex_service_template/model"
"path/filepath"
"strings"
)
// FromFimpRouter receives inbound FIMP messages from the MQTT transport
// and routes them to the application's handlers.
type FromFimpRouter struct {
	inboundMsgCh fimpgo.MessageCh      // channel fed by the MQTT transport
	mqt          *fimpgo.MqttTransport // transport used for subscribe/publish
	instanceId   string
	appLifecycle *edgeapp.Lifecycle // app/auth/config state machine
	configs      *model.Configs     // persisted service configuration
}
// NewFromFimpRouter wires a router to the given transport, lifecycle and
// configuration store, and registers its inbound channel as "ch1" on the
// transport (buffer of 5 messages).
func NewFromFimpRouter(mqt *fimpgo.MqttTransport, appLifecycle *edgeapp.Lifecycle, configs *model.Configs) *FromFimpRouter {
	router := &FromFimpRouter{
		inboundMsgCh: make(fimpgo.MessageCh, 5),
		mqt:          mqt,
		appLifecycle: appLifecycle,
		configs:      configs,
	}
	router.mqt.RegisterChannel("ch1", router.inboundMsgCh)
	return router
}
// Start subscribes to the service's MQTT topics and launches the
// goroutine that feeds inbound messages to routeFimpMessage.
func (fc *FromFimpRouter) Start() {
	// TODO: Choose either adapter or app topic
	// ------ Adapter topics ---------------------------------------------
	fc.mqt.Subscribe(fmt.Sprintf("pt:j1/+/rt:dev/rn:%s/ad:1/#", model.ServiceName))
	fc.mqt.Subscribe(fmt.Sprintf("pt:j1/+/rt:ad/rn:%s/ad:1", model.ServiceName))
	// ------ Application topic -------------------------------------------
	//fc.mqt.Subscribe(fmt.Sprintf("pt:j1/+/rt:app/rn:%s/ad:1",model.ServiceName))
	go func(msgChan fimpgo.MessageCh) {
		// Ranging over the channel replaces the single-case for/select
		// loop (staticcheck S1000). It also lets the goroutine terminate
		// if the channel is ever closed, instead of spinning on zero
		// values forever.
		for newMsg := range msgChan {
			fc.routeFimpMessage(newMsg)
		}
	}(fc.inboundMsgCh)
}
// routeFimpMessage dispatches one inbound FIMP message by service and
// message type. Most device branches are template stubs (see TODOs);
// the model.ServiceName branch implements the standard app commands
// (auth, manifest, state, config, log level, reconnect, factory reset).
func (fc *FromFimpRouter) routeFimpMessage(newMsg *fimpgo.Message) {
	log.Debug("New fimp msg")
	// Strip the "_0" instance suffix from the service address.
	addr := strings.Replace(newMsg.Addr.ServiceAddress, "_0", "", 1)
	switch newMsg.Payload.Service {
	case "out_lvl_switch":
		addr = strings.Replace(addr, "l", "", 1)
		switch newMsg.Payload.Type {
		case "cmd.binary.set":
			// TODO: This is example . Add your logic here or remove
		case "cmd.lvl.set":
			// TODO: This is an example . Add your logic here or remove
		}
	case "out_bin_switch":
		log.Debug("Sending switch")
		// TODO: This is an example . Add your logic here or remove
	case model.ServiceName:
		// Default event address, used when the request carries no
		// response topic.
		adr := &fimpgo.Address{MsgType: fimpgo.MsgTypeEvt, ResourceType: fimpgo.ResourceTypeAdapter, ResourceName: model.ServiceName, ResourceAddress: "1"}
		switch newMsg.Payload.Type {
		case "cmd.auth.login":
			authReq := model.Login{}
			err := newMsg.Payload.GetObjectValue(&authReq)
			if err != nil {
				log.Error("Incorrect login message ")
				return
			}
			// NOTE(review): status starts as "authenticated" and
			// SetAuthState below runs unconditionally, even on the
			// empty-credentials error path — confirm this is intended
			// once the TODO logic is filled in.
			status := model.AuthStatus{
				Status:    edgeapp.AuthStateAuthenticated,
				ErrorText: "",
				ErrorCode: "",
			}
			if authReq.Username != "" && authReq.Password != "" {
				// TODO: This is an example . Add your logic here or remove
			} else {
				status.Status = "ERROR"
				status.ErrorText = "Empty username or password"
			}
			fc.appLifecycle.SetAuthState(edgeapp.AuthStateAuthenticated)
			msg := fimpgo.NewMessage("evt.auth.status_report", model.ServiceName, fimpgo.VTypeObject, status, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				// if response topic is not set , sending back to default application event topic
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.auth.set_tokens":
			authReq := model.SetTokens{}
			err := newMsg.Payload.GetObjectValue(&authReq)
			if err != nil {
				log.Error("Incorrect login message ")
				return
			}
			status := model.AuthStatus{
				Status:    edgeapp.AuthStateAuthenticated,
				ErrorText: "",
				ErrorCode: "",
			}
			if authReq.AccessToken != "" && authReq.RefreshToken != "" {
				// TODO: This is an example . Add your logic here or remove
			} else {
				status.Status = "ERROR"
				status.ErrorText = "Empty username or password"
			}
			fc.appLifecycle.SetAuthState(edgeapp.AuthStateAuthenticated)
			msg := fimpgo.NewMessage("evt.auth.status_report", model.ServiceName, fimpgo.VTypeObject, status, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				// if response topic is not set , sending back to default application event topic
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.app.get_manifest":
			mode, err := newMsg.Payload.GetStringValue()
			if err != nil {
				log.Error("Incorrect request format ")
				return
			}
			manifest := edgeapp.NewManifest()
			err = manifest.LoadFromFile(filepath.Join(fc.configs.GetDefaultDir(), "app-manifest.json"))
			if err != nil {
				log.Error("Failed to load manifest file .Error :", err.Error())
				return
			}
			// "manifest_state" requests also include live app/config state.
			if mode == "manifest_state" {
				manifest.AppState = *fc.appLifecycle.GetAllStates()
				manifest.ConfigState = fc.configs
			}
			msg := fimpgo.NewMessage("evt.app.manifest_report", model.ServiceName, fimpgo.VTypeObject, manifest, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				// if response topic is not set , sending back to default application event topic
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.app.get_state":
			msg := fimpgo.NewMessage("evt.app.manifest_report", model.ServiceName, fimpgo.VTypeObject, fc.appLifecycle.GetAllStates(), nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				// if response topic is not set , sending back to default application event topic
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.config.get_extended_report":
			msg := fimpgo.NewMessage("evt.config.extended_report", model.ServiceName, fimpgo.VTypeObject, fc.configs, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.config.extended_set":
			conf := model.Configs{}
			err := newMsg.Payload.GetObjectValue(&conf)
			if err != nil {
				// TODO: This is an example . Add your logic here or remove
				log.Error("Can't parse configuration object")
				return
			}
			// Apply and persist the new configuration.
			fc.configs.Param1 = conf.Param1
			fc.configs.Param2 = conf.Param2
			fc.configs.SaveToFile()
			log.Debugf("App reconfigured . New parameters : %v", fc.configs)
			// TODO: This is an example . Add your logic here or remove
			configReport := model.ConfigReport{
				OpStatus: "ok",
				AppState: *fc.appLifecycle.GetAllStates(),
			}
			msg := fimpgo.NewMessage("evt.app.config_report", model.ServiceName, fimpgo.VTypeObject, configReport, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.log.set_level":
			// Configure log level; only persisted when the level parses.
			level, err := newMsg.Payload.GetStringValue()
			if err != nil {
				return
			}
			logLevel, err := log.ParseLevel(level)
			if err == nil {
				log.SetLevel(logLevel)
				fc.configs.LogLevel = level
				fc.configs.SaveToFile()
			}
			log.Info("Log level updated to = ", logLevel)
		case "cmd.system.reconnect":
			// This is optional operation.
			//val := map[string]string{"status":status,"error":errStr}
			val := edgeapp.ButtonActionResponse{
				Operation:       "cmd.system.reconnect",
				OperationStatus: "ok",
				Next:            "config",
				ErrorCode:       "",
				ErrorText:       "",
			}
			msg := fimpgo.NewMessage("evt.app.config_action_report", model.ServiceName, fimpgo.VTypeObject, val, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.app.factory_reset":
			// Reset all lifecycle states back to unconfigured.
			val := edgeapp.ButtonActionResponse{
				Operation:       "cmd.app.factory_reset",
				OperationStatus: "ok",
				Next:            "config",
				ErrorCode:       "",
				ErrorText:       "",
			}
			fc.appLifecycle.SetConfigState(edgeapp.ConfigStateNotConfigured)
			fc.appLifecycle.SetAppState(edgeapp.AppStateNotConfigured, nil)
			fc.appLifecycle.SetAuthState(edgeapp.AuthStateNotAuthenticated)
			msg := fimpgo.NewMessage("evt.app.config_action_report", model.ServiceName, fimpgo.VTypeObject, val, nil, nil, newMsg.Payload)
			if err := fc.mqt.RespondToRequest(newMsg.Payload, msg); err != nil {
				fc.mqt.Publish(adr, msg)
			}
		case "cmd.app.uninstall":
			// TODO: The message is sent to the app from fhbutler before performing package uninstall operation
		case "cmd.network.get_all_nodes":
			// TODO: This is an example . Add your logic here or remove
		case "cmd.thing.get_inclusion_report":
			//nodeId , _ := newMsg.Payload.GetStringValue()
			// TODO: This is an example . Add your logic here or remove
		case "cmd.thing.inclusion":
			//flag , _ := newMsg.Payload.GetBoolValue()
			// TODO: This is an example . Add your logic here or remove
		case "cmd.thing.delete":
			// remove device from network
			val, err := newMsg.Payload.GetStrMapValue()
			if err != nil {
				log.Error("Wrong msg format")
				return
			}
			deviceId, ok := val["address"]
			if ok {
				// TODO: This is an example . Add your logic here or remove
				log.Info(deviceId)
			} else {
				log.Error("Incorrect address")
			}
		}
	}
}
|
package utils
import (
"fmt"
"log"
"os"
"os/exec"
)
// Check panics when e is non-nil; intended for unrecoverable setup
// errors where aborting is the right response.
func Check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// CheckFileLsExist reports whether filename exists on disk.
// Note: as in the original logic, any Stat error other than
// "does not exist" (e.g. a permission problem) still counts as existing.
func CheckFileLsExist(filename string) bool {
	_, err := os.Stat(filename)
	return !os.IsNotExist(err)
}
// ExecCommand runs commands through `bash -c`, logs the command together
// with its stdout, and returns the captured stdout. A failure is printed
// but the (possibly empty) output is still returned.
func ExecCommand(commands string) string {
	output, runErr := exec.Command("bash", "-c", commands).Output()
	if runErr != nil {
		fmt.Println(runErr.Error())
	}
	result := string(output)
	log.Println(commands, result)
	return result
}
|
package template
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"os/exec"
txttmpl "text/template"
"github.com/n0rad/go-erlog/data"
"github.com/n0rad/go-erlog/errs"
"github.com/n0rad/go-erlog/logs"
"gopkg.in/yaml.v2"
)
// TemplateFile is a template loaded from disk together with the
// ownership, mode and optional check command applied to its rendered
// output. Uid/Gid/CheckCmd can be overridden by a sidecar YAML file
// (see loadTemplateConfig).
type TemplateFile struct {
	Uid      int    `yaml:"uid"`      // owner uid applied to the rendered file
	Gid      int    `yaml:"gid"`      // owner gid applied to the rendered file
	CheckCmd string `yaml:"checkCmd"` // optional command run against the rendered file
	fields   data.Fields
	Mode     os.FileMode
	template *Templating
}
// NewTemplateFile reads the template at src, prepares it with the given
// partials, and loads any sidecar config. Uid/Gid default to the current
// process owner; mode is applied to the rendered output.
func NewTemplateFile(partials *txttmpl.Template, src string, mode os.FileMode) (*TemplateFile, error) {
	fields := data.WithField("src", src)
	content, err := ioutil.ReadFile(src)
	if err != nil {
		return nil, errs.WithEF(err, fields, "Cannot read template file")
	}
	template, err := NewTemplating(partials, src, string(content))
	if err != nil {
		return nil, errs.WithEF(err, fields, "Failed to prepare template")
	}
	t := &TemplateFile{
		Uid:      os.Getuid(),
		Gid:      os.Getgid(),
		fields:   fields,
		template: template,
		Mode:     mode,
	}
	// Sidecar config may override Uid/Gid/CheckCmd; its error (if any)
	// is returned after the trace log below.
	err = t.loadTemplateConfig(src)
	logs.WithF(fields).WithField("data", t).Trace("Template loaded")
	return t, err
}
// loadTemplateConfig overlays optional per-template settings (uid, gid,
// checkCmd) from the sidecar file "<src><EXT_CFG>" onto t. A missing
// sidecar file is not an error.
func (t *TemplateFile) loadTemplateConfig(src string) error {
	cfgPath := src + EXT_CFG
	if _, err := os.Stat(cfgPath); os.IsNotExist(err) {
		return nil
	}
	source, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		return err
	}
	// ReadFile already returns []byte; the previous []byte(source)
	// conversion was a redundant copy.
	if err := yaml.Unmarshal(source, t); err != nil {
		return errs.WithEF(err, data.WithField("name", src), "Cannot unmarshall cfg")
	}
	return nil
}
// RunTemplate renders the template with the given attributes into dst,
// then applies mode and ownership and optionally runs CheckCmd on the
// result. Rendering happens into an in-memory buffer first so the output
// can be scanned for "<no value>" placeholders; failOnNoValue decides
// whether such a line is a hard error or only logged.
func (f *TemplateFile) RunTemplate(dst string, attributes map[string]interface{}, failOnNoValue bool) error {
	if logs.IsTraceEnabled() {
		logs.WithF(f.fields).WithField("attributes", attributes).WithField("failOnNoValue", failOnNoValue).Trace("templating with attributes")
	}
	fields := f.fields.WithField("dst", dst)
	logs.WithF(fields).Info("Templating file")
	out, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, f.Mode)
	if err != nil {
		return errs.WithEF(err, fields, "Cannot open destination file")
	}
	defer func() {
		out.Close()
	}()
	// Render into memory first; the file is written only after scanning.
	buff := bytes.Buffer{}
	writer := bufio.NewWriter(&buff)
	if err := f.template.Execute(writer, attributes); err != nil {
		return errs.WithEF(err, fields, "Templating execution failed")
	}
	if err := writer.Flush(); err != nil {
		return errs.WithEF(err, fields, "Failed to flush buffer")
	}
	// Rendered output always gets a trailing newline.
	buff.WriteByte('\n')
	b := buff.Bytes()
	if logs.IsTraceEnabled() {
		logs.WithF(f.fields).WithField("result", string(b)).Trace("templating done")
	}
	// Scan line-by-line for unresolved "<no value>" placeholders.
	scanner := bufio.NewScanner(bytes.NewReader(b)) // TODO this sux
	scanner.Split(bufio.ScanLines)
	for i := 1; scanner.Scan(); i++ {
		text := scanner.Text()
		if bytes.Contains([]byte(text), []byte("<no value>")) {
			err = errs.WithF(fields.WithField("line", i).WithField("text", text), "Templating result have <no value>")
			if failOnNoValue {
				return err
			} else {
				logs.WithE(err).Error("Templating result have <no value>")
			}
		}
	}
	if length, err := out.Write(b); length != len(b) || err != nil {
		return errs.WithEF(err, fields, "Write to file failed")
	}
	if err = out.Sync(); err != nil {
		return errs.WithEF(err, fields, "Failed to sync output file")
	}
	// Re-apply mode/ownership explicitly (OpenFile's mode is subject to umask).
	if err = os.Chmod(dst, f.Mode); err != nil {
		return errs.WithEF(err, fields.WithField("file", dst), "Failed to set mode on file")
	}
	if err = os.Chown(dst, f.Uid, f.Gid); err != nil {
		return errs.WithEF(err, fields.WithField("file", dst), "Failed to set owner of file")
	}
	if f.CheckCmd != "" {
		cmd := exec.Command("/dgr/bin/busybox", "sh", "-c", f.CheckCmd)
		cmd.Stdout = os.Stdout
		cmd.Stdin = os.Stdin
		cmd.Stderr = os.Stderr
		if err = cmd.Run(); err != nil {
			return errs.WithEF(err, fields.WithField("file", dst), "Check command failed after templating")
		}
	}
	// NOTE(review): err may still hold a logged "<no value>" error when
	// failOnNoValue is false — confirm returning it here is intended.
	return err
}
|
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/G-Research/armada/internal/armada/repository"
"github.com/G-Research/armada/internal/armada/scheduling"
"github.com/G-Research/armada/internal/common"
)
const MetricPrefix = "armada_"
// ExposeDataMetrics builds a QueueInfoCollector over the given
// repositories, registers it with the default Prometheus registry, and
// returns it.
func ExposeDataMetrics(
	queueRepository repository.QueueRepository,
	jobRepository repository.JobRepository,
	usageRepository repository.UsageRepository,
) *QueueInfoCollector {
	collector := &QueueInfoCollector{
		queueRepository: queueRepository,
		jobRepository:   jobRepository,
		usageRepository: usageRepository,
	}
	prometheus.MustRegister(collector)
	return collector
}
// QueueInfoCollector is a prometheus.Collector that reports queue sizes,
// priorities and per-cluster resource usage/capacity gauges.
type QueueInfoCollector struct {
	queueRepository repository.QueueRepository
	jobRepository   repository.JobRepository
	usageRepository repository.UsageRepository
}
// Descriptors for every gauge emitted by QueueInfoCollector.Collect.
var queueSizeDesc = prometheus.NewDesc(
	MetricPrefix+"queue_size",
	"Number of jobs in a queue",
	[]string{"queueName"},
	nil,
)
var queuePriorityDesc = prometheus.NewDesc(
	MetricPrefix+"queue_priority",
	"Priority of a queue",
	[]string{"queueName"},
	nil,
)
var queueAllocatedDesc = prometheus.NewDesc(
	MetricPrefix+"queue_resource_allocated",
	"Resource allocated to running jobs of a queue",
	[]string{"cluster", "queueName", "resourceType"},
	nil,
)
var queueUsedDesc = prometheus.NewDesc(
	MetricPrefix+"queue_resource_used",
	"Resource actually being used by running jobs of a queue",
	[]string{"cluster", "queueName", "resourceType"},
	nil,
)
var clusterCapacityDesc = prometheus.NewDesc(
	MetricPrefix+"cluster_capacity",
	"Cluster capacity",
	[]string{"cluster", "resourceType"},
	nil,
)
var clusterAvailableCapacity = prometheus.NewDesc(
	MetricPrefix+"cluster_available_capacity",
	"Cluster capacity available for Armada jobs",
	[]string{"cluster", "resourceType"},
	nil,
)
// Describe sends the descriptor of every metric this collector can emit.
// Previously only two of the six descriptors were described; the list is
// now consistent with what Collect actually produces.
func (c *QueueInfoCollector) Describe(desc chan<- *prometheus.Desc) {
	desc <- queueSizeDesc
	desc <- queuePriorityDesc
	desc <- queueAllocatedDesc
	desc <- queueUsedDesc
	desc <- clusterCapacityDesc
	desc <- clusterAvailableCapacity
}
// Collect gathers queue and cluster data from the repositories and
// emits one gauge sample per queue/cluster/resource combination. Any
// repository error aborts the scrape and is surfaced as invalid metrics.
func (c *QueueInfoCollector) Collect(metrics chan<- prometheus.Metric) {
	queues, e := c.queueRepository.GetAllQueues()
	if e != nil {
		log.Errorf("Error while getting queue metrics %s", e)
		recordInvalidMetrics(metrics, e)
		return
	}
	queueSizes, e := c.jobRepository.GetQueueSizes(queues)
	if e != nil {
		log.Errorf("Error while getting queue size metrics %s", e)
		recordInvalidMetrics(metrics, e)
		return
	}
	usageReports, e := c.usageRepository.GetClusterUsageReports()
	if e != nil {
		log.Errorf("Error while getting queue usage metrics %s", e)
		recordInvalidMetrics(metrics, e)
		return
	}
	// Only clusters that reported recently contribute usage/priority data.
	activeClusterReports := scheduling.FilterActiveClusters(usageReports)
	clusterPriorities, e := c.usageRepository.GetClusterPriorities(scheduling.GetClusterReportIds(activeClusterReports))
	if e != nil {
		log.Errorf("Error while getting queue priority metrics %s", e)
		recordInvalidMetrics(metrics, e)
		return
	}
	queuePriority := scheduling.CalculateQueuesPriorityInfo(clusterPriorities, activeClusterReports, queues)
	for queue, priority := range queuePriority {
		metrics <- prometheus.MustNewConstMetric(queuePriorityDesc, prometheus.GaugeValue, priority.Priority, queue.Name)
	}
	// queueSizes is index-aligned with queues.
	for i, q := range queues {
		metrics <- prometheus.MustNewConstMetric(queueSizeDesc, prometheus.GaugeValue, float64(queueSizes[i]), q.Name)
	}
	for cluster, report := range activeClusterReports {
		for _, queueReport := range report.Queues {
			for resourceType, value := range queueReport.Resources {
				metrics <- prometheus.MustNewConstMetric(
					queueAllocatedDesc,
					prometheus.GaugeValue,
					common.QuantityAsFloat64(value),
					cluster,
					queueReport.Name,
					resourceType)
			}
			for resourceType, value := range queueReport.ResourcesUsed {
				metrics <- prometheus.MustNewConstMetric(
					queueUsedDesc,
					prometheus.GaugeValue,
					common.QuantityAsFloat64(value),
					cluster,
					queueReport.Name,
					resourceType)
			}
		}
		for resourceType, value := range report.ClusterCapacity {
			metrics <- prometheus.MustNewConstMetric(
				clusterCapacityDesc,
				prometheus.GaugeValue,
				common.QuantityAsFloat64(value),
				cluster,
				resourceType)
		}
		for resourceType, value := range report.ClusterAvailableCapacity {
			metrics <- prometheus.MustNewConstMetric(
				clusterAvailableCapacity,
				prometheus.GaugeValue,
				common.QuantityAsFloat64(value),
				cluster,
				resourceType)
		}
	}
}
// recordInvalidMetrics reports e against every descriptor this collector
// owns, so a failed scrape is visible for all series rather than leaving
// some of them silently missing. Previously only three descriptors were
// covered.
func recordInvalidMetrics(metrics chan<- prometheus.Metric, e error) {
	metrics <- prometheus.NewInvalidMetric(queueSizeDesc, e)
	metrics <- prometheus.NewInvalidMetric(queuePriorityDesc, e)
	metrics <- prometheus.NewInvalidMetric(queueAllocatedDesc, e)
	metrics <- prometheus.NewInvalidMetric(queueUsedDesc, e)
	metrics <- prometheus.NewInvalidMetric(clusterCapacityDesc, e)
	metrics <- prometheus.NewInvalidMetric(clusterAvailableCapacity, e)
}
|
package channel
import (
"fmt"
"sync"
"testing"
"time"
)
// TestGorCounter increments a shared counter from 5000 goroutines, each
// serialized by a mutex. The original "synchronized" by sleeping a fixed
// 3 seconds, which is slow and not guaranteed to be enough; a WaitGroup
// with a time-bounded wait is deterministic and returns as soon as the
// work completes.
func TestGorCounter(t *testing.T) {
	var mut sync.Mutex
	var wg sync.WaitGroup
	counter := 0
	for i := 0; i < 5000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer mut.Unlock()
			mut.Lock()
			counter++
		}()
	}
	// Wait for all goroutines, but fail instead of hanging forever.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(3 * time.Second):
		t.Fatal("timed out waiting for goroutines")
	}
	fmt.Println("counter: ", counter)
	if counter != 5000 {
		t.Fatalf("counter = %d, want 5000", counter)
	}
}
// TestGorCounterWait is the WaitGroup variant of the counter test.
// Fixes: wg.Done is now deferred — the original called it inline, so a
// panic between wg.Add and wg.Done would have deadlocked wg.Wait, and
// Done fired before the deferred unlock ran. The final count is also
// asserted instead of only printed.
func TestGorCounterWait(t *testing.T) {
	var mut sync.Mutex
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 5000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mut.Lock()
			defer mut.Unlock()
			counter++
		}()
	}
	wg.Wait()
	fmt.Println("counter: ", counter)
	if counter != 5000 {
		t.Fatalf("counter = %d, want 5000", counter)
	}
}
|
package kiteroot
// Stack is a LIFO container of *Element backed by a slice; the end of
// the slice is the top of the stack.
type Stack []*Element

// Push places e on top of the stack.
func (s *Stack) Push(e *Element) {
	*s = append(*s, e)
}

// Top returns the top element without removing it, or nil when the
// stack is empty.
func (s *Stack) Top() *Element {
	n := len(*s)
	if n == 0 {
		return nil
	}
	return (*s)[n-1]
}

// Pop removes and returns the top element, or nil when the stack is
// empty.
func (s *Stack) Pop() *Element {
	n := len(*s)
	if n == 0 {
		return nil
	}
	top := (*s)[n-1]
	*s = (*s)[:n-1]
	return top
}

// Len returns the number of elements currently in the stack.
func (s *Stack) Len() int {
	return len(*s)
}

// existsTag reports whether the stack holds a tag element whose content
// equals tag; nil entries are skipped.
func (s *Stack) existsTag(tag string) bool {
	for _, elem := range *s {
		if elem != nil && elem.Type == TagType && elem.Content == tag {
			return true
		}
	}
	return false
}
|
package taskqueueworker
import (
"context"
"fmt"
"log"
"reflect"
"sync"
"time"
"pkg.agungdwiprasetyo.com/candi/codebase/factory"
"pkg.agungdwiprasetyo.com/candi/codebase/factory/types"
"pkg.agungdwiprasetyo.com/candi/config/env"
"pkg.agungdwiprasetyo.com/candi/logger"
)
var (
	// registeredTask maps a task name to its handler, error handlers and
	// the index of its select case in workers.
	registeredTask map[string]struct {
		handlerFunc   types.WorkerHandlerFunc
		errorHandlers []types.WorkerErrorHandler
		workerIndex   int
	}
	// workers holds the reflect.Select cases: index 0 is the shutdown
	// channel, index 1 the refresh channel, then one case per task.
	workers []reflect.SelectCase
	// workerIndexTask is the reverse mapping from worker index to task.
	workerIndexTask map[int]*struct {
		taskName       string
		activeInterval *time.Ticker
	}
	// queue is the Redis-backed job queue.
	queue QueueStorage
	// refreshWorkerNotif wakes the Serve loop; shutdown stops it;
	// semaphore bounds the number of concurrently running jobs.
	refreshWorkerNotif, shutdown, semaphore chan struct{}
	mutex                                   sync.Mutex
)

// taskQueueWorker is the app server that executes queued tasks for the
// service's registered modules.
type taskQueueWorker struct {
	service factory.ServiceFactory
	wg      sync.WaitGroup
}
// NewWorker creates a new task-queue worker app server. It requires a
// Redis pool for queue storage, registers every TaskQueue handler
// exposed by the service's modules, and re-registers jobs already
// persisted in the queue. (The previous comment said "cron worker",
// which was inaccurate.)
func NewWorker(service factory.ServiceFactory) factory.AppServerFactory {
	if service.GetDependency().GetRedisPool() == nil {
		panic("Task queue worker require redis for queue storage")
	}
	queue = NewRedisQueue(service.GetDependency().GetRedisPool().WritePool())
	refreshWorkerNotif, shutdown, semaphore = make(chan struct{}), make(chan struct{}, 1), make(chan struct{}, env.BaseEnv().MaxGoroutines)
	registeredTask = make(map[string]struct {
		handlerFunc   types.WorkerHandlerFunc
		errorHandlers []types.WorkerErrorHandler
		workerIndex   int
	})
	workerIndexTask = make(map[int]*struct {
		taskName       string
		activeInterval *time.Ticker
	})
	// add shutdown channel to first index
	workers = append(workers, reflect.SelectCase{
		Dir: reflect.SelectRecv, Chan: reflect.ValueOf(shutdown),
	})
	// add refresh worker channel to second index
	workers = append(workers, reflect.SelectCase{
		Dir: reflect.SelectRecv, Chan: reflect.ValueOf(refreshWorkerNotif),
	})
	for _, m := range service.GetModules() {
		if h := m.WorkerHandler(types.TaskQueue); h != nil {
			var handlerGroup types.WorkerHandlerGroup
			h.MountHandlers(&handlerGroup)
			for _, handler := range handlerGroup.Handlers {
				// Each task gets its own select case; the case's channel
				// is left unset here — presumably populated later by
				// registerJobToWorker (confirm, not visible in this file
				// chunk).
				workerIndex := len(workers)
				registeredTask[handler.Pattern] = struct {
					handlerFunc   types.WorkerHandlerFunc
					errorHandlers []types.WorkerErrorHandler
					workerIndex   int
				}{
					handlerFunc: handler.HandlerFunc, workerIndex: workerIndex, errorHandlers: handler.ErrorHandler,
				}
				workerIndexTask[workerIndex] = &struct {
					taskName       string
					activeInterval *time.Ticker
				}{
					taskName: handler.Pattern,
				}
				workers = append(workers, reflect.SelectCase{Dir: reflect.SelectRecv})
				logger.LogYellow(fmt.Sprintf(`[TASK-QUEUE-WORKER] Task name: %s`, handler.Pattern))
			}
		}
	}
	// get current queue: re-register jobs already persisted in storage.
	for taskName, registered := range registeredTask {
		for _, job := range queue.GetAllJobs(taskName) {
			registerJobToWorker(job, registered.workerIndex)
		}
	}
	fmt.Printf("\x1b[34;1m⇨ Task queue worker running with %d task\x1b[0m\n\n", len(registeredTask))
	return &taskQueueWorker{
		service: service,
	}
}
// Serve runs the worker loop: it multiplexes over all registered task
// channels via reflect.Select until the shutdown case (index 0) fires.
// Each ready job runs in its own goroutine, bounded by the semaphore.
func (t *taskQueueWorker) Serve() {
	for {
		chosen, _, ok := reflect.Select(workers)
		if !ok {
			continue
		}
		// if shutdown channel captured, break loop (no more jobs will run)
		if chosen == 0 {
			break
		}
		// notify for refresh worker
		if chosen == 1 {
			continue
		}
		// Acquire a slot so at most MaxGoroutines jobs run concurrently.
		semaphore <- struct{}{}
		t.wg.Add(1)
		go func(chosen int) {
			defer func() {
				t.wg.Done()
				<-semaphore
			}()
			execJob(chosen)
		}(chosen)
	}
}

// Shutdown signals the Serve loop to stop and then waits for all
// in-flight jobs to finish. ctx is currently unused.
func (t *taskQueueWorker) Shutdown(ctx context.Context) {
	log.Println("\x1b[33;1mStopping Task Queue Worker...\x1b[0m")
	defer func() { log.Println("\x1b[33;1mStopping Task Queue Worker:\x1b[0m \x1b[32;1mSUCCESS\x1b[0m") }()
	if len(registeredTask) == 0 {
		return
	}
	shutdown <- struct{}{}
	// len(semaphore) approximates the number of still-running jobs.
	runningJob := len(semaphore)
	if runningJob != 0 {
		fmt.Printf("\x1b[34;1mTask Queue Worker:\x1b[0m waiting %d job until done...\x1b[0m\n", runningJob)
	}
	t.wg.Wait()
}
|
package mention
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"image"
_ "image/gif"
_ "image/jpeg"
"image/png"
_ "image/png"
"io"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strings"
"time"
"cloud.google.com/go/datastore"
"google.golang.org/api/iterator"
"willnorris.com/go/microformats"
"willnorris.com/go/webmention"
"github.com/jcgregorio/go-lib/ds"
"github.com/jcgregorio/slog"
"github.com/nfnt/resize"
)
// Datastore kinds used by this package.
const (
	MENTIONS         ds.Kind = "Mentions"        // incoming webmentions
	WEB_MENTION_SENT ds.Kind = "WebMentionSent"  // record of outgoing mentions
	THUMBNAIL        ds.Kind = "Thumbnail"       // cached author thumbnails
)
// in reports whether s is present in arr.
func in(s string, arr []string) bool {
	for i := range arr {
		if arr[i] == s {
			return true
		}
	}
	return false
}
// close closes c, logging (rather than returning) any error; intended
// for use in defer statements.
func (m *Mentions) close(c io.Closer) {
	if err := c.Close(); err != nil {
		m.log.Warningf("Failed to close: %s", err)
	}
}

// Mentions is the datastore-backed store of webmentions.
type Mentions struct {
	DS  *ds.DS      // Cloud Datastore wrapper
	log slog.Logger // structured logger
}
// NewMentions returns a Mentions store backed by Cloud Datastore for the
// given project and namespace.
func NewMentions(ctx context.Context, project, ns string, log slog.Logger) (*Mentions, error) {
	client, err := ds.New(ctx, project, ns)
	if err != nil {
		return nil, err
	}
	m := &Mentions{
		DS:  client,
		log: log,
	}
	return m, nil
}
// WebMentionSent records when a webmention was last sent for a source URL.
type WebMentionSent struct {
	TS time.Time // time of the last send, stored in UTC (see recordSent)
}
// sent looks up source in the WebMentionSent records and returns the
// recorded timestamp plus whether a record was found.
func (m *Mentions) sent(source string) (time.Time, bool) {
	key := m.DS.NewKey(WEB_MENTION_SENT)
	key.Name = source
	dst := &WebMentionSent{}
	// Early return on the miss path instead of if/else — keeps the happy
	// path unindented (Go style; staticcheck "indent-error-flow").
	if err := m.DS.Client.Get(context.Background(), key, dst); err != nil {
		m.log.Warningf("Failed to find source: %q", source)
		return time.Time{}, false
	}
	m.log.Infof("Found source: %q", source)
	return dst.TS, true
}
// recordSent stores (or overwrites) the sent-timestamp for source,
// normalized to UTC.
func (m *Mentions) recordSent(source string, updated time.Time) error {
	key := m.DS.NewKey(WEB_MENTION_SENT)
	key.Name = source
	record := &WebMentionSent{TS: updated.UTC()}
	_, err := m.DS.Client.Put(context.Background(), key, record)
	return err
}
// Triage states for an incoming mention.
const (
	GOOD_STATE      = "good"      // verified and approved
	UNTRIAGED_STATE = "untriaged" // newly received, not yet verified
	SPAM_STATE      = "spam"      // failed verification or rejected
)

// Mention is a single incoming webmention from Source to Target.
type Mention struct {
	Source string
	Target string
	State  string // one of GOOD_STATE, UNTRIAGED_STATE, SPAM_STATE
	TS     time.Time
	// Metadata found when validating. We might display this.
	Title     string    `datastore:",noindex"`
	Author    string    `datastore:",noindex"`
	AuthorURL string    `datastore:",noindex"`
	Published time.Time `datastore:",noindex"`
	Thumbnail string    `datastore:",noindex"`
	URL       string    `datastore:",noindex"`
}
// New creates an untriaged Mention for source→target, stamped with the
// current time.
func New(source, target string) *Mention {
	m := &Mention{
		Source: source,
		Target: target,
		State:  UNTRIAGED_STATE,
	}
	m.TS = time.Now()
	return m
}
// key derives a stable datastore key name from the source+target pair
// (hex-encoded MD5, used for identity, not security).
func (m *Mention) key() string {
	sum := md5.Sum([]byte(m.Source + m.Target))
	return fmt.Sprintf("%x", sum)
}
// FastValidate performs cheap offline checks on the mention: source and
// target must be non-empty and distinct, target must parse as a URL,
// its host must be one of validTargets, and it must use https.
// Error strings are now lowercase without trailing punctuation per Go
// convention (staticcheck ST1005).
func (m *Mention) FastValidate(validTargets []string) error {
	if m.Source == "" {
		return fmt.Errorf("source is empty")
	}
	if m.Target == "" {
		return fmt.Errorf("target is empty")
	}
	if m.Target == m.Source {
		return fmt.Errorf("source and target must be different")
	}
	target, err := url.Parse(m.Target)
	if err != nil {
		return fmt.Errorf("target is not a valid URL: %s", err)
	}
	if !in(target.Hostname(), validTargets) {
		return fmt.Errorf("wrong target domain")
	}
	if target.Scheme != "https" {
		return fmt.Errorf("wrong scheme for target")
	}
	return nil
}
// SlowValidate fetches the source page and confirms it actually links
// to the target. When the link is found the page is also parsed for
// microformats to fill in the mention's metadata, on a best-effort
// basis. Returns nil when the mention is valid.
func (m *Mentions) SlowValidate(mention *Mention, c *http.Client) error {
	m.log.Infof("SlowValidate: %q", mention.Source)
	resp, err := c.Get(mention.Source)
	if err != nil {
		return fmt.Errorf("Failed to retrieve source: %s", err)
	}
	defer m.close(resp.Body)
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("Failed to read content: %s", err)
	}
	reader := bytes.NewReader(b)
	links, err := webmention.DiscoverLinksFromReader(reader, mention.Source, "")
	if err != nil {
		return fmt.Errorf("Failed to discover links: %s", err)
	}
	for _, link := range links {
		if link == mention.Target {
			// The mention is valid at this point; parsing microformats is
			// best-effort, so a failed Seek is logged (previously it was
			// silently swallowed) rather than failing validation.
			if _, err := reader.Seek(0, io.SeekStart); err != nil {
				m.log.Warningf("Failed to seek back to start of source %q: %s", mention.Source, err)
				return nil
			}
			m.ParseMicroformats(mention, reader, MakeUrlToImageReader(c))
			return nil
		}
	}
	return fmt.Errorf("Failed to find target link in source.")
}
// ParseMicroformats parses the microformats found in r (the content of
// the mention's source page) and fills in the mention's metadata.
func (m *Mentions) ParseMicroformats(mention *Mention, r io.Reader, urlToImageReader UrlToImageReader) {
	u, err := url.Parse(mention.Source)
	if err != nil {
		// Without a valid base URL the relative links in the microformats
		// can't be resolved. Previously this returned silently, which made
		// failures here invisible; log it instead.
		m.log.Warningf("Failed to parse mention source %q: %s", mention.Source, err)
		return
	}
	data := microformats.Parse(r, u)
	m.findHEntry(context.Background(), urlToImageReader, mention, data, data.Items)
}
// VerifyQueuedMentions runs SlowValidate over every queued (untriaged)
// mention and stores each one back with its new state (good or spam).
func (m *Mentions) VerifyQueuedMentions(c *http.Client) {
	queued := m.GetQueued(context.Background())
	m.log.Infof("About to slow verify %d queud mentions.", len(queued))
	for _, mention := range queued {
		mention.Published = time.Now()
		mention.URL = mention.Source
		m.log.Infof("Verifying queued webmention from %q", mention.Source)
		err := m.SlowValidate(mention, c)
		if err == nil {
			mention.State = GOOD_STATE
		} else {
			mention.State = SPAM_STATE
			m.log.Infof("Failed to validate webmention: %#v: %s", *mention, err)
		}
		if err := m.Put(context.Background(), mention); err != nil {
			m.log.Warningf("Failed to save validated message: %s", err)
		}
	}
}
// MentionSlice implements sort.Interface, ordering mentions by
// ascending timestamp.
type MentionSlice []*Mention

func (p MentionSlice) Len() int           { return len(p) }
func (p MentionSlice) Less(i, j int) bool { return p[i].TS.Before(p[j].TS) }
func (p MentionSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// get returns the mentions for the given target, sorted by ascending
// timestamp. If all is false, only mentions in GOOD_STATE are returned.
func (m *Mentions) get(ctx context.Context, target string, all bool) []*Mention {
	q := m.DS.NewQuery(MENTIONS).Filter("Target =", target)
	if !all {
		q = q.Filter("State =", GOOD_STATE)
	}
	found := []*Mention{}
	it := m.DS.Client.Run(ctx, q)
	for {
		mention := &Mention{}
		if _, err := it.Next(mention); err == iterator.Done {
			break
		} else if err != nil {
			m.log.Infof("Failed while reading: %s", err)
			break
		}
		found = append(found, mention)
	}
	sort.Sort(MentionSlice(found))
	return found
}
// GetAll returns every mention for the given target regardless of
// triage state, sorted by timestamp.
func (m *Mentions) GetAll(ctx context.Context, target string) []*Mention {
	return m.get(ctx, target, true)
}

// GetGood returns only the mentions for the given target that have
// been triaged as good, sorted by timestamp.
func (m *Mentions) GetGood(ctx context.Context, target string) []*Mention {
	return m.get(ctx, target, false)
}
// UpdateState transactionally sets the triage state of the mention
// stored under the given encoded Datastore key.
func (m *Mentions) UpdateState(ctx context.Context, encodedKey, state string) error {
	tx, err := m.DS.Client.NewTransaction(ctx)
	if err != nil {
		return fmt.Errorf("client.NewTransaction: %v", err)
	}
	key, err := datastore.DecodeKey(encodedKey)
	if err != nil {
		return fmt.Errorf("Unable to decode key: %s", err)
	}
	var mention Mention
	if err := tx.Get(key, &mention); err != nil {
		// Rollback errors are intentionally ignored; the Get/Put error is
		// the one worth reporting.
		tx.Rollback()
		return fmt.Errorf("tx.GetMulti: %v", err)
	}
	mention.State = state
	if _, err := tx.Put(key, &mention); err != nil {
		tx.Rollback()
		return fmt.Errorf("tx.Put: %v", err)
	}
	if _, err = tx.Commit(); err != nil {
		return fmt.Errorf("tx.Commit: %v", err)
	}
	return nil
}
// MentionWithKey pairs a Mention with its encoded Datastore key, so
// triage UIs can refer back to the stored entity.
type MentionWithKey struct {
	Mention
	Key string
}
// GetTriage returns a page of the most recent mentions in any state,
// along with their encoded Datastore keys for later state updates.
func (m *Mentions) GetTriage(ctx context.Context, limit, offset int) []*MentionWithKey {
	found := []*MentionWithKey{}
	q := m.DS.NewQuery(MENTIONS).Order("-TS").Limit(limit).Offset(offset)
	it := m.DS.Client.Run(ctx, q)
	for {
		var mention Mention
		key, err := it.Next(&mention)
		if err == iterator.Done {
			break
		}
		if err != nil {
			m.log.Infof("Failed while reading: %s", err)
			break
		}
		found = append(found, &MentionWithKey{
			Mention: mention,
			Key:     key.Encode(),
		})
	}
	return found
}
// GetQueued returns all mentions that are still untriaged.
func (m *Mentions) GetQueued(ctx context.Context) []*Mention {
	queued := []*Mention{}
	q := m.DS.NewQuery(MENTIONS).Filter("State =", UNTRIAGED_STATE)
	it := m.DS.Client.Run(ctx, q)
	for {
		mention := &Mention{}
		if _, err := it.Next(mention); err != nil {
			if err != iterator.Done {
				m.log.Infof("Failed while reading: %s", err)
			}
			break
		}
		queued = append(queued, mention)
	}
	return queued
}
// Put stores the mention under a key derived from its source+target,
// overwriting any existing entity for the same pair.
func (m *Mentions) Put(ctx context.Context, mention *Mention) error {
	// TODO See if there's an existing mention already, so we don't overwrite its status?
	key := m.DS.NewKey(MENTIONS)
	key.Name = mention.key()
	if _, err := m.DS.Client.Put(ctx, key, mention); err != nil {
		return fmt.Errorf("Failed writing %#v: %s", *mention, err)
	}
	return nil
}
// UrlToImageReader fetches the image at url and returns a reader for
// its contents; the caller is responsible for closing it.
type UrlToImageReader func(url string) (io.ReadCloser, error)

// firstPropAsString returns the first string value of the named
// microformats property, or "" if the property has no string values.
func firstPropAsString(uf *microformats.Microformat, key string) string {
	for _, value := range uf.Properties[key] {
		if s, ok := value.(string); ok {
			return s
		}
	}
	return ""
}
// findHEntry recursively walks the parsed microformats looking for
// h-entry items and copies their interesting properties (title, URL,
// published time, author) into the mention.
func (m *Mentions) findHEntry(ctx context.Context, u2r UrlToImageReader, mention *Mention, data *microformats.Data, items []*microformats.Microformat) {
	for _, it := range items {
		if in("h-entry", it.Type) {
			// Prefer "name" for the title, falling back to "uid".
			mention.Title = firstPropAsString(it, "name")
			if mention.Title == "" {
				mention.Title = firstPropAsString(it, "uid")
			}
			// Twitter's tag: URIs make poor titles; replace them.
			if strings.HasPrefix(mention.Title, "tag:twitter") {
				mention.Title = "Twitter"
			}
			if firstPropAsString(it, "like-of") != "" {
				mention.Title += " Like"
			}
			if firstPropAsString(it, "repost-of") != "" {
				mention.Title += " Repost"
			}
			// NOTE: this local 'url' shadows the net/url package inside the if.
			if url := firstPropAsString(it, "url"); url != "" {
				mention.URL = url
			}
			if t, err := time.Parse(time.RFC3339, firstPropAsString(it, "published")); err == nil {
				mention.Published = t
			}
			if authorsInt, ok := it.Properties["author"]; ok {
				for _, authorInt := range authorsInt {
					if author, ok := authorInt.(*microformats.Microformat); ok {
						m.findAuthor(ctx, u2r, mention, data, author)
					}
				}
			}
		}
		// Recurse into children regardless of the current item's type.
		m.findHEntry(ctx, u2r, mention, data, it.Children)
	}
}
// Thumbnail is a resized author photo stored in Datastore as PNG
// bytes, keyed by the md5 of its contents.
type Thumbnail struct {
	PNG []byte `datastore:",noindex"`
}
// MakeUrlToImageReader returns a UrlToImageReader that fetches images
// using the given http.Client. The returned reader is the HTTP
// response body; the caller must close it.
func MakeUrlToImageReader(c *http.Client) UrlToImageReader {
	return func(u string) (io.ReadCloser, error) {
		resp, err := c.Get(u)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving thumbnail: %s", err)
		}
		if resp.StatusCode != 200 {
			// Close the body on the error path so the underlying connection
			// can be reused; it was previously leaked here.
			resp.Body.Close()
			return nil, fmt.Errorf("Not a 200 response: %d", resp.StatusCode)
		}
		return resp.Body, nil
	}
}
// findAuthor copies author information (name, URL, photo) from the
// given microformat item into the mention. If a photo is present it is
// fetched, resized to fit within 32px, encoded as PNG, and stored as a
// Thumbnail entity keyed by the md5 of its bytes. All photo failures
// are logged and otherwise ignored (best-effort).
func (m *Mentions) findAuthor(ctx context.Context, u2r UrlToImageReader, mention *Mention, data *microformats.Data, it *microformats.Microformat) {
	mention.Author = it.Value
	// Prefer a rel=author link on the page over the item's own url property.
	if len(data.Rels["author"]) > 0 {
		mention.AuthorURL = data.Rels["author"][0]
	} else {
		mention.AuthorURL = firstPropAsString(it, "url")
	}
	u := firstPropAsString(it, "photo")
	if u == "" {
		m.log.Infof("No photo URL found.")
		return
	}
	r, err := u2r(u)
	if err != nil {
		m.log.Infof("Failed to retrieve photo.")
		return
	}
	defer m.close(r)
	img, _, err := image.Decode(r)
	if err != nil {
		m.log.Infof("Failed to decode photo.")
		return
	}
	// Resize so the longer edge becomes 32px; passing 0 for the other
	// axis lets resize.Resize preserve the aspect ratio.
	rect := img.Bounds()
	var x uint = 32
	var y uint = 32
	if rect.Max.X > rect.Max.Y {
		y = 0
	} else {
		x = 0
	}
	resized := resize.Resize(x, y, img, resize.Lanczos3)
	var buf bytes.Buffer
	encoder := png.Encoder{
		CompressionLevel: png.BestCompression,
	}
	if err := encoder.Encode(&buf, resized); err != nil {
		m.log.Errorf("Failed to encode photo.")
		return
	}
	// Content-addressed key: identical images share one Thumbnail entity.
	hash := fmt.Sprintf("%x", md5.Sum(buf.Bytes()))
	t := &Thumbnail{
		PNG: buf.Bytes(),
	}
	key := m.DS.NewKey(THUMBNAIL)
	key.Name = hash
	if _, err := m.DS.Client.Put(ctx, key, t); err != nil {
		m.log.Errorf("Failed to write: %s", err)
		return
	}
	mention.Thumbnail = hash
}
// GetThumbnail returns the PNG bytes of the thumbnail stored under the
// given id (an md5 hex digest).
func (m *Mentions) GetThumbnail(ctx context.Context, id string) ([]byte, error) {
	key := m.DS.NewKey(THUMBNAIL)
	key.Name = id
	var thumb Thumbnail
	if err := m.DS.Client.Get(ctx, key, &thumb); err != nil {
		return nil, fmt.Errorf("Failed to find image: %s", err)
	}
	return thumb.PNG, nil
}
|
package postgres
// QueryMarkerSQL is the comment prepended to SQL statements issued by
// the collector so they can be identified in query logs.
const QueryMarkerSQL string = "/* pganalyze-collector */ "
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package lacros
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"google.golang.org/grpc"
lacroscommon "chromiumos/tast/common/cros/lacros"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/chrome/lacros/lacrosfaillog"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/lacros/lacrosproc"
"chromiumos/tast/local/cryptohome"
lacrosservice "chromiumos/tast/services/cros/lacros"
"chromiumos/tast/testing"
)
// versionRegexp matches a four-part Chrome version string, e.g. "96.0.4664.45".
var versionRegexp = regexp.MustCompile(`(\d+\.)(\d+\.)(\d+\.)(\d+)`)

// lacrosMetadata mirrors the layout of the metadata.json file shipped
// with Rootfs Lacros; only the version field is read.
type lacrosMetadata struct {
	Content struct {
		Version string `json:"version"`
	} `json:"content"`
}

// init registers UpdateTestService with the Tast gRPC service registry.
func init() {
	testing.AddService(&testing.Service{
		Register: func(srv *grpc.Server, s *testing.ServiceState) {
			lacrosservice.RegisterUpdateTestServiceServer(srv, &UpdateTestService{s: s})
		},
	})
}
// UpdateTestService implements tast.cros.lacros.UpdateTestService.
type UpdateTestService struct {
	s *testing.ServiceState // service state supplied at registration time
}
// VerifyUpdate checks if the expected version of Lacros is loaded successfully without crash given the browsers provisioned.
func (uts *UpdateTestService) VerifyUpdate(ctx context.Context, req *lacrosservice.VerifyUpdateRequest) (*lacrosservice.VerifyUpdateResponse, error) {
	// Setup Ash Chrome with the given context including options and login info.
	cr, tconn, err := uts.setupChrome(ctx, req.AshContext.GetOpts(), req.ExpectedComponent)
	if err != nil {
		return nil, errors.Wrap(err, "failed to setup Ash Chrome")
	}
	// NOTE(review): the error returned by this deferred closure is
	// discarded at the defer site — a Close failure is never reported.
	defer func(ctx context.Context) error {
		if err := cr.Close(ctx); err != nil {
			return errors.Wrap(err, "failed to close Ash Chrome")
		}
		return nil
	}(ctx)
	// Open Lacros.
	var expectedLacrosDir string
	switch req.ExpectedBrowser {
	case lacrosservice.BrowserType_LACROS_STATEFUL:
		// expectedLacrosDir will be versioned if Stateful Lacros is mounted.
		expectedLacrosDir = filepath.Join("/run/imageloader", req.ExpectedComponent, req.ExpectedVersion)
	case lacrosservice.BrowserType_LACROS_ROOTFS:
		if req.ExpectedComponent != "" {
			return nil, errors.New("invalid request: ExpectedComponent should be nil for verifying Rootfs Lacros")
		}
		expectedLacrosDir = "/run/lacros"
	default:
		return nil, errors.Errorf("Able to verify only Lacros browser, but got: %v", req.ExpectedBrowser)
	}
	expectedLacrosPath := filepath.Join(expectedLacrosDir, "chrome")
	downloadsPath, err := cryptohome.DownloadsPath(ctx, cr.NormalizedUser())
	if err != nil {
		return nil, errors.Wrap(err, "failed to get user's Downloads path")
	}
	// Start a screen record before launching Lacros for troubleshooting a failure in launching Lacros.
	hasRecordStarted := true
	if err := lacrosfaillog.StartRecord(ctx, tconn, downloadsPath); err != nil {
		hasRecordStarted = false
	}
	hasError := true
	// Keep the unshortened context for the faillog defer below so it can
	// still run after the shortened context expires.
	ctxForFailLog := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 30*time.Second)
	defer cancel()
	defer func(ctx context.Context) {
		// Save faillogs and screen record only when it fails or returns with an error.
		lacrosfaillog.SaveIf(ctx, tconn, func() bool { return hasError })
		lacrosfaillog.StopRecordAndSaveOnError(ctx, tconn, hasRecordStarted, downloadsPath, func() bool { return hasError })
	}(ctxForFailLog)
	l, err := lacros.Launch(ctx, tconn)
	if err != nil {
		// TODO(crbug.com/1258664): Log shelf items in case the Lacros app is neither launched nor shown.
		items, _ := ash.ShelfItems(ctx, tconn)
		for _, item := range items {
			testing.ContextLogf(ctx, "ShelfItem: Title: %v, Status: %v, Type: %v, AppID: %v", item.Title, item.Status, item.Type, item.AppID)
		}
		return nil, errors.Wrap(err, "failed to launch Lacros browser from Shelf")
	}
	defer l.Close(ctx)
	// Verify Lacros updates.
	testing.ContextLogf(ctx, "Verifying provisioned Lacros at %v with UI: %v", expectedLacrosPath, req.UseUi)
	status := lacrosservice.TestResult_NO_STATUS
	statusDetails := ""
	if req.UseUi {
		// Verify on the chrome://version UI that it has the expected version in "Executable Path".
		// Note that "Google Chrome" on the page cannot be used for verification since it is read from the binary
		// and does not reflect the version that is mocked for test in runtime.
		const exprExecutablePath = `document.querySelector('#executable_path').innerText`
		actualVersionedLacrosPath, err := uts.versionInfoFromUI(ctx, l, exprExecutablePath)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read Lacros executable path")
		}
		if actualVersionedLacrosPath == expectedLacrosPath {
			status = lacrosservice.TestResult_PASSED
			statusDetails = fmt.Sprintf("executable path: %v", actualVersionedLacrosPath)
		} else {
			status = lacrosservice.TestResult_FAILED
			statusDetails = fmt.Sprintf("executable path expected: %v, actual: %v", expectedLacrosPath, actualVersionedLacrosPath)
		}
	} else {
		// Verify without UI that the lacros process is running from the expected versioned path.
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			procs, err := lacrosproc.ProcsFromPath(ctx, expectedLacrosPath)
			if err != nil {
				return err
			}
			if len(procs) > 0 {
				return nil
			}
			return errors.New("waiting for lacros process")
		}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
			return nil, errors.Wrap(err, "could not find running process")
		}
		status = lacrosservice.TestResult_PASSED
	}
	// Don't save faillogs and a screen record if the test is passed.
	hasError = (status != lacrosservice.TestResult_PASSED)
	return &lacrosservice.VerifyUpdateResponse{
		Result: &lacrosservice.TestResult{Status: status, StatusDetails: statusDetails},
	}, nil
}
// ClearUpdate removes all provisioned Lacros on a DUT to reset to the previous state between tests.
func (uts *UpdateTestService) ClearUpdate(ctx context.Context, req *lacrosservice.ClearUpdateRequest) (*lacrosservice.ClearUpdateResponse, error) {
	testing.ContextLog(ctx, "Clearing provisioned Lacros")
	// Mark the stateful partition corrupt, so the provision can restore it.
	// Remove it only if the clean up is successful.
	if err := ioutil.WriteFile(lacroscommon.CorruptStatefulFilePath, nil, 0644); err != nil {
		testing.ContextLog(ctx, "Failed to touch file: ", err)
	}
	// Try to unmount provisioned Stateful Lacros, then remove mount points.
	// Glob errors are ignored: the pattern is constant and well-formed.
	matches, _ := filepath.Glob("/run/imageloader/lacros*/*")
	for _, match := range matches {
		if err := testexec.CommandContext(ctx, "umount", "-f", match).Run(); err != nil {
			testing.ContextLog(ctx, "Failed to unmount ", match)
		}
		if err := os.RemoveAll(match); err != nil {
			testing.ContextLog(ctx, "Failed to remove ", match)
		}
	}
	// Remove provisioned files. Note that 'sh' is used to handle the glob.
	lacrosComponentPathGlob := filepath.Join(lacroscommon.LacrosRootComponentPath, "*")
	if err := testexec.CommandContext(ctx, "sh", "-c",
		strings.Join([]string{"rm", "-rf", lacrosComponentPathGlob}, " ")).Run(); err != nil {
		testing.ContextLog(ctx, "Failed to remove provisioned components at ", lacrosComponentPathGlob)
	}
	// If succeeded to clear, we no longer need to mark the stateful partition corrupt.
	matches, _ = filepath.Glob(lacrosComponentPathGlob)
	if len(matches) == 0 {
		os.Remove(lacroscommon.CorruptStatefulFilePath)
	}
	return &lacrosservice.ClearUpdateResponse{}, nil
}
// GetBrowserVersion returns version info of the given browser type.
// If multiple Lacros browsers are provisioned in the stateful partition, all the versions will be returned.
func (uts *UpdateTestService) GetBrowserVersion(ctx context.Context, req *lacrosservice.GetBrowserVersionRequest) (*lacrosservice.GetBrowserVersionResponse, error) {
	var versions []string
	switch req.Browser {
	case lacrosservice.BrowserType_ASH:
		version, err := uts.ashVersion(ctx)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get the version for %v", req.Browser)
		}
		versions = append(versions, version)
	case lacrosservice.BrowserType_LACROS_ROOTFS:
		version, err := uts.lacrosRootfsVersion(ctx)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get the version for %v", req.Browser)
		}
		versions = append(versions, version)
	// TODO: Implement case lacrosservice.BrowserType_LACROS_STATEFUL when needed.
	default:
		return nil, errors.Errorf("unknown browser type: %v", req.Browser)
	}
	testing.ContextLogf(ctx, "GetBrowserVersion: type: %v, version: %v", req.Browser, versions)
	return &lacrosservice.GetBrowserVersionResponse{
		Versions: versions,
	}, nil
}
// ashVersion returns the non-empty version of Ash Chrome, extracted
// from the output of `chrome --version`.
// TODO(hyungtaekim): Move the function to a common place for other tests.
func (uts *UpdateTestService) ashVersion(ctx context.Context) (string, error) {
	out, err := testexec.CommandContext(ctx, "/opt/google/chrome/chrome", "--version").Output(testexec.DumpLogOnError)
	if err != nil {
		return "", err
	}
	v := versionRegexp.FindString(string(out))
	if v == "" {
		return "", errors.New("invalid version: " + v)
	}
	return v, nil
}
// lacrosRootfsVersion returns the non-empty version of Lacros Chrome in
// Rootfs, read from its metadata.json.
// TODO(hyungtaekim): Move the function to a common place for other tests.
func (uts *UpdateTestService) lacrosRootfsVersion(ctx context.Context) (string, error) {
	raw, err := ioutil.ReadFile("/opt/google/lacros/metadata.json")
	if err != nil {
		return "", err
	}
	var md lacrosMetadata
	if err := json.Unmarshal(raw, &md); err != nil {
		return "", errors.Wrap(err, "failed to parse Rootfs Lacros Chrome version")
	}
	v := versionRegexp.FindString(md.Content.Version)
	if v == "" {
		return "", errors.New("invalid version: " + v)
	}
	return v, nil
}
// setupChrome configures Ash Chrome to be able to launch Lacros with given options.
// Note that it uses fake login credentials and loads test extension for Lacros by default.
func (uts *UpdateTestService) setupChrome(ctx context.Context, options []string, component string) (*chrome.Chrome, *chrome.TestConn, error) {
	var opts []chrome.Option
	for _, opt := range options {
		opts = append(opts, chrome.Option(chrome.ExtraArgs(opt)))
	}
	// Enable Lacros with default options.
	// Do not specify which lacros to select between Rootfs and Stateful in which mode as this test is to verify the selection logic itself.
	lacrosOpts, err := lacrosfixt.NewConfig(
		lacrosfixt.Selection(lacros.NotSelected),
		lacrosfixt.Mode(lacros.NotSpecified)).Opts()
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to get default options")
	}
	opts = append(opts, lacrosOpts...)
	// Select Lacros channel.
	var channel string
	switch component {
	case lacroscommon.LacrosCanaryComponent:
		channel = "canary"
	case lacroscommon.LacrosDevComponent:
		channel = "dev"
	case lacroscommon.LacrosBetaComponent:
		channel = "beta"
	case lacroscommon.LacrosStableComponent:
		channel = "stable"
	default:
		// rootfs-lacros is not provisioned from a channel.
		// channel stays empty and no stability flag is added below.
	}
	if channel != "" {
		opts = append(opts, chrome.ExtraArgs("--lacros-stability="+channel))
	}
	// Block Component Updater.
	opts = append(opts, chrome.ExtraArgs("--component-updater=url-source="+lacroscommon.BogusComponentUpdaterURL))
	// KeepState should be enabled to retain user data including provisioned Lacros image
	// after restarting ui to make new Chrome options take effect.
	opts = append(opts, chrome.KeepState())
	cr, err := chrome.New(ctx, opts...)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to connect to Chrome")
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create test API connection for Ash Chrome")
	}
	return cr, tconn, nil
}
// versionInfoFromUI opens chrome://version/ from UI and returns the version info that matches the given JS expression.
// eg, Executable Path = /run/imageloader/lacros-dogfood-dev/X.X.X.X/chrome
func (uts *UpdateTestService) versionInfoFromUI(ctx context.Context, l *lacros.Lacros, expr string) (string, error) {
	lconn, err := l.NewConn(ctx, lacroscommon.VersionURL)
	if err != nil {
		return "", errors.Wrapf(err, "failed to open: %v", lacroscommon.VersionURL)
	}
	defer lconn.Close()
	var value string
	// Poll because the page may not have rendered the value immediately.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		var raw json.RawMessage
		if err := lconn.Eval(ctx, expr, &raw); err != nil {
			return errors.Wrapf(err, "failed to eval expr: %v", expr)
		}
		// Strip the surrounding JSON string quotes and any whitespace.
		value = strings.Trim(string(raw), ` "`)
		if len(value) > 0 {
			return nil
		}
		return errors.New("failed to find value for eval")
	}, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
		return "", err
	}
	return value, nil
}
|
package ravendb
import (
"net/http"
"reflect"
)
// Compile-time check that PatchOperation implements IOperation.
var (
	_ IOperation = &PatchOperation{}
)
// PatchOperationPayload represents payload of patch operation
// Note: in Java it's Payload nested in PatchOperation
type PatchOperationPayload struct {
	patch          *PatchRequest // the patch to apply when the document exists
	patchIfMissing *PatchRequest // optional patch to apply when it does not
}
// PatchOperationResult represents result of patch operation
// Note: in Java it's Result nested in PatchOperation
type PatchOperationResult struct {
	Status   PatchStatus            `json:"Status"`
	Document map[string]interface{} `json:"Document"`
}
// GetResult deserializes the patched document into result, which must
// be a pointer to a value of the appropriate type.
func (r *PatchOperationResult) GetResult(result interface{}) error {
	t := reflect.TypeOf(result)
	v, err := makeStructFromJSONMap(t, r.Document)
	if err != nil {
		return err
	}
	return setInterfaceToValue(result, v)
}
// PatchOperation represents patch operation
type PatchOperation struct {
	Command *PatchCommand // the last command created by GetCommand

	id                              string
	changeVector                    *string
	patch                           *PatchRequest
	patchIfMissing                  *PatchRequest
	skipPatchIfChangeVectorMismatch bool
}
// NewPatchOperation returns a new PatchOperation. patch is required and
// must have a non-blank script; patchIfMissing is optional but, when
// given, must also have a non-blank script.
func NewPatchOperation(id string, changeVector *string, patch *PatchRequest, patchIfMissing *PatchRequest, skipPatchIfChangeVectorMismatch bool) (*PatchOperation, error) {
	if patch == nil {
		return nil, newIllegalArgumentError("Patch cannot be null")
	}
	if stringIsBlank(patch.Script) {
		return nil, newIllegalArgumentError("Patch script cannot be null")
	}
	if patchIfMissing != nil && stringIsBlank(patchIfMissing.Script) {
		return nil, newIllegalArgumentError("PatchIfMissing script cannot be null")
	}
	op := &PatchOperation{
		id:                              id,
		changeVector:                    changeVector,
		patch:                           patch,
		patchIfMissing:                  patchIfMissing,
		skipPatchIfChangeVectorMismatch: skipPatchIfChangeVectorMismatch,
	}
	return op, nil
}
// GetCommand builds the PatchCommand for this operation, remembering it
// in o.Command before returning it.
func (o *PatchOperation) GetCommand(store *DocumentStore, conventions *DocumentConventions, cache *httpCache) (RavenCommand, error) {
	cmd, err := NewPatchCommand(conventions, o.id, o.changeVector, o.patch, o.patchIfMissing, o.skipPatchIfChangeVectorMismatch, false, false)
	o.Command = cmd
	return cmd, err
}
// Compile-time check that PatchCommand implements RavenCommand.
var _ RavenCommand = &PatchCommand{}

// PatchCommand represents patch command
type PatchCommand struct {
	RavenCommandBase

	// TODO: unused
	//conventions *DocumentConventions
	id                              string
	changeVector                    *string
	patch                           *PatchOperationPayload
	skipPatchIfChangeVectorMismatch bool
	returnDebugInformation          bool
	test                            bool

	// Result holds the server's PatchResult after SetResponse.
	Result *PatchResult
}
// NewPatchCommand returns a new PatchCommand. The patch argument is
// required and must have a non-blank script; patchIfMissing, when
// given, must also have a non-blank script; id must be non-empty.
func NewPatchCommand(conventions *DocumentConventions, id string, changeVector *string,
	patch *PatchRequest, patchIfMissing *PatchRequest, skipPatchIfChangeVectorMismatch bool,
	returnDebugInformation bool, test bool) (*PatchCommand, error) {

	/* TODO: used only for json mapper, not used in Go
	if conventions == nil {
		return nil, newIllegalArgumentError("Conventions cannot be null")
	}
	*/
	if patch == nil {
		return nil, newIllegalArgumentError("Patch cannot be null")
	}
	if stringIsBlank(patch.Script) {
		return nil, newIllegalArgumentError("Patch script cannot be null")
	}
	if patchIfMissing != nil && stringIsBlank(patchIfMissing.Script) {
		return nil, newIllegalArgumentError("PatchIfMissing script cannot be null")
	}
	if id == "" {
		return nil, newIllegalArgumentError("Id cannot be null")
	}
	return &PatchCommand{
		RavenCommandBase: NewRavenCommandBase(),
		id:               id,
		changeVector:     changeVector,
		patch: &PatchOperationPayload{
			patch:          patch,
			patchIfMissing: patchIfMissing,
		},
		skipPatchIfChangeVectorMismatch: skipPatchIfChangeVectorMismatch,
		returnDebugInformation:          returnDebugInformation,
		test:                            test,
	}, nil
}
// CreateRequest builds the HTTP PATCH request for this command against
// the given server node, encoding the patch payload as JSON.
func (c *PatchCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	url := node.URL + "/databases/" + node.Database + "/docs?id=" + urlUtilsEscapeDataString(c.id)
	if c.skipPatchIfChangeVectorMismatch {
		url += "&skipPatchIfChangeVectorMismatch=true"
	}
	if c.returnDebugInformation {
		url += "&debug=true"
	}
	if c.test {
		url += "&test=true"
	}

	patch := map[string]interface{}{}
	if p := c.patch.patch; p != nil {
		patch = p.Serialize()
	}
	var patchIfMissing map[string]interface{}
	if p := c.patch.patchIfMissing; p != nil {
		patchIfMissing = p.Serialize()
	}

	body, err := jsonMarshal(map[string]interface{}{
		"Patch":          patch,
		"PatchIfMissing": patchIfMissing,
	})
	panicIf(err != nil, "jsonMarshal failed with %s", err)

	request, err := newHttpPatch(url, body)
	if err != nil {
		return nil, err
	}
	addChangeVectorIfNotNull(c.changeVector, request)
	return request, nil
}
// SetResponse deserializes the server's response into c.Result; an
// empty response is treated as success with no result.
func (c *PatchCommand) SetResponse(response []byte, fromCache bool) error {
	if len(response) > 0 {
		return jsonUnmarshal(response, &c.Result)
	}
	return nil
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package sync2_test
import (
"context"
"errors"
"sync/atomic"
"testing"
"time"
"golang.org/x/sync/errgroup"
"storj.io/common/sync2"
"storj.io/common/testcontext"
)
// TestFence verifies that goroutines blocked in Wait are released once
// Release is called, and that Wait then returns true.
func TestFence(t *testing.T) {
	t.Parallel()
	ctx := testcontext.NewWithTimeout(t, 30*time.Second)
	var group errgroup.Group
	var fence sync2.Fence
	var done int32
	for i := 0; i < 10; i++ {
		group.Go(func() error {
			if !fence.Wait(ctx) {
				return errors.New("got false from Wait")
			}
			// done is set before Release, so it must be visible here.
			if atomic.LoadInt32(&done) == 0 {
				return errors.New("fence not yet released")
			}
			return nil
		})
	}
	// wait a bit for all goroutines to hit the fence
	time.Sleep(100 * time.Millisecond)
	// Several concurrent Release calls exercise releasing more than once.
	for i := 0; i < 3; i++ {
		group.Go(func() error {
			atomic.StoreInt32(&done, 1)
			fence.Release()
			return nil
		})
	}
	if err := group.Wait(); err != nil {
		t.Fatal(err)
	}
}
// TestFence_ContextCancel verifies that Wait returns false when the
// context is canceled before the fence is ever released.
func TestFence_ContextCancel(t *testing.T) {
	t.Parallel()
	tctx := testcontext.NewWithTimeout(t, 30*time.Second)
	ctx, cancel := context.WithCancel(tctx)
	var group errgroup.Group
	var fence sync2.Fence
	for i := 0; i < 10; i++ {
		group.Go(func() error {
			if fence.Wait(ctx) {
				return errors.New("got true from Wait")
			}
			return nil
		})
	}
	// wait a bit for all goroutines to hit the fence
	time.Sleep(100 * time.Millisecond)
	cancel()
	if err := group.Wait(); err != nil {
		t.Fatal(err)
	}
}
|
package router
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/sudarshan-reddy/benjerry/httputils"
)
// fakeAuthHandler is a stub AuthHandler whose Authenticate outcome is
// controlled by the err field.
type fakeAuthHandler struct {
	AuthHandler
	err *httputils.HandlerError
}

// Authenticate returns the configured error, or the request unchanged
// on success.
func (f *fakeAuthHandler) Authenticate(r *http.Request) (*http.Request, *httputils.HandlerError) {
	if f.err == nil {
		return r, nil
	}
	return nil, f.err
}
// Test_Authenticate verifies that the authenticator tries each
// AuthHandler in order and only fails when all of them fail.
func Test_Authenticate(t *testing.T) {
	testCases := []struct {
		desc         string
		authHandlers []AuthHandler
		expectedResp string
	}{
		{
			desc:         "basic case",
			authHandlers: []AuthHandler{&fakeAuthHandler{err: nil}},
			expectedResp: "",
		},
		{
			desc: "when the first authentication is success dont call the second one",
			authHandlers: []AuthHandler{
				&fakeAuthHandler{err: nil},
				&fakeAuthHandler{err: httputils.NewHandlerError(http.StatusUnauthorized, nil)},
			},
			expectedResp: "",
		},
		{
			desc: "when the first authentication is a failure call the second one",
			authHandlers: []AuthHandler{
				&fakeAuthHandler{err: httputils.NewHandlerError(http.StatusUnauthorized, nil)},
				&fakeAuthHandler{err: nil},
			},
			expectedResp: "",
		},
		{
			desc: "when the all cases are a failure throw error",
			authHandlers: []AuthHandler{
				&fakeAuthHandler{err: httputils.NewHandlerError(http.StatusUnauthorized, nil)},
				&fakeAuthHandler{err: httputils.NewHandlerError(http.StatusUnauthorized, nil)},
			},
			expectedResp: `{"httpStatus":401,"httpCode":"unauthorized","requestId":"","errors":[]}` + "\n",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			assert := assert.New(t)
			next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
			wrapped := NewAuthenticator(tc.authHandlers...).Authenticate(next)
			req, err := http.NewRequest("GET", "/url", nil)
			if err != nil {
				t.Fatal(err)
			}
			rec := httptest.NewRecorder()
			wrapped.ServeHTTP(rec, req)
			assert.Equal(tc.expectedResp, rec.Body.String())
		})
	}
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/plugin"
"github.com/bsostech/vault-blockchain/internal/backend"
)
// main parses the plugin's TLS flags and serves the backend over
// Vault's plugin protocol, exiting non-zero on any failure.
func main() {
	meta := &api.PluginAPIClientMeta{}
	flags := meta.FlagSet()
	if err := flags.Parse(os.Args[1:]); err != nil { // Ignore command, strictly parse flags
		log.Println(err)
		os.Exit(1)
	}
	serveOpts := &plugin.ServeOpts{
		BackendFactoryFunc: backend.Factory,
		TLSProviderFunc:    api.VaultPluginTLSProvider(meta.GetTLSConfig()),
	}
	if err := plugin.Serve(serveOpts); err != nil {
		log.Println(err)
		os.Exit(1)
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"context"
"fmt"
"time"
"chromiumos/tast/common/hwsec"
"chromiumos/tast/common/pkcs11"
"chromiumos/tast/common/servo"
"chromiumos/tast/errors"
hwsecremote "chromiumos/tast/remote/hwsec"
"chromiumos/tast/testing"
)
// PowerButtonHelper is a helper interface that can press the power button
// on the device.
type PowerButtonHelper interface {
	// PressAndRelease presses and then releases the power button.
	PressAndRelease(ctx context.Context) error
}
// ServoPowerButtonHelper presses the power button using servo key press.
// Note that this will only work on specific test suites where servo-micro
// is connected (e.g., firmware_cr50).
type ServoPowerButtonHelper struct {
	svo *servo.Servo
}

// NewServoPowerButtonHelper creates a new ServoPowerButtonHelper.
func NewServoPowerButtonHelper(svo *servo.Servo) ServoPowerButtonHelper {
	return ServoPowerButtonHelper{svo}
}

// PressAndRelease implements PowerButtonHelper.PressAndRelease.
func (helper ServoPowerButtonHelper) PressAndRelease(ctx context.Context) error {
	return helper.svo.KeypressWithDuration(ctx, servo.PowerKey, servo.DurTab)
}
// SocketPowerButtonHelper presses the power button by sending bytes to
// the GPIO power button socket. Note that this will only work on VMs
// running ti50-emulator (dependencies "tpm2-simulator" + "gsc").
type SocketPowerButtonHelper struct {
	cmd hwsec.CmdRunner
}

// NewSocketPowerButtonHelper creates a new SocketPowerButtonHelper.
func NewSocketPowerButtonHelper(cmd hwsec.CmdRunner) SocketPowerButtonHelper {
	return SocketPowerButtonHelper{cmd}
}
// PressAndRelease implements PowerButtonHelper.PressAndRelease.
// It sends one byte to the emulator's GPIO power-button socket to press
// the button, holds for 500ms, then sends another byte to release it.
func (helper SocketPowerButtonHelper) PressAndRelease(ctx context.Context) error {
	const (
		socketCommandTempl string = "echo -e %s | socat -t1 unix-connect:/run/tpm2-simulator/sockets/gpioPwrBtn -"
		zero               string = "0"
		one                string = "1"
	)
	// NOTE(review): the original comment claimed zero means "pressed" and
	// one means "released", but the code sends one first (press) and zero
	// second (release) — confirm which mapping the emulator expects.
	if _, err := helper.cmd.Run(ctx, "sh", "-c", fmt.Sprintf(socketCommandTempl, one)); err != nil {
		return errors.Wrap(err, "failed to press power button")
	}
	// Hold the button briefly. The Sleep error (e.g. context cancellation)
	// was previously ignored; surface it so we don't release after the
	// test context has already been torn down.
	if err := testing.Sleep(ctx, 500*time.Millisecond); err != nil {
		return errors.Wrap(err, "failed to sleep between press and release")
	}
	if _, err := helper.cmd.Run(ctx, "sh", "-c", fmt.Sprintf(socketCommandTempl, zero)); err != nil {
		return errors.Wrap(err, "failed to release power button")
	}
	return nil
}
// SetU2fdFlags sets the flags and restarts u2fd, which will re-create the u2f device.
func SetU2fdFlags(ctx context.Context, helper *hwsecremote.FullHelperRemote, u2f, g2f, userKeys bool) (retErr error) {
	// Force-flag files read by u2fd on startup. (Constants renamed from
	// the previous transposed spellings uf2ForcePath/gf2ForcePath.)
	const (
		u2fForcePath      = "/var/lib/u2f/force/u2f.force"
		g2fForcePath      = "/var/lib/u2f/force/g2f.force"
		userKeysForcePath = "/var/lib/u2f/force/user_keys.force"
	)
	cmd := helper.CmdRunner()
	dCtl := helper.DaemonController()
	if err := dCtl.Stop(ctx, hwsec.U2fdDaemon); err != nil {
		return errors.Wrap(err, "failed to stop u2fd")
	}
	defer func(ctx context.Context) {
		// Always restart u2fd, but don't let a restart failure mask an
		// earlier error from the body.
		if err := dCtl.Start(ctx, hwsec.U2fdDaemon); err != nil {
			if retErr != nil {
				testing.ContextLog(ctx, "Failed to restart u2fd: ", err)
			} else {
				retErr = errors.Wrap(err, "failed to restart u2fd")
			}
		}
	}(ctx)
	// Remove flags.
	if _, err := cmd.Run(ctx, "sh", "-c", "rm -f /var/lib/u2f/force/*.force"); err != nil {
		return errors.Wrap(err, "failed to remove flags")
	}
	if u2f {
		if _, err := cmd.Run(ctx, "touch", u2fForcePath); err != nil {
			return errors.Wrap(err, "failed to set u2f flag")
		}
	}
	if g2f {
		if _, err := cmd.Run(ctx, "touch", g2fForcePath); err != nil {
			return errors.Wrap(err, "failed to set g2f flag")
		}
	}
	if userKeys {
		if _, err := cmd.Run(ctx, "touch", userKeysForcePath); err != nil {
			return errors.Wrap(err, "failed to set userKeys flag")
		}
	}
	return nil
}
// EnsureChapsSlotsInitialized ensures chaps is initialized.
//
// It polls the chaps slot list until at least two slots are present, for up
// to 30 seconds at one-second intervals.
func EnsureChapsSlotsInitialized(ctx context.Context, chaps *pkcs11.Chaps) error {
	return testing.Poll(ctx, func(context.Context) error {
		slots, err := chaps.ListSlots(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to list chaps slots")
		}
		testing.ContextLog(ctx, slots)
		if len(slots) < 2 {
			// Fixed: previously wrapped err, which is nil on this path; create
			// a fresh error so the poll actually retries/fails.
			return errors.New("chaps initialization hasn't finished")
		}
		return nil
	}, &testing.PollOptions{
		Timeout:  30 * time.Second,
		Interval: time.Second,
	})
}
|
package main
import (
"fmt"
"io"
"os"
)
func read() {
file, err := os.Open("./test.txt")
defer file.Close()
if err != nil {
fmt.Println(err)
return
}
fmt.Println(file)
var tmpSlice = make([]byte, 128)
var strSlice []byte
for {
n, err := file.Read(tmpSlice)
if err == io.EOF {
break
}
if err != nil {
fmt.Println(err)
return
}
strSlice = append(strSlice, tmpSlice[:n]...)
fmt.Printf("read bytesLength: %v\n", n)
}
fmt.Println(string(strSlice))
}
func write() {
file, err := os.OpenFile("./write.txt", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
defer file.Close()
if err != nil {
fmt.Println(err)
return
}
file.WriteString("11111\r\n")
file.Write([]byte("22222\r\n"))
}
// main demonstrates the read and write helpers: it dumps ./test.txt and then
// writes ./write.txt.
func main() {
	read()
	write()
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package roleoption
import (
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/errors"
)
//go:generate stringer -type=Option

// Option defines a role option. This is output by the parser
type Option uint32

// RoleOption represents an Option with a value.
type RoleOption struct {
	Option
	// HasValue is true when the option carries a value (e.g. VALID UNTIL).
	HasValue bool
	// Need to resolve value in Exec for the case of placeholders.
	Value func() (bool, string, error)
}

// KindList of role options.
// The zero value is reserved (blank identifier) so ToOption can treat 0 as
// "not found".
const (
	_ Option = iota
	CREATEROLE
	NOCREATEROLE
	PASSWORD
	LOGIN
	NOLOGIN
	VALIDUNTIL
	CONTROLJOB
	NOCONTROLJOB
	CONTROLCHANGEFEED
	NOCONTROLCHANGEFEED
	CREATEDB
	NOCREATEDB
	CREATELOGIN
	NOCREATELOGIN
	VIEWACTIVITY
	NOVIEWACTIVITY
	CANCELQUERY
	NOCANCELQUERY
	MODIFYCLUSTERSETTING
	NOMODIFYCLUSTERSETTING
)
// toSQLStmts is a map of Kind -> SQL statement string for applying the
// option to the role.
// Positive options UPSERT a row into system.role_options; their NO*
// counterparts DELETE that row. PASSWORD has no entry here — it is handled
// separately (see GetSQLStmts). LOGIN is represented by the absence of a
// NOLOGIN row, hence its DELETE statement.
var toSQLStmts = map[Option]string{
	CREATEROLE:             `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CREATEROLE')`,
	NOCREATEROLE:           `DELETE FROM system.role_options WHERE username = $1 AND option = 'CREATEROLE'`,
	LOGIN:                  `DELETE FROM system.role_options WHERE username = $1 AND option = 'NOLOGIN'`,
	NOLOGIN:                `UPSERT INTO system.role_options (username, option) VALUES ($1, 'NOLOGIN')`,
	VALIDUNTIL:             `UPSERT INTO system.role_options (username, option, value) VALUES ($1, 'VALID UNTIL', $2::timestamptz::string)`,
	CONTROLJOB:             `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CONTROLJOB')`,
	NOCONTROLJOB:           `DELETE FROM system.role_options WHERE username = $1 AND option = 'CONTROLJOB'`,
	CONTROLCHANGEFEED:      `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CONTROLCHANGEFEED')`,
	NOCONTROLCHANGEFEED:    `DELETE FROM system.role_options WHERE username = $1 AND option = 'CONTROLCHANGEFEED'`,
	CREATEDB:               `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CREATEDB')`,
	NOCREATEDB:             `DELETE FROM system.role_options WHERE username = $1 AND option = 'CREATEDB'`,
	CREATELOGIN:            `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CREATELOGIN')`,
	NOCREATELOGIN:          `DELETE FROM system.role_options WHERE username = $1 AND option = 'CREATELOGIN'`,
	VIEWACTIVITY:           `UPSERT INTO system.role_options (username, option) VALUES ($1, 'VIEWACTIVITY')`,
	NOVIEWACTIVITY:         `DELETE FROM system.role_options WHERE username = $1 AND option = 'VIEWACTIVITY'`,
	CANCELQUERY:            `UPSERT INTO system.role_options (username, option) VALUES ($1, 'CANCELQUERY')`,
	NOCANCELQUERY:          `DELETE FROM system.role_options WHERE username = $1 AND option = 'CANCELQUERY'`,
	MODIFYCLUSTERSETTING:   `UPSERT INTO system.role_options (username, option) VALUES ($1, 'MODIFYCLUSTERSETTING')`,
	NOMODIFYCLUSTERSETTING: `DELETE FROM system.role_options WHERE username = $1 AND option = 'MODIFYCLUSTERSETTING'`,
}
// Mask returns the bitmask for a given role option.
// Valid as long as Option values stay below 32 (currently 20 options plus the
// reserved zero), since the result is a uint32.
func (o Option) Mask() uint32 {
	return 1 << o
}
// ByName is a map of string -> kind value.
// Keys are the upper-case option spellings accepted by ToOption; note that
// VALIDUNTIL is spelled with an underscore ("VALID_UNTIL") here.
var ByName = map[string]Option{
	"CREATEROLE":             CREATEROLE,
	"NOCREATEROLE":           NOCREATEROLE,
	"PASSWORD":               PASSWORD,
	"LOGIN":                  LOGIN,
	"NOLOGIN":                NOLOGIN,
	"VALID_UNTIL":            VALIDUNTIL,
	"CONTROLJOB":             CONTROLJOB,
	"NOCONTROLJOB":           NOCONTROLJOB,
	"CONTROLCHANGEFEED":      CONTROLCHANGEFEED,
	"NOCONTROLCHANGEFEED":    NOCONTROLCHANGEFEED,
	"CREATEDB":               CREATEDB,
	"NOCREATEDB":             NOCREATEDB,
	"CREATELOGIN":            CREATELOGIN,
	"NOCREATELOGIN":          NOCREATELOGIN,
	"VIEWACTIVITY":           VIEWACTIVITY,
	"NOVIEWACTIVITY":         NOVIEWACTIVITY,
	"CANCELQUERY":            CANCELQUERY,
	"NOCANCELQUERY":          NOCANCELQUERY,
	"MODIFYCLUSTERSETTING":   MODIFYCLUSTERSETTING,
	"NOMODIFYCLUSTERSETTING": NOMODIFYCLUSTERSETTING,
}
// ToOption takes a string and returns the corresponding Option.
// The lookup is case-insensitive; unknown names yield a syntax error.
func ToOption(str string) (Option, error) {
	opt, found := ByName[strings.ToUpper(str)]
	if !found {
		return 0, pgerror.Newf(pgcode.Syntax, "unrecognized role option %s", str)
	}
	return opt, nil
}
// List is a list of role options.
type List []RoleOption
// GetSQLStmts returns a map of SQL stmts to apply each role option.
// Maps stmts to values (value of the role option).
// Each option's statement (from toSQLStmts) maps to its value thunk, or nil
// when the option carries no value. Conflicting options cause an error.
// A telemetry counter is bumped for every option processed.
func (rol List) GetSQLStmts(op string) (map[string]func() (bool, string, error), error) {
	if len(rol) <= 0 {
		return nil, nil
	}
	stmts := make(map[string]func() (bool, string, error), len(rol))
	err := rol.CheckRoleOptionConflicts()
	if err != nil {
		return stmts, err
	}
	for _, ro := range rol {
		sqltelemetry.IncIAMOptionCounter(
			op,
			strings.ToLower(ro.Option.String()),
		)
		// Skip PASSWORD option.
		// Since PASSWORD still resides in system.users, we handle setting PASSWORD
		// outside of this set stmt.
		// TODO(richardjcai): migrate password to system.role_options
		if ro.Option == PASSWORD {
			continue
		}
		stmt := toSQLStmts[ro.Option]
		if ro.HasValue {
			stmts[stmt] = ro.Value
		} else {
			stmts[stmt] = nil
		}
	}
	return stmts, nil
}
// ToBitField returns the bitfield representation of
// a list of role options.
// Listing the same option twice is a syntax error.
func (rol List) ToBitField() (uint32, error) {
	var bits uint32
	for _, ro := range rol {
		mask := ro.Option.Mask()
		if bits&mask != 0 {
			return 0, pgerror.Newf(pgcode.Syntax, "redundant role options")
		}
		bits |= mask
	}
	return bits, nil
}
// Contains returns true if List contains option, false otherwise.
func (rol List) Contains(p Option) bool {
	for i := range rol {
		if rol[i].Option == p {
			return true
		}
	}
	return false
}
// CheckRoleOptionConflicts returns an error if two or more options conflict with each other.
func (rol List) CheckRoleOptionConflicts() error {
	roleOptionBits, err := rol.ToBitField()
	if err != nil {
		return err
	}
	// Each pair below is mutually exclusive; specifying both members of any
	// pair is a syntax error.
	mutuallyExclusive := [][2]Option{
		{CREATEROLE, NOCREATEROLE},
		{LOGIN, NOLOGIN},
		{CONTROLJOB, NOCONTROLJOB},
		{CONTROLCHANGEFEED, NOCONTROLCHANGEFEED},
		{CREATEDB, NOCREATEDB},
		{CREATELOGIN, NOCREATELOGIN},
		{VIEWACTIVITY, NOVIEWACTIVITY},
		{CANCELQUERY, NOCANCELQUERY},
		{MODIFYCLUSTERSETTING, NOMODIFYCLUSTERSETTING},
	}
	for _, pair := range mutuallyExclusive {
		if roleOptionBits&pair[0].Mask() != 0 && roleOptionBits&pair[1].Mask() != 0 {
			return pgerror.Newf(pgcode.Syntax, "conflicting role options")
		}
	}
	return nil
}
// GetPassword returns the value of the password or whether the
// password was set to NULL. Returns error if the string was invalid
// or if no password option is found.
func (rol List) GetPassword() (isNull bool, password string, err error) {
	for i := range rol {
		if rol[i].Option != PASSWORD {
			continue
		}
		return rol[i].Value()
	}
	// Password option not found.
	return false, "", errors.New("password not found in role options")
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pubsub
import (
"encoding/json"
"fmt"
"net"
"strconv"
"sync"
"time"
"github.com/apache/servicecomb-kie/server/config"
"github.com/go-chassis/openlog"
"github.com/hashicorp/serf/cmd/serf/command/agent"
"github.com/hashicorp/serf/serf"
)
// once guards one-time initialization of the package-level bus.
var once sync.Once

// bus is the process-wide message bus backed by a serf agent.
var bus *Bus

// const
const (
	// EventKVChange is the serf user-event name for key-value changes.
	EventKVChange             = "kv-chg"
	DefaultEventBatchSize     = 5000
	DefaultEventBatchInterval = 500 * time.Millisecond
)

// topics maps an encoded topic string to a *sync.Map of observer UUID -> *Observer
// (see AddObserver/RemoveObserver).
var topics sync.Map

// Topics exposes the package-level topic registry.
func Topics() *sync.Map {
	return &topics
}

// Bus is the message bus.
type Bus struct {
	// agent is the serf agent used for cluster membership and user events.
	agent *agent.Agent
}
// Init create serf agent
// It configures bind/advertise addresses and the node name from the service
// configuration, then creates (but does not start) the agent exactly once.
func Init() {
	once.Do(func() {
		ac := agent.DefaultConfig()
		sc := serf.DefaultConfig()
		scmc := sc.MemberlistConfig
		// Bind address: where this node listens for peer gossip.
		listenPeerAddr := config.Configurations.ListenPeerAddr
		if listenPeerAddr != "" {
			ac.BindAddr = listenPeerAddr
			scmc.BindAddr, scmc.BindPort = splitHostPort(listenPeerAddr, scmc.BindAddr, scmc.BindPort)
		}
		// Advertise address: what other nodes use to reach this node.
		advertiseAddr := config.Configurations.AdvertiseAddr
		if advertiseAddr != "" {
			ac.AdvertiseAddr = advertiseAddr
			scmc.AdvertiseAddr, scmc.AdvertisePort = splitHostPort(advertiseAddr, scmc.AdvertiseAddr, scmc.AdvertisePort)
		}
		if config.Configurations.NodeName != "" {
			sc.NodeName = config.Configurations.NodeName
		}
		// Cap the serf user-event payload size (bytes).
		ac.UserEventSizeLimit = 512
		a, err := agent.Create(ac, sc, nil)
		if err != nil {
			openlog.Fatal("can not sync key value change events to other kie nodes:" + err.Error())
		}
		bus = &Bus{
			agent: a,
		}
	})
}
// splitHostPort split input string to host port
// An empty input yields the provided defaults.
// NOTE(review): openlog.Fatal is assumed to terminate the process on a parse
// failure; if it does not, execution would continue with empty/zero values —
// confirm openlog.Fatal semantics.
func splitHostPort(advertiseAddr string, defaultHost string, defaultPort int) (string, int) {
	if len(advertiseAddr) == 0 {
		return defaultHost, defaultPort
	}
	host, port, err := net.SplitHostPort(advertiseAddr)
	if err != nil {
		openlog.Fatal(fmt.Sprintf("split string[%s] to host:port failed", advertiseAddr))
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		openlog.Fatal(fmt.Sprintf("invalid port in string[%s]", advertiseAddr))
	}
	return host, p
}
// Start start serf agent
// It registers the cluster event handler and, when a peer address is
// configured, joins that peer's cluster.
func Start() {
	err := bus.agent.Start()
	if err != nil {
		openlog.Fatal("can not sync key value change events to other kie nodes" + err.Error())
	}
	openlog.Info("kie message bus started")
	// Forward cluster events (including kv-change user events) to the handler.
	eh := &ClusterEventHandler{}
	bus.agent.RegisterEventHandler(eh)
	if config.Configurations.PeerAddr != "" {
		err := join([]string{config.Configurations.PeerAddr})
		if err != nil {
			openlog.Fatal("lost event message")
		} else {
			openlog.Info("join kie node:" + config.Configurations.PeerAddr)
		}
	}
}
// join makes this agent join an existing serf cluster at the given addresses.
func join(addresses []string) error {
	_, err := bus.agent.Join(addresses, false)
	return err
}
// Publish send event
// The event is JSON-encoded and broadcast to the cluster as a serf user
// event named EventKVChange.
func Publish(event *KVChangeEvent) error {
	payload, err := json.Marshal(event)
	if err != nil {
		return err
	}
	return bus.agent.UserEvent(EventKVChange, payload, true)
}
// AddObserver observe key changes by (key or labels) or (key and labels)
// It registers o under the topic's encoded key and returns that key.
//
// Fixed: the previous Load-then-Store sequence was a check-then-act race —
// two concurrent callers on a new topic could each store a fresh map and one
// observer would be lost. LoadOrStore makes the registration atomic.
func AddObserver(o *Observer, topic *Topic) (string, error) {
	t, err := topic.Encode()
	if err != nil {
		return "", err
	}
	fresh := &sync.Map{}
	fresh.Store(o.UUID, o)
	if existing, loaded := topics.LoadOrStore(t, fresh); loaded {
		existing.(*sync.Map).Store(o.UUID, o)
		openlog.Debug("add new observer for topic:" + t)
		return t, nil
	}
	openlog.Info("new topic:" + t)
	return t, nil
}
// RemoveObserver unregisters the observer with the given uuid from the topic.
//
// Fixed: previously an Encode failure fell through to a lookup with a partial
// key, and a missing topic caused a nil type assertion panic on
// `observers.(*sync.Map)`. Both paths now return early.
func RemoveObserver(uuid string, topic *Topic) {
	t, err := topic.Encode()
	if err != nil {
		openlog.Error(err.Error())
		return
	}
	observers, ok := topics.Load(t)
	if !ok {
		// Topic was never created (or already removed); nothing to do.
		return
	}
	observers.(*sync.Map).Delete(uuid)
}
|
package datasource
import (
"fmt"
"github.com/go-xorm/core"
"github.com/go-xorm/xorm"
_ "github.com/go-sql-driver/mysql"
"gopkg.in/yaml.v2"
"gotest/common"
"io/ioutil"
"path/filepath"
)
// Instance creates the global xorm engine from ./configs/databases.yml,
// verifies connectivity with a ping, and applies pool/logging/mapper
// settings from the configuration. The engine is stored in common.Dbengine
// and also returned.
func Instance() (*xorm.Engine, error) {
	c, err := xdb_config("./configs/databases.yml")
	if err != nil {
		common.Log("xorm databases yml error", err)
		return nil, err
	}
	// Build the MySQL DSN from the loaded configuration.
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=%s", c.UserName, c.PassWord, c.Host, c.Port, c.DBName, c.Charset)
	// Create the ORM engine.
	common.Dbengine, err = xorm.NewEngine("mysql", dsn)
	if err != nil {
		common.Log("xorm new engine error", err)
		return nil, err
	}
	// Verify the connection is actually usable.
	if err := common.Dbengine.Ping(); err != nil {
		common.Log("xorm ping error", err)
		return nil, err
	}
	// Echo SQL statements to the log when configured.
	common.Dbengine.ShowSQL(c.ShowSQL)
	// Connection pool: idle and open connection limits.
	common.Dbengine.SetMaxIdleConns(c.IdleNum)
	common.Dbengine.SetMaxOpenConns(c.OpenNum)
	// SnakeMapper maps struct/field names to snake_case table/column names.
	common.Dbengine.SetTableMapper(core.SnakeMapper{})
	return common.Dbengine, nil
}
// xdb_config loads the database configuration from the given YAML file,
// starting from the package defaults and overlaying the file's values.
func xdb_config(filename string) (DBconfigs, error) {
	cfg := DefaultDbconfig()
	absPath, err := filepath.Abs(filename)
	if err != nil {
		return cfg, err
	}
	// read the raw contents of the file
	raw, err := ioutil.ReadFile(absPath)
	if err != nil {
		return cfg, err
	}
	// put the file's contents as yaml to the default configuration
	err = yaml.Unmarshal(raw, &cfg)
	return cfg, err
}
|
package constant
// ItemJson is a flat item record: ids, names, price, energy, image URL and
// free-form tag/flag/note fields.
type ItemJson struct {
	Cid    string
	Pid    int
	Cname  string
	Pname  string
	Price  float32
	Energy int
	Img    string
	Flag   string
	Tag    string
	Note   string
}

// ProductJson groups an item under a classification and type id.
// NOTE(review): all fields are unexported, so encoding/json would ignore them
// entirely — confirm this type is not meant to be (un)marshaled.
type ProductJson struct {
	classification string
	tid            int
	items          ItemJson
}

// ResJson is the response envelope: a header carrying a status code and
// message, plus an arbitrary list payload in Body.
type ResJson struct {
	Header struct {
		Code int    `json:"code"`
		Msg  string `json:"msg"`
	} `json:"header"`
	Body []interface{} `json:"body"`
}

// Ids holds a Cid/Pid identifier pair.
type Ids struct {
	Cid int
	Pid int
}
package problem0054
// spiralOrder returns the elements of matrix in clockwise spiral order,
// starting from the top-left corner.
func spiralOrder(matrix [][]int) []int {
	rows := len(matrix)
	if rows == 0 {
		return []int{}
	}
	cols := len(matrix[0])
	if cols == 0 {
		return []int{}
	}
	// Single row: the row itself is already the spiral.
	if rows == 1 {
		return matrix[0]
	}
	res := make([]int, 0, rows*cols)
	// Shrink the four boundaries inward, peeling one ring per pass.
	top, bottom, left, right := 0, rows-1, 0, cols-1
	for len(res) < rows*cols {
		for j := left; j <= right; j++ {
			res = append(res, matrix[top][j])
		}
		top++
		for i := top; i <= bottom; i++ {
			res = append(res, matrix[i][right])
		}
		right--
		if top <= bottom {
			for j := right; j >= left; j-- {
				res = append(res, matrix[bottom][j])
			}
			bottom--
		}
		if left <= right {
			for i := bottom; i >= top; i-- {
				res = append(res, matrix[i][left])
			}
			left++
		}
	}
	return res
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"reflect"
"strings"
"github.com/jasonlvhit/gocron"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/koderover/zadig/lib/microservice/cron/core/service"
"github.com/koderover/zadig/lib/setting"
"github.com/koderover/zadig/lib/tool/xlog"
)
// UpsertWorkflowScheduler reconciles the client's gocron schedulers with the
// workflows and pipelines currently registered in Aslan: it rebuilds the
// scheduler for every workflow/pipeline whose schedule items or enabled flag
// changed, and stops schedulers whose owner no longer exists.
func (c *CronClient) UpsertWorkflowScheduler(log *xlog.Logger) {
	workflows, err := c.AslanCli.ListWorkflows(log)
	if err != nil {
		log.Error(err)
		return
	}
	log.Info("start init workflow scheduler..")
	// taskMap records every key seen in this pass; schedulers whose key is
	// absent afterwards are considered deleted and are torn down below.
	taskMap := make(map[string]bool)
	for _, workflow := range workflows {
		key := "workflow-" + workflow.Name
		taskMap[key] = true
		if workflow.Schedules == nil {
			workflow.Schedules = &service.ScheduleCtrl{}
		}
		if _, ok := c.lastSchedulers[key]; ok && reflect.DeepEqual(workflow.Schedules.Items, c.lastSchedulers[key]) {
			// Skip only when the enabled flag is unchanged as well.
			if enabled, ok := c.enabledMap[key]; ok && enabled == workflow.Schedules.Enabled {
				continue
			}
		}
		c.enabledMap[key] = workflow.Schedules.Enabled
		c.lastSchedulers[key] = workflow.Schedules.Items
		newScheduler := gocron.NewScheduler()
		for _, schedule := range workflow.Schedules.Items {
			if schedule != nil {
				if err := schedule.Validate(); err != nil {
					log.Errorf("[%s] invalid schedule: %v", key, err)
					continue
				}
				BuildScheduledJob(newScheduler, schedule).Do(c.RunScheduledTask, workflow, schedule.WorkflowArgs, log)
			}
		}
		// Master switch: when schedules are disabled, drop all jobs.
		if !workflow.Schedules.Enabled {
			newScheduler.Clear()
		}
		c.Schedulers[key] = newScheduler
		log.Infof("[%s] building schedulers..", key)
		// Stop the previous scheduler for this key, if one is running.
		if _, ok := c.SchedulerController[key]; ok {
			c.SchedulerController[key] <- true
		}
		log.Infof("[%s]lens of scheduler: %d", key, c.Schedulers[key].Len())
		c.SchedulerController[key] = c.Schedulers[key].Start()
	}
	pipelines, err := c.AslanCli.ListPipelines(log)
	if err != nil {
		log.Error(err)
		return
	}
	log.Info("start init pipeline scheduler..")
	// Same reconciliation as above, for pipelines.
	for _, pipeline := range pipelines {
		key := "pipeline-" + pipeline.Name
		taskMap[key] = true
		if _, ok := c.lastSchedulers[key]; ok && reflect.DeepEqual(pipeline.Schedules.Items, c.lastSchedulers[key]) {
			// Skip only when the enabled flag is unchanged as well.
			if enabled, ok := c.enabledMap[key]; ok && enabled == pipeline.Schedules.Enabled {
				continue
			}
		}
		c.enabledMap[key] = pipeline.Schedules.Enabled
		c.lastSchedulers[key] = pipeline.Schedules.Items
		newScheduler := gocron.NewScheduler()
		for _, schedule := range pipeline.Schedules.Items {
			if schedule != nil {
				if err := schedule.Validate(); err != nil {
					log.Errorf("[%s] invalid schedule: %v", key, err)
					continue
				}
				BuildScheduledPipelineJob(newScheduler, schedule).Do(c.RunScheduledPipelineTask, pipeline, schedule.TaskArgs, log)
			}
		}
		// Master switch: when schedules are disabled, drop all jobs.
		if !pipeline.Schedules.Enabled {
			newScheduler.Clear()
		}
		c.Schedulers[key] = newScheduler
		log.Infof("[%s] building schedulers..", key)
		// Stop the previous scheduler for this key, if one is running.
		if _, ok := c.SchedulerController[key]; ok {
			c.SchedulerController[key] <- true
		}
		log.Infof("[%s]lens of scheduler: %d", key, c.Schedulers[key].Len())
		c.SchedulerController[key] = c.Schedulers[key].Start()
	}
	// Built-in scheduler names that must never be garbage-collected here.
	ScheduleNames := sets.NewString(
		CleanJobScheduler, UpsertWorkflowScheduler, UpsertTestScheduler,
		InitStatScheduler, InitOperationStatScheduler,
		UpsertColliePipelineScheduler)
	// Stop schedulers whose workflow/pipeline has been deleted.
	for name := range c.Schedulers {
		if _, ok := taskMap[name]; !ok && !ScheduleNames.Has(name) {
			// Exclude health-check timers for non-container (PM) services.
			if strings.HasPrefix(name, "service-") && strings.Contains(name, "pm") {
				continue
			}
			// Exclude test cron timers.
			if strings.HasPrefix(name, "test-timer-") {
				continue
			}
			// Exclude collie (free-form) pipeline timers.
			if strings.HasPrefix(name, "collie-pipeline-timer-") {
				continue
			}
			log.Warnf("[%s]deleted workflow detached", name)
			if _, ok := c.SchedulerController[name]; ok {
				c.SchedulerController[name] <- true
			}
			delete(c.Schedulers, name)
			delete(c.lastSchedulers, name)
		}
	}
}
// RunScheduledTask fires one cron-triggered run of the given workflow,
// attributing the task to the cron creator and copying over any stored
// schedule parameters. Errors are logged, not returned.
func (c *CronClient) RunScheduledTask(workflow *service.Workflow, params *service.WorkflowTaskArgs, log *xlog.Logger) {
	log.Infof("start workflow cron job: %s ...", workflow.Name)
	// NOTE(review): WorklowTaskCreator is a (misspelled) field of
	// service.WorkflowTaskArgs and cannot be renamed here.
	args := &service.WorkflowTaskArgs{
		WorkflowName:       workflow.Name,
		WorklowTaskCreator: setting.CronTaskCreator,
		ReqID:              log.ReqID(),
	}
	// Carry over the schedule's stored task parameters, if any.
	if params != nil {
		args.Description = params.Description
		args.ProductTmplName = params.ProductTmplName
		args.Target = params.Target
		args.Namespace = params.Namespace
		args.Tests = params.Tests
		args.DistributeEnabled = params.DistributeEnabled
	}
	if err := c.AslanCli.RunWorkflowTask(args, log); err != nil {
		log.Errorf("[%s]RunScheduledTask err: %v", workflow.Name, err)
	}
}
// BuildScheduledJob maps a schedule's frequency onto the matching gocron
// recurrence (every N minutes/hours/days, or weekly at a fixed time) and
// returns the configured job.
// NOTE(review): an unrecognized frequency returns nil, and callers chain
// .Do() on the result — confirm frequencies are always validated upstream
// (schedule.Validate is called before this in UpsertWorkflowScheduler).
// NOTE(review): setting.FrequencyMondy is a misspelled constant declared in
// the setting package; it cannot be fixed here.
func BuildScheduledJob(scheduler *gocron.Scheduler, schedule *service.Schedule) *gocron.Job {
	switch schedule.Frequency {
	case setting.FrequencyMinutes:
		return scheduler.Every(schedule.Number).Minutes()
	case setting.FrequencyHour:
		return scheduler.Every(schedule.Number).Hour()
	case setting.FrequencyHours:
		return scheduler.Every(schedule.Number).Hours()
	case setting.FrequencyDay:
		return scheduler.Every(schedule.Number).Day().At(schedule.Time)
	case setting.FrequencyDays:
		return scheduler.Every(schedule.Number).Days().At(schedule.Time)
	case setting.FrequencyMondy:
		return scheduler.Every(schedule.Number).Monday().At(schedule.Time)
	case setting.FrequencyTuesday:
		return scheduler.Every(schedule.Number).Tuesday().At(schedule.Time)
	case setting.FrequencyWednesday:
		return scheduler.Every(schedule.Number).Wednesday().At(schedule.Time)
	case setting.FrequencyThursday:
		return scheduler.Every(schedule.Number).Thursday().At(schedule.Time)
	case setting.FrequencyFriday:
		return scheduler.Every(schedule.Number).Friday().At(schedule.Time)
	case setting.FrequencySaturday:
		return scheduler.Every(schedule.Number).Saturday().At(schedule.Time)
	case setting.FrequencySunday:
		return scheduler.Every(schedule.Number).Sunday().At(schedule.Time)
	}
	return nil
}
|
package store
import "context"
// Reducer is a function that applies an action to the engine state in place.
type Reducer func(ctx context.Context, engineState *EngineState, action Action)

// EmptyReducer is a Reducer that ignores every action; useful as a default.
var EmptyReducer = Reducer(func(ctx context.Context, s *EngineState, action Action) {})
|
package proxycommands
import (
"github.com/spf13/cobra"
)
// NewProxyCommands creates a new cobra command
// It groups the configure/run/git-credentials subcommands under
// "proxy-commands".
func NewProxyCommands() *cobra.Command {
	proxyCommandsCmd := &cobra.Command{
		Use:   "proxy-commands",
		Short: "Execute Commands on a remote environment",
		Args:  cobra.NoArgs,
	}
	// Register each subcommand handled by the proxy.
	for _, sub := range []*cobra.Command{NewConfigureCmd(), NewRunCmd(), NewGitCredentialsCmd()} {
		proxyCommandsCmd.AddCommand(sub)
	}
	return proxyCommandsCmd
}
|
package leetcode_go
// addTwoNumbers adds the two numbers represented by l1 and l2 (most
// significant digit first) and returns their sum in the same representation
// (LeetCode 445).
//
// Fixed: the loop previously terminated at index 0 (`i > 0 || j > 0`), so the
// last digit of each number was never consumed, and the branch conditions
// (`i < 0` / `j < 0`) dropped digits whenever exactly one index remained
// valid. Both stacks are now drained down to index 0.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	stack1 := toStackP445(l1)
	stack2 := toStackP445(l2)
	carry := 0
	var head *ListNode
	// Consume both stacks from the end (least significant digit first),
	// prepending each digit node so the result reads most-significant first.
	for i, j := len(stack1)-1, len(stack2)-1; i >= 0 || j >= 0 || carry > 0; {
		val := carry
		if i >= 0 {
			val += stack1[i]
			i--
		}
		if j >= 0 {
			val += stack2[j]
			j--
		}
		val, carry = val%10, val/10
		head = &ListNode{Val: val, Next: head}
	}
	return head
}
// toStackP445 collects the list's values into a slice, head first, so callers
// can consume it from the end like a stack.
func toStackP445(node *ListNode) []int {
	values := []int{}
	for cur := node; cur != nil; cur = cur.Next {
		values = append(values, cur.Val)
	}
	return values
}
|
package main
import (
"fmt"
"io"
"os"
)
func solve(r io.Reader, w io.Writer) {
var t int
fmt.Fscan(r, &t)
for i := 0; i < t; i++ {
var a, b int
fmt.Fscan(r, &a, &b)
if i > 0 {
fmt.Fprint(w, "\n")
}
fmt.Fprint(w, a+b)
}
}
// main wires solve to standard input and output.
func main() {
	solve(os.Stdin, os.Stdout)
}
|
package operatingsystem
import (
"bytes"
"fmt"
"os/exec"
"strings"
)
// GetOperatingSystem returns the product name reported by sw_vers, e.g.
// "Mac OS X" or "macOS".
//
// Fixed: the previous implementation read only the first line of output,
// skipped a hard-coded 16 bytes past the 12-character "ProductName:" label
// (corrupting the value), and trimmed only spaces, leaving tabs/newlines.
// It now scans every line and strips the label plus surrounding whitespace.
func GetOperatingSystem() (string, error) {
	sw_vers, err := exec.LookPath("sw_vers")
	if err != nil {
		return "", fmt.Errorf("Can not find sw_vers")
	}
	cmd := exec.Command(sw_vers)
	stdbuf := new(bytes.Buffer)
	cmd.Stdout = stdbuf
	if err := cmd.Run(); err != nil {
		return "", err
	}
	// sw_vers prints one "Label:\tvalue" pair per line.
	for _, line := range strings.Split(stdbuf.String(), "\n") {
		if strings.HasPrefix(line, "ProductName:") {
			return strings.TrimSpace(strings.TrimPrefix(line, "ProductName:")), nil
		}
	}
	return "", fmt.Errorf("ProductName not found")
}
// IsContainerized reports whether the process is running inside a container.
// No-op on Mac OSX: always returns false with no error.
func IsContainerized() (bool, error) {
	return false, nil
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s
import (
"yunion.io/x/jsonutils"
)
// RoleCreateOpt holds the CLI options for creating a k8s role.
type RoleCreateOpt struct {
	NamespaceResourceCreateOptions
	Rule []string `help:"role rule, e.g: 'apps/v1:deployments:get,watch,list'"`
}

// Params builds the JSON parameters for the role-create API call: the
// embedded namespace-resource params plus the parsed policy rules under the
// "rules" key.
func (o *RoleCreateOpt) Params() (jsonutils.JSONObject, error) {
	ret, err := o.NamespaceResourceCreateOptions.Params()
	if err != nil {
		return nil, err
	}
	params := ret.(*jsonutils.JSONDict)
	// Each Rule string is parsed by parsePolicyRules; see the field help text
	// for the accepted 'group/version:resource:verbs' format.
	rules, err := parsePolicyRules(o.Rule)
	if err != nil {
		return nil, err
	}
	params.Add(jsonutils.Marshal(rules), "rules")
	return params, nil
}
// RoleUpdateOpt holds the CLI options for updating a k8s role.
type RoleUpdateOpt struct {
	NamespaceResourceUpdateOptions
	Rule []string `help:"role rule, e.g: 'apps/v1:deployments:get,watch,list'"`
}

// Params builds the JSON parameters for the role-update API call.
// NOTE(review): unlike RoleCreateOpt.Params, this starts from an empty dict
// and never consults the embedded NamespaceResourceUpdateOptions — confirm
// whether that is intentional.
func (o *RoleUpdateOpt) Params() (jsonutils.JSONObject, error) {
	params := jsonutils.NewDict()
	rules, err := parsePolicyRules(o.Rule)
	if err != nil {
		return nil, err
	}
	params.Add(jsonutils.Marshal(rules), "rules")
	return params, nil
}
|
package helpers
import (
"github.com/LiveSocket/bot/conv"
"github.com/LiveSocket/bot/service"
"github.com/gammazero/nexus/v3/wamp"
)
// CustomCommand The important parts of a custom command
type CustomCommand struct {
	Channel     string `json:"channel"`
	Name        string `json:"name"`
	// Proc is the WAMP procedure invoked when the command runs.
	Proc        string `json:"proc"`
	Enabled     bool   `json:"enabled"`
	Restricted  bool   `json:"restricted"`
	Description string `json:"description"`
}
// GetCustomCommand WAMP call helper for getting a custom command
// It returns (nil, nil) when the command does not exist (the endpoint
// returned no arguments).
func GetCustomCommand(service *service.Service, channel string, name string) (*CustomCommand, error) {
	// Call get command by id endpoint
	res, err := service.SimpleCall("private.command.getCustom", nil, wamp.Dict{"channel": channel, "name": name})
	if err != nil {
		return nil, err
	}
	// Check, convert, and return response
	if len(res.Arguments) > 0 && res.Arguments[0] != nil {
		command, err := conv.ToStringMap(res.Arguments[0])
		if err != nil {
			return nil, err
		}
		result, err := toCustom(command)
		return result, err
	}
	return nil, nil
}
// toCustom converts a raw string-keyed map from the WAMP response into a
// CustomCommand, converting the two boolean fields explicitly.
func toCustom(command map[string]interface{}) (*CustomCommand, error) {
	isEnabled, err := conv.ToBool(command["enabled"])
	if err != nil {
		return nil, err
	}
	isRestricted, err := conv.ToBool(command["restricted"])
	if err != nil {
		return nil, err
	}
	cmd := &CustomCommand{
		Channel:     conv.ToString(command["channel"]),
		Name:        conv.ToString(command["name"]),
		Proc:        conv.ToString(command["proc"]),
		Description: conv.ToString(command["description"]),
		Enabled:     isEnabled,
		Restricted:  isRestricted,
	}
	return cmd, nil
}
|
// Copyright 2017-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"archive/tar"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
func createArchive(dbgDir string) (string, error) {
// Based on http://blog.ralch.com/tutorial/golang-working-with-tar-and-gzip/
archivePath := fmt.Sprintf("%s.tar", dbgDir)
file, err := os.Create(archivePath)
if err != nil {
return "", err
}
defer file.Close()
writer := tar.NewWriter(file)
defer writer.Close()
var baseDir string
if info, err := os.Stat(dbgDir); os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "Debug directory does not exist %s", err)
return "", err
} else if err == nil && info.IsDir() {
baseDir = filepath.Base(dbgDir)
}
return archivePath, filepath.Walk(dbgDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
header, err := tar.FileInfoHeader(info, info.Name())
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to compress %s %s", info.Name(), err)
return err
}
if baseDir != "" {
header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, dbgDir))
}
if err := writer.WriteHeader(header); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write header %s", err)
return err
}
if info.IsDir() {
return err
}
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to open %s %s", path, err)
}
defer file.Close()
_, err = io.Copy(writer, file)
return err
})
}
|
package main
import "regexp"
import "fmt"
import "strings"
// swear is the sample sentence main runs through the filters.
const swear = "I absolutely fucking hate you, kill yourself you little shit"

// fword_regex matches f-word variants case-insensitively, allowing repeated
// and space-separated letters.
const fword_regex = `(?i)(f+u+(k+|c+k+))|f *u |f *u[^a-z\.]`

// sword_regex matches s-word variants case-insensitively, allowing repeated
// and space-separated letters.
const sword_regex = `(?i)(s+h+(t+|i+t+))|s *h *i *t`
// main censors the sample sentence with both filters and prints the result.
func main() {
	new1 := deleteFword(swear, "f***")
	new2 := deleteSword(new1, "s***")
	fmt.Println(new2)
}
// deleteFword replaces every occurrence of the first f-word match found in
// sentance with toReplace. The input is returned unchanged when the pattern
// fails to compile or nothing matches.
//
// Fixed: a compile error previously left r nil and the next line panicked;
// and when FindString returned "", strings.Replace with an empty old-string
// inserted toReplace between every character of the input.
func deleteFword(sentance string, toReplace string) string {
	r, err := regexp.Compile(fword_regex)
	if err != nil {
		fmt.Println(err)
		return sentance
	}
	match := r.FindString(sentance)
	if match == "" {
		return sentance
	}
	return strings.Replace(sentance, match, toReplace, -1)
}
// deleteSword replaces every occurrence of the first s-word match found in
// sentance with toReplace. The input is returned unchanged when the pattern
// fails to compile or nothing matches.
//
// Fixed: a compile error previously left r nil and the next line panicked;
// and when FindString returned "", strings.Replace with an empty old-string
// inserted toReplace between every character of the input.
func deleteSword(sentance string, toReplace string) string {
	r, err := regexp.Compile(sword_regex)
	if err != nil {
		fmt.Println(err)
		return sentance
	}
	match := r.FindString(sentance)
	if match == "" {
		return sentance
	}
	return strings.Replace(sentance, match, toReplace, -1)
}
|
package pushover
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/client"
"github.com/TwiN/gatus/v5/core"
)
const (
	// restAPIURL is the Pushover messages endpoint.
	restAPIURL = "https://api.pushover.net/1/messages.json"
	// defaultPriority is used when no priority is configured.
	defaultPriority = 0
)

// AlertProvider is the configuration necessary for sending an alert using Pushover
type AlertProvider struct {
	// Key used to authenticate the application sending
	// See "Your Applications" on the dashboard, or add a new one: https://pushover.net/apps/build
	ApplicationToken string `yaml:"application-token"`
	// Key of the user or group the messages should be sent to
	UserKey string `yaml:"user-key"`
	// The title of your message, likely the application name
	// default: the name of your application in Pushover
	Title string `yaml:"title,omitempty"`
	// Priority of all messages, ranging from -2 (very low) to 2 (Emergency)
	// default: 0
	Priority int `yaml:"priority,omitempty"`
	// Sound of the messages (see: https://pushover.net/api#sounds)
	// default: "" (pushover)
	Sound string `yaml:"sound,omitempty"`
	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`
}
// IsValid returns whether the provider's configuration is valid
// A valid configuration has a 30-character application token and user key,
// and a priority within [-2, 2].
func (provider *AlertProvider) IsValid() bool {
	// Normalize the priority. Since defaultPriority is 0, this assignment is
	// currently a no-op; it only matters if the default ever changes.
	if provider.Priority == 0 {
		provider.Priority = defaultPriority
	}
	return len(provider.ApplicationToken) == 30 && len(provider.UserKey) == 30 && provider.Priority >= -2 && provider.Priority <= 2
}
// Send an alert using the provider
// Reference doc for pushover: https://pushover.net/api
// The alert is POSTed as JSON; any status code above 399 is surfaced as an
// error that includes the response body.
func (provider *AlertProvider) Send(endpoint *core.Endpoint, alert *alert.Alert, result *core.Result, resolved bool) error {
	buffer := bytes.NewBuffer(provider.buildRequestBody(endpoint, alert, result, resolved))
	request, err := http.NewRequest(http.MethodPost, restAPIURL, buffer)
	if err != nil {
		return err
	}
	request.Header.Set("Content-Type", "application/json")
	response, err := client.GetHTTPClient(nil).Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.StatusCode > 399 {
		body, _ := io.ReadAll(response.Body)
		return fmt.Errorf("call to provider alert returned status code %d: %s", response.StatusCode, string(body))
	}
	// err is nil at this point.
	return err
}
// Body is the JSON payload sent to the Pushover messages endpoint.
type Body struct {
	Token    string `json:"token"`           // application API token
	User     string `json:"user"`            // user or group key
	Title    string `json:"title,omitempty"` // optional message title
	Message  string `json:"message"`         // message text
	Priority int    `json:"priority"`        // -2 (very low) .. 2 (emergency)
	Sound    string `json:"sound,omitempty"` // optional notification sound
}
// buildRequestBody builds the JSON request body for the provider, prefixing
// the message with RESOLVED or TRIGGERED depending on the alert state.
func (provider *AlertProvider) buildRequestBody(endpoint *core.Endpoint, alert *alert.Alert, result *core.Result, resolved bool) []byte {
	prefix := "TRIGGERED"
	if resolved {
		prefix = "RESOLVED"
	}
	message := fmt.Sprintf("%s: %s - %s", prefix, endpoint.DisplayName(), alert.GetDescription())
	payload := Body{
		Token:    provider.ApplicationToken,
		User:     provider.UserKey,
		Title:    provider.Title,
		Message:  message,
		Priority: provider.priority(),
		Sound:    provider.Sound,
	}
	body, _ := json.Marshal(payload)
	return body
}
// priority returns the configured message priority, falling back to
// defaultPriority when none was set.
func (provider *AlertProvider) priority() int {
	if provider.Priority != 0 {
		return provider.Priority
	}
	return defaultPriority
}
// GetDefaultAlert returns the provider's default alert configuration.
// It may be nil when no default-alert block is configured.
func (provider AlertProvider) GetDefaultAlert() *alert.Alert {
	return provider.DefaultAlert
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
"github.com/atomicjolt/string_utils"
)
// ListActiveCoursesInAccount Retrieve a paginated list of courses in this account.
// https://canvas.instructure.com/doc/api/accounts.html
//
// Path Parameters:
// # Path.AccountID (Required) ID
//
// Query Parameters:
// # Query.WithEnrollments (Optional) If true, include only courses with at least one enrollment. If false,
// include only courses with no enrollments. If not present, do not filter
// on course enrollment status.
// # Query.EnrollmentType (Optional) . Must be one of teacher, student, ta, observer, designerIf set, only return courses that have at least one user enrolled in
// in the course with one of the specified enrollment types.
// # Query.Published (Optional) If true, include only published courses. If false, exclude published
// courses. If not present, do not filter on published status.
// # Query.Completed (Optional) If true, include only completed courses (these may be in state
// 'completed', or their enrollment term may have ended). If false, exclude
// completed courses. If not present, do not filter on completed status.
// # Query.Blueprint (Optional) If true, include only blueprint courses. If false, exclude them.
// If not present, do not filter on this basis.
// # Query.BlueprintAssociated (Optional) If true, include only courses that inherit content from a blueprint course.
// If false, exclude them. If not present, do not filter on this basis.
// # Query.ByTeachers (Optional) List of User IDs of teachers; if supplied, include only courses taught by
// one of the referenced users.
// # Query.BySubaccounts (Optional) List of Account IDs; if supplied, include only courses associated with one
// of the referenced subaccounts.
// # Query.HideEnrollmentlessCourses (Optional) If present, only return courses that have at least one enrollment.
// Equivalent to 'with_enrollments=true'; retained for compatibility.
// # Query.State (Optional) . Must be one of created, claimed, available, completed, deleted, allIf set, only return courses that are in the given state(s). By default,
// all states but "deleted" are returned.
// # Query.EnrollmentTermID (Optional) If set, only includes courses from the specified term.
// # Query.SearchTerm (Optional) The partial course name, code, or full ID to match and return in the results list. Must be at least 3 characters.
// # Query.Include (Optional) . Must be one of syllabus_body, term, course_progress, storage_quota_used_mb, total_students, teachers, account_name, concluded- All explanations can be seen in the {api:CoursesController#index Course API index documentation}
// - "sections", "needs_grading_count" and "total_scores" are not valid options at the account level
// # Query.Sort (Optional) . Must be one of course_name, sis_course_id, teacher, account_nameThe column to sort results by.
// # Query.Order (Optional) . Must be one of asc, descThe order to sort the given column by.
// # Query.SearchBy (Optional) . Must be one of course, teacherThe filter to search by. "course" searches for course names, course codes,
// and SIS IDs. "teacher" searches for teacher names
// # Query.StartsBefore (Optional) If set, only return courses that start before the value (inclusive)
// or their enrollment term starts before the value (inclusive)
// or both the course's start_at and the enrollment term's start_at are set to null.
// The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
// # Query.EndsAfter (Optional) If set, only return courses that end after the value (inclusive)
// or their enrollment term ends after the value (inclusive)
// or both the course's end_at and the enrollment term's end_at are set to null.
// The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
// # Query.Homeroom (Optional) If set, only return homeroom courses.
//
type ListActiveCoursesInAccount struct {
	// Path holds the required URL path parameters.
	Path struct {
		AccountID string `json:"account_id" url:"account_id,omitempty"` // (Required)
	} `json:"path"`

	// Query holds the optional query-string filters; accepted values for
	// each field are documented in the comment block above this type.
	Query struct {
		WithEnrollments           bool      `json:"with_enrollments" url:"with_enrollments,omitempty"`                       // (Optional)
		EnrollmentType            []string  `json:"enrollment_type" url:"enrollment_type,omitempty"`                         // (Optional) . Must be one of teacher, student, ta, observer, designer
		Published                 bool      `json:"published" url:"published,omitempty"`                                     // (Optional)
		Completed                 bool      `json:"completed" url:"completed,omitempty"`                                     // (Optional)
		Blueprint                 bool      `json:"blueprint" url:"blueprint,omitempty"`                                     // (Optional)
		BlueprintAssociated       bool      `json:"blueprint_associated" url:"blueprint_associated,omitempty"`               // (Optional)
		ByTeachers                []string  `json:"by_teachers" url:"by_teachers,omitempty"`                                 // (Optional)
		BySubaccounts             []string  `json:"by_subaccounts" url:"by_subaccounts,omitempty"`                           // (Optional)
		HideEnrollmentlessCourses bool      `json:"hide_enrollmentless_courses" url:"hide_enrollmentless_courses,omitempty"` // (Optional)
		State                     []string  `json:"state" url:"state,omitempty"`                                             // (Optional) . Must be one of created, claimed, available, completed, deleted, all
		EnrollmentTermID          int64     `json:"enrollment_term_id" url:"enrollment_term_id,omitempty"`                   // (Optional)
		SearchTerm                string    `json:"search_term" url:"search_term,omitempty"`                                 // (Optional)
		Include                   []string  `json:"include" url:"include,omitempty"`                                         // (Optional) . Must be one of syllabus_body, term, course_progress, storage_quota_used_mb, total_students, teachers, account_name, concluded
		Sort                      string    `json:"sort" url:"sort,omitempty"`                                               // (Optional) . Must be one of course_name, sis_course_id, teacher, account_name
		Order                     string    `json:"order" url:"order,omitempty"`                                             // (Optional) . Must be one of asc, desc
		SearchBy                  string    `json:"search_by" url:"search_by,omitempty"`                                     // (Optional) . Must be one of course, teacher
		StartsBefore              time.Time `json:"starts_before" url:"starts_before,omitempty"`                             // (Optional)
		EndsAfter                 time.Time `json:"ends_after" url:"ends_after,omitempty"`                                   // (Optional)
		Homeroom                  bool      `json:"homeroom" url:"homeroom,omitempty"`                                       // (Optional)
	} `json:"query"`
}
// GetMethod returns the HTTP method used by this request.
func (t *ListActiveCoursesInAccount) GetMethod() string {
	return http.MethodGet
}
// GetURLPath returns the request path with the account id substituted in.
func (t *ListActiveCoursesInAccount) GetURLPath() string {
	const template = "accounts/{account_id}/courses"
	return strings.ReplaceAll(template, "{account_id}", fmt.Sprintf("%v", t.Path.AccountID))
}
// GetQuery encodes the query parameters for the request URL.
func (t *ListActiveCoursesInAccount) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return values.Encode(), nil
}
// GetBody returns the form-encoded request body; this request sends none.
func (t *ListActiveCoursesInAccount) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns the JSON request body; this request sends none.
func (t *ListActiveCoursesInAccount) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the request parameters and returns a single error
// joining every validation failure, or nil when the request is valid.
func (t *ListActiveCoursesInAccount) HasErrors() error {
	errs := []string{}
	if t.Path.AccountID == "" {
		errs = append(errs, "'Path.AccountID' is required")
	}
	for _, v := range t.Query.EnrollmentType {
		if v != "" && !string_utils.Include([]string{"teacher", "student", "ta", "observer", "designer"}, v) {
			errs = append(errs, "EnrollmentType must be one of teacher, student, ta, observer, designer")
		}
	}
	for _, v := range t.Query.State {
		if v != "" && !string_utils.Include([]string{"created", "claimed", "available", "completed", "deleted", "all"}, v) {
			errs = append(errs, "State must be one of created, claimed, available, completed, deleted, all")
		}
	}
	for _, v := range t.Query.Include {
		if v != "" && !string_utils.Include([]string{"syllabus_body", "term", "course_progress", "storage_quota_used_mb", "total_students", "teachers", "account_name", "concluded"}, v) {
			errs = append(errs, "Include must be one of syllabus_body, term, course_progress, storage_quota_used_mb, total_students, teachers, account_name, concluded")
		}
	}
	if t.Query.Sort != "" && !string_utils.Include([]string{"course_name", "sis_course_id", "teacher", "account_name"}, t.Query.Sort) {
		errs = append(errs, "Sort must be one of course_name, sis_course_id, teacher, account_name")
	}
	if t.Query.Order != "" && !string_utils.Include([]string{"asc", "desc"}, t.Query.Order) {
		errs = append(errs, "Order must be one of asc, desc")
	}
	if t.Query.SearchBy != "" && !string_utils.Include([]string{"course", "teacher"}, t.Query.SearchBy) {
		errs = append(errs, "SearchBy must be one of course, teacher")
	}
	if len(errs) > 0 {
		// Use an explicit "%s" verb: passing the joined string directly as
		// the format argument would misinterpret any '%' in it (go vet's
		// printf check flags non-constant format strings).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas client and decodes the
// response into a slice of Courses. When next is non-nil, that pagination
// link is fetched instead of building a fresh request. Pagination metadata
// extracted from the response headers is returned alongside the page.
func (t *ListActiveCoursesInAccount) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.Course, *canvasapi.PagedResource, error) {
	var err error
	var response *http.Response
	if next != nil {
		// Follow a pagination link produced by a previous response.
		response, err = c.Send(next, t.GetMethod(), nil)
	} else {
		response, err = c.SendRequest(t)
	}
	if err != nil {
		return nil, nil, err
	}
	// (A duplicated "if err != nil" check was removed here.)
	body, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		return nil, nil, err
	}
	ret := []*models.Course{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, nil, err
	}
	pagedResource, err := canvasapi.ExtractPagedResource(response.Header)
	if err != nil {
		return nil, nil, err
	}
	return ret, pagedResource, nil
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package announce_test
import (
"fmt"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/bitmark-inc/bitmarkd/announce"
"github.com/bitmark-inc/bitmarkd/announce/fixtures"
"github.com/bitmark-inc/bitmarkd/announce/mocks"
)
// TestSendRegistration verifies that SendRegistration propagates the error
// returned by the underlying client's Send call.
func TestSendRegistration(t *testing.T) {
	fixtures.SetupTestLogger()
	defer fixtures.TeardownTestLogger()
	// Domain lookup stub that always resolves to an empty node list.
	f := func(_ string) ([]string, error) { return []string{}, nil }
	_ = announce.Initialise("domain.not.exist", "cache", f)
	defer announce.Finalise()
	// make sure background jobs already finish first round, so
	// no logger will be called
	time.Sleep(20 * time.Millisecond)
	ctl := gomock.NewController(t)
	defer ctl.Finish()
	e := fmt.Errorf("wrong")
	c := mocks.NewMockClient(ctl)
	// Expect exactly one Send call and force it to fail with e.
	c.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(e).Times(1)
	err := announce.SendRegistration(c, "")
	assert.Equal(t, e, err, "wrong SendRegistration")
}
|
package main
import (
"fmt"
"time"
)
// selectdriver drains up to 20 messages from the counts channel, printing
// "too slow" whenever no message arrives within one second.
func selectdriver() {
	messages := counts("rose")
	for i := 0; i < 20; i++ {
		select {
		case msg := <-messages:
			fmt.Println(msg)
		case <-time.After(time.Second):
			fmt.Println("too slow")
		}
	}
}
// mainTimerDriver prints messages from the counts channel until a single
// five-second deadline fires, then reports and returns.
func mainTimerDriver() {
	messages := counts("rose")
	deadline := time.After(time.Second * 5)
	for {
		select {
		case msg := <-messages:
			fmt.Println(msg)
		case <-deadline:
			fmt.Println("you talk too much")
			return
		}
	}
}
|
package client
import (
fmt "fmt"
"lemna/arpc"
"sync"
)
// manager tracks forwarding targets keyed by client id.
// For use by client.Service only.
type manager struct {
	clients map[uint32]*Target // registered targets, keyed by client id
	mu      sync.Mutex         // guards clients
}

// newMananger returns an empty manager ready for use.
//
// NOTE(review): the name is misspelled ("Mananger"); kept as-is because
// callers elsewhere may reference it — confirm before renaming.
func newMananger() *manager {
	return &manager{clients: make(map[uint32]*Target)}
}
// newTarget registers a new Target for id and returns it, or an error when
// the id is already registered.
func (cm *manager) newTarget(s arpc.CAgent_ForwardServer, id uint32) (*Target, error) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	if _, exists := cm.clients[id]; exists {
		return nil, fmt.Errorf("repeated client<id=%d>", id)
	}
	t := newTarget(s, id)
	cm.clients[id] = t
	return t, nil
}
// getTarget returns the Target registered under id, or nil when none exists.
func (cm *manager) getTarget(id uint32) *Target {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	// A missing key yields the zero value (nil) for a pointer-valued map,
	// which matches the previous explicit ok-check.
	return cm.clients[id]
}
// delTarget removes the target registered under id, if any.
func (cm *manager) delTarget(id uint32) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	delete(cm.clients, id)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package perf
import (
"context"
"time"
"github.com/shirou/gopsutil/v3/mem"
"chromiumos/tast/common/perf"
)
// diffWait is the default duration to measure the baseline of memoryDataSource.
// NOTE(review): not referenced in this part of the file — presumably used by
// callers elsewhere; confirm before removing.
const diffWait = 5 * time.Second

// memoryDataSource is a perf.TimelineDatasource reporting the memory usage and its diff from certain point.
type memoryDataSource struct {
	name        string  // metric name for absolute usage (bytes)
	diffName    string  // metric name for the usage delta since the last snapshot
	percentName string  // metric name for the used-memory percentage
	previous    float64 // usage recorded by the last Start/Snapshot call
}

// NewMemoryDataSource creates a new instance of memoryDataSource with the
// given metric names.
func NewMemoryDataSource(name, diffName, percentName string) *memoryDataSource {
	return &memoryDataSource{name: name, diffName: diffName, percentName: percentName}
}
// Setup implements perf.TimelineDatasource.Setup.
// It applies the given metric-name prefix to all three metric names.
func (s *memoryDataSource) Setup(ctx context.Context, prefix string) error {
	s.name, s.diffName, s.percentName = prefix+s.name, prefix+s.diffName, prefix+s.percentName
	return nil
}
// Start implements perf.TimelineDatasource.Start.
// It records the current memory usage as the baseline for diffs.
func (s *memoryDataSource) Start(ctx context.Context) error {
	info, err := mem.VirtualMemoryWithContext(ctx)
	if err != nil {
		return err
	}
	s.previous = float64(info.Used)
	return nil
}
// Snapshot implements perf.TimelineDatasource.Snapshot.
// It records the current memory usage (bytes), its delta since the previous
// snapshot (bytes), and the used-memory percentage, then updates the baseline
// used for the next diff.
func (s *memoryDataSource) Snapshot(ctx context.Context, values *perf.Values) error {
	memInfo, err := mem.VirtualMemoryWithContext(ctx)
	if err != nil {
		return err
	}
	used := float64(memInfo.Used)
	values.Append(perf.Metric{
		Name:      s.diffName,
		Unit:      "bytes",
		Direction: perf.SmallerIsBetter,
		Multiple:  true,
	}, used-s.previous)
	values.Append(perf.Metric{
		Name:      s.name,
		Unit:      "bytes",
		Direction: perf.SmallerIsBetter,
		Multiple:  true,
	}, used) // was float64(used): used is already a float64
	values.Append(perf.Metric{
		Name:      s.percentName,
		Unit:      "percent",
		Direction: perf.SmallerIsBetter,
		Multiple:  true,
	}, memInfo.UsedPercent)
	s.previous = used
	return nil
}
// Stop does nothing; it exists to satisfy perf.TimelineDatasource.
func (s *memoryDataSource) Stop(_ context.Context, values *perf.Values) error {
	return nil
}
|
package main
import (
"errors"
"fmt"
"time"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
// main demonstrates errgroup with context cancellation: one goroutine fails
// and cancels the shared context so slower goroutines can abort early.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	g, errCtx := errgroup.WithContext(ctx)
	for i := 0; i < 10; i++ {
		// Copy the loop variable: goroutine closures would otherwise all
		// observe the final value of i, because starting goroutines takes
		// time and the loop typically finishes before they run.
		tmp := i
		g.Go(func() error {
			if tmp == 2 {
				fmt.Println("index ", tmp)
				// Typically cancel() is called once a goroutine hits an
				// error, so the other goroutines can observe it via errCtx
				// and decide whether to abort their remaining work.
				cancel()
				// Code after cancel() still runs — cleanup after the failure.
				fmt.Println("err index ", tmp)
				return errors.New("errrrrrrrrr ")
			} else if tmp == 7 || tmp == 8 || tmp == 9 {
				time.Sleep(time.Second * 3)
				// Check whether another goroutine has already failed; if so,
				// skip the remaining work below.
				err := CheckGoErr(errCtx)
				if err != nil {
					fmt.Println("check err:", err, tmp)
					return err
				}
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("wait err :", err)
	}
}
func CheckGoErr(errContext context.Context) error {
select {
case <-errContext.Done():
return errContext.Err()
default:
return nil
}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package gcjob
import (
"context"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/sql"
)
// GcTenant is a wrapper around the internal function that gc-s a tenant.
// It exists so packages outside gcjob can invoke the unexported gcTenant.
func GcTenant(
	ctx context.Context,
	execCfg *sql.ExecutorConfig,
	tenID uint64,
	progress *jobspb.SchemaChangeGCProgress,
) error {
	return gcTenant(ctx, execCfg, tenID, progress)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudprovider
import (
"context"
"reflect"
"yunion.io/x/onecloud/pkg/cloudcommon/db/lockman"
)
const (
	// SET_TAGS is the lock-key prefix used to serialize tag updates per manager.
	SET_TAGS = "set-tags"
)

// TagsUpdateInfo carries a resource's tag set before and after an update.
type TagsUpdateInfo struct {
	OldTags map[string]string
	NewTags map[string]string
}

// IsChanged reports whether the old and new tag sets differ.
func (t TagsUpdateInfo) IsChanged() bool {
	return !reflect.DeepEqual(t.OldTags, t.NewTags)
}
// SetTags updates the tags of a cloud resource, optionally replacing the
// existing set, while holding a per-manager lock.
//
// Serializing tag updates per manager avoids provider errors like the
// following when tags are set on multiple resources concurrently:
// Code=ResourceInUse.TagDuplicate, Message=tagKey-tagValue have exists., RequestId=e87714c0-e50b-4241-b79d-32897437174d
func SetTags(ctx context.Context, res ICloudResource, managerId string, tags map[string]string, replace bool) error {
	lockman.LockRawObject(ctx, SET_TAGS, managerId)
	defer lockman.ReleaseRawObject(ctx, SET_TAGS, managerId)
	return res.SetTags(tags, replace)
}
|
package xmppcore
import (
"encoding/xml"
)
// RFC 6120 6 SASL Negotiation

// SASLNS is the XML namespace for SASL negotiation elements.
const SASLNS = "urn:ietf:params:xml:ns:xmpp-sasl"

// Fully-qualified ("namespace local") element names used during negotiation.
const (
	SASLAuthElementName    = SASLNS + " auth"
	SASLSuccessElementName = SASLNS + " success"
	SASLFailureElementName = SASLNS + " failure"
)

// SASLMechanisms advertises the mechanisms offered by the server.
// RFC 6120 6.4.1
type SASLMechanisms struct {
	XMLName   xml.Name `xml:"urn:ietf:params:xml:ns:xmpp-sasl mechanisms"`
	Mechanism []string `xml:"mechanism"`
}

// SASLAuth is the client's authentication request; CharData carries the
// initial response payload.
// RFC 6120 6.4.2
type SASLAuth struct {
	XMLName   xml.Name `xml:"urn:ietf:params:xml:ns:xmpp-sasl auth"`
	Mechanism string   `xml:"mechanism,attr"`
	CharData  string   `xml:",chardata"`
}

// SASLSuccess signals successful SASL negotiation.
// RFC 6120 6.4.6
type SASLSuccess struct {
	XMLName xml.Name `xml:"urn:ietf:params:xml:ns:xmpp-sasl success"`
}

// SASLFailure reports why SASL negotiation failed.
type SASLFailure struct {
	XMLName   xml.Name `xml:"urn:ietf:params:xml:ns:xmpp-sasl failure"`
	Condition SASLFailureCondition
	Text      string `xml:"text"`
}

// SASLFailureCondition identifies a defined failure condition; the condition
// is conveyed by the element's name rather than its content.
type SASLFailureCondition struct {
	XMLName xml.Name // Deliberately un-tagged
}

// RFC 6120 section 6.5: the defined SASL failure conditions.
var (
	SASLFailureConditionAborted              = saslFailureCondition("aborted")
	SASLFailureConditionAccountDisabled      = saslFailureCondition("account-disabled")
	SASLFailureConditionCredentialsExpired   = saslFailureCondition("credentials-expired")
	SASLFailureConditionEncryptionRequired   = saslFailureCondition("encryption-required")
	SASLFailureConditionIncorrectEncoding    = saslFailureCondition("incorrect-encoding")
	SASLFailureConditionInvalidAuthzid       = saslFailureCondition("invalid-authzid")
	SASLFailureConditionInvalidMechanism     = saslFailureCondition("invalid-mechanism")
	SASLFailureConditionMalformedRequest     = saslFailureCondition("malformed-request")
	SASLFailureConditionMechanismTooWeak     = saslFailureCondition("mechanism-too-weak")
	SASLFailureConditionNotAuthorized        = saslFailureCondition("not-authorized")
	SASLFailureConditionTemporaryAuthFailure = saslFailureCondition("temporary-auth-failure")
)
// saslFailureCondition builds a SASLFailureCondition whose element name is
// the given local name within the SASL namespace.
func saslFailureCondition(local string) SASLFailureCondition {
	var cond SASLFailureCondition
	cond.XMLName = xml.Name{Space: SASLNS, Local: local}
	return cond
}
|
package main
import _ "fmt"
import "videocrawler/crawler"
// main fetches video info for a hard-coded Youku URL; result printing is
// currently commented out.
func main() {
	var youku *crawler.Youku = crawler.NewYouku()
	//fmt.Println(youku.GetCna())
	_, _ = youku.GetVideoInfo("http://v.youku.com/v_show/id_XMjg1Mzg0MzkyOA==.html?spm=a2hww.20023042.m_223465.5~5~5~5!2~5~5~A&f=50219377", 0)
	//fmt.Println(res)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package platform
import (
"context"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/resourced"
"chromiumos/tast/testing"
)
// resourcedTestParams configures a single variant of the Resourced test.
type resourcedTestParams struct {
	isBaseline bool // run only the stable, baseline checks
}

func init() {
	testing.AddTest(&testing.Test{
		Func:         Resourced,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that resourced works",
		Contacts:     []string{"vovoy@chromium.org"},
		Attr:         []string{"group:mainline"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      3 * time.Minute,
		Params: []testing.Param{{
			// Default variant: informational, runs the non-baseline path.
			ExtraAttr: []string{"informational"},
			Val: resourcedTestParams{
				isBaseline: false,
			},
		}, {
			Name: "baseline",
			Val: resourcedTestParams{
				isBaseline: true,
			},
		}},
	})
}
// checkSetGameMode flips the game mode to a new value, verifies the change
// took effect, and restores the original mode on exit via the deferred
// function (which may set the named result resErr).
func checkSetGameMode(ctx context.Context, rm *resourced.Client) (resErr error) {
	// Get the original game mode.
	origGameMode, err := rm.GameMode(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query game mode state")
	}
	testing.ContextLog(ctx, "Original game mode: ", origGameMode)
	defer func() {
		// Restore game mode. A restore failure only becomes the function's
		// result when no earlier error is already being returned.
		if err = rm.SetGameMode(ctx, origGameMode); err != nil {
			if resErr == nil {
				resErr = errors.Wrap(err, "failed to reset game mode state")
			} else {
				testing.ContextLog(ctx, "Failed to reset game mode state: ", err)
			}
		}
	}()
	// Set game mode to different value.
	// (When origGameMode != 0, newGameMode stays at its zero value, 0.)
	var newGameMode uint8
	if origGameMode == 0 {
		newGameMode = 1
	}
	if err = rm.SetGameMode(ctx, newGameMode); err != nil {
		return errors.Wrap(err, "failed to set game mode state")
	}
	testing.ContextLog(ctx, "Set game mode: ", newGameMode)
	// Check game mode is set to the new value.
	gameMode, err := rm.GameMode(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query game mode state")
	}
	if newGameMode != gameMode {
		return errors.Errorf("set game mode to: %d, but got game mode: %d", newGameMode, gameMode)
	}
	return nil
}
// checkSetGameModeWithTimeout sets game mode with a one-second timeout and
// verifies it both takes effect and is automatically reset afterwards.
func checkSetGameModeWithTimeout(ctx context.Context, rm *resourced.Client) (resErr error) {
	var want uint8 = resourced.GameModeBorealis
	if err := rm.SetGameModeWithTimeout(ctx, want, 1); err != nil {
		return errors.Wrap(err, "failed to set game mode state")
	}
	testing.ContextLog(ctx, "Set game mode with 1 second timeout: ", want)
	// The new value should be visible immediately.
	got, err := rm.GameMode(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query game mode state")
	}
	if want != got {
		return errors.Errorf("set game mode to: %d, but got game mode: %d", want, got)
	}
	// The timeout should flip the mode back to off; poll until it does.
	poll := func(ctx context.Context) error {
		mode, err := rm.GameMode(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to query game mode state")
		}
		if mode != resourced.GameModeOff {
			return errors.New("game mode is not reset")
		}
		return nil
	}
	if err := testing.Poll(ctx, poll, &testing.PollOptions{Timeout: 2 * time.Second, Interval: 100 * time.Millisecond}); err != nil {
		return errors.Wrap(err, "failed to wait for game mode reset")
	}
	return nil
}
// checkQueryMemoryStatus queries each memory-status method once and logs the
// returned values.
func checkQueryMemoryStatus(ctx context.Context, rm *resourced.Client) error {
	availKB, err := rm.AvailableMemoryKB(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query available memory")
	}
	testing.ContextLog(ctx, "GetAvailableMemoryKB returns: ", availKB)

	fgAvailKB, err := rm.ForegroundAvailableMemoryKB(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query foreground available memory")
	}
	testing.ContextLog(ctx, "GetForegroundAvailableMemoryKB returns: ", fgAvailKB)

	margins, err := rm.MemoryMarginsKB(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query memory margins")
	}
	testing.ContextLog(ctx, "GetMemoryMarginsKB returns, critical: ", margins.CriticalKB, ", moderate: ", margins.ModerateKB)

	compMargins, err := rm.ComponentMemoryMarginsKB(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query component memory margins")
	}
	testing.ContextLogf(ctx, "GetComponentMemoryMarginsKB returns %+v", compMargins)
	return nil
}
// checkMemoryPressureSignal waits up to 20 seconds for a MemoryPressureChrome
// signal and fails if none arrives.
func checkMemoryPressureSignal(ctx context.Context, rm *resourced.Client) error {
	watchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
	watcher, err := rm.NewChromePressureWatcher(watchCtx)
	if err != nil {
		return errors.Wrap(err, "failed to create PressureWatcher")
	}
	defer watcher.Close(ctx)
	select {
	case sig := <-watcher.Signals:
		testing.ContextLogf(ctx, "Got MemoryPressureChrome signal, level: %d, delta: %d", sig.Level, sig.Delta)
		return nil
	case <-watchCtx.Done():
		return errors.New("didn't get MemoryPressureChrome signal")
	}
}
// checkSetRTCAudioActive flips RTC audio active to a new value, verifies the
// change is visible, and restores the original value on exit.
func checkSetRTCAudioActive(ctx context.Context, rm *resourced.Client) error {
	// Get the original RTC audio active.
	origRTCAudioActive, err := rm.RTCAudioActive(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query RTC audio active")
	}
	testing.ContextLog(ctx, "Original RTC audio active: ", origRTCAudioActive)
	defer func() {
		// Restore RTC audio active; a restore failure is logged, not returned.
		if err = rm.SetRTCAudioActive(ctx, origRTCAudioActive); err != nil {
			testing.ContextLog(ctx, "Failed to reset RTC audio active: ", err)
		}
	}()
	// Set RTC audio active to a different value.
	newRTCAudioActive := resourced.RTCAudioActiveOff
	if origRTCAudioActive == resourced.RTCAudioActiveOff {
		newRTCAudioActive = resourced.RTCAudioActiveOn
	}
	if err = rm.SetRTCAudioActive(ctx, newRTCAudioActive); err != nil {
		// On machines not supporting Intel hardware EPP, SetRTCAudioActive returning error is expected.
		testing.ContextLog(ctx, "Failed to set RTC audio active: ", err)
	}
	testing.ContextLog(ctx, "Set RTC audio active: ", newRTCAudioActive)
	// Check RTC audio active is set to the new value.
	rtcAudioActive, err := rm.RTCAudioActive(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query RTC audio active")
	}
	if newRTCAudioActive != rtcAudioActive {
		return errors.Errorf("failed to set RTC audio active: got %d, want: %d", rtcAudioActive, newRTCAudioActive)
	}
	return nil
}
// checkSetFullscreenVideo sets the full-screen video state with a short
// timeout and verifies it takes effect and is automatically reset afterwards.
// NOTE(review): the named result resErr is never assigned in this function —
// consider dropping it.
func checkSetFullscreenVideo(ctx context.Context, rm *resourced.Client) (resErr error) {
	var newFullscreenVideo uint8 = resourced.FullscreenVideoActive
	var timeout uint32 = 1
	if err := rm.SetFullscreenVideoWithTimeout(ctx, newFullscreenVideo, timeout); err != nil {
		// On machines not supporting Intel hardware EPP, SetFullscreenVideoWithTimeout returning error is expected.
		testing.ContextLog(ctx, "Failed to set full screen video active: ", err)
		return nil
	}
	testing.ContextLogf(ctx, "Set full screen video active to %d with %d second timeout", newFullscreenVideo, timeout)
	// Check full screen video state is set to the new value.
	fullscreenVideo, err := rm.FullscreenVideo(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to query full screen video state")
	}
	if newFullscreenVideo != fullscreenVideo {
		return errors.Errorf("failed to set full screen video state: got %d, want: %d", fullscreenVideo, newFullscreenVideo)
	}
	// Check full screen video state is reset after the timeout elapses.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		fullscreenVideo, err := rm.FullscreenVideo(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to query full screen video state")
		}
		if fullscreenVideo != resourced.FullscreenVideoInactive {
			return errors.New("full screen video state is not reset")
		}
		return nil
	}, &testing.PollOptions{Timeout: time.Duration(2*timeout) * time.Second, Interval: 100 * time.Millisecond}); err != nil {
		return errors.Wrap(err, "failed to wait for full screen video state reset")
	}
	return nil
}
// checkPowerSupplyChange verifies the PowerSupplyChange method can be called
// successfully. (The unused named result resErr was removed; the signature is
// unchanged for callers.)
func checkPowerSupplyChange(ctx context.Context, rm *resourced.Client) error {
	if err := rm.PowerSupplyChange(ctx); err != nil {
		return errors.Wrap(err, "failed to call power supply change")
	}
	return nil
}
// Resourced exercises the resourced D-Bus client. In the baseline variant it
// runs the stable set of checks; the default variant is where new checks are
// added before being promoted.
func Resourced(ctx context.Context, s *testing.State) {
	rm, err := resourced.NewClient(ctx)
	if err != nil {
		s.Fatal("Failed to create Resource Manager client: ", err)
	}
	if s.Param().(resourcedTestParams).isBaseline {
		// Baseline checks.
		if err := checkSetGameMode(ctx, rm); err != nil {
			s.Fatal("Checking SetGameMode failed: ", err)
		}
		if err := checkQueryMemoryStatus(ctx, rm); err != nil {
			s.Fatal("Querying memory status failed: ", err)
		}
		if err := checkMemoryPressureSignal(ctx, rm); err != nil {
			s.Fatal("Checking memory pressure signal failed: ", err)
		}
		if err := checkSetGameModeWithTimeout(ctx, rm); err != nil {
			s.Fatal("Checking SetGameModeWithTimeout failed: ", err)
		}
		if err := checkSetRTCAudioActive(ctx, rm); err != nil {
			s.Fatal("Checking SetRTCAudioActive failed: ", err)
		}
		if err := checkSetFullscreenVideo(ctx, rm); err != nil {
			s.Fatal("Checking SetFullscreenVideoWithTimeout failed: ", err)
		}
		if err := checkPowerSupplyChange(ctx, rm); err != nil {
			s.Fatal("Checking PowerSupplyChange failed: ", err)
		}
		return
	}
	// New tests will be added here. Stable tests are promoted to baseline.
}
|
package controller
import (
"fmt"
"net/http"
"github.com/gin-gonic/gin"
"github.com/gocolly/colly"
// "encoding/json"
)
// Sku is a scraped JD product entry.
//
// NOTE(review): the gorm tags map Name to column "user_name" and Image to
// "user_account", which looks copy-pasted from a user model — confirm the
// intended column names before relying on persistence.
type Sku struct {
	Name  string `gorm:"column:user_name" json:"name"`
	Image string `gorm:"column:user_account" json:"image"`
}

// Index renders the index.html template.
func Index(c *gin.Context) {
	c.HTML(http.StatusOK, "index.html", nil)
}
// Login fetches the JD login-service endpoint with a fresh colly collector
// and logs the "cookie" header of the response. The credential POST itself is
// currently commented out.
func Login(cm *gin.Context) {
	// create a new collector
	c := colly.NewCollector()
	// data :=map[string]string{
	// }
	// authenticate
	// err := c.Post("https://passport.jd.com/uc/loginService?uuid=2d8d4116-148e-4772-b500-c62b9fb6bb5c<ype=logout&r=0.06150847446715635&version=2015", data)
	// if err != nil {
	// 	fmt.Println(err)
	// }
	// attach callbacks after login
	c.OnResponse(func(r *colly.Response) {
		cookie := r.Headers.Get("cookie")
		fmt.Println("coolie:" + cookie)
	})
	// Before making a request print "Visiting ..."
	c.OnRequest(func(r *colly.Request) {
		r.Headers.Add("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0")
		// A dangling "r." selector followed here in the original, which made
		// the file fail to compile; it has been removed.
		fmt.Println("Visiting", r.URL.String())
	})
	// start scraping
	c.Visit("https://passport.jd.com/uc/loginService?uuid=2d8d4116-148e-4772-b500-c62b9fb6bb5c<ype=logout&r=0.06150847446715635&version=2015")
}
// Search scrapes JD search results for the posted keyword "kw": it collects
// the SKU ids from the result grid, visits each item page, extracts the SKU
// name and image, and returns the collected items as JSON.
func Search(c *gin.Context) {
	kw := c.PostForm("kw")
	fmt.Println(kw)
	skus := []Sku{}
	// authenticate
	targetUrl := "https://search.jd.com/Search?keyword=" + kw + "&enc=utf-8&spm=2.1.1"
	// Instantiate default collector
	co := colly.NewCollector()
	co.UserAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
	// On every a element which has href attribute call callback
	co.OnHTML("div[id=J_goodsList] ul li[class=gl-item]", func(e *colly.HTMLElement) {
		//link := e.Attr("href")
		// Print link
		fmt.Printf("Link found1: -> %+v\n", e.Attr("data-sku"))
		//fmt.Printf("Link found: -> %+v\n", e)
		// Visit link found on page
		// Only those links are visited which are in AllowedDomains
		co.Visit("https://item.jd.com/" + e.Attr("data-sku") + ".html")
	})
	// Product introduction section of an item page.
	co.OnHTML(".product-intro", func(e *colly.HTMLElement) {
		//fmt.Println("product-intro -> %+v\n",)
		skuName := e.DOM.Find(".sku-name").Text()
		skuImg, _ := e.DOM.Find("#spec-img").Attr("data-origin")
		// va:=e.ChildAttr("div[class=sku-name]", "text")
		// //link := e.Attr("href")
		// // Print link
		skus = append(skus, Sku{Name: skuName, Image: skuImg})
		fmt.Printf("sku-name: -> %s\n", skuName)
		fmt.Printf("sku-img: -> %s\n", skuImg)
	})
	// Before making a request print "Visiting ..."
	co.OnResponse(func(r *colly.Response) {
		fmt.Println(r.Ctx.Get("url"))
		fmt.Println(string(r.Body))
	})
	// Before making a request print "Visiting ..."
	co.OnRequest(func(r *colly.Request) {
		fmt.Println("Visiting", r.URL.String())
	})
	// Start scraping on https://hackerspaces.org
	co.Visit(targetUrl)
	//co.Request("post", targetUrl, requestData io.Reader, , hdr http.Header)
	c.JSON(http.StatusOK, skus)
}
|
package statgo
import (
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestHostInfo verifies host information is populated.
//
// Bug fixed: testify's NotEmpty/True accept a single value followed by
// msgAndArgs, so the original multi-value calls only actually checked the
// first argument — the rest were silently treated as message parameters.
// Each field now gets its own assertion.
func TestHostInfo(t *testing.T) {
	s := NewStat()
	hi := s.HostInfos()
	assert.NotNil(t, s)
	assert.NotEmpty(t, hi.HostName)
	assert.NotEmpty(t, hi.OSName)
	assert.NotEmpty(t, hi.OSRelease)
	assert.NotEmpty(t, hi.OSVersion)
	assert.NotEmpty(t, hi.Platform)
	assert.True(t, hi.NCPUs > 0)
	assert.True(t, hi.MaxCPUs > 0)
	t.Log(hi)
}
// TestCPU samples CPU statistics twice (the sleep lets the delta-based
// counters accumulate) and checks every returned value is a real number.
//
// Bug fixed: assert.False takes one bool followed by msgAndArgs, so the
// original calls only checked the first IsNaN result — the remaining
// values were treated as message arguments. One assertion per field now.
func TestCPU(t *testing.T) {
	s := NewStat()
	cpu := s.CPUStats()
	assert.NotNil(t, s)
	assert.NotNil(t, cpu)
	time.Sleep(100 * time.Millisecond)
	cpu = s.CPUStats()
	assert.False(t, math.IsNaN(cpu.User))
	assert.False(t, math.IsNaN(cpu.Kernel))
	assert.False(t, math.IsNaN(cpu.Idle))
	assert.False(t, math.IsNaN(cpu.LoadMin1))
	assert.False(t, math.IsNaN(cpu.LoadMin5))
	assert.False(t, math.IsNaN(cpu.LoadMin15))
	t.Log(cpu)
}
// TestFSInfos checks that at least one filesystem is reported and logs each.
func TestFSInfos(t *testing.T) {
	stats := NewStat()
	filesystems := stats.FSInfos()
	assert.True(t, len(filesystems) > 0)
	for _, info := range filesystems {
		t.Log(info)
	}
}
// TestInterfaceInfos checks that at least one network interface is reported.
// (InteraceInfos, sic, is the library's spelling of the method name.)
func TestInterfaceInfos(t *testing.T) {
	stats := NewStat()
	ifaces := stats.InteraceInfos()
	assert.True(t, len(ifaces) > 0)
	for _, iface := range ifaces {
		t.Log(iface)
	}
}
// TestVM exercises memory statistics retrieval.
func TestVM(t *testing.T) {
	stats := NewStat()
	assert.NotNil(t, stats)
	mem := stats.MemStats()
	assert.NotNil(t, mem)
	t.Log(mem)
}
// TestDisksIO exercises disk I/O statistics retrieval.
func TestDisksIO(t *testing.T) {
	stats := NewStat()
	assert.NotNil(t, stats)
	diskStats := stats.DiskIOStats()
	assert.NotNil(t, diskStats)
	t.Log(diskStats)
}
// TestNetIO exercises network I/O statistics retrieval.
func TestNetIO(t *testing.T) {
	stats := NewStat()
	assert.NotNil(t, stats)
	netStats := stats.NetIOStats()
	assert.NotNil(t, netStats)
	t.Log(netStats)
}
// TestProcess exercises process statistics retrieval.
func TestProcess(t *testing.T) {
	stats := NewStat()
	assert.NotNil(t, stats)
	procStats := stats.ProcessStats()
	assert.NotNil(t, procStats)
	t.Log(procStats)
}
// TestPages exercises memory paging statistics retrieval.
func TestPages(t *testing.T) {
	stats := NewStat()
	assert.NotNil(t, stats)
	pageStats := stats.PageStats()
	assert.NotNil(t, pageStats)
	t.Log(pageStats)
}
|
package ccpa
import (
"errors"
"fmt"
gpplib "github.com/prebid/go-gpp"
gppConstants "github.com/prebid/go-gpp/constants"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
gppPolicy "github.com/prebid/prebid-server/privacy/gpp"
)
// Policy represents the CCPA regulatory information from an OpenRTB bid request.
type Policy struct {
	Consent       string   // US Privacy consent string (from GPP USPV1, regs.us_privacy, or regs.ext)
	NoSaleBidders []string // bidders listed in request.ext.prebid.nosale
}
// ReadFromRequestWrapper extracts the CCPA regulatory information from an OpenRTB bid request.
//
// Consent resolution order: the GPP USPV1 section (when its SID appears in
// regs.gpp_sid) wins over regs.us_privacy; request.regs.ext.us_privacy is a
// last-resort fallback when both are empty. A disagreement between the GPP
// section and regs.us_privacy is surfaced as a non-fatal Warning (the GPP
// value is still used), not an error.
func ReadFromRequestWrapper(req *openrtb_ext.RequestWrapper, gpp gpplib.GppContainer) (Policy, error) {
	var noSaleBidders []string
	var gppSIDs []int8
	var requestUSPrivacy string
	var warn error
	if req == nil || req.BidRequest == nil {
		return Policy{}, nil
	}
	if req.BidRequest.Regs != nil {
		requestUSPrivacy = req.BidRequest.Regs.USPrivacy
		gppSIDs = req.BidRequest.Regs.GPPSID
	}
	// Reconcile regs.us_privacy with the GPP USPV1 section; a mismatch
	// produces an error here that we downgrade to a warning below.
	consent, err := SelectCCPAConsent(requestUSPrivacy, gpp, gppSIDs)
	if err != nil {
		warn = &errortypes.Warning{
			Message:     "regs.us_privacy consent does not match uspv1 in GPP, using regs.gpp",
			WarningCode: errortypes.InvalidPrivacyConsentWarningCode}
	}
	if consent == "" {
		// Read consent from request.regs.ext
		regsExt, err := req.GetRegExt()
		if err != nil {
			return Policy{}, fmt.Errorf("error reading request.regs.ext: %s", err)
		}
		if regsExt != nil {
			consent = regsExt.GetUSPrivacy()
		}
	}
	// Read no sale bidders from request.ext.prebid
	reqExt, err := req.GetRequestExt()
	if err != nil {
		return Policy{}, fmt.Errorf("error reading request.ext: %s", err)
	}
	reqPrebid := reqExt.GetPrebid()
	if reqPrebid != nil {
		noSaleBidders = reqPrebid.NoSale
	}
	return Policy{consent, noSaleBidders}, warn
}
// ReadFromRequest extracts the CCPA regulatory information from a raw
// OpenRTB bid request by wrapping it and delegating to
// ReadFromRequestWrapper.
func ReadFromRequest(req *openrtb2.BidRequest) (Policy, error) {
	var gppContainer gpplib.GppContainer
	if req != nil && req.Regs != nil && len(req.Regs.GPP) > 0 {
		// A parse failure is deliberately ignored; an empty container is used.
		gppContainer, _ = gpplib.Parse(req.Regs.GPP)
	}
	return ReadFromRequestWrapper(&openrtb_ext.RequestWrapper{BidRequest: req}, gppContainer)
}
// Write mutates an OpenRTB bid request with the CCPA regulatory information.
// A nil request is a no-op; errors from reading either extension abort the
// write without partial mutation.
func (p Policy) Write(req *openrtb_ext.RequestWrapper) error {
	if req == nil {
		return nil
	}
	regsExt, regsErr := req.GetRegExt()
	if regsErr != nil {
		return regsErr
	}
	reqExt, reqErr := req.GetRequestExt()
	if reqErr != nil {
		return reqErr
	}
	regsExt.SetUSPrivacy(p.Consent)
	setPrebidNoSale(p.NoSaleBidders, reqExt)
	return nil
}
// SelectCCPAConsent picks the CCPA consent string, preferring the GPP USPV1
// section (when its SID is declared in gppSIDs) over regs.us_privacy. When
// both are present but disagree, the GPP value is kept and an error is
// returned so callers can emit a warning.
func SelectCCPAConsent(requestUSPrivacy string, gpp gpplib.GppContainer, gppSIDs []int8) (string, error) {
	var consent string
	var err error
	if len(gpp.SectionTypes) > 0 && gppPolicy.IsSIDInList(gppSIDs, gppConstants.SectionUSPV1) {
		if i := gppPolicy.IndexOfSID(gpp, gppConstants.SectionUSPV1); i >= 0 {
			consent = gpp.Sections[i].GetValue()
		}
	}
	switch {
	case requestUSPrivacy == "":
		// Nothing to reconcile.
	case consent == "":
		consent = requestUSPrivacy
	case consent != requestUSPrivacy:
		err = errors.New("request.us_privacy consent does not match uspv1")
	}
	return consent, err
}
// setPrebidNoSale writes noSaleBidders into ext, clearing the list when
// it is empty.
func setPrebidNoSale(noSaleBidders []string, ext *openrtb_ext.RequestExt) {
	if len(noSaleBidders) > 0 {
		setPrebidNoSaleWrite(noSaleBidders, ext)
		return
	}
	setPrebidNoSaleClear(ext)
}
// setPrebidNoSaleClear empties the no-sale list on ext's prebid section,
// if such a section exists.
func setPrebidNoSaleClear(ext *openrtb_ext.RequestExt) {
	if p := ext.GetPrebid(); p != nil {
		// Remove no sale member
		p.NoSale = []string{}
		ext.SetPrebid(p)
	}
}
// setPrebidNoSaleWrite stores noSaleBidders on ext's prebid section,
// creating the section when it does not exist yet.
func setPrebidNoSaleWrite(noSaleBidders []string, ext *openrtb_ext.RequestExt) {
	if ext == nil {
		// Callers are expected to have initialized ext; guard regardless.
		return
	}
	p := ext.GetPrebid()
	if p == nil {
		p = &openrtb_ext.ExtRequestPrebid{}
	}
	p.NoSale = noSaleBidders
	ext.SetPrebid(p)
}
|
package inflectlab
import (
"encoding/json"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
//-----------------------------------------------------------------------------
// test specific types
// Model is a base document with an id and timestamp bookkeeping fields,
// intended for embedding in other structs.
type Model struct {
	// ID bson.ObjectId `json:"id" bson:"_id"`
	ID        string `json:"id" bson:"_id"`
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time `json:",omitempty" bson:",omitempty"` // nil while the record is live
}
// MetaData holds free-form key/value attributes.
type MetaData map[string]interface{}
// MetaModel combines the base Model with free-form MetaData, both embedded inline.
type MetaModel struct {
	Model    `json:",inline" bson:",inline"`
	MetaData `json:",omitempty" bson:",omitempty"`
}
// Data is the test fixture type: it embeds MetaModel and declares its own
// CreatedAt, which shadows the embedded Model.CreatedAt at this depth.
type Data struct {
	MetaModel `json:",inline"`
	CreatedAt time.Time // shadows MetaModel.Model.CreatedAt
	Name      string
	Code      int
}
//-----------------------------------------------------------------------------
// Shared fixture values used by data() and the assertions below.
var (
	t1   = time.Now()
	t2   = t1.Add(time.Hour)
	t3   = t2.Add(time.Hour)
	code = 66
	name = "test"
)
// data builds the canonical test fixture with all timestamp fields set.
func data() *Data {
	fixture := &Data{
		Code: code,
		Name: name,
	}
	fixture.CreatedAt = t1 // Data's own field, not the embedded one
	fixture.UpdatedAt = t2
	fixture.DeletedAt = &t3
	return fixture
}
// Test01 checks that GetFields discovers the full (nested) field tree of
// Data and that the returned pointers write through to the source value.
//
// Bug fixed: the default branches called t.Failed(), which only *reports*
// whether the test has already failed — it never marks a failure, so
// unexpected fields were silently accepted. t.Errorf is used instead.
func Test01(t *testing.T) {
	d := data()
	// Marshal is invoked only to exercise the struct tags; result unused.
	_, _ = json.Marshal(d)
	fields, err := GetFields(d)
	if err != nil {
		t.Fatal(err)
	}
	for k := range fields {
		switch k {
		case "MetaModel":
			for k1 := range fields[k].Children {
				switch k1 {
				case "Model":
					fields1 := fields[k].Children[k1].Children
					for k2 := range fields1 {
						switch k2 {
						case "CreatedAt", "UpdatedAt", "DeletedAt", "ID":
						default:
							t.Errorf("unexpected Model field %q", k2)
						}
					}
				case "MetaData":
					assert.Equal(t, 0, len(fields[k].Children[k1].Children))
				default:
					t.Errorf("unexpected MetaModel field %q", k1)
				}
			}
		case "CreatedAt", "Name", "Code":
			assert.Equal(t, 0, len(fields[k].Children))
		default:
			t.Errorf("unexpected field %q", k)
		}
	}
	// Writing through the discovered pointer must mutate the source struct.
	nid := "IDNEW"
	fields["MetaModel"].Children["Model"].Children["ID"].Ptr.Set(reflect.ValueOf(nid))
	assert.Equal(t, nid, d.ID)
	assert.Equal(t, nid, fields["MetaModel"].Children["Model"].Children["ID"].Ptr.Interface())
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resolve
import (
"context"
"sort"
"github.com/google/gapid/core/fault"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/sync"
"github.com/google/gapid/gapis/capture"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/messages"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
)
// FramebufferChanges returns the list of attachment changes over the span of
// the entire capture. The result is computed (and cached) through the
// database via FramebufferChangesResolvable.
func FramebufferChanges(ctx context.Context, c *path.Capture, r *path.ResolveConfig) (*AttachmentFramebufferChanges, error) {
	built, err := database.Build(ctx, &FramebufferChangesResolvable{Capture: c, Config: r})
	if err != nil {
		return nil, err
	}
	changes := built.(*AttachmentFramebufferChanges)
	return changes, nil
}
// AttachmentFramebufferChanges describes the list of attachment changes over
// the span of the entire capture.
type AttachmentFramebufferChanges struct {
	attachments []framebufferAttachmentChanges // indexed by api.FramebufferAttachment
}
// Get returns the framebuffer dimensions and format after a given command in
// the given capture, command and attachment.
//
// An error recorded on the attachment info itself is logged and reported to
// the caller as ErrDataUnavailable rather than returned verbatim.
func (c AttachmentFramebufferChanges) Get(ctx context.Context, after *path.Command, att api.FramebufferAttachment) (FramebufferAttachmentInfo, error) {
	info, err := c.attachments[att].after(ctx, api.SubCmdIdx(after.Indices))
	if err != nil {
		return FramebufferAttachmentInfo{}, err
	}
	if info.Err != nil {
		log.W(ctx, "Framebuffer error after %v: %v", after, info.Err)
		return FramebufferAttachmentInfo{}, &service.ErrDataUnavailable{Reason: messages.ErrFramebufferUnavailable()}
	}
	return info, nil
}
// errNoAPI is recorded on an attachment's info when the command that
// produced it has no associated API.
const errNoAPI = fault.Const("Command has no API")
// Resolve implements the database.Resolver interface.
//
// It replays the capture's commands and subcommands, sampling every
// framebuffer attachment after each one, and records an entry whenever an
// attachment's info differs from the previously recorded value.
func (r *FramebufferChangesResolvable) Resolve(ctx context.Context) (interface{}, error) {
	ctx = SetupContext(ctx, r.Capture, r.Config)
	c, err := capture.ResolveGraphics(ctx)
	if err != nil {
		return nil, err
	}
	out := &AttachmentFramebufferChanges{
		// TODO: Remove hardcoded upper limit
		attachments: make([]framebufferAttachmentChanges, api.FramebufferAttachment_Color3+1),
	}
	// Invoked both after every command and after every subcommand.
	postCmdAndSubCmd := func(s *api.GlobalState, subcommandIndex api.SubCmdIdx, cmd api.Cmd) {
		api := cmd.API() // NOTE: shadows the imported api package inside this closure
		// Copy the index so the stored slice does not alias the caller's buffer.
		idx := append([]uint64(nil), subcommandIndex...)
		for _, att := range allFramebufferAttachments {
			info := FramebufferAttachmentInfo{After: idx}
			if api != nil {
				if inf, err := api.GetFramebufferAttachmentInfo(ctx, idx, s, cmd.Thread(), att); err == nil && inf.Format != nil {
					info.Width, info.Height, info.Index, info.Format, info.CanResize = inf.Width, inf.Height, inf.Index, inf.Format, inf.CanResize
				} else {
					info.Err = err
				}
			} else {
				info.Err = errNoAPI
			}
			// Only append when the attachment info actually changed.
			if last := out.attachments[att].last(); !last.equal(info) {
				attachment := out.attachments[att]
				attachment.changes = append(attachment.changes, info)
				out.attachments[att] = attachment
			}
		}
	}
	sync.MutateWithSubcommands(ctx, r.Capture, c.Commands, postCmdAndSubCmd, nil, postCmdAndSubCmd)
	// Since subcommands may have been executed out of order, this will sort them back into the proper order.
	for ii := 0; ii < int(api.FramebufferAttachment_Color3+1); ii++ {
		sort.Slice(out.attachments[ii].changes, func(i, j int) bool {
			return out.attachments[ii].changes[i].After.LessThan(out.attachments[ii].changes[j].After)
		})
	}
	return out, nil
}
|
package main
import (
"fmt"
"github.com/disintegration/imaging"
"github.com/gin-gonic/gin"
"github.com/h2non/bimg"
"net/http"
"time"
)
//var pngSrc, _ = bimg.Read("/Users/svenweisker/Downloads/big_png_big.png")
//var jpgSrc, _ = bimg.Read("pexels-artem-beliaikin-853199.jpg")
//var pngSrc, _ = imaging.Open("/Users/svenweisker/Downloads/big_png_big.png")
//var src, _ = imaging.Open("pexels-artem-beliaikin-853199.jpg")
//pngSrc, _ := bimg.Read("/Users/svenweisker/Downloads/big_png_big.png")
//src, err := imaging.Open("pexels-artem-beliaikin-853199.jpg")
//image, _ := vips.NewImageFromFile("pexels-artem-beliaikin-853199.jpg")
// main wires up a small image-resizing HTTP server on :8080 with four demo
// endpoints backed by two different imaging libraries (disintegration/imaging
// and h2non/bimg).
//
// Bugs fixed: the Content-Disposition header was added *after* the response
// body had been written (net/http ignores headers once the body starts), and
// /peng called WriteHeader after Write on the error path, losing the 500
// status. Headers/status are now set before any body bytes.
func main() {
	fmt.Println("### Started ###")
	gin.SetMode(gin.ReleaseMode)
	bimg.Initialize()
	defer bimg.Shutdown()
	r := gin.Default()

	// GET /png: resize a PNG with disintegration/imaging and stream it back.
	r.GET("/png", func(c *gin.Context) {
		pngSrc, _ := imaging.Open("big_png_big.png")
		// Resize the cropped image to 1200x600.
		pngSrc = imaging.Resize(pngSrc, 1200, 600, imaging.NearestNeighbor)
		extension, _ := imaging.FormatFromExtension(".png")
		// Header must be set before the body is encoded into the writer.
		c.Writer.Header().Add("Content-Disposition", "attachment; filename=bla.png")
		_ = imaging.Encode(c.Writer, pngSrc, extension)
	})

	// GET /jpg: re-encode a JPEG unchanged with disintegration/imaging.
	r.GET("/jpg", func(c *gin.Context) {
		src, _ := imaging.Open("pexels-artem-beliaikin-853199.jpg")
		extension, _ := imaging.FormatFromExtension(".jpg")
		c.Writer.Header().Add("Content-Disposition", "attachment; filename=bla.jpg")
		_ = imaging.Encode(c.Writer, src, extension)
	})

	// GET /jpeg: resize a JPEG with bimg (libvips).
	r.GET("/jpeg", func(c *gin.Context) {
		jpgSrc, _ := bimg.Read("pexels-artem-beliaikin-853199.jpg")
		resizeImage, _ := bimg.Resize(jpgSrc, bimg.Options{Width: 1200})
		c.Writer.Header().Add("Content-Disposition", "attachment; filename=bla.jpg")
		c.Data(200, "image/jpg", resizeImage)
	})

	// GET /peng: resize a PNG with bimg, with explicit error handling.
	r.GET("/peng", func(c *gin.Context) {
		pngSrc, _ := bimg.Read("big_png_big.png")
		resize, err := bimg.Resize(pngSrc, bimg.Options{
			Width: 1200,
		})
		if err != nil {
			// Status must be written before the body, or it defaults to 200.
			c.Writer.WriteHeader(http.StatusInternalServerError)
			_, _ = c.Writer.Write([]byte(fmt.Sprintf("failed to resize %s: %v", "blub", err)))
			return
		}
		c.Writer.Header().Add("Content-Disposition", "attachment; filename=bla.png")
		c.Data(200, "image/png", resize)
	})

	_ = r.Run() // listen and serve on 0.0.0.0:8080 (for windows "localhost:8080")
}
// makeTimestamp returns the current time as milliseconds since the Unix epoch.
func makeTimestamp() int64 {
	nanos := time.Now().UnixNano()
	return nanos / int64(time.Millisecond)
}
|
package main
import (
"errors"
"fmt"
"os"
"strings"
"github.com/codegangsta/cli"
"github.com/landaire/pbo"
)
const (
	// version is the CLI version reported by --version.
	version = "0.2.0"
)
// pboFile caches the archive opened by LoadPbo so multiple commands reuse it.
var pboFile *pbo.Pbo
// main configures the pboextractor CLI — the extract and header
// subcommands, both gated on LoadPbo — and runs it against os.Args.
func main() {
	app := cli.NewApp()
	app.Name = "pboextractor"
	app.Usage = "Extract PBO archives used in games such as Arma 3"
	app.Author = "Lander Brandt"
	app.Email = "@landaire"
	app.Version = version
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "pbo",
			Usage: "PBO file to read",
		},
	}
	app.Commands = []cli.Command{
		{
			Name:      "extract",
			ShortName: "e",
			Usage:     "Extract the PBO to the given output directory",
			Before:    LoadPbo,
			Action:    Extract,
		},
		{
			Name:   "header",
			Usage:  "Print header information to stdout",
			Before: LoadPbo,
			Action: PrintHeader,
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err)
	}
}
// LoadPbo opens the archive named by the global --pbo flag into the
// package-level pboFile, reusing a previously loaded archive if present.
func LoadPbo(c *cli.Context) error {
	path := c.GlobalString("pbo")
	if path == "" {
		return errors.New("No PBO provided")
	}
	if pboFile != nil {
		// Already loaded by an earlier Before hook.
		return nil
	}
	var err error
	pboFile, err = pbo.NewPbo(path)
	return err
}
// PrintHeader dumps the loaded PBO's header extension (when present) and
// its entry list to stdout, indenting each record for readability.
func PrintHeader(c *cli.Context) {
	if ext := pboFile.HeaderExtension; ext != nil {
		fmt.Println("Header Extension:")
		for _, line := range strings.Split(ext.String(), "\n") {
			fmt.Println("\t", line)
		}
		fmt.Println()
		fmt.Println("\tExtended Fields:")
		for key, val := range ext.ExtendedFields {
			fmt.Printf("\t\t%s: %s\n", key, val)
		}
		fmt.Println()
	}
	fmt.Println("Entries:")
	for _, entry := range pboFile.Entries {
		for _, line := range strings.Split(entry.String(), "\n") {
			fmt.Println("\t", line)
		}
		fmt.Println()
	}
}
|
package main
import (
"log"
"net"
"net/http"
"time"
)
// dialTimeout dials with a fixed 2-second timeout; it is installed as the
// transport's Dial function by newClient.
func dialTimeout(network, addr string) (net.Conn, error) {
	const timeout = 2 * time.Second
	return net.DialTimeout(network, addr, timeout)
}
// newClient builds an http.Client whose transport dials with dialTimeout's
// fixed connect timeout.
func newClient() http.Client {
	return http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
}
// get issues a GET to endpoint and returns the response. A transport-level
// failure aborts the whole process via log.Fatal.
func get(endpoint string, client http.Client) *http.Response {
	response, getErr := client.Get(endpoint)
	if getErr != nil {
		log.Fatal(getErr)
	}
	return response
}
// getInstanceId fetches this EC2 instance's id from the AWS metadata endpoint.
func getInstanceId(client http.Client) *http.Response {
	const endpoint = "http://169.254.169.254/latest/meta-data/instance-id"
	return get(endpoint, client)
}
// getDevices lists Server Density inventory devices for the given API token.
// NOTE(review): the token travels in the query string and may end up in
// server logs — confirm this matches the API's expectations.
func getDevices(token string, client http.Client) *http.Response {
	return get("https://api.serverdensity.io/inventory/devices?token="+token, client)
}
// getInstaller downloads the named Server Density installer script.
func getInstaller(script string, client http.Client) *http.Response {
	return get("https://www.serverdensity.com/downloads/"+script, client)
}
|
package day5
import "adventofcode/io"
// isNaughtyPair reports whether p is one of the forbidden substrings.
func isNaughtyPair(p string) bool {
	switch p {
	case "ab", "cd", "pq", "xy":
		return true
	}
	return false
}
// isVowel reports whether c is a single lowercase vowel.
func isVowel(c string) bool {
	switch c {
	case "a", "e", "i", "o", "u":
		return true
	}
	return false
}
// isNice reports whether s satisfies the part-one rules: at least three
// vowels, at least one doubled letter, and none of the forbidden pairs.
func isNice(s string) bool {
	var (
		prev      string // previous character
		hasDouble bool
		vowels    int
	)
	for _, r := range s {
		cur := string(r)
		if isVowel(cur) {
			vowels++
		}
		if cur == prev {
			hasDouble = true
		}
		if isNaughtyPair(prev + cur) {
			return false
		}
		prev = cur
	}
	return vowels >= 3 && hasDouble
}
// _isNice reports whether s satisfies the part-two rules: some two-letter
// pair appears at least twice without overlapping, and some letter repeats
// with exactly one character between the occurrences.
func _isNice(s string) bool {
	firstSeen := map[string]int{} // pair -> (index of first occurrence) + 1
	repeats := false
	pairTwice := false
	var prev byte
	for i := 0; i+1 < len(s); i++ {
		pair := s[i : i+2]
		if at, ok := firstSeen[pair]; !ok {
			firstSeen[pair] = i + 1
		} else if at != i {
			// at == i would mean the pair overlaps itself (e.g. "aaa").
			pairTwice = true
		}
		if i > 0 && prev == s[i+1] {
			repeats = true
		}
		prev = s[i]
	}
	return repeats && pairTwice
}
func Solve(filepath string) (int, int) {
strings := io.Readlines(filepath)
count := 0
_count := 0
for _, str := range strings {
if isNice(str) { count++ }
if _isNice(str) { _count++ }
}
return count, _count
} |
// Copyright (c) 2019-present Mattermost, Inc. All Rights Reserved.
// See License for license information.
package types
import (
"encoding/json"
)
// ID is a string identifier usable wherever a Value is expected.
type ID string
// GetID returns the ID itself.
func (id ID) GetID() ID { return id }
// String implements fmt.Stringer.
func (id ID) String() string { return string(id) }
// IDArray is a slice of IDs satisfying the ValueArray interface.
type IDArray []ID
// Len returns the number of elements.
func (p IDArray) Len() int { return len(p) }
// GetAt returns the element at index n as a Value.
func (p IDArray) GetAt(n int) Value { return p[n] }
// SetAt stores v at index n; v must be an ID or the assertion panics.
func (p IDArray) SetAt(n int, v Value) { p[n] = v.(ID) }
// Ref returns a pointer to the array itself.
func (p *IDArray) Ref() interface{} { return p }
// InstanceOf returns a new, empty IDArray.
func (p IDArray) InstanceOf() ValueArray {
	inst := make(IDArray, 0)
	return &inst
}
// Resize replaces the array with a zero-valued array of length n.
func (p *IDArray) Resize(n int) {
	*p = make(IDArray, n)
}
// IDArrayProto is the prototype used to instantiate new IDArrays.
var IDArrayProto = &IDArray{}
// IDSet is a ValueSet specialized to ID values.
type IDSet struct {
	ValueSet
}
// NewIDSet builds an IDSet containing each of the given IDs.
func NewIDSet(vv ...ID) *IDSet {
	set := &IDSet{
		ValueSet: *NewValueSet(&IDArray{}),
	}
	for _, id := range vv {
		set.Set(id)
	}
	return set
}
// Set inserts v into the underlying ValueSet.
func (i *IDSet) Set(v ID) {
	i.ValueSet.Set(v)
}
// MarshalJSON encodes the set as a JSON array of its IDs.
func (i *IDSet) MarshalJSON() ([]byte, error) {
	return json.Marshal(i.IDs())
}
// UnmarshalJSON decodes a JSON array of IDs and replaces the receiver with
// a freshly built set containing them.
func (i *IDSet) UnmarshalJSON(data []byte) error {
	var ids []ID
	if err := json.Unmarshal(data, &ids); err != nil {
		return err
	}
	*i = *NewIDSet(ids...)
	return nil
}
|
package knife
// Only ECB/CBC aes encryption/decryption supported
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
)
// AesOp selects the AES block-cipher mode of operation.
type AesOp string
const (
	CBC AesOp = "CBC" // cipher block chaining; a random IV is prepended to the ciphertext
	ECB AesOp = "ECB" // electronic codebook; each block encrypted independently
)
// Coding selects the textual encoding of produced ciphertext (and of
// ciphertext handed to AesDecrypt).
type Coding int
const (
	_ Coding = iota
	HEX    // hexadecimal string
	BASE64 // standard base64 string
)
// AesEncrypt encrypts plain with key using AES in the given mode and
// returns the ciphertext encoded per coding (HEX, BASE64, or raw bytes for
// any other value). The plaintext is PKCS#7-padded to the block size.
// In CBC mode a random IV is generated and prepended to the ciphertext.
//
// Bugs fixed: the function panicked on an invalid key or on IV-generation
// failure despite declaring an error return — both now return errors —
// and the random IV was printed to stdout, which served no purpose (it is
// already carried in the output).
func AesEncrypt(plain, key []byte, coding Coding, op AesOp) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, fmt.Errorf("aes: invalid key: %v", err)
	}
	var res []byte
	switch op {
	case ECB:
		// NOTE: ECB leaks plaintext structure; kept only for compatibility.
		size := block.BlockSize()
		plain = PKCS7Padding(plain, size)
		res = make([]byte, len(plain))
		for bs, be := 0, size; bs < len(plain); bs, be = bs+size, be+size {
			block.Encrypt(res[bs:be], plain[bs:be])
		}
	case CBC:
		size := aes.BlockSize
		plain = PKCS7Padding(plain, size)
		res = make([]byte, aes.BlockSize+len(plain))
		iv := res[:aes.BlockSize]
		if _, err := io.ReadFull(rand.Reader, iv); err != nil {
			return nil, fmt.Errorf("aes: generating IV: %v", err)
		}
		mode := cipher.NewCBCEncrypter(block, iv)
		mode.CryptBlocks(res[aes.BlockSize:], plain)
	default:
		return nil, errors.New("op invalid")
	}
	switch coding {
	case HEX:
		res = []byte(hex.EncodeToString(res))
	case BASE64:
		res = []byte(base64.StdEncoding.EncodeToString(res))
	}
	return res, nil
}
// AesDecrypt decrypts data (first decoded per coding: HEX, BASE64, or raw
// for any other value) with key using AES in the given mode and strips the
// PKCS#7 padding. In CBC mode the IV is expected as the first block of the
// ciphertext, matching AesEncrypt's output layout.
//
// Bugs fixed: the function panicked on an invalid key and on too-short CBC
// input despite declaring an error return — both now return errors — and
// an unknown op silently yielded (nil, nil); it now errors, consistent
// with AesEncrypt.
func AesDecrypt(data, key []byte, coding Coding, op AesOp) ([]byte, error) {
	var err error
	var res []byte
	switch coding {
	case HEX:
		data, err = hex.DecodeString(string(data))
	case BASE64:
		data, err = base64.StdEncoding.DecodeString(string(data))
	}
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, fmt.Errorf("aes: invalid key: %v", err)
	}
	switch op {
	case ECB:
		size := block.BlockSize()
		res = make([]byte, len(data))
		for bs, be := 0, size; bs < len(data); bs, be = bs+size, be+size {
			block.Decrypt(res[bs:be], data[bs:be])
		}
		res = PKCS7UnPadding(res)
	case CBC:
		if len(data) < aes.BlockSize {
			return nil, errors.New("aes: ciphertext shorter than one block")
		}
		iv := data[:aes.BlockSize]
		mode := cipher.NewCBCDecrypter(block, iv)
		data = data[aes.BlockSize:]
		res = make([]byte, len(data))
		mode.CryptBlocks(res, data)
		res = PKCS7UnPadding(res)
	default:
		return nil, errors.New("op invalid")
	}
	return res, nil
}
// PKCS7Padding pads data up to a multiple of blockSize; every added byte
// holds the pad length (RFC 5652 §6.3). Input already aligned to the block
// size gains one full block of padding.
func PKCS7Padding(data []byte, blockSize int) []byte {
	padLen := blockSize - len(data)%blockSize
	pad := bytes.Repeat([]byte{byte(padLen)}, padLen)
	return append(data, pad...)
}
// PKCS7UnPadding strips PKCS#7 padding from data.
//
// Bug fixed: the original indexed data[length-1] unconditionally and sliced
// by the pad byte unchecked, panicking on empty input or on a pad value
// larger than the data (e.g. corrupted or wrongly-keyed ciphertext).
// Malformed input is now returned unchanged instead of panicking.
func PKCS7UnPadding(data []byte) []byte {
	length := len(data)
	if length == 0 {
		return data
	}
	unpadding := int(data[length-1])
	if unpadding <= 0 || unpadding > length {
		// Not valid PKCS#7 padding; leave the data untouched.
		return data
	}
	return data[:length-unpadding]
}
|
package main
import (
"cmgmt/datastore"
"encoding/json"
"log"
"strconv"
"strings"
"net/http"
)
// handleMemberByID routes /member/{id} requests by HTTP method.
//
// Bug fixed: unsupported methods fell through with no response, which the
// server reports as an implicit 200; they now answer 405.
func handleMemberByID(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		log.Println("GET /member/id")
		handleGETMemberByID(w, r)
	case "DELETE":
		log.Println("DELETE /member/id")
		handleDELETEMemberByID(w, r)
	default:
		log.Println("Route not handled ", r.Method)
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// handleMembers routes /members requests by HTTP method.
//
// Bug fixed: unsupported methods fell through with no response (implicit
// 200); they now answer 405.
func handleMembers(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		log.Println("GET /members")
		handleGETMembers(w, r)
	case "POST":
		log.Println("POST /members")
		handlePOSTMembers(w, r)
	default:
		log.Println("Route not handled ", r.Method)
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// handleGETMembers writes the full member list as JSON, or a 500 when the
// datastore lookup or marshalling fails.
func handleGETMembers(w http.ResponseWriter, r *http.Request) {
	log.Println("handleGETMembers()")
	mems, err := store.GetMembers()
	if err != nil {
		log.Println("getMembers() error ", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Println(mems)
	body, marshalErr := json.Marshal(mems)
	if marshalErr != nil {
		http.Error(w, marshalErr.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(body)
}
// handleGETMemberByID fetches one member (identified by the trailing path
// segment) together with their resolved profession and writes both as JSON.
//
// Bug fixed: an unparseable id was only logged and the handler then queried
// the datastore with id 0; it now answers 400 and stops.
func handleGETMemberByID(w http.ResponseWriter, r *http.Request) {
	splitPath := strings.Split(r.URL.Path, "/")
	id, err := strconv.ParseFloat(splitPath[len(splitPath)-1], 64)
	if err != nil {
		log.Println(err)
		http.Error(w, "invalid member id", http.StatusBadRequest)
		return
	}
	mem, err := store.GetMemberByID(id)
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Println(mem)
	prof, err := store.ResolveProfession(mem.ProfessionID)
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	payload := map[string]interface{}{
		"member":             mem,
		"resolvedProfession": prof,
	}
	js, err := json.Marshal(payload)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(js)
}
// handleDELETEMemberByID deletes the member named by the trailing path segment.
//
// Bugs fixed: a malformed id was only logged and id 0 was then deleted (now
// 400); a datastore failure still reported success (now 500); the handler
// answered 201 Created for a delete (now 200 OK); and Content-Type was set
// after WriteHeader, where net/http ignores it (headers now precede the
// status write).
func handleDELETEMemberByID(w http.ResponseWriter, r *http.Request) {
	log.Println(r.URL.Path)
	splitPath := strings.Split(r.URL.Path, "/")
	id, err := strconv.Atoi(splitPath[len(splitPath)-1])
	if err != nil {
		log.Println(err)
		http.Error(w, "invalid member id", http.StatusBadRequest)
		return
	}
	if err := store.DeleteMember(int64(id)); err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	js, err := json.Marshal(map[string]string{"status": "deleted"})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(js)
}
// handlePOSTMembers decodes a member from the request body, assigns it the
// next free id, and stores it, answering 201 with a JSON status.
//
// Bugs fixed: a malformed body was only logged and an empty member was then
// stored (now 400); and Content-Type was set after WriteHeader, where
// net/http ignores it (headers now precede the status write).
func handlePOSTMembers(w http.ResponseWriter, r *http.Request) {
	log.Println("handlePOSTMembers()")
	decoder := json.NewDecoder(r.Body)
	var m datastore.Member
	if err := decoder.Decode(&m); err != nil {
		log.Println(err)
		http.Error(w, "invalid member payload", http.StatusBadRequest)
		return
	}
	log.Println("FirstName : ", m.FirstName)
	log.Println("LastName : ", m.LastName)
	m.ID = float64(int(store.NextID()))
	log.Println(m)
	if err := store.AddMember(&m); err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	js, err := json.Marshal(map[string]string{"status": "created"})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	w.Write(js)
}
|
package main
import (
"binary_tree/tree"
"fmt"
"os"
)
/*
Daily Coding Problem: Problem #502 [Easy]
This problem was asked by PayPal.
Given a binary tree, determine whether or not it is height-balanced. A
height-balanced binary tree can be defined as one in which the heights
of the two subtrees of any node never differ by more than one.
*/
// There's no time or complexity requirement, so what's to prevent you
// from traversing the tree and finding all the subtree heights?
// main parses a tree from the first command-line argument and reports
// whether it is height-balanced.
//
// Bug fixed: os.Args[1] was indexed unconditionally, panicking with an
// index-out-of-range when no argument was supplied; a usage message and
// non-zero exit are printed instead.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: balanced <tree-string>")
		os.Exit(1)
	}
	root := tree.CreateFromString(os.Args[1])
	phrase := " not"
	if Balanced(root) {
		phrase = ""
	}
	fmt.Printf("input tree is%s balanced\n", phrase)
}
// Balanced decides whether or not its argument is height-balanced. A
// height-balanced binary tree can be defined as one in which the
// heights of the two subtrees of any node never differ by more than
// one.
//
// Bug fixed: the original compared subtree depths only at the root, but the
// definition quantifies over *any* node — e.g. a tree whose two root
// subtrees have equal depth can still hide an unbalanced node deeper down.
// Both subtrees are now checked recursively. Debug prints were removed.
func Balanced(node tree.Node) bool {
	if node.IsNil() {
		return true
	}
	// Both subtrees must themselves be balanced...
	if !Balanced(node.LeftChild()) || !Balanced(node.RightChild()) {
		return false
	}
	// ...and their depths at this node may differ by at most one.
	leftDepth := tree.FindDepth(node.LeftChild(), 0)
	rightDepth := tree.FindDepth(node.RightChild(), 0)
	depthDiff := leftDepth - rightDepth
	return depthDiff >= -1 && depthDiff <= 1
}
|
package responses
import "time"
// QRcodeResponses is the API/storage representation of a stored QR code.
type QRcodeResponses struct {
	UUID       string    `bson:"uuid" json:"uuid"`               // unique identifier
	Title      string    `bson:"title" json:"title"`             // title
	Info       string    `bson:"info" json:"info"`               // JSON payload encoded in the QR code
	IsDel      bool      `bson:"is_del" json:"is_del"`           // soft-delete flag
	Size       int       `bson:"size" json:"size"`               // QR code image size
	CreatedOn  time.Time `bson:"created_on" json:"created_on"`   // creation time
	UpdateTime time.Time `bson:"update_time" json:"update_time"` // last update time
}
|
package handlers
import (
"encoding/json"
"io"
"net/http"
)
// ErrorResponse is the JSON body written for failed API calls.
type ErrorResponse struct {
	Message string `json:"message"`
}
// ApiHandler adapts an ApiHandlerFunc into an http.HandlerFunc: on success
// it writes the handler's JSON payload with 200, on failure a JSON-encoded
// ErrorResponse with the handler's status code.
//
// Fixes: the local variable `json` shadowed the encoding/json package (a
// classic trap for later edits) and Content-Type was set twice; it is now
// set once, before any status or body is written.
func ApiHandler(fn ApiHandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		body, err := fn(w, r)
		if err != nil {
			response := ErrorResponse{Message: err.Message}
			// Encoding an ErrorResponse cannot realistically fail; ignore it.
			jsonErr, _ := EncodeResponse(response)
			w.WriteHeader(err.Code)
			w.Write(jsonErr)
			return
		}
		w.WriteHeader(http.StatusOK)
		w.Write(body)
	}
}
// DecodePayload strictly decodes the JSON request body into payload
// (unknown fields are rejected), mapping failures to a 400 ApiHandlerError.
func DecodePayload(body io.ReadCloser, payload interface{}) *ApiHandlerError {
	decoder := json.NewDecoder(body)
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(&payload); err != nil {
		return &ApiHandlerError{Code: http.StatusBadRequest, Message: "Cannot decode payload", Error: err}
	}
	return nil
}
// EncodeResponse marshals response to JSON, mapping failures to a 500
// ApiHandlerError.
func EncodeResponse(response interface{}) ([]byte, *ApiHandlerError) {
	encoded, err := json.Marshal(response)
	if err != nil {
		return nil, &ApiHandlerError{Code: http.StatusInternalServerError, Message: "Cannot encode response", Error: err}
	}
	return encoded, nil
}
|
package seeker
import (
"context"
"math"
"strconv"
"strings"
"time"
"../logger"
"../utils"
"github.com/chromedp/cdproto/cdp"
"github.com/chromedp/chromedp"
"github.com/chromedp/chromedp/client"
"github.com/chromedp/chromedp/runner"
"github.com/gorilla/websocket"
)
// Seeker seeks targets with search engine.
type Seeker struct {
	mconn   *utils.MuxConn // websocket connection used to stream result URLs out
	query   string         // search keyword
	se      string         // search engine name: "google" or "bing"
	maxPage int            // maximum number of result pages to walk
	Results []string       // accumulated result URLs
}
// NewSeeker returns a new Seeker for query q on engine se, walking at most
// maxPage result pages.
func NewSeeker(q, se string, maxPage int) *Seeker {
	seeker := &Seeker{
		mconn:   &utils.MuxConn{},
		query:   q,
		se:      se,
		maxPage: maxPage,
	}
	return seeker
}
// Set params.
// Params should be {conn *websocket.Conn, query, se string, maxPage int}.
// NOTE(review): the type assertions panic on any other argument types —
// confirm every call site matches this contract.
func (s *Seeker) Set(v ...interface{}) {
	s.mconn.Conn = v[0].(*websocket.Conn)
	s.query = v[1].(string)
	s.se = v[2].(string)
	s.maxPage = v[3].(int)
}
// Run starts seeker: it launches a headless Chrome instance, executes the
// search task list for the configured engine, and then shuts Chrome down.
//
// Fixes: removed the dead `var err error; if err != nil {}` block, and the
// errors from Run, Shutdown and Wait — previously assigned and silently
// discarded — are now logged.
func (s *Seeker) Run() {
	logger.Green.Println("Seeking Targets...")
	logger.Blue.Println("Search Engine:", s.se)
	logger.Blue.Println("Keyword:", s.query)
	logger.Blue.Println("Max Page:", s.maxPage)
	// create context
	ctxt, cancel := context.WithCancel(context.Background())
	defer cancel()
	options := chromedp.WithRunnerOptions(
		runner.Flag("no-first-run", true),
		runner.Flag("no-sandbox", true),
		runner.Flag("disable-gpu", true),
	)
	// create chrome instance attached to any existing page targets
	c, err := chromedp.New(ctxt, chromedp.WithTargets(client.New().WatchPageTargets(ctxt)), options)
	if err != nil {
		logger.Red.Println(err)
		return
	}
	switch s.se {
	case "google":
		err = c.Run(ctxt, s.searchGoogle())
	case "bing":
		err = c.Run(ctxt, s.searchBing())
	}
	if err != nil {
		logger.Red.Println(err)
	}
	// shutdown chrome and wait for it to finish
	if err = c.Shutdown(ctxt); err != nil {
		logger.Red.Println(err)
	}
	if err = c.Wait(); err != nil {
		logger.Red.Println(err)
	}
}
// searchBing builds the chromedp task list that runs the query on Bing,
// walks up to s.maxPage result pages, logs every hit URL, streams each
// page's URLs over the websocket, and appends them to s.Results.
func (s *Seeker) searchBing() chromedp.Tasks {
	var urls []string
	return chromedp.Tasks{
		chromedp.Navigate(`https://www.bing.com`),
		chromedp.Sleep(2 * time.Second),
		chromedp.SendKeys(`#sb_form_q`, s.query+"\n", chromedp.ByID),
		chromedp.WaitVisible(`.sb_count`, chromedp.ByQuery),
		chromedp.ActionFunc(func(c context.Context, e cdp.Executor) error {
			// Parse the result-count banner (e.g. "1,234 results") and cap
			// maxPage at the real page count, assuming 10 hits per page.
			var resCount string
			chromedp.Text(`.sb_count`, &resCount, chromedp.ByQuery).Do(c, e)
			n := strings.Replace(strings.Split(resCount, " ")[0], ",", "", -1)
			count, _ := strconv.Atoi(n)
			p := int(math.Floor(float64(count / 10)))
			if p < s.maxPage {
				s.maxPage = p
			}
			for i := 0; i <= s.maxPage; i++ {
				chromedp.Sleep(2*time.Second).Do(c, e)
				// Collect hit URLs from the result <h2> anchors; the last two
				// h2 elements on the page are skipped by the in-page script.
				chromedp.EvaluateAsDevTools(`
				var h2 = document.getElementsByTagName('h2');
				var urls = [];
				for(var i=0;i<h2.length-2;i++){
					var a = h2[i].getElementsByTagName('a');
					urls.push(a[0].href);
				}
				urls`, &urls).Do(c, e)
				for _, u := range urls {
					logger.Blue.Println(u)
				}
				// Stream this page's URLs to the websocket client.
				ret := map[string][]string{
					"urls": urls,
				}
				s.mconn.Send(ret)
				s.Results = append(s.Results, urls...)
				if i != s.maxPage {
					chromedp.Click(`//*[@title="Next page"]`, chromedp.BySearch).Do(c, e)
				}
			}
			return nil
		}),
	}
}
// searchGoogle builds the chromedp task list that runs the query on Google,
// walks up to s.maxPage result pages, logs every hit URL, streams each
// page's URLs over the websocket, and appends them to s.Results.
func (s *Seeker) searchGoogle() chromedp.Tasks {
	urls := []string{}
	return chromedp.Tasks{
		chromedp.Navigate(`https://www.google.com`),
		chromedp.SendKeys(`#lst-ib`, s.query+"\n", chromedp.ByID),
		chromedp.WaitVisible(`#res`, chromedp.ByID),
		chromedp.ActionFunc(func(c context.Context, e cdp.Executor) error {
			// Parse the "#resultStats" banner; the word position of the count
			// varies, so pick whichever token precedes/follows "results".
			var resCount string
			var xx string
			chromedp.Text(`#resultStats`, &resCount, chromedp.ByID).Do(c, e)
			x := strings.Split(resCount, " ")
			if x[1] == "results" {
				xx = x[0]
			} else {
				xx = x[1]
			}
			// Cap maxPage at the real page count, assuming 10 hits per page.
			n := strings.Replace(xx, ",", "", -1)
			count, _ := strconv.Atoi(n)
			p := int(math.Floor(float64(count / 10)))
			if p < s.maxPage {
				s.maxPage = p
			}
			for i := 0; i <= s.maxPage; i++ {
				chromedp.Sleep(1*time.Second).Do(c, e)
				// Collect hit URLs from the result <h3> anchors; the in-page
				// script drops the 11th h3 when present (not a result).
				chromedp.EvaluateAsDevTools(`
				var h3 = document.getElementsByTagName('h3');
				var c = h3.length;
				if(h3.length==11){
					c=10;
				}
				var urls = [];
				for(var i=0;i<c;i++){
					var a = h3[i].getElementsByTagName('a');
					urls.push(a[0].href);
				}
				urls`, &urls).Do(c, e)
				for _, u := range urls {
					logger.Blue.Println(u)
				}
				// Stream this page's URLs to the websocket client.
				ret := map[string][]string{
					"urls": urls,
				}
				s.mconn.Send(ret)
				s.Results = append(s.Results, urls...)
				if i != s.maxPage {
					chromedp.Click(`#pnnext`, chromedp.NodeVisible).Do(c, e)
				}
			}
			return nil
		}),
	}
}
|
// *** WARNING: this file was generated by pulumigen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v1
import (
"fmt"
"github.com/blang/semver"
"github.com/pulumi/pulumi-kubernetes/sdk/v3/go/kubernetes"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// module maps kubernetes:core/v1 resource type tokens to their Go resource
// structs for the Pulumi runtime (generated code — do not edit by hand).
type module struct {
	version semver.Version
}
// Version returns the SDK version this module was registered with.
func (m *module) Version() semver.Version {
	return m.version
}
// Construct rehydrates a resource of the given core/v1 type token into
// its concrete Go type and re-registers it with the Pulumi engine under
// the provided URN. Unknown tokens yield an error.
func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi.Resource, err error) {
	switch typ {
	case "kubernetes:core/v1:Binding":
		r = &Binding{}
	case "kubernetes:core/v1:ConfigMap":
		r = &ConfigMap{}
	case "kubernetes:core/v1:ConfigMapList":
		r = &ConfigMapList{}
	case "kubernetes:core/v1:Endpoints":
		r = &Endpoints{}
	case "kubernetes:core/v1:EndpointsList":
		r = &EndpointsList{}
	case "kubernetes:core/v1:Event":
		r = &Event{}
	case "kubernetes:core/v1:EventList":
		r = &EventList{}
	case "kubernetes:core/v1:LimitRange":
		r = &LimitRange{}
	case "kubernetes:core/v1:LimitRangeList":
		r = &LimitRangeList{}
	case "kubernetes:core/v1:Namespace":
		r = &Namespace{}
	case "kubernetes:core/v1:NamespaceList":
		r = &NamespaceList{}
	case "kubernetes:core/v1:Node":
		r = &Node{}
	case "kubernetes:core/v1:NodeList":
		r = &NodeList{}
	case "kubernetes:core/v1:PersistentVolume":
		r = &PersistentVolume{}
	case "kubernetes:core/v1:PersistentVolumeClaim":
		r = &PersistentVolumeClaim{}
	case "kubernetes:core/v1:PersistentVolumeClaimList":
		r = &PersistentVolumeClaimList{}
	case "kubernetes:core/v1:PersistentVolumeList":
		r = &PersistentVolumeList{}
	case "kubernetes:core/v1:Pod":
		r = &Pod{}
	case "kubernetes:core/v1:PodList":
		r = &PodList{}
	case "kubernetes:core/v1:PodTemplate":
		r = &PodTemplate{}
	case "kubernetes:core/v1:PodTemplateList":
		r = &PodTemplateList{}
	case "kubernetes:core/v1:ReplicationController":
		r = &ReplicationController{}
	case "kubernetes:core/v1:ReplicationControllerList":
		r = &ReplicationControllerList{}
	case "kubernetes:core/v1:ResourceQuota":
		r = &ResourceQuota{}
	case "kubernetes:core/v1:ResourceQuotaList":
		r = &ResourceQuotaList{}
	case "kubernetes:core/v1:Secret":
		r = &Secret{}
	case "kubernetes:core/v1:SecretList":
		r = &SecretList{}
	case "kubernetes:core/v1:Service":
		r = &Service{}
	case "kubernetes:core/v1:ServiceAccount":
		r = &ServiceAccount{}
	case "kubernetes:core/v1:ServiceAccountList":
		r = &ServiceAccountList{}
	case "kubernetes:core/v1:ServiceList":
		r = &ServiceList{}
	default:
		return nil, fmt.Errorf("unknown resource type: %s", typ)
	}
	// Re-register the (empty) resource under its existing URN so the
	// engine can populate its state.
	err = ctx.RegisterResource(typ, name, nil, r, pulumi.URN_(urn))
	return
}
// init registers this module with the Pulumi runtime so that core/v1
// resource URNs can be hydrated back into their concrete Go types.
func init() {
	version, err := kubernetes.PkgVersion()
	if err != nil {
		// Non-fatal: fall back to the zero version rather than aborting.
		fmt.Printf("failed to determine package version. defaulting to v1: %v\n", err)
	}
	pulumi.RegisterResourceModule(
		"kubernetes",
		"core/v1",
		&module{version},
	)
}
|
package bgo
import "time"
// Wait is an action node that waits for a configured delay (in
// milliseconds) before reporting success.
type Wait struct {
	BaseNode
	endTime <-chan time.Time // fires when the delay elapses; armed in Open
	delay   int64            // delay in milliseconds
}
// Open arms the delay timer. time.After replaces the original
// time.Tick: Tick allocates a ticker that is never stopped (a resource
// leak), while After fires exactly once — which is all this node
// consumes.
func (this *Wait) Open(context *Context) {
	t := time.Duration(this.delay) * time.Millisecond
	this.endTime = time.After(t)
}
// Tick reports SUCCESS once the delay armed in Open has elapsed and
// RUNNING otherwise.
//
// The original ranged over the channel, which blocked the whole tick
// loop until the timer fired — the RUNNING return was unreachable. A
// non-blocking select restores the intended poll semantics.
func (this *Wait) Tick(context Context) Status {
	select {
	case <-this.endTime:
		return SUCCESS
	default:
		return RUNNING
	}
}
func NewWait(title string) *Wait {
wait := &Wait{}
wait.ID = CreateUUID()
wait.Category = ACTION
wait.Name = "Wait"
wait.Title = title
wait.Description = "Priority contexts its children sequentially until one of them returns `SUCCESS`, `RUNNING` or `ERROR`"
return wait
} |
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package multivm contains utilities for working with more than one VM (ARCVM,
// Crostini, etc.) at a time.
package multivm
|
package service
import (
"testing"
ut "github.com/zdnscloud/cement/unittest"
)
// TestIngressRemoveRule table-drives ingressRemoveRules and checks the
// number of rules left on the Ingress afterwards.
//
// NOTE(review): the expected counts of the last two cases (1 remaining
// after removing one of three protocols, and after removing a protocol
// that is not present) suggest ingressRemoveRules does more than a
// simple filter-by-protocol — confirm against its implementation before
// changing these expectations.
func TestIngressRemoveRule(t *testing.T) {
	// Composite literals are simplified per gofmt -s (the element type
	// is implied by the slice type); values are unchanged.
	cases := []struct {
		src           *Ingress
		protocol      IngressProtocol
		leftRuleCount int
	}{
		{
			&Ingress{
				name: "i1",
				rules: []IngressRule{
					{protocol: IngressProtocolHTTP},
				},
			},
			IngressProtocolHTTP,
			0,
		},
		{
			&Ingress{
				name: "i1",
				rules: []IngressRule{
					{protocol: IngressProtocolHTTP},
					{protocol: IngressProtocolUDP},
					{protocol: IngressProtocolTCP},
				},
			},
			IngressProtocolHTTP,
			2,
		},
		{
			&Ingress{
				name: "i1",
				rules: []IngressRule{
					{protocol: IngressProtocolHTTP},
					{protocol: IngressProtocolUDP},
					{protocol: IngressProtocolTCP},
				},
			},
			IngressProtocolUDP,
			1,
		},
		{
			&Ingress{
				name: "i1",
				rules: []IngressRule{
					{protocol: IngressProtocolHTTP},
					{protocol: IngressProtocolUDP},
				},
			},
			IngressProtocolTCP,
			1,
		},
	}

	for _, tc := range cases {
		ingressRemoveRules(tc.src, tc.protocol)
		ut.Equal(t, len(tc.src.rules), tc.leftRuleCount)
	}
}
|
package notifier
import (
"github.com/sirupsen/logrus"
mint "github.com/void616/gm.mint"
"github.com/void616/gm.mint.sender/internal/watcher/db"
"github.com/void616/gm.mint/amount"
)
// itemsPerShot caps how many items are handled per batch.
const itemsPerShot = 50

// Notifier sends refilling notifications
type Notifier struct {
	logger    *logrus.Entry   // structured logger for this component
	natsTrans NatsTransporter // NATS delivery transport
	httpTrans HTTPTransporter // HTTP callback delivery transport
	dao       db.DAO          // persistence layer the notifier reads from
}
// NatsTransporter delivers a refilling notification over NATS or fails
// with an error.
type NatsTransporter interface {
	NotifyRefilling(service string, to, from mint.PublicKey, t mint.Token, a *amount.Amount, tx mint.Digest) error
}

// HTTPTransporter delivers a refilling notification to a callback URL or
// fails with an error.
type HTTPTransporter interface {
	NotifyRefilling(url, service string, to, from mint.PublicKey, t mint.Token, a *amount.Amount, tx mint.Digest) error
}
// New constructs a Notifier wired to the given storage and transports.
// The error return is kept for signature stability; it is always nil.
func New(
	dao db.DAO,
	natsTrans NatsTransporter,
	httpTrans HTTPTransporter,
	logger *logrus.Entry,
) (*Notifier, error) {
	notifier := &Notifier{
		dao:       dao,
		natsTrans: natsTrans,
		httpTrans: httpTrans,
		logger:    logger,
	}
	return notifier, nil
}
|
package routes
import (
"fmt"
"math"
"strconv"
"github.com/ankithans/youtube-api/pkg/database"
"github.com/ankithans/youtube-api/pkg/models"
"github.com/gofiber/fiber/v2"
)
// GetVideos returns videos filtered, sorted and paginated according to
// the request's query parameters:
//
//	s    - substring matched against title or description
//	sort - "asc" or "desc" ordering by date
//	page - 1-based page number (9 items per page)
func GetVideos(c *fiber.Ctx) error {
	var products []models.Video

	sql := "SELECT * FROM videos"
	args := []interface{}{}

	// Search by title or description. The original interpolated the raw
	// user input into the SQL string, which allowed SQL injection; bind
	// it as a query parameter instead.
	if s := c.Query("s"); s != "" {
		sql += " WHERE title LIKE ? OR description LIKE ?"
		pattern := "%" + s + "%"
		args = append(args, pattern, pattern)
	}

	// An ORDER BY direction cannot be bound as a parameter, so
	// whitelist the only two valid values (the original interpolated
	// arbitrary user input here too).
	if sort := c.Query("sort"); sort == "asc" || sort == "desc" {
		sql += " ORDER BY date " + sort
	}

	// Page number requested by the user; clamp to 1 so a bad value
	// cannot produce a negative OFFSET.
	page, _ := strconv.Atoi(c.Query("page", "1"))
	if page < 1 {
		page = 1
	}
	perPage := 9

	// Total number of matching videos.
	var total int64
	database.DBConn.Raw(sql, args...).Count(&total)

	// Fetch just the requested page.
	sql = fmt.Sprintf("%s LIMIT %d OFFSET %d", sql, perPage, (page-1)*perPage)
	database.DBConn.Raw(sql, args...).Scan(&products)

	return c.JSON(fiber.Map{
		"data":  products,
		"total": total,
		"page":  page,
		// The original computed Ceil(float64(total/perPage)) — the
		// integer division floored before Ceil could act, so partial
		// last pages were dropped. Divide in floating point.
		"last_page": math.Ceil(float64(total) / float64(perPage)),
	})
}
|
package handler
import (
"net/http"
"net/http/httptest"
"testing"
"gopkg.in/mgo.v2"
"github.com/labstack/echo"
"github.com/stretchr/testify/assert"
)
// TestFetchUserInfo exercises the /api/v1/UserInfolist handler against a
// live mlab MongoDB instance for four known users.
//
// NOTE(review): the connection string embeds credentials in source;
// these should move to an environment variable or test fixture.
func TestFetchUserInfo(t *testing.T) {
	session, err := mgo.Dial("mongodb://SEavanger:SEavanger@ds139964.mlab.com:39964/se_avangers")
	if err != nil {
		// Fail this test instead of panicking so the rest of the suite
		// keeps running when the database is unreachable.
		t.Fatalf("dialing test database: %v", err)
	}
	// The original leaked the session; release it when the test ends.
	defer session.Close()

	// Setup
	e := echo.New()
	h := &Handler{session}

	// Test cases: request parameter and the JSON each is expected to yield.
	requestParam := []string{
		"JasonHo",
		"MarsLee",
		"JasonHe",
		"DianeLin"}
	expectedJSON := []string{
		`[{"firstname":"Jason","lastname":"Ho","password":"test1","email":"hojason117@gmail.com"}]`,
		`[{"firstname":"Chih-Yin","lastname":"Lee","password":"test2","email":"c788678867886@gmail.com"}]`,
		`[{"firstname":"Jason","lastname":"He","password":"test3","email":"hexing_h@hotmail.com"}]`,
		`[{"firstname":"Diane","lastname":"Lin","password":"test4","email":"diane@gmail.com"}]`,
	}

	// Run
	for i, rp := range requestParam {
		// Build a request/recorder pair routed to the handler's path.
		req := httptest.NewRequest(echo.GET, "/", nil)
		rec := httptest.NewRecorder()
		c := e.NewContext(req, rec)
		// Set the registered path for the handler.
		c.SetPath("/api/v1/UserInfolist")
		// Set path parameter names.
		c.SetParamNames("user")
		// Set path parameter values.
		c.SetParamValues(rp)

		// Assertion
		if assert.NoError(t, h.FetchUserInfo(c)) {
			assert.Equal(t, http.StatusOK, rec.Code)
			assert.Equal(t, expectedJSON[i], rec.Body.String())
		}
	}
}
|
package main
import (
"encoding/json"
"mime"
"net/http"
"strconv"
"strings"
)
func indexController(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
ErrorReply(r, w, ErrNotFound, ServerOptions{})
return
}
body, _ := json.Marshal(CurrentVersions)
w.Header().Set("Content-Type", "application/json")
w.Write(body)
}
// healthController reports current process health statistics as JSON.
func healthController(w http.ResponseWriter, r *http.Request) {
	payload, _ := json.Marshal(GetHealthStats())
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
// imageController builds an HTTP handler that resolves the image source
// for the request, fetches the raw image bytes, and hands them to
// imageHandler for the given operation. Missing sources, fetch errors
// and empty payloads are turned into error replies.
func imageController(o ServerOptions, operation Operation) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		var imageSource = MatchSource(req)
		if imageSource == nil {
			ErrorReply(req, w, ErrMissingImageSource, o)
			return
		}
		// Fetch the raw image payload from the resolved source.
		buf, err := imageSource.GetImage(req)
		if err != nil {
			ErrorReply(req, w, NewError(err.Error(), BadRequest), o)
			return
		}
		if len(buf) == 0 {
			ErrorReply(req, w, ErrEmptyBody, o)
			return
		}
		imageHandler(w, req, buf, operation, o)
	}
}
// determineAcceptMimeType maps an HTTP Accept header to the short name
// of the first supported image format it mentions ("png", "gif" or
// "jpeg"). It returns "" when no supported type is present, letting the
// caller fall back to its default format.
func determineAcceptMimeType(accept string) string {
	for _, v := range strings.Split(accept, ",") {
		// ParseMediaType strips parameters such as ";q=0.9".
		mediatype, _, _ := mime.ParseMediaType(v)
		switch mediatype {
		case "image/png":
			return "png"
		case "image/gif":
			return "gif"
		case "image/jpeg":
			return "jpeg"
		}
	}
	// No supported type found.
	return ""
}
// imageHandler validates that buf holds a supported image, parses the
// operation parameters from the request URL, runs the image operation,
// and writes the processed image back to the client.
//
// The parameter was renamed from "Operation" to "operation": the
// original name shadowed the Operation type inside the function body.
func imageHandler(w http.ResponseWriter, r *http.Request, buf []byte, operation Operation, o ServerOptions) {
	// Infer the body MIME type via the mimesniff algorithm.
	mimeType := http.DetectContentType(buf)

	// Finally check if the image MIME type is supported.
	if !IsImageMimeTypeSupported(mimeType) {
		ErrorReply(r, w, ErrUnsupportedMedia, o)
		return
	}

	opts, err := readParams(r.URL.Path, r.URL.Query(), o.Profiles)
	if err != nil {
		ErrorReply(r, w, NewError("Error params image: "+err.Error(), BadRequest), o)
		return
	}

	image, err := operation.Run(buf, opts)
	if err != nil {
		ErrorReply(r, w, NewError("Error while processing the image: "+err.Error(), BadRequest), o)
		return
	}

	// Expose Content-Length so clients can track download progress.
	w.Header().Set("Content-Length", strconv.Itoa(len(image.Body)))
	w.Header().Set("Content-Type", image.Mime)
	w.Write(image.Body)
}
|
package go_benchmarks
import (
"testing"
)
const (
testStr = "This is the string that will be copied into the tests"
sliceLen = 5
)
// BenchmarkMakeWithCapacity measures building the slice with a
// pre-sized capacity.
//
// NOTE(review): assumes MakeWithCapacity returns []TextRange (the
// sink's declared type) — confirm against its definition.
func BenchmarkMakeWithCapacity(b *testing.B) {
	var result []TextRange // sink: keeps the call from being optimized away
	for i := 0; i < b.N; i++ {
		// The original discarded the return value, so the sink never
		// received it and the call could still be elided; assign it.
		result = MakeWithCapacity(sliceLen, testStr, sliceLen+1)
	}
	_ = result
}
// BenchmarkMakeWithoutCapacity measures building the slice with no
// pre-sized capacity (repeated growth on append).
//
// NOTE(review): assumes MakeWithoutCapacity returns []TextRange (the
// sink's declared type) — confirm against its definition.
func BenchmarkMakeWithoutCapacity(b *testing.B) {
	var result []TextRange // sink: keeps the call from being optimized away
	for i := 0; i < b.N; i++ {
		// Assign the result so the call cannot be elided (the original
		// discarded it).
		result = MakeWithoutCapacity(sliceLen, testStr)
	}
	_ = result
}
|
package pif
import (
"n64emu/pkg/core/joybus"
"n64emu/pkg/types"
"testing"
"github.com/stretchr/testify/assert"
)
// MockJoyBusDevice is a scripted joybus device for tests: it validates
// the command and buffer lengths it receives and answers with canned
// read data.
type MockJoyBusDevice struct {
	wantCmd   joybus.CommandType // command the device expects to receive
	wantTxLen types.Byte         // expected transmit buffer length
	wantRxLen types.Byte         // expected receive buffer length
	readDatas []types.Byte       // canned bytes copied into rxBuf on success
}
// Reset is a no-op; the mock carries no state that needs resetting.
func (m *MockJoyBusDevice) Reset() {
}
// Run checks cmd and the buffer sizes against the mock's expectations,
// then copies the canned read data into rxBuf.
//
// It returns DeviceNotPresent for an unexpected command and
// UnableToTransferDatas for a buffer length mismatch.
func (m *MockJoyBusDevice) Run(cmd joybus.CommandType, txBuf, rxBuf []types.Byte) joybus.CommandResult {
	if cmd != m.wantCmd {
		return joybus.DeviceNotPresent
	}
	if len(txBuf) != int(m.wantTxLen) {
		return joybus.UnableToTransferDatas
	}
	if len(rxBuf) != int(m.wantRxLen) {
		return joybus.UnableToTransferDatas
	}
	// copy stops at the shorter of the two slices, exactly matching the
	// manual bounds-checked loop it replaces.
	copy(rxBuf, m.readDatas)
	return joybus.Success
}
// TestUpdate drives PIF.Update over table-defined PIF RAM images and
// compares the resulting RAM against an expected image. Each case wires
// mock joybus devices into the controller/EEPROM ports; a nil entry
// means "no device present" on that port.
func TestUpdate(t *testing.T) {
	const numOfControllers = 4
	const numOfEEPROMs = 2
	tests := []struct {
		name        string
		controllers [numOfControllers]*MockJoyBusDevice
		eeproms     [numOfEEPROMs]*MockJoyBusDevice
		initialRAM  [PIFRAMSize]types.Byte
		wantRAM     [PIFRAMSize]types.Byte
	}{
		{
			name:        "DeviceNotPresent",
			controllers: [numOfControllers]*MockJoyBusDevice{nil, nil, nil, nil},
			eeproms:     [numOfEEPROMs]*MockJoyBusDevice{nil, nil},
			initialRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // [dummy*7], [new command]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
		{
			name: "ReadButtonValuesFromPort2",
			controllers: [numOfControllers]*MockJoyBusDevice{
				nil,
				nil,
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0xaa, 0x99, 0x55, 0x66}},
				nil,
			},
			eeproms: [numOfEEPROMs]*MockJoyBusDevice{nil, nil},
			initialRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // [dummy*7], [new command]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xaa, 0x99, 0x55, 0x66, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [rd0, rd1, rd2, rd3]
				0xff, 0x01, 0x84, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:(device not present, 4), cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
		{
			name: "ContinueIdle",
			controllers: [numOfControllers]*MockJoyBusDevice{
				nil,
				nil,
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0xaa, 0x99, 0x55, 0x66}},
				nil,
			},
			eeproms: [numOfEEPROMs]*MockJoyBusDevice{nil, nil},
			initialRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
		{
			name: "ReadButtonValuesFromAllPort",
			controllers: [numOfControllers]*MockJoyBusDevice{
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0x00, 0x11, 0x22, 0x33}},
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0x44, 0x55, 0x66, 0x77}},
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0x88, 0x99, 0xaa, 0xbb}},
				{wantCmd: joybus.ReadButtonValues, wantTxLen: 1, wantRxLen: 4, readDatas: []types.Byte{0xcc, 0xdd, 0xee, 0xff}},
			},
			eeproms: [numOfEEPROMs]*MockJoyBusDevice{nil, nil},
			initialRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xff, 0x01, 0x04, 0x01, 0xff, 0xff, 0xff, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [dummy*4]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // [dummy*7], [new command]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0xff, 0x01, 0x04, 0x01, 0x00, 0x11, 0x22, 0x33, // controller0: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [rd0, rd1, rd2, rd3]
				0xff, 0x01, 0x04, 0x01, 0x44, 0x55, 0x66, 0x77, // controller1: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [rd0, rd1, rd2, rd3]
				0xff, 0x01, 0x04, 0x01, 0x88, 0x99, 0xaa, 0xbb, // controller2: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [rd0, rd1, rd2, rd3]
				0xff, 0x01, 0x04, 0x01, 0xcc, 0xdd, 0xee, 0xff, // controller3: [dummy, tx:1, rx:4, cmd:ReadButtonValues], [rd0, rd1, rd2, rd3]
				0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [end of setup], [dummy*7]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
		{
			name:        "RequestInfoFromEEPROM0",
			controllers: [numOfControllers]*MockJoyBusDevice{nil, nil, nil, nil},
			eeproms: [numOfEEPROMs]*MockJoyBusDevice{
				{wantCmd: joybus.RequestInfo, wantTxLen: 1, wantRxLen: 3, readDatas: []types.Byte{0x01, 0x23, 0x45}},
				nil,
			},
			initialRAM: [PIFRAMSize]types.Byte{
				0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x03, 0x00, // [skip*4], [dummy, tx:1, rx:3, cmd:RequestInfo]
				0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x00, // [dummy*3, skip], [end of setup], [dummy*3]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // [dummy*7], [new command]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0x00, 0x00, 0x00, 0x00, 0xff, 0x01, 0x03, 0x00, // [skip*4], [dummy, tx:1, rx:3, cmd:RequestInfo]
				0x01, 0x23, 0x45, 0xff, 0xfe, 0x00, 0x00, 0x00, // [dummy*3, skip], [end of setup], [dummy*3]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
		{
			name:        "ReadEEPROMFromEEPROM0",
			controllers: [numOfControllers]*MockJoyBusDevice{nil, nil, nil, nil},
			eeproms: [numOfEEPROMs]*MockJoyBusDevice{
				{wantCmd: joybus.ReadEEPROM, wantTxLen: 2, wantRxLen: 8, readDatas: []types.Byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}},
				nil,
			},
			initialRAM: [PIFRAMSize]types.Byte{
				0x00, 0x00, 0x00, 0x00, 0x02, 0x08, 0x04, 0x09, // [skip*4], [ tx:2, rx:8, cmd:ReadEEPROM, tx0(block addr)]
				0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x00, // [dummy*4], [end of setup]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // [dummy*7], [new command]
			},
			wantRAM: [PIFRAMSize]types.Byte{
				0x00, 0x00, 0x00, 0x00, 0x02, 0x08, 0x04, 0x09, // [skip*4], [ tx:2, rx:8, cmd:ReadEEPROM, tx0(block addr)]
				0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, // [rd0, rd1, rd2, ... , rd5, rd6, rd7,]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*8]
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [dummy*7], [idle]
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup structure
			p := PIF{}
			p.ram = tt.initialRAM
			// PIF stores *joybus.JoyBus (a pointer to the interface
			// value), so box each mock before wiring it in.
			for i := 0; i < numOfControllers; i++ {
				if tt.controllers[i] != nil {
					var c joybus.JoyBus = tt.controllers[i]
					p.controllers[i] = &c
				}
			}
			for i := 0; i < numOfEEPROMs; i++ {
				if tt.eeproms[i] != nil {
					var c joybus.JoyBus = tt.eeproms[i]
					p.eeproms[i] = &c
				}
			}
			// Run
			p.Update()
			// Verify
			assert.Equal(t, tt.wantRAM, p.ram)
		})
	}
}
|
package main
import (
"fmt"
)
// main demonstrates quickSort on a sample slice and prints the result.
func main() {
	nums := []int{2, 1, 20, 5, 6, 42, -2, 4, 3, 5, 3, 25, 122, 4, -4, 300, 73, -10}
	quickSort(nums)
	fmt.Println(nums)
}
// quickSort sorts subArray in place using Lomuto-partition quicksort.
func quickSort(subArray []int) {
	if len(subArray) <= 1 {
		return
	}
	pivot := partition(subArray)
	quickSort(subArray[:pivot])
	// The element at pivot is already in its final position, so exclude
	// it from the right half. The original recursed on [pivot:], which
	// repeatedly re-partitioned the placed pivot and did redundant work.
	quickSort(subArray[pivot+1:])
}
// partition applies Lomuto partitioning with the last element as pivot:
// on return, every element left of the returned index is <= the pivot
// value, the pivot sits at the returned index, and larger elements sit
// to its right.
func partition(subArray []int) int {
	last := len(subArray) - 1
	pivotValue := subArray[last]
	store := 0
	for j := 0; j < last; j++ {
		if subArray[j] <= pivotValue {
			swap(subArray, j, store)
			store++
		}
	}
	// Move the pivot into its final slot.
	swap(subArray, store, last)
	return store
}
// sort orders the first two elements of array ascending.
//
// The original indexed array[0] and array[1] unconditionally and
// panicked on slices shorter than two elements; such slices are now
// left untouched. The swap is inlined since it touches fixed indices.
func sort(array []int) {
	if len(array) < 2 {
		return
	}
	if array[1] < array[0] {
		array[0], array[1] = array[1], array[0]
	}
}
// swap exchanges the elements at positions first and second. Equal
// values are left alone, preserving the original's short-circuit.
func swap(array []int, first int, second int) {
	if array[first] != array[second] {
		array[first], array[second] = array[second], array[first]
	}
}
|
package worker
import (
"context"
"time"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
)
// Options bundles the configuration for an executor worker pool and the
// clients it builds.
type Options struct {
	// QueueName is the name of the queue to process work from. Having this configurable
	// allows us to have multiple worker pools with different resource requirements and
	// horizontal scaling factors while still uniformly processing events.
	QueueName string

	// HeartbeatInterval denotes the time between heartbeat requests to the queue API.
	HeartbeatInterval time.Duration

	// GitServicePath is the path to the internal git service API proxy in the frontend.
	// This path should contain the endpoints info/refs and git-upload-pack.
	GitServicePath string

	// RedactedValues is a map from strings to replace to their replacement in the command
	// output before sending it to the underlying job store. This should contain all worker
	// environment variables, as well as secret values passed along with the dequeued job
	// payload, which may be sensitive (e.g. shared API tokens, URLs with credentials).
	RedactedValues map[string]string

	// WorkerOptions configures the worker behavior.
	WorkerOptions workerutil.WorkerOptions

	// ClientOptions configures the client that interacts with the queue API.
	ClientOptions apiclient.Options

	// FirecrackerOptions configures the behavior of Firecracker virtual machine creation.
	FirecrackerOptions command.FirecrackerOptions

	// ResourceOptions configures the resource limits of docker container and Firecracker
	// virtual machines running on the executor.
	ResourceOptions command.ResourceOptions
}
// NewWorker creates a worker that polls a remote job queue API for work.
// The returned routine bundles two goroutines: one that periodically
// dequeues and executes jobs, and a heartbeat loop that reports the IDs
// of jobs currently in flight so the queue API does not consider them
// dropped.
func NewWorker(options Options, observationContext *observation.Context) goroutine.BackgroundRoutine {
	ids := newIDSet()
	client := apiclient.New(options.ClientOptions, observationContext)

	shim := &storeShim{queueName: options.QueueName, queueStore: client}
	h := &handler{
		idSet:         ids,
		options:       options,
		operations:    command.NewOperations(observationContext),
		runnerFactory: command.NewRunner,
	}

	poller := workerutil.NewWorker(context.Background(), shim, h, options.WorkerOptions)
	beat := goroutine.NewHandlerWithErrorMessage("heartbeat", func(ctx context.Context) error {
		return client.Heartbeat(ctx, ids.Slice())
	})

	return goroutine.CombinedRoutine{
		poller,
		goroutine.NewPeriodicGoroutine(context.Background(), options.HeartbeatInterval, beat),
	}
}
|
package starlarkfn_test
import (
"testing"
"github.com/golang/mock/gomock"
"github.com/raba-jp/primus/pkg/operations/directory/handlers"
mock_handlers "github.com/raba-jp/primus/pkg/operations/directory/handlers/mock"
"github.com/raba-jp/primus/pkg/operations/directory/starlarkfn"
"github.com/raba-jp/primus/pkg/starlark"
"golang.org/x/xerrors"
)
// TestCreateDirectory table-drives the starlark `test` builtin wrapped
// around starlarkfn.Create, checking the CreateParams forwarded to the
// (mocked) handler and whether evaluation errors.
func TestCreateDirectory(t *testing.T) {
	tests := []struct {
		name   string
		data   string // starlark source evaluated by ExecForTest
		mock   func(*mock_handlers.MockCreateHandler)
		hasErr bool
	}{
		{
			name: "success",
			data: `test(path="/sym/test", permission=0o777)`,
			mock: func(m *mock_handlers.MockCreateHandler) {
				m.EXPECT().Create(
					gomock.Any(),
					gomock.Any(),
					gomock.Eq(&handlers.CreateParams{
						Path:       "/sym/test",
						Permission: 0o777,
						Cwd:        "/sym",
					}),
				).Return(nil)
			},
			hasErr: false,
		},
		{
			name: "success: relative path",
			data: `test(path="test", permission=0o777)`,
			mock: func(m *mock_handlers.MockCreateHandler) {
				m.EXPECT().Create(
					gomock.Any(),
					gomock.Any(),
					gomock.Eq(&handlers.CreateParams{
						Path:       "test",
						Permission: 0o777,
						Cwd:        "/sym",
					}),
				).Return(nil)
			},
			hasErr: false,
		},
		{
			// Omitted permission defaults to 0o644.
			name: "success: without permission",
			data: `test(path="/sym/test")`,
			mock: func(m *mock_handlers.MockCreateHandler) {
				m.EXPECT().Create(
					gomock.Any(),
					gomock.Any(),
					gomock.Eq(
						&handlers.CreateParams{
							Path:       "/sym/test",
							Permission: 0o644,
							Cwd:        "/sym",
						}),
				).Return(nil)
			},
			hasErr: false,
		},
		{
			name:   "error: too many arguments",
			data:   `test("/sym/test", 0o644, "too many")`,
			mock:   func(m *mock_handlers.MockCreateHandler) {},
			hasErr: true,
		},
		{
			name: "error: failed to create directory",
			data: `test("/sym/test", 0o644)`,
			mock: func(m *mock_handlers.MockCreateHandler) {
				m.EXPECT().Create(
					gomock.Any(),
					gomock.Any(),
					gomock.Any(),
				).Return(xerrors.New("dummy"))
			},
			hasErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			m := mock_handlers.NewMockCreateHandler(ctrl)
			tt.mock(m)
			// Evaluate the starlark snippet with the mocked handler
			// registered as the `test` builtin.
			_, err := starlark.ExecForTest("test", tt.data, starlarkfn.Create(m))
			if !tt.hasErr && err != nil {
				t.Fatalf("%v", err)
			}
		})
	}
}
|
// Copyright (c) 2018 soren yang
//
// Licensed under the MIT License
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package convert
// FieldName is the full, human-readable name of a convertible log field.
type FieldName string

// Full names for each supported log field.
const (
	// FieldNameDate for date field name
	FieldNameDate FieldName = "date"
	// FieldNameLevel for level field name
	FieldNameLevel FieldName = "level"
	// FieldNamePackage for package field name
	FieldNamePackage FieldName = "package"
	// FieldNameMethod for method field name
	FieldNameMethod FieldName = "method"
	// FieldNameFile for file field name
	FieldNameFile FieldName = "file"
	// FieldNameLine for line field name
	FieldNameLine FieldName = "line"
	// FieldNameMessage for message field name
	FieldNameMessage FieldName = "message"
	// FieldNameText for text field name
	FieldNameText FieldName = "text"
)
// FieldKey is the short key used for a field inside a layout string.
//
// NOTE(review): most keys are single letters but "level", "msg" and
// "text" are words — changing them would break existing layout strings,
// so the inconsistency is documented rather than fixed.
type FieldKey string

// Layout-string keys for each supported log field.
const (
	// FieldKeyDate for date field key
	FieldKeyDate FieldKey = "d"
	// FieldKeyLevel for level field key
	FieldKeyLevel FieldKey = "level"
	// FieldKeyPackage for package field key
	FieldKeyPackage FieldKey = "P"
	// FieldKeyMethod for method field key
	FieldKeyMethod FieldKey = "M"
	// FieldKeyFile for file field key
	FieldKeyFile FieldKey = "F"
	// FieldKeyLine for line field key
	FieldKeyLine FieldKey = "L"
	// FieldKeyMessage for message field key
	FieldKeyMessage FieldKey = "msg"
	// FieldKeyText for text field key
	FieldKeyText FieldKey = "text"
)
const (
	// defaultTimestampFormat is the default timestamp layout:
	// RFC 3339 with nanosecond precision.
	defaultTimestampFormat = "2006-01-02T15:04:05.000000000Z07:00"
)
|
package models
import (
"errors"
"github.com/sirupsen/logrus"
)
// Github is a minimal GitHub user record as serialized by this service.
//
// NOTE(review): Go convention would spell the first field ID (initialism),
// but renaming the exported field would break users of this struct.
type Github struct {
	Id       int    `json:"id"`       // numeric identifier
	UserName string `json:"username"` // GitHub login name
}
// NewGitUser validates name and returns a Github user carrying it.
// The Id field is left zero for the persistence layer to assign.
// An empty name is rejected, logged, and returned as an error.
func NewGitUser(logger *logrus.Logger, name string) (*Github, error) {
	if len(name) == 0 {
		// Grammar fix: the original message read "name are required".
		err := errors.New("name is required")
		logger.Error(err)
		return nil, err
	}
	user := &Github{
		UserName: name,
	}
	return user, nil
}
|
package models
import(
"encoding/json"
)
// RequiredPrivilegeEnum enumerates the privilege level required to
// access a resource.
type RequiredPrivilegeEnum int

// Enum values for RequiredPrivilegeEnum. iota is offset by 1 so the
// zero value stays distinguishable from a real choice.
const (
	RequiredPrivilege_KREADACCESS RequiredPrivilegeEnum = 1 + iota
	RequiredPrivilege_KREADWRITEACCESS
	RequiredPrivilege_KMANAGEMENTACCESS
)
// MarshalJSON encodes the enum as its string form, e.g. "kReadAccess".
func (r RequiredPrivilegeEnum) MarshalJSON() ([]byte, error) {
	return json.Marshal(RequiredPrivilegeEnumToValue(r))
}
// UnmarshalJSON decodes a JSON string into its enum value; unrecognized
// strings fall back to RequiredPrivilege_KREADACCESS.
//
// The original discarded json.Unmarshal's error, silently turning
// malformed input into the default value; propagate it instead.
func (r *RequiredPrivilegeEnum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = RequiredPrivilegeEnumFromValue(s)
	return nil
}
// RequiredPrivilegeEnumToValue converts a RequiredPrivilegeEnum to its
// string representation; unknown values map to "kReadAccess".
func RequiredPrivilegeEnumToValue(requiredPrivilegeEnum RequiredPrivilegeEnum) string {
	switch requiredPrivilegeEnum {
	case RequiredPrivilege_KREADWRITEACCESS:
		return "kReadWriteAccess"
	case RequiredPrivilege_KMANAGEMENTACCESS:
		return "kManagementAccess"
	case RequiredPrivilege_KREADACCESS:
		fallthrough
	default:
		// Read access doubles as the fallback for unknown values.
		return "kReadAccess"
	}
}
// RequiredPrivilegeEnumArrayToValue converts a slice of enum values to
// their string representations, preserving order.
func RequiredPrivilegeEnumArrayToValue(requiredPrivilegeEnum []RequiredPrivilegeEnum) []string {
	// Pre-size the result; range replaces the original C-style index
	// loop and the gofmt-violating spacing is corrected.
	convArray := make([]string, len(requiredPrivilegeEnum))
	for i, v := range requiredPrivilegeEnum {
		convArray[i] = RequiredPrivilegeEnumToValue(v)
	}
	return convArray
}
// RequiredPrivilegeEnumFromValue converts the given string to its enum
// representation. Unknown strings map to RequiredPrivilege_KREADACCESS.
func RequiredPrivilegeEnumFromValue(value string) RequiredPrivilegeEnum {
	switch value {
	case "kReadWriteAccess":
		return RequiredPrivilege_KREADWRITEACCESS
	case "kManagementAccess":
		return RequiredPrivilege_KMANAGEMENTACCESS
	default:
		// "kReadAccess" and anything unrecognised resolve to read access.
		return RequiredPrivilege_KREADACCESS
	}
}
|
package segment_tree
// SegmentTree supports point updates and range-sum queries over an int
// slice in O(log n) per operation.
type SegmentTree struct {
	data []int // the original values
	sum  []int // tree nodes; sum[rt] holds the sum of the segment rooted at rt
}

// NewSegmentTree constructs a segment tree over array. Build must be
// called before querying; 4*len(array) nodes suffice for any length.
func NewSegmentTree(array []int) *SegmentTree {
	return &SegmentTree{
		data: array,
		sum:  make([]int, len(array)<<2),
	}
}

// Build populates the tree from the underlying data.
// It is a no-op for an empty tree: the original recursed forever on an
// empty slice (build(0, -1, 0) keeps reproducing itself).
func (st *SegmentTree) Build() {
	if len(st.data) == 0 {
		return
	}
	st.build(0, len(st.data)-1, 0)
}

// build recursively fills the node rt covering data[l..r].
func (st *SegmentTree) build(l, r, rt int) {
	if l == r {
		st.sum[rt] = st.data[l]
		return
	}
	mid := (l + r) >> 1
	st.build(l, mid, rt<<1|1)
	st.build(mid+1, r, (rt+1)<<1)
	st.buildRoot(rt)
}

// buildRoot recomputes node rt from its children.
// For root rt the left child is rt*2+1 = rt<<1|1 and the right child is
// rt*2+2 = (rt+1)*2 = (rt+1)<<1.
func (st *SegmentTree) buildRoot(rt int) {
	st.sum[rt] = st.sum[rt<<1|1] + st.sum[(rt+1)<<1]
}

// Update adds num to the element at index i.
// Guarded against an empty tree for the same reason as Build.
// NOTE(review): only the sums are updated; st.data keeps its original
// value, so a subsequent Build resets all updates (original behavior,
// preserved here).
func (st *SegmentTree) Update(i, num int) {
	if len(st.data) == 0 {
		return
	}
	st.update(0, len(st.data)-1, i, num, 0)
}

// update adds num at index i within the node rt covering data[l..r],
// then refreshes the sums on the path back up.
func (st *SegmentTree) update(l, r, i, num, rt int) {
	if l == r {
		st.sum[rt] += num
		return
	}
	mid := (l + r) >> 1
	if i <= mid {
		st.update(l, mid, i, num, rt<<1|1)
	} else {
		st.update(mid+1, r, i, num, (rt+1)<<1)
	}
	st.buildRoot(rt)
}

// SumRange returns the sum of data[l..r] (inclusive).
// An empty tree always yields 0.
func (st *SegmentTree) SumRange(l, r int) int {
	if len(st.data) == 0 {
		return 0
	}
	return st.sumRange(0, len(st.data)-1, l, r, 0)
}

// sumRange queries the wanted range [l, r] against node rt, which
// covers the data range [L, R].
func (st *SegmentTree) sumRange(L, R, l, r, rt int) int {
	// no overlap between the node range and the wanted range
	if L > r || R < l {
		return 0
	}
	// node range fully contained in the wanted range
	if L >= l && R <= r {
		return st.sum[rt]
	}
	mid := (L + R) >> 1
	return st.sumRange(L, mid, l, r, rt<<1|1) +
		st.sumRange(mid+1, R, l, r, (rt+1)<<1)
}
|
package server
import "math"
// HSin haversin(θ) function
func HSin(theta float64) float64 {
return math.Pow(math.Sin(theta/2), 2)
}
// Distance calculated in km between the given lat/lon coordinates.
func Distance(aLat, aLon, bLat, bLon float64) float64 {
// convert to radians
// must cast radius as float to multiply later
var aRLat, aRLon, bRLat, bRLon, r float64
aRLat = aLat * math.Pi / 180
aRLon = aLon * math.Pi / 180
bRLat = bLat * math.Pi / 180
bRLon = bLon * math.Pi / 180
r = 6378100 // Earth radius in METERS
// calculate
h := HSin(bRLat-aRLat) + math.Cos(aRLat)*math.Cos(bRLat)*HSin(bRLon-aRLon)
return 2 * r * math.Asin(math.Sqrt(h)) / 1000
}
|
package uaa_test
import (
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"regexp"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"github.com/pivotal-cf/on-demand-service-broker/config"
"github.com/pivotal-cf/on-demand-service-broker/integration_tests/helpers"
"github.com/pivotal-cf/on-demand-service-broker/uaa"
)
// The UAA client specs run against a ghttp TLS test double standing in
// for a real UAA server; setupUAARoutes stubs the token endpoint so the
// client under test can authenticate.
var _ = Describe("UAA", func() {
	Describe("Client", func() {
		var (
			server            *ghttp.Server
			uaaClient         *uaa.Client
			uaaConfig         config.UAAConfig
			trustedCert       string
			skipTLSValidation bool
		)
		// Start a fresh TLS server, trust its certificate (unless TLS
		// validation is skipped) and build a client against it.
		BeforeEach(func() {
			server = ghttp.NewTLSServer()
			if !skipTLSValidation {
				rawPem := server.HTTPTestServer.Certificate().Raw
				pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawPem})
				trustedCert = string(pemCert)
			}
			uaaConfig = config.UAAConfig{
				URL: server.URL(),
				Authentication: config.UAACredentials{
					ClientCredentials: config.ClientCredentials{
						ID:     "authentication_id",
						Secret: "authentication_secret",
					},
				},
				ClientDefinition: config.ClientDefinition{
					Authorities:          "some-authority,another-authority",
					AuthorizedGrantTypes: "client_credentials,password",
					ResourceIDs:          "resource1,resource2",
					Scopes:               "admin,read,write",
				},
			}
			uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
			setupUAARoutes(server, uaaConfig)
		})
		AfterEach(func() {
			server.Close()
		})
		Describe("Constructor", func() {
			It("returns a new client", func() {
				uaaClient, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(uaaClient).NotTo(BeNil())
			})
			It("is created with a default random function", func() {
				uaaClient, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(uaaClient.RandFunc).NotTo(BeNil())
				Expect(uaaClient.RandFunc()).NotTo(Equal(uaaClient.RandFunc()))
			})
			It("returns an error when cannot construct the underlying go-uaa client", func() {
				uaaConfig.URL = ""
				_, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError("the target is missing"))
			})
			When("no client credentials are passed", func() {
				It("is created with a noop underlying client", func() {
					uaaConfig = config.UAAConfig{
						URL: server.URL(),
						ClientDefinition: config.ClientDefinition{
							Authorities:          "some",
							AuthorizedGrantTypes: "another",
						},
					}
					uaaClient, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
					Expect(err).ToNot(HaveOccurred())
					Expect(uaaClient).NotTo(BeNil())
					// every operation succeeds without touching the server
					c, err := uaaClient.CreateClient("foo", "bar", "baz")
					Expect(err).NotTo(HaveOccurred())
					Expect(c).To(BeNil())
					c, err = uaaClient.UpdateClient("foo", "bar", "baz")
					Expect(err).NotTo(HaveOccurred())
					Expect(c).To(BeNil())
					err = uaaClient.DeleteClient("foo")
					Expect(err).NotTo(HaveOccurred())
					c, err = uaaClient.GetClient("foo")
					Expect(err).NotTo(HaveOccurred())
					Expect(c).To(BeNil())
				})
			})
			When("skip ssl is set to true", func() {
				var handler *helpers.FakeHandler
				BeforeEach(func() {
					handler = new(helpers.FakeHandler)
					server.RouteToHandler(http.MethodGet, regexp.MustCompile(`/oauth/clients`), ghttp.CombineHandlers(
						handler.Handle,
					))
					handler.RespondsWith(http.StatusOK, `{"resources":[{"client_id":"some-client-id"}]}`)
				})
				It("is created with a noop underlying client", func() {
					uaaClient, err := uaa.New(uaaConfig, "", true)
					Expect(err).ToNot(HaveOccurred())
					Expect(uaaClient).NotTo(BeNil())
					_, err = uaaClient.GetClient("foo")
					Expect(err).ToNot(HaveOccurred())
					Expect(handler.RequestsReceived()).To(Equal(1))
				})
			})
		})
		Describe("#CreateClient", func() {
			var (
				createHandler *helpers.FakeHandler
			)
			BeforeEach(func() {
				createHandler = new(helpers.FakeHandler)
				server.RouteToHandler(http.MethodPost, regexp.MustCompile(`/oauth/clients`), ghttp.CombineHandlers(
					createHandler.Handle,
				))
				// UAA echoes the client back with extra server-side fields
				createJsonResponse := `{
				"scope": [ "admin", "read", "write", "extra-scope" ],
				"client_id": "some-client-id",
				"resource_ids": ["resource1", "resource2", "some-extra-resource"],
				"authorized_grant_types": [ "client_credentials", "password", "token" ],
				"authorities": [ "some-authority", "another-authority", "some-extra-authority" ],
				"name": "some-name",
				"lastModified": 1588809891186,
				"required_user_groups": [ ]
				}`
				createHandler.RespondsWith(http.StatusCreated, createJsonResponse)
			})
			It("creates a client on UAA and returns a client map", func() {
				uaaClient.RandFunc = func() string {
					return "superrandomsecret"
				}
				actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
				Expect(err).NotTo(HaveOccurred())
				By("injecting some properties", func() {
					Expect(actualClient["client_id"]).To(Equal("some-client-id"))
					Expect(actualClient["client_secret"]).To(Equal("superrandomsecret"))
					Expect(actualClient["name"]).To(Equal("some-name"))
				})
				By("using the configured properties", func() {
					Expect(actualClient["scopes"]).To(Equal(uaaConfig.ClientDefinition.Scopes + ",extra-scope"))
					Expect(actualClient["resource_ids"]).To(Equal(uaaConfig.ClientDefinition.ResourceIDs + ",some-extra-resource"))
					Expect(actualClient["authorities"]).To(Equal(uaaConfig.ClientDefinition.Authorities + ",some-extra-authority"))
					Expect(actualClient["authorized_grant_types"]).To(Equal(uaaConfig.ClientDefinition.AuthorizedGrantTypes + ",token"))
				})
				By("creating a client on UAA", func() {
					Expect(createHandler.RequestsReceived()).To(Equal(1))
					request := createHandler.GetRequestForCall(0)
					Expect(request.Body).To(MatchJSON(`
					{
					"scope": [ "admin", "read", "write" ],
					"client_id": "some-client-id",
					"client_secret": "superrandomsecret",
					"resource_ids": ["resource1", "resource2"],
					"authorized_grant_types": [ "client_credentials", "password" ],
					"authorities": [ "some-authority", "another-authority" ],
					"name": "some-name"
					}`,
					), "Expected request body mismatch")
				})
			})
			When("the definition has implicit grant type", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.AuthorizedGrantTypes = "implicit"
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				})
				It("does not generate a secret", func() {
					uaaClient.RandFunc = func() string {
						Fail("secret should not be generated")
						return ""
					}
					actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(actualClient["client_secret"]).To(BeEmpty())
				})
				It("generates the client with a placeholder redirect uri", func() {
					_, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(createHandler.RequestsReceived()).To(Equal(1))
					request := createHandler.GetRequestForCall(0)
					Expect(request.Body).To(MatchJSON(`
					{
					"scope": [ "admin", "read", "write" ],
					"client_id": "some-client-id",
					"resource_ids": ["resource1", "resource2"],
					"authorized_grant_types": [ "implicit" ],
					"authorities": [ "some-authority", "another-authority" ],
					"name": "some-name",
					"redirect_uri": [ "https://placeholder.example.com" ]
					}`), "Expected request body mismatch")
				})
			})
			When("the definition has authorization_code grant type", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.AuthorizedGrantTypes = "authorization_code"
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
					uaaClient.RandFunc = func() string {
						return "a-secret"
					}
				})
				It("does generate a secret", func() {
					actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(actualClient["client_secret"]).NotTo(BeEmpty())
				})
				It("generates the client with a placeholder redirect uri", func() {
					_, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(createHandler.RequestsReceived()).To(Equal(1))
					request := createHandler.GetRequestForCall(0)
					Expect(request.Body).To(MatchJSON(`
					{
					"scope": [ "admin", "read", "write" ],
					"client_id": "some-client-id",
					"client_secret": "a-secret",
					"resource_ids": ["resource1", "resource2"],
					"authorized_grant_types": [ "authorization_code" ],
					"authorities": [ "some-authority", "another-authority" ],
					"name": "some-name",
					"redirect_uri": [ "https://placeholder.example.com" ]
					}`), "Expected request body mismatch")
				})
			})
			When("the definition has allowpublic: true", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.AllowPublic = true
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
					createJsonResponse := `{
					"scope": [ "admin", "read", "write", "extra-scope" ],
					"client_id": "some-client-id",
					"resource_ids": ["resource1", "resource2", "some-extra-resource"],
					"authorized_grant_types": [ "client_credentials", "password", "token" ],
					"authorities": [ "some-authority", "another-authority", "some-extra-authority" ],
					"name": "some-name",
					"lastModified": 1588809891186,
					"required_user_groups": [ ],
					"allowpublic": true
					}`
					createHandler.RespondsWith(http.StatusCreated, createJsonResponse)
				})
				It("sets allowpublic on the client, and set a hardcoded client_secret", func() {
					// See https://www.pivotaltracker.com/n/projects/2482247/stories/183763390
					actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(actualClient["allowpublic"]).To(Equal("true"))
					Expect(actualClient["client_secret"]).To(Equal("-"))
				})
			})
			When("the definition does not have allowpublic", func() {
				BeforeEach(func() {
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
					uaaClient.RandFunc = func() string {
						return "superrandomsecret"
					}
				})
				It("sets allowpublic on the client, and configures a client_secret", func() {
					actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					Expect(actualClient["allowpublic"]).To(Equal("false"))
					Expect(actualClient["client_secret"]).To(Equal("superrandomsecret"))
				})
			})
			When("scopes include ODB_SPACE_GUID", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.Scopes = "scope1,scope-2-ODB_SPACE_GUID.*,odb_space_guid_admin"
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				})
				It("replaces it with the provided space guid", func() {
					_, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					var m map[string]interface{}
					err = json.Unmarshal([]byte(createHandler.GetRequestForCall(0).Body), &m)
					Expect(err).ToNot(HaveOccurred())
					Expect(m["scope"].([]interface{})).To(ContainElements(
						"scope1",
						`scope-2-some-space-guid.*`,
						"odb_space_guid_admin"),
					)
				})
			})
			It("doesn't go to uaa when client definition is not provided", func() {
				uaaConfig.ClientDefinition = config.ClientDefinition{}
				uaaClient, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				Expect(err).NotTo(HaveOccurred())
				actualClient, err := uaaClient.CreateClient("some-client-id", "some-name", "some-space-guid")
				Expect(err).NotTo(HaveOccurred())
				Expect(actualClient).To(BeNil())
				Expect(createHandler.RequestsReceived()).To(Equal(0))
			})
			It("generates a new password every time it is called", func() {
				c1, _ := uaaClient.CreateClient("foo", "foo", "baz")
				c2, _ := uaaClient.CreateClient("foo", "foo", "baz")
				Expect(c1["client_secret"]).NotTo(Equal(c2["client_secret"]))
			})
			It("generates unique but reproducible ids", func() {
				_, err := uaaClient.CreateClient("client1", "name1", "space1")
				Expect(err).NotTo(HaveOccurred())
				_, err = uaaClient.CreateClient("client2", "name2", "space2")
				Expect(err).NotTo(HaveOccurred())
				_, err = uaaClient.CreateClient("client1", "name1", "space1")
				Expect(err).NotTo(HaveOccurred())
				c1ReqBody := toMap(createHandler.GetRequestForCall(0).Body)
				c2ReqBody := toMap(createHandler.GetRequestForCall(1).Body)
				anotherC1ReqBody := toMap(createHandler.GetRequestForCall(2).Body)
				Expect(c1ReqBody["client_id"]).NotTo(Equal(c2ReqBody["client_id"]), "client_ids are not unique")
				Expect(c1ReqBody["client_id"]).To(Equal(anotherC1ReqBody["client_id"]), "client_ids are not reproducible")
			})
			When("client_definition has name set", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.Name = "configured-name"
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				})
				It("uses the configured name", func() {
					_, err := uaaClient.CreateClient("client1", "some-other-name", "space-1")
					Expect(err).NotTo(HaveOccurred())
					c1ReqBody := toMap(createHandler.GetRequestForCall(0).Body)
					Expect(c1ReqBody["name"]).To(Equal("configured-name"))
				})
			})
			It("does not generate a name if not passed and not configured", func() {
				_, err := uaaClient.CreateClient("client1", "", "space-1")
				Expect(err).NotTo(HaveOccurred())
				c1ReqBody := toMap(createHandler.GetRequestForCall(0).Body)
				Expect(c1ReqBody).NotTo(HaveKey("name"))
			})
			It("fails when UAA responds with error", func() {
				createHandler.RespondsOnCall(0, 500, "")
				_, err := uaaClient.CreateClient("some-client-id", "some-name", "space-1")
				Expect(err).To(HaveOccurred())
				errorMsg := fmt.Sprintf("An error occurred while calling %s/oauth/clients", server.URL())
				Expect(err).To(MatchError(ContainSubstring(errorMsg)))
			})
		})
		Describe("#UpdateClient", func() {
			var updateHandler *helpers.FakeHandler
			BeforeEach(func() {
				updateHandler = new(helpers.FakeHandler)
				server.RouteToHandler(
					http.MethodPut, regexp.MustCompile(`/oauth/clients/some-client-id`),
					ghttp.CombineHandlers(
						updateHandler.Handle,
					),
				)
				updateJsonResponse := `{
				"scope": [ "admin", "read", "write", "extra-scope" ],
				"client_id": "some-client-id",
				"resource_ids": ["resource1", "resource2", "some-extra-resource"],
				"authorized_grant_types": [ "client_credentials", "password", "token" ],
				"authorities": [ "some-authority", "another-authority", "some-extra-authority" ],
				"redirect_uri": ["https://example.com/dashboard/some-client-id/response"],
				"name": "some-name",
				"lastModified": 1588809891186,
				"required_user_groups": [ ],
				"allowpublic": true
				}`
				updateHandler.RespondsWith(http.StatusCreated, updateJsonResponse)
			})
			It("doesn't go to uaa when client definition is not provided", func() {
				uaaConfig.ClientDefinition = config.ClientDefinition{}
				uaaClient, err := uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				Expect(err).NotTo(HaveOccurred())
				actualClient, err := uaaClient.UpdateClient("some-client-id", "some-name", "space-1")
				Expect(err).NotTo(HaveOccurred())
				Expect(actualClient).To(BeNil())
				Expect(updateHandler.RequestsReceived()).To(Equal(0))
			})
			It("updates and returns a client map", func() {
				actualClient, err := uaaClient.UpdateClient("some-client-id", "https://example.com/dashboard/some-client-id", "space-guid")
				Expect(err).NotTo(HaveOccurred())
				By("updating the client on UAA", func() {
					Expect(updateHandler.RequestsReceived()).To(Equal(1))
					request := updateHandler.GetRequestForCall(0)
					Expect(request.Body).To(MatchJSON(`
					{
					"scope": [ "admin", "read", "write" ],
					"client_id": "some-client-id",
					"resource_ids": ["resource1", "resource2"],
					"redirect_uri": ["https://example.com/dashboard/some-client-id"],
					"authorized_grant_types": [ "client_credentials", "password" ],
					"authorities": [ "some-authority", "another-authority" ]
					}`,
					), "Expected request body mismatch")
				})
				By("injecting some properties", func() {
					Expect(actualClient["client_id"]).To(Equal("some-client-id"))
				})
				By("using the configured and returned properties", func() {
					Expect(actualClient["scopes"]).To(Equal(uaaConfig.ClientDefinition.Scopes + ",extra-scope"))
					Expect(actualClient["resource_ids"]).To(Equal(uaaConfig.ClientDefinition.ResourceIDs + ",some-extra-resource"))
					Expect(actualClient["authorities"]).To(Equal(uaaConfig.ClientDefinition.Authorities + ",some-extra-authority"))
					Expect(actualClient["authorized_grant_types"]).To(Equal(uaaConfig.ClientDefinition.AuthorizedGrantTypes + ",token"))
				})
			})
			It("does not send redirect_uri when not passed", func() {
				_, err := uaaClient.UpdateClient("some-client-id", "", "some-space")
				Expect(err).NotTo(HaveOccurred())
				Expect(updateHandler.RequestsReceived()).To(Equal(1))
				request := updateHandler.GetRequestForCall(0)
				Expect(request.Body).To(MatchJSON(`
				{
				"scope": [ "admin", "read", "write" ],
				"client_id": "some-client-id",
				"resource_ids": ["resource1", "resource2"],
				"authorized_grant_types": [ "client_credentials", "password" ],
				"authorities": [ "some-authority", "another-authority" ]
				}`,
				), "Expected request body mismatch")
			})
			When("properties include ODB_SPACE_GUID", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.Scopes = "scope1,scope-2-ODB_SPACE_GUID.*,odb_space_guid_admin"
					uaaConfig.ClientDefinition.Authorities = "authorities1,authorities-2-ODB_SPACE_GUID.*,odb_space_guid_admin"
					uaaConfig.ClientDefinition.ResourceIDs = "resource1,resource-2-ODB_SPACE_GUID.*,odb_space_guid_admin"
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				})
				It("replaces it with the provided space guid", func() {
					_, err := uaaClient.UpdateClient("some-client-id", "", "some-space-guid")
					Expect(err).NotTo(HaveOccurred())
					var m map[string]interface{}
					err = json.Unmarshal([]byte(updateHandler.GetRequestForCall(0).Body), &m)
					Expect(err).ToNot(HaveOccurred())
					Expect(m["scope"].([]interface{})).To(ContainElements(
						"scope1",
						`scope-2-some-space-guid.*`,
						"odb_space_guid_admin"),
					)
					Expect(m["authorities"].([]interface{})).To(ContainElements(
						"authorities1",
						`authorities-2-some-space-guid.*`,
						"odb_space_guid_admin"),
					)
					Expect(m["resource_ids"].([]interface{})).To(ContainElements(
						"resource1",
						`resource-2-some-space-guid.*`,
						"odb_space_guid_admin"),
					)
				})
			})
			When("client_definition has name set", func() {
				BeforeEach(func() {
					uaaConfig.ClientDefinition.Name = "configured-name"
					uaaConfig.ClientDefinition.AllowPublic = true
					uaaClient, _ = uaa.New(uaaConfig, trustedCert, skipTLSValidation)
				})
				It("uses the configured name", func() {
					_, err := uaaClient.UpdateClient("some-client-id", "https://example.com/dashboard/some-client-id", "space-guid")
					Expect(err).NotTo(HaveOccurred())
					body := toMap(updateHandler.GetRequestForCall(0).Body)
					Expect(body["name"]).To(Equal("configured-name"))
				})
			})
			It("fails when UAA responds with error", func() {
				updateHandler.RespondsOnCall(0, 500, "")
				_, err := uaaClient.UpdateClient("some-client-id", "some-dashboard", "some-space")
				Expect(err).To(HaveOccurred())
				errorMsg := fmt.Sprintf("An error occurred while calling %s/oauth/clients/some-client-id", server.URL())
				Expect(err).To(MatchError(ContainSubstring(errorMsg)))
			})
		})
		Describe("#DeleteClient", func() {
			var (
				deleteHandler *helpers.FakeHandler
			)
			BeforeEach(func() {
				deleteHandler = new(helpers.FakeHandler)
				server.RouteToHandler(
					http.MethodDelete, regexp.MustCompile(`/oauth/clients/some-client-id`),
					ghttp.CombineHandlers(
						deleteHandler.Handle,
					),
				)
				deleteJsonResponse := `{
				"scope": [ "admin", "read", "write", "extra-scope" ],
				"client_id": "some-client-id",
				"resource_ids": ["resource1", "resource2", "some-extra-resource"],
				"authorized_grant_types": [ "client_credentials", "password", "token" ],
				"authorities": [ "some-authority", "another-authority", "some-extra-authority" ],
				"redirect_uri": ["https://example.com/dashboard/some-client-id/response"],
				"name": "some-name",
				"lastModified": 1588809891186,
				"required_user_groups": [ ]
				}`
				deleteHandler.RespondsWith(http.StatusOK, deleteJsonResponse)
			})
			It("deletes the client successfully", func() {
				err := uaaClient.DeleteClient("some-client-id")
				Expect(err).NotTo(HaveOccurred())
				By("deleting the client on UAA", func() {
					Expect(deleteHandler.RequestsReceived()).To(Equal(1))
				})
			})
			It("fails when UAA responds with error", func() {
				deleteHandler.RespondsOnCall(0, http.StatusNotFound, "")
				err := uaaClient.DeleteClient("some-client-id")
				Expect(err).To(HaveOccurred())
				errorMsg := fmt.Sprintf("An error occurred while calling %s/oauth/clients/some-client-id", server.URL())
				Expect(err).To(MatchError(ContainSubstring(errorMsg)))
			})
		})
		Describe("#GetClient", func() {
			var (
				listHandler *helpers.FakeHandler
				query       []string
			)
			BeforeEach(func() {
				listHandler = new(helpers.FakeHandler)
				server.RouteToHandler(http.MethodGet, regexp.MustCompile(`/oauth/clients`), ghttp.CombineHandlers(
					listHandler.Handle,
				))
				query = []string{`count=1`, `filter=client_id+eq+%22some-client-id%22`, `startIndex=1`}
				listHandler.
					WithQueryParams(query...).
					RespondsWith(http.StatusOK, `{"resources":[{"client_id":"some-client-id"}]}`)
			})
			It("returns a client when the client exists", func() {
				client, err := uaaClient.GetClient("some-client-id")
				Expect(err).NotTo(HaveOccurred())
				Expect(client).NotTo(BeNil())
				Expect(client["client_id"]).To(Equal("some-client-id"))
			})
			It("returns nil when the client does not exist", func() {
				listHandler.
					WithQueryParams(query...).
					RespondsWith(http.StatusOK, `{"resources":[]}`)
				client, err := uaaClient.GetClient("some-client-id")
				Expect(err).NotTo(HaveOccurred())
				Expect(client).To(BeNil())
			})
			It("fails when cannot query list of clients", func() {
				listHandler.
					WithQueryParams(query...).
					RespondsWith(http.StatusBadRequest, `{"resources":[]}`)
				_, err := uaaClient.GetClient("some-client-id")
				Expect(err).To(HaveOccurred())
				errorMsg := fmt.Sprintf("An error occurred while calling %s/oauth/clients", server.URL())
				Expect(err).To(MatchError(ContainSubstring(errorMsg)))
			})
		})
		Describe("#HasClientDefinition", func() {
			It("returns true when at least one property is set", func() {
				c := config.UAAConfig{ClientDefinition: config.ClientDefinition{AuthorizedGrantTypes: "123"}}
				client, err := uaa.New(c, "", skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(client.HasClientDefinition()).To(BeTrue())
				c = config.UAAConfig{ClientDefinition: config.ClientDefinition{Authorities: "asd"}}
				client, err = uaa.New(c, "", skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(client.HasClientDefinition()).To(BeTrue())
				c = config.UAAConfig{ClientDefinition: config.ClientDefinition{ResourceIDs: "fff"}}
				client, err = uaa.New(c, "", skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(client.HasClientDefinition()).To(BeTrue())
				c = config.UAAConfig{ClientDefinition: config.ClientDefinition{Scopes: "admin"}}
				client, err = uaa.New(c, "", skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(client.HasClientDefinition()).To(BeTrue())
			})
			It("returns false when no property is set", func() {
				client, err := uaa.New(config.UAAConfig{}, "", skipTLSValidation)
				Expect(err).ToNot(HaveOccurred())
				Expect(client.HasClientDefinition()).To(BeFalse())
			})
		})
	})
})
// toMap decodes a JSON request body into a generic map, failing the
// spec if the body is not valid JSON.
func toMap(body string) map[string]interface{} {
	decoded := make(map[string]interface{})
	unmarshalErr := json.Unmarshal([]byte(body), &decoded)
	Expect(unmarshalErr).NotTo(HaveOccurred())
	return decoded
}
// setupUAARoutes stubs the UAA token endpoint so that a client built
// from uaaConfig can authenticate with its client credentials.
func setupUAARoutes(uaaAPI *ghttp.Server, uaaConfig config.UAAConfig) {
	creds := uaaConfig.Authentication.ClientCredentials
	tokenHandler := new(helpers.FakeHandler)
	uaaAPI.RouteToHandler(http.MethodPost, regexp.MustCompile(`/oauth/token`), ghttp.CombineHandlers(
		ghttp.VerifyBasicAuth(creds.ID, creds.Secret),
		tokenHandler.Handle,
	))
	tokenHandler.RespondsWith(http.StatusOK, `{ "access_token": " some-token", "expires_in": 3600, "token_type":"bearer"}`)
}
|
package manager
import (
"github.com/gin-gonic/gin"
"starter/internal/entities"
"starter/internal/manager/controllers"
managerMiddleWares "starter/internal/manager/middlewares"
"starter/pkg/app"
"starter/pkg/captcha"
"starter/pkg/database/managers"
"starter/pkg/middlewares"
"starter/pkg/rbac"
"starter/pkg/sessions"
)
// GetEngine wires up all routes for the manager service (example setup).
func GetEngine(engine *gin.Engine) {
	// Static asset path; "./test" is just a temporary example directory.
	engine.Static("/test", "./test")
	// Register shared middleware.
	engine.Use(middlewares.CORS)
	// The login route must be registered before the JWT auth middleware.
	engine.POST("/login", controllers.Login)
	engine.GET("/captcha", func(context *gin.Context) {
		cpat := captcha.New("medivh")
		app.NewResponse(app.Success, gin.H{"content": cpat.ToBase64EncodeString(), "captcha_id": cpat.CaptchaID}).End(context)
	})
	engine.POST("/captcha", func(context *gin.Context) {
		id := context.DefaultQuery("captcha_id", "medivh")
		app.Logger().Debug(captcha.Verify(id, context.DefaultQuery("captcha", "")))
	})
	engine.Use(middlewares.VerifyAuth)
	sessions.Inject(engine)
	engine.GET("/staffs/info", controllers.StaffInfo)
	// Register the permission-check middleware.
	engine.Use(managerMiddleWares.CheckPermission)
	// Register a shared upload endpoint.
	var saveHandler = new(app.DefaultSaveHandler).SetPrefix("http://manager.golang-project.com/").SetDst("./test/")
	engine.POST("/upload", app.Upload("file", saveHandler, "png", "jpg"))
	// CSRF-token support. /upload is exempt from the CSRF-token check,
	// hence the upload route is registered above this middleware.
	engine.Use(middlewares.CsrfToken)
	// Register the CRUD endpoints generated for the data entities.
	managers.New().
		Register(entities.Staff{}, managers.Mongo).
		Register(entities.Mgo{}, managers.Mgo).
		RegisterCustomManager(&controllers.CustomOrder{}, entities.Order{}).
		Start(engine)
	// Register the CRUD endpoints for the RBAC permission tables.
	rbac.Inject(engine)
}
|
package middleware
// func Authentication(next http.Handler) http.Handler {
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// log.Println(r.RequestURI)
// type tbody struct {
// UserID, CompanyID int64
// AccessToken string
// }
// var body tbody
// err := json.NewDecoder(r.Body).Decode(&body)
// if err != nil {
// fmt.Println(err)
// utils.Message(w, answer.WRONG_DATA, 400)
// return
// }
// database, err := db.CopmanyDB(body.CompanyID)
// if err != nil {
// fmt.Println(err)
// utils.Message(w, answer.NOT_FOUND_COMPANY, 400)
// return
// }
// defer database.Close()
// err = db.CheckUserAccessToken(database, body.UserID, body.AccessToken)
// if err != nil {
// fmt.Println(err)
// utils.Message(w, answer.UNAUTHORIZED, 401)
// return
// }
// next.ServeHTTP(w, r)
// })
// }
|
package main
import "sort"
//852. 山脉数组的峰顶索引
//符合下列属性的数组 arr 称为 山脉数组 :
//arr.length >= 3
//存在 i(0 < i< arr.length - 1)使得:
//arr[0] < arr[1] < ... arr[i-1] < arr[i]
//arr[i] > arr[i+1] > ... > arr[arr.length - 1]
//给你由整数组成的山脉数组 arr ,返回任何满足 arr[0] < arr[1] < ... arr[i - 1] < arr[i] > arr[i + 1] > ... > arr[arr.length - 1] 的下标 i 。
//
//
//
//示例 1:
//
//输入:arr = [0,1,0]
//输出:1
//示例 2:
//
//输入:arr = [0,2,1,0]
//输出:1
//示例 3:
//
//输入:arr = [0,10,5,2]
//输出:1
//示例 4:
//
//输入:arr = [3,4,5,1]
//输出:2
//示例 5:
//
//输入:arr = [24,69,100,99,79,78,67,36,26,19]
//输出:2
//
//
//提示:
//
//3 <= arr.length <= 10^4
//0 <= arr[i] <= 10^6
//题目数据保证 arr 是一个山脉数组
// peakIndexInMountainArray returns the index of the peak element of a
// mountain array (LeetCode 852) in O(log n) time.
func peakIndexInMountainArray(arr []int) int {
	// "arr[i] > arr[i+1]" is false strictly before the peak and true
	// from the peak onwards, so sort.Search locates the first
	// descending position — the peak itself.
	descendsAt := func(i int) bool {
		return arr[i] > arr[i+1]
	}
	return sort.Search(len(arr)-1, descendsAt)
}
|
package main
// Bidder represents a string name for a particular bidder
type Bidder string

const (
	// Nike represents Nike as a bidder/buyer on the ad exchange
	Nike Bidder = "nike"
	// Amazon represents Amazon as a bidder/buyer on the ad exchange
	Amazon Bidder = "amazon"
	// Apple represents Apple as a bidder/buyer on the ad exchange
	Apple Bidder = "apple"
)

// bidders is the canonical list of supported bidders.
var bidders = []Bidder{Nike, Apple, Amazon}

// GetAllBidders returns the list of supported bidders.
// It returns a copy so callers cannot mutate the package-level list
// (the previous version leaked the shared slice directly).
func GetAllBidders() []Bidder {
	out := make([]Bidder, len(bidders))
	copy(out, bidders)
	return out
}
|
package tapo
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/evcc-io/evcc/util"
"github.com/evcc-io/evcc/util/request"
"github.com/mergermarket/go-pkcs7"
)
// Tapo homepage + api reverse engineering results
// https://www.tapo.com/de/
// Credits to & inspired by:
// https://k4czp3r.xyz/reverse-engineering/tp-link/tapo/2020/10/15/reverse-engineering-tp-link-tapo.html
// https://github.com/fishbigger/TapoP100
// https://github.com/artemvang/p100-go
// Timeout is the HTTP request timeout applied to all Tapo API calls.
const Timeout = time.Second * 15
// Connection is the Tapo connection
type Connection struct {
	*request.Helper
	log             *util.Logger
	URI             string            // device endpoint, normalized to "<scheme>://<host>/app"
	EncodedUser     string            // SHA1-hashed, then base64-encoded username
	EncodedPassword string            // base64-encoded password
	Cipher          *ConnectionCipher // AES key/IV negotiated during Handshake
	SessionID       string            // session cookie obtained by Handshake
	Token           string            // login token obtained by Login
	TerminalUUID    string            // device MAC, fetched after login
	updated         time.Time
	lasttodayenergy int64
	energy          int64
}
// NewConnection creates a new Tapo device connection.
// User is encoded by using MessageDigest of SHA1 which is afterwards B64 encoded.
// Password is directly B64 encoded.
func NewConnection(uri, user, password string) (*Connection, error) {
	if uri == "" {
		return nil, errors.New("missing uri")
	}
	if user == "" || password == "" {
		// errors.New instead of fmt.Errorf: no format verbs needed
		return nil, errors.New("missing user or password")
	}
	// normalize the device URI; the "/app" suffix is re-appended below
	for _, suffix := range []string{"/", "/app"} {
		uri = strings.TrimSuffix(uri, suffix)
	}
	log := util.NewLogger("tapo")
	// nosemgrep:go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
	// SHA1 is mandated by the Tapo login protocol, not used for security.
	// sha1.Sum cannot fail, so the previous pattern of returning a fully
	// built conn alongside a (theoretically non-nil) Write error is gone.
	hash := sha1.Sum([]byte(user))
	userhash := hex.EncodeToString(hash[:])
	conn := &Connection{
		log:             log,
		Helper:          request.NewHelper(log),
		URI:             fmt.Sprintf("%s/app", util.DefaultScheme(uri, "http")),
		EncodedUser:     base64.StdEncoding.EncodeToString([]byte(userhash)),
		EncodedPassword: base64.StdEncoding.EncodeToString([]byte(password)),
	}
	conn.Client.Timeout = Timeout
	return conn, nil
}
// Login provides the Tapo device session token and MAC address (TerminalUUID).
func (d *Connection) Login() error {
	// (re-)negotiate the session cookie and AES cipher first
	err := d.Handshake()
	if err != nil {
		return err
	}
	req := map[string]interface{}{
		"method": "login_device",
		"params": map[string]interface{}{
			"username": d.EncodedUser,
			"password": d.EncodedPassword,
		},
	}
	res, err := d.DoSecureRequest(d.URI, req)
	if err != nil {
		return err
	}
	if err := d.CheckErrorCode(res.ErrorCode); err != nil {
		return err
	}
	d.Token = res.Result.Token
	// the device MAC doubles as the TerminalUUID in subsequent requests
	deviceResponse, err := d.ExecMethod("get_device_info", false)
	if err != nil {
		return err
	}
	d.TerminalUUID = deviceResponse.Result.MAC
	return nil
}
// Handshake provides the Tapo device session cookie and encryption cipher.
//
// Fixes:
//   - the handshake request now uses the connection's HTTP client (which has
//     Timeout set in NewConnection) instead of http.Post's default client,
//     which has no timeout;
//   - strings.Split never returns an empty slice, so the previous
//     len(cookie) == 0 check could not detect a missing Set-Cookie header;
//     the header itself is checked instead;
//   - the decrypted key is validated to be exactly 32 bytes before slicing
//     into the 16-byte AES key and 16-byte IV, avoiding a slice panic on a
//     malformed response.
func (d *Connection) Handshake() error {
	privKey, pubKey, err := GenerateRSAKeys()
	if err != nil {
		return err
	}

	pubPEM, err := DumpRSAPEM(pubKey)
	if err != nil {
		return err
	}

	req, err := json.Marshal(map[string]interface{}{
		"method": "handshake",
		"params": map[string]interface{}{
			"key":             string(pubPEM),
			"requestTimeMils": 0,
		},
	})
	if err != nil {
		return err
	}

	resp, err := d.Client.Post(d.URI, "application/json", bytes.NewReader(req))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var res DeviceResponse
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		return err
	}
	if err := d.CheckErrorCode(res.ErrorCode); err != nil {
		return err
	}

	encryptedEncryptionKey, err := base64.StdEncoding.DecodeString(res.Result.Key)
	if err != nil {
		return err
	}

	encryptionKey, err := rsa.DecryptPKCS1v15(rand.Reader, privKey, encryptedEncryptionKey)
	if err != nil {
		return err
	}

	// the device returns a 16-byte AES key followed by a 16-byte IV
	if len(encryptionKey) != 32 {
		return fmt.Errorf("unexpected encryption key length: %d", len(encryptionKey))
	}
	d.Cipher = &ConnectionCipher{
		Key: encryptionKey[:16],
		Iv:  encryptionKey[16:],
	}

	header := resp.Header.Get("Set-Cookie")
	if header == "" {
		return errors.New("missing session cookie")
	}
	d.SessionID = strings.Split(header, ";")[0]

	return nil
}
// ExecMethod executes a Tapo device command method and provides the corresponding response.
func (d *Connection) ExecMethod(method string, deviceOn bool) (*DeviceResponse, error) {
	payload := map[string]interface{}{
		"method":          method,
		"requestTimeMils": int(time.Now().Unix() * 1000),
	}

	// only set_device_info carries parameters and the terminal id
	if method == "set_device_info" {
		payload["params"] = map[string]interface{}{
			"device_on": deviceOn,
		}
		payload["terminalUUID"] = d.TerminalUUID
	}

	res, err := d.DoSecureRequest(fmt.Sprintf("%s?token=%s", d.URI, d.Token), payload)
	if err != nil {
		return nil, err
	}

	// nickname and SSID are reported base64-encoded by the device
	if method == "get_device_info" {
		if res.Result.Nickname, err = base64Decode(res.Result.Nickname); err != nil {
			return nil, err
		}
		if res.Result.SSID, err = base64Decode(res.Result.SSID); err != nil {
			return nil, err
		}
	}

	return res, nil
}
// ExecCmd executes a Tapo api command and provides the response
func (d *Connection) ExecCmd(method string, enable bool) (*DeviceResponse, error) {
	// re-login when the session is older than the refresh interval; this also
	// covers the very first call, since d.updated starts at the zero time
	const refreshInterval = 600 * time.Minute
	if time.Since(d.updated) >= refreshInterval {
		if err := d.Login(); err != nil {
			return nil, err
		}
		d.updated = time.Now()
	}

	return d.ExecMethod(method, enable)
}
// CurrentPower provides current power consuption
func (d *Connection) CurrentPower() (float64, error) {
	res, err := d.ExecCmd("get_energy_usage", false)
	if err != nil {
		return 0, err
	}

	// scaled by 1e3 — presumably mW reported by the device, converted to W
	return float64(res.Result.Current_Power) / 1e3, nil
}
// ChargedEnergy collects the daily charged energy
func (d *Connection) ChargedEnergy() (float64, error) {
	res, err := d.ExecCmd("get_energy_usage", false)
	if err != nil {
		return 0, err
	}

	// accumulate only increases; a drop in the daily counter (reset at
	// midnight, presumably) contributes nothing
	if delta := res.Result.Today_Energy - d.lasttodayenergy; delta > 0 {
		d.energy += delta
	}
	d.lasttodayenergy = res.Result.Today_Energy

	return float64(d.energy) / 1000, nil
}
// DoSecureRequest executes a Tapo device request by encoding the request and decoding its response.
// The inner request is AES-CBC encrypted with the cipher negotiated in
// Handshake, wrapped in a securePassthrough envelope, and sent with the
// session cookie; the response is base64-decoded and decrypted again.
func (d *Connection) DoSecureRequest(uri string, taporequest map[string]interface{}) (*DeviceResponse, error) {
	payload, err := json.Marshal(taporequest)
	if err != nil {
		return nil, err
	}

	d.log.TRACE.Printf("request: %s", string(payload))

	encryptedRequest, err := d.Cipher.Encrypt(payload)
	if err != nil {
		return nil, err
	}

	// securePassthrough envelope carrying the encrypted inner request
	data := map[string]interface{}{
		"method": "securePassthrough",
		"params": map[string]interface{}{
			"request": base64.StdEncoding.EncodeToString(encryptedRequest),
		},
	}

	req, err := request.New(http.MethodPost, uri, request.MarshalJSON(data), map[string]string{
		"Cookie": d.SessionID,
	})
	if err != nil {
		return nil, err
	}

	var res *DeviceResponse
	if err := d.DoJSON(req, &res); err != nil {
		return nil, err
	}

	// Login attempt in case of tapo switch connection hiccups (error 9999).
	// NOTE(review): the retry re-sends the ORIGINAL request, which still
	// carries the pre-login session cookie in its header, the old token in
	// the URI, and a body encrypted with the pre-login cipher — verify the
	// device actually accepts this before relying on the retry path.
	if res.ErrorCode == 9999 {
		if err := d.Login(); err != nil {
			return nil, err
		}
		if err := d.DoJSON(req, &res); err != nil {
			return nil, err
		}
	}

	if err := d.CheckErrorCode(res.ErrorCode); err != nil {
		return nil, err
	}

	decodedResponse, err := base64.StdEncoding.DecodeString(res.Result.Response)
	if err != nil {
		return nil, err
	}

	decryptedResponse, err := d.Cipher.Decrypt(decodedResponse)
	if err != nil {
		return nil, err
	}

	d.log.TRACE.Printf("decrypted result: %v", string(decryptedResponse))

	var deviceResp *DeviceResponse
	err = json.Unmarshal(decryptedResponse, &deviceResp)

	return deviceResp, err
}
// CheckErrorCode maps a Tapo device error code onto a Go error.
// Code 0 means success and yields nil.
//
// Fixes: removed the trailing space from the -1003 description, dropped the
// unreachable "0: Success" table entry, and added a fallback description so
// unknown codes no longer render as "tapo error N: " with an empty suffix.
func (d *Connection) CheckErrorCode(errorCode int) error {
	if errorCode == 0 {
		return nil
	}

	errorDesc := map[int]string{
		9999:  "Login failed, invalid user or password",
		-1002: "Incorrect Request/Method",
		-1003: "JSON formatting error",
		-1010: "Invalid Public Key Length",
		-1012: "Invalid terminalUUID",
		-1501: "Invalid Request or Credentials",
	}

	desc, ok := errorDesc[errorCode]
	if !ok {
		desc = "Unknown error"
	}

	return fmt.Errorf("tapo error %d: %s", errorCode, desc)
}
// Encrypt PKCS7-pads the payload and encrypts it with AES-CBC using the
// negotiated session key and IV.
func (c *ConnectionCipher) Encrypt(payload []byte) ([]byte, error) {
	padded, err := pkcs7.Pad(payload, aes.BlockSize)
	if err != nil {
		return nil, err
	}

	block, err := aes.NewCipher(c.Key)
	if err != nil {
		return nil, err
	}

	out := make([]byte, len(padded))
	cipher.NewCBCEncrypter(block, c.Iv).CryptBlocks(out, padded)

	return out, nil
}
// Decrypt AES-CBC decrypts the payload with the session key/IV and strips
// the PKCS7 padding.
func (c *ConnectionCipher) Decrypt(payload []byte) ([]byte, error) {
	block, err := aes.NewCipher(c.Key)
	if err != nil {
		return nil, err
	}

	plain := make([]byte, len(payload))
	cipher.NewCBCDecrypter(block, c.Iv).CryptBlocks(plain, payload)

	return pkcs7.Unpad(plain, aes.BlockSize)
}
func DumpRSAPEM(pubKey *rsa.PublicKey) ([]byte, error) {
pubKeyPKIX, err := x509.MarshalPKIXPublicKey(pubKey)
if err != nil {
return nil, err
}
pubPEM := pem.EncodeToMemory(
&pem.Block{
Type: "PUBLIC KEY",
Bytes: pubKeyPKIX,
},
)
return pubPEM, nil
}
// GenerateRSAKeys creates the ephemeral 1024-bit RSA key pair used for the
// Tapo handshake (the key size is dictated by the device protocol).
func GenerateRSAKeys() (*rsa.PrivateKey, *rsa.PublicKey, error) {
	priv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		return nil, nil, err
	}
	return priv, &priv.PublicKey, nil
}
// base64Decode decodes a standard-encoding base64 string into plain text.
func base64Decode(encoded string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
|
/*
* Wager service APIs
*
* APIs for a wager system
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package models
import (
"context"
"github.com/astaxie/beego/orm"
)
// WagerPurchase is the ORM model of a single purchase of a wager.
type WagerPurchase struct {
	Id          int64   `orm:"auto;pk" json:"id"` // auto-increment primary key
	WagerId     int64   `json:"wager_id"`         // id of the purchased wager
	BuyingPrice float32 `json:"buying_price"`     // price paid for this purchase
	BoughtAt    int64   `json:"bought_at"`        // purchase time as unix timestamp — presumably seconds; confirm with callers
}
// Create inserts the purchase into the database and returns the new row id.
// NOTE(review): ctx is currently unused — beego orm v1's Insert takes no
// context; kept in the signature for interface compatibility.
func (w *WagerPurchase) Create(ctx context.Context) (int64, error) {
	o := orm.NewOrm()
	return o.Insert(w)
}
|
package graph
import (
_ "leetcode/topic"
"sort"
)
/*
1030
距离顺序排列矩阵单元格
思路:
- 由于输出是一个2列 R* C行的二维数组
- 所以先将矩阵转换为上面说得二维数组
- 再对二维数组中的数据排序
*/
func allCellsDistOrder(R int, C int, r0 int, c0 int) [][]int {
ret := make([][]int, 0, R*C)
for i := 0; i < R; i++ {
for j := 0; j < C; j++ {
ret = append(ret, []int{i, j})
}
}
sort.Slice(ret, func(i, j int) bool {
a, b := ret[i], ret[j]
return abs(a[0]-r0)+abs(a[1]-c0) < abs(b[0]-r0)+abs(b[1]-c0)
})
return ret
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
/*
 * 797. All Paths From Source to Target
 * Input:  graph = [[4,3,1],[3,2,4],[3],[4],[]]
 * Output: [[0,4],[0,3,4],[0,1,3,4],[0,1,2,3,4],[0,1,4]]
 *
 * The given graph is directed and acyclic, so all paths can be enumerated
 * with a recursive depth-first search.
 *
 * With N nodes: if the search reaches node N-1, the path is just {N-1};
 * otherwise, for any other node `node`, the paths are {node} prepended to
 * every path from each direct neighbor `nei` of `node` to N-1.
 */
func AllPathsSourceTarget(graph [][]int) [][]int {
	// delegate to the recursive DFS starting from the source node 0
	return allPathsSourceTargetDfs(graph, 0)
}
func allPathsSourceTargetDfs(graph [][]int, node int) [][]int {
var ans [][]int
N := len(graph)
if node == N-1 {
path := []int{N - 1}
ans = append(ans, path)
return ans
}
for _, val := range graph[node] {
for _, paths := range allPathsSourceTargetDfs(graph, val) {
paths = append([]int{node}, paths...)
ans = append(ans, paths)
}
}
return ans
}
|
package sqlite3
import (
"database/sql"
"github.com/sohaha/zlsgo/zfile"
"github.com/sohaha/zlsgo/zutil"
"github.com/zlsgo/zdb/driver"
)
// compile-time interface conformance checks
var _ driver.IfeConfig = &Config{}
var _ driver.Dialect = &Config{}

// Config database configuration
type Config struct {
	driver.Typ          // database dialect type; stamped to driver.SQLite by GetDsn
	File        string  // database file path; defaults to "zlsgo.db" when empty
	Dsn         string  // explicit DSN; overrides File/Memory/Parameters when set
	Parameters  string  // extra DSN query parameters appended to the generated DSN
	db          *sql.DB // lazily opened connection, cached by MustDB
	Memory      bool    // open as shared-cache in-memory database
	ForeignKeys bool    // enable the foreign_keys pragma on open
}
// DB returns the database handle, discarding any open error.
// Use MustDB when the error matters.
func (c *Config) DB() *sql.DB {
	db, _ := c.MustDB() // error intentionally ignored; see MustDB
	return db
}
// MustDB lazily opens the database connection on first use and caches it.
// NOTE(review): the lazy open is not synchronized — confirm callers
// serialize first use, or a concurrent first call races on c.db.
func (c *Config) MustDB() (*sql.DB, error) {
	if c.db != nil {
		return c.db, nil
	}

	db, err := sql.Open(c.GetDriver(), c.GetDsn())
	c.db = db
	if db != nil && c.ForeignKeys {
		// best effort: enable foreign key enforcement for this connection
		_, _ = db.Exec("PRAGMA foreign_keys = ON")
	}
	return c.db, err
}
// SetDB injects an externally created database handle, bypassing MustDB's
// lazy open.
func (c *Config) SetDB(db *sql.DB) {
	c.db = db
}
// GetDsn builds the SQLite DSN. An explicit Dsn takes precedence; otherwise
// the DSN is derived from File, Memory and Parameters.
// Side effect: stamps the config's dialect type as driver.SQLite.
func (c *Config) GetDsn() string {
	if c.Dsn != "" {
		return c.Dsn
	}

	c.Typ = driver.SQLite

	file := c.File
	if file == "" {
		file = "zlsgo.db"
	}

	query := "?"
	if c.Memory {
		query = "?cache=shared&mode=memory"
	}

	// NOTE(review): when Memory is false this produces "?&<params>" —
	// apparently tolerated by the driver, but worth confirming
	return "file:" + zfile.RealPath(file) + query + "&" + c.Parameters
}
// Value reports the dialect type implemented by this configuration.
func (c *Config) Value() driver.Typ {
	return driver.SQLite
}
|
package eventbus
import (
"log"
"sync"
"sync/atomic"
)
const (
	// maxBufSize is the per-handler message channel capacity; a send beyond
	// it blocks (Publish logs a warning when the buffer is full).
	maxBufSize = 1000
)

// eventbus routes messages to handlers registered per address.
type eventbus struct {
	handlers      map[string]*handlers // registered handler sets keyed by address
	handlersLock  sync.RWMutex         // guards the handlers map
	replySequence int64                // atomic counter used to generate reply addresses
}

// package-level singleton, created once by EventBus()
var eb *eventbus
var once sync.Once
// EventBus returns the process-wide singleton event bus, creating it on
// first use.
func EventBus() *eventbus {
	once.Do(func() {
		eb = &eventbus{handlers: map[string]*handlers{}}
	})
	return eb
}
// Message is an arbitrary payload carried over the bus.
type Message interface{}

// ReplyAddress identifies a one-shot reply handler registration.
// NOTE(review): it is converted elsewhere with string(ReplyAddress), which
// yields a single rune rather than a decimal string; the conversion is used
// consistently throughout this file so addresses still match each other —
// do not change the representation in one place only.
type ReplyAddress int64

// WrapperMessage wraps a payload together with the owning bus and optional
// reply routing information.
type WrapperMessage struct {
	eb           *eventbus     // owning bus, used by Reply
	replyAddress *ReplyAddress // where replies are delivered; nil when none expected
	Body         Message       // the actual payload
}
// newWrapperMessage builds a WrapperMessage and, when a reply address is
// given, registers replyHandler as a one-shot (isReply) handler under that
// address so the receiver's Reply can reach it.
func newWrapperMessage(message Message, eb *eventbus, replyAddress *ReplyAddress, replyHandler func(*WrapperMessage)) *WrapperMessage {
	if replyAddress != nil {
		// see NOTE on ReplyAddress: string(int64) is a rune conversion,
		// consistent with the other address conversions in this file
		eb.addLocalHandlerHolder(string(*replyAddress), true, replyHandler)
	}
	return &WrapperMessage{
		eb:           eb,
		replyAddress: replyAddress,
		Body:         message,
	}
}
// Reply sends message back to the sender's reply address, optionally
// registering replyHandler for a further reply. It is a no-op when the
// original message carried no reply address.
func (w *WrapperMessage) Reply(message Message, replyHandler func(*WrapperMessage)) {
	if w.replyAddress != nil {
		w.eb.sendReply(w.replyAddress, message, replyHandler)
	}
}
// sendReply delivers a reply message to address.
//
// Fix: the previous implementation additionally registered replyHandler
// under a freshly generated address (eb.generateReplyAddress()) that nothing
// ever sent to, leaking a one-shot handler on every reply-with-handler.
// deliverMessage already performs the correct registration: when
// replyHandler is non-nil it attaches a new reply address to the outgoing
// message and newWrapperMessage registers the handler under that address.
func (eb *eventbus) sendReply(address *ReplyAddress, message Message, replyHandler func(*WrapperMessage)) {
	eb.deliverMessage(string(*address), message, replyHandler)
}
// handlerHolder binds one handler function to an address together with its
// delivery queue and lifecycle signal.
type handlerHolder struct {
	handler  func(message *WrapperMessage) // user callback
	isReply  bool                          // one-shot reply handler: unregistered after a delivery
	address  string                        // address the holder is registered under
	messages chan *WrapperMessage          // buffered delivery queue (capacity maxBufSize)
	done     chan struct{}                 // closed by Stop to end the dispatch goroutine
}

// handlers is the set of holders registered for one address, with a
// round-robin cursor used by choose for Send-style delivery.
type handlers struct {
	handlerHolders []*handlerHolder
	index          int64 // round-robin cursor, advanced atomically
}
// choose picks the next holder in round-robin order, or nil when none are
// registered.
//
// Fix: the original advanced h.index atomically, bounds-checked it, then
// RE-READ h.index for the slice access; a concurrent caller could move the
// cursor between the check and the read, causing an out-of-range panic. It
// also wrapped the body in a loop that never iterated. The cursor value is
// now captured locally and reduced modulo the current length.
func (h *handlers) choose() *handlerHolder {
	n := len(h.handlerHolders)
	if n == 0 {
		return nil
	}
	idx := int(atomic.AddInt64(&h.index, 1)) % n
	if idx < 0 { // guard against int64 wrap-around producing a negative cursor
		idx += n
	}
	return h.handlerHolders[idx]
}
// GetHandler returns the wrapped handler callback.
func (h *handlerHolder) GetHandler() func(message *WrapperMessage) {
	return h.handler
}
// Stop terminates the holder's dispatch goroutine by closing done.
// NOTE(review): closing an already-closed channel panics, so Stop is not
// idempotent — confirm no path can stop the same holder twice (e.g. via
// both Unregister and a direct Stop).
func (h *handlerHolder) Stop() {
	close(h.done)
}
// newHandlerHolder creates a holder for address and starts its dispatch
// goroutine, which forwards queued messages to the handler until done is
// closed or the messages channel is closed. Reply holders (isReply) remove
// themselves from the bus after each handled message, making them
// effectively one-shot.
func newHandlerHolder(address string, isReply bool, handler func(message *WrapperMessage)) *handlerHolder {
	holder := &handlerHolder{
		handler:  handler,
		isReply:  isReply,
		address:  address,
		done:     make(chan struct{}),
		messages: make(chan *WrapperMessage, maxBufSize),
	}
	go func() {
		for {
			select {
			case <-holder.done:
				return
			case msg, ok := <-holder.messages:
				if !ok {
					return
				}
				// each message is handled on its own goroutine, so a slow
				// handler does not block the queue; delivery order to the
				// handler is therefore not guaranteed
				go func() {
					defer func() {
						if holder.isReply {
							// NOTE(review): uses the package-level singleton
							// eb rather than a bus reference carried by the
							// holder — fine while only one bus exists
							eb.removeLocalHandlerHolder(holder.address, holder)
						}
					}()
					holder.handler(msg)
				}()
			}
		}
	}()
	return holder
}
// Unregister removes this holder from the package-level bus (which also
// stops its dispatch goroutine).
func (h *handlerHolder) Unregister() {
	eb.removeLocalHandlerHolder(h.address, h)
}
// addLocalHandlerHolder registers a new handler holder under address,
// creating the per-address handler set on first use. Safe for concurrent
// use via handlersLock.
func (eb *eventbus) addLocalHandlerHolder(address string, isReply bool, handler func(*WrapperMessage)) *handlerHolder {
	eb.handlersLock.Lock()
	defer eb.handlersLock.Unlock()

	holder := newHandlerHolder(address, isReply, handler)

	set := eb.handlers[address]
	if set == nil {
		set = &handlers{handlerHolders: make([]*handlerHolder, 0, 10)}
		eb.handlers[address] = set
	}
	set.handlerHolders = append(set.handlerHolders, holder)

	return holder
}
// removeLocalHandlerHolder stops and removes the given holder from the
// address's handler set, deleting the set entirely once it is empty.
//
// Bug fix: the original loop variable shadowed the holder parameter
// ("for i, holder := range ...; if holder == holder"), a comparison that is
// always true — so the FIRST registered holder was removed instead of the
// requested one.
func (eb *eventbus) removeLocalHandlerHolder(address string, holder *handlerHolder) {
	eb.handlersLock.Lock()
	defer eb.handlersLock.Unlock()

	h, ok := eb.handlers[address]
	if !ok {
		return
	}

	for i, hh := range h.handlerHolders {
		if hh == holder {
			hh.Stop()
			copy(h.handlerHolders[i:], h.handlerHolders[i+1:])
			h.handlerHolders[len(h.handlerHolders)-1] = nil // release reference for GC
			h.handlerHolders = h.handlerHolders[:len(h.handlerHolders)-1]
			break
		}
	}

	if len(h.handlerHolders) == 0 {
		delete(eb.handlers, address)
	}
}
// Consumer registers handler as a durable (non-reply) consumer for address
// and returns it so the caller can later unregister it.
func (eb *eventbus) Consumer(address string, handler func(message *WrapperMessage)) MessageConsumerInterface {
	return eb.addLocalHandlerHolder(address, false, handler)
}
// Publish delivers message to every handler registered for address
// (fan-out). Unlike Send, no reply address is attached.
//
// Fix: the handlers map was read without holding handlersLock, racing with
// addLocalHandlerHolder/removeLocalHandlerHolder. The holder list is now
// snapshotted under RLock, and the (potentially blocking) channel sends
// happen after the lock is released.
func (eb *eventbus) Publish(address string, message Message) {
	eb.handlersLock.RLock()
	var holders []*handlerHolder
	if h := eb.handlers[address]; h != nil {
		holders = append(holders, h.handlerHolders...)
	}
	eb.handlersLock.RUnlock()

	for _, holder := range holders {
		if len(holder.messages) == maxBufSize {
			log.Printf("Fast publish, slow consumer. Number of buffered messages: %d.", maxBufSize)
		}
		holder.messages <- newWrapperMessage(message, eb, nil, nil)
	}
}
// Send delivers message to a single handler for address (round-robin when
// several are registered); replyHandler, if non-nil, will receive the reply.
func (eb *eventbus) Send(address string, message Message, replyHandler func(*WrapperMessage)) {
	eb.deliverMessage(address, message, replyHandler)
}
// deliverMessage sends message to one round-robin-chosen holder for address;
// the message is silently dropped when no handler is registered.
//
// Fix: the handlers map was read without holding handlersLock, racing with
// (un)registration. The holder is now selected under RLock; the channel
// send — which may block on a full buffer — happens after the lock is
// released.
func (eb *eventbus) deliverMessage(address string, message Message, replyHandler func(*WrapperMessage)) {
	eb.handlersLock.RLock()
	var target *handlerHolder
	if holders := eb.handlers[address]; holders != nil {
		target = holders.choose()
	}
	eb.handlersLock.RUnlock()

	if target == nil {
		return
	}

	if replyHandler == nil {
		target.messages <- newWrapperMessage(message, eb, nil, nil)
	} else {
		// attach a fresh reply address; newWrapperMessage registers the
		// reply handler under it
		target.messages <- newWrapperMessage(message, eb, eb.generateReplyAddress(), replyHandler)
	}
}
// generateReplyAddress returns a fresh, process-unique reply address drawn
// from the atomic sequence counter.
func (eb *eventbus) generateReplyAddress() *ReplyAddress {
	next := atomic.AddInt64(&eb.replySequence, 1)
	addr := ReplyAddress(next)
	return &addr
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.