text stringlengths 11 4.05M |
|---|
package sgfw
import (
"fmt"
"strconv"
"strings"
"sync"
// "encoding/binary"
// nfnetlink "github.com/subgraph/go-nfnetlink"
"github.com/google/gopacket/layers"
nfqueue "github.com/subgraph/go-nfnetlink/nfqueue"
"github.com/subgraph/go-procsnitch"
"net"
"os"
"syscall"
"unsafe"
)
var _interpreters = []string{
"python",
"ruby",
"bash",
}
/*type sandboxRule struct {
SrcIf net.IP
DstIP net.IP
DstPort uint16
Whitelist bool
}
var sandboxRules = []sandboxRule {
// { net.IP{172,16,1,42}, net.IP{140,211,166,134}, 21, false },
} */
// pendingConnection abstracts a connection awaiting a user prompt
// verdict so the prompt queue can treat all sources uniformly. It is
// implemented here by pendingPkt (queued NFQUEUE packets); socks()
// suggests a SOCKS-proxy implementation exists elsewhere — confirm
// other implementors in the package.
type pendingConnection interface {
	policy() *Policy
	procInfo() *procsnitch.Info
	hostname() string
	getOptString() string
	proto() string
	src() net.IP
	srcPort() uint16
	dst() net.IP
	dstPort() uint16
	sandbox() string
	socks() bool
	accept()
	acceptTLSOnly()
	drop()
	setPrompting(bool)
	getPrompting() bool
	print() string
}
// pendingPkt is the NFQUEUE-packet implementation of pendingConnection:
// a packet held back while the user is prompted for a verdict.
type pendingPkt struct {
	pol       *Policy
	name      string // hostname resolved for the destination ("" if none)
	pkt       *nfqueue.NFQPacket
	pinfo     *procsnitch.Info // originating process; may be nil
	optstring string           // free-form description (e.g. sandbox attribution)
	prompting bool             // true while a prompt for this packet is displayed
}
// getEmptyPInfo returns a placeholder procsnitch.Info used when the
// process that originated a connection could not be identified. All
// numeric identifiers are -1 and all strings are "[unknown-*]" markers.
func getEmptyPInfo() *procsnitch.Info {
	info := &procsnitch.Info{}
	info.UID = -1
	info.GID = -1
	info.Pid = -1
	info.ParentPid = -1
	info.ExePath = "[unknown-exe]"
	info.CmdLine = "[unknown-cmdline]"
	info.FirstArg = "[unknown-arg]"
	info.ParentCmdLine = "[unknown-pcmdline]"
	info.ParentExePath = "[unknown-pexe]"
	return info
}
// sandbox returns the Oz sandbox name of the originating process.
// NOTE(review): pp.pinfo may be nil (procInfo guards for this) — confirm
// callers never invoke sandbox() on an unmapped packet.
func (pp *pendingPkt) sandbox() string {
	return pp.pinfo.Sandbox
}

// socks reports whether this pending connection arrived via the SOCKS
// proxy; queued raw packets never do.
func (pp *pendingPkt) socks() bool {
	return false
}

// policy returns the policy this packet is queued under.
func (pp *pendingPkt) policy() *Policy {
	return pp.pol
}

// procInfo returns process info for the packet's originator, or the
// "[unknown-*]" placeholder when the process could not be identified.
func (pp *pendingPkt) procInfo() *procsnitch.Info {
	if pp.pinfo == nil {
		return getEmptyPInfo()
	}
	return pp.pinfo
}

// getOptString returns the free-form description attached to the packet.
func (pp *pendingPkt) getOptString() string {
	return pp.optstring
}

// hostname returns the DNS name resolved for the destination, if any.
func (pp *pendingPkt) hostname() string {
	return pp.name
}

// src returns the packet's source IP address.
func (pp *pendingPkt) src() net.IP {
	src, _ := getPacketIPAddrs(pp.pkt)
	return src
}

// dst returns the packet's destination IP address.
func (pp *pendingPkt) dst() net.IP {
	_, dst := getPacketIPAddrs(pp.pkt)
	return dst
}
// getNFQProto inspects the packet's layers and reports the transport
// protocol as "tcp", "udp", "icmp", or "[unknown]".
func getNFQProto(pkt *nfqueue.NFQPacket) string {
	switch {
	case pkt.Packet.Layer(layers.LayerTypeTCP) != nil:
		return "tcp"
	case pkt.Packet.Layer(layers.LayerTypeUDP) != nil:
		return "udp"
	case pkt.Packet.Layer(layers.LayerTypeICMPv4) != nil:
		return "icmp"
	default:
		return "[unknown]"
	}
}

// proto reports the transport protocol of the pending packet.
func (pp *pendingPkt) proto() string {
	return getNFQProto(pp.pkt)
}
// srcPort returns the source port of the pending packet, or 0 when the
// protocol has no port concept.
// BUG FIX: previously only the TCP layer was consulted, so UDP packets
// always reported a source port of 0 (inconsistent with dstPort below
// and with the src:port logging in filterPending).
func (pp *pendingPkt) srcPort() uint16 {
	switch pp.proto() {
	case "tcp":
		srcp, _ := getPacketTCPPorts(pp.pkt)
		return srcp
	case "udp":
		srcp, _ := getPacketUDPPorts(pp.pkt)
		return srcp
	}
	return 0
}

// dstPort returns the destination port of the pending packet. For ICMP
// packets the ICMP code stands in for the port; unknown protocols yield 0.
func (pp *pendingPkt) dstPort() uint16 {
	switch pp.proto() {
	case "tcp":
		_, dstp := getPacketTCPPorts(pp.pkt)
		return dstp
	case "udp":
		_, dstp := getPacketUDPPorts(pp.pkt)
		return dstp
	case "icmp":
		code, _ := getpacketICMPCode(pp.pkt)
		return uint16(code)
	}
	return 0
}
// accept releases the packet to continue through the firewall.
func (pp *pendingPkt) accept() {
	pp.pkt.Accept()
}

// acceptTLSOnly is not implemented for raw packets; it currently
// behaves exactly like drop() (mark 1, then accept).
func (pp *pendingPkt) acceptTLSOnly() {
	// Not implemented
	pp.pkt.SetMark(1)
	pp.pkt.Accept()
}

// drop sets nfqueue mark 1 on the packet and accepts it; presumably a
// later firewall rule drops packets carrying this mark — TODO confirm
// against the netfilter ruleset.
func (pp *pendingPkt) drop() {
	pp.pkt.SetMark(1)
	pp.pkt.Accept()
}

// getPrompting reports whether a user prompt is in flight for this packet.
func (pp *pendingPkt) getPrompting() bool {
	return pp.prompting
}

// setPrompting records whether a user prompt is in flight for this packet.
func (pp *pendingPkt) setPrompting(val bool) {
	pp.prompting = val
}

// print renders a human-readable one-line description of the packet.
func (pp *pendingPkt) print() string {
	return printPacket(pp.pkt, pp.name, pp.pinfo)
}
// Policy groups the firewall rules and pending-prompt state for one
// application, identified by executable path plus sandbox name.
type Policy struct {
	fw               *Firewall
	path             string // executable path the policy applies to
	sandbox          string // Oz sandbox name ("" when unsandboxed)
	application      string // display name (desktop entry name when available)
	icon             string
	rules            RuleList
	pendingQueue     []pendingConnection // connections awaiting a prompt verdict
	promptInProgress bool
	lock             sync.Mutex // guards rules, pendingQueue, promptInProgress
}
// PolicyForPath returns (creating on first use) the policy for an
// executable path, taking the firewall lock.
func (fw *Firewall) PolicyForPath(path string) *Policy {
	fw.lock.Lock()
	defer fw.lock.Unlock()
	return fw.policyForPath(path)
}

// PolicyForPathAndSandbox returns (creating on first use) the policy
// for a path/sandbox pair, taking the firewall lock.
func (fw *Firewall) PolicyForPathAndSandbox(path string, sandbox string) *Policy {
	fw.lock.Lock()
	defer fw.lock.Unlock()
	return fw.policyForPathAndSandbox(path, sandbox)
}
// policyForPathAndSandbox returns the policy keyed by sandbox and path,
// creating and registering a new one on first use. Callers must hold
// fw.lock.
func (fw *Firewall) policyForPathAndSandbox(path string, sandbox string) *Policy {
	key := sandbox + "|" + path
	if pol, ok := fw.policyMap[key]; ok {
		return pol
	}
	pol := &Policy{
		fw:          fw,
		path:        path,
		application: path,
		sandbox:     sandbox,
	}
	if entry := entryForPath(path); entry != nil {
		pol.application = entry.name
		pol.icon = entry.icon
	}
	fw.policyMap[key] = pol
	log.Infof("Creating new policy for path and sandbox: %s\n", key)
	fw.policies = append(fw.policies, pol)
	return pol
}
// policyForPath returns the policy registered for an executable path,
// creating it on first use. Callers must hold fw.lock.
func (fw *Firewall) policyForPath(path string) *Policy {
	if pol, ok := fw.policyMap[path]; ok {
		return pol
	}
	pol := &Policy{
		fw:          fw,
		path:        path,
		application: path,
	}
	if entry := entryForPath(path); entry != nil {
		pol.application = entry.name
		pol.icon = entry.icon
	}
	fw.policyMap[path] = pol
	fw.policies = append(fw.policies, pol)
	return pol
}
// processPacket runs a queued NFQUEUE packet through this policy's rule
// list and applies the verdict: deny (mark + accept; the mark+accept
// pattern mirrors pendingPkt.drop, presumably matched by a drop rule
// later in the chain — confirm), allow, or queue for a user prompt.
func (p *Policy) processPacket(pkt *nfqueue.NFQPacket, pinfo *procsnitch.Info, optstr string) {
	/* hbytes, err := pkt.GetHWAddr()
	if err != nil {
	log.Notice("Failed to get HW address underlying packet: ", err)
	} else { log.Notice("got hwaddr: ", hbytes) } */
	p.lock.Lock()
	defer p.lock.Unlock()
	dstb := pkt.Packet.NetworkLayer().NetworkFlow().Dst().Raw()
	dstip := net.IP(dstb)
	srcip := net.IP(pkt.Packet.NetworkLayer().NetworkFlow().Src().Raw())
	// Reverse-resolve the destination so rules can match on hostname.
	name := p.fw.dns.Lookup(dstip, pinfo.Pid)
	if !FirewallConfig.LogRedact {
		log.Infof("Lookup(%s): %s", dstip.String(), name)
	}
	// fwo := matchAgainstOzRules(srcip, dstip, dstp)
	result := p.rules.filterPacket(pkt, pinfo, srcip, name, optstr)
	switch result {
	case FILTER_DENY:
		pkt.SetMark(1)
		pkt.Accept()
	case FILTER_ALLOW:
		pkt.Accept()
	case FILTER_PROMPT:
		p.processPromptResult(&pendingPkt{pol: p, name: name, pkt: pkt, pinfo: pinfo, optstring: optstr, prompting: false})
	default:
		log.Warningf("Unexpected filter result: %d", result)
	}
}
// processPromptResult queues a pending connection for the user prompt
// and starts a dbus prompt when permitted: always with multi-prompt
// enabled, otherwise only when no prompt is already in progress.
// Caller must hold p.lock.
func (p *Policy) processPromptResult(pc pendingConnection) {
	p.pendingQueue = append(p.pendingQueue, pc)
	//fmt.Println("processPromptResult(): p.promptInProgress = ", p.promptInProgress)
	// Simplified from DoMultiPrompt || (!DoMultiPrompt && !p.promptInProgress)
	// via the absorption law; behavior is identical.
	if DoMultiPrompt || !p.promptInProgress {
		p.promptInProgress = true
		go p.fw.dbus.prompt(p)
	}
}
// nextPending returns the next queued connection that is not already
// being prompted for, plus a flag that is true when the queue is empty.
// Without multi-prompt, the head of the queue is always returned.
func (p *Policy) nextPending() (pendingConnection, bool) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if len(p.pendingQueue) == 0 {
		return nil, true
	}
	if !DoMultiPrompt {
		return p.pendingQueue[0], false
	}
	for _, pc := range p.pendingQueue {
		if !pc.getPrompting() {
			return pc, false
		}
	}
	// Everything queued is already mid-prompt.
	return nil, false
}
// removePending deletes a single pending connection from the prompt
// queue, leaving the queue untouched when the connection is not found.
func (p *Policy) removePending(pc pendingConnection) {
	p.lock.Lock()
	defer p.lock.Unlock()
	kept := make([]pendingConnection, 0, len(p.pendingQueue))
	for _, queued := range p.pendingQueue {
		if queued != pc {
			kept = append(kept, queued)
		}
	}
	if len(kept) != len(p.pendingQueue) {
		p.pendingQueue = kept
	}
}
// processNewRule installs a rule produced by a prompt verdict (unless
// its scope is APPLY_ONCE), applies it to every queued connection, and
// reports whether a prompt is still in progress afterwards.
func (p *Policy) processNewRule(r *Rule, scope FilterScope) bool {
	p.lock.Lock()
	defer p.lock.Unlock()
	if scope != APPLY_ONCE {
		p.rules = append(p.rules, r)
	}
	p.filterPending(r)
	if len(p.pendingQueue) == 0 {
		p.promptInProgress = false
	}
	return p.promptInProgress
}
// parseRule parses a textual rule into a Rule bound to this policy.
// When add is true the rule is also appended to the policy's rule list
// under the policy lock. The rule is always registered with the
// firewall via addRule.
func (p *Policy) parseRule(s string, add bool) (*Rule, error) {
	//log.Noticef("XXX: attempt to parse rule: |%s|\n", s)
	r := new(Rule)
	r.pid = -1
	r.mode = RULE_MODE_PERMANENT
	r.policy = p
	if !r.parse(s) {
		return nil, parseError(s)
	}
	if add {
		p.lock.Lock()
		defer p.lock.Unlock()
		p.rules = append(p.rules, r)
	}
	p.fw.addRule(r)
	return r, nil
}
// removeRule deletes the rule with the matching id from this policy's
// rule list.
func (p *Policy) removeRule(r *Rule) {
	p.lock.Lock()
	defer p.lock.Unlock()
	var kept RuleList
	for _, existing := range p.rules {
		if existing.id != r.id {
			kept = append(kept, existing)
		}
	}
	p.rules = kept
}
// filterPending applies a freshly added rule to every queued pending
// connection: matching connections are accepted or dropped according to
// the rule action and removed from the queue; the rest stay queued.
// Caller must hold p.lock.
func (p *Policy) filterPending(rule *Rule) {
	remaining := []pendingConnection{}
	for _, pc := range p.pendingQueue {
		if rule.match(pc.src(), pc.dst(), pc.dstPort(), pc.hostname(), pc.proto(), pc.procInfo().UID, pc.procInfo().GID, uidToUser(pc.procInfo().UID), gidToGroup(pc.procInfo().GID), pc.procInfo().Sandbox) {
			log.Infof("Adding rule for: %s", rule.getString(FirewallConfig.LogRedact))
			// log.Noticef("%s > %s", rule.getString(FirewallConfig.LogRedact), pc.print())
			if rule.rtype == RULE_ACTION_ALLOW {
				pc.accept()
			} else if rule.rtype == RULE_ACTION_ALLOW_TLSONLY {
				pc.acceptTLSOnly()
			} else {
				srcs := pc.src().String() + ":" + strconv.Itoa(int(pc.srcPort()))
				dests := STR_REDACTED
				if !FirewallConfig.LogRedact {
					// BUG FIX: pc.dstPort was previously formatted as a
					// method value (its address) instead of being called,
					// and the host:port separator was missing.
					dests = fmt.Sprintf("%s:%d", pc.dst(), pc.dstPort())
				}
				log.Warningf("DENIED outgoing connection attempt by %s from %s %s -> %s (user prompt) %v",
					pc.procInfo().ExePath, pc.proto(), srcs, dests, rule.rtype)
				pc.drop()
			}
		} else {
			remaining = append(remaining, pc)
		}
	}
	if len(remaining) != len(p.pendingQueue) {
		p.pendingQueue = remaining
	}
}
// hasPersistentRules reports whether any rule of this policy outlives
// the current session.
func (p *Policy) hasPersistentRules() bool {
	for _, rule := range p.rules {
		if rule.mode != RULE_MODE_SESSION {
			return true
		}
	}
	return false
}
// printPacket renders a one-line human-readable description of the
// packet, honoring the LogRedact setting for hostnames. With no process
// info the description is parenthesized; ICMP packets show the
// type/code string instead of ports.
func printPacket(pkt *nfqueue.NFQPacket, hostname string, pinfo *procsnitch.Info) string {
	srcIP, dstIP := getPacketIPAddrs(pkt)
	proto := "???"
	switch {
	case pkt.Packet.Layer(layers.LayerTypeTCP) != nil:
		proto = "TCP"
	case pkt.Packet.Layer(layers.LayerTypeUDP) != nil:
		proto = "UDP"
	case pkt.Packet.Layer(layers.LayerTypeICMPv4) != nil:
		proto = "ICMP"
	}
	var srcPort, dstPort uint16
	code := 0
	codestr := ""
	switch proto {
	case "TCP":
		srcPort, dstPort = getPacketTCPPorts(pkt)
	case "UDP":
		srcPort, dstPort = getPacketUDPPorts(pkt)
	case "ICMP":
		code, codestr = getpacketICMPCode(pkt)
	}
	if FirewallConfig.LogRedact {
		hostname = STR_REDACTED
	}
	name := hostname
	if name == "" {
		name = dstIP.String()
	}
	if pinfo == nil {
		if proto == "ICMP" {
			return fmt.Sprintf("(%s %s -> %s: %s [%d])", proto, srcIP, name, codestr, code)
		}
		return fmt.Sprintf("(%s %s:%d -> %s:%d)", proto, srcIP, srcPort, name, dstPort)
	}
	return fmt.Sprintf("%s %s %s:%d -> %s:%d", pinfo.ExePath, proto, srcIP, srcPort, name, dstPort)
}
// filterPacket is the entry point for packets delivered by NFQUEUE. It
// short-circuits trivially allowed traffic (snooping DNS responses into
// the hostname cache on the way), maps the packet to its originating
// process, and hands it to that application's policy.
func (fw *Firewall) filterPacket(pkt *nfqueue.NFQPacket) {
	isudp := pkt.Packet.Layer(layers.LayerTypeUDP) != nil
	if basicAllowPacket(pkt) {
		if isudp {
			srcport, _ := getPacketUDPPorts(pkt)
			// Source port 53: a DNS response — record name/address
			// mappings before letting it through.
			if srcport == 53 {
				fw.dns.processDNS(pkt)
			}
		}
		pkt.Accept()
		return
	}
	/* if isudp {
	srcport, _ := getPacketUDPPorts(pkt)
	if srcport == 53 {
	fw.dns.processDNS(pkt)
	pkt.Accept()
	return
	}
	}
	*/
	_, dstip := getPacketIPAddrs(pkt)
	/* _, dstp := getPacketPorts(pkt)
	fwo := eatchAgainstOzRules(srcip, dstip, dstp)
	log.Notice("XXX: Attempting [2] to filter packet on rules -> ", fwo)
	if fwo == OZ_FWRULE_WHITELIST {
	log.Noticef("Automatically passed through whitelisted sandbox traffic from %s to %s:%d\n", srcip, dstip, dstp)
	pkt.Accept()
	return
	} else if fwo == OZ_FWRULE_BLACKLIST {
	log.Noticef("Automatically blocking blacklisted sandbox traffic from %s to %s:%d\n", srcip, dstip, dstp)
	pkt.SetMark(1)
	pkt.Accept()
	return
	} */
	ppath := "*"
	// UDP sockets are matched loosely: unconnected sockets carry no
	// remote endpoint in /proc/net/udp.
	strictness := procsnitch.MATCH_STRICT
	if isudp {
		strictness = procsnitch.MATCH_LOOSE
	}
	pinfo, optstring := findProcessForPacket(pkt, false, strictness)
	if pinfo == nil {
		// Could not attribute the packet to a process; fall through to
		// the policy for "[unknown]" rather than silently accepting.
		pinfo = getEmptyPInfo()
		ppath = "[unknown]"
		optstring = "[Connection could not be mapped]"
		log.Warningf("No proc found for %s", printPacket(pkt, fw.dns.Lookup(dstip, pinfo.Pid), nil))
		// pkt.Accept()
		// return
	} else {
		ppath = pinfo.ExePath
		cf := strings.Fields(pinfo.CmdLine)
		// For known script interpreters, attribute the connection to the
		// script (first absolute-path argument) rather than the
		// interpreter binary itself.
		if len(cf) > 1 && strings.HasPrefix(cf[1], "/") {
			for _, intp := range _interpreters {
				if strings.Contains(pinfo.ExePath, intp) {
					ppath = cf[1]
					break
				}
			}
		}
	}
	log.Debugf("filterPacket [%s] %s", ppath, printPacket(pkt, fw.dns.Lookup(dstip, pinfo.Pid), nil))
	/* if basicAllowPacket(pkt) {
	pkt.Accept()
	return
	}
	*/
	policy := fw.PolicyForPathAndSandbox(ppath, pinfo.Sandbox)
	//log.Notice("XXX: flunked basicallowpacket; policy = ", policy)
	policy.processPacket(pkt, pinfo, optstring)
}
// readFileDirect reads a file (up to 64KB) using raw open/read/close
// syscalls instead of the os package; it is used to read /proc entries
// of sandboxed processes.
// BUG FIX: the file descriptor was previously leaked when a read failed
// mid-way; it is now released on every return path via defer.
func readFileDirect(filename string) ([]byte, error) {
	bfilename, err := syscall.BytePtrFromString(filename)
	if err != nil {
		return nil, err
	}
	res, _, errno := syscall.Syscall(syscall.SYS_OPEN, uintptr(unsafe.Pointer(bfilename)), syscall.O_RDONLY, 0)
	if int64(res) < 0 {
		return nil, errno
	}
	fd := int(res)
	defer syscall.Close(fd)
	data := make([]byte, 65535)
	total := 0
	for total < 65535 {
		n, rerr := syscall.Read(fd, data[total:])
		total += n
		// Preserve the original error policy: a read error with bytes
		// transferred aborts; a zero-byte read ends the loop (EOF).
		if rerr != nil && n != 0 {
			return nil, rerr
		}
		if n == 0 {
			break
		}
	}
	return data[0:total], nil
}
// getAllProcNetDataLocal aggregates the /proc/<pid>/net/tcp tables of
// all running Oz sandbox init processes, keeping only well-formed
// 6-field socket lines and renumbering their entry index.
// BUG FIXES: (1) OzInitPidsLock.Unlock() sat inside the loop body, so a
// second iteration unlocked an already-unlocked mutex (runtime panic);
// it now releases once after the loop. (2) The pid path used
// OzInitPids[i] directly where sibling functions (LookupSandboxProc,
// findProcessForPacket) use OzInitPids[i].Pid.
func getAllProcNetDataLocal() ([]string, error) {
	data := ""
	OzInitPidsLock.Lock()
	for i := 0; i < len(OzInitPids); i++ {
		fname := fmt.Sprintf("/proc/%d/net/tcp", OzInitPids[i].Pid)
		//fmt.Println("XXX: opening: ", fname)
		bdata, err := readFileDirect(fname)
		if err != nil {
			fmt.Println("Error reading proc data from ", fname, ": ", err)
		} else {
			data += string(bdata)
		}
	}
	OzInitPidsLock.Unlock()
	lines := strings.Split(data, "\n")
	rlines := make([]string, 0)
	ctr := 1
	for l := 0; l < len(lines); l++ {
		lines[l] = strings.TrimSpace(lines[l])
		ssplit := strings.Split(lines[l], ":")
		if len(ssplit) != 6 {
			continue
		}
		// Rewrite the leading index so the merged table is sequential.
		ssplit[0] = fmt.Sprintf("%d", ctr)
		ctr++
		rlines = append(rlines, strings.Join(ssplit, ":"))
	}
	return rlines, nil
}
// GetRealRoot translates pathname as seen from inside the mount
// namespace of process pid into a host-side path, by stripping the
// root prefix reported by /proc/<pid>/root. On any readlink error the
// pathname is returned unchanged.
func GetRealRoot(pathname string, pid int) string {
	procLink := fmt.Sprintf("/proc/%d/root", pid)
	root, err := os.Readlink(procLink)
	if err != nil {
		fmt.Printf("Error reading link at %s: %v", procLink, err)
		return pathname
	}
	if root == "/" || !strings.HasPrefix(pathname, root) {
		return pathname
	}
	return pathname[len(root):]
}
// LookupSandboxProc attributes a connection to a process inside one of
// the Oz sandboxes by scanning each sandbox init's /proc/<pid>/net/<proto>
// table. Sandboxes whose proc file has vanished (ENOENT) are pruned
// from OzInitPids after the scan. On success the returned Info carries
// the sandbox name and a host-translated ExePath.
// XXX: This is redundant code.... it should be called by findProcessForPacket()
func LookupSandboxProc(srcip net.IP, srcp uint16, dstip net.IP, dstp uint16, proto string, strictness, icode int) (*procsnitch.Info, string) {
	var res *procsnitch.Info = nil
	var optstr string
	removePids := make([]int, 0)
	OzInitPidsLock.Lock()
	for i := 0; i < len(OzInitPids); i++ {
		data := ""
		fname := fmt.Sprintf("/proc/%d/net/%s", OzInitPids[i].Pid, proto)
		//fmt.Println("XXX: opening: ", fname)
		bdata, err := readFileDirect(fname)
		if err != nil {
			fmt.Println("Error reading proc data from ", fname, ": ", err)
			if err == syscall.ENOENT {
				// Sandbox is gone; schedule for removal after unlock.
				removePids = append(removePids, OzInitPids[i].Pid)
			}
			continue
		} else {
			data = string(bdata)
			// Keep only well-formed socket lines (6 colon-separated fields).
			lines := strings.Split(data, "\n")
			rlines := make([]string, 0)
			for l := 0; l < len(lines); l++ {
				lines[l] = strings.TrimSpace(lines[l])
				ssplit := strings.Split(lines[l], ":")
				if len(ssplit) != 6 {
					continue
				}
				rlines = append(rlines, strings.Join(ssplit, ":"))
			}
			// log.Warningf("Looking for %s:%d => %s:%d \n %s\n******\n", srcip, srcp, dstip, dstp, data)
			if proto == "tcp" {
				res = procsnitch.LookupTCPSocketProcessAll(srcip, srcp, dstip, dstp, rlines)
			} else if proto == "udp" {
				res = procsnitch.LookupUDPSocketProcessAll(srcip, srcp, dstip, dstp, rlines, strictness)
			} else if proto == "icmp" {
				res = procsnitch.LookupICMPSocketProcessAll(srcip, dstip, icode, rlines)
			} else {
				fmt.Printf("unknown proto: %s", proto)
			}
			if res != nil {
				// optstr = "Sandbox: " + OzInitPids[i].Name
				res.Sandbox = OzInitPids[i].Name
				res.ExePath = GetRealRoot(res.ExePath, OzInitPids[i].Pid)
				break
			} /*else {
			log.Warningf("*****\nCouldn't find proc for %s:%d => %s:%d \n %s\n******\n", srcip, srcp, dstip, dstp, data)
			} */
		}
	}
	// Unlock before removeInitPid, which presumably acquires the lock
	// itself — confirm.
	OzInitPidsLock.Unlock()
	for _, p := range removePids {
		removeInitPid(p)
	}
	return res, optstr
}
// findProcessForPacket maps a packet to its originating process. It
// first tries the host's own proc tables via procsnitch; if that fails
// it falls back to scanning the /proc/<pid>/net/<proto> tables of the
// Oz sandbox init processes (as LookupSandboxProc does). reverse swaps
// the src/dst endpoints before matching.
// BUG FIX: the fallback previously iterated OzInitPids without holding
// OzInitPidsLock, unlike LookupSandboxProc and getAllProcNetDataLocal;
// the lock is now taken around the scan and released before
// removeInitPid (which manages the list itself).
func findProcessForPacket(pkt *nfqueue.NFQPacket, reverse bool, strictness int) (*procsnitch.Info, string) {
	srcip, dstip := getPacketIPAddrs(pkt)
	srcp, dstp := getPacketPorts(pkt)
	proto := ""
	optstr := ""
	icode := -1
	if reverse {
		dstip, srcip = getPacketIPAddrs(pkt)
		dstp, srcp = getPacketPorts(pkt)
	}
	if pkt.Packet.Layer(layers.LayerTypeTCP) != nil {
		proto = "tcp"
	} else if pkt.Packet.Layer(layers.LayerTypeUDP) != nil {
		proto = "udp"
	} else if pkt.Packet.Layer(layers.LayerTypeICMPv4) != nil {
		proto = "icmp"
		icode, _ = getpacketICMPCode(pkt)
	} else if pkt.Packet.Layer(layers.LayerTypeICMPv6) != nil {
		proto = "icmp"
		icode, _ = getpacketICMPCode(pkt)
	}
	if proto == "" {
		log.Warningf("Packet has unknown protocol: %d", pkt.Packet.NetworkLayer().LayerType())
		return nil, optstr
	}
	// log.Noticef("XXX proto = %s, from %v : %v -> %v : %v\n", proto, srcip, srcp, dstip, dstp)
	var res *procsnitch.Info = nil
	// Try normal way first, before the more resource intensive/invasive way.
	if proto == "tcp" {
		res = procsnitch.LookupTCPSocketProcessAll(srcip, srcp, dstip, dstp, nil)
	} else if proto == "udp" {
		res = procsnitch.LookupUDPSocketProcessAll(srcip, srcp, dstip, dstp, nil, strictness)
	} else if proto == "icmp" {
		res = procsnitch.LookupICMPSocketProcessAll(srcip, dstip, icode, nil)
	}
	if res == nil {
		removePids := make([]int, 0)
		OzInitPidsLock.Lock()
		for i := 0; i < len(OzInitPids); i++ {
			data := ""
			fname := fmt.Sprintf("/proc/%d/net/%s", OzInitPids[i].Pid, proto)
			//fmt.Println("XXX: opening: ", fname)
			bdata, err := readFileDirect(fname)
			if err != nil {
				fmt.Println("Error reading proc data from ", fname, ": ", err)
				if err == syscall.ENOENT {
					removePids = append(removePids, OzInitPids[i].Pid)
				}
				continue
			} else {
				data = string(bdata)
				lines := strings.Split(data, "\n")
				rlines := make([]string, 0)
				for l := 0; l < len(lines); l++ {
					lines[l] = strings.TrimSpace(lines[l])
					ssplit := strings.Split(lines[l], ":")
					if len(ssplit) != 6 {
						continue
					}
					rlines = append(rlines, strings.Join(ssplit, ":"))
				}
				if proto == "tcp" {
					res = procsnitch.LookupTCPSocketProcessAll(srcip, srcp, dstip, dstp, rlines)
				} else if proto == "udp" {
					res = procsnitch.LookupUDPSocketProcessAll(srcip, srcp, dstip, dstp, rlines, strictness)
				} else if proto == "icmp" {
					res = procsnitch.LookupICMPSocketProcessAll(srcip, dstip, icode, rlines)
				}
				if res != nil {
					optstr = "Sandbox: " + OzInitPids[i].Name
					res.ExePath = GetRealRoot(res.ExePath, OzInitPids[i].Pid)
					break
				}
			}
		}
		OzInitPidsLock.Unlock()
		for _, p := range removePids {
			removeInitPid(p)
		}
	}
	return res, optstr
}
// basicAllowPacket handles traffic that never needs a prompt: outbound
// DNS (dport 53), self-addressed ICMPv4, loopback and link-local
// multicast destinations, and packets of protocols the firewall does
// not inspect at all.
func basicAllowPacket(pkt *nfqueue.NFQPacket) bool {
	srcip, dstip := getPacketIPAddrs(pkt)
	if pkt.Packet.Layer(layers.LayerTypeUDP) != nil {
		_, dport := getPacketUDPPorts(pkt)
		// NOTE(review): any UDP to port 53 is allowed unprompted; DNS
		// responses are snooped by the caller (filterPacket).
		if dport == 53 {
			// fw.dns.processDNS(pkt)
			return true
		}
	}
	if pkt.Packet.Layer(layers.LayerTypeICMPv4) != nil && srcip.Equal(dstip) {
		// An ICMP dest unreach packet sent to ourselves probably isn't a big security risk.
		return true
	}
	return dstip.IsLoopback() ||
		dstip.IsLinkLocalMulticast() ||
		(pkt.Packet.Layer(layers.LayerTypeTCP) == nil &&
			pkt.Packet.Layer(layers.LayerTypeUDP) == nil &&
			pkt.Packet.Layer(layers.LayerTypeICMPv4) == nil &&
			pkt.Packet.Layer(layers.LayerTypeICMPv6) == nil)
}
// getPacketIPAddrs returns the (src, dst) addresses of the packet's
// IPv4 or IPv6 layer, or empty IPs when neither layer is present.
func getPacketIPAddrs(pkt *nfqueue.NFQPacket) (net.IP, net.IP) {
	if layer := pkt.Packet.Layer(layers.LayerTypeIPv4); layer != nil {
		ip4, _ := layer.(*layers.IPv4)
		return ip4.SrcIP, ip4.DstIP
	}
	if layer := pkt.Packet.Layer(layers.LayerTypeIPv6); layer != nil {
		ip6, _ := layer.(*layers.IPv6)
		return ip6.SrcIP, ip6.DstIP
	}
	// Neither IP layer present. (The original's {0,0,0,0} branch was
	// unreachable: it required ipv4 to be true with a nil IPv4 layer.)
	return net.IP{}, net.IP{}
}
// getpacketICMPCode returns the ICMP code and human-readable type/code
// string for an ICMPv4 or ICMPv6 packet, or (-1, "") when the packet
// carries neither layer.
// Generalized: findProcessForPacket calls this for ICMPv6 packets too,
// which previously always yielded (-1, "").
func getpacketICMPCode(pkt *nfqueue.NFQPacket) (int, string) {
	if icmpLayer := pkt.Packet.Layer(layers.LayerTypeICMPv4); icmpLayer != nil {
		icmp, _ := icmpLayer.(*layers.ICMPv4)
		return int(icmp.TypeCode.Code()), icmp.TypeCode.String()
	}
	if icmpLayer := pkt.Packet.Layer(layers.LayerTypeICMPv6); icmpLayer != nil {
		icmp, _ := icmpLayer.(*layers.ICMPv6)
		return int(icmp.TypeCode.Code()), icmp.TypeCode.String()
	}
	return -1, ""
}
// getPacketTCPPorts returns the (src, dst) TCP ports, or (0, 0) when
// the packet has no TCP layer.
func getPacketTCPPorts(pkt *nfqueue.NFQPacket) (uint16, uint16) {
	tcpLayer := pkt.Packet.Layer(layers.LayerTypeTCP)
	if tcpLayer == nil {
		return 0, 0
	}
	tcp, _ := tcpLayer.(*layers.TCP)
	return uint16(tcp.SrcPort), uint16(tcp.DstPort)
}

// getPacketUDPPorts returns the (src, dst) UDP ports, or (0, 0) when
// the packet has no UDP layer.
func getPacketUDPPorts(pkt *nfqueue.NFQPacket) (uint16, uint16) {
	udpLayer := pkt.Packet.Layer(layers.LayerTypeUDP)
	if udpLayer == nil {
		return 0, 0
	}
	udp, _ := udpLayer.(*layers.UDP)
	return uint16(udp.SrcPort), uint16(udp.DstPort)
}
// getPacketPorts returns the packet's transport ports, trying TCP first
// and falling back to UDP when no TCP ports are present.
func getPacketPorts(pkt *nfqueue.NFQPacket) (uint16, uint16) {
	src, dst := getPacketTCPPorts(pkt)
	if src == 0 && dst == 0 {
		src, dst = getPacketUDPPorts(pkt)
	}
	return src, dst
}
/*func matchAgainstOzRules(srci, dsti net.IP, dstp uint16) int {
for i := 0; i < len(sandboxRules); i++ {
log.Notice("XXX: Attempting to match: ", srci, " / ", dsti, " / ", dstp, " | ", sandboxRules[i])
if sandboxRules[i].SrcIf.Equal(srci) && sandboxRules[i].DstIP.Equal(dsti) && sandboxRules[i].DstPort == dstp {
if sandboxRules[i].Whitelist {
return OZ_FWRULE_WHITELIST
}
return OZ_FWRULE_BLACKLIST
}
}
return OZ_FWRULE_NONE
} */
|
package entry
// LevelCfgCache exposes read access to the cached level configuration.
// GetExpArr returns the experience table.
// NOTE(review): per-entry semantics (indexed by level?) are not visible
// here — confirm against the implementation.
type LevelCfgCache interface {
	//GetExpToNextLevel(nowLevel int32) (int32, bool)
	GetExpArr() []int32
}
|
package controllers
import (
"fmt"
"github.com/BurntSushi/toml"
"github.com/gin-gonic/gin"
"log"
"net/http"
"test/pkg/middleware"
"test/src/constant"
)
// init loads the TOML configuration and derives the MySQL DSN and the
// HTTP listen port before the server starts. Any config error is fatal.
func init() {
	// Global gin mode: DebugMode for development; use gin.ReleaseMode in production.
	gin.SetMode(gin.DebugMode)
	if _, err := toml.DecodeFile("conf/config.toml", &constant.Config); err != nil {
		log.Fatalln(err)
	}
	// Go MySQL DSN format: user:password@(host:port)/dbname?parseTime=...
	// BUG FIX: the username and database arguments were swapped — the
	// database name was used as the user and vice versa.
	constant.DbConnection = fmt.Sprintf("%s:%s@(%s:%s)/%s?parseTime=%s",
		constant.Config.Base.Db["username"],
		constant.Config.Base.Db["password"],
		constant.Config.Base.Db["host"],
		constant.Config.Base.Db["port"],
		constant.Config.Base.Db["database"],
		constant.Config.Base.Db["parseTime"])
	constant.Port = fmt.Sprintf(":%s", constant.Config.Base.Server["port"])
}
// Run wires up middleware and routes on a default gin engine, then
// serves HTTP on the configured port, panicking if the listener fails.
func Run() {
	router := gin.Default()
	registerMiddleware(router)
	registerHandler(router)
	if err := http.ListenAndServe(constant.Port, router); err != nil {
		panic(err)
	}
}
// registerMiddleware installs the global middleware chain.
func registerMiddleware(router *gin.Engine) {
	// Register middleware.
	router.Use(middleware.Middleware)
	router.Use(middleware.MyMid)
}

// registerHandler registers the HTTP routes.
func registerHandler(router *gin.Engine) {
	// Register endpoints.
	router.GET("/server/get", GetHandler)
	router.POST("/server/post", PostHandler)
	router.POST("/api/home/products", ProductHandler)
	router.PUT("/server/put", PutHandler)
	router.DELETE("/server/delete", DeleteHandler)
}
package proto
import (
"packet"
)
// AckScenePlayers is the decoded acknowledgement message carrying the
// players present in a scene.
type AckScenePlayers struct {
	players []*MsgScenePlayer
}
// AckScenePlayersDecode reads a uint16 player count followed by that
// many MsgScenePlayer records from the packet.
func AckScenePlayersDecode(pack *packet.Packet) *AckScenePlayers {
	ack := &AckScenePlayers{}
	count := int(pack.ReadUint16())
	for i := 0; i < count; i++ {
		ack.players = append(ack.players, MsgScenePlayerDecode(pack))
	}
	return ack
}
// GetPlayers returns the decoded player list.
// Idiom fix: Go convention uses a short receiver name, never `this`.
func (a *AckScenePlayers) GetPlayers() []*MsgScenePlayer {
	return a.players
}
|
// Package line implements the Messenger handler of LINE for Telepathy framework.
// Needed configs:
// - SECRET: Channel scecret of LINE Messenging API
// - TOKEN: Channel access token of LINE Messenging API
package line
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"gitlab.com/kavenc/telepathy/internal/pkg/imgur"
"github.com/line/line-bot-sdk-go/linebot"
"github.com/sirupsen/logrus"
"gitlab.com/kavenc/telepathy/internal/pkg/telepathy"
)
const (
inMsgLen = 5
)
// InitError indicates an error when initializing the LINE messenger
// handler. (The original comment said "Discord" — a copy/paste slip.)
type InitError struct {
	msg string
}
// Messenger implements the communication with Line APP via the LINE
// Messaging API.
type Messenger struct {
	Secret        string // channel secret of the LINE Messaging API
	Token         string // channel access token of the LINE Messaging API
	inMsgChannel  chan telepathy.InboundMessage
	outMsgChannel <-chan telepathy.OutboundMessage
	bot           *linebot.Client
	replyTokenMap sync.Map // channel ID -> *sync.Pool of reply tokens
	logger        *logrus.Entry
}
// Error renders the initialization failure as "LINE init failed: <msg>".
func (e InitError) Error() string {
	return "LINE init failed: " + e.msg
}

// ID returns messenger ID ("LINE"), identifying this handler within
// the telepathy framework.
func (m *Messenger) ID() string {
	return "LINE"
}
// Start starts the plugin: it creates the linebot client and then runs
// the outbound transmitter loop, blocking until the outbound channel is
// closed.
func (m *Messenger) Start() {
	bot, err := linebot.New(m.Secret, m.Token)
	if err != nil {
		m.logger.Panic(err.Error())
	}
	m.bot = bot
	m.logger.Info("started")
	// Blocks until outMsgChannel is closed.
	m.transmitter()
	m.logger.Info("terminated")
}

// Stop terminates the plugin by closing the inbound channel.
// NOTE(review): webhookHandler may still send on inMsgChannel after the
// close, which would panic — confirm shutdown ordering in the framework.
func (m *Messenger) Stop() {
	close(m.inMsgChannel)
}
// SetLogger sets the logger
func (m *Messenger) SetLogger(logger *logrus.Entry) {
	m.logger = logger
}

// InMsgChannel provides inbound message channel, creating it lazily on
// first call.
// NOTE(review): the lazy init is unsynchronized — confirm the framework
// calls this exactly once during setup.
func (m *Messenger) InMsgChannel() <-chan telepathy.InboundMessage {
	if m.inMsgChannel == nil {
		m.inMsgChannel = make(chan telepathy.InboundMessage, inMsgLen)
	}
	return m.inMsgChannel
}

// AttachOutMsgChannel attaches outbound message channel consumed by
// transmitter().
func (m *Messenger) AttachOutMsgChannel(ch <-chan telepathy.OutboundMessage) {
	m.outMsgChannel = ch
}

// Webhook provides webhook endpoints: a single "line-callback" route
// served by webhookHandler.
func (m *Messenger) Webhook() map[string]telepathy.HTTPHandler {
	return map[string]telepathy.HTTPHandler{
		"line-callback": m.webhookHandler,
	}
}

// SetWebhookURL receives webhook urls; this handler does not need its
// own URL, so it is a no-op.
func (m *Messenger) SetWebhookURL(urls map[string]*url.URL) {
}
// transmitter drains the outbound message channel and delivers each
// message to LINE, preferring a banked reply token (free reply API) and
// falling back to PushMessage. Returns when the channel is closed.
func (m *Messenger) transmitter() {
	for message := range m.outMsgChannel {
		messages := []linebot.SendingMessage{}
		if message.Text == "" && message.Image == nil {
			continue
		}
		// Prefix the text with "[ AsName ]" when relaying on behalf of
		// another identity.
		text := strings.Builder{}
		if message.AsName != "" {
			fmt.Fprintf(&text, "[ %s ]", message.AsName)
			if message.Text != "" {
				fmt.Fprintf(&text, "\n%s", message.Text)
			}
		} else {
			text.WriteString(message.Text)
		}
		if text.Len() > 0 {
			messages = append(messages, linebot.NewTextMessage(text.String()))
		}
		// Images are currently sent as a "(image)" text placeholder,
		// not as a LINE image message.
		if message.Image != nil {
			messages = append(messages, linebot.NewTextMessage("(image)"))
		}
		// Try to use reply token
		channelID := message.ToChannel.ChannelID
		item, _ := m.replyTokenMap.LoadOrStore(channelID, &sync.Pool{})
		pool, _ := item.(*sync.Pool)
		item = pool.Get()
		if item != nil {
			replyTokenStr, _ := item.(string)
			call := m.bot.ReplyMessage(replyTokenStr, messages...)
			_, err := call.Do()
			if err == nil {
				continue
			}
			// If send failed with replyToken, output err msg and retry with PushMessage
			logger := m.logger.WithFields(logrus.Fields{
				"target":      channelID,
				"reply_token": replyTokenStr,
			})
			logger.Warn("reply message failed: " + err.Error())
			logger.Info("trying push message")
		}
		call := m.bot.PushMessage(channelID, messages...)
		_, err := call.Do()
		if err != nil {
			logger := m.logger.WithField("target", channelID)
			logger.Error("push message failed: " + err.Error())
		}
	}
}
// webhookHandler handles LINE webhook callbacks: it validates the
// request signature, banks reply tokens for later outbound use, and
// converts supported message events into telepathy inbound messages.
func (m *Messenger) webhookHandler(response http.ResponseWriter, request *http.Request) {
	events, err := m.bot.ParseRequest(request)
	if err != nil {
		m.logger.Errorf("invalid request: %s", err.Error())
		// Bad signature is the caller's fault (400); anything else is
		// treated as a server error (500).
		if err == linebot.ErrInvalidSignature {
			response.WriteHeader(400)
		} else {
			response.WriteHeader(500)
		}
		return
	}
	for _, event := range events {
		if event.Type == linebot.EventTypeMessage {
			message := telepathy.InboundMessage{FromChannel: telepathy.Channel{
				MessengerID: m.ID(),
			}}
			profile, channelID := m.getSourceProfile(event.Source)
			message.SourceProfile = profile
			if message.SourceProfile == nil {
				m.logger.Warn("ignored message with unknown source")
				continue
			}
			message.FromChannel.ChannelID = channelID
			// Direct message iff the channel is the user themself.
			message.IsDirectMessage = message.SourceProfile.ID == channelID
			// Bank the reply token so transmitter can answer for free.
			item, _ := m.replyTokenMap.LoadOrStore(channelID, &sync.Pool{})
			pool, _ := item.(*sync.Pool)
			pool.Put(event.ReplyToken)
			switch lineMessage := event.Message.(type) {
			case *linebot.TextMessage:
				message.Text = lineMessage.Text
			case *linebot.StickerMessage:
				message.Text = "(Sticker)"
			case *linebot.ImageMessage:
				// Fetch the image bytes and wrap them for imgur upload.
				response, err := m.bot.GetMessageContent(lineMessage.ID).Do()
				if err != nil {
					m.logger.Warn("fail to get image content")
					continue
				}
				content, err := ioutil.ReadAll(response.Content)
				response.Content.Close()
				if err != nil {
					m.logger.Warn("fail to read image content")
					continue
				}
				message.Image = imgur.NewImage(
					imgur.ByteContent{
						Type:    response.ContentType,
						Content: content,
					})
			default:
				m.logger.Warnf("unsupported message type: %T", event.Message)
				continue
			}
			m.inMsgChannel <- message
		}
	}
}
// getSourceProfile resolves the user profile and channel ID for an
// incoming event source, handling group chats, direct (user) chats, and
// multi-person rooms. Returns (nil, "") on any lookup failure or when
// the source kind is unrecognized.
func (m *Messenger) getSourceProfile(source *linebot.EventSource) (*telepathy.MsgrUserProfile, string) {
	switch {
	case source.GroupID != "":
		profile, err := m.bot.GetGroupMemberProfile(source.GroupID, source.UserID).Do()
		if err != nil {
			logger := m.logger.WithFields(logrus.Fields{"GroupID": source.GroupID, "UserID": source.UserID})
			logger.Error("GetGroupMemberProfile failed: " + err.Error())
			return nil, ""
		}
		return &telepathy.MsgrUserProfile{
			ID:          profile.UserID,
			DisplayName: profile.DisplayName,
		}, source.GroupID
	case source.UserID != "":
		profile, err := m.bot.GetProfile(source.UserID).Do()
		if err != nil {
			logger := m.logger.WithField("UserID", source.UserID)
			logger.Error("GetProfile failed: " + err.Error())
			return nil, ""
		}
		return &telepathy.MsgrUserProfile{
			ID:          profile.UserID,
			DisplayName: profile.DisplayName,
		}, source.UserID
	case source.RoomID != "":
		profile, err := m.bot.GetRoomMemberProfile(source.RoomID, source.UserID).Do()
		if err != nil {
			logger := m.logger.WithFields(logrus.Fields{"RoomID": source.RoomID, "UserID": source.UserID})
			logger.Error("GetRoomMemberProfile failed: " + err.Error())
			return nil, ""
		}
		return &telepathy.MsgrUserProfile{
			ID:          profile.UserID,
			DisplayName: profile.DisplayName,
		}, source.RoomID
	default:
		m.logger.Warn("unknown source " + source.Type)
		return nil, ""
	}
}
|
package main
// main demonstrates multi-line composite-literal syntax.
// BUG FIX: the literal was missing the trailing comma after its last
// element, which is a compile error when the closing brace is on its
// own line — exactly the rule the original (Chinese) comment described:
// when initializing arrays, slices, and maps with literals, put a comma
// after every element; it then compiles whether written on one line or
// many.
func main() {
	x := []int{
		1,
		2,
	}
	_ = x
}
package model
// GetCart models a shopping-cart payload: the owning user and the
// products in the cart.
type GetCart struct {
	User    User
	Product []Product
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package logs
import (
"context"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/systemlogs"
"chromiumos/tast/testing"
)
// init registers the Smoke test with the tast framework; it requires a
// logged-in Chrome session (chrome.LoggedIn precondition).
func init() {
	testing.AddTest(&testing.Test{
		Func:         Smoke,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Tests that writing system logs succeeds",
		Contacts: []string{
			"cros-networking@chromium.org", // Team alias
			"stevenjb@chromium.org",        // Test author
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Pre:          chrome.LoggedIn(),
	})
}
// Smoke verifies that fetching system logs through the Chrome test API
// succeeds and yields a non-empty result for the "CHROME VERSION" key.
func Smoke(ctx context.Context, s *testing.State) {
	cr := s.PreValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	const expectedKey = "CHROME VERSION"
	result, err := systemlogs.GetSystemLogs(ctx, tconn, expectedKey)
	if err != nil {
		s.Fatal("Error getting system logs: ", err)
	}
	if result == "" {
		s.Fatal("System logs result empty")
	}
}
|
package cmd
import (
"github.com/spf13/cobra"
"github.com/MichaelDarr/ahab/internal"
ahab "github.com/MichaelDarr/ahab/pkg"
)
// cmdCmd implements `ahab cmd`: it runs an arbitrary command inside the
// project container via `docker exec -it`, attaching the terminal.
// Flag parsing is disabled so all arguments pass through to docker.
var cmdCmd = &cobra.Command{
	Use:   "cmd",
	Short: "Execute an attached command in the container",
	Run: func(cmd *cobra.Command, args []string) {
		helpRequested, err := internal.PrintDockerHelp(&args, "exec", `Execute an in-container command, attaching the input/output to your terminal
Docker Command:
docker exec -it CONTAINER COMMAND [OPTIONS]
Usage:
ahab cmd [-h/--help] COMMAND [OPTIONS]
`)
		ahab.PrintErrFatal(err)
		if helpRequested {
			return
		}
		container, err := internal.GetContainer()
		ahab.PrintErrFatal(err)
		// Make sure the container is running before exec'ing into it.
		ahab.PrintErrFatal(container.Up())
		containerOpts := []string{"exec", "-it"}
		// Run as the configured user, or the default container user
		// unless permission handling is disabled.
		if container.Fields.User != "" {
			containerOpts = append(containerOpts, "-u", container.Fields.User)
		} else if !container.Fields.Permissions.Disable {
			containerOpts = append(containerOpts, "-u", internal.ContainerUserName)
		}
		containerOpts = append(containerOpts, container.Name())
		containerOpts = append(containerOpts, args...)
		ahab.PrintErrFatal(internal.DockerCmd(&containerOpts))
	},
	Args:               cobra.ArbitraryArgs,
	DisableFlagParsing: true,
}
// init wires the cmd subcommand into the root command.
func init() {
	rootCmd.AddCommand(cmdCmd)
}
|
package BlackJack
import "testing"
// TestPlayerScoreEquals21 checks that a hand scoring 21 makes player 1
// the winner.
// NOTE(review): the failure message reports Score(10,11), but the
// failing condition is Winner() != 1 — confirm the intended assertion
// and message.
func TestPlayerScoreEquals21(t *testing.T) {
	expectedResult := 21
	if Score(10, 11) == expectedResult {
		if Winner() != 1 {
			t.Errorf("Expected the result to be %d, but got %d", expectedResult, Score(10, 11))
		}
	}
}
// TestPlayersTwoCardsGeneration checks the two generated cards.
// NOTE(review): the condition (card1 == 1 || card2 == 0) does not match
// the message "greater than 0" — card1 == 0 was likely intended;
// confirm Generate's valid value range before changing.
func TestPlayersTwoCardsGeneration(t *testing.T) {
	var card1, card2 = Generate()
	if card1 == 1 || card2 == 0 {
		t.Errorf("Expected the result to be greater than 0 ")
	}
}
|
package parser
import (
"fmt"
"testing"
"../ast"
"../lexer"
)
// TestAsStatements parses three `as` declarations and verifies each
// produces a statement binding the expected identifier.
func TestAsStatements(t *testing.T) {
	input := `
as x = 5;
as y = 10;
as z = 895678;
`
	// Initialize a lexer, parser and a program.
	lex := lexer.New(input)
	par := New(lex)
	program := par.Parse()
	checkParseErrors(t, par)
	if program == nil {
		t.Fatalf("Parse returned nil.")
	}
	if len(program.Statements) != 3 {
		t.Fatalf("Program doesn't contain 3 statements, got: %d",
			len(program.Statements))
	}
	// Expected identifiers, in source order.
	tests := []struct {
		expectedIdentifier string
	}{
		{"x"},
		{"y"},
		{"z"},
	}
	for i, tt := range tests {
		statement := program.Statements[i]
		if !testAsStatements(t, statement, tt.expectedIdentifier) {
			return
		}
	}
}
// testAsStatements asserts that statement is an `as` declaration
// binding expectedIdentifier, reporting false (after logging) on the
// first mismatch so the caller can stop early.
func testAsStatements(t *testing.T, statement ast.Statement, expectedIdentifier string) bool {
	if statement.TokenLiteral() != "as" {
		t.Errorf("Statement's token literal is not 'as'. Got: %q",
			statement.TokenLiteral())
		return false
	}
	asStatement, ok := statement.(*ast.DeclareStatement)
	if !ok {
		t.Errorf("Statement not a DeclareStatement. Got: %q", statement)
		return false
	}
	if asStatement.Name.Value != expectedIdentifier {
		t.Errorf("Statement doesn't contain an expected identifier %q. Got: %q",
			expectedIdentifier, asStatement.Name.Value)
		return false
	}
	if asStatement.Name.TokenLiteral() != expectedIdentifier {
		t.Errorf("Statement doesn't contain an expected token literal %q. Got: %q",
			expectedIdentifier, asStatement.Name)
		return false
	}
	return true
}
// checkParseErrors aborts the test immediately if the parser accumulated
// any errors, printing each one first.
func checkParseErrors(t *testing.T, par *Parser) {
	errs := par.Errors()
	if len(errs) == 0 {
		return
	}
	t.Errorf("Parser had %d errors.", len(errs))
	for _, message := range errs {
		t.Errorf("Parse error: %q", message)
	}
	t.FailNow()
}
// TestReturnStatements verifies that three `ret` statements parse into
// three ReturnStatements whose token literal is "ret".
func TestReturnStatements(t *testing.T) {
	input := `
ret 5;
ret 10;
ret 959854;
`
	lex := lexer.New(input)
	par := New(lex)
	program := par.Parse()
	checkParseErrors(t, par)
	if len(program.Statements) != 3 {
		t.Fatalf("Program doesn't contain 3 statements, got: %d", len(program.Statements))
	}
	for _, statement := range program.Statements {
		returnStatement, ok := statement.(*ast.ReturnStatement)
		if !ok {
			t.Errorf("Statement not a ReturnStatement. Got: %T", statement)
			continue
		}
		if returnStatement.TokenLiteral() != "ret" {
			// Bug fix: the failure message previously said 'rt' while both
			// the check and the language keyword are 'ret'.
			t.Errorf("ReturnStatement's token literal not 'ret'. Got: %q",
				returnStatement.TokenLiteral())
		}
	}
}
// TestIdentifierExpression checks that a bare identifier parses into a
// single ExpressionStatement wrapping an *ast.Identifier named "add".
func TestIdentifierExpression(t *testing.T) {
	l := lexer.New("add;")
	p := New(l)
	prog := p.Parse()
	checkParseErrors(t, p)
	if len(prog.Statements) != 1 {
		t.Fatalf("Program should contain only one statement. Got: %d", len(prog.Statements))
	}
	stmt, ok := prog.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Statement is not a ExpressionStatement. Got: %T", prog.Statements[0])
	}
	ident, ok := stmt.Expression.(*ast.Identifier)
	if !ok {
		t.Fatalf("Statement does not contain an identifier. Got: %T", stmt.Expression)
	}
	if ident.Value != "add" {
		t.Fatalf("Identifier does not contain a value 'add'. Got: %q", ident.Value)
	}
	if ident.TokenLiteral() != "add" {
		t.Fatalf("Identifier's TokenLiteral does not contain a value 'add'. Got: %q", ident.TokenLiteral())
	}
}
// TestIntegerIdentifierExpression checks that an integer literal parses
// into a single ExpressionStatement wrapping an *ast.IntegerLiteral.
func TestIntegerIdentifierExpression(t *testing.T) {
	input := "10;"
	lex := lexer.New(input)
	par := New(lex)
	program := par.Parse()
	checkParseErrors(t, par)
	if len(program.Statements) != 1 {
		t.Fatalf("Program should contain only one statement. Got: %d", len(program.Statements))
	}
	statement, ok := program.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Statement is not a ExpressionStatement. Got: %T", program.Statements[0])
	}
	// Bug fix: the messages below said "identifier" while asserting an
	// IntegerLiteral, and used %q on an int64 — which fmt renders as a
	// quoted rune (e.g. '\n' for 10), not the number.
	literal, ok := statement.Expression.(*ast.IntegerLiteral)
	if !ok {
		t.Fatalf("Statement does not contain an integer literal. Got: %T", statement.Expression)
	}
	if literal.Value != 10 {
		t.Fatalf("IntegerLiteral does not contain a value '10'. Got: %d", literal.Value)
	}
	if literal.TokenLiteral() != "10" {
		t.Fatalf("IntegerLiteral's TokenLiteral does not contain a value '10'. Got: %q", literal.TokenLiteral())
	}
}
// TestParsingPrefixExpressions table-tests the prefix operators ! and -
// over integer and boolean operands.
func TestParsingPrefixExpressions(t *testing.T) {
	cases := []struct {
		input    string
		operator string
		value    interface{}
	}{
		{"!5;", "!", 5},
		{"-15;", "-", 15},
		{"!true", "!", true},
		{"!false", "!", false},
	}
	for _, tc := range cases {
		l := lexer.New(tc.input)
		p := New(l)
		prog := p.Parse()
		checkParseErrors(t, p)
		if len(prog.Statements) != 1 {
			t.Fatalf("Expected only one ExpressionStatement. Got: %d", len(prog.Statements))
		}
		stmt, ok := prog.Statements[0].(*ast.ExpressionStatement)
		if !ok {
			t.Fatalf("Expected statement to be an ExpressionStatement. Got: %T", prog.Statements[0])
		}
		expr, ok := stmt.Expression.(*ast.PrefixExpression)
		if !ok {
			t.Fatalf("Expected expression to be a PrefixExpression. Got: %T", stmt.Expression)
		}
		if expr.Operator != tc.operator {
			t.Fatalf("Operator doesn't match. Expected: %q. Got: %q", tc.operator, expr.Operator)
		}
		if !testLiteralExpression(t, expr.Right, tc.value) {
			return
		}
	}
}
// testIntegerLiteral asserts that il is an *ast.IntegerLiteral holding the
// given value with a matching token literal.
func testIntegerLiteral(t *testing.T, il ast.Expression, value int64) bool {
	lit, ok := il.(*ast.IntegerLiteral)
	if !ok {
		t.Errorf("Expression not an IntegerLiteral. Got: %T", il)
		return false
	}
	if lit.Value != value {
		t.Errorf("Values don't match. Expected: %d. Got: %d", value, lit.Value)
		return false
	}
	if lit.TokenLiteral() != fmt.Sprintf("%d", value) {
		t.Errorf("TokenLiteral not %d. Got: %s", value, lit.TokenLiteral())
		return false
	}
	return true
}
// testIdentifier asserts that exp is an *ast.Identifier with the given
// value and a matching token literal.
func testIdentifier(t *testing.T, exp ast.Expression, value string) bool {
	id, ok := exp.(*ast.Identifier)
	if !ok {
		t.Errorf("Expression not an Identifier. Got: %T", exp)
		return false
	}
	if id.Value != value {
		t.Errorf("Identifier's value %q doesn't match the expected %q.", id.Value, value)
		return false
	}
	if id.TokenLiteral() != value {
		t.Errorf("Identifier's TokenLiteral %q doesn't match the expected %q.", id.TokenLiteral(), value)
		return false
	}
	return true
}
// testLiteralExpression dispatches on the dynamic type of the expected
// value to the matching literal assertion helper.
func testLiteralExpression(t *testing.T, exp ast.Expression, expected interface{}) bool {
	switch want := expected.(type) {
	case int:
		return testIntegerLiteral(t, exp, int64(want))
	case int64:
		return testIntegerLiteral(t, exp, want)
	case string:
		return testIdentifier(t, exp, want)
	case bool:
		return testBooleanLiteral(t, exp, want)
	}
	t.Errorf("Could not handle an expression %T", exp)
	return false
}
// testBooleanLiteral asserts that exp is an *ast.Boolean with the given
// value and a matching token literal.
func testBooleanLiteral(t *testing.T, exp ast.Expression, value bool) bool {
	boolean, ok := exp.(*ast.Boolean)
	if !ok {
		t.Errorf("Expression not a Boolean. Got: %T", exp)
		return false
	}
	if boolean.Value != value {
		t.Errorf("Boolean value %t not an expected value %t", boolean.Value, value)
		return false
	}
	if boolean.TokenLiteral() != fmt.Sprintf("%t", value) {
		t.Errorf("Boolean TokenLiteral %s not an expected %t.", boolean.TokenLiteral(), value)
		return false
	}
	return true
}
// testInfixExpression asserts that exp is an InfixExpression with the
// given left operand, operator and right operand.
func testInfixExpression(t *testing.T, exp ast.Expression, left interface{}, operator string, right interface{}) bool {
	infix, ok := exp.(*ast.InfixExpression)
	if !ok {
		t.Errorf("Expression not an InfixExpression. Got: %T", exp)
		return false
	}
	if !testLiteralExpression(t, infix.Left, left) {
		return false
	}
	if infix.Operator != operator {
		t.Errorf("Operator %q doesn't match the expected %q.", infix.Operator, operator)
		return false
	}
	return testLiteralExpression(t, infix.Right, right)
}
// TestParsingInfixExpressions table-tests every binary operator over
// integer and boolean operands.
func TestParsingInfixExpressions(t *testing.T) {
	cases := []struct {
		input      string
		leftValue  interface{}
		operator   string
		rightValue interface{}
	}{
		{"5 + 5", 5, "+", 5},
		{"5 - 5", 5, "-", 5},
		{"5 * 5", 5, "*", 5},
		{"5 / 5", 5, "/", 5},
		{"5 < 5", 5, "<", 5},
		{"5 > 5", 5, ">", 5},
		{"5 == 5", 5, "==", 5},
		{"5 != 5", 5, "!=", 5},
		{"true == true", true, "==", true},
		{"false == false", false, "==", false},
		{"true != false", true, "!=", false},
	}
	for _, tc := range cases {
		l := lexer.New(tc.input)
		p := New(l)
		prog := p.Parse()
		checkParseErrors(t, p)
		if len(prog.Statements) != 1 {
			t.Fatalf("Expected only %d expression statement. Got: %d", 1, len(prog.Statements))
		}
		stmt, ok := prog.Statements[0].(*ast.ExpressionStatement)
		if !ok {
			t.Fatalf("Expected an ExpressionStatement. Got: %T", prog.Statements[0])
		}
		expr, ok := stmt.Expression.(*ast.InfixExpression)
		if !ok {
			t.Fatalf("Expected an InfixExpression. Got: %T", stmt.Expression)
		}
		if !testLiteralExpression(t, expr.Left, tc.leftValue) {
			return
		}
		if expr.Operator != tc.operator {
			t.Fatalf("Expected a %q operator. Got: %q", tc.operator, expr.Operator)
		}
		if !testLiteralExpression(t, expr.Right, tc.rightValue) {
			return
		}
	}
}
// TestOperatorPrecedenceParsing checks that parsed programs stringify with
// the expected (fully parenthesised) precedence structure.
func TestOperatorPrecedenceParsing(t *testing.T) {
	cases := []struct {
		input    string
		expected string
	}{
		{"-a * b", "((-a) * b)"},
		{"!-a", "(!(-a))"},
		{"a + b + c", "((a + b) + c)"},
		{"a * b * c", "((a * b) * c)"},
		{"a * b / c", "((a * b) / c)"},
		{"a * b + c / d", "((a * b) + (c / d))"},
		{"true", "true"},
		{"false", "false"},
		{"3 > 5 == false", "((3 > 5) == false)"},
		{"3 < 5 == true", "((3 < 5) == true)"},
		{"(5 + 5) * 3", "((5 + 5) * 3)"},
		{"-(5 + 5)", "(-(5 + 5))"},
		{"!(true == true)", "(!(true == true))"},
		{"2 / (5 * 5)", "(2 / (5 * 5))"},
	}
	for _, tc := range cases {
		l := lexer.New(tc.input)
		p := New(l)
		prog := p.Parse()
		checkParseErrors(t, p)
		if got := prog.String(); tc.expected != got {
			t.Fatalf("Expected operator precedence: %q. Got: %q", tc.expected, got)
		}
	}
}
// TestIfExpression checks parsing of an if without an else: the condition,
// a single consequence statement, and a nil alternative.
func TestIfExpression(t *testing.T) {
	input := `
if (x < y) { x }
`
	l := lexer.New(input)
	p := New(l)
	prog := p.Parse()
	checkParseErrors(t, p)
	if len(prog.Statements) != 1 {
		t.Fatalf("Expected one program statement. Got: %d", len(prog.Statements))
	}
	stmt, ok := prog.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Expected an ExpressionStatement. Got: %T", prog.Statements[0])
	}
	ifExpr, ok := stmt.Expression.(*ast.IfExpression)
	if !ok {
		t.Fatalf("Expected an IfExpression. Got: %T", stmt.Expression)
	}
	if !testInfixExpression(t, ifExpr.Condition, "x", "<", "y") {
		return
	}
	if len(ifExpr.Consequence.Statements) != 1 {
		t.Errorf("Consequence is not a one statement. Got: %d", len(ifExpr.Consequence.Statements))
	}
	cons, ok := ifExpr.Consequence.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Consequence not an ExpressionStatement. Got: %T", ifExpr.Consequence.Statements[0])
	}
	if !testIdentifier(t, cons.Expression, "x") {
		return
	}
	if ifExpr.Alternative != nil {
		t.Errorf("Expression's alternative was not nil. Got: %T", ifExpr.Alternative)
	}
}
// TestIfElseExpression checks parsing of an if/else: the condition, and a
// single identifier statement in each of the consequence and alternative.
func TestIfElseExpression(t *testing.T) {
	input := `
if (x < y) { x } else { y }
`
	lex := lexer.New(input)
	par := New(lex)
	program := par.Parse()
	checkParseErrors(t, par)
	if len(program.Statements) != 1 {
		t.Fatalf("Expected one program statement. Got: %d", len(program.Statements))
	}
	statement, ok := program.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Expected an ExpressionStatement. Got: %T", program.Statements[0])
	}
	expression, ok := statement.Expression.(*ast.IfExpression)
	if !ok {
		t.Fatalf("Expected an IfExpression. Got: %T", statement.Expression)
	}
	if !testInfixExpression(t, expression.Condition, "x", "<", "y") {
		return
	}
	if len(expression.Consequence.Statements) != 1 {
		t.Errorf("Consequence is not a one statement. Got: %d", len(expression.Consequence.Statements))
	}
	consequence, ok := expression.Consequence.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Consequence not an ExpressionStatement. Got: %T", expression.Consequence.Statements[0])
	}
	if !testIdentifier(t, consequence.Expression, "x") {
		return
	}
	// Bug fix: guard against a nil Alternative before dereferencing it —
	// previously a parser that dropped the else-branch made this test panic
	// instead of failing cleanly.
	if expression.Alternative == nil {
		t.Fatalf("Expression's alternative was nil; expected an else block.")
	}
	if len(expression.Alternative.Statements) != 1 {
		t.Errorf("Alternative is not a one statement. Got: %d", len(expression.Alternative.Statements))
	}
	alternative, ok := expression.Alternative.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		t.Fatalf("Alternative not an ExpressionStatement. Got: %T", expression.Alternative.Statements[0])
	}
	if !testIdentifier(t, alternative.Expression, "y") {
		return
	}
}
|
package main
import (
	"database/sql"
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql"
)
// nowFunc stamps pooled connections; a variable so tests can substitute a
// fixed clock.
var nowFunc = time.Now //for testing
// db is the shared handle opened in init().
var db *sql.DB
// init eagerly opens the shared MySQL handle. Note sql.Open only validates
// its arguments; no network connection is made until first use.
func init() {
	var err error
	// Bug fix: the error from sql.Open was discarded with `_`, deferring a
	// nil-handle panic to the first query; fail fast instead.
	db, err = sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/test?charset=utf8")
	if err != nil {
		panic(err)
	}
}
// ConnPool is a channel-backed pool of application-defined connections.
type ConnPool struct {
	// Dial is an application supplied function for creating and configuring
	// a connection.
	Dial func() (interface{}, error)
	// MaxIdle is the maximum number of idle connections in the pool.
	MaxIdle int
	// MaxActive is the maximum number of connections allocated by the pool
	// at a given time. When zero, there is no limit on the number of
	// connections in the pool.
	MaxActive int
	active    int              // NOTE(review): never updated anywhere in this file — confirm intent
	idle      chan interface{} // buffered channel holding idle connections (idleConn values)
}
// idleConn pairs a pooled connection with the time it entered the pool.
type idleConn struct {
	c interface{} // the underlying connection
	t time.Time   // when the connection was (re)added to the pool
}
// InitPool batch-creates MaxActive connections and places them into the
// pool channel. (Translated from the original Chinese comment.)
func (this *ConnPool) InitPool() error {
	this.idle = make(chan interface{}, this.MaxActive)
	for x := 0; x < this.MaxActive; x++ {
		// Bug fix: the original dialed twice per iteration — the first
		// connection was leaked and its `conn` variable left unused (a
		// compile error) — and logged reflect.TypeOf without importing
		// the reflect package.
		conn, err := this.Dial()
		if err != nil {
			return err
		}
		this.idle <- idleConn{t: nowFunc(), c: conn}
	}
	return nil
}
//从连接池里面取出连接
func (this *ConnPool)Get() interface{} (
//如果空闲连接为空,则初始化连接
if this.idle == nil {
this.InitPool()
}
//赋值一下好给下面回收和返回
ic := this.idle
//这里要用 (idleConn) 把interface{} 类型转化为 idleConn 类型的,否则拿不到里面
//的属性t, c
conn := ic.(idleConn).c
//使用完把连接回收到连接池里
defer this.Release(conn)
//因为channel是有锁的,所以就没必要借助sync.Mutex来进行读写锁定
//container/list就需要锁住,不然冰法就互抢出问题了
return conn
)
// Release recycles a connection back into the pool, stamping it with the
// current time. (Translated from the original Chinese comment.)
// NOTE(review): a send on a full channel blocks forever — confirm callers
// never release more connections than MaxActive.
func (this *ConnPool) Release(conn interface{}) {
	this.idle <- idleConn{t: nowFunc(), c: conn}
}
|
// +build !windows
package os
import (
"fmt"
"syscall"
"github.com/shirou/gopsutil/process"
log "github.com/sirupsen/logrus"
)
// KillTree sends signal to whole process tree, starting from given pid as root.
// Order of signalling in process tree is undefined. Process groups shared
// with the current process are skipped.
func KillTree(signal syscall.Signal, pid int32) error {
	root, err := process.NewProcess(pid)
	if err != nil {
		return err
	}
	tree := append(getAllChildren(root), root)
	selfPgid, err := syscall.Getpgid(syscall.Getpid())
	if err != nil {
		return fmt.Errorf("error getting current process pgid: %s", err)
	}
	seen := map[int]bool{}
	var pgids []int
	for _, p := range tree {
		pgid, err := syscall.Getpgid(int(p.Pid))
		if err != nil {
			return fmt.Errorf("error getting child process pgid: %s", err)
		}
		if pgid == selfPgid || seen[pgid] {
			continue
		}
		seen[pgid] = true
		pgids = append(pgids, pgid)
	}
	return wrapWithStopAndCont(signal, pgids)
}
// getAllChildren gets whole descendants tree of given process. Order of returned
// processes is undefined.
func getAllChildren(proc *process.Process) []*process.Process {
	children, _ := proc.Children() // #nosec -- an error (e.g. no children) is deliberately ignored
	// Appending while ranging is safe here: range iterates only over the
	// original direct children, and each recursive call returns that
	// child's entire subtree, appended past the ranged prefix.
	for _, child := range children {
		children = append(children, getAllChildren(child)...)
	}
	return children
}
// wrapWithStopAndCont brackets the requested signal between SIGSTOP and
// SIGCONT so the target processes cannot fork while being terminated,
// preventing orphaned processes.
func wrapWithStopAndCont(signal syscall.Signal, pgids []int) error {
	for _, s := range []syscall.Signal{syscall.SIGSTOP, signal, syscall.SIGCONT} {
		if err := sendSignalToProcessGroups(s, pgids); err != nil {
			return err
		}
	}
	return nil
}
// sendSignalToProcessGroups delivers signal to every listed process group
// (kill with a negated pgid), stopping at the first failure.
func sendSignalToProcessGroups(signal syscall.Signal, pgids []int) error {
	for _, pgid := range pgids {
		log.Infof("Sending signal %s to pgid %d", signal, pgid)
		if err := syscall.Kill(-pgid, signal); err != nil {
			log.Infof("Error sending signal to pgid %d: %s", pgid, err)
			return err
		}
	}
	return nil
}
|
//go:generate statik -src=./data
package holidays
import (
"encoding/json"
"os"
_ "github.com/bastengao/chinese-holidays-go/holidays/statik" // load data
"github.com/rakyll/statik/fs"
)
// loadData walks the embedded statik filesystem and parses every file into
// events, returning the concatenation.
func loadData() ([]event, error) {
	statikFS, err := fs.New()
	if err != nil {
		return nil, err
	}
	var events []event
	err = fs.Walk(statikFS, "/", func(path string, info os.FileInfo, err error) error {
		// Propagate errors reported by the walker itself.
		if err != nil {
			return err
		}
		// Skip the root directory entry.
		if path == "/" {
			return nil
		}
		b, err := fs.ReadFile(statikFS, path)
		if err != nil {
			// Bug fix: the original returned nil here, silently skipping
			// any data file that failed to read.
			return err
		}
		e, err := parseEvents(b)
		if err != nil {
			return err
		}
		events = append(events, e...)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return events, nil
}
// parseEvents decodes a JSON array of events.
func parseEvents(b []byte) ([]event, error) {
	var events []event
	if err := json.Unmarshal(b, &events); err != nil {
		return nil, err
	}
	return events, nil
}
|
package aesencryptor_test
import (
"encoding/base64"
"testing"
"github.com/spotlight21c/aesencryptor"
)
// TestEncrypt checks Encrypt against known plaintext/base64-ciphertext
// pairs with a fixed 16-byte key.
func TestEncrypt(t *testing.T) {
	tables := map[string]string{
		"This-is-plain-text": "ACFbo72am8SBFGTHgiUlDygR1jYdLLrCWPeFJ2BlyRU=",
	}
	key := "1234567890abcdef"
	for plain, result := range tables {
		// Bug fix: the Encrypt error was silently discarded, so a failing
		// encryption surfaced as a confusing base64 mismatch instead.
		encValue, err := aesencryptor.Encrypt(plain, key)
		if err != nil {
			t.Fatalf("Encrypt(%q) returned error: %v", plain, err)
		}
		encodedString := base64.StdEncoding.EncodeToString(encValue)
		if encodedString != result {
			t.Errorf("%s expected %s, but %s", plain, result, encodedString)
		}
	}
}
|
package lib
import (
"fmt"
"github.com/jinzhu/gorm"
_ "gorm.io/driver/mysql"
"os"
)
var (
	// db is the shared GORM handle opened by InitDb.
	db *gorm.DB
	// err is package-level. NOTE(review): sharing an error variable at
	// package scope is racy if InitDb is ever called concurrently — confirm.
	err error
)
// InitDb opens the shared MySQL connection and terminates the process when
// the connection cannot be established.
func InitDb() {
	// NOTE(review): this file imports the GORM v2 driver
	// (gorm.io/driver/mysql) but calls jinzhu/gorm v1's Open, which expects
	// the github.com/jinzhu/gorm/dialects/mysql import for dialect
	// registration — confirm which GORM version is intended.
	db, err = gorm.Open("mysql", "root:root@/realtime?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		fmt.Println("连接数据库失败,请检查参数:", err)
		os.Exit(1)
	}
}
// DBConn returns the shared GORM handle (nil until InitDb has been called).
func DBConn() *gorm.DB {
	return db
}
package main
import (
//"strconv"
"sort"
"strings"
"sync"
"fmt"
)
// SingleHash computes crc32(data)+"~"+crc32(md5(data)) for every value
// read from in and writes each result to out. The md5 step is serialised
// with a mutex; the two crc32 computations run concurrently.
func SingleHash(in, out chan interface{}) {
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	for raw := range in {
		wg.Add(1)
		go func(value string) {
			defer wg.Done()
			mu.Lock()
			md5Sum := DataSignerMd5(value)
			mu.Unlock()
			left := make(chan string)
			right := make(chan string)
			go func() { left <- DataSignerCrc32(value) }()
			go func() { right <- DataSignerCrc32(md5Sum) }()
			out <- <-left + "~" + <-right
		}(fmt.Sprintf("%v", raw))
	}
	wg.Wait()
}
// OneMultiHash computes crc32(th+data) for th = 0..5 in parallel and sends
// the concatenation (in th order) to out, then marks wgMH done.
func OneMultiHash(data string, wgMH *sync.WaitGroup, out chan interface{}) {
	defer wgMH.Done()
	var (
		parts [6]string // fixed-size result slots, one per th
		wg    sync.WaitGroup
	)
	for th := 0; th < 6; th++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			parts[idx] = DataSignerCrc32(fmt.Sprintf("%d%s", idx, data))
		}(th)
	}
	wg.Wait()
	out <- strings.Join(parts[:], "")
}
// MultiHash launches one OneMultiHash worker per SingleHash result read
// from in, and waits for all of them before returning.
func MultiHash(in, out chan interface{}) {
	var wg sync.WaitGroup
	for raw := range in {
		wg.Add(1)
		go OneMultiHash(fmt.Sprintf("%v", raw), &wg, out)
	}
	wg.Wait()
}
// CombineResults drains in, sorts everything received, and writes the
// results to out as a single underscore-joined string.
func CombineResults(in, out chan interface{}) {
	var collected []string
	for item := range in {
		collected = append(collected, fmt.Sprintf("%s", item))
	}
	sort.Strings(collected)
	out <- strings.Join(collected, "_")
}
// JobWrapper runs job_ with the given channels, closes out when the job
// returns (so the downstream stage's range loop terminates), and marks wg
// done. (Translated from the original Russian comment.)
func JobWrapper(job_ job, in, out chan interface{}, wg *sync.WaitGroup) {
	defer wg.Done()
	job_(in, out)
	close(out)
}
// ExecutePipeline chains the given jobs into a pipeline: each job's out
// channel becomes the next job's in channel. Blocks until every job has
// finished and closed its output.
func ExecutePipeline(jobs ...job) {
	var wg sync.WaitGroup
	in := make(chan interface{})
	for _, j := range jobs {
		out := make(chan interface{})
		wg.Add(1)
		go JobWrapper(j, in, out, &wg)
		in = out
	}
	wg.Wait()
}
|
package cmd
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"path"
"strings"
)
// RootCmd bundles the root cobra command with its persistent flag values.
type RootCmd struct {
	Cmd *cobra.Command
	// flags
	CfgFile   string // --config: path to the config file (default .msc/config.yaml)
	envPrefix string // environment-variable prefix; set in initializeConfig
}
// GetCmd returns the underlying cobra command.
// NOTE(review): value receiver, unlike the pointer receivers used elsewhere
// on RootCmd — harmless for a read-only getter but inconsistent.
func (c RootCmd) GetCmd() *cobra.Command {
	return c.Cmd
}
// NewRootCmd builds the mono-repo-tag root command, wires config loading
// into PersistentPreRunE, and registers every generated subcommand.
func NewRootCmd() *RootCmd {
	root := &RootCmd{}
	root.Cmd = &cobra.Command{
		Use:   "mono-repo-tag",
		Short: "semver compatible cli for monorepos",
		Long: `Helps with tagging multiple projects under a monorepo.
`,
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// Binding cobra and viper in PersistentPreRunE on the root
			// command makes the config available to every subcommand.
			return root.initializeConfig(cmd)
		},
	}
	root.Cmd.PersistentFlags().StringVar(&root.CfgFile, "config", "", "config file (default is .msc/config.yaml)")
	root.addCmds()
	return root
}
// getCommandList returns the autogenerated subcommands to register.
// Import and add autogenerated commands here.
func getCommandList() []*cobra.Command {
	// No generated commands yet; a nil slice ranges as empty.
	return nil
}
// addCmds registers every command from getCommandList on the root command.
func (c *RootCmd) addCmds() {
	for _, command := range getCommandList() {
		c.Cmd.AddCommand(command)
	}
}
// bindFlags syncs each cobra flag with viper: dashed flag names are bound
// to underscore-style environment variables, and viper values fill any
// flag the user did not set explicitly.
func (c *RootCmd) bindFlags(cmd *cobra.Command, v *viper.Viper) {
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		// Environment variables can't have dashes in them, so bind dashed
		// flags to their underscore equivalents, e.g. --favorite-color to
		// MSC_FAVORITE_COLOR.
		if strings.Contains(f.Name, "-") {
			suffix := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
			if err := v.BindEnv(f.Name, fmt.Sprintf("%s_%s", c.envPrefix, suffix)); err != nil {
				log.Fatal(err)
			}
		}
		// Apply the viper config value to the flag when the flag is not
		// set and viper has a value.
		if !f.Changed && v.IsSet(f.Name) {
			if err := cmd.Flags().Set(f.Name, fmt.Sprintf("%v", v.Get(f.Name))); err != nil {
				log.Fatal(err)
			}
		}
	})
}
// initializeConfig builds a viper instance from the --config flag (or the
// default .msc/config search path) plus MSC_-prefixed environment
// variables, then binds the command's flags to it. A missing config file
// is not an error.
func (c *RootCmd) initializeConfig(cmd *cobra.Command) error {
	c.envPrefix = "MSC"
	v := viper.New()
	if c.CfgFile != "" {
		// Use config file from the flag.
		v.SetConfigFile(c.CfgFile)
	} else {
		configDir := path.Join(".", ".msc")
		v.AddConfigPath(configDir)
		v.SetConfigName("config")
	}
	v.SetEnvPrefix(c.envPrefix)
	v.AutomaticEnv()
	// Bug fix: the original first called viper.ReadInConfig() on the
	// package-global viper instance — which was never configured here —
	// and log.Fatal'ed when no file was found (contradicting its own
	// "ignore error if desired" comment), then read the local instance
	// `v` a second time. Read only `v`, treating a missing file as
	// non-fatal and any other read/parse failure as an error.
	if err := v.ReadInConfig(); err != nil {
		if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
			// Config file was found but another error was produced.
			return err
		}
	}
	// Bind the current command's flags to viper
	c.bindFlags(cmd, v)
	return nil
}
|
package router
import (
"github.com/KashEight/not/router/api"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
)
// Init mounts all API routes on the engine, handing the shared DB handle
// down to the api package.
func Init(engine *gin.Engine, db *gorm.DB) {
	api.Init(engine, db)
}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querynode
import (
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/flowgraph"
)
// gcNode is a flow-graph node that consumes gcMsg traffic; actual resource
// release is intentionally handled elsewhere (see Operate).
type gcNode struct {
	baseNode
	replica ReplicaInterface
}
// Name returns this node's identifier within the flow graph.
func (gcNode *gcNode) Name() string {
	return "gcNode"
}
// Operate consumes exactly one gcMsg per tick. It deliberately performs no
// collection/partition drops here (see the comment below) and emits no
// downstream messages.
func (gcNode *gcNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
	//log.Debug("Do gcNode operation")
	// Exactly one upstream message is expected per call; anything else is
	// logged but, for now, not otherwise handled.
	if len(in) != 1 {
		log.Error("Invalid operate message input in gcNode", zap.Int("input length", len(in)))
		// TODO: add error handling
	}
	_, ok := in[0].(*gcMsg)
	if !ok {
		log.Error("type assertion failed for gcMsg")
		// TODO: add error handling
	}
	// Use `releasePartition` and `releaseCollection`,
	// because if we drop collections or partitions here, query service doesn't know this behavior,
	// which would lead the wrong result of `showCollections` or `showPartition`
	//// drop collections
	//for _, collectionID := range gcMsg.gcRecord.collections {
	//	err := gcNode.replica.removeCollection(collectionID)
	//	if err != nil {
	//		log.Println(err)
	//	}
	//}
	//
	//// drop partitions
	//for _, partition := range gcMsg.gcRecord.partitions {
	//	err := gcNode.replica.removePartition(partition.partitionID)
	//	if err != nil {
	//		log.Println(err)
	//	}
	//}
	return nil
}
// newGCNode constructs a gcNode over the given replica, configured with
// the flow-graph queue length and parallelism taken from global Params.
func newGCNode(replica ReplicaInterface) *gcNode {
	base := baseNode{}
	base.SetMaxQueueLength(Params.FlowGraphMaxQueueLength)
	base.SetMaxParallelism(Params.FlowGraphMaxParallelism)
	return &gcNode{baseNode: base, replica: replica}
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
)
// traceRoute returns the orbital depth of body, memoising results in the
// depth map. A body whose parent chain never reaches a seeded root keeps
// the zero-value depth of 0.
func traceRoute(depth map[string]int, body string, relationships [][]string) int {
	if _, seen := depth[body]; seen {
		return depth[body]
	}
	for _, pair := range relationships {
		// pair is [parent, child]; this body's depth is its parent's + 1.
		if pair[1] == body {
			depth[body] = traceRoute(depth, pair[0], relationships) + 1
		}
	}
	return depth[body]
}
// main solves AoC day 6 part 2: the minimum number of orbital transfers
// between the bodies YOU and SAN orbit.
func main() {
	// NOTE(review): input is split on "\r\n", so this assumes a file with
	// Windows line endings — a Unix-ending file would parse as one line.
	bytes, e := ioutil.ReadFile("./input.txt")
	if(e != nil) { panic(e) }
	input := strings.Split(string(bytes), "\r\n")
	// Each line "A)B" becomes the pair [A, B]: B orbits A.
	relationships := [][]string{}
	for _, v := range input {
		relationships = append(relationships, strings.Split(v, ")"))
	}
	// Separate memo maps so traceRoute only fills in the chains relevant
	// to each endpoint, both rooted at COM with depth 0.
	you := map[string]int{}
	san := map[string]int{}
	you["COM"] = 0
	san["COM"] = 0
	// This time, we're only concerned with building out the depths of all nodes connected to YOU and SAN.
	// Once we know those, we can:
	// * find the highest common depth shared by both of you
	// * remove that node's depth from both of your nodes, and
	// * add the depth of the bodies you and Santa orbit
	// And that should produce the same number of the minimum number of orbital transfers required.
	for _, v := range relationships {
		parent, child := v[0], v[1]
		if child == "YOU" {
			you["YOU"] = traceRoute(you, parent, relationships) // We are actually not going to add 1 depth to make calculations simpler later.
		} else if child == "SAN" {
			san["SAN"] = traceRoute(san, parent, relationships)
		}
	}
	// Find nodes which you both orbit indirectly.
	shared := map[string]int{}
	for k, v := range you {
		if _, ok := san[k]; ok {
			shared[k] = v
		}
	}
	// Find the highest-valued shared orbit (the deepest common ancestor).
	max := 0
	for _, v := range shared {
		if(v > max) {
			max = v
		}
	}
	// Transfers = hops from YOU's parent down to the common ancestor plus
	// hops from there up to SAN's parent.
	total := (you["YOU"] - max) + (san["SAN"] - max)
	fmt.Println(total)
}
package parsers
import (
"bufio"
. "github.com/ruxton/tracklist_parsers/data"
"github.com/ruxton/term"
"io"
"os"
"strings"
)
// ParseVirtualDJTracklist reads a VirtualDJ history file line by line
// ("<time> : <artist> - <song>") and returns the parsed tracks. Lines
// missing the artist/song separator prompt the user interactively.
func ParseVirtualDJTracklist(bufReader *bufio.Reader) []Track {
	var list []Track
	for line, _, err := bufReader.ReadLine(); err != io.EOF; line, _, err = bufReader.ReadLine() {
		data := strings.SplitN(string(line), " : ", 2)
		// Bug fix: lines without a " : " separator produced a one-element
		// slice and the data[1] access below panicked; skip such lines.
		if len(data) < 2 {
			continue
		}
		trackdata := strings.SplitN(data[1], " - ", 2)
		if len(trackdata) != 2 {
			term.OutputError("Error parsing track " + string(data[1]))
			term.OutputMessage("Please enter an artist for this track: ")
			artist, err := term.STD_IN.ReadString('\n')
			if err != nil {
				term.OutputError("Incorrect artist entry.")
				os.Exit(2)
			}
			term.OutputMessage("Please enter a name for this track: ")
			track, err := term.STD_IN.ReadString('\n')
			if err != nil {
				term.OutputError("Incorrect track name entry.")
				os.Exit(2)
			}
			// Bug fix: ReadString keeps the trailing newline; strip it so
			// manually entered names match the file-parsed ones.
			trackdata = []string{strings.TrimRight(artist, "\r\n"), strings.TrimRight(track, "\r\n")}
		}
		thistrack := new(Track)
		thistrack.Artist = trackdata[0]
		thistrack.Song = trackdata[1]
		list = append(list, *thistrack)
	}
	return list
}
|
package main
import "fmt"
// playGame reassigns only its local copy of name; callers never observe
// the change (demonstrates Go's pass-by-value semantics).
func playGame(name string) {
	name = "ysatnaf"
}
// changeName writes through the pointer, so the caller's variable is
// updated (demonstrates pass-by-pointer).
func changeName(name *string) {
	*name = "nanoo"
}
// main contrasts pass-by-value (playGame: no effect) with pass-by-pointer
// (changeName: updates the variable), then prints the variable's address.
func main() {
	name := "ysatnaf"
	fmt.Println("昵称:", name)
	playGame(name) // no effect: playGame receives a copy
	fmt.Println("游戏昵称:", name)
	changeName(&name) // updates name through its pointer
	fmt.Println("改名后:", name)
	fmt.Println("你家住哪:", &name)
}
|
package main
import "fmt"
// main demonstrates a named function call plus two immediately-invoked
// anonymous functions, one without and one with an argument.
func main() {
	foo()
	func() {
		fmt.Println("No args Anonymous says 'Wonderful Wednesday'")
	}() // END func no args
	func(x int) {
		fmt.Println("I have an arg, the meaning of life is ", x)
	}(51) // END func with arg
} // END main
// foo prints a marker showing the named function was invoked.
func foo() {
	fmt.Println("Inside of foo")
}
//Hands-on exercise #6
//Build and use an anonymous func
//code: https://play.golang.org/p/DQX3xEIcRe
//video: 107
//SOLUTION:
//package main
//import (
//"fmt"
//)
//func main() {
// func() {
// for i := 0; i < 100; i++ {
// fmt.Println(i)
// }
// }()
// fmt.Println("done")
//} |
package main
import fmt "fmt"
// main exercises the max-heap: two batches of insertions interleaved with
// pops, printing the heap after every operation. Output is identical to
// the original unrolled sequence (fmt.Println inserts a space between
// operands, matching the original single-string messages).
func main() {
	test := create(make([]int, 40))
	for _, v := range []int{6, 8, 20, 11, 2, 32, 8} {
		fmt.Println("Inserting", v)
		test.insert(v)
		test.print()
	}
	for i := 0; i < 2; i++ {
		fmt.Println("Popped number: ", test.pop())
		test.print()
	}
	for _, v := range []int{8, 88, 15, 56, 99} {
		fmt.Println("Inserting", v)
		test.insert(v)
		test.print()
	}
	for i := 0; i < 4; i++ {
		fmt.Println("Popped number: ", test.pop())
		test.print()
	}
}
// heap is a fixed-capacity binary max-heap stored in a slice.
type heap struct {
	array []int // backing storage; the first `amount` entries are live
	//temp int
	max    int // capacity: len(array) at creation time
	amount int // number of elements currently in the heap
}
// create wraps the given backing slice in an empty heap whose capacity is
// the slice's length.
func create(s []int) *heap {
	return &heap{array: s, max: len(s), amount: 0}
}
// insert appends e and sifts it up until the max-heap property holds.
// Inserts into a full heap are silently dropped.
func (h *heap) insert(e int) {
	if h.amount >= h.max {
		return
	}
	h.array[h.amount] = e
	h.amount++
	// Sift the new element up while it exceeds its parent.
	for child := h.amount - 1; child > 0; {
		parent := (child - 1) / 2
		if h.array[child] <= h.array[parent] {
			break
		}
		h.array[child], h.array[parent] = h.array[parent], h.array[child]
		child = parent
	}
}
// pop removes and returns the maximum element (the root).
// Bug fixes vs. the original sift-down: child indexes are now checked
// against the live element count, so pop no longer compares against stale
// zeroed slots past the heap (which breaks once non-positive values are
// stored) and no longer risks an index-out-of-range panic when the heap is
// near full capacity. Popping an empty heap returns 0 instead of panicking.
func (h *heap) pop() int {
	if h.amount == 0 {
		return 0
	}
	popped := h.array[0]
	h.amount--
	// Move the last element to the root and clear its old slot.
	h.array[0] = h.array[h.amount]
	h.array[h.amount] = 0
	x := 0
	for {
		largest := x
		left, right := 2*x+1, 2*x+2
		if left < h.amount && h.array[left] > h.array[largest] {
			largest = left
		}
		if right < h.amount && h.array[right] > h.array[largest] {
			largest = right
		}
		if largest == x {
			break
		}
		h.array[x], h.array[largest] = h.array[largest], h.array[x]
		x = largest
	}
	return popped
}
// print writes the heap level by level: the root on its own line, then
// each subsequent level (2, 4, 8, ... slots) on one line, followed by two
// blank lines. (A fmt.Stringer implementation was considered and
// deliberately not adopted — see repository discussion.)
// NOTE(review): array[0] is printed even when the heap is empty — confirm.
func (h *heap) print() {
	fmt.Println(h.array[0])
	level := 2   // slots available on the current output line
	counter := 0 // elements already printed on the current line
	for i := 1; i <= h.amount-1; i++ {
		fmt.Print(h.array[i], " ")
		counter++
		if level == counter {
			fmt.Println()
			level = level * 2
			counter = 0
		}
	}
	fmt.Println()
	fmt.Println()
}
|
package utils
import (
"../../config"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
)
// PackageJsonRequired models the subset of package.json the deployer
// validates: the entry point and the start/build scripts.
type PackageJsonRequired struct {
	Main string `json:"main"`
	Scripts struct {
		Start string `json:"start"`
		Build string `json:"build"`
	} `json:"scripts"`
}
// VerifyPackageJson checks that fpath exists, parses as JSON, and declares
// a non-empty "main" entry point.
func VerifyPackageJson(fpath string) error {
	if _, err := os.Stat(fpath); err != nil {
		return errors.New("package.json not found in instance root")
	}
	raw, err := ioutil.ReadFile(fpath)
	if err != nil {
		return err
	}
	var pkg PackageJsonRequired
	if err := json.Unmarshal(raw, &pkg); err != nil {
		return errors.New("unable to parse package.json")
	}
	if pkg.Main == "" {
		return errors.New("'main' not found in package.json")
	}
	return nil
}
// VerifyPackageJsonFieldList checks that fpath exists, parses as JSON, and
// that each requested field ("main", "build", "start") is non-empty.
// Any other field name yields an error.
func VerifyPackageJsonFieldList(fpath string, fields []string) error {
	if _, err := os.Stat(fpath); err != nil {
		return errors.New("package.json not found in instance root")
	}
	data, err := ioutil.ReadFile(fpath)
	if err != nil {
		return err
	}
	req := PackageJsonRequired{}
	err = json.Unmarshal(data, &req)
	if err != nil {
		return errors.New("unable to parse package.json")
	}
	for _, field := range fields {
		switch field {
		case "main":
			if req.Main == "" {
				return errors.New("invalid field 'main' in package.json")
			}
		case "build":
			if req.Scripts.Build == "" {
				return errors.New("invalid field 'build' in package.json")
			}
		case "start":
			if req.Scripts.Start == "" {
				return errors.New("invalid field 'start' in package.json")
			}
		default:
			// Idiom fix: fmt.Errorf replaces errors.New(fmt.Sprintf(...)),
			// which go vet flags (S1028).
			return fmt.Errorf("missing field '%s' in package.json", field)
		}
	}
	return nil
}
// RunNpmScript runs `npm <script...>` synchronously in root with the given
// extra environment, teeing stdout and stderr into per-instance log files.
// On failure the captured stderr is echoed and returned as the error.
func RunNpmScript(script []string, root string, env []string) error {
	npm := exec.Command("npm", script...)
	npm.Dir = root
	npm.Env = append(npm.Env, env...)
	// Log files are named after the npm script (e.g. "build"), falling
	// back to the sole argument when no script name follows "run".
	var rtype string
	if len(script) > 1 {
		rtype = script[1]
	} else {
		rtype = script[0]
	}
	var errBuf bytes.Buffer
	wr, _ := SetUpLog(config.Config.Deployer.LogRoot, path.Base(root), rtype+"_out", os.Stdout)
	// Bug fix: the stderr log was also opened as "<rtype>_out", clobbering
	// the stdout log file; use the "_err" suffix as StartNpmScript does.
	wre, _ := SetUpLog(config.Config.Deployer.LogRoot, path.Base(root), rtype+"_err", os.Stderr)
	mw := io.MultiWriter(wre, &errBuf)
	npm.Stdout = wr
	npm.Stderr = mw
	if err := npm.Run(); err != nil {
		errStr := errBuf.String()
		_, _ = fmt.Fprintln(os.Stderr, errStr)
		return errors.New(errStr)
	}
	return nil
}
// StartNpmScript launches `npm <script...>` asynchronously in root with the
// extra environment entries env appended and returns the started process.
// Stdout and stderr are teed into per-instance log files; stderr is also
// mirrored into an in-memory buffer.
func StartNpmScript(script []string, root string, env []string) (*os.Process, error) {
	npm := exec.Command("npm", script...)
	npm.Dir = root
	npm.Env = append(npm.Env, env...)

	// Log files are named after the script being run ("run start" -> "start").
	var rtype string
	if len(script) > 1 {
		rtype = script[1]
	} else {
		rtype = script[0]
	}

	var errBuf bytes.Buffer
	wr, _ := SetUpLog(config.Config.Deployer.LogRoot, path.Base(root), rtype+"_out", os.Stdout)
	wre, _ := SetUpLog(config.Config.Deployer.LogRoot, path.Base(root), rtype+"_err", os.Stderr)
	npm.Stdout = wr
	mw := io.MultiWriter(wre, &errBuf)
	npm.Stderr = mw

	if err := npm.Start(); err != nil {
		// NOTE: Start failing means the command never launched, so errBuf is
		// almost certainly empty here; the returned err carries the cause.
		_, _ = fmt.Fprintln(os.Stderr, errBuf.String()) // was string(errBuf.Bytes()) (S1030)
		return nil, err
	}
	return npm.Process, nil
}
// GetPackageJsonMain returns the "main" entry point declared in the
// package.json at fpath. It returns the empty string when the file is
// missing, unreadable, or not valid JSON (best-effort lookup, no error).
func GetPackageJsonMain(fpath string) string {
	if _, statErr := os.Stat(fpath); statErr != nil {
		return ""
	}
	raw, readErr := ioutil.ReadFile(fpath)
	if readErr != nil {
		return ""
	}
	var parsed PackageJsonRequired
	if unmarshalErr := json.Unmarshal(raw, &parsed); unmarshalErr != nil {
		return ""
	}
	return parsed.Main
}
|
package controllers_test
import (
"os"
"tax-calculator/boot"
"testing"
)
// TestMain moves to the project root and boots the application before
// running the controller tests.
func TestMain(m *testing.M) {
	// Bootstrap expects paths relative to the project root, one level up
	// from this test package. The Chdir error was previously ignored
	// (errcheck); fail loudly instead of booting from the wrong directory.
	if err := os.Chdir("../"); err != nil {
		panic(err)
	}
	boot.Bootstrap()
	os.Exit(m.Run())
}
|
package parser
import (
"testing"
"github.com/tombuildsstuff/teamcity-go-test-json/models"
)
// TestNoTests checks that input lines which are not go-test JSON events
// produce no parsed results.
func TestNoTests(t *testing.T) {
	junk := []string{`Hello`, `World`}
	parsed := testParser(junk)
	if got := len(parsed); got != 0 {
		t.Fatalf("Expected no results for junk data but got %d", got)
	}
}
// TestSingleSuccessful feeds the parser a captured `go test -json` event
// stream for one passing test and verifies the parsed status, duration,
// stdout and stderr.
func TestSingleSuccessful(t *testing.T) {
	// Event stream: run/pause/cont events, per-line output events, a
	// test-level "pass" (Elapsed 78.38) and a package-level "pass" (78.462).
	lines := []string{
		`{"Time":"2020-01-06T05:05:45.438058+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T05:05:45.438424+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== RUN TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T05:05:45.459045+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== PAUSE TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T05:05:45.459075+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T05:05:45.459099+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T05:05:45.459105+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== CONT TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T05:07:03.818099+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"--- PASS: TestAccAzureRMResourceGroup_basic (78.38s)\n"}`,
		`{"Time":"2020-01-06T05:07:03.818215+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Elapsed":78.38}`,
		`{"Time":"2020-01-06T05:07:03.818313+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"PASS\n"}`,
		`{"Time":"2020-01-06T05:07:03.828007+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"ok \tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t78.429s\n"}`,
		`{"Time":"2020-01-06T05:07:03.860807+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Elapsed":78.462}`,
	}
	results := testParser(lines)
	if len(results) != 1 {
		t.Fatalf("Expected a single result but got %d", len(results))
	}
	result := results[0]
	if result.Result != models.Successful {
		t.Fatalf("Expected the result to be Successful but got %q", result.Result)
	}
	// 78.38 is the test-level "pass" Elapsed, not the package-level 78.462.
	if result.Duration != 78.38 {
		t.Fatalf("Expected the duration to be 78.38 but got %.2f", result.Duration)
	}
	// Expected stdout omits the package-level "PASS"/"ok ..." output lines —
	// only output events attributed to the test appear.
	expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_basic
=== PAUSE TestAccAzureRMResourceGroup_basic
=== CONT TestAccAzureRMResourceGroup_basic
--- PASS: TestAccAzureRMResourceGroup_basic (78.38s)
`
	if result.StdOut != expectedStdOut {
		t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
	}
	if len(result.StdErr) != 0 {
		t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
	}
}
// TestSingleIgnored feeds the parser a `go test -json` stream for one
// skipped test and verifies it is reported as Ignored with zero duration.
func TestSingleIgnored(t *testing.T) {
	// Event stream for a test that calls t.Skip: a "skip" action with
	// Elapsed 0 and a "--- SKIP" output line.
	lines := []string{
		`{"Time":"2020-01-06T12:48:32.302781+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport"}`,
		`{"Time":"2020-01-06T12:48:32.30319+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":"=== RUN TestAccAzureRMResourceGroup_requiresImport\n"}`,
		`{"Time":"2020-01-06T12:48:32.30322+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":"--- SKIP: TestAccAzureRMResourceGroup_requiresImport (0.00s)\n"}`,
		`{"Time":"2020-01-06T12:48:32.303229+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":"    resource_arm_resource_group_test.go:36: Skipping since resources aren't required to be imported\n"}`,
		`{"Time":"2020-01-06T12:48:32.303239+01:00","Action":"skip","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Elapsed":0}`,
		`{"Time":"2020-01-06T12:48:32.303264+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"PASS\n"}`,
		`{"Time":"2020-01-06T12:48:32.304518+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"ok \tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t0.029s\n"}`,
		`{"Time":"2020-01-06T12:48:32.308532+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Elapsed":0.033}`,
	}
	results := testParser(lines)
	if len(results) != 1 {
		t.Fatalf("Expected a single result but got %d", len(results))
	}
	result := results[0]
	if result.Result != models.Ignored {
		t.Fatalf("Expected the result to be Ignored but got %q", result.Result)
	}
	// The "skip" event carries Elapsed 0, so the parsed duration is 0.0.
	if result.Duration != 0.0 {
		t.Fatalf("Expected the duration to be 0.0 but got %.2f", result.Duration)
	}
	// Expected stdout keeps the skip reason line, indented as emitted.
	expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_requiresImport
--- SKIP: TestAccAzureRMResourceGroup_requiresImport (0.00s)
    resource_arm_resource_group_test.go:36: Skipping since resources aren't required to be imported
`
	if result.StdOut != expectedStdOut {
		t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
	}
	if len(result.StdErr) != 0 {
		t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
	}
}
// TestSingleFailed feeds the parser a `go test -json` stream for one failing
// test and verifies the Failed status, duration and captured output.
func TestSingleFailed(t *testing.T) {
	// Event stream for a single failing test: a "--- FAIL" output line, the
	// error detail line, then test-level and package-level "fail" events.
	lines := []string{
		`{"Time":"2020-01-06T12:44:04.182228+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:44:04.182571+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== RUN TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:44:04.206855+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== PAUSE TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:44:04.206929+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:44:04.206961+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:44:04.206969+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== CONT TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:44:04.333766+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"--- FAIL: TestAccAzureRMResourceGroup_basic (0.15s)\n"}`,
		`{"Time":"2020-01-06T12:44:04.333798+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"    testing.go:569: Step 0 error: config is invalid: Invalid resource type: The provider provider.azurerm does not support resource type \"azurerm_resource_group\".\n"}`,
		`{"Time":"2020-01-06T12:44:04.333822+01:00","Action":"fail","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Elapsed":0.15}`,
		`{"Time":"2020-01-06T12:44:04.333843+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"FAIL\n"}`,
		`{"Time":"2020-01-06T12:44:04.336708+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"FAIL\tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t0.180s\n"}`,
		`{"Time":"2020-01-06T12:44:04.33689+01:00","Action":"fail","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Elapsed":0.18}`,
	}
	results := testParser(lines)
	if len(results) != 1 {
		t.Fatalf("Expected a single result but got %d", len(results))
	}
	result := results[0]
	if result.Result != models.Failed {
		t.Fatalf("Expected the result to be Failed but got %q", result.Result)
	}
	// 0.15 is the test-level "fail" Elapsed, not the package-level 0.18.
	if result.Duration != 0.15 {
		t.Fatalf("Expected the duration to be 0.15 but got %.2f", result.Duration)
	}
	// Expected stdout excludes the package-level "FAIL" lines and keeps the
	// failure detail line with its original indentation.
	expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_basic
=== PAUSE TestAccAzureRMResourceGroup_basic
=== CONT TestAccAzureRMResourceGroup_basic
--- FAIL: TestAccAzureRMResourceGroup_basic (0.15s)
    testing.go:569: Step 0 error: config is invalid: Invalid resource type: The provider provider.azurerm does not support resource type "azurerm_resource_group".
`
	if result.StdOut != expectedStdOut {
		t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
	}
	if len(result.StdErr) != 0 {
		t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
	}
}
// TestSingleFailedMultipleLines feeds the parser a failing test whose error
// output spans many lines (including whitespace-only lines) and verifies the
// multi-line stdout is reassembled as expected.
func TestSingleFailedMultipleLines(t *testing.T) {
	// Event stream: the failure detail is split across many "output" events;
	// several of them contain only whitespace. The expected stdout below has
	// no corresponding blank lines — compare the two to see what the parser
	// is expected to keep.
	lines := []string{
		`{"Time":"2020-01-06T12:51:47.588077+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:51:47.588388+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== RUN TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:51:47.608038+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== PAUSE TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:51:47.608092+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:51:47.608107+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:51:47.608113+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== CONT TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:51:47.611735+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"--- FAIL: TestAccAzureRMResourceGroup_basic (0.02s)\n"}`,
		`{"Time":"2020-01-06T12:51:47.611781+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"    testing.go:569: Step 0 error: Error initializing context: 2 problems:\n"}`,
		`{"Time":"2020-01-06T12:51:47.611796+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      \n"}`,
		`{"Time":"2020-01-06T12:51:47.611827+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      - Could not satisfy plugin requirements: \n"}`,
		`{"Time":"2020-01-06T12:51:47.611848+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      Plugin reinitialization required. Please run \"terraform init\".\n"}`,
		`{"Time":"2020-01-06T12:51:47.611856+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      \n"}`,
		`{"Time":"2020-01-06T12:51:47.611953+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      Plugins are external binaries that Terraform uses to access and manipulate\n"}`,
		`{"Time":"2020-01-06T12:51:47.611968+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      resources. The configuration provided requires plugins which can't be located,\n"}`,
		`{"Time":"2020-01-06T12:51:47.611975+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      don't satisfy the version constraints, or are otherwise incompatible.\n"}`,
		`{"Time":"2020-01-06T12:51:47.612092+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      \n"}`,
		`{"Time":"2020-01-06T12:51:47.612108+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      Terraform automatically discovers provider requirements from your\n"}`,
		`{"Time":"2020-01-06T12:51:47.612136+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      configuration, including providers used in child modules. To see the\n"}`,
		`{"Time":"2020-01-06T12:51:47.612146+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      requirements and constraints from each module, run \"terraform providers\".\n"}`,
		`{"Time":"2020-01-06T12:51:47.612153+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      \n"}`,
		`{"Time":"2020-01-06T12:51:47.61216+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"      - provider \"azurerm\" is not available\n"}`,
		`{"Time":"2020-01-06T12:51:47.612173+01:00","Action":"fail","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Elapsed":0.02}`,
		`{"Time":"2020-01-06T12:51:47.612194+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"FAIL\n"}`,
		`{"Time":"2020-01-06T12:51:47.613587+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"FAIL\tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t0.053s\n"}`,
		`{"Time":"2020-01-06T12:51:47.613711+01:00","Action":"fail","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Elapsed":0.053}`,
	}
	results := testParser(lines)
	if len(results) != 1 {
		t.Fatalf("Expected a single result but got %d", len(results))
	}
	result := results[0]
	if result.Result != models.Failed {
		t.Fatalf("Expected the result to be Failed but got %q", result.Result)
	}
	// 0.02 is the test-level "fail" Elapsed, not the package-level 0.053.
	if result.Duration != 0.02 {
		t.Fatalf("Expected the duration to be 0.02 but got %.2f", result.Duration)
	}
	// NOTE: whitespace inside this raw literal is significant — it must match
	// the parser's reassembled output byte-for-byte.
	expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_basic
=== PAUSE TestAccAzureRMResourceGroup_basic
=== CONT TestAccAzureRMResourceGroup_basic
--- FAIL: TestAccAzureRMResourceGroup_basic (0.02s)
    testing.go:569: Step 0 error: Error initializing context: 2 problems:
      - Could not satisfy plugin requirements: 
      Plugin reinitialization required. Please run "terraform init".
      Plugins are external binaries that Terraform uses to access and manipulate
      resources. The configuration provided requires plugins which can't be located,
      don't satisfy the version constraints, or are otherwise incompatible.
      Terraform automatically discovers provider requirements from your
      configuration, including providers used in child modules. To see the
      requirements and constraints from each module, run "terraform providers".
      - provider "azurerm" is not available
`
	if result.StdOut != expectedStdOut {
		t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
	}
	if len(result.StdErr) != 0 {
		t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
	}
}
// TestSinglePanic feeds the parser a test that panics mid-run and verifies
// the result is Failed with the full panic trace captured in stdout.
func TestSinglePanic(t *testing.T) {
	// Event stream: a "--- FAIL" line followed by a goroutine panic trace
	// (tab-indented file/line entries), ending with a single test-level
	// "fail" event carrying Elapsed 0.071.
	lines := []string{
		`{"Time":"2020-01-06T12:57:33.387711+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:57:33.388053+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== RUN TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:57:33.40788+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== PAUSE TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:57:33.40794+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:57:33.407959+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T12:57:33.407964+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== CONT TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T12:57:33.407978+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"--- FAIL: TestAccAzureRMResourceGroup_basic (0.02s)\n"}`,
		`{"Time":"2020-01-06T12:57:33.410433+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"panic: Hello [recovered]\n"}`,
		`{"Time":"2020-01-06T12:57:33.41049+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\tpanic: Hello\n"}`,
		`{"Time":"2020-01-06T12:57:33.4105+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\n"}`,
		`{"Time":"2020-01-06T12:57:33.410521+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"goroutine 23 [running]:\n"}`,
		`{"Time":"2020-01-06T12:57:33.410535+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"testing.tRunner.func1(0xc000130300)\n"}`,
		`{"Time":"2020-01-06T12:57:33.410564+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:874 +0x3a3\n"}`,
		`{"Time":"2020-01-06T12:57:33.410578+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"panic(0x459bc80, 0x52ea040)\n"}`,
		`{"Time":"2020-01-06T12:57:33.410589+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/usr/local/Cellar/go/1.13.5/libexec/src/runtime/panic.go:679 +0x1b2\n"}`,
		`{"Time":"2020-01-06T12:57:33.410682+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests.TestAccAzureRMResourceGroup_basic.func1()\n"}`,
		`{"Time":"2020-01-06T12:57:33.410695+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/Users/tharvey/code/src/github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests/resource_arm_resource_group_test.go:19 +0x39\n"}`,
		`{"Time":"2020-01-06T12:57:33.410715+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"github.com/hashicorp/terraform-plugin-sdk/helper/resource.Test(0x53d4a80, 0xc000130300, 0x0, 0x4dc16f0, 0xc0008977a0, 0x0, 0x0, 0x4dc17a8, 0xc0006eb728, 0x2, ...)\n"}`,
		`{"Time":"2020-01-06T12:57:33.410724+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/Users/tharvey/code/pkg/mod/github.com/hashicorp/terraform-plugin-sdk@v1.1.1/helper/resource/testing.go:482 +0x179b\n"}`,
		`{"Time":"2020-01-06T12:57:33.410742+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"github.com/hashicorp/terraform-plugin-sdk/helper/resource.ParallelTest(0x53d4a80, 0xc000130300, 0x0, 0x4dc16f0, 0xc0008977a0, 0x0, 0x0, 0x4dc17a8, 0xc0006eb728, 0x2, ...)\n"}`,
		`{"Time":"2020-01-06T12:57:33.430902+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/Users/tharvey/code/pkg/mod/github.com/hashicorp/terraform-plugin-sdk@v1.1.1/helper/resource/testing.go:444 +0x83\n"}`,
		`{"Time":"2020-01-06T12:57:33.430934+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests.TestAccAzureRMResourceGroup_basic(0xc000130300)\n"}`,
		`{"Time":"2020-01-06T12:57:33.430943+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/Users/tharvey/code/src/github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests/resource_arm_resource_group_test.go:18 +0x457\n"}`,
		`{"Time":"2020-01-06T12:57:33.430981+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"testing.tRunner(0xc000130300, 0x4dc16f8)\n"}`,
		`{"Time":"2020-01-06T12:57:33.430997+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:909 +0xc9\n"}`,
		`{"Time":"2020-01-06T12:57:33.431007+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"created by testing.(*T).Run\n"}`,
		`{"Time":"2020-01-06T12:57:33.431032+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"\t/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:960 +0x350\n"}`,
		`{"Time":"2020-01-06T12:57:33.431123+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"FAIL\tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t0.071s\n"}`,
		`{"Time":"2020-01-06T12:57:33.43165+01:00","Action":"fail","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Elapsed":0.071}`,
	}
	results := testParser(lines)
	if len(results) != 1 {
		t.Fatalf("Expected a single result but got %d", len(results))
	}
	result := results[0]
	if result.Result != models.Failed {
		t.Fatalf("Expected the result to be Failed but got %q", result.Result)
	}
	// The only "fail" event carries Elapsed 0.071, so that is the duration
	// even though the "--- FAIL" line printed (0.02s).
	if result.Duration != 0.071 {
		t.Fatalf("Expected the duration to be 0.071 but got %.3f", result.Duration)
	}
	// NOTE: the trace lines below begin with real tab characters (matching
	// the \t escapes in the events above) — byte-for-byte significant.
	expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_basic
=== PAUSE TestAccAzureRMResourceGroup_basic
=== CONT TestAccAzureRMResourceGroup_basic
--- FAIL: TestAccAzureRMResourceGroup_basic (0.02s)
panic: Hello [recovered]
	panic: Hello
goroutine 23 [running]:
testing.tRunner.func1(0xc000130300)
	/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:874 +0x3a3
panic(0x459bc80, 0x52ea040)
	/usr/local/Cellar/go/1.13.5/libexec/src/runtime/panic.go:679 +0x1b2
github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests.TestAccAzureRMResourceGroup_basic.func1()
	/Users/tharvey/code/src/github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests/resource_arm_resource_group_test.go:19 +0x39
github.com/hashicorp/terraform-plugin-sdk/helper/resource.Test(0x53d4a80, 0xc000130300, 0x0, 0x4dc16f0, 0xc0008977a0, 0x0, 0x0, 0x4dc17a8, 0xc0006eb728, 0x2, ...)
	/Users/tharvey/code/pkg/mod/github.com/hashicorp/terraform-plugin-sdk@v1.1.1/helper/resource/testing.go:482 +0x179b
github.com/hashicorp/terraform-plugin-sdk/helper/resource.ParallelTest(0x53d4a80, 0xc000130300, 0x0, 0x4dc16f0, 0xc0008977a0, 0x0, 0x0, 0x4dc17a8, 0xc0006eb728, 0x2, ...)
	/Users/tharvey/code/pkg/mod/github.com/hashicorp/terraform-plugin-sdk@v1.1.1/helper/resource/testing.go:444 +0x83
github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests.TestAccAzureRMResourceGroup_basic(0xc000130300)
	/Users/tharvey/code/src/github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests/resource_arm_resource_group_test.go:18 +0x457
testing.tRunner(0xc000130300, 0x4dc16f8)
	/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:909 +0xc9
created by testing.(*T).Run
	/usr/local/Cellar/go/1.13.5/libexec/src/testing/testing.go:960 +0x350
FAIL	github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests	0.071s
`
	if result.StdOut != expectedStdOut {
		t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
	}
	if len(result.StdErr) != 0 {
		t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
	}
}
// TestMultipleTestsSamePackage verifies that interleaved `go test -json`
// events for four tests running (partly in parallel) inside one package
// are demultiplexed into four independent results, each with the correct
// status, duration, stdout and empty stderr.
func TestMultipleTestsSamePackage(t *testing.T) {
	lines := []string{
		`{"Time":"2020-01-06T04:45:17.228802+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T04:45:17.231582+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== RUN TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T04:45:17.25869+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== PAUSE TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T04:45:17.258756+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T04:45:17.25878+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport"}`,
		`{"Time":"2020-01-06T04:45:17.258799+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":"=== RUN TestAccAzureRMResourceGroup_requiresImport\n"}`,
		`{"Time":"2020-01-06T04:45:17.259549+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":"--- SKIP: TestAccAzureRMResourceGroup_requiresImport (0.00s)\n"}`,
		`{"Time":"2020-01-06T04:45:17.259594+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Output":" resource_arm_resource_group_test.go:36: Skipping since resources aren't required to be imported\n"}`,
		`{"Time":"2020-01-06T04:45:17.259629+01:00","Action":"skip","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_requiresImport","Elapsed":0}`,
		`{"Time":"2020-01-06T04:45:17.259665+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears"}`,
		`{"Time":"2020-01-06T04:45:17.259683+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears","Output":"=== RUN TestAccAzureRMResourceGroup_disappears\n"}`,
		`{"Time":"2020-01-06T04:45:17.277204+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears","Output":"=== PAUSE TestAccAzureRMResourceGroup_disappears\n"}`,
		`{"Time":"2020-01-06T04:45:17.277263+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears"}`,
		`{"Time":"2020-01-06T04:45:17.27729+01:00","Action":"run","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags"}`,
		`{"Time":"2020-01-06T04:45:17.277306+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags","Output":"=== RUN TestAccAzureRMResourceGroup_withTags\n"}`,
		`{"Time":"2020-01-06T04:45:17.300809+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags","Output":"=== PAUSE TestAccAzureRMResourceGroup_withTags\n"}`,
		`{"Time":"2020-01-06T04:45:17.301086+01:00","Action":"pause","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags"}`,
		`{"Time":"2020-01-06T04:45:17.301151+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic"}`,
		`{"Time":"2020-01-06T04:45:17.301196+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"=== CONT TestAccAzureRMResourceGroup_basic\n"}`,
		`{"Time":"2020-01-06T04:45:17.302446+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags"}`,
		`{"Time":"2020-01-06T04:45:17.302486+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags","Output":"=== CONT TestAccAzureRMResourceGroup_withTags\n"}`,
		`{"Time":"2020-01-06T04:45:17.302503+01:00","Action":"cont","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears"}`,
		`{"Time":"2020-01-06T04:45:17.302519+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears","Output":"=== CONT TestAccAzureRMResourceGroup_disappears\n"}`,
		`{"Time":"2020-01-06T04:46:27.433877+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears","Output":"--- PASS: TestAccAzureRMResourceGroup_disappears (70.15s)\n"}`,
		`{"Time":"2020-01-06T04:46:34.422914+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_disappears","Elapsed":70.15}`,
		`{"Time":"2020-01-06T04:46:34.422995+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Output":"--- PASS: TestAccAzureRMResourceGroup_basic (77.15s)\n"}`,
		`{"Time":"2020-01-06T04:47:04.104737+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_basic","Elapsed":77.15}`,
		`{"Time":"2020-01-06T04:47:04.104807+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags","Output":"--- PASS: TestAccAzureRMResourceGroup_withTags (106.83s)\n"}`,
		`{"Time":"2020-01-06T04:47:04.104869+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Test":"TestAccAzureRMResourceGroup_withTags","Elapsed":106.83}`,
		`{"Time":"2020-01-06T04:47:04.104901+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"PASS\n"}`,
		`{"Time":"2020-01-06T04:47:04.11591+01:00","Action":"output","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Output":"ok \tgithub.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests\t106.926s\n"}`,
		`{"Time":"2020-01-06T04:47:04.189611+01:00","Action":"pass","Package":"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/tests","Elapsed":107}`,
	}
	results := testParser(lines)
	if len(results) != 4 {
		t.Fatalf("Expected 4 results but got %d", len(results))
	}
	for _, result := range results {
		switch result.TestName {
		case "TestAccAzureRMResourceGroup_basic":
			if result.Result != models.Successful {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_basic` to be Successful but got %q", result.Result)
			}
			if result.Duration != 77.15 {
				// BUG FIX: `%2.f` (width 2, precision 0) truncated the
				// fractional part of the reported duration; `%.2f` was intended.
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_basic` to take 77.15 but took %.2f", result.Duration)
			}
			expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_basic
=== PAUSE TestAccAzureRMResourceGroup_basic
=== CONT TestAccAzureRMResourceGroup_basic
--- PASS: TestAccAzureRMResourceGroup_basic (77.15s)
`
			if result.StdOut != expectedStdOut {
				t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
			}
			if len(result.StdErr) != 0 {
				t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
			}
			continue
		case "TestAccAzureRMResourceGroup_disappears":
			if result.Result != models.Successful {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_disappears` to be Successful but got %q", result.Result)
			}
			if result.Duration != 70.15 {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_disappears` to take 70.15 but took %.2f", result.Duration)
			}
			expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_disappears
=== PAUSE TestAccAzureRMResourceGroup_disappears
=== CONT TestAccAzureRMResourceGroup_disappears
--- PASS: TestAccAzureRMResourceGroup_disappears (70.15s)
`
			if result.StdOut != expectedStdOut {
				t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
			}
			if len(result.StdErr) != 0 {
				t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
			}
			continue
		case "TestAccAzureRMResourceGroup_requiresImport":
			if result.Result != models.Ignored {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_requiresImport` to be Ignored but got %q", result.Result)
			}
			if result.Duration != 0.0 {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_requiresImport` to take 0.0 but took %.2f", result.Duration)
			}
			expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_requiresImport
--- SKIP: TestAccAzureRMResourceGroup_requiresImport (0.00s)
 resource_arm_resource_group_test.go:36: Skipping since resources aren't required to be imported
`
			if result.StdOut != expectedStdOut {
				t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
			}
			if len(result.StdErr) != 0 {
				t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
			}
			continue
		case "TestAccAzureRMResourceGroup_withTags":
			if result.Result != models.Successful {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_withTags` to be Successful but got %q", result.Result)
			}
			if result.Duration != 106.83 {
				t.Fatalf("Expected `TestAccAzureRMResourceGroup_withTags` to take 106.83 but took %.2f", result.Duration)
			}
			expectedStdOut := `=== RUN TestAccAzureRMResourceGroup_withTags
=== PAUSE TestAccAzureRMResourceGroup_withTags
=== CONT TestAccAzureRMResourceGroup_withTags
--- PASS: TestAccAzureRMResourceGroup_withTags (106.83s)
`
			if result.StdOut != expectedStdOut {
				t.Fatalf("Expected the stdout to be %q but got %q", expectedStdOut, result.StdOut)
			}
			if len(result.StdErr) != 0 {
				t.Fatalf("Expected the stderr to be empty but got %q", result.StdErr)
			}
			continue
		default:
			t.Fatalf("Unknown Test %q", result.TestName)
		}
	}
}
// TestMultipleTestsDifferentPackages verifies that a JSON event stream
// spanning two packages yields the expected number of results and that the
// results carry exactly two distinct package names.
func TestMultipleTestsDifferentPackages(t *testing.T) {
	lines := multipleTestsDifferentPackageConfig
	results := testParser(lines)
	if len(results) != 11 {
		t.Fatalf("Expected 11 tests but got %d", len(results))
	}
	// Collect the set of distinct package names across all results.
	// (Dropped the pointless `, 0` size hint from the original make call.)
	uniquePackages := make(map[string]struct{})
	for _, r := range results {
		uniquePackages[r.Package] = struct{}{}
	}
	if len(uniquePackages) != 2 {
		t.Fatalf("Expected 2 packages but got %d", len(uniquePackages))
	}
}
// testParser feeds every line through a fresh ResultsParser and returns
// the results in the order the parser emitted them.
func testParser(lines []string) []models.TestResult {
	collected := make([]models.TestResult, 0)
	parser := NewResultsParser(func(r models.TestResult) {
		collected = append(collected, r)
	})
	for _, line := range lines {
		parser.ParseLine(line)
	}
	return collected
}
|
package main
/*
func (___Vun *_TudpNodeSt) _FudpNode__540211x__gap() {
if 0 == ___Vun.unLoopGap {
return
}
___Vun.unCBrece = _FudpNode__540211z__receiveCallBack_withTimeGap
if nil == ___Vun.unCBgap {
go _FudpNode__540211y__gap_default(___Vun)
} else {
go ___Vun.unCBgap(___Vun)
}
}
func _FudpNode__540211y__gap_default(___Vun *_TudpNodeSt) {
for {
_Fsleep(___Vun.unLoopGap)
___Vun._FudpNode__540211yy__gap_default()
}
}
func (___Vun *_TudpNodeSt) _FudpNode__540211yy__gap_default() {
//_FpfNdb(" 848231 01 ")
defer ___Vun.unRmap.unrMux.Unlock()
___Vun.unRmap.unrMux.Lock()
//_FpfN(" 848231 03 las2 %v", ___Vun.unRmap.unrMapLas2)
//_FpfN(" 848231 04 last %v", ___Vun.unRmap.unrMapLast)
//_FpfN(" 848231 05 now %v", ___Vun.unRmap.unrMapNow)
___Vun.unRmap.unrMapLas2 = ___Vun.unRmap.unrMapLast
___Vun.unRmap.unrMapLast = ___Vun.unRmap.unrMapNow
___Vun.unRmap.unrMapNow = make(map[string]_TuNodeDataRcnt)
//_FpfN(" 848231 06 las2 %v", ___Vun.unRmap.unrMapLas2)
//_FpfN(" 848231 07 last %v", ___Vun.unRmap.unrMapLast)
//_FpfN(" 848231 08 now %v", ___Vun.unRmap.unrMapNow)
}
// replace the _FudpNode__500101yy3__receiveCallBack_default__randDecodeOut_noKeyWillDirect
func _FudpNode__540211z__receiveCallBack_withTimeGap(___Vun *_TudpNodeSt) {
//_FpfNhex(&___Vun.unRbuf1500, 30, " 848236 01 rece %d", ___Vun.unRlen)
__Vrece := _TudpNodeDataRece{
UrrRemoteAddr: ___Vun.unRemoteAddr,
UrrBuf: ___Vun.unRbuf1500[:___Vun.unRlen],
}
__VrKey := __Vrece.UrrRemoteAddr.IP.String()
//_FpfNhex(&___Vun.unRbuf1500, 30, " 848236 02 <%s>", __VrKey)
//_FpfNhex(&__Vrece.UrrBuf, 30, " 848236 03 ")
if "" == __VrKey || "<nil>" == __VrKey {
_FpfN(" 848236 04 address error %v", __Vrece.UrrRemoteAddr)
return
}
//_FpfNdb(" 848236 05 %s", __VrKey)
__Vreply := false
___Vun.unRmap.unrMux.Lock()
__Vnow, __VokN := ___Vun.unRmap.unrMapNow[__VrKey]
if __VokN {
__Vnow.cnt++ // alreay exist ... so , skip
_FpfN(" 848237 01 ")
} else {
___Vun.unRmap.unrMapNow[__VrKey] = _TuNodeDataRcnt{
cnt: 1,
urr: __Vrece,
}
__Vlast, __VokL := ___Vun.unRmap.unrMapLast[__VrKey]
if __VokL { // map[string]_TuNodeDataRcnt
if 1 == __Vlast.cnt {
__Vreply = true
//_FpfN(" 848237 03 ")
} else {
_FpfN(" 848237 04 ")
}
} else {
__Vlas2, __Vok2 := ___Vun.unRmap.unrMapLas2[__VrKey]
if __Vok2 { // map[string]_TuNodeDataRcnt
if 1 == __Vlas2.cnt {
//_FpfN(" 848237 06 ")
__Vreply = true
} else {
_FpfN(" 848237 07 ")
}
} else {
//_FpfN(" 848237 09 ")
}
}
//_FpfN(" 848238 01 las2 %v", ___Vun.unRmap.unrMapLas2)
//_FpfN(" 848238 02 last %v", ___Vun.unRmap.unrMapLast)
//_FpfN(" 848238 03 now %v", ___Vun.unRmap.unrMapNow)
}
___Vun.unRmap.unrMux.Unlock()
//_FpfN(" 848238 06 : __Vreply %t", __Vreply)
if __Vreply {
//(*___Vun.unCHreceByteLO) <- __Vrece
//_FpfN(" 848238 09 custom_receive 02 checkok,start to loginIn")
___Vun.
_FudpNode__500101yy4__receiveCallBack_default__randDecodeOut_mustDecode(&__Vrece.UrrBuf)
}
}
*/
|
package client
import (
"crypto/tls"
"encoding/json"
"fmt"
"log"
"github.com/golang/protobuf/proto"
)
// Client holds a TLS connection to a remote Cast receiver and the virtual
// channels multiplexed on top of it.
type Client struct {
	host string // remote host dialed by NewClient
	port int    // remote TCP port dialed by NewClient
	packetsStream *PacketStream // packet framing over the TLS connection
	channels []*Channel // channels fanned out to by dispatchResponses
}
// NewClient dials host:port over TLS, starts the background response
// dispatcher, registers a heart-beat controller, and returns the client.
//
// NOTE(review): certificate verification is disabled via
// InsecureSkipVerify — confirm this is intentional for the target devices.
func NewClient(host string, port int) (*Client, error) {
	c := &Client{
		host: host,
		port: port,
	}
	hostAddr := fmt.Sprintf("%s:%d", c.host, c.port)
	log.Println("Dialing to:", hostAddr)
	conn, err := tls.Dial("tcp", hostAddr, &tls.Config{
		InsecureSkipVerify: true,
	})
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying dial error;
		// the previous log-and-return duplicated the error report.
		return nil, fmt.Errorf("dialing %s: %w", hostAddr, err)
	}
	c.packetsStream = NewPacketStream(conn)
	go c.dispatchResponses()
	// Add a new heart beat controller automatically.
	hbc := NewHeartBeatController(c, "sender-0", "receiver-0")
	hbc.Start()
	return c, nil
}
// dispatchResponses loops forever, reading framed packets from the stream,
// decoding each CastMessage and its JSON payload headers, and delivering
// the message to every registered channel.
func (c *Client) dispatchResponses() {
	for {
		packet := c.packetsStream.Read()
		msg := CastMessage{}
		if err := proto.Unmarshal(packet, &msg); err != nil {
			// NOTE(review): log.Fatalln terminates the entire process on a
			// single malformed packet — consider propagating the error.
			log.Fatalln("Failed to unmarshal CastMessage:", err)
		}
		log.Printf("Recv: S=%s, D=%s, NS=%s, %s", *msg.SourceId, *msg.DestinationId, *msg.Namespace, *msg.PayloadUtf8)
		var headers Payload
		if err := json.Unmarshal([]byte(*msg.PayloadUtf8), &headers); err != nil {
			log.Fatalln("Failed to unmarshal headers:", err)
		}
		// NOTE(review): c.channels is read here while NewChannel may append
		// from another goroutine — unsynchronized; confirm intended usage.
		for _, channel := range c.channels {
			channel.message(&msg, &headers)
		}
	}
}
// NewChannel registers and returns a virtual channel bound to the given
// source, destination and namespace.
func (c *Client) NewChannel(sourceId, destinationId, namespace string) *Channel {
	newCh := &Channel{
		client:        c,
		sourceId:      sourceId,
		destinationId: destinationId,
		namespace:     namespace,
		inFlight:      make(map[int]chan Response),
		listeners:     make([]channelListener, 0),
	}
	c.channels = append(c.channels, newCh)
	return newCh
}
// sendCastMessage marshals msg to its protobuf wire form and writes it to
// the packet stream.
func (c *Client) sendCastMessage(msg *CastMessage) error {
	encoded, err := proto.Marshal(msg)
	if err != nil {
		return err
	}
	if _, err = c.packetsStream.Write(encoded); err != nil {
		return err
	}
	return nil
}
// Send converts specified payload to JSON and sends wrapped message.
func (c *Client) Send(sourceId, destinationId, namespace string, payload interface{}) error {
	payloadJson, err := json.Marshal(payload)
	if err != nil {
		// BUG FIX: previously `return nil`, silently dropping the message
		// (and reporting success) when the payload could not be marshaled.
		return err
	}
	payloadStr := string(payloadJson)
	msg := CastMessage{
		ProtocolVersion: CastMessage_CASTV2_1_0.Enum(),
		SourceId:        &sourceId,
		DestinationId:   &destinationId,
		Namespace:       &namespace,
		PayloadType:     CastMessage_STRING.Enum(),
		PayloadUtf8:     &payloadStr,
	}
	log.Println("Send:", payloadStr)
	return c.sendCastMessage(&msg)
}
|
package create_token
import (
"encoding/json"
"net/http"
"github.com/gorilla/mux"
"context"
"fmt"
"io/ioutil"
"strings"
"bytes"
"time"
)
var PathTemplate = "/api/v1/token"
// DecodeCreateTokenRequest builds a CreateTokenRequest from an incoming
// HTTP request: the signing key defaults to the X-Jwtack-Key header (a
// "key" field in the body overrides it), while the claims payload and the
// optional expiry (a number of seconds under "exp_sec") come from the JSON
// body.
func DecodeCreateTokenRequest(_ context.Context, r *http.Request) (interface{}, error) {
	// BUG FIX: the ReadAll error was previously discarded.
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	bodyReader := strings.NewReader(string(body))
	var reqBody struct {
		// BUG FIX: the tags were written `json: "..."` — the space after
		// the colon makes them unparseable as struct tags, so the custom
		// "exp_sec" key was never honored (json fell back to field-name
		// matching, accepting only "exp").
		Key     string                 `json:"key,omitempty"`
		Payload map[string]interface{} `json:"payload,omitempty"`
		Exp     *time.Duration         `json:"exp_sec,omitempty"`
	}
	reqBody.Key = r.Header.Get("X-Jwtack-Key")
	if err := json.NewDecoder(bodyReader).Decode(&reqBody); err != nil {
		fmt.Println("decodeEncodeTokenRequest ErrorBodyRaw:", string(body))
		return nil, err
	}
	// The wire value is plain seconds; scale it into a time.Duration.
	if reqBody.Exp != nil {
		expSec := *reqBody.Exp * time.Second
		reqBody.Exp = &expSec
	}
	return CreateTokenRequest{reqBody.Key, reqBody.Payload, reqBody.Exp}, nil
}
// EncodeCreateTokennRequest writes a CreateTokenRequest into the outgoing
// HTTP request: the body is the JSON-encoded request and the URL path is
// resolved from PathTemplate via a gorilla/mux route.
//
// NOTE(review): the exported name carries a "Tokenn" typo; it is kept to
// preserve the public API.
func EncodeCreateTokennRequest(_ context.Context, r *http.Request, request interface{}) (err error) {
	var buf bytes.Buffer
	cbr := request.(CreateTokenRequest)
	if err = json.NewEncoder(&buf).Encode(cbr); err != nil {
		return err
	}
	mr := mux.NewRouter()
	mr.NewRoute().BuildOnly()
	u, err := mr.Path(PathTemplate).URLPath()
	if err != nil {
		return
	}
	r.URL.Path = u.Path
	r.Body = ioutil.NopCloser(&buf)
	// Removed a leftover debug fmt.Println(r.Body) that printed the reader
	// value (not the body contents) on every outgoing request.
	return
}
func DecodeCreateTokenResponse(_ context.Context, resp *http.Response) (interface{}, error) {
var response CreateTokenResponse
err := json.NewDecoder(resp.Body).Decode(&response)
return response, err
} |
package hugo
import (
"github.com/go-cmd/cmd"
"github.com/naoina/toml"
"github.com/pkg/errors"
"github.com/qiaogw/pkg/logs"
"github.com/qiaogw/pkg/tools"
"os"
"path/filepath"
)
//var (
// hugoConfig = config.Config.Hugo
//)
//func GetHugoConfig() *Config {
// return &hugoConfig
//}
// NewHugo returns a SiteConfig pre-populated with the package defaults:
// 4 items per page, 30-word summaries, Chinese as the default language.
func NewHugo() *SiteConfig {
	cfg := &SiteConfig{}
	cfg.Paginate = "4"
	cfg.SummaryLength = "30"
	cfg.DefaultContentLanguage = "zh"
	return cfg
}
// NewSite scaffolds a new Hugo site under hugoConfig.Dir/c.Title: it runs
// `hugo new site`, copies the theme's exampleSite content and the theme
// itself into place, rewrites config.toml with this site's baseurl/title/
// theme, and finally builds the site.
func (c *SiteConfig) NewSite(hugoConfig *Config) (err error) {
	path := filepath.Join(hugoConfig.Dir, c.Title)
	envCmd := cmd.NewCmd("hugo", "new", "site", path)
	status := <-envCmd.Start()
	// Print each line of STDOUT from Cmd
	for _, line := range status.Stdout {
		logs.Debug(line)
	}
	themeDest := filepath.Join(path, "themes", c.Theme)
	themeSrc := filepath.Join(hugoConfig.ThemeDir, c.Theme)
	siteSrc := filepath.Join(themeSrc, "exampleSite")
	err = tools.CopyDir(siteSrc, path)
	if err != nil {
		logs.Error(err)
		return
	}
	err = tools.CopyDir(themeSrc, themeDest)
	if err != nil {
		logs.Error(err)
		return
	}
	conf := make(map[string]interface{})
	configFile := filepath.Join(hugoConfig.Dir, c.Title, "config.toml")
	// BUG FIX: this error was previously ignored, so a missing or broken
	// config.toml silently produced a config holding only the three keys
	// set below.
	err = tools.GetConfigFromPath(configFile, &conf)
	if err != nil {
		logs.Error(err)
		return
	}
	conf["baseurl"] = c.BaseURL
	conf["title"] = c.Title
	conf["theme"] = c.Theme
	err = SaveConfig(configFile, conf)
	if err != nil {
		logs.Error(err)
		return
	}
	err = c.BuildSite(hugoConfig)
	if err != nil {
		logs.Error(err)
	}
	return
}
// GetConfigFile reads and returns the raw contents of the site's
// config.toml.
func (c *SiteConfig) GetConfigFile(hugoConfig *Config) (str string, err error) {
	cfgPath := filepath.Join(hugoConfig.Dir, c.Title, "config.toml")
	return tools.ReadFile(cfgPath)
}
//func (c *SiteConfig) GetConfigFileParams(hugoConfig *Config) (fileInfo interface{}, err error) {
// configfile := filepath.Join(hugoConfig.Dir, c.Title, "config.toml")
// //fileInfo, err = config.GetConfigFromPath(configfile)
// return
//}
// SetConfigFile overwrites the site's config.toml with str, re-applies the
// site's canonical baseurl/title/theme values on top of it, and rebuilds
// the site.
func (c *SiteConfig) SetConfigFile(hugoConfig *Config, str string) (err error) {
	configFile := filepath.Join(hugoConfig.Dir, c.Title, "config.toml")
	if err = tools.WriteFile(configFile, str, true); err != nil {
		return
	}
	conf := make(map[string]interface{})
	// BUG FIX: the parse error was previously discarded (immediately
	// overwritten by the SaveConfig result), so invalid TOML written by the
	// caller was silently reduced to just the three keys below.
	if err = tools.GetConfigFromPath(configFile, &conf); err != nil {
		return
	}
	conf["baseurl"] = c.BaseURL
	conf["title"] = c.Title
	conf["theme"] = c.Theme
	if err = SaveConfig(configFile, conf); err != nil {
		return
	}
	return c.BuildSite(hugoConfig)
}
// BuildSite runs the `hugo` command inside the site directory and returns
// the command's terminal error (nil on success).
func (c *SiteConfig) BuildSite(hugoConfig *Config) (err error) {
	logs.Debug("BuildSite")
	siteDir := filepath.Join(hugoConfig.Dir, c.Title)
	build := cmd.NewCmd("hugo")
	build.Dir = siteDir
	logs.Debug("BuildSite", siteDir)
	status := <-build.Start()
	// Echo the generator's stdout into the debug log.
	for _, outLine := range status.Stdout {
		logs.Debug(outLine)
	}
	return status.Error
}
// SaveConfig serializes config as TOML into the file at path, creating the
// parent directory first when it does not exist.
func SaveConfig(path string, config interface{}) error {
	dir := filepath.Dir(path)
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		// BUG FIX: os.Mkdir fails when more than one path element is
		// missing; os.MkdirAll creates the whole chain.
		if err := os.MkdirAll(dir, 0775); err != nil {
			return errors.Wrapf(err, "creating dir %s", dir)
		}
	}
	cf, err := os.Create(path)
	if err != nil {
		//log.WithFields(log.Fields{"type": consts.IOError, "error": err}).Error("Create config file failed")
		return err
	}
	defer cf.Close()
	if err := toml.NewEncoder(cf).Encode(config); err != nil {
		return err
	}
	return nil
}
|
package authentication
// Token it's the token representation from the auth0 authentication api
// https://auth0.com/docs/api/authentication#get-token
// It's a dynamic object since it has multiple representantions
type Token map[string]interface{}
// Identity is one identity-provider record attached to a user
// (see User.Identities).
type Identity struct {
	Connection string `json:"connection,omitempty"`
	UserID int `json:"user_id,omitempty"`
	Provider string `json:"provider,omitempty"`
	IsSocial bool `json:"isSocial,omitempty"`
	AccessToken string `json:"access_token"` // no omitempty: always serialized
}
// User represents an user in auth0 management API
// https://auth0.com/docs/api/management/v2#!/Users/get_users
type User struct {
	UserID string `json:"user_id,omitempty"`
	Email string `json:"email,omitempty"`
	EmailVerified bool `json:"email_verified,omitempty"`
	Username string `json:"username,omitempty"`
	PhoneNumber string `json:"phone_number,omitempty"`
	PhoneVerified bool `json:"phone_verified,omitempty"`
	// Timestamps are kept as strings exactly as returned by the API.
	CreatedAt string `json:"created_at,omitempty"`
	UpdatedAt string `json:"updated_at,omitempty"`
	Identities []Identity `json:"identities,omitempty"`
	AppMetadata map[string]interface{} `json:"app_metadata,omitempty"`
	UserMetadata map[string]interface{} `json:"user_metadata,omitempty"`
	Picture string `json:"picture,omitempty"`
	Name string `json:"name,omitempty"`
	Nickname string `json:"nickname,omitempty"`
	Multifactor []string `json:"multifactor,omitempty"`
	LastIP string `json:"last_ip,omitempty"`
	LastLogin string `json:"last_login,omitempty"`
	LoginsCount int `json:"logins_count,omitempty"`
	Blocked bool `json:"blocked,omitempty"`
	GivenName string `json:"given_name,omitempty"`
	FamilyName string `json:"family_name,omitempty"`
}
|
package message_processor
import (
"context"
bot "github.com/go-telegram-bot-api/telegram-bot-api"
"github.com/mrdniwe/clc.wtf/internal/interfaces"
"github.com/mrdniwe/pasatyje/pkg/intf/tg"
)
// service implements the message processor: it turns incoming Telegram
// messages into short links via linkSvc, gated by the whitelist checker.
type service struct {
	linkSvc interfaces.LinkService // creates links and generates short URLs
	wl interfaces.WhitelistChecker // decides which users may create links
}
// Process handles one incoming message: non-whitelisted users get a
// rejection reply; otherwise the message text is parsed into a link, a
// short link is created for the sender, and the generated URL is sent back
// as a reply to the original message.
func (s *service) Process(ctx context.Context, msg *bot.Message) (*bot.MessageConfig, error) {
	if !s.wl.IsUserAllowed(msg.From.ID) {
		denied := bot.NewMessage(msg.Chat.ID, "")
		denied.Text = "🚫 Вашему аккаунту запрещено создавать ссылки, обратитесь к @MrDniwe за разрешением"
		return &denied, nil
	}
	parsed, err := linkParser(msg.Text)
	if err != nil {
		return nil, err
	}
	hash, err := s.linkSvc.CreateLink(ctx, parsed.Link, int64(msg.From.ID), parsed.Description)
	if err != nil {
		return nil, err
	}
	shortURL, err := s.linkSvc.GenerateUrl(ctx, hash)
	if err != nil {
		return nil, err
	}
	answer := bot.NewMessage(msg.Chat.ID, shortURL)
	answer.ReplyToMessageID = msg.MessageID
	return &answer, nil
}
// New wires a link service and a whitelist checker into a message
// processor.
func New(linkSvc interfaces.LinkService, wl interfaces.WhitelistChecker) tg.MsgProc {
	svc := service{linkSvc: linkSvc, wl: wl}
	return &svc
}
|
package liferay
import internal "github.com/mdelapenya/lpn/internal"
// Commerce implementation for Liferay nightly images with Commerce
type Commerce struct {
	Tag string // Docker image tag returned by GetTag
}
// GetContainerName returns the name of the container generated by this type of image
func (c Commerce) GetContainerName() string {
	const imageType = "commerce"
	return internal.LpnConfig.GetPortalContainerName(imageType)
}
// GetDeployFolder returns the deploy folder under Liferay Home
func (c Commerce) GetDeployFolder() string {
	home := c.GetLiferayHome()
	return home + "/deploy"
}
// GetDockerHubTagsURL returns the Docker Hub repository path
// ("liferay/commerce") used to look up the available tags.
func (c Commerce) GetDockerHubTagsURL() string {
	const repoPath = "liferay/commerce"
	return repoPath
}
// GetFullyQualifiedName returns the fully qualified name of the image
func (c Commerce) GetFullyQualifiedName() string {
	repo := c.GetRepository()
	tag := c.GetTag()
	return "docker.io/" + repo + ":" + tag
}
// GetLiferayHome returns the Liferay home for nightly builds with Commerce
func (c Commerce) GetLiferayHome() string {
	const liferayHome = "/opt/liferay"
	return liferayHome
}
// GetRepository returns the repository for nightly builds with Commerce
func (c Commerce) GetRepository() string {
	const imageType = "commerce"
	return internal.LpnConfig.GetPortalImageName(imageType)
}
// GetTag returns the tag of the image
func (c Commerce) GetTag() string {
	tag := c.Tag
	return tag
}
// GetType returns the type of the image
func (c Commerce) GetType() string {
	const imageType = "commerce"
	return imageType
}
// GetUser returns the user running the main application
func (c Commerce) GetUser() string {
	const runAsUser = "liferay"
	return runAsUser
}
|
package main
import (
"log"
"net"
"sort"
"sync"
"time"
)
type result struct {
addr string
min, max, total time.Duration
succ, fail int
}
func (r *result) ave() time.Duration {
return r.total / time.Duration(r.succ)
}
// byQualityDesc sorts results best-first: fewer failures wins, ties broken
// by lower average latency.
type byQualityDesc []result

func (p byQualityDesc) Len() int { return len(p) }

func (p byQualityDesc) Less(i, j int) bool {
	a, b := p[i], p[j]
	if a.fail != b.fail {
		return a.fail < b.fail
	}
	return a.ave() < b.ave()
}

func (p byQualityDesc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Pings a bunch of host:port to find the fastest connections.
func ping(servers []string) []result {
var wg sync.WaitGroup
chResult := make(chan result)
for _, server := range servers {
wg.Add(1)
go func(addr string) {
defer wg.Done()
r := result{
addr: addr,
}
for i := 0; i < 5; i++ {
start := time.Now()
if conn, err := net.DialTimeout("tcp", addr, 1*time.Second); err != nil {
log.Printf("addr=%s err=%s\n", addr, err.Error())
r.fail++
} else {
dur := time.Since(start)
conn.Close()
if r.succ == 0 || dur < r.min {
r.min = dur
}
if r.succ == 0 || dur > r.max {
r.max = dur
}
r.total += dur
r.succ++
}
}
chResult <- r
}(server)
}
results := []result{}
resCh := make(chan struct{})
go func() {
for r := range chResult {
results = append(results, r)
}
close(resCh)
}()
wg.Wait()
close(chResult)
<-resCh
sort.Sort(byQualityDesc(results))
return results
}
|
package epaxos
import (
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/google/btree"
pb "github.com/nvanbenschoten/epaxos/epaxos/epaxospb"
)
// maxInstance returns the instance with the highest instance number for
// replica r, or nil when r has no instances.
func (p *epaxos) maxInstance(r pb.ReplicaID) *instance {
	item := p.commands[r].Max()
	if item == nil {
		return nil
	}
	return item.(*instance)
}
// maxInstanceNum returns the highest instance number used by replica r,
// or 0 when r has no instances.
func (p *epaxos) maxInstanceNum(r pb.ReplicaID) pb.InstanceNum {
	inst := p.maxInstance(r)
	if inst == nil {
		return 0
	}
	return inst.is.InstanceNum
}
// maxSeqNum returns the sequence number of replica r's highest instance,
// or 0 when r has no instances.
func (p *epaxos) maxSeqNum(r pb.ReplicaID) pb.SeqNum {
	inst := p.maxInstance(r)
	if inst == nil {
		return 0
	}
	return inst.is.SeqNum
}
// maxDeps returns the dependency list of replica r's highest instance, or
// nil when r has no instances.
func (p *epaxos) maxDeps(r pb.ReplicaID) []pb.InstanceID {
	inst := p.maxInstance(r)
	if inst == nil {
		return nil
	}
	return inst.is.Deps
}
// getInstance looks up replica r's instance with number i, returning nil
// when it is not present.
func (p *epaxos) getInstance(r pb.ReplicaID, i pb.InstanceNum) *instance {
	item := p.commands[r].Get(instanceKey(i))
	if item == nil {
		return nil
	}
	return item.(*instance)
}
// hasAccepted reports whether replica r's instance i exists and has reached
// at least the Accepted state.
func (p *epaxos) hasAccepted(r pb.ReplicaID, i pb.InstanceNum) bool {
	inst := p.getInstance(r, i)
	return inst != nil && inst.is.Status >= pb.InstanceState_Accepted
}
// hasExecuted reports whether replica r's instance i exists and has been
// executed.
func (p *epaxos) hasExecuted(r pb.ReplicaID, i pb.InstanceNum) bool {
	inst := p.getInstance(r, i)
	return inst != nil && inst.is.Status == pb.InstanceState_Executed
}
// HasExecuted implements the history interface.
func (p *epaxos) HasExecuted(e executableID) bool {
	id := e.(pb.InstanceID)
	return p.hasExecuted(id.ReplicaID, id.InstanceNum)
}
// seqAndDepsForCommand determines the locally known maximum interfering sequence
// number and dependencies for a given command.
// (Fixed the local variable typo `cmdRage` -> `cmdRange` and comment typos;
// no behavior change.)
func (p *epaxos) seqAndDepsForCommand(
	cmd *pb.Command, ignoredInstance pb.InstanceID,
) (pb.SeqNum, map[pb.InstanceID]struct{}) {
	var maxSeq pb.SeqNum
	deps := make(map[pb.InstanceID]struct{})
	cmdRange := rangeForCmd(cmd)
	for rID, cmds := range p.commands {
		// Adding to the writeRG and readRG allows us to minimize the number of
		// dependencies we add for this command without building a directed graph
		// and topological sorting. This relies on the interference relation for
		// commands over a given key-range being transitive. It also relies on the
		// causality of subsequent instances within the same replica instance space.
		// The logic here is very similar to that in CockroachDB's Command Queue.
		cmds.Descend(func(i btree.Item) bool {
			inst := i.(*instance)
			if inst.is.InstanceID == ignoredInstance {
				return true
			}
			addDep := func() {
				dep := pb.InstanceID{
					ReplicaID:   rID,
					InstanceNum: inst.is.InstanceNum,
				}
				deps[dep] = struct{}{}
			}
			if otherCmd := inst.is.Command; otherCmd.Interferes(*cmd) {
				maxSeq = pb.MaxSeqNum(maxSeq, inst.is.SeqNum)
				otherCmdRange := rangeForCmd(otherCmd)
				if otherCmd.Writing {
					// We add the other command's range to the RangeGroup and
					// observe if it grows the group. If it does, that means
					// that it is not a full transitive dependency of other
					// dependencies of ours. If it is, that means that we do
					// not need to depend on it because previous dependencies
					// necessarily already have it as a dependency themselves.
					if p.rangeGroup.Add(otherCmdRange) {
						addDep()
						if p.rangeGroup.Len() == 1 && p.rangeGroup.Encloses(cmdRange) {
							return false
						}
					}
				} else {
					// We check if the current RangeGroup overlaps the read
					// dependency. Reads don't depend on reads, so this will
					// only happen if a write was inserted that fully covers
					// the read.
					if !p.rangeGroup.Overlaps(otherCmdRange) {
						addDep()
					}
				}
			}
			return true
		})
		p.rangeGroup.Clear()
	}
	return maxSeq, deps
}
// rangeForCmd converts a command's key span into an interval.Range. A span
// with an empty end key is treated as the point range [key, key+"\x00").
func rangeForCmd(cmd *pb.Command) interval.Range {
	startKey := cmd.Span.Key
	endKey := cmd.Span.EndKey
	if len(endKey) == 0 {
		// BUG FIX: `append(startKey, 0)` could write the trailing zero into
		// the backing array shared with cmd.Span.Key whenever the slice had
		// spare capacity, corrupting the command's key. The full slice
		// expression caps the capacity so append must allocate a copy.
		endKey = append(startKey[:len(startKey):len(startKey)], 0)
	}
	return interval.Range{
		Start: interval.Comparable(startKey),
		End:   interval.Comparable(endKey),
	}
}
// onRequest creates a new local instance for cmd — assigning it the next
// unused instance number, a sequence number one above the largest locally
// known interfering sequence number, and the locally computed dependency
// set — inserts it into the local command tree, and pre-accepts it.
func (p *epaxos) onRequest(cmd *pb.Command) *instance {
	// Determine the smallest unused instance number.
	i := p.maxInstanceNum(p.id) + 1
	// Add a new instance for the command in the local commands.
	maxLocalSeq, localDeps := p.seqAndDepsForCommand(cmd, pb.InstanceID{})
	newInst := p.newInstance(p.id, i)
	newInst.is.Command = cmd
	newInst.is.SeqNum = maxLocalSeq + 1
	newInst.is.Deps = depSliceFromMap(localDeps)
	p.commands[p.id].ReplaceOrInsert(newInst)
	// Transition the new instance into a preAccepted state.
	newInst.transitionTo(pb.InstanceState_PreAccepted)
	return newInst
}
// prepareToExecute hands a committed instance to the executor and runs the
// executor synchronously. The instance must already be in the Committed
// state; the assertion enforces that precondition.
func (p *epaxos) prepareToExecute(inst *instance) {
	inst.assertState(pb.InstanceState_Committed)
	p.executor.addExec(inst)
	// TODO pull executor into a different goroutine and run asynchronously.
	p.executor.run()
	// p.truncateCommands()
}
// TODO reintroduce instance space truncation.
// func (p *epaxos) truncateCommands() {
// for r, cmds := range p.commands {
// var executedItems []btree.Item
// cmds.Ascend(func(i btree.Item) bool {
// if i.(*instance).is.Status == pb.InstanceState_Executed {
// executedItems = append(executedItems, i)
// return true
// }
// return false
// })
// if len(executedItems) > 0 {
// curMaxInstNum := p.maxTruncatedInstanceNum[r]
// for _, executedItem := range executedItems {
// inst := executedItem.(*instance)
// p.maxTruncatedSeqNum = pb.MaxSeqNum(p.maxTruncatedSeqNum, inst.is.SeqNum)
// curMaxInstNum = pb.MaxInstanceNum(curMaxInstNum, inst.is.InstanceNum)
// cmds.Delete(executedItem)
// }
// p.maxTruncatedInstanceNum[r] = curMaxInstNum
// }
// }
// }
|
package main
import (
"fmt"
"github.com/gin-contrib/static"
"github.com/gin-gonic/gin"
"github.com/gonitor/gonitor/config"
)
// CORSMiddleware returns a gin middleware that attaches permissive CORS
// headers to every response. OPTIONS preflight requests are short-circuited
// with a 200 status; all other requests continue down the handler chain.
func CORSMiddleware() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		header := ctx.Writer.Header()
		header.Set("Access-Control-Allow-Origin", "*")
		header.Set("Access-Control-Max-Age", "86400")
		header.Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE, UPDATE")
		header.Set("Access-Control-Allow-Headers", "X-Requested-With, Content-Type, Origin, Authorization, Accept, Client-Security-Token, Accept-Encoding, x-access-token")
		header.Set("Access-Control-Expose-Headers", "Content-Length")
		header.Set("Access-Control-Allow-Credentials", "true")
		if ctx.Request.Method != "OPTIONS" {
			ctx.Next()
			return
		}
		fmt.Println("OPTIONS")
		ctx.AbortWithStatus(200)
	}
}
// main wires up configuration, CORS handling, API routes and static file
// serving, then starts the HTTP server on port 9000.
func main() {
	config.LoadEnvVariables()
	router := gin.Default()
	router.Use(CORSMiddleware())
	config.SetRoutes(router)
	// Static assets are registered after the API routes so they cannot
	// shadow them.
	router.Use(static.Serve("/", static.LocalFile("./view", true)))
	// Run blocks for the lifetime of the server. Its error was previously
	// discarded, hiding startup failures such as a port already in use.
	if err := router.Run(":9000"); err != nil {
		panic(err)
	}
}
|
package gate
import (
"strconv"
"strings"
// third pkgs
log "github.com/cihub/seelog"
yaml "gopkg.in/yaml.v2"
// company pkgs
)
// ------------------------------------------------------
// Database model -- Begin
// Ring is a database row describing a ring (a named shard group).
type Ring struct {
	Id   uint16
	Name string
}
// App is a database row describing an application.
type App struct {
	Id   uint16
	Name string
}
// Instance is a database row describing a single backend instance and the
// ring it belongs to.
type Instance struct {
	Id     uint32
	Ip     uint64
	Port   uint32
	Weight uint32
	RingId uint16 // logical Foreign Key
}
// Database model -- End
// ------------------------------------------------------
// ------------------------------------------------------
// YAML -- Begin
// AppNames lists the applications allowed to use a ring.
type AppNames []string
// Migration is a placeholder for future migration metadata.
type Migration struct {
} //TODO extension
// Shard describes one shard's master and replica endpoints.
type Shard struct {
	// IP:Port,Weight
	Master string   `yaml:"master"`
	Slaves []string `yaml:"slaves,omitempty"`
}
// RingYAML is the on-disk YAML description of a ring.
type RingYAML struct {
	Name            string     `yaml:"name"`
	Datetime        string     `yaml:"datetime"`
	ProtocolVersion string     `yaml:"protocol_version"`
	Owner           string     `yaml:"owner,omitempty"`
	AlertReceivers  []string   `yaml:"alert_receivers,omitempty"`
	Migration       *Migration `yaml:"migration"`
	AppNames        AppNames   `yaml:"app_names"` // simple ACL implementation
	Shards          []*Shard   `yaml:"shards"`
}
// YAML -- End
// ------------------------------------------------------
// DecodeRingYAMLByStr decodes a ring definition from a YAML string,
// delegating to DecodeRingYAML.
func DecodeRingYAMLByStr(ringName, val string) (ring *RingYAML) {
	return DecodeRingYAML(ringName, []byte(val))
}
// DecodeRingYAML parses buf as a RingYAML document, forces its Name to
// ringName, and returns it. Returns nil when the document cannot be parsed.
func DecodeRingYAML(ringName string, buf []byte) (ring *RingYAML) {
	ring = &RingYAML{}
	// Pass the pointer directly; the previous code passed &ring (a
	// **RingYAML), forcing yaml to dereference through an extra level of
	// indirection for no benefit.
	err := yaml.Unmarshal(buf, ring)
	if err != nil {
		log.Error("Unmarshalling has one error for getting a RingYaml. Error:", err)
		return nil
	}
	ring.Name = ringName
	return
}
// DecodeAppsYAML parses cnf as a YAML list of application names.
// Returns nil when the document cannot be parsed.
func DecodeAppsYAML(cnf []byte) (apps []string) {
	apps = []string{}
	if err := yaml.Unmarshal(cnf, &apps); err != nil {
		log.Error("Unmarshalling has one error for getting a list of app name. Error:", err)
		return nil
	}
	return apps
}
// Just parse master
func (ring *RingYAML) getInstances() (ins map[string]uint32) {
ins = make(map[string]uint32)
for _, shard := range ring.Shards {
if addr, w := parseStrIns(shard.Master); addr != "" {
ins[addr] = w
}
}
return
}
// parameter: IP:Port,Weight
func parseStrIns(val string) (addr string, weight uint32) {
val = strings.TrimSpace(val)
a := strings.Split(val, ",")
addr = strings.TrimSpace(a[0])
w, err := strconv.Atoi(strings.TrimSpace(a[1]))
if err != nil {
log.Error("Parse Error:", val)
return
}
if w <= 0 {
log.Info("Weight <= 0! The input is: ", val, ". Please check the configuration.")
}
weight = uint32(w)
return
}
|
// Copyright 2019 The Berglas Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package berglas
import (
"context"
"fmt"
"cloud.google.com/go/iam"
secretmanager "cloud.google.com/go/secretmanager/apiv1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
const (
iamSecretManagerAccessor = "roles/secretmanager.secretAccessor"
)
// secretManagerIAM returns an IAM storage handle to the given secret since one
// does not exist in the secrets library. The handle operates on the resource
// name "projects/<project>/secrets/<name>".
func (c *Client) secretManagerIAM(project, name string) *iam.Handle {
	return iam.InternalNewHandleClient(&secretManagerIAMClient{
		raw: c.secretManagerClient,
	}, fmt.Sprintf("projects/%s/secrets/%s", project, name))
}
// secretManagerIAMClient implements the iam.client interface.
type secretManagerIAMClient struct {
	// raw is the underlying Secret Manager API client used for all calls.
	raw *secretmanager.Client
}
// Get fetches the IAM policy for resource at policy version 1.
func (c *secretManagerIAMClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
	return c.GetWithVersion(ctx, resource, 1)
}
// GetWithVersion fetches the IAM policy for resource at the requested
// policy version.
func (c *secretManagerIAMClient) GetWithVersion(ctx context.Context, resource string, version int32) (*iampb.Policy, error) {
	return c.raw.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: resource,
		Options: &iampb.GetPolicyOptions{
			RequestedPolicyVersion: version,
		},
	})
}
// Set replaces the IAM policy on resource with p.
func (c *secretManagerIAMClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
	_, err := c.raw.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{
		Resource: resource,
		Policy:   p,
	})
	return err
}
// Test returns the subset of perms that the caller holds on resource.
func (c *secretManagerIAMClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	list, err := c.raw.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    resource,
		Permissions: perms,
	})
	if err != nil {
		return nil, err
	}
	return list.Permissions, nil
}
|
package main
import (
"fmt"
"designPattern/AAY_factory_builder/c_builder_superman/superman"
)
// main demonstrates the builder pattern by constructing an adult superman
// and printing its special talent.
func main() {
	adult := superman.GetAdultSuperMan()
	fmt.Println(adult.SpecialTalent)
}
|
package wfs
import "os"
// Exists reports whether the path p can be stat'ed. Any stat error
// (including permission problems) is treated as "does not exist".
func Exists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}
// IsDir reports whether p exists and is a directory. Stat errors are
// treated as "not a directory".
func IsDir(p string) bool {
	info, err := os.Stat(p)
	if err != nil {
		return false
	}
	return info.IsDir()
}
|
package atgo
import (
"context"
"fmt"
"github.com/hashicorp/errwrap"
pay "github.com/wondenge/at-go/payments"
"go.uber.org/zap"
)
// CardCheckout collects money into your Payment Wallet by initiating
// transactions that deduct money from a customers Debit or Credit Card.
// These APIs are currently only available in Nigeria on MasterCard and
// Verve cards.
func (c *Client) CardCheckout(ctx context.Context, p *pay.CardCheckoutPayload) (res *pay.CardCheckoutResponse, err error) {
	// Allocate the response before issuing the request: previously a nil
	// pointer was passed as the decode target, so the response could never
	// be populated.
	res = &pay.CardCheckoutResponse{}
	if err := c.requestJSONBody(ctx, "POST", fmt.Sprintf("%s%s", c.PaymentEndpoint, "/card/checkout/charge"), p, res); err != nil {
		err = errwrap.Wrapf("could not make new http request: {{err}}", err)
		c.Log.Info("error", zap.Error(err))
		// Propagate the failure; previously it was logged and then
		// discarded, so callers always observed a nil error.
		return nil, err
	}
	return res, nil
}
// CardCheckoutValidate allows your application to validate card checkout
// charge requests.
func (c *Client) CardCheckoutValidate(ctx context.Context, p *pay.CardCheckoutValidatePayload) (res *pay.CardCheckoutValidateResponse, err error) {
	// Allocate the response before issuing the request: previously a nil
	// pointer was passed as the decode target, so the response could never
	// be populated.
	res = &pay.CardCheckoutValidateResponse{}
	if err := c.requestJSONBody(ctx, "POST", fmt.Sprintf("%s%s", c.PaymentEndpoint, "/card/checkout/validate"), p, res); err != nil {
		err = errwrap.Wrapf("could not make new http request: {{err}}", err)
		c.Log.Info("error", zap.Error(err))
		// Propagate the failure; previously it was logged and then
		// discarded, so callers always observed a nil error.
		return nil, err
	}
	return res, nil
}
|
package runtime
import (
"fmt"
E "github.com/ionous/sashimi/event"
"github.com/ionous/sashimi/meta"
"github.com/ionous/sashimi/runtime/api"
"github.com/ionous/sashimi/runtime/internal"
"log"
"math/rand"
"strings"
)
// RuntimeConfig accumulates optional runtime services (output, log, rand,
// frame, save/load) before a game is built from them.
type RuntimeConfig struct {
	core internal.RuntimeCore
}
// NewConfig returns an empty configuration; unset services are defaulted
// by Finalize.
func NewConfig() *RuntimeConfig {
	return &RuntimeConfig{}
}
// MakeGame finalizes the configuration and builds a Game over model m.
func (cfg RuntimeConfig) MakeGame(m meta.Model) Game {
	core := cfg.Finalize()
	return Game{m, internal.NewGame(core, m)}
}
// Finalize fills in default implementations for any core services the
// caller left unset and returns the completed RuntimeCore.
func (cfg RuntimeConfig) Finalize() internal.RuntimeCore {
	core := cfg.core
	if core.Rand == nil {
		// A fixed seed keeps runs reproducible by default.
		core.Rand = rand.New(rand.NewSource(1))
	}
	if core.Log == nil {
		// Route the standard logger through the configured output; the
		// calldepth of 4 skips the adapter frames so file:line points at
		// the caller.
		logger := log.New(logAdapter{core.Output}, "game: ", log.Lshortfile)
		core.Log = LogAdapter{
			print: func(msg string) {
				logger.Output(4, msg)
			},
		}
	}
	if core.Frame == nil {
		core.Frame = &defaultFrame{core.Log, nil}
	}
	if core.LookupParents == nil {
		core.LookupParents = noParents{}
	}
	if core.SaveLoad == nil {
		core.SaveLoad = noSaveLoad{}
	}
	return core
}
// noSaveLoad is the default SaveLoad service; it rejects every save request.
type noSaveLoad struct{}
// SaveGame always fails: saving is not supported by the default service.
func (noSaveLoad) SaveGame(autosave bool) (string, error) {
	return "", fmt.Errorf("not implemented")
}
// noParents is the default parent-lookup service; it never finds a parent.
type noParents struct{}
// LookupParent always reports no parent (okay == false).
func (noParents) LookupParent(meta.Instance) (inst meta.Instance, rel meta.Property, okay bool) {
	return
}
// logAdapter adapts an api.Output into an io.Writer so the standard
// library logger can write through it.
type logAdapter struct {
	output api.Output
}
// Write forwards p to the output's Log method; it never reports an error.
func (log logAdapter) Write(p []byte) (n int, err error) {
	log.output.Log(string(p))
	return len(p), nil
}
// SetCalls installs the callback-lookup service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetCalls(calls api.LookupCallbacks) *RuntimeConfig {
	cfg.core.LookupCallbacks = calls
	return cfg
}
// StartFrame and EndFrame should be merged into Output
// -- and they should be renamed: BeginEvent() and EndEvent().
// *Maybe* Target should be mapped into a prototype.
// Class should be removed from E.Target;
// only: how do we know that a thing is a "class" and should get the "Class" resource?
// We could potentially send the target type to StartFrame;
// right now it seems ridiculous that the game decides that.
// SetFrame installs the event-frame service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetFrame(e api.EventFrame) *RuntimeConfig {
	cfg.core.Frame = e
	return cfg
}
// SetOutput installs the output service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetOutput(o api.Output) *RuntimeConfig {
	cfg.core.Output = o
	return cfg
}
// SetParentLookup installs the parent-lookup service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetParentLookup(l api.LookupParents) *RuntimeConfig {
	cfg.core.LookupParents = l
	return cfg
}
// SetLog installs the logging service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetLog(log api.Log) *RuntimeConfig {
	cfg.core.Log = log
	return cfg
}
// SetRand installs the random-number source; returns cfg for chaining.
func (cfg *RuntimeConfig) SetRand(rand *rand.Rand) *RuntimeConfig {
	cfg.core.Rand = rand
	return cfg
}
// SetSaveLoad installs the save/load service; returns cfg for chaining.
func (cfg *RuntimeConfig) SetSaveLoad(s api.SaveLoad) *RuntimeConfig {
	cfg.core.SaveLoad = s
	return cfg
}
// defaultFrame is the fallback EventFrame: it logs each event dispatch and
// tracks the nesting of in-flight events in parts.
type defaultFrame struct {
	log api.Log
	// parts holds the message names of currently nested events.
	parts []string
}
// BeginEvent pushes the event's message name and logs the dispatch path;
// the returned IEndEvent (the frame itself) pops it again via EndEvent.
func (d *defaultFrame) BeginEvent(_, _ meta.Instance, path E.PathList, msg *E.Message) api.IEndEvent {
	d.parts = append(d.parts, msg.String())
	fullName := strings.Join(d.parts, "/")
	d.log.Printf("sending `%s` to: %s.", fullName, path)
	return d
}
// FlushFrame is a no-op for the default frame.
func (d *defaultFrame) FlushFrame() {
}
// EndEvent pops the most recent event name pushed by BeginEvent.
// NOTE(review): assumes it is only called after a matching BeginEvent;
// an unmatched call would panic on the empty slice.
func (d *defaultFrame) EndEvent() {
	d.parts = d.parts[0 : len(d.parts)-1]
}
// LogAdapter turns a plain print function into an api.Log implementation.
type LogAdapter struct {
	print func(s string)
}
// Printf formats per fmt.Sprintf and forwards the result to print.
func (log LogAdapter) Printf(format string, v ...interface{}) {
	log.print(fmt.Sprintf(format, v...))
}
// Println formats per fmt.Sprintln and forwards the result to print.
func (log LogAdapter) Println(v ...interface{}) {
	log.print(fmt.Sprintln(v...))
}
|
package email
import (
"regexp"
)
// emailRE matches a simple, pragmatic subset of e-mail addresses: lowercase
// local part, lowercase domain labels, and a TLD of at least two letters.
// Compiled once at package init so IsValid does not recompile the pattern
// on every call (regexp.MustCompile per call was both wasteful and would
// panic at first use on a bad pattern rather than at startup).
var emailRE = regexp.MustCompile(`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,}$`)

// IsValid reports whether email looks like a syntactically valid address.
// The previous pattern capped the TLD at 4 letters, rejecting valid
// addresses such as "a@b.museum"; the cap has been removed (everything
// previously accepted is still accepted).
func IsValid(email string) bool {
	return emailRE.MatchString(email)
}
package msg
import (
"github.com/golang/protobuf/proto"
"github.com/wudiliujie/common/network"
"github.com/wudiliujie/common/network/protobuf"
)
// Processor is the shared protobuf message processor for this package.
var Processor = protobuf.NewProcessor()
// init registers a factory for every packet id so the processor can
// instantiate the correct message type when decoding.
func init() {
	Processor.Register(PCK_C2SLogin_ID, func() network.IMessage { return new(C2SLogin) })
	Processor.Register(PCK_S2CLogin_ID, func() network.IMessage { return new(S2CLogin) })
	Processor.Register(PCK_S2CRoleInfo_ID, func() network.IMessage { return new(S2CRoleInfo) })
	Processor.Register(PCK_S2SRegisterServerInfo_ID, func() network.IMessage { return new(S2SRegisterServerInfo) })
	Processor.Register(PCK_S2SServerInfo_ID, func() network.IMessage { return new(S2SServerInfo) })
	Processor.Register(PCK_S2CKill_ID, func() network.IMessage { return new(S2CKill) })
	Processor.Register(PCK_C2SLoginEnd_ID, func() network.IMessage { return new(C2SLoginEnd) })
}
const PCK_C2SLogin_ID = 1 // login request packet id
// C2SLogin is the client-to-server login request.
type C2SLogin struct {
	// Login time in seconds.
	LoginTime int64 `protobuf:"varint,1,opt,name=LoginTime,proto3" json:"LoginTime,omitempty"`
	// Account role id.
	UserId int64 `protobuf:"varint,2,opt,name=UserId,proto3" json:"UserId,omitempty"`
	// Request signature.
	Sign string `protobuf:"bytes,3,opt,name=Sign,proto3" json:"Sign,omitempty"`
}
func (m *C2SLogin) Reset()         { *m = C2SLogin{} }
func (m *C2SLogin) String() string { return proto.CompactTextString(m) }
func (*C2SLogin) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *C2SLogin) GetId() uint16 { return PCK_C2SLogin_ID }
const PCK_S2CLogin_ID = 2 // login response packet id
// S2CLogin is the server-to-client login response.
type S2CLogin struct {
	// Result tag of the login attempt.
	Tag int32 `protobuf:"varint,1,opt,name=Tag,proto3" json:"Tag,omitempty"`
}
func (m *S2CLogin) Reset()         { *m = S2CLogin{} }
func (m *S2CLogin) String() string { return proto.CompactTextString(m) }
func (*S2CLogin) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *S2CLogin) GetId() uint16 { return PCK_S2CLogin_ID }
// IntAttr is a user attribute with an integer value.
type IntAttr struct {
	// Attribute key.
	K int32 `protobuf:"varint,1,opt,name=K,proto3" json:"K,omitempty"`
	// Attribute value.
	V int64 `protobuf:"varint,2,opt,name=V,proto3" json:"V,omitempty"`
}
func (m *IntAttr) Reset()         { *m = IntAttr{} }
func (m *IntAttr) String() string { return proto.CompactTextString(m) }
func (*IntAttr) ProtoMessage()    {}
// StrAttr is a user attribute with a string value.
type StrAttr struct {
	// Attribute key.
	K int32 `protobuf:"varint,1,opt,name=K,proto3" json:"K,omitempty"`
	// Attribute value.
	V string `protobuf:"bytes,2,opt,name=V,proto3" json:"V,omitempty"`
}
func (m *StrAttr) Reset()         { *m = StrAttr{} }
func (m *StrAttr) String() string { return proto.CompactTextString(m) }
func (*StrAttr) ProtoMessage()    {}
const PCK_S2CRoleInfo_ID = 3 // role-info packet id
// S2CRoleInfo carries a user's role information from server to client.
type S2CRoleInfo struct {
	// User id.
	UserId int64 `protobuf:"varint,1,opt,name=UserId,proto3" json:"UserId,omitempty"`
	// User attributes.
	A []*IntAttr `protobuf:"bytes,2,rep,name=A,proto3" json:"A,omitempty"`
}
func (m *S2CRoleInfo) Reset()         { *m = S2CRoleInfo{} }
func (m *S2CRoleInfo) String() string { return proto.CompactTextString(m) }
func (*S2CRoleInfo) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *S2CRoleInfo) GetId() uint16 { return PCK_S2CRoleInfo_ID }
// ServerInfo describes a game server.
type ServerInfo struct {
	// Server id.
	ServerId int64 `protobuf:"varint,1,opt,name=ServerId,proto3" json:"ServerId,omitempty"`
	// Server type.
	ServerType int64 `protobuf:"varint,2,opt,name=ServerType,proto3" json:"ServerType,omitempty"`
	// Server name.
	ServerName string `protobuf:"bytes,3,opt,name=ServerName,proto3" json:"ServerName,omitempty"`
	// Server address.
	ServerAddr string `protobuf:"bytes,4,opt,name=ServerAddr,proto3" json:"ServerAddr,omitempty"`
}
func (m *ServerInfo) Reset()         { *m = ServerInfo{} }
func (m *ServerInfo) String() string { return proto.CompactTextString(m) }
func (*ServerInfo) ProtoMessage()    {}
const PCK_S2SRegisterServerInfo_ID = 4 // server registration packet id
// S2SRegisterServerInfo registers a server's information.
type S2SRegisterServerInfo struct {
	// Server information.
	Info *ServerInfo `protobuf:"bytes,1,opt,name=Info,proto3" json:"Info,omitempty"`
}
func (m *S2SRegisterServerInfo) Reset()         { *m = S2SRegisterServerInfo{} }
func (m *S2SRegisterServerInfo) String() string { return proto.CompactTextString(m) }
func (*S2SRegisterServerInfo) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *S2SRegisterServerInfo) GetId() uint16 { return PCK_S2SRegisterServerInfo_ID }
const PCK_S2SServerInfo_ID = 5 // packet id: send connected game-server info to the gateway
// S2SServerInfo sends the currently connected game servers' information to
// the gateway.
type S2SServerInfo struct {
	// Server information list.
	Info []*ServerInfo `protobuf:"bytes,1,rep,name=Info,proto3" json:"Info,omitempty"`
}
func (m *S2SServerInfo) Reset()         { *m = S2SServerInfo{} }
func (m *S2SServerInfo) String() string { return proto.CompactTextString(m) }
func (*S2SServerInfo) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *S2SServerInfo) GetId() uint16 { return PCK_S2SServerInfo_ID }
// D2SLoadRoleCallback is the callback message for loading user information.
type D2SLoadRoleCallback struct {
	// Load result tag.
	Tag int32 `protobuf:"varint,1,opt,name=Tag,proto3" json:"Tag,omitempty"`
	// User id.
	UserId int64 `protobuf:"varint,2,opt,name=UserId,proto3" json:"UserId,omitempty"`
	// Login area id.
	LoginAreaId int64 `protobuf:"varint,3,opt,name=LoginAreaId,proto3" json:"LoginAreaId,omitempty"`
	// Database payload.
	Data *RoleDbData `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
}
func (m *D2SLoadRoleCallback) Reset()         { *m = D2SLoadRoleCallback{} }
func (m *D2SLoadRoleCallback) String() string { return proto.CompactTextString(m) }
func (*D2SLoadRoleCallback) ProtoMessage()    {}
// RoleDbData is the user's database payload.
type RoleDbData struct {
	// Integer attributes.
	A *IntAttr `protobuf:"bytes,1,opt,name=A,proto3" json:"A,omitempty"`
	// String attributes.
	B *StrAttr `protobuf:"bytes,2,opt,name=B,proto3" json:"B,omitempty"`
}
func (m *RoleDbData) Reset()         { *m = RoleDbData{} }
func (m *RoleDbData) String() string { return proto.CompactTextString(m) }
func (*RoleDbData) ProtoMessage()    {}
const PCK_S2CKill_ID = 6 // kick-user-offline packet id
// S2CKill tells a client it has been kicked offline.
type S2CKill struct {
	// Reason tag.
	Tag int32 `protobuf:"varint,1,opt,name=Tag,proto3" json:"Tag,omitempty"`
}
func (m *S2CKill) Reset()         { *m = S2CKill{} }
func (m *S2CKill) String() string { return proto.CompactTextString(m) }
func (*S2CKill) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *S2CKill) GetId() uint16 { return PCK_S2CKill_ID }
const PCK_C2SLoginEnd_ID = 7 // login-completed packet id
// C2SLoginEnd signals that the user's login finished successfully.
type C2SLoginEnd struct {
}
func (m *C2SLoginEnd) Reset()         { *m = C2SLoginEnd{} }
func (m *C2SLoginEnd) String() string { return proto.CompactTextString(m) }
func (*C2SLoginEnd) ProtoMessage()    {}
// GetId returns the packet id used by the message processor.
func (m *C2SLoginEnd) GetId() uint16 { return PCK_C2SLoginEnd_ID }
|
package leetcode
import (
"reflect"
"testing"
)
// TestPascalsTriangle checks PascalsTriangle against known small triangles,
// including the empty (0-row) case.
func TestPascalsTriangle(t *testing.T) {
	cases := []struct {
		n    int
		want [][]int
	}{
		{0, nil},
		{1, [][]int{{1}}},
		{5, [][]int{{1}, {1, 1}, {1, 2, 1}, {1, 3, 3, 1}, {1, 4, 6, 4, 1}}},
	}
	for _, c := range cases {
		got := PascalsTriangle(c.n)
		if !reflect.DeepEqual(got, c.want) {
			t.Errorf("invalid output with input %d\n", c.n)
		}
	}
}
|
// +build storage_sqlite storage_all !sqlite_fs,!storage_boltdb,!storage_badger,!storage_pgx
package sqlite
import (
"fmt"
"path"
"strings"
pub "github.com/go-ap/activitypub"
ap "github.com/go-ap/fedbox/activitypub"
"github.com/go-ap/handlers"
"github.com/go-ap/storage"
)
// isCollection reports whether col names one of the three well-known
// top-level collections (actors, activities, objects).
func isCollection(col string) bool {
	switch col {
	case string(ap.ActorsType), string(ap.ActivitiesType), string(ap.ObjectsType):
		return true
	}
	return false
}
// getStringFieldInJSONWheres builds a SQL WHERE fragment matching the given
// comparison strings against one or more JSON properties of the "raw"
// column via sqlite's json_extract. It returns the parenthesized clause
// (individual conditions OR-ed together) and the ordered bind values.
// Clause text and bind values are appended in lock-step, so their relative
// order must be preserved by any caller.
func getStringFieldInJSONWheres(strs ap.CompStrs, props ...string) (string, []interface{}) {
	if len(strs) == 0 {
		return "", nil
	}
	var values = make([]interface{}, 0)
	keyWhere := make([]string, 0)
	for _, n := range strs {
		switch n.Operator {
		case "!":
			// Negation: an empty/nil value means "property must exist";
			// otherwise the property must not contain the value.
			for _, prop := range props {
				if len(n.Str) == 0 || n.Str == pub.NilLangRef.String() {
					keyWhere = append(keyWhere, fmt.Sprintf(`json_extract("raw", '$.%s') IS NOT NULL`, prop))
				} else {
					keyWhere = append(keyWhere, fmt.Sprintf(`json_extract("raw", '$.%s') NOT LIKE ?`, prop))
					values = append(values, interface{}("%"+n.Str+"%"))
				}
			}
		case "~":
			// Substring (LIKE) match.
			for _, prop := range props {
				keyWhere = append(keyWhere, fmt.Sprintf(`json_extract("raw", '$.%s') LIKE ?`, prop))
				values = append(values, interface{}("%"+n.Str+"%"))
			}
		case "", "=":
			fallthrough
		default:
			// Equality; unknown operators are treated as equality too.
			for _, prop := range props {
				if len(n.Str) == 0 || n.Str == pub.NilLangRef.String() {
					keyWhere = append(keyWhere, fmt.Sprintf(`json_extract("raw", '$.%s') IS NULL`, prop))
				} else {
					keyWhere = append(keyWhere, fmt.Sprintf(`json_extract("raw", '$.%s') = ?`, prop))
					values = append(values, interface{}(n.Str))
				}
			}
		}
	}
	return fmt.Sprintf("(%s)", strings.Join(keyWhere, " OR ")), values
}
// getStringFieldWheres builds a SQL WHERE fragment matching the given
// comparison strings against one or more plain table columns. It mirrors
// getStringFieldInJSONWheres: the parenthesized clause OR-s the individual
// conditions together, and bind values are returned in clause order.
func getStringFieldWheres(strs ap.CompStrs, fields ...string) (string, []interface{}) {
	if len(strs) == 0 {
		return "", nil
	}
	var values = make([]interface{}, 0)
	keyWhere := make([]string, 0)
	for _, t := range strs {
		switch t.Operator {
		case "!":
			// Negation: an empty/nil value means "column must be set";
			// otherwise the column must not contain the value.
			for _, field := range fields {
				if len(t.Str) == 0 || t.Str == pub.NilLangRef.String() {
					keyWhere = append(keyWhere, fmt.Sprintf(`"%s" IS NOT NULL`, field))
				} else {
					keyWhere = append(keyWhere, fmt.Sprintf(`"%s" NOT LIKE ?`, field))
					values = append(values, interface{}("%"+t.Str+"%"))
				}
			}
		case "~":
			// Substring (LIKE) match.
			for _, field := range fields {
				keyWhere = append(keyWhere, fmt.Sprintf(`"%s" LIKE ?`, field))
				values = append(values, interface{}("%"+t.Str+"%"))
			}
		case "", "=":
			fallthrough
		default:
			// Treat unknown operators as equality, matching
			// getStringFieldInJSONWheres; previously they were silently
			// dropped, which could yield an empty "()" clause.
			for _, field := range fields {
				if len(t.Str) == 0 || t.Str == pub.NilLangRef.String() {
					keyWhere = append(keyWhere, fmt.Sprintf(`"%s" IS NULL`, field))
				} else {
					keyWhere = append(keyWhere, fmt.Sprintf(`"%s" = ?`, field))
					values = append(values, interface{}(t.Str))
				}
			}
		}
	}
	return fmt.Sprintf("(%s)", strings.Join(keyWhere, " OR ")), values
}
// getTypeWheres builds a WHERE fragment over the "type" column.
func getTypeWheres(strs ap.CompStrs) (string, []interface{}) {
	return getStringFieldWheres(strs, "type")
}
// getContextWheres builds a WHERE fragment over the JSON "context" property.
func getContextWheres(strs ap.CompStrs) (string, []interface{}) {
	return getStringFieldInJSONWheres(strs, "context")
}
// getURLWheres builds a WHERE fragment for URLs, matching both the plain
// "url" column and the JSON "url" property, OR-ed together. Bind values
// follow the clause order: column values first, then JSON values.
func getURLWheres(strs ap.CompStrs) (string, []interface{}) {
	clause, values := getStringFieldWheres(strs, "url")
	jClause, jValues := getStringFieldInJSONWheres(strs, "url")
	switch {
	case len(jClause) == 0:
		// nothing to merge
	case len(clause) == 0:
		clause = jClause
	default:
		clause = clause + " OR " + jClause
	}
	return clause, append(values, jValues...)
}
// MandatoryCollections are the collections every actor must expose.
var MandatoryCollections = handlers.CollectionTypes{
	handlers.Inbox,
	handlers.Outbox,
}
// getIRIWheres builds a WHERE fragment for the "iri" column from the
// explicit IRI filters plus the filter's own link id. When the explicit
// filters already constrain "iri", the id is not added again. Collection
// IRIs match by substring (LIKE); object IRIs match exactly.
func getIRIWheres(strs ap.CompStrs, id pub.IRI) (string, []interface{}) {
	iriClause, iriValues := getStringFieldWheres(strs, "iri")
	// If the generated clause already mentions the iri column, don't add
	// a second constraint derived from id.
	skipId := strings.Contains(iriClause, `"iri"`)
	if skipId {
		return iriClause, iriValues
	}
	// Normalize the IRI: strip query string and user info before matching.
	if u, _ := id.URL(); u != nil {
		u.RawQuery = ""
		u.User = nil
		id = pub.IRI(u.String())
	}
	// FIXME(marius): this is a hack that avoids trying to use clause on IRI, when iri == "/"
	if len(id) > 1 {
		if len(iriClause) > 0 {
			iriClause += " OR "
		}
		if base := path.Base(id.String()); isCollection(base) {
			iriClause += `"iri" LIKE ?`
			iriValues = append(iriValues, interface{}("%"+id+"%"))
		} else {
			iriClause += `"iri" = ?`
			iriValues = append(iriValues, interface{}(id))
		}
	}
	return iriClause, iriValues
}
// getNamesWheres builds a WHERE fragment over the JSON "name" and
// "preferredUsername" properties.
func getNamesWheres(strs ap.CompStrs) (string, []interface{}) {
	return getStringFieldInJSONWheres(strs, "name", "preferredUsername")
}
// getInReplyToWheres builds a WHERE fragment over the JSON "inReplyTo" property.
func getInReplyToWheres(strs ap.CompStrs) (string, []interface{}) {
	return getStringFieldInJSONWheres(strs, "inReplyTo")
}
// getAttributedToWheres builds a WHERE fragment over the JSON "attributedTo" property.
func getAttributedToWheres(strs ap.CompStrs) (string, []interface{}) {
	return getStringFieldInJSONWheres(strs, "attributedTo")
}
// getWhereClauses assembles the WHERE clauses and bind values implied by
// every filter dimension (type, IRI, name, inReplyTo, attributedTo, url,
// context). When no filter applies, it falls back to a constant clause:
// " true" for the well-known FedBOX collections, " false" otherwise.
func getWhereClauses(f *ap.Filters) ([]string, []interface{}) {
	clauses := make([]string, 0)
	values := make([]interface{}, 0)
	// collect appends a (clause, bind values) pair when the clause is
	// non-empty, keeping values in the same order as their clauses.
	collect := func(clause string, vals []interface{}) {
		if len(clause) > 0 {
			values = append(values, vals...)
			clauses = append(clauses, clause)
		}
	}
	collect(getTypeWheres(f.Types()))
	collect(getIRIWheres(f.IRIs(), f.GetLink()))
	collect(getNamesWheres(f.Names()))
	collect(getInReplyToWheres(f.InReplyTo()))
	collect(getAttributedToWheres(f.AttributedTo()))
	collect(getURLWheres(f.URLs()))
	collect(getContextWheres(f.Context()))
	if len(clauses) == 0 {
		if ap.FedBOXCollections.Contains(f.Collection) {
			clauses = append(clauses, " true")
		} else {
			clauses = append(clauses, " false")
		}
	}
	return clauses, values
}
// getLimit renders the LIMIT/OFFSET suffix for a filterable. It returns ""
// for non-*ap.Filters values, when MaxItems is 0, and — matching the
// original behavior — when CurPage is 0 (no LIMIT is emitted for an unset
// page either).
func getLimit(f storage.Filterable) string {
	fil, ok := f.(*ap.Filters)
	if !ok || fil.MaxItems == 0 {
		return ""
	}
	if fil.CurPage > 0 {
		return fmt.Sprintf(" LIMIT %d OFFSET %d", fil.MaxItems, fil.MaxItems*(fil.CurPage-1))
	}
	return ""
}
|
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/Sirupsen/logrus"
"github.com/projectcalico/felix/hashutils"
. "github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
)
// WorkloadEndpointToIptablesChains renders the to/from filter chains for a
// workload endpoint interface. Workloads get no fail-safe chains and are
// always rendered as tracked chains; an admin-down endpoint has all its
// traffic dropped. The positional argument order of the shared helper is
// significant — see endpointToIptablesChains.
func (r *DefaultRuleRenderer) WorkloadEndpointToIptablesChains(
	ifaceName string,
	adminUp bool,
	policies []string,
	profileIDs []string,
) []*Chain {
	return r.endpointToIptablesChains(
		policies,
		profileIDs,
		ifaceName,
		PolicyInboundPfx,
		PolicyOutboundPfx,
		ProfileInboundPfx,
		ProfileOutboundPfx,
		WorkloadToEndpointPfx,
		WorkloadFromEndpointPfx,
		"", // No fail-safe chains for workloads.
		"", // No fail-safe chains for workloads.
		chainTypeTracked,
		adminUp,
	)
}
// HostEndpointToFilterChains renders the to/from filter-table chains for a
// host endpoint interface. NOTE(review): the inbound/outbound policy and
// profile prefixes are deliberately swapped relative to the workload
// variant — presumably because traffic direction is reversed from the
// host's perspective; confirm before changing the order.
func (r *DefaultRuleRenderer) HostEndpointToFilterChains(
	ifaceName string,
	policyNames []string,
	profileIDs []string,
) []*Chain {
	log.WithField("ifaceName", ifaceName).Debug("Rendering filter host endpoint chain.")
	return r.endpointToIptablesChains(
		policyNames,
		profileIDs,
		ifaceName,
		PolicyOutboundPfx,
		PolicyInboundPfx,
		ProfileOutboundPfx,
		ProfileInboundPfx,
		HostToEndpointPfx,
		HostFromEndpointPfx,
		ChainFailsafeOut,
		ChainFailsafeIn,
		chainTypeTracked,
		true, // Host endpoints are always admin up.
	)
}
// HostEndpointToRawChains renders the to/from chains for a host endpoint's
// untracked policies in the raw table. Profiles are not rendered here, and
// the chains use the "untracked" flavor, which maps an accepted packet to
// NOTRACK instead of a terminal drop.
func (r *DefaultRuleRenderer) HostEndpointToRawChains(
	ifaceName string,
	untrackedPolicyNames []string,
) []*Chain {
	log.WithField("ifaceName", ifaceName).Debug("Rendering raw (untracked) host endpoint chain.")
	return r.endpointToIptablesChains(
		untrackedPolicyNames,
		nil, // We don't render profiles into the raw chain.
		ifaceName,
		PolicyOutboundPfx,
		PolicyInboundPfx,
		ProfileOutboundPfx,
		ProfileInboundPfx,
		HostToEndpointPfx,
		HostFromEndpointPfx,
		ChainFailsafeOut,
		ChainFailsafeIn,
		chainTypeUntracked, // Render "untracked" version of chain for the raw table.
		true, // Host endpoints are always admin up.
	)
}
// endpointChainType selects the flavor of chain rendered by
// endpointToIptablesChains: tracked (filter table) or untracked (raw table).
type endpointChainType int
const (
	chainTypeTracked endpointChainType = iota
	chainTypeUntracked
)
// endpointToIptablesChains renders the "to endpoint" and "from endpoint"
// chains shared by workload and host endpoints. Processing order per
// direction: fail-safe jump (if configured), clear the accept mark, jump
// through each policy (returning early on accept; pass skips remaining
// policies), drop if tracked and nothing passed, then jump through each
// profile, and finally drop if tracked and nothing accepted. The rule
// order is load-bearing; do not reorder.
func (r *DefaultRuleRenderer) endpointToIptablesChains(
	policyNames []string,
	profileIds []string,
	name string,
	toPolicyPrefix PolicyChainNamePrefix,
	fromPolicyPrefix PolicyChainNamePrefix,
	toProfilePrefix ProfileChainNamePrefix,
	fromProfilePrefix ProfileChainNamePrefix,
	toEndpointPrefix string,
	fromEndpointPrefix string,
	toFailsafeChain string,
	fromFailsafeChain string,
	chainType endpointChainType,
	adminUp bool,
) []*Chain {
	toRules := []Rule{}
	fromRules := []Rule{}
	toChainName := EndpointChainName(toEndpointPrefix, name)
	fromChainName := EndpointChainName(fromEndpointPrefix, name)
	if !adminUp {
		// Endpoint is admin-down, drop all traffic to/from it.
		toRules = append(toRules, r.DropRules(Match(), "Endpoint admin disabled")...)
		fromRules = append(fromRules, r.DropRules(Match(), "Endpoint admin disabled")...)
		toEndpointChain := Chain{
			Name:  toChainName,
			Rules: toRules,
		}
		fromEndpointChain := Chain{
			Name:  fromChainName,
			Rules: fromRules,
		}
		return []*Chain{&toEndpointChain, &fromEndpointChain}
	}
	// First set up failsafes.
	if toFailsafeChain != "" {
		toRules = append(toRules, Rule{
			Action: JumpAction{Target: toFailsafeChain},
		})
	}
	if fromFailsafeChain != "" {
		fromRules = append(fromRules, Rule{
			Action: JumpAction{Target: fromFailsafeChain},
		})
	}
	// Start by ensuring that the accept mark bit is clear, policies set that bit to indicate
	// that they accepted the packet.
	toRules = append(toRules, Rule{
		Action: ClearMarkAction{
			Mark: r.IptablesMarkAccept,
		},
	})
	fromRules = append(fromRules, Rule{
		Action: ClearMarkAction{
			Mark: r.IptablesMarkAccept,
		},
	})
	if len(policyNames) > 0 {
		// Clear the "pass" mark. If a policy sets that mark, we'll skip the rest of the policies
		// continue processing the profiles, if there are any.
		toRules = append(toRules, Rule{
			Comment: "Start of policies",
			Action: ClearMarkAction{
				Mark: r.IptablesMarkPass,
			},
		})
		fromRules = append(fromRules, Rule{
			Comment: "Start of policies",
			Action: ClearMarkAction{
				Mark: r.IptablesMarkPass,
			},
		})
		// Then, jump to each policy in turn.
		for _, polID := range policyNames {
			toPolChainName := PolicyChainName(
				toPolicyPrefix,
				&proto.PolicyID{Name: polID},
			)
			// If a previous policy didn't set the "pass" mark, jump to the policy.
			toRules = append(toRules, Rule{
				Match:  Match().MarkClear(r.IptablesMarkPass),
				Action: JumpAction{Target: toPolChainName},
			})
			// If policy marked packet as accepted, it returns, setting the accept
			// mark bit.
			if chainType == chainTypeUntracked {
				// For an untracked policy, map allow to "NOTRACK and ALLOW".
				toRules = append(toRules, Rule{
					Match:  Match().MarkSet(r.IptablesMarkAccept),
					Action: NoTrackAction{},
				})
			}
			// If accept bit is set, return from this chain.  We don't immediately
			// accept because there may be other policy still to apply.
			toRules = append(toRules, Rule{
				Match:   Match().MarkSet(r.IptablesMarkAccept),
				Action:  ReturnAction{},
				Comment: "Return if policy accepted",
			})
			fromPolChainName := PolicyChainName(
				fromPolicyPrefix,
				&proto.PolicyID{Name: polID},
			)
			// If a previous policy didn't set the "pass" mark, jump to the policy.
			fromRules = append(fromRules, Rule{
				Match:  Match().MarkClear(r.IptablesMarkPass),
				Action: JumpAction{Target: fromPolChainName},
			})
			// If policy marked packet as accepted, it returns, setting the accept
			// mark bit.
			if chainType == chainTypeUntracked {
				// For an untracked policy, map allow to "NOTRACK and ALLOW".
				fromRules = append(fromRules, Rule{
					Match:  Match().MarkSet(r.IptablesMarkAccept),
					Action: NoTrackAction{},
				})
			}
			// If accept bit is set, return from this chain.  We don't immediately
			// accept because there may be other policy still to apply.
			fromRules = append(fromRules, Rule{
				Match:   Match().MarkSet(r.IptablesMarkAccept),
				Action:  ReturnAction{},
				Comment: "Return if policy accepted",
			})
		}
		if chainType == chainTypeTracked {
			// When rendering normal rules, if no policy marked the packet as "pass", drop the
			// packet.
			//
			// For untracked rules, we don't do that because there may be tracked rules
			// still to be applied to the packet in the filter table.
			toRules = append(toRules, r.DropRules(
				Match().MarkClear(r.IptablesMarkPass),
				"Drop if no policies passed packet")...)
			fromRules = append(fromRules, r.DropRules(
				Match().MarkClear(r.IptablesMarkPass),
				"Drop if no policies passed packet")...)
		}
	}
	if chainType == chainTypeTracked {
		// Then, jump to each profile in turn.
		for _, profileID := range profileIds {
			toProfChainName := ProfileChainName(toProfilePrefix, &proto.ProfileID{Name: profileID})
			fromProfChainName := ProfileChainName(fromProfilePrefix, &proto.ProfileID{Name: profileID})
			toRules = append(toRules,
				Rule{Action: JumpAction{Target: toProfChainName}},
				// If policy marked packet as accepted, it returns, setting the
				// accept mark bit.  If that is set, return from this chain.
				Rule{
					Match:   Match().MarkSet(r.IptablesMarkAccept),
					Action:  ReturnAction{},
					Comment: "Return if profile accepted",
				})
			fromRules = append(fromRules,
				Rule{Action: JumpAction{Target: fromProfChainName}},
				// If policy marked packet as accepted, it returns, setting the
				// accept mark bit.  If that is set, return from this chain.
				Rule{
					Match:   Match().MarkSet(r.IptablesMarkAccept),
					Action:  ReturnAction{},
					Comment: "Return if profile accepted",
				})
		}
		// When rendering normal rules, if no profile marked the packet as accepted, drop
		// the packet.
		//
		// For untracked rules, we don't do that because there may be tracked rules
		// still to be applied to the packet in the filter table.
		toRules = append(toRules, r.DropRules(Match(), "Drop if no profiles matched")...)
		fromRules = append(fromRules, r.DropRules(Match(), "Drop if no profiles matched")...)
	}
	toEndpointChain := Chain{
		Name:  toChainName,
		Rules: toRules,
	}
	fromEndpointChain := Chain{
		Name:  fromChainName,
		Rules: fromRules,
	}
	return []*Chain{&toEndpointChain, &fromEndpointChain}
}
// EndpointChainName returns the dispatch chain name for the given interface,
// hashed by hashutils so it never exceeds the iptables chain-name limit.
func EndpointChainName(prefix string, ifaceName string) string {
	chainName := hashutils.GetLengthLimitedID(prefix, ifaceName, MaxChainNameLength)
	return chainName
}
|
package repository
import (
"errors"
"github.com/Mangaba-Labs/ape-finance-api/pkg/domain/user"
"gorm.io/gorm"
)
// Repository is the gorm-backed persistence layer for user records.
type Repository struct {
	DB *gorm.DB // this can be any gorm instance
}
// FindAll runs a query for every user row. The gorm result handle is returned
// through the named *gorm.DB result (signature kept for compatibility) and its
// Error field is surfaced as err.
func (r Repository) FindAll() (users *gorm.DB, err error) {
	// Query into a slice of user.User. The previous code passed &users (a
	// **gorm.DB) to Find, which asked gorm to map rows onto its own handle
	// instead of onto user records.
	var records []user.User
	users = r.DB.Find(&records)
	err = users.Error
	return
}
// FindOneByEmail fetches the first user whose email matches exactly.
func (r Repository) FindOneByEmail(email string) (user user.User, err error) {
	err = r.DB.First(&user, "email = ?", email).Error
	return
}

// FindById fetches the user with the given primary-key id.
func (r Repository) FindById(id int) (user user.User, err error) {
	err = r.DB.First(&user, "id = ?", id).Error
	return
}
// Delete removes the user row with the given id, surfacing any gorm error.
func (r Repository) Delete(id int) (err error) {
	result := r.DB.Delete(user.User{}, "id = ?", id)
	err = result.Error
	return err
}
// Create inserts the given user. It fails if the insert errors or if gorm
// reports that no row was written.
func (r Repository) Create(user *user.User) error {
	result := r.DB.Create(user)
	if result.Error != nil {
		// Wrap instead of discarding the driver error so callers can inspect
		// the root cause with errors.Is / errors.Unwrap.
		return fmt.Errorf("cannot create user: %w", result.Error)
	}
	if result.RowsAffected <= 0 {
		return errors.New("cannot create user")
	}
	return nil
}
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
graphql "github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
"github.com/jackc/pgx/v4"
)
// main wires up a minimal GraphQL endpoint backed by a Postgres connection.
func main() {
	// GraphQL SDL: a field with no arguments must be declared WITHOUT
	// parentheses ("GetThread: Thread"); "GetThread(): Thread" is a parse
	// error that makes MustParseSchema panic at startup.
	s := `
	schema {
		query: Query
	}
	type Query {
		GetThread: Thread
	}
	type Thread {
		id: String
		author: String
		title: String
		date_posted: String
	}
	`
	fmt.Println(" Connecting to Postgres Database...")
	// The connection string was previously passed to os.Getenv as if it were
	// an environment variable NAME, so pgx always received "". Read a real
	// env var and fall back to the local default.
	connStr := os.Getenv("DATABASE_URL")
	if connStr == "" {
		connStr = "postgres://postgres@localhost:5432"
	}
	conn, err := pgx.Connect(context.Background(), connStr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
		os.Exit(1)
	}
	defer conn.Close(context.Background())
	schema := graphql.MustParseSchema(s, &Resolver{})
	fmt.Println(" 🚀 - Server is now listening on port 8080")
	http.Handle("/query", &relay.Handler{Schema: schema})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package br
import (
"regexp"
"strconv"
"strings"
"time"
"github.com/olebedev/when/rules"
)
// ExactMonthDate builds a rule that parses Brazilian-Portuguese dates of the
// forms "<day> de <month>", "<ordinal> dia de <month>" or a bare "<month>",
// storing the matched month (and day, when present) in the parse context.
func ExactMonthDate(s rules.Strategy) rules.Rule {
	// Whether an Override strategy was requested. It is computed here but not
	// applied in the Applier below (see the blank assignment).
	overwrite := s == rules.Override
	return &rules.F{
		RegExp: regexp.MustCompile("(?i)" +
			"(?:\\W|^)" +
			"(?:(?:(\\d{1,2})|(" + ORDINAL_WORDS_PATTERN[3:] + // skip '(?:'
			")(?:\\sdia\\sde\\s|\\sde\\s|\\s))*" +
			"(" + MONTH_OFFSET_PATTERN[3:] + // skip '(?:'
			"(?:\\W|$)",
		),
		Applier: func(m *rules.Match, c *rules.Context, o *rules.Options, ref time.Time) (bool, error) {
			// NOTE(review): overwrite is deliberately discarded here —
			// confirm whether the Override strategy is supposed to replace
			// previously captured context values.
			_ = overwrite
			// Capture 0: numeric day; capture 1: ordinal day word;
			// capture 2: month name.
			num := strings.ToLower(strings.TrimSpace(m.Captures[0]))
			ord := strings.ToLower(strings.TrimSpace(m.Captures[1]))
			mon := strings.ToLower(strings.TrimSpace(m.Captures[2]))
			monInt, ok := MONTH_OFFSET[mon]
			if !ok {
				// Unknown month name: not a match, but not an error either.
				return false, nil
			}
			c.Month = &monInt
			if ord != "" {
				ordInt, ok := ORDINAL_WORDS[ord]
				if !ok {
					return false, nil
				}
				c.Day = &ordInt
			}
			if num != "" {
				// bitSize 8 is sufficient for a 1-2 digit day value.
				n, err := strconv.ParseInt(num, 10, 8)
				if err != nil {
					return false, nil
				}
				num := int(n)
				c.Day = &num
			}
			return true, nil
		},
	}
}
|
package pipa
import (
"io"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo spec for RetryPolicy.Perform. The counter n is shared across the
// three scenarios, so the expected totals below are cumulative.
var _ = Describe("RetryPolicy", func() {
	It("should retry", func() {
		var n int
		// Zero-value policy: the callback runs exactly once and its error is
		// returned unchanged.
		err := (RetryPolicy{}).Perform(func() error {
			n++
			return io.EOF
		})
		Expect(err).To(Equal(io.EOF))
		Expect(n).To(Equal(1))
		// Times: 10 yields 11 invocations in total here (n goes 1 -> 12),
		// and the last error still surfaces.
		err = (RetryPolicy{Times: 10, Sleep: time.Millisecond}).Perform(func() error {
			n++
			return io.EOF
		})
		Expect(err).To(Equal(io.EOF))
		Expect(n).To(Equal(12))
		// A nil return stops retrying early: n reaches exactly 20 and no
		// error is reported.
		err = (RetryPolicy{Times: 10, Sleep: time.Millisecond}).Perform(func() error {
			if n++; n == 20 {
				return nil
			}
			return io.EOF
		})
		Expect(err).NotTo(HaveOccurred())
		Expect(n).To(Equal(20))
	})
})
|
package Reverse_Linked_List
// ListNode is a singly linked list element.
type ListNode struct {
	Val  int
	Next *ListNode
}

// reverseList reverses the list starting at head and returns the new head.
// It delegates to the recursive implementation.
func reverseList(head *ListNode) *ListNode {
	return reverseListRecursive(head)
}

// reverseListRecursive reverses the list by first reversing everything after
// head, then hooking head onto the end of that reversed tail.
func reverseListRecursive(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	newHead := reverseListRecursive(head.Next)
	head.Next.Next = head // the old successor now points back at head
	head.Next = nil       // head becomes the new tail
	return newHead
}

// reverseListNonRecursive reverses the list iteratively by walking it once
// and redirecting each node's Next pointer to its predecessor.
func reverseListNonRecursive(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	var prev *ListNode
	curr := head
	for curr != nil {
		next := curr.Next
		curr.Next = prev
		prev = curr
		curr = next
	}
	return prev
}
|
package Integer
// Integer wraps the built-in int so ordering and in-place addition can be
// expressed as methods.
type Integer int

// Less reports whether a is strictly smaller than b.
func (a Integer) Less(b Integer) bool {
	return int(a) < int(b)
}

// Add increases a by b in place. The pointer receiver is required so the
// mutation is visible to the caller; a value receiver would only change a copy.
func (a *Integer) Add(b Integer) {
	*a = *a + b
}
|
// https://programmers.co.kr/learn/courses/30/lessons/42860
package main
// p42860 solves the "JOYSTICK" puzzle: each letter costs the minimum number of
// up/down presses from 'A', plus horizontal moves. The longest run of zero-cost
// letters may be skipped by reversing direction when that is cheaper.
func p42860(name string) int {
	// Per-letter vertical cost: min(c-'A', 26-(c-'A')).
	costs := make([]int, len(name))
	for i, ch := range name {
		c := int(ch - 65)
		if c > 13 {
			c = 26 - c
		}
		costs[i] = c
	}
	// Sum the vertical cost and track the longest run of zeros together with
	// the index where that run ends.
	var total, bestRun, run, bestEnd int
	for i, c := range costs {
		if c != 0 {
			total += c
			run = 0
			continue
		}
		run++
		if run > bestRun {
			bestRun = run
			bestEnd = i
		}
	}
	// If turning around before the zero run is cheaper than crossing it,
	// trade bestRun horizontal moves for the shorter backtrack.
	start := bestEnd - bestRun
	if start < bestRun {
		total += start - bestRun
	}
	return total + len(name) - 1
}
|
package main
import (
"flag"
"github.com/codeskyblue/go-sh"
)
// conversionSearchVersion maps a short Android version string to the exact
// release tag used by the tools.oesf.biz cross-reference site. Unknown
// versions fall back to the newest supported release.
func conversionSearchVersion(version string) string {
	switch version {
	case "2.2":
		return "2.2_r1.1"
	case "2.3":
		return "2.3_r1.10"
	case "2.3.7":
		return "2.3.7_r1.0"
	case "4.0.1":
		return "4.0.1_r1.0"
	case "4.0.3":
		return "4.0.3_r1.0"
	case "4.1.1":
		return "4.1.1_r1.0"
	case "4.1.2":
		return "4.1.2_r1.0"
	case "4.2.0":
		return "4.2.0_r1.0"
	case "4.3.0":
		return "4.3.0_r3.1"
	case "4.3.1":
		return "4.3.1_r1.0"
	case "4.4.0":
		return "4.4.0_r1.0"
	case "4.4.1":
		return "4.4.1_r1.0"
	case "4.4.2":
		return "4.4.2_r1.0"
	case "4.4.3":
		return "4.4.3_r1.0"
	case "4.4.4":
		return "4.4.4_r1.0"
	case "4.4w":
		return "4.4w_r1.0"
	case "5.0.0":
		return "5.0.0_r2.0"
	case "5.0.1":
		return "5.0.1_r1.0"
	case "5.1.0":
		return "5.1.0_r1.0"
	case "5.1.1":
		return "5.1.1_r9.0"
	// Go switch cases do NOT fall through; the previous empty `case "6":`
	// and `case "7":` bodies silently returned the default instead of the
	// intended mapping. Multi-value cases express the aliasing correctly.
	case "6", "6.0":
		return "6.0.0_r1.0"
	case "7", "7.0":
		return "7.0.0_r1.0"
	case "7.1.0":
		return "7.1.0_r1.0"
	case "7.1.1":
		return "7.1.1_r1.0"
	}
	return "7.0.0_r1.0"
}
func main() {
var fullSearch = flag.String("s", "", "Full Search")
var filePath = flag.String("f", "", "File Path")
var definition = flag.String("d", "", "Definition")
var symbol = flag.String("sy", "", "Symbol")
var version = flag.String("v", "7.0.0_r1.0", "Version")
flag.Parse()
sh.Command("open", "http://tools.oesf.biz/android-" + conversionSearchVersion(*version) + "/search?q=" + *fullSearch + "&defs=" + *definition + "&refs=" + *symbol + "&path=" + *filePath+"&hist=").Run()
}
|
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"github.com/emicklei/go-restful"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api"
)
// GetPipelineSonarStatusHandler writes the SonarQube status of a pipeline to
// the response as JSON; failures are logged and reported as internal errors.
func (h PipelineSonarHandler) GetPipelineSonarStatusHandler(request *restful.Request, resp *restful.Response) {
	devopsProject := request.PathParameter("devops")
	pipelineName := request.PathParameter("pipeline")
	sonarStatus, err := h.pipelineSonarGetter.GetPipelineSonar(devopsProject, pipelineName)
	if err != nil {
		klog.Errorf("%+v", err)
		api.HandleInternalError(resp, nil, err)
		return
	}
	resp.WriteAsJson(sonarStatus)
}
// GetMultiBranchesPipelineSonarStatusHandler writes the SonarQube status of
// one branch of a multi-branch pipeline to the response as JSON.
func (h PipelineSonarHandler) GetMultiBranchesPipelineSonarStatusHandler(request *restful.Request, resp *restful.Response) {
	devopsProject := request.PathParameter("devops")
	pipelineName := request.PathParameter("pipeline")
	branchName := request.PathParameter("branch")
	sonarStatus, err := h.pipelineSonarGetter.GetMultiBranchPipelineSonar(devopsProject, pipelineName, branchName)
	if err != nil {
		klog.Errorf("%+v", err)
		api.HandleInternalError(resp, nil, err)
		return
	}
	resp.WriteAsJson(sonarStatus)
}
|
package main
import "fmt"
// main prints a small name-to-greeting map.
func main() {
	myGreeting := make(map[string]string, 2)
	myGreeting["Bob"] = "Good Morning"
	myGreeting["Vladamir"] = "Dobroe Ootro"
	fmt.Println(myGreeting)
}
|
package test
import (
"io/ioutil"
"os"
"testing"
"github.com/CharellKing/z_gateway/idl"
"github.com/sirupsen/logrus"
)
// TestJson2Struct loads a sample module definition from JSON and converts it
// to Go struct declarations.
//
// NOTE(review): `json` is not among this file's imports, so as written this
// cannot compile — presumably NewModule2Struct lives in a sub-package of the
// imported idl package; confirm the intended import path.
// NOTE(review): a ReadFile failure is only logged, after which Loads runs on
// empty content; consider failing the test instead.
func TestJson2Struct(t *testing.T) {
	goPath := os.Getenv("GOPATH")
	content, err := ioutil.ReadFile(goPath + "/src/github.com/CharellKing/z_gateway/idl/test/samples/module.json")
	if err != nil {
		logrus.Error(err)
	}
	module2Struct := json.NewModule2Struct(nil)
	if err := module2Struct.Loads(content); err != nil {
		logrus.Error(err)
	}
	module2Struct.ToStructs()
	// Redundant trailing return, kept as-is.
	return
}
|
package pkg
import (
"log"
"path/filepath"
"testing"
)
// TestSalting derives a key from the same passphrase twice and logs both
// digests for manual comparison (no assertions are made).
func TestSalting(t *testing.T) {
	for i := 0; i < 2; i++ {
		hash := generateKey("This is a bad passphrase")
		log.Printf("Hashed passphrase : length %d, %x\n", len(hash), hash)
	}
}
// TestIV generates three initialization vectors and logs them for manual
// inspection (no assertions are made).
func TestIV(t *testing.T) {
	for i := 0; i < 3; i++ {
		iv := generateInitVector()
		log.Printf("Init Vector: length %d %x\n", len(iv), iv)
	}
}
// TestEncryptFile encrypts two small source files to /tmp using 16- and
// 20-byte passphrases.
func TestEncryptFile(t *testing.T) {
	cases := []struct{ pass, src, dst string }{
		{"Thisisabadpassph", "crypt_test.go", "/tmp/crypt_test_go.enc"},
		{"Thisisabadpassphrase", "crypt.go", "/tmp/crypt_go.enc"},
	}
	for _, c := range cases {
		Encrypt(c.pass, c.src, c.dst)
	}
}

// TestEncryptFileBig builds the large pack fixture and encrypts it.
func TestEncryptFileBig(t *testing.T) {
	TestPackfilesBig(t)
	src := filepath.Join(WorkDir, "bigpack.tgz")
	dst := filepath.Join(WorkDir, "bigpack.spm")
	Encrypt("Thisisabadpassph", src, dst)
}

// TestDecryptFile re-creates the encrypted fixtures, then decrypts them —
// including one attempt with a passphrase that does not match the one used
// by TestEncryptFile for that file.
func TestDecryptFile(t *testing.T) {
	TestEncryptFile(t)
	cases := []struct{ pass, src, dst string }{
		{"Thisisabadpassphrase", "/tmp/crypt_test_go.enc", "/tmp/crypt_test_go"},
		{"Thisisabadpassph", "/tmp/crypt_test_go.enc", "/tmp/crypt_test_go"},
		{"Thisisabadpassphrase", "/tmp/crypt_go.enc", "/tmp/crypt_go"},
	}
	for _, c := range cases {
		Decrypt(c.pass, c.src, c.dst)
	}
}

// TestDecryptFileBig encrypts the big fixture and decrypts it to /tmp.
func TestDecryptFileBig(t *testing.T) {
	TestEncryptFileBig(t)
	Decrypt("Thisisabadpassph", filepath.Join(WorkDir, "bigpack.spm"), "/tmp/bigpack.tgz")
}
|
package server
import (
"bytes"
"context"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
)
// getTipCommit resolves the commit hash at HEAD for the given repository by
// running `git rev-parse HEAD` through the gitserver client.
func getTipCommit(repositoryID int) (string, error) {
	ctx := context.Background()
	repo, err := db.Repos.Get(ctx, api.RepoID(repositoryID))
	if err != nil {
		return "", err
	}
	cmd := gitserver.DefaultClient.Command("git", "rev-parse", "HEAD")
	cmd.Repo = gitserver.Repo{Name: repo.Name}
	out, err := cmd.CombinedOutput(ctx)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(out)), nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package memo
import (
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// InferType derives the type of the given scalar expression. Depending upon
// the operator, the type may be fixed, or it may be dependent upon the
// expression children.
func InferType(mem *Memo, e opt.ScalarExpr) *types.T {
	// Variable is the only operator that needs the memo, so handle it first.
	if e.Op() == opt.VariableOp {
		return typeVariable(mem, e)
	}
	typeFn, ok := typingFuncMap[e.Op()]
	if !ok {
		panic(errors.AssertionFailedf("type inference for %v is not yet implemented", log.Safe(e.Op())))
	}
	return typeFn(e)
}
// InferUnaryType infers the return type of a unary operator, given the type
// of its input.
func InferUnaryType(op opt.Operator, inputType *types.T) *types.T {
	unaryOp := opt.UnaryOpReverseMap[op]
	// Scan the overloads of the unary op for one whose operand type matches.
	for _, candidate := range tree.UnaryOps[unaryOp] {
		overload := candidate.(*tree.UnaryOp)
		if inputType.Equivalent(overload.Typ) {
			return overload.ReturnType
		}
	}
	panic(errors.AssertionFailedf("could not find type for unary expression %s", log.Safe(op)))
}

// InferBinaryType infers the return type of a binary expression, given the
// type of its inputs.
func InferBinaryType(op opt.Operator, leftType, rightType *types.T) *types.T {
	overload, ok := FindBinaryOverload(op, leftType, rightType)
	if !ok {
		panic(errors.AssertionFailedf("could not find type for binary expression %s", log.Safe(op)))
	}
	return overload.ReturnType
}

// InferWhensType returns the type of a CASE expression, which is
// of the form:
//   CASE [ <cond> ]
//       WHEN <condval1> THEN <expr1>
//     [ WHEN <condval2> THEN <expr2> ] ...
//     [ ELSE <expr> ]
//   END
// All possible values should have the same type, and that is the type of the
// case.
func InferWhensType(whens ScalarListExpr, orElse opt.ScalarExpr) *types.T {
	result := orElse.DataType()
	// Sanity check: every WHEN branch must agree with the ELSE type.
	for i := range whens {
		if t := whens[i].DataType(); !result.Equivalent(t) {
			panic(errors.AssertionFailedf("inconsistent Case return types %s %s", t, result))
		}
	}
	return result
}
// BinaryOverloadExists returns true if the given binary operator exists with
// the given arguments.
func BinaryOverloadExists(op opt.Operator, leftType, rightType *types.T) bool {
	_, found := FindBinaryOverload(op, leftType, rightType)
	return found
}

// BinaryAllowsNullArgs returns true if the given binary operator allows null
// arguments, and cannot therefore be folded away to null.
func BinaryAllowsNullArgs(op opt.Operator, leftType, rightType *types.T) bool {
	overload, found := FindBinaryOverload(op, leftType, rightType)
	if !found {
		panic(errors.AssertionFailedf("could not find overload for binary expression %s", log.Safe(op)))
	}
	return overload.NullableArgs
}

// AggregateOverloadExists returns whether or not the given operator has a
// unary overload which takes the given type as input.
func AggregateOverloadExists(agg opt.Operator, typ *types.T) bool {
	name := opt.AggregateOpReverseMap[agg]
	_, overloads := builtins.GetBuiltinProperties(name)
	for i := range overloads {
		if overloads[i].Types.MatchAt(typ, 0) {
			return true
		}
	}
	return false
}
// FindFunction returns the function properties and overload of the function
// with the given name whose argument types match the children of the given
// input expression.
func FindFunction(
	e opt.ScalarExpr, name string,
) (props *tree.FunctionProperties, overload *tree.Overload, ok bool) {
	props, overloads := builtins.GetBuiltinProperties(name)
	for i := range overloads {
		candidate := &overloads[i]
		// Arity must match before we compare argument types.
		if candidate.Types.Length() != e.ChildCount() {
			continue
		}
		match := true
		for j, n := 0, e.ChildCount(); j < n; j++ {
			argType := e.Child(j).(opt.ScalarExpr).DataType()
			if !candidate.Types.MatchAt(argType, j) {
				match = false
				break
			}
		}
		if match {
			return props, candidate, true
		}
	}
	return nil, nil, false
}
// FindWindowOverload finds a window function overload that matches the given
// window function expression. It panics if no match can be found.
func FindWindowOverload(e opt.ScalarExpr) (name string, overload *tree.Overload) {
	name = opt.WindowOpReverseMap[e.Op()]
	if _, o, ok := FindFunction(e, name); ok {
		return name, o
	}
	// NB: all aggregate functions can be used as window functions.
	return FindAggregateOverload(e)
}

// FindAggregateOverload finds an aggregate function overload that matches the
// given aggregate function expression. It panics if no match can be found.
func FindAggregateOverload(e opt.ScalarExpr) (name string, overload *tree.Overload) {
	name = opt.AggregateOpReverseMap[e.Op()]
	if _, o, ok := FindFunction(e, name); ok {
		return name, o
	}
	panic(errors.AssertionFailedf("could not find overload for %s aggregate", name))
}
// typingFunc is the signature of a per-operator typing routine: it derives
// the data type of one instance of a scalar operator.
type typingFunc func(e opt.ScalarExpr) *types.T

// typingFuncMap is a lookup table from scalar operator type to a function
// which returns the data type of an instance of that operator. It is
// populated by init below.
var typingFuncMap map[opt.Operator]typingFunc
func init() {
	// Registration order matters here: the explicit entries below win over
	// the generic aggregate/window fallbacks, because the trailing loops only
	// fill operators whose map entry is still nil.
	typingFuncMap = make(map[opt.Operator]typingFunc)
	typingFuncMap[opt.PlaceholderOp] = typeAsTypedExpr
	typingFuncMap[opt.UnsupportedExprOp] = typeAsTypedExpr
	typingFuncMap[opt.CoalesceOp] = typeCoalesce
	typingFuncMap[opt.CaseOp] = typeCase
	typingFuncMap[opt.WhenOp] = typeWhen
	typingFuncMap[opt.CastOp] = typeCast
	typingFuncMap[opt.SubqueryOp] = typeSubquery
	typingFuncMap[opt.ColumnAccessOp] = typeColumnAccess
	typingFuncMap[opt.IndirectionOp] = typeIndirection
	typingFuncMap[opt.CollateOp] = typeCollate
	typingFuncMap[opt.ArrayFlattenOp] = typeArrayFlatten
	typingFuncMap[opt.IfErrOp] = typeIfErr
	// Override default typeAsAggregate behavior for aggregate functions with
	// a large number of possible overloads or where ReturnType depends on
	// argument types.
	typingFuncMap[opt.ArrayAggOp] = typeArrayAgg
	typingFuncMap[opt.MaxOp] = typeAsFirstArg
	typingFuncMap[opt.MinOp] = typeAsFirstArg
	typingFuncMap[opt.ConstAggOp] = typeAsFirstArg
	typingFuncMap[opt.ConstNotNullAggOp] = typeAsFirstArg
	typingFuncMap[opt.AnyNotNullAggOp] = typeAsFirstArg
	typingFuncMap[opt.FirstAggOp] = typeAsFirstArg
	typingFuncMap[opt.LagOp] = typeAsFirstArg
	typingFuncMap[opt.LeadOp] = typeAsFirstArg
	typingFuncMap[opt.NthValueOp] = typeAsFirstArg
	// Modifiers for aggregations pass through their argument.
	typingFuncMap[opt.AggDistinctOp] = typeAsFirstArg
	typingFuncMap[opt.AggFilterOp] = typeAsFirstArg
	typingFuncMap[opt.WindowFromOffsetOp] = typeAsFirstArg
	typingFuncMap[opt.WindowToOffsetOp] = typeAsFirstArg
	for _, op := range opt.BinaryOperators {
		typingFuncMap[op] = typeAsBinary
	}
	for _, op := range opt.UnaryOperators {
		typingFuncMap[op] = typeAsUnary
	}
	for _, op := range opt.AggregateOperators {
		// Fill in any that are not already added to the typingFuncMap above.
		if typingFuncMap[op] == nil {
			typingFuncMap[op] = typeAsAggregate
		}
	}
	for _, op := range opt.WindowOperators {
		if typingFuncMap[op] == nil {
			typingFuncMap[op] = typeAsWindow
		}
	}
}
// typeVariable returns the type of a variable expression, which is stored in
// the query metadata and accessed by column id.
func typeVariable(mem *Memo, e opt.ScalarExpr) *types.T {
	v := e.(*VariableExpr)
	if typ := mem.Metadata().ColumnMeta(v.Col).Type; typ != nil {
		return typ
	}
	panic(errors.AssertionFailedf("column %d does not have type", log.Safe(v.Col)))
}

// typeArrayAgg returns an array type with element type equal to the type of
// the aggregate expression's first (and only) argument.
func typeArrayAgg(e opt.ScalarExpr) *types.T {
	return types.MakeArray(e.(*ArrayAggExpr).Input.DataType())
}

// typeIndirection returns the type of the element of the array.
func typeIndirection(e opt.ScalarExpr) *types.T {
	arrType := e.Child(0).(opt.ScalarExpr).DataType()
	return arrType.ArrayContents()
}

// typeCollate returns the collated string typed with the given locale.
func typeCollate(e opt.ScalarExpr) *types.T {
	return types.MakeCollatedString(types.String, e.(*CollateExpr).Locale)
}

// typeArrayFlatten returns the type of the subquery as an array.
func typeArrayFlatten(e opt.ScalarExpr) *types.T {
	flatten := e.(*ArrayFlattenExpr)
	input := e.Child(0).(RelExpr)
	elemType := input.Memo().Metadata().ColumnMeta(flatten.RequestedCol).Type
	return types.MakeArray(elemType)
}

// typeIfErr returns the type of the IfErrExpr. The type is boolean if there
// is no OrElse, and the type of Cond/OrElse otherwise.
func typeIfErr(e opt.ScalarExpr) *types.T {
	ifErr := e.(*IfErrExpr)
	if ifErr.OrElse.ChildCount() == 0 {
		return types.Bool
	}
	return ifErr.Cond.DataType()
}

// typeAsFirstArg returns the type of the expression's 0th argument.
func typeAsFirstArg(e opt.ScalarExpr) *types.T {
	first := e.Child(0).(opt.ScalarExpr)
	return first.DataType()
}
// typeAsTypedExpr returns the resolved type of the private field, with the
// assumption that it is a tree.TypedExpr.
func typeAsTypedExpr(e opt.ScalarExpr) *types.T {
	typed := e.Private().(tree.TypedExpr)
	return typed.ResolvedType()
}

// typeAsUnary returns the type of a unary expression by hooking into the sql
// semantics code that searches for unary operator overloads.
func typeAsUnary(e opt.ScalarExpr) *types.T {
	input := e.Child(0).(opt.ScalarExpr)
	return InferUnaryType(e.Op(), input.DataType())
}

// typeAsBinary returns the type of a binary expression by hooking into the
// sql semantics code that searches for binary operator overloads.
func typeAsBinary(e opt.ScalarExpr) *types.T {
	left := e.Child(0).(opt.ScalarExpr)
	right := e.Child(1).(opt.ScalarExpr)
	return InferBinaryType(e.Op(), left.DataType(), right.DataType())
}
// typeAsAggregate returns the type of an aggregate expression by hooking into
// the sql semantics code that searches for aggregate operator overloads.
// Only aggregates whose return type does not depend on argument types are
// handled here (nil is passed to the ReturnTyper); the rest have dedicated
// entries in typingFuncMap.
func typeAsAggregate(e opt.ScalarExpr) *types.T {
	_, overload := FindAggregateOverload(e)
	if t := overload.ReturnType(nil); t != tree.UnknownReturnType {
		return t
	}
	panic(errors.AssertionFailedf("unknown aggregate return type. e:\n%s", e))
}

// typeAsWindow returns the type of a window function expression similar to
// typeAsAggregate.
func typeAsWindow(e opt.ScalarExpr) *types.T {
	_, overload := FindWindowOverload(e)
	if t := overload.ReturnType(nil); t != tree.UnknownReturnType {
		return t
	}
	panic(errors.AssertionFailedf("unknown window return type. e:\n%s", e))
}
// typeCoalesce returns the type of a coalesce expression, which is equal to
// the type of its first non-null child.
func typeCoalesce(e opt.ScalarExpr) *types.T {
	for _, arg := range e.(*CoalesceExpr).Args {
		if t := arg.DataType(); t.Family() != types.UnknownFamily {
			return t
		}
	}
	return types.Unknown
}

// typeCase returns the type of a CASE expression, which is
// of the form:
//   CASE [ <cond> ]
//       WHEN <condval1> THEN <expr1>
//     [ WHEN <condval2> THEN <expr2> ] ...
//     [ ELSE <expr> ]
//   END
// The type is equal to the type of the WHEN <condval> THEN <expr> clauses, or
// the type of the ELSE <expr> value if all the previous types are unknown.
func typeCase(e opt.ScalarExpr) *types.T {
	c := e.(*CaseExpr)
	return InferWhensType(c.Whens, c.OrElse)
}

// typeWhen returns the type of a WHEN <condval> THEN <expr> clause inside a
// CASE statement.
func typeWhen(e opt.ScalarExpr) *types.T {
	when := e.(*WhenExpr)
	return when.Value.DataType()
}

// typeCast returns the type of a CAST operator.
func typeCast(e opt.ScalarExpr) *types.T {
	return e.(*CastExpr).Typ
}

// typeSubquery returns the type of a subquery, which is equal to the type of
// its first (and only) column.
func typeSubquery(e opt.ScalarExpr) *types.T {
	rel := e.Child(0).(RelExpr)
	col := rel.Relational().OutputCols.SingleColumn()
	return rel.Memo().Metadata().ColumnMeta(col).Type
}

// typeColumnAccess returns the type of the tuple element selected by a
// column-access expression.
func typeColumnAccess(e opt.ScalarExpr) *types.T {
	access := e.(*ColumnAccessExpr)
	return access.Input.DataType().TupleContents()[access.Idx]
}
// FindBinaryOverload finds the correct type signature overload for the
// specified binary operator, given the types of its inputs. If an overload is
// found, FindBinaryOverload returns true, plus a pointer to the overload.
// If an overload is not found, FindBinaryOverload returns false.
func FindBinaryOverload(op opt.Operator, leftType, rightType *types.T) (_ *tree.BinOp, ok bool) {
	bin := opt.BinaryOpReverseMap[op]
	// Scan the overloads of the binary op; at most one can match (the
	// TestTypingBinaryAssumptions test enforces this even as new operators or
	// overloads are added). An Unknown operand matches on the other side only.
	for _, candidate := range tree.BinOps[bin] {
		o := candidate.(*tree.BinOp)
		var matches bool
		switch {
		case leftType.Family() == types.UnknownFamily:
			matches = rightType.Equivalent(o.RightType)
		case rightType.Family() == types.UnknownFamily:
			matches = leftType.Equivalent(o.LeftType)
		default:
			matches = leftType.Equivalent(o.LeftType) && rightType.Equivalent(o.RightType)
		}
		if matches {
			return o, true
		}
	}
	return nil, false
}

// FindUnaryOverload finds the correct type signature overload for the
// specified unary operator, given the type of its input. If an overload is
// found, FindUnaryOverload returns true, plus a pointer to the overload.
// If an overload is not found, FindUnaryOverload returns false.
func FindUnaryOverload(op opt.Operator, typ *types.T) (_ *tree.UnaryOp, ok bool) {
	unary := opt.UnaryOpReverseMap[op]
	for _, candidate := range tree.UnaryOps[unary] {
		if o := candidate.(*tree.UnaryOp); o.Typ.Equivalent(typ) {
			return o, true
		}
	}
	return nil, false
}
// FindComparisonOverload finds the correct type signature overload for the
// specified comparison operator, given the types of its inputs. If an
// overload is found, FindComparisonOverload returns a pointer to the overload
// and ok=true. It also returns "flipped" and "not" flags. The "flipped" flag
// indicates whether the original left and right operands should be flipped
// with the returned overload. The "not" flag indicates whether the result of
// the comparison operation should be negated. If an overload is not found,
// FindComparisonOverload returns ok=false.
func FindComparisonOverload(
	op opt.Operator, leftType, rightType *types.T,
) (_ *tree.CmpOp, flipped, not, ok bool) {
	op, flipped, not = NormalizeComparison(op)
	comp := opt.ComparisonOpReverseMap[op]
	if flipped {
		leftType, rightType = rightType, leftType
	}
	// Scan the overloads of the normalized operator; at most one can match
	// (the TestTypingComparisonAssumptions test enforces this even as new
	// operators or overloads are added). An Unknown operand matches on the
	// other side only.
	for _, candidate := range tree.CmpOps[comp] {
		o := candidate.(*tree.CmpOp)
		var matches bool
		switch {
		case leftType.Family() == types.UnknownFamily:
			matches = rightType.Equivalent(o.RightType)
		case rightType.Family() == types.UnknownFamily:
			matches = leftType.Equivalent(o.LeftType)
		default:
			matches = leftType.Equivalent(o.LeftType) && rightType.Equivalent(o.RightType)
		}
		if matches {
			return o, flipped, not, true
		}
	}
	return nil, false, false, false
}
// NormalizeComparison maps a given comparison operator into an equivalent
// operator that exists in the tree.CmpOps map, returning this new operator,
// along with "flipped" and "not" flags. The "flipped" flag indicates whether
// the left and right operands should be flipped with the new operator. The
// "not" flag indicates whether the result of the comparison operation should
// be negated.
func NormalizeComparison(op opt.Operator) (newOp opt.Operator, flipped, not bool) {
	newOp, flipped, not = op, false, false
	switch op {
	case opt.NeOp:
		// Ne(left, right) is implemented as !Eq(left, right).
		newOp, not = opt.EqOp, true
	case opt.GtOp:
		// Gt(left, right) is implemented as Lt(right, left).
		newOp, flipped = opt.LtOp, true
	case opt.GeOp:
		// Ge(left, right) is implemented as Le(right, left).
		newOp, flipped = opt.LeOp, true
	case opt.NotInOp:
		// NotIn(left, right) is implemented as !In(left, right).
		newOp, not = opt.InOp, true
	case opt.NotLikeOp:
		// NotLike(left, right) is implemented as !Like(left, right).
		newOp, not = opt.LikeOp, true
	case opt.NotILikeOp:
		// NotILike(left, right) is implemented as !ILike(left, right).
		newOp, not = opt.ILikeOp, true
	case opt.NotSimilarToOp:
		// NotSimilarTo(left, right) is implemented as !SimilarTo(left, right).
		newOp, not = opt.SimilarToOp, true
	case opt.NotRegMatchOp:
		// NotRegMatch(left, right) is implemented as !RegMatch(left, right).
		newOp, not = opt.RegMatchOp, true
	case opt.NotRegIMatchOp:
		// NotRegIMatch(left, right) is implemented as !RegIMatch(left, right).
		newOp, not = opt.RegIMatchOp, true
	case opt.IsNotOp:
		// IsNot(left, right) is implemented as !Is(left, right).
		newOp, not = opt.IsOp, true
	}
	return newOp, flipped, not
}
|
//go:build !android && !e2e_testing
// +build !android,!e2e_testing
package udp
import (
"encoding/binary"
"fmt"
"net"
"syscall"
"unsafe"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
"golang.org/x/sys/unix"
)
//TODO: make it support reload as best you can!

// StdConn is a Linux UDP socket driven through raw syscalls on a single file
// descriptor.
type StdConn struct {
	sysFd int            // underlying socket file descriptor
	l     *logrus.Logger // logger used by the read loop and config reload
	batch int            // messages requested per recvmmsg batch
}
// x is referenced only by the commented-out SO_INCOMING_CPU experiment in
// NewListener; it is otherwise unused.
var x int

// From linux/sock_diag.h
const (
	_SK_MEMINFO_RMEM_ALLOC = iota
	_SK_MEMINFO_RCVBUF
	_SK_MEMINFO_WMEM_ALLOC
	_SK_MEMINFO_SNDBUF
	_SK_MEMINFO_FWD_ALLOC
	_SK_MEMINFO_WMEM_QUEUED
	_SK_MEMINFO_OPTMEM
	_SK_MEMINFO_BACKLOG
	_SK_MEMINFO_DROPS
	_SK_MEMINFO_VARS
)

// _SK_MEMINFO is the fixed-size array of socket meminfo counters, indexed by
// the _SK_MEMINFO_* constants above.
type _SK_MEMINFO [_SK_MEMINFO_VARS]uint32
// NewListener opens an AF_INET6 UDP socket bound to ip:port and wraps it in a
// StdConn. With multi set, SO_REUSEPORT is enabled so several listeners can
// share the port.
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
	// Hold ForkLock while creating the fd so it cannot leak into a forked
	// child before close-on-exec is set.
	syscall.ForkLock.RLock()
	fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
	if err == nil {
		unix.CloseOnExec(fd)
	}
	syscall.ForkLock.RUnlock()
	if err != nil {
		// Socket creation failed, so there is no valid fd to close here
		// (the old code closed the invalid fd).
		return nil, fmt.Errorf("unable to open socket: %s", err)
	}
	var lip [16]byte
	copy(lip[:], ip.To16())
	if multi {
		if err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil {
			unix.Close(fd) // don't leak the fd on the error path
			return nil, fmt.Errorf("unable to set SO_REUSEPORT: %s", err)
		}
	}
	//TODO: support multiple listening IPs (for limiting ipv6)
	if err = unix.Bind(fd, &unix.SockaddrInet6{Addr: lip, Port: port}); err != nil {
		unix.Close(fd) // don't leak the fd on the error path
		return nil, fmt.Errorf("unable to bind to socket: %s", err)
	}
	//TODO: this may be useful for forcing threads into specific cores
	//unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_INCOMING_CPU, x)
	//v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_INCOMING_CPU)
	//l.Println(v, err)
	return &StdConn{sysFd: fd, l: l, batch: batch}, nil
}
// Rebind is a no-op for the standard Linux implementation.
func (u *StdConn) Rebind() error {
	return nil
}

// SetRecvBuffer sets the receive buffer to n bytes via SO_RCVBUFFORCE.
// NOTE(review): the *FORCE variants bypass the rmem/wmem sysctl limits and
// typically require elevated privileges — confirm the deployment grants them.
func (u *StdConn) SetRecvBuffer(n int) error {
	return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, n)
}

// SetSendBuffer sets the send buffer to n bytes via SO_SNDBUFFORCE.
func (u *StdConn) SetSendBuffer(n int) error {
	return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, n)
}

// GetRecvBuffer reports the current receive buffer size (SO_RCVBUF).
func (u *StdConn) GetRecvBuffer() (int, error) {
	return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_RCVBUF)
}

// GetSendBuffer reports the current send buffer size (SO_SNDBUF).
func (u *StdConn) GetSendBuffer() (int, error) {
	return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_SNDBUF)
}
// LocalAddr reports the address and port the socket is bound to, always as a
// 16-byte (IPv6-form) IP.
func (u *StdConn) LocalAddr() (*Addr, error) {
	sa, err := unix.Getsockname(u.sysFd)
	if err != nil {
		return nil, err
	}
	addr := new(Addr)
	switch v := sa.(type) {
	case *unix.SockaddrInet4:
		// Widen the 4-byte address to its 16-byte IPv6-mapped form.
		addr.IP = net.IP(v.Addr[:]).To16()
		addr.Port = uint16(v.Port)
	case *unix.SockaddrInet6:
		addr.IP = v.Addr[:]
		addr.Port = uint16(v.Port)
	}
	return addr, nil
}
// ListenOut runs the receive loop for queue q: it reads batches of datagrams
// (recvmmsg unless batch == 1) and hands each one to r. The header, firewall
// packet, address and nonce buffers are allocated once and reused across
// iterations, so r must not retain them.
func (u *StdConn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
	plaintext := make([]byte, MTU)
	h := &header.H{}
	fwPacket := &firewall.Packet{}
	udpAddr := &Addr{}
	nb := make([]byte, 12, 12)
	//TODO: should we track this?
	//metric := metrics.GetOrRegisterHistogram("test.batch_read", nil, metrics.NewExpDecaySample(1028, 0.015))
	msgs, buffers, names := u.PrepareRawMessages(u.batch)
	read := u.ReadMulti
	if u.batch == 1 {
		read = u.ReadSingle
	}
	for {
		n, err := read(msgs)
		if err != nil {
			// A read error is treated as socket closure and ends the loop.
			u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
			return
		}
		//metric.Update(int64(n))
		for i := 0; i < n; i++ {
			// names[i] holds the raw peer sockaddr: bytes 8:24 are the
			// 16-byte address, bytes 2:4 the big-endian port (sockaddr_in6
			// layout — this is an AF_INET6 socket).
			udpAddr.IP = names[i][8:24]
			udpAddr.Port = binary.BigEndian.Uint16(names[i][2:4])
			r(udpAddr, plaintext[:0], buffers[i][:msgs[i].Len], h, fwPacket, lhf, nb, q, cache.Get(u.l))
		}
	}
}
// ReadSingle performs one blocking recvmsg into msgs[0] and reports a count
// of 1 on success, recording the datagram length in msgs[0].Len. The
// enclosing for loop never repeats (every path returns); it only mirrors
// ReadMulti's shape.
func (u *StdConn) ReadSingle(msgs []rawMessage) (int, error) {
	for {
		n, _, err := unix.Syscall6(
			unix.SYS_RECVMSG,
			uintptr(u.sysFd),
			uintptr(unsafe.Pointer(&(msgs[0].Hdr))),
			0,
			0,
			0,
			0,
		)
		if err != 0 {
			return 0, &net.OpError{Op: "recvmsg", Err: err}
		}
		msgs[0].Len = uint32(n)
		return 1, nil
	}
}
// ReadMulti blocks until at least one datagram arrives (MSG_WAITFORONE) and
// reads up to len(msgs) datagrams in a single recvmmsg syscall, returning
// how many entries of msgs were filled.
func (u *StdConn) ReadMulti(msgs []rawMessage) (int, error) {
	for {
		n, _, err := unix.Syscall6(
			unix.SYS_RECVMMSG,
			uintptr(u.sysFd),
			uintptr(unsafe.Pointer(&msgs[0])),
			uintptr(len(msgs)),
			unix.MSG_WAITFORONE,
			0,
			0,
		)
		if err != 0 {
			return 0, &net.OpError{Op: "recvmmsg", Err: err}
		}
		return int(n), nil
	}
}
// WriteTo sends b to addr with a single sendto syscall. The destination is
// always encoded as a sockaddr_in6 since the socket is AF_INET6; IPv4 peers
// are presumably represented as mapped addresses — TODO confirm.
func (u *StdConn) WriteTo(b []byte, addr *Addr) error {
	var rsa unix.RawSockaddrInet6
	rsa.Family = unix.AF_INET6
	// the sockaddr port field is in network byte order, so write it by byte
	p := (*[2]byte)(unsafe.Pointer(&rsa.Port))
	p[0] = byte(addr.Port >> 8)
	p[1] = byte(addr.Port)
	copy(rsa.Addr[:], addr.IP)
	for {
		_, _, err := unix.Syscall6(
			unix.SYS_SENDTO,
			uintptr(u.sysFd),
			uintptr(unsafe.Pointer(&b[0])),
			uintptr(len(b)),
			uintptr(0),
			uintptr(unsafe.Pointer(&rsa)),
			uintptr(unix.SizeofSockaddrInet6),
		)
		if err != 0 {
			return &net.OpError{Op: "sendto", Err: err}
		}
		//TODO: handle incomplete writes
		return nil
	}
}
// ReloadConfig applies the "listen.read_buffer" and "listen.write_buffer"
// settings from the config to the underlying socket. A value of 0 (or an
// absent key) leaves the kernel default untouched. Outcomes are logged and
// errors are not returned, preserving the original best-effort behavior.
func (u *StdConn) ReloadConfig(c *config.C) {
	u.applyBufferSize(c, "listen.read_buffer", u.SetRecvBuffer, u.GetRecvBuffer)
	u.applyBufferSize(c, "listen.write_buffer", u.SetSendBuffer, u.GetSendBuffer)
}

// applyBufferSize reads the integer config key and, when positive, applies
// it via set, then logs the size the kernel reports back via get. Extracted
// to remove the previously duplicated set/get/log sequence for both buffers.
func (u *StdConn) applyBufferSize(c *config.C, key string, set func(int) error, get func() (int, error)) {
	b := c.GetInt(key, 0)
	if b <= 0 {
		return
	}
	if err := set(b); err != nil {
		u.l.WithError(err).Error("Failed to set " + key)
		return
	}
	s, err := get()
	if err != nil {
		u.l.WithError(err).Warn("Failed to get " + key)
		return
	}
	u.l.WithField("size", s).Info(key + " was set")
}
// getMemInfo fills meminfo via getsockopt(SOL_SOCKET, SO_MEMINFO). The raw
// syscall is used because the unix package offers no typed helper for this
// option; it fails on kernels without SO_MEMINFO support.
func (u *StdConn) getMemInfo(meminfo *_SK_MEMINFO) error {
	var vallen uint32 = 4 * _SK_MEMINFO_VARS // option length: one uint32 per meminfo slot
	_, _, err := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(u.sysFd), uintptr(unix.SOL_SOCKET), uintptr(unix.SO_MEMINFO), uintptr(unsafe.Pointer(meminfo)), uintptr(unsafe.Pointer(&vallen)), 0)
	if err != 0 {
		return err
	}
	return nil
}
// Close releases the underlying file descriptor. Per the original TODO,
// closing does not unblock a goroutine stuck in the read loop.
func (u *StdConn) Close() error {
	//TODO: this will not interrupt the read loop
	err := syscall.Close(u.sysFd)
	return err
}
// NewUDPStatsEmitter returns a function that publishes per-socket SO_MEMINFO
// statistics into registered gauges. If the first probe fails (kernel lacks
// SO_MEMINFO) no gauges are registered and the returned function is a no-op.
func NewUDPStatsEmitter(udpConns []Conn) func() {
	// Check if our kernel supports SO_MEMINFO before registering the gauges
	var udpGauges [][_SK_MEMINFO_VARS]metrics.Gauge
	var meminfo _SK_MEMINFO
	if err := udpConns[0].(*StdConn).getMemInfo(&meminfo); err == nil {
		udpGauges = make([][_SK_MEMINFO_VARS]metrics.Gauge, len(udpConns))
		for i := range udpConns {
			// one gauge per meminfo slot, named udp.<index>.<field>
			udpGauges[i] = [_SK_MEMINFO_VARS]metrics.Gauge{
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rmem_alloc", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rcvbuf", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_alloc", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.sndbuf", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.fwd_alloc", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_queued", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.optmem", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.backlog", i), nil),
				metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.drops", i), nil),
			}
		}
	}
	return func() {
		// re-probe each socket and push every slot into its gauge; a nil
		// udpGauges (unsupported kernel) makes the range a no-op
		for i, gauges := range udpGauges {
			if err := udpConns[i].(*StdConn).getMemInfo(&meminfo); err == nil {
				for j := 0; j < _SK_MEMINFO_VARS; j++ {
					gauges[j].Update(int64(meminfo[j]))
				}
			}
		}
	}
}
|
package vmapis
import (
"github.com/gin-gonic/gin"
"goblog/vmcommon"
)
// Getvmlist responds with the current VM list from vmcommon, wrapped in a
// {"res": ...} JSON envelope with HTTP 200.
func Getvmlist(c *gin.Context) {
	vmlist := vmcommon.GetVmList()
	res := make(map[string]interface{})
	res["res"] = vmlist
	c.JSON(200, res)
}
// Createvm creates a VM from the hard-coded template UUID and returns the
// result as JSON.
//
// Fix: the original called err.Error() unconditionally, which panics with a
// nil pointer dereference whenever creation succeeds (err == nil). The error
// text is now only included when an error actually occurred.
func Createvm(c *gin.Context) {
	create, err := vmcommon.Create("3ee18210-3761-4fdc-9141-f13879878725")
	res := make(map[string]interface{})
	res["res"] = create
	if err != nil {
		res["err"] = err.Error()
	}
	c.JSON(200, res)
}
|
// This file was generated by counterfeiter
package fake_container_id_provider
import (
"sync"
"github.com/cloudfoundry-incubator/garden-shed/layercake"
"github.com/cloudfoundry-incubator/garden-shed/repository_fetcher"
)
// FakeContainerIDProvider is a counterfeiter-generated test double for
// repository_fetcher.ContainerIDProvider.
type FakeContainerIDProvider struct {
	// ProvideIDStub, when non-nil, is invoked in place of the canned return.
	ProvideIDStub  func(path string) layercake.ID
	provideIDMutex sync.RWMutex
	// provideIDArgsForCall records the argument of every ProvideID call.
	provideIDArgsForCall []struct {
		path string
	}
	// provideIDReturns holds the canned result used when no stub is set.
	provideIDReturns struct {
		result1 layercake.ID
	}
}
// ProvideID records the call under the mutex, then delegates to
// ProvideIDStub if one is configured, falling back to the canned return.
func (fake *FakeContainerIDProvider) ProvideID(path string) layercake.ID {
	fake.provideIDMutex.Lock()
	fake.provideIDArgsForCall = append(fake.provideIDArgsForCall, struct {
		path string
	}{path})
	fake.provideIDMutex.Unlock()
	if stub := fake.ProvideIDStub; stub != nil {
		return stub(path)
	}
	return fake.provideIDReturns.result1
}
// ProvideIDCallCount reports how many times ProvideID has been invoked.
func (fake *FakeContainerIDProvider) ProvideIDCallCount() int {
	fake.provideIDMutex.RLock()
	defer fake.provideIDMutex.RUnlock()
	return len(fake.provideIDArgsForCall)
}

// ProvideIDArgsForCall returns the path argument of the i-th ProvideID call
// (panics if i is out of range).
func (fake *FakeContainerIDProvider) ProvideIDArgsForCall(i int) string {
	fake.provideIDMutex.RLock()
	defer fake.provideIDMutex.RUnlock()
	return fake.provideIDArgsForCall[i].path
}

// ProvideIDReturns sets the canned result and clears any configured stub.
func (fake *FakeContainerIDProvider) ProvideIDReturns(result1 layercake.ID) {
	fake.ProvideIDStub = nil
	fake.provideIDReturns = struct {
		result1 layercake.ID
	}{result1}
}
var _ repository_fetcher.ContainerIDProvider = new(FakeContainerIDProvider)
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package meta
import (
"context"
"chromiumos/tast/testing"
)
// init registers the example test with the tast framework at package load.
func init() {
	testing.AddTest(&testing.Test{
		Func:         LocalFeatures,
		Desc:         "Example to access DUT features from a local test",
		Contacts:     []string{"tast-owners@google.com", "seewaifu@google.com"},
		BugComponent: "b:1034625",
	})
}

// LocalFeatures demonstrates reading the DUT's feature set from within a
// local test and logging it.
func LocalFeatures(ctx context.Context, s *testing.State) {
	dutFeatures := s.Features("")
	s.Logf("DUT Features: %+v", dutFeatures)
}
|
package main
import "fmt"
var n int = 0
// Multiply stores a*b into *reply, mimicking an RPC-style call signature.
func Multiply(a, b int, reply *int) {
	product := a * b
	*reply = product
}
// main demonstrates Multiply writing through an out-parameter that aliases
// the package-level variable n.
func main() {
	result := &n
	Multiply(10, 5, result)
	fmt.Println("Multiply: ", *result)
	fmt.Printf("Value of n is %d", n)
}
|
package utils
import (
"net/http"
"github.com/stretchr/testify/mock"
)
// HTTPClient : used to define how a http client implementation should behave.
type HTTPClient interface {
	Do(*http.Request) (*http.Response, error)
}

// HTTPClientDo : function type matching HTTPClient.Do, used to build mocks
// from a plain closure.
type HTTPClientDo func(*http.Request) (*http.Response, error)

// httpClientMock adapts an HTTPClientDo function to the HTTPClient
// interface; the embedded testify mock.Mock is available for expectations.
type httpClientMock struct {
	mock.Mock
	funcDo HTTPClientDo
}
// NewHTTPClientMock : create a generic http client for mocks backed by the
// supplied Do function.
func NewHTTPClientMock(do HTTPClientDo) HTTPClient {
	m := &httpClientMock{}
	m.funcDo = do
	return m
}
// Do : used to make requests; it simply forwards to the function supplied
// at construction time.
func (c *httpClientMock) Do(r *http.Request) (*http.Response, error) {
	return c.funcDo(r)
}
|
package organize
import (
"errors"
"github.com/json-iterator/go"
"math/rand"
)
// Dynamic enumerates the kinds of dynamically-resolved approver targets,
// relative either to the process creator or to the current handler (caller).
// The labels in String() document each value.
type Dynamic int

const (
	CREATOR Dynamic = iota + 1 // 1: the process creator
	CREATOR_LEADER             // 2: the creator's leaders
	CREATOR_SUPERIOR_LEADER    // 3: the leaders of the creator's leaders
	CREATOR_DEP                // 4: same department as the creator
	CREATOR_ROLE               // 5: same role as the creator
	CALLER                     // 6: the current handler
	CALLER_LEADER              // 7: the handler's leaders
	CALLER_SUPERIOR_LEADER     // 8: the leaders of the handler's leaders
	CALLER_DEP                 // 9: same department as the handler
	CALLER_ROLE                // 10: same role as the handler
)
// String returns the human-readable (Chinese) label for the target kind, or
// the empty string for an unknown value.
//
// Fix: the original rebuilt a 10-entry map on every call just to do one
// lookup; a switch avoids the per-call allocation with identical results.
func (static Dynamic) String() string {
	switch static {
	case CREATOR:
		return "发起人" // 1
	case CREATOR_LEADER:
		return "发起人上级" // 2
	case CREATOR_SUPERIOR_LEADER:
		return "发起人上级的上级" // 3
	case CREATOR_DEP:
		return "发起人同部门" // 4
	case CREATOR_ROLE:
		return "发起人同角色" // 5
	case CALLER:
		return "当前负责人" // 6
	case CALLER_LEADER:
		return "当前负责人上级" // 7
	case CALLER_SUPERIOR_LEADER:
		return "当前负责人上级的上级" // 8
	case CALLER_DEP:
		return "负责人同部门" // 9
	case CALLER_ROLE:
		return "负责人同角色" // 10
	}
	return ""
}
// Int returns the enum's underlying integer value.
func (static Dynamic) Int() int {
	v := int(static)
	return v
}
// ProcessOrganize describes who may act on a process step: explicit
// departments, user ids, and roles, plus dynamically-resolved targets.
// Lower-case fields are runtime state excluded from JSON.
type ProcessOrganize struct {
	Deps    []int     `json:"deps"`
	Uids    []int     `json:"uids"`
	Roles   []int     `json:"roles"`
	Dynamic []Dynamic `json:"dynamic"`
	Field   string    `json:"field"`
	// finalUsers is the de-duplicated resolved user set built by fetch.
	finalUsers []int `json:"-"`
	// hasFetch guards against resolving more than once.
	hasFetch bool `json:"-"`
	caller   int  `json:"-"`
	creator  int  `json:"-"`
	//callerInfo uims.User `json:"caller_info"`
	//creatorInfo uims.User `json:"creator_info"`
	// _getLeaderIds accumulates ids whose leaders still need resolving.
	_getLeaderIds []int `json:"-"`
}
// Init returns a fresh ProcessOrganize with every slice field initialized
// to an empty, non-nil slice.
func (static ProcessOrganize) Init() *ProcessOrganize {
	p := new(ProcessOrganize)
	p.Deps = []int{}
	p.Uids = []int{}
	p.Roles = []int{}
	p.Dynamic = []Dynamic{}
	p._getLeaderIds = []int{}
	p.finalUsers = []int{}
	return p
}
// SetCaller records the user id of the current handler.
func (this *ProcessOrganize) SetCaller(uid int) {
	this.caller = uid
}

// SetCreator records the user id of the process creator.
func (this *ProcessOrganize) SetCreator(uid int) {
	this.creator = uid
}
// HasPermission reports whether the current caller belongs to the resolved
// user set, resolving it first if that has not happened yet.
func (this *ProcessOrganize) HasPermission() (bool, error) {
	if !this.hasFetch {
		if err := this.fetch(); err != nil {
			return false, err
		}
	}
	for _, uid := range this.finalUsers {
		if uid == this.caller {
			return true, nil
		}
	}
	return false, nil
}
// fetch resolves the configured targets into finalUsers exactly once.
// It requires creator and caller to be set first. Most of the resolution
// logic (uims lookups for leaders/departments/roles) is currently commented
// out, so only the explicit Uids list is de-duplicated into finalUsers.
func (this *ProcessOrganize) fetch() error {
	if this.hasFetch == true {
		return nil
	}
	if this.creator == 0 {
		return errors.New("Organize 缺少 creator")
	}
	if this.caller == 0 {
		return errors.New("Organize 缺少 caller")
	}
	this.hasFetch = true
	// TODO: cache the DingTalk SDK lookups (was: 缓存钉钉SDK)
	//rsp, err := uims.GetUsers([]int{this.caller, this.creator}, false)
	//if err != nil {
	//	return err
	//}
	//for _, u := range rsp.Attachment.Users {
	//	if u.Id == this.creator {
	//		this.creatorInfo = u
	//	} else if u.Id == this.caller {
	//		this.callerInfo = u
	//	}
	//}
	//if len(this.Dynamic) > 0 {
	//	for _, d := range this.Dynamic {
	//		switch d {
	//		case CREATOR:
	//			this.Uids = append(this.Uids, this.creator)
	//		case CREATOR_LEADER:
	//			for _, id := range this.creatorInfo.Leaders {
	//				this.Uids = append(this.Uids, id)
	//			}
	//		case CREATOR_SUPERIOR_LEADER:
	//			this._getLeaderIds = append(this._getLeaderIds, this.creatorInfo.Leaders...)
	//		case CREATOR_DEP:
	//			for _, d := range this.creatorInfo.Depinfos {
	//				this.Deps = append(this.Deps, d.Id)
	//			}
	//		case CREATOR_ROLE:
	//			for _, r := range this.creatorInfo.Roleinfos {
	//				this.Roles = append(this.Roles, r.Id)
	//			}
	//		case CALLER:
	//			this.Uids = append(this.Uids, this.caller)
	//		case CALLER_LEADER:
	//			for _, id := range this.callerInfo.Leaders {
	//				this.Uids = append(this.Uids, id)
	//			}
	//		case CALLER_SUPERIOR_LEADER:
	//			this._getLeaderIds = append(this._getLeaderIds, this.callerInfo.Leaders...)
	//		case CALLER_DEP:
	//			for _, d := range this.callerInfo.Depinfos {
	//				this.Deps = append(this.Deps, d.Id)
	//			}
	//		case CALLER_ROLE:
	//			for _, r := range this.callerInfo.Roleinfos {
	//				this.Roles = append(this.Roles, r.Id)
	//			}
	//		}
	//	}
	//}
	//if len(this.Deps) > 0 {
	//	depRsp, err := uims.GetUsersByDeps(this.Deps, false)
	//	if err != nil {
	//		return err
	//	}
	//	for _, u := range depRsp.Attachment.Users {
	//		this.Uids = append(this.Uids, u.Id)
	//	}
	//}
	//if len(this.Roles) > 0 {
	//	roleRsp, err := uims.GetUsersByRoles(this.Roles)
	//	if err != nil {
	//		return err
	//	}
	//	for _, u := range roleRsp.Attachment.Users {
	//		this.Uids = append(this.Uids, u.Id)
	//	}
	//}
	//if len(this._getLeaderIds) > 0 {
	//	usersRsp, err := uims.GetUsers(this._getLeaderIds, false)
	//	if err != nil {
	//		return err
	//	}
	//	for _, u := range usersRsp.Attachment.Users {
	//		this.Uids = append(this.Uids, u.Leaders...)
	//	}
	//}
	// De-duplicate Uids into finalUsers, skipping the zero id.
	mp := make(map[int]bool)
	for _, id := range this.Uids {
		if id == 0 {
			continue
		}
		if _, found := mp[id]; found {
			continue
		}
		mp[id] = true
		this.finalUsers = append(this.finalUsers, id)
	}
	return nil
}
// AllUids resolves (if needed) and returns the full de-duplicated user set.
func (this *ProcessOrganize) AllUids() ([]int, error) {
	err := this.fetch()
	if err != nil {
		return nil, err
	}
	return this.finalUsers, nil
}
// GetRandUid returns one user id chosen uniformly at random from the
// resolved user set, fetching it first if needed. Returns 0 with a nil
// error when the set is empty.
//
// Fix: the original shuffled this.finalUsers in place (the local slice
// aliased the struct field), silently reordering stored state on every call.
// The shuffle now operates on a copy.
func (this *ProcessOrganize) GetRandUid() (int, error) {
	if err := this.fetch(); err != nil {
		return 0, err
	}
	if len(this.finalUsers) == 0 {
		return 0, nil
	}
	if len(this.finalUsers) == 1 {
		return this.finalUsers[0], nil
	}
	pool := make([]int, len(this.finalUsers))
	copy(pool, this.finalUsers)
	rand.Shuffle(len(pool), func(i, j int) {
		pool[i], pool[j] = pool[j], pool[i]
	})
	return pool[0], nil
}
// ToDB serializes the organize spec to JSON for persistence.
func (this *ProcessOrganize) ToDB() ([]byte, error) {
	return jsoniter.Marshal(this)
}
// FromDB resets all slice fields to empty non-nil slices and then
// unmarshals the stored JSON into the receiver.
//
// Consistency: the original duplicated Reset's six make calls inline; it now
// calls Reset so both code paths cannot drift apart.
func (this *ProcessOrganize) FromDB(data []byte) error {
	this.Reset()
	return jsoniter.Unmarshal(data, this)
}
// Reset clears every slice field back to an empty, non-nil slice.
func (this *ProcessOrganize) Reset() {
	this.Deps = []int{}
	this.Uids = []int{}
	this.Roles = []int{}
	this.Dynamic = []Dynamic{}
	this._getLeaderIds = []int{}
	this.finalUsers = []int{}
}
|
package plugin
import (
"github.com/container-storage-interface/spec/lib/go/csi"
)
// Service Define CSI Interface.
// A single implementation must provide all three CSI gRPC services:
// identity, controller, and node.
type Service interface {
	csi.IdentityServer
	csi.ControllerServer
	csi.NodeServer
}
|
package font
// type (
// // PixelFontFace -
// PixelFontFace struct {
// tex *gfx.Texture
// PixelFontSettings
// }
// )
// // Glyph -
// func (o *PixelFontFace) Glyph(dot fixed.Point26_6, r rune) (bounds geom.Rect2i, mask *gfx.Texture, bearing geom.Point2i, advance int, ok bool) {
// ok = true
// if r < o.MinRune || r > o.MaxRune {
// r = o.UnavailableRune
// ok = false
// }
// offsRune := int(r - o.MinRune)
// gw := o.GlyphW
// gh := o.GlyphH
// offsX := offsRune % o.TilesX
// offsY := offsRune / o.TilesX
// offsX *= gw
// offsY *= gh
// bounds = geom.InitRect2i(offsX, offsY, gw, gh)
// mask = o.tex
// bearing = geom.InitPoint2i(o.BearingX, o.BearingY)
// advance = o.AdvanceX
// // dst := geom.InitRect2i(x, y, gw*o.scale, gh*o.scale)
// // sdf.renderer.CopyRegion(o.tex, src, dst)
// return
// }
|
package leetcode
import "testing"
// TestLeastInterval checks the task-scheduler cooldown computation against a
// known case: six 'A's with cooldown 2 plus six other tasks need 16 slots.
func TestLeastInterval(t *testing.T) {
	type args struct {
		tasks []byte
		n     int
	}
	cases := []struct {
		name string
		args args
		want int
	}{
		{
			name: "With some input",
			args: args{
				tasks: []byte("AAAAAABCDEFG"),
				n:     2,
			},
			want: 16,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := LeastInterval(tc.args.tasks, tc.args.n)
			if got != tc.want {
				t.Errorf("LeastInterval() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
// Package sorting provides what is needed in order to build sorting requests to the database.
package sorting
|
package nagiosplugin
// Status is a Nagios plugin exit status.
type Status uint
// The usual Nagios mapping from 0-3: OK, WARNING, CRITICAL, UNKNOWN.
const (
	OK Status = iota
	WARNING
	CRITICAL
	UNKNOWN
)
// String returns the textual form of a Status. It panics when given a value
// outside the defined 0-3 range (the panic is recovered in check.Finish if
// that has been deferred).
func (s Status) String() string {
	names := [...]string{"OK", "WARNING", "CRITICAL", "UNKNOWN"}
	if s > UNKNOWN {
		panic("Invalid nagiosplugin.Status.")
	}
	return names[s]
}
// Result is a combination of a Status and infotext. A check can have
// multiple of these, and only the most important (greatest badness)
// will be reported on the first line of output or represented in the
// plugin's exit status.
type Result struct {
	status  Status // severity of this result
	message string // human-readable detail attached to the status
}
|
package main
import "fmt"
// main demonstrates the range construct across arrays/slices, maps, and
// strings.
func main() {
	// *******************************************
	// range works on a variety of data structures
	// *******************************************
	// on arrays / slices
	array := [5]int{1, 2, 3, 4, 5}
	for key, val := range array {
		fmt.Println("key: ", key, " value: ", val)
	}
	// key can be omitted using _
	nums := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Println("Multipliers of 2 are: ")
	for _, n := range nums {
		if n%2 == 0 {
			fmt.Println(n)
		}
	}
	// range yields key,val pairs on maps (iteration order is random)
	chart := map[string]string{
		"a": "apple",
		"b": "banana",
	}
	for k, v := range chart {
		fmt.Printf("%s -> %s\n", k, v)
	}
	// range with keys only
	for key := range chart {
		fmt.Println("key: ", key)
	}
	// range on string
	// iterates over Unicode code points (index, rune)
	for k, v := range "go" {
		fmt.Println(k, v)
	}
}
|
package server
import (
"context"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/rancher/dynamiclistener"
"github.com/rancher/dynamiclistener/server"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"github.com/harvester/harvester/pkg/webhook"
"github.com/harvester/harvester/pkg/webhook/clients"
"github.com/harvester/harvester/pkg/webhook/config"
"github.com/harvester/harvester/pkg/webhook/types"
)
// Shared webhook wiring: TLS secret names, the service port, handler paths,
// and the admission policy values referenced by address below.
var (
	certName            = "harvester-webhook-tls"
	caName              = "harvester-webhook-ca"
	port                = int32(443)
	validationPath      = "/v1/webhook/validation"
	mutationPath        = "/v1/webhook/mutation"
	failPolicyFail      = v1.Fail
	sideEffectClassNone = v1.SideEffectClassNone
)
// AdmissionWebhookServer hosts the validating and mutating admission
// webhooks for Harvester.
type AdmissionWebhookServer struct {
	context    context.Context
	restConfig *rest.Config
	options    *config.Options
}
// New builds an AdmissionWebhookServer bound to the given context, REST
// config, and options.
func New(ctx context.Context, restConfig *rest.Config, options *config.Options) *AdmissionWebhookServer {
	s := &AdmissionWebhookServer{}
	s.context = ctx
	s.restConfig = restConfig
	s.options = options
	return s
}
// ListenAndServe builds the validation and mutation handlers, mounts them on
// their HTTP paths, starts the TLS listener, and finally starts the client
// controllers. Any step failing aborts startup with its error.
func (s *AdmissionWebhookServer) ListenAndServe() error {
	clients, err := clients.New(s.context, s.restConfig, s.options.Threadiness)
	if err != nil {
		return err
	}
	validationHandler, validationResources, err := Validation(clients, s.options)
	if err != nil {
		return err
	}
	mutationHandler, mutationResources, err := Mutation(clients, s.options)
	if err != nil {
		return err
	}
	router := mux.NewRouter()
	router.Handle(validationPath, validationHandler)
	router.Handle(mutationPath, mutationHandler)
	if err := s.listenAndServe(clients, router, validationResources, mutationResources); err != nil {
		return err
	}
	// controllers are started only after the listener is up
	if err := clients.Start(s.context); err != nil {
		return err
	}
	return nil
}
// listenAndServe registers a secret watcher that (re)applies the
// ValidatingWebhookConfiguration and MutatingWebhookConfiguration whenever
// the CA secret appears or changes, then starts the dynamiclistener TLS
// server on the configured port.
func (s *AdmissionWebhookServer) listenAndServe(clients *clients.Clients, handler http.Handler, validationResources []types.Resource, mutationResources []types.Resource) error {
	apply := clients.Apply.WithDynamicLookup()
	clients.Core.Secret().OnChange(s.context, "secrets", func(key string, secret *corev1.Secret) (*corev1.Secret, error) {
		// only react to our CA secret, and only once it carries a cert
		if secret == nil || secret.Name != caName || secret.Namespace != s.options.Namespace || len(secret.Data[corev1.TLSCertKey]) == 0 {
			return nil, nil
		}
		logrus.Info("Sleeping for 15 seconds then applying webhook config")
		// Sleep here to make sure server is listening and all caches are primed
		time.Sleep(15 * time.Second)
		logrus.Debugf("Building validation rules...")
		validationRules := s.buildRules(validationResources)
		logrus.Debugf("Building mutation rules...")
		mutationRules := s.buildRules(mutationResources)
		validatingWebhookConfiguration := &v1.ValidatingWebhookConfiguration{
			ObjectMeta: metav1.ObjectMeta{
				Name: webhook.ValidatingWebhookName,
			},
			Webhooks: []v1.ValidatingWebhook{
				{
					Name: "validator.harvesterhci.io",
					ClientConfig: v1.WebhookClientConfig{
						Service: &v1.ServiceReference{
							Namespace: s.options.Namespace,
							Name:      "harvester-webhook",
							Path:      &validationPath,
							Port:      &port,
						},
						// trust chain comes from the watched CA secret
						CABundle: secret.Data[corev1.TLSCertKey],
					},
					Rules:                   validationRules,
					FailurePolicy:           &failPolicyFail,
					SideEffects:             &sideEffectClassNone,
					AdmissionReviewVersions: []string{"v1", "v1beta1"},
				},
			},
		}
		mutatingWebhookConfiguration := &v1.MutatingWebhookConfiguration{
			ObjectMeta: metav1.ObjectMeta{
				Name: webhook.MutatingWebhookName,
			},
			Webhooks: []v1.MutatingWebhook{
				{
					Name: "mutator.harvesterhci.io",
					ClientConfig: v1.WebhookClientConfig{
						Service: &v1.ServiceReference{
							Namespace: s.options.Namespace,
							Name:      "harvester-webhook",
							Path:      &mutationPath,
							Port:      &port,
						},
						CABundle: secret.Data[corev1.TLSCertKey],
					},
					Rules:                   mutationRules,
					FailurePolicy:           &failPolicyFail,
					SideEffects:             &sideEffectClassNone,
					AdmissionReviewVersions: []string{"v1", "v1beta1"},
				},
			},
		}
		// owning the configs by the secret ties their lifecycle to the CA
		return secret, apply.WithOwner(secret).ApplyObjects(validatingWebhookConfiguration, mutatingWebhookConfiguration)
	})
	tlsName := fmt.Sprintf("harvester-webhook.%s.svc", s.options.Namespace)
	return server.ListenAndServe(s.context, s.options.HTTPSListenPort, 0, handler, &server.ListenOpts{
		Secrets:       clients.Core.Secret(),
		CertNamespace: s.options.Namespace,
		CertName:      certName,
		CAName:        caName,
		TLSListenerConfig: dynamiclistener.Config{
			SANs: []string{
				tlsName,
			},
			FilterCN: dynamiclistener.OnlyAllow(tlsName),
		},
	})
}
// buildRules converts the webhook resource descriptors into admission
// RuleWithOperations entries, one per resource.
func (s *AdmissionWebhookServer) buildRules(resources []types.Resource) []v1.RuleWithOperations {
	rules := make([]v1.RuleWithOperations, 0, len(resources))
	for _, rsc := range resources {
		logrus.Debugf("Add rule for %+v", rsc)
		// copy the scope so each rule holds its own value rather than
		// sharing a pointer into the loop variable
		scope := rsc.Scope
		rule := v1.RuleWithOperations{
			Operations: rsc.OperationTypes,
			Rule: v1.Rule{
				APIGroups:   []string{rsc.APIGroup},
				APIVersions: []string{rsc.APIVersion},
				Resources:   []string{rsc.Name},
				Scope:       &scope,
			},
		}
		rules = append(rules, rule)
	}
	return rules
}
|
package bouncing
import (
"math"
)
// BouncingBall returns how many times a ball dropped from height h, with
// rebound factor bounce, is seen passing a window at height window: once on
// the initial fall plus twice for every rebound that still clears the
// window. Returns -1 for invalid input (non-positive window, bounce outside
// (0,1), or a window at or above the drop height). Closed form: rebound k
// clears the window while h*bounce^k >= window.
//
// Idiom: the else after a terminating return is dropped so the happy path
// stays left-aligned.
func BouncingBall(h, bounce, window float64) int {
	if h < 0 || bounce <= 0 || bounce >= 1 || window >= h || window <= 0 {
		return -1
	}
	return int(math.Floor(math.Log(window/h)/math.Log(bounce))*2 + 1)
}
func BouncingBall_2(h, bounce, window float64) int {
if h < 0 || bounce <= 0 || bounce >= 1 || window >= h || window <= 0 {
return -1
} else {
count := 0
hh := h
for hh >= window {
count += 1
hh = hh * bounce
if hh >= window {
count += 1
}
}
return count
}
}
|
package server
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"go-elastic-hotels/services"
"net/http"
)
// User-facing error and parameter strings shared by the HTTP handlers.
const (
	ConnectionError  = "Could not establish connection with Elastic Server"
	searchName       = "name" // Elasticsearch field queried by SearchHotelHandler
	ESSearchError    = "Elastic server could not respond"
	jsonMarshalError = "Could not marshal json"
	writeError       = "could not write response"
)
func GetHomeHandler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "%s", "Welcome to Hotel Api")
}
}
// SearchHotelHandler returns a handler that searches Elasticsearch for
// hotels matching the {name} path variable and writes the results as
// indented JSON. Each failure stage maps to a distinct 500 error message.
func SearchHotelHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// NOTE(review): a new ES client is created per request; consider
		// reusing one if the services package allows it.
		esClient, err := services.ConnectToESServer()
		if err != nil {
			http.Error(w, ConnectionError, http.StatusInternalServerError)
			return
		}
		params := mux.Vars(r)["name"]
		hotels, err := services.SearchForResult(ctx, esClient, searchName, params)
		if err != nil {
			http.Error(w, ESSearchError, http.StatusInternalServerError)
			return
		}
		hotelsByte, err := json.MarshalIndent(&hotels, "", " ")
		if err != nil {
			http.Error(w, jsonMarshalError, http.StatusInternalServerError)
			return
		}
		_, err = w.Write(hotelsByte)
		if err != nil {
			http.Error(w, writeError, http.StatusInternalServerError)
			return
		}
	}
}
|
package openrtb_ext
// ImpExtGlobalsun is the bidder-specific impression extension for the
// Globalsun adapter; only the placement id is carried.
type ImpExtGlobalsun struct {
	PlacementID string `json:"placementId"`
}
|
package task
import (
"fmt"
"reflect"
"github.com/pivotal-cf/on-demand-services-sdk/bosh"
"gopkg.in/yaml.v2"
)
func ManifestsAreTheSame(generateManifest, oldManifest []byte) (bool, error) {
regeneratedManifest, err := marshalBoshManifest(generateManifest)
if err != nil {
return false, err
}
ignoreUpdateBlock(®eneratedManifest)
boshManifest, err := marshalBoshManifest(oldManifest)
if err != nil {
return false, err
}
ignoreUpdateBlock(&boshManifest)
manifestsSame := reflect.DeepEqual(regeneratedManifest, boshManifest)
return manifestsSame, nil
}
// marshalBoshManifest parses raw YAML into a bosh.BoshManifest. (Despite
// the name, this UNmarshals; the name is kept for interface stability.)
func marshalBoshManifest(rawManifest []byte) (bosh.BoshManifest, error) {
	var parsed bosh.BoshManifest
	if err := yaml.Unmarshal(rawManifest, &parsed); err != nil {
		return bosh.BoshManifest{}, fmt.Errorf("error detecting change in manifest, unable to unmarshal manifest: %s", err)
	}
	return parsed, nil
}
// ignoreUpdateBlock strips the update sections (top-level and per instance
// group) so manifest comparisons ignore them.
func ignoreUpdateBlock(manifest *bosh.BoshManifest) {
	manifest.Update = nil
	groups := manifest.InstanceGroups
	for i := range groups {
		groups[i].Update = nil
	}
}
|
package main
import (
"encoding/hex"
//"fmt"
"github.com/ontio/ontology/common"
)
// Event is any decoded smart-contract notification that can name itself.
type Event interface {
	GetEventName() string
}

// CreateOrderEvent is the raw notify payload for an order-creation event:
// element 0 is the hex-encoded event name, element 1 the order field list.
type CreateOrderEvent []interface{}
// GetEventName hex-decodes the first payload element into the event name.
// A decode failure is ignored, yielding the empty string.
func (this CreateOrderEvent) GetEventName() string {
	raw, _ := hex.DecodeString(this[0].(string))
	name := string(raw)
	return name
}
// GetOrder decodes the order fields from payload element 1 into an Order.
// Numeric fields are parsed base-16 after passing through reverseString2 —
// presumably a byte-order reversal of little-endian hex; confirm against
// its definition. Decode errors on the string/address fields are silently
// skipped, leaving those Order fields at their zero values.
func (this CreateOrderEvent) GetOrder() *Order {
	o := &Order{}
	items := this[1].([]interface{})
	o.Id.SetString(reverseString2(items[0].(string)), 16)
	if bytes, err := hex.DecodeString(items[1].(string)); err == nil {
		o.Type = string(bytes)
	}
	if bytes, err := hex.DecodeString(items[2].(string)); err == nil {
		o.Business = string(bytes)
	}
	if addr, err := common.AddressFromHexString(items[3].(string)); err == nil {
		o.Owner = addr.ToBase58()
	}
	o.Price.SetString(reverseString2(items[4].(string)), 16)
	o.Amount.SetString(reverseString2(items[5].(string)), 16)
	o.State.SetString(reverseString2(items[6].(string)), 16)
	o.PreId.SetString(reverseString2(items[7].(string)), 16)
	o.NextId.SetString(reverseString2(items[8].(string)), 16)
	o.UnAmount.SetString(reverseString2(items[9].(string)), 16)
	return o
}
|
package requests
import (
"fmt"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
)
// ExportGroupsInAndUsersInCategory Returns a csv file of users in format ready to import.
// https://canvas.instructure.com/doc/api/group_categories.html
//
// Path Parameters:
// # Path.GroupCategoryID (Required) ID
//
// ExportGroupsInAndUsersInCategory models the export request; the group
// category id path parameter is the only (required) input.
type ExportGroupsInAndUsersInCategory struct {
	Path struct {
		GroupCategoryID string `json:"group_category_id" url:"group_category_id,omitempty"` // (Required)
	} `json:"path"`
}
// GetMethod returns the HTTP verb for this request.
func (t *ExportGroupsInAndUsersInCategory) GetMethod() string {
	return "GET"
}

// GetURLPath interpolates the group category id into the endpoint path.
func (t *ExportGroupsInAndUsersInCategory) GetURLPath() string {
	path := "group_categories/{group_category_id}/export"
	path = strings.ReplaceAll(path, "{group_category_id}", fmt.Sprintf("%v", t.Path.GroupCategoryID))
	return path
}

// GetQuery returns the query string; this endpoint takes no query parameters.
func (t *ExportGroupsInAndUsersInCategory) GetQuery() (string, error) {
	return "", nil
}

// GetBody returns form values; this endpoint sends no form body.
func (t *ExportGroupsInAndUsersInCategory) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns a JSON body; this endpoint sends none.
func (t *ExportGroupsInAndUsersInCategory) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the required parameters, returning one error joining
// every missing-field message, or nil when the request is complete.
//
// Fix: fmt.Errorf was given the joined, non-constant string as its FORMAT
// argument, which trips `go vet` (printf check) and would misinterpret any
// '%' in a message; an explicit "%s" verb is now used.
func (t *ExportGroupsInAndUsersInCategory) HasErrors() error {
	errs := []string{}
	if t.Path.GroupCategoryID == "" {
		errs = append(errs, "'Path.GroupCategoryID' is required")
	}
	if len(errs) > 0 {
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas client, discarding the
// response.
func (t *ExportGroupsInAndUsersInCategory) Do(c *canvasapi.Canvas) error {
	if _, err := c.SendRequest(t); err != nil {
		return err
	}
	return nil
}
|
package controllers
import (
"github.com/astaxie/beego/orm"
"myproject/models"
"fmt"
)
// TSql is
func (c *ModelController) TSql() {
o := orm.NewOrm()
// var maps [] orm.Params
// num, err := o.Raw("select * from up").Values(&maps)
// var up models.Up
// err := o.Raw("select * from Up where id=6").QueryRow(&up)
var up []models.Up
num, err := o.Raw("select * from Up").QueryRows(&up)
if err != nil {
fmt.Println("q")
}
c.Ctx.WriteString(fmt.Sprintf("共查询了 num:%d 条数据。。\n", num))
for _, m:= range up {
//c.Ctx.WriteString(fmt.Sprintf("%s\n", m["name"]),)
c.Ctx.WriteString(fmt.Sprintf("%s\n", m.Name))
}
//c.Ctx.WriteString(fmt.Sprintf("id:%d, name:%s",up.Id,up.Name))
} |
package main
import "fmt"
// main demonstrates two in-place strategies for removing every occurrence
// of a value from a slice.
func main() {
	// In-place removal
	/*
		Given an array nums and a value val, remove all elements equal to val
		in place and return the new length. Do not allocate extra array
		space: modify the input in place using O(1) extra memory. Element
		order may change, and elements beyond the new length do not matter.
		Example 1: nums = [3,2,2,3], val = 3 -> new length 2, and the first
		two elements of nums are both 2.
		Example 2: nums = [0,1,2,2,3,0,4,2], val = 2.
	*/
	nums := []int{3, 2, 2, 3}
	fmt.Println(removeElement(nums, 3))
	fmt.Println("nums:", nums)
	nums2 := []int{0, 1, 2, 2, 3, 0, 4, 2}
	fmt.Println(removeElement2(nums2, 2))
	fmt.Println("num2:", nums2)
}
// removeElement deletes every occurrence of val by repeatedly splicing the
// matching index out with append, returning the new length. O(n^2) in the
// worst case but preserves relative order of the kept elements.
func removeElement(nums []int, val int) int {
	i := 0
	for i < len(nums) {
		if nums[i] != val {
			i++
			continue
		}
		nums = append(nums[:i], nums[i+1:]...)
	}
	return len(nums)
}
// removeElement2 compacts the slice in one pass with a write index: every
// element not equal to val is copied to the front, and the count of kept
// elements is returned. O(n), order-preserving.
func removeElement2(nums []int, val int) int {
	w := 0
	for _, v := range nums {
		if v != val {
			nums[w] = v
			w++
		}
	}
	return w
}
|
package global
import (
"context"
"strconv"
"github.com/go-redis/redis/v8"
"shared/utility/errors"
"shared/utility/global"
"shared/utility/key"
)
const (
	// Redis hash field names for a user's guild record.
	FieldGuildID   = "guild_id"
	FieldGuildName = "guild_name"
)

// Guild bundles every guild-related redis accessor behind one value.
type Guild struct {
	*GuildID
	*GuildName
	*GuildUser
	*GuildSet
}
// NewGuild wires up every guild sub-store against the same redis client.
func NewGuild(client *redis.Client) *Guild {
	g := new(Guild)
	g.GuildID = NewGuildID(client)
	g.GuildName = NewGuildName(client)
	g.GuildUser = NewGuildUser(client)
	g.GuildSet = NewGuildSet(client)
	return g
}
// GuildID issues new guild ids from a redis-backed auto-increment counter.
// (was: 生成公会ID — generate guild IDs)
type GuildID struct {
	key  string
	incr *global.IncrID
}

// NewGuildID builds the id generator on the shared counter key.
func NewGuildID(client *redis.Client) *GuildID {
	return &GuildID{
		key:  KeyGuildID,
		incr: global.NewIncrID(client),
	}
}

// GenGuildID allocates and returns the next guild id.
func (g *GuildID) GenGuildID(ctx context.Context) (int64, error) {
	return g.incr.GenID(ctx, g.key)
}
// GuildName maps guild names to guild ids in a redis hash, providing
// uniqueness checks and reverse lookup.
// (was: 公会名称去重和索引 — guild-name dedup and index)
type GuildName struct {
	key    string
	client *redis.Client
}

// NewGuildName builds the name index on the shared hash key.
func NewGuildName(client *redis.Client) *GuildName {
	return &GuildName{
		key:    KeyGuildName,
		client: client,
	}
}
// GuildNameExist reports whether guildName is already present in the name
// index. A redis.Nil miss is not an error.
func (g *GuildName) GuildNameExist(ctx context.Context, guildName string) (bool, error) {
	err := g.client.HGet(ctx, g.key, guildName).Err()
	switch {
	case err == redis.Nil:
		return false, nil
	case err != nil:
		return false, err
	default:
		return true, nil
	}
}
// AddGuildNameIfNotExist atomically claims guildName for guildID (HSETNX);
// it reports false when the name was already taken.
func (g *GuildName) AddGuildNameIfNotExist(ctx context.Context, guildID int64, guildName string) (bool, error) {
	return g.client.HSetNX(ctx, g.key, guildName, guildID).Result()
}

// GetGuildID looks up the guild id that owns guildName.
func (g *GuildName) GetGuildID(ctx context.Context, guildName string) (int64, error) {
	return g.client.HGet(ctx, g.key, guildName).Int64()
}

// DelGuildName releases guildName from the index.
func (g *GuildName) DelGuildName(ctx context.Context, guildName string) error {
	return g.client.HDel(ctx, g.key, guildName).Err()
}
// GuildUser stores each player's guild membership in a per-user redis hash.
// (was: 玩家公会数据 — player guild data)
type GuildUser struct {
	key    string
	client *redis.Client
}

// NewGuildUser builds the per-user store on the shared key prefix.
func NewGuildUser(client *redis.Client) *GuildUser {
	return &GuildUser{
		key:    KeyGuildUser,
		client: client,
	}
}
// SetUserGuildData writes the user's guild id and name fields into their
// per-user hash.
func (g *GuildUser) SetUserGuildData(ctx context.Context, userID, guildID int64, guildName string) error {
	return g.client.HSet(ctx, key.MakeRedisKey(g.key, userID), FieldGuildID, guildID, FieldGuildName, guildName).Err()
}

// GetUserGuildData scans the user's hash into a UserGuild value. A missing
// key yields a zero-valued UserGuild (HGETALL of a missing key is empty).
func (g *GuildUser) GetUserGuildData(ctx context.Context, userID int64) (*UserGuild, error) {
	userGuild := &UserGuild{}
	err := g.client.HGetAll(ctx, key.MakeRedisKey(g.key, userID)).Scan(userGuild)
	if err != nil {
		return nil, err
	}
	return userGuild, nil
}

// DelUserGuildData removes the user's guild record entirely.
func (g *GuildUser) DelUserGuildData(ctx context.Context, userID int64) error {
	return g.client.Del(ctx, key.MakeRedisKey(g.key, userID)).Err()
}
// GuildSet maintains the global redis set of existing guild ids.
// NOTE(review): the original comment (玩家公会数据, "player guild data") looks
// copy-pasted from GuildUser; this type actually holds guild ids.
type GuildSet struct {
	key    string
	client *redis.Client
	Lock   *global.Locker
}

// NewGuildSet builds the guild id set with its distributed locker.
func NewGuildSet(client *redis.Client) *GuildSet {
	return &GuildSet{
		key:    KeyGuildSet,
		client: client,
		Lock:   global.NewLocker(client),
	}
}
// GuildSetAdd inserts guildId into the global guild id set.
func (gs *GuildSet) GuildSetAdd(ctx context.Context, guildId int64) error {
	setKey := key.MakeRedisKey(gs.key)
	if err := gs.client.SAdd(ctx, setKey, guildId).Err(); err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GuildSetDelete removes guildId from the global guild id set.
func (gs *GuildSet) GuildSetDelete(ctx context.Context, guildId int64) error {
	setKey := key.MakeRedisKey(gs.key)
	if err := gs.client.SRem(ctx, setKey, guildId).Err(); err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GuildSetRandomGet samples up to num distinct guild ids from the set
// (SRANDMEMBER) and parses them back to int64.
func (gs *GuildSet) GuildSetRandomGet(ctx context.Context, num int64) ([]int64, error) {
	setKey := key.MakeRedisKey(gs.key)
	members, err := gs.client.SRandMemberN(ctx, setKey, num).Result()
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	out := make([]int64, 0, num)
	for _, member := range members {
		parsed, perr := strconv.ParseInt(member, 10, 64)
		if perr != nil {
			return nil, errors.WrapTrace(perr)
		}
		out = append(out, parsed)
	}
	return out, nil
}
|
package mta
// MTA mta schema, the schema will contain the latest mta schema version
// and all the previous version will be as subset of the latest
// Todo - Add the missing properties to support the latest 3.2 version
type MTA struct {
	// indicates MTA schema version, using semver. Pointer so an absent
	// field can be distinguished from an empty string.
	SchemaVersion *string `yaml:"_schema-version" json:"_schema-version"`
	// A globally unique ID of this MTA. Unlimited string of unicode characters.
	ID string `yaml:"ID" json:"ID"`
	// A non-translatable description of this MTA. This is not a text for application users
	Description string `yaml:"description,omitempty" json:"description,omitempty"`
	// Application version, using semantic versioning standard
	Version string `yaml:"version,omitempty" json:"version,omitempty"`
	// The provider or vendor of this software
	Provider string `yaml:"provider,omitempty" json:"provider,omitempty"`
	// A copyright statement from the provider
	Copyright string `yaml:"copyright,omitempty" json:"copyright,omitempty"`
	// list of modules
	Modules []*Module `yaml:"modules,omitempty" json:"modules,omitempty"`
	// Module type declarations
	ModuleTypes []*ModuleTypes `yaml:"module-types,omitempty" json:"module-types,omitempty"`
	// Resource declarations. Resources can be anything required to run the application which is not provided by the application itself
	Resources []*Resource `yaml:"resources,omitempty" json:"resources,omitempty"`
	// Resource type declarations
	ResourceTypes []*ResourceTypes `yaml:"resource-types,omitempty" json:"resource-types,omitempty"`
	// Parameters can be used to steer the behavior of tools which interpret this descriptor
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// ParametersMetaData carries per-parameter metadata (e.g. overwritability)
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
	// Experimental - use for pre/post hook
	BuildParams *ProjectBuild `yaml:"build-parameters,omitempty" json:"build-parameters,omitempty"`
}
// Module - modules section.
type Module struct {
	// An MTA internal module name. Names need to be unique within the MTA scope.
	Name string `yaml:"name" json:"name"`
	// A globally unique type ID. Deployment tools will interpret this type ID.
	Type string `yaml:"type" json:"type"`
	// A non-translatable description of this module. This is not a text for application users.
	Description string `yaml:"description,omitempty" json:"description,omitempty"`
	// A file path which identifies the location of module artifacts.
	Path string `yaml:"path,omitempty" json:"path,omitempty"`
	// Provided property values can be accessed by "~{<name-of-provides-section>/<provided-property-name>}". Such expressions can be part of an arbitrary string.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
	// THE 'includes' ELEMENT IS ONLY RELEVANT FOR DEVELOPMENT DESCRIPTORS (PRIOR TO BUILD), NOT FOR DEPLOYMENT DESCRIPTORS!
	Includes []Includes `yaml:"includes,omitempty" json:"includes,omitempty"`
	// List of names either matching a resource name or a name provided by another module within the same MTA.
	Requires []Requires `yaml:"requires,omitempty" json:"requires,omitempty"`
	// List of provided names (MTA internal) to which properties (= configuration data) can be attached.
	Provides []Provides `yaml:"provides,omitempty" json:"provides,omitempty"`
	// Parameters can be used to steer the behavior of tools which interpret this descriptor. Parameters are not made available to the module at runtime.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
	// Build-parameters are specifically steering the behavior of build tools.
	BuildParams map[string]interface{} `yaml:"build-parameters,omitempty" json:"build-parameters,omitempty"`
	// A list containing the names of the modules that must be deployed prior to this one.
	DeployedAfter []string `yaml:"deployed-after,omitempty" json:"deployed-after,omitempty"`
	// Defined and executed at specific phases of module deployment.
	Hooks []Hook `yaml:"hooks,omitempty" json:"hooks,omitempty"`
}
// ModuleTypes module type declarations.
type ModuleTypes struct {
	// An MTA internal name of the module type. Can be specified in the 'type' element of modules.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// The name of the extended type. Can be another module type defined in this descriptor or one of the default types supported by the deployer.
	Extends string `yaml:"extends,omitempty" json:"extends,omitempty"`
	// Properties inherited by all modules of this type.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
	// Parameters inherited by all modules of this type.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
}
// Provides is the list of provided names to which properties (configuration data) can be attached.
type Provides struct {
	// The provided name (MTA internal) other modules can require.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// Indicates that the provided properties shall be made publicly available by the deployer.
	Public bool `yaml:"public,omitempty" json:"public,omitempty"`
	// Property names and values make up the configuration data which is to be provided to requiring modules at runtime.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
}
// Requires is the list of names either matching a resource name or a name provided by another module within the same MTA.
type Requires struct {
	// An MTA internal name which must match either a provided name, a resource name, or a module name within the same MTA.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// A group name which shall be used by a deployer to group properties for lookup by a module runtime.
	Group string `yaml:"group,omitempty" json:"group,omitempty"`
	// All required and found configuration data sets will be assembled into a JSON array and provided to the module by the lookup name as specified by the value of 'list'.
	List string `yaml:"list,omitempty" json:"list,omitempty"`
	// Provided property values can be accessed by "~{<provided-property-name>}". Such expressions can be part of an arbitrary string.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
	// Parameters can be used to influence the behavior of tools which interpret this descriptor. Parameters are not made available to requiring modules at runtime.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
	// THE 'includes' ELEMENT IS ONLY RELEVANT FOR DEVELOPMENT DESCRIPTORS (PRIOR TO BUILD), NOT FOR DEPLOYMENT DESCRIPTORS!
	Includes []Includes `yaml:"includes,omitempty" json:"includes,omitempty"`
}
// Resource can be anything required to run the application which is not provided by the application itself.
type Resource struct {
	// An MTA internal resource name.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// A type of a resource. This type is interpreted by and must be known to the deployer. Resources can be untyped.
	Type string `yaml:"type,omitempty" json:"type,omitempty"`
	// A non-translatable description of this resource. This is not a text for application users.
	Description string `yaml:"description,omitempty" json:"description,omitempty"`
	// Parameters can be used to influence the behavior of tools which interpret this descriptor. Parameters are not made available to requiring modules at runtime.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
	// Property names and values make up the configuration data which is to be provided to requiring modules at runtime.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
	// THE 'includes' ELEMENT IS ONLY RELEVANT FOR DEVELOPMENT DESCRIPTORS (PRIOR TO BUILD), NOT FOR DEPLOYMENT DESCRIPTORS!
	Includes []Includes `yaml:"includes,omitempty" json:"includes,omitempty"`
	// A resource can be declared to be optional, if the MTA can compensate for its non-existence.
	Optional bool `yaml:"optional,omitempty" json:"optional,omitempty"`
	// If a resource is declared to be active, it is allocated and bound according to declared requirements.
	Active *bool `yaml:"active,omitempty" json:"active,omitempty"`
	// A list containing the names of the resources that must be processed prior to this one.
	ProcessedAfter []string `yaml:"processed-after,omitempty" json:"processed-after,omitempty"`
	// List of names either matching a resource name or a name provided by another module within the same MTA.
	Requires []Requires `yaml:"requires,omitempty" json:"requires,omitempty"`
}
// ResourceTypes resource type declarations.
type ResourceTypes struct {
	// An MTA internal name of the resource type. Can be specified in the 'type' element of resources.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// The name of the extended type. Can be another resource type defined in this descriptor or one of the default types supported by the deployer.
	Extends string `yaml:"extends,omitempty" json:"extends,omitempty"`
	// Properties inherited by all resources of this type.
	Properties map[string]interface{} `yaml:"properties,omitempty" json:"properties,omitempty"`
	// Metadata for entries in Properties.
	PropertiesMetaData map[string]MetaData `yaml:"properties-metadata,omitempty" json:"properties-metadata,omitempty"`
	// Parameters inherited by all resources of this type.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
}
// Includes - the 'includes' element is only relevant for development descriptors, not for deployment descriptors.
type Includes struct {
	// A name of an include section. This name will be used by a builder to generate a parameter section in the deployment descriptor.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// A path pointing to a file which contains a map of parameters, either in JSON or in YAML format.
	Path string `yaml:"path,omitempty" json:"path,omitempty"`
}
// ProjectBuild - experimental; used for pre/post build hooks.
type ProjectBuild struct {
	// Builders executed before all module builds.
	BeforeAll []ProjectBuilder `yaml:"before-all,omitempty" json:"before-all,omitempty"`
	// Builders executed after all module builds.
	AfterAll []ProjectBuilder `yaml:"after-all,omitempty" json:"after-all,omitempty"`
}
// ProjectBuilder - project builder descriptor.
type ProjectBuilder struct {
	// The name of the builder to run.
	Builder string `yaml:"builder,omitempty" json:"builder,omitempty"`
	// Maximum duration allowed for the builder to run.
	Timeout string `yaml:"timeout,omitempty" json:"timeout,omitempty"`
	// Commands executed by the builder.
	Commands []string `yaml:"commands,omitempty" json:"commands,omitempty"`
}
// Hook - defined and executed at specific phases of module deployment.
type Hook struct {
	// An MTA internal name which can be used for documentation purposes and shown by the deployer.
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	// Defines the type of action that should be executed by the deployer.
	Type string `yaml:"type,omitempty" json:"type,omitempty"`
	// A list of strings that define the points at which the hook must be executed.
	Phases []string `yaml:"phases,omitempty" json:"phases,omitempty"`
	// Parameters steering the hook's action.
	Parameters map[string]interface{} `yaml:"parameters,omitempty" json:"parameters,omitempty"`
	// Metadata for entries in Parameters.
	ParametersMetaData map[string]MetaData `yaml:"parameters-metadata,omitempty" json:"parameters-metadata,omitempty"`
	// Names this hook requires, analogous to module requires.
	Requires []Requires `yaml:"requires,omitempty" json:"requires,omitempty"`
}
// MetaData - the properties-metadata and parameters-metadata structure.
type MetaData struct {
	// If set to true, the value can be overwritten by an extension descriptor.
	OverWritable *bool `yaml:"overwritable,omitempty" json:"overwritable,omitempty"`
	// If set to false, a value must be present in the final deployment configuration.
	Optional *bool `yaml:"optional,omitempty" json:"optional,omitempty"`
	// An interface with which a UI-tool can query for possible parameter names together with the expected datatypes and default values.
	Datatype interface{} `yaml:"datatype,omitempty" json:"datatype,omitempty"`
	// Indicates sensitive information to a UI-tool which it can use, e.g., for masking a value.
	Sensitive bool `yaml:"sensitive,omitempty" json:"sensitive,omitempty"`
}
|
package postgresql_test
import (
"context"
"log"
"os"
"testing"
"github.com/adamluzsi/testcase"
"github.com/adamluzsi/testcase/assert"
"github.com/adamluzsi/testcase/random"
"github.com/adamluzsi/frameless/adapters/postgresql"
"github.com/adamluzsi/frameless/ports/guard/guardcontracts"
"github.com/adamluzsi/frameless/ports/migration"
)
// ExampleLocker demonstrates acquiring and releasing a named postgresql lock.
func ExampleLocker() {
	conn, err := postgresql.Connect(os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	locker := postgresql.Locker{
		Name:       "my-lock",
		Connection: conn,
	}
	lockCtx, err := locker.Lock(context.Background())
	if err != nil {
		panic(err)
	}
	if err := locker.Unlock(lockCtx); err != nil {
		panic(err)
	}
}
var _ migration.Migratable = postgresql.Locker{}

// TestLocker runs the guard Locker contract against postgresql.Locker.
func TestLocker(t *testing.T) {
	conn := GetConnection(t)
	guardcontracts.Locker(func(tb testing.TB) guardcontracts.LockerSubject {
		tct := testcase.ToT(&tb)
		subject := postgresql.Locker{
			Name:       tct.Random.StringNC(5, random.CharsetAlpha()),
			Connection: conn,
		}
		assert.NoError(tb, subject.Migrate(context.Background()))
		return guardcontracts.LockerSubject{
			Locker:      subject,
			MakeContext: context.Background,
		}
	}).Test(t)
}
// ExampleLockerFactory demonstrates creating per-key lockers from a LockerFactory.
func ExampleLockerFactory() {
	conn, err := postgresql.Connect(os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	factory := postgresql.LockerFactory[string]{Connection: conn}
	if err := factory.Migrate(context.Background()); err != nil {
		log.Fatal(err)
	}
	locker := factory.LockerFor("hello world")
	lockCtx, err := locker.Lock(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if err := locker.Unlock(lockCtx); err != nil {
		log.Fatal(err)
	}
}
var _ migration.Migratable = postgresql.LockerFactory[int]{}

// TestNewLockerFactory runs the guard LockerFactory contract against
// postgresql.LockerFactory for both string and int key types.
func TestNewLockerFactory(t *testing.T) {
	conn := GetConnection(t)
	guardcontracts.LockerFactory[string](func(tb testing.TB) guardcontracts.LockerFactorySubject[string] {
		factory := postgresql.LockerFactory[string]{Connection: conn}
		assert.NoError(tb, factory.Migrate(context.Background()))
		return guardcontracts.LockerFactorySubject[string]{
			LockerFactory: factory,
			MakeContext:   context.Background,
			MakeKey:       testcase.ToT(&tb).Random.String,
		}
	}).Test(t)
	guardcontracts.LockerFactory[int](func(tb testing.TB) guardcontracts.LockerFactorySubject[int] {
		factory := postgresql.LockerFactory[int]{Connection: conn}
		assert.NoError(tb, factory.Migrate(context.Background()))
		return guardcontracts.LockerFactorySubject[int]{
			LockerFactory: factory,
			MakeContext:   context.Background,
			MakeKey:       testcase.ToT(&tb).Random.Int,
		}
	}).Test(t)
}
|
package ch2
//Embedding C code in Go source: place it in the comment block immediately before the import of the "C" package.
//The "C" package is a pseudo-package; it only tells the go build command to preprocess this Go file with cgo before compiling it.
//This approach is suitable for calling small amounts of simple C code.
//#include <stdio.h>
//void callC() {
// printf("Calling C code!\n");
//}
import "C"
import "fmt"
// callEmbedC prints a Go message, invokes the C function callC defined in the
// cgo preamble comment above, then prints another Go message.
func callEmbedC() {
	fmt.Println("A Go statement!")
	C.callC()
	fmt.Println("Another Go statement!")
}
|
package producer
import (
"gitlab.mytaxi.lk/pickme/k-stream/consumer"
"testing"
)
// TestMockProducer_Produce verifies that a produced record is written to the
// partition specified on the record.
func TestMockProducer_Produce(t *testing.T) {
	producer := NewMockProducer(t)
	// []byte("100") directly; the former []byte(string(`100`)) round-trip was redundant.
	msg := &consumer.Record{
		Key:       []byte("100"),
		Value:     []byte("100"),
		Partition: 1,
	}
	p, _, err := producer.Produce(msg)
	if err != nil {
		t.Error(err)
	}
	if p != 1 {
		t.Fail()
	}
}
// TestMockProducer_ProduceBatch verifies that a batch of records can be
// produced without error.
func TestMockProducer_ProduceBatch(t *testing.T) {
	producer := NewMockProducer(t)
	msg1 := &consumer.Record{
		Key:       []byte("100"),
		Value:     []byte("100"),
		Partition: 1,
	}
	// Copy msg1 so the batch holds two distinct Record values.
	// NOTE(review): msg2 keeps the same key as msg1 — confirm whether a
	// distinct key was intended here.
	msg2 := *msg1
	msg2.Key = []byte("100")
	err := producer.ProduceBatch([]*consumer.Record{msg1, &msg2})
	if err != nil {
		t.Error(err)
	}
}
|
package jfroghelm
import (
"encoding/json"
"fmt"
"github.com/codefresh-io/nomios/pkg/hermes"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"net/http"
"time"
)
// JFrogHelm handles JFrog Artifactory Helm webhook events and forwards them
// to the hermes trigger service.
type JFrogHelm struct {
	hermesSvc hermes.Service
}
type webhookPayload struct {
Artifactory struct {
Webhook struct {
Event string `json:event`
Data struct {
ModifiedBy string `json:modifiedBy`
Created int64 `json:created`
RepoPath struct {
RepoKey string `json:repoKey`
Name string `json:name`
} `json:repoPath`
} `json:data`
} `json:"webhook"`
} `json:"artifactory"`
}
// NewJFrog builds a JFrogHelm webhook handler backed by the given hermes service.
func NewJFrog(svc hermes.Service) *JFrogHelm {
	handler := &JFrogHelm{hermesSvc: svc}
	return handler
}
// constructEventURI builds the hermes event URI for a Helm chart push,
// optionally suffixed with the account ID.
func constructEventURI(payload *webhookPayload, account string) string {
	repoPath := payload.Artifactory.Webhook.Data.RepoPath
	uri := fmt.Sprintf("helm:jfrog:%s:%s:push", repoPath.RepoKey, repoPath.Name)
	if account == "" {
		return uri
	}
	return fmt.Sprintf("%s:%s", uri, account)
}
// HandleWebhook handles a JFrog Artifactory Helm webhook request: it binds the
// payload, converts it into a normalized hermes event, and triggers the
// associated pipelines. Only "storage.afterCreate" events are processed; all
// other events are silently acknowledged.
func (d *JFrogHelm) HandleWebhook(c *gin.Context) {
	log.Info("Got JFrog Helm webhook event")
	payload := webhookPayload{}
	if err := c.BindJSON(&payload); err != nil {
		log.WithError(err).Error("Failed to bind payload JSON to expected structure")
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Ignore everything except artifact-creation events.
	if payload.Artifactory.Webhook.Event != "storage.afterCreate" {
		log.Debug(fmt.Sprintf("Skip event %s", payload.Artifactory.Webhook.Event))
		return
	}
	event := hermes.NewNormalizedEvent()
	eventURI := constructEventURI(&payload, c.Query("account"))
	payloadJSON, err := json.Marshal(payload)
	if err != nil {
		// Typo fix: "covert" -> "convert".
		log.WithError(err).Error("Failed to convert webhook payload structure to JSON")
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// keep original JSON
	event.Original = string(payloadJSON)
	// get chart push details
	event.Variables["event"] = payload.Artifactory.Webhook.Event
	event.Variables["namespace"] = payload.Artifactory.Webhook.Data.RepoPath.RepoKey
	event.Variables["name"] = payload.Artifactory.Webhook.Data.RepoPath.Name
	event.Variables["provider"] = "jfrog"
	event.Variables["type"] = "helm"
	event.Variables["pusher"] = payload.Artifactory.Webhook.Data.ModifiedBy
	// Created is in milliseconds; convert to seconds for time.Unix.
	// (Created is already int64, so the previous int64() conversion was redundant.)
	event.Variables["pushed_at"] = time.Unix(payload.Artifactory.Webhook.Data.Created/1000, 0).Format(time.RFC3339)
	// get secret from URL query
	event.Secret = c.Query("secret")
	log.Debug("Event url " + eventURI)
	// invoke trigger
	err = d.hermesSvc.TriggerEvent(eventURI, event)
	if err != nil {
		log.WithError(err).Error("Failed to trigger event pipelines")
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	c.Status(http.StatusOK)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aliyun
import (
"strconv"
"strings"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
)
// SCen represents an Aliyun Cloud Enterprise Network (CEN) instance.
type SCen struct {
	multicloud.SResourceBase
	multicloud.AliyunTags
	// client is the owning API client; populated by GetAllCens.
	client *SAliyunClient
	Status string `json:"Status"`
	ProtectionLevel string `json:"ProtectionLevel"`
	CenID string `json:"CenId"`
	CreationTime string `json:"CreationTime"`
	CenBandwidthPackageIds CenBandwidthPackageIds `json:"CenBandwidthPackageIds"`
	Name string `json:"Name"`
}
// SCens is the response envelope of the DescribeCens API.
type SCens struct {
	TotalCount int `json:"TotalCount"`
	RequestID string `json:"RequestId"`
	PageSize int `json:"PageSize"`
	PageNumber int `json:"PageNumber"`
	Cens sCens `json:"Cens"`
}
// CenBandwidthPackageIds wraps the bandwidth package IDs bound to a CEN.
type CenBandwidthPackageIds struct {
	CenBandwidthPackageID []string `json:"CenBandwidthPackageId"`
}
// sCens wraps the CEN list inside a DescribeCens response.
type sCens struct {
	Cen []SCen `json:"Cen"`
}
// SCenChildInstances is the response envelope of the
// DescribeCenAttachedChildInstances API.
type SCenChildInstances struct {
	PageNumber int `json:"PageNumber"`
	ChildInstances sCenChildInstances `json:"ChildInstances"`
	TotalCount int `json:"TotalCount"`
	PageSize int `json:"PageSize"`
	RequestID string `json:"RequestId"`
}
// SCenChildInstance describes one network instance attached to a CEN.
type SCenChildInstance struct {
	Status string `json:"Status"`
	ChildInstanceOwnerID string `json:"ChildInstanceOwnerId"`
	ChildInstanceID string `json:"ChildInstanceId"`
	ChildInstanceRegionID string `json:"ChildInstanceRegionId"`
	CenID string `json:"CenId"`
	ChildInstanceType string `json:"ChildInstanceType"`
}
// sCenChildInstances wraps the child instance list inside a response.
type sCenChildInstances struct {
	ChildInstance []SCenChildInstance `json:"ChildInstance"`
}
// SCenAttachInstanceInput carries the parameters used to attach or detach a
// child network instance to/from a CEN.
type SCenAttachInstanceInput struct {
	InstanceType string
	InstanceId string
	InstanceRegion string
	ChildInstanceOwnerId string
}
// DescribeCens calls the DescribeCens API and returns one page of CEN instances.
func (client *SAliyunClient) DescribeCens(pageNumber int, pageSize int) (SCens, error) {
	cens := SCens{}
	params := map[string]string{
		"Action":     "DescribeCens",
		"PageNumber": strconv.Itoa(pageNumber),
		"PageSize":   strconv.Itoa(pageSize),
	}
	resp, err := client.cbnRequest("DescribeCens", params)
	if err != nil {
		return cens, errors.Wrap(err, "DescribeCens")
	}
	if err := resp.Unmarshal(&cens); err != nil {
		return cens, errors.Wrap(err, "resp.Unmarshal")
	}
	return cens, nil
}
// GetAllCens pages through DescribeCens (20 per page) and returns every CEN
// instance, with each result's client field populated.
func (client *SAliyunClient) GetAllCens() ([]SCen, error) {
	result := []SCen{}
	for page := 1; ; page++ {
		cens, err := client.DescribeCens(page, 20)
		if err != nil {
			return nil, errors.Wrapf(err, "client.DescribeCens(%d, 20)", page)
		}
		result = append(result, cens.Cens.Cen...)
		if len(result) >= cens.TotalCount {
			break
		}
	}
	for i := range result {
		result[i].client = client
	}
	return result, nil
}
func (client *SAliyunClient) CreateCen(opts *cloudprovider.SInterVpcNetworkCreateOptions) (string, error) {
params := map[string]string{}
params["Name"] = opts.Name
params["Description"] = opts.Desc
resp, err := client.cbnRequest("CreateCen", params)
if err != nil {
return "", errors.Wrap(err, "CreateCen")
}
type CentId struct {
CenId string `json:"CenId"`
}
centId := CentId{}
err = resp.Unmarshal(¢Id)
if err != nil {
return "", errors.Wrap(err, "resp.Unmarshal")
}
return centId.CenId, nil
}
// DeleteCen deletes the CEN instance with the given ID.
func (client *SAliyunClient) DeleteCen(id string) error {
	params := map[string]string{"CenId": id}
	if _, err := client.cbnRequest("DeleteCen", params); err != nil {
		return errors.Wrap(err, "DeleteCen")
	}
	return nil
}
// DescribeCenAttachedChildInstances returns one page of network instances
// attached to the given CEN.
func (client *SAliyunClient) DescribeCenAttachedChildInstances(cenId string, pageNumber int, pageSize int) (SCenChildInstances, error) {
	children := SCenChildInstances{}
	params := map[string]string{
		"CenId":      cenId,
		"PageNumber": strconv.Itoa(pageNumber),
		"PageSize":   strconv.Itoa(pageSize),
	}
	resp, err := client.cbnRequest("DescribeCenAttachedChildInstances", params)
	if err != nil {
		return children, errors.Wrap(err, "DescribeCenAttachedChildInstances")
	}
	if err := resp.Unmarshal(&children); err != nil {
		return children, errors.Wrap(err, "resp.Unmarshal")
	}
	return children, nil
}
// GetAllCenAttachedChildInstances pages through
// DescribeCenAttachedChildInstances (20 per page) and returns every child
// instance attached to the given CEN.
func (client *SAliyunClient) GetAllCenAttachedChildInstances(cenId string) ([]SCenChildInstance, error) {
	pageNumber := 0
	scenChilds := []SCenChildInstance{}
	for {
		pageNumber++
		cenChilds, err := client.DescribeCenAttachedChildInstances(cenId, pageNumber, 20)
		if err != nil {
			// Name the actual call in the error context (was mislabeled as DescribeCens).
			return nil, errors.Wrapf(err, "client.DescribeCenAttachedChildInstances(%s, %d, 20)", cenId, pageNumber)
		}
		scenChilds = append(scenChilds, cenChilds.ChildInstances.ChildInstance...)
		if len(scenChilds) >= cenChilds.TotalCount {
			break
		}
	}
	return scenChilds, nil
}
// AttachCenChildInstance attaches a child network instance (e.g. a VPC) to the given CEN.
func (client *SAliyunClient) AttachCenChildInstance(cenId string, instance SCenAttachInstanceInput) error {
	params := map[string]string{
		"CenId":                 cenId,
		"ChildInstanceId":       instance.InstanceId,
		"ChildInstanceRegionId": instance.InstanceRegion,
		"ChildInstanceType":     instance.InstanceType,
		"ChildInstanceOwnerId":  instance.ChildInstanceOwnerId,
	}
	if _, err := client.cbnRequest("AttachCenChildInstance", params); err != nil {
		return errors.Wrap(err, "AttachCenChildInstance")
	}
	return nil
}
// DetachCenChildInstance detaches a child network instance from the given CEN.
func (client *SAliyunClient) DetachCenChildInstance(cenId string, instance SCenAttachInstanceInput) error {
	params := map[string]string{
		"CenId":                 cenId,
		"ChildInstanceId":       instance.InstanceId,
		"ChildInstanceRegionId": instance.InstanceRegion,
		"ChildInstanceType":     instance.InstanceType,
		"ChildInstanceOwnerId":  instance.ChildInstanceOwnerId,
	}
	if _, err := client.cbnRequest("DetachCenChildInstance", params); err != nil {
		return errors.Wrap(err, "DetachCenChildInstance")
	}
	return nil
}
// GetId returns the CEN instance ID.
func (self *SCen) GetId() string {
	return self.CenID
}

// GetName returns the CEN display name.
func (self *SCen) GetName() string {
	return self.Name
}

// GetGlobalId returns the globally unique ID, which for a CEN is its instance ID.
func (self *SCen) GetGlobalId() string {
	return self.CenID
}
// GetStatus maps the Aliyun CEN status string onto the generic inter-VPC
// network status constants; unrecognized values map to UNKNOWN.
func (self *SCen) GetStatus() string {
	statuses := map[string]string{
		"Creating": api.INTER_VPC_NETWORK_STATUS_CREATING,
		"Active":   api.INTER_VPC_NETWORK_STATUS_AVAILABLE,
		"Deleting": api.INTER_VPC_NETWORK_STATUS_DELETING,
	}
	if status, ok := statuses[self.Status]; ok {
		return status
	}
	return api.INTER_VPC_NETWORK_STATUS_UNKNOWN
}
// Refresh re-fetches this CEN from the API and updates the receiver in place.
// Returns cloudprovider.ErrNotFound when the CEN no longer exists.
func (self *SCen) Refresh() error {
	cens, err := self.client.GetAllCens()
	if err != nil {
		return errors.Wrap(err, "self.client.GetAllCens()")
	}
	for i := range cens {
		if cens[i].CenID != self.CenID {
			continue
		}
		return jsonutils.Update(self, cens[i])
	}
	return cloudprovider.ErrNotFound
}
// GetAuthorityOwnerId returns the owner account ID of the API client managing this CEN.
func (self *SCen) GetAuthorityOwnerId() string {
	return self.client.ownerId
}
// GetICloudVpcIds lists the IDs of all VPC-type child instances attached to this CEN.
func (self *SCen) GetICloudVpcIds() ([]string, error) {
	children, err := self.client.GetAllCenAttachedChildInstances(self.GetId())
	if err != nil {
		return nil, errors.Wrap(err, "self.client.GetAllCenAttachedChildInstances(self.GetId())")
	}
	vpcIds := []string{}
	for _, child := range children {
		if child.ChildInstanceType == "VPC" {
			vpcIds = append(vpcIds, child.ChildInstanceID)
		}
	}
	return vpcIds, nil
}
// AttachVpc attaches the VPC described by opts to this CEN.
func (self *SCen) AttachVpc(opts *cloudprovider.SInterVpcNetworkAttachVpcOption) error {
	input := SCenAttachInstanceInput{
		InstanceType:         "VPC",
		InstanceId:           opts.VpcId,
		InstanceRegion:       opts.VpcRegionId,
		ChildInstanceOwnerId: opts.VpcAuthorityOwnerId,
	}
	if err := self.client.AttachCenChildInstance(self.GetId(), input); err != nil {
		return errors.Wrapf(err, "self.client.AttachCenChildInstance(%s,%s)", self.GetId(), jsonutils.Marshal(opts).String())
	}
	return nil
}
// DetachVpc detaches the VPC described by opts from this CEN.
func (self *SCen) DetachVpc(opts *cloudprovider.SInterVpcNetworkDetachVpcOption) error {
	input := SCenAttachInstanceInput{
		InstanceType:         "VPC",
		InstanceId:           opts.VpcId,
		InstanceRegion:       opts.VpcRegionId,
		ChildInstanceOwnerId: opts.VpcAuthorityOwnerId,
	}
	if err := self.client.DetachCenChildInstance(self.GetId(), input); err != nil {
		return errors.Wrapf(err, "self.client.DetachCenChildInstance(%s,%s)", self.GetId(), jsonutils.Marshal(opts).String())
	}
	return nil
}
// Delete removes this CEN instance.
func (self *SCen) Delete() error {
	if err := self.client.DeleteCen(self.GetId()); err != nil {
		return errors.Wrapf(err, "self.client.DeleteCen(%s)", self.GetId())
	}
	return nil
}
// GetInstanceRouteEntries collects the route entries of every child instance
// attached to this CEN, keeping only routes that did NOT arrive via CEN
// propagation, so the result reflects each route's origin.
func (self *SCen) GetInstanceRouteEntries() ([]SCenRouteEntry, error) {
	childInstance, err := self.client.GetAllCenAttachedChildInstances(self.GetId())
	if err != nil {
		return nil, errors.Wrap(err, "self.client.GetAllCenAttachedChildInstances(self.GetId())")
	}
	result := []SCenRouteEntry{}
	for i := range childInstance {
		routes, err := self.client.GetAllCenChildInstanceRouteEntries(self.GetId(), childInstance[i].ChildInstanceID, childInstance[i].ChildInstanceRegionID, childInstance[i].ChildInstanceType)
		if err != nil {
			return nil, errors.Wrap(err, "self.client.GetAllCenChildInstanceRouteEntries(self.GetId(), childInstance[i].ChildInstanceID, childInstance[i].ChildInstanceRegionID, childInstance[i].ChildInstanceType)")
		}
		for j := range routes {
			// "CEN"-type routes are propagated into this table from other
			// VPC/VBR route tables through the CEN; we only care about where
			// a route originated, so skip them.
			if routes[j].Type != "CEN" {
				routes[j].ChildInstance = &childInstance[i]
				result = append(result, routes[j])
			}
		}
	}
	return result, nil
}
// GetIRoutes exposes the instance route entries as generic cloudprovider routes.
func (self *SCen) GetIRoutes() ([]cloudprovider.ICloudInterVpcNetworkRoute, error) {
	routeEntries, err := self.GetInstanceRouteEntries()
	if err != nil {
		return nil, errors.Wrap(err, "self.GetInstanceRouteEntries()")
	}
	result := make([]cloudprovider.ICloudInterVpcNetworkRoute, 0, len(routeEntries))
	for i := range routeEntries {
		result = append(result, &routeEntries[i])
	}
	return result, nil
}
// EnableRouteEntry publishes the route entry identified by routeId to the CEN.
// routeId is an aliyun-generated composite ID of the form "<routeTableId>:<cidr>".
func (self *SCen) EnableRouteEntry(routeId string) error {
	idContent := strings.Split(routeId, ":")
	if len(idContent) != 2 {
		return errors.Wrapf(cloudprovider.ErrNotSupported, "invalid aliyun generated cenRouteId %s", routeId)
	}
	routeTable := idContent[0]
	cidr := idContent[1]
	routeEntries, err := self.GetInstanceRouteEntries()
	if err != nil {
		return errors.Wrap(err, "self.GetInstanceRouteEntries()")
	}
	// Locate the entry matching the requested route table and CIDR.
	// NOTE(review): if no entry matches, routeEntry stays zero-valued and the
	// publish call below runs with empty instance arguments — confirm whether
	// a not-found error should be returned instead.
	routeEntry := SCenRouteEntry{}
	for i := range routeEntries {
		if routeEntries[i].RouteTableID == routeTable && routeEntries[i].DestinationCidrBlock == cidr {
			routeEntry = routeEntries[i]
			break
		}
	}
	// Already published; nothing to do.
	if routeEntry.GetEnabled() {
		return nil
	}
	err = self.client.PublishRouteEntries(self.GetId(), routeEntry.GetInstanceId(), routeTable, routeEntry.GetInstanceRegionId(), routeEntry.GetInstanceType(), cidr)
	if err != nil {
		return errors.Wrap(err, "self.client.PublishRouteEntries()")
	}
	return nil
}
// DisableRouteEntry withdraws the published route entry identified by routeId
// from the CEN. routeId is an aliyun-generated composite ID of the form
// "<routeTableId>:<cidr>".
func (self *SCen) DisableRouteEntry(routeId string) error {
	idContent := strings.Split(routeId, ":")
	if len(idContent) != 2 {
		return errors.Wrapf(cloudprovider.ErrNotSupported, "invalid aliyun generated cenRouteId %s", routeId)
	}
	routeTable := idContent[0]
	cidr := idContent[1]
	routeEntries, err := self.GetInstanceRouteEntries()
	if err != nil {
		return errors.Wrap(err, "self.GetInstanceRouteEntries()")
	}
	// Locate the entry matching the requested route table and CIDR.
	routeEntry := SCenRouteEntry{}
	for i := range routeEntries {
		if routeEntries[i].RouteTableID == routeTable && routeEntries[i].DestinationCidrBlock == cidr {
			routeEntry = routeEntries[i]
			break
		}
	}
	// Already withdrawn; nothing to do.
	if !routeEntry.GetEnabled() {
		return nil
	}
	err = self.client.WithdrawPublishedRouteEntries(self.GetId(), routeEntry.GetInstanceId(), routeTable, routeEntry.GetInstanceRegionId(), routeEntry.GetInstanceType(), cidr)
	if err != nil {
		// Wrap with the actual call name (was mislabeled as PublishRouteEntries).
		return errors.Wrap(err, "self.client.WithdrawPublishedRouteEntries()")
	}
	return nil
}
|
package main
import (
"log"
"net/http"
"shopping-cart/pkg/database"
"shopping-cart/pkg/routes"
"github.com/gorilla/mux"
)
// main wires up the database connection and the HTTP routes, then serves on :8008.
func main() {
	database.Connect()
	router := routes.SetRoutes(mux.NewRouter())
	log.Fatal(http.ListenAndServe(":8008", router))
}
|
package floats
import (
"github.com/shawnsmithdev/zermelo/v2"
"unsafe"
)
// unsafeFlipSortFlip converts float slices to unsigned, flips some bits to allow sorting, sorts and unflips.
// F and U must be the same bit size, and len(buf) must be >= len(x).
// This will not work if NaNs are present in x. Remove them first.
//
// (The buffer parameter is named buf to match the doc comment; it was
// previously named b while the comment referred to len(buf).)
func unsafeFlipSortFlip[F Float, U zermelo.Unsigned](x, buf []F, size uint) {
	xu := unsafeSliceConvert[F, U](x)
	bu := unsafeSliceConvert[F, U](buf)
	floatFlip[U](xu, U(1)<<(size-1))
	zermelo.SortBYOB(xu, bu)
	floatUnflip[U](xu, U(1)<<(size-1))
}
// floatFlip transforms the unsigned representations in y so they can be
// sorted as plain unsigned integers (see unsafeFlipSortFlip): values with the
// top (sign) bit set have all bits inverted, the rest only the top bit.
func floatFlip[U zermelo.Unsigned](y []U, topBit U) {
	for i := range y {
		if y[i]&topBit == 0 {
			y[i] ^= topBit
		} else {
			y[i] ^= ^U(0)
		}
	}
}
// floatUnflip reverses the bit transformation applied by floatFlip.
func floatUnflip[U zermelo.Unsigned](y []U, topBit U) {
	for i := range y {
		if y[i]&topBit == 0 {
			y[i] ^= ^U(0)
		} else {
			y[i] ^= topBit
		}
	}
}
// unsafeSliceConvert takes a slice of one type and returns a slice of another type using the same memory
// for the backing array. F and U obviously must be exactly the same size for this to work.
//
// This must only be used to temporarily treat elements in a slice as though they were of a different type.
// One must not modify the length or capacity of either the given or returned slice
// while the returned slice is still in scope.
//
// If x goes out of scope, the returned slice becomes invalid, as they share memory but the garbage collector is
// unaware of the returned slice and may invalidate that memory. Working around this may require
// use of `runtime.KeepAlive(x)`.
func unsafeSliceConvert[F any, U any](x []F) []U {
	// Reinterpret the backing array's base pointer as *U, rebuild a slice over
	// the full capacity, then trim back to the original length so len/cap match x.
	uPointer := (*U)(unsafe.Pointer(unsafe.SliceData(x)))
	return unsafe.Slice(uPointer, cap(x))[:len(x)]
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package filecheck helps tests check permissions and ownership of on-disk files.
package filecheck
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"syscall"
"chromiumos/tast/local/sysutil"
"chromiumos/tast/testing"
)
// Pattern matches one or more paths.
// It can be used to verify that matched paths have expected ownership and permissions.
type Pattern struct {
	match Matcher // selects the paths this pattern applies to
	uids, gids []uint32 // allowed IDs; nil or empty to not check
	mode *os.FileMode // mode perm bits must exactly match
	notMode *os.FileMode // none of these perm bits may be set
	skipChildren bool // should children (if this is a dir) be skipped?
	errors []string // set when the pattern is invalid
}
// NewPattern returns a new Pattern that verifies that paths matched by m meet
// the requirements configured by opts.
func NewPattern(m Matcher, opts ...Option) *Pattern {
	p := &Pattern{match: m}
	for _, opt := range opts {
		opt(p)
	}
	return p
}
// modeMask contains permission-related os.FileMode bits:
// the 0777 permission bits plus setuid, setgid, and the sticky bit.
const modeMask = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
// check inspects fi and returns a list of problems describing how fi's
// ownership or permission bits differ from the pattern's requirements.
// If the pattern itself is invalid, its stored errors are returned instead.
func (p *Pattern) check(fi os.FileInfo) (problems []string) {
	if len(p.errors) > 0 {
		return append(problems, p.errors...)
	}

	idAllowed := func(id uint32, allowed []uint32) bool {
		for _, a := range allowed {
			if a == id {
				return true
			}
		}
		return false
	}

	st := fi.Sys().(*syscall.Stat_t)
	if len(p.uids) > 0 && !idAllowed(st.Uid, p.uids) {
		problems = append(problems, fmt.Sprintf("UID %v (want %v)", st.Uid, p.uids))
	}
	if len(p.gids) > 0 && !idAllowed(st.Gid, p.gids) {
		problems = append(problems, fmt.Sprintf("GID %v (want %v)", st.Gid, p.gids))
	}

	// Permission bits on symbolic links are meaningless, so skip mode checks.
	if fi.Mode()&os.ModeSymlink != 0 {
		return problems
	}
	mode := fi.Mode() & modeMask
	if p.mode != nil && mode != *p.mode {
		problems = append(problems, fmt.Sprintf("mode %04o (want %04o)", mode, *p.mode))
	}
	if p.notMode != nil {
		if bad := mode & *p.notMode; bad != 0 {
			problems = append(problems, fmt.Sprintf("mode %04o (%04o disallowed)", mode, bad))
		}
	}
	return problems
}
// String returns a human-readable summary of the pattern's configured
// requirements, e.g. "[uids=[0] mode=0644]". Unconfigured fields are omitted.
func (p *Pattern) String() string {
	var fields []string
	if len(p.errors) > 0 {
		fields = append(fields, fmt.Sprintf("error=%v", p.errors))
	}
	if len(p.uids) > 0 {
		fields = append(fields, fmt.Sprintf("uids=%v", p.uids))
	}
	if len(p.gids) > 0 {
		// %v for consistency with uids above (prints identically for int slices).
		fields = append(fields, fmt.Sprintf("gids=%v", p.gids))
	}
	if p.mode != nil {
		fields = append(fields, fmt.Sprintf("mode=%04o", *p.mode))
	}
	if p.notMode != nil {
		fields = append(fields, fmt.Sprintf("notMode=%04o", *p.notMode))
	}
	if p.skipChildren {
		fields = append(fields, "skipChildren")
	}
	return "[" + strings.Join(fields, " ") + "]"
}
// Option is used to configure a Pattern; each Option mutates the Pattern
// it is applied to in NewPattern.
type Option func(*Pattern)
// UID requires that the path be owned by one of the supplied user IDs.
func UID(uids ...uint32) Option { return func(p *Pattern) { p.uids = uids } }
// GID requires that the path be owned by one of the supplied group IDs.
func GID(gids ...uint32) Option { return func(p *Pattern) { p.gids = gids } }
// Users returns an Option that permits a path to be owned by any of the
// supplied users (all of which must exist). If any username cannot be
// resolved, the returned Option instead marks the pattern as invalid.
func Users(usernames ...string) Option {
	uids := make([]uint32, 0, len(usernames))
	for _, name := range usernames {
		uid, err := sysutil.GetUID(name)
		if err != nil {
			return func(p *Pattern) { p.errors = append(p.errors, fmt.Sprintf("Failed to find uid: %v", err)) }
		}
		uids = append(uids, uid)
	}
	return UID(uids...)
}
// Groups returns an Option that permits a path to be owned by any of the
// supplied groups (all of which must exist). If any group cannot be
// resolved, the returned Option instead marks the pattern as invalid.
func Groups(gs ...string) Option {
	gids := make([]uint32, 0, len(gs))
	for _, name := range gs {
		gid, err := sysutil.GetGID(name)
		if err != nil {
			return func(p *Pattern) { p.errors = append(p.errors, fmt.Sprintf("Failed to find gid: %v", err)) }
		}
		gids = append(gids, gid)
	}
	return GID(gids...)
}
// checkMode reports whether m contains only permission-related bits.
// If any other bits are set, the problem is recorded on p and false is returned.
func checkMode(m os.FileMode, p *Pattern) bool {
	if m&^modeMask == 0 {
		return true
	}
	p.errors = append(p.errors, fmt.Sprintf("invalid bit(s) %04o", m))
	return false
}
// Mode requires that permission-related bits in the path's mode exactly match m.
// Only 0777, setuid, setgid, and the sticky bit may be supplied; anything else
// marks the pattern invalid via checkMode.
func Mode(m os.FileMode) Option {
	return func(p *Pattern) {
		if !checkMode(m, p) {
			return
		}
		p.mode = &m
	}
}
// NotMode requires that the permission-related bits in the path's mode contain
// none of the bits in nm. Only 0777, setuid, setgid, and the sticky bit may be
// supplied; anything else marks the pattern invalid via checkMode.
func NotMode(nm os.FileMode) Option {
	return func(p *Pattern) {
		if !checkMode(nm, p) {
			return
		}
		p.notMode = &nm
	}
}
// SkipChildren indicates that any child paths should not be checked.
// The directory itself will still be checked. This has no effect for non-directories.
func SkipChildren() Option { return func(p *Pattern) { p.skipChildren = true } }
// Matcher matches a path relative to the root passed to Check.
// The root itself is represented by the empty string.
type Matcher func(path string) bool
// AllPaths returns a Matcher that matches every path.
func AllPaths() Matcher {
	return func(string) bool { return true }
}
// Path returns a Matcher that matches only the supplied path (relative to the
// root passed to Check). It panics if path is empty or absolute.
func Path(path string) Matcher {
	if path == "" || strings.HasPrefix(path, "/") {
		panic("Path must be relative")
	}
	return func(p string) bool { return p == path }
}
// Root returns a Matcher that matches only the root path passed to Check,
// which is represented as the empty relative path.
func Root() Matcher {
	return func(p string) bool { return len(p) == 0 }
}
// PathRegexp returns a Matcher that matches all paths matched by regular expression r.
// r is evaluated against paths relative to the root passed to Check.
// It panics (via regexp.MustCompile) if r is not a valid regular expression.
func PathRegexp(r string) Matcher {
	re := regexp.MustCompile(r)
	return func(p string) bool { return re.MatchString(p) }
}
// Tree returns a Matcher that matches path itself and every path beneath it.
// The path is relative to the root passed to Check.
// It panics on an empty path; use AllPaths to match everything.
func Tree(path string) Matcher {
	if len(path) == 0 {
		panic("Use AllPaths to match all paths")
	}
	prefix := path + "/"
	return func(p string) bool {
		if p == path {
			return true
		}
		return strings.HasPrefix(p, prefix)
	}
}
// Check inspects all files within (and including) root.
// pats are executed in-order against each path.
// If a pattern matches a path, no later patterns are evaluated against it.
// If SkipChildren is included in a pattern, any matched directories' children are skipped.
// A map from absolute path names to strings describing problems is returned,
// along with the number of paths (not including ones skipped by SkipChildren) that were inspected.
func Check(ctx context.Context, root string, pats []*Pattern) (
	problems map[string][]string, numPaths int, err error) {
	problems = make(map[string][]string)
	err = filepath.Walk(root, func(fullPath string, fi os.FileInfo, err error) error {
		// Check for test timeout.
		if ctx.Err() != nil {
			return ctx.Err()
		}
		// If filepath.Walk encountered an error inspecting the file, skip it.
		// This generally seems to happen due to a file getting deleted mid-run, but we also sometimes
		// see "readdirent: input/output error" errors: https://crbug.com/908416
		if err != nil {
			testing.ContextLogf(ctx, "Failed to check %v: %v", fullPath, err)
			return nil
		}
		// Convert to a path relative to root; the root itself becomes "".
		// NOTE(review): this slicing assumes root has no trailing slash — confirm callers.
		relPath := ""
		if fullPath != root {
			relPath = fullPath[len(root+"/"):]
		}
		numPaths++
		// First matching pattern wins; later patterns are not consulted for this path.
		for _, pat := range pats {
			if pat.match(relPath) {
				if msgs := pat.check(fi); len(msgs) > 0 {
					problems[fullPath] = append(problems[fullPath], msgs...)
				}
				// filepath.SkipDir stops Walk from descending into this directory.
				if pat.skipChildren && fi.IsDir() {
					return filepath.SkipDir
				}
				break
			}
		}
		return nil
	})
	return problems, numPaths, err
}
|
package main
import "testing"
// Pool is the shared client pool used by all tests in this package.
var Pool *ClientPool
// TestMain initializes the shared pool before running the package's tests.
// NOTE(review): Pool is never closed — the commented-out defer suggests this
// is unresolved; confirm whether ClientPool requires an explicit Close.
func TestMain(m *testing.M) {
	Pool = NewClientPool("127.0.0.1:9080")
	// defer Pool.Close()
	m.Run()
}
|
package database
import (
"fmt"
"strings"
)
// NewSQLInjectionError returns an error whose message is built from the
// printf-style format string s and the string arguments args.
func NewSQLInjectionError(s string, args ...string) error {
	// fmt.Sprintf takes ...interface{}; passing the []string directly (as the
	// original code did) formats the whole slice as a single operand, e.g.
	// "... in [x]" instead of "... in x". Spread the values explicitly.
	vals := make([]interface{}, len(args))
	for i, a := range args {
		vals[i] = a
	}
	return &SqlInjectionError{fmt.Sprintf(s, vals...)}
}

// SqlInjectionError reports that a condition string contains characters that
// could enable SQL injection.
type SqlInjectionError struct {
	s string // human-readable description of the rejected input
}

// Error implements the error interface.
func (e *SqlInjectionError) Error() string {
	return e.s
}

// validate checks every string value in conditions for disallowed SQL
// characters and returns the first error found. Non-string values are skipped.
func validate(conditions map[string]interface{}) error {
	for _, val := range conditions {
		condition, ok := val.(string)
		if !ok {
			continue
		}
		if err := validateSQLInjection(condition); err != nil {
			return err
		}
	}
	return nil
}

// validateSQLInjection returns an error if s contains a ';', which is not
// allowed in condition strings.
func validateSQLInjection(s string) error {
	// The original condition was inverted: it rejected strings WITHOUT a ';'
	// and accepted strings containing one, defeating the check entirely.
	if strings.Contains(s, ";") {
		return NewSQLInjectionError("';' isn't allowed in %s", s)
	}
	return nil
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
)
// main evaluates the puzzle input and prints the result plus elapsed time.
func main() {
	started := time.Now()
	result := run()
	fmt.Printf("Result is %v \n", result)
	log.Printf("Code took %s", time.Since(started))
}
// run reads each line of input.txt as one expression and returns the sum of
// their evaluated results. It terminates the program if the file cannot be
// read or scanned.
func run() int {
	f, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	operations := []string{}
	for scanner.Scan() {
		operations = append(operations, scanner.Text())
	}
	// The original code ignored scanner errors, which would silently truncate
	// the input on a read failure.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return calcTotal(operations)
}
// calcTotal evaluates every expression in ops and returns the sum of results.
func calcTotal(ops []string) int {
	sum := 0
	for _, expr := range ops {
		sum += getTotalOp(expr)
	}
	return sum
}
// getTotalOp evaluates a single expression in which addition binds tighter
// than multiplication (AoC 2020 day 18 part 2 rules). Parenthesized
// sub-expressions are resolved first, innermost groups before outer ones.
// NOTE(review): strconv.Atoi errors are ignored, so non-numeric tokens act as
// 0 — assumed acceptable for well-formed puzzle input; confirm.
func getTotalOp(o string) int {
	// Repeatedly collapse the last-opened (therefore innermost) parenthesis
	// group until the expression is flat.
	for strings.Contains(o, "(") {
		p := strings.LastIndex(o, "(")
		o = solveParenthesis(o, p)
	}
	// Addition has higher precedence: sum each "*"-separated segment, then
	// multiply the partial sums together.
	sp := strings.Split(o, "*")
	t := []int{}
	for _, v := range sp {
		so := strings.Split(v, " ")
		intTotal := 0
		sign := "+"
		for _, val := range so {
			if val == "+" || val == "*" {
				sign = val
				continue
			}
			a, _ := strconv.Atoi(val)
			switch sign {
			case "+":
				intTotal += a
			}
		}
		t = append(t, intTotal)
	}
	total := 1
	for _, v := range t {
		total *= v
	}
	return total
}
// solveParenthesis evaluates the parenthesis group opening at index p in o and
// replaces every occurrence of that "(...)" text with its numeric value.
// o must contain a matching ')' after p; otherwise this panics on slicing,
// matching the original behavior for malformed input.
func solveParenthesis(o string, p int) string {
	// Find the first ')' after the '(' at p. Because p comes from
	// strings.LastIndex(o, "("), the group cannot contain nested parentheses.
	// (The original built the inner text character by character and then
	// discarded it, re-slicing instead — that dead work is removed here.)
	closeP := p + 1 + strings.IndexByte(o[p+1:], ')')
	inner := o[p+1 : closeP]
	total := getTotalOp(inner)
	// ReplaceAll mirrors the original: every identical "(...)" group in the
	// expression is substituted at once.
	return strings.ReplaceAll(o, "("+inner+")", strconv.Itoa(total))
}
|
package main
import (
"encoding/json"
"fmt"
"strconv"
"github.com/aasisodiya/aws/s3"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
// HandleRequest Method (Using Headers).
// It serves API Gateway GET requests that list objects from an S3 bucket.
// The bucket name, region, and object count are read from the "bucketname",
// "region", and "maxkeys" request headers; all three are required.
// Non-GET methods yield 405, bad input 400, S3 failures 502.
func HandleRequest(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	if request.HTTPMethod != "GET" {
		return events.APIGatewayProxyResponse{Body: "{\"message\": \"Method Not Allowed!\"}", StatusCode: 405}, nil
	}
	if request.Headers["maxkeys"] == "" || request.Headers["bucketname"] == "" || request.Headers["region"] == "" {
		return events.APIGatewayProxyResponse{Body: "{\"message\": \"Headers Parameters: [maxkeys, bucketname, region] all values are required\"}", StatusCode: 400}, nil
	}
	fmt.Println("Fetching", request.Headers["maxkeys"], "Objects from Bucket", request.Headers["bucketname"])
	maxkeys, err := strconv.Atoi(request.Headers["maxkeys"])
	if err != nil {
		return events.APIGatewayProxyResponse{Body: "{\"message\": \"Invalid query string: [maxkeys]\"}", StatusCode: 400}, nil
	}
	result, err := s3.ListObjects(request.Headers["region"], request.Headers["bucketname"], maxkeys)
	if err != nil {
		fmt.Println(err.Error())
		// NOTE(review): err.Error() is embedded into the JSON body without
		// escaping; a message containing '"' would produce invalid JSON.
		return events.APIGatewayProxyResponse{Body: "{\"message\": \"" + err.Error() + "\"}", StatusCode: 502}, nil
	}
	jsonResponse, err := json.Marshal(result)
	if err != nil {
		// Previously this panicked, crashing the Lambda invocation; return a
		// well-formed 500 response to API Gateway instead.
		fmt.Println(err.Error())
		return events.APIGatewayProxyResponse{Body: "{\"message\": \"Failed to serialize response\"}", StatusCode: 500}, nil
	}
	return events.APIGatewayProxyResponse{Body: string(jsonResponse), StatusCode: 200}, nil
}
// main registers HandleRequest with the Lambda runtime.
// The commented-out block below is a local invocation harness kept for
// manual testing.
func main() {
	lambda.Start(HandleRequest)
	// request := events.APIGatewayProxyRequest {
	// 	HTTPMethod: "GET",
	// 	Headers: map[string]string{
	// 		"bucketname":"test-bucket-delete-later",
	// 		"maxkeys":"5",
	// 		"region":"ap-south-1",
	// 	},
	// }
	// response,_ := HandleRequest(request)
	// fmt.Println(response)
}
// // HandleRequest Method (Using QueryParameters)(Not Recommended)
// func HandleRequest(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
// if request.HTTPMethod == "GET" {
// if request.QueryStringParameters["maxkeys"] == "" || request.QueryStringParameters["bucketname"] == "" {
// APIResponse := events.APIGatewayProxyResponse{Body: "{\"message\": \"Query Parameters: [maxkeys, bucketname] both values are required\"}", StatusCode: 400}
// return APIResponse, nil
// }
// fmt.Println("Fetching", request.QueryStringParameters["maxkeys"], "Objects from Bucket", request.QueryStringParameters["bucketname"])
// maxkeys, err := strconv.Atoi(request.QueryStringParameters["maxkeys"])
// if err != nil {
// APIResponse := events.APIGatewayProxyResponse{Body: "{\"message\": \"Invalid query string: [maxkeys]\"}", StatusCode: 400}
// return APIResponse, nil
// }
// result, err := s3.ListObjects("ap-south-1", request.QueryStringParameters["bucketname"], maxkeys)
// if err != nil {
// fmt.Println(err.Error())
// APIResponse := events.APIGatewayProxyResponse{Body: "{\"message\": \"" + err.Error() + "\"}", StatusCode: 502}
// return APIResponse, nil
// }
// jsonResponse, err := json.Marshal(result)
// if err != nil {
// panic(err)
// }
// APIResponse := events.APIGatewayProxyResponse{Body: string(jsonResponse), StatusCode: 200}
// return APIResponse, nil
// }
// APIResponse := events.APIGatewayProxyResponse{Body: "{\"message\": \"Method Not Allowed!\"}", StatusCode: 405}
// return APIResponse, nil
// }
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
jira "github.com/andygrunwald/go-jira"
pluginapi "github.com/mattermost/mattermost-plugin-api"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-server/v6/plugin"
"github.com/mattermost/mattermost-server/v6/plugin/plugintest"
"github.com/mattermost/mattermost-server/v6/plugin/plugintest/mock"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/trivago/tgo/tcontainer"
"github.com/mattermost/mattermost-plugin-jira/server/utils/kvstore"
)
// Issue and project keys plus error strings used by the fake Jira client
// (testClient) and the tests below.
// NOTE(review): "nonExistant" is a misspelling of "nonExistent"; the names are
// kept as-is because they are referenced throughout this file.
const (
	nonExistantIssueKey   = "FAKE-1"   // GetIssue/GetTransitions fail for this key
	noPermissionsIssueKey = "SUDO-1"   // permission-denied behavior
	attachCommentErrorKey = "ATTACH-1" // AddComment fails unexpectedly
	existingIssueKey      = "REAL-1"   // happy-path issue
	nonExistantProjectKey = "FP"       // GetProject fails for this key
	noIssueFoundError     = "We couldn't find the issue key. Please confirm the issue key and try again. You may not have permissions to access this issue."
	noPermissionsError    = "You do not have the appropriate permissions to perform this action. Please contact your Jira administrator."
)
// testClient is a fake Jira client: it embeds the service interfaces and
// overrides only the methods exercised by the tests in this file.
type testClient struct {
	RESTService
	UserService
	ProjectService
	SearchService
	IssueService
}
// GetProject simulates project lookup: the non-existent project key yields an
// error; any other key succeeds with a nil project.
func (client testClient) GetProject(key string) (*jira.Project, error) {
	if key != nonExistantProjectKey {
		return nil, nil
	}
	return nil, errors.New("Project " + key + " not found")
}
// GetTransitions returns canned transitions for a normal issue, an error for
// the unknown issue, and an empty list for the no-permissions issue.
func (client testClient) GetTransitions(issueKey string) ([]jira.Transition, error) {
	switch issueKey {
	case nonExistantIssueKey:
		return []jira.Transition{}, errors.New(noIssueFoundError)
	case noPermissionsIssueKey:
		return []jira.Transition{}, nil
	}
	return []jira.Transition{
		{To: jira.Status{Name: "To Do"}},
		{To: jira.Status{Name: "In Progress"}},
		{To: jira.Status{Name: "In Testing"}},
	}, nil
}
// DoTransition always succeeds in this fake client.
func (client testClient) DoTransition(issueKey string, transitionID string) error {
	return nil
}
// GetIssue returns a minimal populated issue, or kvstore.ErrNotFound for the
// non-existent issue key.
func (client testClient) GetIssue(issueKey string, options *jira.GetQueryOptions) (*jira.Issue, error) {
	if issueKey != nonExistantIssueKey {
		return &jira.Issue{
			Fields: &jira.IssueFields{
				Reporter: &jira.User{},
				Status:   &jira.Status{},
			},
		}, nil
	}
	return nil, kvstore.ErrNotFound
}
// AddComment simulates attaching a comment: a permission error or a generic
// failure for the corresponding issue keys, success for everything else.
func (client testClient) AddComment(issueKey string, comment *jira.Comment) (*jira.Comment, error) {
	switch issueKey {
	case noPermissionsIssueKey:
		return nil, errors.New("you do not have the permission to comment on this issue")
	case attachCommentErrorKey:
		return nil, errors.New("unanticipated error")
	default:
		return nil, nil
	}
}
// GetCreateMetaInfo returns a fixed metadata payload containing a single
// project with one issue type whose "security" field allows value id 10001.
func (client testClient) GetCreateMetaInfo(api plugin.API, options *jira.GetQueryOptions) (*jira.CreateMetaInfo, error) {
	return &jira.CreateMetaInfo{
		Projects: []*jira.MetaProject{
			{
				IssueTypes: []*jira.MetaIssueType{
					{
						Fields: tcontainer.MarshalMap{
							"security": tcontainer.MarshalMap{
								"allowedValues": []interface{}{
									tcontainer.MarshalMap{
										"id": "10001",
									},
								},
							},
						},
					},
				},
			},
		},
	}, nil
}
// TestTransitionJiraIssue verifies TransitionIssue's handling of missing
// issues, permission failures, invalid or ambiguous target states, and a
// successful transition.
func TestTransitionJiraIssue(t *testing.T) {
	api := &plugintest.API{}
	api.On("SendEphemeralPost", mock.AnythingOfType("string"), mock.AnythingOfType("*model.Post")).Return(&model.Post{})
	p := Plugin{}
	p.initializeRouter()
	p.SetAPI(api)
	p.client = pluginapi.NewClient(api, p.Driver)
	p.userStore = getMockUserStoreKV()
	p.instanceStore = p.getMockInstanceStoreKV(1)
	tests := map[string]struct {
		issueKey    string
		toState     string
		expectedMsg string
		expectedErr error
	}{
		"Transitioning a non existent issue": {
			issueKey:    nonExistantIssueKey,
			toState:     "To Do",
			expectedMsg: "",
			expectedErr: errors.New(noIssueFoundError),
		},
		"Transitioning an issue where user does not have access": {
			issueKey:    noPermissionsIssueKey,
			toState:     "To Do",
			expectedMsg: "",
			expectedErr: errors.New(noPermissionsError),
		},
		"Looking for an invalid state": {
			issueKey:    existingIssueKey,
			toState:     "tofu",
			expectedMsg: "",
			expectedErr: errors.New("\"tofu\" is not a valid state. Please use one of: \"To Do, In Progress, In Testing\""),
		},
		"Matching multiple available states": {
			issueKey:    existingIssueKey,
			toState:     "in",
			expectedMsg: "",
			expectedErr: errors.New("please be more specific, \"in\" matched several states: \"In Progress, In Testing\""),
		},
		"Successfully transitioning to new state": {
			issueKey:    existingIssueKey,
			toState:     "inprog",
			expectedMsg: fmt.Sprintf("[%s](%s/browse/%s) transitioned to `In Progress`", existingIssueKey, mockInstance1URL, existingIssueKey),
			expectedErr: nil,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			actual, err := p.TransitionIssue(&InTransitionIssue{
				InstanceID:       testInstance1.InstanceID,
				mattermostUserID: "connected_user",
				IssueKey:         tt.issueKey,
				ToState:          tt.toState,
			})
			assert.Equal(t, tt.expectedMsg, actual)
			if tt.expectedErr != nil {
				// The original assert.Error(t, tt.expectedErr, err) passed the
				// EXPECTED error where the actual error belongs, so it only
				// asserted that expectedErr was non-nil (always true here) and
				// never inspected err. Compare the actual error message.
				assert.EqualError(t, err, tt.expectedErr.Error())
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestRouteIssueTransition exercises the issue-transition HTTP route with
// progressively more complete (but still invalid) request payloads and checks
// the returned status codes.
// NOTE(review): the assert message "no request data" is reused for every case
// and is misleading for most of them — consider using the subtest name.
func TestRouteIssueTransition(t *testing.T) {
	api := &plugintest.API{}
	api.On("LogWarn", mockAnythingOfTypeBatch("string", 13)...).Return(nil)
	api.On("LogDebug", mockAnythingOfTypeBatch("string", 11)...).Return(nil)
	api.On("SendEphemeralPost", mock.AnythingOfType("string"), mock.AnythingOfType("*model.Post")).Return(&model.Post{})
	p := Plugin{}
	p.initializeRouter()
	p.SetAPI(api)
	p.client = pluginapi.NewClient(api, p.Driver)
	p.userStore = getMockUserStoreKV()
	tests := map[string]struct {
		bb           []byte
		request      *model.PostActionIntegrationRequest
		expectedCode int
	}{
		"No request data": {
			request:      nil,
			expectedCode: http.StatusUnauthorized,
		},
		"No UserID": {
			request: &model.PostActionIntegrationRequest{
				UserId: "",
			},
			expectedCode: http.StatusUnauthorized,
		},
		"No issueKey": {
			request: &model.PostActionIntegrationRequest{
				UserId: "userID",
			},
			expectedCode: http.StatusInternalServerError,
		},
		"No selected_option": {
			request: &model.PostActionIntegrationRequest{
				UserId:  "userID",
				Context: map[string]interface{}{"issueKey": "Some-Key"},
			},
			expectedCode: http.StatusInternalServerError,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// Serialize the request (nil marshals to "null") and POST it to the route.
			bb, err := json.Marshal(tt.request)
			assert.Nil(t, err)
			request := httptest.NewRequest("POST", makeAPIRoute(routeIssueTransition), strings.NewReader(string(bb)))
			w := httptest.NewRecorder()
			p.ServeHTTP(&plugin.Context{}, w, request)
			assert.Equal(t, tt.expectedCode, w.Result().StatusCode, "no request data")
		})
	}
}
// TestRouteShareIssuePublicly exercises the share-publicly HTTP route, from
// missing/invalid payloads up to a fully valid request ("Happy Path"), and
// checks the returned status codes.
// NOTE(review): the assert message "no request data" is reused for every case
// and is misleading for most of them — consider using the subtest name.
func TestRouteShareIssuePublicly(t *testing.T) {
	validUserID := "1"
	api := &plugintest.API{}
	p := Plugin{}
	p.initializeRouter()
	api.On("SendEphemeralPost", mock.AnythingOfType("string"), mock.AnythingOfType("*model.Post")).Return(&model.Post{})
	api.On("LogWarn", mockAnythingOfTypeBatch("string", 13)...).Return(nil)
	api.On("LogDebug", mockAnythingOfTypeBatch("string", 11)...).Return(nil)
	api.On("CreatePost", mock.AnythingOfType("*model.Post")).Return(&model.Post{}, nil)
	api.On("DeleteEphemeralPost", validUserID, "").Return()
	p.SetAPI(api)
	p.client = pluginapi.NewClient(api, p.Driver)
	p.instanceStore = p.getMockInstanceStoreKV(1)
	p.userStore = getMockUserStoreKV()
	tests := map[string]struct {
		bb           []byte
		request      *model.PostActionIntegrationRequest
		expectedCode int
	}{
		"No request data": {
			request:      nil,
			expectedCode: http.StatusUnauthorized,
		},
		"No UserID": {
			request: &model.PostActionIntegrationRequest{
				UserId: "",
			},
			expectedCode: http.StatusUnauthorized,
		},
		"No issueKey": {
			request: &model.PostActionIntegrationRequest{
				UserId: "userID",
			},
			expectedCode: http.StatusInternalServerError,
		},
		"No instanceId": {
			request: &model.PostActionIntegrationRequest{
				UserId: "userID",
				Context: map[string]interface{}{
					"issue_key": "TEST-10",
				},
			},
			expectedCode: http.StatusInternalServerError,
		},
		"No connection": {
			request: &model.PostActionIntegrationRequest{
				UserId: "userID",
				Context: map[string]interface{}{
					"issue_key":   "TEST-10",
					"instance_id": "id",
				},
			},
			expectedCode: http.StatusInternalServerError,
		},
		"Happy Path": {
			request: &model.PostActionIntegrationRequest{
				UserId: validUserID,
				Context: map[string]interface{}{
					"issue_key":   "TEST-10",
					"instance_id": testInstance1.InstanceID.String(),
				},
			},
			expectedCode: http.StatusOK,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// Serialize the request (nil marshals to "null") and POST it to the route.
			bb, err := json.Marshal(tt.request)
			assert.Nil(t, err)
			request := httptest.NewRequest("POST", makeAPIRoute(routeSharePublicly), strings.NewReader(string(bb)))
			w := httptest.NewRecorder()
			p.ServeHTTP(&plugin.Context{}, w, request)
			assert.Equal(t, tt.expectedCode, w.Result().StatusCode, "no request data")
		})
	}
}
// TestRouteAttachCommentToIssue exercises the attach-comment HTTP route:
// method/auth validation, post and user lookup failures (driven by the mocked
// GetPost/GetUser IDs below), Jira-side comment failures (driven by the fake
// testClient's issue keys), and the success path.
// NOTE(review): the assert message "no request data" is reused for every case
// and is misleading for most of them — consider using the subtest name.
func TestRouteAttachCommentToIssue(t *testing.T) {
	api := &plugintest.API{}
	api.On("LogWarn", mockAnythingOfTypeBatch("string", 13)...).Return(nil)
	api.On("LogDebug", mockAnythingOfTypeBatch("string", 11)...).Return(nil)
	// Post/user fixtures: "error_post" fails lookup, "post_not_found" returns
	// no post, user "0" exists as a post author but fails GetUser, user "1" is
	// fully valid.
	api.On("GetPost", "error_post").Return(nil, &model.AppError{Id: "1"})
	api.On("GetPost", "post_not_found").Return(nil, (*model.AppError)(nil))
	api.On("GetPost", "0").Return(&model.Post{UserId: "0"}, (*model.AppError)(nil))
	api.On("GetUser", "0").Return(nil, &model.AppError{Id: "1"})
	api.On("GetPost", "1").Return(&model.Post{UserId: "1"}, (*model.AppError)(nil))
	api.On("GetUser", "1").Return(&model.User{Username: "username"}, (*model.AppError)(nil))
	api.On("CreatePost", mock.AnythingOfType("*model.Post")).Return(&model.Post{}, (*model.AppError)(nil))
	api.On("PublishWebSocketEvent", "update_defaults", mock.AnythingOfType("map[string]interface {}"), mock.AnythingOfType("*model.WebsocketBroadcast"))
	// requestStruct mirrors the JSON body the route expects.
	type requestStruct struct {
		PostID      string `json:"post_id"`
		InstanceID  string `json:"instance_id"`
		CurrentTeam string `json:"current_team"`
		IssueKey    string `json:"issueKey"`
	}
	tests := map[string]struct {
		method       string
		header       string // value for the Mattermost-User-Id header
		request      *requestStruct
		expectedCode int
	}{
		"Wrong method": {
			method:       "GET",
			header:       "",
			request:      &requestStruct{},
			expectedCode: http.StatusMethodNotAllowed,
		},
		"No header": {
			method:       "POST",
			header:       "",
			request:      &requestStruct{},
			expectedCode: http.StatusUnauthorized,
		},
		"User not found": {
			method:       "POST",
			header:       "nobody",
			request:      &requestStruct{},
			expectedCode: http.StatusInternalServerError,
		},
		"Failed to load post": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID: "error_post",
			},
			expectedCode: http.StatusInternalServerError,
		},
		"Post not found": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID: "post_not_found",
			},
			expectedCode: http.StatusInternalServerError,
		},
		"Post user not found": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID: "0",
			},
			expectedCode: http.StatusInternalServerError,
		},
		"No permissions to comment on issue": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID:   "1",
				IssueKey: noPermissionsIssueKey,
			},
			expectedCode: http.StatusInternalServerError,
		},
		"Failed to attach the comment": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID:   "1",
				IssueKey: attachCommentErrorKey,
			},
			expectedCode: http.StatusInternalServerError,
		},
		"Successfully created notification post": {
			method: "POST",
			header: "1",
			request: &requestStruct{
				PostID:   "1",
				IssueKey: existingIssueKey,
			},
			expectedCode: http.StatusOK,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// A fresh Plugin per subtest keeps config/store state isolated.
			p := Plugin{}
			p.initializeRouter()
			p.SetAPI(api)
			p.client = pluginapi.NewClient(api, p.Driver)
			p.updateConfig(func(conf *config) {
				conf.mattermostSiteURL = "https://somelink.com"
			})
			p.userStore = getMockUserStoreKV()
			p.instanceStore = p.getMockInstanceStoreKV(1)
			tt.request.InstanceID = testInstance1.InstanceID.String()
			bb, err := json.Marshal(tt.request)
			assert.Nil(t, err)
			request := httptest.NewRequest(tt.method, makeAPIRoute(routeAPIAttachCommentToIssue), strings.NewReader(string(bb)))
			request.Header.Add("Mattermost-User-Id", tt.header)
			w := httptest.NewRecorder()
			p.ServeHTTP(&plugin.Context{}, w, request)
			assert.Equal(t, tt.expectedCode, w.Result().StatusCode, "no request data")
		})
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.