_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5400
|
Email
|
train
|
// Email builds a random email address from a lower-cased first/last
// name, a short numeric suffix, and a randomly chosen domain.
func Email() string {
	name := strings.ToLower(FirstName(RandomGender) + LastName())
	suffix := StringNumberExt(1, "", 3)
	domain := randomFrom(jsonData.Domains)
	return name + suffix + "@" + domain
}
|
go
|
{
"resource": ""
}
|
q5401
|
Country
|
train
|
// Country returns a random country name in the format selected by
// countryStyle: FullCountry, TwoCharCountry, or ThreeCharCountry.
// Any other style value yields the empty string (matching the
// original's empty default case).
func Country(countryStyle int64) string {
	// Go switch cases do not fall through, so the original break
	// statements were redundant; returning directly also drops the
	// mutable accumulator variable.
	switch countryStyle {
	case FullCountry:
		return randomFrom(jsonData.Countries)
	case TwoCharCountry:
		return randomFrom(jsonData.CountriesTwoChars)
	case ThreeCharCountry:
		return randomFrom(jsonData.CountriesThreeChars)
	default:
		return ""
	}
}
|
go
|
{
"resource": ""
}
|
q5402
|
State
|
train
|
// State returns a random state name; pass Small to draw from the
// abbreviated-state list instead of the full-name list.
func State(typeOfState int) string {
	switch typeOfState {
	case Small:
		return randomFrom(jsonData.StatesSmall)
	default:
		return randomFrom(jsonData.States)
	}
}
|
go
|
{
"resource": ""
}
|
q5403
|
Street
|
train
|
func Street() string {
return fmt.Sprintf("%s %s", randomFrom(jsonData.People), randomFrom(jsonData.StreetTypes))
}
|
go
|
{
"resource": ""
}
|
q5404
|
StreetForCountry
|
train
|
func StreetForCountry(countrycode string) string {
switch countrycode {
case "US":
return Street()
case "GB":
return fmt.Sprintf("%s %s", randomFrom(jsonData.StreetNameGB), randomFrom(jsonData.StreetTypesGB))
}
return ""
}
|
go
|
{
"resource": ""
}
|
q5405
|
Address
|
train
|
// Address returns a random two-line US-style postal address:
// "<number> <street>,\n<city>, <state>, <zip>".
func Address() string {
	houseNumber := Number(100)
	return fmt.Sprintf("%d %s,\n%s, %s, %s",
		houseNumber, Street(), City(), State(Small), PostalCode("US"))
}
|
go
|
{
"resource": ""
}
|
q5406
|
StringSample
|
train
|
// StringSample returns one of the supplied strings chosen at random,
// or the empty string when none are given.
func StringSample(stringList ...string) string {
	if len(stringList) == 0 {
		return ""
	}
	return stringList[Number(0, len(stringList))]
}
|
go
|
{
"resource": ""
}
|
q5407
|
IpV4Address
|
train
|
// IpV4Address returns a random IPv4 address in dotted-decimal form.
func IpV4Address() string {
	blocks := make([]string, 0, 4)
	for i := 0; i < 4; i++ {
		// Intn(256) covers the full octet range 0-255; the previous
		// Intn(255) could never produce 255.
		octet := privateRand.Intn(256)
		blocks = append(blocks, strconv.Itoa(octet))
	}
	return strings.Join(blocks, ".")
}
|
go
|
{
"resource": ""
}
|
q5408
|
IpV6Address
|
train
|
// IpV6Address returns a random IPv6 address in the textual form
// produced by net.IP.String.
func IpV6Address() string {
	// Build all 16 bytes of the address at random. Intn(256) allows
	// each byte to take any value 0-255; the previous Intn(255)
	// excluded 0xFF.
	ip := make(net.IP, 0, net.IPv6len)
	for i := 0; i < net.IPv6len; i++ {
		ip = append(ip, uint8(privateRand.Intn(256)))
	}
	return ip.String()
}
|
go
|
{
"resource": ""
}
|
q5409
|
MacAddress
|
train
|
// MacAddress returns a random MAC address as six lowercase
// colon-separated hex octets.
func MacAddress() string {
	blocks := make([]string, 0, 6)
	for i := 0; i < 6; i++ {
		// Intn(256) spans the whole octet; Intn(255) could never
		// generate 0xFF.
		blocks = append(blocks, fmt.Sprintf("%02x", privateRand.Intn(256)))
	}
	return strings.Join(blocks, ":")
}
|
go
|
{
"resource": ""
}
|
q5410
|
FullDate
|
train
|
// FullDate returns a random date within the current year, formatted
// with DateOutputLayout.
func FullDate() string {
	year := time.Now().Year()
	month := Number(1, 13)
	// Day 0 of the following month normalizes to the last day of the
	// chosen month, which gives that month's length.
	lastDay := time.Date(year, time.Month(month+1), 0, 0, 0, 0, 0, time.UTC).Day()
	day := Number(1, lastDay+1)
	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(DateOutputLayout)
}
|
go
|
{
"resource": ""
}
|
q5411
|
update
|
train
|
// update pushes any pending advertising configuration to the HCI
// controller. Each pending command — advertising parameters, scan
// response, then advertising data — is sent via SendCmdWithAdvOff and
// cleared so it is not re-sent on a later call. On error the
// not-yet-sent commands remain pending.
func (d *device) update() error {
	if d.advParam != nil {
		if err := d.hci.SendCmdWithAdvOff(d.advParam); err != nil {
			return err
		}
		d.advParam = nil
	}
	if d.scanResp != nil {
		if err := d.hci.SendCmdWithAdvOff(d.scanResp); err != nil {
			return err
		}
		d.scanResp = nil
	}
	if d.advData != nil {
		if err := d.hci.SendCmdWithAdvOff(d.advData); err != nil {
			return err
		}
		d.advData = nil
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5412
|
LnxDeviceID
|
train
|
// LnxDeviceID returns an Option that selects HCI device n and sets
// whether the device should be checked for LE support.
func LnxDeviceID(n int, chk bool) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.devID = n
		dev.chkLE = chk
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5413
|
LnxMaxConnections
|
train
|
// LnxMaxConnections returns an Option that caps the number of
// concurrent connections at n.
func LnxMaxConnections(n int) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.maxConn = n
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5414
|
LnxSetAdvertisingEnable
|
train
|
// LnxSetAdvertisingEnable returns an Option that flushes any pending
// advertising configuration and then enables or disables advertising.
func LnxSetAdvertisingEnable(en bool) Option {
	return func(d Device) error {
		dev := d.(*device)
		if dev == nil {
			return errors.New("device is not initialized")
		}
		// Push pending adv parameters/data before toggling.
		if err := dev.update(); err != nil {
			return err
		}
		return dev.hci.SetAdvertiseEnable(en)
	}
}
|
go
|
{
"resource": ""
}
|
q5415
|
LnxSetAdvertisingData
|
train
|
// LnxSetAdvertisingData returns an Option that stages c as the
// pending advertising-data command (sent on the next update).
func LnxSetAdvertisingData(c *cmd.LESetAdvertisingData) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.advData = c
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5416
|
LnxSetScanResponseData
|
train
|
// LnxSetScanResponseData returns an Option that stages c as the
// pending scan-response command (sent on the next update).
func LnxSetScanResponseData(c *cmd.LESetScanResponseData) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.scanResp = c
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5417
|
LnxSetAdvertisingParameters
|
train
|
// LnxSetAdvertisingParameters returns an Option that stages c as the
// pending advertising-parameters command (sent on the next update).
func LnxSetAdvertisingParameters(c *cmd.LESetAdvertisingParameters) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.advParam = c
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5418
|
LnxSendHCIRawCommand
|
train
|
// LnxSendHCIRawCommand returns an Option that sends a raw HCI command
// and, when rsp is non-nil, copies the controller's response into rsp.
func LnxSendHCIRawCommand(c cmd.CmdParam, rsp io.Writer) Option {
	return func(d Device) error {
		b, err := d.(*device).SendHCIRawCommand(c)
		if err != nil {
			return err
		}
		if rsp == nil {
			return nil
		}
		// The original discarded the Write error; surface it so a
		// failed response copy is not silently ignored.
		if _, werr := rsp.Write(b); werr != nil {
			return werr
		}
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5419
|
MacDeviceRole
|
train
|
// MacDeviceRole returns an Option that sets the device role
// (central/peripheral selector used by the darwin implementation).
func MacDeviceRole(r int) Option {
	return func(d Device) error {
		dev := d.(*device)
		dev.role = r
		return nil
	}
}
|
go
|
{
"resource": ""
}
|
q5420
|
IoW
|
train
|
// IoW builds an ioctl request number for a write-direction ioctl with
// the given type, number, and argument size.
func IoW(t, nr, size uintptr) uintptr {
	return ioc(directionWrite, t, nr, size)
}
|
go
|
{
"resource": ""
}
|
q5421
|
IoRW
|
train
|
// IoRW builds an ioctl request number for a read/write ioctl with the
// given type, number, and argument size.
func IoRW(t, nr, size uintptr) uintptr {
	return ioc(directionRead|directionWrite, t, nr, size)
}
|
go
|
{
"resource": ""
}
|
q5422
|
NewCharacteristic
|
train
|
// NewCharacteristic creates a characteristic with the given UUID,
// parent service, properties, declaration handle h, and value
// handle vh.
func NewCharacteristic(u UUID, s *Service, props Property, h uint16, vh uint16) *Characteristic {
	return &Characteristic{
		uuid:  u,
		svc:   s,
		props: props,
		h:     h,
		vh:    vh,
	}
}
|
go
|
{
"resource": ""
}
|
q5423
|
NewDescriptor
|
train
|
// NewDescriptor creates a descriptor with the given UUID and handle,
// attached to the given characteristic.
func NewDescriptor(u UUID, h uint16, char *Characteristic) *Descriptor {
	return &Descriptor{
		uuid: u,
		h:    h,
		char: char,
	}
}
|
go
|
{
"resource": ""
}
|
q5424
|
xpcToGo
|
train
|
// xpcToGo converts an XPC object into the corresponding Go value:
// arrays to Array, data to []byte, dictionaries to Dict, int64s,
// strings, and UUIDs. Any other XPC type aborts the process via
// log.Fatalf, so the trailing return nil is unreachable in practice.
func xpcToGo(v C.xpc_object_t) interface{} {
	t := C.xpc_get_type(v)
	switch t {
	case C.TYPE_ARRAY:
		// The C helper fills the pre-sized Go slice element by element.
		a := make(Array, C.int(C.xpc_array_get_count(v)))
		C.XpcArrayApply(unsafe.Pointer(&a), v)
		return a
	case C.TYPE_DATA:
		return C.GoBytes(C.xpc_data_get_bytes_ptr(v), C.int(C.xpc_data_get_length(v)))
	case C.TYPE_DICT:
		d := make(Dict)
		C.XpcDictApply(unsafe.Pointer(&d), v)
		return d
	case C.TYPE_INT64:
		return int64(C.xpc_int64_get_value(v))
	case C.TYPE_STRING:
		return C.GoString(C.xpc_string_get_string_ptr(v))
	case C.TYPE_UUID:
		// UUIDs are always 16 bytes; copy them out of C memory.
		a := [16]byte{}
		C.XpcUUIDGetBytes(unsafe.Pointer(&a), v)
		return UUID(a[:])
	default:
		log.Fatalf("unexpected type %#v, value %#v", t, v)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5425
|
unmarshall
|
train
|
// unmarshall parses raw advertising data — a sequence of
// <length><type><value...> records — into the Advertisement fields.
// It returns an error on truncated records; unknown record types are
// logged and skipped.
func (a *Advertisement) unmarshall(b []byte) error {
	// uuidList appends fixed-width (w-byte) UUIDs parsed from d to u.
	// Trailing bytes shorter than w are ignored rather than panicking.
	uuidList := func(u []UUID, d []byte, w int) []UUID {
		for len(d) >= w {
			u = append(u, UUID{d[:w]})
			d = d[w:]
		}
		return u
	}
	for len(b) > 0 {
		if len(b) < 2 {
			return errors.New("invalid advertise data")
		}
		l, t := b[0], b[1]
		if l == 0 {
			// A zero length byte marks the end of significant data;
			// the original sliced b[2:1] here and panicked.
			break
		}
		// Convert l to int before adding: with byte arithmetic,
		// l == 255 made 1+l overflow to 0 and defeated this check.
		if len(b) < 1+int(l) {
			return errors.New("invalid advertise data")
		}
		d := b[2 : 1+int(l)]
		switch t {
		case typeFlags:
			// TODO: should we do anything about the discoverability here?
		case typeSomeUUID16:
			a.Services = uuidList(a.Services, d, 2)
		case typeAllUUID16:
			a.Services = uuidList(a.Services, d, 2)
		case typeSomeUUID32:
			a.Services = uuidList(a.Services, d, 4)
		case typeAllUUID32:
			a.Services = uuidList(a.Services, d, 4)
		case typeSomeUUID128:
			a.Services = uuidList(a.Services, d, 16)
		case typeAllUUID128:
			a.Services = uuidList(a.Services, d, 16)
		case typeShortName:
			a.LocalName = string(d)
		case typeCompleteName:
			a.LocalName = string(d)
		case typeTxPower:
			// Guard against an empty payload (the original indexed
			// d[0] unconditionally).
			// NOTE(review): this keeps the original unsigned
			// interpretation of the power byte; confirm whether
			// int(int8(d[0])) is intended, as BLE encodes TX power
			// as a signed byte.
			if len(d) > 0 {
				a.TxPowerLevel = int(d[0])
			}
		case typeServiceSol16:
			a.SolicitedService = uuidList(a.SolicitedService, d, 2)
		case typeServiceSol128:
			a.SolicitedService = uuidList(a.SolicitedService, d, 16)
		case typeServiceSol32:
			a.SolicitedService = uuidList(a.SolicitedService, d, 4)
		case typeManufacturerData:
			a.ManufacturerData = make([]byte, len(d))
			copy(a.ManufacturerData, d)
		// case typeServiceData16,
		// case typeServiceData32,
		// case typeServiceData128:
		default:
			log.Printf("DATA: [ % X ]", d)
		}
		b = b[1+int(l):]
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5426
|
Bytes
|
train
|
// Bytes returns the packet contents as a fixed 31-byte array,
// zero-padded past the current length.
func (a *AdvPacket) Bytes() [31]byte {
	var out [31]byte
	copy(out[:], a.b)
	return out
}
|
go
|
{
"resource": ""
}
|
q5427
|
Len
|
train
|
// Len returns the packet length, capped at the 31-byte advertising
// payload maximum.
func (a *AdvPacket) Len() int {
	n := len(a.b)
	if n > 31 {
		n = 31
	}
	return n
}
|
go
|
{
"resource": ""
}
|
q5428
|
AppendFlags
|
train
|
// AppendFlags appends a one-byte flags field to the packet and
// returns the packet for chaining.
func (a *AdvPacket) AppendFlags(f byte) *AdvPacket {
	return a.AppendField(typeFlags, []byte{f})
}
|
go
|
{
"resource": ""
}
|
q5429
|
AppendName
|
train
|
// AppendName appends the device name to the packet, using the
// complete-name field type when it fits and the shortened-name type
// otherwise (2 bytes of field overhead are accounted for).
func (a *AdvPacket) AppendName(n string) *AdvPacket {
	if len(a.b)+2+len(n) <= MaxEIRPacketLength {
		return a.AppendField(byte(typeCompleteName), []byte(n))
	}
	return a.AppendField(byte(typeShortName), []byte(n))
}
|
go
|
{
"resource": ""
}
|
q5430
|
AppendManufacturerData
|
train
|
// AppendManufacturerData appends a manufacturer-specific data field:
// the company identifier in little-endian order followed by b.
func (a *AdvPacket) AppendManufacturerData(id uint16, b []byte) *AdvPacket {
	d := make([]byte, 0, 2+len(b))
	d = append(d, byte(id), byte(id>>8))
	d = append(d, b...)
	return a.AppendField(typeManufacturerData, d)
}
|
go
|
{
"resource": ""
}
|
q5431
|
AppendUUIDFit
|
train
|
// AppendUUIDFit appends as many of the given service UUIDs as fit in
// the packet, skipping the reserved GAP and GATT UUIDs. It reports
// whether ALL (non-reserved) UUIDs fit; when they do the "complete
// list" field types are used, otherwise the "incomplete list" types.
//
// NOTE(review): only 2- and 16-byte UUIDs are appended — a 4-byte
// UUID passes the fit accounting but matches no case in the switch
// and is silently dropped. Confirm whether 32-bit UUIDs need a
// typeAllUUID32/typeSomeUUID32 case here.
func (a *AdvPacket) AppendUUIDFit(uu []UUID) bool {
	// Iterate all UUIDs to see if they fit in the packet or not.
	fit, l := true, len(a.b)
	for _, u := range uu {
		if u.Equal(attrGAPUUID) || u.Equal(attrGATTUUID) {
			continue
		}
		l += 2 + u.Len()
		if l > MaxEIRPacketLength {
			fit = false
			break
		}
	}
	// Append the UUIDs until they no longer fit.
	for _, u := range uu {
		if u.Equal(attrGAPUUID) || u.Equal(attrGATTUUID) {
			continue
		}
		if len(a.b)+2+u.Len() > MaxEIRPacketLength {
			break
		}
		switch l = u.Len(); {
		case l == 2 && fit:
			a.AppendField(typeAllUUID16, u.b)
		case l == 16 && fit:
			a.AppendField(typeAllUUID128, u.b)
		case l == 2 && !fit:
			a.AppendField(typeSomeUUID16, u.b)
		case l == 16 && !fit:
			a.AppendField(typeSomeUUID128, u.b)
		}
	}
	return fit
}
|
go
|
{
"resource": ""
}
|
q5432
|
Close
|
train
|
// Close disconnects the L2CAP connection by sending an HCI Disconnect
// command (reason 0x13: remote user terminated). It is a no-op when
// the connection handle is no longer tracked by the HCI layer.
func (c *conn) Close() error {
	h := c.hci
	hh := c.attr
	h.connsmu.Lock()
	defer h.connsmu.Unlock()
	_, found := h.conns[hh]
	if !found {
		// log.Printf("l2conn: 0x%04x already disconnected", hh)
		return nil
	}
	// NOTE(review): h.c.Send returns (error, something) in that
	// unusual order per this project's API; the second value is
	// deliberately discarded here.
	if err, _ := h.c.Send(cmd.Disconnect{ConnectionHandle: hh, Reason: 0x13}); err != nil {
		return fmt.Errorf("l2conn: failed to disconnect, %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5433
|
handleSignal
|
train
|
// handleSignal logs and otherwise ignores an incoming L2CAP signaling
// packet. It never returns an error.
func (c *conn) handleSignal(a *aclData) error {
	log.Printf("ignore l2cap signal:[ % X ]", a.b)
	// FIXME: handle LE signaling channel (CID: 5)
	return nil
}
|
go
|
{
"resource": ""
}
|
q5434
|
ParseUUID
|
train
|
// ParseUUID parses a hex UUID string (dashes allowed, ignored) into a
// UUID with its bytes stored in reversed (little-endian) order. It
// returns an error for invalid hex or an unsupported length.
func ParseUUID(s string) (UUID, error) {
	b, err := hex.DecodeString(strings.Replace(s, "-", "", -1))
	if err != nil {
		return UUID{}, err
	}
	if err := lenErr(len(b)); err != nil {
		return UUID{}, err
	}
	return UUID{reverse(b)}, nil
}
|
go
|
{
"resource": ""
}
|
q5435
|
MustParseUUID
|
train
|
// MustParseUUID is like ParseUUID but panics on error. Intended for
// package-level initialization of known-good UUID literals.
func MustParseUUID(s string) UUID {
	u, err := ParseUUID(s)
	if err != nil {
		panic(err)
	}
	return u
}
|
go
|
{
"resource": ""
}
|
q5436
|
Equal
|
train
|
// Equal reports whether two UUIDs have identical bytes.
func (u UUID) Equal(v UUID) bool {
	return bytes.Equal(u.b, v.b)
}
|
go
|
{
"resource": ""
}
|
q5437
|
reverse
|
train
|
// reverse returns a new slice containing the bytes of u in reverse
// order; u itself is not modified.
func reverse(u []byte) []byte {
	l := len(u)
	// Special-case 16 bit UUIDS for speed.
	if l == 2 {
		return []byte{u[1], u[0]}
	}
	b := make([]byte, l)
	// A plain reversed copy replaces the original swap loop, which
	// ran to l/2+1 and redundantly rewrote the middle elements.
	for i, v := range u {
		b[l-1-i] = v
	}
	return b
}
|
go
|
{
"resource": ""
}
|
q5438
|
Handle
|
train
|
// Handle applies each registered Handler to the device in order.
func (d *device) Handle(hh ...Handler) {
	for i := range hh {
		hh[i](d)
	}
}
|
go
|
{
"resource": ""
}
|
q5439
|
CentralConnected
|
train
|
// CentralConnected returns a Handler that installs f as the callback
// invoked when a central connects to the device.
func CentralConnected(f func(Central)) Handler {
	return func(d Device) { d.(*device).centralConnected = f }
}
|
go
|
{
"resource": ""
}
|
q5440
|
CentralDisconnected
|
train
|
// CentralDisconnected returns a Handler that installs f as the
// callback invoked when a central disconnects from the device.
func CentralDisconnected(f func(Central)) Handler {
	return func(d Device) { d.(*device).centralDisconnected = f }
}
|
go
|
{
"resource": ""
}
|
q5441
|
PeripheralDiscovered
|
train
|
// PeripheralDiscovered returns a Handler that installs f as the
// callback invoked for each discovered peripheral, with its
// advertisement and RSSI.
func PeripheralDiscovered(f func(Peripheral, *Advertisement, int)) Handler {
	return func(d Device) { d.(*device).peripheralDiscovered = f }
}
|
go
|
{
"resource": ""
}
|
q5442
|
PeripheralConnected
|
train
|
// PeripheralConnected returns a Handler that installs f as the
// callback invoked when a peripheral connection completes.
func PeripheralConnected(f func(Peripheral, error)) Handler {
	return func(d Device) { d.(*device).peripheralConnected = f }
}
|
go
|
{
"resource": ""
}
|
q5443
|
PeripheralDisconnected
|
train
|
// PeripheralDisconnected returns a Handler that installs f as the
// callback invoked when a peripheral disconnects.
func PeripheralDisconnected(f func(Peripheral, error)) Handler {
	return func(d Device) { d.(*device).peripheralDisconnected = f }
}
|
go
|
{
"resource": ""
}
|
q5444
|
Option
|
train
|
// Option applies each option to the device in order.
//
// NOTE(review): only the error from the LAST option is returned;
// earlier failures are overwritten and application does not stop at
// the first error. Confirm this is intentional before relying on the
// returned error.
func (d *device) Option(opts ...Option) error {
	var err error
	for _, opt := range opts {
		err = opt(d)
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q5445
|
At
|
train
|
// At returns the attribute stored at handle h, with ok reporting
// whether the handle lies inside the range.
func (r *attrRange) At(h uint16) (a attr, ok bool) {
	if i := r.idx(int(h)); i >= 0 {
		return r.aa[i], true
	}
	return attr{}, false
}
|
go
|
{
"resource": ""
}
|
q5446
|
Chunk
|
train
|
// Chunk starts a new chunked write. It panics if a chunk is already
// in progress; the chunk buffer is lazily allocated to MTU capacity
// on first use and reused afterwards.
func (w *l2capWriter) Chunk() {
	if w.chunked {
		panic("l2capWriter: chunk called twice without committing")
	}
	if w.chunk == nil {
		w.chunk = make([]byte, 0, w.mtu)
	}
	w.chunked = true
}
|
go
|
{
"resource": ""
}
|
q5447
|
Commit
|
train
|
// Commit appends the pending chunk to the output buffer if the whole
// chunk fits within the MTU, reporting whether it did. The chunk is
// discarded either way and the chunked state is cleared. Panics if no
// chunk is in progress.
func (w *l2capWriter) Commit() bool {
	if !w.chunked {
		panic("l2capWriter: commit without starting a chunk")
	}
	success := len(w.b)+len(w.chunk) <= w.mtu
	if success {
		w.b = append(w.b, w.chunk...)
	}
	w.chunk = w.chunk[:0]
	w.chunked = false
	return success
}
|
go
|
{
"resource": ""
}
|
q5448
|
CommitFit
|
train
|
// CommitFit appends as much of the pending chunk as fits within the
// MTU, silently truncating the rest. The chunk is then discarded and
// the chunked state cleared. Panics if no chunk is in progress.
func (w *l2capWriter) CommitFit() {
	if !w.chunked {
		panic("l2capWriter: CommitFit without starting a chunk")
	}
	n := w.mtu - len(w.b)
	if n > len(w.chunk) {
		n = len(w.chunk)
	}
	w.b = append(w.b, w.chunk[:n]...)
	w.chunk = w.chunk[:0]
	w.chunked = false
}
|
go
|
{
"resource": ""
}
|
q5449
|
WriteByteFit
|
train
|
// WriteByteFit writes a single byte if it fits, reporting success.
func (w *l2capWriter) WriteByteFit(b byte) bool {
	return w.WriteFit([]byte{b})
}
|
go
|
{
"resource": ""
}
|
q5450
|
Writeable
|
train
|
// Writeable reports how many bytes of b could be written after
// reserving pad bytes. During a chunked write the chunk buffer is
// unbounded, so all of b is writeable.
func (w *l2capWriter) Writeable(pad int, b []byte) int {
	if w.chunked {
		return len(b)
	}
	avail := w.mtu - len(w.b) - pad
	switch {
	case avail >= len(b):
		return len(b)
	case avail < 0:
		return 0
	default:
		return avail
	}
}
|
go
|
{
"resource": ""
}
|
q5451
|
ChunkSeek
|
train
|
// ChunkSeek discards the first offset bytes of the pending chunk,
// reporting whether the chunk was long enough; a too-short chunk is
// emptied and false is returned. Panics if no chunk is in progress.
func (w *l2capWriter) ChunkSeek(offset uint16) bool {
	if !w.chunked {
		panic("l2capWriter: ChunkSeek requested without chunked write in progress")
	}
	if int(offset) > len(w.chunk) {
		w.chunk = w.chunk[:0]
		return false
	}
	w.chunk = w.chunk[offset:]
	return true
}
|
go
|
{
"resource": ""
}
|
q5452
|
Validate
|
train
|
func Validate(passcode string, counter uint64, secret string) bool {
rv, _ := ValidateCustom(
passcode,
counter,
secret,
ValidateOpts{
Digits: otp.DigitsSix,
Algorithm: otp.AlgorithmSHA1,
},
)
return rv
}
|
go
|
{
"resource": ""
}
|
q5453
|
GenerateCode
|
train
|
func GenerateCode(secret string, counter uint64) (string, error) {
return GenerateCodeCustom(secret, counter, ValidateOpts{
Digits: otp.DigitsSix,
Algorithm: otp.AlgorithmSHA1,
})
}
|
go
|
{
"resource": ""
}
|
q5454
|
GenerateCodeCustom
|
train
|
// GenerateCodeCustom produces an HOTP passcode for the given base32
// secret and counter using the digit count and HMAC algorithm from
// opts. The secret is whitespace-trimmed, re-padded to a multiple of
// 8 characters, and upper-cased before decoding; invalid base32
// yields otp.ErrValidateSecretInvalidBase32.
func GenerateCodeCustom(secret string, counter uint64, opts ValidateOpts) (passcode string, err error) {
	// As noted in issue #10 and #17 this adds support for TOTP secrets that are
	// missing their padding.
	secret = strings.TrimSpace(secret)
	if n := len(secret) % 8; n != 0 {
		secret = secret + strings.Repeat("=", 8-n)
	}
	// As noted in issue #24 Google has started producing base32 in lower case,
	// but the StdEncoding (and the RFC), expect a dictionary of only upper case letters.
	secret = strings.ToUpper(secret)
	secretBytes, err := base32.StdEncoding.DecodeString(secret)
	if err != nil {
		return "", otp.ErrValidateSecretInvalidBase32
	}
	// HMAC the big-endian 8-byte counter with the decoded secret.
	buf := make([]byte, 8)
	mac := hmac.New(opts.Algorithm.Hash, secretBytes)
	binary.BigEndian.PutUint64(buf, counter)
	if debug {
		fmt.Printf("counter=%v\n", counter)
		fmt.Printf("buf=%v\n", buf)
	}
	mac.Write(buf)
	sum := mac.Sum(nil)
	// "Dynamic truncation" in RFC 4226
	// http://tools.ietf.org/html/rfc4226#section-5.4
	// The low nibble of the last digest byte selects a 4-byte window;
	// the top bit of that window is masked off to avoid sign issues.
	offset := sum[len(sum)-1] & 0xf
	value := int64(((int(sum[offset]) & 0x7f) << 24) |
		((int(sum[offset+1] & 0xff)) << 16) |
		((int(sum[offset+2] & 0xff)) << 8) |
		(int(sum[offset+3]) & 0xff))
	// Reduce modulo 10^digits and left-pad to the configured width.
	l := opts.Digits.Length()
	mod := int32(value % int64(math.Pow10(l)))
	if debug {
		fmt.Printf("offset=%v\n", offset)
		fmt.Printf("value=%v\n", value)
		fmt.Printf("mod'ed=%v\n", mod)
	}
	return opts.Digits.Format(mod), nil
}
|
go
|
{
"resource": ""
}
|
q5455
|
Generate
|
train
|
// Generate creates a new HOTP key (otpauth://hotp/... URL) with a
// freshly generated random secret. Issuer and AccountName are
// required; SecretSize defaults to 10 bytes and Digits to six.
//
// NOTE(review): opts.Algorithm is not defaulted here (unlike Digits);
// the zero value of the Algorithm type is used as-is — confirm that
// maps to the intended default. Escaping of Issuer/AccountName in the
// path is performed by url.URL.String() when the URL is rendered.
func Generate(opts GenerateOpts) (*otp.Key, error) {
	// url encode the Issuer/AccountName
	if opts.Issuer == "" {
		return nil, otp.ErrGenerateMissingIssuer
	}
	if opts.AccountName == "" {
		return nil, otp.ErrGenerateMissingAccountName
	}
	if opts.SecretSize == 0 {
		opts.SecretSize = 10
	}
	if opts.Digits == 0 {
		opts.Digits = otp.DigitsSix
	}
	// otpauth://totp/Example:alice@google.com?secret=JBSWY3DPEHPK3PXP&issuer=Example
	v := url.Values{}
	secret := make([]byte, opts.SecretSize)
	_, err := rand.Read(secret)
	if err != nil {
		return nil, err
	}
	// Base32 padding is stripped from the secret, as authenticator
	// apps expect it without "=".
	v.Set("secret", strings.TrimRight(base32.StdEncoding.EncodeToString(secret), "="))
	v.Set("issuer", opts.Issuer)
	v.Set("algorithm", opts.Algorithm.String())
	v.Set("digits", opts.Digits.String())
	u := url.URL{
		Scheme:   "otpauth",
		Host:     "hotp",
		Path:     "/" + opts.Issuer + ":" + opts.AccountName,
		RawQuery: v.Encode(),
	}
	return otp.NewKeyFromURL(u.String())
}
|
go
|
{
"resource": ""
}
|
q5456
|
Validate
|
train
|
func Validate(passcode string, secret string) bool {
rv, _ := ValidateCustom(
passcode,
secret,
time.Now().UTC(),
ValidateOpts{
Period: 30,
Skew: 1,
Digits: otp.DigitsSix,
Algorithm: otp.AlgorithmSHA1,
},
)
return rv
}
|
go
|
{
"resource": ""
}
|
q5457
|
GenerateCode
|
train
|
func GenerateCode(secret string, t time.Time) (string, error) {
return GenerateCodeCustom(secret, t, ValidateOpts{
Period: 30,
Skew: 1,
Digits: otp.DigitsSix,
Algorithm: otp.AlgorithmSHA1,
})
}
|
go
|
{
"resource": ""
}
|
q5458
|
Issuer
|
train
|
// Issuer returns the issuing organization: the "issuer" query
// parameter when present, otherwise the part of the URL path before
// the first colon, or "" when neither is available.
func (k *Key) Issuer() string {
	if issuer := k.url.Query().Get("issuer"); issuer != "" {
		return issuer
	}
	p := strings.TrimPrefix(k.url.Path, "/")
	if i := strings.Index(p, ":"); i >= 0 {
		return p[:i]
	}
	return ""
}
|
go
|
{
"resource": ""
}
|
q5459
|
AccountName
|
train
|
// AccountName returns the account part of the key's URL path: the
// text after the first colon, or the whole (slash-trimmed) path when
// no colon is present.
func (k *Key) AccountName() string {
	p := strings.TrimPrefix(k.url.Path, "/")
	if i := strings.Index(p, ":"); i >= 0 {
		return p[i+1:]
	}
	return p
}
|
go
|
{
"resource": ""
}
|
q5460
|
Secret
|
train
|
// Secret returns the opaque secret from the key URL's "secret" query
// parameter ("" when absent).
func (k *Key) Secret() string {
	q := k.url.Query()
	return q.Get("secret")
}
|
go
|
{
"resource": ""
}
|
q5461
|
Format
|
train
|
// Format renders in as a zero-padded decimal string whose width is
// the digit count d.
func (d Digits) Format(in int32) string {
	// A single Sprintf with a dynamic '*' width replaces the original
	// two-pass Sprintf-built format string. The int conversion is
	// required: fmt's '*' width verb only accepts a plain int.
	return fmt.Sprintf("%0*d", int(d), in)
}
|
go
|
{
"resource": ""
}
|
q5462
|
build
|
train
|
// build runs the user-supplied build command and reports whether it
// succeeded. An empty -build flag is treated as an immediate success.
func build() bool {
	log.Println(okColor("Running build command!"))
	// Check the flag itself: the original tested len(args) == 0 after
	// strings.Split, which is never true (Split of "" returns [""]),
	// so an empty command string was passed to exec.Command.
	if *flag_build == "" {
		return true
	}
	args := strings.Split(*flag_build, " ")
	cmd := exec.Command(args[0], args[1:]...)
	if *flag_build_dir != "" {
		cmd.Dir = *flag_build_dir
	} else {
		cmd.Dir = *flag_directory
	}
	output, err := cmd.CombinedOutput()
	if err == nil {
		log.Println(okColor("Build ok."))
	} else {
		log.Println(failColor("Error while building:\n"), failColor(string(output)))
	}
	return err == nil
}
|
go
|
{
"resource": ""
}
|
q5463
|
builder
|
train
|
// builder debounces incoming file-change jobs: every job re-arms a
// WorkDelay-millisecond timer, and only when the stream has been
// quiet for that long is a build started. The path of the last event
// and the build result are published on buildStarted and buildDone.
func builder(jobs <-chan string, buildStarted chan<- string, buildDone chan<- bool) {
	armTimer := func() <-chan time.Time {
		return time.After(time.Duration(WorkDelay * time.Millisecond))
	}
	var (
		quiet     = armTimer()
		eventPath string
	)
	for {
		select {
		case eventPath = <-jobs:
			// New activity: restart the quiet-period timer.
			quiet = armTimer()
		case <-quiet:
			buildStarted <- eventPath
			buildDone <- build()
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5464
|
startCommand
|
train
|
// startCommand launches command (split on single spaces; quoting is
// not supported) and returns the running Cmd together with its stdout
// and stderr pipes. On error, whatever values were populated before
// the failure are returned alongside the wrapped error.
func startCommand(command string) (cmd *exec.Cmd, stdout io.ReadCloser, stderr io.ReadCloser, err error) {
	args := strings.Split(command, " ")
	cmd = exec.Command(args[0], args[1:]...)
	if *flag_run_dir != "" {
		cmd.Dir = *flag_run_dir
	}
	stdout, err = cmd.StdoutPipe()
	if err != nil {
		return cmd, stdout, stderr, fmt.Errorf("can't get stdout pipe for command: %s", err)
	}
	stderr, err = cmd.StderrPipe()
	if err != nil {
		return cmd, stdout, stderr, fmt.Errorf("can't get stderr pipe for command: %s", err)
	}
	if err = cmd.Start(); err != nil {
		return cmd, stdout, stderr, fmt.Errorf("can't start command: %s", err)
	}
	return cmd, stdout, stderr, nil
}
|
go
|
{
"resource": ""
}
|
q5465
|
runner
|
train
|
// runner restarts the user command after each successful build. It
// receives the triggering path on buildStarted and the build outcome
// on buildSuccess; depending on -command-stop it either waits for the
// build before killing the old process, or kills first and then
// waits. A signal handler goroutine kills the current process and
// exits the program on a fatal signal. This function never returns.
func runner(commandTemplate string, buildStarted <-chan string, buildSuccess <-chan bool) {
	var currentProcess *os.Process
	pipeChan := make(chan io.ReadCloser)
	go logger(pipeChan)
	go func() {
		sigChan := make(chan os.Signal, 1)
		signal.Notify(sigChan, fatalSignals...)
		<-sigChan
		log.Println(okColor("Received signal, terminating cleanly."))
		if currentProcess != nil {
			killProcess(currentProcess)
		}
		os.Exit(0)
	}()
	for {
		eventPath := <-buildStarted
		// append %0.s to use format specifier even if not supplied by user
		// to suppress warning in returned string.
		command := fmt.Sprintf("%0.s"+commandTemplate, eventPath)
		// Default mode: wait for the build result BEFORE killing the
		// running command, so a failed build leaves it running.
		if !*flag_command_stop {
			if !<-buildSuccess {
				continue
			}
		}
		if currentProcess != nil {
			killProcess(currentProcess)
		}
		// -command-stop mode: the old command is stopped first, then
		// we wait for the build; on failure nothing is restarted.
		if *flag_command_stop {
			log.Println(okColor("Command stopped. Waiting for build to complete."))
			if !<-buildSuccess {
				continue
			}
		}
		log.Println(okColor("Restarting the given command."))
		cmd, stdoutPipe, stderrPipe, err := startCommand(command)
		if err != nil {
			log.Fatal(failColor("Could not start command: %s", err))
		}
		// Hand both pipes to the logger goroutine, then remember the
		// process so it can be killed on the next rebuild or signal.
		pipeChan <- stdoutPipe
		pipeChan <- stderrPipe
		currentProcess = cmd.Process
	}
}
|
go
|
{
"resource": ""
}
|
q5466
|
Less
|
train
|
// Less orders the list for sort.Sort: entries with an empty Str come
// first; otherwise entries are compared by Str, with Name (ascending)
// as the tie-breaker.
//
// NOTE(review): for two non-empty Str values the comparison returns
// true when f[i].Str > f[j].Str, i.e. Str sorts in DESCENDING order
// while Name sorts ascending — confirm this asymmetry is intended.
func (f FunctionNameList) Less(i, j int) bool {
	if f[i].Str == "" && f[j].Str != "" {
		return true
	}
	if f[i].Str != "" && f[j].Str == "" {
		return false
	}
	if f[i].Str != "" && f[j].Str != "" {
		if f[i].Str > f[j].Str {
			return true
		} else if f[i].Str < f[j].Str {
			return false
		}
	}
	return f[i].Name < f[j].Name
}
|
go
|
{
"resource": ""
}
|
q5467
|
NewWriter
|
train
|
// NewWriter creates an Avro container-file writer that buffers
// recordsPerBlock records per block, compressing block contents with
// the given codec (Deflate, Snappy, or none). The container header
// for schema is written immediately.
func NewWriter(writer io.Writer, codec Codec, recordsPerBlock int64, schema string) (*Writer, error) {
	avroWriter := &Writer{
		writer:          writer,
		syncMarker:      [16]byte{'g', 'o', 'g', 'e', 'n', 'a', 'v', 'r', 'o', 'm', 'a', 'g', 'i', 'c', '1', '0'},
		codec:           codec,
		recordsPerBlock: recordsPerBlock,
		blockBuffer:     bytes.NewBuffer(nil),
	}
	// Route record serialization through the codec's writer; the
	// uncompressed case writes straight into the block buffer.
	switch codec {
	case Deflate:
		fw, err := flate.NewWriter(avroWriter.blockBuffer, flate.DefaultCompression)
		if err != nil {
			return nil, err
		}
		avroWriter.compressedWriter = fw
	case Snappy:
		avroWriter.compressedWriter = newSnappyWriter(avroWriter.blockBuffer)
	default:
		avroWriter.compressedWriter = avroWriter.blockBuffer
	}
	if err := avroWriter.writeHeader(schema); err != nil {
		return nil, err
	}
	return avroWriter, nil
}
|
go
|
{
"resource": ""
}
|
q5468
|
WriteRecord
|
train
|
// WriteRecord serializes one record into the current block and
// flushes the block once it reaches the configured record count.
func (avroWriter *Writer) WriteRecord(record AvroRecord) error {
	// Serialize through the (possibly compressing) block writer.
	if err := record.Serialize(avroWriter.compressedWriter); err != nil {
		return err
	}
	avroWriter.nextBlockRecords++
	// Block full? Emit it now.
	if avroWriter.nextBlockRecords >= avroWriter.recordsPerBlock {
		return avroWriter.Flush()
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5469
|
Flush
|
train
|
// Flush writes all buffered records out as a single Avro container
// block, then resets the buffer and record count. It is a no-op when
// no records are pending; call it before closing the destination so
// the final partial block is written.
func (avroWriter *Writer) Flush() error {
	if avroWriter.nextBlockRecords == 0 {
		return nil
	}
	// Finalize the compressor so every compressed byte reaches the
	// block buffer, then re-arm it for the next block. The original
	// ignored the Close error, which can hide truncated output.
	if cw, ok := avroWriter.compressedWriter.(CloseableResettableWriter); ok {
		if err := cw.Close(); err != nil {
			return err
		}
		cw.Reset(avroWriter.blockBuffer)
	}
	// The early return above guarantees nextBlockRecords > 0, so the
	// original inner recount check was redundant.
	block := &avro.AvroContainerBlock{
		NumRecords:  avroWriter.nextBlockRecords,
		RecordBytes: avroWriter.blockBuffer.Bytes(),
		Sync:        avroWriter.syncMarker,
	}
	if err := block.Serialize(avroWriter.writer); err != nil {
		return err
	}
	avroWriter.blockBuffer.Reset()
	avroWriter.nextBlockRecords = 0
	return nil
}
|
go
|
{
"resource": ""
}
|
q5470
|
NewNamespaceNamer
|
train
|
// NewNamespaceNamer creates a namer that maps Avro namespaced names
// to Go identifiers; shortNames keeps only the last two dot-separated
// namespace parts. The invalid-token pattern is compiled once here.
func NewNamespaceNamer(shortNames bool) *NamespaceNamer {
	return &NamespaceNamer{shortNames: shortNames, re: regexp.MustCompile(invalidTokensExpr)}
}
|
go
|
{
"resource": ""
}
|
q5471
|
ToPublicName
|
train
|
// ToPublicName converts an Avro (possibly namespaced) name into an
// exported Go identifier: invalid tokens become spaces, each word is
// title-cased, and the spaces are removed. With shortNames enabled,
// only the last two dot-separated parts are kept.
func (n *NamespaceNamer) ToPublicName(name string) string {
	if n.shortNames {
		parts := strings.Split(name, ".")
		if len(parts) > 2 {
			name = strings.Join(parts[len(parts)-2:], ".")
		}
	}
	cleaned := n.re.ReplaceAllString(name, " ")
	return strings.Replace(strings.Title(cleaned), " ", "", -1)
}
|
go
|
{
"resource": ""
}
|
q5472
|
CompileToVM
|
train
|
// CompileToVM emits the VM prologue for a block-decoded sequence: it
// reads the block count, jumps past the block body when the count is
// zero, and handles the negative-count encoding (where a byte size
// follows) before pushing the loop frame. The +5 and +7 offsets must
// match the instruction counts emitted here and in the corresponding
// blockEndIRInstruction.
func (b *blockStartIRInstruction) CompileToVM(p *irProgram) ([]vm.Instruction, error) {
	block := p.blocks[b.blockId]
	return []vm.Instruction{
		vm.Instruction{vm.Read, vm.Long},       // block count
		vm.Instruction{vm.EvalEqual, 0},        // count == 0?
		vm.Instruction{vm.CondJump, block.end + 5}, // yes: skip past block end
		vm.Instruction{vm.EvalGreater, 0},      // count > 0?
		vm.Instruction{vm.CondJump, block.start + 7}, // yes: straight to loop body
		vm.Instruction{vm.Read, vm.UnusedLong}, // negative count: discard byte size
		vm.Instruction{vm.MultLong, -1},        // make the count positive
		vm.Instruction{vm.PushLoop, 0},
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5473
|
CompileToVM
|
train
|
// CompileToVM emits the VM epilogue for a block-decoded sequence: it
// pops the loop frame, decrements the remaining count, loops back to
// the block start when the count hits zero (to read the next block
// count), and otherwise jumps to the loop body at block.start + 7.
// These offsets must stay in sync with blockStartIRInstruction.
func (b *blockEndIRInstruction) CompileToVM(p *irProgram) ([]vm.Instruction, error) {
	block := p.blocks[b.blockId]
	return []vm.Instruction{
		vm.Instruction{vm.PopLoop, 0},
		vm.Instruction{vm.AddLong, -1},
		vm.Instruction{vm.EvalEqual, 0},
		vm.Instruction{vm.CondJump, block.start},
		vm.Instruction{vm.Jump, block.start + 7},
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5474
|
RegisterDefinition
|
train
|
// RegisterDefinition adds d (and its aliases) to the namespace.
// Re-registering a deeply-equal definition is a no-op; a different
// definition under the same name, or an alias colliding with an
// existing entry, is an error.
func (n *Namespace) RegisterDefinition(d Definition) error {
	name := d.AvroName()
	if existing, ok := n.Definitions[name]; ok {
		if reflect.DeepEqual(existing, d) {
			return nil
		}
		return fmt.Errorf("Conflicting definitions for %v", name)
	}
	n.Definitions[name] = d
	for _, alias := range d.Aliases() {
		if other, ok := n.Definitions[alias]; ok {
			return fmt.Errorf("Alias for %q is %q, but %q is already aliased with that name", name, alias, other.AvroName())
		}
		n.Definitions[alias] = d
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5475
|
decodeRecordDefinition
|
train
|
// decodeRecordDefinition decodes an Avro "record" schema map into a
// record Definition. The enclosing namespace is used unless the map
// overrides it with its own "namespace" key; note the override is
// applied AFTER the name is read but BEFORE fields and aliases are
// resolved, so field types and aliases resolve in the record's own
// namespace. Each field must carry "name" and "type"; "doc",
// "golang.tags", and "default" are optional.
func (n *Namespace) decodeRecordDefinition(namespace string, schemaMap map[string]interface{}) (Definition, error) {
	typeStr, err := getMapString(schemaMap, "type")
	if err != nil {
		return nil, err
	}
	if typeStr != "record" {
		return nil, fmt.Errorf("Type of record must be 'record'")
	}
	name, err := getMapString(schemaMap, "name")
	if err != nil {
		return nil, err
	}
	// Optional record-level documentation string.
	var rDocString string
	if rDoc, ok := schemaMap["doc"]; ok {
		rDocString, ok = rDoc.(string)
		if !ok {
			return nil, NewWrongMapValueTypeError("doc", "string", rDoc)
		}
	}
	// A "namespace" key on the record overrides the inherited one for
	// everything resolved below (field types, aliases, the name).
	if _, ok := schemaMap["namespace"]; ok {
		namespace, err = getMapString(schemaMap, "namespace")
		if err != nil {
			return nil, err
		}
	}
	fieldList, err := getMapArray(schemaMap, "fields")
	if err != nil {
		return nil, err
	}
	decodedFields := make([]*Field, 0)
	for i, f := range fieldList {
		field, ok := f.(map[string]interface{})
		if !ok {
			return nil, NewWrongMapValueTypeError("fields", "map[]", field)
		}
		fieldName, err := getMapString(field, "name")
		if err != nil {
			return nil, err
		}
		t, ok := field["type"]
		if !ok {
			return nil, NewRequiredMapKeyError("type")
		}
		fieldType, err := n.decodeTypeDefinition(fieldName, namespace, t)
		if err != nil {
			return nil, err
		}
		// Optional per-field documentation.
		var docString string
		if doc, ok := field["doc"]; ok {
			docString, ok = doc.(string)
			if !ok {
				return nil, NewWrongMapValueTypeError("doc", "string", doc)
			}
		}
		// Optional extra Go struct tags via the "golang.tags" extension.
		var fieldTags string
		if tags, ok := field["golang.tags"]; ok {
			fieldTags, ok = tags.(string)
			if !ok {
				return nil, NewWrongMapValueTypeError("golang.tags", "string", tags)
			}
		}
		// hasDef distinguishes "no default" from an explicit null/zero default.
		def, hasDef := field["default"]
		fieldStruct := NewField(fieldName, fieldType, def, hasDef, docString, field, i, fieldTags)
		decodedFields = append(decodedFields, fieldStruct)
	}
	aliases, err := parseAliases(schemaMap, namespace)
	if err != nil {
		return nil, err
	}
	return NewRecordDefinition(ParseAvroName(namespace, name), aliases, decodedFields, rDocString, schemaMap), nil
}
|
go
|
{
"resource": ""
}
|
q5476
|
decodeEnumDefinition
|
train
|
// decodeEnumDefinition decodes an Avro "enum" schema map into an enum
// Definition. A "namespace" key on the enum overrides the inherited
// namespace before the name, symbols, and aliases are resolved.
// "symbols" must be an array of strings; "doc" is optional.
func (n *Namespace) decodeEnumDefinition(namespace string, schemaMap map[string]interface{}) (Definition, error) {
	typeStr, err := getMapString(schemaMap, "type")
	if err != nil {
		return nil, err
	}
	if typeStr != "enum" {
		return nil, fmt.Errorf("Type of enum must be 'enum'")
	}
	// Namespace override takes effect before name/alias resolution.
	if _, ok := schemaMap["namespace"]; ok {
		namespace, err = getMapString(schemaMap, "namespace")
		if err != nil {
			return nil, err
		}
	}
	name, err := getMapString(schemaMap, "name")
	if err != nil {
		return nil, err
	}
	symbolSlice, err := getMapArray(schemaMap, "symbols")
	if err != nil {
		return nil, err
	}
	symbolStr, ok := interfaceSliceToStringSlice(symbolSlice)
	if !ok {
		return nil, fmt.Errorf("'symbols' must be an array of strings")
	}
	aliases, err := parseAliases(schemaMap, namespace)
	if err != nil {
		return nil, err
	}
	// Optional enum-level documentation string.
	var docString string
	if doc, ok := schemaMap["doc"]; ok {
		if docString, ok = doc.(string); !ok {
			return nil, fmt.Errorf("'doc' must be a string")
		}
	}
	return NewEnumDefinition(ParseAvroName(namespace, name), aliases, symbolStr, docString, schemaMap), nil
}
|
go
|
{
"resource": ""
}
|
q5477
|
decodeFixedDefinition
|
train
|
// decodeFixedDefinition decodes an Avro "fixed" schema map into a
// fixed Definition. A "namespace" key on the type overrides the
// inherited namespace before the name and aliases are resolved. The
// "size" value arrives as a JSON number (float) and is truncated to
// an int byte count.
func (n *Namespace) decodeFixedDefinition(namespace string, schemaMap map[string]interface{}) (Definition, error) {
	typeStr, err := getMapString(schemaMap, "type")
	if err != nil {
		return nil, err
	}
	if typeStr != "fixed" {
		return nil, fmt.Errorf("Type of fixed must be 'fixed'")
	}
	// Namespace override takes effect before name/alias resolution.
	if _, ok := schemaMap["namespace"]; ok {
		namespace, err = getMapString(schemaMap, "namespace")
		if err != nil {
			return nil, err
		}
	}
	name, err := getMapString(schemaMap, "name")
	if err != nil {
		return nil, err
	}
	sizeBytes, err := getMapFloat(schemaMap, "size")
	if err != nil {
		return nil, err
	}
	aliases, err := parseAliases(schemaMap, namespace)
	if err != nil {
		return nil, err
	}
	return NewFixedDefinition(ParseAvroName(namespace, name), aliases, int(sizeBytes), schemaMap), nil
}
|
go
|
{
"resource": ""
}
|
q5478
|
parseAliases
|
train
|
// parseAliases extracts the optional "aliases" array from an Avro
// schema map, qualifying each alias with the given namespace. A
// missing key yields an empty (non-nil) slice; a non-array value or a
// non-string element is an error.
func parseAliases(objectMap map[string]interface{}, namespace string) ([]QualifiedName, error) {
	raw, ok := objectMap["aliases"]
	if !ok {
		return []QualifiedName{}, nil
	}
	list, ok := raw.([]interface{})
	if !ok {
		return nil, fmt.Errorf("Field aliases expected to be array, got %v", raw)
	}
	qualified := make([]QualifiedName, 0, len(list))
	for _, entry := range list {
		s, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("Field aliases expected to be array of strings, got %v", raw)
		}
		qualified = append(qualified, ParseAvroName(namespace, s))
	}
	return qualified, nil
}
|
go
|
{
"resource": ""
}
|
q5479
|
parseCmdLine
|
train
|
// parseCmdLine registers and parses the generator's command-line
// flags and returns the resulting config. Usage errors (too few
// arguments, invalid -namespaced-names value, bad glob) exit the
// process via flag.Usage/os.Exit. Positional arguments are the target
// directory followed by one or more schema-file globs, which are
// expanded here.
func parseCmdLine() config {
	cfg := config{}
	flag.StringVar(&cfg.packageName, "package", defaultPackageName, "Name of generated package.")
	flag.BoolVar(&cfg.containers, "containers", defaultContainers, "Whether to generate container writer methods.")
	flag.BoolVar(&cfg.shortUnions, "short-unions", defaultShortUnions, "Whether to use shorter names for Union types.")
	flag.StringVar(&cfg.namespacedNames, "namespaced-names", defaultNamespacedNames, "Whether to generate namespaced names for types. Default is \"none\"; \"short\" uses the last part of the namespace (last word after a separator); \"full\" uses all namespace string.")
	// Usage exits with status 1, so calling flag.Usage() below also
	// terminates the program.
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags] <target directory> <schema files>\n\nWhere 'flags' are:\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(1)
	}
	flag.Parse()
	if flag.NArg() < 2 {
		flag.Usage()
	}
	// Accept any casing for -namespaced-names, but only the three
	// known values.
	cfg.namespacedNames = strings.ToLower(cfg.namespacedNames)
	switch cfg.namespacedNames {
	case nsNone, nsShort, nsFull:
	default:
		fmt.Fprintf(os.Stderr, "namespaced-names: invalid value '%s'\n\n", cfg.namespacedNames)
		flag.Usage()
	}
	cfg.targetDir = flag.Arg(0)
	cfg.files = make([]string, 0)
	// Every remaining argument is a glob; expand them all into the
	// flat schema-file list.
	for _, glob := range flag.Args()[1:] {
		files, err := filepath.Glob(glob)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error parsing input file as glob: %v", err)
			os.Exit(1)
		}
		cfg.files = append(cfg.files, files...)
	}
	return cfg
}
|
go
|
{
"resource": ""
}
|
q5480
|
Compile
|
train
|
// Compile builds a VM program that deserializes data written with the
// writer schema into the shape described by the reader schema. Any schema
// incompatibility surfaces as a compile error.
func Compile(writer, reader schema.AvroType) (*vm.Program, error) {
	log("Compile()\n writer:\n %v\n---\nreader: %v\n---\n", writer, reader)
	program := &irProgram{
		methods: make(map[string]*irMethod),
		errors:  make([]string, 0),
	}
	program.main = newIRMethod("main", program)
	if err := program.main.compileType(writer, reader); err != nil {
		return nil, err
	}
	log("%v", program)
	compiled, err := program.CompileToVM()
	log("%v", compiled)
	return compiled, err
}
|
go
|
{
"resource": ""
}
|
q5481
|
mergeMaps
|
train
|
// mergeMaps copies into m1 every key/value pair from m2 whose key is not
// already present in m1, then returns m1. m1 is modified in place, and a
// value already present in m1 always wins over m2's value.
func mergeMaps(m1, m2 map[string]interface{}) map[string]interface{} {
	for key, val := range m2 {
		if _, exists := m1[key]; exists {
			continue
		}
		m1[key] = val
	}
	return m1
}
|
go
|
{
"resource": ""
}
|
q5482
|
UpdateMeta
|
train
|
// UpdateMeta promotes each feed's freshly fetched hash and last-modified
// date into the stored fields and persists the record, stopping at the
// first upsert failure.
func UpdateMeta(driver db.DB, metas []models.FeedMeta) error {
	for _, meta := range metas {
		meta.Hash = meta.LatestHash
		meta.LastModifiedDate = meta.LatestLastModifiedDate
		err := driver.UpsertFeedHash(meta)
		if err != nil {
			// Fixed typo in the error message: "updte" -> "update".
			return fmt.Errorf("Failed to update meta: %s, err: %s",
				meta.URL, err)
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5483
|
MakeNvdMetaURLs
|
train
|
// MakeNvdMetaURLs builds the NVD .meta feed URL(s) for the given year.
// When year is c.Latest it returns the "modified" and "recent" feeds;
// otherwise a single per-year feed. xml selects the XML feed layout,
// otherwise the JSON 1.0 layout is used.
func MakeNvdMetaURLs(year int, xml bool) (url []string) {
	var formatTemplate string
	if xml {
		// https://nvd.nist.gov/vuln/data-feeds#XML_FEED
		formatTemplate = "https://nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-%s.meta"
	} else {
		// https: //nvd.nist.gov/vuln/data-feeds#JSON_FEED
		formatTemplate = "https://nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-%s.meta"
	}
	names := []string{strconv.Itoa(year)}
	if year == c.Latest {
		names = []string{"modified", "recent"}
	}
	for _, name := range names {
		url = append(url, fmt.Sprintf(formatTemplate, name))
	}
	return
}
|
go
|
{
"resource": ""
}
|
q5484
|
NewRDB
|
train
|
// NewRDB opens an RDB-backed driver of the given dialect at dbpath and runs
// the schema migration. locked reports whether the database file was locked
// by another process when opening failed.
func NewRDB(dbType, dbpath string, debugSQL bool) (driver *RDBDriver, locked bool, err error) {
	driver = &RDBDriver{name: dbType}

	log.Debugf("Opening DB (%s).", driver.Name())
	locked, err = driver.OpenDB(dbType, dbpath, debugSQL)
	if err != nil {
		return nil, locked, err
	}

	log.Debugf("Migrating DB (%s).", driver.Name())
	if err = driver.MigrateDB(); err != nil {
		return nil, false, err
	}
	return driver, false, nil
}
|
go
|
{
"resource": ""
}
|
q5485
|
CloseDB
|
train
|
// CloseDB closes the underlying database connection, logging (and
// returning) any error encountered.
func (r *RDBDriver) CloseDB() error {
	err := r.conn.Close()
	if err != nil {
		log.Errorf("Failed to close DB. Type: %s. err: %s", r.name, err)
	}
	return err
}
|
go
|
{
"resource": ""
}
|
q5486
|
getMatchingCpes
|
train
|
// getMatchingCpes returns the stored CPE rows that match the given CPE URI.
// Candidates are first narrowed by exact vendor+product, then each candidate
// is compared against the URI via match(); candidates whose version cannot
// be compared (e.g. not semVer-like) fall back to an exact match against the
// NVD JSON `affects` entries. gorm.ErrRecordNotFound is treated as "no rows",
// not as an error.
func (r *RDBDriver) getMatchingCpes(uri string) ([]models.Cpe, error) {
	// Parse the WFN so we can extract vendor and product for the SQL filter.
	specified, err := naming.UnbindURI(uri)
	if err != nil {
		return nil, err
	}
	// Select candidate CPEs by vendor and product only; version matching
	// happens in Go below.
	cpes := []models.Cpe{}
	err = r.conn.Where(&models.Cpe{
		CpeBase: models.CpeBase{
			CpeWFN: models.CpeWFN{
				Vendor:  fmt.Sprintf("%s", specified.Get(common.AttributeVendor)),
				Product: fmt.Sprintf("%s", specified.Get(common.AttributeProduct)),
			},
		}}).Find(&cpes).Error
	if err != nil && err != gorm.ErrRecordNotFound {
		return nil, err
	}
	log.Debugf("specified: %s", uri)
	filtered := []models.Cpe{}
	// checkedIDs avoids re-querying `affects` for a NvdJSON record that was
	// already checked via an earlier candidate row.
	checkedIDs := map[uint]bool{}
	for _, cpe := range cpes {
		match, err := match(uri, cpe)
		if err != nil {
			log.Debugf("Failed to compare the version:%s %s cpe_id:%d %#v",
				err, uri, cpe.ID, cpe)
			// Try to exact match by vendor, product and version if the version in CPE is not a semVer style.
			if cpe.NvdJSONID != 0 {
				if _, ok := checkedIDs[cpe.NvdJSONID]; ok {
					continue
				}
				affects := []models.Affect{}
				result := r.conn.Where(&models.Affect{NvdJSONID: cpe.NvdJSONID}).Find(&affects)
				if result.Error != nil && result.Error != gorm.ErrRecordNotFound {
					return nil, result.Error
				}
				ok, err := matchExactByAffects(uri, affects)
				if err != nil {
					return nil, err
				}
				if ok {
					filtered = append(filtered, cpe)
				}
				checkedIDs[cpe.NvdJSONID] = true
			}
		} else if match {
			filtered = append(filtered, cpe)
		}
	}
	return filtered, nil
}
|
go
|
{
"resource": ""
}
|
q5487
|
GetCveIDsByCpeURI
|
train
|
// GetCveIDsByCpeURI resolves a CPE URI to the CVE IDs of all stored
// vulnerability records that match it. An entry stays empty (and is logged)
// if a matching CPE row carries no resolvable CVE ID.
func (r *RDBDriver) GetCveIDsByCpeURI(uri string) ([]string, error) {
	filtered, err := r.getMatchingCpes(uri)
	if err != nil {
		return nil, err
	}
	cveIDs := make([]string, len(filtered))
	// The cpes table is de-normalized, so a single-row `First` lookup per
	// source table is the correct access pattern here.
	for i, cpe := range filtered {
		switch {
		case cpe.JvnID != 0:
			rec := models.Jvn{}
			if err := r.conn.Select("cve_id").Where("ID = ?", cpe.JvnID).First(&rec).Error; err != nil && err != gorm.ErrRecordNotFound {
				return nil, err
			}
			cveIDs[i] = rec.CveID
		case cpe.NvdXMLID != 0:
			rec := models.NvdXML{}
			if err := r.conn.Select("cve_id").Where("ID = ?", cpe.NvdXMLID).First(&rec).Error; err != nil && err != gorm.ErrRecordNotFound {
				return nil, err
			}
			cveIDs[i] = rec.CveID
		case cpe.NvdJSONID != 0:
			rec := models.NvdJSON{}
			if err := r.conn.Select("cve_id").Where("ID = ?", cpe.NvdJSONID).First(&rec).Error; err != nil && err != gorm.ErrRecordNotFound {
				return nil, err
			}
			cveIDs[i] = rec.CveID
		}
		// If we didn't find a CVE something is weird.
		if cveIDs[i] == "" {
			log.Infof("Missing cve_id for %s (id: %d)", cpe.URI, cpe.ID)
		}
	}
	return cveIDs, nil
}
|
go
|
{
"resource": ""
}
|
q5488
|
GetFetchedFeedMeta
|
train
|
// GetFetchedFeedMeta looks up the stored feed metadata for the given URL.
// A missing row is not an error: the zero-value FeedMeta is returned in
// that case.
func (r *RDBDriver) GetFetchedFeedMeta(url string) (*models.FeedMeta, error) {
	meta := models.FeedMeta{}
	err := r.conn.Where(&models.FeedMeta{URL: url}).First(&meta).Error
	if err != nil && err != gorm.ErrRecordNotFound {
		return nil, err
	}
	return &meta, nil
}
|
go
|
{
"resource": ""
}
|
q5489
|
UpsertFeedHash
|
train
|
// UpsertFeedHash inserts or updates the stored hash and last-modified date
// for the feed identified by mm.URL, inside a single transaction.
func (r *RDBDriver) UpsertFeedHash(mm models.FeedMeta) error {
	meta := models.FeedMeta{}
	m := &models.FeedMeta{
		URL: mm.URL,
	}
	err := r.conn.Where(m).First(&meta).Error
	if err != nil && err != gorm.ErrRecordNotFound {
		return err
	}
	tx := r.conn.Begin()
	if tx.Error != nil {
		return tx.Error
	}
	if err == gorm.ErrRecordNotFound {
		// No row for this URL yet: create one.
		m.Hash = mm.Hash
		m.LastModifiedDate = mm.LastModifiedDate
		if err := tx.Create(m).Error; err != nil {
			return rollback(tx, err)
		}
	} else {
		// Existing row: update both fields on the fetched record.
		// BUG FIX: LastModifiedDate was previously assigned to the query
		// template `m` instead of the fetched row `meta`, so the new date
		// was never persisted on update.
		meta.Hash = mm.Hash
		meta.LastModifiedDate = mm.LastModifiedDate
		if err := tx.Save(&meta).Error; err != nil {
			return rollback(tx, err)
		}
	}
	if err := tx.Commit().Error; err != nil {
		return rollback(tx, err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5490
|
FetchFeedFiles
|
train
|
// FetchFeedFiles downloads every requested feed file concurrently (one
// worker per request) and returns the bodies. It fails if any download
// errors or if the whole batch exceeds a 10-minute deadline.
//
// NOTE(review): `20/len(reqs)` is integer division — with more than 20
// requests the second argument to fetchFile becomes 0; presumably that
// argument is a rate/retry budget, so confirm 0 is acceptable there.
// NOTE(review): the `tasks` channel from GenWorkers is never closed, so the
// worker goroutines persist after this function returns.
func FetchFeedFiles(reqs []FetchRequest) (results []FetchResult, err error) {
	reqChan := make(chan FetchRequest, len(reqs))
	resChan := make(chan FetchResult, len(reqs))
	errChan := make(chan error, len(reqs))
	// Channels are buffered to len(reqs), so sends never block and the
	// deferred closes run only after every expected result was received.
	defer close(reqChan)
	defer close(resChan)
	defer close(errChan)
	for _, r := range reqs {
		log.Infof("Fetching... %s", r.URL)
	}
	// Feed the request channel asynchronously; the buffer makes this
	// goroutine finish immediately.
	go func() {
		for _, r := range reqs {
			reqChan <- r
		}
	}()
	concurrency := len(reqs)
	tasks := util.GenWorkers(concurrency)
	for range reqs {
		tasks <- func() {
			select {
			case req := <-reqChan:
				body, err := fetchFile(req, 20/len(reqs))
				if err != nil {
					errChan <- err
					return
				}
				resChan <- FetchResult{
					Year: req.Year,
					URL:  req.URL,
					Body: body,
				}
			}
			return
		}
	}
	errs := []error{}
	// Collect exactly one result or error per request, bounded by a global
	// 10-minute timeout for the whole batch.
	timeout := time.After(10 * 60 * time.Second)
	for range reqs {
		select {
		case res := <-resChan:
			results = append(results, res)
			log.Infof("Fetched... %s", res.URL)
		case err := <-errChan:
			errs = append(errs, err)
		case <-timeout:
			return results, fmt.Errorf("Timeout Fetching")
		}
	}
	if 0 < len(errs) {
		return results, fmt.Errorf("%s", errs)
	}
	return results, nil
}
|
go
|
{
"resource": ""
}
|
q5491
|
GenWorkers
|
train
|
// GenWorkers starts num goroutines that execute every function received on
// the returned channel. The channel is unbuffered and is never closed here,
// so the workers run until the process exits (or until a caller closes it).
func GenWorkers(num int) chan<- func() {
	tasks := make(chan func())
	worker := func() {
		for task := range tasks {
			task()
		}
	}
	for i := 0; i < num; i++ {
		go worker()
	}
	return tasks
}
|
go
|
{
"resource": ""
}
|
q5492
|
GetDefaultLogDir
|
train
|
// GetDefaultLogDir returns the platform default log directory:
// %APPDATA%\vuls on Windows, /var/log/vuls everywhere else.
func GetDefaultLogDir() string {
	if runtime.GOOS == "windows" {
		return filepath.Join(os.Getenv("APPDATA"), "vuls")
	}
	return "/var/log/vuls"
}
|
go
|
{
"resource": ""
}
|
q5493
|
ParseCpeURI
|
train
|
// ParseCpeURI parses a CPE string in either URI ("cpe:/...") or
// formatted-string form into a populated CpeBase with every WFN attribute
// rendered as a string.
func ParseCpeURI(uri string) (*models.CpeBase, error) {
	var wfn common.WellFormedName
	var err error
	if strings.HasPrefix(uri, "cpe:/") {
		// URI form: a raw '/' inside an attribute must be escaped before
		// unbinding, as the URI grammar requires.
		rest := strings.TrimPrefix(uri, "cpe:/")
		if strings.Contains(rest, "/") {
			uri = "cpe:/" + strings.Replace(rest, "/", `\/`, -1)
		}
		wfn, err = naming.UnbindURI(uri)
	} else {
		wfn, err = naming.UnbindFS(uri)
	}
	if err != nil {
		return nil, err
	}
	// attr renders one WFN attribute as a plain string.
	attr := func(name string) string {
		return fmt.Sprintf("%s", wfn.Get(name))
	}
	return &models.CpeBase{
		URI:             naming.BindToURI(wfn),
		FormattedString: naming.BindToFS(wfn),
		WellFormedName:  wfn.String(),
		CpeWFN: models.CpeWFN{
			Part:            attr(common.AttributePart),
			Vendor:          attr(common.AttributeVendor),
			Product:         attr(common.AttributeProduct),
			Version:         attr(common.AttributeVersion),
			Update:          attr(common.AttributeUpdate),
			Edition:         attr(common.AttributeEdition),
			Language:        attr(common.AttributeLanguage),
			SoftwareEdition: attr(common.AttributeSwEdition),
			TargetSW:        attr(common.AttributeTargetSw),
			TargetHW:        attr(common.AttributeTargetHw),
			Other:           attr(common.AttributeOther),
		},
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5494
|
StringToFloat
|
train
|
// StringToFloat parses str as a 64-bit float. An empty string yields 0
// silently; an unparsable value logs the failure and also yields 0 —
// parse errors are deliberately not propagated to the caller.
func StringToFloat(str string) float64 {
	if str == "" {
		return 0
	}
	f, err := strconv.ParseFloat(str, 64)
	if err != nil {
		log.Errorf("Failed to cast CVSS score. score: %s, err; %s",
			str,
			err,
		)
		return 0
	}
	return f
}
|
go
|
{
"resource": ""
}
|
q5495
|
NewDB
|
train
|
// NewDB constructs a database driver for the given dialect: Redis gets its
// own driver, the SQL dialects share the RDB driver, and anything else is
// rejected.
func NewDB(dbType, dbpath string, debugSQL bool) (DB, bool, error) {
	switch dbType {
	case dialectRedis:
		return NewRedis(dbType, dbpath, debugSQL)
	case dialectSqlite3, dialectMysql, dialectPostgreSQL:
		return NewRDB(dbType, dbpath, debugSQL)
	default:
		return nil, false, fmt.Errorf("Invalid database dialect, %s", dbType)
	}
}
|
go
|
{
"resource": ""
}
|
q5496
|
Validate
|
train
|
// Validate reports whether the configuration is usable: the SQLite3 DB path
// and the dump path (when set) must be absolute file paths, and the struct's
// own validation tags must pass. Failures are logged.
func (c *Config) Validate() bool {
	if c.DBType == "sqlite3" {
		if ok, _ := valid.IsFilePath(c.DBPath); !ok {
			log.Errorf("SQLite3 DB path must be a *Absolute* file path. dbpath: %s", c.DBPath)
			return false
		}
	}
	if len(c.DumpPath) != 0 {
		if ok, _ := valid.IsFilePath(c.DumpPath); !ok {
			log.Errorf("JSON path must be a *Absolute* file path. dumppath: %s", c.DumpPath)
			return false
		}
	}
	if _, err := valid.ValidateStruct(c); err != nil {
		// BUG FIX: the error was previously concatenated into the format
		// string, so any '%' in its message was misread as a format verb.
		log.Errorf("error: %s", err)
		return false
	}
	return true
}
|
go
|
{
"resource": ""
}
|
q5497
|
Fetch
|
train
|
// Fetch downloads every JVN RSS feed listed in metas and returns the
// concatenated feed items.
func Fetch(metas []models.FeedMeta) ([]Item, error) {
	reqs := make([]fetcher.FetchRequest, 0, len(metas))
	for _, meta := range metas {
		reqs = append(reqs, fetcher.FetchRequest{URL: meta.URL})
	}
	results, err := fetcher.FetchFeedFiles(reqs)
	if err != nil {
		return nil, fmt.Errorf("Failed to fetch. err: %s", err)
	}
	items := []Item{}
	for _, res := range results {
		var doc rdf
		if err := xml.Unmarshal([]byte(res.Body), &doc); err != nil {
			return nil, fmt.Errorf(
				"Failed to unmarshal. url: %s, err: %s", res.URL, err)
		}
		items = append(items, doc.Items...)
	}
	return items, nil
}
|
go
|
{
"resource": ""
}
|
q5498
|
FetchConvert
|
train
|
// FetchConvert downloads the feeds described by metas and converts the
// fetched items into CveDetail models.
func FetchConvert(metas []models.FeedMeta) ([]models.CveDetail, error) {
	items, err := Fetch(metas)
	if err != nil {
		return nil, err
	}
	return convert(items)
}
|
go
|
{
"resource": ""
}
|
q5499
|
FetchConvert
|
train
|
// FetchConvert downloads the gzipped NVD XML feeds described by metas and
// converts every feed entry into the internal CveDetail model.
func FetchConvert(metas []models.FeedMeta) (cves []models.CveDetail, err error) {
	reqs := make([]fetcher.FetchRequest, 0, len(metas))
	for _, meta := range metas {
		reqs = append(reqs, fetcher.FetchRequest{
			URL:  meta.URL,
			GZIP: true,
		})
	}
	results, err := fetcher.FetchFeedFiles(reqs)
	if err != nil {
		return nil, fmt.Errorf("Failed to fetch. err: %s", err)
	}
	for _, res := range results {
		feed := NvdXML{}
		if err = xml.Unmarshal(res.Body, &feed); err != nil {
			return nil, fmt.Errorf(
				"Failed to unmarshal. url: %s, err: %s",
				res.URL, err)
		}
		for _, entry := range feed.Entries {
			converted, err := convertToModel(entry)
			if err != nil {
				return nil, fmt.Errorf("Failed to convert to model. cve: %s, err: %s",
					entry.CveID, err)
			}
			cves = append(cves, *converted)
		}
	}
	return cves, nil
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.