_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5500
|
SetLogger
|
train
|
// SetLogger configures the root logger: output format (logfmt or JSON),
// level (Info by default, Debug with debug=true, fully discarded with
// quiet=true) and, when logDir is usable, an additional file handler
// writing to <logDir>/cve-dictionary.log.
func SetLogger(logDir string, quiet, debug, logJSON bool) {
	stderrHandler := logger.StderrHandler
	logFormat := logger.LogfmtFormat()
	if logJSON {
		logFormat = logger.JsonFormatEx(false, true)
		stderrHandler = logger.StreamHandler(os.Stderr, logFormat)
	}

	// Pick the level filter: Info normally, Debug when requested.
	lvl := logger.LvlInfo
	if debug {
		lvl = logger.LvlDebug
	}
	lvlHandler := logger.LvlFilterHandler(lvl, stderrHandler)

	// quiet wins over everything: drop log output and pp dumps entirely.
	if quiet {
		lvlHandler = logger.LvlFilterHandler(logger.LvlDebug, logger.DiscardHandler())
		pp.SetDefaultOutput(ioutil.Discard)
	}

	// Best-effort creation of the log directory; a failure is only logged.
	if _, err := os.Stat(logDir); os.IsNotExist(err) {
		if err := os.Mkdir(logDir, 0700); err != nil {
			logger.Error("Failed to create log directory", "err", err)
		}
	}

	// Tee to a log file only when the directory is actually accessible.
	handler := lvlHandler
	if _, err := os.Stat(logDir); err == nil {
		logPath := filepath.Join(logDir, "cve-dictionary.log")
		handler = logger.MultiHandler(
			logger.Must.FileHandler(logPath, logFormat),
			lvlHandler,
		)
	}
	logger.Root().SetHandler(handler)
}
|
go
|
{
"resource": ""
}
|
q5501
|
Fatalf
|
train
|
// Fatalf logs a critical-level message built from format and args
// following fmt.Sprintf conventions.
func Fatalf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	logger.Crit(msg)
}
|
go
|
{
"resource": ""
}
|
q5502
|
Start
|
train
|
// Start configures and runs the HTTP API server: echo middleware, an
// append-mode access log under logDir, and the /health, /cves/:id and
// /cpes routes. It listens on the address from config.Conf and blocks
// until the server stops, returning any setup or serve error.
func Start(logDir string, driver db.DB) error {
	e := echo.New()
	e.Debug = config.Conf.Debug

	// Middleware
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())

	// Setup access logger: create the file if it does not exist yet,
	// then reopen it for appending.
	logPath := filepath.Join(logDir, "access.log")
	if _, err := os.Stat(logPath); os.IsNotExist(err) {
		if _, err := os.Create(logPath); err != nil {
			return err
		}
	}
	f, err := os.OpenFile(logPath, os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	// Closed when Start returns (e.Start blocks for the server's lifetime).
	defer f.Close()
	e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
		Output: f,
	}))

	// Routes
	e.GET("/health", health())
	e.GET("/cves/:id", getCve(driver))
	e.POST("/cpes", getCveByCpeName(driver))

	bindURL := fmt.Sprintf("%s:%s", config.Conf.Bind, config.Conf.Port)
	log.Infof("Listening on %s", bindURL)

	// BUG FIX: the error from e.Start was previously discarded and the
	// function unconditionally returned nil, hiding bind/serve failures
	// from the caller. Propagate it instead.
	return e.Start(bindURL)
}
|
go
|
{
"resource": ""
}
|
q5503
|
NewRedis
|
train
|
// NewRedis builds a RedisDriver named dbType and opens its connection to
// dbpath. The locked return value is never set and therefore always false
// (kept for signature symmetry with other driver constructors).
func NewRedis(dbType, dbpath string, debugSQL bool) (driver *RedisDriver, locked bool, err error) {
	driver = &RedisDriver{name: dbType}
	log.Debugf("Opening DB (%s).", driver.Name())
	err = driver.OpenDB(dbType, dbpath, debugSQL)
	return driver, false, err
}
|
go
|
{
"resource": ""
}
|
q5504
|
InsertJvn
|
train
|
// InsertJvn stores the JVN portion of the fetched CVEs in redis.
// Working in pipelined chunks of 10 CVEs, for each CVE it writes:
//   - hash <hashKeyPrefix><CveID>, field "Jvn": JSON of c.Jvn with Cpes stripped
//   - zset <hashKeyPrefix><vendor>::<product>: member CveID at score 0
//   - hash <cpeHashKeyPrefix><CveID>, field "Jvn": JSON of the CPE list
func (r *RedisDriver) InsertJvn(cves []models.CveDetail) error {
	log.Infof("Inserting fetched CVEs...")
	var err error
	var refreshedJvns []string
	// Progress bar on stderr; silenced entirely in quiet mode.
	bar := pb.New(len(cves))
	if c.Conf.Quiet {
		bar.SetWriter(ioutil.Discard)
	} else {
		bar.SetWriter(os.Stderr)
	}
	bar.Start()
	for chunked := range chunkSlice(cves, 10) {
		var pipe redis.Pipeliner
		pipe = r.conn.Pipeline()
		// NOTE(review): the loop variable c shadows the package alias c
		// used for c.Conf above; be careful when editing this loop.
		for _, c := range chunked {
			bar.Increment()
			// Detach the CPE list so it is serialized separately from c.Jvn.
			cpes := make([]models.Cpe, len(c.Jvn.Cpes))
			copy(cpes, c.Jvn.Cpes)
			c.Jvn.Cpes = nil
			var jj []byte
			if jj, err = json.Marshal(c.Jvn); err != nil {
				return fmt.Errorf("Failed to marshal json. err: %s", err)
			}
			refreshedJvns = append(refreshedJvns, c.CveID)
			if result := pipe.HSet(hashKeyPrefix+c.CveID, "Jvn", string(jj)); result.Err() != nil {
				return fmt.Errorf("Failed to HSet CVE. err: %s", result.Err())
			}
			// Index the CVE under each vendor::product pair for CPE lookups.
			for _, cpe := range cpes {
				if result := pipe.ZAdd(
					fmt.Sprintf("%s%s::%s", hashKeyPrefix, cpe.Vendor, cpe.Product),
					redis.Z{Score: 0, Member: c.CveID},
				); result.Err() != nil {
					return fmt.Errorf("Failed to ZAdd cpe. err: %s", result.Err())
				}
			}
			var jc []byte
			if jc, err = json.Marshal(cpes); err != nil {
				return fmt.Errorf("Failed to marshal json. err: %s", err)
			}
			if result := pipe.HSet(cpeHashKeyPrefix+c.CveID, "Jvn", string(jc)); result.Err() != nil {
				return fmt.Errorf("Failed to HSet CPE. err: %s", result.Err())
			}
		}
		// One network round-trip per chunk.
		if _, err = pipe.Exec(); err != nil {
			return fmt.Errorf("Failed to exec pipeline. err: %s", err)
		}
	}
	bar.Finish()
	log.Infof("Refreshed %d Jvns.", len(refreshedJvns))
	// log.Debugf("%v", refreshedJvns)
	return nil
}
|
go
|
{
"resource": ""
}
|
q5505
|
GetFetchedFeedMeta
|
train
|
// GetFetchedFeedMeta returns the stored feed metadata for url from the
// "Meta" hash. When the URL has no entry, a zero-valued FeedMeta is
// returned with a nil error.
func (r *RedisDriver) GetFetchedFeedMeta(url string) (*models.FeedMeta, error) {
	result := r.conn.HGetAll(hashKeyPrefix + "Meta")
	if err := result.Err(); err != nil {
		return nil, err
	}
	meta := &models.FeedMeta{}
	raw, found := result.Val()[url]
	if !found {
		return meta, nil
	}
	if err := json.Unmarshal([]byte(raw), meta); err != nil {
		return nil, err
	}
	return meta, nil
}
|
go
|
{
"resource": ""
}
|
q5506
|
UpsertFeedHash
|
train
|
// UpsertFeedHash stores m, JSON-encoded, under its URL in the "Meta" hash
// via a redis pipeline.
func (r *RedisDriver) UpsertFeedHash(m models.FeedMeta) error {
	jn, err := json.Marshal(m)
	if err != nil {
		return fmt.Errorf("Failed to marshal json. err: %s", err)
	}
	pipe := r.conn.Pipeline()
	if result := pipe.HSet(hashKeyPrefix+"Meta", m.URL, jn); result.Err() != nil {
		return fmt.Errorf("Failed to HSet META. err: %s", result.Err())
	}
	_, err = pipe.Exec()
	if err != nil {
		return fmt.Errorf("Failed to exec pipeline. err: %s", err)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5507
|
UpToDate
|
train
|
// UpToDate reports whether the feed has been fetched before and its stored
// hash matches the latest known hash.
func (f FeedMeta) UpToDate() bool {
	if f.Newly() {
		return false
	}
	return f.Hash == f.LatestHash
}
|
go
|
{
"resource": ""
}
|
q5508
|
OutDated
|
train
|
// OutDated reports whether the feed has been fetched before but its stored
// hash differs from the latest known hash.
func (f FeedMeta) OutDated() bool {
	if f.Newly() {
		return false
	}
	return f.Hash != f.LatestHash
}
|
go
|
{
"resource": ""
}
|
q5509
|
StatusForStdout
|
train
|
// StatusForStdout renders the feed's freshness state for terminal output,
// coloring Out-Dated red and Up-to-Date green.
func (f FeedMeta) StatusForStdout() string {
	switch {
	case f.Newly():
		return "Newly"
	case f.OutDated():
		red := color.New(color.FgRed, color.Bold).SprintFunc()
		return red("Out-Dated")
	case f.UpToDate():
		return color.GreenString("Up-to-Date")
	default:
		return "Unknown"
	}
}
|
go
|
{
"resource": ""
}
|
q5510
|
Year
|
train
|
// Year derives the feed year (or "modified"/"recent" for the rolling JVN
// feeds) from the feed URL, and reports whether the feed is XML.
// An unrecognized source yields an error.
func (f FeedMeta) Year() (year string, xml bool, err error) {
	switch f.source() {
	case nvdxml:
		y := strings.TrimSuffix(strings.Split(f.URL, "nvdcve-2.0-")[1], ".xml.gz")
		return y, true, nil
	case nvdjson:
		y := strings.TrimSuffix(strings.Split(f.URL, "nvdcve-1.0-")[1], ".json.gz")
		return y, false, nil
	case jvn:
		switch {
		case strings.HasSuffix(f.URL, "jvndb.rdf"):
			return "modified", true, nil
		case strings.HasSuffix(f.URL, "jvndb_new.rdf"):
			return "recent", true, nil
		default:
			y := strings.TrimSuffix(strings.Split(f.URL, "jvndb_")[1], ".rdf")
			return y, true, nil
		}
	}
	return "", false, fmt.Errorf("Failed to parse URL: %s", f.URL)
}
|
go
|
{
"resource": ""
}
|
q5511
|
ToTableWriterRow
|
train
|
// ToTableWriterRow renders the feed meta as one table row:
// source, year, status, fetched time, latest time.
func (f FeedMeta) ToTableWriterRow() []string {
	year, _, _ := f.Year()
	fetched, latest := f.modifiedTimesToStrs()
	row := make([]string, 0, 5)
	row = append(row, f.color(f.source()))
	row = append(row, f.color(year))
	row = append(row, f.StatusForStdout())
	row = append(row, f.color(fetched))
	row = append(row, f.color(latest))
	return row
}
|
go
|
{
"resource": ""
}
|
q5512
|
FetchConvert
|
train
|
// FetchConvert downloads every (gzipped) feed listed in metas and converts
// each NVD JSON CVE item into the internal CveDetail model.
func FetchConvert(metas []models.FeedMeta) (cves []models.CveDetail, err error) {
	reqs := make([]fetcher.FetchRequest, 0, len(metas))
	for _, meta := range metas {
		reqs = append(reqs, fetcher.FetchRequest{
			URL:  meta.URL,
			GZIP: true,
		})
	}
	results, err := fetcher.FetchFeedFiles(reqs)
	if err != nil {
		return nil, fmt.Errorf("Failed to fetch. err: %s", err)
	}
	for _, res := range results {
		var nvd NvdJSON
		if err = json.Unmarshal(res.Body, &nvd); err != nil {
			return nil, fmt.Errorf(
				"Failed to unmarshal. url: %s, err: %s",
				res.URL, err)
		}
		for _, item := range nvd.CveItems {
			cve, convErr := convertToModel(&item)
			if convErr != nil {
				return nil, fmt.Errorf("Failed to convert to model. cve: %s, err: %s",
					item.Cve.CveDataMeta.ID, convErr)
			}
			cves = append(cves, *cve)
		}
	}
	return cves, nil
}
|
go
|
{
"resource": ""
}
|
q5513
|
Register
|
train
|
// Register adds non-transactional migration functions to the collection.
func (c *Collection) Register(fns ...func(DB) error) error {
	const inTx = false
	return c.register(inTx, fns...)
}
|
go
|
{
"resource": ""
}
|
q5514
|
RegisterTx
|
train
|
// RegisterTx adds migration functions that must run inside a transaction.
func (c *Collection) RegisterTx(fns ...func(DB) error) error {
	const inTx = true
	return c.register(inTx, fns...)
}
|
go
|
{
"resource": ""
}
|
q5515
|
DiscoverSQLMigrations
|
train
|
// DiscoverSQLMigrations scans dir for SQL migration files named
// <version>_<comment>.(tx.)?(up|down).sql, groups files sharing a version
// into one Migration, and registers all discovered migrations. A missing
// or already-visited directory is a no-op; malformed file names are errors.
func (c *Collection) DiscoverSQLMigrations(dir string) error {
	dir, err := filepath.Abs(dir)
	if err != nil {
		return err
	}
	// Skip directories that were already processed.
	if c.isVisitedDir(dir) {
		return nil
	}
	// A non-existent directory is not an error: nothing to discover.
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		return nil
	}
	var ms []*Migration
	// newMigration returns the in-progress Migration for version, creating
	// it on first sight, so .up.sql and .down.sql pair up under one entry.
	newMigration := func(version int64) *Migration {
		for i := range ms {
			m := ms[i]
			if m.Version == version {
				return m
			}
		}
		ms = append(ms, &Migration{
			Version: version,
		})
		return ms[len(ms)-1]
	}
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, f := range files {
		if f.IsDir() {
			continue
		}
		fileName := f.Name()
		// Only .sql files participate; everything else is ignored.
		if !strings.HasSuffix(fileName, ".sql") {
			continue
		}
		// The version is the numeric prefix before the first underscore.
		idx := strings.IndexByte(fileName, '_')
		if idx == -1 {
			err := fmt.Errorf(
				"file=%q must have name in format version_comment, e.g. 1_initial",
				fileName)
			return err
		}
		version, err := strconv.ParseInt(fileName[:idx], 10, 64)
		if err != nil {
			return err
		}
		m := newMigration(version)
		filePath := filepath.Join(dir, fileName)
		if strings.HasSuffix(fileName, ".up.sql") {
			// Reject duplicate up-migrations for the same version.
			if m.Up != nil {
				return fmt.Errorf("migration=%d already has Up func", version)
			}
			// A .tx.up.sql suffix marks the migration as transactional.
			m.UpTx = strings.HasSuffix(fileName, ".tx.up.sql")
			m.Up = newSQLMigration(filePath)
			continue
		}
		if strings.HasSuffix(fileName, ".down.sql") {
			if m.Down != nil {
				return fmt.Errorf("migration=%d already has Down func", version)
			}
			m.DownTx = strings.HasSuffix(fileName, ".tx.down.sql")
			m.Down = newSQLMigration(filePath)
			continue
		}
		return fmt.Errorf(
			"file=%q must have extension .up.sql or .down.sql", fileName)
	}
	for _, m := range ms {
		c.addMigration(m)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5516
|
Format
|
train
|
// Format implements fmt.Formatter for Result.
// %+v prints a multi-line breakdown of each HTTP timing phase;
// %v, %s and %q print a single comma-separated line. Phases that depend on
// End having been called (content transfer, total) print "-" until then.
func (r Result) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			// Verbose form: one line per timing phase, in milliseconds.
			var buf bytes.Buffer
			fmt.Fprintf(&buf, "DNS lookup: %4d ms\n",
				int(r.DNSLookup/time.Millisecond))
			fmt.Fprintf(&buf, "TCP connection: %4d ms\n",
				int(r.TCPConnection/time.Millisecond))
			fmt.Fprintf(&buf, "TLS handshake: %4d ms\n",
				int(r.TLSHandshake/time.Millisecond))
			fmt.Fprintf(&buf, "Server processing: %4d ms\n",
				int(r.ServerProcessing/time.Millisecond))
			// r.total is only positive once the transfer has completed;
			// until then print a placeholder.
			if r.total > 0 {
				fmt.Fprintf(&buf, "Content transfer: %4d ms\n\n",
					int(r.contentTransfer/time.Millisecond))
			} else {
				fmt.Fprintf(&buf, "Content transfer: %4s ms\n\n", "-")
			}
			fmt.Fprintf(&buf, "Name Lookup: %4d ms\n",
				int(r.NameLookup/time.Millisecond))
			fmt.Fprintf(&buf, "Connect: %4d ms\n",
				int(r.Connect/time.Millisecond))
			fmt.Fprintf(&buf, "Pre Transfer: %4d ms\n",
				int(r.Pretransfer/time.Millisecond))
			fmt.Fprintf(&buf, "Start Transfer: %4d ms\n",
				int(r.StartTransfer/time.Millisecond))
			if r.total > 0 {
				fmt.Fprintf(&buf, "Total: %4d ms\n",
					int(r.total/time.Millisecond))
			} else {
				fmt.Fprintf(&buf, "Total: %4s ms\n", "-")
			}
			io.WriteString(s, buf.String())
			return
		}
		// Plain %v falls through to the compact single-line form.
		fallthrough
	case 's', 'q':
		d := r.durations()
		list := make([]string, 0, len(d))
		for k, v := range d {
			// Handle when End function is not called
			if (k == "ContentTransfer" || k == "Total") && r.t5.IsZero() {
				list = append(list, fmt.Sprintf("%s: - ms", k))
				continue
			}
			list = append(list, fmt.Sprintf("%s: %d ms", k, v/time.Millisecond))
		}
		io.WriteString(s, strings.Join(list, ", "))
	}
}
|
go
|
{
"resource": ""
}
|
q5517
|
WithHTTPStat
|
train
|
// WithHTTPStat returns a context derived from ctx that records HTTP timing
// information into r (delegates to withClientTrace). Attach the returned
// context to an *http.Request to collect stats for that request.
func WithHTTPStat(ctx context.Context, r *Result) context.Context {
	return withClientTrace(ctx, r)
}
|
go
|
{
"resource": ""
}
|
q5518
|
run
|
train
|
// run is the worker goroutine's main loop: it waits for batches of URLs on
// w.pop and processes each one, until a stop signal arrives or the optional
// idle TTL expires. The WaitGroup is released on exit.
func (w *worker) run() {
	defer func() {
		w.logFunc(LogInfo, "worker done.")
		w.wg.Done()
	}()
	// Enter loop to process URLs until stop signal is received
	for {
		var idleChan <-chan time.Time
		w.logFunc(LogInfo, "waiting for pop...")
		// Initialize the idle timeout channel, if required
		// (a nil channel never fires, so no TTL means no idle timeout).
		if w.opts.WorkerIdleTTL > 0 {
			idleChan = time.After(w.opts.WorkerIdleTTL)
		}
		select {
		case <-w.stop:
			w.logFunc(LogInfo, "stop signal received.")
			return
		case <-idleChan:
			// Notify the crawler of the idle death so it can drop this worker.
			w.logFunc(LogInfo, "idle timeout received.")
			w.sendResponse(nil, false, nil, true)
			return
		case batch := <-w.pop:
			// Got a batch of urls to crawl, loop and check at each iteration if a stop
			// is received.
			for _, ctx := range batch {
				w.logFunc(LogInfo, "popped: %s", ctx.url)
				if ctx.IsRobotsURL() {
					w.requestRobotsTxt(ctx)
				} else if w.isAllowedPerRobotsPolicies(ctx.url) {
					w.requestURL(ctx, ctx.HeadBeforeGet)
				} else {
					// Must still notify Crawler that this URL was processed, although not visited
					w.opts.Extender.Disallowed(ctx)
					w.sendResponse(ctx, false, nil, false)
				}
				// No need to check for idle timeout here, no idling while looping through
				// a batch of URLs.
				select {
				case <-w.stop:
					w.logFunc(LogInfo, "stop signal received.")
					return
				default:
					// Nothing, just continue...
				}
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5519
|
isAllowedPerRobotsPolicies
|
train
|
// isAllowedPerRobotsPolicies reports whether u's path is permitted by the
// robots.txt group cached on the worker. With no robots.txt group present,
// every URL is allowed.
func (w *worker) isAllowedPerRobotsPolicies(u *url.URL) bool {
	if w.robotsGroup == nil {
		// No robots.txt = everything is allowed
		return true
	}
	allowed := w.robotsGroup.Test(u.Path)
	if !allowed {
		w.logFunc(LogIgnored, "ignored on robots.txt policy: %s", u.String())
	}
	return allowed
}
|
go
|
{
"resource": ""
}
|
q5520
|
requestURL
|
train
|
// requestURL fetches ctx's URL (optionally with a preliminary HEAD) and, on
// any 2xx status, visits the page; other statuses are reported as errors.
// In both cases the crawler is notified via sendResponse.
func (w *worker) requestURL(ctx *URLContext, headRequest bool) {
	res, ok := w.fetchURL(ctx, w.opts.UserAgent, headRequest)
	if !ok {
		return
	}
	// Close the body on function end
	defer res.Body.Close()

	var harvested interface{}
	visited := false
	if res.StatusCode >= 200 && res.StatusCode < 300 {
		// Success, visit the URL
		harvested = w.visitURL(ctx, res)
		visited = true
	} else {
		// Error based on status code received
		w.opts.Extender.Error(newCrawlErrorMessage(ctx, res.Status, CekHttpStatusCode))
		w.logFunc(LogError, "ERROR status code for %s: %s", ctx.url, res.Status)
	}
	w.sendResponse(ctx, visited, harvested, false)
}
|
go
|
{
"resource": ""
}
|
q5521
|
requestRobotsTxt
|
train
|
// requestRobotsTxt resolves the worker's robots.txt group, either from data
// supplied by the Extender's cache or by fetching the robots.txt URL.
func (w *worker) requestRobotsTxt(ctx *URLContext) {
	// Ask if it should be fetched
	robData, reqRob := w.opts.Extender.RequestRobots(ctx, w.opts.RobotUserAgent)
	if !reqRob {
		w.logFunc(LogInfo, "using robots.txt from cache")
		w.robotsGroup = w.getRobotsTxtGroup(ctx, robData, nil)
		return
	}
	res, ok := w.fetchURL(ctx, w.opts.UserAgent, false)
	if !ok {
		return
	}
	// Close the body on function end
	defer res.Body.Close()
	w.robotsGroup = w.getRobotsTxtGroup(ctx, nil, res)
}
|
go
|
{
"resource": ""
}
|
q5522
|
getRobotsTxtGroup
|
train
|
// getRobotsTxtGroup parses robots.txt data — either raw bytes b or an HTTP
// response res — and returns the group matching the configured robot
// user-agent. A parse failure returns nil, which callers treat as
// "everything allowed".
func (w *worker) getRobotsTxtGroup(ctx *URLContext, b []byte, res *http.Response) (g *robotstxt.Group) {
	var data *robotstxt.RobotsData
	var e error
	if res != nil {
		// Buffer the body so it can be consumed twice: once by FromResponse
		// below, and once more by whoever reads res after FetchedRobots.
		var buf bytes.Buffer
		io.Copy(&buf, res.Body)
		res.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
		data, e = robotstxt.FromResponse(res)
		// Rewind the res.Body (by re-creating it from the bytes)
		res.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
		// Error or not, the robots.txt has been fetched, so notify
		w.opts.Extender.FetchedRobots(ctx, res)
	} else {
		data, e = robotstxt.FromBytes(b)
	}
	// If robots data cannot be parsed, will return nil, which will allow access by default.
	// Reasonable, since by default no robots.txt means full access, so invalid
	// robots.txt is similar behavior.
	if e != nil {
		w.opts.Extender.Error(newCrawlError(nil, e, CekParseRobots))
		w.logFunc(LogError, "ERROR parsing robots.txt for host %s: %s", w.host, e)
	} else {
		g = data.FindGroup(w.opts.RobotUserAgent)
	}
	return g
}
|
go
|
{
"resource": ""
}
|
q5523
|
setCrawlDelay
|
train
|
// setCrawlDelay asks the Extender to compute the delay before the next
// fetch — given the configured delay, the robots.txt crawl-delay (if any)
// and the previously used delay — and stores it in w.lastCrawlDelay.
func (w *worker) setCrawlDelay() {
	robDelay := time.Duration(0)
	if w.robotsGroup != nil {
		robDelay = w.robotsGroup.CrawlDelay
	}
	info := &DelayInfo{
		w.opts.CrawlDelay,
		robDelay,
		w.lastCrawlDelay,
	}
	w.lastCrawlDelay = w.opts.Extender.ComputeDelay(w.host, info, w.lastFetch)
	w.logFunc(LogInfo, "using crawl-delay: %v", w.lastCrawlDelay)
}
|
go
|
{
"resource": ""
}
|
q5524
|
fetchURL
|
train
|
// fetchURL requests ctx's URL through the Extender, honoring crawl delays.
// With headRequest true it first issues a HEAD and, if the Extender approves
// via RequestGet, loops once more for the GET. It returns the response and
// ok=true on success; on any failure (or HEAD rejection) the crawler is
// notified via sendResponse and ok is false. An ErrEnqueueRedirect-wrapped
// error enqueues the redirect target instead of reporting an error.
func (w *worker) fetchURL(ctx *URLContext, agent string, headRequest bool) (res *http.Response, ok bool) {
	var e error
	var silent bool
	// The loop runs at most twice: once for the HEAD, once for the GET.
	for {
		// Wait for crawl delay, if one is pending.
		w.logFunc(LogTrace, "waiting for crawl delay")
		if w.wait != nil {
			<-w.wait
			w.wait = nil
		}
		// Compute the next delay
		w.setCrawlDelay()
		// Compute the fetch duration
		now := time.Now()
		// Request the URL
		if res, e = w.opts.Extender.Fetch(ctx, agent, headRequest); e != nil {
			// Check if this is an ErrEnqueueRedirect, in which case we will enqueue
			// the redirect-to URL.
			// NOTE(review): this ok shadows the named result; it only scopes
			// the type assertion below.
			if ue, ok := e.(*url.Error); ok {
				// We have a *url.Error, check if it was returned because of an ErrEnqueueRedirect
				if ue.Err == ErrEnqueueRedirect {
					// Do not notify this error outside of this if block, this is not a
					// "real" error. We either enqueue the new URL, or fail to parse it,
					// and then stop processing the current URL.
					silent = true
					// Parse the URL in the context of the original URL (so that relative URLs are ok).
					// Absolute URLs that point to another host are ok too.
					if ur, e := ctx.url.Parse(ue.URL); e != nil {
						// Notify error
						w.opts.Extender.Error(newCrawlError(nil, e, CekParseRedirectURL))
						w.logFunc(LogError, "ERROR parsing redirect URL %s: %s", ue.URL, e)
					} else {
						w.logFunc(LogTrace, "redirect to %s from %s, linked from %s", ur, ctx.URL(), ctx.SourceURL())
						// Enqueue the redirect-to URL with the original source
						rCtx := ctx.cloneForRedirect(ur, w.opts.URLNormalizationFlags)
						w.enqueue <- rCtx
					}
				}
			}
			// No fetch, so set to nil
			w.lastFetch = nil
			if !silent {
				// Notify error
				w.opts.Extender.Error(newCrawlError(ctx, e, CekFetch))
				w.logFunc(LogError, "ERROR fetching %s: %s", ctx.url, e)
			}
			// Return from this URL crawl
			w.sendResponse(ctx, false, nil, false)
			return nil, false
		}
		// Get the fetch duration
		fetchDuration := time.Now().Sub(now)
		// Crawl delay starts now.
		w.wait = time.After(w.lastCrawlDelay)
		// Keep trace of this last fetch info
		w.lastFetch = &FetchInfo{
			ctx,
			fetchDuration,
			res.StatusCode,
			headRequest,
		}
		if headRequest {
			// Close the HEAD request's body
			defer res.Body.Close()
			// Next up is GET request, maybe
			headRequest = false
			// Ask caller if we should proceed with a GET
			if !w.opts.Extender.RequestGet(ctx, res) {
				w.logFunc(LogIgnored, "ignored on HEAD filter policy: %s", ctx.url)
				w.sendResponse(ctx, false, nil, false)
				ok = false
				break
			}
		} else {
			ok = true
			break
		}
	}
	return
}
|
go
|
{
"resource": ""
}
|
q5525
|
sendResponse
|
train
|
// sendResponse reports the outcome of processing a URL back to the crawler
// on the push channel. Robots.txt URLs are not reported (the crawler never
// expects a response for them), and nothing is sent once a stop signal has
// been received.
func (w *worker) sendResponse(ctx *URLContext, visited bool, harvested interface{}, idleDeath bool) {
	// Push harvested urls back to crawler, even if empty (uses the channel communication
	// to decrement reference count of pending URLs)
	if ctx == nil || !isRobotsURL(ctx.url) {
		// If a stop signal has been received, ignore the response, since the push
		// channel may be full and could block indefinitely.
		select {
		case <-w.stop:
			w.logFunc(LogInfo, "ignoring send response, will stop.")
			return
		default:
			// Nothing, just continue...
		}
		// No stop signal, send the response
		res := &workerResponse{
			ctx,
			visited,
			harvested,
			w.host,
			idleDeath,
		}
		w.push <- res
	}
}
|
go
|
{
"resource": ""
}
|
q5526
|
visitURL
|
train
|
// visitURL reads the response body, parses it into a goquery document and
// hands it to the Extender's Visit. When Visit asks for link discovery,
// the document's links are harvested here. Returns the harvested data
// (URLs or whatever Visit produced) after notifying Visited.
func (w *worker) visitURL(ctx *URLContext, res *http.Response) interface{} {
	var doc *goquery.Document
	var harvested interface{}
	var doLinks bool
	// Load a goquery document and call the visitor function
	if bd, e := ioutil.ReadAll(res.Body); e != nil {
		w.opts.Extender.Error(newCrawlError(ctx, e, CekReadBody))
		w.logFunc(LogError, "ERROR reading body %s: %s", ctx.url, e)
	} else {
		if node, e := html.Parse(bytes.NewBuffer(bd)); e != nil {
			w.opts.Extender.Error(newCrawlError(ctx, e, CekParseBody))
			w.logFunc(LogError, "ERROR parsing %s: %s", ctx.url, e)
		} else {
			doc = goquery.NewDocumentFromNode(node)
			doc.Url = res.Request.URL
		}
		// Re-assign the body so it can be consumed by the visitor function
		res.Body = ioutil.NopCloser(bytes.NewBuffer(bd))
	}
	// Visit the document (with nil goquery doc if failed to load)
	if harvested, doLinks = w.opts.Extender.Visit(ctx, res, doc); doLinks {
		// Links were not processed by the visitor, so process links
		if doc != nil {
			harvested = w.processLinks(doc)
		} else {
			w.opts.Extender.Error(newCrawlErrorMessage(ctx, "No goquery document to process links.", CekProcessLinks))
			w.logFunc(LogError, "ERROR processing links %s", ctx.url)
		}
	}
	// Notify that this URL has been visited
	w.opts.Extender.Visited(ctx, harvested)
	return harvested
}
|
go
|
{
"resource": ""
}
|
q5527
|
processLinks
|
train
|
// processLinks collects every a[href] in doc, applies any <base href>
// rewrite, and resolves each candidate against the document URL.
// Fragment-only ("#...") and unparsable hrefs are skipped.
func (w *worker) processLinks(doc *goquery.Document) (result []*url.URL) {
	base, _ := doc.Find("base[href]").Attr("href")
	hrefs := doc.Find("a[href]").Map(func(_ int, sel *goquery.Selection) string {
		href, _ := sel.Attr("href")
		if base != "" {
			href = handleBaseTag(doc.Url, base, href)
		}
		return href
	})
	for _, raw := range hrefs {
		// If href starts with "#", then it points to this same exact URL, ignore (will fail to parse anyway)
		if len(raw) == 0 || strings.HasPrefix(raw, "#") {
			continue
		}
		parsed, e := url.Parse(raw)
		if e != nil {
			w.logFunc(LogIgnored, "ignore on unparsable policy %s: %s", raw, e.Error())
			continue
		}
		result = append(result, doc.Url.ResolveReference(parsed))
	}
	return result
}
|
go
|
{
"resource": ""
}
|
q5528
|
Log
|
train
|
// Log writes msg to the standard logger when all bits of msgLevel are
// enabled in logFlags.
func (de *DefaultExtender) Log(logFlags LogFlags, msgLevel LogFlags, msg string) {
	enabled := logFlags&msgLevel == msgLevel
	if enabled {
		log.Println(msg)
	}
}
|
go
|
{
"resource": ""
}
|
q5529
|
ComputeDelay
|
train
|
// ComputeDelay returns the robots.txt crawl-delay when one is set (positive),
// otherwise the configured options delay.
func (de *DefaultExtender) ComputeDelay(host string, di *DelayInfo, lastFetch *FetchInfo) time.Duration {
	if di.RobotsDelay <= 0 {
		return di.OptsDelay
	}
	return di.RobotsDelay
}
|
go
|
{
"resource": ""
}
|
q5530
|
Visit
|
train
|
// Visit is the default visitor: it harvests nothing itself and asks the
// crawler to discover links from the document.
func (de *DefaultExtender) Visit(ctx *URLContext, res *http.Response, doc *goquery.Document) (harvested interface{}, findLinks bool) {
	findLinks = true
	return harvested, findLinks
}
|
go
|
{
"resource": ""
}
|
q5531
|
NewCrawlerWithOptions
|
train
|
// NewCrawlerWithOptions returns a Crawler initialized with the given
// pre-built Options.
func NewCrawlerWithOptions(opts *Options) *Crawler {
	return &Crawler{Options: opts}
}
|
go
|
{
"resource": ""
}
|
q5532
|
init
|
train
|
// init prepares the crawler's internal state from the seed URL contexts:
// the set of seed hosts, the visited map, the WaitGroup, and the stop,
// push and enqueue channels, sized according to the host count and the
// SameHostOnly / HostBufferFactor options.
func (c *Crawler) init(ctxs []*URLContext) {
	// Initialize the internal hosts map
	c.hosts = make(map[string]struct{}, len(ctxs))
	for _, ctx := range ctxs {
		// Add this normalized URL's host if it is not already there.
		if _, ok := c.hosts[ctx.normalizedURL.Host]; !ok {
			c.hosts[ctx.normalizedURL.Host] = struct{}{}
		}
	}
	hostCount := len(c.hosts)
	l := len(ctxs)
	c.logFunc(LogTrace, "init() - seeds length: %d", l)
	c.logFunc(LogTrace, "init() - host count: %d", hostCount)
	c.logFunc(LogInfo, "robot user-agent: %s", c.Options.RobotUserAgent)
	// Create a shiny new WaitGroup
	c.wg = new(sync.WaitGroup)
	// Initialize the visits fields
	c.visited = make(map[string]struct{}, l)
	c.pushPopRefCount, c.visits = 0, 0
	// Create the workers map and the push channel (the channel used by workers
	// to communicate back to the crawler)
	c.stop = make(chan struct{})
	if c.Options.SameHostOnly {
		c.workers, c.push = make(map[string]*worker, hostCount),
			make(chan *workerResponse, hostCount)
	} else {
		// Cross-host crawls may spawn workers beyond the seed hosts, so
		// scale the buffers by HostBufferFactor.
		c.workers, c.push = make(map[string]*worker, c.Options.HostBufferFactor*hostCount),
			make(chan *workerResponse, c.Options.HostBufferFactor*hostCount)
	}
	// Create and pass the enqueue channel
	c.enqueue = make(chan interface{}, c.Options.EnqueueChanBuffer)
	c.setExtenderEnqueueChan()
}
|
go
|
{
"resource": ""
}
|
q5533
|
setExtenderEnqueueChan
|
train
|
// setExtenderEnqueueChan uses reflection to look for an `EnqueueChan`
// field of exact type `chan<- interface{}` on the Extender and, when
// found, wires the crawler's enqueue channel into it so the Extender can
// push URLs directly. All failure modes are logged and non-fatal.
func (c *Crawler) setExtenderEnqueueChan() {
	defer func() {
		if err := recover(); err != nil {
			// Panic can happen if the field exists on a pointer struct, but that
			// pointer is nil.
			c.logFunc(LogError, "cannot set the enqueue channel: %s", err)
		}
	}()
	// Using reflection, check if the extender has a `EnqueueChan` field
	// of type `chan<- interface{}`. If it does, set it to the crawler's
	// enqueue channel.
	v := reflect.ValueOf(c.Options.Extender)
	el := v.Elem()
	if el.Kind() != reflect.Struct {
		c.logFunc(LogInfo, "extender is not a struct, cannot set the enqueue channel")
		return
	}
	ec := el.FieldByName("EnqueueChan")
	if !ec.IsValid() {
		c.logFunc(LogInfo, "extender.EnqueueChan does not exist, cannot set the enqueue channel")
		return
	}
	// The field must be a send-only channel...
	t := ec.Type()
	if t.Kind() != reflect.Chan || t.ChanDir() != reflect.SendDir {
		c.logFunc(LogInfo, "extender.EnqueueChan is not of type chan<-interface{}, cannot set the enqueue channel")
		return
	}
	// ...whose element type is exactly the empty interface.
	tt := t.Elem()
	if tt.Kind() != reflect.Interface || tt.NumMethod() != 0 {
		c.logFunc(LogInfo, "extender.EnqueueChan is not of type chan<-interface{}, cannot set the enqueue channel")
		return
	}
	src := reflect.ValueOf(c.enqueue)
	ec.Set(src)
}
|
go
|
{
"resource": ""
}
|
q5534
|
launchWorker
|
train
|
// launchWorker creates, registers and starts a goroutine worker dedicated
// to the host of ctx's normalized URL, returning the new worker.
func (c *Crawler) launchWorker(ctx *URLContext) *worker {
	// Worker index is 1-based, derived from the current worker count.
	index := len(c.workers) + 1
	w := &worker{
		host:    ctx.normalizedURL.Host,
		index:   index,
		push:    c.push,
		pop:     newPopChannel(),
		stop:    c.stop,
		enqueue: c.enqueue,
		wg:      c.wg,
		logFunc: getLogFunc(c.Options.Extender, c.Options.LogFlags, index),
		opts:    c.Options,
	}
	// Account for the worker goroutine before starting it.
	c.wg.Add(1)
	go w.run()
	c.logFunc(LogInfo, "worker %d launched for host %s", index, w.host)
	c.workers[w.host] = w
	return w
}
|
go
|
{
"resource": ""
}
|
q5535
|
isSameHost
|
train
|
// isSameHost reports whether ctx's URL shares the host of its source URL,
// or, when there is no source, whether it belongs to one of the seed hosts.
func (c *Crawler) isSameHost(ctx *URLContext) bool {
	if src := ctx.normalizedSourceURL; src != nil {
		return ctx.normalizedURL.Host == src.Host
	}
	_, seeded := c.hosts[ctx.normalizedURL.Host]
	return seeded
}
|
go
|
{
"resource": ""
}
|
q5536
|
enqueueUrls
|
train
|
// enqueueUrls filters the given URL contexts (Extender filter, absolute
// http(s) URL, same-host policy) and stacks the survivors on their host's
// worker, launching a new worker — with its robots.txt enqueued first —
// when the host has none yet. Returns the number of URLs enqueued.
func (c *Crawler) enqueueUrls(ctxs []*URLContext) (cnt int) {
	for _, ctx := range ctxs {
		var isVisited, enqueue bool
		// Cannot directly enqueue a robots.txt URL, since it is managed as a special case
		// in the worker (doesn't return a response to crawler).
		if ctx.IsRobotsURL() {
			continue
		}
		// Check if it has been visited before, using the normalized URL
		_, isVisited = c.visited[ctx.normalizedURL.String()]
		// Filter the URL
		if enqueue = c.Options.Extender.Filter(ctx, isVisited); !enqueue {
			// Filter said NOT to use this url, so continue with next
			c.logFunc(LogIgnored, "ignore on filter policy: %s", ctx.normalizedURL)
			continue
		}
		// Even if filter said to use the URL, it still MUST be absolute, http(s)-prefixed,
		// and comply with the same host policy if requested.
		if !ctx.normalizedURL.IsAbs() {
			// Only absolute URLs are processed, so ignore
			c.logFunc(LogIgnored, "ignore on absolute policy: %s", ctx.normalizedURL)
		} else if !strings.HasPrefix(ctx.normalizedURL.Scheme, "http") {
			c.logFunc(LogIgnored, "ignore on scheme policy: %s", ctx.normalizedURL)
		} else if c.Options.SameHostOnly && !c.isSameHost(ctx) {
			// Only allow URLs coming from the same host
			c.logFunc(LogIgnored, "ignore on same host policy: %s", ctx.normalizedURL)
		} else {
			// All is good, visit this URL (robots.txt verification is done by worker)
			// Possible caveat: if the normalization changes the host, it is possible
			// that the robots.txt fetched for this host would differ from the one for
			// the unnormalized host. However, this should be rare, and is a weird
			// behaviour from the host (i.e. why would site.com differ in its rules
			// from www.site.com) and can be fixed by using a different normalization
			// flag. So this is an acceptable behaviour for gocrawl.
			// Launch worker if required, based on the host of the normalized URL
			w, ok := c.workers[ctx.normalizedURL.Host]
			if !ok {
				// No worker exists for this host, launch a new one
				w = c.launchWorker(ctx)
				// Automatically enqueue the robots.txt URL as first in line
				if robCtx, e := ctx.getRobotsURLCtx(); e != nil {
					c.Options.Extender.Error(newCrawlError(ctx, e, CekParseRobots))
					c.logFunc(LogError, "ERROR parsing robots.txt from %s: %s", ctx.normalizedURL, e)
				} else {
					c.logFunc(LogEnqueued, "enqueue: %s", robCtx.url)
					c.Options.Extender.Enqueued(robCtx)
					w.pop.stack(robCtx)
				}
			}
			cnt++
			c.logFunc(LogEnqueued, "enqueue: %s", ctx.url)
			c.Options.Extender.Enqueued(ctx)
			w.pop.stack(ctx)
			// Track the pending URL so collectUrls knows work is outstanding.
			c.pushPopRefCount++
			// Once it is stacked, it WILL be visited eventually, so add it to the visited slice
			// (unless denied by robots.txt, but this is out of our hands, for all we
			// care, it is visited).
			if !isVisited {
				// The visited map works with the normalized URL
				c.visited[ctx.normalizedURL.String()] = struct{}{}
			}
		}
	}
	return
}
|
go
|
{
"resource": ""
}
|
q5537
|
collectUrls
|
train
|
// collectUrls is the crawler's main event loop. It receives worker
// responses on c.push and enqueue commands on c.enqueue, stopping when no
// work remains (refcount and enqueue buffer both empty), when MaxVisits is
// reached (ErrMaxVisits), or when the stop channel closes (ErrInterrupted).
// It waits for all worker goroutines before returning.
func (c *Crawler) collectUrls() error {
	defer func() {
		c.logFunc(LogInfo, "waiting for goroutines to complete...")
		c.wg.Wait()
		c.logFunc(LogInfo, "crawler done.")
	}()
	for {
		// By checking this after each channel reception, there is a bug if the worker
		// wants to reenqueue following an error or a redirection. The pushPopRefCount
		// temporarily gets to zero before the new URL is enqueued. Check the length
		// of the enqueue channel to see if this is really over, or just this temporary
		// state.
		//
		// Check if refcount is zero - MUST be before the select statement, so that if
		// no valid seeds are enqueued, the crawler stops.
		if c.pushPopRefCount == 0 && len(c.enqueue) == 0 {
			c.logFunc(LogInfo, "sending STOP signals...")
			close(c.stop)
			return nil
		}
		select {
		case res := <-c.push:
			// Received a response, check if it contains URLs to enqueue
			if res.visited {
				c.visits++
				if c.Options.MaxVisits > 0 && c.visits >= c.Options.MaxVisits {
					// Limit reached, request workers to stop
					c.logFunc(LogInfo, "sending STOP signals...")
					close(c.stop)
					return ErrMaxVisits
				}
			}
			if res.idleDeath {
				// The worker timed out from its Idle TTL delay, remove from active workers
				delete(c.workers, res.host)
				c.logFunc(LogInfo, "worker for host %s cleared on idle policy", res.host)
			} else {
				// Enqueue any URLs harvested from the visited page, then
				// mark this pending URL as done.
				c.enqueueUrls(c.toURLContexts(res.harvestedURLs, res.ctx.url))
				c.pushPopRefCount--
			}
		case enq := <-c.enqueue:
			// Received a command to enqueue a URL, proceed
			ctxs := c.toURLContexts(enq, nil)
			c.logFunc(LogTrace, "receive url(s) to enqueue %v", toStringArrayContextURL(ctxs))
			c.enqueueUrls(ctxs)
		case <-c.stop:
			return ErrInterrupted
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5538
|
Stop
|
train
|
// Stop requests the crawl to terminate by closing the stop channel.
// Safe to call multiple times: the recover absorbs the panic raised by a
// second close.
func (c *Crawler) Stop() {
	defer func() {
		if r := recover(); r != nil {
			c.logFunc(LogError, "error when manually stopping crawler: %s", r)
		}
	}()
	// this channel may be closed already
	close(c.stop)
}
|
go
|
{
"resource": ""
}
|
q5539
|
Error
|
train
|
// Error implements the error interface, preferring the wrapped error's
// message and falling back to the stored message text.
func (ce CrawlError) Error() string {
	if ce.Err == nil {
		return ce.msg
	}
	return ce.Err.Error()
}
|
go
|
{
"resource": ""
}
|
q5540
|
newCrawlError
|
train
|
// newCrawlError wraps an underlying error e into a CrawlError of the given
// kind, with no extra message text.
func newCrawlError(ctx *URLContext, e error, kind CrawlErrorKind) *CrawlError {
	ce := CrawlError{ctx, e, kind, ""}
	return &ce
}
|
go
|
{
"resource": ""
}
|
q5541
|
newCrawlErrorMessage
|
train
|
// newCrawlErrorMessage builds a CrawlError of the given kind carrying only
// a message string, with no underlying error.
func newCrawlErrorMessage(ctx *URLContext, msg string, kind CrawlErrorKind) *CrawlError {
	ce := CrawlError{ctx, nil, kind, msg}
	return &ce
}
|
go
|
{
"resource": ""
}
|
q5542
|
cloneForRedirect
|
train
|
// cloneForRedirect builds the URLContext for a redirect target dst. The new
// context keeps the original's HeadBeforeGet and State, uses copies (never
// aliases) of the source URLs — falling back to the current URL when there
// is no source — and stores dst both raw and normalized per normFlags.
func (uc *URLContext) cloneForRedirect(dst *url.URL, normFlags purell.NormalizationFlags) *URLContext {
	var src, normalizedSrc *url.URL
	if uc.sourceURL != nil {
		// Deep-copy so the clone never shares URL structs with uc.
		src = &url.URL{}
		*src = *uc.sourceURL
	}
	if src == nil && uc.url != nil {
		// if the current context doesn't have a source URL, use its URL as
		// source (e.g. for a seed URL that triggers a redirect)
		src = &url.URL{}
		*src = *uc.url
	}
	if uc.normalizedSourceURL != nil {
		normalizedSrc = &url.URL{}
		*normalizedSrc = *uc.normalizedSourceURL
	}
	if normalizedSrc == nil {
		normalizedSrc = &url.URL{}
		*normalizedSrc = *uc.normalizedURL
	}
	// Preserve the un-normalized destination before NormalizeURL mutates dst
	// in place.
	rawDst := &url.URL{}
	*rawDst = *dst
	purell.NormalizeURL(dst, normFlags)
	return &URLContext{
		HeadBeforeGet:       uc.HeadBeforeGet,
		State:               uc.State,
		url:                 rawDst,
		normalizedURL:       dst,
		sourceURL:           src,
		normalizedSourceURL: normalizedSrc,
	}
}
|
go
|
{
"resource": ""
}
|
q5543
|
NewOptions
|
train
|
// NewOptions returns an Options value populated with the package defaults,
// customizing only the Extender.
func NewOptions(ext Extender) *Options {
	// Use defaults except for Extender
	opts := Options{
		DefaultUserAgent,
		DefaultRobotUserAgent,
		0,
		DefaultEnqueueChanBuffer,
		DefaultHostBufferFactor,
		DefaultCrawlDelay,
		DefaultIdleTTL,
		true,
		false,
		DefaultNormalizationFlags,
		LogError,
		ext,
	}
	return &opts
}
|
go
|
{
"resource": ""
}
|
q5544
|
toCamelInitCase
|
train
|
// toCamelInitCase converts s to camel case: letters and digits are kept,
// '_', ' ' and '-' are dropped and capitalize the following lowercase
// letter, and initCase controls whether the very first lowercase letter is
// capitalized. All other characters are removed.
func toCamelInitCase(s string, initCase bool) string {
	s = addWordBoundariesToNumbers(s)
	s = strings.Trim(s, " ")
	// PERF FIX: build with strings.Builder instead of repeated string
	// concatenation, which was O(n^2) in the input length.
	var b strings.Builder
	b.Grow(len(s))
	capNext := initCase
	for _, v := range s {
		switch {
		case v >= 'A' && v <= 'Z', v >= '0' && v <= '9':
			b.WriteRune(v)
		case v >= 'a' && v <= 'z':
			if capNext {
				b.WriteString(strings.ToUpper(string(v)))
			} else {
				b.WriteRune(v)
			}
		}
		// Separators trigger capitalization of the next lowercase letter;
		// any other rune (including letters/digits) resets the flag.
		capNext = v == '_' || v == ' ' || v == '-'
	}
	return b.String()
}
|
go
|
{
"resource": ""
}
|
q5545
|
ToLowerCamel
|
train
|
// ToLowerCamel converts a string to lowerCamelCase.
func ToLowerCamel(s string) string {
	if s == "" {
		return s
	}
	// lower-case a leading ASCII capital before the generic conversion
	if first := rune(s[0]); first >= 'A' && first <= 'Z' {
		s = strings.ToLower(string(first)) + s[1:]
	}
	return toCamelInitCase(s, false)
}
|
go
|
{
"resource": ""
}
|
q5546
|
Call
|
train
|
// Call runs the named command with the given arguments and waits for it to
// finish.
func (s *Session) Call(name string, a ...interface{}) error {
	cmd := s.Command(name, a...)
	return cmd.Run()
}
|
go
|
{
"resource": ""
}
|
q5547
|
UnmarshalJSON
|
train
|
// UnmarshalJSON runs the session and decodes its standard output as JSON
// into data.
func (s *Session) UnmarshalJSON(data interface{}) error {
	var out bytes.Buffer
	s.Stdout = &out
	if err := s.Run(); err != nil {
		return err
	}
	return json.NewDecoder(&out).Decode(data)
}
|
go
|
{
"resource": ""
}
|
q5548
|
UnmarshalXML
|
train
|
// UnmarshalXML runs the session and decodes its standard output as XML into
// data.
func (s *Session) UnmarshalXML(data interface{}) error {
	var out bytes.Buffer
	s.Stdout = &out
	if err := s.Run(); err != nil {
		return err
	}
	return xml.NewDecoder(&out).Decode(data)
}
|
go
|
{
"resource": ""
}
|
q5549
|
NewEncoder
|
train
|
// NewEncoder creates a WAV encoder writing to w with the given audio
// parameters (sample rate in Hz, bit depth, channel count, and WAV audio
// format code).
func NewEncoder(w io.WriteSeeker, sampleRate, bitDepth, numChans, audioFormat int) *Encoder {
	e := &Encoder{
		w:              w,
		SampleRate:     sampleRate,
		NumChans:       numChans,
		BitDepth:       bitDepth,
		WavAudioFormat: audioFormat,
	}
	return e
}
|
go
|
{
"resource": ""
}
|
q5550
|
AddLE
|
train
|
// AddLE encodes src in little-endian byte order to the output and tracks the
// number of bytes written.
// NOTE(review): binary.Size returns -1 for unsupported types, which would
// corrupt WrittenBytes — assumed callers only pass fixed-size values.
func (e *Encoder) AddLE(src interface{}) error {
	e.WrittenBytes += binary.Size(src)
	return binary.Write(e.w, binary.LittleEndian, src)
}
|
go
|
{
"resource": ""
}
|
q5551
|
AddBE
|
train
|
// AddBE encodes src in big-endian byte order to the output and tracks the
// number of bytes written.
func (e *Encoder) AddBE(src interface{}) error {
	e.WrittenBytes += binary.Size(src)
	return binary.Write(e.w, binary.BigEndian, src)
}
|
go
|
{
"resource": ""
}
|
q5552
|
WriteFrame
|
train
|
// WriteFrame writes a single sample value to the output, lazily emitting the
// WAV header and the start of the data chunk on first use. The data chunk
// size is written as a placeholder (42) at e.pcmChunkSizePos and is expected
// to be patched later when the encoder is finalized.
func (e *Encoder) WriteFrame(value interface{}) error {
	if !e.wroteHeader {
		// NOTE(review): writeHeader's error is discarded here — confirm it is
		// surfaced elsewhere before relying on the output.
		e.writeHeader()
	}
	if !e.pcmChunkStarted {
		// sound header
		if err := e.AddLE(riff.DataFormatID); err != nil {
			return fmt.Errorf("error encoding sound header %v", err)
		}
		e.pcmChunkStarted = true
		// write a temporary chunksize
		e.pcmChunkSizePos = e.WrittenBytes
		if err := e.AddLE(uint32(42)); err != nil {
			return fmt.Errorf("%v when writing wav data chunk size header", err)
		}
	}
	e.frames++
	return e.AddLE(value)
}
|
go
|
{
"resource": ""
}
|
q5553
|
DecodeCueChunk
|
train
|
// DecodeCueChunk parses a "cue " RIFF chunk into d.Metadata.CuePoints.
// Chunks with a different ID are silently ignored. The whole chunk payload
// is buffered in memory before parsing.
func DecodeCueChunk(d *Decoder, ch *riff.Chunk) error {
	if ch == nil {
		return fmt.Errorf("can't decode a nil chunk")
	}
	if d == nil {
		return fmt.Errorf("nil decoder")
	}
	if ch.ID == CIDCue {
		// read the entire chunk in memory
		buf := make([]byte, ch.Size)
		var err error
		if _, err = ch.Read(buf); err != nil {
			return fmt.Errorf("failed to read the CUE chunk - %v", err)
		}
		r := bytes.NewReader(buf)
		// the payload starts with the number of cue points
		var nbrCues uint32
		if err := binary.Read(r, binary.LittleEndian, &nbrCues); err != nil {
			return fmt.Errorf("failed to read the number of cues - %v", err)
		}
		if nbrCues > 0 {
			if d.Metadata == nil {
				d.Metadata = &Metadata{}
			}
			d.Metadata.CuePoints = []*CuePoint{}
			scratch := make([]byte, 4)
			for i := uint32(0); i < nbrCues; i++ {
				c := &CuePoint{}
				// 4-byte cue point ID
				if _, err = r.Read(scratch); err != nil {
					return fmt.Errorf("failed to read the cue point ID")
				}
				copy(c.ID[:], scratch[:4])
				if err := binary.Read(r, binary.LittleEndian, &c.Position); err != nil {
					return err
				}
				// 4-byte ID of the data chunk this cue refers to
				if _, err = r.Read(scratch); err != nil {
					return fmt.Errorf("failed to read the data chunk id")
				}
				copy(c.DataChunkID[:], scratch[:4])
				if err := binary.Read(r, binary.LittleEndian, &c.ChunkStart); err != nil {
					return err
				}
				if err := binary.Read(r, binary.LittleEndian, &c.BlockStart); err != nil {
					return err
				}
				if err := binary.Read(r, binary.LittleEndian, &c.SampleOffset); err != nil {
					return err
				}
				d.Metadata.CuePoints = append(d.Metadata.CuePoints, c)
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5554
|
NewDecoder
|
train
|
// NewDecoder creates a WAV decoder reading from r.
func NewDecoder(r io.ReadSeeker) *Decoder {
	d := &Decoder{r: r}
	d.parser = riff.New(r)
	return d
}
|
go
|
{
"resource": ""
}
|
q5555
|
Seek
|
train
|
// Seek repositions the underlying reader; it forwards directly to the
// wrapped io.ReadSeeker.
func (d *Decoder) Seek(offset int64, whence int) (int64, error) {
	return d.r.Seek(offset, whence)
}
|
go
|
{
"resource": ""
}
|
q5556
|
Err
|
train
|
// Err returns the first error encountered by the decoder, treating a plain
// io.EOF as a non-error (end of input is expected).
func (d *Decoder) Err() error {
	if d.err != io.EOF {
		return d.err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q5557
|
EOF
|
train
|
// EOF reports whether the decoder is nil or has reached the end of its
// input. (Simplified from an if/return pair to a single boolean expression.)
func (d *Decoder) EOF() bool {
	return d == nil || d.err == io.EOF
}
|
go
|
{
"resource": ""
}
|
q5558
|
ReadMetadata
|
train
|
// ReadMetadata scans the remaining chunks (LIST, smpl, cue) and populates
// d.Metadata. It is a no-op when metadata is already loaded; non-EOF decode
// errors are stored in d.err.
func (d *Decoder) ReadMetadata() {
	if d.Metadata != nil {
		return
	}
	d.ReadInfo()
	if d.Err() != nil || d.Metadata != nil {
		return
	}
	var (
		chunk *riff.Chunk
		err   error
	)
readLoop:
	for err == nil {
		chunk, err = d.parser.NextChunk()
		if err != nil {
			break
		}
		switch chunk.ID {
		case CIDList:
			if err = DecodeListChunk(d, chunk); err != nil {
				if err != io.EOF {
					d.err = err
				}
			}
			if d.Metadata != nil && d.Metadata.SamplerInfo != nil {
				// we got everything we were looking for.
				// BUG FIX: the original bare `break` only exited the switch
				// statement (a no-op), so scanning continued; break the loop
				// itself as the comment intended.
				break readLoop
			}
		case CIDSmpl:
			if err = DecodeSamplerChunk(d, chunk); err != nil {
				if err != io.EOF {
					d.err = err
				}
			}
		case CIDCue:
			if err = DecodeCueChunk(d, chunk); err != nil {
				if err != io.EOF {
					d.err = err
				}
			}
		default:
			// skip chunks we don't care about
			chunk.Drain()
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5559
|
Duration
|
train
|
// Duration returns the total playback time of the underlying stream, or an
// error when called on a nil decoder or before a parser is attached.
func (d *Decoder) Duration() (time.Duration, error) {
	if d == nil || d.parser == nil {
		return 0, errors.New("can't calculate the duration of a nil pointer")
	}
	return d.parser.Duration()
}
|
go
|
{
"resource": ""
}
|
q5560
|
readHeaders
|
train
|
// readHeaders parses the RIFF container header and scans forward to the
// "fmt " chunk, populating the decoder's audio format fields. Chunks seen
// before "fmt " (LIST, smpl, or unknown e.g. bext) are decoded or drained
// and counted so the reader can be rewound to just after "fmt " before
// returning. It is a no-op once NumChans is already set.
// NOTE(review): the local err (including io.EOF from NextChunk) is dropped
// and only d.err is returned — confirm this is intended.
func (d *Decoder) readHeaders() error {
	if d == nil || d.NumChans > 0 {
		return nil
	}
	id, size, err := d.parser.IDnSize()
	if err != nil {
		return err
	}
	d.parser.ID = id
	if d.parser.ID != riff.RiffID {
		return fmt.Errorf("%s - %s", d.parser.ID, riff.ErrFmtNotSupported)
	}
	d.parser.Size = size
	if err := binary.Read(d.r, binary.BigEndian, &d.parser.Format); err != nil {
		return err
	}
	var chunk *riff.Chunk
	var rewindBytes int64
	for err == nil {
		chunk, err = d.parser.NextChunk()
		if err != nil {
			break
		}
		if chunk.ID == riff.FmtID {
			chunk.DecodeWavHeader(d.parser)
			d.NumChans = d.parser.NumChannels
			d.BitDepth = d.parser.BitsPerSample
			d.SampleRate = d.parser.SampleRate
			d.WavAudioFormat = d.parser.WavAudioFormat
			d.AvgBytesPerSec = d.parser.AvgBytesPerSec
			// rewind past the skipped chunks and this fmt chunk
			// (8 = chunk ID + size header bytes)
			if rewindBytes > 0 {
				d.r.Seek(-(rewindBytes + int64(chunk.Size) + 8), 1)
			}
			break
		} else if chunk.ID == CIDList {
			// The list chunk can be in the header or footer
			// because so many players don't support that chunk properly
			// it is recommended to have it at the end of the file.
			DecodeListChunk(d, chunk)
			// unexpected chunk order, might be a bext chunk
			rewindBytes += int64(chunk.Size) + 8
		} else if chunk.ID == CIDSmpl {
			DecodeSamplerChunk(d, chunk)
			rewindBytes += int64(chunk.Size) + 8
		} else {
			// unexpected chunk order, might be a bext chunk
			rewindBytes += int64(chunk.Size) + 8
			// drain the chunk
			io.CopyN(ioutil.Discard, d.r, int64(chunk.Size))
		}
	}
	return d.err
}
|
go
|
{
"resource": ""
}
|
q5561
|
sampleFloat64DecodeFunc
|
train
|
func sampleFloat64DecodeFunc(bitsPerSample int) (func([]byte) float64, error) {
bytesPerSample := bitsPerSample / 8
switch bytesPerSample {
case 1:
// 8bit values are unsigned
return func(s []byte) float64 {
return float64(uint8(s[0]))
}, nil
case 2:
return func(s []byte) float64 {
return float64(int(s[0]) + int(s[1])<<8)
}, nil
case 3:
return func(s []byte) float64 {
var output int32
output |= int32(s[2]) << 0
output |= int32(s[1]) << 8
output |= int32(s[0]) << 16
return float64(output)
}, nil
case 4:
// TODO: fix the float64 conversion (current int implementation)
return func(s []byte) float64 {
return float64(int(s[0]) + int(s[1])<<8 + int(s[2])<<16 + int(s[3])<<24)
}, nil
default:
return nil, fmt.Errorf("unhandled byte depth:%d", bitsPerSample)
}
}
|
go
|
{
"resource": ""
}
|
q5562
|
Diff
|
train
|
// Diff returns a slice of human-readable descriptions of the differences
// between a and b.
func Diff(a, b interface{}) []string {
	var lines []string
	Pdiff((*sbuf)(&lines), a, b)
	return lines
}
|
go
|
{
"resource": ""
}
|
q5563
|
Fdiff
|
train
|
func Fdiff(w io.Writer, a, b interface{}) {
Pdiff(&wprintfer{w}, a, b)
}
|
go
|
{
"resource": ""
}
|
q5564
|
Pdiff
|
train
|
// Pdiff reports the differences between a and b through the given Printfer.
func Pdiff(p Printfer, a, b interface{}) {
	printer := diffPrinter{w: p}
	printer.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}
|
go
|
{
"resource": ""
}
|
q5565
|
Ldiff
|
train
|
// Ldiff reports the differences between a and b through the given Logfer.
func Ldiff(l Logfer, a, b interface{}) {
	logger := &logprintfer{l}
	Pdiff(logger, a, b)
}
|
go
|
{
"resource": ""
}
|
q5566
|
keyEqual
|
train
|
func keyEqual(av, bv reflect.Value) bool {
if !av.IsValid() && !bv.IsValid() {
return true
}
if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
return false
}
switch kind := av.Kind(); kind {
case reflect.Bool:
a, b := av.Bool(), bv.Bool()
return a == b
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := av.Int(), bv.Int()
return a == b
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := av.Uint(), bv.Uint()
return a == b
case reflect.Float32, reflect.Float64:
a, b := av.Float(), bv.Float()
return a == b
case reflect.Complex64, reflect.Complex128:
a, b := av.Complex(), bv.Complex()
return a == b
case reflect.Array:
for i := 0; i < av.Len(); i++ {
if !keyEqual(av.Index(i), bv.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
a, b := av.Pointer(), bv.Pointer()
return a == b
case reflect.Interface:
return keyEqual(av.Elem(), bv.Elem())
case reflect.String:
a, b := av.String(), bv.String()
return a == b
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
if !keyEqual(av.Field(i), bv.Field(i)) {
return false
}
}
return true
default:
panic("invalid map key type " + av.Type().String())
}
}
|
go
|
{
"resource": ""
}
|
q5567
|
TokenErrorf
|
train
|
// TokenErrorf creates a parse error anchored at the given token's position.
func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
	pos := token.Pos
	return Errorf(pos, format, args...)
}
|
go
|
{
"resource": ""
}
|
q5568
|
ParseWithPosition
|
train
|
// ParseWithPosition tokenizes v starting at pos and parses the resulting
// token stream into an AST.
func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
	tokens := scanner.Scan(v, pos)
	return parser.Parse(tokens)
}
|
go
|
{
"resource": ""
}
|
q5569
|
Peek
|
train
|
// Peek returns the next token without consuming it, pulling one from the
// channel only when nothing is buffered.
func (p *Peeker) Peek() *Token {
	if tok := p.peeked; tok != nil {
		return tok
	}
	p.peeked = <-p.ch
	return p.peeked
}
|
go
|
{
"resource": ""
}
|
q5570
|
Read
|
train
|
// Read consumes and returns the next token. The EOF token is sticky: once
// reached it is returned forever without being consumed.
func (p *Peeker) Read() *Token {
	tok := p.Peek()
	if tok.Type != EOF {
		p.peeked = nil
	}
	return tok
}
|
go
|
{
"resource": ""
}
|
q5571
|
Close
|
train
|
// Close drains any remaining tokens so the producing goroutine can finish,
// then installs a synthetic EOF token so erroneous Peek/Read calls after
// Close still return something sensible.
// (Idiom fix: `for _ = range` replaced with `for range`.)
func (p *Peeker) Close() {
	for range p.ch {
		// discard
	}
	p.peeked = &Token{
		Type:    EOF,
		Content: "",
	}
}
|
go
|
{
"resource": ""
}
|
q5572
|
parseInterpolationSeq
|
train
|
// parseInterpolationSeq parses a sequence of literal text and ${...}
// interpolations. When quoted is true it expects an opening quote first and
// stops at the closing quote; otherwise it reads until EOF. An empty
// sequence yields a single empty-string literal, and a sequence that is one
// plain string literal is returned directly (not wrapped in an Output) so
// callers can easily detect interpolation-free strings.
func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
	literalType := scanner.LITERAL
	endType := scanner.EOF
	if quoted {
		// exceptions for quoted sequences
		literalType = scanner.STRING
		endType = scanner.CQUOTE
	}
	startPos := p.peeker.Peek().Pos
	if quoted {
		tok := p.peeker.Read()
		if tok.Type != scanner.OQUOTE {
			return nil, ExpectationError("open quote", tok)
		}
	}
	var exprs []ast.Node
	for {
		tok := p.peeker.Read()
		if tok.Type == endType {
			break
		}
		switch tok.Type {
		case literalType:
			val, err := p.parseStringToken(tok)
			if err != nil {
				return nil, err
			}
			exprs = append(exprs, &ast.LiteralNode{
				Value: val,
				Typex: ast.TypeString,
				Posx:  tok.Pos,
			})
		case scanner.BEGIN:
			expr, err := p.ParseInterpolation()
			if err != nil {
				return nil, err
			}
			exprs = append(exprs, expr)
		default:
			return nil, ExpectationError(`"${"`, tok)
		}
	}
	if len(exprs) == 0 {
		// If we have no parts at all then the input must've
		// been an empty string.
		exprs = append(exprs, &ast.LiteralNode{
			Value: "",
			Typex: ast.TypeString,
			Posx:  startPos,
		})
	}
	// As a special case, if our "Output" contains only one expression
	// and it's a literal string then we'll hoist it up to be our
	// direct return value, so callers can easily recognize a string
	// that has no interpolations at all.
	if len(exprs) == 1 {
		if lit, ok := exprs[0].(*ast.LiteralNode); ok {
			if lit.Typex == ast.TypeString {
				return lit, nil
			}
		}
	}
	return &ast.Output{
		Exprs: exprs,
		Posx:  startPos,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5573
|
parseStringToken
|
train
|
// parseStringToken decodes the raw content of a LITERAL or STRING token.
// "$$" always collapses to a single "$". Backslash escapes (\\, \n, \") are
// processed only for STRING tokens (quoted context); an unknown or dangling
// escape is reported with a position adjusted to the offending rune.
func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
	var backslashes bool
	switch tok.Type {
	case scanner.LITERAL:
		backslashes = false
	case scanner.STRING:
		backslashes = true
	default:
		panic("unsupported string token type")
	}
	raw := []byte(tok.Content)
	buf := make([]byte, 0, len(raw))
	for i := 0; i < len(raw); i++ {
		b := raw[i]
		more := len(raw) > (i + 1)
		if b == '$' {
			if more && raw[i+1] == '$' {
				// skip over the second dollar sign
				i++
			}
		} else if backslashes && b == '\\' {
			if !more {
				return "", Errorf(
					ast.Pos{
						Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
						Line:   tok.Pos.Line,
					},
					`unfinished backslash escape sequence`,
				)
			}
			escapeType := raw[i+1]
			switch escapeType {
			case '\\':
				// skip over the second slash
				i++
			case 'n':
				b = '\n'
				i++
			case '"':
				b = '"'
				i++
			default:
				return "", Errorf(
					ast.Pos{
						Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
						Line:   tok.Pos.Line,
					},
					`invalid backslash escape sequence`,
				)
			}
		}
		buf = append(buf, b)
	}
	return string(buf), nil
}
|
go
|
{
"resource": ""
}
|
q5574
|
parseBinaryOps
|
train
|
// parseBinaryOps parses binary-operator expressions by precedence climbing:
// ops[0] is the operator set handled at this level, and the remaining levels
// are parsed recursively. Operators at the same level associate to the left.
// (Idiom fix: removed the redundant `else` after the return at the end.)
func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
	if len(ops) == 0 {
		// We've run out of operators, so now we'll just try to parse a term.
		return p.ParseExpressionTerm()
	}
	thisLevel := ops[0]
	remaining := ops[1:]
	startPos := p.peeker.Peek().Pos
	var lhs, rhs ast.Node
	operator := ast.ArithmeticOpInvalid
	var err error
	// parse a term that might be the first operand of a binary
	// expression or it might just be a standalone term, but
	// we won't know until we've parsed it and can look ahead
	// to see if there's an operator token.
	lhs, err = p.parseBinaryOps(remaining)
	if err != nil {
		return nil, err
	}
	// We'll keep eating up arithmetic operators until we run
	// out, so that operators with the same precedence will combine in a
	// left-associative manner:
	// a+b+c => (a+b)+c, not a+(b+c)
	//
	// Should we later want to have right-associative operators, a way
	// to achieve that would be to call back up to ParseExpression here
	// instead of iteratively parsing only the remaining operators.
	for {
		next := p.peeker.Peek()
		var newOperator ast.ArithmeticOp
		var ok bool
		if newOperator, ok = thisLevel[next.Type]; !ok {
			break
		}
		// Are we extending an expression started on
		// the previous iteration?
		if operator != ast.ArithmeticOpInvalid {
			lhs = &ast.Arithmetic{
				Op:    operator,
				Exprs: []ast.Node{lhs, rhs},
				Posx:  startPos,
			}
		}
		operator = newOperator
		p.peeker.Read() // eat operator token
		rhs, err = p.parseBinaryOps(remaining)
		if err != nil {
			return nil, err
		}
	}
	// No operator was seen at this level, so the bare term stands alone.
	if operator == ast.ArithmeticOpInvalid {
		return lhs, nil
	}
	return &ast.Arithmetic{
		Op:    operator,
		Exprs: []ast.Node{lhs, rhs},
		Posx:  startPos,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5575
|
internalEval
|
train
|
// internalEval evaluates an AST: it registers the builtin functions and the
// implicit type-conversion table, runs the caller's semantic checks plus the
// identifier and type checks, then executes the tree with an evalVisitor.
func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
	// Copy the scope so we can add our builtins
	if config == nil {
		config = new(EvalConfig)
	}
	scope := registerBuiltins(config.GlobalScope)
	// which builtin performs each implicit from-type -> to-type conversion
	implicitMap := map[ast.Type]map[ast.Type]string{
		ast.TypeFloat: {
			ast.TypeInt:    "__builtin_FloatToInt",
			ast.TypeString: "__builtin_FloatToString",
		},
		ast.TypeInt: {
			ast.TypeFloat:  "__builtin_IntToFloat",
			ast.TypeString: "__builtin_IntToString",
		},
		ast.TypeString: {
			ast.TypeInt:   "__builtin_StringToInt",
			ast.TypeFloat: "__builtin_StringToFloat",
			ast.TypeBool:  "__builtin_StringToBool",
		},
		ast.TypeBool: {
			ast.TypeString: "__builtin_BoolToString",
		},
	}
	// Build our own semantic checks that we always run
	tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
	ic := &IdentifierCheck{Scope: scope}
	// Build up the semantic checks for execution; user checks run first
	checks := make(
		[]SemanticChecker,
		len(config.SemanticChecks),
		len(config.SemanticChecks)+2)
	copy(checks, config.SemanticChecks)
	checks = append(checks, ic.Visit)
	checks = append(checks, tv.Visit)
	// Run the semantic checks
	for _, check := range checks {
		if err := check(root); err != nil {
			return nil, ast.TypeInvalid, err
		}
	}
	// Execute
	v := &evalVisitor{Scope: scope}
	return v.Visit(root)
}
|
go
|
{
"resource": ""
}
|
q5576
|
evalNode
|
train
|
// evalNode maps an AST node to its EvalNode wrapper, falling back to the
// node itself when it already implements EvalNode.
func evalNode(raw ast.Node) (EvalNode, error) {
	switch n := raw.(type) {
	case *ast.Index:
		return &evalIndex{n}, nil
	case *ast.Call:
		return &evalCall{n}, nil
	case *ast.Conditional:
		return &evalConditional{n}, nil
	case *ast.Output:
		return &evalOutput{n}, nil
	case *ast.LiteralNode:
		return &evalLiteralNode{n}, nil
	case *ast.VariableAccess:
		return &evalVariableAccess{n}, nil
	default:
		if en, ok := n.(EvalNode); ok {
			return en, nil
		}
		return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
	}
}
|
go
|
{
"resource": ""
}
|
q5577
|
NewLiteralNode
|
train
|
// NewLiteralNode builds a LiteralNode for a native Go value, mapping its
// reflect.Kind to the corresponding HIL type. Unsupported kinds return an
// error.
func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
	var typ Type
	switch reflect.TypeOf(value).Kind() {
	case reflect.Bool:
		typ = TypeBool
	case reflect.Int:
		typ = TypeInt
	case reflect.Float64:
		typ = TypeFloat
	case reflect.String:
		typ = TypeString
	default:
		return nil, fmt.Errorf("unsupported literal node type: %T", value)
	}
	return &LiteralNode{
		Value: value,
		Typex: typ,
		Posx:  pos,
	}, nil
}
|
go
|
{
"resource": ""
}
|
q5578
|
MustNewLiteralNode
|
train
|
// MustNewLiteralNode is like NewLiteralNode but panics on unsupported value
// types; intended for statically-known literals.
func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
	n, err := NewLiteralNode(value, pos)
	if err != nil {
		panic(err)
	}
	return n
}
|
go
|
{
"resource": ""
}
|
q5579
|
IsUnknown
|
train
|
// IsUnknown reports whether this literal holds the unknown value.
func (n *LiteralNode) IsUnknown() bool {
	v := Variable{
		Type:  n.Typex,
		Value: n.Value,
	}
	return IsUnknown(v)
}
|
go
|
{
"resource": ""
}
|
q5580
|
IsUnknown
|
train
|
// IsUnknown reports whether v is the unknown value, or — for list and map
// containers — whether any element recursively contains an unknown value.
// (Cleanup: removed the pointless empty `default:` case.)
func IsUnknown(v Variable) bool {
	// If it is unknown itself, return true
	if v.Type == TypeUnknown {
		return true
	}
	// For container types, any unknown element makes the whole value unknown
	switch v.Type {
	case TypeList:
		for _, el := range v.Value.([]Variable) {
			if IsUnknown(el) {
				return true
			}
		}
	case TypeMap:
		for _, el := range v.Value.(map[string]Variable) {
			if IsUnknown(el) {
				return true
			}
		}
	}
	// Not a container type or survived the above checks
	return false
}
|
go
|
{
"resource": ""
}
|
q5581
|
Walk
|
train
|
// Walk traverses v with reflectwalk, invoking cb for each interpolation
// found.
func Walk(v interface{}, cb WalkFn) error {
	return reflectwalk.Walk(v, &interpolationWalker{F: cb})
}
|
go
|
{
"resource": ""
}
|
q5582
|
Scan
|
train
|
// Scan tokenizes s starting at startPos, delivering tokens on the returned
// channel from a background goroutine.
func Scan(s string, startPos ast.Pos) <-chan *Token {
	tokens := make(chan *Token)
	go scan(s, tokens, startPos)
	return tokens
}
|
go
|
{
"resource": ""
}
|
q5583
|
scanIdentifier
|
train
|
// scanIdentifier reads an identifier (letters, digits, marks, '_', '-', '.',
// and '*' splat segments) from the start of s, returning the matched prefix
// and its length in runes. A '*' is only part of the identifier when it sits
// between periods (e.g. "a.*.b").
func scanIdentifier(s string) (string, int) {
	byteLen := 0
	runeLen := 0
	for {
		if byteLen >= len(s) {
			break
		}
		nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
		if !(nextRune == '_' ||
			nextRune == '-' ||
			nextRune == '.' ||
			nextRune == '*' ||
			unicode.IsNumber(nextRune) ||
			unicode.IsLetter(nextRune) ||
			unicode.IsMark(nextRune)) {
			break
		}
		// A star must follow a period to belong to the identifier.
		// BUG FIX: the byteLen == 0 guard prevents an index-out-of-range
		// panic when the input starts with '*'.
		if nextRune == '*' && (byteLen == 0 || s[byteLen-1] != '.') {
			break
		}
		// If our previous character was a star, then the current must
		// be period. Otherwise, undo that and exit.
		// NOTE(review): runeLen is not decremented here, so the rune count
		// can exceed the returned string — confirm callers expect this.
		if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
			byteLen--
			if s[byteLen-1] == '.' {
				byteLen--
			}
			break
		}
		byteLen = byteLen + size
		runeLen = runeLen + 1
	}
	return s[:byteLen], runeLen
}
|
go
|
{
"resource": ""
}
|
q5584
|
NewVariable
|
train
|
// NewVariable converts a native Go value into a Variable. Only strings are
// currently supported; any other kind yields an "Unknown type" error (with
// result.Value still set).
// Note: the `v` declared in the switch initializer shadows the parameter
// only within the switch statement, so result.Value below stores the
// original interface{} argument, not the reflect.Value.
func NewVariable(v interface{}) (result Variable, err error) {
	switch v := reflect.ValueOf(v); v.Kind() {
	case reflect.String:
		result.Type = TypeString
	default:
		err = fmt.Errorf("Unknown type: %s", v.Kind())
	}
	result.Value = v
	return
}
|
go
|
{
"resource": ""
}
|
q5585
|
String
|
train
|
// String implements fmt.Stringer, showing the variable's type and value.
func (v Variable) String() string {
	const format = "{Variable (%s): %+v}"
	return fmt.Sprintf(format, v.Type, v.Value)
}
|
go
|
{
"resource": ""
}
|
q5586
|
NewAvx512
|
train
|
// NewAvx512 returns a hash.Hash backed by the shared AVX-512 server, tagged
// with a fresh unique id so the server can track its state.
func NewAvx512(a512srv *Avx512Server) hash.Hash {
	d := &Avx512Digest{a512srv: a512srv}
	d.uid = atomic.AddUint64(&uidCounter, 1)
	return d
}
|
go
|
{
"resource": ""
}
|
q5587
|
Reset
|
train
|
// Reset clears the local buffering state and tells the server to drop any
// state it holds for this digest's uid.
func (d *Avx512Digest) Reset() {
	d.nx = 0
	d.len = 0
	d.final = false
	d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true}
}
|
go
|
{
"resource": ""
}
|
q5588
|
Write
|
train
|
// Write feeds p into the hashing pipeline: bytes are buffered into 64-byte
// chunks in d.x, and complete chunks are sent to the AVX-512 server keyed by
// this digest's uid. It errors after finalization (call Reset first).
func (d *Avx512Digest) Write(p []byte) (nn int, err error) {
	if d.final {
		return 0, errors.New("Avx512Digest already finalized. Reset first before writing again")
	}
	nn = len(p)
	d.len += uint64(nn)
	if d.nx > 0 {
		// top up the partial chunk left over from a previous Write
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == chunk {
			d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]}
			d.nx = 0
		}
		p = p[n:]
	}
	if len(p) >= chunk {
		// send the largest chunk-aligned prefix in one message
		n := len(p) &^ (chunk - 1)
		d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]}
		p = p[n:]
	}
	if len(p) > 0 {
		// stash the unaligned tail for the next Write or Sum
		d.nx = copy(d.x[:], p)
	}
	return
}
|
go
|
{
"resource": ""
}
|
q5589
|
Sum
|
train
|
// Sum appends the SHA-256 checksum of everything written so far to in and
// returns it. The first call finalizes the stream (standard SHA-256 padding
// plus the 64-bit big-endian bit length) via the shared AVX-512 server;
// later calls return the cached result. Reset must be called before writing
// again. (Cleanup: the local `len` shadowed the builtin; renamed to msgLen.)
func (d *Avx512Digest) Sum(in []byte) (result []byte) {
	if d.final {
		return append(in, d.result[:]...)
	}
	// NOTE(review): this initial allocation is discarded by the appends below.
	trail := make([]byte, 0, 128)
	msgLen := d.len
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
	var tmp [64]byte
	tmp[0] = 0x80
	if msgLen%64 < 56 {
		trail = append(d.x[:d.nx], tmp[0:56-msgLen%64]...)
	} else {
		trail = append(d.x[:d.nx], tmp[0:64+56-msgLen%64]...)
	}
	d.nx = 0
	// Length in bits, big-endian, in the final 8 bytes.
	msgLen <<= 3
	for i := uint(0); i < 8; i++ {
		tmp[i] = byte(msgLen >> (56 - 8*i))
	}
	trail = append(trail, tmp[0:8]...)
	sumCh := make(chan [Size]byte)
	d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh}
	d.result = <-sumCh
	d.final = true
	return append(in, d.result[:]...)
}
|
go
|
{
"resource": ""
}
|
q5590
|
blockAvx512
|
train
|
// blockAvx512 runs one 16-lane SHA-256 pass over the inputs (mask selects
// the active lanes) and returns each lane's digest.
func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte {
	var scratch [512]byte
	sha256X16Avx512(digests, &scratch, &table, mask, input)
	var out [16][Size]byte
	for lane := range out {
		out[lane] = getDigest(lane, digests[:])
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q5591
|
NewAvx512Server
|
train
|
// NewAvx512Server creates the shared hashing server and starts its single
// processing goroutine.
func NewAvx512Server() *Avx512Server {
	a512srv := &Avx512Server{
		digests:  make(map[uint64][Size]byte),
		blocksCh: make(chan blockInput),
	}
	// a single goroutine owns all server state
	go a512srv.Process()
	return a512srv
}
|
go
|
{
"resource": ""
}
|
q5592
|
Process
|
train
|
// Process is the server's event loop and the only goroutine allowed to
// touch lanes/digests. Incoming messages are slotted into one of 16 lanes
// by uid; a block computation is triggered when a lane would be reused, when
// all lanes are full, or after a short idle timeout.
func (a512srv *Avx512Server) Process() {
	for {
		select {
		case block := <-a512srv.blocksCh:
			if block.reset {
				a512srv.reset(block.uid)
				continue
			}
			index := block.uid & 0xf
			if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs
				a512srv.blocks()
			}
			a512srv.totalIn++
			a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg}
			if block.final {
				// remember where to deliver the finished sum
				a512srv.lanes[index].outputCh = block.sumCh
			}
			if a512srv.totalIn == len(a512srv.lanes) {
				// all 16 lanes filled: compute immediately
				a512srv.blocks()
			}
		// TODO: test with larger timeout
		// NOTE(review): time.After allocates a fresh timer every iteration;
		// a reusable time.Timer would avoid that on this hot loop.
		case <-time.After(1 * time.Microsecond):
			for _, lane := range a512srv.lanes {
				if lane.block != nil { // check if there is any input to process
					a512srv.blocks()
					break // we are done
				}
			}
		}
	}
}
|
go
|
{
"resource": ""
}
|
q5593
|
reset
|
train
|
// reset drops any pending lane input for uid and forgets its running digest.
func (a512srv *Avx512Server) reset(uid uint64) {
	// Drop a message still waiting in a lane, if any.
	for i := range a512srv.lanes {
		lane := a512srv.lanes[i]
		if lane.uid == uid && lane.block != nil {
			a512srv.lanes[i] = Avx512LaneInfo{}
			a512srv.totalIn--
		}
	}
	// Forget the running digest for this stream.
	delete(a512srv.digests, uid)
}
|
go
|
{
"resource": ""
}
|
q5594
|
blocks
|
train
|
// blocks runs one combined 16-lane SHA-256 computation over all pending
// lane inputs, stores each lane's running digest by uid, clears the lanes,
// and delivers final sums to lanes that requested one.
func (a512srv *Avx512Server) blocks() (err error) {
	inputs := [16][]byte{}
	for i := range inputs {
		inputs[i] = a512srv.lanes[i].block
	}
	// mask marks which lanes actually carry data for the SIMD kernel
	mask := expandMask(genMask(inputs))
	outputs := blockAvx512(a512srv.getDigests(), inputs, mask)
	a512srv.totalIn = 0
	for i := 0; i < len(outputs); i++ {
		uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh
		a512srv.digests[uid] = outputs[i]
		a512srv.lanes[i] = Avx512LaneInfo{}
		if outputCh != nil {
			// Send back result
			outputCh <- outputs[i]
			delete(a512srv.digests, uid) // Delete entry from hashmap
		}
	}
	return
}
|
go
|
{
"resource": ""
}
|
q5595
|
Sum
|
train
|
// Sum finalizes the stream identified by uid with the trailing bytes p and
// returns the resulting SHA-256 digest.
func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte {
	result := make(chan [32]byte)
	a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: result}
	return <-result
}
|
go
|
{
"resource": ""
}
|
q5596
|
Reset
|
train
|
// Reset restores the digest to its initial state: the SHA-256 IV constants,
// an empty chunk buffer, and zero total length.
func (d *digest) Reset() {
	d.h[0] = init0
	d.h[1] = init1
	d.h[2] = init2
	d.h[3] = init3
	d.h[4] = init4
	d.h[5] = init5
	d.h[6] = init6
	d.h[7] = init7
	d.nx = 0
	d.len = 0
}
|
go
|
{
"resource": ""
}
|
q5597
|
New
|
train
|
// New returns a SHA-256 hash.Hash using the SIMD-accelerated implementation
// when CPU features allow, falling back to the standard library otherwise.
func New() hash.Hash {
	if blockfunc == blockfuncGeneric {
		// No accelerated features detected: use the stdlib implementation.
		return sha256.New()
	}
	d := new(digest)
	d.Reset()
	return d
}
|
go
|
{
"resource": ""
}
|
q5598
|
Sum256
|
train
|
// Sum256 returns the SHA-256 checksum of data in one shot.
func Sum256(data []byte) [Size]byte {
	var d digest
	d.Reset()
	d.Write(data)
	return d.checkSum()
}
|
go
|
{
"resource": ""
}
|
q5599
|
Sum
|
train
|
// Sum appends the current checksum to in without disturbing the running
// state, so the caller can keep writing afterwards.
func (d *digest) Sum(in []byte) []byte {
	// Finalize a copy; checkSum mutates the digest it runs on.
	dup := *d
	sum := dup.checkSum()
	return append(in, sum[:]...)
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.