// web-crawl-v1 / code/turbo_crawler.go
// Uploaded by OpenTransformer via huggingface_hub (commit a13d04f, verified).
package main
import (
"bufio"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
)
// Record is one crawled page, serialized as a single JSON Lines row.
type Record struct {
	URL  string `json:"url"`  // page URL that was fetched
	Text string `json:"text"` // extracted visible text (capped at 50000 bytes by extractText)
	TS   string `json:"ts"`   // fetch timestamp, UTC, "2006-01-02T15:04:05" layout
}
var (
	// visited maps the short hash of a URL (see hash) to true once a worker
	// has claimed it; guarded by visitedMu.
	visited   = make(map[string]bool)
	visitedMu sync.RWMutex
	// queue is the shared frontier of URLs to crawl. Sends are non-blocking
	// in worker (links are dropped when full), so the large buffer is the
	// effective frontier size.
	queue = make(chan string, 1000000)
	// stats counters are updated only via sync/atomic.
	stats struct {
		pages, bytes, errors int64
	}
	// client is shared by all workers; keep-alive pooling is tuned for a
	// high worker count, and the 5s timeout bounds each fetch.
	client = &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:        1000,
			MaxIdleConnsPerHost: 50,
			IdleConnTimeout:     30 * time.Second,
		},
	}
	// Regex-based HTML handling (no external parser):
	linkRe   = regexp.MustCompile(`href=["']([^"']+)["']`)                                    // href attribute values
	scriptRe = regexp.MustCompile(`(?is)<script[^>]*>.*?</script>|<style[^>]*>.*?</style>`) // script/style blocks incl. contents
	tagRe    = regexp.MustCompile(`<[^>]+>`)                                                 // any remaining tag
	spaceRe  = regexp.MustCompile(`\s+`)                                                     // whitespace runs -> single space
)
// hash returns a compact 16-hex-character dedup key for s: the hex encoding
// of the first 8 bytes of its MD5 digest. Dedup only — not for security.
func hash(s string) string {
	digest := md5.Sum([]byte(s))
	shortKey := digest[:8]
	return hex.EncodeToString(shortKey)
}
// extractText strips markup from an HTML document and returns its visible
// text, collapsed to single spaces, trimmed, and capped at 50000 bytes.
//
// The cap is applied on a UTF-8 rune boundary: the original code sliced at a
// raw byte offset, which could split a multi-byte character and emit invalid
// UTF-8 into the output JSONL.
func extractText(html string) string {
	// Remove <script>/<style> blocks first so their contents don't leak
	// into the text, then drop all remaining tags.
	text := scriptRe.ReplaceAllString(html, " ")
	text = tagRe.ReplaceAllString(text, " ")
	text = spaceRe.ReplaceAllString(text, " ")
	text = strings.TrimSpace(text)
	const maxLen = 50000
	if len(text) > maxLen {
		cut := maxLen
		// Back up over UTF-8 continuation bytes (10xxxxxx) so the cut
		// lands on the first byte of a rune.
		for cut > 0 && text[cut]&0xC0 == 0x80 {
			cut--
		}
		text = text[:cut]
	}
	return text
}
// extractLinks returns up to 100 absolute http(s) URLs referenced by href
// attributes in html, resolved against baseURL.
//
// Fragments are stripped from resolved URLs: "page#a" and "page#b" are the
// same document, and keeping the fragment made them hash to distinct dedup
// keys, causing duplicate fetches of the same page.
func extractLinks(html, baseURL string) []string {
	base, err := url.Parse(baseURL)
	if err != nil {
		return nil
	}
	var links []string
	// Cap at 100 hrefs per page to keep link-dense pages from flooding the queue.
	for _, m := range linkRe.FindAllStringSubmatch(html, 100) {
		if len(m) < 2 {
			continue
		}
		u, err := url.Parse(m[1])
		if err != nil {
			continue
		}
		resolved := base.ResolveReference(u)
		// Only crawlable schemes; this also filters javascript:/mailto: hrefs.
		if resolved.Scheme != "http" && resolved.Scheme != "https" {
			continue
		}
		resolved.Fragment = "" // same page regardless of anchor
		links = append(links, resolved.String())
	}
	return links
}
// worker pulls URLs from the shared queue, fetches each page, emits a Record
// on output for pages with enough extracted text, and feeds discovered links
// back into the queue. It runs until queue is closed (in practice, forever).
//
// id only identifies the goroutine; output must not be closed while any
// worker is running.
func worker(id int, output chan<- Record) {
	for urlStr := range queue {
		h := hash(urlStr)
		// Check-and-claim under a single write lock. The original
		// RLock-check followed by a separate Lock-set was a TOCTOU race:
		// two workers could both pass the read check and fetch the same URL.
		visitedMu.Lock()
		if visited[h] {
			visitedMu.Unlock()
			continue
		}
		visited[h] = true
		visitedMu.Unlock()

		resp, err := client.Get(urlStr)
		if err != nil {
			atomic.AddInt64(&stats.errors, 1)
			continue
		}
		ct := resp.Header.Get("Content-Type")
		if resp.StatusCode != http.StatusOK || !strings.Contains(ct, "text/html") {
			// Drain (bounded) before closing so the transport can reuse
			// the keep-alive connection.
			io.Copy(io.Discard, io.LimitReader(resp.Body, 1<<20))
			resp.Body.Close()
			atomic.AddInt64(&stats.errors, 1)
			continue
		}
		// Read at most 1 MiB, then drain any remainder (also bounded) so
		// the connection goes back to the idle pool instead of being torn down.
		body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
		io.Copy(io.Discard, io.LimitReader(resp.Body, 1<<20))
		resp.Body.Close()

		html := string(body)
		text := extractText(html)
		if len(text) > 200 { // skip near-empty pages (nav shells, error pages)
			output <- Record{URL: urlStr, Text: text, TS: time.Now().UTC().Format("2006-01-02T15:04:05")}
			atomic.AddInt64(&stats.pages, 1)
			atomic.AddInt64(&stats.bytes, int64(len(text)))
		}
		for _, link := range extractLinks(html, urlStr) {
			// Non-blocking enqueue: drop links when the frontier is full
			// rather than deadlocking the worker pool.
			select {
			case queue <- link:
			default:
			}
		}
	}
}
// main seeds the frontier, opens the JSONL output file, starts the writer
// goroutine and a periodic stats reporter, then runs 1000 crawl workers.
func main() {
	seeds := []string{
		"https://en.wikipedia.org/wiki/Main_Page",
		"https://en.wikipedia.org/wiki/Special:Random",
		"https://news.ycombinator.com/",
		"https://www.reddit.com/r/all/",
		"https://www.reddit.com/r/programming/",
		"https://stackoverflow.com/questions",
		"https://medium.com/",
		"https://dev.to/",
	}
	for _, s := range seeds {
		queue <- s
	}

	// Fail fast on filesystem errors; the original ignored them and would
	// nil-panic on the first write if os.Create had failed.
	if err := os.MkdirAll("/workspace/go_crawl", 0755); err != nil {
		fmt.Fprintf(os.Stderr, "mkdir: %v\n", err)
		os.Exit(1)
	}
	outPath := fmt.Sprintf("/workspace/go_crawl/crawl_%s.jsonl", time.Now().Format("20060102_150405"))
	f, err := os.Create(outPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "create %s: %v\n", outPath, err)
		os.Exit(1)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	defer w.Flush()

	output := make(chan Record, 10000)
	writerDone := make(chan struct{})
	go func() {
		// Sole writer to w. main waits on writerDone before its deferred
		// Flush runs, so the bufio.Writer is never used concurrently and
		// no buffered records are lost at shutdown (the original flushed
		// while this goroutine could still be writing).
		defer close(writerDone)
		for rec := range output {
			data, err := json.Marshal(rec)
			if err != nil {
				continue // drop the record rather than corrupt the stream
			}
			w.Write(data)
			w.WriteByte('\n')
		}
	}()

	start := time.Now()
	go func() {
		// Periodic throughput report; lives for the whole process.
		for {
			time.Sleep(10 * time.Second)
			elapsed := time.Since(start).Minutes()
			if elapsed > 0 {
				p := atomic.LoadInt64(&stats.pages)
				b := atomic.LoadInt64(&stats.bytes)
				e := atomic.LoadInt64(&stats.errors)
				fmt.Printf("[%s] %d pgs | %.0fMB | %.1fMB/min | Q:%d | Err:%d\n",
					time.Now().Format("15:04:05"), p, float64(b)/1024/1024, float64(b)/1024/1024/elapsed, len(queue), e)
			}
		}
	}()

	fmt.Printf("=== GO STDLIB CRAWLER ===\nWorkers: 1000\nOutput: %s\n\n", outPath)
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			worker(id, output)
		}(i)
	}
	wg.Wait()
	// Stop the writer and wait for it to finish handing records to the
	// bufio.Writer before the deferred Flush/Close fire.
	close(output)
	<-writerDone
}