_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q6100
|
Lookup
|
train
|
// Lookup searches every threat list for the given full hash and returns
// the matching prefix h (if any) together with all ThreatDescriptors whose
// list contains a matching prefix. It panics if hash is not a full hash.
func (db *database) Lookup(hash hashPrefix) (h hashPrefix, tds []ThreatDescriptor) {
	if !hash.IsFull() {
		panic("hash is not full")
	}
	// db.ml guards the lookup-side fields (db.tfl).
	db.ml.RLock()
	for td, hs := range db.tfl {
		if n := hs.Lookup(hash); n > 0 {
			// NOTE(review): h is overwritten on each match, so the prefix of the
			// last matching list wins — presumably any matching prefix suffices
			// for the caller; confirm against call sites.
			h = hash[:n]
			tds = append(tds, td)
		}
	}
	db.ml.RUnlock()
	return h, tds
}
|
go
|
{
"resource": ""
}
|
q6101
|
setError
|
train
|
// setError puts the database into an errored state: the update-side state
// (tfu) is discarded, the lookup table is cleared, and readyCh is replaced
// so that waiters block until the database becomes healthy again.
func (db *database) setError(err error) {
	// NOTE(review): db.tfu is written before taking db.ml — presumably tfu is
	// guarded by a different mutex held by the caller; confirm in the type's
	// field documentation.
	db.tfu = nil
	db.ml.Lock()
	if db.err == nil {
		// Transitioning healthy -> errored: create a fresh channel for waiters.
		db.readyCh = make(chan struct{})
	}
	db.tfl, db.err, db.last = nil, err, time.Time{}
	db.ml.Unlock()
}
|
go
|
{
"resource": ""
}
|
q6102
|
isStale
|
train
|
// isStale reports whether the database has gone too long without a
// successful update: more than twice the configured update period
// (plus jitter) since lastUpdate.
func (db *database) isStale(lastUpdate time.Time) bool {
	// Idiom: return the condition directly instead of if/return true/return false.
	return db.config.now().Sub(lastUpdate) > 2*(db.config.UpdatePeriod+jitter)
}
|
go
|
{
"resource": ""
}
|
q6103
|
setStale
|
train
|
// setStale marks the database as stale with errStale, recreating readyCh
// when transitioning out of a healthy state so waiters block again.
// NOTE(review): fields are touched without taking db.ml here — presumably
// the caller already holds the appropriate lock; confirm at call sites.
func (db *database) setStale() {
	if db.err == nil {
		db.readyCh = make(chan struct{})
	}
	db.err = errStale
}
|
go
|
{
"resource": ""
}
|
q6104
|
clearError
|
train
|
// clearError marks the database as healthy. If it was previously errored,
// readyCh is closed to release any goroutines blocked waiting for the
// database to become ready.
func (db *database) clearError() {
	db.ml.Lock()
	defer db.ml.Unlock()
	if db.err != nil {
		close(db.readyCh)
	}
	db.err = nil
}
|
go
|
{
"resource": ""
}
|
q6105
|
generateThreatsForUpdate
|
train
|
// generateThreatsForUpdate rebuilds the update-side state (tfu) from the
// lookup-side hash sets (tfl), exporting each hash set back into the
// serializable partialHashes form.
func (db *database) generateThreatsForUpdate() {
	// NOTE(review): db.tfu is read/written outside db.ml — presumably guarded
	// by a separate mutex held by the caller; confirm.
	if db.tfu == nil {
		db.tfu = make(threatsForUpdate)
	}
	db.ml.RLock()
	for td, hs := range db.tfl {
		phs := db.tfu[td]
		phs.Hashes = hs.Export()
		db.tfu[td] = phs
	}
	db.ml.RUnlock()
}
|
go
|
{
"resource": ""
}
|
q6106
|
generateThreatsForLookups
|
train
|
// generateThreatsForLookups rebuilds the lookup-side table (tfl) from the
// update-side state (tfu), then atomically installs it along with the
// last-update timestamp. If the database was previously errored, the error
// is cleared and recovery is logged.
func (db *database) generateThreatsForLookups(last time.Time) {
	tfl := make(threatsForLookup)
	for td, phs := range db.tfu {
		var hs hashSet
		hs.Import(phs.Hashes)
		tfl[td] = hs
		phs.Hashes = nil // Clear hashes to keep memory usage low
		db.tfu[td] = phs
	}
	db.ml.Lock()
	wasBad := db.err != nil
	db.tfl, db.last = tfl, last
	db.ml.Unlock()
	if wasBad {
		// clearError takes db.ml itself, so it must be called after Unlock.
		db.clearError()
		db.log.Printf("database is now healthy")
	}
}
|
go
|
{
"resource": ""
}
|
q6107
|
saveDatabase
|
train
|
// saveDatabase writes db to path as a gzip-compressed gob stream.
// Close errors from the gzip writer and the file are surfaced through the
// named return value if no earlier error occurred.
func saveDatabase(path string, db databaseFormat) (err error) {
	var file *os.File
	file, err = os.Create(path)
	if err != nil {
		return err
	}
	// Capture the file Close error only if nothing failed before it.
	defer func() {
		if cerr := file.Close(); err == nil {
			err = cerr
		}
	}()
	gz, err := gzip.NewWriterLevel(file, gzip.BestCompression)
	if err != nil {
		return err
	}
	// gz.Close flushes compressed data; its error matters even on success.
	defer func() {
		if zerr := gz.Close(); err == nil {
			err = zerr
		}
	}()
	encoder := gob.NewEncoder(gz)
	if err = encoder.Encode(db); err != nil {
		return err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6108
|
loadDatabase
|
train
|
// loadDatabase reads a gzip-compressed gob-encoded database from path and
// verifies each threat list against its stored SHA256 checksum before
// returning it.
func loadDatabase(path string) (db databaseFormat, err error) {
	var file *os.File
	file, err = os.Open(path)
	if err != nil {
		return db, err
	}
	// Surface Close errors only when no earlier error occurred.
	defer func() {
		if cerr := file.Close(); err == nil {
			err = cerr
		}
	}()
	gz, err := gzip.NewReader(file)
	if err != nil {
		return db, err
	}
	defer func() {
		if zerr := gz.Close(); err == nil {
			err = zerr
		}
	}()
	decoder := gob.NewDecoder(gz)
	if err = decoder.Decode(&db); err != nil {
		return db, err
	}
	// Integrity check: recompute each list's hash and compare with the
	// checksum recorded at save time.
	for _, dv := range db.Table {
		if !bytes.Equal(dv.SHA256, dv.Hashes.SHA256()) {
			return db, errors.New("safebrowsing: threat list SHA256 mismatch")
		}
	}
	return db, nil
}
|
go
|
{
"resource": ""
}
|
q6109
|
update
|
train
|
// update applies a FetchThreatListUpdatesResponse to the local state,
// handling both PARTIAL_UPDATE (remove by index, then add) and FULL_UPDATE
// (replace) responses, and validating the result against the server's
// SHA256 checksum.
func (tfu threatsForUpdate) update(resp *pb.FetchThreatListUpdatesResponse) error {
	// For each update response do the removes and adds
	for _, m := range resp.GetListUpdateResponses() {
		td := ThreatDescriptor{
			PlatformType: PlatformType(m.PlatformType),
			ThreatType: ThreatType(m.ThreatType),
			ThreatEntryType: ThreatEntryType(m.ThreatEntryType),
		}
		phs, ok := tfu[td]
		switch m.ResponseType {
		case pb.FetchThreatListUpdatesResponse_ListUpdateResponse_PARTIAL_UPDATE:
			// Partial updates only make sense against existing state.
			if !ok {
				return errors.New("safebrowsing: partial update received for non-existent key")
			}
		case pb.FetchThreatListUpdatesResponse_ListUpdateResponse_FULL_UPDATE:
			if len(m.Removals) > 0 {
				return errors.New("safebrowsing: indices to be removed included in a full update")
			}
			// A full update replaces the prior state entirely.
			phs = partialHashes{}
		default:
			return errors.New("safebrowsing: unknown response type")
		}
		// Hashes must be sorted for removal logic to work properly.
		phs.Hashes.Sort()
		for _, removal := range m.Removals {
			idxs, err := decodeIndices(removal)
			if err != nil {
				return err
			}
			for _, i := range idxs {
				if i < 0 || i >= int32(len(phs.Hashes)) {
					return errors.New("safebrowsing: invalid removal index")
				}
				// Mark removed entries; they are compacted away below.
				phs.Hashes[i] = ""
			}
		}
		// If any removal was performed, compact the list of hashes.
		if len(m.Removals) > 0 {
			// In-place filter reusing the backing array.
			compactHashes := phs.Hashes[:0]
			for _, h := range phs.Hashes {
				if h != "" {
					compactHashes = append(compactHashes, h)
				}
			}
			phs.Hashes = compactHashes
		}
		for _, addition := range m.Additions {
			hashes, err := decodeHashes(addition)
			if err != nil {
				return err
			}
			phs.Hashes = append(phs.Hashes, hashes...)
		}
		// Hashes must be sorted for SHA256 checksum to be correct.
		phs.Hashes.Sort()
		if err := phs.Hashes.Validate(); err != nil {
			return err
		}
		if cs := m.GetChecksum(); cs != nil {
			phs.SHA256 = cs.Sha256
		}
		if !bytes.Equal(phs.SHA256, phs.Hashes.SHA256()) {
			return errors.New("safebrowsing: threat list SHA256 mismatch")
		}
		phs.State = m.NewClientState
		tfu[td] = phs
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6110
|
unmarshal
|
train
|
// unmarshal decodes the request body into pbReq. The response format is
// chosen from the "alt" query parameter, falling back to the Content-Type
// header; the body itself is decoded according to Content-Type.
// It returns the negotiated response MIME type.
func unmarshal(req *http.Request, pbReq proto.Message) (string, error) {
	var mime string
	alt := req.URL.Query().Get("alt")
	if alt == "" {
		alt = req.Header.Get("Content-Type")
	}
	switch alt {
	case "json", mimeJSON:
		mime = mimeJSON
	case "proto", mimeProto:
		mime = mimeProto
	default:
		return mime, errors.New("invalid interchange format")
	}
	// NOTE(review): if Content-Type is neither mimeJSON nor mimeProto the
	// body is silently left undecoded — presumably intentional for empty
	// bodies; confirm against callers.
	switch req.Header.Get("Content-Type") {
	case mimeJSON:
		if err := jsonpb.Unmarshal(req.Body, pbReq); err != nil {
			return mime, err
		}
	case mimeProto:
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return mime, err
		}
		if err := proto.Unmarshal(body, pbReq); err != nil {
			return mime, err
		}
	}
	return mime, nil
}
|
go
|
{
"resource": ""
}
|
q6111
|
marshal
|
train
|
// marshal encodes pbResp onto resp in the given MIME format (protobuf or
// JSON), setting the Content-Type header accordingly. Unknown formats
// produce an error.
func marshal(resp http.ResponseWriter, pbResp proto.Message, mime string) error {
	resp.Header().Set("Content-Type", mime)
	switch mime {
	case mimeProto:
		body, err := proto.Marshal(pbResp)
		if err != nil {
			return err
		}
		if _, err := resp.Write(body); err != nil {
			return err
		}
	case mimeJSON:
		// jsonpb marshals into a buffer first so encoding errors are caught
		// before any bytes are written to the client.
		var m jsonpb.Marshaler
		var b bytes.Buffer
		if err := m.Marshal(&b, pbResp); err != nil {
			return err
		}
		if _, err := resp.Write(b.Bytes()); err != nil {
			return err
		}
	default:
		return errors.New("invalid interchange format")
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6112
|
serveStatus
|
train
|
// serveStatus reports the SafeBrowser's statistics and any database error
// as a JSON document. A non-nil status error is embedded in the payload
// rather than failing the request.
func serveStatus(resp http.ResponseWriter, req *http.Request, sb *safebrowsing.SafeBrowser) {
	stats, sbErr := sb.Status()
	errStr := ""
	if sbErr != nil {
		errStr = sbErr.Error()
	}
	buf, err := json.Marshal(struct {
		Stats safebrowsing.Stats
		Error string
	}{stats, errStr})
	if err != nil {
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		return
	}
	resp.Header().Set("Content-Type", mimeJSON)
	resp.Write(buf)
}
|
go
|
{
"resource": ""
}
|
q6113
|
serveRedirector
|
train
|
// serveRedirector handles /r?url=... requests: it checks the target URL
// against Safe Browsing, redirecting to it when safe and rendering an
// interstitial warning page when a known threat is found.
func serveRedirector(resp http.ResponseWriter, req *http.Request, sb *safebrowsing.SafeBrowser, fs http.FileSystem) {
	rawURL := req.URL.Query().Get("url")
	if rawURL == "" || req.URL.Path != "/r" {
		http.NotFound(resp, req)
		return
	}
	parsedURL, err := url.Parse(rawURL)
	if err != nil {
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		return
	}
	threats, err := sb.LookupURLsContext(req.Context(), []string{rawURL})
	if err != nil {
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		return
	}
	// No threats: pass the user through to the requested URL.
	if len(threats[0]) == 0 {
		http.Redirect(resp, req, rawURL, http.StatusFound)
		return
	}
	t := template.New("Safe Browsing Interstitial")
	for _, threat := range threats[0] {
		if tmpl, ok := threatTemplate[threat.ThreatType]; ok {
			t, err = parseTemplates(fs, t, tmpl, "/interstitial.html")
			if err != nil {
				http.Error(resp, err.Error(), http.StatusInternalServerError)
				return
			}
			err = t.Execute(resp, map[string]interface{}{
				"Threat": threat,
				"Url":    parsedURL})
			if err != nil {
				http.Error(resp, err.Error(), http.StatusInternalServerError)
			}
			return
		}
	}
	// BUG FIX: err is always nil here (every error path above returns), so
	// the original err.Error() call panicked with a nil pointer dereference.
	// Reaching this point means no template is registered for any of the
	// reported threat types.
	http.Error(resp, "no interstitial template for reported threat type", http.StatusInternalServerError)
}
|
go
|
{
"resource": ""
}
|
q6114
|
generateHashes
|
train
|
// generateHashes maps the full SHA256 hash of every lookup pattern for url
// back to the pattern that produced it.
func generateHashes(url string) (map[hashPrefix]string, error) {
	patterns, err := generatePatterns(url)
	if err != nil {
		return nil, err
	}
	// Pre-size the map; one entry per pattern (modulo hash collisions).
	result := make(map[hashPrefix]string, len(patterns))
	for _, pattern := range patterns {
		result[hashFromPattern(pattern)] = pattern
	}
	return result, nil
}
|
go
|
{
"resource": ""
}
|
q6115
|
generatePatterns
|
train
|
// generatePatterns forms the cross product of every lookup host and every
// lookup path derived from url, yielding the full set of Safe Browsing
// lookup expressions.
func generatePatterns(url string) ([]string, error) {
	hosts, err := generateLookupHosts(url)
	if err != nil {
		return nil, err
	}
	paths, err := generateLookupPaths(url)
	if err != nil {
		return nil, err
	}
	var patterns []string
	for _, host := range hosts {
		for _, suffix := range paths {
			patterns = append(patterns, host+suffix)
		}
	}
	return patterns, nil
}
|
go
|
{
"resource": ""
}
|
q6116
|
isHex
|
train
|
// isHex reports whether c is an ASCII hexadecimal digit.
func isHex(c byte) bool {
	return ('0' <= c && c <= '9') ||
		('a' <= c && c <= 'f') ||
		('A' <= c && c <= 'F')
}
|
go
|
{
"resource": ""
}
|
q6117
|
isUnicode
|
train
|
// isUnicode reports whether s contains any byte above 0x80.
func isUnicode(s string) bool {
	for i := 0; i < len(s); i++ {
		// For legacy reasons, 0x80 is not considered a Unicode character.
		if s[i] > 0x80 {
			return true
		}
	}
	return false
}
|
go
|
{
"resource": ""
}
|
q6118
|
escape
|
train
|
// escape percent-encodes control bytes, non-ASCII bytes, and the
// characters ' ', '#', and '%'; all other bytes pass through unchanged.
func escape(s string) string {
	var out bytes.Buffer
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c < 0x20, c >= 0x7f, c == ' ', c == '#', c == '%':
			fmt.Fprintf(&out, "%%%02x", c)
		default:
			out.WriteByte(c)
		}
	}
	return out.String()
}
|
go
|
{
"resource": ""
}
|
q6119
|
unescape
|
train
|
// unescape decodes %XX percent-escapes in s; malformed escapes are copied
// through literally.
func unescape(s string) string {
	var out bytes.Buffer
	for i := 0; i < len(s); {
		if i+2 < len(s) && s[i] == '%' && isHex(s[i+1]) && isHex(s[i+2]) {
			out.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 3
			continue
		}
		out.WriteByte(s[i])
		i++
	}
	return out.String()
}
|
go
|
{
"resource": ""
}
|
q6120
|
recursiveUnescape
|
train
|
// recursiveUnescape repeatedly percent-decodes s until it reaches a fixed
// point, failing after 1024 rounds to guard against pathological input.
func recursiveUnescape(s string) (string, error) {
	const maxDepth = 1024
	for depth := 0; depth < maxDepth; depth++ {
		next := unescape(s)
		if next == s {
			return s, nil
		}
		s = next
	}
	return "", errors.New("safebrowsing: unescaping is too recursive")
}
|
go
|
{
"resource": ""
}
|
q6121
|
normalizeEscape
|
train
|
// normalizeEscape fully unescapes s and then re-escapes it once, yielding
// a canonical percent-encoded form.
func normalizeEscape(s string) (string, error) {
	decoded, err := recursiveUnescape(s)
	if err != nil {
		return "", err
	}
	return escape(decoded), nil
}
|
go
|
{
"resource": ""
}
|
q6122
|
parseHost
|
train
|
// parseHost canonicalizes a host component per the Safe Browsing URL
// rules: it strips userinfo and port, converts internationalized names to
// IDNA/punycode, collapses redundant dots, and canonicalizes IP addresses.
func parseHost(hostish string) (host string, err error) {
	// Drop any "user:pass@" userinfo prefix.
	i := strings.LastIndex(hostish, "@")
	if i < 0 {
		host = hostish
	} else {
		host = hostish[i+1:]
	}
	if strings.HasPrefix(host, "[") {
		// Parse an IP-Literal per RFC 3986 and RFC 6874.
		// For example: "[fe80::1] or "[fe80::1%25en0]"
		i := strings.LastIndex(host, "]")
		if i < 0 {
			return "", errors.New("safebrowsing: missing ']' in host")
		}
	}
	// Remove the port if it is there.
	host = portRegexp.ReplaceAllString(host, "")
	// Convert internationalized hostnames to IDNA.
	u := unescape(host)
	if isUnicode(u) {
		host, err = idna.ToASCII(u)
		if err != nil {
			return "", err
		}
	}
	// Remove any superfluous '.' characters in the hostname.
	host = dotsRegexp.ReplaceAllString(host, ".")
	host = strings.Trim(host, ".")
	// Canonicalize IP addresses.
	if iphost := parseIPAddress(host); iphost != "" {
		host = iphost
	} else {
		host = strings.ToLower(host)
	}
	return host, nil
}
|
go
|
{
"resource": ""
}
|
q6123
|
parseURL
|
train
|
// parseURL parses urlStr according to Safe Browsing's URL canonicalization
// rules, which differ from net/url (see the numbered comments below).
// The returned URL has its scheme, host, cleaned path, and raw query set;
// any fragment is discarded.
func parseURL(urlStr string) (parsedURL *url.URL, err error) {
	// For legacy reasons, this is a simplified version of the net/url logic.
	//
	// Few cases where net/url was not helpful:
	// 1. URLs are are expected to have no escaped encoding in the host but to
	// be escaped in the path. Safe Browsing allows escaped characters in both.
	// 2. Also it has different behavior with and without a scheme for absolute
	// paths. Safe Browsing test web URLs only; and a scheme is optional.
	// If missing, we assume that it is an "http".
	// 3. We strip off the fragment and the escaped query as they are not
	// required for building patterns for Safe Browsing.
	parsedURL = new(url.URL)
	// Remove the URL fragment.
	// Also, we decode and encode the URL.
	// The '#' in a fragment is not friendly to that.
	rest, _ := split(urlStr, "#", true)
	// Start by stripping any leading and trailing whitespace.
	rest = strings.TrimSpace(rest)
	// Remove any embedded tabs and CR/LF characters which aren't escaped.
	rest = strings.Replace(rest, "\t", "", -1)
	rest = strings.Replace(rest, "\r", "", -1)
	rest = strings.Replace(rest, "\n", "", -1)
	// Canonicalize percent-encoding before any further parsing.
	rest, err = normalizeEscape(rest)
	if err != nil {
		return nil, err
	}
	parsedURL.Scheme, rest = getScheme(rest)
	rest, parsedURL.RawQuery = split(rest, "?", true)
	// Add HTTP as scheme if none.
	var hostish string
	if !strings.HasPrefix(rest, "//") && parsedURL.Scheme != "" {
		return nil, errors.New("safebrowsing: invalid path")
	}
	if parsedURL.Scheme == "" {
		parsedURL.Scheme = "http"
		hostish, rest = split(rest, "/", false)
	} else {
		hostish, rest = split(rest[2:], "/", false)
	}
	if hostish == "" {
		return nil, errors.New("safebrowsing: missing hostname")
	}
	parsedURL.Host, err = parseHost(hostish)
	if err != nil {
		return nil, err
	}
	// Format the path.
	p := path.Clean(rest)
	if p == "." {
		p = "/"
	} else if rest[len(rest)-1] == '/' && p[len(p)-1] != '/' {
		// path.Clean drops a trailing slash; restore it.
		p += "/"
	}
	parsedURL.Path = p
	return parsedURL, nil
}
|
go
|
{
"resource": ""
}
|
q6124
|
generateLookupHosts
|
train
|
// generateLookupHosts returns the canonical host for urlStr plus up to
// four of its trailing hostname suffixes, per Safe Browsing lookup rules.
// IP-address hosts are returned alone.
func generateLookupHosts(urlStr string) ([]string, error) {
	// Safe Browsing policy asks to generate lookup hosts for the URL.
	// Those are formed by the domain and also up to 4 hostnames suffixes.
	// The last component or sometimes the pair isn't examined alone,
	// since it's the TLD or country code. The database for TLDs is here:
	// https://publicsuffix.org/list/
	//
	// Note that we do not need to be clever about stopping at the "real" TLD.
	// We just check a few extra components regardless. It's not significantly
	// slower on the server side to check some extra hashes. Also the client
	// does not need to keep a database of TLDs.
	const maxHostComponents = 7
	host, err := canonicalHost(urlStr)
	if err != nil {
		return nil, err
	}
	// handle IPv4 and IPv6 addresses.
	ip := net.ParseIP(strings.Trim(host, "[]"))
	if ip != nil {
		return []string{host}, nil
	}
	hostComponents := strings.Split(host, ".")
	// Start index for suffix generation; never drop below 1 so the full
	// host (index 0) is not duplicated by the loop below.
	numComponents := len(hostComponents) - maxHostComponents
	if numComponents < 1 {
		numComponents = 1
	}
	hosts := []string{host}
	for i := numComponents; i < len(hostComponents)-1; i++ {
		hosts = append(hosts, strings.Join(hostComponents[i:], "."))
	}
	return hosts, nil
}
|
go
|
{
"resource": ""
}
|
q6125
|
generateLookupPaths
|
train
|
// generateLookupPaths returns the path expressions to test for urlStr:
// "/", up to three intermediate directory prefixes, the full path, and the
// full path with query string if one is present.
func generateLookupPaths(urlStr string) ([]string, error) {
	const maxPathComponents = 4
	parsedURL, err := parseURL(urlStr)
	if err != nil {
		return nil, err
	}
	path := parsedURL.Path
	paths := []string{"/"}
	var pathComponents []string
	for _, p := range strings.Split(path, "/") {
		if p != "" {
			pathComponents = append(pathComponents, p)
		}
	}
	numComponents := len(pathComponents)
	if numComponents > maxPathComponents {
		numComponents = maxPathComponents
	}
	// Intermediate prefixes only; the full path is appended separately below.
	for i := 1; i < numComponents; i++ {
		paths = append(paths, "/"+strings.Join(pathComponents[:i], "/")+"/")
	}
	if path != "/" {
		paths = append(paths, path)
	}
	if len(parsedURL.RawQuery) > 0 {
		paths = append(paths, path+"?"+parsedURL.RawQuery)
	}
	return paths, nil
}
|
go
|
{
"resource": ""
}
|
q6126
|
hashFromPattern
|
train
|
// hashFromPattern returns the full SHA256 hash of pattern as a hashPrefix.
func hashFromPattern(pattern string) hashPrefix {
	sum := sha256.Sum256([]byte(pattern))
	return hashPrefix(sum[:])
}
|
go
|
{
"resource": ""
}
|
q6127
|
HasPrefix
|
train
|
// HasPrefix reports whether h begins with the bytes of other.
func (h hashPrefix) HasPrefix(other hashPrefix) bool {
	return len(h) >= len(other) && h[:len(other)] == other
}
|
go
|
{
"resource": ""
}
|
q6128
|
IsValid
|
train
|
// IsValid reports whether h's length lies within the allowed prefix range.
func (h hashPrefix) IsValid() bool {
	n := len(h)
	return n >= minHashPrefixLength && n <= maxHashPrefixLength
}
|
go
|
{
"resource": ""
}
|
q6129
|
decodeHashes
|
train
|
// decodeHashes extracts the hash prefixes from a ThreatEntrySet, handling
// both RAW (concatenated fixed-width prefixes) and RICE (delta-encoded
// 4-byte hashes) compression.
func decodeHashes(input *pb.ThreatEntrySet) ([]hashPrefix, error) {
	switch input.CompressionType {
	case pb.CompressionType_RAW:
		raw := input.GetRawHashes()
		if raw == nil {
			return nil, errors.New("safebrowsing: nil raw hashes")
		}
		if raw.PrefixSize < minHashPrefixLength || raw.PrefixSize > maxHashPrefixLength {
			return nil, errors.New("safebrowsing: invalid hash prefix length")
		}
		// The raw blob must be an exact multiple of the prefix width.
		if len(raw.RawHashes)%int(raw.PrefixSize) != 0 {
			return nil, errors.New("safebrowsing: invalid raw hashes")
		}
		hashes := make([]hashPrefix, len(raw.RawHashes)/int(raw.PrefixSize))
		// Consume the blob prefix-by-prefix; note this mutates
		// raw.RawHashes, leaving it empty afterwards.
		for i := range hashes {
			hashes[i] = hashPrefix(raw.RawHashes[:raw.PrefixSize])
			raw.RawHashes = raw.RawHashes[raw.PrefixSize:]
		}
		return hashes, nil
	case pb.CompressionType_RICE:
		values, err := decodeRiceIntegers(input.GetRiceHashes())
		if err != nil {
			return nil, err
		}
		hashes := make([]hashPrefix, 0, len(values))
		var buf [4]byte
		// Each rice-decoded integer is a 4-byte little-endian hash prefix.
		for _, h := range values {
			binary.LittleEndian.PutUint32(buf[:], h)
			hashes = append(hashes, hashPrefix(buf[:]))
		}
		return hashes, nil
	default:
		return nil, errors.New("safebrowsing: invalid compression type")
	}
}
|
go
|
{
"resource": ""
}
|
q6130
|
decodeIndices
|
train
|
// decodeIndices extracts removal indices from a ThreatEntrySet, handling
// both RAW and RICE compression.
func decodeIndices(input *pb.ThreatEntrySet) ([]int32, error) {
	switch input.CompressionType {
	case pb.CompressionType_RAW:
		if raw := input.GetRawIndices(); raw != nil {
			return raw.Indices, nil
		}
		return nil, errors.New("safebrowsing: invalid raw indices")
	case pb.CompressionType_RICE:
		values, err := decodeRiceIntegers(input.GetRiceIndices())
		if err != nil {
			return nil, err
		}
		indices := make([]int32, 0, len(values))
		for _, value := range values {
			indices = append(indices, int32(value))
		}
		return indices, nil
	default:
		return nil, errors.New("safebrowsing: invalid compression type")
	}
}
|
go
|
{
"resource": ""
}
|
q6131
|
decodeRiceIntegers
|
train
|
// decodeRiceIntegers decodes a Golomb-Rice delta encoding into absolute
// uint32 values: the first value is given directly, and each subsequent
// value is the previous one plus a decoded delta.
func decodeRiceIntegers(rice *pb.RiceDeltaEncoding) ([]uint32, error) {
	if rice == nil {
		return nil, errors.New("safebrowsing: missing rice encoded data")
	}
	if rice.RiceParameter < 0 || rice.RiceParameter > 32 {
		return nil, errors.New("safebrowsing: invalid k parameter")
	}
	values := []uint32{uint32(rice.FirstValue)}
	br := newBitReader(rice.EncodedData)
	rd := newRiceDecoder(br, uint32(rice.RiceParameter))
	for i := 0; i < int(rice.NumEntries); i++ {
		delta, err := rd.ReadValue()
		if err != nil {
			return nil, err
		}
		// Deltas accumulate onto the previously decoded value.
		values = append(values, values[i]+delta)
	}
	// Up to 7 padding bits may legitimately remain; a full byte means the
	// encoded stream was longer than the entry count implies.
	if br.BitsRemaining() >= 8 {
		return nil, errors.New("safebrowsing: unconsumed rice encoded data")
	}
	return values, nil
}
|
go
|
{
"resource": ""
}
|
q6132
|
BitsRemaining
|
train
|
// BitsRemaining returns how many unread bits are left in the reader:
// 8 bits per buffered byte, minus the bits already consumed from the
// current byte (tracked by br.mask).
func (br *bitReader) BitsRemaining() int {
	bits := 8 * len(br.buf)
	for mask := br.mask | 1; mask != 1; mask >>= 1 {
		bits--
	}
	return bits
}
|
go
|
{
"resource": ""
}
|
q6133
|
setDefaults
|
train
|
// setDefaults fills any zero-valued configuration fields with their
// package defaults. It currently always reports true.
func (c *Config) setDefaults() bool {
	if c.RequestTimeout <= 0 {
		c.RequestTimeout = DefaultRequestTimeout
	}
	if c.UpdatePeriod <= 0 {
		c.UpdatePeriod = DefaultUpdatePeriod
	}
	if c.ServerURL == "" {
		c.ServerURL = DefaultServerURL
	}
	if len(c.ThreatLists) == 0 {
		c.ThreatLists = DefaultThreatLists
	}
	if c.compressionTypes == nil {
		c.compressionTypes = []pb.CompressionType{pb.CompressionType_RAW, pb.CompressionType_RICE}
	}
	return true
}
|
go
|
{
"resource": ""
}
|
q6134
|
NewSafeBrowser
|
train
|
// NewSafeBrowser constructs a SafeBrowser from conf: it applies defaults,
// wires up the network API and logger, seeds the database (from file if
// available, otherwise via an immediate update), and starts the background
// updater goroutine.
func NewSafeBrowser(conf Config) (*SafeBrowser, error) {
	conf = conf.copy()
	if !conf.setDefaults() {
		return nil, errors.New("safebrowsing: invalid configuration")
	}
	// Create the SafeBrowsing object.
	if conf.api == nil {
		var err error
		conf.api, err = newNetAPI(conf.ServerURL, conf.APIKey, conf.ProxyURL)
		if err != nil {
			return nil, err
		}
	}
	// conf.now is injectable for tests; default to the real clock.
	if conf.now == nil {
		conf.now = time.Now
	}
	sb := &SafeBrowser{
		config: conf,
		api: conf.api,
		c: cache{now: conf.now},
	}
	// TODO: Verify that config.ThreatLists is a subset of the list obtained
	// by "/v4/threatLists" API endpoint.
	// Convert threat lists slice to a map for O(1) lookup.
	sb.lists = make(map[ThreatDescriptor]bool)
	for _, td := range conf.ThreatLists {
		sb.lists[td] = true
	}
	// Setup the logger.
	w := conf.Logger
	if conf.Logger == nil {
		w = ioutil.Discard
	}
	sb.log = log.New(w, "safebrowsing: ", log.Ldate|log.Ltime|log.Lshortfile)
	delay := time.Duration(0)
	// If database file is provided, use that to initialize.
	if !sb.db.Init(&sb.config, sb.log) {
		// No usable database file: fetch an initial update synchronously.
		ctx, cancel := context.WithTimeout(context.Background(), sb.config.RequestTimeout)
		delay, _ = sb.db.Update(ctx, sb.api)
		cancel()
	} else {
		// Database loaded from file: schedule the next update relative to
		// its age.
		if age := sb.db.SinceLastUpdate(); age < sb.config.UpdatePeriod {
			delay = sb.config.UpdatePeriod - age
		}
	}
	// Start the background list updater.
	sb.done = make(chan bool)
	go sb.updater(delay)
	return sb, nil
}
|
go
|
{
"resource": ""
}
|
q6135
|
Status
|
train
|
// Status returns a snapshot of the query counters (read atomically) and
// the database update lag, along with the database's current health error.
func (sb *SafeBrowser) Status() (Stats, error) {
	stats := Stats{
		QueriesByDatabase: atomic.LoadInt64(&sb.stats.QueriesByDatabase),
		QueriesByCache: atomic.LoadInt64(&sb.stats.QueriesByCache),
		QueriesByAPI: atomic.LoadInt64(&sb.stats.QueriesByAPI),
		QueriesFail: atomic.LoadInt64(&sb.stats.QueriesFail),
		DatabaseUpdateLag: sb.db.UpdateLag(),
	}
	return stats, sb.db.Status()
}
|
go
|
{
"resource": ""
}
|
q6136
|
WaitUntilReady
|
train
|
// WaitUntilReady blocks until the database is ready, the context is
// canceled, or the SafeBrowser is closed — whichever happens first.
func (sb *SafeBrowser) WaitUntilReady(ctx context.Context) error {
	// Fast-path rejection when Close has already been called.
	if atomic.LoadUint32(&sb.closed) == 1 {
		return errClosed
	}
	select {
	case <-sb.db.Ready():
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-sb.done:
		return errClosed
	}
}
|
go
|
{
"resource": ""
}
|
q6137
|
Close
|
train
|
// Close shuts down the SafeBrowser, signaling the background updater to
// stop. It is safe to call multiple times and from multiple goroutines.
func (sb *SafeBrowser) Close() error {
	// BUG FIX: the original Load-then-Store sequence allowed two concurrent
	// Close calls to both observe closed==0 and both close(sb.done), which
	// panics. CompareAndSwap makes exactly one caller win.
	if atomic.CompareAndSwapUint32(&sb.closed, 0, 1) {
		close(sb.done)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6138
|
doRequest
|
train
|
// doRequest POSTs the marshaled req to requestPath on the API server and
// unmarshals the protobuf response into resp. The context governs the
// request's lifetime.
func (a *netAPI) doRequest(ctx context.Context, requestPath string, req proto.Message, resp proto.Message) error {
	p, err := proto.Marshal(req)
	if err != nil {
		return err
	}
	u := *a.url // Make a copy of URL
	u.Path = requestPath
	httpReq, err := http.NewRequest("POST", u.String(), bytes.NewReader(p))
	// BUG FIX: the original ignored this error and dereferenced httpReq,
	// which is nil when NewRequest fails (e.g. on an invalid URL).
	if err != nil {
		return err
	}
	httpReq.Header.Add("Content-Type", "application/x-protobuf")
	httpReq = httpReq.WithContext(ctx)
	httpResp, err := a.client.Do(httpReq)
	if err != nil {
		return err
	}
	defer httpResp.Body.Close()
	if httpResp.StatusCode != 200 {
		return fmt.Errorf("safebrowsing: unexpected server response code: %d", httpResp.StatusCode)
	}
	body, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return err
	}
	return proto.Unmarshal(body, resp)
}
|
go
|
{
"resource": ""
}
|
q6139
|
ListUpdate
|
train
|
// ListUpdate fetches threat-list updates from the server for the given
// request.
func (a *netAPI) ListUpdate(ctx context.Context, req *pb.FetchThreatListUpdatesRequest) (*pb.FetchThreatListUpdatesResponse, error) {
	resp := new(pb.FetchThreatListUpdatesResponse)
	err := a.doRequest(ctx, fetchUpdatePath, req, resp)
	return resp, err
}
|
go
|
{
"resource": ""
}
|
q6140
|
HashLookup
|
train
|
// HashLookup queries the server for full hashes matching the given
// request.
func (a *netAPI) HashLookup(ctx context.Context, req *pb.FindFullHashesRequest) (*pb.FindFullHashesResponse, error) {
	resp := new(pb.FindFullHashesResponse)
	err := a.doRequest(ctx, findHashPath, req, resp)
	return resp, err
}
|
go
|
{
"resource": ""
}
|
q6141
|
MarshalJSON
|
train
|
// MarshalJSON encodes the attachment, rendering the Duration field as a
// float64 number of seconds under "duration_in_seconds". The local alias
// type strips JSONAttachment's own MarshalJSON method to avoid infinite
// recursion.
func (a *JSONAttachment) MarshalJSON() ([]byte, error) {
	type EmbeddedJSONAttachment JSONAttachment
	return json.Marshal(&struct {
		Duration float64 `json:"duration_in_seconds,omitempty"`
		*EmbeddedJSONAttachment
	}{
		EmbeddedJSONAttachment: (*EmbeddedJSONAttachment)(a),
		Duration: a.Duration.Seconds(),
	})
}
|
go
|
{
"resource": ""
}
|
q6142
|
UnmarshalJSON
|
train
|
// UnmarshalJSON decodes the attachment, converting the JSON
// "duration_in_seconds" float back into a time.Duration. The local alias
// type strips JSONAttachment's UnmarshalJSON method to avoid recursion.
func (a *JSONAttachment) UnmarshalJSON(data []byte) error {
	type EmbeddedJSONAttachment JSONAttachment
	var raw struct {
		Duration float64 `json:"duration_in_seconds,omitempty"`
		*EmbeddedJSONAttachment
	}
	// Decode directly into *a via the embedded pointer.
	raw.EmbeddedJSONAttachment = (*EmbeddedJSONAttachment)(a)
	err := json.Unmarshal(data, &raw)
	if err != nil {
		return err
	}
	if raw.Duration > 0 {
		nsec := int64(raw.Duration * float64(time.Second))
		raw.EmbeddedJSONAttachment.Duration = time.Duration(nsec)
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6143
|
ToJSON
|
train
|
// ToJSON renders the feed as an indented JSON document.
func (f *JSONFeed) ToJSON() (string, error) {
	encoded, err := json.MarshalIndent(f, "", " ")
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
|
go
|
{
"resource": ""
}
|
q6144
|
JSONFeed
|
train
|
// JSONFeed converts the generic feed into the JSON Feed representation,
// copying the title, description, optional home page link and author, and
// every item.
func (f *JSON) JSONFeed() *JSONFeed {
	out := &JSONFeed{
		Version: jsonFeedVersion,
		Title: f.Title,
		Description: f.Description,
	}
	if link := f.Link; link != nil {
		out.HomePageUrl = link.Href
	}
	if author := f.Author; author != nil {
		out.Author = &JSONAuthor{Name: author.Name}
	}
	for _, item := range f.Items {
		out.Items = append(out.Items, newJSONItem(item))
	}
	return out
}
|
go
|
{
"resource": ""
}
|
q6145
|
newRssItem
|
train
|
// newRssItem converts a generic feed Item into its RSS representation,
// copying the optional content, source, enclosure, and author fields only
// when they are present.
func newRssItem(i *Item) *RssItem {
	item := &RssItem{
		Title: i.Title,
		Link: i.Link.Href,
		Description: i.Description,
		Guid: i.Id,
		// Prefer the creation time; fall back to the update time.
		PubDate: anyTimeFormat(time.RFC1123Z, i.Created, i.Updated),
	}
	if len(i.Content) > 0 {
		item.Content = &RssContent{Content: i.Content}
	}
	if i.Source != nil {
		item.Source = i.Source.Href
	}
	// An enclosure is only emitted when both its type and length are set,
	// as RSS requires all three enclosure attributes.
	if i.Enclosure != nil && i.Enclosure.Type != "" && i.Enclosure.Length != "" {
		item.Enclosure = &RssEnclosure{Url: i.Enclosure.Url, Type: i.Enclosure.Type, Length: i.Enclosure.Length}
	}
	if i.Author != nil {
		item.Author = i.Author.Name
	}
	return item
}
|
go
|
{
"resource": ""
}
|
q6146
|
RssFeed
|
train
|
// RssFeed converts the generic feed into an RSS channel, formatting the
// publication/build dates, composing the managing-editor string, copying
// the optional image, and converting every item.
func (r *Rss) RssFeed() *RssFeed {
	pub := anyTimeFormat(time.RFC1123Z, r.Created, r.Updated)
	build := anyTimeFormat(time.RFC1123Z, r.Updated)
	author := ""
	if r.Author != nil {
		author = r.Author.Email
		if len(r.Author.Name) > 0 {
			// RSS convention: "email (name)".
			author = fmt.Sprintf("%s (%s)", r.Author.Email, r.Author.Name)
		}
	}
	var image *RssImage
	if r.Image != nil {
		image = &RssImage{Url: r.Image.Url, Title: r.Image.Title, Link: r.Image.Link, Width: r.Image.Width, Height: r.Image.Height}
	}
	channel := &RssFeed{
		Title: r.Title,
		Link: r.Link.Href,
		Description: r.Description,
		ManagingEditor: author,
		PubDate: pub,
		LastBuildDate: build,
		Copyright: r.Copyright,
		Image: image,
	}
	for _, i := range r.Items {
		channel.Items = append(channel.Items, newRssItem(i))
	}
	return channel
}
|
go
|
{
"resource": ""
}
|
q6147
|
Add
|
train
|
// Add appends item to the feed's item list.
func (f *Feed) Add(item *Item) {
	f.Items = append(f.Items, item)
}
|
go
|
{
"resource": ""
}
|
q6148
|
anyTimeFormat
|
train
|
// anyTimeFormat formats the first non-zero time in times using format, or
// returns "" when every supplied time is the zero value.
func anyTimeFormat(format string, times ...time.Time) string {
	for _, candidate := range times {
		if candidate.IsZero() {
			continue
		}
		return candidate.Format(format)
	}
	return ""
}
|
go
|
{
"resource": ""
}
|
q6149
|
ToAtom
|
train
|
// ToAtom serializes the feed as an Atom XML document.
func (f *Feed) ToAtom() (string, error) {
	return ToXML(&Atom{f})
}
|
go
|
{
"resource": ""
}
|
q6150
|
WriteAtom
|
train
|
// WriteAtom writes the feed to w as an Atom XML document.
func (f *Feed) WriteAtom(w io.Writer) error {
	atom := &Atom{f}
	return WriteXML(atom, w)
}
|
go
|
{
"resource": ""
}
|
q6151
|
ToRss
|
train
|
// ToRss serializes the feed as an RSS XML document.
func (f *Feed) ToRss() (string, error) {
	return ToXML(&Rss{f})
}
|
go
|
{
"resource": ""
}
|
q6152
|
WriteRss
|
train
|
// WriteRss writes the feed to w as an RSS XML document.
func (f *Feed) WriteRss(w io.Writer) error {
	rss := &Rss{f}
	return WriteXML(rss, w)
}
|
go
|
{
"resource": ""
}
|
q6153
|
ToJSON
|
train
|
// ToJSON serializes the feed as a JSON Feed document.
func (f *Feed) ToJSON() (string, error) {
	return (&JSON{f}).ToJSON()
}
|
go
|
{
"resource": ""
}
|
q6154
|
WriteJSON
|
train
|
// WriteJSON writes the feed to w as an indented JSON Feed document.
func (f *Feed) WriteJSON(w io.Writer) error {
	feed := (&JSON{f}).JSONFeed()
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	return enc.Encode(feed)
}
|
go
|
{
"resource": ""
}
|
q6155
|
Sort
|
train
|
// Sort stably orders the feed's items using the supplied comparison.
func (f *Feed) Sort(less func(a, b *Item) bool) {
	sort.SliceStable(f.Items, func(i, j int) bool {
		return less(f.Items[i], f.Items[j])
	})
}
|
go
|
{
"resource": ""
}
|
q6156
|
NewUUID
|
train
|
// NewUUID returns a new random (version 4) UUID. It panics if the system
// source of randomness fails.
func NewUUID() *UUID {
	u := &UUID{}
	_, err := rand.Read(u[:16])
	if err != nil {
		panic(err)
	}
	// Set the RFC 4122 variant bits (top bits of byte 8 become 10xx) and
	// the version nibble (top nibble of byte 6 becomes 0100 = version 4).
	u[8] = (u[8] | 0x80) & 0xBf
	u[6] = (u[6] | 0x40) & 0x4f
	return u
}
|
go
|
{
"resource": ""
}
|
q6157
|
AtomFeed
|
train
|
// AtomFeed converts the generic feed into its Atom representation,
// preferring the update time over the creation time for the feed's
// Updated element and copying the optional author and every item.
func (a *Atom) AtomFeed() *AtomFeed {
	updated := anyTimeFormat(time.RFC3339, a.Updated, a.Created)
	feed := &AtomFeed{
		Xmlns: ns,
		Title: a.Title,
		Link: &AtomLink{Href: a.Link.Href, Rel: a.Link.Rel},
		Subtitle: a.Description,
		// The feed link doubles as the Atom feed ID.
		Id: a.Link.Href,
		Updated: updated,
		Rights: a.Copyright,
	}
	if a.Author != nil {
		feed.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: a.Author.Name, Email: a.Author.Email}}
	}
	for _, e := range a.Items {
		feed.Entries = append(feed.Entries, newAtomEntry(e))
	}
	return feed
}
|
go
|
{
"resource": ""
}
|
q6158
|
add
|
train
|
// add returns p advanced by x bytes. The whySafe argument is unused at
// runtime: callers pass a human-readable justification for why the
// arithmetic cannot yield a pointer outside the original allocation,
// mirroring the convention used inside the reflect package.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
|
go
|
{
"resource": ""
}
|
q6159
|
discoverTypes
|
train
|
// discoverTypes initializes the type registries and populates them using
// the loader appropriate for the running Go version: Go 1.5/1.6 share one
// runtime layout, everything newer uses the 1.7+ layout.
func discoverTypes() {
	types = make(map[string]reflect.Type)
	packages = make(map[string]map[string]reflect.Type)
	ver := runtime.Version()
	switch {
	case ver == "go1.5" || strings.HasPrefix(ver, "go1.5."),
		ver == "go1.6" || strings.HasPrefix(ver, "go1.6."):
		loadGo15Types()
	default:
		loadGo17Types()
	}
}
|
go
|
{
"resource": ""
}
|
q6160
|
TypeByName
|
train
|
// TypeByName looks up a registered type by its fully qualified name,
// lazily discovering all types on first use.
func TypeByName(typeName string) Type {
	initOnce.Do(discoverTypes)
	t := types[typeName]
	return Type2(t)
}
|
go
|
{
"resource": ""
}
|
q6161
|
TypeByPackageName
|
train
|
// TypeByPackageName looks up a registered type by package path and type
// name, lazily discovering all types on first use. It returns nil when the
// package is unknown.
func TypeByPackageName(pkgPath string, name string) Type {
	initOnce.Do(discoverTypes)
	pkgTypes := packages[pkgPath]
	if pkgTypes == nil {
		return nil
	}
	return Type2(pkgTypes[name])
}
|
go
|
{
"resource": ""
}
|
q6162
|
DynamicLargeObjectCreateFile
|
train
|
// DynamicLargeObjectCreateFile opens a dynamic large object for writing
// with the supplied options, wrapping it in the configured buffering.
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}
	file := &DynamicLargeObjectCreateFile{largeObjectCreateFile: *lo}
	return withBuffer(opts, file), nil
}
|
go
|
{
"resource": ""
}
|
q6163
|
DynamicLargeObjectCreate
|
train
|
// DynamicLargeObjectCreate creates (or truncates) a dynamic large object
// for writing.
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	// Always start from an empty object.
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.DynamicLargeObjectCreateFile(opts)
}
|
go
|
{
"resource": ""
}
|
q6164
|
DynamicLargeObjectDelete
|
train
|
// DynamicLargeObjectDelete removes a dynamic large object and its
// segments via the generic large-object deletion path.
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
	return c.LargeObjectDelete(container, path)
}
|
go
|
{
"resource": ""
}
|
q6165
|
DynamicLargeObjectMove
|
train
|
// DynamicLargeObjectMove moves a dynamic large object by creating a new
// manifest at the destination pointing at the existing segments and then
// deleting the source manifest. The segments themselves are not copied.
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	// BUG FIX: the source object's metadata was being fetched from
	// dstContainer; the object being moved lives in srcContainer.
	info, headers, err := c.Object(srcContainer, srcObjectName)
	if err != nil {
		return err
	}
	segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
	if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil {
		return err
	}
	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
		return err
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6166
|
createDLOManifest
|
train
|
// createDLOManifest writes an empty manifest object whose
// X-Object-Manifest header points at the segment prefix, which is how
// Swift stitches dynamic large objects together.
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error {
	headers := Headers{"X-Object-Manifest": prefix}
	manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
	if err != nil {
		return err
	}
	return manifest.Close()
}
|
go
|
{
"resource": ""
}
|
q6167
|
newAuth
|
train
|
// newAuth selects an Authenticator implementation for the connection.
// When AuthVersion is unset it is inferred from the AuthUrl ("v3"/"v2"/
// "v1" substrings); unknown versions produce a 500-style error.
func newAuth(c *Connection) (Authenticator, error) {
	AuthVersion := c.AuthVersion
	if AuthVersion == 0 {
		if strings.Contains(c.AuthUrl, "v3") {
			AuthVersion = 3
		} else if strings.Contains(c.AuthUrl, "v2") {
			AuthVersion = 2
		} else if strings.Contains(c.AuthUrl, "v1") {
			AuthVersion = 1
		} else {
			return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
		}
	}
	switch AuthVersion {
	case 1:
		return &v1Auth{}, nil
	case 2:
		return &v2Auth{
			// Guess as to whether using API key or
			// password it will try both eventually so
			// this is just an optimization.
			useApiKey: len(c.ApiKey) >= 32,
		}, nil
	case 3:
		return &v3Auth{}, nil
	}
	return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
}
|
go
|
{
"resource": ""
}
|
q6168
|
Request
|
train
|
// Request builds the legacy v1 authentication request, passing the
// credentials in the X-Auth-User / X-Auth-Key headers.
func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
	req, err := http.NewRequest("GET", c.AuthUrl, nil)
	if err != nil {
		return nil, err
	}
	for header, value := range map[string]string{
		"User-Agent":  c.UserAgent,
		"X-Auth-Key":  c.ApiKey,
		"X-Auth-User": c.UserName,
	} {
		req.Header.Set(header, value)
	}
	return req, nil
}
|
go
|
{
"resource": ""
}
|
q6169
|
Response
|
train
|
// Response retains the v1 auth response headers, which carry the token
// and storage URL consumed by the other v1Auth methods.
func (auth *v1Auth) Response(resp *http.Response) error {
	auth.Headers = resp.Header
	return nil
}
|
go
|
{
"resource": ""
}
|
q6170
|
StorageUrl
|
train
|
// StorageUrl returns the storage URL from the v1 auth response headers.
// When Internal is true the host gets an "snet-" prefix (Rackspace's
// internal service network convention); if the URL fails to parse the
// original value is returned unchanged.
func (auth *v1Auth) StorageUrl(Internal bool) string {
	storageUrl := auth.Headers.Get("X-Storage-Url")
	if !Internal {
		return storageUrl
	}
	parsed, err := url.Parse(storageUrl)
	if err != nil {
		return storageUrl
	}
	parsed.Host = "snet-" + parsed.Host
	return parsed.String()
}
|
go
|
{
"resource": ""
}
|
q6171
|
Request
|
train
|
// Request builds the v2 (Keystone) authentication POST.  The JSON body
// carries password credentials by default and Rackspace-style API key
// credentials when useApiKey is set; the flag is toggled on each retry
// until one form succeeds (tracked via useApiKeyOk in Response).
func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
	auth.Region = c.Region
	// Toggle useApiKey if not first run and not OK yet
	if auth.notFirst && !auth.useApiKeyOk {
		auth.useApiKey = !auth.useApiKey
	}
	auth.notFirst = true
	// Create a V2 auth request for the body of the connection
	var v2i interface{}
	if !auth.useApiKey {
		// Normal swift authentication
		v2 := v2AuthRequest{}
		v2.Auth.PasswordCredentials.UserName = c.UserName
		v2.Auth.PasswordCredentials.Password = c.ApiKey
		v2.Auth.Tenant = c.Tenant
		v2.Auth.TenantId = c.TenantId
		v2i = v2
	} else {
		// Rackspace special with API Key
		v2 := v2AuthRequestRackspace{}
		v2.Auth.ApiKeyCredentials.UserName = c.UserName
		v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
		v2.Auth.Tenant = c.Tenant
		v2.Auth.TenantId = c.TenantId
		v2i = v2
	}
	body, err := json.Marshal(v2i)
	if err != nil {
		return nil, err
	}
	// Named authUrl (not "url") to avoid shadowing the net/url package,
	// which this file imports and uses elsewhere.
	authUrl := c.AuthUrl
	if !strings.HasSuffix(authUrl, "/") {
		authUrl += "/"
	}
	authUrl += "tokens"
	req, err := http.NewRequest("POST", authUrl, bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("User-Agent", c.UserAgent)
	return req, nil
}
|
go
|
{
"resource": ""
}
|
q6172
|
Response
|
train
|
// Response decodes the v2 auth JSON reply into auth.Auth.  A successful
// decode pins the current credential style (useApiKeyOk), so Request
// stops toggling between password and API key forms.
func (auth *v2Auth) Response(resp *http.Response) error {
	auth.Auth = new(v2AuthResponse)
	if err := readJson(resp, auth.Auth); err != nil {
		return err
	}
	auth.useApiKeyOk = true
	return nil
}
|
go
|
{
"resource": ""
}
|
q6173
|
endpointUrl
|
train
|
// endpointUrl scans the v2 service catalog for a service of the given
// Type and returns the URL matching the requested endpoint type.  An
// empty auth.Region matches any region; otherwise regions must be equal.
// Returns "" when nothing matches.
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
	for _, catalog := range auth.Auth.Access.ServiceCatalog {
		if catalog.Type != Type {
			continue
		}
		for _, endpoint := range catalog.Endpoints {
			if auth.Region != "" && auth.Region != endpoint.Region {
				continue
			}
			switch endpointType {
			case EndpointTypeInternal:
				return endpoint.InternalUrl
			case EndpointTypePublic:
				return endpoint.PublicUrl
			case EndpointTypeAdmin:
				return endpoint.AdminUrl
			default:
				return ""
			}
		}
	}
	return ""
}
|
go
|
{
"resource": ""
}
|
q6174
|
Expires
|
train
|
// Expires parses the token expiry timestamp from the auth response as
// RFC 3339; the zero time.Time is returned when parsing fails.
func (auth *v2Auth) Expires() time.Time {
	if t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires); err == nil {
		return t
	}
	return time.Time{}
}
|
go
|
{
"resource": ""
}
|
q6175
|
newTimeoutReader
|
train
|
// newTimeoutReader wraps reader so that reads stalling for longer than
// timeout invoke the cancel function.
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
	tr := &timeoutReader{}
	tr.reader = reader
	tr.timeout = timeout
	tr.cancel = cancel
	return tr
}
|
go
|
{
"resource": ""
}
|
q6176
|
newWatchdogReader
|
train
|
// newWatchdogReader wraps reader so each read resets timer, using the
// package-wide watchdogChunkSize to bound how long a single read can
// take between resets.
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
	wr := &watchdogReader{
		reader:    reader,
		timer:     timer,
		timeout:   timeout,
		chunkSize: watchdogChunkSize,
	}
	return wr
}
|
go
|
{
"resource": ""
}
|
q6177
|
StaticLargeObjectCreateFile
|
train
|
// StaticLargeObjectCreateFile opens a static large object (SLO) for
// writing.  It fails with SLONotSupported when the cluster does not
// advertise SLO support, and raises opts.MinChunkSize to the cluster's
// minimum segment size before delegating to largeObjectCreate.
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		return nil, SLONotSupported
	}
	if minSize := info.SLOMinSegmentSize(); minSize > opts.MinChunkSize {
		opts.MinChunkSize = minSize
	}
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}
	return withBuffer(opts, &StaticLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}
|
go
|
{
"resource": ""
}
|
q6178
|
StaticLargeObjectCreate
|
train
|
// StaticLargeObjectCreate opens an SLO for writing from scratch: it forces
// truncate-and-create semantics (O_TRUNC|O_CREATE) and delegates to
// StaticLargeObjectCreateFile.
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.StaticLargeObjectCreateFile(opts)
}
|
go
|
{
"resource": ""
}
|
q6179
|
StaticLargeObjectDelete
|
train
|
// StaticLargeObjectDelete removes an SLO manifest together with its
// segments, failing early with SLONotSupported when the cluster lacks
// SLO support.
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
	if info, err := c.cachedQueryInfo(); err != nil || !info.SupportsSLO() {
		return SLONotSupported
	}
	return c.LargeObjectDelete(container, path)
}
|
go
|
{
"resource": ""
}
|
q6180
|
StaticLargeObjectMove
|
train
|
// StaticLargeObjectMove moves an SLO to dstContainer/dstObjectName by
// writing a new manifest over the existing segments, then deleting the
// source manifest.  Only object metadata headers are carried across.
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	swiftInfo, err := c.cachedQueryInfo()
	if err != nil || !swiftInfo.SupportsSLO() {
		return SLONotSupported
	}
	info, headers, err := c.Object(srcContainer, srcObjectName)
	if err != nil {
		return err
	}
	segmentContainer, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
	if err != nil {
		return err
	}
	// Copy only metadata during the move; other headers might not be
	// safe for copying.
	meta := headers.ObjectMetadata().ObjectHeaders()
	if err = c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, segmentContainer, segments, meta); err != nil {
		return err
	}
	return c.ObjectDelete(srcContainer, srcObjectName)
}
|
go
|
{
"resource": ""
}
|
q6181
|
createSLOManifest
|
train
|
// createSLOManifest PUTs a static large object manifest listing the given
// segments (path, etag, size) using the multipart-manifest=put query.
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
	sloSegments := make([]swiftSegment, len(segments))
	for i, segment := range segments {
		sloSegments[i] = swiftSegment{
			Path: segmentContainer + "/" + segment.Name,
			Etag: segment.Hash,
			Size: segment.Bytes,
		}
	}
	content, err := json.Marshal(sloSegments)
	if err != nil {
		return err
	}
	values := url.Values{}
	values.Set("multipart-manifest", "put")
	_, err = c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values)
	return err
}
|
go
|
{
"resource": ""
}
|
q6182
|
manage
|
train
|
// manage issues a request against the Rackspace CDN management endpoint.
// It installs an OnReAuth hook that lazily derives and caches the CDN URL
// from the authenticated connection, and runs the hook up front when
// already authenticated so the Call below targets a resolved URL.
func (c *RsConnection) manage(p swift.RequestOpts) (resp *http.Response, headers swift.Headers, err error) {
	p.OnReAuth = func() (string, error) {
		// Cache the CDN management URL from the auth response once known.
		if c.cdnUrl == "" {
			c.cdnUrl = c.Auth.CdnUrl()
		}
		if c.cdnUrl == "" {
			return "", errors.New("The X-CDN-Management-Url does not exist on the authenticated platform")
		}
		return c.cdnUrl, nil
	}
	if c.Authenticated() {
		// Resolve c.cdnUrl now; otherwise Call would be made against "".
		_, err = p.OnReAuth()
		if err != nil {
			return nil, nil, err
		}
	}
	return c.Connection.Call(c.cdnUrl, p)
}
|
go
|
{
"resource": ""
}
|
q6183
|
ContainerCDNDisable
|
train
|
// ContainerCDNDisable turns off CDN serving for a container by PUTting
// X-CDN-Enabled: false to the CDN management endpoint.
func (c *RsConnection) ContainerCDNDisable(container string) error {
	_, _, err := c.manage(swift.RequestOpts{
		Container:  container,
		Operation:  "PUT",
		ErrorMap:   swift.ContainerErrorMap,
		NoResponse: true,
		Headers:    swift.Headers{"X-CDN-Enabled": "false"},
	})
	return err
}
|
go
|
{
"resource": ""
}
|
q6184
|
cancelRequest
|
train
|
func cancelRequest(transport http.RoundTripper, req *http.Request) {
if tr, ok := transport.(interface {
CancelRequest(*http.Request)
}); ok {
tr.CancelRequest(req)
}
}
|
go
|
{
"resource": ""
}
|
q6185
|
resetTimer
|
train
|
// resetTimer restarts t so it fires after duration d.  Callers must
// ensure the timer has fired or been stopped/drained first, per the
// time.Timer.Reset documentation.
func resetTimer(t *time.Timer, d time.Duration) {
	t.Reset(d)
}
|
go
|
{
"resource": ""
}
|
q6186
|
largeObjectCreate
|
train
|
// largeObjectCreate prepares a largeObjectCreateFile for writing a large
// object in segments.
//
// The segment prefix is opts.SegmentPrefix when set, otherwise derived
// from the object name via swiftSegmentPath.  If the target object
// already exists: with O_TRUNC it is deleted first; otherwise its
// existing segments are reused (or, for a plain object, the object is
// moved into place as segment 1) so writes can modify it in place.
// The chunk size defaults to 10 MiB and is never below opts.MinChunkSize;
// with O_APPEND the file position starts at the current object length.
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
	var (
		segmentPath      string
		segmentContainer string
		segments         []Object
		currentLength    int64
		err              error
	)
	if opts.SegmentPrefix != "" {
		segmentPath = opts.SegmentPrefix
	} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
		return nil, err
	}
	if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
		if opts.Flags&os.O_TRUNC != 0 {
			// NOTE(review): the delete error is dropped here; a failed
			// delete could silently leave stale segments behind — confirm
			// this best-effort behaviour is intended.
			c.LargeObjectDelete(opts.Container, opts.ObjectName)
		} else {
			currentLength = info.Bytes
			if headers.IsLargeObject() {
				// Reuse the existing segments and their prefix so new
				// writes line up with the data already uploaded.
				segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
				if err != nil {
					return nil, err
				}
				if len(segments) > 0 {
					segmentPath = gopath.Dir(segments[0].Name)
				}
			} else {
				// Plain object: demote it to segment 1 of the new large
				// object so its data is preserved.
				if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
					return nil, err
				}
				segments = append(segments, info)
			}
		}
	} else if err != ObjectNotFound {
		return nil, err
	}
	// segmentContainer is not empty when the manifest already existed
	if segmentContainer == "" {
		if opts.SegmentContainer != "" {
			segmentContainer = opts.SegmentContainer
		} else {
			segmentContainer = opts.Container + "_segments"
		}
	}
	file := &largeObjectCreateFile{
		conn:             c,
		checkHash:        opts.CheckHash,
		container:        opts.Container,
		objectName:       opts.ObjectName,
		chunkSize:        opts.ChunkSize,
		minChunkSize:     opts.MinChunkSize,
		headers:          opts.Headers,
		segmentContainer: segmentContainer,
		prefix:           segmentPath,
		segments:         segments,
		currentLength:    currentLength,
	}
	if file.chunkSize == 0 {
		file.chunkSize = 10 * 1024 * 1024
	}
	if file.minChunkSize > file.chunkSize {
		file.chunkSize = file.minChunkSize
	}
	if opts.Flags&os.O_APPEND != 0 {
		file.filePos = currentLength
	}
	return file, nil
}
|
go
|
{
"resource": ""
}
|
q6187
|
LargeObjectDelete
|
train
|
// LargeObjectDelete deletes a large object (DLO or SLO) together with all
// of its segments.  When the cluster supports bulk delete, everything is
// removed in one request; otherwise each object is deleted individually.
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
	_, headers, err := c.Object(container, objectName)
	if err != nil {
		return err
	}
	// Build the list of (container, name) pairs to delete: segments
	// first, then the manifest itself last.
	var objects [][]string
	if headers.IsLargeObject() {
		segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
		if err != nil {
			return err
		}
		for _, obj := range segments {
			objects = append(objects, []string{segmentContainer, obj.Name})
		}
	}
	objects = append(objects, []string{container, objectName})
	info, err := c.cachedQueryInfo()
	if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
		filenames := make([]string, len(objects))
		for i, obj := range objects {
			filenames[i] = obj[0] + "/" + obj[1]
		}
		_, err = c.doBulkDelete(filenames)
		// Don't fail on ObjectNotFound because eventual consistency
		// makes this situation normal.
		if err != nil && err != Forbidden && err != ObjectNotFound {
			return err
		}
	} else {
		// No bulk delete support: remove the objects one at a time.
		for _, obj := range objects {
			if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
				return err
			}
		}
	}
	return nil
}
|
go
|
{
"resource": ""
}
|
q6188
|
Seek
|
train
|
// Seek sets the write position within the large object, following the
// usual whence convention: 0 = from start, 1 = relative to the current
// position, 2 = relative to the current end of the object.  On error the
// file position is left unchanged (the original code committed the new
// position before validating it, leaving a negative filePos behind after
// a failed seek).
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
	var pos int64
	switch whence {
	case 0:
		pos = offset
	case 1:
		pos = file.filePos + offset
	case 2:
		pos = file.currentLength + offset
	default:
		return -1, fmt.Errorf("invalid value for whence")
	}
	if pos < 0 {
		return -1, fmt.Errorf("negative offset")
	}
	file.filePos = pos
	return file.filePos, nil
}
|
go
|
{
"resource": ""
}
|
q6189
|
Write
|
train
|
// Write writes buf at the current file position, overwriting existing
// segments and appending new ones as needed, then advances the position.
// Returns the number of bytes consumed from buf.
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
	var sz int64
	var relativeFilePos int
	// Find the segment containing filePos and the offset within it.  The
	// last segment also matches positions up to minChunkSize past its
	// start — presumably so short appends extend it rather than creating
	// an undersized new segment (TODO confirm).
	writeSegmentIdx := 0
	for i, obj := range file.segments {
		if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
			relativeFilePos = int(file.filePos - sz)
			break
		}
		writeSegmentIdx++
		sz += obj.Bytes
	}
	// Write buf segment by segment, replacing segments in place or
	// appending once we run off the end of the existing list.
	sizeToWrite := len(buf)
	for offset := 0; offset < sizeToWrite; {
		newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
		if err != nil {
			return 0, err
		}
		if writeSegmentIdx < len(file.segments) {
			file.segments[writeSegmentIdx] = *newSegment
		} else {
			file.segments = append(file.segments, *newSegment)
		}
		offset += n
		writeSegmentIdx++
		// Only the first segment is written from a mid-segment offset.
		relativeFilePos = 0
	}
	// Advance the position and recompute the total length from the
	// (possibly changed) segment sizes.
	file.filePos += int64(sizeToWrite)
	file.currentLength = 0
	for _, obj := range file.segments {
		file.currentLength += obj.Bytes
	}
	return sizeToWrite, nil
}
|
go
|
{
"resource": ""
}
|
q6190
|
Metadata
|
train
|
// Metadata extracts the headers whose names begin with metaPrefix into a
// Metadata map, stripping the prefix and lowercasing the remaining key.
func (h Headers) Metadata(metaPrefix string) Metadata {
	metaPrefix = http.CanonicalHeaderKey(metaPrefix)
	m := Metadata{}
	for key, value := range h {
		if !strings.HasPrefix(key, metaPrefix) {
			continue
		}
		m[strings.ToLower(key[len(metaPrefix):])] = value
	}
	return m
}
|
go
|
{
"resource": ""
}
|
q6191
|
nsToFloatString
|
train
|
// nsToFloatString renders a nanosecond count as a decimal-seconds string,
// e.g. 1500000000 -> "1.5".  Trailing zeros in the fraction are removed,
// and the decimal point is omitted when the fraction is empty.
func nsToFloatString(ns int64) string {
	if ns < 0 {
		return "-" + nsToFloatString(-ns)
	}
	secs := fmt.Sprintf("%d", ns/1000000000)
	frac := strings.TrimRight(fmt.Sprintf("%09d", ns%1000000000), "0")
	if frac == "" {
		return secs
	}
	return secs + "." + frac
}
|
go
|
{
"resource": ""
}
|
q6192
|
floatStringToNs
|
train
|
// floatStringToNs parses a decimal-seconds string into nanoseconds,
// truncating any fraction beyond nine digits.  An empty string yields a
// parse error.
func floatStringToNs(s string) (int64, error) {
	const zeros = "000000000"
	point := strings.IndexRune(s, '.')
	if point < 0 {
		if len(s) > 0 { // keep empty input an error
			s += zeros
		}
		return strconv.ParseInt(s, 10, 64)
	}
	whole, frac := s[:point], s[point+1:]
	if len(frac) > 9 {
		frac = frac[:9]
	} else {
		frac += zeros[:9-len(frac)]
	}
	return strconv.ParseInt(whole+frac, 10, 64)
}
|
go
|
{
"resource": ""
}
|
q6193
|
newError
|
train
|
// newError builds an *Error carrying the given HTTP status code and text.
func newError(StatusCode int, Text string) *Error {
	return &Error{Text: Text, StatusCode: StatusCode}
}
|
go
|
{
"resource": ""
}
|
q6194
|
newErrorf
|
train
|
// newErrorf is like newError but formats the message fmt.Sprintf-style.
func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error {
	return newError(StatusCode, fmt.Sprintf(Text, Parameters...))
}
|
go
|
{
"resource": ""
}
|
q6195
|
drainAndClose
|
train
|
func drainAndClose(rd io.ReadCloser, err *error) {
if rd == nil {
return
}
_, _ = io.Copy(ioutil.Discard, rd)
cerr := rd.Close()
if err != nil && *err == nil {
*err = cerr
}
}
|
go
|
{
"resource": ""
}
|
q6196
|
parseHeaders
|
train
|
// parseHeaders maps the response status through errorMap (if given) and
// rejects non-2xx codes, draining and closing the body on any failure so
// the connection can be reused.  On success the body is left open for
// the caller.
func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error {
	if errorMap != nil {
		if mapped, ok := errorMap[resp.StatusCode]; ok {
			drainAndClose(resp.Body, nil)
			return mapped
		}
	}
	if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
		return nil
	}
	drainAndClose(resp.Body, nil)
	return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status)
}
|
go
|
{
"resource": ""
}
|
q6197
|
doTimeoutRequest
|
train
|
// doTimeoutRequest performs req, returning TimeoutError if timer fires
// first.  The request runs in a background goroutine (with a buffered
// channel so it can always deliver its result and exit); on timeout the
// in-flight request is cancelled so sockets and goroutines aren't leaked.
//
// The trailing panic("unreachable") the original carried "For Go 1.0" was
// removed: both select cases return, so go vet flags it as unreachable.
func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) {
	// Do the request in the background so we can check the timeout
	type result struct {
		resp *http.Response
		err  error
	}
	done := make(chan result, 1)
	go func() {
		resp, err := c.client.Do(req)
		done <- result{resp, err}
	}()
	// Wait for the read or the timeout
	select {
	case r := <-done:
		return r.resp, r.err
	case <-timer.C:
		// Kill the connection on timeout so we don't leak sockets or goroutines
		cancelRequest(c.Transport, req)
		return nil, TimeoutError
	}
}
|
go
|
{
"resource": ""
}
|
q6198
|
setDefaults
|
train
|
// setDefaults fills in zero-valued connection settings: user agent, retry
// count, connect/request timeouts, the HTTP transport and the client.
func (c *Connection) setDefaults() {
	if c.UserAgent == "" {
		c.UserAgent = DefaultUserAgent
	}
	if c.Retries == 0 {
		c.Retries = DefaultRetries
	}
	if c.ConnectTimeout == 0 {
		c.ConnectTimeout = 10 * time.Second
	}
	if c.Timeout == 0 {
		c.Timeout = 60 * time.Second
	}
	if c.Transport == nil {
		transport := &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			// Half of linux's default open files limit (1024).
			MaxIdleConnsPerHost: 512,
		}
		SetExpectContinueTimeout(transport, 5*time.Second)
		c.Transport = transport
	}
	if c.client == nil {
		c.client = &http.Client{Transport: c.Transport}
	}
}
|
go
|
{
"resource": ""
}
|
q6199
|
Authenticate
|
train
|
// Authenticate performs authentication under the auth lock, serialising
// concurrent callers around the unexported authenticate.
func (c *Connection) Authenticate() error {
	c.authLock.Lock()
	defer c.authLock.Unlock()
	return c.authenticate()
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.