_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
// Open authenticates and decrypts the SealedData payload with the
// service's secret key (NaCl secretbox) and emits a success or failure
// metric based on the named error return.
q17100 | Open | train | func (s *Service) Open(e SealedData) (byt []byte, err error) {
	// Once the function is complete, check whether we are returning an
	// error or not: if we are, emit a failure metric; if not, a success
	// metric. This relies on the named return value `err`.
	defer func() {
		if err == nil {
			s.metricsClient.Inc("success", 1, 1)
		} else {
			s.metricsClient.Inc("failure", 1, 1)
		}
	}()
	// convert nonce to an array (secretbox requires a fixed-size nonce)
	nonce, err := nonceSliceToArray(e.NonceBytes())
	if err != nil {
		return nil, err
	}
	// decrypt; ok is false if authentication of the ciphertext fails
	var decrypted []byte
	decrypted, ok := secretbox.Open(decrypted, e.CiphertextBytes(), nonce, s.secretKey)
	if !ok {
		return nil, fmt.Errorf("unable to decrypt message")
	}
	return decrypted, nil
} | go | {
"resource": ""
} |
// Watch streams a `true` value on the returned channel each time an etcd
// event is observed on the election key, re-creating the watcher when the
// etcd event index has been compacted. The channel is closed when the
// inner watch context is canceled.
q17101 | Watch | train | func (s *EtcdElection) Watch(ctx context.Context, startIndex uint64) chan bool {
	results := make(chan bool)
	var cancel atomic.Value
	// NOTE(review): `stop` is a local that is never stored to anywhere in
	// this function, so the LoadInt32 check below can never trip — confirm
	// whether a Store was lost in a refactor.
	var stop int32
	go func() {
		// Propagate caller cancellation to the current per-iteration context.
		select {
		case <-ctx.Done():
			// NOTE(review): if ctx is already done before the loop's first
			// cancel.Store below, Load() returns nil and this type assertion
			// panics — verify ordering with the caller.
			cancel.Load().(context.CancelFunc)()
			return
		}
	}()
	// Create our initial watcher
	watcher := s.api.Watcher(s.conf.Election, nil)
	s.wg.Loop(func() bool {
		// this ensures we exit properly if the watcher isn't connected to etcd
		if atomic.LoadInt32(&stop) == 1 {
			return false
		}
		// Bound each watch round-trip to 10s; the CancelFunc is published so
		// the goroutine above can abort the in-flight call.
		ctx, c := context.WithTimeout(context.Background(), time.Second*10)
		cancel.Store(c)
		resp, err := watcher.Next(ctx)
		if err != nil {
			if err == context.Canceled {
				// Caller canceled: signal consumers by closing the channel.
				close(results)
				return false
			}
			if cErr, ok := err.(etcd.Error); ok {
				if cErr.Code == etcd.ErrorCodeEventIndexCleared {
					logrus.WithField("category", "election").
						Infof("EtcdElection %s - new index is %d",
							err, cErr.Index+1)
					// Re-create the watcher with a newer index until we catch up
					watcher = s.api.Watcher(s.conf.Election, nil)
					return true
				}
			}
			// DeadlineExceeded is the normal idle-timeout path; anything else
			// is logged and retried after a back-off.
			if err != context.DeadlineExceeded {
				logrus.WithField("category", "election").
					Errorf("EtcdElection etcd error: %s", err)
				time.Sleep(time.Second * 10)
			}
		} else {
			logrus.WithField("category", "election").
				Debugf("EtcdElection etcd event: %+v\n", resp)
			results <- true
		}
		return true
	})
	return results
} | go | {
"resource": ""
} |
// EncodedStringToKey decodes a standard-alphabet base64 string and
// converts the resulting bytes into a fixed-size secret key array.
q17102 | EncodedStringToKey | train | func EncodedStringToKey(encodedKey string) (*[SecretKeyLength]byte, error) {
	// Undo the base64 encoding first; a malformed string fails here.
	raw, decodeErr := base64.StdEncoding.DecodeString(encodedKey)
	if decodeErr != nil {
		return nil, decodeErr
	}
	// The shared helper enforces the expected key length.
	return KeySliceToArray(raw)
} | go | {
"resource": ""
} |
// SealedDataToString serializes sealed data to JSON and wraps it in
// URL-safe base64 so it can travel in query strings and headers.
q17103 | SealedDataToString | train | func SealedDataToString(sealedData SealedData) (string, error) {
	encoded, marshalErr := json.Marshal(sealedData)
	if marshalErr != nil {
		return "", marshalErr
	}
	// URL-safe alphabet matches StringToSealedData on the decode side.
	return base64.URLEncoding.EncodeToString(encoded), nil
} | go | {
"resource": ""
} |
q17104 | StringToSealedData | train | func StringToSealedData(encodedBytes string) (SealedData, error) {
bytes, err := base64.URLEncoding.DecodeString(encodedBytes)
if err != nil {
return nil, err
}
var sb SealedBytes
err = json.Unmarshal(bytes, &sb)
if err != nil {
return nil, err
}
return &sb, nil
} | go | {
"resource": ""
} |
// Broadcast sends an empty event to every registered client channel.
q17105 | Broadcast | train | func (b *broadcast) Broadcast() {
	b.mutex.Lock()
	// NOTE(review): the sends happen while holding the mutex; client
	// channels are buffered (10000) at registration, but a full channel
	// would block all other broadcast/wait callers — confirm consumers
	// drain fast enough.
	for _, channel := range b.clients {
		channel <- struct{}{}
	}
	b.mutex.Unlock()
} | go | {
"resource": ""
} |
// Wait blocks until the named client receives a broadcast event, or until
// the broadcaster's done channel is closed. The client's channel is
// created lazily on first use.
//
// The lazy register-under-lock logic previously duplicated WaitChan's
// body; it is now delegated to WaitChan so it lives in exactly one place.
q17106 | Wait | train | func (b *broadcast) Wait(name string) {
	// WaitChan registers the client (buffered channel) under the mutex
	// if it does not exist yet, exactly as the inline code used to.
	channel := b.WaitChan(name)
	// Wait for a new event or done is closed
	select {
	case <-channel:
		return
	case <-b.done:
		return
	}
} | go | {
"resource": ""
} |
// WaitChan returns the event channel for the named client, creating and
// registering it on first use.
q17107 | WaitChan | train | func (b *broadcast) WaitChan(name string) chan struct{} {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	if _, registered := b.clients[name]; !registered {
		// Generous buffer so broadcasters are unlikely to block on send.
		b.clients[name] = make(chan struct{}, 10000)
	}
	return b.clients[name]
} | go | {
"resource": ""
} |
// NewNonceCache builds a NonceCache backed by a TTL map with the given
// capacity, per-entry TTL (seconds), and clock (injectable for tests).
q17108 | NewNonceCache | train | func NewNonceCache(capacity int, cacheTTL int, clock holster.Clock) *NonceCache {
	return &NonceCache{
		cache:    holster.NewTTLMapWithClock(capacity, clock),
		cacheTTL: cacheTTL,
		clock:    clock,
	}
} | go | {
"resource": ""
} |
// InCache reports whether the nonce has been seen before. Note that this
// is a check-and-set: a nonce that is not present is inserted as a side
// effect, so a second call with the same nonce (within the TTL) returns
// true. The name undersells that mutation — callers should be aware.
q17109 | InCache | train | func (n *NonceCache) InCache(nonce string) bool {
	n.Lock()
	defer n.Unlock()
	// check if the nonce is already in the cache
	_, exists := n.cache.Get(nonce)
	if exists {
		return true
	}
	// it's not, so let's put it in the cache (value is unused)
	n.cache.Set(nonce, "", n.cacheTTL)
	return false
} | go | {
"resource": ""
} |
// NewLRUCache creates an LRU cache that evicts the least-recently-used
// entry once more than maxEntries records are stored (0 = unbounded).
q17110 | NewLRUCache | train | func NewLRUCache(maxEntries int) *LRUCache {
	return &LRUCache{
		MaxEntries: maxEntries,
		ll:         list.New(),
		cache:      make(map[interface{}]*list.Element),
	}
} | go | {
"resource": ""
} |
// Add inserts (or updates) a non-expiring record; it returns true when an
// existing entry for the key was replaced.
q17111 | Add | train | func (c *LRUCache) Add(key Key, value interface{}) bool {
	return c.addRecord(&cacheRecord{key: key, value: value})
} | go | {
"resource": ""
} |
// AddWithTTL inserts (or updates) a record that expires TTL from now; it
// returns true when an existing entry for the key was replaced.
q17112 | AddWithTTL | train | func (c *LRUCache) AddWithTTL(key Key, value interface{}, TTL time.Duration) bool {
	// Record the absolute UTC deadline rather than the relative TTL.
	deadline := time.Now().UTC().Add(TTL)
	rec := cacheRecord{
		key:      key,
		value:    value,
		expireAt: &deadline,
	}
	return c.addRecord(&rec)
} | go | {
"resource": ""
} |
// addRecord inserts the record under the lock, returning true if a record
// with the same key already existed (in which case it is overwritten in
// place and moved to the front). Inserting past MaxEntries evicts the
// least-recently-used entry.
q17113 | addRecord | train | func (c *LRUCache) addRecord(record *cacheRecord) bool {
	// Registering the deferred Unlock before Lock is this file's
	// convention (see Stats/Keys/Peek); behavior is identical.
	defer c.mutex.Unlock()
	c.mutex.Lock()
	// If the key already exist, set the new value
	if ee, ok := c.cache[record.key]; ok {
		c.ll.MoveToFront(ee)
		// Overwrite the stored record in place so the element pointer
		// held by the map stays valid.
		temp := ee.Value.(*cacheRecord)
		*temp = *record
		return true
	}
	ele := c.ll.PushFront(record)
	c.cache[record.key] = ele
	// MaxEntries == 0 means unbounded.
	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
		c.removeOldest()
	}
	return false
} | go | {
"resource": ""
} |
// Stats returns a snapshot of the cache statistics. This is a destructive
// read: the internal counters are reset to zero after each call (Size is
// recomputed each time), so callers must aggregate externally.
q17114 | Stats | train | func (c *LRUCache) Stats() LRUCacheStats {
	defer func() {
		// Reset counters after the copy below has been returned.
		c.stats = LRUCacheStats{}
		c.mutex.Unlock()
	}()
	c.mutex.Lock()
	c.stats.Size = int64(len(c.cache))
	return c.stats
} | go | {
"resource": ""
} |
// Keys returns the keys currently stored in the cache, in no particular
// order (map iteration order). Returns nil when the cache is empty.
q17115 | Keys | train | func (c *LRUCache) Keys() (keys []interface{}) {
	c.mutex.Lock()
	for k := range c.cache {
		keys = append(keys, k)
	}
	c.mutex.Unlock()
	return
} | go | {
"resource": ""
} |
// Peek returns the value stored under key without promoting the entry in
// the LRU order; ok reports whether the key was present.
q17116 | Peek | train | func (c *LRUCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	ele, hit := c.cache[key]
	if !hit {
		return nil, false
	}
	// Deliberately no MoveToFront here — that is what makes this a peek.
	return ele.Value.(*cacheRecord).value, true
} | go | {
"resource": ""
} |
// Update changes the priority of an item already in the queue by removing
// it and re-pushing it with the new priority (re-heapify).
q17117 | Update | train | func (p *PriorityQueue) Update(el *PQItem, priority int) {
	heap.Remove(p.impl, el.index)
	el.Priority = priority
	heap.Push(p.impl, el)
} | go | {
"resource": ""
} |
// Bytes returns the requested number of bytes read from the system
// cryptographically-secure random source (crypto/rand).
q17118 | Bytes | train | func (c *CSPRNG) Bytes(bytes int) ([]byte, error) {
	buf := make([]byte, bytes)
	// ReadFull guarantees the buffer is completely filled or errors out.
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		return nil, err
	}
	return buf, nil
} | go | {
"resource": ""
} |
// Run executes callBack(data) in a new goroutine tracked by the wait
// group; a non-nil error is appended to the shared error slice returned
// later by Wait.
q17119 | Run | train | func (wg *WaitGroup) Run(callBack func(interface{}) error, data interface{}) {
	wg.wg.Add(1)
	go func() {
		err := callBack(data)
		if err == nil {
			wg.wg.Done()
			return
		}
		// Record the error under the mutex before signalling completion.
		wg.mutex.Lock()
		wg.errs = append(wg.errs, err)
		wg.wg.Done()
		wg.mutex.Unlock()
	}()
} | go | {
"resource": ""
} |
// Go runs cb in a new goroutine tracked by the wait group. Unlike Run it
// carries no error reporting.
q17120 | Go | train | func (wg *WaitGroup) Go(cb func()) {
	wg.wg.Add(1)
	go func() {
		cb()
		wg.wg.Done()
	}()
} | go | {
"resource": ""
} |
// Loop runs callBack repeatedly in a tracked goroutine until the callback
// returns false, at which point the goroutine is marked done.
q17121 | Loop | train | func (wg *WaitGroup) Loop(callBack func() bool) {
	wg.wg.Add(1)
	go func() {
		// Keep invoking the callback until it asks to stop.
		for callBack() {
		}
		wg.wg.Done()
	}()
} | go | {
"resource": ""
} |
// Wait blocks until all tracked goroutines finish, then returns the
// errors collected by Run, or nil if there were none.
q17122 | Wait | train | func (wg *WaitGroup) Wait() []error {
	wg.wg.Wait()
	wg.mutex.Lock()
	defer wg.mutex.Unlock()
	if len(wg.errs) == 0 {
		return nil
	}
	return wg.errs
} | go | {
"resource": ""
} |
// generateKey obtains a secret key either from a key file on disk
// (keypath non-empty) or by deriving one with PBKDF2 from a passphrase
// read from stdin. isPass reports which path was taken, so the caller
// knows whether the salt/iteration parameters matter.
q17123 | generateKey | train | func generateKey(keypath string, salt []byte, keyiter int) (key *[secret.SecretKeyLength]byte, isPass bool, err error) {
	// if a keypath is given try and use it
	if keypath != "" {
		key, err := secret.ReadKeyFromDisk(keypath)
		if err != nil {
			return nil, false, fmt.Errorf("unable to build secret service: %v", err)
		}
		return key, false, nil
	}
	// otherwise read in a passphrase from stdin and use that.
	// NOTE(review): fmt.Scanln echoes the passphrase to the terminal —
	// remember to reset your terminal afterwards (original author's note);
	// a no-echo terminal read would be safer, confirm requirements.
	var passphrase string
	fmt.Printf("Passphrase: ")
	fmt.Scanln(&passphrase)
	// derive a 32-byte key via PBKDF2-SHA256 and return it
	keySlice := pbkdf2.Key([]byte(passphrase), salt, keyiter, 32, sha256.New)
	keyBytes, err := secret.KeySliceToArray(keySlice)
	if err != nil {
		return nil, true, err
	}
	return keyBytes, true, nil
} | go | {
"resource": ""
} |
// writeCiphertext serializes the sealed ciphertext (plus the passphrase
// derivation parameters when a passphrase was used) as indented JSON and
// writes it to filename with 0600 permissions.
//
// salt/keyiter are only recorded when isPass is true, since a disk-loaded
// key needs no derivation parameters to decrypt later.
q17124 | writeCiphertext | train | func writeCiphertext(salt []byte, keyiter int, isPass bool, sealed secret.SealedData, filename string) error {
	// fill in the ciphertext fields
	ec := EncodedCiphertext{
		CiphertextNonce: sealed.NonceBytes(),
		Ciphertext:      sealed.CiphertextBytes(),
		CipherAlgorithm: "salsa20_poly1305",
	}
	// if we used a passphrase, also set the passphrase fields
	// (idiomatic boolean test; was `isPass == true`)
	if isPass {
		ec.KeySalt = salt
		ec.KeyIter = keyiter
		ec.KeyAlgorithm = "pbkdf#2"
	}
	// marshal encoded ciphertext into an indented json document
	b, err := json.MarshalIndent(ec, "", " ")
	if err != nil {
		return err
	}
	// write to disk with read/write permissions for the current user only
	return ioutil.WriteFile(filename, b, 0600)
} | go | {
"resource": ""
} |
// readCiphertext reads an EncodedCiphertext JSON file (the counterpart of
// writeCiphertext) and returns the key salt, key iteration count, and the
// sealed nonce+ciphertext pair.
q17125 | readCiphertext | train | func readCiphertext(filename string) ([]byte, int, *secret.SealedBytes, error) {
	plaintextBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, 0, nil, err
	}
	var ec EncodedCiphertext
	err = json.Unmarshal(plaintextBytes, &ec)
	if err != nil {
		return nil, 0, nil, err
	}
	// KeySalt/KeyIter are zero-valued when the file was sealed with a
	// disk-loaded key rather than a passphrase.
	sealedBytes := &secret.SealedBytes{
		Ciphertext: ec.Ciphertext,
		Nonce:      ec.CiphertextNonce,
	}
	return ec.KeySalt, ec.KeyIter, sealedBytes, nil
} | go | {
"resource": ""
} |
// Begin attempts to claim the named operation. The winner of the
// LoadOrStore race gets back an *opResult (with its write lock held) and
// must perform the operation; losers block on the winner's RLock until
// the result is published, returning nil when the winner succeeded and
// retrying otherwise.
q17126 | Begin | train | func (m *opMap) Begin(name string) *opResult {
	for {
		myOp := &opResult{opMap: m, name: name}
		// Hold the write lock before publishing so waiters block until
		// the owner releases it (presumably in an End/finish method on
		// opResult — not visible in this file, confirm).
		myOp.mu.Lock()
		opIface, loaded := m.ops.LoadOrStore(name, myOp)
		if !loaded { // no one else doing ops with this key
			return myOp
		}
		op := opIface.(*opResult)
		// someone else doing ops with this key, wait for
		// the result
		op.mu.RLock()
		// NOTE(review): the RLock acquired above is never released in this
		// function — on both the success return and the retry path. If
		// opResult does not unlock it elsewhere this accumulates reader
		// locks (and leaks myOp's own Lock on retry); verify against the
		// rest of the type.
		if op.success {
			return nil
		}
		// if we are here, we will retry the operation
	}
} | go | {
"resource": ""
} |
// renameAndUpdateDiskUsage atomically moves tmpPath into place at path,
// keeping the datastore's disk-usage counter consistent: any existing
// destination file's size is subtracted first, then the new file's size
// is added after the rename.
q17127 | renameAndUpdateDiskUsage | train | func (fs *Datastore) renameAndUpdateDiskUsage(tmpPath, path string) error {
	fi, err := os.Stat(path)
	// Destination exists, we need to discount it from diskUsage.
	// NOTE(review): the `fs != nil` guard is odd — a nil receiver would
	// panic at updateDiskUsage below anyway, and with fs == nil and
	// err == nil this returns a nil error without renaming; confirm the
	// guard's intent.
	if fs != nil && err == nil {
		atomic.AddInt64(&fs.diskUsage, -fi.Size())
	} else if !os.IsNotExist(err) {
		return err
	}
	// Rename and add new file's diskUsage. If the rename fails,
	// it will either a) Re-add the size of an existing file, which
	// was sustracted before b) Add 0 if there is no existing file.
	err = os.Rename(tmpPath, path)
	fs.updateDiskUsage(path, true)
	return err
} | go | {
"resource": ""
} |
// doDelete removes the file backing the given key, updating the in-memory
// disk-usage counter and checkpointing it on success. A missing file maps
// to datastore.ErrNotFound.
q17128 | doDelete | train | func (fs *Datastore) doDelete(key datastore.Key) error {
	_, path := fs.encode(key)
	// Capture the size before removal so we can discount it afterwards.
	fSize := fileSize(path)
	switch err := os.Remove(path); {
	case err == nil:
		atomic.AddInt64(&fs.diskUsage, -fSize)
		fs.checkpointDiskUsage()
		return nil
	case os.IsNotExist(err):
		return datastore.ErrNotFound
	default:
		return err
	}
} | go | {
"resource": ""
} |
// folderSize estimates the on-disk size of a directory tree. It stats up
// to DiskUsageFilesAverage randomly-chosen entries (recursing into
// subdirectories) and extrapolates the remainder from the running average,
// stopping early when the deadline passes. The returned accuracy tag
// records whether the figure is exact, sampled, or deadline-truncated.
q17129 | folderSize | train | func folderSize(path string, deadline time.Time) (int64, initAccuracy, error) {
	var du int64
	folder, err := os.Open(path)
	if err != nil {
		return 0, "", err
	}
	defer folder.Close()
	stat, err := folder.Stat()
	if err != nil {
		return 0, "", err
	}
	files, err := folder.Readdirnames(-1)
	if err != nil {
		return 0, "", err
	}
	totalFiles := len(files)
	i := 0
	filesProcessed := 0
	maxFiles := DiskUsageFilesAverage
	// Non-positive sampling limit means "stat everything".
	if maxFiles <= 0 {
		maxFiles = totalFiles
	}
	// randomize file order so the sample is unbiased
	// https://stackoverflow.com/a/42776696
	for i := len(files) - 1; i > 0; i-- {
		j := rand.Intn(i + 1)
		files[i], files[j] = files[j], files[i]
	}
	accuracy := exactA
	for {
		// Do not process any files after deadline is over
		if time.Now().After(deadline) {
			accuracy = timedoutA
			break
		}
		if i >= totalFiles || filesProcessed >= maxFiles {
			// Hitting the sampling cap (rather than exhausting the list)
			// downgrades the result to an approximation.
			if filesProcessed >= maxFiles {
				accuracy = approxA
			}
			break
		}
		// Stat the file
		fname := files[i]
		subpath := filepath.Join(path, fname)
		st, err := os.Stat(subpath)
		if err != nil {
			return 0, "", err
		}
		// Find folder size recursively; the child's accuracy taints ours.
		if st.IsDir() {
			du2, acc, err := folderSize(filepath.Join(subpath), deadline)
			if err != nil {
				return 0, "", err
			}
			accuracy = combineAccuracy(acc, accuracy)
			du += du2
			filesProcessed++
		} else { // in any other case, add the file size
			du += st.Size()
			filesProcessed++
		}
		i++
	}
	nonProcessed := totalFiles - filesProcessed
	// Avg is total size in this folder up to now / total files processed.
	// It includes folders and non-folders alike.
	avg := 0.0
	if filesProcessed > 0 {
		avg = float64(du) / float64(filesProcessed)
	}
	// Extrapolate the unsampled entries from the average, then add the
	// directory's own inode size.
	duEstimation := int64(avg * float64(nonProcessed))
	du += duEstimation
	du += stat.Size()
	//fmt.Println(path, "total:", totalFiles, "totalStat:", i, "totalFile:", filesProcessed, "left:", nonProcessed, "avg:", int(avg), "est:", int(duEstimation), "du:", du)
	return du, accuracy, nil
} | go | {
"resource": ""
} |
// updateDiskUsage adjusts the datastore's disk-usage counter by the size
// of the file at path, adding it when add is true and subtracting it
// otherwise. A zero-size delta is a no-op (no checkpoint written).
q17130 | updateDiskUsage | train | func (fs *Datastore) updateDiskUsage(path string, add bool) {
	delta := fileSize(path)
	if !add {
		delta = -delta
	}
	if delta == 0 {
		return
	}
	atomic.AddInt64(&fs.diskUsage, delta)
	fs.checkpointDiskUsage()
} | go | {
"resource": ""
} |
// DiskUsage returns the current in-memory disk-usage counter.
q17131 | DiskUsage | train | func (fs *Datastore) DiskUsage() (uint64, error) {
	// The value may differ from real disk usage because the filesystem
	// may allocate extra blocks for a directory that holds many files;
	// we don't account for such "resized" directories. In a large
	// datastore the difference should be negligible.
	du := atomic.LoadInt64(&fs.diskUsage)
	return uint64(du), nil
} | go | {
"resource": ""
} |
// deactivate shuts down the background checkpoint worker exactly once:
// it marks the datastore as shut down, closes the checkpoint channel, and
// waits for the worker to signal completion. Safe to call repeatedly.
q17132 | deactivate | train | func (fs *Datastore) deactivate() error {
	fs.shutdownLock.Lock()
	defer fs.shutdownLock.Unlock()
	if fs.shutdown {
		return nil
	}
	fs.shutdown = true
	close(fs.checkpointCh)
	// Block until the worker drains and closes `done`.
	<-fs.done
	return nil
} | go | {
"resource": ""
} |
// StoreStrLen_or_IndPtr records the ODBC length/indicator value on the
// parameter and returns a pointer to the stored field, as required by the
// ODBC bind APIs (the pointer must outlive the statement execution).
q17133 | StoreStrLen_or_IndPtr | train | func (p *Parameter) StoreStrLen_or_IndPtr(v api.SQLLEN) *api.SQLLEN {
	p.StrLen_or_IndPtr = v
	return &p.StrLen_or_IndPtr
} | go | {
"resource": ""
} |
// utf16toutf8 converts a NUL-terminated UTF-16 code-unit slice to UTF-8
// bytes, decoding surrogate pairs and substituting the replacement
// character for unpaired surrogates (mirrors the stdlib utf16.Decode
// logic but emits bytes directly).
q17134 | utf16toutf8 | train | func utf16toutf8(s []uint16) []byte {
	// Truncate at the first NUL terminator, if any.
	for i, v := range s {
		if v == 0 {
			s = s[0:i]
			break
		}
	}
	buf := make([]byte, 0, len(s)*2) // allow 2 bytes for every rune
	b := make([]byte, 4)
	for i := 0; i < len(s); i++ {
		var rr rune
		// surr1/surr2/surr3 bound the surrogate ranges — presumably the
		// same constants as unicode/utf16's; confirm where they are declared.
		switch r := s[i]; {
		case surr1 <= r && r < surr2 && i+1 < len(s) &&
			surr2 <= s[i+1] && s[i+1] < surr3:
			// valid surrogate sequence
			rr = utf16.DecodeRune(rune(r), rune(s[i+1]))
			i++
		case surr1 <= r && r < surr3:
			// invalid surrogate sequence
			rr = replacementChar
		default:
			// normal rune
			rr = rune(r)
		}
		// Encode the rune into the scratch buffer, then append only the
		// bytes actually produced.
		b := b[:cap(b)]
		n := utf8.EncodeRune(b, rr)
		b = b[:n]
		buf = append(buf, b...)
	}
	return buf
} | go | {
"resource": ""
} |
// serve wires up the HTTP router (static files, HTML templates, optional
// caller-supplied routes), parses the template directory, and starts an
// HTTP server on an ephemeral localhost port, returning its listener.
q17135 | serve | train | func serve(baseDirectoryPath string, fnR AdaptRouter, fnT TemplateData) (ln net.Listener) {
	// Init router
	var r = httprouter.New()
	// Static files
	r.ServeFiles("/static/*filepath", http.Dir(filepath.Join(baseDirectoryPath, "resources", "static")))
	// Dynamic pages
	r.GET("/templates/*page", handleTemplates(fnT))
	// Adapt router with caller-provided routes, if any
	if fnR != nil {
		fnR(r)
	}
	// Parse templates (fatal on error: the app cannot render without them)
	var err error
	if templates, err = astitemplate.ParseDirectory(filepath.Join(baseDirectoryPath, "resources", "templates"), ".html"); err != nil {
		astilog.Fatal(err)
	}
	// Listen — port is empty, so the OS assigns an ephemeral port;
	// callers discover it via ln.Addr().
	if ln, err = net.Listen("tcp", "127.0.0.1:"); err != nil {
		astilog.Fatal(err)
	}
	astilog.Debugf("Listening on %s", ln.Addr())
	// Serve in the background; errors from http.Serve are discarded here.
	go http.Serve(ln, r)
	return
} | go | {
"resource": ""
} |
// handleTemplates returns an HTTP handler that renders the HTML template
// named by the "*page" path parameter, with per-request data supplied by
// the optional fn callback. Missing templates yield 404; data or render
// failures yield 500.
q17136 | handleTemplates | train | func handleTemplates(fn TemplateData) httprouter.Handle {
	return func(rw http.ResponseWriter, r *http.Request, p httprouter.Params) {
		// Check if template exists
		var name = p.ByName("page") + ".html"
		if templates.Lookup(name) == nil {
			rw.WriteHeader(http.StatusNotFound)
			return
		}
		// Get data from the caller-supplied provider, if configured
		var d interface{}
		var err error
		if fn != nil {
			if d, err = fn(name, r, p); err != nil {
				astilog.Errorf("%s while retrieving data for template %s", err, name)
				rw.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
		// Execute template
		// NOTE(review): a failure mid-render may already have written part
		// of the body, in which case this WriteHeader is a no-op.
		if err = templates.ExecuteTemplate(rw, name, d); err != nil {
			astilog.Errorf("%s while handling template %s", err, name)
			rw.WriteHeader(http.StatusInternalServerError)
			return
		}
	}
} | go | {
"resource": ""
} |
// Start runs the wire service's main dispatch loop, routing messages from
// msgChan to the appropriate handler until the quit channel is closed.
// It blocks, so callers typically run it in its own goroutine.
q17137 | Start | train | func (ws *WireService) Start() {
	ws.quit = make(chan struct{})
	best, err := ws.chain.BestBlock()
	if err != nil {
		// Logged but not fatal: the loop below can still run.
		log.Error(err)
	}
	log.Infof("Starting wire service at height %d", int(best.height))
out:
	for {
		select {
		case m := <-ws.msgChan:
			// Dispatch on the concrete message type.
			switch msg := m.(type) {
			case newPeerMsg:
				ws.handleNewPeerMsg(msg.peer)
			case donePeerMsg:
				ws.handleDonePeerMsg(msg.peer)
			case headersMsg:
				ws.handleHeadersMsg(&msg)
			case merkleBlockMsg:
				ws.handleMerkleBlockMsg(&msg)
			case invMsg:
				ws.handleInvMsg(&msg)
			case txMsg:
				ws.handleTxMsg(&msg)
			case updateFiltersMsg:
				ws.handleUpdateFiltersMsg()
			default:
				log.Warningf("Unknown message type sent to WireService message chan: %T", msg)
			}
		case <-ws.quit:
			break out
		}
	}
} | go | {
"resource": ""
} |
// handleMessages adapts a MessageHandler into an astilectron message
// listener: it unmarshals each incoming event into a MessageIn and hands
// it to the handler. The listener's return value v is always nil.
q17138 | handleMessages | train | func handleMessages(w *astilectron.Window, messageHandler MessageHandler) astilectron.ListenerMessage {
	return func(e *astilectron.EventMessage) (v interface{}) {
		// Unmarshal message; malformed payloads are logged and dropped.
		var m MessageIn
		var err error
		if err = e.Unmarshal(&m); err != nil {
			astilog.Errorf("Unmarshaling message %+v failed", *e)
			return
		}
		// Handle message
		messageHandler(w, m)
		return
	}
} | go | {
"resource": ""
} |
// EstimateSpendFee estimates the fee of spending `amount` at the given
// fee level by building a throwaway transaction to a dummy address and
// returning (total inputs - total outputs).
q17139 | EstimateSpendFee | train | func (w *SPVWallet) EstimateSpendFee(amount int64, feeLevel wallet.FeeLevel) (uint64, error) {
	// Since this is an estimate we can use a dummy output address. Let's use a long one so we don't under estimate.
	addr, err := btc.DecodeAddress("bc1qxtq7ha2l5qg70atpwp3fus84fx3w0v2w4r2my7gt89ll3w0vnlgspu349h", w.params)
	if err != nil {
		return 0, err
	}
	tx, err := w.buildTx(amount, addr, feeLevel, nil)
	if err != nil {
		return 0, err
	}
	// Sum the transaction's outputs.
	var outval int64
	for _, output := range tx.TxOut {
		outval += output.Value
	}
	// Sum the value of the UTXOs the transaction spends.
	var inval int64
	utxos, err := w.txstore.Utxos().GetAll()
	if err != nil {
		return 0, err
	}
	for _, input := range tx.TxIn {
		for _, utxo := range utxos {
			if utxo.Op.Hash.IsEqual(&input.PreviousOutPoint.Hash) && utxo.Op.Index == input.PreviousOutPoint.Index {
				inval += utxo.Value
				break
			}
		}
	}
	// Sanity check: fee must be non-negative.
	if inval < outval {
		return 0, errors.New("Error building transaction: inputs less than outputs")
	}
	return uint64(inval - outval), err
} | go | {
"resource": ""
} |
// GimmeFilter builds a bloom filter covering all wallet addresses, all
// spent and unspent outpoints, and all watched scripts, for use in SPV
// merkle-block filtering.
q17140 | GimmeFilter | train | func (ts *TxStore) GimmeFilter() (*bloom.Filter, error) {
	// Refresh the in-memory address set before sizing the filter.
	ts.PopulateAdrs()
	// get all utxos to add outpoints to filter
	allUtxos, err := ts.Utxos().GetAll()
	if err != nil {
		return nil, err
	}
	allStxos, err := ts.Stxos().GetAll()
	if err != nil {
		return nil, err
	}
	ts.addrMutex.Lock()
	// Element count sizes the filter for the target false-positive rate.
	elem := uint32(len(ts.adrs)+len(allUtxos)+len(allStxos)) + uint32(len(ts.watchedScripts))
	f := bloom.NewFilter(elem, 0, 0.00003, wire.BloomUpdateAll)
	// note there could be false positives since we're just looking
	// for the 20 byte PKH without the opcodes.
	for _, a := range ts.adrs { // add 20-byte pubkeyhash
		f.Add(a.ScriptAddress())
	}
	ts.addrMutex.Unlock()
	for _, u := range allUtxos {
		f.AddOutPoint(&u.Op)
	}
	for _, s := range allStxos {
		f.AddOutPoint(&s.Utxo.Op)
	}
	// Watched scripts are reduced to their first address's script hash;
	// unparseable scripts are skipped.
	for _, w := range ts.watchedScripts {
		_, addrs, _, err := txscript.ExtractPkScriptAddrs(w, ts.params)
		if err != nil {
			continue
		}
		f.Add(addrs[0].ScriptAddress())
	}
	return f, nil
} | go | {
"resource": ""
} |
// CheckDoubleSpends returns the txids of any confirmed stored transaction
// that spends an outpoint also spent by argTx (i.e. transactions argTx
// would double-spend). Unconfirmed (height < 0) entries are ignored.
q17141 | CheckDoubleSpends | train | func (ts *TxStore) CheckDoubleSpends(argTx *wire.MsgTx) ([]*chainhash.Hash, error) {
	var dubs []*chainhash.Hash // slice of all double-spent txs
	argTxid := argTx.TxHash()
	txs, err := ts.Txns().GetAll(true)
	if err != nil {
		return dubs, err
	}
	for _, compTx := range txs {
		// Skip unconfirmed transactions.
		if compTx.Height < 0 {
			continue
		}
		r := bytes.NewReader(compTx.Bytes)
		msgTx := wire.NewMsgTx(1)
		// NOTE(review): the BtcDecode error is ignored; a corrupt stored
		// tx would be compared with zero inputs — confirm that's acceptable.
		msgTx.BtcDecode(r, 1, wire.WitnessEncoding)
		compTxid := msgTx.TxHash()
		for _, argIn := range argTx.TxIn {
			// iterate through inputs of compTx
			for _, compIn := range msgTx.TxIn {
				if outPointsEqual(argIn.PreviousOutPoint, compIn.PreviousOutPoint) && !compTxid.IsEqual(&argTxid) {
					// found double spend
					dubs = append(dubs, &compTxid)
					break // back to argIn loop
				}
			}
		}
	}
	return dubs, nil
} | go | {
"resource": ""
} |
// PopulateAdrs rebuilds the in-memory address list from the key manager,
// reloads the watched scripts, and refreshes the txid→height map from the
// transaction store. Always returns nil; lookup errors are skipped or
// ignored.
q17142 | PopulateAdrs | train | func (ts *TxStore) PopulateAdrs() error {
	keys := ts.keyManager.GetKeys()
	ts.addrMutex.Lock()
	ts.adrs = []btcutil.Address{}
	for _, k := range keys {
		addr, err := k.Address(ts.params)
		if err != nil {
			// Keys that cannot produce an address are silently skipped.
			continue
		}
		ts.adrs = append(ts.adrs, addr)
	}
	ts.addrMutex.Unlock()
	// Errors from these loads are deliberately discarded (best-effort).
	ts.watchedScripts, _ = ts.WatchedScripts().GetAll()
	txns, _ := ts.Txns().GetAll(true)
	ts.txidsMutex.Lock()
	for _, t := range txns {
		ts.txids[t.Txid] = t.Height
	}
	ts.txidsMutex.Unlock()
	return nil
} | go | {
"resource": ""
} |
// inDeadZone reports whether a node position falls outside the valid part
// of a merkle tree of `size` leaves — either beyond the tree entirely or
// in the "dead zone" of positions past the last valid node on its level.
q17143 | inDeadZone | train | func inDeadZone(pos, size uint32) bool {
	msb := nextPowerOfTwo(size)
	last := size - 1 // last valid position is 1 less than size
	if pos > (msb<<1)-2 { // greater than root; not even in the tree
		log.Debug(" ?? greater than root ")
		return true
	}
	// Walk up levels until we find the one containing pos, carrying the
	// last-valid-position marker up with us.
	h := msb
	for pos >= h {
		h = h>>1 | msb
		last = last>>1 | msb
	}
	return pos > last
} | go | {
"resource": ""
} |
// MarkKeyAsUsed flags the key for the given script address as used in the
// datastore, then refreshes the lookahead window of derived keys.
q17144 | MarkKeyAsUsed | train | func (km *KeyManager) MarkKeyAsUsed(scriptAddress []byte) error {
	// Persist the used flag first; only extend the lookahead on success.
	err := km.datastore.MarkKeyAsUsed(scriptAddress)
	if err != nil {
		return err
	}
	return km.lookahead()
} | go | {
"resource": ""
} |
// calcRequiredWork returns the expected `Bits` (compact difficulty
// target) for the given header. Off retarget boundaries it carries the
// previous difficulty forward, honoring testnet's min-difficulty rule; on
// a boundary (height % epochLength == 0) it computes a full retarget from
// the current epoch.
//
// Fix: the walk back to the last non-special-difficulty header was
// wrapped in a `for { ... return ... }` loop whose body unconditionally
// returned on its first pass — a dead construct, now removed.
// `var err error = nil` is also reduced to the idiomatic zero value.
q17145 | calcRequiredWork | train | func (b *Blockchain) calcRequiredWork(header wire.BlockHeader, height int32, prevHeader StoredHeader) (uint32, error) {
	// If this is not a difficulty adjustment period
	if height%epochLength != 0 {
		// If we are on testnet
		if b.params.ReduceMinDifficulty {
			// If it's been more than 20 minutes since the last header return the minimum difficulty
			if header.Timestamp.After(prevHeader.header.Timestamp.Add(targetSpacing * 2)) {
				return b.params.PowLimitBits, nil
			}
			// Otherwise walk back past any headers mined under the special
			// min-difficulty rule and return the last real difficulty.
			var err error
			for err == nil && int32(prevHeader.height)%epochLength != 0 && prevHeader.header.Bits == b.params.PowLimitBits {
				var sh StoredHeader
				sh, err = b.db.GetPreviousHeader(prevHeader.header)
				// Error should only be non-nil if prevHeader is the checkpoint.
				// In that case we should just return checkpoint bits
				if err == nil {
					prevHeader = sh
				}
			}
			return prevHeader.header.Bits, nil
		}
		// Just return the bits from the last header
		return prevHeader.header.Bits, nil
	}
	// We are on a difficulty adjustment period so we need to correctly calculate the new difficulty.
	epoch, err := b.GetEpoch()
	if err != nil {
		log.Error(err)
		return 0, err
	}
	return calcDiffAdjust(*epoch, prevHeader.header, b.params), nil
} | go | {
"resource": ""
} |
// GetCommonAncestor walks two chain tips back to their fork point: first
// it rolls the taller chain back to equal height, then steps both back in
// lockstep until the block hashes match, returning that shared header.
q17146 | GetCommonAncestor | train | func (b *Blockchain) GetCommonAncestor(bestHeader, prevBestHeader StoredHeader) (*StoredHeader, error) {
	var err error
	// rollback walks a header back n parents.
	rollback := func(parent StoredHeader, n int) (StoredHeader, error) {
		for i := 0; i < n; i++ {
			parent, err = b.db.GetPreviousHeader(parent.header)
			if err != nil {
				return parent, err
			}
		}
		return parent, nil
	}
	majority := bestHeader
	minority := prevBestHeader
	// Equalize heights before the lockstep walk.
	if bestHeader.height > prevBestHeader.height {
		majority, err = rollback(majority, int(bestHeader.height-prevBestHeader.height))
		if err != nil {
			return nil, err
		}
	} else if prevBestHeader.height > bestHeader.height {
		minority, err = rollback(minority, int(prevBestHeader.height-bestHeader.height))
		if err != nil {
			return nil, err
		}
	}
	// Step both chains back together until the hashes converge.
	for {
		majorityHash := majority.header.BlockHash()
		minorityHash := minority.header.BlockHash()
		if majorityHash.IsEqual(&minorityHash) {
			return &majority, nil
		}
		majority, err = b.db.GetPreviousHeader(majority.header)
		if err != nil {
			return nil, err
		}
		minority, err = b.db.GetPreviousHeader(minority.header)
		if err != nil {
			return nil, err
		}
	}
} | go | {
"resource": ""
} |
// Rollback rewinds the header chain to the last header with a timestamp
// before t (or to the checkpoint, whichever comes first), deleting all
// later headers and re-marking the surviving header as the best tip.
q17147 | Rollback | train | func (b *Blockchain) Rollback(t time.Time) error {
	b.lock.Lock()
	defer b.lock.Unlock()
	checkpoint := GetCheckpoint(b.crationDate, b.params)
	checkPointHash := checkpoint.Header.BlockHash()
	sh, err := b.db.GetBestHeader()
	if err != nil {
		return err
	}
	// If t is greater than the timestamp at the tip then do nothing
	if sh.header.Timestamp.Before(t) {
		return nil
	}
	// If the tip is our checkpoint then do nothing
	checkHash := sh.header.BlockHash()
	if checkHash.IsEqual(&checkPointHash) {
		return nil
	}
	rollbackHeight := uint32(0)
	// Walk back until the checkpoint or a pre-t header is found. The huge
	// constant simply bounds the walk; it is far larger than any chain.
	for i := 0; i < 1000000000; i++ {
		sh, err = b.db.GetPreviousHeader(sh.header)
		if err != nil {
			return err
		}
		checkHash := sh.header.BlockHash()
		// If we rolled back to the checkpoint then stop here and set the checkpoint as the tip
		if checkHash.IsEqual(&checkPointHash) {
			rollbackHeight = checkpoint.Height
			break
		}
		// If we hit a header created before t then stop here and set this header as the tip
		if sh.header.Timestamp.Before(t) {
			rollbackHeight = sh.height
			break
		}
	}
	err = b.db.DeleteAfter(rollbackHeight)
	if err != nil {
		return err
	}
	// Persist the surviving header as the new best tip.
	return b.db.Put(sh, true)
} | go | {
"resource": ""
} |
// checkProofOfWork validates a header's proof of work: the compact target
// must be positive, no easier than the network's PowLimit, and the block
// hash (interpreted as a big integer) must not exceed the target.
//
// Fix: the debug log misspelled "negative" as "neagtive"; comment grammar
// tidied. Logic is unchanged.
q17148 | checkProofOfWork | train | func checkProofOfWork(header wire.BlockHeader, p *chaincfg.Params) bool {
	target := blockchain.CompactToBig(header.Bits)
	// The target must be greater than 0. Why can you even encode negative...
	if target.Sign() <= 0 {
		log.Debugf("Block target %064x is negative(??)\n", target.Bytes())
		return false
	}
	// The target must be less than the maximum allowed (difficulty 1)
	if target.Cmp(p.PowLimit) > 0 {
		log.Debugf("Block target %064x is "+
			"higher than max of %064x", target, p.PowLimit.Bytes())
		return false
	}
	// The header hash must be less than the claimed target in the header.
	blockHash := header.BlockHash()
	hashNum := blockchain.HashToBig(&blockHash)
	if hashNum.Cmp(target) > 0 {
		log.Debugf("Block hash %064x is higher than "+
			"required target of %064x", hashNum, target)
		return false
	}
	return true
} | go | {
"resource": ""
} |
// calcDiffAdjust computes the retargeted compact difficulty from the
// actual time spanned between the start and end headers of an epoch,
// clamping the adjustment to the protocol's 4x / 0.25x bounds and to the
// network's minimum difficulty (PowLimit).
q17149 | calcDiffAdjust | train | func calcDiffAdjust(start, end wire.BlockHeader, p *chaincfg.Params) uint32 {
	duration := end.Timestamp.UnixNano() - start.Timestamp.UnixNano()
	// Clamp the measured duration so a single retarget can change the
	// difficulty by at most a factor of 4 in either direction.
	if duration < minRetargetTimespan {
		log.Debugf("Whoa there, block %s off-scale high 4X diff adjustment!",
			end.BlockHash().String())
		duration = minRetargetTimespan
	} else if duration > maxRetargetTimespan {
		log.Debugf("Uh-oh! block %s off-scale low 0.25X diff adjustment!\n",
			end.BlockHash().String())
		duration = maxRetargetTimespan
	}
	// calculation of new 32-byte difficulty target
	// first turn the previous target into a big int
	prevTarget := blockchain.CompactToBig(end.Bits)
	// new target is old * duration...
	newTarget := new(big.Int).Mul(prevTarget, big.NewInt(duration))
	// divided by the ideal timespan (two weeks on mainnet)
	newTarget.Div(newTarget, big.NewInt(int64(targetTimespan)))
	// clip again if above minimum target (too easy)
	if newTarget.Cmp(p.PowLimit) > 0 {
		newTarget.Set(p.PowLimit)
	}
	return blockchain.BigToCompact(newTarget)
} | go | {
"resource": ""
} |
// provision restores the bundled "resources" assets into the base
// directory (only when the directory does not already exist) and then
// runs the optional caller-supplied provisioning hook.
q17150 | provision | train | func provision(baseDirectoryPath string, fnA RestoreAssets, fnP CustomProvision) (err error) {
	// Provision resources
	// TODO Handle upgrades and therefore removing the resources folder accordingly
	var pr = filepath.Join(baseDirectoryPath, "resources")
	if _, err = os.Stat(pr); os.IsNotExist(err) {
		// Restore assets
		astilog.Debugf("Restoring assets in %s", baseDirectoryPath)
		if err = fnA(baseDirectoryPath, "resources"); err != nil {
			err = errors.Wrapf(err, "restoring assets in %s failed", baseDirectoryPath)
			return
		}
	} else if err != nil {
		// Stat failed for a reason other than "not exists".
		err = errors.Wrapf(err, "stating %s failed", pr)
		return
	} else {
		// Directory already present: skip restore to preserve local edits.
		astilog.Debugf("%s already exists, skipping restoring assets...", pr)
	}
	// Custom provision
	if fnP != nil {
		if err = fnP(baseDirectoryPath); err != nil {
			err = errors.Wrap(err, "custom provisioning failed")
			return
		}
	}
	return
} | go | {
"resource": ""
} |
// getNewAddress picks a peer address to connect to. With a trusted peer
// configured it is always returned; otherwise the address manager is
// sampled up to 100 times, progressively relaxing freshness and port
// restrictions, while skipping addresses we are already connected to.
q17151 | getNewAddress | train | func (pm *PeerManager) getNewAddress() (net.Addr, error) {
	// If we have a trusted peer we'll just return it
	if pm.trustedPeer == nil {
		pm.peerMutex.Lock()
		defer pm.peerMutex.Unlock()
		// We're going to loop here and pull addresses from the addrManager until we get one that we
		// are not currently connect to or haven't recently tried.
	loop:
		for tries := 0; tries < 100; tries++ {
			ka := pm.addrManager.GetAddress()
			if ka == nil {
				continue
			}
			// only allow recent nodes (10mins) after we failed 30
			// times
			if tries < 30 && time.Since(ka.LastAttempt()) < 10*time.Minute {
				continue
			}
			// allow nondefault ports after 50 failed tries.
			if tries < 50 && fmt.Sprintf("%d", ka.NetAddress().Port) != pm.peerConfig.ChainParams.DefaultPort {
				continue
			}
			knownAddress := ka.NetAddress()
			// Don't return addresses we're still connected to
			// (labelled continue restarts the outer sampling loop).
			for _, p := range pm.connectedPeers {
				if p.NA().IP.String() == knownAddress.IP.String() {
					continue loop
				}
			}
			addr := &net.TCPAddr{
				Port: int(knownAddress.Port),
				IP:   knownAddress.IP,
			}
			// Record the attempt so this address is deprioritized later.
			pm.addrManager.Attempt(knownAddress)
			return addr, nil
		}
		return nil, errors.New("failed to find appropriate address to return")
	} else {
		return pm.trustedPeer, nil
	}
} | go | {
"resource": ""
} |
// queryDNSSeeds resolves every DNS seed for the chain concurrently (via
// Tor when a proxy is configured) and feeds the resulting addresses into
// the address manager, blocking until all seeds have been queried.
q17152 | queryDNSSeeds | train | func (pm *PeerManager) queryDNSSeeds() {
	wg := new(sync.WaitGroup)
	for _, seed := range pm.peerConfig.ChainParams.DNSSeeds {
		wg.Add(1)
		go func(host string) {
			returnedAddresses := 0
			var addrs []string
			var err error
			if pm.proxy != nil {
				// NOTE(review): this loop performs the same Tor lookup 5
				// times and appends every result, so addresses are added in
				// quintuplicate; it reads like retry logic that lost its
				// success-break. Also `err :=` here shadows the outer err.
				// Confirm intent before changing.
				for i := 0; i < 5; i++ {
					ips, err := TorLookupIP(host)
					if err != nil {
						wg.Done()
						return
					}
					for _, ip := range ips {
						addrs = append(addrs, ip.String())
					}
				}
			} else {
				addrs, err = net.LookupHost(host)
				if err != nil {
					wg.Done()
					return
				}
			}
			for _, addr := range addrs {
				netAddr := wire.NewNetAddressIPPort(net.ParseIP(addr), defaultPort, 0)
				pm.addrManager.AddAddress(netAddr, pm.sourceAddr)
				returnedAddresses++
			}
			log.Debugf("%s returned %s addresses\n", host, strconv.Itoa(returnedAddresses))
			wg.Done()
		}(seed.Host)
	}
	wg.Wait()
} | go | {
"resource": ""
} |
// getMoreAddresses tops up the address manager when it reports a
// shortage: connected peers are asked via getaddr, and the DNS seeds are
// queried as a fallback when no peers are connected.
q17153 | getMoreAddresses | train | func (pm *PeerManager) getMoreAddresses() {
	if !pm.addrManager.NeedMoreAddresses() {
		return
	}
	pm.peerMutex.RLock()
	defer pm.peerMutex.RUnlock()
	if len(pm.connectedPeers) == 0 {
		// Nobody to ask directly; fall back to the DNS seeds.
		pm.queryDNSSeeds()
		return
	}
	log.Debug("Querying peers for more addresses")
	for _, p := range pm.connectedPeers {
		p.QueueMessage(wire.NewMsgGetAddr(), nil)
	}
} | go | {
"resource": ""
} |
// geomFromPtr wraps a GEOS geometry pointer that Go owns: a finalizer is
// attached so the underlying C object is destroyed when the wrapper is
// garbage collected.
q17154 | geomFromPtr | train | func geomFromPtr(ptr *C.GEOSGeometry) *Geometry {
	g := &Geometry{g: ptr}
	runtime.SetFinalizer(g, func(g *Geometry) {
		cGEOSGeom_destroy(ptr)
	})
	return g
} | go | {
"resource": ""
} |
// geomFromPtrUnowned wraps a GEOS geometry pointer WITHOUT taking
// ownership — no finalizer is set, so GEOS (or the parent geometry) is
// responsible for freeing it. A nil pointer is converted to a GEOS error.
q17155 | geomFromPtrUnowned | train | func geomFromPtrUnowned(ptr *C.GEOSGeometry) (*Geometry, error) {
	if ptr != nil {
		return &Geometry{g: ptr}, nil
	}
	// nil from GEOS signals a library error; surface it.
	return nil, Error()
} | go | {
"resource": ""
} |
// Project returns the distance along g (a line) of the point on g nearest
// to p, via GEOSProject.
q17156 | Project | train | func (g *Geometry) Project(p *Geometry) float64 {
	// XXX: error if wrong geometry types
	return float64(cGEOSProject(g.g, p.g))
} | go | {
"resource": ""
} |
// ProjectNormalized is Project with the result expressed as a fraction of
// g's total length (0..1), via GEOSProjectNormalized.
q17157 | ProjectNormalized | train | func (g *Geometry) ProjectNormalized(p *Geometry) float64 {
	// XXX: error if wrong geometry types
	return float64(cGEOSProjectNormalized(g.g, p.g))
} | go | {
"resource": ""
} |
// Interpolate returns the point located the given distance along g (a
// line), via GEOSInterpolate.
q17158 | Interpolate | train | func (g *Geometry) Interpolate(dist float64) (*Geometry, error) {
	return geomFromC("Interpolate", cGEOSInterpolate(g.g, C.double(dist)))
} | go | {
"resource": ""
} |
// LineInterpolatePoint returns the point at the given normalized distance
// (0..1) along a LINESTRING, walking the segments and linearly
// interpolating within the segment containing the target fraction.
q17159 | LineInterpolatePoint | train | func (g *Geometry) LineInterpolatePoint(dist float64) (*Geometry, error) {
	// This code ported from LWGEOM_line_interpolate_point in postgis,
	// by jsunday@rochgrp.com and strk@refractions.net.
	if dist < 0 || dist > 1 {
		return nil, ErrLineInterpolatePointDist
	}
	typ, err := g.Type()
	if err != nil {
		return nil, err
	}
	if typ != LINESTRING {
		return nil, ErrLineInterpolatePointType
	}
	empty, err := g.IsEmpty()
	if err != nil {
		return nil, err
	}
	// An empty line yields an empty point.
	if empty {
		pt, err := NewPoint()
		if err != nil {
			return nil, err
		}
		return pt, nil
	}
	// If distance is one of two extremes, return the point on that end.
	if dist == 0.0 || dist == 1.0 {
		var (
			pt  *Geometry
			err error
		)
		if dist == 0.0 {
			pt, err = g.StartPoint()
		} else {
			pt, err = g.EndPoint()
		}
		if err != nil {
			return nil, err
		}
		return pt, nil
	}
	// Interpolate a point on the line: accumulate normalized segment
	// lengths until the segment containing `dist` is found.
	nsegs, err := g.NPoint()
	if err != nil {
		return nil, err
	}
	nsegs--
	length, err := g.Length()
	if err != nil {
		return nil, err
	}
	var tlength float64
	for i := 0; i < nsegs; i++ {
		a, err := g.Point(i)
		if err != nil {
			return nil, err
		}
		b, err := g.Point(i + 1)
		if err != nil {
			return nil, err
		}
		// Find the relative length of this segment.
		slength, err := a.Distance(b)
		if err != nil {
			return nil, err
		}
		slength /= length
		if dist < tlength+slength {
			// Fraction within this segment, then linear interpolation.
			dseg := (dist - tlength) / slength
			pt, err := interpolatePoint2D(a, b, dseg)
			if err != nil {
				return nil, err
			}
			return pt, nil
		}
		tlength += slength
	}
	// Return the last point on the line. This shouldn't happen, but could if
	// there's some floating point rounding errors.
	return g.EndPoint()
} | go | {
"resource": ""
} |
q17160 | OffsetCurve | train | func (g *Geometry) OffsetCurve(distance float64, opts BufferOpts) (*Geometry, error) {
return geomFromC("OffsetCurve", cGEOSOffsetCurve(g.g, C.double(distance), C.int(opts.QuadSegs), C.int(opts.JoinStyle), C.double(opts.MitreLimit)))
} | go | {
"resource": ""
} |
q17161 | SimplifyP | train | func (g *Geometry) SimplifyP(tolerance float64) (*Geometry, error) {
return g.simplify("simplify", cGEOSTopologyPreserveSimplify, tolerance)
} | go | {
"resource": ""
} |
q17162 | Snap | train | func (g *Geometry) Snap(other *Geometry, tolerance float64) (*Geometry, error) {
return geomFromC("Snap", cGEOSSnap(g.g, other.g, C.double(tolerance)))
} | go | {
"resource": ""
} |
q17163 | Intersection | train | func (g *Geometry) Intersection(other *Geometry) (*Geometry, error) {
return g.binaryTopo("Intersection", cGEOSIntersection, other)
} | go | {
"resource": ""
} |
q17164 | Difference | train | func (g *Geometry) Difference(other *Geometry) (*Geometry, error) {
return g.binaryTopo("Difference", cGEOSDifference, other)
} | go | {
"resource": ""
} |
q17165 | SymDifference | train | func (g *Geometry) SymDifference(other *Geometry) (*Geometry, error) {
return g.binaryTopo("SymDifference", cGEOSSymDifference, other)
} | go | {
"resource": ""
} |
q17166 | Union | train | func (g *Geometry) Union(other *Geometry) (*Geometry, error) {
return g.binaryTopo("Union", cGEOSUnion, other)
} | go | {
"resource": ""
} |
q17167 | Disjoint | train | func (g *Geometry) Disjoint(other *Geometry) (bool, error) {
return g.binaryPred("Disjoint", cGEOSDisjoint, other)
} | go | {
"resource": ""
} |
q17168 | Touches | train | func (g *Geometry) Touches(other *Geometry) (bool, error) {
return g.binaryPred("Touches", cGEOSTouches, other)
} | go | {
"resource": ""
} |
q17169 | Intersects | train | func (g *Geometry) Intersects(other *Geometry) (bool, error) {
return g.binaryPred("Intersects", cGEOSIntersects, other)
} | go | {
"resource": ""
} |
q17170 | Crosses | train | func (g *Geometry) Crosses(other *Geometry) (bool, error) {
return g.binaryPred("Crosses", cGEOSCrosses, other)
} | go | {
"resource": ""
} |
q17171 | Within | train | func (g *Geometry) Within(other *Geometry) (bool, error) {
return g.binaryPred("Within", cGEOSWithin, other)
} | go | {
"resource": ""
} |
q17172 | Contains | train | func (g *Geometry) Contains(other *Geometry) (bool, error) {
return g.binaryPred("Contains", cGEOSContains, other)
} | go | {
"resource": ""
} |
q17173 | Overlaps | train | func (g *Geometry) Overlaps(other *Geometry) (bool, error) {
return g.binaryPred("Overlaps", cGEOSOverlaps, other)
} | go | {
"resource": ""
} |
q17174 | Equals | train | func (g *Geometry) Equals(other *Geometry) (bool, error) {
return g.binaryPred("Equals", cGEOSEquals, other)
} | go | {
"resource": ""
} |
q17175 | Covers | train | func (g *Geometry) Covers(other *Geometry) (bool, error) {
return g.binaryPred("Covers", cGEOSCovers, other)
} | go | {
"resource": ""
} |
q17176 | CoveredBy | train | func (g *Geometry) CoveredBy(other *Geometry) (bool, error) {
return g.binaryPred("CoveredBy", cGEOSCoveredBy, other)
} | go | {
"resource": ""
} |
q17177 | EqualsExact | train | func (g *Geometry) EqualsExact(other *Geometry, tolerance float64) (bool, error) {
return boolFromC("EqualsExact", cGEOSEqualsExact(g.g, other.g, C.double(tolerance)))
} | go | {
"resource": ""
} |
q17178 | Type | train | func (g *Geometry) Type() (GeometryType, error) {
i := cGEOSGeomTypeId(g.g)
if i == -1 {
// XXX: error
return -1, Error()
}
return cGeomTypeIds[i], nil
} | go | {
"resource": ""
} |
q17179 | SetSRID | train | func (g *Geometry) SetSRID(srid int) {
cGEOSSetSRID(g.g, C.int(srid))
} | go | {
"resource": ""
} |
q17180 | Coords | train | func (g *Geometry) Coords() ([]Coord, error) {
ptr := cGEOSGeom_getCoordSeq(g.g)
if ptr == nil {
return nil, Error()
}
//cs := coordSeqFromPtr(ptr)
cs := &coordSeq{c: ptr}
return coordSlice(cs)
} | go | {
"resource": ""
} |
q17181 | Point | train | func (g *Geometry) Point(n int) (*Geometry, error) {
return geomFromC("Point", cGEOSGeomGetPointN(g.g, C.int(n)))
} | go | {
"resource": ""
} |
q17182 | Distance | train | func (g *Geometry) Distance(other *Geometry) (float64, error) {
return g.binaryFloat("Distance", cGEOSDistance, other)
} | go | {
"resource": ""
} |
q17183 | RelatePat | train | func (g *Geometry) RelatePat(other *Geometry, pat string) (bool, error) {
cs := C.CString(pat)
defer C.free(unsafe.Pointer(cs))
return boolFromC("RelatePat", cGEOSRelatePattern(g.g, other.g, cs))
} | go | {
"resource": ""
} |
q17184 | coordSlice | train | func coordSlice(cs *coordSeq) ([]Coord, error) {
size, err := cs.size()
if err != nil {
return nil, err
}
coords := make([]Coord, size)
for i := 0; i < size; i++ {
x, err := cs.x(i)
if err != nil {
return nil, err
}
y, err := cs.y(i)
if err != nil {
return nil, err
}
coords[i] = Coord{X: x, Y: y}
}
return coords, nil
} | go | {
"resource": ""
} |
q17185 | newWktDecoder | train | func newWktDecoder() *wktDecoder {
r := cGEOSWKTReader_create()
if r == nil {
return nil
}
d := &wktDecoder{r}
runtime.SetFinalizer(d, (*wktDecoder).destroy)
return d
} | go | {
"resource": ""
} |
q17186 | decode | train | func (d *wktDecoder) decode(wkt string) (*Geometry, error) {
cstr := C.CString(wkt)
defer C.free(unsafe.Pointer(cstr))
g := cGEOSWKTReader_read(d.r, cstr)
if g == nil {
return nil, Error()
}
return geomFromPtr(g), nil
} | go | {
"resource": ""
} |
q17187 | encode | train | func (e *wktEncoder) encode(g *Geometry) (string, error) {
cstr := cGEOSWKTWriter_write(e.w, g.g)
if cstr == nil {
return "", Error()
}
return C.GoString(cstr), nil
} | go | {
"resource": ""
} |
q17188 | PrepareGeometry | train | func PrepareGeometry(g *Geometry) *PGeometry {
ptr := cGEOSPrepare(g.g)
p := &PGeometry{ptr}
runtime.SetFinalizer(p, (*PGeometry).destroy)
return p
} | go | {
"resource": ""
} |
q17189 | Contains | train | func (p *PGeometry) Contains(other *Geometry) (bool, error) {
return p.predicate("contains", cGEOSPreparedContains, other)
} | go | {
"resource": ""
} |
q17190 | ContainsP | train | func (p *PGeometry) ContainsP(other *Geometry) (bool, error) {
return p.predicate("contains", cGEOSPreparedContainsProperly, other)
} | go | {
"resource": ""
} |
q17191 | CoveredBy | train | func (p *PGeometry) CoveredBy(other *Geometry) (bool, error) {
return p.predicate("covered by", cGEOSPreparedCoveredBy, other)
} | go | {
"resource": ""
} |
q17192 | Covers | train | func (p *PGeometry) Covers(other *Geometry) (bool, error) {
return p.predicate("covers", cGEOSPreparedCovers, other)
} | go | {
"resource": ""
} |
q17193 | Crosses | train | func (p *PGeometry) Crosses(other *Geometry) (bool, error) {
return p.predicate("crosses", cGEOSPreparedCrosses, other)
} | go | {
"resource": ""
} |
q17194 | Disjoint | train | func (p *PGeometry) Disjoint(other *Geometry) (bool, error) {
return p.predicate("disjoint", cGEOSPreparedDisjoint, other)
} | go | {
"resource": ""
} |
q17195 | Intersects | train | func (p *PGeometry) Intersects(other *Geometry) (bool, error) {
return p.predicate("intersects", cGEOSPreparedIntersects, other)
} | go | {
"resource": ""
} |
q17196 | Overlaps | train | func (p *PGeometry) Overlaps(other *Geometry) (bool, error) {
return p.predicate("overlaps", cGEOSPreparedOverlaps, other)
} | go | {
"resource": ""
} |
q17197 | Touches | train | func (p *PGeometry) Touches(other *Geometry) (bool, error) {
return p.predicate("touches", cGEOSPreparedTouches, other)
} | go | {
"resource": ""
} |
q17198 | Within | train | func (p *PGeometry) Within(other *Geometry) (bool, error) {
return p.predicate("within", cGEOSPreparedWithin, other)
} | go | {
"resource": ""
} |
q17199 | Zadd | train | func (client *Client) Zadd(key string, value []byte, score float64) (bool, error) {
res, err := client.sendCommand("ZADD", key, strconv.FormatFloat(score, 'f', -1, 64), string(value))
if err != nil {
return false, err
}
return res.(int64) == 1, nil
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.