// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build mips || mipsle
// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Xadd64
//go:linkname Xchg64
//go:linkname Cas64
//go:linkname Load64
//go:linkname Store64
//go:linkname Or64
//go:linkname And64
package atomic
import (
"internal/cpu"
"unsafe"
)
// TODO implement lock striping
// lock is the single global spin lock serializing every emulated 64-bit
// atomic in this file. Padded out to a full cache line so the contended
// state word does not false-share with neighboring data.
var lock struct {
	state uint32
	pad [cpu.CacheLinePadSize - 4]byte
}
// spinLock acquires the lock whose state word is *state by spinning.
// Implemented in assembly (declaration has no Go body).
//
//go:noescape
func spinLock(state *uint32)

// spinUnlock releases the lock whose state word is *state.
// Implemented in assembly.
//
//go:noescape
func spinUnlock(state *uint32)
// lockAndCheck validates that addr is suitably aligned for a 64-bit
// access, dereferences it once while the lock is NOT yet held (so a bad
// pointer faults without leaving the global lock locked), and then
// acquires the global spin lock.
//
//go:nosplit
func lockAndCheck(addr *uint64) {
	// ensure 8-byte alignment
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		panicUnaligned()
	}
	// force dereference before taking lock
	_ = *addr
	spinLock(&lock.state)
}
// unlock releases the global 64-bit-atomics spin lock taken by
// lockAndCheck.
//
//go:nosplit
func unlock() {
	spinUnlock(&lock.state)
}
// Xadd64 atomically adds delta to *addr and returns the resulting
// value. Emulated under the global spin lock.
//
//go:nosplit
func Xadd64(addr *uint64, delta int64) (new uint64) {
	lockAndCheck(addr)
	*addr += uint64(delta)
	new = *addr
	unlock()
	return new
}
// Xchg64 atomically stores new into *addr and returns the previous
// value. Emulated under the global spin lock.
//
//go:nosplit
func Xchg64(addr *uint64, new uint64) (old uint64) {
	lockAndCheck(addr)
	// Tuple assignment: RHS is fully evaluated before either store.
	old, *addr = *addr, new
	unlock()
	return old
}
// Cas64 performs an atomic compare-and-swap on *addr: if it holds old,
// it is replaced with new and Cas64 reports true; otherwise *addr is
// left unchanged and Cas64 reports false. Emulated under the global
// spin lock.
//
//go:nosplit
func Cas64(addr *uint64, old, new uint64) (swapped bool) {
	lockAndCheck(addr)
	swapped = *addr == old
	if swapped {
		*addr = new
	}
	unlock()
	return swapped
}
// Load64 atomically reads and returns *addr. Emulated under the global
// spin lock.
//
//go:nosplit
func Load64(addr *uint64) (val uint64) {
	lockAndCheck(addr)
	v := *addr
	unlock()
	return v
}
// Store64 atomically writes val to *addr. Emulated under the global
// spin lock.
//
//go:nosplit
func Store64(addr *uint64, val uint64) {
	lockAndCheck(addr)
	*addr = val
	unlock()
}
// Or64 atomically ORs val into *addr and returns the value that was
// there before. Built on Cas64, so it inherits the alignment check and
// lock emulation.
//
//go:nosplit
func Or64(addr *uint64, val uint64) (old uint64) {
	for {
		cur := *addr
		if Cas64(addr, cur, cur|val) {
			return cur
		}
	}
}
// And64 atomically ANDs val into *addr and returns the value that was
// there before. Built on Cas64, so it inherits the alignment check and
// lock emulation.
//
//go:nosplit
func And64(addr *uint64, val uint64) (old uint64) {
	for {
		cur := *addr
		if Cas64(addr, cur, cur&val) {
			return cur
		}
	}
}
// The remaining operations are implemented in assembly; the Go
// declarations below carry only the signatures and escape information.
// Naming mirrors the 64-bit Go implementations above: Xadd returns the
// new value, Xchg returns the old one.

// Xadd atomically adds delta to *ptr and returns the new value.
//
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32

// Xadduintptr is Xadd for uintptr values.
//
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

// Xchg atomically stores new into *ptr and returns the old value.
//
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

// Xchg8 is Xchg for a single byte.
//
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8

// Xchguintptr is Xchg for uintptr values.
//
//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr

// Load atomically reads and returns *ptr.
//
//go:noescape
func Load(ptr *uint32) uint32

// Load8 is Load for a single byte.
//
//go:noescape
func Load8(ptr *uint8) uint8

// Loadp atomically loads the pointer stored at ptr.
// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer

// LoadAcq is a Load with acquire ordering.
//
//go:noescape
func LoadAcq(ptr *uint32) uint32

// LoadAcquintptr is LoadAcq for uintptr values.
//
//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

// And8 atomically ANDs val into *ptr (no value returned).
//
//go:noescape
func And8(ptr *uint8, val uint8)

// Or8 atomically ORs val into *ptr (no value returned).
//
//go:noescape
func Or8(ptr *uint8, val uint8)

// And atomically ANDs val into *ptr (no value returned).
//
//go:noescape
func And(ptr *uint32, val uint32)

// Or atomically ORs val into *ptr (no value returned).
//
//go:noescape
func Or(ptr *uint32, val uint32)

// And32 atomically ANDs val into *ptr and returns the old value.
//
//go:noescape
func And32(ptr *uint32, val uint32) uint32

// Or32 atomically ORs val into *ptr and returns the old value.
//
//go:noescape
func Or32(ptr *uint32, val uint32) uint32

// Anduintptr is And32 for uintptr values.
//
//go:noescape
func Anduintptr(ptr *uintptr, val uintptr) uintptr

// Oruintptr is Or32 for uintptr values.
//
//go:noescape
func Oruintptr(ptr *uintptr, val uintptr) uintptr

// Store atomically writes val to *ptr.
//
//go:noescape
func Store(ptr *uint32, val uint32)

// Store8 is Store for a single byte.
//
//go:noescape
func Store8(ptr *uint8, val uint8)

// StorepNoWB atomically stores the pointer val at ptr without a write
// barrier.
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)

// StoreRel is a Store with release ordering.
//
//go:noescape
func StoreRel(ptr *uint32, val uint32)

// StoreReluintptr is StoreRel for uintptr values.
//
//go:noescape
func StoreReluintptr(ptr *uintptr, val uintptr)

// CasRel is a compare-and-swap with release ordering.
//
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool
|