_id
stringlengths 2
7
| title
stringlengths 1
118
| partition
stringclasses 3
values | text
stringlengths 52
85.5k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q5300
|
ListContainers
|
train
|
// ListContainers returns an Instance for every container whose name
// carries this manager's namespace prefix.
func (m *containerManager) ListContainers() ([]instances.Instance, error) {
	matched, err := m.server.FilterContainers(m.namespace.Prefix())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// A nil slice is returned when nothing matched, same as before.
	var insts []instances.Instance
	for _, container := range matched {
		insts = append(insts, &lxdInstance{container.Name, m.server.ContainerServer})
	}
	return insts, nil
}
|
go
|
{
"resource": ""
}
|
q5301
|
getContainerSpec
|
train
|
// getContainerSpec assembles the LXD container spec for a new machine:
// it resolves an image for the requested series/arch, derives the container
// name from the machine ID, translates network config into LXD NIC devices,
// and bakes cloud-init user data into the container's config map.
// NOTE(review): storageConfig is accepted but not referenced in this body —
// presumably consumed elsewhere; confirm before removing.
func (m *containerManager) getContainerSpec(
	instanceConfig *instancecfg.InstanceConfig,
	cons constraints.Value,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
	callback environs.StatusCallbackFunc,
) (ContainerSpec, error) {
	imageSources, err := m.getImageSources()
	if err != nil {
		return ContainerSpec{}, errors.Trace(err)
	}

	// Lock around finding an image.
	// The provisioner works concurrently to create containers.
	// If an image needs to be copied from a remote, we don't want many
	// goroutines attempting to do it at once.
	m.imageMutex.Lock()
	found, err := m.server.FindImage(series, jujuarch.HostArch(), imageSources, true, callback)
	m.imageMutex.Unlock()
	if err != nil {
		return ContainerSpec{}, errors.Annotatef(err, "acquiring LXD image")
	}

	// Container name is derived from the machine ID within this namespace.
	name, err := m.namespace.Hostname(instanceConfig.MachineId)
	if err != nil {
		return ContainerSpec{}, errors.Trace(err)
	}

	// nics are concrete LXD devices; unknown lists bridges for which no
	// CIDR could be determined.
	nics, unknown, err := m.networkDevicesFromConfig(networkConfig)
	if err != nil {
		return ContainerSpec{}, errors.Trace(err)
	}

	logger.Debugf("configuring container %q with network devices: %v", name, nics)

	// If the default LXD bridge was supplied in network config,
	// but without a CIDR, attempt to ensure it is configured for IPv4.
	// If there are others with incomplete info, log a warning.
	if len(unknown) > 0 {
		if len(unknown) == 1 && unknown[0] == network.DefaultLXDBridge && m.server.networkAPISupport {
			mod, err := m.server.EnsureIPv4(network.DefaultLXDBridge)
			if err != nil {
				return ContainerSpec{}, errors.Annotate(err, "ensuring default bridge IPv4 config")
			}
			if mod {
				logger.Infof(`added "auto" IPv4 configuration to default LXD bridge`)
			}
		} else {
			logger.Warningf("no CIDR was detected for the following networks: %v", unknown)
		}
	}

	// If there was no incoming interface info, then at this point we know
	// that nics were generated by falling back to either a single "eth0",
	// or devices from the profile.
	// Ensure that the devices are represented in the cloud-init user-data.
	if len(networkConfig.Interfaces) == 0 {
		interfaces, err := InterfaceInfoFromDevices(nics)
		if err != nil {
			return ContainerSpec{}, errors.Trace(err)
		}
		networkConfig.Interfaces = interfaces
	}

	// CloudInitUserData creates our own ENI/netplan.
	// We need to disable cloud-init networking to make it work.
	userData, err := containerinit.CloudInitUserData(instanceConfig, networkConfig)
	if err != nil {
		return ContainerSpec{}, errors.Trace(err)
	}

	cfg := map[string]string{
		UserDataKey:      string(userData),
		NetworkConfigKey: cloudinit.CloudInitNetworkConfigDisabled,
		AutoStartKey:     "true",
		// Extra info to indicate the origin of this container.
		JujuModelKey: m.modelUUID,
	}

	spec := ContainerSpec{
		Name:     name,
		Image:    found,
		Config:   cfg,
		Profiles: instanceConfig.Profiles,
		Devices:  nics,
	}
	// Constraints (e.g. cpu/mem limits) are applied last, keyed on the
	// server version because the config keys differ across LXD releases.
	spec.ApplyConstraints(m.server.serverVersion, cons)

	return spec, nil
}
|
go
|
{
"resource": ""
}
|
q5302
|
getImageSources
|
train
|
// getImageSources returns the simplestreams server specs to consult for
// container images, in priority order. A custom image-metadata URL, when
// configured, is always tried first.
func (m *containerManager) getImageSources() ([]ServerSpec, error) {
	metaURL := m.imageMetadataURL
	if metaURL == "" {
		// No explicit URL configured: use the default sources, restricted
		// to the daily remote when the daily stream was requested.
		if m.imageStream == "daily" {
			return []ServerSpec{CloudImagesDailyRemote}, nil
		}
		logger.Debugf("checking default image metadata sources")
		return []ServerSpec{CloudImagesRemote, CloudImagesDailyRemote}, nil
	}

	metaURL, err := imagemetadata.ImageMetadataURL(metaURL, m.imageStream)
	if err != nil {
		return nil, errors.Annotatef(err, "generating image metadata source")
	}
	metaURL = EnsureHTTPS(metaURL)
	custom := ServerSpec{
		Name:     strings.Replace(metaURL, "https://", "", 1),
		Host:     metaURL,
		Protocol: SimpleStreamsProtocol,
	}

	// If the daily stream was configured with custom image metadata URL,
	// only use the Ubuntu daily as a fallback.
	if m.imageStream == "daily" {
		return []ServerSpec{custom, CloudImagesDailyRemote}, nil
	}
	return []ServerSpec{custom, CloudImagesRemote, CloudImagesDailyRemote}, nil
}
|
go
|
{
"resource": ""
}
|
q5303
|
networkDevicesFromConfig
|
train
|
// networkDevicesFromConfig maps the supplied network configuration to LXD
// NIC devices, preferring explicit interface info, then a single host
// device as "eth0", and finally the NICs of the default profile.
func (m *containerManager) networkDevicesFromConfig(netConfig *container.NetworkConfig) (map[string]device, []string, error) {
	switch {
	case len(netConfig.Interfaces) > 0:
		return DevicesFromInterfaceInfo(netConfig.Interfaces)
	case netConfig.Device != "":
		nic := newNICDevice("eth0", netConfig.Device, network.GenerateVirtualMACAddress(), netConfig.MTU)
		return map[string]device{"eth0": nic}, nil, nil
	default:
		nics, err := m.server.GetNICsFromProfile(lxdDefaultProfileName)
		return nics, nil, errors.Trace(err)
	}
}
|
go
|
{
"resource": ""
}
|
q5304
|
LXDProfileNames
|
train
|
// LXDProfileNames reports the LXD profiles applied to the named container.
func (m *containerManager) LXDProfileNames(containerName string) ([]string, error) {
	profiles, err := m.server.GetContainerProfiles(containerName)
	return profiles, err
}
|
go
|
{
"resource": ""
}
|
q5305
|
Map
|
train
|
// Map registers val in the injector under its dynamic type, so later
// lookups by that type resolve to this value. Returns the receiver for
// chaining.
func (i *injector) Map(val interface{}) TypeMapper {
	v := reflect.ValueOf(val)
	i.values[v.Type()] = v
	return i
}
|
go
|
{
"resource": ""
}
|
q5306
|
Set
|
train
|
// Set registers val under the explicitly supplied type, bypassing dynamic
// type detection. Returns the receiver for chaining.
func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
	i.values[typ] = val
	return i
}
|
go
|
{
"resource": ""
}
|
q5307
|
Levels
|
train
|
// Levels reports which logrus levels this hook fires for; a nil
// AcceptedLevels means every level.
func (p *SLSHook) Levels() []logrus.Level {
	if p.AcceptedLevels != nil {
		return p.AcceptedLevels
	}
	return allLevels
}
|
go
|
{
"resource": ""
}
|
q5308
|
newFileWriter
|
train
|
// newFileWriter returns a fileLogWriter with the default policy:
// daily rotation, seven retained days, debug level, 0660 live files
// and 0440 rotated files.
func newFileWriter() *fileLogWriter {
	return &fileLogWriter{
		Daily:      true,
		MaxDays:    7,
		Rotate:     true,
		RotatePerm: "0440",
		Level:      LevelDebug,
		Perm:       "0660",
	}
}
|
go
|
{
"resource": ""
}
|
q5309
|
WriteMsg
|
train
|
// WriteMsg appends a single formatted log line to the file, rotating
// first if the rotation policy demands it. Messages above the writer's
// level are silently dropped.
func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
	if level > w.Level {
		return nil
	}
	h, d := formatTimeHeader(when)
	msg = string(h) + msg + "\n"
	if w.Rotate {
		// Double-checked locking: take the cheap read lock to test for
		// rotation, and only upgrade to the write lock (re-testing, since
		// another goroutine may have rotated in the gap) when needed.
		w.RLock()
		if w.needRotate(len(msg), d) {
			w.RUnlock()
			w.Lock()
			if w.needRotate(len(msg), d) {
				if err := w.doRotate(when); err != nil {
					// Rotation failure is reported but does not stop the write.
					fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
				}
			}
			w.Unlock()
		} else {
			w.RUnlock()
		}
	}
	// The write itself and the counters it feeds are guarded by the
	// exclusive lock.
	w.Lock()
	_, err := w.fileWriter.Write([]byte(msg))
	if err == nil {
		w.maxLinesCurLines++
		w.maxSizeCurSize += len(msg)
	}
	w.Unlock()
	return err
}
|
go
|
{
"resource": ""
}
|
q5310
|
Log10
|
train
|
// Log10 sets z to the base-10 logarithm of x and returns z.
func Log10(z, x *decimal.Big) *decimal.Big {
	// Handle NaN/Inf/zero/negative inputs up front.
	if logSpecials(z, x) {
		return z
	}

	// If x is a power of 10 the result is the exponent and exact.
	var tpow bool
	if m, u := decimal.Raw(x); *m != c.Inflated {
		// Compact representation: mantissa fits in a uint64.
		tpow = arith.PowOfTen(*m)
	} else {
		// Inflated representation: mantissa lives in a big.Int.
		tpow = arith.PowOfTenBig(u)
	}
	if tpow {
		ctx := decimal.Context{Precision: precision(z)}
		return ctx.Set(z, z.SetMantScale(int64(adjusted(x)), 0))
	}
	// General case: shared log implementation with base-10 conversion.
	return log(z, x, true)
}
|
go
|
{
"resource": ""
}
|
q5311
|
Log
|
train
|
// Log sets z to the natural logarithm of x and returns z.
func Log(z, x *decimal.Big) *decimal.Big {
	if logSpecials(z, x) {
		return z
	}
	// Exact shortcuts: ln 1 == 0, and ln 10 has a dedicated routine.
	if x.IsInt() {
		switch v, ok := x.Uint64(); {
		case !ok:
			// Too large for a uint64; fall through to the general case.
		case v == 1:
			return z.SetMantScale(0, 0)
		case v == 10:
			return ln10(z, precision(z))
		}
	}
	return log(z, x, false)
}
|
go
|
{
"resource": ""
}
|
q5312
|
Value
|
train
|
// Value implements driver.Valuer, rendering the decimal as a string
// acceptable to a SQL DECIMAL column. A nil inner value maps to NULL
// (or "0" when Zero is set); NaN is passed through as the string "NaN";
// infinities are rejected. Out-of-range digits either error or round,
// depending on d.Round.
func (d *Decimal) Value() (driver.Value, error) {
	if d.V == nil {
		if d.Zero {
			return "0", nil
		}
		return nil, nil
	}
	v := d.V
	if v.IsNaN(0) {
		return "NaN", nil
	}
	if v.IsInf(0) {
		return nil, errors.New("Decimal.Value: DECIMAL does not accept Infinities")
	}

	dl := v.Precision() // length of d
	sl := int(v.Scale()) // length of fractional part

	// Integral part too wide: error, or round away the fraction.
	if il := dl - sl; il > MaxIntegralDigits {
		if !d.Round {
			return nil, &LengthError{Part: "integral", N: il, max: MaxIntegralDigits}
		}
		// Rounding down the integral part automatically chops off the fractional
		// part.
		return v.Round(MaxIntegralDigits).String(), nil
	}
	// Fractional part too wide: error, or round to the max allowed scale.
	if sl > MaxFractionalDigits {
		if !d.Round {
			return nil, &LengthError{Part: "fractional", N: sl, max: MaxFractionalDigits}
		}
		// Keep dl-(excess) significant digits, trimming only the fraction.
		v.Round(dl - (sl - MaxFractionalDigits))
	}
	return v.String(), nil
}
|
go
|
{
"resource": ""
}
|
q5313
|
Length
|
train
|
// Length returns the number of decimal digits in x.
func Length(x uint64) int {
	if x < 10 {
		return 1
	}
	// Estimate log10 from log2 (Stanford bithacks IntegerLog10), then
	// correct by comparing against the exact power of ten.
	// From https://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
	est := (bits.Len64(x) * 1233) >> 12
	if p, _ := Pow10(uint64(est)); x < p {
		return est
	}
	return est + 1
}
|
go
|
{
"resource": ""
}
|
q5314
|
BigLength
|
train
|
// BigLength returns the number of decimal digits in |x|.
func BigLength(x *big.Int) int {
	if x.Sign() == 0 {
		return 1
	}

	var (
		m  uint64 // extra digits accumulated for huge inputs
		nb = uint64(x.BitLen())
	)

	// overflowCutoff is the largest number where N * 0x268826A1 <= 1<<63 - 1
	const overflowCutoff = 14267572532
	if nb > overflowCutoff {
		// Given the identity ``log_n a + log_n b = log_n a*b''
		// and ``(1<<63 - 1) / overflowCutoff < overFlowCutoff''
		// we can break nb into two factors: overflowCutoff and X.

		// overflowCutoff / log10(2)
		m = 1<<32 - 1
		nb = (nb / overflowCutoff) + (nb % overflowCutoff)
	}

	// 0x268826A1/2^31 is an approximation of log10(2). See ilog10.
	// The more accurate approximation 0x268826A13EF3FE08/2^63 overflows.
	m += ((nb + 1) * 0x268826A1) >> 31

	// The estimate can be one digit high; compare against 10^m to decide.
	if x.CmpAbs(BigPow10(m)) < 0 {
		return int(m)
	}
	return int(m + 1)
}
|
go
|
{
"resource": ""
}
|
q5315
|
Abs
|
train
|
// Abs returns |x| as a uint64. Because the result type is unsigned,
// math.MinInt64 is handled correctly: two's-complement wrap-around of
// the negation yields exactly 1<<63.
func Abs(x int64) uint64 {
	if x >= 0 {
		return uint64(x)
	}
	return uint64(-x)
}
|
go
|
{
"resource": ""
}
|
q5316
|
CmpBits
|
train
|
func CmpBits(x, y []big.Word) (r int) {
// Copied from math/big.nat.go
m := len(x)
n := len(y)
if m != n || m == 0 {
switch {
case m < n:
r = -1
case m > n:
r = 1
}
return
}
i := m - 1
for i > 0 && x[i] == y[i] {
i--
}
switch {
case x[i] < y[i]:
r = -1
case x[i] > y[i]:
r = 1
}
return
}
|
go
|
{
"resource": ""
}
|
q5317
|
Words
|
train
|
func Words(x uint64) []big.Word {
if bits.UintSize == 32 {
return []big.Word{big.Word(x), big.Word(x >> 32)}
}
return []big.Word{big.Word(x)}
}
|
go
|
{
"resource": ""
}
|
q5318
|
Add
|
train
|
func Add(z, x *big.Int, y uint64) *big.Int {
zw := z.Bits()
switch xw := x.Bits(); {
default:
zw = add(zw, xw, big.Word(y))
case len(xw) == 0:
zw = setW(zw, big.Word(y))
case y == 0:
zw = set(zw, xw)
}
return z.SetBits(zw)
}
|
go
|
{
"resource": ""
}
|
q5319
|
Sub
|
train
|
func Sub(z, x *big.Int, y uint64) *big.Int {
zw := z.Bits()
switch xw := x.Bits(); {
default:
zw = sub(zw, xw, big.Word(y))
case y == 0:
zw = set(zw, xw)
case len(xw) == 0:
panic("underflow")
}
return z.SetBits(zw)
}
|
go
|
{
"resource": ""
}
|
q5320
|
Set
|
train
|
// Set sets z to the 128-bit unsigned integer z1*2^64 + z0 and returns z.
func Set(z *big.Int, z1, z0 uint64) *big.Int {
	// 128 bits need two words on 64-bit platforms, four on 32-bit ones.
	ww := makeWord(z.Bits(), 128/bits.UintSize)
	switch bits.UintSize {
	case 32:
		ww[0], ww[1] = big.Word(z0), big.Word(z0>>32)
		ww[2], ww[3] = big.Word(z1), big.Word(z1>>32)
	default:
		ww[0], ww[1] = big.Word(z0), big.Word(z1)
	}
	return z.SetBits(ww)
}
|
go
|
{
"resource": ""
}
|
q5321
|
add
|
train
|
// add sets z = x + y for a word-slice x and single word y, growing z by
// one word for a possible carry-out and normalizing the result.
// Preconditions (from callers): len(x) > 0 and y > 0.
func add(z, x []big.Word, y big.Word) []big.Word {
	m := len(x)
	const n = 1

	// m > 0 && y > 0

	z = makeWord(z, m+1)
	var c big.Word
	// addVV(z[0:m], x, y) but WW since len(y) == 1
	c, z[0] = addWW(x[0], y, 0)
	if m > n {
		// Propagate the carry through the remaining words.
		c = addVW(z[n:m], x[n:], c)
	}
	// Final carry-out lands in the extra word; norm strips it if zero.
	z[m] = c
	return norm(z)
}
|
go
|
{
"resource": ""
}
|
q5322
|
sub
|
train
|
// sub sets z = x - y for a word-slice x and single word y, normalizing
// the result. It panics on underflow (x < y).
// Preconditions (from callers): len(x) > 0 and y > 0.
func sub(z, x []big.Word, y big.Word) []big.Word {
	m := len(x)
	const n = 1

	// m > 0 && y > 0

	z = makeWord(z, m)
	// subVV(z[0:m], x, y) but WW since len(y) == 1
	var c big.Word
	c, z[0] = subWW(x[0], y, 0)
	if m > n {
		// Propagate the borrow through the remaining words.
		c = subVW(z[n:], x[n:], c)
	}
	// A borrow out of the top word means x < y: a caller bug.
	if c != 0 {
		panic("underflow")
	}
	return norm(z)
}
|
go
|
{
"resource": ""
}
|
q5323
|
addVW
|
train
|
func addVW(z, x []big.Word, y big.Word) (c big.Word) {
c = y
for i, xi := range x[:len(z)] {
zi := xi + c
z[i] = zi
c = xi &^ zi >> (bits.UintSize - 1)
}
return c
}
|
go
|
{
"resource": ""
}
|
q5324
|
E
|
train
|
// E sets z to Euler's number e at z's precision and returns z. For
// precisions within the precomputed constant it copies _E; otherwise it
// sums the Taylor series e = sum(1/n!) until the sum stops changing.
func E(z *decimal.Big) *decimal.Big {
	ctx := decimal.Context{Precision: precision(z)}
	if ctx.Precision <= constPrec {
		return ctx.Set(z, _E)
	}

	// Work with 5 guard digits, then round back down at the end.
	ctx.Precision += 5
	var (
		sum  = z.SetUint64(2) // 1/0! + 1/1! = 2 seeds the series
		fac  = new(decimal.Big).SetUint64(1)
		term = new(decimal.Big)
		prev = new(decimal.Big)
	)

	// Stop once adding the next term no longer changes the sum at the
	// working precision.
	for i := uint64(2); sum.Cmp(prev) != 0; i++ {
		// Use term as our intermediate storage for our factorial. SetUint64
		// should be marginally faster than ctx.Add(incr, incr, one), but either
		// the costly call to Quo makes it difficult to notice.
		term.SetUint64(i)
		ctx.Mul(fac, fac, term)
		ctx.Quo(term, one, fac)
		prev.Copy(sum)
		ctx.Add(sum, sum, term)
	}

	ctx.Precision -= 5
	return ctx.Set(z, sum)
}
|
go
|
{
"resource": ""
}
|
q5325
|
Pi
|
train
|
// Pi sets z to the mathematical constant pi at z's precision and returns z.
func Pi(z *decimal.Big) *decimal.Big {
	return pi(z, decimal.Context{Precision: precision(z)})
}
|
go
|
{
"resource": ""
}
|
q5326
|
pi
|
train
|
// pi sets z to pi at ctx's precision. Small precisions copy the
// precomputed _Pi constant; larger ones iterate a series until the
// partial sum stabilizes.
// NOTE(review): the update pattern (n += na, na += 8, d += da, da += 32)
// matches the classic spigot-style series used in the Python decimal
// docs — confirm against the upstream reference before relying on that
// characterization.
func pi(z *decimal.Big, ctx decimal.Context) *decimal.Big {
	if ctx.Precision <= constPrec {
		return ctx.Set(z, _Pi)
	}

	var (
		lasts = new(decimal.Big)            // previous partial sum
		t     = new(decimal.Big).SetUint64(3)
		s     = z.SetUint64(3)              // accumulates into z directly
		n     = new(decimal.Big).SetUint64(1)
		na    = new(decimal.Big)
		d     = new(decimal.Big)
		da    = new(decimal.Big).SetUint64(24)
	)

	// Iterate until the sum stops changing at the working precision.
	for s.Cmp(lasts) != 0 {
		lasts.Copy(s)
		ctx.Add(n, n, na)
		ctx.Add(na, na, eight)
		ctx.Add(d, d, da)
		ctx.Add(da, da, thirtyTwo)
		ctx.Mul(t, t, n)
		ctx.Quo(t, t, d)
		ctx.Add(s, s, t)
	}
	return ctx.Round(z) // z == s
}
|
go
|
{
"resource": ""
}
|
q5327
|
PowOfTenBig
|
train
|
// PowOfTenBig reports whether x is a power of ten (1, 10, 100, ...).
// Zero returns true, as in the original implementation.
//
// Fix (idiom): the loop counter was named `len`, shadowing the builtin;
// renamed to n. Behavior is unchanged.
func PowOfTenBig(x *big.Int) bool {
	// An odd number can only be the zeroth power, 1.
	if x.Bit(0) != 0 {
		return x.Cmp(c.OneInt) == 0
	}
	if x.Sign() == 0 {
		return true
	}
	// Strip trailing decimal zeros until the value has at most 20 digits
	// (so it fits a uint64), then defer to the scalar check. A non-zero
	// remainder along the way means x is not a power of ten.
	q := new(big.Int).Set(x)
	r := new(big.Int)
	for n := BigLength(x); n > 20; n-- {
		q.QuoRem(q, c.TenInt, r)
		if r.Sign() != 0 {
			return false
		}
	}
	return PowOfTen(q.Uint64())
}
|
go
|
{
"resource": ""
}
|
q5328
|
Floor
|
train
|
// Floor sets z to the largest integer less than or equal to x.
func Floor(z, x *decimal.Big) *decimal.Big {
	if z.CheckNaNs(x, nil) {
		return z
	}
	z.Copy(x)
	// Round toward -Inf to an integer, using z's own context otherwise.
	ctx := z.Context
	ctx.RoundingMode = decimal.ToNegativeInf
	return ctx.RoundToInt(z)
}
|
go
|
{
"resource": ""
}
|
q5329
|
Ceil
|
train
|
// Ceil sets z to the smallest integer greater than or equal to x, using
// the identity ceil(x) = -floor(-x).
func Ceil(z, x *decimal.Big) *decimal.Big {
	// ceil(x) = -floor(-x)
	return z.Neg(Floor(z, misc.CopyNeg(z, x)))
}
|
go
|
{
"resource": ""
}
|
q5330
|
allZeros
|
train
|
// allZeros reports whether b consists entirely of ASCII '0' bytes.
// It is vacuously true for an empty (or nil) slice.
func allZeros(b []byte) bool {
	for i := 0; i < len(b); i++ {
		if b[i] != '0' {
			return false
		}
	}
	return true
}
|
go
|
{
"resource": ""
}
|
q5331
|
formatCompact
|
train
|
// formatCompact renders x in base 10, using a stack scratch buffer sized
// for the longest uint64 (20 digits) so the common case does not allocate
// beyond the returned slice.
//
// Fix (idiom): dropped the redundant uint64(x) conversion — x is already
// a uint64.
func formatCompact(x uint64) []byte {
	var b [20]byte
	return strconv.AppendUint(b[:0], x, 10)
}
|
go
|
{
"resource": ""
}
|
q5332
|
formatSci
|
train
|
// formatSci writes the digits b in scientific notation: first digit,
// optional '.' plus remaining digits, the exponent marker byte e
// ('e' or 'E' per the caller), and the adjusted exponent adj with an
// explicit '+' for non-negative values.
func (f *formatter) formatSci(b []byte, adj int, e byte) {
	f.WriteByte(b[0])
	if len(b) > 1 {
		f.WriteByte('.')
		f.Write(b[1:])
	}

	// If negative, the call to strconv.Itoa will add the minus sign for us.
	f.WriteByte(e)
	if adj > 0 {
		f.WriteByte('+')
	}
	f.WriteString(strconv.Itoa(adj))
}
|
go
|
{
"resource": ""
}
|
q5333
|
formatPlain
|
train
|
// formatPlain writes the digits b in plain (non-scientific) notation,
// placing the radix point according to exp. radix = len(b) + exp is the
// number of digits before the point.
func (f *formatter) formatPlain(b []byte, exp int) {
	const zeroRadix = "0."
	switch radix := len(b) + exp; {
	// log10(b) == scale, so immediately before b: 0.123456
	case radix == 0:
		f.WriteString(zeroRadix)
		f.Write(b)
	// log10(b) > scale, so somewhere inside b: 123.456
	case radix > 0:
		f.Write(b[:radix])
		if radix < len(b) {
			f.WriteByte('.')
			f.Write(b[radix:])
		}
	// log10(b) < scale, so before p "0s" and before b: 0.00000123456
	default:
		f.WriteString(zeroRadix)
		// Emit -radix leading zeros after the point via the zero reader.
		io.CopyN(f, zeroReader{}, -int64(radix))

		// Honor the formatter's precision by truncating trailing digits.
		end := len(b)
		if f.prec < end {
			end = f.prec
		}
		f.Write(b[:end])
	}
}
|
go
|
{
"resource": ""
}
|
q5334
|
alias
|
train
|
// alias returns z if z and x do not share backing storage, otherwise a
// fresh big.Int that is safe to write into while reading x.
func alias(z, x *big.Int) *big.Int {
	if z != x {
		// We have to check the first element of their internal slices since
		// Big doesn't store a pointer to a big.Int.
		// Comparing the addresses of the *last* element of each full-capacity
		// slice detects overlap even when the visible lengths differ.
		zb, xb := z.Bits(), x.Bits()
		if cap(zb) > 0 && cap(xb) > 0 && &zb[0:cap(zb)][cap(zb)-1] != &xb[0:cap(xb)][cap(xb)-1] {
			return z
		}
	}
	// Aliased (or identical): hand back scratch space instead.
	return new(big.Int)
}
|
go
|
{
"resource": ""
}
|
q5335
|
bigScalex
|
train
|
// bigScalex sets z to x scaled by 10**scale: positive scales multiply,
// non-positive scales divide (truncating; scale 0 divides by 1).
func bigScalex(z, x *big.Int, scale int) *big.Int {
	if scale <= 0 {
		return z.Quo(x, arith.BigPow10(uint64(-scale)))
	}
	return arith.MulBigPow10(z, x, uint64(scale))
}
|
go
|
{
"resource": ""
}
|
q5336
|
Sqrt
|
train
|
// Sqrt sets z to the square root of x, correctly rounded to z's
// precision, and returns z. Negative x yields NaN with InvalidOperation;
// zero preserves x's sign and ideal exponent; +Inf maps to +Inf.
func Sqrt(z, x *decimal.Big) *decimal.Big {
	if z.CheckNaNs(x, nil) {
		return z
	}

	// Ideal exponent of the result per the GDA spec: floor(exp/2).
	ideal := -((-x.Scale() - (-x.Scale() & 1)) / 2)
	if xs := x.Sign(); xs <= 0 {
		if xs == 0 {
			return z.SetMantScale(0, ideal).CopySign(z, x)
		}
		// sqrt of a negative number is invalid.
		z.Context.Conditions |= decimal.InvalidOperation
		return z.SetNaN(false)
	}

	// Already checked for negative numbers.
	if x.IsInf(+1) {
		return z.SetInf(false)
	}

	var (
		prec = precision(z)
		ctx  = decimal.Context{Precision: prec}

		// Remember whether Rounded/Inexact were already set so we can
		// clear spurious flags from intermediate work if the result is exact.
		rnd = z.Context.Conditions&decimal.Rounded != 0
		ixt = z.Context.Conditions&decimal.Inexact != 0
	)

	// Source for the following algorithm:
	//
	//  T. E. Hull and A. Abrham. 1985. Properly rounded variable precision
	//  square root. ACM Trans. Math. Softw. 11, 3 (September 1985), 229-237.
	//  DOI: https://doi.org/10.1145/214408.214413

	var (
		xprec = x.Precision()
		// The algorithm requires a normalized ``f ∈ [0.1, 1)'' Of the two ways
		// to normalize f, adjusting its scale is the quickest. However, it then
		// requires us to increment approx's scale by e/2 instead of simply
		// setting it to e/2.
		f = new(decimal.Big).Copy(x).SetScale(xprec)
		e = -x.Scale() + xprec

		tmp decimal.Big // scratch space
	)

	// Initial linear approximation, split on the parity of e so the
	// coefficients from the paper apply.
	if e&1 == 0 {
		ctx.FMA(z, approx2, f, approx1) // approx := .259 + .819f
	} else {
		f.SetScale(f.Scale() + 1)      // f := f/10
		e++                            // e := e + 1
		ctx.FMA(z, approx4, f, approx3) // approx := .0819 + 2.59f
	}

	maxp := prec + 5 // extra prec to skip weird +/- 0.5 adjustments

	// Newton's iteration, roughly doubling correct digits per pass.
	ctx.Precision = 3
	for {
		// p := min(2*p - 2, maxp)
		ctx.Precision = min(2*ctx.Precision-2, maxp)
		// approx := .5*(approx + f/approx)
		ctx.Mul(z, ptFive, ctx.Add(&tmp, z, ctx.Quo(&tmp, f, z)))
		if ctx.Precision == maxp {
			break
		}
	}

	// The paper also specifies an additional code block for adjusting approx.
	// This code never went into the branches that modified approx, and rounding
	// to half even does the same thing. The GDA spec requires us to use
	// rounding mode half even (speleotrove.com/decimal/daops.html#refsqrt)
	// anyway.

	// Undo the normalization (scale shift by e/2) and drop trailing zeros.
	ctx.Reduce(z.SetScale(z.Scale() - e/2))
	if z.Precision() <= prec {
		// Exact result: clear flags that only the guard-digit work raised.
		if !rnd {
			z.Context.Conditions &= ^decimal.Rounded
		}
		if !ixt {
			z.Context.Conditions &= ^decimal.Inexact
		}
	}
	ctx.Precision = prec
	return ctx.Round(z)
}
|
go
|
{
"resource": ""
}
|
q5337
|
Err
|
train
|
// Err returns the trapped conditions as an error, or nil when no set
// condition is also enabled in Traps.
func (c Context) Err() error {
	m := c.Conditions & c.Traps
	if m == 0 {
		return nil
	}
	return m
}
|
go
|
{
"resource": ""
}
|
q5338
|
WithContext
|
train
|
// WithContext returns a new, zero-valued Big whose Context is c.
func WithContext(c Context) *Big {
	return &Big{Context: c}
}
|
go
|
{
"resource": ""
}
|
q5339
|
WithPrecision
|
train
|
// WithPrecision returns a new Big whose Context precision is p.
// p == 0 selects DefaultPrecision; an out-of-range p produces a NaN
// flagged with InvalidContext.
func WithPrecision(p int) *Big {
	z := new(Big)
	if p == 0 {
		z.Context.Precision = DefaultPrecision
	} else if p > 0 && p <= UnlimitedPrecision {
		z.Context.Precision = p
	} else {
		z.setNaN(InvalidContext, qnan, invctxpgtu)
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5340
|
Add
|
train
|
// Add sets z = x + y, rounded per c, and returns z. Special values follow
// the GDA rules: NaNs propagate, and opposite infinities yield NaN with
// InvalidOperation.
func (c Context) Add(z, x, y *Big) *Big {
	if debug {
		x.validate()
		y.validate()
	}
	if z.invalidContext(c) {
		return z
	}

	// Common case: both operands finite.
	if x.IsFinite() && y.IsFinite() {
		z.form = c.add(z, x, x.form, y, y.form)
		return c.round(z)
	}

	// NaN + NaN
	// NaN + y
	// x + NaN
	if z.checkNaNs(x, y, addition) {
		return z
	}

	if x.form&inf != 0 {
		if y.form&inf != 0 && x.form^y.form == signbit {
			// +Inf + -Inf
			// -Inf + +Inf
			return z.setNaN(InvalidOperation, qnan, addinfinf)
		}
		// ±Inf + y
		// +Inf + +Inf
		// -Inf + -Inf
		return z.Set(x)
	}
	// x + ±Inf
	return z.Set(y)
}
|
go
|
{
"resource": ""
}
|
q5341
|
mul
|
train
|
// mul sets z = x * y (without rounding; callers round) and returns z.
// The compact/compact path is inlined for speed; mixed and big paths fall
// back to big.Int arithmetic. 0 * ±Inf is NaN with InvalidOperation.
func (c Context) mul(z, x, y *Big) *Big {
	if debug {
		x.validate()
		y.validate()
	}

	// Result sign is the XOR of the operand signs.
	sign := x.form&signbit ^ y.form&signbit

	if x.IsFinite() && y.IsFinite() {
		z.form = finite | sign
		z.exp = x.exp + y.exp

		// Multiplication is simple, so inline it.
		if x.isCompact() {
			if y.isCompact() {
				hi, lo := arith.Mul64(x.compact, y.compact)
				if hi == 0 {
					// Product fits in a single word.
					z.compact = lo
					if lo == cst.Inflated {
						// lo collides with the sentinel; mirror it into
						// unscaled so the value is still representable.
						z.unscaled.SetUint64(cst.Inflated)
					}
					z.precision = arith.Length(lo)
					return z
				}
				// 128-bit product: spill into unscaled.
				arith.Set(&z.unscaled, hi, lo)
			} else { // y.isInflated
				arith.Mul(&z.unscaled, &y.unscaled, x.compact)
			}
		} else if y.isCompact() { // x.isInflated
			arith.Mul(&z.unscaled, &x.unscaled, y.compact)
		} else {
			z.unscaled.Mul(&x.unscaled, &y.unscaled)
		}
		return z.norm()
	}

	// NaN * NaN
	// NaN * y
	// x * NaN
	if z.checkNaNs(x, y, multiplication) {
		return z
	}

	if (x.IsInf(0) && !y.isZero()) ||
		(y.IsInf(0) && !x.isZero()) ||
		(y.IsInf(0) && x.IsInf(0)) {
		// ±Inf * y
		// x * ±Inf
		// ±Inf * ±Inf
		return z.SetInf(sign != 0)
	}

	// 0 * ±Inf
	// ±Inf * 0
	return z.setNaN(InvalidOperation, qnan, mul0inf)
}
|
go
|
{
"resource": ""
}
|
q5342
|
Quantize
|
train
|
// Quantize rounds z in place so its exponent equals n (i.e. scale -n),
// following the GDA quantize rules: infinities and out-of-range targets
// produce NaN with InvalidOperation, and results that would exceed the
// context precision are rejected rather than rounded.
func (c Context) Quantize(z *Big, n int) *Big {
	if debug {
		z.validate()
	}
	if z.invalidContext(c) {
		return z
	}

	// n arrives as a scale; convert to an exponent.
	n = -n
	if z.isSpecial() {
		if z.form&inf != 0 {
			return z.setNaN(InvalidOperation, qnan, quantinf)
		}
		z.checkNaNs(z, z, quantization)
		return z
	}

	if n > c.maxScale() || n < c.etiny() {
		return z.setNaN(InvalidOperation, qnan, quantminmax)
	}

	if z.isZero() {
		z.exp = n
		return z
	}

	// shift > 0 pads with zeros (multiply); shift < 0 rounds away digits.
	shift := z.exp - n
	if z.Precision()+shift > precision(c) {
		return z.setNaN(InvalidOperation, qnan, quantprec)
	}

	z.exp = n
	if shift == 0 {
		return z
	}

	if shift < 0 {
		z.Context.Conditions |= Rounded
	}

	m := c.RoundingMode
	neg := z.form & signbit
	if z.isCompact() {
		if shift > 0 {
			if zc, ok := arith.MulPow10(z.compact, uint64(shift)); ok {
				return z.setTriple(zc, neg, n)
			}
			// shift < 0
		} else if yc, ok := arith.Pow10(uint64(-shift)); ok {
			z.quo(m, z.compact, neg, yc, 0)
			return z
		}
		// Compact path overflowed; inflate into the big representation.
		z.unscaled.SetUint64(z.compact)
		z.compact = cst.Inflated
	}

	if shift > 0 {
		arith.MulBigPow10(&z.unscaled, &z.unscaled, uint64(shift))
		z.precision = arith.BigLength(&z.unscaled)
	} else {
		var r big.Int
		z.quoBig(m, &z.unscaled, neg, arith.BigPow10(uint64(-shift)), 0, &r)
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5343
|
Reduce
|
train
|
// Reduce rounds z per c and then strips trailing zeros from its
// coefficient, adjusting the exponent to compensate.
func (c Context) Reduce(z *Big) *Big {
	if debug {
		z.validate()
	}
	c.Round(z)
	return c.simpleReduce(z)
}
|
go
|
{
"resource": ""
}
|
q5344
|
simpleReduce
|
train
|
// simpleReduce strips trailing zeros from z's coefficient without any
// rounding, incrementing the exponent for every zero removed. Zero
// collapses to 0e0; NaNs signal as usual.
func (c Context) simpleReduce(z *Big) *Big {
	if z.isSpecial() {
		// Same semantics as plus(z), i.e. z + 0.
		z.checkNaNs(z, z, reduction)
		return z
	}

	if z.isZero() {
		z.exp = 0
		z.precision = 1
		return z
	}

	if z.compact == cst.Inflated {
		// Odd numbers have no trailing zeros at all.
		if z.unscaled.Bit(0) != 0 {
			return z
		}

		var r big.Int
		// Shed six zeros at a time while the value is still too large for
		// a uint64 (> ~20 digits).
		for z.precision >= 20 {
			z.unscaled.QuoRem(&z.unscaled, cst.OneMillionInt, &r)
			if r.Sign() != 0 {
				// TODO(eric): which is less expensive? Copying z.unscaled into
				// a temporary or reconstructing if we can't divide by N?
				z.unscaled.Mul(&z.unscaled, cst.OneMillionInt)
				z.unscaled.Add(&z.unscaled, &r)
				break
			}
			z.exp += 6
			z.precision -= 6

			// Try to avoid reconstruction for odd numbers.
			if z.unscaled.Bit(0) != 0 {
				break
			}
		}

		// Same idea, one zero at a time.
		for z.precision >= 20 {
			z.unscaled.QuoRem(&z.unscaled, cst.TenInt, &r)
			if r.Sign() != 0 {
				z.unscaled.Mul(&z.unscaled, cst.TenInt)
				z.unscaled.Add(&z.unscaled, &r)
				break
			}
			z.exp++
			z.precision--
			if z.unscaled.Bit(0) != 0 {
				break
			}
		}

		if z.precision >= 20 {
			return z.norm()
		}
		// Small enough now: switch to the compact representation and let
		// the word loops below finish the job.
		z.compact = z.unscaled.Uint64()
	}

	// Strip four zeros at a time, then singles.
	for ; z.compact >= 10000 && z.compact%10000 == 0; z.precision -= 4 {
		z.compact /= 10000
		z.exp += 4
	}
	for ; z.compact%10 == 0; z.precision-- {
		z.compact /= 10
		z.exp++
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5345
|
Rem
|
train
|
// Rem sets z to the remainder of x / y (x - y*truncate(x/y)) and returns
// z. Division by zero, infinite dividends, and quotients whose integer
// part exceeds the context precision all produce NaN with the
// corresponding condition flags.
func (c Context) Rem(z, x, y *Big) *Big {
	if debug {
		x.validate()
		y.validate()
	}
	if z.invalidContext(c) {
		return z
	}

	if x.IsFinite() && y.IsFinite() {
		if y.isZero() {
			if x.isZero() {
				// 0 / 0
				return z.setNaN(InvalidOperation|DivisionUndefined, qnan, quo00)
			}
			// x / 0
			return z.setNaN(InvalidOperation|DivisionByZero, qnan, remx0)
		}
		if x.isZero() {
			// 0 / y
			return z.setZero(x.form&signbit, min(x.exp, y.exp))
		}
		// TODO(eric): See if we can get rid of tmp. See issue #72.
		// tmp receives the integer quotient; z receives the remainder.
		var tmp Big
		_, z = c.quorem(&tmp, z, x, y)
		z.exp = min(x.exp, y.exp)
		tmp.exp = 0
		if tmp.Precision() > precision(c) {
			// Integer part of the quotient doesn't fit: the remainder is
			// not representable either.
			return z.setNaN(DivisionImpossible, qnan, quointprec)
		}
		return c.round(z)
	}

	// NaN / NaN
	// NaN / y
	// x / NaN
	if z.checkNaNs(x, y, division) {
		return z
	}

	if x.form&inf != 0 {
		if y.form&inf != 0 {
			// ±Inf / ±Inf
			return z.setNaN(InvalidOperation, qnan, quoinfinf)
		}
		// ±Inf / y
		return z.setNaN(InvalidOperation, qnan, reminfy)
	}
	// x / ±Inf
	return z.Set(x)
}
|
go
|
{
"resource": ""
}
|
q5346
|
RoundToInt
|
train
|
// RoundToInt rounds z in place to an integer using c's rounding mode.
// Values that are already integral (exp >= 0) or special pass through.
func (c Context) RoundToInt(z *Big) *Big {
	if z.isSpecial() || z.exp >= 0 {
		return z
	}
	// c is a value receiver, so widening its precision here is local:
	// it lets Quantize keep every integral digit of z.
	c.Precision = z.Precision()
	return c.Quantize(z, 0)
}
|
go
|
{
"resource": ""
}
|
q5347
|
Set
|
train
|
// Set sets z to x rounded per c and returns z.
func (c Context) Set(z, x *Big) *Big {
	return c.Round(z.Copy(x))
}
|
go
|
{
"resource": ""
}
|
q5348
|
SetString
|
train
|
// SetString sets z from the decimal string s, rounding per c. On parse
// failure it returns (nil, false) and z is left as SetString left it.
func (c Context) SetString(z *Big, s string) (*Big, bool) {
	_, ok := z.SetString(s)
	if !ok {
		return nil, false
	}
	return c.Round(z), true
}
|
go
|
{
"resource": ""
}
|
q5349
|
Sub
|
train
|
// Sub sets z = x - y, rounded per c, and returns z. Subtraction of
// same-signed infinities is NaN with InvalidOperation; all other special
// combinations follow the GDA rules.
func (c Context) Sub(z, x, y *Big) *Big {
	if debug {
		x.validate()
		y.validate()
	}
	if z.invalidContext(c) {
		return z
	}

	// Common case: subtraction is addition with y's sign flipped.
	if x.IsFinite() && y.IsFinite() {
		z.form = c.add(z, x, x.form, y, y.form^signbit)
		return c.round(z)
	}

	// NaN - NaN
	// NaN - y
	// x - NaN
	if z.checkNaNs(x, y, subtraction) {
		return z
	}

	if x.form&inf != 0 {
		if y.form&inf != 0 && (x.form&signbit == y.form&signbit) {
			// +Inf - +Inf
			// -Inf - -Inf
			return z.setNaN(InvalidOperation, qnan, subinfinf)
		}
		// ±Inf - y
		// -Inf - +Inf
		// +Inf - -Inf
		return z.Set(x)
	}
	// x - ±Inf
	return z.Neg(y)
}
|
go
|
{
"resource": ""
}
|
q5350
|
CmpTotalAbs
|
train
|
// CmpTotalAbs compares |x| and |y| under the total ordering (which also
// orders NaNs and infinities), returning -1, 0, or +1.
func CmpTotalAbs(x, y *decimal.Big) int {
	xo, yo := ord(x, true), ord(y, true)
	switch {
	case xo > yo:
		return +1
	case xo < yo:
		return -1
	case xo != 0:
		// Same non-finite class (e.g. both +Inf or both NaN): equal.
		return 0
	}
	// Both finite: fall back to magnitude comparison.
	return x.CmpAbs(y)
}
|
go
|
{
"resource": ""
}
|
q5351
|
CopyAbs
|
train
|
// CopyAbs sets z to |x| without signaling, and returns z.
func CopyAbs(z, x *decimal.Big) *decimal.Big {
	return z.CopySign(x, pos)
}
|
go
|
{
"resource": ""
}
|
q5352
|
CopyNeg
|
train
|
// CopyNeg sets z to x with its sign bit flipped, without signaling,
// and returns z.
func CopyNeg(z, x *decimal.Big) *decimal.Big {
	sign := neg
	if x.Signbit() {
		sign = pos
	}
	return z.CopySign(x, sign)
}
|
go
|
{
"resource": ""
}
|
q5353
|
Max
|
train
|
// Max returns the greatest of its arguments per Cmp. At least one
// argument is required; ties keep the earliest.
func Max(x ...*decimal.Big) *decimal.Big {
	best := x[0]
	for i := 1; i < len(x); i++ {
		if x[i].Cmp(best) > 0 {
			best = x[i]
		}
	}
	return best
}
|
go
|
{
"resource": ""
}
|
q5354
|
MaxAbs
|
train
|
// MaxAbs returns the argument with the greatest absolute value. At least
// one argument is required; ties keep the earliest.
func MaxAbs(x ...*decimal.Big) *decimal.Big {
	best := x[0]
	for i := 1; i < len(x); i++ {
		if x[i].CmpAbs(best) > 0 {
			best = x[i]
		}
	}
	return best
}
|
go
|
{
"resource": ""
}
|
q5355
|
Min
|
train
|
// Min returns the least of its arguments per Cmp. At least one argument
// is required; ties keep the earliest.
func Min(x ...*decimal.Big) *decimal.Big {
	best := x[0]
	for i := 1; i < len(x); i++ {
		if x[i].Cmp(best) < 0 {
			best = x[i]
		}
	}
	return best
}
|
go
|
{
"resource": ""
}
|
q5356
|
MinAbs
|
train
|
// MinAbs returns the argument with the smallest absolute value. At least
// one argument is required; ties keep the earliest.
func MinAbs(x ...*decimal.Big) *decimal.Big {
	best := x[0]
	for i := 1; i < len(x); i++ {
		if x[i].CmpAbs(best) < 0 {
			best = x[i]
		}
	}
	return best
}
|
go
|
{
"resource": ""
}
|
q5357
|
maxfor
|
train
|
// maxfor sets z to the largest n-digit coefficient (10^n - 1), negated
// when sign < 0.
func maxfor(z *big.Int, n, sign int) {
	arith.Sub(z, arith.BigPow10(uint64(n)), 1)
	if sign < 0 {
		z.Neg(z)
	}
}
|
go
|
{
"resource": ""
}
|
q5358
|
NextMinus
|
train
|
// NextMinus sets z to the largest representable value strictly less than
// x at z's precision and returns z. +Inf steps down to the largest finite
// value; -Inf stays -Inf.
func NextMinus(z, x *decimal.Big) *decimal.Big {
	if z.CheckNaNs(x, nil) {
		return z
	}

	if x.IsInf(0) {
		if x.IsInf(-1) {
			// Nothing below -Inf.
			return z.SetInf(true)
		}
		// Below +Inf: the largest finite number, +(10^p - 1) * 10^etop.
		_, m := decimal.Raw(z)
		maxfor(m, precision(z), +1)
		return z.SetBigMantScale(m, -etop(z))
	}

	// Subtract one unit in the smallest representable place, rounding
	// toward -Inf so we land on the immediate predecessor.
	ctx := z.Context
	ctx.RoundingMode = decimal.ToNegativeInf
	ctx.Set(z, x)
	ctx.Sub(z, x, new(decimal.Big).SetMantScale(1, -etiny(z)+1))
	// Keep only conditions that were already set; the step itself is not
	// an observable rounding event.
	z.Context.Conditions &= ctx.Conditions
	return z
}
|
go
|
{
"resource": ""
}
|
q5359
|
NextPlus
|
train
|
// NextPlus sets z to the smallest representable value strictly greater
// than x at z's precision and returns z. -Inf steps up to the most
// negative finite value; +Inf stays +Inf.
func NextPlus(z, x *decimal.Big) *decimal.Big {
	if z.CheckNaNs(x, nil) {
		return z
	}

	if x.IsInf(0) {
		if x.IsInf(+1) {
			// Nothing above +Inf.
			return z.SetInf(false)
		}
		// Above -Inf: the most negative finite number, -(10^p - 1) * 10^etop.
		_, m := decimal.Raw(z)
		maxfor(m, precision(z), -1)
		return z.SetBigMantScale(m, -etop(z))
	}

	// Add one unit in the smallest representable place, rounding toward
	// +Inf so we land on the immediate successor.
	ctx := z.Context
	ctx.RoundingMode = decimal.ToPositiveInf
	ctx.Set(z, x)
	ctx.Add(z, x, new(decimal.Big).SetMantScale(1, -etiny(z)+1))
	// Keep only conditions that were already set; the step itself is not
	// an observable rounding event.
	z.Context.Conditions &= ctx.Conditions
	return z
}
|
go
|
{
"resource": ""
}
|
q5360
|
SetSignbit
|
train
|
// SetSignbit forces z's sign bit: true makes z negative, false positive.
func SetSignbit(z *decimal.Big, sign bool) *decimal.Big {
	s := pos
	if sign {
		s = neg
	}
	return z.CopySign(z, s)
}
|
go
|
{
"resource": ""
}
|
q5361
|
Wallis
|
train
|
// Wallis evaluates the continued fraction described by g using the
// Wallis recurrence for its convergents (a/b), iterating until successive
// convergents differ by at most eps, and stores the result in z.
func Wallis(z *decimal.Big, g Generator) *decimal.Big {
	if !g.Next() {
		return z
	}

	// Let the generator supply its own workspace/tolerance, falling back
	// to a default sized for z's precision plus guard digits.
	ws, ok := g.(Walliser)
	if !ok {
		ws = walliser{prec: precision(z) + 5}
	}
	a, a_1, b, b_1, p, eps := ws.Wallis()

	// Seed the recurrence from the first term.
	t := g.Term()
	a_1.SetUint64(1)
	a.Copy(t.B)
	b.SetUint64(1)

	ctx := z.Context
	if c, ok := g.(Contexter); ok {
		ctx = c.Context()
	}

	for g.Next() && p.IsFinite() {
		t = g.Term()

		// a, a_1 = a*B + a_1*A, a  (z is scratch for the rotation)
		z.Copy(a)
		ctx.FMA(a, a, t.B, ctx.Mul(a_1, a_1, t.A))
		a_1.Copy(z)

		// b, b_1 = b*B + b_1*A, b
		z.Copy(b)
		ctx.FMA(b, b, t.B, ctx.Mul(b_1, b_1, t.A))
		b_1.Copy(z)

		// Current convergent; stop when it moved less than eps.
		ctx.Quo(z, a, b)
		if ctx.Sub(p, z, p).CmpAbs(eps) <= 0 {
			break
		}
		p.Copy(z)
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5362
|
ParseCases
|
train
|
// ParseCases reads newline-delimited test cases from r, skipping blank
// lines and '#' comment lines, and returns them in order. The first
// parse error aborts the scan.
func ParseCases(r io.Reader) ([]Case, error) {
	var cases []Case
	sc := bufio.NewScanner(r)
	sc.Split(bufio.ScanLines)
	for sc.Scan() {
		line := sc.Bytes()
		// Skip empty lines and comments.
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		c, err := ParseCase(line)
		if err != nil {
			return nil, err
		}
		cases = append(cases, c)
	}
	return cases, sc.Err()
}
|
go
|
{
"resource": ""
}
|
q5363
|
ShortString
|
train
|
// ShortString renders the case on one line, truncating each input and the
// output to at most length characters.
func (c Case) ShortString(length int) string {
	return fmt.Sprintf("%s%d [%s, %s]: %s(%s) = %s %s",
		c.Prefix, c.Prec, c.Trap, c.Mode, c.Op,
		join(c.Inputs, ", ", length), trunc(c.Output, length), c.Excep)
}
|
go
|
{
"resource": ""
}
|
q5364
|
IsNaN
|
train
|
// IsNaN reports whether the datum spells a NaN, and whether it is a
// signaling one. Accepted forms: the single characters "S"/"Q", and the
// case-insensitive words "nan"/"qnan"/"snan" with an optional leading '-'.
func (i Data) IsNaN() (nan, signal bool) {
	if len(i) == 1 {
		// NOTE(review): only uppercase "S"/"Q" match here, unlike the
		// case-insensitive word forms below — confirm that is intended.
		return (i == "S" || i == "Q"), i == "S"
	}
	// Strip a sign; NaN payload sign is irrelevant to classification.
	if i[0] == '-' {
		i = i[1:]
	}
	return strings.EqualFold(string(i), "nan") ||
		strings.EqualFold(string(i), "qnan") ||
		strings.EqualFold(string(i), "snan"), i[0] == 's' || i[0] == 'S'
}
|
go
|
{
"resource": ""
}
|
q5365
|
IsInf
|
train
|
// IsInf reports whether the datum spells a signed infinity ("+Inf" or
// "-Inf", case-insensitive, sign required), returning its sign and true
// on a match.
func (i Data) IsInf() (int, bool) {
	if len(i) != 4 {
		return 0, false
	}
	switch {
	case strings.EqualFold(string(i), "-Inf"):
		return -1, true
	case strings.EqualFold(string(i), "+Inf"):
		return +1, true
	}
	return 0, false
}
|
go
|
{
"resource": ""
}
|
q5366
|
CheckNaNs
|
train
|
// CheckNaNs reports whether either operand is NaN (propagating it into z)
// or whether z's context is invalid; callers should return z when true.
func (z *Big) CheckNaNs(x, y *Big) bool {
	return z.invalidContext(z.Context) || z.checkNaNs(x, y, 0)
}
|
go
|
{
"resource": ""
}
|
q5367
|
Abs
|
train
|
// Abs sets z to the absolute value of x, rounded per z's context, and
// returns z. NaN inputs propagate with the usual signaling.
func (z *Big) Abs(x *Big) *Big {
	if debug {
		x.validate()
	}
	// Only proceed when the context is valid and x is not NaN.
	if !z.invalidContext(z.Context) && !z.checkNaNs(x, x, absvalue) {
		z.Context.round(z.copyAbs(x))
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5368
|
cmp
|
train
|
// cmp compares x and y (or |x| and |y| when abs is true), returning -1,
// 0, or +1. Comparisons involving NaN return 0; callers are expected to
// have handled NaN signaling already.
func cmp(x, y *Big, abs bool) int {
	if debug {
		x.validate()
		y.validate()
	}

	// Identical pointers compare equal without any inspection.
	if x == y {
		return 0
	}

	// NaN cmp x
	// z cmp NaN
	// NaN cmp NaN
	if (x.form|y.form)&nan != 0 {
		return 0
	}

	// Fast path: Catches non-finite forms like zero and ±Inf, possibly signed.
	xs := x.ord(abs)
	ys := y.ord(abs)
	if xs != ys {
		if xs > ys {
			return +1
		}
		return -1
	}
	switch xs {
	case 0, +2, -2:
		// Both zero, or both the same signed infinity: equal.
		return 0
	default:
		// Both finite with the same sign: compare magnitudes, flipping
		// the result for negative operands in signed mode.
		r := cmpabs(x, y)
		if xs < 0 && !abs {
			r = -r
		}
		return r
	}
}
|
go
|
{
"resource": ""
}
|
q5369
|
Copy
|
train
|
// Copy sets z to an exact (sign-preserving) copy of x and returns z.
func (z *Big) Copy(x *Big) *Big {
	if debug {
		x.validate()
	}
	if z == x {
		return z
	}
	// copyAbs clears the sign bit, so capture and reapply it.
	sign := x.form & signbit
	z.copyAbs(x)
	z.form |= sign
	return z
}
|
go
|
{
"resource": ""
}
|
q5370
|
copyAbs
|
train
|
// copyAbs sets z to |x| (coefficient, exponent, precision, and form with
// the sign bit cleared) and returns z. z's Context is left untouched.
func (z *Big) copyAbs(x *Big) *Big {
	if z != x {
		z.precision = x.Precision()
		z.exp = x.exp
		z.compact = x.compact
		// Only copy the big coefficient when it is actually in use.
		if x.IsFinite() && x.isInflated() {
			z.unscaled.Set(&x.unscaled)
		}
	}
	// Clear the sign bit even when z == x.
	z.form = x.form & ^signbit
	return z
}
|
go
|
{
"resource": ""
}
|
q5371
|
CopySign
|
train
|
// CopySign sets z to x with y's sign and returns z, without signaling.
func (z *Big) CopySign(x, y *Big) *Big {
	if debug {
		x.validate()
		y.validate()
	}
	// Pre-emptively capture signbit in case z == y.
	sign := y.form & signbit
	z.copyAbs(x)
	z.form |= sign
	return z
}
|
go
|
{
"resource": ""
}
|
q5372
|
Float64
|
train
|
// Float64 returns the float64 nearest to x. ok reports whether the
// conversion is exact: fast paths prove exactness directly, while the
// general path round-trips through the string parser and only rejects
// overflow to Inf/NaN.
func (x *Big) Float64() (f float64, ok bool) {
	if debug {
		x.validate()
	}

	if !x.IsFinite() {
		switch x.form {
		case pinf, ninf:
			return math.Inf(int(x.form & signbit)), true
		case snan, qnan:
			return math.NaN(), true
		case ssnan, sqnan:
			// Signed NaNs carry their sign into the float.
			return math.Copysign(math.NaN(), -1), true
		}
	}

	const (
		maxPow10    = 22        // largest exact power of 10
		maxMantissa = 1<<53 + 1 // largest exact mantissa
	)
	switch xc := x.compact; {
	case !x.isCompact():
		fallthrough
	//lint:ignore ST1015 convoluted, but on purpose
	default:
		// General case: parse the decimal string; inexact but nearest.
		f, _ = strconv.ParseFloat(x.String(), 64)
		ok = !math.IsInf(f, 0) && !math.IsNaN(f)
	case xc == 0:
		ok = true
	case x.IsInt():
		if xc, ok := x.Int64(); ok {
			f = float64(xc)
		} else if xc, ok := x.Uint64(); ok {
			f = float64(xc)
		}
		// Exact iff within the 53-bit mantissa or a power of two.
		ok = xc < maxMantissa || (xc&(xc-1)) == 0
	case x.exp == 0:
		f = float64(xc)
		ok = xc < maxMantissa || (xc&(xc-1)) == 0
	case x.exp > 0:
		f = float64(x.compact) * math.Pow10(x.exp)
		ok = x.compact < maxMantissa && x.exp < maxPow10
	case x.exp < 0:
		f = float64(x.compact) / math.Pow10(-x.exp)
		ok = x.compact < maxMantissa && x.exp > -maxPow10
	}

	if x.form&signbit != 0 {
		f = math.Copysign(f, -1)
	}
	return f, ok
}
|
go
|
{
"resource": ""
}
|
q5373
|
Float
|
train
|
// Float sets z (allocating a new big.Float if z is nil) to the value of x and
// returns z. Infinities map to big.Float infinities; NaN forms become 0,
// since big.Float cannot represent NaN.
func (x *Big) Float(z *big.Float) *big.Float {
	if debug {
		x.validate()
	}
	if z == nil {
		z = new(big.Float)
	}
	switch x.form {
	case finite, finite | signbit:
		if x.isZero() {
			z.SetUint64(0)
		} else {
			// Route through big.Rat to preserve the exact decimal value.
			z.SetRat(x.Rat(nil))
		}
	case pinf, ninf:
		// big.Float.SetInf(signbit): a true argument produces -Inf, so pass
		// whether x is *negative* infinity. (Previously this passed
		// x.form == pinf, inverting the sign of the result.)
		z.SetInf(x.form == ninf)
	default: // snan, qnan, ssnan, sqnan:
		z.SetUint64(0)
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5374
|
Int64
|
train
|
// Int64 returns x as an int64 and a bool reporting whether the conversion was
// possible without truncation. Non-finite or out-of-range values return
// (0, false).
func (x *Big) Int64() (int64, bool) {
	if debug {
		x.validate()
	}
	if !x.IsFinite() {
		return 0, false
	}
	// x might be too large to fit into an int64 *now*, but rescaling x might
	// shrink it enough. See issue #20.
	if !x.isCompact() {
		xb := x.Int(nil)
		return xb.Int64(), xb.IsInt64()
	}
	u := x.compact
	if x.exp != 0 {
		// Fold the exponent into the mantissa; fails if it overflows or
		// truncates.
		var ok bool
		if u, ok = scalex(u, x.exp); !ok {
			return 0, false
		}
	}
	su := int64(u)
	// Accept non-negative results, plus MinInt64 (the one value for which
	// su == -su besides zero) when x is negative.
	if su >= 0 || x.Signbit() && su == -su {
		if x.Signbit() {
			su = -su
		}
		return su, true
	}
	return 0, false
}
|
go
|
{
"resource": ""
}
|
q5375
|
Uint64
|
train
|
// Uint64 returns x as a uint64 and a bool reporting whether the conversion
// was possible without truncation. Negative or non-finite values return
// (0, false).
func (x *Big) Uint64() (uint64, bool) {
	if debug {
		x.validate()
	}
	if !x.IsFinite() || x.Signbit() {
		return 0, false
	}
	if !x.isCompact() {
		// x might be too large to fit into a uint64 *now*, but rescaling x
		// might shrink it enough. See issue #20.
		xb := x.Int(nil)
		return xb.Uint64(), xb.IsUint64()
	}
	if x.exp == 0 {
		return x.compact, true
	}
	return scalex(x.compact, x.exp)
}
|
go
|
{
"resource": ""
}
|
q5376
|
IsNormal
|
train
|
// IsNormal reports whether x is a finite non-subnormal number, i.e. its
// adjusted exponent is at least the context's minimum scale.
func (x *Big) IsNormal() bool {
	if !x.IsFinite() {
		return false
	}
	return x.adjusted() >= x.Context.minScale()
}
|
go
|
{
"resource": ""
}
|
q5377
|
IsSubnormal
|
train
|
// IsSubnormal reports whether x is a finite subnormal number, i.e. its
// adjusted exponent falls below the context's minimum scale.
func (x *Big) IsSubnormal() bool {
	if !x.IsFinite() {
		return false
	}
	return x.adjusted() < x.Context.minScale()
}
|
go
|
{
"resource": ""
}
|
q5378
|
IsInt
|
train
|
// IsInt reports whether x is an integer. Infinities and NaNs are not
// integers.
func (x *Big) IsInt() bool {
	if debug {
		x.validate()
	}
	if !x.IsFinite() {
		return false
	}
	// 0, 5000, 40
	if x.isZero() || x.exp >= 0 {
		return true
	}
	xp := x.Precision()
	exp := x.exp
	// 0.001
	// 0.5
	if -exp >= xp {
		return false
	}
	// 44.00
	// 1.000
	// Strip trailing decimal zeros; if they cover the whole fractional part,
	// x is an integer.
	if x.isCompact() {
		for v := x.compact; v%10 == 0; v /= 10 {
			exp++
		}
		// Avoid the overhead of copying x.unscaled if we know for a fact it's not
		// an integer.
	} else if x.unscaled.Bit(0) == 0 {
		// An odd mantissa (bit 0 set) cannot end in a decimal zero, so only
		// even values are worth dividing out.
		v := new(big.Int).Set(&x.unscaled)
		r := new(big.Int)
		for {
			v.QuoRem(v, c.TenInt, r)
			if r.Sign() != 0 {
				break
			}
			exp++
		}
	}
	return exp >= 0
}
|
go
|
{
"resource": ""
}
|
q5379
|
Neg
|
train
|
// Neg sets z to -x, rounds it per z's Context, and returns z.
func (z *Big) Neg(x *Big) *Big {
	if debug {
		x.validate()
	}
	if !z.invalidContext(z.Context) && !z.checkNaNs(x, x, negation) {
		xform := x.form // copy in case z == x
		z.copyAbs(x)
		// Flip the sign except for finite zeros, which stay unsigned unless
		// rounding toward -Inf.
		if !z.IsFinite() || z.compact != 0 || z.Context.RoundingMode == ToNegativeInf {
			z.form = xform ^ signbit
		}
	}
	return z.Context.round(z)
}
|
go
|
{
"resource": ""
}
|
q5380
|
Precision
|
train
|
// Precision returns the number of digits in x's mantissa; non-finite values
// report 0 and an unset precision reports 1.
func (x *Big) Precision() int {
	// Cannot call validate since validate calls this method.
	switch {
	case !x.IsFinite():
		return 0
	case x.precision == 0:
		return 1
	default:
		return x.precision
	}
}
|
go
|
{
"resource": ""
}
|
q5381
|
Rat
|
train
|
// Rat sets z (allocating a new big.Rat if z is nil) to the exact value of x
// and returns z. Non-finite values yield 0.
func (x *Big) Rat(z *big.Rat) *big.Rat {
	if debug {
		x.validate()
	}
	if z == nil {
		z = new(big.Rat)
	}
	if !x.IsFinite() {
		return z.SetInt64(0)
	}
	// Fast path for decimals <= math.MaxInt64.
	if x.IsInt() {
		if u, ok := x.Int64(); ok {
			// If profiled we can call scalex ourselves and save the overhead of
			// calling Int64. But I doubt it'll matter much.
			return z.SetInt64(u)
		}
	}
	// Build the numerator from the (compact or inflated) mantissa.
	num := new(big.Int)
	if x.isCompact() {
		num.SetUint64(x.compact)
	} else {
		num.Set(&x.unscaled)
	}
	// Positive exponents fold into the numerator.
	if x.exp > 0 {
		arith.MulBigPow10(num, num, uint64(x.exp))
	}
	if x.Signbit() {
		num.Neg(num)
	}
	// Negative exponents become a power-of-ten denominator.
	denom := c.OneInt
	if x.exp < 0 {
		denom = new(big.Int)
		if shift, ok := arith.Pow10(uint64(-x.exp)); ok {
			denom.SetUint64(shift)
		} else {
			denom.Set(arith.BigPow10(uint64(-x.exp)))
		}
	}
	return z.SetFrac(num, denom)
}
|
go
|
{
"resource": ""
}
|
q5382
|
Scan
|
train
|
// Scan implements fmt.Scanner. It delegates to z.scan through a byteReader
// adapter; the verb is ignored.
func (z *Big) Scan(state fmt.ScanState, verb rune) error {
	return z.scan(byteReader{state})
}
|
go
|
{
"resource": ""
}
|
q5383
|
SetBigMantScale
|
train
|
// SetBigMantScale sets z to the value * 10^-scale and returns z.
func (z *Big) SetBigMantScale(value *big.Int, scale int) *Big {
	// Do this first in case value == z.unscaled. Don't want to clobber the sign.
	z.form = finite
	if value.Sign() < 0 {
		z.form |= signbit
	}
	z.unscaled.Abs(value)
	z.compact = c.Inflated
	z.precision = arith.BigLength(value)
	// Deflate to the compact (uint64) representation when the magnitude fits
	// and does not collide with the Inflated sentinel.
	if z.unscaled.IsUint64() {
		if v := z.unscaled.Uint64(); v != c.Inflated {
			z.compact = v
		}
	}
	z.exp = -scale
	return z
}
|
go
|
{
"resource": ""
}
|
q5384
|
SetFloat
|
train
|
// SetFloat sets z to the exact value of x and returns z.
func (z *Big) SetFloat(x *big.Float) *Big {
	if x.IsInf() {
		if x.Signbit() {
			z.form = ninf
		} else {
			z.form = pinf
		}
		return z
	}
	neg := x.Signbit()
	if x.Sign() == 0 {
		// NOTE(review): this branch ORs the sign into z.form and returns
		// without resetting z.form to finite or clearing z.exp — presumably
		// it assumes a zero-valued receiver; confirm against callers.
		if neg {
			z.form |= signbit
		}
		z.compact = 0
		z.precision = 1
		return z
	}
	z.exp = 0
	// Work on an absolute, max-precision copy so repeated *10 scaling below
	// stays exact.
	x0 := new(big.Float).Copy(x).SetPrec(big.MaxPrec)
	x0.Abs(x0)
	if !x.IsInt() {
		// Multiply by 10 until integral, tracking the shift in the exponent.
		for !x0.IsInt() {
			x0.Mul(x0, c.TenFloat)
			z.exp--
		}
	}
	// Prefer the compact uint64 representation; otherwise inflate.
	if mant, acc := x0.Uint64(); acc == big.Exact {
		z.compact = mant
		z.precision = arith.Length(mant)
	} else {
		z.compact = c.Inflated
		x0.Int(&z.unscaled)
		z.precision = arith.BigLength(&z.unscaled)
	}
	z.form = finite
	if neg {
		z.form |= signbit
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5385
|
SetFloat64
|
train
|
// SetFloat64 sets z to the exact value of x and returns z. Signed zeros,
// NaNs, and infinities are preserved in z's form.
func (z *Big) SetFloat64(x float64) *Big {
	if x == 0 {
		var sign form
		if math.Signbit(x) {
			sign = signbit
		}
		return z.setZero(sign, 0)
	}
	if math.IsNaN(x) {
		var sign form
		if math.Signbit(x) {
			sign = signbit
		}
		return z.setNaN(0, qnan|sign, 0)
	}
	if math.IsInf(x, 0) {
		if math.IsInf(x, 1) {
			z.form = pinf
		} else {
			z.form = ninf
		}
		return z
	}
	// The gist of the following is lifted from math/big/rat.go, but adapted for
	// base-10 decimals.
	const expMask = 1<<11 - 1
	// Decompose the IEEE-754 bits into mantissa and unbiased exponent.
	bits := math.Float64bits(x)
	mantissa := bits & (1<<52 - 1)
	exp := int((bits >> 52) & expMask)
	if exp == 0 { // denormal
		exp -= 1022
	} else { // normal
		mantissa |= 1 << 52
		exp -= 1023
	}
	if mantissa == 0 {
		return z.SetUint64(0)
	}
	// Normalize: strip trailing zero bits from the mantissa into the shift.
	shift := 52 - exp
	for mantissa&1 == 0 && shift > 0 {
		mantissa >>= 1
		shift--
	}
	z.exp = 0
	z.form = finite | form(bits>>63)
	if shift > 0 {
		// x = mantissa / 2^shift = (mantissa * 5^shift) / 10^shift.
		z.unscaled.SetUint64(uint64(shift))
		z.unscaled.Exp(c.FiveInt, &z.unscaled, nil)
		arith.Mul(&z.unscaled, &z.unscaled, mantissa)
		z.exp = -shift
	} else {
		// TODO(eric): figure out why this doesn't work for _some_ numbers. See
		// https://github.com/ericlagergren/decimal/issues/89
		//
		// z.compact = mantissa << uint(-shift)
		// z.precision = arith.Length(z.compact)
		z.compact = c.Inflated
		z.unscaled.SetUint64(mantissa)
		z.unscaled.Lsh(&z.unscaled, uint(-shift))
	}
	return z.norm()
}
|
go
|
{
"resource": ""
}
|
q5386
|
SetInf
|
train
|
// SetInf sets z to -Inf when signbit is true, +Inf otherwise, and returns z.
func (z *Big) SetInf(signbit bool) *Big {
	z.form = pinf
	if signbit {
		z.form = ninf
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5387
|
SetMantScale
|
train
|
// SetMantScale sets z to the value * 10^-scale and returns z.
func (z *Big) SetMantScale(value int64, scale int) *Big {
	neg := value < 0
	z.SetUint64(arith.Abs(value))
	z.exp = -scale // compiler should optimize out z.exp = 0 in SetUint64
	if neg {
		z.form |= signbit
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5388
|
setNaN
|
train
|
// setNaN sets z to a NaN of form f carrying payload p, ORing c into the
// Context's Conditions. In the Go operating mode a NaN is a panic (ErrNaN).
func (z *Big) setNaN(c Condition, f form, p Payload) *Big {
	z.form = f
	z.compact = uint64(p) // compact doubles as the NaN payload
	z.Context.Conditions |= c
	if z.Context.OperatingMode == Go {
		panic(ErrNaN{Msg: z.Context.Conditions.String()})
	}
	return z
}
|
go
|
{
"resource": ""
}
|
q5389
|
SetNaN
|
train
|
// SetNaN sets z to a signaling NaN when signal is true, otherwise a quiet
// NaN, clearing the payload, and returns z.
func (z *Big) SetNaN(signal bool) *Big {
	z.form = qnan
	if signal {
		z.form = snan
	}
	z.compact = 0 // payload
	return z
}
|
go
|
{
"resource": ""
}
|
q5390
|
SetRat
|
train
|
// SetRat sets z to the (possibly rounded) value of x and returns z.
// Non-integral rationals are computed as numerator / denominator under z's
// Context.
func (z *Big) SetRat(x *big.Rat) *Big {
	if x.IsInt() {
		// Integral fast path: the numerator alone is the value.
		return z.Context.round(z.SetBigMantScale(x.Num(), 0))
	}
	var num, denom Big
	num.SetBigMantScale(x.Num(), 0)
	denom.SetBigMantScale(x.Denom(), 0)
	return z.Quo(&num, &denom)
}
|
go
|
{
"resource": ""
}
|
q5391
|
SetScale
|
train
|
// SetScale sets z's scale to scale (stored internally as exponent -scale)
// and returns z. The mantissa is left untouched.
func (z *Big) SetScale(scale int) *Big {
	z.exp = -scale
	return z
}
|
go
|
{
"resource": ""
}
|
q5392
|
ord
|
train
|
// ord returns a coarse ordinal for x: ±2 for infinities, otherwise the sign
// of x (-1, 0, +1). When abs is true, negative ordinals are folded positive.
func (x *Big) ord(abs bool) int {
	if x.form&inf != 0 {
		if abs || x.form == pinf {
			return +2
		}
		return -2
	}
	sign := x.Sign()
	if abs && sign < 0 {
		return -sign
	}
	return sign
}
|
go
|
{
"resource": ""
}
|
q5393
|
Signbit
|
train
|
// Signbit reports whether x's sign bit is set (negative values, -0, -Inf,
// and negative NaN forms).
func (x *Big) Signbit() bool {
	if debug {
		x.validate()
	}
	return (x.form & signbit) == signbit
}
|
go
|
{
"resource": ""
}
|
q5394
|
String
|
train
|
// String returns the string representation of x using the operating mode's
// scientific-exponent character. A nil receiver formats as "<nil>".
func (x *Big) String() string {
	if x == nil {
		return "<nil>"
	}
	var (
		b = new(strings.Builder)
		f = formatter{w: b, prec: x.Precision(), width: noWidth}
		e = sciE[x.Context.OperatingMode]
	)
	// Pre-size the builder to roughly the digit count.
	b.Grow(x.Precision())
	f.format(x, normal, e)
	return b.String()
}
|
go
|
{
"resource": ""
}
|
q5395
|
validate
|
train
|
// validate checks x's internal invariants, panicking with a dump of x's raw
// fields on violation. Callers gate it behind the debug flag.
func (x *Big) validate() {
	defer func() {
		if err := recover(); err != nil {
			// Report the original caller (depth 4 skips the panic/defer
			// frames) before re-panicking with a field dump.
			pc, _, _, ok := runtime.Caller(4)
			if caller := runtime.FuncForPC(pc); ok && caller != nil {
				fmt.Println("called by:", caller.Name())
			}
			// Local mirror of Big so the conversion below prints raw fields
			// with %#v instead of going through Big's formatting methods.
			type Big struct {
				Context Context
				unscaled big.Int
				compact uint64
				exp int
				precision int
				form form
			}
			fmt.Printf("%#v\n", (*Big)(x))
			panic(err)
		}
	}()
	switch x.form {
	case finite, finite | signbit:
		if x.isInflated() {
			// An inflated value must genuinely not fit in compact form.
			if x.unscaled.IsUint64() && x.unscaled.Uint64() != c.Inflated {
				panic(fmt.Sprintf("inflated but unscaled == %d", x.unscaled.Uint64()))
			}
			// The sign lives in form, never in the mantissa.
			if x.unscaled.Sign() < 0 {
				panic("x.unscaled.Sign() < 0")
			}
			if bl, xp := arith.BigLength(&x.unscaled), x.precision; bl != xp {
				panic(fmt.Sprintf("BigLength (%d) != x.Precision (%d)", bl, xp))
			}
		}
		if x.isCompact() {
			if bl, xp := arith.Length(x.compact), x.Precision(); bl != xp {
				panic(fmt.Sprintf("BigLength (%d) != x.Precision() (%d)", bl, xp))
			}
		}
	case snan, ssnan, qnan, sqnan, pinf, ninf:
		// OK
	case nan:
		// A bare nan bit without quiet/signaling qualification is invalid.
		panic(x.form.String())
	default:
		panic(fmt.Sprintf("invalid form %s", x.form))
	}
}
|
go
|
{
"resource": ""
}
|
q5396
|
Digits
|
train
|
// Digits returns a random zero-padded string of exactly the given number of
// decimal digits (e.g. Digits(4) may return "0042").
func Digits(digits int) string {
	max := int(math.Pow10(digits)) - 1
	// Intn's upper bound is exclusive, so pass max+1; otherwise the all-nines
	// value (10^digits - 1) could never be generated.
	num := privateRand.Intn(max + 1)
	format := fmt.Sprintf("%%0%dd", digits)
	return fmt.Sprintf(format, num)
}
|
go
|
{
"resource": ""
}
|
q5397
|
BoundedDigits
|
train
|
// BoundedDigits returns a random zero-padded string of the given number of
// decimal digits, drawn uniformly from [low, high] (bounds are swapped if
// reversed and clamped to what fits in the digit count).
func BoundedDigits(digits, low, high int) string {
	if low > high {
		low, high = high, low
	}
	max := int(math.Pow10(digits)) - 1
	if high > max {
		high = max
	}
	// Clamp low as well: if both bounds exceed the representable range,
	// high-low+1 would be non-positive and Intn would panic.
	if low > max {
		low = max
	}
	num := privateRand.Intn(high-low+1) + low
	format := fmt.Sprintf("%%0%dd", digits)
	return fmt.Sprintf(format, num)
}
|
go
|
{
"resource": ""
}
|
q5398
|
Title
|
train
|
// Title returns a random title for the given gender; any other value falls
// back to a first name of a randomly chosen gender.
func Title(gender int) string {
	switch gender {
	case Male:
		return randomFrom(jsonData.MaleTitles)
	case Female:
		return randomFrom(jsonData.FemaleTitles)
	default:
		return FirstName(privateRand.Intn(2))
	}
}
|
go
|
{
"resource": ""
}
|
q5399
|
FirstName
|
train
|
// FirstName returns a random first name for the given gender; any other value
// falls back to a first name of a randomly chosen gender.
func FirstName(gender int) string {
	switch gender {
	case Male:
		return randomFrom(jsonData.FirstNamesMale)
	case Female:
		return randomFrom(jsonData.FirstNamesFemale)
	default:
		// Use the package's seeded source, consistent with Title, Digits,
		// and BoundedDigits (previously this used the global rand source).
		return FirstName(privateRand.Intn(2))
	}
}
|
go
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.