internal/runtime/atomic.Int32.Load (method)

53 uses

	internal/runtime/atomic (current package)
		types.go#L20: func (i *Int32) Load() int32 {

	runtime
		chan.go#L790: 	async := debug.asynctimerchan.Load() != 0
		chan.go#L808: 		async := debug.asynctimerchan.Load() != 0
		cpuprof.go#L113: 	if prof.hz.Load() != 0 { // implies cpuprof.log != nil
		metrics.go#L288: 				out.scalar = uint64(gcController.gcPercent.Load())
		mgc.go#L611: 		if gcController.gcPercent.Load() < 0 {
		mgcpacer.go#L491: 	gcPercent := c.gcPercent.Load()
		mgcpacer.go#L1203: 	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		mgcpacer.go#L1248: 	out := c.gcPercent.Load()
		mprof.go#L800: 	if debug.runtimeContentionStacks.Load() == 0 {
		panic.go#L743: 		if debug.panicnil.Load() != 1 {
		proc.go#L373: 	if !gp.preempt && sched.npidle.Load() > 0 {
		proc.go#L3030: 	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
		proc.go#L3060: 	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		proc.go#L3093: 	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		proc.go#L3360: 	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		proc.go#L3928: 	npidle := int(sched.npidle.Load())
		proc.go#L4942: 		for pendingPreemptSignals.Load() > 0 {
		proc.go#L5371: 	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
		proc.go#L5409: 	if prof.hz.Load() == 0 {
		proc.go#L5501: 	if prof.hz.Load() != 0 {
		proc.go#L5547: 	if prof.hz.Load() != hz {
		proc.go#L6081: 		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
		proc.go#L6083: 			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
		proc.go#L6247: 			if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
		proc.go#L6341: 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
		proc.go#L6437: 		for ; n != 0 && sched.npidle.Load() != 0; n-- {
		proc.go#L7156: 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		signal_unix.go#L516: 	if prof.hz.Load() != 0 {
		signal_unix.go#L539: 	if prof.hz.Load() != 0 {
		signal_unix.go#L750: 	if crashing.Load() == 0 {
		signal_unix.go#L760: 		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
		signal_unix.go#L764: 		} else if crashing.Load() == 0 {
		signal_unix.go#L781: 		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
		signal_unix.go#L802: 			maxCrashing := crashing.Load()
		signal_unix.go#L803: 			for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
		signal_unix.go#L807: 				if c := crashing.Load(); c > maxCrashing {
		signal_unix.go#L815: 			c := crashing.Load()
		signal_unix.go#L819: 				c = crashing.Load()
		time.go#L425: 	async := debug.asynctimerchan.Load() != 0
		time.go#L455: 		if t.period == 0 && t.isSending.Load() > 0 {
		time.go#L506: 	async := debug.asynctimerchan.Load() != 0
		time.go#L559: 		if oldPeriod == 0 && t.isSending.Load() > 0 {
		time.go#L912: 	zombies := ts.zombies.Load()
		time.go#L942: 		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
		time.go#L1051: 	async := debug.asynctimerchan.Load() != 0
		trace.go#L254: 	if debug.traceallocfree.Load() != 0 {
		trace.go#L275: 	for trace.exitingSyscall.Load() != 0 {
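
The pattern in these call sites, polling a shared int32 counter or flag without taking a lock, carries over directly to user code via the exported sync/atomic.Int32, which offers the same Load method as this internal type. A minimal sketch under that assumption; the variable name hz and the goroutine count are illustrative, loosely echoing the prof.hz checks above:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		// hz plays the role of a setting read concurrently by many
		// goroutines, similar to prof.hz in the listing above.
		var hz atomic.Int32
		hz.Store(100)

		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Load atomically reads the current value, so this
				// check is safe without any additional locking.
				if v := hz.Load(); v != 0 {
					fmt.Println("profiling enabled at", v, "Hz")
				}
			}()
		}
		wg.Wait()
	}

The runtime cannot import sync/atomic, which is why it keeps this parallel implementation in internal/runtime/atomic; outside the runtime, sync/atomic.Int32 is the supported way to get the same behavior.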