// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// Note: the exact paths of the runtime-internal atomic and sys packages vary slightly across Go releases.
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/stringslite"
"runtime/internal/sys"
"unsafe"
)
// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32
const (
// throwTypeNone means that we are not throwing.
throwTypeNone throwType = iota
// throwTypeUser is a throw due to a problem with the application.
//
// These throws do not include runtime frames, system goroutines, or
// frame metadata.
throwTypeUser
// throwTypeRuntime is a throw due to a problem with Go itself.
//
// These throws include as much information as possible to aid in
// debugging the runtime, including runtime frames, system goroutines,
// and frame metadata.
throwTypeRuntime
)
// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
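// For illustration only (editor's note, not part of the original source), the two
// cases described above, with hypothetical helpers cleanup and release:
//
//	func f() {
//		defer cleanup() // not in a loop: open-coded; fn/args go into fixed
//		...             // stack slots and a bit is set in the defer bitmask
//	}
//
//	func g(names []string) {
//		for _, n := range names {
//			defer release(n) // in a loop: deferproc links a _defer record
//		}                        // onto the chain, later run by deferreturn
//	}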
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
// Note: wasm can't tail call, so we can't get the original caller's pc.
throw(msg)
}
// TODO: is this redundant? How could we be in malloc
// but not in the runtime? runtime/internal/*, maybe?
gp := getg()
if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
throw(msg)
}
}
// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
// panic allocates, so to avoid recursive malloc, turn panics
// during malloc into throws.
gp := getg()
if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
throw(err)
}
}
// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.
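// For illustration only (editor's note, not part of the original source): an
// out-of-bounds index in ordinary user code, e.g.
//
//	func at(s []int, i int) int {
//		return s[i] // compiler-inserted bounds check
//	}
//
// fails by calling panicIndex (assembly), which tail calls goPanicIndex below and
// produces an "index out of range" runtime error, whereas a nil-pointer
// dereference reaches panicmem via runtime.sigpanic from the signal handler.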
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
panicCheck1(getcallerpc(), "index out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
panicCheck1(getcallerpc(), "index out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}
// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
panicCheck1(getcallerpc(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}
// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)
var shiftError = error(errorString("negative shift amount"))
//go:yeswritebarrierrec
func panicshift() {
panicCheck1(getcallerpc(), "negative shift amount")
panic(shiftError)
}
var divideError = error(errorString("integer divide by zero"))
//go:yeswritebarrierrec
func panicdivide() {
panicCheck2("integer divide by zero")
panic(divideError)
}
var overflowError = error(errorString("integer overflow"))
func panicoverflow() {
panicCheck2("integer overflow")
panic(overflowError)
}
var floatError = error(errorString("floating point error"))
func panicfloat() {
panicCheck2("floating point error")
panic(floatError)
}
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
func panicmem() {
panicCheck2("invalid memory address or nil pointer dereference")
panic(memoryError)
}
func panicmemAddr(addr uintptr) {
panicCheck2("invalid memory address or nil pointer dereference")
panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
d := newdefer()
d.link = gp._defer
gp._defer = d
d.fn = fn
d.pc = getcallerpc()
// We must not be preempted between calling getcallersp and
// storing it to d.sp because getcallersp's result is a
// uintptr stack pointer.
d.sp = getcallersp()
// deferproc returns 0 normally.
// a deferred func that stops a panic
// makes the deferproc return 1.
// the code the compiler generates always
// checks the return value and jumps to the
// end of the function if deferproc returns != 0.
return0()
// No code can go here - the C return register has
// been set and must not be clobbered.
}
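// For illustration only (editor's note, not part of the original source), the
// lowering described in the comments above, in rough pseudo-Go for "defer f()":
//
//	if deferproc(f) != 0 {
//		goto end // deferproc returns 1 when a deferred call recovered a panic
//	}
//	... // rest of the function body
//	end:
//	deferreturn()
//	return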
var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))
//go:noinline
func panicrangestate(state int) {
switch abi.RF_State(state) {
case abi.RF_DONE:
panic(rangeDoneError)
case abi.RF_PANIC:
panic(rangePanicError)
case abi.RF_EXHAUSTED:
panic(rangeExhaustedError)
case abi.RF_MISSING_PANIC:
panic(rangeMissingPanicError)
}
throw("unexpected state passed to panicrangestate")
}
// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
// g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//                         | .head
//                         |
//                         +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
// g._defer => drangefunc -> d2 -> d1 -> nil
//             | .head
//             |
//             +--> dY -> dX -> nil
//
// into this list:
//
// g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
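// For illustration only (editor's note, not part of the original source), the kind
// of user code that exercises this machinery, with hypothetical names:
//
//	func walk(seq func(yield func(string) bool)) {
//		for name := range seq { // range-over-function loop
//			defer println(name) // defer inside the loop body
//		}
//	}
//
// The compiler rewrites the loop body into a synthesized func literal; walk calls
// deferrangefunc once to obtain the token, and the rewritten body calls
// deferprocat with that token so its defers attach to walk's frame, as described above.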
func deferrangefunc() any {
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
fn := findfunc(getcallerpc())
if fn.deferreturn == 0 {
throw("no deferreturn")
}
d := newdefer()
d.link = gp._defer
gp._defer = d
d.pc = fn.entry() + uintptr(fn.deferreturn)
// We must not be preempted between calling getcallersp and
// storing it to d.sp because getcallersp's result is a
// uintptr stack pointer.
d.sp = getcallersp()
d.rangefunc = true
d.head = new(atomic.Pointer[_defer])
return d.head
}
// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
return (*_defer)(unsafe.Pointer(uintptr(1)))
}
// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
head := frame.(*atomic.Pointer[_defer])
if raceenabled {
racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
}
d1 := newdefer()
d1.fn = fn
for {
d1.link = head.Load()
if d1.link == badDefer() {
throw("defer after range func returned")
}
if head.CompareAndSwap(d1.link, d1) {
break
}
}
// Must be last - see deferproc above.
return0()
}
// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
head := d0.head
if raceenabled {
racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
}
tail := d0.link
d0.rangefunc = false
var d *_defer
for {
d = head.Load()
if head.CompareAndSwap(d, badDefer()) {
break
}
}
if d == nil {
return
}
for d1 := d; ; d1 = d1.link {
d1.sp = d0.sp
d1.pc = d0.pc
if d1.link == nil {
d1.link = tail
break
}
}
d0.link = d
return
}
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
// fn is already set.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
d.heap = false
d.rangefunc = false
d.sp = getcallersp()
d.pc = getcallerpc()
// The lines below implement:
// d.panic = nil
// d.fd = nil
// d.link = gp._defer
// d.head = nil
// gp._defer = d
// But without write barriers. The first three are writes to
// the stack so they don't need a write barrier, and furthermore
// are to uninitialized memory, so they must not use a write barrier.
// The fourth write does not require a write barrier because we
// explicitly mark all the defer structures, so we don't need to
// keep track of pointers to them with a write barrier.
*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
*(*uintptr)(unsafe.Pointer(&d.head)) = 0
*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
return0()
// No code can go here - the C return register has
// been set and must not be clobbered.
}
// Each P holds a pool for defers.
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
var d *_defer
mp := acquirem()
pp := mp.p.ptr()
if len(pp.deferpool) == 0 && sched.deferpool != nil {
lock(&sched.deferlock)
for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
d := sched.deferpool
sched.deferpool = d.link
d.link = nil
pp.deferpool = append(pp.deferpool, d)
}
unlock(&sched.deferlock)
}
if n := len(pp.deferpool); n > 0 {
d = pp.deferpool[n-1]
pp.deferpool[n-1] = nil
pp.deferpool = pp.deferpool[:n-1]
}
releasem(mp)
mp, pp = nil, nil
if d == nil {
// Allocate new defer.
d = new(_defer)
}
d.heap = true
return d
}
// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
d := gp._defer
d.fn = nil // Can in theory point to the stack
// We must not copy the stack between the updating gp._defer and setting
// d.link to nil. Between these two steps, d is not on any defer list, so
// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
// if we were to copy the stack, d could then contain a stale pointer.
gp._defer = d.link
d.link = nil
// After this point we can copy the stack.
if !d.heap {
return
}
mp := acquirem()
pp := mp.p.ptr()
if len(pp.deferpool) == cap(pp.deferpool) {
// Transfer half of local cache to the central cache.
var first, last *_defer
for len(pp.deferpool) > cap(pp.deferpool)/2 {
n := len(pp.deferpool)
d := pp.deferpool[n-1]
pp.deferpool[n-1] = nil
pp.deferpool = pp.deferpool[:n-1]
if first == nil {
first = d
} else {
last.link = d
}
last = d
}
lock(&sched.deferlock)
last.link = sched.deferpool
sched.deferpool = first
unlock(&sched.deferlock)
}
*d = _defer{}
pp.deferpool = append(pp.deferpool, d)
releasem(mp)
mp, pp = nil, nil
}
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
var p _panic
p.deferreturn = true
p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
}
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
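// For illustration only (editor's note, not part of the original source), a typical
// use inside a goroutine, with hypothetical cleanup, shuttingDown, and work:
//
//	go func() {
//		defer cleanup() // still runs; recover() inside it returns nil
//		if shuttingDown {
//			runtime.Goexit() // runs deferred calls, then ends only this goroutine
//		}
//		work()
//	}()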
func Goexit() {
// Create a panic object for Goexit, so we can recognize when it might be
// bypassed by a recover().
var p _panic
p.goexit = true
p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
goexit1()
}
// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
defer func() {
text := "panic while printing panic value"
switch r := recover().(type) {
case nil:
// nothing to do
case string:
throw(text + ": " + r)
default:
throw(text + ": type " + toRType(efaceOf(&r)._type).string())
}
}()
for p != nil {
switch v := p.arg.(type) {
case error:
p.arg = v.Error()
case stringer:
p.arg = v.String()
}
p = p.link
}
}
// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
if p.link != nil {
printpanics(p.link)
if !p.link.goexit {
print("\t")
}
}
if p.goexit {
return
}
print("panic: ")
printpanicval(p.arg)
if p.recovered {
print(" [recovered]")
}
print("\n")
}
// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same with runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
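// For illustration only (editor's note, not part of the original source): the varint
// format read below stores 7 bits per byte, least-significant group first, with the
// high bit of a byte meaning "more bytes follow". For example, the byte sequence
// 0x85 0x01 decodes as (0x85&0x7F) + (0x01 << 7) = 5 + 128 = 133.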
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
var r uint32
var shift int
for {
b := *(*uint8)(fd)
fd = add(fd, unsafe.Sizeof(b))
if b < 128 {
return r + uint32(b)<<shift, fd
}
r += uint32(b&0x7F) << (shift & 31)
shift += 7
if shift > 28 {
panic("Bad varint")
}
}
}
// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
// This field makes PanicNilError structurally different from
// any other struct in this package, and the _ makes it different
// from any struct in other packages too.
// This avoids any accidental conversions being possible
// between this struct and some other struct sharing the same fields,
// like happened in go.dev/issue/56603.
_ [0]*PanicNilError
}
func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
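// For illustration only (editor's note, not part of the original source): observing
// the Go 1.21+ behavior from user code:
//
//	defer func() {
//		r := recover()
//		if _, ok := r.(*runtime.PanicNilError); ok {
//			// panic(nil) was called somewhere below
//		}
//	}()
//	panic(nil)
//
// With GODEBUG=panicnil=1, recover() returns nil here instead, as described above.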
var panicnil = &godebugInc{name: "panicnil"}
// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - go.undefinedlabs.com/scopeagent
// - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
if e == nil {
if debug.panicnil.Load() != 1 {
e = new(PanicNilError)
} else {
panicnil.IncNonDefault()
}
}
gp := getg()
if gp.m.curg != gp {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic on system stack")
}
if gp.m.mallocing != 0 {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic during malloc")
}
if gp.m.preemptoff != "" {
print("panic: ")
printpanicval(e)
print("\n")
print("preempt off reason: ")
print(gp.m.preemptoff)
print("\n")
throw("panic during preemptoff")
}
if gp.m.locks != 0 {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic holding locks")
}
var p _panic
p.arg = e
runningPanicDefers.Add(1)
p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
// If we're tracing, flush the current generation to make the trace more
// readable.
//
// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
// Currently it would hang. Not handled now because it is very unlikely, and
// already unrecoverable.
if traceEnabled() {
traceAdvance(false)
}
// ran out of deferred calls - old-school panic now
// Because it is unsafe to call arbitrary user code after freezing
// the world, we call preprintpanics to invoke all necessary Error
// and String methods to prepare the panic strings before startpanic.
preprintpanics(&p)
fatalpanic(&p) // should not return
*(*int)(nil) = 0 // not reached
}
// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
gp := getg()
// Record the caller's PC and SP, so recovery can identify panics
// that have been recovered. Also, so that if p is from Goexit, we
// can restart its defer processing loop if a recovered panic tries
// to jump past it.
p.startPC = getcallerpc()
p.startSP = unsafe.Pointer(getcallersp())
if p.deferreturn {
p.sp = sp
if s := (*savedOpenDeferState)(gp.param); s != nil {
// recovery saved some state for us, so that we can resume
// calling open-coded defers without unwinding the stack.
gp.param = nil
p.retpc = s.retpc
p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
p.slotsPtr = add(sp, s.slotsOffset)
}
return
}
p.link = gp._panic
gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))
// Initialize state machine, and find the first frame with a defer.
//
// Note: We could use startPC and startSP here, but callers will
// never have defer statements themselves. By starting at their
// caller instead, we avoid needing to unwind through an extra
// frame. It also somewhat simplifies the terminating condition for
// deferreturn.
p.lr, p.fp = pc, sp
p.nextFrame()
}
// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
gp := getg()
if !p.deferreturn {
if gp._panic != p {
throw("bad panic stack")
}
if p.recovered {
mcall(recovery) // does not return
throw("recovery failed")
}
}
// The assembler adjusts p.argp in wrapper functions that shouldn't
// be visible to recover(), so we need to restore it each iteration.
p.argp = add(p.startSP, sys.MinFrameSize)
for {
for p.deferBitsPtr != nil {
bits := *p.deferBitsPtr
// Check whether any open-coded defers are still pending.
//
// Note: We need to check this upfront (rather than after
// clearing the top bit) because it's possible that Goexit
// invokes a deferred call, and there were still more pending
// open-coded defers in the frame; but then the deferred call
// panicked and invoked the remaining defers in the frame, before
// recovering and restarting the Goexit loop.
if bits == 0 {
p.deferBitsPtr = nil
break
}
// Find index of top bit set.
i := 7 - uintptr(sys.LeadingZeros8(bits))
// Clear bit and store it back.
bits &^= 1 << i
*p.deferBitsPtr = bits
return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
}
Recheck:
if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
if d.rangefunc {
deferconvert(d)
popDefer(gp)
goto Recheck
}
fn := d.fn
// TODO(mdempsky): Instead of having each deferproc call have
// its own "deferreturn(); return" sequence, we should just make
// them reuse the one we emit for open-coded defers.
p.retpc = d.pc
// Unlink and free.
popDefer(gp)
return fn, true
}
if !p.nextFrame() {
return nil, false
}
}
}
// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
if p.lr == 0 {
return false
}
gp := getg()
systemstack(func() {
var limit uintptr
if d := gp._defer; d != nil {
limit = d.sp
}
var u unwinder
u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
for {
if !u.valid() {
p.lr = 0
return // ok == false
}
// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
// every frame containing a defer (not just open-coded defers),
// then we can simply loop until we find the next frame where
// it's non-zero.
if u.frame.sp == limit {
break // found a frame with linked defers
}
if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
break // found a frame with open-coded defers
}
u.next()
}
p.lr = u.frame.lr
p.sp = unsafe.Pointer(u.frame.sp)
p.fp = unsafe.Pointer(u.frame.fp)
ok = true
})
return
}
func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
if fd == nil {
return false
}
if fn.deferreturn == 0 {
throw("missing deferreturn")
}
deferBitsOffset, fd := readvarintUnsafe(fd)
deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
if *deferBitsPtr == 0 {
return false // has open-coded defers, but none pending
}
slotsOffset, fd := readvarintUnsafe(fd)
p.retpc = fn.entry() + uintptr(fn.deferreturn)
p.deferBitsPtr = deferBitsPtr
p.slotsPtr = add(varp, -uintptr(slotsOffset))
return true
}
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
// Must be in a function running as part of a deferred call during the panic.
// Must be called from the topmost function of the call
// (the function used in the defer statement).
// p.argp is the argument pointer of that topmost deferred function call.
// Compare against argp reported by caller.
// If they match, the caller is the one who can recover.
gp := getg()
p := gp._panic
if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
p.recovered = true
return p.arg
}
return nil
}
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
throw(s)
}
//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
fatal(s)
}
// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// NOTE: temporarily marked "go:noinline" pending investigation/fix of
// issue #67274, so as to fix longtest builders.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/cockroachdb/pebble
// - github.com/dgraph-io/ristretto
// - github.com/outcaste-io/ristretto
// - github.com/pingcap/br
// - gvisor.dev/gvisor
// - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
// Everything throw does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
print("fatal error: ")
printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
print("\n")
})
fatalthrow(throwTypeRuntime)
}
// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
// Everything fatal does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
print("fatal error: ")
printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
print("\n")
})
fatalthrow(throwTypeUser)
}
// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32
// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32
// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers instead.
func recovery(gp *g) {
p := gp._panic
pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0
// Unwind the panic stack.
for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
// Don't allow jumping past a pending Goexit.
// Instead, have its _panic.start() call return again.
//
// TODO(mdempsky): In this case, Goexit will resume walking the
// stack where it left off, which means it will need to rewalk
// frames that we've already processed.
//
// There's a similar issue with nested panics, when the inner
// panic supersedes the outer panic. Again, we end up needing to
// walk the same stack frames.
//
// These are probably pretty rare occurrences in practice, and
// they don't seem any worse than the existing logic. But if we
// move the unwinding state into _panic, we could detect when we
// run into where the last panic started, and then just pick up
// where it left off instead.
//
// With how subtle defer handling is, this might not actually be
// worthwhile though.
if p.goexit {
pc, sp = p.startPC, uintptr(p.startSP)
saveOpenDeferState = false // goexit is unwinding the stack anyway
break
}
runningPanicDefers.Add(-1)
}
gp._panic = p
if p == nil { // must be done with signal
gp.sig = 0
}
if gp.param != nil {
throw("unexpected gp.param")
}
if saveOpenDeferState {
// If we're returning to deferreturn and there are more open-coded
// defers for it to call, save enough state for it to be able to
// pick up where p0 left off.
gp.param = unsafe.Pointer(&savedOpenDeferState{
retpc: p0.retpc,
// We need to save deferBitsPtr and slotsPtr too, but those are
// stack pointers. To avoid issues around heap objects pointing
// to the stack, save them as offsets from SP.
deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
slotsOffset: uintptr(p0.slotsPtr) - uintptr(p0.sp),
})
}
// TODO(mdempsky): Currently, we rely on frames containing "defer"
// to end with "CALL deferreturn; RET". This allows deferreturn to
// finish running any pending defers in the frame.
//
// But we should be able to tell whether there are still pending
// defers here. If there aren't, we can just jump directly to the
// "RET" instruction. And if there are, we don't need an actual
// "CALL deferreturn" instruction; we can simulate it with something
// like:
//
// if usesLR {
// lr = pc
// } else {
// sp -= sizeof(pc)
// *(*uintptr)(sp) = pc
// }
// pc = funcPC(deferreturn)
//
// So that we effectively tail call into deferreturn, such that it
// then returns to the simple "RET" epilogue. That would save the
// overhead of the "deferreturn" call when there aren't actually any
// pending defers left, and shrink the TEXT size of compiled
// binaries. (Admittedly, both of these are modest savings.)
// Ensure we're recovering within the appropriate stack.
if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
throw("bad recovery")
}
// Make the deferproc for this d return again,
// this time returning 1. The calling function will
// jump to the standard return epilogue.
gp.sched.sp = sp
gp.sched.pc = pc
gp.sched.lr = 0
// Restore the bp on platforms that support frame pointers.
// N.B. It's fine to not set anything for platforms that don't
// support frame pointers, since nothing consumes them.
switch {
case goarch.IsAmd64 != 0:
// on x86, fp actually points one word higher than the top of
// the frame since the return address is saved on the stack by
// the caller
gp.sched.bp = fp - 2*goarch.PtrSize
case goarch.IsArm64 != 0:
// on arm64, the architectural bp points one word higher
// than the sp. fp is totally useless to us here, because it
// only gets us to the caller's fp.
gp.sched.bp = sp - goarch.PtrSize
}
// The value in ret is delivered IN A REGISTER, even if there is a
// stack ABI.
gp.sched.ret = 1
gogo(&gp.sched)
}
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
pc := getcallerpc()
sp := getcallersp()
gp := getg()
if gp.m.throwing == throwTypeNone {
gp.m.throwing = t
}
// Switch to the system stack to avoid any stack growth, which may make
// things worse if the runtime is in a bad state.
systemstack(func() {
if isSecureMode() {
exit(2)
}
startpanic_m()
if dopanic_m(gp, pc, sp) {
// crash uses a decent amount of nosplit stack and we're already
// low on stack in throw, so crash on the system stack (unlike
// fatalpanic).
crash()
}
exit(2)
})
*(*int)(nil) = 0 // not reached
}
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
pc := getcallerpc()
sp := getcallersp()
gp := getg()
var docrash bool
// Switch to the system stack to avoid any stack growth, which
// may make things worse if the runtime is in a bad state.
systemstack(func() {
if startpanic_m() && msgs != nil {
// There were panic messages and startpanic_m
// says it's okay to try to print them.
// startpanic_m set panicking, which will
// block main from exiting, so now OK to
// decrement runningPanicDefers.
runningPanicDefers.Add(-1)
printpanics(msgs)
}
docrash = dopanic_m(gp, pc, sp)
})
if docrash {
// By crashing outside the above systemstack call, debuggers
// will not be confused when generating a backtrace.
// Function crash is marked nosplit to avoid stack growth.
crash()
}
systemstack(func() {
exit(2)
})
*(*int)(nil) = 0 // not reached
}
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
gp := getg()
if mheap_.cachealloc.size == 0 { // very early
print("runtime: panic before malloc heap initialized\n")
}
// Disallow malloc during an unrecoverable panic. A panic
// could happen in a signal handler, or in a throw, or inside
// malloc itself. We want to catch if an allocation ever does
// happen (even if we're not in one of these situations).
gp.m.mallocing++
// If we're dying because of a bad lock count, set it to a
// good lock count so we don't recursively panic below.
if gp.m.locks < 0 {
gp.m.locks = 1
}
switch gp.m.dying {
case 0:
// Setting dying >0 has the side-effect of disabling this G's writebuf.
gp.m.dying = 1
panicking.Add(1)
lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 {
schedtrace(true)
}
freezetheworld()
return true
case 1:
// Something failed while panicking.
// Just print a stack trace and exit.
gp.m.dying = 2
print("panic during panic\n")
return false
case 2:
// This is a genuine bug in the runtime, we couldn't even
// print the stack trace successfully.
gp.m.dying = 3
print("stack trace unavailable\n")
exit(4)
fallthrough
default:
// Can't even print! Just exit.
exit(5)
return false // Need to return something.
}
}
var didothers bool
var deadlock mutex
// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
if gp.sig != 0 {
signame := signame(gp.sig)
if signame != "" {
print("[signal ", signame)
} else {
print("[signal ", hex(gp.sig))
}
print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
}
level, all, docrash := gotraceback()
if level > 0 {
if gp != gp.m.curg {
all = true
}
if gp != gp.m.g0 {
print("\n")
goroutineheader(gp)
traceback(pc, sp, 0, gp)
} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
print("\nruntime stack:\n")
traceback(pc, sp, 0, gp)
}
if !didothers && all {
didothers = true
tracebackothers(gp)
}
}
unlock(&paniclk)
if panicking.Add(-1) != 0 {
// Some other m is panicking too.
// Let it print what it needs to print.
// Wait forever without chewing up cpu.
// It will exit when it's done.
lock(&deadlock)
lock(&deadlock)
}
printDebugLog()
return docrash
}
// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
gp := getg()
mp := acquirem()
// Is it okay for gp to panic instead of crashing the program?
// Yes, as long as it is running Go code, not runtime code,
// and not stuck in a system call.
if gp != mp.curg {
releasem(mp)
return false
}
// N.B. mp.locks != 1 instead of 0 to account for acquirem.
if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
releasem(mp)
return false
}
status := readgstatus(gp)
if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
releasem(mp)
return false
}
if GOOS == "windows" && mp.libcallsp != 0 {
releasem(mp)
return false
}
releasem(mp)
return true
}
// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
if pc == 0 {
// Probably a call to a nil func. The old LR is more
// useful in the stack trace. Not pushing the frame
// will make the trace look like a call to sigpanic
// instead. (Otherwise the trace will end at sigpanic
// and we won't get to see who faulted.)
return false
}
// If we don't recognize the PC as code, but we do recognize
// the link register as code, then this assumes the panic was
// caused by a call to non-code. In this case, we want to
// ignore this call to make unwinding show the context.
//
// If we're running C code, we're not going to recognize pc as a
// Go function, so just assume it's good. Otherwise, traceback
// may try to read a stale LR that looks like a Go code
// pointer and wander into the woods.
if gp.m.incgo || findfunc(pc).valid() {
// This wasn't a bad call, so use PC as sigpanic's
// return PC.
return true
}
if findfunc(lr).valid() {
// This was a bad call, but the LR is good, so use the
// LR as sigpanic's return PC.
return false
}
// Neither the PC or LR is good. Hopefully pushing a frame
// will work.
return true
}
// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
f := findfunc(pc)
if !f.valid() {
return false
}
return f.funcID == abi.FuncID_abort
}