func unsafe.Sizeof
177 uses
unsafe (current package)
unsafe.go#L198: func Sizeof(x ArbitraryType) uintptr
internal/abi
type.go#L544: uadd := unsafe.Sizeof(*t)
type.go#L546: uadd += unsafe.Sizeof(UncommonType{})
type.go#L558: uadd := unsafe.Sizeof(*t)
type.go#L560: uadd += unsafe.Sizeof(UncommonType{})
internal/poll
splice_linux.go#L185: _ [24 - unsafe.Sizeof(splicePipeFields{})%24]byte
net
cgo_unix.go#L318: resStateSize := unsafe.Sizeof(_C_struct___res_state{})
os
dirent_linux.go#L13: return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino))
dirent_linux.go#L17: return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen))
reflect
type.go#L2569: ut.Moff = uint32(unsafe.Sizeof(uncommonType{}))
runtime
alg.go#L38: size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
arena.go#L231: return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
arena.go#L1117: *(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
cgo_sigaction.go#L29: msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
cgo_sigaction.go#L32: asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
cgo_sigaction.go#L83: msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
cgo_sigaction.go#L86: asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
chan.go#L29: hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
covercounter.go#L15: u32sz := unsafe.Sizeof(uint32(0))
debuglog.go#L79: l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
debuglog.go#L728: state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
defs_linux_amd64.go#L136: _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
defs_linux_amd64.go#L161: _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
heapdump.go#L680: memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
iface.go#L77: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
iface.go#L423: x = mallocgc(unsafe.Sizeof(val), stringType, true)
iface.go#L443: x = mallocgc(unsafe.Sizeof(val), sliceType, true)
iface.go#L525: newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
iface.go#L631: newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
malloc.go#L559: const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
malloc.go#L755: l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
malloc.go#L760: sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
malloc.go#L762: sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
malloc.go#L771: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L773: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L899: sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
map.go#L888: if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
mbitmap.go#L723: spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
mcheckmark.go#L48: bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
mfinal.go#L29: fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L114: if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
mfinal.go#L214: framesz := unsafe.Sizeof((any)(nil)) + f.nret
mfixalloc.go#L60: size = max(size, unsafe.Sizeof(mlink{}))
mgc.go#L178: if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
mgcmark.go#L181: scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
mgcstack.go#L128: obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
mgcstack.go#L138: if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcstack.go#L141: if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcwork.go#L328: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
mgcwork.go#L443: memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
mheap.go#L201: pad [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
mheap.go#L542: sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
mheap.go#L743: h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
mheap.go#L744: h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
mheap.go#L745: h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
mheap.go#L746: h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
mheap.go#L747: h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
mheap.go#L748: h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
mheap.go#L749: h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
mheap.go#L750: h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
mheap.go#L2355: const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
minmax.go#L55: switch unsafe.Sizeof(x) {
minmax.go#L65: switch unsafe.Sizeof(x) {
mpagealloc.go#L403: const l2Size = unsafe.Sizeof(*p.chunks[0])
mpagealloc.go#L472: sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
mpagealloc.go#L973: pallocSumBytes = unsafe.Sizeof(pallocSum(0))
mpagealloc_64bit.go#L196: scSize := unsafe.Sizeof(atomicScavChunkData{})
mpagealloc_64bit.go#L250: nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
mpagecache.go#L12: const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
mprof.go#L229: size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
mprof.go#L234: size += unsafe.Sizeof(memRecord{})
mprof.go#L236: size += unsafe.Sizeof(blockRecord{})
mprof.go#L248: stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
mprof.go#L261: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L270: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L282: bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
mprof.go#L1077: racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
mprof.go#L1080: msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
mprof.go#L1083: asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
mprof.go#L1191: racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
mprof.go#L1194: msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
mprof.go#L1197: asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
mranges.go#L258: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
mranges.go#L385: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
mranges.go#L455: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
mspanset.go#L316: return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
mstats.go#L341: if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
mwbbuf.go#L87: b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
mwbbuf.go#L90: if (b.end-b.next)%unsafe.Sizeof(b.buf[0]) != 0 {
mwbbuf.go#L198: n := (pp.wbBuf.next - start) / unsafe.Sizeof(pp.wbBuf.buf[0])
netpoll.go#L691: const pdSize = unsafe.Sizeof(pollDesc{})
netpoll_epoll.go#L74: oneSize := int32(unsafe.Sizeof(one))
netpoll_epoll.go#L149: read(int32(netpollEventFd), noescape(unsafe.Pointer(&one)), int32(unsafe.Sizeof(one)))
os_linux.go#L107: r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
os_linux.go#L279: n = read(fd, noescape(unsafe.Pointer(&auxvreadbuf[0])), int32(unsafe.Sizeof(auxvreadbuf)))
os_linux.go#L434: rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
os_linux.go#L520: if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
panic.go#L696: fd = add(fd, unsafe.Sizeof(b))
pinner.go#L83: pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
print.go#L272: minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
proc.go#L2794: msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
proc.go#L2797: asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
proc.go#L5027: memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
runtime1.go#L168: if unsafe.Sizeof(a) != 1 {
runtime1.go#L171: if unsafe.Sizeof(b) != 1 {
runtime1.go#L174: if unsafe.Sizeof(c) != 2 {
runtime1.go#L177: if unsafe.Sizeof(d) != 2 {
runtime1.go#L180: if unsafe.Sizeof(e) != 4 {
runtime1.go#L183: if unsafe.Sizeof(f) != 4 {
runtime1.go#L186: if unsafe.Sizeof(g) != 8 {
runtime1.go#L189: if unsafe.Sizeof(h) != 8 {
runtime1.go#L192: if unsafe.Sizeof(i) != 4 {
runtime1.go#L195: if unsafe.Sizeof(j) != 8 {
runtime1.go#L198: if unsafe.Sizeof(k) != goarch.PtrSize {
runtime1.go#L201: if unsafe.Sizeof(l) != goarch.PtrSize {
runtime1.go#L204: if unsafe.Sizeof(x1) != 1 {
runtime1.go#L210: if unsafe.Sizeof(y1) != 2 {
sema.go#L53: pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
sema.go#L684: if sz != unsafe.Sizeof(notifyList{}) {
sema.go#L685: print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
stack.go#L152: _ [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
stkframe.go#L238: unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect {
string.go#L215: uintptr(len(a))*unsafe.Sizeof(a[0]),
string.go#L220: msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
string.go#L223: asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
symtab.go#L889: ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
symtab.go#L1179: return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
symtab.go#L1223: p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4
trace.go#L672: sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
traceback.go#L1635: msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
traceback.go#L1638: asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
traceback.go#L1660: msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
traceback.go#L1663: asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
tracebuf.go#L95: w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
tracebuf.go#L178: arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
tracemap.go#L125: meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
traceregion.go#L40: const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
traceregion.go#L80: block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
traceregion.go#L105: sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
traceregion.go#L108: sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
tracestack.go#L142: id, _ := t.tab.put(noescape(unsafe.Pointer(&pcs[0])), uintptr(len(pcs))*unsafe.Sizeof(uintptr(0)))
tracestack.go#L160: stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&node.data[0])), uintptr(len(node.data))/unsafe.Sizeof(uintptr(0)))
vdso_linux.go#L53: vdsoSymTabSize = vdsoArrayMax / unsafe.Sizeof(elfSym{})
vdso_linux.go#L54: vdsoDynSize = vdsoArrayMax / unsafe.Sizeof(elfDyn{})
vdso_linux.go#L61: vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32
vdso_linux.go#L113: pt := (*elfPhdr)(add(pt, uintptr(i)*unsafe.Sizeof(elfPhdr{})))
slices
slices.go#L442: elemSize := unsafe.Sizeof(a[0])
sync
pool.go#L77: pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
pool.go#L301: lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
runtime.go#L53: runtime_notifyListCheck(unsafe.Sizeof(n))
syscall
exec_linux.go#L165: RawSyscall(SYS_WRITE, uintptr(mapPipe[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
exec_linux.go#L330: pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0)
exec_linux.go#L366: pid, _, err1 = RawSyscall(SYS_READ, uintptr(mapPipe[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
exec_linux.go#L370: if pid != unsafe.Sizeof(err2) {
exec_linux.go#L658: RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
exec_unix.go#L220: n, err = readlen(p[0], (*byte)(unsafe.Pointer(&err1)), int(unsafe.Sizeof(err1)))
exec_unix.go#L227: if n == int(unsafe.Sizeof(err1)) {
lsf_linux.go#L79: return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, unsafe.Pointer(&p), unsafe.Sizeof(p))
lsf_linux.go#L85: return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, unsafe.Pointer(&dummy), unsafe.Sizeof(dummy))
sockcmsg_linux.go#L34: if uintptr(len(m.Data)) < unsafe.Sizeof(Ucred{}) {
syscall_linux.go#L761: return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
syscall_linux.go#L959: iov.SetLen(int(unsafe.Sizeof(*regsout)))
syscall_linux.go#L966: iov.SetLen(int(unsafe.Sizeof(*regs)))
syscall_linux.go#L1006: return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
syscall_linux.go#L1010: return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
syscall_unix.go#L496: return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
The pages are generated with Golds v0.7.6. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the QR code on the left) to get the latest news about Golds.