const internal/goarch.PtrSize

337 uses
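
PtrSize is the size of a pointer in bytes: 4 on 32-bit platforms and 8 on 64-bit platforms (equivalently, the machine's native word size). The definition at goarch.go#L33 computes this as a constant without build tags: ^uintptr(0) is the all-ones uintptr value, so its bit 63 is set only when uintptr is 64 bits wide, making the shift evaluate to 1 (and the constant to 4<<1 = 8) on 64-bit targets and to 0 (leaving 4) on 32-bit targets. A minimal standalone sketch of the same trick (ptrSizeDemo is an illustrative name, not part of the package):

	package main

	import "fmt"

	// Mirrors goarch.go#L33: ^uintptr(0) >> 63 is 1 only when uintptr
	// is 64 bits wide, so the constant is 8 there and 4 on 32-bit.
	const ptrSizeDemo = 4 << (^uintptr(0) >> 63)

	func main() {
		fmt.Println(ptrSizeDemo) // 8 on 64-bit targets, 4 on 32-bit
	}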

	internal/goarch (current package)
		goarch.go#L33: const PtrSize = 4 << (^uintptr(0) >> 63)
		goarch.go#L49: const Int64Align = PtrSize
		goarch_amd64.go#L12: 	_StackAlign          = PtrSize

	internal/abi
		abi.go#L75: 	if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
		abi.go#L80: 		offset = goarch.PtrSize - argSize

	internal/concurrent
		hashtriemap.go#L52: 	hashShift := 8 * goarch.PtrSize
		hashtriemap.go#L80: 		hashShift = 8 * goarch.PtrSize
		hashtriemap.go#L193: 		hashShift = 8 * goarch.PtrSize
		hashtriemap.go#L256: 		if hashShift == 8*goarch.PtrSize {

	internal/reflectlite
		swapper.go#L40: 		if size == goarch.PtrSize {
		value.go#L103: 	if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {

	reflect
		abi.go#L170: 		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
		abi.go#L179: 		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
		abi.go#L183: 		a.stackAssign(goarch.PtrSize, goarch.PtrSize)
		abi.go#L205: 		switch goarch.PtrSize {
		abi.go#L218: 		return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
		abi.go#L220: 		return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
		abi.go#L222: 		return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
		abi.go#L265: 	if ptrMap != 0 && size != goarch.PtrSize {
		abi.go#L416: 			spill += goarch.PtrSize
		abi.go#L433: 	spill = align(spill, goarch.PtrSize)
		abi.go#L438: 	retOffset := align(in.stackBytes, goarch.PtrSize)
		swapper.go#L41: 		if size == goarch.PtrSize {
		type.go#L1850: 		mt.KeySize = uint8(goarch.PtrSize)
		type.go#L1856: 		mt.ValueSize = uint8(goarch.PtrSize)
		type.go#L2125: 	size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
		type.go#L2131: 		nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
		type.go#L2135: 		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		type.go#L2137: 		base := uintptr(abi.MapBucketCount / goarch.PtrSize)
		type.go#L2142: 		base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
		type.go#L2147: 		base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
		type.go#L2152: 		ptrdata = (word + 1) * goarch.PtrSize
		type.go#L2161: 		Align_:   goarch.PtrSize,
		type.go#L2182: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2183: 	words := typ.Size_ / goarch.PtrSize
		type.go#L2206: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2672: 				n := (ft.Offset - off) / goarch.PtrSize
		type.go#L2845: 	case typ.Kind_&abi.KindGCProg == 0 && array.Size_ <= abi.MaxPtrmaskBytes*8*goarch.PtrSize:
		type.go#L2849: 		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
		type.go#L2851: 		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		type.go#L2862: 		elemPtrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2863: 		elemWords := typ.Size_ / goarch.PtrSize
		type.go#L2988: 		Align_: goarch.PtrSize,
		type.go#L2993: 		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		type.go#L2994: 		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
		type.go#L3029: 	if bv.n%(8*goarch.PtrSize) == 0 {
		type.go#L3033: 		for i := 0; i < goarch.PtrSize; i++ {
		type.go#L3049: 		for bv.n < uint32(offset/goarch.PtrSize) {
		type.go#L3056: 		for bv.n < uint32(offset/goarch.PtrSize) {
		value.go#L112: 	if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
		value.go#L568: 	frameSize = align(frameSize, goarch.PtrSize)
		value.go#L1092: 	methodFrameSize = align(methodFrameSize, goarch.PtrSize)

	runtime
		alg.go#L15: 	c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
		alg.go#L16: 	c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
		alg.go#L460: const hashRandomBytes = goarch.PtrSize / 4 * 64
		arena.go#L231: 	return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
		arena.go#L564: 	nb := typ.PtrBytes / goarch.PtrSize
		arena.go#L617: 	h.low = offset / goarch.PtrSize % ptrBits
		arena.go#L620: 	h.offset = offset - h.low*goarch.PtrSize
		arena.go#L646: 	idx := h.offset / (ptrBits * goarch.PtrSize)
		arena.go#L656: 	h.offset += ptrBits * goarch.PtrSize
		arena.go#L666: 	words := size / goarch.PtrSize
		arena.go#L682: 	zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
		arena.go#L696: 	idx := h.offset / (ptrBits * goarch.PtrSize)
		arena.go#L709: 	h.offset += ptrBits * goarch.PtrSize
		arena.go#L718: 		idx := h.offset / (ptrBits * goarch.PtrSize)
		arena.go#L729: 		h.offset += ptrBits * goarch.PtrSize
		arena.go#L737: 		if goarch.PtrSize == 8 {
		cgocall.go#L625: 		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		cgocheck.go#L200: 	skipMask := off / goarch.PtrSize / 8
		cgocheck.go#L201: 	skipBytes := skipMask * goarch.PtrSize * 8
		cgocheck.go#L207: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		cgocheck.go#L208: 		if i&(goarch.PtrSize*8-1) == 0 {
		cgocheck.go#L215: 			off -= goarch.PtrSize
		heapdump.go#L249: 			dumpint(uint64(offset + i*goarch.PtrSize))
		heapdump.go#L299: 		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
		heapdump.go#L308: 		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
		heapdump.go#L315: 		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
		heapdump.go#L322: 		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
		heapdump.go#L515: 	dumpint(goarch.PtrSize)
		heapdump.go#L724: 	nptr := size / goarch.PtrSize
		heapdump.go#L745: 		i := (addr - p) / goarch.PtrSize
		iface.go#L77: 	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
		iface.go#L114: 		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		iface.go#L147: 		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		iface.go#L175: 		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		iface.go#L687: 		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		malloc.go#L147: 	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
		malloc.go#L248: 	heapArenaWords = heapArenaBytes / goarch.PtrSize
		malloc.go#L256: 	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
		malloc.go#L442: 	if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
		malloc.go#L463: 	if goarch.PtrSize == 8 {
		malloc.go#L771: 		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L773: 			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L782: 				size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
		malloc.go#L786: 				newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L791: 				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
		malloc.go#L1106: 			} else if goarch.PtrSize == 4 && size == 12 {
		malloc.go#L1620: 		persistent.off = alignUp(goarch.PtrSize, align)
		map.go#L100: 	noCheck = 1<<(8*goarch.PtrSize) - 1
		map.go#L179: 	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
		map.go#L189: 	top := uint8(hash >> (goarch.PtrSize*8 - 8))
		map.go#L202: 	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
		map.go#L206: 	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
		map.go#L888: 	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		map.go#L1414: 	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		map.go#L1418: 	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		map_fast32.go#L334: 			if goarch.PtrSize == 4 && t.Key.Pointers() {
		map_fast32.go#L460: 				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
		map_fast64.go#L335: 				if goarch.PtrSize == 8 {
		map_fast64.go#L465: 					if goarch.PtrSize == 8 {
		map_faststr.go#L30: 			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L39: 					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L46: 		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L55: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L72: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
		map_faststr.go#L74: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
		map_faststr.go#L95: 		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L101: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L134: 			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L143: 					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L150: 		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L159: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L176: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
		map_faststr.go#L178: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
		map_faststr.go#L199: 		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L205: 				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L269: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
		map_faststr.go#L307: 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
		map_faststr.go#L313: 	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
		map_faststr.go#L348: 		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L358: 			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L434: 		x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
		map_faststr.go#L442: 			y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
		map_faststr.go#L447: 			e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
		map_faststr.go#L448: 			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
		map_faststr.go#L474: 					dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
		map_faststr.go#L487: 				dst.k = add(dst.k, 2*goarch.PtrSize)
		mbarrier.go#L257: 	if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
		mbitmap.go#L101: 	minSizeForMallocHeader = goarch.PtrSize * ptrBits
		mbitmap.go#L254: 	if goarch.PtrSize == 8 {
		mbitmap.go#L262: 	return tp, tp.addr + uintptr(i)*goarch.PtrSize
		mbitmap.go#L285: 		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
		mbitmap.go#L289: 			tp.addr += ptrBits * goarch.PtrSize
		mbitmap.go#L298: 		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
		mbitmap.go#L299: 		if tp.addr+goarch.PtrSize*ptrBits > limit {
		mbitmap.go#L300: 			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		mbitmap.go#L322: 		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
		mbitmap.go#L324: 		if tp.addr+goarch.PtrSize*ptrBits > limit {
		mbitmap.go#L325: 			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		mbitmap.go#L338: 		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
		mbitmap.go#L340: 		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
		mbitmap.go#L357: 		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
		mbitmap.go#L358: 		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
		mbitmap.go#L360: 	if tp.addr+goarch.PtrSize*ptrBits > limit {
		mbitmap.go#L361: 		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		mbitmap.go#L419: 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		mbitmap.go#L505: 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		mbitmap.go#L588: 	bitmapSize := spanSize / goarch.PtrSize / 8
		mbitmap.go#L589: 	elems := int(bitmapSize / goarch.PtrSize)
		mbitmap.go#L603: 	bitmapSize := spanSize / goarch.PtrSize / 8
		mbitmap.go#L614: 	i := (addr - span.base()) / goarch.PtrSize / ptrBits
		mbitmap.go#L615: 	j := (addr - span.base()) / goarch.PtrSize % ptrBits
		mbitmap.go#L616: 	bits := span.elemsize / goarch.PtrSize
		mbitmap.go#L617: 	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
		mbitmap.go#L618: 	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
		mbitmap.go#L646: 	bits := span.elemsize / goarch.PtrSize
		mbitmap.go#L650: 	case goarch.PtrSize:
		mbitmap.go#L651: 		src = (1 << (dataSize / goarch.PtrSize)) - 1
		mbitmap.go#L654: 			src |= src0 << (i / goarch.PtrSize)
		mbitmap.go#L662: 	o := (x - span.base()) / goarch.PtrSize
		mbitmap.go#L723: 			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
		mbitmap.go#L725: 			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
		mbitmap.go#L761: 		off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
		mbitmap.go#L764: 			off -= goarch.PtrSize
		mbitmap.go#L765: 			size += goarch.PtrSize
		mbitmap.go#L768: 		size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
		mbitmap.go#L770: 			size = goarch.PtrSize
		mbitmap.go#L790: 	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
		mbitmap.go#L796: 				j := off / goarch.PtrSize
		mbitmap.go#L847: 	for i := off; i < off+size; i += goarch.PtrSize {
		mbitmap.go#L853: 				j := off / goarch.PtrSize
		mbitmap.go#L895: 	for i := off; i < off+size; i += goarch.PtrSize {
		mbitmap.go#L901: 				j := off / goarch.PtrSize
		mbitmap.go#L1319: const ptrBits = 8 * goarch.PtrSize
		mbitmap.go#L1330: 	word := maskOffset / goarch.PtrSize
		mbitmap.go#L1335: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		mbitmap.go#L1340: 				i += 7 * goarch.PtrSize
		mbitmap.go#L1396: 	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		mbitmap.go#L1397: 		if i&(goarch.PtrSize*8-1) == 0 {
		mbitmap.go#L1438: 		if goarch.PtrSize == 8 {
		mbitmap.go#L1455: 	n := (size/goarch.PtrSize + 7) / 8
		mbitmap.go#L1556: 		const maxBits = goarch.PtrSize*8 - 7
		mbitmap.go#L1599: 					for nb <= goarch.PtrSize*8 {
		mbitmap.go#L1681: 	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
		mbitmap.go#L1767: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1768: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1769: 				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
		mbitmap.go#L1770: 				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
		mbitmap.go#L1779: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1780: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1781: 				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
		mbitmap.go#L1782: 				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
		mbitmap.go#L1802: 		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
		mbitmap.go#L1808: 			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
		mbitmap.go#L1828: 			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
		mbitmap.go#L1835: 				maskFromType[(addr-base)/goarch.PtrSize] = 1
		mbitmap.go#L1897: 			size := uintptr(locals.n) * goarch.PtrSize
		mbitmap.go#L1899: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1900: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1901: 				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
		mbitmap.go#L1902: 				mask[i/goarch.PtrSize] = locals.ptrbit(off)
		mcheckmark.go#L28: 	b [heapArenaBytes / goarch.PtrSize / 8]uint8
		mfinal.go#L29: 	fin     [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
		mfinal.go#L47: var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
		mfinal.go#L114: 				if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
		mfinal.go#L116: 					unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
		mfinal.go#L117: 					unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
		mfinal.go#L118: 					unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
		mfinal.go#L119: 					unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
		mfinal.go#L506: 	nret = alignUp(nret, goarch.PtrSize)
		mgcmark.go#L263: 	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		mgcmark.go#L276: 	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
		mgcmark.go#L399: 					scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mgcmark.go#L403: 					scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mgcmark.go#L882: 		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L899: 			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L904: 			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L910: 			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L1056: 		size := uintptr(locals.n) * goarch.PtrSize
		mgcmark.go#L1062: 		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
		mgcmark.go#L1354: 		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		mgcmark.go#L1356: 			i += goarch.PtrSize * 8
		mgcmark.go#L1372: 			i += goarch.PtrSize
		mgcmark.go#L1446: 		scanSize = addr - b + goarch.PtrSize
		mgcmark.go#L1487: 				word := (p - b) / goarch.PtrSize
		mgcmark.go#L1512: 	for i := uintptr(0); i < n; i += goarch.PtrSize {
		mgcmark.go#L1514: 			word := i / goarch.PtrSize
		mgcmark.go#L1523: 				if i%(goarch.PtrSize*8) != 0 {
		mgcmark.go#L1526: 				i += goarch.PtrSize*8 - goarch.PtrSize
		mgcmark.go#L1589: 	if obj&(goarch.PtrSize-1) != 0 {
		mgcmark.go#L1661: 		size = off + goarch.PtrSize
		mgcmark.go#L1663: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		mgcmark.go#L1667: 		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
		mgcstack.go#L110: 	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
		mgcwork.go#L328: 	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
		mheap.go#L524: 		n := 64 * 1024 / goarch.PtrSize
		mheap.go#L530: 		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
		mheap.go#L1407: 				s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
		mheap.go#L1987: 			scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mheap.go#L2169: 			scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mprof.go#L592: 		pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		mranges.go#L258: 	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
		mranges.go#L385: 			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
		mranges.go#L455: 		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
		mspanset.go#L104: 			newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
		mspanset.go#L108: 				memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
		mspanset.go#L299: 	return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
		mwbbuf.go#L132: 	if b.next+goarch.PtrSize > b.end {
		mwbbuf.go#L136: 	b.next += goarch.PtrSize
		mwbbuf.go#L143: 	if b.next+2*goarch.PtrSize > b.end {
		mwbbuf.go#L147: 	b.next += 2 * goarch.PtrSize
		os_linux.go#L245: 	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
		panic.go#L904: 			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		panic.go#L1217: 		gp.sched.bp = fp - 2*goarch.PtrSize
		panic.go#L1222: 		gp.sched.bp = sp - goarch.PtrSize
		preempt.go#L323: 	asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
		print.go#L273: 	for i := uintptr(0); p+i < end; i += goarch.PtrSize {
		proc.go#L157: 	if goarch.PtrSize == 8 {
		proc.go#L687: 	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
		proc.go#L2393: 	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
		proc.go#L5014: 	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
		proc.go#L5024: 		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
		proc.go#L7289: 			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
		runtime1.go#L63: 	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
		runtime1.go#L198: 	if unsafe.Sizeof(k) != goarch.PtrSize {
		runtime1.go#L201: 	if unsafe.Sizeof(l) != goarch.PtrSize {
		runtime2.go#L542: 	tlsSize  = tlsSlots * goarch.PtrSize
		signal_amd64.go#L83: 	sp -= goarch.PtrSize
		signal_linux_amd64.go#L55: 	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
		slice.go#L216: 	case et.Size_ == goarch.PtrSize:
		slice.go#L217: 		lenmem = uintptr(oldLen) * goarch.PtrSize
		slice.go#L218: 		newlenmem = uintptr(newLen) * goarch.PtrSize
		slice.go#L219: 		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		slice.go#L220: 		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		slice.go#L221: 		newcap = int(capmem / goarch.PtrSize)
		slice.go#L224: 		if goarch.PtrSize == 8 {
		stack.go#L122: 	uintptrMask = 1<<(8*goarch.PtrSize) - 1
		stack.go#L625: 				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
		stack.go#L632: 			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		stack.go#L671: 	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		stack.go#L696: 		size := uintptr(locals.n) * goarch.PtrSize
		stack.go#L733: 			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
		stack.go#L734: 				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
		stack.go#L764: 		if oldfp == gp.sched.sp-goarch.PtrSize {
		stack.go#L765: 			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
		stack.go#L1051: 		sp -= goarch.PtrSize
		stkframe.go#L79: 	return uintptr(argMap.n) * goarch.PtrSize
		stkframe.go#L97: 		argMap.n = f.args / goarch.PtrSize
		stkframe.go#L112: 			minSP -= goarch.PtrSize
		stkframe.go#L137: 		retValid := *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
		stkframe.go#L146: 			n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
		stkframe.go#L221: 			print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n")
		stkframe.go#L248: 			p = add(p, goarch.PtrSize)
		symtab.go#L599: 		hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
		symtab.go#L954: 	return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
		symtab.go#L1154: 	if debugPcln && x&(goarch.PtrSize-1) != 0 {
		sys_x86.go#L18: 	sp -= goarch.PtrSize
		traceback.go#L182: 			frame.sp += goarch.PtrSize
		traceback.go#L329: 			frame.fp += goarch.PtrSize
		traceback.go#L377: 				lrPtr = frame.fp - goarch.PtrSize
		traceback.go#L386: 		frame.varp -= goarch.PtrSize
		traceback.go#L407: 		frame.varp -= goarch.PtrSize
		traceback.go#L1286: 	const expand = 32 * goarch.PtrSize
		traceback.go#L1287: 	const maxExpand = 256 * goarch.PtrSize
		tracemap.go#L110: 		m = &n.children[hashIter>>(8*goarch.PtrSize-2)]
		tracestack.go#L258: 		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		tracetype.go#L30: 	id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)

	runtime/internal/math
		math.go#L14: 	if a|b < 1<<(4*goarch.PtrSize) || a == 0 {

	runtime/internal/sys
		consts.go#L25: const Int64Align = goarch.PtrSize