// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast64
//go:linkname mapaccess2_fast64
//go:linkname mapassign_fast64
//go:linkname mapassign_fast64ptr
//go:linkname mapdelete_fast64

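// mapaccess1_fast64 is the lookup fast path for maps with 64-bit keys.
// It returns a pointer to the element for key, or a pointer to the zero
// value if the key is not in the map; it never returns nil.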
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

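// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the
// key was present, implementing the two-result ("comma ok") form of a lookup.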
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

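// mapassign_fast64 returns a pointer to the element slot for key, inserting
// the key (and growing the map if needed) when it is not already present.
// The caller stores the element value through the returned pointer.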
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

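// mapassign_fast64ptr is the pointer-key variant of mapassign_fast64: the key
// is stored through an unsafe.Pointer so the store is a pointer write visible
// to the garbage collector, rather than a plain uint64 store.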
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

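// mapdelete_fast64 removes the entry for key, if present: it clears the key
// and element slots, marks the cell emptyOne, and collapses a trailing run of
// emptyOne marks into emptyRest so later lookups can stop scanning early.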
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

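// growWork_fast64 does a bounded amount of evacuation work during an ongoing
// grow: the old bucket corresponding to the bucket about to be used, plus one
// more so the grow eventually completes.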
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

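// evacuate_fast64 moves the entries of old bucket oldbucket into the new
// bucket array: into the "x" destination, or the "y" destination when the
// table has doubled, and marks the old slots as evacuated.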
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}