Commit 6a0c8e77 by Ian Lance Taylor

compiler, runtime: panic on uncomparable map key, even if map is empty

    
    This ports https://golang.org/cl/155918 from the master repo.
    
        runtime: panic on uncomparable map key, even if map is empty
    
        Reorg map flags a bit so we don't need any extra space for the extra flag.
    
    This is a pre-req for updating libgo to the Go 1.12beta2 release.
    
    Reviewed-on: https://go-review.googlesource.com/c/157858

From-SVN: r267950
parent a81a6d58
0d64279c01a37b2579c0c62ca4f2c3e3f81de07c 87005025fcd0d7e7908b3aae7062b52cb80eb0f3
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
...@@ -5839,6 +5839,25 @@ Struct_type::do_needs_key_update() ...@@ -5839,6 +5839,25 @@ Struct_type::do_needs_key_update()
return false; return false;
} }
// Return whether computing the hash value of an instance of this
// struct type might panic.
bool
Struct_type::do_hash_might_panic()
{
const Struct_field_list* fields = this->fields_;
if (fields == NULL)
return false;
for (Struct_field_list::const_iterator pf = fields->begin();
pf != fields->end();
++pf)
{
if (pf->type()->hash_might_panic())
return true;
}
return false;
}
// Return whether this struct type is permitted to be in the heap. // Return whether this struct type is permitted to be in the heap.
bool bool
...@@ -7979,21 +7998,18 @@ Map_type::make_map_type_descriptor_type() ...@@ -7979,21 +7998,18 @@ Map_type::make_map_type_descriptor_type()
Type* ptdt = Type::make_type_descriptor_ptr_type(); Type* ptdt = Type::make_type_descriptor_ptr_type();
Type* uint8_type = Type::lookup_integer_type("uint8"); Type* uint8_type = Type::lookup_integer_type("uint8");
Type* uint16_type = Type::lookup_integer_type("uint16"); Type* uint16_type = Type::lookup_integer_type("uint16");
Type* bool_type = Type::lookup_bool_type(); Type* uint32_type = Type::lookup_integer_type("uint32");
Struct_type* sf = Struct_type* sf =
Type::make_builtin_struct_type(11, Type::make_builtin_struct_type(8,
"", tdt, "", tdt,
"key", ptdt, "key", ptdt,
"elem", ptdt, "elem", ptdt,
"bucket", ptdt, "bucket", ptdt,
"keysize", uint8_type, "keysize", uint8_type,
"indirectkey", bool_type,
"valuesize", uint8_type, "valuesize", uint8_type,
"indirectvalue", bool_type,
"bucketsize", uint16_type, "bucketsize", uint16_type,
"reflexivekey", bool_type, "flags", uint32_type);
"needkeyupdate", bool_type);
ret = Type::make_builtin_named_type("MapType", sf); ret = Type::make_builtin_named_type("MapType", sf);
} }
...@@ -8011,6 +8027,7 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name) ...@@ -8011,6 +8027,7 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
Type* mtdt = Map_type::make_map_type_descriptor_type(); Type* mtdt = Map_type::make_map_type_descriptor_type();
Type* uint8_type = Type::lookup_integer_type("uint8"); Type* uint8_type = Type::lookup_integer_type("uint8");
Type* uint16_type = Type::lookup_integer_type("uint16"); Type* uint16_type = Type::lookup_integer_type("uint16");
Type* uint32_type = Type::lookup_integer_type("uint32");
int64_t keysize; int64_t keysize;
if (!this->key_type_->backend_type_size(gogo, &keysize)) if (!this->key_type_->backend_type_size(gogo, &keysize))
...@@ -8078,11 +8095,6 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name) ...@@ -8078,11 +8095,6 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
vals->push_back(Expression::make_integer_int64(keysize, uint8_type, bloc)); vals->push_back(Expression::make_integer_int64(keysize, uint8_type, bloc));
++p; ++p;
go_assert(p->is_field_name("indirectkey"));
vals->push_back(Expression::make_boolean(keysize > Map_type::max_key_size,
bloc));
++p;
go_assert(p->is_field_name("valuesize")); go_assert(p->is_field_name("valuesize"));
if (valsize > Map_type::max_val_size) if (valsize > Map_type::max_val_size)
vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc)); vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc));
...@@ -8090,24 +8102,26 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name) ...@@ -8090,24 +8102,26 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
vals->push_back(Expression::make_integer_int64(valsize, uint8_type, bloc)); vals->push_back(Expression::make_integer_int64(valsize, uint8_type, bloc));
++p; ++p;
go_assert(p->is_field_name("indirectvalue"));
vals->push_back(Expression::make_boolean(valsize > Map_type::max_val_size,
bloc));
++p;
go_assert(p->is_field_name("bucketsize")); go_assert(p->is_field_name("bucketsize"));
vals->push_back(Expression::make_integer_int64(bucketsize, uint16_type, vals->push_back(Expression::make_integer_int64(bucketsize, uint16_type,
bloc)); bloc));
++p; ++p;
go_assert(p->is_field_name("reflexivekey")); go_assert(p->is_field_name("flags"));
vals->push_back(Expression::make_boolean(this->key_type_->is_reflexive(), // As with the other fields, the flag bits must match the reflect
bloc)); // and runtime packages.
unsigned long flags = 0;
++p; if (keysize > Map_type::max_key_size)
go_assert(p->is_field_name("needkeyupdate")); flags |= 1;
vals->push_back(Expression::make_boolean(this->key_type_->needs_key_update(), if (valsize > Map_type::max_val_size)
bloc)); flags |= 2;
if (this->key_type_->is_reflexive())
flags |= 4;
if (this->key_type_->needs_key_update())
flags |= 8;
if (this->key_type_->hash_might_panic())
flags |= 16;
vals->push_back(Expression::make_integer_ul(flags, uint32_type, bloc));
++p; ++p;
go_assert(p == fields->end()); go_assert(p == fields->end());
......
...@@ -635,6 +635,12 @@ class Type ...@@ -635,6 +635,12 @@ class Type
needs_key_update() needs_key_update()
{ return this->do_needs_key_update(); } { return this->do_needs_key_update(); }
// Return whether the hash function of this type might panic. This
// is only called for types used as a key in a map type.
bool
hash_might_panic()
{ return this->do_hash_might_panic(); }
// Whether the type is permitted in the heap. // Whether the type is permitted in the heap.
bool bool
in_heap() in_heap()
...@@ -1074,6 +1080,10 @@ class Type ...@@ -1074,6 +1080,10 @@ class Type
{ return false; } { return false; }
virtual bool virtual bool
do_hash_might_panic()
{ return false; }
virtual bool
do_in_heap() do_in_heap()
{ return true; } { return true; }
...@@ -2601,6 +2611,9 @@ class Struct_type : public Type ...@@ -2601,6 +2611,9 @@ class Struct_type : public Type
do_needs_key_update(); do_needs_key_update();
bool bool
do_hash_might_panic();
bool
do_in_heap(); do_in_heap();
unsigned int unsigned int
...@@ -2779,6 +2792,10 @@ class Array_type : public Type ...@@ -2779,6 +2792,10 @@ class Array_type : public Type
{ return this->element_type_->needs_key_update(); } { return this->element_type_->needs_key_update(); }
bool bool
do_hash_might_panic()
{ return this->length_ != NULL && this->element_type_->hash_might_panic(); }
bool
do_in_heap() do_in_heap()
{ return this->length_ == NULL || this->element_type_->in_heap(); } { return this->length_ == NULL || this->element_type_->in_heap(); }
...@@ -3170,6 +3187,11 @@ class Interface_type : public Type ...@@ -3170,6 +3187,11 @@ class Interface_type : public Type
do_needs_key_update() do_needs_key_update()
{ return true; } { return true; }
// Hashing an unhashable type stored in an interface might panic.
bool
do_hash_might_panic()
{ return true; }
unsigned int unsigned int
do_hash_for_method(Gogo*, int) const; do_hash_for_method(Gogo*, int) const;
......
...@@ -347,16 +347,13 @@ type interfaceType struct { ...@@ -347,16 +347,13 @@ type interfaceType struct {
// mapType represents a map type. // mapType represents a map type.
type mapType struct { type mapType struct {
rtype rtype
key *rtype // map key type key *rtype // map key type
elem *rtype // map element (value) type elem *rtype // map element (value) type
bucket *rtype // internal bucket structure bucket *rtype // internal bucket structure
keysize uint8 // size of key slot keysize uint8 // size of key slot
indirectkey uint8 // store ptr to key instead of key itself valuesize uint8 // size of value slot
valuesize uint8 // size of value slot bucketsize uint16 // size of bucket
indirectvalue uint8 // store ptr to value instead of value itself flags uint32
bucketsize uint16 // size of bucket
reflexivekey bool // true if k==k for all keys
needkeyupdate bool // true if we need to update key on an overwrite
} }
// ptrType represents a pointer type. // ptrType represents a pointer type.
...@@ -1506,6 +1503,8 @@ func MapOf(key, elem Type) Type { ...@@ -1506,6 +1503,8 @@ func MapOf(key, elem Type) Type {
s := "map[" + *ktyp.string + "]" + *etyp.string s := "map[" + *ktyp.string + "]" + *etyp.string
// Make a map type. // Make a map type.
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap)) mt := **(**mapType)(unsafe.Pointer(&imap))
mt.string = &s mt.string = &s
...@@ -1520,23 +1519,29 @@ func MapOf(key, elem Type) Type { ...@@ -1520,23 +1519,29 @@ func MapOf(key, elem Type) Type {
mt.ptrToThis = nil mt.ptrToThis = nil
mt.bucket = bucketOf(ktyp, etyp) mt.bucket = bucketOf(ktyp, etyp)
mt.flags = 0
if ktyp.size > maxKeySize { if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize) mt.keysize = uint8(ptrSize)
mt.indirectkey = 1 mt.flags |= 1 // indirect key
} else { } else {
mt.keysize = uint8(ktyp.size) mt.keysize = uint8(ktyp.size)
mt.indirectkey = 0
} }
if etyp.size > maxValSize { if etyp.size > maxValSize {
mt.valuesize = uint8(ptrSize) mt.valuesize = uint8(ptrSize)
mt.indirectvalue = 1 mt.flags |= 2 // indirect value
} else { } else {
mt.valuesize = uint8(etyp.size) mt.valuesize = uint8(etyp.size)
mt.indirectvalue = 0
} }
mt.bucketsize = uint16(mt.bucket.size) mt.bucketsize = uint16(mt.bucket.size)
mt.reflexivekey = isReflexive(ktyp) if isReflexive(ktyp) {
mt.needkeyupdate = needKeyUpdate(ktyp) mt.flags |= 4
}
if needKeyUpdate(ktyp) {
mt.flags |= 8
}
if hashMightPanic(ktyp) {
mt.flags |= 16
}
// Canonicalize before storing in lookupCache // Canonicalize before storing in lookupCache
ti := toType(&mt.rtype) ti := toType(&mt.rtype)
...@@ -1715,6 +1720,27 @@ func needKeyUpdate(t *rtype) bool { ...@@ -1715,6 +1720,27 @@ func needKeyUpdate(t *rtype) bool {
} }
} }
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *rtype) bool {
	switch t.Kind() {
	case Interface:
		// An interface may hold a value of uncomparable dynamic type.
		return true
	case Array:
		// An array's hash panics iff its element's hash might.
		at := (*arrayType)(unsafe.Pointer(t))
		return hashMightPanic(at.elem)
	case Struct:
		// A struct's hash panics iff any field's hash might.
		st := (*structType)(unsafe.Pointer(t))
		for _, field := range st.fields {
			if hashMightPanic(field.typ) {
				return true
			}
		}
	}
	return false
}
// Make sure these routines stay in sync with ../../runtime/map.go! // Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info. // These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string // Currently, that's just size and the GC program. We also fill in string
......
...@@ -414,14 +414,17 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -414,14 +414,17 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled && h != nil { if msanenabled && h != nil {
msanread(key, t.key.size) msanread(key, t.key.size)
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() {
hashfn(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]) return unsafe.Pointer(&zeroVal[0])
} }
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map read and map write") throw("concurrent map read and map write")
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0)) hash := hashfn(key, uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
...@@ -442,12 +445,12 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -442,12 +445,12 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
continue continue
} }
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if equalfn(key, k) { if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue { if t.indirectvalue() {
v = *((*unsafe.Pointer)(v)) v = *((*unsafe.Pointer)(v))
} }
return v return v
...@@ -472,14 +475,17 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) ...@@ -472,14 +475,17 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if msanenabled && h != nil { if msanenabled && h != nil {
msanread(key, t.key.size) msanread(key, t.key.size)
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() {
hashfn(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false return unsafe.Pointer(&zeroVal[0]), false
} }
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map read and map write") throw("concurrent map read and map write")
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0)) hash := hashfn(key, uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
...@@ -500,12 +506,12 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) ...@@ -500,12 +506,12 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
continue continue
} }
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if equalfn(key, k) { if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue { if t.indirectvalue() {
v = *((*unsafe.Pointer)(v)) v = *((*unsafe.Pointer)(v))
} }
return v, true return v, true
...@@ -547,12 +553,12 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe ...@@ -547,12 +553,12 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
continue continue
} }
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if equalfn(key, k) { if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue { if t.indirectvalue() {
v = *((*unsafe.Pointer)(v)) v = *((*unsafe.Pointer)(v))
} }
return k, v return k, v
...@@ -634,14 +640,14 @@ again: ...@@ -634,14 +640,14 @@ again:
continue continue
} }
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if !equalfn(key, k) { if !equalfn(key, k) {
continue continue
} }
// already have a mapping for key. Update it. // already have a mapping for key. Update it.
if t.needkeyupdate { if t.needkeyupdate() {
typedmemmove(t.key, k, key) typedmemmove(t.key, k, key)
} }
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
...@@ -672,12 +678,12 @@ again: ...@@ -672,12 +678,12 @@ again:
} }
// store new key/value at insert position // store new key/value at insert position
if t.indirectkey { if t.indirectkey() {
kmem := newobject(t.key) kmem := newobject(t.key)
*(*unsafe.Pointer)(insertk) = kmem *(*unsafe.Pointer)(insertk) = kmem
insertk = kmem insertk = kmem
} }
if t.indirectvalue { if t.indirectvalue() {
vmem := newobject(t.elem) vmem := newobject(t.elem)
*(*unsafe.Pointer)(val) = vmem *(*unsafe.Pointer)(val) = vmem
} }
...@@ -690,7 +696,7 @@ done: ...@@ -690,7 +696,7 @@ done:
throw("concurrent map writes") throw("concurrent map writes")
} }
h.flags &^= hashWriting h.flags &^= hashWriting
if t.indirectvalue { if t.indirectvalue() {
val = *((*unsafe.Pointer)(val)) val = *((*unsafe.Pointer)(val))
} }
return val return val
...@@ -706,15 +712,18 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { ...@@ -706,15 +712,18 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if msanenabled && h != nil { if msanenabled && h != nil {
msanread(key, t.key.size) msanread(key, t.key.size)
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() {
hashfn(key, 0) // see issue 23734
}
return return
} }
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
hashfn := t.key.hashfn
equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0)) hash := hashfn(key, uintptr(h.hash0))
// Set hashWriting after calling alg.hash, since alg.hash may panic, // Set hashWriting after calling alg.hash, since alg.hash may panic,
...@@ -735,20 +744,20 @@ search: ...@@ -735,20 +744,20 @@ search:
} }
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k k2 := k
if t.indirectkey { if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2)) k2 = *((*unsafe.Pointer)(k2))
} }
if !equalfn(key, k2) { if !equalfn(key, k2) {
continue continue
} }
// Only clear key if there are pointers in it. // Only clear key if there are pointers in it.
if t.indirectkey { if t.indirectkey() {
*(*unsafe.Pointer)(k) = nil *(*unsafe.Pointer)(k) = nil
} else if t.key.kind&kindNoPointers == 0 { } else if t.key.kind&kindNoPointers == 0 {
memclrHasPointers(k, t.key.size) memclrHasPointers(k, t.key.size)
} }
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue { if t.indirectvalue() {
*(*unsafe.Pointer)(v) = nil *(*unsafe.Pointer)(v) = nil
} else if t.elem.kind&kindNoPointers == 0 { } else if t.elem.kind&kindNoPointers == 0 {
memclrHasPointers(v, t.elem.size) memclrHasPointers(v, t.elem.size)
...@@ -894,7 +903,7 @@ next: ...@@ -894,7 +903,7 @@ next:
continue continue
} }
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize)) k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
if t.indirectkey { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
...@@ -906,7 +915,7 @@ next: ...@@ -906,7 +915,7 @@ next:
// through the oldbucket, skipping any keys that will go // through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two // to the other new bucket (each oldbucket expands to two
// buckets during a grow). // buckets during a grow).
if t.reflexivekey || equalfn(k, k) { if t.reflexivekey() || equalfn(k, k) {
// If the item in the oldbucket is not destined for // If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it. // the current new bucket in the iteration, skip it.
hash := hashfn(k, uintptr(h.hash0)) hash := hashfn(k, uintptr(h.hash0))
...@@ -927,13 +936,13 @@ next: ...@@ -927,13 +936,13 @@ next:
} }
} }
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
!(t.reflexivekey || equalfn(k, k)) { !(t.reflexivekey() || equalfn(k, k)) {
// This is the golden data, we can return it. // This is the golden data, we can return it.
// OR // OR
// key!=key, so the entry can't be deleted or updated, so we can just return it. // key!=key, so the entry can't be deleted or updated, so we can just return it.
// That's lucky for us because when key!=key we can't look it up successfully. // That's lucky for us because when key!=key we can't look it up successfully.
it.key = k it.key = k
if t.indirectvalue { if t.indirectvalue() {
v = *((*unsafe.Pointer)(v)) v = *((*unsafe.Pointer)(v))
} }
it.value = v it.value = v
...@@ -1157,7 +1166,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -1157,7 +1166,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
throw("bad map state") throw("bad map state")
} }
k2 := k k2 := k
if t.indirectkey { if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2)) k2 = *((*unsafe.Pointer)(k2))
} }
var useY uint8 var useY uint8
...@@ -1165,7 +1174,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -1165,7 +1174,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
// Compute hash to make our evacuation decision (whether we need // Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y). // to send this key/value to bucket x or bucket y).
hash := t.key.hashfn(k2, uintptr(h.hash0)) hash := t.key.hashfn(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey && !t.key.equalfn(k2, k2) { if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably // If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover, // will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the // it isn't reproducible. Reproducibility is required in the
...@@ -1200,12 +1209,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -1200,12 +1209,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
dst.v = add(dst.k, bucketCnt*uintptr(t.keysize)) dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
} }
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
if t.indirectkey { if t.indirectkey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else { } else {
typedmemmove(t.key, dst.k, k) // copy value typedmemmove(t.key, dst.k, k) // copy value
} }
if t.indirectvalue { if t.indirectvalue() {
*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v) *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
} else { } else {
typedmemmove(t.elem, dst.v, v) typedmemmove(t.elem, dst.v, v)
...@@ -1271,12 +1280,12 @@ func reflect_makemap(t *maptype, cap int) *hmap { ...@@ -1271,12 +1280,12 @@ func reflect_makemap(t *maptype, cap int) *hmap {
if !ismapkey(t.key) { if !ismapkey(t.key) {
throw("runtime.reflect_makemap: unsupported map key type") throw("runtime.reflect_makemap: unsupported map key type")
} }
if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) || if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) { t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong") throw("key size wrong")
} }
if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) || if t.elem.size > maxValueSize && (!t.indirectvalue() || t.valuesize != uint8(sys.PtrSize)) ||
t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) { t.elem.size <= maxValueSize && (t.indirectvalue() || t.valuesize != uint8(t.elem.size)) {
throw("value size wrong") throw("value size wrong")
} }
if t.key.align > bucketCnt { if t.key.align > bucketCnt {
......
...@@ -86,17 +86,32 @@ type interfacetype struct { ...@@ -86,17 +86,32 @@ type interfacetype struct {
} }
type maptype struct { type maptype struct {
typ _type typ _type
key *_type key *_type
elem *_type elem *_type
bucket *_type // internal type representing a hash bucket bucket *_type // internal type representing a hash bucket
keysize uint8 // size of key slot keysize uint8 // size of key slot
indirectkey bool // store ptr to key instead of key itself valuesize uint8 // size of value slot
valuesize uint8 // size of value slot bucketsize uint16 // size of bucket
indirectvalue bool // store ptr to value instead of value itself flags uint32
bucketsize uint16 // size of bucket }
reflexivekey bool // true if k==k for all keys
needkeyupdate bool // true if we need to update key on an overwrite // Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
// indirectkey reports whether the map stores a pointer to the key
// rather than the key itself (flag bit 1).
func (mt *maptype) indirectkey() bool {
	return mt.flags&1 != 0
}

// indirectvalue reports whether the map stores a pointer to the value
// rather than the value itself (flag bit 2).
func (mt *maptype) indirectvalue() bool {
	return mt.flags&2 != 0
}

// reflexivekey reports whether k==k holds for all keys (flag bit 4).
func (mt *maptype) reflexivekey() bool {
	return mt.flags&4 != 0
}

// needkeyupdate reports whether the key must be updated on an
// overwrite (flag bit 8).
func (mt *maptype) needkeyupdate() bool {
	return mt.flags&8 != 0
}
func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
return mt.flags&16 != 0
} }
type arraytype struct { type arraytype struct {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment