Commit 10172a64 by Ian Lance Taylor

compiler, runtime, reflect: generate hash functions only for map keys

    
    Right now we generate hash functions for all types, just in case they
    are used as map keys. That's a lot of wasted effort and binary size
    for types which will never be used as a map key. Instead, generate
    hash functions only for types that we know are map keys.
    
    Just doing that is a bit too simple, since maps with an interface type
    as a key might have to hash any concrete key type that implements that
    interface. So for that case, implement hashing of such types at
    runtime (instead of with generated code). It will be slower, but only
    for maps with interface types as keys, and maybe only a bit slower as
    the aeshash time probably dominates the dispatch time.
    
    Reorganize where we keep the equal and hash functions. Move the hash
    function from the key type to the map type, saving a field in every
    non-map type. That leaves only one function in the alg structure, so
    get rid of that structure and just keep the equal function in the type
    descriptor itself.
    
    While we're here, reorganize the rtype struct to more closely match
    the gc version.
    
    This is the gofrontend version of https://golang.org/cl/191198.
    
    Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/212843
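
For context, a minimal sketch (not part of the patch) of the case that
forces the runtime fallback: a map keyed by an interface type can see any
comparable concrete type as a key, so the compiler cannot know in advance
which hash functions will be needed.

package main

import "fmt"

type point struct{ x, y int }

func main() {
	m := map[interface{}]string{}
	// After this change, each concrete key type stored in an
	// interface-keyed map is hashed through the runtime's generic
	// typehash path instead of compiler-generated per-type code.
	m[42] = "int key"
	m["hello"] = "string key"
	m[point{1, 2}] = "struct key"
	fmt.Println(m[point{1, 2}]) // prints "struct key"
}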

From-SVN: r279848
parent 9279b5ba

-b5c950fb98042fe434edca0c2403234692f25cd4
+9163fa28b89222cd851c0d24bd6a1384d1379c55
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
......@@ -2722,7 +2722,7 @@ Gogo::clear_file_scope()
// parse tree is lowered.
void
Gogo::queue_hash_function(Type* type, Named_type* name, int64_t size,
Gogo::queue_hash_function(Type* type, int64_t size,
const std::string& hash_name,
Function_type* hash_fntype)
{
......@@ -2730,7 +2730,7 @@ Gogo::queue_hash_function(Type* type, Named_type* name, int64_t size,
go_assert(!this->in_global_scope());
Specific_type_function::Specific_type_function_kind kind =
Specific_type_function::SPECIFIC_HASH;
Specific_type_function* tsf = new Specific_type_function(type, name, size,
Specific_type_function* tsf = new Specific_type_function(type, NULL, size,
kind, hash_name,
hash_fntype);
this->specific_type_functions_.push_back(tsf);
......@@ -2783,10 +2783,7 @@ Specific_type_functions::type(Type* t)
if (nt->is_alias())
return TRAVERSE_CONTINUE;
if (t->needs_specific_type_functions(this->gogo_))
-{
-t->equal_function(this->gogo_, nt, NULL);
-t->hash_function(this->gogo_, nt, NULL);
-}
+t->equal_function(this->gogo_, nt, NULL);
// If this is a struct type, we don't want to make functions
// for the unnamed struct.
......@@ -2820,10 +2817,15 @@ Specific_type_functions::type(Type* t)
case Type::TYPE_STRUCT:
case Type::TYPE_ARRAY:
if (t->needs_specific_type_functions(this->gogo_))
-{
-t->equal_function(this->gogo_, NULL, NULL);
-t->hash_function(this->gogo_, NULL, NULL);
-}
+t->equal_function(this->gogo_, NULL, NULL);
break;
+case Type::TYPE_MAP:
+{
+Type* key_type = t->map_type()->key_type();
+if (key_type->needs_specific_type_functions(this->gogo_))
+key_type->hash_function(this->gogo_, NULL);
+}
+break;
default:
......@@ -2846,8 +2848,8 @@ Gogo::write_specific_type_functions()
Specific_type_function* tsf = this->specific_type_functions_.back();
this->specific_type_functions_.pop_back();
if (tsf->kind == Specific_type_function::SPECIFIC_HASH)
tsf->type->write_hash_function(this, tsf->name, tsf->size,
tsf->fnname, tsf->fntype);
tsf->type->write_hash_function(this, tsf->size, tsf->fnname,
tsf->fntype);
else
tsf->type->write_equal_function(this, tsf->name, tsf->size,
tsf->fnname, tsf->fntype);
......
......@@ -602,8 +602,7 @@ class Gogo
// is used when a type-specific hash function is needed when not at
// top level.
void
queue_hash_function(Type* type, Named_type* name, int64_t size,
const std::string& hash_name,
queue_hash_function(Type* type, int64_t size, const std::string& hash_name,
Function_type* hash_fntype);
// Queue up a type-specific equal function to be written out. This
......@@ -879,7 +878,7 @@ class Gogo
// Return the name of the hash function for TYPE.
std::string
hash_function_name(const Type*, const Named_type*);
hash_function_name(const Type*);
// Return the name of the equal function for TYPE.
std::string
......
......@@ -287,16 +287,12 @@ Gogo::stub_method_name(const Package* package, const std::string& mname)
return ret;
}
-// Return the name of the hash function for TYPE. If NAME is not NULL
-// it is the name of the type.
+// Return the name of the hash function for TYPE.
std::string
-Gogo::hash_function_name(const Type* type, const Named_type* name)
+Gogo::hash_function_name(const Type* type)
{
-const Type* rtype = type;
-if (name != NULL)
-rtype = name;
-std::string tname = rtype->mangled_name(this);
+std::string tname = type->mangled_name(this);
return tname + "..hash";
}
......
......@@ -1061,7 +1061,7 @@ class Type
// Get the hash function for a type. Returns NULL if the type is
// not comparable.
Named_object*
hash_function(Gogo*, Named_type* name, Function_type* hash_fntype);
hash_function(Gogo*, Function_type* hash_fntype);
// Write the equal function for a type.
void
......@@ -1071,8 +1071,7 @@ class Type
// Write the hash function for a type.
void
write_hash_function(Gogo*, Named_type*, int64_t size,
const std::string& hash_name,
write_hash_function(Gogo*, int64_t size, const std::string& hash_name,
Function_type* hash_fntype);
// Return the alignment required by the memequalN function.
......@@ -1284,8 +1283,7 @@ class Type
// Build the hash function for a type that needs specific functions.
Named_object*
build_hash_function(Gogo*, Named_type*, int64_t size,
Function_type* hash_fntype);
build_hash_function(Gogo*, int64_t size, Function_type* hash_fntype);
// Build the equal function for a type that needs specific functions.
Named_object*
......@@ -1299,9 +1297,6 @@ class Type
write_identity_equal(Gogo*, int64_t size);
-void
-write_named_hash(Gogo*, Named_type*, Function_type* hash_fntype);
void
write_named_equal(Gogo*, Named_type*);
// Build a composite literal for the uncommon type information.
......@@ -2628,7 +2623,7 @@ class Struct_type : public Type
// Write the hash function for this type.
void
write_hash_function(Gogo*, Named_type*, Function_type*);
write_hash_function(Gogo*, Function_type*);
// Write the equality function for this type.
void
......@@ -2815,7 +2810,7 @@ class Array_type : public Type
// Write the hash function for this type.
void
write_hash_function(Gogo*, Named_type*, Function_type*);
write_hash_function(Gogo*, Function_type*);
// Write the equality function for this type.
void
......
......@@ -110,33 +110,14 @@ const (
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
// cmd/compile/internal/gc/reflect.go
// cmd/link/internal/ld/decodesym.go
// go/types.cc
// runtime/type.go
type tflag uint8
const (
// tflagUncommon means that there is a pointer, *uncommonType,
// just beyond the outer type structure.
//
// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
// then t has uncommonType data and it can be accessed as:
//
// type tUncommon struct {
// structType
// u uncommonType
// }
// u := &(*tUncommon)(unsafe.Pointer(t)).u
tflagUncommon tflag = 1 << 0
// tflagExtraStar means the name in the str field has an
// extraneous '*' prefix. This is because for most types T in
// a program, the type *T also exists and reusing the str data
// saves binary size.
tflagExtraStar tflag = 1 << 1
// tflagNamed means the type has a name.
tflagNamed tflag = 1 << 2
// tflagRegularMemory means that equal and hash functions can treat
// this type as a single region of t.size bytes.
tflagRegularMemory tflag = 1 << 3
)
// rtype is the common implementation of most values.
......@@ -147,16 +128,15 @@ type rtype struct {
size uintptr
ptrdata uintptr // number of bytes in the type that can contain pointers
hash uint32 // hash of type; avoids computation in hash tables
-kind uint8 // enumeration for C
+tflag tflag // extra type information flags
align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
-_ uint8 // unused/padding
-hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
-equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
+kind uint8 // enumeration for C
+// function for comparing objects of this type
+// (ptr to object A, ptr to object B) -> ==?
+equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte // garbage collection data
string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
ptrToThis *rtype // type for pointer to this type, may be zero
}
......
......@@ -261,6 +261,20 @@ const (
UnsafePointer
)
// tflag is used by an rtype to signal what extra type information is
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
// go/types.cc
// runtime/type.go
type tflag uint8
const (
// tflagRegularMemory means that equal and hash functions can treat
// this type as a single region of t.size bytes.
tflagRegularMemory tflag = 1 << 3
)
// rtype is the common implementation of most values.
// It is embedded in other struct types.
//
......@@ -269,16 +283,15 @@ type rtype struct {
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
hash uint32 // hash of type; avoids computation in hash tables
-kind uint8 // enumeration for C
-align int8 // alignment of variable with this type
+tflag tflag // extra type information flags
+align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
-_ uint8 // unused/padding
-hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
-equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
+kind uint8 // enumeration for C
+// function for comparing objects of this type
+// (ptr to object A, ptr to object B) -> ==?
+equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte // garbage collection data
string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
}
......@@ -350,9 +363,11 @@ type interfaceType struct {
// mapType represents a map type.
type mapType struct {
rtype
key *rtype // map key type
elem *rtype // map element (value) type
bucket *rtype // internal bucket structure
+// function for hashing keys (ptr to key, seed) -> hash
+hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
valuesize uint8 // size of value slot
bucketsize uint16 // size of bucket
......@@ -1178,31 +1193,7 @@ func (t *rtype) ConvertibleTo(u Type) bool {
}
func (t *rtype) Comparable() bool {
-switch t.Kind() {
-case Bool, Int, Int8, Int16, Int32, Int64,
-Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
-Float32, Float64, Complex64, Complex128,
-Chan, Interface, Ptr, String, UnsafePointer:
-return true
-case Func, Map, Slice:
-return false
-case Array:
-return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
-case Struct:
-tt := (*structType)(unsafe.Pointer(t))
-for i := range tt.fields {
-if !tt.fields[i].typ.Comparable() {
-return false
-}
-}
-return true
-default:
-panic("reflect: impossible")
-}
+return t.equal != nil
}
// implements reports whether the type V implements the interface type T.
......@@ -1457,6 +1448,7 @@ func ChanOf(dir ChanDir, t Type) Type {
var ichan interface{} = (chan unsafe.Pointer)(nil)
prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := *prototype
ch.tflag = tflagRegularMemory
ch.dir = uintptr(dir)
ch.string = &s
......@@ -1481,8 +1473,6 @@ func ChanOf(dir ChanDir, t Type) Type {
return ti.(Type)
}
-func ismapkey(*rtype) bool // implemented in runtime
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
......@@ -1493,7 +1483,7 @@ func MapOf(key, elem Type) Type {
ktyp := key.(*rtype)
etyp := elem.(*rtype)
if !ismapkey(ktyp) {
if ktyp.equal == nil {
panic("reflect.MapOf: invalid key type " + ktyp.String())
}
......@@ -1530,6 +1520,9 @@ func MapOf(key, elem Type) Type {
mt.ptrToThis = nil
mt.bucket = bucketOf(ktyp, etyp)
+mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+return typehash(ktyp, p, seed)
+}
mt.flags = 0
if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize)
......@@ -1851,7 +1844,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
}
b := &rtype{
align: int8(maxAlign),
align: uint8(maxAlign),
fieldAlign: uint8(maxAlign),
size: size,
kind: uint8(Struct),
......@@ -1949,9 +1942,8 @@ func StructOf(fields []StructField) Type {
var (
hash = uint32(12)
size uintptr
-typalign int8
+typalign uint8
comparable = true
-hashable = true
fs = make([]structField, len(fields))
repr = make([]byte, 0, 64)
......@@ -2036,12 +2028,11 @@ func StructOf(fields []StructField) Type {
repr = append(repr, ';')
}
-comparable = comparable && (ft.equalfn != nil)
-hashable = hashable && (ft.hashfn != nil)
+comparable = comparable && (ft.equal != nil)
offset := align(size, uintptr(ft.fieldAlign))
-if int8(ft.fieldAlign) > typalign {
-typalign = int8(ft.fieldAlign)
+if ft.fieldAlign > typalign {
+typalign = ft.fieldAlign
}
size = offset + ft.size
f.offsetEmbed |= offset << 1
......@@ -2118,11 +2109,12 @@ func StructOf(fields []StructField) Type {
}
typ.string = &str
+typ.tflag = 0 // TODO: set tflagRegularMemory
typ.hash = hash
typ.size = size
typ.ptrdata = typeptrdata(typ.common())
typ.align = typalign
-typ.fieldAlign = uint8(typalign)
+typ.fieldAlign = typalign
if hasGCProg {
lastPtrField := 0
......@@ -2189,32 +2181,18 @@ func StructOf(fields []StructField) Type {
}
typ.ptrdata = typeptrdata(typ.common())
-if hashable {
-typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
-o := seed
-for _, ft := range typ.fields {
-pi := add(p, ft.offset(), "&x.field safe")
-o = ft.typ.hashfn(pi, o)
-}
-return o
-}
-} else {
-typ.hashfn = nil
-}
+typ.equal = nil
if comparable {
-typ.equalfn = func(p, q unsafe.Pointer) bool {
+typ.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
pi := add(p, ft.offset(), "&x.field safe")
qi := add(q, ft.offset(), "&x.field safe")
-if !ft.typ.equalfn(pi, qi) {
+if !ft.typ.equal(pi, qi) {
return false
}
}
return true
}
-} else {
-typ.equalfn = nil
}
switch {
......@@ -2322,6 +2300,7 @@ func ArrayOf(count int, elem Type) Type {
var iarray interface{} = [1]unsafe.Pointer{}
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype
array.tflag = typ.tflag & tflagRegularMemory
array.string = &s
// gccgo uses a different hash.
......@@ -2427,21 +2406,12 @@ func ArrayOf(count int, elem Type) Type {
array.ptrdata = array.size // overestimate but ok; must match program
}
-switch {
-case count == 1 && !ifaceIndir(typ):
-// array of 1 direct iface type can be direct
-array.kind |= kindDirectIface
-default:
-array.kind &^= kindDirectIface
-}
+etyp := typ.common()
esize := typ.size
-if typ.equalfn == nil {
-array.equalfn = nil
-} else {
-eequal := typ.equalfn
-array.equalfn = func(p, q unsafe.Pointer) bool {
+array.equal = nil
+if eequal := etyp.equal; eequal != nil {
+array.equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < count; i++ {
pi := arrayAt(p, i, esize, "i < count")
qi := arrayAt(q, i, esize, "i < count")
......@@ -2453,17 +2423,12 @@ func ArrayOf(count int, elem Type) Type {
}
}
-if typ.hashfn == nil {
-array.hashfn = nil
-} else {
-ehash := typ.hashfn
-array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
-o := seed
-for i := 0; i < count; i++ {
-o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
-}
-return o
-}
-}
+switch {
+case count == 1 && !ifaceIndir(typ):
+// array of 1 direct iface type can be direct
+array.kind |= kindDirectIface
+default:
+array.kind &^= kindDirectIface
+}
ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
......
......@@ -2543,6 +2543,9 @@ func typedmemmove(t *rtype, dst, src unsafe.Pointer)
//go:noescape
func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
//go:noescape
func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
......
......@@ -69,6 +69,9 @@ func memhash128(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 16)
}
// runtime variable to check if the processor we're running on
// actually supports the instructions used by the AES-based
// hash implementation.
var useAeshash bool
// in C code
......@@ -134,14 +137,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
return h
}
t := *(**_type)(tab)
-fn := t.hashfn
-if fn == nil {
+if t.equal == nil {
+// Check hashability here. We could do this check inside
+// typehash, but we want to report the topmost type in
+// the error text (e.g. in a struct with a field of slice type
+// we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
-return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
-return c1 * fn(a.data, h^c0)
+return c1 * typehash(t, a.data, h^c0)
}
}
......@@ -151,17 +157,74 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
if t == nil {
return h
}
-fn := t.hashfn
-if fn == nil {
+if t.equal == nil {
+// See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
-return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
-return c1 * fn(a.data, h^c0)
+return c1 * typehash(t, a.data, h^c0)
}
}
// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use for hashing either
// fixed functions (e.g. f32hash) or compiler-generated functions
// (e.g. for a type like struct { x, y string }). This implementation
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
return memhash(p, h, t.size)
}
switch t.kind & kindMask {
case kindFloat32:
return f32hash(p, h)
case kindFloat64:
return f64hash(p, h)
case kindComplex64:
return c64hash(p, h)
case kindComplex128:
return c128hash(p, h)
case kindString:
return strhash(p, h)
case kindInterface:
i := (*interfacetype)(unsafe.Pointer(t))
if len(i.methods) == 0 {
return nilinterhash(p, h)
}
return interhash(p, h)
case kindArray:
a := (*arraytype)(unsafe.Pointer(t))
for i := uintptr(0); i < a.len; i++ {
h = typehash(a.elem, add(p, i*a.elem.size), h)
}
return h
case kindStruct:
s := (*structtype)(unsafe.Pointer(t))
for _, f := range s.fields {
// TODO: maybe we could hash several contiguous fields all at once.
if f.name != nil && *f.name == "_" {
continue
}
h = typehash(f.typ, add(p, f.offset()), h)
}
return h
default:
// Should never happen, as typehash should only be called
// with comparable types.
panic(errorString("hash of unhashable type " + t.string()))
}
}
//go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return typehash(t, p, h)
}
func memequal0(p, q unsafe.Pointer) bool {
return true
}
......@@ -209,7 +272,7 @@ func efaceeq(x, y eface) bool {
if t == nil {
return true
}
eq := t.equalfn
eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
......@@ -230,7 +293,7 @@ func ifaceeq(x, y iface) bool {
if t != *(**_type)(y.tab) {
return false
}
eq := t.equalfn
eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
......@@ -251,7 +314,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
if xt != t {
return false
}
eq := t.equalfn
eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
......@@ -272,7 +335,7 @@ func ifaceefaceeq(x iface, y eface) bool {
if xt != y._type {
return false
}
eq := xt.equalfn
eq := xt.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + xt.string()))
}
......@@ -289,7 +352,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if x._type != t {
return false
}
eq := t.equalfn
eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
......
......@@ -421,18 +421,16 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
-hashfn(key, 0) // see issue 23734
+t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
-hash := hashfn(key, uintptr(h.hash0))
+hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -459,7 +457,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
......@@ -486,18 +484,16 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if msanenabled && h != nil {
msanread(key, t.key.size)
}
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
-hashfn(key, 0) // see issue 23734
+t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
-hash := hashfn(key, uintptr(h.hash0))
+hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -524,7 +520,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
......@@ -546,9 +542,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 {
return nil, nil
}
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
-hash := hashfn(key, uintptr(h.hash0))
+hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -575,7 +569,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
......@@ -625,11 +619,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
-hash := hashfn(key, uintptr(h.hash0))
+hash := t.hasher(key, uintptr(h.hash0))
-// Set hashWriting after calling alg.hash, since alg.hash may panic,
+// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
h.flags ^= hashWriting
......@@ -666,7 +658,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if !equalfn(key, k) {
if !t.key.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
......@@ -735,11 +727,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
-hashfn(key, 0) // see issue 23734
+t.hasher(key, 0) // see issue 23734
}
return
}
......@@ -747,9 +737,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
throw("concurrent map writes")
}
hash := hashfn(key, uintptr(h.hash0))
hash := t.hasher(key, uintptr(h.hash0))
// Set hashWriting after calling alg.hash, since alg.hash may panic,
// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting
......@@ -774,7 +764,7 @@ search:
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
if !equalfn(key, k2) {
if !t.key.equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
......@@ -925,8 +915,6 @@ func mapiternext(it *hiter) {
b := it.bptr
i := it.i
checkBucket := it.checkBucket
-hashfn := t.key.hashfn
-equalfn := t.key.equalfn
next:
if b == nil {
......@@ -980,10 +968,10 @@ next:
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
if t.reflexivekey() || equalfn(k, k) {
if t.reflexivekey() || t.key.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := hashfn(k, uintptr(h.hash0))
hash := t.hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
......@@ -1001,7 +989,7 @@ next:
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
!(t.reflexivekey() || equalfn(k, k)) {
!(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
......@@ -1238,8 +1226,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.key.hashfn(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
hash := t.hasher(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
......@@ -1333,16 +1321,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
}
}
-func ismapkey(t *_type) bool {
-return t.hashfn != nil
-}
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
if !ismapkey(t.key) {
if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
......@@ -1445,10 +1429,5 @@ func reflectlite_maplen(h *hmap) int {
return h.count
}
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
-return ismapkey(t)
-}
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
......@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) {
})
}
}
var BoolSink bool
func BenchmarkMapInterfaceString(b *testing.B) {
m := map[interface{}]bool{}
for i := 0; i < 100; i++ {
m[fmt.Sprintf("%d", i)] = true
}
key := (interface{})("A")
b.ResetTimer()
for i := 0; i < b.N; i++ {
BoolSink = m[key]
}
}
func BenchmarkMapInterfacePtr(b *testing.B) {
m := map[interface{}]bool{}
for i := 0; i < 100; i++ {
i := i
m[&i] = true
}
key := new(int)
b.ResetTimer()
for i := 0; i < b.N; i++ {
BoolSink = m[key]
}
}
......@@ -33,7 +33,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -73,7 +73,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -108,9 +108,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
......@@ -198,9 +198,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
......@@ -289,9 +289,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
......@@ -408,7 +408,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.key.hashfn(k, uintptr(h.hash0))
hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
......
......@@ -33,7 +33,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -73,7 +73,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -108,9 +108,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
......@@ -198,9 +198,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
......@@ -289,9 +289,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
throw("concurrent map writes")
}
hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
......@@ -408,7 +408,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.key.hashfn(k, uintptr(h.hash0))
hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
......
......@@ -83,7 +83,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
dohash:
hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -178,7 +178,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -218,9 +218,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
throw("concurrent map writes")
}
key := stringStructOf(&s)
hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
......@@ -314,9 +314,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
}
key := stringStructOf(&ky)
hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
......@@ -436,7 +436,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.key.hashfn(k, uintptr(h.hash0))
hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
......
......@@ -1172,3 +1172,64 @@ func TestMapTombstones(t *testing.T) {
}
runtime.MapTombstoneCheck(m)
}
type canString int
func (c canString) String() string {
return fmt.Sprintf("%d", int(c))
}
func TestMapInterfaceKey(t *testing.T) {
// Test all the special cases in runtime.typehash.
type GrabBag struct {
f32 float32
f64 float64
c64 complex64
c128 complex128
s string
i0 interface{}
i1 interface {
String() string
}
a [4]string
}
m := map[interface{}]bool{}
// Put a bunch of data in m, so that a bad hash is likely to
// lead to a bad bucket, which will lead to a missed lookup.
for i := 0; i < 1000; i++ {
m[i] = true
}
m[GrabBag{f32: 1.0}] = true
if !m[GrabBag{f32: 1.0}] {
panic("f32 not found")
}
m[GrabBag{f64: 1.0}] = true
if !m[GrabBag{f64: 1.0}] {
panic("f64 not found")
}
m[GrabBag{c64: 1.0i}] = true
if !m[GrabBag{c64: 1.0i}] {
panic("c64 not found")
}
m[GrabBag{c128: 1.0i}] = true
if !m[GrabBag{c128: 1.0i}] {
panic("c128 not found")
}
m[GrabBag{s: "foo"}] = true
if !m[GrabBag{s: "foo"}] {
panic("string not found")
}
m[GrabBag{i0: "foo"}] = true
if !m[GrabBag{i0: "foo"}] {
panic("interface{} not found")
}
m[GrabBag{i1: canString(5)}] = true
if !m[GrabBag{i1: canString(5)}] {
panic("interface{String() string} not found")
}
m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
panic("array not found")
}
}
......@@ -12,18 +12,32 @@ import (
"unsafe"
)
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
// go/types.cc
// reflect/type.go
// internal/reflectlite/type.go
type tflag uint8
const (
tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
)
type _type struct {
size uintptr
ptrdata uintptr
hash uint32
-kind uint8
-align int8
+tflag tflag
+align uint8
fieldAlign uint8
-_ uint8
-hashfn func(unsafe.Pointer, uintptr) uintptr
-equalfn func(unsafe.Pointer, unsafe.Pointer) bool
+kind uint8
+// function for comparing objects of this type
+// (ptr to object A, ptr to object B) -> ==?
+equal func(unsafe.Pointer, unsafe.Pointer) bool
// gcdata stores the GC type data for the garbage collector.
// If the KindGCProg bit is set in kind, gcdata is a GC program.
// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
gcdata *byte
_string *string
*uncommontype
......@@ -74,10 +88,12 @@ type interfacetype struct {
}
type maptype struct {
typ _type
key *_type
elem *_type
bucket *_type // internal type representing a hash bucket
+// function for hashing keys (ptr to key, seed) -> hash
+hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket
......
......@@ -36,8 +36,6 @@ static const String reflection_string =
const byte unsafe_Pointer_gc[] = { 1 };
-extern const FuncVal runtime_pointerhash_descriptor
-__asm__ (GOSYM_PREFIX "runtime.pointerhash..f");
extern const FuncVal runtime_pointerequal_descriptor
__asm__ (GOSYM_PREFIX "runtime.pointerequal..f");
......@@ -49,17 +47,15 @@ const struct _type unsafe_Pointer =
sizeof (void *),
/* hash */
78501163U,
-/* kind */
-kindUnsafePointer | kindDirectIface,
+/* tflag */
+tflagRegularMemory,
/* align */
__alignof (void *),
/* fieldAlign */
offsetof (struct field_align, p) - 1,
-/* _ */
-0,
-/* hashfn */
-&runtime_pointerhash_descriptor,
-/* equalfn */
+/* kind */
+kindUnsafePointer | kindDirectIface,
+/* equal */
&runtime_pointerequal_descriptor,
/* gcdata */
unsafe_Pointer_gc,
......@@ -101,16 +97,14 @@ const struct ptrtype pointer_unsafe_Pointer =
sizeof (void *),
/* hash */
1256018616U,
-/* kind */
-kindPtr | kindDirectIface,
+/* tflag */
+tflagRegularMemory,
/* align */
__alignof (void *),
/* fieldAlign */
offsetof (struct field_align, p) - 1,
-/* _ */
-0,
-/*_hashfn */
-&runtime_pointerhash_descriptor,
+/* kind */
+kindPtr | kindDirectIface,
/* equalfn */
&runtime_pointerequal_descriptor,
/* gcdata */
......
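
Beyond the patch itself, here is a hypothetical usage sketch (not part of
the change) of the reflect path that now funnels through typehash: a map
type built with reflect.MapOf gets a hasher closure wrapping
typehash(ktyp, p, seed), since reflect-constructed key types have no
compiler-generated hash function.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build map[struct{ A, B string }]int dynamically; its keys are
	// hashed by the runtime's typehash via the mt.hasher closure.
	k := reflect.TypeOf(struct{ A, B string }{})
	e := reflect.TypeOf(0)
	m := reflect.MakeMap(reflect.MapOf(k, e))
	key := reflect.ValueOf(struct{ A, B string }{"x", "y"})
	m.SetMapIndex(key, reflect.ValueOf(7)) // insertion hashes key via typehash
	fmt.Println(m.MapIndex(key))           // 7
}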