Commit 6b223191 by Benjamin Kosnik, committed by Benjamin Kosnik

profiler_container_size.h: Fix include guard, formatting fixes.

2009-12-09  Benjamin Kosnik  <bkoz@redhat.com>

	* include/profile/impl/profiler_container_size.h: Fix include
	guard, formatting fixes.
	* include/profile/impl/profiler_vector_size.h: Same.
	* include/profile/impl/profiler_hash_func.h: Same.
	* include/profile/impl/profiler_trace.h: Same.
	* include/profile/impl/profiler_vector_to_list.h: Same.
	* include/profile/impl/profiler.h: Same.
	* include/profile/impl/profiler_state.h: Same.
	* include/profile/impl/profiler_map_to_unordered_map.h: Same.
	* include/profile/impl/profiler_hashtable_size.h: Same.
	* include/profile/impl/profiler_node.h: Same.

From-SVN: r155123
parent 14aa6352
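Every header in this patch gets the same include-guard rename: the old PROFCXX_*_H__ macros give way to _GLIBCXX_PROFILE_* names, matching the _GLIBCXX_* guard convention used elsewhere in libstdc++. A minimal sketch of the pattern, using the macro names of the first header in the diff below; the elided body is only a placeholder:

// Old guard style (removed by this patch):
//   #ifndef PROFCXX_PROFILER_H__
//   #define PROFCXX_PROFILER_H__ 1
//   ...
//   #endif // PROFCXX_PROFILER_H__

// New libstdc++-style guard:
#ifndef _GLIBCXX_PROFILE_PROFILER_H
#define _GLIBCXX_PROFILE_PROFILER_H 1

// ... header contents ...

#endif // _GLIBCXX_PROFILE_PROFILER_H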
include/profile/impl/profiler.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_H__
-#define PROFCXX_PROFILER_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_H
+#define _GLIBCXX_PROFILE_PROFILER_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstddef>

@@ -311,4 +311,4 @@ namespace __gnu_profile
 #include "profile/impl/profiler_vector_size.h"
 #include "profile/impl/profiler_vector_to_list.h"

-#endif // PROFCXX_PROFILER_H__
+#endif // _GLIBCXX_PROFILE_PROFILER_H
include/profile/impl/profiler_container_size.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_CONTAINER_SIZE_H__
-#define PROFCXX_PROFILER_CONTAINER_SIZE_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_CONTAINER_SIZE_H
+#define _GLIBCXX_PROFILE_PROFILER_CONTAINER_SIZE_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdlib>

@@ -53,198 +53,208 @@
namespace __gnu_profile
{
  /** @brief A container size instrumentation line in the object table. */
  class __container_size_info: public __object_info_base
  {
  public:
    __container_size_info();
    __container_size_info(const __container_size_info& __o);
    __container_size_info(__stack_t __stack, size_t __num);
    virtual ~__container_size_info() { }

    void __write(FILE* f) const;
    float __magnitude() const { return static_cast<float>(_M_cost); }
    const char* __advice() const;

    void __merge(const __container_size_info& __o);

    // Call if a container is destructed or cleaned.
    void __destruct(size_t __num, size_t __inum);

    // Estimate the cost of resize/rehash.
    float __resize_cost(size_t __from, size_t __to) { return __from; }

    // Call if container is resized.
    void __resize(size_t __from, size_t __to);

  private:
    size_t _M_init;
    size_t _M_max;       // Range of # buckets.
    size_t _M_min;
    size_t _M_total;
    size_t _M_item_min;  // Range of # items.
    size_t _M_item_max;
    size_t _M_item_total;
    size_t _M_count;
    size_t _M_resize;
    size_t _M_cost;
  };

  inline const char*
  __container_size_info::__advice() const
  {
    const size_t __max_chars_size_t_printed = 20;
    const char* __message_pattern =
      "change initial container size from %d to %d";
    size_t __message_size = (strlen(__message_pattern)
                             + 2 * __max_chars_size_t_printed
                             - 2 * 2);
    char* __message = new char[__message_size + 1];
    if (_M_init < _M_item_max)
      snprintf(__message, __message_size, __message_pattern, _M_init,
               _M_item_max);
    else
      snprintf(__message, __message_size, __message_pattern, _M_init,
               _M_item_max);
    return __message;
  }

  inline void
  __container_size_info::__destruct(size_t __num, size_t __inum)
  {
    _M_max = __max(_M_max, __num);
    _M_item_max = __max(_M_item_max, __inum);
    if (_M_min == 0)
      {
        _M_min = __num;
        _M_item_min = __inum;
      }
    else
      {
        _M_min = __min(_M_min, __num);
        _M_item_min = __min(_M_item_min, __inum);
      }
    _M_total += __num;
    _M_item_total += __inum;
    _M_count += 1;
  }

  inline void
  __container_size_info::__resize(size_t __from, size_t __to)
  {
    _M_cost += this->__resize_cost(__from, __to);
    _M_resize += 1;
    _M_max = __max(_M_max, __to);
  }

  inline void
  __container_size_info::__merge(const __container_size_info& __o)
  {
    _M_init = __max(_M_init, __o._M_init);
    _M_max = __max(_M_max, __o._M_max);
    _M_item_max = __max(_M_item_max, __o._M_item_max);
    _M_min = __min(_M_min, __o._M_min);
    _M_item_min = __min(_M_item_min, __o._M_item_min);
    _M_total += __o._M_total;
    _M_item_total += __o._M_item_total;
    _M_count += __o._M_count;
    _M_cost += __o._M_cost;
    _M_resize += __o._M_resize;
  }

  inline __container_size_info::__container_size_info()
  : _M_init(0), _M_max(0), _M_item_max(0), _M_min(0), _M_item_min(0),
    _M_total(0), _M_item_total(0), _M_cost(0), _M_count(0), _M_resize(0)
  { }

  inline __container_size_info::__container_size_info(__stack_t __stack,
                                                      size_t __num)
  : __object_info_base(__stack), _M_init(0), _M_max(0), _M_item_max(0),
    _M_min(0), _M_item_min(0), _M_total(0), _M_item_total(0), _M_cost(0),
    _M_count(0), _M_resize(0)
  {
    _M_init = _M_max = __num;
    _M_item_min = _M_item_max = _M_item_total = _M_total = 0;
    _M_min = 0;
    _M_count = 0;
    _M_resize = 0;
  }

  inline __container_size_info::__container_size_info(const __container_size_info& __o)
  : __object_info_base(__o)
  {
    _M_init = __o._M_init;
    _M_max = __o._M_max;
    _M_item_max = __o._M_item_max;
    _M_min = __o._M_min;
    _M_item_min = __o._M_item_min;
    _M_total = __o._M_total;
    _M_item_total = __o._M_item_total;
    _M_cost = __o._M_cost;
    _M_count = __o._M_count;
    _M_resize = __o._M_resize;
  }

  /** @brief A container size instrumentation line in the stack table. */
  class __container_size_stack_info: public __container_size_info
  {
  public:
    __container_size_stack_info(const __container_size_info& __o)
    : __container_size_info(__o) { }
  };

  /** @brief Container size instrumentation trace producer. */
  class __trace_container_size
  : public __trace_base<__container_size_info, __container_size_stack_info>
  {
  public:
    __trace_container_size()
    : __trace_base<__container_size_info, __container_size_stack_info>() { };

    ~__trace_container_size() { }

    // Insert a new node at construct with object, callstack and initial size.
    void __insert(const __object_t __obj, __stack_t __stack, size_t __num);

    // Call at destruction/clean to set container final size.
    void __destruct(const void* __obj, size_t __num, size_t __inum);
    void __construct(const void* __obj, size_t __inum);

    // Call at resize to set resize/cost information.
    void __resize(const void* __obj, int __from, int __to);
  };

  inline void
  __trace_container_size::__insert(const __object_t __obj,
                                   __stack_t __stack, size_t __num)
  { __add_object(__obj, __container_size_info(__stack, __num)); }

  inline void
  __container_size_info::__write(FILE* __f) const
  {
    fprintf(__f, "%Zu %Zu %Zu %Zu %Zu %Zu %Zu %Zu %Zu %Zu\n",
            _M_init, _M_count, _M_cost, _M_resize, _M_min, _M_max, _M_total,
            _M_item_min, _M_item_max, _M_item_total);
  }

  inline void
  __trace_container_size::__destruct(const void* __obj, size_t __num,
                                     size_t __inum)
  {
    if (!__is_on()) return;

    __object_t __obj_handle = static_cast<__object_t>(__obj);

    __container_size_info* __object_info = __get_object_info(__obj_handle);
    if (!__object_info)
      return;

    __object_info->__destruct(__num, __inum);
    __retire_object(__obj_handle);
  }

  inline void
  __trace_container_size::__resize(const void* __obj, int __from, int __to)
  {
    if (!__is_on()) return;

    __container_size_info* __object_info = __get_object_info(__obj);
    if (!__object_info)
      return;

    __object_info->__resize(__from, __to);
  }

} // namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_CONTAINER_SIZE_H */
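The __advice() string above reports the initial size a container was constructed with and the largest element count it reached, so acting on it usually means presizing the container. A small user-side sketch of what that looks like; the container type and the figure 4096 are invented for illustration, not taken from any profiler run:

#include <vector>

void presize_after_advice()
{
  // Suppose the report said: "change initial container size from 0 to 4096".
  std::vector<int> v;
  v.reserve(4096);               // presize to the observed maximum
  for (int i = 0; i < 4096; ++i)
    v.push_back(i);              // no reallocation, so no resize cost accrues
}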
include/profile/impl/profiler_hash_func.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_HASH_FUNC_H__
-#define PROFCXX_PROFILER_HASH_FUNC_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_HASH_FUNC_H
+#define _GLIBCXX_PROFILE_PROFILER_HASH_FUNC_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdlib>

@@ -52,141 +52,143 @@
namespace __gnu_profile
{
  /** @brief A hash performance instrumentation line in the object table. */
  class __hashfunc_info: public __object_info_base
  {
  public:
    __hashfunc_info() :_M_longest_chain(0), _M_accesses(0), _M_hops(0) { }
    __hashfunc_info(const __hashfunc_info& o);
    __hashfunc_info(__stack_t __stack)
    : __object_info_base(__stack), _M_longest_chain(0),
      _M_accesses(0), _M_hops(0) { }
    virtual ~__hashfunc_info() { }

    void __merge(const __hashfunc_info& __o);
    void __destruct(size_t __chain, size_t __accesses, size_t __hops);
    void __write(FILE* __f) const;
    float __magnitude() const { return static_cast<float>(_M_hops); }
    const char* __advice() const { return "change hash function"; }

  private:
    size_t _M_longest_chain;
    size_t _M_accesses;
    size_t _M_hops;
  };

  inline __hashfunc_info::__hashfunc_info(const __hashfunc_info& __o)
  : __object_info_base(__o)
  {
    _M_longest_chain = __o._M_longest_chain;
    _M_accesses = __o._M_accesses;
    _M_hops = __o._M_hops;
  }

  inline void
  __hashfunc_info::__merge(const __hashfunc_info& __o)
  {
    _M_longest_chain = __max(_M_longest_chain, __o._M_longest_chain);
    _M_accesses += __o._M_accesses;
    _M_hops += __o._M_hops;
  }

  inline void
  __hashfunc_info::__destruct(size_t __chain, size_t __accesses, size_t __hops)
  {
    _M_longest_chain = __max(_M_longest_chain, __chain);
    _M_accesses += __accesses;
    _M_hops += __hops;
  }

  /** @brief A hash performance instrumentation line in the stack table. */
  class __hashfunc_stack_info: public __hashfunc_info
  {
  public:
    __hashfunc_stack_info(const __hashfunc_info& __o) : __hashfunc_info(__o) { }
  };

  /** @brief Hash performance instrumentation producer. */
  class __trace_hash_func
  : public __trace_base<__hashfunc_info, __hashfunc_stack_info>
  {
  public:
    __trace_hash_func();
    ~__trace_hash_func() { }

    // Insert a new node at construct with object, callstack and initial size.
    void __insert(__object_t __obj, __stack_t __stack);

    // Call at destruction/clean to set container final size.
    void __destruct(const void* __obj, size_t __chain, size_t __accesses,
                    size_t __hops);
  };

  inline __trace_hash_func::__trace_hash_func()
  : __trace_base<__hashfunc_info, __hashfunc_stack_info>()
  { __id = "hash-distr"; }

  inline void
  __trace_hash_func::__insert(__object_t __obj, __stack_t __stack)
  { __add_object(__obj, __hashfunc_info(__stack)); }

  inline void
  __hashfunc_info::__write(FILE* __f) const
  { fprintf(__f, "%Zu %Zu %Zu\n", _M_hops, _M_accesses, _M_longest_chain); }

  inline void
  __trace_hash_func::__destruct(const void* __obj, size_t __chain,
                                size_t __accesses, size_t __hops)
  {
    if (!__is_on())
      return;

    // First find the item from the live objects and update the informations.
    __hashfunc_info* __objs = __get_object_info(__obj);
    if (!__objs)
      return;

    __objs->__destruct(__chain, __accesses, __hops);
    __retire_object(__obj);
  }

  // Initialization and report.
  inline void
  __trace_hash_func_init()
  { __tables<0>::_S_hash_func = new __trace_hash_func(); }

  inline void
  __trace_hash_func_report(FILE* __f, __warning_vector_t& __warnings)
  {
    if (__tables<0>::_S_hash_func)
      {
        __tables<0>::_S_hash_func->__collect_warnings(__warnings);
        __tables<0>::_S_hash_func->__write(__f);
      }
  }

  // Implementations of instrumentation hooks.
  inline void
  __trace_hash_func_construct(const void* __obj)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_hash_func->__insert(__obj, __get_stack());
  }

  inline void
  __trace_hash_func_destruct(const void* __obj, size_t __chain,
                             size_t __accesses, size_t __hops)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_hash_func->__destruct(__obj, __chain, __accesses, __hops);
  }

} // namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_HASH_FUNC_H */
include/profile/impl/profiler_hashtable_size.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_HASHTABLE_SIZE_H__
-#define PROFCXX_PROFILER_HASHTABLE_SIZE_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_HASHTABLE_SIZE_H
+#define _GLIBCXX_PROFILE_PROFILER_HASHTABLE_SIZE_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdlib>

@@ -54,62 +54,58 @@
namespace __gnu_profile
{
  /** @brief Hashtable size instrumentation trace producer. */
  class __trace_hashtable_size : public __trace_container_size
  {
  public:
    __trace_hashtable_size() : __trace_container_size()
    { __id = "hashtable-size"; }
  };

  // Initialization and report.
  inline void
  __trace_hashtable_size_init()
  { __tables<0>::_S_hashtable_size = new __trace_hashtable_size(); }

  inline void
  __trace_hashtable_size_report(FILE* __f, __warning_vector_t& __warnings)
  {
    if (__tables<0>::_S_hashtable_size)
      {
        __tables<0>::_S_hashtable_size->__collect_warnings(__warnings);
        __tables<0>::_S_hashtable_size->__write(__f);
      }
  }

  // Implementations of instrumentation hooks.
  inline void
  __trace_hashtable_size_construct(const void* __obj, size_t __num)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_hashtable_size->__insert(__obj, __get_stack(), __num);
  }

  inline void
  __trace_hashtable_size_destruct(const void* __obj, size_t __num,
                                  size_t __inum)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_hashtable_size->__destruct(__obj, __num, __inum);
  }

  inline void
  __trace_hashtable_size_resize(const void* __obj, size_t __from, size_t __to)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_hashtable_size->__resize(__obj, __from, __to);
  }

} // namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_HASHTABLE_SIZE_H */
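Taken together, the three hooks above are what an instrumented hash container would call: once at construction with the initial bucket count, once per rehash, and once at destruction with the final bucket and element counts. A hypothetical call sequence, where the object pointer and every count are made up for illustration:

void example_hashtable_size_hooks(const void* table)
{
  __gnu_profile::__trace_hashtable_size_construct(table, 10);     // created with 10 buckets
  __gnu_profile::__trace_hashtable_size_resize(table, 10, 97);    // rehashed from 10 to 97 buckets
  __gnu_profile::__trace_hashtable_size_destruct(table, 97, 60);  // destroyed with 97 buckets, 60 elements
}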
include/profile/impl/profiler_map_to_unordered_map.h

@@ -34,8 +34,8 @@
 // Written by Silvius Rus.

-#ifndef PROFCXX_PROFILER_MAP_TO_UNORDERED_MAP_H__
-#define PROFCXX_PROFILER_MAP_TO_UNORDERED_MAP_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_MAP_TO_UNORDERED_MAP_H
+#define _GLIBCXX_PROFILE_PROFILER_MAP_TO_UNORDERED_MAP_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdlib>

@@ -52,254 +52,275 @@
namespace __gnu_profile
{
  // Cost model.
  // Map operations:
  //   - insert: 1.5 * log(size)
  //   - erase: 1.5 * log(size)
  //   - find: log(size)
  //   - iterate: 2.3
  // Unordered map operations:
  //   - insert: 12
  //   - erase: 12
  //   - find: 10
  //   - iterate: 1.7
  // XXX: this must be taken from the machine model instead.
  const float __map_insert_cost_factor = 1.5;
  const float __map_erase_cost_factor = 1.5;
  const float __map_find_cost_factor = 1;
  const float __map_iterate_cost = 2.3;

  const float __umap_insert_cost = 12.0;
  const float __umap_erase_cost = 12.0;
  const float __umap_find_cost = 10.0;
  const float __umap_iterate_cost = 1.7;

  inline int
  __log2(size_t __size)
  {
    int __bit_count = sizeof(size_t) - 1;
    for (; __bit_count >= 0; --__bit_count)
      {
        if ((2 << __bit_count) & __size)
          return __bit_count;
      }
    return 0;
  }

  inline float
  __map_insert_cost(size_t __size)
  { return __map_insert_cost_factor * static_cast<float>(__log2(__size)); }

  inline float
  __map_erase_cost(size_t __size)
  { return __map_erase_cost_factor * static_cast<float>(__log2(__size)); }

  inline float
  __map_find_cost(size_t __size)
  { return __map_find_cost_factor * static_cast<float>(__log2(__size)); }

  /** @brief A map-to-unordered_map instrumentation line in the object table. */
  class __map2umap_info: public __object_info_base
  {
  public:
    __map2umap_info()
    : _M_insert(0), _M_erase(0), _M_find(0), _M_iterate(0),
      _M_map_cost(0.0), _M_umap_cost(0.0), _M_valid(true) { }

    __map2umap_info(__stack_t __stack)
    : __object_info_base(__stack), _M_insert(0), _M_erase(0), _M_find(0),
      _M_iterate(0), _M_map_cost(0.0), _M_umap_cost(0.0), _M_valid(true) { }

    virtual ~__map2umap_info() { }

    __map2umap_info(const __map2umap_info& o);

    void __merge(const __map2umap_info& o);
    void __write(FILE* __f) const;
    float __magnitude() const { return _M_map_cost - _M_umap_cost; }
    const char* __advice() const;

    void __record_insert(size_t __size, size_t __count);
    void __record_erase(size_t __size, size_t __count);
    void __record_find(size_t __size);
    void __record_iterate(size_t __count);
    void __record_invalidate();

  private:
    size_t _M_insert;
    size_t _M_erase;
    size_t _M_find;
    size_t _M_iterate;
    float _M_umap_cost;
    float _M_map_cost;
    bool _M_valid;
  };

  inline __map2umap_info::__map2umap_info(const __map2umap_info& __o)
  : __object_info_base(__o),
    _M_insert(__o._M_insert),
    _M_erase(__o._M_erase),
    _M_find(__o._M_find),
    _M_iterate(__o._M_iterate),
    _M_map_cost(__o._M_map_cost),
    _M_umap_cost(__o._M_umap_cost),
    _M_valid(__o._M_valid)
  { }

  inline const char*
  __map2umap_info::__advice() const
  { return "change std::map to std::unordered_map"; }

  inline void
  __map2umap_info::__merge(const __map2umap_info& __o)
  {
    _M_insert += __o._M_insert;
    _M_erase += __o._M_erase;
    _M_find += __o._M_find;
    _M_map_cost += __o._M_map_cost;
    _M_umap_cost += __o._M_umap_cost;
    _M_valid &= __o._M_valid;
  }

  inline void
  __map2umap_info:: __record_insert(size_t __size, size_t __count)
  {
    _M_insert += __count;
    _M_map_cost += __count * __map_insert_cost(__size);
    _M_umap_cost += __count * __umap_insert_cost;
  }

  inline void
  __map2umap_info:: __record_erase(size_t __size, size_t __count)
  {
    _M_erase += __count;
    _M_map_cost += __count * __map_erase_cost(__size);
    _M_umap_cost += __count * __umap_erase_cost;
  }

  inline void
  __map2umap_info:: __record_find(size_t __size)
  {
    _M_find += 1;
    _M_map_cost += __map_find_cost(__size);
    _M_umap_cost += __umap_find_cost;
  }

  inline void
  __map2umap_info:: __record_iterate(size_t __count)
  {
    _M_iterate += __count;
    _M_map_cost += __count * __map_iterate_cost;
    _M_umap_cost += __count * __umap_iterate_cost;
  }

  inline void
  __map2umap_info:: __record_invalidate()
  {
    _M_valid = false;
  }

  inline void
  __map2umap_info::__write(FILE* __f) const
  {
    fprintf(__f, "%Zu %Zu %Zu %Zu %.0f %.0f %s\n",
            _M_insert, _M_erase, _M_find, _M_iterate, _M_map_cost, _M_umap_cost,
            _M_valid ? "valid" : "invalid");
  }

  /** @brief A map-to-unordered_map instrumentation line in the stack table. */
  class __map2umap_stack_info: public __map2umap_info
  {
  public:
    __map2umap_stack_info(const __map2umap_info& o) : __map2umap_info(o) { }
  };

  /** @brief Map-to-unordered_map instrumentation producer. */
  class __trace_map2umap
  : public __trace_base<__map2umap_info, __map2umap_stack_info>
  {
  public:
    __trace_map2umap();
  };

  inline __trace_map2umap::__trace_map2umap()
  : __trace_base<__map2umap_info, __map2umap_stack_info>()
  { __id = "map-to-unordered-map"; }

  inline void
  __trace_map_to_unordered_map_init()
  { __tables<0>::_S_map2umap = new __trace_map2umap(); }

  inline void
  __trace_map_to_unordered_map_report(FILE* __f, __warning_vector_t& __warnings)
  {
    if (__tables<0>::_S_map2umap)
      {
        __tables<0>::_S_map2umap->__collect_warnings(__warnings);
        __tables<0>::_S_map2umap->__write(__f);
      }
  }

  // Implementations of instrumentation hooks.
  inline void
  __trace_map_to_unordered_map_construct(const void* __obj)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_map2umap->__add_object(__obj,
                                           __map2umap_info(__get_stack()));
  }

  inline void
  __trace_map_to_unordered_map_destruct(const void* __obj)
  {
    if (!__profcxx_init())
      return;

    __tables<0>::_S_map2umap->__retire_object(__obj);
  }

  inline void
  __trace_map_to_unordered_map_insert(const void* __obj, size_t __size,
                                      size_t __count)
  {
    if (!__profcxx_init())
      return;

    __map2umap_info* __info = __tables<0>::_S_map2umap->__get_object_info(__obj);

    if (__info)
      __info->__record_insert(__size, __count);
  }

  inline void
  __trace_map_to_unordered_map_erase(const void* __obj, size_t __size,
                                     size_t __count)
  {
    if (!__profcxx_init())
      return;

    __map2umap_info* __info = __tables<0>::_S_map2umap->__get_object_info(__obj);

    if (__info)
      __info->__record_erase(__size, __count);
  }

  inline void
  __trace_map_to_unordered_map_find(const void* __obj, size_t __size)
  {
    if (!__profcxx_init())
      return;

    __map2umap_info* __info = __tables<0>::_S_map2umap->__get_object_info(__obj);

    if (__info)
      __info->__record_find(__size);
  }

  inline void
  __trace_map_to_unordered_map_iterate(const void* __obj, size_t __count)
  {
    if (!__profcxx_init())
      return;

    __map2umap_info* __info = __tables<0>::_S_map2umap->__get_object_info(__obj);

    if (__info)
      __info->__record_iterate(__count);
  }

  inline void
  __trace_map_to_unordered_map_invalidate(const void* __obj)
  {
    if (!__profcxx_init())
      return;

    __map2umap_info* __info = __tables<0>::_S_map2umap->__get_object_info(__obj);

    if (__info)
      __info->__record_invalidate();
  }

} // namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_MAP_TO_UNORDERED_MAP_H */
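The constants above make the map versus unordered_map trade-off easy to check by hand. The sketch below redoes the same arithmetic the __record_* functions accumulate, with the cost constants copied from this header and a purely hypothetical workload:

#include <cstdio>

// Per-operation costs mirroring the constants declared in this header
// (map insert/find scale with log2(size); unordered_map costs are flat).
static const float map_insert_factor = 1.5;   // __map_insert_cost_factor
static const float map_find_factor   = 1.0;   // __map_find_cost_factor
static const float umap_insert_cost  = 12.0;  // __umap_insert_cost
static const float umap_find_cost    = 10.0;  // __umap_find_cost

int main()
{
  // Hypothetical workload: 100000 inserts and 1000000 finds on a container
  // holding roughly 100000 elements, so log2(size) is about 17.
  const float inserts = 100000, finds = 1000000, lg = 17;

  float map_cost  = inserts * map_insert_factor * lg
                    + finds * map_find_factor * lg;
  float umap_cost = inserts * umap_insert_cost + finds * umap_find_cost;

  // __magnitude() in the header is map cost minus unordered_map cost; a
  // positive value is what triggers the "change std::map to
  // std::unordered_map" advice.
  std::printf("map: %.0f  unordered_map: %.0f  difference: %.0f\n",
              map_cost, umap_cost, map_cost - umap_cost);
  return 0;
}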
include/profile/impl/profiler_node.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_NODE_H__
-#define PROFCXX_PROFILER_NODE_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_NODE_H
+#define _GLIBCXX_PROFILE_PROFILER_NODE_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdio>

@@ -53,120 +53,116 @@
namespace __gnu_profile
{
  typedef const void* __object_t;
  typedef void* __instruction_address_t;
  typedef std::_GLIBCXX_STD_PR::vector<__instruction_address_t> __stack_npt;
  typedef __stack_npt* __stack_t;

  size_t __stack_max_depth();

  inline __stack_t __get_stack()
  {
#if defined HAVE_EXECINFO_H
    size_t __max_depth = __stack_max_depth();
    if (__max_depth == 0)
      return NULL;
    __stack_npt __buffer(__max_depth);
    int __depth = backtrace(&__buffer[0], __max_depth);
    __stack_t __stack = new __stack_npt(__depth);
    memcpy(&(*__stack)[0], &__buffer[0], __depth * sizeof(__object_t));
    return __stack;
#else
    return NULL;
#endif
  }

  inline size_t __size(const __stack_t& __stack)
  {
    if (!__stack)
      return 0;
    else
      return __stack->size();
  }

  inline void __write(FILE* __f, const __stack_t __stack)
  {
    if (!__stack)
      return;

    __stack_npt::const_iterator __it;
    for (__it = __stack->begin(); __it != __stack->end(); ++__it)
      fprintf(__f, "%p ", *__it);
  }

  /** @brief Hash function for summary trace using call stack as index. */
  class __stack_hash
  {
  public:
    size_t operator()(const __stack_t __s) const
    {
      if (!__s)
        return 0;

      uintptr_t __index = 0;
      __stack_npt::const_iterator __it;
      for (__it = __s->begin(); __it != __s->end(); ++__it)
        {
          __index += reinterpret_cast<uintptr_t>(*__it);
        }
      return __index;
    }

    bool operator() (const __stack_t __stack1, const __stack_t __stack2) const
    {
      if (!__stack1 && !__stack2) return true;
      if (!__stack1 || !__stack2) return false;
      if (__stack1->size() != __stack2->size()) return false;

      size_t __byte_size = __stack1->size() * sizeof(__stack_npt::value_type);
      return memcmp(&(*__stack1)[0], &(*__stack2)[0], __byte_size) == 0;
    }
  };

  /** @brief Base class for a line in the object table. */
  class __object_info_base
  {
  public:
    __object_info_base() { }
    __object_info_base(__stack_t __stack);
    __object_info_base(const __object_info_base& o);
    virtual ~__object_info_base() { }
    bool __is_valid() const { return _M_valid; }
    __stack_t __stack() const { return _M_stack; }
    virtual void __write(FILE* f) const = 0;

  protected:
    __stack_t _M_stack;
    bool _M_valid;
  };

  inline __object_info_base::__object_info_base(__stack_t __stack)
  {
    _M_stack = __stack;
    _M_valid = true;
  }

  inline __object_info_base::__object_info_base(const __object_info_base& __o)
  {
    _M_stack = __o._M_stack;
    _M_valid = __o._M_valid;
  }

  /** @brief Base class for a line in the stack table. */
  template<typename __object_info>
    class __stack_info_base
    {
    public:
      __stack_info_base() { }
      __stack_info_base(const __object_info& __info) = 0;
      virtual ~__stack_info_base() { }
      void __merge(const __object_info& __info) = 0;
      virtual float __magnitude() const = 0;
      virtual const char* __get_id() const = 0;
    };

} // namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_NODE_H */
include/profile/impl/profiler_state.h

@@ -34,8 +34,8 @@
 // Written by Lixia Liu and Silvius Rus.

-#ifndef PROFCXX_PROFILER_STATE_H__
-#define PROFCXX_PROFILER_STATE_H__ 1
+#ifndef _GLIBCXX_PROFILE_PROFILER_STATE_H
+#define _GLIBCXX_PROFILE_PROFILER_STATE_H 1

 #ifdef __GXX_EXPERIMENTAL_CXX0X__
 #include <cstdio>

@@ -45,63 +45,64 @@
namespace __gnu_profile
{
  /** @brief Profiling mode on/off state. */
  template<int _Unused=0>
    class __state
    {
    private:
      enum __state_type { __ON, __OFF, __INVALID };
      __state_type _M_state;

    public:
      static __state<_Unused>* _S_diag_state;

      __state() : _M_state(__INVALID) { }
      ~__state() { }

      bool __is_on() { return _M_state == __ON; }
      bool __is_off() { return _M_state == __OFF; }
      bool __is_invalid() { return _M_state == __INVALID; }
      void __turn_on() { _M_state = __ON; }
      void __turn_off() { _M_state = __OFF; }
    };

  template<int _Unused>
    __state<_Unused>* __state<_Unused>::_S_diag_state = NULL;

  inline bool
  __is_on()
  {
    return __state<0>::_S_diag_state && __state<0>::_S_diag_state->__is_on();
  }

  inline bool
  __is_off()
  {
    return __state<0>::_S_diag_state && __state<0>::_S_diag_state->__is_off();
  }

  inline bool
  __is_invalid()
  {
    return (!__state<0>::_S_diag_state || __state<0>::_S_diag_state->__is_invalid());
  }

  inline void
  __turn_on()
  {
    if (!__state<0>::_S_diag_state)
      __state<0>::_S_diag_state = new __state<0>();
    __state<0>::_S_diag_state->__turn_on();
  }

  inline void
  __turn_off()
  {
    if (!__state<0>::_S_diag_state)
      __state<0>::_S_diag_state = new __state<0>();
    __state<0>::_S_diag_state->__turn_off();
  }

} // end namespace __gnu_profile
#endif /* _GLIBCXX_PROFILE_PROFILER_STATE_H */
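The state object is a lazily allocated singleton, and the rest of the profiler consults it through __is_on() before recording anything. A minimal usage sketch, assuming this implementation header and its prerequisites are reachable on the include path:

#include "profile/impl/profiler_state.h"

void example_state()
{
  __gnu_profile::__turn_on();        // allocates the singleton on first use
  if (__gnu_profile::__is_on())
    {
      // ... record instrumentation events here ...
    }
  __gnu_profile::__turn_off();       // flips the state to __OFF; the singleton stays allocated
}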
...@@ -34,8 +34,8 @@ ...@@ -34,8 +34,8 @@
// Written by Lixia Liu and Silvius Rus. // Written by Lixia Liu and Silvius Rus.
#ifndef PROFCXX_PROFILER_TRACE_H__ #ifndef _GLIBCXX_PROFILE_PROFILER_TRACE_H
#define PROFCXX_PROFILER_TRACE_H__ 1 #define _GLIBCXX_PROFILE_PROFILER_TRACE_H 1
#ifdef __GXX_EXPERIMENTAL_CXX0X__ #ifdef __GXX_EXPERIMENTAL_CXX0X__
#include <cerrno> #include <cerrno>
...@@ -65,500 +65,528 @@ ...@@ -65,500 +65,528 @@
namespace __gnu_profile namespace __gnu_profile
{ {
#if defined _GLIBCXX_PROFILE_THREADS && defined HAVE_TLS #if defined _GLIBCXX_PROFILE_THREADS && defined HAVE_TLS
#define _GLIBCXX_IMPL_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER #define _GLIBCXX_IMPL_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
typedef pthread_mutex_t __mutex_t; typedef pthread_mutex_t __mutex_t;
/** @brief Pthread mutex wrapper. */
template <int _Unused=0> /** @brief Pthread mutex wrapper. */
class __mutex { template<int _Unused=0>
public: class __mutex
static __mutex_t __global_lock; {
static void __lock(__mutex_t& __m) { pthread_mutex_lock(&__m); } public:
static void __unlock(__mutex_t& __m) { pthread_mutex_unlock(&__m); } static __mutex_t __global_lock;
}; static void __lock(__mutex_t& __m) { pthread_mutex_lock(&__m); }
static void __unlock(__mutex_t& __m) { pthread_mutex_unlock(&__m); }
};
#else #else
#define _GLIBCXX_IMPL_MUTEX_INITIALIZER 0 #define _GLIBCXX_IMPL_MUTEX_INITIALIZER 0
typedef int __mutex_t; typedef int __mutex_t;
/** @brief Mock mutex interface. */
template <int _Unused=0> /** @brief Mock mutex interface. */
class __mutex { template<int _Unused=0>
public: class __mutex
static __mutex_t __global_lock; {
static void __lock(__mutex_t& __m) {} public:
static void __unlock(__mutex_t& __m) {} static __mutex_t __global_lock;
}; static void __lock(__mutex_t& __m) { }
static void __unlock(__mutex_t& __m) { }
};
#endif #endif
template <int _Unused> template<int _Unused>
__mutex_t __mutex<_Unused>::__global_lock = _GLIBCXX_IMPL_MUTEX_INITIALIZER; __mutex_t __mutex<_Unused>::__global_lock = _GLIBCXX_IMPL_MUTEX_INITIALIZER;
/** @brief Representation of a warning. */
struct __warning_data
{
float __magnitude;
__stack_t __context;
const char* __warning_id;
const char* __warning_message;
__warning_data();
__warning_data(float __m, __stack_t __c, const char* __id,
const char* __msg);
bool operator>(const struct __warning_data& other) const;
};
inline __warning_data::__warning_data()
: __magnitude(0.0), __context(NULL), __warning_id(NULL),
__warning_message(NULL)
{
}
inline __warning_data::__warning_data(float __m, __stack_t __c,
const char* __id, const char* __msg)
: __magnitude(__m), __context(__c), __warning_id(__id),
__warning_message(__msg)
{
}
inline bool __warning_data::operator>(const struct __warning_data& other) const
{
return __magnitude > other.__magnitude;
}
typedef std::_GLIBCXX_STD_PR::vector<__warning_data> __warning_vector_t;
// Defined in profiler_<diagnostic name>.h.
class __trace_hash_func;
class __trace_hashtable_size;
class __trace_map2umap;
class __trace_vector_size;
class __trace_vector_to_list;
void __trace_vector_size_init();
void __trace_hashtable_size_init();
void __trace_hash_func_init();
void __trace_vector_to_list_init();
void __trace_map_to_unordered_map_init();
void __trace_vector_size_report(FILE*, __warning_vector_t&);
void __trace_hashtable_size_report(FILE*, __warning_vector_t&);
void __trace_hash_func_report(FILE*, __warning_vector_t&);
void __trace_vector_to_list_report(FILE*, __warning_vector_t&);
void __trace_map_to_unordered_map_report(FILE*, __warning_vector_t&);
// Utility functions.
inline size_t __max(size_t __a, size_t __b)
{
return __a >= __b ? __a : __b;
}
inline size_t __min(size_t __a, size_t __b)
{
return __a <= __b ? __a : __b;
}
/** @brief Storage for diagnostic table entries. Has only static fields. */ /** @brief Representation of a warning. */
template <int _Unused=0> struct __warning_data
class __tables {
{ float __magnitude;
public: __stack_t __context;
static __trace_hash_func* _S_hash_func; const char* __warning_id;
static __trace_hashtable_size* _S_hashtable_size; const char* __warning_message;
static __trace_map2umap* _S_map2umap; __warning_data();
static __trace_vector_size* _S_vector_size; __warning_data(float __m, __stack_t __c, const char* __id,
static __trace_vector_to_list* _S_vector_to_list; const char* __msg);
}; bool operator>(const struct __warning_data& other) const;
};
template <int _Unused>
__trace_hash_func* __tables<_Unused>::_S_hash_func = NULL; inline __warning_data::__warning_data()
template <int _Unused> : __magnitude(0.0), __context(NULL), __warning_id(NULL),
__trace_hashtable_size* __tables<_Unused>::_S_hashtable_size = NULL; __warning_message(NULL)
template <int _Unused> { }
__trace_map2umap* __tables<_Unused>::_S_map2umap = NULL;
template <int _Unused> inline __warning_data::__warning_data(float __m, __stack_t __c,
__trace_vector_size* __tables<_Unused>::_S_vector_size = NULL; const char* __id, const char* __msg)
template <int _Unused> : __magnitude(__m), __context(__c), __warning_id(__id),
__trace_vector_to_list* __tables<_Unused>::_S_vector_to_list = NULL; __warning_message(__msg)
{ }
/** @brief Storage for user defined parameters. Has only static fields. */
template <int _Unused=0> inline bool
class __settings { __warning_data::operator>(const struct __warning_data& other) const
public: { return __magnitude > other.__magnitude; }
static const char* _S_trace_file_name;
static size_t _S_max_warn_count; typedef std::_GLIBCXX_STD_PR::vector<__warning_data> __warning_vector_t;
static size_t _S_max_stack_depth;
static size_t _S_max_mem; // Defined in profiler_<diagnostic name>.h.
}; class __trace_hash_func;
class __trace_hashtable_size;
template <int _Unused> class __trace_map2umap;
const char* __settings<_Unused>::_S_trace_file_name = class __trace_vector_size;
class __trace_vector_to_list;
void __trace_vector_size_init();
void __trace_hashtable_size_init();
void __trace_hash_func_init();
void __trace_vector_to_list_init();
void __trace_map_to_unordered_map_init();
void __trace_vector_size_report(FILE*, __warning_vector_t&);
void __trace_hashtable_size_report(FILE*, __warning_vector_t&);
void __trace_hash_func_report(FILE*, __warning_vector_t&);
void __trace_vector_to_list_report(FILE*, __warning_vector_t&);
void __trace_map_to_unordered_map_report(FILE*, __warning_vector_t&);
// Utility functions.
inline size_t
__max(size_t __a, size_t __b) { return __a >= __b ? __a : __b; }
inline size_t
__min(size_t __a, size_t __b) { return __a <= __b ? __a : __b; }
/** @brief Storage for diagnostic table entries. Has only static fields. */
template<int _Unused=0>
class __tables
{
public:
static __trace_hash_func* _S_hash_func;
static __trace_hashtable_size* _S_hashtable_size;
static __trace_map2umap* _S_map2umap;
static __trace_vector_size* _S_vector_size;
static __trace_vector_to_list* _S_vector_to_list;
};
template<int _Unused>
__trace_hash_func* __tables<_Unused>::_S_hash_func = NULL;
template<int _Unused>
__trace_hashtable_size* __tables<_Unused>::_S_hashtable_size = NULL;
template<int _Unused>
__trace_map2umap* __tables<_Unused>::_S_map2umap = NULL;
template<int _Unused>
__trace_vector_size* __tables<_Unused>::_S_vector_size = NULL;
template<int _Unused>
__trace_vector_to_list* __tables<_Unused>::_S_vector_to_list = NULL;
/** @brief Storage for user defined parameters. Has only static fields. */
template<int _Unused=0>
class __settings
{
public:
static const char* _S_trace_file_name;
static size_t _S_max_warn_count;
static size_t _S_max_stack_depth;
static size_t _S_max_mem;
};
template<int _Unused>
const char* __settings<_Unused>::_S_trace_file_name =
    _GLIBCXX_PROFILE_TRACE_PATH_ROOT;

  template<int _Unused>
    size_t __settings<_Unused>::_S_max_warn_count =
      _GLIBCXX_PROFILE_MAX_WARN_COUNT;

  template<int _Unused>
    size_t __settings<_Unused>::_S_max_stack_depth =
      _GLIBCXX_PROFILE_MAX_STACK_DEPTH;

  template<int _Unused>
    size_t __settings<_Unused>::_S_max_mem =
      _GLIBCXX_PROFILE_MEM_PER_DIAGNOSTIC;

  inline size_t
  __stack_max_depth() { return __settings<0>::_S_max_stack_depth; }

  inline size_t
  __max_mem() { return __settings<0>::_S_max_mem; }

  /** @brief Base class for all trace producers. */
  template<typename __object_info, typename __stack_info>
    class __trace_base
    {
    public:
      __trace_base();
      virtual ~__trace_base() { }

      void __add_object(__object_t object, __object_info __info);
      __object_info* __get_object_info(__object_t __object);
      void __retire_object(__object_t __object);
      void __write(FILE* f);
      void __collect_warnings(__warning_vector_t& warnings);
      void __lock_object_table();
      void __lock_stack_table();
      void __unlock_object_table();
      void __unlock_stack_table();

    private:
      __mutex_t __object_table_lock;
      __mutex_t __stack_table_lock;
      typedef _GLIBCXX_IMPL_UNORDERED_MAP<__object_t,
					  __object_info> __object_table_t;
      typedef _GLIBCXX_IMPL_UNORDERED_MAP<__stack_t, __stack_info, __stack_hash,
					  __stack_hash> __stack_table_t;
      __object_table_t __object_table;
      __stack_table_t __stack_table;
      size_t __stack_table_byte_size;

    protected:
      const char* __id;
    };

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__collect_warnings(__warning_vector_t& warnings)
    {
      typename __stack_table_t::iterator __i = __stack_table.begin();
      for ( ; __i != __stack_table.end(); ++__i)
	{
	  warnings.push_back(__warning_data((*__i).second.__magnitude(),
					    (*__i).first,
					    __id,
					    (*__i).second.__advice()));
	}
    }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__lock_object_table()
    { __mutex<0>::__lock(this->__object_table_lock); }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__lock_stack_table()
    { __mutex<0>::__lock(this->__stack_table_lock); }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__unlock_object_table()
    { __mutex<0>::__unlock(this->__object_table_lock); }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__unlock_stack_table()
    { __mutex<0>::__unlock(this->__stack_table_lock); }

  template<typename __object_info, typename __stack_info>
    __trace_base<__object_info, __stack_info>::__trace_base()
    {
      // Do not pick the initial size too large, as we don't know which
      // diagnostics are more active.
      __object_table.rehash(10000);
      __stack_table.rehash(10000);
      __stack_table_byte_size = 0;
      __id = NULL;
      __stack_table_lock = _GLIBCXX_IMPL_MUTEX_INITIALIZER;
      __object_table_lock = __stack_table_lock;
    }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__add_object(__object_t __object, __object_info __info)
    {
      typedef typename __object_table_t::value_type value_type;
      if (__max_mem() == 0
	  || __object_table.size() * sizeof(__object_info) <= __max_mem())
	{
	  __lock_object_table();
	  __object_table.insert(value_type(__object, __info));
	  __unlock_object_table();
	}
    }

  template<typename __object_info, typename __stack_info>
    __object_info*
    __trace_base<__object_info, __stack_info>::__get_object_info(__object_t __object)
    {
      // XXX: Revisit this to see if we can decrease mutex spans.
      // Without this mutex, the object table could be rehashed during an
      // insertion on another thread, which could result in a segfault.
      __lock_object_table();
      typename __object_table_t::iterator __object_it =
	__object_table.find(__object);

      if (__object_it == __object_table.end())
	{
	  __unlock_object_table();
	  return NULL;
	}
      else
	{
	  __unlock_object_table();
	  return &__object_it->second;
	}
    }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__retire_object(__object_t __object)
    {
      __lock_object_table();
      __lock_stack_table();
      typename __object_table_t::iterator __object_it =
	__object_table.find(__object);

      if (__object_it != __object_table.end())
	{
	  const __object_info& __info = __object_it->second;
	  const __stack_t& __stack = __info.__stack();
	  typename __stack_table_t::iterator __stack_it =
	    __stack_table.find(__stack);

	  if (__stack_it == __stack_table.end())
	    {
	      // First occurence of this call context.
	      if (__max_mem() == 0 || __stack_table_byte_size < __max_mem())
		{
		  __stack_table_byte_size +=
		    (sizeof(__instruction_address_t) * __size(__stack)
		     + sizeof(__stack) + sizeof(__stack_info));
		  __stack_table.insert(make_pair(__stack, __stack_info(__info)));
		}
	    }
	  else
	    {
	      // Merge object info into info summary for this call context.
	      __stack_it->second.__merge(__info);
	      delete __stack;
	    }
	  __object_table.erase(__object);
	}

      __unlock_stack_table();
      __unlock_object_table();
    }

  template<typename __object_info, typename __stack_info>
    void
    __trace_base<__object_info, __stack_info>::__write(FILE* __f)
    {
      typename __stack_table_t::iterator __it;

      for (__it = __stack_table.begin(); __it != __stack_table.end(); __it++)
	{
	  if (__it->second.__is_valid())
	    {
	      fprintf(__f, __id);
	      fprintf(__f, "|");
	      __gnu_profile::__write(__f, __it->first);
	      fprintf(__f, "|");
	      __it->second.__write(__f);
	    }
	}
    }

  inline size_t
  __env_to_size_t(const char* __env_var, size_t __default_value)
  {
    char* __env_value = getenv(__env_var);
    if (__env_value)
      {
	long int __converted_value = strtol(__env_value, NULL, 10);
	if (errno || __converted_value < 0)
	  {
	    fprintf(stderr, "Bad value for environment variable '%s'.",
		    __env_var);
	    abort();
	  }
	else
	  {
	    return static_cast<size_t>(__converted_value);
	  }
      }
    else
      {
	return __default_value;
      }
  }

  inline void
  __set_max_stack_trace_depth()
  {
    __settings<0>::_S_max_stack_depth = __env_to_size_t(_GLIBCXX_PROFILE_MAX_STACK_DEPTH_ENV_VAR, __settings<0>::_S_max_stack_depth);
  }

  inline void
  __set_max_mem()
  {
    __settings<0>::_S_max_mem = __env_to_size_t(_GLIBCXX_PROFILE_MEM_PER_DIAGNOSTIC_ENV_VAR, __settings<0>::_S_max_mem);
  }

  inline int
  __log_magnitude(float f)
  {
    const float log_base = 10.0;
    int result = 0;
    int sign = 1;
    if (f < 0)
      {
	f = -f;
	sign = -1;
      }
    while (f > log_base)
      {
	++result;
	f /= 10.0;
      }
    return sign * result;
  }

  struct __warn
  {
    FILE* __file;

    __warn(FILE* __f) { __file = __f; }

    void operator() (const __warning_data& __info)
    {
      fprintf(__file, __info.__warning_id);
      fprintf(__file, ": improvement = %d",
	      __log_magnitude(__info.__magnitude));
      fprintf(__file, ": call stack = ");
      __gnu_profile::__write(__file, __info.__context);
      fprintf(__file, ": advice = %s\n", __info.__warning_message);
    }
  };

  inline FILE*
  __open_output_file(const char* extension)
  {
    // The path is made of _S_trace_file_name + "." + extension.
    size_t root_len = strlen(__settings<0>::_S_trace_file_name);
    size_t ext_len = strlen(extension);
    char* file_name = new char[root_len + 1 + ext_len + 1];
    char* p = file_name;
    memcpy(file_name, __settings<0>::_S_trace_file_name, root_len);
    *(file_name + root_len) = '.';
    memcpy(file_name + root_len + 1, extension, ext_len + 1);
    FILE* out_file = fopen(file_name, "w");
    if (out_file)
      return out_file;
    else
      {
	fprintf(stderr, "Could not open trace file '%s'.", file_name);
	abort();
      }
  }

  /** @brief Final report method, registered with "atexit".
   *
   * This can also be called directly by user code, including signal handlers.
   * It is protected against deadlocks by the reentrance guard in profiler.h.
   * However, when called from a signal handler that triggers while within
   * __gnu_profile (under the guarded zone), no output will be produced.
   */
  inline void
  __report(void)
  {
    __mutex<0>::__lock(__mutex<0>::__global_lock);

    __warning_vector_t __warnings;

    FILE* __raw_file = __open_output_file("raw");
    __trace_vector_size_report(__raw_file, __warnings);
    __trace_hashtable_size_report(__raw_file, __warnings);
    __trace_hash_func_report(__raw_file, __warnings);
    __trace_vector_to_list_report(__raw_file, __warnings);
    __trace_map_to_unordered_map_report(__raw_file, __warnings);
    fclose(__raw_file);

    // Sort data by magnitude.
    // XXX: instead of sorting, should collect only top N for better
    // performance.
    size_t __cutoff = __min(__settings<0>::_S_max_warn_count,
			    __warnings.size());

    std::sort(__warnings.begin(), __warnings.end(),
	      std::greater<__warning_vector_t::value_type>());
    __warnings.resize(__cutoff);

    FILE* __warn_file = __open_output_file("txt");
    std::for_each(__warnings.begin(), __warnings.end(), __warn(__warn_file));
    fclose(__warn_file);

    __mutex<0>::__unlock(__mutex<0>::__global_lock);
  }

  inline void
  __set_trace_path()
  {
    char* __env_trace_file_name = getenv(_GLIBCXX_PROFILE_TRACE_ENV_VAR);
    if (__env_trace_file_name)
      __settings<0>::_S_trace_file_name = __env_trace_file_name;

    // Make sure early that we can create the trace file.
    fclose(__open_output_file("txt"));
  }

  inline void
  __set_max_warn_count()
  {
    char* __env_max_warn_count_str = getenv(_GLIBCXX_PROFILE_MAX_WARN_COUNT_ENV_VAR);

    if (__env_max_warn_count_str)
      {
	int i = atoi(__env_max_warn_count_str);
	__settings<0>::_S_max_warn_count = static_cast<size_t>(i);
      }
  }

  inline void
  __profcxx_init_unconditional()
  {
    __mutex<0>::__lock(__mutex<0>::__global_lock);

    __set_max_warn_count();

    if (__is_invalid())
      {
	if (__settings<0>::_S_max_warn_count == 0)
	  {
	    __turn_off();
	  }
	else
	  {
	    __set_max_stack_trace_depth();
	    __set_max_mem();
	    __set_trace_path();
	    __trace_vector_size_init();
	    __trace_hashtable_size_init();
	    __trace_hash_func_init();
	    __trace_vector_to_list_init();
	    __trace_map_to_unordered_map_init();
	    atexit(__report);
	    __turn_on();
	  }
      }

    __mutex<0>::__unlock(__mutex<0>::__global_lock);
  }

  /** @brief This function must be called by each instrumentation point.
   *
   * The common path is inlined fully.
   */
  inline bool
  __profcxx_init(void)
  {
    if (__is_invalid())
      __profcxx_init_unconditional();

    return __is_on();
  }

} // namespace __gnu_profile

#endif /* _GLIBCXX_PROFILE_PROFILER_TRACE_H */
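Editorial sketch (not part of this commit): since the docstring above states that __report() may be called directly by user code, including signal handlers, a program built with profile mode could flush the diagnostics on SIGTERM as shown below. The handler name and the direct include of the impl header are assumptions for illustration only; normally the header is pulled in indirectly when profile mode is enabled.

// Minimal sketch, assuming profile mode is enabled for this translation unit.
#include <csignal>
#include "profile/impl/profiler.h"   // hypothetical direct include

extern "C" void __dump_profile(int)
{ __gnu_profile::__report(); }       // safe per the docstring above

int main()
{
  std::signal(SIGTERM, __dump_profile);
  // ... run the instrumented workload; the atexit(__report) registration
  // made in __profcxx_init_unconditional() still fires on normal exit.
}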
@@ -34,8 +34,8 @@
// Written by Lixia Liu and Silvius Rus.

#ifndef _GLIBCXX_PROFILE_PROFILER_VECTOR_SIZE_H
#define _GLIBCXX_PROFILE_PROFILER_VECTOR_SIZE_H 1

#ifdef __GXX_EXPERIMENTAL_CXX0X__
#include <cstdlib>
@@ -54,59 +54,49 @@
namespace __gnu_profile
{
  /** @brief Hashtable size instrumentation trace producer. */
  class __trace_vector_size : public __trace_container_size
  {
  public:
    __trace_vector_size() : __trace_container_size() { __id = "vector-size"; }
  };

  // Initialization and report.
  inline void
  __trace_vector_size_init()
  { __tables<0>::_S_vector_size = new __trace_vector_size(); }

  inline void
  __trace_vector_size_report(FILE* __f, __warning_vector_t& __warnings)
  {
    if (__tables<0>::_S_vector_size)
      {
	__tables<0>::_S_vector_size->__collect_warnings(__warnings);
	__tables<0>::_S_vector_size->__write(__f);
      }
  }

  // Implementations of instrumentation hooks.
  inline void
  __trace_vector_size_construct(const void* __obj, size_t __num)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_size->__insert(__obj, __get_stack(), __num);
  }

  inline void
  __trace_vector_size_destruct(const void* __obj, size_t __num, size_t __inum)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_size->__destruct(__obj, __num, __inum);
  }

  inline void
  __trace_vector_size_resize(const void* __obj, size_t __from, size_t __to)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_size->__resize(__obj, __from, __to);
  }

} // namespace __gnu_profile

#endif /* _GLIBCXX_PROFILE_PROFILER_VECTOR_SIZE_H */
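Editorial sketch (not part of this commit): the file above shows the full pattern of a trace producer, which is worth spelling out once: subclass the shared base, set __id, and expose free init/report/hook functions that first check __profcxx_init(). The class below is hypothetical and does not exist in libstdc++; a real addition would also need its own slot in __tables<0> and matching init/report functions.

// Sketch only, modeled on __trace_vector_size above.
class __trace_deque_size : public __trace_container_size
{
public:
  __trace_deque_size() : __trace_container_size() { __id = "deque-size"; }
};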
@@ -34,8 +34,8 @@
// Written by Lixia Liu and Silvius Rus.

#ifndef _GLIBCXX_PROFILE_PROFILER_VECTOR_TO_LIST_H
#define _GLIBCXX_PROFILE_PROFILER_VECTOR_TO_LIST_H 1

#ifdef __GXX_EXPERIMENTAL_CXX0X__
#include <cstdio>
@@ -52,267 +52,258 @@
namespace __gnu_profile
{
  /** @brief A vector-to-list instrumentation line in the object table. */
  class __vector2list_info: public __object_info_base
  {
  public:
    __vector2list_info()
    : _M_shift_count(0), _M_iterate(0), _M_resize(0), _M_list_cost(0),
      _M_vector_cost(0), _M_valid(true) { }

    __vector2list_info(__stack_t __stack)
    : __object_info_base(__stack), _M_shift_count(0), _M_iterate(0),
      _M_resize(0), _M_list_cost(0), _M_vector_cost(0), _M_valid(true) { }

    virtual ~__vector2list_info() { }

    __vector2list_info(const __vector2list_info& __o);

    void __merge(const __vector2list_info& __o);
    void __write(FILE* __f) const;
    float __magnitude() const { return _M_vector_cost - _M_list_cost; }
    const char* __advice() const { return "change std::vector to std::list"; }

    size_t __shift_count() { return _M_shift_count; }
    size_t __iterate() { return _M_iterate; }
    float __list_cost() { return _M_list_cost; }
    size_t __resize() { return _M_resize; }
    void __set_list_cost(float __lc) { _M_list_cost = __lc; }
    void __set_vector_cost(float __vc) { _M_vector_cost = __vc; }
    bool __is_valid() { return _M_valid; }
    void __set_invalid() { _M_valid = false; }

    void __opr_insert(size_t __pos, size_t __num);
    void __opr_iterate(size_t __num) { _M_iterate += __num; }
    void __resize(size_t __from, size_t __to);

  private:
    size_t _M_shift_count;
    size_t _M_iterate;
    size_t _M_resize;
    float _M_list_cost;
    float _M_vector_cost;
    bool _M_valid;
  };

  inline __vector2list_info::__vector2list_info(const __vector2list_info& __o)
  : __object_info_base(__o)
  {
    _M_shift_count = __o._M_shift_count;
    _M_iterate = __o._M_iterate;
    _M_vector_cost = __o._M_vector_cost;
    _M_list_cost = __o._M_list_cost;
    _M_valid = __o._M_valid;
    _M_resize = __o._M_resize;
  }

  inline void
  __vector2list_info::__merge(const __vector2list_info& __o)
  {
    _M_shift_count += __o._M_shift_count;
    _M_iterate += __o._M_iterate;
    _M_vector_cost += __o._M_vector_cost;
    _M_list_cost += __o._M_list_cost;
    _M_valid &= __o._M_valid;
    _M_resize += __o._M_resize;
  }

  inline void
  __vector2list_info::__opr_insert(size_t __pos, size_t __num)
  { _M_shift_count += __num - __pos; }

  inline void
  __vector2list_info::__resize(size_t __from, size_t __to)
  { _M_resize += __from; }

  /** @brief A vector-to-list instrumentation line in the stack table. */
  class __vector2list_stack_info: public __vector2list_info
  {
  public:
    __vector2list_stack_info(const __vector2list_info& __o)
    : __vector2list_info(__o) { }
  };

  /** @brief Vector-to-list instrumentation producer. */
  class __trace_vector_to_list
  : public __trace_base<__vector2list_info, __vector2list_stack_info>
  {
  public:
    __trace_vector_to_list();
    ~__trace_vector_to_list() { }

    // Insert a new node at construct with object, callstack and initial size.
    void __insert(__object_t __obj, __stack_t __stack);

    // Call at destruction/clean to set container final size.
    void __destruct(const void* __obj);

    // Find the node in the live map.
    __vector2list_info* __find(const void* __obj);

    // Collect cost of operations.
    void __opr_insert(const void* __obj, size_t __pos, size_t __num);
    void __opr_iterate(const void* __obj, size_t __num);
    void __invalid_operator(const void* __obj);
    void __resize(const void* __obj, size_t __from, size_t __to);
    float __vector_cost(size_t __shift, size_t __iterate, size_t __resize);
    float __list_cost(size_t __shift, size_t __iterate, size_t __resize);
  };

  inline __trace_vector_to_list::__trace_vector_to_list()
  : __trace_base<__vector2list_info, __vector2list_stack_info>()
  { __id = "vector-to-list"; }

  inline void
  __trace_vector_to_list::__insert(__object_t __obj, __stack_t __stack)
  { __add_object(__obj, __vector2list_info(__stack)); }

  inline void
  __vector2list_info::__write(FILE* __f) const
  {
    fprintf(__f, "%Zu %Zu %Zu %.0f %.0f\n", _M_shift_count, _M_resize,
	    _M_iterate, _M_vector_cost, _M_list_cost);
  }

  // Cost model.
  // Vector operation cost:
  //   - Cost per shift: 1
  //   - Cost per access: 1
  //   - Cost per resize: 1
  // List operation cost:
  //   - Cost per shift: 0
  //   - Cost per access: 10
  //   - Cost per resize: 0
  // XXX: get this from the cost model database instead.
  inline float
  __trace_vector_to_list::__vector_cost(size_t __shift, size_t __iterate,
					size_t __resize)
  { return __shift * 1 + __iterate * 1 + __resize * 1; }

  inline float
  __trace_vector_to_list::__list_cost(size_t __shift, size_t __iterate,
				      size_t __resize)
  { return __shift * 0 + __iterate * 10 + __resize * 0; }

  inline void
  __trace_vector_to_list::__destruct(const void* __obj)
  {
    if (!__is_on())
      return;

    __vector2list_info* __res = __get_object_info(__obj);
    if (!__res)
      return;

    float __vc = __vector_cost(__res->__shift_count(), __res->__iterate(),
			       __res->__resize());
    float __lc = __list_cost(__res->__shift_count(), __res->__iterate(),
			     __res->__resize());
    __res->__set_vector_cost(__vc);
    __res->__set_list_cost(__lc);

    __retire_object(__obj);
  }

  inline void
  __trace_vector_to_list::__opr_insert(const void* __obj, size_t __pos,
				       size_t __num)
  {
    __vector2list_info* __res = __get_object_info(__obj);
    if (__res)
      __res->__opr_insert(__pos, __num);
  }

  inline void
  __trace_vector_to_list::__opr_iterate(const void* __obj, size_t __num)
  {
    __vector2list_info* __res = __get_object_info(__obj);
    if (__res)
      __res->__opr_iterate(__num);
  }

  inline void
  __trace_vector_to_list::__invalid_operator(const void* __obj)
  {
    __vector2list_info* __res = __get_object_info(__obj);
    if (__res)
      __res->__set_invalid();
  }

  inline void
  __trace_vector_to_list::__resize(const void* __obj, size_t __from,
				   size_t __to)
  {
    __vector2list_info* __res = __get_object_info(__obj);
    if (__res)
      __res->__resize(__from, __to);
  }

  // Initialization and report.
  inline void
  __trace_vector_to_list_init()
  { __tables<0>::_S_vector_to_list = new __trace_vector_to_list(); }

  inline void
  __trace_vector_to_list_report(FILE* __f, __warning_vector_t& __warnings)
  {
    if (__tables<0>::_S_vector_to_list)
      {
	__tables<0>::_S_vector_to_list->__collect_warnings(__warnings);
	__tables<0>::_S_vector_to_list->__write(__f);
      }
  }

  // Implementations of instrumentation hooks.
  inline void
  __trace_vector_to_list_construct(const void* __obj)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__insert(__obj, __get_stack());
  }

  inline void
  __trace_vector_to_list_destruct(const void* __obj)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__destruct(__obj);
  }

  inline void
  __trace_vector_to_list_insert(const void* __obj, size_t __pos, size_t __num)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__opr_insert(__obj, __pos, __num);
  }

  inline void
  __trace_vector_to_list_iterate(const void* __obj, size_t __num)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__opr_iterate(__obj, __num);
  }

  inline void
  __trace_vector_to_list_invalid_operator(const void* __obj)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__invalid_operator(__obj);
  }

  inline void
  __trace_vector_to_list_resize(const void* __obj, size_t __from, size_t __to)
  {
    if (!__profcxx_init()) return;
    __tables<0>::_S_vector_to_list->__resize(__obj, __from, __to);
  }

} // namespace __gnu_profile

#endif /* _GLIBCXX_PROFILE_PROFILER_VECTOR_TO_LIST_H */
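Editorial note (not part of this commit): a short worked example of the cost model defined in __vector_cost and __list_cost above; the counts are purely illustrative.

// Suppose a vector saw 1000 element shifts, 200 iterator accesses and
// 3 resizes over its lifetime. At __destruct the costs become
//   __vector_cost(1000, 200, 3) = 1000*1 + 200*1 + 3*1 = 1203
//   __list_cost(1000, 200, 3)   = 1000*0 + 200*10 + 3*0 = 2000
// so __magnitude() = 1203 - 2000 = -797: switching to std::list would not
// pay off, and this call context sorts to the bottom of the final report.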