Commit 54384f7f by Johannes Singler Committed by Johannes Singler

base.h (_EqualFromLess): Correct uglification of member variables and method arguments.

2009-09-16  Johannes Singler  <singler@ira.uka.de>

        * include/parallel/base.h (_EqualFromLess):
        Correct uglification of member variables and method arguments.
        * include/parallel/balanced_quicksort.h (_QSBThreadLocal): Likewise.
        * include/parallel/find_selectors.h (__find_first_of_selector):
        Likewise.
        * include/parallel/iterator.h (_IteratorTriple): Likewise.
        * include/parallel/multiseq_selection.h
        (_Lexicographic, _LexicographicReverse): Likewise.
        * include/parallel/multiway_mergesort.h (_Piece, _PMWMSSortingData):
        Likewise.
        * include/parallel/random_number.h (_RandomNumber): Likewise.
        * include/parallel/random_shuffle.h (_DRandomShufflingGlobalData):
        Likewise.
        * include/parallel/set_operations.h (__symmetric_difference_func,
        __difference_func, __intersection_func, __union_func,
        parallel_set_union, parallel_set_intersection, parallel_set_difference,
        parallel_set_symmetric_difference): Likewise.
        * include/parallel/tags.h (parallel_tag): Likewise.
        * include/parallel/workstealing.h (_Job): Likewise.
        * include/parallel/multiway_merge.h
        (__multiway_merge_k_variant_sentinel_switch::operator())
        correct uglification of _*LoserTree*.
        * include/parallel/losertree.h (_*LoserTree*): Likewise; correct
        uglification of member variables and method arguments.
        * include/parallel/par_loop.h: Correct uglification of finish_iterator.
        * include/parallel/for_each_selectors.h: Likewise.
        * include/parallel/omp_loop.h: Likewise.
        * include/parallel/algo.h: Likewise; uglify c_rand_number.

From-SVN: r151754
parent e038c37b
2009-09-16 Johannes Singler <singler@ira.uka.de>
* include/parallel/base.h (_EqualFromLess):
Correct uglification of member variables and method arguments.
* include/parallel/balanced_quicksort.h (_QSBThreadLocal): Likewise.
* include/parallel/find_selectors.h (__find_first_of_selector):
Likewise.
* include/parallel/iterator.h (_IteratorTriple): Likewise.
* include/parallel/multiseq_selection.h
(_Lexicographic, _LexicographicReverse): Likewise.
* include/parallel/multiway_mergesort.h (_Piece, _PMWMSSortingData):
Likewise.
* include/parallel/random_number.h (_RandomNumber): Likewise.
* include/parallel/random_shuffle.h (_DRandomShufflingGlobalData):
Likewise.
* include/parallel/set_operations.h (__symmetric_difference_func,
__difference_func, __intersection_func, __union_func,
parallel_set_union, parallel_set_intersection, parallel_set_difference,
parallel_set_symmetric_difference): Likewise.
* include/parallel/tags.h (parallel_tag): Likewise.
* include/parallel/workstealing.h (_Job): Likewise.
* include/parallel/multiway_merge.h
(__multiway_merge_k_variant_sentinel_switch::operator())
correct uglification of _*LoserTree*.
* include/parallel/losertree.h (_*LoserTree*): Likewise; correct
uglification of member variables and method arguments.
* include/parallel/par_loop.h: Correct uglification of finish_iterator.
* include/parallel/for_each_selectors.h: Likewise.
* include/parallel/omp_loop.h: Likewise.
* include/parallel/algo.h: Likewise; uglify c_rand_number.
2009-09-16 Johannes Singler <singler@ira.uka.de>
* include/parallel/base.h (_PseudoSequenceIterator, _PseudoSequence):
Replace redundant _Self.
* include/parallel/iterator.h (_IteratorPair, _IteratorTriple):
......
......@@ -1229,7 +1229,7 @@ namespace __parallel
unary_op, __functionality,
__gnu_parallel::_DummyReduct(),
__dummy, __dummy, -1, __parallelism_tag);
return __functionality.finish_iterator;
return __functionality._M_finish_iterator;
}
else
return transform(__begin, __end, __result, unary_op,
......@@ -1322,7 +1322,7 @@ namespace __parallel
__gnu_parallel::_DummyReduct(),
__dummy, __dummy, -1,
__parallelism_tag);
return __functionality.finish_iterator;
return __functionality._M_finish_iterator;
}
else
return transform(__begin1, __end1, __begin2, __result, __binary_op,
......@@ -1629,7 +1629,7 @@ namespace __parallel
/** @brief Functor wrapper for std::rand(). */
template<typename _MustBeInt = int>
struct c_rand_number
struct _CRandNumber
{
int
operator()(int __limit)
......@@ -1641,7 +1641,7 @@ namespace __parallel
inline void
random_shuffle(_RAIter __begin, _RAIter __end)
{
c_rand_number<> __r;
_CRandNumber<> __r;
// Parallelization still possible.
__gnu_parallel::random_shuffle(__begin, __end, __r);
}
......
......@@ -75,7 +75,7 @@ template<typename _RAIter>
_RestrictedBoundedConcurrentQueue<_Piece> _M_leftover_parts;
/** @brief Number of threads involved in this algorithm. */
_ThreadIndex __num_threads;
_ThreadIndex _M_num_threads;
/** @brief Pointer to a counter of elements left over to sort. */
volatile _DifferenceType* _M_elements_leftover;
......@@ -250,7 +250,7 @@ template<typename _RAIter, typename _Compare>
_Settings::get().sort_qsb_base_case_maximal_n;
if (__base_case_n < 2)
__base_case_n = 2;
_ThreadIndex __num_threads = __tl.__num_threads;
_ThreadIndex __num_threads = __tl._M_num_threads;
// Every thread has its own random number generator.
_RandomNumber __rng(__iam + 1);
......@@ -451,7 +451,7 @@ template<typename _RAIter, typename _Compare>
for (int __i = 0; __i < __num_threads; ++__i)
{
__tls[__i]->_M_elements_leftover = &_M_elements_leftover;
__tls[__i]->__num_threads = __num_threads;
__tls[__i]->_M_num_threads = __num_threads;
__tls[__i]->_M_global = std::make_pair(__begin, __end);
// Just in case nothing is left to assign.
......
......@@ -159,14 +159,14 @@ template<typename _Compare, typename _T1, typename _T2>
class _EqualFromLess : public std::binary_function<_T1, _T2, bool>
{
private:
_Compare& __comp;
_Compare& _M_comp;
public:
_EqualFromLess(_Compare& _comp) : __comp(_comp) { }
_EqualFromLess(_Compare& __comp) : _M_comp(__comp) { }
bool operator()(const _T1& __a, const _T2& __b)
{
return !__comp(__a, __b) && !__comp(__b, __a);
return !_M_comp(__a, __b) && !_M_comp(__b, __a);
}
};
......
......@@ -151,11 +151,11 @@ namespace __gnu_parallel
template<typename _ForwardIterator>
struct __find_first_of_selector : public __generic_find_selector
{
_ForwardIterator __begin;
_ForwardIterator __end;
_ForwardIterator _M_begin;
_ForwardIterator _M_end;
explicit __find_first_of_selector(_ForwardIterator __begin, _ForwardIterator __end)
: __begin(__begin), __end(__end) { }
: _M_begin(__begin), _M_end(__end) { }
/** @brief Test on one __position.
* @param __i1 _Iterator on first sequence.
......@@ -166,8 +166,8 @@ namespace __gnu_parallel
bool
operator()(_RAIter1 __i1, _RAIter2 __i2, _Pred __pred)
{
for (_ForwardIterator __pos_in_candidates = __begin;
__pos_in_candidates != __end; ++__pos_in_candidates)
for (_ForwardIterator __pos_in_candidates = _M_begin;
__pos_in_candidates != _M_end; ++__pos_in_candidates)
if (__pred(*__i1, *__pos_in_candidates))
return true;
return false;
......@@ -184,7 +184,7 @@ namespace __gnu_parallel
_M_sequential_algorithm(_RAIter1 __begin1,
_RAIter1 __end1,
_RAIter2 __begin2, _Pred __pred)
{ return std::make_pair(find_first_of(__begin1, __end1, __begin, __end, __pred,
{ return std::make_pair(find_first_of(__begin1, __end1, _M_begin, _M_end, __pred,
sequential_tag()), __begin2); }
};
}
......
......@@ -45,7 +45,7 @@ namespace __gnu_parallel
/** @brief _Iterator on last element processed; needed for some
* algorithms (e. g. std::transform()).
*/
_It finish_iterator;
_It _M_finish_iterator;
};
......@@ -124,7 +124,7 @@ namespace __gnu_parallel
bool
operator()(_Op& __o, _It __i)
{
*__i.__third = __o(*__i.__first, *__i.__second);
*__i._M_third = __o(*__i._M_first, *__i._M_second);
return true;
}
};
......
......@@ -125,70 +125,70 @@ namespace __gnu_parallel
typedef _IteratorTriple* pointer;
typedef _IteratorTriple& reference;
_Iterator1 __first;
_Iterator2 __second;
_Iterator3 __third;
_Iterator1 _M_first;
_Iterator2 _M_second;
_Iterator3 _M_third;
_IteratorTriple() { }
_IteratorTriple(const _Iterator1& _first, const _Iterator2& _second,
const _Iterator3& _third)
_IteratorTriple(const _Iterator1& __first, const _Iterator2& __second,
const _Iterator3& __third)
{
__first = _first;
__second = _second;
__third = _third;
_M_first = __first;
_M_second = __second;
_M_third = __third;
}
// Pre-increment operator.
_IteratorTriple&
operator++()
{
++__first;
++__second;
++__third;
++_M_first;
++_M_second;
++_M_third;
return *this;
}
// Post-increment operator.
const _IteratorTriple
operator++(int)
{ return _IteratorTriple(__first++, __second++, __third++); }
{ return _IteratorTriple(_M_first++, _M_second++, _M_third++); }
// Pre-decrement operator.
_IteratorTriple&
operator--()
{
--__first;
--__second;
--__third;
--_M_first;
--_M_second;
--_M_third;
return *this;
}
// Post-decrement operator.
const _IteratorTriple
operator--(int)
{ return _IteratorTriple(__first--, __second--, __third--); }
{ return _IteratorTriple(_M_first--, _M_second--, _M_third--); }
// Type conversion.
operator _Iterator3() const
{ return __third; }
{ return _M_third; }
_IteratorTriple&
operator=(const _IteratorTriple& __other)
{
__first = __other.__first;
__second = __other.__second;
__third = __other.__third;
_M_first = __other._M_first;
_M_second = __other._M_second;
_M_third = __other._M_third;
return *this;
}
_IteratorTriple
operator+(difference_type __delta) const
{ return _IteratorTriple(__first + __delta, __second + __delta, __third + __delta); }
{ return _IteratorTriple(_M_first + __delta, _M_second + __delta, _M_third + __delta); }
difference_type
operator-(const _IteratorTriple& __other) const
{ return __first - __other.__first; }
{ return _M_first - __other._M_first; }
};
}
......
......@@ -56,19 +56,19 @@ namespace __gnu_parallel
: public std::binary_function<std::pair<_T1, _T2>, std::pair<_T1, _T2>, bool>
{
private:
_Compare& __comp;
_Compare& _M_comp;
public:
_Lexicographic(_Compare& _comp) : __comp(_comp) { }
_Lexicographic(_Compare& __comp) : _M_comp(__comp) { }
bool
operator()(const std::pair<_T1, _T2>& __p1,
const std::pair<_T1, _T2>& __p2) const
{
if (__comp(__p1.first, __p2.first))
if (_M_comp(__p1.first, __p2.first))
return true;
if (__comp(__p2.first, __p1.first))
if (_M_comp(__p2.first, __p1.first))
return false;
// Firsts are equal.
......@@ -81,19 +81,19 @@ namespace __gnu_parallel
class _LexicographicReverse : public std::binary_function<_T1, _T2, bool>
{
private:
_Compare& __comp;
_Compare& _M_comp;
public:
_LexicographicReverse(_Compare& _comp) : __comp(_comp) { }
_LexicographicReverse(_Compare& __comp) : _M_comp(__comp) { }
bool
operator()(const std::pair<_T1, _T2>& __p1,
const std::pair<_T1, _T2>& __p2) const
{
if (__comp(__p2.first, __p1.first))
if (_M_comp(__p2.first, __p1.first))
return true;
if (__comp(__p1.first, __p2.first))
if (_M_comp(__p1.first, __p2.first))
return false;
// Firsts are equal.
......
......@@ -950,7 +950,7 @@ struct __multiway_merge_k_variant_sentinel_switch
typename __gnu_cxx::__conditional_type<
_LoserTreeTraits<_ValueType>::_M_use_pointer
, _LoserTreePointer<__stable, _ValueType, _Compare>
, LoserTree<__stable, _ValueType, _Compare>
, _LoserTree<__stable, _ValueType, _Compare>
>::__type >(__seqs_begin, __seqs_end, __target, __length, __comp);
}
};
......
......@@ -49,10 +49,10 @@ template<typename _DifferenceTp>
typedef _DifferenceTp _DifferenceType;
/** @brief Begin of subsequence. */
_DifferenceType __begin;
_DifferenceType _M_begin;
/** @brief End of subsequence. */
_DifferenceType __end;
_DifferenceType _M_end;
};
/** @brief Data accessed by all threads.
......@@ -66,7 +66,7 @@ template<typename _RAIter>
typedef typename _TraitsType::difference_type _DifferenceType;
/** @brief Number of threads involved. */
_ThreadIndex __num_threads;
_ThreadIndex _M_num_threads;
/** @brief Input __begin. */
_RAIter _M_source;
......@@ -141,40 +141,40 @@ template<typename _RAIter, typename _Compare,
# pragma omp barrier
std::vector<std::pair<_SortingPlacesIterator, _SortingPlacesIterator> >
seqs(__sd->__num_threads);
for (_ThreadIndex __s = 0; __s < __sd->__num_threads; __s++)
seqs(__sd->_M_num_threads);
for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; __s++)
seqs[__s] = std::make_pair(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]));
std::vector<_SortingPlacesIterator> _M_offsets(__sd->__num_threads);
std::vector<_SortingPlacesIterator> _M_offsets(__sd->_M_num_threads);
// if not last thread
if (__iam < __sd->__num_threads - 1)
if (__iam < __sd->_M_num_threads - 1)
multiseq_partition(seqs.begin(), seqs.end(),
__sd->_M_starts[__iam + 1], _M_offsets.begin(), __comp);
for (int __seq = 0; __seq < __sd->__num_threads; __seq++)
for (int __seq = 0; __seq < __sd->_M_num_threads; __seq++)
{
// for each sequence
if (__iam < (__sd->__num_threads - 1))
__sd->_M_pieces[__iam][__seq].__end = _M_offsets[__seq] - seqs[__seq].first;
if (__iam < (__sd->_M_num_threads - 1))
__sd->_M_pieces[__iam][__seq]._M_end = _M_offsets[__seq] - seqs[__seq].first;
else
// very end of this sequence
__sd->_M_pieces[__iam][__seq].__end =
__sd->_M_pieces[__iam][__seq]._M_end =
__sd->_M_starts[__seq + 1] - __sd->_M_starts[__seq];
}
# pragma omp barrier
for (_ThreadIndex __seq = 0; __seq < __sd->__num_threads; __seq++)
for (_ThreadIndex __seq = 0; __seq < __sd->_M_num_threads; __seq++)
{
// For each sequence.
if (__iam > 0)
__sd->_M_pieces[__iam][__seq].__begin = __sd->_M_pieces[__iam - 1][__seq].__end;
__sd->_M_pieces[__iam][__seq]._M_begin = __sd->_M_pieces[__iam - 1][__seq]._M_end;
else
// Absolute beginning.
__sd->_M_pieces[__iam][__seq].__begin = 0;
__sd->_M_pieces[__iam][__seq]._M_begin = 0;
}
}
};
......@@ -204,16 +204,16 @@ template<typename _RAIter, typename _Compare,
# pragma omp single
__gnu_sequential::sort(__sd->_M_samples,
__sd->_M_samples + (__num_samples * __sd->__num_threads),
__sd->_M_samples + (__num_samples * __sd->_M_num_threads),
__comp);
# pragma omp barrier
for (_ThreadIndex __s = 0; __s < __sd->__num_threads; ++__s)
for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; ++__s)
{
// For each sequence.
if (__num_samples * __iam > 0)
__sd->_M_pieces[__iam][__s].__begin =
__sd->_M_pieces[__iam][__s]._M_begin =
std::lower_bound(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]),
......@@ -222,10 +222,10 @@ template<typename _RAIter, typename _Compare,
- __sd->_M_temporary[__s];
else
// Absolute beginning.
__sd->_M_pieces[__iam][__s].__begin = 0;
__sd->_M_pieces[__iam][__s]._M_begin = 0;
if ((__num_samples * (__iam + 1)) < (__num_samples * __sd->__num_threads))
__sd->_M_pieces[__iam][__s].__end =
if ((__num_samples * (__iam + 1)) < (__num_samples * __sd->_M_num_threads))
__sd->_M_pieces[__iam][__s]._M_end =
std::lower_bound(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]),
......@@ -234,7 +234,7 @@ template<typename _RAIter, typename _Compare,
- __sd->_M_temporary[__s];
else
// Absolute __end.
__sd->_M_pieces[__iam][__s].__end = __sd->_M_starts[__s + 1] - __sd->_M_starts[__s];
__sd->_M_pieces[__iam][__s]._M_end = __sd->_M_starts[__s + 1] - __sd->_M_starts[__s];
}
}
};
......@@ -346,29 +346,29 @@ template<bool __stable, bool __exact, typename _RAIter,
// No barrier here: Synchronization is done by the splitting routine.
_DifferenceType __num_samples =
_Settings::get().sort_mwms_oversampling * __sd->__num_threads - 1;
_Settings::get().sort_mwms_oversampling * __sd->_M_num_threads - 1;
_SplitConsistently
<__exact, _RAIter, _Compare, _SortingPlacesIterator>()
(__iam, __sd, __comp, __num_samples);
// Offset from __target __begin, __length after merging.
_DifferenceType __offset = 0, __length_am = 0;
for (_ThreadIndex __s = 0; __s < __sd->__num_threads; __s++)
for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; __s++)
{
__length_am += __sd->_M_pieces[__iam][__s].__end - __sd->_M_pieces[__iam][__s].__begin;
__offset += __sd->_M_pieces[__iam][__s].__begin;
__length_am += __sd->_M_pieces[__iam][__s]._M_end - __sd->_M_pieces[__iam][__s]._M_begin;
__offset += __sd->_M_pieces[__iam][__s]._M_begin;
}
typedef std::vector<
std::pair<_SortingPlacesIterator, _SortingPlacesIterator> >
seq_vector_type;
seq_vector_type seqs(__sd->__num_threads);
seq_vector_type seqs(__sd->_M_num_threads);
for (int __s = 0; __s < __sd->__num_threads; ++__s)
for (int __s = 0; __s < __sd->_M_num_threads; ++__s)
{
seqs[__s] =
std::make_pair(__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s].__begin,
__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s].__end);
std::make_pair(__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s]._M_begin,
__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s]._M_end);
}
__possibly_stable_multiway_merge<
......@@ -424,7 +424,7 @@ template<bool __stable, bool __exact, typename _RAIter,
# pragma omp single
{
__sd.__num_threads = __num_threads;
__sd._M_num_threads = __num_threads;
__sd._M_source = __begin;
__sd._M_temporary = new _ValueType*[__num_threads];
......
......@@ -108,7 +108,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform).
__f.finish_iterator = __begin + __length;
__f._M_finish_iterator = __begin + __length;
return __o;
}
......
......@@ -122,7 +122,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform).
__f.finish_iterator = __begin + __length;
__f._M_finish_iterator = __begin + __length;
delete[] __thread_results;
delete[] __constructed;
......
......@@ -43,9 +43,9 @@ namespace __gnu_parallel
private:
std::tr1::mt19937 _M_mt;
uint64 _M_supremum;
uint64 _RAND_SUP;
uint64 _M_rand_sup;
double _M_supremum_reciprocal;
double _RAND_SUP_REC;
double _M_rand_sup_reciprocal;
// Assumed to be twice as long as the usual random number.
uint64 __cache;
......@@ -72,9 +72,9 @@ namespace __gnu_parallel
/** @brief Default constructor. Seed with 0. */
_RandomNumber()
: _M_mt(0), _M_supremum(0x100000000ULL),
_RAND_SUP(1ULL << (sizeof(uint32) * 8)),
_M_supremum_reciprocal(double(_M_supremum) / double(_RAND_SUP)),
_RAND_SUP_REC(1.0 / double(_RAND_SUP)),
_M_rand_sup(1ULL << (sizeof(uint32) * 8)),
_M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
_M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
__cache(0), __bits_left(0) { }
/** @brief Constructor.
......@@ -83,9 +83,9 @@ namespace __gnu_parallel
* interval @__c [0,_M_supremum). */
_RandomNumber(uint32 __seed, uint64 _M_supremum = 0x100000000ULL)
: _M_mt(__seed), _M_supremum(_M_supremum),
_RAND_SUP(1ULL << (sizeof(uint32) * 8)),
_M_supremum_reciprocal(double(_M_supremum) / double(_RAND_SUP)),
_RAND_SUP_REC(1.0 / double(_RAND_SUP)),
_M_rand_sup(1ULL << (sizeof(uint32) * 8)),
_M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
_M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
__cache(0), __bits_left(0) { }
/** @brief Generate unsigned random 32-bit integer. */
......@@ -99,17 +99,17 @@ namespace __gnu_parallel
operator()(uint64 local_supremum)
{
return __scale_down(_M_mt(), local_supremum,
double(local_supremum * _RAND_SUP_REC));
double(local_supremum * _M_rand_sup_reciprocal));
}
/** @brief Generate a number of random bits, run-time parameter.
* @param bits Number of bits to generate. */
unsigned long
__genrand_bits(int bits)
__genrand_bits(int __bits)
{
unsigned long __res = __cache & ((1 << bits) - 1);
__cache = __cache >> bits;
__bits_left -= bits;
unsigned long __res = __cache & ((1 << __bits) - 1);
__cache = __cache >> __bits;
__bits_left -= __bits;
if (__bits_left < 32)
{
__cache |= ((uint64(_M_mt())) << __bits_left);
......
......@@ -63,7 +63,7 @@ template<typename _RAIter>
/** @brief Two-dimensional array to hold the thread-bin distribution.
*
* Dimensions (__num_threads + 1) __x (_M_num_bins + 1). */
* Dimensions (_M_num_threads + 1) __x (_M_num_bins + 1). */
_DifferenceType** _M_dist;
/** @brief Start indexes of the threads' __chunks. */
......@@ -91,7 +91,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
struct _DRSSorterPU
{
/** @brief Number of threads participating in total. */
int __num_threads;
int _M_num_threads;
/** @brief Begin __index for bins taken care of by this thread. */
_BinIndex _M_bins_begin;
......@@ -135,7 +135,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_BinIndex* __oracles = new _BinIndex[__length];
_DifferenceType* _M_dist = new _DifferenceType[_M_sd->_M_num_bins + 1];
_BinIndex* _M_bin_proc = new _BinIndex[_M_sd->_M_num_bins];
_ValueType** _M_temporaries = new _ValueType*[d->__num_threads];
_ValueType** _M_temporaries = new _ValueType*[d->_M_num_threads];
// Compute oracles and count appearances.
for (_BinIndex __b = 0; __b < _M_sd->_M_num_bins + 1; ++__b)
......@@ -161,11 +161,11 @@ template<typename _RAIter, typename RandomNumberGenerator>
# pragma omp single
{
// Sum up bins, _M_sd->_M_dist[__s + 1][d->__num_threads] now contains the
// Sum up bins, _M_sd->_M_dist[__s + 1][d->_M_num_threads] now contains the
// total number of items in bin __s
for (_BinIndex __s = 0; __s < _M_sd->_M_num_bins; ++__s)
__gnu_sequential::partial_sum(_M_sd->_M_dist[__s + 1],
_M_sd->_M_dist[__s + 1] + d->__num_threads + 1,
_M_sd->_M_dist[__s + 1] + d->_M_num_threads + 1,
_M_sd->_M_dist[__s + 1]);
}
......@@ -173,15 +173,15 @@ template<typename _RAIter, typename RandomNumberGenerator>
_SequenceIndex __offset = 0, __global_offset = 0;
for (_BinIndex __s = 0; __s < d->_M_bins_begin; ++__s)
__global_offset += _M_sd->_M_dist[__s + 1][d->__num_threads];
__global_offset += _M_sd->_M_dist[__s + 1][d->_M_num_threads];
# pragma omp barrier
for (_BinIndex __s = d->_M_bins_begin; __s < d->__bins_end; ++__s)
{
for (int __t = 0; __t < d->__num_threads + 1; ++__t)
for (int __t = 0; __t < d->_M_num_threads + 1; ++__t)
_M_sd->_M_dist[__s + 1][__t] += __offset;
__offset = _M_sd->_M_dist[__s + 1][d->__num_threads];
__offset = _M_sd->_M_dist[__s + 1][d->_M_num_threads];
}
_M_sd->_M_temporaries[__iam] = static_cast<_ValueType*>(
......@@ -194,7 +194,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_M_dist[__b] = _M_sd->_M_dist[__b][__iam];
for (_BinIndex __b = 0; __b < _M_sd->_M_num_bins; ++__b)
_M_bin_proc[__b] = _M_sd->_M_bin_proc[__b];
for (_ThreadIndex __t = 0; __t < d->__num_threads; ++__t)
for (_ThreadIndex __t = 0; __t < d->_M_num_threads; ++__t)
_M_temporaries[__t] = _M_sd->_M_temporaries[__t];
_RAIter _M_source = _M_sd->_M_source;
......@@ -206,7 +206,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_BinIndex target_bin = __oracles[__i];
_ThreadIndex target_p = _M_bin_proc[target_bin];
// Last column [d->__num_threads] stays unchanged.
// Last column [d->_M_num_threads] stays unchanged.
::new(&(_M_temporaries[target_p][_M_dist[target_bin + 1]++]))
_ValueType(*(_M_source + __i + __start));
}
......@@ -223,12 +223,12 @@ template<typename _RAIter, typename RandomNumberGenerator>
{
_ValueType* __begin =
_M_sd->_M_temporaries[__iam] +
((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->__num_threads]),
((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->_M_num_threads]),
* __end =
_M_sd->_M_temporaries[__iam] + _M_sd->_M_dist[__b + 1][d->__num_threads];
_M_sd->_M_temporaries[__iam] + _M_sd->_M_dist[__b + 1][d->_M_num_threads];
__sequential_random_shuffle(__begin, __end, __rng);
std::copy(__begin, __end, _M_sd->_M_source + __global_offset +
((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->__num_threads]));
((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->_M_num_threads]));
}
::operator delete(_M_sd->_M_temporaries[__iam]);
......@@ -364,7 +364,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
__pus[__i].__bins_end = bin_cursor;
for (; __j < bin_cursor; ++__j)
_M_sd._M_bin_proc[__j] = __i;
__pus[__i].__num_threads = __num_threads;
__pus[__i]._M_num_threads = __num_threads;
__pus[__i]._M_seed = __rng(std::numeric_limits<uint32>::max());
__pus[__i]._M_sd = &_M_sd;
}
......
......@@ -71,9 +71,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
symmetric_difference_func(_Compare __c) : __comp(__c) {}
symmetric_difference_func(_Compare __comp) : _M_comp(__comp) {}
_Compare __comp;
_Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b,
......@@ -82,13 +82,13 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
++__r;
}
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{
*__r = *__c;
++__c;
......@@ -111,12 +111,12 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{
++__a;
++__counter;
}
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{
++__c;
++__counter;
......@@ -150,9 +150,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
__difference_func(_Compare __c) : __comp(__c) {}
__difference_func(_Compare __comp) : _M_comp(__comp) {}
_Compare __comp;
_Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b, _IIter __c, _IIter d,
......@@ -160,13 +160,13 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
++__r;
}
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
......@@ -185,12 +185,12 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{
++__a;
++__counter;
}
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{ ++__a; ++__c; }
......@@ -218,9 +218,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
__intersection_func(_Compare __c) : __comp(__c) {}
__intersection_func(_Compare __comp) : _M_comp(__comp) {}
_Compare __comp;
_Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b, _IIter __c, _IIter d,
......@@ -228,9 +228,9 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{ ++__a; }
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
......@@ -252,9 +252,9 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{ ++__a; }
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
......@@ -282,9 +282,9 @@ template<class _IIter, class _OutputIterator, class _Compare>
typedef typename std::iterator_traits<_IIter>::difference_type
_DifferenceType;
__union_func(_Compare __c) : __comp(__c) {}
__union_func(_Compare __comp) : _M_comp(__comp) {}
_Compare __comp;
_Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, const _IIter __b, _IIter __c,
......@@ -292,12 +292,12 @@ template<class _IIter, class _OutputIterator, class _Compare>
{
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
}
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{
*__r = *__c;
++__c;
......@@ -321,9 +321,9 @@ template<class _IIter, class _OutputIterator, class _Compare>
while (__a != __b && __c != d)
{
if (__comp(*__a, *__c))
if (_M_comp(*__a, *__c))
{ ++__a; }
else if (__comp(*__c, *__a))
else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
......@@ -400,14 +400,14 @@ template<typename _IIter,
_IIter __offset[2];
const _DifferenceType __rank = __borders[__iam + 1];
multiseq_partition(__sequence, __sequence + 2, __rank, __offset, __op.__comp);
multiseq_partition(__sequence, __sequence + 2, __rank, __offset, __op._M_comp);
// allowed to read?
// together
// *(__offset[ 0 ] - 1) == *__offset[ 1 ]
if (__offset[ 0 ] != __begin1 && __offset[ 1 ] != __end2
&& !__op.__comp(*(__offset[ 0 ] - 1), *__offset[ 1 ])
&& !__op.__comp(*__offset[ 1 ], *(__offset[ 0 ] - 1)))
&& !__op._M_comp(*(__offset[ 0 ] - 1), *__offset[ 1 ])
&& !__op._M_comp(*__offset[ 1 ], *(__offset[ 0 ] - 1)))
{
// Avoid split between globally equal elements: move one to
// front in first sequence.
......@@ -476,10 +476,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_union(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
_OutputIterator __result, _Compare __comp)
_OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
__union_func< _IIter, _OutputIterator, _Compare>(__comp));
__union_func< _IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
......@@ -488,10 +488,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_intersection(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
_OutputIterator __result, _Compare __comp)
_OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
__intersection_func<_IIter, _OutputIterator, _Compare>(__comp));
__intersection_func<_IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
......@@ -500,10 +500,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_difference(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
_OutputIterator __result, _Compare __comp)
_OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
__difference_func<_IIter, _OutputIterator, _Compare>(__comp));
__difference_func<_IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
......@@ -512,11 +512,11 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_symmetric_difference(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
_OutputIterator __result, _Compare __comp)
_OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
symmetric_difference_func<_IIter, _OutputIterator, _Compare>
(__comp));
(_M_comp));
}
}
......
......@@ -46,37 +46,37 @@ namespace __gnu_parallel
struct parallel_tag
{
private:
_ThreadIndex __num_threads;
_ThreadIndex _M_num_threads;
public:
/** @brief Default constructor. Use default number of threads. */
parallel_tag()
{
this->__num_threads = 0;
this->_M_num_threads = 0;
}
/** @brief Default constructor. Recommend number of threads to use.
* @param __num_threads Desired number of threads. */
parallel_tag(_ThreadIndex __num_threads)
{
this->__num_threads = __num_threads;
this->_M_num_threads = __num_threads;
}
/** @brief Find __out desired number of threads.
* @return Desired number of threads. */
inline _ThreadIndex __get_num_threads()
{
if(__num_threads == 0)
if(_M_num_threads == 0)
return omp_get_max_threads();
else
return __num_threads;
return _M_num_threads;
}
/** @brief Set the desired number of threads.
* @param __num_threads Desired number of threads. */
inline void set_num_threads(_ThreadIndex __num_threads)
{
this->__num_threads = __num_threads;
this->_M_num_threads = __num_threads;
}
};
......
......@@ -59,17 +59,17 @@ template<typename _DifferenceTp>
*
* Changed by owning and stealing thread. By stealing thread,
* always incremented. */
_GLIBCXX_JOB_VOLATILE _DifferenceType __first;
_GLIBCXX_JOB_VOLATILE _DifferenceType _M_first;
/** @brief Last element.
*
* Changed by owning thread only. */
_GLIBCXX_JOB_VOLATILE _DifferenceType __last;
_GLIBCXX_JOB_VOLATILE _DifferenceType _M_last;
/** @brief Number of elements, i.e. @__c __last-__first+1.
/** @brief Number of elements, i.e. @__c _M_last-_M_first+1.
*
* Changed by owning thread only. */
_GLIBCXX_JOB_VOLATILE _DifferenceType __load;
_GLIBCXX_JOB_VOLATILE _DifferenceType _M_load;
};
/** @brief Work stealing algorithm for random access iterators.
......@@ -177,21 +177,21 @@ template<typename _RAIter,
__iam_working = true;
// How many jobs per thread? last thread gets the rest.
__my_job.__first =
__my_job._M_first =
static_cast<_DifferenceType>(__iam * (__length / __num_threads));
__my_job.__last = (__iam == (__num_threads - 1)) ?
__my_job._M_last = (__iam == (__num_threads - 1)) ?
(__length - 1) : ((__iam + 1) * (__length / __num_threads) - 1);
__my_job.__load = __my_job.__last - __my_job.__first + 1;
__my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
// Init __result with __first __value (to have a base value for reduction).
if (__my_job.__first <= __my_job.__last)
// Init __result with _M_first __value (to have a base value for reduction).
if (__my_job._M_first <= __my_job._M_last)
{
// Cannot use volatile variable directly.
_DifferenceType __my_first = __my_job.__first;
_DifferenceType __my_first = __my_job._M_first;
__result = __f(__op, __begin + __my_first);
++__my_job.__first;
--__my_job.__load;
++__my_job._M_first;
--__my_job._M_load;
}
_RAIter __current;
......@@ -206,18 +206,18 @@ template<typename _RAIter,
# pragma omp flush(__busy)
// Thread has own work to do
while (__my_job.__first <= __my_job.__last)
while (__my_job._M_first <= __my_job._M_last)
{
// fetch-and-add call
// Reserve __current __job block (size __chunk_size) in my queue.
_DifferenceType current_job =
__fetch_and_add<_DifferenceType>(&(__my_job.__first), __chunk_size);
__fetch_and_add<_DifferenceType>(&(__my_job._M_first), __chunk_size);
// Update __load, to make the three values consistent,
// __first might have been changed in the meantime
__my_job.__load = __my_job.__last - __my_job.__first + 1;
// Update _M_load, to make the three values consistent,
// _M_first might have been changed in the meantime
__my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
for (_DifferenceType job_counter = 0;
job_counter < __chunk_size && current_job <= __my_job.__last;
job_counter < __chunk_size && current_job <= __my_job._M_last;
++job_counter)
{
// Yes: process it!
......@@ -248,9 +248,9 @@ template<typename _RAIter,
__yield();
# pragma omp flush(__busy)
__victim = rand_gen();
__supposed_first = __job[__victim * __stride].__first;
__supposed_last = __job[__victim * __stride].__last;
__supposed_load = __job[__victim * __stride].__load;
__supposed_first = __job[__victim * __stride]._M_first;
__supposed_last = __job[__victim * __stride]._M_last;
__supposed_load = __job[__victim * __stride]._M_load;
}
while (__busy > 0
&& ((__supposed_load <= 0)
......@@ -268,13 +268,13 @@ template<typename _RAIter,
// Push __victim's __start forward.
_DifferenceType __stolen_first =
__fetch_and_add<_DifferenceType>(
&(__job[__victim * __stride].__first), __steal);
&(__job[__victim * __stride]._M_first), __steal);
_DifferenceType stolen_try =
__stolen_first + __steal - _DifferenceType(1);
__my_job.__first = __stolen_first;
__my_job.__last = __gnu_parallel::min(stolen_try, __supposed_last);
__my_job.__load = __my_job.__last - __my_job.__first + 1;
__my_job._M_first = __stolen_first;
__my_job._M_last = __gnu_parallel::min(stolen_try, __supposed_last);
__my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
// Has potential work again.
# pragma omp atomic
......@@ -295,7 +295,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform)
__f.finish_iterator = __begin + __length;
__f._M_finish_iterator = __begin + __length;
omp_destroy_lock(&__output_lock);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment