Commit 721641c4, authored and committed by Johannes Singler

base.h: Correct some comments accidentally changed during uglification.

2009-09-16  Johannes Singler  <singler@ira.uka.de>

        * include/parallel/base.h: Correct some comments accidentally changed
        during uglification.
        * include/parallel/find.h: Likewise.
        * include/parallel/find_selectors.h: Likewise.
        * include/parallel/multiway_merge.h: Likewise.
        * include/parallel/multiway_mergesort.h: Likewise.
        * include/parallel/partial_sum.h: Likewise.
        * include/parallel/partition.h: Likewise.
        * include/parallel/queue.h: Likewise.
        * include/parallel/random_shuffle.h: Likewise.
        * include/parallel/settings.h: Likewise.
        * include/parallel/sort.h: Likewise.
        * include/parallel/tags.h: Likewise.
        * include/parallel/workstealing.h: Likewise.

From-SVN: r151756
parent 839f2864
libstdc++-v3/ChangeLog:

+2009-09-16  Johannes Singler  <singler@ira.uka.de>
+
+        * include/parallel/base.h: Correct some comments accidentally changed
+        during uglification.
+        * include/parallel/find.h: Likewise.
+        * include/parallel/find_selectors.h: Likewise.
+        * include/parallel/multiway_merge.h: Likewise.
+        * include/parallel/multiway_mergesort.h: Likewise.
+        * include/parallel/partial_sum.h: Likewise.
+        * include/parallel/partition.h: Likewise.
+        * include/parallel/queue.h: Likewise.
+        * include/parallel/random_shuffle.h: Likewise.
+        * include/parallel/settings.h: Likewise.
+        * include/parallel/sort.h: Likewise.
+        * include/parallel/tags.h: Likewise.
+        * include/parallel/workstealing.h: Likewise.
+
 2009-09-16  Johannes Singler  <singler@ira.uka.de>

         * include/parallel/base.h (_EqualFromLess):
         Correct uglification of member variables and method arguments.
         * include/parallel/balanced_quicksort.h (_QSBThreadLocal): Likewise.
include/parallel/base.h:

@@ -110,7 +110,7 @@ template<typename _Size>
 return __k;
 }
-/** @brief Encode two integers into one __gnu_parallel::_CASable.
+/** @brief Encode two integers into one gnu_parallel::_CASable.
 * @param __a First integer, to be encoded in the most-significant @__c
 * _CASable_bits/2 bits.
 * @param __b Second integer, to be encoded in the least-significant
@@ -124,7 +124,7 @@ __encode2(int __a, int __b) //must all be non-negative, actually
 return (((_CASable)__a) << (_CASable_bits / 2)) | (((_CASable)__b) << 0);
 }
-/** @brief Decode two integers from one __gnu_parallel::_CASable.
+/** @brief Decode two integers from one gnu_parallel::_CASable.
 * @param __x __gnu_parallel::_CASable to decode integers from.
 * @param __a First integer, to be decoded from the most-significant
 * @__c _CASable_bits/2 bits of @__c __x.
@@ -210,7 +210,7 @@ template<typename _Operation, typename _FirstArgumentType,
 { return _M_op(_M_value, __x); }
 // _GLIBCXX_RESOLVE_LIB_DEFECTS
-// 109. Missing binders for non-const __sequence __elements
+// 109. Missing binders for non-const sequence elements
 _ResultType
 operator()(_SecondArgumentType& __x) const
 { return _M_op(_M_value, __x); }
@@ -239,7 +239,7 @@ template<typename _Operation, typename _FirstArgumentType,
 { return _M_op(__x, _M_value); }
 // _GLIBCXX_RESOLVE_LIB_DEFECTS
-// 109. Missing binders for non-const __sequence __elements
+// 109. Missing binders for non-const sequence elements
 _ResultType
 operator()(_FirstArgumentType& __x)
 { return _M_op(__x, _M_value); }
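The base.h comments above document __encode2/__decode2, which pack two non-negative integers into the halves of a single _CASable word so that both can be read or updated with one atomic operation. A minimal standalone sketch of that packing, assuming a 64-bit word; the names here are illustrative stand-ins, not the header's own:

```cpp
#include <cassert>
#include <cstdint>

using CASable = std::int64_t;          // stand-in for __gnu_parallel::_CASable
constexpr int kCASableBits = 64;       // stand-in for _CASable_bits

// Encode two non-negative integers into one word:
// a in the most-significant half, b in the least-significant half.
CASable encode2(int a, int b)
{ return (CASable(a) << (kCASableBits / 2)) | (CASable(b) << 0); }

// Decode the two integers again.
void decode2(CASable x, int& a, int& b)
{
  a = int(x >> (kCASableBits / 2));
  b = int(x & ((CASable(1) << (kCASableBits / 2)) - 1));
}

int main()
{
  int a, b;
  decode2(encode2(17, 42), a, b);
  assert(a == 17 && b == 42);          // round-trips losslessly
}
```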
include/parallel/find.h:

@@ -226,7 +226,7 @@ template<typename _RAIter1,
 # pragma omp single
 __num_threads = omp_get_num_threads();
-// Not within first __k __elements -> start parallel.
+// Not within first __k elements -> start parallel.
 _ThreadIndex __iam = omp_get_thread_num();
 _DifferenceType __block_size = __s.find_initial_block_size;
include/parallel/find_selectors.h:

@@ -49,7 +49,7 @@ namespace __gnu_parallel
 */
 struct __find_if_selector : public __generic_find_selector
 {
-/** @brief Test on one __position.
+/** @brief Test on one position.
 * @param __i1 _Iterator on first sequence.
 * @param __i2 _Iterator on second sequence (unused).
 * @param __pred Find predicate.
@@ -76,10 +76,10 @@ namespace __gnu_parallel
 sequential_tag()), __begin2); }
 };
-/** @brief Test predicate on two adjacent __elements. */
+/** @brief Test predicate on two adjacent elements. */
 struct __adjacent_find_selector : public __generic_find_selector
 {
-/** @brief Test on one __position.
+/** @brief Test on one position.
 * @param __i1 _Iterator on first sequence.
 * @param __i2 _Iterator on second sequence (unused).
 * @param __pred Find predicate.
@@ -119,7 +119,7 @@ namespace __gnu_parallel
 struct __mismatch_selector : public __generic_find_selector
 {
 /**
-* @brief Test on one __position.
+* @brief Test on one position.
 * @param __i1 _Iterator on first sequence.
 * @param __i2 _Iterator on second sequence (unused).
 * @param __pred Find predicate.
@@ -147,7 +147,7 @@ namespace __gnu_parallel
 };
-/** @brief Test predicate on several __elements. */
+/** @brief Test predicate on several elements. */
 template<typename _ForwardIterator>
 struct __find_first_of_selector : public __generic_find_selector
 {
@@ -157,7 +157,7 @@ namespace __gnu_parallel
 explicit __find_first_of_selector(_ForwardIterator __begin, _ForwardIterator __end)
 : _M_begin(__begin), _M_end(__end) { }
-/** @brief Test on one __position.
+/** @brief Test on one position.
 * @param __i1 _Iterator on first sequence.
 * @param __i2 _Iterator on second sequence (unused).
 * @param __pred Find predicate. */
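All of these selectors expose the same "test on one position" operation, which is what lets a single driver implement find_if, adjacent_find, mismatch and find_first_of by plugging in a different selector. A simplified sequential sketch of that pattern (the real driver in find.h additionally splits the range into blocks and runs them in parallel); the names here are illustrative only:

```cpp
#include <iostream>
#include <vector>

// Generic driver: walk two sequences in lockstep and stop at the first
// position where the selector says the predicate holds.
template<typename It1, typename It2, typename Pred, typename Selector>
It1 generic_find(It1 begin1, It1 end1, It2 begin2, Pred pred, Selector sel)
{
  for (; begin1 != end1; ++begin1, ++begin2)
    if (sel(begin1, begin2, pred))
      return begin1;
  return end1;
}

// Selector for find_if: test the predicate on the first sequence only.
struct find_if_selector
{
  template<typename It1, typename It2, typename Pred>
  bool operator()(It1 i1, It2, Pred pred) const { return pred(*i1); }
};

// Selector for mismatch: stop where the two sequences stop agreeing.
struct mismatch_selector
{
  template<typename It1, typename It2, typename Pred>
  bool operator()(It1 i1, It2 i2, Pred pred) const { return !pred(*i1, *i2); }
};

int main()
{
  std::vector<int> a{1, 3, 5, 6, 7}, b{1, 3, 4, 6, 7};
  auto odd = [](int x) { return x % 2 != 0; };
  auto eq  = [](int x, int y) { return x == y; };
  std::cout << *generic_find(a.begin(), a.end(), a.begin(), odd,
                             find_if_selector())  << '\n';   // 1
  std::cout << *generic_find(a.begin(), a.end(), b.begin(), eq,
                             mismatch_selector()) << '\n';   // 5
}
```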
include/parallel/multiway_merge.h:

@@ -72,13 +72,13 @@ template<typename _RAIter, typename _Compare>
 operator<=(_GuardedIterator<_RAIter, _Compare>& __bi1,
 _GuardedIterator<_RAIter, _Compare>& __bi2);
-/** @brief _Iterator wrapper supporting an implicit __supremum at the end
+/** @brief _Iterator wrapper supporting an implicit supremum at the end
 * of the sequence, dominating all comparisons.
 *
-* The implicit __supremum comes with __a performance cost.
+* The implicit supremum comes with __a performance cost.
 *
 * Deriving from _RAIter is not possible since
-* _RAIter need not be __a class.
+* _RAIter need not be a class.
 */
 template<typename _RAIter, typename _Compare>
 class _GuardedIterator
@@ -573,7 +573,7 @@ template<typename LT,
 for (_DifferenceType __i = 0; __i < __length; ++__i)
 {
-//take __out
+//take out
 source = __lt.__get_min_source();
 *(__target++) = *(__seqs_begin[source].first++);
@@ -785,9 +785,9 @@ struct _LoserTreeTraits
 };
 /**
-* @brief Switch for 3-way merging with __sentinels turned __off.
+* @brief Switch for 3-way merging with __sentinels turned off.
 *
-* Note that 3-way merging is always __stable!
+* Note that 3-way merging is always stable!
 */
 template<
 bool __sentinels /*default == false*/,
@@ -811,7 +811,7 @@ struct __multiway_merge_3_variant_sentinel_switch
 /**
 * @brief Switch for 3-way merging with __sentinels turned on.
 *
-* Note that 3-way merging is always __stable!
+* Note that 3-way merging is always stable!
 */
 template<
 typename _RAIterIterator,
@@ -834,9 +834,9 @@ struct __multiway_merge_3_variant_sentinel_switch
 };
 /**
-* @brief Switch for 4-way merging with __sentinels turned __off.
+* @brief Switch for 4-way merging with __sentinels turned off.
 *
-* Note that 4-way merging is always __stable!
+* Note that 4-way merging is always stable!
 */
 template<
 bool __sentinels /*default == false*/,
@@ -860,7 +860,7 @@ struct __multiway_merge_4_variant_sentinel_switch
 /**
 * @brief Switch for 4-way merging with __sentinels turned on.
 *
-* Note that 4-way merging is always __stable!
+* Note that 4-way merging is always stable!
 */
 template<
 typename _RAIterIterator,
@@ -919,7 +919,7 @@ struct __multiway_merge_k_variant_sentinel_switch
 };
 /**
-* @brief Switch for k-way merging with __sentinels turned __off.
+* @brief Switch for k-way merging with __sentinels turned off.
 */
 template<
 bool __stable,
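The _GuardedIterator comment fixed above refers to the supremum/sentinel idea used throughout multiway_merge.h: if every input sequence is terminated by a value that compares greater than all real elements, the merge loop needs no end-of-sequence checks. A two-sequence toy version of the idea (the header generalizes it to k sequences via loser trees); this is only an illustration, not the header's code:

```cpp
#include <iostream>
#include <limits>
#include <vector>

int main()
{
  // The sentinel plays the role of the "implicit supremum": it dominates
  // every comparison, so the inner loop never tests for the end.
  const int sup = std::numeric_limits<int>::max();
  std::vector<int> a{1, 4, 7, sup};
  std::vector<int> b{2, 3, 9, sup};

  std::vector<int> out;
  std::size_t i = 0, j = 0;
  for (int n = 0; n < 6; ++n)                 // 3 + 3 real elements
    out.push_back(a[i] <= b[j] ? a[i++] : b[j++]);

  for (int x : out)
    std::cout << x << ' ';                    // 1 2 3 4 7 9
  std::cout << '\n';
}
```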
include/parallel/multiway_mergesort.h:

@@ -233,7 +233,7 @@ template<typename _RAIter, typename _Compare,
 __comp)
 - __sd->_M_temporary[__s];
 else
-// Absolute __end.
+// Absolute end.
 __sd->_M_pieces[__iam][__s]._M_end = __sd->_M_starts[__s + 1] - __sd->_M_starts[__s];
 }
 }
include/parallel/partial_sum.h:

@@ -207,7 +207,7 @@ template<typename _IIter,
 switch (_Settings::get().partial_sum_algorithm)
 {
 case LINEAR:
-// Need an initial __offset.
+// Need an initial offset.
 return __parallel_partial_sum_linear(__begin, __end, __result, __bin_op, __n);
 default:
 // Partial_sum algorithm not implemented.
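For context, the LINEAR strategy selected above computes a prefix sum in two passes: each thread first sums its chunk, the chunk totals are scanned to give every thread an initial offset (the "initial offset" the comment refers to), and each thread then writes its partial sums starting from that offset. A rough OpenMP sketch of that strategy, not the header's actual implementation:

```cpp
#include <numeric>
#include <vector>
#include <omp.h>

std::vector<long> prefix_sum(const std::vector<long>& in)
{
  std::vector<long> out(in.size());
  std::vector<long> chunk_sum;

#pragma omp parallel
  {
    const int t = omp_get_thread_num();
    const int p = omp_get_num_threads();
#pragma omp single
    chunk_sum.assign(p + 1, 0);               // implicit barrier afterwards

    const std::size_t lo = in.size() * t / p;
    const std::size_t hi = in.size() * (t + 1) / p;

    // Pass 1: every thread sums its own chunk.
    chunk_sum[t + 1] = std::accumulate(in.begin() + lo, in.begin() + hi, 0L);
#pragma omp barrier
#pragma omp single
    std::partial_sum(chunk_sum.begin(), chunk_sum.end(), chunk_sum.begin());

    // Pass 2: write prefix sums, starting from this thread's offset.
    long run = chunk_sum[t];
    for (std::size_t k = lo; k < hi; ++k)
      out[k] = run += in[k];
  }
  return out;
}
```

The partial_sum_dilation factor in settings.h (touched later in this diff) models exactly this split: the pass that also writes results is assumed to be slower than the pure summing pass.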
include/parallel/partition.h:

@@ -77,7 +77,7 @@ template<typename _RAIter, typename _Predicate>
 omp_lock_t __result_lock;
 omp_init_lock(&__result_lock);
-//at least two __chunks per thread
+//at least two chunks per thread
 if(__right - __left + 1 >= 2 * __num_threads * __chunk_size)
 # pragma omp parallel num_threads(__num_threads)
 {
include/parallel/queue.h:

@@ -77,7 +77,7 @@ namespace __gnu_parallel
 ~_RestrictedBoundedConcurrentQueue()
 { delete[] _M_base; }
-/** @brief Pushes one element into the queue at the front __end.
+/** @brief Pushes one element into the queue at the front end.
 * Must not be called concurrently with pop_front(). */
 void
 push_front(const _Tp& __t)
@@ -94,7 +94,7 @@ namespace __gnu_parallel
 __fetch_and_add(&_M_borders, __encode2(1, 0));
 }
-/** @brief Pops one element from the queue at the front __end.
+/** @brief Pops one element from the queue at the front end.
 * Must not be called concurrently with pop_front(). */
 bool
 pop_front(_Tp& __t)
@@ -118,7 +118,7 @@ namespace __gnu_parallel
 return false;
 }
-/** @brief Pops one element from the queue at the front __end.
+/** @brief Pops one element from the queue at the front end.
 * Must not be called concurrently with pop_front(). */
 bool
 pop_back(_Tp& __t) //queue behavior
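_RestrictedBoundedConcurrentQueue, whose comments are fixed above, keeps both borders of the queue packed into a single word (via __encode2, as in the base.h example earlier) so the owning thread can push and pop at the front with a fetch-and-add while other threads claim elements at the back. A much simplified std::atomic sketch of that layout, with illustrative names and without the header's full corner-case handling:

```cpp
#include <atomic>
#include <cstdint>

template<typename T, std::uint32_t N>
class bounded_steal_queue
{
  T buf_[N];
  std::atomic<std::uint64_t> borders_{0};     // packed front and back indices

  static std::uint64_t encode(std::uint32_t front, std::uint32_t back)
  { return (std::uint64_t(front) << 32) | back; }

  static void decode(std::uint64_t x, std::uint32_t& front, std::uint32_t& back)
  { front = std::uint32_t(x >> 32); back = std::uint32_t(x); }

public:
  // Owner only; must not race with other front operations, and the caller
  // must not overfill the buffer.
  void push_front(const T& t)
  {
    std::uint32_t front, back;
    decode(borders_.load(), front, back);
    buf_[front % N] = t;
    borders_.fetch_add(encode(1, 0));         // advance the front border
  }

  // Thieves: try to claim one element from the back.
  bool pop_back(T& t)
  {
    std::uint64_t old = borders_.load();
    for (;;)
      {
        std::uint32_t front, back;
        decode(old, front, back);
        if (front <= back)
          return false;                       // queue is empty
        t = buf_[back % N];
        if (borders_.compare_exchange_weak(old, encode(front, back + 1)))
          return true;                        // claimed the element
      }
  }
};
```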
include/parallel/random_shuffle.h:

@@ -93,10 +93,10 @@ template<typename _RAIter, typename RandomNumberGenerator>
 /** @brief Number of threads participating in total. */
 int _M_num_threads;
-/** @brief Begin __index for bins taken care of by this thread. */
+/** @brief Begin index for bins taken care of by this thread. */
 _BinIndex _M_bins_begin;
-/** @brief End __index for bins taken care of by this thread. */
+/** @brief End index for bins taken care of by this thread. */
 _BinIndex __bins_end;
 /** @brief Random _M_seed for this thread. */
@@ -384,7 +384,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
 delete[] __pus;
 }
-/** @brief Sequential __cache-efficient random shuffle.
+/** @brief Sequential cache-efficient random shuffle.
 * @param __begin Begin iterator of sequence.
 * @param __end End iterator of sequence.
 * @param __rng Random number generator to use.
include/parallel/settings.h:

@@ -197,7 +197,7 @@ namespace __gnu_parallel
 /// Minimal input size for partial_sort.
 _SequenceIndex partial_sort_minimal_n;
-/// Ratio for partial_sum. Assume "sum and write __result" to be
+/// Ratio for partial_sum. Assume "sum and write result" to be
 /// this factor slower than just "sum".
 float partial_sum_dilation;
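settings.h holds the run-time tunables the algorithms consult (the partial_sum_algorithm switch earlier in this diff reads one of them). Per the libstdc++ parallel-mode documentation, a program built with -D_GLIBCXX_PARALLEL -fopenmp can adjust them through _Settings::get()/_Settings::set(); a small usage sketch using the members visible in this diff, with illustrative values:

```cpp
#include <parallel/settings.h>

int main()
{
  // Copy the current global settings, tweak them, and publish them again.
  __gnu_parallel::_Settings s = __gnu_parallel::_Settings::get();
  s.partial_sort_minimal_n = 4096;   // larger sequential cutoff for partial_sort
  s.partial_sum_dilation   = 1.5f;   // "sum and write result" assumed 1.5x slower
  __gnu_parallel::_Settings::set(s);
}
```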
include/parallel/sort.h:

@@ -85,7 +85,7 @@ namespace __gnu_parallel
 }
 /**
-* @brief Choose multiway mergesort with __exact splitting,
+* @brief Choose multiway mergesort with exact splitting,
 * for parallel sorting.
 * @param __begin Begin iterator of input sequence.
 * @param __end End iterator of input sequence.
@@ -163,7 +163,7 @@ namespace __gnu_parallel
 /**
-* @brief Choose multiway mergesort with __exact splitting,
+* @brief Choose multiway mergesort with exact splitting,
 * for parallel sorting.
 * @param __begin Begin iterator of input sequence.
 * @param __end End iterator of input sequence.
include/parallel/tags.h:

@@ -62,7 +62,7 @@ namespace __gnu_parallel
 this->_M_num_threads = __num_threads;
 }
-/** @brief Find __out desired number of threads.
+/** @brief Find out desired number of threads.
 * @return Desired number of threads. */
 inline _ThreadIndex __get_num_threads()
 {
@@ -111,7 +111,7 @@ namespace __gnu_parallel
 /** @brief Forces parallel merging
-* with __exact splitting, at compile time. */
+* with exact splitting, at compile time. */
 struct exact_tag : public parallel_tag
 {
 exact_tag() { }
@@ -120,7 +120,7 @@ namespace __gnu_parallel
 };
 /** @brief Forces parallel merging
-* with __exact splitting, at compile time. */
+* with exact splitting, at compile time. */
 struct sampling_tag : public parallel_tag
 {
 sampling_tag() { }
@@ -139,7 +139,7 @@ namespace __gnu_parallel
 };
 /** @brief Forces parallel sorting using multiway mergesort
-* with __exact splitting at compile time. */
+* with exact splitting at compile time. */
 struct multiway_mergesort_exact_tag : public parallel_tag
 {
 multiway_mergesort_exact_tag() { }
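These tags let a call site force a specific parallel variant at compile time instead of relying on the library's heuristics. Assuming the usual parallel-mode setup (compiled with -D_GLIBCXX_PARALLEL -fopenmp) and the tag-taking overloads described in the parallel-mode documentation, a caller can pass such a tag as an extra argument, for example:

```cpp
#include <parallel/algorithm>
#include <vector>

int main()
{
  std::vector<int> v(1000000);   // zero-filled here; normally filled with data

  // Request a parallel run, leaving the algorithm choice to the library.
  __gnu_parallel::sort(v.begin(), v.end(), __gnu_parallel::parallel_tag());

  // Force multiway mergesort with exact splitting (the tag documented above).
  __gnu_parallel::sort(v.begin(), v.end(),
                       __gnu_parallel::multiway_mergesort_exact_tag());
}
```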
include/parallel/workstealing.h:

@@ -127,7 +127,7 @@ template<typename _RAIter,
 omp_lock_t __output_lock;
 omp_init_lock(&__output_lock);
-// Write __base __value to output.
+// Write base value to output.
 __output = __base;
 // No more threads than jobs, at least one thread.
@@ -142,7 +142,7 @@ template<typename _RAIter,
 {
 __num_threads = omp_get_num_threads();
-// Create __job description array.
+// Create job description array.
 __job = new _Job<_DifferenceType>[__num_threads * __stride];
 }
@@ -154,7 +154,7 @@ template<typename _RAIter,
 // Thread id.
 _ThreadIndex __iam = omp_get_thread_num();
-// This __job.
+// This job.
 _Job<_DifferenceType>& __my_job = __job[__iam * __stride];
 // Random number (for work stealing).
@@ -184,7 +184,7 @@ template<typename _RAIter,
 (__length - 1) : ((__iam + 1) * (__length / __num_threads) - 1);
 __my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
-// Init __result with _M_first __value (to have a base value for reduction).
+// Init result with _M_first __value (to have a base value for reduction).
 if (__my_job._M_first <= __my_job._M_last)
 {
 // Cannot use volatile variable directly.
@@ -199,17 +199,17 @@ template<typename _RAIter,
 # pragma omp barrier
 // Actual work phase
-// Work on own or stolen __start
+// Work on own or stolen current start
 while (__busy > 0)
 {
-// Work until no productive thread __left.
+// Work until no productive thread left.
 # pragma omp flush(__busy)
 // Thread has own work to do
 while (__my_job._M_first <= __my_job._M_last)
 {
 // fetch-and-add call
-// Reserve __current __job block (size __chunk_size) in my queue.
+// Reserve current job block (size __chunk_size) in my queue.
 _DifferenceType current_job =
 __fetch_and_add<_DifferenceType>(&(__my_job._M_first), __chunk_size);
@@ -265,7 +265,7 @@ template<typename _RAIter,
 // Number of elements to steal (at least one).
 __steal = (__supposed_load < 2) ? 1 : __supposed_load / 2;
-// Push __victim's __start forward.
+// Push __victim's current start forward.
 _DifferenceType __stolen_first =
 __fetch_and_add<_DifferenceType>(
 &(__job[__victim * __stride]._M_first), __steal);
@@ -285,7 +285,7 @@ template<typename _RAIter,
 }
 # pragma omp flush(__busy)
 } // end while __busy > 0
-// Add accumulated __result to output.
+// Add accumulated result to output.
 omp_set_lock(&__output_lock);
 __output = __r(__output, __result);
 omp_unset_lock(&__output_lock);
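The workstealing.h comments corrected above outline the algorithm: each thread reserves __chunk_size elements from its own job with a fetch-and-add on _M_first, and an idle thread steals roughly half of a victim's remaining load by pushing the victim's _M_first forward the same way; each thread's partial result is finally folded into __output under __output_lock, as the last hunk shows. A condensed std::atomic sketch of the two reservation steps, with simplified stand-in names rather than the header's own:

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <utility>

struct job
{
  std::atomic<std::int64_t> first{0};   // next unclaimed index (like _M_first)
  std::int64_t last{-1};                // last index of this job (like _M_last)
};

// Reserve up to `chunk` elements from my own job; returns the claimed
// half-open range, which is empty once the job is exhausted.
std::pair<std::int64_t, std::int64_t>
claim_chunk(job& mine, std::int64_t chunk)
{
  std::int64_t begin = mine.first.fetch_add(chunk);          // fetch-and-add call
  std::int64_t end = std::min(begin + chunk, mine.last + 1);
  return {begin, std::max(begin, end)};
}

// Steal roughly half of the victim's remaining load by pushing the
// victim's `first` forward; the stolen range becomes my new job.
bool steal_half(job& victim, job& mine)
{
  std::int64_t load = victim.last + 1 - victim.first.load();
  if (load < 1)
    return false;                                            // nothing to steal
  std::int64_t steal = std::max<std::int64_t>(1, load / 2);
  std::int64_t stolen_first = victim.first.fetch_add(steal);
  std::int64_t stolen_last = std::min(stolen_first + steal - 1, victim.last);
  if (stolen_first > stolen_last)
    return false;                                            // lost the race
  mine.last = stolen_last;
  mine.first.store(stolen_first);
  return true;
}
```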