Commit 25e00ab6 authored by Jonathan Wakely, committed by Jonathan Wakely

re PR libstdc++/57641 (std::timed_mutex.try_lock_until() is broken)

	PR libstdc++/57641
	* include/std/mutex (timed_mutex, recursive_timed_mutex): Move common
	functionality to new __timed_mutex_impl mixin. Overload try_lock_until
	to handle conversion between different clocks. Replace constrained
	__try_lock_for_impl overloads with conditional increment.
	* include/std/shared_mutex (shared_mutex::_Mutex): Use the new mixin.
	* testsuite/30_threads/timed_mutex/try_lock_until/57641.cc: New.

From-SVN: r200180
parent bef8491a
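
A note on the bug itself: before this commit, try_lock_until took the caller's time_point, on whatever clock, and converted it straight into a timespec for __gthread_mutex_timedlock, which measures against the system clock's epoch. A deadline expressed on a clock with a different epoch was therefore honoured against the wrong epoch, as PR 57641 reports. The stand-alone sketch below is not part of the commit (shifted_clock is an illustrative clock modelled on the one in the new test); it contrasts that misreading with the re-anchoring the new overload performs via _M_try_lock_for(__atime - _Clock::now()).

#include <chrono>
#include <iostream>

namespace C = std::chrono;

// Illustrative clock whose epoch sits 10 seconds before system_clock's, so its
// "now" reads 10 seconds ahead of system_clock for the same instant.
struct shifted_clock
{
  typedef C::system_clock::rep rep;
  typedef C::system_clock::period period;
  typedef C::system_clock::duration duration;
  typedef C::time_point<shifted_clock> time_point;
  static constexpr bool is_steady = false;

  static time_point now()
  { return time_point(C::system_clock::now().time_since_epoch() + C::seconds(10)); }
};

int main()
{
  auto deadline = shifted_clock::now() + C::milliseconds(1);
  auto sys_now  = C::system_clock::now();

  // Misreading: reuse the raw tick count as a system_clock deadline, which is
  // what converting the foreign time_point straight into a timespec amounted to.
  C::system_clock::time_point misread(deadline.time_since_epoch());

  // Re-anchoring: measure the remaining time on the caller's clock and carry it
  // over to the native clock, as the new try_lock_until overload does.
  auto reanchored = sys_now + (deadline - shifted_clock::now());

  std::cout << "misread:     ~"
            << C::duration_cast<C::microseconds>(misread - sys_now).count()
            << "us away\n";   // about ten million: a 1 ms wait would block for ~10 s
  std::cout << "re-anchored: ~"
            << C::duration_cast<C::microseconds>(reanchored - sys_now).count()
            << "us away\n";   // just under 1000, i.e. the requested millisecond
}
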
libstdc++-v3/ChangeLog
2013-06-18 Jonathan Wakely <jwakely.gcc@gmail.com>
PR libstdc++/57641
* include/std/mutex (timed_mutex, recursive_timed_mutex): Move common
functionality to new __timed_mutex_impl mixin. Overload try_lock_until
to handle conversion between different clocks. Replace constrained
__try_lock_for_impl overloads with conditional increment.
* include/std/shared_mutex (shared_mutex::_Mutex): Use the new mixin.
* testsuite/30_threads/timed_mutex/try_lock_until/57641.cc: New.
2013-06-17 Jonathan Wakely <jwakely.gcc@gmail.com>
Chris Jefferson <chris@bubblescope.net>
include/std/mutex
@@ -199,15 +199,57 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
};
#if _GTHREAD_USE_MUTEX_TIMEDLOCK
/// timed_mutex
class timed_mutex : private __mutex_base
{
template<typename _Derived>
class __timed_mutex_impl
{
protected:
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
typedef chrono::steady_clock __clock_t;
#else
typedef chrono::high_resolution_clock __clock_t;
#endif
template<typename _Rep, typename _Period>
bool
_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
{
auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
if (ratio_greater<__clock_t::period, _Period>())
++__rt;
return _M_try_lock_until(__clock_t::now() + __rt);
}
template<typename _Duration>
bool
_M_try_lock_until(const chrono::time_point<__clock_t,
_Duration>& __atime)
{
chrono::time_point<__clock_t, chrono::seconds> __s =
chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
__gthread_time_t __ts = {
static_cast<std::time_t>(__s.time_since_epoch().count()),
static_cast<long>(__ns.count())
};
auto __mutex = static_cast<_Derived*>(this)->native_handle();
return !__gthread_mutex_timedlock(__mutex, &__ts);
}
template<typename _Clock, typename _Duration>
bool
_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
{ return _M_try_lock_for(__atime - _Clock::now()); }
};
/// timed_mutex
class timed_mutex
: private __mutex_base, public __timed_mutex_impl<timed_mutex>
{
public:
typedef __native_type* native_handle_type;
@@ -237,25 +279,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template <class _Rep, class _Period>
bool
try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
{ return __try_lock_for_impl(__rtime); }
{ return _M_try_lock_for(__rtime); }
template <class _Clock, class _Duration>
bool
try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
{
chrono::time_point<_Clock, chrono::seconds> __s =
chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
__gthread_time_t __ts = {
static_cast<std::time_t>(__s.time_since_epoch().count()),
static_cast<long>(__ns.count())
};
return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
}
{ return _M_try_lock_until(__atime); }
void
unlock()
@@ -267,40 +296,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
native_handle_type
native_handle()
{ return &_M_mutex; }
private:
template<typename _Rep, typename _Period>
typename enable_if<
ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
template <typename _Rep, typename _Period>
typename enable_if<
!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ ++chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
};
/// recursive_timed_mutex
class recursive_timed_mutex : private __recursive_mutex_base
class recursive_timed_mutex
: private __recursive_mutex_base,
public __timed_mutex_impl<recursive_timed_mutex>
{
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
typedef chrono::steady_clock __clock_t;
#else
typedef chrono::high_resolution_clock __clock_t;
#endif
public:
typedef __native_type* native_handle_type;
@@ -330,25 +332,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template <class _Rep, class _Period>
bool
try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
{ return __try_lock_for_impl(__rtime); }
{ return _M_try_lock_for(__rtime); }
template <class _Clock, class _Duration>
bool
try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
{
chrono::time_point<_Clock, chrono::seconds> __s =
chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
__gthread_time_t __ts = {
static_cast<std::time_t>(__s.time_since_epoch().count()),
static_cast<long>(__ns.count())
};
return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
}
{ return _M_try_lock_until(__atime); }
void
unlock()
@@ -360,29 +349,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
native_handle_type
native_handle()
{ return &_M_mutex; }
private:
template<typename _Rep, typename _Period>
typename enable_if<
ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
template <typename _Rep, typename _Period>
typename enable_if<
!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ ++chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
};
#endif
#endif // _GLIBCXX_HAS_GTHREADS
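
The "conditional increment" mentioned in the ChangeLog replaces the two enable_if-constrained __try_lock_for_impl overloads removed above. The subtlety is that duration_cast truncates toward zero, so converting a fine-grained relative timeout to a coarser native tick could make the wait end too early; the cast result is therefore bumped by one tick exactly when the target period is coarser. A stand-alone sketch of that rounding step (coarse_tick is a made-up alias standing in for __clock_t::duration):

#include <chrono>
#include <iostream>
#include <ratio>

int main()
{
  using namespace std::chrono;

  // Pretend the native clock ticks in milliseconds while the caller's timeout
  // is in microseconds; coarse_tick plays the role of __clock_t::duration.
  typedef duration<long, std::milli> coarse_tick;

  microseconds requested(1500);                     // caller's relative timeout
  auto rt = duration_cast<coarse_tick>(requested);  // truncates to 1 ms: too short

  // The same test the patch performs: bump only when the target tick is coarser.
  if (std::ratio_greater<coarse_tick::period, microseconds::period>())
    ++rt;                                           // round up, never wait less than asked

  std::cout << rt.count() << "ms\n";                // prints 2
}
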
include/std/shared_mutex
@@ -56,55 +56,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
class shared_mutex
{
#if _GTHREAD_USE_MUTEX_TIMEDLOCK
struct _Mutex : mutex
struct _Mutex : mutex, __timed_mutex_impl<_Mutex>
{
typedef chrono::steady_clock __clock_t;
template <class _Rep, class _Period>
template<typename _Rep, typename _Period>
bool
try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
{ return __try_lock_for_impl(__rtime); }
{ return _M_try_lock_for(__rtime); }
template <class _Clock, class _Duration>
template<typename _Clock, typename _Duration>
bool
try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
{
chrono::time_point<_Clock, chrono::seconds> __s =
chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
__gthread_time_t __ts = {
static_cast<std::time_t>(__s.time_since_epoch().count()),
static_cast<long>(__ns.count())
};
return !__gthread_mutex_timedlock(native_handle(), &__ts);
}
private:
template<typename _Rep, typename _Period>
typename enable_if<
ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
template <typename _Rep, typename _Period>
typename enable_if<
!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
__clock_t::time_point __atime = __clock_t::now()
+ ++chrono::duration_cast<__clock_t::duration>(__rtime);
return try_lock_until(__atime);
}
{ return _M_try_lock_until(__atime); }
};
#else
typedef mutex _Mutex;
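
shared_mutex's internal _Mutex can reuse the same code because __timed_mutex_impl is a CRTP mixin: it reaches the concrete mutex through static_cast<_Derived*>(this)->native_handle(), so timed_mutex, recursive_timed_mutex and _Mutex all share the timed-lock plumbing without virtual dispatch or a stored handle. A minimal sketch of the idiom (the names below are illustrative, not library code):

#include <iostream>

// The base template knows the timed-lock plumbing but owns no handle of its own.
template<typename Derived>
struct timed_lock_mixin
{
protected:
  bool lock_with_timeout()
  {
    // Ask the concrete mutex for its native handle, as the real mixin does with
    // static_cast<_Derived*>(this)->native_handle().
    auto* handle = static_cast<Derived*>(this)->native_handle();
    return handle != nullptr;   // a real implementation would issue the OS call here
  }
};

// Any class that derives from the mixin and exposes native_handle() gets the
// shared behaviour; this mirrors timed_mutex, recursive_timed_mutex and _Mutex.
struct demo_mutex : timed_lock_mixin<demo_mutex>
{
  int impl = 42;                          // stand-in for the real native mutex
  int* native_handle() { return &impl; }
  bool try_lock_for_demo() { return lock_with_timeout(); }
};

int main()
{
  demo_mutex m;
  std::cout << (m.try_lock_for_demo() ? "locked" : "failed") << '\n';
}
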
testsuite/30_threads/timed_mutex/try_lock_until/57641.cc (new file)
// { dg-do run { target *-*-freebsd* *-*-netbsd* *-*-linux* *-*-gnu* *-*-solaris* *-*-cygwin *-*-darwin* powerpc-ibm-aix* } }
// { dg-options " -std=gnu++0x -pthread" { target *-*-freebsd* *-*-netbsd* *-*-linux* *-*-gnu* powerpc-ibm-aix* } }
// { dg-options " -std=gnu++0x -pthreads" { target *-*-solaris* } }
// { dg-options " -std=gnu++0x " { target *-*-cygwin *-*-darwin* } }
// { dg-require-cstdint "" }
// { dg-require-gthreads-timed "" }
// Copyright (C) 2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <mutex>
#include <chrono>
#include <thread>
#include <testsuite_hooks.h>
// PR libstdc++/57641
namespace C = std::chrono;
// custom clock with epoch 10s before system_clock's
struct clock
{
typedef C::system_clock::rep rep;
typedef C::system_clock::period period;
typedef C::system_clock::duration duration;
typedef C::time_point<clock> time_point;
static constexpr bool is_steady = C::system_clock::is_steady;
static time_point
now()
{
auto sys_time = C::system_clock::now().time_since_epoch();
return time_point(sys_time + C::seconds(10));
}
};
std::timed_mutex mx;
bool test = false;
void f()
{
test = mx.try_lock_until(clock::now() + C::milliseconds(1));
}
int main()
{
std::lock_guard<std::timed_mutex> l(mx);
auto start = C::system_clock::now();
std::thread t(f);
t.join();
auto stop = C::system_clock::now();
VERIFY( (stop - start) < C::seconds(9) );
VERIFY( !test );
}