"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "locks/unix/proc_mutex.c" between
apr-1.6.5.tar.bz2 and apr-1.7.0.tar.bz2

About: APR (Apache Portable Runtime) project offers software libraries that provide a predictable and consistent interface to underlying platform-specific implementations (APR core library).

proc_mutex.c  (apr-1.6.5.tar.bz2):proc_mutex.c  (apr-1.7.0.tar.bz2)
skipping to change at line 49 skipping to change at line 49
#if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE
static apr_status_t proc_mutex_no_perms_set(apr_proc_mutex_t *mutex, static apr_status_t proc_mutex_no_perms_set(apr_proc_mutex_t *mutex,
apr_fileperms_t perms, apr_fileperms_t perms,
apr_uid_t uid, apr_uid_t uid,
apr_gid_t gid) apr_gid_t gid)
{ {
return APR_ENOTIMPL; return APR_ENOTIMPL;
} }
#endif #endif
#if APR_HAS_FCNTL_SERIALIZE \
    || APR_HAS_FLOCK_SERIALIZE \
    || (APR_HAS_SYSVSEM_SERIALIZE \
        && !defined(HAVE_SEMTIMEDOP)) \
    || (APR_HAS_POSIXSEM_SERIALIZE \
        && !defined(HAVE_SEM_TIMEDWAIT)) \
    || (APR_HAS_PROC_PTHREAD_SERIALIZE \
        && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \
        && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED))
/* Generic fallback timed-acquire for mechanisms with no native timed
 * wait: poll with trylock, sleeping in small slices until the lock is
 * obtained or the (relative, apr_interval_time_t) timeout is exhausted.
 * Returns APR_SUCCESS on acquisition, APR_TIMEUP on expiry, or the
 * status from the failing trylock.  Resolution is bounded by SLEEP_TIME,
 * so wakeup may be up to ~10ms late. */
static apr_status_t proc_mutex_spinsleep_timedacquire(apr_proc_mutex_t *mutex,
                                                      apr_interval_time_t timeout)
{
#define SLEEP_TIME apr_time_from_msec(10)
    apr_status_t rv;
    for (;;) {
        rv = apr_proc_mutex_trylock(mutex);
        if (!APR_STATUS_IS_EBUSY(rv)) {
            /* Either we got the lock or trylock failed hard; in both
             * cases stop polling and report the status. */
            if (rv == APR_SUCCESS) {
                mutex->curr_locked = 1;
            }
            break;
        }
        if (timeout <= 0) {
            /* Budget exhausted (or caller passed a non-positive timeout
             * and the lock is busy): report a timeout. */
            rv = APR_TIMEUP;
            break;
        }
        /* Sleep one slice, but never past the remaining budget. */
        if (timeout > SLEEP_TIME) {
            apr_sleep(SLEEP_TIME);
            timeout -= SLEEP_TIME;
        }
        else {
            apr_sleep(timeout);
            timeout = 0;
        }
    }
    return rv;
}
#endif
#if APR_HAS_POSIXSEM_SERIALIZE #if APR_HAS_POSIXSEM_SERIALIZE
#ifndef SEM_FAILED #ifndef SEM_FAILED
#define SEM_FAILED (-1) #define SEM_FAILED (-1)
#endif #endif
static apr_status_t proc_mutex_posix_cleanup(void *mutex_) static apr_status_t proc_mutex_posix_cleanup(void *mutex_)
{ {
apr_proc_mutex_t *mutex = mutex_; apr_proc_mutex_t *mutex = mutex_;
skipping to change at line 185 skipping to change at line 224
if (rc < 0) { if (rc < 0) {
if (errno == EAGAIN) { if (errno == EAGAIN) {
return APR_EBUSY; return APR_EBUSY;
} }
return errno; return errno;
} }
mutex->curr_locked = 1; mutex->curr_locked = 1;
return APR_SUCCESS; return APR_SUCCESS;
} }
#if defined(HAVE_SEM_TIMEDWAIT)
/* Acquire the POSIX-semaphore mutex, waiting at most 'timeout'
 * (relative apr_interval_time_t, microseconds).  A timeout <= 0
 * degrades to a non-blocking try.  Returns APR_SUCCESS, APR_TIMEUP on
 * expiry, or an errno value on failure. */
static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex,
                                                  apr_interval_time_t timeout)
{
    if (timeout <= 0) {
        /* No time budget: map EBUSY from the non-blocking path to
         * APR_TIMEUP so callers see a consistent timeout status. */
        apr_status_t rv = proc_mutex_posix_tryacquire(mutex);
        return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
    }
    else {
        int rc;
        struct timespec abstime;

        /* sem_timedwait() takes an absolute deadline, so convert the
         * relative timeout to "now + timeout" before splitting it into
         * seconds/nanoseconds. */
        timeout += apr_time_now();
        abstime.tv_sec = apr_time_sec(timeout);
        abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

        do {
            rc = sem_timedwait(mutex->os.psem_interproc, &abstime);
        } while (rc < 0 && errno == EINTR); /* restart if interrupted by a signal */
        if (rc < 0) {
            if (errno == ETIMEDOUT) {
                return APR_TIMEUP;
            }
            return errno;
        }
    }
    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
#endif
static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex) static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex)
{ {
mutex->curr_locked = 0; mutex->curr_locked = 0;
if (sem_post(mutex->os.psem_interproc) < 0) { if (sem_post(mutex->os.psem_interproc) < 0) {
/* any failure is probably fatal, so no big deal to leave /* any failure is probably fatal, so no big deal to leave
* ->curr_locked at 0. */ * ->curr_locked at 0. */
return errno; return errno;
} }
return APR_SUCCESS; return APR_SUCCESS;
} }
skipping to change at line 206 skipping to change at line 276
static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods = static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods =
{ {
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(POSIXSEM_IS_GLOBAL ) #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(POSIXSEM_IS_GLOBAL )
APR_PROCESS_LOCK_MECH_IS_GLOBAL, APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else #else
0, 0,
#endif #endif
proc_mutex_posix_create, proc_mutex_posix_create,
proc_mutex_posix_acquire, proc_mutex_posix_acquire,
proc_mutex_posix_tryacquire, proc_mutex_posix_tryacquire,
#if defined(HAVE_SEM_TIMEDWAIT)
proc_mutex_posix_timedacquire,
#else
proc_mutex_spinsleep_timedacquire,
#endif
proc_mutex_posix_release, proc_mutex_posix_release,
proc_mutex_posix_cleanup, proc_mutex_posix_cleanup,
proc_mutex_no_child_init, proc_mutex_no_child_init,
proc_mutex_no_perms_set, proc_mutex_no_perms_set,
APR_LOCK_POSIXSEM, APR_LOCK_POSIXSEM,
"posixsem" "posixsem"
}; };
#endif /* Posix sem implementation */ #endif /* Posix sem implementation */
skipping to change at line 304 skipping to change at line 379
if (rc < 0) { if (rc < 0) {
if (errno == EAGAIN) { if (errno == EAGAIN) {
return APR_EBUSY; return APR_EBUSY;
} }
return errno; return errno;
} }
mutex->curr_locked = 1; mutex->curr_locked = 1;
return APR_SUCCESS; return APR_SUCCESS;
} }
#if defined(HAVE_SEMTIMEDOP)
/* Acquire the SysV-semaphore mutex, waiting at most 'timeout'
 * (relative apr_interval_time_t, microseconds).  A timeout <= 0
 * degrades to a non-blocking try.  Returns APR_SUCCESS, APR_TIMEUP on
 * expiry, or an errno value on failure. */
static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex,
                                                 apr_interval_time_t timeout)
{
    if (timeout <= 0) {
        /* No time budget: map EBUSY from the non-blocking path to
         * APR_TIMEUP so callers see a consistent timeout status. */
        apr_status_t rv = proc_mutex_sysv_tryacquire(mutex);
        return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
    }
    else {
        int rc;
        struct timespec reltime;

        /* Unlike sem_timedwait(), semtimedop() takes a RELATIVE
         * timeout, so no apr_time_now() adjustment here. */
        reltime.tv_sec = apr_time_sec(timeout);
        reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */

        do {
            rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1,
                            &reltime);
        } while (rc < 0 && errno == EINTR); /* restart if interrupted by a signal */
        if (rc < 0) {
            /* semtimedop() reports expiry as EAGAIN (not ETIMEDOUT). */
            if (errno == EAGAIN) {
                return APR_TIMEUP;
            }
            return errno;
        }
    }
    mutex->curr_locked = 1;
    return APR_SUCCESS;
}
#endif
static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex) static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex)
{ {
int rc; int rc;
mutex->curr_locked = 0; mutex->curr_locked = 0;
do { do {
rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1); rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1);
} while (rc < 0 && errno == EINTR); } while (rc < 0 && errno == EINTR);
if (rc < 0) { if (rc < 0) {
return errno; return errno;
skipping to change at line 346 skipping to change at line 452
static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods = static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods =
{ {
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL) #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL)
APR_PROCESS_LOCK_MECH_IS_GLOBAL, APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else #else
0, 0,
#endif #endif
proc_mutex_sysv_create, proc_mutex_sysv_create,
proc_mutex_sysv_acquire, proc_mutex_sysv_acquire,
proc_mutex_sysv_tryacquire, proc_mutex_sysv_tryacquire,
#if defined(HAVE_SEMTIMEDOP)
proc_mutex_sysv_timedacquire,
#else
proc_mutex_spinsleep_timedacquire,
#endif
proc_mutex_sysv_release, proc_mutex_sysv_release,
proc_mutex_sysv_cleanup, proc_mutex_sysv_cleanup,
proc_mutex_no_child_init, proc_mutex_no_child_init,
proc_mutex_sysv_perms_set, proc_mutex_sysv_perms_set,
APR_LOCK_SYSVSEM, APR_LOCK_SYSVSEM,
"sysvsem" "sysvsem"
}; };
#endif /* SysV sem implementation */ #endif /* SysV sem implementation */
#if APR_HAS_PROC_PTHREAD_SERIALIZE #if APR_HAS_PROC_PTHREAD_SERIALIZE
#ifndef APR_USE_PROC_PTHREAD_MUTEX_COND
#define APR_USE_PROC_PTHREAD_MUTEX_COND \
(defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \
&& !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK))
#endif
/* The mmap()ed pthread_interproc is the native pthread_mutex_t followed /* The mmap()ed pthread_interproc is the native pthread_mutex_t followed
* by a refcounter to track children using it. We want to avoid calling * by a refcounter to track children using it. We want to avoid calling
* pthread_mutex_destroy() on the shared mutex area while it is in use by * pthread_mutex_destroy() on the shared mutex area while it is in use by
* another process, because this may mark the shared pthread_mutex_t as * another process, because this may mark the shared pthread_mutex_t as
* invalid for everyone, including forked children (unlike "sysvsem" for * invalid for everyone, including forked children (unlike "sysvsem" for
* example), causing unexpected errors or deadlocks (PR 49504). So the * example), causing unexpected errors or deadlocks (PR 49504). So the
* last process (parent or child) referencing the mutex will effectively * last process (parent or child) referencing the mutex will effectively
* destroy it. * destroy it.
*/ */
typedef struct { typedef struct {
#define proc_pthread_cast(m) \
((proc_pthread_mutex_t *)(m)->os.pthread_interproc)
pthread_mutex_t mutex; pthread_mutex_t mutex;
#define proc_pthread_mutex(m) \
(proc_pthread_cast(m)->mutex)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
pthread_cond_t cond;
#define proc_pthread_mutex_cond(m) \
(proc_pthread_cast(m)->cond)
apr_int32_t cond_locked;
#define proc_pthread_mutex_cond_locked(m) \
(proc_pthread_cast(m)->cond_locked)
apr_uint32_t cond_num_waiters;
#define proc_pthread_mutex_cond_num_waiters(m) \
(proc_pthread_cast(m)->cond_num_waiters)
#define proc_pthread_mutex_is_cond(m) \
((m)->pthread_refcounting && proc_pthread_mutex_cond_locked(m) != -1)
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
apr_uint32_t refcount; apr_uint32_t refcount;
} proc_pthread_mutex_t;
#define proc_pthread_mutex_refcount(m) \ #define proc_pthread_mutex_refcount(m) \
(((proc_pthread_mutex_t *)(m)->os.pthread_interproc)->refcount) (proc_pthread_cast(m)->refcount)
} proc_pthread_mutex_t;
static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex) static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex)
{ {
if (mutex->pthread_refcounting) { if (mutex->pthread_refcounting) {
apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex)); apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex));
return 1; return 1;
} }
return 0; return 0;
} }
skipping to change at line 397 skipping to change at line 530
return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex)); return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex));
} }
return 0; return 0;
} }
static apr_status_t proc_pthread_mutex_unref(void *mutex_) static apr_status_t proc_pthread_mutex_unref(void *mutex_)
{ {
apr_proc_mutex_t *mutex=mutex_; apr_proc_mutex_t *mutex=mutex_;
apr_status_t rv; apr_status_t rv;
#if APR_USE_PROC_PTHREAD_MUTEX_COND
if (proc_pthread_mutex_is_cond(mutex)) {
mutex->curr_locked = 0;
}
else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
if (mutex->curr_locked == 1) { if (mutex->curr_locked == 1) {
if ((rv = pthread_mutex_unlock(mutex->os.pthread_interproc))) { if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
return rv; return rv;
} }
} }
if (!proc_pthread_mutex_dec(mutex)) { if (!proc_pthread_mutex_dec(mutex)) {
if ((rv = pthread_mutex_destroy(mutex->os.pthread_interproc))) { #if APR_USE_PROC_PTHREAD_MUTEX_COND
if (proc_pthread_mutex_is_cond(mutex) &&
(rv = pthread_cond_destroy(&proc_pthread_mutex_cond(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
return rv;
}
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
return rv; return rv;
} }
} }
return APR_SUCCESS; return APR_SUCCESS;
} }
static apr_status_t proc_mutex_pthread_cleanup(void *mutex_) static apr_status_t proc_mutex_pthread_cleanup(void *mutex_)
skipping to change at line 458 skipping to change at line 607
if (new_mutex->os.pthread_interproc == MAP_FAILED) { if (new_mutex->os.pthread_interproc == MAP_FAILED) {
new_mutex->os.pthread_interproc = NULL; new_mutex->os.pthread_interproc = NULL;
rv = errno; rv = errno;
close(fd); close(fd);
return rv; return rv;
} }
close(fd); close(fd);
new_mutex->pthread_refcounting = 1; new_mutex->pthread_refcounting = 1;
new_mutex->curr_locked = -1; /* until the mutex has been created */ new_mutex->curr_locked = -1; /* until the mutex has been created */
#if APR_USE_PROC_PTHREAD_MUTEX_COND
proc_pthread_mutex_cond_locked(new_mutex) = -1;
#endif
if ((rv = pthread_mutexattr_init(&mattr))) { if ((rv = pthread_mutexattr_init(&mattr))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
proc_mutex_pthread_cleanup(new_mutex); proc_mutex_pthread_cleanup(new_mutex);
return rv; return rv;
} }
if ((rv = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) { if ((rv = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
proc_mutex_pthread_cleanup(new_mutex); proc_mutex_pthread_cleanup(new_mutex);
pthread_mutexattr_destroy(&mattr); pthread_mutexattr_destroy(&mattr);
return rv; return rv;
} }
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
#ifdef HAVE_PTHREAD_MUTEX_ROBUST #ifdef HAVE_PTHREAD_MUTEX_ROBUST
if ((rv = pthread_mutexattr_setrobust_np(&mattr, rv = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
PTHREAD_MUTEX_ROBUST_NP))) { #else
rv = pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
#endif
if (rv) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
proc_mutex_pthread_cleanup(new_mutex); proc_mutex_pthread_cleanup(new_mutex);
pthread_mutexattr_destroy(&mattr); pthread_mutexattr_destroy(&mattr);
return rv; return rv;
} }
if ((rv = pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT))) { if ((rv = pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
proc_mutex_pthread_cleanup(new_mutex); proc_mutex_pthread_cleanup(new_mutex);
pthread_mutexattr_destroy(&mattr); pthread_mutexattr_destroy(&mattr);
return rv; return rv;
} }
#endif /* HAVE_PTHREAD_MUTEX_ROBUST */ #endif /* HAVE_PTHREAD_MUTEX_ROBUST[_NP] */
if ((rv = pthread_mutex_init(new_mutex->os.pthread_interproc, &mattr))) { if ((rv = pthread_mutex_init(&proc_pthread_mutex(new_mutex), &mattr))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
proc_mutex_pthread_cleanup(new_mutex); proc_mutex_pthread_cleanup(new_mutex);
pthread_mutexattr_destroy(&mattr); pthread_mutexattr_destroy(&mattr);
return rv; return rv;
} }
proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */ proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */
new_mutex->curr_locked = 0; /* mutex created now */ new_mutex->curr_locked = 0; /* mutex created now */
skipping to change at line 534 skipping to change at line 690
const char *fname) const char *fname)
{ {
(*mutex)->curr_locked = 0; (*mutex)->curr_locked = 0;
if (proc_pthread_mutex_inc(*mutex)) { if (proc_pthread_mutex_inc(*mutex)) {
apr_pool_cleanup_register(pool, *mutex, proc_pthread_mutex_unref, apr_pool_cleanup_register(pool, *mutex, proc_pthread_mutex_unref,
apr_pool_cleanup_null); apr_pool_cleanup_null);
} }
return APR_SUCCESS; return APR_SUCCESS;
} }
static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex) static apr_status_t proc_mutex_pthread_acquire_ex(apr_proc_mutex_t *mutex,
apr_interval_time_t timeout)
{ {
apr_status_t rv; apr_status_t rv;
if ((rv = pthread_mutex_lock(mutex->os.pthread_interproc))) { #if APR_USE_PROC_PTHREAD_MUTEX_COND
if (proc_pthread_mutex_is_cond(mutex)) {
if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
/* Okay, our owner died. Let's try to make it consistent again. */
if (rv == EOWNERDEAD) {
proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST #ifdef HAVE_PTHREAD_MUTEX_ROBUST
/* Okay, our owner died. Let's try to make it consistent again. */ pthread_mutex_consistent(&proc_pthread_mutex(mutex));
if (rv == EOWNERDEAD) { #else
proc_pthread_mutex_dec(mutex); pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
pthread_mutex_consistent_np(mutex->os.pthread_interproc); #endif
}
else
#endif
return rv;
}
if (!proc_pthread_mutex_cond_locked(mutex)) {
rv = APR_SUCCESS;
}
else if (!timeout) {
rv = APR_TIMEUP;
}
else {
struct timespec abstime;
if (timeout > 0) {
timeout += apr_time_now();
abstime.tv_sec = apr_time_sec(timeout);
abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds
*/
}
proc_pthread_mutex_cond_num_waiters(mutex)++;
do {
if (timeout < 0) {
rv = pthread_cond_wait(&proc_pthread_mutex_cond(mutex),
&proc_pthread_mutex(mutex));
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
break;
}
}
else {
rv = pthread_cond_timedwait(&proc_pthread_mutex_cond(mutex),
&proc_pthread_mutex(mutex),
&abstime);
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
if (rv == ETIMEDOUT) {
rv = APR_TIMEUP;
}
break;
}
}
} while (proc_pthread_mutex_cond_locked(mutex));
proc_pthread_mutex_cond_num_waiters(mutex)--;
}
if (rv != APR_SUCCESS) {
pthread_mutex_unlock(&proc_pthread_mutex(mutex));
return rv;
}
proc_pthread_mutex_cond_locked(mutex) = 1;
rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex));
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
return rv;
}
}
else
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
{
if (timeout < 0) {
rv = pthread_mutex_lock(&proc_pthread_mutex(mutex));
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
}
}
else if (!timeout) {
rv = pthread_mutex_trylock(&proc_pthread_mutex(mutex));
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
if (rv == EBUSY) {
return APR_TIMEUP;
}
}
} }
else else
#if defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
{
struct timespec abstime;
timeout += apr_time_now();
abstime.tv_sec = apr_time_sec(timeout);
abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
rv = pthread_mutex_timedlock(&proc_pthread_mutex(mutex), &abstime);
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
if (rv == ETIMEDOUT) {
return APR_TIMEUP;
}
}
}
if (rv) {
#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
/* Okay, our owner died. Let's try to make it consistent again. */
if (rv == EOWNERDEAD) {
proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST
pthread_mutex_consistent(&proc_pthread_mutex(mutex));
#else
pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
#endif
}
else
#endif
return rv;
}
#else /* !HAVE_PTHREAD_MUTEX_TIMEDLOCK */
return proc_mutex_spinsleep_timedacquire(mutex, timeout);
#endif #endif
return rv;
} }
mutex->curr_locked = 1; mutex->curr_locked = 1;
return APR_SUCCESS; return APR_SUCCESS;
} }
static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex)
{
return proc_mutex_pthread_acquire_ex(mutex, -1);
}
static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex) static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex)
{ {
apr_status_t rv = proc_mutex_pthread_acquire_ex(mutex, 0);
return (rv == APR_TIMEUP) ? APR_EBUSY : rv;
}
static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex,
apr_interval_time_t timeout)
{
return proc_mutex_pthread_acquire_ex(mutex, (timeout <= 0) ? 0 : timeout);
}
static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex)
{
apr_status_t rv; apr_status_t rv;
if ((rv = pthread_mutex_trylock(mutex->os.pthread_interproc))) { #if APR_USE_PROC_PTHREAD_MUTEX_COND
if (proc_pthread_mutex_is_cond(mutex)) {
if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
if (rv == EBUSY) { #if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
return APR_EBUSY; /* Okay, our owner died. Let's try to make it consistent again. */
} if (rv == EOWNERDEAD) {
proc_pthread_mutex_dec(mutex);
#ifdef HAVE_PTHREAD_MUTEX_ROBUST #ifdef HAVE_PTHREAD_MUTEX_ROBUST
/* Okay, our owner died. Let's try to make it consistent again. */ pthread_mutex_consistent(&proc_pthread_mutex(mutex));
if (rv == EOWNERDEAD) { #else
proc_pthread_mutex_dec(mutex); pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
pthread_mutex_consistent_np(mutex->os.pthread_interproc); #endif
}
else
#endif
return rv;
} }
else
if (!proc_pthread_mutex_cond_locked(mutex)) {
rv = APR_EINVAL;
}
else if (!proc_pthread_mutex_cond_num_waiters(mutex)) {
rv = APR_SUCCESS;
}
else {
rv = pthread_cond_signal(&proc_pthread_mutex_cond(mutex));
#ifdef HAVE_ZOS_PTHREADS
if (rv) {
rv = errno;
}
#endif
}
if (rv != APR_SUCCESS) {
pthread_mutex_unlock(&proc_pthread_mutex(mutex));
return rv;
}
proc_pthread_mutex_cond_locked(mutex) = 0;
}
#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
mutex->curr_locked = 0;
if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif #endif
return rv; return rv;
} }
mutex->curr_locked = 1;
return APR_SUCCESS; return APR_SUCCESS;
} }
static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex) static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods =
{
APR_PROCESS_LOCK_MECH_IS_GLOBAL,
proc_mutex_pthread_create,
proc_mutex_pthread_acquire,
proc_mutex_pthread_tryacquire,
proc_mutex_pthread_timedacquire,
proc_mutex_pthread_release,
proc_mutex_pthread_cleanup,
proc_mutex_pthread_child_init,
proc_mutex_no_perms_set,
APR_LOCK_PROC_PTHREAD,
"pthread"
};
#if APR_USE_PROC_PTHREAD_MUTEX_COND
static apr_status_t proc_mutex_pthread_cond_create(apr_proc_mutex_t *new_mutex,
const char *fname)
{ {
apr_status_t rv; apr_status_t rv;
pthread_condattr_t cattr;
mutex->curr_locked = 0; rv = proc_mutex_pthread_create(new_mutex, fname);
if ((rv = pthread_mutex_unlock(mutex->os.pthread_interproc))) { if (rv != APR_SUCCESS) {
return rv;
}
if ((rv = pthread_condattr_init(&cattr))) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
apr_pool_cleanup_run(new_mutex->pool, new_mutex,
apr_proc_mutex_cleanup);
return rv;
}
if ((rv = pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED))) {
#ifdef HAVE_ZOS_PTHREADS #ifdef HAVE_ZOS_PTHREADS
rv = errno; rv = errno;
#endif #endif
pthread_condattr_destroy(&cattr);
apr_pool_cleanup_run(new_mutex->pool, new_mutex,
apr_proc_mutex_cleanup);
return rv; return rv;
} }
if ((rv = pthread_cond_init(&proc_pthread_mutex_cond(new_mutex),
&cattr))) {
#ifdef HAVE_ZOS_PTHREADS
rv = errno;
#endif
pthread_condattr_destroy(&cattr);
apr_pool_cleanup_run(new_mutex->pool, new_mutex,
apr_proc_mutex_cleanup);
return rv;
}
pthread_condattr_destroy(&cattr);
proc_pthread_mutex_cond_locked(new_mutex) = 0;
proc_pthread_mutex_cond_num_waiters(new_mutex) = 0;
return APR_SUCCESS; return APR_SUCCESS;
} }
static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods = static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_cond_methods =
{ {
APR_PROCESS_LOCK_MECH_IS_GLOBAL, APR_PROCESS_LOCK_MECH_IS_GLOBAL,
proc_mutex_pthread_create, proc_mutex_pthread_cond_create,
proc_mutex_pthread_acquire, proc_mutex_pthread_acquire,
proc_mutex_pthread_tryacquire, proc_mutex_pthread_tryacquire,
proc_mutex_pthread_timedacquire,
proc_mutex_pthread_release, proc_mutex_pthread_release,
proc_mutex_pthread_cleanup, proc_mutex_pthread_cleanup,
proc_mutex_pthread_child_init, proc_mutex_pthread_child_init,
proc_mutex_no_perms_set, proc_mutex_no_perms_set,
APR_LOCK_PROC_PTHREAD, APR_LOCK_PROC_PTHREAD,
"pthread" "pthread"
}; };
#endif
#endif #endif
#if APR_HAS_FCNTL_SERIALIZE #if APR_HAS_FCNTL_SERIALIZE
static struct flock proc_mutex_lock_it; static struct flock proc_mutex_lock_it;
static struct flock proc_mutex_unlock_it; static struct flock proc_mutex_unlock_it;
static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *); static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *);
skipping to change at line 764 skipping to change at line 1149
static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods = static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods =
{ {
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL) #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL)
APR_PROCESS_LOCK_MECH_IS_GLOBAL, APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else #else
0, 0,
#endif #endif
proc_mutex_fcntl_create, proc_mutex_fcntl_create,
proc_mutex_fcntl_acquire, proc_mutex_fcntl_acquire,
proc_mutex_fcntl_tryacquire, proc_mutex_fcntl_tryacquire,
proc_mutex_spinsleep_timedacquire,
proc_mutex_fcntl_release, proc_mutex_fcntl_release,
proc_mutex_fcntl_cleanup, proc_mutex_fcntl_cleanup,
proc_mutex_no_child_init, proc_mutex_no_child_init,
proc_mutex_fcntl_perms_set, proc_mutex_fcntl_perms_set,
APR_LOCK_FCNTL, APR_LOCK_FCNTL,
"fcntl" "fcntl"
}; };
#endif /* fcntl implementation */ #endif /* fcntl implementation */
skipping to change at line 937 skipping to change at line 1323
static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods = static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods =
{ {
#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL) #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL)
APR_PROCESS_LOCK_MECH_IS_GLOBAL, APR_PROCESS_LOCK_MECH_IS_GLOBAL,
#else #else
0, 0,
#endif #endif
proc_mutex_flock_create, proc_mutex_flock_create,
proc_mutex_flock_acquire, proc_mutex_flock_acquire,
proc_mutex_flock_tryacquire, proc_mutex_flock_tryacquire,
proc_mutex_spinsleep_timedacquire,
proc_mutex_flock_release, proc_mutex_flock_release,
proc_mutex_flock_cleanup, proc_mutex_flock_cleanup,
proc_mutex_flock_child_init, proc_mutex_flock_child_init,
proc_mutex_flock_perms_set, proc_mutex_flock_perms_set,
APR_LOCK_FLOCK, APR_LOCK_FLOCK,
"flock" "flock"
}; };
#endif /* flock implementation */ #endif /* flock implementation */
skipping to change at line 1043 skipping to change at line 1430
if (ospmutex) { if (ospmutex) {
if (ospmutex->pthread_interproc == NULL) { if (ospmutex->pthread_interproc == NULL) {
return APR_EINVAL; return APR_EINVAL;
} }
new_mutex->os.pthread_interproc = ospmutex->pthread_interproc; new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
} }
#else #else
return APR_ENOTIMPL; return APR_ENOTIMPL;
#endif #endif
break; break;
case APR_LOCK_DEFAULT_TIMED:
#if APR_HAS_PROC_PTHREAD_SERIALIZE \
&& (APR_USE_PROC_PTHREAD_MUTEX_COND \
|| defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \
&& defined(HAVE_PTHREAD_MUTEX_ROBUST)
#if APR_USE_PROC_PTHREAD_MUTEX_COND
new_mutex->meth = &mutex_proc_pthread_cond_methods;
#else
new_mutex->meth = &mutex_proc_pthread_methods;
#endif
if (ospmutex) {
if (ospmutex->pthread_interproc == NULL) {
return APR_EINVAL;
}
new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
}
break;
#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP)
new_mutex->meth = &mutex_sysv_methods;
if (ospmutex) {
if (ospmutex->crossproc == -1) {
return APR_EINVAL;
}
new_mutex->os.crossproc = ospmutex->crossproc;
}
break;
#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT)
new_mutex->meth = &mutex_posixsem_methods;
if (ospmutex) {
if (ospmutex->psem_interproc == NULL) {
return APR_EINVAL;
}
new_mutex->os.psem_interproc = ospmutex->psem_interproc;
}
break;
#endif
/* fall trough */
case APR_LOCK_DEFAULT: case APR_LOCK_DEFAULT:
#if APR_USE_FLOCK_SERIALIZE #if APR_USE_FLOCK_SERIALIZE
new_mutex->meth = &mutex_flock_methods; new_mutex->meth = &mutex_flock_methods;
if (ospmutex) { if (ospmutex) {
if (ospmutex->crossproc == -1) { if (ospmutex->crossproc == -1) {
return APR_EINVAL; return APR_EINVAL;
} }
new_mutex->os.crossproc = ospmutex->crossproc; new_mutex->os.crossproc = ospmutex->crossproc;
} }
#elif APR_USE_SYSVSEM_SERIALIZE #elif APR_USE_SYSVSEM_SERIALIZE
skipping to change at line 1158 skipping to change at line 1582
APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex) APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
{ {
return mutex->meth->acquire(mutex); return mutex->meth->acquire(mutex);
} }
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex) APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
{ {
return mutex->meth->tryacquire(mutex); return mutex->meth->tryacquire(mutex);
} }
/* Public API: acquire 'mutex' within 'timeout'.  Dispatches to the
 * timedacquire method of the mechanism selected at creation time;
 * returns APR_ENOTIMPL when APR was built without timed-lock support. */
APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
                                                   apr_interval_time_t timeout)
{
#if APR_HAS_TIMEDLOCKS
    return mutex->meth->timedacquire(mutex, timeout);
#else
    return APR_ENOTIMPL;
#endif
}
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex) APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{ {
return mutex->meth->release(mutex); return mutex->meth->release(mutex);
} }
APR_DECLARE(apr_status_t) apr_proc_mutex_cleanup(void *mutex) APR_DECLARE(apr_status_t) apr_proc_mutex_cleanup(void *mutex)
{ {
return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex); return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex);
} }
 End of changes. 47 change blocks. 
33 lines changed or deleted 468 lines changed or added

Home  |  About  |  Features  |  All  |  Newest  |  Dox  |  Diffs  |  RSS Feeds  |  Screenshots  |  Comments  |  Imprint  |  Privacy  |  HTTP(S)