[lxc-devel] [RFC PATCH] lxclock: Replace named semaphore with flock

Dwight Engen dwight.engen at oracle.com
Fri May 24 16:21:27 UTC 2013


On Fri, 24 May 2013 08:09:21 -0500
Serge Hallyn <serge.hallyn at ubuntu.com> wrote:

> The problem: if a task is killed while holding a posix semaphore,
> there appears to be no way to have the semaphore be reliably
> automatically released.  The only trick which seemed promising
> is to store the pid of the lock holder in some file and have
> later lock seekers check whether that task has died.
> 
> Instead of going down that route, this patch switches from a
> named posix semaphore to flock.  The advantage is that when
> the task is killed, its fds are closed and locks are automatically
> released.
> 
> The disadvantage of flock is that we can't rely on it to exclude
> threads.  Therefore c->slock must now always be wrapped inside
> c->privlock.
> 
> This patch survived basic testing with the lxcapi_create patchset,
> where now killing lxc-create while it was holding the lock did
> not lock up future api commands.
> 
> Signed-off-by: Serge Hallyn <serge.hallyn at ubuntu.com>
> ---
>  src/lxc/lxccontainer.c |  63 ++++++++----
>  src/lxc/lxccontainer.h |   4 +-
>  src/lxc/lxclock.c      | 200 +++++++++++++++++++++++++++++++-------
>  src/lxc/lxclock.h      |  58 +++++++----
>  src/tests/locktests.c  | 255
> +++++++++++++++---------------------------------- 5 files changed,
> 327 insertions(+), 253 deletions(-)
> 
> diff --git a/src/lxc/lxccontainer.c b/src/lxc/lxccontainer.c
> index 2934afa..b2de0e6 100644
> --- a/src/lxc/lxccontainer.c
> +++ b/src/lxc/lxccontainer.c
> @@ -42,7 +42,7 @@
>  #include <arpa/inet.h>
>  #include <ifaddrs.h>
>  
> -static pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
> +extern pthread_mutex_t thread_mutex;
>  
>  /* Define unshare() if missing from the C library */
>  /* this is also in attach.c and lxccontainer.c: commonize it in
> utils.c */ @@ -63,9 +63,11 @@ int unshare(int);
>  lxc_log_define(lxc_container, lxc);
>  
>  /* LOCKING
> - * c->privlock protects the struct lxc_container from multiple
> threads.
> - * c->slock protects the on-disk container data
> - * thread_mutex protects process data (ex: fd table) from multiple
> threads
> + * 1. c->privlock protects the struct lxc_container from multiple
> threads.
> + * 2. c->slock protects the on-disk container data
> + * 3. thread_mutex protects process data (ex: fd table) from
> multiple threads
> + * slock is an flock, which does not exclude threads.  Therefore
> slock should
> + * always be wrapped inside privlock.
>   * NOTHING mutexes two independent programs with their own struct
>   * lxc_container for the same c->name, between API calls.  For
> instance,
>   * c->config_read(); c->start();  Between those calls, data on disk
> @@ -96,14 +98,12 @@ static void lxc_container_free(struct
> lxc_container *c) c->error_string = NULL;
>  	}
>  	if (c->slock) {
> -		sem_close(c->slock);
> +		lxc_putlock(c->slock);
>  		c->slock = NULL;
>  	}
>  	if (c->privlock) {
> -		sem_t *l = c->privlock;
> +		lxc_putlock(c->privlock);
>  		c->privlock = NULL;
> -		sem_destroy(l);
> -		free(l);
>  	}
>  	if (c->name) {
>  		free(c->name);
> @@ -217,11 +217,15 @@ static const char *lxcapi_state(struct
> lxc_container *c) 
>  	if (!c)
>  		return NULL;
> -	if (lxclock(c->slock, 0))
> +	if (lxclock(c->privlock, 0))
>  		return NULL;
> +	if (lxclock(c->slock, 0))
> +		goto bad;
>  	s = lxc_getstate(c->name, c->config_path);

This isn't new with your change, but there is an inconsistency here:
before calling lxc_getstate we are taking the disk lock. In the
calling sequences

  lxcapi_[gs]et_cgroup_item()
    is_stopped_nolock()
      lxc_getstate()

we only take the privlock (thread) lock, so do we consider the state
(which will come from the container's freezer cgroup or the container
itself) to be "on disk" or not? Logically I would think it's not "on
disk" and so we wouldn't need to take slock here, only privlock, but
that might interfere with doing a freeze. If we're worried about that
then the lxcapi_[gs]et_cgroup_item should take the disk lock too IMO.

Hmm, just noticed that lxcapi_wait() -> lxc_wait() does lxc_getstate
without taking any lock. Maybe we should have this change just focus on
the locking primitives themselves and then we can take a pass at making
sure they are used consistently :)

I don't think the locking in lxcapi_start() can be right either; it
looks to me like we give up privlock right after getting a local
pointer to c->lxc_conf but then deref the pointer to the config that
could be changing under us.

>  	ret = lxc_state2str(s);
>  	lxcunlock(c->slock);
> +bad:
> +	lxcunlock(c->privlock);
>  
>  	return ret;
>  }
> @@ -251,10 +255,15 @@ static bool lxcapi_freeze(struct lxc_container
> *c) if (!c)
>  		return false;
>  
> -	if (lxclock(c->slock, 0))
> +	if (lxclock(c->privlock, 0))
>  		return false;
> +	if (lxclock(c->slock, 0)) {
> +		lxcunlock(c->privlock);
> +		return false;
> +	}
>  	ret = lxc_freeze(c->name, c->config_path);
>  	lxcunlock(c->slock);
> +	lxcunlock(c->privlock);
>  	if (ret)
>  		return false;
>  	return true;
> @@ -266,10 +275,15 @@ static bool lxcapi_unfreeze(struct
> lxc_container *c) if (!c)
>  		return false;
>  
> -	if (lxclock(c->slock, 0))
> +	if (lxclock(c->privlock, 0))
>  		return false;
> +	if (lxclock(c->slock, 0)) {
> +		lxcunlock(c->privlock);
> +		return false;
> +	}
>  	ret = lxc_unfreeze(c->name, c->config_path);
>  	lxcunlock(c->slock);
> +	lxcunlock(c->privlock);
>  	if (ret)
>  		return false;
>  	return true;
> @@ -281,10 +295,15 @@ static pid_t lxcapi_init_pid(struct
> lxc_container *c) if (!c)
>  		return -1;
>  
> -	if (lxclock(c->slock, 0))
> +	if (lxclock(c->privlock, 0))
>  		return -1;
> +	if (lxclock(c->slock, 0)) {
> +		lxcunlock(c->privlock);
> +		return -1;
> +	}
>  	ret = lxc_cmd_get_init_pid(c->name, c->config_path);

I'm not sure this needs locking at all, the connected to container is
just going to return its handler->pid and there is no locking around
setting that in start.c

>  	lxcunlock(c->slock);
> +	lxcunlock(c->privlock);
>  	return ret;
>  }
>  
> @@ -309,10 +328,15 @@ static bool lxcapi_load_config(struct
> lxc_container *c, const char *alt_file) fname = alt_file;
>  	if (!fname)
>  		return false;
> -	if (lxclock(c->slock, 0))
> +	if (lxclock(c->privlock, 0))
> +		return false;
> +	if (lxclock(c->slock, 0)) {
> +		lxcunlock(c->privlock);
>  		return false;
> +	}
>  	ret = load_config_locked(c, fname);
>  	lxcunlock(c->slock);
> +	lxcunlock(c->privlock);
>  	return ret;
>  }
>  
> @@ -604,9 +628,12 @@ static bool lxcapi_create(struct lxc_container
> *c, char *t, char *const argv[]) /* we're going to fork.  but since
> we'll wait for our child, we
>  	 * don't need to lxc_container_get */
>  
> +	if (lxclock(c->privlock, 0))
> +		goto out;
>  	if (lxclock(c->slock, 0)) {
>  		ERROR("failed to grab global container lock for
> %s\n", c->name);
> -		goto out;
> +		lxcunlock(c->privlock);
> +		goto out_unlock1;
>  	}
>  
>  	pid = fork();
> @@ -687,6 +714,8 @@ static bool lxcapi_create(struct lxc_container
> *c, char *t, char *const argv[]) 
>  out_unlock:
>  	lxcunlock(c->slock);
> +out_unlock1:
> +	lxcunlock(c->privlock);
>  out:
>  	if (tpath)
>  		free(tpath);
> @@ -1710,14 +1739,12 @@ struct lxc_container *lxc_container_new(const
> char *name, const char *configpath strcpy(c->name, name);
>  
>  	c->numthreads = 1;
> -	c->slock = lxc_newlock(name);
> -	if (!c->slock) {
> +	if (!(c->slock = lxc_newlock(c->config_path, name))) {
>  		fprintf(stderr, "failed to create lock\n");
>  		goto err;
>  	}
>  
> -	c->privlock = lxc_newlock(NULL);
> -	if (!c->privlock) {
> +	if (!(c->privlock = lxc_newlock(NULL, NULL))) {
>  		fprintf(stderr, "failed to alloc privlock\n");
>  		goto err;
>  	}
> diff --git a/src/lxc/lxccontainer.h b/src/lxc/lxccontainer.h
> index 22ccb0b..0ab036f 100644
> --- a/src/lxc/lxccontainer.h
> +++ b/src/lxc/lxccontainer.h
> @@ -16,8 +16,8 @@ struct lxc_container {
>  	// private fields
>  	char *name;
>  	char *configfile;
> -	sem_t *slock;
> -	sem_t *privlock;
> +	struct lxc_lock *slock;
> +	struct lxc_lock *privlock;
>  	int numthreads; /* protected by privlock. */
>  	struct lxc_conf *lxc_conf; // maybe we'll just want the
> whole lxc_handler? 
> diff --git a/src/lxc/lxclock.c b/src/lxc/lxclock.c
> index 0cbe843..bddd1e9 100644
> --- a/src/lxc/lxclock.c
> +++ b/src/lxc/lxclock.c
> @@ -17,36 +17,48 @@
>   * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
>   */
>  
> +#include <pthread.h>
>  #include "lxclock.h"
>  #include <malloc.h>
>  #include <stdio.h>
> +#include <errno.h>
> +#include <unistd.h>
> +#include <lxc/utils.h>
> +#include <lxc/log.h>
>  
>  #define OFLAG (O_CREAT | O_RDWR)
>  #define SEMMODE 0660
>  #define SEMVALUE 1
>  #define SEMVALUE_LOCKED 0
> -#define LXCLOCK_PREFIX "/lxcapi."
>  
> +lxc_log_define(lxc_lock, lxc);
>  
> -static char *lxclock_name(const char *container)
> +pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
> +
> +static char *lxclock_name(const char *p, const char *n)
>  {
>  	int ret;
> -	int len = strlen(container) + strlen(LXCLOCK_PREFIX) + 1;
> +	// $lxcpath/locks/$lxcname + '\0'
> +	int len = strlen(p) + strlen(n) + strlen("/locks/") + 1;
>  	char *dest = malloc(len);
>  	if (!dest)
>  		return NULL;
> -	ret = snprintf(dest, len, "%s%s", LXCLOCK_PREFIX, container);
> +	ret = snprintf(dest, len, "%s/locks", p);
>  	if (ret < 0 || ret >= len) {
>  		free(dest);
>  		return NULL;
>  	}
> -	return dest;
> -}
> +	if (mkdir_p(dest, 0755) < 0) {
> +		free(dest);
> +		return NULL;
> +	}
>  
> -static void lxcfree_name(char *name)
> -{
> -	if (name)
> -		free(name);
> +	ret = snprintf(dest, len, "%s/locks/%s", p, n);
> +	if (ret < 0 || ret >= len) {
> +		free(dest);
> +		return NULL;
> +	}
> +	return dest;
>  }
>  
>  static sem_t *lxc_new_unnamed_sem(void)
> @@ -65,44 +77,158 @@ static sem_t *lxc_new_unnamed_sem(void)
>  	return s;
>  }
>  
> -sem_t *lxc_newlock(const char *name)
> +struct lxc_lock *lxc_newlock(const char *lxcpath, const char *name)
>  {
> -	char *lname;
> -	sem_t *lock;
> +	struct lxc_lock *l;
> +	int ret = pthread_mutex_lock(&thread_mutex);

What is taking this lock here protecting? It looks like it's all thread-
local allocations? Maybe I'm missing something but I'm confused why we
need the pthread mutex around any of these locking primitives. Won't
this potentially cause a threadA trying to operate on unlocked
container1 to block when threadB is in a critical section with
unrelated container2?

> +	if (ret != 0) {
> +		ERROR("pthread_mutex_lock returned:%d %s", ret,
> strerror(ret));
> +		return NULL;
> +	}
>  
> -	if (!name)
> -		return lxc_new_unnamed_sem();
> +	l = malloc(sizeof(*l));
> +	if (!l)
> +		goto out;
>  
> -	lname = lxclock_name(name);
> -	if (!lname)
> -		return NULL;
> -	lock = sem_open(lname, OFLAG, SEMMODE, SEMVALUE);
> -	lxcfree_name(lname);
> -	if (lock == SEM_FAILED)
> -		return NULL;
> -	return lock;
> +	if (!name) {
> +		l->type = LXC_LOCK_ANON_SEM;
> +		l->u.sem = lxc_new_unnamed_sem();
> +		goto out;
> +	}
> +
> +	l->type = LXC_LOCK_FLOCK;
> +	l->u.f.fname = lxclock_name(lxcpath, name);
> +	if (!l->u.f.fname) {
> +		free(l);
> +		l = NULL;
> +		goto out;
> +	}
> +	l->u.f.fd = -1;
> +
> +out:
> +	pthread_mutex_unlock(&thread_mutex);
> +	return l;
>  }
>  
> -int lxclock(sem_t *sem, int timeout)
> +int lxclock(struct lxc_lock *l, int timeout)
>  {
> -	int ret;
> +	int saved_errno = errno;
> +	int ret = pthread_mutex_lock(&thread_mutex);
> +	if (ret != 0) {
> +		ERROR("pthread_mutex_lock returned:%d %s", ret,
> strerror(ret));
> +		return ret;
> +	}
>  
> -	if (!timeout) {
> -		ret = sem_wait(sem);
> -	} else {
> -		struct timespec ts;
> -		if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
> -			return -2;
> -		ts.tv_sec += timeout;
> -		ret = sem_timedwait(sem, &ts);
> +	switch(l->type) {
> +	case LXC_LOCK_ANON_SEM:
> +		if (!timeout) {
> +			ret = sem_wait(l->u.sem);
> +			if (ret == -1)
> +				saved_errno = errno;
> +		} else {
> +			struct timespec ts;
> +			if (clock_gettime(CLOCK_REALTIME, &ts) ==
> -1) {
> +				ret = -2;
> +				goto out;
> +			}
> +			ts.tv_sec += timeout;
> +			ret = sem_timedwait(l->u.sem, &ts);
> +			if (ret == -1)
> +				saved_errno = errno;
> +		}
> +		break;
> +	case LXC_LOCK_FLOCK:
> +		ret = -2;
> +		if (timeout) {
> +			ERROR("Error: timeout not supported with
> flock");
> +			ret = -2;
> +			goto out;
> +		}
> +		if (!l->u.f.fname) {
> +			ERROR("Error: filename not set for flock");
> +			ret = -2;
> +			goto out;
> +		}
> +		if (l->u.f.fd == -1) {
> +			l->u.f.fd = open(l->u.f.fname,
> O_RDWR|O_CREAT,
> +					S_IWUSR | S_IRUSR);
> +			if (l->u.f.fd == -1) {
> +				ERROR("Error opening %s",
> l->u.f.fname);
> +				goto out;
> +			}
> +		}
> +		ret = flock(l->u.f.fd, LOCK_EX);
> +		if (ret == -1)
> +			saved_errno = errno;
> +		break;
>  	}
>  
> +out:
> +	pthread_mutex_unlock(&thread_mutex);
> +	errno = saved_errno;
>  	return ret;
>  }
>  
> -int lxcunlock(sem_t *sem)
> +int lxcunlock(struct lxc_lock *l)
>  {
> -	if (!sem)
> -		return -2;
> -	return sem_post(sem);
> +	int saved_errno = errno;
> +	int ret = pthread_mutex_lock(&thread_mutex);
> +
> +	if (ret != 0) {
> +		ERROR("pthread_mutex_lock returned:%d %s", ret,
> strerror(ret));
> +		return ret;
> +	}
> +
> +	switch(l->type) {
> +	case LXC_LOCK_ANON_SEM:
> +		if (!l->u.sem)
> +			ret = -2;
> +		else
> +			ret = sem_post(l->u.sem);
> +			saved_errno = errno;
> +		break;
> +	case LXC_LOCK_FLOCK:
> +		if (l->u.f.fd != -1) {
> +			if ((ret = flock(l->u.f.fd, LOCK_UN)) < 0)
> +				saved_errno = errno;
> +			close(l->u.f.fd);
> +			l->u.f.fd = -1;
> +		} else
> +			ret = -2;
> +		break;
> +	}
> +
> +	pthread_mutex_unlock(&thread_mutex);
> +	errno = saved_errno;
> +	return ret;
> +}
> +
> +void lxc_putlock(struct lxc_lock *l)
> +{
> +	int ret = pthread_mutex_lock(&thread_mutex);
> +	if (ret != 0) {
> +		ERROR("pthread_mutex_lock returned:%d %s", ret,
> strerror(ret));
> +		return;
> +	}
> +
> +	if (!l)
> +		goto out;
> +	switch(l->type) {
> +	case LXC_LOCK_ANON_SEM:
> +		if (l->u.sem)
> +			sem_close(l->u.sem);
> +		break;
> +	case LXC_LOCK_FLOCK:
> +		if (l->u.f.fd != -1) {
> +			close(l->u.f.fd);
> +			l->u.f.fd = -1;
> +		}
> +		if (l->u.f.fname) {
> +			free(l->u.f.fname);
> +			l->u.f.fname = NULL;
> +		}
> +		break;
> +	}
> +out:
> +	pthread_mutex_unlock(&thread_mutex);
>  }
> diff --git a/src/lxc/lxclock.h b/src/lxc/lxclock.h
> index 1226c22..61e7bbb 100644
> --- a/src/lxc/lxclock.h
> +++ b/src/lxc/lxclock.h
> @@ -19,43 +19,65 @@
>  
>  #include <fcntl.h>           /* For O_* constants */
>  #include <sys/stat.h>        /* For mode constants */
> +#include <sys/file.h>
>  #include <semaphore.h>
>  #include <string.h>
>  #include <time.h>
>  
> +#define LXC_LOCK_ANON_SEM 1
> +#define LXC_LOCK_FLOCK 2
> +struct lxc_lock {
> +	short type;
> +	union {
> +		sem_t *sem; // an anonymous semaphore
> +		struct {
> +			int fd; // fd on which a lock is held (if
> not -1)
> +			char *fname;
> +		} f;
> +	} u;
> +};
> +
>  /*
> - * lxc_newlock:
> + * lxc_newlock: Create a new (unlocked) lock.
> + *
>   * if name is not given, create an unnamed semaphore.  We use these
>   * to protect against racing threads.
>   * Note that an unnamed sem was malloced by us and needs to be freed.
>   *
> - * If name is given, it is prepended with '/lxcapi.', and used as the
> - * name for a system-wide (well, ipcns-wide) semaphore.  We use that
> - * to protect the containers as represented on disk.
> - * A named sem should not be freed.
> - *
> - * XXX TODO
> - * We should probably introduce a lxclock_close() which detecs the
> type
> - * of lock and calls sem_close() or sem_destroy()+free() not as
> appropriate.
> - * For now, it is up to the caller to do so.
> - *
>   * sem is initialized to value of 1
> + * A sem_t * which can be passed to lxclock() and lxcunlock()
> + * will be placed in l->u.sem
>   *
> - * return NULL on failure, else a sem_t * which can be passed to
> - * lxclock() and lxcunlock().
> + * If lxcpath and name are given (both must be given if either is
> + * given) then a lockfile is created, $lxcpath/$lxcname/locks/$name.
> + * We use that to protect the containers as represented on disk.
> + * lxc_newlock() for the named lock only allocates the pathname in
> + * memory so we can quickly open+lock it at lxclock.
> + * l->u.f.fname will container the malloc'ed name (which must be

s/container/contain/  though I can see why container just rolls off the
fingers :)

> + * freed when the container is freed), and u.f.fd = -1.
> + *
> + * return lxclock on success, NULL on failure.
>   */
> -extern sem_t *lxc_newlock(const char *name);
> +extern struct lxc_lock *lxc_newlock(const char *lxcpath, const char
> *name); 
>  /*
>   * lxclock: take an existing lock.  If timeout is 0, wait
>   * indefinately.  Otherwise use given timeout.
>   * return 0 if we got the lock, -2 on failure to set timeout, or -1
>   * otherwise in which case errno will be set by sem_wait()).
> + *
> + * Note that timeout is (currently?) only supported for privlock, not
> + * for slock.  Since currently there is not a single use of the
> timeout
> + * (except in the test case) I may remove the support for it in sem
> as
> + * well.
>   */
> -extern int lxclock(sem_t *sem, int timeout);
> +extern int lxclock(struct lxc_lock *lock, int timeout);
>  
>  /*
> - * lxcunlock: unlock given sem.  Return 0 on success.  Otherwise
> returns
> - * -1 and sem_post will leave errno set.
> + * lxcunlock: unlock given sem.  Return 0 on success, or -2 if we
> did not
> + * have the lock.  Otherwise returns -1 with errno saved from flock
> + * or sem_post function.
>   */
> -extern int lxcunlock(sem_t *lock);
> +extern int lxcunlock(struct lxc_lock *lock);
> +
> +extern void lxc_putlock(struct lxc_lock *l);
> diff --git a/src/tests/locktests.c b/src/tests/locktests.c
> index f085558..96df946 100644
> --- a/src/tests/locktests.c
> +++ b/src/tests/locktests.c
> @@ -27,213 +27,112 @@
>  #define mycontainername "lxctest.sem"
>  #define TIMEOUT_SECS 3
>  
> -int timedout;
> -int pid_to_kill;
> -
> -void timeouthandler(int sig)
> -{
> -	// timeout received
> -	timedout = 1;
> -	kill(pid_to_kill, SIGTERM);
> -}
> -
> -void starttimer(int secs)
> -{
> -	timedout = 0;
> -	signal(SIGALRM, timeouthandler);
> -	alarm(secs);
> -}
> -void stoptimer(void)
> -{
> -	alarm(0);
> -	signal(SIGALRM, NULL);
> -}
> -
> -int test_one_lock(sem_t *lock)
> -{
> -	int ret;
> -	starttimer(TIMEOUT_SECS);
> -	ret = lxclock(lock, TIMEOUT_SECS*2);
> -	stoptimer();
> -	if (ret == 0) {
> -		lxcunlock(lock);
> -		return 0;
> -	}
> -	if (timedout)
> -		fprintf(stderr, "%d: timed out waiting for lock\n",
> __LINE__);
> -	else
> -		fprintf(stderr, "%d: failed to get single lock\n",
> __LINE__);
> -	return 1;
> -}
> -
> -/*
> - * get one lock.  Fork a second task to try to get a second lock,
> - * with infinite timeout.  If our alarm hits, kill the second
> - * task.  If second task does not
> - */
> -int test_two_locks(sem_t *lock)
> +void test_two_locks(void)
>  {
> -	int status;
> -    int ret;
> +	struct lxc_lock *l;
> +	pid_t pid;
> +	int ret, status;
> +	int p[2];
> +	char c;
>  
> -	ret = lxclock(lock, 1);
> -	if (ret) {
> -		fprintf(stderr, "%d: Error getting first lock\n",
> __LINE__);
> -		return 2;
> -	}
> -
> -	pid_to_kill = fork();
> -	if (pid_to_kill < 0) {
> -		fprintf(stderr, "%d: Failed to fork\n", __LINE__);
> -		lxcunlock(lock);
> -		return 3;
> -	}
> -
> -	if (pid_to_kill == 0) { // child
> -		ret = lxclock(lock, TIMEOUT_SECS*2);
> -		if (ret == 0) {
> -			lxcunlock(lock);
> -			exit(0);
> -		}
> -		fprintf(stderr, "%d: child, was not able to get
> lock\n", __LINE__);
> +	if (pipe(p) < 0)
>  		exit(1);
> +	if ((pid = fork()) < 0)
> +		exit(1);
> +	if (pid == 0) {
> +		if (read(p[0], &c, 1) < 0) {
> +			perror("read");
> +			exit(1);
> +		}
> +		l = lxc_newlock("/tmp", "lxctest-sem");
> +		if (!l) {
> +			fprintf(stderr, "%d: child: failed to create
> lock\n", __LINE__);
> +			exit(1);
> +		}
> +		if (lxclock(l, 0) < 0) {
> +			fprintf(stderr, "%d: child: failed to grab
> lock\n", __LINE__);
> +			exit(1);
> +		}
> +		fprintf(stderr, "%d: child: grabbed lock\n",
> __LINE__);
> +		exit(0);
>  	}
> -	starttimer(TIMEOUT_SECS);
> -	waitpid(pid_to_kill, &status, 0);
> -	stoptimer();
> -	if (WIFEXITED(status)) {
> -		// child exited normally - timeout didn't kill it
> -		if (WEXITSTATUS(status) == 0)
> -			fprintf(stderr, "%d: child was able to get
> the lock\n", __LINE__);
> -		else
> -			fprintf(stderr, "%d: child timed out too
> early\n", __LINE__);
> -		lxcunlock(lock);
> -		return 1;
> -	}
> -	lxcunlock(lock);
> -	return 0;
> -}
> -
> -/*
> - * get one lock.  try to get second lock, but asking for timeout.  If
> - * should return failure.  If our own alarm, set at twice the lock
> - * request's timeout, hits, then lxclock() did not properly time out.
> - */
> -int test_with_timeout(sem_t *lock)
> -{
> -	int status;
> -	int ret = 0;
> -
> -	ret = lxclock(lock, 0);
> -	if (ret) {
> -		fprintf(stderr, "%d: Error getting first lock\n",
> __LINE__);
> -		return 2;
> -	}
> -	pid_to_kill = fork();
> -	if (pid_to_kill < 0) {
> -		fprintf(stderr, "%d: Error on fork\n", __LINE__);
> -		lxcunlock(lock);
> -		return 2;
> +	l = lxc_newlock("/tmp", "lxctest-sem");
> +	if (!l) {
> +		fprintf(stderr, "%d: failed to create lock\n",
> __LINE__);
> +		exit(1);
>  	}
> -	if (pid_to_kill == 0) {
> -		ret = lxclock(lock, TIMEOUT_SECS);
> -		if (ret == 0) {
> -			lxcunlock(lock);
> -			exit(0);
> -		}
> +	if (lxclock(l, 0) < 0) {
> +		fprintf(stderr, "%d; failed to get lock\n",
> __LINE__); exit(1);
>  	}
> -	starttimer(TIMEOUT_SECS * 2);
> -	waitpid(pid_to_kill, &status, 0);
> -	stoptimer();
> -	if (!WIFEXITED(status)) {
> -		fprintf(stderr, "%d: lxclock did not honor its
> timeout\n", __LINE__);
> -		lxcunlock(lock);
> -		return 1;
> +	if (write(p[1], &c, 1) < 0) {
> +		perror("write");
> +		exit(1);
>  	}
> -	if (WEXITSTATUS(status) == 0) {
> -		fprintf(stderr, "%d: child was able to get lock,
> should have failed with timeout\n", __LINE__);
> -		ret = 1;
> +	sleep(3);
> +	ret = waitpid(pid, &status, WNOHANG);
> +	if (ret == pid) { // task exited
> +		if (WIFEXITED(status)) {
> +			printf("%d exited normally with exit code
> %d\n", pid,
> +				WEXITSTATUS(status));
> +			if (WEXITSTATUS(status) == 0)
> +				exit(1);
> +		} else
> +			printf("%d did not exit normally\n", pid);
> +		return;
> +	} else if (ret < 0) {
> +		perror("waitpid");
> +		exit(1);
>  	}
> -	lxcunlock(lock);
> -	return ret;
> +	kill(pid, SIGKILL);
> +	wait(&status);
> +	close(p[1]);
> +	close(p[0]);
> +	lxcunlock(l);
> +	lxc_putlock(l);
>  }
>  
>  int main(int argc, char *argv[])
>  {
> -	int ret, sval, r;
> -	sem_t *lock;
> +	int ret;
> +	struct lxc_lock *lock;
>  
> -	lock = lxc_newlock(NULL);
> -    if (!lock) {
> +	lock = lxc_newlock(NULL, NULL);
> +	if (!lock) {
>  		fprintf(stderr, "%d: failed to get unnamed lock\n",
> __LINE__); exit(1);
> -    }
> -    ret = lxclock(lock, 0);
> -    if (ret) {
> +	}
> +	ret = lxclock(lock, 0);
> +	if (ret) {
>  		fprintf(stderr, "%d: failed to take unnamed lock
> (%d)\n", __LINE__, ret); exit(1);
> -    }
> +	}
>  
> -    ret = lxcunlock(lock);
> -    if (ret) {
> +	ret = lxcunlock(lock);
> +	if (ret) {
>  		fprintf(stderr, "%d: failed to put unnamed lock
> (%d)\n", __LINE__, ret); exit(1);
> -    }
> -
> -    sem_destroy(lock);
> -    free(lock);
> +	}
> +	lxc_putlock(lock);
>  
> -	lock = lxc_newlock(mycontainername);
> +	lock = lxc_newlock("/var/lib/lxc", mycontainername);
>  	if (!lock) {
>  		fprintf(stderr, "%d: failed to get lock\n",
> __LINE__); exit(1);
>  	}
> -	r = sem_getvalue(lock, &sval);
> -	if (!r) {
> -		fprintf(stderr, "%d: sem value at start is %d\n",
> __LINE__, sval);
> -	} else {
> -		fprintf(stderr, "%d: failed to get initial value\n",
> __LINE__);
> -	}
> -
> -	ret = test_one_lock(lock);
> -	if (ret) {
> -		fprintf(stderr, "%d: test failed\n", __LINE__);
> -		goto out;
> -	}
> -	r = sem_getvalue(lock, &sval);
> -	if (!r) {
> -		fprintf(stderr, "%d: sem value is %d\n", __LINE__,
> sval);
> -	} else {
> -		fprintf(stderr, "%d: failed to get sem value\n",
> __LINE__);
> -	}
> -
> -	ret = test_two_locks(lock);
> -	if (ret) {
> -		fprintf(stderr, "%d: test failed\n", __LINE__);
> -		goto out;
> -	}
> -	r = sem_getvalue(lock, &sval);
> -	if (!r) {
> -		fprintf(stderr, "%d: sem value is %d\n", __LINE__,
> sval);
> -	} else {
> -		fprintf(stderr, "%d: failed to get value\n",
> __LINE__);
> +	struct stat sb;
> +	char *pathname = "/var/lib/lxc/locks/" mycontainername;
> +	ret = stat(pathname, &sb);
> +	if (ret != 0) {
> +		fprintf(stderr, "%d: filename %s not created\n",
> __LINE__,
> +			pathname);
> +		exit(1);
>  	}
> +	lxc_putlock(lock);
>  
> -	ret = test_with_timeout(lock);
> -	if (ret) {
> -		fprintf(stderr, "%d: test failed\n", __LINE__);
> -		goto out;
> -	}
> -	r = sem_getvalue(lock, &sval);
> -	if (!r) {
> -		fprintf(stderr, "%d: sem value is %d\n", __LINE__,
> sval);
> -	} else {
> -		fprintf(stderr, "%d: failed to get value\n",
> __LINE__);
> -	}
> +	test_two_locks();
>  
> -    fprintf(stderr, "all tests passed\n");
> +	fprintf(stderr, "all tests passed\n");
>  
> -out:
>  	exit(ret);
>  }





More information about the lxc-devel mailing list