+// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H
/*
- * urcu/static/lfstack.h
- *
* Userspace RCU library - Lock-Free Stack
*
- * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
* TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
* linking dynamically with the userspace rcu library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdbool.h>
#include <pthread.h>
-#include <assert.h>
+#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>
s->head = NULL;
ret = pthread_mutex_init(&s->lock, NULL);
- assert(!ret);
+ urcu_posix_assert(!ret);
}
/*
/*
 * Destroy the stack's internal mutex (the lock used by the blocking
 * pop/pop_all helpers).  Counterpart of _cds_lfs_init.
 * The check now uses liburcu's internal urcu_posix_assert() instead of
 * <assert.h> assert(), matching the library-wide assertion policy.
 */
void _cds_lfs_destroy(struct cds_lfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);
-	assert(!ret);
+	urcu_posix_assert(!ret);
}
/*
/*
 * Return whether the stack is currently empty (presumably head == NULL,
 * per ___cds_lfs_empty_head — defined elsewhere in this file).
 * NOTE(review): the head read changed from _CMM_LOAD_SHARED to an
 * explicit CMM_RELAXED atomic load; emptiness is a racy snapshot, so
 * no ordering appears required here — confirm against the uatomic API.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
-	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
+	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
}
/*
*
* Does not require any synchronization with other push nor pop.
*
+ * Operations before push are consistent when observed after associated pop.
+ *
* Lock-free stack push is not subject to ABA problem, so no need to
* take the RCU read-side lock. Even if "head" changes between two
* uatomic_cmpxchg() invocations here (being popped, and then pushed
* uatomic_cmpxchg() implicit memory barrier orders earlier
* stores to node before publication.
*/
- head = uatomic_cmpxchg(&s->head, old_head, new_head);
+ cmm_emit_legacy_smp_mb();
+ head = uatomic_cmpxchg_mo(&s->head, old_head, new_head,
+ CMM_SEQ_CST, CMM_SEQ_CST);
if (old_head == head)
break;
}
*
* Returns NULL if stack is empty.
*
+ * Operations after pop are consistent when observed before associated push.
+ *
* __cds_lfs_pop needs to be synchronized using one of the following
* techniques:
*
struct cds_lfs_head *head, *next_head;
struct cds_lfs_node *next;
- head = _CMM_LOAD_SHARED(s->head);
+ head = uatomic_load(&s->head, CMM_CONSUME);
if (___cds_lfs_empty_head(head))
return NULL; /* Empty stack */
* memory barrier before uatomic_cmpxchg() in
* cds_lfs_push.
*/
- cmm_smp_read_barrier_depends();
- next = _CMM_LOAD_SHARED(head->node.next);
+ next = uatomic_load(&head->node.next, CMM_RELAXED);
next_head = caa_container_of(next,
struct cds_lfs_head, node);
- if (uatomic_cmpxchg(&s->head, head, next_head) == head)
+ if (uatomic_cmpxchg_mo(&s->head, head, next_head,
+ CMM_SEQ_CST, CMM_SEQ_CST) == head){
+ cmm_emit_legacy_smp_mb();
return &head->node;
+ }
/* busy-loop if head changed under us */
}
}
/*
 * Atomically detach and return the entire stack contents: one xchg
 * swaps NULL into head and returns the previous head (the list of all
 * popped nodes).
 */
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;
+	struct cds_lfs_head *head;
	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * taking care to order writes to each node prior to the full
	 * memory barrier after this uatomic_xchg().
	 */
-	return uatomic_xchg(&s->head, NULL);
+	head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST);
	/*
	 * NOTE(review): cmm_emit_legacy_smp_mb() after the xchg preserves
	 * the old implicit-barrier ABI for legacy flavors — confirm it
	 * pairs with the one emitted before the cmpxchg on the push side.
	 */
+	cmm_emit_legacy_smp_mb();
+	return head;
}
/*
int ret;
ret = pthread_mutex_lock(&s->lock);
- assert(!ret);
+ urcu_posix_assert(!ret);
}
/*
int ret;
ret = pthread_mutex_unlock(&s->lock);
- assert(!ret);
+ urcu_posix_assert(!ret);
}
/*
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;
	/* ___cds_lfs_pop now takes the transparent-union cds_lfs_stack_ptr_t. */
+	cds_lfs_stack_ptr_t stack;
	/* Mutex serializes this pop against other blocking pop/pop_all calls. */
	_cds_lfs_pop_lock(s);
-	retnode = ___cds_lfs_pop(s);
+	stack.s = s;
+	retnode = ___cds_lfs_pop(stack);
	_cds_lfs_pop_unlock(s);
	return retnode;
}
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;
	/* ___cds_lfs_pop_all now takes the transparent-union cds_lfs_stack_ptr_t. */
+	cds_lfs_stack_ptr_t stack;
	/* Mutex serializes this pop_all against other blocking pop/pop_all calls. */
	_cds_lfs_pop_lock(s);
-	rethead = ___cds_lfs_pop_all(s);
+	stack.s = s;
+	rethead = ___cds_lfs_pop_all(stack);
	_cds_lfs_pop_unlock(s);
	return rethead;
}