diff --git a/include/urcu/static/wfstack.h b/include/urcu/static/wfstack.h
index 088e6e3acbbe4631a6c7c16136a687c3d21811db..c46e97d9f25197e0e701a55ef24b0975bd1b2aac 100644
@@ -1,29 +1,15 @@
+// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
 #ifndef _URCU_STATIC_WFSTACK_H
 #define _URCU_STATIC_WFSTACK_H
 
 /*
- * urcu/static/wfstack.h
- *
 * Userspace RCU library - Stack with wait-free push, blocking traversal.
  *
  * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
  * linking dynamically with the userspace rcu library.
- *
- * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include <pthread.h>
@@ -124,7 +110,7 @@ static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
 {
        struct __cds_wfs_stack *s = u_stack._s;
 
-       return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
+       return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED));
 }
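
The emptiness check above only needs an atomic snapshot of the head pointer, so the new API can ask for CMM_RELAXED ordering instead of the unspecified semantics of the old CMM_LOAD_SHARED(). As a rough sketch (an assumption about the atomic-builtins configuration, not a quote of urcu's uatomic implementation), such a load boils down to:

	/* Hypothetical illustration: a relaxed atomic snapshot of s->head. */
	static inline struct cds_wfs_head *head_snapshot(struct __cds_wfs_stack *s)
	{
		return __atomic_load_n(&s->head, __ATOMIC_RELAXED);
	}

A relaxed load is sufficient here because _cds_wfs_empty() makes no ordering promise: the result can be stale by the time the caller acts on it.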
 
 /*
@@ -133,6 +119,8 @@ static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
  * Issues a full memory barrier before push. No mutual exclusion is
  * required.
  *
+ * Operations before push are consistent when observed after associated pop.
+ *
  * Returns 0 if the stack was empty prior to adding the node.
  * Returns non-zero otherwise.
  */
@@ -148,12 +136,13 @@ int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
         * uatomic_xchg() implicit memory barrier orders earlier stores
         * to node (setting it to NULL) before publication.
         */
-       old_head = uatomic_xchg(&s->head, new_head);
+       cmm_emit_legacy_smp_mb();
+       old_head = uatomic_xchg_mo(&s->head, new_head, CMM_SEQ_CST);
        /*
         * At this point, dequeuers see a NULL node->next, they should
         * busy-wait until node->next is set to old_head.
         */
-       CMM_STORE_SHARED(node->next, &old_head->node);
+       uatomic_store(&node->next, &old_head->node, CMM_RELEASE);
        return !___cds_wfs_end(old_head);
 }
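
To illustrate the ordering comment added above ("operations before push are consistent when observed after associated pop"), here is a minimal usage sketch. It is not part of the patch; struct my_item and produce() are hypothetical names, and the stack is assumed to have been initialized with cds_wfs_init().

	#include <urcu/wfstack.h>

	struct my_item {
		int value;                /* payload written before the push */
		struct cds_wfs_node node; /* embedded wait-free stack node */
	};

	static void produce(struct cds_wfs_stack *stack, struct my_item *item, int v)
	{
		item->value = v; /* ordered before publication... */
		cds_wfs_node_init(&item->node);
		cds_wfs_push(stack, &item->node); /* ...by the push's full barrier */
	}

Because the push performs a full barrier (the uatomic_xchg_mo() with CMM_SEQ_CST above), whichever thread later pops this node is guaranteed to observe item->value == v.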
 
@@ -169,7 +158,7 @@ ___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
        /*
         * Adaptive busy-looping waiting for push to complete.
         */
-       while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
+       while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
                if (!blocking)
                        return CDS_WFS_WOULDBLOCK;
                if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
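
The busy-wait above now loads node->next with CMM_CONSUME, pairing with the CMM_RELEASE store in _cds_wfs_push(): the address dependency through the returned pointer orders the subsequent dereference. As a simplified sketch (an assumption that ignores the adaptive sleep logic and the non-blocking path; most toolchains promote consume to acquire anyway, and caa_cpu_relax() comes from urcu/arch.h):

	static inline struct cds_wfs_node *
	sync_next_sketch(struct cds_wfs_node *node)
	{
		struct cds_wfs_node *next;

		/* Spin until the pusher publishes node->next. */
		while ((next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE)) == NULL)
			caa_cpu_relax(); /* CPU pause hint */
		return next;
	}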
@@ -194,7 +183,7 @@ ___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
        if (state)
                *state = 0;
        for (;;) {
-               head = CMM_LOAD_SHARED(s->head);
+               head = uatomic_load(&s->head, CMM_CONSUME);
                if (___cds_wfs_end(head)) {
                        return NULL;
                }
@@ -203,9 +192,11 @@ ___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
                        return CDS_WFS_WOULDBLOCK;
                }
                new_head = caa_container_of(next, struct cds_wfs_head, node);
-               if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
+               if (uatomic_cmpxchg_mo(&s->head, head, new_head,
+                                       CMM_SEQ_CST, CMM_SEQ_CST) == head) {
                        if (state && ___cds_wfs_end(new_head))
                                *state |= CDS_WFS_STATE_LAST;
+                       cmm_emit_legacy_smp_mb();
                        return &head->node;
                }
                if (!blocking) {
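
A usage sketch for the pop path (not from the patch; the caller is hypothetical and must provide the synchronization documented below). The state out-parameter, surfaced through __cds_wfs_pop_with_state_blocking(), lets the caller detect that its pop removed the last node:

	static struct cds_wfs_node *pop_and_check_last(struct cds_wfs_stack *stack)
	{
		int state;
		struct cds_wfs_node *node;

		node = __cds_wfs_pop_with_state_blocking(stack, &state);
		if (node && (state & CDS_WFS_STATE_LAST)) {
			/* This pop emptied the stack. */
		}
		return node;
	}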
@@ -220,6 +211,8 @@ ___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
  *
  * Returns NULL if stack is empty.
  *
+ * Operations after pop are consistent when observed before associated push.
+ *
  * __cds_wfs_pop_blocking needs to be synchronized using one of the
  * following techniques:
  *
@@ -278,6 +271,8 @@ ___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
 /*
  * __cds_wfs_pop_all: pop all nodes from a stack.
  *
+ * Operations after pop are consistent when observed before associated push.
+ *
  * __cds_wfs_pop_all does not require any synchronization with other
  * push, nor with other __cds_wfs_pop_all, but requires synchronization
  * matching the technique used to synchronize __cds_wfs_pop_blocking:
@@ -309,7 +304,8 @@ ___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
         * taking care to order writes to each node prior to the full
         * memory barrier after this uatomic_xchg().
         */
-       head = uatomic_xchg(&s->head, CDS_WFS_END);
+       head = uatomic_xchg_mo(&s->head, CDS_WFS_END, CMM_SEQ_CST);
+       cmm_emit_legacy_smp_mb();
        if (___cds_wfs_end(head))
                return NULL;
        return head;
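
Finally, a drain-style usage sketch (not part of the patch; it assumes a single popper or external locking, as required by the synchronization comment above). The head returned by __cds_wfs_pop_all() owns the whole detached list and can be walked with the iterator from urcu/wfstack.h:

	static void drain(struct cds_wfs_stack *stack)
	{
		struct cds_wfs_head *head;
		struct cds_wfs_node *node;

		head = __cds_wfs_pop_all(stack);
		cds_wfs_for_each_blocking(head, node) {
			/* Pushes that completed before the xchg above are
			 * fully visible here thanks to its CMM_SEQ_CST
			 * semantics. */
		}
	}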