From 8bc62ca44275eb3dc3f2b62f2ad4a63187473332 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Sat, 28 Mar 2009 22:40:33 -0400
Subject: [PATCH] RCU signal handler reader over reader

Add an RCU test for a signal handler reader running over a regular
reader. The data structures now support multiple readers, but verifying
the resulting model fills my system's memory (16GB+).

Signed-off-by: Mathieu Desnoyers
---
Note: standalone Promela sketches of the modeling techniques used by
this patch are appended after the diff.

 formal-model/urcu/DEFINES   |  17 +-
 formal-model/urcu/Makefile  |   9 +-
 formal-model/urcu/urcu.spin | 460 ++++++++++++++++++++++++++++--------
 3 files changed, 384 insertions(+), 102 deletions(-)

diff --git a/formal-model/urcu/DEFINES b/formal-model/urcu/DEFINES
index 843d135..3ea116c 100644
--- a/formal-model/urcu/DEFINES
+++ b/formal-model/urcu/DEFINES
@@ -1,11 +1,20 @@
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define read_free_race (read_generation[0] == last_free_gen)
+#define read_free (free_done && data_access[0])
+
+#define TEST_SIGNAL
+#define TEST_SIGNAL_ON_READ
+
 #define RCU_GP_CTR_BIT (1 << 7)
 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
 
-#define read_free_race (read_generation == last_free_gen)
-#define read_free (free_done && data_access)
-
 #ifndef READER_NEST_LEVEL
-#define READER_NEST_LEVEL 2
+#define READER_NEST_LEVEL 1
 #endif
 
 #define REMOTE_BARRIERS
diff --git a/formal-model/urcu/Makefile b/formal-model/urcu/Makefile
index 4488219..dc36c25 100644
--- a/formal-model/urcu/Makefile
+++ b/formal-model/urcu/Makefile
@@ -23,7 +23,8 @@ SPINFILE=urcu.spin
 
 default:
 	make urcu_free | tee urcu_free.log
-	make urcu_free_nested | tee urcu_free_nested.log
+	#nested useless with signal test.
+	#make urcu_free_nested | tee urcu_free_nested.log
 	make urcu_free_no_rmb | tee urcu_free_no_rmb.log
 	make urcu_free_no_wmb | tee urcu_free_no_wmb.log
 	make urcu_free_no_mb | tee urcu_free_no_mb.log
@@ -48,7 +49,7 @@ asserts: clean
 	rm -f .input.spin.trail
 	spin -a -X .input.spin
 	gcc -w ${CFLAGS} -DSAFETY -o pan pan.c
-	./pan -v -c1 -X -m10000 -w20
+	./pan -v -c1 -X -m10000000 -w20
 	cp .input.spin $@.spin.input
 	-cp .input.spin.trail $@.spin.input.trail
 
@@ -143,10 +144,10 @@ urcu_progress_writer_error_ltl:
 
 run_weak_fair: pan
-	./pan -a -f -v -c1 -X -m10000 -w20
+	./pan -a -f -v -c1 -X -m10000000 -w20
 
 run: pan
-	./pan -a -v -c1 -X -m10000 -w20
+	./pan -a -v -c1 -X -m10000000 -w20
 
 pan: pan.c
 	gcc -w ${CFLAGS} -o pan pan.c
 
diff --git a/formal-model/urcu/urcu.spin b/formal-model/urcu/urcu.spin
index 324ee39..d1aff29 100644
--- a/formal-model/urcu/urcu.spin
+++ b/formal-model/urcu/urcu.spin
@@ -20,12 +20,19 @@ /* Promela validation variables. */
 
-#define NR_READERS 1
-#define NR_WRITERS 1
-
-#define NR_PROCS 2
-
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+/* All signal readers have the same PID and use the same reader variable. */
+#ifdef TEST_SIGNAL_ON_WRITE
+#define get_pid() ((_pid < 1) -> 0 : 1)
+#elif defined(TEST_SIGNAL_ON_READ)
+#define get_pid() ((_pid < 2) -> 0 : 1)
+#else
 #define get_pid() (_pid)
+#endif
+
+#define get_readerid() (get_pid())
 
 /*
  * Each process have its own data in cache. Caches are randomly updated.
  * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
  * both.
*/ -#define DECLARE_CACHED_VAR(type, x, v) \ - type mem_##x = v; \ - type cached_##x[NR_PROCS] = v; \ - bit cache_dirty_##x[NR_PROCS] = 0 +typedef per_proc_byte { + byte val[NR_PROCS]; +}; + +/* Bitfield has a maximum of 8 procs */ +typedef per_proc_bit { + byte bitfield; +}; + +#define DECLARE_CACHED_VAR(type, x) \ + type mem_##x; \ + per_proc_##type cached_##x; \ + per_proc_bit cache_dirty_##x; + +#define INIT_CACHED_VAR(x, v, j) \ + mem_##x = v; \ + cache_dirty_##x.bitfield = 0; \ + j = 0; \ + do \ + :: j < NR_PROCS -> \ + cached_##x.val[j] = v; \ + j++ \ + :: j >= NR_PROCS -> break \ + od; -#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id]) +#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id)) -#define READ_CACHED_VAR(x) (cached_##x[get_pid()]) +#define READ_CACHED_VAR(x) (cached_##x.val[get_pid()]) -#define WRITE_CACHED_VAR(x, v) \ - atomic { \ - cached_##x[get_pid()] = v; \ - cache_dirty_##x[get_pid()] = 1; \ +#define WRITE_CACHED_VAR(x, v) \ + atomic { \ + cached_##x.val[get_pid()] = v; \ + cache_dirty_##x.bitfield = \ + cache_dirty_##x.bitfield | (1 << get_pid()); \ } -#define CACHE_WRITE_TO_MEM(x, id) \ - if \ - :: IS_CACHE_DIRTY(x, id) -> \ - mem_##x = cached_##x[id]; \ - cache_dirty_##x[id] = 0; \ - :: else -> \ - skip \ +#define CACHE_WRITE_TO_MEM(x, id) \ + if \ + :: IS_CACHE_DIRTY(x, id) -> \ + mem_##x = cached_##x.val[id]; \ + cache_dirty_##x.bitfield = \ + cache_dirty_##x.bitfield & (~(1 << id)); \ + :: else -> \ + skip \ fi; #define CACHE_READ_FROM_MEM(x, id) \ if \ :: !IS_CACHE_DIRTY(x, id) -> \ - cached_##x[id] = mem_##x;\ + cached_##x.val[id] = mem_##x;\ :: else -> \ skip \ fi; @@ -86,36 +115,48 @@ */ #ifdef REMOTE_BARRIERS -inline smp_rmb_pid(i) +inline smp_rmb_pid(i, j) { atomic { CACHE_READ_FROM_MEM(urcu_gp_ctr, i); - CACHE_READ_FROM_MEM(urcu_active_readers_one, i); + j = 0; + do + :: j < NR_READERS -> + CACHE_READ_FROM_MEM(urcu_active_readers[j], i); + j++ + :: j >= NR_READERS -> break + od; CACHE_READ_FROM_MEM(generation_ptr, i); } } -inline smp_wmb_pid(i) +inline smp_wmb_pid(i, j) { atomic { CACHE_WRITE_TO_MEM(urcu_gp_ctr, i); - CACHE_WRITE_TO_MEM(urcu_active_readers_one, i); + j = 0; + do + :: j < NR_READERS -> + CACHE_WRITE_TO_MEM(urcu_active_readers[j], i); + j++ + :: j >= NR_READERS -> break + od; CACHE_WRITE_TO_MEM(generation_ptr, i); } } -inline smp_mb_pid(i) +inline smp_mb_pid(i, j) { atomic { #ifndef NO_WMB - smp_wmb_pid(i); + smp_wmb_pid(i, j); #endif #ifndef NO_RMB - smp_rmb_pid(i); + smp_rmb_pid(i, j); #endif #ifdef NO_WMB #ifdef NO_RMB - ooo_mem(i); + ooo_mem(j); #endif #endif } @@ -127,51 +168,63 @@ inline smp_mb_pid(i) * We are not modeling the whole rendez-vous between readers and writers here, * we just let the writer update each reader's caches remotely. 
*/ -inline smp_mb(i) +inline smp_mb(i, j) { if :: get_pid() >= NR_READERS -> - smp_mb_pid(get_pid()); + smp_mb_pid(get_pid(), j); i = 0; do :: i < NR_READERS -> - smp_mb_pid(i); + smp_mb_pid(i, j); i++; :: i >= NR_READERS -> break od; - smp_mb_pid(get_pid()); + smp_mb_pid(get_pid(), j); :: else -> skip; fi; } #else -inline smp_rmb(i) +inline smp_rmb(i, j) { atomic { CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid()); - CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid()); + i = 0; + do + :: i < NR_READERS -> + CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid()); + i++ + :: i >= NR_READERS -> break + od; CACHE_READ_FROM_MEM(generation_ptr, get_pid()); } } -inline smp_wmb(i) +inline smp_wmb(i, j) { atomic { CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid()); - CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid()); + i = 0; + do + :: i < NR_READERS -> + CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid()); + i++ + :: i >= NR_READERS -> break + od; CACHE_WRITE_TO_MEM(generation_ptr, get_pid()); } } -inline smp_mb(i) +inline smp_mb(i, j) { atomic { #ifndef NO_WMB - smp_wmb(i); + smp_wmb(i, j); #endif #ifndef NO_RMB - smp_rmb(i); + smp_rmb(i, j); #endif #ifdef NO_WMB #ifdef NO_RMB @@ -183,126 +236,227 @@ inline smp_mb(i) #endif -/* Keep in sync manually with smp_rmb, wmp_wmb and ooo_mem */ -DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1); -/* Note ! currently only one reader */ -DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0); +/* Keep in sync manually with smp_rmb, wmp_wmb, ooo_mem and init() */ +DECLARE_CACHED_VAR(byte, urcu_gp_ctr); +/* Note ! currently only two readers */ +DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]); /* pointer generation */ -DECLARE_CACHED_VAR(byte, generation_ptr, 0); +DECLARE_CACHED_VAR(byte, generation_ptr); byte last_free_gen = 0; bit free_done = 0; -byte read_generation = 1; -bit data_access = 0; +byte read_generation[NR_READERS]; +bit data_access[NR_READERS]; bit write_lock = 0; +bit init_done = 0; + +bit sighand_exec = 0; + +inline wait_init_done() +{ + do + :: init_done == 0 -> skip; + :: else -> break; + od; +} + +#ifdef TEST_SIGNAL + +inline wait_for_sighand_exec() +{ + sighand_exec = 0; + do + :: sighand_exec == 0 -> skip; + :: else -> + if + :: 1 -> break; + :: 1 -> sighand_exec = 0; + skip; + fi; + od; +} + +#else + +inline wait_for_sighand_exec() +{ + skip; +} + +#endif + +#ifdef TEST_SIGNAL_ON_WRITE +/* Block on signal handler execution */ +inline dispatch_sighand_write_exec() +{ + sighand_exec = 1; + do + :: sighand_exec == 1 -> + skip; + :: else -> + break; + od; +} + +#else + +inline dispatch_sighand_write_exec() +{ + skip; +} + +#endif + +#ifdef TEST_SIGNAL_ON_READ +/* Block on signal handler execution */ +inline dispatch_sighand_read_exec() +{ + sighand_exec = 1; + do + :: sighand_exec == 1 -> + skip; + :: else -> + break; + od; +} + +#else + +inline dispatch_sighand_read_exec() +{ + skip; +} + +#endif + + inline ooo_mem(i) { atomic { RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid()); - RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one, - get_pid()); + i = 0; + do + :: i < NR_READERS -> + RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i], + get_pid()); + i++ + :: i >= NR_READERS -> break + od; RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid()); RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid()); - RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one, - get_pid()); + i = 0; + do + :: i < NR_READERS -> + RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i], + get_pid()); + i++ + :: i >= NR_READERS -> break + od; RANDOM_CACHE_READ_FROM_MEM(generation_ptr, 
get_pid()); } } -#define get_readerid() (get_pid()) -#define get_writerid() (get_readerid() + NR_READERS) - -inline wait_for_reader(tmp, id, i) +inline wait_for_reader(tmp, tmp2, i, j) { do :: 1 -> - tmp = READ_CACHED_VAR(urcu_active_readers_one); + tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]); ooo_mem(i); + dispatch_sighand_write_exec(); if - :: (tmp & RCU_GP_CTR_NEST_MASK) - && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr)) + :: (tmp2 & RCU_GP_CTR_NEST_MASK) + && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr)) & RCU_GP_CTR_BIT) -> #ifndef GEN_ERROR_WRITER_PROGRESS - smp_mb(i); + smp_mb(i, j); #else ooo_mem(i); #endif + dispatch_sighand_write_exec(); :: else -> break; fi; od; } -inline wait_for_quiescent_state(tmp, i, j) +inline wait_for_quiescent_state(tmp, tmp2, i, j) { - i = 0; + tmp = 0; do - :: i < NR_READERS -> - wait_for_reader(tmp, i, j); + :: tmp < NR_READERS -> + wait_for_reader(tmp, tmp2, i, j); if - :: (NR_READERS > 1) && (i < NR_READERS - 1) - -> ooo_mem(j); + :: (NR_READERS > 1) && (tmp < NR_READERS - 1) + -> ooo_mem(i); + dispatch_sighand_write_exec(); :: else -> skip; fi; - i++ - :: i >= NR_READERS -> break + tmp++ + :: tmp >= NR_READERS -> break od; } /* Model the RCU read-side critical section. */ -inline urcu_one_read(i, nest_i, tmp, tmp2) +inline urcu_one_read(i, j, nest_i, tmp, tmp2) { nest_i = 0; do :: nest_i < READER_NEST_LEVEL -> ooo_mem(i); - tmp = READ_CACHED_VAR(urcu_active_readers_one); + dispatch_sighand_read_exec(); + tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); ooo_mem(i); + dispatch_sighand_read_exec(); if :: (!(tmp & RCU_GP_CTR_NEST_MASK)) -> tmp2 = READ_CACHED_VAR(urcu_gp_ctr); ooo_mem(i); - WRITE_CACHED_VAR(urcu_active_readers_one, tmp2); + dispatch_sighand_read_exec(); + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], + tmp2); :: else -> - WRITE_CACHED_VAR(urcu_active_readers_one, + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1); fi; - smp_mb(i); + smp_mb(i, j); + dispatch_sighand_read_exec(); nest_i++; :: nest_i >= READER_NEST_LEVEL -> break; od; - ooo_mem(i); - read_generation = READ_CACHED_VAR(generation_ptr); - ooo_mem(i); - data_access = 1; - ooo_mem(i); - data_access = 0; + read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr); + data_access[get_readerid()] = 1; + data_access[get_readerid()] = 0; nest_i = 0; do :: nest_i < READER_NEST_LEVEL -> - smp_mb(i); - tmp2 = READ_CACHED_VAR(urcu_active_readers_one); + smp_mb(i, j); + dispatch_sighand_read_exec(); + tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); ooo_mem(i); - WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1); + dispatch_sighand_read_exec(); + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); nest_i++; :: nest_i >= READER_NEST_LEVEL -> break; od; - ooo_mem(i); + //ooo_mem(i); + //dispatch_sighand_read_exec(); //smp_mc(i); /* added */ } -active [NR_READERS] proctype urcu_reader() +active proctype urcu_reader() { - byte i, nest_i; + byte i, j, nest_i; byte tmp, tmp2; + wait_init_done(); + assert(get_pid() < NR_PROCS); end_reader: @@ -316,20 +470,108 @@ end_reader: * progress when it's blocked by an always progressing reader. */ #ifdef READER_PROGRESS + /* Only test progress of one random reader. They are all the + * same. 
*/ + if + :: get_readerid() == 0 -> progress_reader: + skip; + fi; #endif - urcu_one_read(i, nest_i, tmp, tmp2); + urcu_one_read(i, j, nest_i, tmp, tmp2); + od; +} + +#ifdef TEST_SIGNAL +/* signal handler reader */ + +inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2) +{ + nest_i = 0; + do + :: nest_i < READER_NEST_LEVEL -> + ooo_mem(i); + tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); + ooo_mem(i); + if + :: (!(tmp & RCU_GP_CTR_NEST_MASK)) + -> + tmp2 = READ_CACHED_VAR(urcu_gp_ctr); + ooo_mem(i); + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], + tmp2); + :: else -> + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], + tmp + 1); + fi; + smp_mb(i, j); + nest_i++; + :: nest_i >= READER_NEST_LEVEL -> break; + od; + + read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr); + data_access[get_readerid()] = 1; + data_access[get_readerid()] = 0; + + nest_i = 0; + do + :: nest_i < READER_NEST_LEVEL -> + smp_mb(i, j); + tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); + ooo_mem(i); + WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); + nest_i++; + :: nest_i >= READER_NEST_LEVEL -> break; od; + //ooo_mem(i); + //smp_mc(i); /* added */ } +active proctype urcu_reader_sig() +{ + byte i, j, nest_i; + byte tmp, tmp2; + + wait_init_done(); + + assert(get_pid() < NR_PROCS); + +end_reader: + do + :: 1 -> + wait_for_sighand_exec(); + /* + * We do not test reader's progress here, because we are mainly + * interested in writer's progress. The reader never blocks + * anyway. We have to test for reader/writer's progress + * separately, otherwise we could think the writer is doing + * progress when it's blocked by an always progressing reader. + */ +#ifdef READER_PROGRESS + /* Only test progress of one random reader. They are all the + * same. */ + if + :: get_readerid() == 0 -> +progress_reader: + skip; + fi; +#endif + urcu_one_read_sig(i, j, nest_i, tmp, tmp2); + od; +} + +#endif + /* Model the RCU update process. */ -active [NR_WRITERS] proctype urcu_writer() +active proctype urcu_writer() { byte i, j; - byte tmp; + byte tmp, tmp2; byte old_gen; + wait_init_done(); + assert(get_pid() < NR_PROCS); do @@ -338,11 +580,13 @@ active [NR_WRITERS] proctype urcu_writer() progress_writer1: #endif ooo_mem(i); + dispatch_sighand_write_exec(); atomic { old_gen = READ_CACHED_VAR(generation_ptr); WRITE_CACHED_VAR(generation_ptr, old_gen + 1); } ooo_mem(i); + dispatch_sighand_write_exec(); do :: 1 -> @@ -356,24 +600,31 @@ progress_writer1: fi; } od; - smp_mb(i); + smp_mb(i, j); + dispatch_sighand_write_exec(); tmp = READ_CACHED_VAR(urcu_gp_ctr); ooo_mem(i); + dispatch_sighand_write_exec(); WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT); ooo_mem(i); + dispatch_sighand_write_exec(); //smp_mc(i); - wait_for_quiescent_state(tmp, i, j); + wait_for_quiescent_state(tmp, tmp2, i, j); //smp_mc(i); #ifndef SINGLE_FLIP ooo_mem(i); + dispatch_sighand_write_exec(); tmp = READ_CACHED_VAR(urcu_gp_ctr); ooo_mem(i); + dispatch_sighand_write_exec(); WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT); //smp_mc(i); ooo_mem(i); - wait_for_quiescent_state(tmp, i, j); + dispatch_sighand_write_exec(); + wait_for_quiescent_state(tmp, tmp2, i, j); #endif - smp_mb(i); + smp_mb(i, j); + dispatch_sighand_write_exec(); write_lock = 0; /* free-up step, e.g., kfree(). */ atomic { @@ -393,6 +644,27 @@ end_writer: #ifdef WRITER_PROGRESS progress_writer2: #endif - skip; + dispatch_sighand_write_exec(); od; } + +/* Leave after the readers and writers so the pid count is ok. 
*/ +init { + byte i, j; + + atomic { + INIT_CACHED_VAR(urcu_gp_ctr, 1, j); + INIT_CACHED_VAR(generation_ptr, 0, j); + + i = 0; + do + :: i < NR_READERS -> + INIT_CACHED_VAR(urcu_active_readers[i], 0, j); + read_generation[i] = 1; + data_access[i] = 0; + i++; + :: i >= NR_READERS -> break + od; + init_done = 1; + } +} -- 2.34.1
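
The following are standalone Promela sketches of the modeling
techniques used above. They are illustrations added to this document,
not part of commit 8bc62ca; identifiers that do not appear in the
patch are made up for the examples.

PID folding. With TEST_SIGNAL_ON_READ, the signal handler reader runs
as a separate Promela process, yet it must act on the same per-reader
state as the reader it interrupts. The patch achieves this by folding
_pid 0 and _pid 1 onto model ID 0 in get_pid(). A minimal sketch,
assuming the patch's three-process layout (reader, signal reader,
writer):

	#define get_pid()	((_pid < 2) -> 0 : 1)

	active proctype reader()	/* _pid 0 */
	{
		assert(get_pid() == 0)
	}

	active proctype reader_sig()	/* _pid 1, folded onto reader 0 */
	{
		assert(get_pid() == 0)
	}

	active proctype writer()	/* _pid 2, model ID 1 */
	{
		assert(get_pid() == 1)
	}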
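
Cache modeling. Weak memory ordering is modeled by giving every
process a private cached copy of each shared variable plus a
per-variable dirty bitfield: writes go to the local copy, and memory
(mem_##x) only sees them once a barrier or one of ooo_mem()'s random
flushes writes the dirty copy back. A reduced sketch with one variable
and two processes, following the patch's naming scheme (the inline
names are illustrative):

	#define NR_PROCS 2

	byte mem_x;			/* "memory" copy */
	byte cached_x[NR_PROCS];	/* per-process cache */
	byte cache_dirty_x;		/* dirty bitfield, one bit per process */

	inline write_cached_x(v)
	{
		atomic {
			cached_x[_pid] = v;
			cache_dirty_x = cache_dirty_x | (1 << _pid)
		}
	}

	inline flush_x()	/* CACHE_WRITE_TO_MEM equivalent */
	{
		atomic {
			if
			:: cache_dirty_x & (1 << _pid) ->
				mem_x = cached_x[_pid];
				cache_dirty_x = cache_dirty_x & ~(1 << _pid)
			:: else -> skip
			fi
		}
	}

	active [NR_PROCS] proctype proc()
	{
		write_cached_x(_pid + 1);
		/* until the flush, memory still holds the old value */
		flush_x()
	}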
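
Signal delivery. The signal handler is modeled as a separate process
synchronized through the sighand_exec flag: the interrupted process
calls dispatch_sighand_read_exec()/dispatch_sighand_write_exec() at
every execution point, raising the flag and blocking until the handler
process has consumed it, so the handler body effectively runs inside
the other process's instruction stream. A minimal sketch of the same
handshake (two signals, then termination):

	bit sighand_exec = 0;

	active proctype main_flow()	/* the interrupted process */
	{
		byte n = 0;
		do
		:: n < 2 ->
			sighand_exec = 1;	/* deliver the "signal" */
			(sighand_exec == 0);	/* block until handled */
			n++
		:: n >= 2 -> break
		od
	}

	active proctype sighandler()
	{
	end:	do
		:: sighand_exec == 1 ->
			/* the read-side critical section runs here */
			sighand_exec = 0
		od
	}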
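
Read-side nesting. urcu_one_read() models the nestable read-side lock:
the outermost lock copies urcu_gp_ctr (phase bit plus a nesting count
of 1) into the reader's slot, nested entries only increment the count,
and every unlock decrements it. Stripped of the cache model and signal
dispatch, the protocol for a single reader looks like this:

	#define RCU_GP_CTR_BIT		(1 << 7)
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

	byte urcu_gp_ctr = 1;		/* count 1, phase bit clear */
	byte urcu_active_reader = 0;

	inline rcu_read_lock(tmp)
	{
		tmp = urcu_active_reader;
		if
		:: !(tmp & RCU_GP_CTR_NEST_MASK) ->
			/* outermost: snapshot phase plus count of 1 */
			urcu_active_reader = urcu_gp_ctr
		:: else ->
			urcu_active_reader = tmp + 1	/* nested */
		fi
	}

	inline rcu_read_unlock()
	{
		urcu_active_reader = urcu_active_reader - 1
	}

	init {
		byte tmp;
		rcu_read_lock(tmp);
		rcu_read_lock(tmp);	/* nesting only bumps the count */
		assert((urcu_active_reader & RCU_GP_CTR_NEST_MASK) == 2);
		rcu_read_unlock();
		rcu_read_unlock();
		assert(!(urcu_active_reader & RCU_GP_CTR_NEST_MASK))
	}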
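
Grace period detection. The writer's wait_for_reader() spins until
each reader is quiescent: a reader blocks the grace period only if its
sampled counter has a non-zero nesting count and a phase bit differing
from the current urcu_gp_ctr, i.e. it entered its critical section
before the writer flipped RCU_GP_CTR_BIT. A sketch exercising the
predicate on hand-picked snapshot values:

	#define RCU_GP_CTR_BIT		(1 << 7)
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

	byte urcu_gp_ctr = 1;

	inline check(snap, expect_wait)
	{
		if
		:: (snap & RCU_GP_CTR_NEST_MASK) &&
		   ((snap ^ urcu_gp_ctr) & RCU_GP_CTR_BIT) ->
			assert(expect_wait == 1)	/* writer must wait */
		:: else ->
			assert(expect_wait == 0)	/* quiescent */
		fi
	}

	init {
		check(0, 0);		/* not in a read-side C.S. */
		check(urcu_gp_ctr, 0);	/* entered after the current flip */
		urcu_gp_ctr = urcu_gp_ctr ^ RCU_GP_CTR_BIT;	/* flip */
		check(1, 1)		/* old-phase reader: must wait */
	}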
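
Initialization ordering. The init process is deliberately declared
after every active proctype: as the patch's closing comment notes,
this keeps the readers' and writer's _pid values (and therefore the
get_pid() mapping) stable, with init receiving the highest pid. Since
the workers start before init has run, they gate on init_done, which
init raises at the end of a single atomic initialization block. A
sketch of that startup ordering:

	bit init_done = 0;
	byte shared = 0;

	active [2] proctype worker()	/* _pid 0 and 1 */
	{
		(init_done == 1);	/* wait_init_done() equivalent */
		assert(shared == 42)	/* initialization is visible */
	}

	init {	/* declared last, so _pid 2 */
		assert(_pid == 2);
		atomic {
			shared = 42;
			init_done = 1
		}
	}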