/* test-wbias-rwlock.c
 *
 * Writer-biased rwlock stress and latency test module.
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
#include <linux/wbias-rwlock.h>

/* Duration of the no-contention tests, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* High-contention test duration, in seconds */
#define TEST_DURATION 60

#define NR_VARS 100
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbiasrwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * 1 : test with preemptable thread readers.
 * 0 : test only with non-preemptable thread readers.
 */
#define TEST_PREEMPT 1

#if (TEST_PREEMPT)
#define NR_PREADERS 2
#else
#define NR_PREADERS 0
#endif


/*
 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of trylock attempts after which a trylock writer gives up for
 * the current iteration. -1 to retry forever.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0	/* busy loop */
#define INTERRUPT_READER_DELAY 100

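/*
 * On non-preemptable kernels the busy-looping test threads must yield
 * explicitly, otherwise they can monopolize their CPU and stall the
 * other test threads (and kthread_stop()).
 */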
#ifdef CONFIG_PREEMPT
#define yield_in_non_preempt()
#else
#define yield_in_non_preempt() yield()
#endif

static int var[NR_VARS];
static struct task_struct *preader_threads[NR_PREADERS];
static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];

#if (TEST_STD_RWLOCK)

static DEFINE_RWLOCK(std_rw_lock);

#define wrap_read_lock() read_lock(&std_rw_lock)
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)

#define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
#define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)

#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() write_lock_irq(&std_rw_lock)
#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock() write_lock(&std_rw_lock)
#define wrap_write_unlock() write_unlock(&std_rw_lock)
#endif

#define wrap_write_trylock() write_trylock(&std_rw_lock)

#else

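/*
 * Select the wbias-rwlock writer priority and the set of reader
 * execution contexts (irq, non-preemptable thread, preemptable thread)
 * according to the test configuration above.
 */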
#if (TEST_INTERRUPTS)
#if (TEST_PREEMPT)
#define WBIASRWLOCKWCTX WB_PRIO_P
#define WBIASRWLOCKRCTX (WB_RIRQ | WB_RNPTHREAD | WB_RPTHREAD)
#else
#define WBIASRWLOCKWCTX WB_PRIO_NP
#define WBIASRWLOCKRCTX (WB_RIRQ | WB_RNPTHREAD)
#endif
#else
#if (TEST_PREEMPT)
#define WBIASRWLOCKWCTX WB_PRIO_P
#define WBIASRWLOCKRCTX (WB_RNPTHREAD | WB_RPTHREAD)
#else
#define WBIASRWLOCKWCTX WB_PRIO_NP
#define WBIASRWLOCKRCTX (WB_RNPTHREAD)
#endif
#endif

static DEFINE_WBIAS_RWLOCK(wbiasrwlock, WBIASRWLOCKWCTX, WBIASRWLOCKRCTX);
CHECK_WBIAS_RWLOCK_MAP(wbiasrwlock, WBIASRWLOCKWCTX, WBIASRWLOCKRCTX);


#if (TEST_PREEMPT)
#define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
#define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
#else
#define wrap_read_lock() wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock() wbias_read_trylock_inatomic(&wbiasrwlock)
#endif
#define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)

#define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock_inatomic() \
	wbias_read_trylock_inatomic(&wbiasrwlock)

#define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)

#define wrap_write_lock() \
	wbias_write_lock(&wbiasrwlock, WBIASRWLOCKWCTX, WBIASRWLOCKRCTX)
#define wrap_write_unlock() \
	wbias_write_unlock(&wbiasrwlock, WBIASRWLOCKWCTX, WBIASRWLOCKRCTX)
#define wrap_write_trylock() \
	wbias_write_trylock(&wbiasrwlock, WBIASRWLOCKWCTX, WBIASRWLOCKRCTX)

#endif

static cycles_t cycles_calibration_min,
	cycles_calibration_avg,
	cycles_calibration_max;

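/*
 * Reported lock/unlock delays are corrected by subtracting the average
 * cost of two back-to-back get_cycles() reads, measured at test start.
 */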
static inline cycles_t calibrate_cycles(cycles_t cycles)
{
	return cycles - cycles_calibration_avg;
}

struct proc_dir_entry *pentry = NULL;

static int p_or_np_reader_thread(const char *typename,
		void *data, int preemptable)
{
	int i;
	int prev, cur;
	unsigned long iter = 0;
	cycles_t time1, time2, delay;
	cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
	cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;

	printk("%s/%lu running\n", typename, (unsigned long)data);
	do {
		iter++;
		if (!preemptable)
			preempt_disable();
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		if (!preemptable)
			wrap_read_lock_inatomic();
		else
			wrap_read_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		ldelaymax = max(ldelaymax, delay);
		ldelaymin = min(ldelaymin, delay);
		ldelayavg += delay;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev) {
				printk(KERN_ALERT
					"Unequal cur %d/prev %d at i %d, iter %lu "
					"in reader thread\n",
					cur, prev, i, iter);
			}
		}

		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_read_unlock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		udelaymax = max(udelaymax, delay);
		udelaymin = min(udelaymin, delay);
		udelayavg += delay;

		if (!preemptable)
			preempt_enable();

		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	if (!iter) {
		printk("%s/%lu iterations : %lu\n", typename,
			(unsigned long)data, iter);
	} else {
		ldelayavg /= iter;
		udelayavg /= iter;
		printk("%s/%lu iterations : %lu, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			typename,
			(unsigned long)data, iter,
			calibrate_cycles(ldelaymin),
			calibrate_cycles(ldelayavg),
			calibrate_cycles(ldelaymax));
		printk("%s/%lu iterations : %lu, "
			"unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			typename,
			(unsigned long)data, iter,
			calibrate_cycles(udelaymin),
			calibrate_cycles(udelayavg),
			calibrate_cycles(udelaymax));
	}
	return 0;
}

static int preader_thread(void *data)
{
	return p_or_np_reader_thread("preader_thread", data, 1);
}

static int npreader_thread(void *data)
{
	return p_or_np_reader_thread("npreader_thread", data, 0);
}

static int trylock_reader_thread(void *data)
{
	int i;
	int prev, cur;
	unsigned long iter = 0, success_iter = 0;

	printk("trylock_reader_thread/%lu running\n", (unsigned long)data);
	do {
#if (!TEST_PREEMPT)
		preempt_disable();
#endif
		while (!wrap_read_trylock()) {
			cpu_relax();
			iter++;
		}
		success_iter++;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev) {
				printk(KERN_ALERT
					"Unequal cur %d/prev %d at i %d, iter %lu "
					"in trylock reader thread\n",
					cur, prev, i, iter);
			}
		}
		wrap_read_unlock();
#if (!TEST_PREEMPT)
		preempt_enable();
#endif
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	printk("trylock_reader_thread/%lu iterations : %lu, "
		"successful iterations : %lu\n",
		(unsigned long)data, iter + success_iter, success_iter);
	return 0;
}

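/*
 * Per-CPU lock/unlock latency statistics gathered from the interrupt
 * (IPI handler) readers below.
 */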
DEFINE_PER_CPU(cycles_t, int_ldelaymin);
DEFINE_PER_CPU(cycles_t, int_ldelayavg);
DEFINE_PER_CPU(cycles_t, int_ldelaymax);
DEFINE_PER_CPU(cycles_t, int_udelaymin);
DEFINE_PER_CPU(cycles_t, int_udelayavg);
DEFINE_PER_CPU(cycles_t, int_udelaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);

static void interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;
	cycles_t time1, time2;
	cycles_t *ldelaymax, *ldelaymin, *ldelayavg, *ipi_nr, delay;
	cycles_t *udelaymax, *udelaymin, *udelayavg;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	ldelaymax = &per_cpu(int_ldelaymax, smp_processor_id());
	ldelaymin = &per_cpu(int_ldelaymin, smp_processor_id());
	ldelayavg = &per_cpu(int_ldelayavg, smp_processor_id());
	udelaymax = &per_cpu(int_udelaymax, smp_processor_id());
	udelaymin = &per_cpu(int_udelaymin, smp_processor_id());
	udelayavg = &per_cpu(int_udelayavg, smp_processor_id());
	ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());

	rdtsc_barrier();
	time1 = get_cycles();
	rdtsc_barrier();

	wrap_read_lock_irq();

	rdtsc_barrier();
	time2 = get_cycles();
	rdtsc_barrier();
	delay = time2 - time1;
	*ldelaymax = max(*ldelaymax, delay);
	*ldelaymin = min(*ldelaymin, delay);
	*ldelayavg += delay;
	(*ipi_nr)++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d in interrupt\n",
				cur, prev, i);
	}
	rdtsc_barrier();
	time1 = get_cycles();
	rdtsc_barrier();
	wrap_read_unlock();
	rdtsc_barrier();
	time2 = get_cycles();
	rdtsc_barrier();
	delay = time2 - time1;
	*udelaymax = max(*udelaymax, delay);
	*udelaymin = min(*udelaymin, delay);
	*udelayavg += delay;
}

DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);

static void trylock_interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	per_cpu(trylock_int_iter, smp_processor_id())++;
	while (!wrap_read_trylock_irq())
		per_cpu(trylock_int_iter, smp_processor_id())++;
	per_cpu(trylock_int_success, smp_processor_id())++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d in interrupt\n",
				cur, prev, i);
	}
	wrap_read_unlock();
}


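/*
 * The interrupt reader threads do not take the read lock themselves:
 * each iteration calls on_each_cpu(), and the resulting IPI handlers
 * take the read lock from hard irq context. The local (non-irq)
 * invocation is skipped by the in_irq() check in the handlers.
 */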
static int interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	for_each_online_cpu(i) {
		per_cpu(int_ldelaymax, i) = 0;
		per_cpu(int_ldelaymin, i) = ULLONG_MAX;
		per_cpu(int_ldelayavg, i) = 0;
		per_cpu(int_udelaymax, i) = 0;
		per_cpu(int_udelaymin, i) = ULLONG_MAX;
		per_cpu(int_udelayavg, i) = 0;
		per_cpu(int_ipi_nr, i) = 0;
	}
	do {
		iter++;
		on_each_cpu(interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	printk("interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		if (!per_cpu(int_ipi_nr, i))
			continue;
		per_cpu(int_ldelayavg, i) /= per_cpu(int_ipi_nr, i);
		per_cpu(int_udelayavg, i) /= per_cpu(int_ipi_nr, i);
		printk("interrupt readers on CPU %i, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_ldelaymin, i)),
			calibrate_cycles(per_cpu(int_ldelayavg, i)),
			calibrate_cycles(per_cpu(int_ldelaymax, i)));
		printk("interrupt readers on CPU %i, "
			"unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_udelaymin, i)),
			calibrate_cycles(per_cpu(int_udelayavg, i)),
			calibrate_cycles(per_cpu(int_udelaymax, i)));
	}
	return 0;
}

static int trylock_interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	do {
		iter++;
		on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		printk("trylock interrupt readers on CPU %i, "
			"iterations %lu, "
			"successful iterations : %lu\n",
			i, per_cpu(trylock_int_iter, i),
			per_cpu(trylock_int_success, i));
		per_cpu(trylock_int_iter, i) = 0;
		per_cpu(trylock_int_success, i) = 0;
	}
	return 0;
}

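/*
 * Writer threads take the write lock, verify that all NR_VARS variables
 * hold the same value (readers check the same invariant), then overwrite
 * them with a new value. Lock and unlock latencies are accumulated per
 * thread.
 */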
static int writer_thread(void *data)
{
	int i;
	int new, prev, cur;
	unsigned long iter = 0;
	cycles_t time1, time2, delay;
	cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
	cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;

	printk("writer_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
#if (!TEST_PREEMPT)
		preempt_disable();
#endif
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		ldelaymax = max(ldelaymax, delay);
		ldelaymin = min(ldelaymin, delay);
		ldelayavg += delay;
		/*
		 * Read the previous values, check that they are coherent.
		 */
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
					"Unequal cur %d/prev %d at i %d, iter %lu "
					"in writer thread\n",
					cur, prev, i, iter);
		}
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}

		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_unlock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		udelaymax = max(udelaymax, delay);
		udelaymin = min(udelaymin, delay);
		udelayavg += delay;

#if (!TEST_PREEMPT)
		preempt_enable();
#endif
		if (WRITER_DELAY > 0)
			udelay(WRITER_DELAY);
		cpu_relax();	/*
				 * make sure we don't busy-loop faster than
				 * the lock busy-loop, it would cause reader and
				 * writer starvation.
				 */
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	ldelayavg /= iter;
	udelayavg /= iter;
	printk("writer_thread/%lu iterations : %lu, "
		"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(ldelaymin),
		calibrate_cycles(ldelayavg),
		calibrate_cycles(ldelaymax));
	printk("writer_thread/%lu iterations : %lu, "
		"unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(udelaymin),
		calibrate_cycles(udelayavg),
		calibrate_cycles(udelaymax));
	return 0;
}

static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
	do {
#if ((!TEST_PREEMPT) && (!TEST_STD_RWLOCK))
		preempt_disable();
#endif

#if (TEST_STD_RWLOCK && TEST_INTERRUPTS)
		/*
		 * The std rwlock write trylock does not disable interrupts;
		 * do it by hand.
		 */
		local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (wrap_write_trylock())
				goto locked;
			cpu_relax();
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
			iter++;
			if (wrap_write_trylock())
				goto locked;
			cpu_relax();
		}
#endif
		fail++;

#if (TEST_STD_RWLOCK && TEST_INTERRUPTS)
		local_irq_enable();
#endif

#if ((!TEST_PREEMPT) && (!TEST_STD_RWLOCK))
		preempt_enable();
#endif
		goto loop;
locked:
		success++;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
		wrap_write_unlock();
#if ((!TEST_PREEMPT) && (!TEST_STD_RWLOCK))
		preempt_enable();
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
		cpu_relax();	/*
				 * make sure we don't busy-loop faster than
				 * the lock busy-loop, it would cause reader and
				 * writer starvation.
				 */
		yield_in_non_preempt();
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}

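/*
 * High-contention test setup and teardown: spawn the full mix of writer,
 * trylock writer, preemptable and non-preemptable reader, trylock reader
 * and interrupt reader threads, then stop them all.
 */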
static void wbias_rwlock_create(void)
{
	unsigned long i;

	for (i = 0; i < NR_PREADERS; i++) {
		printk("starting preemptable reader thread %lu\n", i);
		preader_threads[i] = kthread_run(preader_thread, (void *)i,
			"wbiasrwlock_preader");
		BUG_ON(!preader_threads[i]);
	}

	for (i = 0; i < NR_NPREADERS; i++) {
		printk("starting non-preemptable reader thread %lu\n", i);
		npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
			"wbiasrwlock_npreader");
		BUG_ON(!npreader_threads[i]);
	}

	for (i = 0; i < NR_TRYLOCK_READERS; i++) {
		printk("starting trylock reader thread %lu\n", i);
		trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
			(void *)i, "wbiasrwlock_trylock_reader");
		BUG_ON(!trylock_reader_threads[i]);
	}
	for (i = 0; i < NR_INTERRUPT_READERS; i++) {
		printk("starting interrupt reader %lu\n", i);
		interrupt_reader[i] = kthread_run(interrupt_reader_thread,
			(void *)i,
			"wbiasrwlock_interrupt_reader");
		BUG_ON(!interrupt_reader[i]);
	}
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
		printk("starting trylock interrupt reader %lu\n", i);
		trylock_interrupt_reader[i] =
			kthread_run(trylock_interrupt_reader_thread,
			(void *)i, "wbiasrwlock_trylock_interrupt_reader");
		BUG_ON(!trylock_interrupt_reader[i]);
	}
	for (i = 0; i < NR_WRITERS; i++) {
		printk("starting writer thread %lu\n", i);
		writer_threads[i] = kthread_run(writer_thread, (void *)i,
			"wbiasrwlock_writer");
		BUG_ON(!writer_threads[i]);
	}
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
		printk("starting trylock writer thread %lu\n", i);
		trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
			(void *)i, "wbiasrwlock_trylock_writer");
		BUG_ON(!trylock_writer_threads[i]);
	}
}

static void wbias_rwlock_stop(void)
{
	unsigned long i;

	for (i = 0; i < NR_WRITERS; i++)
		kthread_stop(writer_threads[i]);
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
		kthread_stop(trylock_writer_threads[i]);
	for (i = 0; i < NR_NPREADERS; i++)
		kthread_stop(npreader_threads[i]);
	for (i = 0; i < NR_PREADERS; i++)
		kthread_stop(preader_threads[i]);
	for (i = 0; i < NR_TRYLOCK_READERS; i++)
		kthread_stop(trylock_reader_threads[i]);
	for (i = 0; i < NR_INTERRUPT_READERS; i++)
		kthread_stop(interrupt_reader[i]);
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
		kthread_stop(trylock_interrupt_reader[i]);
}


static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	callback();
}

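/*
 * The whole test sequence runs from the proc file open handler:
 * get_cycles() calibration, single-writer, single-trylock-writer,
 * single-reader and multiple-reader no-contention tests, then the
 * high-contention test. The handler returns -EPERM, so the open itself
 * always fails once the tests have completed.
 */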
static int my_open(struct inode *inode, struct file *file)
{
	unsigned long i;
	cycles_t time1, time2, delay;

	printk("** get_cycles calibration **\n");
	cycles_calibration_min = ULLONG_MAX;
	cycles_calibration_avg = 0;
	cycles_calibration_max = 0;

	local_irq_disable();
	for (i = 0; i < 10; i++) {
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();
		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		cycles_calibration_min = min(cycles_calibration_min, delay);
		cycles_calibration_avg += delay;
		cycles_calibration_max = max(cycles_calibration_max, delay);
	}
	cycles_calibration_avg /= 10;
	local_irq_enable();

	printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
		"results calibrated on avg\n",
		cycles_calibration_min,
		cycles_calibration_avg,
		cycles_calibration_max);
	printk("\n");

#if (NR_WRITERS)
	printk("** Single writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	writer_threads[0] = kthread_run(writer_thread, (void *)0,
		"wbiasrwlock_writer");
	BUG_ON(!writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();
#endif

#if (NR_TRYLOCK_WRITERS)
	printk("** Single trylock writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
		(void *)0,
		"trylock_wbiasrwlock_writer");
	BUG_ON(!trylock_writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(trylock_writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();
#endif

#if (TEST_PREEMPT)
	printk("** Single preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	preader_threads[0] = kthread_run(preader_thread, (void *)0,
		"wbiasrwlock_preader");
	BUG_ON(!preader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(preader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();
#endif

	printk("** Single non-preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
		"wbiasrwlock_npreader");
	BUG_ON(!npreader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(npreader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Multiple p/non-p readers test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
#if (TEST_PREEMPT)
	for (i = 0; i < NR_PREADERS; i++) {
		printk("starting preader thread %lu\n", i);
		preader_threads[i] = kthread_run(preader_thread, (void *)i,
			"wbiasrwlock_preader");
		BUG_ON(!preader_threads[i]);
	}
#endif
	for (i = 0; i < NR_NPREADERS; i++) {
		printk("starting npreader thread %lu\n", i);
		npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
			"wbiasrwlock_npreader");
		BUG_ON(!npreader_threads[i]);
	}
	ssleep(MULTIPLE_READERS_TEST_DURATION);
	for (i = 0; i < NR_NPREADERS; i++)
		kthread_stop(npreader_threads[i]);
#if (TEST_PREEMPT)
	for (i = 0; i < NR_PREADERS; i++)
		kthread_stop(preader_threads[i]);
#endif
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** High contention test **\n");
	wbias_rwlock_profile_latency_reset();
	perform_test("wbias-rwlock-create", wbias_rwlock_create);
	ssleep(TEST_DURATION);
	perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
	printk("\n");
	wbias_rwlock_profile_latency_print();

	return -EPERM;
}


static struct file_operations my_operations = {
	.owner = THIS_MODULE,
	.open = my_open,
};

int init_module(void)
{
	pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
	if (pentry)
		pentry->proc_fops = &my_operations;

	printk("UC_READER_MASK   : %08X\n", UC_READER_MASK);
	printk("UC_HARDIRQ_R_MASK: %08X\n", UC_HARDIRQ_READER_MASK);
	printk("UC_SOFTIRQ_R_MASK: %08X\n", UC_SOFTIRQ_READER_MASK);
	printk("UC_NPTHREA_R_MASK: %08X\n", UC_NPTHREAD_READER_MASK);
	printk("UC_PTHREAD_R_MASK: %08X\n", UC_PTHREAD_READER_MASK);
	printk("UC_WRITER        : %08X\n", UC_WRITER);
	printk("UC_SLOW_WRITER   : %08X\n", UC_SLOW_WRITER);
	printk("UC_WQ_ACTIVE     : %08X\n", UC_WQ_ACTIVE);
	printk("WS_MASK          : %08X\n", WS_MASK);
	printk("WS_WQ_MUTEX      : %08X\n", WS_WQ_MUTEX);
	printk("WS_COUNT_MUTEX   : %08X\n", WS_COUNT_MUTEX);
	printk("WS_LOCK_MUTEX    : %08X\n", WS_LOCK_MUTEX);
	printk("CTX_RMASK        : %016lX\n", CTX_RMASK);
	printk("CTX_WMASK        : %016lX\n", CTX_WMASK);

	return 0;
}

void cleanup_module(void)
{
	remove_proc_entry("testwbiasrwlock", NULL);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("wbias rwlock test");