/*
 * update test
 * [lttv.git] / trunk / tests / kernel / test-wbias-rwlock.c
 */
1 /* test-wbias-rwlock.c
2 *
3 */
4
5 #include <linux/module.h>
6 #include <linux/proc_fs.h>
7 #include <linux/sched.h>
8 #include <linux/timex.h>
9 #include <linux/kthread.h>
10 #include <linux/delay.h>
11 #include <linux/hardirq.h>
12 #include <linux/module.h>
13 #include <linux/percpu.h>
14 #include <linux/spinlock.h>
15 #include <asm/ptrace.h>
16 #include <linux/wbias-rwlock.h>
17
/* Test with no contention duration, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* Test duration, in seconds */
#define TEST_DURATION 60

/* Number of entries in the shared array checked for read consistency */
#define NR_VARS 100
/* Number of threads of each kind taking part in the contention test */
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbiasrwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * 1 : test with thread preemption readers.
 * 0 : test only with non-preemptable thread readers.
 */
#define TEST_PREEMPT 1

#if (TEST_PREEMPT)
#define NR_PREADERS 2
#else
#define NR_PREADERS 0
#endif


/*
 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0	/* busy loop */
#define INTERRUPT_READER_DELAY 100

/* Shared data: writers set every entry to the same value under the lock,
 * readers check all entries are equal. */
static int var[NR_VARS];
/* kthread handles, kept so each thread can be stopped at test end. */
static struct task_struct *preader_threads[NR_PREADERS];
static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];
90
#if (TEST_STD_RWLOCK)

static DEFINE_RWLOCK(std_rw_lock);

/*
 * Standard rwlock flavour: every read wrapper maps to the plain kernel
 * rwlock read side (same primitive regardless of calling context).
 */
#define wrap_read_lock()	read_lock(&std_rw_lock)
#define wrap_read_trylock()	read_trylock(&std_rw_lock)
#define wrap_read_unlock()	read_unlock(&std_rw_lock)

#define wrap_read_lock_inatomic()	read_lock(&std_rw_lock)
#define wrap_read_trylock_inatomic()	read_trylock(&std_rw_lock)
#define wrap_read_unlock_inatomic()	read_unlock(&std_rw_lock)

#define wrap_read_lock_irq()	read_lock(&std_rw_lock)
#define wrap_read_trylock_irq()	read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq()	read_unlock(&std_rw_lock)

#if (TEST_INTERRUPTS)
/* Interrupt readers present: the write side must disable interrupts. */
#define wrap_write_lock()	write_lock_irq(&std_rw_lock)
#define wrap_write_unlock()	write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock()	write_lock(&std_rw_lock)
#define wrap_write_unlock()	write_unlock(&std_rw_lock)
#endif

#else

/*
 * wbias rwlock flavour: build the lock context bitmask from the
 * reader/writer contexts actually exercised by this configuration.
 */
#if (TEST_INTERRUPTS)
#if (TEST_PREEMPT)
#define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RIRQ | BW_RNPTHREAD | BW_RPTHREAD)
#else
#define WBIASRWLOCKMASK (BW_WNPTHREAD | BW_RIRQ | BW_RNPTHREAD)
#endif
#else
#if (TEST_PREEMPT)
#define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RNPTHREAD | BW_RPTHREAD)
#else
#define WBIASRWLOCKMASK (BW_WNPTHREAD | BW_RNPTHREAD)
#endif
#endif
static DEFINE_WBIAS_RWLOCK(wbiasrwlock, WBIASRWLOCKMASK);
CHECK_WBIAS_RWLOCK_MAP(wbiasrwlock, WBIASRWLOCKMASK);


/* Plain wrappers: preemptable variant when TEST_PREEMPT, else _inatomic. */
#if (TEST_PREEMPT)
#define wrap_read_lock()	wbias_read_lock(&wbiasrwlock)
#define wrap_read_trylock()	wbias_read_trylock(&wbiasrwlock)
#define wrap_read_unlock()	wbias_read_unlock(&wbiasrwlock)
#else
#define wrap_read_lock()	wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock()	wbias_read_trylock_inatomic(&wbiasrwlock)
#define wrap_read_unlock()	wbias_read_unlock_inatomic(&wbiasrwlock)
#endif

#define wrap_read_lock_inatomic()	wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock_inatomic()	\
		wbias_read_trylock_inatomic(&wbiasrwlock)
#define wrap_read_unlock_inatomic()	\
		wbias_read_unlock_inatomic(&wbiasrwlock)

#define wrap_read_lock_irq()	wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq()	wbias_read_trylock_irq(&wbiasrwlock)
#define wrap_read_unlock_irq()	wbias_read_unlock_irq(&wbiasrwlock)

#define wrap_write_lock()	\
		wbias_write_lock(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_unlock()	\
		wbias_write_unlock(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_trylock_else_subscribe()	\
		wbias_write_trylock_else_subscribe(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_trylock_subscribed()	\
		wbias_write_trylock_subscribed(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_unsubscribe()	\
		wbias_write_unsubscribe(&wbiasrwlock, WBIASRWLOCKMASK)

#endif
166
/*
 * get_cycles() overhead calibration, measured in my_open() with
 * interrupts disabled.  All reported latencies are corrected by the
 * average calibration value.
 */
static cycles_t cycles_calibration_min,
	cycles_calibration_avg,
	cycles_calibration_max;

/* Subtract the average get_cycles() overhead from a raw delay. */
static inline cycles_t calibrate_cycles(cycles_t cycles)
{
	return cycles - cycles_calibration_avg;
}

/* /proc/testwbiasrwlock entry; opening it runs the whole test suite. */
struct proc_dir_entry *pentry = NULL;
177
178 static int p_or_np_reader_thread(const char *typename,
179 void *data, int preemptable)
180 {
181 int i;
182 int prev, cur;
183 unsigned long iter = 0;
184 cycles_t time1, time2, delay;
185 cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
186 cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;
187
188 printk("%s/%lu runnning\n", typename, (unsigned long)data);
189 do {
190 iter++;
191 if (!preemptable)
192 preempt_disable();
193 rdtsc_barrier();
194 time1 = get_cycles();
195 rdtsc_barrier();
196
197 if (!preemptable)
198 wrap_read_lock_inatomic();
199 else
200 wrap_read_lock();
201
202 rdtsc_barrier();
203 time2 = get_cycles();
204 rdtsc_barrier();
205 delay = time2 - time1;
206 ldelaymax = max(ldelaymax, delay);
207 ldelaymin = min(ldelaymin, delay);
208 ldelayavg += delay;
209 prev = var[0];
210 for (i = 1; i < NR_VARS; i++) {
211 cur = var[i];
212 if (cur != prev)
213 printk(KERN_ALERT
214 "Unequal cur %d/prev %d at i %d, iter %lu "
215 "in thread\n", cur, prev, i, iter);
216 }
217
218 rdtsc_barrier();
219 time1 = get_cycles();
220 rdtsc_barrier();
221
222 if (!preemptable)
223 wrap_read_unlock_inatomic();
224 else
225 wrap_read_unlock();
226 rdtsc_barrier();
227 time2 = get_cycles();
228 rdtsc_barrier();
229 delay = time2 - time1;
230 udelaymax = max(udelaymax, delay);
231 udelaymin = min(udelaymin, delay);
232 udelayavg += delay;
233
234 if (!preemptable)
235 preempt_enable();
236
237 if (THREAD_READER_DELAY)
238 msleep(THREAD_READER_DELAY);
239 } while (!kthread_should_stop());
240 if (!iter) {
241 printk("%s/%lu iterations : %lu", typename,
242 (unsigned long)data, iter);
243 } else {
244 ldelayavg /= iter;
245 udelayavg /= iter;
246 printk("%s/%lu iterations : %lu, "
247 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
248 typename,
249 (unsigned long)data, iter,
250 calibrate_cycles(ldelaymin),
251 calibrate_cycles(ldelayavg),
252 calibrate_cycles(ldelaymax));
253 printk("%s/%lu iterations : %lu, "
254 "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
255 typename,
256 (unsigned long)data, iter,
257 calibrate_cycles(udelaymin),
258 calibrate_cycles(udelayavg),
259 calibrate_cycles(udelaymax));
260 }
261 return 0;
262 }
263
/* Preemptable reader thread entry point. */
static int preader_thread(void *data)
{
	return p_or_np_reader_thread("preader_thread", data, 1);
}
268
/* Non-preemptable reader thread entry point (preemption disabled). */
static int npreader_thread(void *data)
{
	return p_or_np_reader_thread("npreader_thread", data, 0);
}
273
/*
 * Reader thread using the read trylock: busy-loops until the trylock
 * succeeds, then checks var[] consistency.
 * NOTE(review): "iter" counts only *failed* trylock attempts (it is not
 * incremented on the successful one), while success_iter counts
 * successful critical sections -- keep that in mind when reading the
 * printed results.
 * Runs until kthread_stop(); always returns 0.
 */
static int trylock_reader_thread(void *data)
{
	int i;
	int prev, cur;
	unsigned long iter = 0, success_iter = 0;

	printk("trylock_reader_thread/%lu runnning\n", (unsigned long)data);
	do {
#if (!TEST_PREEMPT)
		/* Non-preemptable configuration: hold the lock atomically. */
		preempt_disable();
#endif
		while (!wrap_read_trylock())
			iter++;
		success_iter++;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d, iter %lu "
				"in thread\n", cur, prev, i, iter);
		}
		wrap_read_unlock();
#if (!TEST_PREEMPT)
		preempt_enable();
#endif
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_reader_thread/%lu iterations : %lu, "
		"successful iterations : %lu\n",
		(unsigned long)data, iter, success_iter);
	return 0;
}
308
/*
 * Per-CPU interrupt reader statistics: lock/unlock latency min/avg/max
 * and the number of IPIs handled on each CPU.
 */
DEFINE_PER_CPU(cycles_t, int_ldelaymin);
DEFINE_PER_CPU(cycles_t, int_ldelayavg);
DEFINE_PER_CPU(cycles_t, int_ldelaymax);
DEFINE_PER_CPU(cycles_t, int_udelaymin);
DEFINE_PER_CPU(cycles_t, int_udelayavg);
DEFINE_PER_CPU(cycles_t, int_udelaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);
316
317 static void interrupt_reader_ipi(void *data)
318 {
319 int i;
320 int prev, cur;
321 cycles_t time1, time2;
322 cycles_t *ldelaymax, *ldelaymin, *ldelayavg, *ipi_nr, delay;
323 cycles_t *udelaymax, *udelaymin, *udelayavg;
324
325 /*
326 * Skip the ipi caller, not in irq context.
327 */
328 if (!in_irq())
329 return;
330
331 ldelaymax = &per_cpu(int_ldelaymax, smp_processor_id());
332 ldelaymin = &per_cpu(int_ldelaymin, smp_processor_id());
333 ldelayavg = &per_cpu(int_ldelayavg, smp_processor_id());
334 udelaymax = &per_cpu(int_udelaymax, smp_processor_id());
335 udelaymin = &per_cpu(int_udelaymin, smp_processor_id());
336 udelayavg = &per_cpu(int_udelayavg, smp_processor_id());
337 ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());
338
339 rdtsc_barrier();
340 time1 = get_cycles();
341 rdtsc_barrier();
342
343 wrap_read_lock_irq();
344
345 rdtsc_barrier();
346 time2 = get_cycles();
347 rdtsc_barrier();
348 delay = time2 - time1;
349 *ldelaymax = max(*ldelaymax, delay);
350 *ldelaymin = min(*ldelaymin, delay);
351 *ldelayavg += delay;
352 (*ipi_nr)++;
353 prev = var[0];
354 for (i = 1; i < NR_VARS; i++) {
355 cur = var[i];
356 if (cur != prev)
357 printk(KERN_ALERT
358 "Unequal cur %d/prev %d at i %d in interrupt\n",
359 cur, prev, i);
360 }
361 rdtsc_barrier();
362 time1 = get_cycles();
363 rdtsc_barrier();
364 wrap_read_unlock_irq();
365 time2 = get_cycles();
366 rdtsc_barrier();
367 delay = time2 - time1;
368 *udelaymax = max(*udelaymax, delay);
369 *udelaymin = min(*udelaymin, delay);
370 *udelayavg += delay;
371 }
372
/* Per-CPU trylock attempt / success counters for the irq trylock test. */
DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);

/*
 * IPI handler: busy-loops on the irq read trylock, then verifies var[]
 * consistency.  trylock_int_iter counts attempts (one for the first try
 * plus one per failed retry); trylock_int_success counts successes.
 */
static void trylock_interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	per_cpu(trylock_int_iter, smp_processor_id())++;
	while (!wrap_read_trylock_irq())
		per_cpu(trylock_int_iter, smp_processor_id())++;
	per_cpu(trylock_int_success, smp_processor_id())++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
			"Unequal cur %d/prev %d at i %d in interrupt\n",
				cur, prev, i);
	}
	wrap_read_unlock_irq();
}
401
402
/*
 * Driver thread for the interrupt readers: resets the per-CPU stats,
 * then periodically fires interrupt_reader_ipi on every CPU until
 * stopped, and finally prints per-CPU latency summaries.
 * Always returns 0.
 */
static int interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	for_each_online_cpu(i) {
		per_cpu(int_ldelaymax, i) = 0;
		per_cpu(int_ldelaymin, i) = ULLONG_MAX;
		per_cpu(int_ldelayavg, i) = 0;
		per_cpu(int_udelaymax, i) = 0;
		per_cpu(int_udelaymin, i) = ULLONG_MAX;
		per_cpu(int_udelayavg, i) = 0;
		per_cpu(int_ipi_nr, i) = 0;
	}
	do {
		iter++;
		/* Non-blocking IPI broadcast; handlers run in irq context. */
		on_each_cpu(interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		/* Avoid division by zero on CPUs that handled no IPI. */
		if (!per_cpu(int_ipi_nr, i))
			continue;
		per_cpu(int_ldelayavg, i) /= per_cpu(int_ipi_nr, i);
		per_cpu(int_udelayavg, i) /= per_cpu(int_ipi_nr, i);
		printk("interrupt readers on CPU %i, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_ldelaymin, i)),
			calibrate_cycles(per_cpu(int_ldelayavg, i)),
			calibrate_cycles(per_cpu(int_ldelaymax, i)));
		printk("interrupt readers on CPU %i, "
			"unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_udelaymin, i)),
			calibrate_cycles(per_cpu(int_udelayavg, i)),
			calibrate_cycles(per_cpu(int_udelaymax, i)));
	}
	return 0;
}
445
446 static int trylock_interrupt_reader_thread(void *data)
447 {
448 unsigned long iter = 0;
449 int i;
450
451 do {
452 iter++;
453 on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
454 if (INTERRUPT_READER_DELAY)
455 msleep(INTERRUPT_READER_DELAY);
456 } while (!kthread_should_stop());
457 printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
458 (unsigned long)data, iter);
459 for_each_online_cpu(i) {
460 printk("trylock interrupt readers on CPU %i, "
461 "iterations %lu, "
462 "successful iterations : %lu\n",
463 i, per_cpu(trylock_int_iter, i),
464 per_cpu(trylock_int_success, i));
465 per_cpu(trylock_int_iter, i) = 0;
466 per_cpu(trylock_int_success, i) = 0;
467 }
468 return 0;
469 }
470
/*
 * Writer thread: takes the write lock, stores one fresh value into every
 * entry of var[] (readers check all entries are equal), and accumulates
 * min/avg/max lock and unlock latencies in cycles.
 * Runs until kthread_stop(); always returns 0.
 */
static int writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0;
	cycles_t time1, time2, delay;
	cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
	cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;

	printk("writer_thread/%lu runnning\n", (unsigned long)data);
	do {
		iter++;
#if (!TEST_PREEMPT)
		preempt_disable();
#endif
		/* Timestamps bracketed by barriers; measures lock latency. */
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		ldelaymax = max(ldelaymax, delay);
		ldelaymin = min(ldelaymin, delay);
		ldelayavg += delay;
		/* Arbitrary fresh value so readers can detect torn updates. */
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}

		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_unlock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		udelaymax = max(udelaymax, delay);
		udelaymin = min(udelaymin, delay);
		udelayavg += delay;

#if (!TEST_PREEMPT)
		preempt_enable();
#endif
		if (WRITER_DELAY > 0)
			udelay(WRITER_DELAY);
		cpu_relax();	/*
				 * make sure we don't busy-loop faster than
				 * the lock busy-loop, it would cause reader and
				 * writer starvation.
				 */
	} while (!kthread_should_stop());
	/* iter >= 1: incremented before the loop condition is tested. */
	ldelayavg /= iter;
	udelayavg /= iter;
	printk("writer_thread/%lu iterations : %lu, "
		"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(ldelaymin),
		calibrate_cycles(ldelayavg),
		calibrate_cycles(ldelaymax));
	printk("writer_thread/%lu iterations : %lu, "
		"unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(udelaymin),
		calibrate_cycles(udelayavg),
		calibrate_cycles(udelaymax));
	return 0;
}
545
546 #if (TEST_STD_RWLOCK)
/*
 * Trylock writer for the standard rwlock flavour: retries
 * write_trylock() up to TRYLOCK_WRITERS_FAIL_ITER times (or forever if
 * -1), counting attempts, successes and give-ups.  Interrupts are
 * disabled manually around the attempts when interrupt readers are
 * tested, since the std write trylock has no _irq variant here.
 * Runs until kthread_stop(); always returns 0.
 */
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
	do {
#if (TEST_INTERRUPTS)
		/* std write trylock cannot disable interrupts. */
		local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#endif
		fail++;
#if (TEST_INTERRUPTS)
		local_irq_enable();
#endif
		goto loop;
locked:
		success++;
		/* Arbitrary fresh value; readers check all entries match. */
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		write_unlock_irq(&std_rw_lock);
#else
		write_unlock(&std_rw_lock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
		cpu_relax();	/*
				 * make sure we don't busy-loop faster than
				 * the lock busy-loop, it would cause reader and
				 * writer starvation.
				 */
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}
605
606 #else /* !TEST_STD_RWLOCK */
607
/*
 * Trylock writer for the wbias rwlock flavour: first attempts
 * trylock-else-subscribe (which registers writer intent on failure),
 * then retries the subscribed trylock up to TRYLOCK_WRITERS_FAIL_ITER-1
 * more times (or forever if -1) before unsubscribing and giving up.
 * Counts attempts, successes and give-ups.
 * Runs until kthread_stop(); always returns 0.
 */
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
	do {
		iter++;
#if (!TEST_PREEMPT)
		preempt_disable();
#endif
		if (wrap_write_trylock_else_subscribe())
			goto locked;

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (wrap_write_trylock_subscribed())
				goto locked;
		}
#else
		/* -1 : the first attempt above already counts as one try. */
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
			iter++;
			if (wrap_write_trylock_subscribed())
				goto locked;
		}
#endif
		fail++;
		/* Drop the writer-intent subscription before backing off. */
		wrap_write_unsubscribe();
		goto loop;
locked:
		success++;
		/* Arbitrary fresh value; readers check all entries match. */
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
		wrap_write_unlock();
loop:
#if (!TEST_PREEMPT)
		preempt_enable();
#endif
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
		cpu_relax();	/*
				 * make sure we don't busy-loop faster than
				 * the lock busy-loop, it would cause reader and
				 * writer starvation.
				 */
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}
665
666 #endif /* TEST_STD_RWLOCK */
667
668 static void wbias_rwlock_create(void)
669 {
670 unsigned long i;
671
672 for (i = 0; i < NR_PREADERS; i++) {
673 printk("starting preemptable reader thread %lu\n", i);
674 preader_threads[i] = kthread_run(preader_thread, (void *)i,
675 "wbiasrwlock_preader");
676 BUG_ON(!preader_threads[i]);
677 }
678
679 for (i = 0; i < NR_NPREADERS; i++) {
680 printk("starting non-preemptable reader thread %lu\n", i);
681 npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
682 "wbiasrwlock_npreader");
683 BUG_ON(!npreader_threads[i]);
684 }
685
686 for (i = 0; i < NR_TRYLOCK_READERS; i++) {
687 printk("starting trylock reader thread %lu\n", i);
688 trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
689 (void *)i, "wbiasrwlock_trylock_reader");
690 BUG_ON(!trylock_reader_threads[i]);
691 }
692 for (i = 0; i < NR_INTERRUPT_READERS; i++) {
693 printk("starting interrupt reader %lu\n", i);
694 interrupt_reader[i] = kthread_run(interrupt_reader_thread,
695 (void *)i,
696 "wbiasrwlock_interrupt_reader");
697 }
698 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
699 printk("starting trylock interrupt reader %lu\n", i);
700 trylock_interrupt_reader[i] =
701 kthread_run(trylock_interrupt_reader_thread,
702 (void *)i, "wbiasrwlock_trylock_interrupt_reader");
703 }
704 for (i = 0; i < NR_WRITERS; i++) {
705 printk("starting writer thread %lu\n", i);
706 writer_threads[i] = kthread_run(writer_thread, (void *)i,
707 "wbiasrwlock_writer");
708 BUG_ON(!writer_threads[i]);
709 }
710 for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
711 printk("starting trylock writer thread %lu\n", i);
712 trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
713 (void *)i, "wbiasrwlock_trylock_writer");
714 BUG_ON(!trylock_writer_threads[i]);
715 }
716 }
717
718 static void wbias_rwlock_stop(void)
719 {
720 unsigned long i;
721
722 for (i = 0; i < NR_WRITERS; i++)
723 kthread_stop(writer_threads[i]);
724 for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
725 kthread_stop(trylock_writer_threads[i]);
726 for (i = 0; i < NR_NPREADERS; i++)
727 kthread_stop(npreader_threads[i]);
728 for (i = 0; i < NR_PREADERS; i++)
729 kthread_stop(preader_threads[i]);
730 for (i = 0; i < NR_TRYLOCK_READERS; i++)
731 kthread_stop(trylock_reader_threads[i]);
732 for (i = 0; i < NR_INTERRUPT_READERS; i++)
733 kthread_stop(interrupt_reader[i]);
734 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
735 kthread_stop(trylock_interrupt_reader[i]);
736 }
737
738
/* Print the test phase name, then run the corresponding phase. */
static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	(*callback)();
}
744
745 static int my_open(struct inode *inode, struct file *file)
746 {
747 unsigned long i;
748 cycles_t time1, time2, delay;
749
750 printk("** get_cycles calibration **\n");
751 cycles_calibration_min = ULLONG_MAX;
752 cycles_calibration_avg = 0;
753 cycles_calibration_max = 0;
754
755 local_irq_disable();
756 for (i = 0; i < 10; i++) {
757 rdtsc_barrier();
758 time1 = get_cycles();
759 rdtsc_barrier();
760 rdtsc_barrier();
761 time2 = get_cycles();
762 rdtsc_barrier();
763 delay = time2 - time1;
764 cycles_calibration_min = min(cycles_calibration_min, delay);
765 cycles_calibration_avg += delay;
766 cycles_calibration_max = max(cycles_calibration_max, delay);
767 }
768 cycles_calibration_avg /= 10;
769 local_irq_enable();
770
771 printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
772 "results calibrated on avg\n",
773 cycles_calibration_min,
774 cycles_calibration_avg,
775 cycles_calibration_max);
776 printk("\n");
777
778 printk("** Single writer test, no contention **\n");
779 wbias_rwlock_profile_latency_reset();
780 writer_threads[0] = kthread_run(writer_thread, (void *)0,
781 "wbiasrwlock_writer");
782 BUG_ON(!writer_threads[0]);
783 ssleep(SINGLE_WRITER_TEST_DURATION);
784 kthread_stop(writer_threads[0]);
785 printk("\n");
786
787 wbias_rwlock_profile_latency_print();
788
789 printk("** Single trylock writer test, no contention **\n");
790 wbias_rwlock_profile_latency_reset();
791 trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
792 (void *)0,
793 "trylock_wbiasrwlock_writer");
794 BUG_ON(!trylock_writer_threads[0]);
795 ssleep(SINGLE_WRITER_TEST_DURATION);
796 kthread_stop(trylock_writer_threads[0]);
797 printk("\n");
798
799 wbias_rwlock_profile_latency_print();
800
801 printk("** Single preemptable reader test, no contention **\n");
802 wbias_rwlock_profile_latency_reset();
803 preader_threads[0] = kthread_run(preader_thread, (void *)0,
804 "wbiasrwlock_preader");
805 BUG_ON(!preader_threads[0]);
806 ssleep(SINGLE_READER_TEST_DURATION);
807 kthread_stop(preader_threads[0]);
808 printk("\n");
809
810 wbias_rwlock_profile_latency_print();
811
812 #if (TEST_PREEMPT)
813 printk("** Single non-preemptable reader test, no contention **\n");
814 wbias_rwlock_profile_latency_reset();
815 npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
816 "wbiasrwlock_npreader");
817 BUG_ON(!npreader_threads[0]);
818 ssleep(SINGLE_READER_TEST_DURATION);
819 kthread_stop(npreader_threads[0]);
820 printk("\n");
821
822 wbias_rwlock_profile_latency_print();
823 #endif
824
825 printk("** Multiple p/non-p readers test, no contention **\n");
826 wbias_rwlock_profile_latency_reset();
827 #if (TEST_PREEMPT)
828 for (i = 0; i < NR_PREADERS; i++) {
829 printk("starting preader thread %lu\n", i);
830 preader_threads[i] = kthread_run(preader_thread, (void *)i,
831 "wbiasrwlock_preader");
832 BUG_ON(!preader_threads[i]);
833 }
834 #endif
835 for (i = 0; i < NR_NPREADERS; i++) {
836 printk("starting npreader thread %lu\n", i);
837 npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
838 "wbiasrwlock_npreader");
839 BUG_ON(!npreader_threads[i]);
840 }
841 ssleep(SINGLE_READER_TEST_DURATION);
842 for (i = 0; i < NR_NPREADERS; i++)
843 kthread_stop(npreader_threads[i]);
844 #if (TEST_PREEMPT)
845 for (i = 0; i < NR_PREADERS; i++)
846 kthread_stop(preader_threads[i]);
847 #endif
848 printk("\n");
849
850 wbias_rwlock_profile_latency_print();
851
852 printk("** High contention test **\n");
853 wbias_rwlock_profile_latency_reset();
854 perform_test("wbias-rwlock-create", wbias_rwlock_create);
855 ssleep(TEST_DURATION);
856 perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
857 printk("\n");
858 wbias_rwlock_profile_latency_print();
859
860 return -EPERM;
861 }
862
863
864 static struct file_operations my_operations = {
865 .open = my_open,
866 };
867
868 int init_module(void)
869 {
870 pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
871 if (pentry)
872 pentry->proc_fops = &my_operations;
873
874 printk("PTHREAD_ROFFSET : %016lX\n", PTHREAD_ROFFSET);
875 printk("PTHREAD_RMASK : %016lX\n", PTHREAD_RMASK);
876 printk("NPTHREAD_ROFFSET : %016lX\n", NPTHREAD_ROFFSET);
877 printk("NPTHREAD_RMASK : %016lX\n", NPTHREAD_RMASK);
878 printk("SOFTIRQ_ROFFSET : %016lX\n", SOFTIRQ_ROFFSET);
879 printk("SOFTIRQ_RMASK : %016lX\n", SOFTIRQ_RMASK);
880 printk("HARDIRQ_ROFFSET : %016lX\n", HARDIRQ_ROFFSET);
881 printk("HARDIRQ_RMASK : %016lX\n", HARDIRQ_RMASK);
882 printk("PTHREAD_WOFFSET : %016lX\n", PTHREAD_WOFFSET);
883 printk("PTHREAD_WMASK : %016lX\n", PTHREAD_WMASK);
884 printk("NPTHREAD_WOFFSET : %016lX\n", NPTHREAD_WOFFSET);
885 printk("NPTHREAD_WMASK : %016lX\n", NPTHREAD_WMASK);
886 printk("WRITER_MUTEX : %016lX\n", WRITER_MUTEX);
887 printk("SOFTIRQ_WMASK : %016lX\n", SOFTIRQ_WMASK);
888 printk("HARDIRQ_WMASK : %016lX\n", HARDIRQ_WMASK);
889 printk("WQ_MUTEX : %016lX\n", WQ_MUTEX);
890 printk("WQ_ACTIVE : %016lX\n", WQ_ACTIVE);
891
892 return 0;
893 }
894
/* Module unload: remove the /proc test entry. */
void cleanup_module(void)
{
	remove_proc_entry("testwbiasrwlock", NULL);
}
899
900 MODULE_LICENSE("GPL");
901 MODULE_AUTHOR("Mathieu Desnoyers");
902 MODULE_DESCRIPTION("wbias rwlock test");
/* This page took 0.046986 seconds and 5 git commands to generate. */