update test
[lttv.git] / trunk / tests / kernel / test-wbias-rwlock.c
1 /* test-wbias-rwlock.c
2 *
3 */
4
5 #include <linux/module.h>
6 #include <linux/proc_fs.h>
7 #include <linux/sched.h>
8 #include <linux/timex.h>
9 #include <linux/wbias-rwlock.h>
10 #include <linux/kthread.h>
11 #include <linux/delay.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/spinlock.h>
16 #include <asm/ptrace.h>
17
/* Test with no contention duration, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* Test duration, in seconds */
#define TEST_DURATION 60

/* Number of shared variables; readers check they are all equal. */
#define NR_VARS 100
/* Thread counts for the high-contention test. */
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_PREADERS 2	/* preemptable readers */
#define NR_NPREADERS 2	/* non-preemptable readers */
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbiasrwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0 /* busy loop */
#define INTERRUPT_READER_DELAY 100

/* Shared data protected by the lock under test. */
static int var[NR_VARS];
/* Task handles for each thread class, used by create/stop below. */
static struct task_struct *preader_threads[NR_PREADERS];
static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];
78
#if (TEST_STD_RWLOCK)

/*
 * Wrappers mapping the generic wrap_* operations used by the test threads
 * onto a standard kernel rwlock.  The std rwlock has no distinct
 * inatomic/irq read variants, so all three map to plain read_lock.
 */
static DEFINE_RWLOCK(std_rw_lock);

#define wrap_read_lock() read_lock(&std_rw_lock)
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)

#define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
#define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)
#define wrap_read_unlock_inatomic() read_unlock(&std_rw_lock)

#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)

/* With interrupt readers, writers must disable irqs to avoid deadlock. */
#if (TEST_INTERRUPTS)
#define wrap_write_lock() write_lock_irq(&std_rw_lock)
#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock() write_lock(&std_rw_lock)
#define wrap_write_unlock() write_unlock(&std_rw_lock)
#endif

#else

/*
 * Same wrappers mapped onto the writer-biased rwlock under test, which
 * does provide dedicated inatomic (preemption off) and irq read variants.
 */
static DEFINE_WBIAS_RWLOCK(wbiasrwlock);

#define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
#define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
#define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)

#define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock_inatomic() \
	wbias_read_trylock_inatomic(&wbiasrwlock)
#define wrap_read_unlock_inatomic() \
	wbias_read_unlock_inatomic(&wbiasrwlock)

#define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
#define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() wbias_write_lock_irq(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock_irq(&wbiasrwlock)
#else
#define wrap_write_lock() wbias_write_lock(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock(&wbiasrwlock)
#endif

#endif
130
/*
 * get_cycles() overhead measured at proc-open time (see my_open()).
 * Results reported by the test threads are corrected by the average.
 */
static cycles_t cycles_calibration_min,
	cycles_calibration_avg,
	cycles_calibration_max;

/* Subtract the average measurement overhead from a raw cycle delta. */
static inline cycles_t calibrate_cycles(cycles_t cycles)
{
	return cycles - cycles_calibration_avg;
}

/* /proc entry used to trigger the test run via open(2). */
struct proc_dir_entry *pentry = NULL;
141
142 static int p_or_np_reader_thread(void *data, int preemptable)
143 {
144 int i;
145 int prev, cur;
146 unsigned long iter = 0;
147 cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
148 delayavg = 0;
149
150 printk("reader_thread/%lu runnning\n", (unsigned long)data);
151 do {
152 iter++;
153 if (!preemptable)
154 preempt_disable();
155 rdtsc_barrier();
156 time1 = get_cycles();
157 rdtsc_barrier();
158
159 if (!preemptable)
160 wrap_read_lock_inatomic();
161 else
162 wrap_read_lock();
163
164 rdtsc_barrier();
165 time2 = get_cycles();
166 rdtsc_barrier();
167 delay = time2 - time1;
168 delaymax = max(delaymax, delay);
169 delaymin = min(delaymin, delay);
170 delayavg += delay;
171 prev = var[0];
172 for (i = 1; i < NR_VARS; i++) {
173 cur = var[i];
174 if (cur != prev)
175 printk(KERN_ALERT
176 "Unequal cur %d/prev %d at i %d, iter %lu "
177 "in thread\n", cur, prev, i, iter);
178 }
179
180 if (!preemptable)
181 wrap_read_unlock_inatomic();
182 else
183 wrap_read_unlock();
184 if (!preemptable)
185 preempt_enable();
186 if (THREAD_READER_DELAY)
187 msleep(THREAD_READER_DELAY);
188 } while (!kthread_should_stop());
189 if (!iter) {
190 printk("reader_thread/%lu iterations : %lu",
191 (unsigned long)data, iter);
192 } else {
193 delayavg /= iter;
194 printk("reader_thread/%lu iterations : %lu, "
195 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
196 (unsigned long)data, iter,
197 calibrate_cycles(delaymin),
198 calibrate_cycles(delayavg),
199 calibrate_cycles(delaymax));
200 }
201 return 0;
202 }
203
/* Preemptable reader thread entry point: run the shared reader body
 * with preemption left enabled. */
static int preader_thread(void *data)
{
	const int preemptable = 1;

	return p_or_np_reader_thread(data, preemptable);
}
208
/* Non-preemptable reader thread entry point: run the shared reader body
 * with preemption disabled around the critical section. */
static int npreader_thread(void *data)
{
	const int preemptable = 0;

	return p_or_np_reader_thread(data, preemptable);
}
213
214 static int trylock_reader_thread(void *data)
215 {
216 int i;
217 int prev, cur;
218 unsigned long iter = 0, success_iter = 0;
219
220 printk("trylock_reader_thread/%lu runnning\n", (unsigned long)data);
221 do {
222 while (!wrap_read_trylock())
223 iter++;
224 success_iter++;
225 prev = var[0];
226 for (i = 1; i < NR_VARS; i++) {
227 cur = var[i];
228 if (cur != prev)
229 printk(KERN_ALERT
230 "Unequal cur %d/prev %d at i %d, iter %lu "
231 "in thread\n", cur, prev, i, iter);
232 }
233 wrap_read_unlock();
234 if (THREAD_READER_DELAY)
235 msleep(THREAD_READER_DELAY);
236 } while (!kthread_should_stop());
237 printk("trylock_reader_thread/%lu iterations : %lu, "
238 "successful iterations : %lu\n",
239 (unsigned long)data, iter, success_iter);
240 return 0;
241 }
242
/* Per-cpu irq-context read-lock latency statistics, filled by
 * interrupt_reader_ipi() and reported by interrupt_reader_thread(). */
DEFINE_PER_CPU(cycles_t, int_delaymin);
DEFINE_PER_CPU(cycles_t, int_delayavg);
DEFINE_PER_CPU(cycles_t, int_delaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);
247
/*
 * IPI handler: take the read lock from hard interrupt context, time the
 * acquisition, and verify the shared variables are consistent.
 * Accumulates min/avg/max latency in the per-cpu stats above.
 */
static void interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;
	cycles_t time1, time2;
	cycles_t *delaymax, *delaymin, *delayavg, *ipi_nr, delay;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	/* In irq context: the cpu cannot change under us. */
	delaymax = &per_cpu(int_delaymax, smp_processor_id());
	delaymin = &per_cpu(int_delaymin, smp_processor_id());
	delayavg = &per_cpu(int_delayavg, smp_processor_id());
	ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());

	/* Barriers keep get_cycles() from being reordered around the lock. */
	rdtsc_barrier();
	time1 = get_cycles();
	rdtsc_barrier();

	wrap_read_lock_irq();

	rdtsc_barrier();
	time2 = get_cycles();
	rdtsc_barrier();
	delay = time2 - time1;
	*delaymax = max(*delaymax, delay);
	*delaymin = min(*delaymin, delay);
	*delayavg += delay;	/* running sum; divided by ipi_nr at report time */
	(*ipi_nr)++;
	/* Consistency check: all vars must be equal under read lock. */
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
			"Unequal cur %d/prev %d at i %d in interrupt\n",
			cur, prev, i);
	}
	wrap_read_unlock_irq();
}
290
/* Per-cpu attempt/success counters for the irq-context trylock readers. */
DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);
293
294 static void trylock_interrupt_reader_ipi(void *data)
295 {
296 int i;
297 int prev, cur;
298
299 /*
300 * Skip the ipi caller, not in irq context.
301 */
302 if (!in_irq())
303 return;
304
305 per_cpu(trylock_int_iter, smp_processor_id())++;
306 while (!wrap_read_trylock_irq())
307 per_cpu(trylock_int_iter, smp_processor_id())++;
308 per_cpu(trylock_int_success, smp_processor_id())++;
309 prev = var[0];
310 for (i = 1; i < NR_VARS; i++) {
311 cur = var[i];
312 if (cur != prev)
313 printk(KERN_ALERT
314 "Unequal cur %d/prev %d at i %d in interrupt\n",
315 cur, prev, i);
316 }
317 wrap_read_unlock_irq();
318 }
319
320
/*
 * Thread driving the irq-context readers: periodically broadcasts
 * interrupt_reader_ipi() to all cpus, then reports per-cpu latency stats
 * when stopped.
 */
static int interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	/* Reset per-cpu stats so repeated runs start clean. */
	for_each_online_cpu(i) {
		per_cpu(int_delaymax, i) = 0;
		per_cpu(int_delaymin, i) = ULLONG_MAX;
		per_cpu(int_delayavg, i) = 0;
		per_cpu(int_ipi_nr, i) = 0;
	}
	do {
		iter++;
		/* wait=0: do not block for IPI completion on each round. */
		on_each_cpu(interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		/* Skip cpus that never serviced an IPI (avoids div by 0). */
		if (!per_cpu(int_ipi_nr, i))
			continue;
		per_cpu(int_delayavg, i) /= per_cpu(int_ipi_nr, i);
		printk("interrupt readers on CPU %i, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_delaymin, i)),
			calibrate_cycles(per_cpu(int_delayavg, i)),
			calibrate_cycles(per_cpu(int_delaymax, i)));
	}
	return 0;
}
353
354 static int trylock_interrupt_reader_thread(void *data)
355 {
356 unsigned long iter = 0;
357 int i;
358
359 do {
360 iter++;
361 on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
362 if (INTERRUPT_READER_DELAY)
363 msleep(INTERRUPT_READER_DELAY);
364 } while (!kthread_should_stop());
365 printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
366 (unsigned long)data, iter);
367 for_each_online_cpu(i) {
368 printk("trylock interrupt readers on CPU %i, "
369 "iterations %lu, "
370 "successful iterations : %lu\n",
371 i, per_cpu(trylock_int_iter, i),
372 per_cpu(trylock_int_success, i));
373 per_cpu(trylock_int_iter, i) = 0;
374 per_cpu(trylock_int_success, i) = 0;
375 }
376 return 0;
377 }
378
379 static int writer_thread(void *data)
380 {
381 int i;
382 int new;
383 unsigned long iter = 0;
384 cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
385 delayavg = 0;
386
387 printk("writer_thread/%lu runnning\n", (unsigned long)data);
388 do {
389 iter++;
390 //preempt_disable(); /* for get_cycles accuracy */
391 rdtsc_barrier();
392 time1 = get_cycles();
393 rdtsc_barrier();
394
395 wrap_write_lock();
396
397 rdtsc_barrier();
398 time2 = get_cycles();
399 rdtsc_barrier();
400 delay = time2 - time1;
401 delaymax = max(delaymax, delay);
402 delaymin = min(delaymin, delay);
403 delayavg += delay;
404 new = (int)get_cycles();
405 for (i = 0; i < NR_VARS; i++) {
406 var[i] = new;
407 }
408
409 wrap_write_unlock();
410
411 //preempt_enable(); /* for get_cycles accuracy */
412 if (WRITER_DELAY > 0)
413 udelay(WRITER_DELAY);
414 } while (!kthread_should_stop());
415 delayavg /= iter;
416 printk("writer_thread/%lu iterations : %lu, "
417 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
418 (unsigned long)data, iter,
419 calibrate_cycles(delaymin),
420 calibrate_cycles(delayavg),
421 calibrate_cycles(delaymax));
422 return 0;
423 }
424
425 #if (TEST_STD_RWLOCK)
/*
 * Trylock writer for the standard rwlock: attempt write_trylock up to
 * TRYLOCK_WRITERS_FAIL_ITER times (or forever if -1), counting total
 * attempts (iter), acquisitions (success) and give-ups (fail).
 */
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
	do {
#if (TEST_INTERRUPTS)
		/* std write trylock cannot disable interrupts. */
		local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#endif
		/* All attempts failed this round; re-enable irqs and retry. */
		fail++;
#if (TEST_INTERRUPTS)
		local_irq_enable();
#endif
		goto loop;
locked:
		success++;
		/* Stamp every slot with one value under the write lock. */
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		write_unlock_irq(&std_rw_lock);
#else
		write_unlock(&std_rw_lock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}
479
480 #else /* !TEST_STD_RWLOCK */
481
/*
 * Trylock writer for the wbias rwlock.  First attempt uses the
 * trylock-else-subscribe variant: on failure it leaves the writer
 * subscribed (biasing the lock against new readers), then retries with
 * the _subscribed variant up to TRYLOCK_WRITERS_FAIL_ITER total attempts
 * (or forever if -1).  On give-up the subscription must be released with
 * wbias_write_unsubscribe().
 */
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
	do {
		iter++;
#if (TEST_INTERRUPTS)
		if (wbias_write_trylock_irq_else_subscribe(&wbiasrwlock))
#else
		if (wbias_write_trylock_else_subscribe(&wbiasrwlock))
#endif
			goto locked;

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
#if (TEST_INTERRUPTS)
			if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
#else
			if (wbias_write_trylock_subscribed(&wbiasrwlock))
#endif
				goto locked;
		}
#else
		/* - 1 : the else-subscribe attempt above already counted. */
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
			iter++;
#if (TEST_INTERRUPTS)
			if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
#else
			if (wbias_write_trylock_subscribed(&wbiasrwlock))
#endif
				goto locked;
		}
#endif
		fail++;
		/* Drop the writer subscription taken by else_subscribe. */
		wbias_write_unsubscribe(&wbiasrwlock);
		goto loop;
locked:
		success++;
		/* Stamp every slot with one value under the write lock. */
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		wbias_write_unlock_irq(&wbiasrwlock);
#else
		wbias_write_unlock(&wbiasrwlock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}
544
545 #endif /* TEST_STD_RWLOCK */
546
547 static void wbias_rwlock_create(void)
548 {
549 unsigned long i;
550
551 for (i = 0; i < NR_PREADERS; i++) {
552 printk("starting preemptable reader thread %lu\n", i);
553 preader_threads[i] = kthread_run(preader_thread, (void *)i,
554 "wbiasrwlock_preader");
555 BUG_ON(!preader_threads[i]);
556 }
557
558 for (i = 0; i < NR_NPREADERS; i++) {
559 printk("starting non-preemptable reader thread %lu\n", i);
560 npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
561 "wbiasrwlock_npreader");
562 BUG_ON(!npreader_threads[i]);
563 }
564
565 for (i = 0; i < NR_TRYLOCK_READERS; i++) {
566 printk("starting trylock reader thread %lu\n", i);
567 trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
568 (void *)i, "wbiasrwlock_trylock_reader");
569 BUG_ON(!trylock_reader_threads[i]);
570 }
571 for (i = 0; i < NR_INTERRUPT_READERS; i++) {
572 printk("starting interrupt reader %lu\n", i);
573 interrupt_reader[i] = kthread_run(interrupt_reader_thread,
574 (void *)i,
575 "wbiasrwlock_interrupt_reader");
576 }
577 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
578 printk("starting trylock interrupt reader %lu\n", i);
579 trylock_interrupt_reader[i] =
580 kthread_run(trylock_interrupt_reader_thread,
581 (void *)i, "wbiasrwlock_trylock_interrupt_reader");
582 }
583 for (i = 0; i < NR_WRITERS; i++) {
584 printk("starting writer thread %lu\n", i);
585 writer_threads[i] = kthread_run(writer_thread, (void *)i,
586 "wbiasrwlock_writer");
587 BUG_ON(!writer_threads[i]);
588 }
589 for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
590 printk("starting trylock writer thread %lu\n", i);
591 trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
592 (void *)i, "wbiasrwlock_trylock_writer");
593 BUG_ON(!trylock_writer_threads[i]);
594 }
595 }
596
597 static void wbias_rwlock_stop(void)
598 {
599 unsigned long i;
600
601 for (i = 0; i < NR_WRITERS; i++)
602 kthread_stop(writer_threads[i]);
603 for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
604 kthread_stop(trylock_writer_threads[i]);
605 for (i = 0; i < NR_NPREADERS; i++)
606 kthread_stop(npreader_threads[i]);
607 for (i = 0; i < NR_PREADERS; i++)
608 kthread_stop(preader_threads[i]);
609 for (i = 0; i < NR_TRYLOCK_READERS; i++)
610 kthread_stop(trylock_reader_threads[i]);
611 for (i = 0; i < NR_INTERRUPT_READERS; i++)
612 kthread_stop(interrupt_reader[i]);
613 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
614 kthread_stop(trylock_interrupt_reader[i]);
615 }
616
617
/* Announce a test phase by name, then run its callback. */
static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	(*callback)();
}
623
/*
 * Proc-file open handler: runs the entire benchmark sequence.
 * 1) calibrate get_cycles() overhead, 2) uncontended single-thread tests
 * (writer, trylock writer, preemptable/non-preemptable readers),
 * 3) multiple readers, 4) full high-contention test.
 * Always returns -EPERM so the open fails and no fd is kept around;
 * the test runs as a side effect of attempting the open.
 */
static int my_open(struct inode *inode, struct file *file)
{
	unsigned long i;
	cycles_t time1, time2, delay;

	printk("** get_cycles calibration **\n");
	cycles_calibration_min = ULLONG_MAX;
	cycles_calibration_avg = 0;
	cycles_calibration_max = 0;

	/* Measure back-to-back get_cycles() cost with irqs off. */
	local_irq_disable();
	for (i = 0; i < 10; i++) {
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();
		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		cycles_calibration_min = min(cycles_calibration_min, delay);
		cycles_calibration_avg += delay;
		cycles_calibration_max = max(cycles_calibration_max, delay);
	}
	cycles_calibration_avg /= 10;
	local_irq_enable();

	printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
		"results calibrated on avg\n",
		cycles_calibration_min,
		cycles_calibration_avg,
		cycles_calibration_max);
	printk("\n");

	printk("** Single writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	writer_threads[0] = kthread_run(writer_thread, (void *)0,
		"wbiasrwlock_writer");
	BUG_ON(!writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single trylock writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
		(void *)0,
		"trylock_wbiasrwlock_writer");
	BUG_ON(!trylock_writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(trylock_writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	preader_threads[0] = kthread_run(preader_thread, (void *)0,
		"wbiasrwlock_preader");
	BUG_ON(!preader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(preader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single non-preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
		"wbiasrwlock_npreader");
	BUG_ON(!npreader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(npreader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Multiple p/non-p readers test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	for (i = 0; i < NR_PREADERS; i++) {
		printk("starting preader thread %lu\n", i);
		preader_threads[i] = kthread_run(preader_thread, (void *)i,
			"wbiasrwlock_preader");
		BUG_ON(!preader_threads[i]);
	}
	for (i = 0; i < NR_NPREADERS; i++) {
		printk("starting npreader thread %lu\n", i);
		npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
			"wbiasrwlock_npreader");
		BUG_ON(!npreader_threads[i]);
	}
	ssleep(SINGLE_READER_TEST_DURATION);
	for (i = 0; i < NR_NPREADERS; i++)
		kthread_stop(npreader_threads[i]);
	for (i = 0; i < NR_PREADERS; i++)
		kthread_stop(preader_threads[i]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** High contention test **\n");
	wbias_rwlock_profile_latency_reset();
	perform_test("wbias-rwlock-create", wbias_rwlock_create);
	ssleep(TEST_DURATION);
	perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
	printk("\n");
	wbias_rwlock_profile_latency_print();

	/* Deliberate: fail the open so userspace never holds the file. */
	return -EPERM;
}
735
736
737 static struct file_operations my_operations = {
738 .open = my_open,
739 };
740
741 int init_module(void)
742 {
743 pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
744 if (pentry)
745 pentry->proc_fops = &my_operations;
746
747 printk("PTHREAD_ROFFSET : %lX\n", PTHREAD_ROFFSET);
748 printk("PTHREAD_RMASK : %lX\n", PTHREAD_RMASK);
749 printk("NPTHREAD_ROFFSET : %lX\n", NPTHREAD_ROFFSET);
750 printk("NPTHREAD_RMASK : %lX\n", NPTHREAD_RMASK);
751 printk("SOFTIRQ_ROFFSET : %lX\n", SOFTIRQ_ROFFSET);
752 printk("SOFTIRQ_RMASK : %lX\n", SOFTIRQ_RMASK);
753 printk("HARDIRQ_ROFFSET : %lX\n", HARDIRQ_ROFFSET);
754 printk("HARDIRQ_RMASK : %lX\n", HARDIRQ_RMASK);
755 printk("SUBSCRIBERS_WOFFSET : %lX\n", SUBSCRIBERS_WOFFSET);
756 printk("SUBSCRIBERS_WMASK : %lX\n", SUBSCRIBERS_WMASK);
757 printk("WRITER_MUTEX : %lX\n", WRITER_MUTEX);
758 printk("NPTHREAD_WMASK : %lX\n", SOFTIRQ_WMASK);
759 printk("SOFTIRQ_WMASK : %lX\n", SOFTIRQ_WMASK);
760 printk("HARDIRQ_WMASK : %lX\n", HARDIRQ_WMASK);
761
762 return 0;
763 }
764
/* Module exit: remove the trigger proc entry created in init_module(). */
void cleanup_module(void)
{
	remove_proc_entry("testwbiasrwlock", NULL);
}
769
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("wbias rwlock test");
This page took 0.044859 seconds and 5 git commands to generate.