/* test-wbias-rwlock.c
 *
 * Stress and latency test for the writer-biased rwlock (wbias rwlock).
 * The same tests can be run against the standard kernel rwlock for
 * comparison by setting TEST_STD_RWLOCK to 1.
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/wbias-rwlock.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>

/* Duration of the tests without contention, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* High-contention test duration, in seconds */
#define TEST_DURATION 60

#define NR_VARS 100
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_PREADERS 2
#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbiasrwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * Writer iteration delay, in us. 0 for busy loop. Caution: writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0	/* busy loop */
#define INTERRUPT_READER_DELAY 100

static int var[NR_VARS];
static struct task_struct *preader_threads[NR_PREADERS];
static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];

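/*
 * Lock API wrappers: the test threads below only use the generic wrap_*
 * operations, which map either to the standard kernel rwlock or to the
 * wbias rwlock depending on TEST_STD_RWLOCK.
 */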
#if (TEST_STD_RWLOCK)

static DEFINE_RWLOCK(std_rw_lock);

#define wrap_read_lock() read_lock(&std_rw_lock)
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)

#define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
#define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)
#define wrap_read_unlock_inatomic() read_unlock(&std_rw_lock)

#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() write_lock_irq(&std_rw_lock)
#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock() write_lock(&std_rw_lock)
#define wrap_write_unlock() write_unlock(&std_rw_lock)
#endif

#else

static DEFINE_WBIAS_RWLOCK(wbiasrwlock);

#define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
#define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
#define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)

#define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock_inatomic() \
		wbias_read_trylock_inatomic(&wbiasrwlock)
#define wrap_read_unlock_inatomic() \
		wbias_read_unlock_inatomic(&wbiasrwlock)

#define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
#define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() wbias_write_lock_irq(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock_irq(&wbiasrwlock)
#else
#define wrap_write_lock() wbias_write_lock(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock(&wbiasrwlock)
#endif

#endif

static cycles_t cycles_calibration_min,
	cycles_calibration_avg,
	cycles_calibration_max;

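/*
 * Reported lock delays are corrected by subtracting the average overhead
 * of the surrounding rdtsc_barrier()/get_cycles() pair, measured at the
 * beginning of my_open().
 */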
static inline cycles_t calibrate_cycles(cycles_t cycles)
{
	return cycles - cycles_calibration_avg;
}

struct proc_dir_entry *pentry = NULL;

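/*
 * Common body for preemptable and non-preemptable reader threads: measure
 * the read lock acquisition delay, then check that every var[] entry holds
 * the same value (writers set them all to one value under the write lock).
 */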
static int p_or_np_reader_thread(const char *typename,
		void *data, int preemptable)
{
	int i;
	int prev, cur;
	unsigned long iter = 0;
	cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
		delayavg = 0;

	printk("%s/%lu running\n", typename, (unsigned long)data);
	do {
		iter++;
		if (!preemptable)
			preempt_disable();
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		if (!preemptable)
			wrap_read_lock_inatomic();
		else
			wrap_read_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		delaymax = max(delaymax, delay);
		delaymin = min(delaymin, delay);
		delayavg += delay;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
					"Unequal cur %d/prev %d at i %d, iter %lu "
					"in thread\n", cur, prev, i, iter);
		}

		if (!preemptable)
			wrap_read_unlock_inatomic();
		else
			wrap_read_unlock();
		if (!preemptable)
			preempt_enable();
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
	} while (!kthread_should_stop());
	if (!iter) {
		printk("%s/%lu iterations : %lu\n", typename,
			(unsigned long)data, iter);
	} else {
		delayavg /= iter;
		printk("%s/%lu iterations : %lu, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			typename,
			(unsigned long)data, iter,
			calibrate_cycles(delaymin),
			calibrate_cycles(delayavg),
			calibrate_cycles(delaymax));
	}
	return 0;
}

static int preader_thread(void *data)
{
	return p_or_np_reader_thread("preader_thread", data, 1);
}

static int npreader_thread(void *data)
{
	return p_or_np_reader_thread("npreader_thread", data, 0);
}

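/*
 * Trylock reader: busy-loop on read_trylock, counting failed attempts in
 * iter and completed critical sections in success_iter.
 */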
static int trylock_reader_thread(void *data)
{
	int i;
	int prev, cur;
	unsigned long iter = 0, success_iter = 0;

	printk("trylock_reader_thread/%lu running\n", (unsigned long)data);
	do {
		while (!wrap_read_trylock())
			iter++;
		success_iter++;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
					"Unequal cur %d/prev %d at i %d, iter %lu "
					"in thread\n", cur, prev, i, iter);
		}
		wrap_read_unlock();
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_reader_thread/%lu iterations : %lu, "
		"successful iterations : %lu\n",
		(unsigned long)data, iter, success_iter);
	return 0;
}

DEFINE_PER_CPU(cycles_t, int_delaymin);
DEFINE_PER_CPU(cycles_t, int_delayavg);
DEFINE_PER_CPU(cycles_t, int_delaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);

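/*
 * IPI handler run on every CPU by on_each_cpu(): take the read lock from
 * hardirq context, record the per-CPU acquisition delay and verify the
 * protected variables, as the thread readers do.
 */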
static void interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;
	cycles_t time1, time2;
	cycles_t *delaymax, *delaymin, *delayavg, *ipi_nr, delay;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	delaymax = &per_cpu(int_delaymax, smp_processor_id());
	delaymin = &per_cpu(int_delaymin, smp_processor_id());
	delayavg = &per_cpu(int_delayavg, smp_processor_id());
	ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());

	rdtsc_barrier();
	time1 = get_cycles();
	rdtsc_barrier();

	wrap_read_lock_irq();

	rdtsc_barrier();
	time2 = get_cycles();
	rdtsc_barrier();
	delay = time2 - time1;
	*delaymax = max(*delaymax, delay);
	*delaymin = min(*delaymin, delay);
	*delayavg += delay;
	(*ipi_nr)++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d in interrupt\n",
				cur, prev, i);
	}
	wrap_read_unlock_irq();
}

DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);

static void trylock_interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	per_cpu(trylock_int_iter, smp_processor_id())++;
	while (!wrap_read_trylock_irq())
		per_cpu(trylock_int_iter, smp_processor_id())++;
	per_cpu(trylock_int_success, smp_processor_id())++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d in interrupt\n",
				cur, prev, i);
	}
	wrap_read_unlock_irq();
}


static int interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	for_each_online_cpu(i) {
		per_cpu(int_delaymax, i) = 0;
		per_cpu(int_delaymin, i) = ULLONG_MAX;
		per_cpu(int_delayavg, i) = 0;
		per_cpu(int_ipi_nr, i) = 0;
	}
	do {
		iter++;
		on_each_cpu(interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		if (!per_cpu(int_ipi_nr, i))
			continue;
		per_cpu(int_delayavg, i) /= per_cpu(int_ipi_nr, i);
		printk("interrupt readers on CPU %i, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_delaymin, i)),
			calibrate_cycles(per_cpu(int_delayavg, i)),
			calibrate_cycles(per_cpu(int_delaymax, i)));
	}
	return 0;
}

static int trylock_interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	do {
		iter++;
		on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		printk("trylock interrupt readers on CPU %i, "
			"iterations %lu, "
			"successful iterations : %lu\n",
			i, per_cpu(trylock_int_iter, i),
			per_cpu(trylock_int_success, i));
		per_cpu(trylock_int_iter, i) = 0;
		per_cpu(trylock_int_success, i) = 0;
	}
	return 0;
}

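/*
 * Writer: under the write lock, overwrite every var[] entry with a single
 * new value so that readers can detect a torn update. The write lock
 * acquisition delay is measured the same way as for the readers.
 */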
static int writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0;
	cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
		delayavg = 0;

	printk("writer_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
		//preempt_disable();	/* for get_cycles accuracy */
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		delaymax = max(delaymax, delay);
		delaymin = min(delaymin, delay);
		delayavg += delay;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}

		wrap_write_unlock();

		//preempt_enable();	/* for get_cycles accuracy */
		if (WRITER_DELAY > 0)
			udelay(WRITER_DELAY);
	} while (!kthread_should_stop());
	delayavg /= iter;
	printk("writer_thread/%lu iterations : %lu, "
		"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(delaymin),
		calibrate_cycles(delayavg),
		calibrate_cycles(delaymax));
	return 0;
}

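/*
 * Trylock writer: attempt the write lock without blocking, up to
 * TRYLOCK_WRITERS_FAIL_ITER attempts per round (forever when -1), counting
 * tries, successes and failed rounds. The wbias variant subscribes as a
 * writer on the first attempt, retries with the *_subscribed trylock
 * functions and unsubscribes when it gives up.
 */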
#if (TEST_STD_RWLOCK)
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
	do {
#if (TEST_INTERRUPTS)
		/*
		 * The standard write trylock has no irq-disabling variant,
		 * so disable interrupts around it manually.
		 */
		local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#endif
		fail++;
#if (TEST_INTERRUPTS)
		local_irq_enable();
#endif
		goto loop;
locked:
		success++;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		write_unlock_irq(&std_rw_lock);
#else
		write_unlock(&std_rw_lock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}

#else /* !TEST_STD_RWLOCK */

static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
#if (TEST_INTERRUPTS)
		if (wbias_write_trylock_irq_else_subscribe(&wbiasrwlock))
#else
		if (wbias_write_trylock_else_subscribe(&wbiasrwlock))
#endif
			goto locked;

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
#if (TEST_INTERRUPTS)
			if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
#else
			if (wbias_write_trylock_subscribed(&wbiasrwlock))
#endif
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
			iter++;
#if (TEST_INTERRUPTS)
			if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
#else
			if (wbias_write_trylock_subscribed(&wbiasrwlock))
#endif
				goto locked;
		}
#endif
		fail++;
		wbias_write_unsubscribe(&wbiasrwlock);
		goto loop;
locked:
		success++;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		wbias_write_unlock_irq(&wbiasrwlock);
#else
		wbias_write_unlock(&wbiasrwlock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}

#endif /* TEST_STD_RWLOCK */

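/*
 * Spawn all reader and writer kthreads for the high-contention test;
 * wbias_rwlock_stop() stops them again, writers first.
 */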
static void wbias_rwlock_create(void)
{
	unsigned long i;

	for (i = 0; i < NR_PREADERS; i++) {
		printk("starting preemptable reader thread %lu\n", i);
		preader_threads[i] = kthread_run(preader_thread, (void *)i,
			"wbiasrwlock_preader");
		BUG_ON(!preader_threads[i]);
	}

	for (i = 0; i < NR_NPREADERS; i++) {
		printk("starting non-preemptable reader thread %lu\n", i);
		npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
			"wbiasrwlock_npreader");
		BUG_ON(!npreader_threads[i]);
	}

	for (i = 0; i < NR_TRYLOCK_READERS; i++) {
		printk("starting trylock reader thread %lu\n", i);
		trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
			(void *)i, "wbiasrwlock_trylock_reader");
		BUG_ON(!trylock_reader_threads[i]);
	}
	for (i = 0; i < NR_INTERRUPT_READERS; i++) {
		printk("starting interrupt reader %lu\n", i);
		interrupt_reader[i] = kthread_run(interrupt_reader_thread,
			(void *)i,
			"wbiasrwlock_interrupt_reader");
	}
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
		printk("starting trylock interrupt reader %lu\n", i);
		trylock_interrupt_reader[i] =
			kthread_run(trylock_interrupt_reader_thread,
			(void *)i, "wbiasrwlock_trylock_interrupt_reader");
	}
	for (i = 0; i < NR_WRITERS; i++) {
		printk("starting writer thread %lu\n", i);
		writer_threads[i] = kthread_run(writer_thread, (void *)i,
			"wbiasrwlock_writer");
		BUG_ON(!writer_threads[i]);
	}
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
		printk("starting trylock writer thread %lu\n", i);
		trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
			(void *)i, "wbiasrwlock_trylock_writer");
		BUG_ON(!trylock_writer_threads[i]);
	}
}

static void wbias_rwlock_stop(void)
{
	unsigned long i;

	for (i = 0; i < NR_WRITERS; i++)
		kthread_stop(writer_threads[i]);
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
		kthread_stop(trylock_writer_threads[i]);
	for (i = 0; i < NR_NPREADERS; i++)
		kthread_stop(npreader_threads[i]);
	for (i = 0; i < NR_PREADERS; i++)
		kthread_stop(preader_threads[i]);
	for (i = 0; i < NR_TRYLOCK_READERS; i++)
		kthread_stop(trylock_reader_threads[i]);
	for (i = 0; i < NR_INTERRUPT_READERS; i++)
		kthread_stop(interrupt_reader[i]);
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
		kthread_stop(trylock_interrupt_reader[i]);
}


static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	callback();
}

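/*
 * Opening /proc/testwbiasrwlock runs the whole test sequence: get_cycles()
 * calibration, the no-contention single writer, trylock writer and reader
 * tests, the multiple-readers test, then the high-contention test with all
 * thread types. -EPERM is returned so the open itself never succeeds.
 */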
static int my_open(struct inode *inode, struct file *file)
{
	unsigned long i;
	cycles_t time1, time2, delay;

	printk("** get_cycles calibration **\n");
	cycles_calibration_min = ULLONG_MAX;
	cycles_calibration_avg = 0;
	cycles_calibration_max = 0;

	local_irq_disable();
	for (i = 0; i < 10; i++) {
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();
		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		cycles_calibration_min = min(cycles_calibration_min, delay);
		cycles_calibration_avg += delay;
		cycles_calibration_max = max(cycles_calibration_max, delay);
	}
	cycles_calibration_avg /= 10;
	local_irq_enable();

	printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
		"results calibrated on avg\n",
		cycles_calibration_min,
		cycles_calibration_avg,
		cycles_calibration_max);
	printk("\n");

	printk("** Single writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	writer_threads[0] = kthread_run(writer_thread, (void *)0,
		"wbiasrwlock_writer");
	BUG_ON(!writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single trylock writer test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
		(void *)0,
		"trylock_wbiasrwlock_writer");
	BUG_ON(!trylock_writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(trylock_writer_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	preader_threads[0] = kthread_run(preader_thread, (void *)0,
		"wbiasrwlock_preader");
	BUG_ON(!preader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(preader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Single non-preemptable reader test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
		"wbiasrwlock_npreader");
	BUG_ON(!npreader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(npreader_threads[0]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** Multiple p/non-p readers test, no contention **\n");
	wbias_rwlock_profile_latency_reset();
	for (i = 0; i < NR_PREADERS; i++) {
		printk("starting preader thread %lu\n", i);
		preader_threads[i] = kthread_run(preader_thread, (void *)i,
			"wbiasrwlock_preader");
		BUG_ON(!preader_threads[i]);
	}
	for (i = 0; i < NR_NPREADERS; i++) {
		printk("starting npreader thread %lu\n", i);
		npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
			"wbiasrwlock_npreader");
		BUG_ON(!npreader_threads[i]);
	}
	ssleep(MULTIPLE_READERS_TEST_DURATION);
	for (i = 0; i < NR_NPREADERS; i++)
		kthread_stop(npreader_threads[i]);
	for (i = 0; i < NR_PREADERS; i++)
		kthread_stop(preader_threads[i]);
	printk("\n");

	wbias_rwlock_profile_latency_print();

	printk("** High contention test **\n");
	wbias_rwlock_profile_latency_reset();
	perform_test("wbias-rwlock-create", wbias_rwlock_create);
	ssleep(TEST_DURATION);
	perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
	printk("\n");
	wbias_rwlock_profile_latency_print();

	return -EPERM;
}


static struct file_operations my_operations = {
	.open = my_open,
};

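/*
 * Module init: create the /proc/testwbiasrwlock trigger file and dump the
 * wbias rwlock bit-field offsets and masks for reference.
 */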
int init_module(void)
{
	pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
	if (pentry)
		pentry->proc_fops = &my_operations;

	printk("PTHREAD_ROFFSET : %lX\n", PTHREAD_ROFFSET);
	printk("PTHREAD_RMASK : %lX\n", PTHREAD_RMASK);
	printk("NPTHREAD_ROFFSET : %lX\n", NPTHREAD_ROFFSET);
	printk("NPTHREAD_RMASK : %lX\n", NPTHREAD_RMASK);
	printk("SOFTIRQ_ROFFSET : %lX\n", SOFTIRQ_ROFFSET);
	printk("SOFTIRQ_RMASK : %lX\n", SOFTIRQ_RMASK);
	printk("HARDIRQ_ROFFSET : %lX\n", HARDIRQ_ROFFSET);
	printk("HARDIRQ_RMASK : %lX\n", HARDIRQ_RMASK);
	printk("PTHREAD_WOFFSET : %lX\n", PTHREAD_WOFFSET);
	printk("PTHREAD_WMASK : %lX\n", PTHREAD_WMASK);
	printk("NPTHREAD_WOFFSET : %lX\n", NPTHREAD_WOFFSET);
	printk("NPTHREAD_WMASK : %lX\n", NPTHREAD_WMASK);
	printk("WRITER_MUTEX : %lX\n", WRITER_MUTEX);
	printk("SOFTIRQ_WMASK : %lX\n", SOFTIRQ_WMASK);
	printk("HARDIRQ_WMASK : %lX\n", HARDIRQ_WMASK);

	return 0;
}

void cleanup_module(void)
{
	remove_proc_entry("testwbiasrwlock", NULL);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("wbias rwlock test");