/* test-wbias-rwlock.c
 *
 * Writer-biased rwlock test module.
 */

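/*
 * Usage sketch (assuming the wbias-rwlock patchset is applied and the module
 * object is built as test-wbias-rwlock.ko):
 *
 *   insmod test-wbias-rwlock.ko
 *   cat /proc/testwbiasrwlock     (triggers the test; results go to dmesg)
 *   rmmod test-wbias-rwlock
 */
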
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
#include <linux/wbias-rwlock.h>

/* Test with no contention duration, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* Test duration, in seconds */
#define TEST_DURATION 60

#define NR_VARS 100
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbias rwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * 1 : test with preemptable thread readers.
 * 0 : test only with non-preemptable thread readers.
 */
#define TEST_PREEMPT 1

#if (TEST_PREEMPT)
#define NR_PREADERS 2
#else
#define NR_PREADERS 0
#endif


/*
 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0 /* busy loop */
#define INTERRUPT_READER_DELAY 100

static int var[NR_VARS];
static struct task_struct *preader_threads[NR_PREADERS];
static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];

#if (TEST_STD_RWLOCK)

static DEFINE_RWLOCK(std_rw_lock);

#define wrap_read_lock() read_lock(&std_rw_lock)
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)

#define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
#define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)
#define wrap_read_unlock_inatomic() read_unlock(&std_rw_lock)

#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() write_lock_irq(&std_rw_lock)
#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock() write_lock(&std_rw_lock)
#define wrap_write_unlock() write_unlock(&std_rw_lock)
#endif

#else

#if (TEST_INTERRUPTS)
#define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RIRQ | BW_RNPTHREAD | BW_RPTHREAD)
#else
#if (TEST_PREEMPT)
#define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RNPTHREAD | BW_RPTHREAD)
#else
#define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RNPTHREAD)
#endif
#endif
static DEFINE_WBIAS_RWLOCK(wbiasrwlock, WBIASRWLOCKMASK);
CHECK_WBIAS_RWLOCK_MAP(WBIASRWLOCKMASK);


#define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
#define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
#define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)

#define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
#define wrap_read_trylock_inatomic() \
        wbias_read_trylock_inatomic(&wbiasrwlock)
#define wrap_read_unlock_inatomic() \
        wbias_read_unlock_inatomic(&wbiasrwlock)

#define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
#define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)

#define wrap_write_lock() \
        wbias_write_lock(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_unlock() \
        wbias_write_unlock(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_trylock_else_subscribe() \
        wbias_write_trylock_else_subscribe(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_trylock_subscribed() \
        wbias_write_trylock_subscribed(&wbiasrwlock, WBIASRWLOCKMASK)
#define wrap_write_unsubscribe() \
        wbias_write_unsubscribe(&wbiasrwlock, WBIASRWLOCKMASK)

#endif

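/*
 * get_cycles() calibration: measure the overhead of two back-to-back
 * timestamp reads so that the average overhead can be subtracted from every
 * lock/unlock delay reported below.
 */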
static cycles_t cycles_calibration_min,
        cycles_calibration_avg,
        cycles_calibration_max;

static inline cycles_t calibrate_cycles(cycles_t cycles)
{
        return cycles - cycles_calibration_avg;
}

struct proc_dir_entry *pentry = NULL;

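/*
 * Reader thread body shared by the preemptable and non-preemptable reader
 * threads: repeatedly take the read lock (the _inatomic variant, with
 * preemption disabled, for non-preemptable readers), check that all NR_VARS
 * entries of var[] hold the same value, and record min/avg/max lock and
 * unlock latencies in cycles.
 */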
static int p_or_np_reader_thread(const char *typename,
                void *data, int preemptable)
{
        int i;
        int prev, cur;
        unsigned long iter = 0;
        cycles_t time1, time2, delay;
        cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
        cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;

        printk("%s/%lu running\n", typename, (unsigned long)data);
        do {
                iter++;
                if (!preemptable)
                        preempt_disable();
                rdtsc_barrier();
                time1 = get_cycles();
                rdtsc_barrier();

                if (!preemptable)
                        wrap_read_lock_inatomic();
                else
                        wrap_read_lock();

                rdtsc_barrier();
                time2 = get_cycles();
                rdtsc_barrier();
                delay = time2 - time1;
                ldelaymax = max(ldelaymax, delay);
                ldelaymin = min(ldelaymin, delay);
                ldelayavg += delay;
                prev = var[0];
                for (i = 1; i < NR_VARS; i++) {
                        cur = var[i];
                        if (cur != prev)
                                printk(KERN_ALERT
                                        "Unequal cur %d/prev %d at i %d, iter %lu "
                                        "in thread\n", cur, prev, i, iter);
                }

                rdtsc_barrier();
                time1 = get_cycles();
                rdtsc_barrier();

                if (!preemptable)
                        wrap_read_unlock_inatomic();
                else
                        wrap_read_unlock();
                rdtsc_barrier();
                time2 = get_cycles();
                rdtsc_barrier();
                delay = time2 - time1;
                udelaymax = max(udelaymax, delay);
                udelaymin = min(udelaymin, delay);
                udelayavg += delay;

                if (!preemptable)
                        preempt_enable();

                if (THREAD_READER_DELAY)
                        msleep(THREAD_READER_DELAY);
        } while (!kthread_should_stop());
        if (!iter) {
                printk("%s/%lu iterations : %lu\n", typename,
                        (unsigned long)data, iter);
        } else {
                ldelayavg /= iter;
                udelayavg /= iter;
                printk("%s/%lu iterations : %lu, "
                        "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                        typename,
                        (unsigned long)data, iter,
                        calibrate_cycles(ldelaymin),
                        calibrate_cycles(ldelayavg),
                        calibrate_cycles(ldelaymax));
                printk("%s/%lu iterations : %lu, "
                        "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                        typename,
                        (unsigned long)data, iter,
                        calibrate_cycles(udelaymin),
                        calibrate_cycles(udelayavg),
                        calibrate_cycles(udelaymax));
        }
        return 0;
}

static int preader_thread(void *data)
{
        return p_or_np_reader_thread("preader_thread", data, 1);
}

static int npreader_thread(void *data)
{
        return p_or_np_reader_thread("npreader_thread", data, 0);
}

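/*
 * Trylock reader: spin on read_trylock, counting failed attempts (iter) and
 * successfully entered critical sections (success_iter).
 */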
static int trylock_reader_thread(void *data)
{
        int i;
        int prev, cur;
        unsigned long iter = 0, success_iter = 0;

        printk("trylock_reader_thread/%lu running\n", (unsigned long)data);
        do {
                while (!wrap_read_trylock())
                        iter++;
                success_iter++;
                prev = var[0];
                for (i = 1; i < NR_VARS; i++) {
                        cur = var[i];
                        if (cur != prev)
                                printk(KERN_ALERT
                                        "Unequal cur %d/prev %d at i %d, iter %lu "
                                        "in thread\n", cur, prev, i, iter);
                }
                wrap_read_unlock();
                if (THREAD_READER_DELAY)
                        msleep(THREAD_READER_DELAY);
        } while (!kthread_should_stop());
        printk("trylock_reader_thread/%lu iterations : %lu, "
                "successful iterations : %lu\n",
                (unsigned long)data, iter, success_iter);
        return 0;
}

DEFINE_PER_CPU(cycles_t, int_ldelaymin);
DEFINE_PER_CPU(cycles_t, int_ldelayavg);
DEFINE_PER_CPU(cycles_t, int_ldelaymax);
DEFINE_PER_CPU(cycles_t, int_udelaymin);
DEFINE_PER_CPU(cycles_t, int_udelayavg);
DEFINE_PER_CPU(cycles_t, int_udelaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);

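/*
 * Runs in hard interrupt context (IPI handler) on each CPU: take the irq
 * read lock, verify var[] consistency, and accumulate per-CPU lock/unlock
 * latency statistics.
 */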
static void interrupt_reader_ipi(void *data)
{
        int i;
        int prev, cur;
        cycles_t time1, time2;
        cycles_t *ldelaymax, *ldelaymin, *ldelayavg, *ipi_nr, delay;
        cycles_t *udelaymax, *udelaymin, *udelayavg;

        /*
         * Skip the ipi caller, not in irq context.
         */
        if (!in_irq())
                return;

        ldelaymax = &per_cpu(int_ldelaymax, smp_processor_id());
        ldelaymin = &per_cpu(int_ldelaymin, smp_processor_id());
        ldelayavg = &per_cpu(int_ldelayavg, smp_processor_id());
        udelaymax = &per_cpu(int_udelaymax, smp_processor_id());
        udelaymin = &per_cpu(int_udelaymin, smp_processor_id());
        udelayavg = &per_cpu(int_udelayavg, smp_processor_id());
        ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());

        rdtsc_barrier();
        time1 = get_cycles();
        rdtsc_barrier();

        wrap_read_lock_irq();

        rdtsc_barrier();
        time2 = get_cycles();
        rdtsc_barrier();
        delay = time2 - time1;
        *ldelaymax = max(*ldelaymax, delay);
        *ldelaymin = min(*ldelaymin, delay);
        *ldelayavg += delay;
        (*ipi_nr)++;
        prev = var[0];
        for (i = 1; i < NR_VARS; i++) {
                cur = var[i];
                if (cur != prev)
                        printk(KERN_ALERT
                                "Unequal cur %d/prev %d at i %d in interrupt\n",
                                cur, prev, i);
        }
        rdtsc_barrier();
        time1 = get_cycles();
        rdtsc_barrier();
        wrap_read_unlock_irq();
        rdtsc_barrier();
        time2 = get_cycles();
        rdtsc_barrier();
        delay = time2 - time1;
        *udelaymax = max(*udelaymax, delay);
        *udelaymin = min(*udelaymin, delay);
        *udelayavg += delay;
}

DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);

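/*
 * IPI handler for the trylock interrupt readers: spin on the irq read
 * trylock, counting attempts and successes per CPU.
 */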
static void trylock_interrupt_reader_ipi(void *data)
{
        int i;
        int prev, cur;

        /*
         * Skip the ipi caller, not in irq context.
         */
        if (!in_irq())
                return;

        per_cpu(trylock_int_iter, smp_processor_id())++;
        while (!wrap_read_trylock_irq())
                per_cpu(trylock_int_iter, smp_processor_id())++;
        per_cpu(trylock_int_success, smp_processor_id())++;
        prev = var[0];
        for (i = 1; i < NR_VARS; i++) {
                cur = var[i];
                if (cur != prev)
                        printk(KERN_ALERT
                                "Unequal cur %d/prev %d at i %d in interrupt\n",
                                cur, prev, i);
        }
        wrap_read_unlock_irq();
}


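/*
 * Periodically send an IPI to every online CPU so that interrupt_reader_ipi()
 * runs in interrupt context; on exit, print per-CPU lock/unlock latency
 * statistics.
 */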
static int interrupt_reader_thread(void *data)
{
        unsigned long iter = 0;
        int i;

        for_each_online_cpu(i) {
                per_cpu(int_ldelaymax, i) = 0;
                per_cpu(int_ldelaymin, i) = ULLONG_MAX;
                per_cpu(int_ldelayavg, i) = 0;
                per_cpu(int_udelaymax, i) = 0;
                per_cpu(int_udelaymin, i) = ULLONG_MAX;
                per_cpu(int_udelayavg, i) = 0;
                per_cpu(int_ipi_nr, i) = 0;
        }
        do {
                iter++;
                on_each_cpu(interrupt_reader_ipi, NULL, 0);
                if (INTERRUPT_READER_DELAY)
                        msleep(INTERRUPT_READER_DELAY);
        } while (!kthread_should_stop());
        printk("interrupt_reader_thread/%lu iterations : %lu\n",
                (unsigned long)data, iter);
        for_each_online_cpu(i) {
                if (!per_cpu(int_ipi_nr, i))
                        continue;
                per_cpu(int_ldelayavg, i) /= per_cpu(int_ipi_nr, i);
                per_cpu(int_udelayavg, i) /= per_cpu(int_ipi_nr, i);
                printk("interrupt readers on CPU %i, "
                        "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                        i,
                        calibrate_cycles(per_cpu(int_ldelaymin, i)),
                        calibrate_cycles(per_cpu(int_ldelayavg, i)),
                        calibrate_cycles(per_cpu(int_ldelaymax, i)));
                printk("interrupt readers on CPU %i, "
                        "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                        i,
                        calibrate_cycles(per_cpu(int_udelaymin, i)),
                        calibrate_cycles(per_cpu(int_udelayavg, i)),
                        calibrate_cycles(per_cpu(int_udelaymax, i)));
        }
        return 0;
}

static int trylock_interrupt_reader_thread(void *data)
{
        unsigned long iter = 0;
        int i;

        do {
                iter++;
                on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
                if (INTERRUPT_READER_DELAY)
                        msleep(INTERRUPT_READER_DELAY);
        } while (!kthread_should_stop());
        printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
                (unsigned long)data, iter);
        for_each_online_cpu(i) {
                printk("trylock interrupt readers on CPU %i, "
                        "iterations %lu, "
                        "successful iterations : %lu\n",
                        i, per_cpu(trylock_int_iter, i),
                        per_cpu(trylock_int_success, i));
                per_cpu(trylock_int_iter, i) = 0;
                per_cpu(trylock_int_success, i) = 0;
        }
        return 0;
}

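/*
 * Writer thread: take the write lock, store the same new value in every
 * var[] entry, and record lock/unlock latency in cycles.
 */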
static int writer_thread(void *data)
{
        int i;
        int new;
        unsigned long iter = 0;
        cycles_t time1, time2, delay;
        cycles_t ldelaymax = 0, ldelaymin = ULLONG_MAX, ldelayavg = 0;
        cycles_t udelaymax = 0, udelaymin = ULLONG_MAX, udelayavg = 0;

        printk("writer_thread/%lu running\n", (unsigned long)data);
        do {
                iter++;
                //preempt_disable();    /* for get_cycles accuracy */
                rdtsc_barrier();
                time1 = get_cycles();
                rdtsc_barrier();

                wrap_write_lock();

                rdtsc_barrier();
                time2 = get_cycles();
                rdtsc_barrier();
                delay = time2 - time1;
                ldelaymax = max(ldelaymax, delay);
                ldelaymin = min(ldelaymin, delay);
                ldelayavg += delay;
                new = (int)get_cycles();
                for (i = 0; i < NR_VARS; i++) {
                        var[i] = new;
                }

                rdtsc_barrier();
                time1 = get_cycles();
                rdtsc_barrier();

                wrap_write_unlock();

                rdtsc_barrier();
                time2 = get_cycles();
                rdtsc_barrier();
                delay = time2 - time1;
                udelaymax = max(udelaymax, delay);
                udelaymin = min(udelaymin, delay);
                udelayavg += delay;

                //preempt_enable();     /* for get_cycles accuracy */
                if (WRITER_DELAY > 0)
                        udelay(WRITER_DELAY);
        } while (!kthread_should_stop());
        ldelayavg /= iter;
        udelayavg /= iter;
        printk("writer_thread/%lu iterations : %lu, "
                "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                (unsigned long)data, iter,
                calibrate_cycles(ldelaymin),
                calibrate_cycles(ldelayavg),
                calibrate_cycles(ldelaymax));
        printk("writer_thread/%lu iterations : %lu, "
                "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
                (unsigned long)data, iter,
                calibrate_cycles(udelaymin),
                calibrate_cycles(udelayavg),
                calibrate_cycles(udelaymax));
        return 0;
}

#if (TEST_STD_RWLOCK)
static int trylock_writer_thread(void *data)
{
        int i;
        int new;
        unsigned long iter = 0, success = 0, fail = 0;

        printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
        do {
#if (TEST_INTERRUPTS)
                /* std write trylock cannot disable interrupts. */
                local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
                for (;;) {
                        iter++;
                        if (write_trylock(&std_rw_lock))
                                goto locked;
                }
#else
                for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
                        iter++;
                        if (write_trylock(&std_rw_lock))
                                goto locked;
                }
#endif
                fail++;
#if (TEST_INTERRUPTS)
                local_irq_enable();
#endif
                goto loop;
locked:
                success++;
                new = (int)get_cycles();
                for (i = 0; i < NR_VARS; i++) {
                        var[i] = new;
                }
#if (TEST_INTERRUPTS)
                write_unlock_irq(&std_rw_lock);
#else
                write_unlock(&std_rw_lock);
#endif
loop:
                if (TRYLOCK_WRITER_DELAY > 0)
                        udelay(TRYLOCK_WRITER_DELAY);
        } while (!kthread_should_stop());
        printk("trylock_writer_thread/%lu iterations : "
                "[try,success,fail after %d try], "
                "%lu,%lu,%lu\n",
                (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
                iter, success, fail);
        return 0;
}

#else /* !TEST_STD_RWLOCK */

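/*
 * Trylock writer for the wbias rwlock. As the wrapper names suggest,
 * wbias_write_trylock_else_subscribe() either takes the lock or registers
 * the writer's intent; further attempts use wbias_write_trylock_subscribed(),
 * and wbias_write_unsubscribe() drops the subscription when the writer
 * gives up.
 */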
static int trylock_writer_thread(void *data)
{
        int i;
        int new;
        unsigned long iter = 0, success = 0, fail = 0;

        printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
        do {
                iter++;
                if (wrap_write_trylock_else_subscribe())
                        goto locked;

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
                for (;;) {
                        iter++;
                        if (wrap_write_trylock_subscribed())
                                goto locked;
                }
#else
                for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
                        iter++;
                        if (wrap_write_trylock_subscribed())
                                goto locked;
                }
#endif
                fail++;
                wrap_write_unsubscribe();
                goto loop;
locked:
                success++;
                new = (int)get_cycles();
                for (i = 0; i < NR_VARS; i++) {
                        var[i] = new;
                }
                wrap_write_unlock();
loop:
                if (TRYLOCK_WRITER_DELAY > 0)
                        udelay(TRYLOCK_WRITER_DELAY);
        } while (!kthread_should_stop());
        printk("trylock_writer_thread/%lu iterations : "
                "[try,success,fail after %d try], "
                "%lu,%lu,%lu\n",
                (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
                iter, success, fail);
        return 0;
}

#endif /* TEST_STD_RWLOCK */

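/*
 * Spawn all reader and writer threads for the high contention test.
 */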
static void wbias_rwlock_create(void)
{
        unsigned long i;

        for (i = 0; i < NR_PREADERS; i++) {
                printk("starting preemptable reader thread %lu\n", i);
                preader_threads[i] = kthread_run(preader_thread, (void *)i,
                        "wbiasrwlock_preader");
                BUG_ON(!preader_threads[i]);
        }

        for (i = 0; i < NR_NPREADERS; i++) {
                printk("starting non-preemptable reader thread %lu\n", i);
                npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
                        "wbiasrwlock_npreader");
                BUG_ON(!npreader_threads[i]);
        }

        for (i = 0; i < NR_TRYLOCK_READERS; i++) {
                printk("starting trylock reader thread %lu\n", i);
                trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
                        (void *)i, "wbiasrwlock_trylock_reader");
                BUG_ON(!trylock_reader_threads[i]);
        }
        for (i = 0; i < NR_INTERRUPT_READERS; i++) {
                printk("starting interrupt reader %lu\n", i);
                interrupt_reader[i] = kthread_run(interrupt_reader_thread,
                        (void *)i,
                        "wbiasrwlock_interrupt_reader");
        }
        for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
                printk("starting trylock interrupt reader %lu\n", i);
                trylock_interrupt_reader[i] =
                        kthread_run(trylock_interrupt_reader_thread,
                        (void *)i, "wbiasrwlock_trylock_interrupt_reader");
        }
        for (i = 0; i < NR_WRITERS; i++) {
                printk("starting writer thread %lu\n", i);
                writer_threads[i] = kthread_run(writer_thread, (void *)i,
                        "wbiasrwlock_writer");
                BUG_ON(!writer_threads[i]);
        }
        for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
                printk("starting trylock writer thread %lu\n", i);
                trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
                        (void *)i, "wbiasrwlock_trylock_writer");
                BUG_ON(!trylock_writer_threads[i]);
        }
}

static void wbias_rwlock_stop(void)
{
        unsigned long i;

        for (i = 0; i < NR_WRITERS; i++)
                kthread_stop(writer_threads[i]);
        for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
                kthread_stop(trylock_writer_threads[i]);
        for (i = 0; i < NR_NPREADERS; i++)
                kthread_stop(npreader_threads[i]);
        for (i = 0; i < NR_PREADERS; i++)
                kthread_stop(preader_threads[i]);
        for (i = 0; i < NR_TRYLOCK_READERS; i++)
                kthread_stop(trylock_reader_threads[i]);
        for (i = 0; i < NR_INTERRUPT_READERS; i++)
                kthread_stop(interrupt_reader[i]);
        for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
                kthread_stop(trylock_interrupt_reader[i]);
}


static void perform_test(const char *name, void (*callback)(void))
{
        printk("%s\n", name);
        callback();
}

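/*
 * Test entry point: opening /proc/testwbiasrwlock runs the whole sequence
 * (get_cycles() calibration, single writer, single trylock writer, single
 * reader, multiple readers, then the high contention test). Results are
 * printed to the kernel log. -EPERM is returned so the open itself always
 * fails once the test is done.
 */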
static int my_open(struct inode *inode, struct file *file)
{
        unsigned long i;
        cycles_t time1, time2, delay;

        printk("** get_cycles calibration **\n");
        cycles_calibration_min = ULLONG_MAX;
        cycles_calibration_avg = 0;
        cycles_calibration_max = 0;

        local_irq_disable();
        for (i = 0; i < 10; i++) {
                rdtsc_barrier();
                time1 = get_cycles();
                rdtsc_barrier();
                rdtsc_barrier();
                time2 = get_cycles();
                rdtsc_barrier();
                delay = time2 - time1;
                cycles_calibration_min = min(cycles_calibration_min, delay);
                cycles_calibration_avg += delay;
                cycles_calibration_max = max(cycles_calibration_max, delay);
        }
        cycles_calibration_avg /= 10;
        local_irq_enable();

        printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
                "results calibrated on avg\n",
                cycles_calibration_min,
                cycles_calibration_avg,
                cycles_calibration_max);
        printk("\n");

        printk("** Single writer test, no contention **\n");
        wbias_rwlock_profile_latency_reset();
        writer_threads[0] = kthread_run(writer_thread, (void *)0,
                "wbiasrwlock_writer");
        BUG_ON(!writer_threads[0]);
        ssleep(SINGLE_WRITER_TEST_DURATION);
        kthread_stop(writer_threads[0]);
        printk("\n");

        wbias_rwlock_profile_latency_print();

        printk("** Single trylock writer test, no contention **\n");
        wbias_rwlock_profile_latency_reset();
        trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
                (void *)0,
                "trylock_wbiasrwlock_writer");
        BUG_ON(!trylock_writer_threads[0]);
        ssleep(SINGLE_WRITER_TEST_DURATION);
        kthread_stop(trylock_writer_threads[0]);
        printk("\n");

        wbias_rwlock_profile_latency_print();

        /* Preemptable readers are only compiled in when TEST_PREEMPT is set. */
#if (TEST_PREEMPT)
        printk("** Single preemptable reader test, no contention **\n");
        wbias_rwlock_profile_latency_reset();
        preader_threads[0] = kthread_run(preader_thread, (void *)0,
                "wbiasrwlock_preader");
        BUG_ON(!preader_threads[0]);
        ssleep(SINGLE_READER_TEST_DURATION);
        kthread_stop(preader_threads[0]);
        printk("\n");

        wbias_rwlock_profile_latency_print();
#endif

        printk("** Single non-preemptable reader test, no contention **\n");
        wbias_rwlock_profile_latency_reset();
        npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
                "wbiasrwlock_npreader");
        BUG_ON(!npreader_threads[0]);
        ssleep(SINGLE_READER_TEST_DURATION);
        kthread_stop(npreader_threads[0]);
        printk("\n");

        wbias_rwlock_profile_latency_print();

        printk("** Multiple p/non-p readers test, no contention **\n");
        wbias_rwlock_profile_latency_reset();
        for (i = 0; i < NR_PREADERS; i++) {
                printk("starting preader thread %lu\n", i);
                preader_threads[i] = kthread_run(preader_thread, (void *)i,
                        "wbiasrwlock_preader");
                BUG_ON(!preader_threads[i]);
        }
        for (i = 0; i < NR_NPREADERS; i++) {
                printk("starting npreader thread %lu\n", i);
                npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
                        "wbiasrwlock_npreader");
                BUG_ON(!npreader_threads[i]);
        }
        ssleep(SINGLE_READER_TEST_DURATION);
        for (i = 0; i < NR_NPREADERS; i++)
                kthread_stop(npreader_threads[i]);
        for (i = 0; i < NR_PREADERS; i++)
                kthread_stop(preader_threads[i]);
        printk("\n");

        wbias_rwlock_profile_latency_print();

        printk("** High contention test **\n");
        wbias_rwlock_profile_latency_reset();
        perform_test("wbias-rwlock-create", wbias_rwlock_create);
        ssleep(TEST_DURATION);
        perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
        printk("\n");
        wbias_rwlock_profile_latency_print();

        return -EPERM;
}

static struct file_operations my_operations = {
        .open = my_open,
};

int init_module(void)
{
        pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
        if (pentry)
                pentry->proc_fops = &my_operations;

        printk("PTHREAD_ROFFSET  : %016lX\n", PTHREAD_ROFFSET);
        printk("PTHREAD_RMASK    : %016lX\n", PTHREAD_RMASK);
        printk("NPTHREAD_ROFFSET : %016lX\n", NPTHREAD_ROFFSET);
        printk("NPTHREAD_RMASK   : %016lX\n", NPTHREAD_RMASK);
        printk("SOFTIRQ_ROFFSET  : %016lX\n", SOFTIRQ_ROFFSET);
        printk("SOFTIRQ_RMASK    : %016lX\n", SOFTIRQ_RMASK);
        printk("HARDIRQ_ROFFSET  : %016lX\n", HARDIRQ_ROFFSET);
        printk("HARDIRQ_RMASK    : %016lX\n", HARDIRQ_RMASK);
        printk("PTHREAD_WOFFSET  : %016lX\n", PTHREAD_WOFFSET);
        printk("PTHREAD_WMASK    : %016lX\n", PTHREAD_WMASK);
        printk("NPTHREAD_WOFFSET : %016lX\n", NPTHREAD_WOFFSET);
        printk("NPTHREAD_WMASK   : %016lX\n", NPTHREAD_WMASK);
        printk("WRITER_MUTEX     : %016lX\n", WRITER_MUTEX);
        printk("SOFTIRQ_WMASK    : %016lX\n", SOFTIRQ_WMASK);
        printk("HARDIRQ_WMASK    : %016lX\n", HARDIRQ_WMASK);
        printk("WQ_MUTEX         : %016lX\n", WQ_MUTEX);
        printk("WQ_ACTIVE        : %016lX\n", WQ_ACTIVE);

        return 0;
}

void cleanup_module(void)
{
        remove_proc_entry("testwbiasrwlock", NULL);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("wbias rwlock test");