/* update test */
/* lttv.git / trunk / tests / kernel / test-wbias-rwlock.c */
1 /* test-wbias-rwlock.c
2 *
3 */
4
5 #include <linux/module.h>
6 #include <linux/proc_fs.h>
7 #include <linux/sched.h>
8 #include <linux/timex.h>
9 #include <linux/wbias-rwlock.h>
10 #include <linux/kthread.h>
11 #include <linux/delay.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/spinlock.h>
16 #include <asm/ptrace.h>
17
18 /* Test with no contention duration, in seconds */
19 #define SINGLE_WRITER_TEST_DURATION 10
20 #define SINGLE_READER_TEST_DURATION 10
21 #define MULTIPLE_READERS_TEST_DURATION 10
22
23 /* Test duration, in seconds */
24 #define TEST_DURATION 60
25
26 #define NR_VARS 100
27 #define NR_WRITERS 2
28 #define NR_TRYLOCK_WRITERS 1
29 #define NR_READERS 4
30 #define NR_TRYLOCK_READERS 1
31
32 /*
33 * 1 : test standard rwlock
34 * 0 : test wbiasrwlock
35 */
36 #define TEST_STD_RWLOCK 0
37
38 /*
39 * 1 : test with thread and interrupt readers.
40 * 0 : test only with thread readers.
41 */
42 #define TEST_INTERRUPTS 1
43
44 #if (TEST_INTERRUPTS)
45 #define NR_INTERRUPT_READERS 1
46 #define NR_TRYLOCK_INTERRUPT_READERS 1
47 #else
48 #define NR_INTERRUPT_READERS 0
49 #define NR_TRYLOCK_INTERRUPT_READERS 0
50 #endif
51
52 /*
53 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
54 * starve readers.
55 */
56 #define WRITER_DELAY 100
57 #define TRYLOCK_WRITER_DELAY 1000
58
59 /*
60 * Number of iterations after which a trylock writer fails.
61 * -1 for infinite loop.
62 */
63 #define TRYLOCK_WRITERS_FAIL_ITER 100
64
65 /* Thread and interrupt reader delay, in ms */
66 #define THREAD_READER_DELAY 0 /* busy loop */
67 #define INTERRUPT_READER_DELAY 100
68
69 static int var[NR_VARS];
70 static struct task_struct *reader_threads[NR_READERS];
71 static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
72 static struct task_struct *writer_threads[NR_WRITERS];
73 static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
74 static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
75 static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];
76
77 #if (TEST_STD_RWLOCK)
78
79 static DEFINE_RWLOCK(std_rw_lock);
80
81 #define wrap_read_lock() read_lock(&std_rw_lock)
82 #define wrap_read_trylock() read_trylock(&std_rw_lock)
83 #define wrap_read_unlock() read_unlock(&std_rw_lock)
84
85 #define wrap_read_lock_irq() read_lock(&std_rw_lock)
86 #define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
87 #define wrap_read_unlock_irq() read_unlock(&std_rw_lock)
88
89 #if (TEST_INTERRUPTS)
90 #define wrap_write_lock() write_lock_irq(&std_rw_lock)
91 #define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
92 #else
93 #define wrap_write_lock() write_lock(&std_rw_lock)
94 #define wrap_write_unlock() write_unlock(&std_rw_lock)
95 #endif
96
97 #else
98
99 static DEFINE_WBIAS_RWLOCK(wbiasrwlock);
100
101 #define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
102 #define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
103 #define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)
104
105 #define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
106 #define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
107 #define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)
108
109 #if (TEST_INTERRUPTS)
110 #define wrap_write_lock() wbias_write_lock_irq(&wbiasrwlock)
111 #define wrap_write_unlock() wbias_write_unlock_irq(&wbiasrwlock)
112 #else
113 #define wrap_write_lock() wbias_write_lock(&wbiasrwlock)
114 #define wrap_write_unlock() wbias_write_unlock(&wbiasrwlock)
115 #endif
116
117 #endif
118
119 static cycles_t cycles_calibration_min,
120 cycles_calibration_avg,
121 cycles_calibration_max;
122
123 static inline cycles_t calibrate_cycles(cycles_t cycles)
124 {
125 return cycles - cycles_calibration_avg;
126 }
127
128 struct proc_dir_entry *pentry = NULL;
129
130 static int reader_thread(void *data)
131 {
132 int i;
133 int prev, cur;
134 unsigned long iter = 0;
135 cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
136 delayavg = 0;
137
138 printk("reader_thread/%lu runnning\n", (unsigned long)data);
139 do {
140 iter++;
141 //preempt_disable(); /* for get_cycles accuracy */
142 rdtsc_barrier();
143 time1 = get_cycles();
144 rdtsc_barrier();
145
146 wrap_read_lock();
147
148 rdtsc_barrier();
149 time2 = get_cycles();
150 rdtsc_barrier();
151 delay = time2 - time1;
152 delaymax = max(delaymax, delay);
153 delaymin = min(delaymin, delay);
154 delayavg += delay;
155 prev = var[0];
156 for (i = 1; i < NR_VARS; i++) {
157 cur = var[i];
158 if (cur != prev)
159 printk(KERN_ALERT
160 "Unequal cur %d/prev %d at i %d, iter %lu "
161 "in thread\n", cur, prev, i, iter);
162 }
163
164 wrap_read_unlock();
165
166 //preempt_enable(); /* for get_cycles accuracy */
167 if (THREAD_READER_DELAY)
168 msleep(THREAD_READER_DELAY);
169 } while (!kthread_should_stop());
170 if (!iter) {
171 printk("reader_thread/%lu iterations : %lu",
172 (unsigned long)data, iter);
173 } else {
174 delayavg /= iter;
175 printk("reader_thread/%lu iterations : %lu, "
176 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
177 (unsigned long)data, iter,
178 calibrate_cycles(delaymin),
179 calibrate_cycles(delayavg),
180 calibrate_cycles(delaymax));
181 }
182 return 0;
183 }
184
185 static int trylock_reader_thread(void *data)
186 {
187 int i;
188 int prev, cur;
189 unsigned long iter = 0, success_iter = 0;
190
191 printk("trylock_reader_thread/%lu runnning\n", (unsigned long)data);
192 do {
193 while (!wrap_read_trylock())
194 iter++;
195 success_iter++;
196 prev = var[0];
197 for (i = 1; i < NR_VARS; i++) {
198 cur = var[i];
199 if (cur != prev)
200 printk(KERN_ALERT
201 "Unequal cur %d/prev %d at i %d, iter %lu "
202 "in thread\n", cur, prev, i, iter);
203 }
204 wrap_read_unlock();
205 if (THREAD_READER_DELAY)
206 msleep(THREAD_READER_DELAY);
207 } while (!kthread_should_stop());
208 printk("trylock_reader_thread/%lu iterations : %lu, "
209 "successful iterations : %lu\n",
210 (unsigned long)data, iter, success_iter);
211 return 0;
212 }
213
214 DEFINE_PER_CPU(cycles_t, int_delaymin);
215 DEFINE_PER_CPU(cycles_t, int_delayavg);
216 DEFINE_PER_CPU(cycles_t, int_delaymax);
217 DEFINE_PER_CPU(cycles_t, int_ipi_nr);
218
219 static void interrupt_reader_ipi(void *data)
220 {
221 int i;
222 int prev, cur;
223 cycles_t time1, time2;
224 cycles_t *delaymax, *delaymin, *delayavg, *ipi_nr, delay;
225
226 /*
227 * Skip the ipi caller, not in irq context.
228 */
229 if (!in_irq())
230 return;
231
232 delaymax = &per_cpu(int_delaymax, smp_processor_id());
233 delaymin = &per_cpu(int_delaymin, smp_processor_id());
234 delayavg = &per_cpu(int_delayavg, smp_processor_id());
235 ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());
236
237 rdtsc_barrier();
238 time1 = get_cycles();
239 rdtsc_barrier();
240
241 wrap_read_lock_irq();
242
243 rdtsc_barrier();
244 time2 = get_cycles();
245 rdtsc_barrier();
246 delay = time2 - time1;
247 *delaymax = max(*delaymax, delay);
248 *delaymin = min(*delaymin, delay);
249 *delayavg += delay;
250 (*ipi_nr)++;
251 prev = var[0];
252 for (i = 1; i < NR_VARS; i++) {
253 cur = var[i];
254 if (cur != prev)
255 printk(KERN_ALERT
256 "Unequal cur %d/prev %d at i %d in interrupt\n",
257 cur, prev, i);
258 }
259 wrap_read_unlock_irq();
260 }
261
262 DEFINE_PER_CPU(unsigned long, trylock_int_iter);
263 DEFINE_PER_CPU(unsigned long, trylock_int_success);
264
265 static void trylock_interrupt_reader_ipi(void *data)
266 {
267 int i;
268 int prev, cur;
269
270 /*
271 * Skip the ipi caller, not in irq context.
272 */
273 if (!in_irq())
274 return;
275
276 per_cpu(trylock_int_iter, smp_processor_id())++;
277 while (!wrap_read_trylock_irq())
278 per_cpu(trylock_int_iter, smp_processor_id())++;
279 per_cpu(trylock_int_success, smp_processor_id())++;
280 prev = var[0];
281 for (i = 1; i < NR_VARS; i++) {
282 cur = var[i];
283 if (cur != prev)
284 printk(KERN_ALERT
285 "Unequal cur %d/prev %d at i %d in interrupt\n",
286 cur, prev, i);
287 }
288 wrap_read_unlock_irq();
289 }
290
291
292 static int interrupt_reader_thread(void *data)
293 {
294 unsigned long iter = 0;
295 int i;
296
297 for_each_online_cpu(i) {
298 per_cpu(int_delaymax, i) = 0;
299 per_cpu(int_delaymin, i) = ULLONG_MAX;
300 per_cpu(int_delayavg, i) = 0;
301 per_cpu(int_ipi_nr, i) = 0;
302 }
303 do {
304 iter++;
305 on_each_cpu(interrupt_reader_ipi, NULL, 0);
306 if (INTERRUPT_READER_DELAY)
307 msleep(INTERRUPT_READER_DELAY);
308 } while (!kthread_should_stop());
309 printk("interrupt_reader_thread/%lu iterations : %lu\n",
310 (unsigned long)data, iter);
311 for_each_online_cpu(i) {
312 if (!per_cpu(int_ipi_nr, i))
313 continue;
314 per_cpu(int_delayavg, i) /= per_cpu(int_ipi_nr, i);
315 printk("interrupt readers on CPU %i, "
316 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
317 i,
318 calibrate_cycles(per_cpu(int_delaymin, i)),
319 calibrate_cycles(per_cpu(int_delayavg, i)),
320 calibrate_cycles(per_cpu(int_delaymax, i)));
321 }
322 return 0;
323 }
324
325 static int trylock_interrupt_reader_thread(void *data)
326 {
327 unsigned long iter = 0;
328 int i;
329
330 do {
331 iter++;
332 on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
333 if (INTERRUPT_READER_DELAY)
334 msleep(INTERRUPT_READER_DELAY);
335 } while (!kthread_should_stop());
336 printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
337 (unsigned long)data, iter);
338 for_each_online_cpu(i) {
339 printk("trylock interrupt readers on CPU %i, "
340 "iterations %lu, "
341 "successful iterations : %lu\n",
342 i, per_cpu(trylock_int_iter, i),
343 per_cpu(trylock_int_success, i));
344 per_cpu(trylock_int_iter, i) = 0;
345 per_cpu(trylock_int_success, i) = 0;
346 }
347 return 0;
348 }
349
350 static int writer_thread(void *data)
351 {
352 int i;
353 int new;
354 unsigned long iter = 0;
355 cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
356 delayavg = 0;
357
358 printk("writer_thread/%lu runnning\n", (unsigned long)data);
359 do {
360 iter++;
361 //preempt_disable(); /* for get_cycles accuracy */
362 rdtsc_barrier();
363 time1 = get_cycles();
364 rdtsc_barrier();
365
366 wrap_write_lock();
367
368 rdtsc_barrier();
369 time2 = get_cycles();
370 rdtsc_barrier();
371 delay = time2 - time1;
372 delaymax = max(delaymax, delay);
373 delaymin = min(delaymin, delay);
374 delayavg += delay;
375 new = (int)get_cycles();
376 for (i = 0; i < NR_VARS; i++) {
377 var[i] = new;
378 }
379
380 wrap_write_unlock();
381
382 //preempt_enable(); /* for get_cycles accuracy */
383 if (WRITER_DELAY > 0)
384 udelay(WRITER_DELAY);
385 } while (!kthread_should_stop());
386 delayavg /= iter;
387 printk("writer_thread/%lu iterations : %lu, "
388 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
389 (unsigned long)data, iter,
390 calibrate_cycles(delaymin),
391 calibrate_cycles(delayavg),
392 calibrate_cycles(delaymax));
393 return 0;
394 }
395
396 #if (TEST_STD_RWLOCK)
397 static int trylock_writer_thread(void *data)
398 {
399 int i;
400 int new;
401 unsigned long iter = 0, success = 0, fail = 0;
402
403 printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
404 do {
405 #if (TEST_INTERRUPTS)
406 /* std write trylock cannot disable interrupts. */
407 local_irq_disable();
408 #endif
409
410 #if (TRYLOCK_WRITERS_FAIL_ITER == -1)
411 for (;;) {
412 iter++;
413 if (write_trylock(&std_rw_lock))
414 goto locked;
415 }
416 #else
417 for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
418 iter++;
419 if (write_trylock(&std_rw_lock))
420 goto locked;
421 }
422 #endif
423 fail++;
424 #if (TEST_INTERRUPTS)
425 local_irq_enable();
426 #endif
427 goto loop;
428 locked:
429 success++;
430 new = (int)get_cycles();
431 for (i = 0; i < NR_VARS; i++) {
432 var[i] = new;
433 }
434 #if (TEST_INTERRUPTS)
435 write_unlock_irq(&std_rw_lock);
436 #else
437 write_unlock(&std_rw_lock);
438 #endif
439 loop:
440 if (TRYLOCK_WRITER_DELAY > 0)
441 udelay(TRYLOCK_WRITER_DELAY);
442 } while (!kthread_should_stop());
443 printk("trylock_writer_thread/%lu iterations : "
444 "[try,success,fail after %d try], "
445 "%lu,%lu,%lu\n",
446 (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
447 iter, success, fail);
448 return 0;
449 }
450
451 #else /* !TEST_STD_RWLOCK */
452
453 static int trylock_writer_thread(void *data)
454 {
455 int i;
456 int new;
457 unsigned long iter = 0, success = 0, fail = 0;
458
459 printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
460 do {
461 iter++;
462 #if (TEST_INTERRUPTS)
463 if (wbias_write_trylock_irq_else_subscribe(&wbiasrwlock))
464 #else
465 if (wbias_write_trylock_else_subscribe(&wbiasrwlock))
466 #endif
467 goto locked;
468
469 #if (TRYLOCK_WRITERS_FAIL_ITER == -1)
470 for (;;) {
471 iter++;
472 #if (TEST_INTERRUPTS)
473 if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
474 #else
475 if (wbias_write_trylock_subscribed(&wbiasrwlock))
476 #endif
477 goto locked;
478 }
479 #else
480 for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
481 iter++;
482 #if (TEST_INTERRUPTS)
483 if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
484 #else
485 if (wbias_write_trylock_subscribed(&wbiasrwlock))
486 #endif
487 goto locked;
488 }
489 #endif
490 fail++;
491 wbias_write_unsubscribe(&wbiasrwlock);
492 goto loop;
493 locked:
494 success++;
495 new = (int)get_cycles();
496 for (i = 0; i < NR_VARS; i++) {
497 var[i] = new;
498 }
499 #if (TEST_INTERRUPTS)
500 wbias_write_unlock_irq(&wbiasrwlock);
501 #else
502 wbias_write_unlock(&wbiasrwlock);
503 #endif
504 loop:
505 if (TRYLOCK_WRITER_DELAY > 0)
506 udelay(TRYLOCK_WRITER_DELAY);
507 } while (!kthread_should_stop());
508 printk("trylock_writer_thread/%lu iterations : "
509 "[try,success,fail after %d try], "
510 "%lu,%lu,%lu\n",
511 (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
512 iter, success, fail);
513 return 0;
514 }
515
516 #endif /* TEST_STD_RWLOCK */
517
518 static void wbias_rwlock_create(void)
519 {
520 unsigned long i;
521
522 for (i = 0; i < NR_READERS; i++) {
523 printk("starting reader thread %lu\n", i);
524 reader_threads[i] = kthread_run(reader_thread, (void *)i,
525 "wbiasrwlock_reader");
526 BUG_ON(!reader_threads[i]);
527 }
528
529 for (i = 0; i < NR_TRYLOCK_READERS; i++) {
530 printk("starting trylock reader thread %lu\n", i);
531 trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
532 (void *)i, "wbiasrwlock_trylock_reader");
533 BUG_ON(!trylock_reader_threads[i]);
534 }
535 for (i = 0; i < NR_INTERRUPT_READERS; i++) {
536 printk("starting interrupt reader %lu\n", i);
537 interrupt_reader[i] = kthread_run(interrupt_reader_thread,
538 (void *)i,
539 "wbiasrwlock_interrupt_reader");
540 }
541 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
542 printk("starting trylock interrupt reader %lu\n", i);
543 trylock_interrupt_reader[i] =
544 kthread_run(trylock_interrupt_reader_thread,
545 (void *)i, "wbiasrwlock_trylock_interrupt_reader");
546 }
547 for (i = 0; i < NR_WRITERS; i++) {
548 printk("starting writer thread %lu\n", i);
549 writer_threads[i] = kthread_run(writer_thread, (void *)i,
550 "wbiasrwlock_writer");
551 BUG_ON(!writer_threads[i]);
552 }
553 for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
554 printk("starting trylock writer thread %lu\n", i);
555 trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
556 (void *)i, "wbiasrwlock_trylock_writer");
557 BUG_ON(!trylock_writer_threads[i]);
558 }
559 }
560
561 static void wbias_rwlock_stop(void)
562 {
563 unsigned long i;
564
565 for (i = 0; i < NR_WRITERS; i++)
566 kthread_stop(writer_threads[i]);
567 for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
568 kthread_stop(trylock_writer_threads[i]);
569 for (i = 0; i < NR_READERS; i++)
570 kthread_stop(reader_threads[i]);
571 for (i = 0; i < NR_TRYLOCK_READERS; i++)
572 kthread_stop(trylock_reader_threads[i]);
573 for (i = 0; i < NR_INTERRUPT_READERS; i++)
574 kthread_stop(interrupt_reader[i]);
575 for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
576 kthread_stop(trylock_interrupt_reader[i]);
577 }
578
579
580 static void perform_test(const char *name, void (*callback)(void))
581 {
582 printk("%s\n", name);
583 callback();
584 }
585
586 static int my_open(struct inode *inode, struct file *file)
587 {
588 unsigned long i;
589 cycles_t time1, time2, delay;
590
591 printk("** get_cycles calibration **\n");
592 cycles_calibration_min = ULLONG_MAX;
593 cycles_calibration_avg = 0;
594 cycles_calibration_max = 0;
595
596 local_irq_disable();
597 for (i = 0; i < 10; i++) {
598 rdtsc_barrier();
599 time1 = get_cycles();
600 rdtsc_barrier();
601 rdtsc_barrier();
602 time2 = get_cycles();
603 rdtsc_barrier();
604 delay = time2 - time1;
605 cycles_calibration_min = min(cycles_calibration_min, delay);
606 cycles_calibration_avg += delay;
607 cycles_calibration_max = max(cycles_calibration_max, delay);
608 }
609 cycles_calibration_avg /= 10;
610 local_irq_enable();
611
612 printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
613 "results calibrated on avg\n",
614 cycles_calibration_min,
615 cycles_calibration_avg,
616 cycles_calibration_max);
617 printk("\n");
618
619 printk("** Single writer test, no contention **\n");
620 wbias_rwlock_profile_latency_reset();
621 writer_threads[0] = kthread_run(writer_thread, (void *)0,
622 "wbiasrwlock_writer");
623 BUG_ON(!writer_threads[0]);
624 ssleep(SINGLE_WRITER_TEST_DURATION);
625 kthread_stop(writer_threads[0]);
626 printk("\n");
627
628 wbias_rwlock_profile_latency_print();
629
630 printk("** Single trylock writer test, no contention **\n");
631 wbias_rwlock_profile_latency_reset();
632 trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
633 (void *)0,
634 "trylock_wbiasrwlock_writer");
635 BUG_ON(!trylock_writer_threads[0]);
636 ssleep(SINGLE_WRITER_TEST_DURATION);
637 kthread_stop(trylock_writer_threads[0]);
638 printk("\n");
639
640 wbias_rwlock_profile_latency_print();
641
642 printk("** Single reader test, no contention **\n");
643 wbias_rwlock_profile_latency_reset();
644 reader_threads[0] = kthread_run(reader_thread, (void *)0,
645 "wbiasrwlock_reader");
646 BUG_ON(!reader_threads[0]);
647 ssleep(SINGLE_READER_TEST_DURATION);
648 kthread_stop(reader_threads[0]);
649 printk("\n");
650
651 wbias_rwlock_profile_latency_print();
652
653 printk("** Multiple readers test, no contention **\n");
654 wbias_rwlock_profile_latency_reset();
655 for (i = 0; i < NR_READERS; i++) {
656 printk("starting reader thread %lu\n", i);
657 reader_threads[i] = kthread_run(reader_thread, (void *)i,
658 "wbiasrwlock_reader");
659 BUG_ON(!reader_threads[i]);
660 }
661 ssleep(SINGLE_READER_TEST_DURATION);
662 for (i = 0; i < NR_READERS; i++)
663 kthread_stop(reader_threads[i]);
664 printk("\n");
665
666 wbias_rwlock_profile_latency_print();
667
668 printk("** High contention test **\n");
669 wbias_rwlock_profile_latency_reset();
670 perform_test("wbias-rwlock-create", wbias_rwlock_create);
671 ssleep(TEST_DURATION);
672 perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
673 printk("\n");
674 wbias_rwlock_profile_latency_print();
675
676 return -EPERM;
677 }
678
679
680 static struct file_operations my_operations = {
681 .open = my_open,
682 };
683
684 int init_module(void)
685 {
686 pentry = create_proc_entry("testwbiasrwlock", 0444, NULL);
687 if (pentry)
688 pentry->proc_fops = &my_operations;
689
690 printk("PTHREAD_ROFFSET : %lX\n", PTHREAD_ROFFSET);
691 printk("PTHREAD_RMASK : %lX\n", PTHREAD_RMASK);
692 printk("NPTHREAD_ROFFSET : %lX\n", THREAD_ROFFSET);
693 printk("NPTHREAD_RMASK : %lX\n", THREAD_RMASK);
694 printk("SOFTIRQ_ROFFSET : %lX\n", SOFTIRQ_ROFFSET);
695 printk("SOFTIRQ_RMASK : %lX\n", SOFTIRQ_RMASK);
696 printk("HARDIRQ_ROFFSET : %lX\n", HARDIRQ_ROFFSET);
697 printk("HARDIRQ_RMASK : %lX\n", HARDIRQ_RMASK);
698 printk("SUBSCRIBERS_WOFFSET : %lX\n", SUBSCRIBERS_WOFFSET);
699 printk("SUBSCRIBERS_WMASK : %lX\n", SUBSCRIBERS_WMASK);
700 printk("WRITER_MUTEX : %lX\n", WRITER_MUTEX);
701 printk("NPTHREAD_WMASK : %lX\n", SOFTIRQ_WMASK);
702 printk("SOFTIRQ_WMASK : %lX\n", SOFTIRQ_WMASK);
703 printk("HARDIRQ_WMASK : %lX\n", HARDIRQ_WMASK);
704
705 return 0;
706 }
707
708 void cleanup_module(void)
709 {
710 remove_proc_entry("testwbiasrwlock", NULL);
711 }
712
713 MODULE_LICENSE("GPL");
714 MODULE_AUTHOR("Mathieu Desnoyers");
715 MODULE_DESCRIPTION("wbias rwlock test");
/* end of file (gitweb page footer removed) */