+ /* Keep a copy of the client-supplied configuration in the channel backend. */
+ memcpy(&chanb->config, config, sizeof(*config));
+
+ /* Per-cpu buffer size: control (prior to backend) */
+ /*
+  * Layout idiom used below: offset_align(off, align) returns the padding
+  * needed to round `off` up to a multiple of `align`, so each pair of
+  * statements (pad, then add sizeof) places the next object aligned.
+  * NOTE(review): plain `=` here (vs. the `+=` pattern used everywhere
+  * below) discards any previously accumulated shmsize -- correct only if
+  * shmsize is 0 at this point; confirm its initialization above this hunk.
+  */
+ shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
+
+ /* Per-cpu buffer size: backend */
+ /* num_subbuf + 1 is the worst case */
+ /*
+  * NOTE(review): the extra sub-buffer presumably lets a reader own one
+  * sub-buffer while the writer still has num_subbuf available -- confirm
+  * against the sub-buffer exchange scheme used by this backend.
+  */
+ num_subbuf_alloc = num_subbuf + 1;
+ /* Array of shm references to per-sub-buffer backend pages. */
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+ /* The sub-buffer data area itself is page-aligned. */
+ shmsize += offset_align(shmsize, PAGE_SIZE);
+ shmsize += subbuf_size * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
+ /* One subbuffer bookkeeping entry per sub-buffer (not the +1 spare). */
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
+ /* Per-cpu buffer size: control (after backend) */
+ shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
+ shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
+ shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
+ shmsize += sizeof(struct commit_counters_cold) * num_subbuf;