/*
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Re-using code from kernel/relay.c, which is why it is licensed under
 * the GPL.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/version.h>

#include <wrapper/splice.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/ringbuffer/vfs.h>

#if 0
#define printk_dbg(fmt, args...)	printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
                int origin)
{
        return -ESPIPE;
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
/*
 * Release pages from the buffer so splice pipe_to_file can move them.
 * Called after the pipe has been populated with buffer pages.
 */
static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
                                             struct pipe_buffer *pbuf)
{
        __free_page(pbuf->page);
}
static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
#endif
        .confirm = generic_pipe_buf_confirm,
        .release = lib_ring_buffer_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};
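/*
 * Ownership note: once a buffer page has been moved into the pipe, the pipe
 * consumer owns it and ring_buffer_pipe_buf_ops.release (above) frees it.
 * This is why subbuf_splice_actor() below swaps a freshly allocated page into
 * the buffer backend for every page it hands to the pipe.
 */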
/*
 * Page release operation after splice pipe_to_file ends.
 */
static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
                                         unsigned int i)
{
        __free_page(spd->pages[i]);
}
/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 */
static int subbuf_splice_actor(struct file *in,
                               loff_t *ppos,
                               struct pipe_inode_info *pipe,
                               size_t len,
                               unsigned int flags,
                               struct lib_ring_buffer *buf)
{
        struct channel *chan = buf->backend.chan;
        const struct lib_ring_buffer_config *config = &chan->backend.config;
        unsigned int poff, subbuf_pages, nr_pages;
        struct page *pages[PIPE_DEF_BUFFERS];
        struct partial_page partial[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
                .partial = partial,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
                .flags = flags,
#endif
                .ops = &ring_buffer_pipe_buf_ops,
                .spd_release = lib_ring_buffer_page_release,
        };
        unsigned long consumed_old, roffset;
        unsigned long bytes_avail;
        /*
         * Check that a GET_SUBBUF ioctl has been done before.
         */
        WARN_ON(atomic_long_read(&buf->active_readers) != 1);
        consumed_old = lib_ring_buffer_get_consumed(config, buf);
        consumed_old += *ppos;
        /*
         * Adjust read len, if longer than what is available.
         * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
         * protection.
         */
        bytes_avail = chan->backend.subbuf_size;
        WARN_ON(bytes_avail > chan->backend.buf_size);
        len = min_t(size_t, len, bytes_avail);
        subbuf_pages = bytes_avail >> PAGE_SHIFT;
        nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
        roffset = consumed_old & PAGE_MASK;
        poff = consumed_old & ~PAGE_MASK;
        printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
                   len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
        for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
                unsigned int this_len;
                unsigned long *pfnp, new_pfn;
                struct page *new_page;
                void **virt;

                if (!len)
                        break;
                printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
                           len, roffset);

                /*
                 * We have to replace the page we are moving into the splice
                 * pipe.
                 */
                new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
                                                            0)),
                                            GFP_KERNEL | __GFP_ZERO, 0);
                if (!new_page)
                        break;
                new_pfn = page_to_pfn(new_page);
                this_len = PAGE_SIZE - poff;
                pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
                spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
                *pfnp = new_pfn;
                *virt = page_address(new_page);
                spd.partial[spd.nr_pages].offset = poff;
                spd.partial[spd.nr_pages].len = this_len;

                poff = 0;
                roffset += PAGE_SIZE;
                len -= this_len;
        }

        if (!spd.nr_pages)
                return 0;

        return wrapper_splice_to_pipe(pipe, &spd);
}
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe, size_t len,
                                    unsigned int flags,
                                    struct lib_ring_buffer *buf)
{
        struct channel *chan = buf->backend.chan;
        const struct lib_ring_buffer_config *config = &chan->backend.config;
        ssize_t spliced;
        int ret;
        if (config->output != RING_BUFFER_SPLICE)
                return -EINVAL;
        /*
         * We require ppos and length to be page-aligned for performance reasons
         * (no page copy). Size is known using the ioctl
         * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
         * We fail when the ppos or len passed is not page-sized, because splice
         * is not allowed to copy more than the length passed as parameter (so
         * the ABI does not let us silently copy more than requested to include
         * padding).
         *
         * An illustrative user-space sequence is sketched below, after
         * lib_ring_buffer_splice_read().
         */
        if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
                return -EINVAL;
        ret = 0;
        spliced = 0;

        printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
                   (ssize_t)*ppos);
        while (len && !spliced) {
                ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
                printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
                if (ret < 0)
                        break;
                else if (!ret) {
                        if (flags & SPLICE_F_NONBLOCK)
                                ret = -EAGAIN;
                        break;
                }

                *ppos += ret;
                if (ret > len)
                        len = 0;
                else
                        len -= ret;
                spliced += ret;
        }

        if (spliced)
                return spliced;

        return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
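/*
 * Illustrative user-space sequence for this splice ABI (a sketch only: the
 * ioctl request names are assumptions based on the comments above and the
 * ring buffer vfs header, and error handling is omitted):
 *
 *        unsigned long padded;
 *        loff_t off = 0;
 *
 *        ioctl(buf_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded);
 *        ioctl(buf_fd, RING_BUFFER_GET_SUBBUF, ...);        // own one sub-buffer
 *        splice(buf_fd, &off, pipe_fd, NULL, padded, SPLICE_F_MOVE);
 *        splice(pipe_fd, NULL, out_fd, NULL, padded, SPLICE_F_MOVE);
 *        ioctl(buf_fd, RING_BUFFER_PUT_SUBBUF, ...);        // release it
 *
 * The padded size is a multiple of PAGE_SIZE, so both the offset and the
 * length stay page-aligned, satisfying the PAGE_ALIGN() checks in
 * lib_ring_buffer_splice_read() above.
 */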
ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
                                        struct pipe_inode_info *pipe, size_t len,
                                        unsigned int flags)
{
        struct lib_ring_buffer *buf = in->private_data;

        return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
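/*
 * Wiring sketch (an assumption, not part of this file): the exported helpers
 * are meant to be plugged into a file_operations table along the lines of
 *
 *        static const struct file_operations ring_buffer_example_fops = {
 *                .owner          = THIS_MODULE,
 *                .splice_read    = vfs_lib_ring_buffer_splice_read,
 *                .llseek         = vfs_lib_ring_buffer_no_llseek,
 *        };
 *
 * so the buffer file supports zero-copy splice() reads while rejecting
 * seeks. The fops name above is hypothetical; the real table lives with the
 * rest of the VFS glue.
 */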