#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*
 * Copy data from a page into the user buffers described by i, trying the
 * atomic kmap fast path first and falling back to plain kmap if the
 * destination needs faulting in.  Advances the iterator by the amount copied.
 */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
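/*
 * Illustrative sketch, not part of the original file: how a simple read
 * path might stream a run of pages out to userspace with
 * copy_page_to_iter().  The page array and its contents are assumed to be
 * supplied (and referenced) by a hypothetical caller.
 */
static ssize_t example_send_pages(struct page **pages, unsigned int nr,
				  size_t count, struct iov_iter *iter)
{
	ssize_t done = 0;
	unsigned int n;

	for (n = 0; n < nr && count; n++) {
		size_t chunk = min(count, (size_t)PAGE_SIZE);
		/* advances 'iter' by however much was actually copied */
		size_t copied = copy_page_to_iter(pages[n], 0, chunk, iter);

		done += copied;
		count -= copied;
		if (copied < chunk)	/* user buffer faulted: short read */
			break;
	}
	return done;
}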
/*
 * The mirror image of copy_page_to_iter(): pull user data described by i
 * into a page, atomic kmap first, plain kmap as the fallback.  Advances
 * the iterator by the amount copied.
 */
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_from_iter);
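/*
 * Illustrative sketch, not part of the original file: the write-side call.
 * A caller holding a page can pull at most one page worth of user data in
 * a single call; a short return means the user buffer faulted part-way and
 * the iterator stopped there.
 */
static size_t example_fill_page(struct page *page, size_t bytes,
				struct iov_iter *iter)
{
	/* copy_page_from_iter() also advances the iterator itself */
	return copy_page_from_iter(page, 0, min(bytes, (size_t)PAGE_SIZE),
				   iter);
}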
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the
 * number of bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
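/*
 * Illustrative sketch, not part of the original file: the calling contract
 * of the atomic copy above.  With page faults disabled the copy may stop
 * early, and unlike copy_page_from_iter() it does not advance the
 * iterator; the caller decides what "progress" means.
 */
static size_t example_try_atomic_copy(struct page *page, unsigned long offset,
				      size_t bytes, struct iov_iter *iter)
{
	size_t copied;

	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, iter, offset, bytes);
	pagefault_enable();

	return copied;
}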
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
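/*
 * Illustrative sketch, not part of the original file: a caller that moved
 * data by some out-of-band means (say, a DMA engine) and only needs the
 * iterator walked past the bytes it consumed.  'transferred' comes from
 * hypothetical hardware completion state.
 */
static void example_consume(struct iov_iter *iter, size_t transferred)
{
	/* never advance past what the iterator still holds */
	iov_iter_advance(iter, min(transferred, iov_iter_count(iter)));
}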
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
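/*
 * Illustrative sketch, not part of the original file, modeled on the loop
 * in generic_perform_write(): prefault the user buffer, copy with faults
 * disabled (see example_try_atomic_copy() above), then advance by what
 * actually arrived.
 */
static size_t example_write_chunk(struct page *page, unsigned long offset,
				  size_t bytes, struct iov_iter *iter)
{
	size_t copied;

	/* without this, the disabled-fault copy could fail indefinitely */
	if (iov_iter_fault_in_readable(iter, bytes))
		return 0;

	copied = example_try_atomic_copy(page, offset, bytes, iter);

	/*
	 * Real callers retry a zero-progress copy with no more than
	 * iov_iter_single_seg_count() bytes to guarantee forward progress.
	 */
	iov_iter_advance(iter, copied);
	return copied;
}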
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
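/*
 * Illustrative sketch, not part of the original file: the classic direct
 * I/O use of the helper above.  Because every base address, intermediate
 * length and the total are ORed together, one mask test covers the whole
 * vector; 511 stands in for a hypothetical 512-byte DMA requirement.
 */
static bool example_dio_aligned(const struct iov_iter *iter)
{
	return (iov_iter_alignment(iter) & 511) == 0;
}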
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= REQ_KERNEL;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
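/*
 * Illustrative sketch, not part of the original file: building an iterator
 * over a two-segment destination buffer for a read.  'iov' must stay alive
 * as long as the iterator does, since the iov_iter only points at it; the
 * user pointers are supplied by a hypothetical caller.
 */
static void example_init_two_segs(struct iov_iter *iter, struct iovec *iov,
				  void __user *buf1, size_t len1,
				  void __user *buf2, size_t len2)
{
	iov[0].iov_base = buf1;
	iov[0].iov_len = len1;
	iov[1].iov_base = buf2;
	iov[1].iov_len = len2;
	/* READ: the iovec is the destination, as in copy_page_to_iter() */
	iov_iter_init(iter, READ, iov, 2, len1 + len2);
}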
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
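/*
 * Illustrative sketch, not part of the original file: pinning the front of
 * the iterator for zero-copy I/O and releasing it again.  maxsize is kept
 * one page under the array size so a nonzero 'start' cannot overflow it.
 */
static ssize_t example_pin_front(struct iov_iter *iter)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages(iter, pages, 15 * PAGE_SIZE, &start);
	if (bytes < 0)
		return bytes;

	/* 'bytes' of user memory begin at offset 'start' into pages[0] */
	npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);

	/* ... a real caller would hand the pages to hardware here ... */

	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	return bytes;
}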
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;

		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			  - addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
	}
	return min(npages, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);
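/*
 * Illustrative sketch, not part of the original file: a typical sizing
 * question answered by iov_iter_npages().  The cap of 256 stands in for a
 * per-request limit such as a bio's page budget.
 */
static int example_request_pages(const struct iov_iter *iter)
{
	return iov_iter_npages(iter, 256);
}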