1 /*
2  *  linux/drivers/char/mem.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  Added devfs support.
7  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9  */
10
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/crash_dump.h>
25 #include <linux/backing-dev.h>
26 #include <linux/bootmem.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29 #include <linux/export.h>
30 #include <linux/io.h>
31 #include <linux/aio.h>
32
33 #include <asm/uaccess.h>
34
35 #ifdef CONFIG_IA64
36 # include <linux/efi.h>
37 #endif
38
39 #define DEVPORT_MINOR   4
40
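/*
 * Return how many bytes can be transferred before the address @start
 * crosses a page boundary, capped at the requested @size.
 */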
41 static inline unsigned long size_inside_page(unsigned long start,
42                                              unsigned long size)
43 {
44         unsigned long sz;
45
46         sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
47
48         return min(sz, size);
49 }
50
51 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
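/*
 * Generic fallbacks for architectures that do not supply their own
 * checks: reads and writes must stay below __pa(high_memory), while
 * mmap of any physical page frame is permitted.
 */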
52 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
53 {
54         return addr + count <= __pa(high_memory);
55 }
56
57 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
58 {
59         return 1;
60 }
61 #endif
62
63 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
64 #ifdef CONFIG_STRICT_DEVMEM
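/*
 * With CONFIG_STRICT_DEVMEM the requested range is checked one page at
 * a time; access is refused (and logged) as soon as devmem_is_allowed()
 * rejects a page.
 */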
65 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
66 {
67         u64 from = ((u64)pfn) << PAGE_SHIFT;
68         u64 to = from + size;
69         u64 cursor = from;
70
71         while (cursor < to) {
72                 if (!devmem_is_allowed(pfn)) {
73                         printk(KERN_INFO
74                 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
75                                 current->comm, from, to);
76                         return 0;
77                 }
78                 cursor += PAGE_SIZE;
79                 pfn++;
80         }
81         return 1;
82 }
83 #else
84 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
85 {
86         return 1;
87 }
88 #endif
89 #endif
90
91 #ifdef CONFIG_DEVMEM
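/*
 * Default no-op; architectures whose xlate_dev_mem_ptr() sets up a
 * temporary mapping override this weak stub to tear it down again.
 */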
92 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
93 {
94 }
95
96 /*
97  * This function reads the *physical* memory. The f_pos points directly to the
98  * memory location.
99  */
100 static ssize_t read_mem(struct file *file, char __user *buf,
101                         size_t count, loff_t *ppos)
102 {
103         phys_addr_t p = *ppos;
104         ssize_t read, sz;
105         char *ptr;
106
107         if (!valid_phys_addr_range(p, count))
108                 return -EFAULT;
109         read = 0;
110 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
111         /* we don't have page 0 mapped on sparc and m68k.. */
112         if (p < PAGE_SIZE) {
113                 sz = size_inside_page(p, count);
114                 if (sz > 0) {
115                         if (clear_user(buf, sz))
116                                 return -EFAULT;
117                         buf += sz;
118                         p += sz;
119                         count -= sz;
120                         read += sz;
121                 }
122         }
123 #endif
124
125         while (count > 0) {
126                 unsigned long remaining;
127
128                 sz = size_inside_page(p, count);
129
130                 if (!range_is_allowed(p >> PAGE_SHIFT, count))
131                         return -EPERM;
132
133                 /*
134                  * On ia64 if a page has been mapped somewhere as uncached, then
135                  * it must also be accessed uncached by the kernel or data
136                  * corruption may occur.
137                  */
138                 ptr = xlate_dev_mem_ptr(p);
139                 if (!ptr)
140                         return -EFAULT;
141
142                 remaining = copy_to_user(buf, ptr, sz);
143                 unxlate_dev_mem_ptr(p, ptr);
144                 if (remaining)
145                         return -EFAULT;
146
147                 buf += sz;
148                 p += sz;
149                 count -= sz;
150                 read += sz;
151         }
152
153         *ppos += read;
154         return read;
155 }
156
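/*
 * This function writes to the *physical* memory. The f_pos points directly
 * to the memory location.
 */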
157 static ssize_t write_mem(struct file *file, const char __user *buf,
158                          size_t count, loff_t *ppos)
159 {
160         phys_addr_t p = *ppos;
161         ssize_t written, sz;
162         unsigned long copied;
163         void *ptr;
164
165         if (!valid_phys_addr_range(p, count))
166                 return -EFAULT;
167
168         written = 0;
169
170 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
171         /* we don't have page 0 mapped on sparc and m68k.. */
172         if (p < PAGE_SIZE) {
173                 sz = size_inside_page(p, count);
174                 /* Hmm. Do something? */
175                 buf += sz;
176                 p += sz;
177                 count -= sz;
178                 written += sz;
179         }
180 #endif
181
182         while (count > 0) {
183                 sz = size_inside_page(p, count);
184
185                 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
186                         return -EPERM;
187
188                 /*
189                  * On ia64 if a page has been mapped somewhere as uncached, then
190                  * it must also be accessed uncached by the kernel or data
191                  * corruption may occur.
192                  */
193                 ptr = xlate_dev_mem_ptr(p);
194                 if (!ptr) {
195                         if (written)
196                                 break;
197                         return -EFAULT;
198                 }
199
200                 copied = copy_from_user(ptr, buf, sz);
201                 unxlate_dev_mem_ptr(p, ptr);
202                 if (copied) {
203                         written += sz - copied;
204                         if (written)
205                                 break;
206                         return -EFAULT;
207                 }
208
209                 buf += sz;
210                 p += sz;
211                 count -= sz;
212                 written += sz;
213         }
214
215         *ppos += written;
216         return written;
217 }
218 #endif  /* CONFIG_DEVMEM */
219
220 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
221
222 int __weak phys_mem_access_prot_allowed(struct file *file,
223         unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
224 {
225         return 1;
226 }
227
228 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
229
230 /*
231  * Architectures vary in how they handle caching for addresses
232  * outside of main memory.
233  *
234  */
235 #ifdef pgprot_noncached
236 static int uncached_access(struct file *file, phys_addr_t addr)
237 {
238 #if defined(CONFIG_IA64)
239         /*
240          * On ia64, we ignore O_DSYNC because we cannot tolerate memory
241          * attribute aliases.
242          */
243         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
244 #elif defined(CONFIG_MIPS)
245         {
246                 extern int __uncached_access(struct file *file,
247                                              unsigned long addr);
248
249                 return __uncached_access(file, addr);
250         }
251 #else
252         /*
253          * Accessing memory above the top the kernel knows about, or
254          * through a file pointer that was marked O_DSYNC, will be done
255          * non-cached.
256          */
257         if (file->f_flags & O_DSYNC)
258                 return 1;
259         return addr >= __pa(high_memory);
260 #endif
261 }
262 #endif
263
264 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
265                                      unsigned long size, pgprot_t vma_prot)
266 {
267 #ifdef pgprot_noncached
268         phys_addr_t offset = pfn << PAGE_SHIFT;
269
270         if (uncached_access(file, offset))
271                 return pgprot_noncached(vma_prot);
272 #endif
273         return vma_prot;
274 }
275 #endif
276
277 #ifndef CONFIG_MMU
278 static unsigned long get_unmapped_area_mem(struct file *file,
279                                            unsigned long addr,
280                                            unsigned long len,
281                                            unsigned long pgoff,
282                                            unsigned long flags)
283 {
284         if (!valid_mmap_phys_addr_range(pgoff, len))
285                 return (unsigned long) -EINVAL;
286         return pgoff << PAGE_SHIFT;
287 }
288
289 /* can't do an in-place private mapping if there's no MMU */
290 static inline int private_mapping_ok(struct vm_area_struct *vma)
291 {
292         return vma->vm_flags & VM_MAYSHARE;
293 }
294 #else
295 #define get_unmapped_area_mem   NULL
296
297 static inline int private_mapping_ok(struct vm_area_struct *vma)
298 {
299         return 1;
300 }
301 #endif
302
303 static const struct vm_operations_struct mmap_mem_ops = {
304 #ifdef CONFIG_HAVE_IOREMAP_PROT
305         .access = generic_access_phys
306 #endif
307 };
308
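/*
 * mmap handler for /dev/mem: after validating the physical range and the
 * caller's permission to touch it, pick a suitable (possibly uncached)
 * protection and map the pages with remap_pfn_range().
 */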
309 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
310 {
311         size_t size = vma->vm_end - vma->vm_start;
312
313         if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
314                 return -EINVAL;
315
316         if (!private_mapping_ok(vma))
317                 return -ENOSYS;
318
319         if (!range_is_allowed(vma->vm_pgoff, size))
320                 return -EPERM;
321
322         if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
323                                                 &vma->vm_page_prot))
324                 return -EINVAL;
325
326         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
327                                                  size,
328                                                  vma->vm_page_prot);
329
330         vma->vm_ops = &mmap_mem_ops;
331
332         /* Remap-pfn-range will mark the range VM_IO */
333         if (remap_pfn_range(vma,
334                             vma->vm_start,
335                             vma->vm_pgoff,
336                             size,
337                             vma->vm_page_prot)) {
338                 return -EAGAIN;
339         }
340         return 0;
341 }
342 #endif  /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
343
344 #ifdef CONFIG_DEVKMEM
345 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
346 {
347         unsigned long pfn;
348
349         /* Turn a kernel-virtual address into a physical page frame */
350         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
351
352         /*
353          * RED-PEN: on some architectures there is more mapped memory than
354          * available in mem_map which pfn_valid checks for. Perhaps should add a
355          * new macro here.
356          *
357          * RED-PEN: vmalloc is not supported right now.
358          */
359         if (!pfn_valid(pfn))
360                 return -EIO;
361
362         vma->vm_pgoff = pfn;
363         return mmap_mem(file, vma);
364 }
365 #endif
366
367 #ifdef CONFIG_CRASH_DUMP
368 /*
369  * Read memory corresponding to the old kernel.
370  */
371 static ssize_t read_oldmem(struct file *file, char __user *buf,
372                                 size_t count, loff_t *ppos)
373 {
374         unsigned long pfn, offset;
375         size_t read = 0, csize;
376         int rc = 0;
377
378         while (count) {
379                 pfn = *ppos / PAGE_SIZE;
380                 if (pfn > saved_max_pfn)
381                         return read;
382
383                 offset = (unsigned long)(*ppos % PAGE_SIZE);
384                 if (count > PAGE_SIZE - offset)
385                         csize = PAGE_SIZE - offset;
386                 else
387                         csize = count;
388
389                 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
390                 if (rc < 0)
391                         return rc;
392                 buf += csize;
393                 *ppos += csize;
394                 read += csize;
395                 count -= csize;
396         }
397         return read;
398 }
399 #endif
400
401 #ifdef CONFIG_DEVKMEM
402 /*
403  * This function reads the *virtual* memory as seen by the kernel.
404  */
405 static ssize_t read_kmem(struct file *file, char __user *buf,
406                          size_t count, loff_t *ppos)
407 {
408         unsigned long p = *ppos;
409         ssize_t low_count, read, sz;
410         char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
411         int err = 0;
412
413         read = 0;
414         if (p < (unsigned long) high_memory) {
415                 low_count = count;
416                 if (count > (unsigned long)high_memory - p)
417                         low_count = (unsigned long)high_memory - p;
418
419 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
420                 /* we don't have page 0 mapped on sparc and m68k.. */
421                 if (p < PAGE_SIZE && low_count > 0) {
422                         sz = size_inside_page(p, low_count);
423                         if (clear_user(buf, sz))
424                                 return -EFAULT;
425                         buf += sz;
426                         p += sz;
427                         read += sz;
428                         low_count -= sz;
429                         count -= sz;
430                 }
431 #endif
432                 while (low_count > 0) {
433                         sz = size_inside_page(p, low_count);
434
435                         /*
436                          * On ia64 if a page has been mapped somewhere as
437                          * uncached, then it must also be accessed uncached
438                          * by the kernel or data corruption may occur
439                          */
440                         kbuf = xlate_dev_kmem_ptr((char *)p);
441
442                         if (copy_to_user(buf, kbuf, sz))
443                                 return -EFAULT;
444                         buf += sz;
445                         p += sz;
446                         read += sz;
447                         low_count -= sz;
448                         count -= sz;
449                 }
450         }
451
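        /*
         * Anything left is above high_memory: pull it out of vmalloc/module
         * space with vread(), one page-sized chunk at a time, via a bounce
         * page.
         */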
452         if (count > 0) {
453                 kbuf = (char *)__get_free_page(GFP_KERNEL);
454                 if (!kbuf)
455                         return -ENOMEM;
456                 while (count > 0) {
457                         sz = size_inside_page(p, count);
458                         if (!is_vmalloc_or_module_addr((void *)p)) {
459                                 err = -ENXIO;
460                                 break;
461                         }
462                         sz = vread(kbuf, (char *)p, sz);
463                         if (!sz)
464                                 break;
465                         if (copy_to_user(buf, kbuf, sz)) {
466                                 err = -EFAULT;
467                                 break;
468                         }
469                         count -= sz;
470                         buf += sz;
471                         read += sz;
472                         p += sz;
473                 }
474                 free_page((unsigned long)kbuf);
475         }
476         *ppos = p;
477         return read ? read : err;
478 }
479
480
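/*
 * Write into the kernel's directly mapped low memory; write_kmem() uses
 * this for the part of the request below high_memory.
 */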
481 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
482                                 size_t count, loff_t *ppos)
483 {
484         ssize_t written, sz;
485         unsigned long copied;
486
487         written = 0;
488 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
489         /* we don't have page 0 mapped on sparc and m68k.. */
490         if (p < PAGE_SIZE) {
491                 sz = size_inside_page(p, count);
492                 /* Hmm. Do something? */
493                 buf += sz;
494                 p += sz;
495                 count -= sz;
496                 written += sz;
497         }
498 #endif
499
500         while (count > 0) {
501                 char *ptr;
502
503                 sz = size_inside_page(p, count);
504
505                 /*
506                  * On ia64 if a page has been mapped somewhere as uncached, then
507                  * it must also be accessed uncached by the kernel or data
508                  * corruption may occur.
509                  */
510                 ptr = xlate_dev_kmem_ptr((char *)p);
511
512                 copied = copy_from_user(ptr, buf, sz);
513                 if (copied) {
514                         written += sz - copied;
515                         if (written)
516                                 break;
517                         return -EFAULT;
518                 }
519                 buf += sz;
520                 p += sz;
521                 count -= sz;
522                 written += sz;
523         }
524
525         *ppos += written;
526         return written;
527 }
528
529 /*
530  * This function writes to the *virtual* memory as seen by the kernel.
531  */
532 static ssize_t write_kmem(struct file *file, const char __user *buf,
533                           size_t count, loff_t *ppos)
534 {
535         unsigned long p = *ppos;
536         ssize_t wrote = 0;
537         ssize_t virtr = 0;
538         char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
539         int err = 0;
540
541         if (p < (unsigned long) high_memory) {
542                 unsigned long to_write = min_t(unsigned long, count,
543                                                (unsigned long)high_memory - p);
544                 wrote = do_write_kmem(p, buf, to_write, ppos);
545                 if (wrote != to_write)
546                         return wrote;
547                 p += wrote;
548                 buf += wrote;
549                 count -= wrote;
550         }
551
552         if (count > 0) {
553                 kbuf = (char *)__get_free_page(GFP_KERNEL);
554                 if (!kbuf)
555                         return wrote ? wrote : -ENOMEM;
556                 while (count > 0) {
557                         unsigned long sz = size_inside_page(p, count);
558                         unsigned long n;
559
560                         if (!is_vmalloc_or_module_addr((void *)p)) {
561                                 err = -ENXIO;
562                                 break;
563                         }
564                         n = copy_from_user(kbuf, buf, sz);
565                         if (n) {
566                                 err = -EFAULT;
567                                 break;
568                         }
569                         vwrite(kbuf, (char *)p, sz);
570                         count -= sz;
571                         buf += sz;
572                         virtr += sz;
573                         p += sz;
574                 }
575                 free_page((unsigned long)kbuf);
576         }
577
578         *ppos = p;
579         return virtr + wrote ? : err;
580 }
581 #endif  /* CONFIG_DEVKMEM */
582
583 #ifdef CONFIG_DEVPORT
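/*
 * /dev/port transfers one byte per iteration with inb()/outb(); the file
 * offset is interpreted as the I/O port number (0..65535).
 */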
584 static ssize_t read_port(struct file *file, char __user *buf,
585                          size_t count, loff_t *ppos)
586 {
587         unsigned long i = *ppos;
588         char __user *tmp = buf;
589
590         if (!access_ok(VERIFY_WRITE, buf, count))
591                 return -EFAULT;
592         while (count-- > 0 && i < 65536) {
593                 if (__put_user(inb(i), tmp) < 0)
594                         return -EFAULT;
595                 i++;
596                 tmp++;
597         }
598         *ppos = i;
599         return tmp-buf;
600 }
601
602 static ssize_t write_port(struct file *file, const char __user *buf,
603                           size_t count, loff_t *ppos)
604 {
605         unsigned long i = *ppos;
606         const char __user *tmp = buf;
607
608         if (!access_ok(VERIFY_READ, buf, count))
609                 return -EFAULT;
610         while (count-- > 0 && i < 65536) {
611                 char c;
612                 if (__get_user(c, tmp)) {
613                         if (tmp > buf)
614                                 break;
615                         return -EFAULT;
616                 }
617                 outb(c, i);
618                 i++;
619                 tmp++;
620         }
621         *ppos = i;
622         return tmp-buf;
623 }
624 #endif
625
626 static ssize_t read_null(struct file *file, char __user *buf,
627                          size_t count, loff_t *ppos)
628 {
629         return 0;
630 }
631
632 static ssize_t write_null(struct file *file, const char __user *buf,
633                           size_t count, loff_t *ppos)
634 {
635         return count;
636 }
637
638 static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
639                              unsigned long nr_segs, loff_t pos)
640 {
641         return 0;
642 }
643
644 static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
645                               unsigned long nr_segs, loff_t pos)
646 {
647         return iov_length(iov, nr_segs);
648 }
649
650 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
651                         struct splice_desc *sd)
652 {
653         return sd->len;
654 }
655
656 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
657                                  loff_t *ppos, size_t len, unsigned int flags)
658 {
659         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
660 }
661
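/*
 * Fill the user buffer with zeroes, at most a page per iteration so that
 * pending signals are noticed and other tasks can be scheduled in between.
 */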
662 static ssize_t read_zero(struct file *file, char __user *buf,
663                          size_t count, loff_t *ppos)
664 {
665         size_t written;
666
667         if (!count)
668                 return 0;
669
670         if (!access_ok(VERIFY_WRITE, buf, count))
671                 return -EFAULT;
672
673         written = 0;
674         while (count) {
675                 unsigned long unwritten;
676                 size_t chunk = count;
677
678                 if (chunk > PAGE_SIZE)
679                         chunk = PAGE_SIZE;      /* Just for latency reasons */
680                 unwritten = __clear_user(buf, chunk);
681                 written += chunk - unwritten;
682                 if (unwritten)
683                         break;
684                 if (signal_pending(current))
685                         return written ? written : -ERESTARTSYS;
686                 buf += chunk;
687                 count -= chunk;
688                 cond_resched();
689         }
690         return written ? written : -EFAULT;
691 }
692
693 static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
694                              unsigned long nr_segs, loff_t pos)
695 {
696         size_t written = 0;
697         unsigned long i;
698         ssize_t ret;
699
700         for (i = 0; i < nr_segs; i++) {
701                 ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
702                                 &pos);
703                 if (ret < 0)
704                         break;
705                 written += ret;
706         }
707
708         return written ? written : -EFAULT;
709 }
710
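/*
 * Shared mappings of /dev/zero are backed by shmem; private mappings are
 * left anonymous, so faults are satisfied with zero pages. Not supported
 * without an MMU.
 */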
711 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
712 {
713 #ifndef CONFIG_MMU
714         return -ENOSYS;
715 #endif
716         if (vma->vm_flags & VM_SHARED)
717                 return shmem_zero_setup(vma);
718         return 0;
719 }
720
721 static ssize_t write_full(struct file *file, const char __user *buf,
722                           size_t count, loff_t *ppos)
723 {
724         return -ENOSPC;
725 }
726
727 /*
728  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
729  * can fopen() both devices with "a" now.  This was previously impossible.
730  * -- SRB.
731  */
732 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
733 {
734         return file->f_pos = 0;
735 }
736
737 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
738
739 /*
740  * The memory devices use the full 32/64 bits of the offset, and so we cannot
741  * check against negative addresses: they are ok. The return value is weird,
742  * though, in that case (0).
743  *
744  * also note that seeking relative to the "end of file" isn't supported:
745  * it has no meaning, so it returns -EINVAL.
746  */
747 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
748 {
749         loff_t ret;
750
751         mutex_lock(&file_inode(file)->i_mutex);
752         switch (orig) {
753         case SEEK_CUR:
754                 offset += file->f_pos;
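                /* fall through: reuse the SEEK_SET bounds check */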
755         case SEEK_SET:
756                 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
757                 if ((unsigned long long)offset >= ~0xFFFULL) {
758                         ret = -EOVERFLOW;
759                         break;
760                 }
761                 file->f_pos = offset;
762                 ret = file->f_pos;
763                 force_successful_syscall_return();
764                 break;
765         default:
766                 ret = -EINVAL;
767         }
768         mutex_unlock(&file_inode(file)->i_mutex);
769         return ret;
770 }
771
772 #endif
773
774 #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
775 static int open_port(struct inode *inode, struct file *filp)
776 {
777         return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
778 }
779 #endif
780
781 #define zero_lseek      null_lseek
782 #define full_lseek      null_lseek
783 #define write_zero      write_null
784 #define read_full       read_zero
785 #define aio_write_zero  aio_write_null
786 #define open_mem        open_port
787 #define open_kmem       open_mem
788 #define open_oldmem     open_mem
789
790 #ifdef CONFIG_DEVMEM
791 static const struct file_operations mem_fops = {
792         .llseek         = memory_lseek,
793         .read           = read_mem,
794         .write          = write_mem,
795         .mmap           = mmap_mem,
796         .open           = open_mem,
797         .get_unmapped_area = get_unmapped_area_mem,
798 };
799 #endif
800
801 #ifdef CONFIG_DEVKMEM
802 static const struct file_operations kmem_fops = {
803         .llseek         = memory_lseek,
804         .read           = read_kmem,
805         .write          = write_kmem,
806         .mmap           = mmap_kmem,
807         .open           = open_kmem,
808         .get_unmapped_area = get_unmapped_area_mem,
809 };
810 #endif
811
812 static const struct file_operations null_fops = {
813         .llseek         = null_lseek,
814         .read           = read_null,
815         .write          = write_null,
816         .aio_read       = aio_read_null,
817         .aio_write      = aio_write_null,
818         .splice_write   = splice_write_null,
819 };
820
821 #ifdef CONFIG_DEVPORT
822 static const struct file_operations port_fops = {
823         .llseek         = memory_lseek,
824         .read           = read_port,
825         .write          = write_port,
826         .open           = open_port,
827 };
828 #endif
829
830 static const struct file_operations zero_fops = {
831         .llseek         = zero_lseek,
832         .read           = read_zero,
833         .write          = write_zero,
834         .aio_read       = aio_read_zero,
835         .aio_write      = aio_write_zero,
836         .mmap           = mmap_zero,
837 };
838
839 /*
840  * capabilities for /dev/zero
841  * - permits private mappings, "copies" are taken of the source of zeros
842  * - no writeback happens
843  */
844 static struct backing_dev_info zero_bdi = {
845         .name           = "char/mem",
846         .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
847 };
848
849 static const struct file_operations full_fops = {
850         .llseek         = full_lseek,
851         .read           = read_full,
852         .write          = write_full,
853 };
854
855 #ifdef CONFIG_CRASH_DUMP
856 static const struct file_operations oldmem_fops = {
857         .read   = read_oldmem,
858         .open   = open_oldmem,
859         .llseek = default_llseek,
860 };
861 #endif
862
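/*
 * The character devices behind MEM_MAJOR. The array index is the minor
 * number; mode is the default permission handed out by mem_devnode().
 */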
863 static const struct memdev {
864         const char *name;
865         umode_t mode;
866         const struct file_operations *fops;
867         struct backing_dev_info *dev_info;
868 } devlist[] = {
869 #ifdef CONFIG_DEVMEM
870          [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
871 #endif
872 #ifdef CONFIG_DEVKMEM
873          [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
874 #endif
875          [3] = { "null", 0666, &null_fops, NULL },
876 #ifdef CONFIG_DEVPORT
877          [4] = { "port", 0, &port_fops, NULL },
878 #endif
879          [5] = { "zero", 0666, &zero_fops, &zero_bdi },
880          [7] = { "full", 0666, &full_fops, NULL },
881          [8] = { "random", 0666, &random_fops, NULL },
882          [9] = { "urandom", 0666, &urandom_fops, NULL },
883 #ifdef CONFIG_PRINTK
884         [11] = { "kmsg", 0644, &kmsg_fops, NULL },
885 #endif
886 #ifdef CONFIG_CRASH_DUMP
887         [12] = { "oldmem", 0, &oldmem_fops, NULL },
888 #endif
889 };
890
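/*
 * open() handler for MEM_MAJOR: look the minor up in devlist, install the
 * device's file_operations and backing_dev_info, then call its own open()
 * if it provides one.
 */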
891 static int memory_open(struct inode *inode, struct file *filp)
892 {
893         int minor;
894         const struct memdev *dev;
895
896         minor = iminor(inode);
897         if (minor >= ARRAY_SIZE(devlist))
898                 return -ENXIO;
899
900         dev = &devlist[minor];
901         if (!dev->fops)
902                 return -ENXIO;
903
904         filp->f_op = dev->fops;
905         if (dev->dev_info)
906                 filp->f_mapping->backing_dev_info = dev->dev_info;
907
908         /* Is /dev/mem or /dev/kmem ? */
909         if (dev->dev_info == &directly_mappable_cdev_bdi)
910                 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
911
912         if (dev->fops->open)
913                 return dev->fops->open(inode, filp);
914
915         return 0;
916 }
917
918 static const struct file_operations memory_fops = {
919         .open = memory_open,
920         .llseek = noop_llseek,
921 };
922
923 static char *mem_devnode(struct device *dev, umode_t *mode)
924 {
925         if (mode && devlist[MINOR(dev->devt)].mode)
926                 *mode = devlist[MINOR(dev->devt)].mode;
927         return NULL;
928 }
929
930 static struct class *mem_class;
931
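/*
 * Register the "mem" character major, create the class and the /dev nodes
 * for every populated devlist entry, then bring up the tty layer.
 */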
932 static int __init chr_dev_init(void)
933 {
934         int minor;
935         int err;
936
937         err = bdi_init(&zero_bdi);
938         if (err)
939                 return err;
940
941         if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
942                 printk(KERN_ERR "unable to get major %d for memory devs\n", MEM_MAJOR);
943
944         mem_class = class_create(THIS_MODULE, "mem");
945         if (IS_ERR(mem_class))
946                 return PTR_ERR(mem_class);
947
948         mem_class->devnode = mem_devnode;
949         for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
950                 if (!devlist[minor].name)
951                         continue;
952
953                 /*
954                  * Create /dev/port?
955                  */
956                 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
957                         continue;
958
959                 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
960                               NULL, devlist[minor].name);
961         }
962
963         return tty_init();
964 }
965
966 fs_initcall(chr_dev_init);