select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_GENERIC_DMA_COHERENT
++++++++ select HAVE_KERNEL_GZIP
++++++++ select HAVE_KERNEL_LZO
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
If you have any questions or comments about the Linux kernel port
to this board, send e-mail to <sjhill@cotw.com>.
+ config ARCH_DOVE
+ bool "Marvell Dove"
+ select PCI
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ select PLAT_ORION
+ help
+ Support for the Marvell Dove SoC 88AP510
+
config ARCH_KIRKWOOD
bool "Marvell Kirkwood"
select CPU_FEROCEON
<http://www.nuvoton.com/hq/enu/ProductAndSales/ProductLines/
ConsumerElectronicsIC/ARMMicrocontroller/ARMMicrocontroller>
+++++ +++config ARCH_NUC93X
+++++ +++ bool "Nuvoton NUC93X CPU"
+++++ +++ select CPU_ARM926T
+++++ +++ select HAVE_CLK
+++++ +++ select COMMON_CLKDEV
+++++ +++ help
+++++ +++ Support for Nuvoton (Winbond logic dept.) NUC93X MCU. The NUC93X is a
+++++ +++ low-power, high-performance MPEG-4/JPEG multimedia controller chip.
+++++ +++
config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
select CPU_ARM926T
select ARCH_SPARSEMEM_ENABLE
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
++ + select CPU_FREQ
select GENERIC_GPIO
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_IDE
select COMMON_CLKDEV
select GENERIC_ALLOCATOR
++++++++ select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's DaVinci platform.
help
Support for Broadcom's BCMRing platform.
+ config ARCH_U8500
+ bool "ST-Ericsson U8500 Series"
+ select CPU_V7
+ select ARM_AMBA
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ select COMMON_CLKDEV
+ help
+ Support for ST-Ericsson's Ux500 architecture
+
endchoice
source "arch/arm/mach-clps711x/Kconfig"
source "arch/arm/mach-mmp/Kconfig"
+++++ +++source "arch/arm/mach-nuc93x/Kconfig"
+++++ +++
source "arch/arm/mach-sa1100/Kconfig"
source "arch/arm/plat-omap/Kconfig"
source "arch/arm/mach-kirkwood/Kconfig"
+ source "arch/arm/mach-dove/Kconfig"
+
+ source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
source "arch/arm/plat-s3c64xx/Kconfig"
source "arch/arm/plat-s3c/Kconfig"
source "arch/arm/plat-mxc/Kconfig"
source "arch/arm/mach-nomadik/Kconfig"
+ source "arch/arm/plat-nomadik/Kconfig"
source "arch/arm/mach-netx/Kconfig"
source "arch/arm/mach-bcmring/Kconfig"
+ source "arch/arm/mach-ux500/Kconfig"
+
# Definitions to make life easier
config ARCH_ACORN
bool
config PLAT_IOP
bool
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_TIME
config PLAT_ORION
bool
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\
- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4)
+ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || ARCH_U8500)
depends on GENERIC_CLOCKEVENTS
select USE_GENERIC_SMP_HELPERS
- select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4)
+ select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500)
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
config LOCAL_TIMERS
bool "Use local timer interrupts"
depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || \
- REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4)
+ REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || ARCH_U8500)
default y
- select HAVE_ARM_TWD if (ARCH_REALVIEW || ARCH_OMAP4)
+ select HAVE_ARM_TWD if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500)
help
Enable support for local timers on SMP platforms, rather than the
legacy IPI broadcast method. Local timers allow the system
config CPU_FREQ_SA1100
bool
-- - depends on CPU_FREQ && (SA1100_H3100 || SA1100_H3600 || SA1100_LART || SA1100_PLEB || SA1100_BADGE4 || SA1100_HACKKIT)
-- - default y
config CPU_FREQ_SA1110
bool
-- - depends on CPU_FREQ && (SA1100_ASSABET || SA1100_CERF || SA1100_PT_SYSTEM3)
-- - default y
config CPU_FREQ_INTEGRATOR
tristate "CPUfreq driver for ARM Integrator CPUs"
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
------ -- dma_cache_maint(ptr, size, dir);
++++++ ++ __dma_single_cpu_to_dev(ptr, size, dir);
}
return dma_addr;
memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- - __cpuc_flush_kernel_dcache_area(ptr, size);
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
++ + __cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
++++++ ++ } else {
++++++ ++ __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
}
}
}
EXPORT_SYMBOL(dma_map_single);
+ /*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+ {
+ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+ __func__, (void *) dma_addr, size, dir);
+
+ unmap_single(dev, dma_addr, size, dir);
+ }
+ EXPORT_SYMBOL(dma_unmap_single);
+
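Taken together, the map/unmap paths above implement the streaming-DMA ownership handshake: dma_map_single() hands a buffer from the CPU to the device (now via __dma_single_cpu_to_dev()), and dma_unmap_single() hands it back, copying out of the bounce buffer if one was used and flushing so user mappings stay coherent. A minimal sketch of the driver-side usage this contract assumes (the function, buffer and length here are hypothetical, not part of the patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver fragment, not from the patch itself. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned/invalidated per direction. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the device at "handle", start the transfer, wait ... */

	/* Device -> CPU: bounce-buffer contents (if any) are copied back
	 * and the CPU's view of the buffer is made coherent again. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}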
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
* the safe buffer. (basically return things back to the way they
* should be)
*/
-
- void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
unmap_single(dev, dma_addr, size, dir);
}
- EXPORT_SYMBOL(dma_unmap_single);
+ EXPORT_SYMBOL(dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
* DMA Cache Coherency
* ===================
*
------ -- * dma_inv_range(start, end)
------ -- *
------ -- * Invalidate (discard) the specified virtual address range.
------ -- * May not write back any entries. If 'start' or 'end'
------ -- * are not cache line aligned, those lines must be written
------ -- * back.
------ -- * - start - virtual start address
------ -- * - end - virtual end address
------ -- *
------ -- * dma_clean_range(start, end)
------ -- *
------ -- * Clean (write back) the specified virtual address range.
------ -- * - start - virtual start address
------ -- * - end - virtual end address
------ -- *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
- void (*flush_kern_dcache_page)(void *);
+ void (*flush_kern_dcache_area)(void *, size_t);
+
-- --- -- void (*dma_inv_range)(const void *, const void *);
-- --- -- void (*dma_clean_range)(const void *, const void *);
++++++ ++ void (*dma_map_area)(const void *, size_t, int);
++++++ ++ void (*dma_unmap_area)(const void *, size_t, int);
++ +++ ++
- void (*dma_inv_range)(const void *, const void *);
- void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
- #define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
+ #define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
/*
* These are private to the dma-mapping API. Do not use directly.
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
------ --#define dmac_inv_range cpu_cache.dma_inv_range
------ --#define dmac_clean_range cpu_cache.dma_clean_range
++++++ ++#define dmac_map_area cpu_cache.dma_map_area
++++++ ++#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
- #define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+ #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
- extern void __cpuc_flush_dcache_page(void *);
+ extern void __cpuc_flush_dcache_area(void *, size_t);
/*
* These are private to the dma-mapping API. Do not use directly.
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
------ --#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
------ --#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
++++++ ++#define dmac_map_area __glue(_CACHE,_dma_map_area)
++++++ ++#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
------ --extern void dmac_inv_range(const void *, const void *);
------ --extern void dmac_clean_range(const void *, const void *);
++++++ ++extern void dmac_map_area(const void *, size_t, int);
++++++ ++extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
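Both the multi-cache and single-cache builds now resolve dmac_map_area()/dmac_unmap_area() the same way, so generic code can stay direction-agnostic. Broadly, the __dma_single_cpu_to_dev() helper seen earlier combines the new L1 hook with the unchanged range-based outer-cache operations; a sketch under that assumption (modelled on the patched dma-mapping code, exact naming in the tree may differ):

#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

static void sketch_single_cpu_to_dev(const void *kaddr, size_t size,
				     enum dma_data_direction dir)
{
	unsigned long paddr = __pa(kaddr);

	/* L1: one call; the per-CPU implementation picks clean,
	 * invalidate or flush based on the direction. */
	dmac_map_area(kaddr, size, dir);

	/* The outer (L2) cache keeps its (start, end) physical interface. */
	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}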
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*/
- -------#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- ------- do { \
- ------- memcpy(dst, src, len); \
- ------- flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
- ------- } while (0)
- -------
+ +++++++extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ +++++++ unsigned long, void *, const void *, unsigned long);
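With the macro gone, copy_to_user_page() becomes a real function so the VIPT and SMP cases can be handled out of line. For the VIVT case its body is essentially the memcpy plus the coherency walk that the removed vivt_flush_ptrace_access() performed; a minimal sketch of that case only (the real function must also cover aliasing VIPT caches):

/* VIVT-only sketch; mirrors the vivt_flush_ptrace_access() body that
 * this patch removes from the header. */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)dst;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}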
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
- #ifndef CONFIG_CPU_CACHE_VIPT
- static inline void flush_cache_mm(struct mm_struct *mm)
+
+ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
- flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
}
static inline void
- flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
}
}
- -------static inline void
- ------vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- ------- unsigned long uaddr, void *kaddr,
- ------- unsigned long len, int write)
- -------{
- ------- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- ------- unsigned long addr = (unsigned long)kaddr;
- ------- __cpuc_coherent_kern_range(addr, addr + len);
- ------- }
- -------}
- ------
+ #ifndef CONFIG_CPU_CACHE_VIPT
+ #define flush_cache_mm(mm) \
+ vivt_flush_cache_mm(mm)
+ #define flush_cache_range(vma,start,end) \
+ vivt_flush_cache_range(vma,start,end)
+ #define flush_cache_page(vma,addr,pfn) \
+ vivt_flush_cache_page(vma,addr,pfn)
- ------#define flush_ptrace_access(vma,page,ua,ka,len,write) \
- ------ vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
- -------extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- ------- unsigned long uaddr, void *kaddr,
- ------- unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
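The rename from flush_cache_*() to vivt_flush_cache_*() is what lets the VIPT build keep the VIVT inlines available: its out-of-line flush_cache_*() implementations can test the runtime cache type and fall back to them. A sketch of that shape, assuming the structure of arch/arm/mm/flush.c (the aliasing-VIPT branch is elided):

/* Out-of-line variant for CONFIG_CPU_CACHE_VIPT kernels. */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);	/* the renamed inline */
		return;
	}
	/* ... handle aliasing VIPT caches here ... */
}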
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
- extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
static inline void __flush_icache_all(void)
{
+ #ifdef CONFIG_ARM_ERRATA_411920
+ extern void v6_icache_inval_all(void);
+ v6_icache_inval_all();
+ #else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
: "r" (0));
+ #endif
}
#define ARCH_HAS_FLUSH_ANON_PAGE
{
/* highmem pages are always flushed upon kunmap already */
if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
#define flush_dcache_mmap_lock(mapping) \
*/
#define flush_icache_page(vma,page) do { } while (0)
- static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
- unsigned offset, size_t size)
- {
- const void *start = (void __force *)virt + offset;
- dmac_inv_range(start, start + size);
- }
-
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
-- ------ struct page *from, unsigned long vaddr)
++ ++++++ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
- - __cpuc_flush_dcache_page(kto);
+ #ifdef CONFIG_HIGHMEM
+ /*
+ * kmap_atomic() doesn't set the page virtual address, and
+ * kunmap_atomic() takes care of cache flushing already.
+ */
+ if (page_address(to) != NULL)
+ #endif
++ + __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
-- ------ struct page *from, unsigned long vaddr)
++ ++++++ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache.
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
- ENTRY(xsc3_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ ENTRY(xsc3_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
cmp r0, r1
* - start - virtual start address
* - end - virtual end address
*/
------ --ENTRY(xsc3_dma_inv_range)
++++++ ++xsc3_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
* - start - virtual start address
* - end - virtual end address
*/
------ --ENTRY(xsc3_dma_clean_range)
++++++ ++xsc3_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mov pc, lr
++++++ ++/*
++++++ ++ * dma_map_area(start, size, dir)
++++++ ++ * - start - kernel virtual start address
++++++ ++ * - size - size of region
++++++ ++ * - dir - DMA direction
++++++ ++ */
++++++ ++ENTRY(xsc3_dma_map_area)
++++++ ++ add r1, r1, r0
++++++ ++ cmp r2, #DMA_TO_DEVICE
++++++ ++ beq xsc3_dma_clean_range
++++++ ++ bcs xsc3_dma_inv_range
++++++ ++ b xsc3_dma_flush_range
++++++ ++ENDPROC(xsc3_dma_map_area)
++++++ ++
++++++ ++/*
++++++ ++ * dma_unmap_area(start, size, dir)
++++++ ++ * - start - kernel virtual start address
++++++ ++ * - size - size of region
++++++ ++ * - dir - DMA direction
++++++ ++ */
++++++ ++ENTRY(xsc3_dma_unmap_area)
++++++ ++ mov pc, lr
++++++ ++ENDPROC(xsc3_dma_unmap_area)
++++++ ++
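The DMA direction now drives the choice of maintenance operation inside the assembly itself. A C restatement of xsc3_dma_map_area()'s three-way branch, using the standard enum values (DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2); the extern declarations stand in for the now file-local assembly labels:

extern void xsc3_dma_clean_range(const void *, const void *);
extern void xsc3_dma_inv_range(const void *, const void *);
extern void xsc3_dma_flush_range(const void *, const void *);

static void xsc3_dma_map_area_in_c(const void *start, size_t size, int dir)
{
	const void *end = start + size;		/* add r1, r1, r0 */

	if (dir == DMA_TO_DEVICE)
		xsc3_dma_clean_range(start, end);	/* beq: write back */
	else if (dir > DMA_TO_DEVICE)
		xsc3_dma_inv_range(start, end);		/* bcs: invalidate */
	else
		xsc3_dma_flush_range(start, end);	/* bidirectional */
}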
ENTRY(xsc3_cache_fns)
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
.long xsc3_flush_user_cache_range
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
- .long xsc3_flush_kern_dcache_page
- .long xsc3_dma_inv_range
- .long xsc3_dma_clean_range
+ .long xsc3_flush_kern_dcache_area
-- --- -- .long xsc3_dma_inv_range
-- --- -- .long xsc3_dma_clean_range
++++++ ++ .long xsc3_dma_map_area
++++++ ++ .long xsc3_dma_unmap_area
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
orr r4, r4, #0x18 @ cache the page table in L2
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
- mov r0, #0 @ don't allow CP access
+ mov r0, #1 << 6 @ cp6 access for early sched_clock
mcr p15, 0, r0, c15, c1, 0 @ write CP access register
mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg
adr r5, xsc3_crval
ldmia r5, {r5, r6}
++++++++
++++++++ #ifdef CONFIG_CACHE_XSC3L2
++++++++ mrc p15, 1, r0, c0, c0, 1 @ get L2 present information
++++++++ ands r0, r0, #0xf8
++++++++ orrne r6, r6, #(1 << 26) @ enable L2 if present
++++++++ #endif
++++++++
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, r5 @ ..V. ..R. .... ..A.
orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)