#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
- #include "dma.h"
-
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
static struct resource *_sparc_find_resource(struct resource *r,
* Typically devices use them for control blocks.
* CPU may access them without any explicit flushing.
*/
- void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+ static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
struct of_device *op = to_of_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
return NULL;
}
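/*
 * Illustrative sketch, not part of this patch: how an SBUS driver would
 * obtain a coherent control block through the generic DMA API once
 * sbus_dma_ops is installed below.  "op", "CMD_BLOCK_SZ" and the error
 * label are hypothetical placeholders.
 */
	dma_addr_t dvma;
	void *block;

	/* dispatches to sbus_alloc_coherent() via dma_ops->alloc_coherent */
	block = dma_alloc_coherent(&op->dev, CMD_BLOCK_SZ, &dvma, GFP_ATOMIC);
	if (block == NULL)
		goto err_nomem;

	/* ... CPU may touch *block with no explicit flushing ... */

	dma_free_coherent(&op->dev, CMD_BLOCK_SZ, block, dvma);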
- void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
struct page *pgv;
n = (n + PAGE_SIZE-1) & PAGE_MASK;
if ((res->end-res->start)+1 != n) {
- printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+ printk("sbus_free_coherent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
return;
}
* CPU view of this memory may be inconsistent with
* a device view and explicit flushing is necessary.
*/
- dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+ static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *va = page_address(page) + offset;
+
/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
return mmu_get_scsi_one(dev, va, len);
}
- void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+ static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_one(dev, ba, n);
}
- int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+ static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
return n;
}
- void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+ static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_sgl(dev, sg, n);
}
- void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+ static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
+ BUG();
}
- void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
- }
+ BUG();
+ }
+
+ struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+ .unmap_page = sbus_unmap_page,
+ .map_sg = sbus_map_sg,
+ .unmap_sg = sbus_unmap_sg,
+ .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
+ .sync_sg_for_device = sbus_sync_sg_for_device,
+ };
+
+ struct dma_map_ops *dma_ops = &sbus_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
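/*
 * For context, a minimal sketch of how the core now reaches these
 * routines (simplified from asm-generic/dma-mapping-common.h as used by
 * this series; not part of the patch).  struct dma_map_ops has no
 * map_single method -- single mappings are funneled through
 * ->map_page(), which is why sbus_map_single() became sbus_map_page():
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK,
			     size, dir, NULL);
}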
static int __init sparc_register_ioport(void)
{
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be a valid struct pci_dev pointer for PCI devices.
*/
- void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+ static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *pba, gfp_t gfp)
{
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
}
- EXPORT_SYMBOL(pci_alloc_consistent);
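/*
 * For context, not part of this patch: the deleted pci_* exports are
 * replaced by static inline compatibility wrappers, essentially as in
 * include/asm-generic/pci-dma-compat.h, so existing drivers keep
 * compiling unchanged:
 */
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				  size, dma_handle, GFP_ATOMIC);
}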
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
- void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
unsigned long pgp;
free_pages(pgp, get_order(n));
}
- EXPORT_SYMBOL(pci_free_consistent);
-
- /* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
- dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
- int direction)
- {
- BUG_ON(direction == PCI_DMA_NONE);
- /* IIep is write-through, not flushing. */
- return virt_to_phys(ptr);
- }
- EXPORT_SYMBOL(pci_map_single);
-
- /* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
- void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
- int direction)
- {
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
- }
- }
- EXPORT_SYMBOL(pci_unmap_single);
/*
* Same as pci_map_single, but with pages.
*/
- dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
- EXPORT_SYMBOL(pci_map_page);
-
- void pci_unmap_page(struct pci_dev *hwdev,
- dma_addr_t dma_address, size_t size, int direction)
- {
- BUG_ON(direction == PCI_DMA_NONE);
- /* mmu_inval_dma_area XXX */
- }
- EXPORT_SYMBOL(pci_unmap_page);
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
- int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+ static int pci32_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
}
return nents;
}
- EXPORT_SYMBOL(pci_map_sg);
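/*
 * Illustrative sketch, not part of this patch: a driver mapping a
 * two-element scatterlist.  "dev", the buffers and their lengths are
 * hypothetical; on pci32 each entry's DMA address is simply the
 * buffer's physical address.
 */
	struct scatterlist sgl[2];
	int count;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], buf_a, len_a);
	sg_set_buf(&sgl[1], buf_b, len_b);

	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE); /* -> pci32_map_sg() */
	if (count == 0)
		return -EIO;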
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
- void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != DMA_TO_DEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
}
}
}
- EXPORT_SYMBOL(pci_unmap_sg);
/* Make physical memory consistent for a single
* streaming mode DMA translation before or after a transfer.
* must first perform a pci_dma_sync_for_device, and then the
* device again owns the buffer.
*/
- void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != DMA_TO_DEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
- EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
- void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != DMA_TO_DEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
- EXPORT_SYMBOL(pci_dma_sync_single_for_device);
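/*
 * Illustrative sketch, not part of this patch: the ownership hand-off
 * described above, for a device-to-memory transfer.  "dev", "ba" and
 * "len" are hypothetical.
 */
	/* device owns the buffer while the transfer is in flight */
	dma_sync_single_for_cpu(dev, ba, len, DMA_FROM_DEVICE);
	/* CPU may now read what the device wrote */
	dma_sync_single_for_device(dev, ba, len, DMA_FROM_DEVICE);
	/* device owns the buffer again */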
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
- void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != DMA_TO_DEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
}
}
}
- EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
- void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+ static void pci32_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != DMA_TO_DEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
}
}
}
- EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+ struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+ .map_sg = pci32_map_sg,
+ .unmap_sg = pci32_unmap_sg,
+ .sync_single_for_cpu = pci32_sync_single_for_cpu,
+ .sync_single_for_device = pci32_sync_single_for_device,
+ .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
+ .sync_sg_for_device = pci32_sync_sg_for_device,
+ };
+ EXPORT_SYMBOL(pci32_dma_ops);
+
#endif /* CONFIG_PCI */
+ /*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly. For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+ int dma_supported(struct device *dev, u64 mask)
+ {
+ #ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return 1;
+ #endif
+ return 0;
+ }
+ EXPORT_SYMBOL(dma_supported);
+
+ int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+ #ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+ #endif
+ return -EOPNOTSUPP;
+ }
+ EXPORT_SYMBOL(dma_set_mask);
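/*
 * Illustrative sketch, not part of this patch: the 24-bit example from
 * the dma_supported() comment, as a driver probe routine would write
 * it.  "pdev" is a hypothetical struct pci_dev.
 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24))) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		return -EIO;
	}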
+
#ifdef CONFIG_PROC_FS
-static int
-_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
- void *data)
+static int sparc_io_proc_show(struct seq_file *m, void *v)
{
- char *p = buf, *e = buf + length;
- struct resource *r;
+ struct resource *root = m->private, *r;
const char *nm;
- for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
- if (p + 32 >= e) /* Better than nothing */
- break;
+ for (r = root->child; r != NULL; r = r->sibling) {
if ((nm = r->name) == 0) nm = "???";
- p += sprintf(p, "%016llx-%016llx: %s\n",
+ seq_printf(m, "%016llx-%016llx: %s\n",
(unsigned long long)r->start,
(unsigned long long)r->end, nm);
}
- return p-buf;
+ return 0;
}
+static int sparc_io_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sparc_io_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations sparc_io_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sparc_io_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
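/*
 * Unlike the old read_proc handler, which bailed out once fewer than
 * 32 bytes of buffer remained, seq_file emits the whole resource tree.
 * Hypothetical /proc/io_map line (addresses invented), using the same
 * format string as above:
 *
 *   00000000f0000000-00000000f00fffff: pcic_regs
 */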
#endif /* CONFIG_PROC_FS */
/*
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
- create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);
- create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma);
+ proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
+ proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}
#include <linux/delay.h>
#include <linux/smp.h>
+#include <asm/perf_counter.h>
#include <asm/ptrace.h>
#include <asm/local.h>
#include <asm/pcr.h>
* level 14 as our IRQ off level.
*/
-static int nmi_watchdog_active;
static int panic_on_timeout;
-int nmi_usable;
-EXPORT_SYMBOL_GPL(nmi_usable);
+/* nmi_active:
+ * >0: the NMI watchdog is active, but can be disabled
+ * <0: the NMI watchdog has not been set up, and cannot be enabled
+ * 0: the NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
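/*
 * Illustrative sketch, not part of this patch: how a profiling client
 * (e.g. oprofile) can use the tri-state to borrow the performance
 * counters and hand them back afterwards.
 */
	int wd_was_active = atomic_read(&nmi_active) > 0;

	if (wd_was_active)
		on_each_cpu(stop_nmi_watchdog, NULL, 1);

	/* ... exclusive use of the %pcr / %pic registers ... */

	if (wd_was_active)
		on_each_cpu(start_nmi_watchdog, NULL, 1);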
static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
void touch_nmi_watchdog(void)
{
- if (nmi_watchdog_active) {
+ if (atomic_read(&nmi_active)) {
int cpu;
for_each_present_cpu(cpu) {
if (do_panic || panic_on_oops)
panic("Non maskable interrupt");
+ nmi_exit();
local_irq_enable();
do_exit(SIGBUS);
}
local_cpu_data().__nmi_count++;
+ nmi_enter();
+
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+ if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
local_set(&__get_cpu_var(alert_counter), 0);
}
- if (nmi_usable) {
+ if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
pcr_ops->write(pcr_enable);
}
+
+ nmi_exit();
}
static inline unsigned int get_nmi_count(int cpu)
return cpu_data(cpu).__nmi_count;
}
-static int endflag __initdata;
-
static __init void nmi_cpu_busy(void *data)
{
local_irq_enable_in_hardirq();
printk(KERN_WARNING
"and attach the output of the 'dmesg' command.\n");
- nmi_usable = 0;
+ per_cpu(wd_enabled, cpu) = 0;
+ atomic_dec(&nmi_active);
}
-static void stop_watchdog(void *unused)
+void stop_nmi_watchdog(void *unused)
{
pcr_ops->write(PCR_PIC_PRIV);
+ __get_cpu_var(wd_enabled) = 0;
+ atomic_dec(&nmi_active);
}
static int __init check_nmi_watchdog(void)
unsigned int *prev_nmi_count;
int cpu, err;
+ if (!atomic_read(&nmi_active))
+ return 0;
+
prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
for_each_online_cpu(cpu) {
+ if (!per_cpu(wd_enabled, cpu))
+ continue;
if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
report_broken_nmi(cpu, prev_nmi_count);
}
endflag = 1;
- if (!nmi_usable) {
+ if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
+ atomic_set(&nmi_active, -1);
err = -ENODEV;
goto error;
}
kfree(prev_nmi_count);
return 0;
error:
- on_each_cpu(stop_watchdog, NULL, 1);
+ on_each_cpu(stop_nmi_watchdog, NULL, 1);
return err;
}
-static void start_watchdog(void *unused)
+void start_nmi_watchdog(void *unused)
{
+ __get_cpu_var(wd_enabled) = 1;
+ atomic_inc(&nmi_active);
+
+ pcr_ops->write(PCR_PIC_PRIV);
+ write_pic(picl_value(nmi_hz));
+
+ pcr_ops->write(pcr_enable);
+}
+
+static void nmi_adjust_hz_one(void *unused)
+{
+ if (!__get_cpu_var(wd_enabled))
+ return;
+
pcr_ops->write(PCR_PIC_PRIV);
write_pic(picl_value(nmi_hz));
void nmi_adjust_hz(unsigned int new_hz)
{
nmi_hz = new_hz;
- on_each_cpu(start_watchdog, NULL, 1);
+ on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);
static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
- on_each_cpu(stop_watchdog, NULL, 1);
+ on_each_cpu(stop_nmi_watchdog, NULL, 1);
return 0;
}
{
int err;
- nmi_usable = 1;
-
- on_each_cpu(start_watchdog, NULL, 1);
+ on_each_cpu(start_nmi_watchdog, NULL, 1);
err = check_nmi_watchdog();
if (!err) {
err = register_reboot_notifier(&nmi_reboot_notifier);
if (err) {
- nmi_usable = 0;
- on_each_cpu(stop_watchdog, NULL, 1);
+ on_each_cpu(stop_nmi_watchdog, NULL, 1);
+ atomic_set(&nmi_active, -1);
}
}
+ if (!err)
+ init_hw_perf_counters();
+
return err;
}