* 'stable/drivers-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xenbus: don't rely on xen_initial_domain to detect local xenstore
xenbus: Fix loopback event channel assuming domain 0
xen/pv-on-hvm:kexec: Fix implicit declaration of function 'xen_hvm_domain'
xen/pv-on-hvm kexec: add xs_reset_watches to shutdown watches from old kernel
xen/pv-on-hvm kexec: update xs_wire.h:xsd_sockmsg_type from xen-unstable
xen/pv-on-hvm kexec+kdump: reset PV devices in kexec or crash kernel
xen/pv-on-hvm kexec: rebind virqs to existing eventchannel ports
xen/pv-on-hvm kexec: prevent crash in xenwatch_thread() when stale watch events arrive
* 'stable/drivers.bugfixes-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen/pciback: Check if the device is found instead of blindly assuming so.
xen/pciback: Do not dereference psdev during printk when it is NULL.
xen: remove XEN_PLATFORM_PCI config option
xen: XEN_PVHVM depends on PCI
xen/pciback: double lock typo
xen/pciback: use mutex rather than spinlock in vpci backend
xen/pciback: Use mutexes when working with Xenbus state transitions.
xen/pciback: miscellaneous adjustments
xen/pciback: use mutex rather than spinlock in passthrough backend
xen/pciback: use resource_size()
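The resource_size() cleanup listed above swaps open-coded BAR length arithmetic for the helper from <linux/ioport.h>. A minimal sketch of the idiom, illustrative only (the function name is made up and is not a hunk from this pull):

#include <linux/ioport.h>

/* resource_size() yields res->end - res->start + 1, the length value
 * pciback previously computed by hand for each BAR. */
static resource_size_t example_bar_len(const struct resource *res)
{
	return resource_size(res);
}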
* 'stable/pci.fixes-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen/pci: support multi-segment systems
xen-swiotlb: When doing coherent alloc/dealloc check before swizzling the MFNs.
xen/pci: make bus notifier handler return sane values
xen-swiotlb: fix printk and panic args
xen-swiotlb: Fix wrong panic.
xen-swiotlb: Retry up to three times to allocate Xen-SWIOTLB
xen-pcifront: Update warning comment to use 'e820_host' option.
/*
- - * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux
- - * x86 PCI core to support the Xen PCI Frontend
+ + * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
+ + * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
+ + * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
+ + * concept of GSIs). Under PV we hook under the pcibios API for IRQs and
+ + * 0xcf8 PCI configuration read/write.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ + * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
+ +static int xen_pcifront_enable_irq(struct pci_dev *dev)
+ +{
+ + int rc;
+ + int share = 1;
+ + int pirq;
+ + u8 gsi;
+ +
+ + rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+ + if (rc < 0) {
+ + dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ + rc);
+ + return rc;
+ + }
+ + /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
+ + pirq = gsi;
+ +
+ + if (gsi < NR_IRQS_LEGACY)
+ + share = 0;
+ +
+ + rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
+ + if (rc < 0) {
+ + dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+ + gsi, pirq, rc);
+ + return rc;
+ + }
+ +
+ + dev->irq = rc;
+ + dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
+ + return 0;
+ +}
+ +
#ifdef CONFIG_ACPI
- -static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
- - int trigger, int polarity)
+ +static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
+ + bool set_pirq)
{
- - int rc, irq;
+ + int rc, pirq = -1, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
- - if (!xen_hvm_domain())
- - return -1;
+ + if (set_pirq)
+ + pirq = gsi;
map_irq.domid = DOMID_SELF;
map_irq.type = MAP_PIRQ_TYPE_GSI;
map_irq.index = gsi;
- - map_irq.pirq = -1;
+ + map_irq.pirq = pirq;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
if (rc) {
return -1;
}
- - if (trigger == ACPI_EDGE_SENSITIVE) {
+ + if (triggering == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
name = "ioapic-level";
}
+ + if (gsi_override >= 0)
+ + gsi = gsi_override;
+ +
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
+ + if (irq < 0)
+ + goto out;
+ +
+ + printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
+ +out:
+ + return irq;
+ +}
+ +
+ +static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ + int trigger, int polarity)
+ +{
+ + if (!xen_hvm_domain())
+ + return -1;
- - printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+ + return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
+ + false /* no mapping of GSI to PIRQ */);
+ +}
+ +
+ +#ifdef CONFIG_XEN_DOM0
+ +static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
+ +{
+ + int rc, irq;
+ + struct physdev_setup_gsi setup_gsi;
+ +
+ + if (!xen_pv_domain())
+ + return -1;
+ +
+ + printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+ + gsi, triggering, polarity);
+ +
+ + irq = xen_register_pirq(gsi, gsi_override, triggering, true);
+ +
+ + setup_gsi.gsi = gsi;
+ + setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ + setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+ +
+ + rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ + if (rc == -EEXIST)
+ + printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+ + else if (rc) {
+ + printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+ + gsi, rc);
+ + }
return irq;
}
+ +
+ +static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+ + int trigger, int polarity)
+ +{
+ + return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
+ +}
+ +#endif
#endif
#if defined(CONFIG_PCI_MSI)
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
- if (irq < 0)
+ +static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ +{
+ + int irq, ret, i;
+ + struct msi_desc *msidesc;
+ + int *v;
+ +
+ + v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ + if (!v)
+ + return -ENOMEM;
+ +
+ + if (type == PCI_CAP_ID_MSIX)
+ + ret = xen_pci_frontend_enable_msix(dev, v, nvec);
+ + else
+ + ret = xen_pci_frontend_enable_msi(dev, v);
+ + if (ret)
+ + goto error;
+ + i = 0;
+ + list_for_each_entry(msidesc, &dev->msi_list, list) {
+ + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ + (type == PCI_CAP_ID_MSIX) ?
+ + "pcifront-msi-x" :
+ + "pcifront-msi",
+ + DOMID_SELF);
+++ if (irq < 0) {
+++ ret = irq;
+ + goto free;
+++ }
+ + i++;
+ + }
+ + kfree(v);
+ + return 0;
+ +
+ +error:
+ + dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ +free:
+ + kfree(v);
+ + return ret;
+ +}
+ +
#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
if (msg.data != XEN_PIRQ_MSI_DATA ||
xen_irq_from_pirq(pirq) < 0) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
--- if (pirq < 0)
+++ if (pirq < 0) {
+++ irq = -ENODEV;
goto error;
+++ }
xen_msi_compose_msg(dev, pirq, &msg);
__write_msi_msg(msidesc, &msg);
dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
error:
dev_err(&dev->dev,
"Xen PCI frontend has not registered MSI/MSI-X support!\n");
--- return -ENODEV;
- -}
- -
- -/*
- - * For MSI interrupts we have to use drivers/xen/event.s functions to
- - * allocate an irq_desc and setup the right */
- -
- -
- -static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
- -{
- - int irq, ret, i;
- - struct msi_desc *msidesc;
- - int *v;
- -
- - v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
- - if (!v)
- - return -ENOMEM;
- -
- - if (type == PCI_CAP_ID_MSIX)
- - ret = xen_pci_frontend_enable_msix(dev, v, nvec);
- - else
- - ret = xen_pci_frontend_enable_msi(dev, v);
- - if (ret)
- - goto error;
- - i = 0;
- - list_for_each_entry(msidesc, &dev->msi_list, list) {
- - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
- - (type == PCI_CAP_ID_MSIX) ?
- - "pcifront-msi-x" :
- - "pcifront-msi",
- - DOMID_SELF);
- - if (irq < 0)
- - goto free;
- - i++;
- - }
- - kfree(v);
- - return 0;
- -
- -error:
- - dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
- -free:
- - kfree(v);
- - return ret;
- -}
- -
- -static void xen_teardown_msi_irqs(struct pci_dev *dev)
- -{
- - struct msi_desc *msidesc;
- -
- - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
- - if (msidesc->msi_attrib.is_msix)
- - xen_pci_frontend_disable_msix(dev);
- - else
- - xen_pci_frontend_disable_msi(dev);
- -
- - /* Free the IRQ's and the msidesc using the generic code. */
- - default_teardown_msi_irqs(dev);
- -}
- -
- -static void xen_teardown_msi_irq(unsigned int irq)
- -{
- - xen_destroy_irq(irq);
+++ return irq;
}
#ifdef CONFIG_XEN_DOM0
+++ static bool __read_mostly pci_seg_supported = true;
+++
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int ret = 0;
memset(&map_irq, 0, sizeof(map_irq));
map_irq.domid = domid;
--- map_irq.type = MAP_PIRQ_TYPE_MSI;
+++ map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
map_irq.index = -1;
map_irq.pirq = -1;
--- map_irq.bus = dev->bus->number;
+++ map_irq.bus = dev->bus->number |
+++ (pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSIX) {
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
}
--- ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+++ ret = -EINVAL;
+++ if (pci_seg_supported)
+++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+++ &map_irq);
+++ if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
+++ map_irq.type = MAP_PIRQ_TYPE_MSI;
+++ map_irq.index = -1;
+++ map_irq.pirq = -1;
+++ map_irq.bus = dev->bus->number;
+++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+++ &map_irq);
+++ if (ret != -EINVAL)
+++ pci_seg_supported = false;
+++ }
if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid);
return ret;
}
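The hunk above first asks for MAP_PIRQ_TYPE_MSI_SEG, which carries the PCI segment (domain) number in the upper bits of map_irq.bus, and falls back to the legacy MAP_PIRQ_TYPE_MSI on hypervisors that return -EINVAL, remembering the result in pci_seg_supported. A minimal sketch of just the bus/segment encoding, assuming only what the hunk shows (the helper name is invented):

#include <linux/pci.h>

/* Illustrative encoding for MAP_PIRQ_TYPE_MSI_SEG: bus number in the
 * low 8 bits, PCI segment/domain number from bit 16 upward. */
static inline u32 example_msi_seg_bus(struct pci_dev *dev)
{
	return dev->bus->number | (pci_domain_nr(dev->bus) << 16);
}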
#endif
- -#endif
- -static int xen_pcifront_enable_irq(struct pci_dev *dev)
+ +static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
- - int rc;
- - int share = 1;
- - int pirq;
- - u8 gsi;
- -
- - rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- - if (rc < 0) {
- - dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
- - rc);
- - return rc;
- - }
- -
- - rc = xen_allocate_pirq_gsi(gsi);
- - if (rc < 0) {
- - dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
- - gsi, rc);
- - return rc;
- - }
- - pirq = rc;
+ + struct msi_desc *msidesc;
- - if (gsi < NR_IRQS_LEGACY)
- - share = 0;
+ + msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ + if (msidesc->msi_attrib.is_msix)
+ + xen_pci_frontend_disable_msix(dev);
+ + else
+ + xen_pci_frontend_disable_msi(dev);
- - rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
- - if (rc < 0) {
- - dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
- - gsi, pirq, rc);
- - return rc;
- - }
+ + /* Free the IRQ's and the msidesc using the generic code. */
+ + default_teardown_msi_irqs(dev);
+ +}
- - dev->irq = rc;
- - dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
- - return 0;
+ +static void xen_teardown_msi_irq(unsigned int irq)
+ +{
+ + xen_destroy_irq(irq);
}
+ +#endif
+ +
int __init pci_xen_init(void)
{
if (!xen_pv_domain() || xen_initial_domain())
}
#ifdef CONFIG_XEN_DOM0
- -static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
- -{
- - int rc, pirq, irq = -1;
- - struct physdev_map_pirq map_irq;
- - int shareable = 0;
- - char *name;
- -
- - if (!xen_pv_domain())
- - return -1;
- -
- - if (triggering == ACPI_EDGE_SENSITIVE) {
- - shareable = 0;
- - name = "ioapic-edge";
- - } else {
- - shareable = 1;
- - name = "ioapic-level";
- - }
- - pirq = xen_allocate_pirq_gsi(gsi);
- - if (pirq < 0)
- - goto out;
- -
- - if (gsi_override >= 0)
- - irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
- - else
- - irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
- - if (irq < 0)
- - goto out;
- -
- - printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
- -
- - map_irq.domid = DOMID_SELF;
- - map_irq.type = MAP_PIRQ_TYPE_GSI;
- - map_irq.index = gsi;
- - map_irq.pirq = pirq;
- -
- - rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- - if (rc) {
- - printk(KERN_WARNING "xen map irq failed %d\n", rc);
- - return -1;
- - }
- -
- -out:
- - return irq;
- -}
- -
- -static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
- -{
- - int rc, irq;
- - struct physdev_setup_gsi setup_gsi;
- -
- - if (!xen_pv_domain())
- - return -1;
- -
- - printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
- - gsi, triggering, polarity);
- -
- - irq = xen_register_pirq(gsi, gsi_override, triggering);
- -
- - setup_gsi.gsi = gsi;
- - setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
- - setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- -
- - rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
- - if (rc == -EEXIST)
- - printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
- - else if (rc) {
- - printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
- - gsi, rc);
- - }
- -
- - return irq;
- -}
- -
static __init void xen_setup_acpi_sci(void)
{
int rc;
}
trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
- -
+ +
printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
"polarity=%d\n", gsi, trigger, polarity);
* the ACPI interpreter and keels over since IRQ 9 has not been
* setup as we had setup IRQ 20 for it).
*/
- - /* Check whether the GSI != IRQ */
if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- - if (irq >= 0 && irq != gsi)
- - /* Bugger, we MUST have that IRQ. */
+ + /* Use the provided value if it's valid. */
+ + if (irq >= 0)
gsi_override = irq;
}
return;
}
- -static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
- - int trigger, int polarity)
+ +int __init pci_xen_initial_domain(void)
{
- - return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
- -}
+ + int irq;
- -static int __init pci_xen_initial_domain(void)
- -{
#ifdef CONFIG_PCI_MSI
x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
- -
- - return 0;
- -}
- -
- -void __init xen_setup_pirqs(void)
- -{
- - int pirq, irq;
- -
- - pci_xen_initial_domain();
- -
- - if (0 == nr_ioapics) {
- - for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
- - pirq = xen_allocate_pirq_gsi(irq);
- - if (WARN(pirq < 0,
- - "Could not allocate PIRQ for legacy interrupt\n"))
- - break;
- - irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
- - }
- - return;
- - }
- -
/* Pre-allocate legacy irqs */
for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity;
continue;
xen_register_pirq(irq, -1 /* no GSI override */,
- - trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+ + trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
+ + true /* Map GSI to PIRQ */);
}
+ + if (0 == nr_ioapics) {
+ + for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ + xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
+ + }
+ + return 0;
}
- -#endif
- -#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
struct pci_dev *dev;
config XEN_PVHVM
def_bool y
-- - depends on XEN
-- - depends on X86_LOCAL_APIC
++ + depends on XEN && PCI && X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
int
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
---
---config XEN_DEBUG
--- bool "Enable Xen debug checks"
--- depends on XEN
--- default n
--- help
--- Enable various WARN_ON checks in the Xen MMU code.
--- Enabling this option WILL incur a significant performance overhead.
#include <xen/interface/io/pciif.h>
#include <asm/xen/pci.h>
#include <linux/interrupt.h>
- -#include <asm/atomic.h>
+ +#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
pci_name(dev), i);
if (pci_claim_resource(dev, i)) {
--- dev_err(&pdev->xdev->dev, "Could not claim "
--- "resource %s/%d! Device offline. Try "
--- "giving less than 4GB to domain.\n",
+++ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+++ "Device offline. Try using e820_host=1 in the guest config.\n",
pci_name(dev), i);
}
}
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
*/
---static DEFINE_SPINLOCK(irq_mapping_update_lock);
+++static DEFINE_MUTEX(irq_mapping_update_lock);
static LIST_HEAD(xen_irq_list_head);
irq = irq_alloc_desc_from(first, -1);
--- xen_irq_init(irq);
+++ if (irq >= 0)
+++ xen_irq_init(irq);
return irq;
}
return -1;
}
- -int xen_allocate_pirq_gsi(unsigned gsi)
- -{
- - return gsi;
- -}
- -
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
int irq = -1;
struct physdev_irq irq_op;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
handle_edge_irq, name);
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
{
int irq, ret;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
--- if (irq == -1)
+++ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
if (ret < 0)
goto error_irq;
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
xen_free_irq(irq);
--- return -1;
+++ return ret;
}
#endif
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
desc = irq_to_desc(irq);
if (!desc)
xen_free_irq(irq);
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return rc;
}
struct irq_info *info;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
list_for_each_entry(info, &xen_irq_list_head, list) {
--- if (info == NULL || info->type != IRQT_PIRQ)
+++ if (info->type != IRQT_PIRQ)
continue;
irq = info->irq;
if (info->u.pirq.pirq == pirq)
}
irq = -1;
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
{
int irq;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
irq = evtchn_to_irq[evtchn];
}
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
}
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
+ ++static int find_virq(unsigned int virq, unsigned int cpu)
+ ++{
+ ++ struct evtchn_status status;
+ ++ int port, rc = -ENOENT;
+ ++
+ ++ memset(&status, 0, sizeof(status));
+ ++ for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ ++ status.dom = DOMID_SELF;
+ ++ status.port = port;
+ ++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+ ++ if (rc < 0)
+ ++ continue;
+ ++ if (status.status != EVTCHNSTAT_virq)
+ ++ continue;
+ ++ if (status.u.virq == virq && status.vcpu == cpu) {
+ ++ rc = port;
+ ++ break;
+ ++ }
+ ++ }
+ ++ return rc;
+ ++}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- -- int evtchn, irq;
+ ++ int evtchn, irq, ret;
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(virq_to_irq, cpu)[virq];
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
- -- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- -- &bind_virq) != 0)
- -- BUG();
- -- evtchn = bind_virq.port;
+ ++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ ++ &bind_virq);
+ ++ if (ret == 0)
+ ++ evtchn = bind_virq.port;
+ ++ else {
+ ++ if (ret == -EEXIST)
+ ++ ret = find_virq(virq, cpu);
+ ++ BUG_ON(ret < 0);
+ ++ evtchn = ret;
+ ++ }
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
}
out:
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
xen_free_irq(irq);
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
will also be masked. */
disable_irq(irq);
--- spin_lock(&irq_mapping_update_lock);
+++ mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
xen_irq_info_evtchn_init(irq, evtchn);
--- spin_unlock(&irq_mapping_update_lock);
+++ mutex_unlock(&irq_mapping_update_lock);
/* new event channels are always bound to cpu 0 */
irq_set_affinity(irq, cpumask_of(0));
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
+++ BUG_ON(!evtchn_to_irq);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
evtchn_to_irq[i] = -1;
} else {
irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
- - xen_setup_pirqs();
+ + pci_xen_initial_domain();
}
}
kfree(to_xenbus_device(dev));
}
- -static ssize_t xendev_show_nodename(struct device *dev,
- - struct device_attribute *attr, char *buf)
+ +static ssize_t nodename_show(struct device *dev,
+ + struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
- -static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
- -static ssize_t xendev_show_devtype(struct device *dev,
- - struct device_attribute *attr, char *buf)
+ +static ssize_t devtype_show(struct device *dev,
+ + struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
- -static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
- -static ssize_t xendev_show_modalias(struct device *dev,
- - struct device_attribute *attr, char *buf)
+ +static ssize_t modalias_show(struct device *dev,
+ + struct device_attribute *attr, char *buf)
{
- - return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
+ + return sprintf(buf, "%s:%s\n", dev->bus->name,
+ + to_xenbus_device(dev)->devicetype);
}
- -static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
+ +
+ +struct device_attribute xenbus_dev_attrs[] = {
+ + __ATTR_RO(nodename),
+ + __ATTR_RO(devtype),
+ + __ATTR_RO(modalias),
+ + __ATTR_NULL
+ +};
+ +EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
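Exporting xenbus_dev_attrs lets every xenbus bus type share one attribute list instead of calling device_create_file() per device; a hunk further down points the frontend bus's .dev_attrs at it. As an illustration only (the bus name and variable are invented; the extern declaration normally comes from xenbus_probe.h):

#include <linux/device.h>

extern struct device_attribute xenbus_dev_attrs[];

/* Hypothetical bus type reusing the shared attribute array via the
 * 3.x-era .dev_attrs field of struct bus_type. */
static struct bus_type example_xen_backend_bus = {
	.name      = "xen-backend",
	.dev_attrs = xenbus_dev_attrs,
};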
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
if (err)
goto fail;
- - err = device_create_file(&xendev->dev, &dev_attr_nodename);
- - if (err)
- - goto fail_unregister;
- -
- - err = device_create_file(&xendev->dev, &dev_attr_devtype);
- - if (err)
- - goto fail_remove_nodename;
- -
- - err = device_create_file(&xendev->dev, &dev_attr_modalias);
- - if (err)
- - goto fail_remove_devtype;
- -
return 0;
- -fail_remove_devtype:
- - device_remove_file(&xendev->dev, &dev_attr_devtype);
- -fail_remove_nodename:
- - device_remove_file(&xendev->dev, &dev_attr_nodename);
- -fail_unregister:
- - device_unregister(&xendev->dev);
fail:
kfree(xendev);
return err;
device_initcall(xenbus_probe_initcall);
- --static int __init xenbus_init(void)
+ ++/* Set up event channel for xenstored which is run as a local process
+ ++ * (this is normally used only in dom0)
+ ++ */
+ ++static int __init xenstored_local_init(void)
{
int err = 0;
unsigned long page = 0;
+ ++ struct evtchn_alloc_unbound alloc_unbound;
- -- DPRINTK("");
+ ++ /* Allocate Xenstore page */
+ ++ page = get_zeroed_page(GFP_KERNEL);
+ ++ if (!page)
+ ++ goto out_err;
- -- err = -ENODEV;
- -- if (!xen_domain())
- -- return err;
+ ++ xen_store_mfn = xen_start_info->store_mfn =
+ ++ pfn_to_mfn(virt_to_phys((void *)page) >>
+ ++ PAGE_SHIFT);
- -- /*
- -- * Domain0 doesn't have a store_evtchn or store_mfn yet.
- -- */
- -- if (xen_initial_domain()) {
- -- struct evtchn_alloc_unbound alloc_unbound;
+ ++ /* Next allocate a local port which xenstored can bind to */
+ ++ alloc_unbound.dom = DOMID_SELF;
+ ++ alloc_unbound.remote_dom = DOMID_SELF;
- -- /* Allocate Xenstore page */
- -- page = get_zeroed_page(GFP_KERNEL);
- -- if (!page)
- -- goto out_error;
+ ++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ ++ &alloc_unbound);
+ ++ if (err == -ENOSYS)
+ ++ goto out_err;
- -- xen_store_mfn = xen_start_info->store_mfn =
- -- pfn_to_mfn(virt_to_phys((void *)page) >>
- -- PAGE_SHIFT);
+ ++ BUG_ON(err);
+ ++ xen_store_evtchn = xen_start_info->store_evtchn =
+ ++ alloc_unbound.port;
- -- /* Next allocate a local port which xenstored can bind to */
- -- alloc_unbound.dom = DOMID_SELF;
- -- alloc_unbound.remote_dom = 0;
+ ++ return 0;
- -- err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
- -- &alloc_unbound);
- -- if (err == -ENOSYS)
- -- goto out_error;
+ ++ out_err:
+ ++ if (page != 0)
+ ++ free_page(page);
+ ++ return err;
+ ++}
- -- BUG_ON(err);
- -- xen_store_evtchn = xen_start_info->store_evtchn =
- -- alloc_unbound.port;
+ ++static int __init xenbus_init(void)
+ ++{
+ ++ int err = 0;
- -- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ ++ if (!xen_domain())
+ ++ return -ENODEV;
+ ++
+ ++ if (xen_hvm_domain()) {
+ ++ uint64_t v = 0;
+ ++ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ ++ if (err)
+ ++ goto out_error;
+ ++ xen_store_evtchn = (int)v;
+ ++ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ ++ if (err)
+ ++ goto out_error;
+ ++ xen_store_mfn = (unsigned long)v;
+ ++ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
} else {
- -- if (xen_hvm_domain()) {
- -- uint64_t v = 0;
- -- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- -- if (err)
- -- goto out_error;
- -- xen_store_evtchn = (int)v;
- -- err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ ++ xen_store_evtchn = xen_start_info->store_evtchn;
+ ++ xen_store_mfn = xen_start_info->store_mfn;
+ ++ if (xen_store_evtchn)
+ ++ xenstored_ready = 1;
+ ++ else {
+ ++ err = xenstored_local_init();
if (err)
goto out_error;
- -- xen_store_mfn = (unsigned long)v;
- -- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
- -- } else {
- -- xen_store_evtchn = xen_start_info->store_evtchn;
- -- xen_store_mfn = xen_start_info->store_mfn;
- -- xen_store_interface = mfn_to_virt(xen_store_mfn);
- -- xenstored_ready = 1;
}
+ ++ xen_store_interface = mfn_to_virt(xen_store_mfn);
}
/* Initialize the interface to xenstore. */
proc_mkdir("xen", NULL);
#endif
- -- return 0;
- --
- -- out_error:
- -- if (page != 0)
- -- free_page(page);
- --
+ ++ out_error:
return err;
}
xenbus_otherend_changed(watch, vec, len, 1);
}
- -static struct device_attribute xenbus_frontend_dev_attrs[] = {
- - __ATTR_NULL
- -};
- -
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- - .dev_attrs = xenbus_frontend_dev_attrs,
+ + .dev_attrs = xenbus_dev_attrs,
.pm = &xenbus_pm_ops,
},
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+ ++static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
+ ++static int backend_state;
+ ++
+ ++static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
+ ++ const char **v, unsigned int l)
+ ++{
+ ++ xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ ++ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ ++ v[XS_WATCH_PATH], xenbus_strstate(backend_state));
+ ++ wake_up(&backend_state_wq);
+ ++}
+ ++
+ ++static void xenbus_reset_wait_for_backend(char *be, int expected)
+ ++{
+ ++ long timeout;
+ ++ timeout = wait_event_interruptible_timeout(backend_state_wq,
+ ++ backend_state == expected, 5 * HZ);
+ ++ if (timeout <= 0)
+ ++ printk(KERN_INFO "XENBUS: backend %s timed out.\n", be);
+ ++}
+ ++
+ ++/*
+ ++ * Reset frontend if it is in Connected or Closed state.
+ ++ * Wait for backend to catch up.
+ ++ * State Connected happens during kdump, Closed after kexec.
+ ++ */
+ ++static void xenbus_reset_frontend(char *fe, char *be, int be_state)
+ ++{
+ ++ struct xenbus_watch be_watch;
+ ++
+ ++ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ ++ be, xenbus_strstate(be_state));
+ ++
+ ++ memset(&be_watch, 0, sizeof(be_watch));
+ ++ be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be);
+ ++ if (!be_watch.node)
+ ++ return;
+ ++
+ ++ be_watch.callback = xenbus_reset_backend_state_changed;
+ ++ backend_state = XenbusStateUnknown;
+ ++
+ ++ printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", be);
+ ++ register_xenbus_watch(&be_watch);
+ ++
+ ++ /* fall through to forward backend to state XenbusStateInitialising */
+ ++ switch (be_state) {
+ ++ case XenbusStateConnected:
+ ++ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
+ ++ xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+ ++
+ ++ case XenbusStateClosing:
+ ++ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
+ ++ xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+ ++
+ ++ case XenbusStateClosed:
+ ++ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
+ ++ xenbus_reset_wait_for_backend(be, XenbusStateInitWait);
+ ++ }
+ ++
+ ++ unregister_xenbus_watch(&be_watch);
+ ++ printk(KERN_INFO "XENBUS: reconnect done on %s\n", be);
+ ++ kfree(be_watch.node);
+ ++}
+ ++
+ ++static void xenbus_check_frontend(char *class, char *dev)
+ ++{
+ ++ int be_state, fe_state, err;
+ ++ char *backend, *frontend;
+ ++
+ ++ frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev);
+ ++ if (!frontend)
+ ++ return;
+ ++
+ ++ err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state);
+ ++ if (err != 1)
+ ++ goto out;
+ ++
+ ++ switch (fe_state) {
+ ++ case XenbusStateConnected:
+ ++ case XenbusStateClosed:
+ ++ printk(KERN_DEBUG "XENBUS: frontend %s %s\n",
+ ++ frontend, xenbus_strstate(fe_state));
+ ++ backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
+ ++ if (!backend || IS_ERR(backend))
+ ++ goto out;
+ ++ err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state);
+ ++ if (err == 1)
+ ++ xenbus_reset_frontend(frontend, backend, be_state);
+ ++ kfree(backend);
+ ++ break;
+ ++ default:
+ ++ break;
+ ++ }
+ ++out:
+ ++ kfree(frontend);
+ ++}
+ ++
+ ++static void xenbus_reset_state(void)
+ ++{
+ ++ char **devclass, **dev;
+ ++ int devclass_n, dev_n;
+ ++ int i, j;
+ ++
+ ++ devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n);
+ ++ if (IS_ERR(devclass))
+ ++ return;
+ ++
+ ++ for (i = 0; i < devclass_n; i++) {
+ ++ dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n);
+ ++ if (IS_ERR(dev))
+ ++ continue;
+ ++ for (j = 0; j < dev_n; j++)
+ ++ xenbus_check_frontend(devclass[i], dev[j]);
+ ++ kfree(dev);
+ ++ }
+ ++ kfree(devclass);
+ ++}
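For context, xenbus_reset_state() above walks the guest's frontend area of xenstore and follows each device's backend link. The layout it relies on looks roughly like this, sketched as a comment (the vif/0 device is an invented example; the key names and state value 4 == XenbusStateConnected come from the xenbus interface):

/*
 * device/vif/0/state    = "4"      frontend state (XenbusStateConnected)
 * device/vif/0/backend  = "/local/domain/0/backend/vif/<domid>/0"
 * <backend path>/state  = "4"      backend state, watched during the reset
 *
 * xenbus_check_frontend() reads the frontend state, follows the backend
 * link, and hands both paths to xenbus_reset_frontend() to renegotiate.
 */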
+ ++
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
+ ++ /* reset devices in Connected or Closed state */
+ ++ if (xen_hvm_domain())
+ ++ xenbus_reset_state();
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);