- clock-frequency : The frequency of the main counter, in Hz. Optional.
+- always-on : A boolean property. If present, the timer is powered through an
+  always-on power domain and therefore never loses context.
+
Example:
timer {
* "sata-phy" for the SATA 6.0Gbps PHY
Optional properties:
+- dma-coherent : Present if dma operations are coherent
- status : Shall be "ok" if enabled or "disabled" if disabled.
Default is "ok".
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy2 0>;
<0x0 0x1f23e000 0x0 0x1000>,
<0x0 0x1f237000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy3 0>;
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
clocks = <&emac_0_clk>;
- clocks-names = "stmmaceth";
+ clock-names = "stmmaceth";
};
- max-frame-size: See ethernet.txt file in the same directory
- clocks: If present, the first clock should be the GMAC main clock,
further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth".
Examples:
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
interrupt-parent = <&PIO3>;
#interrupt-cells = <2>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
- interrupts-names = "card-detect";
+ interrupt-names = "card-detect";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc>;
};
reg = <0x100000 0x3000>;
reg-names = "mpu";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
op-mode = <0>; /* MCASP_IIS_MODE */
tdm-slots = <2>;
serial-dir = <
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
- reg - <int> - I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+ DVDD-supply : power supplies for the device as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt
Optional properties:
3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
- DVDD-supply : power supplies for the device as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
CODEC output pins:
* HPL
F: drivers/extcon/
F: Documentation/extcon/
+EXYNOS DP DRIVER
+M: Jingoo Han <jg1.han@samsung.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/exynos/exynos_dp*
+
EXYNOS MIPI DISPLAY DRIVERS
M: Inki Dae <inki.dae@samsung.com>
M: Donghwa Lee <dh09.lee@samsung.com>
F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
-M: Jeff Layton <jlayton@redhat.com>
+M: Jeff Layton <jlayton@poochiereds.net>
M: J. Bruce Fields <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
+M: Marc Zyngier <marc.zyngier@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
S: Supported
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
+F: virt/kvm/arm/
+F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
F: drivers/block/brd.c
RANDOM NUMBER DRIVER
-M: Theodore Ts'o" <tytso@mit.edu>
+M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
F: drivers/char/random.c
VERSION = 3
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
resume_kernel_mode:
-#ifdef CONFIG_PREEMPT
-
- ; This is a must for preempt_schedule_irq()
+ ; Disable Interrupts from this point on
+ ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+ ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
IRQ_DISABLE r9
+#ifdef CONFIG_PREEMPT
+
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x8000>;
PIO5: gpio@fee00000 {
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x8000>;
PIO13: gpio@fe820000 {
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x6000>;
PIO0: gpio@fe610000 {
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x10000>;
PIO5: gpio@fee00000 {
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x6000>;
PIO13: gpio@fe820000 {
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
- depends on ARM_VIRT_EXT && ARM_LPAE
+ depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (boot_hyp_pgd) {
unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(boot_hyp_pgd);
+ free_pages((unsigned long)boot_hyp_pgd, pgd_order);
boot_hyp_pgd = NULL;
}
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(init_bounce_page);
+ free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex);
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
- kfree(hyp_pgd);
+ free_pages((unsigned long)hyp_pgd, pgd_order);
hyp_pgd = NULL;
}
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
phys_addr_t phys_base;
- init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
(unsigned long)phys_base);
}
- hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+ boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
if (!hyp_pgd || !boot_hyp_pgd) {
kvm_err("Hyp mode PGD not allocated\n");
err = -ENOMEM;
<0x0 0x1f21e000 0x0 0x1000>,
<0x0 0x1f217000 0x0 0x1000>;
interrupts = <0x0 0x86 0x4>;
+ dma-coherent;
status = "disabled";
clocks = <&sata01clk 0>;
phys = <&phy1 0>;
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sata23clk 0>;
phys = <&phy2 0>;
<0x0 0x1f23d000 0x0 0x1000>,
<0x0 0x1f23e000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sata45clk 0>;
phys = <&phy3 0>;
}
/* no options parsing yet */
- if (paddr) {
- set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
- early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
- }
+ if (paddr)
+ early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
printch = match->printch;
early_console = &early_console_dev;
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ struct device *dev = _dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- dma_ops = &coherent_swiotlb_dma_ops;
+ /*
+ * These must be registered before of_platform_populate().
+ */
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+ dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
if (pmd_none(*pmd))
return 0;
+ if (pmd_sect(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
+++ /dev/null
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb() barrier()
-#define read_barrier_depends() barrier()
-#define wmb() barrier()
-#define mb() barrier()
-#define smp_rmb() barrier()
-#define smp_read_barrier_depends() barrier()
-#define smp_wmb() barrier()
-#define smp_mb() barrier()
-
-/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
-#define set_mb(var, value) \
- do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += resource.h
+
header-y += bitsperlong.h
header-y += byteorder.h
header-y += errno.h
header-y += pdc.h
header-y += posix_types.h
header-y += ptrace.h
-header-y += resource.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
+++ /dev/null
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX 10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in its own section so that tools may locate it more easily.
*/
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
- getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+ getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
printf("\n\rLinux/PowerPC load: %s", cmdline);
/* If possible, edit the command line */
if (console_ops.edit_cmdline)
- console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+ console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
printf("\n\r");
/* Put the command line back into the devtree for the kernel */
* built-in command line wasn't set by an external tool */
if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
memmove(cmdline, loader_info.cmdline,
- min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+ min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
if (console_ops.open && (console_ops.open() < 0))
exit();
#include "types.h"
#include "string.h"
-#define COMMAND_LINE_SIZE 512
+#define BOOT_COMMAND_LINE_SIZE 2048
#define MAX_PATH_LEN 256
#define MAX_PROP_LEN 256 /* What should this be? */
* The buffer is put in its own section so that tools may locate it more easily.
*/
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
- getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+ getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
else
setprop_str(chosen, "bootargs", cmdline);
* size except the last one in the list to be as well.
*/
struct opal_sg_entry {
- void *data;
- long length;
+ __be64 data;
+ __be64 length;
};
-/* sg list */
+/* SG list */
struct opal_sg_list {
- unsigned long num_entries;
- struct opal_sg_list *next;
+ __be64 length;
+ __be64 next;
struct opal_sg_entry entry[];
};
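Both header fields are big-endian because firmware walks the list in
place. A minimal sketch of how one node's header is filled in (this
mirrors opal_vmalloc_to_sg_list() further below; nr_entries and
next_node are illustrative names):

	/* 16 bytes of header (length + next) precede the entries. */
	sg->length = cpu_to_be64(nr_entries * sizeof(struct opal_sg_entry) + 16);
	/* Physical address of the next node, or 0 for the last one. */
	sg->next = cpu_to_be64(next_node ? __pa(next_node) : 0);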
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, __be32 *data, uint32_t sz);
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_send_ack_elog(uint64_t log_id);
void opal_resend_pending_logs(void);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
int64_t opal_dump_ack(uint32_t dump_id);
int64_t opal_dump_resend_notification(void);
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
int64_t opal_sync_host_reboot(void);
int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
- size_t length);
+ uint64_t length);
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
- size_t length);
+ uint64_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+ int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
extern void hvc_opal_init_early(void);
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
- int depth, void *data);
-
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
extern int __opal_async_get_token(void);
extern int opal_async_get_token_interruptible(void);
extern int __opal_async_release_token(int token);
extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
-extern void hvc_opal_init_early(void);
-
struct rtc_time;
extern int opal_set_rtc_time(struct rtc_time *tm);
extern void opal_get_rtc_time(struct rtc_time *tm);
extern void opal_lpc_init(void);
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
EXPORT_SYMBOL(flush_instruction_cache);
#endif
EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC32
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
pr_info("rtas_flash: no firmware flash support\n");
- return 1;
+ return -EINVAL;
}
rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
*/
.globl kvm_start_guest
kvm_start_guest:
+
+ /* Set runlatch bit the minute you wake up from nap */
+ mfspr r1, SPRN_CTRLF
+ ori r1, r1, 1
+ mtspr SPRN_CTRLT, r1
+
ld r2,PACATOC(r13)
li r0,KVM_HWTHREAD_IN_KVM
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
+ /* Clear the runlatch bit before napping */
+ mfspr r2, SPRN_CTRLF
+ clrrdi r2, r2, 1
+ mtspr SPRN_CTRLT, r2
+
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
/*
* Take a nap until a decrementer or external or doorbell interrupt
- * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+ * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+ * runlatch bit before napping.
*/
+ mfspr r2, SPRN_CTRLF
+ clrrdi r2, r2, 1
+ mtspr SPRN_CTRLT, r2
+
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- /* Add AVAL part */
- if (psize != apsize) {
- /*
- * MPSS, 64K base page size and 16MB parge page size
- * We don't need all the bits, but rest of the bits
- * must be ignored by the processor.
- * vpn cover upto 65 bits of va. (0...65) and we need
- * 58..64 bits of va.
- */
- va |= (vpn & 0xfe);
- }
+ /*
+ * AVAL bits:
+ * We don't need all the bits, but the rest of the bits
+ * must be ignored by the processor.
+ * vpn covers up to 65 bits of va (0..65) and we need
+ * bits 58..64 of va.
+ */
+ va |= (vpn & 0xfe); /* AVAL */
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- /* Add AVAL part */
- if (psize != apsize) {
- /*
- * MPSS, 64K base page size and 16MB parge page size
- * We don't need all the bits, but rest of the bits
- * must be ignored by the processor.
- * vpn cover upto 65 bits of va. (0...65) and we need
- * 58..64 bits of va.
- */
- va |= (vpn & 0xfe);
- }
+ /*
+ * AVAL bits:
+ * We don't need all the bits, but the rest of the bits
+ * must be ignored by the processor.
+ * vpn covers up to 65 bits of va (0..65) and we need
+ * bits 58..64 of va.
+ */
+ va |= (vpn & 0xfe);
va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
return copy_len;
}
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
- u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+ unsigned long version,
+ unsigned long index)
{
- WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
+ pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+ phys_4096,
+ version,
+ index);
+ WARN_ON(!IS_ALIGNED(phys_4096, 4096));
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
- virt_to_phys(page),
+ phys_4096,
version,
index);
}
+static unsigned long h_get_24x7_catalog_page(char page[],
+ u64 version, u32 index)
+{
+ return h_get_24x7_catalog_page_(virt_to_phys(page),
+ version, index);
+}
+
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
ssize_t ret = 0;
size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
loff_t page_offset = 0;
- uint32_t catalog_version_num = 0;
+ uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
if (!page)
goto e_free;
}
- catalog_version_num = be32_to_cpu(page_0->version);
+ catalog_version_num = be64_to_cpu(page_0->version);
catalog_page_len = be32_to_cpu(page_0->length);
catalog_len = catalog_page_len * 4096;
page, 4096, page_offset * 4096);
e_free:
if (hret)
- pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
- catalog_version_num, page_offset, hret);
+ pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+ " rc=%ld\n",
+ catalog_version_num, page_offset, hret);
kfree(page);
pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
static DEVICE_ATTR_RO(_name)
PAGE_0_ATTR(catalog_version, "%lld\n",
- (unsigned long long)be32_to_cpu(page_0->version));
+ (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- pr_info("not a virtualized system, not enabling\n");
+ pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
- pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+ pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}
return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- pr_info("not a virtualized system, not enabling\n");
+ pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
- pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+ pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}
.default_attrs = dump_default_attrs,
};
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg1;
- while (list) {
- sg1 = list->next;
- kfree(list);
- list = sg1;
- }
- list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
- struct opal_sg_list *sg1, *list = NULL;
- void *addr;
- int64_t size;
-
- addr = dump->buffer;
- size = dump->size;
-
- sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1)
- goto nomem;
-
- list = sg1;
- sg1->num_entries = 0;
- while (size > 0) {
- /* Translate virtual address to physical address */
- sg1->entry[sg1->num_entries].data =
- (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
- if (size > PAGE_SIZE)
- sg1->entry[sg1->num_entries].length = PAGE_SIZE;
- else
- sg1->entry[sg1->num_entries].length = size;
-
- sg1->num_entries++;
- if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1->next)
- goto nomem;
-
- sg1 = sg1->next;
- sg1->num_entries = 0;
- }
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return list;
-
-nomem:
- pr_err("%s : Failed to allocate memory\n", __func__);
- free_dump_sg_list(list);
- return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg, *next;
-
- for (sg = list; sg; sg = next) {
- next = sg->next;
- /* Don't translate NULL pointer for last entry */
- if (sg->next)
- sg->next = (struct opal_sg_list *)__pa(sg->next);
- else
- sg->next = NULL;
-
- /* Convert num_entries to length */
- sg->num_entries =
- sg->num_entries * sizeof(struct opal_sg_entry) + 16;
- }
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
{
+ __be32 id, size, type;
int rc;
- *type = 0xffffffff;
- rc = opal_dump_info2(id, size, type);
+ type = cpu_to_be32(0xffffffff);
+ rc = opal_dump_info2(&id, &size, &type);
if (rc == OPAL_PARAMETER)
- rc = opal_dump_info(id, size);
+ rc = opal_dump_info(&id, &size);
+
+ *dump_id = be32_to_cpu(id);
+ *dump_size = be32_to_cpu(size);
+ *dump_type = be32_to_cpu(type);
if (rc)
pr_warn("%s: Failed to get dump info (%d)\n",
}
/* Generate SG list */
- list = dump_data_to_sglist(dump);
+ list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
if (!list) {
rc = -ENOMEM;
goto out;
}
- /* Translate sg list addr to real address */
- sglist_to_phy_addr(list);
-
/* First entry address */
addr = __pa(list);
__func__, dump->id);
/* Free SG list */
- free_dump_sg_list(list);
+ opal_free_sg_list(list);
out:
return rc;
static void elog_work_fn(struct work_struct *work)
{
- size_t elog_size;
+ __be64 size;
+ __be64 id;
+ __be64 type;
+ uint64_t elog_size;
uint64_t log_id;
uint64_t elog_type;
int rc;
char name[2+16+1];
- rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+ rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: Opal log read failed\n");
return;
}
+ elog_size = be64_to_cpu(size);
+ log_id = be64_to_cpu(id);
+ elog_type = be64_to_cpu(type);
+
BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
/* XXX: Assume candidate image size is <= 1GB */
#define MAX_IMAGE_SIZE 0x40000000
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
/* Image status */
enum {
IMAGE_INVALID,
*/
static inline void opal_flash_validate(void)
{
- struct validate_flash_t *args_buf = &validate_flash_data;
+ long ret;
+ void *buf = validate_flash_data.buf;
+ __be32 size, result;
- args_buf->status = opal_validate_flash(__pa(args_buf->buf),
- &(args_buf->buf_size),
- &(args_buf->result));
+ ret = opal_validate_flash(__pa(buf), &size, &result);
+
+ validate_flash_data.status = ret;
+ validate_flash_data.buf_size = be32_to_cpu(size);
+ validate_flash_data.result = be32_to_cpu(result);
}
/*
return count;
}
-/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg1;
- while (list) {
- sg1 = list->next;
- kfree(list);
- list = sg1;
- }
- list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- * -----------------------------------
- * | VER (8) | Entry length in bytes |
- * -----------------------------------
- * | Pointer to next entry |
- * -----------------------------------
- * | Address of memory area 1 |
- * -----------------------------------
- * | Length of memory area 1 |
- * -----------------------------------
- * | ......... |
- * -----------------------------------
- * | ......... |
- * -----------------------------------
- * | Address of memory area N |
- * -----------------------------------
- * | Length of memory area N |
- * -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
- struct opal_sg_list *sg1, *list = NULL;
- void *addr;
- int size;
-
- addr = image_data.data;
- size = image_data.size;
-
- sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1)
- return NULL;
-
- list = sg1;
- sg1->num_entries = 0;
- while (size > 0) {
- /* Translate virtual address to physical address */
- sg1->entry[sg1->num_entries].data =
- (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
- if (size > PAGE_SIZE)
- sg1->entry[sg1->num_entries].length = PAGE_SIZE;
- else
- sg1->entry[sg1->num_entries].length = size;
-
- sg1->num_entries++;
- if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1->next) {
- pr_err("%s : Failed to allocate memory\n",
- __func__);
- goto nomem;
- }
-
- sg1 = sg1->next;
- sg1->num_entries = 0;
- }
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return list;
-nomem:
- free_sg_list(list);
- return NULL;
-}
-
/*
* OPAL update flash
*/
static int opal_flash_update(int op)
{
- struct opal_sg_list *sg, *list, *next;
+ struct opal_sg_list *list;
unsigned long addr;
int64_t rc = OPAL_PARAMETER;
goto flash;
}
- list = image_data_to_sglist();
+ list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
if (!list)
goto invalid_img;
/* First entry address */
addr = __pa(list);
- /* Translate sg list address to absolute */
- for (sg = list; sg; sg = next) {
- next = sg->next;
- /* Don't translate NULL pointer for last entry */
- if (sg->next)
- sg->next = (struct opal_sg_list *)__pa(sg->next);
- else
- sg->next = NULL;
-
- /*
- * Convert num_entries to version/length format
- * to satisfy OPAL.
- */
- sg->num_entries = (SG_LIST_VERSION << 56) |
- (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
- }
-
pr_alert("FLASH: Image is %u bytes\n", image_data.size);
pr_alert("FLASH: Image update requested\n");
pr_alert("FLASH: Image will be updated during system reboot\n");
struct kobj_attribute kobj_attr;
};
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
{
struct opal_msg msg;
- int ret, token;
+ ssize_t ret;
+ int token;
token = opal_async_get_token_interruptible();
if (token < 0) {
ret = opal_async_wait_response(token, &msg);
if (ret) {
- pr_err("%s: Failed to wait for the async response, %d\n",
+ pr_err("%s: Failed to wait for the async response, %zd\n",
__func__, ret);
goto out_token;
}
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
- int ret;
+ ssize_t ret;
mutex_lock(&opal_sysparam_mutex);
ret = opal_get_sys_param(attr->param_id, attr->param_size,
memcpy(buf, param_data_buf, attr->param_size);
+ ret = attr->param_size;
out:
mutex_unlock(&opal_sysparam_mutex);
- return ret ? ret : attr->param_size;
+ return ret;
}
static ssize_t sys_param_store(struct kobject *kobj,
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
- int ret;
+ ssize_t ret;
+
+ /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+ if (count > MAX_PARAM_DATA_LEN)
+ count = MAX_PARAM_DATA_LEN;
mutex_lock(&opal_sysparam_mutex);
memcpy(param_data_buf, buf, count);
ret = opal_set_sys_param(attr->param_id, attr->param_size,
param_data_buf);
mutex_unlock(&opal_sysparam_mutex);
- return ret ? ret : count;
+ if (!ret)
+ ret = count;
+ return ret;
}
void __init opal_sys_param_init(void)
}
if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
- pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+ pr_err("SYSPARAM: Missing property param-len in the DT\n");
goto out_free_perm;
}
if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
- pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+ pr_err("SYSPARAM: Missing property param-perm in the DT\n");
goto out_free_perm;
}
/* For each of the parameters, populate the parameter attributes */
for (i = 0; i < count; i++) {
+ if (size[i] > MAX_PARAM_DATA_LEN) {
+ pr_warn("SYSPARAM: Not creating parameter %d as size "
+ "exceeds buffer length\n", i);
+ continue;
+ }
+
sysfs_attr_init(&attr[i].kobj_attr.attr);
attr[i].param_id = id[i];
attr[i].param_size = size[i];
void opal_notifier_enable(void)
{
int64_t rc;
- uint64_t evt = 0;
+ __be64 evt = 0;
atomic_set(&opal_notifier_hold, 0);
/* Process pending events */
rc = opal_poll_events(&evt);
if (rc == OPAL_SUCCESS && evt)
- opal_do_notifier(evt);
+ opal_do_notifier(be64_to_cpu(evt));
}
void opal_notifier_disable(void)
opal_handle_interrupt(virq_to_hw(irq), &events);
- opal_do_notifier(events);
+ opal_do_notifier(be64_to_cpu(events));
return IRQ_HANDLED;
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size)
+{
+ struct opal_sg_list *sg, *first = NULL;
+ unsigned long i = 0;
+
+ sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sg)
+ goto nomem;
+
+ first = sg;
+
+ while (vmalloc_size > 0) {
+ uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+ uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+ sg->entry[i].data = cpu_to_be64(data);
+ sg->entry[i].length = cpu_to_be64(length);
+ i++;
+
+ if (i >= SG_ENTRIES_PER_NODE) {
+ struct opal_sg_list *next;
+
+ next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!next)
+ goto nomem;
+
+ sg->length = cpu_to_be64(
+ i * sizeof(struct opal_sg_entry) + 16);
+ i = 0;
+ sg->next = cpu_to_be64(__pa(next));
+ sg = next;
+ }
+
+ vmalloc_addr += length;
+ vmalloc_size -= length;
+ }
+
+ sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+ return first;
+
+nomem:
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ opal_free_sg_list(first);
+ return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+ while (sg) {
+ uint64_t next = be64_to_cpu(sg->next);
+
+ kfree(sg);
+
+ if (next)
+ sg = __va(next);
+ else
+ sg = NULL;
+ }
+}
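Callers pair these helpers the same way the dump and flash paths do;
a minimal usage sketch (buf, len and the opal_update_flash() call
stand in for the caller's specifics):

	struct opal_sg_list *list = opal_vmalloc_to_sg_list(buf, len);

	if (!list)
		return -ENOMEM;
	/* Firmware is handed the physical address of the first node. */
	rc = opal_update_flash(__pa(list));
	opal_free_sg_list(list);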
pci_name(dev));
continue;
}
- pci_dev_get(dev);
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
}
#ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+ int my_cpu, i, notified = -1;
+
+ my_cpu = get_cpu();
+
+ for_each_online_cpu(i) {
+ uint8_t status;
+ int64_t rc;
+
+ if (i == my_cpu)
+ continue;
+
+ for (;;) {
+ rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+ &status);
+ if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+ break;
+ barrier();
+ if (i != notified) {
+ printk(KERN_INFO "kexec: waiting for cpu %d "
+ "(physical %d) to enter OPAL\n",
+ i, paca[i].hw_cpu_id);
+ notified = i;
+ }
+ }
+ }
+}
+
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
xics_kexec_teardown_cpu(secondary);
- /* Return secondary CPUs to firmware on OPAL v3 */
- if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+ /* On OPAL v3, we return all CPUs to firmware */
+
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return;
+
+ if (secondary) {
+ /* Return secondary CPUs to firmware on OPAL v3 */
mb();
get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
mb();
/* Return the CPU to OPAL */
opal_return_cpu();
+ } else if (crash_shutdown) {
+ /*
+ * On crash, we don't wait for secondaries to go
+ * down as they might be unreachable or hung, so
+ * instead we just wait a bit and move on.
+ */
+ mdelay(1);
+ } else {
+ /* Primary waits for the secondaries to have reached OPAL */
+ pnv_kexec_wait_secondaries_down();
}
}
#endif /* CONFIG_KEXEC */
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/opal.h>
+#include <asm/runlatch.h>
#include "powernv.h"
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
+ ppc64_runlatch_off();
power7_nap();
+ ppc64_runlatch_on();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
static void rtas_stop_self(void)
{
- struct rtas_args args = {
- .token = cpu_to_be32(rtas_stop_self_token),
+ static struct rtas_args args = {
.nargs = 0,
.nret = 1,
.rets = &args.args[0],
};
+ args.token = cpu_to_be32(rtas_stop_self_token);
+
local_irq_disable();
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
start_pfn = base >> PAGE_SHIFT;
- if (!pfn_valid(start_pfn)) {
- memblock_remove(base, memblock_size);
- return 0;
- }
+ lock_device_hotplug();
+
+ if (!pfn_valid(start_pfn))
+ goto out;
block_sz = memory_block_size_bytes();
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
base += MIN_MEMORY_BLOCK_SIZE;
}
+out:
/* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
+ unlock_device_hotplug();
return 0;
}
return 1;
}
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
case BPF_S_LD_W_IND:
case BPF_S_LD_H_IND:
case BPF_S_LD_B_IND:
- case BPF_S_LDX_B_MSH:
case BPF_S_LD_IMM:
case BPF_S_LD_MEM:
case BPF_S_MISC_TXA:
KBUILD_CFLAGS += -m64
# Don't autogenerate traditional x87, MMX or SSE instructions
- KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+ KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
return nr_irqs_gsi;
}
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+ return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
int __init arch_probe_nr_irqs(void)
{
int nr;
if (phys_id < 0)
return -1;
- if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+ /* protect rdmsrl() to handle virtualization */
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
return -1;
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
#define TOPOLOGY_REGISTER_OFFSET 0x10
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
* Interrupt control on vSMPowered systems:
#ifdef CONFIG_SMP
if (cap & ctl & BIT(8)) {
ctl &= ~BIT(8);
+
+ /* Interrupt routing set to ignore */
+ irq_routing_comply = 0;
+
#ifdef CONFIG_PROC_FS
/* Don't let users change irq affinity via procfs */
no_irq_affinity = 1;
{
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
- apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+ if (!irq_routing_comply)
+ apic->vector_allocation_domain = fill_vector_allocation_domain;
}
void __init vsmp_init(void)
[number##_HIGH] = VMCS12_OFFSET(name)+4
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
/*
* We do NOT shadow fields that are modified when L0
* traps and emulates any vmx instruction (e.g. VMPTRLD,
GUEST_LINEAR_ADDRESS,
GUEST_PHYSICAL_ADDRESS
};
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
GUEST_RIP,
GUEST_RSP,
GUEST_CR0,
HOST_FS_SELECTOR,
HOST_GS_SELECTOR
};
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
}
}
+static void init_vmcs_shadow_fields(void)
+{
+ int i, j;
+
+ /* No checks for read-only fields yet */
+
+ for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+ switch (shadow_read_write_fields[i]) {
+ case GUEST_BNDCFGS:
+ if (!vmx_mpx_supported())
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ if (j < i)
+ shadow_read_write_fields[j] =
+ shadow_read_write_fields[i];
+ j++;
+ }
+ max_shadow_read_write_fields = j;
+
+ /* Shadowed fields that the guest can access without a vmexit */
+ for (i = 0; i < max_shadow_read_write_fields; i++) {
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmwrite_bitmap);
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmread_bitmap);
+ }
+ for (i = 0; i < max_shadow_read_only_fields; i++)
+ clear_bit(shadow_read_only_fields[i],
+ vmx_vmread_bitmap);
+}
+
static __init int alloc_kvm_area(void)
{
int cpu;
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels()) {
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
- /* shadowed read/write fields */
- for (i = 0; i < max_shadow_read_write_fields; i++) {
- clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
- clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
- }
- /* shadowed read only fields */
- for (i = 0; i < max_shadow_read_only_fields; i++)
- clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
/*
* Allow direct access to the PC debug port (it is often used for I/O
select GENERIC_PCI_IOMAP
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select IRQ_DOMAIN
select HAVE_OPROFILE
If in doubt, say Y.
+config HIGHMEM
+ bool "High Memory Support"
+ help
+ Linux can use the full amount of RAM in the system by
+ default. However, the default MMUv2 setup only maps the
+ lowermost 128 MB of memory linearly to the areas starting
+ at 0xd0000000 (cached) and 0xd8000000 (uncached).
+ When there is more than 128 MB of memory in the system, not
+ all of it can be "permanently mapped" by the kernel.
+ The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a
+ machine with more than 128 MB total physical RAM, answer
+ N here.
+
+ If unsure, say Y.
+
endmenu
config XTENSA_CALIBRATE_CCOUNT
config XTENSA_PLATFORM_ISS
bool "ISS"
- depends on TTY
select XTENSA_CALIBRATE_CCOUNT
select SERIAL_CONSOLE
help
--- /dev/null
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-kc705";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+};
--- /dev/null
+/ {
+ soc {
+ flash: flash@00000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x00000000 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "data";
+ reg = <0x00000000 0x06000000>;
+ };
+ partition@0x6000000 {
+ label = "boot loader area";
+ reg = <0x06000000 0x00800000>;
+ };
+ partition@0x6800000 {
+ label = "kernel image";
+ reg = <0x06800000 0x017e0000>;
+ };
+ partition@0x7fe0000 {
+ label = "boot environment";
+ reg = <0x07fe0000 0x00020000>;
+ };
+ };
+ };
+};
/ {
- flash: flash@f8000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0xf8000000 0x01000000>;
- bank-width = <2>;
- device-width = <2>;
- partition@0x0 {
- label = "boot loader area";
- reg = <0x00000000 0x00400000>;
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x01000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@0x400000 {
+ label = "kernel image";
+ reg = <0x00400000 0x00600000>;
+ };
+ partition@0xa00000 {
+ label = "data";
+ reg = <0x00a00000 0x005e0000>;
+ };
+ partition@0xfe0000 {
+ label = "boot environment";
+ reg = <0x00fe0000 0x00020000>;
+ };
};
- partition@0x400000 {
- label = "kernel image";
- reg = <0x00400000 0x00600000>;
- };
- partition@0xa00000 {
- label = "data";
- reg = <0x00a00000 0x005e0000>;
- };
- partition@0xfe0000 {
- label = "boot environment";
- reg = <0x00fe0000 0x00020000>;
- };
- };
+ };
};
/ {
- flash: flash@f8000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0xf8000000 0x00400000>;
- bank-width = <2>;
- device-width = <2>;
- partition@0x0 {
- label = "boot loader area";
- reg = <0x00000000 0x003f0000>;
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x00400000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x003f0000>;
+ };
+ partition@0x3f0000 {
+ label = "boot environment";
+ reg = <0x003f0000 0x00010000>;
+ };
};
- partition@0x3f0000 {
- label = "boot environment";
- reg = <0x003f0000 0x00010000>;
- };
- };
+ };
};
};
};
- serial0: serial@fd050020 {
- device_type = "serial";
- compatible = "ns16550a";
- no-loopback-test;
- reg = <0xfd050020 0x20>;
- reg-shift = <2>;
- interrupts = <0 1>; /* external irq 0 */
- clocks = <&osc>;
- };
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
- enet0: ethoc@fd030000 {
- compatible = "opencores,ethoc";
- reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
- interrupts = <1 1>; /* external irq 1 */
- local-mac-address = [00 50 c2 13 6f 00];
- clocks = <&osc>;
+ serial0: serial@0d050020 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ no-loopback-test;
+ reg = <0x0d050020 0x20>;
+ reg-shift = <2>;
+ interrupts = <0 1>; /* external irq 0 */
+ clocks = <&osc>;
+ };
+
+ enet0: ethoc@0d030000 {
+ compatible = "opencores,ethoc";
+ reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+ interrupts = <1 1>; /* external irq 1 */
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
};
};
unsigned long data[0]; /* data */
} bp_tag_t;
-typedef struct meminfo {
+struct bp_meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
-} meminfo_t;
-
-#define SYSMEM_BANKS_MAX 5
+};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
-typedef struct sysmem_info {
- int nr_banks;
- meminfo_t bank[SYSMEM_BANKS_MAX];
-} sysmem_info_t;
-
-extern sysmem_info_t sysmem;
-
#endif
#endif
--- /dev/null
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel( \
+ pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+ (vaddr) \
+ )
+
+#endif
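As a minimal sketch of how a fixmap slot is used (the index arithmetic
matches the kmap_atomic() code added later in this series; type, page
and kmap_pte are names from that code):

	int idx = type + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* Install the page into the slot's reserved pte. */
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));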
* this archive for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
-extern void flush_cache_kmaps(void);
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
#endif
update_pte(ptep, pteval);
}
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
--- /dev/null
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#define SYSMEM_BANKS_MAX 31
+
+struct meminfo {
+ unsigned long start;
+ unsigned long end;
+};
+
+/*
+ * Bank array is sorted by .start.
+ * Banks don't overlap and there's at least one page gap
+ * between adjacent bank entries.
+ */
+struct sysmem_info {
+ int nr_banks;
+ struct meminfo bank[SYSMEM_BANKS_MAX];
+};
+
+extern struct sysmem_info sysmem;
+
+int add_sysmem_bank(unsigned long start, unsigned long end);
+int mem_reserve(unsigned long, unsigned long, int);
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
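For example, a two-bank layout satisfying the invariant above could
look like this (addresses are illustrative):

	sysmem.nr_banks = 2;
	/* Sorted by .start, non-overlapping, one-page (0x1000) gap. */
	sysmem.bank[0] = (struct meminfo) { .start = 0x00000000, .end = 0x04000000 };
	sysmem.bank[1] = (struct meminfo) { .start = 0x04001000, .end = 0x08000000 };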
unsigned long page);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_SMP
void flush_tlb_page(struct vm_area_struct *, unsigned long);
void flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#else /* !CONFIG_SMP */
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
#endif /* CONFIG_SMP */
#include <asm/param.h>
#include <asm/traps.h>
#include <asm/smp.h>
+#include <asm/sysmem.h>
#include <platform/hardware.h>
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
-sysmem_info_t __initdata sysmem;
-
-extern int mem_reserve(unsigned long, unsigned long, int);
-extern void bootmem_init(void);
-extern void zones_init(void);
-
/*
* Boot parameter parsing.
*
/* parse current tag */
-static int __init add_sysmem_bank(unsigned long type, unsigned long start,
- unsigned long end)
-{
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
- printk(KERN_WARNING
- "Ignoring memory bank 0x%08lx size %ldKB\n",
- start, end - start);
- return -EINVAL;
- }
- sysmem.bank[sysmem.nr_banks].type = type;
- sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
- sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK;
- sysmem.nr_banks++;
-
- return 0;
-}
-
static int __init parse_tag_mem(const bp_tag_t *tag)
{
- meminfo_t *mi = (meminfo_t *)(tag->data);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
- return add_sysmem_bank(mi->type, mi->start, mi->end);
+ return add_sysmem_bank(mi->start, mi->end);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
static int __init parse_tag_initrd(const bp_tag_t* tag)
{
- meminfo_t* mi;
- mi = (meminfo_t*)(tag->data);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
initrd_start = (unsigned long)__va(mi->start);
initrd_end = (unsigned long)__va(mi->end);
return;
size &= PAGE_MASK;
- add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
+ add_sysmem_bank(base, base + size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
void __init init_arch(bp_tag_t *bp_start)
{
- sysmem.nr_banks = 0;
-
/* Parse boot parameters */
if (bp_start)
#endif
if (sysmem.nr_banks == 0) {
- sysmem.nr_banks = 1;
- sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
- sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
- + PLATFORM_DEFAULT_MEM_SIZE;
+ add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+ PLATFORM_DEFAULT_MEM_START +
+ PLATFORM_DEFAULT_MEM_SIZE);
}
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
- __pa(initrd_end), 0);
+ __pa(initrd_end), 0) == 0;
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
__pa(&_Level6InterruptVector_text_end), 0);
#endif
+ parse_early_param();
bootmem_init();
unflatten_and_copy_device_tree();
on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
/* Cache flush functions */
static void ipi_flush_cache_all(void *arg)
#include <linux/in6.h>
#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
/*
* Kernel hacking ...
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
obj-y := init.o cache.o misc.o
obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
*
*/
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
- unsigned long paddr = (unsigned long) page_address(page);
+ unsigned long paddr = (unsigned long)kmap_atomic(page);
__flush_dcache_page(paddr);
__invalidate_icache_page(paddr);
set_bit(PG_arch_1, &page->flags);
+ kunmap_atomic((void *)paddr);
}
#endif
}
--- /dev/null
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+ int type;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int idx, type;
+
+ if (kvaddr >= (void *)FIXADDR_START &&
+ kvaddr < (void *)FIXADDR_TOP) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they'll try to access this
+	 * pte without first remapping it. Keeping stale mappings around
+	 * is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
+ pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ local_flush_tlb_kernel_range((unsigned long)kvaddr,
+ (unsigned long)kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
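The slot arithmetic above gives each CPU a disjoint window of atomic kmap
addresses: slot 'type' on CPU 'n' lands at
__fix_to_virt(FIX_KMAP_BEGIN + type + KM_TYPE_NR * n). A minimal usage
sketch of the API (page and src stand in for a caller's data):

	void *vaddr = kmap_atomic(page);   /* pagefaults disabled from here */
	memcpy(vaddr, src, PAGE_SIZE);
	kunmap_atomic(vaddr);              /* pte cleared, TLB entry flushed */

Nothing that can sleep may run between the two calls.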
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+ unsigned i;
+
+ pr_debug("Sysmem:\n");
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
+ sysmem.bank[i].start, sysmem.bank[i].end,
+ (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start <= start)
+ it = sysmem.bank + i;
+ else
+ break;
+ return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+ unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+ if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+ return -ENOMEM;
+ if (to != from)
+ memmove(to, from, n * sizeof(struct meminfo));
+ sysmem.nr_banks += to - from;
+ return 0;
+}
+
+/*
+ * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
+
+ if (start == end ||
+ (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+ pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+
+ start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
+ sz = end - start;
+
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
+
+ if (it && bank_sz >= start - it->start) {
+ if (end - it->start > bank_sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (!it)
+ it = sysmem.bank;
+ else
+ ++it;
+
+ if (it - sysmem.bank < sysmem.nr_banks &&
+ it->start - start <= sz) {
+ it->start = start;
+ if (it->end - it->start < sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (move_banks(it + 1, it) < 0) {
+ pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+ it->start = start;
+ it->end = end;
+ return 0;
+ }
+ }
+ sz = it->end - it->start;
+ for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start - it->start <= sz) {
+ if (sz < sysmem.bank[i].end - it->start)
+ it->end = sysmem.bank[i].end;
+ } else {
+ break;
+ }
+
+ move_banks(it + 1, sysmem.bank + i);
+ return 0;
+}
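Concretely, the union semantics work out like this (hypothetical
addresses, 4 KiB pages assumed):

	add_sysmem_bank(0x1000, 0x3000);  /* sysmem: [0x1000, 0x3000) */
	add_sysmem_bank(0x2000, 0x5000);  /* overlapping banks merge  */
	/* sysmem is now the single bank [0x1000, 0x5000) */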
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not
+ * exist, the memory map is not altered.
*
* Parameters:
* start Start of region,
* must_exist Must exist in memory pool.
*
* Returns:
- * 0 (memory area couldn't be mapped)
- * -1 (success)
+ * 0 (success)
+ * < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
- int i;
-
- if (start == end)
- return 0;
+ struct meminfo *it;
+ struct meminfo *rm = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
+ sz = end - start;
+ if (!sz)
+ return -EINVAL;
- for (i = 0; i < sysmem.nr_banks; i++)
- if (start < sysmem.bank[i].end
- && end >= sysmem.bank[i].start)
- break;
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
- if (i == sysmem.nr_banks) {
- if (must_exist)
- printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
- "not in any region!\n", start, end);
- return 0;
+ if ((!it || end - it->start > bank_sz) && must_exist) {
+ pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+ start, end);
+ return -EINVAL;
}
- if (start > sysmem.bank[i].start) {
- if (end < sysmem.bank[i].end) {
- /* split entry */
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
- panic("meminfo overflow\n");
- sysmem.bank[sysmem.nr_banks].start = end;
- sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
- sysmem.nr_banks++;
+ if (it && start - it->start < bank_sz) {
+ if (start == it->start) {
+ if (end - it->start < bank_sz) {
+ it->start = end;
+ return 0;
+ } else {
+ rm = it;
+ }
+ } else {
+ it->end = start;
+ if (end - it->start < bank_sz)
+ return add_sysmem_bank(end,
+ it->start + bank_sz);
+ ++it;
}
- sysmem.bank[i].end = start;
+ }
- } else if (end < sysmem.bank[i].end) {
- sysmem.bank[i].start = end;
+ if (!it)
+ it = sysmem.bank;
- } else {
- /* remove entry */
- sysmem.nr_banks--;
- sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
- sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+ for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+ if (it->end - start <= sz) {
+ if (!rm)
+ rm = it;
+ } else {
+ if (it->start - start < sz)
+ it->start = end;
+ break;
+ }
}
- return -1;
+
+ if (rm)
+ move_banks(rm, it);
+
+ return 0;
}
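Continuing the example above (hypothetical addresses), reserving the
middle of a bank now splits it rather than dropping it:

	/* sysmem: [0x1000, 0x5000) */
	mem_reserve(0x2000, 0x3000, 1);
	/* sysmem: [0x1000, 0x2000) and [0x3000, 0x5000) */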
unsigned long bootmap_start, bootmap_size;
int i;
+ sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
void __init zones_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- int i;
-
/* All pages are DMA-able, so we put them all in the DMA zone. */
-
- zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
- for (i = 1; i < MAX_NR_ZONES; i++)
- zones_size[i] = 0;
-
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+ [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
-
+ };
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
void __init mem_init(void)
{
- max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
#ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
+ unsigned long tmp;
+
+ reset_all_zones_managed_pages();
+ for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
#endif
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
free_all_bootmem();
mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
+ " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
{
free_initmem_default(-1);
}
+
+static void __init parse_memmap_one(char *p)
+{
+ char *oldp;
+ unsigned long start_at, mem_size;
+
+ if (!p)
+ return;
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
+
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ add_sysmem_bank(start_at, start_at + mem_size);
+ break;
+
+ case '$':
+ start_at = memparse(p + 1, &p);
+ mem_reserve(start_at, start_at + mem_size, 0);
+ break;
+
+ case 0:
+ mem_reserve(mem_size, 0, 0);
+ break;
+
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
+ }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+
+ if (k)
+ *k++ = 0;
+
+ parse_memmap_one(str);
+ str = k;
+ }
+
+ return 0;
+}
+early_param("memmap", parse_memmap_opt);
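Together these accept the usual memmap= forms on the kernel command line
(values below are illustrative):

	memmap=64M@0x10000000	add a 64 MiB bank at 0x10000000
	memmap=2M$0x1f000000	reserve 2 MiB at 0x1f000000
	memmap=128M		reserve everything above 128 MiB

Several regions may be chained with commas in one memmap= option.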
*
* Extracted from init.c
*/
+#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+ if (pmd_none(*pmd)) {
+ unsigned i;
+ pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, pte + i);
+
+ set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+ __func__, vaddr, pmd, pte);
+ return pte;
+ } else {
+ return pte_offset_kernel(pmd, 0);
+ }
+}
+
+static void __init fixedrange_init(void)
+{
+ BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+ init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
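With the 4 KiB xtensa page, one pte page holds 1024 entries and so maps
1024 * 4 KiB = 4 MiB, i.e. one pmd; the BUILD_BUG_ON above guarantees the
whole fixmap range fits in a single such table, which is why one
init_pmd() call per region is enough.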
void __init paging_init(void)
{
memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE);
+ kmap_init();
+#endif
}
/*
local_irq_restore(flags);
}
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ local_flush_tlb_all();
+ }
+}
+
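The size check above is the usual cutover heuristic: per-page
invalidation only pays off while the range is smaller than the TLB
itself, i.e. _TLB_ENTRIES << PAGE_SHIFT bytes (a hypothetical 64-entry
TLB with 4 KiB pages gives a 256 KiB threshold); past that a full flush
is cheaper.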
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
# "prom monitor" library routines under Linux.
#
-obj-y = console.o setup.o
+obj-y = setup.o
+obj-$(CONFIG_TTY) += console.o
obj-$(CONFIG_NET) += network.o
obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
/* early initialization */
-extern sysmem_info_t __initdata sysmem;
-
-void platform_init(bp_tag_t* first)
+void __init platform_init(bp_tag_t *first)
{
- /* Set default memory block if not provided by the bootloader. */
-
- if (sysmem.nr_banks == 0) {
- sysmem.nr_banks = 1;
- sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
- sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
- + PLATFORM_DEFAULT_MEM_SIZE;
- }
}
/* Heartbeat. Let the LED blink. */
acpi_status status;
int ret;
+ if (pr->apic_id == -1)
+ return -ENODEV;
+
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
return -ENODEV;
}
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0) {
+ if (apic_id < 0)
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- return -ENODEV;
- }
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
spin_unlock_irqrestore(&ec->lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_ec_sync_query(ec, NULL);
}
return 0;
}
EXPORT_SYMBOL(ec_get_handle);
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
/*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
* Run with locked ec mutex.
*/
static void acpi_ec_clear(struct acpi_ec *ec)
u8 value = 0;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query_unlocked(ec, &value);
+ status = acpi_ec_sync_query(ec, &value);
if (status || !value)
break;
}
kfree(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (data)
+ *data = value;
+ if (status)
return status;
+
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
if (!ec)
return;
mutex_lock(&ec->mutex);
- acpi_ec_sync_query(ec);
+ acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->mutex);
}
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
 * This function moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
*/
static void driver_deferred_probe_trigger(void)
{
* into the active list so they can be retried by the workqueue
*/
mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
+ int local_trigger_count = atomic_read(&deferred_trigger_count);
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
/* Driver requested deferred probing */
dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
} else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
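The counter acts as a generic lost-wakeup guard: snapshot it before the
slow path, and if it moved by the time the deferral decision is made, a
concurrent success raced with this probe and the trigger must be
replayed. A minimal sketch of the idea (illustrative names only):

	int gen = atomic_read(&trigger_count);        /* snapshot */
	if (probe_one(dev) == -EPROBE_DEFER) {
		add_to_pending(dev);
		if (gen != atomic_read(&trigger_count))
			retrigger();  /* a success slipped in; re-run pending */
	}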
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
return -ENXIO;
return dev->archdata.irqs[num];
#else
- struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ struct resource *r;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+ return of_irq_get(dev->dev.of_node, num);
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
#endif
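With of_irq_get() wired in, a driver gets deferral for free by
propagating the error code (a standard probe idiom, sketched here):

	int irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;  /* may be -EPROBE_DEFER; the core retries */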
int ret;
while (ptr) {
- ret = copy_to_user(param, ptr, sizeof(*ptr));
+ struct floppy_raw_cmd cmd = *ptr;
+ cmd.next = NULL;
+ cmd.kernel_data = NULL;
+ ret = copy_to_user(param, &cmd, sizeof(cmd));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
- if (ret)
- return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
+ ptr->kernel_data = NULL;
+ if (ret)
+ return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
- ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
struct clk *clk;
u32 range[2];
+ vexpress_sysreg_of_early_init();
+
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
if (!osc)
return;
static struct clock_event_device __percpu *arch_timer_evt;
static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
/*
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_CP15_TIMER) {
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
}
}
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
arch_timer_register();
arch_timer_common_init();
}
return ret;
}
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+ BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
* Sets a new clock ratio.
*/
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
unsigned int table_index)
{
unsigned int mults_index;
/* Safety precautions */
mult = mults[mults_index & 0x1f];
if (mult == -1)
- return;
+ return -EINVAL;
+
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
- return;
+ return -EINVAL;
+
/* Voltage transition before frequency transition? */
if (can_scale_voltage && longhaul_index < table_index)
dir = 1;
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- cpufreq_freq_transition_begin(policy, &freqs);
-
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
retry_loop:
goto retry_loop;
}
}
- /* Report true CPU frequency */
- cpufreq_freq_transition_end(policy, &freqs, 0);
- if (!bm_timeout)
+ if (!bm_timeout) {
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
"idle PCI bus.\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/*
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
+ int retval = 0;
if (!can_scale_voltage)
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
else {
/* On test system voltage transitions exceeding single
* step up or down were turning motherboard off. Both
while (i != table_index) {
vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
if (vid != current_vid) {
- longhaul_setstate(policy, i);
+ retval = longhaul_setstate(policy, i);
current_vid = vid;
msleep(200);
}
else
i--;
}
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
}
+
longhaul_index = table_index;
- return 0;
+ return retval;
}
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = longhaul_table[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
longhaul_setstate(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
break;
}
}
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
- struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
return -EINVAL;
}
- freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
- freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
powernow_k6_set_cpu_multiplier(best_i);
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
- for (i = 0; i < 8; i++) {
- if (i == max_multiplier)
+
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == max_multiplier) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = clock_ratio[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
}
return 0;
}
freqs.new = powernow_table[index].frequency;
- cpufreq_freq_transition_begin(policy, &freqs);
-
/* Now do the magic poking into the MSRs. */
if (have_a0 == 1) /* A0 errata 5 */
if (have_a0 == 1)
local_irq_enable();
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
+ u64 transition_latency_hz;
np = of_get_cpu_node(cpu, NULL);
if (!np)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
+ transition_latency_hz = 12ULL * NSEC_PER_SEC;
policy->cpuinfo.transition_latency =
- (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq();
+ do_div(transition_latency_hz, fsl_get_sys_freq());
+
of_node_put(np);
return 0;
plane->crtc = crtc;
plane->fb = crtc->primary->fb;
+ drm_framebuffer_reference(plane->fb);
return 0;
}
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
- DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsi->reg_base) {
+ if (IS_ERR(dsi->reg_base)) {
dev_err(&pdev->dev, "failed to remap io region\n");
- return -EADDRNOTAVAIL;
+ return PTR_ERR(dsi->reg_base);
}
dsi->phy = devm_phy_get(&pdev->dev, "dsim");
win_data->enabled = true;
- DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
/* Full ppgtt disabled by default for now due to issues. */
if (full)
- return false; /* HAS_PPGTT(dev) */
+ return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
else
return HAS_ALIASING_PPGTT(dev);
}
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
- WARN_ONCE(hpd[i] & hotplug_trigger &&
- dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
- "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
- hotplug_trigger, i, hpd[i]);
+ if (hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+			 * hotplug bits themselves. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
+
+ continue;
+ }
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
+# define STOP_RING (1 << 8)
#define GEN6_GT_MODE 0x20d0
#define GEN7_GT_MODE 0x7008
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ /*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ }
+
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
base.head) {
memset(&crtc->config, 0, sizeof(crtc->config));
+ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
bool has_dpcd;
if (!is_edp(intel_dp))
return true;
+ /* The VDD bit needs a power domain reference, so if the bit is already
+ * enabled when we boot, grab this reference. */
+ if (edp_have_panel_vdd(intel_dp)) {
+ enum intel_display_power_domain power_domain;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+ }
+
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
/* User requested mode, only valid as a starting point to
mutex_lock(&dev->struct_mutex);
+ if (intel_fb &&
+ (sizes->fb_width > intel_fb->base.width ||
+ sizes->fb_height > intel_fb->base.height)) {
+ DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
+ drm_framebuffer_unreference(&intel_fb->base);
+ intel_fb = ifbdev->fb = NULL;
+ }
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
}
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+ true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
I915_WRITE(HWS_PGA, addr);
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = ring->obj;
- int ret = 0;
- u32 head;
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ if (!IS_GEN2(ring->dev)) {
+ I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+ return false;
+ }
+ }
- /* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
- if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
- else
- ring_setup_phys_status_page(ring);
+ if (!IS_GEN2(ring->dev)) {
+ (void)I915_READ_CTL(ring);
+ I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ }
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ if (!stop_ring(ring)) {
+ /* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
I915_READ_TAIL(ring),
I915_READ_START(ring));
- I915_WRITE_HEAD(ring, 0);
-
- if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ if (!stop_ring(ring)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
}
}
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
- MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
}
/* and drop the iova ref + obj rev when done scanning out: */
if (old_bo) {
/* drop our previous reference: */
- msm_gem_put_iova(old_bo, mdp4_kms->id);
- drm_gem_object_unreference_unlocked(old_bo);
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
}
- crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp4_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
- mdp_dispatch_irqs(mdp_kms, status);
-
return IRQ_HANDLED;
}
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ if (mdp4_kms->blank_cursor_bo)
+ drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
goto fail;
}
+ mutex_lock(&dev->struct_mutex);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
return kms;
fail:
struct clk *lut_clk;
struct mdp_irq error_handler;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint32_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-
- mdp_dispatch_irqs(mdp_kms, status);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
dma_addr_t paddr;
int ret, size;
- /* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
sizes->surface_bpp = 32;
- sizes->surface_depth = 32;
+ sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
if (iommu_present(&platform_bus_type))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
+ else {
drm_mm_remove_node(msm_obj->vram_node);
+ drm_free_large(msm_obj->pages);
+ }
msm_obj->pages = NULL;
}
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
- list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
if (plane->crtc == crtc) {
tegra_plane_disable(plane);
plane->crtc = NULL;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;
cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure device and verifier stays in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
- val = (eax >> 16) & 0x7f;
+ val = (eax >> 16) & 0xff;
/*
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val >= 85) {
+ if (val) {
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
config INFINIBAND_CXGB4
- tristate "Chelsio T4 RDMA Driver"
+ tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
select GENERIC_ALLOCATOR
---help---
- This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
- 10GbE adapters.
+ This is an iWARP/RDMA driver for the Chelsio T4 and T5
+ 1GbE, 10GbE adapters and T5 40GbE adapter.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- state_set(&ep->com, ABORTING);
+ __state_set(&ep->com, ABORTING);
set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
return credits;
}
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
struct mpa_v2_conn_params *mpa_v2_params;
struct c4iw_qp_attributes attrs;
enum c4iw_qp_attr_mask mask;
int err;
+ int disconnect = 0;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
* will abort the connection.
*/
if (stop_ep_timer(ep))
- return;
+ return 0;
/*
* If we get more than the supported amount of private data
* if we don't even have the mpa message, then bail.
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
+ return 0;
mpa = (struct mpa_message *) ep->mpa_pkt;
/* Validate MPA header. */
* We'll continue process when more data arrives.
*/
if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
+ return 0;
if (mpa->flags & MPA_REJECT) {
err = -ECONNREFUSED;
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
goto out;
send_abort(ep, skb, GFP_KERNEL);
out:
connect_reply_upcall(ep, err);
- return;
+ return disconnect;
}
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
__u8 status = hdr->status;
+ int disconnect = 0;
ep = lookup_tid(t, tid);
if (!ep)
switch (ep->com.state) {
case MPA_REQ_SENT:
ep->rcv_seq += dlen;
- process_mpa_reply(ep, skb);
+ disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
ep->rcv_seq += dlen;
ep->com.state, ep->hwtid, status);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ disconnect = 1;
break;
}
default:
break;
}
mutex_unlock(&ep->com.mutex);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
return 0;
}
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN(1);
}
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
__func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
- mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
}
u8 ecode;
u16 sq_db_inc;
u16 rq_db_inc;
+ u8 send_term;
};
struct c4iw_qp {
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
- disconnect = 1;
- c4iw_get_ep(&qhp->ep->com);
- if (!internal)
+ if (!internal) {
+ c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- else {
+ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
/*
* Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
* ringing the queue db when we're in DB_FULL mode.
+ * Only allow this on T4 devices.
*/
attrs.sq_db_inc = attr->sq_psn;
attrs.rq_db_inc = attr->rq_psn;
mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+ if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+ (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+ return -EINVAL;
return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL 14
+#define M_CONG_CNTRL 0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID (1 << 31)
+
#endif /* _T4FW_RI_API_H_ */
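These follow the driver's S_/M_/V_/G_ shift-and-mask convention; as a
worked example, V_CONG_CNTRL(CONG_ALG_TAHOE) is 1 << 14 = 0x4000, and
G_CONG_CNTRL(reg) masks the same two bits back out of a register value.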
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_375_PPI_CAUSE (0x10)
struct msi_desc *desc)
{
struct msi_msg msg;
- irq_hw_number_t hwirq;
- int virq;
+ int virq, hwirq;
hwirq = armada_370_xp_alloc_msi();
if (hwirq < 0)
unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
+ unsigned long hwirq = d->hwirq;
+
irq_dispose_mapping(irq);
- armada_370_xp_free_msi(d->hwirq);
+ armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+ int nvec, int type)
+{
+ /* We support MSI, but not MSI-X */
+ if (type == PCI_CAP_ID_MSI)
+ return 0;
+ return -EINVAL;
}
static struct irq_chip armada_370_xp_msi_irq_chip = {
msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+ msi_chip->check_device = armada_370_xp_check_msi_device;
msi_chip->of_node = node;
armada_370_xp_msi_domain =
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
- unsigned long reg;
- unsigned long new_mask = 0;
- unsigned long online_mask = 0;
- unsigned long count = 0;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long reg, mask;
int cpu;
- for_each_cpu(cpu, mask_val) {
- new_mask |= 1 << cpu_logical_map(cpu);
- count++;
- }
-
- /*
- * Forbid mutlicore interrupt affinity
- * This is required since the MPIC HW doesn't limit
- * several CPUs from acknowledging the same interrupt.
- */
- if (count > 1)
- return -EINVAL;
-
- for_each_cpu(cpu, cpu_online_mask)
- online_mask |= 1 << cpu_logical_map(cpu);
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ mask = 1UL << cpu_logical_map(cpu);
raw_spin_lock(&irq_controller_lock);
-
reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- reg = (reg & (~online_mask)) | new_mask;
+ reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
raw_spin_unlock(&irq_controller_lock);
return 0;
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
-
- /*
- * Set the default affinity from all CPUs to the boot cpu.
- * This is required since the MPIC doesn't limit several CPUs
- * from acknowledging the same interrupt.
- */
- cpumask_clear(irq_default_affinity);
- cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
#endif
armada_370_xp_msi_init(node, main_int_res.start);
int i, size, max, reserved = 0, entry;
const __be32 *irqsr;
- cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
} else {
inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
!is_dirty(cache, lookup_result.cblock))
struct bio_list deferred_bio_list;
struct bio_list retry_on_resume_list;
struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+ /*
+ * Ensures the thin is not destroyed until the worker has finished
+ * iterating the active_thins list.
+ */
+ atomic_t refcount;
+ struct completion can_destroy;
};
/*----------------------------------------------------------------*/
blk_finish_plug(&plug);
}
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block. So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+ struct thin_c *tc = NULL;
+
+ rcu_read_lock();
+ if (!list_empty(&pool->active_thins)) {
+ tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ thin_get(tc);
+ }
+ rcu_read_unlock();
+
+ return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+ struct thin_c *old_tc = tc;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+ thin_get(tc);
+ thin_put(old_tc);
+ rcu_read_unlock();
+ return tc;
+ }
+ thin_put(old_tc);
+ rcu_read_unlock();
+
+ return NULL;
+}
+
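The two helpers compose into the usual safe-iteration idiom, equivalent
to the loop used below:

	for (tc = get_first_thin(pool); tc; tc = get_next_thin(pool, tc))
		process_thin_deferred_bios(tc);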
static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
struct bio_list bios;
struct thin_c *tc;
- rcu_read_lock();
- list_for_each_entry_rcu(tc, &pool->active_thins, list)
+ tc = get_first_thin(pool);
+ while (tc) {
process_thin_deferred_bios(tc);
- rcu_read_unlock();
+ tc = get_next_thin(pool, tc);
+ }
/*
* If there are any deferred flush bios, we must commit
{
struct noflush_work w;
- INIT_WORK(&w.worker, fn);
+ INIT_WORK_ONSTACK(&w.worker, fn);
w.tc = tc;
atomic_set(&w.complete, 0);
init_waitqueue_head(&w.wait);
/*----------------------------------------------------------------
* Thin target methods
*--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+ atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+ if (atomic_dec_and_test(&tc->refcount))
+ complete(&tc->can_destroy);
+}
+
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
unsigned long flags;
+ thin_put(tc);
+ wait_for_completion(&tc->can_destroy);
+
spin_lock_irqsave(&tc->pool->lock, flags);
list_del_rcu(&tc->list);
spin_unlock_irqrestore(&tc->pool->lock, flags);
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
+ unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock(&tc->pool->lock);
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock(&tc->pool->lock);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
return r;
}
}
-
todo = 1 << v->data_dev_block_bits;
- while (io->iter.bi_size) {
+ do {
u8 *page;
+ unsigned len;
struct bio_vec bv = bio_iter_iovec(bio, io->iter);
page = kmap_atomic(bv.bv_page);
- r = crypto_shash_update(desc, page + bv.bv_offset,
- bv.bv_len);
+ len = bv.bv_len;
+ if (likely(len >= todo))
+ len = todo;
+ r = crypto_shash_update(desc, page + bv.bv_offset, len);
kunmap_atomic(page);
if (r < 0) {
return r;
}
- bio_advance_iter(bio, &io->iter, bv.bv_len);
- }
+ bio_advance_iter(bio, &io->iter, len);
+ todo -= len;
+ } while (todo);
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
* Create one workqueue per volume (per registered block device).
 * Remember workqueues are cheap, they're not threads.
*/
- dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+ dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq)
goto out_free_queue;
INIT_WORK(&dev->work, ubiblock_do_work);
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
rb_erase(&e->u.rb, &ubi->free);
return e;
peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
+ if (peb < 0)
+ return peb;
+
err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
/* Give the unused PEB back */
wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
goto out_cancel;
}
self_check_in_wl_tree(ubi, e1, &ubi->used);
memset(r, 0, sizeof(*r));
/*
- * Get optional "interrupts-names" property to add a name
+ * Get optional "interrupt-names" property to add a name
* to the resource.
*/
of_property_read_string_index(dev, "interrupt-names", index,
}
EXPORT_SYMBOL_GPL(of_irq_to_resource);
+/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+ int rc;
+ struct of_phandle_args oirq;
+ struct irq_domain *domain;
+
+ rc = of_irq_parse_one(dev, index, &oirq);
+ if (rc)
+ return rc;
+
+ domain = irq_find_host(oirq.np);
+ if (!domain)
+ return -EPROBE_DEFER;
+
+ return irq_create_of_mapping(&oirq);
+}
+
/**
* of_irq_count - Count the number of IRQs a node uses
* @dev: pointer to device tree node
rc = of_address_to_resource(np, i, res);
WARN_ON(rc);
}
- WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+ if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+ pr_debug("not all legacy IRQ resources mapped for %s\n",
+ np->name);
}
dev->dev.of_node = of_node_get(np);
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
}
}
+static void __init of_selftest_platform_populate(void)
+{
+ int irq;
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_find_node_by_path("/testcase-data");
+ of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+ /* Test that a missing irq domain returns -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device1");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 1 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq != -EPROBE_DEFER)
+ selftest(0, "device deferred probe failed - %d\n", irq);
+
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 || irq == -EPROBE_DEFER)
+ selftest(0, "device parsing error failed - %d\n", irq);
+
+ selftest(1, "passed");
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
of_selftest_match_node();
+ of_selftest_platform_populate();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
<&test_intmap1 1 2>;
};
};
+
+ testcase-device1 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>;
+ };
+
+ testcase-device2 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc2>;
+ interrupts = <1>; /* invalid specifier - too short */
+ };
};
+
};
};
struct as3722_gpio_pin_control {
- bool enable_gpio_invert;
unsigned mode_prop;
int io_function;
};
return mode;
}
- if (as_pci->gpio_control[offset].enable_gpio_invert)
- mode |= AS3722_GPIO_INV;
-
- return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+ return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+ AS3722_GPIO_MODE_MASK, mode);
}
static const struct pinmux_ops as3722_pinmux_ops = {
{
struct as3722_pctrl_info *as_pci = to_as_pci(chip);
struct as3722 *as3722 = as_pci->as3722;
- int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+ int en_invert;
u32 val;
int ret;
+ ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+ if (ret < 0) {
+ dev_err(as_pci->dev,
+ "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+ return;
+ }
+ en_invert = !!(val & AS3722_GPIO_INV);
+
if (value)
val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
else
static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
unsigned pin_pos)
{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
struct pcs_name *pn;
int i;
return -ENOMEM;
}
+ if (pcs_soc->irq_enable_mask) {
+ unsigned val;
+
+ val = pcs->read(pcs->base + offset);
+ if (val & pcs_soc->irq_enable_mask) {
+ dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+ (unsigned long)pcs->res->start + offset, val);
+ val &= ~pcs_soc->irq_enable_mask;
+ pcs->write(val, pcs->base + offset);
+ }
+ }
+
pin = &pcs->pins.pa[i];
pn = &pcs->names[i];
sprintf(pn->name, "%lx.%d",
*/
for (i = 0; i < state->pinfuncgrpcnt; i++) {
const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
- unsigned int port = pfg->port;
unsigned int mode = pfg->mode;
- int j;
+ int j, port = pfg->port;
/*
* Skip pin groups which are always mapped and don't need
FN_MSIOF0_SCK_B, 0,
/* IP5_23_21 [3] */
FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
- FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
- FN_IERX_C, 0,
+ FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
/* IP5_20_18 [3] */
FN_WE0_N, FN_IECLK, FN_CAN_CLK,
FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
/* SEL_SCIF3 [2] */
FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
/* SEL_IEB [2] */
- FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
/* SEL_MMC [1] */
FN_SEL_MMC_0, FN_SEL_MMC_1,
/* SEL_SCIF5 [1] */
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- struct acpi_buffer buffer;
- int ret;
+ int ret = 0;
pnp_dbg(&dev->dev, "set resources\n");
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
- ret = pnpacpi_build_resource_template(dev, &buffer);
- if (ret)
- return ret;
- ret = pnpacpi_encode_resources(dev, &buffer);
- if (ret) {
+ if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+ struct acpi_buffer buffer;
+
+ ret = pnpacpi_build_resource_template(dev, &buffer);
+ if (ret)
+ return ret;
+
+ ret = pnpacpi_encode_resources(dev, &buffer);
+ if (!ret) {
+ acpi_status status;
+
+ status = acpi_set_current_resources(handle, &buffer);
+ if (ACPI_FAILURE(status))
+ ret = -EIO;
+ }
kfree(buffer.pointer);
- return ret;
}
- if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
- ret = -EINVAL;
- else if (acpi_bus_power_manageable(handle))
+ if (!ret && acpi_bus_power_manageable(handle))
ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
- kfree(buffer.pointer);
+
return ret;
}
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- int ret;
+ acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- ret = 0;
if (acpi_bus_power_manageable(handle))
acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
- /* continue even if acpi_bus_set_power() fails */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
- ret = -ENODEV;
- return ret;
+
+ /* continue even if acpi_bus_set_power() fails */
+ status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ return -ENODEV;
+
+ return 0;
}
#ifdef CONFIG_ACPI_SLEEP
}
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
#ifdef CONFIG_AMD_NB
{"PNP0c01", quirk_amd_mmconfig_area},
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
{"PNP0c02", quirk_intel_mch},
#endif
{""}
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
- do {
+ static int ntsm_unsupported;
+
+ while (true) {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e;
- sei->ntsm = ntsm;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
if (chsc(sei))
break;
if (sei->response.code != 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei->response.code);
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
break;
}
CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
}
/*
mpt2sas_base_free_resources(ioc);
pci_save_state(pdev);
- pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
return 0;
}
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues; i++)
+ for (i = 0; i < vscsi->num_queues; i++) {
+ if (!vscsi->req_vqs[i].vq)
+ continue;
+
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+ }
vscsi->affinity_hint_set = false;
}
struct work_struct free_work;
+ /*
+ * signals when all in-flight requests are done
+ */
+ struct completion *requests_done;
+
struct {
/*
* This counts the number of available slots in the ringbuffer,
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+	/* At this point we know that there are no in-flight requests */
+ if (ctx->requests_done)
+ complete(ctx->requests_done);
+
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
}
* when the processes owning a context have all exited to encourage
* the rapid destruction of the kioctx.
*/
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ struct completion *requests_done)
{
if (!atomic_xchg(&ctx->dead, 1)) {
struct kioctx_table *table;
if (ctx->mmap_size)
vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ ctx->requests_done = requests_done;
percpu_ref_kill(&ctx->users);
+ } else {
+ if (requests_done)
+ complete(requests_done);
}
}
*/
ctx->mmap_size = 0;
- kill_ioctx(mm, ctx);
+ kill_ioctx(mm, ctx, NULL);
}
}
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
if (ret)
- kill_ioctx(current->mm, ioctx);
+ kill_ioctx(current->mm, ioctx, NULL);
percpu_ref_put(&ioctx->users);
}
{
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
- kill_ioctx(current->mm, ioctx);
+ struct completion requests_done =
+ COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+ /* Pass requests_done to kill_ioctx() where it can be set
+ * in a thread-safe way. If we try to set it here then we have
+	 * a race condition if two io_destroy() calls run simultaneously.
+ */
+ kill_ioctx(current->mm, ioctx, &requests_done);
percpu_ref_put(&ioctx->users);
+
+	/* Wait until all IO for the context is done. Otherwise the kernel
+	 * keeps using user-space buffers even if the user thinks the context
+ * is destroyed.
+ */
+ wait_for_completion(&requests_done);
+
return 0;
}
pr_debug("EINVAL: io_destroy: invalid context id\n");
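
Editor's note: the on-stack completion used above is the standard kernel idiom for waiting on an asynchronous event without a heap allocation. A minimal self-contained sketch follows (not part of the patch; demo_* names are hypothetical, and a kthread stands in for the last in-flight request):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

/* The waiter owns the completion on its stack, the worker signals it
 * exactly once, and wait_for_completion() guarantees the stack slot
 * stays live until the signal arrives.
 */
struct demo_work {
	struct completion *done;
};

static int demo_worker(void *arg)
{
	struct demo_work *w = arg;

	/* ... asynchronous teardown would happen here ... */
	complete(w->done);		/* wake the waiter */
	return 0;
}

static void demo_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct demo_work w = { .done = &done };

	if (IS_ERR(kthread_run(demo_worker, &w, "demo-worker")))
		return;
	wait_for_completion(&done);	/* &w and &done stay valid until here */
}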
&iovec, compat)
: aio_setup_single_vector(req, rw, buf, &nr_segs,
iovec);
- if (ret)
- return ret;
-
- ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+ if (!ret)
+ ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
if (ret < 0) {
if (iovec != &inline_vec)
kfree(iovec);
rel->seq = cpu_to_le32(cap->seq);
rel->issue_seq = cpu_to_le32(cap->issue_seq),
rel->mseq = cpu_to_le32(cap->mseq);
- rel->caps = cpu_to_le32(cap->issued);
+ rel->caps = cpu_to_le32(cap->implemented);
rel->wanted = cpu_to_le32(cap->mds_wanted);
rel->dname_len = 0;
rel->dname_seq = 0;
/* start at beginning? */
if (ctx->pos == 2 || last == NULL ||
- ctx->pos < ceph_dentry(last)->offset) {
+ fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
if (list_empty(&parent->d_subdirs))
goto out_unlock;
p = parent->d_subdirs.prev;
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
+ /* make sure a dentry wasn't dropped while we didn't have parent lock */
+ if (!ceph_dir_is_complete(dir)) {
+ dout(" lost dir complete on %p; falling back to mds\n", dir);
+ dput(dentry);
+ err = -EAGAIN;
+ goto out;
+ }
+
dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
- ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len,
ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
return 0;
}
+ ctx->pos = di->offset + 1;
+
if (last)
dput(last);
last = dentry;
- ctx->pos++;
-
- /* make sure a dentry wasn't dropped while we didn't have parent lock */
- if (!ceph_dir_is_complete(dir)) {
- dout(" lost dir complete on %p; falling back to mds\n", dir);
- err = -EAGAIN;
- goto out;
- }
-
spin_lock(&parent->d_lock);
p = p->prev; /* advance to next dentry */
goto more;
err = __dcache_readdir(file, ctx, shared_gen);
if (err != -EAGAIN)
return err;
+ frag = fpos_frag(ctx->pos);
+ off = fpos_off(ctx->pos);
} else {
spin_unlock(&ci->i_ceph_lock);
}
if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
dout(" marking %p complete\n", inode);
__ceph_dir_set_complete(ci, fi->dir_release_count);
- ci->i_max_offset = ctx->pos;
}
spin_unlock(&ci->i_ceph_lock);
* to do it here.
*/
- /* d_move screws up d_subdirs order */
- ceph_dir_clear_complete(new_dir);
-
d_move(old_dentry, new_dentry);
/* ensure target dentry is invalidated, despite
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(new_dentry);
+
+ /* d_move screws up sibling dentries' offsets */
+ ceph_dir_clear_complete(old_dir);
+ ceph_dir_clear_complete(new_dir);
+
}
ceph_mdsc_put_request(req);
return err;
!__ceph_dir_is_complete(ci)) {
dout(" marking %p complete (empty)\n", inode);
__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
- ci->i_max_offset = 2;
}
no_change:
/* only update max_size on auth cap */
return;
}
-/*
- * Set dentry's directory position based on the current dir's max, and
- * order it in d_subdirs, so that dcache_readdir behaves.
- *
- * Always called under directory's i_mutex.
- */
-static void ceph_set_dentry_offset(struct dentry *dn)
-{
- struct dentry *dir = dn->d_parent;
- struct inode *inode = dir->d_inode;
- struct ceph_inode_info *ci;
- struct ceph_dentry_info *di;
-
- BUG_ON(!inode);
-
- ci = ceph_inode(inode);
- di = ceph_dentry(dn);
-
- spin_lock(&ci->i_ceph_lock);
- if (!__ceph_dir_is_complete(ci)) {
- spin_unlock(&ci->i_ceph_lock);
- return;
- }
- di->offset = ceph_inode(inode)->i_max_offset++;
- spin_unlock(&ci->i_ceph_lock);
-
- spin_lock(&dir->d_lock);
- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&dn->d_u.d_child, &dir->d_subdirs);
- dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
- dn->d_u.d_child.prev, dn->d_u.d_child.next);
- spin_unlock(&dn->d_lock);
- spin_unlock(&dir->d_lock);
-}
-
/*
* splice a dentry to an inode.
* caller must hold directory i_mutex for this to be safe.
* the caller) if we fail.
*/
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
- bool *prehash, bool set_offset)
+ bool *prehash)
{
struct dentry *realdn;
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
- if (set_offset)
- ceph_set_dentry_offset(dn);
out:
return dn;
}
{
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct inode *in = NULL;
- struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
int err = 0;
/* rename? */
if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
+ struct inode *olddir = req->r_old_dentry_dir;
+ BUG_ON(!olddir);
+
dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
req->r_old_dentry->d_name.len,
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(dn);
- /*
- * d_move() puts the renamed dentry at the end of
- * d_subdirs. We need to assign it an appropriate
- * directory offset so we can behave when dir is
- * complete.
- */
- ceph_set_dentry_offset(req->r_old_dentry);
+ /* d_move screws up sibling dentries' offsets */
+ ceph_dir_clear_complete(dir);
+ ceph_dir_clear_complete(olddir);
+
dout("dn %p gets new offset %lld\n", req->r_old_dentry,
ceph_dentry(req->r_old_dentry)->offset);
/* attach proper inode */
if (!dn->d_inode) {
+ ceph_dir_clear_complete(dir);
ihold(in);
- dn = splice_dentry(dn, in, &have_lease, true);
+ dn = splice_dentry(dn, in, &have_lease);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
(req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
req->r_op == CEPH_MDS_OP_MKSNAP)) {
struct dentry *dn = req->r_dentry;
+ struct inode *dir = req->r_locked_dir;
/* fill out a snapdir LOOKUPSNAP dentry */
BUG_ON(!dn);
- BUG_ON(!req->r_locked_dir);
- BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
- ininfo = rinfo->targeti.in;
- vino.ino = le64_to_cpu(ininfo->ino);
- vino.snap = le64_to_cpu(ininfo->snapid);
+ BUG_ON(!dir);
+ BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
dout(" linking snapped dir %p to dn %p\n", in, dn);
+ ceph_dir_clear_complete(dir);
ihold(in);
- dn = splice_dentry(dn, in, NULL, true);
+ dn = splice_dentry(dn, in, NULL);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
}
if (!dn->d_inode) {
- dn = splice_dentry(dn, in, NULL, false);
+ dn = splice_dentry(dn, in, NULL);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
dn = NULL;
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
+ req->r_num_caps = 1;
+
req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
req->r_args.setlayout.layout.fl_stripe_unit =
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
+ req->r_num_caps = 1;
req->r_args.setlayout.layout.fl_stripe_unit =
cpu_to_le32(l.stripe_unit);
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
+ req->r_num_caps = 1;
/* mds requires start and length rather than start and end */
if (LLONG_MAX == fl->fl_end)
struct timespec i_rctime;
u64 i_rbytes, i_rfiles, i_rsubdirs;
u64 i_files, i_subdirs;
- u64 i_max_offset; /* largest readdir offset, set with complete dir */
struct rb_root i_fragtree;
struct mutex i_fragtree_mutex;
if (c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
- return err;
+ goto out;
}
err = check_free_space(c);
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+#define set_fixmap_offset_io(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
}
#ifndef zero_bytemask
-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
+#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
#endif /* _ASM_WORD_AT_A_TIME_H */
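
Editor's note: both forms of the macro compute "all ones strictly above the highest set bit of mask"; the rewrite does it in a single shift, presumably so the two shift counts can never combine to BITS_PER_LONG, which would be an undefined shift. A quick userspace check of the identity (not part of the patch; assumes 64-bit unsigned long):

#include <stdio.h>

/* Verify (~0ul << b << 1) == (~1ul << b) for every defined shift count
 * on an LP64 target.
 */
int main(void)
{
	for (unsigned b = 0; b < 64; b++) {
		unsigned long two_shifts = ~0ul << b << 1;
		unsigned long one_shift = ~1ul << b;

		if (two_shifts != one_shift)
			printf("mismatch at bit %u\n", b);
	}
	printf("both forms agree for b in [0, 63]\n");
	return 0;
}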
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
- * @mask: cpumask
+ * @cpumask: cpumask
*
* Fails if cpumask does not contain an online CPU
*/
/**
* irq_force_affinity - Force the irq affinity of a given irq
* @irq: Interrupt to set affinity
- * @mask: cpumask
+ * @cpumask: cpumask
*
* Same as irq_set_affinity, but without checking the mask against
* online cpus.
return d ? irqd_get_trigger_type(d) : 0;
}
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner);
#ifdef CONFIG_OF_IRQ
extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
#else
static inline int of_irq_count(struct device_node *dev)
{
return 0;
}
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+ return 0;
+}
#endif
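
Editor's note: of_irq_get() differs from the older irq_of_parse_and_map() mainly in its error reporting: it can return -EPROBE_DEFER while the parent interrupt controller has not probed yet. A hedged driver-probe sketch of how a caller would use that (demo_* names are hypothetical, not part of the patch):

#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Hypothetical probe fragment: propagate -EPROBE_DEFER so the driver
 * core retries the probe once the interrupt controller is available,
 * instead of silently failing as with irq_of_parse_and_map().
 */
static int demo_probe(struct platform_device *pdev)
{
	int irq = of_irq_get(pdev->dev.of_node, 0);

	if (irq == -EPROBE_DEFER)
		return irq;
	if (irq <= 0)
		return -ENXIO;

	return devm_request_irq(&pdev->dev, irq, demo_handler, 0,
				"demo", NULL);
}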
#if defined(CONFIG_OF)
{ (1UL << TAINT_OOT_MODULE), "O" }, \
{ (1UL << TAINT_FORCED_MODULE), "F" }, \
{ (1UL << TAINT_CRAP), "C" }, \
- { (1UL << TAINT_UNSIGNED_MODULE), "X" })
+ { (1UL << TAINT_UNSIGNED_MODULE), "E" })
TRACE_EVENT(module_load,
goto again;
}
timer->base = new_base;
+ } else {
+ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+ cpu = this_cpu;
+ goto again;
+ }
}
return new_base;
}
cpu_base->expires_next.tv64 = expires_next.tv64;
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * leave the hang delay active in the hardware. We want the
+ * system to make progress. That also prevents the following
+ * scenario:
+ * T1 expires 50ms from now
+ * T2 expires 5s from now
+ *
+ * T1 is removed, so this code is called and would reprogram
+ * the hardware to 5s from now. Any hrtimer_start after that
+ * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+ if (cpu_base->hang_detected)
+ return;
+
if (cpu_base->expires_next.tv64 != KTIME_MAX)
tick_program_event(cpu_base->expires_next, 1);
}
if (from > irq)
return -EINVAL;
from = irq;
+ } else {
+ /*
+ * For interrupts which are freely allocated the
+ * architecture can force a lower bound to the @from
+ * argument. x86 uses this to exclude the GSI space.
+ */
+ from = arch_dynirq_lower_bound(from);
}
mutex_lock(&sparse_irq_lock);
return -EFAULT;
name[MODULE_NAME_LEN-1] = '\0';
- if (!(flags & O_NONBLOCK))
- pr_warn("waiting module removal not supported: please upgrade\n");
-
if (mutex_lock_interruptible(&module_mutex) != 0)
return -EINTR;
dynamic_debug_setup(info->debug, info->num_debug);
+ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+ ftrace_module_init(mod);
+
/* Finally it's fully formed, ready to start executing. */
err = complete_formation(mod, info);
if (err)
{
return 0;
}
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+ return from;
+}
bit = find_last_bit(&mask, BITS_PER_LONG);
- mask = (1 << bit) - 1;
+ mask = (1UL << bit) - 1;
expires_limit = expires_limit & ~(mask);
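
Editor's note: the 1UL matters because a bare 1 makes the shift happen in 32-bit int, so bit >= 31 on a 64-bit kernel is undefined before the result is ever widened to unsigned long. A small userspace demonstration (assuming LP64; not part of the patch):

#include <stdio.h>

/* (1 << bit) with bit >= 31 is undefined behaviour in int arithmetic;
 * (1UL << bit) keeps the whole expression in 64-bit unsigned long.
 */
int main(void)
{
	unsigned bit = 40;

	printf("(1UL << %u) - 1 = %#lx\n", bit, (1UL << bit) - 1);
	return 0;
}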
ftrace_process_locs(mod, start, end);
}
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
{
- struct module *mod = data;
-
- if (val == MODULE_STATE_COMING)
- ftrace_init_module(mod, mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
- return 0;
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
}
static int ftrace_module_notify_exit(struct notifier_block *self,
return 0;
}
#else
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
static int ftrace_module_notify_exit(struct notifier_block *self,
unsigned long val, void *data)
{
}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_enter_nb = {
- .notifier_call = ftrace_module_notify_enter,
- .priority = INT_MAX, /* Run before anything that can use kprobes */
-};
-
struct notifier_block ftrace_module_exit_nb = {
.notifier_call = ftrace_module_notify_exit,
.priority = INT_MIN, /* Run after anything that can remove kprobes */
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_enter_nb);
- if (ret)
- pr_warning("Failed to register trace ftrace module enter notifier\n");
-
ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret)
pr_warning("Failed to register trace ftrace module exit notifier\n");
data->ops->func(data);
continue;
}
- filter = rcu_dereference(data->filter);
+ filter = rcu_dereference_sched(data->filter);
if (filter && !filter_match_preds(filter, rec))
continue;
if (data->cmd_ops->post_trigger) {
for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i];
- if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
- BUG_ON(vma->vm_mm != mm);
+ if (!vma)
+ continue;
+ if (WARN_ON_ONCE(vma->vm_mm != mm))
+ break;
+ if (vma->vm_start <= addr && vma->vm_end > addr)
return vma;
- }
}
return NULL;
return;
for (i = 0; i < len; i++) {
- if (osds[i] != CRUSH_ITEM_NONE &&
- osdmap->osd_primary_affinity[i] !=
+ int osd = osds[i];
+
+ if (osd != CRUSH_ITEM_NONE &&
+ osdmap->osd_primary_affinity[osd] !=
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
break;
}
* osd's pgs get rejected as primary.
*/
for (i = 0; i < len; i++) {
- int osd;
+ int osd = osds[i];
u32 aff;
- osd = osds[i];
if (osd == CRUSH_ITEM_NONE)
continue;
#define EM_ARCOMPACT 93
#endif
+#ifndef EM_XTENSA
+#define EM_XTENSA 94
+#endif
+
#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif
case EM_AARCH64:
case EM_MICROBLAZE:
case EM_MIPS:
+ case EM_XTENSA:
break;
} /* end switch */
/* reset the corb hw read pointer */
azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
- for (timeout = 1000; timeout > 0; timeout--) {
- if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
- break;
- udelay(1);
- }
- if (timeout <= 0)
- dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
- azx_readw(chip, CORBRP));
+ if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
+ for (timeout = 1000; timeout > 0; timeout--) {
+ if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
+ break;
+ udelay(1);
+ }
+ if (timeout <= 0)
+ dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
+ azx_readw(chip, CORBRP));
- azx_writew(chip, CORBRP, 0);
- for (timeout = 1000; timeout > 0; timeout--) {
- if (azx_readw(chip, CORBRP) == 0)
- break;
- udelay(1);
+ azx_writew(chip, CORBRP, 0);
+ for (timeout = 1000; timeout > 0; timeout--) {
+ if (azx_readw(chip, CORBRP) == 0)
+ break;
+ udelay(1);
+ }
+ if (timeout <= 0)
+ dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
+ azx_readw(chip, CORBRP));
}
- if (timeout <= 0)
- dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
- azx_readw(chip, CORBRP));
/* enable corb dma */
azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
(AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
- AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+ AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
+ AZX_DCAPS_CORBRP_SELF_CLEAR)
#define AZX_DCAPS_PRESET_CTHDA \
(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
/* position fix mode */
enum {
SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret);
return ret;
}
- vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2);
if (ret < 0) {
dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret);
return ret;
}
+ vid2 >>= 8;
if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
dev_err(&client->dev, "unknown or wrong codec\n");
}
if (cs42l52->pdata.reset_gpio) {
- ret = gpio_request_one(cs42l52->pdata.reset_gpio,
- GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+ ret = devm_gpio_request_one(&i2c_client->dev,
+ cs42l52->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "CS42L52 /RST");
if (ret < 0) {
dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
cs42l52->pdata.reset_gpio, ret);
i2c_set_clientdata(i2c_client, cs42l73);
if (cs42l73->pdata.reset_gpio) {
- ret = gpio_request_one(cs42l73->pdata.reset_gpio,
- GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+ ret = devm_gpio_request_one(&i2c_client->dev,
+ cs42l73->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "CS42L73 /RST");
if (ret < 0) {
dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
cs42l73->pdata.reset_gpio, ret);
}
aic3x_add_widgets(codec);
- list_add(&aic3x->list, &reset_list);
return 0;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_aic3x, &aic3x_dai, 1);
- return ret;
+
+ if (ret != 0)
+ goto err_gpio;
+
+ list_add(&aic3x->list, &reset_list);
+
+ return 0;
err_gpio:
if (gpio_is_valid(aic3x->gpio_reset) &&
/* SPDIF Clock register */
#define STC_SYSCLK_DIV_OFFSET 11
-#define STC_SYSCLK_DIV_MASK (0x1ff << STC_TXCLK_SRC_OFFSET)
-#define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_SYSCLK_DIV_MASK (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
#define STC_TXCLK_SRC_OFFSET 8
#define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET)
#define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
enum sst_data_type data_type; /* type of module data */
u32 size; /* size in bytes */
- u32 offset; /* offset in FW file */
+ int32_t offset; /* offset in FW file */
u32 data_offset; /* offset in ADSP memory space */
void *data; /* module data */
};
case IPC_POSITION_CHANGED:
trace_ipc_notification("DSP stream position changed for",
stream->reply.stream_hw_id);
- sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos));
+ sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
if (stream->notify_position)
stream->notify_position(stream, stream->pdata);
return -EINVAL;
sst_dsp_read(hsw->dsp, volume,
- stream->reply.volume_register_address[channel], sizeof(volume));
+ stream->reply.volume_register_address[channel],
+ sizeof(*volume));
return 0;
}
trace_ipc_request("PM enter Dx state", state);
ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
- dx, sizeof(dx));
+ dx, sizeof(*dx));
if (ret < 0) {
dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
return ret;
#
# Jz4740 Platform Support
#
-snd-soc-jz4740-objs := jz4740-pcm.o
snd-soc-jz4740-i2s-objs := jz4740-i2s.o
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
# Jz4740 Machine Support
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
- clk_enable(src->clk);
+ clk_prepare_enable(src->clk);
return 0;
}
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
- clk_disable(src->clk);
+ clk_disable_unprepare(src->clk);
return 0;
}
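
Editor's note: clk_prepare_enable() is clk_prepare() followed by clk_enable(), and clk_disable_unprepare() is the reverse; the prepare/unprepare halves may sleep, so the combined helpers are only safe in non-atomic context. A minimal sketch of the pattern (demo_clk_on is a hypothetical name, not part of the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical fragment: grab a managed clock reference and turn it on.
 * clk_prepare_enable() == clk_prepare() + clk_enable(); the matching
 * teardown is clk_disable_unprepare(). Must not be called from atomic
 * context, because the prepare step may sleep.
 */
static int demo_clk_on(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (!ret)
		*out = clk;
	return ret;
}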
u32 cr;
if (0 == ssi->usrcnt) {
- clk_enable(ssi->clk);
+ clk_prepare_enable(ssi->clk);
if (rsnd_dai_is_clk_master(rdai)) {
if (rsnd_ssi_clk_from_parent(ssi))
rsnd_ssi_master_clk_stop(ssi);
}
- clk_disable(ssi->clk);
+ clk_disable_unprepare(ssi->clk);
}
dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
- kfree(data->widget);
kfree(data->wlist);
kfree(data);
}
char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug/",
- "/debug/",
+ "/sys/kernel/debug",
+ "/debug",
0,
};
format, len_arg, arg);
trace_seq_terminate(&p);
trace_seq_puts(s, p.buffer);
+ trace_seq_destroy(&p);
arg = arg->next;
break;
default:
struct event_filter *pevent_filter_alloc(struct pevent *pevent);
/* for backward compatibility */
-#define FILTER_NONE PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_NOEXIST PEVENT_ERRNO__NO_FILTER
+#define FILTER_NONE PEVENT_ERRNO__NO_FILTER
+#define FILTER_NOEXIST PEVENT_ERRNO__FILTER_NOT_FOUND
#define FILTER_MISS PEVENT_ERRNO__FILTER_MISS
#define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH
$(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
$(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+ $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
sp = (unsigned long) regs[PERF_REG_X86_SP];
- map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+ map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp);
if (!map) {
pr_debug("failed to get stack map\n");
+ free(buf);
return -1;
}
-
#include <linux/linkage.h>
#define AX 0
ret
ENDPROC(perf_regs_load)
#endif
+
+/*
+ * We need to provide a .note.GNU-stack section, stating that we do not
+ * want an executable stack. Otherwise the final link will assume that
+ * the ELF stack should not be restricted at all, and will set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif
+# So far there's only x86 libdw unwind support merged in perf.
+# Disable it on all other architectures in case libdw unwind
+# support is detected on the system. Add supported architectures
+# to the check.
+ifneq ($(ARCH),x86)
+ NO_LIBDW_DWARF_UNWIND := 1
+endif
+
ifeq ($(LIBUNWIND_LIBS),)
NO_LIBUNWIND := 1
else
CFLAGS += -Wextra
CFLAGS += -std=gnu99
+# Enforce a non-executable stack, as we may regress (again) in the future by
+# adding assembler files missing the .GNU-stack linker note.
+LDFLAGS += -Wl,-z,noexecstack
+
EXTLIBS = -lelf -lpthread -lrt -lm -ldl
ifneq ($(OUTPUT),)
stackprotector-all \
timerfd \
libunwind-debug-frame \
- bionic
+ bionic \
+ liberty \
+ liberty-z \
+ cplus-demangle
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd -lz -liberty
+ EXTLIBS += -lbfd
+
+ # call all detections now so we get correct
+ # status in VF output
+ $(call feature_check,liberty)
+ $(call feature_check,liberty-z)
+ $(call feature_check,cplus-demangle)
+
+ ifeq ($(feature-liberty), 1)
+ EXTLIBS += -liberty
+ else
+ ifeq ($(feature-liberty-z), 1)
+ EXTLIBS += -liberty -lz
+ endif
+ endif
endif
ifdef NO_DEMANGLE
CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
ifneq ($(feature-libbfd), 1)
- $(call feature_check,liberty)
- ifeq ($(feature-liberty), 1)
- EXTLIBS += -lbfd -liberty
- else
- $(call feature_check,liberty-z)
- ifeq ($(feature-liberty-z), 1)
- EXTLIBS += -lbfd -liberty -lz
- else
- $(call feature_check,cplus-demangle)
+ ifneq ($(feature-liberty), 1)
+ ifneq ($(feature-liberty-z), 1)
+        # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+        # nor any of the 'bfd iberty z' trinity
ifeq ($(feature-cplus-demangle), 1)
EXTLIBS += -liberty
CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
make_install_html := install-html
make_install_info := install-info
make_install_pdf := install-pdf
+make_static := LDFLAGS=-static
# all the NO_* variable combined
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
# run += make_install_info
# run += make_install_pdf
run += make_minimal
+run += make_static
ifneq ($(call has,ctags),)
run += make_tags
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
- const char *dir_name)
+ const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
!strcmp(dent->d_name, ".."))
continue;
- ret = map_groups__set_modules_path_dir(mg, path);
+ /* Do not follow top-level source and build symlinks */
+ if (depth == 0) {
+ if (!strcmp(dent->d_name, "source") ||
+ !strcmp(dent->d_name, "build"))
+ continue;
+ }
+
+ ret = map_groups__set_modules_path_dir(mg, path,
+ depth + 1);
if (ret < 0)
goto out;
} else {
if (!version)
return -1;
- snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
machine->root_dir, version);
free(version);
- return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
static int machine__create_module(void *arg, const char *name, u64 start)
u32 val;
u32 *reg;
- offset >>= 1;
reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset);
+ vcpu->vcpu_id, offset >> 1);
- if (offset & 2)
+ if (offset & 4)
val = *reg >> 16;
else
val = *reg & 0xffff;
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 4) {
+ if (offset < 8) {
*reg = ~0U; /* Force PPIs/SGIs to 1 */
return false;
}
val = vgic_cfg_compress(val);
- if (offset & 2) {
+ if (offset & 4) {
*reg &= 0xffff;
*reg |= val << 16;
} else {
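
Editor's note: the arithmetic above is easy to get wrong because offset is a byte offset. Each 32-bit GICD_ICFGR register describes 16 interrupts at 2 bits apiece, while the vgic keeps a compressed 1-bit-per-interrupt bitmap, so the byte offset is halved for the bitmap lookup and bit 2 of the offset selects the 16-bit half; offsets below 8 cover IRQs 0-31, the SGIs and PPIs forced to 1 above. A userspace illustration of the mapping (not part of the patch):

#include <stdio.h>

/* Map GICD_ICFGR byte offsets to the compressed 1-bit config bitmap:
 * halve the offset, and use bit 2 to pick the upper or lower half-word.
 */
int main(void)
{
	for (unsigned off = 0; off < 16; off += 4) {
		unsigned first_irq = (off / 4) * 16;	/* 16 IRQs per ICFGR */

		printf("byte offset %2u -> bitmap offset %u, %s half, IRQs %u-%u\n",
		       off, off >> 1, (off & 4) ? "upper" : "lower",
		       first_irq, first_irq + 15);
	}
	return 0;
}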
case 0:
if (!target_cpus)
return;
+ break;
case 1:
target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
if (addr + size < addr)
return -EINVAL;
+ *ioaddr = addr;
ret = vgic_ioaddr_overlap(kvm);
if (ret)
- return ret;
- *ioaddr = addr;
+ *ioaddr = VGIC_ADDR_UNDEF;
+
return ret;
}
if (dev->entries_nr == 0)
return r;
- r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+ r = pci_enable_msix_exact(dev->dev,
+ dev->host_msix_entries, dev->entries_nr);
if (r)
return r;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- mmdrop(mm);
+ mmput(mm);
kvm_put_kvm(vcpu->kvm);
}
flush_work(&work->work);
#else
if (cancel_work_sync(&work->work)) {
- mmdrop(work->mm);
+ mmput(work->mm);
kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
work->addr = hva;
work->arch = *arch;
work->mm = current->mm;
- atomic_inc(&work->mm->mm_count);
+ atomic_inc(&work->mm->mm_users);
kvm_get_kvm(work->vcpu->kvm);
/* this can't really happen otherwise gfn_to_pfn_async
return 1;
retry_sync:
kvm_put_kvm(work->vcpu->kvm);
- mmdrop(work->mm);
+ mmput(work->mm);
kmem_cache_free(async_pf_cache, work);
return 0;
}
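
Editor's note: the switch from mm_count to mm_users is the substance of this fix. An mm_count reference (mmdrop) only keeps the struct mm_struct allocated, whereas an mm_users reference (mmput) also keeps the address space itself, VMAs and page tables included, alive; async page faults dereference the address space, so they need the latter. A hedged sketch of the pinning idiom as used in this era of the kernel (demo_pin_mm is a hypothetical name; get_task_mm() is the usual wrapper for pinning another task's mm):

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

/* Hypothetical illustration: taking an mm_users reference pins the
 * whole address space and must be balanced by mmput(). A bare
 * mm_count reference would let the page tables be torn down while
 * we still intend to fault on them.
 */
static struct mm_struct *demo_pin_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* caller must later call mmput(mm) */
	return mm;
}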