ALPHA PORT
M: Richard Henderson <rth@twiddle.net>
-S: Odd Fixes for 2.4; Maintained for 2.6.
M: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
-S: Maintained for 2.4; PCI support for 2.6.
+M: Matt Turner <mattst88@gmail.com>
L: linux-alpha@vger.kernel.org
F: arch/alpha/
{
int cpu;
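+	/* a node of -1 means no node affinity: any CPU will do */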
+ if (node == -1)
+ return cpu_all_mask;
+
cpumask_clear(&node_to_cpumask_map[node]);
for_each_online_cpu(cpu) {
};
struct davinci_ks_platform_data {
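+	/* optional board hook invoked at probe time to power up the keyscan hardware */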
+ int (*device_enable)(struct device *dev);
unsigned short *keymap;
u32 keymapsize;
u8 rep:1;
/* DM9000AEP 10/100 ethernet controller */
-static struct resource mini2440_dm9k_resource[] __initdata = {
+static struct resource mini2440_dm9k_resource[] = {
[0] = {
.start = MACH_MINI2440_DM9K_BASE,
.end = MACH_MINI2440_DM9K_BASE + 3,
 * The DM9000 has no eeprom, and its MAC address is set by
* the bootloader before starting the kernel.
*/
-static struct dm9000_plat_data mini2440_dm9k_pdata __initdata = {
+static struct dm9000_plat_data mini2440_dm9k_pdata = {
.flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};
-static struct platform_device mini2440_device_eth __initdata = {
+static struct platform_device mini2440_device_eth = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(mini2440_dm9k_resource),
* | | +----+ +----+
* .....
*/
-static struct gpio_keys_button mini2440_buttons[] __initdata = {
+static struct gpio_keys_button mini2440_buttons[] = {
{
.gpio = S3C2410_GPG(0), /* K1 */
.code = KEY_F1,
#endif
};
-static struct gpio_keys_platform_data mini2440_button_data __initdata = {
+static struct gpio_keys_platform_data mini2440_button_data = {
.buttons = mini2440_buttons,
.nbuttons = ARRAY_SIZE(mini2440_buttons),
};
-static struct platform_device mini2440_button_device __initdata = {
+static struct platform_device mini2440_button_device = {
.name = "gpio-keys",
.id = -1,
.dev = {
/* LEDS */
-static struct s3c24xx_led_platdata mini2440_led1_pdata __initdata = {
+static struct s3c24xx_led_platdata mini2440_led1_pdata = {
.name = "led1",
.gpio = S3C2410_GPB(5),
.flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
.def_trigger = "heartbeat",
};
-static struct s3c24xx_led_platdata mini2440_led2_pdata __initdata = {
+static struct s3c24xx_led_platdata mini2440_led2_pdata = {
.name = "led2",
.gpio = S3C2410_GPB(6),
.flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
.def_trigger = "nand-disk",
};
-static struct s3c24xx_led_platdata mini2440_led3_pdata __initdata = {
+static struct s3c24xx_led_platdata mini2440_led3_pdata = {
.name = "led3",
.gpio = S3C2410_GPB(7),
.flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
.def_trigger = "mmc0",
};
-static struct s3c24xx_led_platdata mini2440_led4_pdata __initdata = {
+static struct s3c24xx_led_platdata mini2440_led4_pdata = {
.name = "led4",
.gpio = S3C2410_GPB(8),
.flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
.def_trigger = "",
};
-static struct s3c24xx_led_platdata mini2440_led_backlight_pdata __initdata = {
+static struct s3c24xx_led_platdata mini2440_led_backlight_pdata = {
.name = "backlight",
.gpio = S3C2410_GPG(4),
.def_trigger = "backlight",
};
-static struct platform_device mini2440_led1 __initdata = {
+static struct platform_device mini2440_led1 = {
.name = "s3c24xx_led",
.id = 1,
.dev = {
},
};
-static struct platform_device mini2440_led2 __initdata = {
+static struct platform_device mini2440_led2 = {
.name = "s3c24xx_led",
.id = 2,
.dev = {
},
};
-static struct platform_device mini2440_led3 __initdata = {
+static struct platform_device mini2440_led3 = {
.name = "s3c24xx_led",
.id = 3,
.dev = {
},
};
-static struct platform_device mini2440_led4 __initdata = {
+static struct platform_device mini2440_led4 = {
.name = "s3c24xx_led",
.id = 4,
.dev = {
},
};
-static struct platform_device mini2440_led_backlight __initdata = {
+static struct platform_device mini2440_led_backlight = {
.name = "s3c24xx_led",
.id = 5,
.dev = {
/* AUDIO */
-static struct s3c24xx_uda134x_platform_data mini2440_audio_pins __initdata = {
+static struct s3c24xx_uda134x_platform_data mini2440_audio_pins = {
.l3_clk = S3C2410_GPB(4),
.l3_mode = S3C2410_GPB(2),
.l3_data = S3C2410_GPB(3),
.model = UDA134X_UDA1341
};
-static struct platform_device mini2440_audio __initdata = {
+static struct platform_device mini2440_audio = {
.name = "s3c24xx_uda134x",
.id = 0,
.dev = {
.supply_name = "B_PWR_5V",
.microvolts = 5000000,
.init_data = &smdk6410_b_pwr_5v_data,
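+	/* a .gpio of -EINVAL marks the supply as not GPIO-controlled */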
+ .gpio = -EINVAL,
};
static struct platform_device smdk6410_b_pwr_5v = {
return -ENOMEM;
}
- size = sizeof(int) * set->nr_chips;
- if (size) {
+ if (set->nr_map && set->nr_chips) {
+ size = sizeof(int) * set->nr_chips;
ptr = kmemdup(set->nr_map, size, GFP_KERNEL);
set->nr_map = ptr;
.read = pit_read_clk,
.shift = 20,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/***************************************************************************/
unreachable(); \
} while (0)
+#define __WARN() do { \
+ __EMIT_BUG(BUGFLAG_WARNING); \
+} while (0)
+
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
#ifndef __SIGP__
#define __SIGP__
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <asm/system.h>
/* get real cpu address from logical cpu number */
-extern volatile int __cpu_logical_map[];
+extern int __cpu_logical_map[];
+
+static inline int cpu_logical_map(int cpu)
+{
+#ifdef CONFIG_SMP
+ return __cpu_logical_map[cpu];
+#else
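+	/* !SMP: stap() stores the address of the current (and only) CPU */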
+ return stap();
+#endif
+}
typedef enum
{
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
- : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
+ : "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
"a" (order_code) : "cc" , "memory");
return ccode;
}
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
- : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
+ : "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
"a" (order_code) : "cc" , "memory");
return ccode;
}
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode), "+d" (reg1)
- : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
+ : "d" (cpu_logical_map(cpu_addr)), "a" (order_code)
: "cc" , "memory");
*statusptr = reg1;
return ccode;
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
-#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
-#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_31BIT 18 /* 32bit process */
-#define TIF_MEMDIE 19
-#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
+#define TIF_31BIT 17 /* 32bit process */
+#define TIF_MEMDIE 18
+#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
+#define TIF_FREEZE 20 /* thread is freezing for suspend */
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_FREEZE (1<<TIF_FREEZE)
#define __NR_pwritev 329
#define __NR_rt_tgsigqueueinfo 330
#define __NR_perf_event_open 331
-#define __NR_recvmmsg 332
-#define NR_syscalls 333
+#define NR_syscalls 332
/*
* There are some system calls that are not present on 64 bit, some
#define __IGNORE_migrate_pages
#define __IGNORE_move_pages
+/* Ignore system calls that are also reachable via sys_socketcall */
+#define __IGNORE_recvmmsg
+
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
*/
struct mmap_arg_struct_emu31 {
- u32 addr;
- u32 len;
- u32 prot;
- u32 flags;
- u32 fd;
- u32 offset;
+ compat_ulong_t addr;
+ compat_ulong_t len;
+ compat_ulong_t prot;
+ compat_ulong_t flags;
+ compat_ulong_t fd;
+ compat_ulong_t offset;
};
-asmlinkage unsigned long
-old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
struct mmap_arg_struct_emu31 a;
- int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
-
- error = -EINVAL;
+ return -EFAULT;
if (a.offset & ~PAGE_MASK)
- goto out;
-
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
-out:
- return error;
+ return -EINVAL;
+ a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
}
-asmlinkage long
-sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
{
struct mmap_arg_struct_emu31 a;
- int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-out:
- return error;
+ return -EFAULT;
+ a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_uptr_t *
jg sys32_execve # branch to system call
-
- .globl compat_sys_recvmmsg_wrapper
-compat_sys_recvmmsg_wrapper:
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct compat_mmsghdr *
- llgfr %r4,%r4 # unsigned int
- llgfr %r5,%r5 # unsigned int
- llgtr %r6,%r6 # struct compat_timespec *
- jg compat_sys_recvmmsg
void flush_thread(void)
{
- clear_used_math();
- clear_tsk_thread_flag(current, TIF_USEDFPU);
}
void release_thread(struct task_struct *dead_task)
p->thread.mm_segment = get_fs();
/* Don't copy debug registers */
memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+ clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
ti = task_thread_info(p);
ti->user_timer = 0;
{
struct pt_regs *regs;
per_struct *per_info;
+ per_cr_words cr_words;
regs = task_pt_regs(task);
per_info = (per_struct *) &task->thread.per_info;
per_info->control_regs.bits.storage_alt_space_ctl = 1;
else
per_info->control_regs.bits.storage_alt_space_ctl = 0;
+
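+	/* the task is running right now: make the new PER control registers effective immediately */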
+ if (task == current) {
+ __ctl_store(cr_words, 9, 11);
+ if (memcmp(&cr_words, &per_info->control_regs.words,
+ sizeof(cr_words)) != 0)
+ __ctl_load(per_info->control_regs.words, 9, 11);
+ }
}
void user_enable_single_step(struct task_struct *task)
char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
-volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
int __initdata memory_end_set;
unsigned long __initdata memory_end;
*/
get_cpu_id(&S390_lowcore.cpu_id);
- /*
- * Force FPU initialization:
- */
- clear_thread_flag(TIF_USEDFPU);
- clear_used_math();
-
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
setup_lowcore();
cpu_init();
- __cpu_logical_map[0] = stap();
s390_init_cpu_topology();
/*
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
- /*
- * If we would have taken a single-step trap
- * for a normal instruction, act like we took
- * one for the handler setup.
- */
- if (current->thread.per_info.single_step)
- set_thread_flag(TIF_SINGLE_STEP);
-
/*
* Let tracing know that we've done the handler setup.
*/
tracehook_signal_handler(signr, &info, &ka, regs,
- test_thread_flag(TIF_SINGLE_STEP));
+ current->thread.per_info.single_step);
}
return;
}
#include <asm/cpu.h>
#include "entry.h"
+/* logical cpu to cpu address */
+int __cpu_logical_map[NR_CPUS];
+
static struct task_struct *current_set[NR_CPUS];
static u8 smp_cpu_type;
{
}
+void __init smp_setup_processor_id(void)
+{
+ S390_lowcore.cpu_nr = 0;
+ __cpu_logical_map[0] = stap();
+}
+
/*
* the frequency of the profiling timer can be changed
* by writing a multiplier value into /proc/profile.
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
-SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper)
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
- if (__cpu_logical_map[lcpu] == rcpu) {
+ if (cpu_logical_map(lcpu) == rcpu) {
cpu_set(lcpu, core->mask);
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
{
if (MACHINE_HAS_DIAG9C)
asm volatile("diag %0,0,0x9c"
- : : "d" (__cpu_logical_map[cpu]));
+ : : "d" (cpu_logical_map(cpu)));
else
_raw_yield();
}
static inline unsigned long mmap_base(void)
{
- unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ unsigned long gap = rlimit(RLIMIT_STACK);
if (gap < MIN_GAP)
gap = MIN_GAP;
#endif
return sysctl_legacy_va_layout ||
(current->personality & ADDR_COMPAT_LAYOUT) ||
- current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
+ rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}
#ifndef CONFIG_64BIT
{
I2C_BOARD_INFO("r2025sd", 0x32),
},
+ {
+ I2C_BOARD_INFO("lis3lv02d", 0x1c),
+ .irq = 33,
+ }
};
/* KEYSC */
gpio_direction_output(GPIO_PTU0, 0);
mdelay(20);
+ /* enable motion sensor */
+ gpio_request(GPIO_FN_INTC_IRQ1, NULL);
+ gpio_direction_input(GPIO_FN_INTC_IRQ1);
+
/* enable I2C device */
i2c_register_board_info(0, i2c0_devices,
ARRAY_SIZE(i2c0_devices));
#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
-extern struct atomic_notifier_head x86_mce_decoder_chain;
#ifdef __KERNEL__
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
#include <linux/percpu.h>
#include <linux/init.h>
#include <asm/atomic.h>
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
+/*
+ * Get the minimum revision number of the hub chips within the partition.
+ * 1 - initial rev 1.0 silicon
+ * 2 - rev 2.0 production silicon
+ */
+static inline int uv_get_min_hub_revision_id(void)
+{
+ extern int uv_min_hub_revision_id;
+
+ return uv_min_hub_revision_id;
+}
+
#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */
#include <asm/x86_init.h>
int gart_iommu_aperture;
+EXPORT_SYMBOL_GPL(gart_iommu_aperture);
int gart_iommu_aperture_disabled __initdata;
int gart_iommu_aperture_allowed __initdata;
static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
+int uv_min_hub_revision_id;
+EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
static inline bool is_GRU_range(u64 start, u64 end)
{
mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
node_id.v = *mmr;
early_iounmap(mmr, sizeof(*mmr));
+
+	/* Currently, all blades have the same revision number */
+ uv_min_hub_revision_id = node_id.s.revision;
+
return node_id.s.node_id;
}
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
+ int nodeid;
+
if (!strcmp(oem_id, "SGI")) {
+ nodeid = early_get_nodeid();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH")) {
__get_cpu_var(x2apic_extra_bits) =
- early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
+ nodeid << (UV_APIC_PNODE_SHIFT - 1);
uv_system_type = UV_NON_UNIQUE_APIC;
return 1;
}
enum map_type {map_wb, map_uc};
-static __init void map_high(char *id, unsigned long base, int shift,
- int max_pnode, enum map_type map_type)
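+/* the base is shifted by pshift to form the address; bshift sizes the per-pnode mapping */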
+static __init void map_high(char *id, unsigned long base, int pshift,
+ int bshift, int max_pnode, enum map_type map_type)
{
unsigned long bytes, paddr;
- paddr = base << shift;
- bytes = (1UL << shift) * (max_pnode + 1);
+ paddr = base << pshift;
+ bytes = (1UL << bshift) * (max_pnode + 1);
printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
paddr + bytes);
if (map_type == map_uc)
gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
if (gru.s.enable) {
- map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+ map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
gru_start_paddr = ((u64)gru.s.base << shift);
gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
if (mmr.s.enable)
- map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+ map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}
static __init void map_mmioh_high(int max_pnode)
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
if (mmioh.s.enable)
- map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+ map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+ max_pnode, map_uc);
}
static __init void map_low_mmrs(void)
while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
unsigned long addr = *ret_addr;
- if (__kernel_text_address(addr)) {
- ops->address(data, addr, 1);
- frame = frame->next_frame;
- ret_addr = &frame->return_address;
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
- }
+ if (!__kernel_text_address(addr))
+ break;
+
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+ print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
}
+
return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
regs.es = __USER_DS;
regs.fs = __KERNEL_PERCPU;
regs.gs = __KERNEL_STACK_CANARY;
+#else
+ regs.ss = __KERNEL_DS;
#endif
regs.orig_ax = -1;
/* keep using Xen gdt for now; no urgent need to change it */
+#ifdef CONFIG_X86_32
pv_info.kernel_rpl = 1;
if (xen_feature(XENFEAT_supervisor_mode_kernel))
pv_info.kernel_rpl = 0;
+#else
+ pv_info.kernel_rpl = 0;
+#endif
/* set the limit of our address space */
xen_reserve_top();
* Block size attribute stuff
*/
static ssize_t
-print_block_size(struct class *class, char *buf)
+print_block_size(struct sysdev_class *class,
+ struct sysdev_class_attribute *class_attr,
+ char *buf)
{
- return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+ return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
}
-static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static int block_size_init(void)
{
return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_block_size_bytes.attr);
+ &attr_block_size_bytes.attr);
}
/*
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
-memory_probe_store(struct class *class, const char *buf, size_t count)
+memory_probe_store(struct sysdev_class *class,
+ struct sysdev_class_attribute *class_attr,
+ const char *buf, size_t count)
{
u64 phys_addr;
int nid;
return count;
}
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
static int memory_probe_init(void)
{
return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_probe.attr);
+ &attr_probe.attr);
}
#else
static inline int memory_probe_init(void)
/* Soft offline a page */
static ssize_t
-store_soft_offline_page(struct class *class, const char *buf, size_t count)
+store_soft_offline_page(struct sysdev_class *class,
+ struct sysdev_class_attribute *class_attr,
+ const char *buf, size_t count)
{
int ret;
u64 pfn;
/* Forcibly offline a page, including killing processes. */
static ssize_t
-store_hard_offline_page(struct class *class, const char *buf, size_t count)
+store_hard_offline_page(struct sysdev_class *class,
+ struct sysdev_class_attribute *class_attr,
+ const char *buf, size_t count)
{
int ret;
u64 pfn;
return ret ? ret : count;
}
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static SYSDEV_CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static SYSDEV_CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
int err;
err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_soft_offline_page.attr);
+ &attr_soft_offline_page.attr);
if (!err)
err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_hard_offline_page.attr);
+ &attr_hard_offline_page.attr);
return err;
}
#else
int __init agp_amd64_init(void)
{
int err = 0;
- static int done = 0;
if (agp_off)
return -EINVAL;
- if (done++)
+ if (gart_iommu_aperture)
return agp_bridges_found ? 0 : -ENODEV;
err = pci_register_driver(&agp_amd64_pci_driver);
static void __exit agp_amd64_cleanup(void)
{
+ if (gart_iommu_aperture)
+ return;
if (aperture_resource)
release_resource(aperture_resource);
pci_unregister_driver(&agp_amd64_pci_driver);
&bridge->mode);
}
- if (bridge->driver->mask_memory == intel_i965_mask_memory)
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
dev_err(&intel_private.pcidev->dev,
"set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
{ 0 },
};
-static struct virtio_driver virtio_rng = {
+static struct virtio_driver virtio_rng_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
static int __init init(void)
{
- return register_virtio_driver(&virtio_rng);
+ return register_virtio_driver(&virtio_rng_driver);
}
static void __exit fini(void)
{
- unregister_virtio_driver(&virtio_rng);
+ unregister_virtio_driver(&virtio_rng_driver);
}
module_init(init);
module_exit(fini);
edac_printk(KERN_DEBUG, EDAC_MC,
"pci-read, sdram scrub control value: %d \n", scrubval);
- for (i = 0; ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
if (scrubrates[i].scrubval == scrubval) {
*bw = scrubrates[i].bandwidth;
status = 0;
debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
- channel = branch;
+
+ /*
+	 * According to the i5000 datasheet, bit 28 has no significance
+	 * for errors M4Err-M12Err and M17Err-M21Err on FERR_NF_FBD
+ */
+ channel = branch & 2;
+
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
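+	/* 965G takes a 36-bit status page address: fold bits 35:32 into register bits 7:4 */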
+ if (IS_I965G(dev))
+ dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+ 0xf0;
+
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0400);
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
static struct drm_driver driver;
#define INTEL_VGA_DEVICE(id, info) { \
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
.resume = i915_resume,
+
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
unsigned int lvds_use_ssc:1;
unsigned int edp_support:1;
int lvds_ssc_freq;
+ int edp_bpp;
struct notifier_block lid_notifier;
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
return 0;
}
+/*
+ * Prepare a buffer for the display plane. Use an uninterruptible wait for
+ * any needed flush, since during modesetting we're not supposed to be
+ * interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+
+ /* Wait on any GPU rendering and flushing to occur. */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ if (ret != 0)
+ return ret;
+ }
+
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
+ return 0;
+}
+
/**
* Moves a single object to the CPU read, and possibly write domain.
*
"back to user (%d)\n",
args->buffer_count, ret);
}
- } else {
- DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
}
drm_free_large(exec_list);
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
- u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
- for (;;) {
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
- break;
-
- ret = IRQ_HANDLED;
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ goto done;
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- new_pch_iir = I915_READ(SDEIIR);
+ ret = IRQ_HANDLED;
- I915_WRITE(DEIIR, de_iir);
- new_de_iir = I915_READ(DEIIR);
- I915_WRITE(GTIIR, gt_iir);
- new_gt_iir = I915_READ(GTIIR);
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
- if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ if (gt_iir & GT_USER_INTERRUPT) {
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ }
- /* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
- de_iir = new_de_iir;
- gt_iir = new_gt_iir;
- pch_iir = new_pch_iir;
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+	/* the PCH hotplug event should be cleared before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+
+done:
I915_WRITE(DEIER, de_ier);
(void)I915_READ(DEIER);
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
-#define DSPFW_CURSORA_SHIFT 16
+#define DSPFW_CURSORA_SHIFT 8
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
+static int panel_type;
+
static void *
find_section(struct bdb_header *bdb, int section_id)
{
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+ panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
memset(temp_mode, 0, sizeof(*temp_mode));
}
kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
dev_priv->render_reclock_avail = true;
}
+static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+				      "supported, assume 18bpp panel color depth.\n");
+ dev_priv->edp_bpp = 18;
+ }
+ return;
+ }
+
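+	/* the VBT packs a two-bit color depth code per panel type */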
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp_bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp_bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp_bpp = 30;
+ break;
+ }
+}
+
static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
u8 custom_vbt_version;
} __attribute__((packed));
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t3;
+ u16 t7;
+ u16 t9;
+ u16 t10;
+ u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
bool intel_init_bios(struct drm_device *dev);
/*
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
- bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
};
#define I8XX_DOT_MIN 25000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 5
+#define IRONLAKE_N_MAX 6
#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 118
+#define IRONLAKE_M_MAX 127
#define IRONLAKE_M1_MIN 12
-#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
#define IRONLAKE_P_SDVO_DAC_MIN 5
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
-static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
/* below parameter and function is for G4X Chipset Family*/
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_ironlake_sdvo = {
return (err != target);
}
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-
-{
- struct drm_device *dev = crtc->dev;
- intel_clock_t clock;
- int err = target;
- bool found = false;
-
- memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in Pineview */
- if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
- break;
- for (clock.n = limit->n.min; clock.n <= limit->n.max;
- clock.n++) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
return ret;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_display_plane(obj);
if (ret != 0) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- if (is_lvds && limit->find_reduced_pll &&
- dev_priv->lvds_downclock_avail) {
- memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
- has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
+ } else if (is_edp) {
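+		/* edp_bpp is bits per pixel; the pipe is programmed in bits per channel */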
+ switch (dev_priv->edp_bpp/3) {
+ case 8:
+ temp |= PIPE_8BPC;
+ break;
+ case 10:
+ temp |= PIPE_10BPC;
+ break;
+ case 6:
+ temp |= PIPE_6BPC;
+ break;
+ case 12:
+ temp |= PIPE_12BPC;
+ break;
+ }
} else
temp |= PIPE_8BPC;
I915_WRITE(pipeconf_reg, temp);
/* I think this is a fiction */
static int
-intel_dp_link_required(int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+ struct intel_output *intel_output, int pixel_clock)
{
- return pixel_clock * 3;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_eDP(intel_output))
+ return (pixel_clock * dev_priv->edp_bpp) / 8;
+ else
+ return pixel_clock * 3;
}
static int
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
int max_lanes = intel_dp_max_lane_count(intel_output);
- if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+ > max_link_clock * max_lanes)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
- if (intel_dp_link_required(mode->clock) <= link_avail) {
+ if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+ <= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given DP is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it is assumed that the given
- * DP is present.
- */
-static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, dp_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- dp_port = 0;
- if (dp_reg == DP_B || dp_reg == PCH_DP_B)
- dp_port = PORT_IDPB;
- else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
- dp_port = PORT_IDPC;
- else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
- dp_port = PORT_IDPD;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not DP, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_DP &&
- p_child->device_type != DEVICE_TYPE_eDP)
- continue;
- /* Find the eDP port */
- if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
- ret = 1;
- break;
- }
- /* Find the DP port */
- if (p_child->dvo_port == dp_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct intel_dp_priv *dp_priv;
const char *name = NULL;
- if (!dp_is_present_in_vbt(dev, output_reg)) {
- DRM_DEBUG_KMS("DP is not present. Ignore it\n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_output)) {
- intel_output->crtc_mask = (1 << 1);
+ if (IS_eDP(intel_output))
intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- } else
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
.destroy = intel_hdmi_enc_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given HDMI is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the given
- * HDMI is present.
- */
-static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, hdmi_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- if (hdmi_reg == SDVOB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == SDVOC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMIB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == HDMIC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMID)
- hdmi_port = DVO_D;
- else
- return 0;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not HDMI, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_HDMI)
- continue;
- /* Find the HDMI port */
- if (p_child->dvo_port == hdmi_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
- if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
- DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
/* Some lid devices report incorrect lid status, assume they're connected */
static const struct dmi_system_id bad_lid_status[] = {
+ {
+ .ident = "Compaq nx9020",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "3084"),
+ },
+ },
+ {
+ .ident = "Samsung SX20S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
+ },
+ },
{
.ident = "Aspire One",
.matches = {
}
}
mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
}
/**
- * Don't check status code from this as it switches the bus back to the
- * SDVO chips which defeats the purpose of doing a bus switch in the first
- * place.
+ * Try to read the response after issuing the DDC switch command. Note
+ * that issuing the DDC switch command and reading the response must be
+ * done in a single I2C transaction: if we start another I2C transaction
+ * after issuing the DDC bus switch, the bus is switched back to the
+ * internal SDVO register.
*/
static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
u8 target)
{
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	u8 out_buf[2], cmd_buf[2], ret_value[2];
+	int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ },
+ /* the following two are to read the response */
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ret_value,
+ },
+ };
+
+ intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &target, 1);
+ /* write the DDC switch command argument */
+ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+
+ out_buf[0] = SDVO_I2C_OPCODE;
+ out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
+ cmd_buf[0] = SDVO_I2C_CMD_STATUS;
+ cmd_buf[1] = 0;
+ ret_value[0] = 0;
+ ret_value[1] = 0;
+
+ ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+ if (ret != 3) {
+ /* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2C transfer returned %d\n", ret);
+ return;
+ }
+ if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("DDC switch command returns response %d\n",
+ ret_value[0]);
+ return;
+ }
+ return;
}
static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
edid = drm_get_edid(&intel_output->base,
intel_output->ddc_bus);
+ /* This is only applied to SDVO cards with multiple outputs */
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+ uint8_t saved_ddc, temp_ddc;
+ saved_ddc = sdvo_priv->ddc_bus;
+ temp_ddc = sdvo_priv->ddc_bus >> 1;
+ /*
+			 * Don't use 1 as the DDC bus switch argument when
+			 * fetching the EDID; that bus is used for the SDVO
+			 * SPD ROM.
+ */
+		while (temp_ddc > 1) {
+ sdvo_priv->ddc_bus = temp_ddc;
+ edid = drm_get_edid(&intel_output->base,
+ intel_output->ddc_bus);
+ if (edid) {
+ /*
+				 * If we can read an EDID, this is likely the
+				 * correct DDC bus; keep it.
+ */
+ sdvo_priv->ddc_bus = temp_ddc;
+ break;
+ }
+ temp_ddc >>= 1;
+ }
+ if (edid == NULL)
+ sdvo_priv->ddc_bus = saved_ddc;
+ }
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
-
- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
drm_sysfs_connector_add(connector);
}
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
+ } while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
+ } while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
unsigned long timeout;
if (irq > -1) {
- ret = wait_event_interruptible_timeout(pca_wait,
+ ret = wait_event_timeout(pca_wait,
pca_isa_readbyte(pd, I2C_PCA_CON)
& I2C_PCA_CON_SI, pca_isa_ops.timeout);
} else {
}
static irqreturn_t pca_handler(int this_irq, void *dev_id) {
- wake_up_interruptible(&pca_wait);
+ wake_up(&pca_wait);
return IRQ_HANDLED;
}
unsigned long timeout;
if (i2c->irq) {
- ret = wait_event_interruptible_timeout(i2c->wait,
+ ret = wait_event_timeout(i2c->wait,
i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
& I2C_PCA_CON_SI, i2c->adap.timeout);
} else {
if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
return IRQ_NONE;
- wake_up_interruptible(&i2c->wait);
+ wake_up(&i2c->wait);
return IRQ_HANDLED;
}
else
msleep(1);
- while ((timeout++ < MAX_TIMEOUT) &&
+ while ((++timeout < MAX_TIMEOUT) &&
((temp = inb_p(SMBHSTSTS)) & 0x01))
msleep(1);
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
do {
msleep(1);
temp = inb_p(SMBHSTSTS);
- } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT));
+ } while ((temp & 0x01) && (++timeout < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
result = -ETIMEDOUT;
dev_err(&vt596_adapter.dev, "SMBus timeout!\n");
}
NULL
};
-const static struct dev_pm_ops i2c_device_pm_ops = {
+static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
};
adap->dev.parent);
#endif
+ /* device name is gone after device_unregister */
+ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
+
/* clean up the sysfs representation */
init_completion(&adap->dev_released);
device_unregister(&adap->dev);
idr_remove(&i2c_adapter_idr, adap->nr);
mutex_unlock(&core_lock);
- dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
-
/* Clear the device structure in case this adapter is ever going to be
added again */
memset(&adap->dev, 0, sizeof(adap->dev));
client->head &= EVDEV_BUFFER_SIZE - 1;
spin_unlock(&client->buffer_lock);
- kill_fasync(&client->fasync, SIGIO, POLL_IN);
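+	/* defer SIGIO until the event packet is complete (terminated by EV_SYN) */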
+ if (event->type == EV_SYN)
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
/*
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
+#include "input-compat.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
return error;
}
+#ifdef CONFIG_COMPAT
+
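+/* compat userspace expects 32-bit bitmap words, so emit each long as two dwords */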
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ int len = 0;
+
+ if (INPUT_COMPAT_TEST) {
+ u32 dword = bits >> 32;
+ if (dword || !skip_empty)
+ len += snprintf(buf, buf_size, "%x ", dword);
+
+ dword = bits & 0xffffffffUL;
+ if (dword || !skip_empty || len)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%x", dword);
+ } else {
+ if (bits || !skip_empty)
+ len += snprintf(buf, buf_size, "%lx", bits);
+ }
+
+ return len;
+}
+
+#else /* !CONFIG_COMPAT */
+
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ return bits || !skip_empty ?
+ snprintf(buf, buf_size, "%lx", bits) : 0;
+}
+
+#endif
#ifdef CONFIG_PROC_FS
unsigned long *bitmap, int max)
{
int i;
-
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
+ bool skip_empty = true;
+ char buf[18];
seq_printf(seq, "B: %s=", name);
- for (; i >= 0; i--)
- seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
+
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ if (input_bits_to_string(buf, sizeof(buf),
+ bitmap[i], skip_empty)) {
+ skip_empty = false;
+ seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
+ }
+ }
+
+ /*
+	 * If no output was produced, print a single 0.
+ */
+ if (skip_empty)
+ seq_puts(seq, "0");
+
seq_putc(seq, '\n');
}
{
int i;
int len = 0;
+ bool skip_empty = true;
+
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ len += input_bits_to_string(buf + len, max(buf_size - len, 0),
+ bitmap[i], skip_empty);
+ if (len) {
+ skip_empty = false;
+ if (i > 0)
+ len += snprintf(buf + len, max(buf_size - len, 0), " ");
+ }
+ }
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
-
- for (; i >= 0; i--)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%lx%s", bitmap[i], i > 0 ? " " : "");
+ /*
+	 * If no output was produced, print a single 0.
+ */
+ if (len == 0)
+ len = snprintf(buf, buf_size, "%d", 0);
if (add_cr)
len += snprintf(buf + len, max(buf_size - len, 0), "\n");
{ \
struct input_dev *input_dev = to_input_dev(dev); \
int len = input_print_bitmap(buf, PAGE_SIZE, \
- input_dev->bm##bit, ev##_MAX, 1); \
+ input_dev->bm##bit, ev##_MAX, \
+ true); \
return min_t(int, len, PAGE_SIZE); \
} \
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
len = input_print_bitmap(&env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen,
- bitmap, max, 0);
+ bitmap, max, false);
if (len >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
}
#ifdef RESET_WORKS
- if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) ||
+ if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) &&
(gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) {
err = -ENODEV;
goto fail2;
}
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
xpad->odata[6] = 0x00;
xpad->odata[7] = 0x00;
xpad->irq_out->transfer_buffer_length = 8;
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
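+	/* force feedback callbacks may run in atomic context, so the URB submission must not sleep */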
+ usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
}
return 0;
struct delayed_work event_work;
unsigned long event_jiffies;
- struct mutex event_mutex;
unsigned long event_mask;
+
+ /* Serializes reconnect(), attr->set() and event work */
+ struct mutex mutex;
};
/*
{
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
- mutex_lock(&atkbd->event_mutex);
+ mutex_lock(&atkbd->mutex);
if (!atkbd->enabled) {
/*
atkbd_set_repeat_rate(atkbd);
}
- mutex_unlock(&atkbd->event_mutex);
+ mutex_unlock(&atkbd->mutex);
}
/*
atkbd->event_jiffies = jiffies;
set_bit(event_bit, &atkbd->event_mask);
- wmb();
+ mb();
schedule_delayed_work(&atkbd->event_work, delay);
}
{
struct atkbd *atkbd = serio_get_drvdata(serio);
+ sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
+
atkbd_disable(atkbd);
- /* make sure we don't have a command in flight */
+ input_unregister_device(atkbd->dev);
+
+ /*
+ * Make sure we don't have a command in flight.
+	 * Note that since atkbd->enabled is false, the event work will keep
+	 * rescheduling itself until it gets canceled, and will not try to
+	 * access the freed input device or serio port.
+ */
cancel_delayed_work_sync(&atkbd->event_work);
- sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
- input_unregister_device(atkbd->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
kfree(atkbd);
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work);
- mutex_init(&atkbd->event_mutex);
+ mutex_init(&atkbd->mutex);
switch (serio->id.type) {
{
struct atkbd *atkbd = serio_get_drvdata(serio);
struct serio_driver *drv = serio->drv;
+ int retval = -1;
if (!atkbd || !drv) {
printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
+ mutex_lock(&atkbd->mutex);
+
atkbd_disable(atkbd);
if (atkbd->write) {
if (atkbd_probe(atkbd))
- return -1;
+ goto out;
+
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
- return -1;
+ goto out;
atkbd_activate(atkbd);
}
atkbd_enable(atkbd);
+ retval = 0;
- return 0;
+ out:
+ mutex_unlock(&atkbd->mutex);
+ return retval;
}
static struct serio_device_id atkbd_serio_ids[] = {
ssize_t (*handler)(struct atkbd *, char *))
{
struct serio *serio = to_serio_port(dev);
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- retval = handler((struct atkbd *)serio_get_drvdata(serio), buf);
+ struct atkbd *atkbd = serio_get_drvdata(serio);
-out:
- serio_unpin_driver(serio);
- return retval;
+ return handler(atkbd, buf);
}
static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
ssize_t (*handler)(struct atkbd *, const char *, size_t))
{
struct serio *serio = to_serio_port(dev);
- struct atkbd *atkbd;
+ struct atkbd *atkbd = serio_get_drvdata(serio);
int retval;
- retval = serio_pin_driver(serio);
+ retval = mutex_lock_interruptible(&atkbd->mutex);
if (retval)
return retval;
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- atkbd = serio_get_drvdata(serio);
atkbd_disable(atkbd);
retval = handler(atkbd, buf, count);
atkbd_enable(atkbd);
-out:
- serio_unpin_driver(serio);
+ mutex_unlock(&atkbd->mutex);
+
return retval;
}
struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
int error, i;
+ if (pdata->device_enable) {
+ error = pdata->device_enable(dev);
+ if (error < 0) {
+ dev_dbg(dev, "device enable function failed\n");
+ return error;
+ }
+ }
+
if (!pdata->keymap) {
dev_dbg(dev, "no keymap from pdata\n");
return -EINVAL;
/* trackpad finger data size, empirically at least ten fingers */
#define SIZEOF_FINGER sizeof(struct tp_finger)
#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER)
+#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
struct bcm5974_param {
input_set_abs_params(input_dev, ABS_Y,
0, cfg->y.dim, cfg->y.fuzz, 0);
+ /* finger touch area */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger approach area */
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger orientation */
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
+ -MAX_FINGER_ORIENTATION,
+ MAX_FINGER_ORIENTATION, 0, 0);
+ /* finger position */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ cfg->x.devmin, cfg->x.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ cfg->y.devmin, cfg->y.devmax, 0, 0);
+
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
return 0;
}
+static void report_finger_data(struct input_dev *input,
+ const struct bcm5974_config *cfg,
+ const struct tp_finger *f)
+{
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ MAX_FINGER_ORIENTATION - raw2int(f->orientation));
+ input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
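+	/* mirror the y coordinate within the device range */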
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y));
+ input_mt_sync(input);
+}
+
/* report trackpad data as logical trackpad state */
static int report_tp_state(struct bcm5974 *dev, int size)
{
const struct bcm5974_config *c = &dev->cfg;
const struct tp_finger *f;
struct input_dev *input = dev->input;
- int raw_p, raw_w, raw_x, raw_y, raw_n;
+ int raw_p, raw_w, raw_x, raw_y, raw_n, i;
int ptest, origin, ibt = 0, nmin = 0, nmax = 0;
int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
/* always track the first finger; when detached, start over */
if (raw_n) {
+
+ /* report raw trackpad data */
+ for (i = 0; i < raw_n; i++)
+ report_finger_data(input, c, &f[i]);
+
raw_p = raw2int(f->force_major);
raw_w = raw2int(f->size_major);
raw_x = raw2int(f->abs_x);
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
- if (!set_properties || synaptics_init(psmouse) == 0)
+/*
+ * Try activating the protocol, but first check that support is compiled
+ * in, since we probe for Synaptics even when the protocol is disabled.
+ */
+ if (synaptics_supported() &&
+ (!set_properties || synaptics_init(psmouse) == 0)) {
return PSMOUSE_SYNAPTICS;
+ }
+
/*
* Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
* Unfortunately Logitech/Genius probes confuse some firmware versions so
max_proto = PSMOUSE_IMEX;
}
-/*
- * Try Finger Sensing Pad
- */
- if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
- if (!set_properties || fsp_init(psmouse) == 0)
- return PSMOUSE_FSP;
-/*
- * Init failed, try basic relative protocols
- */
- max_proto = PSMOUSE_IMEX;
- }
- }
if (max_proto > PSMOUSE_IMEX) {
if (genius_detect(psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
+/*
+ * Try Finger Sensing Pad. We do it here because its probe upsets
+ * Trackpoint devices (causing TP_READ_ID command to time out).
+ */
+ if (max_proto > PSMOUSE_IMEX) {
+ if (fsp_detect(psmouse, set_properties) == 0) {
+ if (!set_properties || fsp_init(psmouse) == 0)
+ return PSMOUSE_FSP;
+/*
+ * Init failed, try basic relative protocols
+ */
+ max_proto = PSMOUSE_IMEX;
+ }
+ }
+
/*
* Reset to defaults in case the device got confused by extended
* protocol probes. Note that we follow up with full reset because
struct serio *serio = to_serio_port(dev);
struct psmouse_attribute *attr = to_psmouse_attr(devattr);
struct psmouse *psmouse;
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out;
- }
psmouse = serio_get_drvdata(serio);
- retval = attr->show(psmouse, attr->data, buf);
-
-out:
- serio_unpin_driver(serio);
- return retval;
+ return attr->show(psmouse, attr->data, buf);
}
ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
struct psmouse *psmouse, *parent = NULL;
int retval;
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out_unpin;
- }
-
retval = mutex_lock_interruptible(&psmouse_mutex);
if (retval)
- goto out_unpin;
+ goto out;
psmouse = serio_get_drvdata(serio);
out_unlock:
mutex_unlock(&psmouse_mutex);
- out_unpin:
- serio_unpin_driver(serio);
+ out:
return retval;
}
}
mutex_unlock(&psmouse_mutex);
- serio_unpin_driver(serio);
serio_unregister_child_port(serio);
- serio_pin_driver_uninterruptible(serio);
mutex_lock(&psmouse_mutex);
if (serio->drv != &psmouse_drv) {
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
if (packet[3] & BIT(1))
button_status |= 0x0f; /* wheel up */
if (packet[3] & BIT(2))
- button_status |= BIT(5);/* horizontal left */
+ button_status |= BIT(4);/* horizontal left */
if (packet[3] & BIT(3))
- button_status |= BIT(4);/* horizontal right */
+ button_status |= BIT(5);/* horizontal right */
/* push back to packet queue */
if (button_status != 0)
packet[3] = button_status;
return -1;
}
+bool synaptics_supported(void)
+{
+ return true;
+}
+
#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
void __init synaptics_module_init(void)
return -ENOSYS;
}
+bool synaptics_supported(void)
+{
+ return false;
+}
+
#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
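This is the standard compile-out stub convention: callers stay free of #ifdefs because the header declares a single prototype and the implementation file supplies either the real function or a trivial stub, depending on the Kconfig option. Generic sketch (hypothetical names):

/* header */
bool my_feature_supported(void);

/* implementation file */
#ifdef CONFIG_MY_FEATURE
bool my_feature_supported(void)
{
	return true;
}
#else
bool my_feature_supported(void)
{
	return false;
}
#endif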
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
+bool synaptics_supported(void);
#endif /* _SYNAPTICS_H */
* have turned up in 2007 that also need this again.
*/
static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
+ {
+ /* Acer Aspire 5610 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ },
+ },
{
/* Acer Aspire 5630 */
.matches = {
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
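Note: PHY_REG(769, 16) matches the "page 769, register 16" named in the hard-coded helper this series deletes further down, and HV_KMRN_MDIO_SLOW is 0x0400 — exactly the difference between that helper's magic writes of 0x2580 (slow) and 0x2180 (fast), i.e. bit 10. The new define simply gives the slow-mode bit a name.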
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000e_get_phy_id(hw);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in MDIO slow mode (e.g. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000e_get_phy_type_from_id(phy->id);
switch (phy->type) {
break;
}
+out:
return ret_val;
}
}
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
return ret_val;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
if (((hw->phy.type == e1000_phy_82577) &&
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
/* Allow time for h/w to get to a quiescent state after reset */
mdelay(10);
+ /* Perform any necessary post-reset workarounds */
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
- phy->ops.release(hw);
- }
-
return ret_val;
}
return 0;
}
-/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = 0;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
if (spins != 0) {
--spins;
udelay(1);
- } else
- schedule();
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
time = get_seconds();
static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad = __ffs(efx->mdio.mmds);
+ int devad;
u16 physid1, physid2;
- if (efx->phy_type == PHY_TYPE_NONE)
+ if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
+ devad = __ffs(efx->mdio.mmds);
+ else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
+ devad = MDIO_DEVAD_NONE;
+ else
return 0;
mutex_lock(&efx->mac_lock);
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2009 Broadcom Corporation.
+ * Copyright (C) 2005-2010 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.105"
-#define DRV_MODULE_RELDATE "December 2, 2009"
+#define DRV_MODULE_VERSION "3.106"
+#define DRV_MODULE_RELDATE "January 12, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
else
tp->phy_addr = 1;
- is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
(*post_ptr)++;
if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
- u32 idx = *post_ptr % TG3_RX_RING_SIZE;
- tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
+ tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
work_mask &= ~RXD_OPAQUE_RING_STD;
rx_std_posted = 0;
}
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
else
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
if (err)
return err;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+ return -ENOTSUPP;
+
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2010 Broadcom Corporation.
*/
#ifndef _T3_H
#define CPMU_MUTEX_REQ_DRIVER 0x00001000
#define TG3_CPMU_MUTEX_GNT 0x00003660
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
/* 0x3664 --> 0x3800 unused */
/* Mbuf cluster free registers */
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
- u32 cksum, offset;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
* Validate the checksum of the EEPROM data. There are some
* devices with invalid EEPROMs.
*/
- for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
return -EIO;
}
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
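Worked example of the size logic above, with hypothetical register contents: if AR5K_EEPROM_SIZE_UPPER reads 0x0001 (its masked high bits are zero) and AR5K_EEPROM_SIZE_LOWER reads 0x0800, then eep_max = (0 | 0x0800) - AR5K_EEPROM_INFO_BASE = 0x0800 - 0x00c0 = 0x0740 words, which passes the fail-safe limit of 3 * AR5K_EEPROM_INFO_MAX = 3 * 0x0340 = 0x09c0. An upper word of zero keeps the old default of 0x0340 words.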
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 8) ? \
+#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 16) ? \
+ (sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 32) ? \
+ ((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int tid = MAX_TID_COUNT;
+ int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+{
+ priv->assoc_id = 0;
+ iwl_led_disassociate(priv);
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ priv->staging_rxon.assoc_id = 0;
+ iwlcore_commit_rxon(priv);
+}
+
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
priv->cfg->ops->lib->post_associate(priv);
- } else {
- priv->assoc_id = 0;
- iwl_led_disassociate(priv);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * send
- */
- priv->staging_rxon.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = 0;
- iwlcore_commit_rxon(priv);
- }
+ } else
+ iwl_set_no_assoc(priv);
}
if (changes && iwl_is_associated(priv) && priv->assoc_id) {
}
}
- if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
- vif->bss_conf.enable_beacon) {
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ iwlcore_config_ap(priv);
+ } else
+ iwl_set_no_assoc(priv);
}
mutex_unlock(&priv->mutex);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
- return q->write_ptr > q->read_ptr ?
+ return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
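The one-character change fixes an off-by-one for the empty ring: with write_ptr == read_ptr == 5, the old strict '>' fell through to the wrap-around branch, where !(i < 5 && i >= 5) is true for every i — every index looked used. With '>=', the straight branch (i >= 5 && i < 5) is false for every i, so an empty queue correctly reports no used entries.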
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
- if (mwl8k_fw_lock(hw))
+ if (mwl8k_fw_lock(hw)) {
+ kfree(cmd);
return;
+ }
if (priv->sniffer_enabled) {
mwl8k_enable_sniffer(hw, 0);
rt2x00_set_field32(®, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(®, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(®, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(®, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(®, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(®, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(®, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
#define ALIGN_SIZE(__skb, __header) \
( ((unsigned long)((__skb)->data + (__header))) & 3 )
+/*
+ * Constants for extra TX headroom for alignment purposes.
+ */
+#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
+#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
+
/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
/*
* Initialize extra TX headroom required.
*/
- rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+ rt2x00dev->hw->extra_tx_headroom =
+ max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
+ rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Take TX headroom required for alignment into account.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
+ else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
* is also mapped to the DMA so it can be used for transferring
* additional descriptor information to the hardware.
*/
- skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
/*
* Restore data pointer to original location again.
*/
- skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->hw->extra_tx_headroom,
+ skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
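After this change the two headroom values deliberately diverge: hw->extra_tx_headroom is what mac80211 reserves in each skb (at least IEEE80211_TX_STATUS_HEADROOM, plus 4 or 8 bytes of alignment slack), while the DMA map/unmap paths keep using ops->extra_tx_headroom, the driver's own descriptor size — hence the skb_push()/skb_pull()/dma_unmap_single() calls switching from hw-> to ops->.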
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- /* changed_flags is always populated but this driver
- * doesn't support all FIF flags so its possible we don't
- * need to do anything */
- if (!changed_flags)
- return;
-
+ /*
+ * If the multicast parameter (as returned by zd_op_prepare_multicast)
+ * has changed, no bit in changed_flags is set. To handle this
+ * situation, we do not return if changed_flags is 0. If we did,
+ * we would break IPv6, which relies on multicast for link-layer
+ * address resolution.
+ */
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
+#include <asm/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
-#ifndef CONFIG_64BIT
- /* Make sure pointers are sane even on 31 bit. */
- if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
+ if (is_compat_task() || sizeof(long) == 4) {
+ /* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
- goto out;
+ if ((usrparm.psf_data >> 32) != 0)
+ goto out;
+ if ((usrparm.rssd_result >> 32) != 0)
+ goto out;
+ usrparm.psf_data &= 0x7fffffffULL;
+ usrparm.rssd_result &= 0x7fffffffULL;
}
-#endif
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/smp_lock.h>
-
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
- unsigned long arg)
+ struct cmbdata __user *argp)
{
- struct cmbdata __user *argp = (void __user *) arg;
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block = bdev->bd_disk->private_data;
- void __user *argp = (void __user *)arg;
+ void __user *argp;
+
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
if (!block)
return -ENODEV;
case BIODASDCMFDISABLE:
return disable_cmf(block->base->cdev);
case BIODASDREADALLCMB:
- return dasd_ioctl_readall_cmb(block, cmd, arg);
+ return dasd_ioctl_readall_cmb(block, cmd, argp);
default:
/* if the discipline has an ioctl method try it. */
if (block->base->discipline->ioctl) {
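The DASD, 3270, tape, vmcp and chsc changes in this series all apply the same compat-ioctl recipe. As a generic sketch (hypothetical names; is_compat_task()/compat_ptr() as used on s390 above):

#include <asm/compat.h>		/* is_compat_task(), compat_ptr() */
#include <linux/fs.h>

static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *argp;

	if (is_compat_task())
		argp = compat_ptr(arg);		/* 31-bit user pointer */
	else
		argp = (void __user *)arg;

	/* ... dispatch on cmd, always through argp ... */
	return -ENOIOCTLCMD;
}

The same handler can then be wired to both .unlocked_ioctl and .compat_ioctl, as the fs3270 hunk below does.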
tty_wakeup(tty);
}
-/*
- * Currently we don't have any io controls for 3215 ttys
- */
-static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- switch (cmd) {
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
/*
* Disable reading from a 3215 tty
*/
.write_room = tty3215_write_room,
.chars_in_buffer = tty3215_chars_in_buffer,
.flush_buffer = tty3215_flush_buffer,
- .ioctl = tty3215_ioctl,
.throttle = tty3215_throttle,
.unthrottle = tty3215_unthrottle,
.stop = tty3215_stop,
#include <linux/types.h>
#include <linux/smp_lock.h>
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ char __user *argp;
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (char __user *)arg;
rc = 0;
mutex_lock(&fs3270_mutex);
switch (cmd) {
fp->write_command = arg;
break;
case TUBGETI:
- rc = put_user(fp->read_command, (char __user *) arg);
+ rc = put_user(fp->read_command, argp);
break;
case TUBGETO:
- rc = put_user(fp->write_command,(char __user *) arg);
+ rc = put_user(fp->write_command, argp);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
- if (copy_to_user((char __user *) arg, &iocb,
- sizeof(struct raw3270_iocb)))
+ if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
- .open = fs3270_open, /* open */
- .release = fs3270_close, /* release */
+ .open = fs3270_open, /* open */
+ .release = fs3270_close, /* release */
};
/*
*/
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
-static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
return 0;
}
-/*
- * Support of some generic block device IOCTLs.
- */
-static int
-tapeblock_ioctl(
- struct block_device * bdev,
- fmode_t mode,
- unsigned int command,
- unsigned long arg
-) {
- int rc;
- int minor;
- struct gendisk *disk = bdev->bd_disk;
- struct tape_device *device;
-
- rc = 0;
- BUG_ON(!disk);
- device = disk->private_data;
- BUG_ON(!device);
- minor = MINOR(bdev->bd_dev);
-
- DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
- DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
-
- switch (command) {
- /* Refuse some IOCTL calls without complaining (mount). */
- case 0x5310: /* CDROMMULTISESSION */
- rc = -EINVAL;
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
/*
* Initialize block device frontend.
*/
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/smp_lock.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
-static long tapechar_compat_ioctl(struct file *, unsigned int,
- unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
static const struct file_operations tape_fops =
{
.read = tapechar_read,
.write = tapechar_write,
.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
.compat_ioctl = tapechar_compat_ioctl,
+#endif
.open = tapechar_open,
.release = tapechar_release,
};
return rc;
}
+#ifdef CONFIG_COMPAT
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
+ unsigned long argp;
+ /* The 'arg' argument of any ioctl function may only be used for
+ * pointers because of the compat pointer conversion.
+ * Consider this when adding new ioctls.
+ */
+ argp = (unsigned long) compat_ptr(data);
if (device->discipline->ioctl_fn) {
mutex_lock(&device->mutex);
- rval = device->discipline->ioctl_fn(device, no, data);
+ rval = device->discipline->ioctl_fn(device, no, argp);
mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
return rval;
}
+#endif /* CONFIG_COMPAT */
/*
* Initialize character device frontend.
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
+ int __user *argp;
int temp;
session = (struct vmcp_session *)file->private_data;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (int __user *)arg;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
get_order(session->bufsize));
session->response=NULL;
- temp = get_user(session->bufsize, (int __user *)arg);
+ temp = get_user(session->bufsize, argp);
if (get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
case VMCP_GETSIZE:
temp = session->resp_size;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
default:
mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp;
+
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
- return chsc_ioctl_start((void __user *)arg);
+ return chsc_ioctl_start(argp);
case CHSC_INFO_CHANNEL_PATH:
- return chsc_ioctl_info_channel_path((void __user *)arg);
+ return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
- return chsc_ioctl_info_cu((void __user *)arg);
+ return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
- return chsc_ioctl_info_sch_cu((void __user *)arg);
+ return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
- return chsc_ioctl_conf_info((void __user *)arg);
+ return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
- return chsc_ioctl_conf_comp_list((void __user *)arg);
+ return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
- return chsc_ioctl_chpd((void __user *)arg);
+ return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
- return chsc_ioctl_dcal((void __user *)arg);
+ return chsc_ioctl_dcal(argp);
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
* u_mult_inv > 128 bytes.
*/
if (copied == 0) {
- int len;
+ unsigned int len;
spin_unlock_bh(&zcrypt_device_lock);
/* len is max 256 / 2 - 120 = 8 */
len = crt->inputdatalength / 2 - 120;
+ if (len > sizeof(z1))
+ return -EFAULT;
z1 = z2 = z3 = 0;
if (copy_from_user(&z1, crt->np_prime, len) ||
copy_from_user(&z2, crt->bp_key, len) ||
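The new check matters because len is derived from the user-supplied inputdatalength: the comment's bound (256 / 2 - 120 = 8) only holds for well-formed requests. Making len unsigned also turns a too-small inputdatalength (which previously went negative) into a large value that the len > sizeof(z1) test rejects before copy_from_user() can overrun the buffers.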
{ "FUJ02E5", 0 },
/* Fujitsu P-series tablet PC device */
{ "FUJ02E6", 0 },
+ /* Fujitsu Wacom 2FGT Tablet PC device */
+ { "FUJ02E7", 0 },
/*
* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
* disguise)
sci_port->port.ops = &sci_uart_ops;
sci_port->port.iotype = UPIO_MEM;
sci_port->port.line = index;
- sci_port->port.fifosize = 1;
+
+ switch (p->type) {
+ case PORT_SCIFA:
+ sci_port->port.fifosize = 64;
+ break;
+ case PORT_SCIF:
+ sci_port->port.fifosize = 16;
+ break;
+ default:
+ sci_port->port.fifosize = 1;
+ break;
+ }
if (dev) {
sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
- * @id: window id.
* @sfb: The hardware state.
* @pixclock: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
*/
-static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
+static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
{
- struct s3c_fb_pd_win *win = sfb->pdata->win[id];
unsigned long clk = clk_get_rate(sfb->bus_clk);
+ unsigned long long tmp;
unsigned int result;
- pixclk *= win->win_mode.refresh;
- result = clk / pixclk;
+ tmp = (unsigned long long)clk;
+ tmp *= pixclk;
+
+ do_div(tmp, 1000000000UL);
+ result = (unsigned int)tmp / 1000;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
pixclk, clk, result, clk / result);
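The 64-bit form computes divider = clk[Hz] * pixclock[ps] / 10^12 without overflow. Hypothetical numbers: a 133 MHz bus clock and a 38,461 ps pixel clock (about 26 MHz) give 133,000,000 * 38,461 ≈ 5.12e12; after do_div by 10^9 and the final / 1000 the divider is 5, i.e. an output of 26.6 MHz — the closest achievable to the request.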
/* use window 0 as the basis for the lcd output timings */
if (win_no == 0) {
- clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
+ clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
data = sfb->pdata->vidcon0;
data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
tmp, dst_pitch);
return -EINVAL;
}
- tmp = (tmp >> 3) | (dst_pitch << (16 - 3));
+ tmp = VIA_PITCH_ENABLE | (tmp >> 3) | (dst_pitch << (16 - 3));
writel(tmp, engine + 0x38);
if (op == VIA_BITBLT_FILL)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
+ /* Init 2D engine reg to reset 2D engine */
+ writel(0x0, engine + VIA_REG_KEYCONTROL);
+
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
}
if (vmode_index != VIA_RES_INVALID) {
- viafb_setmode(vmode_index, info->var.xres, info->var.yres,
- info->var.bits_per_pixel, vmode_index1,
- viafb_second_xres, viafb_second_yres, viafb_bpp1);
-
viafb_update_fix(info);
viafb_bpp = info->var.bits_per_pixel;
if (info->var.accel_flags & FB_ACCELF_TEXT)
info->flags &= ~FBINFO_HWACCEL_DISABLED;
else
info->flags |= FBINFO_HWACCEL_DISABLED;
+ viafb_setmode(vmode_index, info->var.xres, info->var.yres,
+ info->var.bits_per_pixel, vmode_index1,
+ viafb_second_xres, viafb_second_yres, viafb_bpp1);
}
return 0;
if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
return -ENODEV;
- if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2)
+ /* LCD output does not support hw cursors (at least on VN896) */
+ if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) ||
+ viafb_LCD_ON)
return -ENODEV;
viafb_show_hw_cursor(info, HW_Cursor_OFF);
static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
-static struct virtio_driver virtio_balloon = {
+static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
static int __init init(void)
{
- return register_virtio_driver(&virtio_balloon);
+ return register_virtio_driver(&virtio_balloon_driver);
}
static void __exit fini(void)
{
- unregister_virtio_driver(&virtio_balloon);
+ unregister_virtio_driver(&virtio_balloon_driver);
}
module_init(init);
module_exit(fini);
goto out_thaw;
}
+ printk(KERN_DEBUG "suspending xenstore...\n");
+ xs_suspend();
+
err = dpm_suspend_noirq(PMSG_SUSPEND);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto out_resume;
}
- printk(KERN_DEBUG "suspending xenstore...\n");
- xs_suspend();
-
err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
dpm_resume_noirq(PMSG_RESUME);
cancelled = 1;
}
+out_resume:
if (!cancelled) {
xen_arch_resume();
xs_resume();
} else
xs_suspend_cancel();
-out_resume:
dpm_resume_end(PMSG_RESUME);
/* Make sure timer events get retriggered on all CPUs */
if (warned)
return 0;
- warned = false;
+ warned = true;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd,
+ group->inotify_data.last_wd+1,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
- group->inotify_data.last_wd = 1;
+ group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return ret;
}
-/*****************************************************************************/
-/*
- * check that file shrinkage doesn't leave any VMAs dangling in midair
- */
-static int ramfs_nommu_check_mappings(struct inode *inode,
- size_t newsize, size_t size)
-{
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
-
- /* search for VMAs that fall within the dead zone */
- vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
- newsize >> PAGE_SHIFT,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT
- ) {
- /* found one - only interested if it's shared out of the page
- * cache */
- if (vma->vm_flags & VM_SHARED)
- return -ETXTBSY; /* not quite true, but near enough */
- }
-
- return 0;
-}
-
/*****************************************************************************/
/*
*
/* check that a decrease in size doesn't cut off any shared mappings */
if (newsize < size) {
- ret = ramfs_nommu_check_mappings(inode, newsize, size);
+ ret = nommu_shrink_inode_mappings(inode, size, newsize);
if (ret < 0)
return ret;
}
/* Force a compilation error if condition is constant and true */
#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
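The test works because n & (n - 1) clears the lowest set bit, so it is zero exactly when n has a single bit set: 8 & 7 == 0, but 12 & 11 == 8. The explicit (n) == 0 term rules out zero, which would otherwise slip through.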
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions
/**
* DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
* @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used inside struct or union declaration
* Note2: the macro creates two objects:
/**
* DEFINE_KFIFO - macro to define and initialize a kfifo
* @name: name of the declared kfifo datatype
- * @size: size of the fifo buffer
+ * @size: size of the fifo buffer. Must be a power of two.
*
* Note1: the macro can be used for global and local kfifo data type variables
* Note2: the macro creates two objects:
#undef __kfifo_initializer
-extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+extern void kfifo_init(struct kfifo *fifo, void *buffer,
unsigned int size);
extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int kfifo_in(struct kfifo *fifo,
- const unsigned char *from, unsigned int len);
+ const void *from, unsigned int len);
extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
- unsigned char *to, unsigned int len);
+ void *to, unsigned int len);
+extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned offset);
+
+/**
+ * kfifo_initialized - Check if kfifo is initialized.
+ * @fifo: fifo to check
+ * Return %true if FIFO is initialized, otherwise %false.
+ * Assumes the fifo structure was zeroed before first use.
+ */
+static inline bool kfifo_initialized(struct kfifo *fifo)
+{
+ return fifo->buffer != 0;
+}
/**
* kfifo_reset - removes the entire FIFO contents
* bytes copied.
*/
static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
- const unsigned char *from, unsigned int n, spinlock_t *lock)
+ const void *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
* @to buffer and returns the number of copied bytes.
*/
static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
- unsigned char *to, unsigned int n, spinlock_t *lock)
+ void *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
ret = kfifo_out(fifo, to, n);
- /*
- * optimization: if the FIFO is empty, set the indices to 0
- * so we don't wrap the next time
- */
- if (kfifo_is_empty(fifo))
- kfifo_reset(fifo);
-
spin_unlock_irqrestore(lock, flags);
return ret;
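The dropped "optimization" is presumably unsafe rather than merely unnecessary: kfifo explicitly permits one lockless concurrent writer, and resetting both indices from the consumer side can race with such a writer and corrupt the fifo state.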
extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
-extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
- const void __user *from, unsigned int n);
+extern __must_check int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned *lenout);
-extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
- void __user *to, unsigned int n);
+extern __must_check int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned *lenout);
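Callers must adapt to the new convention: the return value is now only 0 or -EFAULT, and the byte count comes back through the extra pointer. A usage sketch (hypothetical names):

#include <linux/kfifo.h>

static ssize_t my_chardev_write(struct kfifo *fifo,
				const char __user *ubuf, size_t len)
{
	unsigned int copied;
	int ret = kfifo_from_user(fifo, ubuf, len, &copied);

	if (ret)
		return ret;	/* -EFAULT while copying from user space */
	return copied;		/* bytes actually queued */
}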
/*
* __kfifo_add_out internal helper function for updating the out offset
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
- atomic_t vm_usage; /* region usage count */
+ int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
* this region */
};
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
+#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long task_size; /* size of task vm space */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
#include <linux/aio.h>
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
#if USE_SPLIT_PTLOCKS
/*
#endif /* CONFIG_SMP */
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
spin_unlock_irq(&serio->lock);
}
-/*
- * Use the following functions to pin serio's driver in process context
- */
-static inline int serio_pin_driver(struct serio *serio)
-{
- return mutex_lock_interruptible(&serio->drv_mutex);
-}
-
-static inline void serio_pin_driver_uninterruptible(struct serio *serio)
-{
- mutex_lock(&serio->drv_mutex);
-}
-
-static inline void serio_unpin_driver(struct serio *serio)
-{
- mutex_unlock(&serio->drv_mutex);
-}
-
-
#endif
/*
}
#ifndef __HAVE_ARCH_STRSTR
-extern char * strstr(const char *,const char *);
+extern char * strstr(const char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNSTR
+extern char * strnstr(const char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_put(struct tty_port *port);
-extern inline struct tty_port *tty_port_get(struct tty_port *port)
+static inline struct tty_port *tty_port_get(struct tty_port *port)
{
if (port)
kref_get(&port->kref);
struct tty_struct *tty, struct file *filp);
extern int tty_port_open(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
-extern inline int tty_port_users(struct tty_port *port)
+static inline int tty_port_users(struct tty_port *port)
{
return port->count + port->blocked_open;
}
return inet6_sk(sk)->mc_loop;
#endif
}
- __WARN();
+ WARN_ON(1);
return 1;
}
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = shm_get_unmapped_area,
+#endif
};
static const struct file_operations shm_file_operations_huge = {
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ,
- * VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+ if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
}
again:
- err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+ err = get_user_pages_fast(address, 1, 1, &page);
if (err < 0)
return err;
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+ ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
goto out;
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out_put_key1;
pi_state = NULL;
}
- ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2,
- requeue_pi ? VERIFY_WRITE : VERIFY_READ);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out_put_key1;
*/
retry:
q->key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+ ret = get_futex_key(uaddr, fshared, &q->key);
if (unlikely(ret != 0))
return ret;
q.requeue_pi_key = NULL;
retry:
q.key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+ ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
goto out;
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
- ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+ ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
goto out;
rt_waiter.task = NULL;
key2 = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ ret = get_futex_key(uaddr2, fshared, &key2);
if (unlikely(ret != 0))
goto out;
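With the rw argument gone, get_futex_key() now always takes the page reference with write intent — access_ok(VERIFY_WRITE, ...) on the private path and get_user_pages_fast(..., write = 1, ...) on the shared path — so callers no longer have to guess, per call site, whether read access suffices.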
#include <linux/log2.h>
#include <linux/uaccess.h>
-static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+static void _kfifo_init(struct kfifo *fifo, void *buffer,
unsigned int size)
{
fifo->buffer = buffer;
* kfifo_init - initialize a FIFO using a preallocated buffer
* @fifo: the fifo to assign the buffer
* @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this have to be a power of 2.
+ * @size: the size of the internal buffer, this has to be a power of 2.
*
*/
-void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
+void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
{
/* size must be a power of 2 */
BUG_ON(!is_power_of_2(size));
memcpy(to + l, fifo->buffer, len - l);
}
-static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned int off)
+static inline int __kfifo_from_user_data(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off,
+ unsigned *lenout)
{
unsigned int l;
int ret;
/* first put the data starting from fifo->in to buffer end */
l = min(len, fifo->size - off);
ret = copy_from_user(fifo->buffer + off, from, l);
-
- if (unlikely(ret))
- return ret + len - l;
+ if (unlikely(ret)) {
+ *lenout = ret;
+ return -EFAULT;
+ }
+ *lenout = l;
/* then put the rest (if any) at the beginning of the buffer */
- return copy_from_user(fifo->buffer, from + l, len - l);
+ ret = copy_from_user(fifo->buffer, from + l, len - l);
+ *lenout += ret ? ret : len - l;
+ return ret ? -EFAULT : 0;
}
-static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned int off)
+static inline int __kfifo_to_user_data(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
{
unsigned int l;
int ret;
/* first get the data from fifo->out until the end of the buffer */
l = min(len, fifo->size - off);
ret = copy_to_user(to, fifo->buffer + off, l);
-
- if (unlikely(ret))
- return ret + len - l;
+ *lenout = l;
+ if (unlikely(ret)) {
+ *lenout -= ret;
+ return -EFAULT;
+ }
/* then get the rest (if any) from the beginning of the buffer */
- return copy_to_user(to + l, fifo->buffer, len - l);
+ len -= l;
+ ret = copy_to_user(to + l, fifo->buffer, len);
+ if (unlikely(ret)) {
+ *lenout += len - ret;
+ return -EFAULT;
+ }
+ *lenout += len;
+ return 0;
}
unsigned int __kfifo_in_n(struct kfifo *fifo,
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+unsigned int kfifo_in(struct kfifo *fifo, const void *from,
unsigned int len)
{
len = min(kfifo_avail(fifo), len);
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
{
len = min(kfifo_len(fifo), len);
}
EXPORT_SYMBOL(kfifo_out);
+/**
+ * kfifo_out_peek - copy some data from the FIFO, but do not remove it
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ * @offset: offset into the fifo
+ *
+ * This function copies at most @len bytes at @offset from the FIFO
+ * into the @to buffer and returns the number of copied bytes.
+ * The data is not removed from the FIFO.
+ */
+unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
+ unsigned offset)
+{
+ len = min(kfifo_len(fifo), len + offset);
+
+ __kfifo_out_data(fifo, to, len, offset);
+ return len;
+}
+EXPORT_SYMBOL(kfifo_out_peek);
+
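A usage sketch for the new peek helper (the caller is hypothetical): inspect queued data, such as a record header, without consuming it:

	static bool demo_header_ready(struct kfifo *fifo)
	{
		u8 hdr[4];

		/* copies up to 4 bytes from the front; fifo state is unchanged */
		if (kfifo_out_peek(fifo, hdr, sizeof(hdr), 0) < sizeof(hdr))
			return false;	/* not enough buffered yet */
		/* hdr[] may be examined; the bytes remain queued */
		return true;
	}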
unsigned int __kfifo_out_generic(struct kfifo *fifo,
void *to, unsigned int len, unsigned int recsize,
unsigned int *total)
unsigned int __kfifo_from_user_n(struct kfifo *fifo,
const void __user *from, unsigned int len, unsigned int recsize)
{
+ unsigned total;
+
if (kfifo_avail(fifo) < len + recsize)
return len + 1;
- return __kfifo_from_user_data(fifo, from, len, recsize);
+ __kfifo_from_user_data(fifo, from, len, recsize, &total);
+ return total;
}
EXPORT_SYMBOL(__kfifo_from_user_n);
* @len: the length of the data to be added.
*
* This function copies at most @len bytes from the @from into the
- * FIFO depending and returns the number of copied bytes.
+ * FIFO, depending on the available space, and returns 0 or -EFAULT.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_from_user(struct kfifo *fifo,
- const void __user *from, unsigned int len)
+int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned *total)
{
+ int ret;
len = min(kfifo_avail(fifo), len);
- len -= __kfifo_from_user_data(fifo, from, len, 0);
+ ret = __kfifo_from_user_data(fifo, from, len, 0, total);
+ if (ret)
+ return ret;
__kfifo_add_in(fifo, len);
- return len;
+ return 0;
}
EXPORT_SYMBOL(kfifo_from_user);
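The convention change ripples out to callers: the byte count now arrives through the output parameter and the return value only signals faults. A sketch of a write() handler under the new API (names are hypothetical):

	static ssize_t demo_write(struct kfifo *fifo, const char __user *buf,
				  size_t count)
	{
		unsigned int copied;
		int ret;

		ret = kfifo_from_user(fifo, buf, count, &copied);
		if (ret)
			return ret;	/* -EFAULT */
		return copied;		/* bytes actually queued */
	}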
void __user *to, unsigned int len, unsigned int reclen,
unsigned int recsize)
{
- unsigned int ret;
+ unsigned int ret, total;
if (kfifo_len(fifo) < reclen + recsize)
return len;
- ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
+ ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
if (likely(ret == 0))
__kfifo_add_out(fifo, reclen + recsize);
- return ret;
+ return total;
}
EXPORT_SYMBOL(__kfifo_to_user_n);
* @fifo: the fifo to be used.
* @to: where the data must be copied.
* @len: the size of the destination buffer.
+ * @lenout: pointer to output variable with copied data
*
* This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
+ * @to buffer and returns 0 or -EFAULT.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int kfifo_to_user(struct kfifo *fifo,
- void __user *to, unsigned int len)
+int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned *lenout)
{
+ int ret;
len = min(kfifo_len(fifo), len);
- len -= __kfifo_to_user_data(fifo, to, len, 0);
- __kfifo_add_out(fifo, len);
- return len;
+ ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
+ __kfifo_add_out(fifo, *lenout);
+ return ret;
}
EXPORT_SYMBOL(kfifo_to_user);
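The read side mirrors this, with one subtlety visible in the code above: kfifo_to_user() advances the fifo by *lenout even when it returns -EFAULT, so partially copied bytes are already consumed. A hypothetical caller:

	static ssize_t demo_read(struct kfifo *fifo, char __user *buf,
				 size_t count)
	{
		unsigned int copied;
		int ret;

		ret = kfifo_to_user(fifo, buf, count, &copied);
		if (ret && !copied)
			return ret;	/* faulted before any progress */
		return copied;		/* report partial reads as progress */
	}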
goto call;
/* Try for same node. */
- nodemask = cpumask_of_node(cpu);
+ nodemask = cpumask_of_node(cpu_to_node(cpu));
for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
cpu = cpumask_next_and(cpu, nodemask, mask)) {
if (cpu_online(cpu))
static int ftrace_match(char *str, char *regex, int len, int type)
{
int matched = 0;
- char *ptr;
+ int slen;
switch (type) {
case MATCH_FULL:
matched = 1;
break;
case MATCH_END_ONLY:
- ptr = strstr(str, regex);
- if (ptr && (ptr[len] == 0))
+ slen = strlen(str);
+ if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
matched = 1;
break;
}
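The MATCH_END_ONLY rewrite anchors the comparison to the tail of the string instead of trusting strstr() to find the right occurrence. The idiom, as a standalone illustration (hypothetical helper, not part of the patch):

	static int ends_with(const char *str, const char *pat, int len)
	{
		int slen = strlen(str);

		return slen >= len && memcmp(str + slen - len, pat, len) == 0;
	}

	/* ends_with("spin_lock", "lock", 4)     -> 1                    */
	/* ends_with("spin_lock_irq", "lock", 4) -> 0, tail is "_irq"    */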
* Splice the empty reader page into the list around the head.
*/
reader = rb_set_head_page(cpu_buffer);
- cpu_buffer->reader_page->list.next = reader->list.next;
+ cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer->reader_page->list.prev = reader->list.prev;
/*
*
* Now make the new head point back to the reader page.
*/
- reader->list.next->prev = &cpu_buffer->reader_page->list;
+ rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
/* Finally update the reader page to the new head */
{
char **addr = (char **)(event + pred->offset);
int cmp, match;
+ int len = strlen(*addr) + 1; /* including trailing '\0' */
- cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
+ cmp = pred->regex.match(*addr, &pred->regex, len);
match = cmp ^ pred->not;
return 0;
}
-/* Basic regex callbacks */
+/*
+ * regex_match_foo - Basic regex callbacks
+ *
+ * @str: the string to be searched
+ * @r: the regex structure containing the pattern string
+ * @len: the length of the string to be searched (including '\0')
+ *
+ * Note:
+ * - @str might not be NULL-terminated if it's of type DYN_STRING
+ * or STATIC_STRING
+ */
+
static int regex_match_full(char *str, struct regex *r, int len)
{
if (strncmp(str, r->pattern, len) == 0)
static int regex_match_front(char *str, struct regex *r, int len)
{
- if (strncmp(str, r->pattern, len) == 0)
+ if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
}
static int regex_match_middle(char *str, struct regex *r, int len)
{
- if (strstr(str, r->pattern))
+ if (strnstr(str, r->pattern, len))
return 1;
return 0;
}
static int regex_match_end(char *str, struct regex *r, int len)
{
- char *ptr = strstr(str, r->pattern);
+ int strlen = len - 1;
- if (ptr && (ptr[r->len] == 0))
+ if (strlen >= r->len &&
+ memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
return 1;
return 0;
}
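These callbacks now honour the length limit because a DYN_STRING or STATIC_STRING field that exactly fills its slot carries no terminating NUL. A bounded search, sketched on a hypothetical event field:

	static int demo_middle_match(const char *field, int len,
				     const char *pat)
	{
		/* never reads field[len] or beyond, even without a '\0' */
		return strnstr(field, pat, len) != NULL;
	}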
pred->regex.field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING)
fn = filter_pred_strloc;
- else {
+ else
fn = filter_pred_pchar;
- pred->regex.field_len = strlen(pred->regex.pattern);
- }
} else {
if (field->is_signed)
ret = strict_strtoll(pred->regex.pattern, 0, &val);
*/
char *strstr(const char *s1, const char *s2)
{
- int l1, l2;
+ size_t l1, l2;
l2 = strlen(s2);
if (!l2)
EXPORT_SYMBOL(strstr);
#endif
+#ifndef __HAVE_ARCH_STRNSTR
+/**
+ * strnstr - Find the first substring in a length-limited string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ * @len: the maximum number of characters to search
+ */
+char *strnstr(const char *s1, const char *s2, size_t len)
+{
+ size_t l1 = len, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strnstr);
+#endif
+
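Behaviour of the new helper on a few illustrative inputs; a match must fit entirely within the first @len characters:

	strnstr("foobar", "bar", 6);	/* -> pointer to "bar" */
	strnstr("foobar", "bar", 5);	/* -> NULL, the match needs the 6th char */
	strnstr("foobar", "", 6);	/* -> "foobar", empty needle matches at once */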
#ifndef __HAVE_ARCH_MEMCHR
/**
* memchr - Find a character in an area of memory.
if (free_all)
goto try_to_free;
move_account:
- while (mem->res.usage > 0) {
+ do {
ret = -EBUSY;
if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
goto out;
if (ret == -ENOMEM)
goto try_to_free;
cond_resched();
- }
- ret = 0;
+ /* "ret" should also be checked to ensure all lists are empty. */
+ } while (mem->res.usage > 0 || ret);
out:
css_put(&mem->css);
return ret;
}
lru_add_drain();
/* try move_account...there may be some *locked* pages. */
- if (mem->res.usage)
- goto move_account;
- ret = 0;
- goto out;
+ goto move_account;
}
int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
static void __put_nommu_region(struct vm_region *region)
__releases(nommu_region_sem)
{
- kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+ kenter("%p{%d}", region, region->vm_usage);
BUG_ON(!nommu_region_tree.rb_node);
- if (atomic_dec_and_test(&region->vm_usage)) {
+ if (--region->vm_usage == 0) {
if (region->vm_top > region->vm_start)
delete_nommu_region(region);
up_write(&nommu_region_sem);
if (!vma)
goto error_getting_vma;
- atomic_set(&region->vm_usage, 1);
+ region->vm_usage = 1;
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
}
/* we've found a region we can share */
- atomic_inc(&pregion->vm_usage);
+ pregion->vm_usage++;
vma->vm_region = pregion;
start = pregion->vm_start;
start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
vma->vm_region = NULL;
vma->vm_start = 0;
vma->vm_end = 0;
- atomic_dec(&pregion->vm_usage);
+ pregion->vm_usage--;
pregion = NULL;
goto error_just_free;
}
kenter("");
- /* we're only permitted to split anonymous regions that have a single
- * owner */
- if (vma->vm_file ||
- atomic_read(&vma->vm_region->vm_usage) != 1)
+ /* we're only permitted to split anonymous regions (these should have
+ * only a single usage on the region) */
+ if (vma->vm_file)
return -ENOMEM;
if (mm->map_count >= sysctl_max_map_count)
/* cut the backing region down to size */
region = vma->vm_region;
- BUG_ON(atomic_read(&region->vm_usage) != 1);
+ BUG_ON(region->vm_usage != 1);
down_write(&nommu_region_sem);
delete_nommu_region(region);
}
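The atomic_t to plain int conversion leans on the fact that vm_usage is only ever touched with nommu_region_sem held, so the rwsem already serializes every access. New code must keep that invariant, roughly (hypothetical helper):

	static void demo_get_region(struct vm_region *region)
	{
		down_write(&nommu_region_sem);
		region->vm_usage++;	/* plain int; the rwsem is the lock */
		up_write(&nommu_region_sem);
	}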
EXPORT_SYMBOL(unmap_mapping_range);
-/*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
- get_area = current->mm->get_unmapped_area;
- if (file && file->f_op && file->f_op->get_unmapped_area)
- get_area = file->f_op->get_unmapped_area;
-
- if (!get_area)
- return -ENOSYS;
-
- return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
/*
* Check that a process has enough memory to allocate a new virtual
* mapping. 0 means there is enough memory for the allocation to
mmput(mm);
return len;
}
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+ size_t newsize)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ struct vm_region *region;
+ pgoff_t low, high;
+ size_t r_size, r_top;
+
+ low = newsize >> PAGE_SHIFT;
+ high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ down_write(&nommu_region_sem);
+
+ /* search for VMAs that fall within the dead zone */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+ low, high) {
+ /* found one - only interested if it's shared out of the page
+ * cache */
+ if (vma->vm_flags & VM_SHARED) {
+ up_write(&nommu_region_sem);
+ return -ETXTBSY; /* not quite true, but near enough */
+ }
+ }
+
+ /* reduce any regions that overlap the dead zone - if in existence,
+ * these will be pointed to by VMAs that don't overlap the dead zone
+ *
+ * we don't check for any regions that start beyond the EOF as there
+ * shouldn't be any
+ */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+ 0, ULONG_MAX) {
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+
+ region = vma->vm_region;
+ r_size = region->vm_top - region->vm_start;
+ r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+ if (r_top > newsize) {
+ region->vm_top -= r_top - newsize;
+ if (region->vm_end > region->vm_top)
+ region->vm_end = region->vm_top;
+ }
+ }
+
+ up_write(&nommu_region_sem);
+ return 0;
+}
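A sketch of how a nommu filesystem's shrinking-truncate path might use the new hook before committing the smaller size; the caller shape and error handling here are hypothetical:

	static int demo_shrink(struct inode *inode, size_t newsize)
	{
		int ret = 0;

		if (newsize < (size_t)inode->i_size)
			ret = nommu_shrink_inode_mappings(inode,
					(size_t)inode->i_size, newsize);
		if (ret == 0)
			i_size_write(inode, newsize);
		return ret;
	}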
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
spin_unlock(&zone->lock);
if (!page)
goto failed;
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
}
/* Merge backward if suitable */
- if (start_pfn < early_node_map[i].end_pfn &&
+ if (start_pfn < early_node_map[i].start_pfn &&
end_pfn >= early_node_map[i].start_pfn) {
early_node_map[i].start_pfn = start_pfn;
return;
}
EXPORT_SYMBOL(strndup_user);
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
if (!populated_zone(zone))
continue;
+ if (zone_is_all_unreclaimable(zone))
+ continue;
+
if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
0, 0))
return 1;
return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}
+static inline struct net *ipv6_skb_net(struct sk_buff *skb)
+{
+ return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
+}
+
/* Router Alert as of RFC 2711 */
static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
+ struct net *net = ipv6_skb_net(skb);
u32 pkt_len;
- struct net *net = dev_net(skb_dst(skb)->dev);
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "debugfs_netdev.h"
#include "mesh.h"
#include "led.h"
#include "driver-ops.h"
+#include "wme.h"
/**
* DOC: Interface list locking
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
err_del_interface:
/*
* Stop TX on this interface first.
*/
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
/*
* Now delete all active aggregation sessions.
WARN_ON(flushed);
}
+static u16 ieee80211_netdev_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
+}
+
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_select_queue = ieee80211_netdev_select_queue,
};
+static u16 ieee80211_monitor_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_radiotap_header *rtap = (void *)skb->data;
+ u8 *p;
+
+ if (local->hw.queues < 4)
+ return 0;
+
+ if (skb->len < 4 ||
+ skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
+ return 0; /* doesn't matter, frame will be dropped */
+
+ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
+
+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ skb->priority = 7;
+ return ieee802_1d_to_ac[skb->priority];
+ }
+
+ p = ieee80211_get_qos_ctl(hdr);
+ skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+ return ieee80211_downgrade_queue(local, skb);
+}
+
static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_select_queue = ieee80211_monitor_select_queue,
};
static void ieee80211_if_setup(struct net_device *dev)
ASSERT_RTNL();
- ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup);
+ ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
+ name, ieee80211_if_setup, local->hw.queues);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
ieee80211_recalc_ps(local, -1);
mutex_unlock(&local->iflist_mtx);
- netif_start_queue(sdata->dev);
+ netif_tx_start_all_queues(sdata->dev);
netif_carrier_on(sdata->dev);
}
* time -- we don't want the scan code to enable queues.
*/
- netif_stop_queue(sdata->dev);
+ netif_tx_stop_all_queues(sdata->dev);
netif_carrier_off(sdata->dev);
rcu_read_lock();
rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
- /* XXX: differentiate, can only happen for CSA now! */
+ if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+ break;
+
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
ifmgd->associated);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- ieee80211_select_queue(local, fwd_skb);
+ skb_set_queue_mapping(skb,
+ ieee80211_select_queue(rx->sdata, fwd_skb));
+ ieee80211_set_qos_hdr(local, skb);
if (is_multicast_ether_addr(fwd_hdr->addr1))
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
}
break;
default:
+ /* do not process rejected action frames */
+ if (mgmt->u.action.category & 0x80)
+ return RX_DROP_MONITOR;
+
return RX_CONTINUE;
}
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated) {
ieee80211_scan_ps_disable(sdata);
- netif_wake_queue(sdata->dev);
+ netif_tx_wake_all_queues(sdata->dev);
}
} else
- netif_wake_queue(sdata->dev);
+ netif_tx_wake_all_queues(sdata->dev);
/* re-enable beaconing */
if (sdata->vif.type == NL80211_IFTYPE_AP ||
* are handled in the scan state machine
*/
if (sdata->vif.type != NL80211_IFTYPE_STATION)
- netif_stop_queue(sdata->dev);
+ netif_tx_stop_all_queues(sdata->dev);
}
mutex_unlock(&local->iflist_mtx);
continue;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- netif_stop_queue(sdata->dev);
+ netif_tx_stop_all_queues(sdata->dev);
if (sdata->u.mgd.associated)
ieee80211_scan_ps_enable(sdata);
}
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated)
ieee80211_scan_ps_disable(sdata);
- netif_wake_queue(sdata->dev);
+ netif_tx_wake_all_queues(sdata->dev);
}
}
mutex_unlock(&local->iflist_mtx);
return;
}
- ieee80211_select_queue(local, skb);
+ ieee80211_set_qos_hdr(local, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
+ /* send all internal mgmt frames on VO */
+ skb_set_queue_mapping(skb, 0);
+
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
if (!skb_queue_empty(&local->pending[queue]))
tasklet_schedule(&local->tx_pending_tasklet);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
__set_bit(reason, &local->queue_stop_reasons[queue]);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
}
-/* Indicate which queue to use. */
-static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
+/* Indicate which queue to use. */
+u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta = NULL;
+ u32 sta_flags = 0;
+ const u8 *ra = NULL;
+ bool qos = false;
- if (!ieee80211_is_data(hdr->frame_control)) {
- /* management frames go on AC_VO queue, but are sent
- * without QoS control fields */
- return 0;
+ if (local->hw.queues < 4 || skb->len < 6) {
+ skb->priority = 0; /* required for correct WPA/11i MIC */
+ return min_t(u16, local->hw.queues - 1,
+ ieee802_1d_to_ac[skb->priority]);
+ }
+
+ rcu_read_lock();
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ rcu_read_lock();
+ sta = rcu_dereference(sdata->u.vlan.sta);
+ if (sta)
+ sta_flags = get_sta_flags(sta);
+ rcu_read_unlock();
+ if (sta)
+ break;
+ case NL80211_IFTYPE_AP:
+ ra = skb->data;
+ break;
+ case NL80211_IFTYPE_WDS:
+ ra = sdata->u.wds.remote_addr;
+ break;
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ /*
+ * XXX: This is clearly broken ... but already was before,
+ * because ieee80211_fill_mesh_addresses() would clear A1
+ * except for multicast addresses.
+ */
+ break;
+#endif
+ case NL80211_IFTYPE_STATION:
+ ra = sdata->u.mgd.bssid;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ra = skb->data;
+ break;
+ default:
+ break;
}
- if (0 /* injected */) {
- /* use AC from radiotap */
+ if (!sta && ra && !is_multicast_ether_addr(ra)) {
+ sta = sta_info_get(local, ra);
+ if (sta)
+ sta_flags = get_sta_flags(sta);
}
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ if (sta_flags & WLAN_STA_WME)
+ qos = true;
+
+ rcu_read_unlock();
+
+ if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return ieee802_1d_to_ac[skb->priority];
}
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
+ return ieee80211_downgrade_queue(local, skb);
+}
+
+u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
/* in case we are a client verify acm is not set for this ac */
while (unlikely(local->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
return ieee802_1d_to_ac[skb->priority];
}
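For orientation, the conventional WMM mapping behind ieee802_1d_to_ac[] in this mac80211 version (queue 0 is AC_VO, queue 3 is AC_BK); a reference sketch, not copied from the patch:

	/*
	 *	802.1d UP 1, 2 -> AC_BK (queue 3)
	 *	802.1d UP 0, 3 -> AC_BE (queue 2)
	 *	802.1d UP 4, 5 -> AC_VI (queue 1)
	 *	802.1d UP 6, 7 -> AC_VO (queue 0)
	 *
	 * If admission control (ACM) is set on AC_VI, a frame with
	 * priority 5 is stepped down to priority 3 (AC_BE) by the loop
	 * above, and so on until an AC without ACM is reached.
	 */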
-void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 queue;
- u8 tid;
-
- queue = classify80211(local, skb);
- if (unlikely(queue >= local->hw.queues))
- queue = local->hw.queues - 1;
-
- /*
- * Now we know the 1d priority, fill in the QoS header if
- * there is one (and we haven't done this before).
- */
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ /* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
- u8 ack_policy = 0;
+ u8 ack_policy = 0, tid;
+
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+
if (unlikely(local->wifi_wme_noack_test))
ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
QOS_CONTROL_ACK_POLICY_SHIFT;
*p++ = ack_policy | tid;
*p = 0;
}
-
- skb_set_queue_mapping(skb, queue);
}
extern const int ieee802_1d_to_ac[8];
-void ieee80211_select_queue(struct ieee80211_local *local,
- struct sk_buff *skb);
+u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
+u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+ struct sk_buff *skb);
+
#endif /* _WME_H */
mutex_unlock(&rdev->devlist_mtx);
dev_put(dev);
}
-#ifdef CONFIG_CFG80211_WEXT
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
+#ifdef CONFIG_CFG80211_WEXT
wdev_lock(wdev);
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
break;
}
wdev_unlock(wdev);
+#endif
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
-#endif
break;
case NETDEV_UNREGISTER:
/*
request->wiphy_idx = WIPHY_IDX_STALE;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
- request->initiator = NL80211_REGDOM_SET_BY_USER,
+ request->initiator = NL80211_REGDOM_SET_BY_USER;
queue_regulatory_request(request);
if ($line =~ /RIP: 0010:\[\<([a-z0-9]+)\>\]/) {
$target = $1;
}
- if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+(0x[0-9a-f]+)\/0x[a-f0-9]/) {
+ if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+0x([0-9a-f]+)\/0x[a-f0-9]/) {
$function = $1;
$func_offset = $2;
}
match = strstr(symbol, name);
if (!match)
return 0;
- return match[strlen(symbol)] == '\0';
+ return match[strlen(name)] == '\0';
}
static void do_table(void *symval, unsigned long size,
}
}
-if ($arch eq "x86") {
+if ($arch =~ /(x86(_64)?)|(i386)/) {
if ($bits == 64) {
$arch = "x86_64";
} else {
# explicitly what architecture to check for. Fix this up for yours..
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(V), 2)
+ QUIET_STDERR = ">/dev/null"
+else
+ QUIET_STDERR = ">/dev/null 2>&1"
+endif
+
+BITBUCKET = "/dev/null"
+
+ifneq ($(shell sh -c "(echo '\#include <stdio.h>'; echo 'int main(void) { return puts(\"hi\"); }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
+ BITBUCKET = .perf.dev.null
+endif
+
+ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
CFLAGS := $(CFLAGS) -fstack-protector-all
endif
PERFLIBS = $(LIB_FILE)
-ifeq ($(V), 2)
- QUIET_STDERR = ">/dev/null"
-else
- QUIET_STDERR = ">/dev/null 2>&1"
-endif
#
# Platform specific tweaks
#
PTHREAD_LIBS =
endif
-ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
endif
- ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231);
BASIC_CFLAGS += -DNO_LIBDWARF
else
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
endif
-ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o $(BITBUCKET) $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DNO_LIBPERL
else
ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
ifdef NO_DEMANGLE
BASIC_CFLAGS += -DNO_DEMANGLE
else
- has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
+ has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
else
- has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
ifeq ($(has_bfd_iberty),y)
EXTLIBS += -lbfd -liberty
else
- has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
ifeq ($(has_bfd_iberty_z),y)
EXTLIBS += -lbfd -liberty -lz
else
- has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+ has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
ifeq ($(has_cplus_demangle),y)
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
SHELL = $(SHELL_PATH)
-all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
+all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
ifneq (,$X)
$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
endif
.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
.PHONY: .FORCE-PERF-BUILD-OPTIONS
+.perf.dev.null:
+ touch .perf.dev.null
+
+.INTERMEDIATE: .perf.dev.null
+
### Make sure built-ins do not have dups and listed in perf.c
#
check-builtins::