nousb [USB] Disable the USB subsystem
- nowatchdog [KNL] Disable the lockup detector.
+ nowatchdog [KNL] Disable the lockup detector (NMI watchdog).
nowb [ARM]
zero)
bool pm_runtime_suspended(struct device *dev);
- - return true if the device's runtime PM status is 'suspended', or false
- otherwise
+ - return true if the device's runtime PM status is 'suspended' and its
+ 'power.disable_depth' field is equal to zero, or false otherwise
void pm_runtime_allow(struct device *dev);
- set the power.runtime_auto flag for the device and decrease its usage
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION =
+EXTRAVERSION = .3
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
config RWSEM_XCHGADD_ALGORITHM
bool
+config ARCH_HAS_CPU_IDLE_WAIT
+ bool
+ default y
+
+config ARCH_HAS_DEFAULT_IDLE
+ bool
+ default y
+
config ARCH_HAS_ILOG2_U32
bool
help
Setting ARM L1 cache line size to 64 Bytes.
+config ARCH_PROVIDES_UDELAY
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
select HAVE_CLK
select COMMON_CLKDEV
select ARCH_HAS_BARRIERS if CACHE_L2X0
+ select ARCH_HAS_CPUFREQ
+ select ARCH_PROVIDES_UDELAY
+ select FIQ
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
Tegra 6xx and Tegra 2 series).
unsigned int irq_offset;
void __iomem *dist_base;
void __iomem *cpu_base;
+#ifdef CONFIG_PM
+ u32 saved_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_pri[DIV_ROUND_UP(1020, 4)];
+ u32 saved_target[DIV_ROUND_UP(1020, 4)];
+#endif
+ unsigned int max_irq;
};
#ifndef MAX_GIC_NR
* our "acknowledge" routine disable the interrupt, then mark it as
* complete.
*/
-static void gic_ack_irq(unsigned int irq)
+void gic_ack_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_unlock(&irq_controller_lock);
}
-static void gic_mask_irq(unsigned int irq)
+void gic_mask_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_unlock(&irq_controller_lock);
}
-static void gic_unmask_irq(unsigned int irq)
+void gic_unmask_irq(unsigned int irq)
{
u32 mask = 1 << (irq % 32);
spin_unlock(&irq_controller_lock);
}
-static int gic_set_type(unsigned int irq, unsigned int type)
+int gic_set_type(unsigned int irq, unsigned int type)
{
void __iomem *base = gic_dist_base(irq);
unsigned int gicirq = gic_irq(irq);
}
#ifdef CONFIG_SMP
-static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
+int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
{
void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
unsigned int shift = (irq % 4) * 8;
set_irq_chained_handler(irq, gic_handle_cascade_irq);
}
-void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
- unsigned int irq_start)
+static unsigned int _gic_dist_init(unsigned int gic_nr)
{
unsigned int max_irq, i;
+ void __iomem *base = gic_data[gic_nr].dist_base;
u32 cpumask = 1 << smp_processor_id();
- if (gic_nr >= MAX_GIC_NR)
- BUG();
-
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
- gic_data[gic_nr].dist_base = base;
- gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
-
writel(0, base + GIC_DIST_CTRL);
/*
for (i = 0; i < max_irq; i += 32)
writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+ return max_irq;
+}
+
+/*
+ * _gic_dist_exit - disable the GIC distributor for gic_nr.
+ *
+ * Writes 0 to GIC_DIST_CTRL, stopping interrupt forwarding from the
+ * distributor to the CPU interfaces.  Caller must validate gic_nr.
+ */
+static void _gic_dist_exit(unsigned int gic_nr)
+{
+	writel(0, gic_data[gic_nr].dist_base + GIC_DIST_CTRL);
+}
+
+#ifdef CONFIG_PM
+/*
+ * gic_dist_save - snapshot GIC distributor state before power-down.
+ *
+ * Disables the distributor (via _gic_dist_exit) and then reads the
+ * per-interrupt CONFIG, PRI, TARGET and ENABLE_SET register banks into
+ * the saved_* arrays in gic_data[gic_nr], sized by max_irq as recorded
+ * at init time.  gic_dist_restore() replays this state on resume.
+ * BUG()s if gic_nr is out of range.
+ */
+void gic_dist_save(unsigned int gic_nr)
+{
+	unsigned int max_irq = gic_data[gic_nr].max_irq;
+	void __iomem *dist_base = gic_data[gic_nr].dist_base;
+	int i;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	/* Quiesce the distributor before reading its state back. */
+	_gic_dist_exit(gic_nr);
+
+	/* GIC_DIST_CONFIG: 2 bits per interrupt, 16 interrupts per word. */
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 16); i++)
+		gic_data[gic_nr].saved_conf[i] =
+			readl(dist_base + GIC_DIST_CONFIG + i * 4);
+
+	/* GIC_DIST_PRI: 8 bits per interrupt, 4 interrupts per word. */
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 4); i++)
+		gic_data[gic_nr].saved_pri[i] =
+			readl(dist_base + GIC_DIST_PRI + i * 4);
+
+	/* GIC_DIST_TARGET: 8 bits per interrupt, 4 interrupts per word. */
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 4); i++)
+		gic_data[gic_nr].saved_target[i] =
+			readl(dist_base + GIC_DIST_TARGET + i * 4);
+
+	/* GIC_DIST_ENABLE_SET: 1 bit per interrupt, 32 per word. */
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 32); i++)
+		gic_data[gic_nr].saved_enable[i] =
+			readl(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * gic_dist_restore - reprogram the GIC after power-up from saved state.
+ *
+ * Re-runs the common distributor init (_gic_dist_init), then writes the
+ * CONFIG, PRI, TARGET and ENABLE_SET banks previously captured by
+ * gic_dist_save() back into the hardware, and finally re-enables both
+ * the distributor (GIC_DIST_CTRL) and the local CPU interface
+ * (GIC_CPU_PRIMASK = 0xf0, GIC_CPU_CTRL = 1).  BUG()s if gic_nr is out
+ * of range.
+ */
+void gic_dist_restore(unsigned int gic_nr)
+{
+	unsigned int max_irq;
+	unsigned int i;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	/* Re-establish baseline distributor state (leaves it disabled). */
+	_gic_dist_init(gic_nr);
+
+	max_irq = gic_data[gic_nr].max_irq;
+	dist_base = gic_data[gic_nr].dist_base;
+	cpu_base = gic_data[gic_nr].cpu_base;
+
+	/* Replay saved banks; word granularity mirrors gic_dist_save(). */
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 16); i++)
+		writel(gic_data[gic_nr].saved_conf[i],
+			dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 4); i++)
+		writel(gic_data[gic_nr].saved_pri[i],
+			dist_base + GIC_DIST_PRI + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 4); i++)
+		writel(gic_data[gic_nr].saved_target[i],
+			dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(max_irq, 32); i++)
+		writel(gic_data[gic_nr].saved_enable[i],
+			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	/* Turn the distributor and this CPU's interface back on. */
+	writel(1, dist_base + GIC_DIST_CTRL);
+	writel(0xf0, cpu_base + GIC_CPU_PRIMASK);
+	writel(1, cpu_base + GIC_CPU_CTRL);
+}
+#endif
+
+void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
+ unsigned int irq_start)
+{
+ unsigned int max_irq;
+ unsigned int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_data[gic_nr].dist_base = base;
+ gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
+
+ max_irq = _gic_dist_init(gic_nr);
+ gic_data[gic_nr].max_irq = max_irq;
+
/*
* Setup the Linux IRQ subsystem.
*/
writel(1, base + GIC_DIST_CTRL);
}
+/*
+ * gic_dist_exit - public wrapper that disables the distributor.
+ *
+ * Validates gic_nr (BUG() on out-of-range) and delegates to
+ * _gic_dist_exit().
+ */
+void gic_dist_exit(unsigned int gic_nr)
+{
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	_gic_dist_exit(gic_nr);
+}
+
void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
{
if (gic_nr >= MAX_GIC_NR)
writel(1, base + GIC_CPU_CTRL);
}
+/*
+ * gic_cpu_exit - disable this GIC's CPU interface.
+ *
+ * Writes 0 to GIC_CPU_CTRL so interrupts are no longer signalled to the
+ * processor; counterpart to the enable done in gic_cpu_init().  BUG()s
+ * if gic_nr is out of range.
+ */
+void gic_cpu_exit(unsigned int gic_nr)
+{
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	writel(0, gic_data[gic_nr].cpu_base + GIC_CPU_CTRL);
+}
+
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
--- /dev/null
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="arm-eabi-"
+# CONFIG_SWAP is not set
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PANIC_TIMEOUT=10
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_ASHMEM=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_MACH_HARMONY=y
+CONFIG_MACH_VENTANA=y
+CONFIG_TEGRA_DEBUG_UARTD=y
+CONFIG_TEGRA_PWM=y
+CONFIG_TEGRA_NVRM=y
+CONFIG_TEGRA_NVOS=y
+CONFIG_FIQ_DEBUGGER=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=448M@0M console=ttyS0,115200n8 earlyprintk init=/bin/ash"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_PM=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ADDRTYPE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+# CONFIG_RPS is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_RFKILL=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND_TEGRA=y
+CONFIG_MTD_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_SENSORS_AK8975=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_PANJIT_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PDA_POWER=y
+CONFIG_WATCHDOG=y
+CONFIG_TEGRA_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+# CONFIG_RC_MAP is not set
+# CONFIG_IR_NEC_DECODER is not set
+# CONFIG_IR_RC5_DECODER is not set
+# CONFIG_IR_RC6_DECODER is not set
+# CONFIG_IR_JVC_DECODER is not set
+# CONFIG_IR_SONY_DECODER is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_DC=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_TEGRA_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_ANDROID=y
+CONFIG_USB_ANDROID_ADB=y
+CONFIG_USB_ANDROID_MTP=y
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.35-rc2
+# Tue Jun 8 17:11:49 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_LOCKBREAK=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+# CONFIG_CGROUP_NS is not set
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_CGROUP_MEM_RES_CTLR is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+CONFIG_ARCH_TEGRA=y
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+
+#
+# NVIDIA Tegra options
+#
+CONFIG_ARCH_TEGRA_2x_SOC=y
+
+#
+# Tegra board type
+#
+CONFIG_MACH_HARMONY=y
+# CONFIG_TEGRA_DEBUG_UART_NONE is not set
+# CONFIG_TEGRA_DEBUG_UARTA is not set
+# CONFIG_TEGRA_DEBUG_UARTB is not set
+# CONFIG_TEGRA_DEBUG_UARTC is not set
+CONFIG_TEGRA_DEBUG_UARTD=y
+# CONFIG_TEGRA_DEBUG_UARTE is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_ARCH_HAS_BARRIERS=y
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_ARM_GIC=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
+CONFIG_LOCAL_TIMERS=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HIGHMEM=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=448M@0M console=ttyS0,115200n8 earlyprintk init=/bin/ash"
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+# CONFIG_NEON is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+# CONFIG_IPV6_ROUTE_INFO is not set
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_RPS=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ANDROID_PMEM is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+# CONFIG_MFD_SUPPORT is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_TEGRA=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_LOGO is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB_LEAK is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_DEBUG_SG=y
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
@ Slightly optimised to avoid incrementing the pointer twice
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
.if \rept == 2
- usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+ usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
.endif
add\cond \ptr, #\rept * \inc
#include <asm/param.h> /* HZ */
+#ifdef CONFIG_ARCH_PROVIDES_UDELAY
+#include <mach/delay.h>
+#else
extern void __delay(int loops);
/*
__const_udelay((n) * ((2199023U*HZ)>>11))) : \
__udelay(n))
+#endif /* defined(CONFIG_ARCH_PROVIDES_UDELAY) */
#endif /* defined(_ARM_DELAY_H) */
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
+#define L2X0_PREFETCH_OFFSET 0xF60
+#define L2X0_PWR_CTRL 0xF80
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern void l2x0_shutdown(void);
+extern void l2x0_restart(void);
+extern bool l2x0_disabled;
#endif
#endif
#ifndef __ASSEMBLY__
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
+void gic_dist_save(unsigned int gic_nr);
+void gic_dist_restore(unsigned int gic_nr);
+void gic_dist_exit(unsigned int gic_nr);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
+void gic_cpu_exit(unsigned int gic_nr);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+void gic_ack_irq(unsigned int irq);
+void gic_mask_irq(unsigned int irq);
+void gic_unmask_irq(unsigned int irq);
+int gic_set_type(unsigned int irq, unsigned int type);
+#ifdef CONFIG_SMP
+int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val);
+#endif
#endif
#endif
#define _GP_REGS 16
#define _FP_REGS 8
#define _EXTRA_REGS 2
-#define DBG_MAX_REG_NUM (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
#define _SPT 13
#define _LR 14
#define _PC 15
-#define _CPSR (DBG_MAX_REG_NUM - 1)
+#define _CPSR (GDB_MAX_REGS - 1)
/*
* So that we can denote the end of a frame for tracing,
#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */
+#define L_PTE_MT_INNER_WB (0x05 << 2) /* 0101 (armv6, armv7) */
#define L_PTE_MT_MASK (0x0f << 2)
#ifndef __ASSEMBLY__
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
#endif
+#define pgprot_inner_writeback(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_INNER_WB)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
+#define cache_ops_need_broadcast() 0
+#else
static inline int cache_ops_need_broadcast(void)
{
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
}
+#endif
#endif
#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+#define TWD_TIMER_CONTROL_PRESCALE_MASK (0xFF << 8)
struct clock_event_device;
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *);
+/*
+ * Use this setup function on systems where the cpu clock frequency may
+ * change. periphclk_prescaler is the fixed divider value between the cpu
+ * clock and the PERIPHCLK clock that feeds the TWD. target_rate should be
+ * low enough that the prescaler can accurately reach the target rate from the
+ * lowest cpu frequency.
+ */
+void twd_timer_setup_scalable(struct clock_event_device *,
+ unsigned long target_rate, unsigned int periphclk_prescaler);
+
#endif
extern int cpu_architecture(void);
extern void cpu_init(void);
+extern void cpu_idle_wait(void);
+extern void default_idle(void);
void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);
return;
/* Initialize to zero */
- for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
+ for (regno = 0; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
*/
-static void default_idle(void)
+void default_idle(void)
{
if (!need_resched())
arch_idle();
local_irq_enable();
}
+EXPORT_SYMBOL(default_idle);
void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL(pm_idle);
}
}
+#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
+static void do_nothing(void *unused)
+{
+}
+
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ smp_call_function(do_nothing, NULL, 1);
+}
+#endif
+
+
static char reboot_mode = 'h';
int __init reboot_setup(char *str)
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/cpufreq.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
void __iomem *twd_base;
static unsigned long twd_timer_rate;
+static unsigned long twd_periphclk_prescaler;
+static unsigned long twd_cpu_rate;
static void twd_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- unsigned long ctrl;
+ unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ ctrl &= TWD_TIMER_CONTROL_PRESCALE_MASK;
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* timer load already set up */
- ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+ ctrl |= TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
- ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+ ctrl |= TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
default:
- ctrl = 0;
+ break;
}
__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
return 0;
}
-static void __cpuinit twd_calibrate_rate(void)
+/*
+ * Recalculate the twd prescaler value when the cpu frequency changes. To
+ * prevent early timer interrupts, must be called before changing the cpu
+ * frequency if the frequency is increasing, or after if the frequency is
+ * decreasing.
+ */
+static void twd_update_prescaler(void *data)
+{
+ u32 ctrl;
+ int prescaler;
+ unsigned long periphclk_rate;
+
+ BUG_ON(twd_periphclk_prescaler == 0 || twd_timer_rate == 0);
+
+ periphclk_rate = twd_cpu_rate / twd_periphclk_prescaler;
+
+ prescaler = DIV_ROUND_UP(periphclk_rate, twd_timer_rate);
+ prescaler = clamp(prescaler - 1, 0, 0xFF);
+
+ ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ ctrl &= ~TWD_TIMER_CONTROL_PRESCALE_MASK;
+ ctrl |= prescaler << 8;
+ __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+ if (((freqs->new > freqs->old) && state == CPUFREQ_PRECHANGE) ||
+ ((freqs->old > freqs->new) && state == CPUFREQ_POSTCHANGE)) {
+ /* freqs->new is in kHz, twd_cpu_rate is in Hz */
+ twd_cpu_rate = freqs->new * 1000;
+
+ smp_call_function_single(freqs->cpu, twd_update_prescaler,
+ NULL, 1);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+ .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+ if (twd_cpu_rate)
+ return cpufreq_register_notifier(&twd_cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+static void __cpuinit twd_calibrate_rate(unsigned long target_rate,
+ unsigned int periphclk_prescaler)
{
unsigned long load, count;
u64 waitjiffies;
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+ /*
+ * If a target rate has been requested, adjust the TWD prescaler
+ * to get the closest lower frequency.
+ */
+ if (target_rate) {
+ twd_periphclk_prescaler = periphclk_prescaler;
+
+ printk("%lu.%02luMHz, setting to ",
+ twd_timer_rate / 1000000,
+ (twd_timer_rate / 10000) % 100);
+ twd_cpu_rate = twd_timer_rate * periphclk_prescaler;
+ twd_timer_rate = target_rate;
+ twd_update_prescaler(NULL);
+ }
+
printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
- (twd_timer_rate / 100000) % 100);
+ (twd_timer_rate / 10000) % 100);
+ } else {
+ if (target_rate) {
+ BUG_ON(target_rate != twd_timer_rate);
+ twd_update_prescaler(NULL);
+ }
}
load = twd_timer_rate / HZ;
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static void __cpuinit __twd_timer_setup(struct clock_event_device *clk,
+ unsigned long target_rate, unsigned int periphclk_prescaler)
{
unsigned long flags;
- twd_calibrate_rate();
+ twd_calibrate_rate(target_rate, periphclk_prescaler);
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
clockevents_register_device(clk);
}
+void __cpuinit twd_timer_setup_scalable(struct clock_event_device *clk,
+ unsigned long target_rate, unsigned int periphclk_prescaler)
+{
+ __twd_timer_setup(clk, target_rate, periphclk_prescaler);
+}
+
+void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+ __twd_timer_setup(clk, 0, 0);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
/*
* take a local timer down
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- delay.o findbit.o memchr.o memcpy.o \
+ findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
+ifneq ($(CONFIG_ARCH_PROVIDES_UDELAY),y)
+ lib-y += delay.o
+endif
+
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
- rsb r1, r3, #0
- and r3, r3, r1
+ rsb r0, r3, #0
+ and r3, r3, r0
clz r3, r3
rsb r3, r3, #31
add r0, r2, r3
addeq r2, r2, #1
mov r0, r2
#endif
+ cmp r1, r0 @ Clamp to maxbit
+ movlo r0, r1
mov pc, lr
#define AT91_MCI_TRTYP_BLOCK (0 << 19)
#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
#define AT91_MCI_TRTYP_STREAM (2 << 19)
+#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
+#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
#define AT91_MCI_BLKR 0x18 /* Block Register */
#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
* published by the Free Software Foundation.
*/
- .macro addruart,rx
+ .macro addruart,rx,rtmp
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
moveq \rx, #0x10000000
{
int i;
- hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
+ hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
int i;
#ifdef CONFIG_CACHE_L2X0
- if (core_tile_pbxa9mp()) {
+ if (!l2x0_disabled && core_tile_pbxa9mp()) {
void __iomem *l2x0_base =
__io_address(REALVIEW_PBX_TILE_L220_BASE);
select CPU_V7
select ARM_GIC
select ARCH_REQUIRE_GPIOLIB
+ select ARM_ERRATA_742230
+ select USB_ARCH_HAS_EHCI if USB_SUPPORT
+ select USB_ULPI if USB_SUPPORT
+ select USB_ULPI_VIEWPORT if USB_SUPPORT
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
config MACH_HARMONY
bool "Harmony board"
help
- Support for nVidia Harmony development platform
+ Support for NVIDIA Harmony development platform
+
+config MACH_VENTANA
+ bool "Ventana board"
+ help
+ Support for NVIDIA Ventana development platform
choice
prompt "Low-level debug console UART"
endchoice
+config TEGRA_SYSTEM_DMA
+ bool "Enable system DMA driver for NVIDIA Tegra SoCs"
+ default y
+ help
+ Adds system DMA functionality for NVIDIA Tegra SoCs, used by
+ several Tegra device drivers
+
+config TEGRA_PWM
+ tristate "Enable PWM driver"
+ select HAVE_PWM
+ help
+ Enable support for the Tegra PWM controller(s).
+
+config TEGRA_FIQ_DEBUGGER
+ bool "Enable the FIQ serial debugger on Tegra"
+ default y
+ select FIQ_DEBUGGER
+ help
+	  Enables the FIQ serial debugger on Tegra
+
endif
+
+config TEGRA_EMC_SCALING_ENABLE
+ bool "Enable scaling the memory frequency"
+ default n
+
+config TEGRA_CPU_DVFS
+ bool "Enable voltage scaling on Tegra CPU"
+ default y
+
+config TEGRA_CORE_DVFS
+ bool "Enable voltage scaling on Tegra core"
+ depends on TEGRA_CPU_DVFS
+ default y
+
+config TEGRA_IOVMM_GART
+ bool "Enable I/O virtual memory manager for GART"
+ depends on ARCH_TEGRA_2x_SOC
+ default y
+ select TEGRA_IOVMM
+ help
+ Enables support for remapping discontiguous physical memory
+ shared with the operating system into contiguous I/O virtual
+ space through the GART hardware included on Tegra SoCs
+
+config TEGRA_IOVMM
+ bool
+
+config TEGRA_ARB_SEMAPHORE
+ bool
+
+config TEGRA_THERMAL_THROTTLE
+ bool "Enable throttling of CPU speed on overtemp"
+ depends on CPU_FREQ
+ default y
+ help
+ Also requires enabling a temperature sensor such as NCT1008.
obj-y += common.o
+obj-y += apbio.o
obj-y += io.o
-obj-y += irq.o
+obj-y += irq.o legacy_irq.o
obj-y += clock.o
+obj-y += dvfs.o
obj-y += timer.o
obj-y += gpio.o
obj-y += pinmux.o
+obj-y += devices.o
+obj-y += delay.o
+obj-y += powergate.o
+obj-y += suspend.o
+obj-y += fuse.o
+obj-y += kfuse.o
+obj-y += tegra_i2s_audio.o
+obj-y += tegra_spdif_audio.o
+obj-y += mc.o
+obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
+obj-$(CONFIG_FIQ) += fiq.o
+obj-$(CONFIG_TEGRA_FIQ_DEBUGGER) += tegra_fiq_debugger.o
+obj-$(CONFIG_TEGRA_PWM) += pwm.o
+obj-$(CONFIG_TEGRA_ARB_SEMAPHORE) += arb_sema.o
+
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
-obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_fuse.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += suspend-t2.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_save.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
+obj-$(CONFIG_CPU_V7) += cortex-a9.o
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
+obj-$(CONFIG_SMP) += localtimer.o
+obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += headsmp-t2.o
+obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
+obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_TEGRA_IOVMM) += iovmm.o
+obj-$(CONFIG_TEGRA_IOVMM_GART) += iovmm-gart.o
obj-${CONFIG_MACH_HARMONY} += board-harmony.o
obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o
+obj-${CONFIG_MACH_HARMONY} += board-harmony-panel.o
+obj-${CONFIG_MACH_HARMONY} += board-harmony-sdhci.o
+
+obj-${CONFIG_MACH_VENTANA} += board-ventana.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-pinmux.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-sdhci.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-power.o
+obj-${CONFIG_MACH_VENTANA} += board-ventana-panel.o
--- /dev/null
+/*
+ * arch/arm/mach-tegra/apbio.c
+ *
+ * Copyright (C) 2010 NVIDIA Corporation.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+
+#include "apbio.h"
+
+static DEFINE_MUTEX(tegra_apb_dma_lock);
+
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+static struct tegra_dma_channel *tegra_apb_dma;
+static u32 *tegra_apb_bb;
+static dma_addr_t tegra_apb_bb_phys;
+static DECLARE_COMPLETION(tegra_apb_wait);
+
+static void apb_dma_complete(struct tegra_dma_req *req)
+{
+ complete(&tegra_apb_wait);
+}
+
+static inline u32 apb_readl(unsigned long offset)
+{
+ struct tegra_dma_req req;
+ int ret;
+
+ if (!tegra_apb_dma)
+ return readl(IO_TO_VIRT(offset));
+
+ mutex_lock(&tegra_apb_dma_lock);
+ req.complete = apb_dma_complete;
+ req.to_memory = 1;
+ req.dest_addr = tegra_apb_bb_phys;
+ req.dest_bus_width = 32;
+ req.dest_wrap = 1;
+ req.source_addr = offset;
+ req.source_bus_width = 32;
+ req.source_wrap = 4;
+ req.req_sel = 0;
+ req.size = 4;
+
+ INIT_COMPLETION(tegra_apb_wait);
+
+ tegra_dma_enqueue_req(tegra_apb_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_apb_wait,
+ msecs_to_jiffies(50));
+
+ if (WARN(ret == 0, "apb read dma timed out"))
+ *(u32 *)tegra_apb_bb = 0;
+
+ mutex_unlock(&tegra_apb_dma_lock);
+ return *((u32 *)tegra_apb_bb);
+}
+
+static inline void apb_writel(u32 value, unsigned long offset)
+{
+ struct tegra_dma_req req;
+ int ret;
+
+ if (!tegra_apb_dma) {
+ writel(value, IO_TO_VIRT(offset));
+ return;
+ }
+
+ mutex_lock(&tegra_apb_dma_lock);
+ *((u32 *)tegra_apb_bb) = value;
+ req.complete = apb_dma_complete;
+ req.to_memory = 0;
+ req.dest_addr = offset;
+ req.dest_wrap = 4;
+ req.dest_bus_width = 32;
+ req.source_addr = tegra_apb_bb_phys;
+ req.source_bus_width = 32;
+ req.source_wrap = 1;
+ req.req_sel = 0;
+ req.size = 4;
+
+ INIT_COMPLETION(tegra_apb_wait);
+
+ tegra_dma_enqueue_req(tegra_apb_dma, &req);
+
+ ret = wait_for_completion_timeout(&tegra_apb_wait,
+ msecs_to_jiffies(50));
+
+ mutex_unlock(&tegra_apb_dma_lock);
+}
+#else
+static inline u32 apb_readl(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(offset));
+}
+
+static inline void apb_writel(u32 value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(offset));
+}
+#endif
+
+u32 tegra_apb_readl(unsigned long offset)
+{
+ return apb_readl(offset);
+}
+
+void tegra_apb_writel(u32 value, unsigned long offset)
+{
+ apb_writel(value, offset);
+}
+
+void tegra_init_apb_dma(void)
+{
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+ tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+ TEGRA_DMA_SHARED);
+ if (!tegra_apb_dma) {
+ pr_err("%s: can not allocate dma channel\n", __func__);
+ return;
+ }
+
+ tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
+ &tegra_apb_bb_phys, GFP_KERNEL);
+ if (!tegra_apb_bb) {
+ pr_err("%s: can not allocate bounce buffer\n", __func__);
+ tegra_dma_free_channel(tegra_apb_dma);
+ tegra_apb_dma = NULL;
+ return;
+ }
+#endif
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/apbio.h
+ *
+ * Copyright (C) 2010 NVIDIA Corporation.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+u32 tegra_apb_readl(unsigned long offset);
+void tegra_apb_writel(u32 value, unsigned long offset);
+void tegra_init_apb_dma(void);
--- /dev/null
+/*
+ * Copyright (C) 2010, NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <mach/arb_sema.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+
+#define TEGRA_RPC_MAX_SEM 32
+
+/* arb_gnt ictrl */
+#define ARB_CPU_INT_EN 0x4
+
+/* arb_sema */
+#define ARB_GRANT_STATUS 0x0
+#define ARB_GRANT_REQUEST 0x4
+#define ARB_GRANT_RELEASE 0x8
+
+struct tegra_arb_dev {
+ void __iomem *sema_base;
+ void __iomem *gnt_base;
+ spinlock_t lock;
+ struct completion arb_gnt_complete[TEGRA_RPC_MAX_SEM];
+ struct mutex mutexes[TEGRA_RPC_MAX_SEM];
+ int irq;
+ int status;
+ bool suspended;
+};
+
+static struct tegra_arb_dev *arb;
+
+static inline u32 arb_sema_read(u32 offset)
+{
+ return readl(arb->sema_base + offset);
+}
+
+static inline void arb_sema_write(u32 value, u32 offset)
+{
+ writel(value, arb->sema_base + offset);
+}
+
+static inline u32 arb_gnt_read(u32 offset)
+{
+ return readl(arb->gnt_base + offset);
+}
+
+static inline void arb_gnt_write(u32 value, u32 offset)
+{
+ writel(value, arb->gnt_base + offset);
+}
+
+static void request_arb_sem(enum tegra_arb_module lock)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ arb_sema_write(1 << lock, ARB_GRANT_REQUEST);
+ value = arb_gnt_read(ARB_CPU_INT_EN);
+ value |= (1 << lock);
+ arb_gnt_write(value, ARB_CPU_INT_EN);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+}
+
+static void cancel_arb_sem(enum tegra_arb_module lock)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ arb_sema_write(1 << lock, ARB_GRANT_RELEASE);
+ value = arb_gnt_read(ARB_CPU_INT_EN);
+ value &= ~(1 << lock);
+ arb_gnt_write(value, ARB_CPU_INT_EN);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+}
+
+/*
+ * tegra_arb_mutex_lock_timeout - acquire a hardware arbitration semaphore.
+ * @lock: which arbitration module's semaphore to take
+ * @msecs: how long to sleep waiting for the hardware grant
+ *
+ * Serializes callers on the per-semaphore mutex, requests the hardware
+ * semaphore, and sleeps until the grant interrupt completes us or the
+ * timeout expires.  On success the mutex stays held until
+ * tegra_arb_mutex_unlock().  Returns 0 on success, -ENODEV if the driver
+ * never initialized, or -ETIMEDOUT (also used for "suspended").
+ *
+ * NOTE(review): arb->suspended is read without any lock here, so a
+ * suspend racing with this check can slip through — confirm callers are
+ * quiesced before tegra_arb_suspend() runs.
+ */
+int tegra_arb_mutex_lock_timeout(enum tegra_arb_module lock, int msecs)
+{
+ int ret;
+
+ if (!arb)
+ return -ENODEV;
+
+ if (arb->suspended) {
+ pr_err("device in suspend\n");
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&arb->mutexes[lock]);
+ INIT_COMPLETION(arb->arb_gnt_complete[lock]);
+ request_arb_sem(lock);
+ ret = wait_for_completion_timeout(&arb->arb_gnt_complete[lock], msecs_to_jiffies(msecs));
+ if (ret == 0) {
+ pr_err("timed out.\n");
+ cancel_arb_sem(lock);
+ mutex_unlock(&arb->mutexes[lock]);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_arb_mutex_lock_timeout);
+
+/*
+ * tegra_arb_mutex_unlock - release a semaphore taken with
+ * tegra_arb_mutex_lock_timeout().  Must be called by the same task that
+ * acquired it (mutex ownership rules).  Returns 0, -ENODEV, or
+ * -ETIMEDOUT while suspended.
+ */
+int tegra_arb_mutex_unlock(enum tegra_arb_module lock)
+{
+ if (!arb)
+ return -ENODEV;
+
+ if (arb->suspended) {
+ pr_err("device in suspend\n");
+ return -ETIMEDOUT;
+ }
+
+ cancel_arb_sem(lock);
+ mutex_unlock(&arb->mutexes[lock]);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_arb_mutex_unlock);
+
+/*
+ * Grant interrupt handler: for every semaphore that is both granted
+ * (ARB_GRANT_STATUS) and armed (ARB_CPU_INT_EN), disable its interrupt
+ * and wake the thread sleeping in tegra_arb_mutex_lock_timeout().
+ * Takes arb->lock to stay coherent with the request/cancel paths'
+ * read-modify-writes of ARB_CPU_INT_EN.
+ */
+static irqreturn_t arb_gnt_isr(int irq, void *dev_id)
+{
+ struct tegra_arb_dev *dev = dev_id;
+ unsigned long status;
+ u32 cpu_int_en;
+ unsigned int bit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arb->lock, flags);
+
+ status = arb_sema_read(ARB_GRANT_STATUS);
+ pr_debug("%s: 0x%lx\n", __func__, status);
+
+ /* disable the arb semaphores which were signalled */
+ cpu_int_en = arb_gnt_read(ARB_CPU_INT_EN);
+ arb_gnt_write((cpu_int_en & ~(status & cpu_int_en)),
+ ARB_CPU_INT_EN);
+
+ status &= cpu_int_en;
+ for_each_set_bit(bit, &status, BITS_PER_LONG)
+ complete(&dev->arb_gnt_complete[bit]);
+
+ spin_unlock_irqrestore(&arb->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * tegra_arb_suspend - system-suspend hook for the arbitration semaphores.
+ *
+ * Refuses suspend (-EBUSY) while any hardware semaphore is still granted,
+ * since a held semaphore cannot survive the power transition.  Unlike the
+ * lock/unlock entry points, the original dereferenced the global 'arb'
+ * unconditionally; guard it so a failed tegra_arb_init() does not oops
+ * the suspend path.
+ */
+int tegra_arb_suspend(void)
+{
+ unsigned long status;
+
+ /* Driver never probed: nothing to quiesce. */
+ if (!arb)
+ return 0;
+
+ status = arb_sema_read(ARB_GRANT_STATUS);
+
+ if (WARN_ON(status != 0)) {
+ pr_err("%s: suspending while holding arbitration "
+ "semaphore: %08lx\n", __func__, status);
+ }
+ arb->suspended = true;
+
+ return status ? -EBUSY : 0;
+}
+
+/*
+ * tegra_arb_resume - system-resume hook; re-enables the lock/unlock
+ * entry points.  NULL-guarded for symmetry with tegra_arb_suspend() in
+ * case initialization failed.
+ */
+int tegra_arb_resume(void)
+{
+ if (arb)
+ arb->suspended = false;
+ return 0;
+}
+
+/*
+ * Allocate and publish the singleton arbitration-semaphore device.
+ *
+ * Fixes vs. the original ordering: the spinlock is initialized and the
+ * global 'arb' is published *before* request_irq(), because the handler
+ * (arb_gnt_isr) may run as soon as the IRQ is registered and it takes
+ * arb->lock through the global pointer; with the old order a spurious
+ * early interrupt hit an uninitialized spinlock via a NULL 'arb'.
+ * The license string is also corrected: "GPLv2" is not a recognized
+ * MODULE_LICENSE value (it taints the kernel); "GPL v2" is.
+ */
+static int __init tegra_arb_init(void)
+{
+ struct tegra_arb_dev *dev = NULL;
+ int err, i;
+
+ dev = kzalloc(sizeof(struct tegra_arb_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ pr_err("%s: unable to alloc data struct.\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEGRA_RPC_MAX_SEM; i++) {
+ mutex_init(&dev->mutexes[i]);
+ init_completion(&dev->arb_gnt_complete[i]);
+ }
+
+ dev->sema_base = IO_ADDRESS(TEGRA_ARB_SEMA_BASE);
+ if (!dev->sema_base) {
+ pr_err("%s: can't get arb sema_base\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev->gnt_base = IO_ADDRESS(TEGRA_ARBGNT_ICTLR_BASE);
+ if (!dev->gnt_base) {
+ pr_err("%s: can't ioremap gnt_base\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Make the device visible to the ISR before the IRQ can fire. */
+ spin_lock_init(&dev->lock);
+ arb = dev;
+
+ dev->irq = INT_GNT_1;
+ err = request_irq(dev->irq, arb_gnt_isr, 0, "rpc-arbsema", dev);
+ if (err) {
+ pr_err("%s: request_irq(%d) failed(%d)\n", __func__,
+ dev->irq, err);
+ arb = NULL;
+ goto out;
+ }
+
+ pr_info("%s: initialized\n", __func__);
+ return 0;
+
+out:
+ kfree(dev);
+ pr_err("%s: initialization failed.\n", __func__);
+ return err;
+}
+subsys_initcall(tegra_arb_init);
+
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-harmony-panel.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/tegra_fb.h>
+
+/* Framebuffer */
+static struct resource fb_resource[] = {
+ [0] = {
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ /* NOTE(review): hard-coded framebuffer carveout (5 MiB) —
+ * presumably reserved by the bootloader; confirm against the
+ * memory map before changing. */
+ .start = 0x1c012000,
+ .end = 0x1c012000 + 0x500000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+/* 1024x600 @ 16bpp panel/framebuffer geometry for Harmony. */
+static struct tegra_fb_lcd_data tegra_fb_lcd_platform_data = {
+ .lcd_xres = 1024,
+ .lcd_yres = 600,
+ .fb_xres = 1024,
+ .fb_yres = 600,
+ .bits_per_pixel = 16,
+};
+
+/* Platform device bound by the "tegrafb" framebuffer driver. */
+static struct platform_device tegra_fb_device = {
+ .name = "tegrafb",
+ .id = 0,
+ .resource = fb_resource,
+ .num_resources = ARRAY_SIZE(fb_resource),
+ .dev = {
+ .platform_data = &tegra_fb_lcd_platform_data,
+ },
+};
+
+/* Register the Harmony framebuffer device (kernel style: brace on its
+ * own line for function definitions). */
+int __init harmony_panel_init(void)
+{
+ return platform_device_register(&tegra_fb_device);
+}
+
*/
#include <linux/kernel.h>
+#include <linux/init.h>
#include <mach/pinmux.h>
#include "board-harmony.h"
-static struct tegra_pingroup_config harmony_pinmux[] = {
+static __initdata struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
-void harmony_pinmux_init(void)
+void __init harmony_pinmux_init(void)
{
tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-harmony-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+#include <mach/pinmux.h>
+
+#include "gpio-names.h"
+
+/* IRQ + register windows for SDMMC controllers 1, 2 and 4 (controller 3
+ * is unused on Harmony). */
+static struct resource sdhci_resource1[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC2,
+ .end = INT_SDMMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC2_BASE,
+ .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource4[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+/* Controller 1: no card-detect/write-protect/power GPIOs (-1 = unused). */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data1 = {
+ .clk_id = NULL,
+ .force_hs = 1,
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+};
+
+/* Controller 2: removable SD slot with CD/WP/power GPIOs. */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+ .clk_id = NULL,
+ .force_hs = 1,
+ .cd_gpio = TEGRA_GPIO_PI5,
+ .wp_gpio = TEGRA_GPIO_PH1,
+ .power_gpio = TEGRA_GPIO_PT3,
+};
+
+/* Controller 4: second slot; high-speed not forced. */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data4 = {
+ .clk_id = NULL,
+ .force_hs = 0,
+ .cd_gpio = TEGRA_GPIO_PH2,
+ .wp_gpio = TEGRA_GPIO_PH3,
+ .power_gpio = TEGRA_GPIO_PI6,
+};
+
+/* Platform devices bound by the "sdhci-tegra" driver; .id selects the
+ * controller instance (0, 1, 3 — controller index, not slot number). */
+static struct platform_device tegra_sdhci_device1 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource1,
+ .num_resources = ARRAY_SIZE(sdhci_resource1),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data1,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 1,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device4 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource4,
+ .num_resources = ARRAY_SIZE(sdhci_resource4),
+ .dev = {
+ .platform_data = &tegra_sdhci_platform_data4,
+ },
+};
+
+/*
+ * Claim and configure the SD-slot GPIOs (power on, CD/WP as pinmux'd
+ * GPIOs), then register the three SDHCI controllers.
+ *
+ * NOTE(review): every gpio_request() return value is ignored, so a
+ * conflicting claim fails silently and the later direction/enable calls
+ * operate on an unowned GPIO — consider checking and logging failures.
+ */
+int __init harmony_sdhci_init(void)
+{
+ gpio_request(tegra_sdhci_platform_data2.power_gpio, "sdhci2_power");
+ gpio_request(tegra_sdhci_platform_data2.cd_gpio, "sdhci2_cd");
+ gpio_request(tegra_sdhci_platform_data2.wp_gpio, "sdhci2_wp");
+
+ tegra_gpio_enable(tegra_sdhci_platform_data2.power_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data2.cd_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data2.wp_gpio);
+
+ gpio_request(tegra_sdhci_platform_data4.power_gpio, "sdhci4_power");
+ gpio_request(tegra_sdhci_platform_data4.cd_gpio, "sdhci4_cd");
+ gpio_request(tegra_sdhci_platform_data4.wp_gpio, "sdhci4_wp");
+
+ tegra_gpio_enable(tegra_sdhci_platform_data4.power_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data4.cd_gpio);
+ tegra_gpio_enable(tegra_sdhci_platform_data4.wp_gpio);
+
+ gpio_direction_output(tegra_sdhci_platform_data2.power_gpio, 1);
+ gpio_direction_output(tegra_sdhci_platform_data4.power_gpio, 1);
+
+ platform_device_register(&tegra_sdhci_device1);
+ platform_device_register(&tegra_sdhci_device2);
+ platform_device_register(&tegra_sdhci_device4);
+
+ return 0;
+}
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/pda_power.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/iomap.h>
#include <mach/irqs.h>
+#include <mach/nand.h>
+#include <mach/clk.h>
+#include "clock.h"
#include "board.h"
#include "board-harmony.h"
#include "clock.h"
+#include "devices.h"
/* NVidia bootloader tags */
#define ATAG_NVIDIA 0x41000801
}
__tagtable(ATAG_NVIDIA, parse_tag_nvidia);
+/*
+ * NAND chips Harmony may ship with, identified by JEDEC vendor/device id.
+ * NOTE(review): timing fields are presumably nanoseconds per the Tegra
+ * NAND controller's datasheet naming (trp/trh/twp/...) — confirm units
+ * against <mach/nand.h> before editing.
+ */
+static struct tegra_nand_chip_parms nand_chip_parms[] = {
+ /* Samsung K5E2G1GACM */
+ [0] = {
+ .vendor_id = 0xEC,
+ .device_id = 0xAA,
+ .capacity = 256,
+ .timing = {
+ .trp = 21,
+ .trh = 15,
+ .twp = 21,
+ .twh = 15,
+ .tcs = 31,
+ .twhr = 60,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 30,
+ .tadl = 100,
+ },
+ },
+ /* Hynix H5PS1GB3EFR */
+ [1] = {
+ .vendor_id = 0xAD,
+ .device_id = 0xDC,
+ .capacity = 512,
+ .timing = {
+ .trp = 12,
+ .trh = 10,
+ .twp = 12,
+ .twh = 10,
+ .tcs = 20,
+ .twhr = 80,
+ .tcr_tar_trr = 20,
+ .twb = 100,
+ .trp_resp = 20,
+ .tadl = 70,
+ },
+ },
+};
+
+/* Non-static: NOTE(review) — if nothing outside this file takes it via
+ * extern, this could be static; left as-is to preserve linkage. */
+struct tegra_nand_platform harmony_nand_data = {
+ .max_chips = 8,
+ .chip_parms = nand_chip_parms,
+ .nr_chip_parms = ARRAY_SIZE(nand_chip_parms),
+};
+
+/* The NAND controller only needs its interrupt; registers come from the
+ * driver's static mapping. */
+static struct resource resources_nand[] = {
+ [0] = {
+ .start = INT_NANDFLASH,
+ .end = INT_NANDFLASH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* Platform device bound by the "tegra_nand" driver. */
+struct platform_device tegra_nand_device = {
+ .name = "tegra_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(resources_nand),
+ .resource = resources_nand,
+ .dev = {
+ .platform_data = &harmony_nand_data,
+ },
+};
+
static struct plat_serial8250_port debug_uart_platform_data[] = {
{
.membase = IO_ADDRESS(TEGRA_UARTD_BASE),
},
};
+/* PDA power */
+/* Empty pdata: the pda_power driver falls back to its defaults (no
+ * AC/USB detection callbacks wired up on this board). */
+static struct pda_power_pdata pda_power_pdata = {
+};
+
+static struct platform_device pda_power_device = {
+ .name = "pda_power",
+ .id = -1,
+ .dev = {
+ .platform_data = &pda_power_pdata,
+ },
+};
+
static struct platform_device *harmony_devices[] __initdata = {
&debug_uart,
+ &pmu_device,
+ &tegra_nand_device,
+ &tegra_udc_device,
+ &pda_power_device,
+ &tegra_i2c_device1,
+ &tegra_i2c_device2,
+ &tegra_i2c_device3,
+ &tegra_i2c_device4,
+ &tegra_spi_device1,
+ &tegra_spi_device2,
+ &tegra_spi_device3,
+ &tegra_spi_device4,
+ &tegra_gart_device,
};
static void __init tegra_harmony_fixup(struct machine_desc *desc,
harmony_pinmux_init();
platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices));
+
+ harmony_panel_init();
+ harmony_sdhci_init();
}
MACHINE_START(HARMONY, "harmony")
#define _MACH_TEGRA_BOARD_HARMONY_H
void harmony_pinmux_init(void);
+int harmony_panel_init(void);
+int harmony_sdhci_init(void);
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-ventana-panel.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/pwm_backlight.h>
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+
+#include "devices.h"
+#include "gpio-names.h"
+
+#define ventana_bl_enb TEGRA_GPIO_PD4
+#define ventana_lvds_shutdown TEGRA_GPIO_PB2
+#define ventana_hdmi_hpd TEGRA_GPIO_PN7
+#define ventana_hdmi_enb TEGRA_GPIO_PV5
+
+/*
+ * pwm-backlight init hook: claim the backlight-enable GPIO and drive it
+ * high; release it on failure.  Fixes vs. original: opening brace moved
+ * to its own line (kernel CodingStyle) and the stray ';' after the
+ * function body removed (an empty file-scope declaration, flagged by
+ * -Wpedantic/checkpatch).
+ */
+static int ventana_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(ventana_bl_enb, "backlight_enb");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(ventana_bl_enb, 1);
+ if (ret < 0)
+ gpio_free(ventana_bl_enb);
+ else
+ tegra_gpio_enable(ventana_bl_enb);
+
+ return ret;
+}
+
+/*
+ * pwm-backlight exit hook: turn the enable GPIO off and release it.
+ * Brace moved to its own line per kernel CodingStyle.
+ * NOTE(review): tegra_gpio_disable() runs after gpio_free(); between
+ * the two calls another claimant could own the line — consider
+ * disabling before freeing.
+ */
+static void ventana_backlight_exit(struct device *dev)
+{
+ gpio_set_value(ventana_bl_enb, 0);
+ gpio_free(ventana_bl_enb);
+ tegra_gpio_disable(ventana_bl_enb);
+}
+
+/* Brightness-change callback: gate the enable GPIO on any non-zero
+ * level and pass the brightness through unchanged. */
+static int ventana_backlight_notify(struct device *unused, int brightness)
+{
+ gpio_set_value(ventana_bl_enb, !!brightness);
+ return brightness;
+}
+
+/* PWM channel 2 drives the backlight at a 5 ms period (200 Hz); default
+ * brightness 224/255 at boot. */
+static struct platform_pwm_backlight_data ventana_backlight_data = {
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .dft_brightness = 224,
+ .pwm_period_ns = 5000000,
+ .init = ventana_backlight_init,
+ .exit = ventana_backlight_exit,
+ .notify = ventana_backlight_notify,
+};
+
+static struct platform_device ventana_backlight_device = {
+ .name = "pwm-backlight",
+ .id = -1,
+ .dev = {
+ .platform_data = &ventana_backlight_data,
+ },
+};
+
+/*
+ * Power up the LVDS panel: enable the avdd_lvds rail (looked up lazily
+ * on first call and cached) and deassert LVDS shutdown.
+ *
+ * NOTE(review): the cached regulator is never regulator_put(), and the
+ * regulator_enable() return value is ignored — acceptable for a board
+ * file, but worth confirming.  If the first regulator_get() fails the
+ * error pointer is cached and retried never.
+ */
+static int ventana_panel_enable(void)
+{
+ static struct regulator *reg = NULL;
+
+ if (reg == NULL) {
+ reg = regulator_get(NULL, "avdd_lvds");
+ if (WARN_ON(IS_ERR(reg)))
+ pr_err("%s: couldn't get regulator avdd_lvds: %ld\n",
+ __func__, PTR_ERR(reg));
+ else
+ regulator_enable(reg);
+ }
+
+ gpio_set_value(ventana_lvds_shutdown, 1);
+ return 0;
+}
+
+/* Assert LVDS shutdown; the avdd_lvds rail is deliberately left on
+ * (see ventana_panel_enable). */
+static int ventana_panel_disable(void)
+{
+ gpio_set_value(ventana_lvds_shutdown, 0);
+ return 0;
+}
+
+/* Switch the HDMI +5V enable rail on/off via GPIO. */
+static int ventana_hdmi_enable(void)
+{
+ gpio_set_value(ventana_hdmi_enb, 1);
+ return 0;
+}
+
+static int ventana_hdmi_disable(void)
+{
+ gpio_set_value(ventana_hdmi_enb, 0);
+ return 0;
+}
+
+static struct resource ventana_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0x18012000,
+ .end = 0x18414000 - 1, /* enough for 1080P 16bpp */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource ventana_disp2_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x18414000,
+ .end = 0x18BFD000 - 1,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct tegra_dc_mode ventana_panel_modes[] = {
+ {
+ .pclk = 62200000,
+ .h_ref_to_sync = 11,
+ .v_ref_to_sync = 1,
+ .h_sync_width = 58,
+ .v_sync_width = 4,
+ .h_back_porch = 58,
+ .v_back_porch = 4,
+ .h_active = 1366,
+ .v_active = 768,
+ .h_front_porch = 58,
+ .v_front_porch = 4,
+ },
+};
+
+static struct tegra_fb_data ventana_fb_data = {
+ .win = 0,
+ .xres = 1366,
+ .yres = 768,
+ .bits_per_pixel = 16,
+};
+
+static struct tegra_fb_data ventana_hdmi_fb_data = {
+ .win = 0,
+ .xres = 1280,
+ .yres = 720,
+ .bits_per_pixel = 16,
+};
+
+static struct tegra_dc_out ventana_disp1_out = {
+ .type = TEGRA_DC_OUT_RGB,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .modes = ventana_panel_modes,
+ .n_modes = ARRAY_SIZE(ventana_panel_modes),
+
+ .enable = ventana_panel_enable,
+ .disable = ventana_panel_disable,
+};
+
+static struct tegra_dc_out ventana_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+
+ .dcc_bus = 1,
+ .hotplug_gpio = ventana_hdmi_hpd,
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = ventana_hdmi_enable,
+ .disable = ventana_hdmi_disable,
+};
+
+static struct tegra_dc_platform_data ventana_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &ventana_disp1_out,
+ .fb = &ventana_fb_data,
+};
+
+static struct tegra_dc_platform_data ventana_disp2_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &ventana_disp2_out,
+ .fb = &ventana_hdmi_fb_data,
+};
+
+static struct nvhost_device ventana_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = ventana_disp1_resources,
+ .num_resources = ARRAY_SIZE(ventana_disp1_resources),
+ .dev = {
+ .platform_data = &ventana_disp1_pdata,
+ },
+};
+
+static struct nvhost_device ventana_disp2_device = {
+ .name = "tegradc",
+ .id = 1,
+ .resource = ventana_disp2_resources,
+ .num_resources = ARRAY_SIZE(ventana_disp2_resources),
+ .dev = {
+ .platform_data = &ventana_disp2_pdata,
+ },
+};
+
+/* nvmap heaps: on-chip IRAM plus a DRAM carveout starting right after
+ * the display fbmem regions declared above (0x18C00000). */
+static struct nvmap_platform_carveout ventana_carveouts[] = {
+ [0] = {
+ .name = "iram",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_IRAM,
+ .base = TEGRA_IRAM_BASE,
+ .size = TEGRA_IRAM_SIZE,
+ .buddy_size = 0, /* no buddy allocation for IRAM */
+ },
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0x18C00000,
+ .size = SZ_128M - 0xC00000,
+ .buddy_size = SZ_32K,
+ },
+};
+
+static struct nvmap_platform_data ventana_nvmap_data = {
+ .carveouts = ventana_carveouts,
+ .nr_carveouts = ARRAY_SIZE(ventana_carveouts),
+};
+
+static struct platform_device ventana_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &ventana_nvmap_data,
+ },
+};
+
+/* Graphics stack devices registered together by ventana_panel_init(). */
+static struct platform_device *ventana_gfx_devices[] __initdata = {
+ &ventana_nvmap_device,
+ &tegra_grhost_device,
+ &tegra_pwfm2_device,
+ &ventana_backlight_device,
+};
+
+/*
+ * Board init for the Ventana display path: configure the LVDS-shutdown,
+ * HDMI-5V-enable and HDMI-hotplug GPIOs, then register the graphics
+ * platform devices and both display-controller nvhost devices.
+ * Returns the first registration error, or 0.
+ *
+ * NOTE(review): gpio_request()/gpio_direction_*() return values are
+ * ignored; a failed claim is silent — consider logging.
+ */
+int __init ventana_panel_init(void)
+{
+ int err;
+
+ gpio_request(ventana_lvds_shutdown, "lvds_shdn");
+ gpio_direction_output(ventana_lvds_shutdown, 1);
+ tegra_gpio_enable(ventana_lvds_shutdown);
+
+ gpio_request(ventana_hdmi_enb, "hdmi_5v_en");
+ gpio_direction_output(ventana_hdmi_enb, 0);
+ tegra_gpio_enable(ventana_hdmi_enb);
+
+ gpio_request(ventana_hdmi_hpd, "hdmi_hpd");
+ gpio_direction_input(ventana_hdmi_hpd);
+ tegra_gpio_enable(ventana_hdmi_hpd);
+
+ err = platform_add_devices(ventana_gfx_devices,
+ ARRAY_SIZE(ventana_gfx_devices));
+
+ if (!err)
+ err = nvhost_device_register(&ventana_disp1_device);
+
+ if (!err)
+ err = nvhost_device_register(&ventana_disp2_device);
+
+ return err;
+}
+
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-ventana-pinmux.c
+ *
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/pinmux.h>
+
+/* Conservative drive-strength defaults for a pad group: no high-speed
+ * mode, schmitt trigger on, slowest slew, mid-scale pull strength. */
+#define DEFAULT_DRIVE(_name) \
+ { \
+ .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \
+ .hsm = TEGRA_HSM_DISABLE, \
+ .schmitt = TEGRA_SCHMITT_ENABLE, \
+ .drive = TEGRA_DRIVE_DIV_1, \
+ .pull_down = TEGRA_PULL_31, \
+ .pull_up = TEGRA_PULL_31, \
+ .slew_rising = TEGRA_SLEW_SLOWEST, \
+ .slew_falling = TEGRA_SLEW_SLOWEST, \
+ }
+
+
+/* Pad groups that need explicit drive-strength programming on Ventana. */
+static __initdata struct tegra_drive_pingroup_config ventana_drive_pinmux[] = {
+ DEFAULT_DRIVE(DBG),
+ DEFAULT_DRIVE(DDC),
+ DEFAULT_DRIVE(VI1),
+ DEFAULT_DRIVE(VI2),
+ DEFAULT_DRIVE(SDIO1),
+};
+
+static __initdata struct tegra_pingroup_config ventana_pinmux[] = {
+ {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_OSC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LCSN, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LDC, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LM1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+};
+
+/* Apply the Ventana mux table then the drive-strength table; called
+ * once from the board's machine init. */
+void __init ventana_pinmux_init(void)
+{
+ tegra_pinmux_config_table(ventana_pinmux, ARRAY_SIZE(ventana_pinmux));
+ tegra_drive_pinmux_config_table(ventana_drive_pinmux,
+ ARRAY_SIZE(ventana_drive_pinmux));
+}
--- /dev/null
+/*
+ * Copyright (C) 2010 NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/i2c.h>
+#include <linux/pda_power.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/gpio.h>
+#include <mach/suspend.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+
+#include "gpio-names.h"
+#include "power.h"
+#include "wakeups-t2.h"
+#include "board.h"
+
+/* PMC control register: bit 17 selects active-low PMU interrupt polarity */
+#define PMC_CTRL 0x0
+#define PMC_CTRL_INTR_LOW (1 << 17)
+
+/*
+ * Consumer-supply maps for each TPS658621 rail: every table lists the
+ * supply name(s) — and, where non-NULL, the consuming device — fed by
+ * one SM buck or LDO output.
+ */
+static struct regulator_consumer_supply tps658621_sm0_supply[] = {
+	REGULATOR_SUPPLY("vdd_core", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm1_supply[] = {
+	REGULATOR_SUPPLY("vdd_cpu", NULL),
+};
+static struct regulator_consumer_supply tps658621_sm2_supply[] = {
+	REGULATOR_SUPPLY("vdd_sm2", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo0_supply[] = {
+	REGULATOR_SUPPLY("p_cam_avdd", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo1_supply[] = {
+	REGULATOR_SUPPLY("avdd_pll", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo2_supply[] = {
+	REGULATOR_SUPPLY("vdd_rtc", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo3_supply[] = {
+	REGULATOR_SUPPLY("avdd_usb", NULL),
+	REGULATOR_SUPPLY("avdd_usb_pll", NULL),
+	REGULATOR_SUPPLY("avdd_lvds", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo4_supply[] = {
+	REGULATOR_SUPPLY("avdd_osc", NULL),
+	REGULATOR_SUPPLY("vddio_sys", "panjit_touch"),
+};
+static struct regulator_consumer_supply tps658621_ldo5_supply[] = {
+	REGULATOR_SUPPLY("vcore_mmc", "sdhci-tegra.1"),
+	REGULATOR_SUPPLY("vcore_mmc", "sdhci-tegra.3"),
+};
+static struct regulator_consumer_supply tps658621_ldo6_supply[] = {
+	REGULATOR_SUPPLY("vddio_vi", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo7_supply[] = {
+	REGULATOR_SUPPLY("avdd_hdmi", NULL),
+	REGULATOR_SUPPLY("vdd_fuse", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo8_supply[] = {
+	REGULATOR_SUPPLY("avdd_hdmi_pll", NULL),
+};
+static struct regulator_consumer_supply tps658621_ldo9_supply[] = {
+	REGULATOR_SUPPLY("avdd_2v85", NULL),
+	REGULATOR_SUPPLY("vdd_ddr_rx", NULL),
+	REGULATOR_SUPPLY("avdd_amp", NULL),
+};
+
+/*
+ * Builds a regulator_init_data: voltage window in mV, with mode/status/
+ * voltage changes permitted, wired to the matching consumer table above.
+ */
+#define REGULATOR_INIT(_id, _minmv, _maxmv) \
+	{ \
+		.constraints = { \
+			.min_uV = (_minmv)*1000, \
+			.max_uV = (_maxmv)*1000, \
+			.valid_modes_mask = (REGULATOR_MODE_NORMAL | \
+					REGULATOR_MODE_STANDBY), \
+			.valid_ops_mask = (REGULATOR_CHANGE_MODE | \
+					REGULATOR_CHANGE_STATUS | \
+					REGULATOR_CHANGE_VOLTAGE), \
+		}, \
+		.num_consumer_supplies = ARRAY_SIZE(tps658621_##_id##_supply),\
+		.consumer_supplies = tps658621_##_id##_supply, \
+	}
+
+/* Per-rail init data; voltage limits are the TPS658621 hardware ranges. */
+static struct regulator_init_data sm0_data = REGULATOR_INIT(sm0, 725, 1500);
+static struct regulator_init_data sm1_data = REGULATOR_INIT(sm1, 725, 1500);
+static struct regulator_init_data sm2_data = REGULATOR_INIT(sm2, 3000, 4550);
+static struct regulator_init_data ldo0_data = REGULATOR_INIT(ldo0, 1250, 3300);
+static struct regulator_init_data ldo1_data = REGULATOR_INIT(ldo1, 725, 1500);
+static struct regulator_init_data ldo2_data = REGULATOR_INIT(ldo2, 725, 1500);
+static struct regulator_init_data ldo3_data = REGULATOR_INIT(ldo3, 1250, 3300);
+static struct regulator_init_data ldo4_data = REGULATOR_INIT(ldo4, 1700, 2475);
+static struct regulator_init_data ldo5_data = REGULATOR_INIT(ldo5, 1250, 3300);
+static struct regulator_init_data ldo6_data = REGULATOR_INIT(ldo6, 1250, 3300);
+static struct regulator_init_data ldo7_data = REGULATOR_INIT(ldo7, 1250, 3300);
+static struct regulator_init_data ldo8_data = REGULATOR_INIT(ldo8, 1250, 3300);
+static struct regulator_init_data ldo9_data = REGULATOR_INIT(ldo9, 1250, 3300);
+
+/* RTC alarm IRQ is relative to the PMU's virtual IRQ base (TEGRA_NR_IRQS). */
+static struct tps6586x_rtc_platform_data rtc_data = {
+	.irq = TEGRA_NR_IRQS + TPS6586X_INT_RTC_ALM1,
+};
+
+/* One tps6586x MFD subdevice cell per regulator rail. */
+#define TPS_REG(_id, _data) \
+	{ \
+		.id = TPS6586X_ID_##_id, \
+		.name = "tps6586x-regulator", \
+		.platform_data = _data, \
+	}
+
+static struct tps6586x_subdev_info tps_devs[] = {
+	TPS_REG(SM_0, &sm0_data),
+	TPS_REG(SM_1, &sm1_data),
+	TPS_REG(SM_2, &sm2_data),
+	TPS_REG(LDO_0, &ldo0_data),
+	TPS_REG(LDO_1, &ldo1_data),
+	TPS_REG(LDO_2, &ldo2_data),
+	TPS_REG(LDO_3, &ldo3_data),
+	TPS_REG(LDO_4, &ldo4_data),
+	TPS_REG(LDO_5, &ldo5_data),
+	TPS_REG(LDO_6, &ldo6_data),
+	TPS_REG(LDO_7, &ldo7_data),
+	TPS_REG(LDO_8, &ldo8_data),
+	TPS_REG(LDO_9, &ldo9_data),
+	{
+		.id = 0,
+		.name = "tps6586x-rtc",
+		.platform_data = &rtc_data,
+	},
+};
+
+/* PMU IRQs/GPIOs are stacked above the SoC's own ranges. */
+static struct tps6586x_platform_data tps_platform = {
+	.irq_base = TEGRA_NR_IRQS,
+	.num_subdevs = ARRAY_SIZE(tps_devs),
+	.subdevs = tps_devs,
+	.gpio_base = TEGRA_NR_GPIOS,
+};
+
+/* TPS6586x PMU lives at I2C address 0x34 on the DVC (power) bus. */
+static struct i2c_board_info __initdata ventana_regulators[] = {
+	{
+		I2C_BOARD_INFO("tps6586x", 0x34),
+		.irq = INT_EXTERNAL_PMU,
+		.platform_data = &tps_platform,
+	},
+};
+
+/* LP1 suspend parameters; wake on GPIO PV2 (power key) going low. */
+static struct tegra_suspend_platform_data ventana_suspend_data = {
+	.cpu_timer = 2000,
+	.cpu_off_timer = 0,
+	.suspend_mode = TEGRA_SUSPEND_LP1,
+	.core_timer = 0x7e7e,
+	.core_off_timer = 0,
+	.separate_req = true,
+	.corereq_high = false,
+	.sysclkreq_high = true,
+	.wake_enb = TEGRA_WAKE_GPIO_PV2,
+	.wake_high = 0,
+	.wake_low = TEGRA_WAKE_GPIO_PV2,
+	.wake_any = 0,
+};
+
+/*
+ * Set the PMC to treat the external PMU interrupt as active-low, register
+ * the TPS6586x on I2C adapter 4 (the DVC bus), and install the board's
+ * suspend parameters.  Always returns 0.
+ */
+int __init ventana_regulator_init(void)
+{
+	void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+	u32 pmc_ctrl;
+
+	/* configure the power management controller to trigger PMU
+	 * interrupts when low */
+	pmc_ctrl = readl(pmc + PMC_CTRL);
+	writel(pmc_ctrl | PMC_CTRL_INTR_LOW, pmc + PMC_CTRL);
+	/* NOTE(review): literal 1 matches ARRAY_SIZE(ventana_regulators);
+	 * keep them in sync if entries are added */
+	i2c_register_board_info(4, ventana_regulators, 1);
+	tegra_init_suspend(&ventana_suspend_data);
+	return 0;
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-harmony-sdhci.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/sdhci.h>
+
+#include "gpio-names.h"
+#include "board.h"
+
+/* IRQ + MMIO resources for SDMMC controllers 1, 3 and 4. */
+static struct resource sdhci_resource0[] = {
+	[0] = {
+		.start = INT_SDMMC1,
+		.end = INT_SDMMC1,
+		.flags = IORESOURCE_IRQ,
+	},
+	[1] = {
+		.start = TEGRA_SDMMC1_BASE,
+		.end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct resource sdhci_resource2[] = {
+	[0] = {
+		.start = INT_SDMMC3,
+		.end = INT_SDMMC3,
+		.flags = IORESOURCE_IRQ,
+	},
+	[1] = {
+		.start = TEGRA_SDMMC3_BASE,
+		.end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct resource sdhci_resource3[] = {
+	[0] = {
+		.start = INT_SDMMC4,
+		.end = INT_SDMMC4,
+		.flags = IORESOURCE_IRQ,
+	},
+	[1] = {
+		.start = TEGRA_SDMMC4_BASE,
+		.end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+/* Controller 0: no card-detect/write-protect/power GPIOs (-1 = unused). */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+	.clk_id = NULL,
+	.force_hs = 1,
+	.cd_gpio = -1,
+	.wp_gpio = -1,
+	.power_gpio = -1,
+};
+
+/* Controller 2: removable SD slot with CD/WP/power GPIOs. */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+	.clk_id = NULL,
+	.force_hs = 1,
+	.cd_gpio = TEGRA_GPIO_PI5,
+	.wp_gpio = TEGRA_GPIO_PH1,
+	.power_gpio = TEGRA_GPIO_PT3,
+};
+
+/* Controller 3: non-removable (eMMC); power GPIO only. */
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+	.clk_id = NULL,
+	.force_hs = 0,
+	.cd_gpio = -1,
+	.wp_gpio = -1,
+	.power_gpio = TEGRA_GPIO_PI6,
+};
+
+/* sdhci-tegra platform devices bound to the resource/pdata sets above. */
+static struct platform_device tegra_sdhci_device0 = {
+	.name = "sdhci-tegra",
+	.id = 0,
+	.resource = sdhci_resource0,
+	.num_resources = ARRAY_SIZE(sdhci_resource0),
+	.dev = {
+		.platform_data = &tegra_sdhci_platform_data0,
+	},
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+	.name = "sdhci-tegra",
+	.id = 2,
+	.resource = sdhci_resource2,
+	.num_resources = ARRAY_SIZE(sdhci_resource2),
+	.dev = {
+		.platform_data = &tegra_sdhci_platform_data2,
+	},
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+	.name = "sdhci-tegra",
+	.id = 3,
+	.resource = sdhci_resource3,
+	.num_resources = ARRAY_SIZE(sdhci_resource3),
+	.dev = {
+		.platform_data = &tegra_sdhci_platform_data3,
+	},
+};
+
+/*
+ * Claim and configure the SDHCI GPIOs, power up both supplies, then
+ * register the three controllers.  Always returns 0.
+ * NOTE(review): gpio_request() return values are ignored — a failed claim
+ * is not reported.
+ */
+int __init ventana_sdhci_init(void)
+{
+	gpio_request(tegra_sdhci_platform_data2.power_gpio, "sdhci2_power");
+	gpio_request(tegra_sdhci_platform_data2.cd_gpio, "sdhci2_cd");
+	gpio_request(tegra_sdhci_platform_data2.wp_gpio, "sdhci2_wp");
+	gpio_request(tegra_sdhci_platform_data3.power_gpio, "sdhci3_power");
+
+	tegra_gpio_enable(tegra_sdhci_platform_data2.power_gpio);
+	tegra_gpio_enable(tegra_sdhci_platform_data2.cd_gpio);
+	tegra_gpio_enable(tegra_sdhci_platform_data2.wp_gpio);
+	tegra_gpio_enable(tegra_sdhci_platform_data3.power_gpio);
+
+	gpio_direction_output(tegra_sdhci_platform_data2.power_gpio, 1);
+	gpio_direction_output(tegra_sdhci_platform_data3.power_gpio, 1);
+	gpio_set_value(tegra_sdhci_platform_data3.power_gpio, 1);
+
+	platform_device_register(&tegra_sdhci_device3);
+	platform_device_register(&tegra_sdhci_device2);
+	platform_device_register(&tegra_sdhci_device0);
+
+	return 0;
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-ventana.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/usb/android_composite.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include "board.h"
+#include "clock.h"
+#include "board-ventana.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "fuse.h"
+
+/* Debug console on UARTD, memory-mapped 8250, clocked at 216 MHz. */
+static struct plat_serial8250_port debug_uart_platform_data[] = {
+	{
+		.membase = IO_ADDRESS(TEGRA_UARTD_BASE),
+		.mapbase = TEGRA_UARTD_BASE,
+		.irq = INT_UARTD,
+		.flags = UPF_BOOT_AUTOCONF,
+		.iotype = UPIO_MEM,
+		.regshift = 2,
+		.uartclk = 216000000,
+	}, {
+		/* zeroed terminator entry */
+		.flags = 0,
+	}
+};
+
+static struct platform_device debug_uart = {
+	.name = "serial8250",
+	.id = PLAT8250_DEV_PLATFORM,
+	.dev = {
+		.platform_data = debug_uart_platform_data,
+	},
+};
+
+/* Initial clock configuration; NULL-name entry terminates the table. */
+static __initdata struct tegra_clk_init_table ventana_clk_init_table[] = {
+	/* name		parent		rate		enabled */
+	{ "uartd",	"pll_p",	216000000,	true},
+	{ "pll_m",	"clk_m",	600000000,	true},
+	{ "pwm",	"clk_32k",	32768,		false},
+	{ NULL,		NULL,		0,		0},
+};
+
+/* USB gadget function sets: MTP-only, and MTP+ADB (distinct product IDs). */
+static char *usb_functions[] = { "mtp" };
+static char *usb_functions_adb[] = { "mtp", "adb" };
+
+static struct android_usb_product usb_products[] = {
+	{
+		.product_id = 0x7102,
+		.num_functions = ARRAY_SIZE(usb_functions),
+		.functions = usb_functions,
+	},
+	{
+		.product_id = 0x7100,
+		.num_functions = ARRAY_SIZE(usb_functions_adb),
+		.functions = usb_functions_adb,
+	},
+};
+
+/* standard android USB platform data */
+static struct android_usb_platform_data andusb_plat = {
+	.vendor_id = 0x0955,
+	.product_id = 0x7100,
+	.manufacturer_name = "NVIDIA",
+	.product_name = "Ventana",
+	/* filled in at boot from the chip UID; see tegra_ventana_init() */
+	.serial_number = NULL,
+	.num_products = ARRAY_SIZE(usb_products),
+	.products = usb_products,
+	.num_functions = ARRAY_SIZE(usb_functions_adb),
+	.functions = usb_functions_adb,
+};
+
+static struct platform_device androidusb_device = {
+	.name = "android_usb",
+	.id = -1,
+	.dev = {
+		.platform_data = &andusb_plat,
+	},
+};
+
+/* I2C bus 1: single 400 kHz bus. */
+static struct tegra_i2c_platform_data ventana_i2c1_platform_data = {
+	.adapter_nr = 0,
+	.bus_count = 1,
+	.bus_clk_rate = { 400000, 0 },
+};
+
+/* Controller 2 is pinmuxed between the DDC pins and GEN2 (PTA) pins. */
+static const struct tegra_pingroup_config i2c2_ddc = {
+	.pingroup = TEGRA_PINGROUP_DDC,
+	.func = TEGRA_MUX_I2C2,
+};
+
+static const struct tegra_pingroup_config i2c2_gen2 = {
+	.pingroup = TEGRA_PINGROUP_PTA,
+	.func = TEGRA_MUX_I2C2,
+};
+
+/* I2C controller 2 exposes two muxed buses: DDC @400kHz, GEN2 @100kHz. */
+static struct tegra_i2c_platform_data ventana_i2c2_platform_data = {
+	.adapter_nr = 1,
+	.bus_count = 2,
+	.bus_clk_rate = { 400000, 100000 },
+	.bus_mux = { &i2c2_ddc, &i2c2_gen2 },
+	.bus_mux_len = { 1, 1 },
+};
+
+static struct tegra_i2c_platform_data ventana_i2c3_platform_data = {
+	.adapter_nr = 3,
+	.bus_count = 1,
+	.bus_clk_rate = { 400000, 0 },
+};
+
+/* DVC controller (PMU bus), adapter 4 — matches ventana_regulator_init(). */
+static struct tegra_i2c_platform_data ventana_dvc_platform_data = {
+	.adapter_nr = 4,
+	.bus_count = 1,
+	.bus_clk_rate = { 400000, 0 },
+	.is_dvc = true,
+};
+
+/* Attach per-bus platform data and register all four I2C controllers. */
+static void ventana_i2c_init(void)
+{
+	tegra_i2c_device1.dev.platform_data = &ventana_i2c1_platform_data;
+	tegra_i2c_device2.dev.platform_data = &ventana_i2c2_platform_data;
+	tegra_i2c_device3.dev.platform_data = &ventana_i2c3_platform_data;
+	tegra_i2c_device4.dev.platform_data = &ventana_dvc_platform_data;
+
+	platform_device_register(&tegra_i2c_device4);
+	platform_device_register(&tegra_i2c_device3);
+	platform_device_register(&tegra_i2c_device2);
+	platform_device_register(&tegra_i2c_device1);
+}
+
+/* Active-low GPIO key with 10 ms debounce; _iswake marks wakeup sources. */
+#define GPIO_KEY(_id, _gpio, _iswake) \
+	{ \
+		.code = _id, \
+		.gpio = TEGRA_GPIO_##_gpio, \
+		.active_low = 1, \
+		.desc = #_id, \
+		.type = EV_KEY, \
+		.wakeup = _iswake, \
+		.debounce_interval = 10, \
+	}
+
+/* Only the power key (PV2) can wake the system — see ventana_suspend_data. */
+static struct gpio_keys_button ventana_keys[] = {
+	[0] = GPIO_KEY(KEY_MENU, PQ3, 0),
+	[1] = GPIO_KEY(KEY_HOME, PQ1, 0),
+	[2] = GPIO_KEY(KEY_BACK, PQ2, 0),
+	[3] = GPIO_KEY(KEY_VOLUMEUP, PQ5, 0),
+	[4] = GPIO_KEY(KEY_VOLUMEDOWN, PQ4, 0),
+	[5] = GPIO_KEY(KEY_POWER, PV2, 1),
+};
+
+static struct gpio_keys_platform_data ventana_keys_platform_data = {
+	.buttons = ventana_keys,
+	.nbuttons = ARRAY_SIZE(ventana_keys),
+};
+
+static struct platform_device ventana_keys_device = {
+	.name = "gpio-keys",
+	.id = 0,
+	.dev = {
+		.platform_data = &ventana_keys_platform_data,
+	},
+};
+
+/* Devices registered in bulk by tegra_ventana_init(). */
+static struct platform_device *ventana_devices[] __initdata = {
+	&tegra_otg_device,
+	&androidusb_device,
+	&debug_uart,
+	&pmu_device,
+	&tegra_udc_device,
+	&tegra_gart_device,
+	&tegra_aes_device,
+	&ventana_keys_device,
+};
+
+/* Route every key's pad to the GPIO controller (instead of its SFIO). */
+static void ventana_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ventana_keys); i++)
+		tegra_gpio_enable(ventana_keys[i].gpio);
+}
+
+static struct panjit_i2c_ts_platform_data panjit_data = {
+	.gpio_reset = TEGRA_GPIO_PQ7,
+};
+
+/* Panjit touch controller at 0x3 on bus 0; IRQ from GPIO PV6. */
+static const struct i2c_board_info ventana_i2c_bus1_touch_info[] = {
+	{
+		I2C_BOARD_INFO("panjit_touch", 0x3),
+		.irq = TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV6),
+		.platform_data = &panjit_data,
+	},
+};
+
+/* Enable the touch IRQ/reset GPIOs and register the touch controller. */
+static int __init ventana_touch_init(void)
+{
+	tegra_gpio_enable(TEGRA_GPIO_PV6);
+
+	tegra_gpio_enable(TEGRA_GPIO_PQ7);
+	i2c_register_board_info(0, ventana_i2c_bus1_touch_info, 1);
+
+	return 0;
+}
+
+/*
+ * Board init entry point: common SoC init, clocks, pinmux, then per-
+ * subsystem bring-up.  The USB serial number is derived from the chip UID.
+ */
+static void __init tegra_ventana_init(void)
+{
+	char serial[20];
+
+	tegra_common_init();
+	tegra_clk_init_from_table(ventana_clk_init_table);
+	ventana_pinmux_init();
+
+	/* kstrdup leaks intentionally: the pdata keeps it for the device's
+	 * lifetime */
+	snprintf(serial, sizeof(serial), "%llx", tegra_chip_uid());
+	andusb_plat.serial_number = kstrdup(serial, GFP_KERNEL);
+	platform_add_devices(ventana_devices, ARRAY_SIZE(ventana_devices));
+
+	ventana_sdhci_init();
+	ventana_i2c_init();
+	ventana_regulator_init();
+	ventana_touch_init();
+	ventana_keys_init();
+	ventana_panel_init();
+}
+
+MACHINE_START(VENTANA, "ventana")
+	.boot_params  = 0x00000100,
+	.phys_io        = IO_APB_PHYS,
+	.io_pg_offst    = ((IO_APB_VIRT) >> 18) & 0xfffc,
+	.init_irq       = tegra_init_irq,
+	.init_machine   = tegra_ventana_init,
+	.map_io         = tegra_map_common_io,
+	.timer          = &tegra_timer,
+MACHINE_END
--- /dev/null
+/*
+ * arch/arm/mach-tegra/board-ventana.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_BOARD_VENTANA_H
+#define _MACH_TEGRA_BOARD_VENTANA_H
+
+/* Per-subsystem board init hooks, called from tegra_ventana_init(). */
+int ventana_regulator_init(void);
+int ventana_sdhci_init(void);
+/* Defined as 'void __init ventana_pinmux_init(void)' in the pinmux file;
+ * the prototype must match the definition's return type. */
+void ventana_pinmux_init(void);
+int ventana_panel_init(void);
+
+#endif
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
void __init tegra_init_clock(void);
+void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
+ unsigned long fb2_size);
+void __init tegra_protected_aperture_init(unsigned long aperture);
+void tegra_move_framebuffer(unsigned long to, unsigned long from,
+ unsigned long size);
+int tegra_dvfs_rail_disable_by_name(const char *reg_id);
+
+extern unsigned long tegra_bootloader_fb_start;
+extern unsigned long tegra_bootloader_fb_size;
+extern unsigned long tegra_fb_start;
+extern unsigned long tegra_fb_size;
+extern unsigned long tegra_fb2_start;
+extern unsigned long tegra_fb2_size;
+extern unsigned long tegra_carveout_start;
+extern unsigned long tegra_carveout_size;
+extern unsigned long tegra_lp0_vec_start;
+extern unsigned long tegra_lp0_vec_size;
+extern unsigned long tegra_grhost_aperture;
extern struct sys_timer tegra_timer;
#endif
#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+
#include <asm/clkdev.h>
+#include <mach/clk.h>
+
+#include "board.h"
#include "clock.h"
+#include "dvfs.h"
+/*
+ * Locking:
+ *
+ * Each struct clk has a lock. Depending on the cansleep flag, that lock
+ * may be a spinlock or a mutex. For most clocks, the spinlock is sufficient,
+ * and using the spinlock allows the clock to be manipulated from an interrupt
+ * or while holding a spinlock. Some clocks may need to adjust a regulator
+ * in order to maintain the required voltage for a new frequency. Those
+ * clocks set the cansleep flag, and take a mutex so that the regulator api
+ * can be used while holding the lock.
+ *
+ * To avoid AB-BA locking problems, locks must always be traversed from child
+ * clock to parent clock. For example, when enabling a clock, the clock's lock
+ * is taken, and then clk_enable is called on the parent, which takes the
+ * parent clock's lock. There are two exceptions to this ordering:
+ * 1. When setting a clock as cansleep, in which case the entire list of clocks
+ * is traversed to set the children as cansleep as well. This must occur
+ * during init, before any calls to clk_get, so no other clock locks can
+ * get taken.
+ * 2. When dumping the clock tree through debugfs. In this case, clk_lock_all
+ *    is called, which attempts to iterate through the entire list of clocks
+ *    and take every clock lock. If any call to clk_trylock fails, all locked
+ *    clocks are unlocked, and the process is retried. When all the locks
+ * are held, the only clock operation that can be called is
+ * clk_get_rate_all_locked.
+ *
+ * Within a single clock, no clock operation can call another clock operation
+ * on itself, except for clk_get_rate_locked. Any clock operation can call
+ * any other clock operation on any of its possible parents.
+ *
+ * clk_set_cansleep is used to mark a clock as sleeping. It is called during
+ * dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
+ * dvfs requirement. It can only be called on clocks that are the sole parent
+ * of all of their child clocks, meaning the child clock can not be reparented
+ * onto a different, possibly non-sleeping, clock. This is inherently true
+ * of all leaf clocks in the clock tree.
+ *
+ * An additional lock, clock_list_lock, is used to protect the list of all
+ * clocks.
+ *
+ * The clock operations must lock internally to protect against
+ * read-modify-write on registers that are shared by multiple clocks
+ */
+/* Protects the global 'clocks' list; per-clock state has its own lock. */
+static DEFINE_MUTEX(clock_list_lock);
 static LIST_HEAD(clocks);
-static DEFINE_SPINLOCK(clock_lock);
+/* True if the clock participates in automatic DVFS rate tracking. */
+static inline bool clk_is_auto_dvfs(struct clk *c)
+{
+	return c->auto_dvfs;
+}
+
+/* True if the clock has a dvfs entry attached at all. */
+static inline bool clk_is_dvfs(struct clk *c)
+{
+	return (c->dvfs != NULL);
+}
+
+static inline bool clk_cansleep(struct clk *c)
+{
+	return c->cansleep;
+}
+
+/* Take the clock's mutex or irq-safe spinlock depending on cansleep. */
+#define clk_lock_save(c, flags) \
+	do { \
+		if (clk_cansleep(c)) { \
+			flags = 0; \
+			mutex_lock(&c->mutex); \
+		} else { \
+			spin_lock_irqsave(&c->spinlock, flags); \
+		} \
+	} while (0)
+
+#define clk_unlock_restore(c, flags) \
+	do { \
+		if (clk_cansleep(c)) \
+			mutex_unlock(&c->mutex); \
+		else \
+			spin_unlock_irqrestore(&c->spinlock, flags); \
+	} while (0)
+
+/* Both locks are initialized; which one is used depends on cansleep. */
+static inline void clk_lock_init(struct clk *c)
+{
+	mutex_init(&c->mutex);
+	spin_lock_init(&c->spinlock);
+}
struct clk *tegra_get_clock_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
+	/* list walk is now guarded by the list mutex, not the old global
+	 * spinlock */
+	mutex_lock(&clock_list_lock);
	list_for_each_entry(c, &clocks, node) {
		if (strcmp(c->name, name) == 0) {
			ret = c;
			break;
		}
	}
-	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_unlock(&clock_list_lock);
	return ret;
}
-int clk_reparent(struct clk *c, struct clk *parent)
+/* Must be called with clk_lock(c) held */
+/* Compute c's rate as if parented to p: parent_rate * mul / div, with
+ * rounding; u64 math avoids overflow of the intermediate product. */
+static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
-	pr_debug("%s: %s\n", __func__, c->name);
-	if (c->refcnt && c->parent)
-		clk_disable_locked(c->parent);
-	c->parent = parent;
-	if (c->refcnt && c->parent)
-		clk_enable_locked(c->parent);
-	list_del(&c->sibling);
-	list_add_tail(&c->sibling, &parent->children);
-	return 0;
+	u64 rate;
+
+	rate = clk_get_rate(p);
+
+	if (c->mul != 0 && c->div != 0) {
+		rate *= c->mul;
+		rate += c->div / 2; /* round up */
+		do_div(rate, c->div);
+	}
+
+	return rate;
}
-static void propagate_rate(struct clk *c)
+/* Must be called with clk_lock(c) held */
+/* Rate is derived on demand from the parent; only root clocks cache it. */
+unsigned long clk_get_rate_locked(struct clk *c)
{
-	struct clk *clkp;
-	pr_debug("%s: %s\n", __func__, c->name);
-	list_for_each_entry(clkp, &c->children, sibling) {
-		pr_debug("   %s\n", clkp->name);
-		if (clkp->ops->recalculate_rate)
-			clkp->ops->recalculate_rate(clkp);
-		propagate_rate(clkp);
-	}
+	unsigned long rate;
+
+	if (c->parent)
+		rate = clk_predict_rate_from_parent(c, c->parent);
+	else
+		rate = c->rate;
+
+	return rate;
}
-void clk_init(struct clk *c)
+/* Public entry point: takes the clock's own lock around the computation. */
+unsigned long clk_get_rate(struct clk *c)
{
	unsigned long flags;
+	unsigned long rate;
+
+	clk_lock_save(c, flags);
+
+	rate = clk_get_rate_locked(c);
-	spin_lock_irqsave(&clock_lock, flags);
+	clk_unlock_restore(c, flags);
-	INIT_LIST_HEAD(&c->children);
-	INIT_LIST_HEAD(&c->sibling);
+	return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/* Recursively mark c and every current child as sleeping (mutex-locked).
+ * Safe only before clk_get: asserts neither lock is currently held. */
+static void __clk_set_cansleep(struct clk *c)
+{
+	struct clk *child;
+	BUG_ON(mutex_is_locked(&c->mutex));
+	BUG_ON(spin_is_locked(&c->spinlock));
+
+	list_for_each_entry(child, &clocks, node) {
+		if (child->parent != c)
+			continue;
+
+		/* a child that can be reparented might later point at a
+		 * non-sleeping parent, breaking the invariant */
+		WARN(child->ops && child->ops->set_parent,
+			"can't make child clock %s of %s "
+			"sleepable if it's parent could change",
+			child->name, c->name);
+
+		__clk_set_cansleep(child);
+	}
+
+	c->cansleep = true;
+}
+
+/* Must be called before any clk_get calls */
+void clk_set_cansleep(struct clk *c)
+{
+
+	mutex_lock(&clock_list_lock);
+	__clk_set_cansleep(c);
+	mutex_unlock(&clock_list_lock);
+}
+
+/* Caller (the clock op) now owns enable bookkeeping; just swap the link. */
+int clk_reparent(struct clk *c, struct clk *parent)
+{
+	c->parent = parent;
+	return 0;
+}
+
+/* Register a clock: init locks/hardware state, then add to the global
+ * list.  Clocks with no enable op are treated as always-on. */
+void clk_init(struct clk *c)
+{
+	clk_lock_init(c);
	if (c->ops && c->ops->init)
		c->ops->init(c);
-	list_add(&c->node, &clocks);
-
-	if (c->parent)
-		list_add_tail(&c->sibling, &c->parent->children);
+	if (!c->ops || !c->ops->enable) {
+		c->refcnt++;
+		c->set = true;
+		if (c->parent)
+			c->state = c->parent->state;
+		else
+			c->state = ON;
+	}
-	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_lock(&clock_list_lock);
+	list_add(&c->node, &clocks);
+	mutex_unlock(&clock_list_lock);
}
-int clk_enable_locked(struct clk *c)
+/* Enable a clock, recursively enabling parents first.  DVFS voltage is
+ * raised for the current rate before the hardware enable. */
+int clk_enable(struct clk *c)
{
-	int ret;
-	pr_debug("%s: %s\n", __func__, c->name);
+	int ret = 0;
+	unsigned long flags;
+
+	clk_lock_save(c, flags);
+
+	if (clk_is_auto_dvfs(c)) {
+		ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
+		if (ret)
+			goto out;
+	}
+
	if (c->refcnt == 0) {
		if (c->parent) {
-			ret = clk_enable_locked(c->parent);
+			/* child lock held while taking parent lock: the
+			 * documented child-to-parent ordering */
+			ret = clk_enable(c->parent);
			if (ret)
-				return ret;
+				goto out;
		}
		if (c->ops && c->ops->enable) {
			ret = c->ops->enable(c);
			if (ret) {
				if (c->parent)
-					clk_disable_locked(c->parent);
-				return ret;
+					clk_disable(c->parent);
+				goto out;
			}
			c->state = ON;
-#ifdef CONFIG_DEBUG_FS
-			c->set = 1;
-#endif
+			c->set = true;
		}
	}
	c->refcnt++;
-
-	return 0;
-}
-
-int clk_enable(struct clk *c)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
-	ret = clk_enable_locked(c);
-	spin_unlock_irqrestore(&clock_lock, flags);
+out:
+	clk_unlock_restore(c, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);
-void clk_disable_locked(struct clk *c)
+/* Drop one enable reference; gates the hardware and releases the parent
+ * on the last reference, then lets DVFS drop the voltage. */
+void clk_disable(struct clk *c)
{
-	pr_debug("%s: %s\n", __func__, c->name);
+	unsigned long flags;
+
+	clk_lock_save(c, flags);
+
	if (c->refcnt == 0) {
		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
+		clk_unlock_restore(c, flags);
		return;
	}
	if (c->refcnt == 1) {
		c->ops->disable(c);
		if (c->parent)
-			clk_disable_locked(c->parent);
+			clk_disable(c->parent);
		c->state = OFF;
	}
	c->refcnt--;
-}
-void clk_disable(struct clk *c)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
-	clk_disable_locked(c);
-	spin_unlock_irqrestore(&clock_lock, flags);
+	/* rate 0 tells dvfs the clock no longer needs its voltage floor */
+	if (clk_is_auto_dvfs(c) && c->refcnt == 0)
+		tegra_dvfs_set_rate(c, 0);
+
+	clk_unlock_restore(c, flags);
}
EXPORT_SYMBOL(clk_disable);
-int clk_set_parent_locked(struct clk *c, struct clk *parent)
+/* Switch c to a new parent.  DVFS is raised before the switch when the
+ * rate will go up, and lowered after it when the rate goes down, so the
+ * voltage always covers the currently-running frequency. */
+int clk_set_parent(struct clk *c, struct clk *parent)
{
-	int ret;
+	int ret = 0;
+	unsigned long flags;
+	unsigned long new_rate;
+	unsigned long old_rate;
-	pr_debug("%s: %s\n", __func__, c->name);
+	clk_lock_save(c, flags);
-	if (!c->ops || !c->ops->set_parent)
-		return -ENOSYS;
+	if (!c->ops || !c->ops->set_parent) {
+		ret = -ENOSYS;
+		goto out;
+	}
-	ret = c->ops->set_parent(c, parent);
+	new_rate = clk_predict_rate_from_parent(c, parent);
+	old_rate = clk_get_rate_locked(c);
-	if (ret)
-		return ret;
+	if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+			(!c->parent || new_rate > old_rate)) {
+		ret = tegra_dvfs_set_rate(c, new_rate);
+		if (ret)
+			goto out;
+	}
-	propagate_rate(c);
+	ret = c->ops->set_parent(c, parent);
+	if (ret)
+		goto out;
-	return 0;
-}
+	if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
+			new_rate < old_rate)
+		ret = tegra_dvfs_set_rate(c, new_rate);
-int clk_set_parent(struct clk *c, struct clk *parent)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
-	ret = clk_set_parent_locked(c, parent);
-	spin_unlock_irqrestore(&clock_lock, flags);
+out:
+	clk_unlock_restore(c, flags);
	return ret;
}
EXPORT_SYMBOL(clk_set_parent);
{
	int ret = 0;
	unsigned long flags;
+	unsigned long old_rate;
+	long new_rate;
-	spin_lock_irqsave(&clock_lock, flags);
+	clk_lock_save(c, flags);
-	pr_debug("%s: %s\n", __func__, c->name);
-
-	if (c->ops && c->ops->set_rate)
-		ret = c->ops->set_rate(c, rate);
-	else
+	if (!c->ops || !c->ops->set_rate) {
		ret = -ENOSYS;
+		goto out;
+	}
+
+	old_rate = clk_get_rate_locked(c);
+
+	/* clamp the request to the clock's documented maximum */
+	if (rate > c->max_rate)
+		rate = c->max_rate;
+
+	if (c->ops && c->ops->round_rate) {
+		new_rate = c->ops->round_rate(c, rate);
-	propagate_rate(c);
+		if (new_rate < 0) {
+			ret = new_rate;
+			goto out;
+		}
+
+		rate = new_rate;
+	}
+
+	/* raise voltage before going faster ... */
+	if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
+		ret = tegra_dvfs_set_rate(c, rate);
+		if (ret)
+			goto out;
+	}
+
+	ret = c->ops->set_rate(c, rate);
+	if (ret)
+		goto out;
-	spin_unlock_irqrestore(&clock_lock, flags);
+	/* ... and lower it only after going slower */
+	if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
+		ret = tegra_dvfs_set_rate(c, rate);
+out:
+	clk_unlock_restore(c, flags);
	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
-unsigned long clk_get_rate(struct clk *c)
+/* Must be called with clocks lock and all individual clock locks held */
+/* Lock-free rate computation for the debugfs dump: accumulates mul/div
+ * up the parent chain and applies them to the root clock's cached rate. */
+unsigned long clk_get_rate_all_locked(struct clk *c)
+{
+	u64 rate;
+	int mul = 1;
+	int div = 1;
+	struct clk *p = c;
+
+	while (p) {
+		c = p;
+		if (c->mul != 0 && c->div != 0) {
+			mul *= c->mul;
+			div *= c->div;
+		}
+		p = c->parent;
+	}
+
+	rate = c->rate;
+	rate *= mul;
+	do_div(rate, div);
+
+	return rate;
+}
+
+/* Ask the clock op what rate the hardware would actually produce. */
+long clk_round_rate(struct clk *c, unsigned long rate)
{
	unsigned long flags;
-	unsigned long ret;
+	long ret;
-	spin_lock_irqsave(&clock_lock, flags);
+	clk_lock_save(c, flags);
-	pr_debug("%s: %s\n", __func__, c->name);
+	if (!c->ops || !c->ops->round_rate) {
+		ret = -ENOSYS;
+		goto out;
+	}
-	ret = c->rate;
+	if (rate > c->max_rate)
+		rate = c->max_rate;
-	spin_unlock_irqrestore(&clock_lock, flags);
+	ret = c->ops->round_rate(c, rate);
+
+out:
+	clk_unlock_restore(c, flags);
	return ret;
}
-EXPORT_SYMBOL(clk_get_rate);
+EXPORT_SYMBOL(clk_round_rate);
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
-int __init tegra_init_clock(void)
+/* Early clock init: register all clocks, then build the DVFS tables. */
+void __init tegra_init_clock(void)
{
	tegra2_init_clocks();
+	tegra2_init_dvfs();
+}
+/*
+ * Iterate through all clocks, disabling any for which the refcount is 0
+ * but the clock init detected the bootloader left the clock on.
+ */
+int __init tegra_disable_boot_clocks(void)
+{
+	unsigned long flags;
+	struct clk *c;
+
+	mutex_lock(&clock_list_lock);
+
+	list_for_each_entry(c, &clocks, node) {
+		clk_lock_save(c, flags);
+		if (c->refcnt == 0 && c->state == ON &&
+				c->ops && c->ops->disable) {
+			pr_warning("Disabling clock %s left on by bootloader\n",
+				c->name);
+			c->ops->disable(c);
+			c->state = OFF;
+		}
+		clk_unlock_restore(c, flags);
+	}
+
+	mutex_unlock(&clock_list_lock);
	return 0;
}
+/* Runs after all drivers have had a chance to claim their clocks. */
+int __init tegra_late_init_clock(void)
+{
+	tegra_dvfs_late_init();
+	tegra_disable_boot_clocks();
+	return 0;
+}
+late_initcall(tegra_late_init_clock);
+
+/* The SDMMC controllers have extra bits in the clock source register that
+ * adjust the delay between the clock and data to compensate for delays
+ * on the PCB. */
+void tegra_sdmmc_tap_delay(struct clk *c, int delay) {
+	unsigned long flags;
+
+	/* take the clock's own lock so the read-modify-write of the source
+	 * register can't race other rate/parent changes */
+	clk_lock_save(c, flags);
+	tegra2_sdmmc_tap_delay(c, delay);
+	clk_unlock_restore(c, flags);
+}
+
#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Attempt to lock all the clocks that are marked cansleep
+ * Must be called with irqs enabled
+ */
+static int __clk_lock_all_mutexes(void)
+{
+ struct clk *c;
+
+ might_sleep();
+
+ list_for_each_entry(c, &clocks, node)
+ if (clk_cansleep(c))
+ if (!mutex_trylock(&c->mutex))
+ goto unlock_mutexes;
+
+ return 0;
+
+unlock_mutexes:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+
+ return -EAGAIN;
+}
+
+/*
+ * Attempt to lock all the clocks that are not marked cansleep
+ * Must be called with irqs disabled
+ */
+static int __clk_lock_all_spinlocks(void)
+{
+ struct clk *c;
+
+ list_for_each_entry(c, &clocks, node)
+ if (!clk_cansleep(c))
+ if (!spin_trylock(&c->spinlock))
+ goto unlock_spinlocks;
+
+ return 0;
+
+unlock_spinlocks:
+ list_for_each_entry_continue_reverse(c, &clocks, node)
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
+
+ return -EAGAIN;
+}
+
+static void __clk_unlock_all_mutexes(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ if (clk_cansleep(c))
+ mutex_unlock(&c->mutex);
+}
+
+static void __clk_unlock_all_spinlocks(void)
+{
+ struct clk *c;
+
+ list_for_each_entry_reverse(c, &clocks, node)
+ if (!clk_cansleep(c))
+ spin_unlock(&c->spinlock);
+}
+
+/*
+ * This function retries until it can take all locks, and may take
+ * an arbitrarily long time to complete.
+ * Must be called with irqs enabled, returns with irqs disabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_lock_all(void)
+{
+ int ret;
+retry:
+ ret = __clk_lock_all_mutexes();
+ if (ret)
+ goto failed_mutexes;
+
+ local_irq_disable();
+
+ ret = __clk_lock_all_spinlocks();
+ if (ret)
+ goto failed_spinlocks;
+
+ /* All locks taken successfully, return */
+ return;
+
+failed_spinlocks:
+ local_irq_enable();
+ __clk_unlock_all_mutexes();
+failed_mutexes:
+ msleep(1);
+ goto retry;
+}
+
+/*
+ * Unlocks all clocks after a clk_lock_all
+ * Must be called with irqs disabled, returns with irqs enabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_unlock_all(void)
+{
+ __clk_unlock_all_spinlocks();
+
+ local_irq_enable();
+
+ __clk_unlock_all_mutexes();
+}
+
static struct dentry *clk_debugfs_root;
+static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
+{
+ seq_printf(s, "%*s %-*s%21s%d mV\n",
+ level * 3 + 1, "",
+ 30 - level * 3, d->dvfs_rail->reg_id,
+ "",
+ d->cur_millivolts);
+}
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
- struct clk *safe;
const char *state = "uninit";
- char div[5] = {0};
+ char div[8] = {0};
if (c->state == ON)
state = "on";
state = "off";
if (c->mul != 0 && c->div != 0) {
- BUG_ON(c->mul > 2);
- if (c->mul > c->div)
- snprintf(div, sizeof(div), "x%d", c->mul / c->div);
- else
+ if (c->mul > c->div) {
+ int mul = c->mul / c->div;
+ int mul2 = (c->mul * 10 / c->div) % 10;
+ int mul3 = (c->mul * 10) % c->div;
+ if (mul2 == 0 && mul3 == 0)
+ snprintf(div, sizeof(div), "x%d", mul);
+ else if (mul3 == 0)
+ snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
+ else
+ snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
+ } else {
snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
(c->div % c->mul) ? ".5" : "");
+ }
}
- seq_printf(s, "%*s%-*s %-6s %-3d %-5s %-10lu\n",
- level * 3 + 1, c->set ? "" : "*",
+ seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
+ level * 3 + 1, "",
+ c->rate > c->max_rate ? '!' : ' ',
+ !c->set ? '*' : ' ',
30 - level * 3, c->name,
- state, c->refcnt, div, c->rate);
- list_for_each_entry_safe(child, safe, &c->children, sibling) {
+ state, c->refcnt, div, clk_get_rate_all_locked(c));
+
+ if (c->dvfs)
+ dvfs_show_one(s, c->dvfs, level + 1);
+
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
clock_tree_show_one(s, child, level + 1);
}
}
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
- unsigned long flags;
- seq_printf(s, " clock state ref div rate \n");
- seq_printf(s, "-----------------------------------------------------------\n");
- spin_lock_irqsave(&clock_lock, flags);
+ seq_printf(s, " clock state ref div rate\n");
+ seq_printf(s, "--------------------------------------------------------------\n");
+
+ mutex_lock(&clock_list_lock);
+
+ clk_lock_all();
+
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
- spin_unlock_irqrestore(&clock_lock, flags);
+
+ clk_unlock_all();
+
+ mutex_unlock(&clock_list_lock);
return 0;
}
if (!d)
goto err_out;
+ if (dvfs_debugfs_init(clk_debugfs_root))
+ goto err_out;
+
list_for_each_entry(c, &clocks, node) {
err = clk_debugfs_register(c);
if (err)
#define __MACH_TEGRA_CLOCK_H
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <asm/clkdev.h>
#define DIV_BUS (1 << 0)
#define DIV_U71 (1 << 1)
#define DIV_U71_FIXED (1 << 2)
#define DIV_2 (1 << 3)
-#define PLL_FIXED (1 << 4)
-#define PLL_HAS_CPCON (1 << 5)
-#define MUX (1 << 6)
-#define PLLD (1 << 7)
-#define PERIPH_NO_RESET (1 << 8)
-#define PERIPH_NO_ENB (1 << 9)
-#define PERIPH_EMC_ENB (1 << 10)
-#define PERIPH_MANUAL_RESET (1 << 11)
-#define PLL_ALT_MISC_REG (1 << 12)
+#define DIV_U16 (1 << 4)
+#define PLL_FIXED (1 << 5)
+#define PLL_HAS_CPCON (1 << 6)
+#define MUX (1 << 7)
+#define PLLD (1 << 8)
+#define PERIPH_NO_RESET (1 << 9)
+#define PERIPH_NO_ENB (1 << 10)
+#define PERIPH_EMC_ENB (1 << 11)
+#define PERIPH_MANUAL_RESET (1 << 12)
+#define PLL_ALT_MISC_REG (1 << 13)
+#define PLLU (1 << 14)
#define ENABLE_ON_INIT (1 << 28)
struct clk;
u32 value;
};
-struct clk_pll_table {
+struct clk_pll_freq_table {
unsigned long input_rate;
unsigned long output_rate;
u16 n;
void (*init)(struct clk *);
int (*enable)(struct clk *);
void (*disable)(struct clk *);
- void (*recalc)(struct clk *);
int (*set_parent)(struct clk *, struct clk *);
int (*set_rate)(struct clk *, unsigned long);
- unsigned long (*get_rate)(struct clk *);
long (*round_rate)(struct clk *, unsigned long);
- unsigned long (*recalculate_rate)(struct clk *);
+ void (*reset)(struct clk *, bool);
};
enum clk_state {
struct clk {
/* node for master clocks list */
- struct list_head node;
- struct list_head children; /* list of children */
- struct list_head sibling; /* node for children */
+ struct list_head node; /* node for list of all clocks */
+ struct dvfs *dvfs;
+ struct clk_lookup lookup;
+
#ifdef CONFIG_DEBUG_FS
- struct dentry *dent;
- struct dentry *parent_dent;
+ struct dentry *dent;
+ struct dentry *parent_dent;
#endif
- struct clk_ops *ops;
- struct clk *parent;
- struct clk_lookup lookup;
- unsigned long rate;
- u32 flags;
- u32 refcnt;
- const char *name;
+ bool set;
+ struct clk_ops *ops;
+ unsigned long dvfs_rate;
+ unsigned long rate;
+ unsigned long max_rate;
+ unsigned long min_rate;
+ bool auto_dvfs;
+ bool cansleep;
+ u32 flags;
+ const char *name;
+
+ u32 refcnt;
+ enum clk_state state;
+ struct clk *parent;
+ u32 div;
+ u32 mul;
+
+ const struct clk_mux_sel *inputs;
u32 reg;
u32 reg_shift;
- unsigned int clk_num;
- enum clk_state state;
-#ifdef CONFIG_DEBUG_FS
- bool set;
-#endif
- /* PLL */
- unsigned long input_min;
- unsigned long input_max;
- unsigned long cf_min;
- unsigned long cf_max;
- unsigned long vco_min;
- unsigned long vco_max;
- u32 m;
- u32 n;
- u32 p;
- u32 cpcon;
- const struct clk_pll_table *pll_table;
-
- /* DIV */
- u32 div;
- u32 mul;
-
- /* MUX */
- const struct clk_mux_sel *inputs;
- u32 sel;
- u32 reg_mask;
+ struct list_head shared_bus_list;
+
+ union {
+ struct {
+ unsigned int clk_num;
+ } periph;
+ struct {
+ unsigned long input_min;
+ unsigned long input_max;
+ unsigned long cf_min;
+ unsigned long cf_max;
+ unsigned long vco_min;
+ unsigned long vco_max;
+ const struct clk_pll_freq_table *freq_table;
+ int lock_delay;
+ } pll;
+ struct {
+ u32 sel;
+ u32 reg_mask;
+ } mux;
+ struct {
+ struct clk *main;
+ struct clk *backup;
+ } cpu;
+ struct {
+ struct list_head node;
+ bool enabled;
+ unsigned long rate;
+ } shared_bus_user;
+ } u;
+
+ struct mutex mutex;
+ spinlock_t spinlock;
};
-
struct clk_duplicate {
const char *name;
struct clk_lookup lookup;
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
-void clk_disable_locked(struct clk *c);
-int clk_enable_locked(struct clk *c);
-int clk_set_parent_locked(struct clk *c, struct clk *parent);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
+void clk_set_cansleep(struct clk *c);
+unsigned long clk_get_rate_locked(struct clk *c);
+void tegra2_sdmmc_tap_delay(struct clk *c, int delay);
#endif
/*
- * arch/arm/mach-tegra/board-harmony.c
+ * arch/arm/mach-tegra/common.c
*
* Copyright (C) 2010 Google, Inc.
*
*
*/
+#include <linux/console.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/system.h>
#include <mach/iomap.h>
+#include <mach/dma.h>
+#include <mach/powergate.h>
+#include <mach/system.h>
+#include "apbio.h"
#include "board.h"
#include "clock.h"
+#include "fuse.h"
+
+#define MC_SECURITY_CFG2 0x7c
+
+unsigned long tegra_bootloader_fb_start;
+unsigned long tegra_bootloader_fb_size;
+unsigned long tegra_fb_start;
+unsigned long tegra_fb_size;
+unsigned long tegra_fb2_start;
+unsigned long tegra_fb2_size;
+unsigned long tegra_carveout_start;
+unsigned long tegra_carveout_size;
+unsigned long tegra_lp0_vec_start;
+unsigned long tegra_lp0_vec_size;
+unsigned long tegra_grhost_aperture;
+
+void (*tegra_reset)(char mode, const char *cmd);
static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
+ /* set up clocks that should always be on */
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
{ "pll_p", "clk_m", 216000000, true },
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
- { "pll_p_out4", "pll_p", 108000000, true },
- { "sys", "pll_p_out4", 108000000, true },
- { "hclk", "sys", 108000000, true },
- { "pclk", "hclk", 54000000, true },
+ { "pll_m_out1", "pll_m", 120000000, true },
+ { "sclk", "pll_m_out1", 120000000, true },
+ { "hclk", "sclk", 120000000, true },
+ { "pclk", "hclk", 60000000, true },
+ { "pll_x", NULL, 0, true },
+ { "cpu", NULL, 0, true },
+ { "emc", NULL, 0, true },
+ { "csite", NULL, 0, true },
+ { "timer", NULL, 0, true },
+ { "kfuse", NULL, 0, true },
+ { "rtc", NULL, 0, true },
+
+ /* set frequencies of some device clocks */
+ { "pll_u", "clk_m", 480000000, false },
+ { "sdmmc1", "pll_p", 48000000, false},
+ { "sdmmc2", "pll_p", 48000000, false},
+ { "sdmmc3", "pll_p", 48000000, false},
+ { "sdmmc4", "pll_p", 48000000, false},
{ NULL, NULL, 0, 0},
};
writel(0x331, p + L2X0_TAG_LATENCY_CTRL);
writel(0x441, p + L2X0_DATA_LATENCY_CTRL);
+ writel(2, p + L2X0_PWR_CTRL);
- l2x0_init(p, 0x6C080001, 0x8200c3fe);
+ l2x0_init(p, 0x6C480001, 0x8200c3fe);
#endif
+
+}
+
+static void __init tegra_init_power(void)
+{
+ tegra_powergate_power_off(TEGRA_POWERGATE_MPE);
+ tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+}
+
+static bool console_flushed;
+
+static void tegra_pm_flush_console(void)
+{
+ if (console_flushed)
+ return;
+ console_flushed = true;
+
+ printk("\n");
+ pr_emerg("Restarting %s\n", linux_banner);
+ if (!try_acquire_console_sem()) {
+ release_console_sem();
+ return;
+ }
+
+ mdelay(50);
+
+ local_irq_disable();
+ if (try_acquire_console_sem())
+ pr_emerg("tegra_restart: Console was locked! Busting\n");
+ else
+ pr_emerg("tegra_restart: Console was locked!\n");
+ release_console_sem();
+}
+
+static void tegra_pm_restart(char mode, const char *cmd)
+{
+ tegra_pm_flush_console();
+ arm_machine_restart(mode, cmd);
}
void __init tegra_common_init(void)
{
+ arm_pm_restart = tegra_pm_restart;
+ tegra_init_fuse();
tegra_init_clock();
tegra_clk_init_from_table(common_clk_init_table);
+ tegra_init_power();
tegra_init_cache();
+ tegra_dma_init();
+ tegra_init_apb_dma();
+}
+
+static int __init tegra_bootloader_fb_arg(char *options)
+{
+ char *p = options;
+
+ tegra_bootloader_fb_size = memparse(p, &p);
+ if (*p == '@')
+ tegra_bootloader_fb_start = memparse(p+1, &p);
+
+ pr_info("Found tegra_fbmem: %08lx@%08lx\n",
+ tegra_bootloader_fb_size, tegra_bootloader_fb_start);
+
+ return 0;
+}
+early_param("tegra_fbmem", tegra_bootloader_fb_arg);
+
+static int __init tegra_lp0_vec_arg(char *options)
+{
+ char *p = options;
+
+ tegra_lp0_vec_size = memparse(p, &p);
+ if (*p == '@')
+ tegra_lp0_vec_start = memparse(p+1, &p);
+
+ return 0;
+}
+early_param("lp0_vec", tegra_lp0_vec_arg);
+
+/*
+ * Tegra has a protected aperture that prevents access by most non-CPU
+ * memory masters to addresses above the aperture value. Enabling it
+ * secures the CPU's memory from the GPU, except through the GART.
+ */
+void __init tegra_protected_aperture_init(unsigned long aperture)
+{
+#ifndef CONFIG_NVMAP_ALLOW_SYSMEM
+ void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
+ pr_info("Enabling Tegra protected aperture at 0x%08lx\n", aperture);
+ writel(aperture, mc_base + MC_SECURITY_CFG2);
+#else
+ pr_err("Tegra protected aperture disabled because nvmap is using "
+ "system memory\n");
+#endif
+}
+
+/*
+ * Due to conflicting restrictions on the placement of the framebuffer,
+ * the bootloader is likely to leave the framebuffer pointed at a location
+ * in memory that is outside the grhost aperture. This function will move
+ * the framebuffer contents from a physical address that is anywhere (lowmem,
+ * highmem, or outside the memory map) to a physical address that is outside
+ * the memory map.
+ */
+void tegra_move_framebuffer(unsigned long to, unsigned long from,
+ unsigned long size)
+{
+ struct page *page;
+ void __iomem *to_io;
+ void *from_virt;
+ unsigned long i;
+
+ BUG_ON(PAGE_ALIGN((unsigned long)to) != (unsigned long)to);
+ BUG_ON(PAGE_ALIGN(from) != from);
+ BUG_ON(PAGE_ALIGN(size) != size);
+
+ to_io = ioremap(to, size);
+ if (!to_io) {
+ pr_err("%s: Failed to map target framebuffer\n", __func__);
+ return;
+ }
+
+ if (pfn_valid(page_to_pfn(phys_to_page(from)))) {
+ for (i = 0 ; i < size; i += PAGE_SIZE) {
+ page = phys_to_page(from + i);
+ from_virt = kmap(page);
+ memcpy_toio(to_io + i, from_virt, PAGE_SIZE);
+ kunmap(page);
+ }
+ } else {
+ void __iomem *from_io = ioremap(from, size);
+ if (!from_io) {
+ pr_err("%s: Failed to map source framebuffer\n",
+ __func__);
+ goto out;
+ }
+
+ for (i = 0; i < size; i+= 4)
+ writel(readl(from_io + i), to_io + i);
+
+ iounmap(from_io);
+ }
+out:
+ iounmap(to_io);
+}
+
+void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
+ unsigned long fb2_size)
+{
+ if (tegra_lp0_vec_size)
+ if (memblock_reserve(tegra_lp0_vec_start, tegra_lp0_vec_size))
+ pr_err("Failed to reserve lp0_vec %08lx@%08lx\n",
+ tegra_lp0_vec_size, tegra_lp0_vec_start);
+
+
+ tegra_carveout_start = memblock_end_of_DRAM() - carveout_size;
+ if (memblock_remove(tegra_carveout_start, carveout_size))
+ pr_err("Failed to remove carveout %08lx@%08lx from memory "
+ "map\n",
+ tegra_carveout_start, carveout_size);
+ else
+ tegra_carveout_size = carveout_size;
+
+ tegra_fb2_start = memblock_end_of_DRAM() - fb2_size;
+ if (memblock_remove(tegra_fb2_start, fb2_size))
+ pr_err("Failed to remove second framebuffer %08lx@%08lx from "
+ "memory map\n",
+ tegra_fb2_start, fb2_size);
+ else
+ tegra_fb2_size = fb2_size;
+
+ tegra_fb_start = memblock_end_of_DRAM() - fb_size;
+ if (memblock_remove(tegra_fb_start, fb_size))
+ pr_err("Failed to remove framebuffer %08lx@%08lx from memory "
+ "map\n",
+ tegra_fb_start, fb_size);
+ else
+ tegra_fb_size = fb_size;
+
+ if (tegra_fb_size)
+ tegra_grhost_aperture = tegra_fb_start;
+
+ if (tegra_fb2_size && tegra_fb2_start < tegra_grhost_aperture)
+ tegra_grhost_aperture = tegra_fb2_start;
+
+ if (tegra_carveout_size && tegra_carveout_start < tegra_grhost_aperture)
+ tegra_grhost_aperture = tegra_carveout_start;
+
+ /*
+ * TODO: We should copy the bootloader's framebuffer to the framebuffer
+ * allocated above, and then free this one.
+ */
+ if (tegra_bootloader_fb_size)
+ if (memblock_reserve(tegra_bootloader_fb_start,
+ tegra_bootloader_fb_size))
+ pr_err("Failed to reserve lp0_vec %08lx@%08lx\n",
+ tegra_lp0_vec_size, tegra_lp0_vec_start);
+
+ pr_info("Tegra reserved memory:\n"
+ "LP0: %08lx - %08lx\n"
+ "Bootloader framebuffer: %08lx - %08lx\n"
+ "Framebuffer: %08lx - %08lx\n"
+ "2nd Framebuffer: %08lx - %08lx\n"
+ "Carveout: %08lx - %08lx\n",
+ tegra_lp0_vec_start,
+ tegra_lp0_vec_start + tegra_lp0_vec_size - 1,
+ tegra_bootloader_fb_start,
+ tegra_bootloader_fb_start + tegra_bootloader_fb_size - 1,
+ tegra_fb_start,
+ tegra_fb_start + tegra_fb_size - 1,
+ tegra_fb2_start,
+ tegra_fb2_start + tegra_fb2_size - 1,
+ tegra_carveout_start,
+ tegra_carveout_start + tegra_carveout_size - 1);
}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/cortex-a9.S
+ *
+ * CPU state save & restore routines for CPU hotplug
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/domain.h>
+#include <asm/ptrace.h>
+#include <asm/cache.h>
+#include <asm/vfpmacros.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+/* .section ".cpuinit.text", "ax"*/
+
+#define TTB_FLAGS 0x6A @ IRGN_WBWA, OC_RGN_WBWA, S, NOS
+
+/*
+ * spooled CPU context is 1KB / CPU
+ */
+#define CTX_SP 0
+#define CTX_CPSR 4
+#define CTX_SPSR 8
+#define CTX_CPACR 12
+#define CTX_CSSELR 16
+#define CTX_SCTLR 20
+#define CTX_ACTLR 24
+#define CTX_PCTLR 28
+
+#define CTX_FPEXC 32
+#define CTX_FPSCR 36
+#define CTX_DIAGNOSTIC 40
+
+#define CTX_TTBR0 48
+#define CTX_TTBR1 52
+#define CTX_TTBCR 56
+#define CTX_DACR 60
+#define CTX_PAR 64
+#define CTX_PRRR 68
+#define CTX_NMRR 72
+#define CTX_VBAR 76
+#define CTX_CONTEXTIDR 80
+#define CTX_TPIDRURW 84
+#define CTX_TPIDRURO 88
+#define CTX_TPIDRPRW 92
+
+#define CTX_SVC_SP 0
+#define CTX_SVC_LR -1 @ stored on stack
+#define CTX_SVC_SPSR 8
+
+#define CTX_SYS_SP 96
+#define CTX_SYS_LR 100
+
+#define CTX_ABT_SPSR 112
+#define CTX_ABT_SP 116
+#define CTX_ABT_LR 120
+
+#define CTX_UND_SPSR 128
+#define CTX_UND_SP 132
+#define CTX_UND_LR 136
+
+#define CTX_IRQ_SPSR 144
+#define CTX_IRQ_SP 148
+#define CTX_IRQ_LR 152
+
+#define CTX_FIQ_SPSR 160
+#define CTX_FIQ_R8 164
+#define CTX_FIQ_R9 168
+#define CTX_FIQ_R10 172
+#define CTX_FIQ_R11 176
+#define CTX_FIQ_R12 180
+#define CTX_FIQ_SP 184
+#define CTX_FIQ_LR 188
+
+/* context only relevant for master cpu */
+#ifdef CONFIG_CACHE_L2X0
+#define CTX_L2_CTRL 224
+#define CTX_L2_AUX 228
+#define CTX_L2_TAG_CTRL 232
+#define CTX_L2_DAT_CTRL 236
+#define CTX_L2_PREFETCH 240
+#endif
+
+#define CTX_VFP_REGS 256
+#define CTX_VFP_SIZE (32 * 8)
+
+#define CTX_CP14_REGS 512
+#define CTS_CP14_DSCR 512
+#define CTX_CP14_WFAR 516
+#define CTX_CP14_VCR 520
+#define CTX_CP14_CLAIM 524
+
+/* Each of the following is 2 32-bit registers */
+#define CTS_CP14_BKPT_0 528
+#define CTS_CP14_BKPT_1 536
+#define CTS_CP14_BKPT_2 544
+#define CTS_CP14_BKPT_3 552
+#define CTS_CP14_BKPT_4 560
+#define CTS_CP14_BKPT_5 568
+
+/* Each of the following is 2 32-bit registers */
+#define CTS_CP14_WPT_0 576
+#define CTS_CP14_WPT_1 584
+#define CTS_CP14_WPT_2 592
+#define CTS_CP14_WPT_3 600
+
+#include "power.h"
+#include "power-macros.S"
+
+.macro ctx_ptr, rd, tmp
+ cpu_id \tmp
+ mov32 \rd, tegra_context_area
+ ldr \rd, [\rd]
+ add \rd, \rd, \tmp, lsl #(CONTEXT_SIZE_BYTES_SHIFT)
+.endm
+
+.macro translate, pa, va, tmp
+ mov \tmp, #0x1000
+ sub \tmp, \tmp, #1
+ bic \pa, \va, \tmp
+ mcr p15, 0, \pa, c7, c8, 1
+ mrc p15, 0, \pa, c7, c4, 0
+ bic \pa, \pa, \tmp
+ and \tmp, \va, \tmp
+ orr \pa, \pa, \tmp
+.endm
+
+/*
+ * __cortex_a9_save(unsigned int mode)
+ *
+ * spools out the volatile processor state to memory, so that
+ * the CPU may be safely powered down. does not preserve:
+ * - CP15 c0 registers (except cache size select 2,c0/c0,0)
+ * - CP15 c1 secure registers (c1/c1, 0-3)
+ * - CP15 c5 fault status registers (c5/c0 0&1, c5/c1 0&1)
+ * - CP15 c6 fault address registers (c6/c0 0&2)
+ * - CP15 c9 performance monitor registers (c9/c12 0-5,
+ * c9/c13 0-2, c9/c14 0-2)
+ * - CP15 c10 TLB lockdown register (c10/c0, 0)
+ * - CP15 c12 MVBAR (c12/c0, 1)
+ * - CP15 c15 TLB lockdown registers
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__cortex_a9_save)
+ mrs r3, cpsr
+ cps 0x13 @ save off svc registers
+ mov r1, sp
+ stmfd sp!, {r3-r12, lr}
+
+ bic r2, sp, #(L1_CACHE_BYTES-1)
+
+1: mcr p15, 0, r2, c7, c14, 1 @ clean out dirty stack cachelines
+ add r2, r2, #L1_CACHE_BYTES
+ cmp r2, r1
+ ble 1b
+ dsb
+
+ ctx_ptr r8, r9
+ mov r12, r0
+
+ /* zero-out context area */
+ mov r9, r8
+ add r10, r8, #(CONTEXT_SIZE_BYTES)
+ mov r0, #0
+ mov r1, #0
+ mov r2, #0
+ mov r3, #0
+ mov r4, #0
+ mov r5, #0
+ mov r6, #0
+ mov r7, #0
+2: stmia r9!, {r0-r7}
+ cmp r9, r10
+ blo 2b
+
+ mov r0, sp
+ mov sp, r12 @ sp holds the power mode
+ mrs r1, cpsr
+ mrs r2, spsr
+
+ mrc p15, 0, r3, c1, c0, 2 @ cpacr
+ stmia r8, {r0-r3}
+ mrc p15, 2, r0, c0, c0, 0 @ csselr
+ mrc p15, 0, r1, c1, c0, 0 @ sctlr
+ mrc p15, 0, r2, c1, c0, 1 @ actlr
+ mrc p15, 0, r4, c15, c0, 0 @ pctlr
+ add r9, r8, #CTX_CSSELR
+ stmia r9, {r0-r2, r4}
+
+#ifdef CONFIG_VFPv3
+ orr r2, r3, #0xF00000
+ mcr p15, 0, r2, c1, c0, 2 @ enable access to FPU
+ VFPFMRX r2, FPEXC
+ str r2, [r8, #CTX_FPEXC]
+ mov r1, #0x40000000 @ enable access to FPU
+ VFPFMXR FPEXC, r1
+ VFPFMRX r1, FPSCR
+ str r1, [r8, #CTX_FPSCR]
+ isb
+ add r9, r8, #CTX_VFP_REGS
+
+ VFPFSTMIA r9, r12 @ save out (16 or 32)*8B of FPU registers
+ VFPFMXR FPEXC, r2
+ mrc p15, 0, r3, c1, c0, 2 @ restore original FPEXC/CPACR
+#endif
+ mrc p15, 0, r0, c15, c0, 1 @ diag
+ str r0, [r8, #CTX_DIAGNOSTIC]
+
+ add r9, r8, #CTX_TTBR0
+ mrc p15, 0, r0, c2, c0, 0 @ TTBR0
+ mrc p15, 0, r1, c2, c0, 1 @ TTBR1
+ mrc p15, 0, r2, c2, c0, 2 @ TTBCR
+ mrc p15, 0, r3, c3, c0, 0 @ domain access control reg
+ mrc p15, 0, r4, c7, c4, 0 @ PAR
+ mrc p15, 0, r5, c10, c2, 0 @ PRRR
+ mrc p15, 0, r6, c10, c2, 1 @ NMRR
+ mrc p15, 0, r7, c12, c0, 0 @ VBAR
+ stmia r9!, {r0-r7}
+ mrc p15, 0, r0, c13, c0, 1 @ CONTEXTIDR
+ mrc p15, 0, r1, c13, c0, 2 @ TPIDRURW
+ mrc p15, 0, r2, c13, c0, 3 @ TPIDRURO
+ mrc p15, 0, r3, c13, c0, 4 @ TPIDRPRW
+ stmia r9, {r0-r3}
+
+ cps 0x1f @ SYS mode
+ add r9, r8, #CTX_SYS_SP
+ stmia r9, {sp,lr}
+
+ cps 0x17 @ Abort mode
+ mrs r12, spsr
+ add r9, r8, #CTX_ABT_SPSR
+ stmia r9, {r12,sp,lr}
+
+ cps 0x12 @ IRQ mode
+ mrs r12, spsr
+ add r9, r8, #CTX_IRQ_SPSR
+ stmia r9, {r12,sp,lr}
+
+ cps 0x1b @ Undefined mode
+ mrs r12, spsr
+ add r9, r8, #CTX_UND_SPSR
+ stmia r9, {r12,sp,lr}
+
+ mov r0, r8
+ add r1, r8, #CTX_FIQ_SPSR
+ cps 0x11 @ FIQ mode
+ mrs r7, spsr
+ stmia r1, {r7-r12,sp,lr}
+
+ cps 0x13 @ back to SVC
+ mov r8, r0
+
+ /* Save CP14 debug controller context */
+ add r9, r8, #CTX_CP14_REGS
+ mrc p14, 0, r0, c0, c1, 0 @ DSCR
+ mrc p14, 0, r1, c0, c6, 0 @ WFAR
+ mrc p14, 0, r2, c0, c7, 0 @ VCR
+ mrc p14, 0, r3, c7, c9, 6 @ CLAIM
+ stmia r9, {r0-r3}
+
+ add r9, r8, #CTS_CP14_BKPT_0
+ mrc p14, 0, r2, c0, c0, 4
+ mrc p14, 0, r3, c0, c0, 5
+ stmia r9!, {r2-r3} @ BRKPT_0
+ mrc p14, 0, r2, c0, c1, 4
+ mrc p14, 0, r3, c0, c1, 5
+ stmia r9!, {r2-r3} @ BRKPT_1
+ mrc p14, 0, r2, c0, c2, 4
+ mrc p14, 0, r3, c0, c2, 5
+ stmia r9!, {r2-r3} @ BRKPT_2
+ mrc p14, 0, r2, c0, c3, 4
+ mrc p14, 0, r3, c0, c3, 5
+ stmia r9!, {r2-r3} @ BRKPT_3
+ mrc p14, 0, r2, c0, c4, 4
+ mrc p14, 0, r3, c0, c4, 5
+ stmia r9!, {r2-r3} @ BRKPT_4
+ mrc p14, 0, r2, c0, c5, 4
+ mrc p14, 0, r3, c0, c5, 5
+ stmia r9!, {r2-r3} @ BRKPT_5
+
+ add r9, r8, #CTS_CP14_WPT_0
+ mrc p14, 0, r2, c0, c0, 6
+ mrc p14, 0, r3, c0, c0, 7
+ stmia r9!, {r2-r3} @ WPT_0
+ mrc p14, 0, r2, c0, c1, 6
+ mrc p14, 0, r3, c0, c1, 7
+ stmia r9!, {r2-r3} @ WPT_1
+ mrc p14, 0, r2, c0, c2, 6
+ mrc p14, 0, r3, c0, c2, 7
+ stmia r9!, {r2-r3} @ WPT_2
+ mrc p14, 0, r2, c0, c3, 6
+ mrc p14, 0, r3, c0, c3, 7
+ stmia r9!, {r2-r3} @ WPT_3
+
+#ifdef CONFIG_CACHE_L2X0
+ cpu_id r4
+ cmp r4, #0
+ bne __cortex_a9_save_clean_cache
+ mov32 r4, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
+ add r9, r8, #CTX_L2_CTRL
+ ldr r0, [r4, #L2X0_CTRL]
+ ldr r1, [r4, #L2X0_AUX_CTRL]
+ ldr r2, [r4, #L2X0_TAG_LATENCY_CTRL]
+ ldr r3, [r4, #L2X0_DATA_LATENCY_CTRL]
+ ldr r4, [r4, #L2X0_PREFETCH_OFFSET]
+ stmia r9, {r0-r4}
+#endif
+
+
+__cortex_a9_save_clean_cache:
+ mov r10, r8
+ add r9, r10, #(CONTEXT_SIZE_BYTES)
+ add r9, r9, #(L1_CACHE_BYTES-1)
+ bic r10, r10, #(L1_CACHE_BYTES-1)
+ bic r9, r9, #(L1_CACHE_BYTES-1)
+
+3: mcr p15, 0, r10, c7, c10, 1
+ add r10, r10, #L1_CACHE_BYTES
+ cmp r10, r9
+ blo 3b
+ dsb
+
+ translate r10, r8, r1
+
+ mov r0, #0
+ mcr p15, 0, r0, c1, c0, 1 @ exit coherency
+ isb
+ cpu_id r0
+ mov32 r1, (TEGRA_ARM_PERIF_BASE-IO_CPU_PHYS+IO_CPU_VIRT+0xC)
+ mov r3, r0, lsl #2
+ mov r2, #0xf
+ mov r2, r2, lsl r3
+ str r2, [r1] @ invalidate SCU tags for CPU
+
+ cmp r0, #0
+ bne __put_cpu_in_reset
+ mov r8, r10
+ b __tear_down_master
+ENDPROC(__cortex_a9_save)
+
+/*
+ * __cortex_a9_restore
+ *
+ * reloads the volatile CPU state from the context area
+ * the MMU should already be enabled using the secondary_data
+ * page tables for cpu_up before this function is called, and the
+ * CPU should be coherent with the SMP complex
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__cortex_a9_restore)
+ cps 0x13
+ ctx_ptr r0, r9
+
+ cps 0x11 @ FIQ mode
+ add r1, r0, #CTX_FIQ_SPSR
+ ldmia r1, {r7-r12,sp,lr}
+ msr spsr_fsxc, r7
+
+ cps 0x12 @ IRQ mode
+ add r1, r0, #CTX_IRQ_SPSR
+ ldmia r1, {r12, sp, lr}
+ msr spsr_fsxc, r12
+
+ cps 0x17 @ abort mode
+ add r1, r0, #CTX_ABT_SPSR
+ ldmia r1, {r12, sp, lr}
+ msr spsr_fsxc, r12
+
+ cps 0x1f @ SYS mode
+ add r1, r0, #CTX_SYS_SP
+ ldmia r1, {sp, lr}
+
+ cps 0x1b @ Undefined mode
+ add r1, r0, #CTX_UND_SPSR
+ ldmia r1, {r12, sp, lr}
+ msr spsr_fsxc, r12
+
+ cps 0x13 @ back to SVC
+ mov r8, r0
+
+ add r9, r8, #CTX_CSSELR
+ ldmia r9, {r0-r3}
+
+ mcr p15, 2, r0, c0, c0, 0 @ csselr
+ mcr p15, 0, r1, c1, c0, 0 @ sctlr
+ mcr p15, 0, r2, c1, c0, 1 @ actlr
+ mcr p15, 0, r3, c15, c0, 0 @ pctlr
+
+ add r9, r8, #CTX_TTBR0
+ ldmia r9!, {r0-r7}
+
+ mcr p15, 0, r4, c7, c4, 0 @ PAR
+ mcr p15, 0, r7, c12, c0, 0 @ VBAR
+ mcr p15, 0, r3, c3, c0, 0 @ domain access control reg
+ isb
+ mcr p15, 0, r2, c2, c0, 2 @ TTBCR
+ isb
+ mcr p15, 0, r5, c10, c2, 0 @ PRRR
+ isb
+ mcr p15, 0, r6, c10, c2, 1 @ NMRR
+ isb
+
+ ldmia r9, {r4-r7}
+
+ mcr p15, 0, r5, c13, c0, 2 @ TPIDRURW
+ mcr p15, 0, r6, c13, c0, 3 @ TPIDRURO
+ mcr p15, 0, r7, c13, c0, 4 @ TPIDRPRW
+
+ ldmia r8, {r5-r7, lr}
+
+ /* perform context switch to previous context */
+ mov r9, #0
+ mcr p15, 0, r9, c13, c0, 1 @ set reserved context
+ isb
+ mcr p15, 0, r0, c2, c0, 0 @ TTBR0
+ isb
+ mcr p15, 0, r4, c13, c0, 1 @ CONTEXTIDR
+ isb
+ mcr p15, 0, r1, c2, c0, 1 @ TTBR1
+ isb
+
+ mov r4, #0
+ mcr p15, 0, r4, c8, c3, 0 @ invalidate TLB
+ mcr p15, 0, r4, c7, c5, 6 @ flush BTAC
+ mcr p15, 0, r4, c7, c5, 0 @ flush instruction cache
+ dsb
+ isb
+
+ mov sp, r5
+ msr cpsr_cxsf, r6
+ msr spsr_cxsf, r7
+
+ /* Restore CP14 debug controller context */
+ add r9, r8, #CTX_CP14_REGS
+ ldmia r9, {r0-r3}
+ mcr p14, 0, r1, c0, c6, 0 @ WFAR
+ mcr p14, 0, r2, c0, c7, 0 @ VCR
+ mcr p14, 0, r3, c7, c8, 6 @ CLAIM
+
+ add r9, r8, #CTS_CP14_BKPT_0
+ ldmia r9!, {r2-r3} @ BRKPT_0
+ mcr p14, 0, r2, c0, c0, 4
+ mcr p14, 0, r3, c0, c0, 5
+ ldmia r9!, {r2-r3} @ BRKPT_1
+ mcr p14, 0, r2, c0, c1, 4
+ mcr p14, 0, r3, c0, c1, 5
+ ldmia r9!, {r2-r3} @ BRKPT_2
+ mcr p14, 0, r2, c0, c2, 4
+ mcr p14, 0, r3, c0, c2, 5
+ ldmia r9!, {r2-r3} @ BRKPT_3
+ mcr p14, 0, r2, c0, c3, 4
+ mcr p14, 0, r3, c0, c3, 5
+ ldmia r9!, {r2-r3} @ BRKPT_4
+ mcr p14, 0, r2, c0, c4, 4
+ mcr p14, 0, r3, c0, c4, 5
+ ldmia r9!, {r2-r3} @ BRKPT_5
+ mcr p14, 0, r2, c0, c5, 4
+ mcr p14, 0, r3, c0, c5, 5
+
+ add r9, r8, #CTS_CP14_WPT_0
+ ldmia r9!, {r2-r3} @ WPT_0
+ mcr p14, 0, r2, c0, c0, 6
+ mcr p14, 0, r3, c0, c0, 7
+ ldmia r9!, {r2-r3} @ WPT_1
+ mcr p14, 0, r2, c0, c1, 6
+ mcr p14, 0, r3, c0, c1, 7
+ ldmia r9!, {r2-r3} @ WPT_2
+ mcr p14, 0, r2, c0, c2, 6
+ mcr p14, 0, r3, c0, c2, 7
+ ldmia r9!, {r2-r3} @ WPT_3
+ mcr p14, 0, r2, c0, c3, 6
+ mcr p14, 0, r3, c0, c3, 7
+ isb
+ mcr p14, 0, r0, c0, c2, 2 @ DSCR
+ isb
+
+#ifdef CONFIG_VFPv3
+ orr r4, lr, #0xF00000
+ mcr p15, 0, r4, c1, c0, 2 @ enable coproc access
+ mov r5, #0x40000000
+ VFPFMXR FPEXC, r5 @ enable FPU access
+ add r9, r8, #CTX_VFP_REGS
+ add r7, r8, #CTX_FPEXC
+ VFPFLDMIA r9, r10
+ ldmia r7, {r0, r4}
+ VFPFMXR FPSCR, r4
+ VFPFMXR FPEXC, r0
+#endif
+ mcr p15, 0, lr, c1, c0, 2 @ cpacr (loaded before VFP)
+
+ ldr r9, [r8, #CTX_DIAGNOSTIC]
+ mcr p15, 0, r9, c15, c0, 1 @ diag
+
+ /* finally, restore the stack and return */
+ ldmfd sp!, {r3-r12, lr}
+ msr cpsr_fsxc, r3 @ restore original processor mode
+ isb
+ mov pc, lr
+ENDPROC(__cortex_a9_restore)
+
+/*
+ * __cortex_a9_l2x0_restart(bool invalidate)
+ *
+ * Reconfigures the L2 cache following a power event.
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__cortex_a9_l2x0_restart)
+#ifdef CONFIG_CACHE_L2X0
+ ctx_ptr r8, r9
+ mov32 r9, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
+ add r10, r8, #CTX_L2_CTRL
+ ldmia r10, {r3-r7}
+ str r5, [r9, #L2X0_TAG_LATENCY_CTRL]
+ str r6, [r9, #L2X0_DATA_LATENCY_CTRL]
+ str r7, [r9, #L2X0_PREFETCH_OFFSET]
+ str r4, [r9, #L2X0_AUX_CTRL]
+ mov r4, #0x2 @ L2X0_DYNAMIC_CLK_GATING_EN
+ str r4, [r9, #L2X0_PWR_CTRL]
+ cmp r0, #0
+
+ beq __reenable_l2x0
+
+ mov r0, #0xff
+ str r0, [r9, #L2X0_INV_WAY]
+1: ldr r1, [r9, #L2X0_INV_WAY]
+ tst r1, r0
+ bne 1b
+ mov r0, #0
+ str r0, [r9, #L2X0_CACHE_SYNC]
+__reenable_l2x0:
+ mov r5, #0
+ mcr p15, 0, r5, c8, c3, 0 @ invalidate TLB
+ mcr p15, 0, r5, c7, c5, 6 @ flush BTAC
+ mcr p15, 0, r5, c7, c5, 0 @ flush instruction cache
+ dsb
+ isb
+ str r3, [r9, #L2X0_CTRL]
+#endif
+ b __cortex_a9_restore
+
+
+ .align L1_CACHE_SHIFT
+ENTRY(__shut_off_mmu)
+ mrc p15, 0, r3, c1, c0, 0
+ movw r2, #(1<<12) | (1<<11) | (1<<2) | (1<<0)
+ bic r3, r3, r2
+ dsb
+ mcr p15, 0, r3, c1, c0, 0
+ isb
+ bx r9
+ENDPROC(__shut_off_mmu)
+
+/*
+ * __invalidate_l1
+ *
+ * Invalidates the L1 data cache (no clean) during initial boot of
+ * a secondary processor
+ *
+ * Corrupted registers: r0-r6
+ */
+__invalidate_l1:
+ mov r0, #0
+ mcr p15, 2, r0, c0, c0, 0
+ mrc p15, 1, r0, c0, c0, 0
+
+ movw r1, #0x7fff
+ and r2, r1, r0, lsr #13
+
+ movw r1, #0x3ff
+
+ and r3, r1, r0, lsr #3 @ NumWays - 1
+ add r2, r2, #1 @ NumSets
+
+ and r0, r0, #0x7
+ add r0, r0, #4 @ SetShift
+
+ clz r1, r3 @ WayShift
+ add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+ mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+ mov r5, r3, lsl r1
+ mov r6, r2, lsl r0
+ orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ mcr p15, 0, r5, c7, c6, 2
+ bgt 2b
+ cmp r2, #0
+ bgt 1b
+ dsb
+ isb
+ bx lr
+ENDPROC(__invalidate_l1)
+
+/*
+ * __invalidate_cpu_state
+ *
+ * Invalidates volatile CPU state (SCU tags, caches, branch address
+ * arrays, exclusive monitor, etc.) so that instruction caching and
+ * branch prediction can be safely enabled as early as possible
+ * to improve performance
+ */
+ENTRY(__invalidate_cpu_state)
+ clrex
+ mov r0, #0
+ mcr p15, 0, r0, c1, c0, 1 @ disable SMP, prefetch, broadcast
+ isb
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate BTAC, i-cache
+ mcr p15, 0, r0, c7, c5, 6 @ invalidate branch pred array
+ mcr p15, 0, r0, c8, c7, 0 @ invalidate unified TLB
+ dsb
+ isb
+
+ cpu_id r0
+ cmp r0, #0
+ mov32 r1, (TEGRA_ARM_PERIF_BASE + 0xC)
+ movne r0, r0, lsl #2
+ movne r2, #0xf
+ movne r2, r2, lsl r0
+ strne r2, [r1] @ invalidate SCU tags for CPU
+
+ dsb
+ mov r0, #0x1800
+ mcr p15, 0, r0, c1, c0, 0 @ enable branch prediction, i-cache
+ isb
+ b __invalidate_l1 @ invalidate data cache
+ENDPROC(__invalidate_cpu_state)
+
+/*
+ * __return_to_virtual(unsigned long pgdir, void (*ctx_restore)(void))
+ *
+ * Restores a CPU to the world of virtual addressing, using the
+ * specified page tables (which must ensure that a VA=PA mapping
+ * exists for the __enable_mmu function), and then jumps to
+ * ctx_restore to restore CPU context and return control to the OS
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__return_to_virtual)
+ orr r8, r0, #TTB_FLAGS
+ mov lr, r1 @ "return" to ctx_restore
+ mov r3, #0
+ mcr p15, 0, r3, c2, c0, 2 @ TTB control register
+
+ mcr p15, 0, r8, c2, c0, 1 @ load TTBR1
+
+ mov r0, #0x1f
+ mcr p15, 0, r0, c3, c0, 0 @ domain access register
+
+ mov32 r0, 0xff0a89a8
+#ifdef CONFIG_SMP
+ mov32 r1, 0xc0e0c4e0
+#else
+ mov32 r1, 0x40e044e0
+#endif
+ mcr p15, 0, r0, c10, c2, 0 @ PRRR
+ mcr p15, 0, r1, c10, c2, 1 @ NMRR
+ mrc p15, 0, r0, c1, c0, 0
+ mov32 r1, 0x0120c302
+ bic r0, r0, r1
+ mov32 r1, 0x10c03c7d
+ orr r0, r0, r1
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+ orr r0, r0, #0x2
+#else
+ bic r0, r0, #0x2
+#endif
+ mov r1, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+ mcr p15, 0, r1, c3, c0, 0 @ domain access register
+ mcr p15, 0, r8, c2, c0, 0 @ TTBR0
+ b __turn_mmu_on_again
+ andeq r0, r0, r0
+ andeq r0, r0, r0
+ andeq r0, r0, r0
+ andeq r0, r0, r0
+ENDPROC(__return_to_virtual)
+
+/*
+ * __turn_mmu_on_again
+ *
+ * does exactly what it advertises: turns the MMU on, again, and
+ * jumps to the *virtual* address in lr after the MMU is enabled.
+ */
+ .align L1_CACHE_SHIFT
+__turn_mmu_on_again:
+ mov r0, r0
+ mcr p15, 0, r0, c1, c0, 0
+ mrc p15, 0, r3, c0, c0, 0
+ mov r3, r3
+ mov r3, lr
+ bx lr
+ENDPROC(__turn_mmu_on_again)
--- /dev/null
+/*
+ * arch/arm/mach-tegra/cpu-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+
+#include <asm/smp_twd.h>
+#include <asm/system.h>
+
+#include <mach/hardware.h>
+#include <mach/clk.h>
+
+/*
+ * Frequency table index must be sequential starting at 0 and frequencies
+ * must be ascending.
+ */
+static struct cpufreq_frequency_table freq_table[] = {
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 760000 },
+ { 5, 816000 },
+ { 6, 912000 },
+ { 7, 1000000 },
+ { 8, CPUFREQ_TABLE_END },
+};
+
+#define NUM_CPUS 2
+
+static struct clk *cpu_clk;
+static struct clk *emc_clk;
+
+static unsigned long target_cpu_speed[NUM_CPUS];
+static DEFINE_MUTEX(tegra_cpu_lock);
+static bool is_suspended;
+
+unsigned int tegra_getspeed(unsigned int cpu);
+static int tegra_update_cpu_speed(unsigned long rate);
+static unsigned long tegra_cpu_highest_speed(void);
+
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+/* CPU frequency is gradually lowered when throttling is enabled */
+#define THROTTLE_LOWEST_INDEX 2 /* 456000 */
+#define THROTTLE_HIGHEST_INDEX 6 /* 912000 */
+#define THROTTLE_DELAY msecs_to_jiffies(2000)
+
+static bool is_throttling;
+static int throttle_index;
+static int throttle_next_index;
+static struct delayed_work throttle_work;
+static struct workqueue_struct *workqueue;
+
+#define tegra_cpu_is_throttling() (is_throttling)
+
+static void tegra_throttle_work_func(struct work_struct *work)
+{
+ unsigned int current_freq;
+
+ mutex_lock(&tegra_cpu_lock);
+ current_freq = tegra_getspeed(0);
+ throttle_index = throttle_next_index;
+
+ if (freq_table[throttle_index].frequency < current_freq)
+ tegra_update_cpu_speed(freq_table[throttle_index].frequency);
+
+ if (throttle_index > THROTTLE_LOWEST_INDEX) {
+ throttle_next_index = throttle_index - 1;
+ queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
+ }
+
+ mutex_unlock(&tegra_cpu_lock);
+}
+
+/*
+ * tegra_throttling_enable
+ * This function may sleep
+ */
+void tegra_throttling_enable(bool enable)
+{
+ mutex_lock(&tegra_cpu_lock);
+
+ if (enable && !is_throttling) {
+ unsigned int current_freq = tegra_getspeed(0);
+
+ is_throttling = true;
+
+ for (throttle_index = THROTTLE_HIGHEST_INDEX;
+ throttle_index >= THROTTLE_LOWEST_INDEX;
+ throttle_index--)
+ if (freq_table[throttle_index].frequency
+ < current_freq)
+ break;
+
+ throttle_index = max(throttle_index, THROTTLE_LOWEST_INDEX);
+ throttle_next_index = throttle_index;
+ queue_delayed_work(workqueue, &throttle_work, 0);
+ } else if (!enable && is_throttling) {
+ cancel_delayed_work_sync(&throttle_work);
+ is_throttling = false;
+ /* restore speed requested by governor */
+ tegra_update_cpu_speed(tegra_cpu_highest_speed());
+ }
+
+ mutex_unlock(&tegra_cpu_lock);
+}
+EXPORT_SYMBOL_GPL(tegra_throttling_enable);
+
+static unsigned int throttle_governor_speed(unsigned int requested_speed)
+{
+ return tegra_cpu_is_throttling() ?
+ min(requested_speed, freq_table[throttle_index].frequency) :
+ requested_speed;
+}
+
+static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", is_throttling);
+}
+
+cpufreq_freq_attr_ro(throttle);
+
+#ifdef CONFIG_DEBUG_FS
+static int throttle_debug_set(void *data, u64 val)
+{
+ tegra_throttling_enable(val);
+ return 0;
+}
+static int throttle_debug_get(void *data, u64 *val)
+{
+ *val = (u64) is_throttling;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set, "%llu\n");
+
+static struct dentry *cpu_tegra_debugfs_root;
+
+static int __init tegra_cpu_debug_init(void)
+{
+ cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);
+
+ if (!cpu_tegra_debugfs_root)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root, NULL, &throttle_fops))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(cpu_tegra_debugfs_root);
+ return -ENOMEM;
+
+}
+
+static void __exit tegra_cpu_debug_exit(void)
+{
+ debugfs_remove_recursive(cpu_tegra_debugfs_root);
+}
+
+late_initcall(tegra_cpu_debug_init);
+module_exit(tegra_cpu_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
+
+#else /* CONFIG_TEGRA_THERMAL_THROTTLE */
+#define tegra_cpu_is_throttling() (0)
+#define throttle_governor_speed(requested_speed) (requested_speed)
+
+void tegra_throttling_enable(bool enable)
+{
+}
+#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
+
+int tegra_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+unsigned int tegra_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NUM_CPUS)
+ return 0;
+
+ rate = clk_get_rate(cpu_clk) / 1000;
+ return rate;
+}
+
+static int tegra_update_cpu_speed(unsigned long rate)
+{
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+
+ freqs.old = tegra_getspeed(0);
+ freqs.new = rate;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ /*
+ * Vote on memory bus frequency based on cpu frequency
+ * This sets the minimum frequency, display or avp may request higher
+ */
+ if (rate >= 816000)
+ clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
+ else if (rate >= 456000)
+ clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */
+ else
+ clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
+
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
+ freqs.old, freqs.new);
+#endif
+
+ ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+ if (ret) {
+ pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
+ freqs.new);
+ return ret;
+ }
+
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned long tegra_cpu_highest_speed(void) {
+ unsigned long rate = 0;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = max(rate, target_cpu_speed[i]);
+ return rate;
+}
+
+static int tegra_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int idx;
+ unsigned int freq;
+ unsigned int new_speed;
+ int ret = 0;
+
+ mutex_lock(&tegra_cpu_lock);
+
+ if (is_suspended) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx);
+
+ freq = freq_table[idx].frequency;
+
+ target_cpu_speed[policy->cpu] = freq;
+ new_speed = throttle_governor_speed(tegra_cpu_highest_speed());
+ ret = tegra_update_cpu_speed(new_speed);
+out:
+ mutex_unlock(&tegra_cpu_lock);
+ return ret;
+}
+
+
+static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *dummy)
+{
+ mutex_lock(&tegra_cpu_lock);
+ if (event == PM_SUSPEND_PREPARE) {
+ is_suspended = true;
+ pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
+ freq_table[0].frequency);
+ tegra_update_cpu_speed(freq_table[0].frequency);
+ } else if (event == PM_POST_SUSPEND) {
+ is_suspended = false;
+ }
+ mutex_unlock(&tegra_cpu_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpu_pm_notifier = {
+ .notifier_call = tegra_pm_notify,
+};
+
+static int tegra_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu >= NUM_CPUS)
+ return -EINVAL;
+
+ cpu_clk = clk_get_sys(NULL, "cpu");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ emc_clk = clk_get_sys("cpu", "emc");
+ if (IS_ERR(emc_clk)) {
+ clk_put(cpu_clk);
+ return PTR_ERR(emc_clk);
+ }
+ clk_enable(emc_clk);
+
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ policy->cur = tegra_getspeed(policy->cpu);
+ target_cpu_speed[policy->cpu] = policy->cur;
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_copy(policy->related_cpus, cpu_possible_mask);
+
+ if (policy->cpu == 0) {
+ register_pm_notifier(&tegra_cpu_pm_notifier);
+ }
+
+ return 0;
+}
+
+static int tegra_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
+ clk_put(cpu_clk);
+ return 0;
+}
+
+static struct freq_attr *tegra_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+ &throttle,
+#endif
+ NULL,
+};
+
+static struct cpufreq_driver tegra_cpufreq_driver = {
+ .verify = tegra_verify_speed,
+ .target = tegra_target,
+ .get = tegra_getspeed,
+ .init = tegra_cpu_init,
+ .exit = tegra_cpu_exit,
+ .name = "tegra",
+ .attr = tegra_cpufreq_attr,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+ /*
+ * High-priority, other flags default: not bound to a specific
+ * CPU, has rescue worker task (in case of allocation deadlock,
+ * etc.). Single-threaded.
+ */
+ workqueue = alloc_workqueue("cpu-tegra",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
+ if (!workqueue)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);
+#endif
+ return cpufreq_register_driver(&tegra_cpufreq_driver);
+}
+
+static void __exit tegra_cpufreq_exit(void)
+{
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+ destroy_workqueue(workqueue);
+#endif
+ cpufreq_unregister_driver(&tegra_cpufreq_driver);
+}
+
+
+MODULE_AUTHOR("Colin Cross <ccross@android.com>");
+MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
+MODULE_LICENSE("GPL");
+module_init(tegra_cpufreq_init);
+module_exit(tegra_cpufreq_exit);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/cpuidle.c
+ *
+ * CPU idle driver for Tegra CPUs
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/localtimer.h>
+
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/legacy_irq.h>
+#include <mach/suspend.h>
+
+#include "power.h"
+
+#define TEGRA_CPUIDLE_BOTH_IDLE INT_QUAD_RES_24
+#define TEGRA_CPUIDLE_TEAR_DOWN INT_QUAD_RES_25
+
+#define EVP_CPU_RESET_VECTOR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
+#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
+
+static bool lp2_in_idle __read_mostly = true;
+static bool lp2_disabled_by_suspend;
+module_param(lp2_in_idle, bool, 0644);
+
+static s64 tegra_cpu1_idle_time = LLONG_MAX;
+static int tegra_lp2_exit_latency;
+static int tegra_lp2_power_off_time;
+
+static struct {
+ unsigned int cpu_ready_count[2];
+ unsigned long long cpu_wants_lp2_time[2];
+ unsigned long long in_lp2_time;
+ unsigned int both_idle_count;
+ unsigned int tear_down_count;
+ unsigned int lp2_count;
+ unsigned int lp2_completed_count;
+ unsigned int lp2_count_bin[32];
+ unsigned int lp2_completed_count_bin[32];
+ unsigned int lp2_int_count[NR_IRQS];
+ unsigned int last_lp2_int_count[NR_IRQS];
+} idle_stats;
+
+struct cpuidle_driver tegra_idle = {
+ .name = "tegra_idle",
+ .owner = THIS_MODULE,
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device *, idle_devices);
+
+#define FLOW_CTRL_WAITEVENT (2<<29)
+#define FLOW_CTRL_JTAG_RESUME (1<<28)
+#define FLOW_CTRL_HALT_CPUx_EVENTS(cpu) ((cpu)?((cpu-1)*0x8 + 0x14) : 0x0)
+
+#define PMC_SCRATCH_38 0x134
+#define PMC_SCRATCH_39 0x138
+
+#define CLK_RESET_CLK_MASK_ARM 0x44
+
+static inline unsigned int time_to_bin(unsigned int time)
+{
+ return fls(time);
+}
+
+static inline void tegra_unmask_irq(int irq)
+{
+ struct irq_chip *chip = get_irq_chip(irq);
+ chip->unmask(irq);
+}
+
+static inline void tegra_mask_irq(int irq)
+{
+ struct irq_chip *chip = get_irq_chip(irq);
+ chip->mask(irq);
+}
+
+static inline int tegra_pending_interrupt(void)
+{
+ void __iomem *gic_cpu = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100);
+ u32 reg = readl(gic_cpu + 0x18);
+ reg &= 0x3FF;
+
+ return reg;
+}
+
+static inline void tegra_flow_wfi(struct cpuidle_device *dev)
+{
+ void __iomem *flow_ctrl = IO_ADDRESS(TEGRA_FLOW_CTRL_BASE);
+ u32 reg = FLOW_CTRL_WAITEVENT | FLOW_CTRL_JTAG_RESUME;
+
+ flow_ctrl = flow_ctrl + FLOW_CTRL_HALT_CPUx_EVENTS(dev->cpu);
+
+ stop_critical_timings();
+ dsb();
+ __raw_writel(reg, flow_ctrl);
+ reg = __raw_readl(flow_ctrl);
+ __asm__ volatile ("wfi");
+ __raw_writel(0, flow_ctrl);
+ reg = __raw_readl(flow_ctrl);
+ start_critical_timings();
+}
+
+#ifdef CONFIG_SMP
+static inline bool tegra_wait_for_both_idle(struct cpuidle_device *dev)
+{
+ int wake_int;
+
+ tegra_unmask_irq(TEGRA_CPUIDLE_BOTH_IDLE);
+
+ tegra_flow_wfi(dev);
+
+ wake_int = tegra_pending_interrupt();
+
+ tegra_mask_irq(TEGRA_CPUIDLE_BOTH_IDLE);
+
+ return wake_int == TEGRA_CPUIDLE_BOTH_IDLE &&
+ tegra_pending_interrupt() == 1023;
+}
+
+static inline bool tegra_wait_for_tear_down(struct cpuidle_device *dev)
+{
+ int wake_int;
+ irq_set_affinity(TEGRA_CPUIDLE_TEAR_DOWN, cpumask_of(1));
+ tegra_unmask_irq(TEGRA_CPUIDLE_TEAR_DOWN);
+
+ tegra_flow_wfi(dev);
+
+ wake_int = tegra_pending_interrupt();
+
+ tegra_mask_irq(TEGRA_CPUIDLE_TEAR_DOWN);
+
+ return wake_int == TEGRA_CPUIDLE_TEAR_DOWN &&
+ tegra_pending_interrupt() == 1023;
+}
+
+static inline bool tegra_cpu_in_reset(int cpu)
+{
+ return !!(readl(CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET) & (1 << cpu));
+}
+
+static int tegra_tear_down_cpu1(void)
+{
+ u32 reg;
+
+ /* Signal to CPU1 to tear down */
+ tegra_legacy_force_irq_set(TEGRA_CPUIDLE_TEAR_DOWN);
+
+ /* At this point, CPU0 can no longer abort LP2, but CPU1 can */
+ /* TODO: any way not to poll here? Use the LP2 timer to wfi? */
+ /* takes ~80 us */
+ while (!tegra_cpu_in_reset(1) &&
+ tegra_legacy_force_irq_status(TEGRA_CPUIDLE_BOTH_IDLE))
+ cpu_relax();
+
+ tegra_legacy_force_irq_clr(TEGRA_CPUIDLE_TEAR_DOWN);
+
+ /* If CPU1 aborted LP2, restart the process */
+ if (!tegra_legacy_force_irq_status(TEGRA_CPUIDLE_BOTH_IDLE))
+ return -EAGAIN;
+
+ /* CPU1 is ready for LP2, clock gate it */
+ reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | (1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+
+ return 0;
+}
+
+static void tegra_wake_cpu1(void)
+{
+ unsigned long boot_vector;
+ unsigned long old_boot_vector;
+ unsigned long timeout;
+ u32 reg;
+
+ boot_vector = virt_to_phys(tegra_hotplug_startup);
+ old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
+ writel(boot_vector, EVP_CPU_RESET_VECTOR);
+
+ /* enable cpu clock on cpu */
+ reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~(1 << (8 + 1)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+
+ reg = 0x1111 << 1;
+ writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+
+ /* unhalt the cpu */
+ writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
+ break;
+ udelay(10);
+ }
+
+ /* put the old boot vector back */
+ writel(old_boot_vector, EVP_CPU_RESET_VECTOR);
+
+ /* CPU1 is now started */
+}
+#else
+static inline bool tegra_wait_for_both_idle(struct cpuidle_device *dev)
+{
+ return true;
+}
+
+static inline int tegra_tear_down_cpu1(void)
+{
+ return 0;
+}
+
+static inline void tegra_wake_cpu1(void)
+{
+}
+#endif
+
+static void tegra_idle_enter_lp2_cpu0(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ s64 request;
+ ktime_t enter;
+ ktime_t exit;
+ bool sleep_completed = false;
+ int bin;
+
+restart:
+ if (!tegra_wait_for_both_idle(dev))
+ return;
+
+ idle_stats.both_idle_count++;
+
+ if (need_resched())
+ return;
+
+ /* CPU1 woke CPU0 because both are idle */
+
+ request = ktime_to_us(tick_nohz_get_sleep_length());
+ if (request < state->target_residency) {
+ /* Not enough time left to enter LP2 */
+ tegra_flow_wfi(dev);
+ return;
+ }
+
+ idle_stats.tear_down_count++;
+
+ if (tegra_tear_down_cpu1())
+ goto restart;
+
+ /* Enter LP2 */
+ request = ktime_to_us(tick_nohz_get_sleep_length());
+ smp_rmb();
+ request = min_t(s64, request, tegra_cpu1_idle_time);
+
+ enter = ktime_get();
+ if (request > state->target_residency) {
+ s64 sleep_time = request - tegra_lp2_exit_latency;
+
+ bin = time_to_bin((u32)request / 1000);
+ idle_stats.lp2_count++;
+ idle_stats.lp2_count_bin[bin]++;
+
+ if (tegra_suspend_lp2(sleep_time) == 0)
+ sleep_completed = true;
+ else
+ idle_stats.lp2_int_count[tegra_pending_interrupt()]++;
+ }
+
+ /* Bring CPU1 out of LP2 */
+ /* TODO: polls for CPU1 to boot, wfi would be better */
+ /* takes ~80 us */
+
+ /* set the reset vector to point to the secondary_startup routine */
+ smp_wmb();
+
+ tegra_wake_cpu1();
+
+ /*
+ * TODO: is it worth going back to wfi if no interrupt is pending
+ * and the requested sleep time has not passed?
+ */
+
+ exit = ktime_get();
+ if (sleep_completed) {
+ /*
+ * Stayed in LP2 for the full time until the next tick,
+ * adjust the exit latency based on measurement
+ */
+ int offset = ktime_to_us(ktime_sub(exit, enter)) - request;
+ int latency = tegra_lp2_exit_latency + offset / 16;
+ latency = clamp(latency, 0, 10000);
+ tegra_lp2_exit_latency = latency;
+ smp_wmb();
+
+ idle_stats.lp2_completed_count++;
+ idle_stats.lp2_completed_count_bin[bin]++;
+ idle_stats.in_lp2_time += ktime_to_us(ktime_sub(exit, enter));
+
+ pr_debug("%lld %lld %d %d\n", request,
+ ktime_to_us(ktime_sub(exit, enter)),
+ offset, bin);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void tegra_idle_enter_lp2_cpu1(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ u32 twd_ctrl;
+ u32 twd_load;
+ s64 request;
+
+ tegra_legacy_force_irq_set(TEGRA_CPUIDLE_BOTH_IDLE);
+
+ if (!tegra_wait_for_tear_down(dev))
+ goto out;
+
+ if (need_resched())
+ goto out;
+
+ /*
+ * CPU1 woke CPU0 because both were idle
+ * CPU0 responded by waking CPU1 to tell it to disable itself
+ */
+
+ request = ktime_to_us(tick_nohz_get_sleep_length());
+ if (request < tegra_lp2_exit_latency) {
+ /*
+ * Not enough time left to enter LP2
+ * Signal to CPU0 that CPU1 rejects LP2, and stay in
+ */
+ tegra_legacy_force_irq_clr(TEGRA_CPUIDLE_BOTH_IDLE);
+ tegra_flow_wfi(dev);
+ goto out;
+ }
+
+ tegra_cpu1_idle_time = request;
+ smp_wmb();
+
+ /* Prepare CPU1 for LP2 by putting it in reset */
+
+ stop_critical_timings();
+ gic_cpu_exit(0);
+ barrier();
+ twd_ctrl = readl(twd_base + 0x8);
+ twd_load = readl(twd_base + 0);
+
+ flush_cache_all();
+ barrier();
+ __cortex_a9_save(0);
+ /* CPU1 is in reset, waiting for CPU0 to boot it, possibly after LP2 */
+
+
+ /* CPU0 booted CPU1 out of reset */
+ barrier();
+ writel(twd_ctrl, twd_base + 0x8);
+ writel(twd_load, twd_base + 0);
+ gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
+ tegra_unmask_irq(IRQ_LOCALTIMER);
+
+ tegra_legacy_force_irq_clr(TEGRA_CPUIDLE_BOTH_IDLE);
+
+ writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
+ start_critical_timings();
+
+ /*
+ * TODO: is it worth going back to wfi if no interrupt is pending
+ * and the requested sleep time has not passed?
+ */
+
+ return;
+
+out:
+ tegra_legacy_force_irq_clr(TEGRA_CPUIDLE_BOTH_IDLE);
+}
+#endif
+
+static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ ktime_t enter, exit;
+ s64 us;
+
+ local_irq_disable();
+ local_fiq_disable();
+
+ enter = ktime_get();
+ if (!need_resched())
+ tegra_flow_wfi(dev);
+ exit = ktime_sub(ktime_get(), enter);
+ us = ktime_to_us(exit);
+
+ local_fiq_enable();
+ local_irq_enable();
+ return (int)us;
+}
+
+static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ ktime_t enter, exit;
+ s64 us;
+
+ if (!lp2_in_idle || lp2_disabled_by_suspend)
+ return tegra_idle_enter_lp3(dev, state);
+
+ local_irq_disable();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ local_fiq_disable();
+ enter = ktime_get();
+
+ idle_stats.cpu_ready_count[dev->cpu]++;
+
+#ifdef CONFIG_SMP
+ if (dev->cpu == 0)
+ tegra_idle_enter_lp2_cpu0(dev, state);
+ else
+ tegra_idle_enter_lp2_cpu1(dev, state);
+#else
+ tegra_idle_enter_lp2_cpu0(dev, state);
+#endif
+
+ exit = ktime_sub(ktime_get(), enter);
+ us = ktime_to_us(exit);
+
+ local_fiq_enable();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ local_irq_enable();
+
+ smp_rmb();
+ state->exit_latency = tegra_lp2_exit_latency;
+ state->target_residency = tegra_lp2_exit_latency +
+ tegra_lp2_power_off_time;
+
+ idle_stats.cpu_wants_lp2_time[dev->cpu] += us;
+
+ return (int)us;
+}
+
+static int tegra_cpuidle_register_device(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+ struct cpuidle_state *state;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->state_count = 0;
+ dev->cpu = cpu;
+
+ tegra_lp2_power_off_time = tegra_cpu_power_off_time();
+
+ state = &dev->states[0];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LP3");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU flow-controlled");
+ state->exit_latency = 10;
+ state->target_residency = 10;
+ state->power_usage = 600;
+ state->flags = CPUIDLE_FLAG_SHALLOW | CPUIDLE_FLAG_TIME_VALID;
+ state->enter = tegra_idle_enter_lp3;
+ dev->safe_state = state;
+ dev->state_count++;
+
+ state = &dev->states[1];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LP2");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power-gate");
+ state->exit_latency = tegra_cpu_power_good_time();
+
+ state->target_residency = tegra_cpu_power_off_time() +
+ tegra_cpu_power_good_time();
+ state->power_usage = 0;
+ state->flags = CPUIDLE_FLAG_BALANCED | CPUIDLE_FLAG_TIME_VALID;
+ state->enter = tegra_idle_enter_lp2;
+
+ dev->power_specified = 1;
+ dev->safe_state = state;
+ dev->state_count++;
+
+ if (cpuidle_register_device(dev)) {
+ pr_err("CPU%u: failed to register idle device\n", cpu);
+ kfree(dev);
+ return -EIO;
+ }
+ per_cpu(idle_devices, cpu) = dev;
+ return 0;
+}
+
+/* The IRQs that are used for communication between the cpus to agree on the
+ * cpuidle state should never get handled
+ */
+static irqreturn_t tegra_cpuidle_irq(int irq, void *dev)
+{
+ pr_err("%s: unexpected interrupt %d on cpu %d\n", __func__, irq,
+ smp_processor_id());
+ BUG();
+}
+
+static int tegra_cpuidle_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+ if (event == PM_SUSPEND_PREPARE)
+ lp2_disabled_by_suspend = true;
+ else if (event == PM_POST_SUSPEND)
+ lp2_disabled_by_suspend = false;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpuidle_pm_notifier = {
+ .notifier_call = tegra_cpuidle_pm_notify,
+};
+
+static int __init tegra_cpuidle_init(void)
+{
+ unsigned int cpu;
+ void __iomem *mask_arm;
+ unsigned int reg;
+ int ret;
+
+ irq_set_affinity(TEGRA_CPUIDLE_BOTH_IDLE, cpumask_of(0));
+ irq_set_affinity(TEGRA_CPUIDLE_TEAR_DOWN, cpumask_of(1));
+
+ ret = request_irq(TEGRA_CPUIDLE_BOTH_IDLE, tegra_cpuidle_irq,
+ IRQF_NOAUTOEN, "tegra_cpuidle_both_idle", NULL);
+ if (ret) {
+ pr_err("%s: Failed to request cpuidle irq\n", __func__);
+ return ret;
+ }
+
+ ret = request_irq(TEGRA_CPUIDLE_TEAR_DOWN, tegra_cpuidle_irq,
+ IRQF_NOAUTOEN, "tegra_cpuidle_tear_down_cpu1", NULL);
+ if (ret) {
+ pr_err("%s: Failed to request cpuidle irq\n", __func__);
+ return ret;
+ }
+
+
+ disable_irq(TEGRA_CPUIDLE_BOTH_IDLE);
+ disable_irq(TEGRA_CPUIDLE_TEAR_DOWN);
+ tegra_mask_irq(TEGRA_CPUIDLE_BOTH_IDLE);
+ tegra_mask_irq(TEGRA_CPUIDLE_TEAR_DOWN);
+
+ mask_arm = IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RESET_CLK_MASK_ARM;
+
+ reg = readl(mask_arm);
+ writel(reg | (1<<31), mask_arm);
+
+ ret = cpuidle_register_driver(&tegra_idle);
+
+ if (ret)
+ return ret;
+
+ for_each_possible_cpu(cpu) {
+ if (tegra_cpuidle_register_device(cpu))
+ pr_err("CPU%u: error initializing idle loop\n", cpu);
+ }
+
+ tegra_lp2_exit_latency = tegra_cpu_power_good_time();
+
+ register_pm_notifier(&tegra_cpuidle_pm_notifier);
+
+ return 0;
+}
+
+static void __exit tegra_cpuidle_exit(void)
+{
+ cpuidle_unregister_driver(&tegra_idle);
+}
+
+module_init(tegra_cpuidle_init);
+module_exit(tegra_cpuidle_exit);
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_lp2_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ int i;
+ seq_printf(s, " cpu0 cpu1\n");
+ seq_printf(s, "-------------------------------------------------\n");
+ seq_printf(s, "cpu ready: %8u %8u\n",
+ idle_stats.cpu_ready_count[0],
+ idle_stats.cpu_ready_count[1]);
+ seq_printf(s, "both idle: %8u %7u%% %7u%%\n",
+ idle_stats.both_idle_count,
+ idle_stats.both_idle_count * 100 /
+ (idle_stats.cpu_ready_count[0] ?: 1),
+ idle_stats.both_idle_count * 100 /
+ (idle_stats.cpu_ready_count[1] ?: 1));
+ seq_printf(s, "tear down: %8u %7u%%\n", idle_stats.tear_down_count,
+ idle_stats.tear_down_count * 100 /
+ (idle_stats.both_idle_count ?: 1));
+ seq_printf(s, "lp2: %8u %7u%%\n", idle_stats.lp2_count,
+ idle_stats.lp2_count * 100 /
+ (idle_stats.both_idle_count ?: 1));
+ seq_printf(s, "lp2 completed: %8u %7u%%\n",
+ idle_stats.lp2_completed_count,
+ idle_stats.lp2_completed_count * 100 /
+ (idle_stats.lp2_count ?: 1));
+
+ seq_printf(s, "\n");
+ seq_printf(s, "cpu ready time: %8llu %8llu ms\n",
+ div64_u64(idle_stats.cpu_wants_lp2_time[0], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[1], 1000));
+ seq_printf(s, "lp2 time: %8llu ms %7d%% %7d%%\n",
+ div64_u64(idle_stats.in_lp2_time, 1000),
+ (int)div64_u64(idle_stats.in_lp2_time * 100,
+ idle_stats.cpu_wants_lp2_time[0] ?: 1),
+ (int)div64_u64(idle_stats.in_lp2_time * 100,
+ idle_stats.cpu_wants_lp2_time[1] ?: 1));
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%19s %8s %8s %8s\n", "", "lp2", "comp", "%");
+ seq_printf(s, "-------------------------------------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (idle_stats.lp2_count_bin[bin] == 0)
+ continue;
+ seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
+ 1 << (bin - 1), 1 << bin,
+ idle_stats.lp2_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin],
+ idle_stats.lp2_completed_count_bin[bin] * 100 /
+ idle_stats.lp2_count_bin[bin]);
+ }
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%3s %20s %6s %10s\n",
+ "int", "name", "count", "last count");
+ seq_printf(s, "--------------------------------------------\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ if (idle_stats.lp2_int_count[i] == 0)
+ continue;
+ seq_printf(s, "%3d %20s %6d %10d\n",
+ i, irq_to_desc(i)->action ?
+ irq_to_desc(i)->action->name ?: "???" : "???",
+ idle_stats.lp2_int_count[i],
+ idle_stats.lp2_int_count[i] -
+ idle_stats.last_lp2_int_count[i]);
+ idle_stats.last_lp2_int_count[i] = idle_stats.lp2_int_count[i];
+ };
+ return 0;
+}
+
+static int tegra_lp2_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_lp2_debug_show, inode->i_private);
+}
+
+static const struct file_operations tegra_lp2_debug_ops = {
+ .open = tegra_lp2_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_cpuidle_debug_init(void)
+{
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = debugfs_create_dir("cpuidle", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file("lp2", S_IRUGO, dir, NULL,
+ &tegra_lp2_debug_ops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+#endif
+
+late_initcall(tegra_cpuidle_debug_init);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/delay.S
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/iomap.h>
+#include <mach/io.h>
+#include "power-macros.S"
+
+ .text
+
+@ Busy-wait delay based on the Tegra free-running microsecond timer
+@ (TMRUS).  r0 = number of microseconds to wait.
+@ NOTE(review): __const_udelay shares this body, i.e. it is assumed to
+@ also receive plain microseconds.  The generic ARM __const_udelay
+@ takes a pre-scaled loop count instead -- confirm that this mach's
+@ delay.h (ARCH_PROVIDES_UDELAY) really routes microseconds here.
+ENTRY(__udelay)
+ENTRY(__const_udelay)
+ mov32 r3, (IO_PPSB_VIRT + TEGRA_TMRUS_BASE - IO_PPSB_PHYS)
+ ldr r1, [r3]
+
+/* r0 - usecs to wait
+ * r1 - initial value of the counter
+ */
+loop:
+ ldr r2, [r3]
+ sub r2, r2, r1 @ elapsed = now - start; unsigned subtract is wrap-safe
+ cmp r2, r0
+ bls loop @ keep spinning while elapsed <= requested
+ mov pc, lr
+ENDPROC(__const_udelay)
+ENDPROC(__udelay)
+
+
+@ Delay routine
+@ Spin for r0 iterations (uncalibrated loop count, not time units).
+@ subs+bhi falls through when the count reaches zero, and an initial
+@ r0 of 0 borrows on the first subtract, so it returns immediately.
+ENTRY(__delay)
+ subs r0, r0, #1
+ bhi __delay
+ mov pc, lr
+ENDPROC(__delay)
--- /dev/null
+/*
+ * arch/arm/mach-tegra/devices.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ * Erik Gilling <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/serial_8250.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/dma.h>
+
+static struct resource i2c_resource1[] = {
+ [0] = {
+ .start = INT_I2C,
+ .end = INT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C_BASE,
+ .end = TEGRA_I2C_BASE + TEGRA_I2C_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource2[] = {
+ [0] = {
+ .start = INT_I2C2,
+ .end = INT_I2C2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C2_BASE,
+ .end = TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource3[] = {
+ [0] = {
+ .start = INT_I2C3,
+ .end = INT_I2C3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_I2C3_BASE,
+ .end = TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource i2c_resource4[] = {
+ [0] = {
+ .start = INT_DVC,
+ .end = INT_DVC,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_DVC_BASE,
+ .end = TEGRA_DVC_BASE + TEGRA_DVC_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device tegra_i2c_device1 = {
+ .name = "tegra-i2c",
+ .id = 0,
+ .resource = i2c_resource1,
+ .num_resources = ARRAY_SIZE(i2c_resource1),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device2 = {
+ .name = "tegra-i2c",
+ .id = 1,
+ .resource = i2c_resource2,
+ .num_resources = ARRAY_SIZE(i2c_resource2),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device3 = {
+ .name = "tegra-i2c",
+ .id = 2,
+ .resource = i2c_resource3,
+ .num_resources = ARRAY_SIZE(i2c_resource3),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+struct platform_device tegra_i2c_device4 = {
+ .name = "tegra-i2c",
+ .id = 3,
+ .resource = i2c_resource4,
+ .num_resources = ARRAY_SIZE(i2c_resource4),
+ .dev = {
+ .platform_data = 0,
+ },
+};
+
+static struct resource spi_resource1[] = {
+ [0] = {
+ .start = INT_S_LINK1,
+ .end = INT_S_LINK1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI1_BASE,
+ .end = TEGRA_SPI1_BASE + TEGRA_SPI1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource2[] = {
+ [0] = {
+ .start = INT_SPI_2,
+ .end = INT_SPI_2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI2_BASE,
+ .end = TEGRA_SPI2_BASE + TEGRA_SPI2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource3[] = {
+ [0] = {
+ .start = INT_SPI_3,
+ .end = INT_SPI_3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI3_BASE,
+ .end = TEGRA_SPI3_BASE + TEGRA_SPI3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource spi_resource4[] = {
+ [0] = {
+ .start = INT_SPI_4,
+ .end = INT_SPI_4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SPI4_BASE,
+ .end = TEGRA_SPI4_BASE + TEGRA_SPI4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device tegra_spi_device1 = {
+ .name = "spi_tegra",
+ .id = 0,
+ .resource = spi_resource1,
+ .num_resources = ARRAY_SIZE(spi_resource1),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device2 = {
+ .name = "spi_tegra",
+ .id = 1,
+ .resource = spi_resource2,
+ .num_resources = ARRAY_SIZE(spi_resource2),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device3 = {
+ .name = "spi_tegra",
+ .id = 2,
+ .resource = spi_resource3,
+ .num_resources = ARRAY_SIZE(spi_resource3),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+struct platform_device tegra_spi_device4 = {
+ .name = "spi_tegra",
+ .id = 3,
+ .resource = spi_resource4,
+ .num_resources = ARRAY_SIZE(spi_resource4),
+ .dev = {
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+
+static struct resource sdhci_resource1[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC2,
+ .end = INT_SDMMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC2_BASE,
+ .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource4[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+/* Board files should fill in platform_data and register the devices
+ * themselves.  See board-harmony.c for an example.
+ */
+struct platform_device tegra_sdhci_device1 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource1,
+ .num_resources = ARRAY_SIZE(sdhci_resource1),
+};
+
+struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 1,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+};
+
+struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+};
+
+struct platform_device tegra_sdhci_device4 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource4,
+ .num_resources = ARRAY_SIZE(sdhci_resource4),
+};
+
+static struct resource w1_resources[] = {
+ [0] = {
+ .start = INT_OWR,
+ .end = INT_OWR,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_OWR_BASE,
+ .end = TEGRA_OWR_BASE + TEGRA_OWR_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_w1_device = {
+ .name = "tegra_w1",
+ .id = -1,
+ .resource = w1_resources,
+ .num_resources = ARRAY_SIZE(w1_resources),
+};
+
+static struct resource tegra_udc_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_usb1_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_usb2_resources[] = {
+ [0] = {
+ .start = TEGRA_USB2_BASE,
+ .end = TEGRA_USB2_BASE + TEGRA_USB2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB2,
+ .end = INT_USB2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_usb3_resources[] = {
+ [0] = {
+ .start = TEGRA_USB3_BASE,
+ .end = TEGRA_USB3_BASE + TEGRA_USB3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB3,
+ .end = INT_USB3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 tegra_udc_dmamask = DMA_BIT_MASK(32);
+
+static struct fsl_usb2_platform_data tegra_udc_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI,
+};
+
+struct platform_device tegra_udc_device = {
+ .name = "fsl-tegra-udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = &tegra_udc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &tegra_udc_pdata,
+ },
+ .resource = tegra_udc_resources,
+ .num_resources = ARRAY_SIZE(tegra_udc_resources),
+};
+
+static u64 tegra_ehci_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device tegra_ehci1_device = {
+ .name = "tegra-ehci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb1_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb1_resources),
+};
+
+struct platform_device tegra_ehci2_device = {
+ .name = "tegra-ehci",
+ .id = 1,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb2_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb2_resources),
+};
+
+struct platform_device tegra_ehci3_device = {
+ .name = "tegra-ehci",
+ .id = 2,
+ .dev = {
+ .dma_mask = &tegra_ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = tegra_usb3_resources,
+ .num_resources = ARRAY_SIZE(tegra_usb3_resources),
+};
+
+static struct resource tegra_otg_resources[] = {
+ [0] = {
+ .start = TEGRA_USB_BASE,
+ .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_USB,
+ .end = INT_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_otg_device = {
+ .name = "tegra-otg",
+ .id = -1,
+ .resource = tegra_otg_resources,
+ .num_resources = ARRAY_SIZE(tegra_otg_resources),
+};
+
+static struct resource i2s_resource1[] = {
+ [0] = {
+ .start = INT_I2S1,
+ .end = INT_I2S1,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_DMA_REQ_SEL_I2S_1,
+ .end = TEGRA_DMA_REQ_SEL_I2S_1,
+ .flags = IORESOURCE_DMA
+ },
+ [2] = {
+ .start = TEGRA_I2S1_BASE,
+ .end = TEGRA_I2S1_BASE + TEGRA_I2S1_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct resource i2s_resource2[] = {
+ [0] = {
+ .start = INT_I2S2,
+ .end = INT_I2S2,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_DMA_REQ_SEL_I2S2_1,
+ .end = TEGRA_DMA_REQ_SEL_I2S2_1,
+ .flags = IORESOURCE_DMA
+ },
+ [2] = {
+ .start = TEGRA_I2S2_BASE,
+ .end = TEGRA_I2S2_BASE + TEGRA_I2S2_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct resource spdif_resource[] = {
+ [0] = {
+ .start = INT_SPDIF,
+ .end = INT_SPDIF,
+ .flags = IORESOURCE_IRQ
+ },
+ [1] = {
+ .start = TEGRA_DMA_REQ_SEL_SPD_I,
+ .end = TEGRA_DMA_REQ_SEL_SPD_I,
+ .flags = IORESOURCE_DMA
+ },
+ [2] = {
+ .start = TEGRA_SPDIF_BASE,
+ .end = TEGRA_SPDIF_BASE + TEGRA_SPDIF_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+struct platform_device tegra_i2s_device1 = {
+ .name = "i2s",
+ .id = 0,
+ .resource = i2s_resource1,
+ .num_resources = ARRAY_SIZE(i2s_resource1),
+};
+
+struct platform_device tegra_i2s_device2 = {
+ .name = "i2s",
+ .id = 1,
+ .resource = i2s_resource2,
+ .num_resources = ARRAY_SIZE(i2s_resource2),
+};
+
+struct platform_device tegra_spdif_device = {
+ .name = "spdif_out",
+ .id = -1,
+ .resource = spdif_resource,
+ .num_resources = ARRAY_SIZE(spdif_resource),
+};
+
+static struct resource tegra_gart_resources[] = {
+ [0] = {
+ .name = "mc",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_MC_BASE,
+ .end = TEGRA_MC_BASE + TEGRA_MC_SIZE - 1,
+ },
+ [1] = {
+ .name = "gart",
+ .flags = IORESOURCE_MEM,
+ .start = TEGRA_GART_BASE,
+ .end = TEGRA_GART_BASE + TEGRA_GART_SIZE - 1,
+ }
+};
+
+struct platform_device tegra_gart_device = {
+ .name = "tegra_gart",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_gart_resources),
+ .resource = tegra_gart_resources
+};
+
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = INT_CPU0_PMU_INTR,
+ .end = INT_CPU0_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = INT_CPU1_PMU_INTR,
+ .end = INT_CPU1_PMU_INTR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
+#define CLK_RESET_RST_SOURCE 0x0
+static struct resource tegra_wdt_resources[] = {
+ [0] = {
+ .start = TEGRA_CLK_RESET_BASE + CLK_RESET_RST_SOURCE,
+ .end = TEGRA_CLK_RESET_BASE + CLK_RESET_RST_SOURCE + 4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = TEGRA_TMR1_BASE,
+ .end = TEGRA_TMR1_BASE + TEGRA_TMR1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = INT_TMR1,
+ .end = INT_TMR1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_wdt_device = {
+ .name = "tegra_wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_wdt_resources),
+ .resource = tegra_wdt_resources,
+};
+
+static struct resource tegra_pwfm0_resource = {
+ .start = TEGRA_PWFM0_BASE,
+ .end = TEGRA_PWFM0_BASE + TEGRA_PWFM0_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm1_resource = {
+ .start = TEGRA_PWFM1_BASE,
+ .end = TEGRA_PWFM1_BASE + TEGRA_PWFM1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm2_resource = {
+ .start = TEGRA_PWFM2_BASE,
+ .end = TEGRA_PWFM2_BASE + TEGRA_PWFM2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource tegra_pwfm3_resource = {
+ .start = TEGRA_PWFM3_BASE,
+ .end = TEGRA_PWFM3_BASE + TEGRA_PWFM3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device tegra_pwfm0_device = {
+ .name = "tegra_pwm",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &tegra_pwfm0_resource,
+};
+
+struct platform_device tegra_pwfm1_device = {
+ .name = "tegra_pwm",
+ .id = 1,
+ .num_resources = 1,
+ .resource = &tegra_pwfm1_resource,
+};
+
+struct platform_device tegra_pwfm2_device = {
+ .name = "tegra_pwm",
+ .id = 2,
+ .num_resources = 1,
+ .resource = &tegra_pwfm2_resource,
+};
+
+struct platform_device tegra_pwfm3_device = {
+ .name = "tegra_pwm",
+ .id = 3,
+ .num_resources = 1,
+ .resource = &tegra_pwfm3_resource,
+};
+
+static struct resource tegra_uarta_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTA_BASE,
+ .end = TEGRA_UARTA_BASE + TEGRA_UARTA_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTA,
+ .end = INT_UARTA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartb_resources[]= {
+ [0] = {
+ .start = TEGRA_UARTB_BASE,
+ .end = TEGRA_UARTB_BASE + TEGRA_UARTB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTB,
+ .end = INT_UARTB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartc_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTC_BASE,
+ .end = TEGRA_UARTC_BASE + TEGRA_UARTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTC,
+ .end = INT_UARTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uartd_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTD_BASE,
+ .end = TEGRA_UARTD_BASE + TEGRA_UARTD_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTD,
+ .end = INT_UARTD,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource tegra_uarte_resources[] = {
+ [0] = {
+ .start = TEGRA_UARTE_BASE,
+ .end = TEGRA_UARTE_BASE + TEGRA_UARTE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_UARTE,
+ .end = INT_UARTE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_uarta_device = {
+ .name = "tegra_uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(tegra_uarta_resources),
+ .resource = tegra_uarta_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartb_device = {
+ .name = "tegra_uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(tegra_uartb_resources),
+ .resource = tegra_uartb_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartc_device = {
+ .name = "tegra_uart",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(tegra_uartc_resources),
+ .resource = tegra_uartc_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uartd_device = {
+ .name = "tegra_uart",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(tegra_uartd_resources),
+ .resource = tegra_uartd_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device tegra_uarte_device = {
+ .name = "tegra_uart",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(tegra_uarte_resources),
+ .resource = tegra_uarte_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource tegra_grhost_resources[] = {
+ {
+ .start = TEGRA_HOST1X_BASE,
+ .end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_VI_BASE,
+ .end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_ISP_BASE,
+ .end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TEGRA_MPE_BASE,
+ .end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_SYNCPT_THRESH_BASE,
+ .end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_HOST1X_MPCORE_GENERAL,
+ .end = INT_HOST1X_MPCORE_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device tegra_grhost_device = {
+ .name = "tegra_grhost",
+ .id = -1,
+ .resource = tegra_grhost_resources,
+ .num_resources = ARRAY_SIZE(tegra_grhost_resources),
+};
+
+static struct resource tegra_avp_resources[] = {
+ [0] = {
+ .start = INT_SHR_SEM_INBOX_IBF,
+ .end = INT_SHR_SEM_INBOX_IBF,
+ .flags = IORESOURCE_IRQ,
+ .name = "mbox_from_avp_pending",
+ },
+};
+
+struct platform_device tegra_avp_device = {
+ .name = "tegra-avp",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(tegra_avp_resources),
+ .resource = tegra_avp_resources,
+ .dev = {
+ .coherent_dma_mask = 0xffffffffULL,
+ },
+};
+
+static struct resource tegra_aes_resources[] = {
+ {
+ .start = TEGRA_VDE_BASE,
+ .end = TEGRA_VDE_BASE + TEGRA_VDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static u64 tegra_aes_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device tegra_aes_device = {
+ .name = "tegra-aes",
+ .id = -1,
+ .resource = tegra_aes_resources,
+ .num_resources = ARRAY_SIZE(tegra_aes_resources),
+ .dev = {
+ .dma_mask = &tegra_aes_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
--- /dev/null
+/*
+ * arch/arm/mach-tegra/devices.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ * Erik Gilling <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_DEVICES_H
+#define __MACH_TEGRA_DEVICES_H
+
+#include <linux/platform_device.h>
+
+/* Platform devices defined in devices.c; board files register the
+ * subset they need (tegra_spdif_device was declared twice -- the
+ * duplicate is removed here).
+ */
+extern struct platform_device tegra_sdhci_device1;
+extern struct platform_device tegra_sdhci_device2;
+extern struct platform_device tegra_sdhci_device3;
+extern struct platform_device tegra_sdhci_device4;
+extern struct platform_device tegra_i2c_device1;
+extern struct platform_device tegra_i2c_device2;
+extern struct platform_device tegra_i2c_device3;
+extern struct platform_device tegra_i2c_device4;
+extern struct platform_device tegra_spi_device1;
+extern struct platform_device tegra_spi_device2;
+extern struct platform_device tegra_spi_device3;
+extern struct platform_device tegra_spi_device4;
+extern struct platform_device tegra_w1_device;
+extern struct platform_device tegra_udc_device;
+extern struct platform_device tegra_ehci1_device;
+extern struct platform_device tegra_ehci2_device;
+extern struct platform_device tegra_ehci3_device;
+extern struct platform_device tegra_i2s_device1;
+extern struct platform_device tegra_i2s_device2;
+extern struct platform_device tegra_gart_device;
+extern struct platform_device pmu_device;
+extern struct platform_device tegra_wdt_device;
+extern struct platform_device tegra_pwfm0_device;
+extern struct platform_device tegra_pwfm1_device;
+extern struct platform_device tegra_pwfm2_device;
+extern struct platform_device tegra_pwfm3_device;
+extern struct platform_device tegra_otg_device;
+extern struct platform_device tegra_uarta_device;
+extern struct platform_device tegra_uartb_device;
+extern struct platform_device tegra_uartc_device;
+extern struct platform_device tegra_uartd_device;
+extern struct platform_device tegra_uarte_device;
+extern struct platform_device tegra_spdif_device;
+extern struct platform_device tegra_grhost_device;
+extern struct platform_device tegra_avp_device;
+extern struct platform_device tegra_aes_device;
+
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/dma.c
+ *
+ * System DMA driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <mach/suspend.h>
+
+#define APB_DMA_GEN 0x000
+#define GEN_ENABLE (1<<31)
+
+#define APB_DMA_CNTRL 0x010
+
+#define APB_DMA_IRQ_MASK 0x01c
+
+#define APB_DMA_IRQ_MASK_SET 0x020
+
+#define APB_DMA_CHAN_CSR 0x000
+#define CSR_ENB (1<<31)
+#define CSR_IE_EOC (1<<30)
+#define CSR_HOLD (1<<29)
+#define CSR_DIR (1<<28)
+#define CSR_ONCE (1<<27)
+#define CSR_FLOW (1<<21)
+#define CSR_REQ_SEL_SHIFT 16
+#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
+#define CSR_WCOUNT_SHIFT 2
+#define CSR_WCOUNT_MASK 0xFFFC
+
+#define APB_DMA_CHAN_STA 0x004
+#define STA_BUSY (1<<31)
+#define STA_ISE_EOC (1<<30)
+#define STA_HALT (1<<29)
+#define STA_PING_PONG (1<<28)
+#define STA_COUNT_SHIFT 2
+#define STA_COUNT_MASK 0xFFFC
+
+#define APB_DMA_CHAN_AHB_PTR 0x010
+
+#define APB_DMA_CHAN_AHB_SEQ 0x014
+#define AHB_SEQ_INTR_ENB (1<<31)
+#define AHB_SEQ_BUS_WIDTH_SHIFT 28
+#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
+#define AHB_SEQ_DATA_SWAP (1<<27)
+#define AHB_SEQ_BURST_MASK (0x7<<24)
+#define AHB_SEQ_BURST_1 (4<<24)
+#define AHB_SEQ_BURST_4 (5<<24)
+#define AHB_SEQ_BURST_8 (6<<24)
+#define AHB_SEQ_DBL_BUF (1<<19)
+#define AHB_SEQ_WRAP_SHIFT 16
+#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
+
+#define APB_DMA_CHAN_APB_PTR 0x018
+
+#define APB_DMA_CHAN_APB_SEQ 0x01c
+#define APB_SEQ_BUS_WIDTH_SHIFT 28
+#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
+#define APB_SEQ_DATA_SWAP (1<<27)
+#define APB_SEQ_WRAP_SHIFT 16
+#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
+
+#define TEGRA_SYSTEM_DMA_CH_NR 16
+#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
+#define TEGRA_SYSTEM_DMA_CH_MIN 0
+#define TEGRA_SYSTEM_DMA_CH_MAX \
+ (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
+
+const unsigned int ahb_addr_wrap_table[8] = {
+ 0, 32, 64, 128, 256, 512, 1024, 2048
+};
+
+const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
+
+const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
+
+#define TEGRA_DMA_NAME_SIZE 16
+struct tegra_dma_channel {
+ struct list_head list;
+ int id;
+ spinlock_t lock;
+ char name[TEGRA_DMA_NAME_SIZE];
+ void __iomem *addr;
+ int mode;
+ int irq;
+ int req_transfer_count;
+};
+
+#define NV_DMA_MAX_CHANNELS 32
+
+static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
+
+static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
+static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
+
+static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+static void tegra_dma_stop(struct tegra_dma_channel *ch);
+
+/* Quiesce a channel: mask its end-of-chain interrupt, clear the enable
+ * bit, then acknowledge any already-latched EOC status so no stale
+ * interrupt fires after the stop.  The two CSR writes are deliberately
+ * separate so the interrupt is masked before the channel is disabled.
+ * Marked static to match the forward declaration above (the definition
+ * was missing the storage class, an inconsistent-linkage declaration).
+ */
+static void tegra_dma_stop(struct tegra_dma_channel *ch)
+{
+ u32 csr;
+ u32 status;
+
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
+ csr &= ~CSR_IE_EOC;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ csr &= ~CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC)
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+}
+
+/* NOTE(review): this masks the *status* register with CSR_ENB.  Since
+ * CSR_ENB and STA_BUSY are both (1<<31) the expression evaluates
+ * "channel busy", so the function appears to return true while the
+ * channel is still RUNNING -- the opposite of its name.  Callers may
+ * depend on the current polarity; confirm before changing.
+ */
+bool tegra_dma_is_stopped(struct tegra_dma_channel *ch)
+{
+ return !!(readl(ch->addr + APB_DMA_CHAN_STA) & CSR_ENB);
+}
+
+/* Drop every queued request on the channel and stop the hardware.
+ * Runs under the channel lock with IRQs off so the ISR cannot race the
+ * list teardown.  Note the dequeued requests' status fields are not
+ * updated here; always returns 0.
+ */
+int tegra_dma_cancel(struct tegra_dma_channel *ch)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ /* unlink entries one by one; the requests themselves are caller-owned */
+ while (!list_empty(&ch->list))
+ list_del(ch->list.next);
+
+ tegra_dma_stop(ch);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return 0;
+}
+
+/* should be called with the channel lock held */
+/* Compute how many bytes of @req the hardware has completed, from a
+ * STA snapshot taken while DMA was globally disabled.  The hardware
+ * counts 32-bit words, hence the final *4.  @status must be the value
+ * read before tegra_dma_stop() cleared it.
+ */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, unsigned int status)
+{
+ unsigned int to_transfer;
+ unsigned int req_transfer_count;
+
+ unsigned int bytes_transferred;
+
+ /* the COUNT field holds words *remaining*; both counts are stored
+ * as N-1 in hardware, so add 1 to each before doing arithmetic */
+ to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+ req_transfer_count = ch->req_transfer_count;
+ req_transfer_count += 1;
+ to_transfer += 1;
+
+ bytes_transferred = req_transfer_count;
+
+ /* still busy: subtract what is left to do from the programmed total */
+ if (status & STA_BUSY)
+ bytes_transferred -= to_transfer;
+
+ /* In continuous transfer mode, DMA only tracks the count of the
+ * half DMA buffer. So, if the DMA already finished half the DMA
+ * then add the half buffer to the completed count.
+ */
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+ bytes_transferred += req_transfer_count;
+
+ /* a pending EOC means one full programmed count already completed */
+ if (status & STA_ISE_EOC)
+ bytes_transferred += req_transfer_count;
+
+ bytes_transferred *= 4;
+
+ return bytes_transferred;
+}
+
+/* Remove @_req from the channel's queue.  If it is the head (i.e. the
+ * in-flight request) the controller is briefly disabled globally so a
+ * coherent transfer count can be read, then the next queued request is
+ * started.  Returns 0 on success, -EINVAL if @_req was not queued.
+ * The dequeued request's status is set to ABORTED (negated).
+ */
+int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req)
+{
+ struct tegra_dma_req *req = NULL;
+ int found = 0;
+ unsigned int status;
+ unsigned long irq_flags;
+ int stop = 0;
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ /* head-of-queue?  (pointer comparison only, so this is safe even on
+ * an empty list where list_entry yields the head sentinel) */
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+ stop = 1;
+
+ list_for_each_entry(req, &ch->list, node) {
+ if (req == _req) {
+ list_del(&req->node);
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return -EINVAL;
+ }
+
+ if (!stop)
+ goto skip_status;
+
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky.
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number of pending
+ * bytes to be transfered.
+ * - Stop the dma channel
+ * - Globally re-enable DMA to resume other transfers
+ */
+ spin_lock(&enable_lock);
+ writel(0, addr + APB_DMA_GEN);
+ udelay(20); /* let in-flight bus transactions drain before sampling */
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock(&enable_lock);
+
+ req->bytes_transferred = dma_active_count(ch, req, status);
+
+ if (!list_empty(&ch->list)) {
+ /* if the list is not empty, queue the next request */
+ struct tegra_dma_req *next_req;
+ next_req = list_entry(ch->list.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw(ch, next_req);
+ }
+skip_status:
+ req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dma_dequeue_req);
+
+/* Return true when no requests are queued on the channel.  The channel
+ * lock is taken so the answer is consistent with concurrent enqueues
+ * and the interrupt handler.
+ */
+bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
+{
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ empty = list_empty(&ch->list) ? true : false;
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return empty;
+}
+EXPORT_SYMBOL(tegra_dma_is_empty);
+
+/* Return true if @_req is still on the channel's queue.  Despite the
+ * name this reports "queued", not "currently being transferred by the
+ * hardware" -- every entry on ch->list matches, not only the head.
+ */
+bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req)
+{
+ unsigned long irq_flags;
+ struct tegra_dma_req *req;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ list_for_each_entry(req, &ch->list, node) {
+ if (req == _req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return false;
+}
+EXPORT_SYMBOL(tegra_dma_is_req_inflight);
+
+/* Queue @req on the channel and start it if the channel was idle.
+ * Requests must be word-aligned and within the controller's maximum
+ * transfer size.  Returns 0 on success, -EINVAL for a malformed
+ * request, -EEXIST if @req is already queued.
+ */
+int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ unsigned long irq_flags;
+ struct tegra_dma_req *_req;
+ int start_dma = 0;
+
+ if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
+ req->source_addr & 0x3 || req->dest_addr & 0x3) {
+ pr_err("Invalid DMA request for channel %d\n", ch->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ /* reject double-enqueue of the same request object */
+ list_for_each_entry(_req, &ch->list, node) {
+ if (req == _req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return -EEXIST;
+ }
+ }
+
+ req->bytes_transferred = 0;
+ req->status = 0;
+ /* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
+ if (list_empty(&ch->list))
+ start_dma = 1;
+
+ list_add_tail(&req->node, &ch->list);
+
+ if (start_dma)
+ tegra_dma_update_hw(ch, req);
+ /* Check to see if this request needs to be pushed immediately.
+ * For continuous single-buffer DMA:
+ * The first buffer is always in-flight. The 2nd buffer should
+ * also be in-flight. The 3rd buffer becomes in-flight when the
+ * first is completed in the interrupt.
+ */
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE) {
+ struct tegra_dma_req *first_req, *second_req;
+ first_req = list_entry(ch->list.next,
+ typeof(*first_req), node);
+ second_req = list_entry(first_req->node.next,
+ typeof(*second_req), node);
+ if (second_req == req) {
+ unsigned long status =
+ readl(ch->addr + APB_DMA_CHAN_STA);
+ if (!(status & STA_ISE_EOC))
+ tegra_dma_update_hw_partial(ch, req);
+ /* Handle the case where the IRQ fired while we're
+ * writing the interrupts.
+ */
+ if (status & STA_ISE_EOC) {
+ /* Interrupt fired, let the IRQ stop/restart
+ * the DMA with this buffer in a clean way.
+ */
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dma_enqueue_req);
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
+{
+	int channel;
+	struct tegra_dma_channel *ch = NULL;
+
+	mutex_lock(&tegra_dma_lock);
+
+	/* first channel is the shared channel */
+	if (mode & TEGRA_DMA_SHARED) {
+		channel = TEGRA_SYSTEM_DMA_CH_MIN;
+	} else {
+		channel = find_first_zero_bit(channel_usage,
+			ARRAY_SIZE(dma_channels));
+		if (channel >= ARRAY_SIZE(dma_channels)) {
+			pr_err("%s: failed to allocate a DMA channel\n",
+				__func__);
+			goto out;
+		}
+	}
+	__set_bit(channel, channel_usage);
+	ch = &dma_channels[channel];
+	ch->mode = mode;
+
+out:
+	mutex_unlock(&tegra_dma_lock);
+	return ch;
+}
+EXPORT_SYMBOL(tegra_dma_allocate_channel);
+
+void tegra_dma_free_channel(struct tegra_dma_channel *ch)
+{
+ if (ch->mode & TEGRA_DMA_SHARED)
+ return;
+ tegra_dma_cancel(ch);
+ mutex_lock(&tegra_dma_lock);
+ __clear_bit(ch->id, channel_usage);
+ mutex_unlock(&tegra_dma_lock);
+}
+EXPORT_SYMBOL(tegra_dma_free_channel);
+
+static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ u32 apb_ptr;
+ u32 ahb_ptr;
+ u32 csr;
+
+ if (req->to_memory) {
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
+ } else {
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
+ }
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+ ch->req_transfer_count = (req->size >> 3) - 1;
+ else
+ ch->req_transfer_count = (req->size >> 2) - 1;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
+ csr &= ~CSR_WCOUNT_MASK;
+ csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+ return;
+}
+
+static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req)
+{
+ int ahb_addr_wrap;
+ int apb_addr_wrap;
+ int ahb_bus_width;
+ int apb_bus_width;
+ int index;
+
+ u32 ahb_seq;
+ u32 apb_seq;
+ u32 ahb_ptr;
+ u32 apb_ptr;
+ u32 csr;
+
+ csr = CSR_IE_EOC | CSR_FLOW;
+ ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+ apb_seq = 0;
+
+ csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
+
+ ch->req_transfer_count = (req->size >> 2) - 1;
+
+ /* One shot mode is always single buffered. Continuous mode could
+ * support either.
+ */
+ if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
+ csr |= CSR_ONCE;
+ } else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) {
+ ahb_seq |= AHB_SEQ_DBL_BUF;
+ /* We want an interrupt halfway through, then on the
+ * completion. The double buffer means 2 interrupts
+ * pass before the DMA HW latches a new AHB_PTR etc.
+ */
+ ch->req_transfer_count = (req->size >> 3) - 1;
+ }
+ csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+
+ if (req->to_memory) {
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
+
+ apb_addr_wrap = req->source_wrap;
+ ahb_addr_wrap = req->dest_wrap;
+ apb_bus_width = req->source_bus_width;
+ ahb_bus_width = req->dest_bus_width;
+
+ } else {
+ csr |= CSR_DIR;
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
+
+ apb_addr_wrap = req->dest_wrap;
+ ahb_addr_wrap = req->source_wrap;
+ apb_bus_width = req->dest_bus_width;
+ ahb_bus_width = req->source_bus_width;
+ }
+
+ apb_addr_wrap >>= 2;
+ ahb_addr_wrap >>= 2;
+
+ /* set address wrap for APB size */
+ index = 0;
+ do {
+ if (apb_addr_wrap_table[index] == apb_addr_wrap)
+ break;
+ index++;
+ } while (index < ARRAY_SIZE(apb_addr_wrap_table));
+ BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
+ apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+
+ /* set address wrap for AHB size */
+ index = 0;
+ do {
+ if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
+ break;
+ index++;
+ } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
+ BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
+ ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+
+ for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
+ if (bus_width_table[index] == ahb_bus_width)
+ break;
+ }
+ BUG_ON(index == ARRAY_SIZE(bus_width_table));
+ ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+
+ for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
+ if (bus_width_table[index] == apb_bus_width)
+ break;
+ }
+ BUG_ON(index == ARRAY_SIZE(bus_width_table));
+ apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
+
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+ writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+
+ csr |= CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+}
+
+static void handle_oneshot_dma(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (list_empty(&ch->list)) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return;
+ }
+
+ req = list_entry(ch->list.next, typeof(*req), node);
+ if (req) {
+ list_del(&req->node);
+ req->bytes_transferred = req->size;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ /* Callback should be called without any lock */
+ pr_debug("%s: transferred %d bytes\n", __func__,
+ req->bytes_transferred);
+ req->complete(req);
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ }
+
+ if (!list_empty(&ch->list)) {
+ req = list_entry(ch->list.next, typeof(*req), node);
+ /* the complete function we just called may have enqueued
+ another req, in which case dma has already started */
+ if (req->status != TEGRA_DMA_REQ_INFLIGHT)
+ tegra_dma_update_hw(ch, req);
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+}
+
+static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
+{
+	struct tegra_dma_req *req;
+	struct tegra_dma_req *next_req;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+	if (list_empty(&ch->list)) {
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return;
+	}
+
+	req = list_entry(ch->list.next, typeof(*req), node);
+	if (req) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+			bool is_dma_ping_complete;
+			is_dma_ping_complete =
+				!!(readl(ch->addr + APB_DMA_CHAN_STA) &
+					STA_PING_PONG);
+			if (req->to_memory)
+				is_dma_ping_complete = !is_dma_ping_complete;
+			/* Out of sync - Release current buffer */
+			if (!is_dma_ping_complete) {
+				req->buffer_status =
+					TEGRA_DMA_REQ_BUF_STATUS_FULL;
+				req->bytes_transferred = req->size;
+				req->status = TEGRA_DMA_REQ_SUCCESS;
+				tegra_dma_stop(ch);
+
+				if (!list_is_last(&req->node, &ch->list)) {
+					next_req = list_entry(req->node.next,
+						typeof(*next_req), node);
+					tegra_dma_update_hw(ch, next_req);
+				}
+
+				list_del(&req->node);
+
+				/* DMA lock is NOT held when callback is
+				 * called. */
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+				req->complete(req);
+				return;
+			}
+			/* Load the next request into the hardware, if
+			 * available. */
+			if (!list_is_last(&req->node, &ch->list)) {
+				next_req = list_entry(req->node.next,
+					typeof(*next_req), node);
+				tegra_dma_update_hw_partial(ch, next_req);
+			}
+			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+			req->status = TEGRA_DMA_REQ_SUCCESS;
+			/* DMA lock is NOT held when callback is called */
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			if (likely(req->threshold))
+				req->threshold(req);
+			return;
+
+		} else if (req->buffer_status ==
+			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+			/* Callback when the buffer is completely full (i.e. on
+			 * the second interrupt) */
+
+			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+			req->bytes_transferred = req->size;
+			req->status = TEGRA_DMA_REQ_SUCCESS;
+			if (list_is_last(&req->node, &ch->list))
+				tegra_dma_stop(ch);
+			else {
+				/* The request may have arrived after the
+				 * half-buffer interrupt, so it needs to
+				 * start immediately */
+				next_req = list_entry(req->node.next,
+					typeof(*next_req), node);
+				if (next_req->status !=
+					TEGRA_DMA_REQ_INFLIGHT) {
+					tegra_dma_stop(ch);
+					tegra_dma_update_hw(ch, next_req);
+				}
+			}
+
+			list_del(&req->node);
+
+			/* DMA lock is NOT held when callback is called */
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			req->complete(req);
+			return;
+
+		} else {
+			tegra_dma_stop(ch);
+			/* DMA should have been stopped much earlier */
+			BUG();
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+}
+
+static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
+{
+ struct tegra_dma_req *req;
+ struct tegra_dma_req *next_req;
+ struct tegra_dma_req *next_next_req;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (list_empty(&ch->list)) {
+ tegra_dma_stop(ch);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("%s: No requests in the list.\n", __func__);
+ return;
+ }
+ req = list_entry(ch->list.next, typeof(*req), node);
+ if (!req || (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL)) {
+ tegra_dma_stop(ch);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("%s: DMA complete irq without corresponding req\n",
+ __func__);
+ return;
+ }
+
+ /* Handle the case when buffer is completely full */
+ req->bytes_transferred = req->size;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ if (list_is_last(&req->node, &ch->list)) {
+ pr_debug("%s: stop\n", __func__);
+ tegra_dma_stop(ch);
+ } else {
+ /* The next entry should have already been queued and is now
+ * in the middle of xfer. We can then write the next->next one
+ * if it exists.
+ */
+ next_req = list_entry(req->node.next, typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ pr_debug("%s: interrupt during enqueue\n", __func__);
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ } else if (!list_is_last(&next_req->node, &ch->list)) {
+ next_next_req = list_entry(next_req->node.next,
+ typeof(*next_next_req), node);
+ tegra_dma_update_hw_partial(ch, next_next_req);
+ }
+ }
+ list_del(&req->node);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+}
+
+static irqreturn_t dma_isr(int irq, void *data)
+{
+ struct tegra_dma_channel *ch = data;
+ unsigned long status;
+
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC)
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+ else {
+ pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
+ return IRQ_HANDLED;
+ }
+
+ if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
+ handle_oneshot_dma(ch);
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+ handle_continuous_dbl_dma(ch);
+ else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
+ handle_continuous_sngl_dma(ch);
+ else
+ pr_err("Bad channel mode for DMA ISR to handle\n");
+ return IRQ_HANDLED;
+}
+
+int __init tegra_dma_init(void)
+{
+ int ret = 0;
+ int i;
+ unsigned int irq;
+ void __iomem *addr;
+
+ addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ writel(0, addr + APB_DMA_CNTRL);
+ writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
+ addr + APB_DMA_IRQ_MASK_SET);
+
+ memset(channel_usage, 0, sizeof(channel_usage));
+ memset(dma_channels, 0, sizeof(dma_channels));
+
+ /* Reserve all the channels we are not supposed to touch */
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
+ __set_bit(i, channel_usage);
+
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+
+ __clear_bit(i, channel_usage);
+
+ ch->id = i;
+ snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
+
+ ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->list);
+
+ irq = INT_APB_DMA_CH0 + i;
+ ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
+ if (ret) {
+ pr_err("Failed to register IRQ %d for DMA %d\n",
+ irq, i);
+ goto fail;
+ }
+ ch->irq = irq;
+ }
+ /* mark the shared channel allocated */
+ __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
+
+ for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
+ __set_bit(i, channel_usage);
+
+ return ret;
+fail:
+ writel(0, addr + APB_DMA_GEN);
+ for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+ struct tegra_dma_channel *ch = &dma_channels[i];
+ if (ch->irq)
+ free_irq(ch->irq, ch);
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
+
+void tegra_dma_suspend(void)
+{
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ u32 *ctx = apb_dma;
+ int i;
+
+ *ctx++ = readl(addr + APB_DMA_GEN);
+ *ctx++ = readl(addr + APB_DMA_CNTRL);
+ *ctx++ = readl(addr + APB_DMA_IRQ_MASK);
+
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+ addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
+ *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
+ *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
+ }
+}
+
+void tegra_dma_resume(void)
+{
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ u32 *ctx = apb_dma;
+ int i;
+
+ writel(*ctx++, addr + APB_DMA_GEN);
+ writel(*ctx++, addr + APB_DMA_CNTRL);
+ writel(*ctx++, addr + APB_DMA_IRQ_MASK);
+
+ for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+ addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+ TEGRA_APB_DMA_CH0_SIZE * i);
+
+ writel(*ctx++, addr + APB_DMA_CHAN_CSR);
+ writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
+ writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
+ writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
+ }
+}
+
+#endif
--- /dev/null
+/*
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+
+#include <asm/clkdev.h>
+
+#include <mach/clk.h>
+
+#include "board.h"
+#include "clock.h"
+#include "dvfs.h"
+
+static LIST_HEAD(dvfs_rail_list);
+static DEFINE_MUTEX(dvfs_lock);
+
+static int dvfs_rail_update(struct dvfs_rail *rail);
+
+void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n)
+{
+ int i;
+ struct dvfs_relationship *rel;
+
+ mutex_lock(&dvfs_lock);
+
+ for (i = 0; i < n; i++) {
+ rel = &rels[i];
+ list_add_tail(&rel->from_node, &rel->to->relationships_from);
+ list_add_tail(&rel->to_node, &rel->from->relationships_to);
+ }
+
+ mutex_unlock(&dvfs_lock);
+}
+
+int tegra_dvfs_init_rails(struct dvfs_rail *rails[], int n)
+{
+	int i;
+
+	mutex_lock(&dvfs_lock);
+
+	for (i = 0; i < n; i++) {
+		INIT_LIST_HEAD(&rails[i]->dvfs);
+		INIT_LIST_HEAD(&rails[i]->relationships_from);
+		INIT_LIST_HEAD(&rails[i]->relationships_to);
+		rails[i]->millivolts = rails[i]->nominal_millivolts;
+		rails[i]->new_millivolts = rails[i]->nominal_millivolts;
+		if (!rails[i]->step)
+			rails[i]->step = rails[i]->max_millivolts;
+
+		list_add_tail(&rails[i]->node, &dvfs_rail_list);
+	}
+
+	mutex_unlock(&dvfs_lock);
+
+	return 0;
+}
+
+static int dvfs_solve_relationship(struct dvfs_relationship *rel)
+{
+ return rel->solve(rel->from, rel->to);
+}
+
+/* Sets the voltage on a dvfs rail to a specific value, and updates any
+ * rails that depend on this rail. */
+static int dvfs_rail_set_voltage(struct dvfs_rail *rail, int millivolts)
+{
+ int ret = 0;
+ struct dvfs_relationship *rel;
+ int step = (millivolts > rail->millivolts) ? rail->step : -rail->step;
+ int i;
+ int steps;
+
+ if (!rail->reg) {
+ if (millivolts == rail->millivolts)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (rail->disabled)
+ return 0;
+
+ steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step);
+
+ for (i = 0; i < steps; i++) {
+ if (abs(millivolts - rail->millivolts) > rail->step)
+ rail->new_millivolts = rail->millivolts + step;
+ else
+ rail->new_millivolts = millivolts;
+
+ /* Before changing the voltage, tell each rail that depends
+ * on this rail that the voltage will change.
+ * This rail will be the "from" rail in the relationship,
+ * the rail that depends on this rail will be the "to" rail.
+ * from->millivolts will be the old voltage
+ * from->new_millivolts will be the new voltage */
+ list_for_each_entry(rel, &rail->relationships_to, to_node) {
+ ret = dvfs_rail_update(rel->to);
+ if (ret)
+ return ret;
+ }
+
+ if (!rail->disabled) {
+ ret = regulator_set_voltage(rail->reg,
+ rail->new_millivolts * 1000,
+ rail->max_millivolts * 1000);
+ }
+ if (ret) {
+ pr_err("Failed to set dvfs regulator %s\n", rail->reg_id);
+ return ret;
+ }
+
+ rail->millivolts = rail->new_millivolts;
+
+ /* After changing the voltage, tell each rail that depends
+ * on this rail that the voltage has changed.
+ * from->millivolts and from->new_millivolts will be the
+ * new voltage */
+ list_for_each_entry(rel, &rail->relationships_to, to_node) {
+ ret = dvfs_rail_update(rel->to);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (unlikely(rail->millivolts != millivolts)) {
+ pr_err("%s: rail didn't reach target %d in %d steps (%d)\n",
+ __func__, millivolts, steps, rail->millivolts);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Determine the minimum valid voltage for a rail, taking into account
+ * the dvfs clocks and any rails that this rail depends on. Calls
+ * dvfs_rail_set_voltage with the new voltage, which will call
+ * dvfs_rail_update on any rails that depend on this rail. */
+static int dvfs_rail_update(struct dvfs_rail *rail)
+{
+ int millivolts = 0;
+ struct dvfs *d;
+ struct dvfs_relationship *rel;
+ int ret = 0;
+
+ /* if dvfs is suspended, return and handle it during resume */
+ if (rail->suspended)
+ return 0;
+
+ /* if regulators are not connected yet, return and handle it later */
+ if (!rail->reg)
+ return 0;
+
+ /* Find the maximum voltage requested by any clock */
+ list_for_each_entry(d, &rail->dvfs, reg_node)
+ millivolts = max(d->cur_millivolts, millivolts);
+
+ rail->new_millivolts = millivolts;
+
+ /* Check any rails that this rail depends on */
+ list_for_each_entry(rel, &rail->relationships_from, from_node)
+ rail->new_millivolts = dvfs_solve_relationship(rel);
+
+ if (rail->new_millivolts != rail->millivolts)
+ ret = dvfs_rail_set_voltage(rail, rail->new_millivolts);
+
+ return ret;
+}
+
+static int dvfs_rail_connect_to_regulator(struct dvfs_rail *rail)
+{
+	struct regulator *reg;
+
+	if (!rail->reg) {
+		reg = regulator_get(NULL, rail->reg_id);
+		if (IS_ERR(reg))
+			return -EINVAL;
+		/* only assign on success; 'reg' is uninitialized otherwise */
+		rail->reg = reg;
+	}
+
+	return 0;
+}
+
+static int
+__tegra_dvfs_set_rate(struct dvfs *d, unsigned long rate)
+{
+ int i = 0;
+ int ret;
+
+ if (d->freqs == NULL || d->millivolts == NULL)
+ return -ENODEV;
+
+ if (rate > d->freqs[d->num_freqs - 1]) {
+ pr_warn("tegra_dvfs: rate %lu too high for dvfs on %s\n", rate,
+ d->clk_name);
+ return -EINVAL;
+ }
+
+ if (rate == 0) {
+ d->cur_millivolts = 0;
+ } else {
+ while (i < d->num_freqs && rate > d->freqs[i])
+ i++;
+
+ d->cur_millivolts = d->millivolts[i];
+ }
+
+ d->cur_rate = rate;
+
+ ret = dvfs_rail_update(d->dvfs_rail);
+ if (ret)
+ pr_err("Failed to set regulator %s for clock %s to %d mV\n",
+ d->dvfs_rail->reg_id, d->clk_name, d->cur_millivolts);
+
+ return ret;
+}
+
+int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+
+ if (!c->dvfs)
+ return -EINVAL;
+
+ mutex_lock(&dvfs_lock);
+ ret = __tegra_dvfs_set_rate(c->dvfs, rate);
+ mutex_unlock(&dvfs_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(tegra_dvfs_set_rate);
+
+/* May only be called during clock init, does not take any locks on clock c. */
+int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
+{
+ int i;
+
+ if (c->dvfs) {
+ pr_err("Error when enabling dvfs on %s for clock %s:\n",
+ d->dvfs_rail->reg_id, c->name);
+ pr_err("DVFS already enabled for %s\n",
+ c->dvfs->dvfs_rail->reg_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ if (d->millivolts[i] == 0)
+ break;
+
+ d->freqs[i] *= d->freqs_mult;
+
+ /* If final frequencies are 0, pad with previous frequency */
+ if (d->freqs[i] == 0 && i > 1)
+ d->freqs[i] = d->freqs[i - 1];
+ }
+ d->num_freqs = i;
+
+ if (d->auto_dvfs) {
+ c->auto_dvfs = true;
+ clk_set_cansleep(c);
+ }
+
+ c->dvfs = d;
+
+ mutex_lock(&dvfs_lock);
+ list_add_tail(&d->reg_node, &d->dvfs_rail->dvfs);
+ mutex_unlock(&dvfs_lock);
+
+ return 0;
+}
+
+static bool tegra_dvfs_all_rails_suspended(void)
+{
+ struct dvfs_rail *rail;
+ bool all_suspended = true;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ if (!rail->suspended && !rail->disabled)
+ all_suspended = false;
+
+ return all_suspended;
+}
+
+static bool tegra_dvfs_from_rails_suspended(struct dvfs_rail *to)
+{
+ struct dvfs_relationship *rel;
+ bool all_suspended = true;
+
+ list_for_each_entry(rel, &to->relationships_from, from_node)
+ if (!rel->from->suspended && !rel->from->disabled)
+ all_suspended = false;
+
+ return all_suspended;
+}
+
+static int tegra_dvfs_suspend_one(void)
+{
+ struct dvfs_rail *rail;
+ int ret;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ if (!rail->suspended && !rail->disabled &&
+ tegra_dvfs_from_rails_suspended(rail)) {
+ ret = dvfs_rail_set_voltage(rail,
+ rail->nominal_millivolts);
+ if (ret)
+ return ret;
+ rail->suspended = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void tegra_dvfs_resume(void)
+{
+ struct dvfs_rail *rail;
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ rail->suspended = false;
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ dvfs_rail_update(rail);
+
+ mutex_unlock(&dvfs_lock);
+}
+
+static int tegra_dvfs_suspend(void)
+{
+ int ret = 0;
+
+ mutex_lock(&dvfs_lock);
+
+ while (!tegra_dvfs_all_rails_suspended()) {
+ ret = tegra_dvfs_suspend_one();
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&dvfs_lock);
+
+ if (ret)
+ tegra_dvfs_resume();
+
+ return ret;
+}
+
+static int tegra_dvfs_pm_notify(struct notifier_block *nb,
+	unsigned long event, void *data)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		if (tegra_dvfs_suspend())
+			return NOTIFY_STOP;
+		break;
+	case PM_POST_SUSPEND:
+		tegra_dvfs_resume();
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_dvfs_nb = {
+ .notifier_call = tegra_dvfs_pm_notify,
+};
+
+/* must be called with dvfs lock held */
+static void __tegra_dvfs_rail_disable(struct dvfs_rail *rail)
+{
+ int ret;
+
+ if (!rail->disabled) {
+ ret = dvfs_rail_set_voltage(rail, rail->nominal_millivolts);
+ if (ret)
+ pr_info("dvfs: failed to set regulator %s to disable "
+ "voltage %d\n", rail->reg_id,
+ rail->nominal_millivolts);
+ rail->disabled = true;
+ }
+}
+
+/* must be called with dvfs lock held */
+static void __tegra_dvfs_rail_enable(struct dvfs_rail *rail)
+{
+ if (rail->disabled) {
+ rail->disabled = false;
+ dvfs_rail_update(rail);
+ }
+}
+
+void tegra_dvfs_rail_enable(struct dvfs_rail *rail)
+{
+ mutex_lock(&dvfs_lock);
+ __tegra_dvfs_rail_enable(rail);
+ mutex_unlock(&dvfs_lock);
+}
+
+void tegra_dvfs_rail_disable(struct dvfs_rail *rail)
+{
+ mutex_lock(&dvfs_lock);
+ __tegra_dvfs_rail_disable(rail);
+ mutex_unlock(&dvfs_lock);
+}
+
+int tegra_dvfs_rail_disable_by_name(const char *reg_id)
+{
+ struct dvfs_rail *rail;
+ int ret = 0;
+
+ mutex_lock(&dvfs_lock);
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ if (!strcmp(reg_id, rail->reg_id)) {
+ __tegra_dvfs_rail_disable(rail);
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+
+out:
+ mutex_unlock(&dvfs_lock);
+ return ret;
+}
+
+/*
+ * Iterate through all the dvfs regulators, finding the regulator exported
+ * by the regulator api for each one. Must be called in late init, after
+ * all the regulator api's regulators are initialized.
+ */
+int __init tegra_dvfs_late_init(void)
+{
+ struct dvfs_rail *rail;
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ dvfs_rail_connect_to_regulator(rail);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node)
+ dvfs_rail_update(rail);
+
+ mutex_unlock(&dvfs_lock);
+
+ register_pm_notifier(&tegra_dvfs_nb);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
+{
+ struct dvfs *da = list_entry(a, struct dvfs, reg_node);
+ struct dvfs *db = list_entry(b, struct dvfs, reg_node);
+ int ret;
+
+ ret = strcmp(da->dvfs_rail->reg_id, db->dvfs_rail->reg_id);
+ if (ret != 0)
+ return ret;
+
+ if (da->cur_millivolts < db->cur_millivolts)
+ return 1;
+ if (da->cur_millivolts > db->cur_millivolts)
+ return -1;
+
+ return strcmp(da->clk_name, db->clk_name);
+}
+
+static int dvfs_tree_show(struct seq_file *s, void *data)
+{
+ struct dvfs *d;
+ struct dvfs_rail *rail;
+ struct dvfs_relationship *rel;
+
+ seq_printf(s, " clock rate mV\n");
+ seq_printf(s, "--------------------------------\n");
+
+ mutex_lock(&dvfs_lock);
+
+ list_for_each_entry(rail, &dvfs_rail_list, node) {
+ seq_printf(s, "%s %d mV%s:\n", rail->reg_id,
+ rail->millivolts, rail->disabled ? " disabled" : "");
+ list_for_each_entry(rel, &rail->relationships_from, from_node) {
+ seq_printf(s, " %-10s %-7d mV %-4d mV\n",
+ rel->from->reg_id,
+ rel->from->millivolts,
+ dvfs_solve_relationship(rel));
+ }
+
+ list_sort(NULL, &rail->dvfs, dvfs_tree_sort_cmp);
+
+ list_for_each_entry(d, &rail->dvfs, reg_node) {
+ seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name,
+ d->cur_rate, d->cur_millivolts);
+ }
+ }
+
+ mutex_unlock(&dvfs_lock);
+
+ return 0;
+}
+
+static int dvfs_tree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dvfs_tree_show, inode->i_private);
+}
+
+static const struct file_operations dvfs_tree_fops = {
+ .open = dvfs_tree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init dvfs_debugfs_init(struct dentry *clk_debugfs_root)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("dvfs", S_IRUGO, clk_debugfs_root, NULL,
+ &dvfs_tree_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+#endif
--- /dev/null
+/*
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _TEGRA_DVFS_H_
+#define _TEGRA_DVFS_H_
+
+#define MAX_DVFS_FREQS 16
+
+struct clk;
+struct dvfs_rail;
+
+/*
+ * dvfs_relationship between to rails, "from" and "to"
+ * when the rail changes, it will call dvfs_rail_update on the rails
+ * in the relationship_to list.
+ * when determining the voltage to set a rail to, it will consider each
+ * rail in the relationship_from list.
+ */
+struct dvfs_relationship {
+ struct dvfs_rail *to;
+ struct dvfs_rail *from;
+ int (*solve)(struct dvfs_rail *, struct dvfs_rail *);
+
+ struct list_head to_node; /* node in relationship_to list */
+ struct list_head from_node; /* node in relationship_from list */
+};
+
+struct dvfs_rail {
+ const char *reg_id;
+ int min_millivolts;
+ int max_millivolts;
+ int nominal_millivolts;
+ int step;
+ bool disabled;
+
+ struct list_head node; /* node in dvfs_rail_list */
+ struct list_head dvfs; /* list head of attached dvfs clocks */
+ struct list_head relationships_to;
+ struct list_head relationships_from;
+ struct regulator *reg;
+ int millivolts;
+ int new_millivolts;
+ bool suspended;
+};
+
+struct dvfs {
+ /* Used only by tegra2_clock.c */
+ const char *clk_name;
+ int cpu_process_id;
+
+ /* Must be initialized before tegra_dvfs_init */
+ int freqs_mult;
+ unsigned long freqs[MAX_DVFS_FREQS];
+ const int *millivolts;
+ struct dvfs_rail *dvfs_rail;
+ bool auto_dvfs;
+
+ /* Filled in by tegra_dvfs_init */
+ int max_millivolts;
+ int num_freqs;
+
+ int cur_millivolts;
+ unsigned long cur_rate;
+ struct list_head node;
+ struct list_head debug_node;
+ struct list_head reg_node;
+};
+
+void tegra2_init_dvfs(void);
+int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d);
+int dvfs_debugfs_init(struct dentry *clk_debugfs_root);
+int tegra_dvfs_late_init(void);
+int tegra_dvfs_init_rails(struct dvfs_rail *dvfs_rails[], int n);
+void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n);
+void tegra_dvfs_rail_enable(struct dvfs_rail *rail);
+void tegra_dvfs_rail_disable(struct dvfs_rail *rail);
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Brian Swetland <swetland@google.com>
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/iomap.h>
+#include <mach/fiq.h>
+#include <mach/legacy_irq.h>
+
+#include "board.h"
+
+void tegra_fiq_enable(int irq)
+{
+ void __iomem *base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100);
+ /* enable FIQ */
+ u32 val = readl(base + GIC_CPU_CTRL);
+ val &= ~8; /* pass FIQs through */
+ val |= 2; /* enableNS */
+ writel(val, base + GIC_CPU_CTRL);
+ tegra_legacy_select_fiq(irq, true);
+ tegra_legacy_unmask_irq(irq);
+}
+
+void tegra_fiq_disable(int irq)
+{
+ tegra_legacy_mask_irq(irq);
+ tegra_legacy_select_fiq(irq, false);
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/fuse.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <mach/iomap.h>
+
+#include "fuse.h"
+#include "apbio.h"
+
+#define FUSE_UID_LOW 0x108
+#define FUSE_UID_HIGH 0x10c
+#define FUSE_SKU_INFO 0x110
+#define FUSE_SPARE_BIT 0x200
+
+static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
+ [TEGRA_REVISION_UNKNOWN] = "unknown",
+ [TEGRA_REVISION_A02] = "A02",
+ [TEGRA_REVISION_A03] = "A03",
+ [TEGRA_REVISION_A03p] = "A03 prime",
+};
+
+u32 tegra_fuse_readl(unsigned long offset)
+{
+ return tegra_apb_readl(TEGRA_FUSE_BASE + offset);
+}
+
+void tegra_fuse_writel(u32 value, unsigned long offset)
+{
+ tegra_apb_writel(value, TEGRA_FUSE_BASE + offset);
+}
+
+static inline bool get_spare_fuse(int bit)
+{
+ return tegra_fuse_readl(FUSE_SPARE_BIT + bit * 4);
+}
+
+void tegra_init_fuse(void)
+{
+	u32 reg = readl(IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+	reg |= 1 << 28; /* NOTE(review): presumably enables fuse register visibility - confirm against the Tegra TRM */
+	writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48));
+
+	pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
+		tegra_revision_name[tegra_get_revision()],
+		tegra_sku_id(), tegra_cpu_process_id(),
+		tegra_core_process_id());
+}
+
+unsigned long long tegra_chip_uid(void)
+{
+ unsigned long long lo, hi;
+
+ lo = tegra_fuse_readl(FUSE_UID_LOW);
+ hi = tegra_fuse_readl(FUSE_UID_HIGH);
+ return (hi << 32ull) | lo;
+}
+
+int tegra_sku_id(void)
+{
+ int sku_id;
+ u32 reg = tegra_fuse_readl(FUSE_SKU_INFO);
+ sku_id = reg & 0xFF;
+ return sku_id;
+}
+
+int tegra_cpu_process_id(void)
+{
+ int cpu_process_id;
+ u32 reg = tegra_fuse_readl(FUSE_SPARE_BIT);
+ cpu_process_id = (reg >> 6) & 3;
+ return cpu_process_id;
+}
+
+int tegra_core_process_id(void)
+{
+ int core_process_id;
+ u32 reg = tegra_fuse_readl(FUSE_SPARE_BIT);
+ core_process_id = (reg >> 12) & 3;
+ return core_process_id;
+}
+
+enum tegra_revision tegra_get_revision(void)
+{
+ void __iomem *chip_id = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804;
+ u32 id = readl(chip_id);
+
+ switch ((id >> 16) & 0xf) {
+ case 2:
+ return TEGRA_REVISION_A02;
+ case 3:
+ if (get_spare_fuse(18) || get_spare_fuse(19))
+ return TEGRA_REVISION_A03p;
+ else
+ return TEGRA_REVISION_A03;
+ default:
+ return TEGRA_REVISION_UNKNOWN;
+ }
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/fuse.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+enum tegra_revision {
+ TEGRA_REVISION_UNKNOWN = 0,
+ TEGRA_REVISION_A02,
+ TEGRA_REVISION_A03,
+ TEGRA_REVISION_A03p,
+ TEGRA_REVISION_MAX,
+};
+
+unsigned long long tegra_chip_uid(void);
+int tegra_sku_id(void);
+int tegra_cpu_process_id(void);
+int tegra_core_process_id(void);
+void tegra_init_fuse(void);
+u32 tegra_fuse_readl(unsigned long offset);
+void tegra_fuse_writel(u32 value, unsigned long offset);
+enum tegra_revision tegra_get_revision(void);
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <mach/iomap.h>
+#include <mach/suspend.h>
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
int bank;
int irq;
spinlock_t lvl_lock[4];
+#ifdef CONFIG_PM
+ u32 cnf[4];
+ u32 out[4];
+ u32 oe[4];
+ u32 int_enb[4];
+ u32 int_lvl[4];
+#endif
};
.direction_output = tegra_gpio_direction_output,
.set = tegra_gpio_set,
.base = 0,
- .ngpio = ARCH_NR_GPIOS,
+ .ngpio = TEGRA_NR_GPIOS,
};
static void tegra_gpio_irq_ack(unsigned int irq)
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
__set_irq_handler_unlocked(irq, handle_edge_irq);
+ if (tegra_get_suspend_mode() == TEGRA_SUSPEND_LP0)
+ tegra_set_lp0_wake_type(irq, type);
+
return 0;
}
}
+#ifdef CONFIG_PM
+void tegra_gpio_resume(void)
+{
+ unsigned long flags;
+ int b, p, i;
+
+ local_irq_save(flags);
+
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
+ __raw_writel(bank->out[p], GPIO_OUT(gpio));
+ __raw_writel(bank->oe[p], GPIO_OE(gpio));
+ __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
+ __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+ }
+ }
+
+ local_irq_restore(flags);
+
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc || (desc->status & IRQ_WAKEUP))
+ continue;
+ enable_irq(i);
+ }
+}
+
+void tegra_gpio_suspend(void)
+{
+ unsigned long flags;
+ int b, p, i;
+
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+ if (desc->status & IRQ_WAKEUP) {
+ int gpio = i - INT_GPIO_BASE;
+ pr_debug("gpio %d.%d is wakeup\n", gpio/8, gpio&7);
+ continue;
+ }
+ disable_irq(i);
+ }
+
+ local_irq_save(flags);
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
+ bank->out[p] = __raw_readl(GPIO_OUT(gpio));
+ bank->oe[p] = __raw_readl(GPIO_OE(gpio));
+ bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
+ bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
+ }
+ }
+ local_irq_restore(flags);
+}
+
+static int tegra_gpio_wake_enable(unsigned int irq, unsigned int enable)
+{
+ int ret;
+ struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+
+ ret = tegra_set_lp1_wake(bank->irq, enable);
+ if (ret)
+ return ret;
+
+ if (tegra_get_suspend_mode() == TEGRA_SUSPEND_LP0)
+ return tegra_set_lp0_wake(irq, enable);
+
+ return 0;
+}
+#endif
static struct irq_chip tegra_gpio_irq_chip = {
.name = "GPIO",
.mask = tegra_gpio_irq_mask,
.unmask = tegra_gpio_irq_unmask,
.set_type = tegra_gpio_irq_set_type,
+#ifdef CONFIG_PM
+ .set_wake = tegra_gpio_wake_enable,
+#endif
};
gpiochip_add(&tegra_gpio_chip);
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + ARCH_NR_GPIOS); i++) {
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
lockdep_set_class(&irq_desc[i].lock, &gpio_lock_class);
for (i = 0; i < 7; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
- seq_printf(s, "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
- i, j,
- __raw_readl(GPIO_CNF(gpio)),
- __raw_readl(GPIO_OE(gpio)),
- __raw_readl(GPIO_OUT(gpio)),
- __raw_readl(GPIO_IN(gpio)),
- __raw_readl(GPIO_INT_STA(gpio)),
- __raw_readl(GPIO_INT_ENB(gpio)),
- __raw_readl(GPIO_INT_LVL(gpio)));
+ seq_printf(s,
+ "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+ i, j,
+ __raw_readl(GPIO_CNF(gpio)),
+ __raw_readl(GPIO_OE(gpio)),
+ __raw_readl(GPIO_OUT(gpio)),
+ __raw_readl(GPIO_IN(gpio)),
+ __raw_readl(GPIO_INT_STA(gpio)),
+ __raw_readl(GPIO_INT_ENB(gpio)),
+ __raw_readl(GPIO_INT_LVL(gpio)));
}
}
return 0;
--- /dev/null
+/*
+ * arch/arm/mach-tegra/headsmp.S
+ *
+ * SMP initialization routines for Tegra SoCs
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/domain.h>
+#include <asm/ptrace.h>
+#include <asm/cache.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "power-macros.S"
+
+#define TTB_FLAGS 0x6A @ IRGN_WBWA, OC_RGN_WBWA, S, NOS
+
+#define PMC_DPD_SAMPLE 0x20
+#define PMC_DPD_ENABLE 0x24
+#define PMC_SCRATCH1 0x54
+#define PMC_SCRATCH39 0x138
+#define RST_DEVICES_U 0xc
+
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+#define CLK_RESET_PLLP_BASE 0xa0
+#define CLK_RESET_PLLP_OUTA 0xa4
+#define CLK_RESET_PLLP_OUTB 0xa8
+#define CLK_RESET_PLLP_MISC 0xac
+
+/* .section ".cpuinit.text", "ax"*/
+
+.macro poke_ev, val, tmp
+ mov32 \tmp, (TEGRA_EXCEPTION_VECTORS_BASE + 0x100)
+ str \val, [\tmp]
+.endm
+
+#ifdef CONFIG_SMP
+/*
+ * tegra_secondary_startup
+ *
+ * Initial secondary processor boot vector; jumps to kernel's
+ * secondary_startup routine
+ */
+ENTRY(tegra_secondary_startup)
+ msr cpsr_fsxc, #0xd3
+ bl __invalidate_cpu_state
+ cpu_id r0
+ enable_coresite r1
+ poke_ev r0, r1
+ b secondary_startup
+ENDPROC(tegra_secondary_startup)
+#endif
+
+/*
+ * __restart_plls
+ *
+ * Loads the saved PLLX and PLLP parameters into the PLLs, to
+ * allow them to stabilize while the rest of the CPU state is restored.
+ * Should be called after the MMU is enabled. Jumps directly
+ * to __cortex_a9_restore
+ */
+ .align L1_CACHE_SHIFT
+__restart_plls:
+ mov32 r0, tegra_sctx
+ mov32 r3, (TEGRA_CLK_RESET_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
+ mov32 r4, (TEGRA_TMRUS_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
+
+ ldr r1, [r0, #0x0] @ pllx_misc
+ ldr r2, [r0, #0x4] @ pllx_base
+ str r1, [r3, #CLK_RESET_PLLX_MISC]
+ str r2, [r3, #CLK_RESET_PLLX_BASE]
+
+ ldr r1, [r0, #0x8] @ pllp_misc
+ ldr r2, [r0, #0xc] @ pllp_base
+ str r1, [r3, #CLK_RESET_PLLP_MISC]
+ str r2, [r3, #CLK_RESET_PLLP_BASE]
+
+ ldr r1, [r0, #0x10] @ pllp_outa
+ ldr r2, [r0, #0x14] @ pllp_outb
+ str r1, [r3, #CLK_RESET_PLLP_OUTA]
+ str r2, [r3, #CLK_RESET_PLLP_OUTB]
+
+ /* record the time that PLLX and PLLP will be stable */
+ ldr r1, [r4]
+ add r1, r1, #300
+ str r1, [r0, #0x18] @ pll_timeout
+ /* FIXME: need to record actual power transition here */
+ mov r0, #0
+ b __cortex_a9_l2x0_restart
+ENDPROC(__restart_plls)
+/*
+ * __enable_coresite_access
+ *
+ * Takes the coresite debug interface out of reset, enables
+ * access to all CPUs. Called with MMU disabled.
+ */
+ .align L1_CACHE_SHIFT
+__enable_coresite_access:
+ mov32 r0, (TEGRA_CLK_RESET_BASE + RST_DEVICES_U)
+ mov32 r2, (TEGRA_TMRUS_BASE)
+
+ /* assert reset for 2usec */
+ ldr r1, [r0]
+ orr r1, #(1<<9)
+ str r1, [r0]
+ wait_for_us r3, r2, r4
+ add r3, r3, #2
+ bic r1, r1, #(1<<9)
+ wait_until r3, r2, r4
+ str r1, [r0]
+ enable_coresite r3
+ bx lr
+ENDPROC(__enable_coresite_access)
+/*
+ * tegra_lp2_startup
+ *
+ * Secondary CPU boot vector when restarting the master CPU following
+ * an LP2 idle transition. Re-enable coresight access, re-enable
+ * MMU, re-start PLLX, restore processor context.
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(tegra_lp2_startup)
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+
+ mov32 r0, TEGRA_TMRUS_BASE
+ ldr r1, [r0]
+ mov32 r0, TEGRA_PMC_BASE
+ str r1, [r0, #PMC_SCRATCH39] @ save off exact lp2 exit time
+ mov r1, #0
+ str r1, [r0, #PMC_DPD_SAMPLE]
+ str r1, [r0, #PMC_DPD_ENABLE]
+
+ bl __invalidate_cpu_state
+ bl __enable_coresite_access
+
+ mrc p15, 0, r0, c1, c0, 1
+ orr r0, r0, #(1 << 6) | (1 << 0) @ re-enable coherency
+ mcr p15, 0, r0, c1, c0, 1
+
+ /* enable SCU */
+ mov32 r0, TEGRA_ARM_PERIF_BASE
+ ldr r1, [r0]
+ orr r1, r1, #1
+ str r1, [r0]
+
+ adr r4, __tegra_lp2_data
+ ldmia r4, {r5, r7, r12}
+ mov r1, r12 @ ctx_restore = __cortex_a9_restore
+ sub r4, r4, r5
+ ldr r0, [r7, r4] @ pgdir = tegra_pgd_phys
+ b __return_to_virtual
+ENDPROC(tegra_lp2_startup)
+ .type __tegra_lp2_data, %object
+__tegra_lp2_data:
+ .long .
+ .long tegra_pgd_phys
+ .long __restart_plls
+ .size __tegra_lp2_data, . - __tegra_lp2_data
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * tegra_hotplug_startup
+ *
+ * Secondary CPU boot vector when restarting a CPU following a
+ * hot-unplug. Uses the page table created by smp_prepare_cpus and
+ * stored in tegra_pgd_phys as the safe page table for
+ * __return_to_virtual, and jumps directly to __cortex_a9_restore.
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(tegra_hotplug_startup)
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+ bl __invalidate_cpu_state
+ enable_coresite r1
+
+ /* most of the below is a retread of what happens in __v7_setup and
+ * secondary_startup, to get the MMU re-enabled and to branch
+ * to secondary_kernel_startup */
+ mrc p15, 0, r0, c1, c0, 1
+ orr r0, r0, #(1 << 6) | (1 << 0) @ re-enable coherency
+ mcr p15, 0, r0, c1, c0, 1
+
+ adr r4, __tegra_hotplug_data
+ ldmia r4, {r5, r7, r12}
+ mov r1, r12 @ ctx_restore = __cortex_a9_restore
+ sub r4, r4, r5
+ ldr r0, [r7, r4] @ pgdir = secondary_data.pgdir
+ b __return_to_virtual
+ENDPROC(tegra_hotplug_startup)
+
+
+ .type __tegra_hotplug_data, %object
+__tegra_hotplug_data:
+ .long .
+ .long tegra_pgd_phys
+ .long __cortex_a9_restore
+ .size __tegra_hotplug_data, . - __tegra_hotplug_data
+#endif
+++ /dev/null
-#include <linux/linkage.h>
-#include <linux/init.h>
-
- .section ".text.head", "ax"
- __CPUINIT
-
-/*
- * Tegra specific entry point for secondary CPUs.
- * The secondary kernel init calls v7_flush_dcache_all before it enables
- * the L1; however, the L1 comes out of reset in an undefined state, so
- * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
- * of cache lines with uninitialized data and uninitialized tags to get
- * written out to memory, which does really unpleasant things to the main
- * processor. We fix this by performing an invalidate, rather than a
- * clean + invalidate, before jumping into the kernel.
- */
-ENTRY(v7_invalidate_l1)
- mov r0, #0
- mcr p15, 2, r0, c0, c0, 0
- mrc p15, 1, r0, c0, c0, 0
-
- ldr r1, =0x7fff
- and r2, r1, r0, lsr #13
-
- ldr r1, =0x3ff
-
- and r3, r1, r0, lsr #3 @ NumWays - 1
- add r2, r2, #1 @ NumSets
-
- and r0, r0, #0x7
- add r0, r0, #4 @ SetShift
-
- clz r1, r3 @ WayShift
- add r4, r3, #1 @ NumWays
-1: sub r2, r2, #1 @ NumSets--
- mov r3, r4 @ Temp = NumWays
-2: subs r3, r3, #1 @ Temp--
- mov r5, r3, lsl r1
- mov r6, r2, lsl r0
- orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
- mcr p15, 0, r5, c7, c6, 2
- bgt 2b
- cmp r2, #0
- bgt 1b
- dsb
- isb
- mov pc, lr
-ENDPROC(v7_invalidate_l1)
-
-ENTRY(tegra_secondary_startup)
- msr cpsr_fsxc, #0xd3
- bl v7_invalidate_l1
- mrc p15, 0, r0, c0, c0, 5
- and r0, r0, #15
- ldr r1, =0x6000f100
- str r0, [r1]
-1: ldr r2, [r1]
- cmp r0, r2
- beq 1b
- b secondary_startup
-ENDPROC(tegra_secondary_startup)
+++ /dev/null
-/*
- * linux/arch/arm/mach-realview/hotplug.c
- *
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/completion.h>
-
-#include <asm/cacheflush.h>
-
-static DECLARE_COMPLETION(cpu_killed);
-
-static inline void cpu_enter_lowpower(void)
-{
- unsigned int v;
-
- flush_cache_all();
- asm volatile(
- " mcr p15, 0, %1, c7, c5, 0\n"
- " mcr p15, 0, %1, c7, c10, 4\n"
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, #0x04\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "r" (0)
- : "cc");
-}
-
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, #0x04\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- :
- : "cc");
-}
-
-static inline void platform_do_lowpower(unsigned int cpu)
-{
- /*
- * there is no power-control hardware on this platform, so all
- * we can do is put the core into WFI; this is safe as the calling
- * code will have already disabled interrupts
- */
- for (;;) {
- /*
- * here's the WFI
- */
- asm(".word 0xe320f003\n"
- :
- :
- : "memory", "cc");
-
- /*if (pen_release == cpu) {*/
- /*
- * OK, proper wakeup, we're done
- */
- break;
- /*}*/
-
- /*
- * getting here, means that we have come out of WFI without
- * having been woken up - this shouldn't happen
- *
- * The trouble is, letting people know about this is not really
- * possible, since we are currently running incoherently, and
- * therefore cannot safely call printk() or anything else
- */
-#ifdef DEBUG
- printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
-#endif
- }
-}
-
-int platform_cpu_kill(unsigned int cpu)
-{
- return wait_for_completion_timeout(&cpu_killed, 5000);
-}
-
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
-void platform_cpu_die(unsigned int cpu)
-{
-#ifdef DEBUG
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
-#endif
-
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
-
- /*
- * we're ready for shutdown now, so do it
- */
- cpu_enter_lowpower();
- platform_do_lowpower(cpu);
-
- /*
- * bring this CPU back into the world of cache
- * coherency, and then restore interrupts
- */
- cpu_leave_lowpower();
-}
-
-int platform_cpu_disable(unsigned int cpu)
-{
- /*
- * we don't allow CPU 0 to be shutdown (it is still too special
- * e.g. clock tick interrupts)
- */
- return cpu == 0 ? -EPERM : 0;
-}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/arb_sema.h
+ *
+ * Hardware arbitration semaphore interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_ARB_SEMA_H
+#define __MACH_TEGRA_ARB_SEMA_H
+
+enum tegra_arb_module {
+ TEGRA_ARB_AES = 0,
+};
+
+int tegra_arb_mutex_lock_timeout(enum tegra_arb_module lock, int msecs);
+
+int tegra_arb_mutex_unlock(enum tegra_arb_module lock);
+
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/audio.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_TEGRA_AUDIO_H
+#define __ARCH_ARM_MACH_TEGRA_AUDIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/i2s.h>
+
+#define FIFO1 0
+#define FIFO2 1
+
+/* FIXME: this is not enforced by the hardware. */
+#define I2S_FIFO_TX FIFO1
+#define I2S_FIFO_RX FIFO2
+
+#define TEGRA_AUDIO_ENABLE_TX 1
+#define TEGRA_AUDIO_ENABLE_RX 2
+
+struct tegra_audio_platform_data {
+ bool i2s_master;
+ bool dsp_master;
+ int i2s_master_clk; /* When I2S mode and master, the framesync rate. */
+ int dsp_master_clk; /* When DSP mode and master, the framesync rate. */
+ bool dma_on;
+ unsigned long i2s_clk_rate;
+ const char *dap_clk;
+ const char *audio_sync_clk;
+
+ int mode; /* I2S, LJM, RJM, etc. */
+ int fifo_fmt;
+ int bit_size;
+ int i2s_bus_width; /* 32-bit for 16-bit packed I2S */
+ int dsp_bus_width; /* 16-bit for DSP data format */
+ int mask; /* enable tx and rx? */
+ bool stereo_capture; /* True if hardware supports stereo */
+ void *driver_data;
+};
+
+#endif /* __ARCH_ARM_MACH_TEGRA_AUDIO_H */
#ifndef __MACH_CLK_H
#define __MACH_CLK_H
+struct dvfs;
+
void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
+int tegra_dvfs_set_rate(struct clk *c, unsigned long rate);
+unsigned long clk_get_rate_all_locked(struct clk *c);
+void tegra_sdmmc_tap_delay(struct clk *c, int delay);
+
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/dc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_DC_H
+#define __MACH_TEGRA_DC_H
+
+#include <linux/pm.h>
+
+#define TEGRA_MAX_DC 2
+#define DC_N_WINDOWS 3
+
+struct tegra_dc_mode {
+ int pclk;
+ int h_ref_to_sync;
+ int v_ref_to_sync;
+ int h_sync_width;
+ int v_sync_width;
+ int h_back_porch;
+ int v_back_porch;
+ int h_active;
+ int v_active;
+ int h_front_porch;
+ int v_front_porch;
+ u32 flags;
+};
+
+#define TEGRA_DC_MODE_FLAG_NEG_V_SYNC (1 << 0)
+#define TEGRA_DC_MODE_FLAG_NEG_H_SYNC (1 << 1)
+
+enum {
+ TEGRA_DC_OUT_RGB,
+ TEGRA_DC_OUT_HDMI,
+};
+
+struct tegra_dc_out {
+ int type;
+ unsigned flags;
+
+ /* size in mm */
+ unsigned h_size;
+ unsigned v_size;
+
+ int dcc_bus;
+ int hotplug_gpio;
+
+ unsigned order;
+ unsigned align;
+ unsigned depth;
+
+ unsigned height; /* mm */
+ unsigned width; /* mm */
+
+ struct tegra_dc_mode *modes;
+ int n_modes;
+
+ int (*enable)(void);
+ int (*disable)(void);
+};
+
+/* bits for tegra_dc_out.flags */
+#define TEGRA_DC_OUT_HOTPLUG_HIGH (0 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_LOW (1 << 1)
+#define TEGRA_DC_OUT_HOTPLUG_MASK (1 << 1)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ALWAYS_ON (0 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND (1 << 2)
+#define TEGRA_DC_OUT_NVHDCP_POLICY_MASK (1 << 2)
+
+#define TEGRA_DC_ALIGN_MSB 0
+#define TEGRA_DC_ALIGN_LSB 1
+
+#define TEGRA_DC_ORDER_RED_BLUE 0
+#define TEGRA_DC_ORDER_BLUE_RED 1
+
+struct tegra_dc;
+struct nvmap_handle_ref;
+
+struct tegra_dc_win {
+ u8 idx;
+ u8 fmt;
+ u32 flags;
+
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ unsigned offset_u;
+ unsigned offset_v;
+ unsigned stride;
+ unsigned stride_uv;
+ unsigned x;
+ unsigned y;
+ unsigned w;
+ unsigned h;
+ unsigned out_x;
+ unsigned out_y;
+ unsigned out_w;
+ unsigned out_h;
+ unsigned z;
+
+ int dirty;
+ int underflows;
+ struct tegra_dc *dc;
+
+ struct nvmap_handle_ref *cur_handle;
+};
+
+
+#define TEGRA_WIN_FLAG_ENABLED (1 << 0)
+#define TEGRA_WIN_FLAG_BLEND_PREMULT (1 << 1)
+#define TEGRA_WIN_FLAG_BLEND_COVERAGE (1 << 2)
+#define TEGRA_WIN_FLAG_INVERT_H (1 << 3)
+#define TEGRA_WIN_FLAG_INVERT_V (1 << 4)
+#define TEGRA_WIN_FLAG_TILED (1 << 5)
+
+#define TEGRA_WIN_BLEND_FLAGS_MASK \
+ (TEGRA_WIN_FLAG_BLEND_PREMULT | TEGRA_WIN_FLAG_BLEND_COVERAGE)
+
+/* Note: These are the actual values written to the DC_WIN_COLOR_DEPTH register
+ * and may change in new tegra architectures.
+ */
+#define TEGRA_WIN_FMT_P1 0
+#define TEGRA_WIN_FMT_P2 1
+#define TEGRA_WIN_FMT_P4 2
+#define TEGRA_WIN_FMT_P8 3
+#define TEGRA_WIN_FMT_B4G4R4A4 4
+#define TEGRA_WIN_FMT_B5G5R5A 5
+#define TEGRA_WIN_FMT_B5G6R5 6
+#define TEGRA_WIN_FMT_AB5G5R5 7
+#define TEGRA_WIN_FMT_B8G8R8A8 12
+#define TEGRA_WIN_FMT_R8G8B8A8 13
+#define TEGRA_WIN_FMT_B6x2G6x2R6x2A8 14
+#define TEGRA_WIN_FMT_R6x2G6x2B6x2A8 15
+#define TEGRA_WIN_FMT_YCbCr422 16
+#define TEGRA_WIN_FMT_YUV422 17
+#define TEGRA_WIN_FMT_YCbCr420P 18
+#define TEGRA_WIN_FMT_YUV420P 19
+#define TEGRA_WIN_FMT_YCbCr422P 20
+#define TEGRA_WIN_FMT_YUV422P 21
+#define TEGRA_WIN_FMT_YCbCr422R 22
+#define TEGRA_WIN_FMT_YUV422R 23
+#define TEGRA_WIN_FMT_YCbCr422RA 24
+#define TEGRA_WIN_FMT_YUV422RA 25
+
+struct tegra_fb_data {
+ int win;
+
+ int xres;
+ int yres;
+ int bits_per_pixel; /* -1 means autodetect */
+
+ unsigned long flags;
+};
+
+#define TEGRA_FB_FLIP_ON_PROBE (1 << 0)
+
+struct tegra_dc_platform_data {
+ unsigned long flags;
+ unsigned long emc_clk_rate;
+ struct tegra_dc_out *default_out;
+ struct tegra_fb_data *fb;
+};
+
+#define TEGRA_DC_FLAG_ENABLED (1 << 0)
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx);
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win);
+
+void tegra_dc_enable(struct tegra_dc *dc);
+void tegra_dc_disable(struct tegra_dc *dc);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc);
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc);
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, u32 val);
+
+/* tegra_dc_update_windows and tegra_dc_sync_windows do not support windows
+ * with different dcs in one call
+ */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n);
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n);
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode);
+
+unsigned tegra_dc_get_out_height(struct tegra_dc *dc);
+unsigned tegra_dc_get_out_width(struct tegra_dc *dc);
+
+#endif
*/
#include <mach/io.h>
+#include <mach/iomap.h>
.macro addruart,rx, tmp
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
ldreq \rx, =IO_APB_PHYS @ physical
ldrne \rx, =IO_APB_VIRT @ virtual
-#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
-#error "A debug UART must be selected in the kernel config to use DEBUG_LL"
-#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
- orr \rx, \rx, #0x6000
-#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
- ldr \tmp, =0x6040
+ ldr \tmp, =(TEGRA_DEBUG_UART_BASE & 0xFFFF)
orr \rx, \rx, \tmp
-#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
- orr \rx, \rx, #0x6200
-#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
- orr \rx, \rx, #0x6300
-#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
- orr \rx, \rx, #0x6400
-#endif
.endm
#define UART_SHIFT 2
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/delay.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MACH_TEGRA_DELAY_H
+#define __MACH_TEGRA_DELAY_H
+
+/* needed by loops_per_jiffy calculations */
+extern void __delay(int loops);
+
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+
+/* we don't have any restrictions on maximum udelay length, but we'll enforce
+ * the same restriction as the ARM default so we don't introduce any
+ * incompatibilities in drivers.
+ */
+extern void __bad_udelay(void);
+
+#define MAX_UDELAY_MS 2
+
+#define udelay(n) \
+ ((__builtin_constant_p(n) && (n) > (MAX_UDELAY_MS * 1000)) ? \
+ __bad_udelay() : \
+ __udelay(n))
+
+#endif /* defined(__MACH_TEGRA_DELAY_H) */
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/dma.h
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_DMA_H
+#define __MACH_TEGRA_DMA_H
+
+#include <linux/list.h>
+
+#if defined(CONFIG_TEGRA_SYSTEM_DMA)
+
+struct tegra_dma_req;
+struct tegra_dma_channel;
+
+#define TEGRA_DMA_REQ_SEL_CNTR 0
+#define TEGRA_DMA_REQ_SEL_I2S_2 1
+#define TEGRA_DMA_REQ_SEL_I2S_1 2
+#define TEGRA_DMA_REQ_SEL_SPD_I 3
+#define TEGRA_DMA_REQ_SEL_UI_I 4
+#define TEGRA_DMA_REQ_SEL_MIPI 5
+#define TEGRA_DMA_REQ_SEL_I2S2_2 6
+#define TEGRA_DMA_REQ_SEL_I2S2_1 7
+#define TEGRA_DMA_REQ_SEL_UARTA 8
+#define TEGRA_DMA_REQ_SEL_UARTB 9
+#define TEGRA_DMA_REQ_SEL_UARTC 10
+#define TEGRA_DMA_REQ_SEL_SPI 11
+#define TEGRA_DMA_REQ_SEL_AC97 12
+#define TEGRA_DMA_REQ_SEL_ACMODEM 13
+#define TEGRA_DMA_REQ_SEL_SL4B 14
+#define TEGRA_DMA_REQ_SEL_SL2B1 15
+#define TEGRA_DMA_REQ_SEL_SL2B2 16
+#define TEGRA_DMA_REQ_SEL_SL2B3 17
+#define TEGRA_DMA_REQ_SEL_SL2B4 18
+#define TEGRA_DMA_REQ_SEL_UARTD 19
+#define TEGRA_DMA_REQ_SEL_UARTE 20
+#define TEGRA_DMA_REQ_SEL_I2C 21
+#define TEGRA_DMA_REQ_SEL_I2C2 22
+#define TEGRA_DMA_REQ_SEL_I2C3 23
+#define TEGRA_DMA_REQ_SEL_DVC_I2C 24
+#define TEGRA_DMA_REQ_SEL_OWR 25
+#define TEGRA_DMA_REQ_SEL_INVALID 31
+
+#define TEGRA_DMA_MAX_TRANSFER_SIZE 0x10000
+
+enum tegra_dma_mode {
+ TEGRA_DMA_SHARED = 1,
+ TEGRA_DMA_MODE_CONTINUOUS = 2,
+ TEGRA_DMA_MODE_CONTINUOUS_DOUBLE = TEGRA_DMA_MODE_CONTINUOUS,
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE = 4,
+ TEGRA_DMA_MODE_ONESHOT = 8,
+};
+
+enum tegra_dma_req_error {
+ TEGRA_DMA_REQ_SUCCESS = 0,
+ TEGRA_DMA_REQ_ERROR_ABORTED,
+ TEGRA_DMA_REQ_INFLIGHT,
+};
+
+enum tegra_dma_req_buff_status {
+ TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
+ TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
+ TEGRA_DMA_REQ_BUF_STATUS_FULL,
+};
+
+struct tegra_dma_req {
+ struct list_head node;
+ unsigned int modid;
+ int instance;
+
+ /* Called when the req is complete and from the DMA ISR context.
+ * When this is called the req structure is no longer queued by
+ * the DMA channel.
+ *
+ * State of the DMA depends on the number of req it has. If there are
+ * no DMA requests queued up, then it will STOP the DMA. If there are
+ * more requests in the DMA, then it will queue the next request.
+ */
+ void (*complete)(struct tegra_dma_req *req);
+
+ /* This is a called from the DMA ISR context when the DMA is still in
+ * progress and is actively filling same buffer.
+ *
+ * In case of continuous mode receive, this threshold is 1/2 the buffer
+ * size. In other cases, this will not even be called as there is no
+ * hardware support for it.
+ *
+ * In the case of continuous mode receive, if there is next req already
+ * queued, DMA programs the HW to use that req when this req is
+ * completed. If there is no "next req" queued, then DMA ISR doesn't do
+ * anything before calling this callback.
+ *
+ * This is mainly used by the cases, where the clients has queued
+ * only one req and want to get some sort of DMA threshold
+ * callback to program the next buffer.
+ *
+ */
+ void (*threshold)(struct tegra_dma_req *req);
+
+ /* 1 to copy to memory.
+ * 0 to copy from the memory to device FIFO */
+ int to_memory;
+
+ void *virt_addr;
+
+ unsigned long source_addr;
+ unsigned long dest_addr;
+ unsigned long dest_wrap;
+ unsigned long source_wrap;
+ unsigned long source_bus_width;
+ unsigned long dest_bus_width;
+ unsigned long req_sel;
+ unsigned int size;
+
+ /* Updated by the DMA driver on the completion of the request. */
+ int bytes_transferred;
+ int status;
+
+ /* DMA completion tracking information */
+ int buffer_status;
+
+ /* Client specific data */
+ void *dev;
+};
+
+int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+void tegra_dma_flush(struct tegra_dma_channel *ch);
+
+bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req);
+bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
+bool tegra_dma_is_stopped(struct tegra_dma_channel *ch);
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
+void tegra_dma_free_channel(struct tegra_dma_channel *ch);
+int tegra_dma_cancel(struct tegra_dma_channel *ch);
+
+int __init tegra_dma_init(void);
+
+#else /* !defined(CONFIG_TEGRA_SYSTEM_DMA) */
+static inline int tegra_dma_init(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif
#include <asm/hardware/gic.h>
/* Uses the GIC interrupt controller built into the cpu */
-#define ICTRL_BASE (IO_CPU_VIRT + 0x100)
+#define ICTRL_BASE (IO_CPU_VIRT + 0x40100)
.macro disable_fiq
.endm
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_FB_H
+#define __MACH_TEGRA_FB_H
+
+#include <linux/fb.h>
+
+struct nvhost_device;
+struct tegra_dc;
+struct tegra_fb_data;
+struct tegra_fb_info;
+struct resource;
+
+#ifdef CONFIG_FB_TEGRA
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem);
+void tegra_fb_unregister(struct tegra_fb_info *fb_info);
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(struct fb_videomode *mode));
+/* called by display controller on suspend */
+void tegra_fb_suspend(struct tegra_fb_info *tegra_fb);
+#else
+static inline struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem)
+{
+ return NULL;
+}
+
+static inline void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+}
+
+static inline void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(struct fb_videomode *mode))
+{
+}
+static inline void tegra_fb_suspend(struct tegra_fb_info *tegra_fb)
+{
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_TEGRA_FIQ_H
+#define __ASM_ARCH_TEGRA_FIQ_H
+
+/* enable/disable an interrupt that is an FIQ (safe from FIQ context?) */
+void tegra_fiq_enable(int n);
+void tegra_fiq_disable(int n);
+
+#endif
#include <mach/irqs.h>
-#define ARCH_NR_GPIOS INT_GPIO_NR
+#define TEGRA_NR_GPIOS INT_GPIO_NR
#include <asm-generic/gpio.h>
static inline int gpio_to_irq(unsigned int gpio)
{
- if (gpio < ARCH_NR_GPIOS)
+ if (gpio < TEGRA_NR_GPIOS)
return INT_GPIO_BASE + gpio;
return -EINVAL;
}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/i2s.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_TEGRA_I2S_H
+#define __ARCH_ARM_MACH_TEGRA_I2S_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+/* Offsets from TEGRA_I2S1_BASE and TEGRA_I2S2_BASE */
+
+#define I2S_I2S_CTRL_0 0
+#define I2S_I2S_STATUS_0 4
+#define I2S_I2S_TIMING_0 8
+#define I2S_I2S_FIFO_SCR_0 0x0c
+#define I2S_I2S_PCM_CTRL_0 0x10
+#define I2S_I2S_NW_CTRL_0 0x14
+#define I2S_I2S_TDM_CTRL_0 0x20
+#define I2S_I2S_TDM_TX_RX_CTRL_0 0x24
+#define I2S_I2S_FIFO1_0 0x40
+#define I2S_I2S_FIFO2_0 0x80
+
+/*
+ * I2S_I2S_CTRL_0
+ */
+
+#define I2S_I2S_CTRL_FIFO2_TX_ENABLE (1<<30)
+#define I2S_I2S_CTRL_FIFO1_ENABLE (1<<29)
+#define I2S_I2S_CTRL_FIFO2_ENABLE (1<<28)
+#define I2S_I2S_CTRL_FIFO1_RX_ENABLE (1<<27)
+#define I2S_I2S_CTRL_FIFO_LPBK_ENABLE (1<<26)
+#define I2S_I2S_CTRL_MASTER_ENABLE (1<<25)
+#define I2S_I2S_CTRL_L_R_CTRL (1<<24) /* 0 = L/R: low/high */
+
+#define I2S_BIT_FORMAT_I2S 0
+#define I2S_BIT_FORMAT_RJM 1
+#define I2S_BIT_FORMAT_LJM 2
+#define I2S_BIT_FORMAT_DSP 3
+#define I2S_BIT_FORMAT_SHIFT 10
+
+#define I2S_I2S_CTRL_BIT_FORMAT_MASK (3<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_I2S (I2S_BIT_FORMAT_I2S<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_RJM (I2S_BIT_FORMAT_RJM<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_LJM (I2S_BIT_FORMAT_LJM<<10)
+#define I2S_I2S_CTRL_BIT_FORMAT_DSP (I2S_BIT_FORMAT_DSP<<10)
+
+#define I2S_BIT_SIZE_16 0
+#define I2S_BIT_SIZE_20 1
+#define I2S_BIT_SIZE_24 2
+#define I2S_BIT_SIZE_32 3
+#define I2S_BIT_SIZE_SHIFT 8
+
+#define I2S_I2S_CTRL_BIT_SIZE_MASK (3 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_16 (I2S_BIT_SIZE_16 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_20 (I2S_BIT_SIZE_20 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_24 (I2S_BIT_SIZE_24 << I2S_BIT_SIZE_SHIFT)
+#define I2S_I2S_CTRL_BIT_SIZE_32 (I2S_BIT_SIZE_32 << I2S_BIT_SIZE_SHIFT)
+
+#define I2S_FIFO_16_LSB 0
+#define I2S_FIFO_20_LSB 1
+#define I2S_FIFO_24_LSB 2
+#define I2S_FIFO_32 3
+#define I2S_FIFO_PACKED 7
+#define I2S_FIFO_SHIFT 4
+
+#define I2S_I2S_CTRL_FIFO_FORMAT_MASK (7<<4)
+#define I2S_I2S_CTRL_FIFO_FORMAT_16_LSB \
+ (I2S_FIFO_16_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_20_LSB \
+ (I2S_FIFO_20_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_24_LSB \
+ (I2S_FIFO_24_LSB << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_32 \
+ (I2S_FIFO_32 << I2S_FIFO_SHIFT)
+#define I2S_I2S_CTRL_FIFO_FORMAT_PACKED \
+ (I2S_FIFO_PACKED << I2S_FIFO_SHIFT)
+
+#define I2S_I2S_IE_FIFO1_ERR (1<<3)
+#define I2S_I2S_IE_FIFO2_ERR (1<<2)
+#define I2S_I2S_QE_FIFO1 (1<<1)
+#define I2S_I2S_QE_FIFO2 (1<<0)
+
+/*
+ * I2S_I2S_STATUS_0
+ */
+
+#define I2S_I2S_STATUS_FIFO1_RDY (1<<31)
+#define I2S_I2S_STATUS_FIFO2_RDY (1<<30)
+#define I2S_I2S_STATUS_FIFO1_BSY (1<<29)
+#define I2S_I2S_STATUS_FIFO2_BSY (1<<28)
+#define I2S_I2S_STATUS_FIFO1_ERR (1<<3)
+#define I2S_I2S_STATUS_FIFO2_ERR (1<<2)
+#define I2S_I2S_STATUS_QS_FIFO1 (1<<1)
+#define I2S_I2S_STATUS_QS_FIFO2 (1<<0)
+
+/*
+ * I2S_I2S_TIMING_0
+ */
+
+#define I2S_I2S_TIMING_NON_SYM_ENABLE (1<<12)
+#define I2S_I2S_TIMING_CHANNEL_BIT_COUNT_MASK 0x7ff
+#define I2S_I2S_TIMING_CHANNEL_BIT_COUNT (1<<0)
+
+/*
+ * I2S_I2S_FIFO_SCR_0
+ */
+
+#define I2S_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK 0x3f
+#define I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT 24
+#define I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT 16
+
+#define I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_MASK (0x3f<<24)
+#define I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_MASK (0x3f<<16)
+
+#define I2S_I2S_FIFO_SCR_FIFO2_CLR (1<<12)
+#define I2S_I2S_FIFO_SCR_FIFO1_CLR (1<<8)
+
+#define I2S_FIFO_ATN_LVL_ONE_SLOT 0
+#define I2S_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define I2S_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define I2S_FIFO_ATN_LVL_TWELVE_SLOTS 3
+#define I2S_FIFO2_ATN_LVL_SHIFT 4
+#define I2S_FIFO1_ATN_LVL_SHIFT 0
+
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK \
+ (3 << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_ONE_SLOT \
+ (I2S_FIFO_ATN_LVL_ONE_SLOT << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS \
+ (I2S_FIFO_ATN_LVL_FOUR_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_EIGHT_SLOTS \
+ (I2S_FIFO_ATN_LVL_EIGHT_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_TWELVE_SLOTS \
+ (I2S_FIFO_ATN_LVL_TWELVE_SLOTS << I2S_FIFO2_ATN_LVL_SHIFT)
+
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK \
+ (3 << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_ONE_SLOT \
+ (I2S_FIFO_ATN_LVL_ONE_SLOT << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS \
+ (I2S_FIFO_ATN_LVL_FOUR_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_EIGHT_SLOTS \
+ (I2S_FIFO_ATN_LVL_EIGHT_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+#define I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_TWELVE_SLOTS \
+ (I2S_FIFO_ATN_LVL_TWELVE_SLOTS << I2S_FIFO1_ATN_LVL_SHIFT)
+/*
+ * I2S_I2S_PCM_CTRL_0
+ */
+#define I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ 0
+#define I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ 1
+#define I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ 2
+#define I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ 3
+#define I2S_PCM_TRM_EDGE_CTRL_SHIFT 9
+
+#define I2S_I2S_PCM_TRM_EDGE_CTRL_MASK \
+ (3 << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ \
+ (I2S_PCM_TRM_EDGE_POS_EDGE_NO_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ \
+ (I2S_PCM_TRM_EDGE_POS_EDGE_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ \
+ (I2S_PCM_TRM_EDGE_NEG_EDGE_NO_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+#define I2S_I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ \
+ (I2S_PCM_TRM_EDGE_NEG_EDGE_HIGHZ \
+ << I2S_PCM_TRM_EDGE_CTRL_SHIFT)
+
+#define I2S_PCM_TRM_MASK_BITS_ZERO 0
+#define I2S_PCM_TRM_MASK_BITS_ONE 1
+#define I2S_PCM_TRM_MASK_BITS_TWO 2
+#define I2S_PCM_TRM_MASK_BITS_THREE 3
+#define I2S_PCM_TRM_MASK_BITS_FOUR 4
+#define I2S_PCM_TRM_MASK_BITS_FIVE 5
+#define I2S_PCM_TRM_MASK_BITS_SIX 6
+#define I2S_PCM_TRM_MASK_BITS_SEVEN 7
+#define I2S_PCM_TRM_MASK_BITS_SHIFT 6
+
+#define I2S_I2S_PCM_TRM_MASK_BITS_MASK \
+ (7 << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_ZERO \
+ (I2S_PCM_TRM_MASK_BITS_ZERO \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_ONE \
+ (I2S_PCM_TRM_MASK_BITS_ONE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_TWO \
+ (I2S_PCM_TRM_MASK_BITS_TWO \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_THREE \
+ (I2S_PCM_TRM_MASK_BITS_THREE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_FOUR \
+ (I2S_PCM_TRM_MASK_BITS_FOUR \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_FIVE \
+ (I2S_PCM_TRM_MASK_BITS_FIVE \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_SIX \
+ (I2S_PCM_TRM_MASK_BITS_SIX \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_TRM_MASK_BITS_SEVEN \
+ (I2S_PCM_TRM_MASK_BITS_SEVEN \
+ << I2S_PCM_TRM_MASK_BITS_SHIFT)
+
+#define I2S_I2S_PCM_CTRL_FSYNC_PCM_CTRL (1<<5)
+#define I2S_I2S_PCM_CTRL_TRM_MODE (1<<4)
+
+#define I2S_PCM_RCV_MASK_BITS_ZERO 0
+#define I2S_PCM_RCV_MASK_BITS_ONE 1
+#define I2S_PCM_RCV_MASK_BITS_TWO 2
+#define I2S_PCM_RCV_MASK_BITS_THREE 3
+#define I2S_PCM_RCV_MASK_BITS_FOUR 4
+#define I2S_PCM_RCV_MASK_BITS_FIVE 5
+#define I2S_PCM_RCV_MASK_BITS_SIX 6
+#define I2S_PCM_RCV_MASK_BITS_SEVEN 7
+#define I2S_PCM_RCV_MASK_BITS_SHIFT 1
+
+#define I2S_I2S_PCM_RCV_MASK_BITS_MASK \
+ (7 << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_ZERO \
+ (I2S_PCM_RCV_MASK_BITS_ZERO \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_ONE \
+ (I2S_PCM_RCV_MASK_BITS_ONE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_TWO \
+ (I2S_PCM_RCV_MASK_BITS_TWO \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_THREE \
+ (I2S_PCM_RCV_MASK_BITS_THREE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_FOUR \
+ (I2S_PCM_RCV_MASK_BITS_FOUR \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_FIVE \
+ (I2S_PCM_RCV_MASK_BITS_FIVE \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_SIX \
+ (I2S_PCM_RCV_MASK_BITS_SIX \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+#define I2S_I2S_PCM_RCV_MASK_BITS_SEVEN \
+ (I2S_PCM_RCV_MASK_BITS_SEVEN \
+ << I2S_PCM_RCV_MASK_BITS_SHIFT)
+
+#define I2S_I2S_PCM_CTRL_RCV_MODE (1<<0)
+
+/*
+ * I2S_I2S_NW_CTRL_0
+ */
+
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT1 0
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT2 1
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT3 2
+#define I2S_TRM_TLPHY_SLOT_SEL_SLOT4 3
+#define I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT 4
+
+#define I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_MASK \
+ (3 << I2S_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT1 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT1 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT2 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT2 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT3 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT3 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_TRM_TLPHY_SLOT_SEL_SLOT4 \
+ (I2S_TRM_TLPHY_SLOT_SEL_SLOT4 \
+ << I2S_I2S_NW_TRM_TLPHY_SLOT_SEL_SHIFT)
+
+#define I2S_I2S_NW_CTRL_TRM_TLPHY_MODE (1<<3)
+
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT1 0
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT2 1
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT3 2
+#define I2S_RCV_TLPHY_SLOT_SEL_SLOT4 3
+#define I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT 1
+
+#define I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_MASK \
+ (3 << I2S_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT1 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT1 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT2 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT2 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT3 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT3 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+#define I2S_I2S_RCV_TLPHY_SLOT_SEL_SLOT4 \
+ (I2S_RCV_TLPHY_SLOT_SEL_SLOT4 \
+ << I2S_I2S_NW_RCV_TLPHY_SLOT_SEL_SHIFT)
+
+#define I2S_I2S_NW_CTRL_RCV_TLPHY_MODE (1<<0)
+
+#endif /* __ARCH_ARM_MACH_TEGRA_I2S_H */
*
*/
-#define IO_CPU_PHYS 0x50040000
+#define IO_IRAM_PHYS 0x40000000
+#define IO_IRAM_VIRT 0xFE400000
+#define IO_IRAM_SIZE SZ_256K
+
+#define IO_CPU_PHYS 0x50000000
#define IO_CPU_VIRT 0xFE000000
-#define IO_CPU_SIZE SZ_16K
+#define IO_CPU_SIZE SZ_1M
#define IO_PPSB_PHYS 0x60000000
#define IO_PPSB_VIRT 0xFE200000
#define IO_APB_VIRT 0xFE300000
#define IO_APB_SIZE SZ_1M
+#define IO_USB_PHYS 0xC5000000
+#define IO_USB_VIRT 0xFE500000
+#define IO_USB_SIZE SZ_1M
+
+#define IO_SDMMC_PHYS 0xC8000000
+#define IO_SDMMC_VIRT 0xFE600000
+#define IO_SDMMC_SIZE SZ_1M
+
+#define IO_HOST1X_PHYS 0x54000000
+#define IO_HOST1X_VIRT 0xFE700000
+#define IO_HOST1X_SIZE SZ_4M
+
#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_HOST1X_PHYS, IO_HOST1X_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_HOST1X_PHYS, IO_HOST1X_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_USB_PHYS, IO_USB_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_USB_PHYS, IO_USB_VIRT) : \
+ IO_TO_VIRT_BETWEEN((n), IO_SDMMC_PHYS, IO_SDMMC_SIZE) ? \
+ IO_TO_VIRT_XLATE((n), IO_SDMMC_PHYS, IO_SDMMC_VIRT) : \
0)
#ifndef __ASSEMBLER__
#include <asm/sizes.h>
+#define TEGRA_IRAM_BASE 0x40000000
+#define TEGRA_IRAM_SIZE SZ_256K
+
+#define TEGRA_HOST1X_BASE 0x50000000
+#define TEGRA_HOST1X_SIZE 0x24000
+
#define TEGRA_ARM_PERIF_BASE 0x50040000
#define TEGRA_ARM_PERIF_SIZE SZ_8K
+#define TEGRA_ARM_PL310_BASE 0x50043000
+#define TEGRA_ARM_PL310_SIZE SZ_4K
+
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
+#define TEGRA_MPE_BASE 0x54040000
+#define TEGRA_MPE_SIZE SZ_256K
+
+#define TEGRA_VI_BASE 0x54080000
+#define TEGRA_VI_SIZE SZ_256K
+
+#define TEGRA_ISP_BASE 0x54100000
+#define TEGRA_ISP_SIZE SZ_256K
+
#define TEGRA_DISPLAY_BASE 0x54200000
#define TEGRA_DISPLAY_SIZE SZ_256K
#define TEGRA_DISPLAY2_BASE 0x54240000
#define TEGRA_DISPLAY2_SIZE SZ_256K
+#define TEGRA_HDMI_BASE 0x54280000
+#define TEGRA_HDMI_SIZE SZ_256K
+
+#define TEGRA_GART_BASE 0x58000000
+#define TEGRA_GART_SIZE SZ_32M
+
+#define TEGRA_RES_SEMA_BASE 0x60001000
+#define TEGRA_RES_SEMA_SIZE SZ_4K
+
+#define TEGRA_ARB_SEMA_BASE 0x60002000
+#define TEGRA_ARB_SEMA_SIZE SZ_4K
+
#define TEGRA_PRIMARY_ICTLR_BASE 0x60004000
-#define TEGRA_PRIMARY_ICTLR_SIZE SZ_64
+#define TEGRA_PRIMARY_ICTLR_SIZE 64
+
+#define TEGRA_ARBGNT_ICTLR_BASE 0x60004040
+#define TEGRA_ARBGNT_ICTLR_SIZE 192
#define TEGRA_SECONDARY_ICTLR_BASE 0x60004100
-#define TEGRA_SECONDARY_ICTLR_SIZE SZ_64
+#define TEGRA_SECONDARY_ICTLR_SIZE 64
#define TEGRA_TERTIARY_ICTLR_BASE 0x60004200
-#define TEGRA_TERTIARY_ICTLR_SIZE SZ_64
+#define TEGRA_TERTIARY_ICTLR_SIZE 64
#define TEGRA_QUATERNARY_ICTLR_BASE 0x60004300
-#define TEGRA_QUATERNARY_ICTLR_SIZE SZ_64
+#define TEGRA_QUATERNARY_ICTLR_SIZE 64
#define TEGRA_TMR1_BASE 0x60005000
-#define TEGRA_TMR1_SIZE SZ_8
+#define TEGRA_TMR1_SIZE 8
#define TEGRA_TMR2_BASE 0x60005008
-#define TEGRA_TMR2_SIZE SZ_8
+#define TEGRA_TMR2_SIZE 8
#define TEGRA_TMRUS_BASE 0x60005010
-#define TEGRA_TMRUS_SIZE SZ_64
+#define TEGRA_TMRUS_SIZE 64
#define TEGRA_TMR3_BASE 0x60005050
-#define TEGRA_TMR3_SIZE SZ_8
+#define TEGRA_TMR3_SIZE 8
#define TEGRA_TMR4_BASE 0x60005058
-#define TEGRA_TMR4_SIZE SZ_8
+#define TEGRA_TMR4_SIZE 8
#define TEGRA_CLK_RESET_BASE 0x60006000
#define TEGRA_CLK_RESET_SIZE SZ_4K
#define TEGRA_FLOW_CTRL_BASE 0x60007000
#define TEGRA_FLOW_CTRL_SIZE 20
-#define TEGRA_STATMON_BASE 0x6000C4000
+#define TEGRA_AHB_DMA_BASE 0x60008000
+#define TEGRA_AHB_DMA_SIZE SZ_4K
+
+#define TEGRA_AHB_DMA_CH0_BASE 0x60009000
+#define TEGRA_AHB_DMA_CH0_SIZE 32
+
+#define TEGRA_APB_DMA_BASE 0x6000A000
+#define TEGRA_APB_DMA_SIZE SZ_4K
+
+#define TEGRA_APB_DMA_CH0_BASE 0x6000B000
+#define TEGRA_APB_DMA_CH0_SIZE 32
+
+#define TEGRA_AHB_GIZMO_BASE 0x6000C004
+#define TEGRA_AHB_GIZMO_SIZE 0x10C
+
+#define TEGRA_STATMON_BASE 0x6000C400
#define TEGRA_STATMON_SIZE SZ_1K
#define TEGRA_GPIO_BASE 0x6000D000
#define TEGRA_EXCEPTION_VECTORS_BASE 0x6000F000
#define TEGRA_EXCEPTION_VECTORS_SIZE SZ_4K
+#define TEGRA_VDE_BASE 0x6001A000
+#define TEGRA_VDE_SIZE (SZ_8K + SZ_4K - SZ_256)
+
#define TEGRA_APB_MISC_BASE 0x70000000
#define TEGRA_APB_MISC_SIZE SZ_4K
#define TEGRA_I2S2_SIZE SZ_256
#define TEGRA_UARTA_BASE 0x70006000
-#define TEGRA_UARTA_SIZE SZ_64
+#define TEGRA_UARTA_SIZE 64
#define TEGRA_UARTB_BASE 0x70006040
-#define TEGRA_UARTB_SIZE SZ_64
+#define TEGRA_UARTB_SIZE 64
#define TEGRA_UARTC_BASE 0x70006200
#define TEGRA_UARTC_SIZE SZ_256
#define TEGRA_PWFM_BASE 0x7000A000
#define TEGRA_PWFM_SIZE SZ_256
+#define TEGRA_PWFM0_BASE 0x7000A000
+#define TEGRA_PWFM0_SIZE 4
+
+#define TEGRA_PWFM1_BASE 0x7000A010
+#define TEGRA_PWFM1_SIZE 4
+
+#define TEGRA_PWFM2_BASE 0x7000A020
+#define TEGRA_PWFM2_SIZE 4
+
+#define TEGRA_PWFM3_BASE 0x7000A030
+#define TEGRA_PWFM3_SIZE 4
+
#define TEGRA_MIPI_BASE 0x7000B000
#define TEGRA_MIPI_SIZE SZ_256
#define TEGRA_I2C3_BASE 0x7000C500
#define TEGRA_I2C3_SIZE SZ_256
-#define TEGRA_OWR_BASE 0x7000D000
+#define TEGRA_OWR_BASE 0x7000C600
#define TEGRA_OWR_SIZE 80
#define TEGRA_DVC_BASE 0x7000D000
#define TEGRA_USB_BASE 0xC5000000
#define TEGRA_USB_SIZE SZ_16K
-#define TEGRA_USB1_BASE 0xC5004000
-#define TEGRA_USB1_SIZE SZ_16K
-
-#define TEGRA_USB2_BASE 0xC5008000
+#define TEGRA_USB2_BASE 0xC5004000
#define TEGRA_USB2_SIZE SZ_16K
+#define TEGRA_USB3_BASE 0xC5008000
+#define TEGRA_USB3_SIZE SZ_16K
+
#define TEGRA_SDMMC1_BASE 0xC8000000
#define TEGRA_SDMMC1_SIZE SZ_512
#define TEGRA_SDMMC4_BASE 0xC8000600
#define TEGRA_SDMMC4_SIZE SZ_512
+#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
+# define TEGRA_DEBUG_UART_BASE 0
+#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTA_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTB_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTC_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTD_BASE
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE
+#endif
+
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/iovmm.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#ifndef _MACH_TEGRA_IOVMM_H_
+#define _MACH_TEGRA_IOVMM_H_
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+typedef u32 tegra_iovmm_addr_t;
+#else
+#error "Unsupported tegra architecture family"
+#endif
+
+struct tegra_iovmm_device_ops;
+
+/* each I/O virtual memory manager unit should register a device with
+ * the iovmm system
+ */
+struct tegra_iovmm_device {
+ struct tegra_iovmm_device_ops *ops;
+ const char *name;
+ struct list_head list;
+ int pgsize_bits;
+};
+
+/* tegra_iovmm_domain serves a purpose analogous to mm_struct as defined in
+ * <linux/mm_types.h> - it defines a virtual address space within which
+ * tegra_iovmm_areas can be created.
+ */
+struct tegra_iovmm_domain {
+ atomic_t clients;
+ atomic_t locks;
+ spinlock_t block_lock;
+ unsigned long flags;
+ wait_queue_head_t delay_lock; /* when lock_client fails */
+ struct rw_semaphore map_lock;
+ struct rb_root all_blocks; /* ordered by address */
+ struct rb_root free_blocks; /* ordered by size */
+ struct tegra_iovmm_device *dev;
+};
+
+/* tegra_iovmm_client is analogous to an individual task in the task group
+ * which owns an mm_struct.
+ */
+
+struct iovmm_share_group;
+
+struct tegra_iovmm_client {
+ const char *name;
+ unsigned long flags;
+ struct iovmm_share_group *group;
+ struct tegra_iovmm_domain *domain;
+ struct list_head list;
+};
+
+/* tegra_iovmm_area serves a purpose analogous to vm_area_struct as defined
+ * in <linux/mm_types.h> - it defines a virtual memory area which can be
+ * mapped to physical memory by a client-provided mapping function. */
+
+struct tegra_iovmm_area {
+ struct tegra_iovmm_domain *domain;
+ tegra_iovmm_addr_t iovm_start;
+ tegra_iovmm_addr_t iovm_length;
+ pgprot_t pgprot;
+ struct tegra_iovmm_area_ops *ops;
+};
+
+struct tegra_iovmm_device_ops {
+ /* maps a VMA using the page residency functions provided by the VMA */
+ int (*map)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *io_vma);
+ /* marks all PTEs in a VMA as invalid; decommits the virtual address
+ * space (potentially freeing PDEs when decommit is true.) */
+ void (*unmap)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *io_vma, bool decommit);
+ void (*map_pfn)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *io_vma,
+ tegra_iovmm_addr_t offs, unsigned long pfn);
+ /* ensures that a domain is resident in the hardware's mapping region
+ * so that it may be used by a client */
+ int (*lock_domain)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_domain *domain);
+ void (*unlock_domain)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_domain *domain);
+ /* allocates a vmm_domain for the specified client; may return the same
+ * domain for multiple clients */
+ struct tegra_iovmm_domain* (*alloc_domain)(
+ struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_client *client);
+ void (*free_domain)(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_domain *domain);
+ int (*suspend)(struct tegra_iovmm_device *dev);
+ void (*resume)(struct tegra_iovmm_device *dev);
+};
+
+struct tegra_iovmm_area_ops {
+ /* ensures that the page of data starting at the specified offset
+ * from the start of the iovma is resident and pinned for use by
+ * DMA, returns the system pfn, or an invalid pfn if the
+ * operation fails. */
+ unsigned long (*lock_makeresident)(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t offs);
+ /* called when the page is unmapped from the I/O VMA */
+ void (*release)(struct tegra_iovmm_area *area, tegra_iovmm_addr_t offs);
+};
+
+#ifdef CONFIG_TEGRA_IOVMM
+/* called by clients to allocate an I/O VMM client mapping context which
+ * will be shared by all clients in the same share_group */
+struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
+ const char *share_group);
+
+size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client);
+
+void tegra_iovmm_free_client(struct tegra_iovmm_client *client);
+
+/* called by clients to ensure that their mapping context is resident
+ * before performing any DMA operations addressing I/O VMM regions.
+ * client_lock may return -EINTR. */
+int tegra_iovmm_client_lock(struct tegra_iovmm_client *client);
+int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client);
+
+/* called by clients after DMA operations are complete */
+void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client);
+
+/* called by clients to allocate a new iovmm_area and reserve I/O virtual
+ * address space for it. if ops is NULL, clients should subsequently call
+ * tegra_iovmm_vm_map_pages and/or tegra_iovmm_vm_insert_pfn to explicitly
+ * map the I/O virtual address to an OS-allocated page or physical address,
+ * respectively. VM operations may be called before this call returns */
+struct tegra_iovmm_area *tegra_iovmm_create_vm(
+ struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+ unsigned long size, pgprot_t pgprot);
+
+/* called by clients to "zap" an iovmm_area, and replace all mappings
+ * in it with invalid ones, without freeing the virtual address range */
+void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm);
+
+/* after zapping a demand-loaded iovmm_area, the client should unzap it
+ * to allow the VMM device to remap the page range. */
+void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm);
+
+/* called by clients to return an iovmm_area to the free pool for the domain */
+void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm);
+
+/* called by client software to map the page-aligned I/O address vaddr to
+ * a specific physical address pfn. I/O VMA should have been created with
+ * a NULL tegra_iovmm_area_ops structure. */
+void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t vaddr, unsigned long pfn);
+
+/* called by clients to return the iovmm_area containing addr, or NULL if
+ * addr has not been allocated. caller should call tegra_iovmm_put_area when
+ * finished using the returned pointer */
+struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+ struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr);
+
+struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm);
+void tegra_iovmm_area_put(struct tegra_iovmm_area *vm);
+
+/* called by drivers to initialize a tegra_iovmm_domain structure */
+int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+ tegra_iovmm_addr_t end);
+
+/* called by drivers to register an I/O VMM device with the system */
+int tegra_iovmm_register(struct tegra_iovmm_device *dev);
+
+/* called by drivers to remove an I/O VMM device from the system */
+int tegra_iovmm_unregister(struct tegra_iovmm_device *dev);
+
+/* called by platform suspend code to save IOVMM context */
+int tegra_iovmm_suspend(void);
+
+/* restores IOVMM context */
+void tegra_iovmm_resume(void);
+
+#else /* CONFIG_TEGRA_IOVMM */
+
+static inline struct tegra_iovmm_client *tegra_iovmm_alloc_client(
+ const char *name, const char *share_group)
+{
+ return NULL;
+}
+
+static inline size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
+{}
+
+static inline int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
+{}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_create_vm(
+ struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+ unsigned long size, pgprot_t pgprot)
+{
+ return NULL;
+}
+
+static inline void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm) { }
+
+static inline void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm) { }
+
+static inline void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm) { }
+
+static inline void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+ tegra_iovmm_addr_t vaddr, unsigned long pfn) { }
+
+static inline struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+ struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
+{
+ return NULL;
+}
+
+static inline struct tegra_iovmm_area *tegra_iovmm_area_get(
+ struct tegra_iovmm_area *vm)
+{
+ return NULL;
+}
+
+static inline void tegra_iovmm_area_put(struct tegra_iovmm_area *vm) { }
+
+static inline int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+ tegra_iovmm_addr_t end)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_register(struct tegra_iovmm_device *dev)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
+{
+ return 0;
+}
+
+static inline int tegra_iovmm_suspend(void)
+{
+ return 0;
+}
+
+static inline void tegra_iovmm_resume(void) { }
+#endif /* CONFIG_TEGRA_IOVMM */
+
+
+#endif
#define IRQ_LOCALTIMER 29
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* Primary Interrupt Controller */
#define INT_PRI_BASE (INT_GIC_BASE + 32)
#define INT_TMR1 (INT_PRI_BASE + 0)
#define INT_SYS_STATS_MON (INT_SEC_BASE + 22)
#define INT_GPIO5 (INT_SEC_BASE + 23)
#define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24)
-#define INT_CPU2_PMU_INTR (INT_SEC_BASE + 25)
+#define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25)
#define INT_SEC_RES_26 (INT_SEC_BASE + 26)
#define INT_S_LINK1 (INT_SEC_BASE + 27)
#define INT_APB_DMA_COP (INT_SEC_BASE + 28)
#define INT_QUAD_RES_30 (INT_QUAD_BASE + 30)
#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
-#define INT_GPIO_BASE (INT_QUAD_BASE + 32)
+#define INT_MAIN_NR (INT_QUAD_BASE + 32 - INT_PRI_BASE)
+
+#define INT_SYNCPT_THRESH_BASE (INT_QUAD_BASE + 32)
+#define INT_SYNCPT_THRESH_NR 32
+
+#define INT_GPIO_BASE (INT_SYNCPT_THRESH_BASE + \
+ INT_SYNCPT_THRESH_NR)
#define INT_GPIO_NR (28 * 8)
-#define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+#define FIQ_START INT_GIC_BASE
+
+#define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
+
+#define INT_BOARD_BASE TEGRA_NR_IRQS
+#define NR_BOARD_IRQS 32
+
+#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS)
+#endif
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/kfuse.h
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* there are 144 32-bit values in total */
+#define KFUSE_DATA_SZ (144 * 4)
+
+int tegra_kfuse_read(void *dest, size_t len);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/legacy_irq.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_LEGACY_IRQ_H
+#define _ARCH_ARM_MACH_TEGRA_LEGACY_IRQ_H
+
+void tegra_legacy_mask_irq(unsigned int irq);
+void tegra_legacy_unmask_irq(unsigned int irq);
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
+void tegra_legacy_force_irq_set(unsigned int irq);
+void tegra_legacy_force_irq_clr(unsigned int irq);
+int tegra_legacy_force_irq_status(unsigned int irq);
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
+unsigned long tegra_legacy_vfiq(int nr);
+unsigned long tegra_legacy_class(int nr);
+int tegra_legacy_irq_set_wake(int irq, int enable);
+void tegra_legacy_irq_set_lp1_wake_mask(void);
+void tegra_legacy_irq_restore_mask(void);
+void tegra_init_legacy_irq(void);
+void tegra_legacy_irq_suspend(void);
+void tegra_legacy_irq_resume(void);
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/mc.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_MC_H
+#define __MACH_TEGRA_MC_H
+
+#define TEGRA_MC_FPRI_CTRL_AVPC 0x17c
+#define TEGRA_MC_FPRI_CTRL_DC 0x180
+#define TEGRA_MC_FPRI_CTRL_DCB 0x184
+#define TEGRA_MC_FPRI_CTRL_EPP 0x188
+#define TEGRA_MC_FPRI_CTRL_G2 0x18c
+#define TEGRA_MC_FPRI_CTRL_HC 0x190
+#define TEGRA_MC_FPRI_CTRL_ISP 0x194
+#define TEGRA_MC_FPRI_CTRL_MPCORE 0x198
+#define TEGRA_MC_FPRI_CTRL_MPEA 0x19c
+#define TEGRA_MC_FPRI_CTRL_MPEB 0x1a0
+#define TEGRA_MC_FPRI_CTRL_MPEC 0x1a4
+#define TEGRA_MC_FPRI_CTRL_NV 0x1a8
+#define TEGRA_MC_FPRI_CTRL_PPCS 0x1ac
+#define TEGRA_MC_FPRI_CTRL_VDE 0x1b0
+#define TEGRA_MC_FPRI_CTRL_VI 0x1b4
+
+#define TEGRA_MC_CLIENT_AVPCARM7R ((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 0)
+#define TEGRA_MC_CLIENT_AVPCARM7W ((TEGRA_MC_FPRI_CTRL_AVPC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0A ((TEGRA_MC_FPRI_CTRL_DC << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0B ((TEGRA_MC_FPRI_CTRL_DC << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0C ((TEGRA_MC_FPRI_CTRL_DC << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1B ((TEGRA_MC_FPRI_CTRL_DC << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHC ((TEGRA_MC_FPRI_CTRL_DC << 8) | 8)
+#define TEGRA_MC_CLIENT_DISPLAY0AB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 0)
+#define TEGRA_MC_CLIENT_DISPLAY0BB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 2)
+#define TEGRA_MC_CLIENT_DISPLAY0CB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 4)
+#define TEGRA_MC_CLIENT_DISPLAY1BB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 6)
+#define TEGRA_MC_CLIENT_DISPLAYHCB ((TEGRA_MC_FPRI_CTRL_DCB << 8) | 8)
+#define TEGRA_MC_CLIENT_EPPUP ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 0)
+#define TEGRA_MC_CLIENT_EPPU ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 2)
+#define TEGRA_MC_CLIENT_EPPV ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 4)
+#define TEGRA_MC_CLIENT_EPPY ((TEGRA_MC_FPRI_CTRL_EPP << 8) | 6)
+#define TEGRA_MC_CLIENT_G2PR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 0)
+#define TEGRA_MC_CLIENT_G2SR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 2)
+#define TEGRA_MC_CLIENT_G2DR ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 4)
+#define TEGRA_MC_CLIENT_G2DW ((TEGRA_MC_FPRI_CTRL_G2 << 8) | 6)
+#define TEGRA_MC_CLIENT_HOST1XDMAR ((TEGRA_MC_FPRI_CTRL_HC << 8) | 0)
+#define TEGRA_MC_CLIENT_HOST1XR ((TEGRA_MC_FPRI_CTRL_HC << 8) | 2)
+#define TEGRA_MC_CLIENT_HOST1XW ((TEGRA_MC_FPRI_CTRL_HC << 8) | 4)
+#define TEGRA_MC_CLIENT_ISPW ((TEGRA_MC_FPRI_CTRL_ISP << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCORER ((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 0)
+#define TEGRA_MC_CLIENT_MPCOREW ((TEGRA_MC_FPRI_CTRL_MPCORE << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEAMEMRD ((TEGRA_MC_FPRI_CTRL_MPEA << 8) | 0)
+#define TEGRA_MC_CLIENT_MPEUNIFBR ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 0)
+#define TEGRA_MC_CLIENT_MPE_IPRED ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 2)
+#define TEGRA_MC_CLIENT_MPEUNIFBW ((TEGRA_MC_FPRI_CTRL_MPEB << 8) | 4)
+#define TEGRA_MC_CLIENT_MPECSRD ((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 0)
+#define TEGRA_MC_CLIENT_MPECSWR ((TEGRA_MC_FPRI_CTRL_MPEC << 8) | 2)
+#define TEGRA_MC_CLIENT_FDCDRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 0)
+#define TEGRA_MC_CLIENT_IDXSRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 2)
+#define TEGRA_MC_CLIENT_TEXSRD ((TEGRA_MC_FPRI_CTRL_NV << 8) | 4)
+#define TEGRA_MC_CLIENT_FDCDWR ((TEGRA_MC_FPRI_CTRL_NV << 8) | 6)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAR ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 0)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVR ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 2)
+#define TEGRA_MC_CLIENT_PPCSAHBDMAW ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 4)
+#define TEGRA_MC_CLIENT_PPCSAHBSLVW ((TEGRA_MC_FPRI_CTRL_PPCS << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVR ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 0)
+#define TEGRA_MC_CLIENT_VDEMBER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 2)
+#define TEGRA_MC_CLIENT_VDEMCER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 4)
+#define TEGRA_MC_CLIENT_VDETPER ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 6)
+#define TEGRA_MC_CLIENT_VDEBSEVW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 8)
+#define TEGRA_MC_CLIENT_VDEMBEW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 10)
+#define TEGRA_MC_CLIENT_VDETPMW ((TEGRA_MC_FPRI_CTRL_VDE << 8) | 12)
+#define TEGRA_MC_CLIENT_VIRUV ((TEGRA_MC_FPRI_CTRL_VI << 8) | 0)
+#define TEGRA_MC_CLIENT_VIWSB ((TEGRA_MC_FPRI_CTRL_VI << 8) | 2)
+#define TEGRA_MC_CLIENT_VIWU ((TEGRA_MC_FPRI_CTRL_VI << 8) | 4)
+#define TEGRA_MC_CLIENT_VIWV ((TEGRA_MC_FPRI_CTRL_VI << 8) | 6)
+#define TEGRA_MC_CLIENT_VIWY ((TEGRA_MC_FPRI_CTRL_VI << 8) | 8)
+
+#define TEGRA_MC_PRIO_LOWEST 0
+#define TEGRA_MC_PRIO_LOW 1
+#define TEGRA_MC_PRIO_MED 2
+#define TEGRA_MC_PRIO_HIGH 3
+#define TEGRA_MC_PRIO_MASK 3
+
+void tegra_mc_set_priority(unsigned long client, unsigned long prio);
+
+#endif
/* physical offset of RAM */
#define PHYS_OFFSET UL(0)
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
+
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Dima Zavin <dmitriyz@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_NAND_H
+#define __MACH_TEGRA_NAND_H
+
+struct tegra_nand_chip_parms {
+ uint8_t vendor_id;
+ uint8_t device_id;
+ uint32_t flags;
+
+ uint32_t capacity;
+
+ /* all timing info is in nanoseconds */
+ struct {
+ uint32_t trp;
+ uint32_t trh;
+ uint32_t twp;
+ uint32_t twh;
+ uint32_t tcs;
+ uint32_t twhr;
+ uint32_t tcr_tar_trr;
+ uint32_t twb;
+ uint32_t trp_resp;
+ uint32_t tadl;
+ } timing;
+};
+
+struct tegra_nand_platform {
+ uint8_t max_chips;
+ struct tegra_nand_chip_parms *chip_parms;
+ unsigned int nr_chip_parms;
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+};
+
+#endif
--- /dev/null
+/*
+ * include/linux/nvhost.h
+ *
+ * Tegra graphics host driver
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_NVHOST_H
+#define __LINUX_NVHOST_H
+
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct nvhost_master;
+
+struct nvhost_device {
+ const char *name;
+ struct device dev;
+ int id;
+ u32 num_resources;
+ struct resource *resource;
+
+ struct nvhost_master *host;
+};
+
+extern int nvhost_device_register(struct nvhost_device *);
+extern void nvhost_device_unregister(struct nvhost_device *);
+
+extern struct bus_type nvhost_bus_type;
+
+struct nvhost_driver {
+ int (*probe)(struct nvhost_device *);
+ int (*remove)(struct nvhost_device *);
+ void (*shutdown)(struct nvhost_device *);
+ int (*suspend)(struct nvhost_device *, pm_message_t state);
+ int (*resume)(struct nvhost_device *);
+ struct device_driver driver;
+};
+
+extern int nvhost_driver_register(struct nvhost_driver *);
+extern void nvhost_driver_unregister(struct nvhost_driver *);
+extern struct resource *nvhost_get_resource(struct nvhost_device *, unsigned int, unsigned int);
+extern int nvhost_get_irq(struct nvhost_device *, unsigned int);
+extern struct resource *nvhost_get_resource_byname(struct nvhost_device *, unsigned int, const char *);
+extern int nvhost_get_irq_byname(struct nvhost_device *, const char *);
+
+#define to_nvhost_device(x) container_of((x), struct nvhost_device, dev)
+#define to_nvhost_driver(drv) (container_of((drv), struct nvhost_driver, \
+ driver))
+
+#define nvhost_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
+#define nvhost_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data))
+
+int nvhost_bus_register(struct nvhost_master *host);
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_NO_TIMEOUT (-1)
+#define NVHOST_IOCTL_MAGIC 'H'
+
+struct nvhost_submit_hdr {
+ __u32 syncpt_id;
+ __u32 syncpt_incrs;
+ __u32 num_cmdbufs;
+ __u32 num_relocs;
+ __u32 num_waitchks;
+ __u32 waitchk_mask;
+};
+
+struct nvhost_cmdbuf {
+ __u32 mem;
+ __u32 offset;
+ __u32 words;
+};
+
+struct nvhost_reloc {
+ __u32 cmdbuf_mem;
+ __u32 cmdbuf_offset;
+ __u32 target;
+ __u32 target_offset;
+};
+
+struct nvhost_waitchk {
+ __u32 mem;
+ __u32 offset;
+ __u32 syncpt_id;
+ __u32 thresh;
+};
+
+struct nvhost_get_param_args {
+ __u32 value;
+};
+
+struct nvhost_set_nvmap_fd_args {
+ __u32 fd;
+};
+
+#define NVHOST_IOCTL_CHANNEL_FLUSH \
+ _IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS \
+ _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES \
+ _IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES \
+ _IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
+#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD \
+ _IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
+#define NVHOST_IOCTL_CHANNEL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD)
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_get_param_args)
+
+struct nvhost_ctrl_syncpt_read_args {
+ __u32 id;
+ __u32 value;
+};
+
+struct nvhost_ctrl_syncpt_incr_args {
+ __u32 id;
+};
+
+struct nvhost_ctrl_syncpt_wait_args {
+ __u32 id;
+ __u32 thresh;
+ __s32 timeout;
+};
+
+struct nvhost_ctrl_module_mutex_args {
+ __u32 id;
+ __u32 lock;
+};
+
+struct nvhost_ctrl_module_regrdwr_args {
+ __u32 id;
+ __u32 num_offsets;
+ __u32 block_size;
+ __u32 *offsets;
+ __u32 *values;
+ __u32 write;
+};
+
+#define NVHOST_IOCTL_CTRL_SYNCPT_READ \
+ _IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_INCR \
+ _IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
+#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT \
+ _IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
+
+#define NVHOST_IOCTL_CTRL_MODULE_MUTEX \
+ _IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
+#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR \
+ _IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
+
+#define NVHOST_IOCTL_CTRL_LAST \
+ _IOC_NR(NVHOST_IOCTL_CTRL_MODULE_REGRDWR)
+#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE sizeof(struct nvhost_ctrl_module_regrdwr_args)
+
+#endif
--- /dev/null
+/*
+ * include/linux/nvmap.h
+ *
+ * structure declarations for nvmem and nvmap user-space ioctls
+ *
+ * Copyright (c) 2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#ifndef __NVMAP_H
+#define __NVMAP_H
+
+#define NVMAP_HEAP_SYSMEM (1ul<<31)
+#define NVMAP_HEAP_IOVMM (1ul<<30)
+
+/* common carveout heaps */
+#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
+#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
+
+/* allocation flags */
+#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
+#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
+#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
+#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
+
+#define NVMAP_HANDLE_SECURE (0x1ul << 2)
+
+
+#if defined(__KERNEL__)
+
+struct nvmap_handle_ref;
+struct nvmap_handle;
+struct nvmap_client;
+struct nvmap_device;
+
+#define nvmap_ref_to_handle(_ref) (*(struct nvmap_handle **)(_ref))
+#define nvmap_id_to_handle(_id) ((struct nvmap_handle *)(_id))
+
+struct nvmap_pinarray_elem {
+ __u32 patch_mem;
+ __u32 patch_offset;
+ __u32 pin_mem;
+ __u32 pin_offset;
+};
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name);
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags);
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+void *nvmap_mmap(struct nvmap_handle_ref *r);
+
+void nvmap_munmap(struct nvmap_handle_ref *r, void *addr);
+
+struct nvmap_client *nvmap_client_get_file(int fd);
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
+
+void nvmap_client_put(struct nvmap_client *c);
+
+unsigned long nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r);
+
+unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id);
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r);
+
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique);
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr);
+
+int nvmap_patch_wait(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value);
+
+struct nvmap_platform_carveout {
+ const char *name;
+ unsigned int usage_mask;
+ unsigned long base;
+ size_t size;
+ size_t buddy_size;
+};
+
+struct nvmap_platform_data {
+ const struct nvmap_platform_carveout *carveouts;
+ unsigned int nr_carveouts;
+};
+
+extern struct nvmap_device *nvmap_dev;
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * linux/arch/arm/mach-tegra/include/mach/pinmux-t2.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_PINMUX_T2_H
+#define __MACH_TEGRA_PINMUX_T2_H
+
+enum tegra_pingroup {
+ TEGRA_PINGROUP_ATA = 0,
+ TEGRA_PINGROUP_ATB,
+ TEGRA_PINGROUP_ATC,
+ TEGRA_PINGROUP_ATD,
+ TEGRA_PINGROUP_ATE,
+ TEGRA_PINGROUP_CDEV1,
+ TEGRA_PINGROUP_CDEV2,
+ TEGRA_PINGROUP_CRTP,
+ TEGRA_PINGROUP_CSUS,
+ TEGRA_PINGROUP_DAP1,
+ TEGRA_PINGROUP_DAP2,
+ TEGRA_PINGROUP_DAP3,
+ TEGRA_PINGROUP_DAP4,
+ TEGRA_PINGROUP_DDC,
+ TEGRA_PINGROUP_DTA,
+ TEGRA_PINGROUP_DTB,
+ TEGRA_PINGROUP_DTC,
+ TEGRA_PINGROUP_DTD,
+ TEGRA_PINGROUP_DTE,
+ TEGRA_PINGROUP_DTF,
+ TEGRA_PINGROUP_GMA,
+ TEGRA_PINGROUP_GMB,
+ TEGRA_PINGROUP_GMC,
+ TEGRA_PINGROUP_GMD,
+ TEGRA_PINGROUP_GME,
+ TEGRA_PINGROUP_GPU,
+ TEGRA_PINGROUP_GPU7,
+ TEGRA_PINGROUP_GPV,
+ TEGRA_PINGROUP_HDINT,
+ TEGRA_PINGROUP_I2CP,
+ TEGRA_PINGROUP_IRRX,
+ TEGRA_PINGROUP_IRTX,
+ TEGRA_PINGROUP_KBCA,
+ TEGRA_PINGROUP_KBCB,
+ TEGRA_PINGROUP_KBCC,
+ TEGRA_PINGROUP_KBCD,
+ TEGRA_PINGROUP_KBCE,
+ TEGRA_PINGROUP_KBCF,
+ TEGRA_PINGROUP_LCSN,
+ TEGRA_PINGROUP_LD0,
+ TEGRA_PINGROUP_LD1,
+ TEGRA_PINGROUP_LD10,
+ TEGRA_PINGROUP_LD11,
+ TEGRA_PINGROUP_LD12,
+ TEGRA_PINGROUP_LD13,
+ TEGRA_PINGROUP_LD14,
+ TEGRA_PINGROUP_LD15,
+ TEGRA_PINGROUP_LD16,
+ TEGRA_PINGROUP_LD17,
+ TEGRA_PINGROUP_LD2,
+ TEGRA_PINGROUP_LD3,
+ TEGRA_PINGROUP_LD4,
+ TEGRA_PINGROUP_LD5,
+ TEGRA_PINGROUP_LD6,
+ TEGRA_PINGROUP_LD7,
+ TEGRA_PINGROUP_LD8,
+ TEGRA_PINGROUP_LD9,
+ TEGRA_PINGROUP_LDC,
+ TEGRA_PINGROUP_LDI,
+ TEGRA_PINGROUP_LHP0,
+ TEGRA_PINGROUP_LHP1,
+ TEGRA_PINGROUP_LHP2,
+ TEGRA_PINGROUP_LHS,
+ TEGRA_PINGROUP_LM0,
+ TEGRA_PINGROUP_LM1,
+ TEGRA_PINGROUP_LPP,
+ TEGRA_PINGROUP_LPW0,
+ TEGRA_PINGROUP_LPW1,
+ TEGRA_PINGROUP_LPW2,
+ TEGRA_PINGROUP_LSC0,
+ TEGRA_PINGROUP_LSC1,
+ TEGRA_PINGROUP_LSCK,
+ TEGRA_PINGROUP_LSDA,
+ TEGRA_PINGROUP_LSDI,
+ TEGRA_PINGROUP_LSPI,
+ TEGRA_PINGROUP_LVP0,
+ TEGRA_PINGROUP_LVP1,
+ TEGRA_PINGROUP_LVS,
+ TEGRA_PINGROUP_OWC,
+ TEGRA_PINGROUP_PMC,
+ TEGRA_PINGROUP_PTA,
+ TEGRA_PINGROUP_RM,
+ TEGRA_PINGROUP_SDB,
+ TEGRA_PINGROUP_SDC,
+ TEGRA_PINGROUP_SDD,
+ TEGRA_PINGROUP_SDIO1,
+ TEGRA_PINGROUP_SLXA,
+ TEGRA_PINGROUP_SLXC,
+ TEGRA_PINGROUP_SLXD,
+ TEGRA_PINGROUP_SLXK,
+ TEGRA_PINGROUP_SPDI,
+ TEGRA_PINGROUP_SPDO,
+ TEGRA_PINGROUP_SPIA,
+ TEGRA_PINGROUP_SPIB,
+ TEGRA_PINGROUP_SPIC,
+ TEGRA_PINGROUP_SPID,
+ TEGRA_PINGROUP_SPIE,
+ TEGRA_PINGROUP_SPIF,
+ TEGRA_PINGROUP_SPIG,
+ TEGRA_PINGROUP_SPIH,
+ TEGRA_PINGROUP_UAA,
+ TEGRA_PINGROUP_UAB,
+ TEGRA_PINGROUP_UAC,
+ TEGRA_PINGROUP_UAD,
+ TEGRA_PINGROUP_UCA,
+ TEGRA_PINGROUP_UCB,
+ TEGRA_PINGROUP_UDA,
+ /* these pin groups only have pullup and pull down control */
+ TEGRA_PINGROUP_CK32,
+ TEGRA_PINGROUP_DDRC,
+ TEGRA_PINGROUP_PMCA,
+ TEGRA_PINGROUP_PMCB,
+ TEGRA_PINGROUP_PMCC,
+ TEGRA_PINGROUP_PMCD,
+ TEGRA_PINGROUP_PMCE,
+ TEGRA_PINGROUP_XM2C,
+ TEGRA_PINGROUP_XM2D,
+ TEGRA_MAX_PINGROUP,
+};
+
+enum tegra_drive_pingroup {
+ TEGRA_DRIVE_PINGROUP_AO1 = 0,
+ TEGRA_DRIVE_PINGROUP_AO2,
+ TEGRA_DRIVE_PINGROUP_AT1,
+ TEGRA_DRIVE_PINGROUP_AT2,
+ TEGRA_DRIVE_PINGROUP_CDEV1,
+ TEGRA_DRIVE_PINGROUP_CDEV2,
+ TEGRA_DRIVE_PINGROUP_CSUS,
+ TEGRA_DRIVE_PINGROUP_DAP1,
+ TEGRA_DRIVE_PINGROUP_DAP2,
+ TEGRA_DRIVE_PINGROUP_DAP3,
+ TEGRA_DRIVE_PINGROUP_DAP4,
+ TEGRA_DRIVE_PINGROUP_DBG,
+ TEGRA_DRIVE_PINGROUP_LCD1,
+ TEGRA_DRIVE_PINGROUP_LCD2,
+ TEGRA_DRIVE_PINGROUP_SDMMC2,
+ TEGRA_DRIVE_PINGROUP_SDMMC3,
+ TEGRA_DRIVE_PINGROUP_SPI,
+ TEGRA_DRIVE_PINGROUP_UAA,
+ TEGRA_DRIVE_PINGROUP_UAB,
+ TEGRA_DRIVE_PINGROUP_UART2,
+ TEGRA_DRIVE_PINGROUP_UART3,
+ TEGRA_DRIVE_PINGROUP_VI1,
+ TEGRA_DRIVE_PINGROUP_VI2,
+ TEGRA_DRIVE_PINGROUP_XM2A,
+ TEGRA_DRIVE_PINGROUP_XM2C,
+ TEGRA_DRIVE_PINGROUP_XM2D,
+ TEGRA_DRIVE_PINGROUP_XM2CLK,
+ TEGRA_DRIVE_PINGROUP_MEMCOMP,
+ TEGRA_DRIVE_PINGROUP_SDIO1,
+ TEGRA_DRIVE_PINGROUP_CRT,
+ TEGRA_DRIVE_PINGROUP_DDC,
+ TEGRA_DRIVE_PINGROUP_GMA,
+ TEGRA_DRIVE_PINGROUP_GMB,
+ TEGRA_DRIVE_PINGROUP_GMC,
+ TEGRA_DRIVE_PINGROUP_GMD,
+ TEGRA_DRIVE_PINGROUP_GME,
+ TEGRA_DRIVE_PINGROUP_OWR,
+ TEGRA_DRIVE_PINGROUP_UAD,
+ TEGRA_MAX_DRIVE_PINGROUP,
+};
+
+#endif
+
#ifndef __MACH_TEGRA_PINMUX_H
#define __MACH_TEGRA_PINMUX_H
-enum tegra_pingroup {
- TEGRA_PINGROUP_ATA = 0,
- TEGRA_PINGROUP_ATB,
- TEGRA_PINGROUP_ATC,
- TEGRA_PINGROUP_ATD,
- TEGRA_PINGROUP_ATE,
- TEGRA_PINGROUP_CDEV1,
- TEGRA_PINGROUP_CDEV2,
- TEGRA_PINGROUP_CRTP,
- TEGRA_PINGROUP_CSUS,
- TEGRA_PINGROUP_DAP1,
- TEGRA_PINGROUP_DAP2,
- TEGRA_PINGROUP_DAP3,
- TEGRA_PINGROUP_DAP4,
- TEGRA_PINGROUP_DDC,
- TEGRA_PINGROUP_DTA,
- TEGRA_PINGROUP_DTB,
- TEGRA_PINGROUP_DTC,
- TEGRA_PINGROUP_DTD,
- TEGRA_PINGROUP_DTE,
- TEGRA_PINGROUP_DTF,
- TEGRA_PINGROUP_GMA,
- TEGRA_PINGROUP_GMB,
- TEGRA_PINGROUP_GMC,
- TEGRA_PINGROUP_GMD,
- TEGRA_PINGROUP_GME,
- TEGRA_PINGROUP_GPU,
- TEGRA_PINGROUP_GPU7,
- TEGRA_PINGROUP_GPV,
- TEGRA_PINGROUP_HDINT,
- TEGRA_PINGROUP_I2CP,
- TEGRA_PINGROUP_IRRX,
- TEGRA_PINGROUP_IRTX,
- TEGRA_PINGROUP_KBCA,
- TEGRA_PINGROUP_KBCB,
- TEGRA_PINGROUP_KBCC,
- TEGRA_PINGROUP_KBCD,
- TEGRA_PINGROUP_KBCE,
- TEGRA_PINGROUP_KBCF,
- TEGRA_PINGROUP_LCSN,
- TEGRA_PINGROUP_LD0,
- TEGRA_PINGROUP_LD1,
- TEGRA_PINGROUP_LD10,
- TEGRA_PINGROUP_LD11,
- TEGRA_PINGROUP_LD12,
- TEGRA_PINGROUP_LD13,
- TEGRA_PINGROUP_LD14,
- TEGRA_PINGROUP_LD15,
- TEGRA_PINGROUP_LD16,
- TEGRA_PINGROUP_LD17,
- TEGRA_PINGROUP_LD2,
- TEGRA_PINGROUP_LD3,
- TEGRA_PINGROUP_LD4,
- TEGRA_PINGROUP_LD5,
- TEGRA_PINGROUP_LD6,
- TEGRA_PINGROUP_LD7,
- TEGRA_PINGROUP_LD8,
- TEGRA_PINGROUP_LD9,
- TEGRA_PINGROUP_LDC,
- TEGRA_PINGROUP_LDI,
- TEGRA_PINGROUP_LHP0,
- TEGRA_PINGROUP_LHP1,
- TEGRA_PINGROUP_LHP2,
- TEGRA_PINGROUP_LHS,
- TEGRA_PINGROUP_LM0,
- TEGRA_PINGROUP_LM1,
- TEGRA_PINGROUP_LPP,
- TEGRA_PINGROUP_LPW0,
- TEGRA_PINGROUP_LPW1,
- TEGRA_PINGROUP_LPW2,
- TEGRA_PINGROUP_LSC0,
- TEGRA_PINGROUP_LSC1,
- TEGRA_PINGROUP_LSCK,
- TEGRA_PINGROUP_LSDA,
- TEGRA_PINGROUP_LSDI,
- TEGRA_PINGROUP_LSPI,
- TEGRA_PINGROUP_LVP0,
- TEGRA_PINGROUP_LVP1,
- TEGRA_PINGROUP_LVS,
- TEGRA_PINGROUP_OWC,
- TEGRA_PINGROUP_PMC,
- TEGRA_PINGROUP_PTA,
- TEGRA_PINGROUP_RM,
- TEGRA_PINGROUP_SDB,
- TEGRA_PINGROUP_SDC,
- TEGRA_PINGROUP_SDD,
- TEGRA_PINGROUP_SDIO1,
- TEGRA_PINGROUP_SLXA,
- TEGRA_PINGROUP_SLXC,
- TEGRA_PINGROUP_SLXD,
- TEGRA_PINGROUP_SLXK,
- TEGRA_PINGROUP_SPDI,
- TEGRA_PINGROUP_SPDO,
- TEGRA_PINGROUP_SPIA,
- TEGRA_PINGROUP_SPIB,
- TEGRA_PINGROUP_SPIC,
- TEGRA_PINGROUP_SPID,
- TEGRA_PINGROUP_SPIE,
- TEGRA_PINGROUP_SPIF,
- TEGRA_PINGROUP_SPIG,
- TEGRA_PINGROUP_SPIH,
- TEGRA_PINGROUP_UAA,
- TEGRA_PINGROUP_UAB,
- TEGRA_PINGROUP_UAC,
- TEGRA_PINGROUP_UAD,
- TEGRA_PINGROUP_UCA,
- TEGRA_PINGROUP_UCB,
- TEGRA_PINGROUP_UDA,
- /* these pin groups only have pullup and pull down control */
- TEGRA_PINGROUP_CK32,
- TEGRA_PINGROUP_DDRC,
- TEGRA_PINGROUP_PMCA,
- TEGRA_PINGROUP_PMCB,
- TEGRA_PINGROUP_PMCC,
- TEGRA_PINGROUP_PMCD,
- TEGRA_PINGROUP_PMCE,
- TEGRA_PINGROUP_XM2C,
- TEGRA_PINGROUP_XM2D,
- TEGRA_MAX_PINGROUP,
-};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#include "pinmux-t2.h"
+#else
+#error "Undefined Tegra architecture"
+#endif
enum tegra_mux_func {
TEGRA_MUX_RSVD = 0x8000,
TEGRA_MUX_VI,
TEGRA_MUX_VI_SENSOR_CLK,
TEGRA_MUX_XIO,
+ TEGRA_MUX_SAFE,
TEGRA_MAX_MUX,
};
TEGRA_TRI_TRISTATE = 1,
};
+enum tegra_vddio {
+ TEGRA_VDDIO_BB = 0,
+ TEGRA_VDDIO_LCD,
+ TEGRA_VDDIO_VI,
+ TEGRA_VDDIO_UART,
+ TEGRA_VDDIO_DDR,
+ TEGRA_VDDIO_NAND,
+ TEGRA_VDDIO_SYS,
+ TEGRA_VDDIO_AUDIO,
+ TEGRA_VDDIO_SD,
+};
+
struct tegra_pingroup_config {
enum tegra_pingroup pingroup;
enum tegra_mux_func func;
TEGRA_MAX_PULL,
};
-enum tegra_drive_pingroup {
- TEGRA_DRIVE_PINGROUP_AO1 = 0,
- TEGRA_DRIVE_PINGROUP_AO2,
- TEGRA_DRIVE_PINGROUP_AT1,
- TEGRA_DRIVE_PINGROUP_AT2,
- TEGRA_DRIVE_PINGROUP_CDEV1,
- TEGRA_DRIVE_PINGROUP_CDEV2,
- TEGRA_DRIVE_PINGROUP_CSUS,
- TEGRA_DRIVE_PINGROUP_DAP1,
- TEGRA_DRIVE_PINGROUP_DAP2,
- TEGRA_DRIVE_PINGROUP_DAP3,
- TEGRA_DRIVE_PINGROUP_DAP4,
- TEGRA_DRIVE_PINGROUP_DBG,
- TEGRA_DRIVE_PINGROUP_LCD1,
- TEGRA_DRIVE_PINGROUP_LCD2,
- TEGRA_DRIVE_PINGROUP_SDMMC2,
- TEGRA_DRIVE_PINGROUP_SDMMC3,
- TEGRA_DRIVE_PINGROUP_SPI,
- TEGRA_DRIVE_PINGROUP_UAA,
- TEGRA_DRIVE_PINGROUP_UAB,
- TEGRA_DRIVE_PINGROUP_UART2,
- TEGRA_DRIVE_PINGROUP_UART3,
- TEGRA_DRIVE_PINGROUP_VI1,
- TEGRA_DRIVE_PINGROUP_VI2,
- TEGRA_DRIVE_PINGROUP_XM2A,
- TEGRA_DRIVE_PINGROUP_XM2C,
- TEGRA_DRIVE_PINGROUP_XM2D,
- TEGRA_DRIVE_PINGROUP_XM2CLK,
- TEGRA_DRIVE_PINGROUP_MEMCOMP,
- TEGRA_MAX_DRIVE_PINGROUP,
-};
-
enum tegra_drive {
TEGRA_DRIVE_DIV_8 = 0,
TEGRA_DRIVE_DIV_4,
enum tegra_slew slew_falling;
};
-int tegra_pinmux_set_func(enum tegra_pingroup pg, enum tegra_mux_func func);
-int tegra_pinmux_set_tristate(enum tegra_pingroup pg, enum tegra_tristate tristate);
-int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg, enum tegra_pullupdown pupd);
+struct tegra_drive_pingroup_desc {
+ const char *name;
+ s16 reg;
+};
+
+struct tegra_pingroup_desc {
+ const char *name;
+ int funcs[4];
+ int func_safe;
+ int vddio;
+ s16 tri_reg; /* offset into the TRISTATE_REG_* register bank */
+ s16 mux_reg; /* offset into the PIN_MUX_CTL_* register bank */
+ s16 pupd_reg; /* offset into the PULL_UPDOWN_REG_* register bank */
+ s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
+ s8 mux_bit; /* offset into the PIN_MUX_CTL_* register bit */
+ s8 pupd_bit; /* offset into the PULL_UPDOWN_REG_* register bit */
+};
+
+extern const struct tegra_pingroup_desc tegra_soc_pingroups[];
+extern const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[];
-void tegra_pinmux_config_pingroup(enum tegra_pingroup pingroup,
- enum tegra_mux_func func, enum tegra_pullupdown pupd,
+int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
enum tegra_tristate tristate);
+int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
+ enum tegra_pullupdown pupd);
-void tegra_pinmux_config_table(struct tegra_pingroup_config *config, int len);
+void tegra_pinmux_config_table(const struct tegra_pingroup_config *config,
+ int len);
void tegra_drive_pinmux_config_table(struct tegra_drive_pingroup_config *config,
int len);
-
+void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *config,
+ int len);
+void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config,
+ int len);
+void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_tristate tristate);
+void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
+ int len, enum tegra_pullupdown pupd);
#endif
--- /dev/null
+/*
+ * drivers/regulator/tegra-regulator.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_TEGRA_POWERGATE_H_
+#define _MACH_TEGRA_POWERGATE_H_
+
+#define TEGRA_POWERGATE_CPU 0
+#define TEGRA_POWERGATE_3D 1
+#define TEGRA_POWERGATE_VENC 2
+#define TEGRA_POWERGATE_PCIE 3
+#define TEGRA_POWERGATE_VDEC 4
+#define TEGRA_POWERGATE_L2 5
+#define TEGRA_POWERGATE_MPE 6
+#define TEGRA_NUM_POWERGATE 7
+
+int tegra_powergate_power_on(int id);
+int tegra_powergate_power_off(int id);
+bool tegra_powergate_is_powered(int id);
+int tegra_powergate_remove_clamping(int id);
+
+/* Must be called with clk disabled, and returns with clk enabled */
+int tegra_powergate_sequence_power_up(int id, struct clk *clk);
+
+#endif /* _MACH_TEGRA_POWERGATE_H_ */
--- /dev/null
+/*
+ * include/asm-arm/arch-tegra/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+ const char *clk_id;
+ int force_hs;
+ int rt_disable;
+ int cd_gpio;
+ int wp_gpio;
+ int power_gpio;
+
+ void (*board_probe)(int id, struct mmc_host *);
+ void (*board_remove)(int id, struct mmc_host *);
+};
+
+#endif
*/
static inline void smp_cross_call(const struct cpumask *mask)
{
+ dsb();
gic_raise_softirq(mask, 1);
}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/spdif.h
+ *
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+
+#ifndef __ARCH_ARM_MACH_TEGRA_SPDIF_H
+#define __ARCH_ARM_MACH_TEGRA_SPDIF_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+/* Offsets from TEGRA_SPDIF_BASE */
+
+#define SPDIF_CTRL_0 0x0
+#define SPDIF_STATUS_0 0x4
+#define SPDIF_STROBE_CTRL_0 0x8
+#define SPDIF_DATA_FIFO_CSR_0 0x0C
+#define SPDIF_DATA_OUT_0 0x40
+#define SPDIF_DATA_IN_0 0x80
+#define SPDIF_CH_STA_RX_A_0 0x100
+#define SPDIF_CH_STA_RX_B_0 0x104
+#define SPDIF_CH_STA_RX_C_0 0x108
+#define SPDIF_CH_STA_RX_D_0 0x10C
+#define SPDIF_CH_STA_RX_E_0 0x110
+#define SPDIF_CH_STA_RX_F_0 0x114
+#define SPDIF_CH_STA_TX_A_0 0x140
+#define SPDIF_CH_STA_TX_B_0 0x144
+#define SPDIF_CH_STA_TX_C_0 0x148
+#define SPDIF_CH_STA_TX_D_0 0x14C
+#define SPDIF_CH_STA_TX_E_0 0x150
+#define SPDIF_CH_STA_TX_F_0 0x154
+#define SPDIF_USR_STA_RX_A_0 0x180
+#define SPDIF_USR_DAT_TX_A_0 0x1C0
+
+/*
+ * Register SPDIF_CTRL_0
+ */
+
+/*
+ * 1=start capturing from left channel,0=start
+ * capturing from right channel.
+ */
+#define SPDIF_CTRL_0_CAP_LC (1<<30)
+
+/* SPDIF receiver(RX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_RX_EN (1<<29)
+
+/* SPDIF Transmitter(TX): 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TX_EN (1<<28)
+
+/* Transmit Channel status: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TC_EN (1<<27)
+
+/* Transmit user Data: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_TU_EN (1<<26)
+
+/* Interrupt on transmit error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_TXE (1<<25)
+
+/* Interrupt on receive error: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_RXE (1<<24)
+
+/* Interrupt on invalid preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_P (1<<23)
+
+/* Interrupt on "B" preamble: 1=enable, 0=disable. */
+#define SPDIF_CTRL_0_IE_B (1<<22)
+
+/*
+ * Interrupt when block of channel status received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_C (1<<21)
+
+/*
+ * Interrupt when a valid information unit (IU) is received:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_IE_U (1<<20)
+
+/*
+ * Interrupt when RX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RU (1<<19)
+
+/*
+ * Interrupt when TX user FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TU (1<<18)
+
+/*
+ * Interrupt when RX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_RX (1<<17)
+
+/*
+ * Interrupt when TX data FIFO attn. level is reached:
+ * 1=enable, 0=disable.
+ */
+#define SPDIF_CTRL_0_QE_TX (1<<16)
+
+/* Loopback test mode: 1=enable internal loopback, 0=Normal mode. */
+#define SPDIF_CTRL_0_LBK_EN (1<<15)
+
+/*
+ * Pack data mode:
+ * 1=Packed left/right channel data into a single word,
+ * 0=Single data (16 bit needs to be padded to match the
+ * interface data bit size)
+ */
+#define SPDIF_CTRL_0_PACK (1<<14)
+
+/*
+ * 00=16bit data
+ * 01=20bit data
+ * 10=24bit data
+ * 11=raw data
+ */
+#define SPDIF_BIT_MODE_MODE16BIT (0)
+#define SPDIF_BIT_MODE_MODE20BIT (1)
+#define SPDIF_BIT_MODE_MODE24BIT (2)
+#define SPDIF_BIT_MODE_MODERAW (3)
+#define SPDIF_CTRL_0_BIT_MODE_SHIFT (12)
+
+#define SPDIF_CTRL_0_BIT_MODE_MASK \
+ ((0x3) << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE16BIT \
+ (SPDIF_BIT_MODE_MODE16BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE20BIT \
+ (SPDIF_BIT_MODE_MODE20BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODE24BIT \
+ (SPDIF_BIT_MODE_MODE24BIT << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+#define SPDIF_CTRL_0_BIT_MODE_MODERAW \
+ (SPDIF_BIT_MODE_MODERAW << SPDIF_CTRL_0_BIT_MODE_SHIFT)
+
+
+/*
+ * SPDIF Status Register
+ * -------------------------
+ * Note: IS_P, IS_B, IS_C, and IS_U are sticky bits.
+ * Software must write a 1 to the corresponding bit location
+ * to clear the status.
+ */
+
+/* Register SPDIF_STATUS_0 */
+
+/*
+ * Receiver(RX) shifter is busy receiving data. 1=busy, 0=not busy.
+ * This bit is asserted when the receiver first locked onto the
+ * preamble of the data stream after RX_EN is asserted. This bit is
+ * deasserted when either,
+ * (a) the end of a frame is reached after RX_EN is deasserted, or
+ * (b) the SPDIF data stream becomes inactive.
+ */
+#define SPDIF_STATUS_0_RX_BSY (1<<29)
+
+
+/*
+ * Transmitter(TX) shifter is busy transmitting data.
+ * 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN is asserted.
+ * This bit is deasserted when the end of a frame is reached after
+ * TX_EN is deasserted.
+ */
+#define SPDIF_STATUS_0_TX_BSY (1<<28)
+
+/*
+ * TX is busy shifting out channel status. 1=busy, 0=not busy.
+ * This bit is asserted when both TX_EN and TC_EN are asserted and
+ * data from CH_STA_TX_A register is loaded into the internal shifter.
+ * This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) CH_STA_TX_F register is loaded into the internal shifter.
+ */
+#define SPDIF_STATUS_0_TC_BSY (1<<27)
+
+/*
+ * TX User data FIFO busy. 1=busy, 0=not busy.
+ * This bit is asserted when TX_EN and TXU_EN are asserted and
+ * there's data in the TX user FIFO. This bit is deasserted when either,
+ * (a) the end of a frame is reached after TX_EN is deasserted, or
+ * (b) there's no data left in the TX user FIFO.
+ */
+#define SPDIF_STATUS_0_TU_BSY (1<<26)
+
+/* Tx FIFO Underrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_TX_ERR (1<<25)
+
+/* Rx FIFO Overrun error status: 1=error, 0=no error */
+#define SPDIF_STATUS_0_RX_ERR (1<<24)
+
+/* Preamble status: 1=bad/missing preamble, 0=Preamble ok */
+#define SPDIF_STATUS_0_IS_P (1<<23)
+
+/* B-preamble detection status: 0=not detected, 1=B-preamble detected */
+#define SPDIF_STATUS_0_IS_B (1<<22)
+
+/*
+ * RX channel block data receive status:
+ * 1=received entire block of channel status,
+ * 0=entire block not received yet.
+ */
+#define SPDIF_STATUS_0_IS_C (1<<21)
+
+/* RX User Data Valid flag: 1=valid IU detected, 0 = no IU detected. */
+#define SPDIF_STATUS_0_IS_U (1<<20)
+
+/*
+ * RX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RU (1<<19)
+
+/*
+ * TX User FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TU (1<<18)
+
+/*
+ * RX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_RX (1<<17)
+
+/*
+ * TX Data FIFO Status:
+ * 1=attention level reached, 0=attention level not reached.
+ */
+#define SPDIF_STATUS_0_QS_TX (1<<16)
+
+
+/* SPDIF FIFO Configuration and Status Register */
+
+/* Register SPDIF_DATA_FIFO_CSR_0 */
+
+#define SPDIF_FIFO_ATN_LVL_ONE_SLOT 0
+#define SPDIF_FIFO_ATN_LVL_FOUR_SLOTS 1
+#define SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS 2
+#define SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS 3
+
+
+/* Clear Receiver User FIFO (RX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_RU_CLR (1<<31)
+
+/*
+ * RX USR.FIFO Attention Level:
+ * 00=1-slot-full, 01=2-slots-full, 10=3-slots-full, 11=4-slots-full.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT (29)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU1 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU2 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU3 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4_WORD_FULL \
+ (SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_RU4 << \
+ SPDIF_DATA_FIFO_CSR_0_RU_ATN_LVL_SHIFT)
+
+/* Number of RX USR.FIFO levels with valid data. */
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT (24)
+#define SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_MASK \
+ (0x1f << SPDIF_DATA_FIFO_CSR_0_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter User FIFO (TX USR.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TU_CLR (1<<23)
+
+/*
+ * TxUSR.FIFO Attention Level:
+ * 11=4-slots-empty, 10=3-slots-empty, 01=2-slots-empty, 00=1-slot-empty.
+ */
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 (0)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 (1)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 (2)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 (3)
+
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT (21)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU1 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU2 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU3 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4_WORD_EMPTY \
+ (SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_TU4 << \
+ SPDIF_DATA_FIFO_CSR_0_TU_ATN_LVL_SHIFT)
+
+/* Number of Tx USR.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT (16)
+#define SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TU_EMPTY_COUNT_SHIFT)
+
+/* Clear Receiver Data FIFO (RX DATA.FIFO). */
+#define SPDIF_DATA_FIFO_CSR_0_RX_CLR (1<<15)
+
+/*
+ * Rx FIFO Attention Level:
+ * 11=12-slots-full, 10=8-slots-full, 01=4-slots-full, 00=1-slot-full.
+ */
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT (13)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_RX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_RX_ATN_LVL_SHIFT)
+
+
+/* Number of RX DATA.FIFO levels with valid data */
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT (8)
+#define SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_FIELD \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_RX_DATA_FIFO_FULL_COUNT_SHIFT)
+
+/* Clear Transmitter Data FIFO (TX DATA.FIFO) */
+#define SPDIF_DATA_FIFO_CSR_0_TX_CLR (1<<7)
+
+/*
+ * Tx FIFO Attention Level:
+ * 11=12-slots-empty, 10=8-slots-empty, 01=4-slots-empty, 00=1-slot-empty
+ */
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT (5)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK \
+ (0x3 << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX1_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_ONE_SLOT << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX4_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_FOUR_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX8_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+#define SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_TX12_WORD_FULL \
+ (SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS << \
+ SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT)
+
+
+/* Number of Tx DATA.FIFO levels that could be filled. */
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT (0)
+#define SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK \
+ ((0x1f) << SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT)
+
+
+#endif /* __ARCH_ARM_MACH_TEGRA_SPDIF_H */
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/suspend.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#ifndef _MACH_TEGRA_SUSPEND_H_
+#define _MACH_TEGRA_SUSPEND_H_
+
+enum tegra_suspend_mode {
+ TEGRA_SUSPEND_NONE = 0,
+ TEGRA_SUSPEND_LP2, /* CPU voltage off */
+ TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */
+ TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */
+ TEGRA_MAX_SUSPEND_MODE,
+};
+
+struct tegra_suspend_platform_data {
+ unsigned long cpu_timer; /* CPU power good time in us, LP2/LP1 */
+ unsigned long cpu_off_timer; /* CPU power off time us, LP2/LP1 */
+ unsigned long core_timer; /* core power good time in ticks, LP0 */
+ unsigned long core_off_timer; /* core power off time ticks, LP0 */
+ unsigned long wake_enb; /* mask of enabled wake pads */
+ unsigned long wake_high; /* high-level-triggered wake pads */
+ unsigned long wake_low; /* low-level-triggered wake pads */
+ unsigned long wake_any; /* any-edge-triggered wake pads */
+ bool corereq_high; /* Core power request active-high */
+ bool sysclkreq_high; /* System clock request is active-high */
+ bool separate_req; /* Core & CPU power request are separate */
+ enum tegra_suspend_mode suspend_mode;
+};
+
+unsigned long tegra_cpu_power_good_time(void);
+unsigned long tegra_cpu_power_off_time(void);
+enum tegra_suspend_mode tegra_get_suspend_mode(void);
+
+void __tegra_lp1_reset(void);
+void __tegra_iram_end(void);
+
+void lp0_suspend_init(void);
+
+void tegra_pinmux_suspend(void);
+void tegra_irq_suspend(void);
+void tegra_gpio_suspend(void);
+void tegra_clk_suspend(void);
+void tegra_dma_suspend(void);
+void tegra_timer_suspend(void);
+
+void tegra_pinmux_resume(void);
+void tegra_irq_resume(void);
+void tegra_gpio_resume(void);
+void tegra_clk_resume(void);
+void tegra_dma_resume(void);
+void tegra_timer_resume(void);
+
+int tegra_irq_to_wake(int irq);
+int tegra_wake_to_irq(int wake);
+
+int tegra_set_lp0_wake(int irq, int enable);
+int tegra_set_lp0_wake_type(int irq, int flow_type);
+int tegra_set_lp1_wake(int irq, int enable);
+void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any);
+
+void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat);
+
+#endif /* _MACH_TEGRA_SUSPEND_H_ */
#include <mach/hardware.h>
#include <mach/iomap.h>
+extern void (*tegra_reset)(char mode, const char *cmd);
+
static inline void arch_idle(void)
{
}
-static inline void arch_reset(char mode, const char *cmd)
+static inline void tegra_assert_system_reset(void)
{
void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
- u32 reg = readl(reset);
+ u32 reg;
+
+ reg = readl_relaxed(reset);
reg |= 0x04;
- writel(reg, reset);
+ writel_relaxed(reg, reset);
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ if (tegra_reset)
+ tegra_reset(mode, cmd);
+ else
+ tegra_assert_system_reset();
+
+ do { } while (1);
}
#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/tegra2_fuse.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA2_FUSE_H
+#define __MACH_TEGRA2_FUSE_H
+
+#define SBK_DEVKEY_STATUS_SZ sizeof(u32)
+
+/* fuse io parameters */
+enum fuse_io_param {
+ DEVKEY,
+ JTAG_DIS,
+ /*
+ * Programming the odm production fuse at the same
+ * time as the sbk or dev_key is not allowed as it is not possible to
+ * verify that the sbk or dev_key were programmed correctly.
+ */
+ ODM_PROD_MODE,
+ SEC_BOOT_DEV_CFG,
+ SEC_BOOT_DEV_SEL,
+ SBK,
+ SW_RSVD,
+ IGNORE_DEV_SEL_STRAPS,
+ ODM_RSVD,
+ SBK_DEVKEY_STATUS,
+ MASTER_ENB,
+ _PARAMS_U32 = 0x7FFFFFFF
+};
+
+#define MAX_PARAMS ODM_RSVD
+
+/* the order of the members is pre-decided. please do not change */
+struct fuse_data {
+ u32 devkey;
+ u32 jtag_dis;
+ u32 odm_prod_mode;
+ u32 bootdev_cfg;
+ u32 bootdev_sel;
+ u32 sbk[4];
+ u32 sw_rsvd;
+ u32 ignore_devsel_straps;
+ u32 odm_rsvd[8];
+};
+
+/* secondary boot device options */
+enum {
+ SECBOOTDEV_SDMMC,
+ SECBOOTDEV_NOR,
+ SECBOOTDEV_SPI,
+ SECBOOTDEV_NAND,
+ SECBOOTDEV_LBANAND,
+ SECBOOTDEV_MUXONENAND,
+ _SECBOOTDEV_MAX,
+ _SECBOOTDEV_U32 = 0x7FFFFFFF
+};
+
+/*
+ * read the fuse settings
+ * @param: io_param_type - param type enum
+ * @param: size - read size in bytes
+ */
+int tegra_fuse_read(u32 io_param_type, u32 *data, int size);
+
+#define FLAGS_DEVKEY BIT(DEVKEY)
+#define FLAGS_JTAG_DIS BIT(JTAG_DIS)
+#define FLAGS_SBK_DEVKEY_STATUS BIT(SBK_DEVKEY_STATUS)
+#define FLAGS_ODM_PROD_MODE BIT(ODM_PROD_MODE)
+#define FLAGS_SEC_BOOT_DEV_CFG BIT(SEC_BOOT_DEV_CFG)
+#define FLAGS_SEC_BOOT_DEV_SEL BIT(SEC_BOOT_DEV_SEL)
+#define FLAGS_SBK BIT(SBK)
+#define FLAGS_SW_RSVD BIT(SW_RSVD)
+#define FLAGS_IGNORE_DEV_SEL_STRAPS BIT(IGNORE_DEV_SEL_STRAPS)
+#define FLAGS_ODMRSVD BIT(ODM_RSVD)
+
+/*
+ * Prior to invoking this routine, the caller is responsible for supplying
+ * valid fuse programming voltage.
+ *
+ * @param: pgm_data - entire data to be programmed
+ * @flags: program flags (e.g. FLAGS_DEVKEY)
+ */
+int tegra_fuse_program(struct fuse_data *pgm_data, u32 flags);
+
+/* Disables the fuse programming until the next system reset */
+void tegra_fuse_program_disable(void);
+
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/tegra_fb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Platform data structure to be passed to the driver */
+struct tegra_fb_lcd_data {
+ int fb_xres;
+ int fb_yres;
+ /* Resolution of the output to the LCD. If different from the
+ framebuffer resolution, the Tegra display block will scale it */
+ int lcd_xres;
+ int lcd_yres;
+ int bits_per_pixel;
+};
--- /dev/null
+/*
+ * linux/arch/arm/mach-tegra/include/mach/tegra_fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_FIQ_DEBUGGER_H
+#define __MACH_TEGRA_FIQ_DEBUGGER_H
+
+#ifdef CONFIG_TEGRA_FIQ_DEBUGGER
+void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq);
+#else
+static inline void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq)
+{
+}
+#endif
+
+#endif
#include <mach/iomap.h>
-#if defined(CONFIG_TEGRA_DEBUG_UARTA)
-#define DEBUG_UART_BASE TEGRA_UARTA_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
-#define DEBUG_UART_BASE TEGRA_UARTB_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
-#define DEBUG_UART_BASE TEGRA_UARTC_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
-#define DEBUG_UART_BASE TEGRA_UARTD_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
-#define DEBUG_UART_BASE TEGRA_UARTE_BASE
-#else
-#define DEBUG_UART_BASE NULL
-#endif
-
static void putc(int c)
{
- volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE;
+ volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
int shift = 2;
if (uart == NULL)
{
}
+static inline void konk_delay(int delay)
+{
+ int i;
+
+ for (i = 0; i < (1000 * delay); i++) {
+ barrier();
+ }
+}
+
+
static inline void arch_decomp_setup(void)
{
- volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE;
+ volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
int shift = 2;
+ volatile u32 *addr;
if (uart == NULL)
return;
+/*
+ addr = (volatile u32 *)0x70000014;
+ *addr &= ~(1<<29);
+
+ addr = (volatile u32 *)0x70000084;
+ *addr &= ~(3<<2);
+
+ addr = (volatile u32 *)0x700000b0;
+ *addr &= ~(3<<24);
+
+ konk_delay(5);
+
+*/
+
+ /* OSC_CTRL_0 */
+ /*addr = (volatile u32 *)0x60006050;*/
+
+ /* PLLP_BASE_0 */
+ addr = (volatile u32 *)0x600060a0;
+ *addr = 0x5011b00c;
+
+ /* PLLP_OUTA_0 */
+ addr = (volatile u32 *)0x600060a4;
+ *addr = 0x10031c03;
+
+ /* PLLP_OUTB_0 */
+ addr = (volatile u32 *)0x600060a8;
+ *addr = 0x06030a03;
+
+ /* PLLP_MISC_0 */
+ addr = (volatile u32 *)0x600060ac;
+ *addr = 0x00000800;
+
+ konk_delay(1000);
+
+ /* UARTD clock source is PLLP_OUT0 */
+ addr = (volatile u32 *)0x600061c0;
+ *addr = 0;
+
+ /* Enable clock to UARTD */
+ addr = (volatile u32 *)0x60006018;
+ *addr |= (1<<1);
+
+ konk_delay(5);
+
+ /* Deassert reset to UARTD */
+ addr = (volatile u32 *)0x6000600c;
+ *addr &= ~(1<<1);
+
+ konk_delay(5);
+
uart[UART_LCR << shift] |= UART_LCR_DLAB;
uart[UART_DLL << shift] = 0x75;
uart[UART_DLM << shift] = 0x0;
--- /dev/null
+/*
+ * arch/arm/mach-tegra/include/mach/usb_phy.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_USB_PHY_H
+#define __MACH_USB_PHY_H
+
+#include <linux/clk.h>
+#include <linux/usb/otg.h>
+
+struct tegra_utmip_config {
+ u8 hssync_start_delay;
+ u8 elastic_limit;
+ u8 idle_wait_delay;
+ u8 term_range_adj;
+ u8 xcvr_setup;
+ u8 xcvr_lsfslew;
+ u8 xcvr_lsrslew;
+};
+
+struct tegra_ulpi_config {
+ int reset_gpio;
+ const char *clk;
+};
+
+enum tegra_usb_phy_port_speed {
+ TEGRA_USB_PHY_PORT_SPEED_FULL = 0,
+ TEGRA_USB_PHY_PORT_SPEED_LOW,
+ TEGRA_USB_PHY_PORT_SPEED_HIGH,
+};
+
+enum tegra_usb_phy_mode {
+ TEGRA_USB_PHY_MODE_DEVICE,
+ TEGRA_USB_PHY_MODE_HOST,
+};
+
+struct tegra_xtal_freq;
+
+struct tegra_usb_phy {
+ int instance;
+ const struct tegra_xtal_freq *freq;
+ void __iomem *regs;
+ void __iomem *pad_regs;
+ struct clk *clk;
+ struct clk *pll_u;
+ struct clk *pad_clk;
+ enum tegra_usb_phy_mode mode;
+ void *config;
+ struct otg_transceiver *ulpi;
+ int initialized;
+};
+
+struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
+ void *config, enum tegra_usb_phy_mode phy_mode);
+
+int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_power_off(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_preresume(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_postresume(struct tegra_usb_phy *phy);
+
+void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed);
+
+void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy);
+
+void tegra_usb_phy_close(struct tegra_usb_phy *phy);
+
+#endif /* __MACH_USB_PHY_H */
--- /dev/null
+/*
+ * include/mach/w1.h
+ *
+ * Copyright (C) 2010 Motorola, Inc
+ * Author: Andrei Warkentin <andreiw@motorola.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_W1_H
+#define __ASM_ARM_ARCH_TEGRA_W1_H
+
+struct tegra_w1_timings {
+
+ /* tsu, trelease, trdv, tlow0, tlow1 and tslot are formed
+ into the value written into OWR_RST_PRESENCE_TCTL_0 register. */
+
+ /* Read data setup, Tsu = N owr clks, Range = tsu < 1,
+ Typical value = 0x1 */
+ uint32_t tsu;
+
+ /* Release 1-wire time, Trelease = N owr clks,
+ Range = 0 <= trelease < 45, Typical value = 0xf */
+ uint32_t trelease;
+
+ /* Read data valid time, Trdv = N+1 owr clks, Range = Exactly 15 */
+ uint32_t trdv;
+
+ /* Write zero time low, Tlow0 = N+1 owr clks,
+ Range = 60 <= tlow0 < tslot < 120, typical value = 0x3c. */
+ uint32_t tlow0;
+
+ /* Write one time low, or TLOWR both are same Tlow1 = N+1 owr clks,
+ Range = 1 <= tlow1 < 15 TlowR = N+1 owr clks,
+ Range = 1 <= tlowR < 15, Typical value = 0x1. */
+ uint32_t tlow1;
+
+ /* Active time slot for write or read data, Tslot = N+1 owr clks,
+ Range = 60 <= tslot < 120, Typical value = 0x77. */
+ uint32_t tslot;
+
+	/* tpdl, tpdh, trstl, trsth are formed in the value written
+ into the OWR_RST_PRESENCE_TCTL_0 register. */
+
+ /* Tpdl = N owr clks, Range = 60 <= tpdl < 240,
+ Typical value = 0x78. */
+ uint32_t tpdl;
+
+ /* Tpdh = N+1 owr clks, Range = 15 <= tpdh < 60.
+ Typical value = 0x1e. */
+ uint32_t tpdh;
+
+ /* Trstl = N+1 owr clks, Range = 480 <= trstl < infinity,
+ Typical value = 0x1df. */
+ uint32_t trstl;
+
+ /* Trsth = N+1 owr clks, Range = 480 <= trsth < infinity,
+ Typical value = 0x1df. */
+ uint32_t trsth;
+
+ /* Read data sample clock. Should be <= to (tlow1 - 6) clks,
+ 6 clks are used for deglitch. If deglitch bypassed it
+ is 3 clks, Typical value = 0x7. */
+ uint32_t rdsclk;
+
+ /* Presence sample clock. Should be <= to (tpdl - 6) clks,
+ 6 clks are used for deglitch. If deglitch bypassed it is 3 clks,
+ Typical value = 0x50. */
+ uint32_t psclk;
+};
+
+struct tegra_w1_platform_data {
+ const char *clk_id;
+ struct tegra_w1_timings *timings;
+};
+
+#endif
.length = IO_CPU_SIZE,
.type = MT_DEVICE,
},
+ {
+ .virtual = IO_IRAM_VIRT,
+ .pfn = __phys_to_pfn(IO_IRAM_PHYS),
+ .length = IO_IRAM_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = IO_HOST1X_VIRT,
+ .pfn = __phys_to_pfn(IO_HOST1X_PHYS),
+ .length = IO_HOST1X_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = IO_USB_VIRT,
+ .pfn = __phys_to_pfn(IO_USB_PHYS),
+ .length = IO_USB_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = IO_SDMMC_VIRT,
+ .pfn = __phys_to_pfn(IO_SDMMC_PHYS),
+ .length = IO_SDMMC_SIZE,
+ .type = MT_DEVICE,
+ },
};
void __init tegra_map_common_io(void)
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
void __iomem *v = IO_ADDRESS(p);
- if (v == NULL)
+
+ /*
+ * __arm_ioremap fails to set the domain of ioremapped memory
+ * correctly, only use it on physical memory.
+ */
+ if (v == NULL && p < SZ_1G)
v = __arm_ioremap(p, size, type);
+
+ /*
+ * If the physical address was not physical memory or statically
+ * mapped, there's nothing we can do to map it safely.
+ */
+ BUG_ON(v == NULL);
+
return v;
}
EXPORT_SYMBOL(tegra_ioremap);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/iovmm-gart.c
+ *
+ * Tegra I/O VMM implementation for GART devices in Tegra and Tegra 2 series
+ * systems-on-a-chip.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+#include <mach/iovmm.h>
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+#define GART_CONFIG 0x24
+#define GART_ENTRY_ADDR 0x28
+#define GART_ENTRY_DATA 0x2c
+#endif
+
+#define VMM_NAME "iovmm-gart"
+#define DRIVER_NAME "tegra_gart"
+
+#define GART_PAGE_SHIFT (12)
+#define GART_PAGE_MASK (~((1<<GART_PAGE_SHIFT)-1))
+
+struct gart_device {
+ void __iomem *regs;
+ u32 *savedata;
+ u32 page_count; /* total remappable size */
+ tegra_iovmm_addr_t iovmm_base; /* offset to apply to vmm_area */
+ spinlock_t pte_lock;
+ struct tegra_iovmm_device iovmm;
+ struct tegra_iovmm_domain domain;
+ bool enable;
+ bool needs_barrier; /* emulator WAR */
+};
+
+static int gart_map(struct tegra_iovmm_device *, struct tegra_iovmm_area *);
+static void gart_unmap(struct tegra_iovmm_device *,
+ struct tegra_iovmm_area *, bool);
+static void gart_map_pfn(struct tegra_iovmm_device *,
+ struct tegra_iovmm_area *, tegra_iovmm_addr_t, unsigned long);
+static struct tegra_iovmm_domain *gart_alloc_domain(
+ struct tegra_iovmm_device *, struct tegra_iovmm_client *);
+
+static int gart_probe(struct platform_device *);
+static int gart_remove(struct platform_device *);
+static int gart_suspend(struct tegra_iovmm_device *dev);
+static void gart_resume(struct tegra_iovmm_device *dev);
+
+
+static struct tegra_iovmm_device_ops tegra_iovmm_gart_ops = {
+ .map = gart_map,
+ .unmap = gart_unmap,
+ .map_pfn = gart_map_pfn,
+ .alloc_domain = gart_alloc_domain,
+ .suspend = gart_suspend,
+ .resume = gart_resume,
+};
+
+static struct platform_driver tegra_iovmm_gart_drv = {
+ .probe = gart_probe,
+ .remove = gart_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int gart_suspend(struct tegra_iovmm_device *dev)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ unsigned int i;
+ unsigned long reg;
+
+ if (!gart)
+ return -ENODEV;
+
+ if (!gart->enable)
+ return 0;
+
+ spin_lock(&gart->pte_lock);
+ reg = gart->iovmm_base;
+ for (i=0; i<gart->page_count; i++) {
+ writel(reg, gart->regs + GART_ENTRY_ADDR);
+ gart->savedata[i] = readl(gart->regs + GART_ENTRY_DATA);
+ dmb();
+ reg += 1 << GART_PAGE_SHIFT;
+ }
+ spin_unlock(&gart->pte_lock);
+ return 0;
+}
+
+static void do_gart_setup(struct gart_device *gart, const u32 *data)
+{
+ unsigned long reg;
+ unsigned int i;
+
+ writel(1, gart->regs + GART_CONFIG);
+
+ reg = gart->iovmm_base;
+ for (i=0; i<gart->page_count; i++) {
+ writel(reg, gart->regs + GART_ENTRY_ADDR);
+ writel((data) ? data[i] : 0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ reg += 1 << GART_PAGE_SHIFT;
+ }
+ wmb();
+}
+
+static void gart_resume(struct tegra_iovmm_device *dev)
+{
+	struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+
+	/* nothing to restore if the GART is disabled or no context was saved */
+	if (!gart || !gart->enable || !gart->savedata)
+		return;
+	spin_lock(&gart->pte_lock);
+	do_gart_setup(gart, gart->savedata);
+	spin_unlock(&gart->pte_lock);
+}
+
+static int gart_remove(struct platform_device *pdev)
+{
+ struct gart_device *gart = platform_get_drvdata(pdev);
+
+ if (!gart)
+ return 0;
+
+ if (gart->enable)
+ writel(0, gart->regs + GART_CONFIG);
+
+ gart->enable = 0;
+ platform_set_drvdata(pdev, NULL);
+ tegra_iovmm_unregister(&gart->iovmm);
+ if (gart->savedata)
+ vfree(gart->savedata);
+ if (gart->regs)
+ iounmap(gart->regs);
+ kfree(gart);
+ return 0;
+}
+
+static int gart_probe(struct platform_device *pdev)
+{
+ struct gart_device *gart = NULL;
+ struct resource *res, *res_remap;
+ void __iomem *gart_regs = NULL;
+ int e;
+
+ if (!pdev) {
+ pr_err(DRIVER_NAME ": platform_device required\n");
+ return -ENODEV;
+ }
+
+ if (PAGE_SHIFT != GART_PAGE_SHIFT) {
+ pr_err(DRIVER_NAME ": GART and CPU page size must match\n");
+ return -ENXIO;
+ }
+
+ /* the GART memory aperture is required */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ if (!res || !res_remap) {
+ pr_err(DRIVER_NAME ": GART memory aperture expected\n");
+ return -ENXIO;
+ }
+ gart = kzalloc(sizeof(*gart), GFP_KERNEL);
+ if (!gart) {
+ pr_err(DRIVER_NAME ": failed to allocate tegra_iovmm_device\n");
+ e = -ENOMEM;
+ goto fail;
+ }
+
+ gart_regs = ioremap_wc(res->start, res->end - res->start + 1);
+ if (!gart_regs) {
+ pr_err(DRIVER_NAME ": failed to remap GART registers\n");
+ e = -ENXIO;
+ goto fail;
+ }
+
+ gart->iovmm.name = VMM_NAME;
+ gart->iovmm.ops = &tegra_iovmm_gart_ops;
+ gart->iovmm.pgsize_bits = GART_PAGE_SHIFT;
+ spin_lock_init(&gart->pte_lock);
+
+ platform_set_drvdata(pdev, gart);
+
+ e = tegra_iovmm_register(&gart->iovmm);
+ if (e) goto fail;
+
+ e = tegra_iovmm_domain_init(&gart->domain, &gart->iovmm,
+ (tegra_iovmm_addr_t)res_remap->start,
+ (tegra_iovmm_addr_t)res_remap->end+1);
+ if (e) goto fail;
+
+ gart->regs = gart_regs;
+ gart->iovmm_base = (tegra_iovmm_addr_t)res_remap->start;
+ gart->page_count = res_remap->end - res_remap->start + 1;
+ gart->page_count >>= GART_PAGE_SHIFT;
+
+ gart->savedata = vmalloc(sizeof(u32)*gart->page_count);
+ if (!gart->savedata) {
+ pr_err(DRIVER_NAME ": failed to allocate context save area\n");
+ e = -ENOMEM;
+ goto fail;
+ }
+
+ spin_lock(&gart->pte_lock);
+
+ do_gart_setup(gart, NULL);
+ gart->enable = 1;
+
+ spin_unlock(&gart->pte_lock);
+ return 0;
+
+fail:
+ if (gart_regs)
+ iounmap(gart_regs);
+ if (gart && gart->savedata)
+ vfree(gart->savedata);
+ if (gart)
+ kfree(gart);
+ return e;
+}
+
+static int __devinit gart_init(void)
+{
+ return platform_driver_register(&tegra_iovmm_gart_drv);
+}
+
+static void __exit gart_exit(void)
+{
+ return platform_driver_unregister(&tegra_iovmm_gart_drv);
+}
+
+#define GART_PTE(_pfn) (0x80000000ul | ((_pfn)<<PAGE_SHIFT))
+
+
+static int gart_map(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *iovma)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ unsigned long gart_page, count;
+ unsigned int i;
+
+ gart_page = iovma->iovm_start;
+ count = iovma->iovm_length >> GART_PAGE_SHIFT;
+
+ for (i=0; i<count; i++) {
+ unsigned long pfn;
+
+ pfn = iovma->ops->lock_makeresident(iovma, i<<PAGE_SHIFT);
+ if (!pfn_valid(pfn))
+ goto fail;
+
+ spin_lock(&gart->pte_lock);
+
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
+ wmb();
+ gart_page += 1 << GART_PAGE_SHIFT;
+
+ spin_unlock(&gart->pte_lock);
+ }
+ wmb();
+ return 0;
+
+fail:
+ spin_lock(&gart->pte_lock);
+ while (i--) {
+ iovma->ops->release(iovma, i<<PAGE_SHIFT);
+ gart_page -= 1 << GART_PAGE_SHIFT;
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ }
+ spin_unlock(&gart->pte_lock);
+ wmb();
+ return -ENOMEM;
+}
+
+static void gart_unmap(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *iovma, bool decommit)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ unsigned long gart_page, count;
+ unsigned int i;
+
+ count = iovma->iovm_length >> GART_PAGE_SHIFT;
+ gart_page = iovma->iovm_start;
+
+ spin_lock(&gart->pte_lock);
+ for (i=0; i<count; i++) {
+ if (iovma->ops && iovma->ops->release)
+ iovma->ops->release(iovma, i<<PAGE_SHIFT);
+
+ writel(gart_page, gart->regs + GART_ENTRY_ADDR);
+ writel(0, gart->regs + GART_ENTRY_DATA);
+ wmb();
+ gart_page += 1 << GART_PAGE_SHIFT;
+ }
+ spin_unlock(&gart->pte_lock);
+ wmb();
+}
+
+static void gart_map_pfn(struct tegra_iovmm_device *dev,
+ struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t offs,
+ unsigned long pfn)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+
+ BUG_ON(!pfn_valid(pfn));
+ spin_lock(&gart->pte_lock);
+ writel(offs, gart->regs + GART_ENTRY_ADDR);
+ writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
+ wmb();
+ spin_unlock(&gart->pte_lock);
+ wmb();
+}
+
+static struct tegra_iovmm_domain *gart_alloc_domain(
+ struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
+{
+ struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
+ return &gart->domain;
+}
+
+subsys_initcall(gart_init);
+module_exit(gart_exit);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/iovmm.c
+ *
+ * Tegra I/O VM manager
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <mach/iovmm.h>
+
+/* after the best-fit block is located, the remaining pages not needed for
+ * the allocation will be split into a new free block if the number of
+ * remaining pages is >= MIN_SPLIT_PAGE.
+ */
+#define MIN_SPLIT_PAGE (4)
+#define MIN_SPLIT_BYTES(_d) (MIN_SPLIT_PAGE<<(_d)->dev->pgsize_bits)
+
+#define iovmm_start(_b) ((_b)->vm_area.iovm_start)
+#define iovmm_length(_b) ((_b)->vm_area.iovm_length)
+#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))
+
+/* flags for the block */
+#define BK_free 0 /* indicates free mappings */
+#define BK_map_dirty 1 /* used by demand-loaded mappings */
+
+/* flags for the client */
+#define CL_locked 0
+
+/* flags for the domain */
+#define DM_map_dirty 0
+
+struct tegra_iovmm_block {
+ struct tegra_iovmm_area vm_area;
+ atomic_t ref;
+ unsigned long flags;
+ unsigned long poison;
+ struct rb_node free_node;
+ struct rb_node all_node;
+};
+
+struct iovmm_share_group {
+ const char *name;
+ struct tegra_iovmm_domain *domain;
+ struct list_head client_list;
+ struct list_head group_list;
+ spinlock_t lock;
+};
+
+static LIST_HEAD(iovmm_devices);
+static LIST_HEAD(iovmm_groups);
+static DEFINE_MUTEX(iovmm_list_lock);
+static struct kmem_cache *iovmm_cache;
+
+static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
+ tegra_iovmm_addr_t addr)
+{
+ addr += (1<<dev->pgsize_bits);
+ addr--;
+ addr &= ~((1<<dev->pgsize_bits)-1);
+ return addr;
+}
+
+/* Round @addr down to a device-page boundary. */
+static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
+	tegra_iovmm_addr_t addr)
+{
+	tegra_iovmm_addr_t mask = ((tegra_iovmm_addr_t)1 << dev->pgsize_bits) - 1;
+
+	return addr & ~mask;
+}
+
+#define iovmprint(fmt, arg...) snprintf(page+len, count-len, fmt, ## arg)
+
+/* Walk every block in @domain->all_blocks under block_lock and report:
+ * total block count, free block count, total arena size, total free size
+ * and the size of the largest single free block. All out-parameters are
+ * always written (zeroed first). */
+static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
+	unsigned int *num_blocks, unsigned int *num_free,
+	tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
+	tegra_iovmm_addr_t *max_free)
+{
+	struct rb_node *n;
+	struct tegra_iovmm_block *b;
+
+	*num_blocks = 0;
+	*num_free = 0;
+	*total = (tegra_iovmm_addr_t)0;
+	*total_free = (tegra_iovmm_addr_t)0;
+	*max_free = (tegra_iovmm_addr_t)0;
+
+	spin_lock(&domain->block_lock);
+	n = rb_first(&domain->all_blocks);
+	while (n) {
+		b = rb_entry(n, struct tegra_iovmm_block, all_node);
+		n = rb_next(n);
+		(*num_blocks)++;
+		(*total) += iovmm_length(b);
+		if (test_bit(BK_free, &b->flags)) {
+			(*num_free)++;
+			(*total_free) += iovmm_length(b);
+			(*max_free) = max_t(tegra_iovmm_addr_t,
+				(*max_free), iovmm_length(b));
+		}
+	}
+	spin_unlock(&domain->block_lock);
+}
+
+/* /proc/iovmminfo read handler: dump per-group arena statistics in KiB.
+ * NOTE(review): 'len' is never clamped to 'count' between iovmprint()
+ * calls, and the %u format assumes tegra_iovmm_addr_t fits in unsigned
+ * int -- confirm both against the type's definition. */
+static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	struct iovmm_share_group *grp;
+	tegra_iovmm_addr_t max_free, total_free, total;
+	unsigned int num, num_free;
+
+	int len = 0;
+
+	mutex_lock(&iovmm_list_lock);
+	len += iovmprint("\ngroups\n");
+	if (list_empty(&iovmm_groups))
+		len += iovmprint("\t<empty>\n");
+	else {
+		list_for_each_entry(grp, &iovmm_groups, group_list) {
+			len += iovmprint("\t%s (device: %s)\n",
+				(grp->name) ? grp->name : "<unnamed>",
+				grp->domain->dev->name);
+			tegra_iovmm_block_stats(grp->domain, &num,
+				&num_free, &total, &total_free, &max_free);
+			total >>= 10;
+			total_free >>= 10;
+			max_free >>= 10;
+			len += iovmprint("\t\tsize: %uKiB free: %uKiB "
+				"largest: %uKiB (%u free / %u total blocks)\n",
+				total, total_free, max_free, num_free, num);
+		}
+	}
+	mutex_unlock(&iovmm_list_lock);
+
+	*eof = 1;
+	return len;
+}
+
+/* Drop one reference to @b; on the last put, poison the block and return
+ * it to the slab cache. The BUG_ONs catch a put on an already-freed
+ * block (poison set) and a put with no outstanding references. */
+static void iovmm_block_put(struct tegra_iovmm_block *b)
+{
+	BUG_ON(b->poison);
+	BUG_ON(atomic_read(&b->ref)==0);
+	if (!atomic_dec_return(&b->ref)) {
+		b->poison = 0xa5a5a5a5;
+		kmem_cache_free(iovmm_cache, b);
+	}
+}
+
+/* Return @block to the free pool: coalesce it with an address-adjacent
+ * free predecessor and/or successor, then insert the (possibly merged)
+ * block into the size-ordered free tree and mark it free.
+ * The iovmm_block_put() at entry drops the allocation reference taken by
+ * iovmm_alloc_block(); merged-away neighbours are also put. */
+static void iovmm_free_block(struct tegra_iovmm_domain *domain,
+	struct tegra_iovmm_block *block)
+{
+	struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
+	struct tegra_iovmm_block *succ = NULL; /* address-order successor */
+	struct rb_node **p;
+	struct rb_node *parent = NULL, *temp;
+	int pred_free = 0, succ_free = 0;
+
+	iovmm_block_put(block);
+
+	spin_lock(&domain->block_lock);
+	temp = rb_prev(&block->all_node);
+	if (temp)
+		pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
+	temp = rb_next(&block->all_node);
+	if (temp)
+		succ = rb_entry(temp, struct tegra_iovmm_block, all_node);
+
+	if (pred) pred_free = test_bit(BK_free, &pred->flags);
+	if (succ) succ_free = test_bit(BK_free, &succ->flags);
+
+	if (pred_free && succ_free) {
+		/* merge all three into pred; block and succ disappear */
+		iovmm_length(pred) += iovmm_length(block);
+		iovmm_length(pred) += iovmm_length(succ);
+		rb_erase(&block->all_node, &domain->all_blocks);
+		rb_erase(&succ->all_node, &domain->all_blocks);
+		rb_erase(&succ->free_node, &domain->free_blocks);
+		rb_erase(&pred->free_node, &domain->free_blocks);
+		iovmm_block_put(block);
+		iovmm_block_put(succ);
+		block = pred;
+	} else if (pred_free) {
+		iovmm_length(pred) += iovmm_length(block);
+		rb_erase(&block->all_node, &domain->all_blocks);
+		rb_erase(&pred->free_node, &domain->free_blocks);
+		iovmm_block_put(block);
+		block = pred;
+	} else if (succ_free) {
+		iovmm_length(block) += iovmm_length(succ);
+		rb_erase(&succ->all_node, &domain->all_blocks);
+		rb_erase(&succ->free_node, &domain->free_blocks);
+		iovmm_block_put(succ);
+	}
+
+	/* re-insert the surviving block into the size-ordered free tree */
+	p = &domain->free_blocks.rb_node;
+	while (*p) {
+		struct tegra_iovmm_block *b;
+		parent = *p;
+		b = rb_entry(parent, struct tegra_iovmm_block, free_node);
+		if (iovmm_length(block) >= iovmm_length(b))
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&block->free_node, parent, p);
+	rb_insert_color(&block->free_node, &domain->free_blocks);
+	set_bit(BK_free, &block->flags);
+	spin_unlock(&domain->block_lock);
+}
+
+/* if the best-fit block is larger than the requested size, a remainder
+ * block will be created and inserted into the free list in its place.
+ * since all free blocks are stored in two trees the new block needs to be
+ * linked into both. */
+static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
+ struct tegra_iovmm_block *block, unsigned long size)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct tegra_iovmm_block *rem;
+ struct tegra_iovmm_block *b;
+
+ rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
+ if (!rem) return;
+
+ spin_lock(&domain->block_lock);
+ p = &domain->free_blocks.rb_node;
+
+ iovmm_start(rem) = iovmm_start(block) + size;
+ iovmm_length(rem) = iovmm_length(block) - size;
+ atomic_set(&rem->ref, 1);
+ iovmm_length(block) = size;
+
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct tegra_iovmm_block, free_node);
+ if (iovmm_length(rem) >= iovmm_length(b))
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ set_bit(BK_free, &rem->flags);
+ rb_link_node(&rem->free_node, parent, p);
+ rb_insert_color(&rem->free_node, &domain->free_blocks);
+
+ p = &domain->all_blocks.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct tegra_iovmm_block, all_node);
+ if (iovmm_start(rem) >= iovmm_start(b))
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&rem->all_node, parent, p);
+ rb_insert_color(&rem->all_node, &domain->all_blocks);
+}
+
+/* Best-fit allocation from the size-ordered free tree.
+ * The file-scope 'splitting' flag serializes against a concurrent
+ * iovmm_split_free_block(), which temporarily drops block_lock; waiters
+ * spin (with schedule()) until the split completes.
+ * NOTE(review): iovmm_split_free_block() returns with block_lock held,
+ * so the final spin_unlock() below is balanced on the split path too. */
+static struct tegra_iovmm_block *iovmm_alloc_block(
+	struct tegra_iovmm_domain *domain, unsigned long size)
+{
+	struct rb_node *n;
+	struct tegra_iovmm_block *b, *best;
+	static int splitting = 0;
+
+	BUG_ON(!size);
+	size = iovmm_align_up(domain->dev, size);
+	for (;;) {
+		spin_lock(&domain->block_lock);
+		if (!splitting)
+			break;
+		spin_unlock(&domain->block_lock);
+		schedule();
+	}
+	/* best-fit: smallest free block whose length >= size */
+	n = domain->free_blocks.rb_node;
+	best = NULL;
+	while (n) {
+		b = rb_entry(n, struct tegra_iovmm_block, free_node);
+		if (iovmm_length(b) < size) n = n->rb_right;
+		else if (iovmm_length(b) == size) {
+			best = b;
+			break;
+		} else {
+			best = b;
+			n = n->rb_left;
+		}
+	}
+	if (!best) {
+		spin_unlock(&domain->block_lock);
+		return NULL;
+	}
+	rb_erase(&best->free_node, &domain->free_blocks);
+	clear_bit(BK_free, &best->flags);
+	atomic_inc(&best->ref);
+	/* only split when the leftover is big enough to be worth tracking */
+	if (iovmm_length(best) >= size+MIN_SPLIT_BYTES(domain)) {
+		splitting = 1;
+		spin_unlock(&domain->block_lock);
+		iovmm_split_free_block(domain, best, size);
+		splitting = 0;
+	}
+
+	spin_unlock(&domain->block_lock);
+
+	return best;
+}
+
+/* Initialize @domain over the page-aligned range [start, end), seeding
+ * both rbtrees with a single free block covering the whole arena.
+ * rb_link_node() with a NULL parent is valid only because both trees
+ * start empty here. Returns 0 or -ENOMEM. */
+int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
+	struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
+	tegra_iovmm_addr_t end)
+{
+	struct tegra_iovmm_block *b;
+
+	b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
+	if (!b) return -ENOMEM;
+
+	domain->dev = dev;
+	atomic_set(&domain->clients, 0);
+	atomic_set(&domain->locks, 0);
+	atomic_set(&b->ref, 1);
+	spin_lock_init(&domain->block_lock);
+	init_rwsem(&domain->map_lock);
+	init_waitqueue_head(&domain->delay_lock);
+	iovmm_start(b) = iovmm_align_up(dev, start);
+	iovmm_length(b) = iovmm_align_down(dev, end) - iovmm_start(b);
+	set_bit(BK_free, &b->flags);
+	rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
+	rb_insert_color(&b->free_node, &domain->free_blocks);
+	rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
+	rb_insert_color(&b->all_node, &domain->all_blocks);
+	return 0;
+}
+
+/* Allocate an IOVM area of @size bytes for @client.
+ * If @ops is provided and the client is not locked, the mapping is
+ * deferred: the block and domain are flagged dirty and the actual
+ * device map happens at client-lock time. If the client is already
+ * locked the mapping is performed immediately.
+ * Returns the new area, or NULL on failure. */
+struct tegra_iovmm_area *tegra_iovmm_create_vm(
+	struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
+	unsigned long size, pgprot_t pgprot)
+{
+	struct tegra_iovmm_block *b;
+	struct tegra_iovmm_device *dev;
+
+	if (!client) return NULL;
+
+	dev = client->domain->dev;
+
+	b = iovmm_alloc_block(client->domain, size);
+	if (!b) return NULL;
+
+	b->vm_area.domain = client->domain;
+	b->vm_area.pgprot = pgprot;
+	b->vm_area.ops = ops;
+
+	down_read(&b->vm_area.domain->map_lock);
+	if (ops && !test_bit(CL_locked, &client->flags)) {
+		/* defer: mapped later by _iovmm_client_lock() */
+		set_bit(BK_map_dirty, &b->flags);
+		set_bit(DM_map_dirty, &client->domain->flags);
+	} else if (ops) {
+		if (dev->ops->map(dev, &b->vm_area))
+			pr_err("%s failed to map locked domain\n", __func__);
+	}
+	up_read(&b->vm_area.domain->map_lock);
+
+	return &b->vm_area;
+}
+
+/* Map a single pfn at @vaddr inside @area. Only valid for areas created
+ * without ops (BUG_ON(area->ops)); @vaddr must be page-aligned and fall
+ * inside the area. */
+void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
+	tegra_iovmm_addr_t vaddr, unsigned long pfn)
+{
+	struct tegra_iovmm_device *dev = area->domain->dev;
+	BUG_ON(vaddr & ((1<<dev->pgsize_bits)-1));
+	BUG_ON(vaddr >= area->iovm_start + area->iovm_length);
+	BUG_ON(vaddr < area->iovm_start);
+	BUG_ON(area->ops);
+
+	dev->ops->map_pfn(dev, area, vaddr, pfn);
+}
+
+/* Tear down the device mapping for @vm without freeing the area. */
+void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
+{
+	struct tegra_iovmm_block *b;
+	struct tegra_iovmm_device *dev;
+
+	b = container_of(vm, struct tegra_iovmm_block, vm_area);
+	dev = vm->domain->dev;
+	/* if the vm area mapping was deferred, don't unmap it since
+	 * the memory for the page tables it uses may not be allocated */
+	down_read(&vm->domain->map_lock);
+	if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+		dev->ops->unmap(dev, vm, false);
+	up_read(&vm->domain->map_lock);
+}
+
+/* Re-establish the device mapping for a previously-zapped area.
+ * If the domain is currently locked, map immediately; otherwise defer by
+ * marking the block and domain dirty (mapped at next client lock).
+ * No-op for areas created without ops. */
+void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
+{
+	struct tegra_iovmm_block *b;
+	struct tegra_iovmm_device *dev;
+
+	b = container_of(vm, struct tegra_iovmm_block, vm_area);
+	dev = vm->domain->dev;
+	if (!vm->ops) return;
+
+	down_read(&vm->domain->map_lock);
+	/* vm->ops was checked above; it is set once at creation and never
+	 * cleared, so the old second check under the lock was redundant */
+	if (atomic_read(&vm->domain->locks))
+		dev->ops->map(dev, vm);
+	else {
+		set_bit(BK_map_dirty, &b->flags);
+		set_bit(DM_map_dirty, &vm->domain->flags);
+	}
+	up_read(&vm->domain->map_lock);
+}
+
+/* Unmap (unless the mapping was still deferred) and release @vm back to
+ * the domain's free pool. NULL-safe. */
+void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
+{
+	struct tegra_iovmm_block *b;
+	struct tegra_iovmm_device *dev;
+	struct tegra_iovmm_domain *domain;
+
+	if (!vm) return;
+
+	b = container_of(vm, struct tegra_iovmm_block, vm_area);
+	domain = vm->domain;
+	dev = vm->domain->dev;
+	down_read(&domain->map_lock);
+	if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+		dev->ops->unmap(dev, vm, true);
+	iovmm_free_block(domain, b);
+	up_read(&domain->map_lock);
+}
+
+/* Take an extra reference on @vm's backing block and return the area. */
+struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
+{
+	struct tegra_iovmm_block *blk;
+
+	BUG_ON(!vm);
+	blk = container_of(vm, struct tegra_iovmm_block, vm_area);
+	atomic_inc(&blk->ref);
+	return &blk->vm_area;
+}
+
+/* Drop one reference on @vm's backing block (frees it on last put). */
+void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
+{
+	struct tegra_iovmm_block *blk;
+
+	BUG_ON(!vm);
+	blk = container_of(vm, struct tegra_iovmm_block, vm_area);
+	iovmm_block_put(blk);
+}
+
+/* Find the allocated (non-free) area containing @addr in @client's
+ * domain and return it with an extra reference, or NULL.
+ * A block covers the half-open range [iovmm_start, iovmm_end); the old
+ * test used iovmm_end(b) >= addr, wrongly matching the one-past-the-end
+ * address of a block instead of the block that actually starts there. */
+struct tegra_iovmm_area *tegra_iovmm_find_area_get(
+	struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
+{
+	struct rb_node *n;
+	struct tegra_iovmm_block *b = NULL;
+
+	if (!client) return NULL;
+
+	spin_lock(&client->domain->block_lock);
+	n = client->domain->all_blocks.rb_node;
+
+	while (n) {
+		b = rb_entry(n, struct tegra_iovmm_block, all_node);
+		if ((iovmm_start(b) <= addr) && (addr < iovmm_end(b))) {
+			if (test_bit(BK_free, &b->flags)) b = NULL;
+			break;
+		}
+		if (addr > iovmm_start(b))
+			n = n->rb_right;
+		else
+			n = n->rb_left;
+		b = NULL;
+	}
+	if (b) atomic_inc(&b->ref);
+	spin_unlock(&client->domain->block_lock);
+	if (!b) return NULL;
+	return &b->vm_area;
+}
+
+/* Lock @client's domain: bump the domain lock count, call the device's
+ * lock_domain hook on the first lock, then flush any deferred (dirty)
+ * mappings accumulated while the domain was unlocked.
+ * Returns 0 on success, -ENODEV for a NULL client, -EAGAIN if the
+ * device refused the lock (caller may wait and retry). Relocking an
+ * already-locked client is reported and treated as success. */
+static int _iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+	struct tegra_iovmm_device *dev;
+	struct tegra_iovmm_domain *domain;
+	int v;
+
+	if (unlikely(!client)) return -ENODEV;
+	if (unlikely(test_bit(CL_locked, &client->flags))) {
+		pr_err("attempting to relock client %s\n", client->name);
+		return 0;
+	}
+
+	domain = client->domain;
+	dev = domain->dev;
+	down_write(&domain->map_lock);
+	v = atomic_inc_return(&domain->locks);
+	/* if the device doesn't export the lock_domain function, the device
+	 * must guarantee that any valid domain will be locked. */
+	if (v==1 && dev->ops->lock_domain) {
+		if (dev->ops->lock_domain(dev, domain)) {
+			atomic_dec(&domain->locks);
+			up_write(&domain->map_lock);
+			return -EAGAIN;
+		}
+	}
+	if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
+		struct rb_node *n;
+		struct tegra_iovmm_block *b;
+
+		/* replay every deferred mapping now that the domain is live */
+		spin_lock(&domain->block_lock);
+		n = rb_first(&domain->all_blocks);
+		while (n) {
+			b = rb_entry(n, struct tegra_iovmm_block, all_node);
+			n = rb_next(n);
+			if (test_bit(BK_free, &b->flags))
+				continue;
+
+			if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
+				if (!b->vm_area.ops) {
+					pr_err("%s: vm_area ops must exist for lazy maps\n", __func__);
+					continue;
+				}
+				dev->ops->map(dev, &b->vm_area);
+			}
+		}
+	}
+	set_bit(CL_locked, &client->flags);
+	up_write(&domain->map_lock);
+	return 0;
+}
+
+/* NOTE(review): despite the name this is not non-blocking --
+ * _iovmm_client_lock() takes map_lock with down_write(); the only
+ * "try" behaviour is that -EAGAIN from the device is returned rather
+ * than waited on. Confirm callers expect that. */
+int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
+{
+	return _iovmm_client_lock(client);
+}
+
+/* Blocking variant: waits (interruptibly) on the domain's wait queue
+ * until the device accepts the lock. Returns 0, -ENODEV, or -EINTR if
+ * interrupted by a signal. */
+int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
+{
+	int ret;
+
+	if (!client) return -ENODEV;
+
+	ret = wait_event_interruptible(client->domain->delay_lock,
+		_iovmm_client_lock(client)!=-EAGAIN);
+
+	if (ret==-ERESTARTSYS) return -EINTR;
+
+	return ret;
+}
+
+/* Release @client's domain lock; on the last unlock, let the device
+ * unlock the domain and wake any waiters blocked in
+ * tegra_iovmm_client_lock(). Unbalanced unlocks are reported and
+ * ignored. */
+void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
+{
+	struct tegra_iovmm_device *dev;
+	struct tegra_iovmm_domain *domain;
+	int do_wake = 0;
+
+	if (!client) return;
+
+	if (!test_and_clear_bit(CL_locked, &client->flags)) {
+		pr_err("unlocking unlocked client %s\n", client->name);
+		return;
+	}
+
+	domain = client->domain;
+	dev = domain->dev;
+	down_write(&domain->map_lock);
+	if (!atomic_dec_return(&client->domain->locks)) {
+		if (dev->ops->unlock_domain)
+			dev->ops->unlock_domain(dev, domain);
+		do_wake = 1;
+	}
+	up_write(&domain->map_lock);
+	if (do_wake) wake_up(&domain->delay_lock);
+}
+
+/* Sum the lengths of every block (free and allocated) in the client's
+ * domain, i.e. the total arena size. Returns 0 for a NULL client. */
+size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
+{
+	struct tegra_iovmm_domain *domain;
+	struct rb_node *node;
+	size_t total = 0;
+
+	if (!client) return 0;
+
+	domain = client->domain;
+
+	spin_lock(&domain->block_lock);
+	for (node = rb_first(&domain->all_blocks); node; node = rb_next(node)) {
+		struct tegra_iovmm_block *blk =
+			rb_entry(node, struct tegra_iovmm_block, all_node);
+		total += iovmm_length(blk);
+	}
+	spin_unlock(&domain->block_lock);
+
+	return total;
+}
+
+/* Destroy @client: force-unlock it if the caller leaked a lock, drop the
+ * domain's client count (freeing the domain on last client), unlink the
+ * client from its share group, and free the group itself once empty.
+ * NOTE(review): grp->name is 'const char *'; the kfree() relies on it
+ * having been kstrdup'd in tegra_iovmm_alloc_client -- confirm. */
+void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
+{
+	struct tegra_iovmm_device *dev;
+	if (!client) return;
+
+	BUG_ON(!client->domain || !client->domain->dev);
+
+	dev = client->domain->dev;
+
+	if (test_and_clear_bit(CL_locked, &client->flags)) {
+		pr_err("freeing locked client %s\n", client->name);
+		if (!atomic_dec_return(&client->domain->locks)) {
+			down_write(&client->domain->map_lock);
+			if (dev->ops->unlock_domain)
+				dev->ops->unlock_domain(dev, client->domain);
+			up_write(&client->domain->map_lock);
+			wake_up(&client->domain->delay_lock);
+		}
+	}
+	mutex_lock(&iovmm_list_lock);
+	if (!atomic_dec_return(&client->domain->clients))
+		if (dev->ops->free_domain)
+			dev->ops->free_domain(dev, client->domain);
+	list_del(&client->list);
+	if (list_empty(&client->group->client_list)) {
+		list_del(&client->group->group_list);
+		if (client->group->name) kfree(client->group->name);
+		kfree(client->group);
+	}
+	kfree(client->name);
+	kfree(client);
+	mutex_unlock(&iovmm_list_lock);
+}
+
+/* Create a client named @name, joining the share group @share_group if
+ * it exists, otherwise creating a new group (anonymous when @share_group
+ * is NULL) with a domain from the first device that provides one.
+ * Returns the client, or NULL on any failure.
+ * Fix: the old lookup left 'grp' pointing at the list-head sentinel when
+ * a named group was not found, then dereferenced grp->name (garbage). */
+struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
+	const char *share_group)
+{
+	struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
+	struct iovmm_share_group *grp = NULL;
+	struct tegra_iovmm_device *dev;
+
+	if (!c) return NULL;
+	c->name = kstrdup(name, GFP_KERNEL);
+	if (!c->name) goto fail;
+
+	mutex_lock(&iovmm_list_lock);
+	if (share_group) {
+		struct iovmm_share_group *g;
+		list_for_each_entry(g, &iovmm_groups, group_list) {
+			if (g->name && !strcmp(g->name, share_group)) {
+				grp = g;
+				break;
+			}
+		}
+	}
+	if (!grp) {
+		grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+		if (!grp) goto fail_lock;
+		grp->name = (share_group) ? kstrdup(share_group, GFP_KERNEL) : NULL;
+		if (share_group && !grp->name) {
+			kfree(grp);
+			goto fail_lock;
+		}
+		list_for_each_entry(dev, &iovmm_devices, list) {
+			grp->domain = dev->ops->alloc_domain(dev, c);
+			if (grp->domain) break;
+		}
+		if (!grp->domain) {
+			pr_err("%s: alloc_domain failed for %s\n",
+				__func__, c->name);
+			dump_stack();
+			kfree(grp->name);
+			kfree(grp);
+			grp = NULL;
+			goto fail_lock;
+		}
+		spin_lock_init(&grp->lock);
+		INIT_LIST_HEAD(&grp->client_list);
+		list_add_tail(&grp->group_list, &iovmm_groups);
+	}
+
+	atomic_inc(&grp->domain->clients);
+	c->group = grp;
+	c->domain = grp->domain;
+	spin_lock(&grp->lock);
+	list_add_tail(&c->list, &grp->client_list);
+	spin_unlock(&grp->lock);
+	mutex_unlock(&iovmm_list_lock);
+	return c;
+
+fail_lock:
+	mutex_unlock(&iovmm_list_lock);
+fail:
+	if (c) {
+		kfree(c->name);	/* kfree(NULL) is a no-op */
+		kfree(c);
+	}
+	return NULL;
+}
+
+/* Register an IOVMM device. The first registration also creates the
+ * block slab cache and the /proc/iovmminfo entry.
+ * Returns 0 or -ENOMEM. */
+int tegra_iovmm_register(struct tegra_iovmm_device *dev)
+{
+	BUG_ON(!dev);
+	mutex_lock(&iovmm_list_lock);
+	if (list_empty(&iovmm_devices)) {
+		iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
+		if (!iovmm_cache) {
+			pr_err("%s: failed to make kmem cache\n", __func__);
+			mutex_unlock(&iovmm_list_lock);
+			return -ENOMEM;
+		}
+		create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
+			tegra_iovmm_read_proc, NULL);
+	}
+	list_add_tail(&dev->list, &iovmm_devices);
+	mutex_unlock(&iovmm_list_lock);
+	/* printk() without a level defaults to the console loglevel;
+	 * be explicit */
+	pr_info("%s: added %s\n", __func__, dev->name);
+	return 0;
+}
+
+/* Suspend every registered IOVMM device; aborts (and returns the error)
+ * on the first device whose suspend hook fails. Devices suspended
+ * before the failure are NOT resumed here. */
+int tegra_iovmm_suspend(void)
+{
+	int rc = 0;
+	struct tegra_iovmm_device *dev;
+
+	mutex_lock(&iovmm_list_lock);
+	list_for_each_entry(dev, &iovmm_devices, list) {
+
+		if (!dev->ops->suspend)
+			continue;
+
+		rc = dev->ops->suspend(dev);
+		if (rc) {
+			pr_err("%s: %s suspend returned %d\n",
+				__func__, dev->name, rc);
+			mutex_unlock(&iovmm_list_lock);
+			return rc;
+		}
+	}
+	mutex_unlock(&iovmm_list_lock);
+	return 0;
+}
+
+/* Resume every registered IOVMM device that provides a resume hook. */
+void tegra_iovmm_resume(void)
+{
+	struct tegra_iovmm_device *dev;
+
+	mutex_lock(&iovmm_list_lock);
+	list_for_each_entry(dev, &iovmm_devices, list)
+		if (dev->ops->resume)
+			dev->ops->resume(dev);
+	mutex_unlock(&iovmm_list_lock);
+}
+
+/* Remove @dev from the device list. Always returns 0. */
+int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
+{
+	mutex_lock(&iovmm_list_lock);
+	list_del(&dev->list);
+	mutex_unlock(&iovmm_list_lock);
+	return 0;
+}
* Author:
* Colin Cross <ccross@google.com>
*
+ * Copyright (C) 2010, NVIDIA Corporation
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*/
#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/seq_file.h>
#include <asm/hardware/gic.h>
#include <mach/iomap.h>
+#include <mach/legacy_irq.h>
+#include <mach/suspend.h>
#include "board.h"
+#define PMC_CTRL 0x0
+#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
+#define PMC_WAKE_MASK 0xc
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_SAMPLE 0x20
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+
+static u32 tegra_lp0_wake_enb;
+static u32 tegra_lp0_wake_level;
+static u32 tegra_lp0_wake_level_any;
+
+static unsigned int tegra_wake_irq_count[32];
+
+/* ensures that sufficient time is passed for a register write to
+ * serialize into the 32KHz domain */
+/* Write a PMC register, then delay long enough (~4 cycles of the 32KHz
+ * clock) for the value to latch into the 32KHz power domain. */
+static void pmc_32kwritel(u32 val, unsigned long offs)
+{
+	writel(val, pmc + offs);
+	udelay(130);
+}
+
+/* Enable or disable @irq as an LP0 wake source. Returns -EINVAL if the
+ * irq has no wake-pad mapping. */
+int tegra_set_lp0_wake(int irq, int enable)
+{
+	int wake = tegra_irq_to_wake(irq);
+	u32 bit;
+
+	if (wake < 0)
+		return -EINVAL;
+
+	bit = 1 << wake;
+	if (enable)
+		tegra_lp0_wake_enb |= bit;
+	else
+		tegra_lp0_wake_enb &= ~bit;
+
+	return 0;
+}
+
+/* Record the wake polarity for @irq's LP0 wake pad.
+ * NOTE(review): an irq with no wake mapping returns 0 (success) here but
+ * -EINVAL in tegra_set_lp0_wake() -- confirm the asymmetry is intended. */
+int tegra_set_lp0_wake_type(int irq, int flow_type)
+{
+	int wake = tegra_irq_to_wake(irq);
+
+	if (wake < 0)
+		return 0;
+
+	switch (flow_type) {
+	case IRQF_TRIGGER_FALLING:
+	case IRQF_TRIGGER_LOW:
+		tegra_lp0_wake_level &= ~(1 << wake);
+		tegra_lp0_wake_level_any &= ~(1 << wake);
+		break;
+	case IRQF_TRIGGER_HIGH:
+	case IRQF_TRIGGER_RISING:
+		tegra_lp0_wake_level |= 1 << wake;
+		tegra_lp0_wake_level_any &= ~(1 << wake);
+		break;
+
+	case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+		tegra_lp0_wake_level_any |= 1 << wake;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/* LP1 wakes go through the legacy interrupt controller's wake mask. */
+int tegra_set_lp1_wake(int irq, int enable)
+{
+	return tegra_legacy_irq_set_wake(irq, enable);
+}
+
+/* Program the PMC wake pads before entering LP0. Board-supplied masks
+ * are merged with the irq-derived masks accumulated via
+ * tegra_set_lp0_wake{,_type}(). The LATCH_WAKEUPS pulse samples the
+ * current pad state so that any-edge pads can be armed for the opposite
+ * level of whatever they are asserting right now. */
+void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any)
+{
+	u32 temp;
+	u32 status;
+	u32 lvl;
+
+	wake_level &= wake_enb;
+	wake_any &= wake_enb;
+
+	wake_level |= (tegra_lp0_wake_level & tegra_lp0_wake_enb);
+	wake_any |= (tegra_lp0_wake_level_any & tegra_lp0_wake_enb);
+
+	wake_enb |= tegra_lp0_wake_enb;
+
+	pmc_32kwritel(0, PMC_SW_WAKE_STATUS);
+	temp = readl(pmc + PMC_CTRL);
+	temp |= PMC_CTRL_LATCH_WAKEUPS;
+	pmc_32kwritel(temp, PMC_CTRL);
+	temp &= ~PMC_CTRL_LATCH_WAKEUPS;
+	pmc_32kwritel(temp, PMC_CTRL);
+	status = readl(pmc + PMC_SW_WAKE_STATUS);
+	lvl = readl(pmc + PMC_WAKE_LEVEL);
+
+	/* flip the wakeup trigger for any-edge triggered pads
+	 * which are currently asserting as wakeups */
+	lvl ^= status;
+	lvl &= wake_any;
+
+	wake_level |= lvl;
+
+	writel(wake_level, pmc + PMC_WAKE_LEVEL);
+	/* Enable DPD sample to trigger sampling pads data and direction
+	 * in which pad will be driven during lp0 mode*/
+	writel(0x1, pmc + PMC_DPD_SAMPLE);
+
+	writel(wake_enb, pmc + PMC_WAKE_MASK);
+}
+
+#ifdef CONFIG_PM
+/* On resume, decode PMC_WAKE_STATUS, log which pad woke the system,
+ * bump the per-pad statistics and replay the corresponding irq so its
+ * handler runs. Pads with no irq mapping (tegra_wake_to_irq() == 0) are
+ * only logged. */
+static void tegra_irq_handle_wake(void)
+{
+	int wake;
+	int irq;
+	struct irq_desc *desc;
+
+	unsigned long wake_status = readl(pmc + PMC_WAKE_STATUS);
+	for_each_set_bit(wake, &wake_status, sizeof(wake_status) * 8) {
+		irq = tegra_wake_to_irq(wake);
+		if (!irq) {
+			pr_info("Resume caused by WAKE%d\n", wake);
+			continue;
+		}
+
+		desc = irq_to_desc(irq);
+		if (!desc || !desc->action || !desc->action->name) {
+			pr_info("Resume caused by WAKE%d, irq %d\n", wake, irq);
+			continue;
+		}
+
+		pr_info("Resume caused by WAKE%d, %s\n", wake,
+			desc->action->name);
+
+		tegra_wake_irq_count[wake]++;
+
+		generic_handle_irq(irq);
+	}
+}
+#endif
+
+/* Mask in both the GIC and the legacy controller (the legacy controller
+ * is what gates wakes while the GIC is powered down). */
+static void tegra_mask(unsigned int irq)
+{
+	gic_mask_irq(irq);
+	tegra_legacy_mask_irq(irq);
+}
+
+/* Unmask in both the GIC and the legacy controller. */
+static void tegra_unmask(unsigned int irq)
+{
+	gic_unmask_irq(irq);
+	tegra_legacy_unmask_irq(irq);
+}
+
+/* irq_chip set_wake: always arm the LP1 (legacy controller) wake; also
+ * arm the LP0 wake pad when the platform is configured for LP0. */
+static int tegra_set_wake(unsigned int irq, unsigned int enable)
+{
+	int ret;
+	ret = tegra_set_lp1_wake(irq, enable);
+	if (ret)
+		return ret;
+
+	if (tegra_get_suspend_mode() == TEGRA_SUSPEND_LP0)
+		return tegra_set_lp0_wake(irq, enable);
+
+	return 0;
+}
+
+/* irq_chip set_type: only the LP0 wake-pad polarity depends on the
+ * trigger type; nothing to program otherwise. */
+static int tegra_set_type(unsigned int irq, unsigned int flow_type)
+{
+	if (tegra_get_suspend_mode() == TEGRA_SUSPEND_LP0)
+		return tegra_set_lp0_wake_type(irq, flow_type);
+
+	return 0;
+}
+
+/* Ack: clear any software-forced assertion first, then ack in the GIC. */
+static void tegra_ack(unsigned int irq)
+{
+	tegra_legacy_force_irq_clr(irq);
+	gic_ack_irq(irq);
+}
+
+/* Retrigger by force-setting the irq in the legacy controller; returns 1
+ * to tell the core the retrigger was handled in hardware. */
+static int tegra_retrigger(unsigned int irq)
+{
+	tegra_legacy_force_irq_set(irq);
+	return 1;
+}
+
+/* irq_chip for Tegra peripheral interrupts: mirrors mask/unmask into the
+ * legacy controller so wake sources survive GIC power-down; affinity is
+ * delegated straight to the GIC on SMP. */
+static struct irq_chip tegra_irq = {
+	.name		= "PPI",
+	.ack		= tegra_ack,
+	.mask		= tegra_mask,
+	.unmask		= tegra_unmask,
+	.set_wake	= tegra_set_wake,
+	.set_type	= tegra_set_type,
+#ifdef CONFIG_SMP
+	.set_affinity	= gic_set_cpu,
+#endif
+	.retrigger	= tegra_retrigger,
+};
+
void __init tegra_init_irq(void)
{
+	unsigned int i;
+	int irq;
+
+	/* quiesce the legacy controllers before bringing up the GIC */
+	tegra_init_legacy_irq();
+
 	gic_dist_init(0, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), 29);
 	gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
+
+	/* route every main peripheral irq through the tegra_irq chip so
+	 * mask/wake state is mirrored into the legacy controllers */
+	for (i = 0; i < INT_MAIN_NR; i++) {
+		irq = INT_PRI_BASE + i;
+		set_irq_chip(irq, &tegra_irq);
+		set_irq_handler(irq, handle_level_irq);
+		set_irq_flags(irq, IRQF_VALID);
+	}
+}
+
+#ifdef CONFIG_PM
+/* Save legacy-controller state across suspend. */
+void tegra_irq_suspend(void)
+{
+	tegra_legacy_irq_suspend();
+}
+
+/* Restore legacy-controller state, then replay whichever wake source
+ * brought the system out of suspend. */
+void tegra_irq_resume(void)
+{
+	tegra_legacy_irq_resume();
+	tegra_irq_handle_wake();
 }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs seq handler: one row per wake pad that is mapped to an irq,
+ * currently wake-enabled, and either has a handler or has fired.
+ * Fix: irq_to_desc() can return NULL (sparse irqs); the old code
+ * dereferenced desc->action without checking. */
+static int tegra_wake_irq_debug_show(struct seq_file *s, void *data)
+{
+	int wake;
+	int irq;
+	struct irq_desc *desc;
+	const char *irq_name;
+
+	seq_printf(s, "wake  irq  count  name\n");
+	seq_printf(s, "----------------------\n");
+	for (wake = 0; wake < 32; wake++) {
+		irq = tegra_wake_to_irq(wake);
+		if (irq < 0)
+			continue;
+
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		if (tegra_wake_irq_count[wake] == 0 && desc->action == NULL)
+			continue;
+
+		if (!(desc->status & IRQ_WAKEUP))
+			continue;
+
+		irq_name = (desc->action && desc->action->name) ?
+			desc->action->name : "???";
+
+		seq_printf(s, "%4d  %3d  %5d  %s\n",
+			wake, irq, tegra_wake_irq_count[wake], irq_name);
+	}
+	return 0;
+}
+
+/* Standard single_open() wrapper for the seq_file show routine. */
+static int tegra_wake_irq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tegra_wake_irq_debug_show, NULL);
+}
+
+static const struct file_operations tegra_wake_irq_debug_fops = {
+ .open = tegra_wake_irq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Create the debugfs "wake_irq" statistics file.
+ * Fix: the failure message wrongly referred to the "suspend_mode" file
+ * (copy-paste from another initcall).
+ * NOTE(review): mode 0755 carries execute bits on a regular file; 0444
+ * would be conventional -- left unchanged to preserve behavior. */
+static int __init tegra_irq_debug_init(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("wake_irq", 0755, NULL, NULL,
+		&tegra_wake_irq_debug_fops);
+	if (!d) {
+		pr_info("Failed to create wake_irq debug file\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(tegra_irq_debug_init);
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/kfuse.c
+ *
+ * Copyright (C) 2010-2011 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* The kfuse block stores downstream and upstream HDCP keys for use by HDMI
+ * module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+#include <mach/iomap.h>
+#include <mach/kfuse.h>
+
+#include "apbio.h"
+
+/* register definition */
+#define KFUSE_STATE 0x80
+#define KFUSE_STATE_DONE (1u << 16)
+#define KFUSE_STATE_CRCPASS (1u << 17)
+#define KFUSE_KEYADDR 0x88
+#define KFUSE_KEYADDR_AUTOINC (1u << 16)
+#define KFUSE_KEYS 0x8c
+
+/* Read a kfuse register through the shared APB DMA accessor. */
+static inline u32 tegra_kfuse_readl(unsigned long offset)
+{
+	return tegra_apb_readl(TEGRA_KFUSE_BASE + offset);
+}
+
+/* Write a kfuse register through the shared APB DMA accessor. */
+static inline void tegra_kfuse_writel(u32 value, unsigned long offset)
+{
+	tegra_apb_writel(value, TEGRA_KFUSE_BASE + offset);
+}
+
+/* Poll KFUSE_STATE until the hardware sets DONE; up to 50 * 10ms.
+ * Returns 0 on completion, -ETIMEDOUT otherwise.
+ * Fix: a stray semicolon after the if-condition made the old code
+ * 'return 0' unconditionally on the first iteration, so DONE was never
+ * actually checked and the timeout path was unreachable. */
+static int wait_for_done(void)
+{
+	u32 reg;
+	int retries = 50;
+	do {
+		reg = tegra_kfuse_readl(KFUSE_STATE);
+		if (reg & KFUSE_STATE_DONE)
+			return 0;
+		msleep(10);
+	} while (--retries);
+	return -ETIMEDOUT;
+}
+
+/* read up to KFUSE_DATA_SZ bytes into dest.
+ * always starts at the first kfuse.
+ */
+/* read up to KFUSE_DATA_SZ bytes into dest.
+ * always starts at the first kfuse.
+ * Returns 0, -EINVAL (len too large), -ETIMEDOUT (hardware never
+ * signalled DONE) or -EIO (CRC failure).
+ * Fixes: the wait_for_done() result was previously ignored, and the
+ * tail memcpy always copied a full word, overrunning @dest when @len
+ * was not a multiple of 4. */
+int tegra_kfuse_read(void *dest, size_t len)
+{
+	u32 v;
+	unsigned cnt;
+
+	if (len > KFUSE_DATA_SZ)
+		return -EINVAL;
+
+	tegra_kfuse_writel(KFUSE_KEYADDR_AUTOINC, KFUSE_KEYADDR);
+	if (wait_for_done()) {
+		pr_err("kfuse: read timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if ((tegra_kfuse_readl(KFUSE_STATE) & KFUSE_STATE_CRCPASS) == 0) {
+		pr_err("kfuse: crc failed\n");
+		return -EIO;
+	}
+
+	for (cnt = 0; cnt < len; cnt += 4) {
+		size_t chunk = min_t(size_t, 4, len - cnt);
+		v = tegra_kfuse_readl(KFUSE_KEYS);
+		memcpy(dest + cnt, &v, chunk);
+	}
+
+	return 0;
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/legacy_irq.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/legacy_irq.h>
+
+#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
+#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
+#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
+
+#define ICTLR_CPU_IEP_VFIQ 0x08
+#define ICTLR_CPU_IEP_FIR 0x14
+#define ICTLR_CPU_IEP_FIR_SET 0x18
+#define ICTLR_CPU_IEP_FIR_CLR 0x1c
+
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2C
+
+#define ICTLR_COP_IER 0x30
+#define ICTLR_COP_IER_SET 0x34
+#define ICTLR_COP_IER_CLR 0x38
+#define ICTLR_COP_IEP_CLASS 0x3c
+
+#define NUM_ICTLRS 4
+
+static void __iomem *ictlr_reg_base[] = {
+ IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
+ IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
+};
+
+static u32 tegra_legacy_wake_mask[4];
+static u32 tegra_legacy_saved_mask[4];
+
+/* When going into deep sleep, the CPU is powered down, taking the GIC with it
+ In order to wake, the wake interrupts need to be enabled in the legacy
+ interrupt controller. */
+/* Enable @irq in its legacy controller (irqs 32+ map 32-per-controller). */
+void tegra_legacy_unmask_irq(unsigned int irq)
+{
+	unsigned int nr = irq - 32;
+
+	pr_debug("%s: %d\n", __func__, irq);
+	writel(1 << (nr & 31), ictlr_reg_base[nr >> 5] + ICTLR_CPU_IER_SET);
+}
+
+/* Disable @irq in its legacy controller. */
+void tegra_legacy_mask_irq(unsigned int irq)
+{
+	unsigned int nr = irq - 32;
+
+	pr_debug("%s: %d\n", __func__, irq);
+	writel(1 << (nr & 31), ictlr_reg_base[nr >> 5] + ICTLR_CPU_IER_CLR);
+}
+
+/* Software-assert @irq via the force-interrupt set register. */
+void tegra_legacy_force_irq_set(unsigned int irq)
+{
+	unsigned int nr = irq - 32;
+
+	pr_debug("%s: %d\n", __func__, irq);
+	writel(1 << (nr & 31), ictlr_reg_base[nr >> 5] + ICTLR_CPU_IEP_FIR_SET);
+}
+
+/* Clear a software-forced assertion of @irq. */
+void tegra_legacy_force_irq_clr(unsigned int irq)
+{
+	unsigned int nr = irq - 32;
+
+	pr_debug("%s: %d\n", __func__, irq);
+	writel(1 << (nr & 31), ictlr_reg_base[nr >> 5] + ICTLR_CPU_IEP_FIR_CLR);
+}
+
+/* Return 1 if @irq is currently software-forced, else 0. */
+int tegra_legacy_force_irq_status(unsigned int irq)
+{
+	unsigned int nr = irq - 32;
+	u32 fir;
+
+	pr_debug("%s: %d\n", __func__, irq);
+	fir = readl(ictlr_reg_base[nr >> 5] + ICTLR_CPU_IEP_FIR);
+	return !!(fir & (1 << (nr & 31)));
+}
+
+/* Route @irq to FIQ (true) or IRQ (false) in the legacy controller.
+ * Fix: the old code wrote only the single new bit to IEP_CLASS, wiping
+ * the FIQ/IRQ class selection of every other interrupt on the same
+ * controller; a read-modify-write preserves them. */
+void tegra_legacy_select_fiq(unsigned int irq, bool fiq)
+{
+	void __iomem *base;
+	u32 reg;
+	pr_debug("%s: %d\n", __func__, irq);
+
+	irq -= 32;
+	base = ictlr_reg_base[irq>>5];
+	reg = readl(base + ICTLR_CPU_IEP_CLASS);
+	if (fiq)
+		reg |= 1 << (irq & 31);
+	else
+		reg &= ~(1 << (irq & 31));
+	writel(reg, base + ICTLR_CPU_IEP_CLASS);
+}
+
+/* Raw read of the valid-FIQ status register of controller @nr. */
+unsigned long tegra_legacy_vfiq(int nr)
+{
+	return readl(ictlr_reg_base[nr] + ICTLR_CPU_IEP_VFIQ);
+}
+
+/* Raw read of the FIQ/IRQ class register of controller @nr. */
+unsigned long tegra_legacy_class(int nr)
+{
+	return readl(ictlr_reg_base[nr] + ICTLR_CPU_IEP_CLASS);
+}
+
+/* Record whether @irq should stay enabled across LP1 (applied later by
+ * tegra_legacy_irq_set_lp1_wake_mask()). Always succeeds. */
+int tegra_legacy_irq_set_wake(int irq, int enable)
+{
+	unsigned int nr = irq - 32;
+	u32 bit = 1 << (nr & 31);
+
+	if (enable)
+		tegra_legacy_wake_mask[nr >> 5] |= bit;
+	else
+		tegra_legacy_wake_mask[nr >> 5] &= ~bit;
+
+	return 0;
+}
+
+/* Before LP1: save the current enable masks and program only the wake
+ * sources, so just those interrupts can bring the CPU back. */
+void tegra_legacy_irq_set_lp1_wake_mask(void)
+{
+	void __iomem *base;
+	int i;
+
+	for (i = 0; i < NUM_ICTLRS; i++) {
+		base = ictlr_reg_base[i];
+		tegra_legacy_saved_mask[i] = readl(base + ICTLR_CPU_IER);
+		writel(tegra_legacy_wake_mask[i], base + ICTLR_CPU_IER);
+	}
+}
+
+/* After LP1: restore the enable masks saved by
+ * tegra_legacy_irq_set_lp1_wake_mask(). */
+void tegra_legacy_irq_restore_mask(void)
+{
+	void __iomem *base;
+	int i;
+
+	for (i = 0; i < NUM_ICTLRS; i++) {
+		base = ictlr_reg_base[i];
+		writel(tegra_legacy_saved_mask[i], base + ICTLR_CPU_IER);
+	}
+}
+
+/* Boot-time quiesce: disable every interrupt and route them all to the
+ * IRQ (not FIQ) class in all four legacy controllers. */
+void tegra_init_legacy_irq(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_ICTLRS; i++) {
+		void __iomem *ictlr = ictlr_reg_base[i];
+		writel(~0, ictlr + ICTLR_CPU_IER_CLR);
+		writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
+	}
+}
+
+#ifdef CONFIG_PM
+static u32 cop_ier[NUM_ICTLRS];
+static u32 cpu_ier[NUM_ICTLRS];
+static u32 cpu_iep[NUM_ICTLRS];
+
+/* Save CPU enable/class and COP enable state of every controller, and
+ * disable all COP (AVP coprocessor) interrupts for the suspend window. */
+void tegra_legacy_irq_suspend(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < NUM_ICTLRS; i++) {
+		void __iomem *ictlr = ictlr_reg_base[i];
+		cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
+		cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
+		cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
+		writel(~0, ictlr + ICTLR_COP_IER_CLR);
+	}
+	local_irq_restore(flags);
+}
+
+/* Restore the state captured by tegra_legacy_irq_suspend(): clear each
+ * enable register first, then re-set only the saved bits. */
+void tegra_legacy_irq_resume(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < NUM_ICTLRS; i++) {
+		void __iomem *ictlr = ictlr_reg_base[i];
+		writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
+		writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+		writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
+		writel(0, ictlr + ICTLR_COP_IEP_CLASS);
+		writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
+		writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
+	}
+	local_irq_restore(flags);
+}
+#endif
void __cpuinit local_timer_setup(struct clock_event_device *evt)
{
 evt->irq = IRQ_LOCALTIMER;
- twd_timer_setup(evt);
+ /* NOTE(review): presumably 2500000 is the minimum TWD rate in Hz and
+  * 4 the PERIPHCLK divisor — confirm against twd_timer_setup_scalable(). */
+ twd_timer_setup_scalable(evt, 2500000, 4);
}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/mc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <mach/iomap.h>
+#include <mach/mc.h>
+
+static DEFINE_SPINLOCK(tegra_mc_lock);
+
+/*
+ * Set the arbitration priority field of one memory-controller client.
+ * 'client' encodes both the MC register offset (client >> 8) and the
+ * bit position of the priority field within it (client & 0xff); 'prio'
+ * is written into that field after masking with TEGRA_MC_PRIO_MASK.
+ * The read-modify-write is serialised by tegra_mc_lock (IRQ-safe).
+ */
+void tegra_mc_set_priority(unsigned long client, unsigned long prio)
+{
+	unsigned long mc_base = IO_TO_VIRT(TEGRA_MC_BASE);
+	unsigned long reg = client >> 8;
+	int field = client & 0xff;
+	unsigned long val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tegra_mc_lock, flags);
+	val = readl(mc_base + reg);
+	val &= ~(TEGRA_MC_PRIO_MASK << field);
+	val |= prio << field;
+	writel(val, mc_base + reg);
+	spin_unlock_irqrestore(&tegra_mc_lock, flags);
+}
--- /dev/null
+/*
+ * linux/arch/arm/mach-tegra/pinmux-t2-tables.c
+ *
+ * Common pinmux configurations for Tegra 2 SoCs
+ *
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <mach/iomap.h>
+#include <mach/pinmux.h>
+#include <mach/suspend.h>
+
+/* Build one entry of the pad drive-strength pingroup table: name plus
+ * its control register offset within the APB_MISC aperture. */
+#define DRIVE_PINGROUP(pg_name, r) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg = r \
+ }
+
+/* Drive-strength control register for every Tegra 2 drive pingroup. */
+const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
+	DRIVE_PINGROUP(AO1,		0x868),
+	DRIVE_PINGROUP(AO2,		0x86c),
+	DRIVE_PINGROUP(AT1,		0x870),
+	DRIVE_PINGROUP(AT2,		0x874),
+	DRIVE_PINGROUP(CDEV1,		0x878),
+	DRIVE_PINGROUP(CDEV2,		0x87c),
+	DRIVE_PINGROUP(CSUS,		0x880),
+	DRIVE_PINGROUP(DAP1,		0x884),
+	DRIVE_PINGROUP(DAP2,		0x888),
+	DRIVE_PINGROUP(DAP3,		0x88c),
+	DRIVE_PINGROUP(DAP4,		0x890),
+	DRIVE_PINGROUP(DBG,		0x894),
+	DRIVE_PINGROUP(LCD1,		0x898),
+	DRIVE_PINGROUP(LCD2,		0x89c),
+	DRIVE_PINGROUP(SDMMC2,		0x8a0),
+	DRIVE_PINGROUP(SDMMC3,		0x8a4),
+	DRIVE_PINGROUP(SPI,		0x8a8),
+	DRIVE_PINGROUP(UAA,		0x8ac),
+	DRIVE_PINGROUP(UAB,		0x8b0),
+	DRIVE_PINGROUP(UART2,		0x8b4),
+	DRIVE_PINGROUP(UART3,		0x8b8),
+	DRIVE_PINGROUP(VI1,		0x8bc),
+	DRIVE_PINGROUP(VI2,		0x8c0),
+	DRIVE_PINGROUP(XM2A,		0x8c4),
+	DRIVE_PINGROUP(XM2C,		0x8c8),
+	DRIVE_PINGROUP(XM2D,		0x8cc),
+	DRIVE_PINGROUP(XM2CLK,		0x8d0),
+	DRIVE_PINGROUP(MEMCOMP,	0x8d4),
+	DRIVE_PINGROUP(SDIO1,		0x8e0),
+	DRIVE_PINGROUP(CRT,		0x8ec),
+	DRIVE_PINGROUP(DDC,		0x8f0),
+	DRIVE_PINGROUP(GMA,		0x8f4),
+	DRIVE_PINGROUP(GMB,		0x8f8),
+	DRIVE_PINGROUP(GMC,		0x8fc),
+	DRIVE_PINGROUP(GMD,		0x900),
+	DRIVE_PINGROUP(GME,		0x904),
+	DRIVE_PINGROUP(OWR,		0x908),
+	DRIVE_PINGROUP(UAD,		0x90c),
+};
+
+/*
+ * Build one entry of the pinmux pingroup table.  Each entry records the
+ * I/O voltage rail, the four selectable mux functions plus a designated
+ * safe function, and the APB_MISC register offset / bit position of its
+ * tristate, mux and pull-up/down controls.  A register value of -1
+ * means the pingroup has no such control.
+ */
+#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \
+ tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
+ [TEGRA_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .vddio = TEGRA_VDDIO_ ## vdd, \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_ ## f2, \
+ TEGRA_MUX_ ## f3, \
+ }, \
+ .func_safe = TEGRA_MUX_ ## f_safe, \
+ .tri_reg = tri_r, \
+ .tri_bit = tri_b, \
+ .mux_reg = mux_r, \
+ .mux_bit = mux_b, \
+ .pupd_reg = pupd_r, \
+ .pupd_bit = pupd_b, \
+ }
+
+/* Full Tegra 2 pingroup table (absolute APB_MISC register offsets). */
+const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
+	PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),
+	PINGROUP(ATB, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),
+	PINGROUP(ATC, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),
+	PINGROUP(ATD, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 3, 0x80, 20, 0xA0, 6),
+	PINGROUP(ATE, NAND, IDE, NAND, GMI, RSVD, IDE, 0x18, 25, 0x80, 12, 0xA0, 8),
+	PINGROUP(CDEV1, AUDIO, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, OSC, 0x14, 4, 0x88, 2, 0xA8, 0),
+	PINGROUP(CDEV2, AUDIO, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, OSC, 0x14, 5, 0x88, 4, 0xA8, 2),
+	PINGROUP(CRTP, LCD, CRT, RSVD, RSVD, RSVD, RSVD, 0x20, 14, 0x98, 20, 0xA4, 24),
+	PINGROUP(CSUS, VI, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, PLLC_OUT1, 0x14, 6, 0x88, 6, 0xAC, 24),
+	PINGROUP(DAP1, AUDIO, DAP1, RSVD, GMI, SDIO2, DAP1, 0x14, 7, 0x88, 20, 0xA0, 10),
+	PINGROUP(DAP2, AUDIO, DAP2, TWC, RSVD, GMI, DAP2, 0x14, 8, 0x88, 22, 0xA0, 12),
+	PINGROUP(DAP3, BB, DAP3, RSVD, RSVD, RSVD, DAP3, 0x14, 9, 0x88, 24, 0xA0, 14),
+	PINGROUP(DAP4, UART, DAP4, RSVD, GMI, RSVD, DAP4, 0x14, 10, 0x88, 26, 0xA0, 16),
+	PINGROUP(DDC, LCD, I2C2, RSVD, RSVD, RSVD, RSVD4, 0x18, 31, 0x88, 0, 0xB0, 28),
+	PINGROUP(DTA, VI, RSVD, SDIO2, VI, RSVD, RSVD4, 0x14, 11, 0x84, 20, 0xA0, 18),
+	PINGROUP(DTB, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 12, 0x84, 22, 0xA0, 20),
+	PINGROUP(DTC, VI, RSVD, RSVD, VI, RSVD, RSVD1, 0x14, 13, 0x84, 26, 0xA0, 22),
+	PINGROUP(DTD, VI, RSVD, SDIO2, VI, RSVD, RSVD1, 0x14, 14, 0x84, 28, 0xA0, 24),
+	PINGROUP(DTE, VI, RSVD, RSVD, VI, SPI1, RSVD1, 0x14, 15, 0x84, 30, 0xA0, 26),
+	PINGROUP(DTF, VI, I2C3, RSVD, VI, RSVD, RSVD4, 0x20, 12, 0x98, 30, 0xA0, 28),
+	PINGROUP(GMA, NAND, UARTE, SPI3, GMI, SDIO4, SPI3, 0x14, 28, 0x84, 0, 0xB0, 20),
+	PINGROUP(GMB, NAND, IDE, NAND, GMI, GMI_INT, GMI, 0x18, 29, 0x88, 28, 0xB0, 22),
+	PINGROUP(GMC, NAND, UARTD, SPI4, GMI, SFLASH, SPI4, 0x14, 29, 0x84, 2, 0xB0, 24),
+	PINGROUP(GMD, NAND, RSVD, NAND, GMI, SFLASH, GMI, 0x18, 30, 0x88, 30, 0xB0, 26),
+	PINGROUP(GME, NAND, RSVD, DAP5, GMI, SDIO4, GMI, 0x18, 0, 0x8C, 0, 0xA8, 24),
+	PINGROUP(GPU, UART, PWM, UARTA, GMI, RSVD, RSVD4, 0x14, 16, 0x8C, 4, 0xA4, 20),
+	PINGROUP(GPU7, SYS, RTCK, RSVD, RSVD, RSVD, RTCK, 0x20, 11, 0x98, 28, 0xA4, 6),
+	PINGROUP(GPV, SD, PCIE, RSVD, RSVD, RSVD, PCIE, 0x14, 17, 0x8C, 2, 0xA0, 30),
+	PINGROUP(HDINT, LCD, HDMI, RSVD, RSVD, RSVD, HDMI, 0x1C, 23, 0x84, 4, 0xAC, 22),
+	PINGROUP(I2CP, SYS, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 18, 0x88, 8, 0xA4, 2),
+	PINGROUP(IRRX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 20, 0x88, 18, 0xA8, 22),
+	PINGROUP(IRTX, UART, UARTA, UARTB, GMI, SPI4, UARTB, 0x14, 19, 0x88, 16, 0xA8, 20),
+	PINGROUP(KBCA, SYS, KBC, NAND, SDIO2, EMC_TEST0_DLL, KBC, 0x14, 22, 0x88, 10, 0xA4, 8),
+	PINGROUP(KBCB, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x14, 21, 0x88, 12, 0xA4, 10),
+	PINGROUP(KBCC, SYS, KBC, NAND, TRACE, EMC_TEST1_DLL, KBC, 0x18, 26, 0x88, 14, 0xA4, 12),
+	PINGROUP(KBCD, SYS, KBC, NAND, SDIO2, MIO, KBC, 0x20, 10, 0x98, 26, 0xA4, 14),
+	PINGROUP(KBCE, SYS, KBC, NAND, OWR, RSVD, KBC, 0x14, 26, 0x80, 28, 0xB0, 2),
+	PINGROUP(KBCF, SYS, KBC, NAND, TRACE, MIO, KBC, 0x14, 27, 0x80, 26, 0xB0, 0),
+	PINGROUP(LCSN, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 31, 0x90, 12, 0xAC, 20),
+	PINGROUP(LD0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 0, 0x94, 0, 0xAC, 12),
+	PINGROUP(LD1, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 1, 0x94, 2, 0xAC, 12),
+	PINGROUP(LD10, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 10, 0x94, 20, 0xAC, 12),
+	PINGROUP(LD11, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 11, 0x94, 22, 0xAC, 12),
+	PINGROUP(LD12, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 12, 0x94, 24, 0xAC, 12),
+	PINGROUP(LD13, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 13, 0x94, 26, 0xAC, 12),
+	PINGROUP(LD14, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 14, 0x94, 28, 0xAC, 12),
+	PINGROUP(LD15, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 15, 0x94, 30, 0xAC, 12),
+	PINGROUP(LD16, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 16, 0x98, 0, 0xAC, 12),
+	PINGROUP(LD17, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 17, 0x98, 2, 0xAC, 12),
+	PINGROUP(LD2, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 2, 0x94, 4, 0xAC, 12),
+	PINGROUP(LD3, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 3, 0x94, 6, 0xAC, 12),
+	PINGROUP(LD4, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 4, 0x94, 8, 0xAC, 12),
+	PINGROUP(LD5, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 5, 0x94, 10, 0xAC, 12),
+	PINGROUP(LD6, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 6, 0x94, 12, 0xAC, 12),
+	PINGROUP(LD7, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 7, 0x94, 14, 0xAC, 12),
+	PINGROUP(LD8, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 8, 0x94, 16, 0xAC, 12),
+	PINGROUP(LD9, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 9, 0x94, 18, 0xAC, 12),
+	PINGROUP(LDC, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 30, 0x90, 14, 0xAC, 20),
+	PINGROUP(LDI, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 6, 0x98, 16, 0xAC, 18),
+	PINGROUP(LHP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 18, 0x98, 10, 0xAC, 16),
+	PINGROUP(LHP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 19, 0x98, 4, 0xAC, 14),
+	PINGROUP(LHP2, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 20, 0x98, 6, 0xAC, 14),
+	PINGROUP(LHS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x20, 7, 0x90, 22, 0xAC, 22),
+	PINGROUP(LM0, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, RSVD4, 0x1C, 24, 0x90, 26, 0xAC, 22),
+	PINGROUP(LM1, LCD, DISPLAYA, DISPLAYB, RSVD, CRT, RSVD3, 0x1C, 25, 0x90, 28, 0xAC, 22),
+	PINGROUP(LPP, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 8, 0x98, 14, 0xAC, 18),
+	PINGROUP(LPW0, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 3, 0x90, 0, 0xAC, 20),
+	PINGROUP(LPW1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x20, 4, 0x90, 2, 0xAC, 20),
+	PINGROUP(LPW2, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 5, 0x90, 4, 0xAC, 20),
+	PINGROUP(LSC0, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 27, 0x90, 18, 0xAC, 22),
+	PINGROUP(LSC1, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 28, 0x90, 20, 0xAC, 20),
+	PINGROUP(LSCK, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x1C, 29, 0x90, 16, 0xAC, 20),
+	PINGROUP(LSDA, LCD, DISPLAYA, DISPLAYB, SPI3, HDMI, DISPLAYA, 0x20, 1, 0x90, 8, 0xAC, 20),
+	PINGROUP(LSDI, LCD, DISPLAYA, DISPLAYB, SPI3, RSVD, DISPLAYA, 0x20, 2, 0x90, 6, 0xAC, 20),
+	PINGROUP(LSPI, LCD, DISPLAYA, DISPLAYB, XIO, HDMI, DISPLAYA, 0x20, 0, 0x90, 10, 0xAC, 22),
+	PINGROUP(LVP0, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 21, 0x90, 30, 0xAC, 22),
+	PINGROUP(LVP1, LCD, DISPLAYA, DISPLAYB, RSVD, RSVD, RSVD4, 0x1C, 22, 0x98, 8, 0xAC, 16),
+	PINGROUP(LVS, LCD, DISPLAYA, DISPLAYB, XIO, RSVD, RSVD4, 0x1C, 26, 0x90, 24, 0xAC, 22),
+	PINGROUP(OWC, SYS, OWR, RSVD, RSVD, RSVD, OWR, 0x14, 31, 0x84, 8, 0xB0, 30),
+	PINGROUP(PMC, SYS, PWR_ON, PWR_INTR, RSVD, RSVD, PWR_ON, 0x14, 23, 0x98, 18, -1, -1),
+	PINGROUP(PTA, NAND, I2C2, HDMI, GMI, RSVD, RSVD4, 0x14, 24, 0x98, 22, 0xA4, 4),
+	PINGROUP(RM, UART, I2C, RSVD, RSVD, RSVD, RSVD4, 0x14, 25, 0x80, 14, 0xA4, 0),
+	PINGROUP(SDB, SD, UARTA, PWM, SDIO3, SPI2, PWM, 0x20, 15, 0x8C, 10, -1, -1),
+	PINGROUP(SDC, SD, PWM, TWC, SDIO3, SPI3, TWC, 0x18, 1, 0x8C, 12, 0xAC, 28),
+	PINGROUP(SDD, SD, UARTA, PWM, SDIO3, SPI3, PWM, 0x18, 2, 0x8C, 14, 0xAC, 30),
+	PINGROUP(SDIO1, BB, SDIO1, RSVD, UARTE, UARTA, RSVD2, 0x14, 30, 0x80, 30, 0xB0, 18),
+	PINGROUP(SLXA, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 3, 0x84, 6, 0xA4, 22),
+	PINGROUP(SLXC, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 5, 0x84, 10, 0xA4, 26),
+	PINGROUP(SLXD, SD, SPDIF, SPI4, SDIO3, SPI2, SPI4, 0x18, 6, 0x84, 12, 0xA4, 28),
+	PINGROUP(SLXK, SD, PCIE, SPI4, SDIO3, SPI2, PCIE, 0x18, 7, 0x84, 14, 0xA4, 30),
+	PINGROUP(SPDI, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 8, 0x8C, 8, 0xA4, 16),
+	PINGROUP(SPDO, AUDIO, SPDIF, RSVD, I2C, SDIO2, RSVD2, 0x18, 9, 0x8C, 6, 0xA4, 18),
+	PINGROUP(SPIA, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 10, 0x8C, 30, 0xA8, 4),
+	PINGROUP(SPIB, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 11, 0x8C, 28, 0xA8, 6),
+	PINGROUP(SPIC, AUDIO, SPI1, SPI2, SPI3, GMI, GMI, 0x18, 12, 0x8C, 26, 0xA8, 8),
+	PINGROUP(SPID, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 13, 0x8C, 24, 0xA8, 10),
+	PINGROUP(SPIE, AUDIO, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x18, 14, 0x8C, 22, 0xA8, 12),
+	PINGROUP(SPIF, AUDIO, SPI3, SPI1, SPI2, RSVD, RSVD4, 0x18, 15, 0x8C, 20, 0xA8, 14),
+	PINGROUP(SPIG, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 16, 0x8C, 18, 0xA8, 16),
+	PINGROUP(SPIH, AUDIO, SPI3, SPI2, SPI2_ALT, I2C, SPI2_ALT, 0x18, 17, 0x8C, 16, 0xA8, 18),
+	PINGROUP(UAA, BB, SPI3, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 18, 0x80, 0, 0xAC, 0),
+	PINGROUP(UAB, BB, SPI2, MIPI_HS, UARTA, ULPI, MIPI_HS, 0x18, 19, 0x80, 2, 0xAC, 2),
+	PINGROUP(UAC, BB, OWR, RSVD, RSVD, RSVD, RSVD4, 0x18, 20, 0x80, 4, 0xAC, 4),
+	PINGROUP(UAD, UART, IRDA, SPDIF, UARTA, SPI4, SPDIF, 0x18, 21, 0x80, 6, 0xAC, 6),
+	PINGROUP(UCA, UART, UARTC, RSVD, GMI, RSVD, RSVD4, 0x18, 22, 0x84, 16, 0xAC, 8),
+	PINGROUP(UCB, UART, UARTC, PWM, GMI, RSVD, RSVD4, 0x18, 23, 0x84, 18, 0xAC, 10),
+	PINGROUP(UDA, BB, SPI1, RSVD, UARTD, ULPI, RSVD2, 0x20, 13, 0x80, 8, 0xB0, 16),
+	/* these pin groups only have pullup and pull down control */
+	PINGROUP(CK32, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 14),
+	PINGROUP(DDRC, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xAC, 26),
+	PINGROUP(PMCA, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 4),
+	PINGROUP(PMCB, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 6),
+	PINGROUP(PMCC, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 8),
+	PINGROUP(PMCD, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 10),
+	PINGROUP(PMCE, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xB0, 12),
+	PINGROUP(XM2C, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),
+	PINGROUP(XM2D, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),
+};
+
+#ifdef CONFIG_PM
+/* APB_MISC register banks saved/restored across suspend: base offset
+ * and word count of the tristate, mux-control and pull-up/down banks. */
+#define TRISTATE_REG_A 0x14
+#define TRISTATE_REG_NUM 4
+#define PIN_MUX_CTL_REG_A 0x80
+#define PIN_MUX_CTL_REG_NUM 8
+#define PULLUPDOWN_REG_A 0xa0
+#define PULLUPDOWN_REG_NUM 5
+
+/* Context buffer: all three banks plus one register per drive group. */
+static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM +
+		      PULLUPDOWN_REG_NUM +
+		      ARRAY_SIZE(tegra_soc_drive_pingroups)];
+
+/* Read a pinmux register at 'offset' within the APB_MISC aperture. */
+static inline unsigned long pg_readl(unsigned long offset)
+{
+	return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
+/* Write a pinmux register at 'offset' within the APB_MISC aperture. */
+static inline void pg_writel(unsigned long value, unsigned long offset)
+{
+	writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
+}
+
+/* Save mux, pull-up/down, tristate and drive registers into pinmux_reg[]. */
+void tegra_pinmux_suspend(void)
+{
+	unsigned int i;
+	u32 *ctx = pinmux_reg;
+
+	for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++)
+		*ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4);
+
+	for (i = 0; i < PULLUPDOWN_REG_NUM; i++)
+		*ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4);
+
+	for (i = 0; i < TRISTATE_REG_NUM; i++)
+		*ctx++ = pg_readl(TRISTATE_REG_A + i*4);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+		*ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg);
+}
+
+/*
+ * Restore the registers saved by tegra_pinmux_suspend(), in the same
+ * bank order (mux first, then pulls, then tristates, then drive).
+ * NOTE(review): restoring mux before releasing tristates is presumably
+ * deliberate, so pins are routed before being driven — confirm.
+ */
+void tegra_pinmux_resume(void)
+{
+	unsigned int i;
+	u32 *ctx = pinmux_reg;
+
+	for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++)
+		pg_writel(*ctx++, PIN_MUX_CTL_REG_A + i*4);
+
+	for (i = 0; i < PULLUPDOWN_REG_NUM; i++)
+		pg_writel(*ctx++, PULLUPDOWN_REG_A + i*4);
+
+	for (i = 0; i < TRISTATE_REG_NUM; i++)
+		pg_writel(*ctx++, TRISTATE_REG_A + i*4);
+
+	for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++)
+		pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg);
+}
+#endif
*
*/
-
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <mach/iomap.h>
#include <mach/pinmux.h>
-
-#define TEGRA_TRI_STATE(x) (0x14 + (4 * (x)))
-#define TEGRA_PP_MUX_CTL(x) (0x80 + (4 * (x)))
-#define TEGRA_PP_PU_PD(x) (0xa0 + (4 * (x)))
-
-#define REG_A 0
-#define REG_B 1
-#define REG_C 2
-#define REG_D 3
-#define REG_E 4
-#define REG_F 5
-#define REG_G 6
-
-#define REG_N -1
-
#define HSM_EN(reg) (((reg) >> 2) & 0x1)
#define SCHMT_EN(reg) (((reg) >> 3) & 0x1)
#define LPMD(reg) (((reg) >> 4) & 0x3)
#define SLWR(reg) (((reg) >> 28) & 0x3)
#define SLWF(reg) (((reg) >> 30) & 0x3)
-struct tegra_pingroup_desc {
- const char *name;
- int funcs[4];
- s8 tri_reg; /* offset into the TRISTATE_REG_* register bank */
- s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
- s8 mux_reg; /* offset into the PIN_MUX_CTL_* register bank */
- s8 mux_bit; /* offset into the PIN_MUX_CTL_* register bit */
- s8 pupd_reg; /* offset into the PULL_UPDOWN_REG_* register bank */
- s8 pupd_bit; /* offset into the PULL_UPDOWN_REG_* register bit */
-};
-
-#define PINGROUP(pg_name, f0, f1, f2, f3, \
- tri_r, tri_b, mux_r, mux_b, pupd_r, pupd_b) \
- [TEGRA_PINGROUP_ ## pg_name] = { \
- .name = #pg_name, \
- .funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
- }, \
- .tri_reg = REG_ ## tri_r, \
- .tri_bit = tri_b, \
- .mux_reg = REG_ ## mux_r, \
- .mux_bit = mux_b, \
- .pupd_reg = REG_ ## pupd_r, \
- .pupd_bit = pupd_b, \
- }
-
-static const struct tegra_pingroup_desc pingroups[TEGRA_MAX_PINGROUP] = {
- PINGROUP(ATA, IDE, NAND, GMI, RSVD, A, 0, A, 24, A, 0),
- PINGROUP(ATB, IDE, NAND, GMI, SDIO4, A, 1, A, 16, A, 2),
- PINGROUP(ATC, IDE, NAND, GMI, SDIO4, A, 2, A, 22, A, 4),
- PINGROUP(ATD, IDE, NAND, GMI, SDIO4, A, 3, A, 20, A, 6),
- PINGROUP(ATE, IDE, NAND, GMI, RSVD, B, 25, A, 12, A, 8),
- PINGROUP(CDEV1, OSC, PLLA_OUT, PLLM_OUT1, AUDIO_SYNC, A, 4, C, 2, C, 0),
- PINGROUP(CDEV2, OSC, AHB_CLK, APB_CLK, PLLP_OUT4, A, 5, C, 4, C, 2),
- PINGROUP(CRTP, CRT, RSVD, RSVD, RSVD, D, 14, G, 20, B, 24),
- PINGROUP(CSUS, PLLC_OUT1, PLLP_OUT2, PLLP_OUT3, VI_SENSOR_CLK, A, 6, C, 6, D, 24),
- PINGROUP(DAP1, DAP1, RSVD, GMI, SDIO2, A, 7, C, 20, A, 10),
- PINGROUP(DAP2, DAP2, TWC, RSVD, GMI, A, 8, C, 22, A, 12),
- PINGROUP(DAP3, DAP3, RSVD, RSVD, RSVD, A, 9, C, 24, A, 14),
- PINGROUP(DAP4, DAP4, RSVD, GMI, RSVD, A, 10, C, 26, A, 16),
- PINGROUP(DDC, I2C2, RSVD, RSVD, RSVD, B, 31, C, 0, E, 28),
- PINGROUP(DTA, RSVD, SDIO2, VI, RSVD, A, 11, B, 20, A, 18),
- PINGROUP(DTB, RSVD, RSVD, VI, SPI1, A, 12, B, 22, A, 20),
- PINGROUP(DTC, RSVD, RSVD, VI, RSVD, A, 13, B, 26, A, 22),
- PINGROUP(DTD, RSVD, SDIO2, VI, RSVD, A, 14, B, 28, A, 24),
- PINGROUP(DTE, RSVD, RSVD, VI, SPI1, A, 15, B, 30, A, 26),
- PINGROUP(DTF, I2C3, RSVD, VI, RSVD, D, 12, G, 30, A, 28),
- PINGROUP(GMA, UARTE, SPI3, GMI, SDIO4, A, 28, B, 0, E, 20),
- PINGROUP(GMB, IDE, NAND, GMI, GMI_INT, B, 29, C, 28, E, 22),
- PINGROUP(GMC, UARTD, SPI4, GMI, SFLASH, A, 29, B, 2, E, 24),
- PINGROUP(GMD, RSVD, NAND, GMI, SFLASH, B, 30, C, 30, E, 26),
- PINGROUP(GME, RSVD, DAP5, GMI, SDIO4, B, 0, D, 0, C, 24),
- PINGROUP(GPU, PWM, UARTA, GMI, RSVD, A, 16, D, 4, B, 20),
- PINGROUP(GPU7, RTCK, RSVD, RSVD, RSVD, D, 11, G, 28, B, 6),
- PINGROUP(GPV, PCIE, RSVD, RSVD, RSVD, A, 17, D, 2, A, 30),
- PINGROUP(HDINT, HDMI, RSVD, RSVD, RSVD, C, 23, B, 4, D, 22),
- PINGROUP(I2CP, I2C, RSVD, RSVD, RSVD, A, 18, C, 8, B, 2),
- PINGROUP(IRRX, UARTA, UARTB, GMI, SPI4, A, 20, C, 18, C, 22),
- PINGROUP(IRTX, UARTA, UARTB, GMI, SPI4, A, 19, C, 16, C, 20),
- PINGROUP(KBCA, KBC, NAND, SDIO2, EMC_TEST0_DLL, A, 22, C, 10, B, 8),
- PINGROUP(KBCB, KBC, NAND, SDIO2, MIO, A, 21, C, 12, B, 10),
- PINGROUP(KBCC, KBC, NAND, TRACE, EMC_TEST1_DLL, B, 26, C, 14, B, 12),
- PINGROUP(KBCD, KBC, NAND, SDIO2, MIO, D, 10, G, 26, B, 14),
- PINGROUP(KBCE, KBC, NAND, OWR, RSVD, A, 26, A, 28, E, 2),
- PINGROUP(KBCF, KBC, NAND, TRACE, MIO, A, 27, A, 26, E, 0),
- PINGROUP(LCSN, DISPLAYA, DISPLAYB, SPI3, RSVD, C, 31, E, 12, D, 20),
- PINGROUP(LD0, DISPLAYA, DISPLAYB, XIO, RSVD, C, 0, F, 0, D, 12),
- PINGROUP(LD1, DISPLAYA, DISPLAYB, XIO, RSVD, C, 1, F, 2, D, 12),
- PINGROUP(LD10, DISPLAYA, DISPLAYB, XIO, RSVD, C, 10, F, 20, D, 12),
- PINGROUP(LD11, DISPLAYA, DISPLAYB, XIO, RSVD, C, 11, F, 22, D, 12),
- PINGROUP(LD12, DISPLAYA, DISPLAYB, XIO, RSVD, C, 12, F, 24, D, 12),
- PINGROUP(LD13, DISPLAYA, DISPLAYB, XIO, RSVD, C, 13, F, 26, D, 12),
- PINGROUP(LD14, DISPLAYA, DISPLAYB, XIO, RSVD, C, 14, F, 28, D, 12),
- PINGROUP(LD15, DISPLAYA, DISPLAYB, XIO, RSVD, C, 15, F, 30, D, 12),
- PINGROUP(LD16, DISPLAYA, DISPLAYB, XIO, RSVD, C, 16, G, 0, D, 12),
- PINGROUP(LD17, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 17, G, 2, D, 12),
- PINGROUP(LD2, DISPLAYA, DISPLAYB, XIO, RSVD, C, 2, F, 4, D, 12),
- PINGROUP(LD3, DISPLAYA, DISPLAYB, XIO, RSVD, C, 3, F, 6, D, 12),
- PINGROUP(LD4, DISPLAYA, DISPLAYB, XIO, RSVD, C, 4, F, 8, D, 12),
- PINGROUP(LD5, DISPLAYA, DISPLAYB, XIO, RSVD, C, 5, F, 10, D, 12),
- PINGROUP(LD6, DISPLAYA, DISPLAYB, XIO, RSVD, C, 6, F, 12, D, 12),
- PINGROUP(LD7, DISPLAYA, DISPLAYB, XIO, RSVD, C, 7, F, 14, D, 12),
- PINGROUP(LD8, DISPLAYA, DISPLAYB, XIO, RSVD, C, 8, F, 16, D, 12),
- PINGROUP(LD9, DISPLAYA, DISPLAYB, XIO, RSVD, C, 9, F, 18, D, 12),
- PINGROUP(LDC, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 30, E, 14, D, 20),
- PINGROUP(LDI, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 6, G, 16, D, 18),
- PINGROUP(LHP0, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 18, G, 10, D, 16),
- PINGROUP(LHP1, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 19, G, 4, D, 14),
- PINGROUP(LHP2, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 20, G, 6, D, 14),
- PINGROUP(LHS, DISPLAYA, DISPLAYB, XIO, RSVD, D, 7, E, 22, D, 22),
- PINGROUP(LM0, DISPLAYA, DISPLAYB, SPI3, RSVD, C, 24, E, 26, D, 22),
- PINGROUP(LM1, DISPLAYA, DISPLAYB, RSVD, CRT, C, 25, E, 28, D, 22),
- PINGROUP(LPP, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 8, G, 14, D, 18),
- PINGROUP(LPW0, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 3, E, 0, D, 20),
- PINGROUP(LPW1, DISPLAYA, DISPLAYB, RSVD, RSVD, D, 4, E, 2, D, 20),
- PINGROUP(LPW2, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 5, E, 4, D, 20),
- PINGROUP(LSC0, DISPLAYA, DISPLAYB, XIO, RSVD, C, 27, E, 18, D, 22),
- PINGROUP(LSC1, DISPLAYA, DISPLAYB, SPI3, HDMI, C, 28, E, 20, D, 20),
- PINGROUP(LSCK, DISPLAYA, DISPLAYB, SPI3, HDMI, C, 29, E, 16, D, 20),
- PINGROUP(LSDA, DISPLAYA, DISPLAYB, SPI3, HDMI, D, 1, E, 8, D, 20),
- PINGROUP(LSDI, DISPLAYA, DISPLAYB, SPI3, RSVD, D, 2, E, 6, D, 20),
- PINGROUP(LSPI, DISPLAYA, DISPLAYB, XIO, HDMI, D, 0, E, 10, D, 22),
- PINGROUP(LVP0, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 21, E, 30, D, 22),
- PINGROUP(LVP1, DISPLAYA, DISPLAYB, RSVD, RSVD, C, 22, G, 8, D, 16),
- PINGROUP(LVS, DISPLAYA, DISPLAYB, XIO, RSVD, C, 26, E, 24, D, 22),
- PINGROUP(OWC, OWR, RSVD, RSVD, RSVD, A, 31, B, 8, E, 30),
- PINGROUP(PMC, PWR_ON, PWR_INTR, RSVD, RSVD, A, 23, G, 18, N, -1),
- PINGROUP(PTA, I2C2, HDMI, GMI, RSVD, A, 24, G, 22, B, 4),
- PINGROUP(RM, I2C, RSVD, RSVD, RSVD, A, 25, A, 14, B, 0),
- PINGROUP(SDB, UARTA, PWM, SDIO3, SPI2, D, 15, D, 10, N, -1),
- PINGROUP(SDC, PWM, TWC, SDIO3, SPI3, B, 1, D, 12, D, 28),
- PINGROUP(SDD, UARTA, PWM, SDIO3, SPI3, B, 2, D, 14, D, 30),
- PINGROUP(SDIO1, SDIO1, RSVD, UARTE, UARTA, A, 30, A, 30, E, 18),
- PINGROUP(SLXA, PCIE, SPI4, SDIO3, SPI2, B, 3, B, 6, B, 22),
- PINGROUP(SLXC, SPDIF, SPI4, SDIO3, SPI2, B, 5, B, 10, B, 26),
- PINGROUP(SLXD, SPDIF, SPI4, SDIO3, SPI2, B, 6, B, 12, B, 28),
- PINGROUP(SLXK, PCIE, SPI4, SDIO3, SPI2, B, 7, B, 14, B, 30),
- PINGROUP(SPDI, SPDIF, RSVD, I2C, SDIO2, B, 8, D, 8, B, 16),
- PINGROUP(SPDO, SPDIF, RSVD, I2C, SDIO2, B, 9, D, 6, B, 18),
- PINGROUP(SPIA, SPI1, SPI2, SPI3, GMI, B, 10, D, 30, C, 4),
- PINGROUP(SPIB, SPI1, SPI2, SPI3, GMI, B, 11, D, 28, C, 6),
- PINGROUP(SPIC, SPI1, SPI2, SPI3, GMI, B, 12, D, 26, C, 8),
- PINGROUP(SPID, SPI2, SPI1, SPI2_ALT, GMI, B, 13, D, 24, C, 10),
- PINGROUP(SPIE, SPI2, SPI1, SPI2_ALT, GMI, B, 14, D, 22, C, 12),
- PINGROUP(SPIF, SPI3, SPI1, SPI2, RSVD, B, 15, D, 20, C, 14),
- PINGROUP(SPIG, SPI3, SPI2, SPI2_ALT, I2C, B, 16, D, 18, C, 16),
- PINGROUP(SPIH, SPI3, SPI2, SPI2_ALT, I2C, B, 17, D, 16, C, 18),
- PINGROUP(UAA, SPI3, MIPI_HS, UARTA, ULPI, B, 18, A, 0, D, 0),
- PINGROUP(UAB, SPI2, MIPI_HS, UARTA, ULPI, B, 19, A, 2, D, 2),
- PINGROUP(UAC, OWR, RSVD, RSVD, RSVD, B, 20, A, 4, D, 4),
- PINGROUP(UAD, IRDA, SPDIF, UARTA, SPI4, B, 21, A, 6, D, 6),
- PINGROUP(UCA, UARTC, RSVD, GMI, RSVD, B, 22, B, 16, D, 8),
- PINGROUP(UCB, UARTC, PWM, GMI, RSVD, B, 23, B, 18, D, 10),
- PINGROUP(UDA, SPI1, RSVD, UARTD, ULPI, D, 13, A, 8, E, 16),
- /* these pin groups only have pullup and pull down control */
- PINGROUP(CK32, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 14),
- PINGROUP(DDRC, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, D, 26),
- PINGROUP(PMCA, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 4),
- PINGROUP(PMCB, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 6),
- PINGROUP(PMCC, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 8),
- PINGROUP(PMCD, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 10),
- PINGROUP(PMCE, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, E, 12),
- PINGROUP(XM2C, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, C, 30),
- PINGROUP(XM2D, RSVD, RSVD, RSVD, RSVD, N, -1, N, -1, C, 28),
-};
+static const struct tegra_pingroup_desc *const pingroups = tegra_soc_pingroups;
+static const struct tegra_drive_pingroup_desc *const drive_pingroups = tegra_soc_drive_pingroups;
static char *tegra_mux_names[TEGRA_MAX_MUX] = {
[TEGRA_MUX_AHB_CLK] = "AHB_CLK",
[TEGRA_MUX_VI] = "VI",
[TEGRA_MUX_VI_SENSOR_CLK] = "VI_SENSOR_CLK",
[TEGRA_MUX_XIO] = "XIO",
-};
-
-struct tegra_drive_pingroup_desc {
- const char *name;
- s16 reg;
-};
-
-#define DRIVE_PINGROUP(pg_name, r) \
- [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
- .name = #pg_name, \
- .reg = r \
- }
-
-static const struct tegra_drive_pingroup_desc drive_pingroups[TEGRA_MAX_PINGROUP] = {
- DRIVE_PINGROUP(AO1, 0x868),
- DRIVE_PINGROUP(AO2, 0x86c),
- DRIVE_PINGROUP(AT1, 0x870),
- DRIVE_PINGROUP(AT2, 0x874),
- DRIVE_PINGROUP(CDEV1, 0x878),
- DRIVE_PINGROUP(CDEV2, 0x87c),
- DRIVE_PINGROUP(CSUS, 0x880),
- DRIVE_PINGROUP(DAP1, 0x884),
- DRIVE_PINGROUP(DAP2, 0x888),
- DRIVE_PINGROUP(DAP3, 0x88c),
- DRIVE_PINGROUP(DAP4, 0x890),
- DRIVE_PINGROUP(DBG, 0x894),
- DRIVE_PINGROUP(LCD1, 0x898),
- DRIVE_PINGROUP(LCD2, 0x89c),
- DRIVE_PINGROUP(SDMMC2, 0x8a0),
- DRIVE_PINGROUP(SDMMC3, 0x8a4),
- DRIVE_PINGROUP(SPI, 0x8a8),
- DRIVE_PINGROUP(UAA, 0x8ac),
- DRIVE_PINGROUP(UAB, 0x8b0),
- DRIVE_PINGROUP(UART2, 0x8b4),
- DRIVE_PINGROUP(UART3, 0x8b8),
- DRIVE_PINGROUP(VI1, 0x8bc),
- DRIVE_PINGROUP(VI2, 0x8c0),
- DRIVE_PINGROUP(XM2A, 0x8c4),
- DRIVE_PINGROUP(XM2C, 0x8c8),
- DRIVE_PINGROUP(XM2D, 0x8cc),
- DRIVE_PINGROUP(XM2CLK, 0x8d0),
- DRIVE_PINGROUP(MEMCOMP, 0x8d4),
+ [TEGRA_MUX_SAFE] = "<safe>",
};
static const char *tegra_drive_names[TEGRA_MAX_DRIVE] = {
writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
}
-int tegra_pinmux_set_func(enum tegra_pingroup pg, enum tegra_mux_func func)
+static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
{
int mux = -1;
int i;
unsigned long reg;
unsigned long flags;
+ enum tegra_pingroup pg = config->pingroup;
+ enum tegra_mux_func func = config->func;
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].mux_reg == REG_N)
+ if (pingroups[pg].mux_reg < 0)
return -EINVAL;
if (func < 0)
return -ERANGE;
+ if (func == TEGRA_MUX_SAFE)
+ func = pingroups[pg].func_safe;
+
if (func & TEGRA_MUX_RSVD) {
mux = func & 0x3;
} else {
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_PP_MUX_CTL(pingroups[pg].mux_reg));
+ reg = pg_readl(pingroups[pg].mux_reg);
reg &= ~(0x3 << pingroups[pg].mux_bit);
reg |= mux << pingroups[pg].mux_bit;
- pg_writel(reg, TEGRA_PP_MUX_CTL(pingroups[pg].mux_reg));
+ pg_writel(reg, pingroups[pg].mux_reg);
spin_unlock_irqrestore(&mux_lock, flags);
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].tri_reg == REG_N)
+ if (pingroups[pg].tri_reg < 0)
return -EINVAL;
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_TRI_STATE(pingroups[pg].tri_reg));
+ reg = pg_readl(pingroups[pg].tri_reg);
reg &= ~(0x1 << pingroups[pg].tri_bit);
if (tristate)
reg |= 1 << pingroups[pg].tri_bit;
- pg_writel(reg, TEGRA_TRI_STATE(pingroups[pg].tri_reg));
+ pg_writel(reg, pingroups[pg].tri_reg);
spin_unlock_irqrestore(&mux_lock, flags);
if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
return -ERANGE;
- if (pingroups[pg].pupd_reg == REG_N)
+ if (pingroups[pg].pupd_reg < 0)
return -EINVAL;
if (pupd != TEGRA_PUPD_NORMAL &&
spin_lock_irqsave(&mux_lock, flags);
- reg = pg_readl(TEGRA_PP_PU_PD(pingroups[pg].pupd_reg));
+ reg = pg_readl(pingroups[pg].pupd_reg);
reg &= ~(0x3 << pingroups[pg].pupd_bit);
reg |= pupd << pingroups[pg].pupd_bit;
- pg_writel(reg, TEGRA_PP_PU_PD(pingroups[pg].pupd_reg));
+ pg_writel(reg, pingroups[pg].pupd_reg);
spin_unlock_irqrestore(&mux_lock, flags);
return 0;
}
-void tegra_pinmux_config_pingroup(enum tegra_pingroup pingroup,
- enum tegra_mux_func func,
- enum tegra_pullupdown pupd,
- enum tegra_tristate tristate)
+static void tegra_pinmux_config_pingroup(const struct tegra_pingroup_config *config)
{
+ enum tegra_pingroup pingroup = config->pingroup;
+ enum tegra_mux_func func = config->func;
+ enum tegra_pullupdown pupd = config->pupd;
+ enum tegra_tristate tristate = config->tristate;
int err;
- if (pingroups[pingroup].mux_reg != REG_N) {
- err = tegra_pinmux_set_func(pingroup, func);
+ if (pingroups[pingroup].mux_reg >= 0) {
+ err = tegra_pinmux_set_func(config);
if (err < 0)
pr_err("pinmux: can't set pingroup %s func to %s: %d\n",
pingroup_name(pingroup), func_name(func), err);
}
- if (pingroups[pingroup].pupd_reg != REG_N) {
+ if (pingroups[pingroup].pupd_reg >= 0) {
err = tegra_pinmux_set_pullupdown(pingroup, pupd);
if (err < 0)
pr_err("pinmux: can't set pingroup %s pullupdown to %s: %d\n",
pingroup_name(pingroup), pupd_name(pupd), err);
}
- if (pingroups[pingroup].tri_reg != REG_N) {
+ if (pingroups[pingroup].tri_reg >= 0) {
err = tegra_pinmux_set_tristate(pingroup, tristate);
if (err < 0)
pr_err("pinmux: can't set pingroup %s tristate to %s: %d\n",
}
}
-
-
-void tegra_pinmux_config_table(struct tegra_pingroup_config *config, int len)
+/* Apply a board pinmux table: mux, pull-up/down and tristate per entry. */
+void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int len)
 {
 int i;
 for (i = 0; i < len; i++)
- tegra_pinmux_config_pingroup(config[i].pingroup,
- config[i].func,
- config[i].pupd,
- config[i].tristate);
+ tegra_pinmux_config_pingroup(&config[i]);
 }
static const char *drive_pinmux_name(enum tegra_drive_pingroup pg)
config[i].slew_falling);
}
+/*
+ * Force every pingroup in 'config' to its designated safe mux function
+ * (func_safe from the pingroup table), leaving pulls and tristates
+ * untouched.  Entries with an out-of-range pingroup are skipped with a
+ * WARN; per-entry set_func failures are logged but do not abort.
+ */
+void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *config,
+	int len)
+{
+	int i;
+	struct tegra_pingroup_config c;
+
+	for (i = 0; i < len; i++) {
+		int err;
+		c = config[i];
+		if (c.pingroup < 0 || c.pingroup >= TEGRA_MAX_PINGROUP) {
+			WARN_ON(1);
+			continue;
+		}
+		c.func = pingroups[c.pingroup].func_safe;
+		err = tegra_pinmux_set_func(&c);
+		if (err < 0)
+			pr_err("%s: tegra_pinmux_set_func returned %d setting "
+			       "%s to %s\n", __func__, err,
+			       pingroup_name(c.pingroup), func_name(c.func));
+	}
+}
+
+/*
+ * Apply only the mux function of each table entry (no pull-up/down or
+ * tristate changes).  Out-of-range pingroups are skipped with a WARN;
+ * per-entry failures are logged and the loop continues.
+ */
+void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config,
+	int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		int err;
+		if (config[i].pingroup < 0 ||
+		    config[i].pingroup >= TEGRA_MAX_PINGROUP) {
+			WARN_ON(1);
+			continue;
+		}
+		err = tegra_pinmux_set_func(&config[i]);
+		if (err < 0)
+			pr_err("%s: tegra_pinmux_set_func returned %d setting "
+			       "%s to %s\n", __func__, err,
+			       pingroup_name(config[i].pingroup),
+			       func_name(config[i].func));
+	}
+}
+
+/*
+ * Set the tristate of every pingroup in 'config' to 'tristate',
+ * ignoring the per-entry tristate field.  Groups without a tristate
+ * control (tri_reg < 0) are skipped; errors are logged, not fatal.
+ */
+void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *config,
+	int len, enum tegra_tristate tristate)
+{
+	int i;
+	int err;
+	enum tegra_pingroup pingroup;
+
+	for (i = 0; i < len; i++) {
+		pingroup = config[i].pingroup;
+		if (pingroups[pingroup].tri_reg >= 0) {
+			err = tegra_pinmux_set_tristate(pingroup, tristate);
+			if (err < 0)
+				pr_err("pinmux: can't set pingroup %s tristate"
+					" to %s: %d\n", pingroup_name(pingroup),
+					tri_name(tristate), err);
+		}
+	}
+}
+
+/*
+ * Set the pull-up/down of every pingroup in 'config' to 'pupd',
+ * ignoring the per-entry pupd field.  Groups without a pull control
+ * (pupd_reg < 0) are skipped; errors are logged, not fatal.
+ */
+void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
+	int len, enum tegra_pullupdown pupd)
+{
+	int i;
+	int err;
+	enum tegra_pingroup pingroup;
+
+	for (i = 0; i < len; i++) {
+		pingroup = config[i].pingroup;
+		if (pingroups[pingroup].pupd_reg >= 0) {
+			err = tegra_pinmux_set_pullupdown(pingroup, pupd);
+			if (err < 0)
+				pr_err("pinmux: can't set pingroup %s pullupdown"
+					" to %s: %d\n", pingroup_name(pingroup),
+					pupd_name(pupd), err);
+		}
+	}
+}
#ifdef CONFIG_DEBUG_FS
len = strlen(pingroups[i].name);
dbg_pad_field(s, 5 - len);
- if (pingroups[i].mux_reg == REG_N) {
+ if (pingroups[i].mux_reg < 0) {
seq_printf(s, "TEGRA_MUX_NONE");
len = strlen("NONE");
} else {
- mux = (pg_readl(TEGRA_PP_MUX_CTL(pingroups[i].mux_reg)) >>
+ mux = (pg_readl(pingroups[i].mux_reg) >>
pingroups[i].mux_bit) & 0x3;
if (pingroups[i].funcs[mux] == TEGRA_MUX_RSVD) {
seq_printf(s, "TEGRA_MUX_RSVD%1lu", mux+1);
}
dbg_pad_field(s, 13-len);
- if (pingroups[i].mux_reg == REG_N) {
+ if (pingroups[i].pupd_reg < 0) {
seq_printf(s, "TEGRA_PUPD_NORMAL");
len = strlen("NORMAL");
} else {
- pupd = (pg_readl(TEGRA_PP_PU_PD(pingroups[i].pupd_reg)) >>
+ pupd = (pg_readl(pingroups[i].pupd_reg) >>
pingroups[i].pupd_bit) & 0x3;
seq_printf(s, "TEGRA_PUPD_%s", pupd_name(pupd));
len = strlen(pupd_name(pupd));
}
dbg_pad_field(s, 9 - len);
- if (pingroups[i].tri_reg == REG_N) {
+ if (pingroups[i].tri_reg < 0) {
seq_printf(s, "TEGRA_TRI_NORMAL");
} else {
- tri = (pg_readl(TEGRA_TRI_STATE(pingroups[i].tri_reg)) >>
+ tri = (pg_readl(pingroups[i].tri_reg) >>
pingroups[i].tri_bit) & 0x1;
seq_printf(s, "TEGRA_TRI_%s", tri_name(tri));
* Copyright (C) 2009 Palm
* All Rights Reserved
*
+ * Copyright (C) 2010 NVIDIA Corporation
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/localtimer.h>
+#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
+#include <asm/cpu.h>
+#include <asm/mmu_context.h>
#include <mach/iomap.h>
+#include "power.h"
+
extern void tegra_secondary_startup(void);
static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct completion, cpu_killed);
+extern void tegra_hotplug_startup(void);
+#endif
+
+static DECLARE_BITMAP(cpu_init_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_init_mask = to_cpumask(cpu_init_bits);
+#define cpu_init_map (*(cpumask_t *)cpu_init_mask)
+
#define EVP_CPU_RESET_VECTOR \
(IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
+ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();
-
-	/*
-	 * if any interrupts are already enabled for the primary
-	 * core (e.g. timer irq), then they will not have been enabled
-	 * for us: do so
-	 */
	gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
+#ifdef CONFIG_HOTPLUG_CPU
+	/* record that this CPU has booted at least once, so later boots
+	 * take the tegra_hotplug_startup path, and re-arm the kill
+	 * completion that platform_cpu_kill() waits on */
+	cpu_set(cpu, cpu_init_map);
+	INIT_COMPLETION(per_cpu(cpu_killed, cpu));
+#endif
	spin_unlock(&boot_lock);
}
*/
spin_lock(&boot_lock);
-
/* set the reset vector to point to the secondary_startup routine */
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpumask_test_cpu(cpu, cpu_init_mask))
+ boot_vector = virt_to_phys(tegra_hotplug_startup);
+ else
+#endif
+ boot_vector = virt_to_phys(tegra_secondary_startup);
+
+ smp_wmb();
- boot_vector = virt_to_phys(tegra_secondary_startup);
old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
writel(boot_vector, EVP_CPU_RESET_VECTOR);
- /* enable cpu clock on cpu1 */
+ /* enable cpu clock on cpu */
reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
- writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~(1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
- reg = (1<<13) | (1<<9) | (1<<5) | (1<<1);
+ reg = 0x1111<<cpu;
writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
- smp_wmb();
- flush_cache_all();
-
/* unhalt the cpu */
- writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14);
+ writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8*(cpu-1));
- timeout = jiffies + (1 * HZ);
+ timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
break;
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
+#ifdef CONFIG_HOTPLUG_CPU
+ for_each_present_cpu(i) {
+ init_completion(&per_cpu(cpu_killed, i));
+ }
+#endif
+
/*
* Initialise the SCU if there are more than one CPU and let
* them know where to start. Note that, on modern versions of
scu_enable(scu_base);
}
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+extern void vfp_sync_state(struct thread_info *thread);
+
+void __cpuinit secondary_start_kernel(void);
+
+/*
+ * Called on a surviving CPU to reap @cpu.  Waits up to 100 jiffies for
+ * the dying CPU to signal cpu_killed; after a clean shutdown, spins
+ * until the reset status bit for that core latches, otherwise forces
+ * the core into reset and parks its flow controller in WAIT_EVENT
+ * mode.  Finally gates the dead CPU's clock.  Returns nonzero for a
+ * clean shutdown, 0 when it had to be forced.
+ */
+int platform_cpu_kill(unsigned int cpu)
+{
+	unsigned int reg;
+	int e;
+
+	e = wait_for_completion_timeout(&per_cpu(cpu_killed, cpu), 100);
+	printk(KERN_NOTICE "CPU%u: %s shutdown\n", cpu, (e) ? "clean":"forced");
+
+	if (e) {
+		/* wait for the core to put itself into reset */
+		do {
+			reg = readl(CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+			cpu_relax();
+		} while (!(reg & (1<<cpu)));
+	} else {
+		writel(0x1111<<cpu, CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+		/* put flow controller in WAIT_EVENT mode */
+		writel(2<<29, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE)+0x14 + 0x8*(cpu-1));
+	}
+	/* boot_lock serialises with boot_secondary(), which writes the
+	 * same clock-enable register */
+	spin_lock(&boot_lock);
+	reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+	writel(reg | (1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+	spin_unlock(&boot_lock);
+	return e;
+}
+
+/*
+ * Runs on the dying CPU itself: leave the GIC, signal
+ * platform_cpu_kill() via cpu_killed, flush caches, then enter
+ * __cortex_a9_save() to power down.  Execution resumes past that call
+ * (via __cortex_a9_restore) only when the CPU is brought online again.
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+#ifdef DEBUG
+	unsigned int this_cpu = hard_smp_processor_id();
+
+	if (cpu != this_cpu) {
+		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+		       this_cpu, cpu);
+		BUG();
+	}
+#endif
+
+	gic_cpu_exit(0);
+	barrier();
+	complete(&per_cpu(cpu_killed, cpu));
+	flush_cache_all();
+	barrier();
+	__cortex_a9_save(0);
+
+	/* return happens from __cortex_a9_restore */
+	barrier();
+	/* NOTE(review): writes the CPU number into the reset vector,
+	 * presumably as a wakeup marker for the boot path -- confirm
+	 * against tegra_lp2_startup / boot_secondary */
+	writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
+}
+
+/*
+ * Veto hot-unplug of CPU 0, and of any CPU while tegra_context_area is
+ * unset (presumably the save area needed by __cortex_a9_save() -- see
+ * power.h; confirm).
+ */
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shutdown (it is still too special
+	 * e.g. clock tick interrupts)
+	 */
+	if (unlikely(!tegra_context_area))
+		return -ENXIO;
+
+	return cpu == 0 ? -EPERM : 0;
+}
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/power-macros.S
+ *
+ * Assembly macros useful for power state save / restore routines
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* returns the ID of the current processor */
+.macro cpu_id, rd
+	@ low 4 bits of MPIDR (CP15 c0, c0, 5) = CPU number in cluster
+	mrc p15, 0, \rd, c0, c0, 5
+	and \rd, \rd, #0xF
+.endm
+
+
+/* load a 32-bit constant (uses ARMv7 movw/movt) */
+.macro mov32, reg, val
+	movw \reg, #:lower16:\val
+	movt \reg, #:upper16:\val
+.endm
+
+/* waits until the microsecond counter (base) ticks, for exact timing loops */
+/* on exit both \rd and \tmp hold the value read before the tick */
+.macro wait_for_us, rd, base, tmp
+	ldr \rd, [\base]
+1001:	ldr \tmp, [\base]
+	cmp \rd, \tmp
+	beq 1001b
+	mov \tmp, \rd
+.endm
+
+/* waits until the microsecond counter (base) is >= rn */
+/* loops while (counter - rn) has the sign bit set, so it is safe
+ * across 32-bit counter wraparound */
+.macro wait_until, rn, base, tmp
+1002:	ldr \tmp, [\base]
+	sub \tmp, \tmp, \rn
+	ands \tmp, \tmp, #0x80000000
+	dmb
+	bne 1002b
+.endm
+
+/* Enable Coresight access on cpu */
+/* 0xC5ACCE55 is the CoreSight lock-access key; the macro name keeps
+ * the historical "coresite" spelling since callers use it */
+.macro enable_coresite, tmp
+	mov32 \tmp, 0xC5ACCE55
+	mcr p14, 0, \tmp, c7, c12, 6
+.endm
--- /dev/null
+/*
+ * arch/arm/mach-tegra/power.h
+ *
+ * Declarations for power state transition code
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_POWER_H
+#define __MACH_TEGRA_POWER_H
+
+#include <asm/page.h>
+
+#define TEGRA_POWER_SDRAM_SELFREFRESH 0x400 /* SDRAM is in self-refresh */
+
+#define TEGRA_POWER_PWRREQ_POLARITY 0x1 /* core power request polarity */
+#define TEGRA_POWER_PWRREQ_OE 0x2 /* core power request enable */
+#define TEGRA_POWER_SYSCLK_POLARITY 0x4 /* sys clk polarity */
+#define TEGRA_POWER_SYSCLK_OE 0x8 /* system clock enable */
+#define TEGRA_POWER_PWRGATE_DIS 0x10 /* power gate disabled */
+#define TEGRA_POWER_EFFECT_LP0 0x40 /* enter LP0 when CPU pwr gated */
+#define TEGRA_POWER_CPU_PWRREQ_POLARITY 0x80 /* CPU power request polarity */
+#define TEGRA_POWER_CPU_PWRREQ_OE 0x100 /* CPU power request enable */
+#define TEGRA_POWER_PMC_SHIFT 8
+#define TEGRA_POWER_PMC_MASK 0x1ff
+
+/* CPU Context area (1KB per CPU) */
+#define CONTEXT_SIZE_BYTES_SHIFT 10
+#define CONTEXT_SIZE_BYTES (1<<CONTEXT_SIZE_BYTES_SHIFT)
+
+/* layout of IRAM used for LP1 save & restore */
+/* parenthesized so the expansion is safe inside arithmetic or cast
+ * expressions (the unparenthesized form binds wrongly after a '*'
+ * or '-') */
+#define TEGRA_IRAM_CODE_AREA	(TEGRA_IRAM_BASE + SZ_4K)
+#define TEGRA_IRAM_CODE_SIZE	SZ_4K
+
+#ifndef __ASSEMBLY__
+extern void *tegra_context_area;
+
+u64 tegra_rtc_read_ms(void);
+void tegra_lp2_set_trigger(unsigned long cycles);
+unsigned long tegra_lp2_timer_remain(void);
+void __cortex_a9_save(unsigned int mode);
+void __cortex_a9_restore(void);
+void __shut_off_mmu(void);
+void tegra_lp2_startup(void);
+unsigned int tegra_suspend_lp2(unsigned int us);
+void tegra_hotplug_startup(void);
+#endif
+
+#endif
--- /dev/null
+/*
+ * drivers/powergate/tegra-powergate.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/powergate.h>
+
+#define PWRGATE_TOGGLE 0x30
+#define PWRGATE_TOGGLE_START (1 << 8)
+
+#define REMOVE_CLAMPING 0x34
+
+#define PWRGATE_STATUS 0x38
+
+static DEFINE_SPINLOCK(tegra_powergate_lock);
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+
+/* Read one 32-bit PMC register at byte offset @reg. */
+static u32 pmc_read(unsigned long reg)
+{
+	void __iomem *addr = pmc + reg;
+
+	return readl(addr);
+}
+
+/* Write one 32-bit PMC register at byte offset @reg. */
+static void pmc_write(u32 val, unsigned long reg)
+{
+	void __iomem *addr = pmc + reg;
+
+	writel(val, addr);
+}
+
+/*
+ * Flip partition @id to @new_state by pulsing the PMC toggle register.
+ * Returns -EINVAL if the partition is already in the requested state
+ * (the register toggles rather than sets, so a redundant pulse would
+ * move it the wrong way).  Serialised by tegra_powergate_lock.
+ */
+static int tegra_powergate_set(int id, bool new_state)
+{
+	bool status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tegra_powergate_lock, flags);
+
+	status = pmc_read(PWRGATE_STATUS) & (1 << id);
+
+	if (status == new_state) {
+		spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+		return -EINVAL;
+	}
+
+	pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+	spin_unlock_irqrestore(&tegra_powergate_lock, flags);
+
+	return 0;
+}
+
+/* Power on partition @id; -EINVAL for an out-of-range id. */
+int tegra_powergate_power_on(int id)
+{
+	if (id >= 0 && id < TEGRA_NUM_POWERGATE)
+		return tegra_powergate_set(id, true);
+
+	return -EINVAL;
+}
+
+/* Power off partition @id; -EINVAL for an out-of-range id. */
+int tegra_powergate_power_off(int id)
+{
+	if (id >= 0 && id < TEGRA_NUM_POWERGATE)
+		return tegra_powergate_set(id, false);
+
+	return -EINVAL;
+}
+
+/*
+ * Return true if partition @id is currently powered.  An out-of-range
+ * id reads as not powered: the previous code returned -EINVAL here,
+ * which a bool return type silently converted to 'true'.
+ */
+bool tegra_powergate_is_powered(int id)
+{
+	u32 status;
+
+	if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+		return false;
+
+	status = pmc_read(PWRGATE_STATUS) & (1 << id);
+	return !!status;
+}
+
+/*
+ * Remove the I/O clamps from a freshly powered partition so its
+ * signals can drive again.  Returns 0 on success, -EINVAL for an
+ * out-of-range id.
+ */
+int tegra_powergate_remove_clamping(int id)
+{
+	u32 mask;
+
+	if (id < 0 || id >= TEGRA_NUM_POWERGATE)
+		return -EINVAL;
+
+	/*
+	 * Tegra 2 has a bug where PCIE and VDE clamping masks are
+	 * swapped relatively to the partition ids
+	 */
+	if (id == TEGRA_POWERGATE_VDEC)
+		mask = (1 << TEGRA_POWERGATE_PCIE);
+	else if (id == TEGRA_POWERGATE_PCIE)
+		mask = (1 << TEGRA_POWERGATE_VDEC);
+	else
+		mask = (1 << id);
+
+	pmc_write(mask, REMOVE_CLAMPING);
+
+	return 0;
+}
+
+/* Must be called with clk disabled, and returns with clk enabled */
+/* Assert reset, enable the module clock, then release reset, with
+ * 10 us settle delays between each step.  Returns clk_enable()'s
+ * error (with reset still asserted) on failure. */
+static int tegra_powergate_reset_module(struct clk *clk)
+{
+	int ret;
+
+	tegra_periph_reset_assert(clk);
+
+	udelay(10);
+
+	ret = clk_enable(clk);
+	if (ret)
+		return ret;
+
+	udelay(10);
+
+	tegra_periph_reset_deassert(clk);
+
+	return 0;
+}
+
+/* Must be called with clk disabled, and returns with clk enabled */
+/*
+ * Full bring-up sequence for partition @id: if it is already powered,
+ * just reset-cycle the module; otherwise hold reset, ungate power,
+ * enable the clock, remove the I/O clamps, and release reset.  On any
+ * failure the steps already taken are unwound (goto cleanup chain) and
+ * the partition is powered back off.
+ */
+int tegra_powergate_sequence_power_up(int id, struct clk *clk)
+{
+	int ret;
+
+	if (tegra_powergate_is_powered(id))
+		return tegra_powergate_reset_module(clk);
+
+	tegra_periph_reset_assert(clk);
+
+	ret = tegra_powergate_power_on(id);
+	if (ret)
+		goto err_power;
+
+	ret = clk_enable(clk);
+	if (ret)
+		goto err_clk;
+
+	udelay(10);
+
+	ret = tegra_powergate_remove_clamping(id);
+	if (ret)
+		goto err_clamp;
+
+	udelay(10);
+	tegra_periph_reset_deassert(clk);
+
+	return 0;
+
+err_clamp:
+	clk_disable(clk);
+err_clk:
+	tegra_powergate_power_off(id);
+err_power:
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *powergate_name[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+};
+
+/* debugfs: print a two-column table of partition name / powered state */
+static int powergate_show(struct seq_file *s, void *data)
+{
+	int i;
+
+	seq_printf(s, " powergate powered\n");
+	seq_printf(s, "------------------\n");
+
+	for (i = 0; i < TEGRA_NUM_POWERGATE; i++)
+		seq_printf(s, " %9s %7s\n", powergate_name[i],
+			tegra_powergate_is_powered(i) ? "yes" : "no");
+	return 0;
+}
+
+/* debugfs open: route reads through powergate_show() via seq_file */
+static int powergate_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, powergate_show, inode->i_private);
+}
+
+static const struct file_operations powergate_fops = {
+ .open = powergate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Create /sys/kernel/debug/powergate (read-only, root of debugfs). */
+static int __init powergate_debugfs_init(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+		&powergate_fops);
+	if (!d)
+		return -ENOMEM;
+
+	return 0;
+}
+
+late_initcall(powergate_debugfs_init);
+
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/pwm.c
+ *
+ * Tegra pulse-width-modulation controller driver
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ * Based on arch/arm/plat-mxc/pwm.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define PWM_ENABLE (1 << 31)
+#define PWM_DUTY_WIDTH 8
+#define PWM_DUTY_SHIFT 16
+#define PWM_SCALE_WIDTH 13
+#define PWM_SCALE_SHIFT 0
+
+/* Per-controller state; one instance per tegra_pwm platform device. */
+struct pwm_device {
+	struct list_head node;		/* entry in the global pwm_list */
+	struct platform_device *pdev;	/* owning platform device */
+
+	const char *label;		/* requester's label, NULL when free */
+	struct clk *clk;		/* controller clock (may be shared) */
+
+	int clk_enb;			/* 1 while pwm_enable() holds the clk */
+	void __iomem *mmio_base;	/* the single 32-bit control register */
+
+	unsigned int in_use;		/* claimed via pwm_request() */
+	unsigned int id;		/* pdev->id, lookup key for pwm_request() */
+};
+
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_list);
+
+/* Briefly enable the controller clock, write the control register,
+ * and gate the clock again.  Returns 0 or the clk_enable() error. */
+static inline int pwm_writel(struct pwm_device *pwm, unsigned long val)
+{
+	int rc;
+
+	rc = clk_enable(pwm->clk);
+	if (WARN_ON(rc))
+		return rc;
+	writel(val, pwm->mmio_base);
+	clk_disable(pwm->clk);
+	return 0;
+}
+
+/*
+ * Program duty cycle and period.  The duty cycle becomes a fixed-point
+ * fraction of (1 << PWM_DUTY_WIDTH) ticks; the period selects a 13-bit
+ * clock prescaler.  Returns -EINVAL for an unrepresentable request.
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+	unsigned long long c;
+	unsigned long rate, hz;
+	u32 val = 0;
+
+	/* reject nonsense requests up front: period_ns == 0 would divide
+	 * by zero below, and duty_ns > period_ns would overflow the
+	 * 8-bit duty field */
+	if (period_ns <= 0 || duty_ns < 0 || duty_ns > period_ns)
+		return -EINVAL;
+
+	/* convert from duty_ns / period_ns to a fixed number of duty
+	 * ticks per (1 << PWM_DUTY_WIDTH) cycles; widen before the
+	 * multiply so duty_ns * 255 cannot overflow int */
+	c = (unsigned long long)duty_ns * ((1 << PWM_DUTY_WIDTH) - 1);
+	do_div(c, period_ns);
+
+	val = (u32)c << PWM_DUTY_SHIFT;
+
+	/* compute the prescaler value for which (1 << PWM_DUTY_WIDTH)
+	 * cycles at the PWM clock rate will take period_ns nanoseconds. */
+	rate = clk_get_rate(pwm->clk) >> PWM_DUTY_WIDTH;
+	hz = 1000000000ul / period_ns;
+	if (!hz)	/* period longer than 1 s: avoid divide by zero */
+		return -EINVAL;
+
+	rate = (rate + (hz / 2)) / hz;
+
+	if (rate >> PWM_SCALE_WIDTH)
+		return -EINVAL;
+
+	val |= (rate << PWM_SCALE_SHIFT);
+
+	/* the struct clk may be shared across multiple PWM devices, so
+	 * only enable the PWM if this device has been enabled */
+	if (pwm->clk_enb)
+		val |= PWM_ENABLE;
+
+	return pwm_writel(pwm, val);
+}
+EXPORT_SYMBOL(pwm_config);
+
+/*
+ * Enable PWM output: take a clock reference (held until pwm_disable)
+ * and set the ENABLE bit.  Idempotent: a second call while enabled is
+ * a no-op.  Returns 0 or the clk_enable() error.
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int rc = 0;
+
+	mutex_lock(&pwm_lock);
+	if (!pwm->clk_enb) {
+		rc = clk_enable(pwm->clk);
+		if (!rc) {
+			u32 val = readl(pwm->mmio_base);
+			writel(val | PWM_ENABLE, pwm->mmio_base);
+			pwm->clk_enb = 1;
+		}
+	}
+	mutex_unlock(&pwm_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+/*
+ * Disable PWM output: clear the ENABLE bit and drop the clock
+ * reference taken by pwm_enable().  Warns (but does nothing) when
+ * called on an already-disabled PWM.
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	mutex_lock(&pwm_lock);
+	if (pwm->clk_enb) {
+		u32 val = readl(pwm->mmio_base);
+		writel(val & ~PWM_ENABLE, pwm->mmio_base);
+		clk_disable(pwm->clk);
+		pwm->clk_enb = 0;
+	} else
+		dev_warn(&pwm->pdev->dev, "%s called on disabled PWM\n",
+			 __func__);
+	mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_disable);
+
+/*
+ * Claim the PWM channel with id @pwm_id.  Returns the device, or
+ * ERR_PTR(-ENOENT) if no such channel is registered, or
+ * ERR_PTR(-EBUSY) if it is already claimed.
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	struct pwm_device *dev;
+	struct pwm_device *match = NULL;
+
+	mutex_lock(&pwm_lock);
+
+	list_for_each_entry(dev, &pwm_list, node) {
+		if (dev->id == pwm_id) {
+			match = dev;
+			break;
+		}
+	}
+
+	if (!match) {
+		match = ERR_PTR(-ENOENT);
+	} else if (match->in_use) {
+		match = ERR_PTR(-EBUSY);
+	} else {
+		match->in_use = 1;
+		match->label = label;
+	}
+
+	mutex_unlock(&pwm_lock);
+
+	return match;
+}
+EXPORT_SYMBOL(pwm_request);
+
+/*
+ * Release a channel claimed by pwm_request().  Warns when the channel
+ * was not in use.  Does not disable the output or the clock.
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	mutex_lock(&pwm_lock);
+	if (pwm->in_use) {
+		pwm->in_use = 0;
+		pwm->label = NULL;
+	} else
+		dev_warn(&pwm->pdev->dev, "PWM device already freed\n");
+
+	mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_free);
+
+/*
+ * Bind one PWM controller instance: acquire its clock, claim and map
+ * its single control register, and add it to pwm_list so pwm_request()
+ * can find it by pdev->id.  Errors unwind via the goto cleanup chain.
+ */
+static int tegra_pwm_probe(struct platform_device *pdev)
+{
+	struct pwm_device *pwm;
+	struct resource *r;
+	int ret;
+
+	pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
+	if (!pwm) {
+		dev_err(&pdev->dev, "failed to allocate memory\n");
+		return -ENOMEM;
+	}
+	pwm->clk = clk_get(&pdev->dev, NULL);
+
+	if (IS_ERR(pwm->clk)) {
+		ret = PTR_ERR(pwm->clk);
+		goto err_free;
+	}
+
+	pwm->clk_enb = 0;
+	pwm->in_use = 0;
+	pwm->id = pdev->id;
+	pwm->pdev = pdev;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "no memory resources defined\n");
+		ret = -ENODEV;
+		goto err_put_clk;
+	}
+
+	r = request_mem_region(r->start, resource_size(r), pdev->name);
+	if (!r) {
+		dev_err(&pdev->dev, "failed to request memory\n");
+		ret = -EBUSY;
+		goto err_put_clk;
+	}
+
+	pwm->mmio_base = ioremap(r->start, resource_size(r));
+	if (!pwm->mmio_base) {
+		dev_err(&pdev->dev, "failed to ioremap() region\n");
+		ret = -ENODEV;
+		goto err_free_mem;
+	}
+
+	platform_set_drvdata(pdev, pwm);
+
+	mutex_lock(&pwm_lock);
+	list_add_tail(&pwm->node, &pwm_list);
+	mutex_unlock(&pwm_lock);
+
+	return 0;
+
+err_free_mem:
+	release_mem_region(r->start, resource_size(r));
+err_put_clk:
+	clk_put(pwm->clk);
+err_free:
+	kfree(pwm);
+	return ret;
+}
+
+/*
+ * Unbind one controller: refuse while the channel is claimed
+ * (-EBUSY), otherwise delist it, zero the control register, and
+ * release all resources acquired in probe.
+ */
+static int __devexit tegra_pwm_remove(struct platform_device *pdev)
+{
+	struct pwm_device *pwm = platform_get_drvdata(pdev);
+	struct resource *r;
+	int rc;
+
+	if (WARN_ON(!pwm))
+		return -ENODEV;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	mutex_lock(&pwm_lock);
+	if (pwm->in_use) {
+		mutex_unlock(&pwm_lock);
+		return -EBUSY;
+	}
+	list_del(&pwm->node);
+	mutex_unlock(&pwm_lock);
+
+	/* quiesce the hardware before tearing down */
+	rc = pwm_writel(pwm, 0);
+
+	iounmap(pwm->mmio_base);
+	release_mem_region(r->start, resource_size(r));
+
+	/* drop the reference still held if the PWM was left enabled */
+	if (pwm->clk_enb)
+		clk_disable(pwm->clk);
+
+	clk_put(pwm->clk);
+
+	kfree(pwm);
+	return rc;
+}
+
+static struct platform_driver tegra_pwm_driver = {
+ .driver = {
+ .name = "tegra_pwm",
+ },
+ .probe = tegra_pwm_probe,
+ .remove = __devexit_p(tegra_pwm_remove),
+};
+
+/* Register at subsys level so PWM consumers probing later find it. */
+static int __init tegra_pwm_init(void)
+{
+	return platform_driver_register(&tegra_pwm_driver);
+}
+subsys_initcall(tegra_pwm_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit tegra_pwm_exit(void)
+{
+	platform_driver_unregister(&tegra_pwm_driver);
+}
+module_exit(tegra_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("NVIDIA Corporation");
--- /dev/null
+/*
+ * arch/arm/mach-tegra/suspend-t2.c
+ *
+ * BootROM LP0 scratch register preservation for Tegra 2
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#include <mach/gpio.h>
+#include <mach/iomap.h>
+#include <mach/irqs.h>
+#include <mach/suspend.h>
+
+#include "gpio-names.h"
+
+#define PMC_SCRATCH3 0x5c
+#define PMC_SCRATCH5 0x64
+#define PMC_SCRATCH6 0x68
+#define PMC_SCRATCH7 0x6c
+#define PMC_SCRATCH8 0x70
+#define PMC_SCRATCH9 0x74
+#define PMC_SCRATCH10 0x78
+#define PMC_SCRATCH11 0x7c
+#define PMC_SCRATCH12 0x80
+#define PMC_SCRATCH13 0x84
+#define PMC_SCRATCH14 0x88
+#define PMC_SCRATCH15 0x8c
+#define PMC_SCRATCH16 0x90
+#define PMC_SCRATCH17 0x94
+#define PMC_SCRATCH18 0x98
+#define PMC_SCRATCH19 0x9c
+#define PMC_SCRATCH20 0xa0
+#define PMC_SCRATCH21 0xa4
+#define PMC_SCRATCH22 0xa8
+#define PMC_SCRATCH23 0xac
+#define PMC_SCRATCH25 0x100
+#define PMC_SCRATCH35 0x128
+#define PMC_SCRATCH36 0x12c
+#define PMC_SCRATCH40 0x13c
+
+/*
+ * One hardware bitfield to be copied from a live MMIO register into a
+ * PMC scratch register so the BootROM can restore it on LP0 exit.
+ */
+struct pmc_scratch_field {
+	void __iomem *addr;	/* source MMIO register */
+	unsigned int mask;	/* field mask, already shifted down to bit 0 */
+	int shift_src;		/* lsb of the field in the source register */
+	int shift_dst;		/* lsb of the field in the scratch register */
+};
+
+/*
+ * Describe a field as module/offset plus msb:lsb ranges.  The
+ * "1?field" / "0?field" trick reads an msb:lsb argument as a
+ * conditional expression: 1?msb:lsb yields msb, 0?msb:lsb yields lsb.
+ */
+#define field(module, offs, field, dst) \
+	{ \
+		.addr = IO_ADDRESS(TEGRA_##module##_BASE) + offs, \
+		.mask = 0xfffffffful >> (31 - ((1?field) - (0?field))), \
+		.shift_src = 0?field, \
+		.shift_dst = 0?dst, \
+	}
+
+static const struct pmc_scratch_field pllx[] __initdata = {
+ field(CLK_RESET, 0xe0, 22:20, 17:15), /* PLLX_DIVP */
+ field(CLK_RESET, 0xe0, 17:8, 14:5), /* PLLX_DIVN */
+ field(CLK_RESET, 0xe0, 4:0, 4:0), /* PLLX_DIVM */
+ field(CLK_RESET, 0xe4, 11:8, 25:22), /* PLLX_CPCON */
+ field(CLK_RESET, 0xe4, 7:4, 21:18), /* PLLX_LFCON */
+ field(APB_MISC, 0x8e4, 27:24, 30:27), /* XM2CFGC_VREF_DQ */
+ field(APB_MISC, 0x8c8, 3:3, 26:26), /* XM2CFGC_SCHMT_EN */
+ field(APB_MISC, 0x8d0, 2:2, 31:31), /* XM2CLKCFG_PREEMP_EN */
+};
+
+static const struct pmc_scratch_field emc_0[] __initdata = {
+ field(EMC, 0x3c, 4:0, 31:27), /* R2W */
+ field(EMC, 0x34, 5:0, 20:15), /* RAS */
+ field(EMC, 0x2c, 5:0, 5:0), /* RC */
+ field(EMC, 0x30, 8:0, 14:6), /* RFC */
+ field(EMC, 0x38, 5:0, 26:21), /* RP */
+};
+
+static const struct pmc_scratch_field emc_1[] __initdata = {
+ field(EMC, 0x44, 4:0, 9:5), /* R2P */
+ field(EMC, 0x4c, 5:0, 20:15), /* RD_RCD */
+ field(EMC, 0x54, 3:0, 30:27), /* RRD */
+ field(EMC, 0x48, 4:0, 14:10), /* W2P */
+ field(EMC, 0x40, 4:0, 4:0), /* W2R */
+ field(EMC, 0x50, 5:0, 26:21), /* WR_RCD */
+};
+
+static const struct pmc_scratch_field emc_2[] __initdata = {
+ field(EMC, 0x2b8, 2:2, 31:31), /* CLKCHANGE_SR_ENABLE */
+ field(EMC, 0x2b8, 10:10, 30:30), /* USE_ADDR_CLK */
+ field(EMC, 0x80, 4:0, 29:25), /* PCHG2PDEN */
+ field(EMC, 0x64, 3:0, 15:12), /* QRST */
+ field(EMC, 0x68, 3:0, 19:16), /* QSAFE */
+ field(EMC, 0x60, 3:0, 11:8), /* QUSE */
+ field(EMC, 0x6c, 4:0, 24:20), /* RDV */
+ field(EMC, 0x58, 3:0, 3:0), /* REXT */
+ field(EMC, 0x5c, 3:0, 7:4), /* WDV */
+};
+
+static const struct pmc_scratch_field emc_3[] __initdata = {
+ field(EMC, 0x74, 3:0, 19:16), /* BURST_REFRESH_NUM */
+ field(EMC, 0x7c, 3:0, 27:24), /* PDEX2RD */
+ field(EMC, 0x78, 3:0, 23:20), /* PDEX2WR */
+ field(EMC, 0x70, 4:0, 4:0), /* REFRESH_LO */
+ field(EMC, 0x70, 15:5, 15:5), /* REFRESH */
+ field(EMC, 0xa0, 3:0, 31:28), /* TCLKSTABLE */
+};
+
+static const struct pmc_scratch_field emc_4[] __initdata = {
+ field(EMC, 0x84, 4:0, 4:0), /* ACT2PDEN */
+ field(EMC, 0x88, 4:0, 9:5), /* AR2PDEN */
+ field(EMC, 0x8c, 5:0, 15:10), /* RW2PDEN */
+ field(EMC, 0x94, 3:0, 31:28), /* TCKE */
+ field(EMC, 0x90, 11:0, 27:16), /* TXSR */
+};
+
+static const struct pmc_scratch_field emc_5[] __initdata = {
+ field(EMC, 0x8, 10:10, 30:30), /* AP_REQ_BUSY_CTRL */
+ field(EMC, 0x8, 24:24, 31:31), /* CFG_PRIORITY */
+ field(EMC, 0x8, 2:2, 26:26), /* FORCE_UPDATE */
+ field(EMC, 0x8, 4:4, 27:27), /* MRS_WAIT */
+ field(EMC, 0x8, 5:5, 28:28), /* PERIODIC_QRST */
+ field(EMC, 0x8, 9:9, 29:29), /* READ_DQM_CTRL */
+ field(EMC, 0x8, 0:0, 24:24), /* READ_MUX */
+ field(EMC, 0x8, 1:1, 25:25), /* WRITE_MUX */
+ field(EMC, 0xa4, 3:0, 9:6), /* TCLKSTOP */
+ field(EMC, 0xa8, 13:0, 23:10), /* TREFBW */
+ field(EMC, 0x9c, 5:0, 5:0), /* TRPAB */
+};
+
+static const struct pmc_scratch_field emc_6[] __initdata = {
+ field(EMC, 0xfc, 1:0, 1:0), /* DQSIB_DLY_MSB_BYTE_0 */
+ field(EMC, 0xfc, 9:8, 3:2), /* DQSIB_DLY_MSB_BYTE_1 */
+ field(EMC, 0xfc, 17:16, 5:4), /* DQSIB_DLY_MSB_BYTE_2 */
+ field(EMC, 0xfc, 25:24, 7:6), /* DQSIB_DLY_MSB_BYTE_3 */
+ field(EMC, 0x110, 1:0, 9:8), /* QUSE_DLY_MSB_BYTE_0 */
+ field(EMC, 0x110, 9:8, 11:10), /* QUSE_DLY_MSB_BYTE_1 */
+ field(EMC, 0x110, 17:16, 13:12), /* QUSE_DLY_MSB_BYTE_2 */
+ field(EMC, 0x110, 25:24, 15:14), /* QUSE_DLY_MSB_BYTE_3 */
+ field(EMC, 0xac, 3:0, 25:22), /* QUSE_EXTRA */
+ field(EMC, 0x98, 5:0, 21:16), /* TFAW */
+ field(APB_MISC, 0x8e4, 5:5, 30:30), /* XM2CFGC_VREF_DQ_EN */
+ field(APB_MISC, 0x8e4, 19:16, 29:26), /* XM2CFGC_VREF_DQS */
+};
+
+static const struct pmc_scratch_field emc_dqsib_dly[] __initdata = {
+ field(EMC, 0xf8, 31:0, 31:0), /* DQSIB_DLY_BYTE_0 - DQSIB_DLY_BYTE_3*/
+};
+
+static const struct pmc_scratch_field emc_quse_dly[] __initdata = {
+ field(EMC, 0x10c, 31:0, 31:0), /* QUSE_DLY_BYTE_0 - QUSE_DLY_BYTE_3*/
+};
+
+static const struct pmc_scratch_field emc_clktrim[] __initdata = {
+ field(EMC, 0x2d0, 29:0, 29:0), /* DATA0_CLKTRIM - DATA3_CLKTRIM +
+ * MCLK_ADDR_CLKTRIM */
+};
+
+static const struct pmc_scratch_field emc_autocal_fbio[] __initdata = {
+ field(EMC, 0x2a4, 29:29, 29:29), /* AUTO_CAL_ENABLE */
+ field(EMC, 0x2a4, 30:30, 30:30), /* AUTO_CAL_OVERRIDE */
+ field(EMC, 0x2a4, 12:8, 18:14), /* AUTO_CAL_PD_OFFSET */
+ field(EMC, 0x2a4, 4:0, 13:9), /* AUTO_CAL_PU_OFFSET */
+ field(EMC, 0x2a4, 25:16, 28:19), /* AUTO_CAL_STEP */
+ field(EMC, 0xf4, 16:16, 0:0), /* CFG_DEN_EARLY */
+ field(EMC, 0x104, 8:8, 8:8), /* CTT_TERMINATION */
+ field(EMC, 0x104, 7:7, 7:7), /* DIFFERENTIAL_DQS */
+ field(EMC, 0x104, 9:9, 31:31), /* DQS_PULLD */
+ field(EMC, 0x104, 1:0, 5:4), /* DRAM_TYPE */
+ field(EMC, 0x104, 4:4, 6:6), /* DRAM_WIDTH */
+ field(EMC, 0x114, 2:0, 3:1), /* CFG_QUSE_LATE */
+};
+
+static const struct pmc_scratch_field emc_autocal_interval[] __initdata = {
+ field(EMC, 0x2a8, 27:0, 27:0), /* AUTOCAL_INTERVAL */
+ field(EMC, 0x2b8, 1:1, 29:29), /* CLKCHANGE_PD_ENABLE */
+ field(EMC, 0x2b8, 0:0, 28:28), /* CLKCHANGE_REQ_ENABLE */
+ field(EMC, 0x2b8, 9:8, 31:30), /* PIN_CONFIG */
+};
+
+static const struct pmc_scratch_field emc_cfgs[] __initdata = {
+ field(EMC, 0x10, 9:8, 4:3), /* EMEM_BANKWIDTH */
+ field(EMC, 0x10, 2:0, 2:0), /* EMEM_COLWIDTH */
+ field(EMC, 0x10, 19:16, 8:5), /* EMEM_DEVSIZE */
+ field(EMC, 0x10, 25:24, 10:9), /* EMEM_NUMDEV */
+ field(EMC, 0xc, 24:24, 21:21), /* AUTO_PRE_RD */
+ field(EMC, 0xc, 25:25, 22:22), /* AUTO_PRE_WR */
+ field(EMC, 0xc, 16:16, 20:20), /* CLEAR_AP_PREV_SPREQ */
+ field(EMC, 0xc, 29:29, 23:23), /* DRAM_ACPD */
+ field(EMC, 0xc, 30:30, 24:24), /* DRAM_CLKSTOP_PDSR_ONLY */
+ field(EMC, 0xc, 31:31, 25:25), /* DRAM_CLKSTOP */
+ field(EMC, 0xc, 15:8, 19:12), /* PRE_IDLE_CYCLES */
+ field(EMC, 0xc, 0:0, 11:11), /* PRE_IDLE_EN */
+ field(EMC, 0x2bc, 29:28, 29:28), /* CFG_DLL_LOCK_LIMIT */
+ field(EMC, 0x2bc, 7:6, 31:30), /* CFG_DLL_MODE */
+ field(MC, 0x10c, 0:0, 26:26), /* LL_CTRL */
+ field(MC, 0x10c, 1:1, 27:27), /* LL_SEND_BOTH */
+};
+
+static const struct pmc_scratch_field emc_adr_cfg1[] __initdata = {
+ field(EMC, 0x14, 9:8, 9:8), /* EMEM1_BANKWIDTH */
+ field(EMC, 0x14, 2:0, 7:5), /* EMEM1_COLWIDTH */
+ field(EMC, 0x14, 19:16, 13:10), /* EMEM1_DEVSIZE */
+ field(EMC, 0x2dc, 28:24, 4:0), /* TERM_DRVUP */
+ field(APB_MISC, 0x8d4, 3:0, 17:14), /* XM2COMP_VREF_SEL */
+ field(APB_MISC, 0x8d8, 18:16, 23:21), /* XM2VTTGEN_CAL_DRVDN */
+ field(APB_MISC, 0x8d8, 26:24, 20:18), /* XM2VTTGEN_CAL_DRVUP */
+ field(APB_MISC, 0x8d8, 1:1, 30:30), /* XM2VTTGEN_SHORT_PWRGND */
+ field(APB_MISC, 0x8d8, 0:0, 31:31), /* XM2VTTGEN_SHORT */
+ field(APB_MISC, 0x8d8, 14:12, 26:24), /* XM2VTTGEN_VAUXP_LEVEL */
+ field(APB_MISC, 0x8d8, 10:8, 29:27), /* XM2VTTGEN_VCLAMP_LEVEL */
+};
+
+/*
+ * PMC scratch layout tables: each field() entry maps a live hardware
+ * register field (bank, register offset, source bit range) onto a bit
+ * range of one PMC scratch word. lp0_suspend_init() packs the current
+ * register values into the scratch registers so the boot ROM / warmboot
+ * code can reprogram EMC/pad controls on LP0 resume.
+ */
+static const struct pmc_scratch_field emc_digital_dll[] __initdata = {
+	field(EMC, 0x2bc, 1:1, 23:23), /* DLI_TRIMMER_EN */
+	field(EMC, 0x2bc, 0:0, 22:22), /* DLL_EN */
+	field(EMC, 0x2bc, 5:5, 27:27), /* DLL_LOWSPEED */
+	field(EMC, 0x2bc, 2:2, 24:24), /* DLL_OVERRIDE_EN */
+	field(EMC, 0x2bc, 11:8, 31:28), /* DLL_UDSET */
+	field(EMC, 0x2bc, 4:4, 26:26), /* PERBYTE_TRIMMER_OVERRIDE */
+	field(EMC, 0x2bc, 3:3, 25:25), /* USE_SINGLE_DLL */
+	field(MC, 0xc, 21:0, 21:0), /* EMEM_SIZE_KB */
+};
+
+static const struct pmc_scratch_field emc_dqs_clktrim[] __initdata = {
+	field(EMC, 0x2d4, 29:0, 29:0), /* DQS0_CLKTRIM - DQS3 + MCLK*/
+	field(APB_MISC, 0x8e4, 3:3, 31:31), /* XM2CFGC_CTT_HIZ_EN */
+	field(APB_MISC, 0x8e4, 4:4, 30:30), /* XM2CFGC_VREF_DQS_EN */
+};
+
+/*
+ * NOTE(review): the two APB_MISC entries below map to destination bits
+ * 30 and 31, the same bits used by the two entries in emc_dqs_clktrim
+ * above — fine if the two tables target different scratch registers
+ * (they do: SCRATCH20 vs SCRATCH21), but verify against the TRM.
+ */
+static const struct pmc_scratch_field emc_dq_clktrim[] __initdata = {
+	field(EMC, 0x2d8, 29:0, 29:0),
+	field(APB_MISC, 0x8e4, 2:2, 30:30), /* XM2CFGC_PREEMP_EN */
+	field(APB_MISC, 0x8e4, 0:0, 31:31), /* XM2CFGC_RX_FT_REC_EN */
+};
+
+static const struct pmc_scratch_field emc_dll_xform_dqs[] __initdata = {
+	field(EMC, 0x2bc, 25:16, 29:20), /* CFG_DLL_OVERRIDE_VAL */
+	field(EMC, 0x2c0, 4:0, 4:0), /* DQS_MULT */
+	field(EMC, 0x2c0, 22:8, 19:5), /* DQS_OFFS */
+	field(MC, 0x10c, 31:31, 30:30), /* LL_DRAM_INTERLEAVE */
+};
+
+static const struct pmc_scratch_field emc_odt_rw[] __initdata = {
+	field(EMC, 0x2c4, 4:0, 4:0), /* QUSE_MULT */
+	field(EMC, 0x2c4, 22:8, 19:5), /* QUSE_OFF */
+	field(EMC, 0xb4, 31:31, 29:29), /* DISABLE_ODT_DURING_READ */
+	field(EMC, 0xb4, 30:30, 28:28), /* B4_READ */
+	field(EMC, 0xb4, 2:0, 27:25), /* RD_DELAY */
+	field(EMC, 0xb0, 31:31, 24:24), /* ENABLE_ODT_DURING_WRITE */
+	field(EMC, 0xb0, 30:30, 23:23), /* B4_WRITE */
+	field(EMC, 0xb0, 2:0, 22:20), /* WR_DELAY */
+};
+
+/* whole-word snapshot of the AHB arbitration crossbar config */
+static const struct pmc_scratch_field arbitration_xbar[] __initdata = {
+	field(AHB_GIZMO, 0xdc, 31:0, 31:0),
+};
+
+static const struct pmc_scratch_field emc_zcal[] __initdata = {
+	field(EMC, 0x2e0, 23:0, 23:0), /* ZCAL_REF_INTERVAL */
+	field(EMC, 0x2e4, 7:0, 31:24), /* ZCAL_WAIT_CNT */
+};
+
+static const struct pmc_scratch_field emc_ctt_term[] __initdata = {
+	field(EMC, 0x2dc, 19:15, 30:26), /* TERM_DRVDN */
+	field(EMC, 0x2dc, 12:8, 25:21), /* TERM_OFFSET */
+	field(EMC, 0x2dc, 31:31, 31:31), /* TERM_OVERRIDE */
+	field(EMC, 0x2dc, 2:0, 20:18), /* TERM_SLOPE */
+	field(EMC, 0x2e8, 23:16, 15:8), /* ZQ_MRW_MA */
+	field(EMC, 0x2e8, 7:0, 7:0), /* ZQ_MRW_OP */
+};
+
+static const struct pmc_scratch_field xm2_cfgd[] __initdata = {
+	field(APB_MISC, 0x8e8, 18:16, 11:9), /* CFGD0_DLYIN_TRM */
+	field(APB_MISC, 0x8e8, 22:20, 8:6), /* CFGD1_DLYIN_TRM */
+	field(APB_MISC, 0x8e8, 26:24, 5:3), /* CFGD2_DLYIN_TRM */
+	field(APB_MISC, 0x8e8, 30:28, 2:0), /* CFGD3_DLYIN_TRM */
+	field(APB_MISC, 0x8e8, 3:3, 12:12), /* XM2CFGD_CTT_HIZ_EN */
+	field(APB_MISC, 0x8e8, 2:2, 13:13), /* XM2CFGD_PREEMP_EN */
+	field(APB_MISC, 0x8e8, 0:0, 14:14), /* CM2CFGD_RX_FT_REC_EN */
+};
+
+/*
+ * One PMC scratch register and the list of hardware fields packed into it.
+ */
+struct pmc_scratch_reg {
+	const struct pmc_scratch_field *fields;	/* field mapping table */
+	void __iomem *scratch_addr;		/* PMC scratch register VA */
+	int num_fields;				/* entries in @fields */
+};
+
+/*
+ * NOTE(review): the macro below shares its name with the scratch[] array
+ * that uses it. This is legal C (a function-like macro only expands when
+ * followed by '('), but it is confusing — consider renaming the macro.
+ */
+#define scratch(offs, field_list) \
+	{ \
+		.scratch_addr = IO_ADDRESS(TEGRA_PMC_BASE) + offs, \
+		.fields = field_list, \
+		.num_fields = ARRAY_SIZE(field_list), \
+	}
+
+/* scratch register number -> packed field table, consumed on LP0 resume */
+static const struct pmc_scratch_reg scratch[] __initdata = {
+	scratch(PMC_SCRATCH3, pllx),
+	scratch(PMC_SCRATCH5, emc_0),
+	scratch(PMC_SCRATCH6, emc_1),
+	scratch(PMC_SCRATCH7, emc_2),
+	scratch(PMC_SCRATCH8, emc_3),
+	scratch(PMC_SCRATCH9, emc_4),
+	scratch(PMC_SCRATCH10, emc_5),
+	scratch(PMC_SCRATCH11, emc_6),
+	scratch(PMC_SCRATCH12, emc_dqsib_dly),
+	scratch(PMC_SCRATCH13, emc_quse_dly),
+	scratch(PMC_SCRATCH14, emc_clktrim),
+	scratch(PMC_SCRATCH15, emc_autocal_fbio),
+	scratch(PMC_SCRATCH16, emc_autocal_interval),
+	scratch(PMC_SCRATCH17, emc_cfgs),
+	scratch(PMC_SCRATCH18, emc_adr_cfg1),
+	scratch(PMC_SCRATCH19, emc_digital_dll),
+	scratch(PMC_SCRATCH20, emc_dqs_clktrim),
+	scratch(PMC_SCRATCH21, emc_dq_clktrim),
+	scratch(PMC_SCRATCH22, emc_dll_xform_dqs),
+	scratch(PMC_SCRATCH23, emc_odt_rw),
+	scratch(PMC_SCRATCH25, arbitration_xbar),
+	scratch(PMC_SCRATCH35, emc_zcal),
+	scratch(PMC_SCRATCH36, emc_ctt_term),
+	scratch(PMC_SCRATCH40, xm2_cfgd),
+};
+
+/*
+ * lp0_suspend_init - snapshot hardware state into PMC scratch registers
+ *
+ * For every scratch register in the table above, read each mapped source
+ * register field, shift/mask it into its destination position, and write
+ * the assembled word into the PMC scratch register. Runs once at init;
+ * the values are consumed by the warmboot path on LP0 resume.
+ */
+void __init lp0_suspend_init(void)
+{
+	int i;
+
+	for (i=0; i<ARRAY_SIZE(scratch); i++) {
+		unsigned int r = 0;		/* assembled scratch word */
+		int j;
+
+		for (j=0; j<scratch[i].num_fields; j++) {
+			/* extract the source field ... */
+			unsigned int v = readl(scratch[i].fields[j].addr);
+			v >>= scratch[i].fields[j].shift_src;
+			v &= scratch[i].fields[j].mask;
+			/* ... and place it at its scratch position */
+			v <<= scratch[i].fields[j].shift_dst;
+			r |= v;
+		}
+
+		writel(r, scratch[i].scratch_addr);
+	}
+}
+
+#define NUM_WAKE_EVENTS 31
+
+/*
+ * PMC wake event number -> Linux IRQ. The array index is the hardware
+ * wake event bit; entries set to -EINVAL are wake sources (USB VBUS/ID)
+ * with no IRQ mapping in this kernel.
+ */
+static int tegra_wake_event_irq[NUM_WAKE_EVENTS] = {
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PO5),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV3),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PL1),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PB6),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN7),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PA0),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU5),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PU6),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PC7),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS2),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PAA1),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW3),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PW2),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PY6),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV6),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PJ7),
+	INT_RTC,
+	INT_KBC,
+	INT_EXTERNAL_PMU,
+	-EINVAL, /* TEGRA_USB1_VBUS, */
+	-EINVAL, /* TEGRA_USB3_VBUS, */
+	-EINVAL, /* TEGRA_USB1_ID, */
+	-EINVAL, /* TEGRA_USB3_ID, */
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PI5),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PV2),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS4),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS5),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PS0),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PQ6),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PQ7),
+	TEGRA_GPIO_TO_IRQ(TEGRA_GPIO_PN2),
+};
+
+/*
+ * tegra_irq_to_wake - map a Linux IRQ to its PMC wake event number
+ *
+ * Linear scan of the table above. Returns the wake event index, or
+ * -EINVAL if the IRQ is not a wake source.
+ */
+int tegra_irq_to_wake(int irq)
+{
+	int i;
+	for (i = 0; i < NUM_WAKE_EVENTS; i++)
+		if (tegra_wake_event_irq[i] == irq)
+			return i;
+
+	return -EINVAL;
+}
+
+/*
+ * tegra_wake_to_irq - map a PMC wake event number back to a Linux IRQ
+ *
+ * Returns the IRQ for the wake event, -EINVAL for an out-of-range index
+ * (the table itself may also hold -EINVAL for unmapped events).
+ */
+int tegra_wake_to_irq(int wake)
+{
+	if (wake < 0)
+		return -EINVAL;
+
+	if (wake >= NUM_WAKE_EVENTS)
+		return -EINVAL;
+
+	return tegra_wake_event_irq[wake];
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/suspend.c
+ *
+ * CPU complex suspend & resume functions for Tegra SoCs
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <linux/regulator/machine.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/localtimer.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#include <mach/clk.h>
+#include <mach/iomap.h>
+#include <mach/iovmm.h>
+#include <mach/irqs.h>
+#include <mach/legacy_irq.h>
+#include <mach/suspend.h>
+
+#include "board.h"
+#include "power.h"
+
+/*
+ * CPU-complex clock state saved across suspend. The first seven members
+ * are read by assembly at fixed offsets (see comment below), so their
+ * order is ABI with headsmp-t2.S.
+ */
+struct suspend_context {
+	/*
+	 * The next 7 values are referenced by offset in __restart_plls
+	 * in headsmp-t2.S, and should not be moved
+	 */
+	u32 pllx_misc;
+	u32 pllx_base;
+	u32 pllp_misc;
+	u32 pllp_base;
+	u32 pllp_outa;
+	u32 pllp_outb;
+	u32 pll_timeout;	/* TMRUS deadline for PLL lock on resume */
+
+	u32 cpu_burst;		/* CCLK burst policy */
+	u32 clk_csite_src;	/* coresight clock source */
+	u32 twd_ctrl;		/* local timer control */
+	u32 twd_load;		/* local timer load value */
+	u32 cclk_divider;
+};
+
+/* volatile: written by the reset/resume assembly path outside C's view */
+volatile struct suspend_context tegra_sctx;
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+#ifdef CONFIG_PM
+static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *flow_ctrl = IO_ADDRESS(TEGRA_FLOW_CTRL_BASE);
+static void __iomem *evp_reset = IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE)+0x100;
+static void __iomem *tmrus = IO_ADDRESS(TEGRA_TMRUS_BASE);
+#endif
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
+#define PMC_WAKE_MASK 0xc
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_DPAD_ORIDE 0x1C
+#define PMC_WAKE_DELAY 0xe0
+#define PMC_DPD_SAMPLE 0x20
+
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_COREPWRGOOD_TIMER 0x3c
+#define PMC_SCRATCH0 0x50
+#define PMC_SCRATCH1 0x54
+#define PMC_CPUPWRGOOD_TIMER 0xc8
+#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_COREPWROFF_TIMER PMC_WAKE_DELAY
+#define PMC_SCRATCH38 0x134
+#define PMC_SCRATCH39 0x138
+#define PMC_SCRATCH41 0x140
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_PLLC_BASE 0x80
+#define CLK_RESET_PLLM_BASE 0x90
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+#define CLK_RESET_PLLP_BASE 0xa0
+#define CLK_RESET_PLLP_OUTA 0xa4
+#define CLK_RESET_PLLP_OUTB 0xa8
+#define CLK_RESET_PLLP_MISC 0xac
+
+#define CLK_RESET_SOURCE_CSITE 0x1d4
+
+
+#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
+#define CLK_RESET_CCLK_BURST_POLICY_PLLM 3
+#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
+
+#define FLOW_CTRL_CPU_CSR 0x8
+#define FLOW_CTRL_CPU1_CSR 0x18
+
+#define EMC_MRW_0 0x0e8
+#define EMC_MRW_DEV_SELECTN 30
+#define EMC_MRW_DEV_NONE (3 << EMC_MRW_DEV_SELECTN)
+
+unsigned long tegra_pgd_phys; /* pgd used by hotplug & LP2 bootup */
+static pgd_t *tegra_pgd;
+void *tegra_context_area = NULL;
+
+static struct clk *tegra_pclk = NULL;
+static const struct tegra_suspend_platform_data *pdata = NULL;
+static unsigned long wb0_restore = 0;
+static enum tegra_suspend_mode current_suspend_mode;
+
+static unsigned int tegra_time_in_suspend[32];
+
+/* log2-style histogram bucket for a suspend duration (seconds) */
+static inline unsigned int time_to_bin(unsigned int time)
+{
+	return fls(time);
+}
+
+/*
+ * CPU power-good delay in microseconds; falls back to 5 ms (with a
+ * one-shot warning) if called before tegra_init_suspend() set pdata.
+ */
+unsigned long tegra_cpu_power_good_time(void)
+{
+	if (WARN_ON_ONCE(!pdata))
+		return 5000;
+
+	return pdata->cpu_timer;
+}
+
+/*
+ * CPU power-off delay in microseconds; 5 ms fallback before platform
+ * data is registered (mirrors tegra_cpu_power_good_time()).
+ */
+unsigned long tegra_cpu_power_off_time(void)
+{
+	if (WARN_ON_ONCE(!pdata))
+		return 5000;
+
+	return pdata->cpu_off_timer;
+}
+
+/* deepest suspend mode the platform supports; NONE before registration */
+enum tegra_suspend_mode tegra_get_suspend_mode(void)
+{
+	if (!pdata)
+		return TEGRA_SUSPEND_NONE;
+
+	return pdata->suspend_mode;
+}
+
+/*
+ * set_power_timers - program PMC CPU power-good / power-off timers
+ * @us_on:  power-good delay in microseconds
+ * @us_off: power-off delay in microseconds
+ * @rate:   pclk rate in Hz used to convert microseconds to pclk ticks
+ *
+ * Conversion rounds up (+999999 before the divide). Writes are skipped
+ * when the pclk rate has not changed since the last call.
+ *
+ * NOTE(review): last_pclk is an int but holds a value copied from the
+ * 64-bit pclk, and the cache is compared against @rate yet updated from
+ * pclk — after the rate<=0 fallback the cache holds 100000000, so a later
+ * genuine 100 MHz rate would skip reprogramming. Worth confirming this
+ * is benign on all boards.
+ */
+static void set_power_timers(unsigned long us_on, unsigned long us_off,
+	long rate)
+{
+	static int last_pclk = 0;
+	unsigned long long ticks;
+	unsigned long long pclk;
+
+	if (WARN_ON_ONCE(rate <= 0))
+		pclk = 100000000;	/* assume 100 MHz if rate unknown */
+	else
+		pclk = rate;
+
+	if (rate != last_pclk) {
+		ticks = (us_on * pclk) + 999999ull;
+		do_div(ticks, 1000000);
+		writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
+
+		ticks = (us_off * pclk) + 999999ull;
+		do_div(ticks, 1000000);
+		writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
+		wmb();
+	}
+	last_pclk = pclk;
+}
+
+/*
+ * create_suspend_pgtable - build the page directory used while the MMU
+ * is being turned off/on during hotplug and LP2 CPU bring-up.
+ *
+ * Installs 1 MB section mappings for each (addr_v[i] -> addr_p[i]) pair,
+ * then cleans the pgd pointer and context-area pointer out to L2 so the
+ * pre-MMU resume code can read them by physical address.
+ *
+ * Returns 0 on success, -ENOMEM if the pgd allocation fails.
+ */
+static int create_suspend_pgtable(void)
+{
+	int i;
+	pmd_t *pmd;
+	/* arrays of virtual-to-physical mappings which must be
+	 * present to safely boot hotplugged / LP2-idled CPUs.
+	 * tegra_hotplug_startup (hotplug reset vector) is mapped
+	 * VA=PA so that the translation post-MMU is the same as
+	 * pre-MMU, IRAM is mapped VA=PA so that SDRAM self-refresh
+	 * can safely disable the MMU */
+	unsigned long addr_v[] = {
+		PHYS_OFFSET,
+		IO_IRAM_PHYS,
+		(unsigned long)tegra_context_area,
+#ifdef CONFIG_HOTPLUG_CPU
+		(unsigned long)virt_to_phys(tegra_hotplug_startup),
+#endif
+		(unsigned long)__cortex_a9_restore,
+		(unsigned long)virt_to_phys(__shut_off_mmu),
+	};
+	unsigned long addr_p[] = {
+		PHYS_OFFSET,
+		IO_IRAM_PHYS,
+		(unsigned long)virt_to_phys(tegra_context_area),
+#ifdef CONFIG_HOTPLUG_CPU
+		(unsigned long)virt_to_phys(tegra_hotplug_startup),
+#endif
+		(unsigned long)virt_to_phys(__cortex_a9_restore),
+		(unsigned long)virt_to_phys(__shut_off_mmu),
+	};
+	/* 1 MB sections, writeback-allocate, shared, kernel-writable */
+	unsigned int flags = PMD_TYPE_SECT | PMD_SECT_AP_WRITE |
+		PMD_SECT_WBWA | PMD_SECT_S;
+
+	tegra_pgd = pgd_alloc(&init_mm);
+	if (!tegra_pgd)
+		return -ENOMEM;
+
+	for (i=0; i<ARRAY_SIZE(addr_p); i++) {
+		unsigned long v = addr_v[i];
+		pmd = pmd_offset(tegra_pgd + pgd_index(v), v);
+		*pmd = __pmd((addr_p[i] & PGDIR_MASK) | flags);
+		flush_pmd_entry(pmd);
+		outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	}
+
+	/* publish the pgd PA to the pre-MMU resume code via clean caches */
+	tegra_pgd_phys = virt_to_phys(tegra_pgd);
+	__cpuc_flush_dcache_area(&tegra_pgd_phys,
+		sizeof(tegra_pgd_phys));
+	outer_clean_range(__pa(&tegra_pgd_phys),
+		__pa(&tegra_pgd_phys+1));
+
+	/* note: this flushes the *pointer variable*, not the area itself —
+	 * the resume code reads the pointer by PA; presumably intentional */
+	__cpuc_flush_dcache_area(&tegra_context_area,
+		sizeof(tegra_context_area));
+	outer_clean_range(__pa(&tegra_context_area),
+		__pa(&tegra_context_area+1));
+
+	return 0;
+}
+
+
+
+#ifdef CONFIG_PM
+/*
+ * suspend_cpu_complex
+ *
+ * disable periodic IRQs used for DVFS to prevent suspend wakeups
+ * disable coresight debug interface
+ *
+ *
+ */
+/*
+ * restore_cpu_complex - undo suspend_cpu_complex() after LP1/LP2 exit
+ *
+ * Waits for PLLX lock (if it was enabled), restores CPU clock burst
+ * policy and coresight clocking, clears the flow-controller WFE gating
+ * so the CPU is no longer power-gated on WFE, restores the TWD local
+ * timer and GIC distributor state, and re-enables the stats-monitor IRQ.
+ */
+static noinline void restore_cpu_complex(void)
+{
+	unsigned int reg;
+
+	/* restore original burst policy setting; PLLX state restored
+	 * by CPU boot-up code - wait for PLL stabilization if PLLX
+	 * was enabled, or if explicitly requested by caller */
+
+	BUG_ON(readl(clk_rst + CLK_RESET_PLLX_BASE) != tegra_sctx.pllx_base);
+
+	if (tegra_sctx.pllx_base & (1<<30)) {
+		/* wrap-safe "now < deadline" test on the free-running
+		 * microsecond counter */
+		while (readl(tmrus)-tegra_sctx.pll_timeout >= 0x80000000UL)
+			cpu_relax();
+	}
+	writel(tegra_sctx.cclk_divider, clk_rst + CLK_RESET_CCLK_DIVIDER);
+	writel(tegra_sctx.cpu_burst, clk_rst + CLK_RESET_CCLK_BURST);
+	writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
+
+	/* do not power-gate the CPU when flow controlled */
+	reg = readl(flow_ctrl + FLOW_CTRL_CPU_CSR);
+	reg &= ~((1<<5) | (1<<4) | 1);	/* clear WFE bitmask */
+	reg |= (1<<14);			/* write-1-clear event flag */
+	writel(reg, flow_ctrl + FLOW_CTRL_CPU_CSR);
+	wmb();
+
+#ifdef CONFIG_HAVE_ARM_TWD
+	writel(tegra_sctx.twd_ctrl, twd_base + 0x8);
+	writel(tegra_sctx.twd_load, twd_base + 0);
+#endif
+
+	gic_dist_restore(0);
+	get_irq_chip(IRQ_LOCALTIMER)->unmask(IRQ_LOCALTIMER);
+
+	enable_irq(INT_SYS_STATS_MON);
+}
+
+/*
+ * suspend_cpu_complex - save CPU-complex state before LP1/LP2 entry
+ *
+ * Disables the DVFS stats IRQ (it would wake us), parks coresight on
+ * clk_m, snapshots the CCLK/PLLX/PLLP registers into tegra_sctx for the
+ * resume assembly, stops the TWD local timer, arms the flow controller
+ * to power-gate CPU0 on WFE, and saves GIC CPU-interface/distributor
+ * state. Paired with restore_cpu_complex().
+ */
+static noinline void suspend_cpu_complex(void)
+{
+	unsigned int reg;
+	int i;
+
+	disable_irq(INT_SYS_STATS_MON);
+
+	/* switch coresite to clk_m, save off original source */
+	tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
+	writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
+
+	/* snapshot consumed at fixed offsets by __restart_plls */
+	tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
+	tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
+	tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
+	tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
+	tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
+	tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
+	tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
+	tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
+
+#ifdef CONFIG_HAVE_ARM_TWD
+	tegra_sctx.twd_ctrl = readl(twd_base + 0x8);
+	tegra_sctx.twd_load = readl(twd_base + 0);
+	local_timer_stop();
+#endif
+
+	reg = readl(flow_ctrl + FLOW_CTRL_CPU_CSR);
+	/* clear any pending events, set the WFE bitmap to specify just
+	 * CPU0, and clear any pending events for this CPU */
+	reg &= ~(1<<5);			/* clear CPU1 WFE */
+	reg |= (1<<14) | (1<<4) | 1;	/* enable CPU0 WFE */
+	writel(reg, flow_ctrl + FLOW_CTRL_CPU_CSR);
+	wmb();
+
+	/* clear pending event flags on all secondary CPUs */
+	for (i=1; i<num_present_cpus(); i++) {
+		unsigned int offs = FLOW_CTRL_CPU1_CSR + (i-1)*8;
+		reg = readl(flow_ctrl + offs);
+		writel(reg | (1<<14), flow_ctrl + offs);
+		wmb();
+	}
+
+	gic_cpu_exit(0);
+	gic_dist_save(0);
+}
+
+/*
+ * tegra_suspend_lp2 - enter LP2 (CPU power-gated, SDRAM active)
+ * @us: wakeup timer in microseconds, 0 for no timed wakeup
+ *
+ * Points the exception vector at the LP2 startup stub, programs the PMC
+ * power timers, saves the CPU complex, flushes caches, and calls the
+ * __cortex_a9_save assembly which powers the CPU down; execution resumes
+ * after __cortex_a9_restore. Returns the remaining microseconds of the
+ * wakeup timer.
+ *
+ * NOTE(review): dereferences pdata without a NULL check, unlike the
+ * accessors above — callers must ensure tegra_init_suspend() ran first.
+ */
+unsigned int tegra_suspend_lp2(unsigned int us)
+{
+	unsigned int mode;
+	unsigned long orig, reg;
+	unsigned int remain;
+
+	reg = readl(pmc + PMC_CTRL);
+	mode = (reg >> TEGRA_POWER_PMC_SHIFT) & TEGRA_POWER_PMC_MASK;
+	mode |= TEGRA_POWER_CPU_PWRREQ_OE;
+	if (pdata->separate_req)
+		mode |= TEGRA_POWER_PWRREQ_OE;
+	else
+		mode &= ~TEGRA_POWER_PWRREQ_OE;
+	mode &= ~TEGRA_POWER_EFFECT_LP0;
+
+	/* redirect the reset vector to the LP2 resume stub */
+	orig = readl(evp_reset);
+	writel(virt_to_phys(tegra_lp2_startup), evp_reset);
+
+	set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
+		clk_get_rate_all_locked(tegra_pclk));
+
+	if (us)
+		tegra_lp2_set_trigger(us);
+
+	suspend_cpu_complex();
+	stop_critical_timings();
+	flush_cache_all();
+	/* structure is written by reset code, so the L2 lines
+	 * must be invalidated */
+	outer_flush_range(__pa(&tegra_sctx),__pa(&tegra_sctx+1));
+	barrier();
+
+	__cortex_a9_save(mode);
+	/* return from __cortex_a9_restore */
+	barrier();
+	restore_cpu_complex();
+	start_critical_timings();
+
+	remain = tegra_lp2_timer_remain();
+	if (us)
+		tegra_lp2_set_trigger(0);
+
+	writel(orig, evp_reset);
+
+	return remain;
+}
+
+/* ensures that sufficient time is passed for a register write to
+ * serialize into the 32KHz domain; 130 us is a bit over four cycles
+ * of the 32.768 kHz clock */
+static void pmc_32kwritel(u32 val, unsigned long offs)
+{
+	writel(val, pmc + offs);
+	udelay(130);
+}
+
+static u8 *iram_save = NULL;
+static unsigned int iram_save_size = 0;
+static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
+
+/*
+ * tegra_suspend_dram - enter LP1 or LP0 (SDRAM in self-refresh)
+ * @do_lp0: true for LP0 (whole SoC off except PMC), false for LP1
+ *
+ * Copies the SDRAM-shutdown code into IRAM (saving what was there),
+ * programs wake sources and PMC control bits appropriate for the target
+ * state, saves the CPU complex and (if configured) shuts down the L2,
+ * then calls __cortex_a9_save; execution resumes here after wake.
+ *
+ * NOTE(review): assumes iram_save was successfully allocated in
+ * tegra_init_suspend() — on allocation failure the suspend mode is
+ * forced to LP2 there, so presumably this path is never reached; verify.
+ */
+static void tegra_suspend_dram(bool do_lp0)
+{
+	unsigned int mode = TEGRA_POWER_SDRAM_SELFREFRESH;
+	unsigned long orig, reg;
+
+	orig = readl(evp_reset);
+	/* copy the reset vector and SDRAM shutdown code into IRAM */
+	memcpy(iram_save, iram_code, iram_save_size);
+	memcpy(iram_code, (void *)__tegra_lp1_reset, iram_save_size);
+
+	/* power timers tick on the 32 kHz clock in these states */
+	set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, 32768);
+
+	reg = readl(pmc + PMC_CTRL);
+	mode |= ((reg >> TEGRA_POWER_PMC_SHIFT) & TEGRA_POWER_PMC_MASK);
+
+	if (!do_lp0) {
+		/* LP1: resume directly from the IRAM copy */
+		writel(TEGRA_IRAM_CODE_AREA, evp_reset);
+
+		mode |= TEGRA_POWER_CPU_PWRREQ_OE;
+		if (pdata->separate_req)
+			mode |= TEGRA_POWER_PWRREQ_OE;
+		else
+			mode &= ~TEGRA_POWER_PWRREQ_OE;
+		mode &= ~TEGRA_POWER_EFFECT_LP0;
+
+		tegra_legacy_irq_set_lp1_wake_mask();
+	} else {
+		/* LP0: hand the warmboot vector to the boot ROM via
+		 * SCRATCH0/SCRATCH1 */
+		u32 boot_flag = readl(pmc + PMC_SCRATCH0);
+		pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
+		pmc_32kwritel(wb0_restore, PMC_SCRATCH1);
+		writel(0x0, pmc + PMC_SCRATCH39);
+		mode |= TEGRA_POWER_CPU_PWRREQ_OE;
+		mode |= TEGRA_POWER_PWRREQ_OE;
+		mode |= TEGRA_POWER_EFFECT_LP0;
+
+		/* for platforms where the core & CPU power requests are
+		 * combined as a single request to the PMU, transition to
+		 * LP0 state by temporarily enabling both requests
+		 */
+		if (!pdata->separate_req) {
+			reg |= ((mode & TEGRA_POWER_PMC_MASK) <<
+				TEGRA_POWER_PMC_SHIFT);
+			pmc_32kwritel(reg, PMC_CTRL);
+			mode &= ~TEGRA_POWER_CPU_PWRREQ_OE;
+		}
+
+		tegra_set_lp0_wake_pads(pdata->wake_enb, pdata->wake_high,
+			pdata->wake_any);
+	}
+
+	suspend_cpu_complex();
+	flush_cache_all();
+#ifdef CONFIG_CACHE_L2X0
+	l2x0_shutdown();
+#endif
+
+	__cortex_a9_save(mode);
+	restore_cpu_complex();
+
+	writel(orig, evp_reset);
+#ifdef CONFIG_CACHE_L2X0
+	l2x0_restart();
+#endif
+
+	if (!do_lp0) {
+		memcpy(iram_code, iram_save, iram_save_size);
+		tegra_legacy_irq_restore_mask();
+	} else {
+		/* for platforms where the core & CPU power requests are
+		 * combined as a single request to the PMU, transition out
+		 * of LP0 state by temporarily enabling both requests
+		 */
+		if (!pdata->separate_req) {
+			reg = readl(pmc + PMC_CTRL);
+			reg |= (TEGRA_POWER_CPU_PWRREQ_OE << TEGRA_POWER_PMC_SHIFT);
+			pmc_32kwritel(reg, PMC_CTRL);
+			reg &= ~(TEGRA_POWER_PWRREQ_OE << TEGRA_POWER_PMC_SHIFT);
+			writel(reg, pmc + PMC_CTRL);
+		}
+	}
+
+	wmb();
+}
+
+/* platform_suspend_ops.begin: pre-configure regulators for suspend */
+static int tegra_suspend_begin(suspend_state_t state)
+{
+	return regulator_suspend_prepare(state);
+}
+
+/* platform_suspend_ops.prepare_late: quiesce stats IRQ and save IOVMM */
+static int tegra_suspend_prepare_late(void)
+{
+	disable_irq(INT_SYS_STATS_MON);
+	return tegra_iovmm_suspend();
+}
+
+/* platform_suspend_ops.wake: mirror of tegra_suspend_prepare_late() */
+static void tegra_suspend_wake(void)
+{
+	tegra_iovmm_resume();
+	enable_irq(INT_SYS_STATS_MON);
+}
+
+static u8 uart_state[5];
+
+/*
+ * Save the debug UART's LCR, MCR, IER and divisor latch across LP0
+ * (register file is lost). Registers are 32-bit-strided, hence the *4.
+ * DLAB is toggled to reach IER vs DLL/DLM, then LCR is restored.
+ */
+static int tegra_debug_uart_suspend(void)
+{
+	void __iomem *uart;
+	u32 lcr;
+
+	if (TEGRA_DEBUG_UART_BASE == 0)
+		return 0;
+
+	uart = IO_ADDRESS(TEGRA_DEBUG_UART_BASE);
+
+	lcr = readb(uart + UART_LCR * 4);
+
+	uart_state[0] = lcr;
+	uart_state[1] = readb(uart + UART_MCR * 4);
+
+	/* DLAB = 0 */
+	writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
+
+	uart_state[2] = readb(uart + UART_IER * 4);
+
+	/* DLAB = 1 */
+	writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
+
+	uart_state[3] = readb(uart + UART_DLL * 4);
+	uart_state[4] = readb(uart + UART_DLM * 4);
+
+	writeb(lcr, uart + UART_LCR * 4);
+
+	return 0;
+}
+
+/*
+ * Restore the debug UART registers saved by tegra_debug_uart_suspend(),
+ * in the reverse DLAB toggle order.
+ */
+static void tegra_debug_uart_resume(void)
+{
+	void __iomem *uart;
+	u32 lcr;
+
+	if (TEGRA_DEBUG_UART_BASE == 0)
+		return;
+
+	uart = IO_ADDRESS(TEGRA_DEBUG_UART_BASE);
+
+	lcr = uart_state[0];
+
+	writeb(uart_state[1], uart + UART_MCR * 4);
+
+	/* DLAB = 0 */
+	writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
+
+	writeb(uart_state[2], uart + UART_IER * 4);
+
+	/* DLAB = 1 */
+	writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
+
+	writeb(uart_state[3], uart + UART_DLL * 4);
+	writeb(uart_state[4], uart + UART_DLM * 4);
+
+	writeb(lcr, uart + UART_LCR * 4);
+}
+
+#define MC_SECURITY_START 0x6c
+#define MC_SECURITY_SIZE 0x70
+#define MC_SECURITY_CFG2 0x7c
+
+/*
+ * tegra_suspend_enter - platform_suspend_ops.enter
+ *
+ * Dispatches to LP0/LP1/LP2 based on current_suspend_mode. For LP0 the
+ * full peripheral context (IRQ, DMA, UART, pinmux, timers, GPIO, clocks,
+ * MC security window) is saved and restored around the suspend, since
+ * the SoC loses that state. Wake-marked IRQs are unmasked across the
+ * suspend and re-masked afterwards. Suspend duration is read from the
+ * always-on RTC and accumulated into a log2 histogram.
+ *
+ * Always returns 0.
+ */
+static int tegra_suspend_enter(suspend_state_t state)
+{
+	struct irq_desc *desc;
+	void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
+	void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
+	unsigned long flags;
+	u32 mc_data[3] = {0, 0, 0};
+	int irq;
+	bool do_lp0 = (current_suspend_mode == TEGRA_SUSPEND_LP0);
+	bool do_lp2 = (current_suspend_mode == TEGRA_SUSPEND_LP2);
+	int lp_state;
+	u64 rtc_before;
+	u64 rtc_after;
+	u64 secs;
+	u32 ms;
+
+	if (do_lp2)
+		lp_state = 2;
+	else if (do_lp0)
+		lp_state = 0;
+	else
+		lp_state = 1;
+
+	local_irq_save(flags);
+	local_fiq_disable();
+
+	pr_info("Entering suspend state LP%d\n", lp_state);
+	if (do_lp0) {
+		/* LP0 powers down everything but the PMC: save all
+		 * peripheral state that will be lost */
+		tegra_irq_suspend();
+		tegra_dma_suspend();
+		tegra_debug_uart_suspend();
+		tegra_pinmux_suspend();
+		tegra_timer_suspend();
+		tegra_gpio_suspend();
+		tegra_clk_suspend();
+
+		mc_data[0] = readl(mc + MC_SECURITY_START);
+		mc_data[1] = readl(mc + MC_SECURITY_SIZE);
+		mc_data[2] = readl(mc + MC_SECURITY_CFG2);
+	}
+
+	/* let wake-enabled IRQs through while suspended */
+	for_each_irq_desc(irq, desc) {
+		if ((desc->status & IRQ_WAKEUP) &&
+		    (desc->status & IRQ_SUSPENDED)) {
+			get_irq_chip(irq)->unmask(irq);
+		}
+	}
+
+	rtc_before = tegra_rtc_read_ms();
+
+	if (do_lp2)
+		tegra_suspend_lp2(0);
+	else
+		tegra_suspend_dram(do_lp0);
+
+	rtc_after = tegra_rtc_read_ms();
+
+	for_each_irq_desc(irq, desc) {
+		if ((desc->status & IRQ_WAKEUP) &&
+		    (desc->status & IRQ_SUSPENDED)) {
+			get_irq_chip(irq)->mask(irq);
+		}
+	}
+
+	/* Clear DPD sample */
+	writel(0x0, pmc + PMC_DPD_SAMPLE);
+
+	if (do_lp0) {
+		writel(mc_data[0], mc + MC_SECURITY_START);
+		writel(mc_data[1], mc + MC_SECURITY_SIZE);
+		writel(mc_data[2], mc + MC_SECURITY_CFG2);
+
+		/* trigger emc mode write */
+		writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
+
+		tegra_clk_resume();
+		tegra_gpio_resume();
+		tegra_timer_resume();
+		tegra_pinmux_resume();
+		tegra_debug_uart_resume();
+		tegra_dma_resume();
+		tegra_irq_resume();
+	}
+
+	secs = rtc_after - rtc_before;
+	ms = do_div(secs, 1000);	/* secs -> seconds, ms = remainder */
+	pr_info("Suspended for %llu.%03u seconds\n", secs, ms);
+
+	tegra_time_in_suspend[time_to_bin(secs)]++;
+
+	local_fiq_enable();
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+/* hooks registered with suspend_set_ops() in tegra_init_suspend() */
+static struct platform_suspend_ops tegra_suspend_ops = {
+	.valid		= suspend_valid_only_mem,
+	.begin		= tegra_suspend_begin,
+	.prepare_late	= tegra_suspend_prepare_late,
+	.wake		= tegra_suspend_wake,
+	.enter		= tegra_suspend_enter,
+};
+#endif
+
+/*
+ * tegra_init_suspend - register platform suspend data and configure PMC
+ * @plat: board-supplied suspend configuration (modes, timers, wake pads)
+ *
+ * Validates the requested mode (downgrading LP0 to LP1 if no warmboot
+ * vector was reserved), allocates the CPU context area and suspend page
+ * table, saves the IRAM-resident LP1 code, programs the PMC power
+ * request polarities/timers, and registers tegra_suspend_ops.
+ *
+ * Fix vs. original: the missing-lp0_vec warning and the forced downgrade
+ * to LP1 ran unconditionally in the else branch — boards requesting LP1,
+ * LP2 or no suspend at all were warned about LP0 and force-switched to
+ * LP1. The check is now applied only when LP0 was actually requested.
+ */
+void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
+{
+	u32 reg, mode;
+
+	tegra_pclk = clk_get_sys(NULL, "pclk");
+	BUG_ON(!tegra_pclk);
+	pdata = plat;
+	(void)reg;
+	(void)mode;
+
+	if (plat->suspend_mode == TEGRA_SUSPEND_LP0) {
+		if (tegra_lp0_vec_size) {
+			wb0_restore = tegra_lp0_vec_start;
+		} else {
+			pr_warning("Suspend mode LP0 requested, but missing lp0_vec\n");
+			pr_warning("Disabling LP0\n");
+			plat->suspend_mode = TEGRA_SUSPEND_LP1;
+		}
+	}
+
+	tegra_context_area = kzalloc(CONTEXT_SIZE_BYTES * NR_CPUS, GFP_KERNEL);
+
+	if (tegra_context_area && create_suspend_pgtable()) {
+		kfree(tegra_context_area);
+		tegra_context_area = NULL;
+	}
+
+#ifdef CONFIG_PM
+	iram_save_size = (unsigned long)__tegra_iram_end;
+	iram_save_size -= (unsigned long)__tegra_lp1_reset;
+
+	iram_save = kmalloc(iram_save_size, GFP_KERNEL);
+	if (!iram_save) {
+		pr_err("%s: unable to allocate memory for SDRAM self-refresh "
+		       "LP0/LP1 unavailable\n", __func__);
+		plat->suspend_mode = TEGRA_SUSPEND_LP2;
+	}
+	/* CPU reset vector for LP0 and LP1 */
+	writel(virt_to_phys(tegra_lp2_startup), pmc + PMC_SCRATCH41);
+
+	/* Always enable CPU power request; just normal polarity is supported */
+	reg = readl(pmc + PMC_CTRL);
+	BUG_ON(reg & (TEGRA_POWER_CPU_PWRREQ_POLARITY << TEGRA_POWER_PMC_SHIFT));
+	reg |= (TEGRA_POWER_CPU_PWRREQ_OE << TEGRA_POWER_PMC_SHIFT);
+	pmc_32kwritel(reg, PMC_CTRL);
+
+	/* Configure core power request and system clock control if LP0
+	   is supported */
+	writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
+	writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
+	reg = readl(pmc + PMC_CTRL);
+	mode = (reg >> TEGRA_POWER_PMC_SHIFT) & TEGRA_POWER_PMC_MASK;
+
+	mode &= ~TEGRA_POWER_SYSCLK_POLARITY;
+	mode &= ~TEGRA_POWER_PWRREQ_POLARITY;
+
+	if (!pdata->sysclkreq_high)
+		mode |= TEGRA_POWER_SYSCLK_POLARITY;
+	if (!pdata->corereq_high)
+		mode |= TEGRA_POWER_PWRREQ_POLARITY;
+
+	/* configure output inverters while the request is tristated */
+	reg |= (mode << TEGRA_POWER_PMC_SHIFT);
+	pmc_32kwritel(reg, PMC_CTRL);
+
+	/* now enable requests */
+	reg |= (TEGRA_POWER_SYSCLK_OE << TEGRA_POWER_PMC_SHIFT);
+	if (pdata->separate_req)
+		reg |= (TEGRA_POWER_PWRREQ_OE << TEGRA_POWER_PMC_SHIFT);
+	writel(reg, pmc + PMC_CTRL);
+
+	if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
+		lp0_suspend_init();
+
+	suspend_set_ops(&tegra_suspend_ops);
+#endif
+
+	current_suspend_mode = plat->suspend_mode;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
+ [TEGRA_SUSPEND_NONE] = "none",
+ [TEGRA_SUSPEND_LP2] = "lp2",
+ [TEGRA_SUSPEND_LP1] = "lp1",
+ [TEGRA_SUSPEND_LP0] = "lp0",
+};
+
+/* debugfs read: print the name of the mode stored at s->private */
+static int tegra_suspend_debug_show(struct seq_file *s, void *data)
+{
+	seq_printf(s, "%s\n", tegra_suspend_name[*(int *)s->private]);
+	return 0;
+}
+
+/* debugfs open: single-shot seq_file wrapper around the show routine */
+static int tegra_suspend_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tegra_suspend_debug_show, inode->i_private);
+}
+
+/*
+ * debugfs write: parse a suspend mode name ("none"/"lp2"/"lp1"/"lp0",
+ * compared case-insensitively by prefix) and store it at s->private.
+ * Modes deeper than the platform maximum (pdata->suspend_mode) are
+ * rejected with -EINVAL, as is unrecognized input.
+ *
+ * Fix vs. original: the function was declared to return int, but it is
+ * installed as file_operations.write, whose type is ssize_t (*)(...)
+ * — an incompatible-pointer-type mismatch. Return type is now ssize_t.
+ */
+static ssize_t tegra_suspend_debug_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size;
+	int i;
+	struct seq_file *s = file->private_data;
+	enum tegra_suspend_mode *val = s->private;
+
+	memset(buf, 0x00, sizeof(buf));
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	for (i = 0; i < TEGRA_MAX_SUSPEND_MODE; i++) {
+		if (!strnicmp(buf, tegra_suspend_name[i],
+		    strlen(tegra_suspend_name[i]))) {
+			if (i > pdata->suspend_mode)
+				return -EINVAL;
+			*val = i;
+			return count;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/* fops for the read-write "suspend_mode" debugfs file */
+static const struct file_operations tegra_suspend_debug_fops = {
+	.open		= tegra_suspend_debug_open,
+	.write		= tegra_suspend_debug_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * debugfs read: dump the log2 histogram of suspend durations collected
+ * in tegra_suspend_enter(); bin b covers [2^(b-1), 2^b) seconds.
+ */
+static int tegra_suspend_time_debug_show(struct seq_file *s, void *data)
+{
+	int bin;
+	seq_printf(s, "time (secs)  count\n");
+	seq_printf(s, "------------------\n");
+	for (bin = 0; bin < 32; bin++) {
+		if (tegra_time_in_suspend[bin] == 0)
+			continue;
+		seq_printf(s, "%4d - %4d %4u\n",
+			bin ? 1 << (bin - 1) : 0, 1 << bin,
+			tegra_time_in_suspend[bin]);
+	}
+	return 0;
+}
+
+/* debugfs open for the read-only suspend_time histogram */
+static int tegra_suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tegra_suspend_time_debug_show, NULL);
+}
+
+/* fops for the read-only "suspend_time" debugfs file */
+static const struct file_operations tegra_suspend_time_debug_fops = {
+	.open		= tegra_suspend_time_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Create the debugfs entries: "suspend_mode" (read/write, selects the
+ * mode used by tegra_suspend_enter) and "suspend_time" (read-only
+ * duration histogram). Returns 0, or -ENOMEM if creation fails.
+ *
+ * Fix vs. original: both files were created mode 0755 — execute bits are
+ * meaningless on regular files. Use 0644 for the writable control file
+ * and 0444 for the read-only histogram.
+ */
+static int __init tegra_suspend_debug_init(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("suspend_mode", 0644, NULL,
+		(void *)&current_suspend_mode, &tegra_suspend_debug_fops);
+	if (!d) {
+		pr_info("Failed to create suspend_mode debug file\n");
+		return -ENOMEM;
+	}
+
+	d = debugfs_create_file("suspend_time", 0444, NULL, NULL,
+		&tegra_suspend_time_debug_fops);
+	if (!d) {
+		pr_info("Failed to create suspend_time debug file\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(tegra_suspend_debug_init);
+#endif
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/hrtimer.h>
+#include <linux/clk.h>
#include <asm/clkdev.h>
#include <mach/iomap.h>
+#include <mach/suspend.h>
#include "clock.h"
+#include "fuse.h"
+#include "tegra2_emc.h"
#define RST_DEVICES 0x004
#define RST_DEVICES_SET 0x300
#define RST_DEVICES_CLR 0x304
+#define RST_DEVICES_NUM 3
#define CLK_OUT_ENB 0x010
#define CLK_OUT_ENB_SET 0x320
#define CLK_OUT_ENB_CLR 0x324
+#define CLK_OUT_ENB_NUM 3
+
+#define CLK_MASK_ARM 0x44
+#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_FREQ_DET 0x58
#define OSC_FREQ_DET_TRIG (1<<31)
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define PERIPH_CLK_SOURCE_I2S1 0x100
+#define PERIPH_CLK_SOURCE_EMC 0x19c
+#define PERIPH_CLK_SOURCE_OSC 0x1fc
+#define PERIPH_CLK_SOURCE_NUM \
+ ((PERIPH_CLK_SOURCE_OSC - PERIPH_CLK_SOURCE_I2S1) / 4)
+
#define PERIPH_CLK_SOURCE_MASK (3<<30)
#define PERIPH_CLK_SOURCE_SHIFT 30
#define PERIPH_CLK_SOURCE_ENABLE (1<<28)
-#define PERIPH_CLK_SOURCE_DIV_MASK 0xFF
+#define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF
+#define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF
#define PERIPH_CLK_SOURCE_DIV_SHIFT 0
+#define SDMMC_CLK_INT_FB_SEL (1 << 23)
+#define SDMMC_CLK_INT_FB_DLY_SHIFT 16
+#define SDMMC_CLK_INT_FB_DLY_MASK (0xF << SDMMC_CLK_INT_FB_DLY_SHIFT)
+
#define PLL_BASE 0x0
#define PLL_BASE_BYPASS (1<<31)
#define PLL_BASE_ENABLE (1<<30)
#define PLL_BASE_REF_ENABLE (1<<29)
#define PLL_BASE_OVERRIDE (1<<28)
-#define PLL_BASE_LOCK (1<<27)
#define PLL_BASE_DIVP_MASK (0x7<<20)
#define PLL_BASE_DIVP_SHIFT 20
#define PLL_BASE_DIVN_MASK (0x3FF<<8)
#define PLL_OUT_RESET_DISABLE (1<<0)
#define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc)
+
#define PLL_MISC_DCCON_SHIFT 20
-#define PLL_MISC_LOCK_ENABLE (1<<18)
#define PLL_MISC_CPCON_SHIFT 8
#define PLL_MISC_CPCON_MASK (0xF<<PLL_MISC_CPCON_SHIFT)
#define PLL_MISC_LFCON_SHIFT 4
#define PLL_MISC_VCOCON_SHIFT 0
#define PLL_MISC_VCOCON_MASK (0xF<<PLL_MISC_VCOCON_SHIFT)
+#define PLLU_BASE_POST_DIV (1<<20)
+
#define PLLD_MISC_CLKENABLE (1<<30)
#define PLLD_MISC_DIV_RST (1<<23)
#define PLLD_MISC_DCCON_SHIFT 12
-#define PERIPH_CLK_TO_ENB_REG(c) ((c->clk_num / 32) * 4)
-#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->clk_num / 32) * 8)
-#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->clk_num % 32))
+#define PERIPH_CLK_TO_ENB_REG(c) ((c->u.periph.clk_num / 32) * 4)
+#define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->u.periph.clk_num / 32) * 8)
+#define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->u.periph.clk_num % 32))
#define SUPER_CLK_MUX 0x00
#define SUPER_STATE_SHIFT 28
#define BUS_CLK_DISABLE (1<<3)
#define BUS_CLK_DIV_MASK 0x3
+#define PMC_CTRL 0x0
+ #define PMC_CTRL_BLINK_ENB (1 << 7)
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+ #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20)
+
+#define PMC_BLINK_TIMER_DATA_ON_SHIFT 0
+#define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff
+#define PMC_BLINK_TIMER_ENB (1 << 15)
+#define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16
+#define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff
+
static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
+
+/*
+ * Some peripheral clocks share an enable bit, so refcount the enable bits
+ * in registers CLK_ENABLE_L, CLK_ENABLE_H, and CLK_ENABLE_U
+ */
+static int tegra_periph_clk_enable_refcount[3 * 32];
#define clk_writel(value, reg) \
__raw_writel(value, (u32)reg_clk_base + (reg))
#define clk_readl(reg) \
__raw_readl((u32)reg_clk_base + (reg))
+#define pmc_writel(value, reg) \
+ __raw_writel(value, (u32)reg_pmc_base + (reg))
+#define pmc_readl(reg) \
+ __raw_readl((u32)reg_pmc_base + (reg))
unsigned long clk_measure_input_freq(void)
{
}
}
-static int clk_div71_get_divider(struct clk *c, unsigned long rate)
+static int clk_div71_get_divider(unsigned long parent_rate, unsigned long rate)
{
- unsigned long divider_u71;
+ s64 divider_u71 = parent_rate * 2;
+ divider_u71 += rate - 1;
+ do_div(divider_u71, rate);
- divider_u71 = DIV_ROUND_UP(c->rate * 2, rate);
+ if (divider_u71 - 2 < 0)
+ return 0;
- if (divider_u71 - 2 > 255 || divider_u71 - 2 < 0)
+ if (divider_u71 - 2 > 255)
return -EINVAL;
return divider_u71 - 2;
}
-static unsigned long tegra2_clk_recalculate_rate(struct clk *c)
+static int clk_div16_get_divider(unsigned long parent_rate, unsigned long rate)
{
- unsigned long rate;
- rate = c->parent->rate;
+ s64 divider_u16;
- if (c->mul != 0 && c->div != 0)
- c->rate = rate * c->mul / c->div;
- else
- c->rate = rate;
- return c->rate;
-}
+ divider_u16 = parent_rate;
+ divider_u16 += rate - 1;
+ do_div(divider_u16, rate);
+
+ if (divider_u16 - 1 < 0)
+ return 0;
+ if (divider_u16 - 1 > 255)
+ return -EINVAL;
+
+ return divider_u16 - 1;
+}
/* clk_m functions */
static unsigned long tegra2_clk_m_autodetect_rate(struct clk *c)
.disable = tegra2_clk_m_disable,
};
+void tegra2_periph_reset_assert(struct clk *c)
+{
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, true);
+}
+
+void tegra2_periph_reset_deassert(struct clk *c)
+{
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, false);
+}
+
/* super clock functions */
/* "super clocks" on tegra have two-stage muxes and a clock skipping
* super divider. We will ignore the clock skipping divider, since we
}
BUG_ON(sel->input == NULL);
c->parent = sel->input;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_super_clk_enable(struct clk *c)
u32 val;
const struct clk_mux_sel *sel;
int shift;
+
val = clk_readl(c->reg + SUPER_CLK_MUX);;
BUG_ON(((val & SUPER_STATE_MASK) != SUPER_STATE_RUN) &&
((val & SUPER_STATE_MASK) != SUPER_STATE_IDLE));
SUPER_IDLE_SOURCE_SHIFT : SUPER_RUN_SOURCE_SHIFT;
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
- clk_reparent(c, p);
val &= ~(SUPER_SOURCE_MASK << shift);
val |= sel->value << shift;
+
+ if (c->refcnt)
+ clk_enable(p);
+
clk_writel(val, c->reg);
- c->rate = c->parent->rate;
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
return 0;
}
}
return -EINVAL;
}
+/*
+ * Super clocks have "clock skippers" instead of dividers. Dividing using
+ * a clock skipper does not allow the voltage to be scaled down, so instead
+ * adjust the rate of the parent clock. This requires that the parent of a
+ * super clock have no other children, otherwise the rate will change
+ * underneath the other children.
+ */
+static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
static struct clk_ops tegra_super_ops = {
.init = tegra2_super_clk_init,
.enable = tegra2_super_clk_enable,
.disable = tegra2_super_clk_disable,
.set_parent = tegra2_super_clk_set_parent,
- .recalculate_rate = tegra2_clk_recalculate_rate,
+ .set_rate = tegra2_super_clk_set_rate,
+};
+
+/* virtual cpu clock functions */
+/* some clocks can not be stopped (cpu, memory bus) while the SoC is running.
+ To change the frequency of these clocks, the parent pll may need to be
+ reprogrammed, so the clock must be moved off the pll, the pll reprogrammed,
+ and then the clock moved back to the pll. To hide this sequence, a virtual
+ clock handles it.
+ */
+static void tegra2_cpu_clk_init(struct clk *c)
+{
+}
+
+static int tegra2_cpu_clk_enable(struct clk *c)
+{
+ return 0;
+}
+
+static void tegra2_cpu_clk_disable(struct clk *c)
+{
+ pr_debug("%s on clock %s\n", __func__, c->name);
+
+ /* oops - don't disable the CPU clock! */
+ BUG();
+}
+
+static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ /*
+ * Take an extra reference to the main pll so it doesn't turn
+ * off when we move the cpu off of it
+ */
+ clk_enable(c->u.cpu.main);
+
+ ret = clk_set_parent(c->parent, c->u.cpu.backup);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name);
+ goto out;
+ }
+
+ if (rate == clk_get_rate(c->u.cpu.backup))
+ goto out;
+
+ ret = clk_set_rate(c->u.cpu.main, rate);
+ if (ret) {
+ pr_err("Failed to change cpu pll to %lu\n", rate);
+ goto out;
+ }
+
+ ret = clk_set_parent(c->parent, c->u.cpu.main);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name);
+ goto out;
+ }
+
+out:
+ clk_disable(c->u.cpu.main);
+ return ret;
+}
+
+static struct clk_ops tegra_cpu_ops = {
+ .init = tegra2_cpu_clk_init,
+ .enable = tegra2_cpu_clk_enable,
+ .disable = tegra2_cpu_clk_disable,
+ .set_rate = tegra2_cpu_clk_set_rate,
+};
+
+/* virtual cop clock functions. Used to acquire the fake 'cop' clock to
+ * reset the COP block (i.e. AVP) */
+static void tegra2_cop_clk_reset(struct clk *c, bool assert)
+{
+ unsigned long reg = assert ? RST_DEVICES_SET : RST_DEVICES_CLR;
+
+ pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert");
+ clk_writel(1 << 1, reg);
+}
+
+static struct clk_ops tegra_cop_ops = {
+ .reset = tegra2_cop_clk_reset,
};
/* bus clock functions */
c->state = ((val >> c->reg_shift) & BUS_CLK_DISABLE) ? OFF : ON;
c->div = ((val >> c->reg_shift) & BUS_CLK_DIV_MASK) + 1;
c->mul = 1;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_bus_clk_enable(struct clk *c)
static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val = clk_readl(c->reg);
- unsigned long parent_rate = c->parent->rate;
+ unsigned long parent_rate = clk_get_rate(c->parent);
int i;
for (i = 1; i <= 4; i++) {
if (rate == parent_rate / i) {
.enable = tegra2_bus_clk_enable,
.disable = tegra2_bus_clk_disable,
.set_rate = tegra2_bus_clk_set_rate,
- .recalculate_rate = tegra2_clk_recalculate_rate,
};
-/* PLL Functions */
-static unsigned long tegra2_pll_clk_recalculate_rate(struct clk *c)
-{
- u64 rate;
- rate = c->parent->rate;
- rate *= c->n;
- do_div(rate, c->m);
- if (c->p == 2)
- rate >>= 1;
- c->rate = rate;
- return c->rate;
+/* Blink output functions */
+
+static void tegra2_blink_clk_init(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_CTRL);
+ c->state = (val & PMC_CTRL_BLINK_ENB) ? ON : OFF;
+ c->mul = 1;
+ val = pmc_readl(c->reg);
+
+ if (val & PMC_BLINK_TIMER_ENB) {
+ unsigned int on_off;
+
+ on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) &
+ PMC_BLINK_TIMER_DATA_ON_MASK;
+ val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off += val;
+ /* each tick in the blink timer is 4 32KHz clocks */
+ c->div = on_off * 4;
+ } else {
+ c->div = 1;
+ }
}
-static int tegra2_pll_clk_wait_for_lock(struct clk *c)
+static int tegra2_blink_clk_enable(struct clk *c)
{
- ktime_t before;
+ u32 val;
- before = ktime_get();
- while (!(clk_readl(c->reg + PLL_BASE) & PLL_BASE_LOCK)) {
- if (ktime_us_delta(ktime_get(), before) > 5000) {
- pr_err("Timed out waiting for lock bit on pll %s",
- c->name);
- return -1;
- }
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ return 0;
+}
+
+static void tegra2_blink_clk_disable(struct clk *c)
+{
+ u32 val;
+
+ val = pmc_readl(PMC_CTRL);
+ pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL);
+
+ val = pmc_readl(PMC_DPD_PADS_ORIDE);
+ pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE);
+}
+
+static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ if (rate >= parent_rate) {
+ c->div = 1;
+ pmc_writel(0, c->reg);
+ } else {
+ unsigned int on_off;
+ u32 val;
+
+ on_off = DIV_ROUND_UP(parent_rate / 8, rate);
+ c->div = on_off * 8;
+
+ val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) <<
+ PMC_BLINK_TIMER_DATA_ON_SHIFT;
+ on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK;
+ on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT;
+ val |= on_off;
+ val |= PMC_BLINK_TIMER_ENB;
+ pmc_writel(val, c->reg);
}
return 0;
}
+static struct clk_ops tegra_blink_clk_ops = {
+ .init = &tegra2_blink_clk_init,
+ .enable = &tegra2_blink_clk_enable,
+ .disable = &tegra2_blink_clk_disable,
+ .set_rate = &tegra2_blink_clk_set_rate,
+};
+
+/* PLL Functions */
+static int tegra2_pll_clk_wait_for_lock(struct clk *c)
+{
+ udelay(c->u.pll.lock_delay);
+
+ return 0;
+}
+
static void tegra2_pll_clk_init(struct clk *c)
{
u32 val = clk_readl(c->reg + PLL_BASE);
if (c->flags & PLL_FIXED && !(val & PLL_BASE_OVERRIDE)) {
pr_warning("Clock %s has unknown fixed frequency\n", c->name);
- c->n = 1;
- c->m = 0;
- c->p = 1;
+ c->mul = 1;
+ c->div = 1;
} else if (val & PLL_BASE_BYPASS) {
- c->n = 1;
- c->m = 1;
- c->p = 1;
+ c->mul = 1;
+ c->div = 1;
} else {
- c->n = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
- c->m = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
- c->p = (val & PLL_BASE_DIVP_MASK) ? 2 : 1;
+ c->mul = (val & PLL_BASE_DIVN_MASK) >> PLL_BASE_DIVN_SHIFT;
+ c->div = (val & PLL_BASE_DIVM_MASK) >> PLL_BASE_DIVM_SHIFT;
+ if (c->flags & PLLU)
+ c->div *= (val & PLLU_BASE_POST_DIV) ? 1 : 2;
+ else
+ c->div *= (val & PLL_BASE_DIVP_MASK) ? 2 : 1;
}
-
- val = clk_readl(c->reg + PLL_MISC(c));
- if (c->flags & PLL_HAS_CPCON)
- c->cpcon = (val & PLL_MISC_CPCON_MASK) >> PLL_MISC_CPCON_SHIFT;
-
- tegra2_pll_clk_recalculate_rate(c);
}
static int tegra2_pll_clk_enable(struct clk *c)
val |= PLL_BASE_ENABLE;
clk_writel(val, c->reg + PLL_BASE);
- val = clk_readl(c->reg + PLL_MISC(c));
- val |= PLL_MISC_LOCK_ENABLE;
- clk_writel(val, c->reg + PLL_MISC(c));
-
tegra2_pll_clk_wait_for_lock(c);
return 0;
{
u32 val;
unsigned long input_rate;
- const struct clk_pll_table *sel;
+ const struct clk_pll_freq_table *sel;
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
- BUG_ON(c->refcnt != 0);
- input_rate = c->parent->rate;
- for (sel = c->pll_table; sel->input_rate != 0; sel++) {
+ input_rate = clk_get_rate(c->parent);
+ for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
if (sel->input_rate == input_rate && sel->output_rate == rate) {
- c->n = sel->n;
- c->m = sel->m;
- c->p = sel->p;
- c->cpcon = sel->cpcon;
+ c->mul = sel->n;
+ c->div = sel->m * sel->p;
val = clk_readl(c->reg + PLL_BASE);
if (c->flags & PLL_FIXED)
val |= PLL_BASE_OVERRIDE;
val &= ~(PLL_BASE_DIVP_MASK | PLL_BASE_DIVN_MASK |
PLL_BASE_DIVM_MASK);
- val |= (c->m << PLL_BASE_DIVM_SHIFT) |
- (c->n << PLL_BASE_DIVN_SHIFT);
- BUG_ON(c->p > 2);
- if (c->p == 2)
- val |= 1 << PLL_BASE_DIVP_SHIFT;
+ val |= (sel->m << PLL_BASE_DIVM_SHIFT) |
+ (sel->n << PLL_BASE_DIVN_SHIFT);
+ BUG_ON(sel->p < 1 || sel->p > 2);
+ if (c->flags & PLLU) {
+ if (sel->p == 1)
+ val |= PLLU_BASE_POST_DIV;
+ } else {
+ if (sel->p == 2)
+ val |= 1 << PLL_BASE_DIVP_SHIFT;
+ }
clk_writel(val, c->reg + PLL_BASE);
if (c->flags & PLL_HAS_CPCON) {
- val = c->cpcon << PLL_MISC_CPCON_SHIFT;
- val |= PLL_MISC_LOCK_ENABLE;
+ val = clk_readl(c->reg + PLL_MISC(c));
+ val &= ~PLL_MISC_CPCON_MASK;
+ val |= sel->cpcon << PLL_MISC_CPCON_SHIFT;
clk_writel(val, c->reg + PLL_MISC(c));
}
if (c->state == ON)
tegra2_pll_clk_enable(c);
- c->rate = rate;
return 0;
}
}
.enable = tegra2_pll_clk_enable,
.disable = tegra2_pll_clk_disable,
.set_rate = tegra2_pll_clk_set_rate,
- .recalculate_rate = tegra2_pll_clk_recalculate_rate,
+};
+
+static void tegra2_pllx_clk_init(struct clk *c)
+{
+ tegra2_pll_clk_init(c);
+
+ if (tegra_sku_id() == 7)
+ c->max_rate = 750000000;
+}
+
+static struct clk_ops tegra_pllx_ops = {
+ .init = tegra2_pllx_clk_init,
+ .enable = tegra2_pll_clk_enable,
+ .disable = tegra2_pll_clk_disable,
+ .set_rate = tegra2_pll_clk_set_rate,
};
/* Clock divider ops */
c->div = 1;
c->mul = 1;
}
-
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_pll_div_clk_enable(struct clk *c)
u32 val;
u32 new_val;
int divider_u71;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent, rate);
+ divider_u71 = clk_div71_get_divider(parent_rate, rate);
if (divider_u71 >= 0) {
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
clk_writel(val, c->reg);
c->div = divider_u71 + 2;
c->mul = 2;
- tegra2_clk_recalculate_rate(c);
return 0;
}
} else if (c->flags & DIV_2) {
- if (c->parent->rate == rate * 2) {
- c->rate = rate;
+ if (parent_rate == rate * 2)
return 0;
- }
}
return -EINVAL;
}
+static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(parent_rate, rate);
+ if (divider < 0)
+ return divider;
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
+ } else if (c->flags & DIV_2) {
+ return DIV_ROUND_UP(parent_rate, 2);
+ }
+ return -EINVAL;
+}
static struct clk_ops tegra_pll_div_ops = {
.init = tegra2_pll_div_clk_init,
.enable = tegra2_pll_div_clk_enable,
.disable = tegra2_pll_div_clk_disable,
.set_rate = tegra2_pll_div_clk_set_rate,
- .recalculate_rate = tegra2_clk_recalculate_rate,
+ .round_rate = tegra2_pll_div_clk_round_rate,
};
/* Periph clk ops */
}
if (c->flags & DIV_U71) {
- u32 divu71 = val & PERIPH_CLK_SOURCE_DIV_MASK;
+ u32 divu71 = val & PERIPH_CLK_SOURCE_DIVU71_MASK;
c->div = divu71 + 2;
c->mul = 2;
+ } else if (c->flags & DIV_U16) {
+ u32 divu16 = val & PERIPH_CLK_SOURCE_DIVU16_MASK;
+ c->div = divu16 + 1;
+ c->mul = 1;
} else {
c->div = 1;
c->mul = 1;
if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c))
c->state = OFF;
- tegra2_clk_recalculate_rate(c);
}
static int tegra2_periph_clk_enable(struct clk *c)
u32 val;
pr_debug("%s on clock %s\n", __func__, c->name);
+ tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++;
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] > 1)
+ return 0;
+
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET))
{
pr_debug("%s on clock %s\n", __func__, c->name);
- clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
-}
+ if (c->refcnt)
+ tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--;
-void tegra2_periph_reset_deassert(struct clk *c)
-{
- pr_debug("%s on clock %s\n", __func__, c->name);
- if (!(c->flags & PERIPH_NO_RESET))
+ if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0)
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- RST_DEVICES_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+ CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
}
-void tegra2_periph_reset_assert(struct clk *c)
+static void tegra2_periph_clk_reset(struct clk *c, bool assert)
{
- pr_debug("%s on clock %s\n", __func__, c->name);
+ unsigned long base = assert ? RST_DEVICES_SET : RST_DEVICES_CLR;
+
+ pr_debug("%s %s on clock %s\n", __func__,
+ assert ? "assert" : "deassert", c->name);
if (!(c->flags & PERIPH_NO_RESET))
clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
- RST_DEVICES_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
+ base + PERIPH_CLK_TO_ENB_SET_REG(c));
}
-
static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
{
u32 val;
pr_debug("%s: %s %s\n", __func__, c->name, p->name);
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
- clk_reparent(c, p);
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_MASK;
val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
+
+ if (c->refcnt)
+ clk_enable(p);
+
clk_writel(val, c->reg);
- c->rate = c->parent->rate;
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
return 0;
}
}
static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
- int divider_u71;
- pr_debug("%s: %lu\n", __func__, rate);
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+
if (c->flags & DIV_U71) {
- divider_u71 = clk_div71_get_divider(c->parent, rate);
- if (divider_u71 >= 0) {
+ divider = clk_div71_get_divider(parent_rate, rate);
+ if (divider >= 0) {
val = clk_readl(c->reg);
- val &= ~PERIPH_CLK_SOURCE_DIV_MASK;
- val |= divider_u71;
+ val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
+ val |= divider;
clk_writel(val, c->reg);
- c->div = divider_u71 + 2;
+ c->div = divider + 2;
c->mul = 2;
- tegra2_clk_recalculate_rate(c);
return 0;
}
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(parent_rate, rate);
+ if (divider >= 0) {
+ val = clk_readl(c->reg);
+ val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
+ val |= divider;
+ clk_writel(val, c->reg);
+ c->div = divider + 1;
+ c->mul = 1;
+ return 0;
+ }
+ } else if (parent_rate <= rate) {
+ c->div = 1;
+ c->mul = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static long tegra2_periph_clk_round_rate(struct clk *c,
+ unsigned long rate)
+{
+ int divider;
+ unsigned long parent_rate = clk_get_rate(c->parent);
+ pr_debug("%s: %s %lu\n", __func__, c->name, rate);
+
+ if (c->flags & DIV_U71) {
+ divider = clk_div71_get_divider(parent_rate, rate);
+ if (divider < 0)
+ return divider;
+
+ return DIV_ROUND_UP(parent_rate * 2, divider + 2);
+ } else if (c->flags & DIV_U16) {
+ divider = clk_div16_get_divider(parent_rate, rate);
+ if (divider < 0)
+ return divider;
+ return DIV_ROUND_UP(parent_rate, divider + 1);
}
return -EINVAL;
}
.disable = &tegra2_periph_clk_disable,
.set_parent = &tegra2_periph_clk_set_parent,
.set_rate = &tegra2_periph_clk_set_rate,
- .recalculate_rate = &tegra2_clk_recalculate_rate,
+ .round_rate = &tegra2_periph_clk_round_rate,
+ .reset = &tegra2_periph_clk_reset,
+};
+
+/* The SDMMC controllers have extra bits in the clock source register that
+ * adjust the delay between the clock and data to compensate for delays
+ * on the PCB. */
+void tegra2_sdmmc_tap_delay(struct clk *c, int delay) {
+ u32 reg;
+
+ delay = clamp(delay, 0, 15);
+ reg = clk_readl(c->reg);
+ reg &= ~SDMMC_CLK_INT_FB_DLY_MASK;
+ reg |= SDMMC_CLK_INT_FB_SEL;
+ reg |= delay << SDMMC_CLK_INT_FB_DLY_SHIFT;
+ clk_writel(reg, c->reg);
+}
+
+/* External memory controller clock ops */
+static void tegra2_emc_clk_init(struct clk *c)
+{
+ tegra2_periph_clk_init(c);
+ c->max_rate = clk_get_rate_locked(c);
+}
+
+static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ long new_rate = rate;
+
+ new_rate = tegra_emc_round_rate(new_rate);
+ if (new_rate < 0)
+ return c->max_rate;
+
+ BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate));
+
+ return new_rate;
+}
+
+static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ int ret;
+ /* The Tegra2 memory controller has an interlock with the clock
+ * block that allows memory shadowed registers to be updated,
+ * and then transfer them to the main registers at the same
+ * time as the clock update without glitches. */
+ ret = tegra_emc_set_rate(rate);
+ if (ret < 0)
+ return ret;
+
+ ret = tegra2_periph_clk_set_rate(c, rate);
+ udelay(1);
+
+ return ret;
+}
+
+static struct clk_ops tegra_emc_clk_ops = {
+ .init = &tegra2_emc_clk_init,
+ .enable = &tegra2_periph_clk_enable,
+ .disable = &tegra2_periph_clk_disable,
+ .set_parent = &tegra2_periph_clk_set_parent,
+ .set_rate = &tegra2_emc_clk_set_rate,
+ .round_rate = &tegra2_emc_clk_round_rate,
+ .reset = &tegra2_periph_clk_reset,
};
/* Clock doubler ops */
if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
PERIPH_CLK_TO_ENB_BIT(c)))
c->state = OFF;
- tegra2_clk_recalculate_rate(c);
};
+static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate)
+{
+ if (rate != 2 * clk_get_rate(c->parent))
+ return -EINVAL;
+ c->mul = 2;
+ c->div = 1;
+ return 0;
+}
+
static struct clk_ops tegra_clk_double_ops = {
.init = &tegra2_clk_double_init,
.enable = &tegra2_periph_clk_enable,
.disable = &tegra2_periph_clk_disable,
- .recalculate_rate = &tegra2_clk_recalculate_rate,
+ .set_rate = &tegra2_clk_double_set_rate,
+};
+
+/* Audio sync clock ops */
+static void tegra2_audio_sync_clk_init(struct clk *c)
+{
+ int source;
+ const struct clk_mux_sel *sel;
+ u32 val = clk_readl(c->reg);
+ c->state = (val & (1<<4)) ? OFF : ON;
+ source = val & 0xf;
+ for (sel = c->inputs; sel->input != NULL; sel++)
+ if (sel->value == source)
+ break;
+ BUG_ON(sel->input == NULL);
+ c->parent = sel->input;
+}
+
+static int tegra2_audio_sync_clk_enable(struct clk *c)
+{
+ clk_writel(0, c->reg);
+ return 0;
+}
+
+static void tegra2_audio_sync_clk_disable(struct clk *c)
+{
+ clk_writel(1, c->reg);
+}
+
+static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
+{
+ u32 val;
+ const struct clk_mux_sel *sel;
+ for (sel = c->inputs; sel->input != NULL; sel++) {
+ if (sel->input == p) {
+ val = clk_readl(c->reg);
+ val &= ~0xf;
+ val |= sel->value;
+
+ if (c->refcnt)
+ clk_enable(p);
+
+ clk_writel(val, c->reg);
+
+ if (c->refcnt && c->parent)
+ clk_disable(c->parent);
+
+ clk_reparent(c, p);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct clk_ops tegra_audio_sync_clk_ops = {
+ .init = tegra2_audio_sync_clk_init,
+ .enable = tegra2_audio_sync_clk_enable,
+ .disable = tegra2_audio_sync_clk_disable,
+ .set_parent = tegra2_audio_sync_clk_set_parent,
+};
+
+/* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */
+
+static void tegra2_cdev_clk_init(struct clk *c)
+{
+ /* We could un-tristate the cdev1 or cdev2 pingroup here; this is
+ * currently done in the pinmux code. */
+ c->state = ON;
+ if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) &
+ PERIPH_CLK_TO_ENB_BIT(c)))
+ c->state = OFF;
+}
+
+static int tegra2_cdev_clk_enable(struct clk *c)
+{
+ clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
+ CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c));
+ return 0;
+}
+
+static void tegra2_cdev_clk_disable(struct clk *c)
+{
+ clk_writel(PERIPH_CLK_TO_ENB_BIT(c),
+ CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c));
+}
+
+static struct clk_ops tegra_cdev_clk_ops = {
+ .init = &tegra2_cdev_clk_init,
+ .enable = &tegra2_cdev_clk_enable,
+ .disable = &tegra2_cdev_clk_disable,
+};
+
+/* shared bus ops */
+/*
+ * Some clocks may have multiple downstream users that need to request a
+ * higher clock rate. Shared bus clocks provide a unique shared_bus_user
+ * clock to each user. The frequency of the bus is set to the highest
+ * enabled shared_bus_user clock, with a minimum value set by the
+ * shared bus.
+ */
+static void tegra_clk_shared_bus_update(struct clk *bus)
+{
+ struct clk *c;
+ unsigned long rate = bus->min_rate;
+
+ list_for_each_entry(c, &bus->shared_bus_list,
+ u.shared_bus_user.node) {
+ if (c->u.shared_bus_user.enabled)
+ rate = max(c->u.shared_bus_user.rate, rate);
+ }
+
+ if (rate != clk_get_rate(bus))
+ clk_set_rate(bus, rate);
+};
+
+static void tegra_clk_shared_bus_init(struct clk *c)
+{
+ c->max_rate = c->parent->max_rate;
+ c->u.shared_bus_user.rate = c->parent->max_rate;
+ c->state = OFF;
+ c->set = true;
+
+ list_add_tail(&c->u.shared_bus_user.node,
+ &c->parent->shared_bus_list);
+}
+
+static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
+{
+ c->u.shared_bus_user.rate = rate;
+ tegra_clk_shared_bus_update(c->parent);
+ return 0;
+}
+
+static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate)
+{
+ return clk_round_rate(c->parent, rate);
+}
+
+static int tegra_clk_shared_bus_enable(struct clk *c)
+{
+ c->u.shared_bus_user.enabled = true;
+ tegra_clk_shared_bus_update(c->parent);
+ return 0;
+}
+
+static void tegra_clk_shared_bus_disable(struct clk *c)
+{
+ c->u.shared_bus_user.enabled = false;
+ tegra_clk_shared_bus_update(c->parent);
+}
+
+static struct clk_ops tegra_clk_shared_bus_ops = {
+ .init = tegra_clk_shared_bus_init,
+ .enable = tegra_clk_shared_bus_enable,
+ .disable = tegra_clk_shared_bus_disable,
+ .set_rate = tegra_clk_shared_bus_set_rate,
+ .round_rate = tegra_clk_shared_bus_round_rate,
};
+
/* Clock definitions */
static struct clk tegra_clk_32k = {
.name = "clk_32k",
- .rate = 32678,
+ .rate = 32768,
.ops = NULL,
+ .max_rate = 32768,
};
-static struct clk_pll_table tegra_pll_s_table[] = {
+static struct clk_pll_freq_table tegra_pll_s_freq_table[] = {
{32768, 12000000, 366, 1, 1, 0},
{32768, 13000000, 397, 1, 1, 0},
{32768, 19200000, 586, 1, 1, 0},
.name = "pll_s",
.flags = PLL_ALT_MISC_REG,
.ops = &tegra_pll_ops,
- .reg = 0xf0,
- .input_min = 32768,
- .input_max = 32768,
.parent = &tegra_clk_32k,
- .cf_min = 0, /* FIXME */
- .cf_max = 0, /* FIXME */
- .vco_min = 12000000,
- .vco_max = 26000000,
- .pll_table = tegra_pll_s_table,
+ .max_rate = 26000000,
+ .reg = 0xf0,
+ .u.pll = {
+ .input_min = 32768,
+ .input_max = 32768,
+ .cf_min = 0, /* FIXME */
+ .cf_max = 0, /* FIXME */
+ .vco_min = 12000000,
+ .vco_max = 26000000,
+ .freq_table = tegra_pll_s_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk_mux_sel tegra_clk_m_sel[] = {
{ .input = &tegra_pll_s, .value = 1},
{ 0, 0},
};
+
static struct clk tegra_clk_m = {
.name = "clk_m",
.flags = ENABLE_ON_INIT,
.ops = &tegra_clk_m_ops,
.inputs = tegra_clk_m_sel,
.reg = 0x1fc,
- .reg_mask = (1<<28),
.reg_shift = 28,
+ .max_rate = 26000000,
};
-static struct clk_pll_table tegra_pll_c_table[] = {
+static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
{ 0, 0, 0, 0, 0, 0 },
};
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0x80,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_c_table,
+ .max_rate = 600000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_c_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_c_out1 = {
.parent = &tegra_pll_c,
.reg = 0x84,
.reg_shift = 0,
+ .max_rate = 600000000,
};
-static struct clk_pll_table tegra_pll_m_table[] = {
+static struct clk_pll_freq_table tegra_pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
{ 0, 0, 0, 0, 0, 0 },
};
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0x90,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1200000000,
- .pll_table = tegra_pll_m_table,
+ .max_rate = 800000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .freq_table = tegra_pll_m_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_m_out1 = {
.parent = &tegra_pll_m,
.reg = 0x94,
.reg_shift = 0,
+ .max_rate = 600000000,
};
-static struct clk_pll_table tegra_pll_p_table[] = {
+static struct clk_pll_freq_table tegra_pll_p_freq_table[] = {
{ 12000000, 216000000, 432, 12, 2, 8},
{ 13000000, 216000000, 432, 13, 2, 8},
{ 19200000, 216000000, 90, 4, 2, 1},
.flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0xa0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_p_table,
+ .max_rate = 432000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_p_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_p_out1 = {
.parent = &tegra_pll_p,
.reg = 0xa4,
.reg_shift = 0,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out2 = {
.parent = &tegra_pll_p,
.reg = 0xa4,
.reg_shift = 16,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out3 = {
.parent = &tegra_pll_p,
.reg = 0xa8,
.reg_shift = 0,
+ .max_rate = 432000000,
};
static struct clk tegra_pll_p_out4 = {
.parent = &tegra_pll_p,
.reg = 0xa8,
.reg_shift = 16,
+ .max_rate = 432000000,
};
-static struct clk_pll_table tegra_pll_a_table[] = {
+static struct clk_pll_freq_table tegra_pll_a_freq_table[] = {
{ 28800000, 56448000, 49, 25, 1, 1},
{ 28800000, 73728000, 64, 25, 1, 1},
- { 28800000, 11289600, 49, 25, 1, 1},
- { 28800000, 12288000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
{ 0, 0, 0, 0, 0, 0 },
};
.flags = PLL_HAS_CPCON,
.ops = &tegra_pll_ops,
.reg = 0xb0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_pll_p_out1,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1400000000,
- .pll_table = tegra_pll_a_table,
+ .max_rate = 73728000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .freq_table = tegra_pll_a_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_pll_a_out0 = {
.parent = &tegra_pll_a,
.reg = 0xb4,
.reg_shift = 0,
+ .max_rate = 73728000,
};
-static struct clk_pll_table tegra_pll_d_table[] = {
+static struct clk_pll_freq_table tegra_pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 19200000, 216000000, 135, 12, 1, 3},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
{ 19200000, 1000000000, 625, 12, 1, 8},
{ 26000000, 1000000000, 1000, 26, 1, 12},
+
{ 0, 0, 0, 0, 0, 0 },
};
.flags = PLL_HAS_CPCON | PLLD,
.ops = &tegra_pll_ops,
.reg = 0xd0,
- .input_min = 2000000,
- .input_max = 40000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 40000000,
- .vco_max = 1000000000,
- .pll_table = tegra_pll_d_table,
+ .max_rate = 1000000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .freq_table = tegra_pll_d_freq_table,
+ .lock_delay = 1000,
+ },
};
static struct clk tegra_pll_d_out0 = {
.ops = &tegra_pll_div_ops,
.flags = DIV_2 | PLLD,
.parent = &tegra_pll_d,
+ .max_rate = 500000000,
};
-static struct clk_pll_table tegra_pll_u_table[] = {
- { 12000000, 480000000, 960, 12, 1, 0},
- { 13000000, 480000000, 960, 13, 1, 0},
- { 19200000, 480000000, 200, 4, 1, 0},
- { 26000000, 480000000, 960, 26, 1, 0},
+static struct clk_pll_freq_table tegra_pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 0},
+ { 13000000, 480000000, 960, 13, 2, 0},
+ { 19200000, 480000000, 200, 4, 2, 0},
+ { 26000000, 480000000, 960, 26, 2, 0},
{ 0, 0, 0, 0, 0, 0 },
};
static struct clk tegra_pll_u = {
.name = "pll_u",
- .flags = 0,
+ .flags = PLLU,
.ops = &tegra_pll_ops,
.reg = 0xc0,
- .input_min = 2000000,
- .input_max = 40000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 480000000,
- .vco_max = 960000000,
- .pll_table = tegra_pll_u_table,
+ .max_rate = 480000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .freq_table = tegra_pll_u_freq_table,
+ .lock_delay = 1000,
+ },
};
-static struct clk_pll_table tegra_pll_x_table[] = {
+static struct clk_pll_freq_table tegra_pll_x_freq_table[] = {
+ /* 1 GHz */
{ 12000000, 1000000000, 1000, 12, 1, 12},
{ 13000000, 1000000000, 1000, 13, 1, 12},
{ 19200000, 1000000000, 625, 12, 1, 8},
{ 26000000, 1000000000, 1000, 26, 1, 12},
- { 12000000, 750000000, 750, 12, 1, 12},
- { 13000000, 750000000, 750, 13, 1, 12},
- { 19200000, 750000000, 625, 16, 1, 8},
- { 26000000, 750000000, 750, 26, 1, 12},
+
+ /* 912 MHz */
+ { 12000000, 912000000, 912, 12, 1, 12},
+ { 13000000, 912000000, 912, 13, 1, 12},
+ { 19200000, 912000000, 760, 16, 1, 8},
+ { 26000000, 912000000, 912, 26, 1, 12},
+
+ /* 816 MHz */
+ { 12000000, 816000000, 816, 12, 1, 12},
+ { 13000000, 816000000, 816, 13, 1, 12},
+ { 19200000, 816000000, 680, 16, 1, 8},
+ { 26000000, 816000000, 816, 26, 1, 12},
+
+ /* 760 MHz */
+ { 12000000, 760000000, 760, 12, 1, 12},
+ { 13000000, 760000000, 760, 13, 1, 12},
+ { 19200000, 760000000, 950, 24, 1, 8},
+ { 26000000, 760000000, 760, 26, 1, 12},
+
+ /* 608 MHz */
+ { 12000000, 608000000, 608, 12, 1, 12},
+ { 13000000, 608000000, 608, 13, 1, 12},
+ { 19200000, 608000000, 380, 12, 1, 8},
+ { 26000000, 608000000, 608, 26, 1, 12},
+
+ /* 456 MHz */
+ { 12000000, 456000000, 456, 12, 1, 12},
+ { 13000000, 456000000, 456, 13, 1, 12},
+ { 19200000, 456000000, 380, 16, 1, 8},
+ { 26000000, 456000000, 456, 26, 1, 12},
+
+ /* 312 MHz */
+ { 12000000, 312000000, 312, 12, 1, 12},
+ { 13000000, 312000000, 312, 13, 1, 12},
+ { 19200000, 312000000, 260, 16, 1, 8},
+ { 26000000, 312000000, 312, 26, 1, 12},
+
{ 0, 0, 0, 0, 0, 0 },
};
static struct clk tegra_pll_x = {
.name = "pll_x",
.flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG,
- .ops = &tegra_pll_ops,
+ .ops = &tegra_pllx_ops,
.reg = 0xe0,
- .input_min = 2000000,
- .input_max = 31000000,
.parent = &tegra_clk_m,
- .cf_min = 1000000,
- .cf_max = 6000000,
- .vco_min = 20000000,
- .vco_max = 1200000000,
- .pll_table = tegra_pll_x_table,
+ .max_rate = 1000000000,
+ .u.pll = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .freq_table = tegra_pll_x_freq_table,
+ .lock_delay = 300,
+ },
};
static struct clk tegra_clk_d = {
.name = "clk_d",
.flags = PERIPH_NO_RESET,
.ops = &tegra_clk_double_ops,
- .clk_num = 90,
.reg = 0x34,
.reg_shift = 12,
.parent = &tegra_clk_m,
+ .max_rate = 52000000,
+ .u.periph = {
+ .clk_num = 90,
+ },
+};
+
+/* dap_mclk1, belongs to the cdev1 pingroup. */
+static struct clk tegra_dev1_clk = {
+ .name = "clk_dev1",
+ .ops = &tegra_cdev_clk_ops,
+ .rate = 26000000,
+ .max_rate = 26000000,
+ .u.periph = {
+ .clk_num = 94,
+ },
+};
+
+/* dap_mclk2, belongs to the cdev2 pingroup. */
+static struct clk tegra_dev2_clk = {
+ .name = "clk_dev2",
+ .ops = &tegra_cdev_clk_ops,
+ .rate = 26000000,
+ .max_rate = 26000000,
+ .u.periph = {
+ .clk_num = 93,
+ },
+};
+
+/* initialized before peripheral clocks */
+static struct clk_mux_sel mux_audio_sync_clk[8+1];
+static const struct audio_sources {
+ const char *name;
+ int value;
+} mux_audio_sync_clk_sources[] = {
+ { .name = "spdif_in", .value = 0 },
+ { .name = "i2s1", .value = 1 },
+ { .name = "i2s2", .value = 2 },
+ { .name = "pll_a_out0", .value = 4 },
+#if 0 /* FIXME: not implemented */
+ { .name = "ac97", .value = 3 },
+ { .name = "ext_audio_clk2", .value = 5 },
+ { .name = "ext_audio_clk1", .value = 6 },
+ { .name = "ext_vimclk", .value = 7 },
+#endif
+ { 0, 0 }
+};
+
+static struct clk tegra_clk_audio = {
+ .name = "audio",
+ .inputs = mux_audio_sync_clk,
+ .reg = 0x38,
+ .max_rate = 73728000,
+ .ops = &tegra_audio_sync_clk_ops
};
-/* FIXME: need tegra_audio
static struct clk tegra_clk_audio_2x = {
- .name = "clk_d",
+ .name = "audio_2x",
.flags = PERIPH_NO_RESET,
+ .max_rate = 48000000,
.ops = &tegra_clk_double_ops,
- .clk_num = 89,
.reg = 0x34,
.reg_shift = 8,
- .parent = &tegra_audio,
+ .parent = &tegra_clk_audio,
+ .u.periph = {
+ .clk_num = 89,
+ },
+};
+
+struct clk_lookup tegra_audio_clk_lookups[] = {
+ { .con_id = "audio", .clk = &tegra_clk_audio },
+ { .con_id = "audio_2x", .clk = &tegra_clk_audio_2x }
+};
+
+/* This is called after peripheral clocks are initialized, as the
+ * audio_sync clock depends on some of the peripheral clocks.
+ */
+
+static void init_audio_sync_clock_mux(void)
+{
+ int i;
+ struct clk_mux_sel *sel = mux_audio_sync_clk;
+ const struct audio_sources *src = mux_audio_sync_clk_sources;
+ struct clk_lookup *lookup;
+
+ for (i = 0; src->name; i++, sel++, src++) {
+ sel->input = tegra_get_clock_by_name(src->name);
+ if (!sel->input)
+ pr_err("%s: could not find clk %s\n", __func__,
+ src->name);
+ sel->value = src->value;
+ }
+
+ lookup = tegra_audio_clk_lookups;
+ for (i = 0; i < ARRAY_SIZE(tegra_audio_clk_lookups); i++, lookup++) {
+ clk_init(lookup->clk);
+ clkdev_add(lookup);
+ }
}
-*/
static struct clk_mux_sel mux_cclk[] = {
{ .input = &tegra_clk_m, .value = 0},
{ 0, 0},
};
-static struct clk tegra_clk_cpu = {
- .name = "cpu",
+static struct clk tegra_clk_cclk = {
+ .name = "cclk",
.inputs = mux_cclk,
.reg = 0x20,
.ops = &tegra_super_ops,
+ .max_rate = 1000000000,
};
-static struct clk tegra_clk_sys = {
- .name = "sys",
+static struct clk tegra_clk_sclk = {
+ .name = "sclk",
.inputs = mux_sclk,
.reg = 0x28,
.ops = &tegra_super_ops,
+ .max_rate = 240000000,
+ .min_rate = 120000000,
+};
+
+static struct clk tegra_clk_virtual_cpu = {
+ .name = "cpu",
+ .parent = &tegra_clk_cclk,
+ .ops = &tegra_cpu_ops,
+ .max_rate = 1000000000,
+ .u.cpu = {
+ .main = &tegra_pll_x,
+ .backup = &tegra_pll_p,
+ },
+};
+
+static struct clk tegra_clk_cop = {
+ .name = "cop",
+ .parent = &tegra_clk_sclk,
+ .ops = &tegra_cop_ops,
+ .max_rate = 240000000,
};
static struct clk tegra_clk_hclk = {
.name = "hclk",
.flags = DIV_BUS,
- .parent = &tegra_clk_sys,
+ .parent = &tegra_clk_sclk,
.reg = 0x30,
.reg_shift = 4,
.ops = &tegra_bus_ops,
+ .max_rate = 240000000,
};
static struct clk tegra_clk_pclk = {
.reg = 0x30,
.reg_shift = 0,
.ops = &tegra_bus_ops,
+ .max_rate = 120000000,
+};
+
+static struct clk tegra_clk_blink = {
+ .name = "blink",
+ .parent = &tegra_clk_32k,
+ .reg = 0x40,
+ .ops = &tegra_blink_clk_ops,
+ .max_rate = 32768,
};
static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = {
{ 0, 0},
};
-static struct clk_mux_sel mux_plla_audio_pllp_clkm[] = {
- {.input = &tegra_pll_a, .value = 0},
- /* FIXME: no mux defined for tegra_audio
- {.input = &tegra_audio, .value = 1},*/
+static struct clk_mux_sel mux_pllaout0_audio2x_pllp_clkm[] = {
+ {.input = &tegra_pll_a_out0, .value = 0},
+ {.input = &tegra_clk_audio_2x, .value = 1},
{.input = &tegra_pll_p, .value = 2},
{.input = &tegra_clk_m, .value = 3},
{ 0, 0},
static struct clk_mux_sel mux_pllp_pllc_audio_clkm_clk32[] = {
{.input = &tegra_pll_p, .value = 0},
{.input = &tegra_pll_c, .value = 1},
- /* FIXME: no mux defined for tegra_audio
- {.input = &tegra_audio, .value = 2},*/
+ {.input = &tegra_clk_audio, .value = 2},
{.input = &tegra_clk_m, .value = 3},
{.input = &tegra_clk_32k, .value = 4},
{ 0, 0},
{ 0, 0},
};
-#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _inputs, _flags) \
+static struct clk tegra_clk_emc = {
+ .name = "emc",
+ .ops = &tegra_emc_clk_ops,
+ .reg = 0x19c,
+ .max_rate = 800000000,
+ .inputs = mux_pllm_pllc_pllp_clkm,
+ .flags = MUX | DIV_U71 | PERIPH_EMC_ENB,
+ .u.periph = {
+ .clk_num = 57,
+ },
+};
+
+#define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \
{ \
.name = _name, \
.lookup = { \
.con_id = _con, \
}, \
.ops = &tegra_periph_clk_ops, \
- .clk_num = _clk_num, \
.reg = _reg, \
.inputs = _inputs, \
.flags = _flags, \
+ .max_rate = _max, \
+ .u.periph = { \
+ .clk_num = _clk_num, \
+ }, \
}
-struct clk tegra_periph_clks[] = {
- PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, mux_clk_32k, PERIPH_NO_RESET),
- PERIPH_CLK("timer", "timer", NULL, 5, 0, mux_clk_m, 0),
- PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
- /* FIXME: spdif has 2 clocks but 1 enable */
- PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, mux_plla_audio_pllp_clkm, MUX | DIV_U71),
- PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, mux_pllp_pllc_pllm, MUX | DIV_U71),
- PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
- PERIPH_CLK("spi", "spi", NULL, 43, 0x114, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("xio", "xio", NULL, 45, 0x120, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("ide", "ide", NULL, 25, 0x144, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- /* FIXME: vfir shares an enable with uartb */
- PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+#define SHARED_CLK(_name, _dev, _con, _parent) \
+ { \
+ .name = _name, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ .ops = &tegra_clk_shared_bus_ops, \
+ .parent = _parent, \
+ }
+
+struct clk tegra_list_clks[] = {
+ PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET),
+ PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("kfuse", "kfuse-tegra", NULL, 40, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
+ PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71),
+ PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
+ PERIPH_CLK("spi", "spi", NULL, 43, 0x114, 40000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("xio", "xio", NULL, 45, 0x120, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc1", "spi_tegra.0", NULL, 41, 0x134, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc2", "spi_tegra.1", NULL, 44, 0x118, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc3", "spi_tegra.2", NULL, 46, 0x11c, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0),
+ PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */
/* FIXME: what is la? */
- PERIPH_CLK("la", "la", NULL, 76, 0x1f8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("owr", "owr", NULL, 71, 0x1cc, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("nor", "nor", NULL, 42, 0x1d0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("i2c1_i2c", "tegra-i2c.0", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, mux_pllp_out3, 0),
- PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
- PERIPH_CLK("3d", "3d", NULL, 24, 0x158, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET),
- PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- /* FIXME: vi and vi_sensor share an enable */
- PERIPH_CLK("vi", "vi", NULL, 20, 0x148, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, mux_pllm_pllc_pllp_plla, MUX | DIV_U71),
- /* FIXME: cve and tvo share an enable */
- PERIPH_CLK("cve", "cve", NULL, 49, 0x140, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, mux_pllp_plld_pllc_clkm, MUX | DIV_U71),
- PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, mux_clk_m, 0),
- PERIPH_CLK("usb2", "usb.1", NULL, 58, 0, mux_clk_m, 0),
- PERIPH_CLK("usb3", "usb.2", NULL, 59, 0, mux_clk_m, 0),
- PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB),
- PERIPH_CLK("dsi", "dsi", NULL, 48, 0, mux_plld, 0),
+ PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("owr", "tegra_w1", NULL, 71, 0x1cc, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
+ PERIPH_CLK("nor", "nor", NULL, 42, 0x1d0, 92000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("mipi", "mipi", NULL, 50, 0x174, 60000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */
+ PERIPH_CLK("i2c1", "tegra-i2c.0", NULL, 12, 0x124, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c2", "tegra-i2c.1", NULL, 54, 0x198, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("dvc", "tegra-i2c.3", NULL, 47, 0x128, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U16),
+ PERIPH_CLK("i2c1_i2c", "tegra-i2c.0", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */
+ PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */
+ PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */
+ PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */
+ PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */
+ PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0),
+ PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */
+ PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET),
+
+ SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk),
+ SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc),
+ SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc),
+ SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc),
+ SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc),
+ SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc),
+ SHARED_CLK("host.emc", "tegra_grhost", "emc", &tegra_clk_emc),
+ SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc),
+ SHARED_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc),
};
#define CLK_DUPLICATE(_name, _dev, _con) \
CLK_DUPLICATE("uartc", "tegra_uart.2", NULL),
CLK_DUPLICATE("uartd", "tegra_uart.3", NULL),
CLK_DUPLICATE("uarte", "tegra_uart.4", NULL),
+ CLK_DUPLICATE("usbd", "utmip-pad", NULL),
+ CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
+ CLK_DUPLICATE("usbd", "tegra-otg", NULL),
+ CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
+ CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
+ CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
+ CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
+ CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"),
+ CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"),
+ CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"),
+ CLK_DUPLICATE("epp", "tegra_grhost", "epp"),
+ CLK_DUPLICATE("mpe", "tegra_grhost", "mpe"),
+ CLK_DUPLICATE("cop", "tegra-avp", "cop"),
+ CLK_DUPLICATE("vde", "tegra-aes", "vde"),
};
#define CLK(dev, con, ck) \
.clk = ck, \
}
-struct clk_lookup tegra_clk_lookups[] = {
- /* external root sources */
- CLK(NULL, "32k_clk", &tegra_clk_32k),
- CLK(NULL, "pll_s", &tegra_pll_s),
- CLK(NULL, "clk_m", &tegra_clk_m),
- CLK(NULL, "pll_m", &tegra_pll_m),
- CLK(NULL, "pll_m_out1", &tegra_pll_m_out1),
- CLK(NULL, "pll_c", &tegra_pll_c),
- CLK(NULL, "pll_c_out1", &tegra_pll_c_out1),
- CLK(NULL, "pll_p", &tegra_pll_p),
- CLK(NULL, "pll_p_out1", &tegra_pll_p_out1),
- CLK(NULL, "pll_p_out2", &tegra_pll_p_out2),
- CLK(NULL, "pll_p_out3", &tegra_pll_p_out3),
- CLK(NULL, "pll_p_out4", &tegra_pll_p_out4),
- CLK(NULL, "pll_a", &tegra_pll_a),
- CLK(NULL, "pll_a_out0", &tegra_pll_a_out0),
- CLK(NULL, "pll_d", &tegra_pll_d),
- CLK(NULL, "pll_d_out0", &tegra_pll_d_out0),
- CLK(NULL, "pll_u", &tegra_pll_u),
- CLK(NULL, "pll_x", &tegra_pll_x),
- CLK(NULL, "cpu", &tegra_clk_cpu),
- CLK(NULL, "sys", &tegra_clk_sys),
- CLK(NULL, "hclk", &tegra_clk_hclk),
- CLK(NULL, "pclk", &tegra_clk_pclk),
- CLK(NULL, "clk_d", &tegra_clk_d),
+struct clk *tegra_ptr_clks[] = {
+ &tegra_clk_32k,
+ &tegra_pll_s,
+ &tegra_clk_m,
+ &tegra_pll_m,
+ &tegra_pll_m_out1,
+ &tegra_pll_c,
+ &tegra_pll_c_out1,
+ &tegra_pll_p,
+ &tegra_pll_p_out1,
+ &tegra_pll_p_out2,
+ &tegra_pll_p_out3,
+ &tegra_pll_p_out4,
+ &tegra_pll_a,
+ &tegra_pll_a_out0,
+ &tegra_pll_d,
+ &tegra_pll_d_out0,
+ &tegra_pll_u,
+ &tegra_pll_x,
+ &tegra_clk_cclk,
+ &tegra_clk_sclk,
+ &tegra_clk_hclk,
+ &tegra_clk_pclk,
+ &tegra_clk_d,
+ &tegra_dev1_clk,
+ &tegra_dev2_clk,
+ &tegra_clk_virtual_cpu,
+ &tegra_clk_blink,
+ &tegra_clk_cop,
+ &tegra_clk_emc,
};
+static void tegra2_init_one_clock(struct clk *c)
+{
+ clk_init(c);
+ INIT_LIST_HEAD(&c->shared_bus_list);
+ if (!c->lookup.dev_id && !c->lookup.con_id)
+ c->lookup.con_id = c->name;
+ c->lookup.clk = c;
+ clkdev_add(&c->lookup);
+}
+
void __init tegra2_init_clocks(void)
{
int i;
- struct clk_lookup *cl;
struct clk *c;
- struct clk_duplicate *cd;
- for (i = 0; i < ARRAY_SIZE(tegra_clk_lookups); i++) {
- cl = &tegra_clk_lookups[i];
- clk_init(cl->clk);
- clkdev_add(cl);
- }
+ for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++)
+ tegra2_init_one_clock(tegra_ptr_clks[i]);
- for (i = 0; i < ARRAY_SIZE(tegra_periph_clks); i++) {
- c = &tegra_periph_clks[i];
- cl = &c->lookup;
- cl->clk = c;
-
- clk_init(cl->clk);
- clkdev_add(cl);
- }
+ for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++)
+ tegra2_init_one_clock(&tegra_list_clks[i]);
for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) {
- cd = &tegra_clk_duplicates[i];
- c = tegra_get_clock_by_name(cd->name);
- if (c) {
- cl = &cd->lookup;
- cl->clk = c;
- clkdev_add(cl);
- } else {
+ c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name);
+ if (!c) {
pr_err("%s: Unknown duplicate clock %s\n", __func__,
- cd->name);
+ tegra_clk_duplicates[i].name);
+ continue;
}
+
+ tegra_clk_duplicates[i].lookup.clk = c;
+ clkdev_add(&tegra_clk_duplicates[i].lookup);
+ }
+
+ init_audio_sync_clock_mux();
+}
+
+#ifdef CONFIG_PM
+static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM +
+ PERIPH_CLK_SOURCE_NUM + 22];
+
+void tegra_clk_suspend(void)
+{
+ unsigned long off, i;
+ u32 *ctx = clk_rst_suspend;
+
+ *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK;
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+ *ctx++ = clk_readl(tegra_pll_s.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_s.reg + PLL_MISC(&tegra_pll_s));
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+ *ctx++ = clk_readl(tegra_pll_u.reg + PLL_BASE);
+ *ctx++ = clk_readl(tegra_pll_u.reg + PLL_MISC(&tegra_pll_u));
+
+ *ctx++ = clk_readl(tegra_pll_m_out1.reg);
+ *ctx++ = clk_readl(tegra_pll_a_out0.reg);
+ *ctx++ = clk_readl(tegra_pll_c_out1.reg);
+
+ *ctx++ = clk_readl(tegra_clk_cclk.reg);
+ *ctx++ = clk_readl(tegra_clk_cclk.reg + SUPER_CLK_DIVIDER);
+
+ *ctx++ = clk_readl(tegra_clk_sclk.reg);
+ *ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ *ctx++ = clk_readl(tegra_clk_pclk.reg);
+
+ *ctx++ = clk_readl(tegra_clk_audio.reg);
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ *ctx++ = clk_readl(off);
+ }
+
+ off = RST_DEVICES;
+ for (i = 0; i < RST_DEVICES_NUM; i++, off += 4)
+ *ctx++ = clk_readl(off);
+
+ off = CLK_OUT_ENB;
+ for (i = 0; i < CLK_OUT_ENB_NUM; i++, off += 4)
+ *ctx++ = clk_readl(off);
+
+ *ctx++ = clk_readl(MISC_CLK_ENB);
+ *ctx++ = clk_readl(CLK_MASK_ARM);
+
+ BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend));
+}
+
+void tegra_clk_resume(void)
+{
+ unsigned long off, i;
+ const u32 *ctx = clk_rst_suspend;
+ u32 val;
+
+ val = clk_readl(OSC_CTRL) & ~OSC_CTRL_MASK;
+ val |= *ctx++;
+ clk_writel(val, OSC_CTRL);
+
+ clk_writel(*ctx++, tegra_pll_c.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_c.reg + PLL_MISC(&tegra_pll_c));
+ clk_writel(*ctx++, tegra_pll_a.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a));
+ clk_writel(*ctx++, tegra_pll_s.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_s.reg + PLL_MISC(&tegra_pll_s));
+ clk_writel(*ctx++, tegra_pll_d.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d));
+ clk_writel(*ctx++, tegra_pll_u.reg + PLL_BASE);
+ clk_writel(*ctx++, tegra_pll_u.reg + PLL_MISC(&tegra_pll_u));
+ udelay(1000);
+
+ clk_writel(*ctx++, tegra_pll_m_out1.reg);
+ clk_writel(*ctx++, tegra_pll_a_out0.reg);
+ clk_writel(*ctx++, tegra_pll_c_out1.reg);
+
+ clk_writel(*ctx++, tegra_clk_cclk.reg);
+ clk_writel(*ctx++, tegra_clk_cclk.reg + SUPER_CLK_DIVIDER);
+
+ clk_writel(*ctx++, tegra_clk_sclk.reg);
+ clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER);
+ clk_writel(*ctx++, tegra_clk_pclk.reg);
+
+ clk_writel(*ctx++, tegra_clk_audio.reg);
+
+ /* enable all clocks before configuring clock sources */
+ clk_writel(0xbffffff9ul, CLK_OUT_ENB);
+ clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4);
+ clk_writel(0x77f01bfful, CLK_OUT_ENB + 8);
+ wmb();
+
+ for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC;
+ off += 4) {
+ if (off == PERIPH_CLK_SOURCE_EMC)
+ continue;
+ clk_writel(*ctx++, off);
}
+ wmb();
+
+ off = RST_DEVICES;
+ for (i = 0; i < RST_DEVICES_NUM; i++, off += 4)
+ clk_writel(*ctx++, off);
+ wmb();
+
+ off = CLK_OUT_ENB;
+ for (i = 0; i < CLK_OUT_ENB_NUM; i++, off += 4)
+ clk_writel(*ctx++, off);
+ wmb();
+
+ clk_writel(*ctx++, MISC_CLK_ENB);
+ clk_writel(*ctx++, CLK_MASK_ARM);
}
+#endif
--- /dev/null
+/*
+ * arch/arm/mach-tegra/tegra2_dvfs.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+#include "clock.h"
+#include "dvfs.h"
+#include "fuse.h"
+
+#ifdef CONFIG_TEGRA_CORE_DVFS
+static bool tegra_dvfs_core_disabled;
+#else
+static bool tegra_dvfs_core_disabled = true;
+#endif
+#ifdef CONFIG_TEGRA_CPU_DVFS
+static bool tegra_dvfs_cpu_disabled;
+#else
+static bool tegra_dvfs_cpu_disabled = true;
+#endif
+
+static const int core_millivolts[MAX_DVFS_FREQS] =
+ {950, 1000, 1100, 1200, 1275};
+static const int cpu_millivolts[MAX_DVFS_FREQS] =
+ {750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100};
+
+#define KHZ 1000
+#define MHZ 1000000
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_cpu = {
+ .reg_id = "vdd_cpu",
+ .max_millivolts = 1100,
+ .min_millivolts = 750,
+ .nominal_millivolts = 1100,
+};
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_core = {
+ .reg_id = "vdd_core",
+ .max_millivolts = 1275,
+ .min_millivolts = 950,
+ .nominal_millivolts = 1200,
+ .step = 150, /* step vdd_core by 150 mV to allow vdd_aon to follow */
+};
+
+static struct dvfs_rail tegra2_dvfs_rail_vdd_aon = {
+ .reg_id = "vdd_aon",
+ .max_millivolts = 1275,
+ .min_millivolts = 950,
+ .nominal_millivolts = 1200,
+#ifndef CONFIG_TEGRA_CORE_DVFS
+ .disabled = true,
+#endif
+};
+
+/* vdd_core and vdd_aon must be 50 mV higher than vdd_cpu */
+static int tegra2_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu,
+ struct dvfs_rail *vdd_core)
+{
+ if (vdd_cpu->new_millivolts > vdd_cpu->millivolts &&
+ vdd_core->new_millivolts < vdd_cpu->new_millivolts + 50)
+ return vdd_cpu->new_millivolts + 50;
+
+ if (vdd_core->new_millivolts < vdd_cpu->millivolts + 50)
+ return vdd_cpu->millivolts + 50;
+
+ return vdd_core->new_millivolts;
+}
+
+/* vdd_aon must be within 170 mV of vdd_core */
+static int tegra2_dvfs_rel_vdd_core_vdd_aon(struct dvfs_rail *vdd_core,
+ struct dvfs_rail *vdd_aon)
+{
+ BUG_ON(abs(vdd_aon->millivolts - vdd_core->millivolts) >
+ vdd_aon->step);
+ return vdd_core->millivolts;
+}
+
+static struct dvfs_relationship tegra2_dvfs_relationships[] = {
+ {
+ /* vdd_core must be 50 mV higher than vdd_cpu */
+ .from = &tegra2_dvfs_rail_vdd_cpu,
+ .to = &tegra2_dvfs_rail_vdd_core,
+ .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
+ },
+ {
+ /* vdd_aon must be 50 mV higher than vdd_cpu */
+ .from = &tegra2_dvfs_rail_vdd_cpu,
+ .to = &tegra2_dvfs_rail_vdd_aon,
+ .solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
+ },
+ {
+ /* vdd_aon must be within 170 mV of vdd_core */
+ .from = &tegra2_dvfs_rail_vdd_core,
+ .to = &tegra2_dvfs_rail_vdd_aon,
+ .solve = tegra2_dvfs_rel_vdd_core_vdd_aon,
+ },
+};
+
+static struct dvfs_rail *tegra2_dvfs_rails[] = {
+ &tegra2_dvfs_rail_vdd_cpu,
+ &tegra2_dvfs_rail_vdd_core,
+ &tegra2_dvfs_rail_vdd_aon,
+};
+
+#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .cpu_process_id = _process_id, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = cpu_millivolts, \
+ .auto_dvfs = true, \
+ .dvfs_rail = &tegra2_dvfs_rail_vdd_cpu, \
+ }
+
+#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \
+ { \
+ .clk_name = _clk_name, \
+ .cpu_process_id = -1, \
+ .freqs = {_freqs}, \
+ .freqs_mult = _mult, \
+ .millivolts = core_millivolts, \
+ .auto_dvfs = _auto, \
+ .dvfs_rail = &tegra2_dvfs_rail_vdd_core, \
+ }
+
+static struct dvfs dvfs_init[] = {
+ /* Cpu voltages (mV): 750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100 */
+ CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000),
+ CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000),
+ CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000),
+ CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000),
+
+ /* Core voltages (mV): 950, 1000, 1100, 1200, 1275 */
+ CORE_DVFS("emc", 1, KHZ, 57000, 333000, 333000, 666000, 666000),
+
+#if 0
+ /*
+ * The sdhci core calls the clock ops with a spinlock held, which
+ * conflicts with the sleeping dvfs api.
+ * For now, boards must ensure that the core voltage does not drop
+ * below 1V, or that the sdmmc busses are set to 44 MHz or less.
+ */
+ CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
+ CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
+#endif
+
+ CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000),
+ CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000),
+ CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000),
+ CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000),
+ CORE_DVFS("usbd", 1, KHZ, 0, 0, 0, 480000, 480000),
+ CORE_DVFS("usb2", 1, KHZ, 0, 0, 0, 480000, 480000),
+ CORE_DVFS("usb3", 1, KHZ, 0, 0, 0, 480000, 480000),
+ CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000),
+ CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000),
+ CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000),
+
+ /*
+ * The clock rate for the display controllers that determines the
+ * necessary core voltage depends on a divider that is internal
+ * to the display block. Disable auto-dvfs on the display clocks,
+ * and let the display driver call tegra_dvfs_set_rate manually
+ */
+ CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
+ CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
+ CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500),
+
+ /*
+ * These clocks technically depend on the core process id,
+ * but just use the worst case value for now
+ */
+ CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000),
+ CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
+ CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
+ CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000),
+ CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000),
+ CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000),
+ CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000),
+ CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000),
+ /* What is this? */
+ CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067),
+};
+
+/* module_param "set" hook for disable_core: parse the boolean, then
+ * enable or disable the VDD_CORE rail to match the new value. */
+int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp)
+{
+	int err = param_set_bool(arg, kp);
+
+	if (!err) {
+		void (*apply)(struct dvfs_rail *) = tegra_dvfs_core_disabled ?
+			tegra_dvfs_rail_disable : tegra_dvfs_rail_enable;
+
+		apply(&tegra2_dvfs_rail_vdd_core);
+	}
+
+	return err;
+}
+
+/* module_param "set" hook for disable_cpu: parse the boolean, then
+ * enable or disable the VDD_CPU rail to match the new value. */
+int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
+{
+	int err = param_set_bool(arg, kp);
+
+	if (!err) {
+		void (*apply)(struct dvfs_rail *) = tegra_dvfs_cpu_disabled ?
+			tegra_dvfs_rail_disable : tegra_dvfs_rail_enable;
+
+		apply(&tegra2_dvfs_rail_vdd_cpu);
+	}
+
+	return err;
+}
+
+/* module_param "get" hook shared by disable_core and disable_cpu:
+ * formats the current boolean with the stock param_get_bool() helper. */
+int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops tegra_dvfs_disable_core_ops = {
+ .set = tegra_dvfs_disable_core_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = {
+ .set = tegra_dvfs_disable_cpu_set,
+ .get = tegra_dvfs_disable_get,
+};
+
+module_param_cb(disable_core, &tegra_dvfs_disable_core_ops,
+ &tegra_dvfs_core_disabled, 0644);
+module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops,
+ &tegra_dvfs_cpu_disabled, 0644);
+
+/*
+ * Register the Tegra2 DVFS rails and rail relationships, attach the
+ * per-clock frequency/voltage tables that match this chip's CPU process
+ * id, then honor the boot-time disable_core/disable_cpu parameters.
+ */
+void __init tegra2_init_dvfs(void)
+{
+	int i;
+	struct clk *c;
+	struct dvfs *d;
+	int ret;
+	int cpu_process_id = tegra_cpu_process_id();
+
+	tegra_dvfs_init_rails(tegra2_dvfs_rails, ARRAY_SIZE(tegra2_dvfs_rails));
+	tegra_dvfs_add_relationships(tegra2_dvfs_relationships,
+		ARRAY_SIZE(tegra2_dvfs_relationships));
+	/*
+	 * NOTE(review): the "VDD_CORE at least 50 mV above VDD_CPU" rule is
+	 * enforced by the rail relationships registered above, not by the
+	 * loop below -- the loop only binds dvfs tables to their clocks.
+	 * (The old comment about filling cpu_core_millivolts described code
+	 * that does not exist here.)
+	 */
+	for (i = 0; i < ARRAY_SIZE(dvfs_init); i++) {
+		d = &dvfs_init[i];
+
+		/* CPU entries must match this chip's process id;
+		 * core entries use -1 and always match. */
+		if (d->cpu_process_id != -1 &&
+			d->cpu_process_id != cpu_process_id)
+			continue;
+
+		c = tegra_get_clock_by_name(d->clk_name);
+
+		/* Tables may name clocks that this kernel does not register. */
+		if (!c) {
+			pr_debug("tegra_dvfs: no clock found for %s\n",
+				d->clk_name);
+			continue;
+		}
+
+		ret = tegra_enable_dvfs_on_clk(c, d);
+		if (ret)
+			pr_err("tegra_dvfs: failed to enable dvfs on %s\n",
+				c->name);
+	}
+
+	/* Apply "dvfs disabled" boot parameters, if set. */
+	if (tegra_dvfs_core_disabled)
+		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core);
+
+	if (tegra_dvfs_cpu_disabled)
+		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu);
+}
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <mach/iomap.h>
+
+#include "tegra2_emc.h"
+
+#define TEGRA_MRR_DIVLD (1<<20)
+#define TEGRA_EMC_STATUS 0x02b4
+#define TEGRA_EMC_MRR 0x00ec
+static DEFINE_MUTEX(tegra_emc_mrr_lock);
+
+#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
+static bool emc_enable = true;
+#else
+static bool emc_enable;
+#endif
+module_param(emc_enable, bool, 0644);
+
+static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
+static const struct tegra_emc_table *tegra_emc_table;
+static int tegra_emc_table_size;
+
+static inline void emc_writel(u32 val, unsigned long addr)
+{
+ writel(val, emc + addr);
+}
+
+static inline u32 emc_readl(unsigned long addr)
+{
+ return readl(emc + addr);
+}
+
+/* read LPDDR2 memory modes (MRR = Mode Register Read)
+ *
+ * @addr: LPDDR2 mode register number (e.g. 5 = manufacturer id)
+ * Returns the low 16 bits of the mode register.
+ *
+ * Serialized by tegra_emc_mrr_lock; BUG()s if the controller never
+ * reports completion (DIVLD stuck) within 100 polls.
+ */
+static int tegra_emc_read_mrr(unsigned long addr)
+{
+	u32 value;
+	int count = 100;
+
+	mutex_lock(&tegra_emc_mrr_lock);
+	/* Drain any stale MRR result until DIVLD deasserts. */
+	do {
+		emc_readl(TEGRA_EMC_MRR);
+	} while (--count && (emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
+	if (count == 0) {
+		pr_err("%s: Failed to read memory type\n", __func__);
+		BUG();
+	}
+	/* Bit 30 = issue MRR command; mode register number goes at bit 16. */
+	value = (1 << 30) | (addr << 16);
+	emc_writel(value, TEGRA_EMC_MRR);
+
+	/* Wait for DIVLD to signal that the read data is valid. */
+	count = 100;
+	while (--count && !(emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
+	if (count == 0) {
+		pr_err("%s: Failed to read memory type\n", __func__);
+		BUG();
+	}
+	value = emc_readl(TEGRA_EMC_MRR) & 0xFFFF;
+	mutex_unlock(&tegra_emc_mrr_lock);
+
+	return value;
+}
+
+static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = {
+ 0x2c, /* RC */
+ 0x30, /* RFC */
+ 0x34, /* RAS */
+ 0x38, /* RP */
+ 0x3c, /* R2W */
+ 0x40, /* W2R */
+ 0x44, /* R2P */
+ 0x48, /* W2P */
+ 0x4c, /* RD_RCD */
+ 0x50, /* WR_RCD */
+ 0x54, /* RRD */
+ 0x58, /* REXT */
+ 0x5c, /* WDV */
+ 0x60, /* QUSE */
+ 0x64, /* QRST */
+ 0x68, /* QSAFE */
+ 0x6c, /* RDV */
+ 0x70, /* REFRESH */
+ 0x74, /* BURST_REFRESH_NUM */
+ 0x78, /* PDEX2WR */
+ 0x7c, /* PDEX2RD */
+ 0x80, /* PCHG2PDEN */
+ 0x84, /* ACT2PDEN */
+ 0x88, /* AR2PDEN */
+ 0x8c, /* RW2PDEN */
+ 0x90, /* TXSR */
+ 0x94, /* TCKE */
+ 0x98, /* TFAW */
+ 0x9c, /* TRPAB */
+ 0xa0, /* TCLKSTABLE */
+ 0xa4, /* TCLKSTOP */
+ 0xa8, /* TREFBW */
+ 0xac, /* QUSE_EXTRA */
+ 0x114, /* FBIO_CFG6 */
+ 0xb0, /* ODT_WRITE */
+ 0xb4, /* ODT_READ */
+ 0x104, /* FBIO_CFG5 */
+ 0x2bc, /* CFG_DIG_DLL */
+ 0x2c0, /* DLL_XFORM_DQS */
+ 0x2c4, /* DLL_XFORM_QUSE */
+ 0x2e0, /* ZCAL_REF_CNT */
+ 0x2e4, /* ZCAL_WAIT_CNT */
+ 0x2a8, /* AUTO_CAL_INTERVAL */
+ 0x2d0, /* CFG_CLKTRIM_0 */
+ 0x2d4, /* CFG_CLKTRIM_1 */
+ 0x2d8, /* CFG_CLKTRIM_2 */
+};
+
+/* Select the lowest table EMC rate that is >= the requested rate.
+ * Returns the chosen rate in Hz, or -EINVAL if scaling is disabled,
+ * no table is installed, or no entry is high enough. */
+long tegra_emc_round_rate(unsigned long rate)
+{
+	int i, best_idx = -1;
+	unsigned long best_gap = ULONG_MAX;
+	unsigned long khz;
+
+	if (!tegra_emc_table)
+		return -EINVAL;
+
+	if (!emc_enable)
+		return -EINVAL;
+
+	pr_debug("%s: %lu\n", __func__, rate);
+
+	/* The EMC clock rate is twice the bus rate, and the bus rate is
+	 * measured in kHz */
+	khz = rate / 2 / 1000;
+
+	for (i = 0; i < tegra_emc_table_size; i++) {
+		unsigned long entry = tegra_emc_table[i].rate;
+
+		if (entry >= khz && (entry - khz) < best_gap) {
+			best_gap = entry - khz;
+			best_idx = i;
+		}
+	}
+
+	if (best_idx < 0)
+		return -EINVAL;
+
+	pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best_idx].rate);
+
+	return tegra_emc_table[best_idx].rate * 2 * 1000;
+}
+
+/* The EMC registers have shadow registers. When the EMC clock is updated
+ * in the clock controller, the shadow registers are copied to the active
+ * registers, allowing glitchless memory bus frequency changes.
+ * This function updates the shadow registers for a new clock frequency,
+ * and relies on the clock lock on the emc clock to avoid races between
+ * multiple frequency changes */
+int tegra_emc_set_rate(unsigned long rate)
+{
+	int i;
+	int j;
+
+	if (!tegra_emc_table)
+		return -EINVAL;
+
+	/* The EMC clock rate is twice the bus rate, and the bus rate is
+	 * measured in kHz */
+	rate = rate / 2 / 1000;
+
+	for (i = 0; i < tegra_emc_table_size; i++)
+		if (tegra_emc_table[i].rate == rate)
+			break;
+
+	if (i >= tegra_emc_table_size)
+		return -EINVAL;
+
+	pr_debug("%s: setting to %lu\n", __func__, rate);
+
+	for (j = 0; j < TEGRA_EMC_NUM_REGS; j++)
+		emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]);
+
+	/*
+	 * Dummy read-back of the last register written, so the posted MMIO
+	 * writes above have reached the EMC before the caller switches the
+	 * clock.  The previous code passed the register *value*
+	 * (regs[TEGRA_EMC_NUM_REGS - 1]) as the read offset, which reads
+	 * from an arbitrary, potentially out-of-range offset.
+	 */
+	emc_readl(emc_reg_addr[TEGRA_EMC_NUM_REGS - 1]);
+
+	return 0;
+}
+
+/*
+ * tegra_init_emc - select the EMC frequency table for the fitted SDRAM.
+ * @chips: candidate memory descriptions (fields set to -1 are wildcards)
+ * @chips_size: number of entries in @chips
+ *
+ * Identifies the LPDDR2 device via mode registers 5-8 and installs the
+ * first matching table; scaling stays disabled when nothing matches.
+ */
+void tegra_init_emc(const struct tegra_emc_chip *chips, int chips_size)
+{
+	int i;
+	int vid;
+	int rev_id1;
+	int rev_id2;
+	int pid;
+	int chip_matched = -1;
+
+	vid = tegra_emc_read_mrr(5);
+	rev_id1 = tegra_emc_read_mrr(6);
+	rev_id2 = tegra_emc_read_mrr(7);
+	pid = tegra_emc_read_mrr(8);
+
+	for (i = 0; i < chips_size; i++) {
+		/* -1 in any id field means "don't care" */
+		if (chips[i].mem_manufacturer_id >= 0 &&
+		    chips[i].mem_manufacturer_id != vid)
+			continue;
+		if (chips[i].mem_revision_id1 >= 0 &&
+		    chips[i].mem_revision_id1 != rev_id1)
+			continue;
+		if (chips[i].mem_revision_id2 >= 0 &&
+		    chips[i].mem_revision_id2 != rev_id2)
+			continue;
+		if (chips[i].mem_pid >= 0 && chips[i].mem_pid != pid)
+			continue;
+
+		chip_matched = i;
+		break;
+	}
+
+	if (chip_matched >= 0) {
+		pr_info("%s: %s memory found\n", __func__,
+			chips[chip_matched].description);
+		tegra_emc_table = chips[chip_matched].table;
+		tegra_emc_table_size = chips[chip_matched].table_size;
+	} else {
+		pr_err("%s: Memory not recognized, memory scaling disabled\n",
+			__func__);
+		/* '\n' added: these lines previously ran together in the log */
+		pr_info("%s: Memory vid = 0x%04x\n", __func__, vid);
+		pr_info("%s: Memory rev_id1 = 0x%04x\n", __func__, rev_id1);
+		pr_info("%s: Memory rev_id2 = 0x%04x\n", __func__, rev_id2);
+		pr_info("%s: Memory pid = 0x%04x\n", __func__, pid);
+	}
+}
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define TEGRA_EMC_NUM_REGS 46
+
+struct tegra_emc_table {
+ unsigned long rate;
+ u32 regs[TEGRA_EMC_NUM_REGS];
+};
+
+struct tegra_emc_chip {
+ const char *description;
+ int mem_manufacturer_id; /* LPDDR2 MR5 or -1 to ignore */
+ int mem_revision_id1; /* LPDDR2 MR6 or -1 to ignore */
+ int mem_revision_id2; /* LPDDR2 MR7 or -1 to ignore */
+ int mem_pid; /* LPDDR2 MR8 or -1 to ignore */
+
+ const struct tegra_emc_table *table;
+ int table_size;
+};
+
+int tegra_emc_set_rate(unsigned long rate);
+long tegra_emc_round_rate(unsigned long rate);
+void tegra_init_emc(const struct tegra_emc_chip *chips, int chips_size);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/tegra2_fuse.c
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Fuses are one time programmable bits on the chip which are used by
+ * the chip manufacturer and device manufacturers to store chip/device
+ * configurations. The fuse bits are encapsulated in a 32 x 64 array.
+ * If a fuse bit is programmed to 1, it cannot be reverted to 0. Either
+ * another fuse bit has to be used for the same purpose or a new chip
+ * needs to be used.
+ *
+ * Each and every fuse word has its own shadow word which resides adjacent to
+ * a particular fuse word. e.g. Fuse words 0-1 form a fuse-shadow pair.
+ * So in theory we have only 32 fuse words to work with.
+ * The shadow fuse word is a mirror of the actual fuse word at all times
+ * and this is maintained while programming a particular fuse.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <mach/tegra2_fuse.h>
+
+#include "fuse.h"
+
+#define NFUSES 64
+#define STATE_IDLE (0x4 << 16)
+
+/* since fuse burning is irreversible, use this for testing */
+#define ENABLE_FUSE_BURNING 1
+
+/* fuse registers */
+#define FUSE_CTRL 0x000
+#define FUSE_REG_ADDR 0x004
+#define FUSE_REG_READ 0x008
+#define FUSE_REG_WRITE 0x00C
+#define FUSE_TIME_PGM 0x01C
+#define FUSE_PRIV2INTFC 0x020
+#define FUSE_DIS_PGM 0x02C
+#define FUSE_PWR_GOOD_SW 0x034
+
+static u32 fuse_pgm_data[NFUSES / 2];
+static u32 fuse_pgm_mask[NFUSES / 2];
+static u32 tmp_fuse_pgm_data[NFUSES / 2];
+static u32 master_enable;
+
+DEFINE_MUTEX(fuse_lock);
+
+static struct fuse_data fuse_info;
+
+struct param_info {
+ u32 *addr;
+ int sz;
+ u32 start_off;
+ int start_bit;
+ int nbits;
+ int data_offset;
+};
+
+static struct param_info fuse_info_tbl[] = {
+ [DEVKEY] = {
+ .addr = &fuse_info.devkey,
+ .sz = sizeof(fuse_info.devkey),
+ .start_off = 0x12,
+ .start_bit = 8,
+ .nbits = 32,
+ .data_offset = 0,
+ },
+ [JTAG_DIS] = {
+ .addr = &fuse_info.jtag_dis,
+ .sz = sizeof(fuse_info.jtag_dis),
+ .start_off = 0x0,
+ .start_bit = 24,
+ .nbits = 1,
+ .data_offset = 1,
+ },
+ [ODM_PROD_MODE] = {
+ .addr = &fuse_info.odm_prod_mode,
+ .sz = sizeof(fuse_info.odm_prod_mode),
+ .start_off = 0x0,
+ .start_bit = 23,
+ .nbits = 1,
+ .data_offset = 2,
+ },
+ [SEC_BOOT_DEV_CFG] = {
+ .addr = &fuse_info.bootdev_cfg,
+ .sz = sizeof(fuse_info.bootdev_cfg),
+ .start_off = 0x14,
+ .start_bit = 8,
+ .nbits = 16,
+ .data_offset = 3,
+ },
+ [SEC_BOOT_DEV_SEL] = {
+ .addr = &fuse_info.bootdev_sel,
+ .sz = sizeof(fuse_info.bootdev_sel),
+ .start_off = 0x14,
+ .start_bit = 24,
+ .nbits = 3,
+ .data_offset = 4,
+ },
+ [SBK] = {
+ .addr = fuse_info.sbk,
+ .sz = sizeof(fuse_info.sbk),
+ .start_off = 0x0A,
+ .start_bit = 8,
+ .nbits = 128,
+ .data_offset = 5,
+ },
+ [SW_RSVD] = {
+ .addr = &fuse_info.sw_rsvd,
+ .sz = sizeof(fuse_info.sw_rsvd),
+ .start_off = 0x14,
+ .start_bit = 28,
+ .nbits = 4,
+ .data_offset = 9,
+ },
+ [IGNORE_DEV_SEL_STRAPS] = {
+ .addr = &fuse_info.ignore_devsel_straps,
+ .sz = sizeof(fuse_info.ignore_devsel_straps),
+ .start_off = 0x14,
+ .start_bit = 27,
+ .nbits = 1,
+ .data_offset = 10,
+ },
+ [ODM_RSVD] = {
+ .addr = fuse_info.odm_rsvd,
+ .sz = sizeof(fuse_info.odm_rsvd),
+ .start_off = 0x16,
+ .start_bit = 4,
+ .nbits = 256,
+ .data_offset = 11,
+ },
+ [SBK_DEVKEY_STATUS] = {
+ .sz = SBK_DEVKEY_STATUS_SZ,
+ },
+ [MASTER_ENB] = {
+ .addr = &master_enable,
+ .sz = sizeof(u8),
+ .start_off = 0x0,
+ .start_bit = 0,
+ .nbits = 1,
+ },
+};
+
+/* Spin until the fuse controller state machine (FUSE_CTRL bits 19:16)
+ * returns to IDLE.  NOTE(review): no timeout -- hangs forever if the
+ * hardware wedges. */
+static void wait_for_idle(void)
+{
+	u32 reg;
+
+	do {
+		reg = tegra_fuse_readl(FUSE_CTRL);
+	} while ((reg & (0xF << 16)) != STATE_IDLE);
+}
+
+#define FUSE_READ 0x1
+#define FUSE_WRITE 0x2
+#define FUSE_SENSE 0x3
+#define FUSE_CMD_MASK 0x3
+
+/* Issue a fuse-macro READ command for word @addr and return the data.
+ * Callers serialize access via fuse_lock and must have the fuse
+ * registers unhidden (fuse_reg_unhide()). */
+static u32 fuse_cmd_read(u32 addr)
+{
+	u32 reg;
+
+	tegra_fuse_writel(addr, FUSE_REG_ADDR);
+	reg = tegra_fuse_readl(FUSE_CTRL);
+	reg &= ~FUSE_CMD_MASK;
+	reg |= FUSE_READ;
+	tegra_fuse_writel(reg, FUSE_CTRL);
+	wait_for_idle();
+
+	reg = tegra_fuse_readl(FUSE_REG_READ);
+	return reg;
+}
+
+static void fuse_cmd_write(u32 value, u32 addr)
+{
+ u32 reg;
+
+ tegra_fuse_writel(addr, FUSE_REG_ADDR);
+ tegra_fuse_writel(value, FUSE_REG_WRITE);
+
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= ~FUSE_CMD_MASK;
+ reg |= FUSE_WRITE;
+ tegra_fuse_writel(reg, FUSE_CTRL);
+ wait_for_idle();
+}
+
+static void fuse_cmd_sense(void)
+{
+ u32 reg;
+
+ reg = tegra_fuse_readl(FUSE_CTRL);
+ reg &= ~FUSE_CMD_MASK;
+ reg |= FUSE_SENSE;
+ tegra_fuse_writel(reg, FUSE_CTRL);
+ wait_for_idle();
+}
+
+/* Clear bit 28 of register 0x48 to hide the fuse macro again.
+ * NOTE(review): 0x48 / bit 28 are undocumented magic here -- presumably
+ * the fuse-visibility control; confirm against the TRM. */
+static void fuse_reg_hide(void)
+{
+	u32 reg = tegra_fuse_readl(0x48);
+	reg &= ~(1 << 28);
+	tegra_fuse_writel(reg, 0x48);
+}
+
+/* Set bit 28 of register 0x48 to make the fuse macro visible to
+ * software -- counterpart of fuse_reg_hide(). */
+static void fuse_reg_unhide(void)
+{
+	u32 reg = tegra_fuse_readl(0x48);
+	reg |= (1 << 28);
+	tegra_fuse_writel(reg, 0x48);
+}
+
+/*
+ * get_fuse - read @io_param's fuse bits into @out, or, when @out is
+ * NULL, into the fuse_info field registered in fuse_info_tbl.
+ *
+ * Copies nbits starting at (start_off, start_bit) into a packed bit
+ * stream at the destination, 32 destination bits per word.
+ */
+static void get_fuse(enum fuse_io_param io_param, u32 *out)
+{
+	int start_bit = fuse_info_tbl[io_param].start_bit;
+	int nbits = fuse_info_tbl[io_param].nbits;
+	int offset = fuse_info_tbl[io_param].start_off;
+	u32 *dst = fuse_info_tbl[io_param].addr;
+	int dst_bit = 0;
+	int i;
+	u32 val;
+	int loops;
+
+	if (out)
+		dst = out;
+
+	do {
+		val = fuse_cmd_read(offset);
+		/* Consume at most the rest of this 32-bit fuse word. */
+		loops = min(nbits, 32 - start_bit);
+		for (i = 0; i < loops; i++) {
+			if (val & (BIT(start_bit + i)))
+				*dst |= BIT(dst_bit);
+			else
+				*dst &= ~BIT(dst_bit);
+			dst_bit++;
+			if (dst_bit == 32) {
+				dst++;
+				dst_bit = 0;
+			}
+		}
+		nbits -= loops;
+		offset += 2; /* +2 skips the shadow word paired with each fuse word */
+		start_bit = 0;
+	} while (nbits > 0);
+}
+
+/*
+ * tegra_fuse_read - read one fuse parameter into @data.
+ * @size must equal the parameter's registered size in fuse_info_tbl.
+ *
+ * SBK_DEVKEY_STATUS is synthesized rather than read directly:
+ * *data = 1 when the secure boot key or the device key has any
+ * programmed bit.  Returns 0 on success, -EINVAL on bad arguments.
+ */
+int tegra_fuse_read(enum fuse_io_param io_param, u32 *data, int size)
+{
+	int ret = 0, nbits;
+	u32 sbk[4], devkey = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	if (size != fuse_info_tbl[io_param].sz) {
+		pr_err("%s: size mismatch(%d), %d vs %d\n", __func__,
+			(int)io_param, size, fuse_info_tbl[io_param].sz);
+		return -EINVAL;
+	}
+
+	mutex_lock(&fuse_lock);
+	fuse_reg_unhide();
+	fuse_cmd_sense();
+
+	if (io_param == SBK_DEVKEY_STATUS) {
+		*data = 0;
+
+		get_fuse(SBK, sbk);
+		get_fuse(DEVKEY, &devkey);
+		nbits = sizeof(sbk) * BITS_PER_BYTE;
+		/* find_first_bit() == nbits means no bit is set in the SBK */
+		if (find_first_bit((unsigned long *)sbk, nbits) != nbits)
+			*data = 1;
+		else if (devkey)
+			*data = 1;
+	} else {
+		get_fuse(io_param, data);
+	}
+
+	fuse_reg_hide();
+	mutex_unlock(&fuse_lock);
+	return ret;
+}
+
+/* Return true when the ODM production-mode fuse has been burned. */
+static bool fuse_odm_prod_mode(void)
+{
+	u32 mode = 0;
+
+	get_fuse(ODM_PROD_MODE, &mode);
+	return mode != 0;
+}
+
+static void set_fuse(enum fuse_io_param io_param, u32 *data)
+{
+ int i, start_bit = fuse_info_tbl[io_param].start_bit;
+ int nbits = fuse_info_tbl[io_param].nbits, loops;
+ int offset = fuse_info_tbl[io_param].start_off >> 1;
+ int src_bit = 0;
+ u32 val;
+
+ do {
+ val = *data;
+ loops = min(nbits, 32 - start_bit);
+ for (i = 0; i < loops; i++) {
+ fuse_pgm_mask[offset] |= BIT(start_bit + i);
+ if (val & BIT(src_bit))
+ fuse_pgm_data[offset] |= BIT(start_bit + i);
+ else
+ fuse_pgm_data[offset] &= ~BIT(start_bit + i);
+ src_bit++;
+ if (src_bit == 32) {
+ data++;
+ val = *data;
+ src_bit = 0;
+ }
+ }
+ nbits -= loops;
+ offset++;
+ start_bit = 0;
+ } while (nbits > 0);
+}
+
+static void populate_fuse_arrs(struct fuse_data *info, u32 flags)
+{
+ u32 data = 0;
+ u32 *src = (u32 *)info;
+ int i;
+
+ memset(fuse_pgm_data, 0, sizeof(fuse_pgm_data));
+ memset(fuse_pgm_mask, 0, sizeof(fuse_pgm_mask));
+
+ /* enable program bit */
+ data = 1;
+ set_fuse(MASTER_ENB, &data);
+
+ if ((flags & FLAGS_ODMRSVD)) {
+ set_fuse(ODM_RSVD, info->odm_rsvd);
+ flags &= ~FLAGS_ODMRSVD;
+ }
+
+ /* do not burn any more if secure mode is set */
+ if (fuse_odm_prod_mode())
+ goto out;
+
+ for_each_set_bit(i, (unsigned long *)&flags, MAX_PARAMS)
+ set_fuse(i, src + fuse_info_tbl[i].data_offset);
+
+out:
+ pr_debug("ready to program");
+}
+
+static void fuse_power_enable(void)
+{
+#if ENABLE_FUSE_BURNING
+ tegra_fuse_writel(0x1, FUSE_PWR_GOOD_SW);
+ udelay(1);
+#endif
+}
+
+static void fuse_power_disable(void)
+{
+#if ENABLE_FUSE_BURNING
+ tegra_fuse_writel(0, FUSE_PWR_GOOD_SW);
+ udelay(1);
+#endif
+}
+
+static void fuse_program_array(int pgm_cycles)
+{
+ u32 reg, fuse_val[2];
+ u32 *data = tmp_fuse_pgm_data, addr = 0, *mask = fuse_pgm_mask;
+ int i = 0;
+
+ fuse_reg_unhide();
+ fuse_cmd_sense();
+
+ /* get the first 2 fuse bytes */
+ fuse_val[0] = fuse_cmd_read(0);
+ fuse_val[1] = fuse_cmd_read(1);
+
+ fuse_power_enable();
+
+ /*
+ * The fuse macro is a high density macro. Fuses are
+ * burned using an addressing mechanism, so no need to prepare
+ * the full list, but more write to control registers are needed.
+ * The only bit that can be written at first is bit 0, a special write
+ * protection bit by assumptions all other bits are at 0
+ *
+ * The programming pulse must have a precise width of
+ * [9000, 11000] ns.
+ */
+ if (pgm_cycles > 0) {
+ reg = pgm_cycles;
+ tegra_fuse_writel(reg, FUSE_TIME_PGM);
+ }
+ fuse_val[0] = (0x1 & ~fuse_val[0]);
+ fuse_val[1] = (0x1 & ~fuse_val[1]);
+ fuse_cmd_write(fuse_val[0], 0);
+ fuse_cmd_write(fuse_val[1], 1);
+
+ fuse_power_disable();
+
+ /*
+ * this will allow programming of other fuses
+ * and the reading of the existing fuse values
+ */
+ fuse_cmd_sense();
+
+ /* Clear out all bits that have already been burned or masked out */
+ memcpy(data, fuse_pgm_data, sizeof(fuse_pgm_data));
+
+ for (addr = 0; addr < NFUSES; addr += 2, data++, mask++) {
+ reg = fuse_cmd_read(addr);
+ pr_debug("%d: 0x%x 0x%x 0x%x\n", addr, (u32)(*data),
+ ~reg, (u32)(*mask));
+ *data = (*data & ~reg) & *mask;
+ }
+
+ fuse_power_enable();
+
+ /*
+ * Finally loop on all fuses, program the non zero ones.
+ * Words 0 and 1 are written last and they contain control fuses. We
+ * need to invalidate after writing to a control word (with the exception
+ * of the master enable). This is also the reason we write them last.
+ */
+ for (i = ARRAY_SIZE(fuse_pgm_data) - 1; i >= 0; i--) {
+ if (tmp_fuse_pgm_data[i]) {
+ fuse_cmd_write(tmp_fuse_pgm_data[i], i * 2);
+ fuse_cmd_write(tmp_fuse_pgm_data[i], (i * 2) + 1);
+ }
+
+ if (i < 2) {
+ fuse_power_disable();
+ fuse_cmd_sense();
+ fuse_power_enable();
+ }
+ }
+
+ /* Read all data into the chip options */
+ tegra_fuse_writel(0x1, FUSE_PRIV2INTFC);
+ udelay(1);
+ tegra_fuse_writel(0, FUSE_PRIV2INTFC);
+
+ while (!(tegra_fuse_readl(FUSE_CTRL) & (1 << 30)));
+
+ fuse_reg_hide();
+ fuse_power_disable();
+}
+
+/*
+ * fuse_set - merge already-burned hardware bits into the requested value
+ * for @io_param: burned fuse bits can never be cleared, so the final
+ * value is always (hw | sw).  @param/@size describe the caller's buffer.
+ * Returns 0 on success or a negative errno.
+ */
+static int fuse_set(enum fuse_io_param io_param, u32 *param, int size)
+{
+	int i, nwords = size / sizeof(u32);
+	u32 *data;
+
+	/* valid parameters are 0 .. MAX_PARAMS-1 (was '>': off-by-one) */
+	if (io_param >= MAX_PARAMS)
+		return -EINVAL;
+
+	data = kzalloc(size, GFP_KERNEL);	/* no cast needed in C */
+	if (!data) {
+		pr_err("failed to alloc %d bytes\n", size);
+		return -ENOMEM;
+	}
+
+	get_fuse(io_param, data);
+
+	for (i = 0; i < nwords; i++) {
+		if ((data[i] | param[i]) != param[i]) {
+			pr_info("hw_val: 0x%x, sw_val: 0x%x, final: 0x%x\n",
+				data[i], param[i], (data[i] | param[i]));
+			param[i] = (data[i] | param[i]);
+		}
+	}
+	kfree(data);
+	return 0;
+}
+
+/*
+ * tegra_fuse_program - burn the fuses selected by @flags from @pgm_data.
+ * Returns 0 on success, -EACCES if programming has been disabled,
+ * -EPERM for combinations forbidden in secure (ODM production) mode.
+ */
+int tegra_fuse_program(struct fuse_data *pgm_data, u32 flags)
+{
+	u32 reg;
+	int i = 0;
+
+	mutex_lock(&fuse_lock);
+	reg = tegra_fuse_readl(FUSE_DIS_PGM);
+	mutex_unlock(&fuse_lock);
+	if (reg) {
+		pr_err("fuse programming disabled\n");
+		return -EACCES;
+	}
+
+	if (fuse_odm_prod_mode() && (flags != FLAGS_ODMRSVD)) {
+		pr_err("only reserved odm fuses are allowed in secure mode\n");
+		return -EPERM;
+	}
+
+	if ((flags & FLAGS_ODM_PROD_MODE) &&
+		(flags & (FLAGS_SBK | FLAGS_DEVKEY))) {
+		pr_err("odm production mode and sbk/devkey not allowed\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&fuse_lock);
+	memcpy(&fuse_info, pgm_data, sizeof(fuse_info));
+	/* Fold already-burned bits into each requested parameter. */
+	for_each_set_bit(i, (unsigned long *)&flags, MAX_PARAMS) {
+		fuse_set((u32)i, fuse_info_tbl[i].addr,
+			fuse_info_tbl[i].sz);
+	}
+
+	populate_fuse_arrs(&fuse_info, flags);
+	fuse_program_array(0);
+
+	/* disable program bit (the '&reg' here had been mangled into an
+	 * HTML entity, which does not compile) */
+	reg = 0;
+	set_fuse(MASTER_ENB, &reg);
+
+	/* Scrub key material from the static buffer before unlocking. */
+	memset(&fuse_info, 0, sizeof(fuse_info));
+	mutex_unlock(&fuse_lock);
+
+	return 0;
+}
+
+void tegra_fuse_program_disable(void)
+{
+ mutex_lock(&fuse_lock);
+ tegra_fuse_writel(0x1, FUSE_DIS_PGM);
+ mutex_unlock(&fuse_lock);
+}
+
+static int __init tegra_fuse_program_init(void)
+{
+	/*
+	 * fuse_lock is statically initialized with DEFINE_MUTEX() above;
+	 * re-running mutex_init() on a mutex that may already be in use is
+	 * unsafe and redundant, so there is nothing to set up here.
+	 */
+	return 0;
+}
+
+module_init(tegra_fuse_program_init);
--- /dev/null
+/*
+ * arch/arm/mach-tegra/tegra2_save.S
+ *
+ * CPU state save & restore routines for CPU hotplug
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/domain.h>
+#include <asm/ptrace.h>
+#include <asm/cache.h>
+#include <asm/vfpmacros.h>
+#include <asm/memory.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <mach/iomap.h>
+#include <mach/io.h>
+
+#include "power.h"
+
+/* .section ".cpuinit.text", "ax"*/
+
+#define TTB_FLAGS 0x6A @ IRGN_WBWA, OC_RGN_WBWA, S, NOS
+
+#define EMC_CFG 0xc
+#define EMC_ADR_CFG 0x10
+#define EMC_REFRESH 0x70
+#define EMC_NOP 0xdc
+#define EMC_SELF_REF 0xe0
+#define EMC_REQ_CTRL 0x2b0
+#define EMC_EMC_STATUS 0x2b4
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_BFI_SHIFT 8
+#define PMC_CTRL_BFI_WIDTH 9
+#define PMC_SCRATCH38 0x134
+#define PMC_SCRATCH41 0x140
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_SCLK_BURST 0x28
+#define CLK_RESET_SCLK_DIVIDER 0x2c
+
+#define CLK_RESET_PLLC_BASE 0x80
+#define CLK_RESET_PLLM_BASE 0x90
+#define CLK_RESET_PLLP_BASE 0xa0
+
+#define FLOW_CTRL_HALT_CPU_EVENTS 0x0
+
+#include "power-macros.S"
+
+/* emc_device_mask - build the EMC_EMC_STATUS mask covering the populated
+ * DRAM devices: bits 25:24 of EMC_ADR_CFG select one vs two devices, and
+ * \rd receives 1<<8 (one device) or 3<<8 (two).  \base = EMC base. */
+.macro emc_device_mask, rd, base
+	ldr \rd, [\base, #EMC_ADR_CFG]
+	tst \rd, #(0x3<<24)
+	moveq \rd, #(0x1<<8) @ just 1 device
+	movne \rd, #(0x3<<8) @ 2 devices
+.endm
+
+/*
+ *
+ * __tear_down_master( r8 = context_pa, sp = power state )
+ *
+ * Set the clock burst policy to the selected wakeup source
+ * Enable CPU power-request mode in the PMC
+ * Put the CPU in wait-for-event mode on the flow controller
+ * Trigger the PMC state machine to put the CPU in reset
+ */
+ENTRY(__tear_down_master)
+__tear_down_master:
+#ifdef CONFIG_CACHE_L2X0
+ /* clean out the dirtied L2 lines, since all power transitions
+ * cause the cache state to get invalidated (although LP1 & LP2
+ * preserve the data in the L2, the control words (L2X0_CTRL,
+ * L2X0_AUX_CTRL, etc.) need to be cleaned to L3 so that they
+ * will be visible on reboot. skip this for LP0, since the L2 cache
+ * will be shutdown before we reach this point */
+ tst sp, #TEGRA_POWER_EFFECT_LP0
+ bne __l2_clean_done
+ mov32 r0, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
+ add r3, r8, #(CONTEXT_SIZE_BYTES)
+ bic r8, r8, #0x1f
+ add r3, r3, #0x1f
+11: str r8, [r0, #L2X0_CLEAN_LINE_PA]
+ add r8, r8, #32
+ cmp r8, r3
+ blo 11b
+12: ldr r1, [r0, #L2X0_CLEAN_LINE_PA]
+ tst r1, #1
+ bne 12b
+ mov r1, #0
+ str r1, [r0, #L2X0_CACHE_SYNC]
+13: ldr r1, [r0, #L2X0_CACHE_SYNC]
+ tst r1, #1
+ bne 13b
+__l2_clean_done:
+#endif
+
+ tst sp, #TEGRA_POWER_SDRAM_SELFREFRESH
+
+ /* preload all the address literals that are needed for the
+ * CPU power-gating process, to avoid loads from SDRAM (which are
+ * not supported once SDRAM is put into self-refresh.
+ * LP0 / LP1 use physical address, since the MMU needs to be
+ * disabled before putting SDRAM into self-refresh to avoid
+ * memory access due to page table walks */
+ mov32 r0, (IO_APB_VIRT-IO_APB_PHYS)
+ mov32 r4, TEGRA_PMC_BASE
+ mov32 r0, (IO_PPSB_VIRT-IO_PPSB_PHYS)
+ mov32 r5, TEGRA_CLK_RESET_BASE
+ mov32 r6, TEGRA_FLOW_CTRL_BASE
+ mov32 r7, TEGRA_TMRUS_BASE
+
+ /* change page table pointer to tegra_pgd_phys, so that IRAM
+ * and MMU shut-off will be mapped virtual == physical */
+ adr r3, __tear_down_master_data
+ ldr r3, [r3] @ &tegra_pgd_phys
+ ldr r3, [r3]
+ orr r3, r3, #TTB_FLAGS
+ mov r2, #0
+ mcr p15, 0, r2, c13, c0, 1 @ reserved context
+ isb
+ mcr p15, 0, r3, c2, c0, 0 @ TTB 0
+ isb
+
+ /* Obtain LP1 information.
+ * R10 = LP1 branch target */
+ mov32 r2, __tegra_lp1_reset
+ mov32 r3, __tear_down_master_sdram
+ sub r2, r3, r2
+ mov32 r3, (TEGRA_IRAM_CODE_AREA)
+ add r10, r2, r3
+
+ mov32 r3, __shut_off_mmu
+
+ /* R9 = LP2 branch target */
+ mov32 r9, __tear_down_master_pll_cpu
+
+ /* Convert the branch targets
+ * to physical addresses */
+ sub r3, r3, #(PAGE_OFFSET - PHYS_OFFSET)
+ sub r9, r9, #(PAGE_OFFSET - PHYS_OFFSET)
+ movne r9, r10
+ bx r3
+ENDPROC(__tear_down_master)
+ .type __tear_down_master_data, %object
+__tear_down_master_data:
+ .long tegra_pgd_phys
+ .size __tear_down_master_data, . - __tear_down_master_data
+
+/* START OF ROUTINES COPIED TO IRAM */
+/*
+ * __tegra_lp1_reset
+ *
+ * reset vector for LP1 restore; copied into IRAM during suspend.
+ * brings the system back up to a safe starting point (SDRAM out of
+ * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLP,
+ * system clock running on the same PLL that it suspended at), and
+ * jumps to tegra_lp2_startup to restore PLLX and virtual addressing.
+ * physical address of tegra_lp2_startup expected to be stored in
+ * PMC_SCRATCH41
+ */
+ .align L1_CACHE_SHIFT
+ENTRY(__tegra_lp1_reset)
+__tegra_lp1_reset:
+ /* the CPU and system bus are running at 32KHz and executing from
+ * IRAM when this code is executed; immediately switch to CLKM and
+ * enable PLLP. */
+ mov32 r0, TEGRA_CLK_RESET_BASE
+ mov r1, #(1<<28)
+ str r1, [r0, #CLK_RESET_SCLK_BURST]
+ str r1, [r0, #CLK_RESET_CCLK_BURST]
+ mov r1, #0
+ str r1, [r0, #CLK_RESET_SCLK_DIVIDER]
+ str r1, [r0, #CLK_RESET_CCLK_DIVIDER]
+
+ ldr r1, [r0, #CLK_RESET_PLLM_BASE]
+ tst r1, #(1<<30)
+ orreq r1, r1, #(1<<30)
+ streq r1, [r0, #CLK_RESET_PLLM_BASE]
+ ldr r1, [r0, #CLK_RESET_PLLP_BASE]
+ tst r1, #(1<<30)
+ orreq r1, r1, #(1<<30)
+ streq r1, [r0, #CLK_RESET_PLLP_BASE]
+ ldr r1, [r0, #CLK_RESET_PLLC_BASE]
+ tst r1, #(1<<30)
+ orreq r1, r1, #(1<<30)
+ streq r1, [r0, #CLK_RESET_PLLC_BASE]
+ mov32 r7, TEGRA_TMRUS_BASE
+ ldr r1, [r7]
+
+ /* since the optimized settings are still in SDRAM, there is
+ * no need to store them back into the IRAM-local __lp1_pad_area */
+ add r2, pc, #__lp1_pad_area-(.+8)
+padload:ldmia r2!, {r3-r4}
+ cmp r3, #0
+ beq padload_done
+ str r4, [r3]
+ b padload
+padload_done:
+ ldr r2, [r7]
+ add r2, r2, #0x4 @ 4uS delay for DRAM pad restoration
+ wait_until r2, r7, r3
+ add r1, r1, #0xff @ 255uS delay for PLL stabilization
+ wait_until r1, r7, r3
+
+ str r4, [r0, #CLK_RESET_SCLK_BURST]
+ mov32 r4, ((1<<28) | (4)) @ burst policy is PLLP
+ str r4, [r0, #CLK_RESET_CCLK_BURST]
+
+ mov32 r0, TEGRA_EMC_BASE
+ ldr r1, [r0, #EMC_CFG]
+ bic r1, r1, #(1<<31) @ disable DRAM_CLK_STOP
+ str r1, [r0, #EMC_CFG]
+
+ mov r1, #0
+ str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh
+ mov r1, #1
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_NOP]
+ str r1, [r0, #EMC_REFRESH]
+
+ emc_device_mask r1, r0
+
+exit_selfrefresh_loop:
+ ldr r2, [r0, #EMC_EMC_STATUS]
+ ands r2, r2, r1
+ bne exit_selfrefresh_loop
+
+ mov r1, #0
+ str r1, [r0, #EMC_REQ_CTRL]
+
+ mov32 r0, TEGRA_PMC_BASE
+ ldr r0, [r0, #PMC_SCRATCH41]
+ mov pc, r0
+ENDPROC(__tegra_lp1_reset)
+
+/*
+ * __tear_down_master_sdram
+ *
+ * disables MMU, data cache, and puts SDRAM into self-refresh.
+ * must execute from IRAM.
+ */
+ .align L1_CACHE_SHIFT
+__tear_down_master_sdram:
+ mov32 r1, TEGRA_EMC_BASE
+ mov r2, #3
+ str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests
+
+emcidle:ldr r2, [r1, #EMC_EMC_STATUS]
+ tst r2, #4
+ beq emcidle
+
+ mov r2, #1
+ str r2, [r1, #EMC_SELF_REF]
+
+ emc_device_mask r2, r1
+
+emcself:ldr r3, [r1, #EMC_EMC_STATUS]
+ and r3, r3, r2
+ cmp r3, r2
+ bne emcself @ loop until DDR in self-refresh
+
+ add r2, pc, #__lp1_pad_area-(.+8)
+
+padsave:ldm r2, {r0-r1}
+ cmp r0, #0
+ beq padsave_done
+ ldr r3, [r0]
+ str r1, [r0]
+ str r3, [r2, #4]
+ add r2, r2, #8
+ b padsave
+padsave_done:
+
+ ldr r0, [r5, #CLK_RESET_SCLK_BURST]
+ str r0, [r2, #4]
+ dsb
+ b __tear_down_master_pll_cpu
+ENDPROC(__tear_down_master_sdram)
+
+ .align L1_CACHE_SHIFT
+ .type __lp1_pad_area, %object
+__lp1_pad_area:
+ .word TEGRA_APB_MISC_BASE + 0x8c8 /* XM2CFGCPADCTRL */
+ .word 0x8
+ .word TEGRA_APB_MISC_BASE + 0x8cc /* XM2CFGDPADCTRL */
+ .word 0x8
+ .word TEGRA_APB_MISC_BASE + 0x8d0 /* XM2CLKCFGPADCTRL */
+ .word 0x0
+ .word TEGRA_APB_MISC_BASE + 0x8d4 /* XM2COMPPADCTRL */
+ .word 0x8
+ .word TEGRA_APB_MISC_BASE + 0x8d8 /* XM2VTTGENPADCTRL */
+ .word 0x5500
+ .word TEGRA_APB_MISC_BASE + 0x8e4 /* XM2CFGCPADCTRL2 */
+ .word 0x08080040
+ .word TEGRA_APB_MISC_BASE + 0x8e8 /* XM2CFGDPADCTRL2 */
+ .word 0x0
+ .word 0x0 /* end of list */
+ .word 0x0 /* sclk_burst_policy */
+ .size __lp1_pad_area, . - __lp1_pad_area
+
+ .align L1_CACHE_SHIFT
+__tear_down_master_pll_cpu:
+ ldr r0, [r4, #PMC_CTRL]
+ bfi r0, sp, #PMC_CTRL_BFI_SHIFT, #PMC_CTRL_BFI_WIDTH
+ str r0, [r4, #PMC_CTRL]
+ tst sp, #TEGRA_POWER_SDRAM_SELFREFRESH
+
+ /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */
+ moveq r0, #(2<<28) /* burst policy = run mode */
+ orreq r0, r0, #(4<<4) /* use PLLP in run mode burst */
+ streq r0, [r5, #CLK_RESET_CCLK_BURST]
+ moveq r0, #0
+ streq r0, [r5, #CLK_RESET_CCLK_DIVIDER]
+ beq __cclk_burst_set
+
+ /* in other modes, set system & CPU burst policies to 32KHz.
+ * start by jumping to CLKM to safely disable PLLs, then jump
+ * to CLKS */
+ mov r0, #(1<<28)
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+ str r0, [r5, #CLK_RESET_CCLK_BURST]
+ mov r0, #0
+ str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
+ str r0, [r5, #CLK_RESET_SCLK_DIVIDER]
+
+ /* 2 us delay between changing sclk and disabling PLLs */
+ wait_for_us r1, r7, r9
+ add r1, r1, #2
+ wait_until r1, r7, r9
+
+ /* switch to CLKS */
+ mov r0, #0 /* burst policy = 32KHz */
+ str r0, [r5, #CLK_RESET_SCLK_BURST]
+
+ /* disable PLLP, PLLM, PLLC in LP0 and LP1 states */
+ ldr r0, [r5, #CLK_RESET_PLLM_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLM_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLP_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLP_BASE]
+ ldr r0, [r5, #CLK_RESET_PLLC_BASE]
+ bic r0, r0, #(1<<30)
+ str r0, [r5, #CLK_RESET_PLLC_BASE]
+
+__cclk_burst_set:
+ mov r0, #(4<<29) /* STOP_UNTIL_IRQ */
+ orr r0, r0, #(1<<10) | (1<<8) /* IRQ_0, FIQ_0 */
+ ldr r1, [r7]
+ str r1, [r4, #PMC_SCRATCH38]
+ dsb
+ str r0, [r6, #FLOW_CTRL_HALT_CPU_EVENTS]
+ dsb
+ ldr r0, [r6, #FLOW_CTRL_HALT_CPU_EVENTS] /* memory barrier */
+
+halted: dsb
+ wfe /* CPU should be power gated here */
+ isb
+ b halted
+ENDPROC(__tear_down_master_pll_cpu)
+
+/*
+ * __put_cpu_in_reset(cpu_nr)
+ *
+ * puts the specified CPU in wait-for-event mode on the flow controller
+ * and puts the CPU in reset
+ */
+ENTRY(__put_cpu_in_reset)
+__put_cpu_in_reset:
+ cmp r0, #0
+ subne r1, r0, #1
+ movne r1, r1, lsl #3
+ addne r1, r1, #0x14
+ moveq r1, #0 @ r1 = CPUx_HALT_EVENTS register offset
+ mov32 r7, (TEGRA_FLOW_CTRL_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
+ mov r2, #(0x2<<29)
+ str r2, [r7, r1] @ put flow controller in wait event mode
+ isb
+ dsb
+ movw r1, 0x1011
+ mov r1, r1, lsl r0
+ mov32 r7, (TEGRA_CLK_RESET_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
+ str r1, [r7, #0x340] @ put slave CPU in reset
+ isb
+ dsb
+ b .
+ENDPROC(__put_cpu_in_reset)
+
+/* dummy symbol for end of IRAM */
+ .align L1_CACHE_SHIFT
+ENTRY(__tegra_iram_end)
+__tegra_iram_end:
+ b .
+ENDPROC(__tegra_iram_end)
--- /dev/null
+/*
+ * arch/arm/mach-tegra/fiq_debugger.c
+ *
+ * Serial Debugger Interface for Tegra
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <asm/fiq_debugger.h>
+#include <mach/tegra_fiq_debugger.h>
+#include <mach/system.h>
+#include <mach/fiq.h>
+
+#include <linux/uaccess.h>
+
+#include <mach/legacy_irq.h>
+
+struct tegra_fiq_debugger {
+ struct fiq_debugger_pdata pdata;
+ void __iomem *debug_port_base;
+ bool break_seen;
+};
+
+static inline void tegra_write(struct tegra_fiq_debugger *t,
+ unsigned int val, unsigned int off)
+{
+ __raw_writeb(val, t->debug_port_base + off * 4);
+}
+
+static inline unsigned int tegra_read(struct tegra_fiq_debugger *t,
+ unsigned int off)
+{
+ return __raw_readb(t->debug_port_base + off * 4);
+}
+
+static inline unsigned int tegra_read_lsr(struct tegra_fiq_debugger *t)
+{
+ unsigned int lsr;
+
+ lsr = tegra_read(t, UART_LSR);
+ if (lsr & UART_LSR_BI)
+ t->break_seen = true;
+
+ return lsr;
+}
+
+static int debug_port_init(struct platform_device *pdev)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ if (tegra_read(t, UART_LSR) & UART_LSR_DR)
+ (void)tegra_read(t, UART_RX);
+ /* enable rx and lsr interrupt */
+ tegra_write(t, UART_IER_RLSI | UART_IER_RDI, UART_IER);
+ /* interrupt on every character */
+ tegra_write(t, 0, UART_IIR);
+
+ return 0;
+}
+
+static int debug_getc(struct platform_device *pdev)
+{
+ unsigned int lsr;
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ lsr = tegra_read_lsr(t);
+
+ if (lsr & UART_LSR_BI || t->break_seen) {
+ t->break_seen = false;
+ return FIQ_DEBUGGER_BREAK;
+ }
+
+ if (lsr & UART_LSR_DR)
+ return tegra_read(t, UART_RX);
+
+ return FIQ_DEBUGGER_NO_CHAR;
+}
+
+static void debug_putc(struct platform_device *pdev, unsigned int c)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (!(tegra_read_lsr(t) & UART_LSR_THRE))
+ cpu_relax();
+
+ tegra_write(t, c, UART_TX);
+}
+
+static void debug_flush(struct platform_device *pdev)
+{
+ struct tegra_fiq_debugger *t;
+ t = container_of(dev_get_platdata(&pdev->dev), typeof(*t), pdata);
+
+ while (!(tegra_read_lsr(t) & UART_LSR_TEMT))
+ cpu_relax();
+}
+
+static void fiq_enable(struct platform_device *pdev, unsigned int irq, bool on)
+{
+ if (on)
+ tegra_fiq_enable(irq);
+ else
+ tegra_fiq_disable(irq);
+}
+
+static int tegra_fiq_debugger_id;
+
+void tegra_serial_debug_init(unsigned int base, int irq,
+ struct clk *clk, int signal_irq, int wakeup_irq)
+{
+ struct tegra_fiq_debugger *t;
+ struct platform_device *pdev;
+ struct resource *res;
+ int res_count;
+
+ t = kzalloc(sizeof(struct tegra_fiq_debugger), GFP_KERNEL);
+ if (!t) {
+ pr_err("Failed to allocate for fiq debugger\n");
+ return;
+ }
+
+ t->pdata.uart_init = debug_port_init;
+ t->pdata.uart_getc = debug_getc;
+ t->pdata.uart_putc = debug_putc;
+ t->pdata.uart_flush = debug_flush;
+ t->pdata.fiq_enable = fiq_enable;
+
+ t->debug_port_base = ioremap(base, PAGE_SIZE);
+ if (!t->debug_port_base) {
+ pr_err("Failed to ioremap for fiq debugger\n");
+ goto out1;
+ }
+
+ res = kzalloc(sizeof(struct resource) * 3, GFP_KERNEL);
+ if (!res) {
+ pr_err("Failed to alloc fiq debugger resources\n");
+ goto out2;
+ }
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!pdev) {
+ pr_err("Failed to alloc fiq debugger platform device\n");
+ goto out3;
+ };
+
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].start = irq;
+ res[0].end = irq;
+ res[0].name = "fiq";
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = signal_irq;
+ res[1].end = signal_irq;
+ res[1].name = "signal";
+ res_count = 2;
+
+ if (wakeup_irq >= 0) {
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = wakeup_irq;
+ res[2].end = wakeup_irq;
+ res[2].name = "wakeup";
+ res_count++;
+ }
+
+ pdev->name = "fiq_debugger";
+ pdev->id = tegra_fiq_debugger_id++;
+ pdev->dev.platform_data = &t->pdata;
+ pdev->resource = res;
+ pdev->num_resources = res_count;
+
+ if (platform_device_register(pdev)) {
+ pr_err("Failed to register fiq debugger\n");
+ goto out4;
+ }
+
+ return;
+
+out4:
+ kfree(pdev);
+out3:
+ kfree(res);
+out2:
+ iounmap(t->debug_port_base);
+out1:
+ kfree(t);
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/tegra_i2s_audio.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* TODO:
+ -- make I2S_MAX_NUM_BUFS configurable through an ioctl
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/sysfs.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/tegra_audio.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <mach/i2s.h>
+#include <mach/audio.h>
+#include <mach/irqs.h>
+
+#include "clock.h"
+
+#define PCM_BUFFER_MAX_SIZE_ORDER PAGE_SHIFT
+
+#define TEGRA_AUDIO_DSP_NONE 0
+#define TEGRA_AUDIO_DSP_PCM 1
+#define TEGRA_AUDIO_DSP_NETWORK 2
+#define TEGRA_AUDIO_DSP_TDM 3
+
+#define I2S_MAX_NUM_BUFS 4
+#define I2S_DEFAULT_TX_NUM_BUFS 2
+#define I2S_DEFAULT_RX_NUM_BUFS 2
+
+/* per stream (input/output) */
+struct audio_stream {
+ int opened;
+ struct mutex lock;
+
+ bool active; /* is DMA in progress? */
+ int num_bufs;
+ void *buffer[I2S_MAX_NUM_BUFS];
+ dma_addr_t buf_phy[I2S_MAX_NUM_BUFS];
+ struct completion comp[I2S_MAX_NUM_BUFS];
+ struct tegra_dma_req dma_req[I2S_MAX_NUM_BUFS];
+ int last_queued;
+
+ int i2s_fifo_atn_level;
+
+ struct tegra_dma_channel *dma_chan;
+ bool stop;
+ struct completion stop_completion;
+ spinlock_t dma_req_lock;
+
+ struct work_struct allow_suspend_work;
+ struct wake_lock wake_lock;
+ char wake_lock_name[100];
+};
+
+/* per i2s controller */
+struct audio_driver_state {
+ struct list_head next;
+
+ struct platform_device *pdev;
+ struct tegra_audio_platform_data *pdata;
+ phys_addr_t i2s_phys;
+ unsigned long i2s_base;
+
+ unsigned long dma_req_sel;
+
+ int irq;
+ struct tegra_audio_in_config in_config;
+
+ struct miscdevice misc_out;
+ struct miscdevice misc_out_ctl;
+ struct audio_stream out;
+
+ struct miscdevice misc_in;
+ struct miscdevice misc_in_ctl;
+ struct audio_stream in;
+
+ /* Control for whole I2S (Data format, etc.) */
+ struct miscdevice misc_ctl;
+ unsigned int bit_format;
+};
+
+static inline bool pending_buffer_requests(struct audio_stream *stream)
+{
+ int i;
+ for (i = 0; i < stream->num_bufs; i++)
+ if (!completion_done(&stream->comp[i]))
+ return true;
+ return false;
+}
+
+static inline int buf_size(struct audio_stream *s __attribute__((unused)))
+{
+ return 1 << PCM_BUFFER_MAX_SIZE_ORDER;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_out);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_out_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_in(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_in);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_in_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_in_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_out(
+ struct audio_stream *aos)
+{
+ return container_of(aos, struct audio_driver_state, out);
+}
+
+static inline struct audio_driver_state *ads_from_in(
+ struct audio_stream *ais)
+{
+ return container_of(ais, struct audio_driver_state, in);
+}
+
+static inline void prevent_suspend(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
+ wake_lock(&as->wake_lock);
+}
+
+static void allow_suspend_worker(struct work_struct *w)
+{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+ pr_debug("%s\n", __func__);
+ wake_unlock(&as->wake_lock);
+}
+
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
+#define I2S_I2S_FIFO_TX_BUSY I2S_I2S_STATUS_FIFO1_BSY
+#define I2S_I2S_FIFO_TX_QS I2S_I2S_STATUS_QS_FIFO1
+#define I2S_I2S_FIFO_TX_ERR I2S_I2S_STATUS_FIFO1_ERR
+
+#define I2S_I2S_FIFO_RX_BUSY I2S_I2S_STATUS_FIFO2_BSY
+#define I2S_I2S_FIFO_RX_QS I2S_I2S_STATUS_QS_FIFO2
+#define I2S_I2S_FIFO_RX_ERR I2S_I2S_STATUS_FIFO2_ERR
+
+#define I2S_FIFO_ERR (I2S_I2S_STATUS_FIFO1_ERR | I2S_I2S_STATUS_FIFO2_ERR)
+
+static inline void i2s_writel(unsigned long base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static inline u32 i2s_readl(unsigned long base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void i2s_fifo_write(unsigned long base, int fifo, u32 data)
+{
+ i2s_writel(base, data, fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
+static inline u32 i2s_fifo_read(unsigned long base, int fifo)
+{
+ return i2s_readl(base, fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
+static int i2s_set_channel_bit_count(unsigned long base,
+ int sampling, int bitclk)
+{
+ u32 val;
+ int bitcnt = bitclk / (2 * sampling) - 1;
+
+ if (bitcnt < 0 || bitcnt >= 1<<11) {
+ pr_err("%s: bit count %d is out of bounds\n", __func__,
+ bitcnt);
+ return -EINVAL;
+ }
+
+ val = bitcnt;
+ if (bitclk % (2 * sampling)) {
+ pr_info("%s: enabling non-symmetric mode\n", __func__);
+ val |= I2S_I2S_TIMING_NON_SYM_ENABLE;
+ }
+
+ pr_debug("%s: I2S_I2S_TIMING_0 = %08x\n", __func__, val);
+ i2s_writel(base, val, I2S_I2S_TIMING_0);
+ return 0;
+}
+
+static void i2s_set_fifo_mode(unsigned long base, int fifo, int tx)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (fifo == 0) {
+ val &= ~I2S_I2S_CTRL_FIFO1_RX_ENABLE;
+ val |= (!tx) ? I2S_I2S_CTRL_FIFO1_RX_ENABLE : 0;
+ } else {
+ val &= ~I2S_I2S_CTRL_FIFO2_TX_ENABLE;
+ val |= tx ? I2S_I2S_CTRL_FIFO2_TX_ENABLE : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static int i2s_fifo_set_attention_level(unsigned long base,
+ int fifo, unsigned level)
+{
+ u32 val;
+
+ if (level > I2S_FIFO_ATN_LVL_TWELVE_SLOTS) {
+ pr_err("%s: invalid fifo level selector %d\n", __func__,
+ level);
+ return -EINVAL;
+ }
+
+ val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+
+ if (!fifo) {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO1_ATN_LVL_MASK;
+ val |= level << I2S_FIFO1_ATN_LVL_SHIFT;
+ } else {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO2_ATN_LVL_MASK;
+ val |= level << I2S_FIFO2_ATN_LVL_SHIFT;
+ }
+
+ i2s_writel(base, val, I2S_I2S_FIFO_SCR_0);
+ return 0;
+}
+
+static void i2s_fifo_enable(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_CTRL_FIFO1_ENABLE;
+ val |= on ? I2S_I2S_CTRL_FIFO1_ENABLE : 0;
+ } else {
+ val &= ~I2S_I2S_CTRL_FIFO2_ENABLE;
+ val |= on ? I2S_I2S_CTRL_FIFO2_ENABLE : 0;
+ }
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+#if 0
+static bool i2s_is_fifo_enabled(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo)
+ return !!(val & I2S_I2S_CTRL_FIFO1_ENABLE);
+ return !!(val & I2S_I2S_CTRL_FIFO2_ENABLE);
+}
+#endif
+
+static void i2s_fifo_clear(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO1_CLR;
+ val |= I2S_I2S_FIFO_SCR_FIFO1_CLR;
+#if 0
+ /* Per Nvidia, reduces pop on the next run. */
+ if (!(val & I2S_I2S_CTRL_FIFO1_RX_ENABLE)) {
+ int cnt = 16;
+ while (cnt--)
+ i2s_writel(base, 0, I2S_I2S_FIFO1_0);
+ }
+#endif
+ } else {
+ val &= ~I2S_I2S_FIFO_SCR_FIFO2_CLR;
+ val |= I2S_I2S_FIFO_SCR_FIFO2_CLR;
+ }
+
+ i2s_writel(base, val, I2S_I2S_FIFO_SCR_0);
+}
+
+static void i2s_set_master(unsigned long base, int master)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_MASTER_ENABLE;
+ val |= master ? I2S_I2S_CTRL_MASTER_ENABLE : 0;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static int i2s_set_dsp_mode(unsigned long base, unsigned int mode)
+{
+ u32 val;
+ if (mode > TEGRA_AUDIO_DSP_TDM) {
+ pr_err("%s: invalid mode %d.\n", __func__, mode);
+ return -EINVAL;
+ }
+ if (mode == TEGRA_AUDIO_DSP_TDM) {
+ pr_err("TEGRA_AUDIO_DSP_TDM not implemented.\n");
+ return -EINVAL;
+ }
+
+ /* Disable unused modes */
+ if (mode != TEGRA_AUDIO_DSP_PCM) {
+ /* Disable PCM mode */
+ val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
+ val &= ~(I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE);
+ i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
+ }
+ if (mode != TEGRA_AUDIO_DSP_NETWORK) {
+ /* Disable Network mode */
+ val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
+ val &= ~(I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE);
+ i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
+ }
+
+ /* Enable the selected mode. */
+ switch (mode) {
+ case TEGRA_AUDIO_DSP_NETWORK:
+ /* Set DSP Network (Telephony) Mode */
+ val = i2s_readl(base, I2S_I2S_NW_CTRL_0);
+ val |= I2S_I2S_NW_CTRL_TRM_TLPHY_MODE |
+ I2S_I2S_NW_CTRL_RCV_TLPHY_MODE;
+ i2s_writel(base, val, I2S_I2S_NW_CTRL_0);
+ break;
+ case TEGRA_AUDIO_DSP_PCM:
+ /* Set DSP PCM Mode */
+ val = i2s_readl(base, I2S_I2S_PCM_CTRL_0);
+ val |= I2S_I2S_PCM_CTRL_TRM_MODE |
+ I2S_I2S_PCM_CTRL_RCV_MODE;
+ i2s_writel(base, val, I2S_I2S_PCM_CTRL_0);
+ break;
+ }
+
+ return 0;
+}
+
+static int i2s_set_bit_format(unsigned long base, unsigned fmt)
+{
+ u32 val;
+
+ if (fmt > I2S_BIT_FORMAT_DSP) {
+ pr_err("%s: invalid bit-format selector %d\n", __func__, fmt);
+ return -EINVAL;
+ }
+
+ val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_BIT_FORMAT_MASK;
+ val |= fmt << I2S_BIT_FORMAT_SHIFT;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ /* For DSP format, select DSP PCM mode. */
+ /* PCM mode and Network Mode slot 0 are effectively identical. */
+ if (fmt == I2S_BIT_FORMAT_DSP)
+ i2s_set_dsp_mode(base, TEGRA_AUDIO_DSP_PCM);
+ else
+ i2s_set_dsp_mode(base, TEGRA_AUDIO_DSP_NONE);
+
+ return 0;
+}
+
+static int i2s_set_bit_size(unsigned long base, unsigned bit_size)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_BIT_SIZE_MASK;
+
+ if (bit_size > I2S_BIT_SIZE_32) {
+ pr_err("%s: invalid bit_size selector %d\n", __func__,
+ bit_size);
+ return -EINVAL;
+ }
+
+ val |= bit_size << I2S_BIT_SIZE_SHIFT;
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ return 0;
+}
+
+static int i2s_set_fifo_format(unsigned long base, unsigned fmt)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_FIFO_FORMAT_MASK;
+
+ if (fmt > I2S_FIFO_32 && fmt != I2S_FIFO_PACKED) {
+ pr_err("%s: invalid fmt selector %d\n", __func__, fmt);
+ return -EINVAL;
+ }
+
+ val |= fmt << I2S_FIFO_SHIFT;
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+ return 0;
+}
+
+static void i2s_set_left_right_control_polarity(unsigned long base,
+ int high_low)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ val &= ~I2S_I2S_CTRL_L_R_CTRL;
+ val |= high_low ? I2S_I2S_CTRL_L_R_CTRL : 0;
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+#if 0
+static void i2s_set_fifo_irq_on_err(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_IE_FIFO1_ERR;
+ val |= on ? I2S_I2S_IE_FIFO1_ERR : 0;
+ } else {
+ val &= ~I2S_I2S_IE_FIFO2_ERR;
+ val |= on ? I2S_I2S_IE_FIFO2_ERR : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static void i2s_set_fifo_irq_on_qe(unsigned long base, int fifo, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (!fifo) {
+ val &= ~I2S_I2S_QE_FIFO1;
+ val |= on ? I2S_I2S_QE_FIFO1 : 0;
+ } else {
+ val &= ~I2S_I2S_QE_FIFO2;
+ val |= on ? I2S_I2S_QE_FIFO2 : 0;
+ }
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+#endif
+
+static void i2s_enable_fifos(unsigned long base, int on)
+{
+ u32 val = i2s_readl(base, I2S_I2S_CTRL_0);
+ if (on)
+ val |= I2S_I2S_QE_FIFO1 | I2S_I2S_QE_FIFO2 |
+ I2S_I2S_IE_FIFO1_ERR | I2S_I2S_IE_FIFO2_ERR;
+ else
+ val &= ~(I2S_I2S_QE_FIFO1 | I2S_I2S_QE_FIFO2 |
+ I2S_I2S_IE_FIFO1_ERR | I2S_I2S_IE_FIFO2_ERR);
+
+ i2s_writel(base, val, I2S_I2S_CTRL_0);
+}
+
+static inline u32 i2s_get_status(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_STATUS_0);
+}
+
+static inline u32 i2s_get_control(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_CTRL_0);
+}
+
+static inline void i2s_ack_status(unsigned long base)
+{
+ return i2s_writel(base, i2s_readl(base, I2S_I2S_STATUS_0),
+ I2S_I2S_STATUS_0);
+}
+
+static inline u32 i2s_get_fifo_scr(unsigned long base)
+{
+ return i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+}
+
+static inline phys_addr_t i2s_get_fifo_phy_base(unsigned long phy_base,
+ int fifo)
+{
+ return phy_base + (fifo ? I2S_I2S_FIFO2_0 : I2S_I2S_FIFO1_0);
+}
+
+static inline u32 i2s_get_fifo_full_empty_count(unsigned long base, int fifo)
+{
+ u32 val = i2s_readl(base, I2S_I2S_FIFO_SCR_0);
+
+ if (!fifo)
+ val = val >> I2S_I2S_FIFO_SCR_FIFO1_FULL_EMPTY_COUNT_SHIFT;
+ else
+ val = val >> I2S_I2S_FIFO_SCR_FIFO2_FULL_EMPTY_COUNT_SHIFT;
+
+ return val & I2S_I2S_FIFO_SCR_FIFO_FULL_EMPTY_COUNT_MASK;
+}
+
+static int i2s_configure(struct platform_device *pdev)
+{
+ struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
+ struct audio_driver_state *state = pdata->driver_data;
+ bool master;
+ struct clk *i2s_clk;
+ int master_clk;
+
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+
+ if (!state)
+ return -ENOMEM;
+
+ /* disable interrupts from I2S */
+ i2s_enable_fifos(state->i2s_base, 0);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_TX);
+ i2s_fifo_clear(state->i2s_base, I2S_FIFO_RX);
+ i2s_set_left_right_control_polarity(state->i2s_base, 0); /* default */
+
+ i2s_clk = clk_get(&pdev->dev, NULL);
+ if (!i2s_clk) {
+ dev_err(&pdev->dev, "%s: could not get i2s clock\n",
+ __func__);
+ return -EIO;
+ }
+
+ master = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master : state->pdata->i2s_master;
+
+
+ master_clk = state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP ?
+ state->pdata->dsp_master_clk :
+ state->pdata->i2s_master_clk;
+#define I2S_CLK_TO_BITCLK_RATIO 2 /* Todo, Bitclk based on 2X clock? */
+ if (master)
+ i2s_set_channel_bit_count(state->i2s_base, master_clk,
+ clk_get_rate(i2s_clk)*I2S_CLK_TO_BITCLK_RATIO);
+ i2s_set_master(state->i2s_base, master);
+
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_TX, 1);
+ i2s_set_fifo_mode(state->i2s_base, I2S_FIFO_RX, 0);
+
+ if (state->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+ i2s_set_bit_format(state->i2s_base, I2S_BIT_FORMAT_DSP);
+ else
+ i2s_set_bit_format(state->i2s_base, state->pdata->mode);
+ i2s_set_bit_size(state->i2s_base, state->pdata->bit_size);
+ i2s_set_fifo_format(state->i2s_base, state->pdata->fifo_fmt);
+
+ return 0;
+}
+
+static int init_stream_buffer(struct audio_stream *, int);
+
+static int setup_dma(struct audio_driver_state *, int);
+static void tear_down_dma(struct audio_driver_state *, int);
+static void stop_dma_playback(struct audio_stream *);
+static int start_dma_recording(struct audio_stream *, int);
+static void stop_dma_recording(struct audio_stream *);
+
+struct sound_ops {
+ int (*setup)(struct audio_driver_state *, int);
+ void (*tear_down)(struct audio_driver_state *, int);
+ void (*stop_playback)(struct audio_stream *);
+ int (*start_recording)(struct audio_stream *, int);
+ void (*stop_recording)(struct audio_stream *);
+};
+
+static const struct sound_ops dma_sound_ops = {
+ .setup = setup_dma,
+ .tear_down = tear_down_dma,
+ .stop_playback = stop_dma_playback,
+ .start_recording = start_dma_recording,
+ .stop_recording = stop_dma_recording,
+};
+
+static const struct sound_ops *sound_ops = &dma_sound_ops;
+
+static int start_recording_if_necessary(struct audio_stream *ais, int size)
+{
+ int rc = 0;
+ unsigned long flags;
+ prevent_suspend(ais);
+ spin_lock_irqsave(&ais->dma_req_lock, flags);
+ if (!ais->stop && !pending_buffer_requests(ais)) {
+ /* pr_debug("%s: starting recording\n", __func__); */
+ rc = sound_ops->start_recording(ais, size);
+ if (rc) {
+ pr_err("%s start_recording() failed\n", __func__);
+ allow_suspend(ais);
+ }
+ }
+ spin_unlock_irqrestore(&ais->dma_req_lock, flags);
+ return rc;
+}
+
+static bool stop_playback_if_necessary(struct audio_stream *aos)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+ pr_debug("%s\n", __func__);
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: no more data to play back\n", __func__);
+ sound_ops->stop_playback(aos);
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+ allow_suspend(aos);
+ return true;
+ }
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+ return false;
+}
+
+/* playback and recording */
+static bool wait_till_stopped(struct audio_stream *as)
+{
+ int rc;
+ pr_debug("%s: wait for completion\n", __func__);
+ rc = wait_for_completion_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out", __func__);
+ if (rc < 0)
+ pr_err("%s: wait error %d\n", __func__, rc);
+ allow_suspend(as);
+ pr_debug("%s: done: %d\n", __func__, rc);
+ return true;
+}
+
+/* Ask for playback and recording to stop. The _nosync means that
+ * as->lock has to be locked by the caller.
+ */
+static void request_stop_nosync(struct audio_stream *as)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+ if (!as->stop) {
+ as->stop = true;
+ if (pending_buffer_requests(as))
+ wait_till_stopped(as);
+ for (i = 0; i < as->num_bufs; i++) {
+ init_completion(&as->comp[i]);
+ complete(&as->comp[i]);
+ }
+ }
+ if (!tegra_dma_is_empty(as->dma_chan))
+ pr_err("%s: DMA not empty!\n", __func__);
+ /* Stop the DMA then dequeue anything that's in progress. */
+ tegra_dma_cancel(as->dma_chan);
+ as->active = false; /* applies to recording only */
+ pr_debug("%s: done\n", __func__);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+ struct audio_stream *aos);
+
+static void setup_dma_rx_request(struct tegra_dma_req *req,
+ struct audio_stream *ais);
+
+static int setup_dma(struct audio_driver_state *ads, int mask)
+{
+ int rc, i;
+ pr_info("%s\n", __func__);
+
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ /* setup audio playback */
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ ads->out.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+ ads->out.buffer[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_TO_DEVICE);
+ BUG_ON(!ads->out.buf_phy[i]);
+ setup_dma_tx_request(&ads->out.dma_req[i], &ads->out);
+ ads->out.dma_req[i].source_addr = ads->out.buf_phy[i];
+ }
+ ads->out.dma_chan = tegra_dma_allocate_channel(
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE);
+ if (!ads->out.dma_chan) {
+ pr_err("%s: error alloc output DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->out.dma_chan));
+ rc = -ENODEV;
+ goto fail_tx;
+ }
+ }
+
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ /* setup audio recording */
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ ads->in.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+ ads->in.buffer[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_FROM_DEVICE);
+ BUG_ON(!ads->in.buf_phy[i]);
+ setup_dma_rx_request(&ads->in.dma_req[i], &ads->in);
+ ads->in.dma_req[i].dest_addr = ads->in.buf_phy[i];
+ }
+ ads->in.dma_chan = tegra_dma_allocate_channel(
+ TEGRA_DMA_MODE_CONTINUOUS_SINGLE);
+ if (!ads->in.dma_chan) {
+ pr_err("%s: error allocating input DMA channel: %ld\n",
+ __func__, PTR_ERR(ads->in.dma_chan));
+ rc = -ENODEV;
+ goto fail_rx;
+ }
+ }
+
+ return 0;
+
+fail_rx:
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->in.buf_phy[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_FROM_DEVICE);
+ ads->in.buf_phy[i] = 0;
+ }
+ tegra_dma_free_channel(ads->in.dma_chan);
+ ads->in.dma_chan = 0;
+ }
+fail_tx:
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ 1 << PCM_BUFFER_MAX_SIZE_ORDER,
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+ tegra_dma_free_channel(ads->out.dma_chan);
+ ads->out.dma_chan = 0;
+ }
+
+ return rc;
+}
+
+static void tear_down_dma(struct audio_driver_state *ads, int mask)
+{
+ int i;
+ pr_info("%s\n", __func__);
+
+ if (mask & TEGRA_AUDIO_ENABLE_TX) {
+ tegra_dma_free_channel(ads->out.dma_chan);
+ for (i = 0; i < ads->out.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+ buf_size(&ads->out),
+ DMA_TO_DEVICE);
+ ads->out.buf_phy[i] = 0;
+ }
+ }
+ ads->out.dma_chan = NULL;
+
+ if (mask & TEGRA_AUDIO_ENABLE_RX) {
+ tegra_dma_free_channel(ads->in.dma_chan);
+ for (i = 0; i < ads->in.num_bufs; i++) {
+ dma_unmap_single(&ads->pdev->dev, ads->in.buf_phy[i],
+ buf_size(&ads->in),
+ DMA_FROM_DEVICE);
+ ads->in.buf_phy[i] = 0;
+ }
+ }
+ ads->in.dma_chan = NULL;
+}
+
+static void dma_tx_complete_callback(struct tegra_dma_req *req)
+{
+ unsigned long flags;
+ struct audio_stream *aos = req->dev;
+ unsigned req_num;
+
+ spin_lock_irqsave(&aos->dma_req_lock, flags);
+
+ req_num = req - aos->dma_req;
+ pr_debug("%s: completed buffer %d size %d\n", __func__,
+ req_num, req->bytes_transferred);
+ BUG_ON(req_num >= aos->num_bufs);
+
+ complete(&aos->comp[req_num]);
+
+ if (!pending_buffer_requests(aos)) {
+ pr_debug("%s: Playback underflow\n", __func__);
+ complete(&aos->stop_completion);
+ }
+
+ spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+}
+
+static void dma_rx_complete_callback(struct tegra_dma_req *req)
+{
+ unsigned long flags;
+ struct audio_stream *ais = req->dev;
+ unsigned req_num;
+
+ spin_lock_irqsave(&ais->dma_req_lock, flags);
+
+ req_num = req - ais->dma_req;
+ pr_debug("%s: completed buffer %d size %d\n", __func__,
+ req_num, req->bytes_transferred);
+ BUG_ON(req_num >= ais->num_bufs);
+
+ complete(&ais->comp[req_num]);
+
+ if (!pending_buffer_requests(ais))
+ pr_debug("%s: Capture overflow\n", __func__);
+
+ spin_unlock_irqrestore(&ais->dma_req_lock, flags);
+}
+
+/* Initialize one memory-to-FIFO (playback) DMA request descriptor:
+ * destination is the I2S TX FIFO (fixed address, 4-byte wrap), source
+ * is a 32-bit-wide linear memory read.  The FIFO-side bus width comes
+ * from platform data and depends on the current bit format.
+ */
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+		struct audio_stream *aos)
+{
+	struct audio_driver_state *ads = ads_from_out(aos);
+
+	memset(req, 0, sizeof(*req));
+
+	req->complete = dma_tx_complete_callback;
+	req->dev = aos;
+	req->to_memory = false;
+	req->dest_addr = i2s_get_fifo_phy_base(ads->i2s_phys, I2S_FIFO_TX);
+	req->dest_wrap = 4;
+	if (ads->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+		req->dest_bus_width = ads->pdata->dsp_bus_width;
+	else
+		req->dest_bus_width = ads->pdata->i2s_bus_width;
+	req->source_bus_width = 32;
+	req->source_wrap = 0;
+	req->req_sel = ads->dma_req_sel;
+}
+
+/* Initialize one FIFO-to-memory (capture) DMA request descriptor:
+ * source is the I2S RX FIFO (fixed address, 4-byte wrap), destination
+ * is a 32-bit-wide linear memory write.  Mirror image of
+ * setup_dma_tx_request().
+ */
+static void setup_dma_rx_request(struct tegra_dma_req *req,
+		struct audio_stream *ais)
+{
+	struct audio_driver_state *ads = ads_from_in(ais);
+
+	memset(req, 0, sizeof(*req));
+
+	req->complete = dma_rx_complete_callback;
+	req->dev = ais;
+	req->to_memory = true;
+	req->source_addr = i2s_get_fifo_phy_base(ads->i2s_phys, I2S_FIFO_RX);
+	req->source_wrap = 4;
+	if (ads->bit_format == TEGRA_AUDIO_BIT_FORMAT_DSP)
+		req->source_bus_width = ads->pdata->dsp_bus_width;
+	else
+		req->source_bus_width = ads->pdata->i2s_bus_width;
+	req->dest_bus_width = 32;
+	req->dest_wrap = 0;
+	req->req_sel = ads->dma_req_sel;
+}
+
+/* Enable the TX FIFO and enqueue one prepared DMA request.  The FIFO
+ * attention level is (re)programmed and the FIFO enabled under
+ * dma_req_lock so a concurrent completion callback cannot observe a
+ * half-configured FIFO.  Returns 0 or the enqueue error.
+ */
+static int start_playback(struct audio_stream *aos,
+			struct tegra_dma_req *req)
+{
+	int rc;
+	unsigned long flags;
+	struct audio_driver_state *ads = ads_from_out(aos);
+
+	pr_debug("%s: (writing %d)\n",
+			__func__, req->size);
+
+	spin_lock_irqsave(&aos->dma_req_lock, flags);
+#if 0
+	i2s_fifo_clear(ads->i2s_base, I2S_FIFO_TX);
+#endif
+	i2s_fifo_set_attention_level(ads->i2s_base,
+			I2S_FIFO_TX, aos->i2s_fifo_atn_level);
+
+	i2s_fifo_enable(ads->i2s_base, I2S_FIFO_TX, 1);
+
+	rc = tegra_dma_enqueue_req(aos->dma_chan, req);
+	spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+	if (rc)
+		pr_err("%s: could not enqueue TX DMA req\n", __func__);
+	return rc;
+}
+
+/* Called with aos->dma_req_lock taken. */
+/* Called with aos->dma_req_lock taken. */
+/* Disable the TX FIFO, then busy-poll (10us steps, at most 1ms total)
+ * for the FIFO busy bit to clear so the hardware finishes draining
+ * before the caller tears anything down.
+ */
+static void stop_dma_playback(struct audio_stream *aos)
+{
+	int spin = 0;
+	struct audio_driver_state *ads = ads_from_out(aos);
+	pr_debug("%s\n", __func__);
+	i2s_fifo_enable(ads->i2s_base, I2S_FIFO_TX, 0);
+	while ((i2s_get_status(ads->i2s_base) & I2S_I2S_FIFO_TX_BUSY) &&
+			spin < 100) {
+		udelay(10);
+		if (spin++ > 50)
+			pr_info("%s: spin %d\n", __func__, spin);
+	}
+	if (spin == 100)
+		pr_warn("%s: spinny\n", __func__);
+}
+
+/* This function may be called from either interrupt or process context. */
+/* Called with ais->dma_req_lock taken. */
+/* This function may be called from either interrupt or process context. */
+/* Called with ais->dma_req_lock taken. */
+/* Queue every capture buffer for DMA (all completions were reset), then
+ * program the RX FIFO attention level and enable the FIFO.  Always
+ * returns 0.
+ *
+ * NOTE(review): the tegra_dma_enqueue_req() return value is ignored
+ * here; a failed enqueue would go unnoticed — worth confirming it
+ * cannot fail on an empty channel.
+ */
+static int start_dma_recording(struct audio_stream *ais, int size)
+{
+	int i;
+	struct audio_driver_state *ads = ads_from_in(ais);
+
+	pr_debug("%s\n", __func__);
+
+	BUG_ON(pending_buffer_requests(ais));
+
+	for (i = 0; i < ais->num_bufs; i++) {
+		init_completion(&ais->comp[i]);
+		ais->dma_req[i].dest_addr = ais->buf_phy[i];
+		ais->dma_req[i].size = size;
+		tegra_dma_enqueue_req(ais->dma_chan, &ais->dma_req[i]);
+	}
+
+	/* All buffers queued: the "next" buffer to complete is index 0. */
+	ais->last_queued = ais->num_bufs - 1;
+
+#if 0
+	i2s_fifo_clear(ads->i2s_base, I2S_FIFO_RX);
+#endif
+	i2s_fifo_set_attention_level(ads->i2s_base,
+			I2S_FIFO_RX, ais->i2s_fifo_atn_level);
+	i2s_fifo_enable(ads->i2s_base, I2S_FIFO_RX, 1);
+	return 0;
+}
+
+/* Cancel outstanding capture DMA, disable and clear the RX FIFO, then
+ * busy-poll (10us steps, at most 1ms total) for the FIFO busy bit to
+ * drop before returning.
+ */
+static void stop_dma_recording(struct audio_stream *ais)
+{
+	int spin = 0;
+	struct audio_driver_state *ads = ads_from_in(ais);
+	pr_debug("%s\n", __func__);
+	tegra_dma_cancel(ais->dma_chan);
+	i2s_fifo_enable(ads->i2s_base, I2S_FIFO_RX, 0);
+	i2s_fifo_clear(ads->i2s_base, I2S_FIFO_RX);
+	while ((i2s_get_status(ads->i2s_base) & I2S_I2S_FIFO_RX_BUSY) &&
+			spin < 100) {
+		udelay(10);
+		if (spin++ > 50)
+			pr_info("%s: spin %d\n", __func__, spin);
+	}
+	if (spin == 100)
+		pr_warn("%s: spinny\n", __func__);
+}
+
+/* I2S IRQ handler.  Data movement is driven by the DMA callbacks, so
+ * the only work here is acknowledging FIFO error conditions; the
+ * interrupt is always reported as handled.
+ */
+static irqreturn_t i2s_interrupt(int irq, void *data)
+{
+	struct audio_driver_state *ads = data;
+	u32 status = i2s_get_status(ads->i2s_base);
+
+	pr_debug("%s: %08x\n", __func__, status);
+
+	if (status & I2S_FIFO_ERR)
+		i2s_ack_status(ads->i2s_base);
+
+	pr_debug("%s: done %08x\n", __func__, i2s_get_status(ads->i2s_base));
+	return IRQ_HANDLED;
+}
+
+/* write() handler for the audioN_out misc device.  Copies one chunk of
+ * PCM data (4-byte aligned, at most one buffer's worth) from userspace
+ * into the next free DMA buffer and queues it for playback, blocking
+ * up to 1s for that buffer to drain first.  Returns bytes queued, 0 if
+ * playback was cancelled, or a negative error.
+ */
+static ssize_t tegra_audio_write(struct file *file,
+		const char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t rc = 0;
+	int out_buf;
+	struct tegra_dma_req *req;
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	mutex_lock(&ads->out.lock);
+
+	if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->out)) {
+		/* %zu: size is a size_t; %d was a format mismatch. */
+		pr_err("%s: invalid user size %zu\n", __func__, size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: write %zu bytes\n", __func__, size);
+
+	if (ads->out.stop) {
+		pr_debug("%s: playback has been cancelled\n", __func__);
+		goto done;
+	}
+
+	/* Decide which buf is next. */
+	out_buf = (ads->out.last_queued + 1) % ads->out.num_bufs;
+	req = &ads->out.dma_req[out_buf];
+
+	/* Wait for the buffer to be emptied (complete). The maximum timeout
+	 * value could be calculated dynamically based on buf_size(&ads->out).
+	 * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+	 * have ~93ms.
+	 */
+	pr_debug("%s: waiting for buffer %d\n", __func__, out_buf);
+	rc = wait_for_completion_interruptible_timeout(
+			&ads->out.comp[out_buf], HZ);
+	if (!rc) {
+		pr_err("%s: timeout", __func__);
+		rc = -ETIMEDOUT;
+		goto done;
+	} else if (rc < 0) {
+		/* %zd: rc is a ssize_t; %d was a format mismatch. */
+		pr_err("%s: wait error %zd", __func__, rc);
+		goto done;
+	}
+
+	/* Fill the buffer and enqueue it. */
+	pr_debug("%s: acquired buffer %d, copying data\n", __func__, out_buf);
+	rc = copy_from_user(ads->out.buffer[out_buf], buf, size);
+	if (rc) {
+		rc = -EFAULT;
+		goto done;
+	}
+
+	/* Hold the wake lock while a DMA request is outstanding. */
+	prevent_suspend(&ads->out);
+
+	req->size = size;
+	dma_sync_single_for_device(NULL,
+			req->source_addr, req->size, DMA_TO_DEVICE);
+	ads->out.last_queued = out_buf;
+	init_completion(&ads->out.stop_completion);
+
+	rc = start_playback(&ads->out, req);
+	if (!rc)
+		rc = size;
+	else
+		allow_suspend(&ads->out);
+
+done:
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+/* ioctl handler for the audioN_out_ctl device: flush playback, or get/
+ * set the number of DMA buffers (which forces a TX teardown/setup).
+ * Serialized against write() by the output stream mutex.
+ */
+static long tegra_audio_out_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	struct audio_driver_state *ads = ads_from_misc_out_ctl(file);
+	struct audio_stream *aos = &ads->out;
+
+	mutex_lock(&aos->lock);
+
+	switch (cmd) {
+	case TEGRA_AUDIO_OUT_FLUSH:
+		if (pending_buffer_requests(aos)) {
+			pr_debug("%s: flushing\n", __func__);
+			request_stop_nosync(aos);
+			pr_debug("%s: flushed\n", __func__);
+		}
+		if (stop_playback_if_necessary(aos))
+			pr_debug("%s: done (stopped)\n", __func__);
+		/* Flush is not a cancel: clear the stop flag so the next
+		 * write() starts playback again. */
+		aos->stop = false;
+		break;
+	case TEGRA_AUDIO_OUT_SET_NUM_BUFS: {
+		unsigned int num;
+		if (copy_from_user(&num, (const void __user *)arg,
+					sizeof(num))) {
+			rc = -EFAULT;
+			break;
+		}
+		if (!num || num > I2S_MAX_NUM_BUFS) {
+			pr_err("%s: invalid buffer count %d\n", __func__, num);
+			rc = -EINVAL;
+			break;
+		}
+		if (pending_buffer_requests(aos)) {
+			pr_err("%s: playback in progress\n", __func__);
+			rc = -EBUSY;
+			break;
+		}
+		rc = init_stream_buffer(aos, num);
+		if (rc < 0)
+			break;
+		aos->num_bufs = num;
+		/* Re-map the (re)allocated buffers for DMA. */
+		sound_ops->tear_down(ads, TEGRA_AUDIO_ENABLE_TX);
+		sound_ops->setup(ads, TEGRA_AUDIO_ENABLE_TX);
+		pr_debug("%s: num buf set to %d\n", __func__, num);
+	}
+	break;
+	case TEGRA_AUDIO_OUT_GET_NUM_BUFS:
+		if (copy_to_user((void __user *)arg,
+				&aos->num_bufs, sizeof(aos->num_bufs)))
+			rc = -EFAULT;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&aos->lock);
+	return rc;
+}
+
+/* ioctl handler for the shared audioN_ctl device.  Gets/sets the bit
+ * format (I2S default vs DSP mode); a format change while both streams
+ * are idle triggers a full DMA + I2S reconfiguration.
+ */
+static long tegra_audio_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	struct audio_driver_state *ads = ads_from_misc_ctl(file);
+	unsigned int mode;
+	bool dma_restart = false;
+
+	/* A format change affects both directions: hold both locks. */
+	mutex_lock(&ads->out.lock);
+	mutex_lock(&ads->in.lock);
+
+	switch (cmd) {
+	case TEGRA_AUDIO_SET_BIT_FORMAT:
+		if (copy_from_user(&mode, (const void __user *)arg,
+					sizeof(mode))) {
+			rc = -EFAULT;
+			goto done;
+		}
+		dma_restart = (mode != ads->bit_format);
+		switch (mode) {
+		case TEGRA_AUDIO_BIT_FORMAT_DEFAULT:
+			i2s_set_bit_format(ads->i2s_base, ads->pdata->mode);
+			ads->bit_format = mode;
+			break;
+		case TEGRA_AUDIO_BIT_FORMAT_DSP:
+			i2s_set_bit_format(ads->i2s_base, I2S_BIT_FORMAT_DSP);
+			ads->bit_format = mode;
+			break;
+		default:
+			pr_err("%s: Invalid PCM mode %d", __func__, mode);
+			rc = -EINVAL;
+			goto done;
+		}
+		break;
+	case TEGRA_AUDIO_GET_BIT_FORMAT:
+		if (copy_to_user((void __user *)arg, &ads->bit_format,
+				sizeof(mode)))
+			rc = -EFAULT;
+		goto done;
+	default:
+		/* Unknown commands previously fell through and returned
+		 * 0 (success); reject them explicitly. */
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (dma_restart) {
+		pr_debug("%s: Restarting DMA due to configuration change.\n",
+			__func__);
+		if (pending_buffer_requests(&ads->out) || ads->in.active) {
+			pr_err("%s: dma busy, cannot restart.\n", __func__);
+			rc = -EBUSY;
+			goto done;
+		}
+		sound_ops->tear_down(ads, ads->pdata->mask);
+		i2s_configure(ads->pdev);
+		sound_ops->setup(ads, ads->pdata->mask);
+	}
+
+done:
+	mutex_unlock(&ads->in.lock);
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+/* ioctl handler for the audioN_in_ctl device: start/stop capture, get/
+ * set the capture config (rate, mono/stereo), and get/set the number
+ * of DMA buffers (forcing an RX teardown/setup).  Serialized against
+ * read() by the input stream mutex.
+ */
+static long tegra_audio_in_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	struct audio_driver_state *ads = ads_from_misc_in_ctl(file);
+	struct audio_stream *ais = &ads->in;
+
+	mutex_lock(&ais->lock);
+
+	switch (cmd) {
+	case TEGRA_AUDIO_IN_START:
+		pr_debug("%s: start recording\n", __func__);
+		/* read() itself arms the DMA; here we only clear 'stop'. */
+		ais->stop = false;
+		break;
+	case TEGRA_AUDIO_IN_STOP:
+		pr_debug("%s: stop recording\n", __func__);
+		if (ais->active) {
+			/* Clean up DMA/I2S, and complete the completion */
+			sound_ops->stop_recording(ais);
+			complete(&ais->stop_completion);
+			/* Set stop flag and allow suspend. */
+			request_stop_nosync(ais);
+		}
+		break;
+	case TEGRA_AUDIO_IN_SET_CONFIG: {
+		struct tegra_audio_in_config cfg;
+
+		if (ais->active) {
+			pr_err("%s: recording in progress\n", __func__);
+			rc = -EBUSY;
+			break;
+		}
+		if (copy_from_user(&cfg, (const void __user *)arg,
+					sizeof(cfg))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (cfg.stereo && !ads->pdata->stereo_capture) {
+			pr_err("%s: not capable of stereo capture.",
+				__func__);
+			rc = -EINVAL;
+		}
+		if (!rc) {
+			pr_info("%s: setting input sampling rate to %d, %s\n",
+				__func__, cfg.rate,
+				cfg.stereo ? "stereo" : "mono");
+			ads->in_config = cfg;
+			/* Normalize to 0/1 regardless of what userspace
+			 * passed in. */
+			ads->in_config.stereo = !!ads->in_config.stereo;
+		}
+	}
+	break;
+	case TEGRA_AUDIO_IN_GET_CONFIG:
+		if (copy_to_user((void __user *)arg, &ads->in_config,
+				sizeof(ads->in_config)))
+			rc = -EFAULT;
+		break;
+	case TEGRA_AUDIO_IN_SET_NUM_BUFS: {
+		unsigned int num;
+		if (copy_from_user(&num, (const void __user *)arg,
+					sizeof(num))) {
+			rc = -EFAULT;
+			break;
+		}
+		if (!num || num > I2S_MAX_NUM_BUFS) {
+			pr_err("%s: invalid buffer count %d\n", __func__,
+				num);
+			rc = -EINVAL;
+			break;
+		}
+		if (ais->active || pending_buffer_requests(ais)) {
+			pr_err("%s: recording in progress\n", __func__);
+			rc = -EBUSY;
+			break;
+		}
+		rc = init_stream_buffer(ais, num);
+		if (rc < 0)
+			break;
+		ais->num_bufs = num;
+		/* Re-map the (re)allocated buffers for DMA. */
+		sound_ops->tear_down(ads, TEGRA_AUDIO_ENABLE_RX);
+		sound_ops->setup(ads, TEGRA_AUDIO_ENABLE_RX);
+	}
+	break;
+	case TEGRA_AUDIO_IN_GET_NUM_BUFS:
+		if (copy_to_user((void __user *)arg,
+				&ais->num_bufs, sizeof(ais->num_bufs)))
+			rc = -EFAULT;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&ais->lock);
+	return rc;
+}
+
+/* read() handler for the audioN_in misc device.  Starts capture DMA on
+ * first use, blocks up to 1s for the next buffer to fill, copies up to
+ * 'size' bytes to userspace and immediately requeues the buffer.
+ * Returns bytes read, 0 if capture was cancelled, or a negative error.
+ */
+static ssize_t tegra_audio_read(struct file *file, char __user *buf,
+			size_t size, loff_t *off)
+{
+	ssize_t rc;
+	ssize_t nr = 0;
+	int in_buf;
+	struct tegra_dma_req *req;
+	struct audio_driver_state *ads = ads_from_misc_in(file);
+
+	mutex_lock(&ads->in.lock);
+
+	if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->in)) {
+		/* %zu: size is a size_t; %d was a format mismatch. */
+		pr_err("%s: invalid size %zu.\n", __func__, size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: size %zu\n", __func__, size);
+
+	/* If we want recording to stop immediately after it gets cancelled,
+	 * then we do not want to wait for the fifo to get drained.
+	 */
+	if (ads->in.stop) {
+		pr_debug("%s: recording has been cancelled\n", __func__);
+		rc = 0;
+		goto done;
+	}
+
+	/* This function calls prevent_suspend() internally */
+	rc = start_recording_if_necessary(&ads->in, size);
+	if (rc < 0 && rc != -EALREADY) {
+		pr_err("%s: could not start recording\n", __func__);
+		goto done;
+	}
+
+	ads->in.active = true;
+
+	/* Note that when tegra_audio_read() is called for the first time (or
+	 * when all the buffers are empty), then it queues up all
+	 * ads->in.num_bufs buffers, and in_buf is set to zero below.
+	 */
+	in_buf = (ads->in.last_queued + 1) % ads->in.num_bufs;
+
+	/* Wait for the buffer to be filled (complete). The maximum timeout
+	 * value could be calculated dynamically based on buf_size(&ads->in).
+	 * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+	 * have ~93ms.
+	 */
+	rc = wait_for_completion_interruptible_timeout(
+			&ads->in.comp[in_buf], HZ);
+	if (!rc) {
+		pr_err("%s: timeout", __func__);
+		rc = -ETIMEDOUT;
+		goto done;
+	} else if (rc < 0) {
+		/* %zd: rc is a ssize_t; %d was a format mismatch. */
+		pr_err("%s: wait error %zd", __func__, rc);
+		goto done;
+	}
+
+	req = &ads->in.dma_req[in_buf];
+
+	/* Hand over at most what the caller asked for. */
+	nr = size > req->size ? req->size : size;
+	req->size = size;
+	dma_sync_single_for_cpu(NULL, ads->in.dma_req[in_buf].dest_addr,
+			ads->in.dma_req[in_buf].size, DMA_FROM_DEVICE);
+	rc = copy_to_user(buf, ads->in.buffer[in_buf], nr);
+	if (rc) {
+		rc = -EFAULT;
+		goto done;
+	}
+
+	init_completion(&ads->in.stop_completion);
+
+	ads->in.last_queued = in_buf;
+	rc = tegra_dma_enqueue_req(ads->in.dma_chan, req);
+	/* We've successfully enqueued this request before. */
+	BUG_ON(rc);
+
+	rc = nr;
+	*off += nr;
+done:
+	mutex_unlock(&ads->in.lock);
+	pr_debug("%s: done %zd\n", __func__, rc);
+	return rc;
+}
+
+/* open() for the playback device: single-open policy (-EBUSY if
+ * already open), clears the stop flag and resets every TX buffer
+ * completion to its rest state (unqueued = complete).
+ */
+static int tegra_audio_out_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+	int i;
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->out.lock);
+
+	if (ads->out.opened) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	ads->out.opened = 1;
+	ads->out.stop = false;
+
+	for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+		init_completion(&ads->out.comp[i]);
+		/* TX buf rest state is unqueued, complete. */
+		complete(&ads->out.comp[i]);
+	}
+
+done:
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+/* release() for the playback device: cancel any pending playback,
+ * stop the hardware if running, and drop the wake lock.
+ */
+static int tegra_audio_out_release(struct inode *inode, struct file *file)
+{
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->out.lock);
+	ads->out.opened = 0;
+	request_stop_nosync(&ads->out);
+	if (stop_playback_if_necessary(&ads->out))
+		pr_debug("%s: done (stopped)\n", __func__);
+	allow_suspend(&ads->out);
+	mutex_unlock(&ads->out.lock);
+	pr_debug("%s: done\n", __func__);
+	return 0;
+}
+
+/* open() for the capture device: single-open policy (-EBUSY if
+ * already open), clears the stop flag and resets every RX buffer
+ * completion to its rest state (unqueued = complete).
+ */
+static int tegra_audio_in_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+	int i;
+	struct audio_driver_state *ads = ads_from_misc_in(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->in.lock);
+	if (ads->in.opened) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	ads->in.opened = 1;
+	ads->in.stop = false;
+
+	for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+		init_completion(&ads->in.comp[i]);
+		/* RX buf rest state is unqueued, complete. */
+		complete(&ads->in.comp[i]);
+	}
+
+done:
+	mutex_unlock(&ads->in.lock);
+	return rc;
+}
+
+/* release() for the capture device: stop recording if active (same
+ * sequence as TEGRA_AUDIO_IN_STOP) and drop the wake lock.
+ */
+static int tegra_audio_in_release(struct inode *inode, struct file *file)
+{
+	struct audio_driver_state *ads = ads_from_misc_in(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->in.lock);
+	ads->in.opened = 0;
+	if (ads->in.active) {
+		sound_ops->stop_recording(&ads->in);
+		complete(&ads->in.stop_completion);
+		request_stop_nosync(&ads->in);
+	}
+	allow_suspend(&ads->in);
+	mutex_unlock(&ads->in.lock);
+	pr_debug("%s: done\n", __func__);
+	return 0;
+}
+
+static const struct file_operations tegra_audio_out_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_out_open,
+ .release = tegra_audio_out_release,
+ .write = tegra_audio_write,
+};
+
+static const struct file_operations tegra_audio_in_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_in_open,
+ .read = tegra_audio_read,
+ .release = tegra_audio_in_release,
+};
+
+static int tegra_audio_ctl_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int tegra_audio_ctl_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations tegra_audio_out_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_out_ioctl,
+};
+
+static const struct file_operations tegra_audio_in_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_in_ioctl,
+};
+
+static const struct file_operations tegra_audio_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_audio_ctl_open,
+ .release = tegra_audio_ctl_release,
+ .unlocked_ioctl = tegra_audio_ioctl,
+};
+
+/* (Re)allocate the first 'num' PCM buffers of a stream; an existing
+ * buffer at a reused slot is freed first (kfree(NULL) is a no-op for
+ * fresh slots).  On allocation failure, every buffer allocated by this
+ * call is released and -ENOMEM is returned.
+ */
+static int init_stream_buffer(struct audio_stream *s, int num)
+{
+	int i, j;
+	pr_debug("%s (num %d)\n", __func__, num);
+
+	for (i = 0; i < num; i++) {
+		kfree(s->buffer[i]);
+		s->buffer[i] =
+			kmalloc((1 << PCM_BUFFER_MAX_SIZE_ORDER),
+					GFP_KERNEL | GFP_DMA);
+		if (!s->buffer[i]) {
+			pr_err("%s: could not allocate buffer\n", __func__);
+			for (j = i - 1; j >= 0; j--) {
+				kfree(s->buffer[j]);
+				s->buffer[j] = 0;
+			}
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+
+/* Register a misc device with a dynamic minor and a printf-style name
+ * (at most 64 bytes).  The name buffer stays allocated for the
+ * device's lifetime; it is freed only when registration fails.
+ * Returns 0 on success, -ENOMEM or -EIO on failure.
+ */
+static int setup_misc_device(struct miscdevice *misc,
+			const struct file_operations  *fops,
+			const char *fmt, ...)
+{
+	int rc = 0;
+	va_list args;
+	const int sz = 64;
+
+	va_start(args, fmt);
+
+	memset(misc, 0, sizeof(*misc));
+	misc->minor = MISC_DYNAMIC_MINOR;
+	misc->name = kmalloc(sz, GFP_KERNEL);
+	if (!misc->name) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	vsnprintf((char *)misc->name, sz, fmt, args);
+	misc->fops = fops;
+	if (misc_register(misc)) {
+		pr_err("%s: could not register %s\n", __func__, misc->name);
+		kfree(misc->name);
+		rc = -EIO;
+		goto done;
+	}
+
+done:
+	va_end(args);
+	return rc;
+}
+
+static ssize_t dma_toggle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "dma\n");
+}
+
+/* sysfs store for 'dma_toggle'.  PIO mode is not implemented; log and
+ * consume the write.  Returning 0 here (as before) would make the
+ * caller's write() retry the same bytes forever.
+ */
+static ssize_t dma_toggle_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	pr_err("%s: Not implemented.", __func__);
+	return count;
+}
+
+static DEVICE_ATTR(dma_toggle, 0644, dma_toggle_show, dma_toggle_store);
+
+static ssize_t __attr_fifo_atn_read(char *buf, int atn_lvl)
+{
+ switch (atn_lvl) {
+ case I2S_FIFO_ATN_LVL_ONE_SLOT:
+ strncpy(buf, "1\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_FOUR_SLOTS:
+ strncpy(buf, "4\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_EIGHT_SLOTS:
+ strncpy(buf, "8\n", 2);
+ return 2;
+ case I2S_FIFO_ATN_LVL_TWELVE_SLOTS:
+ strncpy(buf, "12\n", 3);
+ return 3;
+ default:
+ BUG_ON(1);
+ return -EIO;
+ }
+}
+
+/* Parse a FIFO attention level written via sysfs ("1", "4", "8" or
+ * "12") and store the matching hardware selector in *fifo_lvl.
+ * Returns the consumed size, or -EINVAL on bad input.
+ */
+static ssize_t __attr_fifo_atn_write(struct audio_driver_state *ads,
+		struct audio_stream *as,
+		int *fifo_lvl,
+		const char *buf, size_t size)
+{
+	int lvl;
+
+	if (size > 3) {
+		/* %zu: size is a size_t; %d was a format mismatch. */
+		pr_err("%s: buffer size %zu too big\n", __func__, size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &lvl) != 1) {
+		pr_err("%s: invalid input string [%s]\n", __func__, buf);
+		return -EINVAL;
+	}
+
+	switch (lvl) {
+	case 1:
+		lvl = I2S_FIFO_ATN_LVL_ONE_SLOT;
+		break;
+	case 4:
+		lvl = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+		break;
+	case 8:
+		lvl = I2S_FIFO_ATN_LVL_EIGHT_SLOTS;
+		break;
+	case 12:
+		lvl = I2S_FIFO_ATN_LVL_TWELVE_SLOTS;
+		break;
+	default:
+		pr_err("%s: invalid attention level %d\n", __func__, lvl);
+		return -EINVAL;
+	}
+
+	*fifo_lvl = lvl;
+	pr_info("%s: fifo level %d\n", __func__, *fifo_lvl);
+
+	return size;
+}
+
+static ssize_t tx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->out.i2s_fifo_atn_level);
+}
+
+static ssize_t tx_fifo_atn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc;
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ mutex_lock(&ads->out.lock);
+ if (pending_buffer_requests(&ads->out)) {
+ pr_err("%s: playback in progress.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ rc = __attr_fifo_atn_write(ads, &ads->out,
+ &ads->out.i2s_fifo_atn_level,
+ buf, count);
+done:
+ mutex_unlock(&ads->out.lock);
+ return rc;
+}
+
+static DEVICE_ATTR(tx_fifo_atn, 0644, tx_fifo_atn_show, tx_fifo_atn_store);
+
+static ssize_t rx_fifo_atn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ return __attr_fifo_atn_read(buf, ads->in.i2s_fifo_atn_level);
+}
+
+static ssize_t rx_fifo_atn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc;
+ struct tegra_audio_platform_data *pdata = dev->platform_data;
+ struct audio_driver_state *ads = pdata->driver_data;
+ mutex_lock(&ads->in.lock);
+ if (ads->in.active) {
+ pr_err("%s: recording in progress.\n", __func__);
+ rc = -EBUSY;
+ goto done;
+ }
+ rc = __attr_fifo_atn_write(ads, &ads->in,
+ &ads->in.i2s_fifo_atn_level,
+ buf, count);
+done:
+ mutex_unlock(&ads->in.lock);
+ return rc;
+}
+
+static DEVICE_ATTR(rx_fifo_atn, 0644, rx_fifo_atn_show, rx_fifo_atn_store);
+
+/* Bind the i2s platform device: map registers, claim DMA and IRQ
+ * resources, enable clocks, initialize both stream states, and
+ * register the misc devices and sysfs attributes.
+ *
+ * NOTE(review): error paths leak earlier acquisitions (state, mapping,
+ * mem region, clocks); tolerable for a built-in driver but worth a
+ * goto-based cleanup chain in a follow-up.
+ */
+static int tegra_audio_probe(struct platform_device *pdev)
+{
+	int rc, i;
+	struct resource *res;
+	struct clk *i2s_clk, *dap_mclk;
+	struct audio_driver_state *state;
+
+	pr_info("%s\n", __func__);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev = pdev;
+	state->pdata = pdev->dev.platform_data;
+	/* Validate platform data BEFORE dereferencing it; the BUG_ON
+	 * used to come after the first use. */
+	BUG_ON(!state->pdata);
+	state->pdata->driver_data = state;
+
+	if (!(state->pdata->mask &
+			(TEGRA_AUDIO_ENABLE_TX | TEGRA_AUDIO_ENABLE_RX))) {
+		dev_err(&pdev->dev, "neither tx nor rx is enabled!\n");
+		return -EIO;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no mem resource!\n");
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "memory region already claimed!\n");
+		return -ENOMEM;
+	}
+
+	state->i2s_phys = res->start;
+	state->i2s_base = (unsigned long)ioremap(res->start,
+			res->end - res->start + 1);
+	if (!state->i2s_base) {
+		dev_err(&pdev->dev, "cannot remap iomem!\n");
+		return -EIO;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no dma resource!\n");
+		return -ENODEV;
+	}
+	state->dma_req_sel = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no irq resource!\n");
+		return -ENODEV;
+	}
+	state->irq = res->start;
+
+	i2s_clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(i2s_clk)) {
+		/* clk_get() signals failure with ERR_PTR, never NULL,
+		 * so the old '!i2s_clk' test could not fire. */
+		dev_err(&pdev->dev, "%s: could not get i2s clock\n",
+			__func__);
+		return -EIO;
+	}
+
+	clk_set_rate(i2s_clk, state->pdata->i2s_clk_rate);
+	if (clk_enable(i2s_clk)) {
+		dev_err(&pdev->dev, "%s: failed to enable i2s clock\n",
+			__func__);
+		return -EIO;
+	}
+	pr_info("%s: i2s_clk rate %ld\n", __func__, clk_get_rate(i2s_clk));
+
+	dap_mclk = tegra_get_clock_by_name(state->pdata->dap_clk);
+	if (!dap_mclk) {
+		dev_err(&pdev->dev, "%s: could not get DAP clock\n",
+			__func__);
+		return -EIO;
+	}
+	clk_enable(dap_mclk);
+
+	rc = i2s_configure(pdev);
+	if (rc < 0)
+		return rc;
+
+	if ((state->pdata->mask & TEGRA_AUDIO_ENABLE_TX)) {
+		state->out.opened = 0;
+		state->out.active = false;
+		mutex_init(&state->out.lock);
+		init_completion(&state->out.stop_completion);
+		spin_lock_init(&state->out.dma_req_lock);
+		state->out.dma_chan = NULL;
+		state->out.i2s_fifo_atn_level = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+		state->out.num_bufs = I2S_DEFAULT_TX_NUM_BUFS;
+		for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+			init_completion(&state->out.comp[i]);
+			/* TX buf rest state is unqueued, complete. */
+			complete(&state->out.comp[i]);
+			state->out.buffer[i] = 0;
+			state->out.buf_phy[i] = 0;
+		}
+		state->out.last_queued = 0;
+		rc = init_stream_buffer(&state->out, state->out.num_bufs);
+		if (rc < 0)
+			return rc;
+
+		INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
+
+		snprintf(state->out.wake_lock_name,
+			sizeof(state->out.wake_lock_name),
+			"i2s.%d-audio-out", state->pdev->id);
+		wake_lock_init(&state->out.wake_lock, WAKE_LOCK_SUSPEND,
+			state->out.wake_lock_name);
+
+		rc = setup_misc_device(&state->misc_out,
+			&tegra_audio_out_fops,
+			"audio%d_out", state->pdev->id);
+		if (rc < 0)
+			return rc;
+
+		rc = setup_misc_device(&state->misc_out_ctl,
+				&tegra_audio_out_ctl_fops,
+				"audio%d_out_ctl", state->pdev->id);
+		if (rc < 0)
+			return rc;
+	}
+
+	if ((state->pdata->mask & TEGRA_AUDIO_ENABLE_RX)) {
+		state->in.opened = 0;
+		state->in.active = false;
+		mutex_init(&state->in.lock);
+		init_completion(&state->in.stop_completion);
+		spin_lock_init(&state->in.dma_req_lock);
+		state->in.dma_chan = NULL;
+		state->in.i2s_fifo_atn_level = I2S_FIFO_ATN_LVL_FOUR_SLOTS;
+		state->in.num_bufs = I2S_DEFAULT_RX_NUM_BUFS;
+		for (i = 0; i < I2S_MAX_NUM_BUFS; i++) {
+			init_completion(&state->in.comp[i]);
+			/* RX buf rest state is unqueued, complete. */
+			complete(&state->in.comp[i]);
+			state->in.buffer[i] = 0;
+			state->in.buf_phy[i] = 0;
+		}
+		state->in.last_queued = 0;
+		rc = init_stream_buffer(&state->in, state->in.num_bufs);
+		if (rc < 0)
+			return rc;
+
+		INIT_WORK(&state->in.allow_suspend_work, allow_suspend_worker);
+
+		snprintf(state->in.wake_lock_name,
+			sizeof(state->in.wake_lock_name),
+			"i2s.%d-audio-in", state->pdev->id);
+		wake_lock_init(&state->in.wake_lock, WAKE_LOCK_SUSPEND,
+			state->in.wake_lock_name);
+
+		rc = setup_misc_device(&state->misc_in,
+			&tegra_audio_in_fops,
+			"audio%d_in", state->pdev->id);
+		if (rc < 0)
+			return rc;
+
+		rc = setup_misc_device(&state->misc_in_ctl,
+			&tegra_audio_in_ctl_fops,
+			"audio%d_in_ctl", state->pdev->id);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (request_irq(state->irq, i2s_interrupt,
+			IRQF_DISABLED, state->pdev->name, state) < 0) {
+		dev_err(&pdev->dev,
+			"%s: could not register handler for irq %d\n",
+			__func__, state->irq);
+		return -EIO;
+	}
+
+	rc = setup_misc_device(&state->misc_ctl,
+			&tegra_audio_ctl_fops,
+			"audio%d_ctl", state->pdev->id);
+	if (rc < 0)
+		return rc;
+
+	sound_ops->setup(state, state->pdata->mask);
+
+	rc = device_create_file(&pdev->dev, &dev_attr_dma_toggle);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+			__func__, dev_attr_dma_toggle.attr.name, rc);
+		return rc;
+	}
+
+	rc = device_create_file(&pdev->dev, &dev_attr_tx_fifo_atn);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+			__func__, dev_attr_tx_fifo_atn.attr.name, rc);
+		return rc;
+	}
+
+	rc = device_create_file(&pdev->dev, &dev_attr_rx_fifo_atn);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+			__func__, dev_attr_rx_fifo_atn.attr.name, rc);
+		return rc;
+	}
+
+	/* Default capture config until userspace sets one. */
+	state->in_config.rate = 11025;
+	state->in_config.stereo = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_audio_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ /* dev_info(&pdev->dev, "%s\n", __func__); */
+ return 0;
+}
+
+static int tegra_audio_resume(struct platform_device *pdev)
+{
+ return i2s_configure(pdev);
+}
+#endif /* CONFIG_PM */
+
+static struct platform_driver tegra_audio_driver = {
+ .driver = {
+ .name = "i2s",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_audio_probe,
+#ifdef CONFIG_PM
+ .suspend = tegra_audio_suspend,
+ .resume = tegra_audio_resume,
+#endif
+};
+
+static int __init tegra_audio_init(void)
+{
+ return platform_driver_register(&tegra_audio_driver);
+}
+
+module_init(tegra_audio_init);
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * arch/arm/mach-tegra/tegra_spdif_audio.c
+ *
+ * S/PDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/sysfs.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/tegra_audio.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+
+#include <mach/dma.h>
+#include <mach/iomap.h>
+#include <mach/spdif.h>
+#include <mach/audio.h>
+#include <mach/irqs.h>
+
+#include "clock.h"
+
+#define PCM_BUFFER_MAX_SIZE_ORDER (PAGE_SHIFT)
+
+#define SPDIF_MAX_NUM_BUFS 4
+/* Todo: Add IOCTL to configure the number of buffers. */
+#define SPDIF_DEFAULT_TX_NUM_BUFS 2
+#define SPDIF_DEFAULT_RX_NUM_BUFS 2
+/* per stream (input/output) */
+struct audio_stream {
+ int opened;
+ struct mutex lock;
+
+ bool active; /* is DMA in progress? */
+ int num_bufs;
+ void *buffer[SPDIF_MAX_NUM_BUFS];
+ dma_addr_t buf_phy[SPDIF_MAX_NUM_BUFS];
+ struct completion comp[SPDIF_MAX_NUM_BUFS];
+ struct tegra_dma_req dma_req[SPDIF_MAX_NUM_BUFS];
+ int last_queued;
+
+ int spdif_fifo_atn_level;
+
+ struct tegra_dma_channel *dma_chan;
+ bool stop;
+ struct completion stop_completion;
+ spinlock_t dma_req_lock;
+
+ struct work_struct allow_suspend_work;
+ struct wake_lock wake_lock;
+ char wake_lock_name[100];
+};
+
+struct audio_driver_state {
+ struct list_head next;
+
+ struct platform_device *pdev;
+ struct tegra_audio_platform_data *pdata;
+ phys_addr_t spdif_phys;
+ unsigned long spdif_base;
+
+ unsigned long dma_req_sel;
+ bool fifo_init;
+
+ int irq;
+
+ struct miscdevice misc_out;
+ struct miscdevice misc_out_ctl;
+ struct audio_stream out;
+};
+
+/* Return true while any of the stream's DMA buffers is still queued
+ * (its completion not yet signalled); false once every buffer has
+ * completed.
+ */
+static inline bool pending_buffer_requests(struct audio_stream *stream)
+{
+	int idx = 0;
+
+	while (idx < stream->num_bufs) {
+		if (!completion_done(&stream->comp[idx]))
+			return true;
+		idx++;
+	}
+	return false;
+}
+
+static inline int buf_size(struct audio_stream *s __attribute__((unused)))
+{
+ return 1 << PCM_BUFFER_MAX_SIZE_ORDER;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out(struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state, misc_out);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_misc_out_ctl(
+ struct file *file)
+{
+ struct miscdevice *m = file->private_data;
+ struct audio_driver_state *ads =
+ container_of(m, struct audio_driver_state,
+ misc_out_ctl);
+ BUG_ON(!ads);
+ return ads;
+}
+
+static inline struct audio_driver_state *ads_from_out(
+ struct audio_stream *aos)
+{
+ return container_of(aos, struct audio_driver_state, out);
+}
+
+static inline void prevent_suspend(struct audio_stream *as)
+{
+ pr_debug("%s\n", __func__);
+ cancel_work_sync(&as->allow_suspend_work);
+ wake_lock(&as->wake_lock);
+}
+
+static void allow_suspend_worker(struct work_struct *w)
+{
+ struct audio_stream *as = container_of(w,
+ struct audio_stream, allow_suspend_work);
+ pr_debug("%s\n", __func__);
+ wake_unlock(&as->wake_lock);
+}
+
+static inline void allow_suspend(struct audio_stream *as)
+{
+ schedule_work(&as->allow_suspend_work);
+}
+
+/* Map generic TX/RX FIFO flag names onto the controller's FIFO1 (TX)
+ * and FIFO2 (RX) status bits. */
+#define I2S_I2S_FIFO_TX_BUSY	I2S_I2S_STATUS_FIFO1_BSY
+#define I2S_I2S_FIFO_TX_QS	I2S_I2S_STATUS_QS_FIFO1
+#define I2S_I2S_FIFO_TX_ERR	I2S_I2S_STATUS_FIFO1_ERR
+
+#define I2S_I2S_FIFO_RX_BUSY	I2S_I2S_STATUS_FIFO2_BSY
+#define I2S_I2S_FIFO_RX_QS	I2S_I2S_STATUS_QS_FIFO2
+#define I2S_I2S_FIFO_RX_ERR	I2S_I2S_STATUS_FIFO2_ERR
+
+#define I2S_FIFO_ERR (I2S_I2S_STATUS_FIFO1_ERR | I2S_I2S_STATUS_FIFO2_ERR)
+
+
+/* MMIO register accessors for the SPDIF block. */
+static inline void spdif_writel(unsigned long base, u32 val, u32 reg)
+{
+	writel(val, base + reg);
+}
+
+static inline u32 spdif_readl(unsigned long base, u32 reg)
+{
+	return readl(base + reg);
+}
+
+/* Push one 32-bit sample word into the TX data FIFO. */
+static inline void spdif_fifo_write(unsigned long base, u32 data)
+{
+	spdif_writel(base, data, SPDIF_DATA_OUT_0);
+}
+
+/* Program the TX FIFO attention (DMA-trigger) level.
+ * @level: one of the SPDIF_FIFO_ATN_LVL_* selectors.
+ * Returns 0, or -EINVAL for an out-of-range selector. */
+static int spdif_fifo_set_attention_level(unsigned long base,
+			unsigned level)
+{
+	u32 val;
+
+	if (level > SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS) {
+		pr_err("%s: invalid fifo level selector %d\n", __func__,
+			level);
+		return -EINVAL;
+	}
+
+	val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+
+	val &= ~SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_MASK;
+	val |= level << SPDIF_DATA_FIFO_CSR_0_TX_ATN_LVL_SHIFT;
+
+
+	spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+	return 0;
+}
+
+/* Enable/disable the transmitter and channel-status transmission.
+ * TU (user data) is always left disabled here. */
+static void spdif_fifo_enable(unsigned long base, int on)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN | SPDIF_CTRL_0_TU_EN);
+	val |= on ? (SPDIF_CTRL_0_TX_EN) : 0;
+	val |= on ? (SPDIF_CTRL_0_TC_EN) : 0;
+
+	spdif_writel(base, val, SPDIF_CTRL_0);
+}
+#if 0
+/* Currently unused; kept for debugging. */
+static bool spdif_is_fifo_enabled(unsigned long base)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	return !!(val & SPDIF_CTRL_0_TX_EN);
+}
+#endif
+
+/* Clear the TX data and user-data FIFOs (self-clearing bits). */
+static void spdif_fifo_clear(unsigned long base)
+{
+	u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+	val &= ~(SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR);
+	val |= SPDIF_DATA_FIFO_CSR_0_TX_CLR | SPDIF_DATA_FIFO_CSR_0_TU_CLR;
+	spdif_writel(base, val, SPDIF_DATA_FIFO_CSR_0);
+}
+
+
+/* Select the sample bit mode (SPDIF_BIT_MODE_*).  The register is only
+ * written after validation, so an invalid @mode has no side effect.
+ * Returns 0 or -EINVAL. */
+static int spdif_set_bit_mode(unsigned long base, unsigned mode)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	val &= ~SPDIF_CTRL_0_BIT_MODE_MASK;
+
+	if (mode > SPDIF_BIT_MODE_MODERAW) {
+		pr_err("%s: invalid bit_size selector %d\n", __func__,
+			mode);
+		return -EINVAL;
+	}
+
+	val |= mode << SPDIF_CTRL_0_BIT_MODE_SHIFT;
+
+	spdif_writel(base, val, SPDIF_CTRL_0);
+	return 0;
+}
+
+/* Enable/disable packing of two 16-bit samples per FIFO word. */
+static int spdif_set_fifo_packed(unsigned long base, unsigned on)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	val &= ~SPDIF_CTRL_0_PACK;
+	val |= on ? SPDIF_CTRL_0_PACK : 0;
+	spdif_writel(base, val, SPDIF_CTRL_0);
+	return 0;
+}
+
+#if 0
+/* Currently unused; kept for debugging. */
+static void spdif_set_fifo_irq_on_err(unsigned long base, int on)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	val &= ~SPDIF_CTRL_0_IE_TXE;
+	val |= on ? SPDIF_CTRL_0_IE_TXE : 0;
+	spdif_writel(base, val, SPDIF_CTRL_0);
+}
+#endif
+
+/* Master enable/disable: transmitter, channel status, and the
+ * TX-error interrupt together. */
+static void spdif_enable_fifos(unsigned long base, int on)
+{
+	u32 val = spdif_readl(base, SPDIF_CTRL_0);
+	if (on)
+		val |= SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+		       SPDIF_CTRL_0_IE_TXE;
+	else
+		val &= ~(SPDIF_CTRL_0_TX_EN | SPDIF_CTRL_0_TC_EN |
+			 SPDIF_CTRL_0_IE_TXE);
+
+	spdif_writel(base, val, SPDIF_CTRL_0);
+}
+
+static inline u32 spdif_get_status(unsigned long base)
+{
+	return spdif_readl(base, SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_control(unsigned long base)
+{
+	return spdif_readl(base, SPDIF_CTRL_0);
+}
+
+/* Acknowledge all pending status bits by writing them back
+ * (write-1-to-clear).  Note: the original "return"ed the void
+ * spdif_writel() expression, which is a C constraint violation
+ * (C99 6.8.6.4) accepted only as a gcc extension. */
+static inline void spdif_ack_status(unsigned long base)
+{
+	spdif_writel(base, spdif_readl(base, SPDIF_STATUS_0),
+				SPDIF_STATUS_0);
+}
+
+static inline u32 spdif_get_fifo_scr(unsigned long base)
+{
+	return spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+}
+
+/* Physical address of the TX data FIFO, for programming DMA. */
+static inline phys_addr_t spdif_get_fifo_phy_base(unsigned long phy_base)
+{
+	return phy_base + SPDIF_DATA_OUT_0;
+}
+
+/* Number of empty slots currently in the TX FIFO. */
+static inline u32 spdif_get_fifo_full_empty_count(unsigned long base)
+{
+	u32 val = spdif_readl(base, SPDIF_DATA_FIFO_CSR_0);
+	val = val >> SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_SHIFT;
+	return val & SPDIF_DATA_FIFO_CSR_0_TD_EMPTY_COUNT_MASK;
+}
+
+
+/* Configure the SPDIF clock and IEC60958 channel-status words for the
+ * given sample rate.  Returns 0, -EINVAL for an unsupported rate, or
+ * -EIO on clock errors.
+ *
+ * Fixes vs. original: clk_get() returns an ERR_PTR-encoded value, not
+ * NULL, so failure was never detected; the clock reference was leaked
+ * when clk_enable() failed; and the bare -1 return is now -EINVAL. */
+static int spdif_set_sample_rate(struct audio_driver_state *state,
+				unsigned int sample_rate)
+{
+	unsigned int clock_freq = 0;
+	struct clk *spdif_clk;
+
+	/* IEC60958 channel-status words; entries 0/1 carry the
+	 * rate-dependent bits, the rest stay zero. */
+	unsigned int ch_sta[] = {
+		0x0, /* 44.1, default values */
+		0x0,
+		0x0,
+		0x0,
+		0x0,
+		0x0,
+	};
+
+	switch (sample_rate) {
+	case 32000:
+		clock_freq = 4096000; /* 4.0960 MHz */
+		ch_sta[0] = 0x3 << 24;
+		ch_sta[1] = 0xC << 4;
+		break;
+	case 44100:
+		clock_freq = 5644800; /* 5.6448 MHz */
+		ch_sta[0] = 0x0;
+		ch_sta[1] = 0xF << 4;
+		break;
+	case 48000:
+		clock_freq = 6144000; /* 6.1440MHz */
+		ch_sta[0] = 0x2 << 24;
+		ch_sta[1] = 0xD << 4;
+		break;
+	case 88200:
+		clock_freq = 11289600; /* 11.2896 MHz */
+		break;
+	case 96000:
+		clock_freq = 12288000; /* 12.288 MHz */
+		break;
+	case 176400:
+		clock_freq = 22579200; /* 22.5792 MHz */
+		break;
+	case 192000:
+		clock_freq = 24576000; /* 24.5760 MHz */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spdif_clk = clk_get(&state->pdev->dev, NULL);
+	if (IS_ERR_OR_NULL(spdif_clk)) {
+		dev_err(&state->pdev->dev, "%s: could not get spdif clock\n",
+			__func__);
+		return -EIO;
+	}
+
+	clk_set_rate(spdif_clk, clock_freq);
+	if (clk_enable(spdif_clk)) {
+		dev_err(&state->pdev->dev,
+			"%s: failed to enable spdif_clk clock\n", __func__);
+		clk_put(spdif_clk); /* drop the reference taken above */
+		return -EIO;
+	}
+	pr_info("%s: spdif_clk rate %ld\n", __func__, clk_get_rate(spdif_clk));
+
+	spdif_writel(state->spdif_base, ch_sta[0], SPDIF_CH_STA_TX_A_0);
+	spdif_writel(state->spdif_base, ch_sta[1], SPDIF_CH_STA_TX_B_0);
+	spdif_writel(state->spdif_base, ch_sta[2], SPDIF_CH_STA_TX_C_0);
+	spdif_writel(state->spdif_base, ch_sta[3], SPDIF_CH_STA_TX_D_0);
+	spdif_writel(state->spdif_base, ch_sta[4], SPDIF_CH_STA_TX_E_0);
+	spdif_writel(state->spdif_base, ch_sta[5], SPDIF_CH_STA_TX_F_0);
+
+	return 0;
+}
+
+/* Forward declarations for the DMA backend. */
+static int init_stream_buffer(struct audio_stream *, int);
+
+static int setup_dma(struct audio_driver_state *);
+static void tear_down_dma(struct audio_driver_state *);
+static void stop_dma_playback(struct audio_stream *);
+
+
+/* Backend vtable; only a DMA implementation exists today. */
+struct sound_ops {
+	int (*setup)(struct audio_driver_state *);
+	void (*tear_down)(struct audio_driver_state *);
+	void (*stop_playback)(struct audio_stream *);
+};
+
+static const struct sound_ops dma_sound_ops = {
+	.setup = setup_dma,
+	.tear_down = tear_down_dma,
+	.stop_playback = stop_dma_playback,
+};
+
+static const struct sound_ops *sound_ops = &dma_sound_ops;
+
+
+
+/* Stop the hardware if no buffers remain queued.  Returns true when
+ * playback was stopped.  allow_suspend() is deliberately called after
+ * dropping dma_req_lock since it may sleep-schedule work. */
+static bool stop_playback_if_necessary(struct audio_stream *aos)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&aos->dma_req_lock, flags);
+	pr_debug("%s\n", __func__);
+	if (!pending_buffer_requests(aos)) {
+		pr_debug("%s: no more data to play back\n", __func__);
+		sound_ops->stop_playback(aos);
+		spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+		allow_suspend(aos);
+		return true;
+	}
+	spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+	return false;
+}
+
+/* playback */
+static bool wait_till_stopped(struct audio_stream *as)
+{
+ int rc;
+ pr_debug("%s: wait for completion\n", __func__);
+ rc = wait_for_completion_timeout(
+ &as->stop_completion, HZ);
+ if (!rc)
+ pr_err("%s: wait timed out", __func__);
+ if (rc < 0)
+ pr_err("%s: wait error %d\n", __func__, rc);
+ allow_suspend(as);
+ pr_debug("%s: done: %d\n", __func__, rc);
+ return true;
+}
+
+/* Ask for playback to stop. The _nosync means that
+ * as->lock has to be locked by the caller.
+ */
+static void request_stop_nosync(struct audio_stream *as)
+{
+	int i;
+	pr_debug("%s\n", __func__);
+	if (!as->stop) {
+		as->stop = true;
+		if (pending_buffer_requests(as))
+			wait_till_stopped(as);
+		/* Reset every buffer completion to "unqueued, complete". */
+		for (i = 0; i < as->num_bufs; i++) {
+			init_completion(&as->comp[i]);
+			complete(&as->comp[i]);
+		}
+	}
+	if (!tegra_dma_is_empty(as->dma_chan))
+		pr_err("%s: DMA not empty!\n", __func__);
+	/* Stop the DMA then dequeue anything that's in progress. */
+	tegra_dma_cancel(as->dma_chan);
+	as->active = false; /* applies to recording only */
+	pr_debug("%s: done\n", __func__);
+}
+
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+					struct audio_stream *aos);
+
+/* Map the playback buffers for DMA and allocate the TX channel.
+ * Returns 0 or a negative errno.
+ *
+ * Fixes vs. original: the failure path called tegra_dma_free_channel()
+ * on a channel that was never allocated, and applied PTR_ERR() to a
+ * pointer that had just been checked against NULL (printing garbage). */
+static int setup_dma(struct audio_driver_state *ads)
+{
+	int rc, i;
+	pr_info("%s\n", __func__);
+
+	/* setup audio playback */
+	for (i = 0; i < ads->out.num_bufs; i++) {
+		ads->out.buf_phy[i] = dma_map_single(&ads->pdev->dev,
+				ads->out.buffer[i],
+				buf_size(&ads->out),
+				DMA_TO_DEVICE);
+		BUG_ON(!ads->out.buf_phy[i]);
+		setup_dma_tx_request(&ads->out.dma_req[i], &ads->out);
+		ads->out.dma_req[i].source_addr = ads->out.buf_phy[i];
+	}
+	ads->out.dma_chan =
+		tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS_SINGLE);
+	if (!ads->out.dma_chan) {
+		pr_err("%s: error alloc output DMA channel\n", __func__);
+		rc = -ENODEV;
+		goto fail_tx;
+	}
+	return 0;
+
+fail_tx:
+	/* Unwind the buffer mappings.  The DMA channel was never
+	 * allocated on this path, so there is nothing to free. */
+	for (i = 0; i < ads->out.num_bufs; i++) {
+		dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+				buf_size(&ads->out),
+				DMA_TO_DEVICE);
+		ads->out.buf_phy[i] = 0;
+	}
+	ads->out.dma_chan = NULL;
+
+	return rc;
+}
+
+/* Release the TX DMA channel and unmap all playback buffers.
+ * Counterpart of setup_dma(). */
+static void tear_down_dma(struct audio_driver_state *ads)
+{
+	int i;
+	pr_info("%s\n", __func__);
+
+
+	tegra_dma_free_channel(ads->out.dma_chan);
+	for (i = 0; i < ads->out.num_bufs; i++) {
+		dma_unmap_single(&ads->pdev->dev, ads->out.buf_phy[i],
+				buf_size(&ads->out),
+				DMA_TO_DEVICE);
+		ads->out.buf_phy[i] = 0;
+	}
+
+	ads->out.dma_chan = NULL;
+}
+
+/* DMA completion callback (runs in the DMA driver's context): mark the
+ * finished buffer free and, if nothing else is queued, signal the
+ * stop-completion so writers/stoppers can proceed. */
+static void dma_tx_complete_callback(struct tegra_dma_req *req)
+{
+	struct audio_stream *aos = req->dev;
+	unsigned req_num;
+
+	/* Requests live in a contiguous array; index by pointer delta. */
+	req_num = req - aos->dma_req;
+	pr_debug("%s: completed buffer %d size %d\n", __func__,
+			req_num, req->bytes_transferred);
+	BUG_ON(req_num >= aos->num_bufs);
+
+	complete(&aos->comp[req_num]);
+
+	if (!pending_buffer_requests(aos)) {
+		pr_debug("%s: Playback underflow", __func__);
+		complete(&aos->stop_completion);
+	}
+}
+
+
+/* Initialise one memory-to-SPDIF-FIFO DMA request.  source_addr and
+ * size are filled in later by setup_dma()/tegra_spdif_write(). */
+static void setup_dma_tx_request(struct tegra_dma_req *req,
+		struct audio_stream *aos)
+{
+	struct audio_driver_state *ads = ads_from_out(aos);
+
+	memset(req, 0, sizeof(*req));
+
+	req->complete = dma_tx_complete_callback;
+	req->dev = aos;
+	req->to_memory = false;
+	req->dest_addr = spdif_get_fifo_phy_base(ads->spdif_phys);
+	req->dest_bus_width = 32;
+	req->dest_wrap = 4;	/* wrap on the fixed FIFO register */
+	req->source_wrap = 0;	/* linear walk through the buffer */
+	req->source_bus_width = 32;
+	req->req_sel = ads->dma_req_sel;
+}
+
+
+/* Enqueue one filled buffer for playback, (re)configuring the FIFO on
+ * first use after a stop.  Returns 0 or a negative errno. */
+static int start_playback(struct audio_stream *aos,
+			struct tegra_dma_req *req)
+{
+	int rc;
+	unsigned long flags;
+	struct audio_driver_state *ads = ads_from_out(aos);
+
+	pr_debug("%s: (writing %d)\n",
+			__func__, req->size);
+
+	spin_lock_irqsave(&aos->dma_req_lock, flags);
+#if 0
+	spdif_fifo_clear(ads->spdif_base);
+#endif
+
+	spdif_fifo_set_attention_level(ads->spdif_base,
+		ads->out.spdif_fifo_atn_level);
+
+	/* First submission since stop: restore 16-bit packed mode. */
+	if (ads->fifo_init) {
+		spdif_set_bit_mode(ads->spdif_base, SPDIF_BIT_MODE_MODE16BIT);
+		spdif_set_fifo_packed(ads->spdif_base, 1);
+		ads->fifo_init = false;
+	}
+
+	spdif_fifo_enable(ads->spdif_base, 1);
+
+	rc = tegra_dma_enqueue_req(aos->dma_chan, req);
+	spin_unlock_irqrestore(&aos->dma_req_lock, flags);
+
+	if (rc)
+		pr_err("%s: could not enqueue TX DMA req\n", __func__);
+	return rc;
+}
+
+/* Called with aos->dma_req_lock taken. */
+/* Disable the transmitter and busy-wait (max ~1ms) for the FIFO to
+ * drain, then flag the FIFO for re-init on the next start. */
+static void stop_dma_playback(struct audio_stream *aos)
+{
+	int spin = 0;
+	struct audio_driver_state *ads = ads_from_out(aos);
+	pr_debug("%s\n", __func__);
+	spdif_fifo_enable(ads->spdif_base, 0);
+	while ((spdif_get_status(ads->spdif_base) & SPDIF_STATUS_0_TX_BSY) &&
+			spin < 100) {
+		udelay(10);
+		if (spin++ > 50)
+			pr_info("%s: spin %d\n", __func__, spin);
+	}
+	if (spin == 100)
+		pr_warn("%s: spinny\n", __func__);
+	ads->fifo_init = true;
+}
+
+
+
+/* SPDIF IRQ handler: just acknowledge whatever status bits fired.
+ * No error recovery is attempted here. */
+static irqreturn_t spdif_interrupt(int irq, void *data)
+{
+	struct audio_driver_state *ads = data;
+	u32 status = spdif_get_status(ads->spdif_base);
+
+	pr_debug("%s: %08x\n", __func__, status);
+
+/*	if (status & SPDIF_STATUS_0_TX_ERR) */
+		spdif_ack_status(ads->spdif_base);
+
+	pr_debug("%s: done %08x\n", __func__,
+			spdif_get_status(ads->spdif_base));
+	return IRQ_HANDLED;
+}
+
+/* write() handler for /dev/spdif_out: copy one user buffer into the
+ * next free DMA buffer and queue it for playback.  Blocks up to 1s for
+ * a buffer to free up.  Returns bytes consumed or a negative errno.
+ *
+ * Fix vs. original: size_t/ssize_t values were printed with %d, which
+ * mismatches the printf contract; use %zu/%zd. */
+static ssize_t tegra_spdif_write(struct file *file,
+		const char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t rc = 0;
+	int out_buf;
+	struct tegra_dma_req *req;
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	mutex_lock(&ads->out.lock);
+
+	if (!IS_ALIGNED(size, 4) || size < 4 || size > buf_size(&ads->out)) {
+		pr_err("%s: invalid user size %zu\n", __func__, size);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: write %zu bytes\n", __func__, size);
+
+	if (ads->out.stop) {
+		pr_debug("%s: playback has been cancelled\n", __func__);
+		goto done;
+	}
+
+	/* Decide which buf is next. */
+	out_buf = (ads->out.last_queued + 1) % ads->out.num_bufs;
+	req = &ads->out.dma_req[out_buf];
+
+	/* Wait for the buffer to be emptied (complete). The maximum timeout
+	 * value could be calculated dynamically based on buf_size(&ads->out).
+	 * For a buffer size of 16k, at 44.1kHz/stereo/16-bit PCM, you would
+	 * have ~93ms.
+	 */
+	pr_debug("%s: waiting for buffer %d\n", __func__, out_buf);
+	rc = wait_for_completion_interruptible_timeout(
+				&ads->out.comp[out_buf], HZ);
+	if (!rc) {
+		pr_err("%s: timeout", __func__);
+		rc = -ETIMEDOUT;
+		goto done;
+	} else if (rc < 0) {
+		pr_err("%s: wait error %zd", __func__, rc);
+		goto done;
+	}
+
+	/* Fill the buffer and enqueue it. */
+	pr_debug("%s: acquired buffer %d, copying data\n", __func__, out_buf);
+	rc = copy_from_user(ads->out.buffer[out_buf], buf, size);
+	if (rc) {
+		rc = -EFAULT;
+		goto done;
+	}
+
+	/* Hold the wake lock for as long as playback is in flight. */
+	prevent_suspend(&ads->out);
+
+	req->size = size;
+	dma_sync_single_for_device(NULL,
+			req->source_addr, req->size, DMA_TO_DEVICE);
+	ads->out.last_queued = out_buf;
+	init_completion(&ads->out.stop_completion);
+
+	rc = start_playback(&ads->out, req);
+	if (!rc)
+		rc = size;
+	else
+		allow_suspend(&ads->out);
+
+done:
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+/* ioctl handler for /dev/spdif_out_ctl.
+ * FLUSH: cancel and drain any in-flight playback, clear the stop flag.
+ * SET_NUM_BUFS: resize the buffer ring (only while idle).
+ * GET_NUM_BUFS: report the current ring size. */
+static long tegra_spdif_out_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	struct audio_driver_state *ads = ads_from_misc_out_ctl(file);
+	struct audio_stream *aos = &ads->out;
+
+	mutex_lock(&aos->lock);
+
+	switch (cmd) {
+	case TEGRA_AUDIO_OUT_FLUSH:
+		if (pending_buffer_requests(aos)) {
+			pr_debug("%s: flushing\n", __func__);
+			request_stop_nosync(aos);
+			pr_debug("%s: flushed\n", __func__);
+		}
+		if (stop_playback_if_necessary(aos))
+			pr_debug("%s: done (stopped)\n", __func__);
+		aos->stop = false;
+		break;
+	case TEGRA_AUDIO_OUT_SET_NUM_BUFS: {
+		unsigned int num;
+		if (copy_from_user(&num, (const void __user *)arg,
+					sizeof(num))) {
+			rc = -EFAULT;
+			break;
+		}
+		if (!num || num > SPDIF_MAX_NUM_BUFS) {
+			pr_err("%s: invalid buffer count %d\n", __func__, num);
+			rc = -EINVAL;
+			break;
+		}
+		if (pending_buffer_requests(aos)) {
+			pr_err("%s: playback in progress\n", __func__);
+			rc = -EBUSY;
+			break;
+		}
+		rc = init_stream_buffer(aos, num);
+		if (rc < 0)
+			break;
+		aos->num_bufs = num;
+		/* Remap the (new) buffers for DMA. */
+		sound_ops->setup(ads);
+	}
+	break;
+	case TEGRA_AUDIO_OUT_GET_NUM_BUFS:
+		if (copy_to_user((void __user *)arg,
+				&aos->num_bufs, sizeof(aos->num_bufs)))
+			rc = -EFAULT;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&aos->lock);
+	return rc;
+}
+
+
+/* open() for /dev/spdif_out: single-open device; resets the buffer
+ * completions to "unqueued, complete". */
+static int tegra_spdif_out_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+	int i;
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->out.lock);
+
+	if (ads->out.opened) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	ads->out.opened = 1;
+	ads->out.stop = false;
+
+	for (i = 0; i < SPDIF_MAX_NUM_BUFS; i++) {
+		init_completion(&ads->out.comp[i]);
+		/* TX buf rest state is unqueued, complete. */
+		complete(&ads->out.comp[i]);
+	}
+
+done:
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+/* release() for /dev/spdif_out: stop playback, drop the wake lock,
+ * and allow the device to be opened again. */
+static int tegra_spdif_out_release(struct inode *inode, struct file *file)
+{
+	struct audio_driver_state *ads = ads_from_misc_out(file);
+
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&ads->out.lock);
+	ads->out.opened = 0;
+	request_stop_nosync(&ads->out);
+	if (stop_playback_if_necessary(&ads->out))
+		pr_debug("%s: done (stopped)\n", __func__);
+	allow_suspend(&ads->out);
+	mutex_unlock(&ads->out.lock);
+	pr_debug("%s: done\n", __func__);
+	return 0;
+}
+
+
+/* Data device: open/write/release for PCM playback. */
+static const struct file_operations tegra_spdif_out_fops = {
+	.owner = THIS_MODULE,
+	.open = tegra_spdif_out_open,
+	.release = tegra_spdif_out_release,
+	.write = tegra_spdif_write,
+};
+
+/* Control device has no per-open state. */
+static int tegra_spdif_ctl_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int tegra_spdif_ctl_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations tegra_spdif_out_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = tegra_spdif_ctl_open,
+	.release = tegra_spdif_ctl_release,
+	.unlocked_ioctl = tegra_spdif_out_ioctl,
+};
+
+/* (Re)allocate @num playback buffers, freeing any previous ones at the
+ * same slots.  On allocation failure, frees what was allocated in this
+ * call and returns -ENOMEM.  NOTE(review): if @num is smaller than the
+ * previous count, buffers above @num are not freed here — presumably
+ * they are reclaimed on a later resize; verify against callers. */
+static int init_stream_buffer(struct audio_stream *s, int num)
+{
+	int i, j;
+	pr_debug("%s (num %d)\n", __func__, num);
+
+	for (i = 0; i < num; i++) {
+		kfree(s->buffer[i]);
+		s->buffer[i] =
+			kmalloc(buf_size(s), GFP_KERNEL | GFP_DMA);
+		if (!s->buffer[i]) {
+			pr_err("%s: could not allocate buffer\n", __func__);
+			for (j = i - 1; j >= 0; j--) {
+				kfree(s->buffer[j]);
+				s->buffer[j] = 0;
+			}
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+
+/* Register a misc device whose name is built printf-style from @fmt.
+ * Returns 0 or a negative errno.
+ *
+ * Fixes vs. original: the real misc_register() error is propagated
+ * instead of being flattened to -EIO, and misc->name no longer dangles
+ * after the failure-path kfree(). */
+static int setup_misc_device(struct miscdevice *misc,
+					const struct file_operations  *fops,
+					const char *fmt, ...)
+{
+	int rc = 0;
+	va_list args;
+	const int sz = 64;
+
+	va_start(args, fmt);
+
+	memset(misc, 0, sizeof(*misc));
+	misc->minor = MISC_DYNAMIC_MINOR;
+	misc->name = kmalloc(sz, GFP_KERNEL);
+	if (!misc->name) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	vsnprintf((char *)misc->name, sz, fmt, args);
+	misc->fops = fops;
+	rc = misc_register(misc);
+	if (rc) {
+		pr_err("%s: could not register %s\n", __func__, misc->name);
+		kfree(misc->name);
+		misc->name = NULL;	/* don't leave a dangling pointer */
+		goto done;
+	}
+
+done:
+	va_end(args);
+	return rc;
+}
+
+/* sysfs "dma_toggle": read-only in practice; only the DMA backend
+ * exists, so show always reports "dma". */
+static ssize_t dma_toggle_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "dma\n");
+}
+
+static ssize_t dma_toggle_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	pr_err("%s: Not implemented.", __func__);
+	/* Returning 0 from a sysfs store makes userspace retry the
+	 * write forever; report the operation as unsupported instead. */
+	return -EOPNOTSUPP;
+}
+
+static DEVICE_ATTR(dma_toggle, 0644, dma_toggle_show, dma_toggle_store);
+
+/* Render a FIFO attention-level selector as its slot count ("1", "4",
+ * "8", "12") for sysfs.  The sysfs page is pre-zeroed, so the
+ * unterminated strncpy output is still safe to read back. */
+static ssize_t __attr_fifo_atn_read(char *buf, int atn_lvl)
+{
+	switch (atn_lvl) {
+	case SPDIF_FIFO_ATN_LVL_ONE_SLOT:
+		strncpy(buf, "1\n", 2);
+		return 2;
+	case SPDIF_FIFO_ATN_LVL_FOUR_SLOTS:
+		strncpy(buf, "4\n", 2);
+		return 2;
+	case SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS:
+		strncpy(buf, "8\n", 2);
+		return 2;
+	case SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS:
+		strncpy(buf, "12\n", 3);
+		return 3;
+	default:
+		BUG_ON(1); /* level was validated at store time */
+		return -EIO;
+	}
+}
+
+/* Parse a sysfs attention-level write ("1", "4", "8" or "12") and
+ * store the corresponding selector in *fifo_lvl.  Returns @size on
+ * success or -EINVAL.
+ *
+ * Fix vs. original: size_t was printed with %d; use %zu. */
+static ssize_t __attr_fifo_atn_write(struct audio_driver_state *ads,
+		struct audio_stream *as,
+		int *fifo_lvl,
+		const char *buf, size_t size)
+{
+	int lvl;
+
+	/* Longest valid input is "12\n" (3 bytes). */
+	if (size > 3) {
+		pr_err("%s: buffer size %zu too big\n", __func__, size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &lvl) != 1) {
+		pr_err("%s: invalid input string [%s]\n", __func__, buf);
+		return -EINVAL;
+	}
+
+	switch (lvl) {
+	case 1:
+		lvl = SPDIF_FIFO_ATN_LVL_ONE_SLOT;
+		break;
+	case 4:
+		lvl = SPDIF_FIFO_ATN_LVL_FOUR_SLOTS;
+		break;
+	case 8:
+		lvl = SPDIF_FIFO_ATN_LVL_EIGHT_SLOTS;
+		break;
+	case 12:
+		lvl = SPDIF_FIFO_ATN_LVL_TWELVE_SLOTS;
+		break;
+	default:
+		pr_err("%s: invalid attention level %d\n", __func__, lvl);
+		return -EINVAL;
+	}
+
+	*fifo_lvl = lvl;
+	pr_info("%s: fifo level %d\n", __func__, *fifo_lvl);
+
+	return size;
+}
+
+/* sysfs "tx_fifo_atn": read/write the TX FIFO attention level.
+ * Writes are rejected with -EBUSY while playback is in flight. */
+static ssize_t tx_fifo_atn_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct tegra_audio_platform_data *pdata = dev->platform_data;
+	struct audio_driver_state *ads = pdata->driver_data;
+	return __attr_fifo_atn_read(buf, ads->out.spdif_fifo_atn_level);
+}
+
+static ssize_t tx_fifo_atn_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	ssize_t rc;
+	struct tegra_audio_platform_data *pdata = dev->platform_data;
+	struct audio_driver_state *ads = pdata->driver_data;
+	mutex_lock(&ads->out.lock);
+	if (pending_buffer_requests(&ads->out)) {
+		pr_err("%s: playback in progress.\n", __func__);
+		rc = -EBUSY;
+		goto done;
+	}
+	rc = __attr_fifo_atn_write(ads, &ads->out,
+			&ads->out.spdif_fifo_atn_level,
+			buf, count);
+done:
+	mutex_unlock(&ads->out.lock);
+	return rc;
+}
+
+static DEVICE_ATTR(tx_fifo_atn, 0644, tx_fifo_atn_show, tx_fifo_atn_store);
+
+
+/* Put the SPDIF block into a known state: interrupts off, FIFOs
+ * cleared, 16-bit packed samples at 44.1 kHz.  Also used on resume. */
+static int spdif_configure(struct platform_device *pdev)
+{
+	struct tegra_audio_platform_data *pdata = pdev->dev.platform_data;
+	struct audio_driver_state *state = pdata->driver_data;
+
+	if (!state)
+		return -ENOMEM;
+
+	/* disable interrupts from SPDIF */
+	spdif_writel(state->spdif_base, 0x0, SPDIF_CTRL_0);
+	spdif_fifo_clear(state->spdif_base);
+	spdif_enable_fifos(state->spdif_base, 0);
+
+	spdif_set_bit_mode(state->spdif_base, SPDIF_BIT_MODE_MODE16BIT);
+	spdif_set_fifo_packed(state->spdif_base, 1);
+
+	spdif_fifo_set_attention_level(state->spdif_base,
+		state->out.spdif_fifo_atn_level);
+
+	spdif_set_sample_rate(state, 44100);
+
+	state->fifo_init = true;
+	return 0;
+}
+
+/* Probe: claim MMIO/DMA/IRQ resources, allocate the buffer ring,
+ * register the data and control misc devices, and create sysfs knobs.
+ *
+ * Fixes vs. original: platform data is validated BEFORE being
+ * dereferenced (the BUG_ON came after the write through the pointer),
+ * and every error path now unwinds what was acquired instead of
+ * leaking state, the mem region, the iomap, the IRQ, the wake lock
+ * and the misc devices. */
+static int tegra_spdif_probe(struct platform_device *pdev)
+{
+	int rc, i;
+	struct resource *res, *mem;
+	struct audio_driver_state *state;
+
+	pr_info("%s\n", __func__);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev = pdev;
+	state->pdata = pdev->dev.platform_data;
+	BUG_ON(!state->pdata);
+	state->pdata->driver_data = state;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource!\n");
+		rc = -ENODEV;
+		goto err_free_state;
+	}
+
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(&pdev->dev, "memory region already claimed!\n");
+		rc = -ENOMEM;
+		goto err_free_state;
+	}
+
+	state->spdif_phys = mem->start;
+	state->spdif_base = (unsigned long)ioremap(mem->start,
+			resource_size(mem));
+	if (!state->spdif_base) {
+		dev_err(&pdev->dev, "cannot remap iomem!\n");
+		rc = -EIO;
+		goto err_release_mem;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no dma resource!\n");
+		rc = -ENODEV;
+		goto err_unmap;
+	}
+	state->dma_req_sel = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no irq resource!\n");
+		rc = -ENODEV;
+		goto err_unmap;
+	}
+	state->irq = res->start;
+
+	/* NOTE(review): spdif_configure() enables the SPDIF clock and no
+	 * symmetric teardown exists here, so the clock is not undone on
+	 * the error paths below. */
+	rc = spdif_configure(pdev);
+	if (rc < 0)
+		goto err_unmap;
+
+	state->out.opened = 0;
+	state->out.active = false;
+	mutex_init(&state->out.lock);
+	init_completion(&state->out.stop_completion);
+	spin_lock_init(&state->out.dma_req_lock);
+	state->out.dma_chan = NULL;
+	state->out.num_bufs = SPDIF_DEFAULT_TX_NUM_BUFS;
+	for (i = 0; i < SPDIF_MAX_NUM_BUFS; i++) {
+		init_completion(&state->out.comp[i]);
+		/* TX buf rest state is unqueued, complete. */
+		complete(&state->out.comp[i]);
+		state->out.buffer[i] = 0;
+		state->out.buf_phy[i] = 0;
+	}
+	state->out.last_queued = 0;
+	rc = init_stream_buffer(&state->out, state->out.num_bufs);
+	if (rc < 0)
+		goto err_unmap;
+
+	INIT_WORK(&state->out.allow_suspend_work, allow_suspend_worker);
+	snprintf(state->out.wake_lock_name, sizeof(state->out.wake_lock_name),
+		"tegra-audio-spdif");
+	wake_lock_init(&state->out.wake_lock, WAKE_LOCK_SUSPEND,
+			state->out.wake_lock_name);
+
+	rc = request_irq(state->irq, spdif_interrupt,
+			IRQF_DISABLED, state->pdev->name, state);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"%s: could not register handler for irq %d\n",
+			__func__, state->irq);
+		rc = -EIO;
+		goto err_destroy_wake_lock;
+	}
+
+	rc = setup_misc_device(&state->misc_out,
+			&tegra_spdif_out_fops,
+			"spdif_out");
+	if (rc < 0)
+		goto err_free_irq;
+
+	rc = setup_misc_device(&state->misc_out_ctl,
+			&tegra_spdif_out_ctl_fops,
+			"spdif_out_ctl");
+	if (rc < 0)
+		goto err_deregister_out;
+
+	sound_ops->setup(state);
+
+	rc = device_create_file(&pdev->dev, &dev_attr_dma_toggle);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+			__func__, dev_attr_dma_toggle.attr.name, rc);
+		goto err_tear_down;
+	}
+
+	rc = device_create_file(&pdev->dev, &dev_attr_tx_fifo_atn);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: could not create sysfs entry %s: %d\n",
+			__func__, dev_attr_tx_fifo_atn.attr.name, rc);
+		goto err_remove_dma_toggle;
+	}
+
+	return 0;
+
+err_remove_dma_toggle:
+	device_remove_file(&pdev->dev, &dev_attr_dma_toggle);
+err_tear_down:
+	sound_ops->tear_down(state);
+	misc_deregister(&state->misc_out_ctl);
+err_deregister_out:
+	misc_deregister(&state->misc_out);
+err_free_irq:
+	free_irq(state->irq, state);
+err_destroy_wake_lock:
+	wake_lock_destroy(&state->out.wake_lock);
+	for (i = 0; i < state->out.num_bufs; i++)
+		kfree(state->out.buffer[i]);
+err_unmap:
+	iounmap((void __iomem *)state->spdif_base);
+err_release_mem:
+	release_mem_region(mem->start, resource_size(mem));
+err_free_state:
+	kfree(state);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+/* Nothing to save: the whole block is reprogrammed on resume. */
+static int tegra_spdif_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	/* dev_info(&pdev->dev, "%s\n", __func__); */
+	return 0;
+}
+
+static int tegra_spdif_resume(struct platform_device *pdev)
+{
+	return spdif_configure(pdev);
+}
+#endif /* CONFIG_PM */
+
+/* NOTE(review): no .remove handler — the driver cannot be unbound
+ * cleanly; acceptable for a built-in platform driver. */
+static struct platform_driver tegra_spdif_driver = {
+	.driver = {
+		.name = "spdif_out",
+		.owner = THIS_MODULE,
+	},
+	.probe = tegra_spdif_probe,
+#ifdef CONFIG_PM
+	.suspend = tegra_spdif_suspend,
+	.resume = tegra_spdif_resume,
+#endif
+};
+
+/* Module entry point; no module_exit, matching the missing .remove. */
+static int __init tegra_spdif_init(void)
+{
+	return platform_driver_register(&tegra_spdif_driver);
+}
+
+module_init(tegra_spdif_init);
+MODULE_LICENSE("GPL");
#include <mach/iomap.h>
#include <mach/irqs.h>
+#include <mach/suspend.h>
#include "board.h"
#include "clock.h"
+#include "power.h"
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
#define TIMERUS_CNTR_1US 0x10
#define TIMERUS_USEC_CFG 0x14
#define TIMER_PTV 0x0
#define TIMER_PCR 0x4
-struct tegra_timer;
-
static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
+static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE);
#define timer_writel(value, reg) \
__raw_writel(value, (u32)timer_reg_base + (reg))
#define timer_readl(reg) \
__raw_readl((u32)timer_reg_base + (reg))
+static u64 tegra_sched_clock_offset;
+static u64 tegra_sched_clock_suspend_val;
+static u64 tegra_sched_clock_suspend_rtc;
+
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
static cycle_t tegra_clocksource_read(struct clocksource *cs)
{
- return cnt32_to_63(timer_readl(TIMERUS_CNTR_1US));
+ return timer_readl(TIMERUS_CNTR_1US);
}
static struct clock_event_device tegra_clockevent = {
.name = "timer_us",
.rating = 300,
.read = tegra_clocksource_read,
- .mask = 0x7FFFFFFFFFFFFFFFULL,
+ .mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
unsigned long long sched_clock(void)
{
-	return clocksource_cyc2ns(tegra_clocksource.read(&tegra_clocksource),
-		tegra_clocksource.mult, tegra_clocksource.shift);
+	/* 1 MHz counter extended to 63 bits via cnt32_to_63(); the offset
+	 * re-bases the clock across suspend (see resume below). */
+	return tegra_sched_clock_offset +
+		cnt32_to_63(timer_readl(TIMERUS_CNTR_1US)) * NSEC_PER_USEC;
+}
+
+/* Snapshot sched_clock and the always-on RTC before suspend. */
+static void tegra_sched_clock_suspend(void)
+{
+	tegra_sched_clock_suspend_val = sched_clock();
+	tegra_sched_clock_suspend_rtc = tegra_rtc_read_ms();
+}
+
+/* Advance sched_clock by the time spent suspended, measured on the
+ * RTC, so it stays monotonic across suspend/resume. */
+static void tegra_sched_clock_resume(void)
+{
+	u64 rtc_offset_ms = tegra_rtc_read_ms() - tegra_sched_clock_suspend_rtc;
+	tegra_sched_clock_offset = tegra_sched_clock_suspend_val +
+		rtc_offset_ms * NSEC_PER_MSEC -
+		(sched_clock() - tegra_sched_clock_offset);
+}
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+u64 tegra_rtc_read_ms(void)
+{
+	/* Reading MILLISECONDS latches SECONDS into the shadow register,
+	 * so the two reads are coherent. */
+	u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+	u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+	return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer. Convert the cycles elapsed since last read into
+ * nsecs and adds to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
+void read_persistent_clock(struct timespec *ts)
+{
+	u64 delta;
+	struct timespec *tsp = &persistent_ts;
+
+	/* Accumulate only the delta since the previous call. */
+	last_persistent_ms = persistent_ms;
+	persistent_ms = tegra_rtc_read_ms();
+	delta = persistent_ms - last_persistent_ms;
+
+	timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+	*ts = *tsp;
}
static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
.irq = INT_TMR3,
};
+/* TMR4 fires to wake the CPU from LP2; just clear the interrupt. */
+static irqreturn_t tegra_lp2wake_interrupt(int irq, void *dev_id)
+{
+	/* Write-1-to-clear the timer's pending bit. */
+	timer_writel(1<<30, TIMER4_BASE + TIMER_PCR);
+	return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_lp2wake_irq = {
+	.name		= "timer_lp2wake",
+	.flags		= IRQF_DISABLED,
+	.handler	= tegra_lp2wake_interrupt,
+	.dev_id		= NULL,
+	.irq		= INT_TMR4,
+};
+
static void __init tegra_init_timer(void)
{
unsigned long rate = clk_measure_input_freq();
BUG();
}
+ ret = setup_irq(tegra_lp2wake_irq.irq, &tegra_lp2wake_irq);
+ if (ret) {
+ printk(KERN_ERR "Failed to register LP2 timer IRQ: %d\n", ret);
+ BUG();
+ }
+
clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
tegra_clockevent.max_delta_ns =
clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
struct sys_timer tegra_timer = {
.init = tegra_init_timer,
};
+
+void tegra_lp2_set_trigger(unsigned long cycles)
+{
+ timer_writel(0, TIMER4_BASE + TIMER_PTV);
+ if (cycles) {
+ u32 reg = 0x80000000ul | min(0x1ffffffful, cycles);
+ timer_writel(reg, TIMER4_BASE + TIMER_PTV);
+ }
+}
+EXPORT_SYMBOL(tegra_lp2_set_trigger);
+
+unsigned long tegra_lp2_timer_remain(void)
+{
+ return timer_readl(TIMER4_BASE + TIMER_PCR) & 0x1ffffffful;
+}
+
+static u32 usec_config;
+void tegra_timer_suspend(void)
+{
+ tegra_sched_clock_suspend();
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+ tegra_sched_clock_resume();
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/usb_phy.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ * Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/resource.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <asm/mach-types.h>
+#include <mach/usb_phy.h>
+#include <mach/iomap.h>
+#include <mach/pinmux.h>
+
+#define ULPI_VIEWPORT 0x170
+#define ULPI_WAKEUP (1 << 31)
+#define ULPI_RUN (1 << 30)
+#define ULPI_RD_WR (1 << 29)
+
+#define USB_PORTSC1 0x184
+#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26)
+#define USB_PORTSC1_PHCD (1 << 23)
+#define USB_PORTSC1_WKOC (1 << 22)
+#define USB_PORTSC1_WKDS (1 << 21)
+#define USB_PORTSC1_WKCN (1 << 20)
+#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
+#define USB_PORTSC1_PP (1 << 12)
+#define USB_PORTSC1_SUSP (1 << 7)
+#define USB_PORTSC1_PE (1 << 2)
+#define USB_PORTSC1_CCS (1 << 0)
+
+#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
+#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
+#define USB_SUSP_CLR (1 << 5)
+#define USB_CLKEN (1 << 6)
+#define USB_PHY_CLK_VALID (1 << 7)
+#define UTMIP_RESET (1 << 11)
+#define UHSIC_RESET (1 << 11)
+#define UTMIP_PHY_ENABLE (1 << 12)
+#define ULPI_PHY_ENABLE (1 << 13)
+#define USB_SUSP_SET (1 << 14)
+#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+
+#define USB1_LEGACY_CTRL 0x410
+#define USB1_NO_LEGACY_MODE (1 << 0)
+#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
+#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
+#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
+ (1 << 1)
+#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
+#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
+
+#define ULPI_TIMING_CTRL_0 0x424
+#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
+#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
+
+#define ULPI_TIMING_CTRL_1 0x428
+#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
+#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
+#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
+#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
+#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
+#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
+
+#define UTMIP_PLL_CFG1 0x804
+#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
+
+#define UTMIP_XCVR_CFG0 0x808
+#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
+#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
+#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
+#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
+#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
+#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
+#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25)
+
+#define UTMIP_BIAS_CFG0 0x80c
+#define UTMIP_OTGPD (1 << 11)
+#define UTMIP_BIASPD (1 << 10)
+
+#define UTMIP_HSRX_CFG0 0x810
+#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
+#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
+
+#define UTMIP_HSRX_CFG1 0x814
+#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+
+#define UTMIP_TX_CFG0 0x820
+#define UTMIP_FS_PREABMLE_J (1 << 19)
+#define UTMIP_HS_DISCON_DISABLE (1 << 8)
+
+#define UTMIP_MISC_CFG0 0x824
+#define UTMIP_DPDM_OBSERVE (1 << 26)
+#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
+
+#define UTMIP_MISC_CFG1 0x828
+#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
+#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+
+#define UTMIP_DEBOUNCE_CFG0 0x82c
+#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
+
+#define UTMIP_BAT_CHRG_CFG0 0x830
+#define UTMIP_PD_CHRG (1 << 0)
+
+#define UTMIP_SPARE_CFG0 0x834
+#define FUSE_SETUP_SEL (1 << 3)
+
+#define UTMIP_XCVR_CFG1 0x838
+#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
+#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
+#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
+#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
+
+#define UTMIP_BIAS_CFG1 0x83c
+#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+
+static DEFINE_SPINLOCK(utmip_pad_lock);
+static int utmip_pad_count;
+
+struct tegra_xtal_freq {
+ int freq;
+ u8 enable_delay;
+ u8 stable_count;
+ u8 active_delay;
+ u8 xtal_freq_count;
+ u16 debounce;
+};
+
+static const struct tegra_xtal_freq tegra_freq_table[] = {
+ {
+ .freq = 12000000,
+ .enable_delay = 0x02,
+ .stable_count = 0x2F,
+ .active_delay = 0x04,
+ .xtal_freq_count = 0x76,
+ .debounce = 0x7530,
+ },
+ {
+ .freq = 13000000,
+ .enable_delay = 0x02,
+ .stable_count = 0x33,
+ .active_delay = 0x05,
+ .xtal_freq_count = 0x7F,
+ .debounce = 0x7EF4,
+ },
+ {
+ .freq = 19200000,
+ .enable_delay = 0x03,
+ .stable_count = 0x4B,
+ .active_delay = 0x06,
+ .xtal_freq_count = 0xBB,
+ .debounce = 0xBB80,
+ },
+ {
+ .freq = 26000000,
+ .enable_delay = 0x04,
+ .stable_count = 0x66,
+ .active_delay = 0x09,
+ .xtal_freq_count = 0xFE,
+ .debounce = 0xFDE8,
+ },
+};
+
+static struct tegra_utmip_config utmip_default[] = {
+ [0] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 9,
+ .xcvr_lsfslew = 1,
+ .xcvr_lsrslew = 1,
+ },
+ [2] = {
+ .hssync_start_delay = 9,
+ .idle_wait_delay = 17,
+ .elastic_limit = 16,
+ .term_range_adj = 6,
+ .xcvr_setup = 9,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ },
+};
+
+static inline bool phy_is_ulpi(struct tegra_usb_phy *phy)
+{
+ return (phy->instance == 1);
+}
+
+static int utmip_pad_open(struct tegra_usb_phy *phy)
+{
+ phy->pad_clk = clk_get_sys("utmip-pad", NULL);
+ if (IS_ERR(phy->pad_clk)) {
+ pr_err("%s: can't get utmip pad clock\n", __func__);
+ return PTR_ERR(phy->pad_clk);
+ }
+
+ if (phy->instance == 0) {
+ phy->pad_regs = phy->regs;
+ } else {
+ phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE);
+ if (!phy->pad_regs) {
+ pr_err("%s: can't remap usb registers\n", __func__);
+ clk_put(phy->pad_clk);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void utmip_pad_close(struct tegra_usb_phy *phy)
+{
+ if (phy->instance != 0)
+ iounmap(phy->pad_regs);
+ clk_put(phy->pad_clk);
+}
+
+static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+{
+ unsigned long val, flags;
+ void __iomem *base = phy->pad_regs;
+
+ clk_enable(phy->pad_clk);
+
+ spin_lock_irqsave(&utmip_pad_lock, flags);
+
+ if (utmip_pad_count++ == 0) {
+ val = readl(base + UTMIP_BIAS_CFG0);
+ val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
+ writel(val, base + UTMIP_BIAS_CFG0);
+ }
+
+ spin_unlock_irqrestore(&utmip_pad_lock, flags);
+
+ clk_disable(phy->pad_clk);
+}
+
+static int utmip_pad_power_off(struct tegra_usb_phy *phy)
+{
+ unsigned long val, flags;
+ void __iomem *base = phy->pad_regs;
+
+ if (!utmip_pad_count) {
+ pr_err("%s: utmip pad already powered off\n", __func__);
+ return -EINVAL;
+ }
+
+ clk_enable(phy->pad_clk);
+
+ spin_lock_irqsave(&utmip_pad_lock, flags);
+
+ if (--utmip_pad_count == 0) {
+ val = readl(base + UTMIP_BIAS_CFG0);
+ val |= UTMIP_OTGPD | UTMIP_BIASPD;
+ writel(val, base + UTMIP_BIAS_CFG0);
+ }
+
+ spin_unlock_irqrestore(&utmip_pad_lock, flags);
+
+ clk_disable(phy->pad_clk);
+
+ return 0;
+}
+
+static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ unsigned long timeout = 2500;
+ do {
+ if ((readl(reg) & mask) == result)
+ return 0;
+ udelay(1);
+ timeout--;
+ } while (timeout);
+ return -1;
+}
+
+static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ if (phy->instance == 0) {
+ val = readl(base + USB_SUSP_CTRL);
+ val |= USB_SUSP_SET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ udelay(10);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_SUSP_SET;
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ if (phy->instance == 2) {
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_PHCD;
+ writel(val, base + USB_PORTSC1);
+ }
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
+}
+
+static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ if (phy->instance == 0) {
+ val = readl(base + USB_SUSP_CTRL);
+ val |= USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
+
+ udelay(10);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ if (phy->instance == 2) {
+ val = readl(base + USB_PORTSC1);
+ val &= ~USB_PORTSC1_PHCD;
+ writel(val, base + USB_PORTSC1);
+ }
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID) < 0)
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
+}
+
+static int utmi_phy_power_on(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_utmip_config *config = phy->config;
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ if (phy->instance == 0) {
+ val = readl(base + USB1_LEGACY_CTRL);
+ val |= USB1_NO_LEGACY_MODE;
+ writel(val, base + USB1_LEGACY_CTRL);
+ }
+
+ val = readl(base + UTMIP_TX_CFG0);
+ val &= ~UTMIP_FS_PREABMLE_J;
+ writel(val, base + UTMIP_TX_CFG0);
+
+ val = readl(base + UTMIP_HSRX_CFG0);
+ val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
+ val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
+ val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
+ writel(val, base + UTMIP_HSRX_CFG0);
+
+ val = readl(base + UTMIP_HSRX_CFG1);
+ val &= ~UTMIP_HS_SYNC_START_DLY(~0);
+ val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
+ writel(val, base + UTMIP_HSRX_CFG1);
+
+ val = readl(base + UTMIP_DEBOUNCE_CFG0);
+ val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
+ val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
+ writel(val, base + UTMIP_DEBOUNCE_CFG0);
+
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
+ writel(val, base + UTMIP_MISC_CFG0);
+
+ val = readl(base + UTMIP_MISC_CFG1);
+ val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0));
+ val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
+ UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
+ writel(val, base + UTMIP_MISC_CFG1);
+
+ val = readl(base + UTMIP_PLL_CFG1);
+ val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
+ val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
+ UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
+ writel(val, base + UTMIP_PLL_CFG1);
+
+ if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ utmip_pad_power_on(phy);
+
+ val = readl(base + UTMIP_XCVR_CFG0);
+ val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
+ UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) |
+ UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) |
+ UTMIP_XCVR_HSSLEW_MSB(~0));
+ val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
+ val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew);
+ val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew);
+ writel(val, base + UTMIP_XCVR_CFG0);
+
+ val = readl(base + UTMIP_XCVR_CFG1);
+ val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
+ UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
+ val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
+ writel(val, base + UTMIP_XCVR_CFG1);
+
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val &= ~UTMIP_PD_CHRG;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+
+ val = readl(base + UTMIP_BIAS_CFG1);
+ val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
+ val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
+ writel(val, base + UTMIP_BIAS_CFG1);
+
+ if (phy->instance == 0) {
+ val = readl(base + UTMIP_SPARE_CFG0);
+ if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
+ val &= ~FUSE_SETUP_SEL;
+ else
+ val |= FUSE_SETUP_SEL;
+ writel(val, base + UTMIP_SPARE_CFG0);
+ }
+
+ if (phy->instance == 2) {
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~UTMIP_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ if (phy->instance == 0) {
+ val = readl(base + USB1_LEGACY_CTRL);
+ val &= ~USB1_VBUS_SENSE_CTL_MASK;
+ val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
+ writel(val, base + USB1_LEGACY_CTRL);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_SUSP_SET;
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ utmi_phy_clk_enable(phy);
+
+ if (phy->instance == 2) {
+ val = readl(base + USB_PORTSC1);
+ val &= ~USB_PORTSC1_PTS(~0);
+ writel(val, base + USB_PORTSC1);
+ }
+
+ return 0;
+}
+
+static void utmi_phy_power_off(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ utmi_phy_clk_disable(phy);
+
+ if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
+ val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
+ writel(val, base + USB_SUSP_CTRL);
+ }
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UTMIP_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val |= UTMIP_PD_CHRG;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+
+ val = readl(base + UTMIP_XCVR_CFG0);
+ val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
+ UTMIP_FORCE_PDZI_POWERDOWN;
+ writel(val, base + UTMIP_XCVR_CFG0);
+
+ val = readl(base + UTMIP_XCVR_CFG1);
+ val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
+ UTMIP_FORCE_PDDR_POWERDOWN;
+ writel(val, base + UTMIP_XCVR_CFG1);
+
+ utmip_pad_power_off(phy);
+}
+
+static void utmi_phy_preresume(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + UTMIP_TX_CFG0);
+ val |= UTMIP_HS_DISCON_DISABLE;
+ writel(val, base + UTMIP_TX_CFG0);
+}
+
+static void utmi_phy_postresume(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + UTMIP_TX_CFG0);
+ val &= ~UTMIP_HS_DISCON_DISABLE;
+ writel(val, base + UTMIP_TX_CFG0);
+}
+
+static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
+ if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
+ else
+ val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
+ writel(val, base + UTMIP_MISC_CFG0);
+ udelay(1);
+
+ val = readl(base + UTMIP_MISC_CFG0);
+ val |= UTMIP_DPDM_OBSERVE;
+ writel(val, base + UTMIP_MISC_CFG0);
+ udelay(10);
+}
+
+static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + UTMIP_MISC_CFG0);
+ val &= ~UTMIP_DPDM_OBSERVE;
+ writel(val, base + UTMIP_MISC_CFG0);
+ udelay(10);
+}
+
+static void ulpi_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ /*Tristate ulpi interface before USB controller resume*/
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAA, TEGRA_TRI_TRISTATE);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAB, TEGRA_TRI_TRISTATE);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UDA, TEGRA_TRI_TRISTATE);
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val &= ~ULPI_OUTPUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+}
+
+static void ulpi_phy_restore_end(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_OUTPUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAA, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UAB, TEGRA_TRI_NORMAL);
+ tegra_pinmux_set_tristate(TEGRA_PINGROUP_UDA, TEGRA_TRI_NORMAL);
+}
+
+static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
+{
+ int ret;
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ struct tegra_ulpi_config *config = phy->config;
+
+ clk_enable(phy->clk);
+ msleep(1);
+
+ if (!phy->initialized) {
+ phy->initialized = 1;
+ gpio_direction_output(config->reset_gpio, 0);
+ msleep(5);
+ gpio_direction_output(config->reset_gpio, 1);
+ }
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= UHSIC_RESET;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + ULPI_TIMING_CTRL_0);
+ val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
+ writel(val, base + ULPI_TIMING_CTRL_0);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= ULPI_PHY_ENABLE;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val |= USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ USB_PHY_CLK_VALID) < 0)
+ pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_CLKEN, USB_CLKEN) < 0)
+ pr_err("%s: timeout waiting for AHB clock\n", __func__);
+
+ val = readl(base + USB_SUSP_CTRL);
+ val &= ~USB_SUSP_CLR;
+ writel(val, base + USB_SUSP_CTRL);
+
+ val = 0;
+ writel(val, base + ULPI_TIMING_CTRL_1);
+
+ val |= ULPI_DATA_TRIMMER_SEL(4);
+ val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
+ val |= ULPI_DIR_TRIMMER_SEL(4);
+ writel(val, base + ULPI_TIMING_CTRL_1);
+ udelay(10);
+
+ val |= ULPI_DATA_TRIMMER_LOAD;
+ val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
+ val |= ULPI_DIR_TRIMMER_LOAD;
+ writel(val, base + ULPI_TIMING_CTRL_1);
+
+ /* Fix VbusInvalid due to floating VBUS */
+ ret = otg_io_write(phy->ulpi, 0x40, 0x08);
+ if (ret) {
+ pr_err("%s: ulpi write failed\n", __func__);
+ return ret;
+ }
+
+ ret = otg_io_write(phy->ulpi, 0x80, 0x0B);
+ if (ret) {
+ pr_err("%s: ulpi write failed\n", __func__);
+ return ret;
+ }
+
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN;
+ writel(val, base + USB_PORTSC1);
+
+ return 0;
+}
+
+static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
+{
+ unsigned long val;
+ void __iomem *base = phy->regs;
+ int ret;
+
+ /* Disable VbusValid, SessEnd comparators */
+ ret = otg_io_write(phy->ulpi, 0x00, 0x0D);
+ if (ret)
+ pr_err("%s: ulpi write 0x0D failed\n", __func__);
+
+ ret = otg_io_write(phy->ulpi, 0x00, 0x10);
+ if (ret)
+ pr_err("%s: ulpi write 0x10 failed\n", __func__);
+
+ /* Disable IdFloat comparator */
+ ret = otg_io_write(phy->ulpi, 0x00, 0x19);
+ if (ret)
+ pr_err("%s: ulpi write 0x19 failed\n", __func__);
+
+ ret = otg_io_write(phy->ulpi, 0x00, 0x1D);
+ if (ret)
+ pr_err("%s: ulpi write 0x1D failed\n", __func__);
+
+ /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
+ * Controller to immediately bring the ULPI PHY out of low power
+ */
+ val = readl(base + USB_PORTSC1);
+ val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
+ writel(val, base + USB_PORTSC1);
+
+ /* Put the PHY in the low power mode */
+ val = readl(base + USB_PORTSC1);
+ val |= USB_PORTSC1_PHCD;
+ writel(val, base + USB_PORTSC1);
+
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ pr_err("%s: timeout waiting for phy to stop\n", __func__);
+
+ clk_disable(phy->clk);
+}
+
+struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
+ void *config, enum tegra_usb_phy_mode phy_mode)
+{
+ struct tegra_usb_phy *phy;
+ struct tegra_ulpi_config *ulpi_config;
+ unsigned long parent_rate;
+ int i;
+ int err;
+
+ phy = kmalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
+ if (!phy)
+ return ERR_PTR(-ENOMEM);
+
+ phy->instance = instance;
+ phy->regs = regs;
+ phy->config = config;
+ phy->mode = phy_mode;
+ phy->initialized = 0;
+
+ if (!phy->config) {
+ if (phy_is_ulpi(phy)) {
+ pr_err("%s: ulpi phy configuration missing", __func__);
+ err = -EINVAL;
+ goto err0;
+ } else {
+ phy->config = &utmip_default[instance];
+ }
+ }
+
+ phy->pll_u = clk_get_sys(NULL, "pll_u");
+ if (IS_ERR(phy->pll_u)) {
+ pr_err("Can't get pll_u clock\n");
+ err = PTR_ERR(phy->pll_u);
+ goto err0;
+ }
+ clk_enable(phy->pll_u);
+
+ parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
+ for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
+ if (tegra_freq_table[i].freq == parent_rate) {
+ phy->freq = &tegra_freq_table[i];
+ break;
+ }
+ }
+ if (!phy->freq) {
+ pr_err("invalid pll_u parent rate %ld\n", parent_rate);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ if (phy_is_ulpi(phy)) {
+ ulpi_config = config;
+ phy->clk = clk_get_sys(NULL, ulpi_config->clk);
+ if (IS_ERR(phy->clk)) {
+ pr_err("%s: can't get ulpi clock\n", __func__);
+ err = -ENXIO;
+ goto err1;
+ }
+ tegra_gpio_enable(ulpi_config->reset_gpio);
+ gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
+ gpio_direction_output(ulpi_config->reset_gpio, 0);
+ phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
+ phy->ulpi->io_priv = regs + ULPI_VIEWPORT;
+ } else {
+ err = utmip_pad_open(phy);
+ if (err < 0)
+ goto err1;
+ }
+
+ return phy;
+
+err1:
+ clk_disable(phy->pll_u);
+ clk_put(phy->pll_u);
+err0:
+ kfree(phy);
+ return ERR_PTR(err);
+}
+
+int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
+{
+ if (phy_is_ulpi(phy))
+ return ulpi_phy_power_on(phy);
+ else
+ return utmi_phy_power_on(phy);
+}
+
+void tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
+{
+ if (phy_is_ulpi(phy))
+ ulpi_phy_power_off(phy);
+ else
+ utmi_phy_power_off(phy);
+}
+
+void tegra_usb_phy_preresume(struct tegra_usb_phy *phy)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_preresume(phy);
+}
+
+void tegra_usb_phy_postresume(struct tegra_usb_phy *phy)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_postresume(phy);
+}
+
+void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_restore_start(phy, port_speed);
+ else
+ ulpi_phy_restore_start(phy, port_speed);
+}
+
+void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_restore_end(phy);
+ else
+ ulpi_phy_restore_end(phy);
+}
+
+void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_clk_disable(phy);
+}
+
+void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy)
+{
+ if (!phy_is_ulpi(phy))
+ utmi_phy_clk_enable(phy);
+}
+
+void tegra_usb_phy_close(struct tegra_usb_phy *phy)
+{
+ if (phy_is_ulpi(phy))
+ clk_put(phy->clk);
+ else
+ utmip_pad_close(phy);
+ clk_disable(phy->pll_u);
+ clk_put(phy->pll_u);
+ kfree(phy);
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/wakeups-t2.h
+ *
+ * Declarations of Tegra 2 LP0 wakeup sources
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MACH_TEGRA_WAKEUPS_T2_H
+#define __MACH_TEGRA_WAKEUPS_T2_H
+
+#define TEGRA_WAKE_GPIO_PO5 (1 << 0)
+#define TEGRA_WAKE_GPIO_PV3 (1 << 1)
+#define TEGRA_WAKE_GPIO_PL1 (1 << 2)
+#define TEGRA_WAKE_GPIO_PB6 (1 << 3)
+#define TEGRA_WAKE_GPIO_PN7 (1 << 4)
+#define TEGRA_WAKE_GPIO_PA0 (1 << 5)
+#define TEGRA_WAKE_GPIO_PU5 (1 << 6)
+#define TEGRA_WAKE_GPIO_PU6 (1 << 7)
+#define TEGRA_WAKE_GPIO_PC7 (1 << 8)
+#define TEGRA_WAKE_GPIO_PS2 (1 << 9)
+#define TEGRA_WAKE_GPIO_PAA1 (1 << 10)
+#define TEGRA_WAKE_GPIO_PW3 (1 << 11)
+#define TEGRA_WAKE_GPIO_PW2 (1 << 12)
+#define TEGRA_WAKE_GPIO_PY6 (1 << 13)
+#define TEGRA_WAKE_GPIO_PV6 (1 << 14)
+#define TEGRA_WAKE_GPIO_PJ7 (1 << 15)
+#define TEGRA_WAKE_RTC_ALARM (1 << 16)
+#define TEGRA_WAKE_KBC_EVENT (1 << 17)
+#define TEGRA_WAKE_PWR_INT (1 << 18)
+#define TEGRA_WAKE_USB1_VBUS (1 << 19)
+#define TEGRA_WAKE_USB3_VBUS (1 << 20)
+#define TEGRA_WAKE_USB1_ID (1 << 21)
+#define TEGRA_WAKE_USB3_ID (1 << 22)
+#define TEGRA_WAKE_GPIO_PI5 (1 << 23)
+#define TEGRA_WAKE_GPIO_PV2 (1 << 24)
+#define TEGRA_WAKE_GPIO_PS4 (1 << 25)
+#define TEGRA_WAKE_GPIO_PS5 (1 << 26)
+#define TEGRA_WAKE_GPIO_PS0 (1 << 27)
+#define TEGRA_WAKE_GPIO_PQ6 (1 << 28)
+#define TEGRA_WAKE_GPIO_PQ7 (1 << 29)
+#define TEGRA_WAKE_GPIO_PN2 (1 << 30)
+
+#endif
help
This option enables the L2x0 PrimeCell.
+config CACHE_PL310
+ bool
+ depends on CACHE_L2X0
+ default y if CPU_V7
+ help
+ This option enables support for the PL310 cache controller.
+
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
depends on (ARCH_DOVE || ARCH_MMP)
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+bool l2x0_disabled;
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_always(void __iomem *reg, unsigned long mask)
{
/* wait for the operation to complete */
while (readl_relaxed(reg) & mask)
;
}
+#ifdef CONFIG_CACHE_PL310
+
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+ /* cache operations are atomic */
+}
+
+#define _l2x0_lock(lock, flags) ((void)(flags))
+#define _l2x0_unlock(lock, flags) ((void)(flags))
+
+#define block_end(start, end) (end)
+
+#define L2CC_TYPE "PL310/L2C-310"
+
+#else /* !CONFIG_CACHE_PL310 */
+
+#define cache_wait cache_wait_always
+
+static DEFINE_SPINLOCK(l2x0_lock);
+#define _l2x0_lock(lock, flags) spin_lock_irqsave(lock, flags)
+#define _l2x0_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
+
+#define block_end(start, end) ((start) + min((end) - (start), 4096UL))
+
+#define L2CC_TYPE "L2x0"
+
+#endif /* CONFIG_CACHE_PL310 */
+
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
{
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
}
static inline void l2x0_inv_all(void)
unsigned long flags;
/* invalidate all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ cache_wait_always(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
+}
+
+static inline void l2x0_flush_all(void)
+{
+ unsigned long flags;
+
+ /* flush all ways */
+ _l2x0_lock(&l2x0_lock, flags);
+ writel(0xff, l2x0_base + L2X0_CLEAN_INV_WAY);
+ cache_wait_always(l2x0_base + L2X0_CLEAN_INV_WAY, 0xff);
+ cache_sync();
+ _l2x0_unlock(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
}
while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
+ unsigned long blk_end = block_end(start, end);
while (start < blk_end) {
l2x0_inv_line(start);
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
+ unsigned long blk_end = block_end(start, end);
while (start < blk_end) {
l2x0_clean_line(start);
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
void __iomem *base = l2x0_base;
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
+ unsigned long blk_end = block_end(start, end);
debug_writel(0x03);
while (start < blk_end) {
debug_writel(0x00);
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
+ _l2x0_lock(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ _l2x0_unlock(&l2x0_lock, flags);
}
-void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void l2x0_shutdown(void)
+{
+ unsigned long flags;
+
+ if (l2x0_disabled)
+ return;
+
+ BUG_ON(num_online_cpus() > 1);
+
+ local_irq_save(flags);
+
+ if (readl(l2x0_base + L2X0_CTRL) & 1) {
+ int m;
+ /* lockdown all ways, all masters to prevent new line
+ * allocation during maintenance */
+ for (m=0; m<8; m++) {
+ writel(l2x0_way_mask,
+ l2x0_base + L2X0_LOCKDOWN_WAY_D + (m*8));
+ writel(l2x0_way_mask,
+ l2x0_base + L2X0_LOCKDOWN_WAY_I + (m*8));
+ }
+ l2x0_flush_all();
+ writel(0, l2x0_base + L2X0_CTRL);
+ /* unlock cache ways */
+ for (m=0; m<8; m++) {
+ writel(0, l2x0_base + L2X0_LOCKDOWN_WAY_D + (m*8));
+ writel(0, l2x0_base + L2X0_LOCKDOWN_WAY_I + (m*8));
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static void l2x0_enable(__u32 aux_val, __u32 aux_mask)
{
__u32 aux;
__u32 cache_id;
int ways;
const char *type;
- l2x0_base = base;
+ if (l2x0_disabled)
+ return;
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
writel_relaxed(1, l2x0_base + L2X0_CTRL);
}
+ /*printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+ ways, cache_id, aux);*/
+}
+
+void l2x0_restart(void)
+{
+ l2x0_enable(0, ~0ul);
+}
+
+void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+{
+ if (l2x0_disabled) {
+ pr_info(L2CC_TYPE " cache controller disabled\n");
+ return;
+ }
+
+ l2x0_base = base;
+
+ l2x0_enable(aux_val, aux_mask);
+
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
+}
- printk(KERN_INFO "%s cache controller enabled\n", type);
- printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
- ways, cache_id, aux);
+static int __init l2x0_disable(char *unused)
+{
+ l2x0_disabled = 1;
+ return 0;
}
+early_param("nol2x0", l2x0_disable);
* - end - virtual end address of region
*/
v6_dma_inv_range:
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
+#endif
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrneb r2, [r1, #-1] @ read for ownership
+ strneb r2, [r1, #-1] @ write for ownership
+#endif
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
-#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
-#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlo r2, [r0] @ read for ownership
+ strlo r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
bhi v6_dma_flush_dcache_all
#endif
- bic r0, r0, #D_CACHE_LINE_SIZE - 1
-1:
+
#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
#endif
+ bic r0, r0, #D_CACHE_LINE_SIZE - 1
+1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlob r2, [r0] @ read for ownership
+ strlob r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
return ret;
}
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
pte = pte_offset_map_nested(pmd, address);
- spin_lock(ptl);
+ do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
- spin_unlock(ptl);
+ do_pte_unlock(ptl);
pte_unmap_nested(pte);
return ret;
return;
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(mapping, page);
-#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
#include "mm.h"
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
+ if (!cache_ops_need_broadcast() &&
+ !PageHighMem(page) && mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
- else
-#endif
- {
+ else {
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
}
}
-static void __init arm_bootmem_init(struct meminfo *mi,
- unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
{
unsigned int boot_pages;
phys_addr_t bitmap;
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
- for_each_bank(i, mi) {
- struct membank *bank = &mi->bank[i];
- if (!bank->highmem)
- free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+ /* Free the lowmem regions from memblock into bootmem. */
+ for (i = 0; i < memblock.memory.cnt; i++) {
+ unsigned long start = memblock_start_pfn(&memblock.memory, i);
+ unsigned long end = memblock_end_pfn(&memblock.memory, i);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
- /*
- * Reserve the memblock reserved regions in bootmem.
- */
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
for (i = 0; i < memblock.reserved.cnt; i++) {
- phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
- if (start >= start_pfn &&
- memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
- reserve_bootmem_node(pgdat, __pfn_to_phys(start),
- memblock_size_bytes(&memblock.reserved, i),
- BOOTMEM_DEFAULT);
+ unsigned long start = memblock_start_pfn(&memblock.reserved, i);
+ unsigned long size = memblock_size_bytes(&memblock.reserved, i);
+
+ if (start >= end_pfn)
+ break;
+ if (start + PFN_UP(size) > end_pfn)
+ size = (end_pfn - start) << PAGE_SHIFT;
+
+ reserve_bootmem(__pfn_to_phys(start), size, BOOTMEM_DEFAULT);
}
}
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
- unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
int i;
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_bank(i, mi) {
- int idx = 0;
+ for (i = 0; i < memblock.memory.cnt; i++) {
+ unsigned long start = memblock_start_pfn(&memblock.memory, i);
+ unsigned long end = memblock_end_pfn(&memblock.memory, i);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+
+ zhole_size[0] -= low_end - start;
+ }
+
#ifdef CONFIG_HIGHMEM
- if (mi->bank[i].highmem)
- idx = ZONE_HIGHMEM;
+ if (end > max_low) {
+ unsigned long high_start = max(start, max_low);
+
+ zhole_size[ZONE_HIGHMEM] -= end - high_start;
+ }
#endif
- zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
}
/*
find_limits(mi, &min, &max_low, &max_high);
- arm_bootmem_init(mi, min, max_low);
+ arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- arm_bootmem_free(mi, min, max_low, max_high);
+ arm_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
}
}
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ int i, j;
+
+ /* set highmem page free */
+ for (i = j = 0; i < memblock.memory.cnt; i++) {
+ unsigned long start = memblock_start_pfn(&memblock.memory, i);
+ unsigned long end = memblock_end_pfn(&memblock.memory, i);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for (; j < memblock.reserved.cnt; j++) {
+ unsigned long res_start;
+ unsigned long res_end;
+
+ res_start = memblock_start_pfn(&memblock.reserved, j);
+ res_end = res_start + PFN_UP(memblock_size_bytes(&memblock.reserved, j));
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ totalhigh_pages += free_area(start, res_start,
+ NULL);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ totalhigh_pages += free_area(start, end, NULL);
+ }
+ totalram_pages += totalhigh_pages;
+#endif
+}
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
-#ifdef CONFIG_HIGHMEM
- /* set highmem page free */
- for_each_bank (i, &meminfo) {
- unsigned long start = bank_pfn_start(&meminfo.bank[i]);
- unsigned long end = bank_pfn_end(&meminfo.bank[i]);
- if (start >= max_low_pfn + PHYS_PFN_OFFSET)
- totalhigh_pages += free_area(start, end, NULL);
- }
- totalram_pages += totalhigh_pages;
-#endif
+ free_highpages();
reserved_pages = free_pages = 0;
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
- for (i = 0; i < meminfo.nr_banks; i++) {
- num_physpages += bank_pfn_size(&meminfo.bank[i]);
- printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+ for (i = 0; i < memblock.memory.cnt; i++) {
+ unsigned long pages = memblock_size_pages(&memblock.memory, i);
+ num_physpages += pages;
+ printk(" %luMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
static inline void prepare_page_table(void)
{
unsigned long addr;
+ phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
+ /*
+ * Find the end of the first block of lowmem. This is complicated
+ * when we use memblock.
+ */
+ end = memblock.memory.region[0].base + memblock.memory.region[0].size;
+ if (end >= lowmem_end_addr)
+ end = lowmem_end_addr;
+
/*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
- for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+ for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
#endif
}
-static inline void map_memory_bank(struct membank *bank)
-{
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-}
-
static void __init map_lowmem(void)
{
- struct meminfo *mi = &meminfo;
int i;
/* Map all the lowmem memory banks. */
- for (i = 0; i < mi->nr_banks; i++) {
- struct membank *bank = &mi->bank[i];
+ for (i = 0; i < memblock.memory.cnt; i++) {
+ phys_addr_t start = memblock.memory.region[i].base;
+ phys_addr_t end = start + memblock.memory.region[i].size;
+ struct map_desc map;
+
+ if (end >= lowmem_end_addr)
+ end = lowmem_end_addr;
+ if (start >= end)
+ break;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
- if (!bank->highmem)
- map_memory_bank(bank);
+ create_mapping(&map);
}
}
.long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB
.long 0x00 @ L_PTE_MT_MINICACHE (not present)
.long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
.long 0x00 @ unused
* NS1 = PRRR[19] = 1 - normal shareable property
* NOS = PRRR[24+n] = 1 - not outer shareable
*/
- ldr r5, =0xff0a81a8 @ PRRR
- ldr r6, =0x40e040e0 @ NMRR
+ ldr r5, =0xff0a89a8 @ PRRR
+#ifdef CONFIG_SMP
+ ldr r6, =0xc0e0c4e0 @ NMRR
+#else
+ ldr r6, =0x40e044e0
+#endif
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
.long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB (not present?)
.long 0x00 @ L_PTE_MT_MINICACHE (not present)
.long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?)
.long 0x00 @ unused
.long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
- .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB
.long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
.long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
.long 0x00 @ unused
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <mach/hardware.h>
l = dma_read(CCR(lch));
/*
- * Errata: On ES2.0 BUFFERING disable must be set.
- * This will always fail on ES1.0
+ * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
+ * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
+ * bursting is enabled. This might result in data getting stalled
+ * in the FIFO at the end of the block.
+ * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
+ * guarantee no data will stay in the DMA FIFO in case inter frame
+ * buffering occurs.
*/
- if (cpu_is_omap24xx())
- l |= OMAP_DMA_CCR_EN;
+ if (cpu_is_omap2420() ||
+ (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
+ l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
l |= OMAP_DMA_CCR_EN;
dma_write(l, CCR(lch));
dma_write(0, CICR(lch));
l = dma_read(CCR(lch));
- l &= ~OMAP_DMA_CCR_EN;
- dma_write(l, CCR(lch));
+ /* OMAP3 Errata i541: sDMA FIFO draining does not finish */
+ if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
+ int i = 0;
+ u32 sys_cf;
+
+ /* Configure No-Standby */
+ l = dma_read(OCP_SYSCONFIG);
+ sys_cf = l;
+ l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+ l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+ dma_write(l , OCP_SYSCONFIG);
+
+ l = dma_read(CCR(lch));
+ l &= ~OMAP_DMA_CCR_EN;
+ dma_write(l, CCR(lch));
+
+ /* Wait for sDMA FIFO drain */
+ l = dma_read(CCR(lch));
+ while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
+ OMAP_DMA_CCR_WR_ACTIVE))) {
+ udelay(5);
+ i++;
+ l = dma_read(CCR(lch));
+ }
+ if (i >= 100)
+ printk(KERN_ERR "DMA drain did not complete on "
+ "lch %d\n", lch);
+ /* Restore OCP_SYSCONFIG */
+ dma_write(sys_cf, OCP_SYSCONFIG);
+ } else {
+ l &= ~OMAP_DMA_CCR_EN;
+ dma_write(l, CCR(lch));
+ }
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch;
#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
#define OMAP_DMA_CCR_EN (1 << 7)
+#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
+#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
+#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
+#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
#define OMAP_DMA_DATA_TYPE_S8 0x00
#define OMAP_DMA_DATA_TYPE_S16 0x01
all: linux.bin
-BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
+# With make 3.82 we cannot mix normal and wildcard targets
+BOOT_TARGETS1 = linux.bin linux.bin.gz
+BOOT_TARGETS2 = simpleImage.%
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-$(BOOT_TARGETS): vmlinux
+$(BOOT_TARGETS1): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+$(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
*
* Copyright (c) 2009 Qi Hardware inc.,
* Author: Xiangfu Liu <xiangfu@qi-hardware.com>
- * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de>
+ * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or later
QI_LB60_GPIO_KEYIN(3),
QI_LB60_GPIO_KEYIN(4),
QI_LB60_GPIO_KEYIN(5),
- QI_LB60_GPIO_KEYIN(7),
+ QI_LB60_GPIO_KEYIN(6),
QI_LB60_GPIO_KEYIN8,
};
switch (unit) {
case PM_VPU:
mask = 0x4c; /* byte 0 bits 2,3,6 */
+ break;
case PM_LSU0:
/* byte 2 bits 0,2,3,4,6; all of byte 1 */
mask = 0x085dff00;
+ break;
case PM_LSU1L:
mask = 0x50 << 24; /* byte 3 bits 4,6 */
break;
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
- subpage_protection(pgdir, ea));
+ subpage_protection(mm, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
static int notrace s390_revalidate_registers(struct mci *mci)
{
int kill_task;
- u64 tmpclock;
u64 zero;
void *fpt_save_area, *fpt_creg_save_area;
: "0", "cc");
#endif
/* Revalidate clock comparator register */
- asm volatile(
- " stck 0(%1)\n"
- " sckc 0(%1)"
- : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
-
+ if (S390_lowcore.clock_comparator == -1)
+ set_clock_comparator(S390_lowcore.mcck_clock);
+ else
+ set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
if (!mci->wp)
/*
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>
+#include <linux/cpu.h>
#include <asm/s390_ext.h>
#include <asm/timer.h>
__ctl_set_bit(0,10);
}
+static int __cpuinit s390_nohz_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct s390_idle_data *idle;
+ long cpu = (long) hcpu;
+
+ idle = &per_cpu(s390_idle, cpu);
+ switch (action) {
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ idle->nohz_delay = 0;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
+ cpu_notifier(s390_nohz_notify, 0);
}
{
unsigned long mask, cr0, cr0_saved;
u64 clock_saved;
+ u64 end;
+ mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ end = get_clock() + (usecs << 12);
clock_saved = local_tick_disable();
- set_clock_comparator(get_clock() + (usecs << 12));
__ctl_store(cr0_saved, 0, 0);
cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
__ctl_load(cr0 , 0, 0);
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
lockdep_off();
- trace_hardirqs_on();
- __load_psw_mask(mask);
- local_irq_disable();
+ do {
+ set_clock_comparator(end);
+ trace_hardirqs_on();
+ __load_psw_mask(mask);
+ local_irq_disable();
+ } while (get_clock() < end);
lockdep_on();
__ctl_load(cr0_saved, 0, 0);
local_tick_enable(clock_saved);
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
-asmlinkage int sys_execve(const char __user *ufilename, char __user * __user *uargv,
- char __user * __user *uenvp, unsigned long r7,
- struct pt_regs __regs);
+asmlinkage int sys_execve(const char __user *ufilename,
+ const char __user *const __user *uargv,
+ const char __user *const __user *uenvp,
+ unsigned long r7, struct pt_regs __regs);
asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
int (*v2_dev_open)(char *devpath);
void (*v2_dev_close)(int d);
int (*v2_dev_read)(int d, char *buf, int nbytes);
- int (*v2_dev_write)(int d, char *buf, int nbytes);
+ int (*v2_dev_write)(int d, const char *buf, int nbytes);
int (*v2_dev_seek)(int d, int hi, int lo);
/* Never issued (multistage load support) */
extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
-/* Device operations. */
-
-/* Open the device described by the passed string. Note, that the format
- * of the string is different on V0 vs. V2->higher proms. The caller must
- * know what he/she is doing! Returns the device descriptor, an int.
- */
-extern int prom_devopen(char *device_string);
-
-/* Close a previously opened device described by the passed integer
- * descriptor.
- */
-extern int prom_devclose(int device_handle);
-
-/* Do a seek operation on the device described by the passed integer
- * descriptor.
- */
-extern void prom_seek(int device_handle, unsigned int seek_hival,
- unsigned int seek_lowval);
-
/* Miscellaneous routines, don't really fit in any category per se. */
/* Reboot the machine with the command line passed. */
/* Get the prom firmware revision. */
extern int prom_getprev(void);
-/* Character operations to/from the console.... */
-
-/* Non-blocking get character from console. */
-extern int prom_nbgetchar(void);
-
-/* Non-blocking put character to console. */
-extern int prom_nbputchar(char character);
-
-/* Blocking get character from console. */
-extern char prom_getchar(void);
-
-/* Blocking put character to console. */
-extern void prom_putchar(char character);
+/* Write a buffer of characters to the console. */
+extern void prom_console_write_buf(const char *buf, int len);
/* Prom's internal routines, don't use in kernel/boot code. */
extern void prom_printf(const char *fmt, ...);
extern int prom_setprop(int node, const char *prop_name, char *prop_value,
int value_size);
-extern int prom_pathtoinode(char *path);
extern int prom_inst2pkg(int);
/* Dorking with Bus ranges... */
/* Boot argument acquisition, returns the boot command line string. */
extern char *prom_getbootargs(void);
-/* Device utilities. */
-
-/* Device operations. */
-
-/* Open the device described by the passed string. Note, that the format
- * of the string is different on V0 vs. V2->higher proms. The caller must
- * know what he/she is doing! Returns the device descriptor, an int.
- */
-extern int prom_devopen(const char *device_string);
-
-/* Close a previously opened device described by the passed integer
- * descriptor.
- */
-extern int prom_devclose(int device_handle);
-
-/* Do a seek operation on the device described by the passed integer
- * descriptor.
- */
-extern void prom_seek(int device_handle, unsigned int seek_hival,
- unsigned int seek_lowval);
-
/* Miscellaneous routines, don't really fit in any category per se. */
/* Reboot the machine with the command line passed. */
/* Halt and power-off the machine. */
extern void prom_halt_power_off(void) __attribute__ ((noreturn));
-/* Set the PROM 'sync' callback function to the passed function pointer.
- * When the user gives the 'sync' command at the prom prompt while the
- * kernel is still active, the prom will call this routine.
- *
- */
-typedef int (*callback_func_t)(long *cmd);
-extern void prom_setcallback(callback_func_t func_ptr);
-
/* Acquire the IDPROM of the root node in the prom device tree. This
* gets passed a buffer where you would like it stuffed. The return value
* is the format type of this idprom or 0xff on error.
*/
extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
-/* Character operations to/from the console.... */
-
-/* Non-blocking get character from console. */
-extern int prom_nbgetchar(void);
-
-/* Non-blocking put character to console. */
-extern int prom_nbputchar(char character);
-
-/* Blocking get character from console. */
-extern char prom_getchar(void);
-
-/* Blocking put character to console. */
-extern void prom_putchar(char character);
+/* Write a buffer of characters to the console. */
+extern void prom_console_write_buf(const char *buf, int len);
/* Prom's internal routines, don't use in kernel/boot code. */
extern void prom_printf(const char *fmt, ...);
extern int prom_setprop(int node, const char *prop_name, char *prop_value,
int value_size);
-extern int prom_pathtoinode(const char *path);
extern int prom_inst2pkg(int);
-extern int prom_service_exists(const char *service_name);
extern void prom_sun4v_guest_soft_state(void);
extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
if (leon3_gptimer_regs && leon3_irqctrl_regs) {
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
- (((1000000 / 100) - 1)));
+ (((1000000 / HZ) - 1)));
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
#ifdef CONFIG_SMP
}
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0);
- LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1)));
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1)));
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0);
# endif
lib-y := bootstr_$(BITS).o
lib-$(CONFIG_SPARC32) += devmap.o
-lib-y += devops_$(BITS).o
lib-y += init_$(BITS).o
lib-$(CONFIG_SPARC32) += memory.o
lib-y += misc_$(BITS).o
extern void restore_current(void);
-/* Non blocking get character from console input device, returns -1
- * if no input was taken. This can be used for polling.
- */
-int
-prom_nbgetchar(void)
-{
- static char inc;
- int i = -1;
- unsigned long flags;
-
- spin_lock_irqsave(&prom_lock, flags);
- switch(prom_vers) {
- case PROM_V0:
- i = (*(romvec->pv_nbgetchar))();
- break;
- case PROM_V2:
- case PROM_V3:
- if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
- i = inc;
- } else {
- i = -1;
- }
- break;
- default:
- i = -1;
- break;
- };
- restore_current();
- spin_unlock_irqrestore(&prom_lock, flags);
- return i; /* Ugh, we could spin forever on unsupported proms ;( */
-}
-
/* Non blocking put character to console device, returns -1 if
* unsuccessful.
*/
-int
-prom_nbputchar(char c)
+static int prom_nbputchar(const char *buf)
{
- static char outc;
unsigned long flags;
int i = -1;
spin_lock_irqsave(&prom_lock, flags);
switch(prom_vers) {
case PROM_V0:
- i = (*(romvec->pv_nbputchar))(c);
+ i = (*(romvec->pv_nbputchar))(*buf);
break;
case PROM_V2:
case PROM_V3:
- outc = c;
- if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
+ if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout,
+ buf, 0x1) == 1)
i = 0;
- else
- i = -1;
break;
default:
- i = -1;
break;
};
restore_current();
return i; /* Ugh, we could spin forever on unsupported proms ;( */
}
-/* Blocking version of get character routine above. */
-char
-prom_getchar(void)
+void prom_console_write_buf(const char *buf, int len)
{
- int character;
- while((character = prom_nbgetchar()) == -1) ;
- return (char) character;
+ while (len) {
+ int n = prom_nbputchar(buf);
+ if (n)
+ continue;
+ len--;
+ buf++;
+ }
}
-/* Blocking version of put character routine above. */
-void
-prom_putchar(char c)
-{
- while(prom_nbputchar(c) == -1) ;
-}
extern int prom_stdin, prom_stdout;
-/* Non blocking get character from console input device, returns -1
- * if no input was taken. This can be used for polling.
- */
-inline int
-prom_nbgetchar(void)
-{
- unsigned long args[7];
- char inc;
-
- args[0] = (unsigned long) "read";
- args[1] = 3;
- args[2] = 1;
- args[3] = (unsigned int) prom_stdin;
- args[4] = (unsigned long) &inc;
- args[5] = 1;
- args[6] = (unsigned long) -1;
-
- p1275_cmd_direct(args);
-
- if (args[6] == 1)
- return inc;
- return -1;
-}
-
-/* Non blocking put character to console device, returns -1 if
- * unsuccessful.
- */
-inline int
-prom_nbputchar(char c)
+static int __prom_console_write_buf(const char *buf, int len)
{
unsigned long args[7];
- char outc;
-
- outc = c;
+ int ret;
args[0] = (unsigned long) "write";
args[1] = 3;
args[2] = 1;
args[3] = (unsigned int) prom_stdout;
- args[4] = (unsigned long) &outc;
- args[5] = 1;
+ args[4] = (unsigned long) buf;
+ args[5] = (unsigned int) len;
args[6] = (unsigned long) -1;
p1275_cmd_direct(args);
- if (args[6] == 1)
- return 0;
- else
+ ret = (int) args[6];
+ if (ret < 0)
return -1;
+ return ret;
}
-/* Blocking version of get character routine above. */
-char
-prom_getchar(void)
-{
- int character;
- while((character = prom_nbgetchar()) == -1) ;
- return (char) character;
-}
-
-/* Blocking version of put character routine above. */
-void
-prom_putchar(char c)
+void prom_console_write_buf(const char *buf, int len)
{
- prom_nbputchar(c);
-}
-
-void
-prom_puts(const char *s, int len)
-{
- unsigned long args[7];
-
- args[0] = (unsigned long) "write";
- args[1] = 3;
- args[2] = 1;
- args[3] = (unsigned int) prom_stdout;
- args[4] = (unsigned long) s;
- args[5] = len;
- args[6] = (unsigned long) -1;
-
- p1275_cmd_direct(args);
+ while (len) {
+ int n = __prom_console_write_buf(buf, len);
+ if (n < 0)
+ continue;
+ len -= n;
+		buf += n;
+ }
}
+++ /dev/null
-/*
- * devops.c: Device operations using the PROM.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-extern void restore_current(void);
-
-/* Open the device described by the string 'dstr'. Returns the handle
- * to that device used for subsequent operations on that device.
- * Returns -1 on failure.
- */
-int
-prom_devopen(char *dstr)
-{
- int handle;
- unsigned long flags;
- spin_lock_irqsave(&prom_lock, flags);
- switch(prom_vers) {
- case PROM_V0:
- handle = (*(romvec->pv_v0devops.v0_devopen))(dstr);
- if(handle == 0) handle = -1;
- break;
- case PROM_V2:
- case PROM_V3:
- handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr);
- break;
- default:
- handle = -1;
- break;
- };
- restore_current();
- spin_unlock_irqrestore(&prom_lock, flags);
-
- return handle;
-}
-
-/* Close the device described by device handle 'dhandle'. */
-int
-prom_devclose(int dhandle)
-{
- unsigned long flags;
- spin_lock_irqsave(&prom_lock, flags);
- switch(prom_vers) {
- case PROM_V0:
- (*(romvec->pv_v0devops.v0_devclose))(dhandle);
- break;
- case PROM_V2:
- case PROM_V3:
- (*(romvec->pv_v2devops.v2_dev_close))(dhandle);
- break;
- default:
- break;
- };
- restore_current();
- spin_unlock_irqrestore(&prom_lock, flags);
- return 0;
-}
-
-/* Seek to specified location described by 'seekhi' and 'seeklo'
- * for device 'dhandle'.
- */
-void
-prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
-{
- unsigned long flags;
- spin_lock_irqsave(&prom_lock, flags);
- switch(prom_vers) {
- case PROM_V0:
- (*(romvec->pv_v0devops.v0_seekdev))(dhandle, seekhi, seeklo);
- break;
- case PROM_V2:
- case PROM_V3:
- (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo);
- break;
- default:
- break;
- };
- restore_current();
- spin_unlock_irqrestore(&prom_lock, flags);
-}
+++ /dev/null
-/*
- * devops.c: Device operations using the PROM.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-/* Open the device described by the string 'dstr'. Returns the handle
- * to that device used for subsequent operations on that device.
- * Returns 0 on failure.
- */
-int
-prom_devopen(const char *dstr)
-{
- unsigned long args[5];
-
- args[0] = (unsigned long) "open";
- args[1] = 1;
- args[2] = 1;
- args[3] = (unsigned long) dstr;
- args[4] = (unsigned long) -1;
-
- p1275_cmd_direct(args);
-
- return (int) args[4];
-}
-
-/* Close the device described by device handle 'dhandle'. */
-int
-prom_devclose(int dhandle)
-{
- unsigned long args[4];
-
- args[0] = (unsigned long) "close";
- args[1] = 1;
- args[2] = 0;
- args[3] = (unsigned int) dhandle;
-
- p1275_cmd_direct(args);
-
- return 0;
-}
-
-/* Seek to specified location described by 'seekhi' and 'seeklo'
- * for device 'dhandle'.
- */
-void
-prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
-{
- unsigned long args[7];
-
- args[0] = (unsigned long) "seek";
- args[1] = 3;
- args[2] = 1;
- args[3] = (unsigned int) dhandle;
- args[4] = seekhi;
- args[5] = seeklo;
- args[6] = (unsigned long) -1;
-
- p1275_cmd_direct(args);
-}
#include <asm/system.h>
#include <asm/ldc.h>
-int prom_service_exists(const char *service_name)
+static int prom_service_exists(const char *service_name)
{
unsigned long args[5];
prom_halt();
}
-/* Set prom sync handler to call function 'funcp'. */
-void prom_setcallback(callback_func_t funcp)
-{
- unsigned long args[5];
- if (!funcp)
- return;
- args[0] = (unsigned long) "set-callback";
- args[1] = 1;
- args[2] = 1;
- args[3] = (unsigned long) funcp;
- args[4] = (unsigned long) -1;
- p1275_cmd_direct(args);
-}
-
/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
* format type. 'num_bytes' is the number of bytes that your idbuf
* has space for. Returns 0xff on error.
#include <linux/kernel.h>
#include <linux/compiler.h>
+#include <linux/spinlock.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
+#define CONSOLE_WRITE_BUF_SIZE 1024
+
static char ppbuf[1024];
+static char console_write_buf[CONSOLE_WRITE_BUF_SIZE];
+static DEFINE_RAW_SPINLOCK(console_write_lock);
void notrace prom_write(const char *buf, unsigned int n)
{
- char ch;
+ unsigned int dest_len;
+ unsigned long flags;
+ char *dest;
+
+ dest = console_write_buf;
+ raw_spin_lock_irqsave(&console_write_lock, flags);
- while (n != 0) {
- --n;
- if ((ch = *buf++) == '\n')
- prom_putchar('\r');
- prom_putchar(ch);
+ dest_len = 0;
+ while (n-- != 0) {
+ char ch = *buf++;
+ if (ch == '\n') {
+ *dest++ = '\r';
+ dest_len++;
+ }
+ *dest++ = ch;
+ dest_len++;
+ if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) {
+ prom_console_write_buf(console_write_buf, dest_len);
+ dest = console_write_buf;
+ dest_len = 0;
+ }
}
+ if (dest_len)
+ prom_console_write_buf(console_write_buf, dest_len);
+
+ raw_spin_unlock_irqrestore(&console_write_lock, flags);
}
void notrace prom_printf(const char *fmt, ...)
if (node == -1) return 0;
return node;
}
-
-/* Return 'node' assigned to a particular prom 'path'
- * FIXME: Should work for v0 as well
- */
-int prom_pathtoinode(char *path)
-{
- int node, inst;
-
- inst = prom_devopen (path);
- if (inst == -1) return 0;
- node = prom_inst2pkg (inst);
- prom_devclose (inst);
- if (node == -1) return 0;
- return node;
-}
return node;
}
-/* Return 'node' assigned to a particular prom 'path'
- * FIXME: Should work for v0 as well
- */
-int
-prom_pathtoinode(const char *path)
-{
- int node, inst;
-
- inst = prom_devopen (path);
- if (inst == 0)
- return 0;
- node = prom_inst2pkg(inst);
- prom_devclose(inst);
- if (node == -1)
- return 0;
- return node;
-}
-
int prom_ihandle2path(int handle, char *buffer, int bufsize)
{
unsigned long args[7];
childregs->regs[0] = 0; /* return value is zero */
childregs->sp = sp; /* override with new user stack pointer */
+ /*
+ * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
+ * which is passed in as arg #5 to sys_clone().
+ */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->tp = regs->regs[4];
+
/*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
static void free_winch(struct winch *winch, int free_irq_ok)
{
+ if (free_irq_ok)
+ free_irq(WINCH_IRQ, winch);
+
list_del(&winch->list);
if (winch->pid != -1)
os_close_file(winch->fd);
if (winch->stack != 0)
free_stack(winch->stack, 0);
- if (free_irq_ok)
- free_irq(WINCH_IRQ, winch);
kfree(winch);
}
_text = .;
_stext = .;
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);
.text :
long long disable_timer(void)
{
struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
- int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+ long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
printk(UM_KERN_ERR "disable_timer - setitimer failed, "
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
extern void iounmap(volatile void __iomem *addr);
+extern void set_iounmap_nonlazy(void);
#ifdef __KERNEL__
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8
};
extern enum mrst_cpu_type __mrst_cpu_chip;
-static enum mrst_cpu_type mrst_identify_cpu(void)
+static inline enum mrst_cpu_type mrst_identify_cpu(void)
{
return __mrst_cpu_chip;
}
extern unsigned long idle_nomwait;
extern bool c1e_detected;
-/*
- * on systems with caches, caches must be flashed as the absolute
- * last instruction before going into a suspended halt. Otherwise,
- * dirty data can linger in the cache and become stale on resume,
- * leading to strange errors.
- *
- * perform a variety of operations to guarantee that the compiler
- * will not reorder instructions. wbinvd itself is serializing
- * so the processor will not reorder.
- *
- * Systems without cache can just go into halt.
- */
-static inline void wbinvd_halt(void)
-{
- mb();
- /* check for clflush to determine if wbinvd is legal */
- if (cpu_has_clflush)
- asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
- else
- while (1)
- halt();
-}
-
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
- void (*smp_send_stop)(void);
+ void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
static inline void smp_send_stop(void)
{
- smp_ops.smp_send_stop();
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
}
static inline void smp_prepare_boot_cpu(void)
setup_apic_nmi_watchdog(NULL);
apic_pm_activate();
+
+ /*
+ * Now that local APIC setup is completed for BP, configure the fault
+ * handling for interrupt remapping.
+ */
+ if (!smp_processor_id() && intr_remapping_enabled)
+ enable_drhd_fault_handling();
+
}
#ifdef CONFIG_X86_X2APIC
irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = vector;
irte.dest_id = IRTE_DEST(destination);
+ irte.redir_hint = 1;
/* Set source-id of interrupt request */
set_ioapic_sid(&irte, apic_id);
irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
+ irte.redir_hint = 1;
/* Set source-id of interrupt request */
if (pdev)
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+ msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
dmar_msi_write(irq, &msg);
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
}
-
- /*
- * Now that apic routing model is selected, configure the
- * fault handling for intr remapping.
- */
- if (intr_remapping_enabled)
- enable_drhd_fault_handling();
}
/* Same for both flat and physical. */
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
/* fixup topology information on multi-node processors */
- if ((c->x86 == 0x10) && (c->x86_model == 9))
- amd_fixup_dcm(c);
+ amd_fixup_dcm(c);
#endif
}
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
+ kfree(data->freq_table);
kfree(data);
}
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return 0;
- if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+ [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
- [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+ [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
} else
memcpy(buf, vaddr + offset, csize);
+ set_iounmap_nonlazy();
iounmap(vaddr);
return csize;
}
dr6_p = (unsigned long *)ERR_PTR(args->err);
dr6 = *dr6_p;
+ /* If it's a single step, TRAP bits are random */
+ if (dr6 & DR_STEP)
+ return NOTIFY_DONE;
+
/* Do an early return if no trap bits are set in DR6 */
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
- if (mc)
- vfree(mc);
+ vfree(mc);
mc = vmalloc(mc_size);
if (!mc)
break;
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
- vfree(mc);
break;
}
if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
- if (new_mc)
- vfree(new_mc);
+ vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
mc = NULL; /* trigger new vmalloc */
leftover -= mc_size;
}
- if (mc)
- vfree(mc);
+ vfree(mc);
if (leftover) {
- if (new_mc)
- vfree(new_mc);
+ vfree(new_mc);
state = UCODE_ERROR;
goto out;
}
goto out;
}
- if (uci->mc)
- vfree(uci->mc);
+ vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
unsigned long flags;
int ret = -EIO;
int i;
+ int restarts = 0;
spin_lock_irqsave(&ec_lock, flags);
if (wait_on_obf(0x6c, 1)) {
printk(KERN_ERR "olpc-ec: timeout waiting for"
" EC to provide data!\n");
- goto restart;
+ if (restarts++ < 10)
+ goto restart;
+ goto err;
}
outbuf[i] = inb(0x68);
pr_devel("olpc-ec: received 0x%x\n", outbuf[i]);
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
- smp_send_stop();
+ stop_other_cpus();
#endif
lapic_shutdown();
irq_exit();
}
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
{
unsigned long flags;
- unsigned long wait;
+ unsigned long timeout;
if (reboot_force)
return;
if (num_online_cpus() > 1) {
apic->send_IPI_allbutself(REBOOT_VECTOR);
- /* Don't wait longer than a second */
- wait = USEC_PER_SEC;
- while (num_online_cpus() > 1 && wait--)
+ /*
+ * Don't wait longer than a second if the caller
+ * didn't ask us to wait.
+ */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
- .smp_send_stop = native_smp_send_stop,
+ .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
local_irq_disable();
}
+#define MWAIT_SUBSTATE_MASK 0xf
+#define MWAIT_SUBSTATE_SIZE 4
+
+#define CPUID_MWAIT_LEAF 5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+ void *mwait_ptr;
+
+	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+ return;
+	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+ return;
+ if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ eax = CPUID_MWAIT_LEAF;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ /*
+ * eax will be 0 if EDX enumeration is not valid.
+ * Initialized below to cstate, sub_cstate value when EDX is valid.
+ */
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+ eax = 0;
+ } else {
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+ }
+
+ /*
+ * This should be a memory location in a cache line which is
+ * unlikely to be touched by other processors. The actual
+ * content is immaterial as it is not actually modified in any way.
+ */
+	mwait_ptr = &current_thread_info()->flags;
+
+ wbinvd();
+
+ while (1) {
+ /*
+ * The CLFLUSH is a workaround for erratum AAI65 for
+ * the Xeon 7400 series. It's not clear it is actually
+ * needed, but it should be harmless in either case.
+ * The WBINVD is insufficient due to the spurious-wakeup
+ * case where we return around the loop.
+ */
+ clflush(mwait_ptr);
+ __monitor(mwait_ptr, 0, 0);
+ mb();
+ __mwait(eax, 0);
+ }
+}
+
+static inline void hlt_play_dead(void)
+{
+ if (current_cpu_data.x86 >= 4)
+ wbinvd();
+
+ while (1) {
+ native_halt();
+ }
+}
+
void native_play_dead(void)
{
play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS);
- wbinvd_halt();
+
+ mwait_play_dead(); /* Only returns on failure */
+ hlt_play_dead();
}
#else /* ... !CONFIG_HOTPLUG_CPU */
if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, 1);
+ preempt_conditional_cli(regs);
return;
}
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
if (VMPI.is_vm86pus) {
- if ((trapno == 3) || (trapno == 1))
- return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+ if ((trapno == 3) || (trapno == 1)) {
+ KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+ /* setting this flag forces the code in entry_32.S to
+ call save_v86_state() and change the stack pointer
+ to KVM86->regs32 */
+ set_thread_flag(TIF_IRET);
+ return 0;
+ }
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0;
}
* Setup init_xstate_buf to represent the init state of
* all the features managed by the xsave
*/
- init_xstate_buf = alloc_bootmem(xstate_size);
+ init_xstate_buf = alloc_bootmem_align(xstate_size,
+ __alignof__(struct xsave_struct));
init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
clts();
/* A VMEXIT is required but not yet emulated */
bool exit_required;
+ /*
+ * If we vmexit during an instruction emulation we need this to restore
+ * the l1 guest rip after the emulation
+ */
+ unsigned long vmexit_rip;
+ unsigned long vmexit_rsp;
+ unsigned long vmexit_rax;
+
/* cache for intercepts of the guest */
u16 intercept_cr_read;
u16 intercept_cr_write;
if (old == new) {
/* cr0 write with ts and mp unchanged */
svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
- if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+ if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
+ svm->nested.vmexit_rip = kvm_rip_read(vcpu);
+ svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
return;
+ }
}
}
return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
}
+static int cr0_write_interception(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ int r;
+
+ r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+
+ if (svm->nested.vmexit_rip) {
+ kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
+ svm->nested.vmexit_rip = 0;
+ }
+
+ return r == EMULATE_DONE;
+}
+
static int cr8_write_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
[SVM_EXIT_READ_CR4] = emulate_on_interception,
[SVM_EXIT_READ_CR8] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
load_host_msrs(vcpu);
+ kvm_load_ldt(ldt_selector);
loadsegment(fs, fs_selector);
#ifdef CONFIG_X86_64
load_gs_index(gs_selector);
#else
loadsegment(gs, gs_selector);
#endif
- kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
switch (func) {
+ case 0x00000001:
+ /* Mask out xsave bit as long as it is not supported by SVM */
+ entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+ break;
+ case 0x80000001:
+ if (nested)
+ entry->ecx |= (1 << 2); /* Set SVM bit */
+ break;
case 0x8000000A:
entry->eax = 1; /* SVM revision 1 */
entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- }
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_reload_needed)
- loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
+ if (vmx->host_state.fs_reload_needed)
+ loadsegment(fs, vmx->host_state.fs_sel);
reload_tss();
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
- }
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (current_thread_info()->status & TS_USEDFPU)
clts();
return PT_PDPE_LEVEL;
}
-static inline u32 bit(int bitno)
-{
- return 1 << (bitno & 31);
-}
-
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
u64 __read_mostly host_xcr0;
-static inline u32 bit(int bitno)
-{
- return 1 << (bitno & 31);
-}
-
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
- F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+ F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
- F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
+ F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
0 /* SKINIT */ | 0 /* WDT */;
/* all calls to cpuid_count() should be made on the same cpu */
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+ events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+ events->nmi.pad = 0;
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
+ memset(&events->reserved, 0, sizeof(events->reserved));
}
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
+ memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
struct kvm_memslots *slots, *old_slots;
unsigned long *dirty_bitmap;
- spin_lock(&kvm->mmu_lock);
- kvm_mmu_slot_remove_write_access(kvm, log->slot);
- spin_unlock(&kvm->mmu_lock);
-
r = -ENOMEM;
dirty_bitmap = vmalloc(n);
if (!dirty_bitmap)
dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
kfree(old_slots);
+ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
+
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
vfree(dirty_bitmap);
now_ns = timespec_to_ns(&now);
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
user_ns.flags = 0;
+ memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+ if (sregs->cr4 & X86_CR4_OSXSAVE)
+ update_cpuid(vcpu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.cr3);
mmu_reset_needed = 1;
return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
+static inline u32 bit(int bitno)
+{
+ return 1 << (bitno & 31);
+}
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
return 0;
}
-/* initialize the APIC for the IBS interrupts if available */
+/*
+ * check and reserve APIC extended interrupt LVT offset for IBS if
+ * available
+ *
+ * init_ibs() performs implicitly cpu-local operations, so pin this
+ * thread to its current CPU
+ */
+
static void init_ibs(void)
{
- ibs_caps = get_ibs_caps();
+ preempt_disable();
+ ibs_caps = get_ibs_caps();
if (!ibs_caps)
- return;
+ goto out;
- if (__init_ibs_nmi()) {
+ if (__init_ibs_nmi() < 0)
ibs_caps = 0;
- return;
- }
+ else
+ printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
- (unsigned)ibs_caps);
+out:
+ preempt_enable();
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
export CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
# This makes sure the $(obj) subdirectory exists even though vdso32/
# is not a kbuild sub-make subdirectory.
{
struct sched_shutdown r = { .reason = reason };
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
-
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
BUG();
}
BUG();
}
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
{
- smp_call_function(stop_self, NULL, 0);
+ smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
.cpu_disable = xen_cpu_disable,
.play_dead = xen_play_dead,
- .smp_send_stop = xen_smp_send_stop,
+ .stop_other_cpus = xen_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
unaligned = 1;
break;
}
+ if (!iov[i].iov_len)
+ return -EINVAL;
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
return 0;
fbio = bio;
- cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
- if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ if (!blk_queue_cluster(q))
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
int nsegs, cluster;
nsegs = 0;
- cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ cluster = blk_queue_cluster(q);
/*
* for each bio in rq
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->no_cluster = 0;
+ lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
* hardware can operate on without reverting to read-modify-write
* operations.
*/
-void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.physical_block_size = size;
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
blk_stack_limits(&t->limits, &b->limits, 0);
-
- if (!t->queue_lock)
- WARN_ON_ONCE(1);
- else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
- unsigned long flags;
- spin_lock_irqsave(t->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
- spin_unlock_irqrestore(t->queue_lock, flags);
- }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
- t->no_cluster |= b->no_cluster;
+ t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
/* Physical block size a multiple of the logical block size? */
sector_t offset)
{
struct request_queue *t = disk->queue;
- struct request_queue *b = bdev_get_queue(bdev);
if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
-
- if (!t->queue_lock)
- WARN_ON_ONCE(1);
- else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
- unsigned long flags;
-
- spin_lock_irqsave(t->queue_lock, flags);
- if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
- queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
- spin_unlock_irqrestore(t->queue_lock, flags);
- }
}
EXPORT_SYMBOL(disk_stack_limits);
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_CACHE_SIZE, (page));
disk->major = MAJOR(devt);
disk->first_minor = MINOR(devt);
+ /* Register BDI before referencing it from bdev */
+ bdi = &disk->queue->backing_dev_info;
+ bdi_register_dev(bdi, disk_devt(disk));
+
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
register_disk(disk);
blk_register_queue(disk);
- bdi = &disk->queue->backing_dev_info;
- bdi_register_dev(bdi, disk_devt(disk));
retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
"bdi");
WARN_ON(retval);
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *iov;
+ struct sg_iovec *sg_iov;
+ struct iovec *iov;
+ int i;
- iov = kmalloc(size, GFP_KERNEL);
- if (!iov) {
+ sg_iov = kmalloc(size, GFP_KERNEL);
+ if (!sg_iov) {
ret = -ENOMEM;
goto out;
}
- if (copy_from_user(iov, hdr->dxferp, size)) {
- kfree(iov);
+ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+ kfree(sg_iov);
ret = -EFAULT;
goto out;
}
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov = (struct iovec *) sg_iov;
+ iov_data_len = 0;
+ for (i = 0; i < hdr->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len) {
+ kfree(sg_iov);
+ ret = -EINVAL;
+ goto out;
+ }
+ iov_data_len += iov[i].iov_len;
+ }
+
/* SG_IO howto says that the shorter of the two wins */
- iov_data_len = iov_length((struct iovec *)iov,
- hdr->iovec_count);
if (hdr->dxfer_len < iov_data_len) {
- hdr->iovec_count = iov_shorten((struct iovec *)iov,
+ hdr->iovec_count = iov_shorten(iov,
hdr->iovec_count,
hdr->dxfer_len);
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(iov);
+ kfree(sg_iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
* we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
+ *
+ * Note 10/2010: Except for the Scope() op. This opcode does
+ * not actually create a new object, it refers to an existing
+ * object. However, for Scope(), we want to indeed open a
+ * new scope.
*/
- status = acpi_ds_load2_begin_op(walk_state, NULL);
+ if (op->common.aml_opcode != AML_SCOPE_OP) {
+ status =
+ acpi_ds_load2_begin_op(walk_state, NULL);
+ } else {
+ status =
+ acpi_ds_scope_stack_push(op->named.node,
+ op->named.node->
+ type, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
}
-
break;
case AML_CLASS_EXECUTE:
* due to bad math.
*/
ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
result = extract_package(battery, buffer.pointer,
info_offsets, ARRAY_SIZE(info_offsets));
kfree(buffer.pointer);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ battery->full_charge_capacity = battery->design_capacity;
return result;
}
battery->rate_now != -1)
battery->rate_now = abs((s16)battery->rate_now);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
+ && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
return result;
}
}
}
+/*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report percentage battery remaining capacity directly to OS.
+ * In this case, it reports the Last Full Charged Capacity == 100
+ * and BatteryPresentRate == 0xFFFFFFFF.
+ *
+ * Now we have found some batteries that report percentage remaining
+ * capacity even though they are rechargeable.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15979
+ *
+ * Handle this correctly so that they won't break userspace.
+ */
+static void acpi_battery_quirks2(struct acpi_battery *battery)
+{
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ return ;
+
+ if (battery->full_charge_capacity == 100 &&
+ battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
+ battery->capacity_now >=0 && battery->capacity_now <= 100) {
+ set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
+ battery->full_charge_capacity = battery->design_capacity;
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
+ }
+}
+
static int acpi_battery_update(struct acpi_battery *battery)
{
int result, old_present = acpi_battery_present(battery);
if (!battery->bat.dev)
sysfs_add_battery(battery);
#endif
- return acpi_battery_get_state(battery);
+ result = acpi_battery_get_state(battery);
+ acpi_battery_quirks2(battery);
+ return result;
}
/* --------------------------------------------------------------------------
goto error1;
}
+ /*
+ * _PDC control method may load dynamic SSDT tables,
+ * and we need to install the table handler before that.
+ */
+ acpi_sysfs_init();
+
acpi_early_processor_set_pdc();
/*
acpi_scan_init();
acpi_ec_init();
acpi_power_init();
- acpi_sysfs_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
if (!acpi_dir)
goto err;
- cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_dir, NULL, &cm_fops);
if (!cm_dentry)
goto err;
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
{
+ ec_flag_msi, "MSI hardware", {
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
{},
AHCI_CMD_RESET = (1 << 8),
AHCI_CMD_CLR_BUSY = (1 << 10),
+ RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ u8 *rx_fis = pp->rx_fis;
if (pp->fbs_enabled)
- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+ rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+ /*
+ * After a successful execution of an ATA PIO data-in command,
+ * the device doesn't send D2H Reg FIS to update the TF and
+ * the host should take TF and E_Status from the preceding PIO
+ * Setup FIS.
+ */
+ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+ !(qc->flags & ATA_QCFLAG_FAILED)) {
+ ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
+ qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ } else
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
- ata_tf_from_fis(d2h_fis, &qc->result_tf);
return true;
}
*
* If door lock fails, always clear sdev->locked to
* avoid this infinite loop.
+ *
+ * This may happen before SCSI scan is complete. Make
+ * sure qc->dev->sdev isn't NULL before dereferencing.
*/
- if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+ if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
return ata_sff_idle_irq(ap);
break;
- case HSM_ST:
- case HSM_ST_LAST:
- break;
- default:
+ case HSM_ST_IDLE:
return ata_sff_idle_irq(ap);
+ default:
+ break;
}
/* check main status, clearing INTRQ if needed */
return 0;
}
-static void svia_configure(struct pci_dev *pdev)
+static void svia_configure(struct pci_dev *pdev, int board_id)
{
u8 tmp8;
}
/*
- * vt6421 has problems talking to some drives. The following
+ * vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <JosephChan@via.com.tw>.
*
* When host issues HOLD, device may send up to 20DW of data
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
+ * http://thread.gmane.org/gmane.linux.kernel/1062139
*/
- if (pdev->device == 0x3249) {
+ if (board_id == vt6420 || board_id == vt6421) {
pci_read_config_byte(pdev, 0x52, &tmp8);
tmp8 |= 1 << 2;
pci_write_config_byte(pdev, 0x52, tmp8);
if (rc)
return rc;
- svia_configure(pdev);
+ svia_configure(pdev, board_id);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
static const struct block_device_operations xlvbd_block_fops;
-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
/*
* We have one of these per vbd, whether ide, scsi or 'other'. They
BT_DBG("tty %p", tty);
+ /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
+ the pointer */
if (hu)
return -EEXIST;
+ /* Error if the tty has no write op instead of leaving an exploitable
+ hole */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+
if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- if (IS_I965) {
+ if (IS_G33 || IS_I965) {
u32 pgetbl_ctl;
pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
size = 512;
}
size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
} else if (IS_G4X || IS_PINEVIEW) {
/* On 4 series hardware, GTT stolen is separate from graphics
* stolen, ignore it in stolen gtt entries counting. However,
int size;
if (IS_G33) {
- u16 gmch_ctrl;
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
- /* G33's GTT size defined in gmch_ctrl */
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = 128;
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = 256;
+ break;
+ case I965_PGETBL_SIZE_512KB:
size = 512;
break;
- case I830_GMCH_GMS_STOLEN_1024:
+ case I965_PGETBL_SIZE_1MB:
size = 1024;
break;
- case I830_GMCH_GMS_STOLEN_8192:
- size = 8*1024;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
break;
default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & I830_GMCH_GMS_MASK));
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
size = 512;
}
} else {
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
- gtt_map_size = intel_i915_get_gtt_size();
-
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
-
temp &= 0xfff80000;
intel_private.registers = ioremap(temp, 128 * 4096);
return -ENOMEM;
}
+ gtt_map_size = intel_i915_get_gtt_size();
+
+ intel_private.gtt = ioremap(temp2, gtt_map_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
global_cache_flush(); /* FIXME: ? */
if (irq) {
unsigned long irq_flags;
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
irq_flags = devp->hd_flags & HPET_SHARED_IRQ
? IRQF_SHARED : IRQF_DISABLED;
return -ENODEV;
if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
printk("%s: no address or irqs in _CRS\n", __func__);
return -ENODEV;
}
return 0;
}
+/*
+ * Allocate a zeroed struct smi_info and initialise its spinlocks, so
+ * every allocation site gets locks that are valid from the moment the
+ * structure exists (rather than relying on a later init that can race).
+ * Returns NULL on allocation failure.
+ */
+static struct smi_info *smi_info_alloc(void)
+{
+ struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info) {
+ spin_lock_init(&info->si_lock);
+ spin_lock_init(&info->msg_lock);
+ }
+ return info;
+}
+
static int hotmod_handler(const char *val, struct kernel_param *kp)
{
char *str = kstrdup(val, GFP_KERNEL);
}
if (op == HM_ADD) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
rv = -ENOMEM;
goto out;
if (!ports[i] && !addrs[i])
continue;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
return -ENODEV;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
if (!acpi_dev)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
{
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
dev_err(&dev->dev,
if (check_legacy_ioport(ipmi_defaults[i].port))
continue;
#endif
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
goto out_err;
}
- spin_lock_init(&(new_smi->si_lock));
- spin_lock_init(&(new_smi->msg_lock));
-
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
if (msg->len < 128)
*--dp = (msg->len << 1) | EA;
else {
- *--dp = (msg->len >> 6) | EA;
- *--dp = (msg->len & 127) << 1;
+ *--dp = (msg->len >> 7); /* bits 7 - 15 */
+ *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */
}
}
{
struct gsm_msg *msg;
msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+ if (msg == NULL)
+ return;
msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
msg->data[1] = (dlen << 1) | EA;
memcpy(msg->data + 2, data, dlen);
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
+ memset(&new_line, 0, size);
+
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
#include <linux/ioport.h>
#define RAMOOPS_KERNMSG_HDR "===="
-#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
#define RECORD_SIZE 4096
struct ramoops_context, dump);
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
- int res;
- char *buf;
+ int res, hdr_size;
+ char *buf, *buf_orig;
struct timeval timestamp;
/* Only dump oopses if dump_oops is set */
return;
buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+ buf_orig = buf;
+
memset(buf, '\0', RECORD_SIZE);
res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
buf += res;
res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
buf += res;
- l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
- l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
+ hdr_size = buf - buf_orig;
+ l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
+ l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
spin_lock_irqsave(&tty->buf.lock, flags);
if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
- struct tty_buffer *head;
+ struct tty_buffer *head, *tail = tty->buf.tail;
+ int seen_tail = 0;
while ((head = tty->buf.head) != NULL) {
int count;
char *char_buf;
if (!count) {
if (head->next == NULL)
break;
+ /*
+ There's a possibility that the tty might get a new
+ buffer added during the unlock window below. We
+ could end up spinning in here forever, hogging the
+ CPU completely. To avoid this, take a rest each
+ time we process the tail buffer.
+ */
+ if (tail == head)
+ seen_tail = 1;
tty->buf.head = head->next;
tty_buffer_free(tty, head);
continue;
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
- if (!tty->receive_room) {
+ if (!tty->receive_room || seen_tail) {
schedule_delayed_work(&tty->buf.work, 1);
break;
}
tty_lock();
+ /* some functions below drop BTM, so we need this bit */
+ set_bit(TTY_HUPPING, &tty->flags);
+
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
}
spin_unlock(&tty_files_lock);
+ /*
+ * it drops BTM and thus races with reopen
+ * we protect the race by TTY_HUPPING
+ */
tty_ldisc_hangup(tty);
read_lock(&tasklist_lock);
tty->session = NULL;
tty->pgrp = NULL;
tty->ctrl_status = 0;
- set_bit(TTY_HUPPED, &tty->flags);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
/* Account for the p->signal references we killed */
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
tty_ldisc_enable(tty);
tty_unlock();
{
struct tty_driver *driver = tty->driver;
- if (test_bit(TTY_CLOSING, &tty->flags))
+ if (test_bit(TTY_CLOSING, &tty->flags) ||
+ test_bit(TTY_HUPPING, &tty->flags) ||
+ test_bit(TTY_LDISC_CHANGING, &tty->flags))
return -EIO;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
static DEFINE_SPINLOCK(tty_ldisc_lock);
static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
/* Line disc dispatch table */
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
return;
}
local_irq_restore(flags);
+ wake_up(&tty_ldisc_idle);
}
/**
/* BTM here locks versus a hangup event */
WARN_ON(!tty_locked());
ret = ld->ops->open(tty);
+ if (ret)
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
return ret;
}
return 0;
return cancel_delayed_work_sync(&tty->buf.work);
}
+/**
+ * tty_ldisc_wait_idle - wait for the ldisc to become idle
+ * @tty: tty to wait for
+ *
+ * Wait for the line discipline to become idle. The discipline must
+ * have been halted for this to guarantee it remains idle.
+ *
+ * Returns 0 once our reference is the only one left (users == 1),
+ * -EBUSY if the ldisc is still referenced after a 5 second timeout,
+ * or a negative error if the wait was interrupted by a signal.
+ */
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+ int ret;
+ ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+ atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+ if (ret < 0)
+ return ret;
+ return ret > 0 ? 0 : -EBUSY;
+}
+
/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
flush_scheduled_work();
+ retval = tty_ldisc_wait_idle(tty);
+
tty_lock();
mutex_lock(&tty->ldisc_mutex);
+
+ /* handle wait idle failure locked */
+ if (retval) {
+ tty_ldisc_put(new_ldisc);
+ goto enable;
+ }
+
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
tty_ldisc_put(o_ldisc);
+enable:
/*
* Allow ldisc referencing to occur again
*/
* state closed
*/
-static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
- struct tty_ldisc *ld;
+ struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+
+ if (IS_ERR(ld))
+ return -1;
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
/*
* Switch the line discipline back
*/
- ld = tty_ldisc_get(ldisc);
- BUG_ON(IS_ERR(ld));
tty_ldisc_assign(tty, ld);
tty_set_termios_ldisc(tty, ldisc);
+
+ return 0;
}
/**
a FIXME */
if (tty->ldisc) { /* Not yet closed */
if (reset == 0) {
- tty_ldisc_reinit(tty, tty->termios->c_line);
- err = tty_ldisc_open(tty, tty->ldisc);
+
+ if (!tty_ldisc_reinit(tty, tty->termios->c_line))
+ err = tty_ldisc_open(tty, tty->ldisc);
+ else
+ err = 1;
}
/* If the re-open fails or we reset then go to N_TTY. The
N_TTY open cannot fail */
if (reset || err) {
- tty_ldisc_reinit(tty, N_TTY);
+ BUG_ON(tty_ldisc_reinit(tty, N_TTY));
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
}
tty_ldisc_enable(tty);
struct kbd_struct * kbd;
unsigned int console;
unsigned char ucval;
+ unsigned int uival;
void __user *up = (void __user *)arg;
int i, perm;
int ret = 0;
break;
case KDGETMODE:
- ucval = vc->vc_mode;
+ uival = vc->vc_mode;
goto setint;
case KDMAPDISP:
break;
case KDGKBMODE:
- ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+ uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
(kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
(kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
K_XLATE);
break;
case KDGKBMETA:
- ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+ uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
setint:
- ret = put_user(ucval, (int __user *)arg);
+ ret = put_user(uival, (int __user *)arg);
break;
case KDGETKEYCODE:
for (i = 0; i < MAX_NR_CONSOLES; ++i)
if (! VT_IS_IN_USE(i))
break;
- ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
+ uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
goto setint;
/*
OMAP processors have SHA1/MD5 hw accelerator. Select this if you
want to use the OMAP module for SHA1/MD5 algorithms.
+config CRYPTO_DEV_TEGRA_AES
+ tristate "Support for TEGRA AES hw engine"
+ depends on ARCH_TEGRA_2x_SOC
+ select CRYPTO_AES
+ select TEGRA_ARB_SEMAPHORE
+ help
+ TEGRA processors have AES module accelerator. Select this if you
+ want to use the TEGRA module for AES algorithms.
+
endif # CRYPTO_HW
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (initial));
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
--- /dev/null
+/*
+ * drivers/crypto/tegra-aes.c
+ *
+ * aes driver for NVIDIA tegra aes hardware
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+#include <mach/arb_sema.h>
+#include <mach/clk.h>
+
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <crypto/internal/rng.h>
+
+#include "tegra-aes.h"
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_GIV BIT(2)
+#define FLAGS_RNG BIT(3)
+#define FLAGS_NEW_KEY BIT(4)
+#define FLAGS_NEW_IV BIT(5)
+#define FLAGS_INIT BIT(6)
+#define FLAGS_FAST BIT(7)
+#define FLAGS_BUSY 8
+
+/*
+ * Defines AES engine Max process bytes size in one go, which takes 1 msec.
+ * AES engine spends about 176 cycles/16-bytes or 11 cycles/byte
+ * The duration CPU can use the BSE to 1 msec, then the number of available
+ * cycles of AVP/BSE is 216K. In this duration, AES can process 216/11 ~= 19KB
+ * Based on this AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
+ */
+#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
+
+/*
+ * The key table length is 64 bytes
+ * (This includes up to 32 bytes of key, 16 bytes of original initial
+ * vector and 16 bytes of updated initial vector)
+ */
+#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
+
+#define AES_HW_IV_SIZE 16
+#define AES_HW_KEYSCHEDULE_LEN 256
+#define ARB_SEMA_TIMEOUT 500
+
+/*
+ * The memory being used is divided as follows:
+ * 1. Key - 32 bytes
+ * 2. Original IV - 16 bytes
+ * 3. Updated IV - 16 bytes
+ * 4. Key schedule - 256 bytes
+ *
+ * 1+2+3 constitute the hw key table.
+ */
+#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
+
+#define DEFAULT_RNG_BLK_SZ 16
+
+/* As of now only 5 commands are used for AES encryption/decryption */
+#define AES_HW_MAX_ICQ_LENGTH 5
+
+#define ICQBITSHIFT_BLKCNT 0
+
+/* memdma_vd command */
+#define MEMDMA_DIR_DTOVRAM 0
+#define MEMDMA_DIR_VTODRAM 1
+#define MEMDMABITSHIFT_DIR 25
+#define MEMDMABITSHIFT_NUM_WORDS 12
+
+/* Define AES Interactive command Queue commands Bit positions */
+enum {
+ ICQBITSHIFT_KEYTABLEADDR = 0,
+ ICQBITSHIFT_KEYTABLEID = 17,
+ ICQBITSHIFT_VRAMSEL = 23,
+ ICQBITSHIFT_TABLESEL = 24,
+ ICQBITSHIFT_OPCODE = 26,
+};
+
+/* Define Ucq opcodes required for AES operation */
+enum {
+ UCQOPCODE_BLKSTARTENGINE = 0x0E,
+ UCQOPCODE_DMASETUP = 0x10,
+ UCQOPCODE_DMACOMPLETE = 0x11,
+ UCQOPCODE_SETTABLE = 0x15,
+ UCQOPCODE_MEMDMAVD = 0x22,
+};
+
+/* Define Aes command values */
+enum {
+ UCQCMD_VRAM_SEL = 0x1,
+ UCQCMD_CRYPTO_TABLESEL = 0x3,
+ UCQCMD_KEYSCHEDTABLESEL = 0x4,
+ UCQCMD_KEYTABLESEL = 0x8,
+};
+
+#define UCQCMD_KEYTABLEADDRMASK 0x1FFFF
+
+#define AES_NR_KEYSLOTS 8
+#define SSK_SLOT_NUM 4
+
+struct tegra_aes_slot {
+ struct list_head node;
+ int slot_num;
+ bool available;
+};
+
+static struct tegra_aes_slot ssk = {
+ .slot_num = SSK_SLOT_NUM,
+ .available = true,
+};
+
+struct tegra_aes_reqctx {
+ unsigned long mode;
+};
+
+#define TEGRA_AES_QUEUE_LENGTH 50
+
+struct tegra_aes_dev {
+ struct device *dev;
+ unsigned long phys_base;
+ void __iomem *io_base;
+ dma_addr_t ivkey_phys_base;
+ void __iomem *ivkey_base;
+ struct clk *iclk;
+ struct clk *pclk;
+ struct tegra_aes_ctx *ctx;
+ unsigned long flags;
+ struct completion op_complete;
+ u32 *buf_in;
+ dma_addr_t dma_buf_in;
+ u32 *buf_out;
+ dma_addr_t dma_buf_out;
+ u8 *iv;
+ u8 dt[DEFAULT_RNG_BLK_SZ];
+ int ivlen;
+ u64 ctr;
+ int res_id;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct tegra_aes_slot *slots;
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+};
+
+static struct tegra_aes_dev *aes_dev;
+
+struct tegra_aes_ctx {
+ struct tegra_aes_dev *dd;
+ unsigned long flags;
+ struct tegra_aes_slot *slot;
+ int keylen;
+};
+
+static struct tegra_aes_ctx rng_ctx = {
+ .flags = FLAGS_NEW_KEY,
+ .keylen = AES_KEYSIZE_128,
+};
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+static DEFINE_MUTEX(aes_lock);
+
+static void aes_workqueue_handler(struct work_struct *work);
+static DECLARE_WORK(aes_work, aes_workqueue_handler);
+static struct workqueue_struct *aes_wq;
+
+extern unsigned long long tegra_chip_uid(void);
+
+/* Read a 32-bit AES engine register at the given byte offset. */
+static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
+{
+ return readl(dd->io_base + offset);
+}
+
+/* Write a 32-bit value to the AES engine register at the given byte offset. */
+static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
+{
+ writel(val, dd->io_base + offset);
+}
+
+/*
+ * Power up the AES engine: enable pclk then iclk, set iclk to 240 MHz
+ * and unmask the engine interrupts (0x33 -> INT_ENB).
+ *
+ * Each failure path disables whatever clocks were already enabled, so
+ * on error the hardware is left in its initial (gated) state.
+ * Returns 0 on success or a negative errno from the clk API.
+ */
+static int aes_hw_init(struct tegra_aes_dev *dd)
+{
+ int ret = 0;
+
+ ret = clk_enable(dd->pclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: pclock enable fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clk_enable(dd->iclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: iclock enable fail(%d)\n", __func__, ret);
+ clk_disable(dd->pclk);
+ return ret;
+ }
+
+ /* NOTE(review): rate is set after enable; presumably acceptable for
+ this clock - confirm against the Tegra clk framework. */
+ ret = clk_set_rate(dd->iclk, 240000000);
+ if (ret) {
+ dev_err(dd->dev, "%s: iclk set_rate fail(%d)\n", __func__, ret);
+ clk_disable(dd->iclk);
+ clk_disable(dd->pclk);
+ return ret;
+ }
+
+ aes_writel(dd, 0x33, INT_ENB);
+ return ret;
+}
+
+/* Reverse of aes_hw_init(): gate both engine clocks. */
+static void aes_hw_deinit(struct tegra_aes_dev *dd)
+{
+ clk_disable(dd->iclk);
+ clk_disable(dd->pclk);
+}
+
+/*
+ * Run one crypt operation on the engine.
+ *
+ * Builds a short interactive command queue (DMA setup with the source
+ * address, block-start for @nblocks, DMA complete), programs the
+ * command-queue control, endianness and SECURE_INPUT_SELECT registers
+ * according to @mode (CBC / RNG / plain ECB-style, encrypt vs decrypt,
+ * key length, and whether the updated IV is selected via @upd_iv),
+ * then issues the commands. All but the final command are written
+ * before waiting up to 150 ms for the completion raised by aes_irq();
+ * the trailing DMACOMPLETE is written only after completion.
+ *
+ * Returns 0 on success or -ETIMEDOUT if the interrupt never arrived.
+ */
+static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
+ int nblocks, int mode, bool upd_iv)
+{
+ u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
+ int qlen = 0, i, eng_busy, icq_empty, dma_busy, ret = 0;
+ u32 value;
+
+ cmdq[qlen++] = UCQOPCODE_DMASETUP << ICQBITSHIFT_OPCODE;
+ cmdq[qlen++] = in_addr;
+ cmdq[qlen++] = UCQOPCODE_BLKSTARTENGINE << ICQBITSHIFT_OPCODE |
+ (nblocks-1) << ICQBITSHIFT_BLKCNT;
+ cmdq[qlen++] = UCQOPCODE_DMACOMPLETE << ICQBITSHIFT_OPCODE;
+
+ value = aes_readl(dd, CMDQUE_CONTROL);
+ /* access SDRAM through AHB */
+ value &= ~CMDQ_CTRL_SRC_STM_SEL_FIELD;
+ value &= ~CMDQ_CTRL_DST_STM_SEL_FIELD;
+ value |= (CMDQ_CTRL_SRC_STM_SEL_FIELD | CMDQ_CTRL_DST_STM_SEL_FIELD |
+ CMDQ_CTRL_ICMDQEN_FIELD);
+ aes_writel(dd, value, CMDQUE_CONTROL);
+ dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);
+
+ value = 0;
+ value |= CONFIG_ENDIAN_ENB_FIELD;
+ aes_writel(dd, value, CONFIG);
+ dev_dbg(dd->dev, "config=0x%x", value);
+
+ value = aes_readl(dd, SECURE_CONFIG_EXT);
+ value &= ~SECURE_OFFSET_CNT_FIELD;
+ aes_writel(dd, value, SECURE_CONFIG_EXT);
+ dev_dbg(dd->dev, "secure_cfg_xt=0x%x", value);
+
+ if (mode & FLAGS_CBC) {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_VCTRAM_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else if (mode & FLAGS_RNG){
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (1 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT) |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ }
+ dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
+ aes_writel(dd, value, SECURE_INPUT_SELECT);
+
+ aes_writel(dd, out_addr, SECURE_DEST_ADDR);
+ INIT_COMPLETION(dd->op_complete);
+
+ /*
+ * Push all commands except the final one; before each write, poll
+ * INTR_STATUS until engine, ICQ and DMA look ready.
+ * NOTE(review): the loop condition combines these logical flags with
+ * bitwise '&' (and '!' on a multi-bit value); '&&'/ordinary logic was
+ * presumably intended - confirm against the INTR_STATUS bit layout.
+ */
+ for (i = 0; i < qlen - 1; i++) {
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ dma_busy = value & (0x1<<23);
+ } while (eng_busy & (!icq_empty) & dma_busy);
+ aes_writel(dd, cmdq[i], ICMDQUE_WR);
+ }
+
+ ret = wait_for_completion_timeout(&dd->op_complete, msecs_to_jiffies(150));
+ if (ret == 0) {
+ dev_err(dd->dev, "timed out (0x%x)\n",
+ aes_readl(dd, INTR_STATUS));
+ return -ETIMEDOUT;
+ }
+
+ aes_writel(dd, cmdq[qlen - 1], ICMDQUE_WR);
+ return 0;
+}
+
+/*
+ * Return the context's hardware key slot to the free pool and clear the
+ * context's reference to it, under the global slot-list lock.
+ */
+static void aes_release_key_slot(struct tegra_aes_dev *dd)
+{
+ spin_lock(&list_lock);
+ dd->ctx->slot->available = true;
+ dd->ctx->slot = NULL;
+ spin_unlock(&list_lock);
+}
+
+/*
+ * Claim the first available hardware key slot from the global list.
+ * The slot is marked unavailable before the lock is dropped, so two
+ * callers cannot grab the same slot. Returns NULL if none are free.
+ */
+static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd)
+{
+ struct tegra_aes_slot *slot = NULL;
+ bool found = false;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(slot, &dev_list, node) {
+ dev_dbg(dd->dev, "empty:%d, num:%d\n", slot->available,
+ slot->slot_num);
+ if (slot->available) {
+ slot->available = false;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found ? slot : NULL;
+}
+
+/*
+ * Load the current context's key into the engine.
+ *
+ * Selects the context's key slot (falling back to the secure storage
+ * key slot, SSK, when the context has none), disables software key
+ * readback for that slot and enables hardware key-schedule generation.
+ * For a caller-supplied key it then DMAs the key table from SDRAM into
+ * VRAM and issues a SETTABLE command to latch it into the slot.
+ *
+ * Returns 0 on success, -EINVAL if the device has no context.
+ */
+static int aes_set_key(struct tegra_aes_dev *dd)
+{
+ u32 value, cmdq[2];
+ struct tegra_aes_ctx *ctx = dd->ctx;
+ int i, eng_busy, icq_empty, dma_busy;
+ bool use_ssk = false;
+
+ if (!ctx) {
+ dev_err(dd->dev, "%s: context invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ /* use ssk? */
+ if (!dd->ctx->slot) {
+ dev_dbg(dd->dev, "using ssk");
+ dd->ctx->slot = &ssk;
+ use_ssk = true;
+ }
+
+ /* disable key read from hw */
+ value = aes_readl(dd, SECURE_SEC_SEL0+(ctx->slot->slot_num*4));
+ value &= ~SECURE_SEL0_KEYREAD_ENB0_FIELD;
+ aes_writel(dd, value, SECURE_SEC_SEL0+(ctx->slot->slot_num*4));
+
+ /* enable key schedule generation in hardware */
+ value = aes_readl(dd, SECURE_CONFIG_EXT);
+ value &= ~SECURE_KEY_SCH_DIS_FIELD;
+ aes_writel(dd, value, SECURE_CONFIG_EXT);
+
+ /* select the key slot */
+ value = aes_readl(dd, SECURE_CONFIG);
+ value &= ~SECURE_KEY_INDEX_FIELD;
+ value |= (ctx->slot->slot_num << SECURE_KEY_INDEX_SHIFT);
+ aes_writel(dd, value, SECURE_CONFIG);
+
+ if (use_ssk)
+ goto out;
+
+ /* copy the key table from sdram to vram */
+ cmdq[0] = 0;
+ cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
+ (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) |
+ (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32))
+ << MEMDMABITSHIFT_NUM_WORDS;
+ cmdq[1] = (u32)dd->ivkey_phys_base;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq); i++)
+ aes_writel(dd, cmdq[i], ICMDQUE_WR);
+
+ /*
+ * NOTE(review): as in aes_start_crypt(), these polls combine logical
+ * flags with bitwise '&' / '!'; presumably '&&' was intended -
+ * confirm against the INTR_STATUS bit layout.
+ */
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ dma_busy = value & (0x1<<23);
+ } while (eng_busy & (!icq_empty) & dma_busy);
+
+ /* settable command to get key into internal registers */
+ value = 0;
+ value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+ UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+ UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
+ (UCQCMD_KEYTABLESEL | ctx->slot->slot_num)
+ << ICQBITSHIFT_KEYTABLEID;
+ aes_writel(dd, value, ICMDQUE_WR);
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ } while (eng_busy & (!icq_empty));
+
+out:
+ return 0;
+}
+
+/*
+ * Dequeue and process one ablkcipher request.
+ *
+ * Pops a request from the crypto queue (clearing FLAGS_BUSY when the
+ * queue is empty), binds it to the device, loads the key, programs the
+ * IV via a one-block CBC pass through the bounce buffers, then walks
+ * the src/dst scatterlists one entry at a time, DMA-mapping each pair
+ * and running the engine on up to dma_max bytes per step.
+ *
+ * Serialisation: aes_lock guards the engine between threads, and the
+ * arbitration semaphore (res_id) guards it against the AVP.
+ *
+ * Returns 0 on success, -ENODATA when the queue was empty, or a
+ * negative error; the request's completion callback is invoked with
+ * the same status.
+ */
+static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct tegra_aes_ctx *ctx;
+ struct tegra_aes_reqctx *rctx;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+ int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
+ int ret = 0, nblocks, total;
+ int count = 0;
+ dma_addr_t addr_in, addr_out;
+ struct scatterlist *in_sg, *out_sg;
+
+ if (!dd)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return -ENODATA;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ dev_dbg(dd->dev, "%s: get new req\n", __func__);
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ /* assign new request to device */
+ dd->req = req;
+ dd->total = req->nbytes;
+ dd->in_offset = 0;
+ dd->in_sg = req->src;
+ dd->out_offset = 0;
+ dd->out_sg = req->dst;
+
+ in_sg = dd->in_sg;
+ out_sg = dd->out_sg;
+
+ if (!in_sg || !out_sg) {
+ mutex_unlock(&aes_lock);
+ return -EINVAL;
+ }
+
+ total = dd->total;
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ dd->iv = (u8 *)req->info;
+ dd->ivlen = AES_BLOCK_SIZE;
+
+ if ((dd->flags & FLAGS_CBC) && dd->iv)
+ dd->flags |= FLAGS_NEW_IV;
+ else
+ dd->flags &= ~FLAGS_NEW_IV;
+
+ ctx->dd = dd;
+ if (dd->ctx != ctx) {
+ /* assign new context to device */
+ dd->ctx = ctx;
+ ctx->flags |= FLAGS_NEW_KEY;
+ }
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware not available\n");
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ ret = aes_hw_init(dd);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+ goto fail;
+ }
+
+ aes_set_key(dd);
+
+ /* set iv to the aes hw slot */
+ /* NOTE(review): for non-CBC requests req->info may be NULL, yet
+ dd->iv is memcpy'd unconditionally here - verify callers always
+ supply an IV, or guard this copy. */
+ memset(dd->buf_in, 0 , AES_BLOCK_SIZE);
+ memcpy(dd->buf_in, dd->iv, dd->ivlen);
+
+ ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+ (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+
+ while (total) {
+ dev_dbg(dd->dev, "remain: 0x%x\n", total);
+
+ ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ goto out;
+ }
+
+ ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg,
+ 1, DMA_TO_DEVICE);
+ goto out;
+ }
+
+ addr_in = sg_dma_address(in_sg);
+ addr_out = sg_dma_address(out_sg);
+ dd->flags |= FLAGS_FAST;
+ count = min((int)sg_dma_len(in_sg), (int)dma_max);
+ WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
+ nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
+
+ ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
+ dd->flags, true);
+
+ dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+ dd->flags &= ~FLAGS_FAST;
+
+ dev_dbg(dd->dev, "out: copied 0x%x\n", count);
+ total -= count;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ WARN_ON(((total != 0) && (!in_sg || !out_sg)));
+ }
+
+out:
+ aes_hw_deinit(dd);
+
+fail:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(dd->res_id);
+
+ dd->total = total;
+
+ /* release the mutex */
+ mutex_unlock(&aes_lock);
+
+ if (dd->req->base.complete)
+ dd->req->base.complete(&dd->req->base, ret);
+
+ dev_dbg(dd->dev, "%s: exit\n", __func__);
+ return ret;
+}
+
+/*
+ * Install a new cipher key for @tfm.
+ *
+ * Validates the key length (128/192/256-bit), reserves a hardware key
+ * slot and stages the key bytes in the shared ivkey buffer; the engine
+ * slot itself is programmed later, from the request path, which is why
+ * FLAGS_NEW_KEY is set here.  Returns 0 or a negative errno.
+ */
+static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+	unsigned int keylen)
+{
+	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct tegra_aes_dev *dd = aes_dev;
+	struct tegra_aes_slot *key_slot;
+
+	if (!ctx || !dd) {
+		/*
+		 * Can't use dev_err() here: dd may be exactly the NULL we
+		 * just tested for (the original dereferenced dd->dev and
+		 * would oops on this path).  Also print pointers with %p
+		 * instead of truncating them through a %x cast.
+		 */
+		pr_err("tegra-aes: ctx=%p, dd=%p\n", ctx, dd);
+		return -EINVAL;
+	}
+
+	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
+		(keylen != AES_KEYSIZE_256)) {
+		dev_err(dd->dev, "unsupported key size\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(dd->dev, "keylen: %d\n", keylen);
+
+	ctx->dd = dd;
+	dd->ctx = ctx;
+
+	/* re-keying: give the previous slot back before grabbing a new one */
+	if (ctx->slot)
+		aes_release_key_slot(dd);
+
+	key_slot = aes_find_key_slot(dd);
+	if (!key_slot) {
+		dev_err(dd->dev, "no empty slot\n");
+		return -ENOMEM;
+	}
+
+	ctx->slot = key_slot;
+	ctx->keylen = keylen;
+	/* defer programming the engine until the next request is handled */
+	ctx->flags |= FLAGS_NEW_KEY;
+
+	/* copy the key */
+	memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+	memcpy(dd->ivkey_base, key, keylen);
+
+	dev_dbg(dd->dev, "done\n");
+	return 0;
+}
+
+/*
+ * Work handler: mark the engine busy, then keep feeding queued
+ * requests to the hardware until tegra_aes_handle_req() reports a
+ * non-zero status (queue drained or an error occurred).
+ */
+static void aes_workqueue_handler(struct work_struct *work)
+{
+	struct tegra_aes_dev *dd = aes_dev;
+
+	set_bit(FLAGS_BUSY, &dd->flags);
+
+	/* handle_req is always attempted at least once, as before */
+	while (tegra_aes_handle_req(dd) == 0)
+		;
+}
+
+/*
+ * BSEV interrupt handler: wake the waiter unless the engine is still
+ * busy with commands pending in the ICQ.
+ */
+static irqreturn_t aes_irq(int irq, void *dev_id)
+{
+	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
+	u32 value = aes_readl(dd, INTR_STATUS);
+
+	dev_dbg(dd->dev, "irq_stat: 0x%x", value);
+	/*
+	 * The original used a bitwise '&' between a bit-test and a logical
+	 * '!' expression; that only produced the intended result because
+	 * ENGINE_BUSY_FIELD happens to be bit 0.  Use the logical operator
+	 * that was clearly meant, so the condition no longer depends on
+	 * the bit position of ENGINE_BUSY_FIELD.
+	 */
+	if (!((value & ENGINE_BUSY_FIELD) && !(value & ICQ_EMPTY_FIELD)))
+		complete(&dd->op_complete);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Common entry point for all four ablkcipher operations: record the
+ * requested mode in the per-request context, enqueue the request under
+ * dd->lock and, if the engine was idle, kick the worker that drains
+ * the queue.  Returns the status from ablkcipher_enqueue_request().
+ */
+static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct tegra_aes_dev *dd = aes_dev;
+	unsigned long flags;
+	int err = 0;
+	int busy;
+
+	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+		!!(mode & FLAGS_ENCRYPT),
+		!!(mode & FLAGS_CBC));
+
+	rctx->mode = mode;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	err = ablkcipher_enqueue_request(&dd->queue, req);
+	/* FLAGS_BUSY already set means the worker is currently draining */
+	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!busy)
+		queue_work(aes_wq, &aes_work);
+
+	return err;
+}
+
+/* ECB encrypt: FLAGS_ENCRYPT selects encryption, no chaining. */
+static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+/* ECB decrypt: mode 0 means decrypt, no chaining. */
+static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return tegra_aes_crypt(req, 0);
+}
+
+/* CBC encrypt. */
+static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+/* CBC decrypt. */
+static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return tegra_aes_crypt(req, FLAGS_CBC);
+}
+
+/*
+ * rng_make_random: produce @dlen bytes of random data by encrypting
+ * the current DT block with the seeded RNG key, then increment DT
+ * (treated as a big-endian counter) for the next call.
+ *
+ * Returns @dlen on success; on failure the negative errno is returned
+ * through dlen.
+ *
+ * NOTE(review): dlen is not clamped to AES_BLOCK_SIZE before the
+ * memcpy from buf_out below - presumably callers never request more
+ * than one block; confirm against the crypto_rng users.
+ */
+static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
+	unsigned int dlen)
+{
+	struct tegra_aes_dev *dd = aes_dev;
+	struct tegra_aes_ctx *ctx = &rng_ctx;
+	int ret, i;
+	u8 *dest = rdata, *dt = dd->dt;
+
+	/* take mutex to access the aes hw */
+	mutex_lock(&aes_lock);
+
+	/* take the hardware semaphore */
+	if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+		dev_err(dd->dev, "aes hardware not available\n");
+		mutex_unlock(&aes_lock);
+		return -EBUSY;
+	}
+
+	ret = aes_hw_init(dd);
+	if (ret < 0) {
+		dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+		dlen = ret;
+		goto fail;
+	}
+
+	ctx->dd = dd;
+	dd->ctx = ctx;
+	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
+	/* encrypt the DT block into buf_out */
+	memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);
+
+	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+		(u32)dd->dma_buf_out, 1, dd->flags, true);
+	if (ret < 0) {
+		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+		dlen = ret;
+		goto out;
+	}
+	memcpy(dest, dd->buf_out, dlen);
+
+	/* update the DT: big-endian increment with carry propagation */
+	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
+		dt[i] += 1;
+		if (dt[i] != 0)
+			break;
+	}
+
+out:
+	aes_hw_deinit(dd);
+
+fail:
+	/* release the hardware semaphore */
+	tegra_arb_mutex_unlock(dd->res_id);
+	mutex_unlock(&aes_lock);
+	dev_dbg(dd->dev, "%s: done\n", __func__);
+	return dlen;
+}
+
+/*
+ * rng_reset: seed the RNG.
+ *
+ * @seed layout: DEFAULT_RNG_BLK_SZ bytes of IV, then an AES-128 key,
+ * optionally followed by an explicit DT block.  When no DT block is
+ * supplied, one is derived from the current time and the chip UID.
+ * Returns 0 on success or a negative errno.
+ */
+static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
+	unsigned int slen)
+{
+	struct tegra_aes_dev *dd = aes_dev;
+	struct tegra_aes_ctx *ctx = &rng_ctx;
+	struct tegra_aes_slot *key_slot;
+	struct timespec ts;
+	int ret = 0;
+	u64 nsec, tmp[2];
+	u8 *dt;
+
+	if (!ctx || !dd) {
+		/*
+		 * Can't use dev_err() here: dd may be exactly the NULL we
+		 * just tested for (the original dereferenced dd->dev and
+		 * would oops on this path).
+		 */
+		pr_err("tegra-aes: ctx=%p, dd=%p\n", ctx, dd);
+		return -EINVAL;
+	}
+
+	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+		dev_err(dd->dev, "seed size invalid");
+		return -ENOMEM;
+	}
+
+	/* take mutex to access the aes hw */
+	mutex_lock(&aes_lock);
+
+	if (!ctx->slot) {
+		key_slot = aes_find_key_slot(dd);
+		if (!key_slot) {
+			dev_err(dd->dev, "no empty slot\n");
+			mutex_unlock(&aes_lock);
+			return -ENOMEM;
+		}
+		ctx->slot = key_slot;
+	}
+
+	ctx->dd = dd;
+	dd->ctx = ctx;
+	dd->ctr = 0;
+
+	ctx->keylen = AES_KEYSIZE_128;
+	ctx->flags |= FLAGS_NEW_KEY;
+
+	/* copy the key to the key slot */
+	memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
+
+	dd->iv = seed;
+	dd->ivlen = slen;
+
+	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
+	/* take the hardware semaphore */
+	if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+		dev_err(dd->dev, "aes hardware not available\n");
+		mutex_unlock(&aes_lock);
+		return -EBUSY;
+	}
+
+	ret = aes_hw_init(dd);
+	if (ret < 0) {
+		dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+		goto fail;
+	}
+
+	aes_set_key(dd);
+
+	/* set seed to the aes hw slot */
+	memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
+	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+		(u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+	if (ret < 0) {
+		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+		goto out;
+	}
+
+	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+		/* the caller supplied an explicit DT block after the key */
+		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
+	} else {
+		/* derive a DT from the current time and the unique chip id */
+		getnstimeofday(&ts);
+		nsec = timespec_to_ns(&ts);
+		do_div(nsec, 1000);
+		nsec ^= dd->ctr << 56;
+		dd->ctr++;
+		tmp[0] = nsec;
+		tmp[1] = tegra_chip_uid();
+		dt = (u8 *)tmp;
+	}
+	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
+
+out:
+	aes_hw_deinit(dd);
+
+fail:
+	/* release the hardware semaphore */
+	tegra_arb_mutex_unlock(dd->res_id);
+	mutex_unlock(&aes_lock);
+
+	dev_dbg(dd->dev, "%s: done\n", __func__);
+	return ret;
+}
+
+/* crypto_tfm init: reserve per-request space for our request context. */
+static int tegra_aes_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
+	return 0;
+}
+
+/*
+ * Algorithm registrations.  The "disabled_" cra_name prefix appears to
+ * deliberately keep the crypto API from matching these by their usual
+ * names - NOTE(review): confirm this is intentional.
+ */
+static struct crypto_alg algs[] = {
+	{
+		.cra_name = "disabled_ecb(aes)",
+		.cra_driver_name = "ecb-aes-tegra",
+		.cra_priority = 100,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = tegra_aes_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = tegra_aes_setkey,
+			.encrypt = tegra_aes_ecb_encrypt,
+			.decrypt = tegra_aes_ecb_decrypt,
+		},
+	}, {
+		.cra_name = "disabled_cbc(aes)",
+		.cra_driver_name = "cbc-aes-tegra",
+		.cra_priority = 100,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = tegra_aes_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			/*
+			 * The CBC IV is one AES block.  The original said
+			 * AES_MIN_KEY_SIZE - numerically identical (16),
+			 * but the wrong symbol for an IV length.
+			 */
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = tegra_aes_setkey,
+			.encrypt = tegra_aes_cbc_encrypt,
+			.decrypt = tegra_aes_cbc_decrypt,
+		}
+	}, {
+		.cra_name = "disabled_ansi_cprng",
+		.cra_driver_name = "rng-aes-tegra",
+		.cra_priority = 100,
+		.cra_flags = CRYPTO_ALG_TYPE_RNG,
+		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
+		.cra_type = &crypto_rng_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = tegra_aes_cra_init,
+		.cra_u.rng = {
+			.rng_make_random = tegra_aes_get_random,
+			.rng_reset = tegra_aes_rng_reset,
+			/* seed = IV block + AES-128 key + optional DT block */
+			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
+		}
+	}
+};
+
+/*
+ * Probe: map the register window, grab the bsev/vde clocks, allocate
+ * the coherent key/bounce buffers, hook up the IRQ and register the
+ * ciphers and RNG with the crypto API.
+ *
+ * Fixes vs. the original:
+ *  - stray ";;" removed;
+ *  - clk_get() reports failure via ERR_PTR, never NULL, so the old
+ *    "!dd->pclk" checks could never fire - test with IS_ERR() and
+ *    propagate PTR_ERR();
+ *  - free_irq() was called on every error path, even before
+ *    request_irq() had run - now only on the post-request path;
+ *  - list_lock is initialized before any path that can take it;
+ *  - the error path no longer list_del()s the global list HEAD (which
+ *    left freed slot nodes chained); it re-inits the head instead.
+ */
+static int tegra_aes_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tegra_aes_dev *dd;
+	struct resource *res;
+	int err = -ENOMEM, i = 0, j;
+
+	if (aes_dev)
+		return -EEXIST;
+
+	dd = kzalloc(sizeof(struct tegra_aes_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		return -ENOMEM;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	dd->slots = kzalloc(sizeof(struct tegra_aes_slot) * AES_NR_KEYSLOTS,
+		GFP_KERNEL);
+	if (dd->slots == NULL) {
+		dev_err(dev, "unable to alloc slot struct.\n");
+		goto out;
+	}
+
+	spin_lock_init(&dd->lock);
+	/* init the global slot-list lock early: the error path uses it */
+	spin_lock_init(&list_lock);
+	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
+
+	/* Get the module base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "invalid resource type: base\n");
+		err = -ENODEV;
+		goto out;
+	}
+	dd->phys_base = res->start;
+
+	dd->io_base = ioremap(dd->phys_base, resource_size(res));
+	if (!dd->io_base) {
+		dev_err(dev, "can't ioremap phys_base\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dd->res_id = TEGRA_ARB_AES;
+
+	/* Initialise the master bsev clock */
+	dd->pclk = clk_get(dev, "bsev");
+	if (IS_ERR(dd->pclk)) {
+		dev_err(dev, "pclock initialization failed.\n");
+		err = PTR_ERR(dd->pclk);
+		dd->pclk = NULL;	/* don't clk_put() an ERR_PTR below */
+		goto out;
+	}
+
+	/* Initialize the vde clock */
+	dd->iclk = clk_get(dev, "vde");
+	if (IS_ERR(dd->iclk)) {
+		dev_err(dev, "iclock initialization failed.\n");
+		err = PTR_ERR(dd->iclk);
+		dd->iclk = NULL;	/* don't clk_put() an ERR_PTR below */
+		goto out;
+	}
+
+	/*
+	 * the foll contiguous memory is allocated as follows -
+	 * - hardware key table
+	 * - key schedule
+	 */
+	dd->ivkey_base = dma_alloc_coherent(dev, SZ_512, &dd->ivkey_phys_base,
+		GFP_KERNEL);
+	if (!dd->ivkey_base) {
+		dev_err(dev, "can not allocate iv/key buffer\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+		&dd->dma_buf_in, GFP_KERNEL);
+	if (!dd->buf_in) {
+		dev_err(dev, "can not allocate dma-in buffer\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+		&dd->dma_buf_out, GFP_KERNEL);
+	if (!dd->buf_out) {
+		dev_err(dev, "can not allocate dma-out buffer\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	init_completion(&dd->op_complete);
+	aes_wq = alloc_workqueue("aes_wq", WQ_HIGHPRI, 16);
+	if (!aes_wq) {
+		dev_err(dev, "alloc_workqueue failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* get the irq */
+	err = request_irq(INT_VDE_BSE_V, aes_irq, IRQF_TRIGGER_HIGH,
+		"tegra-aes", dd);
+	if (err) {
+		dev_err(dev, "request_irq failed\n");
+		goto out;
+	}
+
+	/* publish the key slots on the global free list */
+	spin_lock(&list_lock);
+	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
+		dd->slots[i].available = true;
+		dd->slots[i].slot_num = i;
+		INIT_LIST_HEAD(&dd->slots[i].node);
+		list_add_tail(&dd->slots[i].node, &dev_list);
+	}
+	spin_unlock(&list_lock);
+
+	aes_dev = dd;
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		INIT_LIST_HEAD(&algs[i].cra_list);
+		err = crypto_register_alg(&algs[i]);
+		if (err)
+			goto out_algs;
+	}
+
+	dev_info(dev, "registered");
+	return 0;
+
+out_algs:
+	/* only reached after request_irq() succeeded */
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+	free_irq(INT_VDE_BSE_V, dd);
+out:
+	/* unlink any slots we queued; their backing memory is freed below */
+	spin_lock(&list_lock);
+	INIT_LIST_HEAD(&dev_list);
+	spin_unlock(&list_lock);
+
+	if (dd->ivkey_base)
+		dma_free_coherent(dev, SZ_512, dd->ivkey_base,
+			dd->ivkey_phys_base);
+	if (dd->buf_in)
+		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+			dd->buf_in, dd->dma_buf_in);
+	if (dd->buf_out)
+		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+			dd->buf_out, dd->dma_buf_out);
+	if (dd->io_base)
+		iounmap(dd->io_base);
+	if (dd->iclk)
+		clk_put(dd->iclk);
+	if (dd->pclk)
+		clk_put(dd->pclk);
+	if (aes_wq)
+		destroy_workqueue(aes_wq);
+
+	kfree(dd->slots);
+	kfree(dd);
+	aes_dev = NULL;
+	dev_err(dev, "%s: initialization failed.\n", __func__);
+	return err;
+}
+
+/*
+ * Tear down everything probe() set up: stop the worker, release the
+ * IRQ, unlink the slot list, unregister the algorithms and free the
+ * DMA buffers, MMIO mapping and clocks.
+ */
+static int __devexit tegra_aes_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
+	int i;
+
+	if (!dd)
+		return -ENODEV;
+
+	cancel_work_sync(&aes_work);
+	destroy_workqueue(aes_wq);
+	free_irq(INT_VDE_BSE_V, dd);
+	/* NOTE(review): this unlinks the global list HEAD, not the slot
+	 * entries; the slots themselves are freed via dd->slots below */
+	spin_lock(&list_lock);
+	list_del(&dev_list);
+	spin_unlock(&list_lock);
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++)
+		crypto_unregister_alg(&algs[i]);
+
+	dma_free_coherent(dev, SZ_512, dd->ivkey_base,
+		dd->ivkey_phys_base);
+	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+		dd->buf_in, dd->dma_buf_in);
+	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+		dd->buf_out, dd->dma_buf_out);
+	iounmap(dd->io_base);
+	clk_put(dd->iclk);
+	clk_put(dd->pclk);
+	kfree(dd->slots);
+	kfree(dd);
+	aes_dev = NULL;
+
+	return 0;
+}
+
+/* Platform glue: bound by name to the "tegra-aes" platform device. */
+static struct platform_driver tegra_aes_driver = {
+	.probe = tegra_aes_probe,
+	.remove = __devexit_p(tegra_aes_remove),
+	.driver = {
+		.name = "tegra-aes",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module init: set up globals, then let the driver core call probe(). */
+static int __init tegra_aes_mod_init(void)
+{
+	INIT_LIST_HEAD(&dev_list);
+	mutex_init(&aes_lock);
+	return platform_driver_register(&tegra_aes_driver);
+}
+
+/* Module exit: unregistering triggers remove() for any bound device. */
+static void __exit tegra_aes_mod_exit(void)
+{
+	platform_driver_unregister(&tegra_aes_driver);
+}
+
+module_init(tegra_aes_mod_init);
+module_exit(tegra_aes_mod_exit);
+
+MODULE_DESCRIPTION("Tegra AES hw acceleration support.");
+MODULE_AUTHOR("NVIDIA Corporation");
+/* "GPL v2" is the ident the module loader recognizes; "GPLv2" taints. */
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __CRYPTODEV_TEGRA_AES_H
+#define __CRYPTODEV_TEGRA_AES_H
+
+/* BSEV register offsets */
+#define ICMDQUE_WR 0x1000
+#define CMDQUE_CONTROL 0x1008
+#define INTR_STATUS 0x1018
+#define INT_ENB 0x1040
+#define CONFIG 0x1044
+#define IRAM_ACCESS_CFG 0x10A0
+#define SECURE_DEST_ADDR 0x1100
+#define SECURE_INPUT_SELECT 0x1104
+#define SECURE_CONFIG 0x1108
+#define SECURE_CONFIG_EXT 0x110C
+#define SECURE_SECURITY 0x1110
+#define SECURE_HASH_RESULT0 0x1120
+#define SECURE_HASH_RESULT1 0x1124
+#define SECURE_HASH_RESULT2 0x1128
+#define SECURE_HASH_RESULT3 0x112C
+#define SECURE_SEC_SEL0 0x1140
+#define SECURE_SEC_SEL1 0x1144
+#define SECURE_SEC_SEL2 0x1148
+#define SECURE_SEC_SEL3 0x114C
+#define SECURE_SEC_SEL4 0x1150
+#define SECURE_SEC_SEL5 0x1154
+#define SECURE_SEC_SEL6 0x1158
+#define SECURE_SEC_SEL7 0x115C
+
+/* interrupt status reg masks and shifts */
+#define DMA_BUSY_SHIFT 9
+#define DMA_BUSY_FIELD (0x1 << DMA_BUSY_SHIFT)
+#define ICQ_EMPTY_SHIFT 3
+#define ICQ_EMPTY_FIELD (0x1 << ICQ_EMPTY_SHIFT)
+#define ENGINE_BUSY_SHIFT 0
+#define ENGINE_BUSY_FIELD (0x1 << ENGINE_BUSY_SHIFT)
+
+/* secure select reg masks and shifts */
+#define SECURE_SEL0_KEYREAD_ENB0_SHIFT 0
+#define SECURE_SEL0_KEYREAD_ENB0_FIELD (0x1 << SECURE_SEL0_KEYREAD_ENB0_SHIFT)
+
+/* secure config ext masks and shifts */
+#define SECURE_KEY_SCH_DIS_SHIFT 15
+#define SECURE_KEY_SCH_DIS_FIELD (0x1 << SECURE_KEY_SCH_DIS_SHIFT)
+
+/* secure config masks and shifts */
+#define SECURE_KEY_INDEX_SHIFT 20
+#define SECURE_KEY_INDEX_FIELD (0x1F << SECURE_KEY_INDEX_SHIFT)
+#define SECURE_BLOCK_CNT_SHIFT 0
+#define SECURE_BLOCK_CNT_FIELD (0xFFFFF << SECURE_BLOCK_CNT_SHIFT)
+
+/* stream interface select masks and shifts */
+#define CMDQ_CTRL_SRC_STM_SEL_SHIFT 4
+#define CMDQ_CTRL_SRC_STM_SEL_FIELD (1 << CMDQ_CTRL_SRC_STM_SEL_SHIFT)
+#define CMDQ_CTRL_DST_STM_SEL_SHIFT 5
+#define CMDQ_CTRL_DST_STM_SEL_FIELD (1 << CMDQ_CTRL_DST_STM_SEL_SHIFT)
+/*
+ * The ICMDQEN/UCMDQEN fields originally shifted by the copy-pasted
+ * SRC/DST_STM_SEL shifts; build them from their own shift values.
+ */
+#define CMDQ_CTRL_ICMDQEN_SHIFT 1
+#define CMDQ_CTRL_ICMDQEN_FIELD (1 << CMDQ_CTRL_ICMDQEN_SHIFT)
+#define CMDQ_CTRL_UCMDQEN_SHIFT 0
+#define CMDQ_CTRL_UCMDQEN_FIELD (1 << CMDQ_CTRL_UCMDQEN_SHIFT)
+
+/* config register masks and shifts */
+#define CONFIG_ENDIAN_ENB_SHIFT 10
+#define CONFIG_ENDIAN_ENB_FIELD (0x1 << CONFIG_ENDIAN_ENB_SHIFT)
+#define CONFIG_MODE_SEL_SHIFT 0
+#define CONFIG_MODE_SEL_FIELD (0x1F << CONFIG_MODE_SEL_SHIFT)
+
+/* extended config */
+#define SECURE_OFFSET_CNT_SHIFT 24
+#define SECURE_OFFSET_CNT_FIELD (0xFF << SECURE_OFFSET_CNT_SHIFT)
+#define SECURE_KEYSCHED_GEN_SHIFT 15
+#define SECURE_KEYSCHED_GEN_FIELD (1 << SECURE_KEYSCHED_GEN_SHIFT)
+
+/* init vector select */
+#define SECURE_IV_SELECT_SHIFT 10
+#define SECURE_IV_SELECT_FIELD (1 << SECURE_IV_SELECT_SHIFT)
+
+/* secure engine input */
+#define SECURE_INPUT_ALG_SEL_SHIFT 28
+#define SECURE_INPUT_ALG_SEL_FIELD (0xF << SECURE_INPUT_ALG_SEL_SHIFT)
+#define SECURE_INPUT_KEY_LEN_SHIFT 16
+#define SECURE_INPUT_KEY_LEN_FIELD (0xFFF << SECURE_INPUT_KEY_LEN_SHIFT)
+#define SECURE_RNG_ENB_SHIFT 11
+#define SECURE_RNG_ENB_FIELD (0x1 << SECURE_RNG_ENB_SHIFT)
+#define SECURE_CORE_SEL_SHIFT 9
+#define SECURE_CORE_SEL_FIELD (0x1 << SECURE_CORE_SEL_SHIFT)
+#define SECURE_VCTRAM_SEL_SHIFT 7
+#define SECURE_VCTRAM_SEL_FIELD (0x3 << SECURE_VCTRAM_SEL_SHIFT)
+#define SECURE_INPUT_SEL_SHIFT 5
+#define SECURE_INPUT_SEL_FIELD (0x3 << SECURE_INPUT_SEL_SHIFT)
+#define SECURE_XOR_POS_SHIFT 3
+#define SECURE_XOR_POS_FIELD (0x3 << SECURE_XOR_POS_SHIFT)
+#define SECURE_HASH_ENB_SHIFT 2
+#define SECURE_HASH_ENB_FIELD (0x1 << SECURE_HASH_ENB_SHIFT)
+#define SECURE_ON_THE_FLY_SHIFT 0
+#define SECURE_ON_THE_FLY_FIELD (1 << SECURE_ON_THE_FLY_SHIFT)
+
+#endif
static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
- __mv_xor_slot_cleanup(chan);
+ mv_xor_slot_cleanup(chan);
}
static struct mv_xor_desc_slot *
debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
hole_off, hole_valid, intlv_sel);
- if (intlv_en ||
+ if (intlv_en &&
(intlv_sel != ((sys_addr >> 12) & intlv_en)))
return -EINVAL;
return NULL;
}
- /* marking MCI offline */
- mci->op_state = OP_OFFLINE;
-
del_mc_from_global_list(mci);
mutex_unlock(&mem_ctls_mutex);
- /* flush workq processes and remove sysfs */
+ /* flush workq processes */
edac_mc_workq_teardown(mci);
+
+ /* marking MCI offline */
+ mci->op_state = OP_OFFLINE;
+
+ /* remove from sysfs */
edac_remove_sysfs_mci_device(mci);
edac_printk(KERN_INFO, EDAC_MC,
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
- unsigned short vendor, device, flags;
+ unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
- {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
- QUIRK_RESET_PACKET |
- QUIRK_NO_1394A},
- {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
- {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
- {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
- {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
- {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
- {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
- {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+ {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER},
+
+ {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
+ QUIRK_BE_HEADERS},
+
+ {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+ QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
+ QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER},
+
+ {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER},
+
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+
+ {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_RESET_PACKET},
+
+ {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
/* This overrides anything that was found in ohci_quirks[]. */
d = &ab->descriptor;
if (d->res_count == 0) {
- size_t size, rest, offset;
+ size_t size, size2, rest, pktsize, size3, offset;
dma_addr_t start_bus;
void *start;
*/
offset = offsetof(struct ar_buffer, data);
- start = buffer = ab;
+ start = ab;
start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+ buffer = ab->data;
ab = ab->next;
d = &ab->descriptor;
- size = buffer + PAGE_SIZE - ctx->pointer;
+ size = start + PAGE_SIZE - ctx->pointer;
+ /* valid buffer data in the next page */
rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+ /* what actually fits in this page */
+ size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
memmove(buffer, ctx->pointer, size);
- memcpy(buffer + size, ab->data, rest);
- ctx->current_buffer = ab;
- ctx->pointer = (void *) ab->data + rest;
- end = buffer + size + rest;
+ memcpy(buffer + size, ab->data, size2);
+
+ while (size > 0) {
+ void *next = handle_ar_packet(ctx, buffer);
+ pktsize = next - buffer;
+ if (pktsize >= size) {
+ /*
+ * We have handled all the data that was
+ * originally in this page, so we can now
+ * continue in the next page.
+ */
+ buffer = next;
+ break;
+ }
+ /* move the next packet to the start of the buffer */
+ memmove(buffer, next, size + size2 - pktsize);
+ size -= pktsize;
+ /* fill up this page again */
+ size3 = min(rest - size2,
+ (size_t)PAGE_SIZE - offset - size - size2);
+ memcpy(buffer + size + size2,
+ (void *) ab->data + size2, size3);
+ size2 += size3;
+ }
- while (buffer < end)
- buffer = handle_ar_packet(ctx, buffer);
+ if (rest > 0) {
+ /* handle the packets that are fully in the next page */
+ buffer = (void *) ab->data +
+ (buffer - (start + offset + size));
+ end = (void *) ab->data + rest;
+
+ while (buffer < end)
+ buffer = handle_ar_packet(ctx, buffer);
- dma_free_coherent(ohci->card.device, PAGE_SIZE,
- start, start_bus);
- ar_context_add_page(ctx);
+ ctx->current_buffer = ab;
+ ctx->pointer = end;
+
+ dma_free_coherent(ohci->card.device, PAGE_SIZE,
+ start, start_bus);
+ ar_context_add_page(ctx);
+ } else {
+ ctx->pointer = start + PAGE_SIZE;
+ }
} else {
buffer = ctx->pointer;
ctx->pointer = end =
}
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
- if (ohci_quirks[i].vendor == dev->vendor &&
- (ohci_quirks[i].device == dev->device ||
- ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+ if ((ohci_quirks[i].vendor == dev->vendor) &&
+ (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
+ ohci_quirks[i].device == dev->device) &&
+ (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
+ ohci_quirks[i].revision >= dev->revision)) {
ohci->quirks = ohci_quirks[i].flags;
break;
}
* registers, see include/linux/cs5535.h.
*/
+/*
+ * Write @val to the high-bank GPIO register @reg, applying the CS5536
+ * erratum #36 read-modify-write workaround described below.
+ */
+static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
+		unsigned int reg)
+{
+	unsigned long addr = chip->base + 0x80 + reg;
+
+	/*
+	 * According to the CS5536 errata (#36), after suspend
+	 * a write to the high bank GPIO register will clear all
+	 * non-selected bits; the recommended workaround is a
+	 * read-modify-write operation.
+	 *
+	 * Don't apply this errata to the edge status GPIOs, as writing
+	 * to their lower bits will clear them.
+	 */
+	if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
+		if (val & 0xffff)
+			/* set-bits case: merge in the current low half */
+			val |= (inl(addr) & 0xffff); /* ignore the high bits */
+		else
+			/* clear-bits case: preserve bits not being cleared */
+			val |= (inl(addr) ^ (val >> 16));
+	}
+	outl(val, addr);
+}
+
static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
unsigned int reg)
{
outl(1 << offset, chip->base + reg);
else
/* high bank register */
- outl(1 << (offset - 16), chip->base + 0x80 + reg);
+ errata_outl(chip, 1 << (offset - 16), reg);
}
void cs5535_gpio_set(unsigned offset, unsigned int reg)
outl(1 << (offset + 16), chip->base + reg);
else
/* high bank register */
- outl(1 << offset, chip->base + 0x80 + reg);
+ errata_outl(chip, 1 << offset, reg);
}
void cs5535_gpio_clear(unsigned offset, unsigned int reg)
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = platform_get_drvdata(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
+#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
+	/* symbol_get() yields NULL when intel_ips is not loaded */
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
/* XXX Prevent module unload due to memory corruption bugs. */
__module_get(THIS_MODULE);
+ ips_ping_for_i915_load();
+
return 0;
out_workqueue_free:
i915_gem_lastclose(dev);
intel_cleanup_overlay(dev);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
}
intel_teardown_mchbar(dev);
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
goto done;
+ if (HAS_PCH_CPT(dev))
+ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+ else
+ hotplug_mask = SDE_HOTPLUG_MASK;
+
ret = IRQ_HANDLED;
if (dev->primary->master) {
drm_handle_vblank(dev, 1);
/* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
+ if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
if (de_iir & DE_PCU_EVENT) {
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
- u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ u32 hotplug_mask;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ if (HAS_PCH_CPT(dev)) {
+ hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+ } else {
+ hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ }
+
dev_priv->pch_irq_mask_reg = ~hotplug_mask;
dev_priv->pch_irq_enable_reg = hotplug_mask;
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
#define FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+
/* CPU: FDI_TX */
#define FDI_TXA_CTL 0x60100
#define FDI_TXB_CTL 0x61100
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, temp);
+ /* Make sure hotplug is enabled */
+ I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
(void)I915_READ(PCH_ADPA);
}
reg = I915_READ(trans_dp_ctl);
reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
reg |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
+ reg |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
+ unsigned retry;
int msg_bytes;
int reply_bytes;
int ret;
break;
}
- for (;;) {
- ret = intel_dp_aux_ch(intel_dp,
- msg, msg_bytes,
- reply, reply_bytes);
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("aux_ch nack\n");
+ DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("aux_ch defer\n");
+ DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
}
static int
extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj);
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
DRM_INFO("initialized overlay support\n");
return;
+out_unpin_bo:
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(reg_bo);
out_free:
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
struct work_struct irq_work;
struct work_struct hpd_work;
+ struct {
+ spinlock_t lock;
+ uint32_t hpd0_bits;
+ uint32_t hpd1_bits;
+ } hpd_state;
+
struct list_head vbl_waiting;
struct {
if (dev_priv->card_type >= NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
+ spin_lock_init(&dev_priv->hpd_state.lock);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
}
struct drm_connector *connector;
const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1 = 0;
+ uint32_t hpd0, hpd1;
- hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ spin_lock_irq(&dev_priv->hpd_state.lock);
+ hpd0 = dev_priv->hpd_state.hpd0_bits;
+ dev_priv->hpd_state.hpd0_bits = 0;
+ hpd1 = dev_priv->hpd_state.hpd1_bits;
+ dev_priv->hpd_state.hpd1_bits = 0;
+ spin_unlock_irq(&dev_priv->hpd_state.lock);
+
+ hpd0 &= nv_rd32(dev, 0xe050);
if (dev_priv->chipset >= 0x90)
- hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+ hpd1 &= nv_rd32(dev, 0xe070);
plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
}
- nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
-
drm_helper_hpd_irq_event(dev);
}
uint32_t delayed = 0;
if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- if (!work_pending(&dev_priv->hpd_work))
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
+ uint32_t hpd0_bits, hpd1_bits = 0;
+
+ hpd0_bits = nv_rd32(dev, 0xe054);
+ nv_wr32(dev, 0xe054, hpd0_bits);
+
+ if (dev_priv->chipset >= 0x90) {
+ hpd1_bits = nv_rd32(dev, 0xe074);
+ nv_wr32(dev, 0xe074, hpd1_bits);
+ }
+
+ spin_lock(&dev_priv->hpd_state.lock);
+ dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
+ dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
+ spin_unlock(&dev_priv->hpd_state.lock);
+
+ queue_work(dev_priv->wq, &dev_priv->hpd_work);
}
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
- atombios_blank_crtc(crtc, ATOM_ENABLE);
+ if (radeon_crtc->enabled)
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
- u32 srbm_reset = 0;
u32 grbm_reset = 0;
dev_info(rdev->dev, "GPU softreset \n");
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
-
- /* reset all the system blocks */
- srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
-
- dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- (void)RREG32(SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
evergreen_mc_resume(rdev, &save);
return 0;
}
{
int r;
+ /* reset the asic, the gfx blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed !\n");
/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
* posting will perform necessary task to bring back GPU into good
* shape.
r = radeon_atombios_init(rdev);
if (r)
return r;
+ /* reset the asic, the gfx blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
if (!evergreen_card_posted(rdev)) {
if (!rdev->bios) {
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
+ if (rdev->mc.aper_size > config_aper_size)
+ config_aper_size = rdev->mc.aper_size;
+
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
+ if (track->textures[u].lookup_disable)
+ continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture unit are disabled */
track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
unsigned height_11;
bool use_pitch;
bool enabled;
+ bool lookup_disable;
bool roundup_w;
bool roundup_h;
unsigned compress_format;
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
+ if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ track->textures[i].lookup_disable = true;
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
{
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
- u32 actual_temp = 0;
- if ((temp >> 7) & 1)
- actual_temp = 0;
- else
- actual_temp = (temp >> 1) & 0xff;
-
- return actual_temp * 1000;
+ return temp * 1000;
}
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
u32 tmp;
/* flush hdp cache so updates hit vram */
- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
- if (rdev->flags & RADEON_IS_IGP)
- base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
- tiling_config |= GROUP_SIZE(0);
- rdev->config.r600.tiling_group_size = 256;
+ tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.r600.tiling_group_size = 512;
+ else
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
- * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
- rdev->vram_scratch.ptr) {
+ rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
__func__, __LINE__, pitch);
return -EINVAL;
}
- if (!IS_ALIGNED((height / 8), track->nbanks)) {
+ if (!IS_ALIGNED((height / 8), track->npipes)) {
dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
__func__, __LINE__, height);
return -EINVAL;
__func__, __LINE__, pitch);
return -EINVAL;
}
- if ((height / 8) & (track->nbanks - 1)) {
+ if (!IS_ALIGNED((height / 8), track->npipes)) {
dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
__func__, __LINE__, height);
return -EINVAL;
#define R600_HDP_NONSURFACE_BASE 0x2c04
#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
}
return true;
}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
}
if (clk_mask && data_mask) {
+ /* system specific masks */
i2c.mask_clk_mask = clk_mask;
i2c.mask_data_mask = data_mask;
i2c.a_clk_mask = clk_mask;
i2c.en_data_mask = data_mask;
i2c.y_clk_mask = clk_mask;
i2c.y_data_mask = data_mask;
+ } else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+ (ddc_line == RADEON_MDGPIO_MASK)) {
+ /* default gpiopad masks */
+ i2c.mask_clk_mask = (0x20 << 8);
+ i2c.mask_data_mask = 0x80;
+ i2c.a_clk_mask = (0x20 << 8);
+ i2c.a_data_mask = 0x80;
+ i2c.en_clk_mask = (0x20 << 8);
+ i2c.en_data_mask = 0x80;
+ i2c.y_clk_mask = (0x20 << 8);
+ i2c.y_data_mask = 0x80;
} else {
+ /* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- clk, data);
+ (1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
rdev->mode_info.load_detect_property,
1);
}
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ connector->interlace_allowed = true;
+ /* in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
1);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- /* turn on display hw */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
-
radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
return 0;
}
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return 0;
-
+ if (!connector) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ return ATOM_ENCODER_MODE_DVI;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+ }
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
switch (radeon_encoder->encoder_id) {
break;
}
+disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
+ sprintf(i2c->adapter.name, "Radeon aux bus %s", name);
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
+
+retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
bo->gobj = gobj;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
-
-retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
+# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27)
# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
-
- gb_tiling_config |= GROUP_SIZE(0);
- rdev->config.rv770.tiling_group_size = 256;
-
+ gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.rv770.tiling_group_size = 512;
+ else
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
bool first; /* is this the first finger in the frame? */
bool valid; /* valid finger data, or just placeholder? */
bool activity; /* at least one active finger previously? */
- __u16 lastx, lasty; /* latest valid (x, y) in the frame */
+ __u16 lastx, lasty, lastz; /* latest valid (x, y, z) in the frame */
};
static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_DG_TIPPRESSURE:
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_PRESSURE);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_PRESSURE,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
return 1;
}
return 0;
if (td->valid) {
/* emit multitouch events */
input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x >> 3);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y >> 3);
input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
input_mt_sync(input);
*/
td->lastx = td->x;
td->lasty = td->y;
+ td->lastz = td->z;
}
/*
* the oldest on the panel, the one we want for single touch
*/
if (!td->first && td->activity) {
- input_event(input, EV_ABS, ABS_X, td->lastx);
- input_event(input, EV_ABS, ABS_Y, td->lasty);
+ input_event(input, EV_ABS, ABS_X, td->lastx >> 3);
+ input_event(input, EV_ABS, ABS_Y, td->lasty >> 3);
+ input_event(input, EV_ABS, ABS_PRESSURE, td->lastz);
}
if (!td->valid) {
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct adm1026_data *data = i2c_get_clientdata(client);
- int val, orig_div, new_div, shift;
+ int val, orig_div, new_div;
val = simple_strtol(buf, NULL, 10);
new_div = DIV_TO_REG(val);
- if (new_div == 0) {
- return -EINVAL;
- }
+
mutex_lock(&data->update_lock);
orig_div = data->fan_div[nr];
data->fan_div[nr] = DIV_FROM_REG(new_div);
if (nr < 4) { /* 0 <= nr < 4 */
- shift = 2 * nr;
adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3,
- ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) |
- (new_div << shift)));
+ (DIV_TO_REG(data->fan_div[0]) << 0) |
+ (DIV_TO_REG(data->fan_div[1]) << 2) |
+ (DIV_TO_REG(data->fan_div[2]) << 4) |
+ (DIV_TO_REG(data->fan_div[3]) << 6));
} else { /* 3 < nr < 8 */
- shift = 2 * (nr - 4);
adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7,
- ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) |
- (new_div << shift)));
+ (DIV_TO_REG(data->fan_div[4]) << 0) |
+ (DIV_TO_REG(data->fan_div[5]) << 2) |
+ (DIV_TO_REG(data->fan_div[6]) << 4) |
+ (DIV_TO_REG(data->fan_div[7]) << 6));
}
if (data->fan_div[nr] != orig_div) {
switch (data->type) {
case adm1027:
case adt7463:
+ case adt7468:
case emc6d100:
case emc6d102:
data->freq_map = adm1027_freq_map;
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+ If you say yes to this option, support will be included for the
+ I2C controller embedded in NVIDIA Tegra SOCs
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
if (irq) {
ret = request_irq(irq, i2c_pca_pf_handler,
- IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
+ IRQF_TRIGGER_FALLING, pdev->name, i2c);
if (ret)
goto e_reqirq;
}
--- /dev/null
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+#include <mach/pinmux.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_STATUS 0x01C
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+#define I2C_ERR_UNKNOWN_INTERRUPT 0x04
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+struct tegra_i2c_dev;
+
+struct tegra_i2c_bus {
+ struct tegra_i2c_dev *dev;
+ const struct tegra_pingroup_config *mux;
+ int mux_len;
+ unsigned long bus_clk_rate;
+ struct i2c_adapter adapter;
+};
+
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ struct rt_mutex dev_lock;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ bool irq_disabled;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ int msg_transfer_complete;
+ bool is_suspended;
+ int bus_count;
+ const struct tegra_pingroup_config *last_mux;
+ int last_mux_len;
+ unsigned long last_bus_clk;
+ struct tegra_i2c_bus busses[1];
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/* i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return readl(i2c_dev->base + reg);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_set_clk(struct tegra_i2c_dev *i2c_dev, unsigned int freq)
+{
+ clk_set_rate(i2c_dev->clk, freq * 8);
+}
+
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/*
+ * Drain the RX FIFO into the current message buffer.  Whole 32-bit
+ * words are copied first; a final partial word (1-3 bytes) is unpacked
+ * byte-wise, little-endian first.  Updates msg_buf/msg_buf_remaining.
+ * Always returns 0.
+ */
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+	u32 val;
+	int rx_fifo_avail;
+	int word;
+	u8 *buf = i2c_dev->msg_buf;
+	size_t buf_remaining = i2c_dev->msg_buf_remaining;
+	int words_to_transfer;
+
+	/* number of full words currently waiting in the RX FIFO */
+	val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+	rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+		I2C_FIFO_STATUS_RX_SHIFT;
+
+	/* full-word transfers first, bounded by both buffer and FIFO */
+	words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+	if (words_to_transfer > rx_fifo_avail)
+		words_to_transfer = rx_fifo_avail;
+
+	for (word = 0; word < words_to_transfer; word++) {
+		val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+		put_unaligned_le32(val, buf);
+		buf += BYTES_PER_FIFO_WORD;
+		buf_remaining -= BYTES_PER_FIFO_WORD;
+		rx_fifo_avail--;
+	}
+
+	/* trailing partial word: unpack the low bytes only */
+	if (rx_fifo_avail > 0 && buf_remaining > 0) {
+		int bytes_to_transfer = buf_remaining;
+		int byte;
+		BUG_ON(bytes_to_transfer > 3);
+		val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+		for (byte = 0; byte < bytes_to_transfer; byte++) {
+			*buf++ = val & 0xFF;
+			val >>= 8;
+		}
+		buf_remaining -= bytes_to_transfer;
+		rx_fifo_avail--;
+	}
+	/* FIFO must be empty whenever the buffer still has room */
+	BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+	i2c_dev->msg_buf_remaining = buf_remaining;
+	i2c_dev->msg_buf = buf;
+	return 0;
+}
+
+/*
+ * Fill the TX FIFO from the current message buffer: whole 32-bit
+ * little-endian words first, then a packed partial word for the last
+ * 1-3 bytes.  Updates msg_buf/msg_buf_remaining.  Always returns 0.
+ */
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+	u32 val;
+	int tx_fifo_avail;
+	int word;
+	u8 *buf = i2c_dev->msg_buf;
+	size_t buf_remaining = i2c_dev->msg_buf_remaining;
+	int words_to_transfer;
+
+	/* free word slots in the TX FIFO */
+	val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+	tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+		I2C_FIFO_STATUS_TX_SHIFT;
+
+	words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+	if (words_to_transfer > tx_fifo_avail)
+		words_to_transfer = tx_fifo_avail;
+
+	for (word = 0; word < words_to_transfer; word++) {
+		val = get_unaligned_le32(buf);
+		i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+		buf += BYTES_PER_FIFO_WORD;
+		buf_remaining -= BYTES_PER_FIFO_WORD;
+		tx_fifo_avail--;
+	}
+
+	/* pack the last 1-3 bytes into the low end of one word */
+	if (tx_fifo_avail > 0 && buf_remaining > 0) {
+		int bytes_to_transfer = buf_remaining;
+		int byte;
+		BUG_ON(bytes_to_transfer > 3);
+		val = 0;
+		for (byte = 0; byte < bytes_to_transfer; byte++)
+			val |= (*buf++) << (byte * 8);
+		i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+		buf_remaining -= bytes_to_transfer;
+		tx_fifo_avail--;
+	}
+	/* FIFO must be full whenever data remains unsent */
+	BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+	i2c_dev->msg_buf_remaining = buf_remaining;
+	i2c_dev->msg_buf = buf;
+	return 0;
+}
+
+/* One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+/*
+ * Extra setup for the DVC instance: switch the block into
+ * software-programmed I2C mode and enable its interrupt delivery.
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+	u32 val = 0;
+	val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+	val |= DVC_CTRL_REG3_SW_PROG;
+	val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+	dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+	val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+	val |= DVC_CTRL_REG1_INTR_EN;
+	dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+/*
+ * Reset and (re)program the controller: packet-mode master FSM, bus
+ * clock rate, FIFO trigger levels.  Re-enables the IRQ if a spurious
+ * interrupt storm had previously forced it off.
+ *
+ * Returns 0 on success or -ETIMEDOUT if the FIFO flush timed out.
+ */
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+	u32 val;
+	int err = 0;
+
+	clk_enable(i2c_dev->clk);
+
+	/* pulse the module reset line */
+	tegra_periph_reset_assert(i2c_dev->clk);
+	udelay(2);
+	tegra_periph_reset_deassert(i2c_dev->clk);
+
+	if (i2c_dev->is_dvc)
+		tegra_dvc_init(i2c_dev);
+
+	val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
+		(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+	i2c_writel(i2c_dev, val, I2C_CNFG);
+	i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+	tegra_i2c_set_clk(i2c_dev, i2c_dev->last_bus_clk);
+
+	val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+		0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+	i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+	if (tegra_i2c_flush_fifos(i2c_dev))
+		err = -ETIMEDOUT;
+
+	clk_disable(i2c_dev->clk);
+
+	if (i2c_dev->irq_disabled) {
+		i2c_dev->irq_disabled = 0;
+		enable_irq(i2c_dev->irq);
+	}
+
+	/* was "return 0", which silently discarded the flush timeout */
+	return err;
+}
+
+/*
+ * Interrupt handler: services NO_ACK/arbitration errors, RX/TX FIFO
+ * data requests and packet completion, then acknowledges the status
+ * bits.  A status of 0 (spurious interrupt) disables the IRQ line
+ * until tegra_i2c_init() re-enables it, to avoid an interrupt storm.
+ */
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+	u32 status;
+	const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+	struct tegra_i2c_dev *i2c_dev = dev_id;
+
+	status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+	if (status == 0) {
+		/* spurious interrupt: log state and shut the line down */
+		dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
+			 i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
+			 i2c_readl(i2c_dev, I2C_STATUS),
+			 i2c_readl(i2c_dev, I2C_CNFG));
+		i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
+
+		if (! i2c_dev->irq_disabled) {
+			disable_irq_nosync(i2c_dev->irq);
+			i2c_dev->irq_disabled = 1;
+		}
+
+		complete(&i2c_dev->msg_complete);
+		goto err;
+	}
+
+	if (unlikely(status & status_err)) {
+		if (status & I2C_INT_NO_ACK)
+			i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+		if (status & I2C_INT_ARBITRATION_LOST)
+			i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+		complete(&i2c_dev->msg_complete);
+		goto err;
+	}
+
+	if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+		if (i2c_dev->msg_buf_remaining)
+			tegra_i2c_empty_rx_fifo(i2c_dev);
+		else
+			BUG();
+	}
+
+	if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+		if (i2c_dev->msg_buf_remaining)
+			tegra_i2c_fill_tx_fifo(i2c_dev);
+		else
+			tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+	}
+
+	if (status & I2C_INT_PACKET_XFER_COMPLETE)
+		i2c_dev->msg_transfer_complete = 1;
+
+	/* wake the waiter only once both the packet and the buffer are done */
+	if (i2c_dev->msg_transfer_complete && !i2c_dev->msg_buf_remaining)
+		complete(&i2c_dev->msg_complete);
+	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+	if (i2c_dev->is_dvc)
+		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+	return IRQ_HANDLED;
+err:
+	/* An error occurred, mask all interrupts */
+	tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+		I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+		I2C_INT_RX_FIFO_DATA_REQ);
+	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+	if (i2c_dev->is_dvc)
+		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Transfer one i2c_msg in packet mode: write the three-word packet
+ * header, prime the TX FIFO for writes, unmask the relevant
+ * interrupts and wait (with timeout) for the ISR to signal
+ * completion.  @stop selects STOP vs. repeated-START after this
+ * message.  Returns 0, -EINVAL (empty message), -ETIMEDOUT,
+ * -EREMOTEIO (NAK) or -EIO.
+ */
+static int tegra_i2c_xfer_msg(struct tegra_i2c_bus *i2c_bus,
+	struct i2c_msg *msg, int stop)
+{
+	struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
+	u32 packet_header;
+	u32 int_mask;
+	int ret;
+
+	tegra_i2c_flush_fifos(i2c_dev);
+	/* clear any stale status bits before starting */
+	i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+	if (msg->len == 0)
+		return -EINVAL;
+
+	i2c_dev->msg_buf = msg->buf;
+	i2c_dev->msg_buf_remaining = msg->len;
+	i2c_dev->msg_err = I2C_ERR_NONE;
+	i2c_dev->msg_transfer_complete = 0;
+	i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+	INIT_COMPLETION(i2c_dev->msg_complete);
+
+	/* packet header word 0: protocol, controller id, packet id */
+	packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+			PACKET_HEADER0_PROTOCOL_I2C |
+			(i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+			(1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+	i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+	/* packet header word 1: payload size minus one */
+	packet_header = msg->len - 1;
+	i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+	/* packet header word 2: slave address and transfer flags */
+	packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+	packet_header |= I2C_HEADER_IE_ENABLE;
+	if (!stop)
+		packet_header |= I2C_HEADER_REPEAT_START;
+	if (msg->flags & I2C_M_TEN)
+		packet_header |= I2C_HEADER_10BIT_ADDR;
+	if (msg->flags & I2C_M_IGNORE_NAK)
+		packet_header |= I2C_HEADER_CONT_ON_NAK;
+	if (msg->flags & I2C_M_RD)
+		packet_header |= I2C_HEADER_READ;
+	i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+	if (!(msg->flags & I2C_M_RD))
+		tegra_i2c_fill_tx_fifo(i2c_dev);
+
+	int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+	if (msg->flags & I2C_M_RD)
+		int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+	else if (i2c_dev->msg_buf_remaining)
+		int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+	tegra_i2c_unmask_irq(i2c_dev, int_mask);
+	pr_debug("unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK));
+
+	ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+	tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+	if (WARN_ON(ret == 0)) {
+		dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+		/* reset the controller to a known state after a hang */
+		tegra_i2c_init(i2c_dev);
+		return -ETIMEDOUT;
+	}
+
+	pr_debug("transfer complete: %d %d %d\n", ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+	if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+		return 0;
+
+	/* error path: reinitialize, then map the error to an errno */
+	tegra_i2c_init(i2c_dev);
+	if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+		if (msg->flags & I2C_M_IGNORE_NAK)
+			return 0;
+		return -EREMOTEIO;
+	}
+
+	return -EIO;
+}
+
+/*
+ * i2c_algorithm.master_xfer: serialize access to the shared
+ * controller, reprogram pinmux/bus clock when switching between the
+ * muxed logical busses, then run each message.  Returns the number of
+ * messages transferred or a negative errno.
+ */
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+	int num)
+{
+	struct tegra_i2c_bus *i2c_bus = i2c_get_adapdata(adap);
+	struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
+	int i;
+	int ret = 0;
+
+	if (i2c_dev->is_suspended)
+		return -EBUSY;
+
+	rt_mutex_lock(&i2c_dev->dev_lock);
+
+	/* park the previous bus's pins, route the controller to ours */
+	if (i2c_dev->last_mux != i2c_bus->mux) {
+		tegra_pinmux_set_safe_pinmux_table(i2c_dev->last_mux,
+			i2c_dev->last_mux_len);
+		tegra_pinmux_config_pinmux_table(i2c_bus->mux,
+			i2c_bus->mux_len);
+		i2c_dev->last_mux = i2c_bus->mux;
+		i2c_dev->last_mux_len = i2c_bus->mux_len;
+	}
+
+	if (i2c_dev->last_bus_clk != i2c_bus->bus_clk_rate) {
+		tegra_i2c_set_clk(i2c_dev, i2c_bus->bus_clk_rate);
+		i2c_dev->last_bus_clk = i2c_bus->bus_clk_rate;
+	}
+
+	clk_enable(i2c_dev->clk);
+	for (i = 0; i < num; i++) {
+		/* STOP only after the final message; repeated START otherwise */
+		int stop = (i == (num - 1)) ? 1 : 0;
+		ret = tegra_i2c_xfer_msg(i2c_bus, &msgs[i], stop);
+		if (ret)
+			goto out;
+	}
+	ret = i;
+
+out:
+	clk_disable(i2c_dev->clk);
+
+	rt_mutex_unlock(&i2c_dev->dev_lock);
+
+	return ret;
+}
+
+/* Report adapter capabilities: plain I2C only, no SMBus emulation. */
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+	/* FIXME: For now keep it simple and don't support protocol mangling
+	   features */
+	return I2C_FUNC_I2C;
+}
+
+/* i2c-core algorithm hooks for all Tegra adapters */
+static const struct i2c_algorithm tegra_i2c_algo = {
+	.master_xfer	= tegra_i2c_xfer,
+	.functionality	= tegra_i2c_func,
+};
+
+/*
+ * Probe: map the register window, acquire clocks and the IRQ,
+ * initialize the controller and register one i2c_adapter per
+ * configured logical bus.
+ *
+ * Fixes vs. original: the ioremap-failure path leaked the requested
+ * mem region; the error labels released the region before unmapping
+ * (so some paths leaked the region or skipped iounmap); clk_get()
+ * failure was tested with !clk although clk_get returns ERR_PTR.
+ */
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+	struct tegra_i2c_dev *i2c_dev;
+	struct tegra_i2c_platform_data *plat = pdev->dev.platform_data;
+	struct resource *res;
+	struct resource *iomem;
+	struct clk *clk;
+	struct clk *i2c_clk;
+	void *base;
+	int irq;
+	int nbus;
+	int i = 0;
+	int ret = 0;
+
+	if (!plat) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	if (plat->bus_count <= 0 || plat->adapter_nr < 0) {
+		dev_err(&pdev->dev, "invalid platform data?\n");
+		return -ENODEV;
+	}
+
+	WARN_ON(plat->bus_count > TEGRA_I2C_MAX_BUS);
+	nbus = min(TEGRA_I2C_MAX_BUS, plat->bus_count);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		return -ENODEV;
+	}
+	iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (!iomem) {
+		dev_err(&pdev->dev, "I2C region already claimed\n");
+		return -EBUSY;
+	}
+
+	base = ioremap(iomem->start, resource_size(iomem));
+	if (!base) {
+		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
+		ret = -ENOMEM;
+		goto err_release_region;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		ret = -ENODEV;
+		goto err_iounmap;
+	}
+	irq = res->start;
+
+	/* clk_get() reports failure via ERR_PTR, never NULL */
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		goto err_iounmap;
+	}
+
+	i2c_clk = clk_get(&pdev->dev, "i2c");
+	if (IS_ERR(i2c_clk)) {
+		ret = PTR_ERR(i2c_clk);
+		goto err_clk_put;
+	}
+
+	/* one allocation: device struct plus trailing per-bus array */
+	i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev) +
+			  (nbus-1) * sizeof(struct tegra_i2c_bus), GFP_KERNEL);
+	if (!i2c_dev) {
+		ret = -ENOMEM;
+		goto err_i2c_clk_put;
+	}
+
+	i2c_dev->base = base;
+	i2c_dev->clk = clk;
+	i2c_dev->i2c_clk = i2c_clk;
+	i2c_dev->iomem = iomem;
+	i2c_dev->irq = irq;
+	i2c_dev->cont_id = pdev->id;
+	i2c_dev->dev = &pdev->dev;
+	i2c_dev->last_bus_clk = plat->bus_clk_rate[0] ?: 100000;
+	rt_mutex_init(&i2c_dev->dev_lock);
+
+	i2c_dev->is_dvc = plat->is_dvc;
+	init_completion(&i2c_dev->msg_complete);
+
+	platform_set_drvdata(pdev, i2c_dev);
+
+	ret = tegra_i2c_init(i2c_dev);
+	if (ret)
+		goto err_free;
+
+	ret = request_irq(i2c_dev->irq, tegra_i2c_isr, IRQF_DISABLED,
+			  pdev->name, i2c_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+		goto err_free;
+	}
+
+	clk_enable(i2c_dev->i2c_clk);
+
+	for (i = 0; i < nbus; i++) {
+		struct tegra_i2c_bus *i2c_bus = &i2c_dev->busses[i];
+
+		i2c_bus->dev = i2c_dev;
+		i2c_bus->mux = plat->bus_mux[i];
+		i2c_bus->mux_len = plat->bus_mux_len[i];
+		i2c_bus->bus_clk_rate = plat->bus_clk_rate[i] ?: 100000;
+
+		i2c_bus->adapter.algo = &tegra_i2c_algo;
+		i2c_set_adapdata(&i2c_bus->adapter, i2c_bus);
+		i2c_bus->adapter.owner = THIS_MODULE;
+		i2c_bus->adapter.class = I2C_CLASS_HWMON;
+		strlcpy(i2c_bus->adapter.name, "Tegra I2C adapter",
+			sizeof(i2c_bus->adapter.name));
+		i2c_bus->adapter.dev.parent = &pdev->dev;
+		i2c_bus->adapter.nr = plat->adapter_nr + i;
+		ret = i2c_add_numbered_adapter(&i2c_bus->adapter);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+			goto err_del_bus;
+		}
+		i2c_dev->bus_count++;
+	}
+
+	return 0;
+
+err_del_bus:
+	while (i2c_dev->bus_count--)
+		i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
+	free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+	kfree(i2c_dev);
+err_i2c_clk_put:
+	clk_put(i2c_clk);
+err_clk_put:
+	clk_put(clk);
+err_iounmap:
+	iounmap(base);
+err_release_region:
+	release_mem_region(iomem->start, resource_size(iomem));
+	return ret;
+}
+
+/* Remove: tear down adapters and release IRQ, clocks, and MMIO. */
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+	struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+	while (i2c_dev->bus_count--)
+		i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
+
+	free_irq(i2c_dev->irq, i2c_dev);
+	clk_put(i2c_dev->i2c_clk);
+	clk_put(i2c_dev->clk);
+	release_mem_region(i2c_dev->iomem->start,
+		resource_size(i2c_dev->iomem));
+	iounmap(i2c_dev->base);
+	kfree(i2c_dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend: mark the controller busy.  Taking dev_lock first ensures
+ * any in-flight transfer completes before the flag is set; subsequent
+ * tegra_i2c_xfer() calls then fail with -EBUSY.
+ */
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+	rt_mutex_lock(&i2c_dev->dev_lock);
+	i2c_dev->is_suspended = true;
+	rt_mutex_unlock(&i2c_dev->dev_lock);
+
+	return 0;
+}
+
+/*
+ * Resume: reinitialize the controller hardware and clear the
+ * suspended flag.  On init failure the flag stays set so transfers
+ * keep returning -EBUSY.
+ */
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+	struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+	int ret;
+
+	rt_mutex_lock(&i2c_dev->dev_lock);
+
+	ret = tegra_i2c_init(i2c_dev);
+
+	if (ret) {
+		rt_mutex_unlock(&i2c_dev->dev_lock);
+		return ret;
+	}
+
+	i2c_dev->is_suspended = false;
+
+	rt_mutex_unlock(&i2c_dev->dev_lock);
+
+	return 0;
+}
+#endif
+
+/* Platform driver glue; PM callbacks compiled in only with CONFIG_PM. */
+static struct platform_driver tegra_i2c_driver = {
+	.probe   = tegra_i2c_probe,
+	.remove  = tegra_i2c_remove,
+#ifdef CONFIG_PM
+	.suspend = tegra_i2c_suspend,
+	.resume  = tegra_i2c_resume,
+#endif
+	.driver  = {
+		.name  = "tegra-i2c",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module entry point: register the platform driver. */
+static int __init tegra_i2c_init_driver(void)
+{
+	return platform_driver_register(&tegra_i2c_driver);
+}
+/*
+ * Some drivers (hdmi) depend on i2c busses already being present,
+ * so init at subsys time.
+ */
+subsys_initcall(tegra_i2c_init_driver);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit tegra_i2c_exit_driver(void)
+{
+	platform_driver_unregister(&tegra_i2c_driver);
+}
+module_exit(tegra_i2c_exit_driver);
case 0x1C: /* 28 - Atom Processor */
case 0x26: /* 38 - Lincroft Atom Processor */
- lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
cpuidle_state_table = atom_cstates;
break;
#ifdef FUTURE_USE
return ret ? ret : in_len;
}
+/*
+ * Copy one completion (struct ib_wc) to user space in the
+ * struct ib_uverbs_wc wire format.  Returns 0 or -EFAULT.
+ */
+static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+{
+	struct ib_uverbs_wc tmp;
+
+	tmp.wr_id		= wc->wr_id;
+	tmp.status		= wc->status;
+	tmp.opcode		= wc->opcode;
+	tmp.vendor_err		= wc->vendor_err;
+	tmp.byte_len		= wc->byte_len;
+	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
+	tmp.qp_num		= wc->qp->qp_num;
+	tmp.src_qp		= wc->src_qp;
+	tmp.wc_flags		= wc->wc_flags;
+	tmp.pkey_index		= wc->pkey_index;
+	tmp.slid		= wc->slid;
+	tmp.sl			= wc->sl;
+	tmp.dlid_path_bits	= wc->dlid_path_bits;
+	tmp.port_num		= wc->port_num;
+	/* zero the pad so no kernel stack bytes leak to user space */
+	tmp.reserved		= 0;
+
+	if (copy_to_user(dest, &tmp, sizeof tmp))
+		return -EFAULT;
+
+	return 0;
+}
+
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_poll_cq cmd;
- struct ib_uverbs_poll_cq_resp *resp;
+ struct ib_uverbs_poll_cq_resp resp;
+ u8 __user *header_ptr;
+ u8 __user *data_ptr;
struct ib_cq *cq;
- struct ib_wc *wc;
- int ret = 0;
- int i;
- int rsize;
+ struct ib_wc wc;
+ int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
- if (!wc)
- return -ENOMEM;
-
- rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
- resp = kmalloc(rsize, GFP_KERNEL);
- if (!resp) {
- ret = -ENOMEM;
- goto out_wc;
- }
-
cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
- if (!cq) {
- ret = -EINVAL;
- goto out;
- }
+ if (!cq)
+ return -EINVAL;
- resp->count = ib_poll_cq(cq, cmd.ne, wc);
+ /* we copy a struct ib_uverbs_poll_cq_resp to user space */
+ header_ptr = (void __user *)(unsigned long) cmd.response;
+ data_ptr = header_ptr + sizeof resp;
- put_cq_read(cq);
+ memset(&resp, 0, sizeof resp);
+ while (resp.count < cmd.ne) {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0)
+ goto out_put;
+ if (!ret)
+ break;
+
+ ret = copy_wc_to_user(data_ptr, &wc);
+ if (ret)
+ goto out_put;
- for (i = 0; i < resp->count; i++) {
- resp->wc[i].wr_id = wc[i].wr_id;
- resp->wc[i].status = wc[i].status;
- resp->wc[i].opcode = wc[i].opcode;
- resp->wc[i].vendor_err = wc[i].vendor_err;
- resp->wc[i].byte_len = wc[i].byte_len;
- resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data;
- resp->wc[i].qp_num = wc[i].qp->qp_num;
- resp->wc[i].src_qp = wc[i].src_qp;
- resp->wc[i].wc_flags = wc[i].wc_flags;
- resp->wc[i].pkey_index = wc[i].pkey_index;
- resp->wc[i].slid = wc[i].slid;
- resp->wc[i].sl = wc[i].sl;
- resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
- resp->wc[i].port_num = wc[i].port_num;
+ data_ptr += sizeof(struct ib_uverbs_wc);
+ ++resp.count;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
+ if (copy_to_user(header_ptr, &resp, sizeof resp)) {
ret = -EFAULT;
+ goto out_put;
+ }
-out:
- kfree(resp);
+ ret = in_len;
-out_wc:
- kfree(wc);
- return ret ? ret : in_len;
+out_put:
+ put_cq_read(cq);
+ return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
-#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
/* synaptics modes query bits */
DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
},
+ {
+ /* Sony Vaio VPCZ122GX */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"),
+ },
+ },
{
/* Sony Vaio FS-115b */
.matches = {
To compile this driver as a module, choose M here: the
module will be called migor_ts.
+config TOUCHSCREEN_PANJIT_I2C
+ tristate "PANJIT I2C touchscreen driver"
+ depends on I2C
+ default n
+ help
+ Say Y here to enable PANJIT I2C capacitive touchscreen support,
+ covering devices such as the MGG1010AI06 and EGG1010AI06
+
+ If unsure, say N
+
+ To compile this driver as a module, choose M here: the module will
+ be called panjit_i2c.
+
config TOUCHSCREEN_SYNAPTICS_I2C_RMI
tristate "Synaptics i2c touchscreen"
depends on I2C
obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o
obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PANJIT_I2C) += panjit_i2c.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
--- /dev/null
+/*
+ * drivers/input/touchscreen/panjit_i2c.c
+ *
+ * Touchscreen class input driver for Panjit touch panel using I2C bus
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define CSR 0x00
+ #define CSR_SCAN_EN (1 << 3)
+ #define CSR_SLEEP_EN (1 << 7)
+#define C_FLAG 0x01
+#define X1_H 0x03
+
+#define DRIVER_NAME "panjit_touch"
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void pj_early_suspend(struct early_suspend *h);
+static void pj_late_resume(struct early_suspend *h);
+#endif
+
+/* Per-device driver state. */
+struct pj_data {
+	struct input_dev	*input_dev;	/* registered input device */
+	struct i2c_client	*client;	/* underlying I2C slave */
+	int			gpio_reset;	/* reset GPIO, or negative if unused */
+	struct early_suspend	early_suspend;	/* Android early-suspend hooks */
+};
+
+/* Event record as laid out by the controller, starting at X1_H. */
+struct pj_event {
+	__be16	coord[2][2];	/* [finger][x,y], big-endian */
+	__u8	fingers;	/* number of active contacts */
+	__u8	gesture;	/* gesture code (unused by this driver) */
+};
+
+/*
+ * Byte-view overlay used for the block read in the IRQ handler.
+ * The raw-byte array must match the wire record (struct pj_event);
+ * the original sized it by sizeof(struct pj_data) — the driver-state
+ * struct containing pointers — so the handler read too many bytes
+ * from the device.
+ */
+union pj_buff {
+	struct pj_event data;
+	unsigned char buff[sizeof(struct pj_event)];
+};
+
+/*
+ * Pulse the panel's reset GPIO (50 ms high, then low, then settle).
+ * No-op when no reset GPIO was configured.
+ */
+static void pj_reset(struct pj_data *touch)
+{
+	if (touch->gpio_reset < 0)
+		return;
+
+	gpio_set_value(touch->gpio_reset, 1);
+	msleep(50);
+	gpio_set_value(touch->gpio_reset, 0);
+	msleep(50);
+}
+
+/*
+ * Threaded IRQ handler: read the full event record starting at X1_H,
+ * acknowledge the interrupt by clearing C_FLAG, then report BTN_TOUCH/
+ * BTN_2 plus up to two MT contacts.  May sleep (SMBus transfers), so
+ * it is registered as the threaded half of request_threaded_irq().
+ */
+static irqreturn_t pj_irq(int irq, void *dev_id)
+{
+	struct pj_data *touch = dev_id;
+	struct i2c_client *client = touch->client;
+	union pj_buff event;
+	int ret, i;
+
+	ret = i2c_smbus_read_i2c_block_data(client, X1_H,
+		sizeof(event.buff), event.buff);
+	if (WARN_ON(ret < 0)) {
+		dev_err(&client->dev, "error %d reading event data\n", ret);
+		return IRQ_NONE;
+	}
+	/* writing 0 to C_FLAG clears the pending interrupt on the panel */
+	ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+	if (WARN_ON(ret < 0)) {
+		dev_err(&client->dev, "error %d clearing interrupt\n", ret);
+		return IRQ_NONE;
+	}
+
+	input_report_key(touch->input_dev, BTN_TOUCH,
+		(event.data.fingers == 1 || event.data.fingers == 2));
+	input_report_key(touch->input_dev, BTN_2, (event.data.fingers == 2));
+
+	/* 0 or >2 fingers: still sync to flush the key state above */
+	if (!event.data.fingers || (event.data.fingers > 2))
+		goto out;
+
+	for (i = 0; i < event.data.fingers; i++) {
+		input_report_abs(touch->input_dev, ABS_MT_POSITION_X,
+			__be16_to_cpu(event.data.coord[i][0]));
+		input_report_abs(touch->input_dev, ABS_MT_POSITION_Y,
+			__be16_to_cpu(event.data.coord[i][1]));
+		input_report_abs(touch->input_dev, ABS_MT_TRACKING_ID, i + 1);
+		input_mt_sync(touch->input_dev);
+	}
+
+out:
+	input_sync(touch->input_dev);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: reset the panel, enable scanning, register the input device
+ * and the threaded IRQ, and (optionally) hook Android early-suspend.
+ *
+ * Fixes vs. original: the ABS_* capability bits were set in keybit
+ * instead of absbit, and the fail_irq path called
+ * input_unregister_device() and then fell through to
+ * input_free_device() on the same device (free after unregister).
+ */
+static int pj_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct panjit_i2c_ts_platform_data *pdata = client->dev.platform_data;
+	struct pj_data *touch = NULL;
+	struct input_dev *input_dev = NULL;
+	int ret = 0;
+
+	touch = kzalloc(sizeof(struct pj_data), GFP_KERNEL);
+	if (!touch) {
+		dev_err(&client->dev, "%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* negative means "no reset GPIO available" */
+	touch->gpio_reset = -EINVAL;
+
+	if (pdata) {
+		ret = gpio_request(pdata->gpio_reset, "panjit_reset");
+		if (!ret) {
+			ret = gpio_direction_output(pdata->gpio_reset, 1);
+			if (ret < 0)
+				gpio_free(pdata->gpio_reset);
+		}
+
+		if (!ret)
+			touch->gpio_reset = pdata->gpio_reset;
+		else
+			dev_warn(&client->dev, "unable to configure GPIO\n");
+	}
+
+	input_dev = input_allocate_device();
+	if (!input_dev) {
+		dev_err(&client->dev, "%s: no memory\n", __func__);
+		kfree(touch);
+		return -ENOMEM;
+	}
+
+	touch->client = client;
+	i2c_set_clientdata(client, touch);
+
+	pj_reset(touch);
+
+	/* clear interrupt */
+	ret = i2c_smbus_write_byte_data(touch->client, C_FLAG, 0);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s: clear interrupt failed\n",
+			__func__);
+		goto fail_i2c_or_register;
+	}
+
+	/* enable scanning */
+	ret = i2c_smbus_write_byte_data(touch->client, CSR, CSR_SCAN_EN);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s: enable interrupt failed\n",
+			__func__);
+		goto fail_i2c_or_register;
+	}
+
+	touch->input_dev = input_dev;
+	touch->input_dev->name = DRIVER_NAME;
+
+	set_bit(EV_SYN, touch->input_dev->evbit);
+	set_bit(EV_KEY, touch->input_dev->evbit);
+	set_bit(EV_ABS, touch->input_dev->evbit);
+	set_bit(BTN_TOUCH, touch->input_dev->keybit);
+	set_bit(BTN_2, touch->input_dev->keybit);
+
+	/* expose multi-touch capabilities (absbit, not keybit) */
+	set_bit(ABS_MT_POSITION_X, touch->input_dev->absbit);
+	set_bit(ABS_MT_POSITION_Y, touch->input_dev->absbit);
+	set_bit(ABS_X, touch->input_dev->absbit);
+	set_bit(ABS_Y, touch->input_dev->absbit);
+
+	/* all coordinates are reported in 0..4095 */
+	input_set_abs_params(touch->input_dev, ABS_X, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_Y, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_HAT0X, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_HAT0Y, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_HAT1X, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_HAT1Y, 0, 4095, 0, 0);
+
+	input_set_abs_params(touch->input_dev, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+	input_set_abs_params(touch->input_dev, ABS_MT_TRACKING_ID, 0, 2, 1, 0);
+
+	ret = input_register_device(touch->input_dev);
+	if (ret) {
+		dev_err(&client->dev, "%s: input_register_device failed\n",
+			__func__);
+		goto fail_i2c_or_register;
+	}
+
+	/* get the irq */
+	ret = request_threaded_irq(touch->client->irq, NULL, pj_irq,
+				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+				   DRIVER_NAME, touch);
+	if (ret) {
+		dev_err(&client->dev, "%s: request_irq(%d) failed\n",
+			__func__, touch->client->irq);
+		goto fail_irq;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	touch->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	touch->early_suspend.suspend = pj_early_suspend;
+	touch->early_suspend.resume = pj_late_resume;
+	register_early_suspend(&touch->early_suspend);
+#endif
+	dev_info(&client->dev, "%s: initialized\n", __func__);
+	return 0;
+
+fail_irq:
+	/* unregister drops the final reference; must not free it again */
+	input_unregister_device(touch->input_dev);
+	input_dev = NULL;
+
+fail_i2c_or_register:
+	if (touch->gpio_reset >= 0)
+		gpio_free(touch->gpio_reset);
+
+	input_free_device(input_dev);	/* no-op when input_dev is NULL */
+	kfree(touch);
+	return ret;
+}
+
+/*
+ * Suspend: stop the IRQ, then put the panel into deep sleep by
+ * writing CSR_SLEEP_EN (which also clears CSR_SCAN_EN).
+ * NOTE(review): on I2C write failure this returns with the IRQ still
+ * disabled — verify whether the caller recovers via pj_resume().
+ */
+static int pj_suspend(struct i2c_client *client, pm_message_t state)
+{
+	struct pj_data *touch = i2c_get_clientdata(client);
+	int ret;
+
+	if (WARN_ON(!touch))
+		return -EINVAL;
+
+	disable_irq(client->irq);
+
+	/* disable scanning and enable deep sleep */
+	ret = i2c_smbus_write_byte_data(client, CSR, CSR_SLEEP_EN);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s: sleep enable fail\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Resume: hardware-reset the panel, clear any stale interrupt flag,
+ * re-enable scanning, then re-enable the IRQ.  The IRQ stays disabled
+ * if the panel does not respond.
+ */
+static int pj_resume(struct i2c_client *client)
+{
+	struct pj_data *touch = i2c_get_clientdata(client);
+	int ret = 0;
+
+	if (WARN_ON(!touch))
+		return -EINVAL;
+
+	pj_reset(touch);
+
+	/* enable scanning and disable deep sleep */
+	ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+	if (ret >= 0)
+		ret = i2c_smbus_write_byte_data(client, CSR, CSR_SCAN_EN);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s: scan enable fail\n", __func__);
+		return ret;
+	}
+
+	enable_irq(client->irq);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* Android early-suspend hook: delegate to pj_suspend(), log failure. */
+static void pj_early_suspend(struct early_suspend *es)
+{
+	struct pj_data *touch;
+	touch = container_of(es, struct pj_data, early_suspend);
+
+	if (pj_suspend(touch->client, PMSG_SUSPEND) != 0)
+		dev_err(&touch->client->dev, "%s: failed\n", __func__);
+}
+
+/* Android late-resume hook: delegate to pj_resume(), log failure. */
+static void pj_late_resume(struct early_suspend *es)
+{
+	struct pj_data *touch;
+	touch = container_of(es, struct pj_data, early_suspend);
+
+	if (pj_resume(touch->client) != 0)
+		dev_err(&touch->client->dev, "%s: failed\n", __func__);
+}
+#endif
+
+/*
+ * Remove: unhook early-suspend, release the IRQ and GPIO, unregister
+ * the input device and free driver state.
+ *
+ * Fix vs. original: input_unregister_device() drops the final
+ * reference to the input device, so the subsequent
+ * input_free_device() was a double-put; it has been removed.
+ */
+static int pj_remove(struct i2c_client *client)
+{
+	struct pj_data *touch = i2c_get_clientdata(client);
+
+	if (!touch)
+		return -EINVAL;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&touch->early_suspend);
+#endif
+	free_irq(touch->client->irq, touch);
+	if (touch->gpio_reset >= 0)
+		gpio_free(touch->gpio_reset);
+	input_unregister_device(touch->input_dev);
+	kfree(touch);
+	return 0;
+}
+
+/* I2C device IDs this driver binds to. */
+static const struct i2c_device_id panjit_ts_id[] = {
+	{ DRIVER_NAME, 0 },
+	{ }
+};
+
+/*
+ * Driver glue.  When early-suspend is available, PM is handled via
+ * the early_suspend hooks instead of the legacy suspend/resume ops.
+ */
+static struct i2c_driver panjit_driver = {
+	.probe		= pj_probe,
+	.remove		= pj_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= pj_suspend,
+	.resume		= pj_resume,
+#endif
+	.id_table	= panjit_ts_id,
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+};
+
+/*
+ * Module entry point: register with the I2C core.
+ * Annotated __init (not __devinit as in the original): this function
+ * is called once from module_init, not at device-probe time.
+ */
+static int __init panjit_init(void)
+{
+	int e;
+
+	e = i2c_add_driver(&panjit_driver);
+	if (e != 0) {
+		pr_err("%s: failed to register with I2C bus with "
+		       "error: 0x%x\n", __func__, e);
+	}
+	return e;
+}
+
+/* Module exit point: unregister from the I2C core. */
+static void __exit panjit_exit(void)
+{
+	i2c_del_driver(&panjit_driver);
+}
+
+module_init(panjit_init);
+module_exit(panjit_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Panjit I2C touch driver");
return;
}
- if (ucs->retry_cmd_in++ < BAS_RETRY) {
- dev_notice(cs->dev, "control read: timeout, retry %d\n",
- ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
- return;
- } else {
+ if (ucs->retry_cmd_in++ >= BAS_RETRY) {
dev_err(cs->dev,
"control read: timeout, giving up after %d tries\n",
ucs->retry_cmd_in);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ error_reset(cs);
+ return;
+ }
+
+ gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
+ __func__, ucs->retry_cmd_in);
+ rc = atread_submit(cs, BAS_TIMEOUT);
+ if (rc < 0) {
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ if (rc != -ENODEV)
+ error_reset(cs);
}
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- error_reset(cs);
}
/* read_ctrl_callback
struct cardstate *cs = inbuf->cs;
struct bas_cardstate *ucs = cs->hw.bas;
int status = urb->status;
- int have_data = 0;
unsigned numbytes;
int rc;
update_basstate(ucs, 0, BS_ATRDPEND);
wake_up(&ucs->waitqueue);
-
- if (!ucs->rcvbuf_size) {
- dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
- return;
- }
-
del_timer(&ucs->timer_cmd_in);
switch (status) {
numbytes = ucs->rcvbuf_size;
}
- /* copy received bytes to inbuf */
- have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
-
- if (unlikely(numbytes < ucs->rcvbuf_size)) {
- /* incomplete - resubmit for remaining bytes */
- ucs->rcvbuf_size -= numbytes;
- ucs->retry_cmd_in = 0;
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
- return;
- error_reset(cs);
+ /* copy received bytes to inbuf, notify event layer */
+ if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
+ gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
}
break;
case -EINPROGRESS: /* pending */
case -ENODEV: /* device removed */
case -ESHUTDOWN: /* device shut down */
- /* no action necessary */
+ /* no further action necessary */
gig_dbg(DEBUG_USBREQ, "%s: %s",
__func__, get_usb_statmsg(status));
break;
- default: /* severe trouble */
- dev_warn(cs->dev, "control read: %s\n",
- get_usb_statmsg(status));
+ default: /* other errors: retry */
if (ucs->retry_cmd_in++ < BAS_RETRY) {
- dev_notice(cs->dev, "control read: retry %d\n",
- ucs->retry_cmd_in);
+ gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
+ get_usb_statmsg(status), ucs->retry_cmd_in);
rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0 || rc == -ENODEV)
- /* resubmitted or disconnected */
- /* - bypass regular exit block */
+ if (rc >= 0)
+ /* successfully resubmitted, skip freeing */
return;
- } else {
- dev_err(cs->dev,
- "control read: giving up after %d tries\n",
- ucs->retry_cmd_in);
+ if (rc == -ENODEV)
+ /* disconnect, no further action necessary */
+ break;
}
+ dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
+ get_usb_statmsg(status), ucs->retry_cmd_in);
error_reset(cs);
}
+ /* read finished, free buffer */
kfree(ucs->rcvbuf);
ucs->rcvbuf = NULL;
ucs->rcvbuf_size = 0;
- if (have_data) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- }
}
/* atread_submit
ret = starturbs(bcs);
if (ret < 0) {
+ spin_unlock_irqrestore(&cs->lock, flags);
dev_err(cs->dev,
"could not start isochronous I/O for channel B%d: %s\n",
bcs->channel + 1,
ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
if (ret != -ENODEV)
error_hangup(bcs);
- spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
dev_err(cs->dev, "could not open channel B%d\n",
bcs->channel + 1);
stopurbs(bcs->hw.bas);
- if (ret != -ENODEV)
- error_hangup(bcs);
}
spin_unlock_irqrestore(&cs->lock, flags);
+ if (ret < 0 && ret != -ENODEV)
+ error_hangup(bcs);
return ret;
}
if (unlikely(bcs->ignore)) {
bcs->ignore--;
- hdlc_flush(bcs);
return;
}
skb = bcs->rx_skb;
- if (skb == NULL)
+ if (skb == NULL) {
skb = gigaset_new_rx_skb(bcs);
- bcs->hw.bas->goodbytes += skb->len;
+ if (skb == NULL)
+ return;
+ }
dobytes = bcs->rx_bufsize - skb->len;
while (count > 0) {
dst = skb_put(skb, count < dobytes ? count : dobytes);
if (dobytes == 0) {
dump_bytes(DEBUG_STREAM_DUMP,
"rcv data", skb->data, skb->len);
+ bcs->hw.bas->goodbytes += skb->len;
gigaset_skb_rcvd(bcs, skb);
skb = gigaset_new_rx_skb(bcs);
if (skb == NULL)
DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
}
},
+ {}
};
/*
*/
q->limits = *limits;
- if (limits->no_cluster)
- queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
if (!dm_table_supports_discards(t))
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
else
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
- return num_sectors / 2; /* kB for sysfs */
+ return num_sectors;
}
static struct super_type super_types[] = {
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
return;
}
goto abort;
mddev->queue->queuedata = mddev;
- /* Can be unlocked because the queue is new: no concurrency */
- queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
blk_queue_make_request(mddev->queue, md_make_request);
disk = alloc_disk(1 << shift);
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
- /* set save_raid_disk if appropriate */
+ /* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks)
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
- rdev->saved_raid_disk = rdev->raid_disk;
+ if (test_bit(In_sync, &rdev->flags))
+ rdev->saved_raid_disk = rdev->raid_disk;
+ else
+ rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
|| kthread_should_stop(),
thread->timeout);
- clear_bit(THREAD_WAKEUP, &thread->flags);
-
- thread->run(thread->mddev);
+ if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
+ thread->run(thread->mddev);
}
return 0;
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
+ !mddev->recovery_disabled &&
mddev->degraded < conf->raid_disks) {
err = -EBUSY;
goto abort;
return 0;
out_free_conf:
+ md_unregister_thread(mddev->thread);
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
- md_unregister_thread(mddev->thread);
out:
return -EIO;
}
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
{
struct saa7146_vv *vv = dev->vv_data;
- struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat);
+ struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);
int b_depth = vv->ov_fmt->depth;
int b_bpl = vv->ov_fb.fmt.bytesperline;
struct saa7146_vv *vv = dev->vv_data;
struct saa7146_video_dma vdma1;
- struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
+ struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
int width = buf->fmt->width;
int height = buf->fmt->height;
struct saa7146_video_dma vdma2;
struct saa7146_video_dma vdma3;
- struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
+ struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
int width = buf->fmt->width;
int height = buf->fmt->height;
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
- struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
+ struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
struct saa7146_vv *vv = dev->vv_data;
u32 vdma1_prot_addr;
static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format);
-struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc)
+struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc)
{
int i, j = NUM_FORMATS;
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
struct scatterlist *list = dma->sglist;
int length = dma->sglen;
- struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
+ struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));
}
}
- fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
return -EBUSY;
}
- fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
return -EPERM;
/* check args */
- fmt = format_by_fourcc(dev, fb->fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat);
if (NULL == fmt)
return -EINVAL;
DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
- fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
buf->fmt = &fh->video_fmt;
buf->vb.field = fh->video_fmt.field;
- sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
+ sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
release_all_pagetables(dev, buf);
if( 0 != IS_PLANAR(sfmt->trans)) {
fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24;
fh->video_fmt.bytesperline = 0;
fh->video_fmt.field = V4L2_FIELD_ANY;
- sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8;
videobuf_queue_sg_init(&fh->video_q, &video_qops,
source "drivers/media/video/davinci/Kconfig"
source "drivers/media/video/omap/Kconfig"
+source "drivers/media/video/tegra/Kconfig"
source "drivers/media/video/bt8xx/Kconfig"
obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
case 0x0e:
/* CX23887-15Z */
dev->hwrevision = 0xc0;
+ break;
case 0x0f:
/* CX23887-14Z */
dev->hwrevision = 0xb1;
: USB_ENDPOINT_XFER_ISOC;
i = gspca_dev->alt; /* previous alt setting */
if (gspca_dev->cam.reverse_alts) {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i < gspca_dev->nbalt - 2)
i++;
while (++i < gspca_dev->nbalt) {
ep = alt_xfer(&intf->altsetting[i], xfer);
break;
}
} else {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i > 1)
i--;
while (--i >= 0) {
ep = alt_xfer(&intf->altsetting[i], xfer);
u8 jpegqual; /* webcam quality */
u8 reg18;
+ u8 flags;
s8 ag_cnt;
#define AG_CNT_START 13
SENSOR_SP80708,
};
+/* device flags */
+#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */
+
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
struct cam *cam;
sd->bridge = id->driver_info >> 16;
- sd->sensor = id->driver_info;
+ sd->sensor = id->driver_info >> 8;
+ sd->flags = id->driver_info;
cam = &gspca_dev->cam;
if (sd->sensor == SENSOR_ADCM1700) {
reg1 = 0x44;
reg17 = 0xa2;
break;
- default:
-/* case SENSOR_SP80708: */
+ case SENSOR_SP80708:
init = sp80708_sensor_param1;
if (mode) {
/*?? reg1 = 0x04; * 320 clk 48Mhz */
/* -- module initialisation -- */
#define BS(bridge, sensor) \
.driver_info = (BRIDGE_ ## bridge << 16) \
- | SENSOR_ ## sensor
+ | (SENSOR_ ## sensor << 8)
+#define BSF(bridge, sensor, flags) \
+ .driver_info = (BRIDGE_ ## bridge << 16) \
+ | (SENSOR_ ## sensor << 8) \
+ | (flags)
static const __devinitdata struct usb_device_id device_table[] = {
#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
#endif
- {USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)},
- {USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)},
+ {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
+ {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
mem, dev->bulk_in_size,
hdpvr_read_bulk_callback, buf);
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
buf->status = BUFSTAT_AVAILABLE;
list_add_tail(&buf->buff_list, &dev->free_buff_list);
}
void msp_update_volume(struct msp_state *state)
{
- v4l2_ctrl_s_ctrl(state->volume, v4l2_ctrl_g_ctrl(state->volume));
+ /* Force an update of the volume/mute cluster */
+ v4l2_ctrl_lock(state->volume);
+ state->volume->val = state->volume->cur.val;
+ state->muted->val = state->muted->cur.val;
+ msp_s_ctrl(state->volume);
+ v4l2_ctrl_unlock(state->volume);
}
/* --- v4l2 ioctls --- */
if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
- if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
- csicr1 |= CSICR1_INV_PCLK;
if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)
.subvendor = 0x13c2,
.subdevice = 0x2804,
.driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7190,
+ .driver_data = SAA7134_BOARD_BEHOLD_H7,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7090,
+ .driver_data = SAA7134_BOARD_BEHOLD_A7,
}, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SAA7134_BOARD_UNKNOWN,
- }, {
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
- .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
- .subdevice = 0x7190,
- .driver_data = SAA7134_BOARD_BEHOLD_H7,
- }, {
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
- .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
- .subdevice = 0x7090,
- .driver_data = SAA7134_BOARD_BEHOLD_A7,
},{
/* --- end of list --- */
}
--- /dev/null
+source "drivers/media/video/tegra/avp/Kconfig"
+
+config TEGRA_CAMERA
+ bool "Enable support for tegra camera/isp hardware"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the Tegra camera interface
+
+ If unsure, say Y
--- /dev/null
+obj-y += avp/
+obj-$(CONFIG_TEGRA_CAMERA) += tegra_camera.o
--- /dev/null
+config TEGRA_RPC
+ bool "Enable support for Tegra RPC"
+ depends on ARCH_TEGRA
+ default y
+ help
+ Enables support for the RPC mechanism necessary for the Tegra
+ multimedia framework. It is both used to communicate locally on the
+ CPU between multiple multimedia components as well as to communicate
+ with the AVP for offloading media decode.
+
+ Exports the local tegra RPC interface on device node
+ /dev/tegra_rpc. Also provides tegra fd based semaphores needed by
+ the tegra multimedia framework.
+
+ If unsure, say Y
+
+config TEGRA_AVP
+ bool "Enable support for the AVP multimedia offload engine"
+ depends on ARCH_TEGRA && TEGRA_RPC
+ default y
+ help
+ Enables support for the multimedia offload engine used by Tegra
+ multimedia framework.
+
+ If unsure, say Y
--- /dev/null
+obj-$(CONFIG_TEGRA_RPC) += tegra_rpc.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_local.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_sema.o
+obj-$(CONFIG_TEGRA_AVP) += avp.o
+obj-$(CONFIG_TEGRA_AVP) += avp_svc.o
+obj-$(CONFIG_TEGRA_AVP) += headavp.o
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "headavp.h"
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+enum {
+ AVP_DBG_TRACE_XPC = 1U << 0,
+ AVP_DBG_TRACE_XPC_IRQ = 1U << 1,
+ AVP_DBG_TRACE_XPC_MSG = 1U << 2,
+ AVP_DBG_TRACE_XPC_CONN = 1U << 3,
+ AVP_DBG_TRACE_TRPC_MSG = 1U << 4,
+ AVP_DBG_TRACE_TRPC_CONN = 1U << 5,
+ AVP_DBG_TRACE_LIB = 1U << 6,
+};
+
+static u32 avp_debug_mask = 0;
+module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)
+
+#define TEGRA_AVP_NAME "tegra-avp"
+
+#define TEGRA_AVP_KERNEL_FW "nvrm_avp.bin"
+
+#define TEGRA_AVP_RESET_VECTOR_ADDR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define TEGRA_AVP_RESUME_ADDR IO_ADDRESS(TEGRA_IRAM_BASE)
+
+#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP (0x2 << 29)
+#define FLOW_MODE_NONE 0x0
+
+#define MBOX_FROM_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define MBOX_TO_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+/* Layout of the mailbox registers:
+ * bit 31 - pending message interrupt enable (mailbox full, i.e. valid=1)
+ * bit 30 - message cleared interrupt enable (mailbox empty, i.e. valid=0)
+ * bit 29 - message valid. peer clears this bit after reading msg
+ * bits 27:0 - message data
+ */
+#define MBOX_MSG_PENDING_INT_EN (1 << 31)
+#define MBOX_MSG_READ_INT_EN (1 << 30)
+#define MBOX_MSG_VALID (1 << 29)
+
+#define AVP_MSG_MAX_CMD_LEN 16
+#define AVP_MSG_AREA_SIZE (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
+
+struct avp_info {
+ struct clk *cop_clk;
+
+ int mbox_from_avp_pend_irq;
+
+ dma_addr_t msg_area_addr;
+ u32 msg;
+ void *msg_to_avp;
+ void *msg_from_avp;
+ struct mutex to_avp_lock;
+ struct mutex from_avp_lock;
+
+ struct work_struct recv_work;
+ struct workqueue_struct *recv_wq;
+
+ struct trpc_node *rpc_node;
+ struct miscdevice misc_dev;
+ bool opened;
+ struct mutex open_lock;
+
+ spinlock_t state_lock;
+ bool initialized;
+ bool shutdown;
+ bool suspending;
+ bool defer_remote;
+
+ struct mutex libs_lock;
+ struct list_head libs;
+ struct nvmap_client *nvmap_libs;
+
+ /* client for driver allocations, persistent */
+ struct nvmap_client *nvmap_drv;
+ struct nvmap_handle_ref *kernel_handle;
+ void *kernel_data;
+ unsigned long kernel_phys;
+
+ struct nvmap_handle_ref *iram_backup_handle;
+ void *iram_backup_data;
+ unsigned long iram_backup_phys;
+ unsigned long resume_addr;
+
+ struct trpc_endpoint *avp_ep;
+ struct rb_root endpoints;
+
+ struct avp_svc_info *avp_svc;
+};
+
+struct remote_info {
+ u32 loc_id;
+ u32 rem_id;
+ struct kref ref;
+
+ struct trpc_endpoint *trpc_ep;
+ struct rb_node rb_node;
+};
+
+struct lib_item {
+ struct list_head list;
+ u32 handle;
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+};
+
+static struct avp_info *tegra_avp;
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
+static void avp_trpc_close(struct trpc_endpoint *ep);
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep);
+static void libs_cleanup(struct avp_info *avp);
+
+static struct trpc_ep_ops remote_ep_ops = {
+ .send = avp_trpc_send,
+ .close = avp_trpc_close,
+ .show = avp_trpc_show,
+};
+
+static struct remote_info *rinfo_alloc(struct avp_info *avp)
+{
+ struct remote_info *rinfo;
+
+ rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
+ if (!rinfo)
+ return NULL;
+ kref_init(&rinfo->ref);
+ return rinfo;
+}
+
+static void _rinfo_release(struct kref *ref)
+{
+ struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
+ kfree(rinfo);
+}
+
+static inline void rinfo_get(struct remote_info *rinfo)
+{
+ kref_get(&rinfo->ref);
+}
+
+static inline void rinfo_put(struct remote_info *rinfo)
+{
+ kref_put(&rinfo->ref, _rinfo_release);
+}
+
+static int remote_insert(struct avp_info *avp, struct remote_info *rinfo)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct remote_info *tmp;
+
+ p = &avp->endpoints.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct remote_info, rb_node);
+
+ if (rinfo->loc_id < tmp->loc_id)
+ p = &(*p)->rb_left;
+ else if (rinfo->loc_id > tmp->loc_id)
+ p = &(*p)->rb_right;
+ else {
+ pr_info("%s: avp endpoint id=%x (%s) already exists\n",
+ __func__, rinfo->loc_id,
+ trpc_name(rinfo->trpc_ep));
+ return -EEXIST;
+ }
+ }
+ rb_link_node(&rinfo->rb_node, parent, p);
+ rb_insert_color(&rinfo->rb_node, &avp->endpoints);
+ rinfo_get(rinfo);
+ return 0;
+}
+
+static struct remote_info *remote_find(struct avp_info *avp, u32 local_id)
+{
+ struct rb_node *n = avp->endpoints.rb_node;
+ struct remote_info *rinfo;
+
+ while (n) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+
+ if (local_id < rinfo->loc_id)
+ n = n->rb_left;
+ else if (local_id > rinfo->loc_id)
+ n = n->rb_right;
+ else
+ return rinfo;
+ }
+ return NULL;
+}
+
+static void remote_remove(struct avp_info *avp, struct remote_info *rinfo)
+{
+ rb_erase(&rinfo->rb_node, &avp->endpoints);
+ rinfo_put(rinfo);
+}
+
+/* test whether or not the trpc endpoint provided is a valid AVP node
+ * endpoint */
+static struct remote_info *validate_trpc_ep(struct avp_info *avp,
+ struct trpc_endpoint *ep)
+{
+ struct remote_info *tmp = trpc_priv(ep);
+ struct remote_info *rinfo;
+
+ if (!tmp)
+ return NULL;
+ rinfo = remote_find(avp, tmp->loc_id);
+ if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
+ return rinfo;
+ return NULL;
+}
+
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ seq_printf(s, " <unknown>\n");
+ goto out;
+ }
+ seq_printf(s, " loc_id:0x%x\n rem_id:0x%x\n",
+ rinfo->loc_id, rinfo->rem_id);
+out:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+static inline void mbox_writel(u32 val, void __iomem *mbox)
+{
+ writel(val, mbox);
+}
+
+static inline u32 mbox_readl(void __iomem *mbox)
+{
+ return readl(mbox);
+}
+
+static inline void msg_ack_remote(struct avp_info *avp, u32 cmd, u32 arg)
+{
+ struct msg_ack *ack = avp->msg_from_avp;
+
+ /* must make sure the arg is there first */
+ ack->arg = arg;
+ wmb();
+ ack->cmd = cmd;
+ wmb();
+}
+
+static inline u32 msg_recv_get_cmd(struct avp_info *avp)
+{
+ volatile u32 *cmd = avp->msg_from_avp;
+ rmb();
+ return *cmd;
+}
+
+static inline int __msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+ void *buf, size_t len)
+{
+ memcpy(avp->msg_to_avp, hdr, hdr_len);
+ if (buf && len)
+ memcpy(avp->msg_to_avp + hdr_len, buf, len);
+ mbox_writel(avp->msg, MBOX_TO_AVP);
+ return 0;
+}
+
+static inline int msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+ void *buf, size_t len)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ;
+
+ /* the other side ack's the message by clearing the first word,
+ * wait for it to do so */
+ rmb();
+ while (*rem_ack != 0 && time_before(jiffies, endtime)) {
+ usleep_range(100, 2000);
+ rmb();
+ }
+ if (*rem_ack != 0)
+ return -ETIMEDOUT;
+ __msg_write(avp, hdr, hdr_len, buf, len);
+ return 0;
+}
+
+static inline int msg_check_ack(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+ struct msg_ack ack;
+
+ rmb();
+ memcpy(&ack, avp->msg_to_avp, sizeof(ack));
+ if (ack.cmd != cmd)
+ return -ENOENT;
+ if (arg)
+ *arg = ack.arg;
+ return 0;
+}
+
+/* XXX: add timeout */
+static int msg_wait_ack_locked(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ / 5;
+ int ret;
+
+ do {
+ ret = msg_check_ack(avp, cmd, arg);
+ usleep_range(1000, 5000);
+ } while (ret && time_before(jiffies, endtime));
+
+ /* if we timed out, try one more time */
+ if (ret)
+ ret = msg_check_ack(avp, cmd, arg);
+
+ /* clear out the ack */
+ *rem_ack = 0;
+ wmb();
+ return ret;
+}
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ struct msg_port_data msg;
+ int ret;
+ unsigned long flags;
+
+ DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%d\n",
+ __func__, ep, trpc_priv(ep), buf, len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
+ ret = -EBUSY;
+ goto err_state_locked;
+ } else if (avp->shutdown) {
+ ret = -ENODEV;
+ goto err_state_locked;
+ }
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ ret = -ENOTTY;
+ goto err_state_locked;
+ }
+ rinfo_get(rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ msg.cmd = CMD_MESSAGE;
+ msg.port_id = rinfo->rem_id;
+ msg.msg_len = len;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), buf, len);
+ mutex_unlock(&avp->to_avp_lock);
+
+ DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
+ __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
+ rinfo_put(rinfo);
+ return ret;
+
+err_state_locked:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return ret;
+}
+
+static int _send_disconnect(struct avp_info *avp, u32 port_id)
+{
+ struct msg_disconnect msg;
+ int ret;
+
+ msg.cmd = CMD_DISCONNECT;
+ msg.port_id = port_id;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%x)\n", __func__,
+ port_id);
+ goto err_msg_write;
+ }
+
+ ret = msg_wait_ack_locked(avp, CMD_ACK, NULL);
+ if (ret) {
+ pr_err("%s: remote end won't respond for %x\n", __func__,
+ port_id);
+ goto err_wait_ack;
+ }
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for %x\n",
+ __func__, port_id);
+
+err_wait_ack:
+err_msg_write:
+ mutex_unlock(&avp->to_avp_lock);
+ return ret;
+}
+
+/* Note: Assumes that the rinfo was previously successfully added to the
+ * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
+ * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
+ * we must drop that reference here.
+ * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
+ *
+ * The try_connect function does not use this on error because it needs to
+ * split the close of trpc_ep port and the put.
+ */
+static inline void remote_close(struct remote_info *rinfo)
+{
+ trpc_close(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+}
+
+static void avp_trpc_close(struct trpc_endpoint *ep)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->shutdown) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
+ __func__, trpc_name(ep), ep);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
+ trpc_name(ep), rinfo->rem_id);
+
+ ret = _send_disconnect(avp, rinfo->rem_id);
+ if (ret)
+ pr_err("%s: error while closing remote port '%s' (%x)\n",
+ __func__, trpc_name(ep), rinfo->rem_id);
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+}
+
+/* takes and holds avp->from_avp_lock */
+static void recv_msg_lock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ mutex_lock(&avp->from_avp_lock);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+/* MUST be called with avp->from_avp_lock held */
+static void recv_msg_unlock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = false;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+static int avp_node_try_connect(struct trpc_node *node,
+ struct trpc_node *src_node,
+ struct trpc_endpoint *from)
+{
+ struct avp_info *avp = tegra_avp;
+ const char *port_name = trpc_name(from);
+ struct remote_info *rinfo;
+ struct msg_connect msg;
+ int ret;
+ unsigned long flags;
+ int len;
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
+ port_name);
+
+ if (node != avp->rpc_node || node->priv != avp)
+ return -ENODEV;
+
+ len = strlen(port_name);
+ if (len > XPC_PORT_NAME_LEN) {
+ pr_err("%s: port name (%s) to long\n", __func__, port_name);
+ return -EINVAL;
+ }
+
+ ret = 0;
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->suspending) {
+ ret = -EBUSY;
+ } else if (likely(src_node != avp->rpc_node)) {
+ /* only check for initialized when the source is not ourselves
+ * since we'll end up calling into here during initialization */
+ if (!avp->initialized)
+ ret = -ENODEV;
+ } else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
+ /* we only allow connections to ourselves for the cpu-to-avp
+ port */
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ return ret;
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto err_alloc_rinfo;
+ }
+ rinfo->loc_id = (u32)rinfo;
+
+ msg.cmd = CMD_CONNECT;
+ msg.port_id = rinfo->loc_id;
+ memcpy(msg.name, port_name, len);
+ memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);
+
+ /* when trying to connect to remote, we need to block remote
+ * messages until we get our ack and can insert it into our lists.
+ * Otherwise, we can get a message from the other side for a port
+ * that we haven't finished setting up.
+ *
+ * 'defer_remote' will force the irq handler to not process messages
+ * at irq context but to schedule work to do so. The work function will
+ * take the from_avp_lock and everything should stay consistent.
+ */
+ recv_msg_lock(avp);
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%s)\n", __func__,
+ port_name);
+ mutex_unlock(&avp->to_avp_lock);
+ goto err_msg_write;
+ }
+ ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
+ mutex_unlock(&avp->to_avp_lock);
+
+ if (ret) {
+ pr_err("%s: remote end won't respond for '%s'\n", __func__,
+ port_name);
+ goto err_wait_ack;
+ }
+ if (!rinfo->rem_id) {
+ pr_err("%s: can't connect to '%s'\n", __func__, port_name);
+ ret = -ECONNREFUSED;
+ goto err_nack;
+ }
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
+ __func__, port_name, rinfo->loc_id, rinfo->rem_id);
+
+ rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
+ rinfo);
+ if (!rinfo->trpc_ep) {
+ pr_err("%s: cannot create peer for %s\n", __func__, port_name);
+ ret = -EINVAL;
+ goto err_create_peer;
+ }
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ recv_msg_unlock(avp);
+ return 0;
+
+err_ep_insert:
+ trpc_close(rinfo->trpc_ep);
+err_create_peer:
+ _send_disconnect(avp, rinfo->rem_id);
+err_nack:
+err_wait_ack:
+err_msg_write:
+ recv_msg_unlock(avp);
+ rinfo_put(rinfo);
+err_alloc_rinfo:
+ return ret;
+}
+
+static void process_disconnect_locked(struct avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
+ unsigned long flags;
+ struct remote_info *rinfo;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
+ disconn_msg->port_id);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
+ sizeof(struct msg_disconnect));
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, disconn_msg->port_id);
+ if (!rinfo) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ pr_warning("%s: got disconnect for unknown port %x\n",
+ __func__, disconn_msg->port_id);
+ goto ack;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+}
+
+static void process_connect_locked(struct avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
+ struct trpc_endpoint *trpc_ep;
+ struct remote_info *rinfo;
+ char name[XPC_PORT_NAME_LEN + 1];
+ int ret;
+ u32 local_port_id = 0;
+ unsigned long flags;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
+ conn_msg->port_id);
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ conn_msg, sizeof(struct msg_connect));
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto ack;
+ }
+ rinfo->loc_id = (u32)rinfo;
+ rinfo->rem_id = conn_msg->port_id;
+
+ memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
+ name[XPC_PORT_NAME_LEN] = '\0';
+ trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
+ rinfo, 0);
+ if (IS_ERR(trpc_ep)) {
+ pr_err("%s: remote requested unknown port '%s' (%d)\n",
+ __func__, name, (int)PTR_ERR(trpc_ep));
+ goto nack;
+ }
+ rinfo->trpc_ep = trpc_ep;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ local_port_id = rinfo->loc_id;
+ goto ack;
+
+err_ep_insert:
+ trpc_close(trpc_ep);
+nack:
+ rinfo_put(rinfo);
+ local_port_id = 0;
+ack:
+ msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
+}
+
+static int process_message(struct avp_info *avp, struct msg_data *raw_msg,
+ gfp_t gfp_flags)
+{
+ struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int len;
+ int ret;
+
+ len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
+ pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
+ port_msg->cmd, port_msg->port_id, port_msg->msg_len);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
+ sizeof(struct msg_port_data) + len);
+ }
+
+ if (len != port_msg->msg_len)
+ pr_err("%s: message sent is too long (%d bytes)\n", __func__,
+ port_msg->msg_len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, port_msg->port_id);
+ if (rinfo) {
+ rinfo_get(rinfo);
+ trpc_get(rinfo->trpc_ep);
+ } else {
+ pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ ret = -ENOENT;
+ goto ack;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
+ len, gfp_flags);
+ if (ret == -ENOMEM) {
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ goto no_ack;
+ } else if (ret) {
+ pr_err("%s: cannot queue message for port %s/%x (%d)\n",
+ __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
+ ret);
+ } else {
+ DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
+ }
+
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+no_ack:
+ return ret;
+}
+
+/* Work function: handle a message from the AVP that was deferred out of
+ * irq context.  from_avp_lock serializes inbound message processing. */
+static void process_avp_message(struct work_struct *work)
+{
+	struct avp_info *avp = container_of(work, struct avp_info, recv_work);
+	struct msg_data *shared_msg = avp->msg_from_avp;
+	u32 cmd;
+
+	mutex_lock(&avp->from_avp_lock);
+	/* observe the AVP's writes to the shared message area */
+	rmb();
+	cmd = shared_msg->cmd;
+	if (cmd == CMD_CONNECT)
+		process_connect_locked(avp, shared_msg);
+	else if (cmd == CMD_DISCONNECT)
+		process_disconnect_locked(avp, shared_msg);
+	else if (cmd == CMD_MESSAGE)
+		process_message(avp, shared_msg, GFP_KERNEL);
+	else
+		pr_err("%s: unknown cmd (%x) received\n", __func__, cmd);
+	mutex_unlock(&avp->from_avp_lock);
+}
+
+/* Mailbox interrupt: the AVP has posted a message.  Fast-path delivery
+ * is attempted here with GFP_ATOMIC; anything that can't be handled in
+ * irq context is punted to the recv workqueue. */
+static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
+{
+	struct avp_info *avp = data;
+	struct msg_data *msg = avp->msg_from_avp;
+	u32 mbox_msg;
+	unsigned long flags;
+	int ret;
+
+	/* read and clear the inbox register */
+	mbox_msg = mbox_readl(MBOX_FROM_AVP);
+	mbox_writel(0, MBOX_FROM_AVP);
+
+	DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);
+
+	/* XXX: re-use previous message? */
+	if (!(mbox_msg & MBOX_MSG_VALID)) {
+		WARN_ON(1);
+		goto done;
+	}
+
+	/* the mailbox carries an address >> 4 with flag bits in the low
+	 * nibble (see tegra_avp_probe); undo the shift */
+	mbox_msg <<= 4;
+	if (mbox_msg == 0x2f00bad0UL) {
+		/* magic watchdog-pet value, nothing to deliver */
+		pr_info("%s: petting watchdog\n", __func__);
+		goto done;
+	}
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	if (avp->shutdown) {
+		/* tearing down: drop the message */
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		goto done;
+	} else if (avp->defer_remote) {
+		/* remote processing currently forced onto the workqueue */
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		goto defer;
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	/* ensure we see the AVP's writes to the shared message area */
+	rmb();
+	if (msg->cmd == CMD_MESSAGE) {
+		/* fast path: try to deliver straight from irq context; a
+		 * -ENOMEM return means it was not acked and must be retried
+		 * from process context */
+		ret = process_message(avp, msg, GFP_ATOMIC);
+		if (ret != -ENOMEM)
+			goto done;
+		pr_info("%s: deferring message (%d)\n", __func__, ret);
+	}
+defer:
+	queue_work(avp->recv_wq, &avp->recv_work);
+done:
+	return IRQ_HANDLED;
+}
+
+/* Reset the AVP (COP) and restart it at reset_addr via a small boot
+ * stub.  Returns 0 on success, or -EINVAL if the AVP never came up
+ * (detected by it not rewriting its reset vector within 2s). */
+static int avp_reset(struct avp_info *avp, unsigned long reset_addr)
+{
+	unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+	dma_addr_t stub_data_phys;
+	unsigned long timeout;
+	int ret = 0;
+
+	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+	/* tell the boot stub where the kernel image lives and where to
+	 * jump once it has set up the mapping */
+	_tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
+	_tegra_avp_boot_stub_data.jump_addr = reset_addr;
+	wmb();
+	stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+					sizeof(_tegra_avp_boot_stub_data),
+					DMA_TO_DEVICE);
+
+	writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);
+
+	/* pulse the COP reset line */
+	tegra_periph_reset_assert(avp->cop_clk);
+	udelay(10);
+	tegra_periph_reset_deassert(avp->cop_clk);
+
+	writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+	/* the AVP firmware will reprogram its reset vector as the kernel
+	 * starts, so a dead kernel can be detected by polling this value */
+	timeout = jiffies + msecs_to_jiffies(2000);
+	while (time_before(jiffies, timeout)) {
+		if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
+			break;
+		cpu_relax();
+	}
+	if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys)
+		ret = -EINVAL;
+	WARN_ON(ret);
+	dma_unmap_single(NULL, stub_data_phys,
+			 sizeof(_tegra_avp_boot_stub_data),
+			 DMA_TO_DEVICE);
+	return ret;
+}
+
+/* Force the AVP into reset and put the shared message areas and both
+ * mailbox registers back into a known initial state. */
+static void avp_halt(struct avp_info *avp)
+{
+	/* ensure the AVP is halted */
+	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+	tegra_periph_reset_assert(avp->cop_clk);
+
+	/* set up the initial memory areas and mailbox contents */
+	*((u32 *)avp->msg_from_avp) = 0;
+	*((u32 *)avp->msg_to_avp) = 0xfeedf00d;
+	mbox_writel(0, MBOX_FROM_AVP);
+	mbox_writel(0, MBOX_TO_AVP);
+}
+
+/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
+ * node, but are actually meant to be processed on our side (either
+ * by the svc thread for processing remote calls or by the client
+ * of the char dev for receiving replies for managing remote
+ * libraries/modules). */
+
+/* Boot the AVP: load its firmware into the preallocated 1MB kernel
+ * area, reset it, and bring up the RPC plumbing (local RPC_CPU_PORT
+ * service plus a connection to the remote RPC_AVP_PORT).
+ *
+ * Called with avp->open_lock held (from tegra_avp_open).  Returns 0 on
+ * success or a negative errno; all partially-initialized state is
+ * unwound on failure.
+ */
+static int avp_init(struct avp_info *avp, const char *fw_file)
+{
+	const struct firmware *avp_fw;
+	int ret;
+	struct trpc_endpoint *ep;
+
+	avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
+	if (IS_ERR(avp->nvmap_libs)) {
+		pr_err("%s: cannot create libs nvmap client\n", __func__);
+		ret = PTR_ERR(avp->nvmap_libs);
+		goto err_nvmap_create_libs_client;
+	}
+
+	/* put the address of the shared mem area into the mailbox for AVP
+	 * to read out when its kernel boots. */
+	mbox_writel(avp->msg, MBOX_TO_AVP);
+
+	ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
+	if (ret) {
+		pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
+		goto err_req_fw;
+	}
+	/* kernel_data is a fixed SZ_1M allocation (see probe); reject
+	 * firmware that would overflow it -- previously an oversized image
+	 * overran the buffer and underflowed the memset length below */
+	if (avp_fw->size > SZ_1M) {
+		pr_err("%s: firmware '%s' too large (%zu bytes)\n", __func__,
+		       fw_file, avp_fw->size);
+		ret = -EINVAL;
+		release_firmware(avp_fw);
+		goto err_req_fw;
+	}
+	pr_info("%s: read firmware from '%s' (%zu bytes)\n", __func__,
+		fw_file, avp_fw->size);
+	memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
+	memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
+	wmb();
+	release_firmware(avp_fw);
+
+	ret = avp_reset(avp, AVP_KERNEL_VIRT_BASE);
+	if (ret) {
+		pr_err("%s: cannot reset the AVP.. aborting..\n", __func__);
+		goto err_reset;
+	}
+
+	enable_irq(avp->mbox_from_avp_pend_irq);
+	/* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
+	 * ready for remote commands. Then, connect to the
+	 * remote RPC_AVP_PORT to be able to send library load/unload and
+	 * suspend commands to it */
+	ret = avp_svc_start(avp->avp_svc);
+	if (ret)
+		goto err_avp_svc_start;
+
+	ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
+				 NULL, -1);
+	if (IS_ERR(ep)) {
+		pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
+		ret = PTR_ERR(ep);
+		goto err_rpc_avp_port;
+	}
+	avp->avp_ep = ep;
+
+	avp->initialized = true;
+	smp_wmb();
+	pr_info("%s: avp init done\n", __func__);
+	return 0;
+
+err_rpc_avp_port:
+	avp_svc_stop(avp->avp_svc);
+err_avp_svc_start:
+	disable_irq(avp->mbox_from_avp_pend_irq);
+err_reset:
+	avp_halt(avp);
+err_req_fw:
+	nvmap_client_put(avp->nvmap_libs);
+err_nvmap_create_libs_client:
+	avp->nvmap_libs = NULL;
+	return ret;
+}
+
+/* Tear down a running AVP: mark it shutting down, halt the hardware,
+ * close every remote endpoint and drop all library tracking state.
+ * Called with avp->open_lock held (from tegra_avp_release). */
+static void avp_uninit(struct avp_info *avp)
+{
+	unsigned long flags;
+	struct rb_node *n;
+	struct remote_info *rinfo;
+
+	spin_lock_irqsave(&avp->state_lock, flags);
+	avp->initialized = false;
+	avp->shutdown = true;
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	disable_irq(avp->mbox_from_avp_pend_irq);
+	cancel_work_sync(&avp->recv_work);
+
+	avp_halt(avp);
+
+	/* state_lock is dropped around remote_close (NOTE(review): it
+	 * presumably may sleep); the tree is re-read from the root on
+	 * every iteration so concurrent removals are safe */
+	spin_lock_irqsave(&avp->state_lock, flags);
+	while ((n = rb_first(&avp->endpoints)) != NULL) {
+		rinfo = rb_entry(n, struct remote_info, rb_node);
+		rinfo_get(rinfo);
+		remote_remove(avp, rinfo);
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+
+		remote_close(rinfo);
+		rinfo_put(rinfo);
+
+		spin_lock_irqsave(&avp->state_lock, flags);
+	}
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	avp_svc_stop(avp->avp_svc);
+
+	if (avp->avp_ep) {
+		trpc_close(avp->avp_ep);
+		avp->avp_ep = NULL;
+	}
+
+	libs_cleanup(avp);
+
+	avp->shutdown = false;
+	smp_wmb();
+	pr_info("%s: avp teardown done\n", __func__);
+}
+
+/* Load a library binary onto the AVP; on success the remote library
+ * handle is returned in lib->handle.
+ *
+ * The image is staged in a temporary nvmap allocation; the AVP copies
+ * and relocates it during SVC_LIBRARY_ATTACH, so the staging buffer is
+ * always released before returning (see comment near the end).
+ *
+ * Called with avp->libs_lock held.  Returns 0 or a negative errno.
+ */
+static int _load_lib(struct avp_info *avp, struct tegra_avp_lib *lib)
+{
+	struct svc_lib_attach svc;
+	struct svc_lib_attach_resp resp;
+	const struct firmware *fw;
+	void *args;
+	struct nvmap_handle_ref *lib_handle;
+	void *lib_data;
+	unsigned long lib_phys;
+	int ret;
+
+	DBG(AVP_DBG_TRACE_LIB, "avp_lib: loading library '%s'\n", lib->name);
+
+	/* svc.args is only SVC_ARGS_MAX_LEN bytes; refuse anything that
+	 * would overflow it in the memcpy below */
+	if (lib->args_len > SVC_ARGS_MAX_LEN) {
+		pr_err("avp_lib: library args too long (%d)\n",
+		       lib->args_len);
+		return -E2BIG;
+	}
+
+	args = kmalloc(lib->args_len, GFP_KERNEL);
+	if (!args) {
+		pr_err("avp_lib: can't alloc mem for args (%d)\n",
+		       lib->args_len);
+		return -ENOMEM;
+	}
+	if (copy_from_user(args, lib->args, lib->args_len)) {
+		pr_err("avp_lib: can't copy lib args\n");
+		ret = -EFAULT;
+		goto err_cp_args;
+	}
+
+	ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
+	if (ret) {
+		pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
+		goto err_req_fw;
+	}
+
+	lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
+				 NVMAP_HANDLE_WRITE_COMBINE);
+	if (IS_ERR(lib_handle)) {
+		pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
+		ret = PTR_ERR(lib_handle);
+		goto err_nvmap_alloc;
+	}
+
+	lib_data = nvmap_mmap(lib_handle);
+	if (!lib_data) {
+		pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
+		ret = -ENOMEM;
+		goto err_nvmap_mmap;
+	}
+
+	lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
+	if (IS_ERR((void *)lib_phys)) {
+		pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
+		/* bugfix: was PTR_ERR(lib_handle), which is a valid pointer
+		 * here, not an error code -- take the error from lib_phys */
+		ret = PTR_ERR((void *)lib_phys);
+		goto err_nvmap_pin;
+	}
+
+	memcpy(lib_data, fw->data, fw->size);
+
+	svc.svc_id = SVC_LIBRARY_ATTACH;
+	svc.address = lib_phys;
+	svc.args_len = lib->args_len;
+	svc.lib_size = fw->size;
+	svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
+		AVP_LIB_REASON_ATTACH;
+	memcpy(svc.args, args, lib->args_len);
+	wmb();
+
+	/* send message, wait for reply */
+	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+			    GFP_KERNEL);
+	if (ret)
+		goto err_send_msg;
+
+	ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+			    sizeof(resp), -1);
+	if (ret != sizeof(resp)) {
+		pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
+		/* don't leak a positive short-read byte count to the caller
+		 * as if it were a status */
+		if (ret >= 0)
+			ret = -EIO;
+		goto err_recv_msg;
+	} else if (resp.err) {
+		pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
+		       resp.err, lib->name);
+		ret = -EPROTO;
+		goto err_recv_msg;
+	}
+	lib->handle = resp.lib_id;
+	ret = 0;
+	DBG(AVP_DBG_TRACE_LIB,
+	    "avp_lib: Successfully loaded library %s (lib_id=%x)\n",
+	    lib->name, resp.lib_id);
+
+	/* We free the memory here because by this point the AVP has already
+	 * requested memory for the library for all the sections since it does
+	 * it's own relocation and memory management. So, our allocations were
+	 * temporary to hand the library code over to the AVP.
+	 */
+
+err_recv_msg:
+err_send_msg:
+	nvmap_unpin(avp->nvmap_libs, lib_handle);
+err_nvmap_pin:
+	nvmap_munmap(lib_handle, lib_data);
+err_nvmap_mmap:
+	nvmap_free(avp->nvmap_libs, lib_handle);
+err_nvmap_alloc:
+	release_firmware(fw);
+err_req_fw:
+err_cp_args:
+	kfree(args);
+	return ret;
+}
+
+/* Ask the AVP to detach a previously loaded library and wait for the
+ * reply.  Returns 0 on success or a negative errno (including when the
+ * remote side reports an error). */
+static int send_unload_lib_msg(struct avp_info *avp, u32 handle,
+			       const char *name)
+{
+	struct svc_lib_detach svc;
+	struct svc_lib_detach_resp resp;
+	int ret;
+
+	svc.svc_id = SVC_LIBRARY_DETACH;
+	svc.reason = AVP_LIB_REASON_DETACH;
+	svc.lib_id = handle;
+
+	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+			    GFP_KERNEL);
+	if (ret) {
+		pr_err("avp_lib: can't send unload message to avp for '%s'\n",
+		       name);
+		goto err;
+	}
+
+	ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+			    sizeof(resp), -1);
+	if (ret != sizeof(resp)) {
+		pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
+		       name, ret);
+		/* a short read is a failure; don't return a positive byte
+		 * count that callers would misinterpret */
+		if (ret >= 0)
+			ret = -EIO;
+	} else if (resp.err) {
+		pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
+		       resp.err, name);
+		ret = -EPROTO;
+	} else
+		ret = 0;
+err:
+	return ret;
+}
+
+/* Look up a loaded library by its remote handle.  Caller must hold
+ * avp->libs_lock. */
+static struct lib_item *_find_lib_locked(struct avp_info *avp, u32 handle)
+{
+	struct lib_item *lib;
+
+	list_for_each_entry(lib, &avp->libs, list)
+		if (lib->handle == handle)
+			return lib;
+	return NULL;
+}
+
+/* Start tracking a freshly loaded library.  Caller must hold
+ * avp->libs_lock. */
+static int _insert_lib_locked(struct avp_info *avp, u32 handle, char *name)
+{
+	struct lib_item *lib = kzalloc(sizeof(*lib), GFP_KERNEL);
+
+	if (!lib)
+		return -ENOMEM;
+	lib->handle = handle;
+	strlcpy(lib->name, name, TEGRA_AVP_LIB_MAX_NAME);
+	list_add_tail(&lib->list, &avp->libs);
+	return 0;
+}
+
+/* Unlink and free a tracking entry.  Caller must hold avp->libs_lock. */
+static void _delete_lib_locked(struct avp_info *avp, struct lib_item *item)
+{
+	list_del(&item->list);
+	kfree(item);
+}
+
+/* TEGRA_AVP_IOCTL_LOAD_LIB: copy the request from userspace, load the
+ * library on the AVP, report the remote handle back to userspace and
+ * start tracking it in avp->libs. */
+static int handle_load_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+	struct tegra_avp_lib lib;
+	int ret;
+
+	if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
+		return -EFAULT;
+	/* userspace may not have NUL-terminated the name */
+	lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+	if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+		pr_err("%s: library args too long (%d)\n", __func__,
+		       lib.args_len);
+		return -E2BIG;
+	}
+
+	mutex_lock(&avp->libs_lock);
+	ret = _load_lib(avp, &lib);
+	if (ret)
+		goto err_load_lib;
+
+	if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
+		/* the remote library we just loaded is unloaded again on
+		 * the error path below */
+		ret = -EFAULT;
+		goto err_copy_to_user;
+	}
+	ret = _insert_lib_locked(avp, lib.handle, lib.name);
+	if (ret) {
+		pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+		goto err_insert_lib;
+	}
+
+	mutex_unlock(&avp->libs_lock);
+	return 0;
+
+err_insert_lib:
+err_copy_to_user:
+	send_unload_lib_msg(avp, lib.handle, lib.name);
+err_load_lib:
+	mutex_unlock(&avp->libs_lock);
+	return ret;
+}
+
+/* TEGRA_AVP_IOCTL_UNLOAD_LIB: arg is the remote library handle.  Note
+ * that the local tracking entry is removed even when the remote unload
+ * fails; the failure is only reported back to the caller. */
+static int handle_unload_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+	struct lib_item *item;
+	int ret;
+
+	mutex_lock(&avp->libs_lock);
+	item = _find_lib_locked(avp, (u32)arg);
+	if (!item) {
+		pr_err("avp_lib: avp lib with handle 0x%x not found\n",
+		       (u32)arg);
+		ret = -ENOENT;
+		goto err_find;
+	}
+	ret = send_unload_lib_msg(avp, item->handle, item->name);
+	if (!ret)
+		DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
+	else
+		pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
+		       item->handle, ret);
+	_delete_lib_locked(avp, item);
+
+err_find:
+	mutex_unlock(&avp->libs_lock);
+	return ret;
+}
+
+/* Drop all library tracking state and the nvmap client used to stage
+ * library images.  Does not message the AVP -- it is called from
+ * avp_uninit() after the AVP has already been halted. */
+static void libs_cleanup(struct avp_info *avp)
+{
+	struct lib_item *lib;
+	struct lib_item *lib_tmp;
+
+	mutex_lock(&avp->libs_lock);
+	list_for_each_entry_safe(lib, lib_tmp, &avp->libs, list) {
+		_delete_lib_locked(avp, lib);
+	}
+
+	nvmap_client_put(avp->nvmap_libs);
+	avp->nvmap_libs = NULL;
+	mutex_unlock(&avp->libs_lock);
+}
+
+/* ioctl entry point for the tegra_avp misc device. */
+static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct avp_info *avp = tegra_avp;
+
+	if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
+	    _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
+	    _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
+		return -ENOTTY;
+
+	if (cmd == TEGRA_AVP_IOCTL_LOAD_LIB)
+		return handle_load_lib_ioctl(avp, arg);
+	if (cmd == TEGRA_AVP_IOCTL_UNLOAD_LIB)
+		return handle_unload_lib_ioctl(avp, arg);
+
+	pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
+	return -ENOTTY;
+}
+
+/* Open the AVP device: boots the AVP kernel.  Only a single userspace
+ * client may hold the device open at a time. */
+static int tegra_avp_open(struct inode *inode, struct file *file)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret;
+
+	nonseekable_open(inode, file);
+
+	mutex_lock(&avp->open_lock);
+	if (avp->opened) {
+		/* only one userspace client at a time */
+		pr_err("%s: already have client, aborting\n", __func__);
+		mutex_unlock(&avp->open_lock);
+		return -EBUSY;
+	}
+	ret = avp_init(avp, TEGRA_AVP_KERNEL_FW);
+	avp->opened = !ret;
+	mutex_unlock(&avp->open_lock);
+	return ret;
+}
+
+/* Release the AVP device: halts the AVP and tears down its state. */
+static int tegra_avp_release(struct inode *inode, struct file *file)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret = 0;
+
+	pr_info("%s: release\n", __func__);
+	mutex_lock(&avp->open_lock);
+	if (avp->opened) {
+		avp_uninit(avp);
+		avp->opened = false;
+	} else {
+		pr_err("%s: releasing while in invalid state\n", __func__);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&avp->open_lock);
+	return ret;
+}
+
+/* Tell the AVP to back up IRAM and enter LP0, then poll (for up to 1s)
+ * for it to report completion.  Returns 0 on success, -ETIMEDOUT if
+ * the AVP never signalled, or the trpc send error. */
+static int avp_enter_lp0(struct avp_info *avp)
+{
+	/* the AVP writes a non-zero value to the word just past the iram
+	 * backup buffer once it has finished saving state */
+	volatile u32 *avp_suspend_done =
+		avp->iram_backup_data + TEGRA_IRAM_SIZE;
+	struct svc_enter_lp0 svc;
+	unsigned long endtime;
+	int ret;
+
+	svc.svc_id = SVC_ENTER_LP0;
+	svc.src_addr = (u32)TEGRA_IRAM_BASE;
+	svc.buf_addr = (u32)avp->iram_backup_phys;
+	svc.buf_size = TEGRA_IRAM_SIZE;
+
+	*avp_suspend_done = 0;
+	wmb();
+
+	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+			    GFP_KERNEL);
+	if (ret) {
+		pr_err("%s: cannot send AVP suspend message\n", __func__);
+		return ret;
+	}
+
+	endtime = jiffies + msecs_to_jiffies(1000);
+	rmb();
+	while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
+		udelay(10);
+		rmb();
+	}
+
+	rmb();
+	if (*avp_suspend_done == 0) {
+		/* the dead "goto err" indirection that used to live here
+		 * only re-returned ret; return directly instead */
+		pr_err("%s: AVP failed to suspend\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* Platform suspend hook: ask the AVP to save its state (including
+ * IRAM) via LP0 entry, then record the address it wants to resume at. */
+static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct avp_info *avp = tegra_avp;
+	unsigned long flags;
+	int ret;
+
+	pr_info("%s()+\n", __func__);
+	spin_lock_irqsave(&avp->state_lock, flags);
+	if (!avp->initialized) {
+		/* AVP isn't running; nothing to save */
+		spin_unlock_irqrestore(&avp->state_lock, flags);
+		return 0;
+	}
+	avp->suspending = true;
+	spin_unlock_irqrestore(&avp->state_lock, flags);
+
+	ret = avp_enter_lp0(avp);
+	if (ret)
+		goto err;
+
+	/* the AVP publishes its resume entry point at a known address */
+	avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
+	if (!avp->resume_addr) {
+		pr_err("%s: AVP failed to set it's resume address\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	disable_irq(avp->mbox_from_avp_pend_irq);
+
+	pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
+	/* mask off bit 0 of the resume address (likely a Thumb-mode flag
+	 * -- NOTE(review): confirm against the AVP firmware) */
+	avp->resume_addr &= 0xfffffffeUL;
+	pr_info("%s()-\n", __func__);
+
+	return 0;
+
+err:
+	/* TODO: we need to kill the AVP so that when we come back
+	 * it could be reinitialized.. We'd probably need to kill
+	 * the users of it so they don't have the wrong state.
+	 * NOTE(review): avp->suspending is also left true on this path --
+	 * verify whether it should be cleared here.
+	 */
+	return ret;
+}
+
+/* Platform resume hook: restart the AVP at the resume address it
+ * published before entering LP0. */
+static int tegra_avp_resume(struct platform_device *pdev)
+{
+	struct avp_info *avp = tegra_avp;
+	int ret = 0;
+
+	pr_info("%s()+\n", __func__);
+	smp_rmb();
+	if (!avp->initialized)
+		goto out;
+
+	BUG_ON(!avp->resume_addr);
+
+	/* NOTE(review): avp_reset's return value is ignored here; a
+	 * failed resume would go unnoticed until the next RPC stalls */
+	avp_reset(avp, avp->resume_addr);
+	avp->resume_addr = 0;
+	avp->suspending = false;
+	smp_wmb();
+	enable_irq(avp->mbox_from_avp_pend_irq);
+
+	pr_info("%s()-\n", __func__);
+
+out:
+	return ret;
+}
+
+/* char device ops for /dev/tegra_avp */
+static const struct file_operations tegra_avp_fops = {
+	.owner = THIS_MODULE,
+	.open = tegra_avp_open,
+	.release = tegra_avp_release,
+	.unlocked_ioctl = tegra_avp_ioctl,
+};
+
+/* trpc node representing ports that live on the AVP side */
+static struct trpc_node avp_trpc_node = {
+	.name = "avp-remote",
+	.type = TRPC_NODE_REMOTE,
+	.try_connect = avp_node_try_connect,
+};
+
+/* Allocate and wire up all driver state: nvmap buffers for the AVP
+ * kernel image and IRAM backup, the shared message areas, the rpc
+ * node, the avp_svc service, the misc device and the mailbox irq.
+ * The irq is left disabled until the device is first opened. */
+static int tegra_avp_probe(struct platform_device *pdev)
+{
+	void *msg_area;
+	struct avp_info *avp;
+	int ret = 0;
+	int irq;
+
+	irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
+	if (irq < 0) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	avp = kzalloc(sizeof(struct avp_info), GFP_KERNEL);
+	if (!avp) {
+		pr_err("%s: cannot allocate avp_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
+	if (IS_ERR(avp->nvmap_drv)) {
+		pr_err("%s: cannot create drv nvmap client\n", __func__);
+		ret = PTR_ERR(avp->nvmap_drv);
+		goto err_nvmap_create_drv_client;
+	}
+
+	/* 1MB-aligned 1MB buffer that will hold the AVP kernel image */
+	avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
+					 NVMAP_HANDLE_WRITE_COMBINE);
+	if (IS_ERR(avp->kernel_handle)) {
+		pr_err("%s: cannot create handle\n", __func__);
+		ret = PTR_ERR(avp->kernel_handle);
+		goto err_nvmap_alloc;
+	}
+
+	avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+	if (!avp->kernel_data) {
+		pr_err("%s: cannot map kernel handle\n", __func__);
+		ret = -ENOMEM;
+		goto err_nvmap_mmap;
+	}
+
+	avp->kernel_phys = nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
+	if (IS_ERR((void *)avp->kernel_phys)) {
+		pr_err("%s: cannot pin kernel handle\n", __func__);
+		ret = PTR_ERR((void *)avp->kernel_phys);
+		goto err_nvmap_pin;
+	}
+
+	/* allocate an extra 4 bytes at the end which AVP uses to signal to
+	 * us that it is done suspending.
+	 */
+	avp->iram_backup_handle =
+		nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
+			    L1_CACHE_BYTES, NVMAP_HANDLE_WRITE_COMBINE);
+	if (IS_ERR(avp->iram_backup_handle)) {
+		pr_err("%s: cannot create handle for iram backup\n", __func__);
+		ret = PTR_ERR(avp->iram_backup_handle);
+		goto err_iram_nvmap_alloc;
+	}
+	avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
+	if (!avp->iram_backup_data) {
+		pr_err("%s: cannot map iram backup handle\n", __func__);
+		ret = -ENOMEM;
+		goto err_iram_nvmap_mmap;
+	}
+	avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
+					  avp->iram_backup_handle);
+	if (IS_ERR((void *)avp->iram_backup_phys)) {
+		pr_err("%s: cannot pin iram backup handle\n", __func__);
+		ret = PTR_ERR((void *)avp->iram_backup_phys);
+		goto err_iram_nvmap_pin;
+	}
+
+	avp->mbox_from_avp_pend_irq = irq;
+	avp->endpoints = RB_ROOT;
+	spin_lock_init(&avp->state_lock);
+	mutex_init(&avp->open_lock);
+	mutex_init(&avp->to_avp_lock);
+	mutex_init(&avp->from_avp_lock);
+	INIT_WORK(&avp->recv_work, process_avp_message);
+
+	mutex_init(&avp->libs_lock);
+	INIT_LIST_HEAD(&avp->libs);
+
+	/* single-threaded high-prio workqueue so inbound messages are
+	 * processed strictly in order */
+	avp->recv_wq = alloc_workqueue("avp-msg-recv",
+				       WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
+	if (!avp->recv_wq) {
+		pr_err("%s: can't create recve workqueue\n", __func__);
+		ret = -ENOMEM;
+		goto err_create_wq;
+	}
+
+	avp->cop_clk = clk_get(&pdev->dev, "cop");
+	if (IS_ERR(avp->cop_clk)) {
+		pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
+		/* NOTE(review): PTR_ERR(avp->cop_clk) is discarded here in
+		 * favor of a generic -ENOENT */
+		ret = -ENOENT;
+		goto err_get_cop_clk;
+	}
+
+	/* two contiguous message areas: CPU->AVP and AVP->CPU */
+	msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
+				      &avp->msg_area_addr, GFP_KERNEL);
+	if (!msg_area) {
+		pr_err("%s: cannot allocate msg_area\n", __func__);
+		ret = -ENOMEM;
+		goto err_alloc_msg_area;
+	}
+	memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
+	/* mailbox value handed to the AVP: area address >> 4 with flag
+	 * bits packed into the low nibble */
+	avp->msg = ((avp->msg_area_addr >> 4) |
+		    MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+	avp->msg_to_avp = msg_area;
+	avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;
+
+	avp_halt(avp);
+
+	avp_trpc_node.priv = avp;
+	ret = trpc_node_register(&avp_trpc_node);
+	if (ret) {
+		pr_err("%s: Can't register avp rpc node\n", __func__);
+		goto err_node_reg;
+	}
+	avp->rpc_node = &avp_trpc_node;
+
+	avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
+	if (IS_ERR(avp->avp_svc)) {
+		pr_err("%s: Cannot initialize avp_svc\n", __func__);
+		ret = PTR_ERR(avp->avp_svc);
+		goto err_avp_svc_init;
+	}
+
+	avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+	avp->misc_dev.name = "tegra_avp";
+	avp->misc_dev.fops = &tegra_avp_fops;
+
+	ret = misc_register(&avp->misc_dev);
+	if (ret) {
+		pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
+		goto err_misc_reg;
+	}
+
+	ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
+	if (ret) {
+		pr_err("%s: cannot register irq handler\n", __func__);
+		goto err_req_irq_pend;
+	}
+	/* no messages are expected until the AVP is booted in avp_init */
+	disable_irq(avp->mbox_from_avp_pend_irq);
+
+	tegra_avp = avp;
+
+	pr_info("%s: driver registered, kernel %lx(%p), msg area %lx/%lx\n",
+		__func__, avp->kernel_phys, avp->kernel_data,
+		(unsigned long)avp->msg_area_addr,
+		(unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);
+
+	return 0;
+
+err_req_irq_pend:
+	misc_deregister(&avp->misc_dev);
+err_misc_reg:
+	avp_svc_destroy(avp->avp_svc);
+err_avp_svc_init:
+	trpc_node_unregister(avp->rpc_node);
+err_node_reg:
+	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
+			  avp->msg_area_addr);
+err_alloc_msg_area:
+	clk_put(avp->cop_clk);
+err_get_cop_clk:
+	destroy_workqueue(avp->recv_wq);
+err_create_wq:
+	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_pin:
+	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+err_iram_nvmap_mmap:
+	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_alloc:
+	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_pin:
+	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+err_nvmap_mmap:
+	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_alloc:
+	nvmap_client_put(avp->nvmap_drv);
+err_nvmap_create_drv_client:
+	kfree(avp);
+	tegra_avp = NULL;
+	return ret;
+}
+
+/* Platform remove: refuse while a client holds the device open;
+ * otherwise undo everything that probe set up. */
+static int tegra_avp_remove(struct platform_device *pdev)
+{
+	struct avp_info *avp = tegra_avp;
+
+	if (!avp)
+		return 0;
+
+	mutex_lock(&avp->open_lock);
+	if (avp->opened) {
+		mutex_unlock(&avp->open_lock);
+		return -EBUSY;
+	}
+	/* ensure that no one can open while we tear down */
+	avp->opened = true;
+	mutex_unlock(&avp->open_lock);
+
+	/* bugfix: the irq requested in probe was never freed here, which
+	 * left a registered handler pointing at freed memory after kfree */
+	free_irq(avp->mbox_from_avp_pend_irq, avp);
+	misc_deregister(&avp->misc_dev);
+
+	avp_halt(avp);
+
+	avp_svc_destroy(avp->avp_svc);
+	trpc_node_unregister(avp->rpc_node);
+	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
+			  avp->msg_area_addr);
+	clk_put(avp->cop_clk);
+	destroy_workqueue(avp->recv_wq);
+	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+	nvmap_client_put(avp->nvmap_drv);
+	kfree(avp);
+	tegra_avp = NULL;
+	return 0;
+}
+
+static struct platform_driver tegra_avp_driver = {
+	.probe = tegra_avp_probe,
+	.remove = tegra_avp_remove,
+	.suspend = tegra_avp_suspend,
+	.resume = tegra_avp_resume,
+	.driver = {
+		.name = TEGRA_AVP_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* module entry points: just (un)register the platform driver */
+static int __init tegra_avp_init(void)
+{
+	return platform_driver_register(&tegra_avp_driver);
+}
+
+static void __exit tegra_avp_exit(void)
+{
+	platform_driver_unregister(&tegra_avp_driver);
+}
+
+module_init(tegra_avp_init);
+module_exit(tegra_avp_exit);
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
+#define __MEDIA_VIDEO_TEGRA_AVP_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "trpc.h"
+
+struct avp_svc_info;
+
+/* Create/destroy the service that handles requests originating on the
+ * AVP (nvmap, module clock/reset, power, dfs, printf, ...). */
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+				  struct trpc_node *rpc_node);
+void avp_svc_destroy(struct avp_svc_info *avp_svc);
+/* Start/stop processing of remote requests on the local service port. */
+int avp_svc_start(struct avp_svc_info *svc);
+void avp_svc_stop(struct avp_svc_info *svc);
+
+#endif
--- /dev/null
+/* drivers/media/video/tegra/avp/avp_msg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+/* Note: the port name string is not NUL terminated, so make sure to
+ * allocate appropriate space locally when operating on the string */
+#define XPC_PORT_NAME_LEN 16
+
+#define SVC_ARGS_MAX_LEN 220
+#define SVC_MAX_STRING_LEN 200
+
+/* error codes returned by the AVP firmware in response messages */
+#define AVP_ERR_ENOTSUP 0x2
+#define AVP_ERR_EINVAL 0x4
+#define AVP_ERR_ENOMEM 0x6
+#define AVP_ERR_EACCES 0x00030010
+
+/* service message ids.  NOTE(review): these values appear to form the
+ * wire protocol with the AVP-side firmware -- do not renumber. */
+enum {
+	SVC_NVMAP_CREATE = 0,
+	SVC_NVMAP_CREATE_RESPONSE = 1,
+	SVC_NVMAP_FREE = 3,
+	SVC_NVMAP_ALLOC = 4,
+	SVC_NVMAP_ALLOC_RESPONSE = 5,
+	SVC_NVMAP_PIN = 6,
+	SVC_NVMAP_PIN_RESPONSE = 7,
+	SVC_NVMAP_UNPIN = 8,
+	SVC_NVMAP_UNPIN_RESPONSE = 9,
+	SVC_NVMAP_GET_ADDRESS = 10,
+	SVC_NVMAP_GET_ADDRESS_RESPONSE = 11,
+	SVC_NVMAP_FROM_ID = 12,
+	SVC_NVMAP_FROM_ID_RESPONSE = 13,
+	SVC_MODULE_CLOCK = 14,
+	SVC_MODULE_CLOCK_RESPONSE = 15,
+	SVC_MODULE_RESET = 16,
+	SVC_MODULE_RESET_RESPONSE = 17,
+	SVC_POWER_REGISTER = 18,
+	SVC_POWER_UNREGISTER = 19,
+	SVC_POWER_STARVATION = 20,
+	SVC_POWER_BUSY_HINT = 21,
+	SVC_POWER_BUSY_HINT_MULTI = 22,
+	SVC_DFS_GETSTATE = 23,
+	SVC_DFS_GETSTATE_RESPONSE = 24,
+	SVC_POWER_RESPONSE = 25,
+	SVC_POWER_MAXFREQ = 26,
+	SVC_ENTER_LP0 = 27,
+	SVC_ENTER_LP0_RESPONSE = 28,
+	SVC_PRINTF = 29,
+	SVC_LIBRARY_ATTACH = 30,
+	SVC_LIBRARY_ATTACH_RESPONSE = 31,
+	SVC_LIBRARY_DETACH = 32,
+	SVC_LIBRARY_DETACH_RESPONSE = 33,
+	SVC_AVP_WDT_RESET = 34,
+	SVC_DFS_GET_CLK_UTIL = 35,
+	SVC_DFS_GET_CLK_UTIL_RESPONSE = 36,
+};
+
+/* generic service message; payload layout depends on svc_id */
+struct svc_msg {
+	u32 svc_id;
+	u8 data[0];
+};
+
+/* generic "status only" response */
+struct svc_common_resp {
+	u32 svc_id;
+	u32 err;
+};
+
+struct svc_printf {
+	u32 svc_id;
+	const char str[SVC_MAX_STRING_LEN];
+};
+
+/* SVC_ENTER_LP0: ask the AVP to back up src (IRAM) into buf and
+ * suspend itself */
+struct svc_enter_lp0 {
+	u32 svc_id;
+	u32 src_addr;
+	u32 buf_addr;
+	u32 buf_size;
+};
+
+/* nvmap messages: remote create/alloc/pin/... of nvmap handles on
+ * behalf of the AVP */
+struct svc_nvmap_create {
+	u32 svc_id;
+	u32 size;
+};
+
+struct svc_nvmap_create_resp {
+	u32 svc_id;
+	u32 handle_id;
+	u32 err;
+};
+
+/* heap selectors usable in svc_nvmap_alloc.heaps[] */
+enum {
+	AVP_NVMAP_HEAP_EXTERNAL = 1,
+	AVP_NVMAP_HEAP_GART = 2,
+	AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT = 3,
+	AVP_NVMAP_HEAP_IRAM = 4,
+};
+
+struct svc_nvmap_alloc {
+	u32 svc_id;
+	u32 handle_id;
+	u32 heaps[4];
+	u32 num_heaps;
+	u32 align;
+	u32 mapping_type;
+};
+
+struct svc_nvmap_free {
+	u32 svc_id;
+	u32 handle_id;
+};
+
+struct svc_nvmap_pin {
+	u32 svc_id;
+	u32 handle_id;
+};
+
+struct svc_nvmap_pin_resp {
+	u32 svc_id;
+	u32 addr;
+};
+
+struct svc_nvmap_unpin {
+	u32 svc_id;
+	u32 handle_id;
+};
+
+struct svc_nvmap_from_id {
+	u32 svc_id;
+	u32 handle_id;
+};
+
+struct svc_nvmap_get_addr {
+	u32 svc_id;
+	u32 handle_id;
+	u32 offs;
+};
+
+struct svc_nvmap_get_addr_resp {
+	u32 svc_id;
+	u32 addr;
+};
+
+/* library management messages */
+enum {
+	AVP_LIB_REASON_ATTACH = 0,
+	AVP_LIB_REASON_DETACH = 1,
+	AVP_LIB_REASON_ATTACH_GREEDY = 2,
+};
+
+/* SVC_LIBRARY_ATTACH: address/lib_size describe the staged image in
+ * physical memory; args is an opaque blob handed to the library */
+struct svc_lib_attach {
+	u32 svc_id;
+	u32 address;
+	u32 args_len;
+	u32 lib_size;
+	u8 args[SVC_ARGS_MAX_LEN];
+	u32 reason;
+};
+
+struct svc_lib_attach_resp {
+	u32 svc_id;
+	u32 err;
+	u32 lib_id;
+};
+
+struct svc_lib_detach {
+	u32 svc_id;
+	u32 reason;
+	u32 lib_id;
+};
+
+struct svc_lib_detach_resp {
+	u32 svc_id;
+	u32 err;
+};
+
+/* hw module management from the AVP side */
+enum {
+	AVP_MODULE_ID_AVP = 2,
+	AVP_MODULE_ID_VCP = 3,
+	AVP_MODULE_ID_BSEA = 27,
+	AVP_MODULE_ID_VDE = 28,
+	AVP_MODULE_ID_MPE = 29,
+};
+
+/* SVC_MODULE_CLOCK / SVC_MODULE_RESET request */
+struct svc_module_ctrl {
+	u32 svc_id;
+	u32 module_id;
+	u32 client_id;
+	u8 enable;
+};
+
+/* power messages */
+struct svc_pwr_register {
+	u32 svc_id;
+	u32 client_id;
+	u32 unused;
+};
+
+struct svc_pwr_register_resp {
+	u32 svc_id;
+	u32 err;
+	u32 client_id;
+};
+
+struct svc_pwr_starve_hint {
+	u32 svc_id;
+	u32 dfs_clk_id;
+	u32 client_id;
+	u8 starving;
+};
+
+struct svc_pwr_busy_hint {
+	u32 svc_id;
+	u32 dfs_clk_id;
+	u32 client_id;
+	u32 boost_ms; /* duration */
+	u32 boost_freq; /* in khz */
+};
+
+struct svc_pwr_max_freq {
+	u32 svc_id;
+	u32 module_id;
+};
+
+struct svc_pwr_max_freq_resp {
+	u32 svc_id;
+	u32 freq;
+};
+
+/* dfs related messages */
+enum {
+	AVP_DFS_STATE_INVALID = 0,
+	AVP_DFS_STATE_DISABLED = 1,
+	AVP_DFS_STATE_STOPPED = 2,
+	AVP_DFS_STATE_CLOSED_LOOP = 3,
+	AVP_DFS_STATE_PROFILED_LOOP = 4,
+};
+
+struct svc_dfs_get_state_resp {
+	u32 svc_id;
+	u32 state;
+};
+
+/* clock selectors usable in dfs_clk_id fields */
+enum {
+	AVP_DFS_CLK_CPU = 1,
+	AVP_DFS_CLK_AVP = 2,
+	AVP_DFS_CLK_SYSTEM = 3,
+	AVP_DFS_CLK_AHB = 4,
+	AVP_DFS_CLK_APB = 5,
+	AVP_DFS_CLK_VDE = 6,
+	/* external memory controller */
+	AVP_DFS_CLK_EMC = 7,
+};
+
+struct avp_clk_usage {
+	u32 min;
+	u32 max;
+	u32 curr_min;
+	u32 curr_max;
+	u32 curr;
+	u32 avg; /* average activity.. whatever that means */
+};
+
+struct svc_dfs_get_clk_util {
+	u32 svc_id;
+	u32 dfs_clk_id;
+};
+
+/* all units are in kHz */
+struct svc_dfs_get_clk_util_resp {
+	u32 svc_id;
+	u32 err;
+	struct avp_clk_usage usage;
+};
+
+/************************/
+
+/* low-level xpc commands carried in the shared message areas */
+enum {
+	CMD_ACK = 0,
+	CMD_CONNECT = 2,
+	CMD_DISCONNECT = 3,
+	CMD_MESSAGE = 4,
+	CMD_RESPONSE = 5,
+};
+
+/* generic header for inbound xpc messages */
+struct msg_data {
+	u32 cmd;
+	u8 data[0];
+};
+
+struct msg_ack {
+	u32 cmd;
+	u32 arg;
+};
+
+struct msg_connect {
+	u32 cmd;
+	u32 port_id;
+	/* not NUL terminated, just 0 padded */
+	char name[XPC_PORT_NAME_LEN];
+};
+
+struct msg_connect_reply {
+	u32 cmd;
+	u32 port_id;
+};
+
+struct msg_disconnect {
+	u32 cmd;
+	u32 port_id;
+};
+
+struct msg_disconnect_reply {
+	u32 cmd;
+	u32 ack;
+};
+
+/* CMD_MESSAGE: msg_len bytes of payload for the given port */
+struct msg_port_data {
+	u32 cmd;
+	u32 port_id;
+	u32 msg_len;
+	u8 data[0];
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+
+#include <mach/clk.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+/* Debug trace flags for the module parameter 'debug_mask' below. */
+enum {
+ AVP_DBG_TRACE_SVC = 1U << 0,
+};
+
+static u32 debug_mask = 0;
+module_param_named(debug_mask, debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(debug_mask & (flag))) pr_info(args); } while (0)
+
+/* Indices into avp_svc_info.clks[] for the clocks the AVP may control. */
+enum {
+ CLK_REQUEST_VCP = 0,
+ CLK_REQUEST_BSEA = 1,
+ CLK_REQUEST_VDE = 2,
+ NUM_CLK_REQUESTS,
+};
+
+struct avp_module {
+ const char *name;
+ u32 clk_req;
+};
+
+/* Table mapping remote AVP module ids to clock names/requests; sparse
+ * entries (no .name) denote ids with no controllable clock. */
+static struct avp_module avp_modules[] = {
+ [AVP_MODULE_ID_VCP] = {
+ .name = "vcp",
+ .clk_req = CLK_REQUEST_VCP,
+ },
+ [AVP_MODULE_ID_BSEA] = {
+ .name = "bsea",
+ .clk_req = CLK_REQUEST_BSEA,
+ },
+ [AVP_MODULE_ID_VDE] = {
+ .name = "vde",
+ .clk_req = CLK_REQUEST_VDE,
+ },
+};
+#define NUM_AVP_MODULES ARRAY_SIZE(avp_modules)
+
+/* Refcounted clock handle; refcnt is protected by avp_svc_info.clk_lock. */
+struct avp_clk {
+ struct clk *clk;
+ int refcnt;
+ struct avp_module *mod;
+};
+
+/* State for one AVP service instance (clocks, RPC endpoint, thread). */
+struct avp_svc_info {
+ struct avp_clk clks[NUM_CLK_REQUESTS];
+ /* used for dvfs */
+ struct clk *sclk;
+ struct clk *emcclk;
+
+ struct mutex clk_lock;
+
+ struct trpc_endpoint *cpu_ep;
+ struct task_struct *svc_thread;
+
+ /* client for remote allocations, for easy tear down */
+ struct nvmap_client *nvmap_remote;
+ struct trpc_node *rpc_node;
+};
+
+/* Handle SVC_NVMAP_CREATE: create an nvmap handle of msg->size bytes on
+ * behalf of the remote AVP and reply with its id (0 + AVP_ERR_ENOMEM on
+ * failure). */
+static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
+ struct svc_nvmap_create_resp resp;
+ struct nvmap_handle_ref *handle;
+ u32 handle_id = 0;
+ u32 err = 0;
+
+ handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
+ if (unlikely(IS_ERR(handle))) {
+ pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
+ msg->size);
+ err = AVP_ERR_ENOMEM;
+ } else
+ handle_id = (u32)nvmap_ref_to_id(handle);
+
+ resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
+ resp.err = err;
+ resp.handle_id = handle_id;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+ /* TODO: do we need to put the handle if send_msg failed? */
+}
+
+/* Handle SVC_NVMAP_ALLOC: back an existing handle with memory from the
+ * heaps the remote requested (AVP heap ids are translated to nvmap heap
+ * mask bits). Replies with AVP_ERR_EACCES for an unknown handle or
+ * AVP_ERR_ENOMEM if the allocation fails. */
+static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle *handle;
+ u32 err = 0;
+ u32 heap_mask = 0;
+ int i;
+ size_t align;
+
+ handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
+ err = AVP_ERR_EACCES;
+ goto out;
+ }
+
+ if (msg->num_heaps > 4) {
+ pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
+ msg->num_heaps);
+ /* TODO: should we error out instead ? */
+ msg->num_heaps = 0;
+ }
+ /* no heaps specified: fall back to a sane default set */
+ if (msg->num_heaps == 0)
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
+
+ for (i = 0; i < msg->num_heaps; i++) {
+ switch (msg->heaps[i]) {
+ case AVP_NVMAP_HEAP_EXTERNAL:
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ break;
+ case AVP_NVMAP_HEAP_GART:
+ heap_mask |= NVMAP_HEAP_IOVMM;
+ break;
+ case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ break;
+ case AVP_NVMAP_HEAP_IRAM:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* never allow alignment below the L1 cache line size */
+ align = max_t(size_t, L1_CACHE_BYTES, msg->align);
+ err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
+ heap_mask, align, 0);
+ nvmap_handle_put(handle);
+ if (err) {
+ pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
+ msg->handle_id, err);
+ err = AVP_ERR_ENOMEM;
+ }
+
+out:
+ resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_NVMAP_FREE: drop the remote client's reference to a handle.
+ * Note: no response message is sent for this request. */
+static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
+
+ nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+}
+
+/* Handle SVC_NVMAP_PIN: duplicate + pin the handle and reply with its
+ * physical address, or ~0UL on failure (the response carries no error
+ * field). NOTE(review): if nvmap_pin_ids() fails, the reference taken by
+ * nvmap_duplicate_handle_id() is not dropped here — possible leak on the
+ * error path; confirm against nvmap ownership rules. */
+static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
+ struct svc_nvmap_pin_resp resp;
+ struct nvmap_handle_ref *handle;
+ unsigned long addr = ~0UL;
+ unsigned long id = msg->handle_id;
+ int err;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't dup handle %lx\n", id);
+ goto out;
+ }
+ err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
+ if (err) {
+ pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
+ goto out;
+ }
+ addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
+
+out:
+ resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
+ resp.addr = addr;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_NVMAP_UNPIN: unpin the handle and drop the reference that
+ * was taken by the matching pin's duplicate. Always replies success. */
+static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
+ struct svc_common_resp resp;
+ unsigned long id = msg->handle_id;
+
+ nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
+ nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+
+ resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_NVMAP_FROM_ID: take a reference on an existing handle id so
+ * the remote can use it; reply AVP_ERR_ENOMEM if duplication fails. */
+static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle_ref *handle;
+ int err = 0;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
+ msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
+ msg->handle_id, (int)PTR_ERR(handle));
+ err = AVP_ERR_ENOMEM;
+ }
+
+ resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_NVMAP_GET_ADDRESS: reply with the handle's address plus the
+ * requested offset. No validation/error reporting is done here. */
+static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
+ struct svc_nvmap_get_addr_resp resp;
+
+ resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
+ resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
+ resp.addr += msg->offs;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_POWER_REGISTER: power-client registration is a no-op on this
+ * side; just echo the client id back with success. */
+static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
+ struct svc_pwr_register_resp resp;
+
+ resp.svc_id = SVC_POWER_RESPONSE;
+ resp.err = 0;
+ resp.client_id = msg->client_id;
+
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Look up a module id in the sparse avp_modules[] table; returns NULL for
+ * out-of-range ids or unset entries (whose .name is NULL). */
+static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
+{
+ if (id < NUM_AVP_MODULES && avp_modules[id].name)
+ return &avp_modules[id];
+ return NULL;
+}
+
+/* Handle SVC_MODULE_RESET: pulse the peripheral reset line of the named
+ * module (assert, 10us delay, deassert). Unknown ids are logged but still
+ * answered with err==0 because the remote side ignores reset errors. */
+static void do_svc_module_reset(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ if (msg->module_id == AVP_MODULE_ID_AVP)
+ pr_err("avp_svc: AVP suicidal?!?!\n");
+ else
+ pr_err("avp_svc: Unknown module reset requested: %d\n",
+ msg->module_id);
+ /* other side doesn't handle errors for reset */
+ resp.err = 0;
+ goto send_response;
+ }
+
+ aclk = &avp_svc->clks[mod->clk_req];
+ tegra_periph_reset_assert(aclk->clk);
+ udelay(10);
+ tegra_periph_reset_deassert(aclk->clk);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_RESET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_MODULE_CLOCK: refcounted enable/disable of a module clock.
+ * The first enable also turns on the shared sclk/emc clocks (for dvfs);
+ * the last disable turns them off again. Serialized by clk_lock. */
+static void do_svc_module_clock(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+
+ mutex_lock(&avp_svc->clk_lock);
+ aclk = &avp_svc->clks[mod->clk_req];
+ if (msg->enable) {
+ if (aclk->refcnt++ == 0) {
+ clk_enable(avp_svc->emcclk);
+ clk_enable(avp_svc->sclk);
+ clk_enable(aclk->clk);
+ }
+ } else {
+ if (unlikely(aclk->refcnt == 0)) {
+ pr_err("avp_svc: unbalanced clock disable for '%s'\n",
+ aclk->mod->name);
+ } else if (--aclk->refcnt == 0) {
+ /* disable in reverse order of the enables above */
+ clk_disable(aclk->clk);
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Send an empty success response with the given svc_id; used for requests
+ * that need an ack but have nothing to do on this side. */
+static void do_svc_null_response(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len, u32 resp_svc_id)
+{
+ struct svc_common_resp resp;
+ resp.svc_id = resp_svc_id;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_DFS_GETSTATE: DFS is not implemented here, so always report
+ * AVP_DFS_STATE_STOPPED. */
+static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_state_resp resp;
+ resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
+ resp.state = AVP_DFS_STATE_STOPPED;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_DFS_GET_CLK_UTIL: no DFS stats are tracked; reply success
+ * with an all-zero usage structure. */
+static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_clk_util_resp resp;
+
+ resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
+ resp.err = 0;
+ memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_POWER_MAXFREQ: max-frequency queries are unsupported; reply
+ * with freq == 0. */
+static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_max_freq_resp resp;
+
+ resp.svc_id = SVC_POWER_MAXFREQ;
+ resp.freq = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+/* Handle SVC_PRINTF: forward a string logged by the AVP firmware to the
+ * kernel log, copying it first to guarantee NUL termination. */
+static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_printf *msg = (struct svc_printf *)_msg;
+ char tmp_str[SVC_MAX_STRING_LEN];
+
+ /* ensure we null terminate the source */
+ strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
+ pr_info("[AVP]: %s", tmp_str);
+}
+
+/* Demultiplex one incoming service request by svc_id to its handler.
+ * Returns 0 if the message was recognized (even when the handler reported
+ * an error in its response), -ENOMSG for unknown svc_ids. */
+static int dispatch_svc_message(struct avp_svc_info *avp_svc,
+ struct svc_msg *msg,
+ size_t len)
+{
+ int ret = 0;
+
+ switch (msg->svc_id) {
+ case SVC_NVMAP_CREATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
+ do_svc_nvmap_create(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_ALLOC:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
+ do_svc_nvmap_alloc(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FREE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
+ do_svc_nvmap_free(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_PIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
+ do_svc_nvmap_pin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_UNPIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
+ do_svc_nvmap_unpin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FROM_ID:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
+ do_svc_nvmap_from_id(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_GET_ADDRESS:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
+ do_svc_nvmap_get_addr(avp_svc, msg, len);
+ break;
+ case SVC_POWER_REGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
+ do_svc_pwr_register(avp_svc, msg, len);
+ break;
+ case SVC_POWER_UNREGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT_MULTI:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
+ __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT:
+ case SVC_POWER_STARVATION:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
+ __func__);
+ do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
+ break;
+ case SVC_POWER_MAXFREQ:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
+ __func__);
+ do_svc_pwr_max_freq(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GETSTATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
+ do_svc_dfs_get_state(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_RESET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
+ do_svc_module_reset(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_CLOCK:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
+ do_svc_module_clock(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GET_CLK_UTIL:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
+ do_svc_dfs_get_clk_util(avp_svc, msg, len);
+ break;
+ case SVC_PRINTF:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
+ do_svc_printf(avp_svc, msg, len);
+ break;
+ case SVC_AVP_WDT_RESET:
+ pr_err("avp_svc: AVP has been reset by watchdog\n");
+ break;
+ default:
+ pr_err("avp_svc: invalid SVC call 0x%x\n", msg->svc_id);
+ ret = -ENOMSG;
+ break;
+ }
+
+ return ret;
+}
+
+/* Service thread main loop: wait for the AVP to connect, then receive and
+ * dispatch request messages until the port closes or kthread_stop() is
+ * called. Drops the endpoint reference taken by avp_svc_start() on exit. */
+static int avp_svc_thread(void *data)
+{
+ struct avp_svc_info *avp_svc = data;
+ u8 buf[TEGRA_RPC_MAX_MSG_LEN];
+ struct svc_msg *msg = (struct svc_msg *)buf;
+ int ret;
+
+ BUG_ON(!avp_svc->cpu_ep);
+
+ /* block indefinitely until the remote peer connects */
+ ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
+ if (ret) {
+ /* XXX: teardown?! */
+ pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
+ goto err;
+ }
+
+ pr_info("%s: got remote peer\n", __func__);
+
+ while (!kthread_should_stop()) {
+ DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
+ ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
+ TEGRA_RPC_MAX_MSG_LEN, -1);
+ DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
+ if (ret < 0) {
+ pr_err("%s: couldn't receive msg\n", __func__);
+ /* XXX: port got closed? we should exit? */
+ goto err;
+ } else if (!ret) {
+ pr_err("%s: received msg of len 0?!\n", __func__);
+ continue;
+ }
+ dispatch_svc_message(avp_svc, msg, ret);
+ }
+
+err:
+ trpc_put(avp_svc->cpu_ep);
+ pr_info("%s: done\n", __func__);
+ return ret;
+}
+
+/* Start the service: create the remote nvmap client and the RPC_CPU_PORT
+ * endpoint, then launch the service thread (which owns an extra endpoint
+ * reference). Returns 0 on success or a negative errno, unwinding all
+ * partially acquired resources on failure. */
+int avp_svc_start(struct avp_svc_info *avp_svc)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
+ if (IS_ERR(avp_svc->nvmap_remote)) {
+ pr_err("%s: cannot create remote nvmap client\n", __func__);
+ ret = PTR_ERR(avp_svc->nvmap_remote);
+ goto err_nvmap_create_remote_client;
+ }
+
+ ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_cpu_port_create;
+ }
+
+ /* TODO: protect this */
+ avp_svc->cpu_ep = ep;
+
+ /* the service thread should get an extra reference for the port */
+ trpc_get(avp_svc->cpu_ep);
+ avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
+ "avp_svc_thread");
+ if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
+ avp_svc->svc_thread = NULL;
+ pr_err("%s: can't create svc thread\n", __func__);
+ ret = -ENOMEM;
+ goto err_kthread;
+ }
+ return 0;
+
+err_kthread:
+ /* drop both the original and the thread's extra reference */
+ trpc_close(avp_svc->cpu_ep);
+ trpc_put(avp_svc->cpu_ep);
+ avp_svc->cpu_ep = NULL;
+err_cpu_port_create:
+ nvmap_client_put(avp_svc->nvmap_remote);
+err_nvmap_create_remote_client:
+ avp_svc->nvmap_remote = NULL;
+ return ret;
+}
+
+/* Stop the service: close the port, stop the thread, release the nvmap
+ * client, and force any clocks the remote left enabled back off. */
+void avp_svc_stop(struct avp_svc_info *avp_svc)
+{
+ int ret;
+ int i;
+
+ trpc_close(avp_svc->cpu_ep);
+ ret = kthread_stop(avp_svc->svc_thread);
+ if (ret == -EINTR) {
+ /* the thread never started, drop its extra reference */
+ trpc_put(avp_svc->cpu_ep);
+ }
+ avp_svc->cpu_ep = NULL;
+
+ nvmap_client_put(avp_svc->nvmap_remote);
+ avp_svc->nvmap_remote = NULL;
+
+ mutex_lock(&avp_svc->clk_lock);
+ for (i = 0; i < NUM_CLK_REQUESTS; i++) {
+ struct avp_clk *aclk = &avp_svc->clks[i];
+ BUG_ON(aclk->refcnt < 0);
+ if (aclk->refcnt > 0) {
+ pr_info("%s: remote left clock '%s' on\n", __func__,
+ aclk->mod->name);
+ clk_disable(aclk->clk);
+ /* sclk/emcclk was enabled once for every clock */
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ aclk->refcnt = 0;
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+}
+
+/* Allocate and initialize the service context: acquire all per-module
+ * clocks plus the shared sclk/emc clocks. Returns the new context or an
+ * ERR_PTR; on failure every clock obtained so far is released. */
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node)
+{
+ struct avp_svc_info *avp_svc;
+ int ret;
+ int i;
+ int cnt = 0;
+
+ BUG_ON(!rpc_node);
+
+ avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
+ if (!avp_svc) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
+
+ for (i = 0; i < NUM_AVP_MODULES; i++) {
+ struct avp_module *mod = &avp_modules[i];
+ struct clk *clk;
+ if (!mod->name)
+ continue;
+ /* catch table/index mismatches at runtime */
+ BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
+ cnt++ >= NUM_CLK_REQUESTS);
+
+ clk = clk_get(&pdev->dev, mod->name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("avp_svc: Couldn't get required clocks\n");
+ goto err_get_clks;
+ }
+ avp_svc->clks[mod->clk_req].clk = clk;
+ avp_svc->clks[mod->clk_req].mod = mod;
+ avp_svc->clks[mod->clk_req].refcnt = 0;
+ }
+
+ avp_svc->sclk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(avp_svc->sclk)) {
+ pr_err("avp_svc: Couldn't get sclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+
+ avp_svc->emcclk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(avp_svc->emcclk)) {
+ pr_err("avp_svc: Couldn't get emcclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+
+ /*
+ * The emc is a shared clock, it will be set to the highest
+ * requested rate from any user. Set the rate to ULONG_MAX to
+ * always request the max rate whenever this request is enabled
+ */
+ clk_set_rate(avp_svc->emcclk, ULONG_MAX);
+
+ avp_svc->rpc_node = rpc_node;
+
+ mutex_init(&avp_svc->clk_lock);
+
+ return avp_svc;
+
+err_get_clks:
+ /* zeroed/ERR entries are skipped by the IS_ERR_OR_NULL checks */
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ if (avp_svc->clks[i].clk)
+ clk_put(avp_svc->clks[i].clk);
+ if (!IS_ERR_OR_NULL(avp_svc->sclk))
+ clk_put(avp_svc->sclk);
+ if (!IS_ERR_OR_NULL(avp_svc->emcclk))
+ clk_put(avp_svc->emcclk);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+/* Release every clock acquired by avp_svc_init() and free the context.
+ * Assumes init succeeded, so all clks[] entries are valid. */
+void avp_svc_destroy(struct avp_svc_info *avp_svc)
+{
+ int i;
+
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ clk_put(avp_svc->clks[i].clk);
+ clk_put(avp_svc->sclk);
+ clk_put(avp_svc->emcclk);
+
+ kfree(avp_svc);
+}
--- /dev/null
+/*
+ * arch/arm/mach-tegra/headavp.S
+ *
+ * AVP kernel launcher stub; programs the AVP MMU and jumps to the
+ * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
+ * in ARM mode.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "headavp.h"
+
+#define PTE0_COMPARE 0
+/* the default translation will translate any VA within
+ * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
+ * _tegra_avp_boot_stub_data.map_phys_addr
+ */
+#define PTE0_DEFAULT (AVP_KERNEL_VIRT_BASE | 0x3ff0)
+
+#define PTE0_TRANSLATE 4
+
+#define TRANSLATE_DATA (1 << 11)
+#define TRANSLATE_CODE (1 << 10)
+#define TRANSLATE_WR (1 << 9)
+#define TRANSLATE_RD (1 << 8)
+#define TRANSLATE_HIT (1 << 7)
+#define TRANSLATE_EN (1 << 2)
+
+#define TRANSLATE_OPT (TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
+ TRANSLATE_RD | TRANSLATE_HIT)
+
+@ Boot stub: load the four words of _tegra_avp_boot_stub_data
+@ (r0=mmu_tlb_base, r1=jump_addr, r2=map_virt_addr, r3=map_phys_addr),
+@ program the AVP MMU compare/translate registers, then jump to the
+@ kernel entry point in r1. ARMv4-only instructions, ARM mode.
+ENTRY(_tegra_avp_boot_stub)
+ adr r4, _tegra_avp_boot_stub_data
+ ldmia r4, {r0-r3}
+ str r2, [r0, #PTE0_COMPARE]
+ @ megabyte-align the physical address before OR-ing in the flags
+ bic r3, r3, #0xff0
+ bic r3, r3, #0x00f
+ orr r3, r3, #TRANSLATE_OPT
+ orr r3, r3, #TRANSLATE_EN
+ str r3, [r0, #PTE0_TRANSLATE]
+ bx r1
+ b .
+ENDPROC(_tegra_avp_boot_stub)
+ .type _tegra_avp_boot_stub_data, %object
+@ The 0xdeadbeef/0xdeadd00d placeholders are patched by the loader with
+@ the real jump and physical map addresses before the stub runs.
+ENTRY(_tegra_avp_boot_stub_data)
+ .long AVP_MMU_TLB_BASE
+ .long 0xdeadbeef
+ .long PTE0_DEFAULT
+ .long 0xdeadd00d
+ .size _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
--- /dev/null
+/*
+ * arch/arm/mach-tegra/headavp.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_HEADAVP_H
+#define _MACH_TEGRA_HEADAVP_H
+
+#define AVP_MMU_TLB_BASE 0xF000F000
+#define AVP_KERNEL_VIRT_BASE 0x00100000
+
+#ifndef __ASSEMBLY__
+
+/* C view of the four-word data block embedded in headavp.S; field order
+ * must match the .long sequence in _tegra_avp_boot_stub_data exactly. */
+struct tegra_avp_boot_stub_data {
+ unsigned long mmu_tlb_base;
+ unsigned long jump_addr;
+ unsigned long map_virt_addr;
+ unsigned long map_phys_addr;
+};
+
+extern void _tegra_avp_boot_stub(void);
+extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+
+struct trpc_port;
+/* One endpoint of a bidirectional port; 'out' points at the peer
+ * endpoint that receives what this side sends. */
+struct trpc_endpoint {
+ struct list_head msg_list;
+ wait_queue_head_t msg_waitq;
+
+ struct trpc_endpoint *out;
+ struct trpc_port *port;
+
+ struct trpc_node *owner;
+
+ struct completion *connect_done;
+ bool ready;
+ struct trpc_ep_ops *ops;
+ void *priv;
+};
+
+/* A named, refcounted port holding exactly two peer endpoints. */
+struct trpc_port {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+
+ /* protects peer and closed state */
+ spinlock_t lock;
+ struct trpc_endpoint peers[2];
+ bool closed;
+
+ /* private */
+ struct kref ref;
+ struct rb_node rb_node;
+};
+
+/* Debug trace flags for the module parameter 'debug_mask' below. */
+enum {
+ TRPC_TRACE_MSG = 1U << 0,
+ TRPC_TRACE_CONN = 1U << 1,
+ TRPC_TRACE_PORT = 1U << 2,
+};
+
+static u32 trpc_debug_mask = 0;
+module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)
+
+/* Global module state: message slab cache, the rb-tree of named ports,
+ * and the list of registered transport nodes. */
+struct tegra_rpc_info {
+ struct kmem_cache *msg_cache;
+
+ spinlock_t ports_lock;
+ struct rb_root ports;
+
+ struct list_head node_list;
+ struct mutex node_lock;
+};
+
+struct trpc_msg {
+ struct list_head list;
+
+ size_t len;
+ u8 payload[TEGRA_RPC_MAX_MSG_LEN];
+};
+
+static struct tegra_rpc_info *tegra_rpc;
+static struct dentry *trpc_debug_root;
+
+static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
+
+/* a few accessors for the outside world to keep the trpc_endpoint struct
+ * definition private to this module */
+void *trpc_priv(struct trpc_endpoint *ep)
+{
+ return ep->priv;
+}
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
+{
+ return ep->out;
+}
+
+const char *trpc_name(struct trpc_endpoint *ep)
+{
+ return ep->port->name;
+}
+
+/* a port is connected once both of its endpoints are ready */
+static inline bool is_connected(struct trpc_port *port)
+{
+ return port->peers[0].ready && port->peers[1].ready;
+}
+
+static inline bool is_closed(struct trpc_port *port)
+{
+ return port->closed;
+}
+
+/* Free a port: return any still-queued messages on either endpoint to the
+ * slab cache, then free the port itself. */
+static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port)
+{
+ struct trpc_msg *msg;
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct list_head *list = &port->peers[i].msg_list;
+ while (!list_empty(list)) {
+ msg = list_first_entry(list, struct trpc_msg, list);
+ list_del(&msg->list);
+ kmem_cache_free(info->msg_cache, msg);
+ }
+ }
+ kfree(port);
+}
+
+/* kref release callback: unlink the port from the global rb-tree and
+ * free it. */
+static void _rpc_port_release(struct kref *kref)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = container_of(kref, struct trpc_port, ref);
+ unsigned long flags;
+
+ DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
+ port->name, port);
+ spin_lock_irqsave(&info->ports_lock, flags);
+ rb_erase(&port->rb_node, &info->ports);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ rpc_port_free(info, port);
+}
+
+/* note that the refcount is actually on the port and not on the endpoint */
+void trpc_put(struct trpc_endpoint *ep)
+{
+ kref_put(&ep->port->ref, _rpc_port_release);
+}
+
+void trpc_get(struct trpc_endpoint *ep)
+{
+ kref_get(&ep->port->ref);
+}
+
+/* Searches the rb_tree for a port with the provided name. If one is not found,
+ * the new port is inserted. Otherwise, the existing port is returned.
+ * Must be called with the ports_lock held */
+static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
+ struct trpc_port *port)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct trpc_port *tmp;
+ int ret = 0;
+
+ p = &info->ports.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct trpc_port, rb_node);
+
+ /* ports are ordered by name */
+ ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else
+ return tmp;
+ }
+ rb_link_node(&port->rb_node, parent, p);
+ rb_insert_color(&port->rb_node, &info->ports);
+ DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
+ port->name, port);
+ return port;
+}
+
+/* Offer the pending connection to each registered node in turn; the first
+ * node whose try_connect() accepts ends the search. Returns 0 on success
+ * or -ECONNREFUSED if no node accepted. */
+static int nodes_try_connect(struct tegra_rpc_info *info,
+ struct trpc_node *src,
+ struct trpc_endpoint *from)
+{
+ struct trpc_node *node;
+ int ret;
+
+ mutex_lock(&info->node_lock);
+ list_for_each_entry(node, &info->node_list, list) {
+ if (!node->try_connect)
+ continue;
+ ret = node->try_connect(node, src, from);
+ if (!ret) {
+ mutex_unlock(&info->node_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&info->node_lock);
+ return -ECONNREFUSED;
+}
+
+/* Allocate and initialize a port (refcount 1, both endpoints not ready,
+ * peers cross-linked through 'out'). Returns NULL on allocation failure. */
+static struct trpc_port *rpc_port_alloc(const char *name)
+{
+ struct trpc_port *port;
+ int i;
+
+ port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("%s: can't alloc rpc_port\n", __func__);
+ return NULL;
+ }
+ BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers));
+
+ spin_lock_init(&port->lock);
+ kref_init(&port->ref);
+ strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN);
+ for (i = 0; i < 2; i++) {
+ struct trpc_endpoint *ep = port->peers + i;
+ INIT_LIST_HEAD(&ep->msg_list);
+ init_waitqueue_head(&ep->msg_waitq);
+ ep->port = port;
+ }
+ port->peers[0].out = &port->peers[1];
+ port->peers[1].out = &port->peers[0];
+
+ return port;
+}
+
+/* must be holding the ports lock */
+/* Wake anyone blocked in trpc_wait_peer() on either endpoint. */
+static inline void handle_port_connected(struct trpc_port *port)
+{
+ int i;
+
+ DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);
+
+ for (i = 0; i < 2; i++)
+ if (port->peers[i].connect_done)
+ complete(port->peers[i].connect_done);
+}
+
+/* Mark an endpoint ready and record its owner/ops/private data. */
+static inline void _ready_ep(struct trpc_endpoint *ep,
+ struct trpc_node *owner,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ ep->ready = true;
+ ep->owner = owner;
+ ep->ops = ops;
+ ep->priv = priv;
+}
+
+/* this keeps a reference on the port */
+/* Ready the peer endpoint of 'ep' and complete the connection. Returns
+ * NULL if the peer is already taken or 'ep' itself isn't ready. */
+static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info,
+ struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(port->closed);
+ if (peer->ready || !ep->ready) {
+ peer = NULL;
+ goto out;
+ }
+ _ready_ep(peer, owner, ops, priv);
+ if (WARN_ON(!is_connected(port)))
+ pr_warning("%s: created peer but no connection established?!\n",
+ __func__);
+ else
+ handle_port_connected(port);
+ trpc_get(peer);
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return peer;
+}
+
+/* Exported code. This is our interface to the outside world */
+/* Create (or join) the named port and return a ready endpoint: peers[0]
+ * of a new port, or the not-yet-taken peers[1] of an existing one.
+ * Returns ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL) on failure. */
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *ep;
+ struct trpc_port *new_port;
+ struct trpc_port *port;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ /* we always allocate a new port even if one already might exist. This
+ * is slightly inefficient, but it allows us to do the allocation
+ * without holding our ports_lock spinlock. */
+ new_port = rpc_port_alloc(name);
+ if (!new_port) {
+ pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ port = rpc_port_find_insert(info, new_port);
+ if (port != new_port) {
+ rpc_port_free(info, new_port);
+ /* There was already a port by that name in the rb_tree,
+ * so just try to create its peer[1], i.e. peer for peer[0]
+ */
+ ep = _create_peer(info, owner, &port->peers[0], ops, priv);
+ if (!ep) {
+ pr_err("%s: port '%s' is not in a connectable state\n",
+ __func__, port->name);
+ ep = ERR_PTR(-EINVAL);
+ }
+ goto out;
+ }
+ /* don't need to grab the individual port lock here since we must be
+ * holding the ports_lock to add the new element, and never dropped
+ * it, and thus noone could have gotten a reference to this port
+ * and thus the state couldn't have been touched */
+ ep = &port->peers[0];
+ _ready_ep(ep, owner, ops, priv);
+out:
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return ep;
+}
+
+/* Public wrapper for _create_peer(): ready the peer of an existing
+ * endpoint under the ports lock. Returns NULL if the peer is taken. */
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ peer = _create_peer(info, owner, ep, ops, priv);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return peer;
+}
+
+/* timeout == -1, waits forever
+ * timeout == 0, return immediately
+ */
+/* Poll registered nodes until this endpoint's port becomes connected.
+ * Returns 0 when connected, -ECONNRESET if the port was closed, -EINTR
+ * on a pending signal, -ETIMEDOUT when 'timeout' ms elapse, or the last
+ * nodes_try_connect() error when timeout == 0. */
+int trpc_connect(struct trpc_endpoint *from, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = from->port;
+ struct trpc_node *src = from->owner;
+ int ret;
+ bool no_retry = !timeout;
+ /* NOTE(review): endtime is computed even for timeout < 0, but the
+ * loop condition short-circuits on 'timeout < 0' so it is unused
+ * in that case */
+ unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* XXX: add state for connections and ports to prevent invalid
+ * states like multiple connections, etc. ? */
+ if (unlikely(is_closed(port))) {
+ ret = -ECONNRESET;
+ pr_err("%s: can't connect to %s, closed\n", __func__,
+ port->name);
+ goto out;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ do {
+ ret = nodes_try_connect(info, src, from);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ } else if (no_retry) {
+ goto out;
+ } else if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ usleep_range(5000, 20000);
+ } while (timeout < 0 || time_before(jiffies, endtime));
+
+ return -ETIMEDOUT;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+/* convenience function for doing this common pattern in a single call */
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
+ char *name,
+ struct trpc_ep_ops *ops,
+ void *priv,
+ long timeout)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ ep = trpc_create(src, name, ops, priv);
+ if (IS_ERR(ep))
+ return ep;
+
+ ret = trpc_connect(ep, timeout);
+ if (ret) {
+ trpc_close(ep);
+ return ERR_PTR(ret);
+ }
+
+ return ep;
+}
+
+void trpc_close(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ bool need_close_op = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(!ep->ready);
+ ep->ready = false;
+ port->closed = true;
+ if (peer->ready) {
+ need_close_op = true;
+ /* the peer may be waiting for a message */
+ wake_up_all(&peer->msg_waitq);
+ if (peer->connect_done)
+ complete(peer->connect_done);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (need_close_op && peer->ops && peer->ops->close)
+ peer->ops->close(peer);
+ trpc_put(ep);
+}
+
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
+{
+ struct trpc_port *port = ep->port;
+ DECLARE_COMPLETION_ONSTACK(event);
+ int ret;
+ unsigned long flags;
+
+ if (timeout < 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else if (timeout > 0)
+ timeout = msecs_to_jiffies(timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (ep->connect_done) {
+ ret = -EBUSY;
+ goto done;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto done;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto done;
+ } else if (!timeout) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ ep->connect_done = &event;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = wait_for_completion_interruptible_timeout(&event, timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ep->connect_done = NULL;
+
+ if (is_connected(port)) {
+ ret = 0;
+ } else {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ }
+
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static inline int _ep_id(struct trpc_endpoint *ep)
+{
+ return ep - ep->port->peers;
+}
+
+static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+ struct trpc_msg *msg;
+ unsigned long flags;
+ int ret;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+ /* shouldn't be enqueueing to the endpoint */
+ BUG_ON(peer->ops && peer->ops->send);
+
+ DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
+ port->name, _ep_id(peer));
+
+ msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
+ if (!msg) {
+ pr_err("%s: can't alloc memory for msg\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(msg->payload, buf, len);
+ msg->len = len;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_closed(port)) {
+ pr_err("%s: cannot send message for closed port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ECONNRESET;
+ goto err;
+ } else if (!is_connected(port)) {
+ pr_err("%s: cannot send message for unconnected port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ list_add_tail(&msg->list, &peer->msg_list);
+ if (peer->ops && peer->ops->notify_recv)
+ peer->ops->notify_recv(peer);
+ wake_up_all(&peer->msg_waitq);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&port->lock, flags);
+ kmem_cache_free(info->msg_cache, msg);
+ return ret;
+}
+
+/* Returns -ENOMEM if failed to allocate memory for the message. */
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+
+ DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
+ __func__, port->name, _ep_id(from), port->name, _ep_id(peer));
+
+ if (peer->ops && peer->ops->send) {
+ might_sleep();
+ return peer->ops->send(peer, buf, len);
+ } else {
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+ return queue_msg(src, from, buf, len, gfp_flags);
+ }
+}
+
+static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep)
+{
+ struct trpc_msg *msg = NULL;
+
+ if (!list_empty(&ep->msg_list)) {
+ msg = list_first_entry(&ep->msg_list, struct trpc_msg, list);
+ list_del_init(&msg->list);
+ }
+
+ return msg;
+}
+
+static bool __should_wake(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = !list_empty(&ep->msg_list) || is_closed(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t buf_len, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = ep->port;
+ struct trpc_msg *msg;
+ size_t len;
+ long ret;
+ unsigned long flags;
+
+ BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* we allow closed ports to finish receiving already-queued messages */
+ msg = dequeue_msg_locked(ep);
+ if (msg) {
+ goto got_msg;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto out;
+ } else if (!is_connected(port)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ if (timeout == 0) {
+ ret = 0;
+ goto out;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
+ port->name, _ep_id(ep));
+
+ ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
+ timeout);
+
+ DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
+ spin_lock_irqsave(&port->lock, flags);
+ msg = dequeue_msg_locked(ep);
+ if (!msg) {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ pr_err("%s: error (%d) while receiving msg for '%s'\n",
+ __func__, (int)ret, port->name);
+ goto out;
+ }
+
+got_msg:
+ spin_unlock_irqrestore(&port->lock, flags);
+ len = min(buf_len, msg->len);
+ memcpy(buf, msg->payload, len);
+ kmem_cache_free(info->msg_cache, msg);
+ return len;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_node_register(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ if (!info)
+ return -ENOMEM;
+
+ pr_info("%s: Adding '%s' to node list\n", __func__, node->name);
+
+ mutex_lock(&info->node_lock);
+ if (node->type == TRPC_NODE_LOCAL)
+ list_add(&node->list, &info->node_list);
+ else
+ list_add_tail(&node->list, &info->node_list);
+ mutex_unlock(&info->node_lock);
+ return 0;
+}
+
+void trpc_node_unregister(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ mutex_lock(&info->node_lock);
+ list_del(&node->list);
+ mutex_unlock(&info->node_lock);
+}
+
+static int trpc_debug_ports_show(struct seq_file *s, void *data)
+{
+ struct tegra_rpc_info *info = s->private;
+ struct rb_node *n;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ for (n = rb_first(&info->ports); n; n = rb_next(n)) {
+ struct trpc_port *port = rb_entry(n, struct trpc_port, rb_node);
+ seq_printf(s, "port: %s\n closed:%s\n", port->name,
+ port->closed ? "yes" : "no");
+
+ spin_lock(&port->lock);
+ for (i = 0; i < ARRAY_SIZE(port->peers); i++) {
+ struct trpc_endpoint *ep = &port->peers[i];
+ seq_printf(s, " peer%d: %s\n ready:%s\n", i,
+ ep->owner ? ep->owner->name: "<none>",
+ ep->ready ? "yes" : "no");
+ if (ep->ops && ep->ops->show)
+ ep->ops->show(s, ep);
+ }
+ spin_unlock(&port->lock);
+ }
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+
+ return 0;
+}
+
+static int trpc_debug_ports_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, trpc_debug_ports_show, inode->i_private);
+}
+
+static struct file_operations trpc_debug_ports_fops = {
+ .open = trpc_debug_ports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void trpc_debug_init(struct tegra_rpc_info *info)
+{
+ trpc_debug_root = debugfs_create_dir("tegra_rpc", NULL);
+ if (IS_ERR_OR_NULL(trpc_debug_root)) {
+ pr_err("%s: couldn't create debug files\n", __func__);
+ return;
+ }
+
+ debugfs_create_file("ports", 0664, trpc_debug_root, info,
+ &trpc_debug_ports_fops);
+}
+
+static int __init tegra_rpc_init(void)
+{
+ struct tegra_rpc_info *rpc_info;
+ int ret;
+
+ rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
+ if (!rpc_info) {
+ pr_err("%s: error allocating rpc_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ rpc_info->ports = RB_ROOT;
+ spin_lock_init(&rpc_info->ports_lock);
+ INIT_LIST_HEAD(&rpc_info->node_list);
+ mutex_init(&rpc_info->node_lock);
+
+ rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
+ if (!rpc_info->msg_cache) {
+ pr_err("%s: unable to create message cache\n", __func__);
+ ret = -ENOMEM;
+ goto err_kmem_cache;
+ }
+
+ trpc_debug_init(rpc_info);
+ tegra_rpc = rpc_info;
+
+ return 0;
+
+err_kmem_cache:
+ kfree(rpc_info);
+ return ret;
+}
+
+subsys_initcall(tegra_rpc_init);
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_H
+#define __ARM_MACH_TEGRA_RPC_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/tegra_rpc.h>
+
+struct trpc_endpoint;
+struct trpc_ep_ops {
+ /* send is allowed to sleep */
+ int (*send)(struct trpc_endpoint *ep, void *buf, size_t len);
+ /* notify_recv is NOT allowed to sleep */
+ void (*notify_recv)(struct trpc_endpoint *ep);
+ /* close is allowed to sleep */
+ void (*close)(struct trpc_endpoint *ep);
+ /* not allowed to sleep, not allowed to call back into trpc */
+ void (*show)(struct seq_file *s, struct trpc_endpoint *ep);
+};
+
+enum {
+ TRPC_NODE_LOCAL,
+ TRPC_NODE_REMOTE,
+};
+
+struct trpc_node {
+ struct list_head list;
+ const char *name;
+ int type;
+ void *priv;
+
+ int (*try_connect)(struct trpc_node *node,
+ struct trpc_node *src,
+ struct trpc_endpoint *from);
+};
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep);
+void *trpc_priv(struct trpc_endpoint *ep);
+const char *trpc_name(struct trpc_endpoint *ep);
+
+void trpc_put(struct trpc_endpoint *ep);
+void trpc_get(struct trpc_endpoint *ep);
+
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *ep, void *buf,
+ size_t len, gfp_t gfp_flags);
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t len, long timeout);
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv);
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src, char *name,
+ struct trpc_ep_ops *ops, void *priv,
+ long timeout);
+int trpc_connect(struct trpc_endpoint *from, long timeout);
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv);
+void trpc_close(struct trpc_endpoint *ep);
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout);
+
+int trpc_node_register(struct trpc_node *node);
+void trpc_node_unregister(struct trpc_node *node);
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+#include "trpc_sema.h"
+
+struct rpc_info {
+ struct trpc_endpoint *rpc_ep;
+ struct file *sema_file;
+};
+
+/* port names reserved for system functions, i.e. communicating with the
+ * AVP */
+static const char reserved_ports[][TEGRA_RPC_MAX_NAME_LEN] = {
+ "RPC_AVP_PORT",
+ "RPC_CPU_PORT",
+};
+static int num_reserved_ports = ARRAY_SIZE(reserved_ports);
+
+static void rpc_notify_recv(struct trpc_endpoint *ep);
+
+/* TODO: do we need to do anything when port is closed from the other side? */
+static struct trpc_ep_ops ep_ops = {
+ .notify_recv = rpc_notify_recv,
+};
+
+static struct trpc_node rpc_node = {
+ .name = "local",
+ .type = TRPC_NODE_LOCAL,
+};
+
+static void rpc_notify_recv(struct trpc_endpoint *ep)
+{
+ struct rpc_info *info = trpc_priv(ep);
+
+ if (WARN_ON(!info))
+ return;
+ if (info->sema_file)
+ trpc_sema_signal(info->sema_file);
+}
+
+static int local_rpc_open(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info;
+
+ info = kzalloc(sizeof(struct rpc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ file->private_data = info;
+ return 0;
+}
+
+static int local_rpc_release(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info = file->private_data;
+
+ if (info->rpc_ep)
+ trpc_close(info->rpc_ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ kfree(info);
+ file->private_data = NULL;
+ return 0;
+}
+
+static int __get_port_desc(struct tegra_rpc_port_desc *desc,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int size = _IOC_SIZE(cmd);
+
+ if (size != sizeof(struct tegra_rpc_port_desc))
+ return -EINVAL;
+ if (copy_from_user(desc, (void __user *)arg, sizeof(*desc)))
+ return -EFAULT;
+
+ desc->name[TEGRA_RPC_MAX_NAME_LEN - 1] = '\0';
+ return 0;
+}
+
+static char uniq_name[] = "aaaaaaaa+";
+static const int uniq_len = sizeof(uniq_name) - 1;
+static DEFINE_MUTEX(uniq_lock);
+
+static void _gen_port_name(char *new_name)
+{
+ int i;
+
+ mutex_lock(&uniq_lock);
+ for (i = 0; i < uniq_len - 1; i++) {
+ ++uniq_name[i];
+ if (uniq_name[i] != 'z')
+ break;
+ uniq_name[i] = 'a';
+ }
+ strlcpy(new_name, uniq_name, TEGRA_RPC_MAX_NAME_LEN);
+ mutex_unlock(&uniq_lock);
+}
+
+static int _validate_port_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < num_reserved_ports; i++)
+ if (!strncmp(name, reserved_ports[i], TEGRA_RPC_MAX_NAME_LEN))
+ return -EINVAL;
+ return 0;
+}
+
+static long local_rpc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rpc_info *info = file->private_data;
+ struct tegra_rpc_port_desc desc;
+ struct trpc_endpoint *ep;
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != TEGRA_RPC_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_RPC_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_RPC_IOCTL_MAX_NR) {
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ switch (cmd) {
+ case TEGRA_RPC_IOCTL_PORT_CREATE:
+ if (info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = __get_port_desc(&desc, cmd, arg);
+ if (ret)
+ goto err;
+ if (desc.name[0]) {
+ ret = _validate_port_name(desc.name);
+ if (ret)
+ goto err;
+ } else {
+ _gen_port_name(desc.name);
+ }
+ if (desc.notify_fd != -1) {
+ /* grab a reference to the trpc_sema fd */
+ info->sema_file = trpc_sema_get_from_fd(desc.notify_fd);
+ if (IS_ERR(info->sema_file)) {
+ ret = PTR_ERR(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ }
+ ep = trpc_create(&rpc_node, desc.name, &ep_ops, info);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ info->rpc_ep = ep;
+ break;
+ case TEGRA_RPC_IOCTL_PORT_GET_NAME:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_to_user((void __user *)arg,
+ trpc_name(info->rpc_ep),
+ TEGRA_RPC_MAX_NAME_LEN)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_CONNECT:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_connect(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: can't connect to '%s' (%d)\n", __func__,
+ trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_LISTEN:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_wait_peer(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: error waiting for peer for '%s' (%d)\n",
+ __func__, trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: unknown cmd %d\n", __func__, _IOC_NR(cmd));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("tegra_rpc: pid=%d ioctl=%x/%lx (%x) ret=%d\n",
+ current->pid, cmd, arg, _IOC_NR(cmd), ret);
+ return (long)ret;
+}
+
+static ssize_t local_rpc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+ else if (count > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ ret = trpc_send_msg(&rpc_node, info->rpc_ep, data, count,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t local_rpc_read(struct file *file, char __user *buf, size_t max,
+ loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ int ret;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+
+ if (max > TEGRA_RPC_MAX_MSG_LEN)
+ return -EINVAL;
+
+ ret = trpc_recv_msg(&rpc_node, info->rpc_ep, data,
+ TEGRA_RPC_MAX_MSG_LEN, 0);
+ if (ret == 0)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else if (ret > max)
+ return -ENOSPC;
+ else if (copy_to_user(buf, data, ret))
+ return -EFAULT;
+
+ return ret;
+}
+
+static const struct file_operations local_rpc_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = local_rpc_open,
+ .release = local_rpc_release,
+ .unlocked_ioctl = local_rpc_ioctl,
+ .write = local_rpc_write,
+ .read = local_rpc_read,
+};
+
+static struct miscdevice local_rpc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_rpc",
+ .fops = &local_rpc_misc_fops,
+};
+
+int __init rpc_local_init(void)
+{
+ int ret;
+
+ ret = trpc_sema_init();
+ if (ret) {
+ pr_err("%s: error in trpc_sema_init\n", __func__);
+ goto err_sema_init;
+ }
+
+ ret = misc_register(&local_rpc_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ goto err_misc;
+ }
+
+ ret = trpc_node_register(&rpc_node);
+ if (ret) {
+ pr_err("%s: can't register rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ return 0;
+
+err_node_reg:
+ misc_deregister(&local_rpc_misc_device);
+err_misc:
+err_sema_init:
+ return ret;
+}
+
+module_init(rpc_local_init);
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_sema.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc_sema.h"
+
+struct trpc_sema {
+ wait_queue_head_t wq;
+ spinlock_t lock;
+ int count;
+};
+
+static int rpc_sema_minor = -1;
+
+static inline bool is_trpc_sema_file(struct file *file)
+{
+ dev_t rdev = file->f_dentry->d_inode->i_rdev;
+
+ if (MAJOR(rdev) == MISC_MAJOR && MINOR(rdev) == rpc_sema_minor)
+ return true;
+ return false;
+}
+
+struct file *trpc_sema_get_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ pr_err("%s: fd %d is invalid\n", __func__, fd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!is_trpc_sema_file(file)) {
+ pr_err("%s: fd (%d) is not a trpc_sema file\n", __func__, fd);
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file;
+}
+
+int trpc_sema_signal(struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+ unsigned long flags;
+
+ if (!info)
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->count++;
+ wake_up_interruptible_all(&info->wq);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int trpc_sema_wait(struct trpc_sema *info, long *timeleft)
+{
+ unsigned long flags;
+ int ret = 0;
+ unsigned long endtime;
+ long timeout = *timeleft;
+
+ *timeleft = 0;
+ if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else if (timeout > 0) {
+ timeout = msecs_to_jiffies(timeout);
+ endtime = jiffies + timeout;
+ }
+
+again:
+ if (timeout)
+ ret = wait_event_interruptible_timeout(info->wq,
+ info->count > 0,
+ timeout);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->count > 0) {
+ info->count--;
+ ret = 0;
+ } else if (ret == 0 || timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ ret = -EINTR;
+ if (timeout != MAX_SCHEDULE_TIMEOUT &&
+ time_before(jiffies, endtime))
+ *timeleft = jiffies_to_msecs(endtime - jiffies);
+ else
+ *timeleft = 0;
+ } else {
+ /* we woke up but someone else got the semaphore and we have
+ * time left, try again */
+ timeout = ret;
+ spin_unlock_irqrestore(&info->lock, flags);
+ goto again;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return ret;
+}
+
+static int trpc_sema_open(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info;
+
+ info = kzalloc(sizeof(struct trpc_sema), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ init_waitqueue_head(&info->wq);
+ spin_lock_init(&info->lock);
+ file->private_data = info;
+ return 0;
+}
+
+static int trpc_sema_release(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+
+ file->private_data = NULL;
+ kfree(info);
+ return 0;
+}
+
+static long trpc_sema_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct trpc_sema *info = file->private_data;
+ int ret;
+ long timeout;
+
+ if (_IOC_TYPE(cmd) != TEGRA_SEMA_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_SEMA_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_SEMA_IOCTL_MAX_NR)
+ return -ENOTTY;
+ else if (!info)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TEGRA_SEMA_IOCTL_WAIT:
+ if (copy_from_user(&timeout, (void __user *)arg, sizeof(long)))
+ return -EFAULT;
+ ret = trpc_sema_wait(info, &timeout);
+ if (ret != -EINTR)
+ break;
+ if (copy_to_user((void __user *)arg, &timeout, sizeof(long)))
+ ret = -EFAULT;
+ break;
+ case TEGRA_SEMA_IOCTL_SIGNAL:
+ ret = trpc_sema_signal(file);
+ break;
+ default:
+ pr_err("%s: Unknown tegra_sema ioctl 0x%x\n", __func__,
+ _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trpc_sema_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = trpc_sema_open,
+ .release = trpc_sema_release,
+ .unlocked_ioctl = trpc_sema_ioctl,
+};
+
+static struct miscdevice trpc_sema_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_sema",
+ .fops = &trpc_sema_misc_fops,
+};
+
+int __init trpc_sema_init(void)
+{
+ int ret;
+
+ if (rpc_sema_minor >= 0) {
+ pr_err("%s: trpc_sema already registered\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = misc_register(&trpc_sema_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ return ret;
+ }
+
+ rpc_sema_minor = trpc_sema_misc_device.minor;
+ pr_info("%s: registered misc dev %d:%d\n", __func__, MISC_MAJOR,
+ rpc_sema_minor);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_SEMA_H
+#define __ARM_MACH_TEGRA_RPC_SEMA_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+struct file *trpc_sema_get_from_fd(int fd);
+int trpc_sema_signal(struct file *file);
+int __init trpc_sema_init(void);
+
+#endif
--- /dev/null
+/*
+ * drivers/media/video/tegra/tegra_camera.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <mach/iomap.h>
+#include <mach/clk.h>
+
+#include <media/tegra_camera.h>
+
+/* Eventually this should handle all clock and reset calls for the isp, vi,
+ * vi_sensor, and csi modules, replacing nvrm and nvos completely for camera
+ */
+#define TEGRA_CAMERA_NAME "tegra_camera"
+DEFINE_MUTEX(tegra_camera_lock);
+
+struct tegra_camera_block {
+ int (*enable) (void);
+ int (*disable) (void);
+ bool is_enabled;
+};
+
+
+static struct clk *isp_clk;
+static struct clk *vi_clk;
+static struct clk *vi_sensor_clk;
+static struct clk *csus_clk;
+static struct clk *csi_clk;
+static struct regulator *tegra_camera_regulator_csi;
+
+static int tegra_camera_enable_isp(void)
+{
+ return clk_enable(isp_clk);
+}
+
+static int tegra_camera_disable_isp(void)
+{
+ clk_disable(isp_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_vi(void)
+{
+ clk_enable(vi_clk);
+ clk_enable(vi_sensor_clk);
+ clk_enable(csus_clk);
+ return 0;
+}
+
+static int tegra_camera_disable_vi(void)
+{
+ clk_disable(vi_clk);
+ clk_disable(vi_sensor_clk);
+ clk_disable(csus_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_csi(void)
+{
+ int ret;
+
+ ret = regulator_enable(tegra_camera_regulator_csi);
+ if (ret)
+ return ret;
+ clk_enable(csi_clk);
+ return 0;
+}
+
+static int tegra_camera_disable_csi(void)
+{
+ int ret;
+
+ ret = regulator_disable(tegra_camera_regulator_csi);
+ if (ret)
+ return ret;
+ clk_disable(csi_clk);
+ return 0;
+}
+
+struct tegra_camera_block tegra_camera_block[] = {
+ [TEGRA_CAMERA_MODULE_ISP] = {tegra_camera_enable_isp,
+ tegra_camera_disable_isp, false},
+ [TEGRA_CAMERA_MODULE_VI] = {tegra_camera_enable_vi,
+ tegra_camera_disable_vi, false},
+ [TEGRA_CAMERA_MODULE_CSI] = {tegra_camera_enable_csi,
+ tegra_camera_disable_csi, false},
+};
+
+#define TEGRA_CAMERA_VI_CLK_SEL_INTERNAL 0
+#define TEGRA_CAMERA_VI_CLK_SEL_EXTERNAL (1<<24)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_VI_SENSOR_CLK (1<<25)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_PD2VI_CLK 0
+
+static int tegra_camera_clk_set_rate(struct tegra_camera_clk_info *info)
+{
+ u32 offset;
+ struct clk *clk;
+
+ if (info->id != TEGRA_CAMERA_MODULE_VI) {
+ pr_err("%s: Set rate only aplies to vi module %d\n", __func__,
+ info->id);
+ return -EINVAL;
+ }
+
+ switch (info->clk_id) {
+ case TEGRA_CAMERA_VI_CLK:
+ clk = vi_clk;
+ offset = 0x148;
+ break;
+ case TEGRA_CAMERA_VI_SENSOR_CLK:
+ clk = vi_sensor_clk;
+ offset = 0x1a8;
+ break;
+ default:
+ pr_err("%s: invalid clk id for set rate %d\n", __func__,
+ info->clk_id);
+ return -EINVAL;
+ }
+
+ clk_set_rate(clk, info->rate);
+
+ if (info->clk_id == TEGRA_CAMERA_VI_CLK) {
+ u32 val;
+ void __iomem *car = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+ void __iomem *apb_misc = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
+ writel(0x2, car + offset);
+
+ val = readl(apb_misc + 0x42c);
+ writel(val | 0x1, apb_misc + 0x42c);
+ }
+
+ info->rate = clk_get_rate(clk);
+ return 0;
+
+}
+static int tegra_camera_reset(uint id)
+{
+ struct clk *clk;
+
+ switch (id) {
+ case TEGRA_CAMERA_MODULE_VI:
+ clk = vi_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_ISP:
+ clk = isp_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_CSI:
+ clk = csi_clk;
+ break;
+ default:
+ return -EINVAL;
+ }
+ tegra_periph_reset_assert(clk);
+ udelay(10);
+ tegra_periph_reset_deassert(clk);
+
+ return 0;
+}
+
+static long tegra_camera_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ uint id;
+
+ /* first element of arg must be u32 with id of module to talk to */
+ if (copy_from_user(&id, (const void __user *)arg, sizeof(uint))) {
+ pr_err("%s: Failed to copy arg from user", __func__);
+ return -EFAULT;
+ }
+
+ if (id >= ARRAY_SIZE(tegra_camera_block)) {
+ pr_err("%s: Invalid id to tegra isp ioctl%d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case TEGRA_CAMERA_IOCTL_ENABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&tegra_camera_lock);
+ if (!tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].enable();
+ tegra_camera_block[id].is_enabled = true;
+ }
+ mutex_unlock(&tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_DISABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&tegra_camera_lock);
+ if (tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].disable();
+ tegra_camera_block[id].is_enabled = false;
+ }
+ mutex_unlock(&tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_CLK_SET_RATE:
+ {
+ struct tegra_camera_clk_info info;
+ int ret;
+
+ if (copy_from_user(&info, (const void __user *)arg,
+ sizeof(struct tegra_camera_clk_info))) {
+ pr_err("%s: Failed to copy arg from user\n", __func__);
+ return -EFAULT;
+ }
+ ret = tegra_camera_clk_set_rate(&info);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &info,
+ sizeof(struct tegra_camera_clk_info))) {
+ pr_err("%s: Failed to copy arg to user\n", __func__);
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case TEGRA_CAMERA_IOCTL_RESET:
+ return tegra_camera_reset(id);
+ default:
+ pr_err("%s: Unknown tegra_camera ioctl.\n", TEGRA_CAMERA_NAME);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tegra_camera_release(struct inode *inode, struct file *file)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_camera_block); i++)
+ if (tegra_camera_block[i].is_enabled) {
+ tegra_camera_block[i].disable();
+ tegra_camera_block[i].is_enabled = false;
+ }
+
+ return 0;
+}
+
+static const struct file_operations tegra_camera_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tegra_camera_ioctl,
+ .release = tegra_camera_release,
+};
+
+static struct miscdevice tegra_camera_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = TEGRA_CAMERA_NAME,
+ .fops = &tegra_camera_fops,
+};
+
+static int tegra_camera_clk_get(struct platform_device *pdev, const char *name,
+ struct clk **clk)
+{
+ *clk = clk_get(&pdev->dev, name);
+ if (IS_ERR_OR_NULL(*clk)) {
+ pr_err("%s: unable to get clock for %s\n", __func__, name);
+ *clk = NULL;
+ return PTR_ERR(*clk);
+ }
+ return 0;
+}
+
+static int tegra_camera_probe(struct platform_device *pdev)
+{
+ int err;
+
+ pr_info("%s: probe\n", TEGRA_CAMERA_NAME);
+ tegra_camera_regulator_csi = regulator_get(&pdev->dev, "vcsi");
+ if (IS_ERR_OR_NULL(tegra_camera_regulator_csi)) {
+ pr_err("%s: Couldn't get regulator vcsi\n", TEGRA_CAMERA_NAME);
+ return PTR_ERR(tegra_camera_regulator_csi);
+ }
+
+ err = misc_register(&tegra_camera_device);
+ if (err) {
+ pr_err("%s: Unable to register misc device!\n",
+ TEGRA_CAMERA_NAME);
+ goto misc_register_err;
+ }
+
+ err = tegra_camera_clk_get(pdev, "isp", &isp_clk);
+ if (err)
+ goto misc_register_err;
+ err = tegra_camera_clk_get(pdev, "vi", &vi_clk);
+ if (err)
+ goto vi_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "vi_sensor", &vi_sensor_clk);
+ if (err)
+ goto vi_sensor_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csus", &csus_clk);
+ if (err)
+ goto csus_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csi", &csi_clk);
+ if (err)
+ goto csi_clk_get_err;
+
+ return 0;
+
+csi_clk_get_err:
+ clk_put(csus_clk);
+csus_clk_get_err:
+ clk_put(vi_sensor_clk);
+vi_sensor_clk_get_err:
+ clk_put(vi_clk);
+vi_clk_get_err:
+ clk_put(isp_clk);
+misc_register_err:
+ regulator_put(tegra_camera_regulator_csi);
+ return err;
+}
+
+static int tegra_camera_remove(struct platform_device *pdev)
+{
+ clk_put(isp_clk);
+ clk_put(vi_clk);
+ clk_put(vi_sensor_clk);
+ clk_put(csus_clk);
+ clk_put(csi_clk);
+
+ regulator_put(tegra_camera_regulator_csi);
+ misc_deregister(&tegra_camera_device);
+ return 0;
+}
+
+static struct platform_driver tegra_camera_driver = {
+ .probe = tegra_camera_probe,
+ .remove = tegra_camera_remove,
+ .driver = { .name = TEGRA_CAMERA_NAME }
+};
+
+static int __init tegra_camera_init(void)
+{
+ return platform_driver_register(&tegra_camera_driver);
+}
+
+static void __exit tegra_camera_exit(void)
+{
+ platform_driver_unregister(&tegra_camera_driver);
+}
+
+module_init(tegra_camera_init);
+module_exit(tegra_camera_exit);
+
This driver is necessary for jz4740-battery and jz4740-hwmon driver.
config MFD_TPS6586X
- tristate "TPS6586x Power Management chips"
- depends on I2C && GPIOLIB
+ bool "TPS6586x Power Management chips"
+ depends on I2C && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
help
If you say yes here you get support for the TPS6586X series of
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#define TPS6586X_GPIOSET1 0x5d
#define TPS6586X_GPIOSET2 0x5e
+/* interrupt control registers */
+#define TPS6586X_INT_ACK1 0xb5
+#define TPS6586X_INT_ACK2 0xb6
+#define TPS6586X_INT_ACK3 0xb7
+#define TPS6586X_INT_ACK4 0xb8
+
+/* interrupt mask registers */
+#define TPS6586X_INT_MASK1 0xb0
+#define TPS6586X_INT_MASK2 0xb1
+#define TPS6586X_INT_MASK3 0xb2
+#define TPS6586X_INT_MASK4 0xb3
+#define TPS6586X_INT_MASK5 0xb4
+
/* device id */
#define TPS6586X_VERSIONCRC 0xcd
-#define TPS658621A_VERSIONCRC 0x15
+
+/* Per-interrupt-source location of its mask bit(s): index into the
+ * INT_MASK1..INT_MASK5 register bank plus the bit mask within it. */
+struct tps6586x_irq_data {
+	u8 mask_reg;
+	u8 mask_mask;
+};
+
+/* mask_reg is stored as a 0-based offset from INT_MASK1 so it can be
+ * used directly as an index into tps6586x->mask_reg[]/mask_cache[]. */
+#define TPS6586X_IRQ(_reg, _mask)				\
+	{							\
+		.mask_reg = (_reg) - TPS6586X_INT_MASK1,	\
+		.mask_mask = (_mask),				\
+	}
+
+/* Table indexed by the TPS6586X_INT_* numbers from the platform header.
+ * Some logical sources span several hardware bits (e.g. CHG_STAT = 0xfc). */
+static const struct tps6586x_irq_data tps6586x_irqs[] = {
+	[TPS6586X_INT_PLDO_0]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 0),
+	[TPS6586X_INT_PLDO_1]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 1),
+	[TPS6586X_INT_PLDO_2]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 2),
+	[TPS6586X_INT_PLDO_3]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 3),
+	[TPS6586X_INT_PLDO_4]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 4),
+	[TPS6586X_INT_PLDO_5]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 5),
+	[TPS6586X_INT_PLDO_6]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 6),
+	[TPS6586X_INT_PLDO_7]	= TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 7),
+	[TPS6586X_INT_COMP_DET]	= TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 0),
+	[TPS6586X_INT_ADC]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 1),
+	[TPS6586X_INT_PLDO_8]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 2),
+	[TPS6586X_INT_PLDO_9]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 3),
+	[TPS6586X_INT_PSM_0]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 4),
+	[TPS6586X_INT_PSM_1]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 5),
+	[TPS6586X_INT_PSM_2]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 6),
+	[TPS6586X_INT_PSM_3]	= TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 7),
+	[TPS6586X_INT_RTC_ALM1]	= TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 4),
+	[TPS6586X_INT_ACUSB_OVP] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 0x03),
+	[TPS6586X_INT_USB_DET]	= TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 2),
+	[TPS6586X_INT_AC_DET]	= TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 3),
+	[TPS6586X_INT_BAT_DET]	= TPS6586X_IRQ(TPS6586X_INT_MASK3, 1 << 0),
+	[TPS6586X_INT_CHG_STAT]	= TPS6586X_IRQ(TPS6586X_INT_MASK4, 0xfc),
+	[TPS6586X_INT_CHG_TEMP]	= TPS6586X_IRQ(TPS6586X_INT_MASK3, 0x06),
+	[TPS6586X_INT_PP]	= TPS6586X_IRQ(TPS6586X_INT_MASK3, 0xf0),
+	[TPS6586X_INT_RESUME]	= TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 5),
+	[TPS6586X_INT_LOW_SYS]	= TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 6),
+	[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
+};
struct tps6586x {
struct mutex lock;
struct i2c_client *client;
struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ int irq_base;
+ u32 irq_en;
+ u8 mask_cache[5];
+ u8 mask_reg[5];
};
static inline int __tps6586x_read(struct i2c_client *client,
return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
}
+/*
+ * tps6586x_irq_lock - irq_chip .bus_lock hook.
+ * Taken before mask/unmask callbacks run; released (and the cached
+ * mask registers flushed over I2C) in tps6586x_irq_sync_unlock().
+ */
+static void tps6586x_irq_lock(unsigned int irq)
+{
+	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+
+	mutex_lock(&tps6586x->irq_lock);
+}
+
+/*
+ * tps6586x_irq_enable - irq_chip .enable hook.
+ * Clears the source's mask bit in the cached register image and marks
+ * it enabled in irq_en; the actual I2C write is deferred to
+ * tps6586x_irq_sync_unlock() (runs under irq_lock via bus_lock).
+ */
+static void tps6586x_irq_enable(unsigned int irq)
+{
+	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	unsigned int __irq = irq - tps6586x->irq_base;
+	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+	tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
+	tps6586x->irq_en |= (1 << __irq);
+}
+
+/*
+ * tps6586x_irq_disable - irq_chip .disable hook.
+ * Mirror of tps6586x_irq_enable(): sets the source's mask bit in the
+ * cached register image and clears its irq_en bit; the hardware write
+ * is deferred to tps6586x_irq_sync_unlock().
+ */
+static void tps6586x_irq_disable(unsigned int irq)
+{
+	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	unsigned int __irq = irq - tps6586x->irq_base;
+	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+	tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
+	tps6586x->irq_en &= ~(1 << __irq);
+}
+
+/*
+ * tps6586x_irq_sync_unlock - irq_chip .bus_sync_unlock hook.
+ * Flushes any mask-register bytes that changed while the bus lock was
+ * held, updating the shadow cache only on successful writes, then
+ * drops irq_lock taken in tps6586x_irq_lock().
+ */
+static void tps6586x_irq_sync_unlock(unsigned int irq)
+{
+	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
+		if (tps6586x->mask_reg[i] != tps6586x->mask_cache[i]) {
+			/* On write failure the cache is left stale so the
+			 * write is retried on the next unlock. */
+			if (!WARN_ON(tps6586x_write(tps6586x->dev,
+						    TPS6586X_INT_MASK1 + i,
+						    tps6586x->mask_reg[i])))
+				tps6586x->mask_cache[i] = tps6586x->mask_reg[i];
+		}
+	}
+
+	mutex_unlock(&tps6586x->irq_lock);
+}
+
+/*
+ * tps6586x_irq - threaded handler for the chip's shared interrupt line.
+ * Reads the four INT_ACK registers in one burst (reading acknowledges
+ * the sources), then dispatches each pending, software-enabled source
+ * as a nested virtual IRQ.
+ */
+static irqreturn_t tps6586x_irq(int irq, void *data)
+{
+	struct tps6586x *tps6586x = data;
+	u32 acks;
+	int ret = 0;
+
+	/* 4 ACK bytes land in a u32; byte order fixed up below. */
+	ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1,
+			     sizeof(acks), (uint8_t *)&acks);
+
+	if (ret < 0) {
+		dev_err(tps6586x->dev, "failed to read interrupt status\n");
+		return IRQ_NONE;
+	}
+
+	acks = le32_to_cpu(acks);
+
+	/* Dispatch pending sources lowest-bit-first; sources not enabled
+	 * in irq_en are acknowledged (by the read above) but dropped. */
+	while (acks) {
+		int i = __ffs(acks);
+
+		if (tps6586x->irq_en & (1 << i))
+			handle_nested_irq(tps6586x->irq_base + i);
+
+		acks &= ~(1 << i);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * tps6586x_irq_init - set up the chip as a nested interrupt controller.
+ * Masks every source, drains stale ACK state, registers an irq_chip
+ * for irq_base..irq_base+N-1 and claims the parent line as a threaded,
+ * oneshot IRQ.  Returns 0 or a negative errno.
+ *
+ * NOTE(review): the initial mask writes and the ACK drain ignore I2C
+ * errors, and a request_threaded_irq() failure leaves the virtual irq
+ * descriptors configured — acceptable at probe time, but worth
+ * confirming.
+ */
+static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
+				       int irq_base)
+{
+	int i, ret;
+	u8 tmp[4];
+
+	if (!irq_base) {
+		dev_warn(tps6586x->dev, "No interrupt support on IRQ base\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&tps6586x->irq_lock);
+	/* Start with every source masked; shadow copies match hardware. */
+	for (i = 0; i < 5; i++) {
+		tps6586x->mask_cache[i] = 0xff;
+		tps6586x->mask_reg[i] = 0xff;
+		tps6586x_write(tps6586x->dev, TPS6586X_INT_MASK1 + i, 0xff);
+	}
+
+	/* Reading the ACK registers clears any interrupts latched before
+	 * the handler was installed. */
+	tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
+
+	tps6586x->irq_base = irq_base;
+
+	tps6586x->irq_chip.name = "tps6586x";
+	tps6586x->irq_chip.enable = tps6586x_irq_enable;
+	tps6586x->irq_chip.disable = tps6586x_irq_disable;
+	tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
+	tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+
+	for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
+		int __irq = i + tps6586x->irq_base;
+		set_irq_chip_data(__irq, tps6586x);
+		set_irq_chip_and_handler(__irq, &tps6586x->irq_chip,
+					 handle_simple_irq);
+		/* Handlers run from the parent's IRQ thread. */
+		set_irq_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+		set_irq_flags(__irq, IRQF_VALID);
+#endif
+	}
+
+	ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
+				   "tps6586x", tps6586x);
+
+	/* PMIC interrupts (RTC alarm, power keys) are wakeup sources. */
+	if (!ret) {
+		device_init_wakeup(tps6586x->dev, 1);
+		enable_irq_wake(irq);
+	}
+
+	return ret;
+}
+
static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
struct tps6586x_platform_data *pdata)
{
return -EIO;
}
- if (ret != TPS658621A_VERSIONCRC) {
- dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
- return -ENODEV;
- }
+ dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
if (tps6586x == NULL)
mutex_init(&tps6586x->lock);
+ if (client->irq) {
+ ret = tps6586x_irq_init(tps6586x, client->irq,
+ pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto err_irq_init;
+ }
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
return 0;
err_add_devs:
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+err_irq_init:
kfree(tps6586x);
return ret;
}
static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
{
+ struct tps6586x *tps6586x = i2c_get_clientdata(client);
+
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+
return 0;
}
dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret);
goto err;
}
- if (ret != 0x6204) {
+ switch (ret) {
+ case 0x6204:
+ case 0x6246:
+ break;
+ default:
dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret);
ret = -EINVAL;
goto err;
case WM8321:
ret = mfd_add_devices(wm831x->dev, -1,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, 0);
+ NULL, wm831x->irq_base);
break;
default:
If you say yes here you get support for Asahi Kasei's
orientation sensor AK8975.
+config SENSORS_NCT1008
+ tristate "ON Semiconductor Temperature Sensor"
+ default n
+ depends on I2C
+ help
+ Say yes here if you wish to include the ON Semiconductor
+ NCT1008 Temperature sensor.
+
config EP93XX_PWM
tristate "EP93xx PWM support"
depends on ARCH_EP93XX
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_APANIC) += apanic.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+obj-$(CONFIG_SENSORS_NCT1008) += nct1008.o
static int write16(void *client, u8 reg, u8 val)
{
u8 data[2] = {reg, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 2);
}
static int write24(void *client, u8 reg, u16 val)
{
u8 data[3] = {reg, val >> 8, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 3);
}
static int read8(void *client)
--- /dev/null
+/*
+ * drivers/misc/nct1008.c
+ *
+ * Driver for NCT1008, temperature monitoring device from ON Semiconductors
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include <linux/nct1008.h>
+
+#define DRIVER_NAME "nct1008"
+
+/* Register Addresses */
+#define LOCAL_TEMP_RD 0x00
+#define STATUS_RD 0x02
+#define CONFIG_RD 0x03
+
+#define CONFIG_WR 0x09
+#define CONV_RATE_WR 0x0A
+#define LOCAL_TEMP_HI_LIMIT_WR 0x0B
+#define EXT_TEMP_HI_LIMIT_HI_BYTE 0x0D
+#define OFFSET_WR 0x11
+#define EXT_THERM_LIMIT_WR 0x19
+#define LOCAL_THERM_LIMIT_WR 0x20
+#define THERM_HYSTERESIS_WR 0x21
+
+/* Configuration Register Bits */
+#define EXTENDED_RANGE_BIT (0x1 << 2)
+#define THERM2_BIT (0x1 << 5)
+#define STANDBY_BIT (0x1 << 6)
+
+/* Max Temperature Measurements */
+#define EXTENDED_RANGE_OFFSET 64U
+#define STANDARD_RANGE_MAX 127U
+#define EXTENDED_RANGE_MAX (150U + EXTENDED_RANGE_OFFSET)
+
+/* Per-client driver state. */
+struct nct1008_data {
+	struct work_struct work;	/* bottom half for THERM2 edges */
+	struct i2c_client *client;
+	struct mutex mutex;		/* serializes alarm_fn invocation */
+	u8 config;			/* shadow of the CONFIG register */
+	void (*alarm_fn)(bool raised);	/* platform alarm callback */
+};
+
+/*
+ * nct1008_enable - take the sensor out of standby (clear STANDBY_BIT).
+ * NOTE(review): the i2c write result is ignored; a failed write leaves
+ * the device in standby silently.
+ */
+static void nct1008_enable(struct i2c_client *client)
+{
+	struct nct1008_data *data = i2c_get_clientdata(client);
+
+	i2c_smbus_write_byte_data(client, CONFIG_WR,
+				  data->config & ~STANDBY_BIT);
+}
+
+/*
+ * nct1008_disable - place the sensor in standby (set STANDBY_BIT).
+ * NOTE(review): write errors are ignored, as in nct1008_enable().
+ */
+static void nct1008_disable(struct i2c_client *client)
+{
+	struct nct1008_data *data = i2c_get_clientdata(client);
+
+	i2c_smbus_write_byte_data(client, CONFIG_WR,
+				  data->config | STANDBY_BIT);
+}
+
+
+/*
+ * nct1008_work_func - deferred THERM2 handling.
+ * Samples the GPIO backing the interrupt line and reports the alarm
+ * state to the platform callback; the line is active low, so a low
+ * level means the alarm is raised.
+ */
+static void nct1008_work_func(struct work_struct *work)
+{
+	struct nct1008_data *data = container_of(work, struct nct1008_data, work);
+	int irq = data->client->irq;
+
+	mutex_lock(&data->mutex);
+
+	if (data->alarm_fn) {
+		/* Therm2 line is active low */
+		data->alarm_fn(!gpio_get_value(irq_to_gpio(irq)));
+	}
+
+	mutex_unlock(&data->mutex);
+}
+
+/*
+ * nct1008_irq - hard IRQ handler for both THERM2 edges.
+ * Defers to nct1008_work_func(); GPIO/I2C access can sleep and so
+ * cannot happen here.
+ */
+static irqreturn_t nct1008_irq(int irq, void *dev_id)
+{
+	struct nct1008_data *data = dev_id;
+	schedule_work(&data->work);
+
+	return IRQ_HANDLED;
+}
+
+/* Convert a raw register value to degrees C, undoing the 64-degree
+ * bias the chip applies when extended range mode is enabled. */
+static inline u8 value_to_temperature(bool extended, u8 value)
+{
+	if (extended)
+		return (u8)(value - EXTENDED_RANGE_OFFSET);
+	return value;
+}
+
+/* Convert degrees C to a raw register value, adding the 64-degree
+ * bias required when extended range mode is enabled. */
+static inline u8 temperature_to_value(bool extended, u8 temp)
+{
+	if (extended)
+		return (u8)(temp + EXTENDED_RANGE_OFFSET);
+	return temp;
+}
+
+/*
+ * nct1008_configure_sensor - program limits and rates from platform data.
+ * Leaves the device in standby (STANDBY_BIT stays set in data->config);
+ * nct1008_enable() starts conversions later.  Returns 0 or the first
+ * failing i2c_smbus_write_byte_data() error.
+ */
+static int __devinit nct1008_configure_sensor(struct nct1008_data* data)
+{
+	struct i2c_client *client = data->client;
+	struct nct1008_platform_data *pdata = client->dev.platform_data;
+	u8 value;
+	int err;
+
+	if (!pdata || !pdata->supported_hwrev)
+		return -ENODEV;
+
+	/*
+	 * Initial Configuration - device is placed in standby and
+	 * ALERT/THERM2 pin is configured as THERM2
+	 */
+	data->config = value = pdata->ext_range ?
+		(STANDBY_BIT | THERM2_BIT | EXTENDED_RANGE_BIT) :
+		(STANDBY_BIT | THERM2_BIT);
+
+	err = i2c_smbus_write_byte_data(client, CONFIG_WR, value);
+	if (err < 0)
+		goto error;
+
+	/* Temperature conversion rate */
+	err = i2c_smbus_write_byte_data(client, CONV_RATE_WR, pdata->conv_rate);
+	if (err < 0)
+		goto error;
+
+	/* External temperature h/w shutdown limit */
+	value = temperature_to_value(pdata->ext_range, pdata->shutdown_ext_limit);
+	err = i2c_smbus_write_byte_data(client, EXT_THERM_LIMIT_WR, value);
+	if (err < 0)
+		goto error;
+
+	/* Local temperature h/w shutdown limit */
+	value = temperature_to_value(pdata->ext_range, pdata->shutdown_local_limit);
+	err = i2c_smbus_write_byte_data(client, LOCAL_THERM_LIMIT_WR, value);
+	if (err < 0)
+		goto error;
+
+	/* External Temperature Throttling limit */
+	value = temperature_to_value(pdata->ext_range, pdata->throttling_ext_limit);
+	err = i2c_smbus_write_byte_data(client, EXT_TEMP_HI_LIMIT_HI_BYTE, value);
+	if (err < 0)
+		goto error;
+
+	/* Local Temperature Throttling limit: effectively disabled by
+	 * programming the maximum representable temperature. */
+	value = pdata->ext_range ? EXTENDED_RANGE_MAX : STANDARD_RANGE_MAX;
+	err = i2c_smbus_write_byte_data(client, LOCAL_TEMP_HI_LIMIT_WR, value);
+	if (err < 0)
+		goto error;
+
+	/* Remote channel offset */
+	err = i2c_smbus_write_byte_data(client, OFFSET_WR, pdata->offset);
+	if (err < 0)
+		goto error;
+
+	/* THERM hysteresis */
+	err = i2c_smbus_write_byte_data(client, THERM_HYSTERESIS_WR, pdata->hysteresis);
+	if (err < 0)
+		goto error;
+
+	data->alarm_fn = pdata->alarm_fn;
+	return 0;
+error:
+	return err;
+}
+
+/*
+ * nct1008_configure_irq - init the deferred-work handler and claim the
+ * THERM2 interrupt on both edges.  Returns request_irq()'s result.
+ */
+static int __devinit nct1008_configure_irq(struct nct1008_data *data)
+{
+	INIT_WORK(&data->work, nct1008_work_func);
+
+	return request_irq(data->client->irq, nct1008_irq, IRQF_TRIGGER_RISING |
+			   IRQF_TRIGGER_FALLING, DRIVER_NAME, data);
+}
+
+/*
+ * nct1008_probe - allocate state, program the sensor (still in
+ * standby), hook the THERM2 interrupt, then start conversions and
+ * schedule one work item so the initial alarm state is reported.
+ *
+ * NOTE(review): on failure the i2c clientdata is left pointing at the
+ * freed allocation — confirm the core clears it on probe failure for
+ * this kernel version.
+ */
+static int __devinit nct1008_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct nct1008_data *data;
+	int err;
+
+	data = kzalloc(sizeof(struct nct1008_data), GFP_KERNEL);
+
+	if (!data)
+		return -ENOMEM;
+
+	data->client = client;
+	i2c_set_clientdata(client, data);
+	mutex_init(&data->mutex);
+
+	err = nct1008_configure_sensor(data);	/* sensor is in standby */
+	if (err < 0)
+		goto error;
+
+	err = nct1008_configure_irq(data);
+	if (err < 0)
+		goto error;
+
+	nct1008_enable(client);		/* sensor is running */
+
+	schedule_work(&data->work);		/* check initial state */
+
+	return 0;
+
+error:
+	kfree(data);
+	return err;
+}
+
+/*
+ * nct1008_remove - release the IRQ first so no new work can be queued,
+ * then flush any in-flight work before freeing the state.
+ */
+static int __devexit nct1008_remove(struct i2c_client *client)
+{
+	struct nct1008_data *data = i2c_get_clientdata(client);
+
+	free_irq(data->client->irq, data);
+	cancel_work_sync(&data->work);
+	kfree(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Suspend: quiesce the interrupt, then put the sensor in standby. */
+static int nct1008_suspend(struct i2c_client *client, pm_message_t state)
+{
+	disable_irq(client->irq);
+	nct1008_disable(client);
+
+	return 0;
+}
+
+/* Resume: restart conversions, re-arm the IRQ, and schedule one work
+ * item to re-report the alarm state (an edge may have been missed). */
+static int nct1008_resume(struct i2c_client *client)
+{
+	struct nct1008_data *data = i2c_get_clientdata(client);
+
+	nct1008_enable(client);
+	enable_irq(client->irq);
+	schedule_work(&data->work);
+
+	return 0;
+}
+#endif
+
+/* Matched by device name "nct1008" from board i2c registration. */
+static const struct i2c_device_id nct1008_id[] = {
+	{ DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, nct1008_id);
+
+static struct i2c_driver nct1008_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+	},
+	.probe		= nct1008_probe,
+	.remove		= __devexit_p(nct1008_remove),
+	.id_table	= nct1008_id,
+#ifdef CONFIG_PM
+	.suspend	= nct1008_suspend,
+	.resume		= nct1008_resume,
+#endif
+};
+
+static int __init nct1008_init(void)
+{
+	return i2c_add_driver(&nct1008_driver);
+}
+
+static void __exit nct1008_exit(void)
+{
+	i2c_del_driver(&nct1008_driver);
+}
+
+MODULE_DESCRIPTION("Temperature sensor driver for OnSemi NCT1008");
+MODULE_LICENSE("GPL");
+
+module_init (nct1008_init);
+module_exit (nct1008_exit);
* nodes that can comprise an access protection grouping. The access
* protection is in regards to memory, IOI and IPI.
*/
- max_regions = 64;
region_size = xp_region_size;
- switch (region_size) {
- case 128:
- max_regions *= 2;
- case 64:
- max_regions *= 2;
- case 32:
- max_regions *= 2;
- region_size = 16;
- DBUG_ON(!is_shub2());
+ if (is_uv())
+ max_regions = 256;
+ else {
+ max_regions = 64;
+
+ switch (region_size) {
+ case 128:
+ max_regions *= 2;
+ case 64:
+ max_regions *= 2;
+ case 32:
+ max_regions *= 2;
+ region_size = 16;
+ DBUG_ON(!is_shub2());
+ }
}
for (region = 0; region < max_regions; region++) {
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work(&host->detect);
+ cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
if (mmc_bus_manual_resume(host)) {
int ret;
u8 speed;
- if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
- return 0;
+ if (!(card->host->caps & MMC_CAP_FORCE_HS)) {
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
- if (!card->cccr.high_speed)
- return 0;
+ if (!card->cccr.high_speed)
+ return 0;
+ }
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
This provides support for the SD/MMC cell found in TC6393XB,
T7L66XB and also HTC ASIC3
+config MMC_SDHCI_TEGRA
+ tristate "Tegra SD/MMC Controller Support"
+ depends on ARCH_TEGRA && MMC_SDHCI
+ help
+ This selects the Tegra SD/MMC controller.
+
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
+obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
#include <linux/highmem.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>
#include <asm/io.h>
#include <asm/irq.h>
else if (data->flags & MMC_DATA_WRITE)
cmdr |= AT91_MCI_TRCMD_START;
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= AT91_MCI_TRTYP_STREAM;
- if (data->blocks > 1)
- cmdr |= AT91_MCI_TRTYP_MULTIPLE;
+ if (cmd->opcode == SD_IO_RW_EXTENDED) {
+ cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
+ } else {
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= AT91_MCI_TRTYP_STREAM;
+ if (data->blocks > 1)
+ cmdr |= AT91_MCI_TRTYP_MULTIPLE;
+ }
}
else {
block_length = 0;
#include <linux/stat.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>
#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
data = cmd->data;
if (data) {
cmdr |= MCI_CMDR_START_XFER;
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= MCI_CMDR_STREAM;
- else if (data->blocks > 1)
- cmdr |= MCI_CMDR_MULTI_BLOCK;
- else
- cmdr |= MCI_CMDR_BLOCK;
+
+ if (cmd->opcode == SD_IO_RW_EXTENDED) {
+ cmdr |= MCI_CMDR_SDIO_BLOCK;
+ } else {
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= MCI_CMDR_STREAM;
+ else if (data->blocks > 1)
+ cmdr |= MCI_CMDR_MULTI_BLOCK;
+ else
+ cmdr |= MCI_CMDR_BLOCK;
+ }
if (data->flags & MMC_DATA_READ)
cmdr |= MCI_CMDR_TRDIR_READ;
--- /dev/null
+/*
+ * drivers/mmc/host/sdhci-tegra.c
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+
+#define DRIVER_NAME "sdhci-tegra"
+
+#define SDHCI_VENDOR_CLOCK_CNTRL 0x100
+
+/* Tegra-private state hung off sdhci_priv(). */
+struct tegra_sdhci_host {
+	struct sdhci_host *sdhci;	/* back-pointer to the core host */
+	struct clk *clk;		/* controller clock */
+	int clk_enabled;		/* tracks clk_enable/clk_disable pairing */
+};
+
+/*
+ * carddetect_irq - card-detect GPIO interrupt (both edges).
+ * Forwards the event to the sdhci core, which rescans the slot.
+ */
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+	struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+	sdhci_card_detect_callback(sdhost);
+	return IRQ_HANDLED;
+}
+
+/* DMA needs no extra setup on Tegra; returning 0 tells the core it is
+ * usable as-is. */
+static int tegra_sdhci_enable_dma(struct sdhci_host *host)
+{
+	return 0;
+}
+
+/*
+ * tegra_sdhci_enable_clock - gate/ungate the controller clock.
+ * Order matters: the input clock must be running before the vendor
+ * clock-control register is touched, and the register is cleared
+ * before the input clock is stopped.  clk_enabled keeps the
+ * clk_enable/clk_disable calls balanced across repeated requests.
+ */
+static void tegra_sdhci_enable_clock(struct tegra_sdhci_host *host, int enable)
+{
+	if (enable && !host->clk_enabled) {
+		clk_enable(host->clk);
+		sdhci_writeb(host->sdhci, 1, SDHCI_VENDOR_CLOCK_CNTRL);
+		host->clk_enabled = 1;
+	} else if (!enable && host->clk_enabled) {
+		sdhci_writeb(host->sdhci, 0, SDHCI_VENDOR_CLOCK_CNTRL);
+		clk_disable(host->clk);
+		host->clk_enabled = 0;
+	}
+}
+
+/*
+ * tegra_sdhci_set_clock - sdhci_ops .set_clock hook.
+ * Only on/off is controlled here (clock == 0 gates the clock); the
+ * requested frequency itself is not programmed by this driver.
+ */
+static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
+{
+	struct tegra_sdhci_host *host = sdhci_priv(sdhci);
+	pr_debug("tegra sdhci clock %s %u enabled=%d\n",
+		mmc_hostname(sdhci->mmc), clock, host->clk_enabled);
+
+	tegra_sdhci_enable_clock(host, clock);
+}
+
+/* Host-controller callbacks handed to the sdhci core. */
+static struct sdhci_ops tegra_sdhci_ops = {
+	.enable_dma = tegra_sdhci_enable_dma,
+	.set_clock = tegra_sdhci_set_clock,
+};
+
+/*
+ * tegra_sdhci_probe - map the controller, claim its clock, register
+ * the sdhci host, and optionally hook a card-detect GPIO interrupt.
+ *
+ * Fixes over the previous version:
+ *  - map the full inclusive resource range (the old "end - start"
+ *    length was one byte short) and check the ioremap() result;
+ *  - the error path unmapped through sdhci->ioaddr, dereferencing an
+ *    ERR_PTR when sdhci_alloc_host() failed and a freed host when
+ *    falling through from err_free_host; unmap the local ioaddr.
+ */
+static int __devinit tegra_sdhci_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct tegra_sdhci_platform_data *plat;
+	struct sdhci_host *sdhci;
+	struct tegra_sdhci_host *host;
+	struct resource *res;
+	int irq;
+	void __iomem *ioaddr;
+
+	plat = pdev->dev.platform_data;
+	if (plat == NULL)
+		return -ENXIO;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	irq = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	ioaddr = ioremap(res->start, resource_size(res));
+	if (!ioaddr)
+		return -ENOMEM;
+
+	sdhci = sdhci_alloc_host(&pdev->dev, sizeof(struct tegra_sdhci_host));
+	if (IS_ERR(sdhci)) {
+		rc = PTR_ERR(sdhci);
+		goto err_unmap;
+	}
+
+	host = sdhci_priv(sdhci);
+	host->sdhci = sdhci;
+
+	host->clk = clk_get(&pdev->dev, plat->clk_id);
+	if (IS_ERR(host->clk)) {
+		rc = PTR_ERR(host->clk);
+		goto err_free_host;
+	}
+
+	rc = clk_enable(host->clk);
+	if (rc != 0)
+		goto err_clkput;
+
+	host->clk_enabled = 1;
+	sdhci->hw_name = "tegra";
+	sdhci->ops = &tegra_sdhci_ops;
+	sdhci->irq = irq;
+	sdhci->ioaddr = ioaddr;
+	/* No version register on this controller; claim spec 2.00. */
+	sdhci->version = SDHCI_SPEC_200;
+	sdhci->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		SDHCI_QUIRK_SINGLE_POWER_WRITE |
+		SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP |
+		SDHCI_QUIRK_BROKEN_WRITE_PROTECT |
+		SDHCI_QUIRK_BROKEN_CTRL_HISPD |
+		SDHCI_QUIRK_NO_HISPD_BIT |
+		SDHCI_QUIRK_8_BIT_DATA |
+		SDHCI_QUIRK_NO_VERSION_REG |
+		SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+		SDHCI_QUIRK_NO_SDIO_IRQ;
+
+	if (plat->force_hs != 0)
+		sdhci->quirks |= SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE;
+
+	if (plat->rt_disable != 0)
+		sdhci->quirks |= SDHCI_QUIRK_RUNTIME_DISABLE;
+
+	rc = sdhci_add_host(sdhci);
+	if (rc)
+		goto err_clk_disable;
+
+	platform_set_drvdata(pdev, host);
+
+	/* Optional card-detect GPIO; -1 means the slot has none. */
+	if (plat->cd_gpio != -1) {
+		rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				 mmc_hostname(sdhci->mmc), sdhci);
+
+		if (rc)
+			goto err_remove_host;
+	}
+
+	if (plat->board_probe)
+		plat->board_probe(pdev->id, sdhci->mmc);
+
+	printk(KERN_INFO "sdhci%d: initialized irq %d ioaddr %p\n", pdev->id,
+	       sdhci->irq, sdhci->ioaddr);
+
+	return 0;
+
+err_remove_host:
+	sdhci_remove_host(sdhci, 1);
+err_clk_disable:
+	clk_disable(host->clk);
+err_clkput:
+	clk_put(host->clk);
+err_free_host:
+	sdhci_free_host(sdhci);
+err_unmap:
+	iounmap(ioaddr);
+
+	return rc;
+}
+
+/*
+ * tegra_sdhci_remove - unregister and free the sdhci host.
+ * NOTE(review): this calls plat->board_probe on the remove path —
+ * presumably intended as a board teardown hook; confirm against the
+ * platform-data contract.
+ */
+static int tegra_sdhci_remove(struct platform_device *pdev)
+{
+	struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+	if (host) {
+		struct tegra_sdhci_platform_data *plat;
+		plat = pdev->dev.platform_data;
+		if (plat && plat->board_probe)
+			plat->board_probe(pdev->id, host->sdhci->mmc);
+
+		sdhci_remove_host(host->sdhci, 0);
+		sdhci_free_host(host->sdhci);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Suspend: let the sdhci core quiesce the host, then gate the clock.
+ * The clock is gated even if the core suspend reported an error. */
+static int tegra_sdhci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = sdhci_suspend_host(host->sdhci, state);
+	if (ret)
+		pr_err("%s: failed, error = %d\n", __func__, ret);
+
+	tegra_sdhci_enable_clock(host, 0);
+	return ret;
+}
+
+/* Resume: ungate the clock before the core touches registers. */
+static int tegra_sdhci_resume(struct platform_device *pdev)
+{
+	struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+	int ret;
+
+	tegra_sdhci_enable_clock(host, 1);
+	ret = sdhci_resume_host(host->sdhci);
+	if (ret)
+		pr_err("%s: failed, error = %d\n", __func__, ret);
+
+	return ret;
+}
+#else
+#define tegra_sdhci_suspend	NULL
+#define tegra_sdhci_resume	NULL
+#endif
+
+/* Platform glue; suspend/resume compile to NULL without CONFIG_PM. */
+static struct platform_driver tegra_sdhci_driver = {
+	.probe = tegra_sdhci_probe,
+	.remove = tegra_sdhci_remove,
+	.suspend = tegra_sdhci_suspend,
+	.resume = tegra_sdhci_resume,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init tegra_sdhci_init(void)
+{
+	return platform_driver_register(&tegra_sdhci_driver);
+}
+
+static void __exit tegra_sdhci_exit(void)
+{
+	platform_driver_unregister(&tegra_sdhci_driver);
+}
+
+module_init(tegra_sdhci_init);
+module_exit(tegra_sdhci_exit);
+
+MODULE_DESCRIPTION("Tegra SDHCI controller driver");
+MODULE_LICENSE("GPL");
#include <linux/leds.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
out:
+
host->clock = clock;
}
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
- else
- ctrl &= ~SDHCI_CTRL_4BITBUS;
if (ios->timing == MMC_TIMING_SD_HS &&
!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
present = 0;
- else
+ } else if (!(host->quirks & SDHCI_QUIRK_BROKEN_WRITE_PROTECT)) {
present = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ present = !(present & SDHCI_WRITE_PROTECT);
+ } else if (host->ops->get_ro) {
+ present = host->ops->get_ro(host);
+ } else {
+ present = 0;
+ }
spin_unlock_irqrestore(&host->lock, flags);
if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
return !!(present & SDHCI_WRITE_PROTECT);
- return !(present & SDHCI_WRITE_PROTECT);
+ return present;
}
static int sdhci_enable(struct mmc_host *mmc)
sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
else
sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
+
+ if (host->quirks & SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP) {
+ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ if (enable)
+ gap_ctrl |= 0x8;
+ else
+ gap_ctrl &= ~0x8;
+ writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ }
+
out:
mmiowb();
.enable_sdio_irq = sdhci_enable_sdio_irq,
};
-/*****************************************************************************\
- * *
- * Tasklets *
- * *
-\*****************************************************************************/
-
-static void sdhci_tasklet_card(unsigned long param)
+void sdhci_card_detect_callback(struct sdhci_host *host)
{
- struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)param;
-
spin_lock_irqsave(&host->lock, flags);
if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
+EXPORT_SYMBOL_GPL(sdhci_card_detect_callback);
+
+/*****************************************************************************\
+ * *
+ * Tasklets *
+ * *
+\*****************************************************************************/
+
+static void sdhci_tasklet_card(unsigned long param)
+{
+ struct sdhci_host *host;
+
+ host = (struct sdhci_host *)param;
+
+ sdhci_card_detect_callback(host);
+}
static void sdhci_tasklet_finish(unsigned long param)
{
host->cmd->error = -EILSEQ;
if (host->cmd->error) {
- tasklet_schedule(&host->finish_tasklet);
+ if (intmask & SDHCI_INT_RESPONSE)
+ tasklet_schedule(&host->finish_tasklet);
return;
}
int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
sdhci_disable_card_detection(host);
- ret = mmc_suspend_host(host->mmc);
- if (ret)
- return ret;
+ if (mmc->card && (mmc->card->type != MMC_TYPE_SDIO))
+ ret = mmc_suspend_host(host->mmc);
- free_irq(host->irq, host);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
if (host->vmmc)
ret = regulator_disable(host->vmmc);
+ if (host->irq)
+ disable_irq(host->irq);
+
return ret;
}
int sdhci_resume_host(struct sdhci_host *host)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
if (host->vmmc) {
int ret = regulator_enable(host->vmmc);
host->ops->enable_dma(host);
}
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
+ if (host->irq)
+ enable_irq(host->irq);
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
- ret = mmc_resume_host(host->mmc);
+ if (mmc->card && (mmc->card->type != MMC_TYPE_SDIO))
+ ret = mmc_resume_host(host->mmc);
+
sdhci_enable_card_detection(host);
return ret;
sdhci_reset(host, SDHCI_RESET_ALL);
- host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
- host->version = (host->version & SDHCI_SPEC_VER_MASK)
- >> SDHCI_SPEC_VER_SHIFT;
+ if (!(host->quirks & SDHCI_QUIRK_NO_VERSION_REG)) {
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+ >> SDHCI_SPEC_VER_SHIFT;
+ }
+
if (host->version > SDHCI_SPEC_200) {
printk(KERN_ERR "%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
else
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps |= MMC_CAP_SDIO_IRQ;
+ mmc->caps = 0;
+
+ if (host->quirks & SDHCI_QUIRK_8_BIT_DATA)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
- if (caps & SDHCI_CAN_DO_HISPD)
+ if (!(host->quirks & SDHCI_QUIRK_NO_SDIO_IRQ))
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ if (caps & SDHCI_CAN_DO_HISPD) {
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ }
+
+ if (host->quirks & SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE)
+ mmc->caps |= MMC_CAP_FORCE_HS;
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc_set_disable_delay(mmc, 50);
}
+ mmc->caps |= MMC_CAP_ERASE;
+
mmc->ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
- if (host->flags & SDHCI_USE_ADMA)
- mmc->max_seg_size = 65536;
- else
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ mmc->max_seg_size = 0xffff;
+ else
+ mmc->max_seg_size = 65536;
+ } else {
mmc->max_seg_size = mmc->max_req_size;
+ }
/*
* Maximum block size. This varies from controller to controller and
* Maximum block count.
*/
mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
-
+
/*
* Init tasklets.
*/
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
#define SDHCI_CTRL_4BITBUS 0x02
+#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_CTRL_HISPD 0x04
#define SDHCI_CTRL_DMA_MASK 0x18
#define SDHCI_CTRL_SDMA 0x00
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
- unsigned int quirks; /* Deviations from spec. */
+ u64 quirks; /* Deviations from spec. */
/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1LL<<0)
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
int (*enable_dma)(struct sdhci_host *host);
+ int (*get_ro)(struct sdhci_host *host);
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
size_t priv_size);
extern void sdhci_free_host(struct sdhci_host *host);
+extern void sdhci_card_detect_callback(struct sdhci_host *host);
static inline void *sdhci_priv(struct sdhci_host *host)
{
other key product data. The second half is programmed with a
unique-to-each-chip bit pattern at the factory.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND Controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA
+ help
+ Enables NAND flash support for NVIDIA's Tegra family of chips.
+
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
depends on SPI_MASTER && EXPERIMENTAL
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
--- /dev/null
+/*
+ * drivers/mtd/devices/tegra_nand.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from: drivers/mtd/nand/nand_base.c
+ * drivers/mtd/nand/pxa3xx.c
+ *
+ * TODO:
+ * - Add support for 16bit bus width
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/nand.h>
+
+#include "tegra_nand.h"
+
+#define DRIVER_NAME "tegra_nand"
+#define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
+
+#define MAX_DMA_SZ SZ_64K
+#define ECC_BUF_SZ SZ_1K
+
+/* FIXME: is this right?!
+ * NvRM code says it should be 128 bytes, but that seems awfully small
+ */
+
+/*#define TEGRA_NAND_DEBUG
+#define TEGRA_NAND_DEBUG_PEDANTIC*/
+
+#ifdef TEGRA_NAND_DEBUG
+#define TEGRA_DBG(fmt, args...) \
+ do { pr_info(fmt, ##args); } while (0)
+#else
+#define TEGRA_DBG(fmt, args...)
+#endif
+
+/* TODO: will vary with devices, move into appropriate device spcific header */
+#define SCAN_TIMING_VAL 0x3f0bd214
+#define SCAN_TIMING2_VAL 0xb
+
+/* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
+ * so we don't have to redefine them */
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+/* Geometry/addressing state for the (possibly multi-chip) NAND array. */
+struct tegra_nand_chip {
+ spinlock_t lock;
+ uint32_t chipsize;
+ int num_chips;
+ int curr_chip;
+
+ /* addr >> chip_shift == chip number */
+ uint32_t chip_shift;
+ /* (addr >> page_shift) & page_mask == page number within chip */
+ uint32_t page_shift;
+ uint32_t page_mask;
+ /* column within page */
+ uint32_t column_mask;
+ /* addr >> block_shift == block number (across the whole mtd dev, not
+ * just a single chip). */
+ uint32_t block_shift;
+
+ void *priv;
+};
+
+/* Driver instance state: MTD glue, DMA buffers, ECC bookkeeping and the
+ * shadow copies of the controller registers programmed per transfer. */
+struct tegra_nand_info {
+ struct tegra_nand_chip chip;
+ struct mtd_info mtd;
+ struct tegra_nand_platform *plat;
+ struct device *dev;
+ struct mtd_partition *parts;
+
+ /* serializes access to the actual NAND controller */
+ struct mutex lock;
+
+
+ void *oob_dma_buf;
+ dma_addr_t oob_dma_addr;
+ /* ecc error vector info (offset into page and data mask to apply) */
+ void *ecc_buf;
+ dma_addr_t ecc_addr;
+ /* ecc error status (page number, err_cnt) */
+ uint32_t *ecc_errs;
+ uint32_t num_ecc_errs;
+ uint32_t max_ecc_errs;
+ spinlock_t ecc_lock;
+
+ /* shadow register values assembled by prep_transfer_dma() et al. */
+ uint32_t command_reg;
+ uint32_t config_reg;
+ uint32_t dmactrl_reg;
+
+ struct completion cmd_complete;
+ struct completion dma_complete;
+
+ /* bad block bitmap: 1 == good, 0 == bad/unknown */
+ unsigned long *bb_bitmap;
+
+ struct clk *clk;
+};
+#define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
+
+/* 64 byte oob block info for large page (== 2KB) device
+ *
+ * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
+ * Skipped bytes(4)
+ * Main area Ecc(36)
+ * Tag data(20)
+ * Tag data Ecc(4)
+ *
+ * Yaffs2 will use 16 tag bytes.
+ */
+
+/* ECC bytes occupy spare-area offsets 4..39; offsets 40..59 are free. */
+static struct nand_ecclayout tegra_nand_oob_64 = {
+ .eccbytes = 36,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobavail = 20,
+ .oobfree = {
+ { .offset = 40,
+ .length = 20,
+ },
+ },
+};
+
+/* Look up a device-id in the generic nand_flash_ids table; NULL if absent. */
+static struct nand_flash_dev *
+find_nand_flash_device(int dev_id)
+{
+ struct nand_flash_dev *p;
+
+ for (p = nand_flash_ids; p->name != NULL; p++) {
+ if (p->id == dev_id)
+ return p;
+ }
+ return NULL;
+}
+
+/* Look up a manufacturer-id in nand_manuf_ids; NULL if not recognized. */
+static struct nand_manufacturers *
+find_nand_flash_vendor(int vendor_id)
+{
+ struct nand_manufacturers *v;
+
+ for (v = nand_manuf_ids; v->id != 0; v++) {
+ if (v->id == vendor_id)
+ return v;
+ }
+ return NULL;
+}
+
+/* Address/name pairs for dump_nand_regs(); NULL name terminates the list. */
+#define REG_NAME(name) { name, #name }
+static struct {
+ uint32_t addr;
+ char *name;
+} reg_names[] = {
+ REG_NAME(COMMAND_REG),
+ REG_NAME(STATUS_REG),
+ REG_NAME(ISR_REG),
+ REG_NAME(IER_REG),
+ REG_NAME(CONFIG_REG),
+ REG_NAME(TIMING_REG),
+ REG_NAME(RESP_REG),
+ REG_NAME(TIMING2_REG),
+ REG_NAME(CMD_REG1),
+ REG_NAME(CMD_REG2),
+ REG_NAME(ADDR_REG1),
+ REG_NAME(ADDR_REG2),
+ REG_NAME(DMA_MST_CTRL_REG),
+ REG_NAME(DMA_CFG_A_REG),
+ REG_NAME(DMA_CFG_B_REG),
+ REG_NAME(FIFO_CTRL_REG),
+ REG_NAME(DATA_BLOCK_PTR_REG),
+ REG_NAME(TAG_PTR_REG),
+ REG_NAME(ECC_PTR_REG),
+ REG_NAME(DEC_STATUS_REG),
+ REG_NAME(HWSTATUS_CMD_REG),
+ REG_NAME(HWSTATUS_MASK_REG),
+ { 0, NULL },
+};
+#undef REG_NAME
+
+
+/* Dump all controller registers via TEGRA_DBG.  Always returns 1 so it can
+ * be chained inside BUG_ON(!ret && dump_nand_regs()) style expressions. */
+static int
+dump_nand_regs(void)
+{
+ int i;
+
+ TEGRA_DBG("%s: dumping registers\n", __func__);
+ for (i = 0; reg_names[i].name != NULL; ++i)
+ TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name, readl(reg_names[i].addr));
+ TEGRA_DBG("%s: end of reg dump\n", __func__);
+ return 1;
+}
+
+
+/* Set the given bits in the interrupt-enable register (read-modify-write). */
+static inline void
+enable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ uint32_t ier = readl(IER_REG);
+
+ (void)info;
+ writel(ier | mask, IER_REG);
+}
+
+
+/* Clear the given bits in the interrupt-enable register (read-modify-write). */
+static inline void
+disable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ uint32_t ier = readl(IER_REG);
+
+ (void)info;
+ writel(ier & ~mask, IER_REG);
+}
+
+
+/* Decompose a linear MTD offset into chip number, page-within-chip and
+ * byte column, using the shifts/masks computed at probe time. */
+static inline void
+split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr, uint32_t *page,
+ uint32_t *column)
+{
+ *chipnr = (int)(offset >> info->chip.chip_shift);
+ *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
+ *column = offset & info->chip.column_mask;
+}
+
+
+/*
+ * Controller interrupt handler.  Completes the command and/or DMA waiters
+ * and captures the ECC decode status word for the transfer in progress.
+ * Runs with the ecc_lock taken only for the error-vector update.
+ */
+static irqreturn_t
+tegra_nand_irq(int irq, void *dev_id)
+{
+ struct tegra_nand_info *info = dev_id;
+ uint32_t isr;
+ uint32_t ier;
+ uint32_t dma_ctrl;
+ uint32_t tmp;
+
+ isr = readl(ISR_REG);
+ ier = readl(IER_REG);
+ dma_ctrl = readl(DMA_MST_CTRL_REG);
+#ifdef DEBUG_DUMP_IRQ
+ pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
+ isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
+#endif
+ if (isr & ISR_CMD_DONE) {
+ /* only wake the waiter if the GO bit has really dropped */
+ if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
+ complete(&info->cmd_complete);
+ else
+ pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
+ }
+
+ if (isr & ISR_ECC_ERR) {
+ /* always want to read the decode status so xfers don't stall. */
+ tmp = readl(DEC_STATUS_REG);
+
+ /* was ECC check actually enabled */
+ if ((ier & IER_ECC_ERR)) {
+ unsigned long flags;
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ info->ecc_errs[info->num_ecc_errs++] = tmp;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+ }
+ }
+
+ /* DMA done is signalled in DMA_MST_CTRL, not ISR; ack by writing back */
+ if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
+ (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
+ complete(&info->dma_complete);
+ writel(dma_ctrl, DMA_MST_CTRL_REG);
+ }
+
+ if ((isr & ISR_UND) && (ier & IER_UND))
+ pr_err("%s: fifo underrun.\n", __func__);
+
+ if ((isr & ISR_OVR) && (ier & IER_OVR))
+ pr_err("%s: fifo overrun.\n", __func__);
+
+ /* clear ALL interrupts?! */
+ writel(isr & 0xfffc, ISR_REG);
+
+ return IRQ_HANDLED;
+}
+
+/* Returns 1 when the controller's GO bit has cleared (command finished). */
+static inline int
+tegra_nand_is_cmd_done(struct tegra_nand_info *info)
+{
+ return !(readl(COMMAND_REG) & COMMAND_GO);
+}
+
+/*
+ * Wait (up to 2s) for the command-done completion raised by the IRQ
+ * handler.  Returns 0 on success, -ETIMEDOUT if the controller never
+ * signalled completion.
+ */
+static int
+tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
+{
+ uint32_t timeout = (2 * HZ); /* TODO: make this realistic */
+ int ret;
+
+ /* wait_for_completion_timeout() returns 0 on timeout, >0 otherwise */
+ ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(!ret && dump_nand_regs());
+#endif
+
+ /* BUGFIX: 'ret ? 0 : ret' always evaluated to 0, so a timeout was
+ * silently reported as success and tegra_nand_go() could never take
+ * its error path. */
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+/* Record which chip-enable subsequent commands should assert.
+ * -1 means "no chip selected yet"; anything else must be < max_chips. */
+static inline void
+select_chip(struct tegra_nand_info *info, int chipnr)
+{
+ BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
+ info->chip.curr_chip = chipnr;
+}
+
+/* Program the hardware status-monitor so the controller can poll the chip's
+ * STATUS command itself and wait for ready/not-busy between operations. */
+static void
+cfg_hwstatus_mon(struct tegra_nand_info *info)
+{
+ uint32_t val;
+
+ val = (HWSTATUS_RDSTATUS_MASK(1) |
+ HWSTATUS_RDSTATUS_EXP_VAL(0) |
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
+ HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
+ writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
+ writel(val, HWSTATUS_MASK_REG);
+}
+
+/* Tells the NAND controller to initiate the command. */
+/* Fires the previously-assembled info->command_reg and blocks until the
+ * command-done interrupt.  Caller must hold info->lock and have programmed
+ * CMD/ADDR/CONFIG registers.  Returns 0 or -ETIMEDOUT. */
+static int
+tegra_nand_go(struct tegra_nand_info *info)
+{
+ BUG_ON(!tegra_nand_is_cmd_done(info));
+
+ INIT_COMPLETION(info->cmd_complete);
+ writel(info->command_reg | COMMAND_GO, COMMAND_REG);
+
+ if (unlikely(tegra_nand_wait_cmd_done(info))) {
+ /* TODO: abort command if needed? */
+ pr_err("%s: Timeout while waiting for command\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* TODO: maybe wait for dma here? */
+ return 0;
+}
+
+/* Assemble a PIO READID command (4 response bytes) for the current chip.
+ * Does not start the command; caller follows up with tegra_nand_go(). */
+static void
+tegra_nand_prep_readid(struct tegra_nand_info *info)
+{
+ info->command_reg = (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_READID, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(0, CONFIG_REG);
+}
+
+/* Issue READID on the currently selected chip and return the 4 raw ID
+ * bytes packed into *chip_id.  Returns 0 or a negative errno. */
+static int
+tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
+{
+ int err;
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(info->chip.curr_chip == -1);
+#endif
+
+ tegra_nand_prep_readid(info);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *chip_id = readl(RESP_REG);
+ return 0;
+}
+
+
+/* assumes right locks are held */
+/* Issue the chip STATUS command and return its low status byte in *status.
+ * Waits for ready/busy (COMMAND_RBSY_CHK) before sampling. */
+static int
+nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
+{
+ int err;
+
+ info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_RBSY_CHK | (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_STATUS, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *status = readl(RESP_REG) & 0xff;
+ return 0;
+}
+
+
+/* must be called with lock held */
+/* Check the factory bad-block markers (first OOB bytes of the first two
+ * pages) of the block containing offs.  Returns 0 if good, 1 if bad,
+ * negative errno on command failure.  Good blocks are cached in
+ * info->bb_bitmap so the flash is only probed once per block. */
+static int
+check_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ /* fast path: bitmap already says the block is good */
+ if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
+ return 0;
+
+ offs &= ~(mtd->erasesize - 1);
+
+ /* Only set COM_BSY. */
+ /* TODO: should come from board file */
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* check first two pages of the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX | COMMAND_PIO |
+ COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
+ COMMAND_SEC_CMD;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ /* ... poison me ... */
+ writel(0xaa55aa55, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0) {
+ pr_info("baaaaaad\n");
+ goto out;
+ }
+
+ /* a good block has 0xffff in the marker bytes */
+ if ((readl(RESP_REG) & 0xffff) != 0xffff) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Note: The assumption here is that we cannot cross chip
+ * boundary since the we are only looking at the first 2 pages in
+ * a block, i.e. erasesize > writesize ALWAYS */
+ page++;
+ }
+
+out:
+ /* update the bitmap if the block is good */
+ if (ret == 0)
+ set_bit(block, info->bb_bitmap);
+ return ret;
+}
+
+
+/*
+ * MTD block_isbad hook: returns >0 if the block containing offs is bad,
+ * 0 if good, negative errno on failure.  Serializes controller access
+ * with info->lock around the real check in check_block_isbad().
+ */
+static int
+tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int ret;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ mutex_lock(&info->lock);
+ ret = check_block_isbad(mtd, offs);
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+
+/* MTD block_markbad hook: clear the block in the good-block bitmap and
+ * program 0x00 marker bytes into the OOB of the block's first two pages.
+ * Returns 0 or a negative errno from the program commands. */
+static int
+tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ pr_info("tegra_nand: setting block %d bad\n", block);
+
+ mutex_lock(&info->lock);
+ offs &= ~(mtd->erasesize - 1);
+
+ /* mark the block bad in our bitmap */
+ clear_bit(block, info->bb_bitmap);
+ mtd->ecc_stats.badblocks++;
+
+ /* Only set COM_BSY. */
+ /* TODO: should come from board file */
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* write to first two pages in the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX | COMMAND_PIO |
+ COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
+ COMMAND_AFT_DAT | COMMAND_SEC_CMD;
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ /* zero marker bytes == bad block */
+ writel(0x0, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0)
+ goto out;
+
+ /* TODO: check if the program op worked? */
+ page++;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+
+/*
+ * MTD erase hook: erase instr->len bytes of block-aligned flash starting
+ * at instr->addr, skipping blocks already known bad.  Issues the
+ * ERASE1/ERASE2 command pair per block and verifies the chip status word
+ * afterwards.  Returns 0, -EINVAL on bad arguments, or -EIO on failure.
+ */
+static int
+tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t num_blocks;
+ /* BUGFIX: was uint32_t, silently truncating instr->addr on devices
+ * of 4GiB or more */
+ loff_t offs;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint32_t status = 0;
+
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
+ instr->len);
+
+ if ((instr->addr + instr->len) > mtd->size) {
+ pr_err("tegra_nand_erase: Can't erase past end of device\n");
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
+ instr->addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
+ instr->len);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ /* use the standard sentinel instead of a 32-bit 0xffffffff */
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
+ mutex_lock(&info->lock);
+
+ instr->state = MTD_ERASING;
+
+ offs = instr->addr;
+ num_blocks = instr->len >> info->chip.block_shift;
+
+ select_chip(info, -1);
+
+ while (num_blocks--) {
+ split_addr(info, offs, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08llx, page=0x%08x\n",
+ (long long)offs, page);
+
+ if (check_block_isbad(mtd, offs)) {
+ pr_info("%s: skipping bad block @ 0x%08llx\n",
+ __func__, (long long)offs);
+ goto next_block;
+ }
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(2) | COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_ERASE1, CMD_REG1);
+ writel(NAND_CMD_ERASE2, CMD_REG2);
+
+ writel(page & 0xffffff, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ if (tegra_nand_go(info) != 0) {
+ instr->fail_addr = offs;
+ goto out_err;
+ }
+
+ /* TODO: do we want a timeout here? */
+ if ((nand_cmd_get_status(info, &status) != 0) ||
+ (status & NAND_STATUS_FAIL) ||
+ ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
+ instr->fail_addr = offs;
+ pr_info("%s: erase failed @ 0x%08llx (stat=0x%08x)\n",
+ __func__, (long long)offs, status);
+ goto out_err;
+ }
+next_block:
+ offs += mtd->erasesize;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mutex_unlock(&info->lock);
+ mtd_erase_callback(instr);
+ return 0;
+
+out_err:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&info->lock);
+ return -EIO;
+}
+
+
+/* Debug helper: pretty-print an mtd_oob_ops request.
+ * NOTE(review): the #if 0'd caller in do_read_oob() invokes this as
+ * dump_mtd_oob_ops(mtd, ops) — wrong arity; fix before re-enabling. */
+static inline void
+dump_mtd_oob_ops(struct mtd_oob_ops *ops)
+{
+ pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
+ "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
+ (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
+ (ops->mode == MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")),
+ ops->len, ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
+}
+
+/* MTD read hook: wrap the request in an AUTO-mode mtd_oob_ops with no OOB
+ * buffer and delegate to the read_oob path. */
+static int
+tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_AUTO,
+ .len = len,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int err;
+
+ pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
+ err = mtd->read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return err;
+}
+
+/* An erased (all-0xFF) page always trips the RS decoder.  If every byte
+ * the caller received is 0xFF, drop the ECC error records gathered for
+ * this transfer so blank pages are not counted as ECC failures. */
+static void
+correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf, u8 *oobbuf, unsigned int a_len, unsigned int b_len) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ if (info->num_ecc_errs) {
+ int blank = 1;
+ int i;
+
+ if (datbuf) {
+ for (i = 0; i < a_len && blank; i++)
+ blank = (datbuf[i] == 0xFF);
+ }
+ if (oobbuf) {
+ for (i = 0; i < b_len && blank; i++)
+ blank = (oobbuf[i] == 0xFF);
+ }
+ if (blank)
+ info->num_ecc_errs = 0;
+ }
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+/* Fold the ECC decode-status words collected by the IRQ handler into the
+ * MTD statistics, then reset the per-transfer error list.  OOB (area B)
+ * failures are only counted when the caller actually read OOB data. */
+static void
+update_ecc_counts(struct tegra_nand_info *info, int check_oob)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ for (i = 0; i < info->num_ecc_errs; ++i) {
+ /* correctable */
+ info->mtd.ecc_stats.corrected +=
+ DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
+
+ /* uncorrectable */
+ if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
+ info->mtd.ecc_stats.failed++;
+ if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
+ info->mtd.ecc_stats.failed++;
+ }
+ info->num_ecc_errs = 0;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+/* Reset the shadow register values before assembling a new transfer. */
+static inline void
+clear_regs(struct tegra_nand_info *info)
+{
+ info->command_reg = 0;
+ info->config_reg = 0;
+ info->dmactrl_reg = 0;
+}
+
+/*
+ * Assemble the shadow COMMAND/CONFIG/DMA_CTRL values and program the
+ * CMD/ADDR/DMA registers for one page-sized DMA transfer.
+ * @rx: non-zero for a read (READ0/READSTART), zero for a write
+ *      (SEQIN/PAGEPROG)
+ * @do_ecc: enable HW RS ECC on the main area and tag ECC on the OOB
+ * Area A is the main data (data_dma/data_len); area B is the OOB tag
+ * region (oob_dma/oob_len).  Caller still writes CONFIG_REG and
+ * DMA_MST_CTRL_REG itself and then calls tegra_nand_go().
+ */
+static void
+prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc, uint32_t page,
+ uint32_t column, dma_addr_t data_dma,
+ uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
+{
+ uint32_t tag_sz = oob_len;
+
+#if 0
+ pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
+ "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
+ rx, do_ecc, page, column, data_dma, data_len, oob_dma,
+ oob_len);
+#endif
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
+ COMMAND_TRANS_SIZE(8);
+
+ info->config_reg = (CONFIG_PAGE_SIZE_SEL(3) | CONFIG_PIPELINE_EN |
+ CONFIG_COM_BSY);
+
+ info->dmactrl_reg = (DMA_CTRL_DMA_GO |
+ DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
+ DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));
+
+ if (rx) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ERR_CORRECTION;
+ info->command_reg |= COMMAND_RX;
+ info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+ } else {
+ info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
+ info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+ }
+
+ if (data_len) {
+ if (do_ecc)
+ info->config_reg |=
+ CONFIG_HW_ECC | CONFIG_ECC_SEL | CONFIG_TVALUE(0) |
+ CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
+ info->command_reg |= COMMAND_A_VALID;
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
+ writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
+ writel(data_dma, DATA_BLOCK_PTR_REG);
+ } else {
+ /* OOB-only transfer: point the column past the main area */
+ column = info->mtd.writesize;
+ if (do_ecc)
+ column += info->mtd.ecclayout->oobfree[0].offset;
+ writel(0, DMA_CFG_A_REG);
+ writel(0, DATA_BLOCK_PTR_REG);
+ }
+
+ if (oob_len) {
+ oob_len = info->mtd.oobavail;
+ tag_sz = info->mtd.oobavail;
+ if (do_ecc) {
+ tag_sz += 4; /* size of tag ecc */
+ if (rx)
+ oob_len += 4; /* size of tag ecc */
+ info->config_reg |= CONFIG_ECC_EN_TAG;
+ }
+ if (data_len && rx)
+ oob_len += 4; /* num of skipped bytes */
+
+ info->command_reg |= COMMAND_B_VALID;
+ info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
+ writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
+ writel(oob_dma, TAG_PTR_REG);
+ } else {
+ writel(0, DMA_CFG_B_REG);
+ writel(0, TAG_PTR_REG);
+ }
+
+ writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+}
+
+/* Map a kernel buffer (lowmem or vmalloc) for DMA.  vmalloc buffers must
+ * not cross a page boundary since only one page can be mapped; returns ~0
+ * as an error marker in that case.  Caller pairs with dma_unmap_page(). */
+static dma_addr_t
+tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+/* if mode == RAW, then we read data only, with no ECC
+ * if mode == PLACE, we read ONLY the OOB data from a raw offset into the spare
+ * area (ooboffs).
+ * if mode == AUTO, we read main data and the OOB data from the oobfree areas as
+ * specified by nand_ecclayout.
+ */
+/* Core page-read loop: DMA one page (main and/or OOB) at a time, folding
+ * ECC results into mtd->ecc_stats.  Caller guarantees page alignment was
+ * validated by tegra_nand_read_oob().  Returns 0, -EUCLEAN when bits were
+ * corrected, -EBADMSG on uncorrectable errors, or a negative errno. */
+static int
+do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct mtd_ecc_stats old_ecc_stats;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(mtd, ops);
+#endif
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* TODO: Worry about reads from non-page boundaries later */
+ if (unlikely(from & info->chip.column_mask)) {
+ pr_err("%s: Unaligned read (from 0x%llx) not supported\n",
+ __func__, from);
+ return -EINVAL;
+ }
+
+ if (likely(ops->mode == MTD_OOB_AUTO)) {
+ oobsz = mtd->oobavail;
+ } else {
+ /* raw/place mode: expose the whole spare area, ECC off */
+ oobsz = mtd->oobsize;
+ do_ecc = 0;
+ }
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't read OOB from multiple pages (%d > %d)\n", __func__,
+ ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf) {
+ page_count = 1;
+ } else {
+ page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
+ }
+
+ mutex_lock(&info->lock);
+
+ /* snapshot stats so we can report -EUCLEAN/-EBADMSG for this read only */
+ memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));
+
+ if (do_ecc) {
+ enable_ints(info, IER_ECC_ERR);
+ writel(info->ecc_addr, ECC_PTR_REG);
+ } else
+ disable_ints(info, IER_ECC_ERR);
+
+ split_addr(info, from, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ /* reset it to point back to beginning of page */
+ from -= column;
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize - column, len);
+ int b_len = min(oobsz, ooblen);
+
+#if 0
+ pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
+ page, column);
+#endif
+
+ clear_regs(info);
+ if (datbuf)
+ datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_FROM_DEVICE);
+
+ prep_transfer_dma(info, 1, do_ecc, page, column, datbuf_dma_addr,
+ a_len, info->oob_dma_addr,
+ b_len);
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ /*pr_info("tegra_read_oob: DMA complete\n");*/
+
+ /* if we are here, transfer is done */
+ if (datbuf)
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_FROM_DEVICE);
+
+ if (oobbuf) {
+ uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */
+ memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
+ }
+
+ correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len, b_len);
+
+ if (datbuf) {
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ update_ecc_counts(info, oobbuf != NULL);
+
+ if (!page_count)
+ break;
+
+ from += mtd->writesize;
+ column = 0;
+
+ split_addr(info, from, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ disable_ints(info, IER_ECC_ERR);
+
+ if (mtd->ecc_stats.failed != old_ecc_stats.failed)
+ err = -EBADMSG;
+ else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
+ err = -EUCLEAN;
+ else
+ err = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ disable_ints(info, IER_ECC_ERR);
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+/* Validate an mtd_oob_ops read request, then hand it to do_read_oob(). */
+static int
+tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
+ pr_err("%s: Can't read past end of device.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Reading 0 bytes from OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ /* non-AUTO modes carry extra restrictions */
+ if (ops->mode != MTD_OOB_AUTO) {
+ if (ops->oobbuf && ops->datbuf) {
+ pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ops->mode == MTD_OOB_RAW && !ops->datbuf) {
+ pr_err("%s: Raw mode only supports reading data area.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return do_read_oob(mtd, from, ops);
+}
+
+/* MTD write hook: wrap the request in an AUTO-mode mtd_oob_ops with no OOB
+ * buffer and delegate to the write_oob path. */
+static int
+tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_AUTO,
+ .len = len,
+ .datbuf = (uint8_t *)buf,
+ .oobbuf = NULL,
+ };
+ int err;
+
+ pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
+ err = mtd->write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return err;
+}
+
+/*
+ * Core page-write loop: DMA one page (main and/or OOB) at a time.
+ * AUTO mode programs through the HW ECC engine into the oobfree region;
+ * other modes write the raw spare area with ECC disabled.  Mirrors
+ * do_read_oob().  Returns 0 on success or a negative errno.
+ */
+static int
+do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err = 0;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ if (!ops->len)
+ return 0;
+
+
+ if (likely(ops->mode == MTD_OOB_AUTO)) {
+ oobsz = mtd->oobavail;
+ } else {
+ /* raw/place mode: whole spare area, HW ECC off */
+ oobsz = mtd->oobsize;
+ do_ecc = 0;
+ }
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf) {
+ page_count = 1;
+ } else
+ page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
+
+ mutex_lock(&info->lock);
+
+ split_addr(info, to, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize, len);
+ int b_len = min(oobsz, ooblen);
+
+ if (datbuf)
+ datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_TO_DEVICE);
+ if (oobbuf)
+ memcpy(info->oob_dma_buf, oobbuf, b_len);
+
+ clear_regs(info);
+ prep_transfer_dma(info, 0, do_ecc, page, column, datbuf_dma_addr,
+ a_len, info->oob_dma_addr, b_len);
+
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ /* BUGFIX: err was left at 0 from tegra_nand_go(), so a
+ * timed-out write was reported as success; also unmap
+ * the buffer we mapped above instead of leaking it. */
+ if (datbuf)
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_TO_DEVICE);
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ if (datbuf) {
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_TO_DEVICE);
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ if (!page_count)
+ break;
+
+ to += mtd->writesize;
+ column = 0;
+
+ split_addr(info, to, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+/* Validate an mtd_oob_ops write request, then hand it to do_write_oob(). */
+static int
+tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+
+ /* BUGFIX: bounds check was missing here although the read path
+ * (tegra_nand_read_oob) performs the equivalent check. */
+ if (ops->datbuf && unlikely((to + ops->len) > mtd->size)) {
+ pr_err("%s: Can't write past end of device.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(to & info->chip.column_mask)) {
+ pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
+ __func__, to);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ return do_write_oob(mtd, to, ops);
+}
+
+/* MTD suspend hook — stub; nothing is saved.  Returning 0 allows suspend. */
+static int
+tegra_nand_suspend(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+/* MTD resume hook — stub; controller state is not reprogrammed here. */
+static void
+tegra_nand_resume(struct mtd_info *mtd)
+{
+}
+
+/* Probe every erase block once at init time and populate bb_bitmap
+ * (bit set == good).  Returns 0 or the first fatal error from
+ * block_isbad(). */
+static int
+scan_bad_blocks(struct tegra_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift;
+ uint32_t block;
+
+ for (block = 0; block < num_blocks; ++block) {
+ int ret;
+
+ /* mark unknown first; only set the bit once proven good */
+ clear_bit(block, info->bb_bitmap);
+
+ ret = mtd->block_isbad(mtd, block << info->chip.block_shift);
+ if (ret < 0) {
+ pr_err("Fatal error (%d) while scanning for bad blocks\n", ret);
+ return ret;
+ }
+ if (ret > 0) {
+ pr_info("block 0x%08x is bad.\n", block);
+ continue;
+ }
+ set_bit(block, info->bb_bitmap);
+ }
+ return 0;
+}
+
+/* Program TIMING/TIMING2 from the first chip-parms entry, converting each
+ * nanosecond figure into controller clock cycles (rounded up) via CNT().
+ * NOTE(review): clock rate is hard-coded at 108 MHz — see TODOs below. */
+static void
+set_chip_timing(struct tegra_nand_info *info)
+{
+ struct tegra_nand_chip_parms *chip_parms = &info->plat->chip_parms[0];
+ uint32_t tmp;
+
+ /* TODO: Actually search the chip_parms list for the correct device. */
+ /* TODO: Get the appropriate frequency from the clock subsystem */
+#define NAND_CLK_FREQ 108000
+#define CNT(t) (((((t) * NAND_CLK_FREQ) + 1000000 - 1) / 1000000) - 1)
+ tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
+ TIMING_TWB(CNT(chip_parms->timing.twb)) |
+ TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
+ TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
+ TIMING_TCS(CNT(chip_parms->timing.tcs)) |
+ TIMING_TWH(CNT(chip_parms->timing.twh)) |
+ TIMING_TWP(CNT(chip_parms->timing.twp)) |
+ TIMING_TRH(CNT(chip_parms->timing.trh)) |
+ TIMING_TRP(CNT(chip_parms->timing.trp)));
+ writel(tmp, TIMING_REG);
+ writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
+#undef CNT
+#undef NAND_CLK_FREQ
+}
+
+/* Scans for nand flash devices, identifies them, and fills in the
+ * device info. */
+static int
+tegra_nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct nand_flash_dev *dev_info;
+ struct nand_manufacturers *vendor_info;
+ uint32_t tmp;
+ uint32_t dev_id;
+ uint32_t vendor_id;
+ uint32_t dev_parms;
+ uint32_t mlc_parms;
+ int cnt;
+ int err = 0;
+
+ writel(SCAN_TIMING_VAL, TIMING_REG);
+ writel(SCAN_TIMING2_VAL, TIMING2_REG);
+ writel(0, CONFIG_REG);
+
+ select_chip(info, 0);
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+
+ vendor_id = tmp & 0xff;
+ dev_id = (tmp >> 8) & 0xff;
+ mlc_parms = (tmp >> 16) & 0xff;
+ dev_parms = (tmp >> 24) & 0xff;
+
+ dev_info = find_nand_flash_device(dev_id);
+ if (dev_info == NULL) {
+ pr_err("%s: unknown flash device id (0x%02x) found.\n", __func__,
+ dev_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ vendor_info = find_nand_flash_vendor(vendor_id);
+ if (vendor_info == NULL) {
+ pr_err("%s: unknown flash vendor id (0x%02x) found.\n", __func__,
+ vendor_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ /* loop through and see if we can find more devices */
+ for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
+ select_chip(info, cnt);
+ /* TODO: figure out what to do about errors here */
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+ if ((dev_id != ((tmp >> 8) & 0xff)) ||
+ (vendor_id != (tmp & 0xff)))
+ break;
+ }
+
+ pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
+ DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
+ dev_info->name);
+ info->chip.num_chips = cnt;
+ info->chip.chipsize = dev_info->chipsize << 20;
+ mtd->size = info->chip.num_chips * info->chip.chipsize;
+
+ /* format of 4th id byte returned by READ ID
+ * bit 7 = rsvd
+ * bit 6 = bus width. 1 == 16bit, 0 == 8bit
+ * bits 5:4 = data block size. 64kb * (2^val)
+ * bit 3 = rsvd
+ * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
+ * bits 1:0 = page size. 1kb * (2^val)
+ */
+
+ /* TODO: we should reconcile the information read from chip and
+ * the data given to us in tegra_nand_platform->chip_parms??
+ * platform data will give us timing information. */
+
+ /* page_size */
+ tmp = dev_parms & 0x3;
+ mtd->writesize = 1024 << tmp;
+ info->chip.column_mask = mtd->writesize - 1;
+
+ /* Note: See oob layout description of why we only support 2k pages. */
+ if (mtd->writesize > 2048) {
+ pr_err("%s: Large page devices with pagesize > 2kb are NOT "
+ "supported\n", __func__);
+ goto out_error;
+ } else if (mtd->writesize < 2048) {
+ pr_err("%s: Small page devices are NOT supported\n", __func__);
+ goto out_error;
+ }
+
+ /* spare area, must be at least 64 bytes */
+ tmp = (dev_parms >> 2) & 0x1;
+ tmp = (8 << tmp) * (mtd->writesize / 512);
+ if (tmp < 64) {
+ pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
+ goto out_error;
+ }
+ mtd->oobsize = tmp;
+ mtd->oobavail = tegra_nand_oob_64.oobavail;
+
+ /* data block size (erase size) (w/o spare) */
+ tmp = (dev_parms >> 4) & 0x3;
+ mtd->erasesize = (64 * 1024) << tmp;
+ info->chip.block_shift = ffs(mtd->erasesize) - 1;
+
+ /* used to select the appropriate chip/page in case multiple devices
+ * are connected */
+ info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
+ info->chip.page_shift = ffs(mtd->writesize) - 1;
+ info->chip.page_mask =
+ (info->chip.chipsize >> info->chip.page_shift) - 1;
+
+ /* now fill in the rest of the mtd fields */
+ mtd->ecclayout = &tegra_nand_oob_64;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+
+ mtd->erase = tegra_nand_erase;
+ mtd->lock = NULL;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = tegra_nand_read;
+ mtd->write = tegra_nand_write;
+ mtd->read_oob = tegra_nand_read_oob;
+ mtd->write_oob = tegra_nand_write_oob;
+
+ mtd->resume = tegra_nand_resume;
+ mtd->suspend = tegra_nand_suspend;
+ mtd->block_isbad = tegra_nand_block_isbad;
+ mtd->block_markbad = tegra_nand_block_markbad;
+
+ /* TODO: should take vendor_id/device_id */
+ set_chip_timing(info);
+
+ return 0;
+
+out_error:
+ pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
+ return err;
+}
+
+static int __devinit
+tegra_nand_probe(struct platform_device *pdev)
+{
+ struct tegra_nand_platform *plat = pdev->dev.platform_data;
+ struct tegra_nand_info *info = NULL;
+ struct tegra_nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+ uint64_t num_erase_blocks;
+
+ pr_debug("%s: probing (%p)\n", __func__, pdev);
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ return -EINVAL;
+ } else if (!plat->chip_parms) {
+ pr_err("%s: no platform nand parms\n", __func__);
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: no memory for flash info\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->plat = plat;
+
+ platform_set_drvdata(pdev, info);
+
+ init_completion(&info->cmd_complete);
+ init_completion(&info->dma_complete);
+
+ mutex_init(&info->lock);
+ spin_lock_init(&info->ecc_lock);
+
+ chip = &info->chip;
+ chip->priv = &info->mtd;
+ chip->curr_chip = -1;
+
+ mtd = &info->mtd;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->priv = &info->chip;
+ mtd->owner = THIS_MODULE;
+
+ /* HACK: allocate a dma buffer to hold 1 page oob data */
+ info->oob_dma_buf = dma_alloc_coherent(NULL, 64,
+ &info->oob_dma_addr, GFP_KERNEL);
+ if (!info->oob_dma_buf) {
+ err = -ENOMEM;
+ goto out_free_info;
+ }
+
+ /* this will store the ecc error vector info */
+ info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
+ GFP_KERNEL);
+ if (!info->ecc_buf) {
+ err = -ENOMEM;
+ goto out_free_dma_buf;
+ }
+
+ /* grab the irq */
+ if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
+ pr_err("NAND IRQ resource not defined\n");
+ err = -EINVAL;
+ goto out_free_ecc_buf;
+ }
+
+ err = request_irq(pdev->resource[0].start, tegra_nand_irq,
+ IRQF_SHARED, DRIVER_NAME, info);
+ if (err) {
+ pr_err("Unable to request IRQ %d (%d)\n",
+ pdev->resource[0].start, err);
+ goto out_free_ecc_buf;
+ }
+
+ /* TODO: configure pinmux here?? */
+ info->clk = clk_get(&pdev->dev, NULL);
+ clk_set_rate(info->clk, 108000000);
+
+ cfg_hwstatus_mon(info);
+
+ /* clear all pending interrupts */
+ writel(readl(ISR_REG), ISR_REG);
+
+ /* clear dma interrupt */
+ writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
+
+ /* enable interrupts */
+ disable_ints(info, 0xffffffff);
+ enable_ints(info, IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
+ IER_ECC_ERR | IER_GIE);
+
+ if (tegra_nand_scan(mtd, plat->max_chips)) {
+ err = -ENXIO;
+ goto out_dis_irq;
+ }
+ pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
+ DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
+
+ /* allocate memory to hold the ecc error info */
+ info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
+ info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!info->ecc_errs) {
+ err = -ENOMEM;
+ goto out_dis_irq;
+ }
+
+ /* alloc the bad block bitmap */
+ num_erase_blocks = mtd->size;
+ do_div(num_erase_blocks, mtd->erasesize);
+ info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!info->bb_bitmap) {
+ err = -ENOMEM;
+ goto out_free_ecc;
+ }
+
+ err = scan_bad_blocks(info);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+#if 0
+ dump_nand_regs();
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(mtd, info->parts, err);
+ } else if (err <= 0 && plat->parts) {
+ err = add_mtd_partitions(mtd, plat->parts, plat->nr_parts);
+ } else
+#endif
+ err = add_mtd_device(mtd);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ pr_debug("%s: probe done.\n", __func__);
+ return 0;
+
+out_free_bbbmap:
+ kfree(info->bb_bitmap);
+
+out_free_ecc:
+ kfree(info->ecc_errs);
+
+out_dis_irq:
+ disable_ints(info, 0xffffffff);
+ free_irq(pdev->resource[0].start, info);
+
+out_free_ecc_buf:
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+
+out_free_dma_buf:
+ dma_free_coherent(NULL, 64, info->oob_dma_buf,
+ info->oob_dma_addr);
+
+out_free_info:
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+
+ return err;
+}
+
+static int __devexit
+tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (info) {
+ free_irq(pdev->resource[0].start, info);
+ kfree(info->bb_bitmap);
+ kfree(info->ecc_errs);
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+ dma_free_coherent(NULL, info->mtd.writesize + info->mtd.oobsize,
+ info->oob_dma_buf, info->oob_dma_addr);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tegra_nand_driver = {
+ .probe = tegra_nand_probe,
+ .remove = __devexit_p(tegra_nand_remove),
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = "tegra_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init
+tegra_nand_init(void)
+{
+ return platform_driver_register(&tegra_nand_driver);
+}
+
+static void __exit
+tegra_nand_exit(void)
+{
+ platform_driver_unregister(&tegra_nand_driver);
+}
+
+module_init(tegra_nand_init);
+module_exit(tegra_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
--- /dev/null
+/*
+ * drivers/mtd/devices/tegra_nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTD_DEV_TEGRA_NAND_H
+#define __MTD_DEV_TEGRA_NAND_H
+
+#include <mach/io.h>
+
+#define __BITMASK0(len) ((1 << (len)) - 1)
+#define __BITMASK(start, len) (__BITMASK0(len) << (start))
+#define REG_BIT(bit) (1 << (bit))
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_FIELD_MASK(start, len) (~(__BITMASK((start), (len))))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
+
+/* tegra nand registers... */
+#define TEGRA_NAND_PHYS 0x70008000
+#define TEGRA_NAND_BASE IO_TO_VIRT(TEGRA_NAND_PHYS)
+#define COMMAND_REG (TEGRA_NAND_BASE + 0x00)
+#define STATUS_REG (TEGRA_NAND_BASE + 0x04)
+#define ISR_REG (TEGRA_NAND_BASE + 0x08)
+#define IER_REG (TEGRA_NAND_BASE + 0x0c)
+#define CONFIG_REG (TEGRA_NAND_BASE + 0x10)
+#define TIMING_REG (TEGRA_NAND_BASE + 0x14)
+#define RESP_REG (TEGRA_NAND_BASE + 0x18)
+#define TIMING2_REG (TEGRA_NAND_BASE + 0x1c)
+#define CMD_REG1 (TEGRA_NAND_BASE + 0x20)
+#define CMD_REG2 (TEGRA_NAND_BASE + 0x24)
+#define ADDR_REG1 (TEGRA_NAND_BASE + 0x28)
+#define ADDR_REG2 (TEGRA_NAND_BASE + 0x2c)
+#define DMA_MST_CTRL_REG (TEGRA_NAND_BASE + 0x30)
+#define DMA_CFG_A_REG (TEGRA_NAND_BASE + 0x34)
+#define DMA_CFG_B_REG (TEGRA_NAND_BASE + 0x38)
+#define FIFO_CTRL_REG (TEGRA_NAND_BASE + 0x3c)
+#define DATA_BLOCK_PTR_REG (TEGRA_NAND_BASE + 0x40)
+#define TAG_PTR_REG (TEGRA_NAND_BASE + 0x44)
+#define ECC_PTR_REG (TEGRA_NAND_BASE + 0x48)
+#define DEC_STATUS_REG (TEGRA_NAND_BASE + 0x4c)
+#define HWSTATUS_CMD_REG (TEGRA_NAND_BASE + 0x50)
+#define HWSTATUS_MASK_REG (TEGRA_NAND_BASE + 0x54)
+#define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58)
+#define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c)
+#define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60)
+
+/* nand_command bits */
+#define COMMAND_GO REG_BIT(31)
+#define COMMAND_CLE REG_BIT(30)
+#define COMMAND_ALE REG_BIT(29)
+#define COMMAND_PIO REG_BIT(28)
+#define COMMAND_TX REG_BIT(27)
+#define COMMAND_RX REG_BIT(26)
+#define COMMAND_SEC_CMD REG_BIT(25)
+#define COMMAND_AFT_DAT REG_BIT(24)
+#define COMMAND_TRANS_SIZE(val) REG_FIELD((val), 20, 4)
+#define COMMAND_A_VALID REG_BIT(19)
+#define COMMAND_B_VALID REG_BIT(18)
+#define COMMAND_RD_STATUS_CHK REG_BIT(17)
+#define COMMAND_RBSY_CHK REG_BIT(16)
+#define COMMAND_CE(val) REG_BIT(8 + ((val) & 0x7))
+#define COMMAND_CLE_BYTE_SIZE(val) REG_FIELD((val), 4, 2)
+#define COMMAND_ALE_BYTE_SIZE(val) REG_FIELD((val), 0, 4)
+
+/* nand isr bits */
+#define ISR_UND REG_BIT(7)
+#define ISR_OVR REG_BIT(6)
+#define ISR_CMD_DONE REG_BIT(5)
+#define ISR_ECC_ERR REG_BIT(4)
+
+/* nand ier bits */
+#define IER_ERR_TRIG_VAL(val) REG_FIELD((val), 16, 4)
+#define IER_UND REG_BIT(7)
+#define IER_OVR REG_BIT(6)
+#define IER_CMD_DONE REG_BIT(5)
+#define IER_ECC_ERR REG_BIT(4)
+#define IER_GIE REG_BIT(0)
+
+/* nand config bits */
+#define CONFIG_HW_ECC REG_BIT(31)
+#define CONFIG_ECC_SEL REG_BIT(30)
+#define CONFIG_HW_ERR_CORRECTION REG_BIT(29)
+#define CONFIG_PIPELINE_EN REG_BIT(28)
+#define CONFIG_ECC_EN_TAG REG_BIT(27)
+#define CONFIG_TVALUE(val) REG_FIELD((val), 24, 2)
+#define CONFIG_SKIP_SPARE REG_BIT(23)
+#define CONFIG_COM_BSY REG_BIT(22)
+#define CONFIG_BUS_WIDTH REG_BIT(21)
+#define CONFIG_PAGE_SIZE_SEL(val) REG_FIELD((val), 16, 3)
+#define CONFIG_SKIP_SPARE_SEL(val) REG_FIELD((val), 14, 2)
+#define CONFIG_TAG_BYTE_SIZE(val) REG_FIELD((val), 0, 8)
+
+/* nand timing bits */
+#define TIMING_TRP_RESP(val) REG_FIELD((val), 28, 4)
+#define TIMING_TWB(val) REG_FIELD((val), 24, 4)
+#define TIMING_TCR_TAR_TRR(val) REG_FIELD((val), 20, 4)
+#define TIMING_TWHR(val) REG_FIELD((val), 16, 4)
+#define TIMING_TCS(val) REG_FIELD((val), 14, 2)
+#define TIMING_TWH(val) REG_FIELD((val), 12, 2)
+#define TIMING_TWP(val) REG_FIELD((val), 8, 4)
+#define TIMING_TRH(val) REG_FIELD((val), 4, 2)
+#define TIMING_TRP(val) REG_FIELD((val), 0, 4)
+
+/* nand timing2 bits */
+#define TIMING2_TADL(val) REG_FIELD((val), 0, 4)
+
+/* nand dma_mst_ctrl bits */
+#define DMA_CTRL_DMA_GO REG_BIT(31)
+#define DMA_CTRL_DIR REG_BIT(30)
+#define DMA_CTRL_DMA_PERF_EN REG_BIT(29)
+#define DMA_CTRL_IE_DMA_DONE REG_BIT(28)
+#define DMA_CTRL_REUSE_BUFFER REG_BIT(27)
+#define DMA_CTRL_BURST_SIZE(val) REG_FIELD((val), 24, 3)
+#define DMA_CTRL_IS_DMA_DONE REG_BIT(20)
+#define DMA_CTRL_DMA_EN_A REG_BIT(2)
+#define DMA_CTRL_DMA_EN_B REG_BIT(1)
+
+/* nand dma_cfg_a/cfg_b bits */
+#define DMA_CFG_BLOCK_SIZE(val) REG_FIELD((val), 0, 16)
+
+/* nand dec_status bits */
+#define DEC_STATUS_ERR_PAGE_NUM(val) REG_GET_FIELD((val), 24, 8)
+#define DEC_STATUS_ERR_CNT(val) REG_GET_FIELD((val), 16, 8)
+#define DEC_STATUS_ECC_FAIL_A REG_BIT(1)
+#define DEC_STATUS_ECC_FAIL_B REG_BIT(0)
+
+/* nand hwstatus_mask bits */
+#define HWSTATUS_RDSTATUS_MASK(val) REG_FIELD((val), 24, 8)
+#define HWSTATUS_RDSTATUS_EXP_VAL(val) REG_FIELD((val), 16, 8)
+#define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8)
+#define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8)
+
+#endif
+
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
bond_for_each_slave(bond, slave, i) {
if (slave->dev == slave_dev) {
- break;
+ return slave;
}
}
- return slave;
+ return 0;
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__E1000_DOWN, &adapter->flags);
/* disable receives in the hardware */
rctl = er32(RCTL);
e1000_irq_disable(adapter);
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * timers and tasks from rescheduling.
+ */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
rcu_read_unlock();
dev_kfree_skb(skb);
stats->tx_dropped++;
+ if (skb_queue_len(&dp->tq) != 0)
+ goto resched;
break;
}
rcu_read_unlock();
}
}
+static inline void
+jme_phy_on(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr &= ~BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+}
+
static int
jme_open(struct net_device *netdev)
{
jme_start_irq(jme);
- if (test_bit(JME_FLAG_SSET, &jme->flags))
+ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
+ jme_phy_on(jme);
jme_set_settings(netdev, &jme->old_ecmd);
- else
+ } else {
jme_reset_phy_processor(jme);
+ }
jme_reset_link(jme);
jme_clear_pm(jme);
pci_restore_state(pdev);
- if (test_bit(JME_FLAG_SSET, &jme->flags))
+ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
+ jme_phy_on(jme);
jme_set_settings(netdev, &jme->old_ecmd);
- else
+ } else {
jme_reset_phy_processor(jme);
+ }
jme_start_irq(jme);
netif_device_attach(netdev);
PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
- mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
- MII_88E1121_PHY_MSCR_DELAY_MASK;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
- MII_88E1121_PHY_MSCR_TX_DELAY);
- else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
- else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
+ if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
- if (err < 0)
- return err;
+ mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
+ MII_88E1121_PHY_MSCR_DELAY_MASK;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
+ MII_88E1121_PHY_MSCR_TX_DELAY);
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
+
+ err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+ if (err < 0)
+ return err;
+ }
phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
abort:
kfree_skb(skb);
- return 0;
+ return 1;
}
/************************************************************************
/* Multicast Address 1~4 case */
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- if (i < MCAST_MAX) {
- adrp = (u16 *) ha->addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
- } else {
- iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
- }
+ if (i >= MCAST_MAX)
+ break;
+ adrp = (u16 *) ha->addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ while (i < MCAST_MAX) {
+ iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
i++;
}
}
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
-static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
+static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 opts1 = le32_to_cpu(desc->opts1);
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
continue;
}
- rtl8169_rx_csum(skb, desc);
-
if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
dma_sync_single_for_device(&pdev->dev, addr,
pkt_size, PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
+ rtl8169_rx_csum(skb, status);
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_phy(dev, tp);
if (netif_running(dev))
__rtl8169_resume(dev);
tp->saved_wolopts = 0;
spin_unlock_irq(&tp->lock);
+ rtl8169_init_phy(dev, tp);
+
__rtl8169_resume(dev);
return 0;
ENTER;
master = READ_REG(priv, regINIT_SEMAPHORE);
if (!READ_REG(priv, regINIT_STATUS) && master) {
- rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
+ rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
if (rc)
goto out;
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
-MODULE_FIRMWARE("tehuti/firmware.bin");
+MODULE_FIRMWARE("tehuti/bdx.bin");
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet already took usb runtime pm, so have to enable the feature
+ * for usb interface, otherwise usb_autopm_get_interface may return
+ * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
u8 splitmic;
struct ath_regulatory regulatory;
val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
}
+ if (AR_SREV_9280(ah))
+ val |= AR_WA_BIT22;
+
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00008014, 0x044c044c, 0x08980898},
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x03721821, 0x03721821},
+ {0x00009e00, 0x0372131c, 0x0372131c},
{0x0000a230, 0x0000000b, 0x00000016},
{0x0000a254, 0x00000898, 0x00001130},
};
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
+ {0x00009834, 0x6400a290},
{0x00009838, 0x0108ecff},
{0x0000983c, 0x14750600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a1e},
+ {0x0000a22c, 0x01036a27},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
+ {0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
{0x0000a448, 0x06000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a600, 0x00000000},
- {0x0000a604, 0x00000000},
- {0x0000a608, 0x00000000},
- {0x0000a60c, 0x00000000},
- {0x0000a610, 0x00000000},
- {0x0000a614, 0x00000000},
- {0x0000a618, 0x00000000},
- {0x0000a61c, 0x00000000},
- {0x0000a620, 0x00000000},
- {0x0000a624, 0x00000000},
- {0x0000a628, 0x00000000},
- {0x0000a62c, 0x00000000},
- {0x0000a630, 0x00000000},
- {0x0000a634, 0x00000000},
- {0x0000a638, 0x00000000},
- {0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00000637},
+ {0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
- {0x0000b2dc, 0x00000000},
- {0x0000b2e0, 0x00000000},
- {0x0000b2e4, 0x00000000},
- {0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
{0x0000b8f4, 0x00000000},
{0x0000c2d0, 0x00000080},
{0x0000c2d4, 0x00000000},
- {0x0000c2dc, 0x00000000},
- {0x0000c2e0, 0x00000000},
- {0x0000c2e4, 0x00000000},
- {0x0000c2e8, 0x00000000},
{0x0000c2ec, 0x00000000},
{0x0000c2f0, 0x00000000},
{0x0000c2f4, 0x00000000},
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00008144, 0xffffffff},
{0x00008168, 0x00000000},
{0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
{0x000081c0, 0x00000000},
{0x000081c4, 0x33332210},
{0x000081c8, 0x00000000},
{0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
{0x000081ec, 0x00000000},
{0x000081f0, 0x00000000},
{0x000081f4, 0x00000000},
#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
+
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
}
},
.ctlPowerData_2G = {
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
},
.modalHeader5G = {
/* 4 idle,t1,t2,b (4 bits per setting) */
.ctlPowerData_5G = {
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 0}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 0}, {60, 1}, {60, 1}, {60, 0},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
- {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 0}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
}
},
}
struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
if (is2GHz)
- return ctl_2g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
else
- return ctl_5g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
}
static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
if (is2GHz) {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
- ctl_2g[idx].ctlEdges[edge - 1].flag)
- return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
} else {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
- ctl_5g[idx].ctlEdges[edge - 1].flag)
- return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
}
return AR9300_MAX_RATE_POWER;
u8 tPow2x[14];
} __packed;
-struct cal_ctl_edge_pwr {
- u8 tPower:6,
- flag:2;
-} __packed;
-
struct cal_ctl_data_2g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
} __packed;
struct cal_ctl_data_5g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
struct ar9300_eeprom {
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
} else if (rxsp->status11 & AR_MichaelErr) {
rxs->rs_status |= ATH9K_RXERR_MIC;
- }
+ } else if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
}
return 0;
(((Y[6] - Y[3]) * 1 << scale_factor) +
(x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
Y_intercept =
(G_fxp * (x_est[0] - x_est[3]) +
(1 << scale_factor)) / (1 << scale_factor) + Y[3];
for (i = 0; i <= 3; i++) {
y_est[i] = i * 32;
-
- /* prevent division by zero */
- if (G_fxp == 0)
- return false;
-
x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
}
+ if (y_est[max_index] == 0)
+ return false;
+
x_est_fxp1_nonlin =
x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
G_fxp) / G_fxp;
Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
scale_B = scale_B / (1 << Q_scale_B);
+ if (scale_B == 0)
+ return false;
Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
beta_raw = beta_raw / (1 << Q_beta);
u8 rxotherant;
u32 *rxlink;
unsigned int rxfilter;
- spinlock_t rxflushlock;
+ spinlock_t pcu_lock;
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
void ath_tx_edma_tasklet(struct ath_softc *sc);
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
-void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
- u16 tid, u16 *ssn);
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath9k_enable_ps(struct ath_softc *sc);
#define SC_OP_RXFLUSH BIT(7)
#define SC_OP_LED_ASSOCIATED BIT(8)
#define SC_OP_LED_ON BIT(9)
-#define SC_OP_SCANNING BIT(10)
#define SC_OP_TSF_RESET BIT(11)
#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
#define SC_OP_BT_SCAN BIT(13)
ath_print(common, ATH_DBG_BEACON,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
- ath_reset(sc, false);
+ ath_reset(sc, true);
}
return;
set_bit(idx, common->keymap);
if (key->alg == ALG_TKIP) {
set_bit(idx + 64, common->keymap);
+ set_bit(idx, common->tkip_keymap);
+ set_bit(idx + 64, common->tkip_keymap);
if (common->splitmic) {
set_bit(idx + 32, common->keymap);
set_bit(idx + 64 + 32, common->keymap);
+ set_bit(idx + 32, common->tkip_keymap);
+ set_bit(idx + 64 + 32, common->tkip_keymap);
}
}
return;
clear_bit(key->hw_key_idx + 64, common->keymap);
+
+ clear_bit(key->hw_key_idx, common->tkip_keymap);
+ clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
+
if (common->splitmic) {
ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+
+ clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath9k_cmn_key_delete);
for (i = 0; (i < num_band_edges) &&
(pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
- twiceMaxEdgePower = pRdEdgesPower[i].tPower;
+ twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
break;
} else if ((i > 0) &&
(freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
is2GHz))) {
if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
is2GHz) < freq &&
- pRdEdgesPower[i - 1].flag) {
+ CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
twiceMaxEdgePower =
- pRdEdgesPower[i - 1].tPower;
+ CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
}
break;
}
#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
+#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+
enum eeprom_param {
EEP_NFTHRESH_5,
EEP_NFTHRESH_2,
u8 tPow2x[8];
} __packed;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-struct cal_ctl_edges {
- u8 bChannel;
- u8 flag:2, tPower:6;
-} __packed;
-#else
struct cal_ctl_edges {
u8 bChannel;
- u8 tPower:6, flag:2;
+ u8 ctl;
} __packed;
-#endif
struct cal_data_op_loop_ar9287 {
u8 pwrPdg[2][5];
int addr, eep_start_loc;
eep_data = (u16 *)eep;
- if (ah->hw_version.devid == 0x7015)
+ if (AR9287_HTC_DEVID(ah))
eep_start_loc = AR9287_HTC_EEP_START_LOC;
else
eep_start_loc = AR9287_EEP_START_LOC;
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
- scaledPower = max((u16)0, scaledPower);
-
if (IS_CHAN_2GHZ(chan)) {
numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
SUB_NUM_CTL_MODES_AT_2G_40;
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
+ { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
+ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
+ { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
{ },
};
}
kfree(buf);
- if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
+ switch (hif_dev->device_id) {
+ case 0x7010:
+ case 0x7015:
+ case 0x9018:
+ case 0xA704:
+ case 0x1200:
firm_offset = AR7010_FIRMWARE_TEXT;
- else
+ break;
+ default:
firm_offset = AR9271_FIRMWARE_TEXT;
+ break;
+ }
/*
* Issue FW download complete command to firmware.
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
priv->htc->credits = 45;
break;
default:
tx_hdr.data_type = ATH9K_HTC_NORMAL;
}
- if (ieee80211_is_data(fc)) {
+ if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
"Failed allocating banks for "
"external radio\n");
+ ath9k_hw_rf_free_ext_banks(ah);
return ecode;
}
val = REG_READ(ah, AR7010_GPIO_IN);
return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
} else if (AR_SREV_9300_20_OR_LATER(ah))
- return MS_REG_READ(AR9300, gpio) != 0;
+ return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
else if (AR_SREV_9287_10_OR_LATER(ah))
* on 5 MHz steps, we support the channels which we know
* we have calibration data for all cards though to make
* this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
CHAN2G(2412, 0), /* Channel 1 */
CHAN2G(2417, 1), /* Channel 2 */
CHAN2G(2422, 2), /* Channel 3 */
* on 5 MHz steps, we support the channels which we know
* we have calibration data for all cards though to make
* this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
/* _We_ call this UNII 1 */
CHAN5G(5180, 14), /* Channel 36 */
CHAN5G(5200, 15), /* Channel 40 */
return -EIO;
}
-static void ath9k_init_channels_rates(struct ath_softc *sc)
+static int ath9k_init_channels_rates(struct ath_softc *sc)
{
+ void *channels;
+
if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ channels = kmemdup(ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
ARRAY_SIZE(ath9k_2ghz_chantable);
}
if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ channels = kmemdup(ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
+ if (!channels) {
+ if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
+ kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
ARRAY_SIZE(ath9k_5ghz_chantable);
sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates) - 4;
}
+ return 0;
}
static void ath9k_init_misc(struct ath_softc *sc)
if (ret)
goto err_btcoex;
+ ret = ath9k_init_channels_rates(sc);
+ if (ret)
+ goto err_btcoex;
+
ath9k_init_crypto(sc);
- ath9k_init_channels_rates(sc);
ath9k_init_misc(sc);
return 0;
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (AR_SREV_5416(sc->sc_ah))
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->queues = 4;
hw->max_rates = 4;
{
int i = 0;
+ if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
+ kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
+
+ if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
+ kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
+
if ((sc->btcoex.no_stomp_timer) &&
sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
+ else if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
}
return 0;
*/
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, false);
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
stopped = ath_stoprecv(sc);
/* XXX: do not flush receive queue here. We don't want
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto ps_restore;
}
spin_unlock_bh(&sc->sc_resetlock);
ath_print(common, ATH_DBG_FATAL,
"Unable to restart recv logic\n");
r = -EIO;
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto ps_restore;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
ath9k_hw_set_interrupts(ah, ah->imask);
- if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) {
- ath_start_ani(common);
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
ath_beacon_config(sc, NULL);
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ ath_start_ani(common);
}
ps_restore:
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_common *common = ath9k_hw_common(ah);
int chain;
if (!caldata || !caldata->paprd_done)
ath9k_ps_wakeup(sc);
ar9003_paprd_enable(ah, false);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->caps.tx_chainmask & BIT(chain)))
+ if (!(common->tx_chainmask & BIT(chain)))
continue;
ar9003_paprd_populate_single_table(ah, caldata, chain);
struct ieee80211_supported_band *sband = &sc->sbands[band];
struct ath_tx_control txctl;
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_common *common = ath9k_hw_common(ah);
int qnum, ftype;
int chain_ok = 0;
int chain;
ath9k_ps_wakeup(sc);
ar9003_paprd_init_table(ah);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->caps.tx_chainmask & BIT(chain)))
+ if (!(common->tx_chainmask & BIT(chain)))
continue;
chain_ok = 0;
msleep(1);
}
- ath_reset(sc, false);
+ ath_reset(sc, true);
out:
ath9k_ps_restore(sc);
ath9k_ps_wakeup(sc);
if (status & ATH9K_INT_FATAL) {
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
return;
}
rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
if (status & rxmask) {
- spin_lock_bh(&sc->rx.rxflushlock);
+ spin_lock_bh(&sc->rx.pcu_lock);
/* Check for high priority Rx first */
if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
- spin_unlock_bh(&sc->rx.rxflushlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
}
if (status & ATH9K_INT_TX) {
if (!ah->curchan)
ah->curchan = ath_get_curchannel(sc, sc->hw);
+ spin_lock_bh(&sc->rx.pcu_lock);
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
if (ath_startrecv(sc) != 0) {
ath_print(common, ATH_DBG_FATAL,
"Unable to restart recv logic\n");
+ spin_unlock_bh(&sc->rx.pcu_lock);
return;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, false); /* clear pending tx frames */
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
ath_stoprecv(sc); /* turn off frame recv */
ath_flushrecv(sc); /* flush recv queue */
spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
+
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_ps_restore(sc);
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
+
+ spin_lock_bh(&sc->rx.pcu_lock);
+
ath_stoprecv(sc);
ath_flushrecv(sc);
ath_print(common, ATH_DBG_FATAL,
"Unable to start recv logic\n");
+ spin_unlock_bh(&sc->rx.pcu_lock);
+
/*
* We may be doing a reset in response to a request
* that changes the channel so update any state that
ath_update_txpow(sc);
- if (sc->sc_flags & SC_OP_BEACONS)
+ if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
ath_beacon_config(sc, NULL); /* restart beacons */
ath9k_hw_set_interrupts(ah, ah->imask);
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
+ spin_lock_bh(&sc->rx.pcu_lock);
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
"(freq %u MHz)\n", r,
curchan->center_freq);
spin_unlock_bh(&sc->sc_resetlock);
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto mutex_unlock;
}
spin_unlock_bh(&sc->sc_resetlock);
ath_print(common, ATH_DBG_FATAL,
"Unable to start recv logic\n");
r = -EIO;
+ spin_unlock_bh(&sc->rx.pcu_lock);
goto mutex_unlock;
}
+ spin_unlock_bh(&sc->rx.pcu_lock);
/* Setup our intr mask. */
ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
* before setting the invalid flag. */
ath9k_hw_set_interrupts(ah, 0);
+ spin_lock_bh(&sc->rx.pcu_lock);
if (!(sc->sc_flags & SC_OP_INVALID)) {
ath_drain_all_txq(sc, false);
ath_stoprecv(sc);
ath9k_hw_phy_disable(ah);
} else
sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->rx.pcu_lock);
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ bool bs_valid = false;
int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
"slot\n", __func__);
sc->beacon.bslot[i] = NULL;
sc->beacon.bslot_aphy[i] = NULL;
- }
+ } else if (sc->beacon.bslot[i])
+ bs_valid = true;
+ }
+ if (!bs_valid && (sc->sc_ah->imask & ATH9K_INT_SWBA)) {
+ /* Disable SWBA interrupt */
+ sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+ ath9k_ps_restore(sc);
}
sc->nvifs--;
* IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (conf->flags & IEEE80211_CONF_PS) {
sc->ps_flags |= PS_ENABLED;
/*
sc->ps_enabled = false;
sc->ps_flags &= ~(PS_ENABLED |
PS_NULLFUNC_COMPLETED);
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
}
}
}
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
break;
case IEEE80211_AMPDU_TX_START:
ath9k_ps_wakeup(sc);
- ath_tx_aggr_start(sc, sta, tid, ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = ath_tx_aggr_start(sc, sta, tid, ssn);
+ if (!ret)
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP:
aphy->state = ATH_WIPHY_SCAN;
ath9k_wiphy_pause_all_forced(sc, aphy);
- sc->sc_flags |= SC_OP_SCANNING;
mutex_unlock(&sc->mutex);
}
mutex_lock(&sc->mutex);
aphy->state = ATH_WIPHY_ACTIVE;
- sc->sc_flags &= ~SC_OP_SCANNING;
mutex_unlock(&sc->mutex);
}
for (i = 0; i < rateset->rs_nrates; i++) {
for (j = 0; j < rate_table->rate_cnt; j++) {
u32 phy = rate_table->info[j].phy;
- u16 rate_flags = rate_table->info[i].rate_flags;
+ u16 rate_flags = rate_table->info[j].rate_flags;
u8 rate = rateset->rs_rates[i];
u8 dot11rate = rate_table->info[j].dot11rate;
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
return;
+ if (!(tx_info->flags & IEEE80211_TX_STAT_AMPDU)) {
+ tx_info->status.ampdu_ack_len =
+ (tx_info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
+ tx_info->status.ampdu_len = 1;
+ }
+
/*
* If an underrun error is seen assume it as an excessive retry only
* if max frame trigger level has been reached (2 KB for singel stream,
ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
- spin_unlock_bh(&sc->rx.rxbuflock);
-
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
+ ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxbuflock);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
- spin_unlock_bh(&sc->rx.rxbuflock);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
struct ath_buf *bf;
int error = 0;
- spin_lock_init(&sc->rx.rxflushlock);
+ spin_lock_init(&sc->rx.pcu_lock);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
ath9k_hw_rxena(ah);
start_recv:
- spin_unlock_bh(&sc->rx.rxbuflock);
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
+ ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
return 0;
}
struct ath_hw *ah = sc->sc_ah;
bool stopped;
- ath9k_hw_stoppcurecv(ah);
+ spin_lock_bh(&sc->rx.rxbuflock);
+ ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
ath_edma_stop_recv(sc);
else
sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->rx.rxbuflock);
return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
ath_rx_tasklet(sc, 1, false);
sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_unlock_bh(&sc->rx.rxflushlock);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->ps_flags &= ~PS_WAIT_FOR_CAB;
+ sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
- else
+ /*
+ * The MIC error bit is only valid if the frame
+ * is not a control frame or fragment, and it was
+ * decrypted using a valid TKIP key.
+ */
+ if (!ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap))
rxs->flag |= RX_FLAG_MMIC_ERROR;
+ else
+ rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
}
/*
* Reject error frames with the exception of
int hdrlen, padpos, padsize;
u8 keyix;
__le16 fc;
+ bool is_mc;
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *) skb->data;
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
padpos = ath9k_cmn_padpos(hdr->frame_control);
keyix = rx_stats->rs_keyix;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ if ((is_mc || !(keyix == ATH9K_RXKEYIX_INVALID)) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
u8 rx_status_len = ah->caps.rx_status_len;
u64 tsf = 0;
u32 tsf_lower = 0;
+ unsigned long flags;
if (edma)
dma_type = DMA_BIDIRECTIONAL;
sc->rx.rxotherant = 0;
}
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (unlikely(ath9k_check_auto_sleep(sc) ||
(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA))))
ath_rx_ps(sc, skb);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
#define AR_WA_ANALOG_SHIFT (1 << 20)
#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
+#define AR_WA_BIT22 (1 << 22)
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
#define AR_DEVID_7010(_ah) \
(((_ah)->hw_version.devid == 0x7010) || \
((_ah)->hw_version.devid == 0x7015) || \
- ((_ah)->hw_version.devid == 0x9018))
+ ((_ah)->hw_version.devid == 0x9018) || \
+ ((_ah)->hw_version.devid == 0xA704) || \
+ ((_ah)->hw_version.devid == 0x1200))
+
+#define AR9287_HTC_DEVID(_ah) \
+ (((_ah)->hw_version.devid == 0x7015) || \
+ ((_ah)->hw_version.devid == 0x1200))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
#define AR9287_GPIO_IN_VAL_S 11
#define AR9271_GPIO_IN_VAL 0xFFFF0000
#define AR9271_GPIO_IN_VAL_S 16
-#define AR9300_GPIO_IN_VAL 0x0001FFFF
-#define AR9300_GPIO_IN_VAL_S 0
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
+#define AR_GPIO_IN 0x404c
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
+
#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ int seqno);
enum {
MCS_HT20,
struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
struct ath_buf *bf;
struct list_head bf_head;
- INIT_LIST_HEAD(&bf_head);
+ struct ath_tx_status ts;
- WARN_ON(!tid->paused);
+ INIT_LIST_HEAD(&bf_head);
+ memset(&ts, 0, sizeof(ts));
spin_lock_bh(&txq->axq_lock);
- tid->paused = false;
while (!list_empty(&tid->buf_q)) {
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- BUG_ON(bf_isretried(bf));
list_move_tail(&bf->list, &bf_head);
- ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+
+ if (bf_isretried(bf)) {
+ ath_tx_update_baw(sc, tid, bf->bf_seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+ } else {
+ ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+ }
}
spin_unlock_bh(&txq->axq_lock);
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
struct ieee80211_tx_rate rates[4];
+ int nframes;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
hw = bf->aphy->hw;
memcpy(rates, tx_info->control.rates, sizeof(rates));
+ nframes = bf->bf_nframes;
rcu_read_lock();
!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_rc_status(bf, ts, 0, 0, false);
+ ath_tx_rc_status(bf, ts, 1, 0, false);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
list_move_tail(&bf->list, &bf_head);
}
- if (!txpending) {
+ if (!txpending || (tid->state & AGGR_CLEANUP)) {
/*
* complete the acked-ones/xretried ones; update
* block-ack window
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
+ bf->bf_nframes = nframes;
ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
}
if (tid->state & AGGR_CLEANUP) {
+ ath_tx_flush_tid(sc, tid);
+
if (tid->baw_head == tid->baw_tail) {
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
-
- /* send buffered frames as singles */
- ath_tx_flush_tid(sc, tid);
}
- rcu_read_unlock();
- return;
}
rcu_read_unlock();
status != ATH_AGGR_BAW_CLOSED);
}
-void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
{
struct ath_atx_tid *txtid;
struct ath_node *an;
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
+
+ if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
+ return -EAGAIN;
+
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start;
+
+ return 0;
}
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
- struct ath_tx_status ts;
- struct ath_buf *bf;
- struct list_head bf_head;
-
- memset(&ts, 0, sizeof(ts));
- INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
return;
return;
}
- /* drop all software retried frames and mark this TID */
spin_lock_bh(&txq->axq_lock);
txtid->paused = true;
- while (!list_empty(&txtid->buf_q)) {
- bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
- if (!bf_isretried(bf)) {
- /*
- * NB: it's based on the assumption that
- * software retried frame will always stay
- * at the head of software queue.
- */
- break;
- }
- list_move_tail(&bf->list, &bf_head);
- ath_tx_update_baw(sc, txtid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- }
- spin_unlock_bh(&txq->axq_lock);
- if (txtid->baw_head != txtid->baw_tail) {
+ /*
+ * If frames are still being transmitted for this TID, they will be
+ * cleaned up during tx completion. To prevent race conditions, this
+ * TID can only be reused after all in-progress subframes have been
+ * completed.
+ */
+ if (txtid->baw_head != txtid->baw_tail)
txtid->state |= AGGR_CLEANUP;
- } else {
+ else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- ath_tx_flush_tid(sc, txtid);
- }
+ spin_unlock_bh(&txq->axq_lock);
+
+ ath_tx_flush_tid(sc, txtid);
}
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
- /* flush any pending frames if aggregation is enabled */
- if (sc->sc_flags & SC_OP_TXAGGR) {
- if (!retry_tx) {
- spin_lock_bh(&txq->axq_lock);
- ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
- }
- }
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
spin_lock_bh(&txq->axq_lock);
while (!list_empty(&txq->txq_fifo_pending)) {
}
spin_unlock_bh(&txq->axq_lock);
}
+
+ /* flush any pending frames if aggregation is enabled */
+ if (sc->sc_flags & SC_OP_TXAGGR) {
+ if (!retry_tx) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_drain_pending_buffers(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ BUG_ON(nbad > bf->bf_nframes);
+
+ tx_info->status.ampdu_len = bf->bf_nframes;
+ tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
(ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
- tx_info->status.ampdu_len = bf->bf_nframes;
- tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
}
}
*/
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &ts, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
ath9k_ps_wakeup(sc);
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
}
if (!bf_isampdu(bf)) {
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &txs, 0, txok, true);
+ ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
err_free_ssb:
kfree(sdio);
err_disable_func:
+ sdio_claim_host(func);
sdio_disable_func(func);
err_release_host:
sdio_release_host(func);
struct b43_sdio *sdio = sdio_get_drvdata(func);
ssb_bus_unregister(&sdio->ssb);
+ sdio_claim_host(func);
sdio_disable_func(func);
+ sdio_release_host(func);
kfree(sdio);
sdio_set_drvdata(func, NULL);
}
struct net_device *dev = priv->ndev;
int err = 0;
+ /* If we've called commit, we are reconfiguring or bringing the
+ * interface up. Maintaining countermeasures across this would
+ * be confusing, so note that we've disabled them. The port will
+ * be enabled later in orinoco_commit or __orinoco_up. */
+ priv->tkip_cm_active = 0;
+
err = orinoco_hw_program_rids(priv);
/* FIXME: what about netif_tx_lock */
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
/*
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping, and putting the
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
hw->eeprom_pda = true;
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
/*
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping, and putting the
*/
if (param->value) {
priv->tkip_cm_active = 1;
- ret = hermes_enable_port(hw, 0);
+ ret = hermes_disable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
- ret = hermes_disable_port(hw, 0);
+ ret = hermes_enable_port(hw, 0);
}
break;
list->max_entries = max_channel_num;
list->channels = kzalloc(sizeof(struct p54_channel_entry) *
max_channel_num, GFP_KERNEL);
- if (!list->channels)
+ if (!list->channels) {
+ ret = -ENOMEM;
goto free;
+ }
for (i = 0; i < max_channel_num; i++) {
if (i < priv->iq_autocal_len) {
MODULE_FIRMWARE("isl3886usb");
MODULE_FIRMWARE("isl3887usb");
+/*
+ * Note:
+ *
+ * Always update our wiki's device list (located at:
+ * http://wireless.kernel.org/en/users/Drivers/p54/devices ),
+ * whenever you add a new device.
+ */
+
static struct usb_device_id p54u_table[] __devinitdata = {
/* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
{USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
+ {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
+ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
+ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
+ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
+ {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */
{USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
{USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
{USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
{USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
+ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
{USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
#ifdef CONFIG_PM
/* ISL3887 needs a full reset on resume */
udev->reset_resume = 1;
+#endif /* CONFIG_PM */
err = p54u_device_reset(dev);
-#endif
priv->hw_type = P54U_3887;
dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
- for (i = 0; i < 14; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 0; i < 14; i++) {
+ info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER);
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
return 0;
}
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
- for (i = 0; i < 14; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 0; i < 14; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
if (spec->num_channels > 14) {
- for (i = 14; i < spec->num_channels; i++)
- info[i].tx_power1 = DEFAULT_TXPOWER;
+ for (i = 14; i < spec->num_channels; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = DEFAULT_TXPOWER;
+ }
}
return 0;
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
- for (i = 0; i < 14; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 0; i < 14; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
if (spec->num_channels > 14) {
- for (i = 14; i < spec->num_channels; i++)
- info[i].tx_power1 = DEFAULT_TXPOWER;
+ for (i = 14; i < spec->num_channels; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = DEFAULT_TXPOWER;
+ }
}
return 0;
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
+/*
+ * EEPROM Maximum TX power values
+ */
+#define EEPROM_MAX_TX_POWER 0x0027
+#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
+#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
+
/*
* EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
* This is delta in 40MHZ.
* double meaning, and we should set a 7DBm boost flag.
*/
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
- (info->tx_power1 >= 0));
+ (info->default_power1 >= 0));
- if (info->tx_power1 < 0)
- info->tx_power1 += 7;
+ if (info->default_power1 < 0)
+ info->default_power1 += 7;
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
- TXPOWER_A_TO_DEV(info->tx_power1));
+ TXPOWER_A_TO_DEV(info->default_power1));
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
- (info->tx_power2 >= 0));
+ (info->default_power2 >= 0));
- if (info->tx_power2 < 0)
- info->tx_power2 += 7;
+ if (info->default_power2 < 0)
+ info->default_power2 += 7;
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
- TXPOWER_A_TO_DEV(info->tx_power2));
+ TXPOWER_A_TO_DEV(info->default_power2));
} else {
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
- TXPOWER_G_TO_DEV(info->tx_power1));
+ TXPOWER_G_TO_DEV(info->default_power1));
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
- TXPOWER_G_TO_DEV(info->tx_power2));
+ TXPOWER_G_TO_DEV(info->default_power2));
}
rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
- TXPOWER_G_TO_DEV(info->tx_power1));
+ rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
- TXPOWER_G_TO_DEV(info->tx_power2));
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
default_lna_gain);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
+ if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
+ if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power1;
- char *tx_power2;
+ char *default_power1;
+ char *default_power2;
unsigned int i;
+ unsigned short max_power;
u16 eeprom;
/*
spec->channels_info = info;
- tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
- tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
+ max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
+ default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+ default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
for (i = 0; i < 14; i++) {
- info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
- info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
+ info[i].max_power = max_power;
+ info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
+ info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
}
if (spec->num_channels > 14) {
- tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
- tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+ max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
+ default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
+ default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
for (i = 14; i < spec->num_channels; i++) {
- info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
- info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
+ info[i].max_power = max_power;
+ info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
+ info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
}
}
unsigned int flags;
#define GEOGRAPHY_ALLOWED 0x00000001
- short tx_power1;
- short tx_power2;
+ short max_power;
+ short default_power1;
+ short default_power2;
};
/*
for (i = 0; i < spec->num_channels; i++) {
rt2x00lib_channel(&channels[i],
spec->channels[i].channel,
- spec->channels_info[i].tx_power1, i);
+ spec->channels_info[i].max_power, i);
}
/*
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
- for (i = 0; i < 14; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 0; i < 14; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
if (spec->num_channels > 14) {
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
- for (i = 14; i < spec->num_channels; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 14; i < spec->num_channels; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
}
return 0;
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
- for (i = 0; i < 14; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 0; i < 14; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
if (spec->num_channels > 14) {
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
- for (i = 14; i < spec->num_channels; i++)
- info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ for (i = 14; i < spec->num_channels; i++) {
+ info[i].max_power = MAX_TXPOWER;
+ info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+ }
}
return 0;
#define GRANT_INVALID_REF 0
-#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
-#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
struct netfront_info {
#include "oprof.h"
static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+static int ctr_running;
static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
{
{
struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+ if (!ctr_running)
+ return;
+
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = oprofile_hrtimer_notify;
static int oprofile_hrtimer_start(void)
{
+ get_online_cpus();
+ ctr_running = 1;
on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
+ put_online_cpus();
return 0;
}
{
struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+ if (!ctr_running)
+ return;
+
hrtimer_cancel(hrtimer);
}
{
int cpu;
+ get_online_cpus();
for_each_online_cpu(cpu)
__oprofile_hrtimer_stop(cpu);
+ ctr_running = 0;
+ put_online_cpus();
}
static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
(unsigned long long)drhd->reg_base_addr, ret);
return -1;
}
+
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(iommu->irq, iommu);
}
return 0;
#ifdef HAVE_PCI_MMAP
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size;
+ unsigned long nr, start, size, pci_start;
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (start < size && size - start >= nr)
+ pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
+ pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
return 1;
- WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
- current->comm, start, start+nr, pci_name(pdev), resno, size);
return 0;
}
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (!pci_mmap_fits(pdev, i, vma))
+ if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
+ "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
+ pci_name(pdev), i,
+ pci_resource_start(pdev, i), pci_resource_len(pdev, i));
return -EINVAL;
+ }
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
#endif
extern void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
+ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
+};
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vmai,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma))
+ if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#define VTUNCERRMSK_REG 0x1ac
+#define VTD_MSK_SPEC_ERRORS (1 << 31)
+/*
+ * This is a quirk for masking vt-d spec defined errors to platform error
+ * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
+ * on the RAS config settings of the platform) when a vt-d fault happens.
+ * The resulting SMI caused the system to hang.
+ *
+ * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * need to report the same error through other channels.
+ */
+static void vtd_mask_spec_errors(struct pci_dev *dev)
+{
+ u32 word;
+
+ pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
+ pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+#endif
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
va_end(args);
}
}
+EXPORT_SYMBOL(soc_pcmcia_debug);
#endif
*/
static int asus_gps_rfkill_set(void *data, bool blocked)
{
- acpi_handle handle = data;
+ struct asus_laptop *asus = data;
- return asus_gps_switch(handle, !blocked);
+ return asus_gps_switch(asus, !blocked);
}
static const struct rfkill_ops asus_gps_rfkill_ops = {
asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
RFKILL_TYPE_GPS,
- &asus_gps_rfkill_ops, NULL);
+ &asus_gps_rfkill_ops, asus);
if (!asus->gps_rfkill)
return -EINVAL;
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include "intel_ips.h"
#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
#define thm_writel(off, val) writel((val), ips->regmap + (off))
static const int IPS_ADJUST_PERIOD = 5000; /* ms */
+static bool late_i915_load = false;
/* For initial average collection */
static const int IPS_SAMPLE_PERIOD = 200; /* ms */
u64 orig_turbo_ratios;
};
+static bool
+ips_gpu_turbo_enabled(struct ips_driver *ips);
+
/**
* ips_cpu_busy - is CPU busy?
* @ips: IPS driver struct
*/
static bool ips_gpu_busy(struct ips_driver *ips)
{
- if (!ips->gpu_turbo_enabled)
+ if (!ips_gpu_turbo_enabled(ips))
return false;
return ips->gpu_busy();
*/
static void ips_gpu_raise(struct ips_driver *ips)
{
- if (!ips->gpu_turbo_enabled)
+ if (!ips_gpu_turbo_enabled(ips))
return;
if (!ips->gpu_raise())
*/
static void ips_gpu_lower(struct ips_driver *ips)
{
- if (!ips->gpu_turbo_enabled)
+ if (!ips_gpu_turbo_enabled(ips))
return;
if (!ips->gpu_lower())
return false;
}
+static bool
+ips_gpu_turbo_enabled(struct ips_driver *ips)
+{
+ if (!ips->gpu_busy && late_i915_load) {
+ if (ips_get_i915_syms(ips)) {
+ dev_info(&ips->dev->dev,
+ "i915 driver attached, reenabling gpu turbo\n");
+ ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
+ }
+ }
+
+ return ips->gpu_turbo_enabled;
+}
+
+void
+ips_link_to_i915_driver()
+{
+ /* We can't cleanly get at the various ips_driver structs from
+ * this caller (the i915 driver), so just set a flag saying
+ * that it's time to try getting the symbols again.
+ */
+ late_i915_load = true;
+}
+EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
+
static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
--- /dev/null
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+void ips_link_to_i915_driver(void);
wblock = list_entry(p, struct wmi_block, list);
gblock = &wblock->gblock;
- if (strncmp(gblock->guid, guid_string, 16) == 0)
+ if (memcmp(gblock->guid, guid_string, 16) == 0)
return true;
}
return false;
#include "../base.h"
#include "pnpacpi.h"
-static int num = 0;
+static int num;
/* We need only to blacklist devices that have already an acpi driver that
* can't use pnp layer. We don't need to blacklist device that are directly
};
EXPORT_SYMBOL(pnpacpi_protocol);
+static char *pnpacpi_get_id(struct acpi_device *device)
+{
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &device->pnp.ids, list) {
+ if (ispnpidacpi(id->id))
+ return id->id;
+ }
+
+ return NULL;
+}
+
static int __init pnpacpi_add_device(struct acpi_device *device)
{
acpi_handle temp = NULL;
acpi_status status;
struct pnp_dev *dev;
+ char *pnpid;
struct acpi_hardware_id *id;
/*
* driver should not be loaded.
*/
status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
- is_exclusive_device(device) || (!device->status.present))
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ pnpid = pnpacpi_get_id(device);
+ if (!pnpid)
+ return 0;
+
+ if (is_exclusive_device(device) || !device->status.present)
return 0;
- dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
+ dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
if (!dev)
return -ENOMEM;
pnpacpi_parse_resource_option_data(dev);
list_for_each_entry(id, &device->pnp.ids, list) {
- if (!strcmp(id->id, acpi_device_hid(device)))
+ if (!strcmp(id->id, pnpid))
continue;
if (!ispnpidacpi(id->id))
continue;
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
};
#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
-{ \
+ ereg0, ebit0, ereg1, ebit1) \
.desc = { \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_##_ops, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1), \
- .voltages = tps6586x_##vdata##_voltages, \
-}
+ .voltages = tps6586x_##vdata##_voltages,
+
+#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
+ .go_reg = TPS6586X_##goreg, \
+ .go_bit = (gobit),
#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
+{ \
TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, 0, 0)
+ ereg0, ebit0, ereg1, ebit1) \
+}
#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
+{ \
TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, goreg, gobit)
+ ereg0, ebit0, ereg1, ebit1) \
+ TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
+}
static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6),
+ TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1),
+ TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
uint8_t val1, val2;
int ret;
+ if (ri->enable_reg[0] == ri->enable_reg[1] &&
+ ri->enable_bit[0] == ri->enable_bit[1])
+ return 0;
+
ret = tps6586x_read(parent, ri->enable_reg[0], &val1);
if (ret)
return ret;
if (ret)
return ret;
- if (!(val2 & ri->enable_bit[1]))
+ if (!(val2 & (1 << ri->enable_bit[1])))
return 0;
/*
* The regulator is on, but it's enabled with the bit we don't
* want to use, so we switch the enable bits
*/
- if (!(val1 & ri->enable_bit[0])) {
+ if (!(val1 & (1 << ri->enable_bit[0]))) {
ret = tps6586x_set_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
if (ret)
help
Supports the RTC firmware in the MSP430 on the DM355 EVM.
+config RTC_DRV_TPS6586X
+ tristate "TI TPS6586X RTC"
+ depends on I2C
+ select MFD_TPS6586X
+ help
+	  This driver supports the RTC embedded in the TI TPS6586x PMIC.
+
config RTC_DRV_TWL92330
boolean "TI TWL92330/Menelaus"
depends on MENELAUS
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
struct rs5c372 *rs5c = i2c_get_clientdata(client);
- unsigned char buf[8];
+ unsigned char buf[7];
int addr;
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
--- /dev/null
+/*
+ * drivers/rtc/rtc-tps6586x.c
+ *
+ * RTC driver for TI TPS6586x
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define TPS_EPOCH 2009
+
+#define RTC_CTRL 0xc0
+# define RTC_ENABLE (1 << 5) /* enables tick updates */
+# define RTC_HIRES (1 << 4) /* 1Khz or 32Khz updates */
+#define RTC_ALARM1_HI 0xc1
+#define RTC_COUNT4 0xc6
+
+/* Per-device driver state for the TPS6586x RTC cell. */
+struct tps6586x_rtc {
+	unsigned long		epoch_start;	/* TPS_EPOCH (2009-01-01) in Unix seconds */
+	int			irq;		/* alarm IRQ, or -1 when unavailable */
+	bool			irq_en;		/* alarm IRQ currently unmasked? */
+	struct rtc_device	*rtc;
+};
+
+/* The platform device's parent is the TPS6586x MFD core device,
+ * which is what the tps6586x_read/write helpers expect. */
+static inline struct device *to_tps6586x_dev(struct device *dev)
+{
+	return dev->parent;
+}
+
+/*
+ * Read the current time: the chip keeps a 40-bit counter of 1024 Hz
+ * ticks starting at RTC_COUNT4 (most-significant byte first).  Convert
+ * ticks to seconds (>> 10) and offset by the driver epoch.
+ */
+static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+	struct device *tps_dev = to_tps6586x_dev(dev);
+	unsigned long long ticks = 0;
+	unsigned long seconds;
+	u8 buff[5];
+	int err;
+	int i;
+
+	err = tps6586x_reads(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+	if (err < 0) {
+		dev_err(dev, "failed to read counter\n");
+		return err;
+	}
+
+	/* buff[0] is the most-significant byte of the 40-bit counter */
+	for (i = 0; i < sizeof(buff); i++) {
+		ticks <<= 8;
+		ticks |= buff[i];
+	}
+
+	/* 1024 ticks per second */
+	seconds = ticks >> 10;
+
+	seconds += rtc->epoch_start;
+	rtc_time_to_tm(seconds, tm);
+	return rtc_valid_tm(tm);
+}
+
+/*
+ * Set the current time by reprogramming the 40-bit tick counter.
+ * Times before the driver epoch (2009) cannot be represented.
+ * Tick updates are disabled around the 5-byte counter write so the
+ * hardware does not increment the counter mid-update.
+ */
+static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+	struct device *tps_dev = to_tps6586x_dev(dev);
+	unsigned long long ticks;
+	unsigned long seconds;
+	u8 buff[5];
+	int err;
+
+	rtc_tm_to_time(tm, &seconds);
+
+	if (WARN_ON(seconds < rtc->epoch_start)) {
+		dev_err(dev, "requested time unsupported\n");
+		return -EINVAL;
+	}
+
+	seconds -= rtc->epoch_start;
+
+	/* seconds -> 1024 Hz ticks, serialized MSB first */
+	ticks = (unsigned long long)seconds << 10;
+	buff[0] = (ticks >> 32) & 0xff;
+	buff[1] = (ticks >> 24) & 0xff;
+	buff[2] = (ticks >> 16) & 0xff;
+	buff[3] = (ticks >> 8) & 0xff;
+	buff[4] = ticks & 0xff;
+
+	/* stop tick updates while the counter is rewritten */
+	err = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+	if (err < 0) {
+		dev_err(dev, "failed to clear RTC_ENABLE\n");
+		return err;
+	}
+
+	err = tps6586x_writes(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+	if (err < 0) {
+		dev_err(dev, "failed to program new time\n");
+		return err;
+	}
+
+	err = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+	if (err < 0) {
+		dev_err(dev, "failed to set RTC_ENABLE\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the 24-bit alarm register (RTC_ALARM1_HI..LO) with the tick
+ * count of the requested wall-clock time.  The alarm IRQ is masked
+ * while the registers are rewritten and re-enabled only when the
+ * caller asked for an armed alarm.
+ */
+static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+	struct device *tps_dev = to_tps6586x_dev(dev);
+	unsigned long seconds;
+	unsigned long ticks;
+	u8 buff[3];
+	int err;
+
+	if (rtc->irq == -1)
+		return -EIO;
+
+	rtc_tm_to_time(&alrm->time, &seconds);
+
+	if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+		dev_err(dev, "can't set alarm to requested time\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Mask the alarm IRQ while the alarm registers change.  (The
+	 * original condition tested "rtc->irq_en && rtc->irq_en" -- the
+	 * same operand twice -- and re-checked rtc->irq even though it
+	 * was already validated above; a single check suffices.)
+	 */
+	if (rtc->irq_en) {
+		disable_irq(rtc->irq);
+		rtc->irq_en = false;
+	}
+
+	seconds -= rtc->epoch_start;
+	/* only the low 24 bits of the tick count fit in the alarm regs */
+	ticks = (unsigned long long)seconds << 10;
+
+	buff[0] = (ticks >> 16) & 0xff;
+	buff[1] = (ticks >> 8) & 0xff;
+	buff[2] = ticks & 0xff;
+
+	err = tps6586x_writes(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+	if (err) {
+		dev_err(tps_dev, "unable to program alarm\n");
+		return err;
+	}
+
+	if (alrm->enabled && (rtc->irq != -1)) {
+		enable_irq(rtc->irq);
+		rtc->irq_en = true;
+	}
+
+	return err;
+}
+
+/*
+ * Read back the programmed alarm: 3 bytes MSB-first from
+ * RTC_ALARM1_HI, converted from 1024 Hz ticks to seconds and offset
+ * by the driver epoch.  'enabled' mirrors the cached IRQ state.
+ */
+static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+	struct device *tps_dev = to_tps6586x_dev(dev);
+	unsigned long ticks;
+	unsigned long seconds;
+	u8 buff[3];
+	int err;
+
+	err = tps6586x_reads(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+	if (err)
+		return err;
+
+	ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
+	seconds = ticks >> 10;
+	seconds += rtc->epoch_start;
+
+	rtc_time_to_tm(seconds, &alrm->time);
+	alrm->enabled = rtc->irq_en;
+
+	return 0;
+}
+
+/*
+ * Enable or disable the RTC alarm interrupt.  Returns -EIO when no
+ * IRQ was wired up at probe time; otherwise (un)masks the IRQ line
+ * and records the requested state in rtc->irq_en.
+ */
+static int tps6586x_rtc_update_irq_enable(struct device *dev,
+					  unsigned int enabled)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+	unsigned int want = !!enabled;
+
+	if (rtc->irq == -1)
+		return -EIO;
+
+	/* Nothing to do when the line is already in the requested state. */
+	if (want != rtc->irq_en) {
+		if (want)
+			enable_irq(rtc->irq);
+		else
+			disable_irq(rtc->irq);
+
+		rtc->irq_en = want;
+	}
+
+	return 0;
+}
+
+/* rtc_class_ops vtable; alarm ops bail out with -EIO when no IRQ is set. */
+static const struct rtc_class_ops tps6586x_rtc_ops = {
+	.read_time	= tps6586x_rtc_read_time,
+	.set_time	= tps6586x_rtc_set_time,
+	.set_alarm	= tps6586x_rtc_set_alarm,
+	.read_alarm	= tps6586x_rtc_read_alarm,
+	.update_irq_enable = tps6586x_rtc_update_irq_enable,
+};
+
+/* Threaded alarm-IRQ handler; 'data' is &pdev->dev (see probe).
+ * Forwards the alarm event to the RTC core. */
+static irqreturn_t tps6586x_rtc_irq(int irq, void *data)
+{
+	struct device *dev = data;
+	struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+	rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: allocate state, register the rtc class device, start the
+ * tick counter, and optionally hook up the alarm IRQ as a wake source.
+ *
+ * drvdata is set *before* rtc_device_register(): the RTC core may
+ * invoke our ops during registration, and every op starts with
+ * dev_get_drvdata() -- setting it afterwards leaves a NULL-deref window.
+ */
+static int __devinit tps6586x_rtc_probe(struct platform_device *pdev)
+{
+	struct tps6586x_rtc_platform_data *pdata = pdev->dev.platform_data;
+	struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
+	struct tps6586x_rtc *rtc;
+	int err;
+
+	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+
+	if (!rtc)
+		return -ENOMEM;
+
+	rtc->irq = -1;
+	if (!pdata || (pdata->irq < 0))
+		dev_warn(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+	rtc->epoch_start = mktime(TPS_EPOCH, 1, 1, 0, 0, 0);
+
+	dev_set_drvdata(&pdev->dev, rtc);
+
+	rtc->rtc = rtc_device_register("tps6586x-rtc", &pdev->dev,
+				       &tps6586x_rtc_ops, THIS_MODULE);
+
+	if (IS_ERR(rtc->rtc)) {
+		err = PTR_ERR(rtc->rtc);
+		goto fail;
+	}
+
+	/* disable high-res mode, enable tick counting */
+	err = tps6586x_update(tps_dev, RTC_CTRL,
+		(RTC_ENABLE | RTC_HIRES), RTC_ENABLE);
+	if (err < 0) {
+		dev_err(&pdev->dev, "unable to start counter\n");
+		goto fail;
+	}
+
+	if (pdata && (pdata->irq >= 0)) {
+		rtc->irq = pdata->irq;
+		err = request_threaded_irq(pdata->irq, NULL, tps6586x_rtc_irq,
+					   IRQF_ONESHOT, "tps6586x-rtc",
+					   &pdev->dev);
+		if (err) {
+			dev_warn(&pdev->dev, "unable to request IRQ\n");
+			rtc->irq = -1;
+		} else {
+			device_init_wakeup(&pdev->dev, 1);
+			/* keep the IRQ masked until an alarm is armed */
+			disable_irq(rtc->irq);
+			enable_irq_wake(rtc->irq);
+		}
+	}
+
+	return 0;
+
+fail:
+	/* rtc->rtc is NULL (from kzalloc) or ERR_PTR if registration failed */
+	if (!IS_ERR_OR_NULL(rtc->rtc))
+		rtc_device_unregister(rtc->rtc);
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(rtc);
+	return err;
+}
+
+/*
+ * Remove: release the alarm IRQ (if any) and unregister the device.
+ * free_irq()'s dev_id must be &pdev->dev -- the cookie passed to
+ * request_threaded_irq() in probe -- not the rtc struct, otherwise the
+ * handler is never found and the IRQ is left registered.
+ */
+static int __devexit tps6586x_rtc_remove(struct platform_device *pdev)
+{
+	struct tps6586x_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+	if (rtc->irq != -1)
+		free_irq(rtc->irq, &pdev->dev);
+	rtc_device_unregister(rtc->rtc);
+	kfree(rtc);
+	return 0;
+}
+
+/* Platform glue: the MFD core instantiates a "tps6586x-rtc" device. */
+static struct platform_driver tps6586x_rtc_driver = {
+	.driver	= {
+		.name	= "tps6586x-rtc",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= tps6586x_rtc_probe,
+	.remove	= __devexit_p(tps6586x_rtc_remove),
+};
+
+static int __init tps6586x_rtc_init(void)
+{
+	return platform_driver_register(&tps6586x_rtc_driver);
+}
+module_init(tps6586x_rtc_init);
+
+static void __exit tps6586x_rtc_exit(void)
+{
+	platform_driver_unregister(&tps6586x_rtc_driver);
+}
+module_exit(tps6586x_rtc_exit);
+
+MODULE_DESCRIPTION("TI TPS6586x RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
ha = gdth_find_ha(gen.ionode);
if (!ha)
return -EFAULT;
+
+ if (gen.data_len > INT_MAX)
+ return -EINVAL;
+ if (gen.sense_len > INT_MAX)
+ return -EINVAL;
+ if (gen.data_len + gen.sense_len > INT_MAX)
+ return -EINVAL;
+
if (gen.data_len + gen.sense_len != 0) {
if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
FALSE, &paddr)))
static struct ata_port_operations sas_sata_ops = {
.phy_reset = sas_ata_phy_reset,
.post_internal_cmd = sas_ata_post_internal,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
struct pmcraid_control_block *ioa_cb;
dma_addr_t ioa_cb_bus_addr;
dma_addr_t dma_handle;
- u8 *sense_buffer;
/* pointer to mid layer structure of SCSI commands */
struct scsi_cmnd *scsi_cmd;
extern int ql2xdbwr;
extern int ql2xdontresethba;
extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
extern int ql2xenabledif;
extern int ql2xenablehba_err_chk;
extern int ql2xtargetreset;
continue;
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
- if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
- new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
+ if (ql2xgffidenable &&
+ (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
/* Locate matching device in database. */
fcp_cmnd->additional_cdb_len |= 2;
int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+ host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
goto queuing_error_fcp_cmnd;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
"Enable target reset."
"Default is 1 - use hw defaults.");
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xgffidenable,
+ "Enables GFF_ID checks of port type. "
+ "Default is 0 - Do not use GFF_ID information.");
int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
ha->isp_ops = &qla82xx_isp_ops;
ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/pci.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
- /* New queue, no concurrency on queue_flags */
if (!shost->use_clustering)
- queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+ q->limits.cluster = 0;
/*
* set a reasonable default alignment on word boundaries: the
sdev->sdev_state = SDEV_RUNNING;
else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
sdev->sdev_state = SDEV_CREATED;
- else
+ else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- sdev->sdev_state == SDEV_DEL)
+ scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
+ scsi_device_put(sdev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
index = sdkp->index;
dev = &sdp->sdev_gendev;
- if (index < SD_MAX_DISKS) {
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
- gd->minors = SD_MINORS;
- }
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
+
gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
if (error)
goto out_put;
+ if (index >= SD_MAX_DISKS) {
+ error = -ENODEV;
+ sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
+ goto out_free_index;
+ }
+
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error)
goto out_free_index;
help
Serial port support for Samsung's S5P Family of SoC's
+config SERIAL_TEGRA
+ boolean "High speed serial support for NVIDIA Tegra SoCs"
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on NVIDIA Tegra SoC, providing
+ /dev/ttyHSx, where x is determined by the number of UARTs on the
+ platform
config SERIAL_MAX3100
tristate "MAX3100 support"
obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
+obj-$(CONFIG_SERIAL_TEGRA) += tegra_hsuart.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
- u32 mul = 0x3600;
- u32 ps = 0x10;
+ u32 ps, mul;
switch (termios->c_cflag & CSIZE) {
case CS5:
ps = 0xC;
quot = 1;
break;
- case 2500000:
- mul = 0x2710;
- ps = 0x10;
- quot = 1;
- break;
case 18432000:
mul = 0x2400;
ps = 0x10;
quot = 1;
break;
+ case 3000000:
+ case 2500000:
+ case 2000000:
case 1500000:
- mul = 0x1D4C;
- ps = 0xc;
- quot = 1;
+ case 1000000:
+ case 500000:
+ /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
+ mul = baud / 500000 * 0x9C4;
break;
default:
;
--- /dev/null
+/*
+ * drivers/serial/tegra_hsuart.c
+ *
+ * High-speed serial driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/termios.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+
+#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define BYTES_TO_ALIGN(x) ((unsigned long)(ALIGN((x), sizeof(u32))) - \
+ (unsigned long)(x))
+
+#define UART_RX_DMA_BUFFER_SIZE (2048*4)
+
+#define UART_LSR_FIFOE 0x80
+#define UART_IER_EORD 0x20
+#define UART_MCR_RTS_EN 0x40
+#define UART_MCR_CTS_EN 0x20
+#define UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
+ UART_LSR_PE | UART_LSR_FE)
+
+#define TX_FORCE_PIO 0
+#define RX_FORCE_PIO 0
+
+/* APB DMA request selects, indexed by UART controller (A..E).
+ * NOTE(review): file-local table -- presumably should be 'static const';
+ * verify nothing outside this file references it. */
+const int dma_req_sel[] = {
+	TEGRA_DMA_REQ_SEL_UARTA,
+	TEGRA_DMA_REQ_SEL_UARTB,
+	TEGRA_DMA_REQ_SEL_UARTC,
+	TEGRA_DMA_REQ_SEL_UARTD,
+	TEGRA_DMA_REQ_SEL_UARTE,
+};
+
+#define TEGRA_TX_PIO 1
+#define TEGRA_TX_DMA 2
+
+#define TEGRA_UART_MIN_DMA 16
+#define TEGRA_UART_FIFO_SIZE 8
+
+/* Tx fifo trigger level setting in tegra uart is in
+ * reverse way then conventional uart */
+#define TEGRA_UART_TX_TRIG_16B 0x00
+#define TEGRA_UART_TX_TRIG_8B 0x10
+#define TEGRA_UART_TX_TRIG_4B 0x20
+#define TEGRA_UART_TX_TRIG_1B 0x30
+
+/*
+ * Per-port driver state.  'uport' must stay first so container_of()
+ * conversions from struct uart_port work; the *_shadow fields cache
+ * the last value written to the (write-only or mode-dependent) UART
+ * registers.
+ */
+struct tegra_uart_port {
+	struct uart_port	uport;
+	char			port_name[32];
+
+	/* Module info */
+	unsigned long		size;
+	struct clk		*clk;
+	unsigned int		baud;
+
+	/* Register shadow */
+	unsigned char		fcr_shadow;
+	unsigned char		mcr_shadow;
+	unsigned char		lcr_shadow;
+	unsigned char		ier_shadow;
+	bool			use_cts_control;
+	bool			rts_active;
+
+	int			tx_in_progress;	/* 0, TEGRA_TX_PIO or TEGRA_TX_DMA */
+	unsigned int		tx_bytes;
+
+	dma_addr_t		xmit_dma_addr;
+
+	/* TX DMA */
+	struct tegra_dma_req	tx_dma_req;
+	struct tegra_dma_channel *tx_dma;
+	struct work_struct	tx_work;
+
+	/* RX DMA */
+	struct tegra_dma_req	rx_dma_req;
+	struct tegra_dma_channel *rx_dma;
+
+	bool			use_rx_dma;
+	bool			use_tx_dma;
+
+	bool			rx_timeout;
+	int			rx_in_progress;
+};
+
+/* Register accessors: UART registers are spaced by (1 << regshift)
+ * bytes from membase; each access is traced at the vdbg level. */
+static inline u8 uart_readb(struct tegra_uart_port *t, unsigned long reg)
+{
+	u8 val = readb(t->uport.membase + (reg << t->uport.regshift));
+	dev_vdbg(t->uport.dev, "%s: %p %03lx = %02x\n", __func__,
+		t->uport.membase, reg << t->uport.regshift, val);
+	return val;
+}
+
+static inline void uart_writeb(struct tegra_uart_port *t, u8 val,
+	unsigned long reg)
+{
+	dev_vdbg(t->uport.dev, "%s: %p %03lx %02x\n",
+		__func__, t->uport.membase, reg << t->uport.regshift, val);
+	writeb(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+/* 32-bit variant, used for word-wide FIFO accesses. */
+static inline void uart_writel(struct tegra_uart_port *t, u32 val,
+	unsigned long reg)
+{
+	dev_vdbg(t->uport.dev, "%s: %p %03lx %08x\n",
+		__func__, t->uport.membase, reg << t->uport.regshift, val);
+	writel(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud);
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl);
+static void do_handle_rx_pio(struct tegra_uart_port *t);
+static void do_handle_rx_dma(struct tegra_uart_port *t);
+static void set_rts(struct tegra_uart_port *t, bool active);
+static void set_dtr(struct tegra_uart_port *t, bool active);
+
+/* Copy up to max_bytes from the circular xmit buffer into the TX FIFO.
+ * Caller guarantees at least max_bytes are pending (BUG_ON otherwise)
+ * and that the FIFO has room for them. */
+static void fill_tx_fifo(struct tegra_uart_port *t, int max_bytes)
+{
+	int i;
+	struct circ_buf *xmit = &t->uport.state->xmit;
+
+	for (i = 0; i < max_bytes; i++) {
+		BUG_ON(uart_circ_empty(xmit));
+		uart_writeb(t, xmit->buf[xmit->tail], UART_TX);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		t->uport.icount.tx++;
+	}
+}
+
+/* Start a PIO transmit of up to one FIFO's worth of bytes: set the TX
+ * trigger level and unmask THRI; the actual bytes are written from the
+ * ISR (do_handle_tx_pio) when the FIFO drains. */
+static void tegra_start_pio_tx(struct tegra_uart_port *t, unsigned int bytes)
+{
+	if (bytes > TEGRA_UART_FIFO_SIZE)
+		bytes = TEGRA_UART_FIFO_SIZE;
+
+	t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+	t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+	uart_writeb(t, t->fcr_shadow, UART_FCR);
+	t->tx_in_progress = TEGRA_TX_PIO;
+	t->tx_bytes = bytes;
+	t->ier_shadow |= UART_IER_THRI;
+	uart_writeb(t, t->ier_shadow, UART_IER);
+}
+
+/* Start a DMA transmit from the xmit buffer.  The byte count is rounded
+ * down to a multiple of 4 (DMA moves words); the caller has already
+ * PIO-drained any unaligned prefix (see tegra_start_next_tx). */
+static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
+{
+	struct circ_buf *xmit;
+	xmit = &t->uport.state->xmit;
+
+	/* make the CPU-written xmit bytes visible to the DMA engine */
+	dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
+		UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+	t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+	t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
+	uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+	t->tx_bytes = bytes & ~(sizeof(u32)-1);
+	t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
+	t->tx_dma_req.size = t->tx_bytes;
+
+	t->tx_in_progress = TEGRA_TX_DMA;
+
+	tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
+}
+
+/*
+ * Called with u->lock taken.  Pick the transmit strategy for the data
+ * pending in the xmit ring: PIO for short runs (< TEGRA_UART_MIN_DMA)
+ * or when DMA is disabled; a short PIO burst first if the tail is not
+ * word-aligned (DMA requires 4-byte alignment); DMA otherwise.
+ */
+static void tegra_start_next_tx(struct tegra_uart_port *t)
+{
+	unsigned long tail;
+	unsigned long count;
+
+	struct circ_buf *xmit;
+
+	xmit = &t->uport.state->xmit;
+	tail = (unsigned long)&xmit->buf[xmit->tail];
+	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+
+	dev_vdbg(t->uport.dev, "+%s %lu %d\n", __func__, count,
+		t->tx_in_progress);
+
+	if (count == 0)
+		goto out;
+
+	if (!t->use_tx_dma || count < TEGRA_UART_MIN_DMA)
+		tegra_start_pio_tx(t, count);
+	else if (BYTES_TO_ALIGN(tail) > 0)
+		tegra_start_pio_tx(t, BYTES_TO_ALIGN(tail));
+	else
+		tegra_start_dma_tx(t, count);
+
+out:
+	dev_vdbg(t->uport.dev, "-%s", __func__);
+}
+
+/* serial-core start_tx hook; called with u->lock held.  Kick off a new
+ * transmit only when data is pending and no TX (PIO or DMA) is active. */
+static void tegra_start_tx(struct uart_port *u)
+{
+	struct tegra_uart_port *t =
+		container_of(u, struct tegra_uart_port, uport);
+
+	if (t->tx_in_progress)
+		return;
+	if (uart_circ_empty(&u->state->xmit))
+		return;
+
+	tegra_start_next_tx(t);
+}
+
+/* (Re-)arm the RX DMA request.  The wmb() orders prior CPU writes
+ * before the DMA engine is told to start. */
+static int tegra_start_dma_rx(struct tegra_uart_port *t)
+{
+	wmb();
+	if (tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req)) {
+		dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* DMA threshold callback: runs outside the port lock, so take it and
+ * drain/re-arm the RX DMA via the common path. */
+static void tegra_rx_dma_threshold_callback(struct tegra_dma_req *req)
+{
+	struct tegra_uart_port *t = req->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&t->uport.lock, flags);
+
+	do_handle_rx_dma(t);
+
+	spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+/*
+ * Must be called with the uart lock held, and only after the DMA has
+ * stopped.  Pushes the DMA'd bytes and any stragglers left in the FIFO
+ * (via do_handle_rx_pio) to the tty layer.  For an aborted request the
+ * tty push is deferred to the caller (do_handle_rx_dma), which pushes
+ * after re-arming the DMA.
+ */
+static void tegra_rx_dma_complete_req(struct tegra_uart_port *t,
+	struct tegra_dma_req *req)
+{
+	struct uart_port *u = &t->uport;
+	struct tty_struct *tty = u->state->port.tty;
+
+	/* If we are here, DMA is stopped */
+
+	dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
+		req->status);
+	if (req->bytes_transferred) {
+		t->uport.icount.rx += req->bytes_transferred;
+		tty_insert_flip_string(tty,
+			((unsigned char *)(req->virt_addr)),
+			req->bytes_transferred);
+	}
+
+	do_handle_rx_pio(t);
+
+	/* Push the read data later in caller place. */
+	if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
+		return;
+
+	tty_flip_buffer_push(u->state->port.tty);
+}
+
+/*
+ * Full-buffer DMA completion.  In normal operation the request is
+ * dequeued from the threshold callback before it completes, so landing
+ * here means the buffer filled up first -- i.e. a likely RX overflow.
+ */
+static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
+{
+	struct tegra_uart_port *t = req->dev;
+	unsigned long flags;
+
+	/*
+	 * should never get called, dma should be dequeued during threshold
+	 * callback
+	 */
+
+	dev_warn(t->uport.dev, "possible rx overflow\n");
+
+	spin_lock_irqsave(&t->uport.lock, flags);
+	tegra_rx_dma_complete_req(t, req);
+	spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+/*
+ * Lock already taken.  Drain and re-arm the RX DMA.  When hardware
+ * flow control is active, RTS is deasserted for the duration so the
+ * remote end pauses while we swap requests.
+ */
+static void do_handle_rx_dma(struct tegra_uart_port *t)
+{
+	struct uart_port *u = &t->uport;
+	if (t->rts_active)
+		set_rts(t, false);
+	/* dequeue returns nonzero if the req was already complete */
+	if (!tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req))
+		tegra_rx_dma_complete_req(t, &t->rx_dma_req);
+
+	tty_flip_buffer_push(u->state->port.tty);
+	/* enqueue the request again */
+	tegra_start_dma_rx(t);
+	if (t->rts_active)
+		set_rts(t, true);
+}
+
+/* Wait for 'syms' symbol-times at the current line settings: the bit
+ * count per symbol is derived from the shadowed LCR (word length, stop
+ * bits, parity) and converted to microseconds using the current baud. */
+static void wait_sym_time(struct tegra_uart_port *t, unsigned int syms)
+{
+
+	/* Definitely have a start bit. */
+	unsigned int bits = 1;
+	switch (t->lcr_shadow & 3) {
+	case UART_LCR_WLEN5:
+		bits += 5;
+		break;
+	case UART_LCR_WLEN6:
+		bits += 6;
+		break;
+	case UART_LCR_WLEN7:
+		bits += 7;
+		break;
+	default:
+		bits += 8;
+		break;
+	}
+
+	/* Technically 5 bits gets 1.5 bits of stop... */
+	if (t->lcr_shadow & UART_LCR_STOP) {
+		bits += 2;
+	} else {
+		bits++;
+	}
+
+	if (t->lcr_shadow & UART_LCR_PARITY)
+		bits++;
+
+	/* guard against division by zero before the first set_termios */
+	if (likely(t->baud))
+		udelay(DIV_ROUND_UP(syms * bits * 1000000, t->baud));
+}
+
+/* Flush the RX and/or TX FIFO selected by fcr_bits, then wait one
+ * symbol-time for the flush to take effect in the shifter. */
+static void tegra_fifo_reset(struct tegra_uart_port *t, u8 fcr_bits)
+{
+	unsigned char fcr = t->fcr_shadow;
+	fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+	uart_writeb(t, fcr, UART_FCR);
+	uart_readb(t, UART_SCR); /* Dummy read to ensure the write is posted */
+	wait_sym_time(t, 1); /* Wait for the flush to propagate. */
+}
+
+/* Translate UART line-status error bits into the tty flag for the
+ * current character, bumping the matching icount statistic.  Only the
+ * highest-priority error is reported (OE > PE > FE > BI). */
+static char do_decode_rx_error(struct tegra_uart_port *t, u8 lsr)
+{
+	char flag = TTY_NORMAL;
+
+	if (unlikely(lsr & UART_LSR_ANY)) {
+		if (lsr & UART_LSR_OE) {
+			/* Overrun error */
+			flag |= TTY_OVERRUN;
+			t->uport.icount.overrun++;
+			dev_err(t->uport.dev, "Got overrun errors\n");
+		} else if (lsr & UART_LSR_PE) {
+			/* Parity error */
+			flag |= TTY_PARITY;
+			t->uport.icount.parity++;
+			dev_err(t->uport.dev, "Got Parity errors\n");
+		} else if (lsr & UART_LSR_FE) {
+			flag |= TTY_FRAME;
+			t->uport.icount.frame++;
+			dev_err(t->uport.dev, "Got frame errors\n");
+		} else if (lsr & UART_LSR_BI) {
+			dev_err(t->uport.dev, "Got Break\n");
+			t->uport.icount.brk++;
+			/* If FIFO read error without any data, reset Rx FIFO */
+			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
+				tegra_fifo_reset(t, UART_FCR_CLEAR_RCVR);
+		}
+	}
+	return flag;
+}
+
+/*
+ * Drain the RX FIFO byte-by-byte (PIO path), decoding line-status
+ * errors per character and handing the bytes to the tty layer.
+ * The caller is responsible for the subsequent tty_flip_buffer_push().
+ */
+static void do_handle_rx_pio(struct tegra_uart_port *t)
+{
+	int count = 0;
+	do {
+		char flag = TTY_NORMAL;
+		unsigned char lsr = 0;
+		unsigned char ch;
+
+
+		lsr = uart_readb(t, UART_LSR);
+		if (!(lsr & UART_LSR_DR))
+			break;
+
+		flag = do_decode_rx_error(t, lsr);
+		ch = uart_readb(t, UART_RX);
+		t->uport.icount.rx++;
+		count++;
+
+		/* fix: the received byte is 'ch'; 'c' was never declared */
+		if (!uart_handle_sysrq_char(&t->uport, ch))
+			uart_insert_char(&t->uport, lsr, UART_LSR_OE, ch, flag);
+	} while (1);
+
+	dev_dbg(t->uport.dev, "PIO received %d bytes\n", count);
+
+	return;
+}
+
+/* Modem-status interrupt: read MSR (which also clears the interrupt)
+ * and trace which signals are asserted.  No state is changed. */
+static void do_handle_modem_signal(struct uart_port *u)
+{
+	unsigned char msr;
+	struct tegra_uart_port *t;
+
+	t = container_of(u, struct tegra_uart_port, uport);
+	msr = uart_readb(t, UART_MSR);
+	if (msr & UART_MSR_CTS)
+		dev_dbg(u->dev, "CTS triggered\n");
+	if (msr & UART_MSR_DSR)
+		dev_dbg(u->dev, "DSR enabled\n");
+	if (msr & UART_MSR_DCD)
+		dev_dbg(u->dev, "CD enabled\n");
+	if (msr & UART_MSR_RI)
+		dev_dbg(u->dev, "RI enabled\n");
+	return;
+}
+
+/* THRI interrupt path: write the t->tx_bytes scheduled by
+ * tegra_start_pio_tx into the FIFO, wake writers if the ring has
+ * drained below WAKEUP_CHARS, then schedule any remaining data. */
+static void do_handle_tx_pio(struct tegra_uart_port *t)
+{
+	struct circ_buf *xmit = &t->uport.state->xmit;
+
+	fill_tx_fifo(t, t->tx_bytes);
+
+	t->tx_in_progress = 0;
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&t->uport);
+
+	tegra_start_next_tx(t);
+	return;
+}
+
+/*
+ * Workqueue half of TX DMA completion (scheduled from
+ * tegra_tx_dma_complete_req).  Busy-waits (up to ~20 ms) for the UART
+ * shifter to empty -- DMA completion only means the FIFO was filled --
+ * then, under the port lock, clears the in-progress state and starts
+ * the next transmit unless the request was aborted.
+ */
+static void tegra_tx_dma_complete_work(struct work_struct *work)
+{
+	struct tegra_uart_port *t =
+		container_of(work, struct tegra_uart_port, tx_work);
+	struct tegra_dma_req *req = &t->tx_dma_req;
+	unsigned long flags;
+	int timeout = 20;
+
+	while ((uart_readb(t, UART_LSR) & TX_EMPTY_STATUS) != TX_EMPTY_STATUS) {
+		timeout--;
+		if (timeout == 0) {
+			dev_err(t->uport.dev,
+				"timed out waiting for TX FIFO to empty\n");
+			return;
+		}
+		msleep(1);
+	}
+
+	spin_lock_irqsave(&t->uport.lock, flags);
+
+	t->tx_in_progress = 0;
+
+	if (req->status != -TEGRA_DMA_REQ_ERROR_ABORTED)
+		tegra_start_next_tx(t);
+
+	spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+/*
+ * Must be called with the uart lock held.  Advance the xmit tail past
+ * the DMA'd bytes and wake writers; the FIFO-drain wait and next-tx
+ * kick happen in tegra_tx_dma_complete_work (it may sleep).
+ */
+static void tegra_tx_dma_complete_req(struct tegra_uart_port *t,
+	struct tegra_dma_req *req)
+{
+	struct circ_buf *xmit = &t->uport.state->xmit;
+	int count = req->bytes_transferred;
+
+	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&t->uport);
+
+	schedule_work(&t->tx_work);
+}
+
+/* TX DMA completion callback: runs without the port lock, so take it
+ * before updating the xmit ring. */
+static void tegra_tx_dma_complete_callback(struct tegra_dma_req *req)
+{
+	struct tegra_uart_port *t = req->dev;
+	unsigned long flags;
+
+	dev_vdbg(t->uport.dev, "%s: %d\n", __func__, req->bytes_transferred);
+
+	spin_lock_irqsave(&t->uport.lock, flags);
+
+	tegra_tx_dma_complete_req(t, req);
+
+	spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+/* Top-level interrupt handler: loops on IIR until the controller
+ * reports no pending cause, dispatching each one; deferred RX DMA work
+ * is serviced once the controller goes quiet. */
+static irqreturn_t tegra_uart_isr(int irq, void *data)
+{
+	struct tegra_uart_port *t = data;
+	struct uart_port *u = &t->uport;
+	unsigned char iir;
+	unsigned char ier;
+	bool is_rx_int = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&u->lock, flags);
+	t = container_of(u, struct tegra_uart_port, uport);
+	while (1) {
+		iir = uart_readb(t, UART_IIR);
+		if (iir & UART_IIR_NO_INT) {
+			/* All causes drained.  If an RX cause was seen,
+			 * drain the DMA buffer now and re-enable the RX
+			 * interrupts that were masked below. */
+			if (likely(t->use_rx_dma) && is_rx_int) {
+				do_handle_rx_dma(t);
+
+				if (t->rx_in_progress) {
+					ier = t->ier_shadow;
+					ier |= (UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+					t->ier_shadow = ier;
+					uart_writeb(t, ier, UART_IER);
+				}
+			}
+			spin_unlock_irqrestore(&u->lock, flags);
+			return IRQ_HANDLED;
+		}
+
+		dev_dbg(u->dev, "tegra_uart_isr iir = 0x%x (%d)\n", iir,
+			(iir >> 1) & 0x7);
+		switch ((iir >> 1) & 0x7) {
+		case 0: /* Modem signal change interrupt */
+			do_handle_modem_signal(u);
+			break;
+		case 1: /* Transmit interrupt only triggered when using PIO */
+			t->ier_shadow &= ~UART_IER_THRI;
+			uart_writeb(t, t->ier_shadow, UART_IER);
+			do_handle_tx_pio(t);
+			break;
+		case 4: /* End of data */
+		case 6: /* Rx timeout */
+		case 2: /* Receive */
+			if (likely(t->use_rx_dma)) {
+				if (!is_rx_int) {
+					is_rx_int = true;
+					/* Mask the RX interrupts until the
+					 * quiet-loop above services the DMA.
+					 * NOTE(review): RDI is set in the
+					 * first IER write then cleared in the
+					 * second — looks deliberate but
+					 * confirm against the Tegra TRM. */
+					ier = t->ier_shadow;
+					ier |= UART_IER_RDI;
+					uart_writeb(t, ier, UART_IER);
+					ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+					t->ier_shadow = ier;
+					uart_writeb(t, ier, UART_IER);
+				}
+			} else {
+				do_handle_rx_pio(t);
+
+				/* Push to the tty without holding the lock. */
+				spin_unlock_irqrestore(&u->lock, flags);
+				tty_flip_buffer_push(u->state->port.tty);
+				spin_lock_irqsave(&u->lock, flags);
+			}
+			break;
+		case 3: /* Receive error */
+			/* FIXME how to handle this? Why do we get here */
+			do_decode_rx_error(t, uart_readb(t, UART_LSR));
+			break;
+		case 5: /* break nothing to handle */
+		case 7: /* break nothing to handle */
+			break;
+		}
+	}
+}
+
+/* serial core .stop_rx: drop RTS (if this driver owns it), mask the RX
+ * interrupts, tear down any in-flight RX DMA, and push what was
+ * already received up to the tty layer. */
+static void tegra_stop_rx(struct uart_port *u)
+{
+	struct tegra_uart_port *t;
+	unsigned char ier;
+
+	t = container_of(u, struct tegra_uart_port, uport);
+
+	if (t->rts_active)
+		set_rts(t, false);
+
+	if (t->rx_in_progress) {
+		wait_sym_time(t, 1); /* wait a character interval */
+
+		ier = t->ier_shadow;
+		ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+		t->ier_shadow = ier;
+		uart_writeb(t, ier, UART_IER);
+		t->rx_in_progress = 0;
+
+		/* dequeue returning 0 means the request was still queued;
+		 * complete it by hand so its data is accounted for. */
+		if (t->use_rx_dma && t->rx_dma) {
+			if (!tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req))
+				tegra_rx_dma_complete_req(t, &t->rx_dma_req);
+		} else {
+			do_handle_rx_pio(t);
+		}
+		tty_flip_buffer_push(u->state->port.tty);
+	}
+
+	return;
+}
+
+/* Quiesce the controller: flush pending TX work, mask all interrupts,
+ * wait for the transmitter to drain, reset both FIFOs and gate the
+ * clock. */
+static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
+{
+	unsigned long flags;
+
+	flush_work(&t->tx_work);
+
+	/* Disable interrupts */
+	uart_writeb(t, 0, UART_IER);
+
+	/* NOTE(review): unbounded busy-wait — this spins forever if TEMT
+	 * never asserts (e.g. a flow-controlled peer); consider a bounded
+	 * timeout like tegra_tx_dma_complete_work() uses. */
+	while ((uart_readb(t, UART_LSR) & UART_LSR_TEMT) != UART_LSR_TEMT);
+	udelay(200);
+
+	spin_lock_irqsave(&t->uport.lock, flags);
+
+	/* Reset the Rx and Tx FIFOs */
+	tegra_fifo_reset(t, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+
+	/* NOTE(review): clk_disable() is called under a spinlock here —
+	 * verify this platform's clk implementation never sleeps. */
+	clk_disable(t->clk);
+	t->baud = 0;
+
+	spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+/* Release the RX DMA channel and its coherent bounce buffer.  No-op
+ * when RX DMA was never enabled (t->use_rx_dma is false). */
+static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
+{
+	if (!t->use_rx_dma)
+		return;
+
+	tegra_dma_free_channel(t->rx_dma);
+	t->rx_dma = NULL;
+
+	/* dest_addr doubles as "buffer was allocated" flag. */
+	if (likely(t->rx_dma_req.dest_addr))
+		dma_free_coherent(t->uport.dev, t->rx_dma_req.size,
+			t->rx_dma_req.virt_addr, t->rx_dma_req.dest_addr);
+	t->rx_dma_req.dest_addr = 0;
+	t->rx_dma_req.virt_addr = NULL;
+
+	t->use_rx_dma = false;
+}
+
+/* Bring the UART out of reset and program a known-good configuration:
+ * FIFO trigger levels, optional RX DMA priming (with PIO fallback),
+ * and the interrupt enables.  Currently always returns 0. */
+static int tegra_uart_hw_init(struct tegra_uart_port *t)
+{
+	unsigned char ier;
+
+	dev_vdbg(t->uport.dev, "+tegra_uart_hw_init\n");
+
+	t->fcr_shadow = 0;
+	t->mcr_shadow = 0;
+	t->lcr_shadow = 0;
+	t->ier_shadow = 0;
+	t->baud = 0;
+
+	clk_enable(t->clk);
+
+	/* Reset the UART controller to clear all previous status.*/
+	tegra_periph_reset_assert(t->clk);
+	udelay(100);
+	tegra_periph_reset_deassert(t->clk);
+	udelay(100);
+
+	t->rx_in_progress = 0;
+
+	/* Set the trigger level
+	 *
+	 * For PIO mode:
+	 *
+	 * For receive, this will interrupt the CPU after that many number of
+	 * bytes are received, for the remaining bytes the receive timeout
+	 * interrupt is received.
+	 *
+	 * Rx high watermark is set to 4.
+	 *
+	 * For transmit, if the transmit interrupt is enabled, this will
+	 * interrupt the CPU when the number of entries in the FIFO reaches the
+	 * low watermark.
+	 *
+	 * Tx low watermark is set to 8.
+	 *
+	 * For DMA mode:
+	 *
+	 * Set the Tx trigger to 4. This should match the DMA burst size that
+	 * programmed in the DMA registers.
+	 * */
+	t->fcr_shadow = UART_FCR_ENABLE_FIFO;
+	t->fcr_shadow |= UART_FCR_R_TRIG_01;
+	t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+	uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+	if (t->use_rx_dma) {
+		/* initialize the UART for a simple default configuration
+		 * so that the receive DMA buffer may be enqueued */
+		t->lcr_shadow = 3;  /* no parity, stop, 8 data bits */
+		tegra_set_baudrate(t, 115200);
+		t->fcr_shadow |= UART_FCR_DMA_SELECT;
+		uart_writeb(t, t->fcr_shadow, UART_FCR);
+		/* Fall back to PIO if the first RX DMA enqueue fails. */
+		if (tegra_start_dma_rx(t)) {
+			dev_err(t->uport.dev, "Rx DMA enqueue failed\n");
+			tegra_uart_free_rx_dma(t);
+			t->fcr_shadow &= ~UART_FCR_DMA_SELECT;
+			uart_writeb(t, t->fcr_shadow, UART_FCR);
+		}
+	}
+	else
+		uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+	t->rx_in_progress = 1;
+
+	/*
+	 * Enable IE_RXS for the receive status interrupts like line errors.
+	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+	 *
+	 * If using DMA mode, enable EORD instead of receive interrupt which
+	 * will interrupt after the UART is done with the receive instead of
+	 * the interrupt when the FIFO "threshold" is reached.
+	 *
+	 * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
+	 * the DATA is sitting in the FIFO and couldn't be transferred to the
+	 * DMA as the DMA size alignment(4 bytes) is not met. EORD will be
+	 * triggered when there is a pause of the incoming data stream for 4
+	 * characters long.
+	 *
+	 * For pauses in the data which is not aligned to 4 bytes, we get
+	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
+	 * then the EORD.
+	 *
+	 * Don't get confused, believe in the magic of nvidia hw...:-)
+	 */
+	ier = 0;
+	ier |= UART_IER_RLSI | UART_IER_RTOIE;
+	if (t->use_rx_dma)
+		ier |= UART_IER_EORD;
+	else
+		ier |= UART_IER_RDI;
+	t->ier_shadow = ier;
+	uart_writeb(t, ier, UART_IER);
+
+	dev_vdbg(t->uport.dev, "-tegra_uart_hw_init\n");
+	return 0;
+}
+
+/* Allocate a continuous-mode RX DMA channel plus a coherent bounce
+ * buffer and pre-fill the request descriptor.  Returns 0 on success,
+ * -ENODEV if no channel is available, -ENOMEM if the buffer allocation
+ * fails (the channel is released again in that case). */
+static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
+{
+	dma_addr_t rx_dma_phys;
+	void *rx_dma_virt;
+
+	t->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS);
+	if (!t->rx_dma) {
+		dev_err(t->uport.dev, "%s: failed to allocate RX DMA.\n", __func__);
+		return -ENODEV;
+	}
+
+	t->rx_dma_req.size = UART_RX_DMA_BUFFER_SIZE;
+	rx_dma_virt = dma_alloc_coherent(t->uport.dev,
+		t->rx_dma_req.size, &rx_dma_phys, GFP_KERNEL);
+	if (!rx_dma_virt) {
+		dev_err(t->uport.dev, "DMA buffers allocate failed\n");
+		goto fail;
+	}
+	t->rx_dma_req.dest_addr = rx_dma_phys;
+	t->rx_dma_req.virt_addr = rx_dma_virt;
+
+	/* UART data register -> memory, 8-bit device side, 32-bit
+	 * memory side, request line selected per port. */
+	t->rx_dma_req.source_addr = (unsigned long)t->uport.mapbase;
+	t->rx_dma_req.source_wrap = 4;
+	t->rx_dma_req.dest_wrap = 0;
+	t->rx_dma_req.to_memory = 1;
+	t->rx_dma_req.source_bus_width = 8;
+	t->rx_dma_req.dest_bus_width = 32;
+	t->rx_dma_req.req_sel = dma_req_sel[t->uport.line];
+	t->rx_dma_req.complete = tegra_rx_dma_complete_callback;
+	t->rx_dma_req.threshold = tegra_rx_dma_threshold_callback;
+	t->rx_dma_req.dev = t;
+
+	return 0;
+fail:
+	/* The original called tegra_uart_free_rx_dma() here, but that
+	 * helper returns immediately while t->use_rx_dma is still false,
+	 * leaking the just-allocated channel.  Free it directly. */
+	tegra_dma_free_channel(t->rx_dma);
+	t->rx_dma = NULL;
+	return -ENOMEM;
+}
+
+/* serial core .startup: allocate TX/RX DMA resources (falling back to
+ * PIO when allocation fails), initialize the hardware and install the
+ * interrupt handler. */
+static int tegra_startup(struct uart_port *u)
+{
+	struct tegra_uart_port *t = container_of(u,
+		struct tegra_uart_port, uport);
+	int ret = 0;
+
+	/* NOTE(review): redundant — t was already initialized above. */
+	t = container_of(u, struct tegra_uart_port, uport);
+	sprintf(t->port_name, "tegra_uart_%d", u->line);
+
+	t->use_tx_dma = false;
+	if (!TX_FORCE_PIO) {
+		t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+		if (t->tx_dma)
+			t->use_tx_dma = true;
+		else
+			pr_err("%s: failed to allocate TX DMA.\n", __func__);
+	}
+	if (t->use_tx_dma) {
+		/* memory -> UART data register: 32-bit memory side,
+		 * 8-bit device side, request line per port. */
+		t->tx_dma_req.instance = u->line;
+		t->tx_dma_req.complete = tegra_tx_dma_complete_callback;
+		t->tx_dma_req.to_memory = 0;
+
+		t->tx_dma_req.dest_addr = (unsigned long)t->uport.mapbase;
+		t->tx_dma_req.dest_wrap = 4;
+		t->tx_dma_req.source_wrap = 0;
+		t->tx_dma_req.source_bus_width = 32;
+		t->tx_dma_req.dest_bus_width = 8;
+		t->tx_dma_req.req_sel = dma_req_sel[t->uport.line];
+		t->tx_dma_req.dev = t;
+		t->tx_dma_req.size = 0;
+		/* NOTE(review): dma_map_single() result is not checked
+		 * with dma_mapping_error() — confirm and fix upstream. */
+		t->xmit_dma_addr = dma_map_single(t->uport.dev,
+			t->uport.state->xmit.buf, UART_XMIT_SIZE,
+			DMA_TO_DEVICE);
+	}
+	t->tx_in_progress = 0;
+
+	t->use_rx_dma = false;
+	if (!RX_FORCE_PIO) {
+		if (!tegra_uart_init_rx_dma(t))
+			t->use_rx_dma = true;
+	}
+
+	ret = tegra_uart_hw_init(t);
+	if (ret)
+		goto fail;
+
+	dev_dbg(u->dev, "Requesting IRQ %d\n", u->irq);
+	msleep(1);
+
+	ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+		t->port_name, t);
+	if (ret) {
+		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+		goto fail;
+	}
+	dev_dbg(u->dev,"Started UART port %d\n", u->line);
+
+	return 0;
+fail:
+	dev_err(u->dev, "Tegra UART startup failed\n");
+	return ret;
+}
+
+/* serial core .shutdown: reverse of tegra_startup() — quiesce the
+ * hardware, release the DMA channels/mappings and free the IRQ. */
+static void tegra_shutdown(struct uart_port *u)
+{
+	struct tegra_uart_port *t;
+
+	t = container_of(u, struct tegra_uart_port, uport);
+	dev_vdbg(u->dev, "+tegra_shutdown\n");
+
+	tegra_uart_hw_deinit(t);
+
+	t->rx_in_progress = 0;
+	t->tx_in_progress = 0;
+
+	tegra_uart_free_rx_dma(t);
+	if (t->use_tx_dma) {
+		tegra_dma_free_channel(t->tx_dma);
+		t->tx_dma = NULL;
+		t->use_tx_dma = false;
+		dma_unmap_single(t->uport.dev, t->xmit_dma_addr, UART_XMIT_SIZE,
+			DMA_TO_DEVICE);
+		t->xmit_dma_addr = 0;
+	}
+
+	free_irq(u->irq, t);
+	dev_vdbg(u->dev, "-tegra_shutdown\n");
+}
+
+/* serial core .get_mctrl.  The hardware handles CTS itself and does
+ * not expose RI/CD/DSR readback, so every status line is reported as
+ * permanently asserted. */
+static unsigned int tegra_get_mctrl(struct uart_port *u)
+{
+	return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
+}
+
+/* Update the RTS bit in the MCR shadow, touching the hardware only
+ * when the value actually changes. */
+static void set_rts(struct tegra_uart_port *t, bool active)
+{
+	unsigned char new_mcr = active ?
+		(t->mcr_shadow | UART_MCR_RTS) :
+		(t->mcr_shadow & ~UART_MCR_RTS);
+
+	if (new_mcr != t->mcr_shadow) {
+		uart_writeb(t, new_mcr, UART_MCR);
+		t->mcr_shadow = new_mcr;
+	}
+}
+
+/* Update the DTR bit in the MCR shadow, touching the hardware only
+ * when the value actually changes. */
+static void set_dtr(struct tegra_uart_port *t, bool active)
+{
+	unsigned char new_mcr = active ?
+		(t->mcr_shadow | UART_MCR_DTR) :
+		(t->mcr_shadow & ~UART_MCR_DTR);
+
+	if (new_mcr != t->mcr_shadow) {
+		uart_writeb(t, new_mcr, UART_MCR);
+		t->mcr_shadow = new_mcr;
+	}
+}
+
+/* serial core .set_mctrl: record whether the upper layer wants RTS
+ * asserted, then program the RTS and DTR lines accordingly. */
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl)
+{
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+
+	dev_dbg(u->dev, "tegra_set_mctrl called with %d\n", mctrl);
+
+	t->rts_active = !!(mctrl & TIOCM_RTS);
+	set_rts(t, t->rts_active);
+	set_dtr(t, !!(mctrl & TIOCM_DTR));
+}
+
+/* serial core .break_ctl: drive or release the break condition via
+ * LCR's set-break bit, keeping the shadow register in sync. */
+static void tegra_break_ctl(struct uart_port *u, int break_ctl)
+{
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+	unsigned char lcr = t->lcr_shadow;
+
+	if (break_ctl)
+		lcr |= UART_LCR_SBC;
+	else
+		lcr &= ~UART_LCR_SBC;
+
+	uart_writeb(t, lcr, UART_LCR);
+	t->lcr_shadow = lcr;
+}
+
+/* serial core .request_port — no extra resources to claim. */
+static int tegra_request_port(struct uart_port *u)
+{
+	return 0;
+}
+
+/* serial core .release_port — nothing to release. */
+static void tegra_release_port(struct uart_port *u)
+{
+
+}
+
+/* serial core .tx_empty: the transmitter is considered idle iff no
+ * PIO or DMA transmit is currently in flight. */
+static unsigned int tegra_tx_empty(struct uart_port *u)
+{
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+	unsigned long flags;
+	unsigned int ret;
+
+	dev_vdbg(u->dev, "+tegra_tx_empty\n");
+
+	spin_lock_irqsave(&u->lock, flags);
+	ret = t->tx_in_progress ? 0 : TIOCSER_TEMT;
+	spin_unlock_irqrestore(&u->lock, flags);
+
+	dev_vdbg(u->dev, "-tegra_tx_empty\n");
+	return ret;
+}
+
+/* serial core .stop_tx: when transmitting via DMA, pull the request
+ * off the channel; a dequeue return of 0 means it was still queued, so
+ * complete it by hand to keep the circular-buffer accounting right. */
+static void tegra_stop_tx(struct uart_port *u)
+{
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+
+	if (!t->use_tx_dma)
+		return;
+
+	if (!tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req))
+		tegra_tx_dma_complete_req(t, &t->tx_dma_req);
+}
+
+/* serial core .enable_ms — modem-status interrupts are not enabled by
+ * this driver; intentionally empty. */
+static void tegra_enable_ms(struct uart_port *u)
+{
+}
+
+#define UART_CLOCK_ACCURACY 5
+
+/* Program the divisor latch (DLL/DLM) for the requested baud rate,
+ * using round-to-nearest division of clk/16.  No-op when the rate is
+ * unchanged. */
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud)
+{
+	unsigned long rate;
+	unsigned int divisor;
+	unsigned char lcr;
+
+	if (t->baud == baud)
+		return;
+
+	rate = clk_get_rate(t->clk);
+
+	/* divisor = round((rate / 16) / baud).  The original used
+	 * do_div() on this 32-bit variable; do_div() requires a u64
+	 * lvalue, so plain integer division is the correct form here and
+	 * computes the same value. */
+	divisor = (rate / 16 + baud / 2) / baud;
+
+	/* Open the divisor latch, write DLL/DLM, close it again. */
+	lcr = t->lcr_shadow;
+	lcr |= UART_LCR_DLAB;
+	uart_writeb(t, lcr, UART_LCR);
+
+	uart_writel(t, divisor & 0xFF, UART_TX);
+	uart_writel(t, ((divisor >> 8) & 0xFF), UART_IER);
+
+	lcr &= ~UART_LCR_DLAB;
+	uart_writeb(t, lcr, UART_LCR);
+	uart_readb(t, UART_SCR); /* Dummy read to ensure the write is posted */
+
+	t->baud = baud;
+	wait_sym_time(t, 2); /* wait two character intervals at new rate */
+	dev_dbg(t->uport.dev, "Baud %u clock freq %lu and divisor of %u\n",
+		baud, rate, divisor);
+}
+
+/* serial core .set_termios: translate the requested termios into LCR
+ * (parity, word length, stop bits), baud divisor and MCR flow-control
+ * bits, all under the port lock. */
+static void tegra_set_termios(struct uart_port *u, struct ktermios *termios,
+	struct ktermios *oldtermios)
+{
+	struct tegra_uart_port *t;
+	unsigned int baud;
+	unsigned long flags;
+	unsigned int lcr;
+	unsigned int c_cflag = termios->c_cflag;
+	unsigned char mcr;
+
+	t = container_of(u, struct tegra_uart_port, uport);
+	dev_vdbg(t->uport.dev, "+tegra_set_termios\n");
+
+	spin_lock_irqsave(&u->lock, flags);
+
+	/* Changing configuration, it is safe to stop any rx now */
+	if (t->rts_active)
+		set_rts(t, false);
+
+	/* Parity: mark/space (CMSPAR) is not implemented. */
+	lcr = t->lcr_shadow;
+	lcr &= ~UART_LCR_PARITY;
+	if (PARENB == (c_cflag & PARENB)) {
+		if (CMSPAR == (c_cflag & CMSPAR)) {
+			/* FIXME What is space parity? */
+			/* data |= SPACE_PARITY; */
+		} else if (c_cflag & PARODD) {
+			lcr |= UART_LCR_PARITY;
+			lcr &= ~UART_LCR_EPAR;
+			lcr &= ~UART_LCR_SPAR;
+		} else {
+			lcr |= UART_LCR_PARITY;
+			lcr |= UART_LCR_EPAR;
+			lcr &= ~UART_LCR_SPAR;
+		}
+	}
+
+	/* Word length; default is 8 data bits. */
+	lcr &= ~UART_LCR_WLEN8;
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		lcr |= UART_LCR_WLEN5;
+		break;
+	case CS6:
+		lcr |= UART_LCR_WLEN6;
+		break;
+	case CS7:
+		lcr |= UART_LCR_WLEN7;
+		break;
+	default:
+		lcr |= UART_LCR_WLEN8;
+		break;
+	}
+
+	/* Stop bits */
+	if (termios->c_cflag & CSTOPB)
+		lcr |= UART_LCR_STOP;
+	else
+		lcr &= ~UART_LCR_STOP;
+
+	uart_writeb(t, lcr, UART_LCR);
+	t->lcr_shadow = lcr;
+
+	/* Baud rate. */
+	baud = uart_get_baud_rate(u, termios, oldtermios, 200, 4000000);
+	tegra_set_baudrate(t, baud);
+
+	/* Flow control: hardware CTS; RTS is driven by software here. */
+	if (termios->c_cflag & CRTSCTS) {
+		mcr = t->mcr_shadow;
+		mcr |= UART_MCR_CTS_EN;
+		mcr &= ~UART_MCR_RTS_EN;
+		t->mcr_shadow = mcr;
+		uart_writeb(t, mcr, UART_MCR);
+		t->use_cts_control = true;
+		/* if top layer has asked to set rts active then do so here */
+		if (t->rts_active)
+			set_rts(t, true);
+	} else {
+		mcr = t->mcr_shadow;
+		mcr &= ~UART_MCR_CTS_EN;
+		mcr &= ~UART_MCR_RTS_EN;
+		t->mcr_shadow = mcr;
+		uart_writeb(t, mcr, UART_MCR);
+		t->use_cts_control = false;
+	}
+
+	/* update the port timeout based on new settings */
+	uart_update_timeout(u, termios->c_cflag, baud);
+
+	spin_unlock_irqrestore(&u->lock, flags);
+	dev_vdbg(t->uport.dev, "-tegra_set_termios\n");
+	return;
+}
+
+/*
+ * serial core .flush_buffer: discard any TX data already handed to the
+ * DMA engine.  Invoked when the TX circular buffer is reset.
+ */
+static void tegra_flush_buffer(struct uart_port *u)
+{
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+
+	dev_vdbg(u->dev, "%s called", __func__);
+
+	if (!t->use_tx_dma)
+		return;
+
+	/* A dequeue return of 0 means the request was still queued;
+	 * complete it so the buffer accounting stays consistent. */
+	if (!tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req))
+		tegra_tx_dma_complete_req(t, &t->tx_dma_req);
+	t->tx_dma_req.size = 0;
+}
+
+
+/* serial core .pm hook — intentionally empty; the controller clock is
+ * managed in tegra_uart_hw_init()/tegra_uart_hw_deinit() instead. */
+static void tegra_pm(struct uart_port *u, unsigned int state,
+	unsigned int oldstate)
+{
+
+}
+
+/* serial core .type: no descriptive port-type string is provided. */
+static const char *tegra_type(struct uart_port *u)
+{
+	return NULL;
+}
+
+/* uart_ops vtable wiring the serial core to this driver. */
+static struct uart_ops tegra_uart_ops = {
+	.tx_empty	= tegra_tx_empty,
+	.set_mctrl	= tegra_set_mctrl,
+	.get_mctrl	= tegra_get_mctrl,
+	.stop_tx	= tegra_stop_tx,
+	.start_tx	= tegra_start_tx,
+	.stop_rx	= tegra_stop_rx,
+	.flush_buffer	= tegra_flush_buffer,
+	.enable_ms	= tegra_enable_ms,
+	.break_ctl	= tegra_break_ctl,
+	.startup	= tegra_startup,
+	.shutdown	= tegra_shutdown,
+	.set_termios	= tegra_set_termios,
+	.pm		= tegra_pm,
+	.type		= tegra_type,
+	.request_port	= tegra_request_port,
+	.release_port	= tegra_release_port,
+};
+
+static int tegra_uart_probe(struct platform_device *pdev);
+static int __devexit tegra_uart_remove(struct platform_device *pdev);
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state);
+static int tegra_uart_resume(struct platform_device *pdev);
+
+/* Platform glue: binds to devices named "tegra_uart". */
+static struct platform_driver tegra_uart_platform_driver = {
+	.remove		= tegra_uart_remove,
+	.probe		= tegra_uart_probe,
+	.suspend	= tegra_uart_suspend,
+	.resume		= tegra_uart_resume,
+	.driver		= {
+		.name	= "tegra_uart"
+	}
+};
+
+/* High-speed ports appear as /dev/ttyHS<n>; up to 5 instances, no
+ * console support. */
+static struct uart_driver tegra_uart_driver =
+{
+	.owner		= THIS_MODULE,
+	.driver_name	= "tegra_uart",
+	.dev_name	= "ttyHS",
+	.cons		= 0,
+	.nr		= 5,
+};
+
+/* Platform suspend: hand the port to the serial core's suspend path,
+ * then make sure no TX-completion work is still pending. */
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	/* probe() stores &t->uport as drvdata, so fetch the uart_port and
+	 * recover the wrapper with container_of() instead of relying on
+	 * uport being the first struct member. */
+	struct uart_port *u = platform_get_drvdata(pdev);
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+
+	/* Valid instances are 0 .. nr-1 (the original's "> nr" was an
+	 * off-by-one that let id == nr through). */
+	if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+		pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+	uart_suspend_port(&tegra_uart_driver, u);
+
+	flush_work(&t->tx_work);
+	return 0;
+}
+
+/* Platform resume: hand the port back to the serial core. */
+static int tegra_uart_resume(struct platform_device *pdev)
+{
+	/* drvdata is the uart_port stored by probe(); no container_of()
+	 * needed since only the port itself is used here. */
+	struct uart_port *u = platform_get_drvdata(pdev);
+
+	/* Valid instances are 0 .. nr-1 (was "> nr": off-by-one). */
+	if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+		pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+	uart_resume_port(&tegra_uart_driver, u);
+	return 0;
+}
+
+
+
+/* Platform remove: unregister the port and release everything probe()
+ * acquired (clock reference, drvdata, the wrapper allocation). */
+static int __devexit tegra_uart_remove(struct platform_device *pdev)
+{
+	/* probe() stores &t->uport as drvdata; recover the wrapper with
+	 * container_of() rather than a type-punned read. */
+	struct uart_port *u = platform_get_drvdata(pdev);
+	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
+						 uport);
+
+	/* Valid instances are 0 .. nr-1 (was "> nr": off-by-one). */
+	if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+		pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+	uart_remove_one_port(&tegra_uart_driver, u);
+
+	platform_set_drvdata(pdev, NULL);
+
+	pr_info("Unregistered UART port %s%d\n",
+		tegra_uart_driver.dev_name, u->line);
+	/* Drop the clock reference taken in probe() (was leaked). */
+	clk_put(t->clk);
+	kfree(t);
+	return 0;
+}
+
+/* Platform probe: allocate the per-port state, pick up the MMIO
+ * region, IRQ and clock, then register the port with the serial core.
+ * All failure paths now release what was acquired (the original leaked
+ * the allocation and left stale drvdata on the resource/clock error
+ * paths). */
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+	struct tegra_uart_port *t;
+	struct uart_port *u;
+	struct resource *resource;
+	int ret;
+
+	/* Valid instances are 0 .. nr-1 (was "> nr": off-by-one). */
+	if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr) {
+		pr_err("Invalid Uart instance (%d)\n", pdev->id);
+		return -ENODEV;
+	}
+
+	t = kzalloc(sizeof(struct tegra_uart_port), GFP_KERNEL);
+	if (!t) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	u = &t->uport;
+	u->dev = &pdev->dev;
+	platform_set_drvdata(pdev, u);
+	u->line = pdev->id;
+	u->ops = &tegra_uart_ops;
+	u->type = ~PORT_UNKNOWN;
+	u->fifosize = 32;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!resource)) {
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	u->mapbase = resource->start;
+	u->membase = IO_ADDRESS(u->mapbase);
+	if (unlikely(!u->membase)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	u->irq = platform_get_irq(pdev, 0);
+	if (unlikely(u->irq < 0)) {
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	u->regshift = 2;
+
+	/* clk_get() reports failure with ERR_PTR(), never NULL, so the
+	 * original "!t->clk" test could not fire. */
+	t->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(t->clk)) {
+		dev_err(&pdev->dev, "Couldn't get the clock\n");
+		ret = PTR_ERR(t->clk);
+		goto fail;
+	}
+
+	/* Initialize before registering: the work item may be scheduled
+	 * as soon as the port is live. */
+	INIT_WORK(&t->tx_work, tegra_tx_dma_complete_work);
+
+	ret = uart_add_one_port(&tegra_uart_driver, u);
+	if (ret) {
+		pr_err("%s: Failed(%d) to add uart port %s%d\n",
+			__func__, ret, tegra_uart_driver.dev_name, u->line);
+		goto fail_put_clk;
+	}
+
+	pr_info("Registered UART port %s%d\n",
+		tegra_uart_driver.dev_name, u->line);
+	return 0;
+
+fail_put_clk:
+	clk_put(t->clk);
+fail:
+	platform_set_drvdata(pdev, NULL);
+	kfree(t);
+	return ret;
+}
+
+/* Module init: register the uart_driver first, then the platform
+ * driver; unwind the former if the latter fails. */
+static int __init tegra_uart_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&tegra_uart_driver);
+	if (unlikely(ret)) {
+		pr_err("Could not register %s driver\n",
+			tegra_uart_driver.driver_name);
+		return ret;
+	}
+
+	ret = platform_driver_register(&tegra_uart_platform_driver);
+	if (unlikely(ret)) {
+		/* Log-message typo fixed: "platfrom" -> "platform". */
+		pr_err("Could not register the UART platform driver\n");
+		uart_unregister_driver(&tegra_uart_driver);
+		return ret;
+	}
+
+	pr_info("Initialized tegra uart driver\n");
+	return 0;
+}
+
+/* Module exit: tear down in reverse registration order. */
+static void __exit tegra_uart_exit(void)
+{
+	pr_info("Unloading tegra uart driver\n");
+	platform_driver_unregister(&tegra_uart_platform_driver);
+	uart_unregister_driver(&tegra_uart_driver);
+}
+
+module_init(tegra_uart_init);
+module_exit(tegra_uart_exit);
+MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
help
SPI driver for Freescale STMP37xx/378x SoC SSP interface
+config SPI_TEGRA
+ tristate "Nvidia Tegra SPI controller"
+ depends on ARCH_TEGRA
+ select TEGRA_SYSTEM_DMA
+ help
+ SPI driver for NVidia Tegra SoCs
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
+obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
--- /dev/null
+/*
+ * Driver for Nvidia TEGRA spi controller.
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/spi/spi.h>
+
+#include <mach/dma.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+static const unsigned long spi_tegra_req_sels[] = {
+ TEGRA_DMA_REQ_SEL_SL2B1,
+ TEGRA_DMA_REQ_SEL_SL2B2,
+ TEGRA_DMA_REQ_SEL_SL2B3,
+ TEGRA_DMA_REQ_SEL_SL2B4,
+};
+
+#define BB_LEN 32
+
+/* Per-controller state for the Tegra SLINK SPI master.  `lock` guards
+ * the message `queue` and the cur_* transfer-progress fields. */
+struct spi_tegra_data {
+	struct spi_master	*master;
+	struct platform_device	*pdev;
+	spinlock_t		lock;
+
+	struct clk		*clk;
+	void __iomem		*base;
+	unsigned long		phys;
+
+	u32			cur_speed;	/* 0 => controller clock gated */
+
+	struct list_head	queue;		/* pending spi_messages */
+	struct spi_transfer	*cur;		/* transfer in flight */
+	unsigned		cur_pos;
+	unsigned		cur_len;
+	unsigned		cur_bytes_per_word;
+
+	/* The tegra spi controller has a bug which causes the first word
+	 * in PIO transactions to be garbage. Since packed DMA transactions
+	 * require transfers to be 4 byte aligned we need a bounce buffer
+	 * for the generic case.
+	 */
+	struct tegra_dma_req	rx_dma_req;
+	struct tegra_dma_channel *rx_dma;
+	u32			*rx_bb;		/* RX bounce buffer */
+	dma_addr_t		rx_bb_phys;
+	bool			is_suspended;
+	unsigned long		save_slink_cmd;	/* SLINK_COMMAND across suspend */
+};
+
+
+/* 32-bit register accessors; all SLINK registers are offsets from the
+ * controller's mapped base. */
+static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
+					    unsigned long reg)
+{
+	return readl(tspi->base + reg);
+}
+
+static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
+				    unsigned long val,
+				    unsigned long reg)
+{
+	writel(val, tspi->base + reg);
+}
+
+/* Start the current chunk: program the DMA block size, queue the RX
+ * DMA request (which paces the transfer and drives completion), then
+ * set DMA_EN to let the controller run. */
+static void spi_tegra_go(struct spi_tegra_data *tspi)
+{
+	unsigned long val;
+
+	/* Make the TX FIFO writes visible before starting the DMA. */
+	wmb();
+
+	/* Block size field holds (number of 32-bit words - 1). */
+	val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
+	val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
+	val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1);
+	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+
+	tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+
+	val |= SLINK_DMA_EN;
+	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+}
+
+/* Queue up to BB_LEN words of the transfer into the TX FIFO, packing
+ * cur_bytes_per_word bytes little-endian into each 32-bit FIFO word,
+ * program the word count into SLINK_COMMAND and size the RX DMA
+ * request to match.  Returns the number of payload bytes queued.
+ * NOTE(review): t->tx_buf is dereferenced unconditionally — an RX-only
+ * transfer (tx_buf == NULL) would fault here; confirm callers always
+ * supply a tx buffer. */
+static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
+				  struct spi_transfer *t)
+{
+	unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
+			   tspi->cur_bytes_per_word);
+	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
+	int i, j;
+	unsigned long val;
+
+	val = spi_tegra_readl(tspi, SLINK_COMMAND);
+	val &= ~SLINK_WORD_SIZE(~0);
+	val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
+	spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+	for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+		val = 0;
+		for (j = 0; j < tspi->cur_bytes_per_word; j++)
+			val |= tx_buf[i + j] << j * 8;
+
+		spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
+	}
+
+	/* RX DMA always moves whole 32-bit words, one per payload word. */
+	tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4;
+
+	return len;
+}
+
+/* Unpack the received words from the DMA bounce buffer into the
+ * client's rx buffer (little-endian, cur_bytes_per_word bytes per
+ * 32-bit word).  Returns the number of payload bytes copied.
+ * NOTE(review): t->rx_buf is written unconditionally — a TX-only
+ * transfer (rx_buf == NULL) would fault here; confirm against callers. */
+static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
+				  struct spi_transfer *t)
+{
+	unsigned len = tspi->cur_len;
+	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
+	int i, j;
+	unsigned long val;
+
+	for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+		val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
+		for (j = 0; j < tspi->cur_bytes_per_word; j++)
+			rx_buf[i + j] = (val >> (j * 8)) & 0xff;
+	}
+
+	return len;
+}
+
+/* Configure the controller for one spi_transfer (clock rate, word
+ * size, SPI mode, chip select), prime the TX FIFO and kick off the
+ * DMA. */
+static void spi_tegra_start_transfer(struct spi_device *spi,
+	struct spi_transfer *t)
+{
+	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+	u32 speed;
+	u8 bits_per_word;
+	unsigned long val;
+
+	/* Per-transfer overrides fall back to the device defaults. */
+	speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+	bits_per_word = t->bits_per_word ? t->bits_per_word :
+		spi->bits_per_word;
+
+	tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
+	if (speed != tspi->cur_speed)
+		clk_set_rate(tspi->clk, speed);
+
+	/* cur_speed == 0 means the controller clock is currently gated. */
+	if (tspi->cur_speed == 0)
+		clk_enable(tspi->clk);
+
+	tspi->cur_speed = speed;
+
+	val = spi_tegra_readl(tspi, SLINK_COMMAND2);
+	/* NOTE(review): this parses as "val &= ((~SLINK_SS_EN_CS(~0)) |
+	 * SLINK_RXEN | SLINK_TXEN)", i.e. RXEN/TXEN left over from a
+	 * previous transfer are PRESERVED, not cleared, before the
+	 * conditional re-enable below.  If clearing was intended it
+	 * should read "val &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN |
+	 * SLINK_TXEN);" — confirm against the SLINK documentation
+	 * before changing. */
+	val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN;
+	if (t->rx_buf)
+		val |= SLINK_RXEN;
+	if (t->tx_buf)
+		val |= SLINK_TXEN;
+	val |= SLINK_SS_EN_CS(spi->chip_select);
+	val |= SLINK_SPIE;
+	val |= SLINK_SS_SETUP(3);
+	spi_tegra_writel(tspi, val, SLINK_COMMAND2);
+
+	val = spi_tegra_readl(tspi, SLINK_COMMAND);
+	val &= ~SLINK_BIT_LENGTH(~0);
+	val |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+	/* FIXME: should probably control CS manually so that we can be sure
+	 * it does not go low between transfer and to support delay_usecs
+	 * correctly.
+	 */
+	val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
+
+	if (spi->mode & SPI_CPHA)
+		val |= SLINK_CK_SDA;
+
+	if (spi->mode & SPI_CPOL)
+		val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+	else
+		val |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+	val |= SLINK_M_S;
+
+	spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+	spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
+
+	tspi->cur = t;
+	tspi->cur_pos = 0;
+	tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);
+
+	spi_tegra_go(tspi);
+}
+
+/* Reset the message bookkeeping and start its first transfer; the
+ * remaining transfers are chained from the RX DMA completion path. */
+static void spi_tegra_start_message(struct spi_device *spi,
+	struct spi_message *m)
+{
+	struct spi_transfer *first;
+
+	m->actual_length = 0;
+	m->status = 0;
+
+	first = list_first_entry(&m->transfers, struct spi_transfer,
+				 transfer_list);
+	spi_tegra_start_transfer(spi, first);
+}
+
+/* RX DMA completion: runs after each DMA chunk.  Waits for BSY to
+ * clear, acks RDY, drains the bounce buffer into the client's rx
+ * buffer, then either continues the current transfer, chains the next
+ * transfer or message, or gates the controller clock when idle. */
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+	struct spi_tegra_data *tspi = req->dev;
+	unsigned long flags;
+	struct spi_message *m;
+	struct spi_device *spi;
+	int timeout = 0;
+	unsigned long val;
+
+	/* the SPI controller may come back with both the BSY and RDY bits
+	 * set. In this case we need to wait for the BSY bit to clear so
+	 * that we are sure the DMA is finished. 1000 reads was empirically
+	 * determined to be long enough.
+	 */
+	while (timeout++ < 1000) {
+		if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
+			break;
+	}
+
+	spin_lock_irqsave(&tspi->lock, flags);
+
+	/* RDY is write-one-to-clear. */
+	val = spi_tegra_readl(tspi, SLINK_STATUS);
+	val |= SLINK_RDY;
+	spi_tegra_writel(tspi, val, SLINK_STATUS);
+
+	m = list_first_entry(&tspi->queue, struct spi_message, queue);
+
+	/* Controller never went idle above: report an I/O error. */
+	if (timeout >= 1000)
+		m->status = -EIO;
+
+	/* m->state carries the spi_device — presumably stashed by
+	 * spi_tegra_transfer(); confirm against that function. */
+	spi = m->state;
+
+	tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
+	m->actual_length += tspi->cur_pos;
+
+	if (tspi->cur_pos < tspi->cur->len) {
+		/* More of the current transfer remains. */
+		tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
+		spi_tegra_go(tspi);
+	} else if (!list_is_last(&tspi->cur->transfer_list,
+				 &m->transfers)) {
+		/* Advance to the next transfer of this message. */
+		tspi->cur = list_first_entry(&tspi->cur->transfer_list,
+					     struct spi_transfer,
+					     transfer_list);
+		spi_tegra_start_transfer(spi, tspi->cur);
+	} else {
+		/* Message finished: complete it and start the next one,
+		 * or gate the clock if the queue is empty. */
+		list_del(&m->queue);
+
+		m->complete(m->context);
+
+		if (!list_empty(&tspi->queue)) {
+			m = list_first_entry(&tspi->queue, struct spi_message,
+					     queue);
+			spi = m->state;
+			spi_tegra_start_message(spi, m);
+		} else {
+			clk_disable(tspi->clk);
+			tspi->cur_speed = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&tspi->lock, flags);
+}
+
+static int spi_tegra_setup(struct spi_device *spi)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long cs_bit;
+ unsigned long val;
+ unsigned long flags;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+
+ switch (spi->chip_select) {
+ case 0:
+ cs_bit = SLINK_CS_POLARITY;
+ break;
+
+ case 1:
+ cs_bit = SLINK_CS_POLARITY1;
+ break;
+
+ case 2:
+ cs_bit = SLINK_CS_POLARITY2;
+ break;
+
+	case 3:
+ cs_bit = SLINK_CS_POLARITY3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_bit;
+ else
+ val &= ~cs_bit;
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long flags;
+ int was_empty;
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+		if (t->bits_per_word > 32)
+ return -EINVAL;
+
+ if (t->len == 0)
+ return -EINVAL;
+
+ if (!t->rx_buf && !t->tx_buf)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ if (WARN_ON(tspi->is_suspended)) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return -EBUSY;
+ }
+
+ m->state = spi;
+
+ was_empty = list_empty(&tspi->queue);
+ list_add_tail(&m->queue, &tspi->queue);
+
+ if (was_empty)
+ spi_tegra_start_message(spi, m);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int __init spi_tegra_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *tspi);
+ if (master == NULL) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ master->bus_num = pdev->id;
+
+ master->setup = spi_tegra_setup;
+ master->transfer = spi_tegra_transfer;
+ master->num_chipselect = 4;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->pdev = pdev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ tspi->phys = r->start;
+ tspi->base = ioremap(r->start, r->end - r->start + 1);
+ if (!tspi->base) {
+ dev_err(&pdev->dev, "can't ioremap iomem\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ tspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+		ret = tspi->clk ? PTR_ERR(tspi->clk) : -ENODEV;
+ goto err2;
+ }
+
+ INIT_LIST_HEAD(&tspi->queue);
+
+ tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+ TEGRA_DMA_SHARED);
+ if (!tspi->rx_dma) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+
+ tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ &tspi->rx_bb_phys, GFP_KERNEL);
+ if (!tspi->rx_bb) {
+ dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
+ ret = -ENOMEM;
+ goto err4;
+ }
+
+ tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
+ tspi->rx_dma_req.to_memory = 1;
+ tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
+ tspi->rx_dma_req.dest_bus_width = 32;
+ tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
+ tspi->rx_dma_req.source_bus_width = 32;
+ tspi->rx_dma_req.source_wrap = 4;
+ tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->rx_dma_req.dev = tspi;
+
+ ret = spi_register_master(master);
+
+ if (ret < 0)
+ goto err5;
+
+ return ret;
+
+err5:
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+err4:
+ tegra_dma_free_channel(tspi->rx_dma);
+err3:
+ clk_put(tspi->clk);
+err2:
+ iounmap(tspi->base);
+err1:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+err0:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit spi_tegra_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+
+ clk_put(tspi->clk);
+ iounmap(tspi->base);
+
+ spi_master_put(master);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+ unsigned limit = 500;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+ tspi->is_suspended = true;
+ WARN_ON(!list_empty(&tspi->queue));
+
+ while (!list_empty(&tspi->queue) && limit--) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&tspi->lock, flags);
+ }
+
+ tspi->save_slink_cmd = spi_tegra_readl(tspi, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+
+static int spi_tegra_resume(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+ clk_enable(tspi->clk);
+ spi_tegra_writel(tspi, tspi->save_slink_cmd, SLINK_COMMAND);
+ clk_disable(tspi->clk);
+ tspi->cur_speed = 0;
+ tspi->is_suspended = false;
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+#endif
+
+MODULE_ALIAS("platform:spi_tegra");
+
+static struct platform_driver spi_tegra_driver = {
+ .driver = {
+ .name = "spi_tegra",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(spi_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = spi_tegra_suspend,
+ .resume = spi_tegra_resume,
+#endif
+};
+
+static int __init spi_tegra_init(void)
+{
+ return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
+}
+module_init(spi_tegra_init);
+
+static void __exit spi_tegra_exit(void)
+{
+ platform_driver_unregister(&spi_tegra_driver);
+}
+module_exit(spi_tegra_exit);
+
+MODULE_LICENSE("GPL");
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
-static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
+static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
-static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
+static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
static int asus_oled_probe(struct usb_interface *interface,
const struct usb_device_id *id)
skb_pull_rcsum(skb, hdr_size);
/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
+ kfree_skb(skb);
+ return;
+ }
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
return ARRAY_SIZE(formats);
}
-struct cx25821_fmt *format_by_fourcc(unsigned int fourcc)
+struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
pix_format =
(dev->channels[ch_id].pixel_formats ==
PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV;
- fh->fmt = format_by_fourcc(pix_format);
+ fh->fmt = cx25821_format_by_fourcc(pix_format);
v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
if (0 != err)
return err;
- fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
fh->vidq.field = f->fmt.pix.field;
/* check if width and height is valid based on set standard */
enum v4l2_field field;
unsigned int maxw, maxh;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
#define FORMAT_FLAGS_PACKED 0x01
extern struct cx25821_fmt formats[];
-extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc);
+extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
t->value = temp; \
return count; \
} \
- static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
+ static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
show_int(enable);
show_int(offline);
adis16220_write_reset, 0);
#define IIO_DEV_ATTR_CAPTURE(_store) \
- IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0)
+ IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
VARIAX_PARAM_R(float, mix1);
VARIAX_PARAM_R(int, pickup_wiring);
-static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak);
-static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position,
+static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
+static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position,
pod_set_wah_position);
-static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO,
pod_get_compression_gain, pod_set_compression_gain);
-static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO,
pod_get_vol_pedal_position, pod_set_vol_pedal_position);
-static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO,
pod_get_compression_threshold,
pod_set_compression_threshold);
-static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan);
-static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup,
+static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
+static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup,
pod_set_amp_model_setup);
-static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model,
+static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model,
pod_set_amp_model);
-static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive);
-static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass);
-static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid);
-static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
-static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble);
-static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid,
+static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
+static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
+static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
+static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
+static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble);
+static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid,
pod_set_highmid);
-static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol,
+static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol,
pod_set_chan_vol);
-static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix,
+static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix,
pod_set_reverb_mix);
-static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup,
+static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup,
pod_set_effect_setup);
-static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO,
pod_get_band_1_frequency, pod_set_band_1_frequency);
-static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence,
+static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence,
pod_set_presence);
-static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO,
pod_get_treble__bass, pod_set_treble__bass);
-static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO,
pod_get_noise_gate_enable, pod_set_noise_gate_enable);
-static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold,
+static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold,
pod_set_gate_threshold);
-static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time,
+static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time,
pod_set_gate_decay_time);
-static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable,
+static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable,
pod_set_stomp_enable);
-static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable,
+static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable,
pod_set_comp_enable);
-static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time,
+static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time,
pod_set_stomp_time);
-static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable,
+static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable,
pod_set_delay_enable);
-static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1,
+static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1,
pod_set_mod_param_1);
-static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1,
+static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1,
pod_set_delay_param_1);
-static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO,
pod_get_delay_param_1_note_value,
pod_set_delay_param_1_note_value);
-static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO,
pod_get_band_2_frequency__bass,
pod_set_band_2_frequency__bass);
-static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2,
+static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2,
pod_set_delay_param_2);
-static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO,
pod_get_delay_volume_mix, pod_set_delay_volume_mix);
-static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3,
+static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3,
pod_set_delay_param_3);
-static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable,
+static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable,
pod_set_reverb_enable);
-static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type,
+static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type,
pod_set_reverb_type);
-static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay,
+static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay,
pod_set_reverb_decay);
-static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone,
+static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone,
pod_set_reverb_tone);
-static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO,
pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
-static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post,
+static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post,
pod_set_reverb_pre_post);
-static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO,
pod_get_band_2_frequency, pod_set_band_2_frequency);
-static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO,
pod_get_band_3_frequency__bass,
pod_set_band_3_frequency__bass);
-static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable,
+static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable,
pod_set_wah_enable);
-static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO,
pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
-static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO,
pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
-static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO,
pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
-static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post,
+static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post,
pod_set_eq_pre_post);
-static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post,
+static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post,
pod_set_volume_pre_post);
-static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model,
+static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model,
pod_set_di_model);
-static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay,
+static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay,
pod_set_di_delay);
-static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable,
+static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable,
pod_set_mod_enable);
-static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO,
pod_get_mod_param_1_note_value,
pod_set_mod_param_1_note_value);
-static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2,
+static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2,
pod_set_mod_param_2);
-static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3,
+static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3,
pod_set_mod_param_3);
-static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4,
+static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4,
pod_set_mod_param_4);
-static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5,
+static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5,
pod_set_mod_param_5);
-static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix,
+static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix,
pod_set_mod_volume_mix);
-static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post,
+static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post,
pod_set_mod_pre_post);
-static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO,
pod_get_modulation_model, pod_set_modulation_model);
-static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO,
pod_get_band_3_frequency, pod_set_band_3_frequency);
-static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO,
pod_get_band_4_frequency__bass,
pod_set_band_4_frequency__bass);
-static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO,
pod_get_mod_param_1_double_precision,
pod_set_mod_param_1_double_precision);
-static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO,
pod_get_delay_param_1_double_precision,
pod_set_delay_param_1_double_precision);
-static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable,
+static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable,
pod_set_eq_enable);
-static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap);
-static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
+static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO,
pod_get_volume_tweak_pedal_assign,
pod_set_volume_tweak_pedal_assign);
-static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO,
pod_get_band_5_frequency, pod_set_band_5_frequency);
-static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner);
-static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection,
+static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
+static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection,
pod_set_mic_selection);
-static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model,
+static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model,
pod_set_cabinet_model);
-static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model,
+static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model,
pod_set_stomp_model);
-static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel,
+static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel,
pod_set_roomlevel);
-static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO,
pod_get_band_4_frequency, pod_set_band_4_frequency);
-static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO,
pod_get_band_6_frequency, pod_set_band_6_frequency);
-static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO,
pod_get_stomp_param_1_note_value,
pod_set_stomp_param_1_note_value);
-static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2,
+static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2,
pod_set_stomp_param_2);
-static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3,
+static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3,
pod_set_stomp_param_3);
-static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4,
+static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4,
pod_set_stomp_param_4);
-static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5,
+static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5,
pod_set_stomp_param_5);
-static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6,
+static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6,
pod_set_stomp_param_6);
-static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO,
pod_get_amp_switch_select, pod_set_amp_switch_select);
-static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4,
+static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4,
pod_set_delay_param_4);
-static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5,
+static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5,
pod_set_delay_param_5);
-static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post,
+static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post,
pod_set_delay_pre_post);
-static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model,
+static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model,
pod_set_delay_model);
-static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO,
pod_get_delay_verb_model, pod_set_delay_verb_model);
-static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb,
+static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb,
pod_set_tempo_msb);
-static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb,
+static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb,
pod_set_tempo_lsb);
-static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model,
+static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model,
pod_set_wah_model);
-static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume,
+static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume,
pod_set_bypass_volume);
-static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off,
+static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off,
pod_set_fx_loop_on_off);
-static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO,
pod_get_tweak_param_select, pod_set_tweak_param_select);
-static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage,
+static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage,
pod_set_amp1_engage);
-static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain,
+static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain,
pod_set_band_1_gain);
-static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO,
pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
-static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain,
+static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain,
pod_set_band_2_gain);
-static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO,
pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
-static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain,
+static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain,
pod_set_band_3_gain);
-static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO,
pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
-static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO,
pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
-static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain,
+static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain,
pod_set_band_4_gain);
-static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO,
pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable,
return count;
}
-static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
-static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
+static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
+static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
/* MIDI device destructor */
static int snd_line6_midi_free(struct snd_device *device)
#undef GET_SYSTEM_PARAM
/* POD special files: */
-static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, pod_set_channel);
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel);
static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
-static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump);
-static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
-static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish);
+static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
+static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
+static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write);
-static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
-static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
+static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
+static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
-static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_amp_setup);
-static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, pod_set_retrieve_channel);
-static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_effects_setup);
-static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, pod_set_routing);
+static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup);
+static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel);
+static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup);
+static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing);
static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write);
-static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, pod_set_store_amp_setup);
-static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, pod_set_store_channel);
-static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, pod_set_store_effects_setup);
-static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
-static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
+static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup);
+static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel);
+static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup);
+static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
+static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
#if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
#endif
/*
return count;
}
-static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read,
+static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read,
toneport_set_led_red);
-static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read,
+static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read,
toneport_set_led_green);
static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
#endif
/* Variax workbench special files: */
-static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, variax_set_model);
-static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, variax_set_volume);
-static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone);
+static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model);
+static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume);
+static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
-static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, variax_set_active);
+static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active);
#if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
-static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
#endif
};
const struct ata_port_info *ppi[] = { &info, NULL };
- ret = ata_pci_sff_init_one(pdev, ppi, &phison_sht, NULL, 0);
+ ret = ata_pci_bmdma_init_one(pdev, ppi, &phison_sht, NULL, 0);
dev_dbg(&pdev->dev, "phison_init_one(), ret = %x\n", ret);
{USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
{USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
{USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
+ {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */
{USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */
{USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
{USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
{USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
{USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
{USB_DEVICE(0x2019, 0xED14)}, /* Planex Communications, Inc. */
+ {USB_DEVICE(0x0411, 0x015D)}, /* Buffalo Airstation WLI-UC-GN */
{} /* Terminating entry */
};
}
udelay(10);
}
- if (TryCnt == TC_3W_POLL_MAX_TRY_CNT)
- panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp);
+ if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
+ printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:"
+ " %#X RE|WE bits are not clear!!\n", u1bTmp);
+ dump_stack();
+ return 0;
+ }
// RTL8187S HSSI Read/Write Function
u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
int idx;
int ByteCnt = nDataBufBitCnt / 8;
//printk("%d\n",nDataBufBitCnt);
- if ((nDataBufBitCnt % 8) != 0)
- panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n",
- nDataBufBitCnt);
+ if ((nDataBufBitCnt % 8) != 0) {
+ printk(KERN_ERR "rtl8187se: "
+ "HwThreeWire(): nDataBufBitCnt(%d)"
+ " should be multiple of 8!!!\n",
+ nDataBufBitCnt);
+ dump_stack();
+ nDataBufBitCnt += 8;
+ nDataBufBitCnt &= ~7;
+ }
- if (nDataBufBitCnt > 64)
- panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n",
- nDataBufBitCnt);
+ if (nDataBufBitCnt > 64) {
+ printk(KERN_ERR "rtl8187se: HwThreeWire():"
+ " nDataBufBitCnt(%d) should <= 64!!!\n",
+ nDataBufBitCnt);
+ dump_stack();
+ nDataBufBitCnt = 64;
+ }
for(idx = 0; idx < ByteCnt; idx++)
{
}
return count;
}
-static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO,
get_silent_state, set_silent_state);
__ATTR_RO(metrics_bytes_sent),
__ATTR_RO(metrics_cpu_kcycles_used),
__ATTR_RO(metrics_misc),
- __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
+ __ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
__ATTR_RW(use_defio),
};
ud->eh_ops.shutdown(ud);
ud->event &= ~USBIP_EH_SHUTDOWN;
-
- break;
}
- /* Stop the error handler. */
- if (ud->event & USBIP_EH_BYE)
- return -1;
-
/* Reset the device. */
if (ud->event & USBIP_EH_RESET) {
ud->eh_ops.reset(ud);
ud->event &= ~USBIP_EH_RESET;
-
- break;
}
/* Mark the device as unusable. */
ud->eh_ops.unusable(ud);
ud->event &= ~USBIP_EH_UNUSABLE;
-
- break;
}
- /* NOTREACHED */
- printk(KERN_ERR "%s: unknown event\n", __func__);
- return -1;
+ /* Stop the error handler. */
+ if (ud->event & USBIP_EH_BYE)
+ return -1;
}
return 0;
* spin_unlock(&vdev->ud.lock); */
spin_unlock_irqrestore(&the_controller->lock, flags);
+
+ usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
return ret;
}
-static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
+static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
static ssize_t read_human_status(struct device *dev,
struct device_attribute *attr, char *buf)
return ret;
}
-static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO,
- read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
return ret;
}
-static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
+static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
#define UEA_ATTR(name, reset) \
\
static int proc_connectinfo(struct dev_state *ps, void __user *arg)
{
- struct usbdevfs_connectinfo ci;
+ struct usbdevfs_connectinfo ci = {
+ .devnum = ps->dev->devnum,
+ .slow = ps->dev->speed == USB_SPEED_LOW
+ };
- ci.devnum = ps->dev->devnum;
- ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ if (hcd->driver->unmap_urb_for_dma)
+ hcd->driver->unmap_urb_for_dma(hcd, urb);
+ else
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+}
+
+void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
enum dma_data_direction dir;
URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
}
+EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_for_dma);
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
+{
+ if (hcd->driver->map_urb_for_dma)
+ return hcd->driver->map_urb_for_dma(hcd, urb, mem_flags);
+ else
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+}
+
+int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
{
enum dma_data_direction dir;
int ret = 0;
}
if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
URB_SETUP_MAP_LOCAL)))
- unmap_urb_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
}
return ret;
}
+EXPORT_SYMBOL_GPL(usb_hcd_map_urb_for_dma);
/*-------------------------------------------------------------------------*/
else
i = udev->descriptor.bMaxPacketSize0;
if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
- if (udev->speed != USB_SPEED_FULL ||
+ if (udev->speed == USB_SPEED_LOW ||
!(i == 8 || i == 16 || i == 32 || i == 64)) {
- dev_err(&udev->dev, "ep0 maxpacket = %d\n", i);
+ dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
retval = -EMSGSIZE;
goto fail;
}
- dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ if (udev->speed == USB_SPEED_FULL)
+ dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ else
+ dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
{
int i;
- dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
- skip_ep0 ? "non-ep0" : "all");
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
-
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
+
+ dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+ skip_ep0 ? "non-ep0" : "all");
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
}
/**
config USB_GADGET_FSL_USB2
boolean "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC || ARCH_MXC || ARCH_TEGRA
select USB_GADGET_DUALSPEED
help
Some of Freescale PowerPC processors have a High Speed
ifeq ($(CONFIG_ARCH_MXC),y)
fsl_usb2_udc-objs += fsl_mxc_udc.o
endif
+ifeq ($(CONFIG_ARCH_TEGRA),y)
+fsl_usb2_udc-objs += fsl_tegra_udc.o
+endif
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
} else {
disable_irq(gpio_to_irq(udc->vbus_pin));
}
+ } else {
+ /* gpio_request failed, so use -EINVAL so gpio_is_valid() returns false */
+ udc->vbus_pin = -EINVAL;
}
}
*/
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- if (cdev->config == NULL)
- return value;
-
- f = cdev->config->interface[intf];
+ if (cdev->config)
+ f = cdev->config->interface[intf];
break;
case USB_RECIP_ENDPOINT:
*/
usb_ep_autoconfig_reset(cdev->gadget);
- /* standardized runtime overrides for device ID data */
- if (idVendor)
- cdev->desc.idVendor = cpu_to_le16(idVendor);
- if (idProduct)
- cdev->desc.idProduct = cpu_to_le16(idProduct);
- if (bcdDevice)
- cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
-
/* composite gadget needs to assign strings for whole device (like
* serial number), register function drivers, potentially update
* power state and consumption, etc
cdev->desc = *composite->dev;
cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
+ /* standardized runtime overrides for device ID data */
+ if (idVendor)
+ cdev->desc.idVendor = cpu_to_le16(idVendor);
+ if (idProduct)
+ cdev->desc.idProduct = cpu_to_le16(idProduct);
+ if (bcdDevice)
+ cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
+
/* strings can't be assigned before bind() allocates the
* releavnt identifiers
*/
--- /dev/null
+/*
+ * Description:
+ * Helper functions to support the tegra USB controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <mach/usb_phy.h>
+
+static struct tegra_usb_phy *phy;
+static struct clk *udc_clk;
+static struct clk *emc_clk;
+static void *udc_base;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err;
+ int instance;
+ struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+
+ udc_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc_clk)) {
+ dev_err(&pdev->dev, "Can't get udc clock\n");
+ return PTR_ERR(udc_clk);
+ }
+
+ clk_enable(udc_clk);
+
+ emc_clk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc_clk)) {
+ dev_err(&pdev->dev, "Can't get emc clock\n");
+ err = PTR_ERR(emc_clk);
+ goto err_emc;
+ }
+
+ clk_enable(emc_clk);
+ clk_set_rate(emc_clk, 300000000);
+
+ /* we have to remap the registers ourselves as fsl_udc does not
+ * export them for us.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENXIO;
+ goto err0;
+ }
+ udc_base = ioremap(res->start, resource_size(res));
+ if (!udc_base) {
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ instance = pdev->id;
+ if (instance == -1)
+ instance = 0;
+
+ phy = tegra_usb_phy_open(instance, udc_base, pdata->phy_config,
+ TEGRA_USB_PHY_MODE_DEVICE);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "Can't open phy\n");
+ err = PTR_ERR(phy);
+ goto err1;
+ }
+
+ tegra_usb_phy_power_on(phy);
+
+ return 0;
+err1:
+ iounmap(udc_base);
+err0:
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
+err_emc:
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+ return err;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+
+void fsl_udc_clk_release(void)
+{
+ tegra_usb_phy_close(phy);
+
+ iounmap(udc_base);
+
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+
+ clk_disable(emc_clk);
+ clk_put(emc_clk);
+}
+
+void fsl_udc_clk_suspend(void)
+{
+ tegra_usb_phy_power_off(phy);
+ clk_disable(udc_clk);
+ clk_disable(emc_clk);
+}
+
+void fsl_udc_clk_resume(void)
+{
+ clk_enable(emc_clk);
+ clk_enable(udc_clk);
+ tegra_usb_phy_power_on(phy);
+}
#include "fsl_usb2_udc.h"
+#ifdef CONFIG_ARCH_TEGRA
+#define DRIVER_DESC "NVidia Tegra High-Speed USB SOC Device Controller driver"
+#else
#define DRIVER_DESC "Freescale High-Speed USB SOC Device Controller driver"
+#endif
#define DRIVER_AUTHOR "Li Yang/Jiang Bo"
#define DRIVER_VERSION "Apr 20, 2007"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define STATUS_BUFFER_SIZE 8
+#ifdef CONFIG_ARCH_TEGRA
+static const char driver_name[] = "fsl-tegra-udc";
+#else
static const char driver_name[] = "fsl-usb2-udc";
+#endif
static const char driver_desc[] = DRIVER_DESC;
static struct usb_dr_device *dr_regs;
};
static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+static int reset_queues(struct fsl_udc *udc);
#ifdef CONFIG_PPC32
#define fsl_readl(addr) in_le32(addr)
#define fsl_writel(val32, addr) writel(val32, addr)
#endif
+/*
+ * High speed test mode packet(53 bytes).
+ * See USB 2.0 spec, section 7.1.20.
+ */
+static const u8 fsl_udc_test_packet[53] = {
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+};
+
/********************************************************************
* Internal Used Function
********************************************************************/
Internal Hardware related function
------------------------------------------------------------------*/
+#define FSL_UDC_RESET_TIMEOUT 1000
+static int dr_controller_reset(struct fsl_udc *udc)
+{
+ unsigned int tmp;
+ unsigned long timeout;
+
+ /* Stop and reset the usb controller */
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp &= ~USB_CMD_RUN_STOP;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp |= USB_CMD_CTRL_RESET;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ /* Wait for reset to complete */
+ timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+ while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc reset timeout!\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
static int dr_controller_setup(struct fsl_udc *udc)
{
unsigned int tmp, portctrl;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
unsigned int ctrl;
#endif
+#ifdef CONFIG_ARCH_TEGRA
unsigned long timeout;
-#define FSL_UDC_RESET_TIMEOUT 1000
+#endif
+ int status;
/* Config PHY interface */
portctrl = fsl_readl(&dr_regs->portsc1);
}
fsl_writel(portctrl, &dr_regs->portsc1);
- /* Stop and reset the usb controller */
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp &= ~USB_CMD_RUN_STOP;
- fsl_writel(tmp, &dr_regs->usbcmd);
+ status = dr_controller_reset(udc);
+ if (status)
+ return status;
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp |= USB_CMD_CTRL_RESET;
- fsl_writel(tmp, &dr_regs->usbcmd);
+ /* Set the controller as device mode */
+ tmp = fsl_readl(&dr_regs->usbmode);
+ tmp |= USB_MODE_CTRL_MODE_DEVICE;
+ /* Disable Setup Lockout */
+ tmp |= USB_MODE_SETUP_LOCK_OFF;
+ fsl_writel(tmp, &dr_regs->usbmode);
- /* Wait for reset to complete */
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to switch to device mode */
timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
- while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ while ((fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_DEVICE) !=
+ USB_MODE_CTRL_MODE_DEVICE) {
if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
+ ERR("udc device mode setup timeout!\n");
return -ETIMEDOUT;
}
cpu_relax();
}
-
- /* Set the controller as device mode */
- tmp = fsl_readl(&dr_regs->usbmode);
- tmp |= USB_MODE_CTRL_MODE_DEVICE;
- /* Disable Setup Lockout */
- tmp |= USB_MODE_SETUP_LOCK_OFF;
- fsl_writel(tmp, &dr_regs->usbmode);
+#endif
/* Clear the setup status */
fsl_writel(0, &dr_regs->usbsts);
fsl_readl(&dr_regs->endpointlistaddr));
/* Config control enable i/o output, cpu endian register */
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
ctrl = __raw_readl(&usb_sys_regs->control);
ctrl |= USB_CTRL_IOENB;
__raw_writel(ctrl, &usb_sys_regs->control);
static void dr_controller_run(struct fsl_udc *udc)
{
u32 temp;
+#ifdef CONFIG_ARCH_TEGRA
+ unsigned long timeout;
+#define FSL_UDC_RUN_TIMEOUT 1000
+#endif
+ /* Clear stopped bit */
+ udc->stopped = 0;
/* Enable DR irq reg */
temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
fsl_writel(temp, &dr_regs->usbintr);
- /* Clear stopped bit */
- udc->stopped = 0;
-
/* Set the controller as device mode */
temp = fsl_readl(&dr_regs->usbmode);
temp |= USB_MODE_CTRL_MODE_DEVICE;
temp |= USB_CMD_RUN_STOP;
fsl_writel(temp, &dr_regs->usbcmd);
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to start */
+ timeout = jiffies + FSL_UDC_RUN_TIMEOUT;
+ while ((fsl_readl(&dr_regs->usbcmd) & USB_CMD_RUN_STOP) !=
+ USB_CMD_RUN_STOP) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc start timeout!\n");
+ return;
+ }
+ cpu_relax();
+ }
+#endif
+
return;
}
? (1 << (ep_index(ep) + 16))
: (1 << (ep_index(ep)));
+ /* Flush all the dTD structs out to memory */
+ wmb();
+
/* check if the pipe is empty */
if (!(list_empty(&ep->queue))) {
/* Add td to the end */
lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
lastreq->tail->next_td_ptr =
cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
+ wmb();
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
goto out;
* @is_last: return flag if it is the last dTD of the request
* return: pointer to the built dTD */
static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
- dma_addr_t *dma, int *is_last)
+ dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
{
u32 swap_temp;
struct ep_td_struct *dtd;
*length = min(req->req.length - req->req.actual,
(unsigned)EP_MAX_LENGTH_TRANSFER);
- dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
+ dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
if (dtd == NULL)
return dtd;
}
/* Generate dtd chain for a request */
-static int fsl_req_to_dtd(struct fsl_req *req)
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
{
unsigned count;
int is_last;
dma_addr_t dma;
do {
- dtd = fsl_build_dtd(req, &count, &dma, &is_last);
+ dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
if (dtd == NULL)
return -ENOMEM;
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
+ enum dma_data_direction dir;
int is_iso = 0;
+ int status;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
VDBG("%s, bad params", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->desc)) {
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (unlikely(!ep->desc)) {
VDBG("%s, bad ep", __func__);
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
+
if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
- if (req->req.length > ep->ep.maxpacket)
+ if (req->req.length > ep->ep.maxpacket) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EMSGSIZE;
+ }
is_iso = 1;
}
- udc = ep->udc;
+ dir = ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
/* map virtual address to hardware */
if (req->req.dma == DMA_ADDR_INVALID) {
- req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
- req->req.buf,
- req->req.length, ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ req->req.dma = dma_map_single(udc->gadget.dev.parent,
+ req->req.buf, req->req.length, dir);
req->mapped = 1;
} else {
- dma_sync_single_for_device(ep->udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ dma_sync_single_for_device(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
req->mapped = 0;
}
req->req.actual = 0;
req->dtd_count = 0;
- spin_lock_irqsave(&udc->lock, flags);
/* build dtds and push them to device queue */
- if (!fsl_req_to_dtd(req)) {
- fsl_queue_td(ep, req);
- } else {
+ status = fsl_req_to_dtd(req, gfp_flags);
+ if (status)
+ goto err_unmap;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* re-check if the ep has not been disabled */
+ if (unlikely(!ep->desc)) {
spin_unlock_irqrestore(&udc->lock, flags);
- return -ENOMEM;
+ status = -EINVAL;
+ goto err_unmap;
}
+ fsl_queue_td(ep, req);
+
/* Update ep0 state */
if ((ep_index(ep) == 0))
udc->ep0_state = DATA_STATE_XMIT;
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
+
+err_unmap:
+ if (req->mapped) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+ return status;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+
VDBG("VBUS %s", is_active ? "on" : "off");
+
+ if (udc->transceiver) {
+ if (udc->vbus_active && !is_active) {
+ /* reset all internal Queues and inform client driver */
+ reset_queues(udc);
+ /* stop the controller and turn off the clocks */
+ dr_controller_stop(udc);
+ dr_controller_reset(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ fsl_udc_clk_suspend();
+ udc->vbus_active = 0;
+ udc->usb_state = USB_STATE_DEFAULT;
+ } else if (!udc->vbus_active && is_active) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ fsl_udc_clk_resume();
+ /* setup the controller in the device mode */
+ dr_controller_setup(udc);
+ /* setup EP0 for setup packet */
+ ep0_setup(udc);
+ /* initialize the USB and EP states */
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ udc->vbus_active = 1;
+ /* start the controller */
+ dr_controller_run(udc);
+ }
+ return 0;
+ }
+
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
req->req.complete = NULL;
req->dtd_count = 0;
- if (fsl_req_to_dtd(req) == 0)
+ if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
fsl_queue_td(ep, req);
else
return -ENOMEM;
req->dtd_count = 0;
/* prime the data phase */
- if ((fsl_req_to_dtd(req) == 0))
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
fsl_queue_td(ep, req);
else /* no mem */
goto stall;
ep0stall(udc);
}
+static void udc_test_mode(struct fsl_udc *udc, u32 test_mode)
+{
+ struct fsl_req *req;
+ struct fsl_ep *ep;
+ u32 portsc, bitmask;
+ unsigned long timeout;
+
+ /* Ack the ep0 IN */
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+
+ /* get the ep0 */
+ ep = &udc->eps[0];
+ bitmask = ep_is_in(ep)
+ ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep)));
+
+ timeout = jiffies + HZ;
+ /* Wait until ep0 IN endpoint txfr is complete */
+ while (!(fsl_readl(&dr_regs->endptcomplete) & bitmask)) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("Timeout for Ep0 IN Ack\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ switch (test_mode << PORTSCX_PTC_BIT_POS) {
+ case PORTSCX_PTC_JSTATE:
+ VDBG("TEST_J\n");
+ break;
+ case PORTSCX_PTC_KSTATE:
+ VDBG("TEST_K\n");
+ break;
+ case PORTSCX_PTC_SEQNAK:
+ VDBG("TEST_SE0_NAK\n");
+ break;
+ case PORTSCX_PTC_PACKET:
+ VDBG("TEST_PACKET\n");
+
+ /* get the ep and configure for IN direction */
+ ep = &udc->eps[0];
+ udc->ep0_dir = USB_DIR_IN;
+
+ /* Initialize ep0 status request structure */
+ req = container_of(fsl_alloc_request(NULL, GFP_ATOMIC),
+ struct fsl_req, req);
+ /* allocate a small amount of memory to get valid address */
+ req->req.buf = kmalloc(sizeof(fsl_udc_test_packet), GFP_ATOMIC);
+ req->req.dma = virt_to_phys(req->req.buf);
+
+ /* Fill in the request structure */
+ memcpy(req->req.buf, fsl_udc_test_packet, sizeof(fsl_udc_test_packet));
+ req->ep = ep;
+ req->req.length = sizeof(fsl_udc_test_packet);
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+ req->mapped = 0;
+
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ /* prime the data phase */
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+ fsl_queue_td(ep, req);
+ else /* no mem */
+ goto stall;
+
+ list_add_tail(&req->queue, &ep->queue);
+ udc->ep0_state = DATA_STATE_XMIT;
+ break;
+ case PORTSCX_PTC_FORCE_EN:
+ VDBG("TEST_FORCE_EN\n");
+ break;
+ default:
+ ERR("udc unknown test mode[%d]!\n", test_mode);
+ goto stall;
+ }
+
+ /* read the portsc register */
+ portsc = fsl_readl(&dr_regs->portsc1);
+ /* set the test mode selector */
+ portsc |= test_mode << PORTSCX_PTC_BIT_POS;
+ fsl_writel(portsc, &dr_regs->portsc1);
+
+ /*
+ * The device must have its power cycled to exit test mode.
+ * See USB 2.0 spec, section 9.4.9 for test modes operation in "Set Feature"
+ * See USB 2.0 spec, section 7.1.20 for test modes.
+ */
+ pr_info("udc entering the test mode, power cycle to exit test mode\n");
+ return;
+stall:
+ ep0stall(udc);
+}
+
static void setup_received_irq(struct fsl_udc *udc,
struct usb_ctrlrequest *setup)
{
{
int rc = -EOPNOTSUPP;
- if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ if (setup->bRequestType == USB_RECIP_DEVICE &&
+ wValue == USB_DEVICE_TEST_MODE) {
+ /*
+ * If the feature selector is TEST_MODE, then the most
+ * significant byte of wIndex is used to specify the specific
+ * test mode and the lower byte of wIndex must be zero.
+ */
+ udc_test_mode(udc, wIndex >> 8);
+ return;
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
int pipe = get_pipe_by_windex(wIndex);
struct fsl_ep *ep;
if (!bit_pos)
return;
+#ifdef CONFIG_ARCH_TEGRA
+ /* XXX what's going on here */
+ for (i = 0; i < udc->max_ep; i++) {
+#else
for (i = 0; i < udc->max_ep * 2; i++) {
+#endif
ep_num = i >> 1;
direction = i % 2;
/* Write 1s to the flush register */
fsl_writel(0xffffffff, &dr_regs->endptflush);
+#if defined(CONFIG_ARCH_TEGRA)
+ /* When the bus reset is seen on Tegra, the PORTSCX_PORT_RESET bit
+ * is not set */
+ VDBG("Bus reset");
+ /* Reset all the queues, include XD, dTD, EP queue
+ * head and TR Queue */
+ reset_queues(udc);
+ udc->usb_state = USB_STATE_DEFAULT;
+#else
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
VDBG("Bus reset");
/* Reset all the queues, include XD, dTD, EP queue
dr_controller_run(udc);
udc->usb_state = USB_STATE_ATTACHED;
}
+#endif
}
/*
irqreturn_t status = IRQ_NONE;
unsigned long flags;
+ spin_lock_irqsave(&udc->lock, flags);
+
/* Disable ISR for OTG host mode */
- if (udc->stopped)
+ if (udc->stopped) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return IRQ_NONE;
- spin_lock_irqsave(&udc->lock, flags);
+ }
+
irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
}
/* Enable DR IRQ reg and Set usbcmd reg Run bit */
- dr_controller_run(udc_controller);
- udc_controller->usb_state = USB_STATE_ATTACHED;
- udc_controller->ep0_state = WAIT_FOR_SETUP;
- udc_controller->ep0_dir = 0;
+ if (!udc_controller->transceiver) {
+ dr_controller_run(udc_controller);
+ udc_controller->usb_state = USB_STATE_ATTACHED;
+ udc_controller->ep0_state = WAIT_FOR_SETUP;
+ udc_controller->ep0_dir = 0;
+ }
+
printk(KERN_INFO "%s: bind to driver %s\n",
udc_controller->gadget.name, driver->driver.name);
if (!driver || driver != udc_controller->driver || !driver->unbind)
return -EINVAL;
- if (udc_controller->transceiver)
- otg_set_peripheral(udc_controller->transceiver, NULL);
-
/* stop DR, disable intr */
dr_controller_stop(udc_controller);
#include <linux/seq_file.h>
+#ifdef CONFIG_ARCH_TEGRA
+static const char proc_filename[] = "driver/fsl_tegra_udc";
+#else
static const char proc_filename[] = "driver/fsl_usb2_udc";
+#endif
static int fsl_proc_read(char *page, char **start, off_t off, int count,
int *eof, void *_dev)
size -= t;
next += t;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
tmp_reg = usb_sys_regs->snoop1;
t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
size -= t;
static void fsl_udc_release(struct device *dev)
{
complete(udc_controller->done);
+#ifndef CONFIG_ARCH_TEGRA
dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
udc_controller->ep_qh, udc_controller->ep_qh_dma);
+#endif
kfree(udc_controller);
}
return -1;
}
+#ifdef CONFIG_ARCH_TEGRA
+ /* Tegra uses hardware queue heads */
+ size = udc->max_ep * sizeof(struct ep_queue_head);
+ udc->ep_qh = (struct ep_queue_head *)((u8 *)dr_regs + QH_OFFSET);
+ udc->ep_qh_dma = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start +
+ QH_OFFSET;
+#else
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
if (size < QH_ALIGNMENT)
kfree(udc->eps);
return -1;
}
+#endif
udc->ep_qh_size = size;
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
/* allocate a small amount of memory to get valid address */
- udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
+ udc->status_req->req.buf = dma_alloc_coherent(&pdev->dev,
+ STATUS_BUFFER_SIZE, &udc->status_req->req.dma,
+ GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("alloc status_req buffer failed\n");
+#ifndef CONFIG_ARCH_TEGRA
+ dma_free_coherent(&pdev->dev, size, udc->ep_qh, udc->ep_qh_dma);
+#endif
+ kfree(udc->eps);
+ return -ENOMEM;
+ }
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
int ret = -ENODEV;
unsigned int i;
u32 dccparams;
+#if defined(CONFIG_ARCH_TEGRA)
+ struct resource *res_sys = NULL;
+#endif
if (strcmp(pdev->name, driver_name)) {
VDBG("Wrong device");
goto err_release_mem_region;
}
-#ifndef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_TEGRA)
+ /* If the PHY registers are NOT provided as a separate aperture, then
+ * we should be using the registers inside the controller aperture. */
+ res_sys = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_sys) {
+ usb_sys_regs = ioremap(res_sys->start, resource_size(res_sys));
+ if (!usb_sys_regs)
+ goto err_release_mem_region;
+ } else {
+ usb_sys_regs = (struct usb_sys_interface *)
+ ((u32)dr_regs + USB_DR_SYS_OFFSET);
+ }
+#endif
+
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
usb_sys_regs = (struct usb_sys_interface *)
((u32)dr_regs + USB_DR_SYS_OFFSET);
#endif
goto err_unregister;
}
create_proc_file();
+
+#ifdef CONFIG_USB_OTG_UTILS
+ udc_controller->transceiver = otg_get_transceiver();
+ if (udc_controller->transceiver) {
+ dr_controller_stop(udc_controller);
+ dr_controller_reset(udc_controller);
+ fsl_udc_clk_suspend();
+ udc_controller->vbus_active = 0;
+ udc_controller->usb_state = USB_STATE_DEFAULT;
+ otg_set_peripheral(udc_controller->transceiver, &udc_controller->gadget);
+ }
+#else
+#ifdef CONFIG_ARCH_TEGRA
+ /* Power down the phy if cable is not connected */
+ if (!(fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_VBUS_STATUS))
+ fsl_udc_clk_suspend();
+#endif
+#endif
+
return 0;
err_unregister:
return -ENODEV;
udc_controller->done = &done;
+ if (udc_controller->transceiver)
+ otg_set_peripheral(udc_controller->transceiver, NULL);
+
fsl_udc_clk_release();
/* DR has been stopped in usb_gadget_unregister_driver() */
remove_proc_file();
/* Free allocated memory */
- kfree(udc_controller->status_req->req.buf);
+ dma_free_coherent(&pdev->dev, STATUS_BUFFER_SIZE,
+ udc_controller->status_req->req.buf,
+ udc_controller->status_req->req.dma);
kfree(udc_controller->status_req);
kfree(udc_controller->eps);
-----------------------------------------------------------------*/
static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
+ if (udc_controller->transceiver &&
+ udc_controller->transceiver->state != OTG_STATE_B_PERIPHERAL)
+ return 0;
+
dr_controller_stop(udc_controller);
return 0;
}
*-----------------------------------------------------------------*/
static int fsl_udc_resume(struct platform_device *pdev)
{
+ if (udc_controller->transceiver &&
+ udc_controller->transceiver->state != OTG_STATE_B_PERIPHERAL)
+ return 0;
+
/* Enable DR irq reg and set controller Run */
if (udc_controller->stopped) {
dr_controller_setup(udc_controller);
};
/* non-EHCI USB system interface registers (Big Endian) */
+#ifdef CONFIG_ARCH_TEGRA
+struct usb_sys_interface {
+ u32 suspend_ctrl;
+ u32 vbus_sensors;
+ u32 vbus_wakeup;
+ u32 vbus_alt_status;
+ u32 legacy_ctrl;
+};
+#else
struct usb_sys_interface {
u32 snoop1;
u32 snoop2;
u8 res[236];
u32 control; /* General Purpose Control Register */
};
+#endif
/* ep0 transfer state */
#define WAIT_FOR_SETUP 0
/* Alignment requirements; must be a power of two */
#define DTD_ALIGNMENT 0x20
#define QH_ALIGNMENT 2048
+#define QH_OFFSET 0x1000
/* Controller dma boundary */
#define UDC_DMA_BOUNDARY 0x1000
+#define USB_SYS_VBUS_ASESSION_INT_EN 0x10000
+#define USB_SYS_VBUS_ASESSION_CHANGED 0x20000
+#define USB_SYS_VBUS_ASESSION 0x40000
+#define USB_SYS_VBUS_WAKEUP_ENABLE 0x40000000
+#define USB_SYS_VBUS_WAKEUP_INT_ENABLE 0x100
+#define USB_SYS_VBUS_WAKEUP_INT_STATUS 0x200
+#define USB_SYS_VBUS_STATUS 0x400
+
/*-------------------------------------------------------------------------*/
/* ### driver private data
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
struct platform_device;
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_ARCH_TEGRA)
int fsl_udc_clk_init(struct platform_device *pdev);
void fsl_udc_clk_finalize(struct platform_device *pdev);
void fsl_udc_clk_release(void);
+void fsl_udc_clk_suspend(void);
+void fsl_udc_clk_resume(void);
#else
static inline int fsl_udc_clk_init(struct platform_device *pdev)
{
static inline void fsl_udc_clk_release(void)
{
}
+static inline void fsl_udc_clk_suspend(void)
+{
+}
+static inline void fsl_udc_clk_resume(void)
+{
+}
#endif
#endif
MODULE_LICENSE("GPL");
-static unsigned short gfs_vendor_id = 0x0525; /* XXX NetChip */
-static unsigned short gfs_product_id = 0xa4ac; /* XXX */
+static unsigned short gfs_vendor_id = 0x1d6b; /* Linux Foundation */
+static unsigned short gfs_product_id = 0x0105; /* FunctionFS Gadget */
static struct usb_device_descriptor gfs_dev_desc = {
.bLength = sizeof gfs_dev_desc,
/***************************** Device Descriptor ****************************/
-#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */
-#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */
+#define MULTI_VENDOR_NUM 0x1d6b /* Linux Foundation */
+#define MULTI_PRODUCT_NUM 0x0104 /* Multifunction Composite Gadget */
enum {
---help---
Variation of ARC USB block used in some Freescale chips.
+config USB_EHCI_TEGRA
+ boolean "NVIDIA Tegra HCD support"
+ depends on USB_EHCI_HCD && ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ help
+ This driver enables support for the internal USB Host Controller
+ found in NVIDIA Tegra SoCs. The Tegra controller is EHCI compliant.
+
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
depends on USB_EHCI_HCD && PPC_OF
&debug_registers_fops))
goto file_error;
- if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
+ if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus,
&debug_lpm_fops))
goto file_error;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
- /* periodic qh self-unlinks on empty */
- if (!tmp)
- goto nogood;
- unlink_async (ehci, qh);
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ unlink_async(ehci, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
}
/* else FALL THROUGH */
default:
-nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
#define PLATFORM_DRIVER ehci_atmel_driver
#endif
+#ifdef CONFIG_USB_EHCI_TEGRA
+#include "ehci-tegra.c"
+#define PLATFORM_DRIVER tegra_ehci_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
+ && !ehci->port_reset_no_wait
&& time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
if (pdev->revision < 0xa4)
ehci->no_selective_suspend = 1;
break;
+
+ /* MCP89 chips on the MacBookAir3,1 give EPROTO when
+ * fetching device descriptors unless LPM is disabled.
+ * There are also intermittent problems enumerating
+ * devices with PPCD enabled.
+ */
+ case 0x0d9d:
+ ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
+ ehci->has_lpm = 0;
+ ehci->has_ppcd = 0;
+ ehci->command &= ~CMD_PPCEE;
+ break;
}
break;
case PCI_VENDOR_ID_VIA:
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
+ qh->stamp = ehci->periodic_stamp;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
+ wmb();
/* qtd completions reported later by interrupt */
}
}
clock &= mod - 1;
clock_frame = clock >> 3;
+ ++ehci->periodic_stamp;
for (;;) {
union ehci_shadow q, *q_p;
temp.qh = qh_get (q.qh);
type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
q = q.qh->qh_next;
- modified = qh_completions (ehci, temp.qh);
- if (unlikely(list_empty(&temp.qh->qtd_list) ||
- temp.qh->needs_rescan))
- intr_deschedule (ehci, temp.qh);
+ if (temp.qh->stamp != ehci->periodic_stamp) {
+ modified = qh_completions(ehci, temp.qh);
+ if (!modified)
+ temp.qh->stamp = ehci->periodic_stamp;
+ if (unlikely(list_empty(&temp.qh->qtd_list) ||
+ temp.qh->needs_rescan))
+ intr_deschedule(ehci, temp.qh);
+ }
qh_put (temp.qh);
break;
case Q_TYPE_FSTN:
free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
+ ++ehci->periodic_stamp;
} else {
now_uframe++;
now_uframe &= mod - 1;
--- /dev/null
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/irq.h>
+#include <linux/usb/otg.h>
+#include <mach/usb_phy.h>
+
+#define TEGRA_USB_DMA_ALIGN 32
+
+#define STS_SRI (1<<7) /* SOF Received */
+
+struct tegra_ehci_hcd {
+ struct ehci_hcd *ehci;
+ struct tegra_usb_phy *phy;
+ struct clk *clk;
+ struct clk *emc_clk;
+ struct otg_transceiver *transceiver;
+ int host_resumed;
+ int bus_suspended;
+ int port_resuming;
+ int power_down_on_bus_suspend;
+ enum tegra_usb_phy_port_speed port_speed;
+};
+
+static void tegra_ehci_power_up(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ clk_enable(tegra->emc_clk);
+ clk_enable(tegra->clk);
+ tegra_usb_phy_power_on(tegra->phy);
+ tegra->host_resumed = 1;
+}
+
+static void tegra_ehci_power_down(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ tegra->host_resumed = 0;
+ tegra_usb_phy_power_off(tegra->phy);
+ clk_disable(tegra->clk);
+ clk_disable(tegra->emc_clk);
+}
+
+static int tegra_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ u32 __iomem *status_reg;
+ u32 temp;
+ u32 usbsts_reg;
+ unsigned long flags;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /*
+ * ehci_hub_control() for USB_PORT_FEAT_ENABLE would clear the other
+ * write-one-to-clear bits by writing back the raw register read value,
+ * so USB_PORT_FEAT_ENABLE is handled here by masking those bits first
+ */
+ if (typeReq == ClearPortFeature && wValue == USB_PORT_FEAT_ENABLE) {
+ temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
+ ehci_writel(ehci, temp & ~PORT_PE, status_reg);
+ goto done;
+ }
+
+ else if (typeReq == GetPortStatus) {
+ temp = ehci_readl(ehci, status_reg);
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND) &&
+ time_after_eq(jiffies, ehci->reset_done[wIndex-1])) {
+ /* Resume completed, re-enable disconnect detection */
+ tegra->port_resuming = 0;
+ clear_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ ehci->reset_done[wIndex-1] = 0;
+ tegra_usb_phy_postresume(tegra->phy);
+ }
+ }
+
+ else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ /*
+ * If a transaction is in progress, there may be a delay in
+ * suspending the port. Poll until the port is suspended.
+ */
+ if (handshake(ehci, status_reg, PORT_SUSPEND,
+ PORT_SUSPEND, 5000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ /*
+ * Tegra host controller will time the resume operation to clear the bit
+ * when the port control state switches to HS or FS Idle. This behavior
+ * is different from EHCI where the host controller driver is required
+ * to set this bit to a zero after the resume duration is timed in the
+ * driver.
+ */
+ else if (typeReq == ClearPortFeature &&
+ wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ if (!(temp & PORT_SUSPEND))
+ goto done;
+
+ tegra->port_resuming = 1;
+
+ /* Disable disconnect detection during port resume */
+ tegra_usb_phy_preresume(tegra->phy);
+
+ ehci_dbg(ehci, "%s:USBSTS = 0x%x", __func__,
+ ehci_readl(ehci, &ehci->regs->status));
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, usbsts_reg, &ehci->regs->status);
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ udelay(20);
+
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, STS_SRI, 2000))
+ pr_err("%s: timeout set for STS_SRI\n", __func__);
+
+ usbsts_reg = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, usbsts_reg, &ehci->regs->status);
+
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, 0, 2000))
+ pr_err("%s: timeout clear STS_SRI\n", __func__);
+
+ if (handshake(ehci, &ehci->regs->status, STS_SRI, STS_SRI, 2000))
+ pr_err("%s: timeout set STS_SRI\n", __func__);
+
+ udelay(20);
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ /* start resume signaling */
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+ /* whoever resumes must GetPortStatus to complete it!! */
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
+static void tegra_ehci_restart(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci_reset(ehci);
+
+ /* setup the frame list and Async q heads */
+ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+ ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+ /* setup the command register and set the controller in RUN mode */
+ ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ ehci->command |= CMD_RUN;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ down_write(&ehci_cf_port_reset_rwsem);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ /* flush posted writes */
+ ehci_readl(ehci, &ehci->regs->command);
+ up_write(&ehci_cf_port_reset_rwsem);
+}
+
+static int tegra_usb_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct ehci_regs __iomem *hw = tegra->ehci->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+
+ tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+ ehci_halt(tegra->ehci);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+
+ tegra_ehci_power_down(hcd);
+ return 0;
+}
+
+static int tegra_usb_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_regs __iomem *hw = ehci->regs;
+ unsigned long val;
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ tegra_ehci_power_up(hcd);
+
+ if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
+ /* Wait for the phy to detect new devices
+ * before we restart the controller */
+ msleep(10);
+ goto restart;
+ }
+
+ /* Force the phy to keep data lines in suspend state */
+ tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+
+ /* Enable host mode */
+ tdi_reset(ehci);
+
+ /* Enable Port Power */
+ val = readl(&hw->port_status[0]);
+ val |= PORT_POWER;
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Check if the phy resumed from LP0. When the phy resumes from
+ * LP0, the USB registers will have been reset. */
+ if (!readl(&hw->async_next)) {
+ /* Program the field PTC based on the saved speed mode */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ val |= PORT_TEST_FORCE;
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
+ val |= PORT_TEST(6);
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= PORT_TEST(7);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Disable test mode by setting PTC field to NORMAL_OP */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+ }
+
+ /* Poll until CCS is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
+ PORT_CONNECT, 2000)) {
+ pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
+ goto restart;
+ }
+
+ /* Poll until PE is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_PE,
+ PORT_PE, 2000)) {
+ pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
+ goto restart;
+ }
+
+ /* Clear the PCD status, to avoid an interrupt taken upon resume */
+ val = readl(&hw->status);
+ val |= STS_PCD;
+ writel(val, &hw->status);
+
+ /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
+ val = readl(&hw->port_status[0]);
+ if ((val & PORT_POWER) && (val & PORT_PE)) {
+ val |= PORT_SUSPEND;
+ writel(val, &hw->port_status[0]);
+
+ /* Wait until port suspend completes */
+ if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
+ PORT_SUSPEND, 1000)) {
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n",
+ __func__);
+ goto restart;
+ }
+ }
+
+ tegra_ehci_phy_restore_end(tegra->phy);
+ return 0;
+
+restart:
+ if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ tegra_ehci_phy_restore_end(tegra->phy);
+
+ tegra_ehci_restart(hcd);
+ return 0;
+}
+
+static void tegra_ehci_shutdown(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ /* ehci_shutdown touches the USB controller registers; make sure
+ * the controller is clocked before calling it */
+ if (!tegra->host_resumed)
+ tegra_ehci_power_up(hcd);
+
+ ehci_shutdown(hcd);
+}
+
+static int tegra_ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(readl(&ehci->caps->hc_capbase));
+
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* switch to host mode */
+ hcd->has_tt = 1;
+ ehci_reset(ehci);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci->sbrn = 0x20;
+
+ ehci_port_power(ehci, 1);
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ int error_status = 0;
+
+ error_status = ehci_bus_suspend(hcd);
+ if (!error_status && tegra->power_down_on_bus_suspend) {
+ tegra_usb_suspend(hcd);
+ tegra->bus_suspended = 1;
+ }
+
+ return error_status;
+}
+
+static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
+ tegra_usb_resume(hcd);
+ tegra->bus_suspended = 0;
+ }
+
+ return ehci_bus_resume(hcd);
+}
+#endif
+
+struct temp_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void free_temp_buffer(struct urb *urb)
+{
+ enum dma_data_direction dir;
+ struct temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ temp = container_of(urb->transfer_buffer, struct temp_buffer,
+ data);
+
+ if (dir == DMA_FROM_DEVICE)
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ struct temp_buffer *temp, *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct temp_buffer) + TEGRA_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct temp_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
+
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (dir == DMA_TO_DEVICE)
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ free_temp_buffer(urb);
+
+ return ret;
+}
+
+static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ free_temp_buffer(urb);
+}
+
+static const struct hc_driver tegra_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tegra EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ .reset = tegra_ehci_setup,
+ .irq = ehci_irq,
+
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = tegra_ehci_shutdown,
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .map_urb_for_dma = tegra_ehci_map_urb_for_dma,
+ .unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .get_frame_number = ehci_get_frame,
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = tegra_ehci_hub_control,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+#ifdef CONFIG_PM
+ .bus_suspend = tegra_ehci_bus_suspend,
+ .bus_resume = tegra_ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct tegra_ehci_hcd *tegra;
+ struct tegra_ehci_platform_data *pdata;
+ int err = 0;
+ int irq;
+ int instance = pdev->id;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data missing\n");
+ return -EINVAL;
+ }
+
+ tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get ehci clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto fail_clken;
+
+ tegra->emc_clk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(tegra->emc_clk)) {
+ dev_err(&pdev->dev, "Can't get emc clock\n");
+ err = PTR_ERR(tegra->emc_clk);
+ goto fail_emc_clk;
+ }
+
+ clk_enable(tegra->emc_clk);
+ clk_set_rate(tegra->emc_clk, 300000000);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
+ TEGRA_USB_PHY_MODE_HOST);
+ if (IS_ERR(tegra->phy)) {
+ dev_err(&pdev->dev, "Failed to open USB phy\n");
+ err = -ENXIO;
+ goto fail_phy;
+ }
+
+ err = tegra_usb_phy_power_on(tegra->phy);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to power on the phy\n");
+ goto fail;
+ }
+
+ tegra->host_resumed = 1;
+ tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
+ tegra->ehci = hcd_to_ehci(hcd);
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+ set_irq_flags(irq, IRQF_VALID);
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->operating_mode == TEGRA_USB_OTG) {
+ tegra->transceiver = otg_get_transceiver();
+ if (tegra->transceiver)
+ otg_set_host(tegra->transceiver, &hcd->self);
+ }
+#endif
+
+ err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ return err;
+
+fail:
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+ tegra_usb_phy_close(tegra->phy);
+fail_phy:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(tegra->emc_clk);
+ clk_put(tegra->emc_clk);
+fail_emc_clk:
+ clk_disable(tegra->clk);
+fail_clken:
+ clk_put(tegra->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(tegra);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_resume(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ return tegra_usb_resume(hcd);
+}
+
+static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ if (time_before(jiffies, tegra->ehci->next_statechange))
+ msleep(10);
+
+ return tegra_usb_suspend(hcd);
+}
+#endif
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra == NULL || hcd == NULL)
+ return -EINVAL;
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ tegra_usb_phy_close(tegra->phy);
+ iounmap(hcd->regs);
+
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+
+ clk_disable(tegra->emc_clk);
+ clk_put(tegra->emc_clk);
+
+ kfree(tegra);
+ return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+ .probe = tegra_ehci_probe,
+ .remove = tegra_ehci_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_ehci_suspend,
+ .resume = tegra_ehci_resume,
+#endif
+ .shutdown = tegra_ehci_hcd_shutdown,
+ .driver = {
+ .name = "tegra-ehci",
+ }
+};
struct timer_list watchdog;
unsigned long actions;
unsigned stamp;
+ unsigned periodic_stamp;
unsigned random_frame;
unsigned long next_statechange;
ktime_t last_periodic_enable;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
unsigned fs_i_thresh:1; /* Intel iso scheduling */
+ unsigned port_reset_no_wait:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
},
};
-MODULE_ALIAS("platfrom:jz4740-ohci");
+MODULE_ALIAS("platform:jz4740-ohci");
int odd = len & 0x0001;
len = len / 2;
- ioread16_rep(fifoaddr, buf, len);
+ iowrite16_rep(fifoaddr, buf, len);
if (unlikely(odd)) {
buf = &buf[len];
iowrite8((unsigned char)*buf, fifoaddr);
static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
u32 __iomem *addr, u32 port_status)
{
+ /* Don't allow the USB core to disable SuperSpeed ports. */
+ if (xhci->port_array[wIndex] == 0x03) {
+ xhci_dbg(xhci, "Ignoring request to disable "
+ "SuperSpeed port.\n");
+ return;
+ }
+
/* Write 1 to disable the port */
xhci_writel(xhci, port_status | PORT_PE, addr);
port_status = xhci_readl(xhci, addr);
if (udev->speed == USB_SPEED_SUPER)
return ep->ss_ep_comp.wBytesPerInterval;
- max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * (max_burst + 1);
/* Fall through */
case USB_SPEED_FULL:
case USB_SPEED_LOW:
- max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
break;
default:
xhci->dcbaa = NULL;
scratchpad_free(xhci);
+
+ xhci->num_usb2_ports = 0;
+ xhci->num_usb3_ports = 0;
+ kfree(xhci->usb2_ports);
+ kfree(xhci->usb3_ports);
+ kfree(xhci->port_array);
+
xhci->page_size = 0;
xhci->page_shift = 0;
}
&xhci->ir_set->erst_dequeue);
}
+static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ u32 __iomem *addr, u8 major_revision)
+{
+ u32 temp, port_offset, port_count;
+ int i;
+
+ if (major_revision > 0x03) {
+ xhci_warn(xhci, "Ignoring unknown port speed, "
+ "Ext Cap %p, revision = 0x%x\n",
+ addr, major_revision);
+ /* Ignoring port protocol we can't understand. FIXME */
+ return;
+ }
+
+ /* Port offset and count in the third dword, see section 7.2 */
+ temp = xhci_readl(xhci, addr + 2);
+ port_offset = XHCI_EXT_PORT_OFF(temp);
+ port_count = XHCI_EXT_PORT_COUNT(temp);
+ xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
+ "count = %u, revision = 0x%x\n",
+ addr, port_offset, port_count, major_revision);
+ /* Port count includes the current port offset */
+ if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
+ /* WTF? "Valid values are ‘1’ to MaxPorts" */
+ return;
+ port_offset--;
+ for (i = port_offset; i < (port_offset + port_count); i++) {
+ /* Duplicate entry. Ignore the port if the revisions differ. */
+ if (xhci->port_array[i] != 0) {
+ xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+ " port %u\n", addr, i);
+ xhci_warn(xhci, "Port was marked as USB %u, "
+ "duplicated as USB %u\n",
+ xhci->port_array[i], major_revision);
+ /* Only adjust the roothub port counts if we haven't
+ * found a similar duplicate.
+ */
+ if (xhci->port_array[i] != major_revision &&
+ xhci->port_array[i] != (u8) -1) {
+ if (xhci->port_array[i] == 0x03)
+ xhci->num_usb3_ports--;
+ else
+ xhci->num_usb2_ports--;
+ xhci->port_array[i] = (u8) -1;
+ }
+ /* FIXME: Should we disable the port? */
+ continue;
+ }
+ xhci->port_array[i] = major_revision;
+ if (major_revision == 0x03)
+ xhci->num_usb3_ports++;
+ else
+ xhci->num_usb2_ports++;
+ }
+ /* FIXME: Should we disable ports not in the Extended Capabilities? */
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be. We can't count on the port
+ * speed bits in the PORTSC register being correct until a device is connected,
+ * but we need to set up the two fake roothubs with the correct number of USB
+ * 3.0 and USB 2.0 ports at host controller initialization time.
+ */
+static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+{
+ u32 __iomem *addr;
+ u32 offset;
+ unsigned int num_ports;
+ int i, port_index;
+
+ addr = &xhci->cap_regs->hcc_params;
+ offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
+ if (offset == 0) {
+ xhci_err(xhci, "No Extended Capability registers, "
+ "unable to set up roothub.\n");
+ return -ENODEV;
+ }
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
+ if (!xhci->port_array)
+ return -ENOMEM;
+
+ /*
+ * For whatever reason, the first capability offset is from the
+ * capability register base, not from the HCCPARAMS register.
+ * See section 5.3.6 for offset calculation.
+ */
+ addr = &xhci->cap_regs->hc_capbase + offset;
+ while (1) {
+ u32 cap_id;
+
+ cap_id = xhci_readl(xhci, addr);
+ if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+ xhci_add_in_port(xhci, num_ports, addr,
+ (u8) XHCI_EXT_PORT_MAJOR(cap_id));
+ offset = XHCI_EXT_CAPS_NEXT(cap_id);
+ if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
+ == num_ports)
+ break;
+ /*
+ * Once you're into the Extended Capabilities, the offset is
+ * always relative to the register holding the offset.
+ */
+ addr += offset;
+ }
+
+ if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
+ xhci_warn(xhci, "No ports on the roothubs?\n");
+ return -ENODEV;
+ }
+ xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+ xhci->num_usb2_ports, xhci->num_usb3_ports);
+ /*
+ * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
+ * Not sure how the USB core will handle a hub with no ports...
+ */
+ if (xhci->num_usb2_ports) {
+ xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
+ xhci->num_usb2_ports, flags);
+ if (!xhci->usb2_ports)
+ return -ENOMEM;
+
+ port_index = 0;
+ for (i = 0; i < num_ports; i++) {
+ if (xhci->port_array[i] == 0x03 ||
+ xhci->port_array[i] == 0 ||
+ xhci->port_array[i] == -1)
+ continue;
+
+ xhci->usb2_ports[port_index] =
+ &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ xhci_dbg(xhci, "USB 2.0 port at index %u, "
+ "addr = %p\n", i,
+ xhci->usb2_ports[port_index]);
+ port_index++;
+ }
+ }
+ if (xhci->num_usb3_ports) {
+ xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+ xhci->num_usb3_ports, flags);
+ if (!xhci->usb3_ports)
+ return -ENOMEM;
+
+ port_index = 0;
+ for (i = 0; i < num_ports; i++)
+ if (xhci->port_array[i] == 0x03) {
+ xhci->usb3_ports[port_index] =
+ &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ xhci_dbg(xhci, "USB 3.0 port at index %u, "
+ "addr = %p\n", i,
+ xhci->usb3_ports[port_index]);
+ port_index++;
+ }
+ }
+ return 0;
+}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
if (scratchpad_alloc(xhci, flags))
goto fail;
+ if (xhci_setup_port_arrays(xhci, flags))
+ goto fail;
return 0;
if (!(status & STS_EINT)) {
spin_unlock(&xhci->lock);
- xhci_warn(xhci, "Spurious interrupt.\n");
return IRQ_NONE;
}
xhci_dbg(xhci, "op reg status = %08x\n", status);
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
+
+ /* Enqueue pointer can be left pointing to the link TRB,
+ * we must handle that
+ */
+ if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
+ == TRB_TYPE(TRB_LINK))
+ command->command_trb =
+ xhci->cmd_ring->enq_seg->next->trbs;
+
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+
+ /* Enqueue pointer can be left pointing to the link TRB,
+ * we must handle that
+ */
+ if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
+ == TRB_TYPE(TRB_LINK))
+ reset_device_cmd->command_trb =
+ xhci->cmd_ring->enq_seg->next->trbs;
+
list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
ret = xhci_queue_reset_device(xhci, slot_id);
if (ret) {
#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
+/**
+ * struct xhci_protocol_caps
+ * @revision: major revision, minor revision, capability ID,
+ * and next capability pointer.
+ * @name_string: Four ASCII characters to say which spec this xHC
+ * follows, typically "USB ".
+ * @port_info: Port offset, count, and protocol-defined information.
+ */
+struct xhci_protocol_caps {
+ u32 revision;
+ u32 name_string;
+ u32 port_info;
+};
+
+#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
+#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+
/**
* struct xhci_container_ctx
* @type: Type of context. Used to calculated offsets to contained contexts.
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
+/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
+ * USB2.0 spec 9.6.6.
+ */
+#define GET_MAX_PACKET(p) ((p) & 0x7ff)
+
/* tx_info bitmasks */
#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
+
+ /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
+ u8 *port_array;
+ /* Array of pointers to USB 3.0 PORTSC registers */
+ u32 __iomem **usb3_ports;
+ unsigned int num_usb3_ports;
+ /* Array of pointers to USB 2.0 PORTSC registers */
+ u32 __iomem **usb2_ports;
+ unsigned int num_usb2_ports;
};
/* For testing purposes */
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
-static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO,
- get_port0_handler, set_port0_handler);
+static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
-static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO,
- get_port1_handler, set_port1_handler);
+static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
static int cypress_probe(struct usb_interface *interface,
/* needed for power consumption */
struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
+ memset(&info, 0, sizeof(info));
/* directly from the descriptor */
info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
info.product = dev->product_id;
#else
x.sisusb_conactive = 0;
#endif
+ memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
if (copy_to_user((void __user *)arg, &x, sizeof(x)))
retval = -EFAULT;
return count;
}
-static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed);
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
change_color(led); \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value);
show_set(blue);
show_set(red);
show_set(green);
\
return count; \
} \
-static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
static ssize_t show_attr_text(struct device *dev,
struct device_attribute *attr, char *buf)
return count;
}
-static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
+static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
static ssize_t show_attr_decimals(struct device *dev,
struct device_attribute *attr, char *buf)
return count;
}
-static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
- show_attr_decimals, set_attr_decimals);
+static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
static ssize_t show_attr_textmode(struct device *dev,
struct device_attribute *attr, char *buf)
return -EINVAL;
}
-static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
- show_attr_textmode, set_attr_textmode);
+static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
/*
* uss720.c -- USS720 USB Parport Cable.
*
- * Copyright (C) 1999, 2005
+ * Copyright (C) 1999, 2005, 2010
* Thomas Sailer (t.sailer@alumni.ethz.ch)
*
* This program is free software; you can redistribute it and/or modify
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
{ USB_DEVICE(0x1293, 0x0002) },
+	{ USB_DEVICE(0x050d, 0x0002) },
{ } /* Terminating entry */
};
usb_nop_xceiv_register();
musb->xceiv = otg_get_transceiver();
- if (!musb->xceiv)
+ if (!musb->xceiv) {
+ gpio_free(musb->config->gpio_vrsel);
return -ENODEV;
+ }
if (ANOMALY_05000346) {
bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
int musb_platform_exit(struct musb *musb)
{
-
gpio_free(musb->config->gpio_vrsel);
+ otg_put_transceiver(musb->xceiv);
+ usb_nop_xceiv_unregister();
return 0;
}
fail:
clk_disable(musb->clock);
+ otg_put_transceiver(musb->xceiv);
usb_nop_xceiv_unregister();
return -ENODEV;
}
clk_disable(musb->clock);
+ otg_put_transceiver(musb->xceiv);
usb_nop_xceiv_unregister();
return 0;
dma_controller_destroy(c);
}
-#ifdef CONFIG_USB_MUSB_OTG
- put_device(musb->xceiv->dev);
-#endif
-
#ifdef CONFIG_USB_MUSB_HDRC_HCD
usb_put_hcd(musb_to_hcd(musb));
#else
#endif
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_free(musb);
iounmap(ctrl_base);
musb_platform_suspend(musb);
+ otg_put_transceiver(musb->xceiv);
return 0;
}
if (ret < 0) {
if (sync)
iounmap(sync);
+
+ otg_put_transceiver(musb->xceiv);
usb_nop_xceiv_unregister();
}
return ret;
musb->board_set_power(0);
iounmap(musb->sync_va);
+
+ otg_put_transceiver(musb->xceiv);
usb_nop_xceiv_unregister();
return 0;
}
Enable this to support ULPI connected USB OTG transceivers which
are likely found on embedded boards.
+config USB_ULPI_VIEWPORT
+ bool
+ depends on USB_ULPI
+ help
+ Provides read/write operations to the ULPI phy register set for
+ controllers with a viewport register (e.g. Chipidea/ARC controllers).
+
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030
built-in with usb ip or which are autonomous and doesn't require any
phy programming such as ISP1x04 etc.
+config USB_TEGRA_OTG
+ boolean "Tegra OTG Driver"
+ depends on USB && ARCH_TEGRA
+ select USB_OTG_UTILS
+ help
+ Enable this driver on boards which use the internal VBUS and ID
+ sensing of the Tegra USB PHY.
+
endif # USB || OTG
# transceiver drivers
obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+obj-$(CONFIG_USB_TEGRA_OTG) += tegra-otg.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
obj-$(CONFIG_USB_ULPI) += ulpi.o
+obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
--- /dev/null
+/*
+ * drivers/usb/otg/tegra-otg.c
+ *
+ * OTG transceiver driver for Tegra UTMI phy
+ *
+ * Copyright (C) 2010 NVIDIA Corp.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#define USB_PHY_WAKEUP 0x408
+#define USB_ID_INT_EN (1 << 0)
+#define USB_ID_INT_STATUS (1 << 1)
+#define USB_ID_STATUS (1 << 2)
+#define USB_ID_PIN_WAKEUP_EN (1 << 6)
+#define USB_VBUS_WAKEUP_EN (1 << 30)
+#define USB_VBUS_INT_EN (1 << 8)
+#define USB_VBUS_INT_STATUS (1 << 9)
+#define USB_VBUS_STATUS (1 << 10)
+#define USB_INTS (USB_VBUS_INT_STATUS | USB_ID_INT_STATUS)
+
+struct tegra_otg_data {
+ struct otg_transceiver otg;
+ unsigned long int_status;
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct platform_device *host;
+ struct platform_device *pdev;
+};
+
+static inline unsigned long otg_readl(struct tegra_otg_data *tegra,
+ unsigned int offset)
+{
+ return readl(tegra->regs + offset);
+}
+
+static inline void otg_writel(struct tegra_otg_data *tegra, unsigned long val,
+ unsigned int offset)
+{
+ writel(val, tegra->regs + offset);
+}
+
+static const char *tegra_state_name(enum usb_otg_state state)
+{
+ if (state == OTG_STATE_A_HOST)
+ return "HOST";
+ if (state == OTG_STATE_B_PERIPHERAL)
+ return "PERIPHERAL";
+ if (state == OTG_STATE_A_SUSPEND)
+ return "SUSPEND";
+ return "INVALID";
+}
+
+void tegra_start_host(struct tegra_otg_data *tegra)
+{
+ int retval;
+ struct platform_device *pdev;
+ struct platform_device *host = tegra->host;
+ void *platform_data;
+
+ pdev = platform_device_alloc(host->name, host->id);
+ if (!pdev)
+ return;
+
+ if (host->resource) {
+ retval = platform_device_add_resources(pdev, host->resource,
+ host->num_resources);
+ if (retval)
+ goto error;
+ }
+
+ pdev->dev.dma_mask = host->dev.dma_mask;
+ pdev->dev.coherent_dma_mask = host->dev.coherent_dma_mask;
+
+ platform_data = kmalloc(sizeof(struct tegra_ehci_platform_data), GFP_KERNEL);
+ if (!platform_data)
+ goto error;
+
+ memcpy(platform_data, host->dev.platform_data,
+ sizeof(struct tegra_ehci_platform_data));
+ pdev->dev.platform_data = platform_data;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto error_add;
+
+ tegra->pdev = pdev;
+ return;
+
+error_add:
+ kfree(platform_data);
+error:
+	pr_err("%s: failed to add the host controller device\n", __func__);
+ platform_device_put(pdev);
+}
+
+void tegra_stop_host(struct tegra_otg_data *tegra)
+{
+ if (tegra->pdev) {
+ platform_device_unregister(tegra->pdev);
+ tegra->pdev = NULL;
+ }
+}
+
+static irqreturn_t tegra_otg_irq_thread(int irq, void *data)
+{
+ struct tegra_otg_data *tegra = data;
+ struct otg_transceiver *otg = &tegra->otg;
+ enum usb_otg_state from = otg->state;
+ enum usb_otg_state to = OTG_STATE_UNDEFINED;
+ unsigned long flags;
+ unsigned long status;
+
+ clk_enable(tegra->clk);
+
+ status = otg_readl(tegra, USB_PHY_WAKEUP);
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ if (tegra->int_status & USB_ID_INT_STATUS) {
+ if (status & USB_ID_STATUS)
+ to = OTG_STATE_A_SUSPEND;
+ else
+ to = OTG_STATE_A_HOST;
+ } else if (tegra->int_status & USB_VBUS_INT_STATUS) {
+ if (status & USB_VBUS_STATUS)
+ to = OTG_STATE_B_PERIPHERAL;
+ else
+ to = OTG_STATE_A_SUSPEND;
+ }
+
+ tegra->int_status = 0;
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ otg->state = to;
+
+ dev_info(tegra->otg.dev, "%s --> %s", tegra_state_name(from),
+ tegra_state_name(to));
+
+ if (to == OTG_STATE_A_SUSPEND) {
+ if (from == OTG_STATE_A_HOST && tegra->host)
+ tegra_stop_host(tegra);
+ else if (from == OTG_STATE_B_PERIPHERAL && otg->gadget)
+ usb_gadget_vbus_disconnect(otg->gadget);
+ } else if (to == OTG_STATE_B_PERIPHERAL && otg->gadget) {
+ if (from == OTG_STATE_A_SUSPEND)
+ usb_gadget_vbus_connect(otg->gadget);
+ } else if (to == OTG_STATE_A_HOST && tegra->host) {
+ if (from == OTG_STATE_A_SUSPEND)
+ tegra_start_host(tegra);
+ }
+
+ clk_disable(tegra->clk);
+
+ return IRQ_HANDLED;
+
+}
+
+static irqreturn_t tegra_otg_irq(int irq, void *data)
+{
+ struct tegra_otg_data *tegra = data;
+ unsigned long val;
+
+ clk_enable(tegra->clk);
+
+ spin_lock(&tegra->lock);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+
+ /* and the interrupt enables into the interrupt status bits */
+ val = (val & (val << 1)) & USB_INTS;
+
+ tegra->int_status |= val;
+
+ spin_unlock(&tegra->lock);
+
+ clk_disable(tegra->clk);
+
+ return (val) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static int tegra_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->gadget = gadget;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val &= ~(USB_VBUS_INT_STATUS | USB_ID_INT_STATUS);
+
+ if (gadget)
+ val |= (USB_VBUS_INT_EN | USB_VBUS_WAKEUP_EN);
+ else
+ val &= ~(USB_VBUS_INT_EN | USB_VBUS_WAKEUP_EN);
+
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ clk_disable(tegra->clk);
+
+ return 0;
+}
+
+static int tegra_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->host = host;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val &= ~(USB_VBUS_INT_STATUS | USB_ID_INT_STATUS);
+
+ if (host)
+ val |= USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN;
+ else
+ val &= ~(USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ clk_disable(tegra->clk);
+
+ return 0;
+}
+
+static int tegra_otg_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ return 0;
+}
+
+static int tegra_otg_set_suspend(struct otg_transceiver *otg, int suspend)
+{
+ return 0;
+}
+
+static int tegra_otg_probe(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra;
+ struct resource *res;
+ unsigned long val;
+ int err;
+
+ tegra = kzalloc(sizeof(struct tegra_otg_data), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->otg.dev = &pdev->dev;
+ tegra->otg.label = "tegra-otg";
+ tegra->otg.state = OTG_STATE_UNDEFINED;
+ tegra->otg.set_host = tegra_otg_set_host;
+ tegra->otg.set_peripheral = tegra_otg_set_peripheral;
+ tegra->otg.set_suspend = tegra_otg_set_suspend;
+ tegra->otg.set_power = tegra_otg_set_power;
+ tegra->host = pdev->dev.platform_data;
+ spin_lock_init(&tegra->lock);
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get otg clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto err_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto err_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto err_io;
+ }
+ tegra->regs = ioremap(res->start, resource_size(res));
+ if (!tegra->regs) {
+ err = -ENOMEM;
+ goto err_io;
+ }
+
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+
+ val &= ~(USB_VBUS_INT_STATUS | USB_VBUS_INT_EN |
+ USB_ID_INT_STATUS | USB_ID_INT_EN |
+ USB_VBUS_WAKEUP_EN | USB_ID_PIN_WAKEUP_EN);
+
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+
+ tegra->otg.state = OTG_STATE_A_SUSPEND;
+
+ err = otg_set_transceiver(&tegra->otg);
+ if (err) {
+ dev_err(&pdev->dev, "can't register transceiver (%d)\n", err);
+ goto err_otg;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENXIO;
+ goto err_irq;
+ }
+ tegra->irq = res->start;
+ err = request_threaded_irq(tegra->irq, tegra_otg_irq,
+ tegra_otg_irq_thread,
+ IRQF_SHARED, "tegra-otg", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register IRQ\n");
+ goto err_irq;
+ }
+
+ dev_info(&pdev->dev, "otg transceiver registered\n");
+ return 0;
+
+err_irq:
+ otg_set_transceiver(NULL);
+err_otg:
+ iounmap(tegra->regs);
+err_io:
+ clk_disable(tegra->clk);
+err_clken:
+ clk_put(tegra->clk);
+err_clk:
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+ return err;
+}
+
+static int __exit tegra_otg_remove(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra = platform_get_drvdata(pdev);
+
+ free_irq(tegra->irq, tegra);
+ otg_set_transceiver(NULL);
+ iounmap(tegra->regs);
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+
+ return 0;
+}
+
+static struct platform_driver tegra_otg_driver = {
+ .driver = {
+ .name = "tegra-otg",
+ },
+ .remove = __exit_p(tegra_otg_remove),
+ .probe = tegra_otg_probe,
+};
+
+static int __init tegra_otg_init(void)
+{
+ return platform_driver_register(&tegra_otg_driver);
+}
+subsys_initcall(tegra_otg_init);
+
+static void __exit tegra_otg_exit(void)
+{
+ platform_driver_unregister(&tegra_otg_driver);
+}
+module_exit(tegra_otg_exit);
--- /dev/null
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+
+#define ULPI_VIEW_WAKEUP (1 << 31)
+#define ULPI_VIEW_RUN (1 << 30)
+#define ULPI_VIEW_WRITE (1 << 29)
+#define ULPI_VIEW_READ (0 << 29)
+#define ULPI_VIEW_ADDR(x) (((x) & 0xff) << 16)
+#define ULPI_VIEW_DATA_READ(x) (((x) >> 8) & 0xff)
+#define ULPI_VIEW_DATA_WRITE(x) ((x) & 0xff)
+
+static int ulpi_viewport_wait(void __iomem *view, u32 mask)
+{
+ unsigned long usec = 2000;
+
+	while (usec--) {
+		if (!(readl(view) & mask))
+			return 0;
+
+		udelay(1);
+	}
+
+ return -ETIMEDOUT;
+}
+
+static int ulpi_viewport_read(struct otg_transceiver *otg, u32 reg)
+{
+ int ret;
+ void __iomem *view = otg->io_priv;
+
+ writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
+ if (ret)
+ return ret;
+
+ writel(ULPI_VIEW_RUN | ULPI_VIEW_READ | ULPI_VIEW_ADDR(reg), view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_RUN);
+ if (ret)
+ return ret;
+
+ return ULPI_VIEW_DATA_READ(readl(view));
+}
+
+static int ulpi_viewport_write(struct otg_transceiver *otg, u32 val, u32 reg)
+{
+ int ret;
+ void __iomem *view = otg->io_priv;
+
+ writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
+ if (ret)
+ return ret;
+
+ writel(ULPI_VIEW_RUN | ULPI_VIEW_WRITE | ULPI_VIEW_DATA_WRITE(val) |
+ ULPI_VIEW_ADDR(reg), view);
+
+ return ulpi_viewport_wait(view, ULPI_VIEW_RUN);
+}
+
+struct otg_io_access_ops ulpi_viewport_access_ops = {
+ .read = ulpi_viewport_read,
+ .write = ulpi_viewport_write,
+};
static int debug;
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
+ { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
{ USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
- { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+
+ /* Papouch devices based on FTDI chip */
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
+
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
{ USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
"urb failed to set to rts/cts flow control\n");
}
- /* raise DTR/RTS */
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
} else {
/*
* Xon/Xoff code
}
}
- /* lower DTR/RTS */
- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
return;
}
#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+#define FTDI_OPENDCC_GBM_PID 0xBFDC
/*
* RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
+/* Vardaan Enterprises Serial Interface VEUSB422R3 */
+#define FTDI_VARDAAN_PID 0xF070
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
/*
* Bayer Ascensia Contour blood glucose meter USB-converter cable.
*/
#define PAPOUCH_VID 0x5050 /* Vendor ID */
+#define PAPOUCH_SB485_PID 0x0100 /* Papouch SB485 USB-485/422 Converter */
+#define PAPOUCH_AP485_PID 0x0101 /* AP485 USB-RS485 Converter */
+#define PAPOUCH_SB422_PID 0x0102 /* Papouch SB422 USB-RS422 Converter */
+#define PAPOUCH_SB485_2_PID 0x0103 /* Papouch SB485 USB-485/422 Converter */
+#define PAPOUCH_AP485_2_PID 0x0104 /* AP485 USB-RS485 Converter */
+#define PAPOUCH_SB422_2_PID 0x0105 /* Papouch SB422 USB-RS422 Converter */
+#define PAPOUCH_SB485S_PID 0x0106 /* Papouch SB485S USB-485/422 Converter */
+#define PAPOUCH_SB485C_PID 0x0107 /* Papouch SB485C USB-485/422 Converter */
+#define PAPOUCH_LEC_PID 0x0300 /* LEC USB Converter */
+#define PAPOUCH_SB232_PID 0x0301 /* Papouch SB232 USB-RS232 Converter */
#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
-#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
+#define PAPOUCH_IRAMP_PID 0x0500 /* Papouch IRAmp Duplex */
+#define PAPOUCH_DRAK5_PID 0x0700 /* Papouch DRAK5 */
+#define PAPOUCH_QUIDO8x8_PID 0x0800 /* Papouch Quido 8/8 Module */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Papouch Quido 4/4 Module */
+#define PAPOUCH_QUIDO2x2_PID 0x0a00 /* Papouch Quido 2/2 Module */
+#define PAPOUCH_QUIDO10x1_PID 0x0b00 /* Papouch Quido 10/1 Module */
+#define PAPOUCH_QUIDO30x3_PID 0x0c00 /* Papouch Quido 30/3 Module */
+#define PAPOUCH_QUIDO60x3_PID 0x0d00 /* Papouch Quido 60(100)/3 Module */
+#define PAPOUCH_QUIDO2x16_PID 0x0e00 /* Papouch Quido 2/16 Module */
+#define PAPOUCH_QUIDO3x32_PID 0x0f00 /* Papouch Quido 3/32 Module */
+#define PAPOUCH_DRAK6_PID 0x1000 /* Papouch DRAK6 */
+#define PAPOUCH_UPSUSB_PID 0x8000 /* Papouch UPS-USB adapter */
+#define PAPOUCH_MU_PID 0x8001 /* MU controller */
+#define PAPOUCH_SIMUKEY_PID 0x8002 /* Papouch SimuKey */
#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
+#define PAPOUCH_GMUX_PID 0x8004 /* Papouch GOLIATH MUX */
+#define PAPOUCH_GMSR_PID 0x8005 /* Papouch GOLIATH MSR */
/*
* Marvell SheevaPlug
#define MJSG_XM_RADIO_PID 0x937A
#define MJSG_HD_RADIO_PID 0x937C
+/*
+ * D.O.Tec products (http://www.directout.eu)
+ */
+#define FTDI_DOTEC_PID 0x9868
+
/*
* Xverve Signalyzer tools (http://www.signalyzer.com/)
*/
* Submitted by John G. Rogers
*/
#define SEGWAY_RMP200_PID 0xe729
+
+
+/*
+ * Accesio USB Data Acquisition products (http://www.accesio.com/)
+ */
+#define ACCESIO_COM4SM_PID 0xD578
+
+/* www.sciencescope.co.uk educational dataloggers */
+#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
+#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
+#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
+
+/*
+ * Milkymist One JTAG/Serial
+ */
+#define QIHARDWARE_VID 0x20B7
+#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
+
{
dbg("%s port %d", __func__, port->number);
- usb_serial_generic_close(port);
- if (port->serial->dev)
+ if (port->serial->dev) {
+ /* shutdown our urbs */
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
+ }
} /* mct_u232_close */
/* real data, send it to the tty layer */
tty = tty_port_tty_get(&port->port);
if (tty) {
- tty_insert_flip_string(tty, data,
- data_length);
+ tty_insert_flip_string(tty, data + 2,
+ data_length);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
priv->bulk_address),
priv->bulk_in_buffer, priv->buffer_size,
opticon_bulk_callback, priv);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
- { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+ /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+ /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
.no_dynamic_id = 1,
+ .supports_autosuspend = 1,
};
/* There is no MODULE_DEVICE_TABLE for usbserial.c. Instead
return -ENODEV;
fixup_generic(driver);
+ if (driver->usb_driver)
+ driver->usb_driver->supports_autosuspend = 1;
if (!driver->description)
driver->description = driver->driver.name;
static int clie_5_attach(struct usb_serial *serial)
{
+ struct usb_serial_port *port;
+ unsigned int pipe;
+ int j;
+
dbg("%s", __func__);
/* TH55 registers 2 ports.
return -1;
/* port 0 now uses the modified endpoint Address */
- serial->port[0]->bulk_out_endpointAddress =
+ port = serial->port[0];
+ port->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
+ pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
+ for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
+ port->write_urbs[j]->pipe = pipe;
+
return 0;
}
}
return result;
}
-static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
int sierra_ms_init(struct us_data *us)
{
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64),
+/* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */
+UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
+ "Samsung",
+ "YP-CP3",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
+
/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
* Device uses standards-violating 32-byte Bulk Command Block Wrappers and
* reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
+source "drivers/video/tegra/Kconfig"
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-y += tegra/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
{
struct backlight_device *bd = to_backlight_device(dev);
- if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
- mutex_lock(&bd->ops_lock);
+ mutex_lock(&bd->ops_lock);
+ if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
bd->props.state |= BL_CORE_SUSPENDED;
backlight_update_status(bd);
- mutex_unlock(&bd->ops_lock);
}
+ mutex_unlock(&bd->ops_lock);
return 0;
}
{
struct backlight_device *bd = to_backlight_device(dev);
- if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
- mutex_lock(&bd->ops_lock);
+ mutex_lock(&bd->ops_lock);
+ if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
bd->props.state &= ~BL_CORE_SUSPENDED;
backlight_update_status(bd);
- mutex_unlock(&bd->ops_lock);
}
+ mutex_unlock(&bd->ops_lock);
return 0;
}
DPRINTK("========================================\n");
}
+void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+{
+ unsigned char *block;
+ unsigned char *dtd_block;
+ struct fb_videomode *mode, *m;
+ int num = 0, i, first = 1;
+
+ if (edid == NULL)
+ return;
+
+ if (!edid_checksum(edid))
+ return;
+
+ if (edid[0] != 0x2)
+ return;
+
+ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
+ if (mode == NULL)
+ return;
+
+ block = edid + 0x4;
+ dtd_block = edid + edid[0x2];
+
+ DPRINTK(" Short Video Modes\n");
+ while (block < dtd_block) {
+ unsigned tag = block[0] >> 5;
+ unsigned len = block[0] & 0x1f;
+
+ block++;
+ if (dtd_block - block < len)
+ break;
+
+ if (tag == 0x2) {
+ for (i = 0; i < len; i++) {
+ unsigned m = block[i];
+ if (m > 0 && m < CEA_MODEDB_SIZE) {
+ memcpy(&mode[num], &cea_modes[m],
+ sizeof(mode[num]));
+ DPRINTK(" %d: %dx%d @ %d\n", m,
+ cea_modes[m].xres, cea_modes[m].yres,
+ cea_modes[m].refresh);
+
+ num++;
+ }
+ }
+ } else if (tag == 0x3) {
+ if (len >= 3) {
+ u32 ieee_reg = block[0] | (block[1] << 8) |
+ (block[2] << 16);
+ if (ieee_reg == 0x000c03)
+ specs->misc |= FB_MISC_HDMI;
+ }
+ }
+
+ block += len;
+ }
+
+ DPRINTK(" Extended Detailed Timings\n");
+
+ for (i = 0; i < (128 - edid[0x2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
+ i++, dtd_block += DETAILED_TIMING_DESCRIPTION_SIZE) {
+ if (!(dtd_block[0] == 0x00 && dtd_block[1] == 0x00)) {
+ get_detailed_timing(dtd_block, &mode[num]);
+ if (first) {
+ mode[num].flag |= FB_MODE_IS_FIRST;
+ first = 0;
+ }
+ num++;
+ }
+ }
+
+ /* Yikes, EDID data is totally useless */
+ if (!num) {
+ kfree(mode);
+ return;
+ }
+
+ m = kzalloc((specs->modedb_len + num) *
+ sizeof(struct fb_videomode), GFP_KERNEL);
+
+ if (!m) {
+ kfree(mode);
+ return;
+ }
+
+ memmove(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));
+ memmove(m + specs->modedb_len, mode, num * sizeof(struct fb_videomode));
+ kfree(mode);
+ kfree(specs->modedb);
+ specs->modedb = m;
+ specs->modedb_len = specs->modedb_len + num;
+}
+
/*
* VESA Generalized Timing Formula (GTF)
*/
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
EXPORT_SYMBOL(vesa_modes);
+
+const struct fb_videomode cea_modes[CEA_MODEDB_SIZE] = {
+ {},
+ /* 1: 640x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 640, .yres = 480, .pixclock = 39721,
+ .left_margin = 48, .right_margin = 16,
+ .upper_margin = 33, .lower_margin = 1,
+ .hsync_len = 96, .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 2: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 3: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 4: 1280x720p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 5: 1920x1080i @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 10: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 11: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 12: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 13: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 14: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 15: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 16: 1920x1080p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 17: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 18: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 19: 1280x720p @ 50Hz */
+ {.refresh = 50, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 20: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 21: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 22: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 23: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 24: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 25: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 26: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 27: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 28: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 29: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 30: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 31: 1920x1080p @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 32: 1920x1080p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 638,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 33: 1920x1080p @ 25Hz */
+ {.refresh = 25, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 34: 1920x1080p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 35: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 36: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 37: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 38: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 39: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13888,
+ .left_margin = 184, .right_margin = 32,
+ .upper_margin = 57, .lower_margin = 2,
+ .hsync_len = 168, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 40: 1920x1080i @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 41: 1280x720p @ 100Hz */
+ {.refresh = 100, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 42: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 43: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 44: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 45: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 46: 1920x1080i @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 47: 1280x720p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 48: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 49: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 50: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 51: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 52: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 53: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 54: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 55: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 56: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 57: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 58: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 59: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 60: 1280x720p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1280, .yres = 720, .pixclock = 16835,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 61: 1280x720p @ 25Hz */
+ {.refresh = 25, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 2420,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 62: 1280x720p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 63: 1920x1080p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 64: 1920x1080p @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+};
+EXPORT_SYMBOL(cea_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
/**
if (!modelist)
return -ENOMEM;
modelist->mode = *mode;
- list_add(&modelist->list, head);
+ list_add_tail(&modelist->list, head);
}
return 0;
}
--- /dev/null
+if ARCH_TEGRA
+
+comment "NVIDIA Tegra Display Driver options"
+
+config TEGRA_GRHOST
+ tristate "Tegra graphics host driver"
+ depends on TEGRA_IOVMM
+ default n
+ help
+ Driver for the Tegra graphics host hardware.
+
+config TEGRA_DC
+ tristate "Tegra Display Controller"
+ depends on ARCH_TEGRA
+ select FB_MODE_HELPERS
+ select I2C
+ help
+ Tegra display controller support.
+
+config FB_TEGRA
+ tristate "Tegra Framebuffer driver"
+ depends on TEGRA_DC && FB = y
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default FB
+ help
+ Framebuffer device support for the Tegra display controller.
+
+config TEGRA_NVMAP
+ bool "Tegra GPU memory management driver (nvmap)"
+ default y
+ help
+ Say Y here to include the memory management driver for the Tegra
+ GPU, multimedia and display subsystems
+
+config NVMAP_RECLAIM_UNPINNED_VM
+ bool "Virtualize IOVMM memory in nvmap"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM
+ default y
+ help
+ Say Y here to enable nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other handles. This can
+ allow a larger virtual I/O VM space than would normally be
+ supported by the hardware, at a slight cost in performance.
+
+config NVMAP_ALLOW_SYSMEM
+ bool "Allow physical system memory to be used by nvmap"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ Say Y here to allow nvmap to use physical system memory (i.e.,
+ shared with the operating system but not translated through
+ an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+ bool "Use only HIGHMEM for nvmap"
+ depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM
+ default n
+ help
+ Say Y here to restrict nvmap system memory allocations (both
+ physical system memory and IOVMM) to just HIGHMEM pages.
+
+config NVMAP_CARVEOUT_KILLER
+ bool "Reclaim nvmap carveout by killing processes"
+ depends on TEGRA_NVMAP
+ default n
+ help
+ Say Y here to allow the system to reclaim carveout space by killing
+ processes. This will kill the largest consumers of lowest priority
+ first.
+
+endif
+
--- /dev/null
+obj-$(CONFIG_TEGRA_GRHOST) += host/
+obj-$(CONFIG_TEGRA_DC) += dc/
+obj-$(CONFIG_FB_TEGRA) += fb.o
+obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
--- /dev/null
+obj-y += dc.o
+obj-y += rgb.o
+obj-y += hdmi.o
+obj-y += nvhdcp.o
+obj-y += edid.o
--- /dev/null
+/*
+ * drivers/video/tegra/dc/dc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/mc.h>
+#include <mach/nvhost.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+static int no_vsync;
+
+module_param_named(no_vsync, no_vsync, int, S_IRUGO | S_IWUSR);
+
+struct tegra_dc *tegra_dcs[TEGRA_MAX_DC];
+
+DEFINE_MUTEX(tegra_dc_lock);
+
+static inline int tegra_dc_fmt_bpp(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_P1:
+ return 1;
+
+ case TEGRA_WIN_FMT_P2:
+ return 2;
+
+ case TEGRA_WIN_FMT_P4:
+ return 4;
+
+ case TEGRA_WIN_FMT_P8:
+ return 8;
+
+ case TEGRA_WIN_FMT_B4G4R4A4:
+ case TEGRA_WIN_FMT_B5G5R5A:
+ case TEGRA_WIN_FMT_B5G6R5:
+ case TEGRA_WIN_FMT_AB5G5R5:
+ return 16;
+
+ case TEGRA_WIN_FMT_B8G8R8A8:
+ case TEGRA_WIN_FMT_R8G8B8A8:
+ case TEGRA_WIN_FMT_B6x2G6x2R6x2A8:
+ case TEGRA_WIN_FMT_R6x2G6x2B6x2A8:
+ return 32;
+
+ /* for planar formats, size of the Y plane, 8bit */
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return 8;
+
+ case TEGRA_WIN_FMT_YCbCr422:
+ case TEGRA_WIN_FMT_YUV422:
+ case TEGRA_WIN_FMT_YCbCr422R:
+ case TEGRA_WIN_FMT_YUV422R:
+ case TEGRA_WIN_FMT_YCbCr422RA:
+ case TEGRA_WIN_FMT_YUV422RA:
+ /* FIXME: need to know the bpp of these formats */
+ return 0;
+ }
+ return 0;
+}
+
+static inline bool tegra_dc_is_yuv_planar(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return true;
+ }
+ return false;
+}
+
+#define DUMP_REG(a) do { \
+ snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_dc_readl(dc, a)); \
+ print(data, buff); \
+ } while (0)
+
+static void _dump_regs(struct tegra_dc *dc, void *data,
+ void (* print)(void *data, const char *str))
+{
+ int i;
+ char buff[256];
+
+ tegra_dc_io_start(dc);
+ clk_enable(dc->clk);
+
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+
+
+ for (i = 0; i < 3; i++) {
+ print(data, "\n");
+ snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
+ print(data, buff);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INCREMENT);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WIN_CSC_YOF);
+ DUMP_REG(DC_WIN_CSC_KYRGB);
+ DUMP_REG(DC_WIN_CSC_KUR);
+ DUMP_REG(DC_WIN_CSC_KVR);
+ DUMP_REG(DC_WIN_CSC_KUG);
+ DUMP_REG(DC_WIN_CSC_KVG);
+ DUMP_REG(DC_WIN_CSC_KUB);
+ DUMP_REG(DC_WIN_CSC_KVB);
+ }
+
+ clk_disable(dc->clk);
+ tegra_dc_io_end(dc);
+}
+
+#undef DUMP_REG
+
+#ifdef DEBUG
+static void dump_regs_print(void *data, const char *str)
+{
+ struct tegra_dc *dc = data;
+ dev_dbg(&dc->ndev->dev, "%s", str);
+}
+
+static void dump_regs(struct tegra_dc *dc)
+{
+ _dump_regs(dc, dc, dump_regs_print);
+}
+#else
+
+static void dump_regs(struct tegra_dc *dc) {}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static void dbg_regs_print(void *data, const char *str)
+{
+ struct seq_file *s = data;
+
+ seq_printf(s, "%s", str);
+}
+
+#undef DUMP_REG
+
+static int dbg_dc_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc *dc = s->private;
+
+ _dump_regs(dc, s, dbg_regs_print);
+
+ return 0;
+}
+
+
+static int dbg_dc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dc_show, inode->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+ .open = dbg_dc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra_dc_dbg_add(struct tegra_dc *dc)
+{
+ char name[32];
+
+ snprintf(name, sizeof(name), "tegra_dc%d_regs", dc->ndev->id);
+ (void) debugfs_create_file(name, S_IRUGO, NULL, dc, &dbg_fops);
+}
+#else
+static void tegra_dc_dbg_add(struct tegra_dc *dc) {}
+
+#endif
+
+
+static int tegra_dc_add(struct tegra_dc *dc, int index)
+{
+ int ret = 0;
+
+ mutex_lock(&tegra_dc_lock);
+ if (index >= TEGRA_MAX_DC) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tegra_dcs[index] != NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tegra_dcs[index] = dc;
+
+out:
+ mutex_unlock(&tegra_dc_lock);
+
+ return ret;
+}
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx)
+{
+ if (idx < TEGRA_MAX_DC)
+ return tegra_dcs[idx];
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(tegra_dc_get_dc);
+
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win)
+{
+ if (win >= dc->n_windows)
+ return NULL;
+
+ return &dc->windows[win];
+}
+EXPORT_SYMBOL(tegra_dc_get_window);
+
+static int get_topmost_window(u32 *depths, unsigned long *wins)
+{
+ int idx, best = -1;
+
+ for_each_set_bit(idx, wins, DC_N_WINDOWS) {
+ if (best == -1 || depths[idx] < depths[best])
+ best = idx;
+ }
+ clear_bit(best, wins);
+ return best;
+}
+
+static u32 blend_topwin(u32 flags)
+{
+ if (flags & TEGRA_WIN_FLAG_BLEND_COVERAGE)
+ return BLEND(NOKEY, ALPHA, 0xff, 0xff);
+ else if (flags & TEGRA_WIN_FLAG_BLEND_PREMULT)
+ return BLEND(NOKEY, PREMULT, 0xff, 0xff);
+ else
+ return BLEND(NOKEY, FIX, 0xff, 0xff);
+}
+
+static u32 blend_2win(int idx, unsigned long behind_mask, u32* flags, int xy)
+{
+ int other;
+
+ for (other = 0; other < DC_N_WINDOWS; other++) {
+ if (other != idx && (xy-- == 0))
+ break;
+ }
+ if (BIT(other) & behind_mask)
+ return blend_topwin(flags[idx]);
+ else if (flags[other])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x00, 0x00);
+}
+
+static u32 blend_3win(int idx, unsigned long behind_mask, u32* flags)
+{
+ unsigned long infront_mask;
+ int first;
+
+ infront_mask = ~(behind_mask | BIT(idx));
+ infront_mask &= (BIT(DC_N_WINDOWS) - 1);
+ first = ffs(infront_mask) - 1;
+
+ if (!infront_mask)
+ return blend_topwin(flags[idx]);
+ else if (behind_mask && first != -1 && flags[first])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x0, 0x0);
+}
+
+static void tegra_dc_set_blending(struct tegra_dc *dc, struct tegra_dc_blend *blend)
+{
+ unsigned long mask = BIT(DC_N_WINDOWS) - 1;
+
+ while (mask) {
+ int idx = get_topmost_window(blend->z, &mask);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_1WIN);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 0),
+ DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 1),
+ DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, blend_3win(idx, mask, blend->flags),
+ DC_WIN_BLEND_3WIN_XY);
+ }
+}
+
+static void tegra_dc_set_csc(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+}
+
+static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
+{
+ unsigned i;
+ unsigned v0 = 128;
+ unsigned v1 = 0;
+ /* linear horizontal and vertical filters */
+ for (i = 0; i < 16; i++) {
+ tegra_dc_writel(dc, (v1 << 16) | (v0 << 8),
+ DC_WIN_H_FILTER_P(i));
+
+ tegra_dc_writel(dc, v0,
+ DC_WIN_V_FILTER_P(i));
+ v0 -= 8;
+ v1 += 8;
+ }
+}
+
+/* does not support updating windows on multiple dcs in one call */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
+{
+ struct tegra_dc *dc;
+ unsigned long update_mask = GENERAL_ACT_REQ;
+ unsigned long val;
+ bool update_blend = false;
+ int i;
+
+ dc = windows[0]->dc;
+
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -EFAULT;
+ }
+
+ if (no_vsync)
+ tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS);
+ else
+ tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);
+
+ for (i = 0; i < n; i++) {
+ struct tegra_dc_win *win = windows[i];
+ unsigned h_dda;
+ unsigned v_dda;
+ unsigned h_offset;
+ unsigned v_offset;
+ bool invert_h = (win->flags & TEGRA_WIN_FLAG_INVERT_H) != 0;
+ bool invert_v = (win->flags & TEGRA_WIN_FLAG_INVERT_V) != 0;
+ bool yuvp = tegra_dc_is_yuv_planar(win->fmt);
+
+ if (win->z != dc->blend.z[win->idx]) {
+ dc->blend.z[win->idx] = win->z;
+ update_blend = true;
+ }
+ if ((win->flags & TEGRA_WIN_BLEND_FLAGS_MASK) !=
+ dc->blend.flags[win->idx]) {
+ dc->blend.flags[win->idx] =
+ win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+ update_blend = true;
+ }
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << win->idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ if (!no_vsync)
+ update_mask |= WIN_A_ACT_REQ << win->idx;
+
+ if (!(win->flags & TEGRA_WIN_FLAG_ENABLED)) {
+ tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
+ continue;
+ }
+
+ tegra_dc_writel(dc, win->fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ tegra_dc_writel(dc,
+ V_POSITION(win->out_y) | H_POSITION(win->out_x),
+ DC_WIN_POSITION);
+ tegra_dc_writel(dc,
+ V_SIZE(win->out_h) | H_SIZE(win->out_w),
+ DC_WIN_SIZE);
+ tegra_dc_writel(dc,
+ V_PRESCALED_SIZE(win->h) |
+ H_PRESCALED_SIZE(win->w * tegra_dc_fmt_bpp(win->fmt) / 8),
+ DC_WIN_PRESCALED_SIZE);
+
+ h_dda = ((win->w - 1) * 0x1000) / max_t(int, win->out_w - 1, 1);
+ v_dda = ((win->h - 1) * 0x1000) / max_t(int, win->out_h - 1, 1);
+ tegra_dc_writel(dc, V_DDA_INC(v_dda) | H_DDA_INC(h_dda),
+ DC_WIN_DDA_INCREMENT);
+ tegra_dc_writel(dc, 0, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, 0, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, (unsigned long)win->phys_addr,
+ DC_WINBUF_START_ADDR);
+
+ if (!yuvp) {
+ tegra_dc_writel(dc, win->stride, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr +
+ (unsigned long)win->offset_u,
+ DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr +
+ (unsigned long)win->offset_v,
+ DC_WINBUF_START_ADDR_V);
+ tegra_dc_writel(dc,
+ LINE_STRIDE(win->stride) |
+ UV_LINE_STRIDE(win->stride_uv),
+ DC_WIN_LINE_STRIDE);
+ }
+
+ h_offset = win->x;
+ if (invert_h) {
+ h_offset += win->w - 1;
+ }
+ h_offset *= tegra_dc_fmt_bpp(win->fmt) / 8;
+
+ v_offset = win->y;
+ if (invert_v) {
+ v_offset += win->h - 1;
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ if (win->flags & TEGRA_WIN_FLAG_TILED)
+ tegra_dc_writel(dc,
+ DC_WIN_BUFFER_ADDR_MODE_TILE |
+ DC_WIN_BUFFER_ADDR_MODE_TILE_UV,
+ DC_WIN_BUFFER_ADDR_MODE);
+ else
+ tegra_dc_writel(dc,
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV,
+ DC_WIN_BUFFER_ADDR_MODE);
+
+ val = WIN_ENABLE;
+ if (yuvp)
+ val |= CSC_ENABLE;
+ else if (tegra_dc_fmt_bpp(win->fmt) < 24)
+ val |= COLOR_EXPAND;
+
+ if (win->w != win->out_w)
+ val |= H_FILTER_ENABLE;
+ if (win->h != win->out_h)
+ val |= V_FILTER_ENABLE;
+
+ if (invert_h)
+ val |= H_DIRECTION_DECREMENT;
+ if (invert_v)
+ val |= V_DIRECTION_DECREMENT;
+
+ tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+
+ win->dirty = no_vsync ? 0 : 1;
+ }
+
+ if (update_blend) {
+ tegra_dc_set_blending(dc, &dc->blend);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (!no_vsync)
+ dc->windows[i].dirty = 1;
+ update_mask |= WIN_A_ACT_REQ << i;
+ }
+ }
+
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ if (!no_vsync) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val |= FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_windows);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc)
+{
+ return dc->syncpt_id;
+}
+EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
+
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc)
+{
+ u32 max;
+
+ mutex_lock(&dc->lock);
+ max = nvhost_syncpt_incr_max(&dc->ndev->host->syncpt, dc->syncpt_id, 1);
+ dc->syncpt_max = max;
+ mutex_unlock(&dc->lock);
+
+ return max;
+}
+
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, u32 val)
+{
+ mutex_lock(&dc->lock);
+ while (dc->syncpt_min < val) {
+ dc->syncpt_min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
+ }
+ mutex_unlock(&dc->lock);
+}
+
+static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[],
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (windows[i]->dirty)
+ return false;
+ }
+
+ return true;
+}
+
+/* does not support syncing windows on multiple dcs in one call */
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n)
+{
+ if (n < 1 || n > DC_N_WINDOWS)
+ return -EINVAL;
+
+ if (!windows[0]->dc->enabled)
+ return -EFAULT;
+
+ return wait_event_interruptible_timeout(windows[0]->dc->wq,
+ tegra_dc_windows_are_clean(windows, n),
+ HZ);
+}
+EXPORT_SYMBOL(tegra_dc_sync_windows);
+
+static unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk)
+{
+ unsigned long rate;
+ unsigned long div;
+
+ rate = clk_get_rate(dc->clk);
+
+ div = DIV_ROUND_CLOSEST(rate * 2, pclk);
+
+ if (div < 2)
+ return 0;
+
+ return rate * 2 / div;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk)
+{
+ int pclk;
+
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ unsigned long rate;
+ struct clk *pll_d_out0_clk =
+ clk_get_sys(NULL, "pll_d_out0");
+ struct clk *pll_d_clk =
+ clk_get_sys(NULL, "pll_d");
+
+ if (dc->mode.pclk > 70000000)
+ rate = 594000000;
+ else
+ rate = 216000000;
+
+ if (rate != clk_get_rate(pll_d_clk))
+ clk_set_rate(pll_d_clk, rate);
+
+ if (clk_get_parent(clk) != pll_d_out0_clk)
+ clk_set_parent(clk, pll_d_out0_clk);
+ }
+
+ pclk = tegra_dc_pclk_round_rate(dc, dc->mode.pclk);
+ tegra_dvfs_set_rate(clk, pclk);
+
+}
+
+static int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode)
+{
+ unsigned long val;
+ unsigned long rate;
+ unsigned long div;
+ unsigned long pclk;
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, mode->h_ref_to_sync | (mode->v_ref_to_sync << 16),
+ DC_DISP_REF_TO_SYNC);
+ tegra_dc_writel(dc, mode->h_sync_width | (mode->v_sync_width << 16),
+ DC_DISP_SYNC_WIDTH);
+ tegra_dc_writel(dc, mode->h_back_porch | (mode->v_back_porch << 16),
+ DC_DISP_BACK_PORCH);
+ tegra_dc_writel(dc, mode->h_active | (mode->v_active << 16),
+ DC_DISP_DISP_ACTIVE);
+ tegra_dc_writel(dc, mode->h_front_porch | (mode->v_front_porch << 16),
+ DC_DISP_FRONT_PORCH);
+
+ tegra_dc_writel(dc, DE_SELECT_ACTIVE | DE_CONTROL_NORMAL,
+ DC_DISP_DATA_ENABLE_OPTIONS);
+
+ val = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY1);
+ if (mode->flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC)
+ val |= PIN1_LVS_OUTPUT;
+ else
+ val &= ~PIN1_LVS_OUTPUT;
+
+ if (mode->flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC)
+ val |= PIN1_LHS_OUTPUT;
+ else
+ val &= ~PIN1_LHS_OUTPUT;
+ tegra_dc_writel(dc, val, DC_COM_PIN_OUTPUT_POLARITY1);
+
+ /* TODO: MIPI/CRT/HDMI clock cals */
+
+ val = DISP_DATA_FORMAT_DF1P1C;
+
+ if (dc->out->align == TEGRA_DC_ALIGN_MSB)
+ val |= DISP_DATA_ALIGNMENT_MSB;
+ else
+ val |= DISP_DATA_ALIGNMENT_LSB;
+
+ if (dc->out->order == TEGRA_DC_ORDER_RED_BLUE)
+ val |= DISP_DATA_ORDER_RED_BLUE;
+ else
+ val |= DISP_DATA_ORDER_BLUE_RED;
+
+ tegra_dc_writel(dc, val, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ rate = clk_get_rate(dc->clk);
+
+ pclk = tegra_dc_pclk_round_rate(dc, mode->pclk);
+ if (pclk < (mode->pclk / 100 * 99) ||
+ pclk > (mode->pclk / 100 * 109)) {
+ dev_err(&dc->ndev->dev,
+ "can't divide %ld clock to %d -1/+9%% %ld %d %d\n",
+ rate, mode->pclk,
+ pclk, (mode->pclk / 100 * 99),
+ (mode->pclk / 100 * 109));
+ return -EINVAL;
+ }
+
+ div = (rate * 2 / pclk) - 2;
+
+ tegra_dc_writel(dc, 0x00010001,
+ DC_DISP_SHIFT_CLOCK_OPTIONS);
+ tegra_dc_writel(dc, PIXEL_CLK_DIVIDER_PCD1 | SHIFT_CLK_DIVIDER(div),
+ DC_DISP_DISP_CLOCK_CONTROL);
+
+ return 0;
+}
+
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode)
+{
+ memcpy(&dc->mode, mode, sizeof(dc->mode));
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_set_mode);
+
+static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
+{
+ dc->out = out;
+
+ if (out->n_modes > 0)
+ tegra_dc_set_mode(dc, &dc->out->modes[0]);
+
+ switch (out->type) {
+ case TEGRA_DC_OUT_RGB:
+ dc->out_ops = &tegra_dc_rgb_ops;
+ break;
+
+ case TEGRA_DC_OUT_HDMI:
+ dc->out_ops = &tegra_dc_hdmi_ops;
+ break;
+
+ default:
+ dc->out_ops = NULL;
+ break;
+ }
+
+ if (dc->out_ops && dc->out_ops->init)
+ dc->out_ops->init(dc);
+
+}
+
+unsigned tegra_dc_get_out_height(struct tegra_dc *dc)
+{
+ if (dc->out)
+ return dc->out->height;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_height);
+
+unsigned tegra_dc_get_out_width(struct tegra_dc *dc)
+{
+ if (dc->out)
+ return dc->out->width;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_get_out_width);
+
+static irqreturn_t tegra_dc_irq(int irq, void *ptr)
+{
+ struct tegra_dc *dc = ptr;
+ unsigned long status;
+ unsigned long val;
+ unsigned long underflow_mask;
+ int i;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ if (status & FRAME_END_INT) {
+ int completed = 0;
+ int dirty = 0;
+
+ val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (!(val & (WIN_A_UPDATE << i))) {
+ dc->windows[i].dirty = 0;
+ completed = 1;
+ } else {
+ dirty = 1;
+ }
+ }
+
+ if (!dirty) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val &= ~FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+ }
+
+ if (completed)
+ wake_up(&dc->wq);
+ }
+
+
+ /*
+	 * Overlays can get their internal state corrupted during an underflow
+ * condition. The only way to fix this state is to reset the DC.
+ * if we get 4 consecutive frames with underflows, assume we're
+ * hosed and reset.
+ */
+ underflow_mask = status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT);
+ if (underflow_mask) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val |= V_BLANK_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+ dc->underflow_mask |= underflow_mask;
+ }
+
+ if (status & V_BLANK_INT) {
+ int i;
+
+ for (i = 0; i< DC_N_WINDOWS; i++) {
+ if (dc->underflow_mask & (WIN_A_UF_INT <<i)) {
+ dc->windows[i].underflows++;
+
+ if (dc->windows[i].underflows > 4)
+ schedule_work(&dc->reset_work);
+ } else {
+ dc->windows[i].underflows = 0;
+ }
+ }
+
+ if (!dc->underflow_mask) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val &= ~V_BLANK_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+ }
+
+ dc->underflow_mask = 0;
+ }
+
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_dc_set_color_control(struct tegra_dc *dc)
+{
+ u32 color_control;
+
+ switch (dc->out->depth) {
+ case 3:
+ color_control = BASE_COLOR_SIZE111;
+ break;
+
+ case 6:
+ color_control = BASE_COLOR_SIZE222;
+ break;
+
+ case 8:
+ color_control = BASE_COLOR_SIZE332;
+ break;
+
+ case 9:
+ color_control = BASE_COLOR_SIZE333;
+ break;
+
+ case 12:
+ color_control = BASE_COLOR_SIZE444;
+ break;
+
+ case 15:
+ color_control = BASE_COLOR_SIZE555;
+ break;
+
+ case 16:
+ color_control = BASE_COLOR_SIZE565;
+ break;
+
+ case 18:
+ color_control = BASE_COLOR_SIZE666;
+ break;
+
+ default:
+ color_control = BASE_COLOR_SIZE888;
+ break;
+ }
+
+ tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL);
+}
+
+static void tegra_dc_init(struct tegra_dc *dc)
+{
+ u32 disp_syncpt;
+ u32 vblank_syncpt;
+ int i;
+
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ if (dc->ndev->id == 0) {
+ disp_syncpt = NVSYNCPT_DISP0;
+ vblank_syncpt = NVSYNCPT_VBLANK0;
+
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
+ TEGRA_MC_PRIO_HIGH);
+ } else if (dc->ndev->id == 1) {
+ disp_syncpt = NVSYNCPT_DISP1;
+ vblank_syncpt = NVSYNCPT_VBLANK1;
+
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
+ TEGRA_MC_PRIO_HIGH);
+ }
+ tegra_dc_writel(dc, 0x00000100 | vblank_syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
+ tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
+ tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
+ tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ tegra_dc_writel(dc, (FRAME_END_INT |
+ V_BLANK_INT |
+ WIN_A_UF_INT |
+ WIN_B_UF_INT |
+ WIN_C_UF_INT), DC_CMD_INT_MASK);
+ tegra_dc_writel(dc, (WIN_A_UF_INT |
+ WIN_B_UF_INT |
+ WIN_C_UF_INT), DC_CMD_INT_ENABLE);
+
+ tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);
+
+ tegra_dc_set_color_control(dc);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_set_csc(dc);
+ tegra_dc_set_scaling_filter(dc);
+ }
+
+
+ dc->syncpt_id = disp_syncpt;
+
+ dc->syncpt_min = dc->syncpt_max =
+ nvhost_syncpt_read(&dc->ndev->host->syncpt, disp_syncpt);
+
+ if (dc->mode.pclk)
+ tegra_dc_program_mode(dc, &dc->mode);
+}
+
+static bool _tegra_dc_enable(struct tegra_dc *dc)
+{
+ if (dc->mode.pclk == 0)
+ return false;
+
+ tegra_dc_io_start(dc);
+
+ if (dc->out && dc->out->enable)
+ dc->out->enable();
+
+ tegra_dc_setup_clk(dc, dc->clk);
+ clk_enable(dc->clk);
+ clk_enable(dc->emc_clk);
+ tegra_periph_reset_deassert(dc->clk);
+ msleep(10);
+
+ enable_irq(dc->irq);
+
+ tegra_dc_init(dc);
+
+ if (dc->out_ops && dc->out_ops->enable)
+ dc->out_ops->enable(dc);
+
+ /* force a full blending update */
+ dc->blend.z[0] = -1;
+
+ return true;
+}
+
+void tegra_dc_enable(struct tegra_dc *dc)
+{
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled)
+ dc->enabled = _tegra_dc_enable(dc);
+
+ mutex_unlock(&dc->lock);
+}
+
+static void _tegra_dc_disable(struct tegra_dc *dc)
+{
+ disable_irq(dc->irq);
+
+ if (dc->out_ops && dc->out_ops->disable)
+ dc->out_ops->disable(dc);
+
+ clk_disable(dc->emc_clk);
+ clk_disable(dc->clk);
+ tegra_dvfs_set_rate(dc->clk, 0);
+
+ if (dc->out && dc->out->disable)
+ dc->out->disable();
+
+ /* flush any pending syncpt waits */
+ while (dc->syncpt_min < dc->syncpt_max) {
+ dc->syncpt_min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
+ }
+
+ tegra_dc_io_end(dc);
+}
+
+
+void tegra_dc_disable(struct tegra_dc *dc)
+{
+ mutex_lock(&dc->lock);
+
+ if (dc->enabled) {
+ dc->enabled = false;
+
+ if (!dc->suspended)
+ _tegra_dc_disable(dc);
+ }
+
+ mutex_unlock(&dc->lock);
+}
+
+static void tegra_dc_reset_worker(struct work_struct *work)
+{
+ struct tegra_dc *dc =
+ container_of(work, struct tegra_dc, reset_work);
+
+ dev_warn(&dc->ndev->dev, "overlay stuck in underflow state. resetting.\n");
+
+ mutex_lock(&dc->lock);
+ if (dc->enabled && !dc->suspended) {
+ _tegra_dc_disable(dc);
+
+ /* A necessary wait. */
+ msleep(100);
+ tegra_periph_reset_assert(dc->clk);
+
+ /* _tegra_dc_enable deasserts reset */
+ _tegra_dc_enable(dc);
+ }
+ mutex_unlock(&dc->lock);
+}
+
+
+static int tegra_dc_probe(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc;
+ struct clk *clk;
+ struct clk *emc_clk;
+ struct resource *res;
+ struct resource *base_res;
+ struct resource *fb_mem = NULL;
+ int ret = 0;
+ void __iomem *base;
+ int irq;
+ int i;
+ unsigned long emc_clk_rate;
+
+ if (!ndev->dev.platform_data) {
+ dev_err(&ndev->dev, "no platform data\n");
+ return -ENOENT;
+ }
+
+ dc = kzalloc(sizeof(struct tegra_dc), GFP_KERNEL);
+ if (!dc) {
+ dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n");
+ return -ENOMEM;
+ }
+
+ irq = nvhost_get_irq_byname(ndev, "irq");
+ if (irq <= 0) {
+ dev_err(&ndev->dev, "no irq\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ res = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ dev_err(&ndev->dev, "no mem resource\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res), ndev->name);
+ if (!base_res) {
+ dev_err(&ndev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&ndev->dev, "registers can't be mapped\n");
+ ret = -EBUSY;
+ goto err_release_resource_reg;
+ }
+
+ fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
+
+ clk = clk_get(&ndev->dev, NULL);
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(&ndev->dev, "can't get clock\n");
+ ret = -ENOENT;
+ goto err_iounmap_reg;
+ }
+
+ emc_clk = clk_get(&ndev->dev, "emc");
+ if (IS_ERR_OR_NULL(emc_clk)) {
+ dev_err(&ndev->dev, "can't get emc clock\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ dc->clk = clk;
+ dc->emc_clk = emc_clk;
+ dc->base_res = base_res;
+ dc->base = base;
+ dc->irq = irq;
+ dc->ndev = ndev;
+ dc->pdata = ndev->dev.platform_data;
+
+ /*
+ * The emc is a shared clock, it will be set based on
+ * the requirements for each user on the bus.
+ */
+ emc_clk_rate = dc->pdata->emc_clk_rate;
+ clk_set_rate(emc_clk, emc_clk_rate ? emc_clk_rate : ULONG_MAX);
+
+ if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED)
+ dc->enabled = true;
+
+ mutex_init(&dc->lock);
+ init_waitqueue_head(&dc->wq);
+ INIT_WORK(&dc->reset_work, tegra_dc_reset_worker);
+
+ dc->n_windows = DC_N_WINDOWS;
+ for (i = 0; i < dc->n_windows; i++) {
+ dc->windows[i].idx = i;
+ dc->windows[i].dc = dc;
+ }
+
+ if (request_irq(irq, tegra_dc_irq, IRQF_DISABLED,
+ dev_name(&ndev->dev), dc)) {
+ dev_err(&ndev->dev, "request_irq %d failed\n", irq);
+ ret = -EBUSY;
+ goto err_put_emc_clk;
+ }
+
+	/* hack to balance enable_irq calls in _tegra_dc_enable() */
+ disable_irq(dc->irq);
+
+ ret = tegra_dc_add(dc, ndev->id);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "can't add dc\n");
+ goto err_free_irq;
+ }
+
+ nvhost_set_drvdata(ndev, dc);
+
+ if (dc->pdata->default_out)
+ tegra_dc_set_out(dc, dc->pdata->default_out);
+ else
+ dev_err(&ndev->dev, "No default output specified. Leaving output disabled.\n");
+
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+
+ tegra_dc_dbg_add(dc);
+
+ dev_info(&ndev->dev, "probed\n");
+
+ if (dc->pdata->fb) {
+ if (dc->pdata->fb->bits_per_pixel == -1) {
+ unsigned long fmt;
+ tegra_dc_writel(dc,
+ WINDOW_A_SELECT << dc->pdata->fb->win,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH);
+ dc->pdata->fb->bits_per_pixel =
+ tegra_dc_fmt_bpp(fmt);
+ }
+
+ dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem);
+ if (IS_ERR_OR_NULL(dc->fb))
+ dc->fb = NULL;
+ }
+
+ if (dc->out_ops && dc->out_ops->detect)
+ dc->out_ops->detect(dc);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, dc);
+err_put_emc_clk:
+ clk_put(emc_clk);
+err_put_clk:
+ clk_put(clk);
+err_iounmap_reg:
+ iounmap(base);
+ if (fb_mem)
+ release_resource(fb_mem);
+err_release_resource_reg:
+ release_resource(base_res);
+err_free:
+ kfree(dc);
+
+ return ret;
+}
+
+static int tegra_dc_remove(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ if (dc->fb) {
+ tegra_fb_unregister(dc->fb);
+ if (dc->fb_mem)
+ release_resource(dc->fb_mem);
+ }
+
+
+ if (dc->enabled)
+ _tegra_dc_disable(dc);
+
+ free_irq(dc->irq, dc);
+ clk_put(dc->emc_clk);
+ clk_put(dc->clk);
+ iounmap(dc->base);
+ if (dc->fb_mem)
+ release_resource(dc->base_res);
+ kfree(dc);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "suspend\n");
+
+ mutex_lock(&dc->lock);
+
+ if (dc->out_ops && dc->out_ops->suspend)
+ dc->out_ops->suspend(dc);
+
+ if (dc->enabled) {
+ tegra_fb_suspend(dc->fb);
+ _tegra_dc_disable(dc);
+
+ dc->suspended = true;
+ }
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+static int tegra_dc_resume(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "resume\n");
+
+ mutex_lock(&dc->lock);
+ dc->suspended = false;
+
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+
+ if (dc->out_ops && dc->out_ops->resume)
+ dc->out_ops->resume(dc);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+#endif
+
+extern int suspend_set(const char *val, struct kernel_param *kp)
+{
+ if (!strcmp(val, "dump"))
+ dump_regs(tegra_dcs[0]);
+#ifdef CONFIG_PM
+ else if (!strcmp(val, "suspend"))
+ tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND);
+ else if (!strcmp(val, "resume"))
+ tegra_dc_resume(tegra_dcs[0]->ndev);
+#endif
+
+ return 0;
+}
+
+extern int suspend_get(char *buffer, struct kernel_param *kp)
+{
+ return 0;
+}
+
+int suspend;
+
+module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
+
+struct nvhost_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegradc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_dc_suspend,
+ .resume = tegra_dc_resume,
+#endif
+};
+
+static int __init tegra_dc_module_init(void)
+{
+ return nvhost_driver_register(&tegra_dc_driver);
+}
+
+static void __exit tegra_dc_module_exit(void)
+{
+ nvhost_driver_unregister(&tegra_dc_driver);
+}
+
+module_exit(tegra_dc_module_exit);
+module_init(tegra_dc_module_init);
--- /dev/null
+/*
+ * drivers/video/tegra/dc/dc_priv.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "../host/dev.h"
+
+struct tegra_dc;
+
+struct tegra_dc_blend {
+ unsigned z[DC_N_WINDOWS];
+ unsigned flags[DC_N_WINDOWS];
+};
+
+struct tegra_dc_out_ops {
+ /* initialize output. dc clocks are not on at this point */
+ int (*init)(struct tegra_dc *dc);
+ /* destroy output. dc clocks are not on at this point */
+ void (*destroy)(struct tegra_dc *dc);
+	/* detect connected display. can sleep. */
+ bool (*detect)(struct tegra_dc *dc);
+ /* enable output. dc clocks are on at this point */
+ void (*enable)(struct tegra_dc *dc);
+ /* disable output. dc clocks are on at this point */
+ void (*disable)(struct tegra_dc *dc);
+
+ /* suspend output. dc clocks are on at this point */
+ void (*suspend)(struct tegra_dc *dc);
+ /* resume output. dc clocks are on at this point */
+ void (*resume)(struct tegra_dc *dc);
+};
+
+struct tegra_dc {
+ struct list_head list;
+
+ struct nvhost_device *ndev;
+ struct tegra_dc_platform_data *pdata;
+
+ struct resource *base_res;
+ void __iomem *base;
+ int irq;
+
+ struct clk *clk;
+ struct clk *emc_clk;
+
+ bool enabled;
+ bool suspended;
+
+ struct tegra_dc_out *out;
+ struct tegra_dc_out_ops *out_ops;
+ void *out_data;
+
+ struct tegra_dc_mode mode;
+
+ struct tegra_dc_win windows[DC_N_WINDOWS];
+ struct tegra_dc_blend blend;
+ int n_windows;
+
+ wait_queue_head_t wq;
+
+ struct mutex lock;
+
+ struct resource *fb_mem;
+ struct tegra_fb_info *fb;
+
+ u32 syncpt_id;
+ u32 syncpt_min;
+ u32 syncpt_max;
+
+ unsigned long underflow_mask;
+ struct work_struct reset_work;
+};
+
+static inline void tegra_dc_io_start(struct tegra_dc *dc)
+{
+ nvhost_module_busy(&dc->ndev->host->mod);
+}
+
+static inline void tegra_dc_io_end(struct tegra_dc *dc)
+{
+ nvhost_module_idle(&dc->ndev->host->mod);
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ return readl(dc->base + reg * 4);
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ writel(val, dc->base + reg * 4);
+}
+
+static inline void _tegra_dc_write_table(struct tegra_dc *dc, const u32 *table,
+ unsigned len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ tegra_dc_writel(dc, table[i * 2 + 1], table[i * 2]);
+}
+
+#define tegra_dc_write_table(dc, table) \
+ _tegra_dc_write_table(dc, table, ARRAY_SIZE(table) / 2)
+
+static inline void tegra_dc_set_outdata(struct tegra_dc *dc, void *data)
+{
+ dc->out_data = data;
+}
+
+static inline void *tegra_dc_get_outdata(struct tegra_dc *dc)
+{
+ return dc->out_data;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk);
+
+extern struct tegra_dc_out_ops tegra_dc_rgb_ops;
+extern struct tegra_dc_out_ops tegra_dc_hdmi_ops;
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/dc_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_COMMAND_RAISE (1 << 0)
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_COMMAND_RAISE_VECTOR(x) (((x) & 0x1f) << 22)
+#define DISP_COMMAND_RAISE_CHANNEL_ID(x) (((x) & 0xf) << 27)
+
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+#define SPI_ENABLE (1 << 24)
+#define HSPI_ENABLE (1 << 25)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define V_BLANK_INT (1 << 2)
+#define H_BLANK_INT (1 << 3)
+#define V_PULSE3_INT (1 << 4)
+#define SPI_BUSY_INT (1 << 7)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define MSF_INT (1 << 12)
+#define SSF_INT (1 << 13)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+#define GPIO_0_INT (1 << 18)
+#define GPIO_1_INT (1 << 19)
+#define GPIO_2_INT (1 << 20)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX_ASSEMBLY (0 << 0)
+#define READ_MUX_ACTIVE (1 << 0)
+#define WRITE_MUX_ASSEMBLY (0 << 2)
+#define WRITE_MUX_ACTIVE (1 << 2)
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE0 0x302
+#define DC_COM_PIN_OUTPUT_ENABLE1 0x303
+#define DC_COM_PIN_OUTPUT_ENABLE2 0x304
+#define DC_COM_PIN_OUTPUT_ENABLE3 0x305
+#define DC_COM_PIN_OUTPUT_POLARITY0 0x306
+#define DC_COM_PIN_OUTPUT_POLARITY1 0x307
+#define DC_COM_PIN_OUTPUT_POLARITY2 0x308
+#define DC_COM_PIN_OUTPUT_POLARITY3 0x309
+#define DC_COM_PIN_OUTPUT_DATA0 0x30a
+#define DC_COM_PIN_OUTPUT_DATA1 0x30b
+#define DC_COM_PIN_OUTPUT_DATA2 0x30c
+#define DC_COM_PIN_OUTPUT_DATA3 0x30d
+#define DC_COM_PIN_INPUT_ENABLE0 0x30e
+#define DC_COM_PIN_INPUT_ENABLE1 0x30f
+#define DC_COM_PIN_INPUT_ENABLE2 0x310
+#define DC_COM_PIN_INPUT_ENABLE3 0x311
+#define DC_COM_PIN_INPUT_DATA0 0x312
+#define DC_COM_PIN_INPUT_DATA1 0x313
+#define DC_COM_PIN_OUTPUT_SELECT0 0x314
+#define DC_COM_PIN_OUTPUT_SELECT1 0x315
+#define DC_COM_PIN_OUTPUT_SELECT2 0x316
+#define DC_COM_PIN_OUTPUT_SELECT3 0x317
+#define DC_COM_PIN_OUTPUT_SELECT4 0x318
+#define DC_COM_PIN_OUTPUT_SELECT5 0x319
+#define DC_COM_PIN_OUTPUT_SELECT6 0x31a
+
+#define PIN1_LHS_OUTPUT (1 << 30)
+#define PIN1_LVS_OUTPUT (1 << 28)
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PM0_CONTROL 0x31c
+#define DC_COM_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PM1_CONTROL 0x31e
+#define DC_COM_PM1_DUTY_CYCLE 0x31f
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+#define V_PULSE_0_ENABLE (1 << 16)
+#define V_PULSE_1_ENABLE (1 << 18)
+#define V_PULSE_2_ENABLE (1 << 19)
+#define V_PULSE_3_ENABLE (1 << 20)
+#define M0_ENABLE (1 << 24)
+#define M1_ENABLE (1 << 26)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+#define DI_ENABLE (1 << 16)
+#define PP_ENABLE (1 << 18)
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define CURSOR_ENABLE (1 << 16)
+#define TVO_ENABLE (1 << 28)
+#define DSI_ENABLE (1 << 29)
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_MEM_HIGH_PRIORITY 0x403
+#define DC_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) ((x) & 0xfff)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (5 << 0)
+#define DISP_DATA_FORMAT_DF3S (6 << 0)
+#define DISP_DATA_FORMAT_DFSPI (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (8 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (9 << 0)
+#define DISP_DATA_ALIGNMENT_MSB (0 << 8)
+#define DISP_DATA_ALIGNMENT_LSB (1 << 8)
+#define DISP_DATA_ORDER_RED_BLUE (0 << 9)
+#define DISP_DATA_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK 0x0
+#define DE_SELECT_ACTIVE 0x1
+#define DE_SELECT_ACTIVE_IS 0x2
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY0C_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+
+#define DC_WIN_COLOR_PALETTE(x) (0x500 + (x))
+
+#define DC_WIN_PALETTE_COLOR_EXT 0x600
+#define DC_WIN_H_FILTER_P(x) (0x601 + (x))
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+#define DC_WIN_V_FILTER_P(x) (0x619 + (x))
+#define DC_WIN_WIN_OPTIONS 0x700
+#define H_DIRECTION_INCREMENT (0 << 0)
+#define H_DIRECTION_DECREMENT (1 << 0)
+#define V_DIRECTION_INCREMENT (0 << 2)
+#define V_DIRECTION_DECREMENT (1 << 2)
+#define COLOR_EXPAND (1 << 6)
+#define H_FILTER_ENABLE (1 << 8)
+#define V_FILTER_ENABLE (1 << 10)
+#define CP_ENABLE (1 << 16)
+#define CSC_ENABLE (1 << 18)
+#define DV_ENABLE (1 << 20)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP 0
+#define BYTE_SWAP_SWAP2 1
+#define BYTE_SWAP_SWAP4 2
+#define BYTE_SWAP_SWAP4HW 3
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST 0
+#define BUFFER_CONTROL_VI 1
+#define BUFFER_CONTROL_EPP 2
+#define BUFFER_CONTROL_MPEGE 3
+#define BUFFER_CONTROL_SB2D 4
+
+#define DC_WIN_COLOR_DEPTH 0x703
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0xfff) << 0)
+#define V_POSITION(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0xfff) << 0)
+#define V_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x3fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INCREMENT 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define LINE_STRIDE(x) (x)
+#define UV_LINE_STRIDE(x) (((x) & 0xffff) << 16)
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
+#define DC_WIN_DV_CONTROL 0x70e
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND_3WIN_XY 0x713
+#define CKEY_NOKEY (0 << 0)
+#define CKEY_KEY0 (1 << 0)
+#define CKEY_KEY1 (2 << 0)
+#define CKEY_KEY01 (3 << 0)
+#define BLEND_CONTROL_FIX (0 << 2)
+#define BLEND_CONTROL_ALPHA (1 << 2)
+#define BLEND_CONTROL_DEPENDANT (2 << 2)
+#define BLEND_CONTROL_PREMULT (3 << 2)
+#define BLEND_WEIGHT0(x) (((x) & 0xff) << 8)
+#define BLEND_WEIGHT1(x) (((x) & 0xff) << 16)
+#define BLEND(key, control, weight0, weight1) \
+ (CKEY_ ## key | BLEND_CONTROL_ ## control | \
+ BLEND_WEIGHT0(weight0) | BLEND_WEIGHT1(weight1))
+
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/edid.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DEBUG
+
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include "edid.h"
+
+/*
+ * Per-output EDID reader state.  'client' is the I2C device instantiated
+ * on bus 'bus' at the standard DDC EDID address 0x50.  'data' is a 32 KiB
+ * vmalloc'd buffer holding the base EDID block plus any extension blocks;
+ * 'len' is the number of bytes currently valid in it.
+ */
+struct tegra_edid {
+ struct i2c_client *client;
+ struct i2c_board_info info; /* board info used to create 'client' */
+ int bus;
+
+ u8 *data;
+ unsigned len;
+};
+
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+/*
+ * seq_file show routine: hex-dump the cached EDID, 16 bytes per line, as
+ * "edid[OFF] = xx xx ...".  Shared by the debugfs file and the DEBUG dump
+ * path.  NOTE(review): if edid->len is not a multiple of 16 the final
+ * line is emitted without a trailing newline.
+ */
+static int tegra_edid_show(struct seq_file *s, void *unused)
+{
+ struct tegra_edid *edid = s->private;
+ int i;
+
+ for (i = 0; i < edid->len; i++) {
+ if (i % 16 == 0)
+ seq_printf(s, "edid[%03x] =", i);
+
+ seq_printf(s, " %02x", edid->data[i]);
+
+ if (i % 16 == 15)
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs open: bind the seq_file show routine to this edid instance. */
+static int tegra_edid_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_edid_show, inode->i_private);
+}
+
+static const struct file_operations tegra_edid_debug_fops = {
+ .open = tegra_edid_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Register a debugfs file named "edid<bus>" in the debugfs root that dumps
+ * the cached EDID.  NOTE(review): name[] is sized for a single-digit bus
+ * number ("edidX"); a bus number >= 10 would be truncated by snprintf --
+ * confirm bus numbering on this platform.
+ */
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+ char name[] = "edidX";
+
+ snprintf(name, sizeof(name), "edid%1d", edid->bus);
+ debugfs_create_file(name, S_IRUGO, NULL, edid, &tegra_edid_debug_fops);
+}
+#else
+/* No-op stub when debugfs is not configured. */
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+}
+#endif
+
+#ifdef DEBUG
+/* Scratch buffer for the printk dump below; makes the dump non-reentrant. */
+static char tegra_edid_dump_buff[16 * 1024];
+
+/*
+ * printk the EDID hex dump when DEBUG is set: render the seq_file output
+ * into the static buffer, then emit it in 256-byte slices (presumably to
+ * stay within a single printk's limits -- TODO confirm).  Uses a fake,
+ * stack-constructed seq_file rather than a real one, so only the fields
+ * tegra_edid_show() touches (buf/size/count/private) are meaningful.
+ */
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+ struct seq_file s;
+ int i;
+ char c;
+
+ memset(&s, 0x0, sizeof(s));
+
+ s.buf = tegra_edid_dump_buff;
+ s.size = sizeof(tegra_edid_dump_buff);
+ s.private = edid;
+
+ tegra_edid_show(&s, NULL);
+
+ i = 0;
+ while (i < s.count ) {
+ if ((s.count - i) > 256) {
+ /* temporarily NUL-terminate the 256-byte slice */
+ c = s.buf[i + 256];
+ s.buf[i + 256] = 0;
+ printk("%s", s.buf + i);
+ s.buf[i + 256] = c;
+ } else {
+ printk("%s", s.buf + i);
+ }
+ i += 256;
+ }
+}
+#else
+/* No-op stub when DEBUG is not defined. */
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+}
+#endif
+
+/*
+ * Read one 128-byte EDID block into 'data' over DDC.
+ *
+ * Looks like the E-DDC protocol: msg[0] writes the segment pointer
+ * (block >> 1) to the segment address 0x30, msg[1] writes the word offset
+ * (0 or 128 within the segment) to the EDID address 0x50, and msg[2]
+ * reads 128 bytes back.  Blocks 0 and 1 live in segment 0, which is the
+ * device default, so the segment write is skipped for them (block > 1).
+ * Returns 0 on success, a negative errno from i2c_transfer(), or -EIO if
+ * fewer messages than requested completed.
+ */
+int tegra_edid_read_block(struct tegra_edid *edid, int block, u8 *data)
+{
+ u8 block_buf[] = {block >> 1};
+ u8 cmd_buf[] = {(block & 0x1) * 128};
+ int status;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x30,
+ .flags = 0,
+ .len = 1,
+ .buf = block_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 128,
+ .buf = data,
+ }};
+ struct i2c_msg *m;
+ int msg_len;
+
+ if (block > 1) {
+ msg_len = 3;
+ m = msg;
+ } else {
+ msg_len = 2;
+ m = &msg[1]; /* skip the segment-pointer write */
+ }
+
+ status = i2c_transfer(edid->client->adapter, m, msg_len);
+
+ if (status < 0)
+ return status;
+
+ if (status != msg_len)
+ return -EIO;
+
+ return 0;
+}
+
+
+/*
+ * Read the full EDID from the sink and convert it to fb_monspecs.
+ *
+ * Reads the base block, parses it with fb_edid_to_monspecs(), then reads
+ * each extension block (count taken from base-block byte 0x7e) into the
+ * cache at 128-byte offsets; extension blocks with tag 0x02 (CEA-861)
+ * have their modes merged via fb_edid_add_monspecs().  A failed extension
+ * read stops the loop but is not treated as fatal -- 'len' ends up
+ * covering only the blocks read successfully.  Returns 0 on success,
+ * -EINVAL if the base block yields no modes, or the base-read error.
+ */
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs)
+{
+ int i;
+ int ret;
+ int extension_blocks;
+
+ ret = tegra_edid_read_block(edid, 0, edid->data);
+ if (ret)
+ return ret;
+
+ memset(specs, 0x0, sizeof(struct fb_monspecs));
+ fb_edid_to_monspecs(edid->data, specs);
+ if (specs->modedb == NULL)
+ return -EINVAL;
+
+ extension_blocks = edid->data[0x7e];
+
+ for (i = 1; i <= extension_blocks; i++) {
+ ret = tegra_edid_read_block(edid, i, edid->data + i * 128);
+ if (ret < 0)
+ break;
+
+ if (edid->data[i * 128] == 0x2)
+ fb_edid_add_monspecs(edid->data + i * 128, specs);
+ }
+
+ edid->len = i * 128;
+
+ tegra_edid_dump(edid);
+
+ return 0;
+}
+
+/*
+ * Create an EDID reader on I2C bus 'bus'.
+ *
+ * Allocates the state and a 32 KiB EDID buffer, instantiates an I2C
+ * client at the standard DDC EDID address 0x50 on that bus, and registers
+ * the debugfs dump file.  Returns the new tegra_edid, or an ERR_PTR on
+ * failure (the caller owns the result and frees it with
+ * tegra_edid_destroy()).
+ */
+struct tegra_edid *tegra_edid_create(int bus)
+{
+ struct tegra_edid *edid;
+ struct i2c_adapter *adapter;
+ int err;
+
+ edid = kzalloc(sizeof(struct tegra_edid), GFP_KERNEL);
+ if (!edid)
+ return ERR_PTR(-ENOMEM);
+
+ edid->data = vmalloc(SZ_32K);
+ if (!edid->data) {
+ err = -ENOMEM;
+ goto free_edid;
+ }
+ strlcpy(edid->info.type, "tegra_edid", sizeof(edid->info.type));
+ edid->bus = bus;
+ edid->info.addr = 0x50;
+ edid->info.platform_data = edid;
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ pr_err("can't get adapter for bus %d\n", bus);
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ edid->client = i2c_new_device(adapter, &edid->info);
+ i2c_put_adapter(adapter);
+
+ if (!edid->client) {
+ pr_err("can't create new device\n");
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ tegra_edid_debug_add(edid);
+
+ return edid;
+
+free_edid:
+ /* vfree(NULL) is a no-op, so this is safe on the early error paths. */
+ vfree(edid->data);
+ kfree(edid);
+
+ return ERR_PTR(err);
+}
+
+/*
+ * Tear down an EDID reader created by tegra_edid_create() and free all
+ * of its resources.  The client was instantiated with i2c_new_device(),
+ * whose documented counterpart is i2c_unregister_device();
+ * i2c_release_client() only drops a reference taken by i2c_use_client(),
+ * which this driver never takes, and would leave the device registered.
+ */
+void tegra_edid_destroy(struct tegra_edid *edid)
+{
+ i2c_unregister_device(edid->client);
+ vfree(edid->data);
+ kfree(edid);
+}
+
+/* I2C device-id table so the driver binds to the "tegra_edid" client
+ * instantiated in tegra_edid_create(). */
+static const struct i2c_device_id tegra_edid_id[] = {
+ { "tegra_edid", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, tegra_edid_id);
+
+/* Minimal driver: no probe/remove callbacks -- registration only serves
+ * to claim the client; all work is driven through tegra_edid_* calls. */
+static struct i2c_driver tegra_edid_driver = {
+ .id_table = tegra_edid_id,
+ .driver = {
+ .name = "tegra_edid",
+ },
+};
+
+/* Module init: register the tegra_edid I2C driver. */
+static int __init tegra_edid_init(void)
+{
+ return i2c_add_driver(&tegra_edid_driver);
+}
+
+/* Module exit: unregister the tegra_edid I2C driver. */
+static void __exit tegra_edid_exit(void)
+{
+ i2c_del_driver(&tegra_edid_driver);
+}
+
+module_init(tegra_edid_init);
+module_exit(tegra_edid_exit);
--- /dev/null
+/*
+ * drivers/video/tegra/dc/edid.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+#define __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+
+#include <linux/i2c.h>
+#include <linux/wait.h>
+
+struct tegra_edid;
+
+struct tegra_edid *tegra_edid_create(int bus);
+void tegra_edid_destroy(struct tegra_edid *edid);
+
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/hdmi.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/nvhost.h>
+
+#include <video/tegrafb.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+#include "edid.h"
+#include "nvhdcp.h"
+
+/* datasheet claims this will always be 216MHz */
+#define HDMI_AUDIOCLK_FREQ 216000000
+
+#define HDMI_REKEY_DEFAULT 56
+
+/* Per-controller HDMI output state, stored as the DC's outdata. */
+struct tegra_dc_hdmi_data {
+ struct tegra_dc *dc;
+ struct tegra_edid *edid;
+ struct tegra_nvhdcp *nvhdcp;
+ struct delayed_work work; /* presumably deferred hotplug work - confirm */
+
+ struct resource *base_res; /* claimed MMIO region for 'base' */
+ void __iomem *base; /* HDMI register window */
+ struct clk *clk;
+
+ struct clk *disp1_clk;
+ struct clk *disp2_clk;
+
+ struct switch_dev hpd_switch; /* hotplug state reported to userspace */
+
+ /* NOTE(review): suspend_lock appears to guard 'suspended' and
+ * 'hpd_pending' -- confirm against the users of these fields. */
+ spinlock_t suspend_lock;
+ bool suspended;
+ bool hpd_pending;
+
+ bool dvi; /* sink is DVI (no audio/infoframes) rather than HDMI */
+};
+
+/*
+ * Whitelist of CEA-861-B video modes this driver will drive.  EDID modes
+ * are matched against this table (by xres/yres/vmode, see
+ * tegra_dc_hdmi_mode_equal()) and replaced by the exact timings below.
+ */
+const struct fb_videomode tegra_dc_hdmi_supported_modes[] = {
+ /* 1280x720p 60hz: EIA/CEA-861-B Format 4 */
+ {
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 40, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 220, /* h_back_porch */
+ .upper_margin = 20, /* v_back_porch */
+ .right_margin = 110, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */
+ {
+ .xres = 720,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 62, /* h_sync_width */
+ .vsync_len = 6, /* v_sync_width */
+ .left_margin = 60, /* h_back_porch */
+ .upper_margin = 30, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 9, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 640x480p 60hz: EIA/CEA-861-B Format 1 */
+ {
+ .xres = 640,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(25200),
+ .hsync_len = 96, /* h_sync_width */
+ .vsync_len = 2, /* v_sync_width */
+ .left_margin = 48, /* h_back_porch */
+ .upper_margin = 33, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 10, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */
+ {
+ .xres = 720,
+ .yres = 576,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 64, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 68, /* h_back_porch */
+ .upper_margin = 39, /* v_back_porch */
+ .right_margin = 12, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+
+ /* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */
+ {
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(148500),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 88, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+};
+
+/*
+ * HDMI audio clock regeneration parameters, one table per sample rate,
+ * keyed by pixel clock.  Per the HDMI spec, the sink recovers the audio
+ * clock as 128 * fs = f_pixel * N / CTS.
+ *
+ * NOTE(review): the entries keyed at 25200000 (and the 44.1 kHz CTS of
+ * 63125) satisfy that relation for a 25.25 MHz pixel clock, not 25.2 MHz
+ * (e.g. 128*32000*25250/4096 = 25.25 MHz).  This may be deliberate if the
+ * PLL actually produces 25.25 MHz for the 640x480 mode -- confirm against
+ * the HDMI 1.3 N/CTS tables and the real clock before changing.
+ */
+struct tegra_hdmi_audio_config {
+ unsigned pix_clock;
+ unsigned n;
+ unsigned cts;
+};
+
+/* 32 kHz audio; terminated by an all-zero sentinel entry. */
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ {25200000, 4096, 25250},
+ {27000000, 4096, 27000},
+ {54000000, 4096, 54000},
+ {74250000, 4096, 74250},
+ {148500000, 4096, 148500},
+ {0, 0, 0},
+};
+
+/* 44.1 kHz audio. */
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ {25200000, 14112, 63125},
+ {27000000, 6272, 30000},
+ {54000000, 6272, 60000},
+ {74250000, 6272, 82500},
+ {148500000, 6272, 165000},
+ {0, 0, 0},
+};
+
+/* 48 kHz audio. */
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ {25200000, 6144, 25250},
+ {27000000, 6144, 27000},
+ {54000000, 6144, 54000},
+ {74250000, 6144, 74250},
+ {148500000, 6144, 148500},
+ {0, 0, 0},
+};
+
+/*
+ * Look up the N/CTS pair for the given audio sample rate and pixel clock.
+ * Returns NULL if the sample rate is not one of 32/44.1/48 kHz or the
+ * pixel clock has no entry in the selected table (tables are terminated
+ * by a zero pix_clock sentinel).
+ */
+static const struct tegra_hdmi_audio_config
+*tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case 32000:
+ table = tegra_hdmi_audio_32k;
+ break;
+
+ case 44100:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+
+ case 48000:
+ table = tegra_hdmi_audio_48k;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ while (table->pix_clock) {
+ if (table->pix_clock == pix_clock)
+ return table;
+ table++;
+ }
+
+ return NULL;
+}
+
+
+/* Read one 32-bit HDMI register; 'reg' is a word index (byte offset / 4).
+ * Non-static: presumably shared with other files in this driver (e.g. the
+ * nvhdcp code) -- confirm, or make static. */
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->base + reg * 4);
+}
+
+/* Write one 32-bit HDMI register (word-indexed, see tegra_hdmi_readl). */
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, hdmi->base + reg * 4);
+}
+
+/* Read-modify-write an HDMI register: clear the bits in 'clr', then set
+ * the bits in 'set' (set wins where the masks overlap). */
+static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long val = tegra_hdmi_readl(hdmi, reg);
+ val &= ~clr;
+ val |= set;
+ tegra_hdmi_writel(hdmi, val, reg);
+}
+
+/* Print one HDMI register: its name, word offset and current value. */
+#define DUMP_REG(a) do { \
+ printk("HDMI %-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_hdmi_readl(hdmi, a)); \
+ } while (0)
+
+#ifdef DEBUG
+/* Dump every known HDMI register to the kernel log (DEBUG builds only).
+ * Purely mechanical; useful when bringing up a new board or mode. */
+static void hdmi_dumpregs(struct tegra_dc_hdmi_data *hdmi)
+{
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+}
+#endif
+
+#define PIXCLOCK_TOLERANCE 200
+
+/* Compare two video modes by visible resolution and scan type only;
+ * porches, sync widths and pixclock are deliberately ignored so a sink
+ * mode can be matched against the driver's own timing table.
+ * NOTE(review): PIXCLOCK_TOLERANCE above is currently unused here.
+ */
+static bool tegra_dc_hdmi_mode_equal(const struct fb_videomode *mode1,
+					const struct fb_videomode *mode2)
+{
+	return mode1->xres == mode2->xres &&
+		mode1->yres == mode2->yres &&
+		mode1->vmode == mode2->vmode;
+}
+
+/* fb monspecs filter: accept @mode only if it matches an entry in
+ * tegra_dc_hdmi_supported_modes (by resolution and scan type).  On a
+ * match the mode is overwritten with the table's full timings, flagged
+ * as detailed, and its refresh rate recomputed from the table's
+ * pixclock and total frame size.  Returns false for unsupported modes.
+ */
+static bool tegra_dc_hdmi_mode_filter(struct fb_videomode *mode)
+{
+	int i;
+	int clocks;
+
+	for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_modes); i++) {
+		if (tegra_dc_hdmi_mode_equal(&tegra_dc_hdmi_supported_modes[i],
+					&tegra_dc_hdmi_supported_modes[i] == NULL ? mode : mode)) {
+			memcpy(mode, &tegra_dc_hdmi_supported_modes[i], sizeof(*mode));
+			mode->flag = FB_MODE_IS_DETAILED;
+			/* clocks = h_total * v_total pixels per frame */
+			clocks = (mode->left_margin + mode->xres + mode->right_margin + mode->hsync_len) *
+				(mode->upper_margin + mode->yres + mode->lower_margin + mode->vsync_len);
+			mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000) / clocks;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+
+/* Return true when the hot-plug-detect GPIO indicates a sink is
+ * attached, honouring the board-configured active level in
+ * dc->out->flags (TEGRA_DC_OUT_HOTPLUG_HIGH/LOW).
+ */
+static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc)
+{
+	int active = gpio_get_value(dc->out->hotplug_gpio);
+
+	switch (dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK) {
+	case TEGRA_DC_OUT_HOTPLUG_HIGH:
+		return active != 0;
+	case TEGRA_DC_OUT_HOTPLUG_LOW:
+		return active == 0;
+	default:
+		return false;
+	}
+}
+
+/* Probe the sink: check hot-plug state, read the EDID, publish the
+ * filtered mode list to the framebuffer layer and set the "hdmi"
+ * switch device.  Returns true when a display was detected; on any
+ * failure the switch is cleared and HDCP is told the sink is gone.
+ */
+static bool tegra_dc_hdmi_detect(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct fb_monspecs specs;
+	int err;
+
+	if (!tegra_dc_hdmi_hpd(dc))
+		goto fail;
+
+	err = tegra_edid_get_monspecs(hdmi->edid, &specs);
+	if (err < 0) {
+		dev_err(&dc->ndev->dev, "error reading edid\n");
+		goto fail;
+	}
+
+	/* monitors like to lie about these but they are still useful for
+	 * detecting aspect ratios
+	 */
+	dc->out->h_size = specs.max_x * 1000;
+	dc->out->v_size = specs.max_y * 1000;
+
+
+	/* no FB_MISC_HDMI capability bit means a DVI-only sink */
+	hdmi->dvi = !(specs.misc & FB_MISC_HDMI);
+
+	tegra_fb_update_monspecs(dc->fb, &specs, tegra_dc_hdmi_mode_filter);
+	switch_set_state(&hdmi->hpd_switch, 1);
+	dev_info(&dc->ndev->dev, "display detected\n");
+	return true;
+
+fail:
+	switch_set_state(&hdmi->hpd_switch, 0);
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+	return false;
+}
+
+
+/* Delayed-work handler behind the hot-plug interrupt: re-run detection
+ * and, if the sink is gone (or unreadable), disable the DC and clear
+ * the published mode list.
+ */
+static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
+{
+	struct tegra_dc_hdmi_data *hdmi =
+		container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
+	struct tegra_dc *dc = hdmi->dc;
+
+	if (!tegra_dc_hdmi_detect(dc)) {
+		tegra_dc_disable(dc);
+		tegra_fb_update_monspecs(dc->fb, NULL, NULL);
+	}
+}
+
+/* Hot-plug GPIO interrupt (fires on both edges).  While suspended the
+ * event is only latched for resume to act on; otherwise the detect
+ * worker is (re)scheduled, debouncing a plug-in for 100 ms and a
+ * removal for 30 ms.
+ */
+static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr)
+{
+	struct tegra_dc *dc = ptr;
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	if (hdmi->suspended) {
+		hdmi->hpd_pending = true;
+	} else {
+		cancel_delayed_work(&hdmi->work);
+		if (tegra_dc_hdmi_hpd(dc))
+			queue_delayed_work(system_nrt_wq, &hdmi->work,
+					   msecs_to_jiffies(100));
+		else
+			queue_delayed_work(system_nrt_wq, &hdmi->work,
+					   msecs_to_jiffies(30));
+	}
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* Quiesce HDCP and flag the driver suspended so hot-plug interrupts
+ * are only latched (see tegra_dc_hdmi_irq) until resume.
+ */
+static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
+	tegra_nvhdcp_suspend(hdmi->nvhdcp);
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	hdmi->suspended = true;
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+/* Clear the suspended flag and, if a hot-plug event was latched while
+ * suspended, schedule the detect worker with the same debounce delays
+ * as the interrupt handler (100 ms plugged, 30 ms unplugged).
+ */
+static void tegra_dc_hdmi_resume(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->suspend_lock, flags);
+	hdmi->suspended = false;
+	if (hdmi->hpd_pending) {
+		if (tegra_dc_hdmi_hpd(dc))
+			queue_delayed_work(system_nrt_wq, &hdmi->work,
+					   msecs_to_jiffies(100));
+		else
+			queue_delayed_work(system_nrt_wq, &hdmi->work,
+					   msecs_to_jiffies(30));
+		hdmi->hpd_pending = false;
+	}
+	spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+/* Allocate and initialise the HDMI output: map the register window,
+ * acquire the hdmi/disp1/disp2 clocks, hook the hot-plug GPIO
+ * interrupt (wake-capable), create the EDID reader and HDCP state
+ * machine, and register the "hdmi" switch device.  Returns 0 or a
+ * negative errno; on failure everything acquired so far is released.
+ *
+ * Fix: tegra_edid_create()/tegra_nvhdcp_create() are checked with
+ * IS_ERR_OR_NULL(), but PTR_ERR(NULL) evaluates to 0, so a NULL
+ * return used to make this function report success with a half-torn-
+ * down device.  NULL is now mapped to -ENOMEM.
+ */
+static int tegra_dc_hdmi_init(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi;
+	struct resource *res;
+	struct resource *base_res;
+	void __iomem *base;
+	struct clk *clk = NULL;
+	struct clk *disp1_clk = NULL;
+	struct clk *disp2_clk = NULL;
+	int err;
+
+	hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs");
+	if (!res) {
+		dev_err(&dc->ndev->dev, "hdmi: no mem resource\n");
+		err = -ENOENT;
+		goto err_free_hdmi;
+	}
+
+	base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name);
+	if (!base_res) {
+		dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n");
+		err = -EBUSY;
+		goto err_free_hdmi;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n");
+		err = -EBUSY;
+		goto err_release_resource_reg;
+	}
+
+	clk = clk_get(&dc->ndev->dev, "hdmi");
+	if (IS_ERR_OR_NULL(clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get clock\n");
+		err = -ENOENT;
+		goto err_iounmap_reg;
+	}
+
+	/* the DC feeding HDMI may be either head; grab both (see enable) */
+	disp1_clk = clk_get_sys("tegradc.0", NULL);
+	if (IS_ERR_OR_NULL(disp1_clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get disp1 clock\n");
+		err = -ENOENT;
+		goto err_put_clock;
+	}
+
+	disp2_clk = clk_get_sys("tegradc.1", NULL);
+	if (IS_ERR_OR_NULL(disp2_clk)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't get disp2 clock\n");
+		err = -ENOENT;
+		goto err_put_clock;
+	}
+
+	/* TODO: support non-hotplug */
+	if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq,
+			IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			dev_name(&dc->ndev->dev), dc)) {
+		dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n",
+			gpio_to_irq(dc->out->hotplug_gpio));
+		err = -EBUSY;
+		goto err_put_clock;
+	}
+	enable_irq_wake(gpio_to_irq(dc->out->hotplug_gpio));
+
+	hdmi->edid = tegra_edid_create(dc->out->dcc_bus);
+	if (IS_ERR_OR_NULL(hdmi->edid)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't create edid\n");
+		/* PTR_ERR(NULL) would be 0 (success) - map NULL to -ENOMEM */
+		err = hdmi->edid ? PTR_ERR(hdmi->edid) : -ENOMEM;
+		goto err_free_irq;
+	}
+
+	hdmi->nvhdcp = tegra_nvhdcp_create(hdmi, dc->ndev->id,
+			dc->out->dcc_bus);
+	if (IS_ERR_OR_NULL(hdmi->nvhdcp)) {
+		dev_err(&dc->ndev->dev, "hdmi: can't create nvhdcp\n");
+		/* PTR_ERR(NULL) would be 0 (success) - map NULL to -ENOMEM */
+		err = hdmi->nvhdcp ? PTR_ERR(hdmi->nvhdcp) : -ENOMEM;
+		goto err_edid_destroy;
+	}
+
+	INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker);
+
+	hdmi->dc = dc;
+	hdmi->base = base;
+	hdmi->base_res = base_res;
+	hdmi->clk = clk;
+	hdmi->disp1_clk = disp1_clk;
+	hdmi->disp2_clk = disp2_clk;
+	hdmi->suspended = false;
+	hdmi->hpd_pending = false;
+	spin_lock_init(&hdmi->suspend_lock);
+
+	hdmi->hpd_switch.name = "hdmi";
+	switch_dev_register(&hdmi->hpd_switch);
+
+	dc->out->depth = 24;
+
+	tegra_dc_set_outdata(dc, hdmi);
+
+	/* boards can select default content protection policy */
+	if (dc->out->flags & TEGRA_DC_OUT_NVHDCP_POLICY_ON_DEMAND) {
+		tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+			TEGRA_NVHDCP_POLICY_ON_DEMAND);
+	} else {
+		tegra_nvhdcp_set_policy(hdmi->nvhdcp,
+			TEGRA_NVHDCP_POLICY_ALWAYS_ON);
+	}
+	return 0;
+
+err_edid_destroy:
+	tegra_edid_destroy(hdmi->edid);
+err_free_irq:
+	disable_irq_wake(gpio_to_irq(dc->out->hotplug_gpio));
+	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+err_put_clock:
+	if (!IS_ERR_OR_NULL(disp2_clk))
+		clk_put(disp2_clk);
+	if (!IS_ERR_OR_NULL(disp1_clk))
+		clk_put(disp1_clk);
+	if (!IS_ERR_OR_NULL(clk))
+		clk_put(clk);
+err_iounmap_reg:
+	iounmap(base);
+err_release_resource_reg:
+	release_resource(base_res);
+err_free_hdmi:
+	kfree(hdmi);
+	return err;
+}
+
+/* Tear down everything tegra_dc_hdmi_init() set up, in reverse order:
+ * hot-plug IRQ, pending detect work, switch device, register mapping,
+ * clocks, EDID reader, HDCP state machine, then the hdmi struct.
+ */
+static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	disable_irq_wake(gpio_to_irq(dc->out->hotplug_gpio));
+	free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+	cancel_delayed_work_sync(&hdmi->work);
+	switch_dev_unregister(&hdmi->hpd_switch);
+	iounmap(hdmi->base);
+	release_resource(hdmi->base_res);
+	clk_put(hdmi->clk);
+	clk_put(hdmi->disp1_clk);
+	clk_put(hdmi->disp2_clk);
+	tegra_edid_destroy(hdmi->edid);
+	tegra_nvhdcp_destroy(hdmi->nvhdcp);
+
+	kfree(hdmi);
+
+}
+
+/* Program the HDMI_NV_PDISP_AUDIO_FS windows the hardware uses to
+ * auto-detect the incoming audio sample rate.  For each supported
+ * rate the register holds low/high bounds around the ideal value of
+ * 8 * HDMI_AUDIOCLK_FREQ / (fs * 128); the tolerance window narrows
+ * as the rate increases.
+ *
+ * Fix: stray double semicolon after "unsigned delta"; the rate table
+ * is also now const.
+ */
+static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	int i;
+	const unsigned freqs[] = {
+		32000,
+		44100,
+		48000,
+		88200,
+		96000,
+		176400,
+		192000,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+		unsigned f = freqs[i];
+		unsigned eight_half;
+		unsigned delta;
+
+		if (f > 96000)
+			delta = 2;
+		else if (f > 48000)
+			delta = 6;
+		else
+			delta = 9;
+
+		eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+		tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) |
+				  AUDIO_FS_HIGH(eight_half + delta),
+				  HDMI_NV_PDISP_AUDIO_FS(i));
+	}
+}
+
+/* Configure Audio Clock Regeneration for a fixed 44.1 kHz stream at
+ * the current pixel clock: look up the (N, CTS) pair, program N with
+ * its reset flag briefly held, and switch the block to software-
+ * provided CTS.  Returns -EINVAL when no (N, CTS) entry exists for
+ * this pixel clock (caller then falls back to DVI).
+ */
+static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	const struct tegra_hdmi_audio_config *config;
+	unsigned long audio_n;
+	unsigned audio_freq = 44100; /* TODO: find some way of configuring this */
+
+	tegra_hdmi_writel(hdmi,
+			  AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			  AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+			  AUDIO_CNTRL0_SOURCE_SELECT_AUTO,
+			  HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+	config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk);
+	if (!config) {
+		dev_err(&dc->ndev->dev,
+			"hdmi: can't set audio to %d at %d pix_clock",
+			audio_freq, dc->mode.pclk);
+		return -EINVAL;
+	}
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+	/* hold N in reset while the ACR subpack registers are written */
+	audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE |
+		AUDIO_N_VALUE(config->n - 1);
+	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+	tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
+			  SPARE_CTS_RESET_VAL(1),
+			  HDMI_NV_PDISP_HDMI_SPARE);
+
+	/* release N from reset */
+	audio_n &= ~AUDIO_N_RESETF;
+	tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+	tegra_dc_hdmi_setup_audio_fs_tables(dc);
+
+	return 0;
+}
+
+/* Write an HDMI infoframe (type/version/payload) into the block's
+ * header + subpack registers starting at @header_reg.  The first
+ * payload byte is overwritten with the standard infoframe checksum
+ * (bytes sum to 0 mod 256).  @len includes that checksum byte.
+ * NOTE: @data is modified in place (byte 0 becomes the checksum).
+ */
+static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg,
+					 u8 type, u8 version, void *data, int len)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	u32 subpack[2]; /* extra byte for zero padding of subpack */
+	int i;
+	u8 csum;
+
+	/* first byte of data is the checksum */
+	csum = type + version + len - 1;
+	for (i = 1; i < len; i++)
+		csum +=((u8 *)data)[i];
+	((u8 *)data)[0] = 0x100 - csum;
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) |
+			  INFOFRAME_HEADER_VERSION(version) |
+			  INFOFRAME_HEADER_LEN(len - 1),
+			  header_reg);
+
+	/* The audio infoframe only has one set of subpack registers. The hdmi
+	 * block pads the rest of the data as per the spec so we have to fixup
+	 * the length before filling in the subpacks.
+	 */
+	if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+		len = 6;
+
+	/* each subpack 7 bytes divided into:
+	 *   subpack_low - bytes 0 - 3
+	 *   subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
+	for (i = 0; i < len; i++) {
+		int subpack_idx = i % 7;
+
+		if (subpack_idx == 0)
+			memset(subpack, 0x0, sizeof(subpack));
+
+		((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i];
+
+		/* flush on the last byte of a subpack or of the payload */
+		if (subpack_idx == 6 || (i + 1 == len)) {
+			int reg = header_reg + 1 + (i / 7) * 2;
+
+			tegra_hdmi_writel(hdmi, subpack[0], reg);
+			tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+		}
+	}
+}
+
+/* Build and enable the AVI infoframe for the current mode, deriving
+ * the picture aspect ratio and CEA-861 video identification code
+ * (VIC) from the active resolution and front-porch timings.  For DVI
+ * sinks the infoframe is simply disabled.
+ */
+static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct hdmi_avi_infoframe avi;
+
+	if (dvi) {
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+		return;
+	}
+
+	memset(&avi, 0x0, sizeof(avi));
+
+	avi.r = HDMI_AVI_R_SAME;
+
+	if (dc->mode.v_active == 480) {
+		if (dc->mode.h_active == 640) {
+			avi.m = HDMI_AVI_M_4_3;
+			avi.vic = 1;
+		} else {
+			avi.m = HDMI_AVI_M_16_9;
+			avi.vic = 3;
+		}
+	} else if (dc->mode.v_active == 576) {
+		/* CEA modes 17 and 18 differ only by the physical size of the
+		 * screen, so we have to calculate the physical aspect ratio:
+		 * a 4:3 screen gives (h * 10) / v == 13, a 16:9 one ~17, so
+		 * 14 separates them.
+		 */
+		if ((dc->out->h_size * 10) / dc->out->v_size > 14) {
+			avi.m = HDMI_AVI_M_16_9;
+			avi.vic = 18;
+		} else {
+			avi.m = HDMI_AVI_M_16_9;
+			avi.vic = 17;
+		}
+	} else if (dc->mode.v_active == 720) {
+		avi.m = HDMI_AVI_M_16_9;
+		if (dc->mode.h_front_porch == 110)
+			avi.vic = 4; /* 60 Hz */
+		else
+			avi.vic = 19; /* 50 Hz */
+	} else if (dc->mode.v_active == 1080) {
+		avi.m = HDMI_AVI_M_16_9;
+		if (dc->mode.h_front_porch == 88)
+			avi.vic = 16; /* 60 Hz */
+		else if (dc->mode.h_front_porch == 528)
+			avi.vic = 31; /* 50 Hz */
+		else
+			avi.vic = 32; /* 24 Hz */
+	} else {
+		avi.m = HDMI_AVI_M_16_9;
+		avi.vic = 0;
+	}
+
+
+	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+				     HDMI_INFOFRAME_TYPE_AVI,
+				     HDMI_AVI_VERSION,
+				     &avi, sizeof(avi));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+/* Build and enable a minimal (2-channel, stream-described) audio
+ * infoframe; for DVI sinks the infoframe is disabled instead.
+ */
+static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	struct hdmi_audio_infoframe audio;
+
+	if (dvi) {
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+		return;
+	}
+
+	memset(&audio, 0x0, sizeof(audio));
+
+	audio.cc = HDMI_AUDIO_CC_2;
+	tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+				     HDMI_INFOFRAME_TYPE_AUDIO,
+				     HDMI_AUDIO_VERSION,
+				     &audio, sizeof(audio));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+/* Full power-up / mode-set sequence for the HDMI output.  The order
+ * is significant: clocks and resets first, then HDMI block and SOR
+ * (serial output resource) configuration, then the SOR power state
+ * machine, and finally the display controller is switched from STOP
+ * to continuous display mode.
+ */
+static void tegra_dc_hdmi_enable(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+	int pulse_start;
+	int dispclk_div_8_2;
+	int pll0;
+	int pll1;
+	int ds;
+	int retries;
+	int rekey;
+	int err;
+	unsigned long val;
+
+	/* enable power, clocks, resets, etc. */
+
+	/* The upstream DC needs to be clocked for accesses to HDMI to not
+	 * hard lock the system. Because we don't know if HDMI is connected
+	 * to disp1 or disp2 we need to enable both until we set the DC mux.
+	 */
+	clk_enable(hdmi->disp1_clk);
+	clk_enable(hdmi->disp2_clk);
+	tegra_dc_setup_clk(dc, hdmi->clk);
+	clk_set_rate(hdmi->clk, dc->mode.pclk);
+
+	clk_enable(hdmi->clk);
+	tegra_periph_reset_assert(hdmi->clk);
+	mdelay(1);
+	tegra_periph_reset_deassert(hdmi->clk);
+
+	/* TODO: copy HDCP keys from KFUSE to HDMI */
+
+	/* Program display timing registers: handled by dc */
+
+	/* program HDMI registers and SOR sequencer */
+
+	tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+			DC_DISP_DISP_COLOR_CONTROL);
+
+	/* video_preamble uses h_pulse2 */
+	pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width +
+		dc->mode.h_back_porch - 10;
+	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc,
+			PULSE_MODE_NORMAL |
+			PULSE_POLARITY_HIGH |
+			PULSE_QUAL_VACTIVE |
+			PULSE_LAST_END_A,
+			DC_DISP_H_PULSE2_CONTROL);
+	tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8),
+		  DC_DISP_H_PULSE2_POSITION_A);
+
+	tegra_hdmi_writel(hdmi,
+			  VSYNC_WINDOW_END(0x210) |
+			  VSYNC_WINDOW_START(0x200) |
+			  VSYNC_WINDOW_ENABLE,
+			  HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+	/* set the DC mux: which head drives HDMI, plus limited RGB range */
+	tegra_hdmi_writel(hdmi,
+			  (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
+			  ARM_VIDEO_RANGE_LIMITED,
+			  HDMI_NV_PDISP_INPUT_CONTROL);
+
+	/* mux is set; the unused head's clock can be dropped again */
+	clk_disable(hdmi->disp1_clk);
+	clk_disable(hdmi->disp2_clk);
+
+	/* SOR reference clock divider in units of 0.25 MHz */
+	dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+	tegra_hdmi_writel(hdmi,
+			  SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) |
+			  SOR_REFCLK_DIV_FRAC(dispclk_div_8_2),
+			  HDMI_NV_PDISP_SOR_REFCLK);
+
+
+	if (!hdmi->dvi) {
+		err = tegra_dc_hdmi_setup_audio(dc);
+
+		/* no (N, CTS) entry for this pixel clock: fall back to DVI */
+		if (err < 0)
+			hdmi->dvi = true;
+	}
+
+	rekey = HDMI_REKEY_DEFAULT;
+	val = HDMI_CTRL_REKEY(rekey);
+	val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width +
+					dc->mode.h_back_porch +
+					dc->mode.h_front_porch -
+					rekey - 18) / 32);
+	if (!hdmi->dvi)
+		val |= HDMI_CTRL_ENABLE;
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL);
+
+	if (hdmi->dvi)
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	else
+		tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+
+	tegra_dc_hdmi_setup_avi_infoframe(dc, hdmi->dvi);
+	tegra_dc_hdmi_setup_audio_infoframe(dc, hdmi->dvi);
+
+	/* TMDS CONFIG */
+	pll0 = 0x200033f;
+	pll1 = 0;
+
+	pll0 &= ~SOR_PLL_PWR & ~SOR_PLL_VCOPD & ~SOR_PLL_PDBG & ~SOR_PLL_PDPORT & ~SOR_PLL_PULLDOWN &
+		~SOR_PLL_VCOCAP(~0) & ~SOR_PLL_ICHPMP(~0);
+	pll0 |= SOR_PLL_RESISTORSEL;
+
+	/* VCO capacitance scales with the pixel clock */
+	if (dc->mode.pclk <= 27000000)
+		pll0 |= SOR_PLL_VCOCAP(0);
+	else if (dc->mode.pclk <= 74250000)
+		pll0 |= SOR_PLL_VCOCAP(1);
+	else
+		pll0 |= SOR_PLL_VCOCAP(3);
+
+	/* NOTE(review): this compares h_active against 1080 (a vertical
+	 * resolution) -- presumably meant as "1080-line modes"; confirm.
+	 */
+	if (dc->mode.h_active == 1080) {
+		pll0 |= SOR_PLL_ICHPMP(1) | SOR_PLL_TX_REG_LOAD(3) |
+			SOR_PLL_TX_REG_LOAD(3) | SOR_PLL_BG_V17_S(3);
+		pll1 |= SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN;
+	} else {
+		pll0 |= SOR_PLL_ICHPMP(2);
+	}
+
+	tegra_hdmi_writel(hdmi, pll0, HDMI_NV_PDISP_SOR_PLL0);
+	tegra_hdmi_writel(hdmi, pll1, HDMI_NV_PDISP_SOR_PLL1);
+
+	if (pll1 & SOR_PLL_PE_EN) {
+		tegra_hdmi_writel(hdmi,
+				  PE_CURRENT0(0xf) |
+				  PE_CURRENT1(0xf) |
+				  PE_CURRENT2(0xf) |
+				  PE_CURRENT3(0xf),
+				  HDMI_NV_PDISP_PE_CURRENT);
+	}
+
+	/* enable SOR */
+	if (dc->mode.h_active == 1080)
+		ds = DRIVE_CURRENT_13_500_mA;
+	else
+		ds = DRIVE_CURRENT_5_250_mA;
+
+	tegra_hdmi_writel(hdmi,
+			  DRIVE_CURRENT_LANE0(ds) |
+			  DRIVE_CURRENT_LANE1(ds) |
+			  DRIVE_CURRENT_LANE2(ds) |
+			  DRIVE_CURRENT_LANE3(ds) |
+			  DRIVE_CURRENT_FUSE_OVERRIDE,
+			  HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+	tegra_hdmi_writel(hdmi,
+			  SOR_SEQ_CTL_PU_PC(0) |
+			  SOR_SEQ_PU_PC_ALT(0) |
+			  SOR_SEQ_PD_PC(8) |
+			  SOR_SEQ_PD_PC_ALT(8),
+			  HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+	val = SOR_SEQ_INST_WAIT_TIME(1) |
+		SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+		SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_PIN_A_LOW |
+		SOR_SEQ_INST_PIN_B_LOW |
+		SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0);
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8);
+
+	val = 0x1c800;
+	val &= ~SOR_CSTM_ROTCLK(~0);
+	val |= SOR_CSTM_ROTCLK(2);
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM);
+
+
+	/* stop the head while the SOR is attached */
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+
+	/* start SOR */
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_TRIGGER,
+			  HDMI_NV_PDISP_SOR_PWR);
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_DONE,
+			  HDMI_NV_PDISP_SOR_PWR);
+
+	/* busy-wait until the SOR power state machine latches the new
+	 * setting; BUG out rather than hang forever if it never does
+	 */
+	retries = 1000;
+	do {
+		BUG_ON(--retries < 0);
+		val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+	} while (val & SOR_PWR_SETTING_NEW_PENDING);
+
+	val = SOR_STATE_ASY_CRCMODE_COMPLETE |
+		SOR_STATE_ASY_OWNER_HEAD0 |
+		SOR_STATE_ASY_SUBOWNER_BOTH |
+		SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+		SOR_STATE_ASY_DEPOL_POS;
+
+	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_H_SYNC)
+		val |= SOR_STATE_ASY_HSYNCPOL_NEG;
+	else
+		val |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+	if (dc->mode.flags & TEGRA_DC_MODE_FLAG_NEG_V_SYNC)
+		val |= SOR_STATE_ASY_VSYNCPOL_NEG;
+	else
+		val |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE2);
+
+	val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+	tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1);
+
+	/* pulse STATE0 update, then mark the SOR attached */
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED,
+			  HDMI_NV_PDISP_SOR_STATE1);
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+			DC_CMD_DISPLAY_POWER_CONTROL);
+
+	/* restart the head in continuous display mode */
+	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 1);
+}
+
+/* Power down the output: tell HDCP the sink is gone, then hold the
+ * HDMI block in reset and stop its clock.
+ */
+static void tegra_dc_hdmi_disable(struct tegra_dc *dc)
+{
+	struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+	tegra_nvhdcp_set_plug(hdmi->nvhdcp, 0);
+
+	tegra_periph_reset_assert(hdmi->clk);
+	clk_disable(hdmi->clk);
+}
+
+/* Display-controller output operations for the HDMI path. */
+struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
+	.init = tegra_dc_hdmi_init,
+	.destroy = tegra_dc_hdmi_destroy,
+	.enable = tegra_dc_hdmi_enable,
+	.disable = tegra_dc_hdmi_disable,
+	.detect = tegra_dc_hdmi_detect,
+	.suspend = tegra_dc_hdmi_suspend,
+	.resume = tegra_dc_hdmi_resume,
+};
+
--- /dev/null
+/*
+ * drivers/video/tegra/dc/hdmi.h
+ *
+ * non-tegra specific HDMI declarations
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* AVI (Auxiliary Video Information) infoframe payload as sent over
+ * the wire; field layout follows the CEA-861 AVI InfoFrame.  All
+ * fields little endian.  Byte 0 is the checksum filled in by
+ * tegra_dc_hdmi_write_infopack().
+ */
+struct hdmi_avi_infoframe {
+	/* PB0 */
+	u8 csum;
+
+	/* PB1 */
+	unsigned s:2;	/* scan information */
+	unsigned b:2;	/* bar info data valid */
+	unsigned a:1;	/* active info present */
+	unsigned y:2;	/* RGB or YCbCr */
+	unsigned res1:1;
+
+	/* PB2 */
+	unsigned r:4;	/* active format aspect ratio */
+	unsigned m:2;	/* picture aspect ratio */
+	unsigned c:2;	/* colorimetry */
+
+	/* PB3 */
+	unsigned sc:2;	/* scan information */
+	unsigned q:2;	/* quantization range */
+	unsigned ec:3;	/* extended colorimetry */
+	unsigned itc:1;	/* it content */
+
+	/* PB4 */
+	unsigned vic:7;	/* video format id code */
+	unsigned res4:1;
+
+	/* PB5 */
+	unsigned pr:4;	/* pixel repetition factor */
+	unsigned cn:2;	/* it content type*/
+	unsigned yq:2;	/* ycc quantization range */
+
+	/* PB6-7 */
+	u16 top_bar_end_line;
+
+	/* PB8-9 */
+	u16 bot_bar_start_line;
+
+	/* PB10-11 */
+	u16 left_bar_end_pixel;
+
+	/* PB12-13 */
+	u16 right_bar_start_pixel;
+} __attribute__((packed));
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* Audio infoframe payload as sent over the wire; field layout follows
+ * the CEA-861 Audio InfoFrame.  All fields little endian.  Byte 0 is
+ * the checksum filled in by tegra_dc_hdmi_write_infopack().
+ */
+struct hdmi_audio_infoframe {
+	/* PB0 */
+	u8 csum;
+
+	/* PB1 */
+	unsigned cc:3;		/* channel count */
+	unsigned res1:1;
+	unsigned ct:4;		/* coding type */
+
+	/* PB2 */
+	unsigned ss:2;		/* sample size */
+	unsigned sf:3;		/* sample frequency */
+	unsigned res2:3;
+
+	/* PB3 */
+	unsigned cxt:5;		/* coding extension type */
+	unsigned res3:3;
+
+	/* PB4 */
+	u8 ca;			/* channel/speaker allocation */
+
+	/* PB5 */
+	unsigned res5:3;
+	unsigned lsv:4;		/* level shift value */
+	unsigned dm_inh:1;	/* downmix inhibit */
+
+	/* PB6-10 reserved */
+	u8 res6;
+	u8 res7;
+	u8 res8;
+	u8 res9;
+	u8 res10;
+} __attribute__((packed));
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUIDO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+struct tegra_dc_hdmi_data;
+
+unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg);
+void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long val, unsigned long reg);
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/hdmi_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+
+#define HDMI_CTXSW 0x00
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_SLEEP (0 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_SNOOSE (1 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_SAFE (0 << 2)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+#define SOR_STATE_ARM_SHOW_VGA (1 << 4)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define REPEATER (1 << 31)
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDCP_RUN_YES (1 << 0)
+#define CRYPT_ENABLED (1 << 1)
+#define ONEONE_ENABLED (1 << 3)
+#define AN_VALID (1 << 8)
+#define R0_VALID (1 << 9)
+#define SPRIME_VALID (1 << 10)
+#define MPRIME_VALID (1 << 11)
+#define SROM_ERR (1 << 13)
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define TMDS0_LINK0 (1 << 4)
+#define READ_S (1 << 0)
+#define READ_M (2 << 0)
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define STATUS_CS (1 << 6)
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+
+#define INFOFRAME_HEADER_TYPE(x) ((x) & 0xff)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xf) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+#define ACR_SB3(x) (((x) & 0xff) << 8)
+#define ACR_SB2(x) (((x) & 0xff) << 16)
+#define ACR_SB1(x) (((x) & 0xff) << 24)
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+
+#define ACR_SB6(x) (((x) & 0xff) << 0)
+#define ACR_SB5(x) (((x) & 0xff) << 8)
+#define ACR_SB4(x) (((x) & 0xff) << 16)
+#define ACR_ENABLE (1 << 31)
+#define ACR_SUBPACK_N(x) ((x) & 0xffffff)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_AUDIO_LAYOUT (1 << 8)
+#define HDMI_CTRL_SAMPLE_FLAT (1 << 12)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+#define SPARE_ACR_PRIORITY_HIGH (0 << 31)
+#define SPARE_ACR_PRIORITY_LOW (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SAFE_START_NORMAL (0 << 17)
+#define SOR_PWR_SAFE_START_ALT (1 << 17)
+#define SOR_PWR_HALT_DELAY (1 << 24)
+#define SOR_PWR_MODE (1 << 28)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCOPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0x3) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_PD_TXDA_0 (1 << 0)
+#define SOR_CSTM_PD_TXDA_1 (1 << 1)
+#define SOR_CSTM_PD_TXDA_2 (1 << 2)
+#define SOR_CSTM_PD_TXDA_3 (1 << 3)
+#define SOR_CSTM_PD_TXDB_0 (1 << 4)
+#define SOR_CSTM_PD_TXDB_1 (1 << 5)
+#define SOR_CSTM_PD_TXDB_2 (1 << 6)
+#define SOR_CSTM_PD_TXDB_3 (1 << 7)
+#define SOR_CSTM_PD_TXCA (1 << 8)
+#define SOR_CSTM_PD_TXCB (1 << 9)
+#define SOR_CSTM_UPPER (1 << 11)
+#define SOR_CSTM_MODE(x) (((x) & 0x3) << 12)
+#define SOR_CSTM_LINKACTA (1 << 14)
+#define SOR_CSTM_LINKACTB (1 << 15)
+#define SOR_CSTM_LVDS_EN (1 << 16)
+#define SOR_CSTM_DUP_SYNC (1 << 17)
+#define SOR_CSTM_NEW_MODE (1 << 18)
+#define SOR_CSTM_BALANCED (1 << 19)
+#define SOR_CSTM_PLLDIV (1 << 21)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_ROTDAT(x) (((x) & 0x7) << 28)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST0 0x60
+#define HDMI_NV_PDISP_SOR_SEQ_INST1 0x61
+#define HDMI_NV_PDISP_SOR_SEQ_INST2 0x62
+#define HDMI_NV_PDISP_SOR_SEQ_INST3 0x63
+#define HDMI_NV_PDISP_SOR_SEQ_INST4 0x64
+#define HDMI_NV_PDISP_SOR_SEQ_INST5 0x65
+#define HDMI_NV_PDISP_SOR_SEQ_INST6 0x66
+#define HDMI_NV_PDISP_SOR_SEQ_INST7 0x67
+#define HDMI_NV_PDISP_SOR_SEQ_INST8 0x68
+#define HDMI_NV_PDISP_SOR_SEQ_INST9 0x69
+#define HDMI_NV_PDISP_SOR_SEQ_INSTA 0x6a
+#define HDMI_NV_PDISP_SOR_SEQ_INSTB 0x6b
+#define HDMI_NV_PDISP_SOR_SEQ_INSTC 0x6c
+#define HDMI_NV_PDISP_SOR_SEQ_INSTD 0x6d
+#define HDMI_NV_PDISP_SOR_SEQ_INSTE 0x6e
+#define HDMI_NV_PDISP_SOR_SEQ_INSTF 0x6f
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_ASSERT_PLL_RESETV (1 << 29)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+/* note: datasheet defines FS1..FS7. we have FS(0)..FS(6) */
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOFT_RESET (1 << 8)
+#define AUDIO_CNTRL0_SOFT_RESET_ALL (1 << 12)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_UNKNOWN (1 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_32K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_44_1K (0 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_48K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_88_2K (8 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_96K (10 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_176_4K (12 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_192K (14 << 16)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNALTE (1 << 24)
+#define AUDIO_N_LOOKUP_ENABLE (1 << 28)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define LOCAL_KEYS (1 << 0)
+#define AUTOINC (1 << 1)
+#define WRITE16 (1 << 4)
+#define PKEY_REQUEST_RELOAD_TRIGGER (1 << 5)
+#define PKEY_LOADED (1 << 6)
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/nvhdcp.c
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#include <mach/dc.h>
+#include <mach/nvhost.h>
+#include <mach/kfuse.h>
+
+#include <video/nvhdcp.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+
+/* for 0x40 Bcaps */
+#define BCAPS_REPEATER (1 << 6)
+#define BCAPS_READY (1 << 5)
+#define BCAPS_11 (1 << 1) /* used for both Bcaps and Ainfo */
+
+/* for 0x41 Bstatus */
+#define BSTATUS_MAX_DEVS_EXCEEDED (1 << 7)
+#define BSTATUS_MAX_CASCADE_EXCEEDED (1 << 11)
+
+#ifdef VERBOSE_DEBUG
+#define nvhdcp_vdbg(...) \
+ printk("nvhdcp: " __VA_ARGS__)
+#else
+#define nvhdcp_vdbg(...) \
+({ \
+ if(0) \
+ printk("nvhdcp: " __VA_ARGS__); \
+ 0; \
+})
+#endif
+#define nvhdcp_debug(...) \
+ pr_debug("nvhdcp: " __VA_ARGS__)
+#define nvhdcp_err(...) \
+ pr_err("nvhdcp: Error: " __VA_ARGS__)
+#define nvhdcp_info(...) \
+ pr_info("nvhdcp: " __VA_ARGS__)
+
+
+/* for nvhdcp.state */
+enum tegra_nvhdcp_state {
+ STATE_OFF,
+ STATE_UNAUTHENTICATED,
+ STATE_LINK_VERIFY,
+ STATE_RENEGOTIATE,
+};
+
+struct tegra_nvhdcp {
+ struct work_struct work;
+ struct tegra_dc_hdmi_data *hdmi;
+ struct workqueue_struct *downstream_wq;
+ struct mutex lock;
+ struct miscdevice miscdev;
+ char name[12];
+ unsigned id;
+ bool plugged; /* true if hotplug detected */
+ atomic_t policy; /* set policy */
+ enum tegra_nvhdcp_state state; /* STATE_xxx */
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ int bus;
+ u32 b_status;
+ u64 a_n;
+ u64 c_n;
+ u64 a_ksv;
+ u64 b_ksv;
+ u64 c_ksv;
+ u64 d_ksv;
+ u8 v_prime[20];
+ u64 m_prime;
+ u32 num_bksv_list;
+ u64 bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+ int fail_count;
+};
+
+static inline bool nvhdcp_is_plugged(struct tegra_nvhdcp *nvhdcp)
+{
+ rmb();
+ return nvhdcp->plugged;
+}
+
+static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged)
+{
+ nvhdcp->plugged = plugged;
+ wmb();
+ return plugged;
+}
+
+static int nvhdcp_i2c_read(struct tegra_nvhdcp *nvhdcp, u8 reg,
+	size_t len, void *data)
+{
+	int status;
+	int retries = 15;
+	struct i2c_msg msg[] = {
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		},
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	do {
+		if (!nvhdcp_is_plugged(nvhdcp)) {
+			nvhdcp_err("disconnect during i2c xfer\n");
+			return -EIO;
+		}
+		status = i2c_transfer(nvhdcp->client->adapter,
+			msg, ARRAY_SIZE(msg));
+		if ((status < 0) && (retries > 1)) /* back off only on failure */
+			msleep(250);
+	} while ((status < 0) && retries--);
+
+	if (status < 0) {
+		nvhdcp_err("i2c xfer error %d\n", status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int nvhdcp_i2c_write(struct tegra_nvhdcp *nvhdcp, u8 reg,
+	size_t len, const void *data)
+{
+	int status;
+	u8 buf[len + 1];
+	struct i2c_msg msg[] = {
+		{
+			.addr = 0x74 >> 1, /* primary link */
+			.flags = 0,
+			.len = len + 1,
+			.buf = buf,
+		},
+	};
+	int retries = 15;
+
+	buf[0] = reg;
+	memcpy(buf + 1, data, len);
+
+	do {
+		if (!nvhdcp_is_plugged(nvhdcp)) {
+			nvhdcp_err("disconnect during i2c xfer\n");
+			return -EIO;
+		}
+		status = i2c_transfer(nvhdcp->client->adapter,
+			msg, ARRAY_SIZE(msg));
+		if ((status < 0) && (retries > 1)) /* back off only on failure */
+			msleep(250);
+	} while ((status < 0) && retries--);
+
+	if (status < 0) {
+		nvhdcp_err("i2c xfer error %d\n", status);
+		return status;
+	}
+
+	return 0;
+}
+
+static inline int nvhdcp_i2c_read8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 *val)
+{
+ return nvhdcp_i2c_read(nvhdcp, reg, 1, val);
+}
+
+static inline int nvhdcp_i2c_write8(struct tegra_nvhdcp *nvhdcp, u8 reg, u8 val)
+{
+ return nvhdcp_i2c_write(nvhdcp, reg, 1, &val);
+}
+
+static inline int nvhdcp_i2c_read16(struct tegra_nvhdcp *nvhdcp,
+ u8 reg, u16 *val)
+{
+ u8 buf[2];
+ int e;
+
+ e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+ if (e)
+ return e;
+
+ if (val)
+ *val = buf[0] | (u16)buf[1] << 8;
+
+ return 0;
+}
+
+static int nvhdcp_i2c_read40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 *val)
+{
+ u8 buf[5];
+ int e, i;
+ u64 n;
+
+ e = nvhdcp_i2c_read(nvhdcp, reg, sizeof buf, buf);
+ if (e)
+ return e;
+
+ for(i = 0, n = 0; i < 5; i++ ) {
+ n <<= 8;
+ n |= buf[4 - i];
+ }
+
+ if (val)
+ *val = n;
+
+ return 0;
+}
+
+static int nvhdcp_i2c_write40(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+ char buf[5];
+ int i;
+ for(i = 0; i < 5; i++ ) {
+ buf[i] = val;
+ val >>= 8;
+ }
+ return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
+
+static int nvhdcp_i2c_write64(struct tegra_nvhdcp *nvhdcp, u8 reg, u64 val)
+{
+ char buf[8];
+ int i;
+ for(i = 0; i < 8; i++ ) {
+ buf[i] = val;
+ val >>= 8;
+ }
+ return nvhdcp_i2c_write(nvhdcp, reg, sizeof buf, buf);
+}
+
+
+/* 64-bit link encryption session random number */
+static inline u64 get_an(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ return r;
+}
+
+/* 64-bit upstream exchange random number */
+static inline void set_cn(struct tegra_dc_hdmi_data *hdmi, u64 c_n)
+{
+ tegra_hdmi_writel(hdmi, (u32)c_n, HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ tegra_hdmi_writel(hdmi, c_n >> 32, HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+}
+
+
+/* 40-bit transmitter's key selection vector */
+static inline u64 get_aksv(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ return r;
+}
+
+/* 40-bit receiver's key selection vector */
+static inline void set_bksv(struct tegra_dc_hdmi_data *hdmi, u64 b_ksv, bool repeater)
+{
+ if (repeater)
+ b_ksv |= (u64)REPEATER << 32;
+ tegra_hdmi_writel(hdmi, (u32)b_ksv, HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ tegra_hdmi_writel(hdmi, b_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+}
+
+
+/* 40-bit software's key selection vector */
+static inline void set_cksv(struct tegra_dc_hdmi_data *hdmi, u64 c_ksv)
+{
+ tegra_hdmi_writel(hdmi, (u32)c_ksv, HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ tegra_hdmi_writel(hdmi, c_ksv >> 32, HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+}
+
+/* 40-bit connection state */
+static inline u64 get_cs(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ return r;
+}
+
+/* 40-bit upstream key selection vector */
+static inline u64 get_dksv(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ return r;
+}
+
+/* 64-bit encrypted M0 value */
+static inline u64 get_mprime(struct tegra_dc_hdmi_data *hdmi)
+{
+ u64 r;
+ r = (u64)tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB) << 32;
+ r |= tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ return r;
+}
+
+static inline u16 get_transmitter_ri(struct tegra_dc_hdmi_data *hdmi)
+{
+ return tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_RI);
+}
+
+static inline int get_receiver_ri(struct tegra_nvhdcp *nvhdcp, u16 *r)
+{
+ return nvhdcp_i2c_read16(nvhdcp, 0x8, r); /* long read */
+}
+
+static int get_bcaps(struct tegra_nvhdcp *nvhdcp, u8 *b_caps)
+{
+ return nvhdcp_i2c_read8(nvhdcp, 0x40, b_caps);
+}
+
+static int get_ksvfifo(struct tegra_nvhdcp *nvhdcp,
+	unsigned num_bksv_list, u64 *ksv_list)
+{
+	u8 *buf, *p;
+	int e;
+	unsigned i;
+	size_t buf_len = num_bksv_list * 5;
+
+	if (!ksv_list || num_bksv_list > TEGRA_NVHDCP_MAX_DEVS)
+		return -EINVAL;
+
+	if (num_bksv_list == 0)
+		return 0;
+
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf) /* kmalloc() returns NULL on failure, never an ERR_PTR */
+		return -ENOMEM;
+
+	e = nvhdcp_i2c_read(nvhdcp, 0x43, buf_len, buf);
+	if (e) {
+		kfree(buf);
+		return e;
+	}
+
+	/* load 40-bit keys from repeater into array of u64 */
+	p = buf;
+	for (i = 0; i < num_bksv_list; i++) {
+		ksv_list[i] = p[0] | ((u64)p[1] << 8) | ((u64)p[2] << 16)
+			| ((u64)p[3] << 24) | ((u64)p[4] << 32);
+		p += 5;
+	}
+
+	kfree(buf);
+	return 0;
+}
+
+/* get V' 160-bit SHA-1 hash from repeater */
+static int get_vprime(struct tegra_nvhdcp *nvhdcp, u8 *v_prime)
+{
+ int e, i;
+
+ for (i = 0; i < 20; i += 4) {
+ e = nvhdcp_i2c_read(nvhdcp, 0x20 + i, 4, v_prime + i);
+ if (e)
+ return e;
+ }
+ return 0;
+}
+
+
+/* set or clear RUN_YES */
+static void hdcp_ctrl_run(struct tegra_dc_hdmi_data *hdmi, bool v)
+{
+ u32 ctrl;
+
+ if (v) {
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+ ctrl |= HDCP_RUN_YES;
+ } else {
+ ctrl = 0;
+ }
+
+ tegra_hdmi_writel(hdmi, ctrl, HDMI_NV_PDISP_RG_HDCP_CTRL);
+}
+
+/* wait for any bits in mask to be set in HDMI_NV_PDISP_RG_HDCP_CTRL
+ * sleeps up to 120mS */
+static int wait_hdcp_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask, u32 *v)
+{
+	int retries = 13;
+	u32 ctrl;
+
+	do {
+		ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+		if (ctrl & mask) { /* was (ctrl | mask): always true, never waited */
+			if (v)
+				*v = ctrl;
+			break;
+		}
+		if (retries > 1)
+			msleep(10);
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("ctrl read timeout (mask=0x%x)\n", mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* wait for any bits in mask to be set in HDMI_NV_PDISP_KEY_CTRL
+ * waits up to 100mS */
+static int wait_key_ctrl(struct tegra_dc_hdmi_data *hdmi, u32 mask)
+{
+	int retries = 101;
+	u32 ctrl;
+
+	do {
+		ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+		if (ctrl & mask) /* was (ctrl | mask): always true, never waited */
+			break;
+		if (retries > 1)
+			msleep(1);
+	} while (--retries);
+	if (!retries) {
+		nvhdcp_err("key ctrl read timeout (mask=0x%x)\n", mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* check that key selection vector is well formed.
+ * NOTE: this function assumes KSV has already been checked against
+ * revocation list.
+ */
+static int verify_ksv(u64 k)
+{
+	unsigned i;
+
+	/* count set bits, must be exactly 20 set to be valid */
+	for(i = 0; k; i++)
+		k ^= k & -k; /* k & -k isolates the lowest set bit */
+
+	return (i != 20) ? -EINVAL : 0;
+}
+
+/* get Status and Kprime signature - READ_S on TMDS0_LINK0 only */
+static int get_s_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ u32 sp_msb, sp_lsb1, sp_lsb2;
+ int e;
+
+ /* if connection isn't authenticated ... */
+ mutex_lock(&nvhdcp->lock);
+ if (nvhdcp->state != STATE_LINK_VERIFY) {
+ memset(pkt, 0, sizeof *pkt);
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+ e = 0;
+ goto err;
+ }
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+ /* we will be taking c_n, c_ksv as input */
+ if (!(pkt->value_flags & TEGRA_NVHDCP_FLAG_CN)
+ || !(pkt->value_flags & TEGRA_NVHDCP_FLAG_CKSV)) {
+ nvhdcp_err("missing value_flags (0x%x)\n", pkt->value_flags);
+ e = -EINVAL;
+ goto err;
+ }
+
+ pkt->value_flags = 0;
+
+ pkt->a_ksv = nvhdcp->a_ksv;
+ pkt->a_n = nvhdcp->a_n;
+ pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+ nvhdcp_vdbg("%s():cn %llx cksv %llx\n", __func__, pkt->c_n, pkt->c_ksv);
+
+ set_cn(hdmi, pkt->c_n);
+
+ tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_S,
+ HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+ set_cksv(hdmi, pkt->c_ksv);
+
+ e = wait_hdcp_ctrl(hdmi, SPRIME_VALID, NULL);
+ if (e) {
+ nvhdcp_err("Sprime read timeout\n");
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+ e = -EIO;
+ goto err;
+ }
+
+ msleep(50);
+
+ /* read 56-bit Sprime plus 16 status bits */
+ sp_msb = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ sp_lsb1 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ sp_lsb2 = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+
+ /* top 8 bits of LSB2 and bottom 8 bits of MSB hold status bits. */
+ pkt->hdcp_status = ( sp_msb << 8 ) | ( sp_lsb2 >> 24);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_S;
+
+ /* 56-bit Kprime */
+ pkt->k_prime = ((u64)(sp_lsb2 & 0xffffff) << 32) | sp_lsb1;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_KP;
+
+ /* is connection state supported? */
+ if (sp_msb & STATUS_CS) {
+ pkt->cs = get_cs(hdmi);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_CS;
+ }
+
+ /* load Dksv */
+ pkt->d_ksv = get_dksv(hdmi);
+ if (verify_ksv(pkt->d_ksv)) {
+ nvhdcp_err("Dksv invalid!\n");
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+ e = -EIO; /* treat bad Dksv as I/O error */
+ }
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+ /* copy current Bksv */
+ pkt->b_ksv = nvhdcp->b_ksv;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+ mutex_unlock(&nvhdcp->lock);
+ return 0;
+
+err:
+ mutex_unlock(&nvhdcp->lock);
+ return e;
+}
+
+/* get M prime - READ_M on TMDS0_LINK0 only */
+static inline int get_m_prime(struct tegra_nvhdcp *nvhdcp, struct tegra_nvhdcp_packet *pkt)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ int e;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_UNSUCCESSFUL;
+
+ /* if connection isn't authenticated ... */
+ mutex_lock(&nvhdcp->lock);
+ if (nvhdcp->state != STATE_LINK_VERIFY) {
+ memset(pkt, 0, sizeof *pkt);
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_LINK_FAILED;
+ e = 0;
+ goto err;
+ }
+
+ pkt->a_ksv = nvhdcp->a_ksv;
+ pkt->a_n = nvhdcp->a_n;
+ pkt->value_flags = TEGRA_NVHDCP_FLAG_AKSV | TEGRA_NVHDCP_FLAG_AN;
+
+ set_cn(hdmi, pkt->c_n);
+
+ tegra_hdmi_writel(hdmi, TMDS0_LINK0 | READ_M,
+ HDMI_NV_PDISP_RG_HDCP_CMODE);
+
+ /* Cksv write triggers Mprime update */
+ set_cksv(hdmi, pkt->c_ksv);
+
+ e = wait_hdcp_ctrl(hdmi, MPRIME_VALID, NULL);
+ if (e) {
+ nvhdcp_err("Mprime read timeout\n");
+ e = -EIO;
+ goto err;
+ }
+ msleep(50);
+
+ /* load Mprime */
+ pkt->m_prime = get_mprime(hdmi);
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_MP;
+
+ pkt->b_status = nvhdcp->b_status;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BSTATUS;
+
+ /* copy most recent KSVFIFO, if it is non-zero */
+ pkt->num_bksv_list = nvhdcp->num_bksv_list;
+ if( nvhdcp->num_bksv_list ) {
+ BUILD_BUG_ON(sizeof(pkt->bksv_list) != sizeof(nvhdcp->bksv_list));
+ memcpy(pkt->bksv_list, nvhdcp->bksv_list,
+ nvhdcp->num_bksv_list * sizeof(*pkt->bksv_list));
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSVLIST;
+ }
+
+ /* copy v_prime */
+ BUILD_BUG_ON(sizeof(pkt->v_prime) != sizeof(nvhdcp->v_prime));
+ memcpy(pkt->v_prime, nvhdcp->v_prime, sizeof(nvhdcp->v_prime));
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_V;
+
+ /* load Dksv */
+ pkt->d_ksv = get_dksv(hdmi);
+ if (verify_ksv(pkt->d_ksv)) {
+ nvhdcp_err("Dksv invalid!\n");
+ e = -EIO;
+ goto err;
+ }
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_DKSV;
+
+ /* copy current Bksv */
+ pkt->b_ksv = nvhdcp->b_ksv;
+ pkt->value_flags |= TEGRA_NVHDCP_FLAG_BKSV;
+
+ pkt->packet_results = TEGRA_NVHDCP_RESULT_SUCCESS;
+ mutex_unlock(&nvhdcp->lock);
+ return 0;
+
+err:
+ mutex_unlock(&nvhdcp->lock);
+ return e;
+}
+
+static int load_kfuse(struct tegra_dc_hdmi_data *hdmi)
+{
+ unsigned buf[KFUSE_DATA_SZ / 4];
+ int e, i;
+ u32 ctrl;
+ u32 tmp;
+ int retries;
+
+ /* copy load kfuse into buffer - only needed for early Tegra parts */
+ e = tegra_kfuse_read(buf, sizeof buf);
+ if (e) {
+ nvhdcp_err("Kfuse read failure\n");
+ return e;
+ }
+
+ /* write the kfuse to HDMI SRAM */
+
+ tegra_hdmi_writel(hdmi, 1, HDMI_NV_PDISP_KEY_CTRL); /* LOAD_KEYS */
+
+ /* issue a reload */
+ ctrl = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_CTRL);
+ tegra_hdmi_writel(hdmi, ctrl | PKEY_REQUEST_RELOAD_TRIGGER
+ | LOCAL_KEYS , HDMI_NV_PDISP_KEY_CTRL);
+
+ e = wait_key_ctrl(hdmi, PKEY_LOADED);
+ if (e) {
+ nvhdcp_err("key reload timeout\n");
+ return -EIO;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_KEY_SKEY_INDEX);
+
+ /* wait for SRAM to be cleared */
+ retries = 6;
+ do {
+ tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_KEY_DEBUG0);
+ if ((tmp & 1) == 0) break;
+ if (retries > 1)
+ mdelay(1);
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("key SRAM clear timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < KFUSE_DATA_SZ / 4; i += 4) {
+
+ /* load 128-bits*/
+ tegra_hdmi_writel(hdmi, buf[i], HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ tegra_hdmi_writel(hdmi, buf[i+1], HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ tegra_hdmi_writel(hdmi, buf[i+2], HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ tegra_hdmi_writel(hdmi, buf[i+3], HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+
+ /* trigger LOAD_HDCP_KEY */
+ tegra_hdmi_writel(hdmi, 0x100, HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+
+ tmp = LOCAL_KEYS | WRITE16;
+ if (i)
+ tmp |= AUTOINC;
+ tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_KEY_CTRL);
+
+ /* wait for WRITE16 to complete */
+ e = wait_key_ctrl(hdmi, 0x10); /* WRITE16 */
+ if (e) {
+ nvhdcp_err("key write timeout\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int verify_link(struct tegra_nvhdcp *nvhdcp, bool wait_ri)
+{
+ struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+ int retries = 3;
+ u16 old, rx, tx;
+ int e;
+
+ old = 0;
+ rx = 0;
+ tx = 0;
+ /* retry 3 times to deal with I2C link issues */
+ do {
+ if (wait_ri)
+ old = get_transmitter_ri(hdmi);
+
+ e = get_receiver_ri(nvhdcp, &rx);
+ if (!e) {
+ if (!rx) {
+ nvhdcp_err("Ri is 0!\n");
+ return -EINVAL;
+ }
+
+ tx = get_transmitter_ri(hdmi);
+ } else {
+ rx = ~tx;
+ msleep(50);
+ }
+
+ } while (wait_ri && --retries && old != tx);
+
+ nvhdcp_debug("R0 Ri poll:rx=0x%04x tx=0x%04x\n", rx, tx);
+
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("aborting verify links - lost hdmi connection\n");
+ return -EIO;
+ }
+
+ if (rx != tx)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int get_repeater_info(struct tegra_nvhdcp *nvhdcp)
+{
+ int e, retries;
+ u8 b_caps;
+ u16 b_status;
+
+ nvhdcp_vdbg("repeater found:fetching repeater info\n");
+
+ /* wait up to 5 seconds for READY on repeater */
+ retries = 51;
+ do {
+ if (!nvhdcp_is_plugged(nvhdcp)) {
+ nvhdcp_err("disconnect while waiting for repeater\n");
+ return -EIO;
+ }
+
+ e = get_bcaps(nvhdcp, &b_caps);
+ if (!e && (b_caps & BCAPS_READY)) {
+ nvhdcp_debug("Bcaps READY from repeater\n");
+ break;
+ }
+ if (retries > 1)
+ msleep(100);
+ } while (--retries);
+ if (!retries) {
+ nvhdcp_err("repeater Bcaps read timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ memset(nvhdcp->v_prime, 0, sizeof nvhdcp->v_prime);
+ e = get_vprime(nvhdcp, nvhdcp->v_prime);
+ if (e) {
+ nvhdcp_err("repeater Vprime read failure!\n");
+ return e;
+ }
+
+ e = nvhdcp_i2c_read16(nvhdcp, 0x41, &b_status);
+ if (e) {
+ nvhdcp_err("Bstatus read failure!\n");
+ return e;
+ }
+
+ if (b_status & BSTATUS_MAX_DEVS_EXCEEDED) {
+ nvhdcp_err("repeater:max devices (0x%04x)\n", b_status);
+ return -EINVAL;
+ }
+
+ if (b_status & BSTATUS_MAX_CASCADE_EXCEEDED) {
+ nvhdcp_err("repeater:max cascade (0x%04x)\n", b_status);
+ return -EINVAL;
+ }
+
+ nvhdcp->b_status = b_status;
+ nvhdcp->num_bksv_list = b_status & 0x7f;
+ nvhdcp_vdbg("Bstatus 0x%x (devices: %d)\n",
+ b_status, nvhdcp->num_bksv_list);
+
+ memset(nvhdcp->bksv_list, 0, sizeof nvhdcp->bksv_list);
+ e = get_ksvfifo(nvhdcp, nvhdcp->num_bksv_list, nvhdcp->bksv_list);
+ if (e) {
+ nvhdcp_err("repeater:could not read KSVFIFO (err %d)\n", e);
+ return e;
+ }
+
+ return 0;
+}
+
+/*
+ * Main HDCP downstream authentication worker (runs on downstream_wq).
+ *
+ * First part of authentication: load the key fuses, let hardware
+ * generate An/Aksv, exchange An/Aksv/Bksv with the receiver over DDC,
+ * wait for R0 and verify R0 == R0'.  If the sink is a repeater, its
+ * info (V', Bstatus, KSV list) is fetched as well.  Afterwards the
+ * worker loops verifying Ri every 1.5s until unplug or failure; on
+ * failure it re-queues itself after 1.75s, giving up after 5 failures.
+ * nvhdcp->lock is held except across the sleeps.
+ */
+static void nvhdcp_downstream_worker(struct work_struct *work)
+{
+	struct tegra_nvhdcp *nvhdcp =
+		container_of(work, struct tegra_nvhdcp, work);
+	struct tegra_dc_hdmi_data *hdmi = nvhdcp->hdmi;
+	int e;
+	u8 b_caps;
+	u32 tmp;
+	u32 res;
+
+	nvhdcp_vdbg("%s():started thread %s\n", __func__, nvhdcp->name);
+
+	mutex_lock(&nvhdcp->lock);
+	if (nvhdcp->state == STATE_OFF) {
+		nvhdcp_err("nvhdcp failure - giving up\n");
+		goto err;
+	}
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+
+	/* check plug state to terminate early in case flush_workqueue() */
+	if (!nvhdcp_is_plugged(nvhdcp)) {
+		nvhdcp_err("worker started while unplugged!\n");
+		goto lost_hdmi;
+	}
+	nvhdcp_vdbg("%s():hpd=%d\n", __func__, nvhdcp->plugged);
+
+	nvhdcp->a_ksv = 0;
+	nvhdcp->b_ksv = 0;
+	nvhdcp->a_n = 0;
+
+	e = get_bcaps(nvhdcp, &b_caps);
+	if (e) {
+		nvhdcp_err("Bcaps read failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("read Bcaps = 0x%02x\n", b_caps);
+
+	nvhdcp_vdbg("kfuse loading ...\n");
+
+	/* repeater flag in Bskv must be configured before loading fuses */
+	set_bksv(hdmi, 0, (b_caps & BCAPS_REPEATER));
+
+	e = load_kfuse(hdmi);
+	if (e) {
+		nvhdcp_err("kfuse could not be loaded\n");
+		goto failure;
+	}
+
+	hdcp_ctrl_run(hdmi, 1);
+
+	nvhdcp_vdbg("wait AN_VALID ...\n");
+
+	/* wait for hardware to generate HDCP values */
+	e = wait_hdcp_ctrl(hdmi, AN_VALID | SROM_ERR, &res);
+	if (e) {
+		nvhdcp_err("An key generation timeout\n");
+		goto failure;
+	}
+	if (res & SROM_ERR) {
+		nvhdcp_err("SROM error\n");
+		goto failure;
+	}
+
+	msleep(25);
+
+	nvhdcp->a_ksv = get_aksv(hdmi);
+	nvhdcp->a_n = get_an(hdmi);
+	nvhdcp_vdbg("Aksv is 0x%016llx\n", nvhdcp->a_ksv);
+	nvhdcp_vdbg("An is 0x%016llx\n", nvhdcp->a_n);
+	if (verify_ksv(nvhdcp->a_ksv)) {
+		nvhdcp_err("Aksv verify failure! (0x%016llx)\n", nvhdcp->a_ksv);
+		goto failure;
+	}
+
+	/* write Ainfo to receiver - set 1.1 only if b_caps supports it */
+	e = nvhdcp_i2c_write8(nvhdcp, 0x15, b_caps & BCAPS_11);
+	if (e) {
+		nvhdcp_err("Ainfo write failure\n");
+		goto failure;
+	}
+
+	/* write An to receiver */
+	e = nvhdcp_i2c_write64(nvhdcp, 0x18, nvhdcp->a_n);
+	if (e) {
+		nvhdcp_err("An write failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("wrote An = 0x%016llx\n", nvhdcp->a_n);
+
+	/* write Aksv to receiver - triggers auth sequence */
+	e = nvhdcp_i2c_write40(nvhdcp, 0x10, nvhdcp->a_ksv);
+	if (e) {
+		nvhdcp_err("Aksv write failure\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("wrote Aksv = 0x%010llx\n", nvhdcp->a_ksv);
+
+	/* bail out if unplugged in the middle of negotiation */
+	if (!nvhdcp_is_plugged(nvhdcp))
+		goto lost_hdmi;
+
+	/* get Bksv from receiver */
+	e = nvhdcp_i2c_read40(nvhdcp, 0x00, &nvhdcp->b_ksv);
+	if (e) {
+		nvhdcp_err("Bksv read failure\n");
+		goto failure;
+	}
+	nvhdcp_vdbg("Bksv is 0x%016llx\n", nvhdcp->b_ksv);
+	if (verify_ksv(nvhdcp->b_ksv)) {
+		nvhdcp_err("Bksv verify failure!\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("read Bksv = 0x%010llx from device\n", nvhdcp->b_ksv);
+
+	set_bksv(hdmi, nvhdcp->b_ksv, (b_caps & BCAPS_REPEATER));
+
+	nvhdcp_vdbg("loaded Bksv into controller\n");
+
+	e = wait_hdcp_ctrl(hdmi, R0_VALID, NULL);
+	if (e) {
+		nvhdcp_err("R0 read failure!\n");
+		goto failure;
+	}
+
+	nvhdcp_vdbg("R0 valid\n");
+
+	msleep(100); /* can't read R0' within 100ms of writing Aksv */
+
+	nvhdcp_vdbg("verifying links ...\n");
+
+	e = verify_link(nvhdcp, false);
+	if (e) {
+		nvhdcp_err("link verification failed err %d\n", e);
+		goto failure;
+	}
+
+	/* authentication succeeded: turn encryption on */
+	tmp = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_RG_HDCP_CTRL);
+	tmp |= CRYPT_ENABLED;
+	if (b_caps & BCAPS_11) /* HDCP 1.1 ? */
+		tmp |= ONEONE_ENABLED;
+	tegra_hdmi_writel(hdmi, tmp, HDMI_NV_PDISP_RG_HDCP_CTRL);
+
+	nvhdcp_vdbg("CRYPT enabled\n");
+
+	/* if repeater then get repeater info */
+	if (b_caps & BCAPS_REPEATER) {
+		e = get_repeater_info(nvhdcp);
+		if (e) {
+			nvhdcp_err("get repeater info failed\n");
+			goto failure;
+		}
+	}
+
+	nvhdcp->state = STATE_LINK_VERIFY;
+	nvhdcp_info("link verified!\n");
+
+	/* second part: periodic Ri link-integrity check, every 1.5s */
+	while (1) {
+		if (!nvhdcp_is_plugged(nvhdcp))
+			goto lost_hdmi;
+
+		if (nvhdcp->state != STATE_LINK_VERIFY)
+			goto failure;
+
+		e = verify_link(nvhdcp, true);
+		if (e) {
+			nvhdcp_err("link verification failed err %d\n", e);
+			goto failure;
+		}
+		/* drop the lock while sleeping so ioctls can make progress */
+		mutex_unlock(&nvhdcp->lock);
+		msleep(1500);
+		mutex_lock(&nvhdcp->lock);
+
+	}
+
+failure:
+	/* renegotiate after a delay unless we've failed too often;
+	 * note this falls through to lost_hdmi to disable the ctrl */
+	nvhdcp->fail_count++;
+	if(nvhdcp->fail_count > 5) {
+		nvhdcp_err("nvhdcp failure - too many failures, giving up!\n");
+	} else {
+		nvhdcp_err("nvhdcp failure - renegotiating in 1.75 seconds\n");
+		mutex_unlock(&nvhdcp->lock);
+		msleep(1750);
+		mutex_lock(&nvhdcp->lock);
+		queue_work(nvhdcp->downstream_wq, &nvhdcp->work);
+	}
+
+lost_hdmi:
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+	hdcp_ctrl_run(hdmi, 0);
+
+err:
+	mutex_unlock(&nvhdcp->lock);
+	return;
+}
+
+/* Kick off (re)authentication if the sink is currently plugged. */
+static int tegra_nvhdcp_on(struct tegra_nvhdcp *nvhdcp)
+{
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+	if (nvhdcp_is_plugged(nvhdcp)) {
+		nvhdcp->fail_count = 0;
+		queue_work(nvhdcp->downstream_wq, &nvhdcp->work);
+	}
+	return 0;
+}
+
+/* Stop HDCP: mark the state off/unplugged, then drain the worker. */
+static int tegra_nvhdcp_off(struct tegra_nvhdcp *nvhdcp)
+{
+	mutex_lock(&nvhdcp->lock);
+	nvhdcp->state = STATE_OFF;
+	nvhdcp_set_plugged(nvhdcp, false);
+	mutex_unlock(&nvhdcp->lock);
+	flush_workqueue(nvhdcp->downstream_wq);
+	return 0;
+}
+
+/* HDMI hotplug notification: start authentication on plug, stop on unplug. */
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd)
+{
+	nvhdcp_debug("hdmi hotplug detected (hpd = %d)\n", hpd);
+
+	if (hpd) {
+		nvhdcp_set_plugged(nvhdcp, true);
+		tegra_nvhdcp_on(nvhdcp);
+	} else {
+		tegra_nvhdcp_off(nvhdcp);
+	}
+}
+
+/*
+ * Set the HDCP policy.  Only TEGRA_NVHDCP_POLICY_ALWAYS_ON is supported;
+ * any other value returns -EINVAL.  Authentication is restarted only
+ * when the policy actually changes.
+ */
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol)
+{
+	if (pol == TEGRA_NVHDCP_POLICY_ALWAYS_ON) {
+		nvhdcp_info("using \"always on\" policy.\n");
+		if (atomic_xchg(&nvhdcp->policy, pol) != pol) {
+			/* policy changed, start working */
+			tegra_nvhdcp_on(nvhdcp);
+		}
+	} else {
+		/* unsupported policy */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Force a fresh authentication cycle. */
+static int tegra_nvhdcp_renegotiate(struct tegra_nvhdcp *nvhdcp)
+{
+	mutex_lock(&nvhdcp->lock);
+	nvhdcp->state = STATE_RENEGOTIATE;
+	mutex_unlock(&nvhdcp->lock);
+	tegra_nvhdcp_on(nvhdcp);
+	return 0;
+}
+
+/* Suspend hook; tolerates a NULL context (HDCP may not be set up). */
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp)
+{
+	if (!nvhdcp) return;
+	tegra_nvhdcp_off(nvhdcp);
+}
+
+
+/*
+ * ioctl handler for the nvhdcp misc device.  READ_M/READ_S bounce a
+ * tegra_nvhdcp_packet through a kernel buffer: copy in, compute
+ * M'/S', copy the result back out.  Unknown commands return -ENOTTY.
+ */
+static long nvhdcp_dev_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	struct tegra_nvhdcp *nvhdcp = filp->private_data;
+	struct tegra_nvhdcp_packet *pkt;
+	int e = -ENOTTY;
+
+	switch (cmd) {
+	case TEGRAIO_NVHDCP_ON:
+		return tegra_nvhdcp_on(nvhdcp);
+
+	case TEGRAIO_NVHDCP_OFF:
+		return tegra_nvhdcp_off(nvhdcp);
+
+	case TEGRAIO_NVHDCP_SET_POLICY:
+		return tegra_nvhdcp_set_policy(nvhdcp, arg);
+
+	case TEGRAIO_NVHDCP_READ_M:
+		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+		if (!pkt)
+			return -ENOMEM;
+		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		/* result (and any error status inside pkt) is copied back
+		 * even when get_m_prime() fails */
+		e = get_m_prime(nvhdcp, pkt);
+		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		kfree(pkt);
+		return e;
+
+	case TEGRAIO_NVHDCP_READ_S:
+		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+		if (!pkt)
+			return -ENOMEM;
+		if (copy_from_user(pkt, (void __user *)arg, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		e = get_s_prime(nvhdcp, pkt);
+		if (copy_to_user((void __user *)arg, pkt, sizeof(*pkt))) {
+			e = -EFAULT;
+			goto kfree_pkt;
+		}
+		kfree(pkt);
+		return e;
+
+	case TEGRAIO_NVHDCP_RENEGOTIATE:
+		e = tegra_nvhdcp_renegotiate(nvhdcp);
+		break;
+	}
+
+	return e;
+kfree_pkt:
+	kfree(pkt);
+	return e;
+}
+
+/*
+ * open(): recover the tegra_nvhdcp context from the miscdevice that
+ * the misc core stashed in private_data, and store it back so the
+ * ioctl handler can reach it directly.
+ */
+static int nvhdcp_dev_open(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *miscdev = filp->private_data;
+	struct tegra_nvhdcp *nvhdcp =
+		container_of(miscdev, struct tegra_nvhdcp, miscdev);
+	filp->private_data = nvhdcp;
+	return 0;
+}
+
+/* release(): just drop our reference to the context. */
+static int nvhdcp_dev_release(struct inode *inode, struct file *filp)
+{
+	filp->private_data = NULL;
+	return 0;
+}
+
+/* file operations for the "nvhdcp<id>" misc device */
+static const struct file_operations nvhdcp_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.unlocked_ioctl = nvhdcp_dev_ioctl,
+	.open		= nvhdcp_dev_open,
+	.release	= nvhdcp_dev_release,
+};
+
+/* we only support one AP right now, so should only call this once. */
+/*
+ * Allocate and register the nvhdcp instance: creates the DDC i2c client
+ * on @bus, a single-threaded workqueue for the authentication worker,
+ * and a misc char device named "nvhdcp<id>".
+ *
+ * Returns the new context or an ERR_PTR() on failure.  Only one AP is
+ * supported, so a second call fails with -EMFILE.
+ *
+ * Fixes vs. previous revision: the create_singlethread_workqueue()
+ * return value is now checked (queue_work() on a NULL wq would oops),
+ * and the static guard pointer is reset on every error path so a
+ * failed create doesn't leave a dangling pointer that makes all
+ * subsequent calls return -EMFILE.
+ */
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+			int id, int bus)
+{
+	static struct tegra_nvhdcp *nvhdcp; /* prevent multiple calls */
+	struct i2c_adapter *adapter;
+	int e;
+
+	if (nvhdcp)
+		return ERR_PTR(-EMFILE);
+
+	nvhdcp = kzalloc(sizeof(*nvhdcp), GFP_KERNEL);
+	if (!nvhdcp)
+		return ERR_PTR(-ENOMEM);
+
+	nvhdcp->id = id;
+	snprintf(nvhdcp->name, sizeof(nvhdcp->name), "nvhdcp%u", id);
+	nvhdcp->hdmi = hdmi;
+	mutex_init(&nvhdcp->lock);
+
+	strlcpy(nvhdcp->info.type, nvhdcp->name, sizeof(nvhdcp->info.type));
+	nvhdcp->bus = bus;
+	nvhdcp->info.addr = 0x74 >> 1; /* 7-bit HDCP DDC slave address */
+	nvhdcp->info.platform_data = nvhdcp;
+	nvhdcp->fail_count = 0;
+
+	adapter = i2c_get_adapter(bus);
+	if (!adapter) {
+		nvhdcp_err("can't get adapter for bus %d\n", bus);
+		e = -EBUSY;
+		goto free_nvhdcp;
+	}
+
+	nvhdcp->client = i2c_new_device(adapter, &nvhdcp->info);
+	i2c_put_adapter(adapter);
+
+	if (!nvhdcp->client) {
+		nvhdcp_err("can't create new device\n");
+		e = -EBUSY;
+		goto free_nvhdcp;
+	}
+
+	nvhdcp->state = STATE_UNAUTHENTICATED;
+
+	nvhdcp->downstream_wq = create_singlethread_workqueue(nvhdcp->name);
+	if (!nvhdcp->downstream_wq) {
+		nvhdcp_err("can't create workqueue\n");
+		e = -ENOMEM;
+		goto free_client;
+	}
+	INIT_WORK(&nvhdcp->work, nvhdcp_downstream_worker);
+
+	nvhdcp->miscdev.minor = MISC_DYNAMIC_MINOR;
+	nvhdcp->miscdev.name = nvhdcp->name;
+	nvhdcp->miscdev.fops = &nvhdcp_fops;
+
+	e = misc_register(&nvhdcp->miscdev);
+	if (e)
+		goto free_workqueue;
+
+	nvhdcp_vdbg("%s(): created misc device %s\n", __func__, nvhdcp->name);
+
+	return nvhdcp;
+free_workqueue:
+	destroy_workqueue(nvhdcp->downstream_wq);
+free_client:
+	i2c_release_client(nvhdcp->client);
+free_nvhdcp:
+	kfree(nvhdcp);
+	nvhdcp = NULL;	/* allow a later create() attempt to proceed */
+	nvhdcp_err("unable to create device.\n");
+	return ERR_PTR(e);
+}
+
+/*
+ * Tear down an instance created by tegra_nvhdcp_create(): deregister
+ * the misc device, stop and drain the worker, destroy the workqueue,
+ * release the i2c client and free the context.
+ *
+ * NOTE(review): the static guard pointer inside tegra_nvhdcp_create()
+ * is not cleared here, so a later create() will fail with -EMFILE —
+ * confirm this is intended (single-AP assumption).
+ */
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp)
+{
+	misc_deregister(&nvhdcp->miscdev);
+	tegra_nvhdcp_off(nvhdcp);
+	destroy_workqueue(nvhdcp->downstream_wq);
+	i2c_release_client(nvhdcp->client);
+	kfree(nvhdcp);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/dc/nvhdcp.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#define __DRIVERS_VIDEO_TEGRA_DC_NVHDCP_H
+#include <video/nvhdcp.h>
+
+struct tegra_nvhdcp;
+void tegra_nvhdcp_set_plug(struct tegra_nvhdcp *nvhdcp, bool hpd);
+int tegra_nvhdcp_set_policy(struct tegra_nvhdcp *nvhdcp, int pol);
+void tegra_nvhdcp_suspend(struct tegra_nvhdcp *nvhdcp);
+struct tegra_nvhdcp *tegra_nvhdcp_create(struct tegra_dc_hdmi_data *hdmi,
+ int id, int bus);
+void tegra_nvhdcp_destroy(struct tegra_nvhdcp *nvhdcp);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/dc/rgb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <mach/dc.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+
+/* register/value pairs driving the RGB output pins when enabled
+ * (consumed by tegra_dc_write_table()) */
+static const u32 tegra_dc_rgb_enable_pintable[] = {
+	DC_COM_PIN_OUTPUT_ENABLE0,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE1,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE2,	0x00000000,
+	DC_COM_PIN_OUTPUT_ENABLE3,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY1,	0x01000000,
+	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA0,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA1,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA2,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA3,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT4,	0x00210222,
+	DC_COM_PIN_OUTPUT_SELECT5,	0x00002200,
+	DC_COM_PIN_OUTPUT_SELECT6,	0x00020000,
+};
+
+/* register/value pairs tri-stating the RGB output pins when disabled */
+static const u32 tegra_dc_rgb_disable_pintable[] = {
+	DC_COM_PIN_OUTPUT_ENABLE0,	0x55555555,
+	DC_COM_PIN_OUTPUT_ENABLE1,	0x55150005,
+	DC_COM_PIN_OUTPUT_ENABLE2,	0x55555555,
+	DC_COM_PIN_OUTPUT_ENABLE3,	0x55555555,
+	DC_COM_PIN_OUTPUT_POLARITY0,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY1,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY2,	0x00000000,
+	DC_COM_PIN_OUTPUT_POLARITY3,	0x00000000,
+	DC_COM_PIN_OUTPUT_DATA0,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA1,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA2,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_DATA3,	0xaaaaaaaa,
+	DC_COM_PIN_OUTPUT_SELECT0,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT1,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT2,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT3,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT4,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT5,	0x00000000,
+	DC_COM_PIN_OUTPUT_SELECT6,	0x00000000,
+};
+
+/* Power up the display controller outputs, switch it to continuous
+ * display mode and program the RGB pin configuration. */
+void tegra_dc_rgb_enable(struct tegra_dc *dc)
+{
+	tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+			DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_write_table(dc, tegra_dc_rgb_enable_pintable);
+}
+
+/* Power down the display outputs and tri-state the RGB pins. */
+void tegra_dc_rgb_disable(struct tegra_dc *dc)
+{
+	tegra_dc_writel(dc, 0x00000000, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_write_table(dc, tegra_dc_rgb_disable_pintable);
+}
+
+/* output operations for the parallel RGB path */
+struct tegra_dc_out_ops tegra_dc_rgb_ops = {
+	.enable = tegra_dc_rgb_enable,
+	.disable = tegra_dc_rgb_disable,
+};
+
--- /dev/null
+/*
+ * drivers/video/tegra/fb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * Colin Cross <ccross@android.com>
+ * Travis Geiselbrecht <travis@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+
+#include <video/tegrafb.h>
+
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#include "host/dev.h"
+#include "nvmap/nvmap.h"
+
+/* Per-framebuffer driver state, stored in fb_info->par. */
+struct tegra_fb_info {
+	struct tegra_dc_win	*win;
+	struct nvhost_device	*ndev;
+	struct fb_info		*info;
+	bool			valid;
+
+	struct resource		*fb_mem;
+
+	int			xres;
+	int			yres;
+
+	/* 0/1 open-exclusivity flag toggled with atomic_xchg() */
+	atomic_t		in_use;
+	struct nvmap_client	*user_nvmap;
+	struct nvmap_client	*fb_nvmap;
+
+	/* serializes deferred flips (tegra_fb_flip_worker) */
+	struct workqueue_struct *flip_wq;
+};
+
+/* One window's worth of a flip request, pinned and ready to program. */
+struct tegra_fb_flip_win {
+	struct tegra_fb_windowattr	attr;
+	struct nvmap_handle_ref		*handle;
+	dma_addr_t			phys_addr;
+};
+
+/* Work item carrying a full flip request to the flip workqueue. */
+struct tegra_fb_flip_data {
+	struct work_struct	work;
+	struct tegra_fb_info	*fb;
+	struct tegra_fb_flip_win win[TEGRA_FB_FLIP_N_WINDOWS];
+	u32			syncpt_max;
+};
+
+/* palette array used by the fbcon */
+static u32 pseudo_palette[16];
+
+/*
+ * fb open: allow only one concurrent user (the in_use flag is claimed
+ * atomically) and start with no user nvmap client attached.
+ */
+static int tegra_fb_open(struct fb_info *info, int user)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+
+	if (atomic_xchg(&tegra_fb->in_use, 1))
+		return -EBUSY;
+
+	tegra_fb->user_nvmap = NULL;
+
+	return 0;
+}
+
+/*
+ * fb release: drain pending flips, drop any flipped-in buffer and
+ * restore the window to a full-screen default, release the user's
+ * nvmap client and clear the in_use flag.
+ */
+static int tegra_fb_release(struct fb_info *info, int user)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+
+	flush_workqueue(tegra_fb->flip_wq);
+
+	if (tegra_fb->win->cur_handle) {
+		nvmap_unpin(tegra_fb->fb_nvmap, tegra_fb->win->cur_handle);
+		nvmap_free(tegra_fb->fb_nvmap, tegra_fb->win->cur_handle);
+
+		tegra_fb->win->cur_handle = NULL;
+
+		/* reset the window geometry to cover the whole mode */
+		tegra_fb->win->x = 0;
+		tegra_fb->win->y = 0;
+		tegra_fb->win->w = var->xres;
+		tegra_fb->win->h = var->yres;
+		tegra_fb->win->out_x = 0;
+		tegra_fb->win->out_y = 0;
+		tegra_fb->win->out_w = var->xres;
+		tegra_fb->win->out_h = var->yres;
+		tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED;
+	}
+
+	if (tegra_fb->user_nvmap) {
+		nvmap_client_put(tegra_fb->user_nvmap);
+		tegra_fb->user_nvmap = NULL;
+	}
+
+	WARN_ON(!atomic_xchg(&tegra_fb->in_use, 0));
+
+	return 0;
+}
+
+/*
+ * fb check_var: reject any mode whose double-buffered footprint would
+ * exceed the framebuffer memory, then publish a doubled yres_virtual
+ * so userspace can double-buffer via pan_display.
+ *
+ * The size computation is done in 64 bits: var is user-controlled
+ * (FBIOPUT_VSCREENINFO) and the 32-bit product yres*xres*bpp could
+ * overflow and slip past the screen_size bound.
+ */
+static int tegra_fb_check_var(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	if (((u64)var->yres * var->xres * var->bits_per_pixel / 8 * 2) >
+	    info->screen_size)
+		return -EINVAL;
+
+	/* double yres_virtual to allow double buffering through pan_display */
+	var->yres_virtual = var->yres * 2;
+
+	return 0;
+}
+
+/*
+ * fb set_par: apply the current var to the hardware window.  Maps
+ * bits_per_pixel (32 or 16 only) to a window pixel format and stride,
+ * and, when a pixclock is given, matches the var against the modelist
+ * and programs the resulting timing into the display controller.
+ */
+static int tegra_fb_set_par(struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+
+	if (var->bits_per_pixel) {
+		/* we only support RGB ordering for now */
+		switch (var->bits_per_pixel) {
+		case 32:
+			var->red.offset = 0;
+			var->red.length = 8;
+			var->green.offset = 8;
+			var->green.length = 8;
+			var->blue.offset = 16;
+			var->blue.length = 8;
+			var->transp.offset = 24;
+			var->transp.length = 8;
+			tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
+			break;
+		case 16:
+			var->red.offset = 11;
+			var->red.length = 5;
+			var->green.offset = 5;
+			var->green.length = 6;
+			var->blue.offset = 0;
+			var->blue.length = 5;
+			tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+		info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+		tegra_fb->win->stride = info->fix.line_length;
+		tegra_fb->win->stride_uv = 0;
+		tegra_fb->win->offset_u = 0;
+		tegra_fb->win->offset_v = 0;
+	}
+
+	if (var->pixclock) {
+		struct tegra_dc_mode mode;
+
+		info->mode = (struct fb_videomode *)
+			fb_find_best_mode(var, &info->modelist);
+		if (!info->mode) {
+			dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
+			return -EINVAL;
+		}
+
+		/* translate the fb videomode into DC timing */
+		mode.pclk = PICOS2KHZ(info->mode->pixclock) * 1000;
+		mode.h_ref_to_sync = 1;
+		mode.v_ref_to_sync = 1;
+		mode.h_sync_width = info->mode->hsync_len;
+		mode.v_sync_width = info->mode->vsync_len;
+		mode.h_back_porch = info->mode->left_margin;
+		mode.v_back_porch = info->mode->upper_margin;
+		mode.h_active = info->mode->xres;
+		mode.v_active = info->mode->yres;
+		mode.h_front_porch = info->mode->right_margin;
+		mode.v_front_porch = info->mode->lower_margin;
+
+		mode.flags = 0;
+
+		if (!(info->mode->sync & FB_SYNC_HOR_HIGH_ACT))
+			mode.flags |= TEGRA_DC_MODE_FLAG_NEG_H_SYNC;
+
+		if (!(info->mode->sync & FB_SYNC_VERT_HIGH_ACT))
+			mode.flags |= TEGRA_DC_MODE_FLAG_NEG_V_SYNC;
+
+		tegra_dc_set_mode(tegra_fb->win->dc, &mode);
+
+		tegra_fb->win->w = info->mode->xres;
+		tegra_fb->win->h = info->mode->yres;
+		tegra_fb->win->out_w = info->mode->xres;
+		tegra_fb->win->out_h = info->mode->yres;
+	}
+	return 0;
+}
+
+/*
+ * fb setcolreg: build a pseudo-palette entry for truecolor/directcolor
+ * visuals (16 entries used by fbcon).  The 16-bit color components are
+ * truncated to the channel widths declared in var.
+ */
+static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+	unsigned blue, unsigned transp, struct fb_info *info)
+{
+	struct fb_var_screeninfo *var = &info->var;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		u32 v;
+
+		if (regno >= 16)
+			return -EINVAL;
+
+		red = (red >> (16 - info->var.red.length));
+		green = (green >> (16 - info->var.green.length));
+		blue = (blue >> (16 - info->var.blue.length));
+
+		v = (red << var->red.offset) |
+			(green << var->green.offset) |
+			(blue << var->blue.offset);
+
+		((u32 *)info->pseudo_palette)[regno] = v;
+	}
+
+	return 0;
+}
+
+/*
+ * fb blank: only full unblank and full powerdown are handled; the
+ * intermediate VESA blanking levels return -ENOTTY.  Powerdown drains
+ * pending flips before disabling the display controller.
+ */
+static int tegra_fb_blank(int blank, struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		dev_dbg(&tegra_fb->ndev->dev, "unblank\n");
+		tegra_dc_enable(tegra_fb->win->dc);
+		return 0;
+
+	case FB_BLANK_POWERDOWN:
+		dev_dbg(&tegra_fb->ndev->dev, "blank\n");
+		flush_workqueue(tegra_fb->flip_wq);
+		tegra_dc_disable(tegra_fb->win->dc);
+		return 0;
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+/* Suspend hook: make sure no flip work is still in flight. */
+void tegra_fb_suspend(struct tegra_fb_info *tegra_fb)
+{
+	flush_workqueue(tegra_fb->flip_wq);
+}
+
+
+/*
+ * fb pan_display: repoint the window's scanout address inside the
+ * framebuffer according to x/y offset.  Skipped while a user-flipped
+ * buffer (cur_handle) owns the window.
+ */
+static int tegra_fb_pan_display(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	char __iomem *flush_start;
+	char __iomem *flush_end;
+	u32 addr;
+
+	if (!tegra_fb->win->cur_handle) {
+		flush_start = info->screen_base + (var->yoffset * info->fix.line_length);
+		flush_end = flush_start + (var->yres * info->fix.line_length);
+
+		info->var.xoffset = var->xoffset;
+		info->var.yoffset = var->yoffset;
+
+		addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) +
+			(var->xoffset * (var->bits_per_pixel/8));
+
+		tegra_fb->win->phys_addr = addr;
+		/* TODO: update virt_addr */
+
+		tegra_dc_update_windows(&tegra_fb->win, 1);
+		tegra_dc_sync_windows(&tegra_fb->win, 1);
+	}
+
+	return 0;
+}
+
+/* Software drawing ops: no hardware acceleration, defer to the
+ * generic cfb_* helpers. */
+static void tegra_fb_fillrect(struct fb_info *info,
+			      const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+}
+
+static void tegra_fb_copyarea(struct fb_info *info,
+			      const struct fb_copyarea *region)
+{
+	cfb_copyarea(info, region);
+}
+
+static void tegra_fb_imageblit(struct fb_info *info,
+			       const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+}
+
+/* TODO: implement ALLOC, FREE, BLANK ioctls */
+
+/*
+ * Attach the caller's nvmap client (identified by fd) to the fb so
+ * that flip requests can resolve buffer handles against it.  Replaces
+ * (and drops the reference of) any previously attached client.
+ */
+static int tegra_fb_set_nvmap_fd(struct tegra_fb_info *tegra_fb, int fd)
+{
+	struct nvmap_client *nvmap = NULL;
+
+	if (fd < 0)
+		return -EINVAL;
+
+	nvmap = nvmap_client_get_file(fd);
+	if (IS_ERR(nvmap))
+		return PTR_ERR(nvmap);
+
+	if (tegra_fb->user_nvmap)
+		nvmap_client_put(tegra_fb->user_nvmap);
+
+	tegra_fb->user_nvmap = nvmap;
+
+	return 0;
+}
+
+/*
+ * Resolve a flip buffer id through the user's nvmap client, duplicate
+ * the handle into the fb driver's own client (so the buffer cannot be
+ * freed while scanned out) and pin it, filling in phys_addr.
+ * A zero buff_id means "no buffer for this window" and succeeds as a
+ * no-op.  Returns 0 or a negative errno.
+ */
+static int tegra_fb_pin_window(struct tegra_fb_info *tegra_fb,
+			       struct tegra_fb_flip_win *flip_win)
+{
+	struct nvmap_handle_ref *win_dupe;
+	struct nvmap_handle *win_handle;
+	unsigned long buff_id = flip_win->attr.buff_id;
+
+	if (!buff_id)
+		return 0;
+
+	win_handle = nvmap_get_handle_id(tegra_fb->user_nvmap, buff_id);
+	if (win_handle == NULL) {
+		dev_err(&tegra_fb->ndev->dev, "%s: flip invalid "
+			"handle %08lx\n", current->comm, buff_id);
+		return -EPERM;
+	}
+
+	/* duplicate the new framebuffer's handle into the fb driver's
+	 * nvmap context, to ensure that the handle won't be freed as
+	 * long as it is in-use by the fb driver */
+	win_dupe = nvmap_duplicate_handle_id(tegra_fb->fb_nvmap, buff_id);
+	nvmap_handle_put(win_handle);
+
+	if (IS_ERR(win_dupe)) {
+		dev_err(&tegra_fb->ndev->dev, "couldn't duplicate handle\n");
+		return PTR_ERR(win_dupe);
+	}
+
+	flip_win->handle = win_dupe;
+
+	flip_win->phys_addr = nvmap_pin(tegra_fb->fb_nvmap, win_dupe);
+	if (IS_ERR((void *)flip_win->phys_addr)) {
+		dev_err(&tegra_fb->ndev->dev, "couldn't pin handle\n");
+		nvmap_free(tegra_fb->fb_nvmap, win_dupe);
+		return PTR_ERR((void *)flip_win->phys_addr);
+	}
+
+	return 0;
+}
+
+/*
+ * Program a DC window from a pinned flip descriptor: flags, format,
+ * geometry, scanout address/offsets/strides.  A NULL handle disables
+ * the window.  If the request carries a pre-syncpoint, wait (up to
+ * 500ms) for it before returning so the buffer is ready for scanout.
+ */
+static int tegra_fb_set_windowattr(struct tegra_fb_info *tegra_fb,
+				   struct tegra_dc_win *win,
+				   const struct tegra_fb_flip_win *flip_win)
+{
+	if (flip_win->handle == NULL) {
+		win->flags = 0;
+		win->cur_handle = NULL;
+		return 0;
+	}
+
+	win->flags = TEGRA_WIN_FLAG_ENABLED;
+	if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_PREMULT)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+	else if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_COVERAGE)
+		win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_H)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_H;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_INVERT_V)
+		win->flags |= TEGRA_WIN_FLAG_INVERT_V;
+	if (flip_win->attr.flags & TEGRA_FB_WIN_FLAG_TILED)
+		win->flags |= TEGRA_WIN_FLAG_TILED;
+
+	win->fmt = flip_win->attr.pixformat;
+	win->x = flip_win->attr.x;
+	win->y = flip_win->attr.y;
+	win->w = flip_win->attr.w;
+	win->h = flip_win->attr.h;
+	win->out_x = flip_win->attr.out_x;
+	win->out_y = flip_win->attr.out_y;
+	win->out_w = flip_win->attr.out_w;
+	win->out_h = flip_win->attr.out_h;
+	win->z = flip_win->attr.z;
+	win->cur_handle = flip_win->handle;
+
+	/* STOPSHIP verify that this won't read outside of the surface */
+	win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+	win->offset_u = flip_win->attr.offset_u + flip_win->attr.offset;
+	win->offset_v = flip_win->attr.offset_v + flip_win->attr.offset;
+	win->stride = flip_win->attr.stride;
+	win->stride_uv = flip_win->attr.stride_uv;
+
+	/* negative id means "no pre-syncpoint to wait on" */
+	if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+		nvhost_syncpt_wait_timeout(&tegra_fb->ndev->host->syncpt,
+					   flip_win->attr.pre_syncpt_id,
+					   flip_win->attr.pre_syncpt_val,
+					   msecs_to_jiffies(500));
+	}
+
+
+	return 0;
+}
+
+/*
+ * Deferred flip work: program each requested window, kick the DC
+ * update and wait for it to latch, advance the flip syncpoint, then
+ * unpin/free the buffers that were previously on screen.
+ *
+ * Cleanup vs. previous revision: removed a redundant second
+ * container_of() assignment to 'data' (already initialized at its
+ * declaration) and the dead '#if 0' pre-syncpt wait, which duplicated
+ * the wait already performed in tegra_fb_set_windowattr().
+ */
+static void tegra_fb_flip_worker(struct work_struct *work)
+{
+	struct tegra_fb_flip_data *data =
+		container_of(work, struct tegra_fb_flip_data, work);
+	struct tegra_fb_info *tegra_fb = data->fb;
+	struct tegra_dc_win *win;
+	struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
+	struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+	int i, nr_win = 0, nr_unpin = 0;
+
+	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+		struct tegra_fb_flip_win *flip_win = &data->win[i];
+		int idx = flip_win->attr.index;
+		win = tegra_dc_get_window(tegra_fb->win->dc, idx);
+
+		if (!win)
+			continue;
+
+		/* remember the outgoing buffer so it can be released
+		 * once the new configuration has latched */
+		if (win->flags && win->cur_handle)
+			unpin_handles[nr_unpin++] = win->cur_handle;
+
+		tegra_fb_set_windowattr(tegra_fb, win, &data->win[i]);
+
+		wins[nr_win++] = win;
+	}
+
+	tegra_dc_update_windows(wins, nr_win);
+	/* TODO: implement swapinterval here */
+	tegra_dc_sync_windows(wins, nr_win);
+
+	tegra_dc_incr_syncpt_min(tegra_fb->win->dc, data->syncpt_max);
+
+	/* unpin and deref previous front buffers */
+	for (i = 0; i < nr_unpin; i++) {
+		nvmap_unpin(tegra_fb->fb_nvmap, unpin_handles[i]);
+		nvmap_free(tegra_fb->fb_nvmap, unpin_handles[i]);
+	}
+
+	kfree(data);
+}
+
+/*
+ * Queue a flip: pin every buffer referenced by the request, allocate
+ * a new syncpoint value for completion, hand the work to flip_wq and
+ * report the post-flip syncpoint id/value back to the caller.  On any
+ * pin failure, everything pinned so far is unwound.
+ */
+static int tegra_fb_flip(struct tegra_fb_info *tegra_fb,
+			 struct tegra_fb_flip_args *args)
+{
+	struct tegra_fb_flip_data *data;
+	struct tegra_fb_flip_win *flip_win;
+	u32 syncpt_max;
+	int i, err;
+
+	if (WARN_ON(!tegra_fb->user_nvmap))
+		return -EFAULT;
+
+	if (WARN_ON(!tegra_fb->ndev))
+		return -EFAULT;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&tegra_fb->ndev->dev,
+			"can't allocate memory for flip\n");
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&data->work, tegra_fb_flip_worker);
+	data->fb = tegra_fb;
+
+	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+		flip_win = &data->win[i];
+
+		memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+
+		err = tegra_fb_pin_window(tegra_fb, flip_win);
+		if (err < 0) {
+			dev_err(&tegra_fb->ndev->dev,
+				"error setting window attributes\n");
+			goto surf_err;
+		}
+	}
+
+	syncpt_max = tegra_dc_incr_syncpt_max(tegra_fb->win->dc);
+	data->syncpt_max = syncpt_max;
+
+	queue_work(tegra_fb->flip_wq, &data->work);
+
+	args->post_syncpt_val = syncpt_max;
+	args->post_syncpt_id = tegra_dc_get_syncpt_id(tegra_fb->win->dc);
+
+	return 0;
+
+surf_err:
+	/* unwind the windows pinned before the failure */
+	while (i--) {
+		if (data->win[i].handle) {
+			nvmap_unpin(tegra_fb->fb_nvmap,
+				    data->win[i].handle);
+			nvmap_free(tegra_fb->fb_nvmap,
+				   data->win[i].handle);
+		}
+	}
+	kfree(data);
+	return err;
+}
+
+/* TODO: implement private window ioctls to set overlay x,y */
+
+/*
+ * Tegra-specific fb ioctls: attach the caller's nvmap client
+ * (SET_NVMAP_FD), queue a multi-window flip (FLIP), or dump the
+ * current modelist to userspace (GET_MODEDB).
+ */
+static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+	struct tegra_fb_info *tegra_fb = info->par;
+	struct tegra_fb_flip_args flip_args;
+	struct tegra_fb_modedb modedb;
+	struct fb_modelist *modelist;
+	int i;
+	int fd;
+	int ret;
+
+	switch (cmd) {
+	case FBIO_TEGRA_SET_NVMAP_FD:
+		if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
+			return -EFAULT;
+
+		return tegra_fb_set_nvmap_fd(tegra_fb, fd);
+
+	case FBIO_TEGRA_FLIP:
+		if (copy_from_user(&flip_args, (void __user *)arg, sizeof(flip_args)))
+			return -EFAULT;
+
+		ret = tegra_fb_flip(tegra_fb, &flip_args);
+
+		/* flip_args carries the post-flip syncpt back out */
+		if (copy_to_user((void __user *)arg, &flip_args, sizeof(flip_args)))
+			return -EFAULT;
+
+		return ret;
+
+	case FBIO_TEGRA_GET_MODEDB:
+		if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb)))
+			return -EFAULT;
+
+		/* copy at most modedb_len entries, then report the count */
+		i = 0;
+		list_for_each_entry(modelist, &info->modelist, list) {
+			struct fb_var_screeninfo var;
+
+			if (i >= modedb.modedb_len)
+				break;
+			memset(&var, 0x0, sizeof(var));
+			fb_videomode_to_var(&var, &modelist->mode);
+
+			if (copy_to_user((void __user *)&modedb.modedb[i],
+					 &var, sizeof(var)))
+				return -EFAULT;
+			i++;
+		}
+		modedb.modedb_len = i;
+
+		if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb)))
+			return -EFAULT;
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+/* framebuffer operations for the tegra fb device */
+static struct fb_ops tegra_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = tegra_fb_open,
+	.fb_release = tegra_fb_release,
+	.fb_check_var = tegra_fb_check_var,
+	.fb_set_par = tegra_fb_set_par,
+	.fb_setcolreg = tegra_fb_setcolreg,
+	.fb_blank = tegra_fb_blank,
+	.fb_pan_display = tegra_fb_pan_display,
+	.fb_fillrect = tegra_fb_fillrect,
+	.fb_copyarea = tegra_fb_copyarea,
+	.fb_imageblit = tegra_fb_imageblit,
+	.fb_ioctl = tegra_fb_ioctl,
+};
+
+/*
+ * Rebuild the fb modelist from freshly-read monitor specs (e.g. after
+ * an HDMI hotplug).  A NULL specs clears the monspecs and programs an
+ * empty mode; otherwise each modedb entry passing the optional
+ * mode_filter is added, the best display mode is selected and applied,
+ * and FB_EVENT_NEW_MODELIST is broadcast.  Serialized by info->lock.
+ */
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+			      struct fb_monspecs *specs,
+			      bool (*mode_filter)(struct fb_videomode *mode))
+{
+	struct fb_event event;
+	struct fb_modelist *m;
+	int i;
+
+	mutex_lock(&fb_info->info->lock);
+	fb_destroy_modedb(fb_info->info->monspecs.modedb);
+
+	fb_destroy_modelist(&fb_info->info->modelist);
+
+	if (specs == NULL) {
+		struct tegra_dc_mode mode;
+		memset(&fb_info->info->monspecs, 0x0,
+		       sizeof(fb_info->info->monspecs));
+		memset(&mode, 0x0, sizeof(mode));
+		tegra_dc_set_mode(fb_info->win->dc, &mode);
+		mutex_unlock(&fb_info->info->lock);
+		return;
+	}
+
+	memcpy(&fb_info->info->monspecs, specs,
+	       sizeof(fb_info->info->monspecs));
+
+	for (i = 0; i < specs->modedb_len; i++) {
+		if (mode_filter) {
+			if (mode_filter(&specs->modedb[i]))
+				fb_add_videomode(&specs->modedb[i],
+						 &fb_info->info->modelist);
+		} else {
+			fb_add_videomode(&specs->modedb[i],
+					 &fb_info->info->modelist);
+		}
+	}
+
+	if (list_empty(&fb_info->info->modelist)) {
+		struct tegra_dc_mode mode;
+		memset(&fb_info->info->var, 0x0, sizeof(fb_info->info->var));
+		memset(&mode, 0x0, sizeof(mode));
+		tegra_dc_set_mode(fb_info->win->dc, &mode);
+	} else {
+		/* in case the first mode was not matched */
+		m = list_first_entry(&fb_info->info->modelist, struct fb_modelist, list);
+		m->mode.flag |= FB_MODE_IS_FIRST;
+		fb_info->info->mode = (struct fb_videomode *)
+			fb_find_best_display(specs, &fb_info->info->modelist);
+
+		memset(&fb_info->info->var, 0x0,
+		       sizeof(fb_info->info->var));
+		fb_videomode_to_var(&fb_info->info->var, fb_info->info->mode);
+		tegra_fb_set_par(fb_info->info);
+	}
+
+	event.info = fb_info->info;
+	fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+	mutex_unlock(&fb_info->info->lock);
+}
+
+/*
+ * Allocate, initialize and register a framebuffer device backed by one
+ * display-controller window.  Returns the driver-private tegra_fb_info
+ * on success, ERR_PTR() on failure.
+ *
+ * Fix vs. original: when create_singlethread_workqueue() failed, the
+ * error path jumped to the label that destroys that very (never
+ * created) workqueue, dereferencing NULL.  The unwind labels now run
+ * strictly in reverse order of acquisition, and iounmap() is guarded
+ * for the no-fb_mem case.
+ */
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+					struct tegra_dc *dc,
+					struct tegra_fb_data *fb_data,
+					struct resource *fb_mem)
+{
+	struct tegra_dc_win *win;
+	struct fb_info *info;
+	struct tegra_fb_info *tegra_fb;
+	void __iomem *fb_base = NULL;
+	unsigned long fb_size = 0;
+	unsigned long fb_phys = 0;
+	int ret = 0;
+
+	win = tegra_dc_get_window(dc, fb_data->win);
+	if (!win) {
+		dev_err(&ndev->dev, "dc does not have a window at index %d\n",
+			fb_data->win);
+		return ERR_PTR(-ENOENT);
+	}
+
+	info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	tegra_fb = info->par;
+	tegra_fb->win = win;
+	tegra_fb->ndev = ndev;
+	tegra_fb->fb_mem = fb_mem;
+	tegra_fb->xres = fb_data->xres;
+	tegra_fb->yres = fb_data->yres;
+	tegra_fb->fb_nvmap = nvmap_create_client(nvmap_dev, "tegra-fb");
+	if (!tegra_fb->fb_nvmap) {
+		dev_err(&ndev->dev, "couldn't create nvmap client\n");
+		ret = -ENOMEM;
+		goto err_free;
+	}
+	atomic_set(&tegra_fb->in_use, 0);
+
+	tegra_fb->flip_wq = create_singlethread_workqueue(dev_name(&ndev->dev));
+	if (!tegra_fb->flip_wq) {
+		dev_err(&ndev->dev, "couldn't create flip work-queue\n");
+		ret = -ENOMEM;
+		/* workqueue was never created: only undo the nvmap client */
+		goto err_put_client;
+	}
+
+	if (fb_mem) {
+		fb_size = resource_size(fb_mem);
+		fb_phys = fb_mem->start;
+		fb_base = ioremap_nocache(fb_phys, fb_size);
+		if (!fb_base) {
+			dev_err(&ndev->dev, "fb can't be mapped\n");
+			ret = -EBUSY;
+			goto err_delete_wq;
+		}
+		tegra_fb->valid = true;
+	}
+
+	info->fbops = &tegra_fb_ops;
+	info->pseudo_palette = pseudo_palette;
+	info->screen_base = fb_base;
+	info->screen_size = fb_size;
+
+	strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id));
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = FB_VISUAL_TRUECOLOR;
+	info->fix.xpanstep = 1;
+	info->fix.ypanstep = 1;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.smem_start = fb_phys;
+	info->fix.smem_len = fb_size;
+
+	info->var.xres = fb_data->xres;
+	info->var.yres = fb_data->yres;
+	info->var.xres_virtual = fb_data->xres;
+	/* double the virtual height so userspace can pan/flip */
+	info->var.yres_virtual = fb_data->yres * 2;
+	info->var.bits_per_pixel = fb_data->bits_per_pixel;
+	info->var.activate = FB_ACTIVATE_VBL;
+	info->var.height = tegra_dc_get_out_height(dc);
+	info->var.width = tegra_dc_get_out_width(dc);
+	info->var.pixclock = 0;
+	info->var.left_margin = 0;
+	info->var.right_margin = 0;
+	info->var.upper_margin = 0;
+	info->var.lower_margin = 0;
+	info->var.hsync_len = 0;
+	info->var.vsync_len = 0;
+	info->var.vmode = FB_VMODE_NONINTERLACED;
+
+	win->x = 0;
+	win->y = 0;
+	win->w = fb_data->xres;
+	win->h = fb_data->yres;
+	/* TODO: set to output res dc */
+	win->out_x = 0;
+	win->out_y = 0;
+	win->out_w = fb_data->xres;
+	win->out_h = fb_data->yres;
+	win->z = 0;
+	win->phys_addr = fb_phys;
+	win->virt_addr = fb_base;
+	win->offset_u = 0;
+	win->offset_v = 0;
+	win->stride = fb_data->xres * fb_data->bits_per_pixel / 8;
+	win->stride_uv = 0;
+	win->flags = TEGRA_WIN_FLAG_ENABLED;
+
+	if (fb_mem)
+		tegra_fb_set_par(info);
+
+	if (register_framebuffer(info)) {
+		dev_err(&ndev->dev, "failed to register framebuffer\n");
+		ret = -ENODEV;
+		goto err_iounmap_fb;
+	}
+
+	tegra_fb->info = info;
+
+	dev_info(&ndev->dev, "probed\n");
+
+	if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) {
+		tegra_dc_update_windows(&tegra_fb->win, 1);
+		tegra_dc_sync_windows(&tegra_fb->win, 1);
+	}
+
+	return tegra_fb;
+
+	/* unwind in reverse order of acquisition */
+err_iounmap_fb:
+	if (fb_base)
+		iounmap(fb_base);
+err_delete_wq:
+	destroy_workqueue(tegra_fb->flip_wq);
+err_put_client:
+	nvmap_client_put(tegra_fb->fb_nvmap);
+err_free:
+	framebuffer_release(info);
+err:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Tear down a framebuffer created by tegra_fb_register(): release any
+ * still-pinned flip buffer, drop the nvmap client, unregister the
+ * fbdev, drain and destroy the flip workqueue, then unmap and free.
+ *
+ * NOTE(review): the nvmap client is put *before* unregister_framebuffer()
+ * runs -- confirm no fb operation can still reference fb_nvmap at that
+ * point.
+ */
+void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+	struct fb_info *info = fb_info->info;
+
+	/* unpin/free a flip buffer still owned by the window, if any */
+	if (fb_info->win->cur_handle) {
+		nvmap_unpin(fb_info->fb_nvmap, fb_info->win->cur_handle);
+		nvmap_free(fb_info->fb_nvmap, fb_info->win->cur_handle);
+	}
+
+	if (fb_info->fb_nvmap)
+		nvmap_client_put(fb_info->fb_nvmap);
+
+	unregister_framebuffer(info);
+
+	/* let queued flips finish before destroying the workqueue */
+	flush_workqueue(fb_info->flip_wq);
+	destroy_workqueue(fb_info->flip_wq);
+
+	iounmap(info->screen_base);
+	framebuffer_release(info);
+}
--- /dev/null
+# Object list for the Tegra graphics host (host1x) driver module.
+nvhost-objs = \
+	nvhost_acm.o \
+	nvhost_syncpt.o \
+	nvhost_cdma.o \
+	nvhost_cpuaccess.o \
+	nvhost_intr.o \
+	nvhost_channel.o \
+	nvhost_3dctx.o \
+	dev.o \
+	bus.o \
+	debug.o
+
+# Built as nvhost.o when CONFIG_TEGRA_GRHOST is enabled.
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
--- /dev/null
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * based heavily on drivers/base/platform.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <mach/nvhost.h>
+
+#include "dev.h"
+
+/* Singleton host1x master, set by nvhost_bus_register() and handed to
+ * client devices in nvhost_drv_probe(). */
+struct nvhost_master *nvhost;
+/* Root device parenting nvhost devices that have no explicit parent. */
+struct device nvhost_bus = {
+	.init_name = "nvhost",
+};
+
+/* Return the num'th resource of the given type, or NULL if absent. */
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+				     unsigned int type, unsigned int num)
+{
+	int idx;
+
+	for (idx = 0; idx < dev->num_resources; idx++) {
+		struct resource *res = &dev->resource[idx];
+
+		if (resource_type(res) != type)
+			continue;
+		if (num == 0)
+			return res;
+		num--;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+/* Return the num'th IRQ resource's number, or -ENXIO if absent. */
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+	struct resource *res;
+
+	res = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+	if (!res)
+		return -ENXIO;
+	return res->start;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+/*
+ * Look up a resource of @type by name.  Unnamed resources are skipped:
+ * the original called strcmp() on a possibly-NULL r->name (names are
+ * only filled in by nvhost_device_register()).
+ */
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+					    unsigned int type,
+					    const char *name)
+{
+	int i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (type == resource_type(r) && r->name &&
+		    !strcmp(r->name, name))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+/* Return the IRQ number of the IRQ resource named @name, or -ENXIO. */
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+	struct resource *res;
+
+	res = nvhost_get_resource_byname(dev, IORESOURCE_IRQ, name);
+	return res ? res->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+/* Bus probe hook: hand the global host1x master to the device, then
+ * forward to the nvhost driver's probe(). */
+static int nvhost_drv_probe(struct device *_dev)
+{
+	struct nvhost_device *ndev = to_nvhost_device(_dev);
+	struct nvhost_driver *ndrv = to_nvhost_driver(_dev->driver);
+
+	ndev->host = nvhost;
+	return ndrv->probe(ndev);
+}
+
+/* Bus remove hook: forward to the nvhost driver's remove(). */
+static int nvhost_drv_remove(struct device *_dev)
+{
+	struct nvhost_device *ndev = to_nvhost_device(_dev);
+
+	return to_nvhost_driver(_dev->driver)->remove(ndev);
+}
+
+/* Bus shutdown hook: forward to the nvhost driver's shutdown(). */
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+	struct nvhost_device *ndev = to_nvhost_device(_dev);
+
+	to_nvhost_driver(_dev->driver)->shutdown(ndev);
+}
+
+/*
+ * Register an nvhost driver with the driver core.  Only the hooks the
+ * driver actually implements are wired up on the generic driver, so
+ * the core never calls through a NULL nvhost hook.
+ */
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+	struct device_driver *dd = &drv->driver;
+
+	dd->bus = &nvhost_bus_type;
+	if (drv->probe)
+		dd->probe = nvhost_drv_probe;
+	if (drv->remove)
+		dd->remove = nvhost_drv_remove;
+	if (drv->shutdown)
+		dd->shutdown = nvhost_drv_shutdown;
+
+	return driver_register(dd);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+/* Unregister a driver previously added with nvhost_driver_register(). */
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+/*
+ * Register an nvhost device with the device core.
+ *
+ * Names the device ("<name>" or "<name>.<id>"), claims every MEM/IO
+ * resource under the appropriate resource tree, then adds the device.
+ * On failure, resources claimed so far are released again.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int nvhost_device_register(struct nvhost_device *dev)
+{
+	int i, ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	device_initialize(&dev->dev);
+
+	/* parent unparented devices under the nvhost root device */
+	if (!dev->dev.parent)
+		dev->dev.parent = &nvhost_bus;
+
+	dev->dev.bus = &nvhost_bus_type;
+
+	if (dev->id != -1)
+		dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+	else
+		dev_set_name(&dev->dev, "%s", dev->name);
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *p, *r = &dev->resource[i];
+
+		/* default unnamed resources to the device name */
+		if (r->name == NULL)
+			r->name = dev_name(&dev->dev);
+
+		/* pick the parent tree by resource type when unset */
+		p = r->parent;
+		if (!p) {
+			if (resource_type(r) == IORESOURCE_MEM)
+				p = &iomem_resource;
+			else if (resource_type(r) == IORESOURCE_IO)
+				p = &ioport_resource;
+		}
+
+		if (p && insert_resource(p, r)) {
+			pr_err("%s: failed to claim resource %d\n",
+				dev_name(&dev->dev), i);
+			ret = -EBUSY;
+			goto failed;
+		}
+	}
+
+	ret = device_add(&dev->dev);
+	if (ret == 0)
+		return ret;
+
+failed:
+	/* release only resources inserted so far (indices i-1 .. 0) */
+	while (--i >= 0) {
+		struct resource *r = &dev->resource[i];
+		unsigned long type = resource_type(r);
+
+		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+			release_resource(r);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
+
+/*
+ * Remove a device from the bus, release the MEM/IO resources it
+ * claimed at registration, then drop the registration reference.
+ * NULL is tolerated as a no-op.
+ */
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+	int i;
+
+	if (!dev)
+		return;
+
+	device_del(&dev->dev);
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (resource_type(r) == IORESOURCE_MEM ||
+		    resource_type(r) == IORESOURCE_IO)
+			release_resource(r);
+	}
+
+	put_device(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
+
+/*
+ * Match devices to drivers by name prefix, so driver "gr3d" binds
+ * devices "gr3d", "gr3d.0", "gr3d.1", ...
+ *
+ * Fix vs. original: the per-attempt pr_info() ran for every
+ * driver/device pair on the bus and flooded the log; demoted to
+ * pr_debug().
+ */
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+	struct nvhost_device *dev = to_nvhost_device(_dev);
+
+	pr_debug("host1x: %s %s\n", dev->name, drv->name);
+	return !strncmp(dev->name, drv->name, strlen(drv->name));
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Legacy suspend: forward to the nvhost driver's pre-dev_pm_ops
+ * ->suspend() hook, if the device is bound and the hook exists. */
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+	struct nvhost_device *pdev = to_nvhost_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->suspend)
+		ret = pdrv->suspend(pdev, mesg);
+
+	return ret;
+}
+
+/* Legacy resume: forward to the nvhost driver's ->resume() hook. */
+static int nvhost_legacy_resume(struct device *dev)
+{
+	struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+	struct nvhost_device *pdev = to_nvhost_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->resume)
+		ret = pdrv->resume(pdev);
+
+	return ret;
+}
+
+/* prepare/complete exist only in dev_pm_ops; there is no legacy
+ * fallback for them. */
+static int nvhost_pm_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare NULL
+#define nvhost_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+/* System suspend: prefer dev_pm_ops, fall back to the legacy hook.
+ * Declared __weak so a platform may override the default. */
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend)
+			ret = drv->pm->suspend(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+	}
+
+	return ret;
+}
+
+/* suspend_noirq has no legacy equivalent; dev_pm_ops only. */
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend_noirq)
+			ret = drv->pm->suspend_noirq(dev);
+	}
+
+	return ret;
+}
+
+/* System resume counterpart of nvhost_pm_suspend(). */
+int __weak nvhost_pm_resume(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume)
+			ret = drv->pm->resume(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+/* resume_noirq: dev_pm_ops only, no legacy fallback. */
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume_noirq)
+			ret = drv->pm->resume_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend NULL
+#define nvhost_pm_resume NULL
+#define nvhost_pm_suspend_noirq NULL
+#define nvhost_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+/* freeze: dev_pm_ops first, else legacy suspend with PMSG_FREEZE. */
+static int nvhost_pm_freeze(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze)
+			ret = drv->pm->freeze(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+	}
+
+	return ret;
+}
+
+/* freeze_noirq: dev_pm_ops only, no legacy fallback. */
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze_noirq)
+			ret = drv->pm->freeze_noirq(dev);
+	}
+
+	return ret;
+}
+
+/* thaw: dev_pm_ops first, else legacy resume. */
+static int nvhost_pm_thaw(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw)
+			ret = drv->pm->thaw(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+/* thaw_noirq: dev_pm_ops only. */
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw_noirq)
+			ret = drv->pm->thaw_noirq(dev);
+	}
+
+	return ret;
+}
+
+/* poweroff: dev_pm_ops first, else legacy suspend with PMSG_HIBERNATE. */
+static int nvhost_pm_poweroff(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff)
+			ret = drv->pm->poweroff(dev);
+	} else {
+		ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+	}
+
+	return ret;
+}
+
+/* poweroff_noirq: dev_pm_ops only. */
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff_noirq)
+			ret = drv->pm->poweroff_noirq(dev);
+	}
+
+	return ret;
+}
+
+/* restore (resume from hibernation image): dev_pm_ops first, else
+ * legacy resume. */
+static int nvhost_pm_restore(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore)
+			ret = drv->pm->restore(dev);
+	} else {
+		ret = nvhost_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+/* restore_noirq: dev_pm_ops only. */
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore_noirq)
+			ret = drv->pm->restore_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze NULL
+#define nvhost_pm_thaw NULL
+#define nvhost_pm_poweroff NULL
+#define nvhost_pm_restore NULL
+#define nvhost_pm_freeze_noirq NULL
+#define nvhost_pm_thaw_noirq NULL
+#define nvhost_pm_poweroff_noirq NULL
+#define nvhost_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+/*
+ * Runtime-PM callbacks simply forward to the pm_generic_* helpers;
+ * they are __weak so a platform can override them.  (Dropped the
+ * stray semicolons that followed each function body in the original.)
+ */
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+	return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+	return pm_generic_runtime_resume(dev);
+}
+
+int __weak nvhost_pm_runtime_idle(struct device *dev)
+{
+	return pm_generic_runtime_idle(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+#define nvhost_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/* Bus-level PM operations; entries compiled out by the CONFIG options
+ * above resolve to NULL, meaning "no callback" to the PM core. */
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+	.prepare = nvhost_pm_prepare,
+	.complete = nvhost_pm_complete,
+	.suspend = nvhost_pm_suspend,
+	.resume = nvhost_pm_resume,
+	.freeze = nvhost_pm_freeze,
+	.thaw = nvhost_pm_thaw,
+	.poweroff = nvhost_pm_poweroff,
+	.restore = nvhost_pm_restore,
+	.suspend_noirq = nvhost_pm_suspend_noirq,
+	.resume_noirq = nvhost_pm_resume_noirq,
+	.freeze_noirq = nvhost_pm_freeze_noirq,
+	.thaw_noirq = nvhost_pm_thaw_noirq,
+	.poweroff_noirq = nvhost_pm_poweroff_noirq,
+	.restore_noirq = nvhost_pm_restore_noirq,
+	.runtime_suspend = nvhost_pm_runtime_suspend,
+	.runtime_resume = nvhost_pm_runtime_resume,
+	.runtime_idle = nvhost_pm_runtime_idle,
+};
+
+/* The nvhost virtual bus: name-prefix device/driver matching plus the
+ * shared PM operations above. */
+struct bus_type nvhost_bus_type = {
+	.name = "nvhost",
+	.match = nvhost_bus_match,
+	.pm = &nvhost_dev_pm_ops,
+};
+EXPORT_SYMBOL(nvhost_bus_type);
+
+/* Record the host1x master so nvhost_drv_probe() can hand it to
+ * client devices.  Always succeeds. */
+int nvhost_bus_register(struct nvhost_master *host)
+{
+	nvhost = host;
+	return 0;
+}
+
+
+/*
+ * Register the nvhost root device and then the bus type.  Runs at
+ * postcore time so the bus exists before any nvhost device or driver
+ * registers.  On bus registration failure the root device is torn
+ * down again.
+ */
+int nvhost_bus_init(void)
+{
+	int err;
+
+	pr_info("host1x bus init\n");
+
+	err = device_register(&nvhost_bus);
+	if (err)
+		return err;
+
+	err = bus_register(&nvhost_bus_type);
+	if (err)
+		device_unregister(&nvhost_bus);
+
+	return err;
+}
+postcore_initcall(nvhost_bus_init);
+
--- /dev/null
+/*
+ * drivers/video/tegra/host/debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+
+#include "dev.h"
+
+/* host1x master used by nvhost_debug_dump(); set in nvhost_debug_init(). */
+static struct nvhost_master *debug_master;
+
+/* Decoder state: expecting a command word, or data words belonging to
+ * the previous command. */
+enum {
+	NVHOST_DBG_STATE_CMD = 0,
+	NVHOST_DBG_STATE_DATA = 1,
+};
+
+/*
+ * Decode and print one host1x command word.  Returns the decoder state
+ * for the following words: DATA (with *count payload words expected)
+ * or CMD.
+ *
+ * Fix vs. original: the GATHER opcode printed bit 15 for both the
+ * "insert" and "type" fields; "type" is bit 14 (count occupies bits
+ * 13:0), matching the mainline host1x decoder.
+ */
+static int nvhost_debug_handle_cmd(struct seq_file *s, u32 val, int *count)
+{
+	unsigned mask;
+	unsigned subop;
+
+	switch (val >> 28) {
+	case 0x0:
+		mask = val & 0x3f;
+		if (mask) {
+			seq_printf(s, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+				   val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+			*count = hweight8(mask);
+			return NVHOST_DBG_STATE_DATA;
+		} else {
+			seq_printf(s, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+			return NVHOST_DBG_STATE_CMD;
+		}
+
+	case 0x1:
+		seq_printf(s, "INCR(offset=%03x, [", val >> 16 & 0xfff);
+		*count = val & 0xffff;
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x2:
+		seq_printf(s, "NONINCR(offset=%03x, [", val >> 16 & 0xfff);
+		*count = val & 0xffff;
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x3:
+		mask = val & 0xffff;
+		seq_printf(s, "MASK(offset=%03x, mask=%03x, [",
+			   val >> 16 & 0xfff, mask);
+		*count = hweight16(mask);
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0x4:
+		seq_printf(s, "IMM(offset=%03x, data=%03x)\n",
+			   val >> 16 & 0x3ff, val & 0xffff);
+		return NVHOST_DBG_STATE_CMD;
+
+	case 0x5:
+		seq_printf(s, "RESTART(offset=%08x)\n", val << 4);
+		return NVHOST_DBG_STATE_CMD;
+
+	case 0x6:
+		seq_printf(s, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+			   val >> 16 & 0x3ff, val >> 15 & 0x1, val >> 14 & 0x1,
+			   val & 0x3fff);
+		*count = 1; /* one payload word: the gather base address */
+		return NVHOST_DBG_STATE_DATA;
+
+	case 0xe:
+		subop = val >> 24 & 0xf;
+		if (subop == 0)
+			seq_printf(s, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff);
+		else if (subop == 1)
+			seq_printf(s, "RELEASE_MLOCK(index=%d)\n", val & 0xff);
+		else
+			seq_printf(s, "EXTEND_UNKNOWN(%08x)\n", val);
+
+		return NVHOST_DBG_STATE_CMD;
+
+	case 0xf:
+		seq_printf(s, "DONE()\n");
+		return NVHOST_DBG_STATE_CMD;
+
+	default:
+		return NVHOST_DBG_STATE_CMD;
+	}
+}
+
+/*
+ * Feed one 32-bit word to the stream decoder.  In CMD state the word
+ * is decoded as an opcode (prefixed "<channel>: <addr>: <value>:"); in
+ * DATA state it is printed as payload of the previous opcode until
+ * *count words have been consumed.
+ */
+static void nvhost_debug_handle_word(struct seq_file *s, int *state, int *count,
+		unsigned long addr, int channel, u32 val)
+{
+	switch (*state) {
+	case NVHOST_DBG_STATE_CMD:
+		if (addr)
+			seq_printf(s, "%d: %08lx: %08x:", channel, addr, val);
+		else
+			seq_printf(s, "%d: %08x:", channel, val);
+
+		*state = nvhost_debug_handle_cmd(s, val, count);
+		/* zero-payload commands complete immediately */
+		if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
+			*state = NVHOST_DBG_STATE_CMD;
+			seq_printf(s, "])\n");
+		}
+		break;
+
+	case NVHOST_DBG_STATE_DATA:
+		(*count)--;
+		seq_printf(s, "%08x%s", val, *count > 0 ? ", " : "])\n");
+		if (*count == 0)
+			*state = NVHOST_DBG_STATE_CMD;
+		break;
+	}
+}
+
+/* Raw dump of the two HOST1X_SYNC register ranges, four words per line. */
+static void nvhost_sync_reg_dump(struct seq_file *s)
+{
+	struct nvhost_master *m = s->private;
+	int i;
+
+	/* print HOST1X_SYNC regs 4 per line (from 0x3000 -> 0x31E0) */
+	for (i = 0; i <= 0x1E0; i += 4) {
+		if ((i & 0xF) == 0x0)
+			seq_printf(s, "\n0x%08x : ", i);
+		seq_printf(s, "%08x ", readl(m->sync_aperture + i));
+	}
+
+	seq_printf(s, "\n\n");
+
+	/* print HOST1X_SYNC regs 4 per line (from 0x3340 -> 0x3774) */
+	for (i = 0x340; i <= 0x774; i += 4) {
+		if ((i & 0xF) == 0x0)
+			seq_printf(s, "\n0x%08x : ", i);
+		seq_printf(s, "%08x ", readl(m->sync_aperture + i));
+	}
+}
+
+/*
+ * debugfs show: dump mlock owners, active syncpoints and per-channel
+ * DMA state, decoding any in-flight gather and each channel's command
+ * FIFO, then dump the raw sync registers.
+ *
+ * Fixes vs. original: the mlock owner channel was computed with
+ * "(owner >> 8) * 0xff" instead of "& 0xff"; "count" is now zeroed so
+ * it is never handed to the decoder uninitialized; the fifostat test
+ * is properly parenthesized.
+ */
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+	struct nvhost_master *m = s->private;
+	int i;
+
+	/* keep host1x powered/clocked while we read its registers */
+	nvhost_module_busy(&m->mod);
+
+	seq_printf(s, "---- mlocks ----\n");
+	for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
+		u32 owner = readl(m->sync_aperture + HOST1X_SYNC_MLOCK_OWNER_0 + i * 4);
+		if (owner & 0x1)
+			seq_printf(s, "%d: locked by channel %d\n", i, (owner >> 8) & 0xff);
+		else if (owner & 0x2)
+			seq_printf(s, "%d: locked by cpu\n", i);
+		else
+			seq_printf(s, "%d: unlocked\n", i);
+	}
+	seq_printf(s, "\n---- syncpts ----\n");
+	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+		u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
+		if (!max)
+			continue; /* never used; skip */
+		seq_printf(s, "id %d (%s) min %d max %d\n",
+			   i, nvhost_syncpt_name(i),
+			   nvhost_syncpt_update_min(&m->syncpt, i), max);
+
+	}
+
+	seq_printf(s, "\n---- channels ----\n");
+	for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+		void __iomem *regs = m->channels[i].aperture;
+		u32 dmaput, dmaget, dmactrl;
+		u32 cbstat, cbread;
+		u32 fifostat;
+		u32 val, base, offset;
+		unsigned start, end;
+		unsigned wr_ptr, rd_ptr;
+		int state;
+		int count = 0;
+		u32 phys_addr, size;
+
+		dmaput = readl(regs + HOST1X_CHANNEL_DMAPUT);
+		dmaget = readl(regs + HOST1X_CHANNEL_DMAGET);
+		dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);
+		cbread = readl(m->aperture + HOST1X_SYNC_CBREAD(i));
+		cbstat = readl(m->aperture + HOST1X_SYNC_CBSTAT(i));
+
+		seq_printf(s, "%d-%s (%d): ", i, m->channels[i].mod.name,
+			   atomic_read(&m->channels[i].mod.refcount));
+
+		if (dmactrl != 0x0 || !m->channels[i].cdma.push_buffer.mapped) {
+			seq_printf(s, "inactive\n\n");
+			continue;
+		}
+
+		/* describe what the class bus is blocked on, if anything */
+		switch (cbstat) {
+		case 0x00010008: /* HOST_WAIT_SYNCPT */
+			seq_printf(s, "waiting on syncpt %d val %d\n",
+				   cbread >> 24, cbread & 0xffffff);
+			break;
+
+		case 0x00010009: /* HOST_WAIT_SYNCPT_BASE */
+			base = cbread >> 15 & 0xf;
+			offset = cbread & 0xffff;
+
+			val = readl(m->aperture + HOST1X_SYNC_SYNCPT_BASE(base)) & 0xffff;
+			val += offset;
+
+			seq_printf(s, "waiting on syncpt %d val %d (base %d, offset %d)\n",
+				   cbread >> 24, val, base, offset);
+			break;
+
+		default:
+			seq_printf(s, "active class %02x, offset %04x, val %08x\n",
+				   cbstat >> 16, cbstat & 0xffff, cbread);
+			break;
+		}
+
+		nvhost_cdma_find_gather(&m->channels[i].cdma, dmaget, &phys_addr, &size);
+
+		/* If dmaget is in the pushbuffer (should always be?),
+		 * check if we're executing a fetch, and if so dump
+		 * it. */
+		if (size) {
+			u32 map_base = phys_addr & PAGE_MASK;
+			u32 map_size = (size * 4 + PAGE_SIZE - 1) & PAGE_MASK;
+			u32 map_offset = phys_addr - map_base;
+			void *map_addr = ioremap_nocache(map_base, map_size);
+
+			if (map_addr) {
+				u32 ii;
+
+				seq_printf(s, "\n%d: gather (%d words)\n", i, size);
+				state = NVHOST_DBG_STATE_CMD;
+				for (ii = 0; ii < size; ii++) {
+					val = readl(map_addr + map_offset + ii*sizeof(u32));
+					nvhost_debug_handle_word(s, &state, &count, phys_addr + ii, i, val);
+				}
+				iounmap(map_addr);
+			}
+		}
+
+		fifostat = readl(regs + HOST1X_CHANNEL_FIFOSTAT);
+		/* presumably bit 10 is the command-FIFO-empty flag -- confirm
+		 * against the FIFOSTAT register spec */
+		if ((fifostat & (1 << 10)) == 0) {
+
+			seq_printf(s, "\n%d: fifo:\n", i);
+			writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+			writel(1 << 31 | i << 16, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+			rd_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) & 0x1ff;
+			wr_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) >> 16 & 0x1ff;
+
+			start = readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) & 0x1ff;
+			end = (readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) >> 16) & 0x1ff;
+
+			state = NVHOST_DBG_STATE_CMD;
+
+			/* peek each FIFO slot, wrapping at the ring bounds */
+			do {
+				writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+				writel(1 << 31 | i << 16 | rd_ptr, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+				val = readl(m->aperture + HOST1X_SYNC_CFPEEK_READ);
+
+				nvhost_debug_handle_word(s, &state, &count, 0, i, val);
+
+				if (rd_ptr == end)
+					rd_ptr = start;
+				else
+					rd_ptr++;
+
+
+			} while (rd_ptr != wr_ptr);
+
+			if (state == NVHOST_DBG_STATE_DATA)
+				seq_printf(s, ", ...])\n");
+		}
+
+		seq_printf(s, "\n");
+	}
+
+	nvhost_sync_reg_dump(s);
+
+	nvhost_module_idle(&m->mod);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs open: bind nvhost_debug_show to the master stashed in
+ * i_private by debugfs_create_file(). */
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+	.open = nvhost_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Expose /sys/kernel/debug/tegra_host and remember the master for
+ * nvhost_debug_dump(). */
+void nvhost_debug_init(struct nvhost_master *master)
+{
+	debug_master = master;
+	debugfs_create_file("tegra_host", S_IRUGO, NULL, master, &nvhost_debug_fops);
+}
+#else
+/* No debugfs: still remember the master so nvhost_debug_dump() works. */
+void nvhost_debug_init(struct nvhost_master *master)
+{
+	debug_master = master;
+}
+
+#endif
+
+/* Scratch buffer for dumps from non-debugfs contexts.
+ * NOTE(review): not lock-protected -- concurrent dumps would
+ * interleave; presumably only invoked from one context at a time. */
+static char nvhost_debug_dump_buff[16 * 1024];
+
+/*
+ * Render the same report as the debugfs file into the static buffer
+ * and emit it to the kernel log in 256-byte chunks (printk has a
+ * limited per-call buffer).
+ */
+void nvhost_debug_dump(void)
+{
+	struct seq_file s;
+	int i;
+	char c;
+
+	/* fake up a seq_file around the static buffer */
+	memset(&s, 0x0, sizeof(s));
+
+	s.buf = nvhost_debug_dump_buff;
+	s.size = sizeof(nvhost_debug_dump_buff);
+	s.private = debug_master;
+
+	nvhost_debug_show(&s, NULL);
+
+	i = 0;
+	while (i < s.count ) {
+		if ((s.count - i) > 256) {
+			/* temporarily NUL-terminate this 256-byte chunk */
+			c = s.buf[i + 256];
+			s.buf[i + 256] = 0;
+			printk("%s", s.buf + i);
+			s.buf[i + 256] = c;
+		} else {
+			printk("%s", s.buf + i);
+		}
+		i += 256;
+	}
+}
+
--- /dev/null
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "dev.h"
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#define DRIVER_NAME "tegra_grhost"
+#define IFACE_NAME "nvhost"
+
+/* char-device numbering for the nvhost nodes */
+static int nvhost_major = NVHOST_MAJOR;
+static int nvhost_minor = NVHOST_CHANNEL_BASE;
+
+/*
+ * Per-open state of a channel device node.  write() streams a submit
+ * (header, cmdbufs, relocations, wait checks) into this context; the
+ * flush ioctl hands the accumulated batch to the hardware.
+ */
+struct nvhost_channel_userctx {
+	struct nvhost_channel *ch;
+	struct nvhost_hwctx *hwctx;	/* per-client HW context, may be NULL */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	/* remaining items announced by the submit header */
+	u32 cmdbufs_pending;
+	u32 relocs_pending;
+	u32 waitchk_pending;
+	u32 waitchk_ref;
+	struct nvmap_handle_ref *gather_mem;
+	struct nvhost_op_pair *gathers;	/* CPU mapping of gather_mem */
+	int num_gathers;
+	int pinarray_size;
+	struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
+	struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
+	struct nvmap_client *nvmap;
+	struct nvhost_waitchk waitchks[NVHOST_MAX_WAIT_CHECKS];
+	u32 num_waitchks;
+	u32 waitchk_mask;
+};
+
+/* Per-open state of the control node: tracks which module locks this
+ * client holds so they can be released on close. */
+struct nvhost_ctrl_userctx {
+	struct nvhost_master *dev;
+	u32 mod_locks[NV_HOST1X_NB_MLOCKS];
+};
+
+/*
+ * Release a channel fd: drop the channel reference, free the hardware
+ * context, unmap/free the gather buffer and put the nvmap client.
+ * Also used as the unwind path of nvhost_channelopen(), so every
+ * member may legitimately be NULL/IS_ERR here.
+ */
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+
+	filp->private_data = NULL;
+
+	nvhost_putchannel(priv->ch, priv->hwctx);
+
+	if (priv->hwctx)
+		priv->ch->ctxhandler.put(priv->hwctx);
+
+	if (priv->gathers)
+		nvmap_munmap(priv->gather_mem, priv->gathers);
+
+	if (!IS_ERR_OR_NULL(priv->gather_mem))
+		nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);
+
+	nvmap_client_put(priv->nvmap);
+	kfree(priv);
+	return 0;
+}
+
+/*
+ * Open a channel node: take a channel reference, allocate the per-fd
+ * context, the gather buffer and (if the channel provides one) a
+ * hardware context.
+ *
+ * Fix vs. original: nvmap_mmap() failure is now detected instead of
+ * returning success with a NULL ->gathers pointer, which would have
+ * oopsed on the first submit.
+ */
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_channel_userctx *priv;
+	struct nvhost_channel *ch;
+	size_t gather_size;
+
+	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+	ch = nvhost_getchannel(ch);
+	if (!ch)
+		return -ENOMEM;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		nvhost_putchannel(ch, NULL);
+		return -ENOMEM;
+	}
+	filp->private_data = priv;
+	priv->ch = ch;
+	gather_size = sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS;
+	priv->gather_mem = nvmap_alloc(ch->dev->nvmap, gather_size, 32,
+				       NVMAP_HANDLE_CACHEABLE);
+	if (IS_ERR(priv->gather_mem))
+		goto fail;
+
+	if (ch->ctxhandler.alloc) {
+		priv->hwctx = ch->ctxhandler.alloc(ch);
+		if (!priv->hwctx)
+			goto fail;
+	}
+
+	priv->gathers = (struct nvhost_op_pair *)nvmap_mmap(priv->gather_mem);
+	if (!priv->gathers)
+		goto fail;
+
+	return 0;
+fail:
+	/* channelrelease handles every partially-initialized state */
+	nvhost_channelrelease(inode, filp);
+	return -ENOMEM;
+}
+
+/*
+ * Queue gather #idx: write its GATHER opcode now and record a
+ * pin-array entry so nvmap patches the gather's physical address
+ * (op2) once mem_id is pinned at submit time.
+ */
+static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
+		u32 mem_id, u32 words, u32 offset)
+{
+	struct nvmap_pinarray_elem *pin;
+	pin = &ctx->pinarray[ctx->pinarray_size++];
+	pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
+	pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
+		offsetof(struct nvhost_op_pair, op2);
+	pin->pin_mem = mem_id;
+	pin->pin_offset = offset;
+	ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
+}
+
+/* Discard any partially received submit stream after an error. */
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+	ctx->cmdbufs_pending = 0;
+	ctx->relocs_pending = 0;
+	ctx->waitchk_pending = 0;
+}
+
+/*
+ * write() for channel nodes.  Userspace streams a nvhost_submit_hdr
+ * followed by the announced numbers of cmdbufs, relocations and wait
+ * checks; the pending counters carry state across calls until the
+ * flush ioctl submits the batch.  Returns bytes consumed, or a
+ * negative errno (after resetting the submit state).
+ */
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+		size_t count, loff_t *offp)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	size_t remaining = count;
+	int err = 0;
+
+	while (remaining) {
+		size_t consumed;
+		if (!priv->relocs_pending && !priv->cmdbufs_pending && !priv->waitchk_pending) {
+			consumed = sizeof(struct nvhost_submit_hdr);
+			if (remaining < consumed)
+				break;
+			/* NOTE(review): copies a whole nvhost_submit_hdr over
+			 * the syncpt_id..waitchk_ref members -- relies on the
+			 * struct field order matching the header layout;
+			 * confirm against <mach/nvhost.h>. */
+			if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			/* a submit with no cmdbufs is malformed */
+			if (!priv->cmdbufs_pending) {
+				err = -EFAULT;
+				break;
+			}
+			/* leave room for ctx switch */
+			priv->num_gathers = 2;
+			priv->pinarray_size = 0;
+			priv->waitchk_mask |= priv->waitchk_ref;
+		} else if (priv->cmdbufs_pending) {
+			struct nvhost_cmdbuf cmdbuf;
+			consumed = sizeof(cmdbuf);
+			if (remaining < consumed)
+				break;
+			if (copy_from_user(&cmdbuf, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			add_gather(priv, priv->num_gathers++,
+				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+			priv->cmdbufs_pending--;
+		} else if (priv->relocs_pending) {
+			/* relocations can be copied in bulk */
+			int numrelocs = remaining / sizeof(struct nvhost_reloc);
+			if (!numrelocs)
+				break;
+			numrelocs = min_t(int, numrelocs, priv->relocs_pending);
+			consumed = numrelocs * sizeof(struct nvhost_reloc);
+			if (copy_from_user(&priv->pinarray[priv->pinarray_size],
+					buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			priv->pinarray_size += numrelocs;
+			priv->relocs_pending -= numrelocs;
+		} else if (priv->waitchk_pending) {
+			struct nvhost_waitchk *waitp;
+			consumed = sizeof(struct nvhost_waitchk);
+			if (remaining < consumed)
+				break;
+			waitp = &priv->waitchks[priv->num_waitchks];
+			if (copy_from_user(waitp, buf, consumed)) {
+				err = -EFAULT;
+				break;
+			}
+			priv->num_waitchks++;
+			priv->waitchk_pending--;
+		} else {
+			err = -EFAULT;
+			break;
+		}
+		remaining -= consumed;
+		buf += consumed;
+	}
+
+	if (err < 0) {
+		dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
+		reset_submit(priv);
+		return err;
+	}
+
+	return (count - remaining);
+}
+
+/*
+ * Submit the gathers accumulated by nvhost_channelwrite() to the channel.
+ * Prepends context save/restore gathers if the channel's current hardware
+ * context differs from the caller's, pins all referenced nvmap handles,
+ * and returns the absolute syncpoint fence value in args->value.
+ */
+static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
+	struct nvhost_get_param_args *args)
+{
+	struct nvhost_cpuinterrupt ctxsw;
+	int gather_idx = 2;
+	int num_intrs = 0;
+	u32 syncval;
+	int num_unpin;
+	int err;
+
+	/* a partially-streamed submit means userspace lost sync with us */
+	if (ctx->relocs_pending || ctx->cmdbufs_pending || ctx->waitchk_pending) {
+		reset_submit(ctx);
+		dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
+		return -EFAULT;
+	}
+	if (!ctx->nvmap) {
+		dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
+		return -EFAULT;
+	}
+	/* slots 0 and 1 are reserved for ctx save/restore; nothing to do */
+	if (ctx->num_gathers <= 2)
+		return 0;
+
+	/* keep module powered */
+	nvhost_module_busy(&ctx->ch->mod);
+
+	/* pin mem handles and patch physical addresses */
+	num_unpin = nvmap_pin_array(ctx->nvmap,
+				    nvmap_ref_to_handle(ctx->gather_mem),
+				    ctx->pinarray, ctx->pinarray_size,
+				    ctx->unpinarray);
+	if (num_unpin < 0) {
+		dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: "
+			"%d\n", num_unpin);
+		nvhost_module_idle(&ctx->ch->mod);
+		return num_unpin;
+	}
+
+	/* get submit lock */
+	err = mutex_lock_interruptible(&ctx->ch->submitlock);
+	if (err) {
+		nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
+		nvhost_module_idle(&ctx->ch->mod);
+		return err;
+	}
+
+	/* remove stale waits */
+	if (ctx->num_waitchks) {
+		err = nvhost_syncpt_wait_check(ctx->nvmap,
+				&ctx->ch->dev->syncpt, ctx->waitchk_mask,
+				ctx->waitchks, ctx->num_waitchks);
+		if (err) {
+			dev_warn(&ctx->ch->dev->pdev->dev,
+				 "nvhost_syncpt_wait_check failed: %d\n", err);
+			mutex_unlock(&ctx->ch->submitlock);
+			nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
+			nvhost_module_idle(&ctx->ch->mod);
+			return err;
+		}
+		ctx->num_waitchks = 0;
+		ctx->waitchk_mask = 0;
+	}
+
+	/* context switch: fill the reserved gather slots back-to-front */
+	if (ctx->ch->cur_ctx != ctx->hwctx) {
+		struct nvhost_hwctx *hw = ctx->hwctx;
+		if (hw && hw->valid) {
+			gather_idx--;
+			ctx->gathers[gather_idx].op1 =
+				nvhost_opcode_gather(0, hw->restore_size);
+			ctx->gathers[gather_idx].op2 = hw->restore_phys;
+			ctx->syncpt_incrs += hw->restore_incrs;
+		}
+		hw = ctx->ch->cur_ctx;
+		if (hw) {
+			gather_idx--;
+			ctx->gathers[gather_idx].op1 =
+				nvhost_opcode_gather(0, hw->save_size);
+			ctx->gathers[gather_idx].op2 = hw->save_phys;
+			ctx->syncpt_incrs += hw->save_incrs;
+			num_intrs = 1;
+			ctxsw.syncpt_val = hw->save_incrs - 1;
+			ctxsw.intr_data = hw;
+			hw->valid = true;
+			ctx->ch->ctxhandler.get(hw);
+		}
+		ctx->ch->cur_ctx = ctx->hwctx;
+	}
+
+	/* add a setclass for modules that require it */
+	if (gather_idx == 2 && ctx->ch->desc->class) {
+		gather_idx--;
+		ctx->gathers[gather_idx].op1 =
+			nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
+		ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
+	}
+
+	/* get absolute sync value */
+	if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
+		syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
+						ctx->syncpt_id, ctx->syncpt_incrs);
+	else
+		syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
+						ctx->syncpt_id, ctx->syncpt_incrs);
+
+	/*
+	 * Patch the absolute syncpt value into the interrupt trigger.
+	 * Only valid when a context save was queued above (num_intrs != 0);
+	 * previously ctxsw.syncpt_val was read uninitialized here otherwise.
+	 */
+	if (num_intrs)
+		ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
+
+	nvhost_channel_submit(ctx->ch, ctx->nvmap, &ctx->gathers[gather_idx],
+			ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
+			ctx->unpinarray, num_unpin,
+			ctx->syncpt_id, syncval);
+
+	/* schedule a submit complete interrupt */
+	nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
+			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
+
+	mutex_unlock(&ctx->ch->submitlock);
+	args->value = syncval;
+	return 0;
+}
+
+/*
+ * Channel-device ioctl dispatcher. Copies the argument in through a
+ * stack buffer, dispatches on cmd, and copies the result back out for
+ * _IOC_READ commands. Returns 0 or a negative errno.
+ */
+static long nvhost_channelctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_channel_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+	    (_IOC_NR(cmd) == 0) ||
+	    (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
+		return -EFAULT;
+
+	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVHOST_IOCTL_CHANNEL_FLUSH:
+		err = nvhost_ioctl_channel_flush(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+		/* host syncpt ID is used by the RM (and never be given out) */
+		BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->syncpts;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->waitbases;
+		break;
+	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+		((struct nvhost_get_param_args *)buf)->value =
+			priv->ch->desc->modulemutexes;
+		break;
+	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+	{
+		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+		struct nvmap_client *new_client = nvmap_client_get_file(fd);
+
+		if (IS_ERR(new_client)) {
+			err = PTR_ERR(new_client);
+			break;
+		}
+
+		/* drop the previously attached client, if any */
+		if (priv->nvmap)
+			nvmap_client_put(priv->nvmap);
+
+		priv->nvmap = new_client;
+		break;
+	}
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
+		/*
+		 * copy_to_user() returns the number of bytes NOT copied, not
+		 * an errno; returning that directly would hand userspace a
+		 * positive "success-looking" value. Convert to -EFAULT.
+		 */
+		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
+/*
+ * File operations for the per-channel character devices.
+ * NOTE(review): could be declared const (cdev_init takes a const pointer).
+ */
+static struct file_operations nvhost_channelops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_channelrelease,
+	.open = nvhost_channelopen,
+	.write = nvhost_channelwrite,
+	.unlocked_ioctl = nvhost_channelctl
+};
+
+/* Release a ctrl-device client: drop any refs/locks it still holds. */
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	int i;
+
+	filp->private_data = NULL;
+	/* mod_locks[0] is a host1x power refcount, not a hardware mlock */
+	if (priv->mod_locks[0])
+		nvhost_module_idle(&priv->dev->mod);
+	/* release any hardware module mutexes this client left locked */
+	for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
+		if (priv->mod_locks[i])
+			nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+	kfree(priv);
+	return 0;
+}
+
+/* Open the ctrl device: allocate a zeroed per-client context. */
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+	struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
+	struct nvhost_ctrl_userctx *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = host;
+	filp->private_data = priv;
+	return 0;
+}
+
+/* Read the current value of one syncpoint into args->value. */
+static int nvhost_ioctl_ctrl_syncpt_read(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_read_args *args)
+{
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+	return 0;
+}
+
+/* CPU-increment one syncpoint. */
+static int nvhost_ioctl_ctrl_syncpt_incr(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_incr_args *args)
+{
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+	return 0;
+}
+
+/* Block until a syncpoint reaches args->thresh, or the timeout expires. */
+static int nvhost_ioctl_ctrl_syncpt_wait(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_syncpt_wait_args *args)
+{
+	/*
+	 * NOTE(review): timeout is u32 but MAX_SCHEDULE_TIMEOUT is LONG_MAX;
+	 * on 64-bit this truncates — confirm nvhost_syncpt_wait_timeout()'s
+	 * parameter type before relying on "infinite" waits.
+	 */
+	u32 timeout;
+	if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+		return -EINVAL;
+	if (args->timeout == NVHOST_NO_TIMEOUT)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = (u32)msecs_to_jiffies(args->timeout);
+
+	return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+					args->thresh, timeout);
+}
+
+/*
+ * Lock/unlock a module mutex on behalf of this client. id 0 maps to the
+ * host1x power refcount; other ids map to hardware mlocks. The per-client
+ * mod_locks[] bookkeeping lets release() clean up on close.
+ */
+static int nvhost_ioctl_ctrl_module_mutex(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_mutex_args *args)
+{
+	int err = 0;
+	if (args->id >= NV_HOST1X_NB_MLOCKS ||
+	    args->lock > 1)
+		return -EINVAL;
+
+	if (args->lock && !ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_busy(&ctx->dev->mod);
+		else
+			err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
+		if (!err)
+			ctx->mod_locks[args->id] = 1;
+	}
+	else if (!args->lock && ctx->mod_locks[args->id]) {
+		if (args->id == 0)
+			nvhost_module_idle(&ctx->dev->mod);
+		else
+			nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
+		ctx->mod_locks[args->id] = 0;
+	}
+	return err;
+}
+
+/*
+ * Read or write module registers on behalf of userspace. For each of
+ * num_offsets user-supplied register offsets, transfers block_size bytes
+ * between user memory (args->values) and the module's registers, bounced
+ * through a 64-word stack buffer.
+ */
+static int nvhost_ioctl_ctrl_module_regrdwr(
+	struct nvhost_ctrl_userctx *ctx,
+	struct nvhost_ctrl_module_regrdwr_args *args)
+{
+	u32 num_offsets = args->num_offsets;
+	u32 *offsets = args->offsets;
+	void *values = args->values;
+	u32 vals[64];
+
+	if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
+	    (num_offsets == 0))
+		return -EINVAL;
+
+	while (num_offsets--) {
+		u32 remaining = args->block_size;
+		u32 offs;
+		if (get_user(offs, offsets))
+			return -EFAULT;
+		offsets++;
+		while (remaining) {
+			/*
+			 * min_t: remaining is u32 and 64*sizeof(u32) is
+			 * size_t; the kernel's min() macro rejects mixed
+			 * types, so force a common type explicitly.
+			 */
+			u32 batch = min_t(u32, remaining, 64 * sizeof(u32));
+			if (args->write) {
+				if (copy_from_user(vals, values, batch))
+					return -EFAULT;
+				nvhost_write_module_regs(&ctx->dev->cpuaccess,
+							args->id, offs, batch, vals);
+			} else {
+				nvhost_read_module_regs(&ctx->dev->cpuaccess,
+							args->id, offs, batch, vals);
+				if (copy_to_user(values, vals, batch))
+					return -EFAULT;
+			}
+			remaining -= batch;
+			offs += batch;
+			values += batch;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Ctrl-device ioctl dispatcher: syncpoint read/incr/wait, module mutex
+ * and register read/write. Same copy-in/dispatch/copy-out shape as
+ * nvhost_channelctl(). Returns 0 or a negative errno.
+ */
+static long nvhost_ctrlctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct nvhost_ctrl_userctx *priv = filp->private_data;
+	u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+	int err = 0;
+
+	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+	    (_IOC_NR(cmd) == 0) ||
+	    (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
+		return -EFAULT;
+
+	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	switch (cmd) {
+	case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+		err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+		err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+		err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+		err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+		break;
+	case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+		err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
+		/*
+		 * copy_to_user() returns the number of bytes NOT copied, not
+		 * an errno — convert any shortfall to -EFAULT instead of
+		 * returning a positive byte count to userspace.
+		 */
+		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+
+/*
+ * File operations for the host1x ctrl device.
+ * NOTE(review): could be declared const (cdev_init takes a const pointer).
+ */
+static struct file_operations nvhost_ctrlops = {
+	.owner = THIS_MODULE,
+	.release = nvhost_ctrlrelease,
+	.open = nvhost_ctrlopen,
+	.unlocked_ioctl = nvhost_ctrlctl
+};
+
+/*
+ * ACM power callback for the host1x module itself: reconfigure the
+ * interrupt logic on power-up; save syncpoint state and suspend all
+ * channels on power-down.
+ */
+static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+	struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
+
+	if (action == NVHOST_POWER_ACTION_ON) {
+		nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
+	}
+	else if (action == NVHOST_POWER_ACTION_OFF) {
+		int i;
+		for (i = 0; i < NVHOST_NUMCHANNELS; i++)
+			nvhost_channel_suspend(&dev->channels[i]);
+		nvhost_syncpt_save(&dev->syncpt);
+	}
+}
+
+/*
+ * Create the userspace interface: a device class, NVHOST_NUMCHANNELS
+ * per-channel char devices, and one ctrl device at the last minor.
+ * NOTE(review): the fail path does not roll back the class/region/cdevs
+ * already registered — matches the driver's existing probe-time TODO.
+ */
+static int __devinit nvhost_user_init(struct nvhost_master *host)
+{
+	int i, err;
+	/* alloc_chrdev_region() takes a dev_t *, so devno must be dev_t,
+	 * not int — passing &int is an incompatible-pointer bug. */
+	dev_t devno;
+
+	host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+	if (IS_ERR(host->nvhost_class)) {
+		err = PTR_ERR(host->nvhost_class);
+		dev_err(&host->pdev->dev, "failed to create class\n");
+		goto fail;
+	}
+
+	if (nvhost_major) {
+		devno = MKDEV(nvhost_major, nvhost_minor);
+		err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+	} else {
+		err = alloc_chrdev_region(&devno, nvhost_minor,
+					NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+		nvhost_major = MAJOR(devno);
+	}
+	if (err < 0) {
+		dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
+		goto fail;
+	}
+
+	for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+		struct nvhost_channel *ch = &host->channels[i];
+
+		cdev_init(&ch->cdev, &nvhost_channelops);
+		ch->cdev.owner = THIS_MODULE;
+
+		devno = MKDEV(nvhost_major, nvhost_minor + i);
+		err = cdev_add(&ch->cdev, devno, 1);
+		if (err < 0) {
+			dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
+			goto fail;
+		}
+		ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
+				IFACE_NAME "-%s", ch->desc->name);
+		if (IS_ERR(ch->node)) {
+			err = PTR_ERR(ch->node);
+			dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
+			goto fail;
+		}
+	}
+
+	/* the ctrl device sits at the minor just past the channels */
+	cdev_init(&host->cdev, &nvhost_ctrlops);
+	host->cdev.owner = THIS_MODULE;
+	devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
+	err = cdev_add(&host->cdev, devno, 1);
+	if (err < 0)
+		goto fail;
+	host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+			IFACE_NAME "-ctrl");
+	if (IS_ERR(host->ctrl)) {
+		err = PTR_ERR(host->ctrl);
+		dev_err(&host->pdev->dev, "failed to create ctrl device\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+/*
+ * Probe the host1x platform device: map registers, create the nvmap
+ * client, init channels, interrupts, the user interface and the ACM
+ * module, then reset syncpoints and register with the nvhost bus.
+ */
+static int __devinit nvhost_probe(struct platform_device *pdev)
+{
+	struct nvhost_master *host;
+	struct resource *regs, *intr0, *intr1;
+	int i, err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+	if (!regs || !intr0 || !intr1) {
+		dev_err(&pdev->dev, "missing required platform resources\n");
+		return -ENXIO;
+	}
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->pdev = pdev;
+
+	host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
+	if (!host->nvmap) {
+		dev_err(&pdev->dev, "unable to create nvmap client\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	host->reg_mem = request_mem_region(regs->start,
+					resource_size(regs), pdev->name);
+	if (!host->reg_mem) {
+		dev_err(&pdev->dev, "failed to get host register memory\n");
+		err = -ENXIO;
+		goto fail;
+	}
+	host->aperture = ioremap(regs->start, resource_size(regs));
+	if (!host->aperture) {
+		dev_err(&pdev->dev, "failed to remap host registers\n");
+		err = -ENXIO;
+		goto fail;
+	}
+	/* sync registers live inside channel 0's aperture */
+	host->sync_aperture = host->aperture +
+		(NV_HOST1X_CHANNEL0_BASE +
+			HOST1X_CHANNEL_SYNC_REG_BASE);
+
+	for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+		struct nvhost_channel *ch = &host->channels[i];
+		err = nvhost_channel_init(ch, host, i);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to init channel %d\n", i);
+			goto fail;
+		}
+	}
+
+	err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
+	if (err) goto fail;
+	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+	if (err) goto fail;
+	err = nvhost_user_init(host);
+	if (err) goto fail;
+	err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
+	if (err) goto fail;
+
+	platform_set_drvdata(pdev, host);
+
+	/* clock must be on while hardware syncpoint state is written */
+	clk_enable(host->mod.clk[0]);
+	nvhost_syncpt_reset(&host->syncpt);
+	clk_disable(host->mod.clk[0]);
+
+	nvhost_bus_register(host);
+
+	nvhost_debug_init(host);
+
+	dev_info(&pdev->dev, "initialized\n");
+	return 0;
+
+fail:
+	/* NOTE(review): reg_mem/aperture (and later init stages) are not
+	 * released here — see the TODO below; only nvmap is put. */
+	if (host->nvmap)
+		nvmap_client_put(host->nvmap);
+	/* TODO: [ahatala 2010-05-04] */
+	kfree(host);
+	return err;
+}
+
+/* Removal is not supported; resources stay until reboot. */
+static int __exit nvhost_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* Power down modules and save syncpoint state before system suspend. */
+static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct nvhost_master *host = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "suspending\n");
+	nvhost_module_suspend(&host->mod);
+	clk_enable(host->mod.clk[0]);
+	nvhost_syncpt_save(&host->syncpt);
+	clk_disable(host->mod.clk[0]);
+	dev_info(&pdev->dev, "suspended\n");
+	return 0;
+}
+
+/* Restore hardware syncpoint state after system resume. */
+static int nvhost_resume(struct platform_device *pdev)
+{
+	struct nvhost_master *host = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "resuming\n");
+	clk_enable(host->mod.clk[0]);
+	nvhost_syncpt_reset(&host->syncpt);
+	clk_disable(host->mod.clk[0]);
+	dev_info(&pdev->dev, "resumed\n");
+	return 0;
+}
+
+/* Platform driver glue; .probe is supplied via platform_driver_probe(). */
+static struct platform_driver nvhost_driver = {
+	.remove = __exit_p(nvhost_remove),
+	.suspend = nvhost_suspend,
+	.resume = nvhost_resume,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME
+	}
+};
+
+static int __init nvhost_mod_init(void)
+{
+	/* platform_driver_probe(): probe once, non-hotpluggable device */
+	return platform_driver_probe(&nvhost_driver, nvhost_probe);
+}
+
+static void __exit nvhost_mod_exit(void)
+{
+	platform_driver_unregister(&nvhost_driver);
+}
+
+module_init(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform-nvhost");
--- /dev/null
+/*
+ * drivers/video/tegra/host/dev.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_DEV_H
+#define __NVHOST_DEV_H
+#include "nvhost_acm.h"
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+#include "nvhost_cpuaccess.h"
+#include "nvhost_channel.h"
+#include "nvhost_hardware.h"
+
+#define NVHOST_MAJOR 0 /* dynamic */
+
+/* Top-level state for the host1x hardware instance. */
+struct nvhost_master {
+	void __iomem *aperture;		/* mapped host1x register space */
+	void __iomem *sync_aperture;	/* sync regs within channel 0 */
+	struct resource *reg_mem;	/* claimed register mem region */
+	struct platform_device *pdev;
+	struct class *nvhost_class;	/* device class for chan/ctrl nodes */
+	struct cdev cdev;		/* the ctrl char device */
+	struct device *ctrl;		/* the ctrl device node */
+	struct nvhost_syncpt syncpt;
+	struct nvmap_client *nvmap;	/* kernel-owned nvmap client */
+	struct nvhost_cpuaccess cpuaccess;
+	struct nvhost_intr intr;
+	struct nvhost_module mod;	/* ACM state for host1x itself */
+	struct nvhost_channel channels[NVHOST_NUMCHANNELS];
+};
+
+void nvhost_debug_init(struct nvhost_master *master);
+void nvhost_debug_dump(void);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_3dctx.c
+ *
+ * Tegra Graphics Host 3d hardware context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+
+#include <linux/slab.h>
+
+/*
+ * 3D register ranges captured on context save and replayed on restore.
+ * Order matters: the save command stream, the restore command stream and
+ * the FIFO read-back in ctx3d_save_service() all walk this table in the
+ * same sequence. INDIRECT_OFFSET entries are immediately followed by
+ * their INDIRECT_DATA entry carrying the real word count.
+ */
+const struct hwctx_reginfo ctxsave_regs_3d[] = {
+	HWCTX_REGINFO(0xe00, 16, DIRECT),
+	HWCTX_REGINFO(0xe10, 16, DIRECT),
+	HWCTX_REGINFO(0xe20, 1, DIRECT),
+	HWCTX_REGINFO(0xe21, 1, DIRECT),
+	HWCTX_REGINFO(0xe22, 1, DIRECT),
+	HWCTX_REGINFO(0xe25, 1, DIRECT),
+	HWCTX_REGINFO(0xe26, 1, DIRECT),
+	HWCTX_REGINFO(0xe28, 2, DIRECT),
+	HWCTX_REGINFO(0xe2a, 1, DIRECT),
+	HWCTX_REGINFO(0x1, 1, DIRECT),
+	HWCTX_REGINFO(0x2, 1, DIRECT),
+	HWCTX_REGINFO(0xc, 2, DIRECT),
+	HWCTX_REGINFO(0xe, 2, DIRECT),
+	HWCTX_REGINFO(0x10, 2, DIRECT),
+	HWCTX_REGINFO(0x12, 2, DIRECT),
+	HWCTX_REGINFO(0x14, 2, DIRECT),
+	HWCTX_REGINFO(0x100, 32, DIRECT),
+	HWCTX_REGINFO(0x120, 1, DIRECT),
+	HWCTX_REGINFO(0x121, 1, DIRECT),
+	HWCTX_REGINFO(0x124, 1, DIRECT),
+	HWCTX_REGINFO(0x125, 1, DIRECT),
+	HWCTX_REGINFO(0x200, 1, DIRECT),
+	HWCTX_REGINFO(0x201, 1, DIRECT),
+	HWCTX_REGINFO(0x202, 1, DIRECT),
+	HWCTX_REGINFO(0x203, 1, DIRECT),
+	HWCTX_REGINFO(0x204, 1, DIRECT),
+	HWCTX_REGINFO(0x207, 1024, INDIRECT),
+	HWCTX_REGINFO(0x209, 1, DIRECT),
+	HWCTX_REGINFO(0x300, 64, DIRECT),
+	HWCTX_REGINFO(0x343, 1, DIRECT),
+	HWCTX_REGINFO(0x344, 1, DIRECT),
+	HWCTX_REGINFO(0x345, 1, DIRECT),
+	HWCTX_REGINFO(0x346, 1, DIRECT),
+	HWCTX_REGINFO(0x347, 1, DIRECT),
+	HWCTX_REGINFO(0x348, 1, DIRECT),
+	HWCTX_REGINFO(0x349, 1, DIRECT),
+	HWCTX_REGINFO(0x34a, 1, DIRECT),
+	HWCTX_REGINFO(0x34b, 1, DIRECT),
+	HWCTX_REGINFO(0x34c, 1, DIRECT),
+	HWCTX_REGINFO(0x34d, 1, DIRECT),
+	HWCTX_REGINFO(0x34e, 1, DIRECT),
+	HWCTX_REGINFO(0x34f, 1, DIRECT),
+	HWCTX_REGINFO(0x350, 1, DIRECT),
+	HWCTX_REGINFO(0x351, 1, DIRECT),
+	HWCTX_REGINFO(0x352, 1, DIRECT),
+	HWCTX_REGINFO(0x353, 1, DIRECT),
+	HWCTX_REGINFO(0x354, 1, DIRECT),
+	HWCTX_REGINFO(0x355, 1, DIRECT),
+	HWCTX_REGINFO(0x356, 1, DIRECT),
+	HWCTX_REGINFO(0x357, 1, DIRECT),
+	HWCTX_REGINFO(0x358, 1, DIRECT),
+	HWCTX_REGINFO(0x359, 1, DIRECT),
+	HWCTX_REGINFO(0x35a, 1, DIRECT),
+	HWCTX_REGINFO(0x35b, 1, DIRECT),
+	HWCTX_REGINFO(0x363, 1, DIRECT),
+	HWCTX_REGINFO(0x364, 1, DIRECT),
+	HWCTX_REGINFO(0x400, 2, DIRECT),
+	HWCTX_REGINFO(0x402, 1, DIRECT),
+	HWCTX_REGINFO(0x403, 1, DIRECT),
+	HWCTX_REGINFO(0x404, 1, DIRECT),
+	HWCTX_REGINFO(0x405, 1, DIRECT),
+	HWCTX_REGINFO(0x406, 1, DIRECT),
+	HWCTX_REGINFO(0x407, 1, DIRECT),
+	HWCTX_REGINFO(0x408, 1, DIRECT),
+	HWCTX_REGINFO(0x409, 1, DIRECT),
+	HWCTX_REGINFO(0x40a, 1, DIRECT),
+	HWCTX_REGINFO(0x40b, 1, DIRECT),
+	HWCTX_REGINFO(0x40c, 1, DIRECT),
+	HWCTX_REGINFO(0x40d, 1, DIRECT),
+	HWCTX_REGINFO(0x40e, 1, DIRECT),
+	HWCTX_REGINFO(0x40f, 1, DIRECT),
+	HWCTX_REGINFO(0x411, 1, DIRECT),
+	HWCTX_REGINFO(0x500, 1, DIRECT),
+	HWCTX_REGINFO(0x501, 1, DIRECT),
+	HWCTX_REGINFO(0x502, 1, DIRECT),
+	HWCTX_REGINFO(0x503, 1, DIRECT),
+	HWCTX_REGINFO(0x520, 32, DIRECT),
+	HWCTX_REGINFO(0x540, 64, INDIRECT),
+	HWCTX_REGINFO(0x600, 0, INDIRECT_OFFSET),
+	HWCTX_REGINFO(0x602, 16, INDIRECT_DATA),
+	HWCTX_REGINFO(0x603, 128, INDIRECT),
+	HWCTX_REGINFO(0x608, 4, DIRECT),
+	HWCTX_REGINFO(0x60e, 1, DIRECT),
+	HWCTX_REGINFO(0x700, 64, INDIRECT),
+	HWCTX_REGINFO(0x710, 16, DIRECT),
+	HWCTX_REGINFO(0x720, 32, DIRECT),
+	HWCTX_REGINFO(0x740, 1, DIRECT),
+	HWCTX_REGINFO(0x741, 1, DIRECT),
+	HWCTX_REGINFO(0x800, 0, INDIRECT_OFFSET),
+	HWCTX_REGINFO(0x802, 16, INDIRECT_DATA),
+	HWCTX_REGINFO(0x803, 512, INDIRECT),
+	HWCTX_REGINFO(0x805, 64, INDIRECT),
+	HWCTX_REGINFO(0x820, 32, DIRECT),
+	HWCTX_REGINFO(0x900, 64, INDIRECT),
+	HWCTX_REGINFO(0x902, 1, DIRECT),
+	HWCTX_REGINFO(0x903, 1, DIRECT),
+	HWCTX_REGINFO(0xa02, 1, DIRECT),
+	HWCTX_REGINFO(0xa03, 1, DIRECT),
+	HWCTX_REGINFO(0xa04, 1, DIRECT),
+	HWCTX_REGINFO(0xa05, 1, DIRECT),
+	HWCTX_REGINFO(0xa06, 1, DIRECT),
+	HWCTX_REGINFO(0xa07, 1, DIRECT),
+	HWCTX_REGINFO(0xa08, 1, DIRECT),
+	HWCTX_REGINFO(0xa09, 1, DIRECT),
+	HWCTX_REGINFO(0xa0a, 1, DIRECT),
+	HWCTX_REGINFO(0xa0b, 1, DIRECT),
+	HWCTX_REGINFO(0x205, 1024, INDIRECT)
+};
+
+
+/*** restore ***/
+
+/* words in a restore command stream; computed once by setup_save() */
+static unsigned int context_restore_size = 0;
+
+/* Emit the restore preamble: bump the wait base, switch to the 3D class
+ * and program PSEQ_QUAD_ID. Writes RESTORE_BEGIN_SIZE words at ptr. */
+static void restore_begin(u32 *ptr, u32 waitbase)
+{
+	/* set class to host */
+	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+	/* increment sync point base */
+	ptr[1] = nvhost_class_host_incr_syncpt_base(waitbase, 1);
+	/* set class to 3D */
+	ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	/* program PSEQ_QUAD_ID */
+	ptr[3] = nvhost_opcode_imm(0x545, 0);
+}
+#define RESTORE_BEGIN_SIZE 4
+
+/* Emit the restore epilogue: one OP_DONE syncpt increment. */
+static void restore_end(u32 *ptr, u32 syncpt_id)
+{
+	/* syncpt increment to track restore gather. */
+	ptr[0] = nvhost_opcode_imm(0x0, ((1UL << 8) | (u8)(syncpt_id & 0xff)));
+}
+#define RESTORE_END_SIZE 1
+
+/* Emit an incrementing write of `count` registers from start_reg. */
+static void restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+#define RESTORE_DIRECT_SIZE 1
+
+/* Emit a write of the indirect-offset register. */
+static void restore_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+	ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+}
+#define RESTORE_INDOFFSET_SIZE 1
+
+/* Emit a non-incrementing write of `count` words to the data register. */
+static void restore_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(data_reg, count);
+}
+#define RESTORE_INDDATA_SIZE 1
+
+/*
+ * Drain `count` register values from the channel's output FIFO into ptr,
+ * polling FIFOSTAT for available entries. *pending carries leftover
+ * entry-count state between calls so the FIFO is not re-queried per word.
+ * NOTE(review): busy-waits with no timeout (see embedded TODO).
+ */
+static void restore_registers_from_fifo(u32 *ptr, unsigned int count,
+					struct nvhost_channel *channel,
+					unsigned int *pending)
+{
+	void __iomem *chan_regs = channel->aperture;
+	unsigned int entries = *pending;
+	while (count) {
+		unsigned int num;
+
+		while (!entries) {
+			/* query host for number of entries in fifo */
+			entries = nvhost_channel_fifostat_outfentries(
+				readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
+			if (!entries)
+				cpu_relax();
+			/* TODO: [ahowe 2010-06-14] timeout */
+		}
+		num = min(entries, count);
+		entries -= num;
+		count -= num;
+
+		/* unrolled 4-word reads for the bulk of the transfer */
+		while (num & ~0x3) {
+			u32 arr[4];
+			arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+			arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+			arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+			arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+			memcpy(ptr, arr, 4*sizeof(u32));
+			ptr += 4;
+			num -= 4;
+		}
+		while (num--)
+			*ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+	}
+	*pending = entries;
+}
+
+/*
+ * Build the skeleton of a restore command stream at ptr: preamble,
+ * one write-opcode per table entry (data words are left as gaps to be
+ * filled by ctx3d_save_service()), then the epilogue. Layout must stay
+ * in lockstep with ctx3d_save_service()'s pointer walk.
+ */
+static void setup_restore(u32 *ptr, u32 waitbase)
+{
+	const struct hwctx_reginfo *r;
+	const struct hwctx_reginfo *rend;
+
+	restore_begin(ptr, waitbase);
+	ptr += RESTORE_BEGIN_SIZE;
+
+	r = ctxsave_regs_3d;
+	rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+	for ( ; r != rend; ++r) {
+		u32 offset = r->offset;
+		u32 count = r->count;
+		switch (r->type) {
+		case HWCTX_REGINFO_DIRECT:
+			restore_direct(ptr, offset, count);
+			ptr += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+			restore_indoffset(ptr, offset, 0);
+			ptr += RESTORE_INDOFFSET_SIZE;
+			restore_inddata(ptr, offset + 1, count);
+			ptr += RESTORE_INDDATA_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_OFFSET:
+			restore_indoffset(ptr, offset, count);
+			ptr += RESTORE_INDOFFSET_SIZE;
+			continue; /* INDIRECT_DATA follows with real count */
+		case HWCTX_REGINFO_INDIRECT_DATA:
+			restore_inddata(ptr, offset, count);
+			ptr += RESTORE_INDDATA_SIZE;
+			break;
+		}
+		/* leave `count` words of space for the saved register data */
+		ptr += count;
+	}
+
+	restore_end(ptr, NVSYNCPT_3D);
+	wmb();
+}
+
+/*** save ***/
+
+/* the same context save command sequence is used for all contexts. */
+static struct nvmap_handle_ref *context_save_buf = NULL;
+static u32 context_save_phys = 0;	/* pinned address of the save stream */
+static u32 *context_save_ptr = NULL;	/* CPU mapping of the save stream */
+static unsigned int context_save_size = 0;	/* words in the save stream */
+
+/* Emit the save preamble: flush the 3D pipe, signal the context-read
+ * thread via a syncpt increment, then wait for base+1 before reading. */
+static void save_begin(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+	/* set class to the unit to flush */
+	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	/*
+	 * Flush pipe and signal context read thread to start reading
+	 * sync point increment
+	 */
+	ptr[1] = nvhost_opcode_imm(0, 0x100 | syncpt_id);
+	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+	/* wait for base+1 */
+	ptr[3] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 1);
+	ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+	ptr[5] = nvhost_opcode_imm(0, syncpt_id);
+	ptr[6] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0);
+}
+#define SAVE_BEGIN_SIZE 7
+
+/* Emit an indirect-framed read of `count` registers from start_reg. */
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						start_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+/* Emit a write of `offset` into the module's indirect-offset register. */
+static void save_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_write(NV_HOST_MODULE_GR3D,
+						offset_reg, true);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, 1);
+	ptr[3] = offset;
+}
+#define SAVE_INDOFFSET_SIZE 4
+
+/* Emit a non-auto-incrementing read of `count` words via the indirect
+ * data register (offset stays fixed while data streams out). */
+static inline void save_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+						data_reg, false);
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_INDDDATA_SIZE 3
+
+/* Emit the save epilogue: wait for the context-read service (base+3),
+ * advance the wait base and return to the 3D class. */
+static void save_end(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+	/* Wait for context read service */
+	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+	ptr[1] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 3);
+	/* Increment syncpoint base */
+	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+	ptr[3] = nvhost_class_host_incr_syncpt_base(waitbase, 3);
+	/* set class back to the unit */
+	ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
+/*
+ * Dual-purpose pass over ctxsave_regs_3d: when ptr is NULL, only compute
+ * the save and restore stream sizes (in words) into *words_save and
+ * *words_restore; when ptr is non-NULL, also emit the save command
+ * stream. Both callers in nvhost_3dctx_handler_init() rely on the two
+ * passes producing identical sizes.
+ */
+static void __init setup_save(
+	u32 *ptr, unsigned int *words_save, unsigned int *words_restore,
+	u32 syncpt_id, u32 waitbase)
+{
+	const struct hwctx_reginfo *r;
+	const struct hwctx_reginfo *rend;
+	unsigned int save = SAVE_BEGIN_SIZE + SAVE_END_SIZE;
+	unsigned int restore = RESTORE_BEGIN_SIZE + RESTORE_END_SIZE;
+
+	if (ptr) {
+		save_begin(ptr, syncpt_id, waitbase);
+		ptr += SAVE_BEGIN_SIZE;
+	}
+
+	r = ctxsave_regs_3d;
+	rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+	for ( ; r != rend; ++r) {
+		u32 offset = r->offset;
+		u32 count = r->count;
+		switch (r->type) {
+		case HWCTX_REGINFO_DIRECT:
+			if (ptr) {
+				save_direct(ptr, offset, count);
+				ptr += SAVE_DIRECT_SIZE;
+			}
+			save += SAVE_DIRECT_SIZE;
+			restore += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+			if (ptr) {
+				save_indoffset(ptr, offset, 0);
+				ptr += SAVE_INDOFFSET_SIZE;
+				save_inddata(ptr, offset + 1, count);
+				ptr += SAVE_INDDDATA_SIZE;
+			}
+			save += SAVE_INDOFFSET_SIZE;
+			restore += RESTORE_INDOFFSET_SIZE;
+			save += SAVE_INDDDATA_SIZE;
+			restore += RESTORE_INDDATA_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_OFFSET:
+			if (ptr) {
+				save_indoffset(ptr, offset, count);
+				ptr += SAVE_INDOFFSET_SIZE;
+			}
+			save += SAVE_INDOFFSET_SIZE;
+			restore += RESTORE_INDOFFSET_SIZE;
+			continue; /* INDIRECT_DATA follows with real count */
+		case HWCTX_REGINFO_INDIRECT_DATA:
+			if (ptr) {
+				save_inddata(ptr, offset, count);
+				ptr += SAVE_INDDDATA_SIZE;
+			}
+			save += SAVE_INDDDATA_SIZE;
+			restore += RESTORE_INDDATA_SIZE;
+			break;
+		}
+		/* both streams carry `count` words of register data */
+		if (ptr) {
+			memset(ptr, 0, count * 4);
+			ptr += count;
+		}
+		save += count;
+		restore += count;
+	}
+
+	if (ptr)
+		save_end(ptr, syncpt_id, waitbase);
+
+	if (words_save)
+		*words_save = save;
+	if (words_restore)
+		*words_restore = restore;
+	wmb();
+}
+
+/*** ctx3d ***/
+
+/*
+ * Allocate a 3D hardware context: a per-context restore buffer (mapped,
+ * pre-populated with the restore skeleton, then pinned) plus references
+ * to the shared save stream. Returns NULL on any failure.
+ */
+static struct nvhost_hwctx *ctx3d_alloc(struct nvhost_channel *ch)
+{
+	struct nvhost_hwctx *ctx;
+	struct nvmap_client *nvmap = ch->dev->nvmap;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+	ctx->restore = nvmap_alloc(nvmap, context_restore_size * 4, 32,
+				NVMAP_HANDLE_WRITE_COMBINE);
+
+	if (IS_ERR_OR_NULL(ctx->restore)) {
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->save_cpu_data = nvmap_mmap(ctx->restore);
+	if (!ctx->save_cpu_data) {
+		nvmap_free(nvmap, ctx->restore);
+		kfree(ctx);
+		return NULL;
+	}
+
+	setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
+	ctx->channel = ch;
+	/* NOTE(review): nvmap_pin() return value is not error-checked here */
+	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
+	ctx->restore_size = context_restore_size;
+	ctx->save = context_save_buf;
+	ctx->save_phys = context_save_phys;
+	ctx->save_size = context_save_size;
+	ctx->save_incrs = 3;
+	ctx->restore_incrs = 1;
+	ctx->valid = false;
+	kref_init(&ctx->ref);
+	return ctx;
+}
+
+/* kref release: tear down the per-context restore buffer. */
+static void ctx3d_free(struct kref *ref)
+{
+	struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
+	struct nvmap_client *nvmap = ctx->channel->dev->nvmap;
+
+	nvmap_munmap(ctx->restore, ctx->save_cpu_data);
+	nvmap_unpin(nvmap, ctx->restore);
+	nvmap_free(nvmap, ctx->restore);
+	kfree(ctx);
+}
+
+/* Take a reference on a hardware context. */
+static void ctx3d_get(struct nvhost_hwctx *ctx)
+{
+	kref_get(&ctx->ref);
+}
+
+/* Drop a reference; frees the context when the count hits zero. */
+static void ctx3d_put(struct nvhost_hwctx *ctx)
+{
+	kref_put(&ctx->ref, ctx3d_free);
+}
+
+/*
+ * Context-read service: walk the restore stream skeleton in lockstep
+ * with ctxsave_regs_3d (same switch shape as setup_restore()), skipping
+ * the opcodes and filling each data gap with register values drained
+ * from the channel FIFO. Finishes with a CPU syncpt increment so the
+ * waiting save stream can proceed.
+ */
+static void ctx3d_save_service(struct nvhost_hwctx *ctx)
+{
+	const struct hwctx_reginfo *r;
+	const struct hwctx_reginfo *rend;
+	unsigned int pending = 0;
+	u32 *ptr = (u32 *)ctx->save_cpu_data + RESTORE_BEGIN_SIZE;
+
+	BUG_ON(!ctx->save_cpu_data);
+
+	r = ctxsave_regs_3d;
+	rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+	for ( ; r != rend; ++r) {
+		u32 count = r->count;
+		switch (r->type) {
+		case HWCTX_REGINFO_DIRECT:
+			ptr += RESTORE_DIRECT_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT:
+			ptr += RESTORE_INDOFFSET_SIZE + RESTORE_INDDATA_SIZE;
+			break;
+		case HWCTX_REGINFO_INDIRECT_OFFSET:
+			ptr += RESTORE_INDOFFSET_SIZE;
+			continue; /* INDIRECT_DATA follows with real count */
+		case HWCTX_REGINFO_INDIRECT_DATA:
+			ptr += RESTORE_INDDATA_SIZE;
+			break;
+		}
+		restore_registers_from_fifo(ptr, count, ctx->channel, &pending);
+		ptr += count;
+	}
+
+	/* sanity: we must land exactly at the end of the restore stream */
+	BUG_ON((u32)((ptr + RESTORE_END_SIZE) - (u32*)ctx->save_cpu_data)
+		!= context_restore_size);
+
+	wmb();
+	nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
+}
+
+
+/*** nvhost_3dctx ***/
+
/*
 * One-time init of the 3D context handler: compute the save/restore
 * stream sizes, allocate/map/pin the shared context-save command
 * buffer, write the save stream into it, and install the 3D hwctx ops.
 * Returns 0 on success or a negative errno.
 */
int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
{
	struct nvhost_channel *ch;
	struct nvmap_client *nvmap;

	ch = container_of(h, struct nvhost_channel, ctxhandler);
	nvmap = ch->dev->nvmap;

	/* First pass: sizes only, no buffer to fill yet. */
	setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);

	context_save_buf = nvmap_alloc(nvmap, context_save_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE);

	if (IS_ERR(context_save_buf)) {
		int err = PTR_ERR(context_save_buf);
		context_save_buf = NULL;
		return err;
	}

	context_save_ptr = nvmap_mmap(context_save_buf);
	if (!context_save_ptr) {
		nvmap_free(nvmap, context_save_buf);
		context_save_buf = NULL;
		return -ENOMEM;
	}

	/* NOTE(review): nvmap_pin's result is not checked here, unlike
	 * the pushbuffer pin in nvhost_cdma.c -- confirm it cannot fail
	 * on this path. */
	context_save_phys = nvmap_pin(nvmap, context_save_buf);
	/* Second pass: emit the actual save command stream. */
	setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);

	h->alloc = ctx3d_alloc;
	h->get = ctx3d_get;
	h->put = ctx3d_put;
	h->save_service = ctx3d_save_service;
	return 0;
}
+
/* MPE contexts are not save/restored yet; handler ops stay unset. */
/* TODO: [ahatala 2010-05-27] */
int __init nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h)
{
	return 0;
}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_acm.h"
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+
+#include "dev.h"
+
/* Idle delay before a module is powered down (in jiffies).
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. ACM_TIMEOUT * 2 would otherwise expand to 1*HZ * 2 only by
 * luck of precedence). */
#define ACM_TIMEOUT (1*HZ)

/* Both partitions below are kept permanently on; see the workaround
 * comments in nvhost_module_init. */
#define DISABLE_3D_POWERGATING
#define DISABLE_MPE_POWERGATING
+
/*
 * Take a reference on a module and power it up on the first reference:
 * parent first, then either the powergate power-up sequence (single
 * clock) or plain clock enables, then the module's ON callback.
 * Also cancels any pending delayed powerdown.
 */
void nvhost_module_busy(struct nvhost_module *mod)
{
	mutex_lock(&mod->lock);
	cancel_delayed_work(&mod->powerdown);
	if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
		if (mod->parent)
			nvhost_module_busy(mod->parent);
		if (mod->powergate_id != -1) {
			/* powergate sequence owns the (single) clock */
			BUG_ON(mod->num_clks != 1);
			tegra_powergate_sequence_power_up(
				mod->powergate_id, mod->clk[0]);
		} else {
			int i;
			for (i = 0; i < mod->num_clks; i++)
				clk_enable(mod->clk[i]);
		}
		if (mod->func)
			mod->func(mod, NVHOST_POWER_ACTION_ON);
		mod->powered = true;
	}
	mutex_unlock(&mod->lock);
}
+
/*
 * Delayed-work handler: power a module down after it has stayed idle
 * for ACM_TIMEOUT. Rechecks refcount under the lock, since a
 * nvhost_module_busy call may have raced with the scheduled work.
 */
static void powerdown_handler(struct work_struct *work)
{
	struct nvhost_module *mod;
	mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
	mutex_lock(&mod->lock);
	if ((atomic_read(&mod->refcount) == 0) && mod->powered) {
		int i;
		if (mod->func)
			mod->func(mod, NVHOST_POWER_ACTION_OFF);
		for (i = 0; i < mod->num_clks; i++) {
			clk_disable(mod->clk[i]);
		}
		if (mod->powergate_id != -1) {
			/* hold the block in reset while its rail is off */
			tegra_periph_reset_assert(mod->clk[0]);
			tegra_powergate_power_off(mod->powergate_id);
		}
		mod->powered = false;
		/* release the reference taken on the parent at power-up */
		if (mod->parent)
			nvhost_module_idle(mod->parent);
	}
	mutex_unlock(&mod->lock);
}
+
/*
 * Drop 'refs' references on a module. When the count hits zero,
 * schedule the delayed powerdown and wake waiters on mod->idle
 * (nvhost_module_suspend sleeps there).
 */
void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
{
	bool kick = false;

	mutex_lock(&mod->lock);
	if (atomic_sub_return(refs, &mod->refcount) == 0) {
		BUG_ON(!mod->powered);
		schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
		kick = true;
	}
	mutex_unlock(&mod->lock);

	/* wake outside the lock; waiters re-check under the lock */
	if (kick)
		wake_up(&mod->idle);
}
+
/*
 * Map a (module name, clock index) pair to the clock name to request
 * from the clock framework. Index 0 is always the module's own clock;
 * higher indices name auxiliary clocks. Returns NULL when the module
 * has no clock at that index.
 */
static const char *get_module_clk_id(const char *module, int index)
{
	if (index == 0)
		return module;

	if (strcmp(module, "gr2d") == 0) {
		if (index == 1)
			return "epp";
		if (index == 2)
			return "emc";
	} else if (strcmp(module, "gr3d") == 0 || strcmp(module, "mpe") == 0) {
		if (index == 1)
			return "emc";
	}

	return NULL;
}
+
+static int get_module_powergate_id(const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return TEGRA_POWERGATE_3D;
+ else if (strcmp(module, "mpe") == 0)
+ return TEGRA_POWERGATE_MPE;
+ return -1;
+}
+
/*
 * Initialize a power-managed module: acquire up to
 * NVHOST_MODULE_MAX_CLOCKS clocks (stopping at the first that is
 * missing), raise each to its maximum rate, resolve the module's power
 * partition, and set up the delayed-powerdown machinery.
 * Always returns 0.
 */
int nvhost_module_init(struct nvhost_module *mod, const char *name,
		nvhost_modulef func, struct nvhost_module *parent,
		struct device *dev)
{
	int i = 0;
	mod->name = name;

	while (i < NVHOST_MODULE_MAX_CLOCKS) {
		long rate;
		mod->clk[i] = clk_get(dev, get_module_clk_id(name, i));
		if (IS_ERR_OR_NULL(mod->clk[i]))
			break;
		/* run every module clock at its maximum supported rate */
		rate = clk_round_rate(mod->clk[i], UINT_MAX);
		if (rate < 0) {
			pr_err("%s: can't get maximum rate for %s\n",
				__func__, name);
			break;
		}
		if (rate != clk_get_rate(mod->clk[i])) {
			clk_set_rate(mod->clk[i], rate);
		}
		i++;
	}

	mod->num_clks = i;
	mod->func = func;
	mod->parent = parent;
	mod->powered = false;
	mod->powergate_id = get_module_powergate_id(name);

#ifdef DISABLE_3D_POWERGATING
	/*
	 * It is possible for the 3d block to generate an invalid memory
	 * request during the power up sequence in some cases. Workaround
	 * is to disable 3d block power gating.
	 */
	if (mod->powergate_id == TEGRA_POWERGATE_3D) {
		/* NOTE(review): uses clk[0]; assumes gr3d always yields at
		 * least one clock above -- confirm. */
		tegra_powergate_sequence_power_up(mod->powergate_id,
			mod->clk[0]);
		clk_disable(mod->clk[0]);
		/* -1 makes the rest of the code treat it as ungated */
		mod->powergate_id = -1;
	}
#endif

#ifdef DISABLE_MPE_POWERGATING
	/*
	 * Disable power gating for MPE as it seems to cause issues with
	 * camera record stress tests when run in loop.
	 */
	if (mod->powergate_id == TEGRA_POWERGATE_MPE) {
		tegra_powergate_sequence_power_up(mod->powergate_id,
			mod->clk[0]);
		clk_disable(mod->clk[0]);
		mod->powergate_id = -1;
	}
#endif

	mutex_init(&mod->lock);
	init_waitqueue_head(&mod->idle);
	INIT_DELAYED_WORK(&mod->powerdown, powerdown_handler);

	return 0;
}
+
+static int is_module_idle(struct nvhost_module *mod)
+{
+ int count;
+ mutex_lock(&mod->lock);
+ count = atomic_read(&mod->refcount);
+ mutex_unlock(&mod->lock);
+ return (count == 0);
+}
+
/*
 * Wait (with a timeout slightly beyond the powerdown delay) for the
 * module to go idle, dump debug state if it does not, then flush the
 * pending powerdown work so the module is really off before suspend.
 */
void nvhost_module_suspend(struct nvhost_module *mod)
{
	int ret;

	ret = wait_event_timeout(mod->idle, is_module_idle(mod),
			ACM_TIMEOUT + msecs_to_jiffies(500));
	if (ret == 0)
		nvhost_debug_dump();
	flush_delayed_work(&mod->powerdown);
	/* after the flush the powerdown handler must have run */
	BUG_ON(mod->powered);
}
+
+void nvhost_module_deinit(struct nvhost_module *mod)
+{
+ int i;
+ nvhost_module_suspend(mod);
+ for (i = 0; i < mod->num_clks; i++)
+ clk_put(mod->clk[i]);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
#ifndef __NVHOST_ACM_H
#define __NVHOST_ACM_H

#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/clk.h>

#define NVHOST_MODULE_MAX_CLOCKS 3

struct nvhost_module;

enum nvhost_power_action {
	NVHOST_POWER_ACTION_OFF,
	NVHOST_POWER_ACTION_ON,
};

/* Callback invoked after power-up (ACTION_ON) and before power-down
 * (ACTION_OFF); may be NULL. */
typedef void (*nvhost_modulef)(struct nvhost_module *mod, enum nvhost_power_action action);

struct nvhost_module {
	const char *name;		/* module name, also the clock id */
	nvhost_modulef func;		/* power action callback, may be NULL */
	struct delayed_work powerdown;	/* deferred power-off work */
	struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
	int num_clks;			/* clocks actually acquired */
	struct mutex lock;		/* serializes power state transitions */
	bool powered;			/* current power state */
	atomic_t refcount;		/* busy refs; 0 => may power down */
	wait_queue_head_t idle;		/* woken when refcount reaches 0 */
	struct nvhost_module *parent;	/* powered up before this module */
	int powergate_id;		/* power partition id, or -1 if none */
};

int nvhost_module_init(struct nvhost_module *mod, const char *name,
		nvhost_modulef func, struct nvhost_module *parent,
		struct device *dev);
void nvhost_module_deinit(struct nvhost_module *mod);
void nvhost_module_suspend(struct nvhost_module *mod);

void nvhost_module_busy(struct nvhost_module *mod);
void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);

/* Racy snapshot of the power state; take mod->lock for a stable view. */
static inline bool nvhost_module_powered(struct nvhost_module *mod)
{
	return mod->powered;
}

/* Drop a single busy reference; see nvhost_module_idle_mult. */
static inline void nvhost_module_idle(struct nvhost_module *mod)
{
	nvhost_module_idle_mult(mod, 1);

}

#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include <asm/cacheflush.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer & sync queue
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
+#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
+
/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
+
+static void destroy_push_buffer(struct push_buffer *pb);
+
/**
 * Reset to empty push buffer.
 * fence one slot behind cur means completely free; fence == cur would
 * mean full (see the note at the top of this section).
 */
static void reset_push_buffer(struct push_buffer *pb)
{
	pb->fence = PUSH_BUFFER_SIZE - 8;
	pb->cur = 0;
}
+
/**
 * Init push buffer resources: allocate, map and pin the pushbuffer and
 * plant a RESTART opcode after its last slot so DMA wraps back to the
 * start. Returns 0 on success, -ENOMEM on any failure.
 */
static int init_push_buffer(struct push_buffer *pb)
{
	struct nvhost_cdma *cdma = pb_to_cdma(pb);
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	pb->mem = NULL;
	pb->mapped = NULL;
	pb->phys = 0;
	reset_push_buffer(pb);

	/* allocate and map pushbuffer memory (+4 for the final RESTART) */
	pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
			NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR_OR_NULL(pb->mem)) {
		pb->mem = NULL;
		goto fail;
	}
	pb->mapped = nvmap_mmap(pb->mem);
	if (pb->mapped == NULL)
		goto fail;

	/* pin pushbuffer and get physical address */
	pb->phys = nvmap_pin(nvmap, pb->mem);
	/* presumably addresses in the top page signal pin failure --
	 * confirm against the nvmap_pin contract */
	if (pb->phys >= 0xfffff000) {
		pb->phys = 0;
		goto fail;
	}

	/* put the restart at the end of pushbuffer memory */
	*(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);

	return 0;

fail:
	/* destroy_push_buffer handles partially-initialized state */
	destroy_push_buffer(pb);
	return -ENOMEM;
}
+
/**
 * Clean up push buffer resources.
 * Safe on a partially-initialized buffer: each teardown step is guarded
 * by the corresponding field being set.
 */
static void destroy_push_buffer(struct push_buffer *pb)
{
	struct nvhost_cdma *cdma = pb_to_cdma(pb);
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	if (pb->mapped)
		nvmap_munmap(pb->mem, pb->mapped);

	if (pb->phys != 0)
		nvmap_unpin(nvmap, pb->mem);

	if (pb->mem)
		nvmap_free(nvmap, pb->mem);

	pb->mem = NULL;
	pb->mapped = NULL;
	pb->phys = 0;
}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_to_push_buffer(struct push_buffer *pb, u32 op1, u32 op2)
+{
+ u32 cur = pb->cur;
+ u32 *p = (u32*)((u32)pb->mapped + cur);
+ BUG_ON(cur == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+ /* printk("push_to_push_buffer: op1=%08x; op2=%08x; cur=%x\n", op1, op2, pb->cur); */
+}
+
/**
 * Pop a number of two word slots from the push buffer, i.e. mark them
 * reusable by advancing the fence.
 * Caller must ensure push buffer is not empty.
 */
static void pop_from_push_buffer(struct push_buffer *pb, unsigned int slots)
{
	pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
}
+
/**
 * Return the number of two word slots free in the push buffer.
 * Relies on PUSH_BUFFER_SIZE being a power of two for the wrap mask.
 */
static u32 push_buffer_space(struct push_buffer *pb)
{
	return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
}
+
/* Physical address of the current write position (the DMAPUT value). */
static u32 push_buffer_putptr(struct push_buffer *pb)
{
	return pb->phys + pb->cur;
}
+
+
+/* Sync Queue
+ *
+ * The sync queue is a circular buffer of u32s interpreted as:
+ * 0: SyncPointID
+ * 1: SyncPointValue
+ * 2: NumSlots (how many pushbuffer slots to free)
+ * 3: NumHandles
+ * 4: nvmap client which pinned the handles
+ * 5..: NumHandles * nvmemhandle to unpin
+ *
+ * There's always one word unused, so (accounting for wrap):
+ * - Write == Read => queue empty
+ * - Write + 1 == Read => queue full
+ * The queue must not be left with less than SYNC_QUEUE_MIN_ENTRY words
+ * of space at the end of the array.
+ *
 * We want to pass contiguous arrays of handles to the unpin call
 * (nvmap_unpin_handles), so arrays that would wrap at the end of the
 * buffer will be split into two (or more) entries.
+ */
+
+/* Number of words needed to store an entry containing one handle */
+#define SYNC_QUEUE_MIN_ENTRY (4 + (2 * sizeof(void *) / sizeof(u32)))
+
/**
 * Reset to empty queue (read == write means empty, per the sync queue
 * comment above).
 */
static void reset_sync_queue(struct sync_queue *queue)
{
	queue->read = 0;
	queue->write = 0;
}
+
/**
 * Find the number of handles that can be stashed in the sync queue without
 * waiting.
 * 0 -> queue is full, must update to wait for some entries to be freed.
 * Entries never wrap across the end of the buffer, which is what makes
 * the end-of-buffer reservations below necessary.
 */
static unsigned int sync_queue_space(struct sync_queue *queue)
{
	unsigned int read = queue->read;
	unsigned int write = queue->write;
	u32 size;

	BUG_ON(read  > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
	BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));

	/*
	 * We can use all of the space up to the end of the buffer, unless the
	 * read position is within that space (the read position may advance
	 * asynchronously, but that can't take space away once we've seen it).
	 */
	if (read > write) {
		size = (read - 1) - write;
	} else {
		size = NVHOST_SYNC_QUEUE_SIZE - write;

		/*
		 * If the read position is zero, it gets complicated. We can't
		 * use the last word in the buffer, because that would leave
		 * the queue empty.
		 * But also if we use too much we would not leave enough space
		 * for a single handle packet, and would have to wrap in
		 * add_to_sync_queue - also leaving write == read == 0,
		 * an empty queue.
		 */
		if (read == 0)
			size -= SYNC_QUEUE_MIN_ENTRY;
	}

	/*
	 * There must be room for an entry header and at least one handle,
	 * otherwise we report a full queue.
	 */
	if (size < SYNC_QUEUE_MIN_ENTRY)
		return 0;
	/* Minimum entry stores one handle */
	return (size - SYNC_QUEUE_MIN_ENTRY) + 1;
}
+
+/**
+ * Add an entry to the sync queue.
+ */
+#define entry_size(_cnt) ((1 + _cnt)*sizeof(void *)/sizeof(u32))
+
+static void add_to_sync_queue(struct sync_queue *queue,
+ u32 sync_point_id, u32 sync_point_value,
+ u32 nr_slots, struct nvmap_client *user_nvmap,
+ struct nvmap_handle **handles, u32 nr_handles)
+{
+ u32 write = queue->write;
+ u32 *p = queue->buffer + write;
+ u32 size = 4 + (entry_size(nr_handles));
+
+ BUG_ON(sync_point_id == NVSYNCPT_INVALID);
+ BUG_ON(sync_queue_space(queue) < nr_handles);
+
+ write += size;
+ BUG_ON(write > NVHOST_SYNC_QUEUE_SIZE);
+
+ *p++ = sync_point_id;
+ *p++ = sync_point_value;
+ *p++ = nr_slots;
+ *p++ = nr_handles;
+ BUG_ON(!user_nvmap);
+ *(struct nvmap_client **)p = nvmap_client_get(user_nvmap);
+
+ p = (u32 *)((void *)p + sizeof(struct nvmap_client *));
+
+ if (nr_handles)
+ memcpy(p, handles, nr_handles * sizeof(struct nvmap_handle *));
+
+ /* If there's not enough room for another entry, wrap to the start. */
+ if ((write + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE) {
+ /*
+ * It's an error for the read position to be zero, as that
+ * would mean we emptied the queue while adding something.
+ */
+ BUG_ON(queue->read == 0);
+ write = 0;
+ }
+
+ queue->write = write;
+}
+
/**
 * Get a pointer to the next entry in the queue, or NULL if the queue is empty.
 * Doesn't consume the entry.
 */
static u32 *sync_queue_head(struct sync_queue *queue)
{
	u32 read = queue->read;
	u32 write = queue->write;

	BUG_ON(read  > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
	BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));

	/* read == write means empty (entries never wrap mid-entry) */
	if (read == write)
		return NULL;
	return queue->buffer + read;
}
+
/**
 * Advances to the next queue entry, if you want to consume it.
 * The entry's length is derived from its NumHandles word (offset 3 in
 * the entry layout).
 */
static void
dequeue_sync_queue_head(struct sync_queue *queue)
{
	u32 read = queue->read;
	u32 size;

	BUG_ON(read == queue->write);

	size = 4 + entry_size(queue->buffer[read + 3]);

	read += size;
	BUG_ON(read > NVHOST_SYNC_QUEUE_SIZE);

	/* If there's not enough room for another entry, wrap to the start. */
	if ((read + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE)
		read = 0;

	queue->read = read;
}
+
+
+/*** Cdma internal stuff ***/
+
/**
 * Kick channel DMA into action by writing its PUT offset (if it has changed).
 * The wmb() orders the pushbuffer writes before the register write that
 * makes them visible to the DMA engine.
 */
static void kick_cdma(struct nvhost_cdma *cdma)
{
	u32 put = push_buffer_putptr(&cdma->push_buffer);
	if (put != cdma->last_put) {
		void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
		wmb();
		writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
		cdma->last_put = put;
	}
}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ * - sq space: returns the number of handles that can be stored in the queue
+ * - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+ return sync_queue_head(&cdma->sync_queue) ? 0 : 1;
+ case CDMA_EVENT_SYNC_QUEUE_SPACE:
+ return sync_queue_space(&cdma->sync_queue);
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
+ return push_buffer_space(&cdma->push_buffer);
+ default:
+ return 0;
+ }
+}
+
/**
 * Sleep (if necessary) until the requested event happens
 * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 * - Returns 1
 * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue.
 * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 * - Return the amount of space (> 0)
 * Must be called with the cdma lock held; the lock is dropped while
 * sleeping on cdma->sem (update_cdma ups it when the event occurs) and
 * retaken before rechecking.
 */
static unsigned int wait_cdma(struct nvhost_cdma *cdma, enum cdma_event event)
{
	for (;;) {
		unsigned int space = cdma_status(cdma, event);
		if (space)
			return space;

		/* only one waiter per cdma at a time */
		BUG_ON(cdma->event != CDMA_EVENT_NONE);
		cdma->event = event;

		mutex_unlock(&cdma->lock);
		down(&cdma->sem);
		mutex_lock(&cdma->lock);
	}
}
+
/**
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma(struct nvhost_cdma *cdma)
{
	bool signal = false;
	struct nvhost_master *dev = cdma_to_dev(cdma);

	BUG_ON(!cdma->running);

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	for (;;) {
		u32 syncpt_id, syncpt_val;
		unsigned int nr_slots, nr_handles;
		struct nvmap_handle **handles;
		struct nvmap_client *nvmap;
		u32 *sync;

		sync = sync_queue_head(&cdma->sync_queue);
		if (!sync) {
			if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
				signal = true;
			break;
		}

		/* unpack the entry (layout described in the sync queue
		 * comment above) */
		syncpt_id = *sync++;
		syncpt_val = *sync++;

		BUG_ON(syncpt_id == NVSYNCPT_INVALID);

		/* Check whether this syncpt has completed, and bail if not */
		if (!nvhost_syncpt_min_cmp(&dev->syncpt, syncpt_id, syncpt_val))
			break;

		nr_slots = *sync++;
		nr_handles = *sync++;
		nvmap = *(struct nvmap_client **)sync;
		sync = ((void *)sync + sizeof(struct nvmap_client *));
		handles = (struct nvmap_handle **)sync;

		BUG_ON(!nvmap);

		/* Unpin the memory */
		nvmap_unpin_handles(nvmap, handles, nr_handles);

		/* drop the client reference taken in add_to_sync_queue */
		nvmap_client_put(nvmap);

		/* Pop push buffer slots */
		if (nr_slots) {
			pop_from_push_buffer(&cdma->push_buffer, nr_slots);
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		dequeue_sync_queue_head(&cdma->sync_queue);
		if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
			signal = true;
	}

	/* Wake up the wait_cdma() caller if the requested event happened */
	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		up(&cdma->sem);
	}
}
+
/**
 * Create a cdma: set up locking/wait primitives, allocate the push
 * buffer and reset the sync queue. Returns 0 or a negative errno from
 * push buffer allocation.
 */
int nvhost_cdma_init(struct nvhost_cdma *cdma)
{
	int err;

	mutex_init(&cdma->lock);
	sema_init(&cdma->sem, 0);
	cdma->event = CDMA_EVENT_NONE;
	cdma->running = false;
	err = init_push_buffer(&cdma->push_buffer);
	if (err)
		return err;
	reset_sync_queue(&cdma->sync_queue);
	return 0;
}
+
/**
 * Destroy a cdma. The channel must already be stopped
 * (nvhost_cdma_stop).
 */
void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
{
	BUG_ON(cdma->running);
	destroy_push_buffer(&cdma->push_buffer);
}
+
/*
 * Start command DMA on the channel: stop it, program the fetch window
 * and PUT pointer, reset GET, then release it. The register write
 * order matters here; do not reorder.
 */
static void start_cdma(struct nvhost_cdma *cdma)
{
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	if (cdma->running)
		return;

	cdma->last_put = push_buffer_putptr(&cdma->push_buffer);

	/* stop DMA while reprogramming it */
	writel(nvhost_channel_dmactrl(true, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	/* set base, put, end pointer (all of memory) */
	writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
	writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
	writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);

	/* reset GET */
	writel(nvhost_channel_dmactrl(true, true, true),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	/* start the command DMA */
	writel(nvhost_channel_dmactrl(false, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	cdma->running = true;

}
+
/*
 * Stop command DMA on the channel: wait for all queued work to
 * complete (sync queue empty), then halt DMA fetches. Safe to call
 * when the channel is already stopped.
 */
void nvhost_cdma_stop(struct nvhost_cdma *cdma)
{
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	mutex_lock(&cdma->lock);
	if (cdma->running) {
		wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
		writel(nvhost_channel_dmactrl(true, false, false),
			chan_regs + HOST1X_CHANNEL_DMACTRL);
		cdma->running = false;
	}
	mutex_unlock(&cdma->lock);
}
+
/**
 * Begin a cdma submit.
 * Takes the cdma lock and intentionally returns with it held; it is
 * released by the matching nvhost_cdma_end().
 */
void nvhost_cdma_begin(struct nvhost_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	if (!cdma->running)
		start_cdma(cdma);
	cdma->slots_free = 0;
	cdma->slots_used = 0;
}
+
/**
 * Push two words into a push buffer slot.
 * Blocks as necessary if the push buffer is full. Must be called
 * between nvhost_cdma_begin and nvhost_cdma_end (cdma lock held).
 */
void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
{
	u32 slots_free = cdma->slots_free;
	if (slots_free == 0) {
		/* kick DMA first so in-flight work can drain slots */
		kick_cdma(cdma);
		slots_free = wait_cdma(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
	}
	cdma->slots_free = slots_free - 1;
	cdma->slots_used++;
	push_to_push_buffer(&cdma->push_buffer, op1, op2);
}
+
/**
 * End a cdma submit.
 * Kick off DMA, add a contiguous block of memory handles to the sync queue,
 * and a number of slots to be freed from the pushbuffer.
 * Blocks as necessary if the sync queue is full.
 * The handles for a submit must all be pinned at the same time, but they
 * can be unpinned in smaller chunks.
 * Releases the cdma lock taken by nvhost_cdma_begin().
 */
void nvhost_cdma_end(struct nvmap_client *user_nvmap, struct nvhost_cdma *cdma,
		u32 sync_point_id, u32 sync_point_value,
		struct nvmap_handle **handles, unsigned int nr_handles)
{
	kick_cdma(cdma);

	while (nr_handles || cdma->slots_used) {
		unsigned int count;
		/*
		 * Wait until there's enough room in the
		 * sync queue to write something.
		 */
		count = wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);

		/*
		 * Add reloc entries to sync queue (as many as will fit)
		 * and unlock it
		 */
		if (count > nr_handles)
			count = nr_handles;
		add_to_sync_queue(&cdma->sync_queue, sync_point_id,
				sync_point_value, cdma->slots_used,
				user_nvmap, handles, count);
		/* NumSlots only goes in the first packet */
		cdma->slots_used = 0;
		handles += count;
		nr_handles -= count;
	}

	mutex_unlock(&cdma->lock);
}
+
/**
 * Update cdma state according to current sync point values
 * (locked wrapper around update_cdma).
 */
void nvhost_cdma_update(struct nvhost_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	update_cdma(cdma);
	mutex_unlock(&cdma->lock);
}
+
/**
 * Manually spin until all CDMA has finished. Used if an async update
 * cannot be scheduled for any reason.
 * Drops the lock and yields between iterations so the sync points can
 * advance.
 */
void nvhost_cdma_flush(struct nvhost_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	while (sync_queue_head(&cdma->sync_queue)) {
		update_cdma(cdma);
		mutex_unlock(&cdma->lock);
		schedule();
		mutex_lock(&cdma->lock);
	}
	mutex_unlock(&cdma->lock);
}
+
/**
 * Find the currently executing gather in the push buffer and return
 * its physical address and size (both 0 if it cannot be located).
 * dmaget points just past the opcode pair being fetched, so the slot
 * of interest is the one 8 bytes earlier.
 */
void nvhost_cdma_find_gather(struct nvhost_cdma *cdma, u32 dmaget, u32 *addr, u32 *size)
{
	u32 offset = dmaget - cdma->push_buffer.phys;

	*addr = *size = 0;

	if (offset >= 8 && offset < cdma->push_buffer.cur) {
		u32 *p = cdma->push_buffer.mapped + (offset - 8) / 4;

		/* Make sure we have a gather */
		/* NOTE(review): 6 is presumably the host1x GATHER opcode
		 * (top nibble) and the low 14 bits its word count --
		 * confirm against the opcode definitions. */
		if ((p[0] >> 28) == 6) {
			*addr = p[1];
			*size = p[0] & 0x3fff;
		}
	}
}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
#ifndef __NVHOST_CDMA_H
#define __NVHOST_CDMA_H

#include <linux/sched.h>
#include <linux/semaphore.h>

#include <mach/nvhost.h>
#include <mach/nvmap.h>

#include "nvhost_acm.h"

/*
 * cdma
 *
 * This is in charge of a host command DMA channel.
 * Sends ops to a push buffer, and takes responsibility for unpinning
 * (& possibly freeing) of memory after those ops have completed.
 * Producer:
 *	begin
 *	push - send ops to the push buffer
 *	end - start command DMA and enqueue handles to be unpinned
 * Consumer:
 *	update - call to update sync queue and push buffer, unpin memory
 */

/* Size of the sync queue (in u32 words). If it is too small, we won't be
 * able to queue up many command buffers. If it is too large, we waste
 * memory. */
#define NVHOST_SYNC_QUEUE_SIZE 8192

/* Number of gathers we allow to be queued up per channel. Must be a
   power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
#define NVHOST_GATHER_QUEUE_SIZE 512

struct push_buffer {
	struct nvmap_handle_ref *mem;	/* handle to pushbuffer memory */
	u32 *mapped;			/* mapped pushbuffer memory */
	u32 phys;			/* physical address of pushbuffer */
	u32 fence;			/* index we've written */
	u32 cur;			/* index to write to */
};

struct sync_queue {
	unsigned int read;		/* read position within buffer */
	unsigned int write;		/* write position within buffer */
	u32 buffer[NVHOST_SYNC_QUEUE_SIZE]; /* queue data */
};

enum cdma_event {
	CDMA_EVENT_NONE,		/* not waiting for any event */
	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
	CDMA_EVENT_SYNC_QUEUE_SPACE,	/* wait for space in sync queue */
	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
};

struct nvhost_cdma {
	struct mutex lock;		/* controls access to shared state */
	struct semaphore sem;		/* signalled when event occurs */
	enum cdma_event event;		/* event that sem is waiting for */
	unsigned int slots_used;	/* pb slots used in current submit */
	unsigned int slots_free;	/* pb slots free in current submit */
	unsigned int last_put;		/* last value written to DMAPUT */
	struct push_buffer push_buffer;	/* channel's push buffer */
	struct sync_queue sync_queue;	/* channel's sync queue */
	bool running;
};

int	nvhost_cdma_init(struct nvhost_cdma *cdma);
void	nvhost_cdma_deinit(struct nvhost_cdma *cdma);
void	nvhost_cdma_stop(struct nvhost_cdma *cdma);
/* begin/end bracket a submit; the cdma lock is held in between */
void	nvhost_cdma_begin(struct nvhost_cdma *cdma);
void	nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
void	nvhost_cdma_end(struct nvmap_client *user_nvmap,
		struct nvhost_cdma *cdma,
		u32 sync_point_id, u32 sync_point_value,
		struct nvmap_handle **handles, unsigned int nr_handles);
void	nvhost_cdma_update(struct nvhost_cdma *cdma);
void	nvhost_cdma_flush(struct nvhost_cdma *cdma);
void	nvhost_cdma_find_gather(struct nvhost_cdma *cdma, u32 dmaget,
		u32 *addr, u32 *size);

#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/platform_device.h>
+
/* Module mutex ids understood by the host1x hardware. */
#define NVMODMUTEX_2D_FULL   (1)
#define NVMODMUTEX_2D_SIMPLE (2)
#define NVMODMUTEX_2D_SB_A   (3)
#define NVMODMUTEX_2D_SB_B   (4)
#define NVMODMUTEX_3D        (5)
#define NVMODMUTEX_DISPLAYA  (6)
#define NVMODMUTEX_DISPLAYB  (7)
#define NVMODMUTEX_VI        (8)
#define NVMODMUTEX_DSI       (9)

static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action);
static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action);
static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action);

/* Static description of each host1x channel: its name (also the clock
 * id), the sync points / wait bases / module mutexes it owns, its unit
 * class, and an optional power callback. Indexed by channel number. */
static const struct nvhost_channeldesc channelmap[] = {
{
	/* channel 0 */
	.name	       = "display",
	.syncpts       = BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) |
			 BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
},
{
	/* channel 1 */
	.name	       = "gr3d",
	.syncpts       = BIT(NVSYNCPT_3D),
	.waitbases     = BIT(NVWAITBASE_3D),
	.modulemutexes = BIT(NVMODMUTEX_3D),
	.class	       = NV_GRAPHICS_3D_CLASS_ID,
	.power         = power_3d,
},
{
	/* channel 2 */
	.name	       = "gr2d",
	.syncpts       = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
	.waitbases     = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
			 BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
	.power         = power_2d,
},
{
	/* channel 3 */
	.name	 = "isp",
	.syncpts = 0,
},
{
	/* channel 4 */
	.name	       = "vi",
	.syncpts       = BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
			 BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
			 BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5),
	.modulemutexes = BIT(NVMODMUTEX_VI),
},
{
	/* channel 5 */
	.name	   = "mpe",
	.syncpts   = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
		     BIT(NVSYNCPT_MPE_WR_SAFE),
	.waitbases = BIT(NVWAITBASE_MPE),
	.class	   = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
	.power     = power_mpe,
},
{
	/* channel 6 */
	.name	       = "dsi",
	.syncpts       = BIT(NVSYNCPT_DSI),
	.modulemutexes = BIT(NVMODMUTEX_DSI),
}};
+
+/* Return the MMIO aperture of channel @ndx within the host1x aperture @p. */
+static inline void __iomem *channel_aperture(void __iomem *p, int ndx)
+{
+	ndx += NVHOST_CHANNEL_BASE;
+	p += NV_HOST1X_CHANNEL0_BASE;
+	/* channels are laid out at fixed-size strides */
+	p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+	return p;
+}
+
+/*
+ * One-time init of channel @index: bind it to @dev, attach its static
+ * descriptor and register aperture, and set up the hwctx handler for
+ * the module it drives.  Returns 0 or a negative errno from the
+ * hwctx handler init.
+ */
+int __init nvhost_channel_init(struct nvhost_channel *ch,
+			struct nvhost_master *dev, int index)
+{
+	/* channelmap must describe exactly NVHOST_NUMCHANNELS channels */
+	BUILD_BUG_ON(NVHOST_NUMCHANNELS != ARRAY_SIZE(channelmap));
+
+	ch->dev = dev;
+	ch->desc = &channelmap[index];
+	ch->aperture = channel_aperture(dev->aperture, index);
+	mutex_init(&ch->reflock);
+	mutex_init(&ch->submitlock);
+
+	return nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+/*
+ * Take a reference on @ch.  The first reference lazily powers up the
+ * channel: it initializes the power-managed module and its command DMA.
+ * Returns @ch on success, NULL if that lazy init failed (the refcount
+ * is only bumped on success).  Serialized by ch->reflock.
+ */
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+	int err = 0;
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 0) {
+		err = nvhost_module_init(&ch->mod, ch->desc->name,
+					ch->desc->power, &ch->dev->mod,
+					&ch->dev->pdev->dev);
+		if (!err) {
+			err = nvhost_cdma_init(&ch->cdma);
+			/* roll back module init if cdma setup failed */
+			if (err)
+				nvhost_module_deinit(&ch->mod);
+		}
+	}
+	if (!err) {
+		ch->refcount++;
+	}
+	mutex_unlock(&ch->reflock);
+
+	return err ? NULL : ch;
+}
+
+/*
+ * Drop a reference on @ch.  If @ctx is the channel's current context it
+ * is detached first.  The last reference tears down the module and the
+ * command DMA.
+ *
+ * NOTE(review): refcount is decremented unconditionally — an unbalanced
+ * put would drive it negative; assumed callers pair get/put correctly.
+ */
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+	if (ctx) {
+		mutex_lock(&ch->submitlock);
+		if (ch->cur_ctx == ctx)
+			ch->cur_ctx = NULL;
+		mutex_unlock(&ch->submitlock);
+	}
+
+	mutex_lock(&ch->reflock);
+	if (ch->refcount == 1) {
+		nvhost_module_deinit(&ch->mod);
+		/* cdma may already be stopped, that's ok */
+		nvhost_cdma_stop(&ch->cdma);
+		nvhost_cdma_deinit(&ch->cdma);
+	}
+	ch->refcount--;
+	mutex_unlock(&ch->reflock);
+}
+
+/*
+ * Quiesce @ch for system suspend: stop its command DMA if anyone still
+ * holds a reference.  The module itself must already be powered down —
+ * suspending while powered is a driver bug, hence the BUG_ON.
+ */
+void nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+	mutex_lock(&ch->reflock);
+	BUG_ON(nvhost_module_powered(&ch->mod));
+	if (ch->refcount)
+		nvhost_cdma_stop(&ch->cdma);
+	mutex_unlock(&ch->reflock);
+}
+
+/*
+ * Push a batch of opcode pairs to the channel's command DMA.
+ *
+ * @intrs schedules CTXSAVE interrupt actions on @syncpt_id at the given
+ * thresholds before the push; @unpins are pinned handles stashed in the
+ * sync queue to be released once @syncpt_id reaches @syncpt_val.
+ * Caller is expected to hold the submit lock / own the channel.
+ */
+void nvhost_channel_submit(struct nvhost_channel *ch,
+			struct nvmap_client *user_nvmap,
+			struct nvhost_op_pair *ops, int num_pairs,
+			struct nvhost_cpuinterrupt *intrs, int num_intrs,
+			struct nvmap_handle **unpins, int num_unpins,
+			u32 syncpt_id, u32 syncpt_val)
+{
+	int i;
+	struct nvhost_op_pair* p;
+
+	/* schedule interrupts */
+	for (i = 0; i < num_intrs; i++) {
+		nvhost_intr_add_action(&ch->dev->intr, syncpt_id, intrs[i].syncpt_val,
+				NVHOST_INTR_ACTION_CTXSAVE, intrs[i].intr_data, NULL);
+	}
+
+	/* begin a CDMA submit */
+	nvhost_cdma_begin(&ch->cdma);
+
+	/* push ops */
+	for (i = 0, p = ops; i < num_pairs; i++, p++)
+		nvhost_cdma_push(&ch->cdma, p->op1, p->op2);
+
+	/* end CDMA submit & stash pinned hMems into sync queue for later cleanup */
+	nvhost_cdma_end(user_nvmap, &ch->cdma, syncpt_id, syncpt_val,
+			unpins, num_unpins);
+}
+
+/* 2D module power callback — currently a stub, see TODOs. */
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+	/* TODO: [ahatala 2010-06-17] reimplement EPP hang war */
+	if (action == NVHOST_POWER_ACTION_OFF) {
+		/* TODO: [ahatala 2010-06-17] reset EPP */
+	}
+}
+
+/*
+ * 3D module power callback.  Before power-off, if a hardware context is
+ * current, save it: submit the context-save gather, schedule a CTXSAVE
+ * action one increment before completion, then block until the save
+ * sync point value is reached.  Runs with submitlock held for the whole
+ * save so no new work can slip in.
+ */
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+	struct nvhost_channel *ch = container_of(mod, struct nvhost_channel, mod);
+
+	if (action == NVHOST_POWER_ACTION_OFF) {
+		mutex_lock(&ch->submitlock);
+		if (ch->cur_ctx) {
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+			struct nvhost_op_pair save;
+			struct nvhost_cpuinterrupt ctxsw;
+			u32 syncval;
+			void *ref;
+			/* reserve the sync point increments the save will do */
+			syncval = nvhost_syncpt_incr_max(&ch->dev->syncpt,
+							NVSYNCPT_3D,
+							ch->cur_ctx->save_incrs);
+			save.op1 = nvhost_opcode_gather(0, ch->cur_ctx->save_size);
+			save.op2 = ch->cur_ctx->save_phys;
+			ctxsw.intr_data = ch->cur_ctx;
+			/* fire the ctxsave service one increment early */
+			ctxsw.syncpt_val = syncval - 1;
+			ch->cur_ctx->valid = true;
+			/* extra ref held until the ctxsave action runs */
+			ch->ctxhandler.get(ch->cur_ctx);
+			ch->cur_ctx = NULL;
+
+			nvhost_channel_submit(ch, ch->dev->nvmap,
+					&save, 1, &ctxsw, 1, NULL, 0,
+					NVSYNCPT_3D, syncval);
+
+			/* wait for the save to reach syncval before cutting power */
+			nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D,
+					syncval,
+					NVHOST_INTR_ACTION_WAKEUP,
+					&wq, &ref);
+			wait_event(wq,
+				nvhost_syncpt_min_cmp(&ch->dev->syncpt,
+						NVSYNCPT_3D, syncval));
+			nvhost_intr_put_ref(&ch->dev->intr, ref);
+			nvhost_cdma_update(&ch->cdma);
+		}
+		mutex_unlock(&ch->submitlock);
+	}
+}
+
+/* MPE module power callback — intentionally empty (nothing to save). */
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include "nvhost_cdma.h"
+#include "nvhost_acm.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+
+#define NVHOST_CHANNEL_BASE 0
+/* one hardware channel is reserved for the host itself */
+#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
+#define NVHOST_MAX_WAIT_CHECKS 256
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+
+struct nvhost_master;
+
+/* static, per-channel hardware description (see channelmap[]) */
+struct nvhost_channeldesc {
+	const char *name;
+	nvhost_modulef power;		/* power transition callback, may be NULL */
+	u32 syncpts;			/* bitmask of owned sync points */
+	u32 waitbases;			/* bitmask of owned wait bases */
+	u32 modulemutexes;		/* bitmask of used module mutexes */
+	u32 class;			/* default class id for submits */
+};
+
+/* runtime state of one host1x channel */
+struct nvhost_channel {
+	int refcount;			/* protected by reflock */
+	struct mutex reflock;
+	struct mutex submitlock;	/* serializes submits / ctx switch */
+	void __iomem *aperture;
+	struct nvhost_master *dev;
+	const struct nvhost_channeldesc *desc;
+	struct nvhost_hwctx *cur_ctx;	/* protected by submitlock */
+	struct device *node;
+	struct cdev cdev;
+	struct nvhost_hwctx_handler ctxhandler;
+	struct nvhost_module mod;
+	struct nvhost_cdma cdma;
+};
+
+/* a pair of opcodes pushed to CDMA as one unit */
+struct nvhost_op_pair {
+	u32 op1;
+	u32 op2;
+};
+
+/* a CPU interrupt scheduled at a sync point threshold */
+struct nvhost_cpuinterrupt {
+	u32 syncpt_val;
+	void *intr_data;
+};
+
+int nvhost_channel_init(
+	struct nvhost_channel *ch,
+	struct nvhost_master *dev, int index);
+
+void nvhost_channel_submit(struct nvhost_channel *ch,
+			struct nvmap_client *user_nvmap,
+			struct nvhost_op_pair *ops, int num_pairs,
+			struct nvhost_cpuinterrupt *intrs, int num_intrs,
+			struct nvmap_handle **unpins, int num_unpins,
+			u32 syncpt_id, u32 syncpt_val);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+void nvhost_channel_suspend(struct nvhost_channel *ch);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.c
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "dev.h"
+#include <linux/string.h>
+
+#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_master, cpuaccess)
+
+/*
+ * Map the register apertures of all client modules (platform MEM
+ * resources 1..NVHOST_MODULE_NUM).  Returns 0 on success or -ENXIO,
+ * in which case any mappings already made are unwound — the original
+ * code leaked the earlier ioremap()s on a mid-loop failure.
+ */
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+			struct platform_device *pdev)
+{
+	int i;
+	for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+		struct resource *mem;
+		/* resource 0 is the host1x aperture itself; modules follow */
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, i+1);
+		if (!mem) {
+			dev_err(&pdev->dev, "missing module memory resource\n");
+			goto fail;
+		}
+
+		ctx->regs[i] = ioremap(mem->start, resource_size(mem));
+		if (!ctx->regs[i]) {
+			dev_err(&pdev->dev, "failed to map module registers\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	/* unwind the mappings made before the failure */
+	while (--i >= 0) {
+		iounmap(ctx->regs[i]);
+		ctx->regs[i] = NULL;
+	}
+	return -ENXIO;
+}
+
+/*
+ * Undo nvhost_cpuaccess_init(): unmap every module aperture.
+ *
+ * Both entries are guarded: init() as written never populates
+ * ctx->reg_mem[], so the unconditional release_resource() in the
+ * original would operate on NULL/uninitialized pointers; partial-init
+ * failure can likewise leave regs[] entries NULL.
+ */
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
+{
+	int i;
+	for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+		if (ctx->regs[i]) {
+			iounmap(ctx->regs[i]);
+			ctx->regs[i] = NULL;
+		}
+		if (ctx->reg_mem[i]) {
+			release_resource(ctx->reg_mem[i]);
+			ctx->reg_mem[i] = NULL;
+		}
+	}
+}
+
+/*
+ * Try to take hardware module mutex @idx on behalf of the CPU.
+ * Returns 0 with the host kept busy (powered) on success, or
+ * -ERESTARTSYS if the mutex is currently held; the caller is expected
+ * to retry.  Balanced by nvhost_mutex_unlock().
+ */
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+	void __iomem *sync_regs = dev->sync_aperture;
+	u32 reg;
+
+	/* mlock registers returns 0 when the lock is acquired.
+	 * writing 0 clears the lock. */
+	nvhost_module_busy(&dev->mod);
+	reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+	if (reg) {
+		/* lock not acquired: drop the busy ref taken above */
+		nvhost_module_idle(&dev->mod);
+		return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+/* Release hardware module mutex @idx and drop the busy ref taken on lock. */
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+	void __iomem *sync_regs = dev->sync_aperture;
+	/* writing 0 clears the mlock */
+	writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+	nvhost_module_idle(&dev->mod);
+}
+
+/*
+ * Read @size bytes (must be a multiple of 4) of @module's registers
+ * starting at @offset into @values, keeping the host powered for the
+ * duration of the transfer.
+ */
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, void *values)
+{
+	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+	void __iomem *p = ctx->regs[module] + offset;
+	u32* out = (u32*)values;
+	BUG_ON(size & 3);
+	size >>= 2;	/* bytes -> words */
+	nvhost_module_busy(&dev->mod);
+	while (size--) {
+		*(out++) = readl(p);
+		p += 4;
+	}
+	/* order the reads before the module is allowed to idle */
+	rmb();
+	nvhost_module_idle(&dev->mod);
+}
+
+/*
+ * Write @size bytes (must be a multiple of 4) from @values into
+ * @module's registers starting at @offset, keeping the host powered
+ * for the duration of the transfer.
+ */
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, const void *values)
+{
+	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+	void __iomem *p = ctx->regs[module] + offset;
+	const u32* in = (const u32*)values;
+	BUG_ON(size & 3);
+	size >>= 2;	/* bytes -> words */
+	nvhost_module_busy(&dev->mod);
+	while (size--) {
+		writel(*(in++), p);
+		p += 4;
+	}
+	/* ensure the writes have posted before the module may idle */
+	wmb();
+	nvhost_module_idle(&dev->mod);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.h
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CPUACCESS_H
+#define __NVHOST_CPUACCESS_H
+
+#include "nvhost_hardware.h"
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/* client modules whose registers the CPU may access directly */
+enum nvhost_module_id {
+	NVHOST_MODULE_DISPLAY_A = 0,
+	NVHOST_MODULE_DISPLAY_B,
+	NVHOST_MODULE_VI,
+	NVHOST_MODULE_ISP,
+	NVHOST_MODULE_MPE,
+#if 0
+	/* TODO: [ahatala 2010-07-02] find out if these are needed */
+	NVHOST_MODULE_FUSE,
+	NVHOST_MODULE_APB_MISC,
+	NVHOST_MODULE_CLK_RESET,
+#endif
+	NVHOST_MODULE_NUM
+};
+
+/* per-module memory resources and mapped register apertures */
+struct nvhost_cpuaccess {
+	struct resource *reg_mem[NVHOST_MODULE_NUM];
+	void __iomem *regs[NVHOST_MODULE_NUM];
+};
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+			struct platform_device *pdev);
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx);
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+/* true if @module is a valid, directly-accessible module id */
+static inline bool nvhost_access_module_regs(
+	struct nvhost_cpuaccess *ctx, u32 module)
+{
+	return (module < NVHOST_MODULE_NUM);
+}
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, void *values);
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+			u32 offset, size_t size, const void *values);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HARDWARE_H
+#define __NVHOST_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/* class ids */
+enum {
+	NV_HOST1X_CLASS_ID = 0x1,
+	NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+	NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNELS 8
+#define NV_HOST1X_CHANNEL0_BASE 0
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+
+
+/* per-channel register offsets */
+#define HOST1X_CHANNEL_FIFOSTAT 0x00
+#define HOST1X_CHANNEL_INDDATA 0x0c
+#define HOST1X_CHANNEL_DMASTART 0x14
+#define HOST1X_CHANNEL_DMAPUT 0x18
+#define HOST1X_CHANNEL_DMAGET 0x1c
+#define HOST1X_CHANNEL_DMAEND 0x20
+#define HOST1X_CHANNEL_DMACTRL 0x24
+
+/* sync-area debug/peek registers, indexed per channel */
+#define HOST1X_SYNC_CF_SETUP(x) (0x3080 + (4 * (x)))
+
+#define HOST1X_SYNC_SYNCPT_BASE(x) (0x3600 + (4 * (x)))
+
+#define HOST1X_SYNC_CBREAD(x) (0x3720 + (4 * (x)))
+#define HOST1X_SYNC_CFPEEK_CTRL 0x374c
+#define HOST1X_SYNC_CFPEEK_READ 0x3750
+#define HOST1X_SYNC_CFPEEK_PTRS 0x3754
+#define HOST1X_SYNC_CBSTAT(x) (0x3758 + (4 * (x)))
+
+/* Extract the output-FIFO entry count (bits 28:24) from a FIFOSTAT value. */
+static inline unsigned nvhost_channel_fifostat_outfentries(u32 reg)
+{
+	return (reg >> 24) & 0x1f;
+}
+
+/* Build a DMACTRL value: bit0 = DMA stop, bit1 = GET reset, bit2 = init GET. */
+static inline u32 nvhost_channel_dmactrl(bool stop, bool get_rst, bool init_get)
+{
+	u32 v = stop ? 1 : 0;
+	if (get_rst)
+		v |= 2;
+	if (init_get)
+		v |= 4;
+	return v;
+}
+
+
+/* sync registers */
+#define NV_HOST1X_SYNCPT_NB_PTS 32
+#define NV_HOST1X_SYNCPT_NB_BASES 8
+#define NV_HOST1X_NB_MLOCKS 16
+#define HOST1X_CHANNEL_SYNC_REG_BASE 12288
+
+/* offsets within the sync aperture */
+enum {
+	HOST1X_SYNC_INTMASK = 0x4,
+	HOST1X_SYNC_INTC0MASK = 0x8,
+	HOST1X_SYNC_HINTSTATUS = 0x20,
+	HOST1X_SYNC_HINTMASK = 0x24,
+	HOST1X_SYNC_HINTSTATUS_EXT = 0x28,
+	HOST1X_SYNC_HINTMASK_EXT = 0x2c,
+	HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
+	HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0 = 0x50,
+	HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1 = 0x54,
+	HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60,
+	HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 = 0x68,
+	HOST1X_SYNC_USEC_CLK = 0x1a4,
+	HOST1X_SYNC_CTXSW_TIMEOUT_CFG = 0x1a8,
+	HOST1X_SYNC_IP_BUSY_TIMEOUT = 0x1bc,
+	HOST1X_SYNC_IP_READ_TIMEOUT_ADDR = 0x1c0,
+	HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR = 0x1c4,
+	HOST1X_SYNC_MLOCK_0 = 0x2c0,
+	HOST1X_SYNC_MLOCK_OWNER_0 = 0x340,
+	HOST1X_SYNC_SYNCPT_0 = 0x400,
+	HOST1X_SYNC_SYNCPT_INT_THRESH_0 = 0x500,
+	HOST1X_SYNC_SYNCPT_BASE_0 = 0x600,
+	HOST1X_SYNC_SYNCPT_CPU_INCR = 0x700
+};
+
+/* HINTSTATUS_EXT bit 30: a host IP read timed out */
+static inline bool nvhost_sync_hintstatus_ext_ip_read_int(u32 reg)
+{
+	return (reg & BIT(30)) != 0;
+}
+
+/* HINTSTATUS_EXT bit 31: a host IP write timed out */
+static inline bool nvhost_sync_hintstatus_ext_ip_write_int(u32 reg)
+{
+	return (reg & BIT(31)) != 0;
+}
+
+/* MLOCK_OWNER bit 0: owned by a channel */
+static inline bool nvhost_sync_mlock_owner_ch_owns(u32 reg)
+{
+	return (reg & BIT(0)) != 0;
+}
+
+/* MLOCK_OWNER bit 1: owned by the CPU */
+static inline bool nvhost_sync_mlock_owner_cpu_owns(u32 reg)
+{
+	return (reg & BIT(1)) != 0;
+}
+
+/* MLOCK_OWNER bits 11:8: owning channel id */
+static inline unsigned int nvhost_sync_mlock_owner_owner_chid(u32 reg)
+{
+	return (reg >> 8) & 0xf;
+}
+
+
+/* host class */
+enum {
+	NV_CLASS_HOST_INCR_SYNCPT = 0x0,
+	NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
+	NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
+	NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
+	NV_CLASS_HOST_INDOFF = 0x2d,
+	NV_CLASS_HOST_INDDATA = 0x2e
+};
+
+/* WAIT_SYNCPT operand: syncpt index in bits 31:24, 24-bit threshold below */
+static inline u32 nvhost_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return (indx << 24) | (threshold & 0xffffff);
+}
+
+/* WAIT_SYNCPT_BASE operand: syncpt index, base index and 16-bit offset */
+static inline u32 nvhost_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return (indx << 24) | (base_indx << 16) | offset;
+}
+
+/* INCR_SYNCPT_BASE operand: base index in bits 31:24, offset below */
+static inline u32 nvhost_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return (base_indx << 24) | offset;
+}
+
+/* module ids used in INDOFF indirect register accesses */
+enum {
+	NV_HOST_MODULE_HOST1X = 0,
+	NV_HOST_MODULE_MPE = 1,
+	NV_HOST_MODULE_GR3D = 6
+};
+
+/* INDOFF operand for an indirect register write (optionally auto-incrementing) */
+static inline u32 nvhost_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
+	if (auto_inc)
+		v |= BIT(27);
+	return v;
+}
+
+/* INDOFF operand for an indirect register read (optionally auto-incrementing) */
+static inline u32 nvhost_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = (mod_id << 18) | (offset << 2) | 1;
+	if (auto_inc)
+		v |= BIT(27);
+	return v;
+}
+
+
+/* cdma opcodes — opcode number in bits 31:28, operands below */
+static inline u32 nvhost_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+/* write @count words to consecutive registers starting at @offset */
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+/* write @count words, all to register @offset */
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+/* write one word per set bit of @mask, to @offset + bit position */
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+/* write the 16-bit immediate @value to register @offset */
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+/* continue fetching from @address (must be 16-byte aligned) */
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+/* fetch @count words from the buffer whose address follows the opcode */
+static inline u32 nvhost_opcode_gather(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | count;
+}
+
+/* gather that replays as a non-incrementing write to @offset (bit 15 set) */
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+/* gather that replays as an incrementing write to @offset (bits 15,14 set) */
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+/* a no-op: write zero words to register 0 */
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
+
+
+
+#endif /* __NVHOST_HARDWARE_H */
+
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+struct nvhost_channel;
+
+/* a saved/restorable hardware context for one channel's client module */
+struct nvhost_hwctx {
+	struct kref ref;
+
+	struct nvhost_channel *channel;
+	bool valid;			/* true once a save has been submitted */
+
+	/* context-save gather buffer */
+	struct nvmap_handle_ref *save;
+	u32 save_phys;
+	u32 save_size;
+	u32 save_incrs;			/* syncpt increments the save performs */
+	void *save_cpu_data;
+
+	/* context-restore gather buffer */
+	struct nvmap_handle_ref *restore;
+	u32 restore_phys;
+	u32 restore_size;
+	u32 restore_incrs;		/* syncpt increments the restore performs */
+};
+
+/* per-module ops for allocating and servicing hardware contexts */
+struct nvhost_hwctx_handler {
+	struct nvhost_hwctx * (*alloc) (struct nvhost_channel *ch);
+	void (*get) (struct nvhost_hwctx *ctx);
+	void (*put) (struct nvhost_hwctx *ctx);
+	void (*save_service) (struct nvhost_hwctx *ctx);
+};
+
+int nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h);
+int nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h);
+
+/* Select a context handler by channel name; modules without
+ * context state get a zeroed handler and return 0. */
+static inline int nvhost_hwctx_handler_init(struct nvhost_hwctx_handler *h,
+			const char *module)
+{
+	if (strcmp(module, "gr3d") == 0)
+		return nvhost_3dctx_handler_init(h);
+	else if (strcmp(module, "mpe") == 0)
+		return nvhost_mpectx_handler_init(h);
+
+	return 0;
+}
+
+/* one entry of a context save/restore register table */
+struct hwctx_reginfo {
+	unsigned int offset:12;
+	unsigned int count:16;
+	unsigned int type:2;
+};
+
+enum {
+	HWCTX_REGINFO_DIRECT = 0,
+	HWCTX_REGINFO_INDIRECT,
+	HWCTX_REGINFO_INDIRECT_OFFSET,
+	HWCTX_REGINFO_INDIRECT_DATA
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+/* recover the enclosing nvhost_master from its embedded intr member */
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+
+
+/*** HW sync point threshold interrupt management ***/
+
+/* Program the CPU0 interrupt threshold for sync point @id (16-bit compare). */
+static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
+{
+	thresh &= 0xffff;
+	writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
+}
+
+/* Enable the CPU0 threshold interrupt for sync point @id. */
+static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
+{
+	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
+}
+
+
+/*** Wait list management ***/
+
+/* one entry on a sync point's wait queue */
+struct nvhost_waitlist {
+	struct list_head list;
+	struct kref refcount;
+	u32 thresh;			/* sync point value being waited for */
+	enum nvhost_intr_action action;
+	atomic_t state;			/* enum waitlist_state, see below */
+	void *data;			/* action-specific payload */
+	int count;			/* consolidated submit-complete count */
+};
+
+/*
+ * Waiter lifecycle.  The isr thread advances PENDING->REMOVED->HANDLED;
+ * nvhost_intr_put_ref advances PENDING->CANCELLED, and a cancelled
+ * waiter is finished off (CANCELLED->HANDLED) by whoever sees it next.
+ * Transitions are done with atomic_inc/atomic_cmpxchg on ->state.
+ */
+enum waitlist_state
+{
+	WLS_PENDING,
+	WLS_REMOVED,
+	WLS_CANCELLED,
+	WLS_HANDLED
+};
+
+/* kref release: all references dropped, free the waiter */
+static void waiter_release(struct kref *kref)
+{
+	kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ * (signed difference handles sync point wraparound)
+ */
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+				struct list_head *queue)
+{
+	struct nvhost_waitlist *pos;
+	u32 thresh = waiter->thresh;
+
+	/* walk backwards: insert after the last entry not above our threshold */
+	list_for_each_entry_reverse(pos, queue, list)
+		if ((s32)(pos->thresh - thresh) <= 0) {
+			list_add(&waiter->list, &pos->list);
+			return false;
+		}
+
+	list_add(&waiter->list, queue);
+	return true;
+}
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ *
+ * Called with the syncpt lock held.  @sync is the current sync point
+ * value; the queue is threshold-sorted, so iteration stops at the
+ * first waiter whose threshold has not been reached (wrap-safe signed
+ * compare).  Adjacent SUBMIT_COMPLETE waiters for the same channel are
+ * folded into one entry by bumping ->count.
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+	struct list_head *dest;
+	struct nvhost_waitlist *waiter, *next, *prev;
+
+	list_for_each_entry_safe(waiter, next, head, list) {
+		if ((s32)(waiter->thresh - sync) > 0)
+			break;
+
+		dest = completed + waiter->action;
+
+		/* consolidate submit cleanups */
+		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+			&& !list_empty(dest)) {
+			prev = list_entry(dest->prev,
+					struct nvhost_waitlist, list);
+			if (prev->data == waiter->data) {
+				prev->count++;
+				dest = NULL;	/* folded; drop this waiter */
+			}
+		}
+
+		/* PENDING->REMOVED or CANCELLED->HANDLED */
+		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+			list_del(&waiter->list);
+			kref_put(&waiter->refcount, waiter_release);
+		} else {
+			list_move_tail(&waiter->list, dest);
+		}
+	}
+}
+
+/* SUBMIT_COMPLETE: retire finished cdma work and drop the busy refs
+ * (one per consolidated submit, see remove_completed_waiters()). */
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+	struct nvhost_channel *channel = waiter->data;
+	int nr_completed = waiter->count;
+
+	nvhost_cdma_update(&channel->cdma);
+	nvhost_module_idle_mult(&channel->mod, nr_completed);
+}
+
+/* CTXSAVE: service the completed context save and drop the ctx ref
+ * taken when the save was scheduled. */
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+	struct nvhost_hwctx *hwctx = waiter->data;
+	struct nvhost_channel *channel = hwctx->channel;
+
+	channel->ctxhandler.save_service(hwctx);
+	channel->ctxhandler.put(hwctx);
+}
+
+/* WAKEUP: wake a plain wait queue */
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+
+	wake_up(wq);
+}
+
+/* WAKEUP_INTERRUPTIBLE: wake an interruptible wait queue */
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+
+	wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+/* dispatch table, indexed by enum nvhost_intr_action — order must match */
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+	action_submit_complete,
+	action_ctxsave,
+	action_wakeup,
+	action_wakeup_interruptible,
+};
+
+/*
+ * Run the action handler for every completed waiter, per action list,
+ * then mark each waiter HANDLED and drop its list reference.  Runs
+ * without the syncpt lock — the waiters were already detached.
+ */
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+	struct list_head *head = completed;
+	int i;
+
+	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+		action_handler handler = action_handlers[i];
+		struct nvhost_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next, head, list) {
+			list_del(&waiter->list);
+			handler(waiter);
+			/* each waiter here must have been REMOVED by the isr thread */
+			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
+			kref_put(&waiter->refcount, waiter_release);
+		}
+	}
+}
+
+
+/*** Interrupt service functions ***/
+
+/**
+ * Host1x intterrupt service function
+ * Handles read / write failures
+ */
+/**
+ * Host1x interrupt service function
+ * Handles read / write failures: logs the faulting address reported by
+ * the hardware, then acks both status registers.
+ */
+static irqreturn_t host1x_isr(int irq, void *dev_id)
+{
+	struct nvhost_intr *intr = dev_id;
+	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+	u32 stat;
+	u32 ext_stat;
+	u32 addr;
+
+	stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
+	ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+
+	if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
+		addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
+		pr_err("Host read timeout at address %x\n", addr);
+	}
+
+	if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
+		addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
+		pr_err("Host write timeout at address %x\n", addr);
+	}
+
+	/* write-1-to-clear the handled status bits */
+	writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+	writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context:
+ * masks and acks this sync point's interrupt, then defers the real
+ * work to the threaded handler (syncpt_thresh_fn).
+ */
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+	struct nvhost_intr_syncpt *syncpt = dev_id;
+	unsigned int id = syncpt->id;
+	struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+						syncpt[id]);
+	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+	writel(BIT(id),
+		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+	writel(BIT(id),
+		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+	return IRQ_WAKE_THREAD;
+}
+
+
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context:
+ * refreshes the shadowed sync point value, detaches all completed
+ * waiters, re-arms the interrupt for the next pending threshold,
+ * and finally runs the handlers outside the lock.
+ */
+static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
+{
+	struct nvhost_intr_syncpt *syncpt = dev_id;
+	unsigned int id = syncpt->id;
+	struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+						syncpt[id]);
+	struct nvhost_master *dev = intr_to_dev(intr);
+	void __iomem *sync_regs = dev->sync_aperture;
+
+	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+	u32 sync;
+	unsigned int i;
+
+	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+		INIT_LIST_HEAD(completed + i);
+
+	sync = nvhost_syncpt_update_min(&dev->syncpt, id);
+
+	spin_lock(&syncpt->lock);
+
+	remove_completed_waiters(&syncpt->wait_head, sync, completed);
+
+	/* re-arm for the next outstanding threshold, if any */
+	if (!list_empty(&syncpt->wait_head)) {
+		u32 thresh = list_first_entry(&syncpt->wait_head,
+				struct nvhost_waitlist, list)->thresh;
+
+		set_syncpt_threshold(sync_regs, id, thresh);
+		enable_syncpt_interrupt(sync_regs, id);
+	}
+
+	spin_unlock(&syncpt->lock);
+
+	run_handlers(completed);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * lazily request a syncpt's irq
+ */
+/*
+ * Lazily request a sync point's threaded irq on first use.
+ * Returns 0 on success (or if the irq was already requested) or the
+ * request_threaded_irq() error.
+ *
+ * Fix: 'err' was uninitialized, so a second call (irq_requested
+ * already set) returned stack garbage to nvhost_intr_add_action(),
+ * which could spuriously fail or leak the waiter path.
+ */
+static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+	static DEFINE_MUTEX(mutex);
+	int err = 0;
+
+	mutex_lock(&mutex);
+	if (!syncpt->irq_requested) {
+		err = request_threaded_irq(syncpt->irq,
+					syncpt_thresh_isr, syncpt_thresh_fn,
+					0, syncpt->thresh_irq_name, syncpt);
+		if (!err)
+			syncpt->irq_requested = 1;
+	}
+	mutex_unlock(&mutex);
+	return err;
+}
+
+
+/*** Main API ***/
+
+/*
+ * Schedule @action to run when sync point @id reaches @thresh.
+ * @data is the action-specific payload; if @ref is non-NULL an extra
+ * reference is taken and *ref must later be released with
+ * nvhost_intr_put_ref().  Returns 0 or a negative errno.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+			enum nvhost_intr_action action, void *data,
+			void **ref)
+{
+	struct nvhost_waitlist *waiter;
+	struct nvhost_intr_syncpt *syncpt;
+	void __iomem *sync_regs;
+	int queue_was_empty;
+	int err;
+
+	/* create and initialize a new waiter */
+	waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
+	if (!waiter)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&waiter->list);
+	kref_init(&waiter->refcount);
+	if (ref)
+		kref_get(&waiter->refcount);
+	waiter->thresh = thresh;
+	waiter->action = action;
+	atomic_set(&waiter->state, WLS_PENDING);
+	waiter->data = data;
+	waiter->count = 1;
+
+	BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
+	syncpt = intr->syncpt + id;
+	sync_regs = intr_to_dev(intr)->sync_aperture;
+
+	spin_lock(&syncpt->lock);
+
+	/* lazily request irq for this sync point */
+	if (!syncpt->irq_requested) {
+		/* drop the spinlock: request_syncpt_irq() sleeps */
+		spin_unlock(&syncpt->lock);
+
+		err = request_syncpt_irq(syncpt);
+		if (err) {
+			kfree(waiter);
+			return err;
+		}
+
+		spin_lock(&syncpt->lock);
+	}
+
+	queue_was_empty = list_empty(&syncpt->wait_head);
+
+	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+		/* added at head of list - new threshold value */
+		set_syncpt_threshold(sync_regs, id, thresh);
+
+		/* added as first waiter - enable interrupt */
+		if (queue_was_empty)
+			enable_syncpt_interrupt(sync_regs, id);
+	}
+
+	spin_unlock(&syncpt->lock);
+
+	if (ref)
+		*ref = waiter;
+	return 0;
+}
+
+/*
+ * Release a waiter reference obtained from nvhost_intr_add_action().
+ * Tries to cancel the waiter (PENDING->CANCELLED); if the isr thread
+ * has it in flight (REMOVED) we yield and retry until it is HANDLED.
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
+{
+	struct nvhost_waitlist *waiter = ref;
+
+	while (atomic_cmpxchg(&waiter->state,
+				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+		schedule();
+
+	kref_put(&waiter->refcount, waiter_release);
+}
+
+
+/*** Init & shutdown ***/
+
+/*
+ * Set up interrupt handling: request the general host1x irq
+ * (@irq_gen) immediately and prepare per-sync-point state; the
+ * per-sync-point irqs (@irq_sync + id, assumed contiguous) are
+ * requested lazily on first use.  Returns 0 or a negative errno.
+ */
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+	unsigned int id;
+	struct nvhost_intr_syncpt *syncpt;
+	int err;
+
+	err = request_irq(irq_gen, host1x_isr, 0, "host_status", intr);
+	if (err)
+		goto fail;
+	intr->host1x_irq = irq_gen;
+	intr->host1x_isr_started = true;
+
+	for (id = 0, syncpt = intr->syncpt;
+	     id < NV_HOST1X_SYNCPT_NB_PTS;
+	     ++id, ++syncpt) {
+		syncpt->id = id;
+		syncpt->irq = irq_sync + id;
+		syncpt->irq_requested = 0;
+		spin_lock_init(&syncpt->lock);
+		INIT_LIST_HEAD(&syncpt->wait_head);
+		snprintf(syncpt->thresh_irq_name,
+			sizeof(syncpt->thresh_irq_name),
+			"%s", nvhost_syncpt_name(id));
+	}
+
+	return 0;
+
+fail:
+	nvhost_intr_deinit(intr);
+	return err;
+}
+
+/*
+ * Tear down interrupt handling: reap cancelled waiters, free the
+ * per-sync-point irqs that were requested, then the general irq.
+ * Any non-cancelled waiter still queued at this point is a driver
+ * bug (kernel-style fixes: printk without a log level -> pr_err,
+ * C++ comments, BUG_ON(1) -> BUG(), "if(" spacing).
+ */
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+	unsigned int id;
+	struct nvhost_intr_syncpt *syncpt;
+
+	for (id = 0, syncpt = intr->syncpt;
+	     id < NV_HOST1X_SYNCPT_NB_PTS;
+	     ++id, ++syncpt) {
+		struct nvhost_waitlist *waiter, *next;
+		list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
+			if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
+				== WLS_CANCELLED) {
+				list_del(&waiter->list);
+				kref_put(&waiter->refcount, waiter_release);
+			}
+		}
+
+		/* output diagnostics: all waiters should be gone by now */
+		if (!list_empty(&syncpt->wait_head)) {
+			pr_err("%s: id=%u still has waiters\n", __func__, id);
+			BUG();
+		}
+
+		if (syncpt->irq_requested)
+			free_irq(syncpt->irq, syncpt);
+	}
+
+	if (intr->host1x_isr_started) {
+		free_irq(intr->host1x_irq, intr);
+		intr->host1x_isr_started = false;
+	}
+}
+
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz)
+{
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+	/* write microsecond clock register */
+ writel((hz + 1000000 - 1)/1000000, sync_regs + HOST1X_SYNC_USEC_CLK);
+
+ /* disable the ip_busy_timeout. this prevents write drops, etc.
+ * there's no real way to recover from a hung client anyway.
+ */
+ writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+	/* increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on ap20.
+ */
+ writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+ /* disable interrupts for both cpu's */
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0);
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1);
+
+ /* masking all of the interrupts actually means "enable" */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);
+
+ /* enable HOST_INT_C0MASK */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);
+
+ /* enable HINTMASK_EXT */
+ writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);
+
+ /* enable IP_READ_INT and IP_WRITE_INT */
+ writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include "nvhost_hardware.h"
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+ /**
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /**
+ * Save a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXSAVE,
+
+ /**
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP,
+
+ /**
+	 * Wake up an interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr_syncpt {
+ u8 id;
+ u8 irq_requested;
+ u16 irq;
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+};
+
+struct nvhost_intr {
+ struct nvhost_intr_syncpt syncpt[NV_HOST1X_SYNCPT_NB_PTS];
+ int host1x_irq;
+ bool host1x_isr_started;
+};
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_mpectx.c
+ *
+ * Tegra Graphics Host MPE HW Context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* Placeholder */
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_syncpt.h"
+#include "dev.h"
+
+#define client_managed(id) (BIT(id) & NVSYNCPTS_CLIENT_MANAGED)
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+
+static bool check_max(struct nvhost_syncpt *sp, u32 id, u32 real)
+{
+ u32 max;
+ if (client_managed(id))
+ return true;
+ smp_rmb();
+ max = (u32)atomic_read(&sp->max_val[id]);
+ return ((s32)(max - real) >= 0);
+}
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ int min;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void reset_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ writel(sp->base_val[id],
+ dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ sp->base_val[id] = readl(dev->sync_aperture +
+ (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++)
+ reset_syncpt(sp, i);
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ reset_syncpt_wait_base(sp, i);
+ wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+ u32 i;
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ if (client_managed(i))
+ nvhost_syncpt_update_min(sp, i);
+ else
+ BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+ }
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ read_syncpt_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 old, live;
+
+ do {
+ smp_rmb();
+ old = (u32)atomic_read(&sp->min_val[id]);
+ live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+ } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+ BUG_ON(!check_max(sp, id, live));
+
+ return live;
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ val = nvhost_syncpt_update_min(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ BUG_ON(!nvhost_module_powered(&dev->mod));
+ BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
+ writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
+ wmb();
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ nvhost_syncpt_incr_max(sp, id, 1);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ nvhost_syncpt_cpu_incr(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+ u32 thresh, u32 timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ int err = 0;
+
+ BUG_ON(!check_max(sp, id, thresh));
+
+ /* first check cache */
+ if (nvhost_syncpt_min_cmp(sp, id, thresh))
+ return 0;
+
+ /* keep host alive */
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+
+ if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
+ /* try to read from register */
+ u32 val = nvhost_syncpt_update_min(sp, id);
+ if ((s32)(val - thresh) >= 0)
+ goto done;
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ nvhost_syncpt_min_cmp(sp, id, thresh),
+ check);
+ if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ if (timeout != NVHOST_NO_TIMEOUT)
+ timeout -= check;
+ if (timeout) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "syncpoint id %d (%s) stuck waiting %d\n",
+ id, nvhost_syncpt_name(id), thresh);
+ nvhost_syncpt_debug(sp);
+ }
+	}
+ nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
+
+done:
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return err;
+}
+
+static const char *s_syncpt_names[32] = {
+ "gfx_host", "", "", "", "", "", "", "", "", "", "", "",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4", "vi_isp_5",
+ "2d_0", "2d_1",
+ "", "",
+ "3d", "mpe", "disp0", "disp1", "vblank0", "vblank1", "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt", "dsi"
+};
+
+const char *nvhost_syncpt_name(u32 id)
+{
+	BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
+ return s_syncpt_names[id];
+}
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ u32 max = nvhost_syncpt_read_max(sp, i);
+ if (!max)
+ continue;
+ dev_info(&syncpt_to_dev(sp)->pdev->dev,
+ "id %d (%s) min %d max %d\n",
+ i, nvhost_syncpt_name(i),
+ nvhost_syncpt_update_min(sp, i), max);
+
+ }
+}
+
+/* returns true, if a <= b < c using wrapping comparison */
+static inline bool nvhost_syncpt_is_between(u32 a, u32 b, u32 c)
+{
+ return b-a < c-a;
+}
+
+/* returns true, if x >= y (mod 1 << 32) */
+static bool nvhost_syncpt_wrapping_comparison(u32 x, u32 y)
+{
+ return nvhost_syncpt_is_between(y, x, (1UL<<31UL)+y);
+}
+
+/* check for old WAITs to be removed (avoiding a wrap) */
+int nvhost_syncpt_wait_check(struct nvmap_client *nvmap,
+ struct nvhost_syncpt *sp, u32 waitchk_mask,
+ struct nvhost_waitchk *waitp, u32 waitchks)
+{
+ u32 idx;
+ int err = 0;
+
+ /* get current syncpt values */
+ for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
+ if (BIT(idx) & waitchk_mask) {
+ nvhost_syncpt_update_min(sp, idx);
+ }
+ }
+
+ BUG_ON(!waitp);
+
+ /* compare syncpt vs wait threshold */
+ while (waitchks) {
+ u32 syncpt, override;
+
+		BUG_ON(waitp->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
+
+ syncpt = atomic_read(&sp->min_val[waitp->syncpt_id]);
+ if (nvhost_syncpt_wrapping_comparison(syncpt, waitp->thresh)) {
+
+ /* wait has completed already, so can be removed */
+ dev_dbg(&syncpt_to_dev(sp)->pdev->dev,
+ "drop WAIT id %d (%s) thresh 0x%x, syncpt 0x%x\n",
+ waitp->syncpt_id, nvhost_syncpt_name(waitp->syncpt_id),
+ waitp->thresh, syncpt);
+
+ /* move wait to a kernel reserved syncpt (that's always 0) */
+ override = nvhost_class_host_wait_syncpt(NVSYNCPT_GRAPHICS_HOST, 0);
+
+ /* patch the wait */
+ err = nvmap_patch_wait(nvmap,
+ (struct nvmap_handle *)waitp->mem,
+ waitp->offset, override);
+ if (err)
+ break;
+ }
+ waitchks--;
+ waitp++;
+ }
+ return err;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+#include <asm/atomic.h>
+
+#include "nvhost_hardware.h"
+
+#define NVSYNCPT_GRAPHICS_HOST (0)
+#define NVSYNCPT_VI_ISP_0 (12)
+#define NVSYNCPT_VI_ISP_1 (13)
+#define NVSYNCPT_VI_ISP_2 (14)
+#define NVSYNCPT_VI_ISP_3 (15)
+#define NVSYNCPT_VI_ISP_4 (16)
+#define NVSYNCPT_VI_ISP_5 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_DISP0 (24)
+#define NVSYNCPT_DISP1 (25)
+#define NVSYNCPT_VBLANK0 (26)
+#define NVSYNCPT_VBLANK1 (27)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+#define NVSYNCPT_DSI (31)
+#define NVSYNCPT_INVALID (-1)
+
+/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
+/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
+/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
+/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) | BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1))
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+struct nvhost_syncpt {
+ atomic_t min_val[NV_HOST1X_SYNCPT_NB_PTS];
+ atomic_t max_val[NV_HOST1X_SYNCPT_NB_PTS];
+ u32 base_val[NV_HOST1X_SYNCPT_NB_BASES];
+};
+
+/**
+ * Updates the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+ u32 id, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Updated the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+ u32 id, u32 val)
+{
+ atomic_set(&sp->max_val[id], val);
+ smp_wmb();
+ return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val[id]);
+}
+
+/**
+ * Returns true if syncpoint has reached threshold
+ */
+static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
+ u32 id, u32 thresh)
+{
+ u32 cur;
+ smp_rmb();
+ cur = (u32)atomic_read(&sp->min_val[id]);
+ return ((s32)(cur - thresh) >= 0);
+}
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ max = atomic_read(&sp->max_val[id]);
+ return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+ return nvhost_syncpt_wait_timeout(sp, id, thresh, MAX_SCHEDULE_TIMEOUT);
+}
+
+int nvhost_syncpt_wait_check(struct nvmap_client *nvmap,
+ struct nvhost_syncpt *sp, u32 mask,
+ struct nvhost_waitchk *waitp, u32 num_waits);
+
+const char *nvhost_syncpt_name(u32 id);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+#endif
--- /dev/null
+obj-y += nvmap.o
+obj-y += nvmap_dev.o
+obj-y += nvmap_handle.o
+obj-y += nvmap_heap.o
+obj-y += nvmap_ioctl.o
+obj-${CONFIG_NVMAP_RECLAIM_UNPINNED_VM} += nvmap_mru.o
\ No newline at end of file
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap.c
+ *
+ * Memory manager for Tegra GPU
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMAP_HANDLE_VISITED (0x1ul << 31)
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void map_iovmm_area(struct nvmap_handle *h)
+{
+ tegra_iovmm_addr_t va;
+ unsigned long i;
+
+ BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+ BUG_ON(h->size & ~PAGE_MASK);
+ WARN_ON(!h->pgalloc.dirty);
+
+ for (va = h->pgalloc.area->iovm_start, i = 0;
+ va < (h->pgalloc.area->iovm_start + h->size);
+ i++, va += PAGE_SIZE) {
+ BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
+ tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
+ page_to_pfn(h->pgalloc.pages[i]));
+ }
+ h->pgalloc.dirty = false;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without racing with a second stream. handle should
+ * have nvmap_handle_get (or nvmap_validate_get) called before calling
+ * this function. */
+static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ struct tegra_iovmm_area *area;
+ BUG_ON(!h->alloc);
+
+ if (atomic_inc_return(&h->pin) == 1) {
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ area = nvmap_handle_iovmm(client, h);
+ if (!area) {
+ /* no race here, inside the pin mutex */
+ atomic_dec(&h->pin);
+ return -ENOMEM;
+ }
+ if (area != h->pgalloc.area)
+ h->pgalloc.dirty = true;
+ h->pgalloc.area = area;
+ }
+ }
+ return 0;
+}
+
+static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ ret = pin_locked(client, h);
+
+ if (ret) {
+ ret = wait_event_interruptible(client->share->pin_wait,
+ !pin_locked(client, h));
+ }
+
+ return ret ? -EINTR : 0;
+
+}
+
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ nvmap_mru_lock(client->share);
+
+ if (atomic_read(&h->pin) == 0) {
+ nvmap_err(client, "%s unpinning unpinned handle %p\n",
+ current->group_leader->comm, h);
+ nvmap_mru_unlock(client->share);
+ return 0;
+ }
+
+ BUG_ON(!h->alloc);
+
+ if (!atomic_dec_return(&h->pin)) {
+ if (h->heap_pgalloc && h->pgalloc.area) {
+ /* if a secure handle is clean (i.e., mapped into
+ * IOVMM, it needs to be zapped on unpin. */
+ if (h->secure && !h->pgalloc.dirty) {
+ tegra_iovmm_zap_vm(h->pgalloc.area);
+ h->pgalloc.dirty = true;
+ }
+ nvmap_mru_insert_locked(client->share, h);
+ ret = 1;
+ }
+ }
+
+ nvmap_mru_unlock(client->share);
+
+ nvmap_handle_put(h);
+ return ret;
+}
+
+static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle *h;
+ int w;
+
+ h = nvmap_validate_get(client, id);
+ if (unlikely(!h)) {
+ nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
+ current->group_leader->comm, (void *)id);
+ return 0;
+ }
+
+ nvmap_err(client, "%s unpinning unreferenced handle %p\n",
+ current->group_leader->comm, h);
+ WARN_ON(1);
+
+ w = handle_unpin(client, h);
+ nvmap_handle_put(h);
+ return w;
+}
+
+void nvmap_unpin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ unsigned int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (!ids[i])
+ continue;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ struct nvmap_handle *h = ref->handle;
+ int e = atomic_add_unless(&ref->pin, -1, 0);
+
+ nvmap_ref_unlock(client);
+
+ if (!e) {
+ nvmap_err(client, "%s unpinning unpinned "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ } else {
+ do_wake |= handle_unpin(client, h);
+ }
+ } else {
+ nvmap_ref_unlock(client);
+ if (client->super)
+ do_wake |= handle_unpin_noref(client, ids[i]);
+ else
+ nvmap_err(client, "%s unpinning invalid "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ }
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+/* pins a list of handle_ref objects; same conditions apply as to
+ * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ int ret = 0;
+ int cnt = 0;
+ unsigned int i;
+ struct nvmap_handle **h = (struct nvmap_handle **)ids;
+ struct nvmap_handle_ref *ref;
+
+ /* to optimize for the common case (client provided valid handle
+ * references and the pin succeeds), increment the handle_ref pin
+ * count during validation. in error cases, the tree will need to
+ * be re-walked, since the handle_ref is discarded so that an
+ * allocation isn't required. if a handle_ref is not found,
+ * locally validate that the caller has permission to pin the handle;
+ * handle_refs are not created in this case, so it is possible that
+ * if the caller crashes after pinning a global handle, the handle
+ * will be permanently leaked. */
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr && !ret; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ atomic_inc(&ref->pin);
+ nvmap_handle_get(h[i]);
+ } else {
+ struct nvmap_handle *verify;
+ nvmap_ref_unlock(client);
+ verify = nvmap_validate_get(client, ids[i]);
+ if (verify)
+ nvmap_warn(client, "%s pinning unreferenced "
+ "handle %p\n",
+ current->group_leader->comm, h[i]);
+ else
+ ret = -EPERM;
+ nvmap_ref_lock(client);
+ }
+ }
+ nvmap_ref_unlock(client);
+
+ nr = i;
+
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&client->share->pin_lock);
+ if (WARN_ON(ret))
+ goto out;
+
+ for (cnt = 0; cnt < nr && !ret; cnt++) {
+ ret = wait_pin_locked(client, h[cnt]);
+ }
+ mutex_unlock(&client->share->pin_lock);
+
+ if (ret) {
+ int do_wake = 0;
+
+ for (i = 0; i < cnt; i++)
+ do_wake |= handle_unpin(client, h[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ ret = -EINTR;
+ } else {
+ for (i = 0; i < nr; i++) {
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ map_iovmm_area(h[i]);
+ }
+ }
+
+out:
+ if (ret) {
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (!ref) {
+ nvmap_warn(client, "%s freed handle %p "
+ "during pinning\n",
+ current->group_leader->comm,
+ (void *)ids[i]);
+ continue;
+ }
+ atomic_dec(&ref->pin);
+ }
+ nvmap_ref_unlock(client);
+
+ for (i = cnt; i < nr; i++)
+ nvmap_handle_put(h[i]);
+ }
+
+ return ret;
+}
+
+static unsigned long handle_phys(struct nvmap_handle *h)
+{
+ u32 addr;
+
+ if (h->heap_pgalloc && h->pgalloc.contig) {
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ } else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ addr = h->pgalloc.area->iovm_start;
+ } else {
+ addr = h->carveout->base;
+ }
+
+ return addr;
+}
+
+/* stores the physical address (+offset) of each handle relocation entry
+ * into its output location. see nvmap_pin_array for more details.
+ *
+ * each entry in arr (i.e., each relocation request) specifies two handles:
+ * the handle to pin (pin), and the handle where the address of pin should be
+ * written (patch). in pseudocode, this loop basically looks like:
+ *
+ * for (i = 0; i < nr; i++) {
+ * (pin, pin_offset, patch, patch_offset) = arr[i];
+ * patch[patch_offset] = address_of(pin) + pin_offset;
+ * }
+ */
+static int nvmap_reloc_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle *gather)
+{
+ struct nvmap_handle *last_patch = NULL;
+ unsigned int last_pfn = 0;
+ pte_t **pte;
+ void *addr;
+ int i;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle *patch;
+ struct nvmap_handle *pin;
+ unsigned long reloc_addr;
+ unsigned long phys;
+ unsigned int pfn;
+
+ /* all of the handles are validated and get'ted prior to
+ * calling this function, so casting is safe here */
+ pin = (struct nvmap_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ patch = nvmap_get_handle_id(client, arr[i].patch_mem);
+ if (!patch) {
+ nvmap_free_pte(client->dev, pte);
+ return -EPERM;
+ }
+ last_patch = patch;
+ }
+
+ if (patch->heap_pgalloc) {
+ unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (arr[i].patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + arr[i].patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+ if (pfn != last_pfn) {
+ pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
+ unsigned long kaddr = (unsigned long)addr;
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+ last_pfn = pfn;
+ }
+
+ reloc_addr = handle_phys(pin) + arr[i].pin_offset;
+ __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
+ }
+
+ nvmap_free_pte(client->dev, pte);
+
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ wmb();
+
+ return 0;
+}
+
+static int nvmap_validate_get_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle **h)
+{
+ int i;
+ int ret = 0;
+ int count = 0;
+
+ nvmap_ref_lock(client);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (need_resched()) {
+ nvmap_ref_unlock(client);
+ schedule();
+ nvmap_ref_lock(client);
+ }
+
+ ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);
+
+ if (!ref)
+			nvmap_warn(client, "failed to validate id\n");
+ else if (!ref->handle)
+ nvmap_warn(client, "id had no associated handle\n");
+ else if (!ref->handle->alloc)
+ nvmap_warn(client, "handle had no allocation\n");
+
+ if (!ref || !ref->handle || !ref->handle->alloc) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* a handle may be referenced multiple times in arr, but
+ * it will only be pinned once; this ensures that the
+ * minimum number of sync-queue slots in the host driver
+ * are dedicated to storing unpin lists, which allows
+ * for greater parallelism between the CPU and graphics
+ * processor */
+ if (ref->handle->flags & NVMAP_HANDLE_VISITED)
+ continue;
+
+ ref->handle->flags |= NVMAP_HANDLE_VISITED;
+
+ h[count] = nvmap_handle_get(ref->handle);
+ BUG_ON(!h[count]);
+ count++;
+ }
+
+ nvmap_ref_unlock(client);
+
+ if (ret) {
+ for (i = 0; i < count; i++) {
+ h[i]->flags &= ~NVMAP_HANDLE_VISITED;
+ nvmap_handle_put(h[i]);
+ }
+ }
+
+ return ret ?: count;
+}
+
+/* a typical mechanism host1x clients use for using the Tegra graphics
+ * processor is to build a command buffer which contains relocatable
+ * memory handle commands, and rely on the kernel to convert these in-place
+ * to addresses which are understood by the GPU hardware.
+ *
+ * this is implemented by having clients provide a sideband array
+ * of relocatable handles (+ offsets) and the location in the command
+ * buffer handle to patch with the GPU address when the client submits
+ * its command buffer to the host1x driver.
+ *
+ * the host driver also uses this relocation mechanism internally to
+ * relocate the client's (unpinned) command buffers into host-addressable
+ * memory.
+ *
+ * @client: nvmap_client which should be used for validation; should be
+ * owned by the process which is submitting command buffers
+ * @gather: special handle for relocated command buffer outputs used
+ * internally by the host driver. if this handle is encountered
+ * as an output handle in the relocation array, it is assumed
+ * to be a known-good output and is not validated.
+ * @arr: array of ((relocatable handle, offset), (output handle, offset))
+ * tuples.
+ * @nr: number of entries in arr
+ * @unique_arr: list of nvmap_handle objects which were pinned by
+ * nvmap_pin_array. must be unpinned by the caller after the
+ * command buffers referenced in gather have completed.
+ */
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique_arr)
+{
+ int count = 0;
+ int pinned = 0;
+ int ret = 0;
+ int i;
+
+ /* pin_lock serializes all pin/unpin operations on this share */
+ if (mutex_lock_interruptible(&client->share->pin_lock)) {
+ nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
+ current->group_leader->comm);
+ return -EINTR;
+ }
+
+ /* dedupe arr into unique_arr; validation takes a reference on each
+ * unique handle (dropped below on the error path) */
+ count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
+ if (count < 0) {
+ mutex_unlock(&client->share->pin_lock);
+ nvmap_warn(client, "failed to validate pin array\n");
+ return count;
+ }
+
+ /* clear the per-call marker used to detect duplicates --
+ * NOTE(review): confirm against nvmap_validate_get_pin_array */
+ for (i = 0; i < count; i++)
+ unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;
+
+ /* pin every unique handle; stop at the first failure */
+ for (pinned = 0; pinned < count && !ret; pinned++)
+ ret = wait_pin_locked(client, unique_arr[pinned]);
+
+ mutex_unlock(&client->share->pin_lock);
+
+ if (!ret)
+ ret = nvmap_reloc_pin_array(client, arr, nr, gather);
+
+ if (WARN_ON(ret)) {
+ int do_wake = 0;
+
+ /* error: drop the validation references on handles that were
+ * never pinned, and undo the pins performed so far */
+ for (i = pinned; i < count; i++)
+ nvmap_handle_put(unique_arr[i]);
+
+ for (i = 0; i < pinned; i++)
+ do_wake |= handle_unpin(client, unique_arr[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ return ret;
+ } else {
+ /* success: refresh any IOVMM mappings that were reclaimed
+ * while the handles were unpinned (pgalloc.dirty) */
+ for (i = 0; i < count; i++) {
+ if (unique_arr[i]->heap_pgalloc &&
+ unique_arr[i]->pgalloc.dirty)
+ map_iovmm_area(unique_arr[i]);
+ }
+ }
+
+ return count;
+}
+
+/* Pin @ref's handle and return the physical (or device-visible) address
+ * of its backing memory; on failure a negative errno is returned encoded
+ * in the unsigned long return value. */
+unsigned long nvmap_pin(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ unsigned long phys;
+ int ret = 0;
+
+ h = nvmap_handle_get(ref->handle);
+ if (WARN_ON(!h))
+ return -EINVAL;
+
+ /* count the pin on the client-local ref so destroy_client() can
+ * undo it if the client dies with the handle still pinned */
+ atomic_inc(&ref->pin);
+
+ if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
+ ret = -EINTR;
+ } else {
+ ret = wait_pin_locked(client, h);
+ mutex_unlock(&client->share->pin_lock);
+ }
+
+ if (ret) {
+ /* roll back the ref pin count and the handle reference */
+ atomic_dec(&ref->pin);
+ nvmap_handle_put(h);
+ } else {
+ /* re-establish the IOVMM mapping if it was reclaimed while
+ * the handle was unpinned */
+ if (h->heap_pgalloc && h->pgalloc.dirty)
+ map_iovmm_area(h);
+ phys = handle_phys(h);
+ }
+
+ return ret ?: phys;
+}
+
+/* Look up @id in client @c and return the physical address of the
+ * handle's backing storage, or -EPERM (as an unsigned long) when the id
+ * is not valid for this client. */
+unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ unsigned long phys = -EPERM;
+ struct nvmap_handle *h = nvmap_get_handle_id(c, id);
+
+ if (h) {
+ phys = handle_phys(h);
+ nvmap_handle_put(h);
+ }
+
+ return phys;
+}
+
+/* Drop one pin on @ref; wake waiters on the share's pin_wait queue when
+ * handle_unpin() reports that a wakeup is needed. */
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h = ref->handle;
+
+ atomic_dec(&ref->pin);
+ if (handle_unpin(client, h))
+ wake_up(&client->share->pin_wait);
+}
+
+/* Unpin each of the @nr handles in @h, issuing at most one wakeup at the
+ * end if any unpin requested it. NULL entries are skipped with a WARN. */
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ bool wake = false;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ if (WARN_ON(!h[i]))
+ continue;
+ if (handle_unpin(client, h[i]))
+ wake = true;
+ }
+
+ if (wake)
+ wake_up(&client->share->pin_wait);
+}
+
+/* Map the memory backing @ref into the kernel's address space and return
+ * the kernel virtual address, or NULL on failure. On success the handle's
+ * refcount is left incremented so it cannot be freed while mapped; the
+ * reference is dropped by nvmap_munmap(). */
+void *nvmap_mmap(struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ pgprot_t prot;
+ unsigned long adj_size;
+ unsigned long offs;
+ struct vm_struct *v;
+ void *p;
+
+ h = nvmap_handle_get(ref->handle);
+ if (!h)
+ return NULL;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->heap_pgalloc) {
+ p = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
+ -1, prot);
+ /* fix: drop the reference taken above if the mapping
+ * failed, otherwise the handle would leak */
+ if (!p)
+ nvmap_handle_put(h);
+ return p;
+ }
+
+ /* carveout - explicitly map the pfns into a vmalloc area */
+ adj_size = h->carveout->base & ~PAGE_MASK;
+ adj_size += h->size;
+ adj_size = PAGE_ALIGN(adj_size);
+
+ v = alloc_vm_area(adj_size);
+ if (!v) {
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ /* returned pointer carries the block's sub-page offset */
+ p = v->addr + (h->carveout->base & ~PAGE_MASK);
+
+ for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
+ unsigned long addr = (unsigned long) v->addr + offs;
+ unsigned int pfn;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pfn = __phys_to_pfn(h->carveout->base + offs);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ break;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ break;
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(addr);
+ }
+
+ /* partial mapping (page-table allocation failed): unwind */
+ if (offs != adj_size) {
+ free_vm_area(v);
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ /* leave the handle ref count incremented by 1, so that
+ * the handle will not be freed while the kernel mapping exists.
+ * nvmap_handle_put will be called by unmapping this address */
+ return p;
+}
+
+/* Undo nvmap_mmap(): remove the kernel mapping created for @ref at @addr
+ * and drop the handle reference that nvmap_mmap() left held. */
+void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
+{
+ struct nvmap_handle *h;
+
+ if (!ref)
+ return;
+
+ h = ref->handle;
+
+ if (h->heap_pgalloc) {
+ vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
+ } else {
+ struct vm_struct *vm;
+ /* rewind the sub-page offset added by nvmap_mmap() to
+ * recover the vm area's base address */
+ addr -= (h->carveout->base & ~PAGE_MASK);
+ vm = remove_vm_area(addr);
+ BUG_ON(!vm);
+ }
+
+ nvmap_handle_put(h);
+}
+
+/* Convenience wrapper: create a handle of @size bytes and immediately
+ * allocate backing memory for it from sysmem or the generic carveout.
+ * Returns the new handle ref, or an ERR_PTR on failure. */
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
+ NVMAP_HEAP_CARVEOUT_GENERIC);
+ struct nvmap_handle_ref *r = NULL;
+ int err;
+
+ r = nvmap_create_handle(client, size);
+ if (IS_ERR(r))
+ return r;
+
+ err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ default_heap, align, flags);
+
+ /* allocation failed: release the handle created above */
+ if (err) {
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+ return ERR_PTR(err);
+ }
+
+ return r;
+}
+
+/* Release the handle reference created by nvmap_alloc(). */
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+}
+
+/*
+ * create a mapping to the user's buffer and write it
+ * (uses similar logic from nvmap_reloc_pin_array to map the cmdbuf)
+ */
+int nvmap_patch_wait(struct nvmap_client *client,
+ struct nvmap_handle *patch,
+ u32 patch_offset, u32 patch_value)
+{
+ unsigned long phys;
+ unsigned int pfn, last_pfn = 0;
+ void *addr;
+ pte_t **pte;
+
+ /* bounds-check the patch location against the handle's size */
+ if (patch_offset >= patch->size) {
+ nvmap_warn(client, "read/write outside of handle\n");
+ return -EFAULT;
+ }
+
+ /* borrow one of the device's reserved PTE slots as a temporary
+ * kernel mapping window */
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ /* derive physaddr of cmdbuf WAIT to patch */
+ if (patch->heap_pgalloc) {
+ unsigned int page = patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+
+ /* write PTE, so addr points to cmdbuf PFN */
+ /* NOTE(review): last_pfn is always 0 here, so this branch is taken
+ * unconditionally; the comparison looks like a leftover from the
+ * loop form used in nvmap_reloc_pin_array -- confirm */
+ if (pfn != last_pfn) {
+ pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
+ unsigned long kaddr = (unsigned long)addr;
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+ last_pfn = pfn;
+ }
+
+ /* write patch_value to addr + page offset */
+ __raw_writel(patch_value, addr + (phys & ~PAGE_MASK));
+
+ nvmap_free_pte(client->dev, pte);
+ wmb();
+ return 0;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
+#define __VIDEO_TEGRA_NVMAP_NVMAP_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+#define nvmap_err(_client, _fmt, ...) \
+ dev_err(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_warn(_client, _fmt, ...) \
+ dev_warn(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_debug(_client, _fmt, ...) \
+ dev_dbg(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
+
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+/* handles allocated using shared system memory (either IOVMM- or high-order
+ * page allocations */
+struct nvmap_pgalloc {
+ struct page **pages;
+ struct tegra_iovmm_area *area;
+ struct list_head mru_list; /* MRU entry for IOVMM reclamation */
+ bool contig; /* contiguous system memory */
+ bool dirty; /* area is invalid and needs mapping */
+};
+
+struct nvmap_handle {
+ struct rb_node node; /* entry on global handle tree */
+ atomic_t ref; /* reference count (i.e., # of duplications) */
+ atomic_t pin; /* pin count */
+ unsigned long flags;
+ size_t size; /* padded (as-allocated) size */
+ size_t orig_size; /* original (as-requested) size */
+ struct nvmap_client *owner;
+ struct nvmap_device *dev;
+ union {
+ struct nvmap_pgalloc pgalloc;
+ struct nvmap_heap_block *carveout;
+ };
+ bool global; /* handle may be duplicated by other clients */
+ bool secure; /* zap IOVMM area on unpin */
+ bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
+ bool alloc; /* handle has memory allocated */
+ struct mutex lock;
+};
+
+struct nvmap_share {
+ struct tegra_iovmm_client *iovmm;
+ wait_queue_head_t pin_wait;
+ struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ spinlock_t mru_lock;
+ struct list_head *mru_lists;
+ int nr_mru;
+#endif
+};
+
+struct nvmap_carveout_commit {
+ size_t commit;
+ struct list_head list;
+};
+
+struct nvmap_client {
+ const char *name;
+ struct nvmap_device *dev;
+ struct nvmap_share *share;
+ struct rb_root handle_refs;
+ atomic_t iovm_commit;
+ size_t iovm_limit;
+ spinlock_t ref_lock;
+ bool super;
+ atomic_t count;
+ struct task_struct *task;
+ struct nvmap_carveout_commit carveout_commit[0];
+};
+
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+ struct nvmap_handle *handle;
+ struct rb_node node;
+ atomic_t dupes; /* number of times to free on file close */
+ atomic_t pin; /* number of times to unpin on free */
+};
+
+struct nvmap_vma_priv {
+ struct nvmap_handle *handle;
+ size_t offs;
+ atomic_t count; /* number of processes cloning the VMA */
+};
+
+/* Protects the client's handle_refs rb-tree. */
+static inline void nvmap_ref_lock(struct nvmap_client *priv)
+{
+ spin_lock(&priv->ref_lock);
+}
+
+/* Releases the lock taken by nvmap_ref_lock(). */
+static inline void nvmap_ref_unlock(struct nvmap_client *priv)
+{
+ spin_unlock(&priv->ref_lock);
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client);
+
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
+
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
+
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot);
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b);
+
+struct nvmap_carveout_node;
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node, size_t len);
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len);
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
+
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long handle);
+
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
+ unsigned long id);
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size);
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags);
+
+void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);
+
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids);
+
+void nvmap_unpin_ids(struct nvmap_client *priv,
+ unsigned int nr, const unsigned long *ids);
+
+void _nvmap_handle_free(struct nvmap_handle *h);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
+
+/* Take a reference on @h. If the count was already zero the handle is
+ * being (or has been) freed: complain, and return NULL when the count is
+ * still non-positive after the increment. */
+static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
+ pr_err("%s: %s getting a freed handle\n",
+ __func__, current->group_leader->comm);
+ if (atomic_read(&h->ref) <= 0)
+ return NULL;
+ }
+ return h;
+}
+
+/* Drop a reference on @h; frees the handle when the count hits zero and
+ * warns on a negative count (unbalanced put). */
+static inline void nvmap_handle_put(struct nvmap_handle *h)
+{
+ int cnt = atomic_dec_return(&h->ref);
+
+ if (WARN_ON(cnt < 0)) {
+ pr_err("%s: %s put to negative references\n",
+ __func__, current->comm);
+ } else if (cnt == 0)
+ _nvmap_handle_free(h);
+}
+
+/* Derive the page protection bits for mapping @h from its cache flags,
+ * starting from @prot. */
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+ switch (h->flags) {
+ case NVMAP_HANDLE_UNCACHEABLE:
+ return pgprot_dmacoherent(prot);
+ case NVMAP_HANDLE_WRITE_COMBINE:
+ return pgprot_writecombine(prot);
+ case NVMAP_HANDLE_INNER_CACHEABLE:
+ return pgprot_inner_writeback(prot);
+ default:
+ return prot;
+ }
+}
+
+int is_nvmap_vma(struct vm_area_struct *vma);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_dev.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_ioctl.h"
+#include "nvmap_mru.h"
+
+#define NVMAP_NUM_PTES 64
+#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
+
+#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
+static bool carveout_killer = true;
+#else
+static bool carveout_killer;
+#endif
+module_param(carveout_killer, bool, 0640);
+
+struct nvmap_carveout_node {
+ unsigned int heap_bit;
+ struct nvmap_heap *carveout;
+ int index;
+ struct list_head clients;
+ spinlock_t clients_lock;
+};
+
+struct nvmap_device {
+ struct vm_struct *vm_rgn;
+ pte_t *ptes[NVMAP_NUM_PTES];
+ unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
+ unsigned int lastpte;
+ spinlock_t ptelock;
+
+ struct rb_root handles;
+ spinlock_t handle_lock;
+ wait_queue_head_t pte_wait;
+ struct miscdevice dev_super;
+ struct miscdevice dev_user;
+ struct nvmap_carveout_node *heaps;
+ int nr_carveouts;
+ struct nvmap_share iovmm_master;
+};
+
+struct nvmap_device *nvmap_dev;
+
+static struct backing_dev_info nvmap_bdi = {
+ .ra_pages = 0,
+ .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
+};
+
+static int nvmap_open(struct inode *inode, struct file *filp);
+static int nvmap_release(struct inode *inode, struct file *filp);
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
+static void nvmap_vma_open(struct vm_area_struct *vma);
+static void nvmap_vma_close(struct vm_area_struct *vma);
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static const struct file_operations nvmap_user_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static const struct file_operations nvmap_super_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static struct vm_operations_struct nvmap_vma_ops = {
+ .open = nvmap_vma_open,
+ .close = nvmap_vma_close,
+ .fault = nvmap_vma_fault,
+};
+
+/* True if @vma was created by nvmap's mmap (identified via its vm_ops). */
+int is_nvmap_vma(struct vm_area_struct *vma)
+{
+ return vma->vm_ops == &nvmap_vma_ops;
+}
+
+/* Return the device node this client opened: the privileged "super"
+ * misc device or the normal user one. */
+struct device *nvmap_client_to_device(struct nvmap_client *client)
+{
+ struct miscdevice *miscdev;
+
+ miscdev = client->super ? &client->dev->dev_super
+ : &client->dev->dev_user;
+ return miscdev->this_device;
+}
+
+/* Return the device-wide IOVMM sharing state (shared by all clients). */
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
+{
+ return &dev->iovmm_master;
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. may be called from IRQs */
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
+{
+ unsigned long flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ /* next-fit search: start after the most recently allocated slot,
+ * then wrap around to the front of the bitmap */
+ bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
+ if (bit == NVMAP_NUM_PTES) {
+ bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
+ if (bit == dev->lastpte)
+ bit = NVMAP_NUM_PTES;
+ }
+
+ /* all NVMAP_NUM_PTES slots are busy */
+ if (bit == NVMAP_NUM_PTES) {
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->lastpte = bit;
+ set_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+
+ /* each slot maps one page of the device's reserved VM region */
+ *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
+ return &(dev->ptes[bit]);
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * a negative errno. must be called from sleepable contexts */
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
+{
+ int ret;
+ pte_t **pte;
+ /* sleep until nvmap_free_pte() releases a slot */
+ ret = wait_event_interruptible(dev->pte_wait,
+ !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
+
+ /* report interruption as -EINTR for uniform caller handling */
+ if (ret == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ return pte;
+}
+
+/* frees a PTE */
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
+{
+ unsigned long addr;
+ unsigned int bit = pte - dev->ptes;
+ unsigned long flags;
+
+ if (WARN_ON(bit >= NVMAP_NUM_PTES))
+ return;
+
+ /* clear the kernel mapping for this slot's page */
+ addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
+ set_pte_at(&init_mm, addr, *pte, 0);
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ clear_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ /* a slot opened up: wake sleepers in nvmap_alloc_pte() */
+ wake_up(&dev->pte_wait);
+}
+
+/* verifies that the handle ref value "ref" is a valid handle ref for the
+ * file. caller must hold the file's ref_lock prior to calling this function */
+/* Search the client's rb-tree of handle refs (keyed by handle pointer
+ * value) for the ref matching @id. Caller must hold ref_lock. Returns
+ * the ref, or NULL when @id is not owned by this client. */
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
+ unsigned long id)
+{
+ struct rb_node *n;
+
+ for (n = c->handle_refs.rb_node; n; ) {
+ struct nvmap_handle_ref *ref =
+ rb_entry(n, struct nvmap_handle_ref, node);
+ unsigned long key = (unsigned long)ref->handle;
+
+ if (key == id)
+ return ref;
+ n = (id > key) ? n->rb_right : n->rb_left;
+ }
+
+ return NULL;
+}
+
+/* Look up @id in the client's ref tree and, if found, take a reference
+ * on the underlying handle. Returns the handle, or NULL. */
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle *h = NULL;
+ struct nvmap_handle_ref *ref;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, id);
+ if (ref && ref->handle)
+ h = nvmap_handle_get(ref->handle);
+ nvmap_ref_unlock(client);
+ return h;
+}
+
+/* Map a heap block back to the heap_bit of the carveout that owns it;
+ * returns 0 if the block's heap is not one of the device's carveouts. */
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b)
+{
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
+ struct nvmap_carveout_node *n;
+ int i;
+
+ for (i = 0; i < c->dev->nr_carveouts; i++) {
+ n = &c->dev->heaps[i];
+ if (n->carveout == h)
+ return n->heap_bit;
+ }
+ return 0;
+}
+
+/* Flush CPU caches for the physical range of @block (length @len) by
+ * mapping it page-by-page through a borrowed PTE window and flushing L1,
+ * then flushing the outer cache by physical range. Returns 0, or a
+ * negative errno from PTE allocation. */
+static int nvmap_flush_heap_block(struct nvmap_client *client,
+ struct nvmap_heap_block *block, size_t len)
+{
+ pte_t **pte;
+ void *addr;
+ unsigned long kaddr;
+ unsigned long phys = block->base;
+ unsigned long end = block->base + len;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ kaddr = (unsigned long)addr;
+
+ while (phys < end) {
+ /* process at most up to the next page boundary per pass */
+ unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys);
+ void *base = (void *)kaddr + (phys & ~PAGE_MASK);
+
+ next = min(next, end);
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
+ flush_tlb_kernel_page(kaddr);
+ __cpuc_flush_dcache_area(base, next - phys);
+ phys = next;
+ }
+
+ outer_flush_range(block->base, block->base + len);
+
+ nvmap_free_pte(client->dev, pte);
+ return 0;
+}
+
+/* Account @len bytes against the client's commit for carveout @node,
+ * linking the client onto the node's client list on first use.
+ * Takes ref_lock, then the node's clients_lock. */
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ nvmap_ref_lock(client);
+ spin_lock_irqsave(&node->clients_lock, flags);
+ /* an unlinked entry must have a zero commit */
+ BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
+ client->carveout_commit[node->index].commit != 0);
+
+ client->carveout_commit[node->index].commit += len;
+ /* if this client isn't already on the list of nodes for this heap,
+ add it */
+ if (list_empty(&client->carveout_commit[node->index].list)) {
+ list_add(&client->carveout_commit[node->index].list,
+ &node->clients);
+ }
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+ nvmap_ref_unlock(client);
+}
+
+/* Remove @len bytes from the client's commit for carveout @node, and
+ * unlink the client from the node's client list when its commit drops
+ * to zero. */
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ /* fix: commit is a size_t, so the old post-subtraction test
+ * "commit < 0" could never fire; check for underflow first */
+ BUG_ON(client->carveout_commit[node->index].commit < len);
+ client->carveout_commit[node->index].commit -= len;
+ /* if no more allocation in this carveout for this node, delete it */
+ if (!client->carveout_commit[node->index].commit)
+ list_del_init(&client->carveout_commit[node->index].list);
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+}
+
+/* Recover the owning nvmap_client from a pointer to one element of its
+ * trailing carveout_commit[] array: rewind to element 0 using the node's
+ * index, then subtract the array's offset (open-coded container_of on a
+ * flexible array member). */
+static struct nvmap_client* get_client_from_carveout_commit(
+ struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
+{
+ struct nvmap_carveout_commit *first_commit = commit - node->index;
+ return (void *)first_commit - offsetof(struct nvmap_client,
+ carveout_commit);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
+static int wait_count;
+/* Pick the victim with the largest oom_adj (most killable) -- breaking
+ * ties by largest carveout commit -- among clients of @node, and SIGKILL
+ * it to reclaim carveout space. Returns true when a victim was found
+ * (caller should wait for it to exit), false when nothing can be killed. */
+bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
+{
+ struct nvmap_carveout_commit *commit;
+ size_t selected_size = 0;
+ int selected_oom_adj = OOM_ADJUST_MIN;
+ struct task_struct *selected_task = NULL;
+ unsigned long flags;
+ bool wait = false;
+ int current_oom_adj = OOM_ADJUST_MIN;
+
+ task_lock(current);
+ if (current->signal)
+ current_oom_adj = current->signal->oom_adj;
+ task_unlock(current);
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ /* find the task with the largest oom_adj (most killable) and
+ * largest carveout allocation -- ignore kernel allocations,
+ * there's no way to handle them */
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ size_t size = commit->commit;
+ struct task_struct *task = client->task;
+ struct signal_struct *sig;
+
+ if (!task)
+ continue;
+
+ task_lock(task);
+ sig = task->signal;
+ if (!task->mm || !sig)
+ goto end;
+ /* don't try to kill current */
+ if (task == current->group_leader)
+ goto end;
+ /* don't try to kill higher priority tasks */
+ if (sig->oom_adj < current_oom_adj)
+ goto end;
+ if (sig->oom_adj < selected_oom_adj)
+ goto end;
+ if (sig->oom_adj == selected_oom_adj &&
+ size <= selected_size)
+ goto end;
+ selected_oom_adj = sig->oom_adj;
+ selected_size = size;
+ selected_task = task;
+end:
+ task_unlock(task);
+ }
+ if (selected_task) {
+ wait = true;
+ if (fatal_signal_pending(selected_task)) {
+ pr_warning("carveout_killer: process %d dying "
+ "slowly\n", selected_task->pid);
+ goto out;
+ }
+ /* fix: selected_size is a size_t -- use %zu, not %d */
+ pr_info("carveout_killer: killing process %d with oom_adj %d "
+ "to reclaim %zu (for process with oom_adj %d)\n",
+ selected_task->pid, selected_oom_adj,
+ selected_size, current_oom_adj);
+ force_sig(SIGKILL, selected_task);
+ }
+out:
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+ return wait;
+}
+
+/* Try each carveout heap whose heap_bit matches @usage, in order; return
+ * the first block successfully allocated (with stale cache lines for its
+ * range flushed), or NULL when every matching heap is exhausted. */
+struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot)
+{
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+ int i;
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_heap_block *block;
+ co_heap = &dev->heaps[i];
+
+ if (!(co_heap->heap_bit & usage))
+ continue;
+
+ block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
+ if (block) {
+ /* flush any stale data that may be left in the
+ * cache at the block's address, since the new
+ * block may be mapped uncached */
+ if (nvmap_flush_heap_block(client, block, len)) {
+ nvmap_heap_free(block);
+ return NULL;
+ } else
+ return block;
+ }
+ }
+ return NULL;
+}
+
+/* True once destroy_client() has bumped wait_count past the snapshot
+ * @count taken before shrinking, i.e. some client (and its carveout
+ * commits) has gone away since then. */
+static bool nvmap_carveout_freed(int count)
+{
+ smp_rmb();
+ return count != wait_count;
+}
+
+/* Allocate a carveout block matching @usage. Without the carveout killer
+ * this is a single attempt; with it, keep killing low-priority clients
+ * and retrying until the allocation succeeds or the retry window
+ * expires. Returns the block, or NULL. */
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot)
+{
+ struct nvmap_heap_block *block;
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+ int i;
+ unsigned long end = jiffies +
+ msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
+ int count = 0;
+
+ do {
+ block = do_nvmap_carveout_alloc(client, len, align,
+ usage, prot);
+ if (!carveout_killer)
+ return block;
+
+ if (block)
+ return block;
+
+ /* fix: len is a size_t -- log with %zu, not %u */
+ if (!count++) {
+ char task_comm[TASK_COMM_LEN];
+ if (client->task)
+ get_task_comm(task_comm, client->task);
+ else
+ task_comm[0] = 0;
+ pr_info("%s: failed to allocate %zu bytes for "
+ "process %s, firing carveout "
+ "killer!\n", __func__, len, task_comm);
+
+ } else {
+ pr_info("%s: still can't allocate %zu bytes, "
+ "attempt %d!\n", __func__, len, count);
+ }
+
+ /* shrink carveouts that matter and try again */
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ /* fix: renamed from "count", which shadowed the
+ * attempt counter above */
+ int wait_snapshot;
+ co_heap = &dev->heaps[i];
+
+ if (!(co_heap->heap_bit & usage))
+ continue;
+
+ wait_snapshot = wait_count;
+ /* indicates we didn't find anything to kill,
+ might as well stop trying */
+ if (!nvmap_shrink_carveout(co_heap))
+ return NULL;
+
+ if (time_is_after_jiffies(end))
+ wait_event_interruptible_timeout(wait_reclaim,
+ nvmap_carveout_freed(wait_snapshot),
+ end - jiffies);
+ }
+ } while (time_is_after_jiffies(end));
+
+ if (time_is_before_jiffies(end))
+ pr_info("carveout_killer: timeout expired without "
+ "allocation succeeding.\n");
+
+ return NULL;
+}
+
+/* remove a handle from the device's tree of all handles; called
+ * when freeing handles. */
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ spin_lock(&dev->handle_lock);
+
+ /* re-test inside the spinlock if the handle really has no clients;
+ * only remove the handle if it is unreferenced */
+ /* atomic_add_return(0, ...) acts as an atomic read with barrier
+ * semantics */
+ if (atomic_add_return(0, &h->ref) > 0) {
+ spin_unlock(&dev->handle_lock);
+ return -EBUSY;
+ }
+ smp_rmb();
+ BUG_ON(atomic_read(&h->ref) < 0);
+ BUG_ON(atomic_read(&h->pin) != 0);
+
+ rb_erase(&h->node, &dev->handles);
+
+ spin_unlock(&dev->handle_lock);
+ return 0;
+}
+
+/* adds a newly-created handle to the device master tree */
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ spin_lock(&dev->handle_lock);
+ /* the tree is ordered by the handle's pointer value, which doubles
+ * as its user-visible id */
+ p = &dev->handles.rb_node;
+ while (*p) {
+ struct nvmap_handle *b;
+
+ parent = *p;
+ b = rb_entry(parent, struct nvmap_handle, node);
+ if (h > b)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &dev->handles);
+ spin_unlock(&dev->handle_lock);
+}
+
+/* validates that a handle is in the device master tree, and that the
+ * client has permission to access it */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle *h = NULL;
+ struct rb_node *n;
+
+ spin_lock(&client->dev->handle_lock);
+
+ n = client->dev->handles.rb_node;
+
+ while (n) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ if ((unsigned long)h == id) {
+ /* permission: super clients may use any handle;
+ * otherwise the handle must be global or owned by
+ * this client */
+ if (client->super || h->global || (h->owner == client))
+ h = nvmap_handle_get(h);
+ else
+ h = NULL;
+ spin_unlock(&client->dev->handle_lock);
+ return h;
+ }
+ if (id > (unsigned long)h)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+ spin_unlock(&client->dev->handle_lock);
+ return NULL;
+}
+
+/* Create a new client context on @dev (one reference held, sharing the
+ * device-wide IOVMM state). Returns NULL on allocation failure. */
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
+{
+ struct nvmap_client *client;
+ struct task_struct *task;
+ int i;
+
+ if (WARN_ON(!dev))
+ return NULL;
+
+ /* the carveout_commit[] flexible array is sized by the number of
+ * carveouts registered on the device */
+ client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
+ * dev->nr_carveouts), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->name = name;
+ client->super = true;
+ client->dev = dev;
+ /* TODO: allocate unique IOVMM client for each nvmap client */
+ client->share = &dev->iovmm_master;
+ client->handle_refs = RB_ROOT;
+
+ atomic_set(&client->iovm_commit, 0);
+
+ client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ INIT_LIST_HEAD(&client->carveout_commit[i].list);
+ client->carveout_commit[i].commit = 0;
+ }
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+ client->task = task;
+
+ spin_lock_init(&client->ref_lock);
+ atomic_set(&client->count, 1);
+
+ return client;
+}
+
+/* Tear down a client once its last reference is dropped: release every
+ * handle ref (undoing outstanding pins and duplicate references), notify
+ * carveout-killer waiters, and detach from carveout commit lists. */
+static void destroy_client(struct nvmap_client *client)
+{
+ struct rb_node *n;
+ int i;
+
+ if (!client)
+ return;
+
+
+ while ((n = rb_first(&client->handle_refs))) {
+ struct nvmap_handle_ref *ref;
+ int pins, dupes;
+
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+
+ /* disown the handle so later validation by other clients
+ * doesn't match this dying client */
+ mutex_lock(&ref->handle->lock);
+ if (ref->handle->owner == client)
+ ref->handle->owner = NULL;
+ mutex_unlock(&ref->handle->lock);
+
+ /* balance every pin and every duplicate reference the
+ * client still held */
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ dupes = atomic_read(&ref->dupes);
+ while (dupes--)
+ nvmap_handle_put(ref->handle);
+
+ kfree(ref);
+ }
+
+ /* a client went away, so carveout space may have been freed:
+ * wake retry loops in nvmap_carveout_alloc() */
+ if (carveout_killer) {
+ wait_count++;
+ smp_wmb();
+ wake_up_all(&wait_reclaim);
+ }
+
+ for (i = 0; i < client->dev->nr_carveouts; i++)
+ list_del(&client->carveout_commit[i].list);
+
+ if (client->task)
+ put_task_struct(client->task);
+
+ kfree(client);
+}
+
+/* Take an additional reference on @client. Fails (returns NULL, with a
+ * WARN) when @client is NULL or its count already reached zero, i.e. it
+ * is being destroyed. */
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ if (WARN_ON(!client) ||
+ WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
+ return NULL;
+
+ return client;
+}
+
+/* Translate an open fd into its nvmap_client, taking a reference.
+ * Returns ERR_PTR(-EINVAL) for a bad fd and ERR_PTR(-EFAULT) when the fd
+ * does not refer to an nvmap device file. */
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ struct nvmap_client *client = ERR_PTR(-EFAULT);
+ struct file *f = fget(fd);
+ if (!f)
+ return ERR_PTR(-EINVAL);
+
+ /* only trust private_data if the file really belongs to nvmap */
+ if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
+ client = f->private_data;
+ atomic_inc(&client->count);
+ }
+
+ fput(f);
+ return client;
+}
+
+/* Drop a reference on @client; destroys it when the count hits zero.
+ * NULL is tolerated as a no-op. */
+void nvmap_client_put(struct nvmap_client *client)
+{
+ if (client && atomic_dec_return(&client->count) == 0)
+ destroy_client(client);
+}
+
+/* Open handler for both the user and the privileged "super" misc device
+ * nodes: creates a client context and stores it in filp->private_data. */
+static int nvmap_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
+ struct nvmap_client *priv;
+ int ret;
+
+ ret = nonseekable_open(inode, filp);
+ if (unlikely(ret))
+ return ret;
+
+ BUG_ON(dev != nvmap_dev);
+ priv = nvmap_create_client(dev, "user");
+ if (!priv)
+ return -ENOMEM;
+
+ /* grant super (privileged) status based on which node was opened */
+ priv->super = (filp->f_op == &nvmap_super_fops);
+
+ filp->f_mapping->backing_dev_info = &nvmap_bdi;
+
+ filp->private_data = priv;
+ return 0;
+}
+
+/* Release handler: drop the client reference taken by nvmap_open(). */
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+ nvmap_client_put(filp->private_data);
+ return 0;
+}
+
+/* mmap handler: set up an initially-empty nvmap VMA whose backing handle
+ * is attached later by the NVMAP_IOC_MMAP ioctl. */
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
+ * will be stored in vm_private_data and faulted in. until the
+ * ioctl is made, the VMA is mapped no-access */
+ vma->vm_private_data = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->offs = 0;
+ priv->handle = NULL;
+ atomic_set(&priv->count, 1);
+
+ vma->vm_flags |= VM_SHARED;
+ vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
+ vma->vm_ops = &nvmap_vma_ops;
+ vma->vm_private_data = priv;
+
+ return 0;
+}
+
+/* unlocked_ioctl handler: validates the command number and the user
+ * buffer direction/size encoded in @cmd, then dispatches to the
+ * per-command implementations.  Returns 0 or a negative errno. */
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	void __user *uarg = (void __user *)arg;
+
+	if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
+		return -ENOTTY;
+
+	if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
+		return -ENOTTY;
+
+	/* accumulate with |= so a bidirectional (_IOC_READ|_IOC_WRITE)
+	 * command fails when EITHER access check fails; plain assignment
+	 * would let the second check clobber a failed first check */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err |= !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+	if (err)
+		return -EFAULT;
+
+	switch (cmd) {
+	case NVMAP_IOC_CLAIM:
+		nvmap_warn(filp->private_data, "preserved handles not "
+			"supported\n");
+		err = -ENODEV;
+		break;
+	case NVMAP_IOC_CREATE:
+	case NVMAP_IOC_FROM_ID:
+		err = nvmap_ioctl_create(filp, cmd, uarg);
+		break;
+
+	case NVMAP_IOC_GET_ID:
+		err = nvmap_ioctl_getid(filp, uarg);
+		break;
+
+	case NVMAP_IOC_PARAM:
+		err = nvmap_ioctl_get_param(filp, uarg);
+		break;
+
+	case NVMAP_IOC_UNPIN_MULT:
+	case NVMAP_IOC_PIN_MULT:
+		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
+		break;
+
+	case NVMAP_IOC_ALLOC:
+		err = nvmap_ioctl_alloc(filp, uarg);
+		break;
+
+	case NVMAP_IOC_FREE:
+		/* FREE takes the raw id, not a user pointer */
+		err = nvmap_ioctl_free(filp, arg);
+		break;
+
+	case NVMAP_IOC_MMAP:
+		err = nvmap_map_into_caller_ptr(filp, uarg);
+		break;
+
+	case NVMAP_IOC_WRITE:
+	case NVMAP_IOC_READ:
+		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
+		break;
+
+	case NVMAP_IOC_CACHE:
+		err = nvmap_ioctl_cache_maint(filp, uarg);
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+	return err;
+}
+
+/* to ensure that the backing store for the VMA isn't freed while a fork'd
+ * reference still exists, nvmap_vma_open increments the reference count on
+ * the handle, and nvmap_vma_close decrements it. alternatively, we could
+ * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
+*/
+static void nvmap_vma_open(struct vm_area_struct *vma)
+{
+	struct nvmap_vma_priv *priv;
+
+	priv = vma->vm_private_data;
+
+	/* nvmap_map always installs a priv, so a missing one is a bug */
+	BUG_ON(!priv);
+
+	atomic_inc(&priv->count);
+}
+
+/* drop the per-VMA reference; when the last sharer closes, release the
+ * backing handle (if one was ever attached) and free the priv */
+static void nvmap_vma_close(struct vm_area_struct *vma)
+{
+	struct nvmap_vma_priv *priv = vma->vm_private_data;
+
+	if (priv && !atomic_dec_return(&priv->count)) {
+		if (priv->handle)
+			nvmap_handle_put(priv->handle);
+		kfree(priv);
+	}
+
+	vma->vm_private_data = NULL;
+}
+
+/* page-fault handler for nvmap mappings.  Carveout-backed handles are
+ * pfn-mapped directly (VM_FAULT_NOPAGE); page-allocated handles hand the
+ * struct page back to the fault core.  Faults before NVMAP_IOC_MMAP has
+ * attached a handle, or past the end of the handle, raise SIGBUS. */
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct nvmap_vma_priv *priv;
+	unsigned long offs;
+
+	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
+	priv = vma->vm_private_data;
+	if (!priv || !priv->handle || !priv->handle->alloc)
+		return VM_FAULT_SIGBUS;
+
+	offs += priv->offs;
+	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
+	 * offset from the original VMA */
+	offs += (vma->vm_pgoff << PAGE_SHIFT);
+
+	if (offs >= priv->handle->size)
+		return VM_FAULT_SIGBUS;
+
+	if (!priv->handle->heap_pgalloc) {
+		unsigned long pfn;
+		/* carveout blocks are expected to be page-aligned */
+		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
+		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
+		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+		return VM_FAULT_NOPAGE;
+	} else {
+		struct page *page;
+		offs >>= PAGE_SHIFT;
+		page = priv->handle->pgalloc.pages[offs];
+		/* extra get_page: the fault core drops one ref on the page */
+		if (page)
+			get_page(page);
+		vmf->page = page;
+		return (page) ? 0 : VM_FAULT_SIGBUS;
+	}
+}
+
+/* sysfs "usage" attribute: prints the carveout's usage bitmask in hex */
+static ssize_t attr_show_usage(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+
+	return sprintf(buf, "%08x\n", node->heap_bit);
+}
+
+static struct device_attribute heap_attr_show_usage =
+	__ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+/* extra attributes added to every carveout heap's device node */
+static struct attribute *heap_extra_attrs[] = {
+	&heap_attr_show_usage.attr,
+	NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+	.attrs = heap_extra_attrs,
+};
+
+/* print a one-line identification of @client (name, owning task, pid) to
+ * the seq_file; kernel-internal clients have no task and print "kernel" */
+static void client_stringify(struct nvmap_client *client, struct seq_file *s)
+{
+	char task_comm[TASK_COMM_LEN];
+	if (!client->task) {
+		seq_printf(s, "%8s %16s %8u", client->name, "kernel", 0);
+		return;
+	}
+	get_task_comm(task_comm, client->task);
+	seq_printf(s, "%8s %16s %8u", client->name, task_comm,
+		   client->task->pid);
+}
+
+/* walk @client's handle-ref tree and print size@base for every carveout
+ * (non page-allocated) allocation, followed by the summed total */
+static void allocations_stringify(struct nvmap_client *client,
+				  struct seq_file *s)
+{
+	struct rb_node *n = rb_first(&client->handle_refs);
+	unsigned long long total = 0;
+
+	for (; n != NULL; n = rb_next(n)) {
+		struct nvmap_handle_ref *ref =
+			rb_entry(n, struct nvmap_handle_ref, node);
+		struct nvmap_handle *handle = ref->handle;
+		/* iovmm/sysmem (heap_pgalloc) handles have no fixed base */
+		if (handle->alloc && !handle->heap_pgalloc) {
+			seq_printf(s, " %8u@%8lx ", handle->size,
+				   handle->carveout->base);
+			total += handle->size;
+		}
+	}
+	seq_printf(s, " total: %llu\n", total);
+}
+
+/* debugfs "allocations" file: for every client with a commit on this
+ * carveout, print the client identification and its allocation list */
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+{
+	struct nvmap_carveout_node *node = s->private;
+	struct nvmap_carveout_commit *commit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	list_for_each_entry(commit, &node->clients, list) {
+		struct nvmap_client *client =
+			get_client_from_carveout_commit(node, commit);
+		client_stringify(client, s);
+		allocations_stringify(client, s);
+	}
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvmap_debug_allocations_show,
+			   inode->i_private);
+}
+
+/* const: fops tables should be immutable (debugfs takes const fops) */
+static const struct file_operations debug_allocations_fops = {
+	.open = nvmap_debug_allocations_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* debugfs "clients" file: one line per client with its committed bytes */
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+	struct nvmap_carveout_node *node = s->private;
+	struct nvmap_carveout_commit *commit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&node->clients_lock, flags);
+	list_for_each_entry(commit, &node->clients, list) {
+		struct nvmap_client *client =
+			get_client_from_carveout_commit(node, commit);
+		client_stringify(client, s);
+		seq_printf(s, " %8u\n", commit->commit);
+	}
+	spin_unlock_irqrestore(&node->clients_lock, flags);
+
+	return 0;
+}
+
+static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nvmap_debug_clients_show, inode->i_private);
+}
+
+static const struct file_operations debug_clients_fops = {
+	.open = nvmap_debug_clients_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* platform driver probe: allocates the singleton nvmap_device, creates the
+ * iovmm client, reserves a VM region and its kernel PTEs for temporary
+ * kernel mappings, registers the /dev/nvmap and /dev/knvmap miscdevices,
+ * and creates one heap (plus sysfs/debugfs entries) per platform carveout.
+ * On any failure, everything acquired so far is unwound in reverse. */
+static int nvmap_probe(struct platform_device *pdev)
+{
+	struct nvmap_platform_data *plat = pdev->dev.platform_data;
+	struct nvmap_device *dev;
+	struct dentry *nvmap_debug_root;
+	unsigned int i;
+	int e;
+
+	if (!plat) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	/* nvmap is a singleton; a second probe is a board-config error */
+	if (WARN_ON(nvmap_dev != NULL)) {
+		dev_err(&pdev->dev, "only one nvmap device may be present\n");
+		return -ENODEV;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "out of memory for device\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_user.minor = MISC_DYNAMIC_MINOR;
+	dev->dev_user.name = "nvmap";
+	dev->dev_user.fops = &nvmap_user_fops;
+	dev->dev_user.parent = &pdev->dev;
+
+	dev->dev_super.minor = MISC_DYNAMIC_MINOR;
+	dev->dev_super.name = "knvmap";
+	dev->dev_super.fops = &nvmap_super_fops;
+	dev->dev_super.parent = &pdev->dev;
+
+	dev->handles = RB_ROOT;
+
+	init_waitqueue_head(&dev->pte_wait);
+
+	init_waitqueue_head(&dev->iovmm_master.pin_wait);
+	mutex_init(&dev->iovmm_master.pin_lock);
+	dev->iovmm_master.iovmm =
+		tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
+	if (IS_ERR(dev->iovmm_master.iovmm)) {
+		e = PTR_ERR(dev->iovmm_master.iovmm);
+		dev_err(&pdev->dev, "couldn't create iovmm client\n");
+		goto fail;
+	}
+	dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
+	if (!dev->vm_rgn) {
+		e = -ENOMEM;
+		dev_err(&pdev->dev, "couldn't allocate remapping region\n");
+		goto fail;
+	}
+	e = nvmap_mru_init(&dev->iovmm_master);
+	if (e) {
+		dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
+		goto fail;
+	}
+
+	spin_lock_init(&dev->ptelock);
+	spin_lock_init(&dev->handle_lock);
+
+	/* pre-allocate the kernel page-table entries covering vm_rgn so
+	 * that nvmap can remap pages there without further allocation */
+	for (i = 0; i < NVMAP_NUM_PTES; i++) {
+		unsigned long addr;
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
+		pgd = pgd_offset_k(addr);
+		pud = pud_alloc(&init_mm, pgd, addr);
+		if (!pud) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+		dev->ptes[i] = pte_alloc_kernel(pmd, addr);
+		if (!dev->ptes[i]) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't allocate page tables\n");
+			goto fail;
+		}
+	}
+
+	e = misc_register(&dev->dev_user);
+	if (e) {
+		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+			dev->dev_user.name);
+		goto fail;
+	}
+
+	e = misc_register(&dev->dev_super);
+	if (e) {
+		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+			dev->dev_super.name);
+		goto fail;
+	}
+
+	dev->nr_carveouts = 0;
+	dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
+			     plat->nr_carveouts, GFP_KERNEL);
+	if (!dev->heaps) {
+		e = -ENOMEM;
+		dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
+		goto fail;
+	}
+
+	/* debugfs is best-effort: failure here is logged but non-fatal */
+	nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
+	if (IS_ERR_OR_NULL(nvmap_debug_root))
+		dev_err(&pdev->dev, "couldn't create debug files\n");
+
+	for (i = 0; i < plat->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[i];
+		const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+		node->carveout = nvmap_heap_create(dev->dev_user.this_device,
+				   co->name, co->base, co->size,
+				   co->buddy_size, node);
+		if (!node->carveout) {
+			e = -ENOMEM;
+			dev_err(&pdev->dev, "couldn't create %s\n", co->name);
+			goto fail_heaps;
+		}
+		dev->nr_carveouts++;
+		spin_lock_init(&node->clients_lock);
+		node->index = i;
+		INIT_LIST_HEAD(&node->clients);
+		node->heap_bit = co->usage_mask;
+		if (nvmap_heap_create_group(node->carveout,
+					    &heap_extra_attr_group))
+			dev_warn(&pdev->dev, "couldn't add extra attributes\n");
+
+		dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
+			 co->name, co->size / 1024);
+
+		if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+			struct dentry *heap_root =
+				debugfs_create_dir(co->name, nvmap_debug_root);
+			if (!IS_ERR_OR_NULL(heap_root)) {
+				debugfs_create_file("clients", 0664, heap_root,
+				    node, &debug_clients_fops);
+				debugfs_create_file("allocations", 0664,
+				    heap_root, node, &debug_allocations_fops);
+			}
+		}
+	}
+
+	platform_set_drvdata(pdev, dev);
+	nvmap_dev = dev;
+	return 0;
+fail_heaps:
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[i];
+		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+		nvmap_heap_destroy(node->carveout);
+	}
+fail:
+	kfree(dev->heaps);
+	nvmap_mru_destroy(&dev->iovmm_master);
+	/* minor still equals MISC_DYNAMIC_MINOR iff misc_register never
+	 * succeeded, so these guards skip never-registered devices */
+	if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&dev->dev_super);
+	if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&dev->dev_user);
+	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+		tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+	if (dev->vm_rgn)
+		free_vm_area(dev->vm_rgn);
+	kfree(dev);
+	nvmap_dev = NULL;
+	return e;
+}
+
+/* platform driver remove: tears down everything created in nvmap_probe.
+ * Any handles still in the device rb-tree are freed directly with kfree
+ * (their backing allocations, if any, are abandoned at this point). */
+static int nvmap_remove(struct platform_device *pdev)
+{
+	struct nvmap_device *dev = platform_get_drvdata(pdev);
+	struct rb_node *n;
+	struct nvmap_handle *h;
+	int i;
+
+	misc_deregister(&dev->dev_super);
+	misc_deregister(&dev->dev_user);
+
+	while ((n = rb_first(&dev->handles))) {
+		h = rb_entry(n, struct nvmap_handle, node);
+		rb_erase(&h->node, &dev->handles);
+		kfree(h);
+	}
+
+	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+		tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+
+	nvmap_mru_destroy(&dev->iovmm_master);
+
+	for (i = 0; i < dev->nr_carveouts; i++) {
+		struct nvmap_carveout_node *node = &dev->heaps[i];
+		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+		nvmap_heap_destroy(node->carveout);
+	}
+	kfree(dev->heaps);
+
+	free_vm_area(dev->vm_rgn);
+	kfree(dev);
+	nvmap_dev = NULL;
+	return 0;
+}
+
+/* nvmap holds no volatile hardware state, so suspend/resume are no-ops */
+static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+static int nvmap_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver nvmap_driver = {
+	.probe		= nvmap_probe,
+	.remove		= nvmap_remove,
+	.suspend	= nvmap_suspend,
+	.resume		= nvmap_resume,
+
+	.driver = {
+		.name	= "tegra-nvmap",
+		.owner	= THIS_MODULE,
+	},
+};
+
+/* module init: bring up the heap allocator caches first, then register
+ * the platform driver.  fs_initcall so nvmap is ready before its client
+ * drivers initialize.  Note the success path also falls through the
+ * "fail" label with e == 0. */
+static int __init nvmap_init_driver(void)
+{
+	int e;
+
+	nvmap_dev = NULL;
+
+	e = nvmap_heap_init();
+	if (e)
+		goto fail;
+
+	e = platform_driver_register(&nvmap_driver);
+	if (e) {
+		nvmap_heap_deinit();
+		goto fail;
+	}
+
+fail:
+	return e;
+}
+fs_initcall(nvmap_init_driver);
+
+/* module exit: unregister the driver, then free the heap caches */
+static void __exit nvmap_exit_driver(void)
+{
+	platform_driver_unregister(&nvmap_driver);
+	nvmap_heap_deinit();
+	nvmap_dev = NULL;
+}
+module_exit(nvmap_exit_driver);
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap_handle.c
+ *
+ * Handle allocation and freeing routines for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* heaps from which "secure" (uncacheable-by-others) handles may be served */
+#define NVMAP_SECURE_HEAPS	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
+#define GFP_NVMAP		(__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP		(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
+/* handles may be arbitrarily large (16+MiB), and any handle allocated from
+ * the kernel (i.e., not a carveout handle) includes its array of pages. to
+ * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
+ * the array is allocated using vmalloc. */
+#define PAGELIST_VMALLOC_MIN	(PAGE_SIZE * 2)
+
+/* allocate @len bytes from kmalloc or vmalloc depending on size; pair
+ * with altfree() using the same length */
+static inline void *altalloc(size_t len)
+{
+	if (len >= PAGELIST_VMALLOC_MIN)
+		return vmalloc(len);
+	else
+		return kmalloc(len, GFP_KERNEL);
+}
+
+/* free memory obtained from altalloc(); @len selects the right allocator */
+static inline void altfree(void *ptr, size_t len)
+{
+	if (!ptr)
+		return;
+
+	if (len >= PAGELIST_VMALLOC_MIN)
+		vfree(ptr);
+	else
+		kfree(ptr);
+}
+
+/* final teardown of a handle once its last reference is dropped: removes
+ * it from the device rb-tree, releases its carveout block or its pages
+ * (plus iovmm mapping and MRU entry), then frees the handle itself. */
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+	struct nvmap_device *dev = h->dev;
+	unsigned int i, nr_page;
+
+	/* nonzero return: somebody re-found the handle; abort the free */
+	if (nvmap_handle_remove(dev, h) != 0)
+		return;
+
+	if (!h->alloc)
+		goto out;
+
+	if (!h->heap_pgalloc) {
+		nvmap_heap_free(h->carveout);
+		goto out;
+	}
+
+	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+
+	/* page-allocated handles always have page-aligned size and pages */
+	BUG_ON(h->size & ~PAGE_MASK);
+	BUG_ON(!h->pgalloc.pages);
+
+	nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);
+
+	if (h->pgalloc.area)
+		tegra_iovmm_free_vm(h->pgalloc.area);
+
+	for (i = 0; i < nr_page; i++)
+		__free_page(h->pgalloc.pages[i]);
+
+	altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
+
+out:
+	kfree(h);
+}
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+/* allocate exactly @size bytes (page-aligned) of physically contiguous
+ * pages: over-allocate a power-of-two order, split it, and return the
+ * tail pages.  All returned pages are flushed from L1 and L2 so they can
+ * be mapped with non-cacheable attributes without aliasing problems. */
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+	struct page *page, *p, *e;
+	unsigned int order;
+	unsigned long base;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+	page = alloc_pages(gfp, order);
+
+	if (!page)
+		return NULL;
+
+	/* split so the surplus tail pages can be freed individually */
+	split_page(page, order);
+
+	e = page + (1 << order);
+	for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+		__free_page(p);
+
+	e = page + (size >> PAGE_SHIFT);
+	for (p = page; p < e; p++)
+		__flush_dcache_page(page_mapping(p), p);
+
+	base = page_to_phys(page);
+	outer_flush_range(base, base + size);
+	return page;
+}
+
+/* back handle @h with system pages.  When @contiguous, a single physically
+ * contiguous run is allocated; otherwise individual pages are allocated
+ * and (unless unpinned-VM reclaim is enabled) an iovmm area is created
+ * for them.  Returns 0 on success, -ENOMEM on failure (all pages freed). */
+static int handle_page_alloc(struct nvmap_client *client,
+			     struct nvmap_handle *h, bool contiguous)
+{
+	size_t size = PAGE_ALIGN(h->size);
+	unsigned int nr_page = size >> PAGE_SHIFT;
+	pgprot_t prot;
+	unsigned int i = 0;
+	struct page **pages;
+
+	pages = altalloc(nr_page * sizeof(*pages));
+	if (!pages)
+		return -ENOMEM;
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	/* a single page is trivially contiguous */
+	if (nr_page == 1)
+		contiguous = true;
+#endif
+
+	h->pgalloc.area = NULL;
+	if (contiguous) {
+		struct page *page;
+		page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+		if (!page)
+			goto fail;
+
+		for (i = 0; i < nr_page; i++)
+			pages[i] = nth_page(page, i);
+
+	} else {
+		for (i = 0; i < nr_page; i++) {
+			pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
+			if (!pages[i])
+				goto fail;
+		}
+
+#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+		/* without VM reclaim the iovmm area is created eagerly and
+		 * kept for the handle's lifetime */
+		h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
+							NULL, size, prot);
+		if (!h->pgalloc.area)
+			goto fail;
+
+		h->pgalloc.dirty = true;
+#endif
+	}
+
+
+	h->size = size;
+	h->pgalloc.pages = pages;
+	h->pgalloc.contig = contiguous;
+	INIT_LIST_HEAD(&h->pgalloc.mru_list);
+	return 0;
+
+fail:
+	/* i is the count of successfully-allocated pages at this point */
+	while (i--)
+		__free_page(pages[i]);
+	altfree(pages, nr_page * sizeof(*pages));
+	return -ENOMEM;
+}
+
+/* try to satisfy handle @h's allocation from the single heap identified
+ * by @type (must be exactly one bit).  On success h->alloc is set; on
+ * failure h is left untouched so the caller can try another heap. */
+static void alloc_handle(struct nvmap_client *client, size_t align,
+			 struct nvmap_handle *h, unsigned int type)
+{
+	/* exactly one heap bit may be set */
+	BUG_ON(type & (type - 1));
+
+	if (type & NVMAP_HEAP_CARVEOUT_MASK) {
+		struct nvmap_heap_block *b;
+		b = nvmap_carveout_alloc(client, h->size, align,
+					 type, h->flags);
+		if (b) {
+			h->carveout = b;
+			h->heap_pgalloc = false;
+			h->alloc = true;
+			nvmap_carveout_commit_add(client,
+				nvmap_heap_to_arg(nvmap_block_to_heap(b)),
+				h->size);
+		}
+	} else if (type & NVMAP_HEAP_IOVMM) {
+		size_t reserved = PAGE_ALIGN(h->size);
+		int commit;
+		int ret;
+
+		/* page allocation can't honor super-page alignment */
+		BUG_ON(align > PAGE_SIZE);
+
+		/* increment the committed IOVM space prior to allocation
+		 * to avoid race conditions with other threads simultaneously
+		 * allocating. */
+		commit = atomic_add_return(reserved, &client->iovm_commit);
+
+		if (commit < client->iovm_limit)
+			ret = handle_page_alloc(client, h, false);
+		else
+			ret = -ENOMEM;
+
+		if (!ret) {
+			h->heap_pgalloc = true;
+			h->alloc = true;
+		} else {
+			/* roll back the optimistic commit on failure */
+			atomic_sub(reserved, &client->iovm_commit);
+		}
+
+	} else if (type & NVMAP_HEAP_SYSMEM) {
+
+		if (handle_page_alloc(client, h, true) == 0) {
+			BUG_ON(!h->pgalloc.contig);
+			h->heap_pgalloc = true;
+			h->alloc = true;
+		}
+	}
+}
+
+/* small allocations will try to allocate from generic OS memory before
+ * any of the limited heaps, to increase the effective memory for graphics
+ * allocations, and to reduce fragmentation of the graphics heaps with
+ * sub-page splinters */
+static const unsigned int heap_policy_small[] = {
+	NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	NVMAP_HEAP_SYSMEM,
+#endif
+	NVMAP_HEAP_CARVEOUT_MASK,
+	NVMAP_HEAP_IOVMM,
+	0,
+};
+
+/* large allocations prefer IOVMM before carveout/sysmem to keep the
+ * contiguous heaps from fragmenting; zero terminates the list */
+static const unsigned int heap_policy_large[] = {
+	NVMAP_HEAP_CARVEOUT_IRAM,
+	NVMAP_HEAP_IOVMM,
+	NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+	NVMAP_HEAP_SYSMEM,
+#endif
+	0,
+};
+
+/* allocate backing memory for the (previously created, still unallocated)
+ * handle @id on behalf of @client.  @heap_mask limits the candidate heaps,
+ * @align the minimum alignment, @flags carries cacheability/secure bits.
+ * Heaps are tried per the small/large policy tables until one succeeds.
+ * Returns 0 on success, -EINVAL for a bad id or empty mask, -ENOMEM when
+ * no heap could satisfy the request. */
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+			  unsigned long id, unsigned int heap_mask,
+			  size_t align, unsigned int flags)
+{
+	struct nvmap_handle *h = NULL;
+	const unsigned int *alloc_policy;
+	int nr_page;
+	int err = -ENOMEM;
+
+	align = max_t(size_t, align, L1_CACHE_BYTES);
+
+	/* can't do greater than page size alignment with page alloc */
+	if (align > PAGE_SIZE)
+		heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+
+	h = nvmap_get_handle_id(client, id);
+
+	if (!h)
+		return -EINVAL;
+
+	/* double allocation is treated as success (err fixed up at out:) */
+	if (h->alloc)
+		goto out;
+
+	nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
+	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
+
+	/* secure allocations can only be served from secure heaps */
+	if (h->secure)
+		heap_mask &= NVMAP_SECURE_HEAPS;
+
+	if (!heap_mask) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
+
+	while (!h->alloc && *alloc_policy) {
+		unsigned int heap_type;
+
+		heap_type = *alloc_policy++;
+		heap_type &= heap_mask;
+
+		if (!heap_type)
+			continue;
+
+		heap_mask &= ~heap_type;
+
+		while (heap_type && !h->alloc) {
+			unsigned int heap;
+
+			/* iterate possible heaps MSB-to-LSB, since higher-
+			 * priority carveouts will have higher usage masks */
+			heap = 1 << __fls(heap_type);
+			alloc_handle(client, align, h, heap);
+			heap_type &= ~heap;
+		}
+	}
+
+out:
+	err = (h->alloc) ? 0 : err;
+	nvmap_handle_put(h);
+	return err;
+}
+
+/* drop @client's reference on handle @id: decrements the dupe count and,
+ * when it hits zero, removes the ref from the client's rb-tree, reverses
+ * any iovm/carveout commit accounting, force-unpins any leaked pins (with
+ * a warning), and finally drops the underlying handle reference. */
+void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
+{
+	struct nvmap_handle_ref *ref;
+	struct nvmap_handle *h;
+	int pins;
+
+	nvmap_ref_lock(client);
+
+	ref = _nvmap_validate_id_locked(client, id);
+	if (!ref) {
+		nvmap_ref_unlock(client);
+		return;
+	}
+
+	BUG_ON(!ref->handle);
+	h = ref->handle;
+
+	/* other dupes of this handle remain in the client; just decrement */
+	if (atomic_dec_return(&ref->dupes)) {
+		nvmap_ref_unlock(client);
+		goto out;
+	}
+
+	smp_rmb();
+	pins = atomic_read(&ref->pin);
+	rb_erase(&ref->node, &client->handle_refs);
+
+	if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+		atomic_sub(h->size, &client->iovm_commit);
+
+	if (h->alloc && !h->heap_pgalloc)
+		nvmap_carveout_commit_subtract(client,
+			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+			h->size);
+
+	nvmap_ref_unlock(client);
+
+	/* freeing while pinned indicates a client bug; clean up anyway */
+	if (pins)
+		nvmap_err(client, "%s freeing pinned handle %p\n",
+			  current->group_leader->comm, h);
+
+	while (pins--)
+		nvmap_unpin_handles(client, &ref->handle, 1);
+
+	if (h->owner == client)
+		h->owner = NULL;
+
+	kfree(ref);
+
+out:
+	BUG_ON(!atomic_read(&h->ref));
+	nvmap_handle_put(h);
+}
+
+/* insert @ref into @client's rb-tree of handle references, keyed by the
+ * handle pointer value; takes the client's ref lock internally */
+static void add_handle_ref(struct nvmap_client *client,
+			   struct nvmap_handle_ref *ref)
+{
+	struct rb_node **p, *parent = NULL;
+
+	nvmap_ref_lock(client);
+	p = &client->handle_refs.rb_node;
+	while (*p) {
+		struct nvmap_handle_ref *node;
+		parent = *p;
+		node = rb_entry(parent, struct nvmap_handle_ref, node);
+		if (ref->handle > node->handle)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	rb_link_node(&ref->node, parent, p);
+	rb_insert_color(&ref->node, &client->handle_refs);
+	nvmap_ref_unlock(client);
+}
+
+/* create a new, unallocated handle of @size bytes owned by @client and a
+ * reference to it in the client's ref tree.  Backing memory is attached
+ * later via nvmap_alloc_handle_id().  Returns the new ref or ERR_PTR. */
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+					     size_t size)
+{
+	struct nvmap_handle *h;
+	struct nvmap_handle_ref *ref = NULL;
+
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return ERR_PTR(-ENOMEM);
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		kfree(h);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&h->ref, 1);
+	atomic_set(&h->pin, 0);
+	h->owner = client;
+	h->dev = client->dev;
+	BUG_ON(!h->owner);
+	/* orig_size remembers the requested size; h->size may be rounded
+	 * up to a page multiple at allocation time */
+	h->size = h->orig_size = size;
+	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+	mutex_init(&h->lock);
+
+	nvmap_handle_add(client->dev, h);
+
+	atomic_set(&ref->dupes, 1);
+	ref->handle = h;
+	atomic_set(&ref->pin, 0);
+	add_handle_ref(client, ref);
+	return ref;
+}
+
+/* duplicate an existing, allocated handle @id into @client's ref tree.
+ * If the client already references the handle, only the dupe count is
+ * bumped.  Non-super clients duplicating non-contiguous page handles are
+ * charged against their IOVM commit limit.  Returns the ref or ERR_PTR:
+ * -EPERM (id not visible to client), -EINVAL (unallocated handle),
+ * -ENOMEM (allocation failure or IOVM over-commit). */
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+						   unsigned long id)
+{
+	struct nvmap_handle_ref *ref = NULL;
+	struct nvmap_handle *h = NULL;
+
+	BUG_ON(!client || client->dev != nvmap_dev);
+	/* on success, the reference count for the handle should be
+	 * incremented, so the success paths will not call nvmap_handle_put */
+	h = nvmap_validate_get(client, id);
+
+	if (!h) {
+		nvmap_debug(client, "%s duplicate handle failed\n",
+			    current->group_leader->comm);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (!h->alloc) {
+		nvmap_err(client, "%s duplicating unallocated handle\n",
+			  current->group_leader->comm);
+		nvmap_handle_put(h);
+		return ERR_PTR(-EINVAL);
+	}
+
+	nvmap_ref_lock(client);
+	ref = _nvmap_validate_id_locked(client, (unsigned long)h);
+
+	if (ref) {
+		/* handle already duplicated in client; just increment
+		 * the reference count rather than re-duplicating it */
+		atomic_inc(&ref->dupes);
+		nvmap_ref_unlock(client);
+		return ref;
+	}
+
+	nvmap_ref_unlock(client);
+
+	/* verify that adding this handle to the process' access list
+	 * won't exceed the IOVM limit */
+	if (h->heap_pgalloc && !h->pgalloc.contig && !client->super) {
+		int oc;
+		oc = atomic_add_return(h->size, &client->iovm_commit);
+		if (oc > client->iovm_limit) {
+			atomic_sub(h->size, &client->iovm_commit);
+			nvmap_handle_put(h);
+			nvmap_err(client, "duplicating %p in %s over-commits"
+				  " IOVMM space\n", (void *)id,
+				  current->group_leader->comm);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		nvmap_handle_put(h);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* carveout handles count against the client's carveout commit */
+	if (!h->heap_pgalloc)
+		nvmap_carveout_commit_add(client,
+			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+			h->size);
+
+	atomic_set(&ref->dupes, 1);
+	ref->handle = h;
+	atomic_set(&ref->pin, 0);
+	add_handle_ref(client, ref);
+	return ref;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.c
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+/*
+ * "carveouts" are platform-defined regions of physically contiguous memory
+ * which are not managed by the OS. a platform may specify multiple carveouts,
+ * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
+ * or reserved regions of main system memory.
+ *
+ * the carveout allocator returns allocations which are physically contiguous.
+ * to reduce external fragmentation, the allocation algorithm implemented in
+ * this file employs 3 strategies for keeping allocations of similar size
+ * grouped together inside the larger heap: the "small", "normal" and "huge"
+ * strategies. the size thresholds (in bytes) for determining which strategy
+ * to employ should be provided by the platform for each heap. it is possible
+ * for a platform to define a heap where only the "normal" strategy is used.
+ *
+ * o "normal" allocations use an address-order first-fit allocator (called
+ * BOTTOM_UP in the code below). each allocation is rounded up to be
+ * an integer multiple of the "small" allocation size.
+ *
+ * o "huge" allocations use an address-order last-fit allocator (called
+ * TOP_DOWN in the code below). like "normal" allocations, each allocation
+ * is rounded up to be an integer multiple of the "small" allocation size.
+ *
+ * o "small" allocations are treatedy differently: the heap manager maintains
+ * a pool of "small"-sized blocks internally from which allocations less
+ * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
+ * is requested and none of the buddy sub-heaps is able to service it,
+ * the heap manager will try to allocate a new buddy-heap.
+ *
+ * this allocator is intended to keep "splinters" colocated in the carveout,
+ * and to ensure that the minimum free block size in the carveout (i.e., the
+ * "small" threshold) is still a meaningful size.
+ *
+ */
+
+#define MAX_BUDDY_NR	128	/* maximum buddies in a buddy allocator */
+
+/* allocation direction within the carveout (see file-header comment) */
+enum direction {
+	TOP_DOWN,
+	BOTTOM_UP
+};
+
+enum block_type {
+	BLOCK_FIRST_FIT,	/* block was allocated directly from the heap */
+	BLOCK_BUDDY,		/* block was allocated from a buddy sub-heap */
+};
+
+struct heap_stat {
+	size_t free;		/* total free size */
+	size_t free_largest;	/* largest free block */
+	size_t free_count;	/* number of free blocks */
+	size_t total;		/* total size */
+	size_t largest;		/* largest unique block */
+	size_t count;		/* total number of blocks */
+};
+
+struct buddy_heap;
+
+/* a block handed out by a buddy sub-heap */
+struct buddy_block {
+	struct nvmap_heap_block block;
+	struct buddy_heap *heap;
+};
+
+/* a block handed out by the first-fit/last-fit list allocator */
+struct list_block {
+	struct nvmap_heap_block block;
+	struct list_head all_list;
+	unsigned int mem_prot;
+	unsigned long orig_addr;
+	size_t size;
+	struct nvmap_heap *heap;
+	struct list_head free_list;
+};
+
+/* union so both block flavors can come from one kmem cache */
+struct combo_block {
+	union {
+		struct list_block lb;
+		struct buddy_block bb;
+	};
+};
+
+struct buddy_bits {
+	unsigned int alloc:1;
+	unsigned int order:7;	/* log2(MAX_BUDDY_NR); */
+};
+
+struct buddy_heap {
+	struct list_block *heap_base;	/* backing block in the parent heap */
+	unsigned int nr_buddies;
+	struct list_head buddy_list;
+	struct buddy_bits bitmap[MAX_BUDDY_NR];
+};
+
+struct nvmap_heap {
+	struct list_head all_list;	/* every block, free or allocated */
+	struct list_head free_list;	/* free blocks only */
+	struct mutex lock;		/* protects all lists and sub-heaps */
+	struct list_head buddy_list;	/* active buddy sub-heaps */
+	unsigned int min_buddy_shift;
+	unsigned int buddy_heap_size;
+	unsigned int small_alloc;
+	const char *name;
+	void *arg;
+	struct device dev;		/* sysfs presence for stats */
+};
+
+/* slab caches for buddy sub-heaps and for block descriptors */
+static struct kmem_cache *buddy_heap_cache;
+static struct kmem_cache *block_cache;
+
+/* the nvmap_heap that a buddy sub-heap was carved out of */
+static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
+{
+	return heap->heap_base->heap;
+}
+
+/* smallest buddy order (relative to 1 << min_shift units) that holds
+ * @len bytes: ceil(log2(ceil(len / (1 << min_shift)))) */
+static inline unsigned int order_of(size_t len, size_t min_shift)
+{
+	len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
+	return fls(len)-1;
+}
+
+/* returns the free size in bytes of the buddy heap; must be called while
+ * holding the parent heap's lock. */
+static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
+{
+	unsigned int index;
+	unsigned int shift = parent_of(heap)->min_buddy_shift;
+
+	/* step by each block's span; index always lands on a block start */
+	for (index = 0; index < heap->nr_buddies;
+	     index += (1 << heap->bitmap[index].order)) {
+		size_t curr = 1 << (heap->bitmap[index].order + shift);
+
+		stat->largest = max(stat->largest, curr);
+		stat->total += curr;
+		stat->count++;
+
+		if (!heap->bitmap[index].alloc) {
+			stat->free += curr;
+			stat->free_largest = max(stat->free_largest, curr);
+			stat->free_count++;
+		}
+	}
+}
+
+/* returns the free size of the heap (including any free blocks in any
+ * buddy-heap suballocators; must be called while holding the parent
+ * heap's lock. */
+static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
+{
+	struct buddy_heap *bh;
+	struct list_block *l = NULL;
+	unsigned long base = -1ul;
+
+	memset(stat, 0, sizeof(*stat));
+	mutex_lock(&heap->lock);
+	/* pass 1: totals over every block; also find the lowest address */
+	list_for_each_entry(l, &heap->all_list, all_list) {
+		stat->total += l->size;
+		stat->largest = max(l->size, stat->largest);
+		stat->count++;
+		base = min(base, l->orig_addr);
+	}
+
+	list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
+		buddy_stat(bh, stat);
+		/* the total counts are double-counted for buddy heaps
+		 * since the blocks allocated for buddy heaps exist in the
+		 * all_list; subtract out the doubly-added stats */
+		stat->total -= bh->heap_base->size;
+		stat->count--;
+	}
+
+	/* pass 2: free-list blocks contribute to the free statistics */
+	list_for_each_entry(l, &heap->free_list, free_list) {
+		stat->free += l->size;
+		stat->free_count++;
+		stat->free_largest = max(l->size, stat->free_largest);
+	}
+	mutex_unlock(&heap->lock);
+
+	return base;
+}
+
+static ssize_t heap_name_show(struct device *dev,
+			      struct device_attribute *attr, char *buf);
+
+static ssize_t heap_stat_show(struct device *dev,
+			      struct device_attribute *attr, char *buf);
+
+/* per-heap sysfs attributes; all statistic files share the single
+ * heap_stat_show() callback, which dispatches on the attribute pointer */
+static struct device_attribute heap_stat_total_max =
+	__ATTR(total_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_count =
+	__ATTR(total_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_size =
+	__ATTR(total_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_max =
+	__ATTR(free_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_count =
+	__ATTR(free_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_size =
+	__ATTR(free_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_base =
+	__ATTR(base, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_attr_name =
+	__ATTR(name, S_IRUGO, heap_name_show, NULL);
+
+static struct attribute *heap_stat_attrs[] = {
+	&heap_stat_total_max.attr,
+	&heap_stat_total_count.attr,
+	&heap_stat_total_size.attr,
+	&heap_stat_free_max.attr,
+	&heap_stat_free_count.attr,
+	&heap_stat_free_size.attr,
+	&heap_stat_base.attr,
+	&heap_attr_name.attr,
+	NULL,
+};
+
+static struct attribute_group heap_stat_attr_group = {
+	.attrs = heap_stat_attrs,
+};
+
+/* sysfs show callback for the "name" attribute: reports the name the
+ * heap was created with */
+static ssize_t heap_name_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+
+	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+	return sprintf(buf, "%s\n", heap->name);
+}
+
+/* sysfs show callback shared by all statistic attributes; snapshots the
+ * heap statistics once, then reports the field matching the attribute
+ * that was read.
+ * NOTE(review): the stat fields appear to be size_t printed with %u —
+ * fine on 32-bit ARM, but would need %zu on a 64-bit build; confirm. */
+static ssize_t heap_stat_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+	struct heap_stat stat;
+	unsigned long base;
+
+	base = heap_stat(heap, &stat);
+
+	if (attr == &heap_stat_total_max)
+		return sprintf(buf, "%u\n", stat.largest);
+	else if (attr == &heap_stat_total_count)
+		return sprintf(buf, "%u\n", stat.count);
+	else if (attr == &heap_stat_total_size)
+		return sprintf(buf, "%u\n", stat.total);
+	else if (attr == &heap_stat_free_max)
+		return sprintf(buf, "%u\n", stat.free_largest);
+	else if (attr == &heap_stat_free_count)
+		return sprintf(buf, "%u\n", stat.free_count);
+	else if (attr == &heap_stat_free_size)
+		return sprintf(buf, "%u\n", stat.free);
+	else if (attr == &heap_stat_base)
+		return sprintf(buf, "%08lx\n", base);
+	else
+		return -EINVAL;
+}
+
+/* allocates a block of at least size bytes from the buddy heap, aligned
+ * to align bytes; returns NULL if the heap's memory protection does not
+ * match mem_prot or no suitable free buddy exists.  caller must hold the
+ * parent heap's lock. */
+static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
+					    size_t size, size_t align,
+					    unsigned int mem_prot)
+{
+	unsigned int index = 0;
+	unsigned int min_shift = parent_of(heap)->min_buddy_shift;
+	unsigned int order = order_of(size, min_shift);
+	unsigned int align_mask;
+	unsigned int best = heap->nr_buddies;
+	struct buddy_block *b;
+
+	/* a buddy heap carries a single protection; reject mismatches */
+	if (heap->heap_base->mem_prot != mem_prot)
+		return NULL;
+
+	align = max(align, (size_t)(1 << min_shift));
+	align_mask = (align >> min_shift) - 1;
+
+	/* best-fit scan: find the smallest free buddy that is large
+	 * enough and whose index satisfies the alignment */
+	for (index = 0; index < heap->nr_buddies;
+	     index += (1 << heap->bitmap[index].order)) {
+
+		if (heap->bitmap[index].alloc || (index & align_mask) ||
+		    (heap->bitmap[index].order < order))
+			continue;
+
+		if (best == heap->nr_buddies ||
+		    heap->bitmap[index].order < heap->bitmap[best].order)
+			best = index;
+
+		/* exact-size match cannot be beaten; stop early */
+		if (heap->bitmap[best].order == order)
+			break;
+	}
+
+	if (best == heap->nr_buddies)
+		return NULL;
+
+	b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+	if (!b)
+		return NULL;
+
+	/* split the chosen buddy down to the requested order, marking
+	 * each peeled-off sibling free */
+	while (heap->bitmap[best].order != order) {
+		unsigned int buddy;
+		heap->bitmap[best].order--;
+		buddy = best ^ (1 << heap->bitmap[best].order);
+		heap->bitmap[buddy].order = heap->bitmap[best].order;
+		heap->bitmap[buddy].alloc = 0;
+	}
+	heap->bitmap[best].alloc = 1;
+	b->block.base = heap->heap_base->block.base + (best << min_shift);
+	b->heap = heap;
+	b->block.type = BLOCK_BUDDY;
+	return &b->block;
+}
+
+/* frees a buddy block, coalescing with its (free, same-order) buddy at
+ * each level; returns the buddy heap itself when the free left it
+ * completely empty, so the caller can release it back to the parent.
+ * caller must hold the parent heap's lock. */
+static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
+{
+	struct buddy_block *b = container_of(block, struct buddy_block, block);
+	struct buddy_heap *h = b->heap;
+	unsigned int min_shift = parent_of(h)->min_buddy_shift;
+	unsigned int index;
+
+	index = (block->base - h->heap_base->block.base) >> min_shift;
+	h->bitmap[index].alloc = 0;
+
+	/* merge upward while the sibling buddy is free and unsplit */
+	for (;;) {
+		unsigned int buddy = index ^ (1 << h->bitmap[index].order);
+		if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
+		    h->bitmap[buddy].order != h->bitmap[index].order)
+			break;
+
+		h->bitmap[buddy].order++;
+		h->bitmap[index].order++;
+		/* the lower index becomes the merged block's head */
+		index = min(buddy, index);
+	}
+
+	kmem_cache_free(block_cache, b);
+	/* entry 0 spanning the whole heap means nothing is allocated */
+	if ((1 << h->bitmap[0].order) == h->nr_buddies)
+		return h;
+
+	return NULL;
+}
+
+/* first-fit allocator over the heap's sorted free list.  small
+ * allocations are placed bottom-up and large ones top-down to reduce
+ * fragmentation.  up to two remainder blocks (the alignment gap before
+ * the allocation and the tail after it) are split off and returned to
+ * the free list.  caller must hold the heap's lock. */
+static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
+					      size_t len, size_t align,
+					      unsigned int mem_prot)
+{
+	struct list_block *b = NULL;
+	struct list_block *i = NULL;
+	struct list_block *rem = NULL;
+	unsigned long fix_base;
+	enum direction dir;
+
+	/* since pages are only mappable with one cache attribute,
+	 * and most allocations from carveout heaps are DMA coherent
+	 * (i.e., non-cacheable), round cacheable allocations up to
+	 * a page boundary to ensure that the physical pages will
+	 * only be mapped one way. */
+	if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
+	    mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
+		align = max_t(size_t, align, PAGE_SIZE);
+		len = PAGE_ALIGN(len);
+	}
+
+	dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+
+	if (dir == BOTTOM_UP) {
+		/* forward scan: first free block that still fits len
+		 * after rounding its base up to the alignment */
+		list_for_each_entry(i, &heap->free_list, free_list) {
+			size_t fix_size;
+			fix_base = ALIGN(i->block.base, align);
+			fix_size = i->size - (fix_base - i->block.base);
+
+			if (fix_size >= len) {
+				b = i;
+				break;
+			}
+		}
+	} else {
+		/* reverse scan: place the allocation at the aligned top
+		 * end of the first (highest) block that fits */
+		list_for_each_entry_reverse(i, &heap->free_list, free_list) {
+			if (i->size >= len) {
+				fix_base = i->block.base + i->size - len;
+				fix_base &= ~(align-1);
+				if (fix_base >= i->block.base) {
+					b = i;
+					break;
+				}
+			}
+		}
+	}
+
+	if (!b)
+		return NULL;
+
+	/* split off the gap between the block start and the aligned base;
+	 * if the bookkeeping allocation fails, absorb the gap into the
+	 * returned block instead (orig_addr remembers the true start) */
+	if (b->block.base != fix_base) {
+		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+		if (!rem) {
+			b->orig_addr = b->block.base;
+			b->block.base = fix_base;
+			b->size -= (b->block.base - b->orig_addr);
+			goto out;
+		}
+
+		rem->block.type = BLOCK_FIRST_FIT;
+		rem->block.base = b->block.base;
+		rem->orig_addr = rem->block.base;
+		rem->size = fix_base - rem->block.base;
+		b->block.base = fix_base;
+		b->orig_addr = fix_base;
+		b->size -= rem->size;
+		list_add_tail(&rem->all_list, &heap->all_list);
+		list_add_tail(&rem->free_list, &b->free_list);
+	}
+
+	b->orig_addr = b->block.base;
+
+	/* split off the tail beyond len; on bookkeeping failure the
+	 * caller simply gets a larger block */
+	if (b->size > len) {
+		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+		if (!rem)
+			goto out;
+
+		rem->block.type = BLOCK_FIRST_FIT;
+		rem->block.base = b->block.base + len;
+		rem->size = b->size - len;
+		BUG_ON(rem->size > b->size);
+		rem->orig_addr = rem->block.base;
+		b->size = len;
+		list_add_tail(&rem->all_list, &heap->all_list);
+		list_add(&rem->free_list, &b->free_list);
+	}
+
+out:
+	list_del(&b->free_list);
+	b->heap = heap;
+	b->mem_prot = mem_prot;
+	return &b->block;
+}
+
+#ifdef DEBUG_FREE_LIST
+/* dumps the heap's free list to the kernel log, marking the entry
+ * "token" with an arrow; compiled only when DEBUG_FREE_LIST is set. */
+static void freelist_debug(struct nvmap_heap *heap, const char *title,
+			   struct list_block *token)
+{
+	int i;
+	struct list_block *n;
+
+	/* dev_dbg() is the device-aware debug printk; the previous
+	 * dev_debug() does not exist, so the DEBUG_FREE_LIST build
+	 * failed to compile */
+	dev_dbg(&heap->dev, "%s\n", title);
+	i = 0;
+	list_for_each_entry(n, &heap->free_list, free_list) {
+		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i,
+			(void *)n->orig_addr,
+			(void *)(n->orig_addr + n->size),
+			(n == token) ? "<--" : "");
+		i++;
+	}
+}
+#else
+#define freelist_debug(_heap, _title, _token) do { } while (0)
+#endif
+
+/* returns a first-fit block to the heap: restores the block's original
+ * base/size (undoing any alignment absorption from do_heap_alloc),
+ * inserts it into the address-sorted free list, then merges it with an
+ * adjacent free successor and/or predecessor.  caller must hold the
+ * heap's lock. */
+static void do_heap_free(struct nvmap_heap_block *block)
+{
+	struct list_block *b = container_of(block, struct list_block, block);
+	struct list_block *n = NULL;
+	struct nvmap_heap *heap = b->heap;
+
+	BUG_ON(b->block.base > b->orig_addr);
+	b->size += (b->block.base - b->orig_addr);
+	b->block.base = b->orig_addr;
+
+	freelist_debug(heap, "free list before", b);
+
+	/* find the first free block with a higher base; insertion before
+	 * it keeps the free list sorted by address */
+	list_for_each_entry(n, &heap->free_list, free_list) {
+		if (n->block.base > b->block.base)
+			break;
+	}
+
+	list_add_tail(&b->free_list, &n->free_list);
+	BUG_ON(list_empty(&b->all_list));
+
+	freelist_debug(heap, "free list pre-merge", b);
+
+	/* merge with the following block if it is contiguous; note that
+	 * list_first_entry on b's own node yields b's successor */
+	if (!list_is_last(&b->free_list, &heap->free_list)) {
+		n = list_first_entry(&b->free_list, struct list_block, free_list);
+		if (n->block.base == b->block.base + b->size) {
+			list_del(&n->all_list);
+			list_del(&n->free_list);
+			BUG_ON(b->orig_addr >= n->orig_addr);
+			b->size += n->size;
+			kmem_cache_free(block_cache, n);
+		}
+	}
+
+	/* merge into the preceding block if it is contiguous; b itself
+	 * is absorbed and freed in that case */
+	if (b->free_list.prev != &heap->free_list) {
+		n = list_entry(b->free_list.prev, struct list_block, free_list);
+		if (n->block.base + n->size == b->block.base) {
+			list_del(&b->all_list);
+			list_del(&b->free_list);
+			BUG_ON(n->orig_addr >= b->orig_addr);
+			n->size += b->size;
+			kmem_cache_free(block_cache, b);
+		}
+	}
+
+	freelist_debug(heap, "free list after", b);
+}
+
+/* tries to satisfy an allocation from the heap's existing buddy-heap
+ * suballocators; if none can, carves a new buddy heap out of the
+ * first-fit region and allocates from it.  caller must hold the heap's
+ * lock. */
+static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
+					       size_t len, size_t align,
+					       unsigned int mem_prot)
+{
+	struct buddy_heap *bh;
+	struct nvmap_heap_block *b = NULL;
+
+	list_for_each_entry(bh, &h->buddy_list, buddy_list) {
+		b = buddy_alloc(bh, len, align, mem_prot);
+		if (b)
+			return b;
+	}
+
+	/* no buddy heaps could service this allocation: try to create a new
+	 * buddy heap instead */
+	bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
+	if (!bh)
+		return NULL;
+
+	/* back the new buddy heap with a naturally-aligned first-fit
+	 * block of buddy_heap_size bytes */
+	b = do_heap_alloc(h, h->buddy_heap_size, h->buddy_heap_size, mem_prot);
+	if (!b) {
+		kmem_cache_free(buddy_heap_cache, bh);
+		return NULL;
+	}
+
+	bh->heap_base = container_of(b, struct list_block, block);
+	bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
+	/* a single free entry spanning the whole heap */
+	bh->bitmap[0].alloc = 0;
+	bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
+	list_add_tail(&bh->buddy_list, &h->buddy_list);
+	return buddy_alloc(bh, len, align, mem_prot);
+}
+
+/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
+ * align bytes. */
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
+					  size_t align, unsigned int prot)
+{
+	struct nvmap_heap_block *b;
+
+	mutex_lock(&h->lock);
+	/* small requests go through the buddy suballocators; larger ones
+	 * use the first-fit region directly, rounded to buddy_heap_size
+	 * (when buddy heaps are enabled) so freed blocks stay reusable
+	 * as future buddy heaps */
+	if (len <= h->buddy_heap_size / 2) {
+		b = do_buddy_alloc(h, len, align, prot);
+	} else {
+		if (h->buddy_heap_size)
+			len = ALIGN(len, h->buddy_heap_size);
+		align = max(align, (size_t)L1_CACHE_BYTES);
+		b = do_heap_alloc(h, len, align, prot);
+	}
+	mutex_unlock(&h->lock);
+	return b;
+}
+
+/* nvmap_heap_free: frees block b*/
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+	struct buddy_heap *bh = NULL;
+	struct nvmap_heap *h;
+
+	/* resolve the owning top-level heap before taking its lock */
+	if (b->type == BLOCK_BUDDY) {
+		struct buddy_block *bb;
+		bb = container_of(b, struct buddy_block, block);
+		h = bb->heap->heap_base->heap;
+	} else {
+		struct list_block *lb;
+		lb = container_of(b, struct list_block, block);
+		h = lb->heap;
+	}
+
+	mutex_lock(&h->lock);
+	if (b->type == BLOCK_BUDDY)
+		bh = do_buddy_free(b);
+	else
+		do_heap_free(b);
+
+	/* a non-NULL bh means the buddy heap became empty: release its
+	 * backing block.  the lock must be dropped first because the
+	 * recursive nvmap_heap_free call re-acquires it. */
+	if (bh) {
+		list_del(&bh->buddy_list);
+		mutex_unlock(&h->lock);
+		nvmap_heap_free(&bh->heap_base->block);
+		kmem_cache_free(buddy_heap_cache, bh);
+	} else
+		mutex_unlock(&h->lock);
+}
+
+/* resolves the top-level nvmap_heap owning block b, for either block
+ * flavor (first-fit or buddy-suballocated) */
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
+{
+	if (b->type != BLOCK_BUDDY) {
+		struct list_block *lb =
+			container_of(b, struct list_block, block);
+		return lb->heap;
+	}
+
+	return parent_of(container_of(b, struct buddy_block, block)->heap);
+}
+
+/* intentionally-empty device release callback: the heap's memory is
+ * freed by nvmap_heap_destroy() (which kfree()s the containing
+ * nvmap_heap), and a non-NULL release silences the device-core
+ * "does not have a release() function" warning */
+static void heap_release(struct device *heap)
+{
+}
+
+/* nvmap_heap_create: create a heap object of len bytes, starting from
+ * address base.
+ *
+ * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
+ * of the buddy heap size will use a buddy sub-allocator, where each buddy
+ * heap is buddy_size bytes (should be a power of 2). all other allocations
+ * will be rounded up to be a multiple of buddy_size bytes.
+ */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+				     unsigned long base, size_t len,
+				     size_t buddy_size, void *arg)
+{
+	struct nvmap_heap *h = NULL;
+	struct list_block *l = NULL;
+
+	/* sanitize buddy_size: too-small disables buddy heaps, too-large
+	 * disables them, non-power-of-2 is rounded up */
+	if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
+		dev_warn(parent, "%s: buddy_size %u too small\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size >= len)) {
+		dev_warn(parent, "%s: buddy_size %u too large\n", __func__,
+			buddy_size);
+		buddy_size = 0;
+	} else if (WARN_ON(buddy_size & (buddy_size - 1))) {
+		dev_warn(parent, "%s: buddy_size %u not a power of 2\n",
+			 __func__, buddy_size);
+		buddy_size = 1 << (ilog2(buddy_size) + 1);
+	}
+
+	/* shrink the region so both its base and length are
+	 * buddy_size-aligned */
+	if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
+		unsigned long orig = base;
+		dev_warn(parent, "%s: base address %p not aligned to "
+			 "buddy_size %u\n", __func__, (void *)base, buddy_size);
+		base = ALIGN(base, buddy_size);
+		len -= (base - orig);
+	}
+
+	if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
+		dev_warn(parent, "%s: length %u not aligned to "
+			 "buddy_size %u\n", __func__, len, buddy_size);
+		len &= ~(buddy_size - 1);
+	}
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h) {
+		dev_err(parent, "%s: out of memory\n", __func__);
+		goto fail_alloc;
+	}
+
+	l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+	if (!l) {
+		dev_err(parent, "%s: out of memory\n", __func__);
+		goto fail_alloc;
+	}
+
+	dev_set_name(&h->dev, "heap-%s", name);
+	h->name = name;
+	h->arg = arg;
+	h->dev.parent = parent;
+	h->dev.driver = NULL;
+	h->dev.release = heap_release;
+	/* NOTE(review): if device_register() fails after kobject init,
+	 * the documented cleanup is put_device(), not a bare kfree —
+	 * confirm against the driver-model rules for this kernel */
+	if (device_register(&h->dev)) {
+		dev_err(parent, "%s: failed to register %s\n", __func__,
+			dev_name(&h->dev));
+		goto fail_alloc;
+	}
+	if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
+		dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
+		goto fail_register;
+	}
+	/* threshold below which allocations are placed bottom-up */
+	h->small_alloc = max(2 * buddy_size, len / 256);
+	h->buddy_heap_size = buddy_size;
+	if (buddy_size)
+		h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
+	INIT_LIST_HEAD(&h->free_list);
+	INIT_LIST_HEAD(&h->buddy_list);
+	INIT_LIST_HEAD(&h->all_list);
+	mutex_init(&h->lock);
+	/* the whole region starts life as one free first-fit block */
+	l->block.base = base;
+	l->block.type = BLOCK_FIRST_FIT;
+	l->size = len;
+	l->orig_addr = base;
+	list_add_tail(&l->free_list, &h->free_list);
+	list_add_tail(&l->all_list, &h->all_list);
+	return h;
+
+fail_register:
+	device_unregister(&h->dev);
+fail_alloc:
+	if (l)
+		kmem_cache_free(block_cache, l);
+	kfree(h);
+	return NULL;
+}
+
+/* maps a heap's embedded struct device back to the caller-supplied
+ * context pointer given to nvmap_heap_create() */
+void *nvmap_heap_device_to_arg(struct device *dev)
+{
+	return container_of(dev, struct nvmap_heap, dev)->arg;
+}
+
+/* returns the caller-supplied context pointer for this heap */
+void *nvmap_heap_to_arg(struct nvmap_heap *heap)
+{
+	return heap->arg;
+}
+
+/* nvmap_heap_destroy: frees all resources in heap */
+void nvmap_heap_destroy(struct nvmap_heap *heap)
+{
+	WARN_ON(!list_empty(&heap->buddy_list));
+
+	sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
+	device_unregister(&heap->dev);
+
+	while (!list_empty(&heap->buddy_list)) {
+		struct buddy_heap *b;
+		b = list_first_entry(&heap->buddy_list, struct buddy_heap,
+				     buddy_list);
+		/* unlink the buddy heap entry itself; deleting the list
+		 * head (as before) left the loop condition unchanged and
+		 * corrupted the list on the next iteration */
+		list_del(&b->buddy_list);
+		nvmap_heap_free(&b->heap_base->block);
+		kmem_cache_free(buddy_heap_cache, b);
+	}
+
+	/* only the single original region should remain */
+	WARN_ON(!list_is_singular(&heap->all_list));
+	while (!list_empty(&heap->all_list)) {
+		struct list_block *l;
+		l = list_first_entry(&heap->all_list, struct list_block,
+				     all_list);
+		list_del(&l->all_list);
+		kmem_cache_free(block_cache, l);
+	}
+
+	kfree(heap);
+}
+
+/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+			    const struct attribute_group *grp)
+{
+	return sysfs_create_group(&heap->dev.kobj, grp);
+}
+
+/* nvmap_heap_remove_group: removes the attribute_group grp */
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+			     const struct attribute_group *grp)
+{
+	sysfs_remove_group(&heap->dev.kobj, grp);
+}
+
+/* creates the slab caches used for heap bookkeeping; must be called
+ * exactly once before any heap is created.  returns 0 or -ENOMEM. */
+int nvmap_heap_init(void)
+{
+	BUG_ON(buddy_heap_cache != NULL);
+
+	buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
+	if (!buddy_heap_cache) {
+		pr_err("%s: unable to create buddy heap cache\n", __func__);
+		return -ENOMEM;
+	}
+
+	block_cache = KMEM_CACHE(combo_block, 0);
+	if (block_cache)
+		return 0;
+
+	kmem_cache_destroy(buddy_heap_cache);
+	pr_err("%s: unable to create block cache\n", __func__);
+	return -ENOMEM;
+}
+
+/* tears down the slab caches created by nvmap_heap_init(); the NULL
+ * guards are required because kmem_cache_destroy() on this kernel does
+ * not accept a NULL cache */
+void nvmap_heap_deinit(void)
+{
+	if (block_cache)
+		kmem_cache_destroy(block_cache);
+	block_cache = NULL;
+
+	if (buddy_heap_cache)
+		kmem_cache_destroy(buddy_heap_cache);
+	buddy_heap_cache = NULL;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap_heap.h
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVMAP_HEAP_H
+#define __NVMAP_HEAP_H
+
+struct device;
+struct nvmap_heap;
+struct attribute_group;
+
+/* a block handed out by the heap allocator; base is the physical
+ * (carveout) address, type distinguishes first-fit from buddy blocks */
+struct nvmap_heap_block {
+	unsigned long base;
+	unsigned int type;
+};
+
+#define NVMAP_HEAP_MIN_BUDDY_SIZE 8192
+
+/* buddy_size is size_t to match the definition in nvmap_heap.c (it was
+ * previously declared unsigned int, a prototype mismatch) */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+				     unsigned long base, size_t len,
+				     size_t buddy_size, void *arg);
+
+void nvmap_heap_destroy(struct nvmap_heap *heap);
+
+void *nvmap_heap_device_to_arg(struct device *dev);
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap);
+
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, size_t len,
+					  size_t align, unsigned int prot);
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
+
+void nvmap_heap_free(struct nvmap_heap_block *block);
+
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+			    const struct attribute_group *grp);
+
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+			     const struct attribute_group *grp);
+
+int __init nvmap_heap_init(void);
+
+void nvmap_heap_deinit(void);
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap_ioctl.h"
+#include "nvmap.h"
+
+/* forward declarations: bounce-buffer read/write and explicit cache
+ * maintenance, implemented at the bottom of this file */
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+			 int is_read, unsigned long h_offs,
+			 unsigned long sys_addr, unsigned long h_stride,
+			 unsigned long sys_stride, unsigned long elem_size,
+			 unsigned long count);
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+		       unsigned long start, unsigned long end, unsigned int op);
+
+
+/* NVMAP_IOC_PIN/UNPIN handler: pins (or unpins) one or more handles
+ * and, on pin, writes each handle's physical/iovm address back to user
+ * space.  op.count > 1 means op.handles points to an array of ids. */
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
+{
+	struct nvmap_pin_handle op;
+	struct nvmap_handle *h;
+	unsigned long on_stack[16];
+	unsigned long *refs;
+	unsigned long __user *output;
+	unsigned int i;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.count)
+		return -EINVAL;
+
+	if (op.count > 1) {
+		size_t bytes = op.count * sizeof(*refs);
+
+		/* op.count comes from user space: reject counts whose
+		 * byte size overflows size_t, otherwise the wrapped-
+		 * around (undersized) allocation below would be overrun
+		 * by the pin loop */
+		if (op.count > ULONG_MAX / sizeof(*refs))
+			return -EINVAL;
+
+		if (op.count > ARRAY_SIZE(on_stack))
+			refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
+		else
+			refs = on_stack;
+
+		if (!refs)
+			return -ENOMEM;
+
+		if (copy_from_user(refs, (void *)op.handles, bytes)) {
+			err = -EFAULT;
+			goto out;
+		}
+	} else {
+		refs = on_stack;
+		on_stack[0] = (unsigned long)op.handles;
+	}
+
+	if (is_pin)
+		err = nvmap_pin_ids(filp->private_data, op.count, refs);
+	else
+		nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+	/* skip the output stage on unpin */
+	if (err || !is_pin)
+		goto out;
+
+	/* it is guaranteed that if nvmap_pin_ids returns 0 that
+	 * all of the handle_ref objects are valid, so dereferencing
+	 * directly here is safe */
+	if (op.count > 1)
+		output = (unsigned long __user *)op.addr;
+	else {
+		struct nvmap_pin_handle __user *tmp = arg;
+		output = (unsigned long __user *)&(tmp->addr);
+	}
+
+	if (!output)
+		goto out;
+
+	for (i = 0; i < op.count && !err; i++) {
+		unsigned long addr;
+
+		h = (struct nvmap_handle *)refs[i];
+
+		if (h->heap_pgalloc && h->pgalloc.contig)
+			addr = page_to_phys(h->pgalloc.pages[0]);
+		else if (h->heap_pgalloc)
+			addr = h->pgalloc.area->iovm_start;
+		else
+			addr = h->carveout->base;
+
+		err = put_user(addr, &output[i]);
+	}
+
+	/* roll back the pins if writing any address back failed */
+	if (err)
+		nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+out:
+	if (refs != on_stack)
+		kfree(refs);
+
+	return err;
+}
+
+/* NVMAP_IOC_GET_ID handler: returns a globally-sharable id for a handle
+ * owned by this client (the id is the kernel handle pointer value). */
+int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_create_handle op;
+	struct nvmap_handle *h = NULL;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	h = nvmap_get_handle_id(client, op.handle);
+
+	if (!h)
+		return -EPERM;
+
+	op.id = (__u32)h;
+	/* the owner exporting an id implicitly marks the handle as
+	 * sharable with other clients */
+	if (client == h->owner)
+		h->global = true;
+
+	nvmap_handle_put(h);
+
+	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
+}
+
+/* NVMAP_IOC_ALLOC handler: backs an existing handle with memory from
+ * one of the heaps in op.heap_mask. */
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+	struct nvmap_alloc_handle op;
+	struct nvmap_client *client = filp->private_data;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	/* alignment must be zero or a power of 2 */
+	if (op.align & (op.align - 1))
+		return -EINVAL;
+
+	/* user-space handles are aligned to page boundaries, to prevent
+	 * data leakage. */
+	op.align = max_t(size_t, op.align, PAGE_SIZE);
+
+	return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
+				     op.align, op.flags);
+}
+
+/* NVMAP_IOC_CREATE / NVMAP_IOC_FROM_ID handler: creates a new handle of
+ * op.size bytes, or duplicates an existing global handle by id, and
+ * returns the new handle to user space. */
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
+{
+	struct nvmap_create_handle op;
+	struct nvmap_handle_ref *ref = NULL;
+	struct nvmap_client *client = filp->private_data;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!client)
+		return -ENODEV;
+
+	if (cmd == NVMAP_IOC_CREATE) {
+		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
+		/* remember the exact requested size; the handle itself
+		 * is page-aligned */
+		if (!IS_ERR(ref))
+			ref->handle->orig_size = op.size;
+	} else if (cmd == NVMAP_IOC_FROM_ID) {
+		ref = nvmap_duplicate_handle_id(client, op.id);
+	} else {
+		return -EINVAL;
+	}
+
+	if (IS_ERR(ref))
+		return PTR_ERR(ref);
+
+	op.handle = nvmap_ref_to_id(ref);
+	/* undo the creation if the handle cannot be returned */
+	if (copy_to_user(arg, &op, sizeof(op))) {
+		err = -EFAULT;
+		nvmap_free_handle_id(client, op.handle);
+	}
+
+	return err;
+}
+
+/* NVMAP_IOC_MMAP handler: binds a handle to a VMA previously created by
+ * mmap() on the nvmap device; the actual pages are populated later by
+ * the fault handler. */
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_map_caller op;
+	struct nvmap_vma_priv *vpriv;
+	struct vm_area_struct *vma;
+	struct nvmap_handle *h = NULL;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle)
+		return -EINVAL;
+
+	/* takes a reference on the handle; dropped on error below, kept
+	 * by the VMA (vpriv->handle) on success */
+	h = nvmap_get_handle_id(client, op.handle);
+
+	if (!h)
+		return -EPERM;
+
+	down_read(&current->mm->mmap_sem);
+
+	vma = find_vma(current->mm, op.addr);
+	if (!vma || !vma->vm_private_data) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* the mapping offset must be page-aligned ... */
+	if (op.offset & ~PAGE_MASK) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* ... and the window must lie within the handle */
+	if ((op.offset + op.length) > h->size) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	vpriv = vma->vm_private_data;
+	BUG_ON(!vpriv);
+
+	/* the VMA must exactly match the requested mapping operation, and the
+	 * VMA that is targetted must have been created by this driver
+	 */
+	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
+	    (vma->vm_end-vma->vm_start != op.length)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	/* verify that each mmap() system call creates a unique VMA */
+
+	if (vpriv->handle && (h == vpriv->handle)) {
+		/* NOTE(review): this early-success path appears to keep
+		 * the extra reference taken by nvmap_get_handle_id above
+		 * — confirm whether re-mapping the same handle should
+		 * put it */
+		goto out;
+	} else if (vpriv->handle) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	/* carveout blocks must be page-aligned to be user-mappable */
+	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	vpriv->handle = h;
+	vpriv->offs = op.offset;
+
+	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+
+out:
+	up_read(&current->mm->mmap_sem);
+	if (err)
+		nvmap_handle_put(h);
+	return err;
+}
+
+/* NVMAP_IOC_PARAM handler: queries a property (size, alignment, base
+ * address or heap) of a handle. */
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
+{
+	struct nvmap_handle_param op;
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_handle *h;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	h = nvmap_get_handle_id(client, op.handle);
+	if (!h)
+		return -EINVAL;
+
+	switch (op.param) {
+	case NVMAP_HANDLE_PARAM_SIZE:
+		op.result = h->orig_size;
+		break;
+	case NVMAP_HANDLE_PARAM_ALIGNMENT:
+		if (!h->alloc)
+			op.result = 0;
+		else if (h->heap_pgalloc)
+			op.result = PAGE_SIZE;
+		else if (h->carveout->base)
+			/* base & -base isolates the lowest set bit, i.e.
+			 * the largest power-of-2 alignment of the base */
+			op.result = (h->carveout->base & -h->carveout->base);
+		else
+			op.result = SZ_4M;
+		break;
+	case NVMAP_HANDLE_PARAM_BASE:
+		/* only meaningful while the handle is pinned;
+		 * atomic_add_return(0, ...) is an atomic read of the
+		 * pin count */
+		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
+			op.result = -1ul;
+		else if (!h->heap_pgalloc)
+			op.result = h->carveout->base;
+		else if (h->pgalloc.contig)
+			op.result = page_to_phys(h->pgalloc.pages[0]);
+		else if (h->pgalloc.area)
+			op.result = h->pgalloc.area->iovm_start;
+		else
+			op.result = -1ul;
+		break;
+	case NVMAP_HANDLE_PARAM_HEAP:
+		if (!h->alloc)
+			op.result = 0;
+		else if (!h->heap_pgalloc)
+			op.result = nvmap_carveout_usage(client, h->carveout);
+		else if (h->pgalloc.contig)
+			op.result = NVMAP_HEAP_SYSMEM;
+		else
+			op.result = NVMAP_HEAP_IOVMM;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (!err && copy_to_user(arg, &op, sizeof(op)))
+		err = -EFAULT;
+
+	nvmap_handle_put(h);
+	return err;
+}
+
+/* NVMAP_IOC_READ/WRITE handler: copies strided data between user memory
+ * and a handle's backing storage; writes the byte count actually copied
+ * back into the user structure. */
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_rw_handle __user *uarg = arg;
+	struct nvmap_rw_handle op;
+	struct nvmap_handle *h;
+	ssize_t copied;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle || !op.addr || !op.count || !op.elem_size)
+		return -EINVAL;
+
+	h = nvmap_get_handle_id(client, op.handle);
+	if (!h)
+		return -EPERM;
+
+	copied = rw_handle(client, h, is_read, op.offset,
+			   (unsigned long)op.addr, op.hmem_stride,
+			   op.user_stride, op.elem_size, op.count);
+
+	if (copied < 0) {
+		err = copied;
+		copied = 0;
+	} else if (copied < (op.count * op.elem_size))
+		/* partial transfer: report how much made it */
+		err = -EINTR;
+
+	/* best-effort count write-back; the address was already read
+	 * from user space above, so __put_user's result is ignored */
+	__put_user(copied, &uarg->count);
+
+	nvmap_handle_put(h);
+
+	return err;
+}
+
+/* NVMAP_IOC_CACHE handler: performs cache writeback/invalidate over a
+ * user-mapped range of an nvmap handle. */
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
+{
+	struct nvmap_client *client = filp->private_data;
+	struct nvmap_cache_op op;
+	struct vm_area_struct *vma;
+	struct nvmap_vma_priv *vpriv;
+	unsigned long start;
+	unsigned long end;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
+	    op.op > NVMAP_CACHE_OP_WB_INV)
+		return -EINVAL;
+
+	down_read(&current->mm->mmap_sem);
+
+	/* look up the VMA in current->mm — the mm actually locked above
+	 * (previously current->active_mm, which is not protected by this
+	 * lock and is only coincidentally equal for user tasks; the
+	 * sibling nvmap_map_into_caller_ptr already uses current->mm) */
+	vma = find_vma(current->mm, (unsigned long)op.addr);
+	if (!vma || !is_nvmap_vma(vma) ||
+	    (unsigned long)op.addr + op.len > vma->vm_end) {
+		err = -EADDRNOTAVAIL;
+		goto out;
+	}
+
+	vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
+
+	/* the VMA must be backed by the handle named in the request */
+	if ((unsigned long)vpriv->handle != op.handle) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* convert the user address range to offsets within the handle */
+	start = (unsigned long)op.addr - vma->vm_start;
+	end = start + op.len;
+
+	err = cache_maint(client, vpriv->handle, start, end, op.op);
+out:
+	up_read(&current->mm->mmap_sem);
+	return err;
+}
+
+/* NVMAP_IOC_FREE handler: drops this client's reference to a handle;
+ * a zero handle is a successful no-op */
+int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+	if (arg)
+		nvmap_free_handle_id(filp->private_data, arg);
+
+	return 0;
+}
+
+/* performs the cache maintenance operation op over [start, end) within
+ * handle h.  page-allocated handles are maintained page by page;
+ * carveout handles are temporarily mapped through a scratch pte.
+ * uncacheable/write-combined handles need no maintenance. */
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+		       unsigned long start, unsigned long end, unsigned int op)
+{
+	enum dma_data_direction dir;
+	pgprot_t prot;
+	pte_t **pte = NULL;
+	unsigned long kaddr;
+	unsigned long loop;
+	int err = 0;
+
+	h = nvmap_handle_get(h);
+	if (!h)
+		return -EFAULT;
+
+	if (!h->alloc) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+	    h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
+	    start == end)
+		goto out;
+
+	if (WARN_ON_ONCE(op == NVMAP_CACHE_OP_WB_INV))
+		dir = DMA_BIDIRECTIONAL;
+	else if (op == NVMAP_CACHE_OP_WB)
+		dir = DMA_TO_DEVICE;
+	else
+		dir = DMA_FROM_DEVICE;
+
+	if (h->heap_pgalloc) {
+		/* page-allocated: maintain each page through the DMA API */
+		while (start < end) {
+			unsigned long next = (start + PAGE_SIZE) & PAGE_MASK;
+			struct page *page;
+
+			page = h->pgalloc.pages[start >> PAGE_SHIFT];
+			next = min(next, end);
+			__dma_page_cpu_to_dev(page, start & ~PAGE_MASK,
+					      next - start, dir);
+			start = next;
+		}
+		goto out;
+	}
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+	pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+	if (IS_ERR(pte)) {
+		err = PTR_ERR(pte);
+		pte = NULL;
+		goto out;
+	}
+
+	if (start > h->size || end > h->size) {
+		nvmap_warn(client, "cache maintenance outside handle\n");
+		/* goto out (rather than returning directly, as before)
+		 * so the scratch pte and the handle reference taken
+		 * above are released */
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* translate handle offsets to physical addresses */
+	start += h->carveout->base;
+	end += h->carveout->base;
+
+	loop = start;
+
+	/* map each physical page at the scratch kernel address and
+	 * maintain the inner cache over it */
+	while (loop < end) {
+		unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
+		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
+		next = min(next, end);
+
+		set_pte_at(&init_mm, kaddr, *pte,
+			   pfn_pte(__phys_to_pfn(loop), prot));
+		flush_tlb_kernel_page(kaddr);
+
+		dmac_map_area(base, next - loop, dir);
+		loop = next;
+	}
+
+	/* outer (L2) maintenance by physical address, unless the handle
+	 * is inner-cacheable only */
+	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
+		if (dir != DMA_FROM_DEVICE)
+			outer_clean_range(start, end);
+		else
+			outer_inv_range(start, end);
+	}
+
+out:
+	if (pte)
+		nvmap_free_pte(client->dev, pte);
+	nvmap_handle_put(h);
+	wmb();
+	return err;
+}
+
+/* copies bytes [start, start + bytes) of handle h to/from the user
+ * address rw_addr, one physical page at a time, by mapping each page at
+ * the scratch kernel address kaddr through pte.  returns 0 or -EFAULT. */
+static int rw_handle_page(struct nvmap_handle *h, int is_read,
+			  unsigned long start, unsigned long rw_addr,
+			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
+{
+	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+	unsigned long end = start + bytes;
+	int err = 0;
+
+	while (!err && start < end) {
+		struct page *page = NULL;
+		unsigned long phys;
+		size_t count;
+		void *src;
+
+		if (!h->heap_pgalloc) {
+			phys = h->carveout->base + start;
+		} else {
+			/* hold a page reference across the copy */
+			page = h->pgalloc.pages[start >> PAGE_SHIFT];
+			BUG_ON(!page);
+			get_page(page);
+			phys = page_to_phys(page) + (start & ~PAGE_MASK);
+		}
+
+		set_pte_at(&init_mm, kaddr, pte,
+			   pfn_pte(__phys_to_pfn(phys), prot));
+		flush_tlb_kernel_page(kaddr);
+
+		src = (void *)kaddr + (phys & ~PAGE_MASK);
+		/* phys is reused as "bytes left in this page" */
+		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
+		count = min_t(size_t, end - start, phys);
+
+		if (is_read)
+			err = copy_to_user((void *)rw_addr, src, count);
+		else
+			err = copy_from_user(src, (void *)rw_addr, count);
+
+		/* copy_*_user returns uncopied bytes; fold into -EFAULT */
+		if (err)
+			err = -EFAULT;
+
+		rw_addr += count;
+		start += count;
+
+		if (page)
+			put_page(page);
+	}
+
+	return err;
+}
+
+/* Copies 'count' atoms of 'elem_size' bytes between user memory at
+ * 'sys_addr' and handle 'h' starting at offset 'h_offs', advancing by
+ * 'sys_stride'/'h_stride' bytes between atoms.  is_read non-zero copies
+ * handle -> user.  Returns the number of bytes copied, or a negative
+ * errno if nothing was copied before the failure. */
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+			 int is_read, unsigned long h_offs,
+			 unsigned long sys_addr, unsigned long h_stride,
+			 unsigned long sys_stride, unsigned long elem_size,
+			 unsigned long count)
+{
+	ssize_t copied = 0;
+	pte_t **pte;
+	void *addr;
+	int ret = 0;
+
+	if (!elem_size)
+		return -EINVAL;
+
+	/* Unallocated handles have no backing memory to copy. */
+	if (!h->alloc)
+		return -EFAULT;
+
+	/* Contiguous on both sides: coalesce the atoms into a single
+	 * large copy to avoid per-atom bookkeeping. */
+	if (elem_size == h_stride && elem_size == sys_stride) {
+		elem_size *= count;
+		h_stride = elem_size;
+		sys_stride = elem_size;
+		count = 1;
+	}
+
+	/* Scratch kernel mapping used by rw_handle_page() to reach the
+	 * handle's physical pages. */
+	pte = nvmap_alloc_pte(client->dev, &addr);
+	if (IS_ERR(pte))
+		return PTR_ERR(pte);
+
+	while (count--) {
+		/* Each atom must lie entirely inside the handle. */
+		if (h_offs + elem_size > h->size) {
+			nvmap_warn(client, "read/write outside of handle\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
+				     elem_size, (unsigned long)addr, *pte);
+
+		if (ret)
+			break;
+
+		copied += elem_size;
+		sys_addr += sys_stride;
+		h_offs += h_stride;
+	}
+
+	nvmap_free_pte(client->dev, pte);
+	/* Report partial progress if any atoms were copied before an
+	 * error; otherwise propagate the error code. */
+	return ret ?: copied;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.h
+ *
+ * ioctl declarations for nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
+#define __VIDEO_TEGRA_NVMAP_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#include <mach/nvmap.h>
+
+enum {
+ NVMAP_HANDLE_PARAM_SIZE = 1,
+ NVMAP_HANDLE_PARAM_ALIGNMENT,
+ NVMAP_HANDLE_PARAM_BASE,
+ NVMAP_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMAP_CACHE_OP_WB = 0,
+ NVMAP_CACHE_OP_INV,
+ NVMAP_CACHE_OP_WB_INV,
+};
+
+
+struct nvmap_create_handle {
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
+};
+
+struct nvmap_alloc_handle {
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
+};
+
+struct nvmap_map_caller {
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
+};
+
+struct nvmap_rw_handle {
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
+};
+
+struct nvmap_pin_handle {
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
+
+struct nvmap_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
+};
+
+struct nvmap_cache_op {
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
+};
+
+#define NVMAP_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
+#define NVMAP_IOC_CLAIM _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle)
+#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMAP_IOC_MMAP _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMAP_IOC_WRITE _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
+#define NVMAP_IOC_READ _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
+
+#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMAP_IOC_PIN_MULT _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
+#define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
+
+#define NVMAP_IOC_CACHE _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
+
+#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID))
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg);
+
+int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg);
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg);
+
+
+
+#endif
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.c
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
+ * unpinned handles are placed onto a most-recently-used eviction list;
+ * multiple lists are maintained, segmented by size (sizes were chosen to
+ * roughly correspond with common sizes for graphics surfaces).
+ *
+ * if a handle is located on the MRU list, then the code below may
+ * steal its IOVMM area at any time to satisfy a pin operation if no
+ * free IOVMM space is available
+ */
+
+static const size_t mru_cutoff[] = {
+ 262144, 393216, 786432, 1048576, 1572864
+};
+
+/* Returns the MRU list bin for allocations of 'size' bytes: the first
+ * bin whose cutoff is >= size, or the final catch-all bin when the size
+ * exceeds every cutoff (mru_lists has ARRAY_SIZE(mru_cutoff) + 1
+ * entries, so indexing with i == ARRAY_SIZE(mru_cutoff) is valid). */
+static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
+{
+	unsigned int i;
+
+	BUG_ON(!share->mru_lists);
+	for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
+		if (size <= mru_cutoff[i])
+			break;
+
+	return &share->mru_lists[i];
+}
+
+/* Limits nvmap's use of an IOVMM client to three quarters of the total
+ * virtual space, leaving the remainder for other users. */
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
+{
+	size_t quarter = tegra_iovmm_get_vm_size(iovmm) >> 2;
+	return quarter * 3;
+}
+
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this.
+ * Adds the handle to the head of its size bin, so eviction (which takes
+ * the head) reuses the most recently unpinned area first. */
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
+{
+	size_t len = h->pgalloc.area->iovm_length;
+	list_add(&h->pgalloc.mru_list, mru_list(share, len));
+}
+
+/* Takes a handle off its MRU list (if it is on one), making its IOVMM
+ * area ineligible for stealing. */
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
+{
+	nvmap_mru_lock(s);
+	if (!list_empty(&h->pgalloc.mru_list))
+		list_del(&h->pgalloc.mru_list);
+	nvmap_mru_unlock(s);
+	/* NOTE(review): the list head is re-initialized outside the MRU
+	 * lock; this looks safe only if callers guarantee no concurrent
+	 * access at this point -- confirm against the pin/unpin paths. */
+	INIT_LIST_HEAD(&h->pgalloc.mru_list);
+}
+
+/* returns a tegra_iovmm_area for a handle. if the handle already has
+ * an iovmm_area allocated, the handle is simply removed from its MRU list
+ * and the existing iovmm_area is returned.
+ *
+ * if no existing allocation exists, try to allocate a new IOVMM area.
+ *
+ * if a new area can not be allocated, try to re-use the most-recently-unpinned
+ * handle's allocation.
+ *
+ * and if that fails, iteratively evict handles from the MRU lists and free
+ * their allocations, until the new allocation succeeds.
+ */
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+					    struct nvmap_handle *h)
+{
+	struct list_head *mru;
+	struct nvmap_handle *evict = NULL;
+	struct tegra_iovmm_area *vm = NULL;
+	unsigned int i, idx;
+	pgprot_t prot;
+
+	BUG_ON(!h || !c || !c->share);
+
+	prot = nvmap_pgprot(h, pgprot_kernel);
+
+	if (h->pgalloc.area) {
+		/* since this is only called inside the pin lock, and the
+		 * handle is gotten before it is pinned, there are no races
+		 * where h->pgalloc.area is changed after the comparison */
+		nvmap_mru_lock(c->share);
+		BUG_ON(list_empty(&h->pgalloc.mru_list));
+		list_del(&h->pgalloc.mru_list);
+		INIT_LIST_HEAD(&h->pgalloc.mru_list);
+		nvmap_mru_unlock(c->share);
+		return h->pgalloc.area;
+	}
+
+	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
+
+	if (vm) {
+		INIT_LIST_HEAD(&h->pgalloc.mru_list);
+		return vm;
+	}
+	/* attempt to re-use the most recently unpinned IOVMM area in the
+	 * same size bin as the current handle. If that fails, iteratively
+	 * evict handles (starting from the current bin) until an allocation
+	 * succeeds or no more areas can be evicted */
+
+	nvmap_mru_lock(c->share);
+	mru = mru_list(c->share, h->size);
+	if (!list_empty(mru))
+		evict = list_first_entry(mru, struct nvmap_handle,
+					 pgalloc.mru_list);
+
+	/* Fast path: hand over the head victim's area directly, without a
+	 * free/alloc cycle, when it is already large enough. */
+	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+		list_del(&evict->pgalloc.mru_list);
+		vm = evict->pgalloc.area;
+		evict->pgalloc.area = NULL;
+		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+		nvmap_mru_unlock(c->share);
+		return vm;
+	}
+
+	/* Slow path: walk every bin starting from our own, freeing one
+	 * victim's area at a time and retrying the allocation. */
+	idx = mru - c->share->mru_lists;
+
+	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
+		if (idx >= c->share->nr_mru)
+			idx = 0;
+		mru = &c->share->mru_lists[idx];
+		while (!list_empty(mru) && !vm) {
+			evict = list_first_entry(mru, struct nvmap_handle,
+						 pgalloc.mru_list);
+
+			BUG_ON(atomic_read(&evict->pin) != 0);
+			BUG_ON(!evict->pgalloc.area);
+			list_del(&evict->pgalloc.mru_list);
+			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+			/* The MRU lock is dropped around the IOVMM calls,
+			 * so list_empty() is re-checked after retaking it. */
+			nvmap_mru_unlock(c->share);
+			tegra_iovmm_free_vm(evict->pgalloc.area);
+			evict->pgalloc.area = NULL;
+			vm = tegra_iovmm_create_vm(c->share->iovmm,
+						   NULL, h->size, prot);
+			nvmap_mru_lock(c->share);
+		}
+	}
+	nvmap_mru_unlock(c->share);
+	return vm;
+}
+
+/* Allocates and initializes the per-share MRU eviction lists: one list
+ * per size bin in mru_cutoff plus a final catch-all bin for larger
+ * allocations.  Returns 0 on success or -ENOMEM. */
+int nvmap_mru_init(struct nvmap_share *share)
+{
+	int i;
+	spin_lock_init(&share->mru_lock);
+	share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
+
+	share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
+				   GFP_KERNEL);
+
+	if (!share->mru_lists)
+		return -ENOMEM;
+
+	/* The array holds exactly nr_mru list heads; the previous '<='
+	 * bound ran one INIT_LIST_HEAD past the end of the buffer. */
+	for (i = 0; i < share->nr_mru; i++)
+		INIT_LIST_HEAD(&share->mru_lists[i]);
+
+	return 0;
+}
+
+/* Releases the MRU lists allocated by nvmap_mru_init().  Safe to call
+ * on a share whose init failed: kfree(NULL) is a no-op, so no guard is
+ * needed. */
+void nvmap_mru_destroy(struct nvmap_share *share)
+{
+	kfree(share->mru_lists);
+	share->mru_lists = NULL;
+}
--- /dev/null
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.h
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef __VIDEO_TEGRA_NVMAP_MRU_H
+#define __VIDEO_TEGRA_NVMAP_MRU_H
+
+#include <linux/spinlock.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area;
+struct tegra_iovmm_client;
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+
+/* Protects the share's MRU lists; taken by the insert/remove helpers
+ * and by the IOVMM eviction path. */
+static inline void nvmap_mru_lock(struct nvmap_share *share)
+{
+	spin_lock(&share->mru_lock);
+}
+
+static inline void nvmap_mru_unlock(struct nvmap_share *share)
+{
+	spin_unlock(&share->mru_lock);
+}
+
+int nvmap_mru_init(struct nvmap_share *share);
+
+void nvmap_mru_destroy(struct nvmap_share *share);
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm);
+
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
+
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h);
+
+#else
+
+/* IOVMM reclamation disabled: all MRU bookkeeping collapses to no-ops
+ * and nvmap may use the client's full IOVMM space. */
+#define nvmap_mru_lock(_s) do { } while (0)
+#define nvmap_mru_unlock(_s) do { } while (0)
+#define nvmap_mru_init(_s) 0
+#define nvmap_mru_destroy(_s) do { } while (0)
+#define nvmap_mru_vm_size(_a) tegra_iovmm_get_vm_size(_a)
+
+static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
+					   struct nvmap_handle *h)
+{ }
+
+static inline void nvmap_mru_remove(struct nvmap_share *s,
+				    struct nvmap_handle *h)
+{ }
+
+/* Without reclamation a handle's area is never stolen, so it must
+ * already exist whenever this is called. */
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+							  struct nvmap_handle *h)
+{
+	BUG_ON(!h->pgalloc.area);
+	return h->pgalloc.area;
+}
+
+#endif
writel(tmp, engine + 0x1C);
}
- if (op != VIA_BITBLT_COLOR)
+ if (op == VIA_BITBLT_FILL) {
+ writel(fg_color, engine + 0x58);
+ } else if (op == VIA_BITBLT_MONO) {
writel(fg_color, engine + 0x4C);
-
- if (op == VIA_BITBLT_MONO)
writel(bg_color, engine + 0x50);
+ }
if (op == VIA_BITBLT_FILL)
ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
{
+ int ret;
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
{
+ int ret;
u8 msg[2] = { index, data };
struct i2c_msg msgs;
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+ if (ret == 1)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
{
+ int ret;
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
/*
This support is also available as a module. If so, the module
will be called w1-gpio.
+config W1_MASTER_TEGRA
+ tristate "NVidia Tegra SoC 1-wire busmaster"
+ depends on ARCH_TEGRA
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ the NVidia Tegra SoC one-wire interfaces.
+
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
depends on ARCH_OMAP2430 || ARCH_OMAP3
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
+obj-$(CONFIG_W1_MASTER_TEGRA) += tegra_w1.o
--- /dev/null
+/*
+ * drivers/w1/masters/tegra_w1.c
+ *
+ * W1 master driver for internal OWR controllers in NVIDIA Tegra SoCs.
+ *
+ * Copyright (C) 2010 Motorola, Inc
+ * Author: Andrei Warkentin <andreiw@motorola.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <mach/w1.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_log.h"
+
+#define DRIVER_NAME "tegra_w1"
+
+/* OWR_CONTROL_0 is the main control register, and should be configured
+ last after configuring all other settings. */
+#define OWR_CONTROL (0x0)
+#define OC_RD_BIT (1 << 31)
+#define OC_WR0_BIT (1 << 30)
+#define OC_RD_SCLK_SHIFT (23)
+#define OC_RD_SCLK_MASK (0xF)
+#define OC_P_SCLK_SHIFT (15)
+#define OC_P_SCLK_MASK (0xFF)
+#define OC_BIT_XMODE (1 << 2)
+#define OC_GO (1 << 0)
+
+/* OWR_WR_RD_TCTL_0 controls read/write timings. */
+#define OWR_WR_RD_TCTL (0xc)
+#define ORWT_TSU_SHIFT (28)
+#define ORWT_TSU_MASK (0x3)
+#define ORWT_TRELEASE_SHIFT (22)
+#define ORWT_TRELEASE_MASK (0x3F)
+#define ORWT_TRDV_SHIFT (18)
+#define ORWT_TRDV_MASK (0xF)
+#define ORWT_TLOW0_SHIFT (11)
+#define ORWT_TLOW0_MASK (0x7F)
+#define ORWT_TLOW1_SHIFT (7)
+#define ORWT_TLOW1_MASK (0xF)
+#define ORWT_TSLOT_SHIFT (0)
+#define ORWT_TSLOT_MASK (0x7F)
+
+/* OWR_RST_PRES_TCTL_0 controls reset presence timings. */
+#define OWR_RST_PRES_TCTL (0x10)
+#define ORPT_TPDL_SHIFT (24)
+#define ORPT_TPDL_MASK (0xFF)
+#define ORPT_TPDH_SHIFT (18)
+#define ORPT_TPDH_MASK (0x3F)
+#define ORPT_TRSTL_SHIFT (9)
+#define ORPT_TRSTL_MASK (0x1FF)
+#define ORPT_TRSTH_SHIFT (0)
+#define ORPT_TRSTH_MASK (0x1FF)
+
+/* OWR_INTR_MASK_0 stores the masks for the interrupts. */
+#define OWR_INTR_MASK (0x24)
+#define OI_BIT_XFER_DONE (1 << 13)
+#define OI_PRESENCE_DONE (1 << 5)
+#define OI_PRESENCE_ERR (1 << 0)
+
+/* OWR_INTR_STATUS_0 is the interrupt status register. */
+#define OWR_INTR_STATUS (0x28)
+
+/* OWR_STATUS_0 is the status register. */
+#define OWR_STATUS (0x34)
+#define OS_READ_BIT_SHIFT (23)
+#define OS_RDY (1 << 0)
+
+/* Transfer_completion wait time. */
+#define BIT_XFER_COMPLETION_TIMEOUT_MSEC (5000)
+
+/* Errors in the interrupt status register for bit
+ transfers. */
+#define BIT_XFER_ERRORS (OI_PRESENCE_ERR)
+
+/* OWR requires 1MHz clock. This value is in Herz. */
+#define OWR_CLOCK (1000000)
+
+#define W1_ERR(format, ...) \
+ printk(KERN_ERR "(%s: line %d) " format, \
+ __func__, __LINE__, ## __VA_ARGS__)
+
+struct tegra_device {
+	bool ready;			/* cleared by remove(); gates new bus ops */
+	struct w1_bus_master bus_master;
+	struct clk *clk;		/* OWR controller clock (set to 1 MHz) */
+	void __iomem *ioaddr;		/* mapped OWR register bank */
+	struct mutex mutex;		/* serializes bus transactions */
+	spinlock_t spinlock;		/* guards transfer_completion/intr_status */
+	struct completion *transfer_completion;	/* non-NULL while a transfer waits */
+	unsigned long intr_status;	/* status latched by the IRQ handler */
+	struct tegra_w1_timings *timings;	/* board-supplied bus timings */
+};
+
+/* If debug_print & DEBUG_PRESENCE, print whether slaves detected
+ or not in reset_bus. */
+#define DEBUG_PRESENCE (0x1)
+
+/* If debug_print & DEBUG_TIMEOUT, print whether timeouts on waiting
+ for device interrupts occurs. */
+#define DEBUG_TIMEOUT (0x2)
+
+static uint32_t debug_print;
+module_param_named(debug, debug_print, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debugging output commands:\n"
+ "\tbit 0 - log reset_bus presence detects\n"
+ "\tbit 1 - log interrupt timeouts\n");
+
+/* Reads the OWR register at byte offset 'reg'. */
+static inline unsigned long w1_readl(struct tegra_device *dev,
+				     unsigned long reg)
+{
+	return readl(dev->ioaddr + reg);
+}
+
+/* Writes 'val' into the OWR register at byte offset 'reg'. */
+static inline void w1_writel(struct tegra_device *dev, unsigned long val,
+			     unsigned long reg)
+{
+	writel(val, dev->ioaddr + reg);
+}
+
+/* Sets the device's interrupt mask (0 disables all sources, as done on
+   every exit path). */
+static inline void w1_imask(struct tegra_device *dev, unsigned long mask)
+{
+	w1_writel(dev, mask, OWR_INTR_MASK);
+}
+
+/* Waits for completion of a bit transfer, checks intr_status against
+   BIT_XFER_ERRORS and an additional provided bit mask.  Returns 0 on
+   success, -ETIME if the interrupt never arrived, or -EIO if an error
+   bit was set or the expected bit was not. */
+static inline int w1_wait(struct tegra_device *dev, unsigned long mask)
+{
+	int ret;
+	unsigned long irq_flags;
+	unsigned long intr_status;
+
+	ret = wait_for_completion_timeout(dev->transfer_completion,
+		msecs_to_jiffies(BIT_XFER_COMPLETION_TIMEOUT_MSEC));
+
+	if (unlikely(!ret)) {
+		if (debug_print & DEBUG_TIMEOUT)
+			W1_ERR("timeout\n");
+		return -ETIME;
+	}
+
+	/* Consume the status latched by the IRQ handler, under the same
+	 * lock the handler takes. */
+	spin_lock_irqsave(&dev->spinlock, irq_flags);
+	intr_status = dev->intr_status;
+	dev->intr_status = 0;
+	spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+	if (unlikely(intr_status & BIT_XFER_ERRORS ||
+		!(intr_status & mask)))
+		return -EIO;
+	return 0;
+}
+
+/* Programs timing registers, and puts the device into a known state.
+   Interrupts are safe to enable past this point.  The controller clock
+   is held only for the duration of the register writes. */
+static int w1_setup(struct tegra_device *dev)
+{
+	unsigned long value;
+	clk_enable(dev->clk);
+
+	/* Read/write slot timings from the board-supplied table. */
+	value =
+	    ((dev->timings->tslot & ORWT_TSLOT_MASK) << ORWT_TSLOT_SHIFT) |
+	    ((dev->timings->tlow1 & ORWT_TLOW1_MASK) << ORWT_TLOW1_SHIFT) |
+	    ((dev->timings->tlow0 & ORWT_TLOW0_MASK) << ORWT_TLOW0_SHIFT) |
+	    ((dev->timings->trdv & ORWT_TRDV_MASK) << ORWT_TRDV_SHIFT) |
+	    ((dev->timings->trelease & ORWT_TRELEASE_MASK) <<
+	     ORWT_TRELEASE_SHIFT) |
+	    ((dev->timings->tsu & ORWT_TSU_MASK) << ORWT_TSU_SHIFT);
+	w1_writel(dev, value, OWR_WR_RD_TCTL);
+
+	/* Reset / presence-detect timings. */
+	value =
+	    ((dev->timings->trsth & ORPT_TRSTH_MASK) << ORPT_TRSTH_SHIFT) |
+	    ((dev->timings->trstl & ORPT_TRSTL_MASK) << ORPT_TRSTL_SHIFT) |
+	    ((dev->timings->tpdh & ORPT_TPDH_MASK) << ORPT_TPDH_SHIFT) |
+	    ((dev->timings->tpdl & ORPT_TPDL_MASK) << ORPT_TPDL_SHIFT);
+	w1_writel(dev, value, OWR_RST_PRES_TCTL);
+
+	/* Clear interrupt status/mask registers in case
+	   anything was set in it. */
+	w1_imask(dev, 0);
+	w1_writel(dev, 0xFFFFFFFF, OWR_INTR_STATUS);
+	clk_disable(dev->clk);
+	return 0;
+}
+
+/* Interrupt handler for OWR communication.  Latches the raw status for
+   the waiter in w1_wait(), acks the bits in hardware, and completes the
+   pending transfer. */
+static irqreturn_t tegra_w1_irq(int irq, void *cookie)
+{
+	unsigned long irq_flags;
+	unsigned long status;
+	struct tegra_device *dev = cookie;
+
+	status = w1_readl(dev, OWR_INTR_STATUS);
+	if (unlikely(!status)) {
+
+		/* Not for me if no status bits are set (the line is
+		   registered IRQF_SHARED in probe). */
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&dev->spinlock, irq_flags);
+
+	if (likely(dev->transfer_completion)) {
+		dev->intr_status = status;
+		w1_writel(dev, status, OWR_INTR_STATUS);
+		complete(dev->transfer_completion);
+	} else {
+		/* No transfer in flight: log instead of completing, and
+		   leave the status bits unacked. */
+		W1_ERR("spurious interrupt, status = 0x%lx\n", status);
+	}
+
+	spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+	return IRQ_HANDLED;
+}
+
+/* Perform a write-0 cycle if bit == 0, otherwise
+   perform a read cycle (which also writes a 1 to the bus).
+   Returns the sampled bit for reads, 0 otherwise or on error. */
+static u8 tegra_w1_touch_bit(void *data, u8 bit)
+{
+	int rc;
+	u8 return_bit;
+	unsigned long control;
+	DECLARE_COMPLETION_ONSTACK(touch_done);
+	struct tegra_device *dev = (struct tegra_device *) data;
+
+	return_bit = 0;
+	mutex_lock(&dev->mutex);
+	/* Bail out before touching the hardware when the controller is
+	 * being torn down.  The clock is not enabled yet on this path,
+	 * so skip the common exit: falling through to it performed a
+	 * register write with the clock gated and an unbalanced
+	 * clk_disable(). */
+	if (!dev->ready)
+		goto out_unlock;
+
+	clk_enable(dev->clk);
+	w1_imask(dev, OI_BIT_XFER_DONE);
+	dev->transfer_completion = &touch_done;
+	control =
+	    ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+	    ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+	    OC_BIT_XMODE;
+
+	/* Read bit (well, writes a 1 to the bus as well). */
+	if (bit) {
+		w1_writel(dev, control | OC_RD_BIT, OWR_CONTROL);
+		rc = w1_wait(dev, OI_BIT_XFER_DONE);
+
+		if (rc) {
+			W1_ERR("write-1/read failed\n");
+			goto done;
+		}
+
+		return_bit =
+		    (w1_readl(dev, OWR_STATUS) >> OS_READ_BIT_SHIFT) & 1;
+
+	}
+
+	/* Write 0. */
+	else {
+		w1_writel(dev, control | OC_WR0_BIT, OWR_CONTROL);
+		rc = w1_wait(dev, OI_BIT_XFER_DONE);
+		if (rc) {
+			W1_ERR("write-0 failed\n");
+			goto done;
+		}
+	}
+
+done:
+	w1_imask(dev, 0);
+	dev->transfer_completion = NULL;
+	clk_disable(dev->clk);
+out_unlock:
+	mutex_unlock(&dev->mutex);
+	return return_bit;
+}
+
+/* Performs a bus reset cycle, and returns 0 if slaves present. */
+static u8 tegra_w1_reset_bus(void *data)
+{
+	int rc;
+	int presence;
+	unsigned long value;
+	DECLARE_COMPLETION_ONSTACK(reset_done);
+	struct tegra_device *dev = (struct tegra_device *) data;
+
+	presence = 1;
+	mutex_lock(&dev->mutex);
+	/* Leave without touching the hardware when the controller is
+	 * being torn down.  The clock is not enabled on this path, so
+	 * the old fall-through to the common exit wrote a register with
+	 * the clock gated and unbalanced the clock refcount; the
+	 * presence debug print is also skipped since no cycle ran. */
+	if (!dev->ready)
+		goto out_unlock;
+
+	clk_enable(dev->clk);
+	w1_imask(dev, OI_PRESENCE_DONE);
+	dev->transfer_completion = &reset_done;
+	value =
+	    ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+	    ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+	    OC_BIT_XMODE | OC_GO;
+	w1_writel(dev, value, OWR_CONTROL);
+
+	rc = w1_wait(dev, OI_PRESENCE_DONE);
+	if (rc)
+		goto done;
+
+	presence = 0;
+done:
+
+	if (debug_print & DEBUG_PRESENCE) {
+		if (presence)
+			W1_ERR("no slaves present\n");
+		else
+			W1_ERR("slaves present\n");
+	}
+
+	w1_imask(dev, 0);
+	dev->transfer_completion = NULL;
+	clk_disable(dev->clk);
+out_unlock:
+	mutex_unlock(&dev->mutex);
+	return presence;
+}
+
+/* Probes the Tegra OWR controller: maps its MMIO window, configures the
+ * 1 MHz controller clock and bus timings, and registers a W1 bus
+ * master.  Resources are unwound in reverse order on failure. */
+static int tegra_w1_probe(struct platform_device *pdev)
+{
+	int rc;
+	int irq;
+	struct resource *res;
+	struct tegra_device *dev;
+	struct tegra_w1_platform_data *plat = pdev->dev.platform_data;
+
+	printk(KERN_INFO "Driver for Tegra SoC 1-wire controller\n");
+
+	/* Board code must supply a clock id and bus timings. */
+	if (plat == NULL || plat->timings == NULL)
+		return -ENXIO;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	irq = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	dev = kzalloc(sizeof(struct tegra_device), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, dev);
+	dev->clk = clk_get(&pdev->dev, plat->clk_id);
+	if (IS_ERR(dev->clk)) {
+		rc = PTR_ERR(dev->clk);
+		goto cleanup_alloc;
+	}
+
+	/* OWR requires 1MHz clock. */
+	rc = clk_set_rate(dev->clk, OWR_CLOCK);
+	if (rc)
+		goto cleanup_clock;
+
+	if (!request_mem_region
+	    (res->start, res->end - res->start + 1, dev_name(&pdev->dev))) {
+		rc = -EBUSY;
+		goto cleanup_clock;
+	}
+
+	dev->ioaddr = ioremap(res->start, res->end - res->start + 1);
+	if (!dev->ioaddr) {
+		rc = -ENOMEM;
+		goto cleanup_reqmem;
+	}
+
+	dev->timings = plat->timings;
+	dev->bus_master.data = dev;
+	dev->bus_master.touch_bit = tegra_w1_touch_bit;
+	dev->bus_master.reset_bus = tegra_w1_reset_bus;
+
+	spin_lock_init(&dev->spinlock);
+	mutex_init(&dev->mutex);
+
+	/* Program device into known state. */
+	w1_setup(dev);
+
+	/* IRQF_SHARED: the handler returns IRQ_NONE when no OWR status
+	 * bits are set, so sharing the line is safe. */
+	rc = request_irq(irq, tegra_w1_irq, IRQF_SHARED, DRIVER_NAME, dev);
+	if (rc)
+		goto cleanup_ioremap;
+
+	rc = w1_add_master_device(&dev->bus_master);
+	if (rc)
+		goto cleanup_irq;
+
+	/* Only now allow touch_bit/reset_bus to access the hardware. */
+	dev->ready = true;
+	return 0;
+
+cleanup_irq:
+	free_irq(irq, dev);
+cleanup_ioremap:
+	iounmap(dev->ioaddr);
+cleanup_reqmem:
+	release_mem_region(res->start,
+			   res->end - res->start + 1);
+cleanup_clock:
+	clk_put(dev->clk);
+cleanup_alloc:
+	platform_set_drvdata(pdev, NULL);
+	kfree(dev);
+	return rc;
+}
+
+/* Tears down the controller.  'ready' is cleared under the mutex first,
+ * so in-flight bus ops finish and later ones bail out before the IRQ
+ * and mappings disappear.
+ * NOTE(review): w1_remove_master_device() is never called even though
+ * probe registered one -- confirm the w1 core tolerates the master
+ * vanishing on module unload. */
+static int tegra_w1_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct tegra_device *dev = platform_get_drvdata(pdev);
+
+	mutex_lock(&dev->mutex);
+	dev->ready = false;
+	mutex_unlock(&dev->mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	free_irq(res->start, dev);
+	iounmap(dev->ioaddr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, res->end - res->start + 1);
+	clk_put(dev->clk);
+	platform_set_drvdata(pdev, NULL);
+	kfree(dev);
+	return 0;
+}
+
+/* Nothing to save: tegra_w1_resume() reprograms the controller from
+ * scratch via w1_setup(), and the clock is only enabled per-transfer. */
+static int tegra_w1_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+/* Re-applies timing and interrupt setup after resume, in case the
+ * controller lost register state while suspended. */
+static int tegra_w1_resume(struct platform_device *pdev)
+{
+	struct tegra_device *dev = platform_get_drvdata(pdev);
+
+	/* TODO: Is this necessary? I would assume yes. */
+	w1_setup(dev);
+	return 0;
+}
+
+static struct platform_driver tegra_w1_driver = {
+ .probe = tegra_w1_probe,
+ .remove = tegra_w1_remove,
+ .suspend = tegra_w1_suspend,
+ .resume = tegra_w1_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Standard platform-driver registration boilerplate. */
+static int __init tegra_w1_init(void)
+{
+	return platform_driver_register(&tegra_w1_driver);
+}
+
+static void __exit tegra_w1_exit(void)
+{
+	platform_driver_unregister(&tegra_w1_driver);
+}
+
+module_init(tegra_w1_init);
+module_exit(tegra_w1_exit);
+
+MODULE_DESCRIPTION("Tegra W1 master driver");
+MODULE_AUTHOR("Andrei Warkentin <andreiw@motorola.com>");
+MODULE_LICENSE("GPL");
To compile this driver as a module, choose M here: the
module will be called mpcore_wdt.
+config TEGRA_WATCHDOG
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA
+ help
+ Say Y here to include support for the watchdog timer
+ embedded in NVIDIA Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra_wdt.
+
+config TEGRA_WATCHDOG_ENABLE_ON_PROBE
+	bool "Enable Tegra watchdog on driver probe"
+	depends on ARCH_TEGRA && TEGRA_WATCHDOG
+	help
+	  Say Y here to enable the tegra watchdog at driver
+	  probe time, rather than when the device is opened.
+
config EP93XX_WATCHDOG
tristate "EP93xx Watchdog"
depends on ARCH_EP93XX
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
+obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = platform_get_drvdata(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
--- /dev/null
+/*
+ * drivers/watchdog/tegra_wdt.c
+ *
+ * watchdog driver for NVIDIA tegra internal watchdog
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * based on drivers/watchdog/softdog.c and drivers/watchdog/omap_wdt.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+/* minimum and maximum watchdog trigger periods, in seconds */
+#define MIN_WDT_PERIOD	5
+#define MAX_WDT_PERIOD	1000
+
+/* timer (TMR1) registers, offsets from wdt_timer mapping */
+#define TIMER_PTV	0x0
+#define TIMER_EN	(1 << 31)
+#define TIMER_PERIODIC	(1 << 30)
+
+#define TIMER_PCR	0x4
+#define TIMER_PCR_INTR	(1 << 30)
+
+/* watchdog source register bits, written via wdt_source mapping */
+#define WDT_EN		(1 << 5)
+#define WDT_SEL_TMR1	(0 << 4)
+#define WDT_SYS_RST	(1 << 2)
+
+static int heartbeat = 30; /* must be greater than MIN_WDT_PERIOD and lower than MAX_WDT_PERIOD */
+
+/* Per-device state; a single instance is supported (see tegra_wdt_dev). */
+struct tegra_wdt {
+	struct miscdevice	miscdev;	/* /dev/watchdog node */
+	struct notifier_block	notifier;	/* reboot notifier */
+	struct resource		*res_src;	/* claimed source reg region */
+	struct resource		*res_wdt;	/* claimed timer reg region */
+	unsigned long		users;		/* bit 1: device opened */
+	void __iomem		*wdt_source;	/* mapped source registers */
+	void __iomem		*wdt_timer;	/* mapped timer registers */
+	int			irq;		/* -1 until request_irq() */
+	int			timeout;	/* current period, seconds */
+	bool			enabled;	/* re-arm on resume if set */
+};
+
+/* enforces the single-instance limit checked in probe */
+static struct tegra_wdt *tegra_wdt_dev;
+
+/* Arm the watchdog: program TMR1 as a periodic timer at half the
+ * watchdog period and select it as the system-reset source. */
+static void tegra_wdt_enable(struct tegra_wdt *wdt)
+{
+	u32 val;
+
+	/* since the watchdog reset occurs when a second interrupt
+	 * is asserted before the first is processed, program the
+	 * timer period to one-half of the watchdog period */
+	val = wdt->timeout * 1000000ul / 2;	/* microseconds */
+	val |= (TIMER_EN | TIMER_PERIODIC);
+	writel(val, wdt->wdt_timer + TIMER_PTV);
+
+	val = WDT_EN | WDT_SEL_TMR1 | WDT_SYS_RST;
+	writel(val, wdt->wdt_source);
+}
+
+/* Stop both the watchdog reset source and the timer that feeds it. */
+static void tegra_wdt_disable(struct tegra_wdt *wdt)
+{
+	writel(0, wdt->wdt_source);
+	writel(0, wdt->wdt_timer + TIMER_PTV);
+}
+
+/* Timer tick: acknowledging the interrupt is what pets the watchdog
+ * (a second unacknowledged tick triggers the reset, see enable()). */
+static irqreturn_t tegra_wdt_interrupt(int irq, void *dev_id)
+{
+	struct tegra_wdt *wdt = dev_id;
+
+	writel(TIMER_PCR_INTR, wdt->wdt_timer + TIMER_PCR);
+	return IRQ_HANDLED;
+}
+
+/* Reboot notifier: stop the watchdog so it cannot fire during an
+ * orderly shutdown or halt. */
+static int tegra_wdt_notify(struct notifier_block *this,
+			    unsigned long code, void *dev)
+{
+	struct tegra_wdt *wdt = container_of(this, struct tegra_wdt, notifier);
+
+	if (code == SYS_DOWN || code == SYS_HALT)
+		tegra_wdt_disable(wdt);
+	return NOTIFY_DONE;
+}
+
+/* Single-open policy: bit 1 of wdt->users is the busy flag (dropped in
+ * release). Opening starts the watchdog with the module heartbeat. */
+static int tegra_wdt_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct tegra_wdt *wdt = dev_get_drvdata(miscdev->parent);
+
+	if (test_and_set_bit(1, &wdt->users))
+		return -EBUSY;
+
+	wdt->enabled = true;
+	wdt->timeout = heartbeat;
+	tegra_wdt_enable(wdt);
+	file->private_data = wdt;
+	return nonseekable_open(inode, file);
+}
+
+/* Close /dev/watchdog: stop the hardware unless NOWAYOUT, then drop
+ * the single-open claim. */
+static int tegra_wdt_release(struct inode *inode, struct file *file)
+{
+	struct tegra_wdt *wdt = file->private_data;
+
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+	tegra_wdt_disable(wdt);
+	wdt->enabled = false;
+#endif
+	/* drop the claim taken with test_and_set_bit() in open();
+	 * a plain "wdt->users = 0" store is not atomic and could race
+	 * with a concurrent open */
+	clear_bit(1, &wdt->users);
+	return 0;
+}
+
+/* Standard watchdog-API ioctls. SETTIMEOUT clamps the requested value
+ * to [MIN_WDT_PERIOD, MAX_WDT_PERIOD] and restarts the timer. */
+static long tegra_wdt_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct tegra_wdt *wdt = file->private_data;
+	static DEFINE_SPINLOCK(lock);
+	int new_timeout;
+	static const struct watchdog_info ident = {
+		.identity = "Tegra Watchdog",
+		.options = WDIOF_SETTIMEOUT,
+		.firmware_version = 0,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		/* copy_to_user() returns the number of bytes NOT copied,
+		 * not an errno; map any failure to -EFAULT */
+		return copy_to_user((struct watchdog_info __user *)arg, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, (int __user *)arg);
+
+	case WDIOC_KEEPALIVE:
+		/* petting happens in the timer IRQ; nothing to do here */
+		return 0;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_timeout, (int __user *)arg))
+			return -EFAULT;
+		spin_lock(&lock);
+		tegra_wdt_disable(wdt);
+		wdt->timeout = clamp(new_timeout, MIN_WDT_PERIOD, MAX_WDT_PERIOD);
+		tegra_wdt_enable(wdt);
+		spin_unlock(&lock);
+		/* fall through - report the (possibly clamped) timeout */
+	case WDIOC_GETTIMEOUT:
+		return put_user(wdt->timeout, (int __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/* Writes are accepted but ignored: the periodic timer IRQ pets the
+ * watchdog, so no explicit keepalive data is required. */
+static ssize_t tegra_wdt_write(struct file *file, const char __user *data,
+			       size_t len, loff_t *ppos)
+{
+	return len;
+}
+
+static const struct file_operations tegra_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= tegra_wdt_write,
+	.unlocked_ioctl	= tegra_wdt_ioctl,
+	.open		= tegra_wdt_open,
+	.release	= tegra_wdt_release,
+};
+
+/* Probe: map the source and timer register windows, hook the timer
+ * IRQ, register reboot notifier and misc device. Only one instance
+ * (pdev->id == -1) is supported. */
+static int tegra_wdt_probe(struct platform_device *pdev)
+{
+	struct resource *res_src, *res_wdt, *res_irq;
+	struct tegra_wdt *wdt;
+	u32 src;
+	int ret = 0;
+
+	if (pdev->id != -1) {
+		dev_err(&pdev->dev, "only id -1 supported\n");
+		return -ENODEV;
+	}
+
+	if (tegra_wdt_dev != NULL) {
+		dev_err(&pdev->dev, "watchdog already registered\n");
+		return -EIO;
+	}
+
+	res_src = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res_wdt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	if (!res_src || !res_wdt || !res_irq) {
+		dev_err(&pdev->dev, "incorrect resources\n");
+		return -ENOENT;
+	}
+
+	wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
+	if (!wdt) {
+		dev_err(&pdev->dev, "out of memory\n");
+		return -ENOMEM;
+	}
+
+	wdt->irq = -1;	/* "no IRQ requested yet" marker for the fail path */
+	wdt->miscdev.parent = &pdev->dev;
+	wdt->miscdev.minor = WATCHDOG_MINOR;
+	wdt->miscdev.name = "watchdog";
+	wdt->miscdev.fops = &tegra_wdt_fops;
+
+	wdt->notifier.notifier_call = tegra_wdt_notify;
+
+	res_src = request_mem_region(res_src->start, resource_size(res_src),
+				     pdev->name);
+	res_wdt = request_mem_region(res_wdt->start, resource_size(res_wdt),
+				     pdev->name);
+
+	if (!res_src || !res_wdt) {
+		dev_err(&pdev->dev, "unable to request memory resources\n");
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	wdt->wdt_source = ioremap(res_src->start, resource_size(res_src));
+	wdt->wdt_timer = ioremap(res_wdt->start, resource_size(res_wdt));
+	if (!wdt->wdt_source || !wdt->wdt_timer) {
+		dev_err(&pdev->dev, "unable to map registers\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* NOTE(review): bit 12 presumably latches "last reset caused by
+	 * watchdog" -- confirm against the Tegra TRM */
+	src = readl(wdt->wdt_source);
+	if (src & BIT(12))
+		dev_info(&pdev->dev, "last reset due to watchdog timeout\n");
+
+	/* start from a quiescent state before the IRQ can be delivered */
+	tegra_wdt_disable(wdt);
+	writel(TIMER_PCR_INTR, wdt->wdt_timer + TIMER_PCR);
+
+	ret = request_irq(res_irq->start, tegra_wdt_interrupt, IRQF_DISABLED,
+			  dev_name(&pdev->dev), wdt);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to configure IRQ\n");
+		goto fail;
+	}
+
+	wdt->irq = res_irq->start;
+	wdt->res_src = res_src;
+	wdt->res_wdt = res_wdt;
+
+	ret = register_reboot_notifier(&wdt->notifier);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot register reboot notifier\n");
+		goto fail;
+	}
+
+	ret = misc_register(&wdt->miscdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register misc device\n");
+		unregister_reboot_notifier(&wdt->notifier);
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+	tegra_wdt_dev = wdt;
+#ifdef CONFIG_TEGRA_WATCHDOG_ENABLE_ON_PROBE
+	wdt->enabled = true;
+	wdt->timeout = heartbeat;
+	tegra_wdt_enable(wdt);
+#endif
+	return 0;
+fail:
+	/* unwind: each pointer is checked so partial setup is safe */
+	if (wdt->irq != -1)
+		free_irq(wdt->irq, wdt);
+	if (wdt->wdt_source)
+		iounmap(wdt->wdt_source);
+	if (wdt->wdt_timer)
+		iounmap(wdt->wdt_timer);
+	if (res_src)
+		release_mem_region(res_src->start, resource_size(res_src));
+	if (res_wdt)
+		release_mem_region(res_wdt->start, resource_size(res_wdt));
+	kfree(wdt);
+	return ret;
+}
+
+/* Tear down in reverse order of probe; the hardware is stopped first
+ * so it cannot fire while resources are being released. */
+static int tegra_wdt_remove(struct platform_device *pdev)
+{
+	struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+	tegra_wdt_disable(wdt);
+
+	unregister_reboot_notifier(&wdt->notifier);
+	misc_deregister(&wdt->miscdev);
+	free_irq(wdt->irq, wdt);
+	iounmap(wdt->wdt_source);
+	iounmap(wdt->wdt_timer);
+	release_mem_region(wdt->res_src->start, resource_size(wdt->res_src));
+	release_mem_region(wdt->res_wdt->start, resource_size(wdt->res_wdt));
+	kfree(wdt);
+	tegra_wdt_dev = NULL;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Always stop the watchdog across suspend so it cannot reset the
+ * system while timers are frozen. */
+static int tegra_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+	tegra_wdt_disable(wdt);
+	return 0;
+}
+
+/* Re-arm only if userspace (or probe-time enable) had it running. */
+static int tegra_wdt_resume(struct platform_device *pdev)
+{
+	struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+	if (wdt->enabled)
+		tegra_wdt_enable(wdt);
+
+	return 0;
+}
+#endif
+
+/* Module boilerplate: platform-driver registration and metadata. */
+static struct platform_driver tegra_wdt_driver = {
+	.probe		= tegra_wdt_probe,
+	.remove		= __devexit_p(tegra_wdt_remove),
+#ifdef CONFIG_PM
+	.suspend	= tegra_wdt_suspend,
+	.resume		= tegra_wdt_resume,
+#endif
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "tegra_wdt",
+	},
+};
+
+static int __init tegra_wdt_init(void)
+{
+	return platform_driver_register(&tegra_wdt_driver);
+}
+
+static void __exit tegra_wdt_exit(void)
+{
+	platform_driver_unregister(&tegra_wdt_driver);
+}
+
+module_init(tegra_wdt_init);
+module_exit(tegra_wdt_exit);
+
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+	"Watchdog heartbeat period in seconds");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:tegra_wdt");
+
+
}
#endif
- memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+ memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
struct bio *bio;
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
gfp_mask);
if (unlikely(!bio))
static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
gfp_t gfp_mask)
{
- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+ struct bio_map_data *bmd;
+ if (iov_count > UIO_MAXIOV)
+ return NULL;
+
+ bmd = kmalloc(sizeof(*bmd), gfp_mask);
if (!bmd)
return NULL;
end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
start = uaddr >> PAGE_SHIFT;
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
nr_pages += end - start;
len += iov[i].iov_len;
}
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
+ /*
+ * Overflow, abort
+ */
+ if (end < start)
+ return ERR_PTR(-EINVAL);
+
nr_pages += end - start;
/*
* buffer must be aligned to at least hardsector size for now
unsigned long start = uaddr >> PAGE_SHIFT;
const int local_nr_pages = end - start;
const int page_limit = cur_page + local_nr_pages;
-
+
ret = get_user_pages_fast(uaddr, local_nr_pages,
write_to_vm, &pages[cur_page]);
if (ret < local_nr_pages) {
extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
__u16 fileHandle, struct file *file,
- struct vfsmount *mnt, unsigned int oflags);
+ struct vfsmount *mnt, unsigned int oflags,
+ __u32 oplock);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb,
int mode, int oflags,
struct cifsFileInfo *
cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
- struct file *file, struct vfsmount *mnt, unsigned int oflags)
+ struct file *file, struct vfsmount *mnt, unsigned int oflags,
+ __u32 oplock)
{
- int oplock = 0;
struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
if (pCifsFile == NULL)
return pCifsFile;
- if (oplockEnabled)
- oplock = REQ_OPLOCK;
-
pCifsFile->netfid = fileHandle;
pCifsFile->pid = current->tgid;
pCifsFile->pInode = igrab(newinode);
}
pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
- nd->path.mnt, oflags);
+ nd->path.mnt, oflags, oplock);
if (pfile_info == NULL) {
fput(filp);
CIFSSMBClose(xid, tcon, fileHandle);
cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
nd->path.mnt,
- nd->intent.open.flags);
+ nd->intent.open.flags,
+ oplock);
if (cfile == NULL) {
fput(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
/* Search for server name delimiter */
sep = memchr(hostname, '\\', len);
if (sep)
- len = sep - unc;
+ len = sep - hostname;
else
cFYI(1, "%s: probably server name is whole unc: %s",
__func__, unc);
pCifsFile = cifs_new_fileinfo(inode, netfid, file,
file->f_path.mnt,
- oflags);
+ oflags, oplock);
if (pCifsFile == NULL) {
CIFSSMBClose(xid, tcon, netfid);
rc = -ENOMEM;
goto out;
pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
- file->f_flags);
+ file->f_flags, oplock);
if (pCifsFile == NULL) {
rc = -ENOMEM;
goto out;
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
- if (!inode)
- return ERR_PTR(rc);
+ if (!inode) {
+ inode = ERR_PTR(rc);
+ goto out;
+ }
#ifdef CONFIG_CIFS_FSCACHE
/* populate tcon->resource_id */
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
} else if (rc) {
- kfree(full_path);
- _FreeXid(xid);
iget_failed(inode);
- return ERR_PTR(rc);
+ inode = ERR_PTR(rc);
}
-
+out:
kfree(full_path);
/* can not call macro FreeXid here since in a void func
* TODO: This is no longer true
argv++;
if (i++ >= max)
return -E2BIG;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
+ cond_resched();
}
}
return i;
while (len > 0) {
int offset, bytes_to_copy;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
+ cond_resched();
+
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
struct page *page;
-#ifdef CONFIG_STACK_GROWSUP
- ret = expand_stack_downwards(bprm->vma, pos);
- if (ret < 0) {
- /* We've exceed the stack rlimit. */
- ret = -E2BIG;
- goto out;
- }
-#endif
- ret = get_user_pages(current, bprm->mm, pos,
- 1, 1, 1, &page, NULL);
- if (ret <= 0) {
- /* We've exceed the stack rlimit. */
+ page = get_arg_page(bprm, pos, 1);
+ if (!page) {
ret = -E2BIG;
goto out;
}
return retval;
out:
- if (bprm->mm)
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
mmput(bprm->mm);
+ }
out_file:
if (bprm->file) {
#include <linux/crypto.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"
struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
struct dentry *dentry_save;
struct vfsmount *vfsmount_save;
+ unsigned int flags_save;
int rc;
dentry_save = nd->path.dentry;
vfsmount_save = nd->path.mnt;
+ flags_save = nd->flags;
nd->path.dentry = lower_dentry;
nd->path.mnt = lower_mnt;
+ nd->flags &= ~LOOKUP_OPEN;
rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
nd->path.dentry = dentry_save;
nd->path.mnt = vfsmount_save;
+ nd->flags = flags_save;
return rc;
}
rc = -EOPNOTSUPP;
goto out;
}
- mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = lower_dentry->d_inode->i_op->setxattr(lower_dentry, name, value,
- size, flags);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags);
out:
return rc;
}
#ifdef CONFIG_MMU
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+ struct mm_struct *mm = current->mm;
+ long diff = (long)(pages - bprm->vma_pages);
+
+ if (!mm || !diff)
+ return;
+
+ bprm->vma_pages = pages;
+
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_ANONPAGES, diff);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_ANONPAGES, diff);
+ spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
struct rlimit *rlim;
+ acct_arg_size(bprm, size / PAGE_SIZE);
+
/*
* We've historically supported up to 32 pages (ARG_MAX)
* of argument strings even with small stacks
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
+
+ err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
+ if (err)
+ goto err;
+
err = insert_vm_struct(mm, vma);
if (err)
goto err;
#else
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
/*
* Release all of the old mmap stuff
*/
+ acct_arg_size(bprm, 0);
retval = exec_mmap(bprm->mm);
if (retval)
goto out;
return retval;
out:
- if (bprm->mm)
- mmput (bprm->mm);
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
+ mmput(bprm->mm);
+ }
out_file:
if (bprm->file) {
ext4_abort(sb, "Couldn't clean up the journal");
}
+ del_timer(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <linux/compat.h>
static const struct file_operations fuse_direct_io_file_operations;
return 0;
}
+/* Make sure iov_length() won't overflow */
+static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+{
+	size_t n;
+	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+	/* iov must advance with n: checking iov[0] "count" times would
+	 * let oversized later segments slip past the limit */
+	for (n = 0; n < count; n++, iov++) {
+		if (iov->iov_len > (size_t) max)
+			return -ENOMEM;
+		max -= iov->iov_len;
+	}
+	return 0;
+}
+
+/*
+ * CUSE servers compiled on 32bit broke on 64bit kernels because the
+ * ABI was defined to be 'struct iovec' which is different on 32bit
+ * and 64bit.  Fortunately we can determine which structure the server
+ * used from the size of the reply.
+ */
+static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
+				 size_t transferred, unsigned count,
+				 bool is_compat)
+{
+#ifdef CONFIG_COMPAT
+	if (count * sizeof(struct compat_iovec) == transferred) {
+		struct compat_iovec *ciov = src;
+		unsigned i;
+
+		/*
+		 * With this interface a 32bit server cannot support
+		 * non-compat (i.e. ones coming from 64bit apps) ioctl
+		 * requests
+		 */
+		if (!is_compat)
+			return -EINVAL;
+
+		/* widen each compat entry into a native struct iovec */
+		for (i = 0; i < count; i++) {
+			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
+			dst[i].iov_len = ciov[i].iov_len;
+		}
+		return 0;
+	}
+#endif
+
+	/* native layout: the reply size must match exactly */
+	if (count * sizeof(struct iovec) != transferred)
+		return -EIO;
+
+	memcpy(dst, src, transferred);
+	return 0;
+}
+
/*
* For ioctls, there is no generic way to determine how much memory
* needs to be read and/or written. Furthermore, ioctls are allowed
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
- err = -EIO;
- if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
- goto out;
-
- /* okay, copy in iovs and retry */
vaddr = kmap_atomic(pages[0], KM_USER0);
- memcpy(page_address(iov_page), vaddr, transferred);
+ err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
+ transferred, in_iovs + out_iovs,
+ (flags & FUSE_IOCTL_COMPAT) != 0);
kunmap_atomic(vaddr, KM_USER0);
+ if (err)
+ goto out;
in_iov = page_address(iov_page);
out_iov = in_iov + in_iovs;
+ err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+ if (err)
+ goto out;
+
+ err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+ if (err)
+ goto out;
+
goto retry;
}
extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out);
+ void *fsid_out, int fsid_size, long *namelen_out);
#endif
err = do_statfs(dentry->d_sb->s_fs_info,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
- &sf->f_namelen, sf->f_spare);
+ &sf->f_namelen);
if (err)
return err;
sf->f_blocks = f_blocks;
int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out)
+ void *fsid_out, int fsid_size, long *namelen_out)
{
struct statfs64 buf;
int err;
sizeof(buf.f_fsid) > fsid_size ? fsid_size :
sizeof(buf.f_fsid));
*namelen_out = buf.f_namelen;
- spare_out[0] = buf.f_spare[0];
- spare_out[1] = buf.f_spare[1];
- spare_out[2] = buf.f_spare[2];
- spare_out[3] = buf.f_spare[3];
- spare_out[4] = buf.f_spare[4];
+
return 0;
}
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
- if (dreq->l_ctx != NULL)
+ if (dreq->l_ctx == NULL)
goto out_release;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
struct file *filp = vma->vm_file;
struct dentry *dentry = filp->f_path.dentry;
unsigned pagelen;
- int ret = -EINVAL;
+ int ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
if (mapping != dentry->d_inode->i_mapping)
goto out_unlock;
- ret = 0;
pagelen = nfs_page_length(page);
if (pagelen == 0)
goto out_unlock;
- ret = nfs_flush_incompatible(filp, page);
- if (ret != 0)
- goto out_unlock;
+ ret = VM_FAULT_LOCKED;
+ if (nfs_flush_incompatible(filp, page) == 0 &&
+ nfs_updatepage(filp, page, 0, pagelen) == 0)
+ goto out;
- ret = nfs_updatepage(filp, page, 0, pagelen);
+ ret = VM_FAULT_SIGBUS;
out_unlock:
- if (!ret)
- return VM_FAULT_LOCKED;
unlock_page(page);
- return VM_FAULT_SIGBUS;
+out:
+ return ret;
}
static const struct vm_operations_struct nfs_file_vm_ops = {
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
+ unsigned int saved_type = fl->fl_type;
/* Try local locking first */
posix_test_lock(filp, fl);
/* found a conflict */
goto out;
}
+ fl->fl_type = saved_type;
if (nfs_have_delegation(inode, FMODE_READ))
goto out_noconflict;
static struct rpc_version mnt_version1 = {
.number = 1,
- .nrprocs = 2,
+ .nrprocs = ARRAY_SIZE(mnt_procedures),
.procs = mnt_procedures,
};
static struct rpc_version mnt_version3 = {
.number = 3,
- .nrprocs = 2,
+ .nrprocs = ARRAY_SIZE(mnt3_procedures),
.procs = mnt3_procedures,
};
nfs4_state_mark_reclaim_nograce(clp, state);
goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- if (state == NULL)
- break;
- nfs4_state_mark_reclaim_reboot(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
goto do_state_recovery;
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
return -ESTALE;
}
if (state->n_wronly != 0) {
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
return -ESTALE;
}
if (state->n_rdonly != 0) {
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
nfs4_state_mark_reclaim_nograce(clp, state);
goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- if (state == NULL)
- break;
- nfs4_state_mark_reclaim_reboot(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
goto do_state_recovery;
(void)ops->reclaim_complete(clp);
}
-static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
struct nfs4_state *state;
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
- return;
-
- nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
+ return 0;
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
}
nfs_delegation_reap_unclaimed(clp);
+ return 1;
+}
+
+static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+{
+ if (!nfs4_state_clear_reclaim_reboot(clp))
+ return;
+ nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}
static void nfs_delegation_clear_all(struct nfs_client *clp)
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_end_reclaim_reboot(clp);
+ nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
if (req == NULL)
return ERR_PTR(-ENOMEM);
+ /* get lock context early so we can deal with alloc failures */
+ req->wb_lock_context = nfs_get_lock_context(ctx);
+ if (req->wb_lock_context == NULL) {
+ nfs_page_free(req);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_pgbase = offset;
req->wb_bytes = count;
req->wb_context = get_nfs_open_context(ctx);
- req->wb_lock_context = nfs_get_lock_context(ctx);
kref_init(&req->wb_kref);
return req;
}
err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
- if (err)
+ if (err) {
fhp->fh_post_saved = 0;
- else
+ /* Grab the ctime anyway - set_change_info might use it */
+ fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime;
+ } else
fhp->fh_post_saved = 1;
}
static inline void
set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
- BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
- cinfo->atomic = 1;
+ BUG_ON(!fhp->fh_pre_saved);
+ cinfo->atomic = fhp->fh_post_saved;
cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
- if (cinfo->change_supported) {
- cinfo->before_change = fhp->fh_pre_change;
- cinfo->after_change = fhp->fh_post_change;
- } else {
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
- }
+
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
if (ret >= 0)
return ret;
+ fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
partition table format used by Motorola Delta machines (using
sysv68).
Otherwise, say N.
+
+config CMDLINE_PARTITION
+ bool "Kernel command line partition table support" if PARTITION_ADVANCED
+ help
+ Say Y here if you would like to pass the partition table for a device
+ on the kernel command line.
obj-$(CONFIG_EFI_PARTITION) += efi.o
obj-$(CONFIG_KARMA_PARTITION) += karma.o
obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
#include "efi.h"
#include "karma.h"
#include "sysv68.h"
+#include "cmdline.h"
#ifdef CONFIG_BLK_DEV_MD
extern void md_autodetect_dev(dev_t dev);
int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
static int (*check_part[])(struct parsed_partitions *) = {
+#ifdef CONFIG_CMDLINE_PARTITION
+ cmdline_partition,
+#endif
/*
* Probe partition formats with tables at disk address 0
* that also have an ADFS boot block at 0xdc0.
struct hd_struct *part = dev_to_part(dev);
add_uevent_var(env, "PARTN=%u", part->partno);
+ if (part->partition_name)
+ add_uevent_var(env, "PARTNAME=%s", part->partition_name);
return 0;
}
--- /dev/null
+/*
+ * fs/partitions/cmdline.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*#define DEBUG 1*/
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+/* raw "tegrapart=" argument; parsed lazily on first use */
+static char *cmdline;
+static int cmdline_parsed;
+static struct part_device *cmdline_device;
+
+/* one partition; from/size are in units of sector_size (see
+ * copy_partitions_to_state(), which rescales to the device's block
+ * size) */
+struct part {
+	char *name;
+	unsigned long from;
+	unsigned long size;
+	unsigned long sector_size;
+	struct part *next_part;
+};
+
+/* a block device name plus its singly-linked partition list */
+struct part_device {
+	char *name;
+	struct part *first_part;
+	struct part_device *next_device;
+};
+
+
+/* Passed a string like:
+ *  system:3600:10000:800
+ * Fields after the name are hex; any missing trailing fields are left
+ * zero. Returns NULL only if the one-shot arena allocation fails.
+ */
+static struct part *parse_partition(char *s, int alloc_size, void **alloc)
+{
+	char *p;
+	struct part *this_part;
+	pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc);
+
+	if (*alloc == NULL)
+		*alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+	/* bail out instead of doing arithmetic on / dereferencing a
+	 * NULL pointer when the allocation failed */
+	if (*alloc == NULL)
+		return NULL;
+
+	this_part = *alloc;
+	*alloc += sizeof(*this_part);
+
+	/* Name */
+	p = strchr(s, ':');
+	if (!p)
+		return this_part;
+	*p = 0;
+	this_part->name = s;
+
+	/* From */
+	s = p+1;
+	p = strchr(s, ':');
+	if (!p)
+		return this_part;
+	*p = 0;
+	this_part->from = simple_strtoul(s, NULL, 16);
+
+	/* Size */
+	s = p+1;
+	p = strchr(s, ':');
+	if (!p)
+		return this_part;
+	*p = 0;
+	this_part->size = simple_strtoul(s, NULL, 16);
+
+	/* Sector size */
+	s = p+1;
+	this_part->sector_size = simple_strtoul(s, NULL, 16);
+	pr_debug("%s: Found %s %lu %lu %lu\n", __func__, this_part->name,
+		this_part->from, this_part->size, this_part->sector_size);
+	return this_part;
+}
+
+/* Passed a string like:
+ *  system:3600:10000:800,cache:13600:4000:800,userdata:17600:80000:800
+ * Could be an empty string. Returns NULL on allocation failure.
+ */
+static struct part *parse_partition_list(char *s, int alloc_size, void **alloc)
+{
+	char *p;
+	struct part *this_part;
+	struct part *next_part = NULL;
+	pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc);
+
+	alloc_size += sizeof(struct part);
+	p = strchr(s, ',');
+	if (p) {
+		*p = 0;
+		next_part = parse_partition_list(p+1, alloc_size, alloc);
+		/* NULL means the arena allocation failed; fail the whole
+		 * parse rather than BUG() and panic over boot arguments */
+		if (!next_part)
+			return NULL;
+	}
+	this_part = parse_partition(s, alloc_size, alloc);
+	if (!this_part)
+		return NULL;
+	this_part->next_part = next_part;
+	return this_part;
+}
+/* Passed a string like:
+ *  sdhci.0=system:3600:10000:800,cache:13600:4000:800,userdata:17600:80000:800
+ * Returns NULL when no '=' is found or no partitions could be parsed.
+ */
+static struct part_device *parse_device(char *s, int alloc_size, void **alloc)
+{
+	char *p;
+	struct part *part;
+	/* must be initialized: the original returned this uninitialized
+	 * (undefined behavior) when the partition list failed to parse */
+	struct part_device *device = NULL;
+
+	pr_debug("%s: '%s', %d, %p\n", __func__, s, alloc_size, alloc);
+	p = strchr(s, '=');
+	if (!p)
+		return NULL;
+	*p = 0;
+	alloc_size += sizeof(struct part_device);
+	part = parse_partition_list(p+1, alloc_size, alloc);
+	if (part) {
+		device = *alloc;
+		*alloc += sizeof(struct part_device);
+		device->name = s;
+		device->first_part = part;
+	}
+	return device;
+}
+
+
+/* Parse the "tegrapart=" boot argument once, caching the result. */
+static void parse_cmdline(void)
+{
+	char *s = cmdline;
+	void *alloc = NULL;
+	if (cmdline_parsed)
+		return;
+	cmdline_parsed = 1;
+	/* no tegrapart= on the command line: nothing to parse (the old
+	 * code passed NULL to parse_device() and oopsed in strchr()) */
+	if (!s)
+		return;
+	cmdline_device = parse_device(s, 0, &alloc);
+}
+
+
+/* Register each parsed partition with the partition-table state,
+ * rescaling offsets/sizes from the cmdline sector size to the device's
+ * logical block size @ssz. NOTE(review): assumes sector_size is a
+ * multiple of ssz and >= ssz -- a smaller sector_size would scale
+ * everything to zero; confirm against the boot-arg producers.
+ * Returns the number of partitions added. */
+int copy_partitions_to_state(struct part_device *device,
+	struct parsed_partitions *state, unsigned int ssz)
+{
+	int i = 0;
+	struct part *part = device->first_part;
+	while (part) {
+		sector_t from = part->from * (part->sector_size / ssz);
+		sector_t size = part->size * (part->sector_size / ssz);
+		put_named_partition(state, i+1, from, size, part->name,
+			strlen(part->name));
+		i++;
+		part = part->next_part;
+	}
+	return i;
+}
+
+/* Partition-check entry point: succeeds only for the single device
+ * named on the "tegrapart=" command line. Returns 1 if partitions
+ * were added, 0 to let the other table parsers run. */
+int cmdline_partition(struct parsed_partitions *state)
+{
+	struct block_device *bdev = state->bdev;
+	unsigned int ssz = bdev_logical_block_size(bdev);
+	parse_cmdline();
+	pr_debug("%s: %s\n", __func__, dev_name(disk_to_dev(bdev->bd_disk)));
+
+	if (!cmdline_device)
+		return 0;
+
+	if (strcmp(cmdline_device->name, dev_name(disk_to_dev(bdev->bd_disk))))
+		return 0;
+
+	/* We have a command line partition that matches this device */
+	copy_partitions_to_state(cmdline_device, state, ssz);
+	return 1;
+}
+
+
+/* Record the raw "tegrapart=" argument; parsing is deferred to the
+ * first cmdline_partition() call. ("static" must precede "__init":
+ * gcc warns when storage class is not first in the declaration.) */
+static int __init cmdline_partition_setup(char *s)
+{
+	cmdline = s;
+	return 1;
+}
+
+__setup("tegrapart=", cmdline_partition_setup);
+
+
--- /dev/null
+/*
+ * fs/partitions/cmdline.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef FS_PART_CMDLINE_H
+#define FS_PART_CMDLINE_H
+
+extern int cmdline_partition(struct parsed_partitions *state);
+
+#endif
* the partition tables happens after init too.
*/
static int force_gpt;
+static u64 force_gpt_sector;
static int __init
force_gpt_fn(char *str)
{
}
__setup("gpt", force_gpt_fn);
+static int __init force_gpt_sector_fn(char *str)
+{
+ force_gpt_sector = simple_strtoull(str, NULL, 0);
+ return 1;
+}
+__setup("gpt_sector=", force_gpt_sector_fn);
+
/**
* efi_crc32() - EFI version of crc32 function
if (!good_agpt && force_gpt)
good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
+ if (!good_agpt && force_gpt && force_gpt_sector)
+ good_agpt = is_gpt_valid(state, force_gpt_sector, &agpt, &aptes);
+
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
goto fail;
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
- error = ret;
+ ret = error;
break;
}
return ret;
}
+/*
+ * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
+ * location, so checking ->i_pipe is not enough to verify that this is a
+ * pipe.
+ */
+struct pipe_inode_info *get_pipe_info(struct file *file)
+{
+ struct inode *i = file->f_path.dentry->d_inode;
+
+ return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
+}
+
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe;
long ret;
- pipe = file->f_path.dentry->d_inode->i_pipe;
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
if (!tmp)
return -ENOMEM;
- pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE);
+ pathname = d_path(path, tmp, PAGE_SIZE);
len = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
return 0;
}
- /* we need to make sure nobody is changing the file size beneath
- ** us
- */
- reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
depth = reiserfs_write_lock_once(inode->i_sb);
+ /* we need to make sure nobody is changing the file size beneath us */
+ reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
if (write_from == 0) {
struct reiserfs_transaction_handle th;
size_t size = reiserfs_xattr_nblocks(inode,
reiserfs_acl_size(clone->a_count));
- reiserfs_write_lock(inode->i_sb);
+ int depth;
+
+ depth = reiserfs_write_lock_once(inode->i_sb);
error = journal_begin(&th, inode->i_sb, size * 2);
if (!error) {
int error2;
if (error2)
error = error2;
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
}
posix_acl_release(clone);
return error;
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
-/*
- * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
- * location, so checking ->i_pipe is not enough to verify that this is a
- * pipe.
- */
-static inline struct pipe_inode_info *pipe_info(struct inode *inode)
-{
- if (S_ISFIFO(inode->i_mode))
- return inode->i_pipe;
-
- return NULL;
-}
/*
* Determine where to splice to/from.
loff_t offset, *off;
long ret;
- ipipe = pipe_info(in->f_path.dentry->d_inode);
- opipe = pipe_info(out->f_path.dentry->d_inode);
+ ipipe = get_pipe_info(in);
+ opipe = get_pipe_info(out);
if (ipipe && opipe) {
if (off_in || off_out)
int error;
long ret;
- pipe = pipe_info(file->f_path.dentry->d_inode);
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
};
long ret;
- pipe = pipe_info(file->f_path.dentry->d_inode);
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
static long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags)
{
- struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
- struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
+ struct pipe_inode_info *ipipe = get_pipe_info(in);
+ struct pipe_inode_info *opipe = get_pipe_info(out);
int ret = -EINVAL;
/*
char buf[BINPRM_BUF_SIZE];
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
+ unsigned long vma_pages;
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
unsigned long loader, exec;
};
+extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
+extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ int write);
+
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
unsigned char misaligned;
unsigned char discard_misaligned;
- unsigned char no_cluster;
+ unsigned char cluster;
signed char discard_zeroes_data;
};
#endif
};
-#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+ return q->limits.cluster;
+}
+
/*
* We regard a request as sync, if either a read or a sync write
*/
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
return q->limits.physical_block_size;
}
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
return queue_physical_block_size(bdev_get_queue(bdev));
}
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_align(x, align) \
+ __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages(x) \
#define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
+#define FB_MISC_HDMI 4 /* display supports HDMI signaling */
+
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
+extern void fb_edid_add_monspecs(unsigned char *edid,
+ struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
+#define CEA_MODEDB_SIZE 65
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
+extern const struct fb_videomode cea_modes[];
struct fb_modelist {
struct list_head list;
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
+ void *phy_config;
};
/* Flags in fsl_usb2_mph_platform_data */
extern gfp_t gfp_allowed_mask;
-extern void set_gfp_allowed_mask(gfp_t mask);
-extern gfp_t clear_gfp_allowed_mask(gfp_t mask);
+extern void pm_restrict_gfp_mask(void);
+extern void pm_restore_gfp_mask(void);
#endif /* __LINUX_GFP_H */
--- /dev/null
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_I2C_TEGRA_H
+#define _LINUX_I2C_TEGRA_H
+
+#include <mach/pinmux.h>
+
+#define TEGRA_I2C_MAX_BUS 3
+
+struct tegra_i2c_platform_data {
+ int adapter_nr;
+ int bus_count;
+ const struct tegra_pingroup_config *bus_mux[TEGRA_I2C_MAX_BUS];
+ int bus_mux_len[TEGRA_I2C_MAX_BUS];
+ unsigned long bus_clk_rate[TEGRA_I2C_MAX_BUS];
+ bool is_dvc;
+};
+
+#endif /* _LINUX_I2C_TEGRA_H */
--- /dev/null
+/*
+ * include/linux/i2c/panjit_ts.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_I2C_PANJIT_TS_H
+#define _LINUX_I2C_PANJIT_TS_H
+
+struct device;
+
+struct panjit_i2c_ts_platform_data {
+ int gpio_reset;
+};
+
+#endif
}
-static inline unsigned int __must_check
-__kfifo_must_check_helper(unsigned int val)
-{
- return val;
-}
+/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
+#define __kfifo_must_check_helper(x) (x)
/**
* kfifo_initialized - Check if the fifo is initialized
TPS6586X_ID_LDO_RTC,
};
+enum {
+ TPS6586X_INT_PLDO_0,
+ TPS6586X_INT_PLDO_1,
+ TPS6586X_INT_PLDO_2,
+ TPS6586X_INT_PLDO_3,
+ TPS6586X_INT_PLDO_4,
+ TPS6586X_INT_PLDO_5,
+ TPS6586X_INT_PLDO_6,
+ TPS6586X_INT_PLDO_7,
+ TPS6586X_INT_COMP_DET,
+ TPS6586X_INT_ADC,
+ TPS6586X_INT_PLDO_8,
+ TPS6586X_INT_PLDO_9,
+ TPS6586X_INT_PSM_0,
+ TPS6586X_INT_PSM_1,
+ TPS6586X_INT_PSM_2,
+ TPS6586X_INT_PSM_3,
+ TPS6586X_INT_RTC_ALM1,
+ TPS6586X_INT_ACUSB_OVP,
+ TPS6586X_INT_USB_DET,
+ TPS6586X_INT_AC_DET,
+ TPS6586X_INT_BAT_DET,
+ TPS6586X_INT_CHG_STAT,
+ TPS6586X_INT_CHG_TEMP,
+ TPS6586X_INT_PP,
+ TPS6586X_INT_RESUME,
+ TPS6586X_INT_LOW_SYS,
+ TPS6586X_INT_RTC_ALM2,
+};
+
struct tps6586x_subdev_info {
int id;
const char *name;
void *platform_data;
};
+struct tps6586x_rtc_platform_data {
+ int irq;
+};
+
struct tps6586x_platform_data {
int num_subdevs;
struct tps6586x_subdev_info *subdevs;
int gpio_base;
+ int irq_base;
};
/*
#define WM8994_CONFIGURE_GPIO 0x8000
#define WM8994_DRC_REGS 5
-#define WM8994_EQ_REGS 19
+#define WM8994_EQ_REGS 20
/**
* DRC configurations are specified with a label and a set of register
#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
+#define MMC_CAP_FORCE_HS (1 << 11) /* Must enable highspeed mode */
mmc_pm_flag_t pm_caps; /* supported pm features */
--- /dev/null
+/*
+ * include/linux/nct1008.h
+ *
+ * NCT1008, temperature monitoring device from ON Semiconductors
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_NCT1008_H
+#define _LINUX_NCT1008_H
+
+#include <linux/types.h>
+
+struct nct1008_platform_data {
+ bool supported_hwrev;
+ bool ext_range;
+ u8 conv_rate;
+ u8 offset;
+ u8 hysteresis;
+ u8 shutdown_ext_limit;
+ u8 shutdown_local_limit;
+ u8 throttling_ext_limit;
+ void (*alarm_fn)(bool raised);
+};
+
+#endif /* _LINUX_NCT1008_H */
int ret;
if (!cond ||
- (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1))
+ ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
ret = okfn(skb);
return ret;
}
#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
+#define PCI_VENDOR_ID_BCM_GVC 0x14a4
#define PCI_VENDOR_ID_BROADCOM 0x14e4
#define PCI_DEVICE_ID_TIGON3_5752 0x1600
#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
int nr_active;
int is_active;
int nr_stat;
+ int rotate_disable;
atomic_t refcount;
struct task_struct *task;
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+struct pipe_inode_info *get_pipe_info(struct file *file);
#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _TEGRA_USB_H_
+#define _TEGRA_USB_H_
+
+enum tegra_usb_operating_modes {
+ TEGRA_USB_DEVICE,
+ TEGRA_USB_HOST,
+ TEGRA_USB_OTG,
+};
+
+struct tegra_ehci_platform_data {
+ enum tegra_usb_operating_modes operating_mode;
+ /* power down the phy on bus suspend */
+ int power_down_on_bus_suspend;
+ void *phy_config;
+};
+
+#endif /* _TEGRA_USB_H_ */
static inline bool pm_runtime_suspended(struct device *dev)
{
- return dev->power.runtime_status == RPM_SUSPENDED;
+ return dev->power.runtime_status == RPM_SUSPENDED
+ && !dev->power.disable_depth;
}
#else /* !CONFIG_PM_RUNTIME */
* RCU.
*/
#define RADIX_TREE_INDIRECT_PTR 1
-#define RADIX_TREE_RETRY ((void *)-1UL)
-
-static inline void *radix_tree_ptr_to_indirect(void *ptr)
-{
- return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
-}
-
-static inline void *radix_tree_indirect_to_ptr(void *ptr)
-{
- return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
-}
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
* removed.
*
* For use with radix_tree_lookup_slot(). Caller must hold tree at least read
- * locked across slot lookup and dereference. More likely, will be used with
- * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ * locked across slot lookup and dereference. Not required if write lock is
+ * held (ie. items cannot be concurrently inserted).
+ *
+ * radix_tree_deref_retry must be used to confirm validity of the pointer if
+ * only the read lock is held.
*/
static inline void *radix_tree_deref_slot(void **pslot)
{
- void *ret = rcu_dereference(*pslot);
- if (unlikely(radix_tree_is_indirect_ptr(ret)))
- ret = RADIX_TREE_RETRY;
- return ret;
+ return rcu_dereference(*pslot);
}
+
+/**
+ * radix_tree_deref_retry - check radix_tree_deref_slot
+ * @arg: pointer returned by radix_tree_deref_slot
+ * Returns: 0 if retry is not required, otherwise retry is required
+ *
+ * radix_tree_deref_retry must be used with radix_tree_deref_slot.
+ */
+static inline int radix_tree_deref_retry(void *arg)
+{
+ return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+}
+
/**
* radix_tree_replace_slot - replace item in a slot
* @pslot: pointer to slot, returned by radix_tree_lookup_slot
extern unsigned long this_cpu_load(void);
-extern void calc_global_load(void);
+extern void calc_global_load(unsigned long ticks);
extern unsigned long get_parent_ip(unsigned long addr);
int offset,
unsigned int len, __wsum *csump);
-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
--- /dev/null
+/* include/linux/tegra_audio.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Iliyan Malchev <malchev@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _TEGRA_AUDIO_H
+#define _TEGRA_AUDIO_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_AUDIO_MAGIC 't'
+
+#define TEGRA_AUDIO_IN_START _IO(TEGRA_AUDIO_MAGIC, 0)
+#define TEGRA_AUDIO_IN_STOP _IO(TEGRA_AUDIO_MAGIC, 1)
+
+struct tegra_audio_in_config {
+ int rate;
+ int stereo;
+};
+
+#define TEGRA_AUDIO_IN_SET_CONFIG _IOW(TEGRA_AUDIO_MAGIC, 2, \
+ const struct tegra_audio_in_config *)
+#define TEGRA_AUDIO_IN_GET_CONFIG _IOR(TEGRA_AUDIO_MAGIC, 3, \
+ struct tegra_audio_in_config *)
+
+#define TEGRA_AUDIO_IN_SET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 4, \
+ const unsigned int *)
+#define TEGRA_AUDIO_IN_GET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 5, \
+ unsigned int *)
+#define TEGRA_AUDIO_OUT_SET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 6, \
+ const unsigned int *)
+#define TEGRA_AUDIO_OUT_GET_NUM_BUFS _IOW(TEGRA_AUDIO_MAGIC, 7, \
+ unsigned int *)
+
+#define TEGRA_AUDIO_OUT_FLUSH _IO(TEGRA_AUDIO_MAGIC, 10)
+
+#define TEGRA_AUDIO_BIT_FORMAT_DEFAULT 0
+#define TEGRA_AUDIO_BIT_FORMAT_DSP 1
+#define TEGRA_AUDIO_SET_BIT_FORMAT _IOW(TEGRA_AUDIO_MAGIC, 11, \
+ const unsigned int *)
+#define TEGRA_AUDIO_GET_BIT_FORMAT _IOR(TEGRA_AUDIO_MAGIC, 12, \
+ unsigned int *)
+
+#endif /* _TEGRA_AUDIO_H */
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_AVP_H
+#define __LINUX_TEGRA_AVP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define TEGRA_AVP_LIB_MAX_NAME 32
+#define TEGRA_AVP_LIB_MAX_ARGS 220 /* DO NOT CHANGE THIS! */
+
+struct tegra_avp_lib {
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+ void __user *args;
+ size_t args_len;
+ int greedy;
+ unsigned long handle;
+};
+
+#define TEGRA_AVP_IOCTL_MAGIC 'r'
+
+#define TEGRA_AVP_IOCTL_LOAD_LIB _IOWR(TEGRA_AVP_IOCTL_MAGIC, 0x40, struct tegra_avp_lib)
+#define TEGRA_AVP_IOCTL_UNLOAD_LIB _IOW(TEGRA_AVP_IOCTL_MAGIC, 0x41, unsigned long)
+
+#define TEGRA_AVP_IOCTL_MIN_NR _IOC_NR(TEGRA_AVP_IOCTL_LOAD_LIB)
+#define TEGRA_AVP_IOCTL_MAX_NR _IOC_NR(TEGRA_AVP_IOCTL_UNLOAD_LIB)
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_RPC_H
+#define __LINUX_TEGRA_RPC_H
+
+#define TEGRA_RPC_MAX_MSG_LEN 256
+
+/* Note: the actual size of the name in the protocol message is 16 bytes,
+ * but that is because the name there is not NUL terminated, only NUL
+ * padded. */
+#define TEGRA_RPC_MAX_NAME_LEN 17
+
+struct tegra_rpc_port_desc {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+ int notify_fd; /* fd representing a trpc_sema to signal when a
+ * message has been received */
+};
+
+#define TEGRA_RPC_IOCTL_MAGIC 'r'
+
+#define TEGRA_RPC_IOCTL_PORT_CREATE _IOW(TEGRA_RPC_IOCTL_MAGIC, 0x20, struct tegra_rpc_port_desc)
+#define TEGRA_RPC_IOCTL_PORT_GET_NAME _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x21, char *)
+#define TEGRA_RPC_IOCTL_PORT_CONNECT _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x22, long)
+#define TEGRA_RPC_IOCTL_PORT_LISTEN _IOR(TEGRA_RPC_IOCTL_MAGIC, 0x23, long)
+
+#define TEGRA_RPC_IOCTL_MIN_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_CREATE)
+#define TEGRA_RPC_IOCTL_MAX_NR _IOC_NR(TEGRA_RPC_IOCTL_PORT_LISTEN)
+
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_TEGRA_SEMA_H
+#define __LINUX_TEGRA_SEMA_H
+
+/* this shares the magic with the tegra RPC and AVP drivers.
+ * See include/linux/tegra_avp.h and include/linux/tegra_rpc.h */
+#define TEGRA_SEMA_IOCTL_MAGIC 'r'
+
+/* If IOCTL_WAIT is interrupted by a signal and the timeout was not -1,
+ * then the value pointed to by the argument will be updated with the amount
+ * of time remaining for the wait. */
+#define TEGRA_SEMA_IOCTL_WAIT _IOW(TEGRA_SEMA_IOCTL_MAGIC, 0x30, long *)
+#define TEGRA_SEMA_IOCTL_SIGNAL _IO(TEGRA_SEMA_IOCTL_MAGIC, 0x31)
+
+#define TEGRA_SEMA_IOCTL_MIN_NR _IOC_NR(TEGRA_SEMA_IOCTL_WAIT)
+#define TEGRA_SEMA_IOCTL_MAX_NR _IOC_NR(TEGRA_SEMA_IOCTL_SIGNAL)
+
+#endif
--- /dev/null
+/* include/linux/tegra_spdif.h
+ *
+ * SPDIF audio driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _TEGRA_SPDIF_H
+#define _TEGRA_SPDIF_H
+
+#include <linux/ioctl.h>
+
+#define TEGRA_SPDIF_MAGIC 's'
+
+
+
+struct tegra_audio_buf_config {
+ unsigned size; /* order */
+ unsigned threshold; /* order */
+ unsigned chunk; /* order */
+};
+
+
+
+#define TEGRA_AUDIO_OUT_SET_BUF_CONFIG _IOW(TEGRA_SPDIF_MAGIC, 0, \
+ const struct tegra_audio_buf_config *)
+#define TEGRA_AUDIO_OUT_GET_BUF_CONFIG _IOR(TEGRA_SPDIF_MAGIC, 1, \
+ struct tegra_audio_buf_config *)
+
+#define TEGRA_AUDIO_OUT_GET_ERROR_COUNT _IOR(TEGRA_SPDIF_MAGIC, 2, \
+ unsigned *)
+
+struct tegra_audio_out_preload {
+ void *data;
+ size_t len;
+ size_t len_written;
+};
+
+#define TEGRA_AUDIO_OUT_PRELOAD_FIFO _IOWR(TEGRA_SPDIF_MAGIC, 3, \
+ struct tegra_audio_out_preload *)
+
+#endif/*_TEGRA_SPDIF_H*/
#define TTY_HUPPED 18 /* Post driver->hangup() */
#define TTY_FLUSHING 19 /* Flushing to ldisc in progress */
#define TTY_FLUSHPENDING 20 /* Queued buffer flush pending */
+#define TTY_HUPPING 21 /* ->hangup() in progress */
#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
+#define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */
struct usb_iso_packet_descriptor {
unsigned int offset;
#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
/* 19:16 for port testing */
-#define PORT_TEST_PKT (0x4<<16) /* Port Test Control - packet test */
+#define PORT_TEST(x) (((x)&0xf)<<16) /* Port Test Control */
+#define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */
+#define PORT_TEST_FORCE PORT_TEST(0x5) /* Port Test Control - force enable */
#define PORT_LED_OFF (0<<14)
#define PORT_LED_AMBER (1<<14)
#define PORT_LED_GREEN (2<<14)
int (*urb_dequeue)(struct usb_hcd *hcd,
struct urb *urb, int status);
+ /*
+ * (optional) these hooks allow an HCD to override the default DMA
+ * mapping and unmapping routines. In general, they shouldn't be
+ * necessary unless the host controller has special DMA requirements,
+	 * such as alignment constraints. If these are not specified, the
+ * general usb_hcd_(un)?map_urb_for_dma functions will be used instead
+ * (and it may be a good idea to call these functions in your HCD
+ * implementation)
+ */
+ int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+ void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb);
+
/* hw synch, freeing endpoint resources that urb_dequeue can't */
void (*endpoint_disable)(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
extern int usb_hcd_unlink_urb(struct urb *urb, int status);
extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
+extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *);
extern void usb_hcd_flush_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_disable_endpoint(struct usb_device *udev,
struct otg_transceiver *otg_ulpi_create(struct otg_io_access_ops *ops,
unsigned int flags);
+#ifdef CONFIG_USB_ULPI_VIEWPORT
+/* access ops for controllers with a viewport register */
+extern struct otg_io_access_ops ulpi_viewport_access_ops;
+#endif
+
#endif /* __LINUX_USB_ULPI_H */
extern struct mutex saa7146_devices_lock;
int saa7146_register_extension(struct saa7146_extension*);
int saa7146_unregister_extension(struct saa7146_extension*);
-struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
+struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
--- /dev/null
+/*
+ * include/linux/tegra_camera.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+enum {
+ TEGRA_CAMERA_MODULE_ISP = 0,
+ TEGRA_CAMERA_MODULE_VI,
+ TEGRA_CAMERA_MODULE_CSI,
+};
+
+enum {
+ TEGRA_CAMERA_VI_CLK,
+ TEGRA_CAMERA_VI_SENSOR_CLK,
+};
+
+struct tegra_camera_clk_info {
+ uint id;
+ uint clk_id;
+ unsigned long rate;
+};
+
+#define TEGRA_CAMERA_IOCTL_ENABLE _IOWR('i', 1, uint)
+#define TEGRA_CAMERA_IOCTL_DISABLE _IOWR('i', 2, uint)
+#define TEGRA_CAMERA_IOCTL_CLK_SET_RATE \
+ _IOWR('i', 3, struct tegra_camera_clk_info)
+#define TEGRA_CAMERA_IOCTL_RESET _IOWR('i', 4, uint)
extern void unix_notinflight(struct file *fp);
extern void unix_gc(void);
extern void wait_for_unix_gc(void);
+extern struct sock *unix_get_socket(struct file *filp);
#define UNIX_HASH_SIZE 256
spinlock_t lock;
unsigned int gc_candidate : 1;
unsigned int gc_maybe_cycle : 1;
+ unsigned char recursion_level;
struct socket_wq peer_wq;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
* @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
* @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
* frame and selects the maximum number of streams that it can use.
+ *
+ * Note: If you have to add new flags to the enumeration, then don't
+ * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+/*
+ * This definition is used as a mask to clear all temporary flags, which are
+ * set by the tx handlers for each transmission attempt by the mac80211 stack.
+ */
+#define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \
+ IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \
+ IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \
+ IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \
+ IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \
+ IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \
+ IEEE80211_TX_CTL_STBC)
+
/**
* enum mac80211_rate_control_flags - per-rate flags set by the
* Rate Control algorithm.
/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
/**
* sk_filter_release - release a socket filter
* @fp: filter to remove
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- kfree(fp);
+ call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
}
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
--- /dev/null
+/*
+ * include/video/nvhdcp.h
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Userspace ABI for the Tegra HDCP (nvhdcp) ioctl interface. */
+#ifndef _LINUX_NVHDCP_H_
+#define _LINUX_NVHDCP_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/* maximum receivers and repeaters connected at a time */
+#define TEGRA_NVHDCP_MAX_DEVS	127
+
+/* values for value_flags (which fields of the packet are valid) */
+#define TEGRA_NVHDCP_FLAG_AN			0x0001
+#define TEGRA_NVHDCP_FLAG_AKSV			0x0002
+#define TEGRA_NVHDCP_FLAG_BKSV			0x0004
+#define TEGRA_NVHDCP_FLAG_BSTATUS		0x0008 /* repeater status */
+#define TEGRA_NVHDCP_FLAG_CN			0x0010 /* c_n */
+#define TEGRA_NVHDCP_FLAG_CKSV			0x0020 /* c_ksv */
+#define TEGRA_NVHDCP_FLAG_DKSV			0x0040 /* d_ksv */
+#define TEGRA_NVHDCP_FLAG_KP			0x0080 /* k_prime */
+#define TEGRA_NVHDCP_FLAG_S			0x0100 /* hdcp_status */
+#define TEGRA_NVHDCP_FLAG_CS			0x0200 /* connection state */
+#define TEGRA_NVHDCP_FLAG_V			0x0400
+#define TEGRA_NVHDCP_FLAG_MP			0x0800
+#define TEGRA_NVHDCP_FLAG_BKSVLIST		0x1000
+
+/* values for packet_results */
+#define TEGRA_NVHDCP_RESULT_SUCCESS		0
+#define TEGRA_NVHDCP_RESULT_UNSUCCESSFUL	1
+#define TEGRA_NVHDCP_RESULT_PENDING		0x103
+#define TEGRA_NVHDCP_RESULT_LINK_FAILED		0xc0000013
+/* TODO: replace with -EINVAL */
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER	0xc000000d
+#define TEGRA_NVHDCP_RESULT_INVALID_PARAMETER_MIX	0xc0000030
+/* TODO: replace with -ENOMEM */
+#define TEGRA_NVHDCP_RESULT_NO_MEMORY		0xc0000017
+
+/* ioctl payload for the READ_M/READ_S exchanges; each field's direction
+ * relative to userspace is noted below */
+struct tegra_nvhdcp_packet {
+	__u32	value_flags;		// (IN/OUT)
+	__u32	packet_results;		// (OUT)
+
+	__u64	c_n;			// (IN) upstream exchange number
+	__u64	c_ksv;			// (IN)
+
+	__u32	b_status;	// (OUT) link/repeater status
+	__u64	hdcp_status;	// (OUT) READ_S
+	__u64	cs;		// (OUT) Connection State
+
+	__u64	k_prime;	// (OUT)
+	__u64	a_n;		// (OUT)
+	__u64	a_ksv;		// (OUT)
+	__u64	b_ksv;		// (OUT)
+	__u64	d_ksv;		// (OUT)
+	__u8	v_prime[20];	// (OUT) 160-bit
+	__u64	m_prime;	// (OUT)
+
+	// (OUT) Valid KSVs in the bKsvList. Maximum is 127 devices
+	__u32	num_bksv_list;
+
+	// (OUT) Up to 127 receivers & repeaters
+	__u64	bksv_list[TEGRA_NVHDCP_MAX_DEVS];
+};
+
+/* parameters to TEGRAIO_NVHDCP_SET_POLICY */
+#define TEGRA_NVHDCP_POLICY_ON_DEMAND	0
+#define TEGRA_NVHDCP_POLICY_ALWAYS_ON	1
+
+/* ioctls */
+#define TEGRAIO_NVHDCP_ON		_IO('F', 0x70)
+#define TEGRAIO_NVHDCP_OFF		_IO('F', 0x71)
+#define TEGRAIO_NVHDCP_SET_POLICY	_IOW('F', 0x72, __u32)
+#define TEGRAIO_NVHDCP_READ_M		_IOWR('F', 0x73, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_READ_S		_IOWR('F', 0x74, struct tegra_nvhdcp_packet)
+#define TEGRAIO_NVHDCP_RENEGOTIATE	_IO('F', 0x75)
+
+#endif
--- /dev/null
+/*
+ * include/video/tegrafb.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Userspace ABI for the Tegra framebuffer driver's extended ioctls. */
+#ifndef _LINUX_TEGRAFB_H_
+#define _LINUX_TEGRAFB_H_
+
+#include <linux/fb.h>
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/* window pixel formats (values for tegra_fb_windowattr.pixformat) */
+#define TEGRA_FB_WIN_FMT_P1		0
+#define TEGRA_FB_WIN_FMT_P2		1
+#define TEGRA_FB_WIN_FMT_P4		2
+#define TEGRA_FB_WIN_FMT_P8		3
+#define TEGRA_FB_WIN_FMT_B4G4R4A4	4
+#define TEGRA_FB_WIN_FMT_B5G5R5A	5
+#define TEGRA_FB_WIN_FMT_B5G6R5		6
+#define TEGRA_FB_WIN_FMT_AB5G5R5	7
+#define TEGRA_FB_WIN_FMT_B8G8R8A8	12
+#define TEGRA_FB_WIN_FMT_R8G8B8A8	13
+#define TEGRA_FB_WIN_FMT_B6x2G6x2R6x2A8	14
+#define TEGRA_FB_WIN_FMT_R6x2G6x2B6x2A8	15
+#define TEGRA_FB_WIN_FMT_YCbCr422	16
+#define TEGRA_FB_WIN_FMT_YUV422		17
+#define TEGRA_FB_WIN_FMT_YCbCr420P	18
+#define TEGRA_FB_WIN_FMT_YUV420P	19
+#define TEGRA_FB_WIN_FMT_YCbCr422P	20
+#define TEGRA_FB_WIN_FMT_YUV422P	21
+#define TEGRA_FB_WIN_FMT_YCbCr422R	22
+#define TEGRA_FB_WIN_FMT_YUV422R	23
+#define TEGRA_FB_WIN_FMT_YCbCr422RA	24
+#define TEGRA_FB_WIN_FMT_YUV422RA	25
+
+/* per-window blend modes (values for tegra_fb_windowattr.blend) */
+#define TEGRA_FB_WIN_BLEND_NONE		0
+#define TEGRA_FB_WIN_BLEND_PREMULT	1
+#define TEGRA_FB_WIN_BLEND_COVERAGE	2
+
+/* bits for tegra_fb_windowattr.flags */
+#define TEGRA_FB_WIN_FLAG_INVERT_H	(1 << 0)
+#define TEGRA_FB_WIN_FLAG_INVERT_V	(1 << 1)
+#define TEGRA_FB_WIN_FLAG_TILED		(1 << 2)
+
+/* set index to -1 to ignore window data */
+struct tegra_fb_windowattr {
+	__s32	index;
+	__u32	buff_id;
+	__u32	flags;
+	__u32	blend;
+	__u32	offset;
+	__u32	offset_u;
+	__u32	offset_v;
+	__u32	stride;
+	__u32	stride_uv;
+	__u32	pixformat;
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+	__u32	out_x;
+	__u32	out_y;
+	__u32	out_w;
+	__u32	out_h;
+	__u32	z;
+	__u32	pre_syncpt_id;
+	__u32	pre_syncpt_val;
+};
+
+#define TEGRA_FB_FLIP_N_WINDOWS		3
+
+/* one flip request covering up to TEGRA_FB_FLIP_N_WINDOWS windows */
+struct tegra_fb_flip_args {
+	struct tegra_fb_windowattr win[TEGRA_FB_FLIP_N_WINDOWS];
+	__u32 post_syncpt_id;
+	__u32 post_syncpt_val;
+};
+
+/* used by FBIO_TEGRA_GET_MODEDB to return the supported mode list */
+struct tegra_fb_modedb {
+	struct fb_var_screeninfo *modedb;
+	__u32 modedb_len;
+};
+
+#define FBIO_TEGRA_SET_NVMAP_FD	_IOW('F', 0x40, __u32)
+#define FBIO_TEGRA_FLIP		_IOW('F', 0x41, struct tegra_fb_flip_args)
+#define FBIO_TEGRA_GET_MODEDB	_IOWR('F', 0x42, struct tegra_fb_modedb)
+
+#endif
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
-#define __RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
+
+/*
+ * The same for passing in an actual pointer instead of a name tag.
+ */
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
struct semid64_ds __user *up64;
int version = compat_ipc_parse_version(&third);
+ memset(&s64, 0, sizeof(s64));
+
if (!uptr)
return -EINVAL;
if (get_user(pad, (u32 __user *) uptr))
int version = compat_ipc_parse_version(&second);
void __user *p;
+ memset(&m64, 0, sizeof(m64));
+
switch (second & (~IPC_64)) {
case IPC_INFO:
case IPC_RMID:
int err, err2;
int version = compat_ipc_parse_version(&second);
+ memset(&s64, 0, sizeof(s64));
+
switch (second & (~IPC_64)) {
case IPC_RMID:
case SHM_LOCK:
void __user *p = NULL;
if (u_attr && oflag & O_CREAT) {
struct mq_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
p = compat_alloc_user_space(sizeof(attr));
if (get_compat_mq_attr(&attr, u_attr) ||
copy_to_user(p, &attr, sizeof(attr)))
struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
long ret;
+ memset(&mqstat, 0, sizeof(mqstat));
+
if (u_mqstat) {
if (get_compat_mq_attr(&mqstat, u_mqstat) ||
copy_to_user(p, &mqstat, sizeof(mqstat)))
{
struct shmid_ds out;
+ memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
out.shm_segsz = in->shm_segsz;
out.shm_atime = in->shm_atime;
tty = sig->tty;
sig->tty = NULL;
} else {
+ /*
+ * This can only happen if the caller is de_thread().
+ * FIXME: this is the temporary hack, we should teach
+ * posix-cpu-timers to handle this case correctly.
+ */
+ if (unlikely(has_group_leader_pid(tsk)))
+ posix_cpu_timers_exit_group(tsk);
+
/*
* If there is any task waiting for the group exit
* then notify it:
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
+ /*
+ * If do_exit is called because this processes oopsed, it's possible
+ * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+ * continuing. Amongst other possible reasons, this is to prevent
+ * mm_release()->clear_child_tid() from writing to a user-controlled
+ * kernel address.
+ */
+ set_fs(USER_DS);
+
tracehook_report_exit(&code);
validate_creds_for_do_exit(tsk);
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
+ clear_tsk_need_resched(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
{
struct futex_hash_bucket *hb;
- get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
- drop_futex_key_refs(&q->key);
}
/**
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
-
- drop_futex_key_refs(&q->key);
}
/*
}
retry:
- /* Prepare to wait on uaddr. */
+ /*
+ * Prepare to wait on uaddr. On success, holds hb lock and increments
+ * q.key refs.
+ */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out;
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
+ /* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
- goto out_put_key;
+ goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
- goto out_put_key;
+ goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
- if (!signal_pending(current)) {
- put_futex_key(fshared, &q.key);
+ if (!signal_pending(current))
goto retry;
- }
ret = -ERESTARTSYS;
if (!abs_time)
- goto out_put_key;
+ goto out;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
ret = -ERESTART_RESTARTBLOCK;
-out_put_key:
- put_futex_key(fshared, &q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
- /* Prepare to wait on uaddr. */
+ /*
+ * Prepare to wait on uaddr. On success, increments q.key (key1) ref
+ * count.
+ */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
goto out_key2;
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
- * race with the atomic proxy lock acquition by the requeue code.
+ * race with the atomic proxy lock acquisition by the requeue code. The
+ * futex_requeue dropped our key1 reference and incremented our key2
+ * reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, irq_spurious_proc_show, NULL);
+ return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
}
static const struct file_operations irq_spurious_proc_fops = {
account_global_scheduler_latency(tsk, &lat);
- /*
- * short term hack; if we're > 32 we stop; future we recycle:
- */
- tsk->latency_record_count++;
- if (tsk->latency_record_count >= LT_SAVECOUNT)
- goto out_unlock;
-
- for (i = 0; i < LT_SAVECOUNT; i++) {
+ for (i = 0; i < tsk->latency_record_count; i++) {
struct latency_record *mylat;
int same = 1;
}
}
+ /*
+ * short term hack; if we're > 32 we stop; future we recycle:
+ */
+ if (tsk->latency_record_count >= LT_SAVECOUNT)
+ goto out_unlock;
+
/* Allocated a new one: */
- i = tsk->latency_record_count;
+ i = tsk->latency_record_count++;
memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
out_unlock:
{
raw_spin_lock(&ctx->lock);
- /* Rotate the first entry last of non-pinned groups */
- list_rotate_left(&ctx->flexible_groups);
+ /*
+ * Rotate the first entry last of non-pinned groups. Rotation might be
+ * disabled by the inheritance code.
+ */
+ if (!ctx->rotate_disable)
+ list_rotate_left(&ctx->flexible_groups);
raw_spin_unlock(&ctx->lock);
}
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
- update_context_time(ctx);
+ /*
+ * may read while context is not active
+ * (e.g., thread is blocked), in that case
+ * we cannot update context time
+ */
+ if (ctx->is_active)
+ update_context_time(ctx);
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
+ unsigned long flags;
int ret = 0;
child->perf_event_ctxp = NULL;
break;
}
+ /*
+ * We can't hold ctx->lock when iterating the ->flexible_group list due
+ * to allocations, but we need to prevent rotation because
+ * rotate_ctx() will change the list from interrupt context.
+ */
+ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+ parent_ctx->rotate_disable = 1;
+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx, child,
&inherited_all);
break;
}
+ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+ parent_ctx->rotate_disable = 0;
+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
child_ctx = child->perf_event_ctxp;
if (child_ctx && inherited_all) {
int hibernation_snapshot(int platform_mode)
{
int error;
- gfp_t saved_mask;
error = platform_begin(platform_mode);
if (error)
goto Close;
suspend_console();
- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+ pm_restrict_gfp_mask();
error = dpm_suspend_start(PMSG_FREEZE);
if (error)
goto Recover_platform;
goto Recover_platform;
error = create_image(platform_mode);
- /* Control returns here after successful restore */
+ /*
+ * Control returns here (1) after the image has been created or the
+ * image creation has failed and (2) after a successful restore.
+ */
Resume_devices:
/* We may need to release the preallocated image pages here. */
dpm_resume_end(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
- set_gfp_allowed_mask(saved_mask);
+
+ if (error || !in_suspend)
+ pm_restore_gfp_mask();
+
resume_console();
Close:
platform_end(platform_mode);
int hibernation_restore(int platform_mode)
{
int error;
- gfp_t saved_mask;
pm_prepare_console();
suspend_console();
- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+ pm_restrict_gfp_mask();
error = dpm_suspend_start(PMSG_QUIESCE);
if (!error) {
error = resume_target_kernel(platform_mode);
dpm_resume_end(PMSG_RECOVER);
}
- set_gfp_allowed_mask(saved_mask);
+ pm_restore_gfp_mask();
resume_console();
pm_restore_console();
return error;
int hibernation_platform_enter(void)
{
int error;
- gfp_t saved_mask;
if (!hibernation_ops)
return -ENOSYS;
entering_platform_hibernation = true;
suspend_console();
- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_HIBERNATE);
if (error) {
if (hibernation_ops->recover)
Resume_devices:
entering_platform_hibernation = false;
dpm_resume_end(PMSG_RESTORE);
- set_gfp_allowed_mask(saved_mask);
resume_console();
Close:
swsusp_free();
if (!error)
power_down();
+ pm_restore_gfp_mask();
} else {
pr_debug("PM: Image restored successfully.\n");
}
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
- gfp_t saved_mask;
if (!suspend_ops)
return -ENOSYS;
goto Close;
}
suspend_console();
- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+ pm_restrict_gfp_mask();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
- set_gfp_allowed_mask(saved_mask);
+ pm_restore_gfp_mask();
resume_console();
Close:
if (suspend_ops->end)
free_all_swap_pages(data->swap);
if (data->frozen)
thaw_processes();
- pm_notifier_call_chain(data->mode == O_WRONLY ?
+ pm_notifier_call_chain(data->mode == O_RDONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
case SNAPSHOT_UNFREEZE:
if (!data->frozen || data->ready)
break;
+ pm_restore_gfp_mask();
thaw_processes();
usermodehelper_enable();
data->frozen = 0;
error = -EPERM;
break;
}
+ pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
if (!error)
error = put_user(in_suspend, (int __user *)arg);
int printk_needs_cpu(int cpu)
{
+ if (unlikely(cpu_is_offline(cpu)))
+ printk_tick();
return per_cpu(printk_pending, cpu);
}
void wake_up_klogd(void)
{
if (waitqueue_active(&log_wait))
- __raw_get_cpu_var(printk_pending) = 1;
+ this_cpu_write(printk_pending, 1);
}
/**
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
- if (test_tsk_need_resched(p))
+ if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
}
size_t cnt, loff_t *ppos)
{
char buf[64];
- char *cmp = buf;
+ char *cmp;
int neg = 0;
int i;
return -EFAULT;
buf[cnt] = 0;
+ cmp = strstrip(buf);
if (strncmp(buf, "NO_", 3) == 0) {
neg = 1;
}
for (i = 0; sched_feat_names[i]; i++) {
- int len = strlen(sched_feat_names[i]);
-
- if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+ if (strcmp(cmp, sched_feat_names[i]) == 0) {
if (neg)
sysctl_sched_features &= ~(1UL << i);
else
static void set_load_weight(struct task_struct *p)
{
- if (task_has_rt_policy(p)) {
- p->se.load.weight = 0;
- p->se.load.inv_weight = WMULT_CONST;
- return;
- }
-
/*
* SCHED_IDLE tasks get minimal weight:
*/
return delta;
}
+/*
+ * One exponential-decay step of the load average:
+ * new = load*exp + active*(FIXED_1 - exp), in FSHIFT fixed-point,
+ * with round-to-nearest via the added half-ULP.
+ */
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+	load *= exp;
+	load += active * (FIXED_1 - exp);
+	load += 1UL << (FSHIFT - 1);
+	return load >> FSHIFT;
+}
+
#ifdef CONFIG_NO_HZ
/*
* For NO_HZ we delay the active fold to the next LOAD_FREQ update.
return delta;
}
+
+/**
+ * fixed_power_int - compute: x^n, in O(log n) time
+ *
+ * @x:         base of the power
+ * @frac_bits: fractional bits of @x
+ * @n:         power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ *
+ * Note: assumes each intermediate fixed-point product fits in an
+ * unsigned long; each squaring/multiply rounds to nearest (half-ULP add).
+ */
+static unsigned long
+fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
+{
+	unsigned long result = 1UL << frac_bits;
+
+	if (n) for (;;) {
+		if (n & 1) {
+			result *= x;
+			result += 1UL << (frac_bits - 1);
+			result >>= frac_bits;
+		}
+		n >>= 1;
+		if (!n)
+			break;
+		x *= x;
+		x += 1UL << (frac_bits - 1);
+		x >>= frac_bits;
+	}
+
+	return result;
+}
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ *
+ * a2 = a1 * e + a * (1 - e)
+ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
+ *    = a0 * e^2 + a * (1 - e) * (1 + e)
+ *
+ * a3 = a2 * e + a * (1 - e)
+ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
+ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
+ *
+ *  ...
+ *
+ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
+ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
+ *    = a0 * e^n + a * (1 - e^n)
+ *
+ * [1] application of the geometric series:
+ *
+ *              n         1 - x^(n+1)
+ *     S_n := \Sum x^i = -------------
+ *             i=0          1 - x
+ */
+/* Apply n decay steps at once: equivalent to calling calc_load() n times,
+ * but O(log n) thanks to fixed_power_int(). */
+static unsigned long
+calc_load_n(unsigned long load, unsigned long exp,
+	    unsigned long active, unsigned int n)
+{
+
+	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
+}
+
+/*
+ * NO_HZ can leave us missing all per-cpu ticks calling
+ * calc_load_account_active(), but since an idle CPU folds its delta into
+ * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
+ * in the pending idle delta if our idle period crossed a load cycle boundary.
+ *
+ * Once we've updated the global active value, we need to apply the exponential
+ * weights adjusted to the number of cycles missed.
+ *
+ * @ticks: number of jiffies being accounted for in this update; when it
+ * spans one or more whole LOAD_FREQ periods the avenrun[] averages are
+ * aged by that many cycles in one shot via calc_load_n().
+ */
+static void calc_global_nohz(unsigned long ticks)
+{
+	long delta, active, n;
+
+	/* Nothing to do until the next scheduled load-average update. */
+	if (time_before(jiffies, calc_load_update))
+		return;
+
+	/*
+	 * If we crossed a calc_load_update boundary, make sure to fold
+	 * any pending idle changes, the respective CPUs might have
+	 * missed the tick driven calc_load_account_active() update
+	 * due to NO_HZ.
+	 */
+	delta = calc_load_fold_idle();
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
+
+	/*
+	 * If we were idle for multiple load cycles, apply them.
+	 */
+	if (ticks >= LOAD_FREQ) {
+		n = ticks / LOAD_FREQ;
+
+		active = atomic_long_read(&calc_load_tasks);
+		active = active > 0 ? active * FIXED_1 : 0;
+
+		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+		calc_load_update += n * LOAD_FREQ;
+	}
+
+	/*
+	 * Its possible the remainder of the above division also crosses
+	 * a LOAD_FREQ period, the regular check in calc_global_load()
+	 * which comes after this will take care of that.
+	 *
+	 * Consider us being 11 ticks before a cycle completion, and us
+	 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
+	 * age us 4 cycles, and the test in calc_global_load() will
+	 * pick up the final one.
+	 */
+}
#else
static void calc_load_account_idle(struct rq *this_rq)
{
{
return 0;
}
+
+/* !CONFIG_NO_HZ stub: every tick is accounted normally, so there are no
+ * missed load cycles to catch up on. */
+static void calc_global_nohz(unsigned long ticks)
+{
+}
#endif
/**
loads[2] = (avenrun[2] + offset) << shift;
}
-static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
-{
- load *= exp;
- load += active * (FIXED_1 - exp);
- return load >> FSHIFT;
-}
-
/*
* calc_load - update the avenrun load estimates 10 ticks after the
* CPUs have updated calc_load_tasks.
*/
-void calc_global_load(void)
+void calc_global_load(unsigned long ticks)
{
- unsigned long upd = calc_load_update + 10;
long active;
- if (time_before(jiffies, upd))
+ calc_global_nohz(ticks);
+
+ if (time_before(jiffies, calc_load_update + 10))
return;
active = atomic_long_read(&calc_load_tasks);
{
if (prev->se.on_rq)
update_rq_clock(rq);
- rq->skip_clock_update = 0;
prev->sched_class->put_prev_task(rq, prev);
}
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
- clear_tsk_need_resched(prev);
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
put_prev_task(rq, prev);
next = pick_next_task(rq);
+ clear_tsk_need_resched(prev);
+ rq->skip_clock_update = 0;
if (likely(prev != next)) {
sched_info_switch(prev, next);
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ /*
+ * We're having a chicken and egg problem, even though we are
+ * holding rq->lock, the cpu isn't yet set to this cpu so the
+ * lockdep check in task_group() will fail.
+ *
+ * Similar case to sched_fork(). / Alternatively we could
+ * use task_rq_lock() here and obtain the other rq->lock.
+ *
+ * Silence PROVE_RCU
+ */
+ rcu_read_lock();
__set_task_cpu(idle, cpu);
+ rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
struct tvec_base *base = __get_cpu_var(tvec_bases);
unsigned long expires;
+ /*
+ * Pretend that there is no timer pending if the cpu is offline.
+ * Possible pending timers will be migrated later to an active cpu.
+ */
+ if (cpu_is_offline(smp_processor_id()))
+ return now + NEXT_TIMER_MAX_DELTA;
spin_lock(&base->lock);
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
{
jiffies_64 += ticks;
update_wall_time();
- calc_global_load();
+ calc_global_load(ticks);
}
#ifdef __ARCH_WANT_SYS_ALARM
return count;
}
+/*
+ * llseek for the trace file: only read opens go through seq_lseek()
+ * (presumably only those carry seq_file state — confirm against
+ * tracing_open); other opens just report position 0 instead of erroring.
+ */
+static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_lseek(file, offset, origin);
+	else
+		return 0;
+}
+
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
.write = tracing_write_stub,
- .llseek = seq_lseek,
+ .llseek = tracing_seek,
.release = tracing_release,
};
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ put_user_ns(ns);
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
goto out_save;
}
- printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
+ printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
+ cpu, PTR_ERR(event));
return -1;
/* success path */
if (!fbc->counters)
return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc->list);
mutex_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
mutex_unlock(&percpu_counters_lock);
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+/* Tag a node pointer with RADIX_TREE_INDIRECT_PTR, marking it as an
+ * internal (indirect) radix-tree node rather than a direct item. */
+static inline void *ptr_to_indirect(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+/* Strip the RADIX_TREE_INDIRECT_PTR tag, recovering the plain pointer. */
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
return root->gfp_mask & __GFP_BITS_MASK;
return -ENOMEM;
/* Increase the height. */
- node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+ node->slots[0] = indirect_to_ptr(root->rnode);
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
newheight = root->height+1;
node->height = newheight;
node->count = 1;
- node = radix_tree_ptr_to_indirect(node);
+ node = ptr_to_indirect(node);
rcu_assign_pointer(root->rnode, node);
root->height = newheight;
} while (height > root->height);
return error;
}
- slot = radix_tree_indirect_to_ptr(root->rnode);
+ slot = indirect_to_ptr(root->rnode);
height = root->height;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
rcu_assign_pointer(node->slots[offset], slot);
node->count++;
} else
- rcu_assign_pointer(root->rnode,
- radix_tree_ptr_to_indirect(slot));
+ rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
}
/* Go a level down */
return NULL;
return is_slot ? (void *)&root->rnode : node;
}
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
height = node->height;
if (index > radix_tree_maxindex(height))
height--;
} while (height > 0);
- return is_slot ? (void *)slot:node;
+ return is_slot ? (void *)slot : indirect_to_ptr(node);
}
/**
height = root->height;
BUG_ON(index > radix_tree_maxindex(height));
- slot = radix_tree_indirect_to_ptr(root->rnode);
+ slot = indirect_to_ptr(root->rnode);
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- slot = radix_tree_indirect_to_ptr(root->rnode);
+ slot = indirect_to_ptr(root->rnode);
while (height > 0) {
int offset;
if (!radix_tree_is_indirect_ptr(node))
return (index == 0);
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
height = node->height;
if (index > radix_tree_maxindex(height))
}
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
- slot = radix_tree_indirect_to_ptr(root->rnode);
+ slot = indirect_to_ptr(root->rnode);
/*
* we fill the path from (root->height - 2) to 0, leaving the index at
results[0] = node;
return 1;
}
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference_raw(slot);
+ results[ret + nr_found] =
+ indirect_to_ptr(rcu_dereference_raw(slot));
nr_found++;
}
ret += nr_found;
results[0] = (void **)&root->rnode;
return 1;
}
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
results[0] = node;
return 1;
}
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference_raw(slot);
+ results[ret + nr_found] =
+ indirect_to_ptr(rcu_dereference_raw(slot));
nr_found++;
}
ret += nr_found;
results[0] = (void **)&root->rnode;
return 1;
}
- node = radix_tree_indirect_to_ptr(node);
+ node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
void *newptr;
BUG_ON(!radix_tree_is_indirect_ptr(to_free));
- to_free = radix_tree_indirect_to_ptr(to_free);
+ to_free = indirect_to_ptr(to_free);
/*
* The candidate node has more than one child, or its child
/*
* We don't need rcu_assign_pointer(), since we are simply
- * moving the node from one part of the tree to another. If
- * it was safe to dereference the old pointer to it
+ * moving the node from one part of the tree to another: if it
+ * was safe to dereference the old pointer to it
* (to_free->slots[0]), it will be safe to dereference the new
- * one (root->rnode).
+ * one (root->rnode) as far as dependent read barriers go.
*/
newptr = to_free->slots[0];
if (root->height > 1)
- newptr = radix_tree_ptr_to_indirect(newptr);
+ newptr = ptr_to_indirect(newptr);
root->rnode = newptr;
root->height--;
+
+ /*
+ * We have a dilemma here. The node's slot[0] must not be
+ * NULLed in case there are concurrent lookups expecting to
+ * find the item. However if this was a bottom-level node,
+ * then it may be subject to the slot pointer being visible
+ * to callers dereferencing it. If item corresponding to
+ * slot[0] is subsequently deleted, these callers would expect
+ * their slot to become empty sooner or later.
+ *
+ * For example, lockless pagecache will look up a slot, deref
+ * the page pointer, and if the page is 0 refcount it means it
+ * was concurrently deleted from pagecache so try the deref
+ * again. Fortunately there is already a requirement for logic
+ * to retry the entire slot lookup -- the indirect pointer
+ * problem (replacing direct root node with an indirect pointer
+ * also results in a stale slot). So tag the slot as indirect
+ * to force callers to retry.
+ */
+ if (root->height == 0)
+ *((unsigned long *)&to_free->slots[0]) |=
+ RADIX_TREE_INDIRECT_PTR;
+
radix_tree_node_free(to_free);
}
}
root->rnode = NULL;
goto out;
}
- slot = radix_tree_indirect_to_ptr(slot);
+ slot = indirect_to_ptr(slot);
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
radix_tree_node_free(to_free);
if (pathp->node->count) {
- if (pathp->node ==
- radix_tree_indirect_to_ptr(root->rnode))
+ if (pathp->node == indirect_to_ptr(root->rnode))
radix_tree_shrink(root);
goto out;
}
pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
if (pagep) {
page = radix_tree_deref_slot(pagep);
- if (unlikely(!page || page == RADIX_TREE_RETRY))
+ if (unlikely(!page))
+ goto out;
+ if (radix_tree_deref_retry(page))
goto repeat;
if (!page_cache_get_speculative(page))
goto repeat;
}
}
+out:
rcu_read_unlock();
return page;
page = radix_tree_deref_slot((void **)pages[i]);
if (unlikely(!page))
continue;
- /*
- * this can only trigger if nr_found == 1, making livelock
- * a non issue.
- */
- if (unlikely(page == RADIX_TREE_RETRY))
+ if (radix_tree_deref_retry(page)) {
+ if (ret)
+ start = pages[ret-1]->index;
goto restart;
+ }
if (!page_cache_get_speculative(page))
goto repeat;
page = radix_tree_deref_slot((void **)pages[i]);
if (unlikely(!page))
continue;
- /*
- * this can only trigger if nr_found == 1, making livelock
- * a non issue.
- */
- if (unlikely(page == RADIX_TREE_RETRY))
+ if (radix_tree_deref_retry(page))
goto restart;
if (page->mapping == NULL || page->index != index)
page = radix_tree_deref_slot((void **)pages[i]);
if (unlikely(!page))
continue;
- /*
- * this can only trigger if nr_found == 1, making livelock
- * a non issue.
- */
- if (unlikely(page == RADIX_TREE_RETRY))
+ if (radix_tree_deref_retry(page))
goto restart;
if (!page_cache_get_speculative(page))
goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
+ /* Did it get truncated before we got the lock? */
+ if (!page->mapping)
+ goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
desc, offset))
goto page_not_up_to_date_locked;
* When the original hugepage is shared one, it does not have
* anon_vma prepared.
*/
- if (unlikely(anon_vma_prepare(vma)))
+ if (unlikely(anon_vma_prepare(vma))) {
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
+ }
copy_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
unlock_page(pagecache_page);
put_page(pagecache_page);
}
- unlock_page(page);
+ if (page != pagecache_page)
+ unlock_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
*/
static inline unsigned long page_order(struct page *page)
{
- VM_BUG_ON(!PageBuddy(page));
+ /* PageBuddy() must be checked by the caller */
return page_private(page);
}
long memblock_add(u64 base, u64 size)
{
struct memblock_region *_rgn = &memblock.memory;
+ u64 end = base + size;
+
+ base = PAGE_ALIGN(base);
+ size = (end & PAGE_MASK) - base;
/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
if (base == 0)
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
- spinlock_t lock; /* for from, to, moving_task */
+ spinlock_t lock; /* for from, to */
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long precharge;
unsigned long moved_charge;
unsigned long moved_swap;
struct task_struct *moving_task; /* a task moving charges */
+ struct mm_struct *mm;
wait_queue_head_t waitq; /* a waitq for other context */
} mc = {
.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
rcu_read_lock();
p = rcu_dereference(mm->owner);
- VM_BUG_ON(!p);
/*
- * because we don't have task_lock(), "p" can exit while
- * we're here. In that case, "mem" can point to root
- * cgroup but never be NULL. (and task_struct itself is freed
- * by RCU, cgroup itself is RCU safe.) Then, we have small
- * risk here to get wrong cgroup. But such kind of mis-account
- * by race always happens because we don't have cgroup_mutex().
- * It's overkill and we allow that small race, here.
+ * Because we don't have task_lock(), "p" can exit.
+ * In that case, "mem" can point to root or p can be NULL with
+ * race with swapoff. Then, we have small risk of mis-accounting.
+ * But such kind of mis-account by race always happens because
+ * we don't have cgroup_mutex(). It's overkill and we allow that
+ * small race, here.
+ * (*) swapoff et al. will charge against mm-struct not against
+ * task-struct. So, mm->owner can be NULL.
*/
mem = mem_cgroup_from_task(p);
- VM_BUG_ON(!mem);
- if (mem_cgroup_is_root(mem)) {
+ if (!mem || mem_cgroup_is_root(mem)) {
rcu_read_unlock();
goto done;
}
unsigned long precharge;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ /* We already hold the mmap_sem */
for (vma = mm->mmap; vma; vma = vma->vm_next) {
struct mm_walk mem_cgroup_count_precharge_walk = {
.pmd_entry = mem_cgroup_count_precharge_pte_range,
walk_page_range(vma->vm_start, vma->vm_end,
&mem_cgroup_count_precharge_walk);
}
- up_read(&mm->mmap_sem);
precharge = mc.precharge;
mc.precharge = 0;
mc.moved_swap = 0;
}
+ if (mc.mm) {
+ up_read(&mc.mm->mmap_sem);
+ mmput(mc.mm);
+ }
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
- mc.moving_task = NULL;
spin_unlock(&mc.lock);
+ mc.moving_task = NULL;
+ mc.mm = NULL;
memcg_oom_recover(from);
memcg_oom_recover(to);
wake_up_all(&mc.waitq);
return 0;
/* We move charges only when we move a owner of the mm */
if (mm->owner == p) {
+ /*
+ * We do all the move charge works under one mmap_sem to
+ * avoid deadlock with down_write(&mmap_sem)
+ * -> try_charge() -> if (mc.moving_task) -> sleep.
+ */
+ down_read(&mm->mmap_sem);
+
VM_BUG_ON(mc.from);
VM_BUG_ON(mc.to);
VM_BUG_ON(mc.precharge);
VM_BUG_ON(mc.moved_charge);
VM_BUG_ON(mc.moved_swap);
VM_BUG_ON(mc.moving_task);
+ VM_BUG_ON(mc.mm);
+
spin_lock(&mc.lock);
mc.from = from;
mc.to = mem;
mc.precharge = 0;
mc.moved_charge = 0;
mc.moved_swap = 0;
- mc.moving_task = current;
spin_unlock(&mc.lock);
+ mc.moving_task = current;
+ mc.mm = mm;
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
- }
- mmput(mm);
+ /* We call up_read() and mmput() in clear_mc(). */
+ } else
+ mmput(mm);
}
return ret;
}
struct vm_area_struct *vma;
lru_add_drain_all();
- down_read(&mm->mmap_sem);
+ /* We already hold the mmap_sem */
for (vma = mm->mmap; vma; vma = vma->vm_next) {
int ret;
struct mm_walk mem_cgroup_move_charge_walk = {
*/
break;
}
- up_read(&mm->mmap_sem);
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct task_struct *p,
bool threadgroup)
{
- struct mm_struct *mm;
-
- if (!mc.to)
+ if (!mc.mm)
/* no need to move charge */
return;
- mm = get_task_mm(p);
- if (mm) {
- mem_cgroup_move_charge(mm);
- mmput(mm);
- }
+ mem_cgroup_move_charge(mc.mm);
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
* Scanning pfn is much easier than scanning lru list.
* Scan pfn from start to end and Find LRU page.
*/
-int scan_lru_pages(unsigned long start, unsigned long end)
+unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
struct page *page;
(void)first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes,
&zone);
- return zone->node;
+ return zone ? zone->node : numa_node_id();
}
default:
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
+ int ret;
struct vm_area_struct *vma;
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
vma->vm_ops = &special_mapping_vmops;
vma->vm_private_data = pages;
- if (unlikely(insert_vm_struct(mm, vma))) {
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
+ ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
+ if (ret)
+ goto out;
+
+ ret = insert_vm_struct(mm, vma);
+ if (ret)
+ goto out;
mm->total_vm += len >> PAGE_SHIFT;
perf_event_mmap(vma);
return 0;
+
+out:
+ kmem_cache_free(vm_area_cachep, vma);
+ return ret;
}
static DEFINE_MUTEX(mm_all_locks_mutex);
mmu_notifier_invalidate_range_end(mm, start, end);
vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+ perf_event_mmap(vma);
return 0;
fail:
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
if (error)
goto out;
- perf_event_mmap(vma);
nstart = tmp;
if (nstart < prev->vm_end)
mm->mmap = vma->vm_next;
delete_vma_from_mm(vma);
delete_vma(mm, vma);
+ cond_resched();
}
kleave("");
* only be modified with pm_mutex held, unless the suspend/hibernate code is
* guaranteed not to run in parallel with that modification).
*/
-void set_gfp_allowed_mask(gfp_t mask)
+
+static gfp_t saved_gfp_mask;
+
+void pm_restore_gfp_mask(void)
{
WARN_ON(!mutex_is_locked(&pm_mutex));
- gfp_allowed_mask = mask;
+ if (saved_gfp_mask) {
+ gfp_allowed_mask = saved_gfp_mask;
+ saved_gfp_mask = 0;
+ }
}
-gfp_t clear_gfp_allowed_mask(gfp_t mask)
+void pm_restrict_gfp_mask(void)
{
- gfp_t ret = gfp_allowed_mask;
-
WARN_ON(!mutex_is_locked(&pm_mutex));
- gfp_allowed_mask &= ~mask;
- return ret;
+ WARN_ON(saved_gfp_mask);
+ saved_gfp_mask = gfp_allowed_mask;
+ gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */
* so it's less likely to be used soon and more likely to be merged
* as a higher order page
*/
- if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
struct page *higher_page, *higher_buddy;
combined_idx = __find_combined_index(page_idx, order);
higher_page = page + combined_idx - page_idx;
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
+/*
+ * called before a call to iounmap() if the caller wants vm_area_struct's
+ * immediately freed.
+ */
+void set_iounmap_nonlazy(void)
+{
+ atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
/*
* Purges all lazily-freed vmap areas.
*
struct net_device *dev = skb->dev;
struct vlan_rx_stats *rx_stats;
+ if (unlikely(!is_vlan_dev(dev)))
+ return 0;
+
skb->dev = vlan_dev_info(dev)->real_dev;
netif_nit_deliver(skb);
ax25_cb *ax25;
int err = 0;
+ memset(fsa, 0, sizeof(fsa));
lock_sock(sk);
ax25 = ax25_sk(sk);
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
- fsa->fsa_ax25.sax25_ndigis = 0;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
- struct sock *parent, *uninitialized_var(sk);
+ struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
L2CAP_INFO_REQ, sizeof(info), &info);
}
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
+ if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
result == L2CAP_CR_SUCCESS) {
u8 buf[128];
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
ip6h = ipv6_hdr(skb);
*(__force __be32 *)ip6h = htonl(0x60000000);
- ip6h->payload_len = 8 + sizeof(*mldq);
+ ip6h->payload_len = htons(8 + sizeof(*mldq));
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
struct proc_dir_entry *bcm_proc_read;
- char procname [9]; /* pointer printed in ASCII with \0 */
+ char procname [20]; /* pointer printed in ASCII with \0 */
};
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
compat_size_t len;
if (get_user(len, &uiov32->iov_len) ||
- get_user(buf, &uiov32->iov_base)) {
- tot_len = -EFAULT;
- break;
- }
+ get_user(buf, &uiov32->iov_base))
+ return -EFAULT;
+
+ if (len > INT_MAX - tot_len)
+ len = INT_MAX - tot_len;
+
tot_len += len;
kiov->iov_base = compat_ptr(buf);
kiov->iov_len = (__kernel_size_t) len;
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_IP_CSUM) &&
+ return ((features & NETIF_F_NO_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_IPV6_CSUM) &&
+ ((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
ncls:
#endif
+ /* If we got this far with a hardware accelerated VLAN tag, it means
+ * that we were put in promiscuous mode but nobody is interested in
+ * this vid. Drop the packet now to prevent it from getting propagated
+ * to other parts of the stack that won't know how to deal with packets
+ * tagged in this manner.
+ */
+ if (unlikely(vlan_tx_tag_present(skb)))
+ goto bypass;
+
/* Handle special case of bridge or macvlan */
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
}
}
+bypass:
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
static struct notifier_block dst_dev_notifier = {
.notifier_call = dst_dev_event,
+ .priority = -10, /* must be called after other network notifiers */
};
void __init dst_init(void)
*/
unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
- struct sock_filter *fentry; /* We walk down these */
void *ptr;
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
+ unsigned long memvalid = 0;
u32 tmp;
int k;
int pc;
+ BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
/*
* Process array of filter instructions.
*/
for (pc = 0; pc < flen; pc++) {
- fentry = &filter[pc];
+ const struct sock_filter *fentry = &filter[pc];
+ u32 f_k = fentry->k;
switch (fentry->code) {
case BPF_S_ALU_ADD_X:
A += X;
continue;
case BPF_S_ALU_ADD_K:
- A += fentry->k;
+ A += f_k;
continue;
case BPF_S_ALU_SUB_X:
A -= X;
continue;
case BPF_S_ALU_SUB_K:
- A -= fentry->k;
+ A -= f_k;
continue;
case BPF_S_ALU_MUL_X:
A *= X;
continue;
case BPF_S_ALU_MUL_K:
- A *= fentry->k;
+ A *= f_k;
continue;
case BPF_S_ALU_DIV_X:
if (X == 0)
A /= X;
continue;
case BPF_S_ALU_DIV_K:
- A /= fentry->k;
+ A /= f_k;
continue;
case BPF_S_ALU_AND_X:
A &= X;
continue;
case BPF_S_ALU_AND_K:
- A &= fentry->k;
+ A &= f_k;
continue;
case BPF_S_ALU_OR_X:
A |= X;
continue;
case BPF_S_ALU_OR_K:
- A |= fentry->k;
+ A |= f_k;
continue;
case BPF_S_ALU_LSH_X:
A <<= X;
continue;
case BPF_S_ALU_LSH_K:
- A <<= fentry->k;
+ A <<= f_k;
continue;
case BPF_S_ALU_RSH_X:
A >>= X;
continue;
case BPF_S_ALU_RSH_K:
- A >>= fentry->k;
+ A >>= f_k;
continue;
case BPF_S_ALU_NEG:
A = -A;
continue;
case BPF_S_JMP_JA:
- pc += fentry->k;
+ pc += f_k;
continue;
case BPF_S_JMP_JGT_K:
- pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A > f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGE_K:
- pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A >= f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JEQ_K:
- pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A == f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JSET_K:
- pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+ pc += (A & f_k) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGT_X:
pc += (A > X) ? fentry->jt : fentry->jf;
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_LD_W_ABS:
- k = fentry->k;
+ k = f_k;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
if (ptr != NULL) {
}
break;
case BPF_S_LD_H_ABS:
- k = fentry->k;
+ k = f_k;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
if (ptr != NULL) {
}
break;
case BPF_S_LD_B_ABS:
- k = fentry->k;
+ k = f_k;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
if (ptr != NULL) {
X = skb->len;
continue;
case BPF_S_LD_W_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_w;
case BPF_S_LD_H_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_h;
case BPF_S_LD_B_IND:
- k = X + fentry->k;
+ k = X + f_k;
goto load_b;
case BPF_S_LDX_B_MSH:
- ptr = load_pointer(skb, fentry->k, 1, &tmp);
+ ptr = load_pointer(skb, f_k, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
case BPF_S_LD_IMM:
- A = fentry->k;
+ A = f_k;
continue;
case BPF_S_LDX_IMM:
- X = fentry->k;
+ X = f_k;
continue;
case BPF_S_LD_MEM:
- A = mem[fentry->k];
+ A = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_S_LDX_MEM:
- X = mem[fentry->k];
+ X = (memvalid & (1UL << f_k)) ?
+ mem[f_k] : 0;
continue;
case BPF_S_MISC_TAX:
X = A;
A = X;
continue;
case BPF_S_RET_K:
- return fentry->k;
+ return f_k;
case BPF_S_RET_A:
return A;
case BPF_S_ST:
- mem[fentry->k] = A;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = A;
continue;
case BPF_S_STX:
- mem[fentry->k] = X;
+ memvalid |= 1UL << f_k;
+ mem[f_k] = X;
continue;
default:
WARN_ON(1);
EXPORT_SYMBOL(sk_chk_filter);
/**
- * sk_filter_rcu_release: Release a socket filter by rcu_head
+ * sk_filter_release_rcu - Release a socket filter by rcu_head
* @rcu: rcu_head that contains the sk_filter to free
*/
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
- sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
- unsigned int size = sk_filter_len(fp);
-
- atomic_sub(size, &sk->sk_omem_alloc);
- call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+ kfree(fp);
}
+EXPORT_SYMBOL(sk_filter_release_rcu);
/**
* sk_attach_filter - attach a socket filter
rcu_read_unlock_bh();
if (old_fp)
- sk_filter_delayed_uncharge(sk, old_fp);
+ sk_filter_uncharge(sk, old_fp);
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
- sk_filter_delayed_uncharge(sk, filter);
+ sk_filter_uncharge(sk, filter);
ret = 0;
}
rcu_read_unlock_bh();
* in any case.
*/
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, ct;
- long err;
+ int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
- err += iov[ct].iov_len;
- /*
- * Goal is not to verify user data, but to prevent returning
- * negative value, which is interpreted as errno.
- * Overflow is still possible, but it is harmless.
- */
- if (err < 0)
- return -EMSGSIZE;
+ size_t len = iov[ct].iov_len;
+
+ if (len > INT_MAX - err) {
+ len = INT_MAX - err;
+ iov[ct].iov_len = len;
+ }
+ err += len;
}
return err;
struct phy_device *phydev;
unsigned int type;
- skb_push(skb, ETH_HLEN);
+ if (skb_headroom(skb) < ETH_HLEN)
+ return false;
+ __skb_push(skb, ETH_HLEN);
type = classify(skb);
- skb_pull(skb, ETH_HLEN);
+ __skb_pull(skb, ETH_HLEN);
switch (type) {
case PTP_CLASS_V1_IPV4:
if (r_len > sizeof(struct linkinfo_dn))
r_len = sizeof(struct linkinfo_dn);
+ memset(&link, 0, sizeof(link));
+
switch(sock->state) {
case SS_CONNECTING:
link.idn_linkstate = LL_CONNECTING;
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <linux/stat.h>
#endif
#ifdef CONFIG_ECONET_AUNUDP
struct msghdr udpmsg;
- struct iovec iov[msg->msg_iovlen+1];
+ struct iovec iov[2];
struct aunhdr ah;
struct sockaddr_in udpdest;
__kernel_size_t size;
- int i;
mm_segment_t oldfs;
+ char *userbuf;
#endif
/*
mutex_lock(&econet_mutex);
- if (saddr == NULL) {
- struct econet_sock *eo = ec_sk(sk);
-
- addr.station = eo->station;
- addr.net = eo->net;
- port = eo->port;
- cb = eo->cb;
- } else {
- if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
- mutex_unlock(&econet_mutex);
- return -EINVAL;
- }
- addr.station = saddr->addr.station;
- addr.net = saddr->addr.net;
- port = saddr->port;
- cb = saddr->cb;
- }
+ if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+ mutex_unlock(&econet_mutex);
+ return -EINVAL;
+ }
+ addr.station = saddr->addr.station;
+ addr.net = saddr->addr.net;
+ port = saddr->port;
+ cb = saddr->cb;
/* Look for a device with the right network number. */
dev = net2dev_map[addr.net];
}
}
- if (len + 15 > dev->mtu) {
- mutex_unlock(&econet_mutex);
- return -EMSGSIZE;
- }
-
if (dev->type == ARPHRD_ECONET) {
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
int res;
+ if (len + 15 > dev->mtu) {
+ mutex_unlock(&econet_mutex);
+ return -EMSGSIZE;
+ }
+
dev_hold(dev);
skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
eb = (struct ec_cb *)&skb->cb;
- /* BUG: saddr may be NULL */
eb->cookie = saddr->cookie;
eb->sec = *saddr;
eb->sent = ec_tx_done;
return -ENETDOWN; /* No socket - can't send */
}
+ if (len > 32768) {
+ err = -E2BIG;
+ goto error;
+ }
+
/* Make up a UDP datagram and hand it off to some higher intellect. */
memset(&udpdest, 0, sizeof(udpdest));
/* tack our header on the front of the iovec */
size = sizeof(struct aunhdr);
- /*
- * XXX: that is b0rken. We can't mix userland and kernel pointers
- * in iovec, since on a lot of platforms copy_from_user() will
- * *not* work with the kernel and userland ones at the same time,
- * regardless of what we do with set_fs(). And we are talking about
- * econet-over-ethernet here, so "it's only ARM anyway" doesn't
- * apply. Any suggestions on fixing that code? -- AV
- */
iov[0].iov_base = (void *)&ah;
iov[0].iov_len = size;
- for (i = 0; i < msg->msg_iovlen; i++) {
- void __user *base = msg->msg_iov[i].iov_base;
- size_t iov_len = msg->msg_iov[i].iov_len;
- /* Check it now since we switch to KERNEL_DS later. */
- if (!access_ok(VERIFY_READ, base, iov_len)) {
- mutex_unlock(&econet_mutex);
- return -EFAULT;
- }
- iov[i+1].iov_base = base;
- iov[i+1].iov_len = iov_len;
- size += iov_len;
+
+ userbuf = vmalloc(len);
+ if (userbuf == NULL) {
+ err = -ENOMEM;
+ goto error;
}
+ iov[1].iov_base = userbuf;
+ iov[1].iov_len = len;
+ err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
+ if (err)
+ goto error_free_buf;
+
/* Get a skbuff (no data, just holds our cb information) */
if ((skb = sock_alloc_send_skb(sk, 0,
msg->msg_flags & MSG_DONTWAIT,
- &err)) == NULL) {
- mutex_unlock(&econet_mutex);
- return err;
- }
+ &err)) == NULL)
+ goto error_free_buf;
eb = (struct ec_cb *)&skb->cb;
udpmsg.msg_name = (void *)&udpdest;
udpmsg.msg_namelen = sizeof(udpdest);
udpmsg.msg_iov = &iov[0];
- udpmsg.msg_iovlen = msg->msg_iovlen + 1;
+ udpmsg.msg_iovlen = 2;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
err = sock_sendmsg(udpsock, &udpmsg, size);
set_fs(oldfs);
+
+error_free_buf:
+ vfree(userbuf);
#else
err = -EPROTOTYPE;
#endif
+ error:
mutex_unlock(&econet_mutex);
return err;
err = 0;
switch (cmd) {
case SIOCSIFADDR:
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
edev = dev->ec_ptr;
if (edev == NULL) {
/* Magic up a new one. */
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
+ struct dst_entry *dst = skb_dst(skb);
+ struct ec_device *edev = NULL;
struct sock *sk = NULL;
struct sk_buff *newskb;
- struct ec_device *edev = skb->dev->ec_ptr;
+
+ if (dst)
+ edev = dst->dev->ec_ptr;
if (! edev)
goto bad;
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
* know which interface is going to be used */
- if (val < 8 || val > MAX_TCP_WINDOW) {
+ if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
err = -EINVAL;
break;
}
!icsk->icsk_backoff)
break;
+ if (sock_owned_by_user(sk))
+ break;
+
icsk->icsk_backoff--;
inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
icsk->icsk_backoff;
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
- } else if (sock_owned_by_user(sk)) {
- /* RTO revert clocked out retransmission,
- * but socket is locked. Will defer. */
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- HZ/20, TCP_RTO_MAX);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
- if (init_rcv_wnd &&
- (*rcv_wnd > init_rcv_wnd * mss))
- *rcv_wnd = init_rcv_wnd * mss;
- else if (*rcv_wnd > init_cwnd * mss)
- *rcv_wnd = init_cwnd * mss;
+ if (init_rcv_wnd)
+ *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+ else
+ *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
}
/* Set the clamp no higher than max representable value */
*/
static u8 tcp_cookie_size_check(u8 desired)
{
- if (desired > 0) {
+ int cookie_size;
+
+ if (desired > 0)
/* previously specified */
return desired;
- }
- if (sysctl_tcp_cookie_size <= 0) {
+
+ cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
+ if (cookie_size <= 0)
/* no default specified */
return 0;
- }
- if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+
+ if (cookie_size <= TCP_COOKIE_MIN)
/* value too small, specify minimum */
return TCP_COOKIE_MIN;
- }
- if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+
+ if (cookie_size >= TCP_COOKIE_MAX)
/* value too large, specify maximum */
return TCP_COOKIE_MAX;
- }
- if (0x1 & sysctl_tcp_cookie_size) {
+
+ if (cookie_size & 1)
/* 8-bit multiple, illegal, fix it */
- return (u8)(sysctl_tcp_cookie_size + 0x1);
- }
- return (u8)sysctl_tcp_cookie_size;
+ cookie_size++;
+
+ return (u8)cookie_size;
}
/* Write previously computed TCP options to the packet.
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
+ int win_divisor;
if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
goto send_now;
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
- if (sysctl_tcp_tso_win_divisor) {
+ win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+ if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
/* If at least some fraction of a window is available,
* just use it.
*/
- chunk /= sysctl_tcp_tso_win_divisor;
+ chunk /= win_divisor;
if (limit >= chunk)
goto send_now;
} else {
IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
/* Make sure the string is null-terminated */
- fp[n+value_len] = 0x00;
+ if (n + value_len < skb->len)
+ fp[n + value_len] = 0x00;
IRDA_DEBUG(4, "Got string %s\n", fp+n);
/* Will truncate to IAS_MAX_STRING bytes */
p.pi = pi; /* In case handler needs to know */
p.pl = buf[1]; /* Extract length of value */
+ if (p.pl > 32)
+ p.pl = 32;
IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
p.pi, p.pl);
(__u8) str[0], (__u8) str[1]);
/* Null terminate string */
- str[p.pl+1] = '\0';
+ str[p.pl] = '\0';
p.pv.c = str; /* Handler will need to take a copy */
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
goto out;
rc = -ENODEV;
rtnl_lock();
+ rcu_read_lock();
if (sk->sk_bound_dev_if) {
- llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
if (!addr->sllc_arphrd)
addr->sllc_arphrd = llc->dev->type;
!llc_mac_match(addr->sllc_mac,
llc->dev->dev_addr)) {
rc = -EINVAL;
- dev_put(llc->dev);
llc->dev = NULL;
}
}
} else
llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
addr->sllc_mac);
+ rcu_read_unlock();
rtnl_unlock();
if (!llc->dev)
goto out;
struct sta_info *sta,
struct station_parameters *params)
{
+ unsigned long flags;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[local->oper_channel->band];
- spin_lock_bh(&sta->lock);
+ spin_lock_irqsave(&sta->flaglock, flags);
mask = params->sta_flags_mask;
set = params->sta_flags_set;
if (set & BIT(NL80211_STA_FLAG_MFP))
sta->flags |= WLAN_STA_MFP;
}
- spin_unlock_bh(&sta->lock);
+ spin_unlock_irqrestore(&sta->flaglock, flags);
/*
* cfg80211 validates this (1-2007) and allows setting the AID
if (!sta)
return NULL;
+ sta->last_rx = jiffies;
set_sta_flags(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
chan = scan_chan;
channel_type = NL80211_CHAN_NO_HT;
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
- } else if (local->tmp_channel) {
+ } else if (local->tmp_channel &&
+ local->oper_channel != local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
enum plink_event event;
enum plink_frame_type ftype;
size_t baselen;
- bool deactivated;
+ bool deactivated, matches_local = true;
u8 ie_len;
u8 *baseaddr;
__le16 plid, llid, reason;
/* Now we will figure out the appropriate event... */
event = PLINK_UNDEFINED;
if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
+ matches_local = false;
switch (ftype) {
case PLINK_OPEN:
event = OPN_RJCT;
/* avoid warning */
break;
}
- spin_lock_bh(&sta->lock);
+ }
+
+ if (!sta && !matches_local) {
+ rcu_read_unlock();
+ reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ llid = 0;
+ mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid,
+ plid, reason);
+ return;
} else if (!sta) {
/* ftype == PLINK_OPEN */
u32 rates;
}
event = OPN_ACPT;
spin_lock_bh(&sta->lock);
- } else {
+ } else if (matches_local) {
spin_lock_bh(&sta->lock);
switch (ftype) {
case PLINK_OPEN:
rcu_read_unlock();
return;
}
+ } else {
+ spin_lock_bh(&sta->lock);
}
mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
mod_timer(&ifmgd->timer, timeout);
}
-static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
+void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
{
if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
return;
round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
}
+void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
+ mod_timer(&sdata->u.mgd.conn_mon_timer,
+ round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
+
+ ifmgd->probe_send_count = 0;
+}
+
static int ecw2cw(int ecw)
{
return (1 << ecw) - 1;
if (is_multicast_ether_addr(hdr->addr1))
return;
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- return;
-
- mod_timer(&sdata->u.mgd.conn_mon_timer,
- round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
+ ieee80211_sta_reset_conn_monitor(sdata);
}
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
+ u8 *dst = ifmgd->associated->bssid;
+ u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+
+ /*
+ * Try sending broadcast probe requests for the last three
+ * probe requests after the first ones failed since some
+ * buggy APs only support broadcast probe requests.
+ */
+ if (ifmgd->probe_send_count >= unicast_limit)
+ dst = NULL;
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
- ssid + 2, ssid[1], NULL, 0);
+ ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
rates = 0;
basic_rates = 0;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[wk->chan->band];
for (i = 0; i < elems.supp_rates_len; i++) {
int rate = (elems.supp_rates[i] & 0x7f) * 5;
}
}
- sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ sta->sta.supp_rates[wk->chan->band] = rates;
sdata->vif.bss_conf.basic_rates = basic_rates;
/* cf. IEEE 802.11 9.2.12 */
- if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+ if (wk->chan->band == IEEE80211_BAND_2GHZ &&
have_higher_than_11mbit)
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
else
* Also start the timer that will detect beacon loss.
*/
ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
return true;
}
* we have or will be receiving any beacons or data, so let's
* schedule the timers again, just in case.
*/
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
ifmgd->last_beacon_signal = rx_status->signal;
if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
- ifmgd->ave_beacon_signal = rx_status->signal;
+ ifmgd->ave_beacon_signal = rx_status->signal * 16;
ifmgd->last_cqm_event_signal = 0;
} else {
ifmgd->ave_beacon_signal =
* Push the beacon loss detection into the future since
* we are processing a beacon from the AP just now.
*/
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
local->offchannel_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
del_timer_sync(&local->dynamic_ps_timer);
+ del_timer_sync(&ifmgd->bcn_mon_timer);
+ del_timer_sync(&ifmgd->conn_mon_timer);
+
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
+
+ ieee80211_sta_reset_beacon_monitor(sdata);
+ ieee80211_sta_reset_conn_monitor(sdata);
}
void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
* if needed.
*/
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ /* Skip invalid rates */
+ if (info->control.rates[i].idx < 0)
+ break;
/* Rate masking supports only legacy rates for now */
if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
continue;
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- if (!info->status.ampdu_len) {
- info->status.ampdu_ack_len = 1;
+ if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
+ info->status.ampdu_ack_len =
+ (info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
info->status.ampdu_len = 1;
}
group = minstrel_ht_get_group_idx(&ar[i]);
rate = &mi->groups[group].rates[ar[i].idx % 8];
- if (last && (info->flags & IEEE80211_TX_STAT_ACK))
+ if (last)
rate->success += info->status.ampdu_ack_len;
rate->attempts += ar[i].count * info->status.ampdu_len;
fwd_skb = skb_copy(skb, GFP_ATOMIC);
- if (!fwd_skb && net_ratelimit())
+ if (!fwd_skb && net_ratelimit()) {
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
sdata->name);
+ goto out;
+ }
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
}
}
+ out:
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->dev->flags & IFF_PROMISC)
return RX_CONTINUE;
info->control.vif = &sta->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
IEEE80211_TX_INTFL_RETRANSMISSION;
+ info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
sta->tx_filtered_count++;
int nh_pos, h_pos;
struct sta_info *sta = NULL;
u32 sta_flags = 0;
+ struct sk_buff *tmp_skb;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
goto fail;
}
- nh_pos = skb_network_header(skb) - skb->data;
- h_pos = skb_transport_header(skb) - skb->data;
-
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
ethertype = (skb->data[12] << 8) | skb->data[13];
goto fail;
}
+ /*
+ * If the skb is shared we need to obtain our own copy.
+ */
+ if (skb_shared(skb)) {
+ tmp_skb = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ kfree_skb(tmp_skb);
+
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto fail;
+ }
+ }
+
hdr.frame_control = fc;
hdr.duration_id = 0;
hdr.seq_ctrl = 0;
encaps_len = 0;
}
+ nh_pos = skb_network_header(skb) - skb->data;
+ h_pos = skb_transport_header(skb) - skb->data;
+
skb_pull(skb, skip_header_bytes);
nh_pos -= skip_header_bytes;
h_pos -= skip_header_bytes;
if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
}
if (hash && nulls)
switch (info->mode) {
case SECMARK_MODE_SEL:
err = checkentry_selinux(info);
- if (err <= 0)
+ if (err)
return err;
break;
err = -EINVAL;
vnet_hdr_len = sizeof(vnet_hdr);
- if ((len -= vnet_hdr_len) < 0)
+ if (len < vnet_hdr_len)
goto out_free;
+ len -= vnet_hdr_len;
+
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
- strlcpy(uaddr->sa_data, dev->name, 15);
+ strncpy(uaddr->sa_data, dev->name, 14);
else
memset(uaddr->sa_data, 0, 14);
rcu_read_unlock();
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
+ sll->sll_pkttype = 0;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
goto out;
}
- if (args->nr_local > (u64)UINT_MAX) {
+ if (args->nr_local > UIO_MAXIOV) {
ret = -EMSGSIZE;
goto out;
}
.populate = cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
.subsys_id = net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
#endif
.module = THIS_MODULE,
};
struct iovec iov;
int fput_needed;
+ if (len > INT_MAX)
+ len = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
int err, err2;
int fput_needed;
+ if (size > INT_MAX)
+ size = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
- task->tk_action = call_refresh;
+ task->tk_action = call_reserve;
goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
spin_lock(&svc_xprt_class_lock);
list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
struct svc_xprt *newxprt;
+ unsigned short newport;
if (strcmp(xprt_name, xcl->xcl_name))
continue;
spin_lock_bh(&serv->sv_lock);
list_add(&newxprt->xpt_list, &serv->sv_permsocks);
spin_unlock_bh(&serv->sv_lock);
+ newport = svc_xprt_local_port(newxprt);
clear_bit(XPT_BUSY, &newxprt->xpt_flags);
- return svc_xprt_local_port(newxprt);
+ return newport;
}
err:
spin_unlock(&svc_xprt_class_lock);
{
BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
xprt->xpt_pool = NULL;
+ /* As soon as we clear busy, the xprt could be closed and
+ * 'put', so we need a reference to call svc_xprt_enqueue with:
+ */
+ svc_xprt_get(xprt);
clear_bit(XPT_BUSY, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
+ svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
sock_wfree(skb);
}
+#define MAX_RECURSION_LEVEL 4
+
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
+ unsigned char max_level = 0;
+ int unix_sock_count = 0;
+
+ for (i = scm->fp->count - 1; i >= 0; i--) {
+ struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+ if (sk) {
+ unix_sock_count++;
+ max_level = max(max_level,
+ unix_sk(sk)->recursion_level);
+ }
+ }
+ if (unlikely(max_level > MAX_RECURSION_LEVEL))
+ return -ETOOMANYREFS;
/*
* Need to duplicate file references for the sake of garbage
if (!UNIXCB(skb).fp)
return -ENOMEM;
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_inflight(scm->fp->fp[i]);
- return 0;
+ if (unix_sock_count) {
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->fp[i]);
+ }
+ return max_level;
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
+ int max_level;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
goto out;
err = unix_scm_to_skb(siocb->scm, skb, true);
- if (err)
+ if (err < 0)
goto out_free;
+ max_level = err + 1;
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
}
skb_queue_tail(&other->sk_receive_queue, skb);
+ if (max_level > unix_sk(other)->recursion_level)
+ unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
int sent = 0;
struct scm_cookie tmp_scm;
bool fds_sent = false;
+ int max_level;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
- if (err) {
+ if (err < 0) {
kfree_skb(skb);
goto out_err;
}
+ max_level = err + 1;
fds_sent = true;
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
goto pipe_err_free;
skb_queue_tail(&other->sk_receive_queue, skb);
+ if (max_level > unix_sk(other)->recursion_level)
+ unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, size);
sent += size;
unix_state_lock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
+ unix_sk(sk)->recursion_level = 0;
if (copied >= target)
goto unlock;
unsigned int unix_tot_inflight;
-static struct sock *unix_get_socket(struct file *filp)
+struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = filp->f_path.dentry->d_inode;
}
static bool gc_in_progress = false;
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
+ /*
+ * If number of inflight sockets is insane,
+ * force a garbage collect right now.
+ */
+ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
+ unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
return chan;
}
+/*
+ * can_beacon_sec_chan - check whether the HT40 secondary channel may be
+ * used to initiate communication (i.e. we are allowed to beacon on it).
+ *
+ * Returns true only for HT40+/HT40- channel types whose secondary channel
+ * exists and carries none of the restrictive flags (disabled, passive-scan,
+ * no-IBSS, radar). Any other channel type needs no secondary channel check.
+ */
+static bool can_beacon_sec_chan(struct wiphy *wiphy,
+				struct ieee80211_channel *chan,
+				enum nl80211_channel_type channel_type)
+{
+	struct ieee80211_channel *sec_chan;
+	int diff;
+
+	switch (channel_type) {
+	case NL80211_CHAN_HT40PLUS:
+		diff = 20;
+		break;
+	case NL80211_CHAN_HT40MINUS:
+		diff = -20;
+		break;
+	default:
+		/* no secondary channel involved for non-HT40 types */
+		return false;
+	}
+
+	sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
+	if (!sec_chan)
+		return false;
+
+	/* we'll need a DFS capability later */
+	if (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
+			       IEEE80211_CHAN_PASSIVE_SCAN |
+			       IEEE80211_CHAN_NO_IBSS |
+			       IEEE80211_CHAN_RADAR))
+		return false;
+
+	return true;
+}
+
int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
enum nl80211_channel_type channel_type)
if (!chan)
return -EINVAL;
+ /* Both channels should be able to initiate communication */
+ if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
+ wdev->iftype == NL80211_IFTYPE_AP ||
+ wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
+ wdev->iftype == NL80211_IFTYPE_MESH_POINT)) {
+ switch (channel_type) {
+ case NL80211_CHAN_HT40PLUS:
+ case NL80211_CHAN_HT40MINUS:
+ if (!can_beacon_sec_chan(&rdev->wiphy, chan,
+ channel_type)) {
+ printk(KERN_DEBUG
+ "cfg80211: Secondary channel not "
+ "allowed to initiate communication\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
result = rdev->ops->set_channel(&rdev->wiphy,
wdev ? wdev->netdev : NULL,
chan, channel_type);
result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev);
if (result)
- goto unlock;
+ goto unlock_rtnl;
result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
- unlock:
+ dev_put(netdev);
+ cfg80211_unlock_rdev(rdev);
+ unlock_rtnl:
rtnl_unlock();
return result;
err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
- goto unlock_rdev;
+ goto unlock_rtnl;
wdev = dev->ieee80211_ptr;
err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
threshold, hysteresis);
-unlock_rdev:
+ unlock_rdev:
cfg80211_unlock_rdev(rdev);
dev_put(dev);
+ unlock_rtnl:
rtnl_unlock();
return err;
return 0;
return -EALREADY;
}
- return REG_INTERSECT;
+ return 0;
case NL80211_REGDOM_SET_BY_DRIVER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
if (regdom_changes(pending_request->alpha2))
bss = container_of(pub, struct cfg80211_internal_bss, pub);
spin_lock_bh(&dev->bss_lock);
+ if (!list_empty(&bss->list)) {
+ list_del_init(&bss->list);
+ dev->bss_generation++;
+ rb_erase(&bss->rbn, &dev->bss_tree);
- list_del(&bss->list);
- dev->bss_generation++;
- rb_erase(&bss->rbn, &dev->bss_tree);
-
+ kref_put(&bss->ref, bss_release);
+ }
spin_unlock_bh(&dev->bss_lock);
-
- kref_put(&bss->ref, bss_release);
}
EXPORT_SYMBOL(cfg80211_unlink_bss);
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
+ if (len < 2)
+ return 0;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
len -= 2;
break;
case X25_FAC_CLASS_B:
+ if (len < 3)
+ return 0;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
len -= 3;
break;
case X25_FAC_CLASS_C:
+ if (len < 4)
+ return 0;
printk(KERN_DEBUG "X.25: unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
len -= 4;
break;
case X25_FAC_CLASS_D:
+ if (len < p[1] + 2)
+ return 0;
switch (*p) {
case X25_FAC_CALLING_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
- if (p[1] > X25_MAX_DTE_FACIL_LEN)
- break;
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return 0;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
break;
default:
printk(KERN_DEBUG "X.25: unknown facility %02X,"
- "length %d, values %02X, %02X, "
- "%02X, %02X\n",
- p[0], p[1], p[2], p[3], p[4], p[5]);
+ "length %d\n", p[0], p[1]);
break;
}
len -= p[1] + 2;
&x25->vc_facil_mask);
if (len > 0)
skb_pull(skb, len);
+ else
+ return -1;
/*
* Copy any Call User Data.
*/
list_for_each_safe(entry, tmp, &x25_neigh_list) {
nb = list_entry(entry, struct x25_neigh, node);
__x25_remove_neigh(nb);
+ dev_put(nb->dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
- while ((opt = getopt_long_only(ac, av, "", long_opts, NULL)) != -1) {
+ while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
input_mode = (enum input_mode)opt;
switch (opt) {
case silentoldconfig:
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
Audit_equal, args,
&entry->lsm[lsm_rule].rule);
+ if (!entry->lsm[lsm_rule].rule)
+ return -EINVAL;
return result;
}
if (numid == ID_UNKNOWN)
return;
down_read(&card->controls_rwsem);
- if ((kctl = snd_ctl_find_numid(card, numid)) == NULL)
+ if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
+ up_read(&card->controls_rwsem);
return;
+ }
uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL);
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (uinfo == NULL || uctl == NULL)
return;
down_read(&card->controls_rwsem);
if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
- up_read(&fmixer->card->controls_rwsem);
+ up_read(&card->controls_rwsem);
return;
}
uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL);
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (uinfo == NULL || uctl == NULL) {
err = -ENOMEM;
- goto __unlock;
+ goto __free_only;
}
down_read(&card->controls_rwsem);
kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
err = 0;
__unlock:
up_read(&card->controls_rwsem);
+ __free_only:
kfree(uctl);
kfree(uinfo);
return err;
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (uinfo == NULL || uctl == NULL) {
err = -ENOMEM;
- goto __unlock;
+ goto __free_only;
}
down_read(&card->controls_rwsem);
kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
err = 0;
__unlock:
up_read(&card->controls_rwsem);
+ __free_only:
kfree(uctl);
kfree(uinfo);
return err;
static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
{
struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+ int i;
- substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
- if (substream != NULL) {
- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
- substream->runtime->oss.prepare = 1;
- }
- substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
- if (substream != NULL) {
+ for (i = 0; i < 2; i++) {
+ substream = pcm_oss_file->streams[i];
+ if (!substream)
+ continue;
+ runtime = substream->runtime;
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
- substream->runtime->oss.prepare = 1;
+ runtime->oss.prepare = 1;
+ runtime->oss.buffer_used = 0;
+ runtime->oss.prev_hw_ptr_period = 0;
+ runtime->oss.period_ptr = 0;
}
return 0;
}
int i, n;
for (i = 0; i < num_mixer_volumes; i++) {
- if (strcmp(name, mixer_vols[i].name) == 0) {
+ if (strncmp(name, mixer_vols[i].name, 32) == 0) {
if (present)
mixer_vols[i].num = i;
return mixer_vols[i].levels;
}
n = num_mixer_volumes++;
- strcpy(mixer_vols[n].name, name);
+ strncpy(mixer_vols[n].name, name, 32);
if (present)
mixer_vols[n].num = n;
struct hda_codec *c;
struct hda_cvt_setup *p;
unsigned int oldval, newval;
+ int type;
int i;
if (!nid)
p->dirty = 0;
/* make other inactive cvts with the same stream-tag dirty */
+ type = get_wcaps_type(get_wcaps(codec, nid));
list_for_each_entry(c, &codec->bus->codec_list, list) {
for (i = 0; i < c->cvt_setups.used; i++) {
p = snd_array_elem(&c->cvt_setups, i);
- if (!p->active && p->stream_tag == stream_tag)
+ if (!p->active && p->stream_tag == stream_tag &&
+ get_wcaps_type(get_wcaps(codec, p->nid)) == type)
p->dirty = 1;
}
}
if (!nid)
return;
+ if (codec->no_sticky_stream)
+ do_now = 1;
+
snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
p = get_hda_cvt_setup(codec, nid);
if (p) {
unsigned int pin_amp_workaround:1; /* pin out-amp takes index
* (e.g. Conexant codecs)
*/
+ unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
unsigned int pins_shutup:1; /* pins are shut up */
unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
#ifdef CONFIG_SND_HDA_POWER_SAVE
{
int i;
- pcm->rates = 0;
- pcm->formats = 0;
- pcm->maxbps = 0;
- pcm->channels_min = -1;
- pcm->channels_max = 0;
+ /* assume basic audio support (the basic audio flag is not in ELD;
+ * however, all audio capable sinks are required to support basic
+ * audio) */
+ pcm->rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
+ pcm->formats = SNDRV_PCM_FMTBIT_S16_LE;
+ pcm->maxbps = 16;
+ pcm->channels_max = 2;
for (i = 0; i < eld->sad_count; i++) {
struct cea_sad *a = &eld->sad[i];
pcm->rates |= a->rates;
- if (a->channels < pcm->channels_min)
- pcm->channels_min = a->channels;
if (a->channels > pcm->channels_max)
pcm->channels_max = a->channels;
if (a->format == AUDIO_CODING_TYPE_LPCM) {
- if (a->sample_bits & AC_SUPPCM_BITS_16) {
- pcm->formats |= SNDRV_PCM_FMTBIT_S16_LE;
- if (pcm->maxbps < 16)
- pcm->maxbps = 16;
- }
if (a->sample_bits & AC_SUPPCM_BITS_20) {
pcm->formats |= SNDRV_PCM_FMTBIT_S32_LE;
if (pcm->maxbps < 20)
/* restrict the parameters by the values the codec provides */
pcm->rates &= codec_pars->rates;
pcm->formats &= codec_pars->formats;
- pcm->channels_min = max(pcm->channels_min, codec_pars->channels_min);
pcm->channels_max = min(pcm->channels_max, codec_pars->channels_max);
pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
}
AZX_DRIVER_ULI,
AZX_DRIVER_NVIDIA,
AZX_DRIVER_TERA,
+ AZX_DRIVER_CTX,
AZX_DRIVER_GENERIC,
AZX_NUM_DRIVERS, /* keep this as last entry */
};
[AZX_DRIVER_ULI] = "HDA ULI M5461",
[AZX_DRIVER_NVIDIA] = "HDA NVidia",
[AZX_DRIVER_TERA] = "HDA Teradici",
+ [AZX_DRIVER_CTX] = "HDA Creative",
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
/* reset the rirb hw write pointer */
azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
/* set N=1, get RIRB response interrupt for new entry */
- azx_writew(chip, RINTCNT, 1);
+ if (chip->driver_type == AZX_DRIVER_CTX)
+ azx_writew(chip, RINTCNT, 0xc0);
+ else
+ azx_writew(chip, RINTCNT, 1);
/* enable rirb dma and response irq */
azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
spin_unlock_irq(&chip->reg_lock);
/* clear rirb int */
status = azx_readb(chip, RIRBSTS);
if (status & RIRB_INT_MASK) {
- if (status & RIRB_INT_RESPONSE)
+ if (status & RIRB_INT_RESPONSE) {
+ if (chip->driver_type == AZX_DRIVER_CTX)
+ udelay(80);
azx_update_rirb(chip);
+ }
azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
}
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int bufsize, period_bytes, format_val;
+ unsigned int bufsize, period_bytes, format_val, stream_tag;
int err;
azx_stream_reset(chip, azx_dev);
else
azx_dev->fifo_size = 0;
- return snd_hda_codec_prepare(apcm->codec, hinfo, azx_dev->stream_tag,
+ stream_tag = azx_dev->stream_tag;
+ /* CA-IBG chips need the playback stream starting from 1 */
+ if (chip->driver_type == AZX_DRIVER_CTX &&
+ stream_tag > chip->capture_streams)
+ stream_tag -= chip->capture_streams;
+ return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
azx_dev->format_val, substream);
}
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1025, 0x026f, "Acer Aspire 5538", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_GENERIC },
+ .driver_data = AZX_DRIVER_CTX },
#else
/* this entry seems still valid -- i.e. without emu20kx chip */
- { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC },
+ { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX },
#endif
/* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
spec->multiout.no_share_stream = 1;
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
codec->patch_ops = ad198x_patch_ops;
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
}
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
spec->vmaster_nid = 0x04;
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
codec->patch_ops = ad198x_patch_ops;
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
}
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
}
codec->no_trigger_sense = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
if (cfg->dig_outs &&
snd_hda_get_connections(codec, cfg->dig_out_pins[0],
&spec->dig_out, 1) == 1)
- spec->multiout.dig_out_nid = cfg->dig_out_pins[0];
+ spec->multiout.dig_out_nid = spec->dig_out;
}
static int ca0110_parse_auto_config(struct hda_codec *codec)
static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
CXT5066_LAPTOP),
- SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
- CXT5066_DELL_LAPTOP),
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
+ SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
return -ENODEV;
} else {
/* fallback to the codec default */
- hinfo->channels_min = codec_pars->channels_min;
hinfo->channels_max = codec_pars->channels_max;
hinfo->rates = codec_pars->rates;
hinfo->formats = codec_pars->formats;
spec->init_amp = ALC_INIT_GPIO3;
break;
case 5:
+ default:
spec->init_amp = ALC_INIT_DEFAULT;
break;
}
SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
+ SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_LG),
SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_LG),
SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_LG_LW),
SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_TCL_S700),
SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763),
SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY),
- SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
+ return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x22, 0);
}
return 0x02;
else if (nid >= 0x0c && nid <= 0x0e)
return nid - 0x0c + 0x02;
+ else if (nid == 0x26) /* ALC887-VD has this DAC too */
+ return 0x25;
else
return 0;
}
static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
hda_nid_t dac)
{
- hda_nid_t mix[4];
+ hda_nid_t mix[5];
int i, num;
num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
alc_inithook(codec);
}
+enum {
+ ALC662_FIXUP_ASPIRE,
+ ALC662_FIXUP_IDEAPAD,
+};
+
+static const struct alc_fixup alc662_fixups[] = {
+ [ALC662_FIXUP_ASPIRE] = {
+ .pins = (const struct alc_pincfg[]) {
+ { 0x15, 0x99130112 }, /* subwoofer */
+ { }
+ }
+ },
+ [ALC662_FIXUP_IDEAPAD] = {
+ .pins = (const struct alc_pincfg[]) {
+ { 0x17, 0x99130112 }, /* subwoofer */
+ { }
+ }
+ },
+};
+
+static struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ {}
+};
+
+
+
static int patch_alc662(struct hda_codec *codec)
{
struct alc_spec *spec;
}
if (board_config == ALC662_AUTO) {
+ alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 1);
/* automatic parse from the BIOS config */
err = alc662_parse_auto_config(codec);
if (err < 0) {
spec->vmaster_nid = 0x02;
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC662_AUTO)
+ if (board_config == ALC662_AUTO) {
spec->init_hook = alc662_auto_init;
+ alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
+ }
+
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (!spec->loopback.amplist)
spec->loopback.amplist = alc662_loopbacks;
{
if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){
kfree(codec->chip_name);
- codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
+ if (codec->vendor_id == 0x10ec0887)
+ codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL);
+ else
+ codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
if (!codec->chip_name) {
alc_free(codec);
return -ENOMEM;
{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
.patch = patch_alc882 },
{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
- { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
+ { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 },
{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
.patch = patch_alc882 },
{ .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 },
STAC_92HD83XXX_REF,
STAC_92HD83XXX_PWR_REF,
STAC_DELL_S14,
+ STAC_DELL_E6410,
STAC_92HD83XXX_HP,
STAC_HP_DV7_4000,
STAC_92HD83XXX_MODELS
static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
"Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
{} /* terminator */
};
0x40f000f0, 0x40f000f0,
};
+/* Deliberately turn off 0x0f (Dock Mic) to make it choose Int Mic instead */
+static unsigned int dell_e6410_pin_configs[10] = {
+ 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
+ 0x23011050, 0x40f000f0, 0x400000f0, 0x90a60130,
+ 0x40f000f0, 0x40f000f0,
+};
+
static unsigned int hp_dv7_4000_pin_configs[10] = {
0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
[STAC_DELL_S14] = dell_s14_pin_configs,
+ [STAC_DELL_E6410] = dell_e6410_pin_configs,
[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
};
[STAC_92HD83XXX_REF] = "ref",
[STAC_92HD83XXX_PWR_REF] = "mic-ref",
[STAC_DELL_S14] = "dell-s14",
+ [STAC_DELL_E6410] = "dell-e6410",
[STAC_92HD83XXX_HP] = "hp",
[STAC_HP_DV7_4000] = "hp-dv7-4000",
};
"DFI LanParty", STAC_92HD83XXX_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
"unknown Dell", STAC_DELL_S14),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040a,
+ "Dell E6410", STAC_DELL_E6410),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040b,
+ "Dell E6510", STAC_DELL_E6410),
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
"HP", STAC_92HD83XXX_HP),
{} /* terminator */
.name = "Dell Inspiron 8600", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
+ {
+ .subvendor = 0x1028,
+ .subdevice = 0x0182,
+ .name = "Dell Latitude D610", /* STAC9750/51 */
+ .type = AC97_TUNE_HP_ONLY
+ },
{
.subvendor = 0x1028,
.subdevice = 0x0186,
static const u16 wm8580_reg[] = {
0x0121, 0x017e, 0x007d, 0x0014, /*R3*/
0x0121, 0x017e, 0x007d, 0x0194, /*R7*/
- 0x001c, 0x0002, 0x0002, 0x00c2, /*R11*/
+ 0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/
0x0182, 0x0082, 0x000a, 0x0024, /*R15*/
0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/
0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/
{
switch (reg) {
case WM8900_REG_ID:
- case WM8900_REG_POWER1:
return 1;
default:
return 0;
goto err;
}
- /* Read back from the chip */
- reg = snd_soc_read(codec, WM8900_REG_POWER1);
- reg = (reg >> 12) & 0xf;
- dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
-
wm8900_reset(codec);
/* Turn the chip on */
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
- return wm8904->deemph;
+ ucontrol->value.enumerated.item[0] = wm8904->deemph;
+ return 0;
}
static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
- return wm8955->deemph;
+ ucontrol->value.enumerated.item[0] = wm8955->deemph;
+ return 0;
}
static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
- return wm8960->deemph;
+ ucontrol->value.enumerated.item[0] = wm8960->deemph;
+ return 0;
}
static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
if (fs <= 24000)
reg |= WM8961_DACSLOPE;
else
- reg &= WM8961_DACSLOPE;
+ reg &= ~WM8961_DACSLOPE;
snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
return 0;
freq /= 2;
} else {
dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
- reg &= WM8961_MCLKDIV;
+ reg &= ~WM8961_MCLKDIV;
}
snd_soc_write(codec, WM8961_CLOCKING1, reg);
SOC_DOUBLE_R("Speaker ZC Switch",
WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
7, 1, 0),
-SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0,
+SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 3, 0, 7, 0,
spkboost_tlv),
SOC_ENUM("Speaker Reference", speaker_ref),
SOC_ENUM("Speaker Mode", speaker_mode),