Mirror of https://github.com/S3NEO/android_kernel_samsung_msm8226.git (synced 2024-11-07 03:47:13 +00:00)

Commit 2198a10b50: Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
    net/core/dev.c

192 changed files with 1306 additions and 761 deletions
@@ -478,7 +478,7 @@ static void prepare_hwpoison_fd(void)
 }
 
 if (opt_unpoison && !hwpoison_forget_fd) {
-sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
 hwpoison_forget_fd = checked_open(buf, O_WRONLY);
 }
 }
MAINTAINERS | 24

@@ -969,6 +969,16 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-s5p*/
 
+ARM/SAMSUNG S5P SERIES FIMC SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: arch/arm/plat-s5p/dev-fimc*
+F: arch/arm/plat-samsung/include/plat/*fimc*
+F: drivers/media/video/s5p-fimc/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M: Paul Mundt <lethal@linux-sh.org>
 M: Magnus Damm <magnus.damm@gmail.com>

@@ -2552,7 +2562,7 @@ S: Supported
 F: drivers/scsi/gdt*
 
 GENERIC GPIO I2C DRIVER
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Haavard Skinnemoen <hskinnemoen@gmail.com>
 S: Supported
 F: drivers/i2c/busses/i2c-gpio.c
 F: include/linux/i2c-gpio.h

@@ -3175,7 +3185,7 @@ F: drivers/net/ioc3-eth.c
 
 IOC3 SERIAL DRIVER
 M: Pat Gefre <pfg@sgi.com>
-L: linux-mips@linux-mips.org
+L: linux-serial@vger.kernel.org
 S: Maintained
 F: drivers/serial/ioc3_serial.c

@@ -5050,6 +5060,12 @@ F: drivers/media/common/saa7146*
 F: drivers/media/video/*7146*
 F: include/media/*7146*
 
+SAMSUNG AUDIO (ASoC) DRIVERS
+M: Jassi Brar <jassi.brar@samsung.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Supported
+F: sound/soc/s3c24xx
+
 TLG2300 VIDEO4LINUX-2 DRIVER
 M: Huang Shijie <shijie8@gmail.com>
 M: Kang Yong <kangyong@telegent.com>

@@ -6492,8 +6508,10 @@ F: include/linux/wm97xx.h
 WOLFSON MICROELECTRONICS DRIVERS
 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
 M: Ian Lartey <ian@opensource.wolfsonmicro.com>
+M: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W: http://opensource.wolfsonmicro.com/node/8
+W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
 S: Supported
 F: Documentation/hwmon/wm83??
 F: drivers/leds/leds-wm83*.c
Makefile | 4

@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc7
-NAME = Sheep on Meth
+EXTRAVERSION =
+NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -1101,6 +1101,20 @@ config ARM_ERRATA_720789
 invalidated are not, resulting in an incoherency in the system page
 tables. The workaround changes the TLB flushing routines to invalidate
 entries regardless of the ASID.
+
+config ARM_ERRATA_743622
+bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+depends on CPU_V7
+help
+This option enables the workaround for the 743622 Cortex-A9
+(r2p0..r2p2) erratum. Under very rare conditions, a faulty
+optimisation in the Cortex-A9 Store Buffer may lead to data
+corruption. This workaround sets a specific bit in the diagnostic
+register of the Cortex-A9 which disables the Store Buffer
+optimisation, preventing the defect from occurring. This has no
+visible impact on the overall performance or power consumption of the
+processor.
 
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
 /*
 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-* Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+* Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
 * ALU op with S bit and Rd == 15 :
 * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
 */
-if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */
+if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
+(insn & 0x0ff00000) == 0x03400000 || /* Undef */
 (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
 return INSN_REJECTED;

@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 * *S (bit 20) updates condition codes
 * ADC/SBC/RSC reads the C flag
 */
-insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */
+insn &= 0xffff0fff; /* Rd = r0 */
 asi->insn[0] = insn;
 asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
 emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
@@ -28,17 +28,16 @@
 
 static inline void arch_idle(void)
 {
-#ifndef CONFIG_DEBUG_KERNEL
 /*
 * Disable the processor clock. The processor will be automatically
 * re-enabled by an interrupt or by a reset.
 */
 at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-#else
+#ifndef CONFIG_CPU_ARM920T
 /*
 * Set the processor (CP15) into 'Wait for Interrupt' mode.
-* Unlike disabling the processor clock via the PMC (above)
-* this allows the processor to be woken via JTAG.
+* Post-RM9200 processors need this in conjunction with the above
+* to save power when idle.
 */
 cpu_do_idle();
 #endif
@@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch)
 v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
 m2p_set_control(ch, v);
 
-while (m2p_channel_state(ch) == STATE_ON)
+while (m2p_channel_state(ch) >= STATE_ON)
 cpu_relax();
 
 m2p_set_control(ch, 0x0);
@@ -122,6 +122,7 @@ config MACH_CPUIMX27
 select IMX_HAVE_PLATFORM_IMX_I2C
 select IMX_HAVE_PLATFORM_IMX_UART
 select IMX_HAVE_PLATFORM_MXC_NAND
+select MXC_ULPI if USB_ULPI
 help
 Include support for Eukrea CPUIMX27 platform. This includes
 specific configurations for the module and its peripherals.

@@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void)
 i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
 ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
 
-imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
+imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
 
 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>

@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>

@@ -21,6 +21,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -173,11 +173,6 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
 return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
 }
 
-static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
-{
-return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
-}
-
 static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
 {
 return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);

@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/sysdev.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void)
 }
 
 #if 0
-static void ct_ca9x4_timer_init(void)
+static void __init ct_ca9x4_timer_init(void)
 {
 writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
 writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);

@@ -222,7 +222,7 @@ static struct platform_device pmu_device = {
 .resource = pmu_resources,
 };
 
-static void ct_ca9x4_init(void)
+static void __init ct_ca9x4_init(void)
 {
 int i;

@@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
 }
 
 
-static void v2m_timer_init(void)
+static void __init v2m_timer_init(void)
 {
 writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
 writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 /*
 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 */
-if (WARN_ON(pfn_valid(pfn)))
-return NULL;
+if (pfn_valid(pfn)) {
+printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
+KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+WARN_ON(1);
+}
 
 type = get_mem_type(mtype);
 if (!type)

@@ -248,7 +248,7 @@ static struct mem_type mem_types[] = {
 },
 [MT_MEMORY] = {
 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-L_PTE_USER | L_PTE_EXEC,
+L_PTE_WRITE | L_PTE_EXEC,
 .prot_l1 = PMD_TYPE_TABLE,
 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 .domain = DOMAIN_KERNEL,

@@ -259,7 +259,7 @@ static struct mem_type mem_types[] = {
 },
 [MT_MEMORY_NONCACHED] = {
 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
 .prot_l1 = PMD_TYPE_TABLE,
 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 .domain = DOMAIN_KERNEL,
@@ -253,6 +253,14 @@ __v7_setup:
 orreq r10, r10, #1 << 22 @ set bit #22
 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
 #endif
+#ifdef CONFIG_ARM_ERRATA_743622
+teq r6, #0x20 @ present in r2p0
+teqne r6, #0x21 @ present in r2p1
+teqne r6, #0x22 @ present in r2p2
+mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+orreq r10, r10, #1 << 6 @ set bit #6
+mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
 
 3: mov r10, #0
 #ifdef HARVARD_CACHE

@@ -365,7 +373,7 @@ __v7_ca9mp_proc_info:
 b __v7_ca9mp_setup
 .long cpu_arch_name
 .long cpu_elf_name
-.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 .long cpu_v7_name
 .long v7_processor_functions
 .long v7wbi_tlb_fns
@@ -320,6 +320,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
 if ((start <= da) && (da < start + bytes)) {
 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
 __func__, start, da, bytes);
+iotlb_load_cr(obj, &cr);
 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 }
 }
@@ -435,7 +435,6 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
 static int s3c_adc_resume(struct platform_device *pdev)
 {
 struct adc_device *adc = platform_get_drvdata(pdev);
-unsigned long flags;
 
 clk_enable(adc->clk);
 enable_irq(adc->irq);
@@ -48,6 +48,9 @@
 #include <plat/clock.h>
 #include <plat/cpu.h>
 
+#include <linux/serial_core.h>
+#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
+
 /* clock information */
 
 static LIST_HEAD(clocks);

@@ -65,6 +68,28 @@ static int clk_null_enable(struct clk *clk, int enable)
 return 0;
 }
 
+static int dev_is_s3c_uart(struct device *dev)
+{
+struct platform_device **pdev = s3c24xx_uart_devs;
+int i;
+for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
+if (*pdev && dev == &(*pdev)->dev)
+return 1;
+return 0;
+}
+
+/*
+* Serial drivers call get_clock() very early, before platform bus
+* has been set up, this requires a special check to let them get
+* a proper clock
+*/
+
+static int dev_is_platform_device(struct device *dev)
+{
+return dev->bus == &platform_bus_type ||
+(dev->bus == NULL && dev_is_s3c_uart(dev));
+}
+
 /* Clock API calls */
 
 struct clk *clk_get(struct device *dev, const char *id)

@@ -73,7 +98,7 @@ struct clk *clk_get(struct device *dev, const char *id)
 struct clk *clk = ERR_PTR(-ENOENT);
 int idno;
 
-if (dev == NULL || dev->bus != &platform_bus_type)
+if (dev == NULL || !dev_is_platform_device(dev))
 idno = -1;
 else
 idno = to_platform_device(dev)->id;
@@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t;
 * These are used to set parameters in the core dumps.
 */
 #define ELF_CLASS ELFCLASS32
-#if defined(__LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN__)
 #define ELF_DATA ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
+#elif defined(__BIG_ENDIAN__)
 #define ELF_DATA ELFDATA2MSB
 #else
 #error no endian defined
arch/m32r/kernel/.gitignore (new file, vendored) | 1

@@ -0,0 +1 @@
+vmlinux.lds
@@ -28,6 +28,8 @@
 
 #define DEBUG_SIG 0
 
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 unsigned long r2, unsigned long r3, unsigned long r4,

@@ -254,7 +256,7 @@ give_sigsegv:
 static int prev_insn(struct pt_regs *regs)
 {
 u16 inst;
-if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
+if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
 return -EFAULT;
 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
 regs->bpc -= 2;
@@ -7,6 +7,10 @@ subdir-ccflags-y := -Werror
 include arch/mips/Kbuild.platforms
 obj-y := $(platform-y)
 
+# make clean traverses $(obj-) without having included .config, so
+# everything ends up here
+obj- := $(platform-)
+
 # mips object files
 # The object files are linked as core-y files would be linked
@@ -881,11 +881,15 @@ config NO_IOPORT
 config GENERIC_ISA_DMA
 bool
 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
+select ISA_DMA_API
 
 config GENERIC_ISA_DMA_SUPPORT_BROKEN
 bool
 select GENERIC_ISA_DMA
 
+config ISA_DMA_API
+bool
+
 config GENERIC_GPIO
 bool
@@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
 vmlinuz.srec: vmlinuz
 $(call cmd,objcopy)
 
-clean-files := $(objtree)/vmlinuz.*
+clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}

@@ -1,7 +1,7 @@
 #
 # DECstation family
 #
-platform-$(CONFIG_MACH_DECSTATION) = dec/
+platform-$(CONFIG_MACH_DECSTATION) += dec/
 cflags-$(CONFIG_MACH_DECSTATION) += \
 -I$(srctree)/arch/mips/include/asm/mach-dec
 libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
@@ -56,6 +56,7 @@
 */
 
 #ifdef CONFIG_32BIT
+#include <linux/types.h>
 
 struct flock {
 short l_type;

@@ -88,6 +88,7 @@ typedef struct siginfo {
 #ifdef __ARCH_SI_TRAPNO
 int _trapno; /* TRAP # which caused the signal */
 #endif
+short _addr_lsb;
 } _sigfault;
 
 /* SIGPOLL, SIGXFSZ (To do ...) */
@@ -1,3 +1,3 @@
-core-$(CONFIG_MACH_JZ4740) += arch/mips/jz4740/
+platform-$(CONFIG_MACH_JZ4740) += jz4740/
 cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
 load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000
@@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs)
 return -EFAULT;
 }
 
-regs->regs[0] = 0;
 switch (insn.i_format.opcode) {
 /*
 * jr and jalr are in r_format format.
@@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 /* do the secure computing check first */
 if (!entryexit)
-secure_computing(regs->regs[0]);
+secure_computing(regs->regs[2]);
 
 if (unlikely(current->audit_context) && entryexit)
 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),

@@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 out:
 if (unlikely(current->audit_context) && !entryexit)
-audit_syscall_entry(audit_arch(), regs->regs[0],
+audit_syscall_entry(audit_arch(), regs->regs[2],
 regs->regs[4], regs->regs[5],
 regs->regs[6], regs->regs[7]);
 }
@@ -63,9 +63,9 @@ stack_done:
 sw t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+lw t1, PT_R2(sp) # syscall number
 negu v0 # error
-sw v0, PT_R0(sp) # set flag for syscall
-# restarting
+sw t1, PT_R0(sp) # save it for syscall restarting
 1: sw v0, PT_R2(sp) # result
 
 o32_syscall_exit:

@@ -104,9 +104,9 @@ syscall_trace_entry:
 sw t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+lw t1, PT_R2(sp) # syscall number
 negu v0 # error
-sw v0, PT_R0(sp) # set flag for syscall
-# restarting
+sw t1, PT_R0(sp) # save it for syscall restarting
 1: sw v0, PT_R2(sp) # result
 
 j syscall_exit

@@ -169,8 +169,7 @@ stackargs:
 * We probably should handle this case a bit more drastic.
 */
 bad_stack:
-negu v0 # error
-sw v0, PT_R0(sp)
+li v0, EFAULT
 sw v0, PT_R2(sp)
 li t0, 1 # set error flag
 sw t0, PT_R7(sp)
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # set flag for syscall
-# restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 n64_syscall_exit:

@@ -109,8 +109,9 @@ syscall_trace_entry:
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # set flag for syscall restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 j syscall_exit
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # set flag for syscall restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 local_irq_disable # make sure need_resched and

@@ -106,8 +107,9 @@ n32_syscall_trace_entry:
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # set flag for syscall restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 j syscall_exit

@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table)
 PTR sys_cacheflush
 PTR sys_cachectl
 PTR sys_sysmips
-PTR sys_io_setup /* 6200 */
+PTR compat_sys_io_setup /* 6200 */
 PTR sys_io_destroy
-PTR sys_io_getevents
-PTR sys_io_submit
+PTR compat_sys_io_getevents
+PTR compat_sys_io_submit
 PTR sys_io_cancel
 PTR sys_exit_group /* 6205 */
 PTR sys_lookup_dcookie
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp)
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # flag for syscall restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 o32_syscall_exit:

@@ -142,8 +143,9 @@ trace_a_syscall:
 sd t0, PT_R7(sp) # set error flag
 beqz t0, 1f
 
+ld t1, PT_R2(sp) # syscall number
 dnegu v0 # error
-sd v0, PT_R0(sp) # set flag for syscall restarting
+sd t1, PT_R0(sp) # save it for syscall restarting
 1: sd v0, PT_R2(sp) # result
 
 j syscall_exit

@@ -154,8 +156,7 @@ trace_a_syscall:
 * The stackpointer for a call with more than 4 arguments is bad.
 */
 bad_stack:
-dnegu v0 # error
-sd v0, PT_R0(sp)
+li v0, EFAULT
 sd v0, PT_R2(sp)
 li t0, 1 # set error flag
 sd t0, PT_R7(sp)

@@ -444,10 +445,10 @@ sys_call_table:
 PTR compat_sys_futex
 PTR compat_sys_sched_setaffinity
 PTR compat_sys_sched_getaffinity /* 4240 */
-PTR sys_io_setup
+PTR compat_sys_io_setup
 PTR sys_io_destroy
-PTR sys_io_getevents
-PTR sys_io_submit
+PTR compat_sys_io_getevents
+PTR compat_sys_io_submit
 PTR sys_io_cancel /* 4245 */
 PTR sys_exit_group
 PTR sys32_lookup_dcookie
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 struct rt_sigframe __user *frame;
 sigset_t set;
-stack_t st;
 int sig;
 
 frame = (struct rt_sigframe __user *) regs.regs[29];

@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 else if (sig)
 force_sig(sig, current);
 
-if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
-goto badframe;
 /* It is more difficult to avoid calling this function than to
 call it and ignore errors. */
-do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
 
 /*
 * Don't let your children do this ...
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 struct mips_abi *abi = current->thread.abi;
 void *vdso = current->mm->context.vdso;
 
-switch(regs->regs[0]) {
-case ERESTART_RESTARTBLOCK:
-case ERESTARTNOHAND:
-regs->regs[2] = EINTR;
-break;
-case ERESTARTSYS:
-if (!(ka->sa.sa_flags & SA_RESTART)) {
+if (regs->regs[0]) {
+switch(regs->regs[2]) {
+case ERESTART_RESTARTBLOCK:
+case ERESTARTNOHAND:
 regs->regs[2] = EINTR;
 break;
+case ERESTARTSYS:
+if (!(ka->sa.sa_flags & SA_RESTART)) {
+regs->regs[2] = EINTR;
+break;
+}
+/* fallthrough */
+case ERESTARTNOINTR:
+regs->regs[7] = regs->regs[26];
+regs->regs[2] = regs->regs[0];
+regs->cp0_epc -= 4;
 }
-/* fallthrough */
-case ERESTARTNOINTR: /* Userland will reload $v0. */
-regs->regs[7] = regs->regs[26];
-regs->cp0_epc -= 8;
-}
 
 regs->regs[0] = 0; /* Don't deal with this again. */
+}
 
 if (sig_uses_siginfo(ka))
 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,

@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 ret = abi->setup_frame(vdso + abi->signal_return_offset,
 ka, regs, sig, oldset);
 
+if (ret)
+return ret;
+
 spin_lock_irq(&current->sighand->siglock);
 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
 if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs)
 return;
 }
 
-/*
-* Who's code doesn't conform to the restartable syscall convention
-* dies here!!! The li instruction, a single machine instruction,
-* must directly be followed by the syscall instruction.
-*/
 if (regs->regs[0]) {
 if (regs->regs[2] == ERESTARTNOHAND ||
 regs->regs[2] == ERESTARTSYS ||
 regs->regs[2] == ERESTARTNOINTR) {
+regs->regs[2] = regs->regs[0];
 regs->regs[7] = regs->regs[26];
-regs->cp0_epc -= 8;
+regs->cp0_epc -= 4;
 }
 if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
 regs->regs[2] = current->thread.abi->restart;
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 struct rt_sigframe_n32 __user *frame;
+mm_segment_t old_fs;
 sigset_t set;
 stack_t st;
 s32 sp;

@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 
 /* It is more difficult to avoid calling this function than to
 call it and ignore errors. */
+old_fs = get_fs();
+set_fs(KERNEL_DS);
 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+set_fs(old_fs);
 
 /*
 * Don't let your children do this ...
@@ -109,8 +109,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 unsigned long value;
 unsigned int res;
 
-regs->regs[0] = 0;
-
 /*
 * This load never faults.
 */
@@ -40,6 +40,11 @@ static char *mixer = HOSTAUDIO_DEV_MIXER;
 " This is used to specify the host mixer device to the hostaudio driver.\n"\
 " The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
 
+module_param(dsp, charp, 0644);
+MODULE_PARM_DESC(dsp, DSP_HELP);
+module_param(mixer, charp, 0644);
+MODULE_PARM_DESC(mixer, MIXER_HELP);
+
 #ifndef MODULE
 static int set_dsp(char *name, int *add)
 {

@@ -56,15 +61,6 @@ static int set_mixer(char *name, int *add)
 }
 
 __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
 
-#else /*MODULE*/
-
-module_param(dsp, charp, 0644);
-MODULE_PARM_DESC(dsp, DSP_HELP);
-
-module_param(mixer, charp, 0644);
-MODULE_PARM_DESC(mixer, MIXER_HELP);
-
 #endif
 
 /* /dev/dsp file operations */
@@ -163,6 +163,7 @@ struct ubd {
 struct scatterlist sg[MAX_SG];
 struct request *request;
 int start_sg, end_sg;
+sector_t rq_pos;
 };
 
 #define DEFAULT_COW { \

@@ -187,6 +188,7 @@ struct ubd {
 .request = NULL, \
 .start_sg = 0, \
 .end_sg = 0, \
+.rq_pos = 0, \
 }
 
 /* Protected by ubd_lock */

@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
 {
 struct io_thread_req *io_req;
 struct request *req;
-sector_t sector;
 int n;
 
 while(1){
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
 return;
 
 dev->request = req;
+dev->rq_pos = blk_rq_pos(req);
 dev->start_sg = 0;
 dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
 }
 
 req = dev->request;
-sector = blk_rq_pos(req);
 while(dev->start_sg < dev->end_sg){
 struct scatterlist *sg = &dev->sg[dev->start_sg];

@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
 return;
 }
 prepare_request(req, io_req,
-(unsigned long long)sector << 9,
+(unsigned long long)dev->rq_pos << 9,
 sg->offset, sg->length, sg_page(sg));
 
-sector += sg->length >> 9;
 n = os_write_file(thread_fd, &io_req,
 sizeof(struct io_thread_req *));
 if(n != sizeof(struct io_thread_req *)){

@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
 return;
 }
 
+dev->rq_pos += sg->length >> 9;
 dev->start_sg++;
 }
 dev->end_sg = 0;
@@ -34,7 +34,7 @@
 #include <asm/ia32.h>
 
 #undef WARN_OLD
-#undef CORE_DUMP /* probably broken */
+#undef CORE_DUMP /* definitely broken */
 
 static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
 static int load_aout_library(struct file *);

@@ -131,21 +131,15 @@ static void set_brk(unsigned long start, unsigned long end)
 * macros to write out all the necessary info.
 */
 
-static int dump_write(struct file *file, const void *addr, int nr)
-{
-return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
+#include <linux/coredump.h>
 
 #define DUMP_WRITE(addr, nr) \
 if (!dump_write(file, (void *)(addr), (nr))) \
 goto end_coredump;
 
 #define DUMP_SEEK(offset) \
-if (file->f_op->llseek) { \
-if (file->f_op->llseek(file, (offset), 0) != (offset)) \
-goto end_coredump; \
-} else \
-file->f_pos = (offset)
+if (!dump_seek(file, offset)) \
+goto end_coredump;
 
 #define START_DATA() (u.u_tsize << PAGE_SHIFT)
 #define START_STACK(u) (u.start_stack)

@@ -217,12 +211,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
 dump_size = dump.u_ssize << PAGE_SHIFT;
 DUMP_WRITE(dump_start, dump_size);
 }
-/*
-* Finally dump the task struct. Not be used by gdb, but
-* could be useful
-*/
-set_fs(KERNEL_DS);
-DUMP_WRITE(current, sizeof(*current));
 end_coredump:
 set_fs(fs);
 return has_dumped;
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 kvm_read_fs(void)
-{
-u16 seg;
-asm("mov %%fs, %0" : "=g"(seg));
-return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
-u16 seg;
-asm("mov %%gs, %0" : "=g"(seg));
-return seg;
-}
-
 static inline u16 kvm_read_ldt(void)
 {
 u16 ldt;

@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void)
 return ldt;
 }
 
-static inline void kvm_load_fs(u16 sel)
-{
-asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
-asm("mov %0, %%gs" : : "rm"(sel));
-}
-
 static inline void kvm_load_ldt(u16 sel)
 {
 asm("lldt %0" : : "rm"(sel));
@@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 address = (low & MASK_BLKPTR_LO) >> 21;
 if (!address)
 break;
+
 address += MCG_XBLK_ADDR;
 } else
 ++address;

@@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 if (rdmsr_safe(address, &low, &high))
 break;
 
-if (!(high & MASK_VALID_HI)) {
-if (block)
-continue;
-else
-break;
-}
+if (!(high & MASK_VALID_HI))
+continue;
 
 if (!(high & MASK_CNTP_HI) ||
 (high & MASK_LOCKED_HI))
@@ -216,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
 err = sysfs_add_file_to_group(&sys_dev->kobj,
 &attr_core_power_limit_count.attr,
 thermal_attr_group.name);
-if (cpu_has(c, X86_FEATURE_PTS))
+if (cpu_has(c, X86_FEATURE_PTS)) {
 err = sysfs_add_file_to_group(&sys_dev->kobj,
 &attr_package_throttle_count.attr,
 thermal_attr_group.name);

@@ -224,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
 err = sysfs_add_file_to_group(&sys_dev->kobj,
 &attr_package_power_limit_count.attr,
 thermal_attr_group.name);
+}
 
 return err;
 }
@@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 control->iopm_base_pa = iopm_base;
 control->msrpm_base_pa = __pa(svm->msrpm);
-control->tsc_offset = 0;
 control->int_ctl = V_INTR_MASKING_MASK;
 
 init_seg(&save->es);

@@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 svm->asid_generation = 0;
 init_vmcb(svm);
+svm->vmcb->control.tsc_offset = 0-native_read_tsc();
 
 err = fx_init(&svm->vcpu);
 if (err)

@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 sync_lapic_to_cr8(vcpu);
 
 save_host_msrs(vcpu);
-fs_selector = kvm_read_fs();
-gs_selector = kvm_read_gs();
+savesegment(fs, fs_selector);
+savesegment(gs, gs_selector);
 ldt_selector = kvm_read_ldt();
 svm->vmcb->save.cr2 = vcpu->arch.cr2;
 /* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-kvm_load_fs(fs_selector);
-kvm_load_gs(gs_selector);
-kvm_load_ldt(ldt_selector);
 load_host_msrs(vcpu);
+loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+load_gs_index(gs_selector);
+wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+loadsegment(gs, gs_selector);
+#endif
+kvm_load_ldt(ldt_selector);
 
 reload_tss(vcpu);
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 */
 vmx->host_state.ldt_sel = kvm_read_ldt();
 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-vmx->host_state.fs_sel = kvm_read_fs();
+savesegment(fs, vmx->host_state.fs_sel);
 if (!(vmx->host_state.fs_sel & 7)) {
 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 vmx->host_state.fs_reload_needed = 0;

@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 vmcs_write16(HOST_FS_SELECTOR, 0);
 vmx->host_state.fs_reload_needed = 1;
 }
-vmx->host_state.gs_sel = kvm_read_gs();
+savesegment(gs, vmx->host_state.gs_sel);
 if (!(vmx->host_state.gs_sel & 7))
 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 else {

@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-unsigned long flags;
-
 if (!vmx->host_state.loaded)
 return;
 
 ++vmx->vcpu.stat.host_state_reload;
 vmx->host_state.loaded = 0;
 if (vmx->host_state.fs_reload_needed)
-kvm_load_fs(vmx->host_state.fs_sel);
+loadsegment(fs, vmx->host_state.fs_sel);
 if (vmx->host_state.gs_ldt_reload_needed) {
 kvm_load_ldt(vmx->host_state.ldt_sel);
-/*
-* If we have to reload gs, we must take care to
-* preserve our gs base.
-*/
-local_irq_save(flags);
-kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+load_gs_index(vmx->host_state.gs_sel);
+wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+loadsegment(gs, vmx->host_state.gs_sel);
 #endif
-local_irq_restore(flags);
 }
 reload_tss();
 #ifdef CONFIG_X86_64

@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
-vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
-vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
+vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
 #ifdef CONFIG_X86_64
 rdmsrl(MSR_FS_BASE, a);
@@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 return -1;
 }
 
-for_each_node_mask(i, nodes_parsed)
-e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
-nodes[i].end >> PAGE_SHIFT);
+for (i = 0; i < num_node_memblks; i++)
+e820_register_active_regions(memblk_nodeid[i],
+node_memblk_range[i].start >> PAGE_SHIFT,
+node_memblk_range[i].end >> PAGE_SHIFT);
+
 /* for out of order entries in SRAT */
 sort_node_map();
 if (!nodes_cover_memory(nodes)) {
@@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 /*
 * fill in all the output members
 */
-hdr->device_status = status_byte(rq->errors);
+hdr->device_status = rq->errors & 0xff;
 hdr->transport_status = host_byte(rq->errors);
 hdr->driver_status = driver_byte(rq->errors);
 hdr->info = 0;

@@ -938,6 +938,7 @@ int elv_register_queue(struct request_queue *q)
 }
 }
 kobject_uevent(&e->kobj, KOBJ_ADD);
+e->registered = 1;
 }
 return error;
 }
@@ -947,6 +948,7 @@ static void __elv_unregister_queue(struct elevator_queue *e)
 {
 kobject_uevent(&e->kobj, KOBJ_REMOVE);
 kobject_del(&e->kobj);
+e->registered = 0;
 }
 
 void elv_unregister_queue(struct request_queue *q)

@@ -1042,11 +1044,13 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 spin_unlock_irq(q->queue_lock);
 
-__elv_unregister_queue(old_elevator);
+if (old_elevator->registered) {
+__elv_unregister_queue(old_elevator);
 
 err = elv_register_queue(q);
 if (err)
 goto fail_register;
+}
 
 /*
 * finally exit old elevator and turn off BYPASS.
@@ -204,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		},
 	},
 	{
+	/*
+	 * There have a NVIF method in MSI GX723 DSDT need call by Nvidia
+	 * driver (e.g. nouveau) when user press brightness hotkey.
+	 * Currently, nouveau driver didn't do the job and it causes there
+	 * have a infinite while loop in DSDT when user press hotkey.
+	 * We add MSI GX723's dmi information to this table for workaround
+	 * this issue.
+	 * Will remove MSI GX723 from the table after nouveau grows support.
+	 */
+	.callback = dmi_disable_osi_vista,
+	.ident = "MSI GX723",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+		},
+	},
+	{
 	.callback = dmi_disable_osi_vista,
 	.ident = "Sony VGN-NS10J_S",
 	.matches = {
@@ -346,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void)
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    early_init_pdc, NULL, NULL, NULL);
+	acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
 }
@@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 			memcpy(buf, dev->bounce_buf+offset, size);
 		offset += size;
 		flush_kernel_dcache_page(bvec->bv_page);
-		bvec_kunmap_irq(bvec, &flags);
+		bvec_kunmap_irq(buf, &flags);
 		i++;
 	}
 }
@@ -202,6 +202,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 	struct virtio_blk *vblk = disk->private_data;
 	struct request *req;
 	struct bio *bio;
+	int err;
 
 	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
 			   GFP_KERNEL);
@@ -215,7 +216,10 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 	}
 
 	req->cmd_type = REQ_TYPE_SPECIAL;
-	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+	blk_put_request(req);
+
+	return err;
 }
 
 static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
@@ -459,9 +459,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
 
 	/*
 	 * Wait till the host acknowledges it pushed out the data we
-	 * sent. This is done for ports in blocking mode or for data
-	 * from the hvc_console; the tty operations are performed with
-	 * spinlocks held so we can't sleep here.
+	 * sent.  This is done for data from the hvc_console; the tty
+	 * operations are performed with spinlocks held so we can't
+	 * sleep here.  An alternative would be to copy the data to a
+	 * buffer and relax the spinning requirement.  The downside is
+	 * we need to kmalloc a GFP_ATOMIC buffer each time the
+	 * console driver writes something out.
 	 */
 	while (!virtqueue_get_buf(out_vq, &len))
 		cpu_relax();
@@ -626,6 +629,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 		goto free_buf;
 	}
 
+	/*
+	 * We now ask send_buf() to not spin for generic ports -- we
+	 * can re-use the same code path that non-blocking file
+	 * descriptors take for blocking file descriptors since the
+	 * wait is already done and we're certain the write will go
+	 * through to the host.
+	 */
+	nonblock = true;
 	ret = send_buf(port, buf, count, nonblock);
 
 	if (nonblock && ret > 0)
@@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_tx_status = ioat_tx_status;
+	dma->device_tx_status = ioat_dma_tx_status;
 
 	err = ioat_probe(device);
 	if (err)
@@ -2840,7 +2840,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
 			       const struct pci_device_id *ent)
 {
 	struct fw_ohci *ohci;
-	u32 bus_options, max_receive, link_speed, version, link_enh;
+	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
 	int i, err, n_ir, n_it;
 	size_t size;
@@ -2894,23 +2894,6 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	if (param_quirks)
 		ohci->quirks = param_quirks;
 
-	/* TI OHCI-Lynx and compatible: set recommended configuration bits. */
-	if (dev->vendor == PCI_VENDOR_ID_TI) {
-		pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
-
-		/* adjust latency of ATx FIFO: use 1.7 KB threshold */
-		link_enh &= ~TI_LinkEnh_atx_thresh_mask;
-		link_enh |= TI_LinkEnh_atx_thresh_1_7K;
-
-		/* use priority arbitration for asynchronous responses */
-		link_enh |= TI_LinkEnh_enab_unfair;
-
-		/* required for aPhyEnhanceEnable to work */
-		link_enh |= TI_LinkEnh_enab_accel;
-
-		pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
-	}
-
 	ar_context_init(&ohci->ar_request_ctx, ohci,
 			OHCI1394_AsReqRcvContextControlSet);
@@ -155,12 +155,4 @@
 
 #define OHCI1394_phy_tcode		0xe
 
-/* TI extensions */
-
-#define PCI_CFG_TI_LinkEnh		0xf4
-#define  TI_LinkEnh_enab_accel		0x00000002
-#define  TI_LinkEnh_enab_unfair		0x00000080
-#define  TI_LinkEnh_atx_thresh_mask	0x00003000
-#define  TI_LinkEnh_atx_thresh_1_7K	0x00001000
-
 #endif /* _FIREWIRE_OHCI_H */
@@ -2231,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->mchdev_lock = &mchdev_lock;
 	spin_unlock(&mchdev_lock);
 
+	/* XXX Prevent module unload due to memory corruption bugs. */
+	__module_get(THIS_MODULE);
+
 	return 0;
 
 out_workqueue_free:
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_handle_unreference(ifb->obj);
 		drm_gem_object_unreference(ifb->obj);
+		ifb->obj = NULL;
 	}
 
 	return 0;
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
-		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
 	mutex_unlock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
 	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	drm_mm_takedown(&chan->notifier_heap);
 }
@@ -1137,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
 	WREG32(RCU_IND_INDEX, 0x203);
 	efuse_straps_3 = RREG32(RCU_IND_DATA);
-	efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
+	efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
 
 	switch(efuse_box_bit_127_124) {
 	case 0x0:
@@ -1407,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
@@ -1520,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL, 0);
+	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 		return r;
 	}
 	rdev->cp.ready = true;
+	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
 	return 0;
 }
@@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	rdev->cp.ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2295,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
 		/* FIXME we don't use the second aperture yet when we could use it */
 		if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
 			rdev->mc.visible_vram_size = rdev->mc.aper_size;
+		rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 		config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
 		if (rdev->flags & RADEON_IS_IGP) {
 			uint32_t tom;
@@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r600_vram_gtt_location(rdev, &rdev->mc);
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 }
@@ -2910,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL, 0);
+	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
 	if (ASIC_IS_DCE3(rdev)) {
@@ -532,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev)
 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
 	return 0;
 }
@@ -539,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev)
 {
 	int r;
 
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	if (rdev->r600_blit.shader_obj == NULL)
 		return;
 	/* If we can't reserve the bo, unref should be enough to destroy
@@ -344,6 +344,7 @@ struct radeon_mc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
+	u64			active_vram_size;
 	u64			gtt_size;
 	u64			gtt_start;
 	u64			gtt_end;
@@ -1558,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
 		switch (tv_info->ucTV_BootUpDefaultStandard) {
 		case ATOM_TV_NTSC:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Default TV standard: NTSC\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
 			break;
 		case ATOM_TV_NTSCJ:
 			tv_std = TV_STD_NTSC_J;
-			DRM_INFO("Default TV standard: NTSC-J\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
 			break;
 		case ATOM_TV_PAL:
 			tv_std = TV_STD_PAL;
-			DRM_INFO("Default TV standard: PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
 			break;
 		case ATOM_TV_PALM:
 			tv_std = TV_STD_PAL_M;
-			DRM_INFO("Default TV standard: PAL-M\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
 			break;
 		case ATOM_TV_PALN:
 			tv_std = TV_STD_PAL_N;
-			DRM_INFO("Default TV standard: PAL-N\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
 			break;
 		case ATOM_TV_PALCN:
 			tv_std = TV_STD_PAL_CN;
-			DRM_INFO("Default TV standard: PAL-CN\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
 			break;
 		case ATOM_TV_PAL60:
 			tv_std = TV_STD_PAL_60;
-			DRM_INFO("Default TV standard: PAL-60\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
 			break;
 		case ATOM_TV_SECAM:
 			tv_std = TV_STD_SECAM;
-			DRM_INFO("Default TV standard: SECAM\n");
+			DRM_DEBUG_KMS("Default TV standard: SECAM\n");
 			break;
 		default:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+			DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
 			break;
 		}
 	}
@@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
 		switch (RBIOS8(tv_info + 7) & 0xf) {
 		case 1:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Default TV standard: NTSC\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
 			break;
 		case 2:
 			tv_std = TV_STD_PAL;
-			DRM_INFO("Default TV standard: PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
 			break;
 		case 3:
 			tv_std = TV_STD_PAL_M;
-			DRM_INFO("Default TV standard: PAL-M\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
 			break;
 		case 4:
 			tv_std = TV_STD_PAL_60;
-			DRM_INFO("Default TV standard: PAL-60\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
 			break;
 		case 5:
 			tv_std = TV_STD_NTSC_J;
-			DRM_INFO("Default TV standard: NTSC-J\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
 			break;
 		case 6:
 			tv_std = TV_STD_SCART_PAL;
-			DRM_INFO("Default TV standard: SCART-PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
 			break;
 		default:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO
+			DRM_DEBUG_KMS
 			    ("Unknown TV standard; defaulting to NTSC\n");
 			break;
 		}
 
 		switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
 		case 0:
-			DRM_INFO("29.498928713 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
 			break;
 		case 1:
-			DRM_INFO("28.636360000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
 			break;
 		case 2:
-			DRM_INFO("14.318180000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
 			break;
 		case 3:
-			DRM_INFO("27.000000000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
 			break;
 		default:
 			break;
@@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
 
 	if (tmds_info) {
 		ver = RBIOS8(tmds_info);
-		DRM_INFO("DFP table revision: %d\n", ver);
+		DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
 		if (ver == 3) {
 			n = RBIOS8(tmds_info + 5) + 1;
 			if (n > 4)
@@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
 	offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
 	if (offset) {
 		ver = RBIOS8(offset);
-		DRM_INFO("External TMDS Table revision: %d\n", ver);
+		DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
 		tmds->slave_addr = RBIOS8(offset + 4 + 2);
 		tmds->slave_addr >>= 1; /* 7 bit addressing */
 		gpio = RBIOS8(offset + 4 + 3);
@@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_device *rdev = crtc->dev->dev_private;
 	int xorigin = 0, yorigin = 0;
+	int w = radeon_crtc->cursor_width;
 
 	if (x < 0)
 		xorigin = -x + 1;
@@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	if (yorigin >= CURSOR_HEIGHT)
 		yorigin = CURSOR_HEIGHT - 1;
 
-	radeon_lock_cursor(crtc, true);
-	if (ASIC_IS_DCE4(rdev)) {
-		/* cursors are offset into the total surface */
-		x += crtc->x;
-		y += crtc->y;
-		DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
-		/* XXX: check if evergreen has the same issues as avivo chips */
-		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
-		       ((xorigin ? 0 : x) << 16) |
-		       (yorigin ? 0 : y));
-		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
-		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
-		       ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
-	} else if (ASIC_IS_AVIVO(rdev)) {
-		int w = radeon_crtc->cursor_width;
+	if (ASIC_IS_AVIVO(rdev)) {
 		int i = 0;
 		struct drm_crtc *crtc_p;
 
@@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 			if (w <= 0)
 				w = 1;
 		}
+	}
 
+	radeon_lock_cursor(crtc, true);
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+		       ((xorigin ? 0 : x) << 16) |
+		       (yorigin ? 0 : y));
+		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else if (ASIC_IS_AVIVO(rdev)) {
 		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
 		       ((xorigin ? 0 : x) << 16) |
 		       (yorigin ? 0 : y));
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 		radeon_bo_unpin(rbo);
 		radeon_bo_unreserve(rbo);
 	}
-	drm_gem_object_handle_unreference(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 }
 
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0;
 
 	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = 0;
+	rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 	int r;
 
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS)
-			dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+	if (unlikely(r != 0))
 		return r;
-	}
 	spin_lock(&bo->tbo.lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
@@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev)
 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	base = RREG32_MC(R_000004_MC_FB_LOCATION);
 	base = G_000004_MC_FB_START(base) << 16;
@@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev)
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
@@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 }
 
@@ -992,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
@@ -441,6 +441,43 @@ out_err:
 	return ret;
 }
 
+/**
+ * Call bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (bo->ttm) {
+
+		/**
+		 * Release the lru_lock, since we don't want to have
+		 * an atomic requirement on ttm_tt[unbind|destroy].
+		 */
+
+		spin_unlock(&glob->lru_lock);
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+		spin_lock(&glob->lru_lock);
+	}
+
+	if (bo->mem.mm_node) {
+		drm_mm_put_block(bo->mem.mm_node);
+		bo->mem.mm_node = NULL;
+	}
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	spin_unlock(&glob->lru_lock);
+}
+
+
 /**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	int ret;
 
 	spin_lock(&bo->lock);
+retry:
 	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
-		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-		BUG_ON(ret);
-		if (bo->ttm)
-			ttm_tt_unbind(bo->ttm);
+		/**
+		 * Someone else has the object reserved. Bail and retry.
+		 */
+
+		if (unlikely(ret == -EBUSY)) {
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			goto requeue;
+		}
+
+		/**
+		 * We can re-check for sync object without taking
+		 * the bo::lock since setting the sync object requires
+		 * also bo::reserved. A busy object at this point may
+		 * be caused by another thread starting an accelerated
+		 * eviction.
+		 */
+
+		if (unlikely(bo->sync_obj)) {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			if (remove_all)
+				goto retry;
+			else
+				goto requeue;
+		}
+
+		put_count = ttm_bo_del_from_lru(bo);
 
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			++put_count;
 		}
-		if (bo->mem.mm_node) {
-			drm_mm_put_block(bo->mem.mm_node);
-			bo->mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
 
-		atomic_set(&bo->reserved, 0);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 		return 0;
 	}
+requeue:
 	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
@@ -237,6 +237,8 @@ static const struct hid_device_id cando_devices[] = {
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, cando_devices);
@@ -1292,6 +1292,7 @@ static const struct hid_device_id hid_blacklist[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
@@ -134,6 +134,7 @@
 #define USB_VENDOR_ID_CANDO		0x2087
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6	0x0b03
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6	0x0f01
 
 #define USB_VENDOR_ID_CH		0x068e
 #define USB_DEVICE_ID_CH_PRO_PEDALS	0x00f2
@@ -503,6 +504,7 @@
 
 #define USB_VENDOR_ID_TURBOX		0x062a
 #define USB_DEVICE_ID_TURBOX_KEYBOARD	0x0201
+#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART	0x7100
 
 #define USB_VENDOR_ID_TWINHAN		0x6253
 #define USB_DEVICE_ID_TWINHAN_IR_REMOTE	0x0100
@@ -109,6 +109,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
 	int ret = 0;
 
 	mutex_lock(&minors_lock);
+
+	if (!hidraw_table[minor]) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	dev = hidraw_table[minor]->hid;
 
 	if (!dev->hid_output_raw_report) {
@@ -244,6 +250,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
 	mutex_lock(&minors_lock);
 	dev = hidraw_table[minor];
+	if (!dev) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	switch (cmd) {
 		case HIDIOCGRDESCSIZE:
@@ -317,6 +327,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
 		ret = -ENOTTY;
 	}
+out:
 	mutex_unlock(&minors_lock);
 	return ret;
 }
@@ -36,6 +36,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
 	dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
 		cpm->adap.name);
 
+	/*
+	 * register OF I2C devices
+	 */
+	of_i2c_register_devices(&cpm->adap);
+
 	return 0;
 out_shut:
 	cpm_i2c_shutdown(cpm);
@@ -331,21 +331,16 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 	INIT_COMPLETION(dev->cmd_complete);
 	dev->cmd_err = 0;
 
-	/* Take I2C out of reset, configure it as master and set the
-	 * start bit */
-	flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT;
+	/* Take I2C out of reset and configure it as master */
+	flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST;
 
 	/* if the slave address is ten bit address, enable XA bit */
 	if (msg->flags & I2C_M_TEN)
 		flag |= DAVINCI_I2C_MDR_XA;
 	if (!(msg->flags & I2C_M_RD))
 		flag |= DAVINCI_I2C_MDR_TRX;
-	if (stop)
-		flag |= DAVINCI_I2C_MDR_STP;
-	if (msg->len == 0) {
+	if (msg->len == 0)
 		flag |= DAVINCI_I2C_MDR_RM;
-		flag &= ~DAVINCI_I2C_MDR_STP;
-	}
 
 	/* Enable receive or transmit interrupts */
 	w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
@@ -357,18 +352,29 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 
 	dev->terminate = 0;
 
+	/*
+	 * Write mode register first as needed for correct behaviour
+	 * on OMAP-L138, but don't set STT yet to avoid a race with XRDY
+	 * occuring before we have loaded DXR
+	 */
+	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+
 	/*
 	 * First byte should be set here, not after interrupt,
 	 * because transmit-data-ready interrupt can come before
 	 * NACK-interrupt during sending of previous message and
 	 * ICDXR may have wrong data
+	 * It also saves us one interrupt, slightly faster
 	 */
 	if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) {
 		davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
 		dev->buf_len--;
 	}
 
-	/* write the data into mode register; start transmitting */
+	/* Set STT to begin transmit now DXR is loaded */
+	flag |= DAVINCI_I2C_MDR_STT;
+	if (stop && msg->len != 0)
+		flag |= DAVINCI_I2C_MDR_STP;
 	davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
 
 	r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
@@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev,
 	dev_info(&ofdev->dev, "using %s mode\n",
 		 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
 
+	/* Now register all the child nodes */
+	of_i2c_register_devices(adap);
+
 	return 0;
 
 error_cleanup:
@@ -159,15 +159,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
 
 static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
 {
-	int result;
-
-	result = wait_event_interruptible_timeout(i2c_imx->queue,
-		i2c_imx->i2csr & I2SR_IIF, HZ / 10);
-
-	if (unlikely(result < 0)) {
-		dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__);
-		return result;
-	} else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
+	wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+
+	if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
 		dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
 		return -ETIMEDOUT;
 	}
@@ -295,7 +289,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
 		i2c_imx->i2csr = temp;
 		temp &= ~I2SR_IIF;
 		writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
-		wake_up_interruptible(&i2c_imx->queue);
+		wake_up(&i2c_imx->queue);
 		return IRQ_HANDLED;
 	}
 
@@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
 		dev_err(i2c->dev, "failed to add adapter\n");
 		goto fail_add;
 	}
+	of_i2c_register_devices(&i2c->adap);
 
 	return result;
 
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
 
 static int pca_isa_waitforcompletion(void *pd)
 {
-	long ret = ~0;
 	unsigned long timeout;
+	long ret;
 
 	if (irq > -1) {
 		ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
 	} else {
 		/* Do polling */
 		timeout = jiffies + pca_isa_ops.timeout;
-		while (((pca_isa_readbyte(pd, I2C_PCA_CON)
-				& I2C_PCA_CON_SI) == 0)
-				&& (ret = time_before(jiffies, timeout)))
+		do {
+			ret = time_before(jiffies, timeout);
+			if (pca_isa_readbyte(pd, I2C_PCA_CON)
+					& I2C_PCA_CON_SI)
+				break;
 			udelay(100);
+		} while (ret);
 	}
 
 	return ret > 0;
 }
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
 static int i2c_pca_pf_waitforcompletion(void *pd)
 {
 	struct i2c_pca_pf_data *i2c = pd;
-	long ret = ~0;
 	unsigned long timeout;
+	long ret;
 
 	if (i2c->irq) {
 		ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
 	} else {
 		/* Do polling */
 		timeout = jiffies + i2c->adap.timeout;
-		while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
-				& I2C_PCA_CON_SI) == 0)
-				&& (ret = time_before(jiffies, timeout)))
+		do {
+			ret = time_before(jiffies, timeout);
+			if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+					& I2C_PCA_CON_SI)
+				break;
 			udelay(100);
+		} while (ret);
 	}
 
 	return ret > 0;
@@ -32,7 +32,6 @@
 #include <linux/init.h>
 #include <linux/idr.h>
 #include <linux/mutex.h>
-#include <linux/of_i2c.h>
 #include <linux/of_device.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
@@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	if (pm)
-		return pm->suspend ? pm->suspend(dev) : 0;
+	if (pm) {
+		if (pm_runtime_suspended(dev))
+			return 0;
+		else
+			return pm->suspend ? pm->suspend(dev) : 0;
+	}
 
 	return i2c_legacy_suspend(dev, PMSG_SUSPEND);
 }
@@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev)
 	else
 		ret = i2c_legacy_resume(dev);
 
-	if (!ret) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
 	return ret;
 }
 
@@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	if (pm)
-		return pm->freeze ? pm->freeze(dev) : 0;
+	if (pm) {
+		if (pm_runtime_suspended(dev))
+			return 0;
+		else
+			return pm->freeze ? pm->freeze(dev) : 0;
+	}
 
 	return i2c_legacy_suspend(dev, PMSG_FREEZE);
 }
@@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	if (pm)
-		return pm->thaw ? pm->thaw(dev) : 0;
+	if (pm) {
+		if (pm_runtime_suspended(dev))
+			return 0;
+		else
+			return pm->thaw ? pm->thaw(dev) : 0;
+	}
 
 	return i2c_legacy_resume(dev);
 }
@@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	if (pm)
-		return pm->poweroff ? pm->poweroff(dev) : 0;
+	if (pm) {
+		if (pm_runtime_suspended(dev))
+			return 0;
+		else
+			return pm->poweroff ? pm->poweroff(dev) : 0;
+	}
 
 	return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
 }
@@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
 	if (adap->nr < __i2c_first_dynamic_bus_num)
 		i2c_scan_static_board_info(adap);
 
-	/* Register devices from the device tree */
-	of_i2c_register_devices(adap);
-
 	/* Notify drivers */
 	mutex_lock(&core_lock);
 	bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
@@ -157,13 +157,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C5 */ },
 	{ /* MWAIT C6 */
 		.name = "ATM-C6",
-		.desc = "MWAIT 0x40",
-		.driver_data = (void *) 0x40,
+		.desc = "MWAIT 0x52",
+		.driver_data = (void *) 0x52,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 200,
+		.exit_latency = 140,
 		.power_usage = 150,
-		.target_residency = 800,
-		.enter = NULL }, /* disabled */
+		.target_residency = 560,
+		.enter = &intel_idle },
 };
 
 /**
@@ -669,6 +669,9 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 
 		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
 
+			if (!dev->absinfo)
+				return -EINVAL;
+
 			t = _IOC_NR(cmd) & ABS_MAX;
 			abs = dev->absinfo[t];
 
@@ -680,10 +683,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 		}
 	}
 
-	if (_IOC_DIR(cmd) == _IOC_READ) {
+	if (_IOC_DIR(cmd) == _IOC_WRITE) {
 
 		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
 
+			if (!dev->absinfo)
+				return -EINVAL;
+
 			t = _IOC_NR(cmd) & ABS_MAX;
 
 			if (copy_from_user(&abs, p, min_t(size_t,
@@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
 
 	memcpy(joydev->abspam, abspam, len);
 
+	for (i = 0; i < joydev->nabs; i++)
+		joydev->absmap[joydev->abspam[i]] = i;
+
  out:
 	kfree(abspam);
 	return retval;
@@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
 		retval = uinput_validate_absbits(dev);
 		if (retval < 0)
 			goto exit;
+		if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+			int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+			input_mt_create_slots(dev, nslot);
+			input_set_events_per_packet(dev, 6 * nslot);
+		} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+			input_set_events_per_packet(dev, 60);
+		}
 	}
 
 	udev->state = UIST_SETUP_COMPLETE;
@@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb)
 static int wacom_open(struct input_dev *dev)
 {
 	struct wacom *wacom = input_get_drvdata(dev);
+	int retval = 0;
+
+	if (usb_autopm_get_interface(wacom->intf) < 0)
+		return -EIO;
 
 	mutex_lock(&wacom->lock);
 
-	wacom->irq->dev = wacom->usbdev;
-
-	if (usb_autopm_get_interface(wacom->intf) < 0) {
-		mutex_unlock(&wacom->lock);
-		return -EIO;
-	}
-
 	if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
-		usb_autopm_put_interface(wacom->intf);
-		mutex_unlock(&wacom->lock);
-		return -EIO;
+		retval = -EIO;
+		goto out;
 	}
 
 	wacom->open = true;
 	wacom->intf->needs_remote_wakeup = 1;
 
+out:
 	mutex_unlock(&wacom->lock);
-	return 0;
+	if (retval)
+		usb_autopm_put_interface(wacom->intf);
+	return retval;
 }
 
 static void wacom_close(struct input_dev *dev)
@@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev)
 	wacom->open = false;
 	wacom->intf->needs_remote_wakeup = 0;
 	mutex_unlock(&wacom->lock);
+
+	usb_autopm_put_interface(wacom->intf);
 }
 
 static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
|
@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
|
||||||
/* general pen packet */
|
/* general pen packet */
|
||||||
if ((data[1] & 0xb8) == 0xa0) {
|
if ((data[1] & 0xb8) == 0xa0) {
|
||||||
t = (data[6] << 2) | ((data[7] >> 6) & 3);
|
t = (data[6] << 2) | ((data[7] >> 6) & 3);
|
||||||
if (features->type >= INTUOS4S && features->type <= INTUOS4L)
|
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
|
||||||
|
features->type == WACOM_21UX2) {
|
||||||
t = (t << 1) | (data[1] & 1);
|
t = (t << 1) | (data[1] & 1);
|
||||||
|
}
|
||||||
input_report_abs(input, ABS_PRESSURE, t);
|
input_report_abs(input, ABS_PRESSURE, t);
|
||||||
input_report_abs(input, ABS_TILT_X,
|
input_report_abs(input, ABS_TILT_X,
|
||||||
((data[7] << 1) & 0x7e) | (data[8] >> 7));
|
((data[7] << 1) & 0x7e) | (data[8] >> 7));
|
||||||
|
|
|
@@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 				page = bitmap->sb_page;
 				offset = sizeof(bitmap_super_t);
 				if (!file)
-					read_sb_page(bitmap->mddev,
-						     bitmap->mddev->bitmap_info.offset,
-						     page,
-						     index, count);
+					page = read_sb_page(
+						bitmap->mddev,
+						bitmap->mddev->bitmap_info.offset,
+						page,
+						index, count);
 			} else if (file) {
 				page = read_page(file, index, bitmap, count);
 				offset = 0;
@@ -1839,7 +1839,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
 		/* take from bio_init */
 		bio->bi_next = NULL;
+		bio->bi_flags &= ~(BIO_POOL_MASK-1);
 		bio->bi_flags |= 1 << BIO_UPTODATE;
+		bio->bi_comp_cpu = -1;
 		bio->bi_rw = READ;
 		bio->bi_vcnt = 0;
 		bio->bi_idx = 0;
@@ -1912,7 +1914,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 			break;
 		BUG_ON(sync_blocks < (PAGE_SIZE>>9));
-		if (len > (sync_blocks<<9))
+		if ((len >> 9) > sync_blocks)
 			len = sync_blocks<<9;
 	}
 