Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/bnx2x/bnx2x.h
Author: David S. Miller
Date:   2011-03-03 21:27:42 -08:00
Commit: 0a0e9ae1bd
358 changed files with 3323 additions and 1984 deletions

.gitignore

@ -28,6 +28,7 @@ modules.builtin
*.gz
*.bz2
*.lzma
*.xz
*.lzo
*.patch
*.gcno

Documentation/DocBook/filesystems.tmpl

@ -82,6 +82,11 @@
</sect1>
</chapter>
<chapter id="fs_events">
<title>Events based on file descriptors</title>
!Efs/eventfd.c
</chapter>
<chapter id="sysfs">
<title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c
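
For reference, the eventfd interface pulled in by the new "Events based on file descriptors" chapter can be exercised from userspace with a few lines of C. This is an illustrative sketch, not part of the commit; error handling is minimal.

```c
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t val = 1;
	int efd = eventfd(0, 0);        /* counter starts at 0, no flags */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}
	write(efd, &val, sizeof(val));  /* adds 1 to the counter */
	read(efd, &val, sizeof(val));   /* returns the counter and resets it to 0 */
	printf("counter read back: %llu\n", (unsigned long long)val);
	close(efd);
	return 0;
}
```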

Documentation/hwmon/jc42

@ -51,7 +51,8 @@ Supported chips:
* JEDEC JC 42.4 compliant temperature sensor chips
Prefix: 'jc42'
Addresses scanned: I2C 0x18 - 0x1f
Datasheet: -
Datasheet:
http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
Author:
Guenter Roeck <guenter.roeck@ericsson.com>
@ -60,7 +61,11 @@ Author:
Description
-----------
This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
which are used on many DDR3 memory modules for mobile devices and servers. Some
systems use the sensor to prevent memory overheating by automatically throttling
the memory controller.
The driver auto-detects the chips listed above, but can be manually instantiated
to support other JC 42.4 compliant chips.
@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
which applies to all limits. This register can be written by writing into
temp1_crit_hyst. Other hysteresis attributes are read-only.
If the BIOS has configured the sensor for automatic temperature management, it
is likely that it has locked the registers, i.e., that the temperature limits
cannot be changed.
Sysfs entries
-------------
temp1_input Temperature (RO)
temp1_min Minimum temperature (RW)
temp1_max Maximum temperature (RW)
temp1_crit Critical high temperature (RW)
temp1_min Minimum temperature (RO or RW)
temp1_max Maximum temperature (RO or RW)
temp1_crit Critical high temperature (RO or RW)
temp1_crit_hyst Critical hysteresis temperature (RW)
temp1_crit_hyst Critical hysteresis temperature (RO or RW)
temp1_max_hyst Maximum hysteresis temperature (RO)
temp1_min_alarm Temperature low alarm
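
As a usage note (not part of the commit): the attributes above live under the hwmon class in sysfs and report millidegrees Celsius, so a reading can be taken with a sketch like the one below. The hwmon0 index is hypothetical and varies between systems.

```c
#include <stdio.h>

int main(void)
{
	long mdeg;
	/* hwmon0 is a placeholder; the jc42 device may enumerate elsewhere */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f || fscanf(f, "%ld", &mdeg) != 1) {
		perror("temp1_input");
		return 1;
	}
	fclose(f);
	printf("DIMM temperature: %ld.%03ld degC\n", mdeg / 1000, mdeg % 1000);
	return 0;
}
```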

Documentation/hwmon/k10temp

@ -9,6 +9,8 @@ Supported chips:
Socket S1G3: Athlon II, Sempron, Turion II
* AMD Family 11h processors:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
* AMD Family 12h processors: "Llano"
* AMD Family 14h processors: "Brazos" (C/E/G-Series)
Prefix: 'k10temp'
Addresses scanned: PCI space
@ -17,10 +19,14 @@ Supported chips:
http://support.amd.com/us/Processor_TechDocs/31116.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41256.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/43170.pdf
Revision Guide for AMD Family 10h Processors:
http://support.amd.com/us/Processor_TechDocs/41322.pdf
Revision Guide for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41788.pdf
Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/47534.pdf
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
http://support.amd.com/us/Processor_TechDocs/43373.pdf
AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@ -34,7 +40,7 @@ Description
-----------
This driver permits reading of the internal temperature sensor of AMD
Family 10h and 11h processors.
Family 10h/11h/12h/14h processors.
All these processors have a sensor, but on those for Socket F or AM2+,
the sensor may return inconsistent values (erratum 319). The driver

Documentation/kernel-parameters.txt

@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
and is between 256 and 4096 characters. It is defined in the file
./include/asm/setup.h as COMMAND_LINE_SIZE.
Finally, the [KMG] suffix is commonly described after a number of kernel
parameter values. These 'K', 'M', and 'G' letters represent the _binary_
multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
bytes respectively. Such letter suffixes can also be entirely omitted.
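
To make the [KMG] convention concrete, here is a small userspace sketch of the scaling it implies, loosely mirroring what the kernel's memparse() helper does (illustrative only, not the kernel code):

```c
#include <stdio.h>
#include <stdlib.h>

/* Scale a number by a trailing K/M/G suffix: 2^10, 2^20, 2^30. */
static unsigned long long parse_kmg(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; /* fall through */
	default:            break;
	}
	return v;
}

int main(void)
{
	printf("crashkernel=64M reserves %llu bytes\n", parse_kmg("64M"));
	printf("log_buf_len=1M asks for %llu bytes\n", parse_kmg("1M"));
	return 0;
}
```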
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
crashkernel=nn[KMG]@ss[KMG]
[KNL] Reserve a chunk of physical memory to
hold a kernel to switch to with kexec on panic.
crashkernel=size[KMG][@offset[KMG]]
[KNL] Using kexec, Linux can switch to a 'crash kernel'
upon panic. This parameter reserves the physical
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
is selected automatically. Check
Documentation/kdump/kdump.txt for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
in the running system. The syntax of range is
start-[end] where start and end are both
a memory unit (amount[KMG]). See also
Documentation/kdump/kdump.txt for a example.
Documentation/kdump/kdump.txt for an example.
cs89x0_dma= [HW,NET]
Format: <dma>
@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
6 (KERN_INFO) informational
7 (KERN_DEBUG) debug-level messages
log_buf_len=n Sets the size of the printk ring buffer, in bytes.
Format: { n | nk | nM }
n must be a power of two. The default size
is set in the kernel config file.
log_buf_len=n[KMG] Sets the size of the printk ring buffer,
in bytes. n must be a power of two. The default
size is set in the kernel config file.
logo.nologo [FB] Disables display of the built-in Linux logo.
This may be used to provide more screen space for

Documentation/networking/00-INDEX

@ -40,8 +40,6 @@ decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
dgrs.txt
- the Digi International RightSwitch SE-X Ethernet driver
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
e100.txt
@ -50,8 +48,6 @@ e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
eql.txt
- serial IP load balancing
ethertap.txt
- the Ethertap user space packet reception and transmission driver
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
filter.txt
@ -104,8 +100,6 @@ tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
wavelan.txt
- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
x25.txt
- general info on X.25 development.
x25-iface.txt

Documentation/networking/dns_resolver.txt

@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
create dns_resolver foo:* * /usr/sbin/dns.foo %k
=====
USAGE
=====
@ -104,6 +103,14 @@ implemented in the module can be called after doing:
returned also.
===============================
READING DNS KEYS FROM USERSPACE
===============================
Keys of dns_resolver type can be read from userspace using keyctl_read() or
"keyctl read/print/pipe".
=========
MECHANISM
=========

MAINTAINERS

@ -885,7 +885,7 @@ S: Supported
ARM/QUALCOMM MSM MACHINE SUPPORT
M: David Brown <davidb@codeaurora.org>
M: Daniel Walker <dwalker@codeaurora.org>
M: Daniel Walker <dwalker@fifo99.com>
M: Bryan Huntsman <bryanh@codeaurora.org>
L: linux-arm-msm@vger.kernel.org
F: arch/arm/mach-msm/
@ -1467,6 +1467,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
@ -1692,6 +1693,13 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
CHINESE DOCUMENTATION
M: Harry Wei <harryxiyou@gmail.com>
L: xiyoulinuxkernelgroup@googlegroups.com
L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
F: Documentation/zh_CN/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Vasanthy Kolluri <vkolluri@cisco.com>
@ -2027,7 +2035,7 @@ F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
L: dccp@vger.kernel.org
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
S: Maintained
@ -2874,7 +2882,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
@ -5269,7 +5276,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
@ -6107,7 +6114,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c

Makefile

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*

arch/alpha/Kconfig

@ -11,6 +11,7 @@ config ALPHA
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_HARDIRQS_NO_DEPRECATED
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,

arch/alpha/kernel/irq.c

@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
int irq_select_affinity(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc[irq];
struct irq_data *data = irq_get_irq_data(irq);
struct irq_chip *chip;
static int last_cpu;
int cpu = last_cpu + 1;
if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
if (!data)
return 1;
chip = irq_data_get_irq_chip(data);
if (!chip->irq_set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu) ||
@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
cpumask_copy(desc->affinity, cpumask_of(cpu));
get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
cpumask_copy(data->affinity, cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0;
}
#endif /* CONFIG_SMP */
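
This and the following Alpha files all apply the same genirq conversion: irq_chip callbacks now receive a struct irq_data * instead of a raw IRQ number, the .mask/.unmask/.set_affinity hooks become .irq_mask/.irq_unmask/.irq_set_affinity, and poking irq_to_desc(i)->status is replaced by irq_set_status_flags(). A condensed sketch of the converted shape, using a hypothetical "foo" controller rather than code from any of these files:

```c
#include <linux/init.h>
#include <linux/irq.h>

static unsigned long foo_cached_mask;

static void foo_update_hw(unsigned long mask)
{
	/* write the mask to the (hypothetical) interrupt controller */
}

static void foo_mask_irq(struct irq_data *d)
{
	foo_update_hw(foo_cached_mask |= 1UL << (d->irq - 16));
}

static void foo_unmask_irq(struct irq_data *d)
{
	foo_update_hw(foo_cached_mask &= ~(1UL << (d->irq - 16)));
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask_irq,
	.irq_mask_ack	= foo_mask_irq,
	.irq_unmask	= foo_unmask_irq,
};

static void __init foo_init_irq(void)
{
	int i;

	for (i = 16; i < 32; ++i) {
		set_irq_chip_and_handler(i, &foo_irq_chip, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);	/* was irq_to_desc(i)->status |= IRQ_LEVEL */
	}
}
```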

arch/alpha/kernel/irq_alpha.c

@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
void __init
init_rtc_irq(void)
{
struct irq_desc *desc = irq_to_desc(RTC_IRQ);
if (desc) {
desc->status |= IRQ_DISABLED;
set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
/* Dummy irqactions. */

arch/alpha/kernel/irq_i8259.c

@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
}
inline void
i8259a_enable_irq(unsigned int irq)
i8259a_enable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
spin_unlock(&i8259_irq_lock);
}
@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
}
void
i8259a_disable_irq(unsigned int irq)
i8259a_disable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
__i8259a_disable_irq(d->irq);
spin_unlock(&i8259_irq_lock);
}
void
i8259a_mask_and_ack_irq(unsigned int irq)
i8259a_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
struct irq_chip i8259a_irq_type = {
.name = "XT-PIC",
.unmask = i8259a_enable_irq,
.mask = i8259a_disable_irq,
.mask_ack = i8259a_mask_and_ack_irq,
.irq_unmask = i8259a_enable_irq,
.irq_mask = i8259a_disable_irq,
.irq_mask_ack = i8259a_mask_and_ack_irq,
};
void __init

arch/alpha/kernel/irq_impl.h

@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
extern void common_init_isa_dma(void);
extern void i8259a_enable_irq(unsigned int);
extern void i8259a_disable_irq(unsigned int);
extern void i8259a_mask_and_ack_irq(unsigned int);
extern unsigned int i8259a_startup_irq(unsigned int);
extern void i8259a_end_irq(unsigned int);
extern void i8259a_enable_irq(struct irq_data *d);
extern void i8259a_disable_irq(struct irq_data *d);
extern void i8259a_mask_and_ack_irq(struct irq_data *d);
extern struct irq_chip i8259a_irq_type;
extern void init_i8259a_irqs(void);

arch/alpha/kernel/irq_pyxis.c

@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
}
static inline void
pyxis_enable_irq(unsigned int irq)
pyxis_enable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
pyxis_disable_irq(unsigned int irq)
pyxis_disable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
pyxis_mask_and_ack_irq(unsigned int irq)
pyxis_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit = 1UL << (irq - 16);
unsigned long bit = 1UL << (d->irq - 16);
unsigned long mask = cached_irq_mask &= ~bit;
/* Disable the interrupt. */
@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
static struct irq_chip pyxis_irq_type = {
.name = "PYXIS",
.mask_ack = pyxis_mask_and_ack_irq,
.mask = pyxis_disable_irq,
.unmask = pyxis_enable_irq,
.irq_mask_ack = pyxis_mask_and_ack_irq,
.irq_mask = pyxis_disable_irq,
.irq_unmask = pyxis_enable_irq,
};
void
@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
if ((ignore_mask >> i) & 1)
continue;
set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
setup_irq(16+7, &isa_cascade_irqaction);

arch/alpha/kernel/irq_srm.c

@ -18,27 +18,27 @@
DEFINE_SPINLOCK(srm_irq_lock);
static inline void
srm_enable_irq(unsigned int irq)
srm_enable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_ena(irq - 16);
cserve_ena(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
static void
srm_disable_irq(unsigned int irq)
srm_disable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_dis(irq - 16);
cserve_dis(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct irq_chip srm_irq_type = {
.name = "SRM",
.unmask = srm_enable_irq,
.mask = srm_disable_irq,
.mask_ack = srm_disable_irq,
.irq_unmask = srm_enable_irq,
.irq_mask = srm_disable_irq,
.irq_mask_ack = srm_disable_irq,
};
void __init
@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
}

arch/alpha/kernel/sys_alcor.c

@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
}
static inline void
alcor_enable_irq(unsigned int irq)
alcor_enable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
alcor_disable_irq(unsigned int irq)
alcor_disable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
alcor_mask_and_ack_irq(unsigned int irq)
alcor_mask_and_ack_irq(struct irq_data *d)
{
alcor_disable_irq(irq);
alcor_disable_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 0; mb();
}
static void
alcor_isa_mask_and_ack_irq(unsigned int irq)
alcor_isa_mask_and_ack_irq(struct irq_data *d)
{
i8259a_mask_and_ack_irq(irq);
i8259a_mask_and_ack_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
static struct irq_chip alcor_irq_type = {
.name = "ALCOR",
.unmask = alcor_enable_irq,
.mask = alcor_disable_irq,
.mask_ack = alcor_mask_and_ack_irq,
.irq_unmask = alcor_enable_irq,
.irq_mask = alcor_disable_irq,
.irq_mask_ack = alcor_mask_and_ack_irq,
};
static void
@ -126,9 +126,9 @@ alcor_init_irq(void)
if (i >= 16+20 && i <= 16+30)
continue;
set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
init_i8259a_irqs();
common_init_isa_dma();

arch/alpha/kernel/sys_cabriolet.c

@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
cabriolet_enable_irq(unsigned int irq)
cabriolet_enable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
}
static void
cabriolet_disable_irq(unsigned int irq)
cabriolet_disable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
}
static struct irq_chip cabriolet_irq_type = {
.name = "CABRIOLET",
.unmask = cabriolet_enable_irq,
.mask = cabriolet_disable_irq,
.mask_ack = cabriolet_disable_irq,
.irq_unmask = cabriolet_enable_irq,
.irq_mask = cabriolet_disable_irq,
.irq_mask_ack = cabriolet_disable_irq,
};
static void
@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
for (i = 16; i < 35; ++i) {
set_irq_chip_and_handler(i, &cabriolet_irq_type,
handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
}

arch/alpha/kernel/sys_dp264.c

@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
}
static void
dp264_enable_irq(unsigned int irq)
dp264_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << irq;
cached_irq_mask |= 1UL << d->irq;
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
dp264_disable_irq(unsigned int irq)
dp264_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << irq);
cached_irq_mask &= ~(1UL << d->irq);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_enable_irq(unsigned int irq)
clipper_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << (irq - 16);
cached_irq_mask |= 1UL << (d->irq - 16);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_disable_irq(unsigned int irq)
clipper_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << (irq - 16));
cached_irq_mask &= ~(1UL << (d->irq - 16));
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(irq, *affinity);
cpu_set_irq_affinity(d->irq, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
}
static int
clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(irq - 16, *affinity);
cpu_set_irq_affinity(d->irq - 16, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
}
static struct irq_chip dp264_irq_type = {
.name = "DP264",
.unmask = dp264_enable_irq,
.mask = dp264_disable_irq,
.mask_ack = dp264_disable_irq,
.set_affinity = dp264_set_affinity,
.name = "DP264",
.irq_unmask = dp264_enable_irq,
.irq_mask = dp264_disable_irq,
.irq_mask_ack = dp264_disable_irq,
.irq_set_affinity = dp264_set_affinity,
};
static struct irq_chip clipper_irq_type = {
.name = "CLIPPER",
.unmask = clipper_enable_irq,
.mask = clipper_disable_irq,
.mask_ack = clipper_disable_irq,
.set_affinity = clipper_set_affinity,
.name = "CLIPPER",
.irq_unmask = clipper_enable_irq,
.irq_mask = clipper_disable_irq,
.irq_mask_ack = clipper_disable_irq,
.irq_set_affinity = clipper_set_affinity,
};
static void
@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}

arch/alpha/kernel/sys_eb64p.c

@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
eb64p_enable_irq(unsigned int irq)
eb64p_enable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
}
static void
eb64p_disable_irq(unsigned int irq)
eb64p_disable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
}
static struct irq_chip eb64p_irq_type = {
.name = "EB64P",
.unmask = eb64p_enable_irq,
.mask = eb64p_disable_irq,
.mask_ack = eb64p_disable_irq,
.irq_unmask = eb64p_enable_irq,
.irq_mask = eb64p_disable_irq,
.irq_mask_ack = eb64p_disable_irq,
};
static void
@ -118,9 +118,9 @@ eb64p_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 32; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
}
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
setup_irq(16+5, &isa_cascade_irqaction);

arch/alpha/kernel/sys_eiger.c

@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
eiger_enable_irq(unsigned int irq)
eiger_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
eiger_update_irq_hw(irq, mask);
}
static void
eiger_disable_irq(unsigned int irq)
eiger_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
eiger_update_irq_hw(irq, mask);
@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
static struct irq_chip eiger_irq_type = {
.name = "EIGER",
.unmask = eiger_enable_irq,
.mask = eiger_disable_irq,
.mask_ack = eiger_disable_irq,
.irq_unmask = eiger_enable_irq,
.irq_mask = eiger_disable_irq,
.irq_mask_ack = eiger_disable_irq,
};
static void
@ -136,8 +138,8 @@ eiger_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}

arch/alpha/kernel/sys_jensen.c

@ -63,34 +63,34 @@
*/
static void
jensen_local_enable(unsigned int irq)
jensen_local_enable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_enable_irq(1);
if (d->irq == 7)
i8259a_enable_irq(d);
}
static void
jensen_local_disable(unsigned int irq)
jensen_local_disable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_disable_irq(1);
if (d->irq == 7)
i8259a_disable_irq(d);
}
static void
jensen_local_mask_ack(unsigned int irq)
jensen_local_mask_ack(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_mask_and_ack_irq(1);
if (d->irq == 7)
i8259a_mask_and_ack_irq(d);
}
static struct irq_chip jensen_local_irq_type = {
.name = "LOCAL",
.unmask = jensen_local_enable,
.mask = jensen_local_disable,
.mask_ack = jensen_local_mask_ack,
.irq_unmask = jensen_local_enable,
.irq_mask = jensen_local_disable,
.irq_mask_ack = jensen_local_mask_ack,
};
static void

arch/alpha/kernel/sys_marvel.c

@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
}
static void
io7_enable_irq(unsigned int irq)
io7_enable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl |= 1UL << 24;
mb();
@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
}
static void
io7_disable_irq(unsigned int irq)
io7_disable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl &= ~(1UL << 24);
mb();
@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
}
static void
marvel_irq_noop(unsigned int irq)
{
return;
}
static unsigned int
marvel_irq_noop_return(unsigned int irq)
{
return 0;
marvel_irq_noop(struct irq_data *d)
{
return;
}
static struct irq_chip marvel_legacy_irq_type = {
.name = "LEGACY",
.mask = marvel_irq_noop,
.unmask = marvel_irq_noop,
.irq_mask = marvel_irq_noop,
.irq_unmask = marvel_irq_noop,
};
static struct irq_chip io7_lsi_irq_type = {
.name = "LSI",
.unmask = io7_enable_irq,
.mask = io7_disable_irq,
.mask_ack = io7_disable_irq,
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_mask_ack = io7_disable_irq,
};
static struct irq_chip io7_msi_irq_type = {
.name = "MSI",
.unmask = io7_enable_irq,
.mask = io7_disable_irq,
.ack = marvel_irq_noop,
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_ack = marvel_irq_noop,
};
static void
@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
/* Disable the implemented irqs in hardware. */
@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
for (i = 0; i < 16; ++i)

arch/alpha/kernel/sys_mikasa.c

@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
}
static inline void
mikasa_enable_irq(unsigned int irq)
mikasa_enable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
}
static void
mikasa_disable_irq(unsigned int irq)
mikasa_disable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip mikasa_irq_type = {
.name = "MIKASA",
.unmask = mikasa_enable_irq,
.mask = mikasa_disable_irq,
.mask_ack = mikasa_disable_irq,
.irq_unmask = mikasa_enable_irq,
.irq_mask = mikasa_disable_irq,
.irq_mask_ack = mikasa_disable_irq,
};
static void
@ -98,8 +98,8 @@ mikasa_init_irq(void)
mikasa_update_irq_hw(0);
for (i = 16; i < 32; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

arch/alpha/kernel/sys_noritake.c

@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
}
static void
noritake_enable_irq(unsigned int irq)
noritake_enable_irq(struct irq_data *d)
{
noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
}
static void
noritake_disable_irq(unsigned int irq)
noritake_disable_irq(struct irq_data *d)
{
noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip noritake_irq_type = {
.name = "NORITAKE",
.unmask = noritake_enable_irq,
.mask = noritake_disable_irq,
.mask_ack = noritake_disable_irq,
.irq_unmask = noritake_enable_irq,
.irq_mask = noritake_disable_irq,
.irq_mask_ack = noritake_disable_irq,
};
static void
@ -127,8 +127,8 @@ noritake_init_irq(void)
outw(0, 0x54c);
for (i = 16; i < 48; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

arch/alpha/kernel/sys_rawhide.c

@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
static inline void
rawhide_enable_irq(unsigned int irq)
rawhide_enable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
}
static void
rawhide_disable_irq(unsigned int irq)
rawhide_disable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
}
static void
rawhide_mask_and_ack_irq(unsigned int irq)
rawhide_mask_and_ack_irq(struct irq_data *d)
{
unsigned int mask, mask1, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
static struct irq_chip rawhide_irq_type = {
.name = "RAWHIDE",
.unmask = rawhide_enable_irq,
.mask = rawhide_disable_irq,
.mask_ack = rawhide_mask_and_ack_irq,
.irq_unmask = rawhide_enable_irq,
.irq_mask = rawhide_disable_irq,
.irq_mask_ack = rawhide_mask_and_ack_irq,
};
static void
@ -177,8 +180,8 @@ rawhide_init_irq(void)
}
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

arch/alpha/kernel/sys_rx164.c

@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
}
static inline void
rx164_enable_irq(unsigned int irq)
rx164_enable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
rx164_disable_irq(unsigned int irq)
rx164_disable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static struct irq_chip rx164_irq_type = {
.name = "RX164",
.unmask = rx164_enable_irq,
.mask = rx164_disable_irq,
.mask_ack = rx164_disable_irq,
.irq_unmask = rx164_enable_irq,
.irq_mask = rx164_disable_irq,
.irq_mask_ack = rx164_disable_irq,
};
static void
@ -99,8 +99,8 @@ rx164_init_irq(void)
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

arch/alpha/kernel/sys_sable.c

@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
/* GENERIC irq routines */
static inline void
sable_lynx_enable_irq(unsigned int irq)
sable_lynx_enable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
}
static void
sable_lynx_disable_irq(unsigned int irq)
sable_lynx_disable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
}
static void
sable_lynx_mask_and_ack_irq(unsigned int irq)
sable_lynx_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
static struct irq_chip sable_lynx_irq_type = {
.name = "SABLE/LYNX",
.unmask = sable_lynx_enable_irq,
.mask = sable_lynx_disable_irq,
.mask_ack = sable_lynx_mask_and_ack_irq,
.irq_unmask = sable_lynx_enable_irq,
.irq_mask = sable_lynx_disable_irq,
.irq_mask_ack = sable_lynx_mask_and_ack_irq,
};
static void
@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
long i;
for (i = 0; i < nr_of_irqs; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &sable_lynx_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();

arch/alpha/kernel/sys_takara.c

@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
takara_enable_irq(unsigned int irq)
takara_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
takara_update_irq_hw(irq, mask);
}
static void
takara_disable_irq(unsigned int irq)
takara_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
takara_update_irq_hw(irq, mask);
@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
static struct irq_chip takara_irq_type = {
.name = "TAKARA",
.unmask = takara_enable_irq,
.mask = takara_disable_irq,
.mask_ack = takara_disable_irq,
.irq_unmask = takara_enable_irq,
.irq_mask = takara_disable_irq,
.irq_mask_ack = takara_disable_irq,
};
static void
@ -136,8 +138,8 @@ takara_init_irq(void)
takara_update_irq_hw(i, -1);
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();

arch/alpha/kernel/sys_titan.c

@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
}
static inline void
titan_enable_irq(unsigned int irq)
titan_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask |= 1UL << (irq - 16);
titan_update_irq_hw(titan_cached_irq_mask);
@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
}
static inline void
titan_disable_irq(unsigned int irq)
titan_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask &= ~(1UL << (irq - 16));
titan_update_irq_hw(titan_cached_irq_mask);
@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&titan_irq_lock);
titan_cpu_set_irq_affinity(irq - 16, *affinity);
@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
static struct irq_chip titan_irq_type = {
.name = "TITAN",
.unmask = titan_enable_irq,
.mask = titan_disable_irq,
.mask_ack = titan_disable_irq,
.set_affinity = titan_set_irq_affinity,
.name = "TITAN",
.irq_unmask = titan_enable_irq,
.irq_mask = titan_disable_irq,
.irq_mask_ack = titan_disable_irq,
.irq_set_affinity = titan_set_irq_affinity,
};
static irqreturn_t

arch/alpha/kernel/sys_wildfire.c

@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
}
static void
wildfire_enable_irq(unsigned int irq)
wildfire_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_enable_irq(irq);
i8259a_enable_irq(d);
spin_lock(&wildfire_irq_lock);
set_bit(irq, &cached_irq_mask);
@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
}
static void
wildfire_disable_irq(unsigned int irq)
wildfire_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_disable_irq(irq);
i8259a_disable_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
}
static void
wildfire_mask_and_ack_irq(unsigned int irq)
wildfire_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_mask_and_ack_irq(irq);
i8259a_mask_and_ack_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
static struct irq_chip wildfire_irq_type = {
.name = "WILDFIRE",
.unmask = wildfire_enable_irq,
.mask = wildfire_disable_irq,
.mask_ack = wildfire_mask_and_ack_irq,
.irq_unmask = wildfire_enable_irq,
.irq_mask = wildfire_disable_irq,
.irq_mask_ack = wildfire_mask_and_ack_irq,
};
static void __init
@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
for (i = 0; i < 16; ++i) {
if (i == 2)
continue;
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
for (i = 40; i < 64; ++i) {
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
setup_irq(32+irq_bias, &isa_enable);
setup_irq(32+irq_bias, &isa_enable);
}
static void __init

arch/arm/Kconfig

@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
visible impact on the overall performance or power consumption of the
processor.
config ARM_ERRATA_751472
bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 751472 Cortex-A9 (prior
to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
completion of a following broadcasted operation if the second
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
config ARM_ERRATA_753970
bool "ARM errata: cache sync operation may be faulty"
depends on CACHE_PL310
help
This option enables the workaround for the 753970 PL310 (r3p0) erratum.
Under some condition the effect of cache sync operation on
the store buffer still remains when the operation completes.
This means that the store buffer is always asked to drain and
this prevents it from merging any further writes. The workaround
is to replace the normal offset of cache sync operation (0x730)
by another offset targeting an unmapped PL310 register 0x740.
This has the same effect as the cache sync operation: store buffer
drain and waiting for all buffers empty.
endmenu
source "arch/arm/common/Kconfig"

arch/arm/Makefile

@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:

arch/arm/boot/compressed/.gitignore

@ -1,3 +1,7 @@
font.c
piggy.gz
lib1funcs.S
piggy.gzip
piggy.lzo
piggy.lzma
vmlinux
vmlinux.lds

arch/arm/include/asm/hardware/cache-l2x0.h

@ -36,6 +36,7 @@
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0
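
The new L2X0_DUMMY_REG offset is what the ARM_ERRATA_753970 option above keys off: with the workaround active, a cache sync is issued by writing the unmapped 0x740 register instead of 0x730, which still drains the store buffer. A hedged sketch of that selection (illustrative; not the actual arch/arm/mm/cache-l2x0.c change):

```c
#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

#ifdef CONFIG_ARM_ERRATA_753970
#define L2X0_SYNC_OFFSET	L2X0_DUMMY_REG		/* 0x740, unmapped on PL310 r3p0 */
#else
#define L2X0_SYNC_OFFSET	L2X0_CACHE_SYNC		/* 0x730 */
#endif

static inline void l2x0_do_sync(void __iomem *l2x0_base)
{
	/* writing any value drains the store buffer and waits for it to empty */
	writel_relaxed(0, l2x0_base + L2X0_SYNC_OFFSET);
}
```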

arch/arm/include/asm/hardware/sp810.h

@ -58,6 +58,9 @@
static inline void sysctl_soft_reset(void __iomem *base)
{
/* switch to slow mode */
writel(0x2, base + SCCTRL);
/* writing any value to SCSYSSTAT reg will reset system */
writel(0, base + SCSYSSTAT);
}

arch/arm/include/asm/tlb.h

@ -18,16 +18,34 @@
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
#define tlb_flush(tlb) ((void) tlb)
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* We need to delay page freeing for SMP as other CPUs can access pages
* which have been removed but not yet had their TLB entries invalidated.
* Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
* we need to apply this same delaying tactic to ensure correct operation.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb) 0
#define FREE_PTE_NR 500
#else
#define tlb_fast_mode(tlb) 1
#define FREE_PTE_NR 0
#endif
/*
* TLB handling. This allows us to remove pages from the page
@ -36,12 +54,58 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
* This is unnecessarily complex. There's three ways the TLB shootdown
* code is used:
* 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
* tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL.
* 2. Unmapping all vmas. See exit_mmap().
* tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL. Additionally, page tables will be freed.
* 3. Unmapping argument pages. See shift_arg_pages().
* tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
* tlb->vma will be NULL.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || !tlb->vma)
flush_tlb_mm(tlb->mm);
else if (tlb->range_end > 0) {
flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
}
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
}
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
}
}
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
tlb->vma = NULL;
tlb->nr = 0;
return tlb;
}
@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
if (tlb->fullmm)
flush_tlb_mm(tlb->mm);
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
}
tlb_add_flush(tlb, addr);
}
/*
@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm && tlb->range_end > 0)
flush_tlb_range(vma, tlb->range_start, tlb->range_end);
if (!tlb->fullmm)
tlb_flush(tlb);
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
} else {
tlb->pages[tlb->nr++] = page;
if (tlb->nr >= FREE_PTE_NR)
tlb_flush_mmu(tlb);
}
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
pgtable_page_dtor(pte);
tlb_add_flush(tlb, addr);
tlb_remove_page(tlb, pte);
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)

arch/arm/include/asm/tlbflush.h

@ -10,12 +10,7 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#ifndef CONFIG_MMU
#define tlb_flush(tlb) ((void) tlb)
#else /* CONFIG_MMU */
#ifdef CONFIG_MMU
#include <asm/glue.h>

arch/arm/kernel/kprobes-decode.c

@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return space_cccc_1100_010x(insn, asi);
} else if ((insn & 0x0e000000) == 0x0c400000) {
} else if ((insn & 0x0e000000) == 0x0c000000) {
return space_cccc_110x(insn, asi);

arch/arm/kernel/pmu.c

@ -97,28 +97,34 @@ set_irq_affinity(int irq,
irq, cpu);
return err;
#else
return 0;
return -EINVAL;
#endif
}
static int
init_cpu_pmu(void)
{
int i, err = 0;
int i, irqs, err = 0;
struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
if (!pdev) {
err = -ENODEV;
goto out;
}
if (!pdev)
return -ENODEV;
for (i = 0; i < pdev->num_resources; ++i) {
irqs = pdev->num_resources;
/*
* If we have a single PMU interrupt that we can't shift, assume that
* we're running on a uniprocessor machine and continue.
*/
if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
return 0;
for (i = 0; i < irqs; ++i) {
err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
out:
return err;
}

arch/arm/kernel/setup.c

@ -226,8 +226,8 @@ int cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4"
: "=r" (mmfr0));
if ((mmfr0 & 0x0000000f) == 0x00000003 ||
(mmfr0 & 0x000000f0) == 0x00000030)
if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
(mmfr0 & 0x000000f0) >= 0x00000030)
cpu_arch = CPU_ARCH_ARMv7;
else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
(mmfr0 & 0x000000f0) == 0x00000020)

arch/arm/kernel/signal.c

@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
cpsr |= PSR_ENDSTATE;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.

arch/arm/kernel/vmlinux.lds.S

@ -21,6 +21,12 @@
#define ARM_CPU_KEEP(x)
#endif
#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
#define ARM_EXIT_KEEP(x) x
#else
#define ARM_EXIT_KEEP(x)
#endif
OUTPUT_ARCH(arm)
ENTRY(stext)
@ -43,6 +49,7 @@ SECTIONS
_sinittext = .;
HEAD_TEXT
INIT_TEXT
ARM_EXIT_KEEP(EXIT_TEXT)
_einittext = .;
ARM_CPU_DISCARD(PROC_INFO)
__arch_info_begin = .;
@ -67,6 +74,7 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
#endif
}
@ -162,6 +170,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
@ -247,6 +256,8 @@ SECTIONS
}
#endif
NOTES
BSS_SECTION(0, 0, 0)
_end = .;

arch/arm/mach-omap2/clkt_dpll.c

@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
fint = clk->parent->rate / (n + 1);
fint = clk->parent->rate / n;
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "

arch/arm/mach-omap2/mailbox.c

@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)

arch/arm/mach-omap2/mux.c

@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}

arch/arm/mach-omap2/pm-debug.c

@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;

arch/arm/mach-omap2/prcm_mpu44xx.h

@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
/*

arch/arm/mach-omap2/smartreflex.c

@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
return PTR_ERR(dbg_dir);
}
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
(void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
&sr_info->err_weight);
@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
strcpy(name, "volt_");
sprintf(volt_name, "%d", volt_data[i].volt_nominal);
strcat(name, volt_name);
(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
}

arch/arm/mach-omap2/timer-gp.c

@ -39,6 +39,7 @@
#include <asm/mach/time.h>
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
#include <asm/sched_clock.h>
#include "timer-gp.h"
@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
/*
* clocksource
*/
static DEFINE_CLOCK_DATA(cd);
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void notrace dmtimer_update_sched_clock(void)
{
u32 cyc;
cyc = omap_dm_timer_read_counter(gpt_clocksource);
update_sched_clock(&cd, cyc, (u32)~0);
}
/* Setup free-running counter for clocksource */
static void __init omap2_gp_clocksource_init(void)
{
@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
omap_dm_timer_set_load_start(gpt, 1, 0);
init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
if (clocksource_register_hz(&clocksource_gpt, tick_rate))
printk(err2, clocksource_gpt.name);
}

arch/arm/mach-s5p6442/include/mach/map.h

@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p6442/include/mach/map.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5P6442 - Memory map definitions
@ -16,56 +16,61 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
#define S5P6442_PA_CHIPID (0xE0000000)
#define S5P_PA_CHIPID S5P6442_PA_CHIPID
#define S5P6442_PA_SDRAM 0x20000000
#define S5P6442_PA_SYSCON (0xE0100000)
#define S5P_PA_SYSCON S5P6442_PA_SYSCON
#define S5P6442_PA_I2S0 0xC0B00000
#define S5P6442_PA_I2S1 0xF2200000
#define S5P6442_PA_GPIO (0xE0200000)
#define S5P6442_PA_CHIPID 0xE0000000
#define S5P6442_PA_VIC0 (0xE4000000)
#define S5P6442_PA_VIC1 (0xE4100000)
#define S5P6442_PA_VIC2 (0xE4200000)
#define S5P6442_PA_SYSCON 0xE0100000
#define S5P6442_PA_SROMC (0xE7000000)
#define S5P_PA_SROMC S5P6442_PA_SROMC
#define S5P6442_PA_GPIO 0xE0200000
#define S5P6442_PA_VIC0 0xE4000000
#define S5P6442_PA_VIC1 0xE4100000
#define S5P6442_PA_VIC2 0xE4200000
#define S5P6442_PA_SROMC 0xE7000000
#define S5P6442_PA_MDMA 0xE8000000
#define S5P6442_PA_PDMA 0xE9000000
#define S5P6442_PA_TIMER (0xEA000000)
#define S5P_PA_TIMER S5P6442_PA_TIMER
#define S5P6442_PA_TIMER 0xEA000000
#define S5P6442_PA_SYSTIMER (0xEA100000)
#define S5P6442_PA_SYSTIMER 0xEA100000
#define S5P6442_PA_WATCHDOG (0xEA200000)
#define S5P6442_PA_WATCHDOG 0xEA200000
#define S5P6442_PA_UART (0xEC000000)
#define S5P6442_PA_UART 0xEC000000
#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0)
#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400)
#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800)
#define S5P_SZ_UART SZ_256
#define S5P6442_PA_IIC0 (0xEC100000)
#define S5P6442_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5P6442_PA_SDRAM
#define S5P6442_PA_IIC0 0xEC100000
#define S5P6442_PA_SPI 0xEC300000
/* I2S */
#define S5P6442_PA_I2S0 0xC0B00000
#define S5P6442_PA_I2S1 0xF2200000
/* PCM */
#define S5P6442_PA_PCM0 0xF2400000
#define S5P6442_PA_PCM1 0xF2500000
/* compatibiltiy defines. */
#define S3C_PA_WDT S5P6442_PA_WATCHDOG
#define S3C_PA_UART S5P6442_PA_UART
/* Compatibiltiy Defines */
#define S3C_PA_IIC S5P6442_PA_IIC0
#define S3C_PA_WDT S5P6442_PA_WATCHDOG
#define S5P_PA_CHIPID S5P6442_PA_CHIPID
#define S5P_PA_SDRAM S5P6442_PA_SDRAM
#define S5P_PA_SROMC S5P6442_PA_SROMC
#define S5P_PA_SYSCON S5P6442_PA_SYSCON
#define S5P_PA_TIMER S5P6442_PA_TIMER
/* UART */
#define S3C_PA_UART S5P6442_PA_UART
#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */

arch/arm/mach-s5p64x0/include/mach/map.h

@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p64x0/include/mach/map.h
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P64X0 - Memory map definitions
@ -16,30 +16,63 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
#define S5P64X0_PA_SDRAM (0x20000000)
#define S5P64X0_PA_SDRAM 0x20000000
#define S5P64X0_PA_CHIPID 0xE0000000
#define S5P64X0_PA_SYSCON 0xE0100000
#define S5P64X0_PA_GPIO 0xE0308000
#define S5P64X0_PA_VIC0 0xE4000000
#define S5P64X0_PA_VIC1 0xE4100000
#define S5P64X0_PA_SROMC 0xE7000000
#define S5P64X0_PA_PDMA 0xE9000000
#define S5P64X0_PA_TIMER 0xEA000000
#define S5P64X0_PA_RTC 0xEA100000
#define S5P64X0_PA_WDT 0xEA200000
#define S5P6440_PA_IIC0 0xEC104000
#define S5P6440_PA_IIC1 0xEC20F000
#define S5P6450_PA_IIC0 0xEC100000
#define S5P6450_PA_IIC1 0xEC200000
#define S5P64X0_PA_SPI0 0xEC400000
#define S5P64X0_PA_SPI1 0xEC500000
#define S5P64X0_PA_HSOTG 0xED100000
#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
#define S5P64X0_PA_I2S 0xF2000000
#define S5P6450_PA_I2S1 0xF2800000
#define S5P6450_PA_I2S2 0xF2900000
#define S5P64X0_PA_PCM 0xF2100000
#define S5P64X0_PA_ADC 0xF3000000
/* Compatibiltiy Defines */
#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5P64X0_PA_HSMMC(2)
#define S3C_PA_IIC S5P6440_PA_IIC0
#define S3C_PA_IIC1 S5P6440_PA_IIC1
#define S3C_PA_RTC S5P64X0_PA_RTC
#define S3C_PA_WDT S5P64X0_PA_WDT
#define S5P64X0_PA_CHIPID (0xE0000000)
#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
#define S5P64X0_PA_SYSCON (0xE0100000)
#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
#define S5P64X0_PA_GPIO (0xE0308000)
#define S5P64X0_PA_VIC0 (0xE4000000)
#define S5P64X0_PA_VIC1 (0xE4100000)
#define S5P64X0_PA_SROMC (0xE7000000)
#define S5P_PA_SROMC S5P64X0_PA_SROMC
#define S5P64X0_PA_PDMA (0xE9000000)
#define S5P64X0_PA_TIMER (0xEA000000)
#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
#define S5P_PA_TIMER S5P64X0_PA_TIMER
#define S5P64X0_PA_RTC (0xEA100000)
#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
#define S5P64X0_PA_WDT (0xEA200000)
/* UART */
#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
@ -53,36 +86,4 @@
#define S5P_SZ_UART SZ_256
#define S5P6440_PA_IIC0 (0xEC104000)
#define S5P6440_PA_IIC1 (0xEC20F000)
#define S5P6450_PA_IIC0 (0xEC100000)
#define S5P6450_PA_IIC1 (0xEC200000)
#define S5P64X0_PA_SPI0 (0xEC400000)
#define S5P64X0_PA_SPI1 (0xEC500000)
#define S5P64X0_PA_HSOTG (0xED100000)
#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
#define S5P64X0_PA_I2S (0xF2000000)
#define S5P6450_PA_I2S1 0xF2800000
#define S5P6450_PA_I2S2 0xF2900000
#define S5P64X0_PA_PCM (0xF2100000)
#define S5P64X0_PA_ADC (0xF3000000)
/* compatibiltiy defines. */
#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5P64X0_PA_HSMMC(2)
#define S3C_PA_IIC S5P6440_PA_IIC0
#define S3C_PA_IIC1 S5P6440_PA_IIC1
#define S3C_PA_RTC S5P64X0_PA_RTC
#define S3C_PA_WDT S5P64X0_PA_WDT
#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
#endif /* __ASM_ARCH_MAP_H */

View File

@ -1,4 +1,7 @@
/* linux/arch/arm/mach-s5pc100/include/mach/map.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
@ -16,145 +19,115 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
/*
* map-base.h has already defined virtual memory address
* S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s)
* S3C_VA_SYS S3C_ADDR(0x00100000) system control
* S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used)
* S3C_VA_TIMER S3C_ADDR(0x00300000) timer block
* S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog
* S3C_VA_UART S3C_ADDR(0x01000000) UART
*
* S5PC100 specific virtual memory address can be defined here
* S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO
*
*/
#define S5PC100_PA_SDRAM 0x20000000
#define S5PC100_PA_ONENAND_BUF (0xB0000000)
#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
#define S5PC100_PA_ONENAND 0xE7100000
#define S5PC100_PA_ONENAND_BUF 0xB0000000
/* Chip ID */
#define S5PC100_PA_CHIPID 0xE0000000
#define S5PC100_PA_CHIPID (0xE0000000)
#define S5P_PA_CHIPID S5PC100_PA_CHIPID
#define S5PC100_PA_SYSCON 0xE0100000
#define S5PC100_PA_SYSCON (0xE0100000)
#define S5P_PA_SYSCON S5PC100_PA_SYSCON
#define S5PC100_PA_OTHERS 0xE0200000
#define S5PC100_PA_OTHERS (0xE0200000)
#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
#define S5PC100_PA_GPIO 0xE0300000
#define S5PC100_PA_GPIO (0xE0300000)
#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
#define S5PC100_PA_VIC0 0xE4000000
#define S5PC100_PA_VIC1 0xE4100000
#define S5PC100_PA_VIC2 0xE4200000
/* Interrupt */
#define S5PC100_PA_VIC0 (0xE4000000)
#define S5PC100_PA_VIC1 (0xE4100000)
#define S5PC100_PA_VIC2 (0xE4200000)
#define S5PC100_VA_VIC S3C_VA_IRQ
#define S5PC100_VA_VIC_OFFSET 0x10000
#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
#define S5PC100_PA_SROMC 0xE7000000
#define S5PC100_PA_SROMC (0xE7000000)
#define S5P_PA_SROMC S5PC100_PA_SROMC
#define S5PC100_PA_CFCON 0xE7800000
#define S5PC100_PA_ONENAND (0xE7100000)
#define S5PC100_PA_MDMA 0xE8100000
#define S5PC100_PA_PDMA0 0xE9000000
#define S5PC100_PA_PDMA1 0xE9200000
#define S5PC100_PA_CFCON (0xE7800000)
#define S5PC100_PA_TIMER 0xEA000000
#define S5PC100_PA_SYSTIMER 0xEA100000
#define S5PC100_PA_WATCHDOG 0xEA200000
#define S5PC100_PA_RTC 0xEA300000
/* DMA */
#define S5PC100_PA_MDMA (0xE8100000)
#define S5PC100_PA_PDMA0 (0xE9000000)
#define S5PC100_PA_PDMA1 (0xE9200000)
#define S5PC100_PA_UART 0xEC000000
/* Timer */
#define S5PC100_PA_TIMER (0xEA000000)
#define S5P_PA_TIMER S5PC100_PA_TIMER
#define S5PC100_PA_IIC0 0xEC100000
#define S5PC100_PA_IIC1 0xEC200000
#define S5PC100_PA_SYSTIMER (0xEA100000)
#define S5PC100_PA_SPI0 0xEC300000
#define S5PC100_PA_SPI1 0xEC400000
#define S5PC100_PA_SPI2 0xEC500000
#define S5PC100_PA_WATCHDOG (0xEA200000)
#define S5PC100_PA_RTC (0xEA300000)
#define S5PC100_PA_USB_HSOTG 0xED200000
#define S5PC100_PA_USB_HSPHY 0xED300000
#define S5PC100_PA_UART (0xEC000000)
#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0)
#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400)
#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800)
#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
#define S5P_SZ_UART SZ_256
#define S5PC100_PA_FB 0xEE000000
#define S5PC100_PA_IIC0 (0xEC100000)
#define S5PC100_PA_IIC1 (0xEC200000)
#define S5PC100_PA_FIMC0 0xEE200000
#define S5PC100_PA_FIMC1 0xEE300000
#define S5PC100_PA_FIMC2 0xEE400000
/* SPI */
#define S5PC100_PA_SPI0 0xEC300000
#define S5PC100_PA_SPI1 0xEC400000
#define S5PC100_PA_SPI2 0xEC500000
#define S5PC100_PA_I2S0 0xF2000000
#define S5PC100_PA_I2S1 0xF2100000
#define S5PC100_PA_I2S2 0xF2200000
/* USB HS OTG */
#define S5PC100_PA_USB_HSOTG (0xED200000)
#define S5PC100_PA_USB_HSPHY (0xED300000)
#define S5PC100_PA_AC97 0xF2300000
#define S5PC100_PA_FB (0xEE000000)
#define S5PC100_PA_PCM0 0xF2400000
#define S5PC100_PA_PCM1 0xF2500000
#define S5PC100_PA_FIMC0 (0xEE200000)
#define S5PC100_PA_FIMC1 (0xEE300000)
#define S5PC100_PA_FIMC2 (0xEE400000)
#define S5PC100_PA_SPDIF 0xF2600000
#define S5PC100_PA_I2S0 (0xF2000000)
#define S5PC100_PA_I2S1 (0xF2100000)
#define S5PC100_PA_I2S2 (0xF2200000)
#define S5PC100_PA_TSADC 0xF3000000
#define S5PC100_PA_AC97 0xF2300000
#define S5PC100_PA_KEYPAD 0xF3100000
/* PCM */
#define S5PC100_PA_PCM0 0xF2400000
#define S5PC100_PA_PCM1 0xF2500000
/* Compatibility Defines */
#define S5PC100_PA_SPDIF 0xF2600000
#define S3C_PA_FB S5PC100_PA_FB
#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
#define S3C_PA_IIC S5PC100_PA_IIC0
#define S3C_PA_IIC1 S5PC100_PA_IIC1
#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
#define S3C_PA_ONENAND S5PC100_PA_ONENAND
#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
#define S3C_PA_RTC S5PC100_PA_RTC
#define S3C_PA_TSADC S5PC100_PA_TSADC
#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
#define S3C_PA_WDT S5PC100_PA_WATCHDOG
#define S5PC100_PA_TSADC (0xF3000000)
#define S5P_PA_CHIPID S5PC100_PA_CHIPID
#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
#define S5P_PA_SDRAM S5PC100_PA_SDRAM
#define S5P_PA_SROMC S5PC100_PA_SROMC
#define S5P_PA_SYSCON S5PC100_PA_SYSCON
#define S5P_PA_TIMER S5PC100_PA_TIMER
/* KEYPAD */
#define S5PC100_PA_KEYPAD (0xF3100000)
#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
#define S5PC100_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5PC100_PA_SDRAM
#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
/* compatibility defines. */
#define S3C_PA_UART S5PC100_PA_UART
#define S3C_PA_IIC S5PC100_PA_IIC0
#define S3C_PA_IIC1 S5PC100_PA_IIC1
#define S3C_PA_FB S5PC100_PA_FB
#define S3C_PA_G2D S5PC100_PA_G2D
#define S3C_PA_G3D S5PC100_PA_G3D
#define S3C_PA_JPEG S5PC100_PA_JPEG
#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
#define S3C_PA_WDT S5PC100_PA_WATCHDOG
#define S3C_PA_TSADC S5PC100_PA_TSADC
#define S3C_PA_ONENAND S5PC100_PA_ONENAND
#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
#define S3C_PA_RTC S5PC100_PA_RTC
/* UART */
#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
#define S3C_PA_UART S5PC100_PA_UART
#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
#define S5P_PA_UART3 S5P_PA_UART(3)
#endif /* __ASM_ARCH_C100_MAP_H */
#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */

View File

@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5pv210/include/mach/map.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - Memory map definitions
@ -16,122 +16,120 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
#define S5PV210_PA_SROM_BANK5 (0xA8000000)
#define S5PV210_PA_SDRAM 0x20000000
#define S5PC110_PA_ONENAND (0xB0000000)
#define S5P_PA_ONENAND S5PC110_PA_ONENAND
#define S5PV210_PA_SROM_BANK5 0xA8000000
#define S5PC110_PA_ONENAND_DMA (0xB0600000)
#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
#define S5PC110_PA_ONENAND 0xB0000000
#define S5PC110_PA_ONENAND_DMA 0xB0600000
#define S5PV210_PA_CHIPID (0xE0000000)
#define S5P_PA_CHIPID S5PV210_PA_CHIPID
#define S5PV210_PA_CHIPID 0xE0000000
#define S5PV210_PA_SYSCON (0xE0100000)
#define S5P_PA_SYSCON S5PV210_PA_SYSCON
#define S5PV210_PA_SYSCON 0xE0100000
#define S5PV210_PA_GPIO (0xE0200000)
#define S5PV210_PA_GPIO 0xE0200000
/* SPI */
#define S5PV210_PA_SPI0 0xE1300000
#define S5PV210_PA_SPI1 0xE1400000
#define S5PV210_PA_SPDIF 0xE1100000
#define S5PV210_PA_KEYPAD (0xE1600000)
#define S5PV210_PA_SPI0 0xE1300000
#define S5PV210_PA_SPI1 0xE1400000
#define S5PV210_PA_IIC0 (0xE1800000)
#define S5PV210_PA_IIC1 (0xFAB00000)
#define S5PV210_PA_IIC2 (0xE1A00000)
#define S5PV210_PA_KEYPAD 0xE1600000
#define S5PV210_PA_TIMER (0xE2500000)
#define S5P_PA_TIMER S5PV210_PA_TIMER
#define S5PV210_PA_ADC 0xE1700000
#define S5PV210_PA_SYSTIMER (0xE2600000)
#define S5PV210_PA_IIC0 0xE1800000
#define S5PV210_PA_IIC1 0xFAB00000
#define S5PV210_PA_IIC2 0xE1A00000
#define S5PV210_PA_WATCHDOG (0xE2700000)
#define S5PV210_PA_AC97 0xE2200000
#define S5PV210_PA_RTC (0xE2800000)
#define S5PV210_PA_UART (0xE2900000)
#define S5PV210_PA_PCM0 0xE2300000
#define S5PV210_PA_PCM1 0xE1200000
#define S5PV210_PA_PCM2 0xE2B00000
#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0)
#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400)
#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800)
#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00)
#define S5PV210_PA_TIMER 0xE2500000
#define S5PV210_PA_SYSTIMER 0xE2600000
#define S5PV210_PA_WATCHDOG 0xE2700000
#define S5PV210_PA_RTC 0xE2800000
#define S5P_SZ_UART SZ_256
#define S5PV210_PA_UART 0xE2900000
#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
#define S5PV210_PA_SROMC 0xE8000000
#define S5PV210_PA_SROMC (0xE8000000)
#define S5P_PA_SROMC S5PV210_PA_SROMC
#define S5PV210_PA_CFCON 0xE8200000
#define S5PV210_PA_CFCON (0xE8200000)
#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
#define S5PV210_PA_MDMA 0xFA200000
#define S5PV210_PA_PDMA0 0xE0900000
#define S5PV210_PA_PDMA1 0xE0A00000
#define S5PV210_PA_HSOTG 0xEC000000
#define S5PV210_PA_HSPHY 0xEC100000
#define S5PV210_PA_FB (0xF8000000)
#define S5PV210_PA_IIS0 0xEEE30000
#define S5PV210_PA_IIS1 0xE2100000
#define S5PV210_PA_IIS2 0xE2A00000
#define S5PV210_PA_FIMC0 (0xFB200000)
#define S5PV210_PA_FIMC1 (0xFB300000)
#define S5PV210_PA_FIMC2 (0xFB400000)
#define S5PV210_PA_DMC0 0xF0000000
#define S5PV210_PA_DMC1 0xF1400000
#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
#define S5PV210_PA_VIC0 0xF2000000
#define S5PV210_PA_VIC1 0xF2100000
#define S5PV210_PA_VIC2 0xF2200000
#define S5PV210_PA_VIC3 0xF2300000
#define S5PV210_PA_HSOTG (0xEC000000)
#define S5PV210_PA_HSPHY (0xEC100000)
#define S5PV210_PA_FB 0xF8000000
#define S5PV210_PA_VIC0 (0xF2000000)
#define S5PV210_PA_VIC1 (0xF2100000)
#define S5PV210_PA_VIC2 (0xF2200000)
#define S5PV210_PA_VIC3 (0xF2300000)
#define S5PV210_PA_MDMA 0xFA200000
#define S5PV210_PA_PDMA0 0xE0900000
#define S5PV210_PA_PDMA1 0xE0A00000
#define S5PV210_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5PV210_PA_SDRAM
#define S5PV210_PA_MIPI_CSIS 0xFA600000
/* S/PDIF */
#define S5PV210_PA_SPDIF 0xE1100000
#define S5PV210_PA_FIMC0 0xFB200000
#define S5PV210_PA_FIMC1 0xFB300000
#define S5PV210_PA_FIMC2 0xFB400000
/* I2S */
#define S5PV210_PA_IIS0 0xEEE30000
#define S5PV210_PA_IIS1 0xE2100000
#define S5PV210_PA_IIS2 0xE2A00000
/* Compatibility Defines */
/* PCM */
#define S5PV210_PA_PCM0 0xE2300000
#define S5PV210_PA_PCM1 0xE1200000
#define S5PV210_PA_PCM2 0xE2B00000
#define S3C_PA_FB S5PV210_PA_FB
#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
#define S3C_PA_IIC S5PV210_PA_IIC0
#define S3C_PA_IIC1 S5PV210_PA_IIC1
#define S3C_PA_IIC2 S5PV210_PA_IIC2
#define S3C_PA_RTC S5PV210_PA_RTC
#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
#define S3C_PA_WDT S5PV210_PA_WATCHDOG
/* AC97 */
#define S5PV210_PA_AC97 0xE2200000
#define S5P_PA_CHIPID S5PV210_PA_CHIPID
#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
#define S5P_PA_ONENAND S5PC110_PA_ONENAND
#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
#define S5P_PA_SDRAM S5PV210_PA_SDRAM
#define S5P_PA_SROMC S5PV210_PA_SROMC
#define S5P_PA_SYSCON S5PV210_PA_SYSCON
#define S5P_PA_TIMER S5PV210_PA_TIMER
#define S5PV210_PA_ADC (0xE1700000)
#define SAMSUNG_PA_ADC S5PV210_PA_ADC
#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
#define S5PV210_PA_DMC0 (0xF0000000)
#define S5PV210_PA_DMC1 (0xF1400000)
/* UART */
#define S5PV210_PA_MIPI_CSIS 0xFA600000
#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
/* compatibility defines. */
#define S3C_PA_UART S5PV210_PA_UART
#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
#define S3C_PA_IIC S5PV210_PA_IIC0
#define S3C_PA_IIC1 S5PV210_PA_IIC1
#define S3C_PA_IIC2 S5PV210_PA_IIC2
#define S3C_PA_FB S5PV210_PA_FB
#define S3C_PA_RTC S5PV210_PA_RTC
#define S3C_PA_WDT S5PV210_PA_WATCHDOG
#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
#define S3C_PA_UART S5PV210_PA_UART
#define SAMSUNG_PA_ADC S5PV210_PA_ADC
#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
#define S5P_PA_UART3 S5P_PA_UART(3)
#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */

View File

@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
static struct regulator_init_data aquila_ldo3_data = {
.constraints = {
.name = "VUSB/MIPI_1.1V",
.name = "VUSB+MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
static struct regulator_init_data aquila_ldo8_data = {
.constraints = {
.name = "VUSB/VADC_3.3V",
.name = "VUSB+VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
static struct regulator_init_data aquila_ldo9_data = {
.constraints = {
.name = "VCC/VCAM_2.8V",
.name = "VCC+VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
.buck1_max_voltage1 = 1200000,
.buck1_max_voltage2 = 1200000,
.buck2_max_voltage = 1200000,
.buck1_voltage1 = 1200000,
.buck1_voltage2 = 1200000,
.buck1_voltage3 = 1200000,
.buck1_voltage4 = 1200000,
.buck2_voltage1 = 1200000,
.buck2_voltage2 = 1200000,
};
#endif

View File

@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = {
static struct regulator_init_data goni_ldo3_data = {
.constraints = {
.name = "VUSB/MIPI_1.1V",
.name = "VUSB+MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = {
static struct regulator_init_data goni_ldo8_data = {
.constraints = {
.name = "VUSB/VADC_3.3V",
.name = "VUSB+VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = {
static struct regulator_init_data goni_ldo9_data = {
.constraints = {
.name = "VCC/VCAM_2.8V",
.name = "VCC+VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
.buck1_max_voltage1 = 1200000,
.buck1_max_voltage2 = 1200000,
.buck2_max_voltage = 1200000,
.buck1_voltage1 = 1200000,
.buck1_voltage2 = 1200000,
.buck1_voltage3 = 1200000,
.buck1_voltage4 = 1200000,
.buck2_voltage1 = 1200000,
.buck2_voltage2 = 1200000,
};
#endif

View File

@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5pv310/include/mach/map.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV310 - Memory map definitions
@ -23,90 +23,43 @@
#include <plat/map-s5p.h>
#define S5PV310_PA_SYSRAM (0x02025000)
#define S5PV310_PA_SYSRAM 0x02025000
#define S5PV310_PA_I2S0 0x03830000
#define S5PV310_PA_I2S1 0xE3100000
#define S5PV310_PA_I2S2 0xE2A00000
#define S5PV310_PA_PCM0 0x03840000
#define S5PV310_PA_PCM1 0x13980000
#define S5PV310_PA_PCM2 0x13990000
#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
#define S5PC210_PA_ONENAND (0x0C000000)
#define S5P_PA_ONENAND S5PC210_PA_ONENAND
#define S5PC210_PA_ONENAND 0x0C000000
#define S5PC210_PA_ONENAND_DMA 0x0C600000
#define S5PC210_PA_ONENAND_DMA (0x0C600000)
#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
#define S5PV310_PA_CHIPID 0x10000000
#define S5PV310_PA_CHIPID (0x10000000)
#define S5P_PA_CHIPID S5PV310_PA_CHIPID
#define S5PV310_PA_SYSCON 0x10010000
#define S5PV310_PA_PMU 0x10020000
#define S5PV310_PA_CMU 0x10030000
#define S5PV310_PA_SYSCON (0x10010000)
#define S5P_PA_SYSCON S5PV310_PA_SYSCON
#define S5PV310_PA_WATCHDOG 0x10060000
#define S5PV310_PA_RTC 0x10070000
#define S5PV310_PA_PMU (0x10020000)
#define S5PV310_PA_DMC0 0x10400000
#define S5PV310_PA_CMU (0x10030000)
#define S5PV310_PA_COMBINER 0x10448000
#define S5PV310_PA_WATCHDOG (0x10060000)
#define S5PV310_PA_RTC (0x10070000)
#define S5PV310_PA_COREPERI 0x10500000
#define S5PV310_PA_GIC_CPU 0x10500100
#define S5PV310_PA_TWD 0x10500600
#define S5PV310_PA_GIC_DIST 0x10501000
#define S5PV310_PA_L2CC 0x10502000
#define S5PV310_PA_DMC0 (0x10400000)
#define S5PV310_PA_COMBINER (0x10448000)
#define S5PV310_PA_COREPERI (0x10500000)
#define S5PV310_PA_GIC_CPU (0x10500100)
#define S5PV310_PA_TWD (0x10500600)
#define S5PV310_PA_GIC_DIST (0x10501000)
#define S5PV310_PA_L2CC (0x10502000)
/* DMA */
#define S5PV310_PA_MDMA 0x10810000
#define S5PV310_PA_PDMA0 0x12680000
#define S5PV310_PA_PDMA1 0x12690000
#define S5PV310_PA_GPIO1 (0x11400000)
#define S5PV310_PA_GPIO2 (0x11000000)
#define S5PV310_PA_GPIO3 (0x03860000)
#define S5PV310_PA_MIPI_CSIS0 0x11880000
#define S5PV310_PA_MIPI_CSIS1 0x11890000
#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
#define S5PV310_PA_SROMC (0x12570000)
#define S5P_PA_SROMC S5PV310_PA_SROMC
/* S/PDIF */
#define S5PV310_PA_SPDIF 0xE1100000
/* I2S */
#define S5PV310_PA_I2S0 0x03830000
#define S5PV310_PA_I2S1 0xE3100000
#define S5PV310_PA_I2S2 0xE2A00000
/* PCM */
#define S5PV310_PA_PCM0 0x03840000
#define S5PV310_PA_PCM1 0x13980000
#define S5PV310_PA_PCM2 0x13990000
/* AC97 */
#define S5PV310_PA_AC97 0x139A0000
#define S5PV310_PA_UART (0x13800000)
#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
#define S5P_PA_UART3 S5P_PA_UART(3)
#define S5P_PA_UART4 S5P_PA_UART(4)
#define S5P_SZ_UART SZ_256
#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
#define S5PV310_PA_TIMER (0x139D0000)
#define S5P_PA_TIMER S5PV310_PA_TIMER
#define S5PV310_PA_SDRAM (0x40000000)
#define S5P_PA_SDRAM S5PV310_PA_SDRAM
#define S5PV310_PA_MDMA 0x10810000
#define S5PV310_PA_PDMA0 0x12680000
#define S5PV310_PA_PDMA1 0x12690000
#define S5PV310_PA_SYSMMU_MDMA 0x10A40000
#define S5PV310_PA_SYSMMU_SSS 0x10A50000
@ -125,8 +78,31 @@
#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
/* compatibility defines. */
#define S3C_PA_UART S5PV310_PA_UART
#define S5PV310_PA_GPIO1 0x11400000
#define S5PV310_PA_GPIO2 0x11000000
#define S5PV310_PA_GPIO3 0x03860000
#define S5PV310_PA_MIPI_CSIS0 0x11880000
#define S5PV310_PA_MIPI_CSIS1 0x11890000
#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
#define S5PV310_PA_SROMC 0x12570000
#define S5PV310_PA_UART 0x13800000
#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
#define S5PV310_PA_AC97 0x139A0000
#define S5PV310_PA_TIMER 0x139D0000
#define S5PV310_PA_SDRAM 0x40000000
#define S5PV310_PA_SPDIF 0xE1100000
/* Compatibility Defines */
#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
@ -141,7 +117,28 @@
#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
#define S3C_PA_RTC S5PV310_PA_RTC
#define S3C_PA_WDT S5PV310_PA_WATCHDOG
#define S5P_PA_CHIPID S5PV310_PA_CHIPID
#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0
#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1
#define S5P_PA_ONENAND S5PC210_PA_ONENAND
#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
#define S5P_PA_SDRAM S5PV310_PA_SDRAM
#define S5P_PA_SROMC S5PV310_PA_SROMC
#define S5P_PA_SYSCON S5PV310_PA_SYSCON
#define S5P_PA_TIMER S5PV310_PA_TIMER
/* UART */
#define S3C_PA_UART S5PV310_PA_UART
#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
#define S5P_PA_UART3 S5P_PA_UART(3)
#define S5P_PA_UART4 S5P_PA_UART(4)
#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */

View File

@ -62,7 +62,7 @@
#define SPEAR320_SMII1_BASE 0xAB000000
#define SPEAR320_SMII1_SIZE 0x01000000
#define SPEAR320_SOC_CONFIG_BASE 0xB4000000
#define SPEAR320_SOC_CONFIG_BASE 0xB3000000
#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
/* Interrupt registers offsets and masks */
#define INT_STS_MASK_REG 0x04

View File

@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
const struct matrix_keymap_data *keymap_data;
bool wakeup;
bool use_fn_map;
};
#endif

View File

@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
#ifdef CONFIG_ARM_ERRATA_753970
/* write to an unmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
cache_wait(base + L2X0_CACHE_SYNC, 1);
}

View File

@ -264,6 +264,12 @@ __v7_setup:
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_751472
cmp r6, #0x30 @ present prior to r3p0
mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrlt r10, r10, #1 << 11 @ set bit #11
mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
3: mov r10, #0
#ifdef HARVARD_CACHE

View File

@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
struct omap_mbox *mbox;
int ret;
struct omap_mbox *_mbox, *mbox = NULL;
int i, ret;
if (!mboxes)
return ERR_PTR(-EINVAL);
for (mbox = *mboxes; mbox; mbox++)
if (!strcmp(mbox->name, name))
for (i = 0; (_mbox = mboxes[i]); i++) {
if (!strcmp(_mbox->name, name)) {
mbox = _mbox;
break;
}
}
if (!mbox)
return ERR_PTR(-ENOENT);

View File

@ -28,7 +28,7 @@
static struct resource s5p_uart0_resource[] = {
[0] = {
.start = S5P_PA_UART0,
.end = S5P_PA_UART0 + S5P_SZ_UART,
.end = S5P_PA_UART0 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
static struct resource s5p_uart1_resource[] = {
[0] = {
.start = S5P_PA_UART1,
.end = S5P_PA_UART1 + S5P_SZ_UART,
.end = S5P_PA_UART1 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
static struct resource s5p_uart2_resource[] = {
[0] = {
.start = S5P_PA_UART2,
.end = S5P_PA_UART2 + S5P_SZ_UART,
.end = S5P_PA_UART2 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
[0] = {
.start = S5P_PA_UART3,
.end = S5P_PA_UART3 + S5P_SZ_UART,
.end = S5P_PA_UART3 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
[0] = {
.start = S5P_PA_UART4,
.end = S5P_PA_UART4 + S5P_SZ_UART,
.end = S5P_PA_UART4 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
[0] = {
.start = S5P_PA_UART5,
.end = S5P_PA_UART5 + S5P_SZ_UART,
.end = S5P_PA_UART5 + S5P_SZ_UART - 1,
.flags = IORESOURCE_MEM,
},
[1] = {

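The hunks above change each UART memory resource so that .end is S5P_PA_UARTn + S5P_SZ_UART - 1 rather than S5P_PA_UARTn + S5P_SZ_UART: a struct resource describes an inclusive [start, end] byte range, so the old value claimed one byte too many and resource_size() (end - start + 1) reported 257 bytes instead of 256. A minimal illustration of the convention, using a made-up base address rather than anything from this commit:

#include <linux/ioport.h>

/* Illustrative only: an inclusive memory resource covering sz bytes at base. */
#define EXAMPLE_RES_MEM(base, sz)					\
	{								\
		.start = (base),					\
		.end   = (base) + (sz) - 1,	/* inclusive end */	\
		.flags = IORESOURCE_MEM,				\
	}

static struct resource example_uart_resource[] = {
	/* a 256-byte register window: 0xE2900000 .. 0xE29000FF */
	EXAMPLE_RES_MEM(0xE2900000, 0x100),
};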
View File

@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
s3c_device_ts.dev.platform_data = npd;
}
EXPORT_SYMBOL(s3c24xx_ts_set_platdata);

View File

@ -24,10 +24,10 @@ static inline void putc(int c)
{
void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
barrier();
writel(c, base + UART01x_DR);
writel_relaxed(c, base + UART01x_DR);
}
static inline void flush(void)

View File

@ -14,6 +14,6 @@
#ifndef __PLAT_VMALLOC_H
#define __PLAT_VMALLOC_H
#define VMALLOC_END 0xF0000000
#define VMALLOC_END 0xF0000000UL
#endif /* __PLAT_VMALLOC_H */

View File

@ -72,11 +72,6 @@ SECTIONS
INIT_TEXT_SECTION(PAGE_SIZE)
.init.data : { INIT_DATA }
.init.setup : { INIT_SETUP(16) }
#ifdef CONFIG_ETRAX_ARCH_V32
__start___param = .;
__param : { *(__param) }
__stop___param = .;
#endif
.initcall.init : {
INIT_CALLS
}

View File

@ -240,6 +240,12 @@ struct machdep_calls {
* claims to support kexec.
*/
int (*machine_kexec_prepare)(struct kimage *image);
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND

View File

@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
save_ftrace_enabled = __ftrace_enabled_save();
default_machine_kexec(image);
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
else
default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled);

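The machine_kexec() change above turns the generic function into a dispatcher: a platform may now supply ppc_md.machine_kexec (the hook added to struct machdep_calls in the previous hunk), and only when it is absent does default_machine_kexec() run. A hedged sketch of how a platform description might install such a hook; the platform name and callback are hypothetical, not part of this commit:

#include <asm/machdep.h>
#include <asm/kexec.h>

/* Hypothetical platform callback, following the ppc_md pattern. */
static void example_machine_kexec(struct kimage *image)
{
	/*
	 * Past the point of no return: no allocation, no error returns.
	 * Quiesce platform-specific hardware here, then fall back to the
	 * common code path.
	 */
	default_machine_kexec(image);
}

define_machine(example_platform) {
	.name		= "example-platform",
	.machine_kexec	= example_machine_kexec,
};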
View File

@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
prime_debug_regs(new_thread);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
if (thread->dabr) {
@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
set_dabr(0);
}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr)
@ -670,11 +672,11 @@ void flush_thread(void)
{
discard_lazy_cpu_state();
#ifdef CONFIG_HAVE_HW_BREAKPOINTS
#ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINTS */
#else /* CONFIG_HAVE_HW_BREAKPOINT */
set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void

View File

@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active
* batch on it.
*
* Must be called from within some kind of spinlock/non-preempt region...
*/
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr;
unsigned int psize;
int ssize;
@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0);
put_cpu_var(ppc64_tlb_batch);
return;
}
@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
put_cpu_var(ppc64_tlb_batch);
}
/*

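The hpte_need_flush() hunks above swap __get_cpu_var() for a get_cpu_var()/put_cpu_var() pair, so the per-CPU TLB batch is accessed with preemption disabled instead of relying on every caller providing a spinlock or non-preempt region (the removed comment line documented that old requirement); note that each early return now needs its own put_cpu_var(). A minimal sketch of the pattern with a hypothetical per-CPU counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_bump(void)
{
	/* get_cpu_var() disables preemption and yields this CPU's instance. */
	unsigned long *ctr = &get_cpu_var(example_counter);

	(*ctr)++;

	/* Every exit path must re-enable preemption, as the hunks above do. */
	put_cpu_var(example_counter);
}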
View File

@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
unsigned long output_addr;
unsigned char *output;
check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
output = (unsigned char *) output_addr;
#ifdef CONFIG_BLK_DEV_INITRD
/*

View File

@ -36,14 +36,19 @@
static inline int atomic_read(const atomic_t *v)
{
barrier();
return v->counter;
int c;
asm volatile(
" l %0,%1\n"
: "=d" (c) : "Q" (v->counter));
return c;
}
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
barrier();
asm volatile(
" st %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
static inline long long atomic64_read(const atomic64_t *v)
{
barrier();
return v->counter;
long long c;
asm volatile(
" lg %0,%1\n"
: "=d" (c) : "Q" (v->counter));
return c;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
v->counter = i;
barrier();
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)

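The atomic_read()/atomic_set() hunks above replace barrier() plus a plain C access with explicit load/store instructions ("l"/"st" for 32-bit, "lg"/"stg" for 64-bit), guaranteeing that the counter is touched by exactly one full-width access. On architectures without such asm the same intent is usually expressed with a volatile cast; a generic illustration, not s390 code:

/* Illustrative only: force a single load / single store of v->counter. */
static inline int atomic_read_sketch(const atomic_t *v)
{
	return (*(volatile const int *)&v->counter);
}

static inline void atomic_set_sketch(atomic_t *v, int i)
{
	*(volatile int *)&v->counter = i;
}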
View File

@ -13,6 +13,7 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
#define NET_SKB_PAD 32
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

View File

@ -88,6 +88,7 @@ extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;

View File

@ -36,6 +36,11 @@
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119

View File

@ -22,6 +22,7 @@
#define ARCH_P4_CNTRVAL_BITS (40)
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
#define P4_ESCR_EVENT_MASK 0x7e000000U
#define P4_ESCR_EVENT_SHIFT 25

View File

@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)

View File

@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
if (acpi_skip_timer_override &&
intsrc->source_irq == 0 && intsrc->global_irq == 2) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
if (acpi_skip_timer_override) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
}
if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
mp_override_legacy_irq(intsrc->source_irq,

View File

@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
global_clock_event = &adev->evt;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);

View File

@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
printk(KERN_WARNING PFX "Warning: EST-capable CPU "
"detected. The acpi-cpufreq module offers "
"voltage scaling in addition of frequency "
printk_once(KERN_WARNING PFX "Warning: EST-capable "
"CPU detected. The acpi-cpufreq module offers "
"voltage scaling in addition to frequency "
"scaling. You should use that instead of "
"p4-clockmod, if possible.\n");
switch (c->x86_model) {

View File

@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0, cpu;
int rv;
for_each_online_cpu(i) {
int rc;
@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
cpb_capable = true;
register_cpu_notifier(&cpb_nb);
msrs = msrs_alloc();
if (!msrs) {
printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
return -ENOMEM;
}
register_cpu_notifier(&cpb_nb);
rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
for_each_cpu(cpu, cpu_online_mask) {
@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
(cpb_enabled ? "on" : "off"));
}
return cpufreq_register_driver(&cpufreq_amd64_driver);
rv = cpufreq_register_driver(&cpufreq_amd64_driver);
if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
unregister_cpu_notifier(&cpb_nb);
msrs_free(msrs);
msrs = NULL;
}
return rv;
}
/* driver entry point for term */

View File

@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
return 1;
}
/* it might be unflagged overflow */
rdmsrl(hwc->event_base + hwc->idx, v);
if (!(v & ARCH_P4_CNTRVAL_MASK))
/*
* In some circumstances the overflow might issue an NMI but did
* not set P4_CCCR_OVF bit. Because a counter holds a negative value
* we simply check for high bit being set, if it's cleared it means
* the counter has reached zero value and continued counting before
* real NMI signal was received:
*/
if (!(v & ARCH_P4_UNFLAGGED_BIT))
return 1;
return 0;

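The replacement test above depends on perf programming the P4 counter with a negative value: with ARCH_P4_CNTRVAL_BITS equal to 40, ARCH_P4_UNFLAGGED_BIT is bit 39, the sign bit of the 40-bit counter. While the counter is still counting up toward overflow that bit stays set; if it reads back clear, the counter has already crossed zero, so the NMI is treated as a genuine overflow even though P4_CCCR_OVF was never latched. The old check, !(v & ARCH_P4_CNTRVAL_MASK), only matched a counter reading exactly zero and therefore missed counters that had wrapped and kept counting. Expanded values of the constants added earlier in this diff, for reference:

#define ARCH_P4_CNTRVAL_BITS	(40)
#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)	/* 0x000000FFFFFFFFFFULL */
#define ARCH_P4_UNFLAGGED_BIT	((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))	/* 0x0000008000000000ULL */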
View File

@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
u32 old, d;
u32 d;
d = read_pci_config(num, slot, func, 0x70);
old = d;
d &= ~(1<<8);
write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
write_pci_config(num, slot, func, 0x70, old);
return d;
}
@ -160,11 +155,14 @@ static void __init ati_bugs_contd(int num, int slot, int func)
{
u32 d, rev;
if (acpi_use_timer_override)
rev = ati_sbx00_rev(num, slot, func);
if (rev >= 0x40)
acpi_fix_pin2_polarity = 1;
if (rev > 0x13)
return;
rev = ati_sbx00_rev(num, slot, func);
if (rev > 0x13)
if (acpi_use_timer_override)
return;
/* check for IRQ0 interrupt swap */

View File

@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
{ /* Handle problems with rebooting on VersaLogic Menlow boards */
.callback = set_bios_reboot,
.ident = "VersaLogic Menlow based board",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
},
},
{ }
};

View File

@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
return 1;
}

View File

@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers.
*/
res = alloc_bootmem(chunk_size);
if (!res)
return NULL;
BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
free_mem = chunk_size;

View File

@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
__blk_run_queue(q, false);
}
EXPORT_SYMBOL(blk_start_queue);
@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
* @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
*
*/
void __blk_run_queue(struct request_queue *q)
void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
blk_remove_plug(q);
@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
__blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
__blk_run_queue(q);
__blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *

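The signature change above threads a force_kblockd flag through __blk_run_queue(): callers pass false to keep the old behaviour (run ->request_fn directly when not re-entering) and true where calling into the driver from the current context would be unsafe, as the blk-flush hunk below does from the request completion path. A hedged usage sketch of the new signature; the helper functions are hypothetical:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical: kick a queue from process context. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* Safe to recurse into ->request_fn here, so don't force kblockd. */
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/* Hypothetical: kick a queue from a completion path (queue lock held). */
static void example_kick_from_completion(struct request_queue *q)
{
	/* Always defer to kblockd; calling back into the driver here may confuse it. */
	__blk_run_queue(q, true);
}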
View File

@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
/*
* Moving a request silently to empty queue_head may stall the
* queue. Kick the queue in those cases.
* queue. Kick the queue in those cases. This function is called
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
*/
if (was_empty && next_rq)
__blk_run_queue(q);
__blk_run_queue(q, true);
}
static void pre_flush_end_io(struct request *rq, int error)
@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
BUG();
}
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
return rq;
}

View File

@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
}
/**
* blkdev_issue_zeroout generate number of zero filed write bios
* blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write

View File

@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
unsigned long delay);
struct throtl_rb_root {
struct rb_root rb;
struct rb_node *left;
@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies))
throtl_schedule_delayed_work(td->queue, 0);
throtl_schedule_delayed_work(td, 0);
else
throtl_schedule_delayed_work(td->queue,
(st->min_disptime - jiffies));
throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
}
/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) {
@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
* Cancel that and schedule a new one.
*/
__cancel_delayed_work(dwork);
kblockd_schedule_delayed_work(q, dwork, delay);
queue_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies);
}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
smp_mb__after_atomic_inc();
/* Schedule a work now to process the limit change */
throtl_schedule_delayed_work(td->queue, 0);
throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_write_bps(void *key,
@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0);
throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_read_iops(void *key,
@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0);
throtl_schedule_delayed_work(td, 0);
}
static void throtl_update_blkio_group_write_iops(void *key,
@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0);
throtl_schedule_delayed_work(td, 0);
}
void throtl_shutdown_timer_wq(struct request_queue *q)
@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
static int __init throtl_init(void)
{
kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
blkio_policy_register(&blkio_policy_throtl);
return 0;
}

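The throttling hunks above stop borrowing kblockd (whose kblockd_schedule_delayed_work() helper is removed in the blk-core hunk earlier in this diff) and instead queue their delayed work on a dedicated kthrotld workqueue created at init. A minimal sketch of that dedicated-workqueue pattern, using a hypothetical subsystem name:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_dwork;

static void example_work_fn(struct work_struct *work)
{
	/* limit-change / dispatch processing would go here */
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_dwork, example_work_fn);

	/* cancel any pending instance, then re-arm with a fresh delay,
	 * mirroring throtl_schedule_delayed_work() above */
	__cancel_delayed_work(&example_dwork);
	queue_delayed_work(example_wq, &example_dwork, HZ / 10);
	return 0;
}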
View File

@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
__blk_run_queue(cfqd->queue, false);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
__blk_run_queue(cfqd->queue);
__blk_run_queue(cfqd->queue, false);
}
}
@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__blk_run_queue(cfqd->queue);
__blk_run_queue(cfqd->queue, false);
spin_unlock_irq(q->queue_lock);
}

View File

@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
__blk_run_queue(q);
__blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
__blk_run_queue(q);
__blk_run_queue(q, false);
break;
case ELEVATOR_INSERT_SORT:

View File

@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
res = __invalidate_device(bdev);
res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;

View File

@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
if (!(mode & FMODE_EXCL) &&
blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
if (!(mode & FMODE_EXCL)) {
bdgrab(bdev);
if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
}
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);

View File

@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */
};
struct acpi_gpe_notify_object {
struct acpi_namespace_node *node;
struct acpi_gpe_notify_object *next;
};
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
};
/*

View File

@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* from this thread -- because handlers may in turn run other
* control methods.
*/
status =
acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
device_node,
ACPI_NOTIFY_DEVICE_WAKE);
status = acpi_ev_queue_notify_request(
local_gpe_event_info->dispatch.device.node,
ACPI_NOTIFY_DEVICE_WAKE);
notify_object = local_gpe_event_info->dispatch.device.next;
while (ACPI_SUCCESS(status) && notify_object) {
status = acpi_ev_queue_notify_request(
notify_object->node,
ACPI_NOTIFY_DEVICE_WAKE);
notify_object = notify_object->next;
}
break;
case ACPI_GPE_DISPATCH_METHOD:

View File

@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node;
struct acpi_gpe_notify_object *notify_object;
acpi_cpu_flags flags;
u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
goto unlock_and_exit;
}
if (wake_device == ACPI_ROOT_OBJECT) {
goto out;
}
/*
* If there is no method or handler for this GPE, then the
* wake_device will be notified whenever this GPE fires (aka
* "implicit notify") Note: The GPE is assumed to be
* level-triggered (for windows compatibility).
*/
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
/* Validate wake_device is of type Device */
device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
wake_device);
if (device_node->type != ACPI_TYPE_DEVICE) {
goto unlock_and_exit;
}
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
gpe_event_info->dispatch.device_node = device_node;
gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
&& gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
goto out;
}
/* Validate wake_device is of type Device */
device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
if (device_node->type != ACPI_TYPE_DEVICE) {
goto unlock_and_exit;
}
if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
gpe_event_info->dispatch.device.node = device_node;
gpe_event_info->dispatch.device.next = NULL;
} else {
/* There are multiple devices to notify implicitly. */
notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
if (!notify_object) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
notify_object->node = device_node;
notify_object->next = gpe_event_info->dispatch.device.next;
gpe_event_info->dispatch.device.next = notify_object;
}
out:
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
status = AE_OK;

View File

@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
static int uncopied_bytes;
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = table.length;
buf = kzalloc(uncopied_bytes, GFP_KERNEL);
uncopied_bytes = max_size = table.length;
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (uncopied_bytes < count) {
kfree(buf);
if (buf == NULL)
return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL;
}
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);

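The added checks above bound every write against the buffer sized from the first table header: *ppos > max_size and *ppos + count > max_size reject offsets past the end, count > uncopied_bytes rejects more data than is still expected, and *ppos + count < count rejects the case where the sum wraps around, which would otherwise sneak past the range tests. A restatement of that validation as a standalone helper, purely for illustration:

#include <linux/types.h>

/* Sketch: is a write of 'count' bytes at 'pos' acceptable for this buffer? */
static bool example_write_in_bounds(loff_t pos, size_t count,
				    u32 max_size, u32 remaining)
{
	if (pos > max_size || pos + count > max_size)
		return false;		/* past the end of the staging buffer */
	if (pos + count < count)
		return false;		/* pos + count wrapped around */
	if (count > remaining)
		return false;		/* more than the table has left to receive */
	return true;
}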
View File

@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
__invalidate_device(bdev);
__invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {

Some files were not shown because too many files have changed in this diff.