Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
This is the 3.10.73 stable release
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2
iQIcBAABCAAGBQJVFBE+AAoJEDjbvchgkmk+oTkP/j2ipSvgXghFEipZbOJUQkqC
fa8elfoF7riTKpKOuDtDU2WI1ttCGYs5gmTNpd4KaEt23eJOQgVqIpV8GhAkW5Af
NVyGhjF3dXNqpBkxnyuIkk5OLrNKGRNS2xpz1U254iGObYrK+tr62IzGPxEcPAhX
Y+58xPVSjLtNdTJW3YLT3DohUbnbHG6Br9geI1IHtlxg1oDiTxtnX2FmOFzzDpP5
qu8gnPIekg/+1EE46nEiq0C59AwC3aCzNxwlYe1Kd41SY3LUFF1eZMzmOnnwyI5K
3FslAzT6x/sOmGJFTYrKjFA4GKsW67xHVkB/hp/Mu768RqxiQCxV4kgmPsAFLbXb
D5qbNwr3i0iQ/9AaD7h8HJkxC/KHmszMux00L/mgZ3SGdGMEIBxHg+oP8+nP8V6C
WfXKSWA94dpdRyULEfWdnKnUnp2860C7kt7ASTkOl8rIgU8HgaRqeu+U/KPM2ovD
ZJtXPVB5UXCRuVAhZwbvvrLOY8UMZTnv2auAaeLYG8YptcvGeN5Z398/8qdV/z7c
A9kOsgebs74X+lR3rbVgSDPQaq2AEiuIvtX77SfmrWXBXGmc99i9+PikuFggRprz
cJm5bCM9DaHu/3b77X9Fwl7vnpReB0zPHiwTdH/p7OPMf5m1uQt7SqegC6btLPHs
iYgjLd4oW+6uiV/2X1Vx
=L+mC
-----END PGP SIGNATURE-----
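The message and signature above come from the upstream v3.10.73 stable tag. A minimal sketch of verifying it, assuming the kernel.org linux-stable repository URL and that the signer's public key is already in the local GnuPG keyring:

    # Fetch the signed tag from the upstream stable tree, then verify it.
    git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
    git fetch stable tag v3.10.73
    git tag -v v3.10.73   # runs gpg --verify against the signature block above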
Merge commit 'v3.10.73' into LA.BF64.1.2.9
This merge brings us up to date with upstream kernel.org tag v3.10.73.
As part of the conflict resolution, changes introduced by commit 72684eae7
("arm64: Fix up /proc/cpuinfo") have been intentionally dropped, as they
conflict with Android changes in the msm-3.10 kernel that solve the same
problems in a different way. Since userspace readers of this file may depend
on the existing msm-3.10 implementation, it is left as-is for now. The commit
may be introduced later if it is found not to impact userspaces paired with
this kernel.
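For context, a hedged sketch of how a merge commit like this is produced (the branch and tag names are taken from the subject line above; the conflict resolution, including dropping the hunks of 72684eae7, is a manual step shown only as comments):

    git checkout LA.BF64.1.2.9
    git merge v3.10.73    # brings in the 264 upstream commits listed below
    # ... resolve conflicts by hand, keeping the msm-3.10 /proc/cpuinfo code
    #     rather than the changes from 72684eae7 ...
    git commit            # records this merge commit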
* commit 'v3.10.73' (264 commits):
Linux 3.10.73
target: Allow Write Exclusive non-reservation holders to READ
target: Allow AllRegistrants to re-RESERVE existing reservation
target: Fix R_HOLDER bit usage for AllRegistrants
target/pscsi: Fix NULL pointer dereference in get_device_type
iscsi-target: Avoid early conn_logout_comp for iser connections
target: Fix reference leak in target_get_sess_cmd() error path
ARM: at91: pm: fix at91rm9200 standby
ipvs: rerouting to local clients is not needed anymore
ipvs: add missing ip_vs_pe_put in sync code
powerpc/smp: Wait until secondaries are active & online
x86/vdso: Fix the build on GCC5
x86/fpu: Drop_fpu() should not assume that tsk equals current
x86/fpu: Avoid math_state_restore() without used_math() in __restore_xstate_sig()
crypto: aesni - fix memory usage in GCM decryption
libsas: Fix Kernel Crash in smp_execute_task
xen-pciback: limit guest control of command register
nilfs2: fix deadlock of segment constructor during recovery
regulator: core: Fix enable GPIO reference counting
regulator: Only enable disabled regulators on resume
ALSA: hda - Treat stereo-to-mono mix properly
ALSA: hda - Add workaround for MacBook Air 5,2 built-in mic
ALSA: hda - Set single_adc_amp flag for CS420x codecs
ALSA: hda - Don't access stereo amps for mono channel widgets
ALSA: hda - Fix built-in mic on Compaq Presario CQ60
ALSA: control: Add sanity checks for user ctl id name string
spi: pl022: Fix race in giveback() leading to driver lock-up
tpm/ibmvtpm: Additional LE support for tpm_ibmvtpm_send
workqueue: fix hang involving racing cancel[_delayed]_work_sync()'s for PREEMPT_NONE
can: add missing initialisations in CAN related skbuffs
Change email address for 8250_pci
virtio_console: init work unconditionally
fuse: notify: don't move pages
fuse: set stolen page uptodate
drm/radeon: drop setting UPLL to sleep mode
drm/radeon: do a posting read in rs600_set_irq
drm/radeon: do a posting read in si_set_irq
drm/radeon: do a posting read in r600_set_irq
drm/radeon: do a posting read in r100_set_irq
drm/radeon: do a posting read in evergreen_set_irq
drm/radeon: fix DRM_IOCTL_RADEON_CS oops
tcp: make connect() mem charging friendly
net: compat: Update get_compat_msghdr() to match copy_msghdr_from_user() behaviour
tcp: fix tcp fin memory accounting
Revert "net: cx82310_eth: use common match macro"
rxrpc: bogus MSG_PEEK test in rxrpc_recvmsg()
caif: fix MSG_OOB test in caif_seqpkt_recvmsg()
inet_diag: fix possible overflow in inet_diag_dump_one_icsk()
rds: avoid potential stack overflow
net: sysctl_net_core: check SNDBUF and RCVBUF for min length
sparc64: Fix several bugs in memmove().
sparc: Touch NMI watchdog when walking cpus and calling printk
sparc: perf: Make counting mode actually work
sparc: perf: Remove redundant perf_pmu_{en|dis}able calls
sparc: semtimedop() unreachable due to comparison error
sparc32: destroy_context() and switch_mm() needs to disable interrupts.
Linux 3.10.72
ath5k: fix spontaneus AR5312 freezes
ACPI / video: Load the module even if ACPI is disabled
drm/radeon: fix 1 RB harvest config setup for TN/RL
Drivers: hv: vmbus: incorrect device name is printed when child device is unregistered
HID: fixup the conflicting keyboard mappings quirk
HID: input: fix confusion on conflicting mappings
staging: comedi: cb_pcidas64: fix incorrect AI range code handling
dm snapshot: fix a possible invalid memory access on unload
dm: fix a race condition in dm_get_md
dm io: reject unsupported DISCARD requests with EOPNOTSUPP
dm mirror: do not degrade the mirror on discard error
staging: comedi: comedi_compat32.c: fix COMEDI_CMD copy back
clk: sunxi: Support factor clocks with N factor starting not from 0
fixed invalid assignment of 64bit mask to host dma_boundary for scatter gather segment boundary limit.
nilfs2: fix potential memory overrun on inode
IB/qib: Do not write EEPROM
sg: fix read() error reporting
ALSA: hda - Add pin configs for ASUS mobo with IDT 92HD73XX codec
ALSA: pcm: Don't leave PREPARED state after draining
tty: fix up atime/mtime mess, take four
sunrpc: fix braino in ->poll()
procfs: fix race between symlink removals and traversals
debugfs: leave freeing a symlink body until inode eviction
autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for allocation
USB: serial: fix potential use-after-free after failed probe
TTY: fix tty_wait_until_sent on 64-bit machines
USB: serial: fix infinite wait_until_sent timeout
net: irda: fix wait_until_sent poll timeout
xhci: fix reporting of 0-sized URBs in control endpoint
xhci: Allocate correct amount of scratchpad buffers
usb: ftdi_sio: Add jtag quirk support for Cyber Cortex AV boards
USB: usbfs: don't leak kernel data in siginfo
USB: serial: cp210x: Adding Seletek device id's
KVM: MIPS: Fix trace event to save PC directly
KVM: emulate: fix CMPXCHG8B on 32-bit hosts
Btrfs:__add_inode_ref: out of bounds memory read when looking for extended ref.
Btrfs: fix data loss in the fast fsync path
btrfs: fix lost return value due to variable shadowing
iio: imu: adis16400: Fix sign extension
x86/asm/entry/64: Remove a bogus 'ret_from_fork' optimization
PM / QoS: remove duplicate call to pm_qos_update_target
target: Check for LBA + sectors wrap-around in sbc_parse_cdb
mm/memory.c: actually remap enough memory
mm/compaction: fix wrong order check in compact_finished()
mm/nommu.c: fix arithmetic overflow in __vm_enough_memory()
mm/mmap.c: fix arithmetic overflow in __vm_enough_memory()
mm/hugetlb: add migration entry check in __unmap_hugepage_range
team: don't traverse port list using rcu in team_set_mac_address
udp: only allow UFO for packets from SOCK_DGRAM sockets
usb: plusb: Add support for National Instruments host-to-host cable
macvtap: make sure neighbour code can push ethernet header
net: compat: Ignore MSG_CMSG_COMPAT in compat_sys_{send, recv}msg
team: fix possible null pointer dereference in team_handle_frame
net: reject creation of netdev names with colons
ematch: Fix auto-loading of ematch modules.
net: phy: Fix verification of EEE support in phy_init_eee
ipv4: ip_check_defrag should not assume that skb_network_offset is zero
ipv4: ip_check_defrag should correctly check return value of skb_copy_bits
gen_stats.c: Duplicate xstats buffer for later use
rtnetlink: call ->dellink on failure when ->newlink exists
ipv6: fix ipv6_cow_metrics for non DST_HOST case
rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY
Linux 3.10.71
libceph: fix double __remove_osd() problem
libceph: change from BUG to WARN for __remove_osd() asserts
libceph: assert both regular and lingering lists in __remove_osd()
MIPS: Export FP functions used by lose_fpu(1) for KVM
x86, mm/ASLR: Fix stack randomization on 64-bit systems
blk-throttle: check stats_cpu before reading it from sysfs
jffs2: fix handling of corrupted summary length
md/raid1: fix read balance when a drive is write-mostly.
md/raid5: Fix livelock when array is both resyncing and degraded.
metag: Fix KSTK_EIP() and KSTK_ESP() macros
gpio: tps65912: fix wrong container_of arguments
arm64: compat: Fix siginfo_t -> compat_siginfo_t conversion on big endian
hx4700: regulator: declare full constraints
KVM: x86: update masterclock values on TSC writes
KVM: MIPS: Don't leak FPU/DSP to guest
ARC: fix page address calculation if PAGE_OFFSET != LINUX_LINK_BASE
ntp: Fixup adjtimex freq validation on 32-bit systems
kdb: fix incorrect counts in KDB summary command output
ARM: pxa: add regulator_has_full_constraints to poodle board file
ARM: pxa: add regulator_has_full_constraints to corgi board file
vt: provide notifications on selection changes
usb: core: buffer: smallest buffer should start at ARCH_DMA_MINALIGN
USB: fix use-after-free bug in usb_hcd_unlink_urb()
USB: cp210x: add ID for RUGGEDCOM USB Serial Console
tty: Prevent untrappable signals from malicious program
axonram: Fix bug in direct_access
cfq-iosched: fix incorrect filing of rt async cfqq
cfq-iosched: handle failure of cfq group allocation
iscsi-target: Drop problematic active_ts_list usage
NFSv4.1: Fix a kfree() of uninitialised pointers in decode_cb_sequence_args
Added Little Endian support to vtpm module
tpm/tpm_i2c_stm_st33: Fix potential bug in tpm_stm_i2c_send
tpm: Fix NULL return in tpm_ibmvtpm_get_desired_dma
tpm_tis: verify interrupt during init
ARM: 8284/1: sa1100: clear RCSR_SMR on resume
tracing: Fix unmapping loop in tracing_mark_write
MIPS: KVM: Deliver guest interrupts after local_irq_disable()
nfs: don't call blocking operations while !TASK_RUNNING
mmc: sdhci-pxav3: fix setting of pdata->clk_delay_cycles
power_supply: 88pm860x: Fix leaked power supply on probe fail
ALSA: hdspm - Constrain periods to 2 on older cards
ALSA: off by one bug in snd_riptide_joystick_probe()
lmedm04: Fix usb_submit_urb BOGUS urb xfer, pipe 1 != type 3 in interrupt urb
cpufreq: speedstep-smi: enable interrupts when waiting
PCI: Fix infinite loop with ROM image of size 0
PCI: Generate uppercase hex for modalias var in uevent
HID: i2c-hid: Limit reads to wMaxInputLength bytes for input events
iwlwifi: mvm: always use mac color zero
iwlwifi: mvm: fix failure path when power_update fails in add_interface
iwlwifi: mvm: validate tid and sta_id in ba_notif
iwlwifi: pcie: disable the SCD_BASE_ADDR when we resume from WoWLAN
fsnotify: fix handling of renames in audit
xfs: set superblock buffer type correctly
xfs: inode unlink does not set AGI buffer type
xfs: ensure buffer types are set correctly
Bluetooth: ath3k: workaround the compatibility issue with xHCI controller
Linux 3.10.70
rbd: drop an unsafe assertion
media/rc: Send sync space information on the lirc device
net: sctp: fix passing wrong parameter header to param_type2af in sctp_process_param
ppp: deflate: never return len larger than output buffer
ipv4: tcp: get rid of ugly unicast_sock
tcp: ipv4: initialize unicast_sock sk_pacing_rate
bridge: dont send notification when skb->len == 0 in rtnl_bridge_notify
ipv6: replacing a rt6_info needs to purge possible propagated rt6_infos too
ping: Fix race in free in receive path
udp_diag: Fix socket skipping within chain
ipv4: try to cache dst_entries which would cause a redirect
net: sctp: fix slab corruption from use after free on INIT collisions
netxen: fix netxen_nic_poll() logic
ipv6: stop sending PTB packets for MTU < 1280
net: rps: fix cpu unplug
ip: zero sockaddr returned on error queue
Linux 3.10.69
crypto: crc32c - add missing crypto module alias
x86,kvm,vmx: Preserve CR4 across VM entry
kvm: vmx: handle invvpid vm exit gracefully
smpboot: Add missing get_online_cpus() in smpboot_register_percpu_thread()
ALSA: ak411x: Fix stall in work callback
ASoC: sgtl5000: add delay before first I2C access
ASoC: atmel_ssc_dai: fix start event for I2S mode
lib/checksum.c: fix build for generic csum_tcpudp_nofold
ext4: prevent bugon on race between write/fcntl
arm64: Fix up /proc/cpuinfo
nilfs2: fix deadlock of segment constructor over I_SYNC flag
lib/checksum.c: fix carry in csum_tcpudp_nofold
mm: pagewalk: call pte_hole() for VM_PFNMAP during walk_page_range
MIPS: Fix kernel lockup or crash after CPU offline/online
MIPS: IRQ: Fix disable_irq on CPU IRQs
PCI: Add NEC variants to Stratus ftServer PCIe DMI check
gpio: sysfs: fix memory leak in gpiod_sysfs_set_active_low
gpio: sysfs: fix memory leak in gpiod_export_link
Linux 3.10.68
target: Drop arbitrary maximum I/O size limit
iser-target: Fix implicit termination of connections
iser-target: Handle ADDR_CHANGE event for listener cm_id
iser-target: Fix connected_handler + teardown flow race
iser-target: Parallelize CM connection establishment
iser-target: Fix flush + disconnect completion handling
iscsi,iser-target: Initiate termination only once
vhost-scsi: Add missing virtio-scsi -> TCM attribute conversion
tcm_loop: Fix wrong I_T nexus association
vhost-scsi: Take configfs group dependency during VHOST_SCSI_SET_ENDPOINT
ib_isert: Add max_send_sge=2 minimum for control PDU responses
IB/isert: Adjust CQ size to HW limits
workqueue: fix subtle pool management issue which can stall whole worker_pool
gpio: squelch a compiler warning
efi-pstore: Make efi-pstore return a unique id
pstore/ram: avoid atomic accesses for ioremapped regions
pstore: Fix NULL pointer fault if get NULL prz in ramoops_get_next_prz
pstore: skip zero size persistent ram buffer in traverse
pstore: clarify clearing of _read_cnt in ramoops_context
pstore: d_alloc_name() doesn't return an ERR_PTR
pstore: Fail to unlink if a driver has not defined pstore_erase
ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE
ARM: 8108/1: mm: Introduce {pte,pmd}_isset and {pte,pmd}_isclear
ARM: DMA: ensure that old section mappings are flushed from the TLB
ARM: 7931/1: Correct virt_addr_valid
ARM: fix asm/memory.h build error
ARM: 7867/1: include: asm: use 'int' instead of 'unsigned long' for 'oldval' in atomic_cmpxchg().
ARM: 7866/1: include: asm: use 'long long' instead of 'u64' within atomic.h
ARM: lpae: fix definition of PTE_HWTABLE_PTRS
ARM: fix type of PHYS_PFN_OFFSET to unsigned long
ARM: LPAE: use phys_addr_t in alloc_init_pud()
ARM: LPAE: use signed arithmetic for mask definitions
ARM: mm: correct pte_same behaviour for LPAE.
ARM: 7829/1: Add ".text.unlikely" and ".text.hot" to arm unwind tables
drivers: net: cpsw: discard dual emac default vlan configuration
regulator: core: fix race condition in regulator_put()
spi/pxa2xx: Clear cur_chip pointer before starting next message
dm cache: fix missing ERR_PTR returns and handling
dm thin: don't allow messages to be sent to a pool target in READ_ONLY or FAIL mode
nl80211: fix per-station group key get/del and memory leak
NFSv4.1: Fix an Oops in nfs41_walk_client_list
nfs: fix dio deadlock when O_DIRECT flag is flipped
Input: i8042 - add noloop quirk for Medion Akoya E7225 (MD98857)
ALSA: seq-dummy: remove deadlock-causing events on close
powerpc/xmon: Fix another endiannes issue in RTAS call from xmon
can: kvaser_usb: Fix state handling upon BUS_ERROR events
can: kvaser_usb: Retry the first bulk transfer on -ETIMEDOUT
can: kvaser_usb: Send correct context to URB completion
can: kvaser_usb: Do not sleep in atomic context
ASoC: wm8960: Fix capture sample rate from 11250 to 11025
spi: dw-mid: fix FIFO size
Signed-off-by: Ian Maund <imaund@codeaurora.org>
commit faa9cc5339
245 changed files with 2119 additions and 1236 deletions
Makefile | 2 +-

--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 67
+SUBLEVEL = 73
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
@ -270,7 +270,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
|
|||
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
|
||||
|
||||
#define pte_page(x) (mem_map + \
|
||||
(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
|
||||
(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
|
||||
PAGE_SHIFT)))
|
||||
|
||||
#define mk_pte(page, pgprot) \
|
||||
({ \
|
||||
|
|
|
@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
|
|||
|
||||
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
|
||||
{
|
||||
unsigned long oldval, res;
|
||||
int oldval;
|
||||
unsigned long res;
|
||||
|
||||
smp_mb();
|
||||
|
||||
|
@ -209,15 +210,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|||
|
||||
#ifndef CONFIG_GENERIC_ATOMIC64
|
||||
typedef struct {
|
||||
u64 __aligned(8) counter;
|
||||
long long counter;
|
||||
} atomic64_t;
|
||||
|
||||
#define ATOMIC64_INIT(i) { (i) }
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
static inline u64 atomic64_read(const atomic64_t *v)
|
||||
static inline long long atomic64_read(const atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
|
||||
__asm__ __volatile__("@ atomic64_read\n"
|
||||
" ldrd %0, %H0, [%1]"
|
||||
|
@ -228,7 +229,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline void atomic64_set(atomic64_t *v, u64 i)
|
||||
static inline void atomic64_set(atomic64_t *v, long long i)
|
||||
{
|
||||
__asm__ __volatile__("@ atomic64_set\n"
|
||||
" strd %2, %H2, [%1]"
|
||||
|
@ -237,9 +238,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
|
|||
);
|
||||
}
|
||||
#else
|
||||
static inline u64 atomic64_read(const atomic64_t *v)
|
||||
static inline long long atomic64_read(const atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
|
||||
__asm__ __volatile__("@ atomic64_read\n"
|
||||
" ldrexd %0, %H0, [%1]"
|
||||
|
@ -250,9 +251,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline void atomic64_set(atomic64_t *v, u64 i)
|
||||
static inline void atomic64_set(atomic64_t *v, long long i)
|
||||
{
|
||||
u64 tmp;
|
||||
long long tmp;
|
||||
|
||||
__asm__ __volatile__("@ atomic64_set\n"
|
||||
"1: ldrexd %0, %H0, [%2]\n"
|
||||
|
@ -265,9 +266,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline void atomic64_add(u64 i, atomic64_t *v)
|
||||
static inline void atomic64_add(long long i, atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
__asm__ __volatile__("@ atomic64_add\n"
|
||||
|
@ -282,9 +283,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|||
: "cc");
|
||||
}
|
||||
|
||||
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
||||
static inline long long atomic64_add_return(long long i, atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
smp_mb();
|
||||
|
@ -305,9 +306,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline void atomic64_sub(u64 i, atomic64_t *v)
|
||||
static inline void atomic64_sub(long long i, atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
__asm__ __volatile__("@ atomic64_sub\n"
|
||||
|
@ -322,9 +323,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
|
|||
: "cc");
|
||||
}
|
||||
|
||||
static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
|
||||
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
smp_mb();
|
||||
|
@ -345,9 +346,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
|
||||
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
|
||||
long long new)
|
||||
{
|
||||
u64 oldval;
|
||||
long long oldval;
|
||||
unsigned long res;
|
||||
|
||||
smp_mb();
|
||||
|
@ -369,9 +371,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
|
|||
return oldval;
|
||||
}
|
||||
|
||||
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
|
||||
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
smp_mb();
|
||||
|
@ -390,9 +392,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
|
||||
static inline long long atomic64_dec_if_positive(atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
long long result;
|
||||
unsigned long tmp;
|
||||
|
||||
smp_mb();
|
||||
|
@ -416,9 +418,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
|
|||
return result;
|
||||
}
|
||||
|
||||
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
||||
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
|
||||
{
|
||||
u64 val;
|
||||
long long val;
|
||||
unsigned long tmp;
|
||||
int ret = 1;
|
||||
|
||||
|
|
|
@ -98,23 +98,19 @@
|
|||
#define TASK_UNMAPPED_BASE UL(0x00000000)
|
||||
#endif
|
||||
|
||||
#ifndef PHYS_OFFSET
|
||||
#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
|
||||
#endif
|
||||
|
||||
#ifndef END_MEM
|
||||
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
|
||||
#endif
|
||||
|
||||
#ifndef PAGE_OFFSET
|
||||
#define PAGE_OFFSET (PHYS_OFFSET)
|
||||
#define PAGE_OFFSET PLAT_PHYS_OFFSET
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The module can be at any place in ram in nommu mode.
|
||||
*/
|
||||
#define MODULES_END (END_MEM)
|
||||
#define MODULES_VADDR (PHYS_OFFSET)
|
||||
#define MODULES_VADDR PAGE_OFFSET
|
||||
|
||||
#define XIP_VIRT_ADDR(physaddr) (physaddr)
|
||||
|
||||
|
@ -141,6 +137,16 @@
|
|||
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
|
||||
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
|
||||
|
||||
/*
|
||||
* PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
|
||||
* memory. This is used for XIP and NoMMU kernels, or by kernels which
|
||||
* have their own mach/memory.h. Assembly code must always use
|
||||
* PLAT_PHYS_OFFSET and not PHYS_OFFSET.
|
||||
*/
|
||||
#ifndef PLAT_PHYS_OFFSET
|
||||
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
|
@ -183,22 +189,15 @@ static inline unsigned long __phys_to_virt(unsigned long x)
|
|||
return t;
|
||||
}
|
||||
#else
|
||||
|
||||
#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
||||
|
||||
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
|
||||
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
|
||||
#endif
|
||||
#endif
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifndef PHYS_OFFSET
|
||||
#ifdef PLAT_PHYS_OFFSET
|
||||
#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
||||
#else
|
||||
#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
* PFNs are used to describe any physical page; this means
|
||||
* PFN 0 == physical address 0.
|
||||
|
@ -207,7 +206,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
|
|||
* direct-mapped view. We assume this is the first page
|
||||
* of RAM in the mem_map as well.
|
||||
*/
|
||||
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
|
||||
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
|
||||
|
||||
/*
|
||||
* These are *only* valid on the kernel direct mapped RAM memory.
|
||||
|
@ -275,7 +274,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
|
|||
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
|
||||
|
||||
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
|
||||
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
|
||||
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
|
||||
&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
|
||||
|
||||
/*
|
||||
* Set if the architecture speculatively fetches data into cache.
|
||||
|
|
|
@ -12,6 +12,8 @@ enum {
|
|||
ARM_SEC_CORE,
|
||||
ARM_SEC_EXIT,
|
||||
ARM_SEC_DEVEXIT,
|
||||
ARM_SEC_HOT,
|
||||
ARM_SEC_UNLIKELY,
|
||||
ARM_SEC_MAX,
|
||||
};
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
/* PAGE_SHIFT determines the page size */
|
||||
#define PAGE_SHIFT 12
|
||||
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
|
||||
#define PAGE_MASK (~(PAGE_SIZE-1))
|
||||
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -71,6 +71,7 @@
|
|||
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
|
||||
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
|
||||
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
|
||||
#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
|
||||
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
|
||||
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
|
||||
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
#define PTRS_PER_PMD 512
|
||||
#define PTRS_PER_PGD 4
|
||||
|
||||
#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
|
||||
#define PTE_HWTABLE_PTRS (0)
|
||||
#define PTE_HWTABLE_OFF (0)
|
||||
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
|
||||
|
||||
|
@ -48,16 +48,16 @@
|
|||
#define PMD_SHIFT 21
|
||||
|
||||
#define PMD_SIZE (1UL << PMD_SHIFT)
|
||||
#define PMD_MASK (~(PMD_SIZE-1))
|
||||
#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
|
||||
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
|
||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
||||
#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
|
||||
|
||||
/*
|
||||
* section address mask and size definitions.
|
||||
*/
|
||||
#define SECTION_SHIFT 21
|
||||
#define SECTION_SIZE (1UL << SECTION_SHIFT)
|
||||
#define SECTION_MASK (~(SECTION_SIZE-1))
|
||||
#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
|
||||
|
||||
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
|
||||
|
||||
|
@ -71,13 +71,13 @@
|
|||
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
|
||||
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
|
||||
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
|
||||
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
|
||||
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
|
||||
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
|
||||
#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
|
||||
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
|
||||
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
|
||||
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
|
||||
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
|
||||
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
|
||||
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
|
||||
|
||||
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
|
||||
#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
|
||||
|
@ -174,6 +174,23 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|||
clean_pmd_entry(pmdp); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
|
||||
* that are written to a page table but not for ptes created with mk_pte.
|
||||
*
|
||||
* In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
|
||||
* hugetlb_cow, where it is compared with an entry in a page table.
|
||||
* This comparison test fails erroneously leading ultimately to a memory leak.
|
||||
*
|
||||
* To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
|
||||
* present before running the comparison.
|
||||
*/
|
||||
#define __HAVE_ARCH_PTE_SAME
|
||||
#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
|
||||
: pte_val(pte_a)) \
|
||||
== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
|
||||
: pte_val(pte_b)))
|
||||
|
||||
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
|
||||
|
||||
#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
|
||||
|
|
|
@ -228,12 +228,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
|||
|
||||
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
|
||||
|
||||
#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
|
||||
: !!(pte_val(pte) & (val)))
|
||||
#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
|
||||
|
||||
#define pte_none(pte) (!pte_val(pte))
|
||||
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
|
||||
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
|
||||
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
|
||||
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
|
||||
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
|
||||
#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
|
||||
#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
|
||||
#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
|
||||
#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
|
||||
#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
|
||||
#define pte_special(pte) (0)
|
||||
|
||||
#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
|
||||
|
|
|
@ -109,7 +109,7 @@ ENTRY(stext)
|
|||
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
|
||||
add r8, r8, r4 @ PHYS_OFFSET
|
||||
#else
|
||||
ldr r8, =PHYS_OFFSET @ always constant in this case
|
||||
ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -296,6 +296,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
|
|||
maps[ARM_SEC_EXIT].unw_sec = s;
|
||||
else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
|
||||
maps[ARM_SEC_DEVEXIT].unw_sec = s;
|
||||
else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
|
||||
maps[ARM_SEC_UNLIKELY].unw_sec = s;
|
||||
else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
|
||||
maps[ARM_SEC_HOT].unw_sec = s;
|
||||
else if (strcmp(".init.text", secname) == 0)
|
||||
maps[ARM_SEC_INIT].txt_sec = s;
|
||||
else if (strcmp(".devinit.text", secname) == 0)
|
||||
|
@ -306,6 +310,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
|
|||
maps[ARM_SEC_EXIT].txt_sec = s;
|
||||
else if (strcmp(".devexit.text", secname) == 0)
|
||||
maps[ARM_SEC_DEVEXIT].txt_sec = s;
|
||||
else if (strcmp(".text.unlikely", secname) == 0)
|
||||
maps[ARM_SEC_UNLIKELY].txt_sec = s;
|
||||
else if (strcmp(".text.hot", secname) == 0)
|
||||
maps[ARM_SEC_HOT].txt_sec = s;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARM_SEC_MAX; i++)
|
||||
|
|
|
@ -37,7 +37,7 @@ static inline void at91rm9200_standby(void)
|
|||
" mcr p15, 0, %0, c7, c0, 4\n\t"
|
||||
" str %5, [%1, %2]"
|
||||
:
|
||||
: "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
|
||||
: "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
|
||||
"r" (1), "r" (AT91RM9200_SDRAMC_SRR),
|
||||
"r" (lpr));
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <linux/i2c.h>
|
||||
#include <linux/i2c/pxa-i2c.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/regulator/machine.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/ads7846.h>
|
||||
#include <linux/spi/corgi_lcd.h>
|
||||
|
@ -711,6 +712,8 @@ static void __init corgi_init(void)
|
|||
sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
|
||||
|
||||
platform_add_devices(devices, ARRAY_SIZE(devices));
|
||||
|
||||
regulator_has_full_constraints();
|
||||
}
|
||||
|
||||
static void __init fixup_corgi(struct tag *tags, char **cmdline,
|
||||
|
|
|
@ -891,6 +891,8 @@ static void __init hx4700_init(void)
|
|||
mdelay(10);
|
||||
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
|
||||
mdelay(10);
|
||||
|
||||
regulator_has_full_constraints();
|
||||
}
|
||||
|
||||
MACHINE_START(H4700, "HP iPAQ HX4700")
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <linux/gpio.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/i2c/pxa-i2c.h>
|
||||
#include <linux/regulator/machine.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/ads7846.h>
|
||||
#include <linux/spi/pxa2xx_spi.h>
|
||||
|
@ -452,6 +453,7 @@ static void __init poodle_init(void)
|
|||
pxa_set_i2c_info(NULL);
|
||||
i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
|
||||
poodle_init_spi();
|
||||
regulator_has_full_constraints();
|
||||
}
|
||||
|
||||
static void __init fixup_poodle(struct tag *tags, char **cmdline,
|
||||
|
|
|
@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
|
|||
/*
|
||||
* Ensure not to come back here if it wasn't intended
|
||||
*/
|
||||
RCSR = RCSR_SMR;
|
||||
PSPR = 0;
|
||||
|
||||
/*
|
||||
|
|
|
@ -508,12 +508,21 @@ void __init dma_contiguous_remap(void)
|
|||
map.type = MT_MEMORY_DMA_READY;
|
||||
|
||||
/*
|
||||
* Clear previous low-memory mapping
|
||||
* Clear previous low-memory mapping to ensure that the
|
||||
* TLB does not see any conflicting entries, then flush
|
||||
* the TLB of the old entries before creating new mappings.
|
||||
*
|
||||
* This ensures that any speculatively loaded TLB entries
|
||||
* (even though they may be rare) can not cause any problems,
|
||||
* and ensures that this code is architecturally compliant.
|
||||
*/
|
||||
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
|
||||
addr += PMD_SIZE)
|
||||
pmd_clear(pmd_off_k(addr));
|
||||
|
||||
flush_tlb_kernel_range(__phys_to_virt(start),
|
||||
__phys_to_virt(end));
|
||||
|
||||
iotable_init(&map, 1);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -778,7 +778,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
|
|||
}
|
||||
|
||||
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
|
||||
unsigned long end, unsigned long phys, const struct mem_type *type)
|
||||
unsigned long end, phys_addr_t phys,
|
||||
const struct mem_type *type)
|
||||
{
|
||||
pud_t *pud = pud_offset(pgd, addr);
|
||||
unsigned long next;
|
||||
|
|
|
@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
|
|||
tst rh, #1 << (57 - 32) @ L_PTE_NONE
|
||||
bicne rl, #L_PTE_VALID
|
||||
bne 1f
|
||||
tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
|
||||
orreq rl, #L_PTE_RDONLY
|
||||
|
||||
eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
|
||||
@ test for !L_PTE_DIRTY || L_PTE_RDONLY
|
||||
tst ip, #1 << (55 - 32) | 1 << (58 - 32)
|
||||
orrne rl, #PTE_AP2
|
||||
biceq rl, #PTE_AP2
|
||||
|
||||
1: strd r2, r3, [r0]
|
||||
ALT_SMP(W(nop))
|
||||
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
|
||||
|
|
|
@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|||
case __SI_TIMER:
|
||||
err |= __put_user(from->si_tid, &to->si_tid);
|
||||
err |= __put_user(from->si_overrun, &to->si_overrun);
|
||||
err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
|
||||
&to->si_ptr);
|
||||
err |= __put_user(from->si_int, &to->si_int);
|
||||
break;
|
||||
case __SI_POLL:
|
||||
err |= __put_user(from->si_band, &to->si_band);
|
||||
|
@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|||
case __SI_MESGQ: /* But this is */
|
||||
err |= __put_user(from->si_pid, &to->si_pid);
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
|
||||
err |= __put_user(from->si_int, &to->si_int);
|
||||
break;
|
||||
default: /* this is just in case for now ... */
|
||||
err |= __put_user(from->si_pid, &to->si_pid);
|
||||
|
|
|
@ -149,8 +149,8 @@ extern void exit_thread(void);
|
|||
|
||||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC)
|
||||
#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0)
|
||||
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
|
||||
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)
|
||||
|
||||
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
|
||||
|
||||
|
|
|
@ -56,6 +56,8 @@ static struct irq_chip mips_cpu_irq_controller = {
|
|||
.irq_mask_ack = mask_mips_irq,
|
||||
.irq_unmask = unmask_mips_irq,
|
||||
.irq_eoi = unmask_mips_irq,
|
||||
.irq_disable = mask_mips_irq,
|
||||
.irq_enable = unmask_mips_irq,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -92,6 +94,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
|
|||
.irq_mask_ack = mips_mt_cpu_irq_ack,
|
||||
.irq_unmask = unmask_mips_irq,
|
||||
.irq_eoi = unmask_mips_irq,
|
||||
.irq_disable = mask_mips_irq,
|
||||
.irq_enable = unmask_mips_irq,
|
||||
};
|
||||
|
||||
void __init mips_cpu_irq_init(void)
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
extern void *__bzero(void *__s, size_t __count);
|
||||
extern long __strncpy_from_user_nocheck_asm(char *__to,
|
||||
|
@ -25,6 +26,13 @@ extern long __strlen_user_asm(const char *s);
|
|||
extern long __strnlen_user_nocheck_asm(const char *s);
|
||||
extern long __strnlen_user_asm(const char *s);
|
||||
|
||||
/*
|
||||
* Core architecture code
|
||||
*/
|
||||
#ifdef CONFIG_CPU_R4K_FPU
|
||||
EXPORT_SYMBOL_GPL(_save_fp);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* String functions
|
||||
*/
|
||||
|
|
|
@ -109,10 +109,10 @@ asmlinkage __cpuinit void start_secondary(void)
|
|||
else
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
cpu_probe();
|
||||
cpu_report();
|
||||
per_cpu_trap_init(false);
|
||||
mips_clockevent_init();
|
||||
mp_ops->init_secondary();
|
||||
cpu_report();
|
||||
|
||||
/*
|
||||
* XXX parity protection should be folded in here when it's converted
|
||||
|
|
|
@ -431,7 +431,7 @@ __kvm_mips_return_to_guest:
|
|||
/* Setup status register for running guest in UM */
|
||||
.set at
|
||||
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
|
||||
and v1, v1, ~ST0_CU0
|
||||
and v1, v1, ~(ST0_CU0 | ST0_MX)
|
||||
.set noat
|
||||
mtc0 v1, CP0_STATUS
|
||||
ehb
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -413,11 +414,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
vcpu->mmio_needed = 0;
|
||||
}
|
||||
|
||||
lose_fpu(1);
|
||||
|
||||
local_irq_disable();
|
||||
/* Check if we have any exceptions/interrupts pending */
|
||||
kvm_mips_deliver_interrupts(vcpu,
|
||||
kvm_read_c0_guest_cause(vcpu->arch.cop0));
|
||||
|
||||
local_irq_disable();
|
||||
kvm_guest_enter();
|
||||
|
||||
r = __kvm_mips_vcpu_run(run, vcpu);
|
||||
|
@ -1017,9 +1020,6 @@ void kvm_mips_set_c0_status(void)
|
|||
{
|
||||
uint32_t status = read_c0_status();
|
||||
|
||||
if (cpu_has_fpu)
|
||||
status |= (ST0_CU1);
|
||||
|
||||
if (cpu_has_dsp)
|
||||
status |= (ST0_MX);
|
||||
|
||||
|
|
|
@ -26,18 +26,18 @@ TRACE_EVENT(kvm_exit,
|
|||
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
|
||||
TP_ARGS(vcpu, reason),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct kvm_vcpu *, vcpu)
|
||||
__field(unsigned long, pc)
|
||||
__field(unsigned int, reason)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu = vcpu;
|
||||
__entry->pc = vcpu->arch.pc;
|
||||
__entry->reason = reason;
|
||||
),
|
||||
|
||||
TP_printk("[%s]PC: 0x%08lx",
|
||||
kvm_mips_exit_types_str[__entry->reason],
|
||||
__entry->vcpu->arch.pc)
|
||||
__entry->pc)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_KVM_H */
|
||||
|
|
|
@ -544,8 +544,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|||
if (smp_ops->give_timebase)
|
||||
smp_ops->give_timebase();
|
||||
|
||||
/* Wait until cpu puts itself in the online map */
|
||||
while (!cpu_online(cpu))
|
||||
/* Wait until cpu puts itself in the online & active maps */
|
||||
while (!cpu_online(cpu) || !cpu_active(cpu))
|
||||
cpu_relax();
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -155,7 +155,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
|
|||
}
|
||||
|
||||
*kaddr = (void *)(bank->ph_addr + offset);
|
||||
*pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
|
||||
*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
|
|||
args.token = rtas_token("set-indicator");
|
||||
if (args.token == RTAS_UNKNOWN_SERVICE)
|
||||
return;
|
||||
args.token = cpu_to_be32(args.token);
|
||||
args.nargs = cpu_to_be32(3);
|
||||
args.nret = cpu_to_be32(1);
|
||||
args.rets = &args.args[3];
|
||||
|
|
|
@ -960,6 +960,8 @@ out:
|
|||
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
|
||||
}
|
||||
|
||||
static void sparc_pmu_start(struct perf_event *event, int flags);
|
||||
|
||||
/* On this PMU each PIC has it's own PCR control register. */
|
||||
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
|
||||
{
|
||||
|
@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
|
|||
struct perf_event *cp = cpuc->event[i];
|
||||
struct hw_perf_event *hwc = &cp->hw;
|
||||
int idx = hwc->idx;
|
||||
u64 enc;
|
||||
|
||||
if (cpuc->current_idx[i] != PIC_NO_INDEX)
|
||||
continue;
|
||||
|
||||
sparc_perf_event_set_period(cp, hwc, idx);
|
||||
cpuc->current_idx[i] = idx;
|
||||
|
||||
enc = perf_event_get_enc(cpuc->events[i]);
|
||||
cpuc->pcr[idx] &= ~mask_for_index(idx);
|
||||
if (hwc->state & PERF_HES_STOPPED)
|
||||
cpuc->pcr[idx] |= nop_for_index(idx);
|
||||
else
|
||||
cpuc->pcr[idx] |= event_encoding(enc, idx);
|
||||
sparc_pmu_start(cp, PERF_EF_RELOAD);
|
||||
}
|
||||
out:
|
||||
for (i = 0; i < cpuc->n_events; i++) {
|
||||
|
@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
|
|||
int i;
|
||||
|
||||
local_irq_save(flags);
|
||||
perf_pmu_disable(event->pmu);
|
||||
|
||||
for (i = 0; i < cpuc->n_events; i++) {
|
||||
if (event == cpuc->event[i]) {
|
||||
|
@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
|
|||
}
|
||||
}
|
||||
|
||||
perf_pmu_enable(event->pmu);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
|
@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
|
|||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
perf_pmu_disable(event->pmu);
|
||||
|
||||
n0 = cpuc->n_events;
|
||||
if (n0 >= sparc_pmu->max_hw_events)
|
||||
|
@ -1394,7 +1386,6 @@ nocheck:
|
|||
|
||||
ret = 0;
|
||||
out:
|
||||
perf_pmu_enable(event->pmu);
|
||||
local_irq_restore(flags);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -280,6 +280,8 @@ void arch_trigger_all_cpu_backtrace(void)
|
|||
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
|
||||
gp->tpc, gp->o7, gp->i7, gp->rpc);
|
||||
}
|
||||
|
||||
touch_nmi_watchdog();
|
||||
}
|
||||
|
||||
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
|
||||
|
@ -352,6 +354,8 @@ static void pmu_snapshot_all_cpus(void)
|
|||
(cpu == this_cpu ? '*' : ' '), cpu,
|
||||
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
|
||||
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
|
||||
|
||||
touch_nmi_watchdog();
|
||||
}
|
||||
|
||||
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
|
||||
|
|
|
@ -334,7 +334,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
|
|||
long err;
|
||||
|
||||
/* No need for backward compatibility. We can start fresh... */
|
||||
if (call <= SEMCTL) {
|
||||
if (call <= SEMTIMEDOP) {
|
||||
switch (call) {
|
||||
case SEMOP:
|
||||
err = sys_semtimedop(first, ptr,
|
||||
|
|
|
@ -8,9 +8,11 @@
|
|||
|
||||
.text
|
||||
ENTRY(memmove) /* o0=dst o1=src o2=len */
|
||||
mov %o0, %g1
|
||||
brz,pn %o2, 99f
|
||||
mov %o0, %g1
|
||||
|
||||
cmp %o0, %o1
|
||||
bleu,pt %xcc, memcpy
|
||||
bleu,pt %xcc, 2f
|
||||
add %o1, %o2, %g7
|
||||
cmp %g7, %o0
|
||||
bleu,pt %xcc, memcpy
|
||||
|
@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
|
|||
stb %g7, [%o0]
|
||||
bne,pt %icc, 1b
|
||||
sub %o0, 1, %o0
|
||||
|
||||
99:
|
||||
retl
|
||||
mov %g1, %o0
|
||||
|
||||
/* We can't just call memcpy for these memmove cases. On some
|
||||
* chips the memcpy uses cache initializing stores and when dst
|
||||
* and src are close enough, those can clobber the source data
|
||||
* before we've loaded it in.
|
||||
*/
|
||||
2: or %o0, %o1, %g7
|
||||
or %o2, %g7, %g7
|
||||
andcc %g7, 0x7, %g0
|
||||
bne,pn %xcc, 4f
|
||||
nop
|
||||
|
||||
3: ldx [%o1], %g7
|
||||
add %o1, 8, %o1
|
||||
subcc %o2, 8, %o2
|
||||
add %o0, 8, %o0
|
||||
bne,pt %icc, 3b
|
||||
stx %g7, [%o0 - 0x8]
|
||||
ba,a,pt %xcc, 99b
|
||||
|
||||
4: ldub [%o1], %g7
|
||||
add %o1, 1, %o1
|
||||
subcc %o2, 1, %o2
|
||||
add %o0, 1, %o0
|
||||
bne,pt %icc, 4b
|
||||
stb %g7, [%o0 - 0x1]
|
||||
ba,a,pt %xcc, 99b
|
||||
ENDPROC(memmove)
|
||||
|
|
|
@ -455,10 +455,12 @@ static void __init sparc_context_init(int numctx)
|
|||
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (mm->context == NO_CONTEXT) {
|
||||
spin_lock(&srmmu_context_spinlock);
|
||||
spin_lock_irqsave(&srmmu_context_spinlock, flags);
|
||||
alloc_context(old_mm, mm);
|
||||
spin_unlock(&srmmu_context_spinlock);
|
||||
spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
|
||||
srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
|
||||
}
|
||||
|
||||
|
@ -983,14 +985,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
|||
|
||||
void destroy_context(struct mm_struct *mm)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (mm->context != NO_CONTEXT) {
|
||||
flush_cache_mm(mm);
|
||||
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
|
||||
flush_tlb_mm(mm);
|
||||
spin_lock(&srmmu_context_spinlock);
|
||||
spin_lock_irqsave(&srmmu_context_spinlock, flags);
|
||||
free_context(mm->context);
|
||||
spin_unlock(&srmmu_context_spinlock);
|
||||
spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
|
||||
mm->context = NO_CONTEXT;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -989,7 +989,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
|
|||
src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
|
||||
if (!src)
|
||||
return -ENOMEM;
|
||||
assoc = (src + req->cryptlen + auth_tag_len);
|
||||
assoc = (src + req->cryptlen);
|
||||
scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
|
||||
scatterwalk_map_and_copy(assoc, req->assoc, 0,
|
||||
req->assoclen, 0);
|
||||
|
@ -1014,7 +1014,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
|
|||
scatterwalk_done(&src_sg_walk, 0, 0);
|
||||
scatterwalk_done(&assoc_sg_walk, 0, 0);
|
||||
} else {
|
||||
scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
|
||||
scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
|
||||
kfree(src);
|
||||
}
|
||||
return retval;
|
||||
|
|
|
@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
|
|||
preempt_disable();
|
||||
tsk->fpu_counter = 0;
|
||||
__drop_fpu(tsk);
|
||||
clear_used_math();
|
||||
clear_stopped_child_used_math(tsk);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
|
|
|
@ -67,6 +67,7 @@
|
|||
#define EXIT_REASON_EPT_MISCONFIG 49
|
||||
#define EXIT_REASON_INVEPT 50
|
||||
#define EXIT_REASON_PREEMPTION_TIMER 52
|
||||
#define EXIT_REASON_INVVPID 53
|
||||
#define EXIT_REASON_WBINVD 54
|
||||
#define EXIT_REASON_XSETBV 55
|
||||
#define EXIT_REASON_APIC_WRITE 56
|
||||
|
@ -112,6 +113,7 @@
|
|||
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
|
||||
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
|
||||
{ EXIT_REASON_INVD, "INVD" }, \
|
||||
{ EXIT_REASON_INVVPID, "INVVPID" }, \
|
||||
{ EXIT_REASON_INVPCID, "INVPCID" }, \
|
||||
{ EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }
|
||||
|
||||
|
|
|
@ -557,11 +557,14 @@ ENTRY(ret_from_fork)
|
|||
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
|
||||
jz 1f
|
||||
|
||||
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
|
||||
jnz int_ret_from_sys_call
|
||||
|
||||
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
|
||||
jmp ret_from_sys_call # go to the SYSRET fastpath
|
||||
/*
|
||||
* By the time we get here, we have no idea whether our pt_regs,
|
||||
* ti flags, and ti status came from the 64-bit SYSCALL fast path,
|
||||
* the slow path, or one of the ia32entry paths.
|
||||
* Use int_ret_from_sys_call to return, since it can safely handle
|
||||
* all of the above.
|
||||
*/
|
||||
jmp int_ret_from_sys_call
|
||||
|
||||
1:
|
||||
subq $REST_SKIP, %rsp # leave space for volatiles
|
||||
|
|
|
@ -376,7 +376,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
|
|||
* thread's fpu state, reconstruct fxstate from the fsave
|
||||
* header. Sanitize the copied state etc.
|
||||
*/
|
||||
struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
|
||||
struct fpu *fpu = &tsk->thread.fpu;
|
||||
struct user_i387_ia32_struct env;
|
||||
int err = 0;
|
||||
|
||||
|
@ -390,14 +390,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
|
|||
*/
|
||||
drop_fpu(tsk);
|
||||
|
||||
if (__copy_from_user(xsave, buf_fx, state_size) ||
|
||||
if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
|
||||
__copy_from_user(&env, buf, sizeof(env))) {
|
||||
fpu_finit(fpu);
|
||||
err = -1;
|
||||
} else {
|
||||
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
|
||||
set_used_math();
|
||||
}
|
||||
|
||||
set_used_math();
|
||||
if (use_eager_fpu()) {
|
||||
preempt_disable();
|
||||
math_state_restore();
|
||||
|
|
|
@ -4732,7 +4732,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
|
|||
if (rc != X86EMUL_CONTINUE)
|
||||
goto done;
|
||||
}
|
||||
ctxt->dst.orig_val = ctxt->dst.val;
|
||||
/* Copy full 64-bit value for CMPXCHG8B. */
|
||||
ctxt->dst.orig_val64 = ctxt->dst.val64;
|
||||
|
||||
special_insn:
|
||||
|
||||
|
|
|
@ -438,6 +438,7 @@ struct vcpu_vmx {
|
|||
#endif
|
||||
int gs_ldt_reload_needed;
|
||||
int fs_reload_needed;
|
||||
unsigned long vmcs_host_cr4; /* May not match real cr4 */
|
||||
} host_state;
|
||||
struct {
|
||||
int vm86_active;
|
||||
|
@ -4076,11 +4077,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
|
|||
u32 low32, high32;
|
||||
unsigned long tmpl;
|
||||
struct desc_ptr dt;
|
||||
unsigned long cr4;
|
||||
|
||||
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
|
||||
vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
|
||||
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
|
||||
|
||||
/* Save the most likely value for this task's CR4 in the VMCS. */
|
||||
cr4 = read_cr4();
|
||||
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
|
||||
vmx->host_state.vmcs_host_cr4 = cr4;
|
||||
|
||||
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
|
@ -6248,6 +6254,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int handle_invvpid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_queue_exception(vcpu, UD_VECTOR);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* The exit handlers return 1 if the exit was handled fully and guest execution
|
||||
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
|
||||
|
@ -6293,6 +6305,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
|
|||
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
|
||||
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
|
||||
[EXIT_REASON_INVEPT] = handle_invept,
|
||||
[EXIT_REASON_INVVPID] = handle_invvpid,
|
||||
};
|
||||
|
||||
static const int kvm_vmx_max_exit_handlers =
|
||||
|
@ -6519,7 +6532,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
|
|||
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
|
||||
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
|
||||
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
|
||||
case EXIT_REASON_INVEPT:
|
||||
case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
|
||||
/*
|
||||
* VMX instructions trap unconditionally. This allows L1 to
|
||||
* emulate them for its L2 guest, i.e., allows 3-level nesting!
|
||||
|
@ -6964,7 +6977,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
|
|||
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
unsigned long debugctlmsr;
|
||||
unsigned long debugctlmsr, cr4;
|
||||
|
||||
/* Record the guest's net vcpu time for enforced NMI injections. */
|
||||
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
|
||||
|
@ -6985,6 +6998,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
|
||||
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
|
||||
|
||||
cr4 = read_cr4();
|
||||
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
|
||||
vmcs_writel(HOST_CR4, cr4);
|
||||
vmx->host_state.vmcs_host_cr4 = cr4;
|
||||
}
|
||||
|
||||
/* When single-stepping over STI and MOV SS, we must clear the
|
||||
* corresponding interruptibility bits in the guest state. Otherwise
|
||||
* vmentry fails as it then expects bit 14 (BS) in pending debug
|
||||
|
|
|
@ -1182,21 +1182,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
|
|||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
bool vcpus_matched;
|
||||
bool do_request = false;
|
||||
struct kvm_arch *ka = &vcpu->kvm->arch;
|
||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||
|
||||
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
|
||||
atomic_read(&vcpu->kvm->online_vcpus));
|
||||
|
||||
if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
|
||||
if (!ka->use_master_clock)
|
||||
do_request = 1;
|
||||
|
||||
if (!vcpus_matched && ka->use_master_clock)
|
||||
do_request = 1;
|
||||
|
||||
if (do_request)
|
||||
/*
|
||||
* Once the masterclock is enabled, always perform request in
|
||||
* order to update it.
|
||||
*
|
||||
* In order to enable masterclock, the host clocksource must be TSC
|
||||
* and the vcpus need to have matched TSCs. When that happens,
|
||||
* perform request to enable masterclock.
|
||||
*/
|
||||
if (ka->use_master_clock ||
|
||||
(gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
|
||||
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
|
||||
|
||||
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
|
||||
|
|
|
@ -35,12 +35,12 @@ struct __read_mostly va_alignment va_align = {
|
|||
.flags = -1,
|
||||
};
|
||||
|
||||
static unsigned int stack_maxrandom_size(void)
|
||||
static unsigned long stack_maxrandom_size(void)
|
||||
{
|
||||
unsigned int max = 0;
|
||||
unsigned long max = 0;
|
||||
if ((current->flags & PF_RANDOMIZE) &&
|
||||
!(current->personality & ADDR_NO_RANDOMIZE)) {
|
||||
max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
|
||||
max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
return max;
|
||||
|
|
|
@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] = {
|
|||
DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = set_scan_all,
|
||||
.ident = "Stratus/NEC ftServer",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = set_scan_all,
|
||||
.ident = "Stratus/NEC ftServer",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
|
||||
},
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
.text
|
||||
.globl __kernel_sigreturn
|
||||
.type __kernel_sigreturn,@function
|
||||
nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
|
||||
ALIGN
|
||||
__kernel_sigreturn:
|
||||
.LSTART_sigreturn:
|
||||
|
|
|
@ -942,6 +942,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
|
|||
struct blkg_rwstat rwstat = { }, tmp;
|
||||
int i, cpu;
|
||||
|
||||
if (tg->stats_cpu == NULL)
|
||||
return 0;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
|
||||
|
||||
|
|
|
@@ -3578,6 +3578,11 @@ retry:
 
 	blkcg = bio_blkcg(bio);
 	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+	if (!cfqg) {
+		cfqq = &cfqd->oom_cfqq;
+		goto out;
+	}
+
 	cfqq = cic_to_cfqq(cic, is_sync);
 
 	/*

@@ -3614,7 +3619,7 @@ retry:
 		} else
 			cfqq = &cfqd->oom_cfqq;
 	}
-
+out:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 

@@ -3644,12 +3649,17 @@ static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	      struct bio *bio, gfp_t gfp_mask)
 {
-	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
-	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
 	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
 	if (!is_sync) {
+		if (!ioprio_valid(cic->ioprio)) {
+			struct task_struct *tsk = current;
+			ioprio = task_nice_ioprio(tsk);
+			ioprio_class = task_nice_ioclass(tsk);
+		}
 		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
 		cfqq = *async_cfqq;
 	}

@@ -170,3 +170,4 @@ module_exit(crc32c_mod_fini);
 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32c");

@@ -1953,6 +1953,17 @@ EXPORT_SYMBOL(acpi_video_unregister);
 
 static int __init acpi_video_init(void)
 {
+	/*
+	 * Let the module load even if ACPI is disabled (e.g. due to
+	 * a broken BIOS) so that i915.ko can still be loaded on such
+	 * old systems without an AcpiOpRegion.
+	 *
+	 * acpi_video_register() will report -ENODEV later as well due
+	 * to acpi_disabled when i915.ko tries to register itself afterwards.
+	 */
+	if (acpi_disabled)
+		return 0;
+
 	dmi_check_system(video_dmi_table);
 
 	if (intel_opregion_present())

@@ -2149,7 +2149,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
 	rbd_assert(img_request->obj_request_count > 0);
 	rbd_assert(which != BAD_WHICH);
 	rbd_assert(which < img_request->obj_request_count);
-	rbd_assert(which >= img_request->next_completion);
 
 	spin_lock_irq(&img_request->completion_lock);
 	if (which != img_request->next_completion)

@@ -201,6 +201,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 #define USB_REQ_DFU_DNLOAD	1
 #define BULK_SIZE		4096
 #define FW_HDR_SIZE		20
+#define TIMEGAP_USEC_MIN	50
+#define TIMEGAP_USEC_MAX	100
 
 static int ath3k_load_firmware(struct usb_device *udev,
 				const struct firmware *firmware)

@@ -231,6 +233,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
 	count -= 20;
 
 	while (count) {
+		/* workaround the compatibility issue with xHCI controller*/
+		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
 		size = min_t(uint, count, BULK_SIZE);
 		pipe = usb_sndbulkpipe(udev, 0x02);
 		memcpy(send_buf, firmware->data + sent, size);

@@ -361,6 +366,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
 	count -= size;
 
 	while (count) {
+		/* workaround the compatibility issue with xHCI controller*/
+		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
 		size = min_t(uint, count, BULK_SIZE);
 		pipe = usb_sndbulkpipe(udev, 0x02);
 

@@ -488,7 +488,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
 		if (burstcnt < 0)
 			return burstcnt;
 		size = min_t(int, len - i - 1, burstcnt);
-		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
+		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
 		if (ret < 0)
 			goto out_err;
 

@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	struct ibmvtpm_crq crq;
-	u64 *word = (u64 *) &crq;
+	__be64 *word = (__be64 *)&crq;
 	int rc;
 
 	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

@@ -145,10 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_TPM_COMMAND;
-	crq.len = (u16)count;
-	crq.data = ibmvtpm->rtce_dma_handle;
+	crq.len = cpu_to_be16(count);
+	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+			      be64_to_cpu(word[1]));
 	if (rc != H_SUCCESS) {
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
 		rc = 0;

@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+			      cpu_to_be64(buf[1]));
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_GET_VERSION;
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+			      cpu_to_be64(buf[1]));
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
 static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
 {
 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+
+	/* ibmvtpm initializes at probe time, so the data we are
+	 * asking for may not be set yet. Estimate that 4K required
+	 * for TCE-mapped buffer in addition to CRQ.
+	 */
+	if (!ibmvtpm)
+		return CRQ_RES_BUF_SIZE + PAGE_SIZE;
 
 	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
 }
 

@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+			      cpu_to_be64(buf[1]));
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

@@ -511,11 +523,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
 	case IBMVTPM_VALID_CMD:
 		switch (crq->msg) {
 		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
-			if (crq->len <= 0) {
+			if (be16_to_cpu(crq->len) <= 0) {
 				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
 				return;
 			}
-			ibmvtpm->rtce_size = crq->len;
+			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
 						    GFP_KERNEL);
 			if (!ibmvtpm->rtce_buf) {

@@ -536,11 +548,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
 
 		return;
 	case VTPM_GET_VERSION_RES:
-		ibmvtpm->vtpm_version = crq->data;
+		ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
 		return;
 	case VTPM_TPM_COMMAND_RES:
 		/* len of the data in rtce buffer */
-		ibmvtpm->res_len = crq->len;
+		ibmvtpm->res_len = be16_to_cpu(crq->len);
 		wake_up_interruptible(&ibmvtpm->wq);
 		return;
 	default:

@@ -22,9 +22,9 @@
 struct ibmvtpm_crq {
 	u8 valid;
 	u8 msg;
-	u16 len;
-	u32 data;
-	u64 reserved;
+	__be16 len;
+	__be32 data;
+	__be64 reserved;
 } __attribute__((packed, aligned(8)));
 
 struct ibmvtpm_crq_queue {

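The struct change above is the crux of the whole run of ibmvtpm hunks: fields that cross the hypervisor boundary are declared in their wire byte order (__be16/__be32/__be64) and converted exactly once at each access. A userspace analogue of the same discipline, using glibc's <endian.h> helpers in place of the kernel's cpu_to_be*()/be*_to_cpu() — the struct layout mirrors ibmvtpm_crq, but the values are made up:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* wire format: all multi-byte fields big-endian, as in ibmvtpm_crq */
struct demo_crq {
	uint8_t  valid;
	uint8_t  msg;
	uint16_t len;		/* big-endian on the wire */
	uint32_t data;		/* big-endian on the wire */
	uint64_t reserved;
} __attribute__((packed, aligned(8)));

int main(void)
{
	struct demo_crq crq;
	memset(&crq, 0, sizeof(crq));

	crq.valid = 0x80;
	crq.msg = 0x01;
	crq.len = htobe16(1024);	/* convert once, at the boundary */
	crq.data = htobe32(0xdeadbeef);

	/* reading back: convert from big-endian to host order */
	printf("len=%u data=%#x\n", be16toh(crq.len), be32toh(crq.data));
	return 0;
}

Typed __be fields also let sparse-style checkers flag any access that forgets the conversion, which is exactly the class of bug the hunks above fix.
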
@@ -75,6 +75,10 @@ enum tis_defaults {
 #define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
 #define	TPM_RID(l)			(0x0F04 | ((l) << 12))
 
+struct priv_data {
+	bool irq_tested;
+};
+
 static LIST_HEAD(tis_chips);
 static DEFINE_MUTEX(tis_lock);
 

@@ -338,12 +342,27 @@ out_err:
 	return rc;
 }
 
+static void disable_interrupts(struct tpm_chip *chip)
+{
+	u32 intmask;
+
+	intmask =
+		ioread32(chip->vendor.iobase +
+			 TPM_INT_ENABLE(chip->vendor.locality));
+	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+	iowrite32(intmask,
+		  chip->vendor.iobase +
+		  TPM_INT_ENABLE(chip->vendor.locality));
+	free_irq(chip->vendor.irq, chip);
+	chip->vendor.irq = 0;
+}
+
 /*
  * If interrupts are used (signaled by an irq set in the vendor structure)
  * tpm.c can skip polling for the data to be available as the interrupt is
  * waited for here
  */
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
 {
 	int rc;
 	u32 ordinal;

@@ -373,6 +392,30 @@ out_err:
 	return rc;
 }
 
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	int rc, irq;
+	struct priv_data *priv = chip->vendor.priv;
+
+	if (!chip->vendor.irq || priv->irq_tested)
+		return tpm_tis_send_main(chip, buf, len);
+
+	/* Verify receipt of the expected IRQ */
+	irq = chip->vendor.irq;
+	chip->vendor.irq = 0;
+	rc = tpm_tis_send_main(chip, buf, len);
+	chip->vendor.irq = irq;
+	if (!priv->irq_tested)
+		msleep(1);
+	if (!priv->irq_tested) {
+		disable_interrupts(chip);
+		dev_err(chip->dev,
+			FW_BUG "TPM interrupt not working, polling instead\n");
+	}
+	priv->irq_tested = true;
+	return rc;
+}
+
 struct tis_vendor_timeout_override {
 	u32 did_vid;
 	unsigned long timeout_us[4];

@@ -546,6 +589,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
 	if (interrupt == 0)
 		return IRQ_NONE;
 
+	((struct priv_data *)chip->vendor.priv)->irq_tested = true;
 	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
 		wake_up_interruptible(&chip->vendor.read_queue);
 	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)

@@ -575,9 +619,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	u32 vendor, intfcaps, intmask;
 	int rc, i, irq_s, irq_e, probe;
 	struct tpm_chip *chip;
+	struct priv_data *priv;
 
+	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
 	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
 		return -ENODEV;
+	chip->vendor.priv = priv;
 
 	chip->vendor.iobase = ioremap(start, len);
 	if (!chip->vendor.iobase) {

@@ -646,19 +695,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 		dev_dbg(dev, "\tData Avail Int Support\n");
 
-	/* get the timeouts before testing for irqs */
-	if (tpm_get_timeouts(chip)) {
-		dev_err(dev, "Could not get TPM timeouts and durations\n");
-		rc = -ENODEV;
-		goto out_err;
-	}
-
-	if (tpm_do_selftest(chip)) {
-		dev_err(dev, "TPM self test failed\n");
-		rc = -ENODEV;
-		goto out_err;
-	}
-
 	/* INTERRUPT Setup */
 	init_waitqueue_head(&chip->vendor.read_queue);
 	init_waitqueue_head(&chip->vendor.int_queue);

@@ -760,6 +796,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 		}
 	}
 
+	if (tpm_get_timeouts(chip)) {
+		dev_err(dev, "Could not get TPM timeouts and durations\n");
+		rc = -ENODEV;
+		goto out_err;
+	}
+
+	if (tpm_do_selftest(chip)) {
+		dev_err(dev, "TPM self test failed\n");
+		rc = -ENODEV;
+		goto out_err;
+	}
+
 	INIT_LIST_HEAD(&chip->vendor.list);
 	mutex_lock(&tis_lock);
 	list_add(&chip->vendor.list, &tis_chips);

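Taken together, the tpm_tis hunks implement a probe-then-fallback policy: the first command after init runs in polling mode while the interrupt stays armed, and if the handler never sets irq_tested the driver permanently disables the IRQ. A compressed sketch of that control flow — struct dev, send_polled() and the rest are hypothetical stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	int irq;		/* 0 means "poll" */
	bool irq_tested;	/* set by the (simulated) interrupt handler */
};

/* stand-in for the transaction that normally waits on the IRQ */
static int send_polled(struct dev *d) { (void)d; return 0; }

static int send(struct dev *d)
{
	int rc, irq;

	if (!d->irq || d->irq_tested)
		return send_polled(d);

	/* run one transaction in polling mode while the IRQ stays armed */
	irq = d->irq;
	d->irq = 0;
	rc = send_polled(d);
	d->irq = irq;

	if (!d->irq_tested) {	/* handler never fired: IRQ is broken */
		d->irq = 0;
		fprintf(stderr, "interrupt not working, polling instead\n");
	}
	d->irq_tested = true;
	return rc;
}

int main(void)
{
	struct dev d = { .irq = 5, .irq_tested = false };
	send(&d);	/* the first send probes the IRQ and may fall back */
	printf("irq after probe: %d\n", d.irq);
	return 0;
}

The design point is that the probe costs one slightly slower transaction, after which the driver runs either fully interrupt-driven or fully polled, never stuck waiting on an IRQ that hardware will not deliver.
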
@@ -2023,12 +2023,13 @@ static int virtcons_probe(struct virtio_device *vdev)
 	spin_lock_init(&portdev->ports_lock);
 	INIT_LIST_HEAD(&portdev->ports);
 
+	INIT_WORK(&portdev->control_work, &control_work_handler);
+
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
 		spin_lock_init(&portdev->c_ivq_lock);
 		spin_lock_init(&portdev->c_ovq_lock);
-		INIT_WORK(&portdev->control_work, &control_work_handler);
 
 		nr_added_bufs = fill_queue(portdev->c_ivq,
 					   &portdev->c_ivq_lock);

@@ -70,7 +70,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
 	p = FACTOR_GET(config->pshift, config->pwidth, reg);
 
 	/* Calculate the rate */
-	rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
+	rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
 
 	return rate;
 }

@@ -15,6 +15,7 @@ struct clk_factors_config {
 	u8 mwidth;
 	u8 pshift;
 	u8 pwidth;
+	u8 n_start;
 };
 
 struct clk *clk_register_factors(struct device *dev, const char *name,

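With the new n_start field, the recalculated rate is (parent_rate * (n + n_start) * (k + 1) >> p) / (m + 1), which matters for PLLs whose N factor is register-encoded with an offset. A worked example of the arithmetic — the factor values below are invented, PLL-style numbers, not taken from any sunxi clock tree:

#include <stdio.h>

/* mirrors the clk_factors_recalc_rate() arithmetic; values are made up */
static unsigned long factors_rate(unsigned long parent_rate,
				  unsigned int n, unsigned int n_start,
				  unsigned int k, unsigned int m,
				  unsigned int p)
{
	return (parent_rate * (n + n_start) * (k + 1) >> p) / (m + 1);
}

int main(void)
{
	/* 24 MHz oscillator, n=31 with n_start=1, k=1, m=0, p=0 */
	unsigned long rate = factors_rate(24000000UL, 31, 1, 1, 0, 0);
	printf("%lu Hz\n", rate);	/* 24e6 * 32 * 2 = 1536000000 */
	return 0;
}

Without n_start, the same register value n=31 would compute 24e6 * 31 * 2 instead, i.e. the clock would be reported 48 MHz slow.
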
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 
 	pr_debug("previous speed is %u\n", prev_speed);
 
+	preempt_disable();
 	local_irq_save(flags);
 
 	/* switch to low state */

@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 
 out:
 	local_irq_restore(flags);
+	preempt_enable();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(speedstep_get_freqs);

@@ -188,6 +188,7 @@ static void speedstep_set_state(unsigned int state)
 		return;
 
 	/* Disable IRQs */
+	preempt_disable();
 	local_irq_save(flags);
 
 	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

@@ -198,9 +199,19 @@ static void speedstep_set_state(unsigned int state)
 
 	do {
 		if (retry) {
+			/*
+			 * We need to enable interrupts, otherwise the blockage
+			 * won't resolve.
+			 *
+			 * We disable preemption so that other processes don't
+			 * run. If other processes were running, they could
+			 * submit more DMA requests, making the blockage worse.
+			 */
 			pr_debug("retry %u, previous result %u, waiting...\n",
 					retry, result);
+			local_irq_enable();
 			mdelay(retry * 50);
+			local_irq_disable();
 		}
 		retry++;
 		__asm__ __volatile__(

@@ -217,6 +228,7 @@ static void speedstep_set_state(unsigned int state)
 
 	/* enable IRQs */
 	local_irq_restore(flags);
+	preempt_enable();
 
 	if (new_state == state)
 		pr_debug("change to %u MHz succeeded after %u tries "

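The comment added above carries the reasoning: preemption stays off for the whole transition so no new work can be queued, while interrupts are re-opened only inside the retry delay so the existing blockage can drain. The shape of that loop in isolation — irq_enable()/irq_disable()/try_transition() are printable stand-ins for the kernel primitives and the SMI call, not real ones:

#include <stdio.h>

/* stand-ins for local_irq_enable()/local_irq_disable()/mdelay() */
static void irq_enable(void)  { puts("irqs on"); }
static void irq_disable(void) { puts("irqs off"); }
static void delay_ms(unsigned int ms) { (void)ms; }

/* stand-in for the SMI call; pretend it succeeds on the second try */
static int try_transition(unsigned int attempt) { return attempt >= 2; }

int main(void)
{
	unsigned int retry = 0;

	irq_disable();			/* critical section begins */
	do {
		if (retry) {
			/* open an interrupt window so blocked work drains */
			irq_enable();
			delay_ms(retry * 50);
			irq_disable();
		}
		retry++;
	} while (!try_transition(retry) && retry < 5);
	irq_enable();			/* critical section ends */

	printf("done after %u tries\n", retry);
	return 0;
}
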
@@ -38,6 +38,12 @@ struct pstore_read_data {
 	char **buf;
 };
 
+static inline u64 generic_id(unsigned long timestamp,
+			     unsigned int part, int count)
+{
+	return (timestamp * 100 + part) * 1000 + count;
+}
+
 static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 {
 	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;

@@ -56,7 +62,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 
 	if (sscanf(name, "dump-type%u-%u-%d-%lu",
 		   cb_data->type, &part, &cnt, &time) == 4) {
-		*cb_data->id = part;
+		*cb_data->id = generic_id(time, part, cnt);
 		*cb_data->count = cnt;
 		cb_data->timespec->tv_sec = time;
 		cb_data->timespec->tv_nsec = 0;

@@ -67,7 +73,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 		 * which doesn't support holding
 		 * multiple logs, remains.
 		 */
-		*cb_data->id = part;
+		*cb_data->id = generic_id(time, part, 0);
 		*cb_data->count = 0;
 		cb_data->timespec->tv_sec = time;
 		cb_data->timespec->tv_nsec = 0;

@@ -185,14 +191,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 	char name[DUMP_NAME_LEN];
 	efi_char16_t efi_name[DUMP_NAME_LEN];
 	int found, i;
+	unsigned int part;
 
-	sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
-		time.tv_sec);
+	do_div(id, 1000);
+	part = do_div(id, 100);
+	sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
 
 	for (i = 0; i < DUMP_NAME_LEN; i++)
 		efi_name[i] = name[i];
 
-	edata.id = id;
+	edata.id = part;
 	edata.type = type;
 	edata.count = count;
 	edata.time = time;

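The erase path above inverts generic_id() by peeling the factors off in reverse: since id = (timestamp * 100 + part) * 1000 + count, dividing by 1000 discards count, and the remainder of the next division by 100 is part. A standalone round-trip check, with plain / and % standing in for the kernel's do_div() (the timestamp/part/count values are arbitrary):

#include <stdio.h>

static unsigned long long generic_id(unsigned long timestamp,
				     unsigned int part, int count)
{
	return ((unsigned long long)timestamp * 100 + part) * 1000 + count;
}

int main(void)
{
	unsigned long long id = generic_id(1427328000UL, 7, 42);
	unsigned int count, part;

	count = id % 1000; id /= 1000;	/* first do_div(id, 1000) */
	part = id % 100;   id /= 100;	/* then part = do_div(id, 100) */

	printf("timestamp=%llu part=%u count=%u\n", id, part, count);
	return 0;
}

The encoding only round-trips while part < 100 and count < 1000, which holds for EFI variable parts and pstore dump counts.
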
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
 	struct gpio_chip gpio_chip;
 };
 
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
 static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 	int val;
 
 	val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);

@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 			      int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	if (value)
 		tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,

@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 				int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	/* Set the initial value */
 	tps65912_gpio_set(gc, offset, value);

@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 
 static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
 				   GPIO_CFG_MASK);

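All four tps65912 callbacks switch from casting the gpio_chip into the wrong outer structure to the to_tgd() helper, i.e. container_of() on the structure that really embeds the chip. The idiom in isolation — container_of is re-implemented here for userspace, and struct demo_gpio_data is a made-up stand-in for the driver's private data:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gpio_chip { const char *label; };

/* driver-private wrapper that embeds the generic struct */
struct demo_gpio_data {
	int device_id;
	struct gpio_chip gpio_chip;
};

#define to_demo(gc) container_of(gc, struct demo_gpio_data, gpio_chip)

int main(void)
{
	struct demo_gpio_data data = {
		.device_id = 65912,
		.gpio_chip = { .label = "demo" },
	};
	struct gpio_chip *gc = &data.gpio_chip;	/* what callbacks receive */

	/* recover the wrapper from the embedded member */
	printf("device_id=%d\n", to_demo(gc)->device_id);
	return 0;
}

container_of() only subtracts the member offset, so it is correct only for the type that actually embeds the member — casting to any other outer type, as the old code did, silently reads garbage.
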
@@ -874,6 +874,7 @@ static int gpiod_export_link(struct device *dev, const char *name,
 		if (tdev != NULL) {
 			status = sysfs_create_link(&dev->kobj, &tdev->kobj,
 						   name);
+			put_device(tdev);
 		} else {
 			status = -ENODEV;
 		}

@@ -927,7 +928,7 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 	}
 
 	status = sysfs_set_active_low(desc, dev, value);
-
+	put_device(dev);
 unlock:
 	mutex_unlock(&sysfs_lock);
 

@@ -4016,6 +4016,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
 
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
 	return 0;
 }
 

@@ -930,12 +930,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
-			/* RB0 disabled, RB1 enabled */
-			tmp = 0x11111111;
-		} else {
+		if ((disabled_rb_mask & 3) == 2) {
 			/* RB1 disabled, RB0 enabled */
 			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
 		}
 	} else {
 		tmp = gb_addr_config & NUM_PIPES_MASK;

@@ -743,6 +743,10 @@ int r100_irq_set(struct radeon_device *rdev)
 		tmp |= RADEON_FP2_DETECT_MASK;
 	}
 	WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+	/* read back to post the write */
+	RREG32(RADEON_GEN_INT_CNTL);
+
 	return 0;
 }
 

@@ -3459,6 +3459,9 @@ int r600_init(struct radeon_device *rdev)
 		rdev->accel_working = false;
 	}
 
+	/* posting read */
+	RREG32(R_000E50_SRBM_STATUS);
+
 	return 0;
 }
 

@@ -177,11 +177,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	u32 ring = RADEON_CS_RING_GFX;
 	s32 priority = 0;
 
+	INIT_LIST_HEAD(&p->validated);
+
 	if (!cs->num_chunks) {
 		return 0;
 	}
+
 	/* get chunks */
-	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
 	p->ib.semaphore = NULL;

@@ -636,6 +636,10 @@ int rs600_irq_set(struct radeon_device *rdev)
 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 	if (ASIC_IS_DCE2(rdev))
 		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+
+	/* posting read */
+	RREG32(R_000040_GEN_INT_CNTL);
+
 	return 0;
 }
 

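Each radeon hunk in this run applies the same MMIO rule: a write that enables interrupts can sit in a posted-write buffer, so it is followed by a read on the same bus path to flush it out before interrupts are expected. The pattern reduced to its skeleton — the register array and offsets below are a pure simulation, not real hardware access:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_regs[2];	/* stands in for a mapped BAR */

static void wreg32(unsigned int off, uint32_t v) { fake_regs[off] = v; }
static uint32_t rreg32(unsigned int off)         { return fake_regs[off]; }

#define GEN_INT_CNTL 0	/* hypothetical register offsets */
#define SRBM_STATUS  1

int main(void)
{
	wreg32(GEN_INT_CNTL, 0x1);	/* enable interrupts */

	/*
	 * Posting read: on real hardware, a read on the same path forces
	 * the preceding write out of any posting buffers before we return
	 * and start relying on the interrupt being armed.
	 */
	(void)rreg32(SRBM_STATUS);

	printf("int cntl = %#x\n", rreg32(GEN_INT_CNTL));
	return 0;
}
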
@@ -5704,8 +5704,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
 	if (!vclk || !dclk) {
-		/* keep the Bypass mode, put PLL to sleep */
-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		/* keep the Bypass mode */
 		return 0;
 	}
 

@@ -5721,8 +5720,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	/* set VCO_MODE to 1 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-	/* toggle UPLL_SLEEP to 1 then back to 0 */
-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	/* disable sleep mode */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
 	/* deassert UPLL_RESET */

@@ -5778,5 +5776,8 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 
 	mdelay(100);
 
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
 	return 0;
 }

@@ -1083,6 +1083,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
 		return;
 	}
 
+	/*
+	 * Ignore reports for absolute data if the data didn't change. This is
+	 * not only an optimization but also fixes 'dead' key reports. Some
+	 * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
+	 * 0x31 and 0x32) report multiple keys, even though a localized keyboard
+	 * can only have one of them physically available. The 'dead' keys
+	 * report constant 0. As all map to the same keycode, they'd confuse
+	 * the input layer. If we filter the 'dead' keys on the HID level, we
+	 * skip the keycode translation and only forward real events.
+	 */
+	if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
+	                      HID_MAIN_ITEM_BUFFERED_BYTE)) &&
+	    (field->flags & HID_MAIN_ITEM_VARIABLE) &&
+	    usage->usage_index < field->maxusage &&
+	    value == field->value[usage->usage_index])
+		return;
+
 	/* report the usage code as scancode if the key status has changed */
 	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);

@@ -341,7 +341,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
 	int ret, ret_size;
-	int size = ihid->bufsize;
+	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+	if (size > ihid->bufsize)
+		size = ihid->bufsize;
 
 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
 	if (ret != size) {

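The i2c-hid fix is the usual rule for device-supplied sizes: wMaxInputLength comes from the descriptor the device itself reported, so it is clamped to the driver's own buffer before the bus read. The check in miniature, with invented sizes:

#include <stdio.h>

int main(void)
{
	int bufsize = 64;		/* what the driver allocated */
	int wmax_input_length = 4096;	/* device-supplied, untrusted */

	int size = wmax_input_length;
	if (size > bufsize)		/* never read past our own buffer */
		size = bufsize;

	printf("reading %d bytes\n", size);
	return 0;
}
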
@@ -686,7 +686,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
 	if (ret)
 		pr_err("Unable to register child device\n");
 	else
-		pr_info("child device %s registered\n",
+		pr_debug("child device %s registered\n",
 			dev_name(&child_device_obj->device));
 
 	return ret;

@@ -698,14 +698,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
  */
 void vmbus_device_unregister(struct hv_device *device_obj)
 {
+	pr_debug("child device %s unregistered\n",
+		 dev_name(&device_obj->device));
+
 	/*
 	 * Kick off the process of unregistering the device.
 	 * This will call vmbus_remove() and eventually vmbus_device_release()
 	 */
 	device_unregister(&device_obj->device);
-
-	pr_info("child device %s unregistered\n",
-		dev_name(&device_obj->device));
 }
 
 

@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/debugfs.h>
+#include <linux/bitops.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>

@@ -447,7 +448,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 		mutex_unlock(&indio_dev->mlock);
 		if (ret)
 			return ret;
-		val16 = ((val16 & 0xFFF) << 4) >> 4;
+		val16 = sign_extend32(val16, 11);
 		*val = val16;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_OFFSET:

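sign_extend32() widens a two's-complement field whose sign bit sits at an arbitrary position; the shift-left/shift-right trick it replaces relies on implementation-defined signed-shift behavior. Both forms side by side — sign_extend32 is re-implemented here with the same contract as the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* same contract as the kernel's sign_extend32(): sign bit at position b */
static int32_t sign_extend32(uint32_t value, int b)
{
	uint32_t m = 1U << b;
	return (int32_t)((value ^ m) - m);
}

int main(void)
{
	uint16_t raw = 0x0FFF;	/* 12-bit field reading -1 */

	/* old style: park the sign bit at bit 15, then arithmetic-shift back
	 * down; correct in practice but implementation-defined in C */
	int16_t a = ((int16_t)((raw & 0xFFF) << 4)) >> 4;
	/* new style: explicit helper, sign bit is bit 11 of a 12-bit field */
	int32_t b = sign_extend32(raw & 0xFFF, 11);

	printf("shift trick: %d, sign_extend32: %d\n", a, b);	/* -1, -1 */
	return 0;
}

The xor-and-subtract form works in pure unsigned arithmetic, which is exactly why the helper is preferred over the shift trick.
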
@@ -1055,12 +1055,6 @@ struct qib_devdata {
 	/* control high-level access to EEPROM */
 	struct mutex eep_lock;
 	uint64_t traffic_wds;
-	/* active time is kept in seconds, but logged in hours */
-	atomic_t active_time;
-	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
-	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-	uint16_t eep_hrs;
 	/*
 	 * masks for which bits of errs, hwerrs that cause
 	 * each of the counters to increment.

@@ -1278,8 +1272,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
 		    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);

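Note how the header keeps every caller compiling while the implementation disappears: qib_inc_eeprom_err() becomes an empty function-like macro, so each call site reduces to a bare semicolon and its arguments are discarded at preprocessing time. The technique in miniature, with a hypothetical trace hook:

#include <stdio.h>

/* flip this to 0 to compile the hook out entirely, qib-style */
#define TRACE_ENABLED 1

#if TRACE_ENABLED
static void trace_event(int idx, int incr)
{
	printf("event %d += %d\n", idx, incr);
}
#else
/* empty macro: call sites stay in place but expand to nothing */
#define trace_event(idx, incr)
#endif

int main(void)
{
	trace_event(3, 1);	/* compiles either way */
	return 0;
}

One caveat of the empty-macro form, as opposed to an empty inline function: argument expressions with side effects vanish too, so it only suits hooks whose arguments are plain values, as here.
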
@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 			"Board SN %s did not pass functional test: %s\n",
 			dd->serial, ifp->if_comment);
 
-	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-	/*
-	 * Power-on (actually "active") hours are kept as little-endian value
-	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-	 * atomic_t while running.
-	 */
-	atomic_set(&dd->active_time, 0);
-	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
 	vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-	void *buf;
-	struct qib_flash *ifp;
-	int len, hi_water;
-	uint32_t new_time, new_hrs;
-	u8 csum;
-	int ret, idx;
-	unsigned long flags;
-
-	/* first, check if we actually need to do anything. */
-	ret = 0;
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		if (dd->eep_st_new_errs[idx]) {
-			ret = 1;
-			break;
-		}
-	}
-	new_time = atomic_read(&dd->active_time);
-
-	if (ret == 0 && new_time < 3600)
-		goto bail;
-
-	/*
-	 * The quick-check above determined that there is something worthy
-	 * of logging, so get current contents and do a more detailed idea.
-	 * read full flash, not just currently used part, since it may have
-	 * been written with a newer definition
-	 */
-	len = sizeof(struct qib_flash);
-	buf = vmalloc(len);
-	ret = 1;
-	if (!buf) {
-		qib_dev_err(dd,
-			"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-			len);
-		goto bail;
-	}
-
-	/* Grab semaphore and read current EEPROM. If we get an
-	 * error, let go, but if not, keep it until we finish write.
-	 */
-	ret = mutex_lock_interruptible(&dd->eep_lock);
-	if (ret) {
-		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-		goto free_bail;
-	}
-	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-	if (ret) {
-		mutex_unlock(&dd->eep_lock);
-		qib_dev_err(dd, "Unable read EEPROM for logging\n");
-		goto free_bail;
-	}
-	ifp = (struct qib_flash *)buf;
-
-	csum = flash_csum(ifp, 0);
-	if (csum != ifp->if_csum) {
-		mutex_unlock(&dd->eep_lock);
-		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-			    csum, ifp->if_csum);
-		ret = 1;
-		goto free_bail;
-	}
-	hi_water = 0;
-	spin_lock_irqsave(&dd->eep_st_lock, flags);
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		int new_val = dd->eep_st_new_errs[idx];
-		if (new_val) {
-			/*
-			 * If we have seen any errors, add to EEPROM values
-			 * We need to saturate at 0xFF (255) and we also
-			 * would need to adjust the checksum if we were
-			 * trying to minimize EEPROM traffic
-			 * Note that we add to actual current count in EEPROM,
-			 * in case it was altered while we were running.
-			 */
-			new_val += ifp->if_errcntp[idx];
-			if (new_val > 0xFF)
-				new_val = 0xFF;
-			if (ifp->if_errcntp[idx] != new_val) {
-				ifp->if_errcntp[idx] = new_val;
-				hi_water = offsetof(struct qib_flash,
-						    if_errcntp) + idx;
-			}
-			/*
-			 * update our shadow (used to minimize EEPROM
-			 * traffic), to match what we are about to write.
-			 */
-			dd->eep_st_errs[idx] = new_val;
-			dd->eep_st_new_errs[idx] = 0;
-		}
-	}
-	/*
-	 * Now update active-time. We would like to round to the nearest hour
-	 * but unless atomic_t are sure to be proper signed ints we cannot,
-	 * because we need to account for what we "transfer" to EEPROM and
-	 * if we log an hour at 31 minutes, then we would need to set
-	 * active_time to -29 to accurately count the _next_ hour.
-	 */
-	if (new_time >= 3600) {
-		new_hrs = new_time / 3600;
-		atomic_sub((new_hrs * 3600), &dd->active_time);
-		new_hrs += dd->eep_hrs;
-		if (new_hrs > 0xFFFF)
-			new_hrs = 0xFFFF;
-		dd->eep_hrs = new_hrs;
-		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-			ifp->if_powerhour[0] = new_hrs & 0xFF;
-			hi_water = offsetof(struct qib_flash, if_powerhour);
-		}
-		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-			ifp->if_powerhour[1] = new_hrs >> 8;
-			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-		}
-	}
-	/*
-	 * There is a tiny possibility that we could somehow fail to write
-	 * the EEPROM after updating our shadows, but problems from holding
-	 * the spinlock too long are a much bigger issue.
-	 */
-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-	if (hi_water) {
-		/* we made some change to the data, uopdate cksum and write */
-		csum = flash_csum(ifp, 1);
-		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-	}
-	mutex_unlock(&dd->eep_lock);
-	if (ret)
-		qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-	vfree(buf);
-bail:
-	return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-	uint new_val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->eep_st_lock, flags);
-	new_val = dd->eep_st_new_errs[eidx] + incr;
-	if (new_val > 255)
-		new_val = 255;
-	dd->eep_st_new_errs[eidx] = new_val;
-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}

@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
 	spin_lock_irqsave(&dd->eep_st_lock, flags);
 	traffic_wds -= dd->traffic_wds;
 	dd->traffic_wds += traffic_wds;
-	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-		atomic_add(5, &dd->active_time); /* S/B #define */
 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
 	qib_chk_6120_errormask(dd);

@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
 	spin_lock_irqsave(&dd->eep_st_lock, flags);
 	traffic_wds -= dd->traffic_wds;
 	dd->traffic_wds += traffic_wds;
-	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-		atomic_add(5, &dd->active_time); /* S/B #define */
 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);

@@ -4854,8 +4854,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
 		traffic_wds -= ppd->dd->traffic_wds;
 		ppd->dd->traffic_wds += traffic_wds;
-		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
 						QIB_IB_QDR) &&

@@ -892,7 +892,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
 		}
 	}
 
-	qib_update_eeprom_log(dd);
 }
 
 /**

@@ -611,28 +611,6 @@ bail:
 	return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-				struct device_attribute *attr, char *buf)
-{
-	struct qib_ibdev *dev =
-		container_of(device, struct qib_ibdev, ibdev.dev);
-	struct qib_devdata *dd = dd_from_dev(dev);
-	int idx, count;
-
-	/* force consistency with actual EEPROM */
-	if (qib_update_eeprom_log(dd) != 0)
-		return -ENXIO;
-
-	count = 0;
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-			dd->eep_st_errs[idx],
-			idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-	}
-
-	return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */

@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);

@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
 	&dev_attr_nfreectxts,
 	&dev_attr_serial,
 	&dev_attr_boardversion,
-	&dev_attr_logged_errors,
 	&dev_attr_tempsense,
 	&dev_attr_localbus_info,
 	&dev_attr_chip_reset,

@@ -40,8 +40,15 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 static struct kmem_cache *isert_cmd_cache;
 
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {

@@ -107,9 +114,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
 	/*
 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-	 * work-around for RDMA_READ..
+	 * work-around for RDMA_READs with ConnectX-2.
+	 *
+	 * Also, still make sure to have at least two SGEs for
+	 * outgoing control PDU responses.
 	 */
-	attr.cap.max_send_sge = devattr.max_sge - 2;
+	attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
 	isert_conn->max_sge = attr.cap.max_send_sge;
 
 	attr.cap.max_recv_sge = 1;

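The clamp above guards the ConnectX-2 workaround against devices that advertise very few SGEs: reserving two must never push the value below the two still needed for outgoing control PDUs. The same clamp in isolation — max_demo() mirrors the kernel's max(), and the capability numbers are invented:

#include <stdio.h>

#define max_demo(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* hypothetical device capabilities */
	int max_sge_big = 32, max_sge_tiny = 3;

	/* reserve 2 SGEs for the workaround, but never go below 2 */
	printf("%d\n", max_demo(2, max_sge_big - 2));	/* 30 */
	printf("%d\n", max_demo(2, max_sge_tiny - 2));	/* 2, not 1 */
	return 0;
}
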
@@ -124,12 +134,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
 	if (ret) {
 		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-		return ret;
+		goto err;
 	}
 	isert_conn->conn_qp = cma_id->qp;
 	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
 	return 0;
+err:
+	mutex_lock(&device_list_mutex);
+	device->cq_active_qps[min_index]--;
+	mutex_unlock(&device_list_mutex);
+
+	return ret;
 }
 
 static void

@@ -212,6 +228,13 @@ isert_create_device_ib_res(struct isert_device *device)
 	struct ib_device *ib_dev = device->ib_device;
 	struct isert_cq_desc *cq_desc;
 	int ret = 0, i, j;
+	int max_rx_cqe, max_tx_cqe;
+	struct ib_device_attr dev_attr;
+
+	memset(&dev_attr, 0, sizeof(struct ib_device_attr));
+	ret = isert_query_device(device->ib_device, &dev_attr);
+	if (ret)
+		return ret;
 
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);

@@ -234,6 +257,9 @@ isert_create_device_ib_res(struct isert_device *device)
 		goto out_cq_desc;
 	}
 
+	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
+	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
+
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;

@@ -242,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
-						ISER_MAX_RX_CQ_LEN, i);
+						max_rx_cqe, i);
 		if (IS_ERR(device->dev_rx_cq[i])) {
 			ret = PTR_ERR(device->dev_rx_cq[i]);
 			device->dev_rx_cq[i] = NULL;

@@ -253,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
-						ISER_MAX_TX_CQ_LEN, i);
+						max_tx_cqe, i);
 		if (IS_ERR(device->dev_tx_cq[i])) {
 			ret = PTR_ERR(device->dev_tx_cq[i]);
 			device->dev_tx_cq[i] = NULL;

@@ -375,8 +401,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-	struct iscsi_np *np = cma_id->context;
-	struct isert_np *isert_np = np->np_context;
+	struct isert_np *isert_np = cma_id->context;
+	struct iscsi_np *np = isert_np->np;
 	struct isert_conn *isert_conn;
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;

@@ -401,12 +427,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
+	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->conn_wait);
 	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 
-	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
 	isert_conn->responder_resources = event->param.conn.responder_resources;
 	isert_conn->initiator_depth = event->param.conn.initiator_depth;

@@ -466,6 +492,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	if (ret)
 		goto out_conn_dev;
 
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		goto out_conn_dev;
+
+	ret = isert_rdma_accept(isert_conn);
+	if (ret)
+		goto out_conn_dev;
+
 	mutex_lock(&isert_np->np_accept_mutex);
 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
 	mutex_unlock(&isert_np->np_accept_mutex);

@@ -486,6 +520,7 @@ out_login_buf:
 	kfree(isert_conn->login_buf);
 out:
 	kfree(isert_conn);
+	rdma_reject(cma_id, NULL, 0);
 	return ret;
 }
 

@@ -498,18 +533,20 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
-
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
 		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
 		mutex_lock(&device_list_mutex);
 		isert_conn->conn_device->cq_active_qps[cq_index]--;
 		mutex_unlock(&device_list_mutex);
 
-		ib_destroy_qp(isert_conn->conn_qp);
+		rdma_destroy_qp(isert_conn->conn_cm_id);
 	}
 
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

@@ -529,9 +566,19 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
-	kref_get(&isert_conn->conn_kref);
+	pr_info("conn %p\n", isert_conn);
+
+	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+		pr_warn("conn %p connect_release is running\n", isert_conn);
+		return;
+	}
+
+	mutex_lock(&isert_conn->conn_mutex);
+	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+		isert_conn->state = ISER_CONN_UP;
+	mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void

@@ -552,65 +599,108 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-	struct isert_conn *isert_conn = container_of(work,
-				struct isert_conn, conn_logout_work);
+	int err;
 
-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->state == ISER_CONN_UP)
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+		/*
+		 * No flush completions will occur as we didn't
+		 * get to ISER_CONN_FULL_FEATURE yet, complete
+		 * to allow teardown progress.
+		 */
+		complete(&isert_conn->conn_wait_comp_err);
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		pr_info("Terminating conn %p state %d\n",
+			isert_conn, isert_conn->state);
 		isert_conn->state = ISER_CONN_TERMINATING;
-
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		goto wake_up;
+		err = rdma_disconnect(isert_conn->conn_cm_id);
+		if (err)
+			pr_warn("Failed rdma_disconnect isert_conn %p\n",
+				isert_conn);
+		break;
+	default:
+		pr_warn("conn %p teminating in state %d\n",
+			isert_conn, isert_conn->state);
 	}
-	if (!isert_conn->conn_cm_id) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
-		return;
-	}
-
-	if (isert_conn->disconnect) {
-		/* Send DREQ/DREP towards our initiator */
-		rdma_disconnect(isert_conn->conn_cm_id);
-	}
-
-	mutex_unlock(&isert_conn->conn_mutex);
-
-wake_up:
-	complete(&isert_conn->conn_wait);
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_np_cma_handler(struct isert_np *isert_np,
+		     enum rdma_cm_event_type event)
 {
-	struct isert_conn *isert_conn;
+	pr_debug("isert np %p, handling event %d\n", isert_np, event);
 
-	if (!cma_id->qp) {
-		struct isert_np *isert_np = cma_id->context;
-
+	switch (event) {
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		isert_np->np_cm_id = NULL;
-		return -1;
+		break;
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		isert_np->np_cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->np_cm_id)) {
+			pr_err("isert np %p setup id failed: %ld\n",
+			       isert_np, PTR_ERR(isert_np->np_cm_id));
+			isert_np->np_cm_id = NULL;
+		}
+		break;
+	default:
+		pr_err("isert np %p Unexpected event %d\n",
+		       isert_np, event);
 	}
 
-	isert_conn = (struct isert_conn *)cma_id->context;
+	return -1;
+}
 
-	isert_conn->disconnect = disconnect;
-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-	schedule_work(&isert_conn->conn_logout_work);
+static int
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+			   enum rdma_cm_event_type event)
+{
+	struct isert_np *isert_np = cma_id->context;
+	struct isert_conn *isert_conn;
+
+	if (isert_np->np_cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event);
+
+	isert_conn = cma_id->qp->qp_context;
+
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn_terminate(isert_conn);
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	pr_info("conn %p completing conn_wait\n", isert_conn);
+	complete(&isert_conn->conn_wait);
 
 	return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+	isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
-	bool disconnect = false;
 
 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
 		 event->event, event->status, cma_id->context, cma_id);

@@ -628,11 +718,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-		disconnect = true;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-		ret = isert_disconnected_handler(cma_id, disconnect);
+		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
+		isert_connect_error(cma_id);
+		break;
 	default:
 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
 		break;

@@ -834,7 +927,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 		if (ret)
 			return ret;
 
-		isert_conn->state = ISER_CONN_UP;
+		/* Now we are in FULL_FEATURE phase */
+		mutex_lock(&isert_conn->conn_mutex);
+		isert_conn->state = ISER_CONN_FULL_FEATURE;
+		mutex_unlock(&isert_conn->conn_mutex);
 		goto post_send;
 	}
 

@@ -851,18 +947,17 @@ post_send:
 }
 
 static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
-		   struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
 {
+	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+	int rx_buflen = isert_conn->login_req_len;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
 	int size;
 
-	if (!login) {
-		pr_err("conn->conn_login is NULL\n");
-		dump_stack();
-		return;
-	}
+	pr_info("conn %p\n", isert_conn);
+
+	WARN_ON_ONCE(!login);
 
 	if (login->first_request) {
 		struct iscsi_login_req *login_req =

@@ -892,7 +987,8 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 		size, rx_buflen, MAX_KEY_VALUE_PAIRS);
 	memcpy(login->req_buf, &rx_desc->data[0], size);
 
-	complete(&isert_conn->conn_login_comp);
+	if (login->first_request)
+		complete(&isert_conn->conn_login_comp);
 }
 
 static void

@@ -1169,11 +1265,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 		 hdr->opcode, hdr->itt, hdr->flags,
 		 (int)(xfer_len - ISER_HEADERS_LEN));
 
-	if ((char *)desc == isert_conn->login_req_buf)
-		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
-				   isert_conn);
-	else
+	if ((char *)desc == isert_conn->login_req_buf) {
+		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+		if (isert_conn->conn) {
+			struct iscsi_login *login = isert_conn->conn->conn_login;
+
+			if (login && !login->first_request)
+				isert_rx_login_req(isert_conn);
+		}
+		mutex_lock(&isert_conn->conn_mutex);
+		complete(&isert_conn->login_req_comp);
+		mutex_unlock(&isert_conn->conn_mutex);
+	} else {
 		isert_rx_do_work(desc, isert_conn);
+	}
 
 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
 				      DMA_FROM_DEVICE);

@@ -1483,7 +1588,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
 	msleep(3000);
 
 	mutex_lock(&isert_conn->conn_mutex);
-	isert_conn->state = ISER_CONN_DOWN;
+	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->conn_mutex);
 
 	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

@@ -2044,13 +2149,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 	return ret;
 }
 
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+	struct iscsi_np *np = isert_np->np;
+	struct rdma_cm_id *id;
+	struct sockaddr *sa;
+	int ret;
+
+	sa = (struct sockaddr *)&np->np_sockaddr;
+	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+	id = rdma_create_id(isert_cma_handler, isert_np,
+			    RDMA_PS_TCP, IB_QPT_RC);
+	if (IS_ERR(id)) {
+		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+		ret = PTR_ERR(id);
+		goto out;
+	}
+	pr_debug("id %p context %p\n", id, id->context);
+
+	ret = rdma_bind_addr(id, sa);
+	if (ret) {
+		pr_err("rdma_bind_addr() failed: %d\n", ret);
+		goto out_id;
+	}
+
+	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+	if (ret) {
+		pr_err("rdma_listen() failed: %d\n", ret);
+		goto out_id;
+	}
+
+	return id;
+out_id:
+	rdma_destroy_id(id);
+out:
+	return ERR_PTR(ret);
+}
+
 static int
 isert_setup_np(struct iscsi_np *np,
 	       struct __kernel_sockaddr_storage *ksockaddr)
 {
 	struct isert_np *isert_np;
 	struct rdma_cm_id *isert_lid;
-	struct sockaddr *sa;
 	int ret;
 
 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);

@@ -2062,9 +2205,8 @@ isert_setup_np(struct iscsi_np *np,
 	mutex_init(&isert_np->np_accept_mutex);
 	INIT_LIST_HEAD(&isert_np->np_accept_list);
 	init_completion(&isert_np->np_login_comp);
+	isert_np->np = np;
 
-	sa = (struct sockaddr *)ksockaddr;
-	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
 	/*
 	 * Setup the np->np_sockaddr from the passed sockaddr setup
 	 * in iscsi_target_configfs.c code..

@@ -2072,37 +2214,20 @@ isert_setup_np(struct iscsi_np *np,
 	memcpy(&np->np_sockaddr, ksockaddr,
 	       sizeof(struct __kernel_sockaddr_storage));
 
-	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
-				   IB_QPT_RC);
+	isert_lid = isert_setup_id(isert_np);
 	if (IS_ERR(isert_lid)) {
-		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
-		       PTR_ERR(isert_lid));
 		ret = PTR_ERR(isert_lid);
 		goto out;
 	}
 
-	ret = rdma_bind_addr(isert_lid, sa);
-	if (ret) {
-		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
-		goto out_lid;
-	}
-
-	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
-	if (ret) {
-		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
-		goto out_lid;
-	}
-
 	isert_np->np_cm_id = isert_lid;
 	np->np_context = isert_np;
-	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
 
 	return 0;
 
-out_lid:
-	rdma_destroy_id(isert_lid);
 out:
 	kfree(isert_np);
 
 	return ret;
 }
 

@@ -2138,13 +2263,27 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	int ret;
 
-	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+	pr_info("before login_req comp conn: %p\n", isert_conn);
+	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+	if (ret) {
+		pr_err("isert_conn %p interrupted before got login req\n",
+		       isert_conn);
+		return ret;
+	}
+	isert_conn->login_req_comp.done = 0;
+
+	if (!login->first_request)
+		return 0;
+
+	isert_rx_login_req(isert_conn);
+
+	pr_info("before conn_login_comp conn: %p\n", conn);
 
 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
 	if (ret)
 		return ret;
 
-	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+	pr_info("processing login->req: %p\n", login->req);
 	return 0;
 }
 

@ -2222,17 +2361,10 @@ accept_wait:
|
|||
isert_conn->conn = conn;
|
||||
max_accept = 0;
|
||||
|
||||
ret = isert_rdma_post_recvl(isert_conn);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = isert_rdma_accept(isert_conn);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
isert_set_conn_info(np, conn, isert_conn);
|
||||
|
||||
pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
|
||||
pr_debug("Processing isert_conn: %p\n", isert_conn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2248,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
|
|||
kfree(isert_np);
|
||||
}
|
||||
|
||||
static void isert_release_work(struct work_struct *work)
|
||||
{
|
||||
struct isert_conn *isert_conn = container_of(work,
|
||||
struct isert_conn,
|
||||
release_work);
|
||||
|
||||
pr_info("Starting release conn %p\n", isert_conn);
|
||||
|
||||
wait_for_completion(&isert_conn->conn_wait);
|
||||
|
||||
mutex_lock(&isert_conn->conn_mutex);
|
||||
isert_conn->state = ISER_CONN_DOWN;
|
||||
mutex_unlock(&isert_conn->conn_mutex);
|
||||
|
||||
pr_info("Destroying conn %p\n", isert_conn);
|
||||
isert_put_conn(isert_conn);
|
||||
}
|
||||
|
||||
static void isert_wait_conn(struct iscsi_conn *conn)
|
||||
{
|
||||
struct isert_conn *isert_conn = conn->context;
|
||||
|
@ -2255,10 +2405,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|||
pr_debug("isert_wait_conn: Starting \n");
|
||||
|
||||
mutex_lock(&isert_conn->conn_mutex);
|
||||
if (isert_conn->conn_cm_id) {
|
||||
pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
|
||||
rdma_disconnect(isert_conn->conn_cm_id);
|
||||
}
|
||||
/*
|
||||
* Only wait for conn_wait_comp_err if the isert_conn made it
|
||||
* into full feature phase..
|
||||
|
@ -2267,14 +2413,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|||
mutex_unlock(&isert_conn->conn_mutex);
|
||||
return;
|
||||
}
|
||||
if (isert_conn->state == ISER_CONN_UP)
|
||||
isert_conn->state = ISER_CONN_TERMINATING;
|
||||
isert_conn_terminate(isert_conn);
|
||||
mutex_unlock(&isert_conn->conn_mutex);
|
||||
|
||||
wait_for_completion(&isert_conn->conn_wait_comp_err);
|
||||
|
||||
wait_for_completion(&isert_conn->conn_wait);
|
||||
isert_put_conn(isert_conn);
|
||||
INIT_WORK(&isert_conn->release_work, isert_release_work);
|
||||
queue_work(isert_release_wq, &isert_conn->release_work);
|
||||
}
|
||||
|
||||
static void isert_free_conn(struct iscsi_conn *conn)
|
||||
|
@ -2320,20 +2465,30 @@ static int __init isert_init(void)
|
|||
goto destroy_rx_wq;
|
||||
}
|
||||
|
||||
isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
|
||||
WQ_UNBOUND_MAX_ACTIVE);
|
||||
if (!isert_release_wq) {
|
||||
pr_err("Unable to allocate isert_release_wq\n");
|
||||
ret = -ENOMEM;
|
||||
goto destroy_comp_wq;
|
||||
}
|
||||
|
||||
isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
|
||||
sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
|
||||
0, NULL);
|
||||
if (!isert_cmd_cache) {
|
||||
pr_err("Unable to create isert_cmd_cache\n");
|
||||
ret = -ENOMEM;
|
||||
goto destroy_tx_cq;
|
||||
goto destroy_release_wq;
|
||||
}
|
||||
|
||||
iscsit_register_transport(&iser_target_transport);
|
||||
pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
||||
pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
||||
return 0;
|
||||
|
||||
destroy_tx_cq:
|
||||
destroy_release_wq:
|
||||
destroy_workqueue(isert_release_wq);
|
||||
destroy_comp_wq:
|
||||
destroy_workqueue(isert_comp_wq);
|
||||
destroy_rx_wq:
|
||||
destroy_workqueue(isert_rx_wq);
|
||||
|
@ -2344,6 +2499,7 @@ static void __exit isert_exit(void)
|
|||
{
|
||||
flush_scheduled_work();
|
||||
kmem_cache_destroy(isert_cmd_cache);
|
||||
destroy_workqueue(isert_release_wq);
|
||||
destroy_workqueue(isert_comp_wq);
|
||||
destroy_workqueue(isert_rx_wq);
|
||||
iscsit_unregister_transport(&iser_target_transport);
|
||||
|
|
|
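The isert_setup_id() helper above reports failure through the kernel's ERR_PTR convention rather than returning NULL, which lets the caller recover the exact errno from the returned pointer. A minimal sketch of that pattern follows, using a hypothetical setup_foo() that is not part of this driver:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int val;
};

/* Return a valid pointer, or an errno encoded with ERR_PTR(). */
static struct foo *setup_foo(int val)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	f->val = val;
	return f;
}

static int use_foo(void)
{
	struct foo *f = setup_foo(42);

	if (IS_ERR(f))
		return PTR_ERR(f);	/* recover the encoded errno */
	kfree(f);
	return 0;
}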
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -21,6 +21,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
 	ISER_CONN_INIT,
 	ISER_CONN_UP,
+	ISER_CONN_FULL_FEATURE,
 	ISER_CONN_TERMINATING,
 	ISER_CONN_DOWN,
 };
@@ -87,6 +88,7 @@ struct isert_conn {
 	char *login_req_buf;
 	char *login_rsp_buf;
 	u64 login_req_dma;
+	int login_req_len;
 	u64 login_rsp_dma;
 	unsigned int conn_rx_desc_head;
 	struct iser_rx_desc *conn_rx_descs;
@@ -94,18 +96,18 @@ struct isert_conn {
 	struct iscsi_conn *conn;
 	struct list_head conn_accept_node;
 	struct completion conn_login_comp;
+	struct completion login_req_comp;
 	struct iser_tx_desc conn_login_tx_desc;
 	struct rdma_cm_id *conn_cm_id;
 	struct ib_pd *conn_pd;
 	struct ib_mr *conn_mr;
 	struct ib_qp *conn_qp;
 	struct isert_device *conn_device;
-	struct work_struct conn_logout_work;
 	struct mutex conn_mutex;
 	struct completion conn_wait;
 	struct completion conn_wait_comp_err;
 	struct kref conn_kref;
-	bool disconnect;
+	struct work_struct release_work;
 };
 
 #define ISERT_MAX_CQ 64
@@ -131,6 +133,7 @@ struct isert_device {
 };
 
 struct isert_np {
+	struct iscsi_np *np;
 	struct semaphore np_sem;
 	struct rdma_cm_id *np_cm_id;
 	struct mutex np_accept_mutex;
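The release_work member added to struct isert_conn above pairs with isert_release_work() in the .c hunk: final connection teardown is pushed off the caller's context onto a workqueue, and the handler recovers the connection with container_of(). A minimal sketch of that embed-a-work_struct pattern, with illustrative names (my_conn is not driver code):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_conn {
	struct work_struct release_work;
};

static void my_conn_release(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn,
					    release_work);

	kfree(conn);	/* final teardown runs off the fast path */
}

static void my_conn_schedule_release(struct my_conn *conn)
{
	INIT_WORK(&conn->release_work, my_conn_release);
	schedule_work(&conn->release_work);
}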
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -151,6 +151,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
 		},
 	},
 	{
+		/* Medion Akoya E7225 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+		},
+	},
+	{
 		/* Blue FB5601 */
 		.matches = {
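The i8042 change above only appends one more entry to a DMI quirk table. For context, such a table is matched against the firmware's DMI strings at boot; a hedged sketch of how a table like this is typically consumed, with an illustrative table name and helper:

#include <linux/dmi.h>

static const struct dmi_system_id my_quirk_table[] __initconst = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
		},
	},
	{ }	/* terminating entry */
};

static bool __init my_needs_quirk(void)
{
	/* nonzero when the running system matches any table entry */
	return dmi_check_system(my_quirk_table);
}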
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -648,7 +648,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd) {
 		DMERR("could not allocate metadata struct");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	atomic_set(&cmd->ref_count, 1);
@@ -710,7 +710,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
 		return cmd;
 
 	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
-	if (cmd) {
+	if (!IS_ERR(cmd)) {
 		mutex_lock(&table_lock);
 		cmd2 = lookup(bdev);
 		if (cmd2) {
@@ -745,9 +745,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 {
 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
 						       may_format_device, policy_hint_size);
-	if (cmd && !same_params(cmd, data_block_size)) {
+
+	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
 		dm_cache_metadata_close(cmd);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	return cmd;
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -291,6 +291,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
 
+	/* Reject unsupported discard requests */
+	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+		dec_count(io, region, -EOPNOTSUPP);
+		return;
+	}
+
 	/*
 	 * where->count may be zero if rw holds a flush and we need to
 	 * send a zero-sized flush.
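The dm-io hunk above fails discards early when the underlying queue does not advertise discard support, instead of passing them down the stack. A minimal sketch of the same capability check against the 3.10-era block API (my_submit is illustrative, and REQ_DISCARD has since been replaced in newer kernels):

#include <linux/blkdev.h>

/* Fail unsupported discards up front instead of sending them down. */
static int my_submit(struct request_queue *q, int rw)
{
	if ((rw & REQ_DISCARD) && !blk_queue_discard(q))
		return -EOPNOTSUPP;

	return 0;
}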
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
 		return;
 	}
 
+	/*
+	 * If the bio is discard, return an error, but do not
+	 * degrade the array.
+	 */
+	if (bio->bi_rw & REQ_DISCARD) {
+		bio_endio(bio, -EOPNOTSUPP);
+		return;
+	}
+
 	for (i = 0; i < ms->nr_mirrors; i++)
 		if (test_bit(i, &error))
 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1439,8 +1439,6 @@ out:
 		full_bio->bi_end_io = pe->full_bio_end_io;
 		full_bio->bi_private = pe->full_bio_private;
 	}
-	free_pending_exception(pe);
-
 	increment_pending_exceptions_done_count();
 
 	up_write(&s->lock);
@@ -1457,6 +1455,8 @@ out:
 	}
 
 	retry_origin_bios(s, origin_bios);
+
+	free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2457,6 +2457,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
+	if (get_pool_mode(pool) >= PM_READ_ONLY) {
+		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+		      dm_device_name(pool->pool_md));
+		return -EINVAL;
+	}
+
 	if (!strcasecmp(argv[0], "create_thin"))
 		r = process_create_thin_mesg(argc, argv, pool);
 
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2270,7 +2270,7 @@ int dm_setup_md_queue(struct mapped_device *md)
 	return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
 	struct mapped_device *md;
 	unsigned minor = MINOR(dev);
@@ -2281,12 +2281,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
 	spin_lock(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
-	if (md && (md == MINOR_ALLOCED ||
-		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
-		   dm_deleting_md(md) ||
-		   test_bit(DMF_FREEING, &md->flags))) {
-		md = NULL;
-		goto out;
+	if (md) {
+		if ((md == MINOR_ALLOCED ||
+		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
+		     dm_deleting_md(md) ||
+		     test_bit(DMF_FREEING, &md->flags))) {
+			md = NULL;
+			goto out;
+		}
+		dm_get(md);
 	}
 
 out:
@@ -2294,16 +2297,6 @@ out:
 
 	return md;
 }
 
-struct mapped_device *dm_get_md(dev_t dev)
-{
-	struct mapped_device *md = dm_find_md(dev);
-
-	if (md)
-		dm_get(md);
-
-	return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
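The dm.c change above folds dm_find_md() into dm_get_md() so that the reference is taken while _minor_lock is still held, closing the window in which the device could be freed between lookup and dm_get(). A sketch of that find-and-get-under-lock pattern with illustrative names:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref ref;
};

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

static struct my_obj *my_get(int id)
{
	struct my_obj *obj;

	spin_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	if (obj)
		kref_get(&obj->ref);	/* reference taken under the lock */
	spin_unlock(&my_lock);

	return obj;
}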
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -557,7 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		if (test_bit(WriteMostly, &rdev->flags)) {
 			/* Don't balance among write-mostly, just
 			 * use the first as a last resort */
-			if (best_disk < 0) {
+			if (best_dist_disk < 0) {
 				if (is_badblock(rdev, this_sector, sectors,
 						&first_bad, &bad_sectors)) {
 					if (first_bad < this_sector)
@@ -566,7 +566,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 						best_good_sectors = first_bad - this_sector;
 				} else
 					best_good_sectors = sectors;
-				best_disk = disk;
+				best_dist_disk = disk;
+				best_pending_disk = disk;
 			}
 			continue;
 		}
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2853,7 +2853,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	 * generate correct data from the parity.
 	 */
 	if (conf->max_degraded == 2 ||
-	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
+	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+	     s->failed == 0)) {
 		/* Calculate the real rcw later - for now make it
 		 * look like rcw is cheaper
 		 */
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
 		return -EINVAL;
 
 	/* Packet start */
-	if (ev.reset)
-		return 0;
+	if (ev.reset) {
+		/* Userspace expects a long space event before the start of
+		 * the signal to use as a sync. This may be done with repeat
+		 * packets and normal samples. But if a reset has been sent
+		 * then we assume that a long time has passed, so we send a
+		 * space with the maximum time value. */
+		sample = LIRC_SPACE(LIRC_VALUE_MASK);
+		IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
 
 	/* Carrier reports */
-	if (ev.carrier_report) {
+	} else if (ev.carrier_report) {
 		sample = LIRC_FREQUENCY(ev.carrier);
 		IR_dprintk(2, "carrier report (freq: %d)\n", sample);
 
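In the lirc hunk above, a reset event is now delivered as a maximum-length space so userspace regains sync. The LIRC_SPACE() value it builds packs a mode into the top byte of a 32-bit sample and a duration into the low 24 bits; the macros below mirror the definitions in include/media/lirc.h and are shown only for illustration:

#define LIRC_VALUE_MASK		0x00FFFFFF
#define LIRC_MODE2_MASK		0xFF000000
#define LIRC_MODE2_SPACE	0x00000000
#define LIRC_MODE2_PULSE	0x01000000

/* A sample is "mode | value": type in the high bits, time in the low. */
#define LIRC_SPACE(val)	(((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
#define LIRC_PULSE(val)	(((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)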
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
 {
 	struct dvb_usb_device *d = adap_to_d(adap);
 	struct lme2510_state *lme_int = adap_to_priv(adap);
+	struct usb_host_endpoint *ep;
 
 	lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
 
@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
 			adap,
 			8);
 
+	/* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
+	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+
+	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
+
 	lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
 	usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -201,8 +201,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
 	if (!pdata)
 		return NULL;
 
-	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
-	if (clk_delay_cycles > 0)
+	if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
+				  &clk_delay_cycles))
 		pdata->clk_delay_cycles = clk_delay_cycles;
 
 	return pdata;
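The sdhci-pxav3 fix above works because of_property_read_u32() returns 0 only when the property exists and was parsed; the old code tested the output variable, which is left unwritten on failure. A minimal sketch of the corrected idiom (my_read_delay is illustrative, not driver code):

#include <linux/of.h>

static u32 my_read_delay(struct device_node *np)
{
	u32 val;

	if (!of_property_read_u32(np, "mrvl,clk-delay-cycles", &val))
		return val;	/* property present and parsed */

	return 0;		/* property absent: keep the default */
}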
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -503,6 +503,14 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
 	can_skb_reserve(skb);
 	can_skb_prv(skb)->ifindex = dev->ifindex;
 
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 			  usb_sndbulkpipe(dev->udev,
 					  dev->bulk_out->bEndpointAddress),
 			  buf, msg->len,
-			  kvaser_usb_simple_msg_callback, priv);
+			  kvaser_usb_simple_msg_callback, netdev);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 	priv = dev->nets[channel];
 	stats = &priv->netdev->stats;
 
-	if (status & M16C_STATE_BUS_RESET) {
-		kvaser_usb_unlink_tx_urbs(priv);
-		return;
-	}
-
 	skb = alloc_can_err_skb(priv->netdev, &cf);
 	if (!skb) {
 		stats->rx_dropped++;
@@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-	if (status & M16C_STATE_BUS_OFF) {
+	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
 		cf->can_id |= CAN_ERR_BUSOFF;
 
 		priv->can.can_stats.bus_off++;
@@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 		}
 
 		new_state = CAN_STATE_ERROR_PASSIVE;
-	}
-
-	if (status == M16C_STATE_BUS_ERROR) {
+	} else if (status & M16C_STATE_BUS_ERROR) {
 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
 		    ((txerr >= 96) || (rxerr >= 96))) {
 			cf->can_id |= CAN_ERR_CRTL;
@@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 			priv->can.can_stats.error_warning++;
 			new_state = CAN_STATE_ERROR_WARNING;
-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+			   ((txerr < 96) && (rxerr < 96))) {
 			cf->can_id |= CAN_ERR_PROT;
 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1583,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
 	struct kvaser_usb *dev;
 	int err = -ENOMEM;
-	int i;
+	int i, retry = 3;
 
 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
 	if (!dev)
@@ -1601,7 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, dev);
 
-	err = kvaser_usb_get_software_info(dev);
+	/* On some x86 laptops, plugging a Kvaser device again after
+	 * an unplug makes the firmware always ignore the very first
+	 * command. For such a case, provide some room for retries
+	 * instead of completely exiting the driver.
+	 */
+	do {
+		err = kvaser_usb_get_software_info(dev);
+	} while (--retry && err == -ETIMEDOUT);
+
 	if (err) {
 		dev_err(&intf->dev,
 			"Cannot get software infos, error %d\n", err);
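The kvaser_usb probe change above bounds the workaround to three attempts and only retries on -ETIMEDOUT, so real errors still fail fast. A stripped-down sketch of the same loop with a stand-in command (my_send_first_cmd is not driver code):

#include <linux/errno.h>

static int my_send_first_cmd(void)
{
	return 0;	/* stand-in for the first firmware command */
}

static int my_probe_handshake(void)
{
	int retry = 3;
	int err;

	do {
		err = my_send_first_cmd();
	} while (--retry && err == -ETIMEDOUT);	/* retry timeouts only */

	return err;
}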
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2315,7 +2315,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
 	work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-	if ((work_done < budget) && tx_complete) {
+	if (!tx_complete)
+		work_done = budget;
+
+	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__NX_DEV_UP, &adapter->state))
 			netxen_nic_enable_int(sds_ring);
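The netxen fix above enforces the NAPI contract: when TX work is unfinished, the driver reports the full budget so the core keeps polling, and napi_complete() is only called when the poll returns less than budget. A hedged skeleton of a poll callback following that rule (the state variables are stand-ins, not driver code):

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
	bool tx_complete = true;	/* stand-in for TX ring state */
	int work_done = 0;		/* stand-in for RX work performed */

	if (!tx_complete)
		work_done = budget;	/* not done: stay scheduled */

	if (work_done < budget)
		napi_complete(napi);	/* done: re-enable interrupts here */

	return work_done;
}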
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1293,6 +1293,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		/* In dual EMAC, reserved VLAN id should not be used for
+		 * creating VLAN interfaces as this can break the dual
+		 * EMAC port separation
+		 */
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 	return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1306,6 +1319,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
 	if (ret != 0)