mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
sparse pointer use of zero as null
Get rid of sparse related warnings from places that use integer as NULL pointer.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Ian Kent <raven@themaw.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Davide Libenzi <davidel@xmailserver.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0e9663ee45
commit c80544dc0b
20 changed files with 34 additions and 33 deletions
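
Every hunk below makes the same kind of change: a pointer that was tested against (or passed as) the plain integer constant 0 is rewritten to use NULL, or the shorter !ptr form, which is what silences sparse's "Using plain integer as NULL pointer" warning. In the kernel tree sparse is normally run through kbuild with make C=1 (files being recompiled) or make C=2 (all files). The snippet below is a minimal standalone sketch of the before/after pattern; struct node and count_nodes() are made up for illustration and are not taken from this patch.

/*
 * Sketch only -- struct node and count_nodes() are hypothetical,
 * not code from this commit.
 */
#include <stddef.h>                     /* NULL */

struct node {
        struct node *next;
        int value;
};

static int count_nodes(const struct node *head)
{
        const struct node *p;
        int n = 0;

        if (!head)                      /* the "!ptr" style used in several hunks */
                return 0;

        /*
         * sparse would warn "Using plain integer as NULL pointer" on
         *         for (p = head; p != 0; p = p->next)
         * so the test is spelled against NULL instead:
         */
        for (p = head; p != NULL; p = p->next)
                n++;
        return n;
}

int main(void)
{
        struct node b = { NULL, 2 };
        struct node a = { &b, 1 };

        return count_nodes(&a) == 2 ? 0 : 1;
}
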
@@ -172,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t)
         if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
                 return time_syscall(t);
 
-        vgettimeofday(&tv, 0);
+        vgettimeofday(&tv, NULL);
         result = tv.tv_sec;
         if (t)
                 *t = result;

@@ -882,7 +882,7 @@ unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer
         /* Filter out DMA modes if the device has been configured by
            the BIOS as PIO only */
 
-        if (adev->link->ap->ioaddr.bmdma_addr == 0)
+        if (adev->link->ap->ioaddr.bmdma_addr == NULL)
                 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
         return xfer_mask;
 }

@@ -366,7 +366,7 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
         unsigned long flags;
         int map, block;
 
-        if ((page = pool_find_page (pool, dma)) == 0) {
+        if ((page = pool_find_page(pool, dma)) == NULL) {
                 if (pool->dev)
                         dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
                                 pool->name, vaddr, (unsigned long) dma);

@@ -649,7 +649,7 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
 
 void add_interrupt_randomness(int irq)
 {
-        if (irq >= NR_IRQS || irq_timer_state[irq] == 0)
+        if (irq >= NR_IRQS || irq_timer_state[irq] == NULL)
                 return;
 
         DEBUG_ENT("irq event %d\n", irq);

@@ -182,7 +182,7 @@ int autofs_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_toke
 {
         struct autofs_wait_queue *wq, **wql;
 
-        for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
+        for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
                 if ( wq->wait_queue_token == wait_queue_token )
                         break;
         }

@@ -376,7 +376,7 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
         struct autofs_wait_queue *wq, **wql;
 
         mutex_lock(&sbi->wq_mutex);
-        for (wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next) {
+        for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
                 if (wq->wait_queue_token == wait_queue_token)
                         break;
         }

@@ -3001,7 +3001,7 @@ static int __init init_sys32_ioctl(void)
         int i;
 
         for (i = 0; i < ARRAY_SIZE(ioctl_start); i++) {
-                if (ioctl_start[i].next != 0) {
+                if (ioctl_start[i].next) {
                         printk("ioctl translation %d bad\n",i);
                         return -1;
                 }

@@ -463,7 +463,7 @@ static void ep_free(struct eventpoll *ep)
          * holding "epmutex" we can be sure that no file cleanup code will hit
          * us during this operation. So we can avoid the lock on "ep->lock".
          */
-        while ((rbp = rb_first(&ep->rbr)) != 0) {
+        while ((rbp = rb_first(&ep->rbr)) != NULL) {
                 epi = rb_entry(rbp, struct epitem, rbn);
                 ep_remove(ep, epi);
         }

@@ -47,7 +47,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
         struct inode *inode = dentry->d_inode;
         int ret = 0;
 
-        J_ASSERT(ext3_journal_current_handle() == 0);
+        J_ASSERT(ext3_journal_current_handle() == NULL);
 
         /*
          * data=writeback:

@@ -1028,7 +1028,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
         }
         if (buffer_new(&dummy)) {
                 J_ASSERT(create != 0);
-                J_ASSERT(handle != 0);
+                J_ASSERT(handle != NULL);
 
                 /*
                  * Now that we do not always journal data, we should

@@ -217,7 +217,7 @@ static int journal_start_thread(journal_t *journal)
         if (IS_ERR(t))
                 return PTR_ERR(t);
 
-        wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+        wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
         return 0;
 }
 

@@ -229,7 +229,8 @@ static void journal_kill_thread(journal_t *journal)
         while (journal->j_task) {
                 wake_up(&journal->j_wait_commit);
                 spin_unlock(&journal->j_state_lock);
-                wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+                wait_event(journal->j_wait_done_commit,
+                                journal->j_task == NULL);
                 spin_lock(&journal->j_state_lock);
         }
         spin_unlock(&journal->j_state_lock);

@@ -1651,14 +1652,14 @@ static struct journal_head *journal_alloc_journal_head(void)
         atomic_inc(&nr_journal_heads);
 #endif
         ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
-        if (ret == 0) {
+        if (ret == NULL) {
                 jbd_debug(1, "out of memory for journal_head\n");
                 if (time_after(jiffies, last_warning + 5*HZ)) {
                         printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
                                __FUNCTION__);
                         last_warning = jiffies;
                 }
-                while (ret == 0) {
+                while (ret == NULL) {
                         yield();
                         ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
                 }

@@ -1172,7 +1172,7 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
         }
 
         /* That test should have eliminated the following case: */
-        J_ASSERT_JH(jh, jh->b_frozen_data == 0);
+        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
 
         JBUFFER_TRACE(jh, "file as BJ_Metadata");
         spin_lock(&journal->j_list_lock);

@@ -1522,7 +1522,7 @@ static void __journal_temp_unlink_buffer(struct journal_head *jh)
 
         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
         if (jh->b_jlist != BJ_None)
-                J_ASSERT_JH(jh, transaction != 0);
+                J_ASSERT_JH(jh, transaction != NULL);
 
         switch (jh->b_jlist) {
         case BJ_None:

@@ -1591,11 +1591,11 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
         if (buffer_locked(bh) || buffer_dirty(bh))
                 goto out;
 
-        if (jh->b_next_transaction != 0)
+        if (jh->b_next_transaction != NULL)
                 goto out;
 
         spin_lock(&journal->j_list_lock);
-        if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) {
+        if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
                 if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
                         /* A written-back ordered data buffer */
                         JBUFFER_TRACE(jh, "release data");

@@ -1603,7 +1603,7 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
                         journal_remove_journal_head(bh);
                         __brelse(bh);
                 }
-        } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
+        } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
                 /* written-back checkpointed metadata buffer */
                 if (jh->b_jlist == BJ_None) {
                         JBUFFER_TRACE(jh, "remove from checkpoint list");

@@ -1963,7 +1963,7 @@ void __journal_file_buffer(struct journal_head *jh,
 
         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
         J_ASSERT_JH(jh, jh->b_transaction == transaction ||
-                        jh->b_transaction == 0);
+                        jh->b_transaction == NULL);
 
         if (jh->b_transaction && jh->b_jlist == jlist)
                 return;

@@ -111,7 +111,7 @@ utf8_wctomb(__u8 *s, wchar_t wc, int maxlen)
         int c, nc;
         const struct utf8_table *t;
 
-        if (s == 0)
+        if (!s)
                 return 0;
 
         l = wc;

@@ -57,7 +57,7 @@ identify_ramdisk_image(int fd, int start_block)
         unsigned char *buf;
 
         buf = kmalloc(size, GFP_KERNEL);
-        if (buf == 0)
+        if (!buf)
                 return -1;
 
         minixsb = (struct minix_super_block *) buf;

@@ -407,12 +407,12 @@ static int __init crd_load(int in_fd, int out_fd)
         crd_infd = in_fd;
         crd_outfd = out_fd;
         inbuf = kmalloc(INBUFSIZ, GFP_KERNEL);
-        if (inbuf == 0) {
+        if (!inbuf) {
                 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip buffer\n");
                 return -1;
         }
         window = kmalloc(WSIZE, GFP_KERNEL);
-        if (window == 0) {
+        if (!window) {
                 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip window\n");
                 kfree(inbuf);
                 return -1;

@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
  */
 void drop_futex_key_refs(union futex_key *key)
 {
-        if (key->both.ptr == 0)
+        if (!key->both.ptr)
                 return;
         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
         case FUT_OFF_INODE:

@@ -1046,7 +1046,7 @@ static int unqueue_me(struct futex_q *q)
 retry:
         lock_ptr = q->lock_ptr;
         barrier();
-        if (lock_ptr != 0) {
+        if (lock_ptr != NULL) {
                 spin_lock(lock_ptr);
                 /*
                  * q->lock_ptr can change between reading it and

@@ -785,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image,
                 size_t uchunk, mchunk;
 
                 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
-                if (page == 0) {
+                if (!page) {
                         result = -ENOMEM;
                         goto out;
                 }

@@ -844,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                 size_t uchunk, mchunk;
 
                 page = pfn_to_page(maddr >> PAGE_SHIFT);
-                if (page == 0) {
+                if (!page) {
                         result = -ENOMEM;
                         goto out;
                 }

@@ -1020,7 +1020,7 @@ static long region_chg(struct list_head *head, long f, long t)
          * size such that we can guarentee to record the reservation. */
         if (&rg->link == head || t < rg->from) {
                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-                if (nrg == 0)
+                if (!nrg)
                         return -ENOMEM;
                 nrg->from = f;
                 nrg->to = f;

@@ -291,7 +291,7 @@ unsigned long do_mremap(unsigned long addr,
                 if ((addr <= new_addr) && (addr+old_len) > new_addr)
                         goto out;
 
-                ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+                ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                 if (ret)
                         goto out;
 

@@ -399,7 +399,7 @@ unsigned long do_mremap(unsigned long addr,
                         goto out;
                 }
 
-                ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+                ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                 if (ret)
                         goto out;
         }

@@ -1282,7 +1282,7 @@ out:
          */
         if (priority < 0)
                 priority = 0;
-        for (i = 0; zones[i] != 0; i++) {
+        for (i = 0; zones[i] != NULL; i++) {
                 struct zone *zone = zones[i];
 
                 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))

@@ -448,7 +448,7 @@ int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
         if (dst) {
                 struct dst_entry *dst_test;
 
-                for (dst_test = dst; dst_test != 0;
+                for (dst_test = dst; dst_test != NULL;
                      dst_test = dst_test->child) {
                         struct xfrm_state *x = dst_test->xfrm;
 