android_kernel_samsung_msm8976/sound/core/timer.c

/*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE)
#define DEFAULT_TIMER_LIMIT 4
#elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE)
#define DEFAULT_TIMER_LIMIT 2
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
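/*
* Per-client state for the /dev/snd/timer character device. Each open file
* gets one snd_timer_user. Events coming from the timer callbacks are kept
* in a fixed-size ring buffer (queue for plain reads, tqueue when the
* enhanced "tread" mode with timestamps is enabled); qhead/qtail/qused index
* the ring and overrun counts events dropped while it was full. qlock
* protects the ring, qchange_sleep is woken whenever new data is queued, and
* ioctl_lock serializes the ioctl handlers against release.
*/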
struct snd_timer_user {
struct snd_timer_instance *timeri;
int tread; /* enhanced read with timestamps and events */
unsigned long ticks;
unsigned long overrun;
int qhead;
int qtail;
int qused;
int queue_size;
bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
unsigned long last_resolution;
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
struct mutex ioctl_lock;
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
* create a timer instance with the given owner string.
* when timer is not NULL, increments the module counter
*/
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
struct snd_timer *timer)
{
struct snd_timer_instance *timeri;
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
timeri->owner = kstrdup(owner, GFP_KERNEL);
if (! timeri->owner) {
kfree(timeri);
return NULL;
}
INIT_LIST_HEAD(&timeri->open_list);
INIT_LIST_HEAD(&timeri->active_list);
INIT_LIST_HEAD(&timeri->ack_list);
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
timeri->timer = timer;
if (timer && !try_module_get(timer->module)) {
kfree(timeri->owner);
kfree(timeri);
return NULL;
}
return timeri;
}
/*
* find a timer instance from the given timer id
*/
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
struct snd_timer *timer = NULL;
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->tmr_class != tid->dev_class)
continue;
if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
(timer->card == NULL ||
timer->card->number != tid->card))
continue;
if (timer->tmr_device != tid->device)
continue;
if (timer->tmr_subdevice != tid->subdevice)
continue;
return timer;
}
return NULL;
}
#ifdef CONFIG_MODULES
static void snd_timer_request(struct snd_timer_id *tid)
{
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
request_module("snd-timer-%i", tid->device);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
if (tid->card < snd_ecards_limit)
request_module("snd-card-%i", tid->card);
break;
default:
break;
}
}
#endif
/*
* look for a master instance matching the slave id of the given slave.
* when found, move the slave's open_list entry onto the master's slave_list_head.
*
* call this with register_mutex down.
*/
static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
if (master->timer->num_instances >=
master->timer->max_instances)
return -EBUSY;
list_move_tail(&slave->open_list,
&master->slave_list_head);
master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
return 0;
}
}
}
return 0;
}
/*
* look for slave instances matching the slave id of the given master.
* when found, move their open_list entries onto the master's slave_list_head.
*
* call this with register_mutex down.
*/
static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
if (master->timer->num_instances >=
master->timer->max_instances)
return -EBUSY;
list_move_tail(&slave->open_list, &master->slave_list_head);
master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
slave->timer = master->timer;
if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
list_add_tail(&slave->active_list,
&master->slave_active_head);
spin_unlock(&master->timer->lock);
spin_unlock_irq(&slave_active_lock);
}
}
return 0;
}
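/*
* A slave instance (SNDRV_TIMER_CLASS_SLAVE) owns no hardware timer of its
* own. It stays on snd_timer_slave_list until a master with a matching
* slave_class/slave_id is opened; the two helpers above then move it onto
* the master's slave_list_head, so it ticks and receives events whenever the
* master fires (see the slave_active_head handling in snd_timer_interrupt()).
*/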
static int snd_timer_close_locked(struct snd_timer_instance *timeri);
/*
* open a timer instance
* when opening a master, the slave id must also be given here.
*/
int snd_timer_open(struct snd_timer_instance **ti,
char *owner, struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
int err;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
snd_printd("invalid slave class %i\n", tid->dev_sclass);
return -EINVAL;
}
mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
err = snd_timer_check_slave(timeri);
if (err < 0) {
snd_timer_close_locked(timeri);
timeri = NULL;
}
mutex_unlock(&register_mutex);
*ti = timeri;
return err;
}
/* open a master instance */
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
mutex_unlock(&register_mutex);
snd_timer_request(tid);
mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
}
#endif
if (!timer) {
mutex_unlock(&register_mutex);
return -ENODEV;
}
if (!list_empty(&timer->open_list_head)) {
timeri = list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
}
if (timer->num_instances >= timer->max_instances) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
return -ENOMEM;
}
/* take a card refcount for safe disconnection */
if (timer->card)
get_device(timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open) {
int err = timer->hw.open(timer);
if (err) {
kfree(timeri->owner);
kfree(timeri);
module_put(timer->module);
mutex_unlock(&register_mutex);
return err;
}
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
timer->num_instances++;
err = snd_timer_check_master(timeri);
if (err < 0) {
snd_timer_close_locked(timeri);
timeri = NULL;
}
mutex_unlock(&register_mutex);
*ti = timeri;
return err;
}
/*
* close a timer instance
* call this with register_mutex down.
*/
static int snd_timer_close_locked(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
list_del(&timeri->open_list);
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
timer->num_instances--;
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
spin_unlock_irq(&timer->lock);
udelay(10);
spin_lock_irq(&timer->lock);
}
spin_unlock_irq(&timer->lock);
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
timer->num_instances--;
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
list_del_init(&slave->active_list);
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
/* slave doesn't need to release timer resources below */
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
timer = NULL;
}
if (timeri->private_free)
timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
put_device(timer->card->card_dev);
module_put(timer->module);
}
return 0;
}
/*
* close a timer instance
*/
int snd_timer_close(struct snd_timer_instance *timeri)
{
int err;
if (snd_BUG_ON(!timeri))
return -ENXIO;
mutex_lock(&register_mutex);
err = snd_timer_close_locked(timeri);
mutex_unlock(&register_mutex);
return err;
}
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
if (timeri == NULL)
return 0;
if ((timer = timeri->timer) != NULL) {
if (timer->hw.c_resolution)
return timer->hw.c_resolution(timer);
return timer->hw.resolution;
}
return 0;
}
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
if (timer_tstamp_monotonic)
do_posix_clock_monotonic_gettime(&tstamp);
else
getnstimeofday(&tstamp);
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
event > SNDRV_TIMER_EVENT_PAUSE))
return;
if (event == SNDRV_TIMER_EVENT_START ||
event == SNDRV_TIMER_EVENT_CONTINUE)
resolution = snd_timer_resolution(ti);
if (ti->ccallback)
ti->ccallback(ti, event, &tstamp, resolution);
if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
return;
timer = ti->timer;
if (timer == NULL)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
}
/* start/continue a master timer */
static int snd_timer_start1(struct snd_timer_instance *timeri,
bool start, unsigned long ticks)
{
struct snd_timer *timer;
int result;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (timer->card && timer->card->shutdown) {
result = -ENODEV;
goto unlock;
}
if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START)) {
result = -EBUSY;
goto unlock;
}
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
result = 1; /* delayed start */
} else {
if (start)
timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
result = 0;
}
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* start/continue a slave timer */
static int snd_timer_start_slave(struct snd_timer_instance *timeri,
bool start)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/* stop/pause a master timer */
static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
int result = 0;
unsigned long flags;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
unlock:
spin_unlock_irqrestore(&timer->lock, flags);
return result;
}
/* stop/pause a slave timer */
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
* start the timer instance
*/
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
if (timeri == NULL || ticks < 1)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, true);
else
return snd_timer_start1(timeri, true, ticks);
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, true);
else
return snd_timer_stop1(timeri, true);
}
/*
* start again.. the tick is kept.
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
return snd_timer_start1(timeri, false, 0);
}
/*
* pause.. remember the ticks left
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_stop_slave(timeri, false);
else
return snd_timer_stop1(timeri, false);
}
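/*
* A minimal sketch of how an in-kernel client might drive the instance API
* above (error handling trimmed; "my_tick", "my_data" and the id values are
* illustrative, not part of this file):
*
*	struct snd_timer_id tid = {
*		.dev_class = SNDRV_TIMER_CLASS_GLOBAL,
*		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
*		.card = -1,
*		.device = SNDRV_TIMER_GLOBAL_SYSTEM,
*		.subdevice = 0,
*	};
*	struct snd_timer_instance *ti;
*
*	if (snd_timer_open(&ti, "my-driver", &tid, 0) < 0)
*		return;
*	ti->callback = my_tick;			called as (ti, resolution, ticks)
*	ti->callback_data = my_data;
*	ti->flags |= SNDRV_TIMER_IFLG_AUTO;	restart automatically (periodic)
*	snd_timer_start(ti, 10);		expire after 10 timer ticks
*	...
*	snd_timer_stop(ti);
*	snd_timer_close(ti);
*/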
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks change, set the CHANGE flag to reprogram the timer.
*/
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti;
unsigned long ticks = ~0UL;
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
timer->running++;
}
if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
if (ticks > ti->cticks)
ticks = ti->cticks;
}
}
if (ticks == ~0UL) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
return;
}
if (ticks > timer->hw.ticks)
ticks = timer->hw.ticks;
if (ticks_left != ticks)
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
timer->sticks = ticks;
}
/*
* timer tasklet
*
* Processes the "slow" callbacks that snd_timer_interrupt() queued on
* sack_list_head, i.e. instances without SNDRV_TIMER_IFLG_FAST on hardware
* whose interrupt handler does not already run in tasklet context.
*/
static void snd_timer_tasklet(unsigned long arg)
{
struct snd_timer *timer = (struct snd_timer *) arg;
struct snd_timer_instance *ti;
struct list_head *p;
unsigned long resolution, ticks;
unsigned long flags;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
p = timer->sack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
resolution = ti->resolution;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
struct snd_timer_instance *ti, *ts, *tmp;
unsigned long resolution, ticks;
struct list_head *p, *ack_list_head;
unsigned long flags;
int use_tasklet = 0;
if (timer == NULL)
return;
if (timer->card && timer->card->shutdown)
return;
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
/* loop for all active instances
* Here we cannot use list_for_each_entry because a processed instance may
* be removed from the active list (and relinked to an ack list) before its
* callback is called.
*/
list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
active_list) {
if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
continue;
ti->pticks += ticks_left;
ti->resolution = resolution;
if (ti->cticks < ticks_left)
ti->cticks = 0;
else
ti->cticks -= ticks_left;
if (ti->cticks) /* not expired */
continue;
if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
--timer->running;
list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
ack_list_head = &timer->ack_list_head;
else
ack_list_head = &timer->sack_list_head;
if (list_empty(&ti->ack_list))
list_add_tail(&ti->ack_list, ack_list_head);
list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ts->pticks = ti->pticks;
ts->resolution = resolution;
if (list_empty(&ts->ack_list))
list_add_tail(&ts->ack_list, ack_list_head);
}
}
if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
snd_timer_reschedule(timer, timer->sticks);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
timer->hw.stop(timer);
timer->flags |= SNDRV_TIMER_FLG_CHANGE;
}
if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
(timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
/* restart timer */
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
} else {
timer->hw.stop(timer);
}
/* now process all fast callbacks */
while (!list_empty(&timer->ack_list_head)) {
p = timer->ack_list_head.next; /* get first item */
ti = list_entry(p, struct snd_timer_instance, ack_list);
/* remove from ack_list and make empty */
list_del_init(p);
ticks = ti->pticks;
ti->pticks = 0;
ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
spin_unlock(&timer->lock);
if (ti->callback)
ti->callback(ti, resolution, ticks);
spin_lock(&timer->lock);
ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
}
/* do we have any slow callbacks? */
use_tasklet = !list_empty(&timer->sack_list_head);
spin_unlock_irqrestore(&timer->lock, flags);
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
/*
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
struct snd_timer **rtimer)
{
struct snd_timer *timer;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_timer_dev_free,
.dev_register = snd_timer_dev_register,
.dev_disconnect = snd_timer_dev_disconnect,
};
if (snd_BUG_ON(!tid))
return -EINVAL;
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (timer == NULL) {
snd_printk(KERN_ERR "timer: cannot allocate\n");
return -ENOMEM;
}
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
if (err < 0) {
snd_timer_free(timer);
return err;
}
}
if (rtimer)
*rtimer = timer;
return 0;
}
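/*
* Rough outline of how a card driver would provide its own timer (names are
* illustrative; the system timer below is the in-tree reference for this
* pattern): allocate it with snd_timer_new() and fill in timer->hw before
* the card is registered.
*
*	struct snd_timer_id tid = {
*		.dev_class = SNDRV_TIMER_CLASS_CARD,
*		.card = card->number,
*	};
*	struct snd_timer *t;
*
*	snd_timer_new(card, "my-timer", &tid, &t);
*	strcpy(t->name, "my hw timer");
*	t->hw.resolution = ...;		nanoseconds per tick
*	t->hw.ticks = ...;		longest interval programmable at once
*	t->hw.start = my_hw_start;	int (*)(struct snd_timer *)
*	t->hw.stop = my_hw_stop;
*	t->private_data = my_priv;
*	t->private_free = my_priv_free;
*/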
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(&register_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
snd_printk(KERN_WARNING "timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(&register_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
static int snd_timer_dev_free(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
return snd_timer_free(timer);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(&register_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(&register_mutex);
return 0;
}
/* just for reference in snd_timer_dev_disconnect() below */
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event, struct timespec *tstamp,
unsigned long resolution);
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
/* wake up pending sleepers */
list_for_each_entry(ti, &timer->open_list_head, open_list) {
/* FIXME: better to have a ti.disconnect() op */
if (ti->ccallback == snd_timer_user_ccallback) {
struct snd_timer_user *tu = ti->callback_data;
tu->disconnected = true;
wake_up(&tu->qchange_sleep);
}
}
mutex_unlock(&register_mutex);
return 0;
}
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
if (timer->card && timer->card->shutdown)
return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
event > SNDRV_TIMER_EVENT_MRESUME))
return;
spin_lock_irqsave(&timer->lock, flags);
if (event == SNDRV_TIMER_EVENT_MSTART ||
event == SNDRV_TIMER_EVENT_MCONTINUE ||
event == SNDRV_TIMER_EVENT_MRESUME) {
if (timer->hw.c_resolution)
resolution = timer->hw.c_resolution(timer);
else
resolution = timer->hw.resolution;
}
list_for_each_entry(ti, &timer->active_list_head, active_list) {
if (ti->ccallback)
ti->ccallback(ti, event, tstamp, resolution);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event, tstamp, resolution);
}
spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
struct snd_timer_system_private {
struct timer_list tlist;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
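/*
* The system timer is driven by an ordinary timer_list in jiffies. Since the
* callback may run late, snd_timer_s_function() accumulates the lateness in
* priv->correction and snd_timer_s_start() subtracts it from the next expiry,
* keeping the long-term rate close to the requested number of ticks.
*/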
static void snd_timer_s_function(unsigned long data)
{
struct snd_timer *timer = (struct snd_timer *)data;
struct snd_timer_system_private *priv = timer->private_data;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
static int snd_timer_s_start(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long njiff;
priv = (struct snd_timer_system_private *) timer->private_data;
njiff = (priv->last_jiffies = jiffies);
if (priv->correction > timer->sticks - 1) {
priv->correction -= timer->sticks - 1;
njiff++;
} else {
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
priv->last_expires = njiff;
mod_timer(&priv->tlist, njiff);
return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
static struct snd_timer_hardware snd_timer_system =
{
.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
.resolution = 1000000000L / HZ,
.ticks = 10000000L,
.start = snd_timer_s_start,
.stop = snd_timer_s_stop
};
static void snd_timer_free_system(struct snd_timer *timer)
{
kfree(timer->private_data);
}
static int snd_timer_register_system(void)
{
struct snd_timer *timer;
struct snd_timer_system_private *priv;
int err;
err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
if (err < 0)
return err;
strcpy(timer->name, "system timer");
timer->hw = snd_timer_system;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
snd_timer_free(timer);
return -ENOMEM;
}
init_timer(&priv->tlist);
priv->tlist.function = snd_timer_s_function;
priv->tlist.data = (unsigned long) timer;
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
}
#ifdef CONFIG_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
if (timer->card && timer->card->shutdown)
continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
static void __init snd_timer_proc_init(void)
{
struct snd_info_entry *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
if (entry != NULL) {
entry->c.text.read = snd_timer_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_timer_proc_entry = entry;
}
static void __exit snd_timer_proc_done(void)
{
snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
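/*
* Everything below implements the /dev/snd/timer character device: open()
* allocates a snd_timer_user with its event ring, the ioctls select and
* configure a timer instance, and the two tick callbacks
* (snd_timer_user_interrupt() for plain reads, snd_timer_user_tinterrupt()
* for the timestamped "tread" mode) queue events that readers drain while
* sleeping on qchange_sleep.
*/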
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_read *r;
int prev;
spin_lock(&tu->qlock);
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->queue[prev];
if (r->resolution == resolution) {
r->ticks += ticks;
goto __wake;
}
}
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
r = &tu->queue[tu->qtail++];
tu->qtail %= tu->queue_size;
r->resolution = resolution;
r->ticks = ticks;
tu->qused++;
}
__wake:
spin_unlock(&tu->qlock);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
int event,
struct timespec *tstamp,
unsigned long resolution)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread r1;
unsigned long flags;
if (event >= SNDRV_TIMER_EVENT_START &&
event <= SNDRV_TIMER_EVENT_PAUSE)
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
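/*
* Tick callback for the "tread" mode: consecutive ticks are merged into the
* previous SNDRV_TIMER_EVENT_TICK entry when possible, and a
* SNDRV_TIMER_EVENT_RESOLUTION event is queued first whenever the timer's
* resolution has changed since the last report.
*/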
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
unsigned long resolution,
unsigned long ticks)
{
struct snd_timer_user *tu = timeri->callback_data;
struct snd_timer_tread *r, r1;
struct timespec tstamp;
int prev, append = 0;
memset(&tstamp, 0, sizeof(tstamp));
spin_lock(&tu->qlock);
if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
(1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
spin_unlock(&tu->qlock);
return;
}
if (tu->last_resolution != resolution || ticks > 0) {
if (timer_tstamp_monotonic)
do_posix_clock_monotonic_gettime(&tstamp);
else
getnstimeofday(&tstamp);
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
snd_timer_user_append_to_tqueue(tu, &r1);
tu->last_resolution = resolution;
append++;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
goto __wake;
if (ticks == 0)
goto __wake;
if (tu->qused > 0) {
prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
r = &tu->tqueue[prev];
if (r->event == SNDRV_TIMER_EVENT_TICK) {
r->tstamp = tstamp;
r->val += ticks;
append++;
goto __wake;
}
}
r1.event = SNDRV_TIMER_EVENT_TICK;
r1.tstamp = tstamp;
r1.val = ticks;
snd_timer_user_append_to_tqueue(tu, &r1);
append++;
__wake:
spin_unlock(&tu->qlock);
if (append == 0)
return;
kill_fasync(&tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL) {
kfree(tu);
return -ENOMEM;
}
file->private_data = tu;
return 0;
}
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
}
return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
id->dev_class = timer->tmr_class;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = timer->card ? timer->card->number : -1;
id->device = timer->tmr_device;
id->subdevice = timer->tmr_subdevice;
}
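/*
 * SNDRV_TIMER_IOCTL_NEXT_DEVICE: walk the global timer list, which is kept
 * sorted by class/card/device/subdevice at registration time, and return
 * the first id following the one passed in; a cleared id (class
 * SNDRV_TIMER_CLASS_NONE) marks the end of the enumeration.
 */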
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
struct snd_timer_id id;
struct snd_timer *timer;
struct list_head *p;
if (copy_from_user(&id, _tid, sizeof(id)))
return -EFAULT;
mutex_lock(&register_mutex);
if (id.dev_class < 0) { /* first item */
if (list_empty(&snd_timer_list))
snd_timer_user_zero_id(&id);
else {
timer = list_entry(snd_timer_list.next,
struct snd_timer, device_list);
snd_timer_user_copy_id(&id, timer);
}
} else {
switch (id.dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
id.device = id.device < 0 ? 0 : id.device + 1;
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device >= id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
case SNDRV_TIMER_CLASS_CARD:
case SNDRV_TIMER_CLASS_PCM:
			if (id.card < 0) {
				id.card = 0;
			} else if (id.device < 0) {
				id.device = 0;
			} else if (id.subdevice < 0) {
				id.subdevice = 0;
			} else if (id.subdevice < INT_MAX) {
				id.subdevice++;
			}
list_for_each(p, &snd_timer_list) {
timer = list_entry(p, struct snd_timer, device_list);
if (timer->tmr_class > id.dev_class) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_class < id.dev_class)
continue;
if (timer->card->number > id.card) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->card->number < id.card)
continue;
if (timer->tmr_device > id.device) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_device < id.device)
continue;
if (timer->tmr_subdevice > id.subdevice) {
snd_timer_user_copy_id(&id, timer);
break;
}
if (timer->tmr_subdevice < id.subdevice)
continue;
snd_timer_user_copy_id(&id, timer);
break;
}
if (p == &snd_timer_list)
snd_timer_user_zero_id(&id);
break;
default:
snd_timer_user_zero_id(&id);
}
}
mutex_unlock(&register_mutex);
if (copy_to_user(_tid, &id, sizeof(*_tid)))
return -EFAULT;
return 0;
}
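/*
 * SNDRV_TIMER_IOCTL_GINFO: look the timer up by id and report its name,
 * flags, resolution range and the number of currently opened instances.
 */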
static int snd_timer_user_ginfo(struct file *file,
struct snd_timer_ginfo __user *_ginfo)
{
struct snd_timer_ginfo *ginfo;
struct snd_timer_id tid;
struct snd_timer *t;
struct list_head *p;
int err = 0;
ginfo = memdup_user(_ginfo, sizeof(*ginfo));
if (IS_ERR(ginfo))
return PTR_ERR(ginfo);
tid = ginfo->tid;
memset(ginfo, 0, sizeof(*ginfo));
ginfo->tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
ginfo->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
ginfo->resolution = t->hw.resolution;
if (t->hw.resolution_min > 0) {
ginfo->resolution_min = t->hw.resolution_min;
ginfo->resolution_max = t->hw.resolution_max;
}
list_for_each(p, &t->open_list_head) {
ginfo->clients++;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
err = -EFAULT;
kfree(ginfo);
return err;
}
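/*
 * SNDRV_TIMER_IOCTL_GPARAMS: set the global period through the driver's
 * set_period op; only allowed while no instance has the timer open.
 */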
static int snd_timer_user_gparams(struct file *file,
struct snd_timer_gparams __user *_gparams)
{
struct snd_timer_gparams gparams;
struct snd_timer *t;
int err;
if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
return -EFAULT;
mutex_lock(&register_mutex);
t = snd_timer_find(&gparams.tid);
if (!t) {
err = -ENODEV;
goto _error;
}
if (!list_empty(&t->open_list_head)) {
err = -EBUSY;
goto _error;
}
if (!t->hw.set_period) {
err = -ENOSYS;
goto _error;
}
err = t->hw.set_period(t, gparams.period_num, gparams.period_den);
_error:
mutex_unlock(&register_mutex);
return err;
}
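/*
 * SNDRV_TIMER_IOCTL_GSTATUS: report the current resolution, preferring the
 * driver's c_resolution/precise_resolution ops when they are provided.
 */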
static int snd_timer_user_gstatus(struct file *file,
struct snd_timer_gstatus __user *_gstatus)
{
struct snd_timer_gstatus gstatus;
struct snd_timer_id tid;
struct snd_timer *t;
int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
return -EFAULT;
tid = gstatus.tid;
memset(&gstatus, 0, sizeof(gstatus));
gstatus.tid = tid;
mutex_lock(&register_mutex);
t = snd_timer_find(&tid);
if (t != NULL) {
if (t->hw.c_resolution)
gstatus.resolution = t->hw.c_resolution(t);
else
gstatus.resolution = t->hw.resolution;
if (t->hw.precise_resolution) {
t->hw.precise_resolution(t, &gstatus.resolution_num,
&gstatus.resolution_den);
} else {
gstatus.resolution_num = gstatus.resolution;
gstatus.resolution_den = 1000000000uL;
}
} else {
err = -ENODEV;
}
mutex_unlock(&register_mutex);
if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
err = -EFAULT;
return err;
}
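/*
 * SNDRV_TIMER_IOCTL_SELECT: close any previously selected timer, open the
 * requested one and reallocate the queue to match the read mode (tread or
 * plain) chosen beforehand.  The queue indices are reset so a concurrent
 * read cannot see stale or uninitialized entries.
 */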
static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
}
__err:
return err;
}
static int snd_timer_user_info(struct file *file,
struct snd_timer_info __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info *info;
struct snd_timer *t;
int err = 0;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
return -ENOMEM;
info->card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info->flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info->id, t->id, sizeof(info->id));
strlcpy(info->name, t->name, sizeof(info->name));
info->resolution = t->hw.resolution;
if (copy_to_user(_info, info, sizeof(*_info)))
err = -EFAULT;
kfree(info);
return err;
}
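/*
 * SNDRV_TIMER_IOCTL_PARAMS: validate the requested ticks (the effective
 * period, resolution * ticks, must be at least 1 ms for non-slave timers),
 * the queue size (32..1024) and the event filter mask, update the instance
 * flags, optionally resize the queue and queue an EARLY event if requested.
 */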
static int snd_timer_user_params(struct file *file,
struct snd_timer_params __user *_params)
{
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
struct snd_timer_read *tr;
struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
t = tu->timeri->timer;
if (!t)
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
u64 resolution;
if (params.ticks < 1) {
err = -EINVAL;
goto _end;
}
/* Don't allow resolution less than 1ms */
resolution = snd_timer_resolution(tu->timeri);
resolution *= params.ticks;
if (resolution < 1000000) {
err = -EINVAL;
goto _end;
}
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
err = -EINVAL;
goto _end;
}
if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
(1<<SNDRV_TIMER_EVENT_TICK)|
(1<<SNDRV_TIMER_EVENT_START)|
(1<<SNDRV_TIMER_EVENT_STOP)|
(1<<SNDRV_TIMER_EVENT_CONTINUE)|
(1<<SNDRV_TIMER_EVENT_PAUSE)|
(1<<SNDRV_TIMER_EVENT_SUSPEND)|
(1<<SNDRV_TIMER_EVENT_RESUME)|
(1<<SNDRV_TIMER_EVENT_MSTART)|
(1<<SNDRV_TIMER_EVENT_MSTOP)|
(1<<SNDRV_TIMER_EVENT_MCONTINUE)|
(1<<SNDRV_TIMER_EVENT_MPAUSE)|
(1<<SNDRV_TIMER_EVENT_MSUSPEND)|
(1<<SNDRV_TIMER_EVENT_MRESUME))) {
err = -EINVAL;
goto _end;
}
snd_timer_stop(tu->timeri);
spin_lock_irq(&t->lock);
tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
SNDRV_TIMER_IFLG_EXCLUSIVE|
SNDRV_TIMER_IFLG_EARLY_EVENT);
if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
if (tu->tread) {
ttr = kmalloc(params.queue_size * sizeof(*ttr),
GFP_KERNEL);
if (ttr) {
kfree(tu->tqueue);
tu->queue_size = params.queue_size;
tu->tqueue = ttr;
}
} else {
tr = kmalloc(params.queue_size * sizeof(*tr),
GFP_KERNEL);
if (tr) {
kfree(tu->queue);
tu->queue_size = params.queue_size;
tu->queue = tr;
}
}
}
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
tread.val = 0;
snd_timer_user_append_to_tqueue(tu, &tread);
} else {
struct snd_timer_read *r = &tu->queue[0];
r->resolution = 0;
r->ticks = 0;
tu->qused++;
tu->qtail++;
}
}
tu->filter = params.filter;
tu->ticks = params.ticks;
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
return -EFAULT;
return err;
}
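/*
 * SNDRV_TIMER_IOCTL_STATUS: snapshot of the last event timestamp, current
 * resolution, lost-tick count, overruns and the number of queued events.
 */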
static int snd_timer_user_status(struct file *file,
struct snd_timer_status __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
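/*
 * START/STOP/CONTINUE/PAUSE ioctl backends.  START rearms the timer with
 * tu->ticks and CONTINUE resumes it; both clear the lost-tick counter
 * before doing so.
 */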
static int snd_timer_user_start(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
snd_timer_stop(tu->timeri);
tu->timeri->lost = 0;
tu->last_resolution = 0;
return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0;
}
static int snd_timer_user_stop(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_continue(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
static int snd_timer_user_pause(struct file *file)
{
int err;
struct snd_timer_user *tu;
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0;
}
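/* Legacy ioctl numbers, dispatched below alongside the current ones so
 * that older user-space binaries keep working. */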
enum {
SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
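/*
 * Ioctl dispatcher; runs with tu->ioctl_lock held by the wrapper below.
 */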
static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
void __user *argp = (void __user *)arg;
int __user *p = argp;
tu = file->private_data;
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
int xarg;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
tu->tread = xarg ? 1 : 0;
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
return snd_timer_user_ginfo(file, argp);
case SNDRV_TIMER_IOCTL_GPARAMS:
return snd_timer_user_gparams(file, argp);
case SNDRV_TIMER_IOCTL_GSTATUS:
return snd_timer_user_gstatus(file, argp);
case SNDRV_TIMER_IOCTL_SELECT:
return snd_timer_user_tselect(file, argp);
case SNDRV_TIMER_IOCTL_INFO:
return snd_timer_user_info(file, argp);
case SNDRV_TIMER_IOCTL_PARAMS:
return snd_timer_user_params(file, argp);
case SNDRV_TIMER_IOCTL_STATUS:
return snd_timer_user_status(file, argp);
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
return snd_timer_user_start(file);
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
return snd_timer_user_stop(file);
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
return snd_timer_user_continue(file);
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
return snd_timer_user_pause(file);
}
return -ENOTTY;
}
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;
mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
tu = file->private_data;
return fasync_helper(fd, file, on, &tu->fasync);
}
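/*
 * Blocking read of queued timer events.  tu->qlock is dropped around the
 * copy to user space, both locks are released while sleeping, and a
 * disconnected timer terminates the wait with -ENODEV.
 */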
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int qhead;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
schedule();
mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (tu->disconnected) {
err = -ENODEV;
goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto _error;
}
}
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
err = -EFAULT;
} else {
if (copy_to_user(buffer, &tu->queue[qhead],
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
spin_lock_irq(&tu->qlock);
if (err < 0)
goto _error;
result += unit;
buffer += unit;
}
_error:
spin_unlock_irq(&tu->qlock);
mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
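/*
 * poll: report POLLIN/POLLRDNORM when queued events are available and
 * POLLERR once the underlying timer has been disconnected.
 */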
static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_timer_user *tu;
tu = file->private_data;
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
return mask;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
static const struct file_operations snd_timer_f_ops =
{
.owner = THIS_MODULE,
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
.llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
.fasync = snd_timer_user_fasync,
};
/*
* ENTRY functions
*/
static int __init alsa_timer_init(void)
{
int err;
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
"system timer");
#endif
if ((err = snd_timer_register_system()) < 0)
snd_printk(KERN_ERR "unable to register system timer (%i)\n",
err);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
&snd_timer_f_ops, NULL, "timer")) < 0)
snd_printk(KERN_ERR "unable to register timer device (%i)\n",
err);
snd_timer_proc_init();
return 0;
}
static void __exit alsa_timer_exit(void)
{
struct list_head *p, *n;
snd_unregister_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0);
/* unregister the system timer */
list_for_each_safe(p, n, &snd_timer_list) {
struct snd_timer *timer = list_entry(p, struct snd_timer, device_list);
snd_timer_free(timer);
}
snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);