mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-07 04:09:21 +00:00
ceph: fix multiple mds session shutdown
The use of a completion when waiting for session shutdown during umount is inappropriate, given the complexity of the condition. For multiple MDSs, this resulted in the umount thread spinning, often preventing the session close message from being processed in some cases. Switch to a waitqueue and define a condition helper. This cleans things up nicely. Signed-off-by: Sage Weil <sage@newdream.net>
This commit is contained in:
parent
e56fa10e92
commit
f3c60c5918
2 changed files with 37 additions and 34 deletions
|
@@ -2208,7 +2208,7 @@ static void handle_session(struct ceph_mds_session *session,
|
|||
pr_info("mds%d reconnect denied\n", session->s_mds);
|
||||
remove_session_caps(session);
|
||||
wake = 1; /* for good measure */
|
||||
complete_all(&mdsc->session_close_waiters);
|
||||
wake_up_all(&mdsc->session_close_wq);
|
||||
kick_requests(mdsc, mds);
|
||||
break;
|
||||
|
||||
|
@@ -2876,7 +2876,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
|
|||
return -ENOMEM;
|
||||
|
||||
init_completion(&mdsc->safe_umount_waiters);
|
||||
init_completion(&mdsc->session_close_waiters);
|
||||
init_waitqueue_head(&mdsc->session_close_wq);
|
||||
INIT_LIST_HEAD(&mdsc->waiting_for_map);
|
||||
mdsc->sessions = NULL;
|
||||
mdsc->max_sessions = 0;
|
||||
|
@@ -3021,6 +3021,23 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
|
|||
wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
|
||||
}
|
||||
|
||||
/*
|
||||
* true if all sessions are closed, or we force unmount
|
||||
*/
|
||||
bool done_closing_sessions(struct ceph_mds_client *mdsc)
|
||||
{
|
||||
int i, n = 0;
|
||||
|
||||
if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
|
||||
return true;
|
||||
|
||||
mutex_lock(&mdsc->mutex);
|
||||
for (i = 0; i < mdsc->max_sessions; i++)
|
||||
if (mdsc->sessions[i])
|
||||
n++;
|
||||
mutex_unlock(&mdsc->mutex);
|
||||
return n == 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* called after sb is ro.
|
||||
|
@@ -3029,45 +3046,32 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
|
|||
{
|
||||
struct ceph_mds_session *session;
|
||||
int i;
|
||||
int n;
|
||||
struct ceph_client *client = mdsc->client;
|
||||
unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
|
||||
unsigned long timeout = client->mount_args->mount_timeout * HZ;
|
||||
|
||||
dout("close_sessions\n");
|
||||
|
||||
mutex_lock(&mdsc->mutex);
|
||||
|
||||
/* close sessions */
|
||||
started = jiffies;
|
||||
while (time_before(jiffies, started + timeout)) {
|
||||
dout("closing sessions\n");
|
||||
n = 0;
|
||||
for (i = 0; i < mdsc->max_sessions; i++) {
|
||||
session = __ceph_lookup_mds_session(mdsc, i);
|
||||
if (!session)
|
||||
continue;
|
||||
mutex_unlock(&mdsc->mutex);
|
||||
mutex_lock(&session->s_mutex);
|
||||
__close_session(mdsc, session);
|
||||
mutex_unlock(&session->s_mutex);
|
||||
ceph_put_mds_session(session);
|
||||
mutex_lock(&mdsc->mutex);
|
||||
n++;
|
||||
}
|
||||
if (n == 0)
|
||||
break;
|
||||
|
||||
if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
|
||||
break;
|
||||
|
||||
dout("waiting for sessions to close\n");
|
||||
mutex_lock(&mdsc->mutex);
|
||||
for (i = 0; i < mdsc->max_sessions; i++) {
|
||||
session = __ceph_lookup_mds_session(mdsc, i);
|
||||
if (!session)
|
||||
continue;
|
||||
mutex_unlock(&mdsc->mutex);
|
||||
wait_for_completion_timeout(&mdsc->session_close_waiters,
|
||||
timeout);
|
||||
mutex_lock(&session->s_mutex);
|
||||
__close_session(mdsc, session);
|
||||
mutex_unlock(&session->s_mutex);
|
||||
ceph_put_mds_session(session);
|
||||
mutex_lock(&mdsc->mutex);
|
||||
}
|
||||
mutex_unlock(&mdsc->mutex);
|
||||
|
||||
dout("waiting for sessions to close\n");
|
||||
wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
|
||||
timeout);
|
||||
|
||||
/* tear down remaining sessions */
|
||||
mutex_lock(&mdsc->mutex);
|
||||
for (i = 0; i < mdsc->max_sessions; i++) {
|
||||
if (mdsc->sessions[i]) {
|
||||
session = get_session(mdsc->sessions[i]);
|
||||
|
@@ -3080,9 +3084,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
|
|||
mutex_lock(&mdsc->mutex);
|
||||
}
|
||||
}
|
||||
|
||||
WARN_ON(!list_empty(&mdsc->cap_delay_list));
|
||||
|
||||
mutex_unlock(&mdsc->mutex);
|
||||
|
||||
ceph_cleanup_empty_realms(mdsc);
|
||||
|
|
|
@@ -234,7 +234,8 @@ struct ceph_mds_client {
|
|||
struct mutex mutex; /* all nested structures */
|
||||
|
||||
struct ceph_mdsmap *mdsmap;
|
||||
struct completion safe_umount_waiters, session_close_waiters;
|
||||
struct completion safe_umount_waiters;
|
||||
wait_queue_head_t session_close_wq;
|
||||
struct list_head waiting_for_map;
|
||||
|
||||
struct ceph_mds_session **sessions; /* NULL for mds if no session */
|
||||
|
|
Loading…
Reference in a new issue