ceph: make ceph_msg_new return NULL on failure; clean up, fix callers

Returning ERR_PTR(-ENOMEM) is useless extra work.  Return NULL on failure
instead, and fix up the callers (about half of which were wrong anyway).

Signed-off-by: Sage Weil <sage@newdream.net>
This commit is contained in:
Sage Weil 2010-04-01 16:06:19 -07:00
parent d52f847a84
commit a79832f26b
7 changed files with 48 additions and 80 deletions

View File

@@ -939,8 +939,8 @@ static int send_cap_msg(struct ceph_mds_session *session,
xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
if (IS_ERR(msg))
return PTR_ERR(msg);
if (!msg)
return -ENOMEM;
msg->hdr.tid = cpu_to_le64(flush_tid);

View File

@@ -649,8 +649,8 @@ more:
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
&mtime, false, 2);
if (IS_ERR(req))
return PTR_ERR(req);
if (!req)
return -ENOMEM;
num_pages = calc_pages_for(pos, len);

View File

@@ -666,9 +666,9 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
struct ceph_mds_session_head *h;
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg)) {
if (!msg) {
pr_err("create_session_msg ENOMEM creating msg\n");
return ERR_PTR(PTR_ERR(msg));
return NULL;
}
h = msg->front.iov_base;
h->op = cpu_to_le32(op);
@@ -687,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
struct ceph_msg *msg;
int mstate;
int mds = session->s_mds;
int err = 0;
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -698,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
/* send connect message */
msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto out;
}
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
out:
return 0;
}
@@ -883,8 +878,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
ceph_mds_state_name(state));
msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
if (IS_ERR(msg))
return PTR_ERR(msg);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 0;
}
@@ -931,17 +926,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_msg *msg;
int err = 0;
dout("request_close_session mds%d state %s seq %lld\n",
session->s_mds, session_state_name(session->s_state),
session->s_seq);
msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
if (IS_ERR(msg))
err = PTR_ERR(msg);
else
ceph_con_send(&session->s_con, msg);
return err;
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 0;
}
/*
@@ -1426,8 +1419,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
len += req->r_old_dentry->d_name.len;
msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
if (IS_ERR(msg))
if (!msg) {
msg = ERR_PTR(-ENOMEM);
goto out_free2;
}
msg->hdr.tid = cpu_to_le64(req->r_tid);
@@ -1518,7 +1513,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
if (IS_ERR(msg)) {
req->r_err = PTR_ERR(msg);
complete_request(mdsc, req);
return -PTR_ERR(msg);
return PTR_ERR(msg);
}
req->r_request = msg;
@@ -2158,11 +2153,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
goto fail_nopagelist;
ceph_pagelist_init(pagelist);
err = -ENOMEM;
reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
if (!reply)
goto fail_nomsg;
}
/* find session */
session = __ceph_lookup_mds_session(mdsc, mds);
@@ -2469,7 +2463,7 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
len += dnamelen;
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
if (IS_ERR(msg))
if (!msg)
return;
lease = msg->front.iov_base;
lease->action = action;

View File

@@ -1402,19 +1402,17 @@ static int read_partial_message(struct ceph_connection *con)
con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
if (skip) {
/* skip this message */
dout("alloc_msg returned NULL, skipping message\n");
dout("alloc_msg said skip message\n");
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
return 0;
}
if (IS_ERR(con->in_msg)) {
ret = PTR_ERR(con->in_msg);
con->in_msg = NULL;
if (!con->in_msg) {
con->error_msg =
"error allocating memory for incoming message";
return ret;
return -ENOMEM;
}
m = con->in_msg;
m->front.iov_len = 0; /* haven't read it yet */
@@ -2147,7 +2145,7 @@ out2:
ceph_msg_put(m);
out:
pr_err("msg_new can't create type %d len %d\n", type, front_len);
return ERR_PTR(-ENOMEM);
return NULL;
}
/*
@@ -2190,10 +2188,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
mutex_unlock(&con->mutex);
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
if (IS_ERR(msg))
return msg;
if (*skip)
if (!msg || *skip)
return NULL;
}
if (!msg) {
@@ -2202,17 +2197,16 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
if (!msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
return ERR_PTR(-ENOMEM);
return NULL;
}
}
memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
if (middle_len) {
ret = ceph_alloc_middle(con, msg);
if (ret < 0) {
ceph_msg_put(msg);
return msg;
return NULL;
}
}

View File

@@ -490,16 +490,13 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
req->buf = buf;
init_completion(&req->completion);
err = -ENOMEM;
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
if (IS_ERR(req->request)) {
err = PTR_ERR(req->request);
if (!req->request)
goto out;
}
req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, 0, 0, NULL);
if (IS_ERR(req->reply)) {
err = PTR_ERR(req->reply);
if (!req->reply)
goto out;
}
/* fill out request */
h = req->request->front.iov_base;
@@ -634,30 +631,22 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
/* msg pools */
err = -ENOMEM;
monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
sizeof(struct ceph_mon_subscribe_ack),
0, 0, NULL);
if (IS_ERR(monc->m_subscribe_ack)) {
err = PTR_ERR(monc->m_subscribe_ack);
monc->m_subscribe_ack = NULL;
if (!monc->m_subscribe_ack)
goto out_monmap;
}
monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, 0, 0,
NULL);
if (IS_ERR(monc->m_auth_reply)) {
err = PTR_ERR(monc->m_auth_reply);
monc->m_auth_reply = NULL;
if (!monc->m_auth_reply)
goto out_subscribe_ack;
}
monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
monc->pending_auth = 0;
if (IS_ERR(monc->m_auth)) {
err = PTR_ERR(monc->m_auth);
monc->m_auth = NULL;
if (!monc->m_auth)
goto out_auth_reply;
}
monc->cur_mon = -1;
monc->hunting = true;

View File

@@ -10,12 +10,8 @@
static void *alloc_fn(gfp_t gfp_mask, void *arg)
{
struct ceph_msgpool *pool = arg;
struct ceph_msg *m;
m = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
if (IS_ERR(m))
return NULL;
return m;
return ceph_msg_new(0, pool->front_len, 0, 0, NULL);
}
static void free_fn(void *element, void *arg)
@@ -42,17 +38,12 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
int front_len)
{
if (front_len > pool->front_len) {
struct ceph_msg *msg;
pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
pool, front_len, pool->front_len);
WARN_ON(1);
/* try to alloc a fresh message */
msg = ceph_msg_new(0, front_len, 0, 0, NULL);
if (!IS_ERR(msg))
return msg;
return NULL;
return ceph_msg_new(0, front_len, 0, 0, NULL);
}
return mempool_alloc(pool->pool, GFP_NOFS);

View File

@@ -147,7 +147,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
req = kzalloc(sizeof(*req), GFP_NOFS);
}
if (req == NULL)
return ERR_PTR(-ENOMEM);
return NULL;
req->r_osdc = osdc;
req->r_mempool = use_mempool;
@@ -165,9 +165,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
else
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
OSD_OPREPLY_FRONT_LEN, 0, 0, NULL);
if (IS_ERR(msg)) {
if (!msg) {
ceph_osdc_put_request(req);
return ERR_PTR(PTR_ERR(msg));
return NULL;
}
req->r_reply = msg;
@@ -179,9 +179,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
if (IS_ERR(msg)) {
if (!msg) {
ceph_osdc_put_request(req);
return ERR_PTR(PTR_ERR(msg));
return NULL;
}
msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
memset(msg->front.iov_base, 0, msg->front.iov_len);
@@ -1263,8 +1263,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
false, 1);
if (IS_ERR(req))
return PTR_ERR(req);
if (!req)
return -ENOMEM;
/* it may be a short read due to an object boundary */
req->r_pages = pages;
@@ -1306,8 +1306,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
snapc, do_sync,
truncate_seq, truncate_size, mtime,
nofail, 1);
if (IS_ERR(req))
return PTR_ERR(req);
if (!req)
return -ENOMEM;
/* it may be a short write due to an object boundary */
req->r_pages = pages;
@@ -1393,7 +1393,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
pr_warning("get_reply front %d > preallocated %d\n",
front, (int)req->r_reply->front.iov_len);
m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, 0, 0, NULL);
if (IS_ERR(m))
if (!m)
goto out;
ceph_msg_put(req->r_reply);
req->r_reply = m;
@@ -1409,7 +1409,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
tid, want, m->nr_pages);
*skip = 1;
ceph_msg_put(m);
m = ERR_PTR(-EIO);
m = NULL;
goto out;
}
m->pages = req->r_pages;