path: root/net/ceph/messenger.c
diff options
authorAlex Elder <elder@inktank.com>2013-03-29 11:44:10 -0500
committerSage Weil <sage@inktank.com>2013-05-01 21:17:35 -0700
commit143334ff446d634fcd3145919b5cddcc9148a74a (patch)
tree18c2a0f8be6ddff6abdb10f49b189c6048aabd2b /net/ceph/messenger.c
parentf5db90bcf2c69d099f9d828a8104796f41de6bc5 (diff)
libceph: don't add to crc unless data sent
In write_partial_message_data() we aggregate the crc for the data portion of the message as each new piece of the data item is encountered. Because it was computed *before* sending the data, if an attempt to send a new piece resulted in 0 bytes being sent, the crc across that piece would erroneously get computed again and added to the aggregate result. This would occasionally happen in the event of a connection failure. The crc value isn't really needed until the complete value is known after sending all data, so there's no need to compute it before sending. So don't calculate the crc for a piece until *after* we know at least one byte of it has been sent. That will avoid this problem. This resolves: http://tracker.ceph.com/issues/4450 Signed-off-by: Alex Elder <elder@inktank.com> Reviewed-by: Sage Weil <sage@inktank.com>
Diffstat (limited to 'net/ceph/messenger.c')
1 files changed, 2 insertions, 2 deletions
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index eee7a878dbfb..cb8b571ce79a 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1467,8 +1467,6 @@ static int write_partial_message_data(struct ceph_connection *con)
page = ceph_msg_data_next(&msg->data, &page_offset, &length,
- if (do_datacrc && cursor->need_crc)
- crc = ceph_crc32c_page(crc, page, page_offset, length);
ret = ceph_tcp_sendpage(con->sock, page, page_offset,
length, last_piece);
if (ret <= 0) {
@@ -1477,6 +1475,8 @@ static int write_partial_message_data(struct ceph_connection *con)
return ret;
+ if (do_datacrc && cursor->need_crc)
+ crc = ceph_crc32c_page(crc, page, page_offset, length);
out_msg_pos_next(con, page, length, (size_t) ret);

Privacy Policy