File tree Expand file tree Collapse file tree
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage Expand file tree Collapse file tree Original file line number Diff line number Diff line change @@ -289,10 +289,18 @@ private static void validate(ContainerCommandResponseProto response)
289289 throw new IllegalArgumentException ("Not GetBlock: response=" + response );
290290 }
291291 final GetBlockResponseProto b = response .getGetBlock ();
292+ final long blockLength = b .getBlockData ().getSize ();
292293 final List <ChunkInfo > chunks = b .getBlockData ().getChunksList ();
293294 for (int i = 0 ; i < chunks .size (); i ++) {
294295 final ChunkInfo c = chunks .get (i );
295- if (c .getLen () <= 0 ) {
296+ // HDDS-10682 caused an empty chunk to get written to the end of some EC blocks. Due to this
297+ // validation, these blocks will not be readable. In the EC case, the empty chunk is always
298+ // the last chunk and the offset is the block length. We can safely ignore this case and not fail.
299+ if (c .getLen () <= 0 && i == chunks .size () - 1 && c .getOffset () == blockLength ) {
300+ DatanodeBlockID blockID = b .getBlockData ().getBlockID ();
301+ LOG .warn ("The last chunk is empty for container/block {}/{} with an offset of the block length. " +
302+ "Likely due to HDDS-10682. This is safe to ignore." , blockID .getContainerID (), blockID .getLocalID ());
303+ } else if (c .getLen () <= 0 ) {
296304 throw new IOException ("Failed to get chunkInfo["
297305 + i + "]: len == " + c .getLen ());
298306 }
You can't perform that action at this time.
0 commit comments