tests: skip part of dd/sparse on some file systems

* tests/dd/sparse: The last two parts of this test would fail due to
the underlying file system at least on Solaris 10 with NFS.  That file
system would report that a 3MiB file was occupying <= 1KiB of space
for nearly 50 seconds after creation.
Improved-by: Bernhard Voelker
Author: Jim Meyering
Date: 2012-03-23 10:53:56 +01:00
Parent commit: a04110e568
Commit: 4b101ccd17

View File

@@ -42,35 +42,29 @@ compare exp out || fail=1
# Copying through a pipe: conv=sparse cannot seek on a pipe, so the
# piped-through-cat output must still be byte-identical to the input.
dd if=file.in bs=1 conv=sparse | cat > file.out
cmp file.in file.out || fail=1
# Setup for block size tests
# NOTE(review): the next three commands (truncate-based, sparse file.in)
# appear to be the *old* setup, superseded by the rm/dd sequence below;
# this looks like a rendered diff showing both versions without +/-
# markers -- confirm against the repository file.
dd if=/dev/urandom of=file.in bs=1M count=1
truncate -s+1M file.in
dd if=/dev/urandom of=file.in bs=1M count=1 conv=notrunc oflag=append
# Setup for block size tests: create a 3MiB file with a 1MiB
# stretch of NUL bytes in the middle.
rm -f file.in
# iflag=fullblock guards against short reads from /dev/urandom.
dd if=/dev/urandom of=file.in bs=1M count=3 iflag=fullblock || fail=1
# Overwrite the middle 1MiB with NULs; conv=notrunc keeps the 3MiB size.
dd if=/dev/zero of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1
# Note the block allocations below are usually equal,
# but can vary by a file system block due to alignment,
# which was seen on XFS at least. Also on various BSDs
# the sparse granularity was up to 8 file system blocks
# (16KiB for the tested systems), causing this to be the
# minimum accuracy we can support.
# alloc_equal FILE1 FILE2
# Succeed when the two files' block allocations differ by no more than
# 8 file system blocks (expressed in 512-byte sectors), the coarsest
# sparse granularity observed on the tested systems.
alloc_equal() {
  # 8 and 512 below are related, so hardcode sector_size for now
  # : ${sector_size:=$(stat -c "%B" "$1")}
  : "${sectors_per_block:=$(( $(stat -f -c '%S' "$1") / 512 ))}"
  : "${min_sectors_per_sparse_block:=$(( sectors_per_block * 8 ))}"
  # Absolute difference in allocated 512-byte sectors.
  alloc_diff=$(( $(stat -c %b "$1") - $(stat -c %b "$2") ))
  alloc_diff=${alloc_diff#-} # abs()
  test "$alloc_diff" -le "$min_sectors_per_sparse_block"
}
# kb_alloc FILE -- print FILE's disk usage in kibibytes.
kb_alloc()
{
  du -k "$1" | cut -f1
}
# Ensure NUL blocks smaller than the block size are not made sparse
# NOTE(review): this alloc_equal-based check matches the old
# truncate-based (sparse) file.in setup and appears to be the *removed*
# version of the 2M-block-size test repeated inside the 'if' below --
# rendered-diff residue; confirm against the repository file.
dd if=file.in of=file.out bs=2M conv=sparse
test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
alloc_equal file.in file.out && fail=1
# If our just-created input file appears to be too small,
# skip the remaining tests. On at least Solaris 10 with NFS,
# file.in is reported to occupy <= 1KiB for about 50 seconds
# after its creation.
if test $(kb_alloc file.in) -gt 3000; then
# Ensure NUL blocks >= block size are made sparse
dd if=file.in of=file.out bs=1M conv=sparse
test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
alloc_equal file.in file.out || fail=1
# Ensure NUL blocks smaller than the block size are not made sparse.
# Here, with a 2MiB block size, dd's conv=sparse must *not* introduce a hole.
dd if=file.in of=file.out bs=2M conv=sparse
test 2500 -lt $(kb_alloc file.out) || fail=1
# Ensure that this 1MiB string of NULs *is* converted to a hole.
dd if=file.in of=file.out bs=1M conv=sparse
test $(kb_alloc file.out) -lt 2500 || fail=1
fi
# NOTE(review): capital-E 'Exit' is presumably the test framework's
# exit helper (sets the final test status) -- confirm in tests/init.sh.
Exit $fail