#!/bin/bash
# Run multiple dd processes in parallel to copy a single file or block device.
# Each dd process operates on a dedicated part of the input at a time
# (1 GB chunks for big files).
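#
# Example invocation (the script name and paths are placeholders):
#   ./parallel-dd.sh /dev/sdX disk-image.img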
set -e
# Source: https://stackoverflow.com/a/25268449
min() {
    printf "%s\n" "${@:2}" | sort "$1" | head -n1
}
max() {
    # reuse min with sort's -r (reverse) option; using tail instead of head would also work
    min "${1}r" "${@:2}"
}
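# Illustrative examples (numeric comparison via sort -g):
#   min -g 5 12 3   ->  3
#   max -g 5 12 3   ->  12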
inputfile=$1
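# Determine the input size in bytes: block device nodes report a size of 0 via
# stat, so use blockdev --getsize64 for them and stat --format %s for regular files.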
if [[ -b "$inputfile" ]]; then
inputsize=$(blockdev --getsize64 "$inputfile")
else
inputsize=$(stat --format %s "$inputfile")
fi
outputfile=$2
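# Blocks copied per dd job, and how many jobs run concurrently.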
stepsize=1000
parallel=6
# Each dd job copies $stepsize blocks, so a 1 MB block size yields ~1 GB chunks;
# use smaller blocks if the input is too small to keep $parallel jobs busy with
# 1 GB chunks.
blocksize=$(min -g 1000000 "$((inputsize / stepsize / parallel))")
# Guard against a zero block size (division by zero below) for very small inputs.
blocksize=$(max -g 1 "$blocksize")
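# Generate the chunk offsets (in blocks) and hand them to xargs, which runs up
# to $parallel dd jobs at a time; each dd reads and writes its own
# $stepsize-block region, and conv=sparse skips writing all-zero blocks.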
seq 0 "$stepsize" "$((inputsize / blocksize))" \
    | xargs -P "$parallel" -I{} \
        dd if="$inputfile" of="$outputfile" bs="$blocksize" \
           skip={} seek={} count="$stepsize" conv=sparse status=none