#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2023, Kay Pedersen <mail@mkwg.de>
#

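#
# DESCRIPTION:
#	Verify block cloning (copy_file_range) behaviour across encrypted
#	datasets.  Cloning must fall back to an ordinary copy whenever the
#	source and the destination do not share the same master key.
#
# STRATEGY:
#	1. Clone a file across two different encryption roots.
#	2. Clone a file from a parent dataset into its child datasets.
#	3. Clone a file from a snapshot of the same dataset.
#	4. Clone a file from a snapshot of a different encryption root and
#	   verify the result survives destroying that snapshot.
#
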
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib

verify_runnable "global"

if is_linux && [[ $(linux_version) -lt $(linux_version "5.3") ]]; then
	log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3"
fi

claim="Block cloning across encrypted datasets."

log_assert $claim

DS1="$TESTPOOL/encrypted1"
DS2="$TESTPOOL/encrypted2"
PASSPHRASE="top_secret"

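# Create a pool with block cloning enabled, two encryption roots and two
# child datasets, then write the test files and sync them out to disk.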
function prepare_enc
{
	log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
	log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
	    "-o keyformat=passphrase $DS1"
	log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
	    "-o keyformat=passphrase -o keylocation=prompt $DS2"
	log_must zfs create $DS1/child1
	log_must zfs create $DS1/child2

	log_note "Create test file"
	# We must wait until the txg with the source file has been written to
	# disk, otherwise copy_file_range falls back to a normal copy.  See
	# dmu_read_l0_bps() in module/zfs/dmu.c and zfs_clone_range() in
	# module/zfs/zfs_vnops.c.
	log_must dd if=/dev/urandom of=/$DS1/file bs=128K count=4
	log_must dd if=/dev/urandom of=/$DS1/child1/file bs=128K count=4
	log_must sync_pool $TESTPOOL
}

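# Destroy the test pool (and everything in it) created by prepare_enc.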
function cleanup_enc
{
	datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}

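# clone_and_check srcfile dstfile srcds dstds expected_blocks clone [snapshot]
#
# Copy (or clone) srcfile from srcds into dstds as dstfile, verify both files
# have the same content and compare the blocks they share on disk against
# expected_blocks.  When a snapshot name is given, the source file is read
# from the .zfs/snapshot directory of the source dataset.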
function clone_and_check
{
	I_FILE="$1"
	O_FILE=$2
	I_DS=$3
	O_DS=$4
	SAME_BLOCKS=$5
	# The CLONE flag chooses between copy_file_range (which should clone
	# the blocks) and dd (which always performs a plain copy).
	CLONE=$6
	SNAPSHOT=$7
	if [ ${#SNAPSHOT} -gt 0 ]; then
		I_FILE=".zfs/snapshot/$SNAPSHOT/$1"
	fi
	if [ $CLONE ]; then
		log_must clonefile -f "/$I_DS/$I_FILE" "/$O_DS/$O_FILE" 0 0 524288
	else
		log_must dd if="/$I_DS/$I_FILE" of="/$O_DS/$O_FILE" bs=128K
	fi
	log_must sync_pool $TESTPOOL

	log_must have_same_content "/$I_DS/$I_FILE" "/$O_DS/$O_FILE"

	if [ ${#SNAPSHOT} -gt 0 ]; then
		I_DS="$I_DS@$SNAPSHOT"
		I_FILE="$1"
	fi
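	# get_same_blocks (from block_cloning.kshlib) lists the blocks the two
	# files share on disk; an empty list means nothing was cloned.  The
	# passphrase is forwarded for the encrypted datasets.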
	typeset blocks=$(get_same_blocks \
	    $I_DS $I_FILE $O_DS $O_FILE $PASSPHRASE)
	log_must [ "$blocks" = "$SAME_BLOCKS" ]
}

log_onexit cleanup_enc

prepare_enc

log_note "Cloning entire file with copy_file_range across different" \
    "encryption roots, should fall back to a copy"
# We expect no blocks to be shared.
clone_and_check "file" "clone" $DS1 $DS2 "" true
log_note "Check that the cloned file is still readable and unchanged after" \
    "unmounting the source dataset and unloading its key"
typeset hash1=$(md5digest "/$DS1/file")
log_must zfs umount $DS1
log_must zfs unload-key $DS1
typeset hash2=$(md5digest "/$DS2/clone")
log_must [ "$hash1" = "$hash2" ]

cleanup_enc
prepare_enc

log_note "Cloning entire file with copy_file_range across different child datasets"
# Cloning shouldn't work because each child dataset has its own master key,
# so we expect no blocks to be shared.
clone_and_check "file" "clone" $DS1 "$DS1/child1" "" true
clone_and_check "file" "clone" "$DS1/child1" "$DS1/child2" "" true

cleanup_enc
prepare_enc

log_note "Cloning entire file with copy_file_range from a snapshot of the" \
    "same dataset"
log_must zfs snapshot -r $DS1@s1
log_must sync_pool $TESTPOOL
log_must rm -f "/$DS1/file"
log_must sync_pool $TESTPOOL
clone_and_check "file" "clone" "$DS1" "$DS1" "0 1 2 3" true "s1"

cleanup_enc
prepare_enc

log_note "Copying entire file with copy_file_range from a snapshot of a" \
    "different encryption root, should fall back to a copy"
clone_and_check "file" "file" $DS1 $DS2 "" true
log_must zfs snapshot -r $DS2@s1
log_must sync_pool $TESTPOOL
log_must rm -f "/$DS1/file" "/$DS2/file"
log_must sync_pool $TESTPOOL
clone_and_check "file" "clone" "$DS2" "$DS1" "" true "s1"
typeset hash1=$(md5digest "/$DS2/.zfs/snapshot/s1/file")
log_note "Destroy the snapshot and check that the copied file is still" \
    "readable and has the same content"
log_must zfs destroy -r $DS2@s1
log_must sync_pool $TESTPOOL
typeset hash2=$(md5digest "/$DS1/clone")
log_must [ "$hash1" = "$hash2" ]

log_must sync_pool $TESTPOOL

log_pass $claim